././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2216678 manila-21.0.0/0000775000175000017500000000000000000000000013056 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/.coveragerc0000664000175000017500000000014700000000000015201 0ustar00zuulzuul00000000000000[run] branch = True source = manila omit = manila/test* concurrency = eventlet [report] precision = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/.pre-commit-config.yaml0000664000175000017500000000157500000000000017347 0ustar00zuulzuul00000000000000--- repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.6.0 hooks: - id: trailing-whitespace - id: mixed-line-ending args: ['--fix', 'lf'] exclude: '.*\.(svg)$' - id: check-byte-order-marker - id: check-executables-have-shebangs - id: check-merge-conflict - id: debug-statements - id: check-yaml files: .*\.(yaml|yml)$ exclude: '^(zuul.d|rally-jobs)/.*$' - repo: https://github.com/PyCQA/doc8 rev: v1.1.1 hooks: - id: doc8 args: ['--ignore', 'D001'] - repo: https://github.com/openstack/bashate rev: 2.1.1 hooks: - id: bashate args: ['--ignore', 'E006,E042,E043'] - repo: https://opendev.org/openstack/hacking rev: 6.1.0 hooks: - id: hacking additional_dependencies: [] exclude: '^(doc|releasenotes|tools)/.*$' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/.pylintrc0000664000175000017500000001253600000000000014732 0ustar00zuulzuul00000000000000[MASTER] # A comma-separated list of package or module names from where C extensions may # be loaded. Extensions are loading into the active Python interpreter and may # run arbitrary code. extension-pkg-whitelist= # Add files or directories to the blacklist. They should be base names, not # paths. 
ignore=CVS,tests,test # Add files or directories matching the regex patterns to the blacklist. The # regex matches against base names, not paths. ignore-patterns= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the # number of processors available to use. jobs=1 # Control the amount of potential inferred values when inferring a single # object. This can help the performance when dealing with large functions or # complex, nested conditions. limit-inference-results=100 # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins= # Pickle collected data for later comparisons. persistent=yes # Specify a configuration file. #rcfile= # When enabled, pylint would attempt to guess common misconfiguration and emit # user-friendly hints instead of false-positive error messages. suggestion-mode=yes # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. Leave empty to show # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. confidence= # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once). You can also use "--disable=all" to # disable everything first and then reenable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use "--disable=all --enable=classes # --disable=W". 
disable= # "F" Fatal errors that prevent further processing import-error, # "I" Informational noise locally-disabled, c-extension-no-member, # "E" Error for important programming issues (likely bugs) no-member, too-many-function-args, not-callable, assignment-from-none, unsubscriptable-object, used-prior-global-declaration, not-an-iterable, # "W" Warnings for stylistic problems or minor programming issues unused-argument, bad-indentation, unused-variable, useless-else-on-loop, pointless-string-statement, unused-import, redefined-outer-name, redefined-builtin, attribute-defined-outside-init, abstract-method, fixme, exec-used, anomalous-backslash-in-string, broad-except, protected-access, arguments-differ, undefined-loop-variable, try-except-raise, global-statement, super-init-not-called, pointless-statement, global-statement, unnecessary-lambda, keyword-arg-before-vararg, deprecated-method, useless-super-delegation, eval-used, wildcard-import, reimported, expression-not-assigned, cell-var-from-loop, signature-differs, # "C" Coding convention violations missing-docstring, invalid-name, wrong-import-order, len-as-condition, wrong-import-position, bad-continuation, too-many-lines, misplaced-comparison-constant, bad-mcs-classmethod-argument, ungrouped-imports, superfluous-parens, unidiomatic-typecheck, consider-iterating-dictionary, bad-whitespace, dangerous-default-value, line-too-long, consider-using-enumerate, useless-import-alias, singleton-comparison, # "R" Refactor recommendations no-self-use, no-else-return, too-many-locals, too-many-public-methods, consider-using-set-comprehension, inconsistent-return-statements, useless-object-inheritance, too-few-public-methods, too-many-boolean-expressions, too-many-instance-attributes, too-many-return-statements, literal-comparison, too-many-statements, too-many-ancestors, literal-comparison, consider-merging-isinstance, too-many-nested-blocks, trailing-comma-tuple, simplifiable-if-statement, consider-using-in, 
consider-using-ternary, too-many-arguments [REPORTS] # Tells whether to display a full report or only the messages. reports=no [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching neutron-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins=_ [TYPECHECK] # List of module names for which member attributes should not be checked ignored-modules=six.moves,_MovedItems,alembic.op ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/.stestr.conf0000664000175000017500000000005600000000000015330 0ustar00zuulzuul00000000000000[DEFAULT] test_path=./manila/tests top_dir=./ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315601.0 manila-21.0.0/AUTHORS0000664000175000017500000004654700000000000014146 0ustar00zuulzuul00000000000000119Vik Abhilash Divakaran Accela Zhao Adam Harwell Akshai Parthasarathy Aleks Chirko Alex Deiter Alex Meade Alex O'Rourke Alex O'Rourke Alexander Deiter Alexander Epaneshnikov Alexander Job Haskins Alexey Khodos Alexey Ovchinnikov Alfredo Moralejo Alin Balutoiu Alyson Rosa Amir Nikpour Amit Oren Andrea Frittoli (andreaf) Andrea Frittoli Andrea Ma Andreas Jaeger Andreas Jaeger Andrei Ta Andrei V. 
Ostapenko Andrew Kerr Andrey Kurilin Anh Tran Ankit Agrawal Anthony Lee Arjun Kashyap Arne Wiebalck Arnon Yaari Artem Goncharov Arturo Borrero Gonzalez Ashley Rodriguez Atsushi SAKAI Ben Swartzlander Ben Swartzlander Ben Swartzlander Bertrand Lallau Besjana Gjika Bill Owen Bin Zhou Bob Callaway Bob-OpenStack <295988511@qq.com> Brian Rosmaita BubaVV Béla Vancsics Caique Mello CaiqueMello Cameron Kolodjski Cao Xuan Hoang Carlos Eduardo Carlos da Silva Cedric Zhuang Chandan Kumar ChangBo Guo(gcb) Chaozhe.Chen Che, Roger Chris Yang Christian Berendt Chuan Chuan Miao Chuan Miao Chuck Short Ciara Stacke Clifford Clifford Emeka Clinton Knight Cloud User Colleen Murphy Corey Bryant Csaba Henk Cyril Roelandt Dai Dang Van Dan Sneddon Daniel Gonzalez Daniel Mellado Daniel Russell Daniel Stelter-Gliese Danny Al-Gaaf Daria Kobtseva Davanum Srinivas Dave Hill David Caro David Disseldorp David Sariel Deepak C Shetty Deepika Gupta Deliang Fan Diem Tran Dina Saparbaeva Dirk Mueller Dmitriy Rabotyagov Dmitry Bogun Dmitry Galkin Doug Hellmann Douglas Viroel Duan Jiong Dustin Schoenbrun Eduardo Olivares Eduardo Santos Elias Wimmer Elod Illes Elvis Acheampong Emilien Macchi Eric Harney Fabio Oliveira Felipe Rodrigues Felipe Rodrigues Fernando Ferraz Flavio Percoco Gaurang Tapase George Melikov Gerardo Gonzalez Ghanshyam Mann Gireesh Awasthi Goutham Pacha Ravi Goutham Pacha Ravi Graham Hayes Gray Lutalo Gábor Antal Ha Van Tu Harshada Mangesh Kakad Helen Walsh Helena Dantas Hervé Beraud Hiroyuki Eguchi Hongbin Lu Ian Wienand Igor Malinovskiy Iswarya_Vakati Ivan Anfimov Ivan Kolodyazhny James E. 
Blair James Page James Page Jan Provaznik Jan Vondra Javier Pena Jay Mehta Jay Xu Jeremiah Dabney Jeremy Liu Jeremy Stanley Jiao Pengju Joe Gordon Joel Capitao Johannes Kulik John Spray John Spray JonathanKoerber Jordan Pittier Jose Castro Leon Jose Falavinha Jose Porrua Joshua Cornutt Juan Antonio Osorio Robles Julia Varlamova Kafilat Adeleke Kamil Rykowski Keerthivasan Ken'ichi Ohmichi Kiran Pawar Kuirong.Chen Lance Bragstad Li Wei Li, Chen Lin Yang LinPeiWen <591171850@qq.com> LiuNanke Liyankun Longgeek Lori Ruffing Lucas Oliveira Lucian Petrut Lucio Seki Lucky samadhiya Luigi Toscano Luis Pabón Luisa Amaral Luiz Santos LuizSantos Lukas Bezdicka Luong Anh Tuan M V P Nitesh Manideep Maddileti Manish Honap Marc Koderer Marc Solanas Tarre Maria Gallego Mariusz Adamski Mark McLoughlin Mark Sturdevant Mark Sturdevant Martin Kletzander Marty Turner Masaki Matsushita Matheus Andrade MatheusAndrade777 Matt Riedemann Maurice Escher Maurice Schreiber Maysa Macedo Megharth MelloCaique Michael Arndt Michael Krotscheck Michael Still Mike Bayer Miriam Yumi Mohammed Naser Monty Taylor Nahim Alves de Souza Naresh Kumar Gunjalli Ngo Quoc Cuong Nguyen Hai Truong Nguyen Hung Phuong Nguyen Phuong An Nguyen Van Trung Nicolas Trangez Nilesh Bhosale Nilesh Thathagar Nishant Kumar OTSUKA, Yuanying Okeke Christian Ondřej Nový OpenStack Release Bot Pan Pavlo Shchelokovskyy Pengju Jiao Pete Zaitcev Peter Wang Petr Kuběna Pierre Riteau Ponomaryov Valeriy Pony Chou Prudhvi Rao Shedimbi Quique Llorente Rafael Rivero Raffaela Cunha Raffaela de Castro Cunha Raissa Sarmento Ralf Rantzau Ram Raja Ramana Raja Ramy Asselin Ratnakaram Rajesh Ravichandran Nudurumati Renan Vitor Renan Vitor Rich Hagarty Rishabh Dave Rob Esker Rodrigo Barbieri Rodrigo Barbieri Rodrigo Barbieri Romain Dupont Ronald Bradford Rui Yuan Dou Rushil Chugh Ryan Hefner Ryan Liang Ryan Liang Saikumar Pulluri Saju.Madhavan Sam Wan Saravanan Manickam Sascha Peilicke Sean McGinnis Sean McGinnis Sean McGinnis Sebastian Lohff Sergey 
Vilgelm Shaohui Wang Shatadru Bandyopadhyay Shaun Edwards Shaun Edwards Shuquan Huang Silvan Kaiser Simon Dodsley Simon Dodsley SolKuczala Solly Stefan Nica Stephen Finucane Stephen Gordon Steve Kowalik Sumit Kumar Sun Jun Surya Ghatty Swapnil Kulkarni (coolsvap) Sylvan Le Deunff Takashi Kajinami Takashi Kajinami Takashi NATSUME Takashi Natsume Thierry Carrez Thomas Bechtold Thomas Goirand Tiago Pasqualini Tin Lam Tina Tina Tang Tobias Urdin Tom Barron Tom Patzig TommyLike Tony Faijue Ubuntu Valeriy Valeriy Ponomaryov Vasyl Saienko Victor Sergeyev Victoria Martinez de la Cruz Vida Haririan Vijay Bellur Vincent Untz Vitaliy Levitksi Vivek Soni Vladimir Vechkanov Volodymyr Boiko Vu Cong Tuan Woohyung Han Xiaoyang Zhang XieYingYun Xing Yang Yaguang Tang Yang Wei Yatin Kumbhare Yian Zong Yiping Huang Yogesh Yong Huang Youngjun Your Name YuYang Yulia Portnova Yulia Portnova Yusuke Hayashi Yuval Brik Zachary Goggin Zhao Lei ZhiQiang Fan Zhiteng Huang ZhongShengping Zhongyue Luo agireesh andrebeltrami andrebeltrami archanaserver arthurnsantos ashrod98 binean bswartz caowei caoyuan carloss chao liu chen-li chenaidong1 chengebj5238 chenghuiyu chenxing chuan137 cuiyeliu daiki kato danielarthurt darkwsh debeltrami dengzhaosen digvijay2016 dingd dongdongpei dtapia dviroel ericxiett fpxie gaofei gecong1973 gengchc2 ghanshyam guotao.bj gyh haixin haixin haobing1 hongp houming-wang howardlee hparekh huayue hulina huwenhui huyang inspur-storage janonymous jason bishop jayaanand borra jayaanand.borra@netapp.com jeckxie ji-xuepeng jiaqi07 jinxingfang junboli katie kavithahr kedy klindgren kpdev kutner leiyashuai li,chen lijunbo lijunjie linpeiwen linpeiwen liucheng liujiong liuke2 liumengwen liushi liushuobj liuyamin liviacavalcanti liyanhang ljhuang lkuchlan luqitao maaoyu maaritamm maqi marcusvrn mark.sturdevant mark.sturdevant mayurindalkar melakualehegn melissaml nidhimittalhada niuke npraveen35 olamidepeterojo pangliye paulali pawnesh.kumar pengdake <19921207pq@gmail.com> 
pengyuesheng peter_wang rajat29 renanpiranguinho renanv ricolin scottda shangxiaobj shaoxj sharat.sharma shenxindi shews shubhendu silvacarloss silvacarloss silvacarloss smcginnis snpd songwenping sonu.kumar soyoon-lee stack stack sunjia sunjiazz tclayton ting.wang tpsilva tutkuna ubu venkatamahesh vponomaryov vrushti wang yong wangdequn wangqi wangqiangbj weiting-chen whhan91 whoami-rajat wlhc <1216083447@qq.com> wu.shiming wudong xiaozhuangqing xing-yang xinyanzhang <236592348@qq.com> xuanyandong xulei xuleibj xurong00037997 yanghuichan yangweiwei yangyapeng yanjun.fu yatinkarel yfzhao yfzhao yogesh yuhui_inspur zengyingzhe zhang.lei zhangbailin zhangdaolong zhangdebo zhangdebo1987 zhangguoqing zhangqing zhangqing zhangshj zhangxuanyuan zhangyangyang zhangyanxian zhangyanxian zhaohua zhengwei6082 zhiguo.li zhongjun zhongjun2 zhongjun2 zhu.boxiang zhufl zzxwill 홍인용 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/CONTRIBUTING.rst0000664000175000017500000000125200000000000015517 0ustar00zuulzuul00000000000000The source repository for this project can be found at: https://opendev.org/openstack/manila This repository is mirrored to GitHub at: https://github.com/openstack/manila Pull requests submitted through GitHub are not monitored. 
To start contributing to OpenStack, follow the steps in the contribution guide to set up and use Gerrit: https://docs.openstack.org/contributors/code-and-documentation/quick-start.html Bugs should be filed on Launchpad: https://bugs.launchpad.net/manila For more specific information about contributing to this repository, see the Manila contributor guide: https://docs.openstack.org/manila/latest/contributor/contributing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315600.0 manila-21.0.0/ChangeLog0000664000175000017500000051524700000000000014646 0ustar00zuulzuul00000000000000CHANGES ======= 21.0.0 ------ * Improve scheduler performance on thin provisioning * Update TOX\_CONSTRAINTS\_FILE for stable/2025.2 * Update .gitreview for stable/2025.2 21.0.0.0rc1 ----------- * Add reno for x-openstack-request-id header * Add option to select keystone endpoint interface * Handle Neuton Subnet Full Exception * Return X-OpenStack-Request-Id header in response * Avoid conflicting update of provisioned\_capacity\_gb * Support region\_name for identity API access * Add missing auth plugin options in [barbican] section * Refactor key manager code * use os cli for shared-file-crud-share.rst * Add barbican set up to devstack * Fix duplicate registration of barbican options * Fix versioning for out of place restore * doc: Add explanation about neutron\_physical\_net\_name * Add support for out of place share backup restores * Remove url tags from README * Use encryption key ref during share create * [NetApp-REST] Fixing basic share creation failure * [NetApp] Barbican share-server/share encryption support * Update get\_snapshot to return 404 if snapshot is inaccessible * [NetApp-ZAPI] Fixing CIFS share creation workflow * Fix ensure shares operation * Dell PowerScale: Add space in the log (follow-up) * Extend request context from\_dict method from oslo context instead of overriding * Add extra spec 
'netapp:max\_files\_multiplier' * Dell PowerScale: Rename Isilon to PowerScale in Manila Driver * Fix server instance subnet cleanup * Dell PowerScale: Added support of thin provisioning * Dell PowerScale: add support for ensure shares * Dell PowerScale: add support for update share stats * Dell PowerScale: Http auth issue, SSL verification error and etc * Remove use of exception\_to\_unicode * Drop explicit executor argument * Fix CephFS support matrix table formatting * Run fips-py311 job * Make create\_service\_share\_servers wait until VM is created * Netapp - Enable logical space reporting * NetApp - Preserve custom snapshot policies * [NetApp] Add config option for aes-encryption for cifs shares * Drop unused doc8 from doc requirements * os-api-ref is only a doc requirement * Imported Translations from Zanata * Dell Unity: Fix driver startup issue with LACP configured * Fix: escape special characters in name~ filter for literal search * sqlalchemy: Use built-in declarative * Replace deprecated assertItemsEqual * Remove Python 3.9 support * [NetApp] guard already existing cifs access * Drop unused imports * NetApp: Fix vserver create REST API * add guard to qualified\_replica * Netapp: Removes stanza check * Allow clean service shutdown * Make grenade skip level job non-voting temporarily * NetApp cDOT only modify qos policy if troughput changed * [doc] Update the "lock\_visibility" description in the POST grant access API * NetApp: delete vlan even if ipspace is reused * improve db calls in periodic replica tasks * [DOC] Update Ceph Testing Matrix * setup: Remove pbr's wsgi\_scripts * devstack: Remove MANILA\_USE\_UWSGI * devstack: Remove MANILA\_USE\_MOD\_WSGI * Add pyproject.toml * Remove installation guide for openSUSE/SLES * Fix NFS-Ganesha crashes when updating CephFS access rules * updated share group type doc * NetApp: Fix export location during replica promote * Add new manila.wsgi module * Updated documentation to openstack commands * Fix CLI 
Commands in async failures guide * use openstack cli commands * Update CLI commands in share basic operations documentation * Fixes CLI documentation errors for manila-manage * Fix 'server\_migrating' status of non-active replica * added API token based auth * Add mount snapshot support to user guide * Add max api microversion annotation * Pure Storage - bump version number for 2025.2 (Flamingo) * Update master for stable/2025.1 * fix volume resize issue when host not recognized * netapp: Use oslo.config feature to describe sample value 20.0.0.0rc1 ----------- * Doc converte for manila share list to openstack cli * Doc convert for share snapshot create from manila command * addressed outdated documentation * Converted manila snapshot-list to openstack cli command * Fixed the remaining review comments for patch #928608 * Updated NetApp certificate auth options help section * NetApp - Restrict LIF Creation Per HA Pair * Actually try to log in via SSH to validate server * NetApp: Implement share network subnet metadata update method * Pass share network subnet metadata updates to backend drivers * Drop direct test dependency on iso8601 * [NetApp] Certificate based authentication for NetApp drivers * [devstack] cleanup iptables rules * Don't override nfs protocols w/ CephFS * Fix access rule update * Add missing API version description * Ignore not found exception during port delete * [doc] grant access rule response code * Drop redundant pep8 scripts * Skip installation to speed up pep8 * [cephfs] Fix access rule update * Drop duplicated choices * Drop broken manila-all script * Allow filtering shares on mount\_point\_name property * Allow providing mount\_point\_name when creating volume from snapshot in NetApp driver * purestorage: Drop unused HAS\_PURITY\_FB flag * Adapt to nova api 2.89 and skip some cinder calls * Deprecate options used solely by Ceph NFSProtocolHelper * Support nova api 2.47 and newer * doc: Use dnf instead of yum * Allow to update access level 
of access rule * Dell EMC: Refactor redundant if-elas block * Update cephfs driver to return provision capacity * Imported Translations from Zanata * Add new policy \`list\_all\_projects\` for share/share-snapshot * [Netapp] make deleted volume retention period configurable * Updates the share extend and shrink documentation * Improve get all instances with share data * reno: Update master for unmaintained/2023.1 * fix manila to use device uuid instead of name for mounting * Replace deprecated FormatChecker.cls\_checks * vast: return all available export locations * Make default mount point prefix configurable, and allow empty prefix * updated VAST driver documentation * Add periodic task that detects down services * Drop duplicate hacking check of thrid party mock * Fix access rule visibility locks * [netapp] Allow share server migration with replicas * Remove default override for config options policy\_file * Add ensure shares admin docs * Fix docs job reference issues * Skip functional tests on pre-commit config update * Replace deprecated functions in datetime * Drop ineffective required=True * Enable manila-grenade * devstack: Fix missing manila command * decorate all missing \*\_update methods with db\_retry on deadlock * [doc] Add format\_output option in documentation * Add ability to trace SQL transactions with profiler * Improve unit tests in NetApp Driver * Require valid value for endpoint\_type * tests: Actually raise on invalid schemas * [Doc] Annotate max api microversions (Caracal/Dalmatian) * [netapp] remove last-transfer-size check for updating replica state * Pure Storage - bump version numbers for 2025.1 (Epoxy) * Imported Translations from Zanata * Remove Python 3.8 support * NetApp: fix delay clone split when creating share from snapshot (#124) * Fix release name in docs * Remove workaround for eventlet bug #592 * [doc] Driver updatable metadata * Fix share server migration not reusing allocations * Imported Translations from Zanata * Update 
share/snapshot instance deferred deletion * Add api-ref for ensure shares API * Add CephFS Manage/unmanage documentation * Remove extra policy check * [NetApp] Fixed share creation workflow via REST * Drop SQLALCHEMY\_WARN\_20 * Update master for stable/2024.2 * Add exception handling to share server deletion * Fix "preferred" export locations in the CephFS driver * Fix export location metadata updates by drivers 19.0.0.0rc1 ----------- * Fix export locations update when subnet is added * Fix the remaining review comments from SnapLock patch * [api-ref] fix resource lock api reference * Fix broken release note rendering * [NetApp] SnapLock support for NetApp driver * Metadata for Share Export Location * svm migration across physical networks with multiple port bindings * [NetApp] Custom storage efficiency policy support for Manila share * Imported Translations from Zanata * API for ensure shares * Add json and yaml formatting to manila-manage * Allow scheduling to disabled manila-share host * Python 3.13: do not use removed module "pipes" * Manage/unmanage for CephFS drivers * Allow drivers to set share size after revert to snapshot * NetApp: don't check kerberos config on mgmt LIF * NetApp: Implement share metadata update method * Lock shares when creating a locked access rule * Create ports as disabled with external networks * Remove default override for RBAC config options * pre-commit: Bump versions * api: Add schemas for messages * Initialize profiler after ProcessLauncher fork * update requirements for VAST manila driver * Pass share metadata updates to backend drivers * OpenStack Shares Backup management documentation * Human readable export location documentation * Imported Translations from Zanata * Add share driver for VastData storage * [NetApp] Fix FlexGroup share creation while configuring dedup/compression * Handle race condition for share server delete * Netapp: Retry Dedupe enable/disable if busy * Explicitly decode with utf-8 in validation helpers 
* Fixed bug 2069125: Manila driver error with ONTAP SVM-scoped user * bug fix: error message fix * Add OpenAPI generation job * [Netapp] Fix soft\_delete\_snapshot get children case * api: Add descriptions to API schemas * Bump pylint version and fix pylint issues * Dell PowerFlex: Set default value for port config * api: Add schemas for availability\_zones * tests: Ensure API schemas are actually valid * tests: Ensure APIs have schemas * Fix leak of ports on share server deletion * Fix share/snapshot show for deferred deletion * [NetApp] Pause and resume clone split during rename snapshot * api: Add schemas for resource\_locks * [CI] Grenade job changes for 2024.2 release cycle * Applies nosec in Dell EMC eNAS * Specify bandit rule to nosec in ZFSSA driver * Fixes for Bandit Issues in Nexenta Drivers * api: Add validation middleware * QNAP bandit follow up * reno: Update master for unmaintained/zed * Refactor mockup file load * Replace deprecated xml.etree.cElementTree * Remove logic for Python 2 compatibility * Remove fallback for Python 2 * Optimize deferred deletion share instance get query * Remove old excludes * Enable Bandit testing in Manila * Replaces qnap driver XML with defusedxml * Fix the backup restore issue for NetApp driver for REST client * Adds nosec comments to Ganesha drivers * Adds usedforsecurity=False to NetApp ONTAP MD5 hashes * Ensure password values are not logged * Adds "usedforsecurity=False" to veritas drivers * RBAC: Enable "new" defaults and scope checks * Adds timeouts to Quobyte driver file * Remove SQLAlchemy tips jobs * Adds usedforsecurity=False to HPE 3PAR driver * Add job to test with SQLAlchemy master (2.x) * Adds nosec comment to ZFSSA driver * tests: Don't (always) auto-create snapshot instances * Add delete CLI helper to ShareCommands * db: rename\_table is not a batch operation * [doc] Quota paramaters have moved to the [quota] section * db: Don't use strings to indicate relationship names * db: Remove unnecessary warning 
filter * Replaces Huawei driver XML with defusedxml * db: turn off autocommit * db: update migrations to use SQLA 2.0 patterns * Fix backup delete issue when source and destination vserver are same * db: Remove final use of legacy facade * db: Post-migration cleanup * db: Remove 'get\_session' * db: Remove session arguments from AZ methods * db: Migrate "share" APIs to enginefacade * db: Migrate "share snapshot metadata" APIs to enginefacade * db: Migrate "share snapshot export location" APIs to enginefacade * db: Migrate "share \* access" APIs to enginefacade * db: Migrate "share snapshot", "share snapshot instance" APIs to enginefacade * [doc] Mount options in the ceph native driver * [DOC] Add subnet metadata to user guide * [DOC] Add snapshot metadata to user guide * db: Unwind unnecessary independent transactions * Use config validation to reject invalid max\_over\_subscription\_ratio * [doc] add instructions regarding healthchecks * [doc] Admin only modifiable metadata * Fix backup creation errors with NetApp driver * Update master for stable/2024.1 18.0.1 ------ * Add more tests for share/snapshot deferred deletion * Fix the link to get the next share replica * Human readable export location NetApp driver changes * refactoring: modify function parameters and annotations to match * Deprecate the Ceph NFSProtocolHelper * Add support for share/snapshot deferred deletion * Share backups enhancement * Implement the share backup for NetApp driver * [doc] Update CephFS admin guide * Add doc for service disable reason * Forbid resetting state of active replicas * Change admin metadata config option name * Human readable export location core implementation * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria * Deprecate Windows SMB share driver * Add cephfs filesystem to shares metadata * Add disabled reason field to service * Drop reference to WalkVersionsMixin from oslo.db * Add a 
new config option to specify admin metadata * [CI] Drop glusterfs jobs * reno: Update master for unmaintained/yoga * is\_valid\_ip\_address: Catch non-string values early * db: Fix charset updates in migrations * Deprecate untested Tegile share driver * Remove unused oslo.db.concurrency namespace options * Deprecate GlusterFS (Native) Driver * [CI] Grenade testing updates * tests: has\_calls -> assert\_has\_calls * Conditional Import for FIPS Compliance * Retry on connection error to neutron * Changed user message, and removed unnecessary log message * Dell EMC: Deprecate VNX driver * Dell EMC: Drop "vmax" alias * api: Dump config options when deployed as a wsgi app * Change misleading user message when user services are down * Fixing skipped default override for CORS options * Bump hacking * Clean up removed services from devstack options * Drop upgrade scripts for old releases * Add new feature guidelines documentation * Drop logic for Ubuntu 14.04 * Update supported python versions * Remove unnecessary setup\_hook * Python 3.12: use assertAlmostEqual * NetApp: Stop clone split while deleting child share * [Pure Storage] Add support for multiple data VIPs * TrivialFix: Fix arguments on a mock of \_\_init\_\_ * Pure Storage - bump version numbers for 2024.1 (Caracal) * Fix typo in the release note for bug 2038607 * db: Migrate "share instance", "share replica" APIs to enginefacade * db: Remove unnecessary helper * Change status and error handling for /shares API * update irc dcoumentation * Don't fail remove-export in NFS-Ganesha * Fix BadRequest from share-server-migration-get-progress * Fix python shebang * Fix policy check in metadata APIs * Fix parsing of protocol access mapping options * Delete share network subnet during network deletion * [Doc] Add a quick and dirty IRC guide * [Doc] Annotate max api microversion in Bobcat * Fix missing manila-data options * Add service\_network\_host option for port host * NetApp Derive vserver name from share server 
identifier * NetApp API failed. Reason - 18177:Relationship is in use by SnapMirror in local cluster * tests: Add 'reason' argument to exceptions * db: Remove unnecessary wrapper class * db: Migrate remaining "transfer" APIs to enginefacade * db: Migrate "transfer" APIs to enginefacade * db: Add '\_share\_update', '\_share\_instance\_update' * db: Convert '\_ensure\_availability\_zone\_exists' to decorator * db: Migrate "export locations", "export location metadata" APIs to enginefacade * Respect provided "host" when plugging ports * Use pre-commit for 'pep8' tox target * [pep8][black] Fix new formatting issues in code * db: Add '\_share\_instance\_get' * db: Add '\_share\_get' * db: Rename 'share\_export\_location\_\*' to 'export\_location\_\*' * db: Rename 'share\_instances\_\*' to 'share\_instance\_\*' * db: Rename some methods * [RBAC] Enforce check for share updates * [NetApp] Add exception for insufficient privilege or incorrect credentials * Fix share network delete procedure * [rbac] Pull up policy checks on share/snapshot APIs * Fix py311 unit test issues * [NetApp] Fix ipspace naming is confusing * Fixes share server manager match of identifiers * Fix count in the response of shares/snapshots list API * Update master for stable/2023.2 17.0.0 ------ * TrivialFix: Fix typo in release note * Add preferred info to ceph nfs export locations * Implement Ensure Shares in the CephFS driver * Support reapplying rules during ensuring * Dell PowerStore and PowerFlex Manila drivers enhancement * Fix openstack-tox-py311 job * Revert "Improve scheduler performance when thin provisioning" * Add default to read\_deleted in context's from\_dict * Add NetApp Active IQ scheduler weigher * Follow-up access rules restrictions change * [Pure Storage] Fix connection issue on network failure * Update Share backup APIs and add api ref * Allow restricting access rules fields and deletion * TrivialFix: Don't warn on legitimate all\_projects usage * Adds a new Manila driver for Dell 
PowerFlex storage backend * Resource Locks: Support for share deletion lock * Include share\_type information in notifications * Adds a new Manila driver for Dell PowerStore storage backend * Turn off global venv in rally jobs * Improve scheduler performance when thin provisioning * [RFE] NetApp share server migration get progress * Set "updated\_at" field on access rules * [DOC] Add Snapshot Metadata Documentation * Validate provider\_location while managing snapshot * [api-ref] add provider\_location to snapshots * Implement share backup * NetApp: Improve REST API cover and fix internals * [NetApp] Fix share server migration * [API] Validate display name/description length * [NetApp] Recreate security cert during vserver create * [NetApp] Fixed replica promotion to updated autosize attributes * Fix error message from share server API * [NetApp] Fix NetApp driver create from snapshot * Update timedelta and old schedules as per netapp\_snapmirror\_schedule * Add count info in 'snapshot list' API * Handle access rules on share replicas * skip periodic update on replicas in 'error\_deleting' * [Doc] Annotate max api microversion in Antelope * Fix duplicate entries in share\_server\_backend\_details * Stop overriding auth/identity options of tempest * [DOC] Add doc for Scheduler Hints * [DOC] Add doc for Share Transfer * Add pre-commit * Remove trailing spaces, convert dos2unix * Add max share extend size limited by share\_type * Update the generic driver 'Known restrictions' section * [api-ref] Enhance quotas per share types doc * Enhance quota documentation * onlyHostFilter: Fix follow-up suggestions * Report PortLimitExceeded error to customer * Imported Translations from Zanata * sqlalchemy: add generic repr to base model * db: Migrate "share metadata" APIs to enginefacade * db: Migrate "share network subnet metadata" APIs to enginefacade * db: Migrate "share network subnet" APIs to enginefacade * db: Migrate "share network" APIs to enginefacade * db: Migrate 
"security service" APIs to enginefacade * db: Migrate "share server" APIs to enginefacade * db: Migrate "availability zone" APIs to enginefacade * db: Migrate "driver private data" APIs to enginefacade * db: Migrate "network allocation" APIs to enginefacade * db: Migrate 'purge\_deleted\_records' API to enginefacade * db: Migrate "share type extra specs" APIs to enginefacade * db: Migrate "share type access" APIs to enginefacade * db: Migrate "share type" APIs to enginefacade * db: Migrate "async operation" APIs to enginefacade * db: Migrate remaining quota APIs to enginefacade * db: Migrate quota sync APIs to enginefacade * db: Migrate "share group", "share group type" APIs to enginefacade * db: Migrate 'share\_resources\_host\_update' helper API to enginefacade * Use DLM when creating service network * [DOC] Add api reference to share network subnet metadata * fix to get soft-deleted objects on db model query * Change the default value for \`is\_public\` for share group type creation * Move skip level job to manila-grenade-skip-level-always * add reno to allowlist\_externals * Pure Storage - bump version numbers for 2023.2 (Bobcat) * [doc] Add sample dummy driver config * adds share server uuid on migration cancel * [NetApp] Fix non-disruptive migration cifs shares * [coordination] backend\_url should be secret * Fix Share Network Subnet Metadata policy error * Remove TripleO job * Check share network for share groups before deletion * Update master for stable/2023.1 16.0.0 ------ * NetApp ONTAP: Fix SnapMirror snapshots not being cleaned up * [NetApp] Fix DHSS mode fails to create share on a new pool * update capacity filter during extend share * [docs] Update documentation for Infinidat driver * Fix db query for service cleanup * [NetApp] Configure VLAN/MTU through subnet metadata * allow overide of basepython with TOX\_PYTHON env * NetApp ONTAP: REST transition - DHSS True functions * NetApp ONTAP: REST transition - Data motion related functions * Add 'state' 
column in 'services' table * tox4 needs allowlist\_externals for fast8 * [CI] Bump timeout for the migrations test case * NetApp ONTAP: REST transition - DHSS False Basic operations * Fix exception in share transfer accept routine * TrivialFix: Fix releasenote file location * check\_exec: /usr/bin/python3 bad interpreter * NetApp: move split job to end * NetApp: Use default\_ad\_site in active directory * Fix wrong assertion methods * Metadata for Share Network Subnet Resource * Prevent failure on get quiesce\_wait\_time * Update micversion to 2.77, support share transfer between project * service instance: Read public ssh key directly * Remove placeholder from upgrade check * Add 'default\_ad\_site' field to security service object * NetApp ONTAP: Implemented REST transition Client * Fix Manila API error message * Add quiesce\_wait\_time option to share replica promote API * Imported Translations from Zanata * [Cephfs] Fix erroneous share mode override on ensure\_shares * NetApp: Disable tunneling in vserver\_exists * db: Migrate "quota usage" APIs to enginefacade * db: Migrate "backend info" APIs to enginefacade * db: Migrate "quota class" APIs to enginefacade * db: Migrate "message" APIs to enginefacade * Add filesystem info to FSAL in CephFS NFS * Drop old notification driver names * Use new get\_rpc\_client API from oslo.messaging * Ignore replicas in error state during allow/deny access * Fix tests for 'share-network' param in share replica create * Fix tox4 errors * db: Migrate "quota" APIs to enginefacade * db: Migrate "service" APIs to enginefacade * db: Prepare 'model\_query' for migration to enginefacade * db: Use oslo\_db.sqlalchemy.enginefacade * tests: Enable SQLAlchemy 2.0 deprecation warnings * test: Add warning fixture * Add config for netapp asynchronous SnapMirror schedule * [NetApp] check snapshot presence after creation * [Infinidat] fixed host assisted migration * [NetApp] catch errors on getting vserver when reusing share server * [NetApp] 
fallback to cifs-server force delete on vserver cleanup * Imported Translations from Zanata * init share server updated\_at field * Shorten snapshots names in CephFS drivers * Set configured\_ip\_versions fact in cephfs driver * Allow setting endpoint\_type for glance * Rename 'nfs\_cluster\_id' to 'cluster\_id' in the NFSProtocolHelper * Raise an exc if no VIP/backend is available * Update manila-grenade-skip-level job for new release * Pure Storage - bump version numbers for Antelope * Macrosan driver: some improvements about variables and syntax * [devstack][RBAC] Update bootstrap user * Fix neutron plugin get network\_type * Imported Translations from Zanata * Switch to 2023.1 Python3 unit tests and generic template name * Update master for stable/zed 15.0.0 ------ * [RBAC] Return 404 if share is inaccessible * [RBAC] Retain legacy admin behaviour * Change to use iproute2 instead of brctl * [NetApp]: Fix issues with managed snapshot * Fix Create From Snapshot with Server Limits * [NetApp] Consider last transfer size and error for replica state * [Doc] Annotate max api microversion in Zed * [Infinidat] bump driver version and update docs * Fix misuse of assertTrue * Metadata for Share Snapshots Resource * Refactor the Ceph NFS driver to use Cephadm NFS * test: Rename Database fixture to DatabaseFixture * Fix compatibility with oslo.db 12.1.0 * Add scheduler\_default\_extend\_filters option * Set DC discovery-mode to 'none' if server is specified in SS * [devstack] Remove setting for tempest plugin * Fix DriverFilter/GoodnessWeigher string evaluations * [NetApp] Fix lack of retry HTTP requests * Add Macrosan Manila Driver * Fix KeyError exceptions in Manila unit tests * [Infinidat] add support for TLS/SSL communication * Migrate GlusterFS to privsep style * Add "share-network" option for replica create API * remove unicode prefix from code * neutron: do not create default route for service subnet if connect\_share\_server\_to\_tenant\_network is on * remove 
unicode from code * Replace abc.abstractproperty with property and abc.abstractmethod * Fix reference to deprecated quota\_driver option * Fix InvalidInput wrong message * Nova: Remove unused server operations * Nova: Remove unused image apis * Remove workaround for Python 3.4.[012] * [NetApp] Fix lack of aggregate pool home state * [DOC] Replace references to policy.json * Add FIPS testing job * Stop logging sensitive login information * Use defined project config of manila-tempest-plugin * Fix manila plugin.sh set backend\_availability\_zone * policy.json is no longer required * Fix available share servers to be reused * Add config option reserved\_share\_extend\_percentage * Update python testing as per zed cycle testing runtime * Increase RAM in manila flavor * Rollback quota when share network create API fails * Simplify composition of processutils.execute kwarg * Fix reference to the debug config option * devstack: Support different ssh key format * Mark Python 3.9 as supported * Add Python3 zed unit tests * Define queues at project level * DOC: fix the uri for share groups * Increase MANILA\_SERVICE\_VM\_FLAVOR\_DISK * Remove admin context check, update unit tests * [Native CephFS] Don't fail to deny missing rules * Drop lower-constraints.txt and its testing * [CI] Install dependencies for docs target * DOC: fix the uri for share group types * Fix the bug of TypeError with JsonFilter * Shares Metadata follow-up patch * Deprecate [DEFAULT] use\_forwarded\_for * Use list values for cephfs\_ganesha\_export\_ips * Add missing \`updated\_at\` field in share-instance show * Fix wrong attribute to find remote address * fix typo in reno of bug-1946990 * Remove unused method from manila/utils * Add Guru Meditation Report to wsgi * Fix headings in the API ref * Pure Storage - bump driver version for Zed * Fix Dummy driver fails to get subnet\_allocations * Fix GaneshaNASHelper update\_access signature * Update master for stable/yoga 14.0.0 ------ * Add missing 
deprecated messages to policy * Fix sporadic test failure in manila * Add validation to share network * DOC: Add neutron binding to network plugins * Fix replica quotas allocation during share migration * [doc] Annotate max api micro version in Yoga * Metadata for Share Resource * Container: Multiple subnets per AZ * [api-ref] Fix missing parameters in share network list * NetApp ONTAP: Add support to multiple subnets per AZ * Add multiple subnets per AZ support * share recycle bin, Fix follow-up suggestions * Migrate LVM driver to privsep * Add grenade-skip-level job * Update micversion to API2.69, Manila share support Recycle Bin * change to valid error status in test\_generic * Check project permissions for share replicas * Implement privsep boilerplate in Manila * Mock tenacity in gluster driver unit tests * Add support of scheduler\_hints in share replica create * Fix quota-set no required parameters on update * remove usage of six library * Add temporary link to patched paramiko * Add Python 3 only classifier * [DOC] Incorrect response code for share network creation * Remove usage of six lib for third party/vendors drivers * Cinder: Remove unused API * Nova: Remove non-existing methods from fake class * Add admin only keys to share metadata * Force disk wipe when running lvcreate * Fix note in the share manager * api-ref: Add share instances by share endpoint * OnlyHostFilter allows user to specify host during share create * ratelimit should not be enabled in nolimit pipeline * Add enforce\_scope setting support for Manila * Set tempest api version config in devstack plugin * remove usage of six library in api layer * Force share server selection on nondisruptive migration * Support group specs search for share group type API * NetApp OnTap: Fix compatibility check for share migrate * Move release notes to correct folders * Move db release note to correct folder * Add api-ref for security service update * Handle successful deletion of snapshot if quota 
commit fails * early return for \_share\_replica\_update() if there is no active replica * [devstack] Use OSC to set up share types * [api-ref] Fix datatype of cast\_rules\_to\_readonly * replace deprecated pyparsing method * Source admin credentials for manila setup * Modify docker instalation for fedora systems * Drop non-ASCII character from manila config * [NetApp] Fix default thin\_provisioned volumes on AFF * [doc] update doc about query user message * [doc] update cli doc about extend share * [doc] Update PTL documentation * Fix glusterfs test bug * [TrivialFix] Fix up a bugfix release note * [api-ref] Add request examples for share replica APIs * Deletes the six library and all its usages from manila/utils.py * [doc] Fix config and install guide for the generic driver * service\_instance: allow overriding the instance name * Adapt CephFS driver to do not try to escape export ip * Fix ignored [neutron] options * Pure Storage FlashBlade Manila - Version increment for Yoga * retry deadlock for share\_type\_destroy * Replace Jenkins with Zuul * Changed 'Jenkins' to 'Zuul' * NetApp: properly use netapp\_server\_port config * Add release notes command to tox * XENA release note regarding scheduler filters * doc: admin: generic\_driver: extend documentation * doc: admin: generic\_driver: fix indentation * PowerMax Manila - Version increment for Xena * Add Python3 yoga unit tests * Update master for stable/xena 13.0.0 ------ * [doc] Add Xena max api version annotation * Optimize the query logic for share network list * Follow up for share server migration enhancements * [NetApp] Add FlexGroup volume support * Share server migration enhancements * [NetApp] Share server migration through SVM migrate * Add Share Affinity/Anti-Affinity Scheduler Filters * [NetApp] Add readable replication type support * Replace retrying with tenacity * Fix api-ref for access rules * [Optimise] Use ThredGroup to manage periodic tasks * [NetApp] Fix list of mandatory services for CIFS 
share creation * [doc] add since and before parameter to message-list cli * [NetApp] Fixed scoped account replica delete * Add config option reserved\_share\_from\_snapshot\_percentage * Extend share will go through scheduler * Add missing [oslo\_reports] options * Add missing oslo.service options * Change cifs value from string to list for Dell manila drivers * Use oslo-config-generator conf to load options from libraries * handle replica state on migration complete * [Pure Storage] Honour the share\_name\_template parameter * Early validate for CIFS without security service * Migrate all quota parameters to [quota] section * Use tox 3.1.1 fixes * Add generic fuzzy matching logic to the database layer * tests: Remove use of 'oslo\_db.sqlalchemy.test\_base' * Update api-ref documentation * Replace deprecated scheduler filter module name * Use List for allowed\_origin * Add Pure Storage FlashBlade to Manila Admin docs * Deprecate create snapshot from share group in CephFS * Replace deprecated import of ABCs from collections * Add missing share-type user message * Replace assertDictMatch with assertDictEqual method in tests * Pass sync\_backref=False to relationship call * Replace oslo\_utils.fnmatch with fnmatch * Fix ipaddress issues in the infinidat driver * Adds a clear error message to operator-set limits * TrivialFix: Fix the filter name in config helper * Add Pure Storage FlashBlade driver * Add Ceph version check * Handle service client authorization errors * Remove duplicate line in release note * Filter shares by share type "extra\_specs" * fixes availability zone filter when creating a share from snapshot * init\_host should be called before RPC consumer is created * Fix PDF build * Changed minversion in tox to 3.18.0 * Remove references to sys.version\_info * Update IRC references * Add documentation for per share gigabytes quotas * [devstack] Drop manila-tempest-plugin installation * Fix with\_entities usage in db queries * Fix API reference of share 
snapshot creation * Replace deprecated SQLAlchemy "with\_lockmode" * Fix api error message for snapshot creation failure * Drop support for SQLite < 3.7 * optimize share size check * Updates CephFS drivers docs * [Glusterfs] Fix create share from snapshot failed * setup.cfg: Replace dashes with underscores * Remove deprecated config and auth * Don't run periodic\_share\_replica\_update() on active replicas * Reuse \_set\_instances\_share\_data for replicas * Direct mgr commands to the mgr daemon * Add documentation for share server limits * Update admin, user and contributor guide * PowerMax Manila - version change for Wallaby * Document policy rule using the description parameter * [doc] Annotate max api microversion in Wallaby * Add Python3 xena unit tests * Update master for stable/wallaby 12.0.0 ------ * Add release note for secure rbac work * Set "context\_is\_admin" to system scope admin roles * Bump RPC version for the scheduler rpc call * [NetApp] Follow up fixes for update security service * Support host assisted share migration for Zadara manila driver * Add exception for insufficient privileges when using security services * Fix wrong totalcount returned by share listing query * Add create share from snapshot in CephFS * Update cephfs drivers to use ceph-mgr client * Move params from DocumentedRule to DeprecatedRule * Remove unused model properties and increase unit test coverage * Put ensure\_share into thread pool to speed up the startup of share service * fix unmange share with manage\_error status will lead to quota error * Disable some policy warnings * Fix generic share resize with 0.0.0.0/24 access * Filter storage protocol in the scheduler * [NetApp] Implement cached status pool * Fix share server lookup * Fix inconsistent ordering caused by low datetime precision * scheduler should ignore earlier time service capabilities * [NetApp] Implement security service update * Add security service update support to the container driver * Add security 
service update for in-use share networks * [ci] Part 2: Temporarily set docs job to non-voting * [ci] Temporarily set docs job to non-voting * [NetApp] Add support for FPolicy native mode * Implement secure RBAC for shares * Implement secure RBAC for share replicas * Change RBAC for share group snapshots * Implement secure RBAC for share snapshots * [NetApp] Fix security service configuration for LDAP servers * Implement secure RBAC for share type extra spec * Implement secure RBAC for share snapshot instances * Add config option to set per\_share\_size\_limit * Implement secure RBAC for share group type specs * Implement secure RBAC for share network subnets * Implement secure RBAC for share networks * Add share server limits * Implement secure RBAC for share group types * Implement secure RBAC for share groups * Implement secure RBAC for share instances * Implement secure RBAC for group snapshots * Implement secure RBAC for share types * Implement secure RBAC for share servers * Implement secure RBAC for share snapshot instance export locations * Implement secure RBAC for share snapshot locations * Implement secure RBAC for share replica locations * Implement secure RBAC for share instance export location * Fixed some syntax errors * Adding Zadara Manila driver * Adds support min/max share size limited by share\_type * Implement secure RBAC for share locations * Implement secure RBAC for access rule metadata * Implement secure RBAC for share access rules * Implement secure RBAC for services * Implement secure RBAC for security services * Fix traceback in scheduler-stats API * RBAC tightening for share access rule * Clean up some policy code * Drop "system\_scope" from context constructor * Doc: Add profiler support documentation * Integrate OSprofiler and Manila * [NetApp] Fix deepcopy of sqlalchemy objects inside the driver * Remove deprecated public share policies * Fix unit tests to use requests * Catch up to changes in RequestContext * [api-ref] Fix incorrect 
parameters * tox miniversion update to 3.1.1 * [doc] remove project\_id from api endpoints * [devstack] Setup a "shared-file-system" service * [devstack] create endpoint without project\_id * Advertise v2 API routes without project\_id * Enable healthcheck middleware * [Native CephFS] Add messages for async ACL ops * remove usage of six library from unit tests * remove usage of six library * Remove backend deprecated variables * add additional driver data to migration get progress view * Adds missing caps for mgr in cephfs configuration * [NetApp] Fix kerberos security service issues * Update doc for manila-service-image download * Update manila and Ceph matrix support doc * Fix missing group and group\_snapshots in absolute limits * Implement secure RBAC for storage pool statistics * Implement secure RBAC for quotas * Implement secure RBAC for quota classes * Implement secure RBAC for messages * Implement secure RBAC for storage availability zones * Always use new\_size when extend * Uses local variable with ipv4 gateway config * [Doc] Add admin doc for share group * Add api reference for share server migration * Add developer reference to share server migration * Delete unavailable py2 package * [goal] Deprecate the JSON formatted policy file * Stop manila when unstacking * Update requirements * Updates to support in Ceph local.conf sample * Switch from base64 encodestring to encodebytes * [glusterfs] don't reinit volume list on deletion * Introduce base personas for secure RBAC * Bump oslo.log version to 4.3.0 * Docs: NFS-Ganesha and dbus * [NetApp] Support NFS shares on windows * [NetApp] Fix hard-coded CA cert path for SSL * Retry unmount operation on the LVM driver * Replace deprecated UPPER\_CONSTRAINTS\_FILE variable * Retry unmount operation on the ZFSOnLinux driver * Update share replicas export location API ref * fix reno file location and indention * Remove the unused coding style modules * Move shares filtering to database layer * Fix logic that 
determines a share exists before manage * disable M325 * Set Victoria max version in the api doc * Fix 'update\_share\_replica' doesn't provide share server model to drivers * Add share server migration admin documentation * [NetApp] Fix access rules for CIFS promoted replica * Add Python3 wallaby unit tests * Update master for stable/victoria 11.0.0 ------ * [DOC] Add admin and user documentation for share revert to snapshot * [NetApp] Improve Adaptive QoS functionality * Documentation for Share Migration Ocata Improvements * Remove unused "\_migrate" API method * requirements: Drop os-testr * [NetApp] Fix issues with share server migration * Follow up change for share server migration * [NetApp] Fix python-manila package version * [NetApp] Fix CIFS promote back issue * Update share replicas api-ref * Update api-ref for share groups graduation * Fix manage share server for container in Focal * Fix manila OverQuota issue while managing shares * Fix capacity calculations in the CephFS driver * Improve migration\_get\_progress error handling * Remove install unnecessary packages * [NetApp] Adding support for Adaptive QoS in NetApp driver with dhss false * [NetApp] Add support for share server migration * Modify share groups w/ manila-manage's update-host * [Container driver] Adds share and share server migration * [NetApp] Enables configuring NFS transfer limits * [Glusterfs] Fix delete share, Couldn't find the 'gluster\_used\_vols' * Add share server migration for Dummy driver * Improve IPv6 documentation for access rules * Add share server migration * Update user doc index * [goal] Migrate tox based testing to ubuntu focal * Update share replica show docstrings * Update "Create and manage shares" doc * Handle oslo.messaging ping endpoint * [Glusterfs] Fix delete share, mount point not disconnected * Fix WsgiLimiterProxy check * Graduate share replication feature * PowerMax Manila - Version increment for Victoria * [doc] clean up compute client options * Fix 
documentation for types and extra specs * DOC: Update admin generic driver network-options immutable * Add the share-type for the server setup metadata * Update docs for create share from snapshot in another back end * Update access rules documentation for user guide * Updates the API reference for some share operations * Add mod\_wsgi configuration samples * Add uWSGI configuration samples * Fix wrong capacity in pool\_stat for DellEMC manila drivers * Update Share types api-ref * Add api-ref for share network subnets * Move hacking checks to tests dir * [ci] Replace experimental tripleo job * [victoria][goal] Native Zuulv3 CI jobs * Harden LVM driver deletion paths * Zuulv3 native grenade job * [ci] Remove explicit compression of log files * Fix fallback share group snapshot implementation * Add verify-noapi testcase to grenade * Update LVM volume extend * Update NFS helper restart * Use assertEqual instead of assertItemsEqual * [ci] Re-enable scenario tests for lvm job * [NetApp] update set\_preferred\_dc for ontapi 1.150 * Add plugin name to devstack/settings * add share server update to manila-manage share update\_host * [ci] Temporarily disable scenario tests * Add upgrade documentation * Enforce policy checks getting share-type by name * Enable cephfs snapshots * TrivialFix: Correct cephfs support release note * Add Ceph support statement * [api-ref] fix typo * Fix NFS/CIFS share creation failure issue * Make test-setup.sh compatible with mysql8 * [NetApp] Fix HTTPS connection for python 3.7 * [api-ref] Fix HTTP method on the manage API * add manila-manage service cleanup * [NetApp] Updating the release note for bugfix 1688620 * Fix uwsgi path location * [NetApp] Fix svm scoped account * Use unittest.mock instead of mock * Update admin guide for share network subnets change * Add user guide for share network subnets * Add documentation for share replica quotas * Make volume mount path configurable for ContainerShareDriver * Add user message when share 
shrinking fails at the driver * CIFS extension failing because of volume in use * [devstack] Allow cephfs daemon port access * [NetApp] Fix default ipspace deletion issue * [Unity] Update release note for filter function * Remove unused ManilaNode sqlalchemy model * [Unity]: Failed to enabled ACE for share * Get ports filtered by subnet id on share-server cleanup * Switch to newer openstackdocstheme and reno versions * Fix hacking min version to 3.0.1 * Fix pep8 issues * [NetApp] add max\_over\_subscription\_ratio to pool stats * TrivialFix: Remove unnecessary future imports * [devstack][ci] Set public network ID in tempest.conf * [ci][devstack] Install bridge-utils for Container driver * Monkey patch original current\_thread \_active * [NetApp] Fix falsely report migration cancelation success * [devstack][ci] Move bgp setup to plugin * [devstack][ci] Fix floatingip allocation * [Unity] Implements default filter function * [Unity] Fix unit test issue * [devstack][ci] Modify firewall in ds-plugin * [CI] Remove unused jobs * [devstack][ci] Stop requiring g-reg service * [doc] Fix tempest doc * Remove translation sections from setup.cfg * Remove Babel requirement * [grenade] Switch base version * [CI] Fix grenade share networks test * Trivial fix: Add spec website link to contributor doc * Add py38 package metadata * Add Python3 victoria unit tests * Update master for stable/ussuri 10.0.0.0rc1 ----------- * Drop install\_command usage in tox * [NetApp] Fix vserver peer accept on intra cluster replication * [NetApp] Fix share shrink error status * Update share-manager behavior for shrink share operation * fix bug in quota checking * [doc] Annotate max api microversion in Ussuri * fix bug in consume from share * [cycle-goals] Add PTL and contributor quickstart * Use unittest.mock instead of third party lib * fix bug in tox py test * Fix invalid assert statement * VNX/PowerMax: Fix export locations * [NetApp] Fix vserver peer creation with same vserver * Fix docs 
duplicated autoclass definition * [CI] Stop gating with manila-tempest-minimal-dsvm-lvm * [NetApp] Improve create share from snapshot functionality * [ZFSonLinux] Create share from snapshot in different backends * Remove experimental flag from share groups feature * Create share from snapshot in another pool or backend * [Unity] Manage/unmanage share server/share/snap * Remove provisioned calculation on non thin provision backends * Delete type access list when deleting types * Add new quota for share replicas * Prevent share type deletion if linked to group types * Increase MANILA\_SERVICE\_VM\_FLAVOR\_DISK * Support query user message by timestamp * Revert "Remove provisioned calculation on non thin provision backends" * Hacking: Fix W605 * Hacking: Fix E731 * Hacking: Fix E741 * Hacking: Fix E305 * Hacking: Fix E117 * Hacking: Fix E226 * Hacking: Fix E241 * Hacking: Fix F601 * Hacking: Fix F841 * Hacking: Fix F632 * Update hacking for Python3 * Remove provisioned calculation on non thin provision backends * Cleanup Python 2.7 support * If only .pyc exist, the extension API will be disabled * [ci] Stop requiring neutron-tempest-plugin * Enforce policy checks for share export locations * Fix URLs in code and documentation * [NetApp] cDOT to set valid QoS during migration * Enable the use scheduler creating share from snapshot flag * [NetApp] Fix driver to honor standard extra specs * share\_networks: enable project\_only API only * Cleanup docs building * Update devstack repository URL * Fix database loading for some resources * Fix release note for LP 1853940's bugfix * Add opt doc and reno for noop interface driver * Add asynchronous error info into messages when share extend error * Use psycopg2-binary for test-requirements * Introduce noop interface driver * Refactor route clearing to linux interface * clear\_outdated\_routes: reduce neutron calls * generic: Refactor network functions to l3\_init * Use StrOpt for instance type * Improve share list speed using 
lazy='subquery' * Store ganesha logs and configs * [Unity]: Failed to delete cifs share if wrong access set * Fix over-quota exception of snapshot creation * Don't send heartbeats if driver not initializing correctly * Fix missing parameter in the log message * Fix a wrong comma in log message * Add manila-specs link to readme.rst * fix a typo * Fix error that failed to get image for booting server * Make extra\_specs value as case-insensitive * VNX/Powermax: Make it work under python3 * [ussuri][goal] Drop python2.7 support * PowerMax and VNX Manila - Read only policy is not working correctly * [NetApp] Fix share replica failing for 'transfer in progress' error * Document max Train-release API version * [Unity] Add release note and tests for IPv6 fix * Fix invalid assert statement * Fix share network update erroneously returns success * [Unity] Sync Unity related Docs * Enable glusterfs-native ci * [NetApp] Allow extension/shrinking of NetApp replicated share * add document,source,bugs,blueprints links to readme * update readme links * Imported Translations from Zanata * Update master for stable/train 9.0.0 ----- * Fix pagination does not speed up queries bug * Fix timeout when compute server was soft-deleted * Fix [Unity] verification and convert mgmt ipv6 * Retrieve compatible share servers using subnet id * Fix error print format * Skip NFS/Samba install for CephFS * [train][goal] Define new manila-tempest-minimal-lvm-ipv6-only job * Add share network with multiple subnets * Add manila-status to man-pages list * [NetApp] Adds support for replication with DHSS=True * Pylint: use -j 0 arg * update share group test in db * Add update share-type API to Share Types * Remove backend spec from share type while creating replica * Remove support for \`\`data\_node\_access\_ip\`\` * [Unity] Driver supports the mode that does not create and destory share servers (DHSS=False) * Fix \_list\_view function for count * Change PDF file name * [Nexenta] Refactored NexentaStor5 
NFS driver * Add PDF documentation build * [Infortrend] Add Infortrend Manila Doc * Fix subsections for container driver * Enable replication tests (DHSS=True) on Dummy driver * Add extend/shrink feature for glusterfs directory layout * Validate API sample JSON files * Correct json format in api-ref * [CI] Enable glusterfs-nfs ci * Fix incorrect 'cephfnfs1' to 'cephfsnfs1' * Add missing space * Add Infortrend Manila Driver * Add manila-ui config instructions * Remove support for "lvm\_share\_export\_ip" * [CI] Convert rally jobs to zuulv3 native * Fix usage of deprecated devstack function * Make manila-tempest-plugin installation optional * [api-ref] Correct share metadata API ref * Conditionally restore default route in setup\_ipv6 * Run tempest jobs under python3 * add IPv6 support for CephFS/NFS back end * [api-ref] Use relative links and fix grammar * Update api-ref location * Manila PowerMax - rebrand from VMAX to PowerMax * Add Python 3 Train unit tests * Remove the redunant table from windows' editor * Unmount NetApp active share after replica promote * Bump the openstackdocstheme extension to 1.20 * Check NetApp SnapRestore license for pools * Fix an invalid assert state * Manila share driver for Inspur InStorage series * [CI] Add bindep.txt * Adding documentation for User Messages in Manila Documentation * Fix typo in Manila docs in manila.rst file * [CI] Run scenario tests in the cephfs-nfs job * Add admin ref for manage/unmanage servers DHSS=True * Blacklist python-cinderclient 4.0.0 * Manila VMAX docs - notification of removal of tags * Update sphinx dependency * [NetApp] Fix race condition issues on vserver deletion * [CI] Bump timeout for the migrations test case * NeutronBindNetworkPlugin: fix multi segment mtu * [api-ref] Update JSON samples for scheduler-stats API * Fix error print format * [Unity] Update doc for revert to snap support * OpenDev Migration Patch * Dropping the py35 testing * The parameters of 'list shares' are optional * [api-ref] 
Delete unused parameters * [api-ref] De-duplicate name and description parameters * [api-ref] De-duplicate date and time parameters * [api-ref] Replace "tenant" terminology with "project" * Fix misuse of assertFalse * [grenade] Switch base version * [tests] Fix PYTHON3\_VERSION * Manila VMAX docs - clarify backend configurations * [doc][api-ref] Fix annotation and missing parameters * Add api-ref for manage/unmanage with DHSS=True * [doc][api-ref] Clarify manage/unmanage APIs * Replace openstack.org git:// URLs with https:// * [doc][api-ref] snapshot user\_id and project\_id fields * Update master for stable/stein 8.0.0 ----- * Fix server delete attempt along with share net deletion * INFINIDAT: suppress 'no-member' pylint errors * Dummy driver: Don't fail unmanage on malformed share servers * Document Windows SMB driver * Only allow IP access type for CephFS NFS * Drop run\_tests.sh and tools/colorizer.py * Check all\_tenants value in share\_networks api * NetApp cDOT assume disabled compression on empty result * Check all\_tenants value in security\_service api * Fix parameters passed to exception * Destroy type quotas when a share type is deleted * Replacing the HTTP protocol with HTTPS * Fix driver filter to not check share\_backend\_name * Fix logging in wsgi module * Use legacy base to run CI/CD on Bionic * Manila VMAX docs - differences between quotas * Deploy manila with uwsgi on devstack * Fix API version inferred w/ un-versioned URLs * Add missing ws seperator between words * Manila VMAX docs - improve pre-configurations on VMAX section * Bump timeout on sqlalchemy migration test * Bump pylint job timeout * Manila VMAX docs - clarify snapshot support * Fix hyperlink reference to security section * Manila VMAX docs - clarify driver\_handles\_share\_servers * Fix version selector when for proxy-style URLs * VMAX manila doc - SSL Support * TrivialFix: Remove trailing whitespace in tox.ini * [pylint] Fix Manage-Unmanage with DHSS=True pylint issues * [Pylint] 
Bump pylint version to latest * [pylint] Use filenames in coding-checks * [pylint] Run pylint separately for code and tests * [NetApp] Add manage/unmanage of share servers * Add manage/unmanage of shares in DHSS=True * Fix missing size value in snapshot instance * Add manage/unmanage implementation to Container Driver * Refactor Container Driver * Move grenade job to bionic and run with python 3 * Update docs landing page to follow guideline * [pylint] Fix/ignore pylint errors in test modules * Fix error message when updating quota values * [pylint] Fix/ignore pylint errors in non-test modules * Extend remove\_version\_from\_href support * [NetApp] Fix race condition issue in NetApp driver * Fix tls-proxy issues with the devstack plugin * [pylint] Remove lint tox environment * Include .inc files in doc8 linting * Suppress pylint warnings from dell\_emc drivers * Fix sshpool.remove * Fix typo in test name * Add policy to create/update public shares * [ZFSOnLinux] Log ZFS options as they are retrieved * Return request-id to APIs that don't respond with a body * Fix service image boot issues * Add api ref for access rule metadata feature * [Unity] Shrink share in Unity driver * Allow configuring availability\_zones in share types * Bump timeout on dsvm jobs * Add tripleo scenario004 job to experimental queu * Match job names in playbooks to their names * Address E0102 pylint errors * [CI] Drop redundant if condition in the LVM job playbook * NetApp ONTAP: allow multiple DNS IPs * Run cephfs jobs under py3 * Fix pylint errors for ganesha manager * Set mode for CephFS volumes and snapshots * Deprecated config option [DEFAUL]memcached\_servers * Deprecate [DEFAULT]/share\_usage\_size\_audit\_period * Fix spurious pylint import errors for ddt and mock * Configure per backend availability zones in devstack * Allow configuration of a back end specific availability zone * [Trivial fix] add missing ws seperator between words * Drop [DEFAULT]root\_helper config option * 
[Unity] Revert to snapshot support * Convert dummy job to py3 * Separate APIs for share & replica export locations * Set paramiko logging to DEBUG level * Change ssh\_utils parameter to correctly send keepalive packets * devstack: Do a vgscan before checking if the VG is there * QNAP: Fix inconsistent cases while create/manage from snapshot * Fix the misspelling of "except" * Publish sample config file in the genconfig job * Improve service instance module debug logging * Move/Drop useless SQL related config options * Drop param2id() from cmd/manage.py * Drop trycmd() from manila/utils.py * QNAP: driver should not manage snapshot which does not exist * Add Ubuntu Bionic CephFS jobs * Drop is\_eventlet\_bug105() from manila/utils.py * QNAP: Support QES FW on TDS series NAS * Adjust ssh timeouts * Add devstack instructions and local.conf samples * [doc] Fix api sections in the contributor doc * Set ram for manila service image to 256 * [Manila Unity/VNX] add 'snapshot support' related Doc for Unity/VNX driver * NetApp cDOT store port IDs and addresses at share server backend details * Deprecate old keystone session config opts * speed up GET scheduler-stats/pools/detail * Fix image\_name retrieval in custom-image jobs * Only run the needed services for CephFS jobs * Use the canonical URL for Manila repositories * fix http link to https link * NetApp ONTAP: cifs add AD security service server as preferred DC * Change openstack-dev to openstack-discuss * Fix ganesha for 0.0.0.0/0 access * Add missing ws separator between words * VMAX manila doc - support for IPv6 * [api-ref] Added share servers show and corrected path to details * [CI][LVM] Run the LVM job on Bionic Beaver * [LVM][IPv6] Quagga changes to support Bionic Beaver * Use OS CLI instead of the neutronclient * Remove i18n.enable\_lazy() translation * Delete the duplicate words in cephfs\_driver.rst * The URL of SSL is missing * [DevRef] Add code review guideline * [Trivial Fix] Correct spelling error of 
"throughput" * [CI] Switch Xenial tempest jobs to Bionic Beaver * VMAX manila - deprecate old tags correctly * inspur: transfer 'rw' to 'rwx' when Shared File Systems protocol is cifs * NeutronBindNetworkPlugin: fix multi segment neutron data save * NetApp ONTAP: Fix use of multiple subnets with DHSS=True * VMAX manila doc - use of correct VMAX tags * Add manila-status upgrade check command framework * [LVM] Run filesystem check before assigning UUID * Change python3.5 job to python3.7 job on Stein+ * Increment versioning with pbr instruction * Make coverage non-voting and fix use of rpc\_backend * Simplify running pylint * Don't quote {posargs} in tox.ini * remove glusterfs-nfs job from check queue * change tox envlist from 3.5 to 3 * Remove run\_tests.sh * [grenade] Switch base version * [Container driver] Fix volume group data collection * [ZFSOnLinux] Allow devstack bootstrap in Ubuntu > 16.04 * 3PAR: Update Storage Driver docs * Remove install-guide-jobs * Use templates for cover and lower-constraints * Spelling Errors * Add version maximum annotation to API versions doc * Add command to update share instance hosts * add python 3.6 unit test job * switch documentation job to new PTI * import zuul job settings from project-config * NetApp ONTAP fix test allocate container with share\_instance * Remove logging overrides from plugin.sh * adjust response code in 'service.inc' * Adds export path option to Quobyte driver * Fix manila-ui link in the contributor doc * Fix ShareGroup sqlalchemy model ShareGroupTypes relation * [ZFSOnLinux] Retry unmounting old datasets during manage * Update reno for stable/rocky * NetApp ONTAP: change cifs server valid dns hostname * NetApp cDOT driver switch volume efficiency 7.0.0 ----- * replace 'data=' with 'message=' * NetApp cDOT driver qos policy same name * Test share type per test suite changes * INFINIDAT: unit tests - remove fake exception body * Fix grenade job * Fix mutable config in manila-scheduler * Fix ZFSOnLinux doc 
about manage ops * INFINIDAT: add host.created\_by metadata key * check all\_tenants value in share api * NetApp cDOT: use security service ou 7.0.0.0b3 --------- * Api-ref: Add min\_version in the API parameters * Retrieve is\_default value to fix empty display in CLI * [Docs] Don't include unittest documentation * Support metadata for access rule resource * QNAP: Add support for QES 2.1.0 * [CI] Don't set test config for API microversions if master * Api-ref: Add missing parameter in the version api * Allow setting test API microversions in gate tests * Api-ref: change fix \`\`extra-spec-key\`\` key in path * Docs: glance image-create returns an error issue * [NetApp driver] Control snapshot folder visibility * Fix results capturing for the dummy driver * Fix ensure\_shares bugs * [NetApp driver] NVE License not present fix * Change depreciated to deprecated * Fix bare exceptions in ganesha manager * INFINIDAT: change create\_child to create\_snapshot * Manila share driver for Inspur AS13000 series * Add share instance index on share\_id * [Manila Unity/VNX] admin doc failed to render * DB Migration: fix downgrade in 579c267fbb4d * Cannot remove user rule for NFS share * Fix mutable default argument in Quobyte jsonrpc * API: Add \`\`all\_tenants\`\` parameter * Fix doc warnings * [API] Doc snapshot and share net deletion preconditions * Address trivial TODOs * NetApp cDOT driver skip vserver route with no gateway * Remove confusing DB deprecation messages * add release notes to README.rst * rectify 'a export ID' to 'an export ID' * rectify 'a extra specs' to 'an extra specs' * rectify 'a exact match' to 'an exact match' * Document the preconditions for deleting a share * Use volume\_uuid in \_resize\_share of Quobyte Driver * Limit formatting routes when adding resources * Allow api\_version\_request.matches to accept a string or None * Update link address * Generic driver - Limiting SSH access from tenant network * [Trivialfix] Remove the useless parameter 
'ext\_mgr' * Delete unused test check * [Doc] Add 'gateway' and 'mtu' in share network api-ref * QNAP: driver changes share size when manage share * Trivial: Update pypi url to new url * Config for cephfs volume path prefix * Switch to oslo\_messaging.ConfFixture.transport\_url * Use class name in invocation of super * Fix use of pbr version release 7.0.0.0b2 --------- * Default pylint to run using python3 * fix tox python3 overrides * [Grenade] Switch base to stable/queens * Set initial quota in Quobyte and correct resizing * Trivial:Update pypi url to new url * Fix share-service VM restart problem * Fix test plugin issues in dsvm-lvm-centos job * Fix manila-tempest-\*-centos-7 jobs * VMAX driver - Implement IPv6 support for Dell EMC VMAX driver * Fix post-execution for tempest tests * Fix access control for single host addresses * Switch from ostestr to stestr * Update "auth\_url" in install docs * NetApp ONTAP: Fix delete-share for vsadmin users * Fix title overline too short when generate docs * Fix bug for share type filter search * Update auth\_url value in install docs * Fix doc build warnings * Add ou to security service * [Manila Unity] Set unity\_server\_meta\_pool option as required * Use 'Default' as the value of domain name in install guide * Remove deprecated DEFAULT options * uncap eventlet * Update auth\_uri option to www\_authenticate\_uri * Fix allow the use of blank in user group name to access the share 7.0.0.0b1 --------- * move securiy service error explanation from comment * Run pep8/fast8 with python3 * Circumvent bug #1747721 to prevent CI failures * Remove option standalone\_network\_plugin\_ip\_version * Updated from global requirements * Support filter search for share type API * Fix typos in help text of Generic driver and ZFSSA config opts * Remove the deprecated "giturl" option * Disable tempest in rally jobs * Modify grammatical errors * Use rest\_status\_code for api-ref response codes * Updated from global requirements * add 
lower-constraints job * Update the new PTI for document build * Add manila-tempest-plugin as a requirement in rally job definitions * use http code constant instead of int * Adding driver to mysql connection URL * Log config options with oslo.config * Fix tap device disappear after node restart * Updated from global requirements * Update doc name and path for dell emc vnx and unity driver * Fetch and install manila-tempest-plugin system-wide * INFINIDAT: fix release notes * Updated from global requirements * Change a parameter key for CIFS mounting command * Updated NetApp driver features support mapping * INFINIDAT: set REST API client parameters * Add docs for quota\_class\_set API * Fix the incorrect reference links * Rename Zuul jobs * Remove the nonexistent install-guide directory * Remove use of unsupported TEMPEST\_SERVICES variable * Fix manila logging rabbitmq password in debug mode * Updated from global requirements * Replace Chinese quotes to English quotes * Fix db migration for mariadb >= 10.2.8 * Move openstackdocstheme to extensions in api-ref * Update documentation links * Fix typos * Update reno for stable/queens * Update docs since manila\_tempest\_tests are installed system-wide 6.0.0.0rc1 ---------- * Revert Id905d47600bda9923cebae617749c8286552ec94 * Fix LVM driver not handling IPv6 in recovery mode * Fix UnicodeDecodeError when decode API input * Fix Host-assisted Share Migration with IPv4+IPv6 * Add manila.data.helper options to config sample * INFINIDAT: load-balance shares inside network space * INFINIDAT: support deleting datasets with snapshots * Replace chinese double quotes to English double quotes * Remove the unused variable * Fix boolean types in db migration tests * drivers/cephfs: log an error if RO access is used and it's unavailable * Fix a trivial bug of Dell EMC Manila IPv6 implementation * Handle TZ change in iso8601 >=0.1.12 6.0.0.0b3 --------- * Use native Zuul v3 tox job * fix misspelling of 'password' * Enable IPv6 
scenario tests in Upstream CI * Update manila plugin to support IPv6 * NetApp cDOT: Add NVE support in Manila * Update unreachable link * Replace curly quotes with straight quotes * Updated from global requirements * Update contributor/tempest\_tests.rst * Implement IPv6 support for Manila Dell EMC Unity driver * Disable security group rule when create port * Modify outdated links * Updated from global requirements * Add ipv6 for share network admin doc * Follow the new PTI for document build * Updated from global requirements * DocImpact: Add MapR-FS native driver * Use stestr for coverage * Fix NFS/CIFS share creation failure issue * Implement IPv6 support for Dell EMC VNX driver * Fix version details API does not return 200 OK * QNAP: Add support for QES 2.0.0 * Remove ordering attempts of 'unorderable types' * Fix volume attach error in generic driver * Always disable root-squash * Add support for enhanced features to the QNAP Manila driver * Fix error message in the manage API * DocImpact: Add quotas per share type * Fix running docs job failure * Raise error when image status is not active * ganesha: read and store non-ASCII data in exports * Api-ref: add show details for share type * Replace invalid link in manila doc * Fix incorrect api ref parameters * [Doc] Correct a known restriction in cephfs\_driver * QNAP Manila driver: Access rule setting is override by the later rule setting * Fix install docs reference error * Fix default and detailed share type result not correct * Remove in-tree tempest plugin * Updated from global requirements * Add policy documentation and sample file [10/10] * [policy in code] Add support for AZ, scheduler and message resource [9/10] * [policy in code] Add support for share and type extra resource [8/10] * [policy in code] Add support for replicas, networks and security services [7/10] * [policy in code] Add support for group resource [6/10] * Huawei driver supports snapshot revert * Updated from global requirements * Fix 
getting share networks and security services error * Updated from global requirements * Change ensure share to make startup faster * [policy in code] Add support for service and quota resource [5/10] * Remove unused configuration options * [policy in code] Add support for snapshot resource [4/10] * Add count info in /shares and /shares/detail response * Extend .gitignore for linux swap files range * [policy in code] Add support for share resource [3/10] * [policy in code] Add support for share type resource [2/10] * Add count info in /shares and /shares/detail API doc * Updated from global requirements * Remove usage of deprecated config 'resources\_prefix' * ganesha: store exports and export counter in RADOS * INFINIDAT add Manila driver 6.0.0.0b2 --------- * Updated from global requirements * Simplify the way drivers report support for ipv6 * QNAP: Add support for QES 1.1.4 * Update docs to fix broken links * Add utils methods to write files * Fix drivers\_private\_data update on deleted entries * Use v3 cinder client for share volume * Updated from global requirements * Added Handling Newer Quobyte API Error Codes * Remove 'branches:' lines from .zuul.yaml * Install centos-release-openstack-pike * Add 'description' in share type API Doc * Add 'description' in share type APIs * [Api-ref] update parameters for share types api * fix keystone auth failed since project\_domain\_id and user\_domain\_id * [Doc]Update cephfs\_auth\_id for cephfsnfs Configuration * Fix quota usages update deleting same share from several API endpoints * [Doc] Use share group instead of consistency group in driver\_requirements * Fix shared-file-systems-share-types URL * Utilize requests lib for Huawei storage connection * Remove setting of version/release from releasenotes * Add ssl support for manila API access * Remove unused functions from api/extensions.py * Api ref contains incorrect parameters * Updated from global requirements * [policy in code] Add support for share instance 
export location resource * Remove hdfs job from check queue * Updated from global requirements * Advertise IPv6 support in the NetApp driver * Allow IPv6 gateways for the default route * Allow ZAPI over IPv6 * Remove glusterfs-native job from check queue * Updated from global requirements 6.0.0.0b1 --------- * Add API document for share group [3/3] * Add API document for share group [2/3] * The default cephfs\_enable\_snapshots set to False * Add admin documentation for following keys of quotas: -'share\_groups' -'share\_group\_snapshots' * Add API document for share group [1/3] * Purge doc of references to nova net * Remove deprecated ganesha\_nfs\_export\_options * Fix missing neutron net plugin options * Zuul: add file extension to playbook path * Fix duplicate standalone\_network\_plugin\_ip\_version * Fix issue with different decimal separators * Use sslutils from oslo\_service * Impove coverage job accuracy * NetApp ONTAP: Fix share size when creating from snapshot * [Doc] Fix parameters in share network api-ref * [Doc] Fix wrong links in docs * [doc] Fix install guide doc * Don't attempt to escalate manila-manage privileges * CentOS share node install docs * Migrating legacy jobs * doc: move stuff from contributor to admin * Delete limited\_by\_marker from api/common.py * Rename to index.rst * Restore .testr.conf * Fix 'project\_share\_type\_quotas' DB table unique constraint * Updated from global requirements * Use generic user for both zuul v2 and v3 * [Doc] Add share group in doc * Updated from global requirements * Fixed creation neutron api mapping for security groups * cleanup test-requirements * Add default configuration files to data\_files * NetApp ONTAP: Add support for filtering API tracing * Updated from global requirements * Switch base to latest in link address * Enable mutable config in Manila * ganesha: cleanup of tmp config files * [Doc] Delete consistency group in doc * tempest: remove call to set\_network\_resources() * Removes use of 
timeutils.set\_time\_override * Updated from global requirements * Implementation of Manila driver for Veritas Access * tests: replace .testr.conf with .stestr.conf * [install-guide] remove install-guide doc * [doc] Add API document for snapshot instances * Remove auto generated files and unnecessary .gitignore file * Allows the use of dollar sign in usernames * [Api-ref] Delete the duplicate tenant arguments in parameters.yaml * Fix html\_last\_updated\_fmt in conf.py * Fix test\_rpc\_consumer\_isolation for oslo.messaging 5.31.0 * Fix wrong links in manila * Delete the 'share\_extension:types\_extra\_specs' policy * Add API document for share replica * [Grenade] Switch base to stable/pike * NetApp: Fix usage of iso8601\_from\_timestamp * Use newer location for iso8601 UTC * Remove name and description from the search\_options list * Fix a typo in share\_migration.rst * Fix a typo: replace microverison with microversion * Remove "os\_region\_name" config option * [doc] Move Experimental APIs description to a common place * [Api-ref] Remove unused parameter extra\_specs\_2 in parameters.yaml * Updated from global requirements * Remove vestigate HUDSON\_PUBLISH\_DOCS reference * Add API document for share type quota * doc migration: update the doc link address * Update the documentation link for doc migration * Fix incorrect literal\_block error when build docs * Updated from global requirements * doc migration: configuration reference * Fix man page build * Remove unused variables and broken links * doc migration: cli reference * doc migration: user-guide * doc migration: install guide * doc migration: admin guide * doc migration: new directory layout * doc migration: openstackdocstheme completion * NetApp ONTAP: Fix revert-to-snapshot * Updated from global requirements * [Doc] Fix access rule description in api-ref * Update reno for stable/pike * [Doc] Add more description to user messages api-ref * [Api-ref] remove "is\_public" in snapshot updated description * 
TrivialFix: Add code block and format JSON data * Fix the duplicate hacking check M312 and H203 5.0.0 ----- * Re-enable broken CG code in NetApp driver * Fix wrong links * [Api-ref] Add supported protocol "MAPRFS" in doc * Add API document for share group quotas * [Doc] Remove unused 'provider\_location' parameter * [Doc] Fix API document for Consistency group * Change the way to create image service * [Tempest] Fix tests for pre-existing share network * NetApp cDOT: Fix security style for CIFS shares * Remove duplicate variables * Remove tempest pin * Imported Translations from Zanata * Update links in README * Fix NFSHelper 0-length netmask bug * Enable some off-by-default checks * Imported Translations from Zanata * Fix multiple issues with revert to snapshot in LVM * Add exception for no default share type configured * Fix cannot deny ipv6 access rules * Removed unnecessary setUp() calls in tests * [Trivialfix]Fix typos * Imported Translations from Zanata * Use tempest-plugin service client registration * Imported Translations from Zanata * Add ipaddress in manila requirements * Updated from global requirements * Enable IPv6 in manila(documentation) * Updated from global requirements 5.0.0.0b3 --------- * Enable IPv6 in manila(network plugins and drivers) * Add share groups and share group snapshots quotas * Add share usage size tracking in doc * Add share usage size tracking * Update location of dynamic creds in tempest tests * Provide filter name in user messages * Fix the exact filter can be filter by inexact value * NetApp cDOT: Add support for QoS/throughput ceilings * Updated from global requirements * NetApp: Define 'preferred' to False instead of none * Updated from global requirements * Add quotas per share type * Fix deprecated options version * Replace test.attr with decorators.attr * Allow 2 or more export IPs for LVM driver * Updated from global requirements * Disable notifications * Add user messages periodic cleanup task * Added like filter in 
api-ref * Enable IPv6 in manila(allow access) * NetApp cDOT: Fix share specs on migration * Updated from global requirements * Updated from global requirements * Update the documentation link for doc migration * Fix grammatical mistake, Changed character from "a" to "an" * Update URL home-page in documents according to document migration * Extend usage of user messages * User Messages * Add prefix 'test' to test name in test\_shares * Fix inappropriate parameters * VMAX VNX Manila - Refactor VMAX and VNX to use common code * Allow docs build without git * VNX: bump the version for Pike * Add like filter * TrivialFix: replace set(sorted(x)) with sorted(set(x)) * Remove --omit argument in run\_tests.sh * Unity: unexpected data in share from snapshot * VNX: share server cannot be deleted * Add export-location filter in share and share instance list API * NetApp cDOT: Add gateway information to create static routes * Add create/delete/extend/shrink share notifications * Updated from global requirements * NetApp cDOT: Fix share server deletion * Updated from global requirements * Replace the usage of 'admin\_manager' with 'os\_admin' * Add support for Guru Meditation Reports for manila * Replace the usage of 'manager' with 'os\_primary' * Use parenthesis instead of backslashes in tempest folder * Retry backend initialization * Allow endless retry loops in the utility function * Updated from global requirements * cephfs/driver: add nfs protocol support * Use parenthesis instead of backslashes in tests folder * Use parenthesis instead of backslashes in scheduler folder * Updated from global requirements * Wrong substitution of replica ID in log message * Fix ShareSnapshotInstance DB table * Use parenthesis instead of backslashes in db folder * Use parenthesis instead of backslashes in share folder * Use parenthesis instead of backslashes in API folder * Replace assertEqual([], items) with assertEmpty(items) * Change example value in docs for CephFS snapshots * [Docs] 
Correct glusterfs references * Updated from global requirements * Updated from global requirements 5.0.0.0b2 --------- * Imported Translations from Zanata * GPFS: Changing default value of NFS server type * Use get\_rpc\_transport instead of get\_transport * CI: Update tempest commit * [Share Groups] Squash SGS member and SS instances DB tables * Updated from global requirements * [Share Groups] Add two new fields to SG API object * [Share Groups] Add availability zone support * [Share Groups] Fix creation of share group types with wrong specs values * [Generic driver] Fix incompatibility with novaclient * ganesha: dynamically update access of share * [Share groups] Add scheduler filter ConsistentSnapshotFilter * Remove pbr warnerrors in favor of sphinx check * Clean releasenotes and install-guide build dir * Fix pep8 M325 error with python 3.5 * Use get\_notification\_transport for notifications * Updated from global requirements * Implement update\_access in Isilon Driver * Replace oslo\_utils.timeutils.isotime * Use ShareInstance model to access share properties * CI: Update tempest commit * Fix share instance list API display error * Remove unused function in test\_share\_snapshot\_instances file * Updated from global requirements * Refactor share instances tempest test * GPFS Path: Fix bugs related to initialization of GPFS Driver * Updated from global requirements * Add a releasenote for tooz heartbeat * coordination: use tooz builtin heartbeat feature * Updated from global requirements * Remove unused self.context * Correct re-raising of exception in VNX driver * Updated from global requirements * Fix typos in document * Fix unit test failures in gate * Replaced exc.message with str(exc) * Change to share access list API * Fix update share instance pool fail * Change share to share snapshot in snapshot list API annotation * devstack: clone Manila client only if marked to * api-ref:Update ref link * Set access\_policy for messaging's dispatcher * Fix api-ref 
doc generation for Python3 * Optimize the link address * Add periodic task to clean up expired reservation * Refactor and rename CephFSNativeDriver * Remove usage of parameter enforce\_type * Capitalize the first letter in comment 5.0.0.0b1 --------- * Add comment explaining ignore D001 for doc8 * Updated from global requirements * Add possibility to run 'manila-api' with wsgi web servers * Hacking: do not translate log messages * Updated from global requirements * Remove log translations in others 5/5 * Replace six.iteritems() with .items() * Add sem-ver flag so pbr generates correct version * Fix important:: directive display in install guide * [CI] Add support for CI jobs with custom images * Updated from global requirements * Remove service\_instance\_network\_helper\_type option * Update to current tempest tag * Add read-only tests for cephx access rules * Remove log translations in share and share\_group 4/5 * Remove log translations in scheduler 3/5 * [Rally] fix jobs * Remove unnecessary setUp function in testcase * Remove log translations in cmd,common,data,db and network 2/5 * Updated from global requirements * Remove log translations in api 1/5 * Updated from global requirements * Remove deprecated manila-all command * setup \_IntegratedTestBase without verbose flag * Handle SSL from VNX driver * [Dell EMC Unity] Create with user capacity * Move create\_manila\_accounts to post-config * Imported Translations from Zanata * change user access name limit from 32 to 255 characters * Fix some reST field lists in docstrings * Fix docs failures caused by latest eventlet * Use HostAddressOpt for opts that accept IP and hostnames * set basepython for pylint tox env * Updated from global requirements * Remove old oslo.messaging transport aliases * Revert "Handle ssl for VNX manila driver" * remove hacking rule that enforces log translation * docs: fix build failure on html\_last\_updated\_fmt * devstack: skip nfs kernel install if nfs-ganesha * Updated from global 
requirements * Handle ssl for VNX manila driver * Update share replicas after promotion in proper order * Enable share groups back * Updated from global requirements * Switch to use stable data\_utils * Deprecate 'ganesha\_nfs\_export\_options' * Local copy of scenario test base class * Rename wrapped methods in share manager * CephFS driver: change CG variables to SG variables * [api-ref]: Add missing share statuses * Fix python3 pep8 errors * Update share server provisioning for share groups * Send resize parameters in rpc as list in the Quobyte driver * [Tempest] Fix concurrency in test with listing share servers * The python version is added Python 3 and 3.5 version was missing * Start NFS and SMB services on fedora platforms * Remove unused "share\_id" parameter * Remove unused assignments in share manager * Updated from global requirements * Unblock gate failure on docs build * Fix 3 CI breakages * [Grenade] Fix devstack configuration in CI hook * Change tempest tag to 15.0.0 * Fix gate breakage caused by localrc usage * Fix host-assisted migration stale source share * Fix syntax in devstack plugin * Align policy.json with code * Add Apache License Content in index.rst * Use https instead of http for git.openstack.org * Remove unused pylintrc * Address family neutrality for container driver * container driver: log network id as network id * Remove redundant revert-to-snapshot test option * Fix some typos * Update tempest pin to 15.0.0 * Only return share host for admins using shares API * Fix migration\_success before completing * Update HNAS driver version history * doc: verify all rst files * Fix to use correct config options for network\_for\_ssh * Adds manila-manage 'db purge' command to man page * Enable devstack deploy of container driver on Fedora * Add Share Migration devref docs * Improve HNAS driver coverage * Updated from global requirements * [Tempest] Refactor api/tests/admin/test\_share\_servers module * 3PAR: Replace ConsistencyGroup * Updated 
from global requirements * Updated from global requirements * Update tempest pin to latest commit ref * [Tempest] Split up share migration tests to separate classes * Mock time.sleep in tests that sleep * HNAS: Fix concurrency creating/deleting snapshots * [Grenade] Add test with creation of share snapshot * Fix Windows SMB helper * [Grenade] Switch base to stable/ocata * Use more specific asserts in tests * Optimize opposite driver modes migration test * HNAS: ensure snapshot before trying to revert * Update reno for stable/ocata 4.0.0.0rc1 ---------- * Add 'consistent\_snapshot\_support' attr to 'share\_groups' DB model * Pass access rules to driver on snapshot revert * Fix default approach for share group snapshot creation * Remove a py34 environment from tox * Disable share groups APIs by default * Fix devstack manila nfs install for fedora * Improve test coverage for share migration * Replaces yaml.load() with yaml.safe\_load() * Prepare for using standard python tests * Fix nonsense variable name * Fix wrong access-rule negative test * Fix migration of mountable snapshots * Fix HNAS driver inconsistent exceptions * Fix HNAS driver always handling mountable snapshots * HNAS: Fix syntax to make shares read-only in snapshot create * Blocked migration of shares within share groups * HNAS: Fix managed snapshots not being mounted * Fix multiple export locations during migration * Fix snapshot export locations incorrectly handled * HNAS: avoid mismatch access level for managed shares * Fix error'ed access rules being sent to driver * Fix setup of DHSS=False mode for generic driver * HNAS: Fix concurrency error when managing snapshots * Enable host-assisted migration in ZFSOnLinux CI * Decrease share migration periodic task interval * Add access-rules tests to improve the coverage * Remove unit test that is not relevant anymore * Fix creation of share group types using share type names * Mark 'v1' API deprecated in the versions response * Fix Generic driver 
DHSS=False setup * Fix string formatting in access-deny API error message * Make LVM export IP configurable * Updated from global requirements 4.0.0.0b3 --------- * Updated from global requirements * Revert "[Devstack] Workaround osclient breakage" * Add mountable snapshots support to HNAS driver * Improve share migration scenario test validation * Mountable snapshots scenario tests * Fix MapRFS test\_\_execute to not impact others * Add mountable snapshots support * Fix devstack plugin to not depend on private network * VMAX manila plugin - Support for VMAX in Manila * NetApp: Support share revert to snapshot * [Tempest] Add functional tests for share groups feature * Manila Share Groups * Rename consistency group modules to share groups * [api-ref] Fix missing parameters in api-ref * Removes unnecessary utf-8 coding * NetApp cDOT: Add Intra-Vserver migration support * Updated from global requirements * Add QNAP Manila Driver * Add cast\_rules\_to\_readonly to share instances * Don't call update\_access if there are no rules * Implement Revert-to-snapshot in HNAS Driver * Share Migration Ocata Improvements * Refactor Access Rules APIs * Tooz integration * Trivial fixes to snapshot revert patch * [api-ref] Refactor share network documentation * Fix \`\`exportfs -u\`\` usage in generic driver * Add manila-manage db purge command * [Unity driver] VLAN enhancement * Implement share revert to snapshot * Fix metadata's soft-delete error when deleting shares * Fix license and E265 errors in doc/source/conf.py * Updated from global requirements * tests: remove useless variables in db\_utils methods * Some share api test cleanup * Update .gitignore * Fix column name error in migration script * Fix error message in Share Networks API * Remove NovaNetworkPlugin * [TrivialFix] Add negative test in quota detail * Add MapR-FS native driver * Allow skipping manila tempest tests * Properly deprecate service\_instance\_network\_helper\_type * remove devref jenkins doc * Support 
python 3.5 in tox * Unity/VNX Driver: Rename driver options * Migration Data Check fixes * Remove trailing backtick * Updated from global requirements * Remove nova net support from service\_instance * [api-ref] Refactor share instance export locations API documentation * GPFS: Add update\_access() * Report create\_share\_from\_snapshot\_support * Allow share status reset to migration status * Add support for manage/unmanage in GPFS driver * [api-ref] Refactor share actions API documentation * [api-ref] Refactor share export location API documentation * Add the ability to check the tenant quota in detail * Fix test variable injection in CI * [TrivialFix] optimize get filesystem id in huawei driver * [Devstack] Workaround osclient breakage * Updated from global requirements * GPFS KNFS: Fix deny access to succeed when possible * GPFS KNFS: Do not reuse ssh prefix in loop * Add create\_share\_from\_snapshot\_support extra spec * Trivial fix LOG.exception issues * [Grenade] Do not run tempest tests * Fix typo in rootwrap.conf * use six.StringIO for compatibility with io.StringIO in python3 * Trivial fix translate issues * NetApp: set proper broadcast domain for IPspace * Add Apache 2.0 license to source file * [Dell EMC Unity] Support create share smaller than 3 GB * Updated from global requirements * [TrivialFix] Move share type filter tempest to test\_scheduler\_stats.py * [devref] copy samples/local.conf correctly * GPFS CES: Fix bugs related to access rules not found * Add DriverFilter and GoodnessWeigher documentation * Setting up a development env with devstack instructions * Enable scenario tests for LVM and ZFSonLinux drivers * [Tempest] Add scenario test creating share from snapshot 4.0.0.0b2 --------- * Decouple Manila UI from Manila Devstack plugin * [Generic driver] Fix generation of admin export location * Fix undefined attribute in scenario test class * Fix Manila service image config for 3rd party CIs * Change network allocation of Unity driver to 1 * 
[LVM,Generic drivers] Fix relationships between parent and child shares * Replace six.iteritems() with .items() * Add "update\_access" interface support for VNX * Add share\_type filter support to pool\_list * [TrivialFix] Fix doc typo error * Updated from global requirements * [Tempest] Fix concurrency issue in scenario test * Add support for manage/unmanage snapshots in HNAS driver * [ZFSonLinux] Stop inheriting options creating share from snapshot * Updated from global requirements * [Devstack] Use openstack CLI instead of other clients * [Devstack] Fix DHSS=False setup for Generic driver * [Devstack] Run tempest update in proper time * Fix wrong data type in database migration * LOG marker mismatch in the code * [hacking] Ensure not to use LOG.warn * Fix devstack smb configuration outside ubuntu * Fix share writable in host-assisted migration * Remove unused function in db api * [api-ref] Refactor Manila scheduler stats API * TrivialFix: Remove Duplicate Keys * Show team and repo badges on README * Fix wrong instructions in the install guide * [Dummy driver] Add possibility to set delays for driver methods * [Devstack] Fix devstack plugin compatibility * Add Admin network support to HNAS driver * Fix extend operation of shrinked share in generic driver 4.0.0.0b1 --------- * [Tempest] Make share size configurable in scenario tests * [Tempest] Port remote\_client into Manila * hacking: Use uuidutils to generate UUID * devref/driver\_requirements: add cephfs protocol * Add Rally CI jobs with Manila scenarios * Fix spelling mistakes in cover.sh * Updated from global requirements * Check ceph backend connection on driver setup * Move EMC drivers to dell\_emc folder * NetApp cDOT controller utilization metrics * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Remove unused link * [install] Make the rabbitmq configuration simpler * Add testscenario to test-requirements * Fix share manage tempest test cleanup * Updated from global requirements * [Devstack] Create 
additional custom share types by default * Remove fake CG support from Generic share driver * Correct the order of parameters in assertEqual() * Use cors.set\_defaults instead of cfg.set\_defaults * Fix missing 'migration\_completing' task state * Replace 'assertEqual(None, ...)' with 'assertIsNone(...)' * Compare the encoded tag more accurately for huawei driver * Updated from global requirements * Add support of endpoint\_type and region\_name to clients manila uses * Updated from global requirements * [Tempest] Fix visibility of test\_quotas.py module * Fix a typo * Remove broken modindex link from devref * Clarify language in release notes * Updated from global requirements * Remove warnings for dropped context arguments * NetApp cDOT driver enhanced support logging * Add utility of boolean value parser * Fix concurrency issues in container driver * Updated from global requirements * Remove unused functions in utils * Update .coveragerc after the removal of openstack directory * [Grenade] Update devstack and pre\_test\_hook * Fix a typo in parameters.yaml * updated positional argument and output * Fix a typo in api\_version\_request.py * Updated from global requirements * NetApp cDOT driver should not report untenable pools * [api-ref] Refactor Manila snapshot API * [Container] Fix deletion of veths * Updated from global requirements * Enable release notes translation * Updated from global requirements * Avoid Forcing the Translation of Translatable Variables * Fix devstack for ubuntu-xenial * Stop adding ServiceAvailable group option * cephfs\_native: doc fixes * Remove tempest.test usage from manila tests * Fix typo in test\_gpfs.py * Use assert(Not)In/Greater(Equal)/LessEqual/IsNotNone * Updated from global requirements * Use method is\_ipv6\_enabled from oslo.utils * Files with no code must be left completely empty * TrivialFix: Remove default=None when set defaul value in Config * [TrivialFix] Correct file mode settings * [api-ref] Refactor Manila security 
service API * Remove redundant 'the' * Adjust doc about threading * Updated Hitachi NAS Platform Driver documentation * Updated from global requirements * Remove unused methods * Fix huawei driver username/password encoding bug * Use fnmatch from oslo.utils * Updated from global requirements * Fix check for nfsd presence * [api-ref] Refactor Manila availability-zones API * Fix huawei driver cannot delete qos while status is idle * Bring remote and local executors into accord * Add tempest tests for mtu and gateway fields * Make port\_binding\_extension mandatory if host\_id is specified * [api-ref] Refactor Manila quota set API * [api-ref] Remove temporary block in conf.py * Make nfs-kernel-server run on a clean host * Modify use of assertTrue(A in B) * Updated from global requirements * 3PAR driver fails to validate conf share server IPs * Manila install guide: Fix wrong instructions * delete python bytecode including pyo before every test run * Update installation tutorial and api-ref instructions * Update reno for stable/newton * [api-ref] Refactor limits and services API * [api-ref] Refactor manila extension API * [api-ref] Refactor consistency group API 3.0.0 ----- * Add cleanup to create from snap in Manila HNAS driver * [ZFSonLinux] Fix share migration using remote host * Put all imports from manila.i18n in one line * Fix access rules for managed shares in HSP driver * Improve Share Migration tempest tests * Fix allow/deny error message and race in migration * Fix for LV mounting issue in docker containers * Fix flaky Neutron port binding unit tests * Fix useless statements in unit tests * [docs] Update dev docs for ZFSonLinux share driver * [ZFSonLinux] Add test coverage for share migration * NetApp cDOT driver autosupport broken * Fix dedup/compression description in doc * huawei driver default create thin type share * HPE 3PAR: file share support of AD in devref * Updated from global requirements * glusterfs: handle new cli XML format * Add 
provisioned\_capacity\_gb estimation * Fix typo in response status code * standardize release note page ordering * Fix race condition updating routes * share-size not set to 1 with 'manage\_error' state * Config logABug feature for Manila api-ref * NetApp cDOT: Avoid cleaning up 'invalid' mirrors * [ZFSonLinux] Fix share migration support * Update to tempest 12.2.0 3.0.0.0b3 --------- * Add multi-segment support * Add binding\_profile option for backends * Nexenta: adding share drivers for NexentaStor * Updated from global requirements * Windows SMB: implement 'update\_access' method * Windows SMB: remove redundant operations * [Dummy driver] Add share migration support * [ZFSonLinux] Add share migration support * Add share type change to Share Migration * HPE 3PAR driver pool support * Share migration Newton improvements * Unity: Use job for NFS share creation * Correct reraising of exception * Windows SMB: avoid default read share access * Change assertTrue(isinstance()) by optimal assert * Fix Share Migration improper behavior for drivers * Fix Manila HNAS driver managing a share twice * Fix test bugs for replication CI * Implement replication support in huawei driver * Fix connectivity problem in Scenario job * Updated from global requirements * [CI FIX] Fix 'ip route' matching multiple subnets * Clean imports in code * Clarify grenade failure message * Updated from global requirements * Add documentation for EMC Unity Driver for Manila * Remove enable\_v1\_api and enable\_v2\_api config opts * 3PAR: Add update\_access support * add access\_key to share\_access\_map * Add missing filter function in HSP driver * Get ready for os-api-ref sphinx theme change * Fix fallback share migration with empty files * Rename and move HNAS driver * Updated from global requirements * Add neutron driver for binding * Fix sample config generation for cinder, nova and neutron opts * Add Hitachi HSP driver * manila\_tempest\_tests: fix exception messages * Container driver * Tox 
Upper Constraints - strip out reinstalls for remaining jobs * NetApp cDOT: Apply network MTU to VLAN ports * Fix typo in glusterfs driver comment * [dev-docs] Changed small case letters to capital * Add MTU information in DB and API * In-tree Install Guide * Updated from global requirements * cephfs\_native: enhance update\_access() * TrivialFix: Change LOG.warn to LOG.warning * Fix the broken UT of huawei driver for py34/35 * Add dedupe report in HNAS driver * cephfs\_native: add read-only share support * Updated from global requirements * Refactor GPFS driver for NFS ganesha support * NetApp cDOT driver configurable clone split * NetApp cDOT multi-SVM driver configurable NFS versions * Add support for CIFS shares in HNAS driver * Fix KeyError on err in unit test * Fix concurrent usage of update\_access method for share instances * NetApp cDOT vserver deletion fails if no lifs present * Fix ZFSonLinux driver prerequisites setup * Updated from global requirements * HPE3PAR make share from snapshot writable * Check for usage of same Cephx ID as manila service * Fix share migration test with snapshot support * [Tempest] Fix concurrency in "test\_show\_share\_server" test * [ZFSonLinux] Fix replicated snapshot deletion error * Fix race condition in tempest test * Replaces httplib with requests lib in Quobyte RPC layer * Add EMC Unity Driver for Manila * Add snapshot instances admin APIs * TrivialFix: Fix a wrong order bug in resource\_cleanup() * [ZFSonLinux] Add 'manage snapshot' feature support * Minor optimization and formatting corrections in Quobyte driver * Add retry in VNX driver when DB lock error happened * Remove "host" from driver private data * NetApp: Report hybrid aggregates in share stats * share/access: allow maintenance mode to be triggered * Migrate API reference into tree * Fix devref README and remove Makefile * Add dummy driver * Correct Quobyte driver capacity reporting * Updated from global requirements * Huawei: Support reporting disk type of 
pool * Documentation changes for thin/thick provisioning * Check 'thin\_provisioning' in extra specs * HPE3PAR: Fix filestore quota decrement * HPE3PAR: Handle exceptions on deleted shares * Fix pep8 job * Add reno notes about http\_proxy\_to\_wsgi middleware * Add DriverFilter and GoodnessWeigher to manila * Use http\_proxy\_to\_wsgi instead of ssl middleware * Use constraints for coverage job * Do not put real hostname and IP address to manila config sample * Add tox job for db revision creation * Add interface port configuration in EMC VNX driver 3.0.0.0b2 --------- * Huawei: Add share sectorsize config in Huawei driver * Huawei driver support access of all IPs * update min tox version to 2.0 * Updated from global requirements * [Tempest] Handle errored shares correctly using recreation logic * [Tempest] Create heavy scenario resources in parallel * Update tempest to newer commit version * Add share manage/unmanage of Oracle ZFSSA driver * Delete duplicated broken tempest test * Add lvm driver options to sample config * Updated from global requirements * [ZFSonLinux] Add 'manage share' feature support * Fix snapshot manage Tempest test * Manage / unmanage snapshot in NetApp cDOT drivers * Add gateway in network\_info and share network API * Fixed a spelling mistake of "seperate" to "separate" * Add share\_size config option * Config: no need to set default=None * Use upper-constraints in tox installs * Updated from global requirements * Update quota usages correctly in manage share operation * Change user\_id and project\_id to 255 length * Add user\_id and project\_id to snapshot APIs * [Tempest] Fix negative replication test * [Tempest] Remove noqa filters * Updated from global requirements * Cleanup unused DB APIs * glusterfs: Implement update\_access() method * ganesha: implement update\_access * Huawei: Add manage share snapshot in Huawei driver * Delete VLAN on delete\_vserver in Netapp cmode * Use is\_valid\_ipv4 and is\_valid\_ipv6 from oslo.utils * 
Updated from global requirements * Do not supply logging arguments as tuple * cephfs\_native: Fix client eviction * Pass context down to ViewBuilder method * Add more dir exceptions to pep8 tox job * [Tempest] Bump tempest version * [Tempest] Stop using deprecated Tempest opts * [Tempest] Add valuable tags to tests * [Tempest] HotFix for broken CI jobs * Updated from global requirements 3.0.0.0b1 --------- * Fix issue with testtool testrunner * HPE3PAR driver doesn't decrease fstore capacity * Updated from global requirements * Fix badly formatted release note * Use oslo IntOpt function instead of explicit check * Document instructions for documentation * Adding info to use venv of tox for reno * Polish hook decorator * Updated from global requirements * Updated from global requirements * Fix HDS HNAS errors caused by incorrect IDs * Huawei: Fix exception in update\_access not found * Hacking check for str in exception breaks in py34 * Add hacking rule for assertEqual(None, \*) * Squash E042 and E043 bashate warnings * Removed the invalid link from Manila Dev Guide * Use assertTrue rather than assertEqual(True, ...) 
* Replace assertEqual(None, \*) with assertIsNone in tests * Updated from global requirements * Remove retry logic from manage API * Fix tox errors and warnings in the devref * [Doc] Update quick start guide to Mitaka release * Updated from global requirements * HDS\_HNAS: Fix improper error message * HDS\_HNAS: Remove unused parameter * Fix context warning spam of scheduler and share logs * Updated from global requirements * Fix docs for REST API history and Scheduler * Fix Manila RequestContext.to\_dict() AttributeError * Add wraps function to decorator * Fix context decorator usage in DB API * Add hint how to configure fake\_driver in manila-share * Test: make enforce\_type=True in CONF.set\_override * Remove NetAppCmodeClient.delete\_network\_interface * Updated from global requirements * Add user\_id echo in manila show/create/manage API * Bump Tempest version * Remove deprecated manila RequestBodySizeLimiter * Fixed references for scheduler drivers in doc * Fix share server info in CGs created from CGs * Skip over quota tests if quota tests disabled * Delete Snapshot: status wrongly set when busy * Updated from global requirements * Fix HNAS error with unconfined filesystems * Developer Reference: Adopt the openstackdocstheme * Fix IPv6 standalone network plugin test * cephfs\_native: doc fixes * Added docs for commit message tags * Fix docstring for policy.enforce method * Updated from global requirements * Fix tempest.conf generation * [Trivial] replace logging with oslo.log * Add Grenade support to Manila * NetApp: DR look up config via host name * [Devstack] Set proper driver mode for ZFSonLinux driver * use thread safe fnmatch * Updated from global requirements * Make devstack functions support grenade * Fix microversion usage in share manage functional tests * Handle manage/unmanage for replicated shares * Fix HNAS driver exception messages * Updated from global requirements * Add doc for Share Replication * Fix Share status when driver migrates * Fix 
doc build if git is absent * Remove unused tenant\_id variable * [Fix CI] Bump Tempest version * Detect addition of executable files * Updated from global requirements * Add release notes usage and documentation * Deprecate manila-all command * update hacking checks for manila * Fix creation of Neutron network in Devstack * Fix manage tempest test validation * Update HPE 3PAR devref docs * NetApp cDOT driver should honor reserved percentage * Remove Devstack workaround for Neutron * Remove unused logging import and LOG global * cephfs\_native: Change backend snapshot dir's name * Remove openstack-common.conf * update dev env doc for Fedora releases * Fix force-delete on snapshot resource * Increase Cinder oversubscription ratio in CI * Use install\_package when preparing LVM driver installation * Fix Manage API synchronous call * Generic driver: ignore VolumeNotFound in deleting * Removing some redundant words * Add common capabilities matrix to devref * Add caution to test-requirements * Increase logging for driver initialization * Capitalize global var for clients * Fix typos * Update ZFSonLinux share driver docs * Update reno for stable/mitaka 2.0.0 ----- * Fix call of clients in post\_test\_hook.sh * Add tests to ensure snapshots across replicas * NetApp cDOT: Handle replicated snapshots * Data Replication: Ensure Snapshots across replicas * Fix update\_access concurrency issue * Fix manage API ignoring type extra specs * Make ZFSonLinux driver handle snapshots of replicated shares properly * Fix keystone v3 issues for all clients * Fix for incorrect LVMMixin exception message * NetApp cDOT: Fix status updates for replicas * NetApp cDOT: Raise ShareResourceNotFound in update\_access * Add hacking check to ensure not to use xrange() * Fix generic and LVM driver access rules for CIDRs * Fix report of ZFSonLinux driver capabilities * Fix the scheduler choose a disable share service * Fix typos * Fix error logged for wrong HPE 3par client * 3PAR remove file tree on 
delete when using nested shares * HDS-HNAS: Fix exception in update\_access not found * Revert "LXC/LXD driver" * Fix Hitachi HNAS driver version * service instance: also recognize instance name * Fix update of access rules in ZFSonLinux driver * Check share-network in 'share create' API * glusterfs volume layout: take care of deletion of DOA shares * Fix delete when share not found in update\_access * Remove default values for update\_access() * NetApp cDOT driver should not split clones * Fix handling of share server details after error * HDS-HNAS: fixed exception when export not found * Fix lock decorator usage for LVM and Generic drivers * Fix HNAS snapshot creation on deleted shares * Move iso8601 from requirements to test-requirements * Fix typos * glusterfs.common: GlusterManager.gluster\_call error report fix * glusterfs.GlusterNFSVolHelper: remove \_\_init\_\_ * Add tempest tests for Share Replication * register the config generator default hook with the right name * Windows driver: fix share access actions * Collapse common os\_region\_name option * Disallow scheduling multiple replicas on a given pool * update quota of origin user on share extend/shrink * Update quota of proper user on resource delete * Fix Share Migration access rule mapping * Fix unstable DB migration tests * Fix Share Migration KeyError on dict.pop * NetApp cDOT APIs may get too little data * HNAS: Enable no\_root\_squash option when allowing access to a share * Fix HNAS driver crash with unmounted filesystems * Fix compatibility with Tempest * Set proper image name for tempest * Remove nsenter dependency * Fix ZFSonLinux driver share replica SSHing * Fix ZFSonLinux access rules for CIDRs * Fix HNAS driver thin\_provisioning support * Fix pylxd hard dependencies * Squash consequent DB calls in create\_share\_instance * Fix slow unit test * Run ZfsOnLinux gate tests with SSH enabled * Fix status update for replicas * Set TCP keepalive options * Fix manila devstack plugin for keystone 
v3 usage * Add /usr/local/{sbin,bin} to rootwrap exec\_dirs * Updated from global requirements * Use official location for service image * Allow devstack plugin to work without Cinder * Download service image only when needed * glusterManager instantiation regexp validation 2.0.0.0b3 --------- * Moved CORS middleware configuration into oslo-config-generator * Move Share Migration code to Data Service * Remove unintended exposure of private attribute * Add share driver for Tegile IntelliFlash Arrays * Update tempest commit and switch to tempest.lib * LXC/LXD driver * Update export location retrieval APIs * Huawei driver improve support of StandaloneNetworkPlugin * Add Ceph Native driver * Introduced Data Service * Implement admin network in generic driver * NetApp: Add Replication support in cDOT * Fix NFS helper root squashing in RW access level * Add ZFSonLinux share driver * glusterfs.common: move the numreduct function to toplevel * glusterfs\_native: relocate module under glusterfs * Huawei driver code review * Add QoS description in Huawei * glusterfs/ganesha: add symbolic access-id to export location * Add share resize support to Oracle ZFSSA driver * Implement update\_access() method in huawei driver * Update Huawei driver doc for Mitaka * Remove unused pngmath Sphinx extension * Implement update\_access() in generic driver + LVM * Add doc for export location metadata * gluster\*: clean up volume option querying * Admin networks in NetApp cDOT multi-SVM driver * Support export location metadata in NetApp cDOT drivers * Change sudo to run\_as\_root in LVM driver * Huawei driver: change CIFS rw to full control * Updated from global requirements * Fix NetApp cDOT driver update\_access negative test * Define context.roles with base class * Subclass context from oslo\_context base class * Add Replication admin APIs and driver i/f changes * glusterfs/common: don't suppress vol set errors * Improve exception msg when attaching/detaching volumes * Use assertIsNone 
instead of assertEqual(None, \*\*\*) * Scheduler enhancements for Share Replication * Fix typo in comment message * Remove aggressive assert from share server test * Fix scenario tests * EMC Isilon Driver Support For CIFS Read-Only Share * Add update\_access() interface to Quobyte driver * Check for device node availability before mkfs * Replace TENANT => PROJECT for manila plugin * Validate qos during share creation * Fix doc string in driver interface * Fix neutron port concurrency in generic driver * Add additional documentation on extra spec operations * Implement update\_access() method in Hitachi HNAS driver * Fix share migration tests in gate * Update help text for some service instance config opts * Three ways to set Thin/Thick Type in Huawei driver * Squash E006 bashate warnings * Implement update\_access() in NetApp cDOT drivers * Add tox fast8 option * Use ostestr to run unit test * Make consistency group timeout exception message more robust * Manage and unmanage snapshot * Stop proxying share\_server\_id through share in share.manager * Remove deprecated share attribute usage from manila.share.api * Get host from share['instance'] in share RPC API * Cleanup deprecation warnings from using share proxy properties in API * Add possibility to skip quota tests in Tempest * Remove default=None from config options * Add space to message in manila\_tempest\_tests/tests/api/test\_shares.py * Fix rpcapi identifiers for better readability * Add admin network for DHSS=True share drivers * Allow DHSS=False tests to override Tempest concurrency * Remove \`None\` as a redundant argument to dict.get() * gluster\*: add proper getter/setters for volume options * Unify usage of project name in doc to 'manila' * Removed ignored checks from tox.ini and fixed pep8 issues * Updated from global requirements * Fix tempest test for export locations API * Support devstack install without nova * EMC Isilon Driver Support For NFS Read-Only Share * replace string format arguments 
with function parameters * Converted MultiStrOpt to ListOpt * Fix Hitachi HNAS Driver default helper * Use existing "insecure" options when creating nova/cinder clients * Fix Share Replica details in the API * Share Replication API and Scheduler Support * Fixed Hitachi HNAS slow test * Replace 'stack' with $STACK\_USER in devstack plugin * Replace deprecated oslo\_messaging \_impl\_messaging * Avoid KeyError on instance\_id in ensure\_service\_instance * Hitachi HNAS driver share shrink * LVM driver: Pass '--units g' to vgs invocation * Updated from global requirements * Fix scheduling with instance properties * Add update\_access() method to driver interface * Update the home page * Fix issue in hacking with underscore imports * Added Keystone and RequestID headers to CORS middleware * Ext. exception handling for httplib and socket errors in Quobyte driver * Huawei: Create share from snapshot support in Huawei driver * Don't convert share object to dict on create * Fix Cinder's NoValidHostFound errors * Remove outdated pot files * Fix Devstack and Manila-ui interaction * Fix devstack function call recreate db * tempest: wait for deletion of cert rule * Bump tempest version * Fix params order in assertEqual * Removed unnecessary string conversions on Hitachi HNAS Driver * Add feature support information of Oracle ZFSSA Manila driver * extra-specs should work with string True/False * Fix db shim layer mismatches with implementation * TrivialFix: Remove deprecated option 'DEFAULT/verbose' * isoformat instead of deprecated timeutils.isotime 2.0.0.0b2 --------- * Return appropriate data on share create * Hitachi HNAS driver refactoring * Trivial Fix: fix missing import * Remove unused server\_get() method * QoS support for Huawei Driver * Add LVM driver * Fix release of resources created by Tempest * Fix access rules tempest v2 client * Huawei: Ensure that share is exported * Using dict.items() is better than six.iteritems(dict) * Updated from global requirements * 
gluster\*: refactor gluster\_call * Fix pep8 failure * Fix Mutable default argument * Fix devstack in non-neutron environments * Fix usage of standlone\_network\_plugin * Implement export location metadata feature * Doc: Remove prerequisite: Ubuntu * Hide snapshots with no instances from listing * QoS support for shares * Huawei: Add share server support * Isilon Driver: Update Share Backends Feature Doc * Clean up removed hacking rule from [flake8] ignore lists * Fix Manila tempest tests * Adds extend\_share for Quobyte shares * Update NetApp driver support matrix line * Fix response code for various NotFound exceptions * Huawei driver report pool capabilities [True, False] * Fix 'extend' API for 2.7+ microversions * Replace assertEqual(None, \*) with assertIsNone in tests * Delete Share Instance of unmanaged share * Add debug testenv in tox * A tempest test in services API using unsafe assert * Cannot return a value from \_\_init\_\_ * Make Manila UI be installed after Horizon * Use new approach for setting up CI jobs * Add doc for share driver hooks * Add more documentation to share/driver * Fix grammatical mistake, Changed character from "an" to "a" * Huawei: Add manage share with share type in Huawei driver * Refactor share metadata tests to use DB * Replace deprecated [logger/LOG].warn with warning * Add snap reserve config option to NetApp cDOT driver * Updated from global requirements * Fix tempest case "test\_delete\_ss\_from\_sn\_used\_by\_share\_server" * Fix CI Tempest jobs * glusterfs/vol layout: remove manila-created vols upon delete\_share * Use constants instead of literals in Huawei Driver * Fix unit test of ShareSnapshotNotFound * Fix handling of Novaclient exceptions * Drop MANIFEST.in - it's not needed with PBR * Replace deprecated library function os.popen() with subprocess * Change assertTrue(isinstance()) by optimal assert * EMC Isilon Driver Doc Update for Extend Share * [docs] Fix table elements view on page with list of supported features 
* Trivial: Remove unused logging import * Set timeout for parmiko ssh connection * Fix wrong flake8 exception and pep8 violations * Remove unused oslo-incubator \_i18n.py from Manila * Deprecated tox -downloadcache option removed * Keep py3.X compatibility for urllib * EMC VNX: Fix the interface garbage in VNX backend * EMC Isilon Driver Support For Extend Share * HPE3PAR finds CIFS share with either prefix * Improve tempest tests for shares listing APIs * Updated from global requirements * Support standard Manila capability flags in NetApp cDOT driver * Mock out service availability check in unit test * Capability lists in Manila scheduler * HPE3PAR support for share extend and shrink * Pop off user/tenant kwargs in RequestContext init * Move the config environment variables into devstack/settings file * glusterfs: document Gluster NFS misbehavior * Change instance service default path for private key to None * Use isoformat() instead of timeutils.strtime() * EMC VNX: Add multi-pools support * Add space to message in manila/consistency\_group/api.py * Remove duplicate keys from dictionary * Fix Tempest microversion comparison approach * Prevent removal of share server used by CG * HPE3PAR support for access-level (ro,rw) * Performance: leverage dict comprehension in PEP-0274 * Updated from global requirements * Document correction in quick\_start.rst * glusterfs\_native: fix parsing of the dynamic-auth option * Fix wrong check message * NetApp cDOT driver should support read-only CIFS shares * Do not allow to modify access for public share type * EMC VNX: Add share extend support * Allow to set share visibility using "manage" API * Remove version per M-1 release instructions * Updated from global requirements * [CI] Speed up Tempest jobs * Avoid service\_instance neutron port clash in HA 2.0.0.0b1 --------- * EMC: Fix bugs when domain controller is not available * Put py34 first in the env order of tox * Move API module 'share\_instances' under v2 dir * Change 
manila\_tempest\_tests to use credentials\_factory * timeutils.total\_seconds() is deprecated * Reorganize scheduler and merge code from Oslo incubator * glusterfs: add missing i18n import * Fix Share status precedence based on instances * doc: document the non-standard export semantics of Ganesha * Liberty doc updates for GlusterFS drivers * Add new URLs for APIs ported from extensions * Updated from global requirements * NetApp cDOT multi-SVM driver can't handle duplicate addresses * Remove mention of isilon\_share\_root\_dir * Add share-networks validation * Simplify ping usage for service VM check in CI * Improve Tempest tests for consistency groups * Add sleep to CI hooks to avoid races * add Red Hat GlusterFS drivers feature support info * Add reno for release notes management * Delete python bytecode before every test run * Updated from global requirements * Add support of 'network\_type' to standalone network plugin * Fix import of devstack functions for common CI script * Last sync to Manila from oslo-incubator * glusterfs/volume layout: indicate volume usage on volumes themselves * glusterfs/volume layout: fix incorrect usage of export\_location * Refactor authorize() method in wsgi.py * Implements ensure\_share() in Quobyte driver * Prevent Share operations during share migration * Fix typo on quota limit error message * Refactor HP 3PAR share driver to now be HPE * OpenStack typo * Added driver minimum requirements and features doc * Remove httplib2 useless requirement * Added CONTRIBUTING file in .rst format * HPE3PAR create share from snapshot fails * Updated from global requirements * EMC VNX Manila Driver Refactoring * Updated from global requirements * Port share type extensions to core API * Port admin actions extension to core API * Use oslo\_config new type PortOpt for port options * Added CORS support to Manila * Split common logic of CI hooks to separate file * Port share actions to core API * Port quotas to core API * Port services to core 
API * remove default=None for config options * Add mount automation example based on Zaqar * Make setup.py install Manila Tempest plugin * Sync Manila Tempest plugin with latest Tempest * Port manage/unmanage extensions to core API * Updated from global requirements * Rephrase comments for Share create API * Use assertTrue/False instead of assertEqual(T/F) * Fix no-share-servers CI job * Use default Keystone API version in Devstack * Updated from global requirements * Port availability zones to core API * Generic driver: wait for common server during setup * Port used limits to core API * Updated from global requirements * Add IBM GPFS Manila driver * Fix list-availability-zones API for PostgreSQL * Fix share type model scalability for get request 1.0.0 ----- * Fix usage of dependencies * Fix usage of dependencies * Use 'False' as default value for "compression" common capability * Stop using deprecated tempest options * Make share service understand driver init failure * Fix broken unit tests * Enable extend\_share in HDFS driver * Verify common server in Generic driver on startup * Updated from global requirements * Improve Manila HDS HNAS Driver Manual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Fix order of arguments in assertEqual * Update feature support matrix for Windows SMB 1.0.0.0rc2 ---------- * Share manager: catch exception raised by driver's setup() * Fix display of availability-zone for manila-manage command * glusterfs\_native: use dynamic-auth option if available * Fix setting of "snapshot\_support" extra spec for tempest * Fix deletion of error state access rules * Fix response data for API access-allow * Fix display of availability-zone for manila-manage command * glusterfs: check nfs.export-volumes with Gluster NFS + vol layout * glusterfs: manage nfs.rpc-auth-allow not being set * glusterfs vol layout: start volume cloned from snapshot * glusterfs\_native: use dynamic-auth option if available * NetApp cDOT 
driver isn't reentrant * Can't create shares on drivers that don't support snapshots * Revert netapp\_lib dependency in NetApp cDOT Manila drivers * Set defaultbranch to stable/liberty in .gitreview * Feature support matrix update for HP 3PAR * Fix \`test\_trans\_add\` for Python 3.4.3 * Remove misleading snapshot methods from Quobyte driver * Fix response data for API access-allow * Improve logging of calls in ShareManager * Use random IPs in security service tests * EMC Isilon Manila Driver Feature Support * Fix deletion of error state access rules * Fix order of arguments in assertEqual * glusterfs vol layout: start volume cloned from snapshot * Fix order of arguments in assertEqual * NetApp cDOT driver isn't reentrant * Fix mentioned DEFAULT\_API\_VERSION in doc * Revert netapp\_lib dependency in NetApp cDOT Manila drivers * Fix \`test\_trans\_add\` for Python 3.4.3 * Adds Quobyte share backend feature support mapping data * Remove language about future features from driver doc * Remove LegacyFormatter from logging\_sample.conf * Fix setting of "snapshot\_support" extra spec for tempest * Fix some spelling typo in manual and error message * glusterfs: check nfs.export-volumes with Gluster NFS + vol layout * glusterfs: manage nfs.rpc-auth-allow not being set * Can't create shares on drivers that don't support snapshots * Add Huawei driver details in doc * Add Hitachi HNAS driver documentation * Open Mitaka development 1.0.0.0rc1 ---------- * glusterfs\*: fix ssh credential options * Make Quobyte shares actually read-only when requested * Fixes a Quobyte backend call issue with a wrong field name * Fix error response when denying snapshot creation * Fix 'cover' tox job * glusterfs: fix gluster-nfs export for volume mapped layout * Updated from global requirements * Fix experimental=True for view in microversion 2.5 * glusterfs\_native: Hardwire Manila Host CN pattern * Fix HDS HNAS manage incorrect share size * glusterfs\*: amend export location * Fix HDS HNAS 
Create from snapshot ignoring Size * Fix pool\_list filter tests to match pools exactly * Non-admin user can perform 'extra-specs-list' * Fix improper handling of extending error * Update feature support mapping doc for NetApp cDOT * Remove IBM GPFS driver due to lack of CI * Add 'snapshot\_support' attr to share details * Fix get\_stats to return real used space in HNAS * Add new features description in Huawei doc * Fix API version history in Huawei driver * Fix task\_state field shown on API < 2.5 * glusterfs: Fix use of ShareSnapshotInstance object * NetApp cDOT driver should prefer aggregate-local LIFs * Fix HDS HNAS snapshot creation tracking * Return share\_type UUID instead of name in Share API * doc: turn ascii art tables into proper reST grid tables * Make scenario tests able to run with DHSS=False * Fix missing value types for log message * glusterfs\_native: Fix typo for protocol compatibility * Fix typo in test\_hook * Fix Share Migration tempest tests * Remove support for 'latest' microversion * Adds retry function to HNAS driver * Corrects capabilities returned by Quobyte Manila driver * Fix create snapshot API in Huawei driver * Check the snapshot directory before copy it * Remove HDS SOP driver due to lack of CI * Missing check in ShareManager::manage\_existing() * Add v2 Manila API path as base for microversions * Huawei driver: fix reports reduplicate pools * Enhance base driver checking if a method was implemented * Updated from global requirements * Allow service image download to be skipped * Use 'False' as default value for "dedupe" common capability * Capacity filter should check free space if total space is unknown * Fix usage of novaclient * NetApp cDOT driver with vserver creds can't create shares * Fix unstable unit test 'test\_get\_all\_host\_states\_share' * Fix concurrency issue in tempest test * Fix description in Huawei driver * Replaces xrange() with range() for py 2/3 compatibility * Updated from global requirements * Consistency 
groups in NetApp cDOT drivers * Fix keypair creation * Add functional tests for Manila consistency groups * Place tempest microversions test module in proper place * Consistency Group Support for the Generic Driver * Add Share Migration tempest functional tests * Share Migration support in generic driver * Add Share Migration feature * glusterfs: directory mapped share layout * glusterfs: volume mapped share layout * glusterfs/layout: add layout base classes * Add Consistency Groups API * Scheduler changes for consistency groups * Add DB changes for consistency-groups * Use Tempest plugin interface * Make devstack plugin independent from default Identity API version * glusterfs-native: cut back on redundancy * glusterfs/common: refactor GlusterManager * glusterfs\*: factor out common parts * Add share hooks * Add possibility to setup password for generic driver * Use devstack functions for registering Manila * devstack plug-in to reflect new manila-ui plug-in * HP 3PAR extra-spec prefix needs to be hp3par * Fix the typo "version" * Updated from global requirements 1.0.0.0b3 --------- * Add attributes 'name' and 'share\_name' to ShareSnapshotInstance * Fix data copying issue in DB migration 1f0bd302c1a6 * HP 3PAR driver handles shares servers * Updated from global requirements * Fix failing Quobyte unit test * Remove instances of "infinite" capacity from Manila * Replace thin/thick capabilities with thin\_provisioning * Add Share instances Admin API * Add Windows SMB share driver * Remove ununsed dependency: discover * Implement snapshot tracking in HDS HNAS driver * Use Share Instance ID in 'name' property * Ignore git backup merge files * Tempest: wrong assertion on the number of shares created * Ignore unavailable volumes when deleting a share * Updated from global requirements * New Manila HDS HNAS Driver * Tempest: wait for access rule to be deleted * Fix Tempest tests targeting user based access rules * glusterfs\_native: Add create share from snapshot * 
Generic driver:Create Cinder volume in correct AZ * Reduce dependency to tempest: exceptions * Add possibility to filter back ends by snapshot support * Add tempest tests for "cert" based access type * Clean up admin\_actions API extension unit tests * Use service availability\_zone for Share Server VM * Add availability zones support * Add methods for share instances in Share API * Add compression in common capabilities doc * HP 3PAR add more info to the share comment * Add tempest tests for REST API microversions * Huawei driver support smartcache and smartpartition * Manila experimental REST APIs * Fix compatibility with sqlalchemy 0.9.7 * Updated from global requirements * Fix incorrect use of snapshot instances * HP 3PAR reports capabilities * Lazy Load Services * Replace assertEqual(None, \*) with assertIsNone in tests * Updated from global requirements * Fix incorrect variable name in some exception class * Update NetApp cDOT Manila drivers to use netapp\_lib * Add manage/unmanage support to NetApp cDOT driver * Service Instance: Add instance reboot method * Add WinRM helper * Common capabilities documentation * Fix Neutron config setting in pre\_test\_hook * Add share instances and snapshot instances * Fix extend share API in Huawei driver * Huawei driver support dedup, compression, thin and thick * Fix the log level in scheduler manage * Enable Tempest tests for glusterfs/hdfs protocols * Support shrink\_share in NetApp cDOT drivers * Fix sample config file generation * Change huawei driver send REST command serially * Support extend\_share in NetApp cDOT drivers * Fix for Isilon driver failing to connect * Updated from global requirements * Fix bug to locate hdfs command in HDFS native driver * Fix AttributeError without share type provided * Implement Manila REST API microversions * Add retry logic when delete a NFS share in VNX * Cleanup shares created by Tempest * Add py34 to test environment to tox.ini * Allow Tempest to skip snapshot tests * Add 
retries for deadlock-vulnerable DB methods * Adding extend share support in IBM GPFS Driver * Make QuobyteHttpsConnectionWithCaVerification py3 compatible * Add SSL middleware to fix incorrect version host\_url * Updated from global requirements * Fix HTTP headers case for API unit tests * Fix bug to run command as root in HDFS driver * Fix typos in neutron\_network\_plugin.py * Remove incorrect URLs from jenkins.rst * Remove ordering attempts of 'unorderable types' * Fix 'hacking' unit tests for py3 compatibility * Skip unit tests for SSL + py3 * Fix string/binary conversions for py34 compatibility * Make 'utils.monkey\_patch' py3 compatible * Decouple some of the Service Instance logic * Wrap iterators and 'dict\_items' for py34 compatibitity * Update Documents to use HDFS Driver * Fix two typos on documentation and one typo on CLI help * Stop using deprecated contextlib.nested * Fix imports for py34 compatibility * Fix exceptions handling for py34 compatibility * Rename from il8n.rst to i18n.rst * Remove copyright from empty file * Fix HP3PAR extra-specs scoping prefix bug * Updated from global requirements * Support manage\_existing in Huawei driver * Fix HP3PAR SMB extra-specs for ABE and CA * Generic: add service instance mgr set up method * Fix Generic driver share extend * Replace py2 xrange with six.moves.range * Fix integer/float conversions for py34 compatibility * Fix dictionary initialization for Python 3 compatibility * Replace (int, long) with six.integer\_types * Fix list creation * Replace dict.iteritems() with six.iteritems() * Add doc share features mapping * Replace 'types.StringTypes' with 'six.string\_types' * Replace '\_\_metaclass\_\_' with '@six.add\_metaclass' * Fix ZFSSA driver for py34 compatibility * Listen on :: instead of 0.0.0.0 by default 1.0.0.0b2 --------- * Fix slow unit tests * Remove Cinder leftover unit tests * Eventlet green threads not released back to pool * Add client\_socket\_timeout option to manila.wsgi.Server * Catch 
error\_deleting state for more resources than just shares * Updated from global requirements * Make coverage tox job fail when test coverage was reduced * Add test coverage for periodic tasks * Change \_LE to \_LW (at manila/share/manager.py) * Fix 'extend\_share' in generic driver * Fix unit tests for quobyte * Support shrink\_share in Huawei driver * GlusterFS: fix retrieval of management address of GlusterFS volumes * Explicit backend connect call in Quobyte RPCs * Enable multi-process for API service * Updated from global requirements * Make config opt 'enabled\_share\_protocols' verification case insensitive * glusterfs\_native: prefix GlusterFS snap names with "manila-" * glusterfs\_native: delete\_snapshot(): find out real GlusterFS snap name * glusterfs\_native: fix delete share * Reuse 'periodic\_task' from oslo\_service * Implement shrink\_share() method in Generic driver * doc: fix typo s/virutalenv/virtualenv/ * Cleanup DB API unit tests * Add negative tests for admin-only API * Updated from global requirements * HP 3PAR uses scoped extra-specs to influence share creation options * Retry \_unmount\_device in generic driver * Add 'retry' wrapper to manila/utils.py * Huawei driver support storage pools * Updated from global requirements * Modify confusing name in Huawei driver * Use all types of migrations in devstack installation * Close DB migration sessions explicitly for compatibility with PyMySQL * Delete redundant period in ManilaException messages * Use soft\_delete() methods in DB api * Use uuidutils to generate id's in DB api * Add license header to migrations template * Remove models usage from migrations * Huawei manila driver support multi RestURLs * EMC VNX: Fix the total capacity for dynamic Pool * Updated from global requirements * Updated from global requirements * Add access-level support in VNX Manila driver * Enable Manila multi-SVM driver on NetApp cDOT 8.3 * Support for oversubscription in thin provisioning * Fix for 
SchedulerStatsAdminTest fails on timestamp * Print devstack command traces before executing command * Fix unit tests for compatibility with new mock==1.1.0 * Change "volume" to "share" in filter and weigher * Updated from global requirements * Remove unneeded OS\_TEST\_DBAPI\_ADMIN\_CONNECTION * Remove duplicated options in manila/opts.py * More Manila cDOT qualified specs * Add PoolWeigher for Manila scheduler * Remove unused manila/openstack/common/eventlet\_backdoor.py * Updated from global requirements 1.0.0.0b1 --------- * Use loopingcall from oslo.service * Updated from global requirements * Use new manila-service-image with public-key auth * Allow drivers to ask for additional share\_servers * HP 3PAR driver config has unused username/password * Huawei manila driver support Read-Only share * Override opportunistic database tests to PyMySQL * Support share-server-to-pool mapping in NetApp cDOT driver * Remove unused files from oslo-incubator * Update version for Liberty 1.0.0a0 ------- * Support extend\_share in Huawei driver * Fix incompatiblity issue in VNX manila driver * Updated from global requirements * Updated from global requirements * Reduce amount of tempest threads for no-share-servers jobs * Add retry on volume attach error in Generic driver * HP 3PAR Add version checking and logging * Bump supported tempest version * Share\_server-pool mapping * Replace it.next() with next(it) for py3 compat * Fix tempest ShareUserRules\* tests * Updated from global requirements * Stop using deprecated 'oslo' namespace * Use oslo.utils to get host IP address * Remove deprecated WritableLogger * Make required function arguments explicit * Remove unused contrib/ci files * Fix docstrings in tempest plugin * Updated from global requirements * Add share shrink API * Implement tempest tests for share extend API * Implement extend\_share() method in Generic driver * Huawei manila driver code refactoring * Transform share and share servers statuses to lowercase * Updated 
from global requirements * Fix policy check for API 'security service update' * Remove unused attr status from models * Drop incubating theme from docs * Make devstack install manila-ui if horizon is enabled * glusterfs: Edit doc and comments * Simplify generic driver with private data storage API * Provide private data storage API for drivers * Remove usage of utils.test\_utils * Remove ServiceClient from share\_client * Switch from MySQL-python to PyMySQL * Add share extend API * Export custom Share model properties with \_extra\_keys * Release Neutron ports after share server deletion using generic driver * Make generic driver use only ipv4 addresses from service instances * Fix share-server resources cleanup in generic driver * ganesha: Add doc * Update Quickstart guide * NetApp cDOT driver fails Tempest cleanup on clone workflows * Updated from global requirements * Add doc for network plugins * Fix 'AllocType' read failure in Huawei driver * Sync tempest plugin with latest tempest * Updated from global requirements * Improve ShareServer DB model * Updated from global requirements * Add multi vm scenario test * Imported Translations from Transifex * Drop use of 'oslo' namespace package * Updated from global requirements * EMC: Remove unnecessary parameter emc\_share\_driver * Add doc with basic deployment steps * Move to the oslo.middleware library * Clean up redundant code and nits from EMC VNX driver * Remove unused oslo-incubator modules * EMC VNX Manila Driver Feature Support * Allow overriding the manila test regex * Updated from global requirements 2015.1.0 -------- * NetApp cDOT driver clones NFS export policy * Add config\_group\_name for NeutronNetworkHelper * Remove ping check from basic scenario test * Sync contrib/tempest to newer state * Fix for the deletion of an error share server * NetApp cDOT driver clones NFS export policy * Sync oslo-incubator code * EMC VNX Driver: Fix typo issues * Remove passing DB reference to drivers in Share Manager * 
Use oslo\_policy lib instead of oslo-incubator code * Use oslo\_log instead of oslo-incubator code * Use lib lxml for handling of XML request * Updated from global requirements * Remove direct DB calls from glusterfs\_native driver * Release Import of Translations from Transifex * Remove maniladir() and debug() function from utils * Use identity\_uri for keystone\_authtoken in devstack * Switch to new style policy for test policy * Add mount/umount in scenario tests * update .gitreview for stable/kilo * Update doc-strings for snapshot methods in Share Driver * Use openstackclient in devstack plugin * Remove direct DB usage from NetApp driver * Move response code verification to share client * Use entry\_points for manila scripts * Switch to new style policy language 2015.1.0rc1 ----------- * Remove Limited XML API Support from Manila * Prevent hanging share server in 'creating' state * More flexible matching in SSL error test * Imported Translations from Transifex * Mock out base share driver \_\_init\_\_ in EMC driver * Add object caching in manila REST API requests * glusterfs\_native: Fix Gluster command call * glusterfs, glusterfs\_native: perform version checks * Open Liberty development * Add Glossary with basic Manila terms * Restrict access only to vm ip * NetApp cDOT driver is too strict in delete workflows * Adding configuration instructions in huawei\_nas\_driver.rst * Update openstack-common reference in openstack/common/README * Prevent share server creation with unsupported network types with cDOT * Fix log/error message formatting * Updated from global requirements * Add segmentation ID checks for different segmentation types * glusterfs\_native: make {allow,deny}\_access non-destructive * glusterfs\_native: negotiate volumes with glusterd * NetApp cDOT driver uses deprecated APIs for NFS exports * Automatic cleanup of share\_servers * Fix fields 'deleted' in various DB models for PostgreSQL compatibility * Add tempest coverage for share type access 
operations * Enable developers to see pylint output * Allow overwriting some Manila tempest settings in CI jobs * Set share-type on share created from snapshot * cDOT multi-SVM driver may choose unsuitable physical port for LIFs * cDOT driver should split clone from snapshot after creation * Replace SQL code for ORM analog in DB migration scripts * Delete skipped tempest tests that won't be enabled * NetApp cDOT drivers should not start without aggregates * IBM GPFS Manila Driver Docs - update * Switch to v2 version of novaclient * Backslashify CIFS share export paths for Generic * NetApp cDOT multi-SVM driver should work with non-VLAN networks * NetApp cDOT multi-SVM driver should not start with cDOT 8.3 * Fix CIFS export format in EMC VNX driver * Forbid unmanage operation for shares with snapshots * Fix deletion of export locations * Add initial scenario test for Manila * Fix setting of share name and description with manage API * HP 3PAR driver documentation * Fix setting of extra specs for share types * Huawei NAS driver returns CIFS export locations in wrong format * IBM GPFS Manila Driver Docs * Fix common misspellings * Add share state verification for API 'unmanage' * Updated from global requirements * Sync tempest plugin with latest tempest * Make generic driver update export location after manage operation * Deal with PEP-0476 certificate chaining checking * Fix manage operation in generic driver * Imported Translations from Transifex 2015.1.0b3 ---------- * Implement manage/unmanage support in generic driver * cDOT driver should report all share export locations * Enable bashate during pep8 run * Allow updates to export locations * NFS based driver for Quobyte file storage system * glusterfs\_native: partially implement snapshot * Fix issues with get\_pool scheduler API * Use SoftDeleteMixin from oslo.db * Imported Translations from Transifex * Fix cleanup order for tempest test * Enable downgrade migrations in unit tests * Allow shares to have multiple 
export locations * Add basic manage/unmanage share functionality * Set proper attr "deleted" for ShareTypes model * Imported Translations from Transifex * EMC Isilon Manila Driver Docs * HP3PAR driver log the SHA1 for driver and mediator correctly * Add public attr for shares * Imported Translations from Transifex * Add ro level of access support to generic driver * Remove CLI tests from tempest plugin * Manila Scheduler should read full driver capabilities * NetApp cDOT driver should not create useless export-policy rule * Manila cDOT driver should use loopingcall for ASUP report timing * EMC Isilon Manila driver * Implement private share\_types * Updated from global requirements * Always allow delete share-network when no shares exist * Imported Translations from Transifex * Add nova network plugin * Manila cDOT qualified specs * Make extra spec driver\_handles\_share\_servers required * Failed to load xml configure file * Updated from global requirements * Allow tempest to skip RO access level tests * Manila cDOT netapp:thin\_provisioned qualified extra spec * Replace TEMPEST\_CONCURRENCY with Manila-specific var * doc: Add glusterfs\_native driver developer doc * Fix example style in admin doc * Imported Translations from Transifex * Improve error handling in GPFS driver * Updated from global requirements * Add doc for hdfs\_native driver * Remove copypasted export\_location field from snapshots * HP 3PAR use one filestore per tenant * Single-SVM Manila driver for NetApp Clustered Data ONTAP * Remove hacking exception for oslo.messaging import * Remove Python 2.6 classifier * Remove obsolete option: enabled\_backends * Manila access-allow API doesn't accept backslash * Add temporary workaround to scheduler * Add doc for Dynamic Storage Pools for Manila scheduler * Fix config opts description for class NeutronSingleNetworkPlugin * Add snapshot gigabytes quota * Use devstack plugin in CI hooks * HP 3PAR driver fix for delete snapshot * Add Nova-network support to 
service\_instance module * Updated from global requirements * Sync tempest plugin * Manila cDOT storage service catalog * Add devstack plugin * Generic Driver image supported protocols * Updated from global requirements * glusterfs: add NFS-Ganesha based service backend * ganesha utils: allow remote execution as root * Remove left-over modules from Cinder * Add share\_type\_default() method to API * Add support of default share type * Support Manila pools in NetApp Clustered Data ONTAP driver * Move definition of couple of config opts to proper module * Add support of nova network for share-networks API and DB * Make listing of networks compatible for neutron and nova in devstack * ganesha: fix execute call using invalid argument * Imported Translations from Transifex * Rename volume\_type to share\_type * Imported Translations from Transifex * Add possibility to enable/disable some share protocols * Add standalone network plugin * Add possibility to define driver mode within pre\_test\_hook for CI * Skip multisvm tempest tests for singlesvm setup * Correct the share server's db info after its deletion * Add support for HDFS native protocol driver * Fix cinderclient compatibility of list filtering by name * Fix spelling mistake * Fixed spelling mistake in tests * Manila NetApp cDOT driver refactoring * glusterfs: Add doc * Imported Translations from Transifex * fix case sensitivity * Fix generation of config sample * Use oslo\_log lib * unify some messages * HP 3PAR Driver for Manila * Do not instantiate network plugin when not used by driver 2015.1.0b2 ---------- * Pool-aware Scheduler Support * Implement additional test for db migrations * Updated from global requirements * Add share driver for HDS NAS Scale-out Platform * Replace legacy StubOutForTesting class * Add unit test for volume types * Add CI job support for second mode of Generic driver * Implement additional driver mode for Generic driver * ganesha: fix resetting of exports * Remove workaround for 
Nova VM boot bug * Add tracing facility to NetApp cDOT driver * Remove startswith for share\_proto check * Remove copy-pasted code for fake-share * driver: Fix ganesha config option registry * Workaround Nova VM boot bug * Add access levels for shares * Imported Translations from Transifex * Add factory for NetApp drivers * Updated from global requirements * Search snapshot by ID instead of name in Huawei driver * Fix documentation for some Ganesha config variables * Add Neutron single network plugin * Add unit test for quota remains functionality * Switch to using oslo\_\* instead of oslo.\* * utils: Allow discovery of private key in ~/.ssh * Updated from global requirements * Do not use router for service instance with direct connect * Port cinder EMS and ASUP support to manila * Adapt readme to usual structure * glusterfs: add infrastructure to accommodate NAS helpers * Fix tempest pep8 failures * Release resources in tempest test properly * Replace string driver modes with boolean value * Adding required rootwrap filters for GPFS driver * Add doc for Huawei driver * Fix pep8 error E265 in wsgi * fix typo in config.py * fix typo in nova.py helpline * fix typo in rpc.rst * Fix typo "authogenerate" in manila-manage * Updated from global requirements * Fix searching mechanism of share-networks within tempest * Fix small typo in 70-manila.sh * Change default migration in "manila-manage db downgrade" command * Add manila.conf.sample to .gitignore * Fix deletion of share-server within Generic driver * Fix devstack compatibility * Reuse network resources in share-server creation test * Updated from global requirements * Add share driver for Huawei V3 Storage * Make Tempest tests use networks only from same project * Refactor tempest test 'test\_create\_share\_with\_size\_bigger\_than\_quota' * Sync tempest plugin with latest Tempest * Update message for exception ShareNetworkNotFound * Update documentation for tempest integration * Add error suppressing to isolated 
creds cleanup in Tempest plugin * Updated from global requirements * Fix handling of share-networks with single\_svm drivers * Set pbr 'warnerrors' option for doc build * Fix nit in tempest naming * Fix documentation build * Imported Translations from Transifex * Fix TypeError in tempest retry functionality * Fix using anyjson in fake\_notifier * Fix typo in db migration test function name * Use Cinder v2 API within Generic driver * Add driver mode attr definition for all drivers * Fix concurrency problem in getting share network in Tempest * Make it possible to update tempest conf in all CI Tempest jobs * Use oslotest.base.BaseTestCase as test base class * Add possibility to create lots of shares in parallel for tempest * Add service id to information provided by API * Raise error immediately for undeletable share in tempest * py3: use function next() instead of next() method on iterator objects * Allow deleting share with invalid share server in generic driver * Rename share driver stats update method * Remove unsed python modules from requirements * Remove unused conf option 'fake\_tests' * Make tempest cleanup errors be suppressed in all CI jobs * Add retries for share creation within Tempest plugin * Remove unused sslutils module * Improve share driver mode setting * py3: use six.moves.range instead of xrange * py3: use six.moves.urllib.parse instead of urlparse * Use lockutils from "oslo concurrency" lib * Remove non-active host from host\_state\_map * Strip exec\_dirs prefix from rootwrap filters * Add possibility to suppress errors in Tempest plugin cleanup * Make Tempest repo stable for Manila * Use uuidutils from oslo.utils * Cleanup manila/utils.py * Remove configs sql\_connection and sql\_connection\_debug * Remove unused configs pybasedir and bindir * Remove unused connection\_type config * Fix tempest test with share server listing with no filters * Improve tempest share server filtering * Increase quotas and number of threads for tempest * Use 
oslo.context lib * Imported Translations from Transifex * Add missing imports for sample config generation * Fix tempest compatibility for network client * Fix driver mode opt definition * Adds Oracle ZFSSA driver for Manila 2015.1.0b1 ---------- * ganesha: NFS-Ganesha instrumentation * Add driver mode interface * Updated from global requirements * Updated from global requirements * Move networking from share manager to driver interface * Workflow documentation is now in infra-manual * Fix error message in share delete method * glusterfs: create share of specific size * Fix metadata validation in share api * Fix devstack plugin custom config opt setting * Enhance devstack plugin * Update EMC Manila driver framework using stevedore * Alternative way to import emc.plugins.registry * Fix wrong mock assertions in unit tests * Release network resources properly * Updated from global requirements * Imported Translations from Transifex * Add support for volume types with Generic driver * Fix H302 rule after release of oslo.concurrency 0.3.0 * Fix for debugging m-shr in PyCharm * Updated from global requirements * Fix tempest compatibility for cli tests * Fix context.elevated * Updated from global requirements * Updated from global requirements * Remove obsolete methods from tempest service client * Switch to oslo.concurrency for processutils * Updated from global requirements * Use oslo.utils.netutils function to set tcp\_keepalive * Fix couple of nit picks * Use keystonemiddleware and manila.conf for config * Imported Translations from Transifex * Updated from global requirements * Fix share manager to save data after driver error * Adding GPFS Manila driver * Remove object in wsgi LOG.info * Fix share network id in tempest test * Convert files to use \_LE and friends * Imported Translations from Transifex * Fix concurrency issue in security-service tempest test * Sync Tempest plugin with latest Tempest changes * Improve share-network list API filtering * Updated from 
global requirements * Don't translate LOG messages in testsuite * Add admin doc for multiple backends configuration * Remove gettextutils * Use proper value for osap\_share\_extension * Refactor shares client init in Tempest plugin * Delete unused versionutils module * Sync with oslo-incubator * Updated from global requirements * Use oslo.utils - remove importutils usage * Switch to oslo.config * Use oslo.serialization * Use oslo.utils * Silence tox warning * Add manila specific hacking checks * Remove extra flake8 args * Sync with global requirements * Improve share snapshots list API filtering * Use oslo.i18n * Use six instead of str for exceptions * Add info to cDOT driver doc * Fix tempest compatibility * Add new search options for security service * Fix doc build * Add Admin doc for an Introduction to Manila * Add share server id field in shares detail info * Improve share list API filtering * Fix doc build warnings so docs build clean * Remove extraneous vim editor configuration comments * Add share network id field in share server info * Fix tempest compatibility * Use 'generate\_request\_id' func from common code * Remove vim headers * Add info to generic driver doc * Open Kilo development * Add doc for EMC VNX driver 2014.2 ------ * Fix creation of share from snapshot * Specify the correct Samba share path * Fixes several typos (Manila) * Fix KeyError while creating share from snapshot * Fix references in jenkins.rst * Update translation information * Mention Samba in intro.rst * Add doc for an Introduction to Manila 2014.2.rc1 ---------- * Add support for working with multiple glusterfs volumes * Minor Manila doc change * Make copyrights in docs as comments instead of page content * Update challenges in the developer docs * Update naming from clustered mode to cDOT * Fix doc build errors in db/sqlalchemy/models.py * Improve documentation build * Add doc for netapp cluster mode driver * Add doc for generic driver * Fix using key for ssh * Fix getting ssh 
key if ssh path is not set * Rename stackforge to openstack in docs * Move from stackforge to openstack * Fix two functional tests within tempest\_plugin * glusterfs: edit config option specifying volume * Change exception thrown by db method * Fix some LOG.debug invocations * Fix Invalid pathname for netapp cmode driver * Make block devices mounts permanent within service instances * Stop using intersphinx * Increase share-network default quota * Don't allow security service to be updated if used * Move db related unittests to proper places * Fix update of backend details in cmode driver * Update shares and snapshot create to show details * Use oslosphinx and remove local copy of doc theme * Move driver unittest modules to proper place * Move unittests related to manila/share/\*.py modules to proper place * Make NFS exports in generic driver permanent * Fix ssh connection recreation in generic driver * Drop a forgotten fragment * warn against sorting requirements * Fix version number to Juno 2014.2.b3 --------- * Add support for glusterfs native protocol driver * Fix some LOG invocations and messages * EMC VNX Manila Plugin * Add support for cert based access type * Make m-shr more stable on start up * Fix scheduled share creation with generic driver * Add "." 
at end of exceptions * py3: Use six module for StringIO imports * Update share\_network obj after db update * Transform Exception args to strings when exceptions * Fix string concatenation * glusterfs: Fix docstring * Fix concurrent policy issue in unittest * Remove redundant glance config options * Improve help strings * Remove hash seed dependency for unittests * Updated usage of locks * Fix creation of cifs entry in cmode driver * Flake8: Fix and enable H405 * Forbid to attach security services with same type to share network * Flake8: Fix H501 * Flake8: Fix and enable H404 * Flake8: Fix E128 * Fix device mount/umount methods in generic driver * Change service VM connectivity * Use Alembic instead of Sqlalchemy-migrate in Manila * Flake8: Fix H302 * Remove NetApp 7-mode driver as obsolete * Flake8: Fix F841 * Remove bin/manila-rpc-zmq-receiver * Cmode, CIFS shares, fix allowed share access type * Fix obtaining of service VM ip * EMC Manila driver * Add specific docs build option to tox * Flake8: Fix some occurences of F841 * Flake8: Fix E126 and E127 * Flake8: Fix F401 * pep8: Enable H303 and F403 * Sync requirements with global requirements * Remove extra setenv from tox.ini * Enable E121,E122,E123,E124,E125,E129 flake8 tests * Refactor NetApp Cmode driver * Use opportunistic migrations * Add config option for share volume fs type * Fix failing of unittests in one thread * Fix H402 hacking rules * Fix pep8 issues in manila/tests * Clean up devstack plugin after LVM driver removal * Remove LVM driver * Fix pep8 failures in manila/{db,volume} * Handle missing config options for tests gracefully * Add oslo.utils and oslo.i18n libs to requirements * Issue one SQL statement per execute() call * Further pep8 fixes * Fix pep8 F811 and F812 * Rename 'sid' to 'user' in access rules and sec services * Decrease amount of threads for Tempest tests * Flake8 in bin/\* * Remove manila-clear-rabbit-queues * Sync scripts with oslo-incubator * Replace utils.config\_find with 
CONF.find\_file * Use common code within manila.policy module * Fix bad indentation in manila * Refactor cifs helper for generic driver * Fix share status waiter within tempest * Fix update of share with share-server-id * Use common config generator * Add config module from oslo-incubator * Remove dangerous arguments default * Remove unused imports * Fix F402 pep8 * Make flake8 ignore list more fine granular * Sync common modules from Oslo * Add share\_server\_id filter option to 'get\_all' share API method * Fix tempest compatibility * Fix pep8 F821 * Update requirements file matching global requ * glusterfs: Edit comments and docstrings * glusterfs: Modify interface methods * Fix setting up security-services in Cmode * Update pep8 testing * Added calculating capacity info in Cmode * Added calculating capacity info to 7mode driver * Adds undocumented policies and defaults in policy.json * Add check on eventlet bug #105 (ipv6 support) * Remove reference to 'in-use' state in share manager * Enable check for H237 * Use oslo.rootwrap library instead of local copy * py3.x: Use six.text\_type() instead of unicode() * py3: use six.string\_types instead of basestring * Use oslo.db in manila * Fix compatibility with tempest project * README merge * Refactor test framework * Add interprocess locks to net interfaces handlers * Fix obtaining of service instance ip * Setup for translation * Enabled hacking checks H305 and H307 * Fix service subnet capacity within service\_instance module * Fix metaclasses assignment * Enable hacking check H236 * Add share-server-delete API * Change get\_client\_with\_isolated\_creads() to \*\_creds() * Sync with global requirements * Fix E112 expected an indented block * Fix E713 test for membership should be 'not in' * Fix E131 continuation line unaligned for hanging indent * Address H104 File contains nothing but comments * Fix E251 unexpected spaces around keyword / parameter equals * Fix E265 block comment should start with '# ' * Fix 
usage of ProcessExecutionError exception * Enabled hacking check H403 * py33: use six.iteritems for item iterations (part2) * Cleanup manila.utils module (part1) * glusterfs: Implement methods to update share stats * glusterfs: Fix issues in backend instrumentation * Enabled hacking check H401 * Use ssh\_execute function from common code * Use execute() and trycmd() functions from common code * Use looping calls for running services from common code * Fix typo in error message for share\_export\_ip * py33: use six.iteritems for item iterations (part1) * Change logging level AUDIT to INFO * Teardown/setup server enhancements * Removed custom synchronized in service\_instance * Migrate to oslo.messaging instead of commom/rpc * Removed redundant methods from singletenant drivers * Replace python print operator with print function (pep H233, py33) * share.manager: Modify allow\_access method call * Delete skipped quota tests as invalid * Add CLI tests for share-server-list API * Added retrieving vserver name from backend details * Update ci scripts * service\_instance: Add lock to creation of security\_group * Enable skipped tests from test\_capacity\_weigher.py * Add using share-server backend details in Generic driver * Fixed passing share\_server to teardown\_network * Fix create\_share\_from\_snapshot method * Added tempest tests * Cleaned up exception module and added unittests * Check share net ids when creating share from snapshot * Update manila's docs * Replace usage of unittest module with manila.test * Fix tempest test's rare concurrent issue * Improved share\_servers db api * Fixed passing share\_server to ensure\_share * Rewrited mox tests to mock (part 2) * Fix lvm driver to be compatible with share manager * Rewrited mox tests to mock (part 1) * Replace json with jsonutils from common code * Removed redundant code for glance * Use testtools module instead unittest module * Cleanup resources with tempest more reliably * Added service\_instance\_locks 
directory to .gitignore * Added force-delete action to admin actions * Update contrib/ci bash scripts * devstack: strip obsolete part of m-shr instumentation * Sync common modules from Oslo * Several fixies to tempest plugin * Moved exports needed for tempest into post\_test\_hook * Fix some cosmetic issues in README.rst * Fixed ci bash scripts * Remove explicit dependency on amqplib * Added share server api * Removed redundant dependency of hp3parclient * Add multibackend test suite for tempest plugin * Added bash scripts for ci jobs * Added multibackendency to devstack plugin * Switch to Hacking 0.8.x * Use Python 3.x compatible except construct * assertEquals is deprecated, use assertEqual * Share server details * Added locks into service\_instance module * Removed redundant option from devstack plugin * Separated locks for cifs and server operations * Share servers implementation * Made safe get of security\_groups with nova's response * Made service\_instance consider driver's config * Set locks for shared resources in generic driver's cifs helper * change assertEquals to assertEqual * change assert\_ to assertTrue * Added handling of secgroup for service\_instance module * set default auth\_strategy to keystone * Enabled ip rules tests for cifs in tempest * Increase default quota for share networks from 3 to 5 * debug level logs should not be translated * tempest plugin update * Fixed tempest plugin compatibility * Fixed possibility to have more than 25 shares with generic driver * Retrieve share\_backend name from config on get\_share\_stats * Fixed retrieving export ip address in Cmode drv * Made template for service VM unique using generic driver * Fixed usage of config option in generic driver * Replaced manila.conf.sample with README.manila.conf * Added API to manage volume types * Fixed rise of Duplicate exception for DB * Added volume\_types to DB * Removed unused module from unittests * Raise max header size to accommodate large tokens * Added cli 
tests for service-list request * Allowed devstack not fail if couldn't stop smb service * Removed redundant keystone token usage * Refactored service-list filters * Fixed tempest plugin compatibility with master * Checking security service is not used while deleting * Added creation of secgroup for service vms in devstack plugin * Removed unique constraint for share networks * Added type field to security services index list * Update tempest plugin for latest changes of manila * Made max limit name for snapshots unique * Made limits usages names unique * Fixed ownership for service volumes * Fixed quotas for share-networks * Fixes bug with share network deactivation * Added extension that provides used resources in absolute limits * Fixed detail list for shares * Added quota for share-networks * Teardown share network in Netapp Cmode driver * Fixed detail list for security-services * Fix venv installation for run\_tests.sh * Updated generic\_driver and service\_instance with activation * Added Cmode driver * Fixed race condition in tempest plugin * Fixes bug with simultaneous network modification * Fixes bug with keypair creating * Update tempest plugin, make it more stable * Add exception to tempest plugin * Splits service\_instance module from generic driver * Make functions in manila uniquenamed * Fixed creation of cinder's volumes * Add share network activate and deactivate * Separate action and creation tests in tempest * Add handling of share-networks to tempest plugin * Fix sequence of called functions in devstack plugin * Update policy.json * Enforce function declaration format in bash8 * Switched devstack plugin to use generic driver * DevStack plugin: make source dirs configurable * Fixes bug with getting hostname * Fix DevStack plugin's source collection issue * Let DevStack plugin get python executable path * Removed swiftclient from dependencies * Use uuid instead of uuidutils * Update plugin for tempest * Add detail filter for share-network-list * Add 
function cidr\_to\_netmask to utils * Fixes bug with path to ssh keys * Fixed detail list for security-services * Removed cinder artifacts in devstack plugin * Added to devstack plugin passwords for services * Generic driver * Fix devstack plugin's usage of RECLONE option * Removes use of timeutils.set\_time\_override * Adds modules for managing network interfaces for generic driver * Extends neutron api with methods needed for generic driver * Adds nova api needed for generic driver implementation * Adds cinder api needed for generic driver implementation * Squash all migrations into one * Add network id verification on share creation * Add policy checks in share networks API * Fix policy.py * Updated from global requirements * Fix bad calls to model\_query() * Change manila DB to have working unique constraint * Change 'deleted' to Boolean in project\_user\_quotas * Fixes handling of duplicate share access rule creation * Fixes empty network\_info for share * Use actual rootwrap option in manila.conf instead deprecated one * Fix xml response for create/update security service * Add 'password' field to the security service * Adds network creation to ShareManager * Checking if access rule exists in share api * Add share's networks API * Add share's networks DB model, API and neutron support * Fix manila's devstack plugin for using Fedora/CentOS/RHEL distro * Add manila's tempest-plugin * Security service API * Add security service DB model and API * Remove redundant options in devstack plugin * Fix bug with full access to reset-state * glusterfs: Add GlusterFS driver * Fix manila's devstack plugin * Adds an ability to reset snapshot state * Adds validation of access rules * Adds admin actions extension to provide reset-state command * Refactoring driver interfaces * Move NetAppApiClient to separate module * Moved netapp.py from drivers to drivers/netapp * Insert validation of losetup duplicates * Remove redundant options for manila * Place devstack files to proper 
dirs * Fixes inappropriate size of metadata value * Adds 'metadata' key to list of options for xml responses * Adds an ability to manage share metadata * Added Neutron API module * Add consume\_from\_share method to HostState class * Add devstack integration * Update requirements.txt for keystoneclient * Support building wheels (PEP-427) * Update openstack/common/lockutils * Remove unused manila.compute.aggregate\_states * Remove obsolete redhat-eventlet.patch * Added per user-tenant quota support * Change wording of short description * Removing deprecated using of flags module from project * Fixed share size validation while creating from snapshot * Fixed xml response for share snapshot * Added share size checking if creating from snapshot * Fixed values passed to share\_rpcapi.create\_share * Remove d2to1 dependency * Update functionality implementation for manila api * Fixed policy check for manila api * Added XML serialization for access actions * Check policy implementation for shares api * Update README with relevant Manila information * Fix xml response content for share list/show * Add .gitreview file * Unittests failure fix * Fixed snapshot\_id None for share * Quota releasing on snapshot deleting bug fixed * Fixed absolute limits * fixed pep8 * Stubed driver do\_setup in start\_service * Quota tests fixed * removed egg-info * modified conf sample * modified docs * docs * snapshot view, size added * quotas for snapshot * fixed api error * snapshot size * fixed TYPO * Access create empty boy fix * User cannot delete snapshot fix * Can not delete share with error status fixed * response status for share with snapshot delete request - fixed * fixed null value validation for snapshot id * fixed share temaplate name * fixed share snapshots * pep8 fix * License flake8 error fixed * Fixed flake8 errors * Api share-snapshots to snapshots * Removed unused imports * Fixed api tests * Removed v2 api. 
Moved shares and snapshots from contrib to v1 * quotas exception fix * Quotas fix * Deleted api v2 * Quotas fixed. quotas unittests fixed * Removed ubused unittests * fixed fake flags * Removed volume specific tests * merge * Mass replace osapi\_volume to osapi\_share Removed locale * Update connfig.sample scripts * Update connfig.sample scripts * Removed unused opts from flags.py * removed some volume occurances * removed block specific exceptions * osapi\_volume to osapi\_share * removed volumes from bin scripts * Added help to smb\_config\_path conf * modified fake flags * deleted brick * fixed manila manage * api-paste.ini: osapi\_volume to osapi-share * Replaced cinder with manila * Renamed service api config opts. Set default port to 8786 * removed volumes from scheduler * deleteted .idea, added .gitignore * volume api removed * fixed keystone context * api fix * Removed backups * DB cleaned * Removed SM models and migrations * Modified models * Modified migrations * Removed block-specific from DB api * Deleted manila.volume * Renamed cinder to manila. Fixed setup.py, fixed bin scripts * Initialize from cinder * Initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/HACKING.rst0000664000175000017500000000770100000000000014661 0ustar00zuulzuul00000000000000Manila Style Commandments ========================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Manila Specific Commandments ---------------------------- - [M310] Check for improper use of logging format arguments. - [M313] Use assertTrue(...) rather than assertEqual(True, ...). - [M323] Ensure that the _() function is explicitly imported to ensure proper translations. - [M326] Translated messages cannot be concatenated. String should be included in translated message. 
- [M333] ``oslo_`` should be used instead of ``oslo.`` - [M336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs. - [M337] Ensure to not use xrange(). - [M338] Ensure to not use LOG.warn(). - [M354] Use oslo_utils.uuidutils to generate UUID instead of uuid4(). - [M359] Validate that log messages are not translated. LOG Translations ---------------- Beginning with the Pike series, OpenStack no longer supports log translation. It is not useful to add translation instructions to new code, the instructions can be removed from old code, and the hacking checks that enforced use of special translation markers for log messages have been removed. Other user-facing strings, e.g. in exception messages, should be translated using ``_()``. A common pattern is to define a single message object and use it more than once, for the log call and the exception. In that case, ``_()`` must be used because the message is going to appear in an exception that may be presented to the user. For more details about translations, see https://docs.openstack.org/oslo.i18n/latest/user/guidelines.html Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. For more information on creating unit tests and utilizing the testing infrastructure in OpenStack Manila, please read manila/testing/README.rst. Running Tests ------------- The testing system is based on a combination of tox and testr. If you just want to run the whole suite, run `tox` and all will be fine. However, if you'd like to dig in a bit more, you might want to learn some things about testr itself. 
A basic walkthrough for OpenStack can be found at http://wiki.openstack.org/testr OpenStack Trademark ------------------- OpenStack is a registered trademark of OpenStack, LLC, and uses the following capitalization: OpenStack Commit Messages --------------- Using a common format for commit messages will help keep our git history readable. Follow these guidelines: First, provide a brief summary (it is recommended to keep the commit title under 50 chars). The first line of the commit message should provide an accurate description of the change, not just a reference to a bug or blueprint. It must be followed by a single blank line. If the change relates to a specific driver (libvirt, xenapi, qpid, etc...), begin the first line of the commit message with the driver name, lowercased, followed by a colon. Following your brief summary, provide a more detailed description of the patch, manually wrapping the text at 72 characters. This description should provide enough detail that one does not have to refer to external resources to determine its high-level functionality. Once you use 'git review', two lines will be appended to the commit message: a blank line followed by a 'Change-Id'. This is important to correlate this commit with a specific review in Gerrit, and it should not be modified. For further information on constructing high quality commit messages, and how to split up commits into a series of changes, consult the project wiki: http://wiki.openstack.org/GitCommitMessages ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/LICENSE0000664000175000017500000002363700000000000014076 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2216678 manila-21.0.0/PKG-INFO0000644000175000017500000000701200000000000014151 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: manila Version: 21.0.0 Summary: Shared Storage for OpenStack Home-page: https://docs.openstack.org/manila/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.10 License-File: LICENSE Requires-Dist: pbr>=5.5.0 Requires-Dist: alembic>=1.4.2 Requires-Dist: castellan>=3.7.0 Requires-Dist: defusedxml>=0.7.1 Requires-Dist: eventlet>=0.27.0 Requires-Dist: greenlet>=0.4.16 Requires-Dist: lxml>=4.5.2 Requires-Dist: netaddr>=0.8.0 Requires-Dist: oslo.config>=8.3.2 Requires-Dist: oslo.context>=3.1.1 Requires-Dist: oslo.db>=8.4.0 Requires-Dist: oslo.i18n>=5.0.1 Requires-Dist: oslo.log>=4.4.0 Requires-Dist: oslo.messaging>=14.1.0 Requires-Dist: oslo.middleware>=4.1.1 Requires-Dist: oslo.policy>=4.5.0 Requires-Dist: oslo.privsep>=2.4.0 Requires-Dist: oslo.reports>=2.2.0 Requires-Dist: oslo.rootwrap>=6.2.0 Requires-Dist: oslo.serialization>=4.0.1 Requires-Dist: oslo.service>=2.4.0 Requires-Dist: oslo.upgradecheck>=1.3.0 Requires-Dist: oslo.utils>=7.0.0 Requires-Dist: oslo.concurrency>=4.3.0 Requires-Dist: osprofiler>=3.4.0 Requires-Dist: paramiko>=2.7.2 Requires-Dist: Paste>=3.4.3 Requires-Dist: PasteDeploy>=2.1.0 Requires-Dist: pyparsing>=2.4.7 
Requires-Dist: python-neutronclient>=6.7.0 Requires-Dist: keystoneauth1>=4.2.1 Requires-Dist: keystonemiddleware>=9.1.0 Requires-Dist: requests>=2.23.0 Requires-Dist: tenacity>=6.3.1 Requires-Dist: Routes>=2.4.1 Requires-Dist: SQLAlchemy>=1.4.0 Requires-Dist: SQLAlchemy-Utils>=0.38.3 Requires-Dist: stevedore>=3.2.2 Requires-Dist: tooz>=2.7.1 Requires-Dist: python-cinderclient>=4.0.1 Requires-Dist: python-novaclient>=17.2.1 Requires-Dist: python-glanceclient>=3.2.2 Requires-Dist: WebOb>=1.8.6 Requires-Dist: cachetools>=4.2.1 Requires-Dist: packaging>=20.9 ====== Manila ====== .. image:: https://governance.openstack.org/tc/badges/manila.svg .. Change things from this point on You have come across an OpenStack shared file system service. It has identified itself as "Manila". It was abstracted from the Cinder project. * Wiki: https://wiki.openstack.org/wiki/Manila * Developer docs: https://docs.openstack.org/manila/latest/ Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://opendev.org/openstack/manila For developer information please see `HACKING.rst `_ You can raise bugs here https://bugs.launchpad.net/manila Python client ------------- https://opendev.org/openstack/python-manilaclient * Documentation for the project can be found at: https://docs.openstack.org/manila/latest/ * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/manila/ * Source for the project: https://opendev.org/openstack/manila * Bugs: https://bugs.launchpad.net/manila * Blueprints: https://blueprints.launchpad.net/manila * Design specifications are tracked at: https://specs.openstack.org/openstack/manila-specs/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/README.rst0000664000175000017500000000234100000000000014545 0ustar00zuulzuul00000000000000====== Manila ====== .. 
image:: https://governance.openstack.org/tc/badges/manila.svg .. Change things from this point on You have come across an OpenStack shared file system service. It has identified itself as "Manila". It was abstracted from the Cinder project. * Wiki: https://wiki.openstack.org/wiki/Manila * Developer docs: https://docs.openstack.org/manila/latest/ Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://opendev.org/openstack/manila For developer information please see `HACKING.rst `_ You can raise bugs here https://bugs.launchpad.net/manila Python client ------------- https://opendev.org/openstack/python-manilaclient * Documentation for the project can be found at: https://docs.openstack.org/manila/latest/ * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/manila/ * Source for the project: https://opendev.org/openstack/manila * Bugs: https://bugs.launchpad.net/manila * Blueprints: https://blueprints.launchpad.net/manila * Design specifications are tracked at: https://specs.openstack.org/openstack/manila-specs/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6296754 manila-21.0.0/api-ref/0000775000175000017500000000000000000000000014401 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7056744 manila-21.0.0/api-ref/source/0000775000175000017500000000000000000000000015701 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/availability-zones.inc0000664000175000017500000000176400000000000022212 0ustar00zuulzuul00000000000000.. -*- rst -*- Availability zones ================== Describes availability zones that the Shared File Systems service is configured with. .. 
important:: For API versions 2.6 and prior, replace ``availability-zones`` in the URLs with ``os-availability-zone``. List availability zones ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/availability-zones Lists all availability zones. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - availability_zones: availability_zones - id: availability_zone_id - name: availability_zone_name - created_at: created_at - updated_at: updated_at Response example ---------------- .. literalinclude:: samples/availability-zones-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7056744 manila-21.0.0/api-ref/source/common/0000775000175000017500000000000000000000000017171 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/common/share-network-span-multiple-subnets-note.rst0000664000175000017500000000053000000000000027726 0ustar00zuulzuul00000000000000.. note:: Since API version 2.51, a share network is allowed to span multiple subnets and the fields ``neutron_net_id``, ``neutron_subnet_id``, ``network_type``, ``cidr``, ``ip_version``, ``gateway``, ``segmentation_id`` and ``mtu`` were moved from the share network to the subnet. The response will look like the below example. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/conf.py0000664000175000017500000002327100000000000017205 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # manila documentation build configuration file, created by # sphinx-quickstart on Sat May 7 13:35:27 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys html_theme = 'openstackdocs' html_theme_options = { "sidebar_mode": "toc", } extensions = [ 'os_api_ref', 'openstackdocstheme', ] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. 
master_doc = 'index' # General information about the project. copyright = u'2010-present, OpenStack Foundation' # openstackdocstheme options openstackdocs_repo_name = 'openstack/manila' openstackdocs_bug_project = 'manila' openstackdocs_bug_tag = 'api-ref' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
# html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. # " v documentation" by default. # html_title = u'Shared File Systems API Reference v2' # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or # 32x32 pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. 
# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'maniladoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', # Latex figure (float) alignment # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ (master_doc, 'manila.tex', u'OpenStack Shared File Systems API Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'manila', u'OpenStack Shared File Systems API Documentation', u'Openstack Foundation', 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Manila', u'OpenStack Shared File Systems API Documentation', u'OpenStack Foundation', 'Manila', 'OpenStack Shared File Systems', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/experimental.inc0000664000175000017500000000073400000000000021075 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Experimental APIs ================= .. important:: The following APIs are part of the `experimental feature `_ introduced in version 2.4. The APIs may change or be removed in future versions of the Shared File Systems API. All experimental APIs require the ``X-OpenStack-Manila-API-Experimental: True`` header to be sent in the requests. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/extensions.inc0000664000175000017500000000143100000000000020572 0ustar00zuulzuul00000000000000.. -*- rst -*- API extensions ============== Lists available Shared File Systems API extensions. List extensions ~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/extensions Lists all extensions. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - name: extension_name - links: extension_links - description: extension_description - alias: extension_alias - updated: updated_at_extensions Response example ---------------- .. 
literalinclude:: samples/extensions-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/index.rst0000664000175000017500000000426500000000000017551 0ustar00zuulzuul00000000000000:tocdepth: 3 ======================= Shared File Systems API ======================= This is a reference for version 2 of the OpenStack Shared File Systems API which is provided by the Manila project. Manila provides a RESTful HTTP service through which it offers on-demand, scalable, self-service access to shared file system storage resources. .. important:: Prior to the Wallaby release, Shared File System service required the caller to specify their "project_id" in the API URLs. This requirement has been dropped. The API service now behaves the same way whether or not "project_id" is included in the URLs. If your cloud does not yet support version 2.60, all the resource URLs below will require a project ID. For example: GET /v2/{project_id}/shares .. rest_expand_all:: .. include:: versions.inc .. include:: extensions.inc .. include:: limits.inc .. include:: shares.inc .. include:: share-export-locations.inc .. include:: share-metadata.inc .. include:: share-actions.inc .. include:: snapshots.inc .. include:: snapshot-metadata.inc .. include:: snapshot-instances.inc .. include:: share-replicas.inc .. include:: share-replica-export-locations.inc .. include:: share-networks.inc .. include:: share-network-subnets.inc .. include:: share-network-subnets-metadata.inc .. include:: security-services.inc .. include:: share-servers.inc .. include:: share-instances.inc .. include:: share-instance-export-locations.inc .. include:: share-types.inc .. include:: scheduler-stats.inc .. include:: services.inc .. include:: availability-zones.inc .. include:: os-share-manage.inc .. include:: quota-sets.inc .. include:: quota-classes.inc .. include:: user-messages.inc .. 
include:: share-access-rules.inc .. include:: share-access-rule-metadata.inc .. include:: share-groups.inc .. include:: share-group-types.inc .. include:: share-group-snapshots.inc .. include:: share-transfers.inc .. include:: resource-locks.inc ====================================== Shared File Systems API (EXPERIMENTAL) ====================================== .. rest_expand_all:: .. include:: experimental.inc .. include:: share-migration.inc .. include:: share-server-migration.inc .. include:: share-backups.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/limits.inc0000664000175000017500000000475600000000000017711 0ustar00zuulzuul00000000000000.. -*- rst -*- Limits ====== Limits are the resource limitations that are allowed for each tenant (project). An administrator can configure limits in the ``manila.conf`` file. Users can query their rate and absolute limits. The absolute limits contain information about: - Total maximum share memory, in GiBs. - Number of share-networks. - Number of share-snapshots. - Number of shares. - Shares and total used memory, in GiBs. - Snapshots and total used memory, in GiBs. - Number of share replicas (since API version 2.53). - Share replicas and total used memory, in GiBs (since API version 2.53). Rate limits control the frequency at which users can issue specific API requests. Administrators use rate limiting to configure limits on the type and number of API calls that can be made in a specific time interval. For example, a rate limit can control the number of GET requests that can be processed during a one-minute period. List share limits ~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/limits Lists share limits. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - maxTotalShareGigabytes: maxTotalShareGigabytes - maxTotalSnapshotGigabytes: maxTotalSnapshotGigabytes - maxTotalShares: maxTotalShares - maxTotalShareSnapshots: maxTotalShareSnapshots - maxTotalShareNetworks: maxTotalShareNetworks - maxTotalShareReplicas: maxTotalShareReplicas - maxTotalReplicaGigabytes: maxTotalReplicaGigabytes - maxTotalShareBackups: maxTotalShareBackups - maxTotalBackupGigabytes: maxTotalBackupGigabytes - totalSharesUsed: totalSharesUsed - totalShareSnapshotsUsed: totalShareSnapshotsUsed - totalShareNetworksUsed: totalShareNetworksUsed - totalShareGigabytesUsed: totalShareGigabytesUsed - totalSnapshotGigabytesUsed: totalSnapshotGigabytesUsed - totalShareReplicasUsed: totalShareReplicasUsed - totalReplicaGigabytesUsed: totalReplicaGigabytesUsed - totalShareBackupsUsed: totalShareBackupsUsed - totalBackupGigabytesUsed: totalBackupGigabytesUsed - uri: uri - regex: regex - value: value - verb: verb - remaining: remaining - unit: unit - next-available: next-available Response example ---------------- .. literalinclude:: samples/limits-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/os-share-manage.inc0000664000175000017500000000710600000000000021347 0ustar00zuulzuul00000000000000.. -*- rst -*- Manage and unmanage shares (DEPRECATED) ======================================= Allows bringing shared file systems under service management. Manage share (DEPRECATED) ~~~~~~~~~~~~~~~~~~~~~~~~~ .. warning:: This API is deprecated starting with microversion 2.7 and requests to this API will fail with a 404 starting from microversion 2.7. Use `Share Manage API <#manage-share-since-api-v2-7>`_ instead of this API from version 2.7. .. 
rest_method:: POST /v2/os-share-manage Use this API to bring a share under the management of the Shared File Systems service. In the service, the share will be represented as a resource in the database. It can have a user defined name and description. Administrator only. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share: share - protocol: protocol - name: name_request - display_name: display_name_request - share_type: share_type_request - driver_options: driver_options - export_path: export_path - service_host: service_host - description: description_request - display_description: display_description_request Request example --------------- .. literalinclude:: samples/share-manage-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - share: share - links: links - availability_zone: availability_zone - share_network_id: share_network_id - export_locations: export_locations - share_server_id: share_server_id - snapshot_id: snapshot_id_shares_response - id: share_id_response - size: size_response - share_type: share_type_shares_response - share_type_name: share_type_name - export_location: export_location - project_id: project_id - metadata: metadata - status: share_status_response - description: description - host: host_resource_response - is_public: is_public_shares_response - snapshot_support: snapshot_support - name: name - created_at: created_at - share_proto: share_proto Response example ---------------- .. literalinclude:: samples/share-manage-response.json :language: javascript Unmanage share (DEPRECATED) ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
warning:: This API is deprecated starting with microversion 2.7 and requests to this API will fail with a 404 starting from microversion 2.7. Use `Share Unmanage API <#unmanage-share-since-api-v2-7>`_ instead of this API from version 2.7. .. rest_method:: POST /v2/os-share-unmanage/{share_id}/unmanage Use this API to remove a share from the management of the Shared File Systems service without deleting the share. Administrator only. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Preconditions: - This API does not support unmanaging shares that are created on top of share servers (i.e. created with share networks). - You should remove any snapshots and share replicas before attempting to unmanage a share. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id Response parameters ------------------- There is no body content for the response. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/parameters.yaml0000664000175000017500000035470700000000000020750 0ustar00zuulzuul00000000000000# variables in header #{} service_token_locks: description: | An auth-token specified via the header ``X-Service-Token``. With the OpenStack Identity (Keystone) context, this token can be obtained by a user that has the ``service`` role. The presence of this header is used by resource lock API methods to set or match the lock user's context. A resource lock created by a service user cannot be manipulated by non-service users. in: header required: false type: string # variables in path access_id_path: description: | The UUID of the access rule to which access is granted. 
in: path required: true type: string api_version: in: path required: true type: string description: > The API version as returned in the links from the ``GET /`` call. backup_id_request_path: description: | The UUID of the share backup. in: path required: true type: string export_location_id_path: description: | The UUID of the export location. in: path required: true type: string extra_spec_key_path: description: | The extra specification key in: path required: true type: string group_snapshot_id_path: description: | The group snapshot ID. in: path required: true type: string message_id: description: | The UUID of the message. in: path required: false type: string metadata_key_path: description: | The key of a metadata item. For example, if the metadata on an existing share or access rule is as follows: ``"project": "my_test", "aim": "testing"``, the keys are "project" and "aim". in: path required: false type: string project_id_path: description: | The project ID of the user or service making the API request. This parameter is optional if the service supports API version 2.60. If the service doesn't yet support API version 2.60, ensure that the service catalog endpoint obtained for the service has the user's project_id after the "/v2/" component, for example, the API to retrieve shares is *GET /v2/{project_id}/shares*. If the service doesn't yet support API version 2.60, and the project_id is ommitted from the API URL, a Malformed Request error is returned (HTTP 400). in: path required: false type: string project_id_quota_request_path: description: | The ID of the project whose quotas must be acted upon by the API. This is optional, and if it is not specified, the project ID is derived from the caller's API token. System/Domain scoped users interacting with this API *must* specify the project ID for the project whose quotas they need to query or manipulate. Note that this ID can be different from the project ID that precedes the resource name "quota-sets". 
For example, in a multi-tenant cloud, the first ID in the URL is typically the project ID of a privileged user (such as a cloud administrator) that can create, query or delete quotas of other projects in the cloud. If a server supports API version 2.60, URLs no longer need the privileged user's project ID prior to the resource name. in: path required: false type: string quota_class_name: description: The name of the quota class for which to set quotas. in: path required: true type: string resource_lock_id_path: description: | The UUID of the resource lock. in: path required: true type: string security_service_id_path: description: | The UUID of the security service. in: path required: true type: string share_group_id_path: description: | The UUID of the share group. in: path required: true type: string share_group_type_id_path: description: | The UUID of the share group type. in: path required: true type: string share_id: description: | The UUID of the share. in: path required: true type: string share_instance_id: description: | The UUID of the share instance. in: path required: true type: string share_network_id_path: description: | The UUID of the share network. in: path required: true type: string share_network_subnet_id_path: description: | The UUID of the share network subnet. in: path required: true type: string share_replica_id_path: description: | The UUID of the share replica. in: path required: true type: string share_type_for_quota: description: | The name or UUID of the share type. If you specify this parameter in the URI, you show, update, or delete quotas for this share type. This parameter is mutually exclusive with the "user_id" query parameter. in: path required: false type: string min_version: 2.39 share_type_id: description: | The UUID of the share type. in: path required: true type: string snapshot_id_path: description: | The UUID of the snapshot. 
in: path required: true type: string snapshot_instance_id_path: description: | The UUID of the share snapshot instance. in: path required: true type: string transfer_id: description: | The unique identifier for a transfer. in: path required: true type: string # variables in query action_id: in: query required: false type: string description: > The ID of the action during which the message was created. all_tenants_query: description: | (Admin only). Defines whether to list the requested resources for all projects. Set to ``1`` to list resources for all projects. Set to ``0`` to list resources only for the current project. Examples of resources include shares, snapshots, share networks, security services and share groups. in: query required: false type: boolean backend_capabilities_query: description: | The capabilities for the storage back end. in: query required: false type: string backend_host_query: description: | The host name for the back end. in: query required: false type: string backend_pool_query: description: | The pool name for the back end. in: query required: false type: string backend_query: description: | The name of the back end. in: query required: false type: string backup_host_query: description: | The host name of the backup to query with. Querying by hostname is a privileged operation. If restricted by API policy, this query parameter may be silently ignored by the server. in: query required: false type: string backup_share_id_query: description: | The UUID of the share that the backup pertains to. in: query required: false type: string backup_status_query: description: | Filters by a backup status. A valid filter value can be one of 'creating', 'error', 'available', 'restoring'. in: query required: false type: string backup_topic_query: description: | Filters by a backup topic. A valid filter value can be one of 'manila-data', 'manila-share'. in: query required: false type: string cidr_query: description: | The CIDR to filter share networks. 
in: query required: false type: string created_before: description: | The date and time stamp when the query operation, only return user messages before it. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2021-11-10T09:49:58+08:00``. in: query required: false type: string min_version: 2.52 created_before_query: description: | Search for the list of resources that were created prior to the specified date. The date is in 'yyyy-mm-dd' format. in: query required: false type: string created_since: description: | The date and time stamp when the query operation, only return user messages since it. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2021-11-10T09:49:58+08:00``. in: query required: false type: string min_version: 2.52 created_since_query: description: | Search for the list of resources that were created after the specified date. The date is in 'yyyy-mm-dd' format. in: query required: false type: string description_inexact_query: description: | The description pattern that can be used to filter shares, share snapshots, share networks or share groups. in: query required: false type: string min_version: 2.36 description_inexact_query_versionless: description: | The description pattern that can be used to filter share backups. in: query required: false type: string description_query: description: | The user defined description text that can be used to filter resources. in: query required: false type: string detail_id: in: query required: false type: string description: > The ID of the message detail. encryption_key_ref_query: description: | The encryption key ref that can be used to filter shares or share instances. 
in: query required: false type: string min_version: 2.90 export_location_id_query: description: | The export location UUID that can be used to filter shares or share instances. in: query required: false type: string min_version: 2.35 export_location_path_query: description: | The export location path that can be used to filter shares or share instances. in: query required: false type: string min_version: 2.35 extra_specs_query: description: | The extra specifications as a set of one or more key-value pairs. In each pair, the key is the name of the extra specification and the value is the share type that was used to filter search share type list. The query must be a “percent-encoded” string, for example, the following query parameters: {'extra-specs': {'snapshot_support': 'true', 'availability_zones': 'az1'}} is encoded as 'extra_specs=%7B%27snapshot_support%27%3A+%27true%27%2C+%27availability_zones%27%3A+%27az1%27%7D' in: query required: false type: string min_version: 2.43 group_snapshot_status_query: description: | Filters by a share group snapshot status. A valid value is ``creating``, ``error``, ``available``, ``deleting``, ``error_deleting``. in: query required: false type: string group_specs_query: description: | The group specifications as a set of one or more key-value pairs. In each pair, the key is the name of the group specification and the value is the share group type that was used to filter search share group type list. The query must be a “percent-encoded” string, for example, the following query parameters: {'group-specs': {'consistent_snapshot_support': 'true'}} is encoded as 'group_specs=%7B%27consistent_snapshot_support%27%3A+%27True%27%7D' in: query required: false type: string min_version: 2.66 host_query: description: | The host name of the resource to query with. Querying by hostname is a privileged operation. If restricted by API policy, this query parameter may be silently ignored by the server. 
in: query required: false type: string ip_version_query: description: | The IP version to filter share networks. in: query required: false type: string is_public_query: description: | A boolean query parameter that, when set to true, allows retrieving public resources that belong to all projects. in: query required: false type: boolean is_soft_deleted_query: description: | A boolean query parameter that, when set to True, will return all shares in recycle bin. Default is False, will return all shares not in recycle bin. in: query required: false type: boolean min_version: 2.69 limit: description: | The maximum number of resource records to return. in: query required: false type: integer limit_query: description: | The maximum number of share groups members to return. in: query required: false type: integer message_level: in: query required: false type: string description: > The message level. metadata_query: in: query required: false type: object description: | One or more metadata key and value pairs as a url encoded dictionary of strings. name_inexact_query: description: | The name pattern that can be used to filter shares, share snapshots, share networks, transfers or share groups. in: query required: false type: string min_version: 2.36 name_inexact_query_versionless: description: | The name pattern that can be used to filter share backups. in: query required: false type: string name_query: description: | The user defined name of the resource to filter resources by. in: query required: false type: string network_type_query: description: | The network type to filter out share networks. in: query required: false type: string neutron_net_id_query: description: | The neutron network id to filter out share networks. in: query required: false type: string neutron_subnet_id_query: description: | The neutron network subnet id to filter out share networks. 
in: query required: false type: string nova_net_id_query: description: | The ID of a Nova network to filter out share networks. in: query required: false type: string max_version: 2.26 offset: description: | The offset to define start point of resource listing. in: query required: false type: integer project_id_messages: description: | The ID of the project for which the message was created. in: query required: false type: string project_id_query: description: | The ID of the project that owns the resource. This query parameter is useful in conjunction with the ``all_tenants`` parameter. in: query required: false type: string request_id: description: | The ID of the request during which the message was created. in: query required: false type: string resource_id: description: | The UUID of the resource for which the message was created. in: query required: false type: string resource_lock_all_projects_query: description: | Set this parameter to True to get resource locks across all project namespaces. in: query required: false type: string resource_lock_id_query: description: | The ID of the resource lock to filter resource locks by. in: query required: false type: string resource_lock_lock_context_query: description: | The lock creator's context to filter locks by. in: query required: false type: string resource_lock_lock_reason_inexact_query: description: | The lock reason pattern that can be used to filter resource locks. in: query required: false type: string resource_lock_lock_reason_query: description: | The lock reason that can be used to filter resource locks. in: query required: false type: string resource_lock_project_id_query: description: | The ID of a project to filter resource locks by. in: query required: false type: string resource_lock_resource_action_query: description: | The ``action`` prevented by the filtered resource locks. 
in: query required: false type: string resource_lock_resource_id_query: description: | The ID of the resource that the locks pertain to to filter resource locks by. in: query required: false type: string resource_lock_resource_type_query: description: | The type of the resource that the locks pertain to to filter resource locks by. in: query required: false type: string resource_lock_user_id_query: description: | The ID of a user to filter resource locks by. in: query required: false type: string resource_type: description: | The type of the resource for which the message was created. in: query required: false type: string resource_type_query: description: | The type of the resource for which the transfer was created. in: query required: false type: string security_service_query: description: | The security service ID to filter out share networks. in: query required: false type: string segmentation_id_query: description: | The segmentation id to filter out share networks. in: query required: false type: string service_binary_query: description: | The service binary name. Default is the base name of the executable. in: query required: false type: string service_host_query: description: | The service host name. in: query required: false type: string service_state_query: description: | The current state of the service. A valid value is ``up`` or ``down``. in: query required: false type: string service_status_query: description: | The service status, which is ``enabled`` or ``disabled``. in: query required: false type: string service_zone_query: description: | The availability zone. in: query required: false type: string share_group_id_query: description: | The UUID of a share group to filter resource. in: query required: false type: string min_version: 2.31 share_group_status_query: description: | Filters by a share group status. A valid value is ``creating``, ``error``, ``available``, ``deleting``, ``error_deleting``. 
in: query required: false type: string share_group_type_id_query: description: | The share group type ID to filter share groups. in: query required: false type: string share_id_access_rules_query: description: | The share ID to filter share access rules with. in: query required: true type: string share_id_replicas_query: description: | The share ID to filter share replicas with. in: query required: false type: string share_network_id_query: description: | The UUID of the share network to filter resources by. in: query required: false type: string share_server_id_query: description: | The UUID of the share server. in: query required: false type: string share_type_id_query: description: | The UUID of a share type to query resources by. in: query required: false type: string share_type_query: description: | The share type name or UUID. Allows filtering back end pools based on the extra-specs in the share type. in: query required: false type: string min_version: 2.23 share_types_query: description: | A list of one or more share type IDs. Allows filtering share groups. in: query required: false type: array snapshot_id_query: description: | The UUID of the share's base snapshot to filter the request based on. in: query required: false type: string sort_dir: description: | The direction to sort a list of resources. A valid value is ``asc``, or ``desc``. in: query required: false type: string sort_key: description: | The key to sort a list of shares. A valid value is ``id``, ``status``, ``size``, ``host``, ``share_proto``, ``export_location``, ``availability_zone``, ``user_id``, ``project_id``, ``created_at``, ``updated_at``, ``display_name``, ``name``, ``share_type_id``, ``share_type``, ``share_network_id``, ``share_network``, ``snapshot_id``, or ``snapshot``. in: query required: false type: string sort_key_backup: description: | The key to sort a list of share backups. 
A valid value is ``id``, ``status``, ``size``, ``host``, ``share_id``, ``availability_zone``, ``created_at``, ``updated_at``, ``display_name``, ``topic``, ``progress`` and ``restore_progress`` in: query required: false type: string sort_key_messages: description: | The key to sort a list of messages. A valid value is ``id``, ``project_id``, ``request_id``, ``resource_type``, ``action_id``, ``detail_id``, ``resource_id``, ``message_level``, ``expires_at``, ``created_at``. in: query required: false type: string sort_key_resource_locks: description: | The key to sort a list of resource locks. A valid value is ``id``, ``resource_id``, ``resource_type``, ``resource_action``, ``user_id``, ``project_id``, ``created_at``, ``updated_at``, ``lock_context``. in: query required: false type: string sort_key_transfer: description: | The key to sort a list of transfers. A valid value is ``id``, ``name``, ``resource_type``, ``resource_id``, ``source_project_id``, ``destination_project_id``, ``created_at``, ``expires_at``. in: query required: false type: string source_backup_id_query: description: | The UUID of the share's backup to filter the request based on. in: query required: false type: string source_share_group_snapshot_id_query: description: | The source share group snapshot ID to list the share group. in: query required: false type: string min_version: 2.31 status_query: description: | Filters by a share status. For valid statuses, see the `section above <#shares>`_. in: query required: false type: string user_id_query: description: | The ID of the user. If you specify this query parameter, you retrieve or update the quotas for this user in the project. If you omit this parameter, you query or update the quotas for the whole project. This parameter is mutually exclusive with the "share_type" parameter. in: query required: false type: string with_count_query: description: | Whether to show ``count`` in share list API response or not, default is ``False``. 
This query parameter is useful with pagination. in: query required: false type: boolean min_version: 2.42 with_count_query_without_min_version: description: | Whether to show ``count`` in API response or not, default is ``False``. This query parameter is useful with pagination. in: query required: false type: boolean with_count_snapshot_query: description: | Whether to show ``count`` in share snapshot list API response or not, default is ``False``. This query parameter is useful with pagination. in: query required: false type: boolean min_version: 2.79 # variables in body accepted: description: | Whether the transfer has been accepted. in: body required: true type: boolean access: description: | The ``access`` object. in: body required: true type: object access_id: description: | The UUID of the access rule to which access is granted. in: body required: true type: string access_key: description: | The access credential of the entity granted share access. in: body required: true type: string min_version: 2.21 access_key_share_access_rules: description: | The access credential of the entity granted share access. in: body required: true type: string access_level: description: | The access level to the share. To grant or deny access to a share, you specify one of the following share access levels: - ``rw``. Read and write (RW) access. - ``ro``. Read-only (RO) access. in: body required: true type: string access_list: description: | The object of the access rule. To list access rules, set this value to ``null``. in: body required: true type: string access_metadata: description: | One or more access rule metadata key and value pairs as a dictionary of strings. in: body required: true type: object access_metadata_grant_access: description: | One or more access rule metadata key and value pairs as a dictionary of strings. in: body required: true type: object min_version: 2.45 access_rule_id: description: | The access rule ID. 
in: body required: true type: string access_rules_status: description: | The share instance access rules status. A valid value is ``active``, ``error``, or ``syncing``. In versions prior to 2.28, ``syncing`` was represented with status ``out_of_sync``. in: body required: true type: string min_version: 2.10 access_share_id: description: | The UUID of the share to which you are granted or denied access. in: body required: true type: string access_to: description: | The value that defines the access. The back end grants or denies the access to it. A valid value is one of these values: - ``ip``: Authenticates a client through its IP address, that can be IPv4 or IPv6. You may specify a single client IP address or a range of IP addresses in CIDR notation. For example ``0.0.0.0/0`` for IPv4 or ``::/0`` for IPv6. - ``cert``: Authenticates an instance through a TLS certificate. Specify the TLS identity as the IDENTKEY. A valid value is any string up to 64 characters long in the common name (CN) of the certificate. The meaning of a string depends on its interpretation. - ``user``: Authenticates by a user or group name. A valid value is an alphanumeric string that can contain some special characters and is from 4 to 32 characters long. in: body required: true type: string access_type: in: body required: true type: string description: | The access rule type. A valid value for the share access rule type is one of the following values: - ``ip``: Authenticates a client through its IP address, that can be IPv4 or IPv6. You may specify a single client IP address or a range of IP addresses in CIDR notation. For example ``0.0.0.0/0`` for IPv4 or ``::/0`` for IPv6. - ``cert``: Authenticates a client through a TLS certificate. Specify the TLS identity as the IDENTKEY. A valid value is any string up to 64 characters long in the common name (CN) of the certificate. The meaning of a string depends on its interpretation. - ``user``: Authenticates by a user or group name. 
A valid value is an alphanumeric string that can contain some special characters and is from 4 to 32 characters long. action_id_body: in: body required: true type: string description: > The ID of the action during which the message was created. add_project_access: description: | An object representing the project resource that access should be granted to. in: body required: true type: object allow_access: description: | The object of grant access. in: body required: true type: object auth_key: description: | The authentication key for the transfer. in: body required: true type: string availability_zone: description: | The name of the availability zone the share exists within. in: body required: true type: string availability_zone_id: description: | The availability zone ID the resource exists within. in: body required: true type: string availability_zone_id_share_group: description: | The availability zone ID that the share group exists within. in: body required: true type: string min_version: 2.34 availability_zone_name: description: | The name of the availability zone. in: body required: true type: string availability_zone_request: description: | The UUID or name of an availability zone for resource to be created within. in: body required: false type: string availability_zones: description: | Top level response body element. in: body required: true type: string backend: description: | The name of the back end. in: body required: true type: string backend_details: description: | The back-end details for a server. Each back end can store any key-value information that it requires. For example, the generic back-end driver might store the router ID. in: body required: true type: object backend_host: description: | The host name for the back end. in: body required: true type: string backend_name: description: | The name of the back end in this format: ``host@backend#POOL``: - ``host``. The host name for the back end. - ``backend``. The name of the back end. 
- ``POOL``. The pool name for the back end. in: body required: true type: string backup_az: description: | The availability zone. in: body required: true type: string backup_id_response: description: | The UUID of the share backup. in: body required: true type: string backup_options_request: description: | One or more backup options key and value pairs as a url encoded dictionary of strings. in: body required: false type: object backup_progress: description: | The progress of the backup creation in percentage. in: body required: true type: string backup_restore_progress: description: | The progress of the backup restoration in percentage. in: body required: true type: string backup_share_id: description: | The UUID of the share that the backup pertains to. in: body required: true type: string backup_size: description: | The share backup size, in GiBs. in: body required: true type: integer backup_status: description: | The status of backup which can be one of ``creating``, ``error``, ``available``, ``restoring``. in: body required: true type: string backup_status_request: description: | The backup status, which can be ``available``, ``error``, ``creating``, ``deleting``, ``restoring``. in: body required: false type: string capabilities: description: | The back end capabilities which include ``qos``, ``total_capacity_gb``, etc. in: body required: true type: object capability_driver_handles_share_servers: description: | Share server is usually a storage virtual machine or a lightweight container that is used to export shared file systems. Storage backends may be able to work with configured share servers or allow the share driver to create and manage the lifecycle of share servers. This capability specifies whether the pool's associated share driver is responsible to create and manage the lifecycle of share servers. If ``false``, the administrator of the shared file systems service has configured the share server as necessary for the given back end. 
in: body required: true type: boolean capability_driver_version: description: | The driver version of the back end. in: body required: true type: string capability_free_capacity_gb: description: | The amount of free capacity for the back end, in GiBs. A valid value is a string, such as ``unknown``, or an integer. in: body required: true type: string capability_qos: description: | The quality of service (QoS) support. in: body required: true type: boolean capability_replication_domain: description: | The back end replication domain. in: body required: true type: string capability_reserved_percentage: description: | The percentage of the total capacity that is reserved for the internal use by the back end. in: body required: true type: integer capability_server_pools_mapping: description: | The mapping between servers and pools. in: body required: true type: object capability_share_backend_name: description: | The name of the share back end. in: body required: true type: string capability_snapshot_support: description: | The specification that filters back ends by whether they do or do not support share snapshots. in: body required: true type: boolean capability_storage_protocol: description: | The storage protocol for the back end. For example, ``NFS_CIFS``, ``glusterfs``, ``HDFS``, etc. in: body required: true type: string capability_total_capacity_gb: description: | The total capacity for the back end, in GiBs. A valid value is a string, such as ``unknown``, or an integer. in: body required: true type: string capability_vendor_name: description: | The name of the vendor for the back end. in: body required: true type: string cidr: description: | The IP block from which to allocate the network, in CIDR notation. For example, ``172.16.0.0/24`` or ``2001:DB8::/64``. This parameter is automatically set to a value determined by the network provider. 
in: body required: true type: string max_version: 2.50 clear_access_rules: description: | Whether clear all access rules when accept share. in: body required: false type: boolean compatible: description: | Whether the destination backend can or can't handle the share server migration considering the received entries for ``share_network_id``, ``host``, ``nondisruptive``, ``writable`` and ``preserve_snapshots`` matched with the driver supported capabilities. in: body required: true type: boolean consistent_snapshot_support: description: | The consistency snapshot support. in: body required: true type: string min_version: 2.34 count: description: | The total count of requested resource before pagination is applied. This parameter is only present in the API response if "with_count=True" is supplied in the query. in: body required: false type: integer min_version: 2.42 count_without_min_version: description: | The total count of requested resource before pagination is applied. This parameter is only present in the API response if "with_count=True" is supplied in the query. in: body required: false type: integer create_share_from_snapshot_support: description: | Boolean extra spec used for filtering of back ends by their capability to create shares from snapshots. in: body required: false type: boolean min_version: 2.24 create_share_from_snapshot_support_body: description: | Boolean extra spec used for filtering of back ends by their capability to create shares from snapshots. in: body required: false type: boolean create_share_from_snapshot_support_share_capability: description: | Whether or not this share supports snapshots that can be cloned into new shares. Inconsequential if the share doesn't support snapshots. (see capability "snapshot_support") in: body required: true type: boolean min_version: 2.24 created_at: description: | The date and time stamp when the resource was created within the service's database. 
The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2019-03-27T09:49:58-05:00``. in: body required: true type: string current_share_network_security_service_id: description: | The ID of a security service that is currently attached to a share network. in: body required: true type: string deny_access: description: | The ``deny_access`` object. in: body required: true type: object description: description: | The user defined description of the resource. in: body required: true type: string description_request: description: | The user defined description of the resource. The value of this field is limited to 255 characters. in: body required: false type: string destination_project_id: description: | UUID of the destination project to accept transfer resource. in: body required: true type: string destination_share_server_id: description: | UUID of the share server that was created in the destination backend during a share server migration operation. in: body required: true type: string detail_id_body: in: body required: true type: string description: > The ID of the message detail. display_description_request: description: | The user defined description of the resource. This field sets the ``description`` parameter. in: body required: false type: string display_name_request: description: | The user defined name of the resource. This field sets the ``name`` parameter. in: body required: false type: string driver_handles_share_servers: description: | An extra specification that defines the driver mode for share server, or storage, life cycle management. The Shared File Systems service creates a share server for the export of shares. This value is ``true`` when the share driver manages, or handles, the share server life cycle. This value is ``false`` when an administrator rather than a share driver manages the storage life cycle. 
in: body required: true type: boolean driver_options: description: | A set of one or more key and value pairs, as a dictionary of strings, that describe driver options. Details for driver options should be taken from `appropriate share driver documentation `_. in: body required: false type: object encryption_key_ref_request: description: | The encryption key ref is valid barbican secret UUID that will be used to get encryption key by the storage drivers. in: body required: false type: object min_version: 2.90 encryption_key_ref_response: description: | The encryption key ref is valid barbican secret UUID that will be used to get encryption key by the storage drivers. in: body required: true type: object export_location: description: | The export location. For newer API versions it is available in separate APIs. See sections `Share export locations <#share-share-export-locations>`_ and `Share instance export locations <#share-share-instance-export-locations>`_. in: body required: true type: string max_version: 2.8 export_location_availability_zone: description: | The name of the availability zone that the export location belongs to. in: body required: true type: string export_location_id: description: | The share export location UUID. in: body required: true type: string export_location_is_admin_only: description: | Defines purpose of an export location. If set to ``true``, then it is expected to be used for service needs and by administrators only. If it is set to ``false``, then this export location can be used by end users. This parameter is only available to users with an "administrator" role, and cannot be controlled via policy .json. in: body required: true type: boolean export_location_path: description: | The export location path that should be used for mount operation. 
in: body required: true type: string export_location_preferred: description: | Drivers may use this field to identify which export locations are most efficient and should be used preferentially by clients. By default it is set to ``false`` value. in: body required: true type: boolean min_version: 2.14 export_location_preferred_replicas: description: | Drivers may use this field to identify which export locations are most efficient and should be used preferentially by clients. By default it is set to ``false`` value. in: body required: true type: boolean export_location_share_instance_id: description: | The UUID of the share instance that this export location belongs to. This parameter is only available to users with an "administrator" role, and cannot be controlled via policy.yaml. in: body required: true type: string export_locations: description: | A list of export locations. For example, when a share server has more than one network interface, it can have multiple export locations. For newer API versions it is available in separate APIs. See sections `Share export locations <#share-share-export-locations>`_ and `Share instance export locations <#share-share-instance- export-locations>`_. in: body required: true type: array max_version: 2.8 export_path: description: | The share export path in the format appropriate for the protocol: - NFS protocol. ``10.0.0.1:/foo_path``. For example, ``10.254.0 .5:/shares/share-42033c24-0261-424f-abda-4fef2f6dbfd5``. - CIFS protocol. For example, ``\\10.0.0.1\foo_name_of_cifs_share``. in: body required: true type: string extend: description: | The ``extend`` object. in: body required: true type: object extension_alias: description: | The alias for the extension. For example, "FOXNSOX", "os-availability-zone", "os-extended-quotas", "os- share-unmanage", or "os-used-limits". in: body required: true type: string extension_description: description: | The description of the extension API. 
in: body required: true type: string extension_links: description: | The extension links. in: body required: true type: array extension_name: description: | The name of the extension. For example, "Fox In Socks." in: body required: true type: string extra_spec_key: description: | The extra specification key in: body required: true type: string extra_specs: description: | Extra specifications of the share type. These are key=value pairs of capabilities that the shares of this type are expected to possess. For more information, see `Share Types `_. Some examples include: ``driver_handles_share_servers``, ``replication_type``, ``snapshot_support``, ``mount_snapshot_support``, ``revert_to_snapshot_support``, ``create_share_from_snapshot_support`` in: body required: true type: object extra_specs_request_body: description: | Extra specifications of the share type. These are key=value pairs of capabilities that the shares of this type are expected to possess. For more information, see `Share Types `_. When creating a new share type, required extra-specifications **must** be provided. ``driver_handles_share_servers`` is a required extra-specification, and ``snapshot_support`` was considered a required extra-specification until API version 2.24. When updating extra-specs of a share type, there's no need to provide required extra specifications unless they need to be updated. Some examples of extra-specifications include: ``replication_type``, ``snapshot_support``, ``mount_snapshot_support``, ``revert_to_snapshot_support``, ``create_share_from_snapshot_support`` in: body required: true type: object force: description: | Indicates whether to permit or deny the force- update of a quota that is already used and the requested value exceeds the configured quota. Set to ``True`` to permit the force-update of the quota. Set to ``False`` to deny the force- update of the quota. 
in: body required: false type: boolean force_delete_2: description: | To force-delete a share instance, set this value to ``null``. The force-delete action, unlike the delete action, ignores the share instance status. in: body required: true type: string force_host_assisted_migration: description: | Forces the host-assisted mechanism to be used, thus using the Data Service to copy data across back ends. This parameter value defaults to ``False``. When set to ``True``, it skips the driver-assisted approach which would otherwise be attempted first. If this option is set to ``True``, all driver-assisted options must be set to ``False``. in: body required: false type: boolean force_snapshot_request: description: | Indicates whether snapshot creation must be attempted when a share's status is not ``available``. Set to ``true`` to force snapshot creation when the share is busy performing other operations. Default is ``false``. in: body required: false type: boolean group_snapshot_id: description: | The share group snapshot ID. in: body required: true type: string group_snapshot_links: description: | The share group snapshot links. in: body required: true type: array group_snapshot_members: description: | The share group snapshot members. in: body required: true type: array group_snapshot_status_required: description: | Filters by a share group snapshot status. A valid value is ``creating``, ``error``, ``available``, ``deleting``, ``error_deleting``. in: body required: true type: string group_spec_key: description: | The extra specification key for the share group type. in: body required: true type: string group_specs: description: | The extra specifications for the share group type. in: body required: false type: object group_specs_required: description: | The extra specifications for the share group type. in: body required: true type: object has_replicas: description: | Indicates whether a share has replicas or not. 
in: body required: true type: boolean min_version: 2.11 host: description: | The target pool to which the share should be migrated to, in format ``host@backend#pool``. E.g. ``ubuntu@generic1#GENERIC1``. in: body required: true type: string host_resource_response: description: | The host name of the service back end that the resource is contained within. This parameter is always present in the response schema, but the value may be represented as "null" to non-admin users. in: body required: true type: string host_share_server_body: description: | The share server host name or IP address. in: body required: true type: string host_share_server_migration: description: | The target backend to which the share server should be migrated to, in format ``host@backend``. E.g. ``ubuntu@generic1``. in: body required: true type: string hosts_check_result: description: | (Admin only). Result received from each host in a security service update check operation. in: body required: true type: object id_13: description: | The share instance ID. in: body required: true type: string identifier: description: | The identifier of the share server in the back-end storage system. in: body required: true type: string ip_version: description: | The IP version of the network. A valid value is ``4`` or ``6``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: integer max_version: 2.50 is_auto_deletable: description: | Defines if a share server can be deleted automatically by the service. Share server deletion can be automated with configuration. However, Share servers that have ever had a share removed from service management cannot be automatically deleted by the service. in: body required: true type: boolean is_default_type: description: | Defines the share type created is default or not. If the returning value is true, then it is the default share type, otherwise, it is not default. 
in: body required: true type: boolean min_version: 2.46 is_default_type_body: description: | Defines the share type created is default or not. If the returning value is true, then it is the default share type, otherwise, it is not default. in: body required: true type: boolean is_group_type_default: description: | Defines the share group type created is default or not. If the returning value is true, then it is the default share group type, otherwise, it is not default. in: body required: true type: boolean min_version: 2.46 is_public_manage_request: description: | The level of visibility for the share. Set to ``true`` to make share visible to all projects in the cloud. Set to ``false`` to make it private to your project. Default value is ``false``. in: body required: false type: boolean min_version: 2.8 is_public_request: description: | The level of visibility for the share. Set to ``true`` to make share visible to all projects in the cloud. Set to ``false`` to make it private to your project. Default value is ``false``. in: body required: false type: boolean is_public_shares_response: description: | Whether the share is visible publicly (by all projects in the cloud) or not. in: body required: true type: boolean is_soft_deleted_response: description: | Whether the share has been soft deleted to recycle bin or not. in: body required: false type: boolean min_version: 2.69 links: description: | Pagination and bookmark links for the resource. in: body required: true type: array lock_deletion: description: | Whether the resource should have its deletion locked or not. in: body required: false type: string min_version: 2.82 lock_visibility: description: | Whether the resource should have its sensitive fields restricted or not. When enabled, other users will see the "access_to" and "access_key" fields set to ****** in: body required: false type: string min_version: 2.82 manage_host: description: | The host of the destination back end, in this format: ``host@backend``. 
- ``host``: The host name for the destination back end. - ``backend``: The name of the destination back end. in: body required: true type: string manage_share_server_id: description: | The UUID of the share server. in: body required: true type: string min_version: 2.49 managed_share_user_id: description: | ID of the user who brought the share under manila management. in: body required: true type: string min_version: 2.16 maxTotalBackupGigabytes: description: | The total maximum number of backup gigabytes that are allowed in a project. in: body required: true type: integer min_version: 2.80 maxTotalBackupGigabytesOptional: description: | The total maximum number of backup gigabytes that are allowed in a project. in: body required: false type: integer min_version: 2.80 maxTotalEncryptionKeys: description: | The number of encryption keys allowed in a project. in: body required: true type: integer min_version: 2.90 maxTotalEncryptionKeysOptional: description: | The number of encryption keys allowed in a project. in: body required: false type: integer min_version: 2.90 maxTotalReplicaGigabytes: description: | The maximum number of replica gigabytes that are allowed in a project. You cannot create a share, share replica, manage a share or extend a share if it is going to exceed the allowed replica gigabytes quota. in: body required: true type: integer min_version: 2.53 maxTotalReplicaGigabytesOptional: description: | The maximum number of replica gigabytes that are allowed in a project. You cannot create a share, share replica, manage a share or extend a share if it is going to exceed the allowed replica gigabytes quota. in: body required: false type: integer min_version: 2.53 maxTotalShareBackups: description: | The total maximum number of share backups that are allowed in a project. in: body required: true type: integer min_version: 2.80 maxTotalShareBackupsOptional: description: | The total maximum number of share backups that are allowed in a project. 
in: body required: false type: integer min_version: 2.80 maxTotalShareGigabytes: description: | The total maximum number of share gigabytes that are allowed in a project. You cannot request a share that exceeds the allowed gigabytes quota. in: body required: true type: integer maxTotalShareGigabytesOptional: description: | The total maximum number of share gigabytes that are allowed in a project. You cannot request a share that exceeds the allowed gigabytes quota. in: body required: false type: integer maxTotalShareGroups: description: | The maximum number of share groups. in: body required: true type: integer min_version: 2.40 maxTotalShareGroupSnapshots: description: | The maximum number of share group snapshots. in: body required: true type: integer min_version: 2.40 maxTotalShareNetworks: description: | The total maximum number of share-networks that are allowed in a project. in: body required: true type: integer maxTotalShareNetworksOptional: description: | The total maximum number of share-networks that are allowed in a project. in: body required: false type: integer maxTotalShareReplicas: description: | The maximum number of share replicas that is allowed. in: body required: true type: integer min_version: 2.53 maxTotalShareReplicasOptional: description: | The maximum number of share replicas that is allowed. in: body required: false type: integer min_version: 2.53 maxTotalShares: description: | The total maximum number of shares that are allowed in a project. in: body required: true type: integer maxTotalShareSnapshots: description: | The total maximum number of share snapshots that are allowed in a project. in: body required: true type: integer maxTotalShareSnapshotsOptional: description: | The total maximum number of share snapshots that are allowed in a project. in: body required: false type: integer maxTotalSharesOptional: description: | The total maximum number of shares that are allowed in a project. 
in: body required: false type: integer maxTotalSnapshotGigabytes: description: | The total maximum number of snapshot gigabytes that are allowed in a project. in: body required: true type: integer maxTotalSnapshotGigabytesOptional: description: | The total maximum number of snapshot gigabytes that are allowed in a project. in: body required: false type: integer message_expires_at_body: description: | The date and time stamp when the resource message will expire within the service's database. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2016-12-31T13:14:15-05:00``. in: body required: true type: string message_id_body: description: | The UUID of the message. in: body required: true type: string message_level_body: in: body required: true type: string description: > The message level. message_links: description: | The message links. in: body required: true type: array message_members_links: description: | The message member links. in: body required: true type: array metadata: description: | One or more metadata key and value pairs as a dictionary of strings. in: body required: true type: object metadata_item: description: | A single metadata key and value pair. in: body required: true type: object metadata_key_request: description: | The key of a metadata item. For example, if the metadata on an existing resource is as follows: ``"project": "my_test", "aim": "testing"``, the keys are "project" and "aim". in: body required: true type: object metadata_request: description: | One or more metadata key-value pairs, as a dictionary of strings. For example, ``"project": "my_test", "aim": "testing"``. The share server does not respect case-sensitive key names. For example, ``"key": "v1"`` and ``"KEY": "V1"`` are equivalent. If you specify both key-value pairs, the server sets and returns only the ``"KEY": "V1"`` key-value pair. 
in: body required: true type: object migrate-start: description: | The ``migrate-start`` object. in: body required: true type: object migrate_share: description: | The ``migrate_share`` object. in: body required: true type: object migration_complete: description: | The ``migration_complete`` object. in: body required: true type: object migration_progress_details: description: | Additional driver specific details of the migration progress. in: body required: true type: object min_version: 2.59 mount_snapshot_support: description: | Boolean extra spec used for filtering of back ends by their capability to mount share snapshots. in: body required: false type: boolean min_version: 2.32 mount_snapshot_support_body: description: | Boolean extra spec used for filtering of back ends by their capability to mount share snapshots. in: body required: false type: boolean mount_snapshot_support_share_capability: description: | Whether or not this share supports snapshots that can be mounted and access controlled independently of the share. Inconsequential if the share doesn't support snapshots (see capability "snapshot_support"). in: body required: true type: boolean min_version: 2.32 name: description: | The user defined name of the resource. in: body required: true type: string name_request: description: | The user defined name of the resource. The value of this field is limited to 255 characters. in: body required: false type: string network_type: description: | The network type. A valid value is ``VLAN``, ``VXLAN``, ``GRE``, or ``flat``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: string max_version: 2.50 neutron_net_id: description: | The neutron network ID. in: body required: true type: string max_version: 2.50 neutron_net_id_request: description: | The UUID of a neutron network when setting up or updating a share network subnet with neutron. 
Specify both a neutron network and a neutron subnet that belongs to that neutron network. in: body required: false type: string neutron_subnet_id: description: | The neutron subnet ID. in: body required: true type: string max_version: 2.50 neutron_subnet_id_request: description: | The UUID of the neutron subnet when setting up or updating a share network subnet with neutron. Specify both a neutron network and a neutron subnet that belongs to that neutron network. in: body required: false type: string new_share_network_id: description: | If willing to change the share’s share-network so it can be allocated in the desired destination pool, the invoker may supply a new share network to be used. This is often suited when the share is to be migrated to a pool which operates in a different availability zone or managed by a driver that handles share servers. in: body required: false type: string new_share_network_id_server_migration: description: | If willing to change the share server’s share-network so it can be allocated in the desired destination backend, the invoker may supply a new share network to be used. in: body required: false type: string new_share_network_security_service_id: description: | The ID of a security service that must be attached to a share network after a share network security service update operation. in: body required: true type: string new_share_type_id: description: | If willing to retype the share so it can be allocated in the desired destination pool, the invoker may supply a new share type to be used. This is often suited when the share is to be migrated to a pool which operates in the opposite driver mode. in: body required: false type: string next-available: description: | The date and time stamp when next issues are available. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. 
in: body required: false type: string nondisruptive: description: | Specifies whether migration should only be performed without disrupting clients during migration. For such, it is also expected that the export location does not change. When set to ``True`` and drivers are not capable of allowing the share to remain accessible through the two phases of the migration, migration will result in an error status. As of Ocata release, host-assisted migration cannot provide this capability. in: body required: true type: boolean nondisruptive_server_migration: description: | Specifies whether share server migration should only be performed without disrupting clients during migration. For such, it is also expected that the export location does not change. When set to ``True`` and drivers are not capable of allowing the share server to remain accessible through the two phases of the migration, migration will result in an error status. in: body required: true type: boolean operation_is_compatible: description: | Indicates the result of a check operation. If ``True`` indicates that the addition/update of security service is possible. in: body required: true type: boolean os-share-type-access:is_public: description: | Indicates whether a share type is publicly accessible. Default is ``true``, or publicly accessible. in: body required: false type: boolean max_version: 2.6 perShareGigabytes: description: | The number of gigabytes per share allowed in a project. in: body required: true type: integer min_version: 2.62 perShareGigabytesOptional: description: | The number of gigabytes per share allowed in a project. in: body required: false type: integer min_version: 2.62 pool: description: | The pool name for the back end. in: body required: true type: string pools: description: | The pools for the back end. This value is either ``null`` or a string value that indicates the capabilities for each pool. For example, ``pool_name``, ``total_capacity_gb``, ``qos``, and so on. 
in: body required: true type: string preserve_metadata: description: | Specifies whether migration should enforce the preservation of all file system metadata. When set to ``True`` and drivers are not capable of ensuring preservation of file system metadata, migration will result in an error status. As of Ocata release, host-assisted migration cannot provide any guarantees of preserving file system metadata. in: body required: true type: boolean preserve_snapshots: description: | Specifies whether migration should enforce the preservation of all existing snapshots at the destination. When set to ``True`` and drivers are not capable of migrating the snapshots, migration will result in an error status. As of Ocata release, host-assisted migration cannot provide this capability. in: body required: true type: boolean preserve_snapshots_server_migration: description: | Specifies whether migration should enforce the preservation of all existing snapshots at the destination. When set to ``True`` and drivers are not capable of migrating the snapshots, migration will result in an error status. in: body required: true type: boolean progress: description: | The progress of the snapshot creation. in: body required: true type: string progress_share_instance: description: | The progress of the share creation. in: body min_version: 2.54 required: true type: string project: description: | The UUID of the project to which access to the share type is granted. in: body required: true type: string project_id: description: | The ID of the project that owns the resource. in: body required: true type: string project_id_messages_body: description: | The ID of the project for which the message was created. in: body required: true type: string project_id_type_access: description: | The ID of the project that has been granted access to the type resource. 
in: body required: true type: string project_id_type_access_grant_request: description: | The ID of the project that needs to have access to the type resource. in: body required: true type: string project_id_type_access_revoke_request: description: | The ID of the project whose access to the type resource must be revoked. in: body required: true type: string protocol: description: | The Shared File Systems protocol of the share to manage. A valid value is ``NFS``, ``CIFS``, ``GlusterFS``, ``CEPHFS``, ``HDFS`` or ``MAPRFS``. in: body required: true type: string quota_backup_gigabytes: description: | The number of gigabytes for the backups allowed for each project. in: body min_version: 2.80 required: true type: integer quota_backup_gigabytes_detail: description: | The limit, in_use, reserved number of gigabytes for the backups allowed for each project. in: body min_version: 2.80 required: true type: object quota_backup_gigabytes_request: description: | The number of gigabytes for the backups for the project. in: body min_version: 2.80 required: false type: integer quota_backups: description: | The number of backups allowed for each project. in: body min_version: 2.80 required: true type: integer quota_backups_detail: description: | The limit, in_use, reserved number of backups allowed for each project. in: body min_version: 2.80 required: true type: object quota_backups_request: description: | The number of backups for the project. in: body min_version: 2.80 required: false type: integer quota_class_id: description: | A ``quota_class_set`` id. in: body required: true type: string quota_class_set: description: | A ``quota_class_set`` object. in: body required: true type: object quota_encryption_keys: description: | The number of encryption keys allowed for each project. in: body required: true min_version: 2.90 type: integer quota_encryption_keys_detail: description: | The limit, in_use, reserved number of encryption keys for each project. 
in: body min_version: 2.90 required: true type: object quota_encryption_keys_request: description: | The number of encryption keys allowed for each project. in: body min_version: 2.90 required: false type: integer quota_gigabytes: description: | The number of gigabytes allowed for each project. in: body required: true type: integer quota_gigabytes_detail: description: | The limit, in_use, reserved number of gigabytes allowed for each project. in: body min_version: 2.25 required: true type: object quota_gigabytes_request: description: | The number of gigabytes for the project. in: body required: false type: integer quota_per_share_gigabytes: description: | The number of gigabytes per share allowed for each project. in: body required: true min_version: 2.62 type: integer quota_per_share_gigabytes_detail: description: | The limit, in_use, reserved number of per share gigabytes for each project. in: body min_version: 2.62 required: true type: object quota_per_share_gigabytes_request: description: | The number of gigabytes per share allowed for each project. in: body min_version: 2.62 required: false type: integer quota_project_id: description: | The ID of the project the quota pertains to. in: body required: true type: string quota_replica_gigabytes: description: | The number of gigabytes for the share replicas allowed for each project. in: body required: true min_version: 2.53 type: integer quota_replica_gigabytes_detail: description: | The limit, in_use, reserved number of replica gigabytes for each project. in: body min_version: 2.53 required: true type: object quota_replica_gigabytes_request: description: | The number of gigabytes for share replicas for the project. in: body min_version: 2.53 required: false type: integer quota_set: description: | The ``quota_set`` object. in: body required: true type: object quota_share_group_snapshots: description: | The number of share group snapshots allowed for each project or user. 
in: body min_version: 2.40 required: true type: integer quota_share_group_snapshots_detail: description: | The limit, in_use, reserved number of share group snapshots for each project or user. in: body min_version: 2.40 required: true type: object quota_share_group_snapshots_request: description: | The number of share group snapshots allowed for each project or user. in: body min_version: 2.40 required: false type: integer quota_share_groups: description: | The number of share groups allowed for each project or user. in: body min_version: 2.40 required: true type: integer quota_share_groups_detail: description: | The limit, in_use, reserved number of share groups for each project or user. in: body min_version: 2.40 required: true type: object quota_share_groups_request: description: | The number of share groups allowed for each project or user. in: body min_version: 2.40 required: false type: integer quota_share_networks: description: | The number of share networks allowed for user and project, but not share type. in: body required: false type: integer quota_share_networks_default: description: | The number of share networks allowed for each project. in: body required: true type: integer quota_share_networks_detail: description: | The limit, in_use, reserved number of share networks allowed for user and project, but not share type. in: body min_version: 2.25 required: false type: object quota_share_networks_request: description: | The number of share networks for the project. in: body required: false type: integer quota_share_replicas: description: | The number of share replicas allowed for each project. in: body required: true min_version: 2.53 type: integer quota_share_replicas_detail: description: | The limit, in_use, reserved number of share replicas for each project. in: body min_version: 2.53 required: true type: object quota_share_replicas_request: description: | The number of share replicas allowed for each project or user. 
in: body min_version: 2.53 required: false type: integer quota_shares: description: | The number of shares allowed for each project. in: body required: true type: integer quota_shares_detail: description: | The limit, in_use, reserved number of shares allowed for each project. in: body min_version: 2.25 required: true type: object quota_shares_request: description: | The number of shares for the project. in: body required: false type: integer quota_snapshot_gigabytes: description: | The number of gigabytes for the snapshots allowed for each project. in: body required: true type: integer quota_snapshot_gigabytes_detail: description: | The limit, in_use, reserved number of gigabytes for the snapshots allowed for each project. in: body min_version: 2.25 required: true type: object quota_snapshot_gigabytes_request: description: | The number of gigabytes for the snapshots for the project. in: body required: false type: integer quota_snapshots: description: | The number of snapshots allowed for each project. in: body required: true type: integer quota_snapshots_detail: description: | The limit, in_use, reserved number of snapshots allowed for each project. in: body min_version: 2.25 required: true type: object quota_snapshots_request: description: | The number of snapshots for the project. in: body required: false type: integer regex: description: | An API regular expression. For example, ``^/shares`` for the ``/shares`` API URI or ``.*`` for any URI. in: body required: false type: string remaining: description: | The remaining number of allowed requests. in: body required: false type: integer remove_project_access: description: | An object representing the project resource that access should be revoked from. in: body required: true type: object replica_state: description: | The share replica state. Has set value only when replication is used. List of possible values: ``active``, ``in_sync``, ``out_of_sync``, and ``error``. 
in: body required: true type: string min_version: 2.11 replication_type: description: | Type of replication supported for shares of this type. Valid values are: - ``null``: replication is not supported - ``readable``: users can create replicas of the share that are read-only until they are promoted, - ``writable``: users can create read/write replicas - ``dr``: users can create replicas that are not exported until they are promoted. in: body required: false type: string replication_type_share_capability: description: | The share replication type. The value can be: - ``null``, if the share cannot be replicated. - ``readable``, if one or more read-only replicas of the share can be created - ``writable``, if one or more active replicas of the share can be created - ``dr``, if one or more replicas of the share can be created that will remain inaccessible until they are promoted. in: body required: true type: string min_version: 2.11 request_id_body: description: | The UUID of the request during which the message was created. in: body required: true type: string requested_capabilities: description: | The parameters that were sent to the server in order to check if the destination host can handle the share server migration. This object contains the following attributes: ``writable``, ``nondisruptive``, ``preserve_snapshots``, ``share_network_id`` and ``host``. in: body required: true type: object requested_check_operation: description: | Information about the requested operation. in: body required: true type: object requested_operation: description: | Data about the operation that was requested. in: body required: true type: object requested_operation_name: description: | The name of the check operation that was triggered, being ``add_security_service`` or ``update_security_service``. in: body required: true type: string required_extra_specs: description: | The required extra specifications for the share type: ``driver_handles_share_servers``. 
``snapshot_support`` was treated as a required extra-specification until api version 2.24. in: body required: true type: object reset_operation: description: | Whether a share network security service check update or add operation for a given share network should ignore previous check results, and check the operation's compatibility again. in: body required: true type: boolean reset_status: description: | The ``reset_status`` object. in: body required: true type: object resource_id_body: description: | The UUID of the resource for which the message was created. in: body required: true type: string resource_lock_id: description: | The UUID identifying the specific resource lock. in: body required: true type: string resource_lock_lock_context: description: | The lock creator's context. Resource locks can be created by users with different roles. If a user with ``admin`` role creates the lock, the value of this field is ``admin``. If a user with ``service`` role creates the lock, the value of this field is ``service``. For all other contexts, the value of this field is ``user``. This field also determines the user's role that is required to unlock or manipulate a lock by virtue of the service's default RBAC. in: body required: true type: string resource_lock_lock_reason: description: | A blob of text representing the reason for the specific resource lock. in: body required: true type: string resource_lock_lock_reason_optional: description: | A blob of text representing the reason for the specific resource lock. in: body required: false type: string resource_lock_object: description: | A resource lock object when making resource lock requests. All other parameters are included in this object. in: body required: true type: object resource_lock_project_id: description: | The ID of the project that the resource lock was created for. 
in: body required: true type: string resource_lock_resource_action: description: | The action pertaining to a resource that the resource lock prevents. For example, if a resource lock prevents deletion of a share, the value of ``resource_action`` is ``delete``. Resource locks are not supported for all API actions. in: body required: true type: string resource_lock_resource_action_create_optional: description: | The action pertaining to a resource that the resource lock prevents. For example, if a resource lock prevents deletion of a share, the value of ``resource_action`` is ``delete``. Resource locks are not supported for all API actions. Currently support only exists for ``delete``, and for specific resources. If not provided, the value of this parameter defaults to ``delete``. in: body required: false type: string resource_lock_resource_action_optional: description: | The action pertaining to a resource that the resource lock prevents. For example, if a resource lock prevents deletion of a share, the value of ``resource_action`` is ``delete``. Resource locks are not supported for all API actions. in: body required: false type: string resource_lock_resource_id: description: | The UUID of the resource that the lock pertains to. For example, this can be the ID of the share that is locked from deletion. in: body required: true type: string resource_lock_resource_type: description: | The type of resource that the ID in ``resource_id`` denotes. For example, ``share`` is the resource type that is specified when the resource lock pertains to a share being locked from deletion. Resource locks are not supported for all resources. Currently support only exists for ``share``. in: body required: true type: string resource_lock_user_id: description: | The ID of the user the resource lock was created for. in: body required: true type: string resource_locks_object: description: | A resource locks object containing a collection or list of resource locks. 
in: body required: true type: object resource_type_body: description: | The type of the resource for which the message was created. in: body required: true type: string revert_to_snapshot_support: description: | Boolean extra spec used for filtering of back ends by their capability to revert shares to snapshots. in: body required: false type: boolean min_version: 2.27 revert_to_snapshot_support_body: description: | Boolean extra spec used for filtering of back ends by their capability to revert shares to snapshots. in: body required: false type: boolean revert_to_snapshot_support_share_capability: description: | Whether or not this share supports being reverted to its latest snapshot. Inconsequential if the share doesn't support snapshots (see capability "snapshot_support"). in: body required: true type: boolean min_version: 2.27 scheduled_to_be_deleted_at_response: description: | Estimated time at which the share in the recycle bin will be deleted automatically. in: body required: false type: string min_version: 2.69 scheduler_hints: description: | One or more scheduler_hints key and value pairs as a dictionary of strings. Accepted hints are: - ``same_host`` or ``different_host``: values must be a comma separated list of Share IDs - ``only_host``: value must be a manage-share service host in ``host@backend#POOL`` format (admin only). Only available in and beyond API version 2.67 in: body required: false type: object min_version: 2.65 security_service_default_ad_site: description: | The security service default AD site. in: body required: true type: string min_version: 2.76 security_service_default_ad_site_request: description: | The security service default AD site. in: body required: false type: string min_version: 2.76 security_service_dns_ip: description: | The DNS IP address that is used inside the project network. in: body required: true type: string security_service_dns_ip_request: description: | The DNS IP address that is used inside the project network. 
in: body required: false type: string security_service_domain: description: | The security service domain. in: body required: true type: string security_service_domain_request: description: | The security service domain. in: body required: false type: string security_service_id: description: | The security service ID. in: body required: true type: string security_service_ou: description: | The security service ou. in: body required: true type: string min_version: 2.44 security_service_ou_request: description: | The security service ou. An organizational unit can be added to specify where the share ends up. in: body required: false type: string min_version: 2.44 security_service_password: description: | The user password, if you specify a ``user``. in: body required: true type: string security_service_password_request: description: | The user password, if you specify a ``user``. in: body required: false type: string security_service_server: description: | The security service host name or IP address. in: body required: true type: string security_service_server_request: description: | The security service host name or IP address. in: body required: false type: string security_service_status: description: | The security service status. in: body required: true type: string security_service_type: description: | The security service type. A valid value is ``ldap``, ``kerberos``, or ``active_directory``. in: body required: true type: string security_service_type_request: description: | The security service type. A valid value is ``ldap``, ``kerberos``, or ``active_directory``. in: body required: false type: string security_service_update_support: description: | Whether a share network or server supports security service updates or not. in: body required: true type: boolean min_version: 2.63 security_service_user: description: | The security service user or group name that is used by the project. 
in: body required: true type: string security_service_user_request: description: | The security service user or group name that is used by the project. in: body required: false type: string segmentation_id: description: | The segmentation ID. This parameter is automatically set to a value determined by the network provider. For VLAN, this value is an integer from 1 to 4094. For VXLAN, this value is an integer from 1 to 16777215. For GRE, this value is an integer from 1 to 4294967295. in: body required: true type: integer max_version: 2.50 service_binary_response: description: | The service binary name. Default is the base name of the executable. in: body required: true type: string service_disable_binary_request: description: | The name of the service binary that you want to disable. Typically, this name is the base name of the executable. in: body required: true type: string service_disable_binary_response: description: | The name of the disabled service binary. Typically, this name is the base name of the executable. in: body required: true type: string service_disable_host_request: description: | The host name of the service that you want to disable. in: body required: true type: string service_disable_host_response: description: | The host name of the disabled service. in: body required: true type: string service_disable_reason_request: description: | A reason for the service being disabled. in: body required: false type: string min_version: 2.83 service_disable_reason_response: description: | A reason for the service being disabled. in: body required: true type: string min_version: 2.83 service_disabled_response: description: | Indicates whether the service is disabled. in: body required: true type: boolean max_version: 2.82 service_enable_binary_request: description: | The name of the service binary that you want to enable. Typically, this name is the base name of the executable. 
in: body required: true type: string service_enable_host_request: description: | The host name of the service that you want to enable. in: body required: true type: string service_enable_host_response: description: | The host name of the enabled service. in: body required: true type: string service_ensure_shares_host_request: description: | The host name of the manila-share binary that you want to start the ensure shares procedure in the ``host@backend`` format. in: body required: true type: string service_ensuring_response: description: | Whether the service is currently running ensure shares or not. in: body required: true type: string min_version: 2.86 service_host: description: | The manage-share service host in this format: ``host@backend#POOL``: - ``host``. The host name for the back end. - ``backend``. The name of the back end. - ``POOL``. The pool name for the back end. in: body required: true type: string service_host_response: description: | The service host name. in: body required: true type: string service_id_response: description: | The service ID. in: body required: true type: integer service_state_response: description: | The current state of the service. A valid value is ``up`` or ``down``. in: body required: true type: string service_status_new_response: description: | The service status, which is ``enabled`` or ``disabled``. in: body required: true type: string min_version: 2.83 service_status_response: description: | The service status, which is ``enabled`` or ``disabled``. in: body required: true type: string service_zone_response: description: | The service availability zone. in: body required: true type: string services: description: | Top element in the response body. in: body required: true type: string share: description: | A ``share`` object. in: body required: true type: object share_force_delete: description: | To force-delete a share or share group, set this value to ``null``. 
The force-delete action, unlike the delete action, ignores the share or share group status. in: body required: true type: string share_force_extend: description: | (Admin only). Defines whether to bypass the scheduler. Set to ``True`` to extend the share directly. Set to ``False`` to go through the scheduler. The default is ``False``. in: body required: false type: boolean min_version: 2.64 share_group_host: description: | The share group host name. in: body required: false type: string share_group_id: description: | The UUID of the share group. in: body required: true type: string min_version: 2.31 share_group_id_request: description: | The UUID of the share group. in: body required: false type: string min_version: 2.31 share_group_links: description: | The share group links. in: body required: true type: string share_group_status: description: | The share group status, which is ``available``, ``error``, ``creating``, or ``deleting``. in: body required: true type: string share_group_type_id: description: | The share group type ID to create a share group. in: body required: false type: string share_group_type_id_required: description: | The share group type ID. in: body required: true type: string share_group_type_is_public: description: | The level of visibility for the share group type. Set to ``true`` to make share group type public. Set to ``false`` to make it private. Default value is ``true``. in: body required: true type: boolean share_group_type_is_public_request: description: | The level of visibility for the share group type. Set to ``true`` to make share group type public. Set to ``false`` to make it private. Default value is ``false``. in: body required: false type: boolean share_group_type_name: description: | The share group type name. in: body required: true type: string share_group_type_name_request: description: | The name of the share group type resource. The value of this field is limited to 255 characters. 
in: body required: false type: string share_id_request: description: | The UUID of the share. in: body required: true type: string share_id_response: description: | The UUID of the share. in: body required: true type: string share_id_share_instances_response: description: | The UUID of the share to which the share instance belongs to. in: body required: true type: string share_instance_cast_rules_to_readonly: description: | If the share instance has its ``cast_rules_to_readonly`` attribute set to True, all existing access rules will be cast to read/only. in: body required: true type: boolean min_version: 2.30 share_instance_id_1: description: | The UUID of the share instance. in: body required: true type: string share_network_availability_zone_request: description: | The UUID or name of an availability zone for the share network subnet. in: body required: false type: string min_version: 2.51 share_network_gateway: description: | The gateway of a share network. in: body required: true type: string min_version: 2.18 max_version: 2.50 share_network_id: description: | The share network ID where the resource is exported to. in: body required: true type: string share_network_id_manage_server_request: description: | The UUID of the share network that the share server will pertain to. in: body required: true type: string share_network_id_request: description: | The ID of a share network that the resource must be exported to. Note that when using a share type with the ``driver_handles_share_servers`` extra spec as ``False``, you should not provide a ``share_network_id``. in: body required: false type: string share_network_id_share_networks_response: description: | The UUID of a share network resource. in: body required: true type: string share_network_id_share_servers_response: description: | The UUID of a share network that is associated with the share server. in: body required: true type: string share_network_mtu: description: The MTU value of a share network. 
in: body required: true type: integer min_version: 2.20 max_version: 2.50 share_network_name: description: | The name of a share network that is associated with the share server. in: body required: true type: string share_network_security_service_id: description: | The UUID of the security service to remove from the share network. For details, see the security service section. in: body required: true type: string share_network_security_service_update_support: description: | Whether the share network supports its security services being updated when it is already being used. in: body required: true type: boolean min_version: 2.63 share_network_share_network_subnets: description: | A list of share network subnets that pertain to the related share network. in: body required: true type: array min_version: 2.51 share_network_status: description: | The status of a share network. Possible values are: ``active``, ``error`` or ``network_change``. in: body required: true type: string min_version: 2.63 share_network_subnet_availability_zone: description: | The name of the availability zone that the share network subnet belongs to. in: body required: true type: string share_network_subnet_cidr: description: | The IP block from which to allocate the network, in CIDR notation. For example, ``172.16.0.0/24`` or ``2001:DB8::/64``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: string share_network_subnet_gateway: description: | The gateway of a share network subnet. in: body required: true type: string share_network_subnet_id: description: | The UUID of the share network subnet. in: body required: true type: string share_network_subnet_id_manage_server_body: description: | The UUID of the share network subnet that the share server pertain to. 
in: body required: true type: string min_version: 2.51 share_network_subnet_id_manage_server_request: description: | The UUID of the share network subnet that the share server will pertain to. If not specified, the share network's default subnet UUID will be used. in: body required: false type: string min_version: 2.51 share_network_subnet_id_share_server_body: description: | The UUID of the share network subnet that the share server pertains to. in: body required: true type: string min_version: 2.51 share_network_subnet_ip_version: description: | The IP version of the network. A valid value is ``4`` or ``6``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: integer share_network_subnet_mtu: description: | The MTU of a share network subnet. in: body required: true type: string share_network_subnet_network_type: description: | The network type. A valid value is ``VLAN``, ``VXLAN``, ``GRE``, or ``flat``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: string share_network_subnet_neutron_net_id: description: | The neutron network ID. in: body required: true type: string share_network_subnet_neutron_subnet_id: description: | The neutron subnet ID. in: body required: true type: string share_network_subnet_segmentation_id: description: | The segmentation ID. This parameter is automatically set to a value determined by the network provider. For VLAN, this value is an integer from 1 to 4094. For VXLAN, this value is an integer from 1 to 16777215. For GRE, this value is an integer from 1 to 4294967295. in: body required: true type: integer share_network_subnet_share_network_id: description: | The UUID of the share network that the share network subnet belongs to. in: body required: true type: string share_network_subnet_share_network_name: description: | The name of the share network that the share network subnet belongs to. 
in: body required: true type: string share_new_size: description: | New size of the share, in GiBs. in: body required: true type: integer share_proto: description: | The Shared File Systems protocol. A valid value is ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, ``CephFS`` or ``MAPRFS``. ``CephFS`` is supported starting with API v2.13. in: body required: true type: string share_replica_az: description: | The availability zone. in: body required: false type: string share_replica_cast_rules_to_readonly: description: | If the share replica has its ``cast_rules_to_readonly`` attribute set to True, all existing access rules will be cast to read/only. in: body required: true type: boolean min_version: 2.30 share_replica_force_delete: description: | To force-delete a share replica, set this value to ``null``. The force-delete action, unlike the delete action, ignores the share replica status. in: body required: true type: string share_replica_host: description: | The host name of the share replica. in: body required: true type: string share_replica_id: description: | The UUID of the share replica. in: body required: true type: string share_replica_quiesce_wait_time: description: | The quiesce wait time in seconds used during replica promote. in: body required: false type: integer min_version: 2.75 share_replica_replica_state: description: | The replica state of a share replica. List of possible values: ``active``, ``in_sync``, ``out_of_sync``, and ``error``. in: body required: true type: string share_replica_reset_replica_state: description: | The ``reset_replica_state`` object. in: body required: true type: object share_replica_scheduler_hints: description: | One or more scheduler_hints key and value pairs as a dictionary of strings. 
Accepted hints are: - ``only_host``: value must be a share manager service host in ``host@backend#POOL`` format (admin only) in: body required: false type: object min_version: 2.67 share_replica_share_id: description: | The UUID of the share from which to create a share replica. in: body required: true type: string share_replica_share_network_id: description: | The UUID of the share network. in: body required: false type: string share_replica_status: description: | The status of a share replica. List of possible values: ``available``, ``error``, ``creating``, ``deleting``, or ``error_deleting``. in: body required: true type: string share_server_id: description: | The UUID of the share server. in: body required: true type: string share_server_security_service_update_support: description: | Whether the share server supports its security services being updated after its creation. in: body required: true type: boolean min_version: 2.63 share_server_show_identifier: description: | The identifier of the share server in the back-end storage system. in: body required: true type: string min_version: 2.49 share_server_show_is_auto_deletable: description: | Defines if a share server can be deleted automatically by the service. Share server deletion can be automated with configuration. However, Share servers that have ever had a share removed from service management cannot be automatically deleted by the service. in: body required: true type: boolean min_version: 2.49 share_server_status: description: | The share server status, which can be ``active``, ``error``, ``creating``, ``deleting``, ``manage_starting``, ``manage_error``, ``unmanage_starting``, ``unmanage_error`` or ``error_deleting``. in: body required: true type: string share_server_unmanage: description: | To unmanage a share server, either set this value to ``null`` or {}. Optionally, the ``force`` attribute can be included in this object. 
in: body required: true type: object share_share_type_name: description: | Name of the share type. in: body required: true type: string min_version: 2.6 share_status_request: description: | The share or share instance status to be set. Possible values are listed in the `section above <#shares>`_. in: body required: true type: string share_status_response: description: | The share or share instance status. Possible values are listed in the `section above <#shares>`_. in: body required: true type: string share_type_access:is_public: description: | Indicates whether a share type is publicly accessible. Default is ``true``, or publicly accessible. in: body required: false type: boolean min_version: 2.7 share_type_access:is_public_body: description: | Indicates whether a share type is accessible by all projects (tenants) in the cloud. in: body required: true type: boolean share_type_access:is_public_update_request: description: | Indicates whether the share type should be accessible by all projects (tenants) in the cloud. If not specified, the visibility of the share type is not altered. in: body required: false type: boolean share_type_description: description: | The description of the share type. in: body required: true type: string min_version: 2.41 share_type_description_body: description: | The description of the share type. in: body required: true type: string share_type_description_request: description: | The description of the share type. The value of this field is limited to 255 characters. in: body required: false type: string min_version: 2.41 share_type_description_update_request: description: | New description for the share type. in: body required: false type: string share_type_id_body: description: | The UUID of the share type. in: body required: true type: string share_type_name: description: | Name of the share type. in: body required: true type: string share_type_name_request: description: | Name of the share type. 
The value of this field is limited to 255 characters. in: body required: false type: string share_type_request: description: | The name or ID of the share type to be used to create the resource. If you omit this parameter, the default share type is used. To view the default share type set by the administrator, issue a list default share types request. in: body required: false type: string share_type_shares_response: description: | The UUID of the share type that the share belongs to. Prior to API version 2.6, this parameter resolved to the name of the share type. In API version 2.6 and beyond, this parameter holds the share type ID instead of the name. in: body required: true type: string share_types: description: | A list of one or more share type IDs. in: body required: false type: array share_types_1: description: | A list of share type IDs. in: body required: true type: array share_unmanage: description: | To unmanage a share, set this value to ``null``. in: body required: true type: string share_user_id: description: | ID of the user that the share was created by. in: body required: true type: string min_version: 2.16 shrink: description: | The ``shrink`` object. in: body required: true type: object size_request: description: | The share size, in GiBs. The requested share size cannot be greater than the allowed GiB quota. To view the allowed quota, issue a get limits request. in: body required: true type: integer size_response: description: | The share size, in GiBs. in: body required: true type: integer snapshot_force_delete: description: | To force-delete a snapshot, include this param and set its value to ``null``. The force-delete action, unlike the delete action, ignores the snapshot status. in: body required: true type: string snapshot_id: description: | The UUID of the snapshot. in: body required: true type: string snapshot_id_request: description: | The UUID of the share's base snapshot. 
in: body required: false type: string snapshot_id_shares_response: description: | The UUID of the snapshot that was used to create the share. in: body required: true type: string snapshot_instance_id: description: | The UUID of the share snapshot instance. in: body required: false type: string snapshot_instance_id_response: description: | The UUID of the share snapshot instance. in: body required: true type: string snapshot_instance_status: description: | The snapshot instance status. A valid value is ``available``, ``error``, ``creating``, ``deleting``, and ``error_deleting``, ``restoring``, ``unmanage_starting``, ``unmanage_error``, ``manage_starting``, ``manage_error``. in: body required: true type: string snapshot_manage_share_id: description: | The UUID of the share that has snapshot which should be managed. in: body required: true type: string snapshot_manage_status: description: | The snapshot status, which could be ``manage_starting``, ``manage_error``, ``unmanage_starting``, or ``unmanage_error``. in: body required: true type: string snapshot_project_id: description: | ID of the project that the snapshot belongs to. in: body required: true type: string min_version: 2.17 snapshot_provider_location: description: | Provider location of the snapshot on the backend. in: body required: true type: string snapshot_provider_location_optional: description: | Provider location of the snapshot on the backend. This parameter is only available to users with an "administrator" role by virtue of default RBAC. This behavior can be modified by overriding the ``context_is_admin`` policy via a custom ``policy.yaml``. in: body required: false type: string min_version: 2.12 snapshot_provider_location_request: description: | Provider location of the snapshot on the backend. in: body required: true type: string snapshot_share_id: description: | The UUID of the source share that was used to create the snapshot. 
in: body required: true type: string snapshot_share_id_request: description: | The UUID of the share from which to create a snapshot. in: body required: true type: string snapshot_share_protocol: description: | The file system protocol of a share snapshot. A valid value is ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, ``CephFS`` or ``MAPRFS``. ``CephFS`` is supported starting with API v2.13. in: body required: true type: string snapshot_share_size: description: | The share snapshot size, in GiBs. in: body required: true type: integer snapshot_size: description: | The snapshot size, in GiBs. in: body required: true type: integer snapshot_status: description: | The snapshot status, which can be ``available``, ``error``, ``creating``, ``deleting``, ``manage_starting``, ``manage_error``, ``unmanage_starting``, ``unmanage_error`` or ``error_deleting``. in: body required: true type: string snapshot_status_request: description: | The snapshot status, which can be ``available``, ``error``, ``creating``, ``deleting``, ``manage_starting``, ``manage_error``, ``unmanage_starting``, ``unmanage_error`` or ``error_deleting``. in: body required: false type: string snapshot_support: description: | An extra specification that filters back ends by whether they do or do not support share snapshots. in: body required: true type: boolean min_version: 2.2 snapshot_support_share_capability: description: | Whether or not this share supports snapshots. Snapshots are point in time backups of the share. in: body required: true type: boolean min_version: 2.2 snapshot_unmanage: description: | To unmanage a share snapshot, include this parameter and set its value to ``null``. in: body required: true type: string snapshot_user_id: description: | ID of the user that the snapshot was created by. in: body required: true type: string min_version: 2.17 source_backup_id_shares_response: description: | The UUID of the backup that was restored in the share. 
in: body required: true type: string min_version: 2.80 source_share_group_snapshot_id: description: | The source share group snapshot ID to create the share group. in: body required: false type: string source_share_group_snapshot_id_response: description: | The source share group snapshot ID to create the share group. in: body required: true type: string source_share_group_snapshot_member_id: description: | ID of the group snapshot instance that was the source of this share. in: body required: true type: string min_version: 2.31 state: description: | Prior to versions 2.28, the state of all access rules of a given share is the same at all times. This could be ``new``, ``active`` or ``error``. Since 2.28, the state of each access rule of a share is independent of the others and can be ``queued_to_apply``, ``applying``, ``active``, ``error``, ``queued_to_deny`` or ``denying``. A new rule starts out in ``queued_to_apply`` state and is successfully applied if it transitions to ``active`` state. in: body required: true type: string status_share_server_body: description: | The share server status, which is ``active``, ``error``, ``creating``, or ``deleting``. in: body required: true type: string supported_capabilities: description: | The driver's supported attributes for a share server migration. It will contain the following items: ``writable``, ``nondisruptive``, ``preserve_snapshots`` and ``share_network_id``. Drivers will also report if they can perform ``migration_cancel`` and ``migration_get_progress`` operations. All of the mentioned parameters will be present in this object. All parameters but the ``share_network_id`` are boolean values. in: body required: true type: object task_state: description: | For the share migration, the migration task state. A valid value is ``null``, ``migration_starting``, ``migration_error``, ``migration_success``, ``migration_completing``, or ``migrating``. 
The ``task_state`` is ``null`` unless the share is migrated from one back-end to another. in: body required: true type: string min_version: 2.5 task_state_server_migration: description: | For the share server migration, the migration task state. A valid value is ``null``, ``migration_in_progress``, ``migration_cancel_in_progress``, ``migration_cancelled``, ``migration_driver_starting``, ``migration_driver_in_progress``, or ``migration_phase_1_done``. in: body required: true type: string timestamp: description: | The date and time stamp when the API request was issued. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string total_progress: description: | Defines a total progress of share migration. in: body required: true type: integer total_progress_server_migration: description: | Defines a total progress of share server migration. in: body required: true type: integer totalBackupGigabytesUsed: description: | The total number of gigabytes used in a project by backups. in: body required: true type: integer totalReplicaGigabytesUsed: description: | The total number of replica gigabytes used in a project by share replicas. in: body required: true type: integer totalShareBackupsUsed: description: | The total number of created share backups in a project. in: body required: true type: integer totalShareGigabytesUsed: description: | The total number of gigabytes used in a project by shares. in: body required: true type: integer totalShareNetworksUsed: description: | The total number of created share-networks in a project. in: body required: true type: integer totalShareReplicasUsed: description: | The total number of created share replicas in a project. in: body required: true type: integer totalShareSnapshotsUsed: description: | The total number of created share snapshots in a project. 
in: body required: true type: integer totalSharesUsed: description: | The total number of created shares in a project. in: body required: true type: integer totalSnapshotGigabytesUsed: description: | The total number of gigabytes used in a project by snapshots. in: body required: true type: integer transfer: description: | The transfer object. in: body required: true type: object transfer_expires_at_body: description: | The date and time stamp when the resource transfer will expire. After transfer expired, will be automatically deleted. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2016-12-31T13:14:15-05:00``. in: body required: true type: string transfer_id_in_body: description: | The transfer UUID. in: body required: true type: string transfer_name: description: | The transfer display name. in: body required: false type: string transfer_resource_id: description: | The UUID of the resource for the transfer. in: body required: true type: string transfer_resource_type: description: | The type of the resource for the transfer. in: body required: true type: string transfers: description: | List of transfers. in: body required: true type: array unit: description: | The time interval during which a number of API requests are allowed. A valid value is ``SECOND``, ``MINUTE``, ``HOUR``, or ``DAY``. Used in conjunction with the ``value`` parameter, expressed as ``value`` per ``unit``. For example, 120 requests are allowed per minute. in: body required: false type: string unrestrict_access: description: | Whether the service should attempt to remove deletion restrictions during the access rule deletion or not. in: body required: false type: string min_version: 2.82 updated_at: description: | The date and time stamp when the resource was last updated within the service's database. 
If a resource was never updated after it was created, the value of this parameter is set to ``null``. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2016-12-31T13:14:15-05:00``. in: body required: true type: string updated_at_extensions: description: | The date and time stamp when the extension API was last updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string uri: description: | A human-readable URI of a rate limit. format: uri in: body required: false type: string user_id: description: | ID of the user that is part of a given project. in: body required: false type: string value: description: | The number of API requests that are allowed during a time interval. Used in conjunction with the ``unit`` parameter, expressed as ``value`` per ``unit``. For example, 120 requests are allowed per minute. in: body required: false type: integer verb: description: | The HTTP method for the API request. For example, ``GET``, ``POST``, ``DELETE``, and so on. in: body required: false type: string version: description: | The version. in: body required: true type: string version_id: type: string in: body required: true description: > A common name for the version in question. Informative only, it has no real semantic meaning. version_max: type: string in: body required: true description: > If this version of the API supports microversions, the maximum microversion that is supported. This will be the empty string if microversions are not supported. version_media_types: description: | Media types supported by the API. 
in: body required: true type: object version_min: type: string in: body required: true description: > If this version of the API supports microversions, the minimum microversion that is supported. This will be the empty string if microversions are not supported. version_status: type: string in: body required: true description: | The status of this API version. This can be one of: - ``CURRENT``: this is the preferred version of the API to use - ``SUPPORTED``: this is an older, but still supported version of the API - ``DEPRECATED``: a deprecated version of the API that is slated for removal version_updated: description: | A date and time stamp for API versions. This field presents no meaningful information. in: body required: true type: string versions: type: array in: body required: true description: > A list of version objects that describe the API versions available. volume_type: description: | The volume type. The use of the ``volume_type`` object is deprecated but supported. It is recommended that you use the ``share_type`` object when you create a share type. When you issue a create a share type request, you can submit a request body with either a ``share_type`` or ``volume_type`` object. No matter which object type you include in the request, the API creates both a ``volume_type`` object and a ``share_type`` object. Both objects have the same ID. When you issue a list share types request, the response shows both ``share_types`` and ``volume_types`` objects. in: body required: false type: string volume_type_shares_response: description: | The share type ID. This is a legacy parameter that contains the same value as the ``share_type`` parameter. Do not rely on this parameter as it may be removed in a future API revision. in: body required: true type: string writable: description: | Specifies whether migration should only be performed if the share can remain writable. 
When this behavior is set to ``True`` and drivers are not capable of allowing the share to remain writable, migration will result in an error status. If drivers are not capable of performing a nondisruptive migration, manila will ensure that the share will remain writable through the data copy phase of migration. However, during the switchover phase the share will be re-exported at the destination, causing the share to be rendered inaccessible for the duration of this phase. As of Ocata release, host-assisted migration cannot provide this capability. in: body required: true type: boolean writable_server_migration: description: | Specifies whether migration should only be performed if the shares can remain writable. When this behavior is set to ``True`` and drivers are not capable of allowing the shares to remain writable, migration will result in an error status. If drivers are not capable of performing a nondisruptive migration, manila will ensure that the shares will remain writable through the data copy phase of migration. However, during the switchover phase all shares will be re-exported at the destination, causing the shares to be rendered inaccessible for the duration of this phase. in: body required: true type: boolean ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/quota-classes.inc0000664000175000017500000000742300000000000021166 0ustar00zuulzuul00000000000000.. -*- rst -*- Quota class set =============== Quota classes can be shown and updated for a project. .. important:: Share replicas and replica gigabytes were added to quota management APIs in API version 2.53. Per share gigabytes was added to quota management APIs in API version 2.62. Share backups and backup gigabytes were added to quota management APIs in API version 2.80. Encryption keys was added to quota management APIs in API version 2.90. Show quota classes for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: GET /v2/quota-class-sets/{quota_class_name} Shows quota class set for a project. If no specific value for the quota class resource exists, then the default value will be reported. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - quota_class_name: quota_class_name Response Parameters ------------------- .. rest_parameters:: parameters.yaml - quota_class_set: quota_class_set - share_groups: maxTotalShareGroups - gigabytes: maxTotalShareGigabytes - share_group_snapshots: maxTotalShareGroupSnapshots - snapshots: maxTotalShareSnapshots - snapshot_gigabytes: maxTotalSnapshotGigabytes - shares: maxTotalShares - id: quota_class_id - share_networks: maxTotalShareNetworks - share_replicas: maxTotalShareReplicas - replica_gigabytes: maxTotalReplicaGigabytes - per_share_gigabytes: perShareGigabytes - backups: maxTotalShareBackups - backup_gigabytes: maxTotalBackupGigabytes - encryption_keys: maxTotalEncryptionKeys Response Example ---------------- .. literalinclude:: ./samples/quota-classes-show-response.json :language: javascript Update quota classes for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/quota-class-sets/{quota_class_name} Updates quota class set for a project. If the ``quota_class_name`` key does not exist, then the API will create one. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - quota_class_name: quota_class_name - shares: maxTotalSharesOptional - snapshots: maxTotalShareSnapshotsOptional - gigabytes: maxTotalShareGigabytesOptional - snapshot-gigabytes: maxTotalSnapshotGigabytesOptional - share-networks: maxTotalShareNetworksOptional - share-replicas: maxTotalShareReplicasOptional - replica-gigabytes: maxTotalReplicaGigabytesOptional - per-share-gigabytes: perShareGigabytesOptional - backups: maxTotalShareBackupsOptional - backup-gigabytes: maxTotalBackupGigabytesOptional - encryption_keys: maxTotalEncryptionKeysOptional Request Example --------------- .. literalinclude:: ./samples/quota-classes-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - quota_class_set: quota_class_set - share_groups: maxTotalShareGroups - gigabytes: maxTotalShareGigabytes - share_group_snapshots: maxTotalShareGroupSnapshots - snapshots: maxTotalShareSnapshots - snapshot_gigabytes: maxTotalSnapshotGigabytes - shares: maxTotalShares - share_networks: maxTotalShareNetworks - share_replicas: maxTotalShareReplicas - replica_gigabytes: maxTotalReplicaGigabytes - per_share_gigabytes: perShareGigabytes - backups: maxTotalShareBackups - backup_gigabytes: maxTotalBackupGigabytes - encryption_keys: maxTotalEncryptionKeys Response Example ---------------- .. literalinclude:: ./samples/quota-classes-update-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/quota-sets.inc0000664000175000017500000002147500000000000020512 0ustar00zuulzuul00000000000000.. -*- rst -*- Quota sets ========== Provides quotas management support. .. important:: For API versions 2.6 and prior, replace ``quota-sets`` in the URLs with ``os-quota-sets``. Share type quotas were added in API version 2.39. 
It is possible to set quotas per share type for the following quota resources: - ``gigabytes`` - ``snapshots`` - ``shares`` - ``snapshot_gigabytes`` - ``share_groups`` (since API version 2.40) - ``share_group_snapshots`` (since API version 2.40) - ``share_replicas`` (since API version 2.53) - ``replica_gigabytes`` (since API version 2.53) - ``per_share_gigabytes`` (since API version 2.62) - ``backups`` (since API version 2.80) - ``backup_gigabytes`` (since API version 2.80) - ``encryption_keys`` (since API version 2.90) In order to manipulate share type quotas, the requests will be similar to the examples below, except that the ``user_id={user_id}`` must be replaced by ``share_type={share_type_name_or_id}`` in the request path. Share groups and share group snapshots were added to quota management APIs in API version 2.40. Share replicas and replica gigabytes were added to quota management APIs in API version 2.53. Per share gigabytes was added to quota management APIs in API version 2.62. Share backups and backup gigabytes were added to quota management APIs in API version 2.80. Encryption keys was added to quota management APIs in API version 2.90. Show default quota set ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/quota-sets/{project_id_quota_request_path}/defaults Shows default quotas for a given project. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - project_id_quota_request_path: project_id_quota_request_path Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - quota_set: quota_set - id: quota_project_id - gigabytes: quota_gigabytes - snapshots: quota_snapshots - shares: quota_shares - snapshot_gigabytes: quota_snapshot_gigabytes - share_networks: quota_share_networks - share_groups: quota_share_groups - share_group_snapshots: quota_share_group_snapshots - share_networks: quota_share_networks_default - share_replicas: quota_share_replicas - replica_gigabytes: quota_replica_gigabytes - per_share_gigabytes: quota_per_share_gigabytes - backups: quota_backups - backup_gigabytes: quota_backup_gigabytes - encryption_keys: quota_encryption_keys Response example ---------------- .. literalinclude:: samples/quota-show-response.json :language: javascript Show quota set ~~~~~~~~~~~~~~ .. rest_method:: GET /v2/quota-sets/{project_id_quota_request_path}?user_id={user_id} Shows quotas for a given project.. If you specify the optional ``user_id`` query parameter, you get the quotas for this user in the project. If you omit this parameter, you get the quotas for the project. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - project_id_quota_request_path: project_id_quota_request_path - user_id: user_id_query - share_type: share_type_for_quota Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - quota_set: quota_set - id: quota_project_id - gigabytes: quota_gigabytes - snapshots: quota_snapshots - shares: quota_shares - snapshot_gigabytes: quota_snapshot_gigabytes - share_networks: quota_share_networks - share_groups: quota_share_groups - share_group_snapshots: quota_share_group_snapshots - share_replicas: quota_share_replicas - replica_gigabytes: quota_replica_gigabytes - per_share_gigabytes: quota_per_share_gigabytes - backups: quota_backups - backup_gigabytes: quota_backup_gigabytes - encryption_keys: quota_encryption_keys Response example ---------------- .. literalinclude:: samples/quota-show-response.json :language: javascript Show quota set in detail (since API v2.25) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/quota-sets/{project_id_quota_request_path}/detail?user_id={user_id} .. versionadded:: 2.25 Shows quotas for a project in detail. If you specify the optional ``user_id`` query parameter, you get the quotas for this user in the project. If you omit this parameter, you get the quotas for the project. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - project_id_quota_request_path: project_id_quota_request_path - user_id: user_id_query - share_type: share_type_for_quota Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - quota_set: quota_set - id: quota_project_id - gigabytes: quota_gigabytes_detail - snapshots: quota_snapshots_detail - shares: quota_shares_detail - snapshot_gigabytes: quota_snapshot_gigabytes_detail - share_networks: quota_share_networks_detail - share_groups: quota_share_groups_detail - share_group_snapshots: quota_share_group_snapshots_detail - share_replicas: quota_share_replicas_detail - replica_gigabytes: quota_replica_gigabytes_detail - per_share_gigabytes: quota_per_share_gigabytes_detail - backups: quota_backups_detail - backup_gigabytes: quota_backup_gigabytes_detail - encryption_keys: quota_encryption_keys_detail Response example ---------------- .. literalinclude:: samples/quota-show-detail-response.json :language: javascript Update quota set ~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/quota-sets/{project_id_quota_request_path}?user_id={user_id} Updates quotas for a project. If you specify the optional ``user_id`` query parameter, you update the quotas for this user in the project. If you omit this parameter, you update the quotas for the project. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - project_id_quota_request_path: project_id_quota_request_path - user_id: user_id_query - quota_set: quota_set - force: force - gigabytes: quota_gigabytes_request - snapshots: quota_snapshots_request - snapshot_gigabytes: quota_snapshot_gigabytes_request - shares: quota_shares_request - share_networks: quota_share_networks_request - share_groups: quota_share_groups_request - share_group_snapshots: quota_share_group_snapshots_request - share_type: share_type_for_quota - share_replicas: quota_share_replicas_request - replica_gigabytes: quota_replica_gigabytes_request - per_share_gigabytes: quota_per_share_gigabytes_request - backups: quota_backups_request - backup_gigabytes: quota_backup_gigabytes_request - encryption_keys: quota_encryption_keys_request Request example --------------- .. literalinclude:: samples/quota-update-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - quota_set: quota_set - id: quota_project_id - gigabytes: quota_gigabytes - snapshots: quota_snapshots - shares: quota_shares - snapshot_gigabytes: quota_snapshot_gigabytes - share_networks: quota_share_networks - share_groups: quota_share_groups - share_group_snapshots: quota_share_group_snapshots - share_replicas: quota_share_replicas - replica_gigabytes: quota_replica_gigabytes - per_share_gigabytes: quota_per_share_gigabytes - backups: quota_backups - backup_gigabytes: quota_backup_gigabytes - encryption_keys: quota_encryption_keys Response example ---------------- .. literalinclude:: samples/quota-update-response.json :language: javascript Delete quota set ~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/quota-sets/{project_id_quota_request_path}?user_id={user_id} Deletes quotas for a project. The quota reverts to the default quota. If you specify the optional ``user_id`` query parameter, you delete the quotas for this user in the project. 
If you omit this parameter, you delete the quotas for the project. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - project_id_quota_request_path: project_id_quota_request_path - user_id: user_id_query - share_type: share_type_for_quota ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/resource-locks.inc0000664000175000017500000001701500000000000021340 0ustar00zuulzuul00000000000000.. -*- rst -*- Resource Locks (since API v2.81) ================================ Create, list, update and delete locks on user actions on resources. Create a resource lock ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/resource-locks .. versionadded:: 2.81 Lock a specific action on a given resource. Not all resources are supported, and not actions on supported resources can be prevented with this mechanism. A lock can only be removed or manipulated by the user that created it, or by a more privileged user. The cloud administrator can use a ``policy.yaml`` file to tweak permissions on who can manipulate and delete locks created by other users. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - service_token: service_token_locks - resource_lock: resource_lock_object - resource_id: resource_lock_resource_id - resource_type: resource_lock_resource_type - resource_action: resource_lock_resource_action_create_optional - lock_reason: resource_lock_lock_reason_optional Request Example --------------- .. literalinclude:: ./samples/resource-lock-create-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - resource_lock: resource_lock_object - id: resource_lock_id - user_id: resource_lock_user_id - project_id: resource_lock_project_id - lock_context: resource_lock_lock_context - resource_type: resource_lock_resource_type - resource_id: resource_lock_resource_id - resource_action: resource_lock_resource_action - lock_reason: resource_lock_lock_reason - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: ./samples/resource-lock-create-response.json :language: javascript List resource locks ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/resource-locks .. versionadded:: 2.81 Retrieve resource locks with filters Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - service_token: service_token_locks - id: resource_lock_id_query - resource_id: resource_lock_resource_id_query - resource_action: resource_lock_resource_action_query - resource_type: resource_lock_resource_type_query - user_id: resource_lock_user_id_query - project_id: resource_lock_project_id_query - all_projects: resource_lock_all_projects_query - lock_context: resource_lock_lock_context_query - created_since: created_since_query - created_before: created_before_query - lock_reason: resource_lock_lock_reason_query - lock_reason~: resource_lock_lock_reason_inexact_query - sort_key: sort_key_resource_locks - sort_dir: sort_dir - offset: offset - with_count: with_count_query_without_min_version Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - resource_locks: resource_locks_object - id: resource_lock_id - user_id: resource_lock_user_id - project_id: resource_lock_project_id - lock_context: resource_lock_lock_context - resource_type: resource_lock_resource_type - resource_id: resource_lock_resource_id - resource_action: resource_lock_resource_action - lock_reason: resource_lock_lock_reason - created_at: created_at - updated_at: updated_at - links: links - count: count_without_min_version Response Example ---------------- .. literalinclude:: ./samples/resource-lock-get-all-response.json :language: javascript Get a resource lock ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/resource-locks/{resource-lock-id} .. versionadded:: 2.81 Retrieve a specific resource lock By default, resource locks can be viewed by all users within a project that owns the locks. The cloud administrator can use a ``policy.yaml`` file to tweak this behavior. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - service_token: service_token_locks - resource_lock_id: resource_lock_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - resource_lock: resource_lock_object - id: resource_lock_id - user_id: resource_lock_user_id - project_id: resource_lock_project_id - lock_context: resource_lock_lock_context - resource_type: resource_lock_resource_type - resource_id: resource_lock_resource_id - resource_action: resource_lock_resource_action - lock_reason: resource_lock_lock_reason - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: ./samples/resource-lock-get-response.json :language: javascript Update a resource lock ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/resource-locks/{resource-lock-id} .. 
versionadded:: 2.81 Update a specific resource lock By default, resource locks can be updated by the user that created the lock unless the ``lock_context`` is set to ``admin`` or ``service``. A user with ``service`` role is required to manipulate locks that have a ``lock_context`` set to ``service``. Users with ``admin`` role can manipulate all locks. Administrators can use ``policy.yaml`` to tweak this behavior. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - service_token: service_token_locks - resource_lock_id: resource_lock_id_path - resource_lock: resource_lock_object - resource_action: resource_lock_resource_action_optional - lock_reason: resource_lock_lock_reason_optional Request Example ---------------- .. literalinclude:: ./samples/resource-lock-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - resource_lock: resource_lock_object - id: resource_lock_id - user_id: resource_lock_user_id - project_id: resource_lock_project_id - lock_context: resource_lock_lock_context - resource_type: resource_lock_resource_type - resource_id: resource_lock_resource_id - resource_action: resource_lock_resource_action - lock_reason: resource_lock_lock_reason - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: ./samples/resource-lock-update-response.json :language: javascript Delete a resource lock ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/resource-locks/{resource-lock-id} .. versionadded:: 2.81 Delete a specific resource lock By default, resource locks can be deleted by the user that created the lock unless the ``lock_context`` is set to ``admin`` or ``service``. A user with ``service`` role is required to delete locks that have a ``lock_context`` set to ``service``. 
Users with ``admin`` role can delete any lock. Administrators can use ``policy.yaml`` to tweak this behavior. This request provides no response body. Response codes -------------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - service_token: service_token_locks - resource_lock_id: resource_lock_id_path ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7536738 manila-21.0.0/api-ref/source/samples/0000775000175000017500000000000000000000000017345 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/availability-zones-list-response.json0000664000175000017500000000034700000000000026657 0ustar00zuulzuul00000000000000{ "availability_zones": [ { "name": "nova", "created_at": "2015-09-18T09:50:55.000000", "updated_at": null, "id": "388c983d-258e-4a0e-b1ba-10da37d766db" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/export-location-list-response.json0000664000175000017500000000120000000000000026165 0ustar00zuulzuul00000000000000{ "export_locations": [ { "path": "10.254.0.3:/shares/share-e1c2d35e-fe67-4028-ad7a-45f668732b1d", "share_instance_id": "e1c2d35e-fe67-4028-ad7a-45f668732b1d", "is_admin_only": false, "id": "b6bd76ce-12a2-42a9-a30a-8a43b503867d", "preferred": false }, { "path": "10.0.0.3:/shares/share-e1c2d35e-fe67-4028-ad7a-45f668732b1d", "share_instance_id": "e1c2d35e-fe67-4028-ad7a-45f668732b1d", "is_admin_only": true, "id": "6921e862-88bc-49a5-a2df-efeed9acd583", "preferred": false } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/export-location-show-response.json0000664000175000017500000000061700000000000026205 0ustar00zuulzuul00000000000000{ "export_location": { "created_at": "2016-03-24T14:20:47.000000", "updated_at": "2016-03-24T14:20:47.000000", "preferred": false, "is_admin_only": true, "share_instance_id": "e1c2d35e-fe67-4028-ad7a-45f668732b1d", "path": "10.0.0.3:/shares/share-e1c2d35e-fe67-4028-ad7a-45f668732b1d", "id": "6921e862-88bc-49a5-a2df-efeed9acd583" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/extensions-list-response.json0000664000175000017500000000645500000000000025256 0ustar00zuulzuul00000000000000{ "extensions": [ { "alias": "os-extended-quotas", "updated": "2013-06-09T00:00:00+00:00", "name": "ExtendedQuotas", "links": [], "description": "Extend quotas. Adds ability for admins to delete quota and optionally force the update Quota command." }, { "alias": "os-quota-sets", "updated": "2011-08-08T00:00:00+00:00", "name": "Quotas", "links": [], "description": "Quotas management support." }, { "alias": "os-quota-class-sets", "updated": "2012-03-12T00:00:00+00:00", "name": "QuotaClasses", "links": [], "description": "Quota classes management support." }, { "alias": "os-share-unmanage", "updated": "2015-02-17T00:00:00+00:00", "name": "ShareUnmanage", "links": [], "description": "Enable share unmanage operation." }, { "alias": "os-types-manage", "updated": "2011-08-24T00:00:00+00:00", "name": "TypesManage", "links": [], "description": "Types manage support." }, { "alias": "share-actions", "updated": "2012-08-14T00:00:00+00:00", "name": "ShareActions", "links": [], "description": "Enable share actions." }, { "alias": "os-availability-zone", "updated": "2015-07-28T00:00:00+00:00", "name": "AvailabilityZones", "links": [], "description": "Describe Availability Zones." 
}, { "alias": "os-user-quotas", "updated": "2013-07-18T00:00:00+00:00", "name": "UserQuotas", "links": [], "description": "Project user quota support." }, { "alias": "os-share-type-access", "updated": "2015-03-02T00:00:00Z", "name": "ShareTypeAccess", "links": [], "description": "share type access support." }, { "alias": "os-types-extra-specs", "updated": "2011-08-24T00:00:00+00:00", "name": "TypesExtraSpecs", "links": [], "description": "Type extra specs support." }, { "alias": "os-admin-actions", "updated": "2015-08-03T00:00:00+00:00", "name": "AdminActions", "links": [], "description": "Enable admin actions." }, { "alias": "os-used-limits", "updated": "2014-03-27T00:00:00+00:00", "name": "UsedLimits", "links": [], "description": "Provide data on limited resources that are being used." }, { "alias": "os-services", "updated": "2012-10-28T00:00:00-00:00", "name": "Services", "links": [], "description": "Services support." }, { "alias": "os-share-manage", "updated": "2015-02-17T00:00:00+00:00", "name": "ShareManage", "links": [], "description": "Allows existing share to be 'managed' by Manila." 
} ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/limits-response.json0000664000175000017500000000147700000000000023406 0ustar00zuulzuul00000000000000{ "limits": { "rate": [], "absolute": { "totalShareNetworksUsed": 0, "maxTotalShareGigabytes": 1000, "maxTotalShareNetworks": 10, "totalSharesUsed": 0, "totalShareGigabytesUsed": 0, "totalShareSnapshotsUsed": 0, "maxTotalShares": 50, "totalSnapshotGigabytesUsed": 0, "maxTotalSnapshotGigabytes": 1000, "maxTotalShareSnapshots": 50, "maxTotalShareReplicas": 100, "maxTotalReplicaGigabytes": 1000, "totalShareReplicasUsed": 0, "totalReplicaGigabytesUsed": 0, "maxTotalShareBackups": 100, "maxTotalBackupGigabytes": 1000, "totalShareBackupsUsed": 0, "totalBackupGigabytesUsed": 0 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/pools-list-detailed-response.json0000664000175000017500000000752100000000000025757 0ustar00zuulzuul00000000000000{ "pools": [ { "name": "opencloud@alpha#ALPHA_pool", "host": "opencloud", "backend": "alpha", "pool": "ALPHA_pool", "capabilities": { "pool_name": "ALPHA_pool", "total_capacity_gb": 1230.0, "free_capacity_gb": 1210.0, "reserved_percentage": 0, "share_backend_name": "ALPHA", "storage_protocol": "NFS_CIFS", "vendor_name": "Open Source", "driver_version": "1.0", "timestamp": "2019-05-07T00:28:02.935569", "driver_handles_share_servers": true, "snapshot_support": true, "create_share_from_snapshot_support": true, "revert_to_snapshot_support": true, "mount_snapshot_support": true, "dedupe": false, "compression": false, "replication_type": null, "replication_domain": null, "sg_consistent_snapshot_support": "pool", "ipv4_support": true, "ipv6_support": false } }, { "name": "opencloud@beta#BETA_pool", "host": "opencloud", "backend": "beta", "pool": "BETA_pool", "capabilities": { "pool_name": "BETA_pool", 
"total_capacity_gb": 1230.0, "free_capacity_gb": 1210.0, "reserved_percentage": 0, "share_backend_name": "BETA", "storage_protocol": "NFS_CIFS", "vendor_name": "Open Source", "driver_version": "1.0", "timestamp": "2019-05-07T00:28:02.817309", "driver_handles_share_servers": true, "snapshot_support": true, "create_share_from_snapshot_support": true, "revert_to_snapshot_support": true, "mount_snapshot_support": true, "dedupe": false, "compression": false, "replication_type": null, "replication_domain": null, "sg_consistent_snapshot_support": "pool", "ipv4_support": true, "ipv6_support": false } }, { "name": "opencloud@gamma#GAMMA_pool", "host": "opencloud", "backend": "gamma", "pool": "GAMMA_pool", "capabilities": { "pool_name": "GAMMA_pool", "total_capacity_gb": 1230.0, "free_capacity_gb": 1210.0, "reserved_percentage": 0, "replication_type": "readable", "share_backend_name": "GAMMA", "storage_protocol": "NFS_CIFS", "vendor_name": "Open Source", "driver_version": "1.0", "timestamp": "2019-05-07T00:28:02.899888", "driver_handles_share_servers": false, "snapshot_support": true, "create_share_from_snapshot_support": true, "revert_to_snapshot_support": true, "mount_snapshot_support": true, "dedupe": false, "compression": false, "replication_domain": "replica_domain_store1", "sg_consistent_snapshot_support": "pool", "ipv4_support": true, "ipv6_support": false } }, { "name": "opencloud@delta#DELTA_pool", "host": "opencloud", "backend": "delta", "pool": "DELTA_pool", "capabilities": { "pool_name": "DELTA_pool", "total_capacity_gb": 1230.0, "free_capacity_gb": 1210.0, "reserved_percentage": 0, "replication_type": "readable", "share_backend_name": "DELTA", "storage_protocol": "NFS_CIFS", "vendor_name": "Open Source", "driver_version": "1.0", "timestamp": "2019-05-07T00:28:02.963660", "driver_handles_share_servers": false, "snapshot_support": true, "create_share_from_snapshot_support": true, "revert_to_snapshot_support": true, "mount_snapshot_support": true, "dedupe": false, 
"compression": false, "replication_domain": "replica_domain_store1", "sg_consistent_snapshot_support": "pool", "ipv4_support": true, "ipv6_support": false } } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/pools-list-response.json0000664000175000017500000000106300000000000024201 0ustar00zuulzuul00000000000000{ "pools": [ { "name": "opencloud@alpha#ALPHA_pool", "host": "opencloud", "backend": "alpha", "pool": "ALPHA_pool" }, { "name": "opencloud@beta#BETA_pool", "host": "opencloud", "backend": "beta", "pool": "BETA_pool" }, { "name": "opencloud@gamma#GAMMA_pool", "host": "opencloud", "backend": "gamma", "pool": "GAMMA_pool" }, { "name": "opencloud@delta#DELTA_pool", "host": "opencloud", "backend": "delta", "pool": "DELTA_pool" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/quota-classes-show-response.json0000664000175000017500000000064600000000000025644 0ustar00zuulzuul00000000000000{ "quota_class_set": { "share_groups": 50, "gigabytes": 1000, "share_group_snapshots": 50, "snapshots": 50, "snapshot_gigabytes": 1000, "shares": 50, "id": "default", "share_networks": 10, "share_replicas": 100, "replica_gigabytes": 1000, "per_share_gigabytes": -1, "backups": 50, "backup_gigabytes": 1000 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/quota-classes-update-request.json0000664000175000017500000000015400000000000025772 0ustar00zuulzuul00000000000000{ "quota_class_set": { "class_name": "test-qupta-class-update", "gigabytes": 20 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/quota-classes-update-response.json0000664000175000017500000000061300000000000026140 
0ustar00zuulzuul00000000000000{ "quota_class_set": { "share_groups": 50, "gigabytes": 20, "share_group_snapshots": 50, "snapshots": 50, "snapshot_gigabytes": 1000, "shares": 50, "share_networks": 10, "share_replicas": 100, "replica_gigabytes": 1000, "per_share_gigabytes": -1, "backups": 50, "backup_gigabytes": 1000 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/quota-show-detail-response.json0000664000175000017500000000301100000000000025436 0ustar00zuulzuul00000000000000{ "quota_set": { "id": "16e1ab15c35a457e9c2b2aa189f544e1", "gigabytes": {"in_use": 0, "limit": 1000, "reserved": 0}, "shares": {"in_use": 0, "limit": 50, "reserved": 0}, "snapshot_gigabytes": {"in_use": 0, "limit": 1000, "reserved": 0}, "snapshots": {"in_use": 0, "limit": 50, "reserved": 0}, "share_networks": {"in_use": 0, "limit": 10, "reserved": 0}, "share_groups": {"in_use": 0, "limit": 10, "reserved": 0}, "share_group_snapshots": {"in_use": 0, "limit": 10, "reserved": 0}, "share_replicas": {"in_use": 0, "limit": 100, "reserved": 0}, "replica_gigabytes": {"in_use": 0, "limit": 1000, "reserved": 0}, "per_share_gigabytes": {"in_use": 0, "limit": -1, "reserved": 0}, "backup_gigabytes": {"in_use": 0, "limit": 1000, "reserved": 0}, "backups": {"in_use": 0, "limit": 50, "reserved": 0} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/quota-show-response.json0000664000175000017500000000067100000000000024207 0ustar00zuulzuul00000000000000{ "quota_set": { "gigabytes": 1000, "shares": 50, "snapshot_gigabytes": 1000, "snapshots": 50, "id": "16e1ab15c35a457e9c2b2aa189f544e1", "share_networks": 10, "share_groups": 10, "share_group_snapshots": 10, "share_replicas": 100, "replica_gigabytes": 1000, "per_share_gigabytes": -1, "backups": 50, "backup_gigabytes": 1000 } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/quota-update-request.json0000664000175000017500000000026500000000000024342 0ustar00zuulzuul00000000000000{ "quota_set": { "snapshot_gigabytes": 999, "snapshots": 49, "share_networks": 9, "share_replicas": 89, "per_share_gigabytes": 5 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/quota-update-response.json0000664000175000017500000000060300000000000024504 0ustar00zuulzuul00000000000000{ "quota_set": { "gigabytes": 1000, "snapshot_gigabytes": 999, "shares": 50, "snapshots": 49, "share_networks": 9, "share_groups": 12, "share_group_snapshots": 12, "share_replicas": 89, "replica_gigabytes": 1000, "per_share_gigabytes": -1, "backups": 40, "backup_gigabytes": 500 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/resource-lock-create-request.json0000664000175000017500000000035200000000000025744 0ustar00zuulzuul00000000000000{ "resource_lock": { "resource_id": "5a313549-d346-44b6-9650-738ce08a9fee", "resource_type": "share", "resource_action": "delete", "lock_reason": "Locked for deletion until year end audit." 
} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/resource-lock-create-response.json0000664000175000017500000000140100000000000026106 0ustar00zuulzuul00000000000000{ "resource_lock": { "id": "713dc92d-bf5e-4b04-875b-2b2d284d8f94", "user_id": "89de351d3b5744b9853ec4829aa0e714", "project_id": "db2e72fef7864bbbbf210f22da7f1158", "lock_context": "user", "resource_type": "share", "resource_id": "5a313549-d346-44b6-9650-738ce08a9fee", "resource_action": "delete", "lock_reason": "Locked for deletion until year end audit.", "created_at": "2023-07-17T22:11:48.144302", "updated_at": null, "links": [ { "rel": "self", "href": "http://203.0.113.30/share/v2/resource_locks/713dc92d-bf5e-4b04-875b-2b2d284d8f94" }, { "rel": "bookmark", "href": "http://203.0.113.30/share/resource_locks/713dc92d-bf5e-4b04-875b-2b2d284d8f94" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/resource-lock-get-all-response.json0000664000175000017500000000305000000000000026172 0ustar00zuulzuul00000000000000{ "resource_locks": [ { "id": "118750ee-b62b-4cae-9a94-7da29a4f831f", "user_id": "89de351d3b5744b9853ec4829aa0e714", "project_id": "db2e72fef7864bbbbf210f22da7f1158", "lock_context": "user", "resource_type": "share", "resource_id": "4c0b4d35-4ea8-4811-a1e2-a065c64225a8", "resource_action": "delete", "lock_reason": null, "created_at": "2023-07-17T22:53:18.894553", "updated_at": null, "links": [ { "rel": "self", "href": "http://203.0.113.30/share/v2/resource_locks/118750ee-b62b-4cae-9a94-7da29a4f831f" }, { "rel": "bookmark", "href": "http://203.0.113.30/share/resource_locks/118750ee-b62b-4cae-9a94-7da29a4f831f" } ] }, { "id": "713dc92d-bf5e-4b04-875b-2b2d284d8f94", "user_id": "89de351d3b5744b9853ec4829aa0e714", "project_id": "db2e72fef7864bbbbf210f22da7f1158", "lock_context": "user", "resource_type": 
"share", "resource_id": "5a313549-d346-44b6-9650-738ce08a9fee", "resource_action": "delete", "lock_reason": "Locked for deletion until year end audit.", "created_at": "2023-07-17T22:11:48.144302", "updated_at": null, "links": [ { "rel": "self", "href": "http://203.0.113.30/share/v2/resource_locks/713dc92d-bf5e-4b04-875b-2b2d284d8f94" }, { "rel": "bookmark", "href": "http://203.0.113.30/share/resource_locks/713dc92d-bf5e-4b04-875b-2b2d284d8f94" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/resource-lock-get-response.json0000664000175000017500000000140100000000000025422 0ustar00zuulzuul00000000000000{ "resource_lock": { "id": "713dc92d-bf5e-4b04-875b-2b2d284d8f94", "user_id": "89de351d3b5744b9853ec4829aa0e714", "project_id": "db2e72fef7864bbbbf210f22da7f1158", "lock_context": "user", "resource_type": "share", "resource_id": "5a313549-d346-44b6-9650-738ce08a9fee", "resource_action": "delete", "lock_reason": "Locked for deletion until year end audit.", "created_at": "2023-07-17T22:11:48.144302", "updated_at": null, "links": [ { "rel": "self", "href": "http://203.0.113.30/share/v2/resource_locks/713dc92d-bf5e-4b04-875b-2b2d284d8f94" }, { "rel": "bookmark", "href": "http://203.0.113.30/share/resource_locks/713dc92d-bf5e-4b04-875b-2b2d284d8f94" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/resource-lock-update-request.json0000664000175000017500000000012400000000000025760 0ustar00zuulzuul00000000000000{ "resource_lock": { "lock_reason": "This is a protected share" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/resource-lock-update-response.json0000664000175000017500000000141100000000000026126 0ustar00zuulzuul00000000000000{ "resource_lock": { "id": 
"118750ee-b62b-4cae-9a94-7da29a4f831f", "user_id": "89de351d3b5744b9853ec4829aa0e714", "project_id": "db2e72fef7864bbbbf210f22da7f1158", "lock_context": "user", "resource_type": "share", "resource_id": "4c0b4d35-4ea8-4811-a1e2-a065c64225a8", "resource_action": "delete", "lock_reason": "This is a protected share", "created_at": "2023-07-17T22:53:18.894553", "updated_at": "2023-07-17T23:18:44.284565", "links": [ { "rel": "self", "href": "http://203.0.113.30/share/v2/resource_locks/118750ee-b62b-4cae-9a94-7da29a4f831f" }, { "rel": "bookmark", "href": "http://203.0.113.30/share/resource_locks/118750ee-b62b-4cae-9a94-7da29a4f831f" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/security-service-create-request.json0000664000175000017500000000035400000000000026476 0ustar00zuulzuul00000000000000{ "security_service": { "description": "Creating my first Security Service", "dns_ip": "10.0.0.0/24", "user": "demo", "password": "***", "type": "kerberos", "name": "SecServ1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/security-service-create-response.json0000664000175000017500000000106200000000000026641 0ustar00zuulzuul00000000000000{ "security_service": { "status": "new", "domain": null, "ou": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "name": "SecServ1", "created_at": "2015-09-07T12:19:10.695211", "updated_at": null, "server": null, "default_ad_site": null, "dns_ip": "10.0.0.0/24", "user": "demo", "password": "supersecret", "type": "kerberos", "id": "3c829734-0679-4c17-9637-801da48c0d5f", "description": "Creating my first Security Service" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/security-service-show-response.json0000664000175000017500000000106200000000000026356 0ustar00zuulzuul00000000000000{ "security_service": { "status": "new", "domain": null, "ou": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "name": "SecServ1", "created_at": "2015-09-07T12:19:10.000000", "updated_at": null, "server": null, "default_ad_site": null, "dns_ip": "10.0.0.0/24", "user": "demo", "password": "supersecret", "type": "kerberos", "id": "3c829734-0679-4c17-9637-801da48c0d5f", "description": "Creating my first Security Service" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/security-service-update-request.json0000664000175000017500000000030600000000000026512 0ustar00zuulzuul00000000000000{ "security_service": { "domain": "my_domain", "ou": "CN=Computers", "password": "***", "user": "new_user", "description": "Adding a description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/security-service-update-response.json0000664000175000017500000000105100000000000026656 0ustar00zuulzuul00000000000000{ "security_service": { "status": "new", "domain": "my_domain", "ou": "CN=Computers", "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "name": "SecServ1", "created_at": "2015-09-07T12:19:10.000000", "updated_at": "2015-09-07T12:47:21.858737", "server": null, "dns_ip": "10.0.0.0/24", "user": "new_user", "password": "pass", "type": "kerberos", "id": "3c829734-0679-4c17-9637-801da48c0d5f", "description": "Adding a description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/security-services-list-detailed-response.json0000664000175000017500000000243000000000000030305 0ustar00zuulzuul00000000000000{ "security_services": [ { "status": 
"new", "domain": null, "ou": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "name": "SecServ1", "created_at": "2015-09-07T12:19:10.000000", "description": "Creating my first Security Service", "updated_at": null, "server": null, "default_ad_site": null, "dns_ip": "10.0.0.0/24", "user": "demo", "password": "supersecret", "type": "kerberos", "id": "3c829734-0679-4c17-9637-801da48c0d5f", "share_networks": [] }, { "status": "new", "domain": null, "ou": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "name": "SecServ2", "created_at": "2015-09-07T12:25:03.000000", "description": "Creating my second Security Service", "updated_at": null, "server": null, "default_ad_site": null, "dns_ip": "10.0.0.0/24", "user": null, "password": null, "type": "ldap", "id": "5a1d3a12-34a7-4087-8983-50e9ed03509a", "share_networks": [] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/security-services-list-for-share-network-response.json0000664000175000017500000000264000000000000032112 0ustar00zuulzuul00000000000000{ "security_services": [ { "status": "new", "domain": null, "ou": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "name": "SecServ1", "created_at": "2015-09-07T12:19:10.000000", "description": "Creating my first Security Service", "updated_at": null, "server": null, "default_ad_site": null, "dns_ip": "10.0.0.0/24", "user": "demo", "password": "supersecret", "type": "kerberos", "id": "3c829734-0679-4c17-9637-801da48c0d5f", "share_networks": [ "d8ae6799-2567-4a89-aafb-fa4424350d2b" ] }, { "status": "new", "domain": null, "ou": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "name": "SecServ2", "created_at": "2015-09-07T12:25:03.000000", "description": "Creating my second Security Service", "updated_at": null, "server": null, "default_ad_site": null, "dns_ip": "10.0.0.0/24", "user": null, "password": null, "type": "ldap", "id": 
"5a1d3a12-34a7-4087-8983-50e9ed03509a", "share_networks": [ "d8ae6799-2567-4a89-aafb-fa4424350d2b" ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/security-services-list-response.json0000664000175000017500000000056600000000000026544 0ustar00zuulzuul00000000000000{ "security_services": [ { "status": "new", "type": "kerberos", "id": "3c829734-0679-4c17-9637-801da48c0d5f", "name": "SecServ1" }, { "status": "new", "type": "ldap", "id": "5a1d3a12-34a7-4087-8983-50e9ed03509a", "name": "SecServ2" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/service-disable-request.json0000664000175000017500000000022400000000000024765 0ustar00zuulzuul00000000000000{ "binary": "manila-share", "host": "openstackhost@generic#pool_0", "disabled_reason": "Service taken down for maintenance until May" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/service-disable-response.json0000664000175000017500000000025600000000000025140 0ustar00zuulzuul00000000000000{ "binary": "manila-share", "host": "openstackhost@generic#pool_0", "status": "disabled", "disabled_reason": "Service taken down for maintenance until May" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/service-enable-request.json0000664000175000017500000000011500000000000024607 0ustar00zuulzuul00000000000000{ "binary": "manila-share", "host": "openstackhost@generic#pool_0" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/service-enable-response.json0000664000175000017500000000020100000000000024751 0ustar00zuulzuul00000000000000{ "binary": 
"manila-share", "host": "openstackhost@generic#pool_0", "status": "enabled", "disabled_reason": "" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/service-ensure-shares-request.json0000664000175000017500000000005700000000000026152 0ustar00zuulzuul00000000000000{ "host": "openstackhost@storagebackend" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/services-list-response.json0000664000175000017500000000104400000000000024667 0ustar00zuulzuul00000000000000{ "services": [ { "status": "enabled", "binary": "manila-share", "zone": "nova", "host": "manila2@generic1", "updated_at": "2015-09-07T13:03:57.000000", "state": "up", "id": 1 }, { "status": "enabled", "binary": "manila-scheduler", "zone": "nova", "host": "manila2", "updated_at": "2015-09-07T13:03:57.000000", "state": "up", "id": 2 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/services-list-with-filters-response.json0000664000175000017500000000044200000000000027307 0ustar00zuulzuul00000000000000{ "services": [ { "status": "enabled", "binary": "manila-share", "zone": "nova", "host": "manila2@generic1", "updated_at": "2015-09-07T13:14:27.000000", "state": "up", "id": 1 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-access-rules-list-response.json0000664000175000017500000000170500000000000026541 0ustar00zuulzuul00000000000000{ "access_list": [ { "access_level": "rw", "state": "error", "id": "507bf114-36f2-4f56-8cf4-857985ca87c1", "access_type": "cert", "access_to": "example.com", "access_key": null, "created_at": "2018-07-17T02:01:04.000000", "updated_at": "2018-07-17T02:01:04.000000", "metadata": { "key1": 
"value1", "key2": "value2" } }, { "access_level": "rw", "state": "active", "id": "a25b2df3-90bd-4add-afa6-5f0dbbd50452", "access_type": "ip", "access_to": "0.0.0.0/0", "access_key": null, "created_at": "2018-07-16T01:03:21.000000", "updated_at": "2018-07-16T01:03:21.000000", "metadata": { "key3": "value3", "key4": "value4" } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-access-rules-show-response.json0000664000175000017500000000074600000000000026552 0ustar00zuulzuul00000000000000{ "access": { "access_level": "rw", "state": "error", "id": "507bf114-36f2-4f56-8cf4-857985ca87c1", "share_id": "fb213952-2352-41b4-ad7b-2c4c69d13eef", "access_type": "cert", "access_to": "example.com", "access_key": null, "created_at": "2018-07-17T02:01:04.000000", "updated_at": "2018-07-17T02:01:04.000000", "metadata": { "key1": "value1", "key2": "value2" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-access-rules-update-metadata-request.json0000664000175000017500000000021300000000000030451 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "speed": "my_fast_access", "new_metadata_key": "new_information" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-access-rules-update-metadata-response.json0000664000175000017500000000021300000000000030617 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "speed": "my_fast_access", "new_metadata_key": "new_information" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-access-rules-update-request.json0000664000175000017500000000007600000000000026702 0ustar00zuulzuul00000000000000{ 
"update_access": { "access_level": "ro" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-access-rules-update-response.json0000664000175000017500000000074200000000000027050 0ustar00zuulzuul00000000000000{ "access": { "access_level": "ro", "state": "error", "id": "507bf114-36f2-4f56-8cf4-857985ca87c1", "share_id": "fb213952-2352-41b4-ad7b-2c4c69d13eef", "access_type": "ip", "access_to": "0.0.0.0/0", "access_key": null, "created_at": "2024-12-17T02:01:04.000000", "updated_at": "2024-12-17T02:01:04.000000", "metadata": { "key1": "value1", "key2": "value2" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-extend-request.json0000664000175000017500000000011100000000000025744 0ustar00zuulzuul00000000000000{ "extend": { "new_size": 2, "force": "true" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-force-delete-request.json0000664000175000017500000000003500000000000027020 0ustar00zuulzuul00000000000000{ "force_delete": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-grant-access-request.json0000664000175000017500000000053400000000000027040 0ustar00zuulzuul00000000000000{ "allow_access": { "access_level": "rw", "access_type": "ip", "access_to": "0.0.0.0/0", "metadata":{ "key1": "value1", "key2": "value2" }, "lock_visibility": false, "lock_deletion": true, "lock_reason": "Locked for deletion until year end audit." 
} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-grant-access-response.json0000664000175000017500000000065700000000000027214 0ustar00zuulzuul00000000000000{ "access": { "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "created_at": "2015-09-07T09:14:48.000000", "updated_at": null, "access_type": "ip", "access_to": "0.0.0.0/0", "access_level": "rw", "access_key": null, "id": "a25b2df3-90bd-4add-afa6-5f0dbbd50452", "metadata":{ "key1": "value1", "key2": "value2" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-list-access-rules-request.json0000664000175000017500000000003400000000000030023 0ustar00zuulzuul00000000000000{ "access_list": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-list-access-rules-response.json0000664000175000017500000000101500000000000030171 0ustar00zuulzuul00000000000000{ "access_list": [ { "access_level": "rw", "state": "error", "id": "507bf114-36f2-4f56-8cf4-857985ca87c1", "access_type": "cert", "access_to": "example.com", "access_key": null }, { "access_level": "rw", "state": "active", "id": "a25b2df3-90bd-4add-afa6-5f0dbbd50452", "access_type": "ip", "access_to": "0.0.0.0/0", "access_key": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-reset-state-request.json0000664000175000017500000000007200000000000026723 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "error" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-actions-restore-request.json0000664000175000017500000000003000000000000026140 0ustar00zuulzuul00000000000000{ "restore": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-revert-to-snapshot-request.json0000664000175000017500000000013000000000000030242 0ustar00zuulzuul00000000000000{ "revert": { "snapshot_id": "6020af24-a305-4155-9a29-55e20efcb0e8" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-revoke-access-request.json0000664000175000017500000000016700000000000027222 0ustar00zuulzuul00000000000000{ "deny_access": { "access_id": "a25b2df3-90bd-4add-afa6-5f0dbbd50452", "unrestrict": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-shrink-request.json0000664000175000017500000000006000000000000025756 0ustar00zuulzuul00000000000000{ "shrink": { "new_size": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-soft-delete-request.json0000664000175000017500000000003400000000000026674 0ustar00zuulzuul00000000000000{ "soft_delete": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-actions-unmanage-request.json0000664000175000017500000000003100000000000026251 0ustar00zuulzuul00000000000000{ "unmanage": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backup-create-request.json0000664000175000017500000000026100000000000025533 0ustar00zuulzuul00000000000000{ 
"share_backup": { "share_id": "7b11dd53-546e-43cd-af0e-875434238c30", "backup_options": {}, "description": null, "name": "backup1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backup-create-response.json0000664000175000017500000000067700000000000025714 0ustar00zuulzuul00000000000000{ "share_backup": { "id": "c1cdc0ce-4ddc-4018-9796-505d2e26fcc7", "share_id": "7b11dd53-546e-43cd-af0e-875434238c30", "status": "creating", "name": "backup1", "description": null, "size": 1, "created_at": "2023-08-16T13:03:59.020692", "updated_at": "2023-08-16T13:03:59.020692", "availability_zone": null, "progress": "0", "restore_progress": "0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backup-reset-status-request.json0000664000175000017500000000007200000000000026733 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "error" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backup-restore-request.json0000664000175000017500000000003000000000000025745 0ustar00zuulzuul00000000000000{ "restore": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backup-restore-response.json0000664000175000017500000000022300000000000026117 0ustar00zuulzuul00000000000000{ "restore": { "backup_id": "c1cdc0ce-4ddc-4018-9796-505d2e26fcc7", "share_id": "7b11dd53-546e-43cd-af0e-875434238c30" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backup-show-response.json0000664000175000017500000000070200000000000025416 0ustar00zuulzuul00000000000000{ "share_backup": { "id": 
"c1cdc0ce-4ddc-4018-9796-505d2e26fcc7", "share_id": "7b11dd53-546e-43cd-af0e-875434238c30", "status": "available", "name": "backup1", "description": null, "size": 1, "created_at": "2023-08-16T13:03:59.000000", "updated_at": "2023-08-16T13:04:15.000000", "availability_zone": null, "progress": "100", "restore_progress": "0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backup-update-request.json0000664000175000017500000000022700000000000025554 0ustar00zuulzuul00000000000000{ "share_backup": { "display_name": "backup2", "display_description": "I am changing a description also. Here is a backup" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backup-update-response.json0000664000175000017500000000076200000000000025726 0ustar00zuulzuul00000000000000{ "share_backup": { "id": "fa32a89f-ed0f-4906-b1d7-92eedf98fbb5", "share_id": "7b11dd53-546e-43cd-af0e-875434238c30", "status": "available", "name": "backup2", "description": "I am changing a description also. 
Here is a backup", "size": 1, "created_at": "2023-08-16T13:18:55.000000", "updated_at": "2023-08-16T13:33:15.000000", "availability_zone": null, "progress": "100", "restore_progress": "0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backups-list-detailed-response.json0000664000175000017500000000174200000000000027352 0ustar00zuulzuul00000000000000{ "share_backups": [ { "id": "1125c47a-0216-4ee0-a517-0460d63301a6", "share_id": "112dffd-f033-4248-a315-319ca2bd70c8", "status": "available", "name": "backup3", "description": null, "size": 1, "created_at": "2023-08-16T12:34:57.000000", "updated_at": "2023-08-17T12:14:15.000000", "availability_zone": null, "progress": "100", "restore_progress": "0" }, { "id": "c1cdc0ce-4ddc-4018-9796-505d2e26fcc7", "share_id": "7b11dd53-546e-43cd-af0e-875434238c30", "status": "creating", "name": "backup1", "description": null, "size": 1, "created_at": "2023-08-16T13:03:59.020692", "updated_at": "2023-08-16T13:13:15.000002", "availability_zone": null, "progress": "0", "restore_progress": "0" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-backups-list-response.json0000664000175000017500000000067600000000000025606 0ustar00zuulzuul00000000000000{ "share_backups": [ { "id": "1125c47a-0216-4ee0-a517-0460d63301a6", "name": "backup3", "share_id": "112dffd-f033-4248-a315-319ca2bd70c8", "status": "available" }, { "id": "c1cdc0ce-4ddc-4018-9796-505d2e26fcc7", "name": "backup1", "share_id": "7b11dd53-546e-43cd-af0e-875434238c30", "status": "creating" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-create-request.json0000664000175000017500000000130600000000000024271 0ustar00zuulzuul00000000000000{ "share": { "description": "My 
custom share London", "share_type": null, "share_proto": "nfs", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "share_group_id": null, "name": "share_London", "snapshot_id": null, "is_public": true, "size": 1, "metadata": { "project": "my_app", "aim": "doc" }, "scheduler_hints": { "same_host": "d9c66489-cf02-4156-b0f2-527f3211b243,4ffee55f-ba98-42d2-a8ce-e7cecb169182", "different_host": "903685eb-f242-4105-903d-4bef2db94be4" }, "encryption_key_ref": "86babe9b-7277-4c3a-a081-6eb3eac9231d" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-create-response.json0000664000175000017500000000265400000000000024446 0ustar00zuulzuul00000000000000{ "share": { "status": null, "progress": null, "share_server_id": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "name": "share_London", "share_type": "25747776-08e5-494f-ab40-a64b9d20d8f7", "share_type_name": "default", "availability_zone": null, "created_at": "2015-09-18T10:25:24.533287", "export_location": null, "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", "rel": "bookmark" } ], "share_network_id": null, "share_group_id": null, "export_locations": [], "share_proto": "NFS", "host": null, "access_rules_status": "active", "has_replicas": false, "replication_type": null, "task_state": null, "snapshot_support": true, "volume_type": "default", "snapshot_id": null, "is_public": true, "metadata": { "project": "my_app", "aim": "doc" }, "id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", "size": 1, "description": "My custom share London" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-group-create-request.json0000664000175000017500000000064000000000000025423 0ustar00zuulzuul00000000000000{ "share_group": { "share_types": ["ecd11f4c-d811-4471-b656-c755c77e02ba"], "name": "my_group", "description": "for_test", "share_group_type_id": "89861c2a-10bf-4013-bdd4-3d020466aee4", "availability_zone": "nova", "share_network_id": "82168c2a-10bf-4013-bcc4-3d984136aee3", "source_share_group_snapshot_id": "69861c2a-10bf-4013-bcc4-3d020466aee3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-create-response.json0000664000175000017500000000204100000000000025566 0ustar00zuulzuul00000000000000{ "share_groups": { "status": "creating", "description": null, "links": [ { "href": "http://192.168.98.191:8786/v2/e23850eeb91d4fa3866af634223e454c/share_groups/f9c1f80c-2392-4e34-bd90-fc89cdc5bf93", "rel": "self" }, { "href": "http://192.168.98.191:8786/e23850eeb91d4fa3866af634223e454c/share_groups/f9c1f80c-2392-4e34-bd90-fc89cdc5bf93", "rel": "bookmark" } ], "availability_zone": null, "source_share_group_snapshot_id": null, "share_network_id": null, "share_server_id": null, "host": null, "share_group_type_id": "89861c2a-10bf-4013-bdd4-3d020466aee4", "consistent_snapshot_support": null, "id": "f9c1f80c-2392-4e34-bd90-fc89cdc5bf93", "name": null, "created_at": "2017-08-03T19:20:33.974421", "project_id": "e23850eeb91d4fa3866af634223e454c", "share_types": ["ecd11f4c-d811-4471-b656-c755c77e02ba"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-reset-state-request.json0000664000175000017500000000007200000000000026417 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "error" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-group-show-response.json0000664000175000017500000000213000000000000025302 0ustar00zuulzuul00000000000000{ "share_groups": { "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/011d21e2-fbc3-4e4a-9993-9ea223f73264", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/011d21e2-fbc3-4e4a-9993-9ea223f73264", "rel": "bookmark" } ], "availability_zone": "nova", "consistent_snapshot_support": true, "share_group_type_id": "313df749-aac0-1a54-af52-10f6c991e80c", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", "share_types": ["25747776-08e5-494f-ab40-a64b9d20d8f7"], "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "status": "available", "description": "My custom share London", "host": "manila2@generic1#GENERIC1", "source_share_group_snapshot_id": null, "name": "share_London", "created_at": "2015-09-18T10:25:24.000000" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-snapshot-actions-reset-state-request.json0000664000175000017500000000007200000000000031712 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "error" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-snapshot-create-request.json0000664000175000017500000000025400000000000027261 0ustar00zuulzuul00000000000000{ "share_group_snapshot": { "share_group_id": "cd7a3d06-23b3-4d05-b4ca-7c9a20faa95f", "name": "test", "description": "test description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-snapshot-create-response.json0000664000175000017500000000146100000000000027430 
0ustar00zuulzuul00000000000000{ "share_group_snapshot": { "status": "creating", "share_group_id": "cd7a3d06-23b3-4d05-b4ca-7c9a20faa95f", "links": [ { "href": "http://192.168.98.191:8786/v2/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "self" }, { "href": "http://192.168.98.191:8786/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "bookmark" } ], "name": null, "members": [], "created_at": "2017-08-10T03:01:39.442509", "project_id": "e23850eeb91d4fa3866af634223e454c", "id": "46bf5875-58d6-4816-948f-8828423b0b9f", "description": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-snapshot-show-response.json0000664000175000017500000000146100000000000027145 0ustar00zuulzuul00000000000000{ "share_group_snapshot": { "status": "creating", "share_group_id": "cd7a3d06-23b3-4d05-b4ca-7c9a20faa95f", "links": [ { "href": "http://192.168.98.191:8786/v2/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "self" }, { "href": "http://192.168.98.191:8786/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "bookmark" } ], "name": null, "members": [], "created_at": "2017-08-10T03:01:39.442509", "project_id": "e23850eeb91d4fa3866af634223e454c", "id": "46bf5875-58d6-4816-948f-8828423b0b9f", "description": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-snapshot-update-request.json0000664000175000017500000000016300000000000027277 0ustar00zuulzuul00000000000000{ "share_group_snapshot": { "name": "update name", "description": "update description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-group-snapshot-update-response.json0000664000175000017500000000146100000000000027447 0ustar00zuulzuul00000000000000{ "share_group_snapshot": { "status": "creating", "share_group_id": "cd7a3d06-23b3-4d05-b4ca-7c9a20faa95f", "links": [ { "href": "http://192.168.98.191:8786/v2/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "self" }, { "href": "http://192.168.98.191:8786/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "bookmark" } ], "name": null, "members": [], "created_at": "2017-08-10T03:01:39.442509", "project_id": "e23850eeb91d4fa3866af634223e454c", "id": "46bf5875-58d6-4816-948f-8828423b0b9f", "description": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-snapshots-list-detailed-response.json0000664000175000017500000000336600000000000031102 0ustar00zuulzuul00000000000000{ "share_group_snapshots": [ { "status": "available", "share_group_id": "cd7a3d06-23b3-4d05-b4ca-7c9a20faa95f", "links": [ { "href": "http://192.168.98.191:8786/v2/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "self" }, { "href": "http://192.168.98.191:8786/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "bookmark" } ], "name": null, "members": [], "created_at": "2017-08-10T03:01:39.000000", "project_id": "e23850eeb91d4fa3866af634223e454c", "id": "46bf5875-58d6-4816-948f-8828423b0b9f", "description": null }, { "status": "available", "share_group_id": "cd7a3d06-23b3-4d05-b4ca-7c9a20faa95f", "links": [ { "href": "http://192.168.98.191:8786/v2/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/9d8ed9be-4454-4df0-b0ae-8360b623d93d", "rel": "self" }, { "href": 
"http://192.168.98.191:8786/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/9d8ed9be-4454-4df0-b0ae-8360b623d93d", "rel": "bookmark" } ], "name": null, "members": [], "created_at": "2017-08-10T03:01:28.000000", "project_id": "e23850eeb91d4fa3866af634223e454c", "id": "9d8ed9be-4454-4df0-b0ae-8360b623d93d", "description": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-snapshots-list-members-response.json0000664000175000017500000000125500000000000030754 0ustar00zuulzuul00000000000000{ "share_group_snapshot_members": [ { "status": "available", "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "created_at": "2017-09-07T11:50:39.000000", "share_proto": "NFS", "share_size": 1, "id": "6d221c1d-0200-461e-8d20-24b4776b9ddb", "size": 1 }, { "status": "available", "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "created_at": "2015-09-07T11:50:39.000000", "share_proto": "NFS", "share_size": 1, "id": "6d221c1d-0200-461e-8d20-24b4776b9ddb", "size": 1 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-snapshots-list-response.json0000664000175000017500000000116300000000000027322 0ustar00zuulzuul00000000000000{ "share_group_snapshot": [ { "links": [ { "href": "http://192.168.98.191:8786/v2/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "self" }, { "href": "http://192.168.98.191:8786/e23850eeb91d4fa3866af634223e454c/share_group_snapshot/46bf5875-58d6-4816-948f-8828423b0b9f", "rel": "bookmark" } ], "name": null, "id": "46bf5875-58d6-4816-948f-8828423b0b9f" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-type-create-request.json0000664000175000017500000000035500000000000026405 
0ustar00zuulzuul00000000000000{ "share_group_type": { "is_public": true, "group_specs": { "snapshot_support": true }, "share_types": ["ecd11f4c-d811-4471-b656-c755c77e02ba"], "name": "my_new_group_type" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-type-create-response.json0000664000175000017500000000042000000000000026544 0ustar00zuulzuul00000000000000{ "share_group_type": { "is_public": true, "group_specs": {}, "share_types": ["ecd11f4c-d811-4471-b656-c755c77e02ba"], "id": "89861c2a-10bf-4013-bdd4-3d020466aee4", "name": "test_group_type", "is_default": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-type-grant-access-request.json0000664000175000017500000000013200000000000027505 0ustar00zuulzuul00000000000000{ "addProjectAccess": { "project": "e1284adea3ee4d2482af5ed214f3ad90" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-type-revoke-access-request.json0000664000175000017500000000013500000000000027670 0ustar00zuulzuul00000000000000{ "removeProjectAccess": { "project": "818a3f48dcd644909b3fa2e45a399a27" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-type-set-request.json0000664000175000017500000000011000000000000025722 0ustar00zuulzuul00000000000000{ "group_specs": { "my_group_key": "my_group_value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-type-set-response.json0000664000175000017500000000011000000000000026070 0ustar00zuulzuul00000000000000{ "group_specs": { "my_group_key": "my_group_value" } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-types-default-list-response.json0000664000175000017500000000041700000000000030067 0ustar00zuulzuul00000000000000{ "share_group_type": { "is_public": true, "group_specs": {}, "share_types": ["ecd11f4c-d811-4471-b656-c755c77e02ba"], "id": "89861c2a-10bf-4013-bdd4-3d020466aee4", "name": "test_group_type", "is_default": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-types-group-specs-list-response.json0000664000175000017500000000010200000000000030701 0ustar00zuulzuul00000000000000{ "group_specs": { "snapshot_support": "True" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-types-list-access-response.json0000664000175000017500000000054400000000000027705 0ustar00zuulzuul00000000000000{ "share_group_type_access": [ { "share_group_type_id": "1732f284-401d-41d9-a494-425451e8b4b8", "project_id": "818a3f48dcd644909b3fa2e45a399a27" }, { "share_group_type_id": "1732f284-401d-41d9-a494-425451e8b4b8", "project_id": "e1284adea3ee4d2482af5ed214f3ad90" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-types-list-response.json0000664000175000017500000000047500000000000026451 0ustar00zuulzuul00000000000000{ "share_group_types": [ { "is_public": true, "group_specs": {}, "share_types": ["ecd11f4c-d811-4471-b656-c755c77e02ba"], "id": "89861c2a-10bf-4013-bdd4-3d020466aee4", "name": "test_group_type", "is_default": false } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-group-update-request.json0000664000175000017500000000017200000000000025442 0ustar00zuulzuul00000000000000{ "share_group": { "name": "new name", "description": "Changing the share group description." } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-group-update-response.json0000664000175000017500000000211200000000000025604 0ustar00zuulzuul00000000000000{ "share_groups": { "status": "creating", "description": "Changing the share group description.", "links": [ { "href": "http://192.168.98.191:8786/v2/e23850eeb91d4fa3866af634223e454c/share_groups/f9c1f80c-2392-4e34-bd90-fc89cdc5bf93", "rel": "self" }, { "href": "http://192.168.98.191:8786/e23850eeb91d4fa3866af634223e454c/share_groups/f9c1f80c-2392-4e34-bd90-fc89cdc5bf93", "rel": "bookmark" } ], "availability_zone": null, "source_share_group_snapshot_id": null, "share_network_id": null, "share_server_id": null, "host": null, "share_group_type_id": "89861c2a-10bf-4013-bdd4-3d020466aee4", "consistent_snapshot_support": null, "id": "f9c1f80c-2392-4e34-bd90-fc89cdc5bf93", "name": "new name", "created_at": "2017-08-03T19:20:33.974421", "project_id": "e23850eeb91d4fa3866af634223e454c", "share_types": ["ecd11f4c-d811-4471-b656-c755c77e02ba"] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-groups-list-detailed-response.json0000664000175000017500000000455100000000000027242 0ustar00zuulzuul00000000000000{ "share_groups": [ { "id": "b94a8548-2079-4be0-b21c-0a887acd31ca", "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/b94a8548-2079-4be0-b21c-0a887acd31ca", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/b94a8548-2079-4be0-b21c-0a887acd31ca", "rel": "bookmark" } ], "name": 
"My_share_group", "availability_zone": "nova", "consistent_snapshot_support": true, "share_group_type_id": "313df749-aac0-1a54-af52-10f6c991e80c", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "share_types": ["25747776-08e5-494f-ab40-a64b9d20d8f7"], "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "status": "available", "description": "My share group", "host": "manila2@generic1#GENERIC1", "source_share_group_snapshot_id": null, "created_at": "2015-09-18T10:25:24.000000" }, { "id": "306ea93c-32e9-4907-a117-148b3945749f", "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/306ea93c-32e9-4907-a117-148b3945749f", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/306ea93c-32e9-4907-a117-148b3945749f", "rel": "bookmark" } ], "name": "Test_Share_group", "availability_zone": "nova", "consistent_snapshot_support": true, "share_group_type_id": "313df749-aac0-1a54-af52-10f6c991e80c", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "share_types": ["25747776-08e5-494f-ab40-a64b9d20d8f7"], "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "status": "available", "description": "Test share group", "host": "manila2@generic1#GENERIC1", "source_share_group_snapshot_id": null, "created_at": "2015-09-18T10:25:24.000000" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-groups-list-response.json0000664000175000017500000000225500000000000025470 0ustar00zuulzuul00000000000000{ "share_groups": [ { "id": "b94a8548-2079-4be0-b21c-0a887acd31ca", "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/b94a8548-2079-4be0-b21c-0a887acd31ca", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/b94a8548-2079-4be0-b21c-0a887acd31ca", "rel": "bookmark" } ], "name": "My_share_group" }, { 
"id": "306ea93c-32e9-4907-a117-148b3945749f", "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/306ea93c-32e9-4907-a117-148b3945749f", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/share_groups/306ea93c-32e9-4907-a117-148b3945749f", "rel": "bookmark" } ], "name": "Test_Share_group" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-instance-actions-force-delete-request.json0000664000175000017500000000003500000000000030622 0ustar00zuulzuul00000000000000{ "force_delete": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-instance-actions-reset-state-request.json0000664000175000017500000000007600000000000030531 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "available" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-instances-list-response.json0000664000175000017500000000435400000000000026142 0ustar00zuulzuul00000000000000{ "share_instances": [ { "status": "error", "progress": null, "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "availability_zone": "nova", "replica_state": null, "created_at": "2015-09-07T08:41:20.000000", "updated_at": "2015-09-07T08:43:10.000000", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "cast_rules_to_readonly": false, "share_server_id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73", "host": "manila2@generic1#GENERIC1", "access_rules_status": "active", "share_type_id": "78dee8a9-1ee6-4a29-9081-14e6596fbb96", "id": "081f7030-c54f-42f5-98ee-93a37393e0f2" }, { "status": "available", "progress": "100%", "share_id": "d94a8548-2079-4be0-b21c-0a887acd31ca", "availability_zone": "nova", "replica_state": null, "created_at": 
"2015-09-07T08:51:34.000000", "updated_at": "2015-09-10T02:01:22.000000", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "cast_rules_to_readonly": false, "share_server_id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73", "host": "manila2@generic1#GENERIC1", "access_rules_status": "active", "share_type_id": "78dee8a9-1ee6-4a29-9081-14e6596fbb96", "id": "75559a8b-c90c-42a7-bda2-edbe86acfb7b" }, { "status": "creating_from_snapshot", "progress": "30%", "share_id": "9bb15af4-27e5-4174-ae15-dc549d4a3b51", "availability_zone": "nova", "replica_state": null, "created_at": "2015-09-07T09:01:15.000000", "updated_at": "2015-09-07T09:02:30.000000", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "cast_rules_to_readonly": false, "share_server_id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73", "host": "manila2@generic1#GENERIC1", "access_rules_status": "active", "share_type_id": "78dee8a9-1ee6-4a29-9081-14e6596fbb96", "id": "48155648-2fd3-480d-b02b-44b995c24bab" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-manage-request.json0000664000175000017500000000111400000000000024253 0ustar00zuulzuul00000000000000{ "share": { "protocol": "nfs", "name": "accounting_p8787", "share_type": "gold", "driver_options": { "opt1": "opt1", "opt2": "opt2" }, "export_path": "192.162.10.6:/shares/share-accounting_p8787", "service_host": "manila2@openstackstor01#accountingpool", "is_public": true, "description": "Common storage for spreadsheets and presentations. 
Please contact John Accessman to be added to the users of this drive.", "share_server_id": "00137b40-ca06-4ae8-83a3-2c5989eebcce" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-manage-response.json0000664000175000017500000000274100000000000024430 0ustar00zuulzuul00000000000000{ "share": { "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/shares/00137b40-ca06-4ae8-83a3-2c5989eebcce", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/00137b40-ca06-4ae8-83a3-2c5989eebcce", "rel": "bookmark" } ], "availability_zone": null, "share_network_id": null, "export_locations": [], "share_server_id": "00137b40-ca06-4ae8-83a3-2c5989eebcce", "share_group_id": null, "snapshot_id": null, "id": "00137b40-ca06-4ae8-83a3-2c5989eebcce", "size": null, "share_type": "14747856-08e5-494f-ab40-a64b9d20d8f7", "share_type_name": "d", "export_location": "10.254.0.5:/shares/share-42033c24-0261-424f-abda-4fef2f6dbfd5", "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "metadata": {}, "status": "manage_starting", "description": "Lets manage share.", "user_id": "66ffd308757e44b9a8bec381322b0b88", "host": "manila2@unmanage1#UNMANAGE1", "access_rules_status": "active", "has_replicas": false, "replication_type": null, "is_public": false, "snapshot_support": true, "name": "share_texas1", "created_at": "2019-03-05T10:00:00.000000", "share_proto": "NFS", "volume_type": "d" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-migration-cancel-request.json0000664000175000017500000000016000000000000026237 0ustar00zuulzuul00000000000000{ "migration_cancel": { "share_id": "406ea93b-32e9-4907-a117-148b3945749f" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-migration-complete-request.json0000664000175000017500000000016100000000000026623 0ustar00zuulzuul00000000000000{ "migration_complete": { "share_id": "406ea93b-32e9-4907-a117-148b3945749f" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-migration-get-process-request.json0000664000175000017500000000016400000000000027251 0ustar00zuulzuul00000000000000{ "migration_get_process": { "share_id": "406ea93b-32e9-4907-a117-148b3945749f" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-migration-get-process-response.json0000664000175000017500000000012000000000000027407 0ustar00zuulzuul00000000000000{ "total_progress": 100, "task_state": "migration_driver_phase1_done" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-migration-start-request.json0000664000175000017500000000072400000000000026155 0ustar00zuulzuul00000000000000{ "migration_start": { "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "writable": true, "preserve_snapshots": true, "preserve_metadata": true, "nondisruptive": true, "host": "ubuntu@generic2#GENERIC2", "new_share_type_id": "foo_share_type_id", "new_share_network_id": "bar_share_network_id", "force_host_assisted_migration": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-add-security-service-check-request.json0000664000175000017500000000022500000000000031622 0ustar00zuulzuul00000000000000{ "add_security_service_check": { "security_service_id": "8971c5f6-52ec-4c53-bf6a-3fae38a9221e", "reset_operation": false } }././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-add-security-service-check-response.json0000664000175000017500000000043500000000000031773 0ustar00zuulzuul00000000000000{ "compatible": true, "requested_operation": { "operation": "add_security_service", "current_security_service": null, "new_security_service": "8971c5f6-52ec-4c53-bf6a-3fae38a9221e" }, "hosts_check_result": { "ubuntu@dummy2": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-add-security-service-request.json0000664000175000017500000000015600000000000030552 0ustar00zuulzuul00000000000000{ "add_security_service": { "security_service_id": "3c829734-0679-4c17-9637-801da48c0d5f" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-add-security-service-response-with-subnets.json0000664000175000017500000000166700000000000033362 0ustar00zuulzuul00000000000000{ "share_network": { "name": "net2", "created_at": "2019-11-10T12:31:12.000000", "updated_at": null, "id": "d8ae6799-2567-4a89-aafb-fa4424350d2b", "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "description": null, "share_network_subnets": [ { "id": "e4db03dc-6041-4c6a-a8f9-80bb4141a1eb", "availability_zone": null, "created_at": "2019-11-10T12:31:12.000000", "updated_at": "2019-11-10T12:31:12.000000", "segmentation_id": null, "neutron_net_id": "62187648-6617-4509-a780-ffc973a7fe43", "neutron_subnet_id": "2276888a-27c1-47c2-82a0-ea33050128b5", "ip_version": 4, "cidr": "172.24.5.0/24", "network_type": "flat", "mtu": 1500, "gateway": "172.24.5.1" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-network-add-security-service-response.json0000664000175000017500000000110400000000000030712 0ustar00zuulzuul00000000000000{ "share_network": { "name": "net2", "segmentation_id": null, "created_at": "2015-09-07T12:31:12.000000", "neutron_subnet_id": null, "updated_at": null, "id": "d8ae6799-2567-4a89-aafb-fa4424350d2b", "neutron_net_id": null, "ip_version": null, "cidr": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "network_type": null, "description": null, "gateway": null, "mtu": null, "security_service_update_support": true, "status": "active" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-create-request.json0000664000175000017500000000044600000000000025764 0ustar00zuulzuul00000000000000{ "share_network": { "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", "name": "my_network", "description": "This is my share network", "availability_zone": "manila-zone-0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-create-response-with-subnets.json0000664000175000017500000000200100000000000030551 0ustar00zuulzuul00000000000000{ "share_network": { "name": "my_network", "created_at": "2019-09-07T14:37:00.583656", "updated_at": null, "id": "77eb3421-4549-4789-ac39-0d5185d68c29", "project_id": "e10a683c20da41248cfd5e1ab3d88c62", "description": "This is my share network", "security_service_update_support": true, "status": "active", "share_network_subnets": [ { "id": "91cc63b5-6c61-4078-b054-560923709654", "availability_zone": "manila-zone-0", "created_at": "2019-10-04T20:49:11.000000", "updated_at": null, "segmentation_id": null, "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", "neutron_subnet_id": 
"53482b62-2c84-4a53-b6ab-30d9d9800d06", "ip_version": null, "cidr": null, "network_type": null, "mtu": null, "gateway": null } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-create-response.json0000664000175000017500000000124400000000000026127 0ustar00zuulzuul00000000000000{ "share_network": { "name": "my_network", "segmentation_id": null, "created_at": "2015-09-07T14:37:00.583656", "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", "updated_at": null, "id": "77eb3421-4549-4789-ac39-0d5185d68c29", "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", "ip_version": null, "cidr": null, "project_id": "e10a683c20da41248cfd5e1ab3d88c62", "network_type": null, "description": "This is my share network", "gateway": null, "mtu": null, "security_service_update_support": true, "status": "active" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-remove-security-service-request.json0000664000175000017500000000016100000000000031313 0ustar00zuulzuul00000000000000{ "remove_security_service": { "security_service_id": "3c829734-0679-4c17-9637-801da48c0d5f" } } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/api-ref/source/samples/share-network-remove-security-service-response-with-subnets.json 22 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-remove-security-service-response-with-subnets.jso0000664000175000017500000000200400000000000033733 0ustar00zuulzuul00000000000000{ "share_network": { "name": "net2", "created_at": "2019-11-07T12:31:12.000000", "updated_at": null, "id": "d8ae6799-2567-4a89-aafb-fa4424350d2b", "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "description": null, "security_service_update_support": true, "status": "active", 
"share_network_subnets": [ { "id": "e4db03dc-6041-4c6a-a8f9-80bb4141a1eb", "availability_zone": null, "created_at": "2019-11-07T12:31:12.000000", "updated_at": "2019-12-12T12:31:12.000000", "segmentation_id": null, "neutron_net_id": "62187648-6617-4509-a780-ffc973a7fe43", "neutron_subnet_id": "2276888a-27c1-47c2-82a0-ea33050128b5", "ip_version": 4, "cidr": "172.24.5.0/24", "network_type": "flat", "mtu": 1500, "gateway": "172.24.5.1" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-remove-security-service-response.json0000664000175000017500000000110400000000000031457 0ustar00zuulzuul00000000000000{ "share_network": { "name": "net2", "segmentation_id": null, "created_at": "2015-09-07T12:31:12.000000", "neutron_subnet_id": null, "updated_at": null, "id": "d8ae6799-2567-4a89-aafb-fa4424350d2b", "neutron_net_id": null, "ip_version": null, "cidr": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "network_type": null, "description": null, "gateway": null, "mtu": null, "security_service_update_support": true, "status": "active" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-reset-state-request.json0000664000175000017500000000007200000000000026754 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "active" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-show-response-with-subnets.json0000664000175000017500000000175300000000000030303 0ustar00zuulzuul00000000000000{ "share_network": { "id": "1324e7d3-fba8-45e4-bb37-b59c12eb06dc", "name": "net_my1", "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "created_at": "2019-10-02T17:49:43.000000", "description": null, "security_service_update_support": true, "status": 
"active", "share_network_subnets": [ { "id": "e4db03dc-6041-4c6a-a8f9-80bb4141a1eb", "availability_zone": null, "created_at": "2019-10-02T17:49:43.000000", "updated_at": "2019-10-03T12:17:39.000000", "segmentation_id": null, "neutron_net_id": "62187648-6617-4509-a780-ffc973a7fe43", "neutron_subnet_id": "2276888a-27c1-47c2-82a0-ea33050128b5", "ip_version": 4, "cidr": "172.24.5.0/24", "network_type": "flat", "mtu": 1500, "gateway": "172.24.5.1" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-show-response.json0000664000175000017500000000121600000000000025643 0ustar00zuulzuul00000000000000{ "share_network": { "name": "net_my1", "segmentation_id": null, "created_at": "2015-09-04T14:56:45.000000", "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", "updated_at": null, "id": "7f950b52-6141-4a08-bbb5-bb7ffa3ea5fd", "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", "ip_version": null, "cidr": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "network_type": null, "description": "descr", "gateway": null, "mtu": null, "security_service_update_support": true, "status": "active" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-create-request.json0000664000175000017500000000033400000000000027256 0ustar00zuulzuul00000000000000{ "share-network-subnet": { "neutron_net_id": "62187648-6617-4509-a780-ffc973a7fe43", "neutron_subnet_id": "2276888a-27c1-47c2-82a0-ea33050128b5", "availability_zone": "manila-zone-0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-create-response.json0000664000175000017500000000115600000000000027427 0ustar00zuulzuul00000000000000{ "share_network_subnet": { "id": 
"8ebe964d-ac48-4e43-93ed-b1768148f8f4", "availability_zone": "manila-zone-0", "share_network_id": "1324e7d3-fba8-45e4-bb37-b59c12eb06dc", "share_network_name": "net_my1", "created_at": "2019-10-03T02:25:12.000000", "segmentation_id": null, "neutron_subnet_id": "2276888a-27c1-47c2-82a0-ea33050128b5", "updated_at": null, "neutron_net_id": "62187648-6617-4509-a780-ffc973a7fe43", "ip_version": null, "cidr": null, "network_type": null, "mtu": null, "gateway": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-list-response.json0000664000175000017500000000234700000000000027142 0ustar00zuulzuul00000000000000{ "share_network_subnets": [ { "id": "a7507a16-98bb-476c-ba90-487e4b4775fa", "availability_zone": null, "share_network_id": "8bc488d8-52f7-46cb-91b1-89dd92cae972", "share_network_name": "sn_test", "created_at": "2019-10-03T18:30:15.000000", "segmentation_id": null, "neutron_subnet_id": "dc0a37f0-81b0-4eb5-aad8-deffda5ff4ca", "updated_at": null, "neutron_net_id": "70bc8f03-525c-4334-a51b-261a024681c5", "ip_version": 4, "cidr": "10.190.5.0/24", "network_type": "flat", "mtu": 1500, "gateway": "10.190.5.1" }, { "id": "8ebe964d-ac48-4e43-93ed-b1768148f8f4", "availability_zone": "manila-zone-0", "share_network_id": "8bc488d8-52f7-46cb-91b1-89dd92cae972", "share_network_name": "sn_test", "created_at": "2019-10-02T01:35:10.000000", "segmentation_id": null, "neutron_subnet_id": "2276888a-27c1-47c2-82a0-ea33050128b5", "updated_at": null, "neutron_net_id": "62187648-6617-4509-a780-ffc973a7fe43", "ip_version": 4, "cidr": "172.24.5.0/24", "network_type": "flat", "mtu": 1500, "gateway": "172.24.5.1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-set-metadata-request.json0000664000175000017500000000006500000000000030365 
0ustar00zuulzuul00000000000000{ "metadata": { "key1": "value1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-set-metadata-response.json0000664000175000017500000000026700000000000030537 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "key1": "value1", "new_metadata_key": "new_information", "key": "value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-show-metadata-response.json0000664000175000017500000000012000000000000030710 0ustar00zuulzuul00000000000000{ "metadata": { "project": "my_app", "key": "value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-show-response.json0000664000175000017500000000121400000000000027137 0ustar00zuulzuul00000000000000{ "share_network_subnet": { "id": "e4db03dc-6041-4c6a-a8f9-80bb4141a1eb", "availability_zone": null, "share_network_id":"1324e7d3-fba8-45e4-bb37-b59c12eb06dc", "share_network_name": "net_my1", "created_at": "2019-10-01T17:49:43.000000", "segmentation_id": null, "neutron_subnet_id": "2276888a-27c1-47c2-82a0-ea33050128b5", "updated_at": "2019-11-02T12:17:39.000000", "neutron_net_id": "62187648-6617-4509-a780-ffc973a7fe43", "ip_version": 4, "cidr": "172.24.5.0/24", "network_type": "flat", "mtu": 1500, "gateway": "172.24.5.1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-update-metadata-request.json0000664000175000017500000000020500000000000031050 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "new_metadata_key": "new_information" } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-update-metadata-response.json0000664000175000017500000000020500000000000031216 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "new_metadata_key": "new_information" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-update-null-metadata-request.json0000664000175000017500000000003100000000000032015 0ustar00zuulzuul00000000000000{ "metadata": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-subnet-update-null-metadata-response.json0000664000175000017500000000003100000000000032163 0ustar00zuulzuul00000000000000{ "metadata": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-update-request.json0000664000175000017500000000037700000000000026006 0ustar00zuulzuul00000000000000{ "share_network": { "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", "name": "update my network", "description": "i'm adding a description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-update-response-with-subnets.json0000664000175000017500000000204600000000000030601 0ustar00zuulzuul00000000000000{ "share_network":{ "id": "2b33cd3a-3049-4f36-a2fd-f7a211eb9202", "name": "update my network", "project_id": "79ed3be75dbb4d03afd687b758fcc2c0", "created_at": "2019-11-12T17:18:10.000000", "updated_at": null, "description": "i'm adding a description", 
"security_service_update_support": true, "status": "active", "share_network_subnets": [ { "id": "687ab361-5c40-406e-945c-6326254782d4", "availability_zone": null, "created_at": "2019-11-13T17:18:10.000000", "updated_at": "2019-11-13T17:18:56.000000", "segmentation_id": null, "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", "ip_version": 4, "cidr": "172.24.5.0/24", "network_type": "flat", "mtu": 1500, "gateway": "172.24.5.1" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-update-response.json0000664000175000017500000000126700000000000026153 0ustar00zuulzuul00000000000000{ "share_network": { "name": "net_my", "segmentation_id": null, "created_at": "2015-09-04T14:54:25.000000", "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", "updated_at": "2015-09-07T08:02:53.512184", "id": "713df749-aac0-4a54-af52-10f6c991e80c", "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", "ip_version": "4", "cidr": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "network_type": null, "description": "i'm adding a description", "gateway": null, "mtu": null, "security_service_update_support": true, "status": "active" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-update-security-service-check-request.json0000664000175000017500000000033100000000000032352 0ustar00zuulzuul00000000000000{ "update_security_service_check": { "current_service_id": "8971c5f6-52ec-4c53-bf6a-3fae38a9221e", "new_service_id": "6cff8d33-f73b-483f-88af-e5429ad9daef", "reset_operation": false } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-network-update-security-service-check-response.json0000664000175000017500000000050200000000000032520 0ustar00zuulzuul00000000000000{ "compatible": true, "requested_operation": { "operation": "update_security_service", "current_security_service": "8971c5f6-52ec-4c53-bf6a-3fae38a9221e", "new_security_service": "6cff8d33-f73b-483f-88af-e5429ad9daef" }, "hosts_check_result": { "ubuntu@dummy2": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-update-security-service-request.json0000664000175000017500000000026100000000000031301 0ustar00zuulzuul00000000000000{ "update_security_service": { "current_service_id": "8971c5f6-52ec-4c53-bf6a-3fae38a9221e", "new_service_id": "6cff8d33-f73b-483f-88af-e5429ad9daef" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-network-update-security-service-response.json0000664000175000017500000000175700000000000031462 0ustar00zuulzuul00000000000000{ "share_network": { "id": "1e3f43b2-2290-4fb8-bdc3-fb741c336c2a", "name": "my_share_network", "project_id": "838f27f65c1d43baa37743c6884958ce", "created_at": "2021-03-25T17: 48: 51.925433", "updated_at": "2021-03-29T15: 06: 19.464021", "description": null, "share_network_subnets": [ { "id": "14f7f4f6-b6b6-4b7e-a89c-1040700f3166", "availability_zone": null, "created_at": "2021-03-25T17: 48: 52.014525", "updated_at": "2021-03-29T14: 50: 56.993391", "segmentation_id": 1010, "neutron_net_id": null, "neutron_subnet_id": null, "ip_version": 4, "cidr": "10.0.0.0/24", "network_type": "vlan", "mtu": 1500, "gateway": "10.0.0.1" } ], "status": "network_change", "security_service_update_support": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-networks-list-detailed-response-with-subnets.json0000664000175000017500000000524600000000000032233 0ustar00zuulzuul00000000000000{ "share_networks": [ { "id": "03987b5f-cb79-4f5f-a590-f6936b91b49e", "name": "net_my1", "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "created_at": "2019-10-02T12:33:36.000000", "updated_at": null, "description": null, "share_network_subnets": [ { "id": "022aa495-845e-42a6-9d83-a38f164053c9", "availability_zone": null, "created_at": "2019-10-02T12:33:36.000000", "updated_at": null, "segmentation_id": null, "neutron_net_id": "f00732aa-7721-455d-ba14-ec37619ea13f", "neutron_subnet_id": "eb7adcf8-ce71-43e3-b4c2-cf81da9f89a", "ip_version": null, "cidr": null, "network_type": null, "mtu": null, "gateway": null } ] }, { "id": "1324e7d3-fba8-45e4-bb37-b59c12eb06dc", "name": "net_my2", "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "created_at": "2019-07-01T17:49:43.000000", "updated_at": "2019-07-02T12:17:39.000000", "description": null, "share_network_subnets": [ { "id": "8ebe964d-ac48-4e43-93ed-b1768148f8f4", "availability_zone": "manila-zone-0", "created_at": "2019-10-03T02:25:12.000000", "updated_at": null, "segmentation_id": null, "neutron_net_id": "62187648-6617-4509-a780-ffc973a7fe43", "neutron_subnet_id": "2276888a-27c1-47c2-82a0-ea3050128b5", "ip_version": null, "cidr": null, "network_type": null, "mtu": null, "gateway": null }, { "id": "e4db03dc-6041-4c6a-a8f9-80bb4141a1eb", "availability_zone": null, "created_at": "2019-07-01T17:49:43.000000", "updated_at": "2019-07-02T12:17:39.000000", "segmentation_id": null, "neutron_net_id": "62187648-6617-4509-a780-ffc973a7fe43", "neutron_subnet_id": "2276888a-27c1-47c2-820-ea33050128b5", "ip_version": 4, "cidr": "172.24.5.0/24", "network_type": "flat", "mtu": 1500, "gateway": "172.24.5.1" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-networks-list-detailed-response.json0000664000175000017500000000343400000000000027576 0ustar00zuulzuul00000000000000{ "share_networks": [ { "name": "net_my1", "segmentation_id": null, "created_at": "2015-09-04T14:57:13.000000", "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", "updated_at": null, "id": "32763294-e3d4-456a-998d-60047677c2fb", "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", "ip_version": null, "cidr": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "network_type": null, "description": "descr", "gateway": null, "mtu": null }, { "name": "net_my", "segmentation_id": null, "created_at": "2015-09-04T14:54:25.000000", "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", "updated_at": null, "id": "713df749-aac0-4a54-af52-10f6c991e80c", "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", "ip_version": null, "cidr": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "network_type": null, "description": "desecr", "gateway": null, "mtu": null }, { "name": null, "segmentation_id": null, "created_at": "2015-09-04T14:51:41.000000", "neutron_subnet_id": null, "updated_at": null, "id": "fa158a3d-6d9f-4187-9ca5-abbb82646eb2", "neutron_net_id": null, "ip_version": null, "cidr": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "network_type": null, "description": null, "gateway": null, "mtu": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-networks-list-response.json0000664000175000017500000000054200000000000026022 0ustar00zuulzuul00000000000000{ "share_networks": [ { "id": "32763294-e3d4-456a-998d-60047677c2fb", "name": "net_my1" }, { "id": "713df749-aac0-4a54-af52-10f6c991e80c", "name": "net_my" }, { "id": "fa158a3d-6d9f-4187-9ca5-abbb82646eb2", "name": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replica-create-request.json0000664000175000017500000000033400000000000025706 0ustar00zuulzuul00000000000000{ "share_replica": { "share_id": "50a6a566-6bac-475c-ad69-5035c86696c0", "availability_zone": "nova", "scheduler_hints": { "only_host": "host1@generic1#GENERIC1" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replica-create-response.json0000664000175000017500000000070100000000000026052 0ustar00zuulzuul00000000000000{ "share_replica": { "status": "creating", "share_id": "5043dffd-f033-4248-a315-319ca2bd70c8", "availability_zone": null, "cast_rules_to_readonly": true, "updated_at": null, "share_network_id": null, "share_server_id": null, "host": "", "id": "c9f52e33-d780-41d8-89ba-fc06869f465f", "replica_state": null, "created_at": "2017-08-15T20:21:43.493731" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replica-export-location-list-response.json0000664000175000017500000000144400000000000030714 0ustar00zuulzuul00000000000000{ "export_locations": [ { "path": "10.254.0.3:/shares/share-e1c2d35e-fe67-4028-ad7a-45f668732b1d", "share_instance_id": "e1c2d35e-fe67-4028-ad7a-45f668732b1d", "is_admin_only": false, "id": "b6bd76ce-12a2-42a9-a30a-8a43b503867d", "preferred": false, "replica_state": "in_sync", "availability_zone": "paris" }, { "path": "10.0.0.3:/shares/share-e1c2d35e-fe67-4028-ad7a-45f668732b1d", "share_instance_id": "e1c2d35e-fe67-4028-ad7a-45f668732b1d", "is_admin_only": true, "id": "6921e862-88bc-49a5-a2df-efeed9acd583", "preferred": false, "replica_state": "in_sync", "availability_zone": "paris" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-replica-export-location-show-response.json0000664000175000017500000000073100000000000030717 0ustar00zuulzuul00000000000000{ "export_location": { "created_at": "2016-03-24T14:20:47.000000", "updated_at": "2016-03-24T14:20:47.000000", "preferred": false, "is_admin_only": true, "share_instance_id": "e1c2d35e-fe67-4028-ad7a-45f668732b1d", "path": "10.0.0.3:/shares/share-e1c2d35e-fe67-4028-ad7a-45f668732b1d", "id": "6921e862-88bc-49a5-a2df-efeed9acd583", "replica_state": "in_sync", "availability_zone": "paris" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replica-promote-request.json0000664000175000017500000000007300000000000026130 0ustar00zuulzuul00000000000000{ "promote": { "quiesce_wait_time": 30 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replica-resync-request.json0000664000175000017500000000002700000000000025745 0ustar00zuulzuul00000000000000{ "resync": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replicas-force-delete-request.json0000664000175000017500000000003500000000000027162 0ustar00zuulzuul00000000000000{ "force_delete": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replicas-list-detail-response.json0000664000175000017500000000216500000000000027213 0ustar00zuulzuul00000000000000{ "share_replicas": [ { "status": "available", "share_id": "5043dffd-f033-4248-a315-319ca2bd70c8", "availability_zone": "nova", "cast_rules_to_readonly": false, "updated_at": "2017-08-15T20:20:50.000000", "share_network_id": null, "share_server_id": null, "host": 
"ubuntu@generic3#fake_pool_for_DummyDriver", "id": "57f5c47a-0216-4ee0-a517-0460d63301a6", "replica_state": "active", "created_at": "2017-08-15T20:20:45.000000" }, { "status": "available", "share_id": "5043dffd-f033-4248-a315-319ca2bd70c8", "availability_zone": "nova", "cast_rules_to_readonly": true, "updated_at": "2017-08-15T20:21:49.000000", "share_network_id": null, "share_server_id": null, "host": "ubuntu@generic2#fake_pool_for_DummyDriver", "id": "c9f52e33-d780-41d8-89ba-fc06869f465f", "replica_state": "in_sync", "created_at": "2017-08-15T20:21:43.000000" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replicas-list-response.json0000664000175000017500000000072200000000000025750 0ustar00zuulzuul00000000000000{ "share_replicas": [ { "status": "available", "share_id": "5043dffd-f033-4248-a315-319ca2bd70c8", "id": "57f5c47a-0216-4ee0-a517-0460d63301a6", "replica_state": "active" }, { "status": "available", "share_id": "5043dffd-f033-4248-a315-319ca2bd70c8", "id": "c9f52e33-d780-41d8-89ba-fc06869f465f", "replica_state": "in_sync" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replicas-reset-replica-state-request.json0000664000175000017500000000011600000000000030501 0ustar00zuulzuul00000000000000{ "reset_replica_state": { "replica_state": "out_of_sync" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-replicas-reset-state-request.json0000664000175000017500000000007600000000000027071 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "available" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-replicas-show-response.json0000664000175000017500000000102400000000000025751 0ustar00zuulzuul00000000000000{ "share_replica": { "status": "available", "share_id": "5043dffd-f033-4248-a315-319ca2bd70c8", "availability_zone": "nova", "cast_rules_to_readonly": false, "updated_at": "2017-08-15T20:20:50.000000", "share_network_id": null, "share_server_id": null, "host": "ubuntu@generic3#fake_pool_for_DummyDriver", "id": "57f5c47a-0216-4ee0-a517-0460d63301a6", "replica_state": "active", "created_at": "2017-08-15T20:20:45.000000" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-manage-request.json0000664000175000017500000000052700000000000025566 0ustar00zuulzuul00000000000000{ "share_server": { "host": "myhost@mybackend", "share_network_id": "78cef6eb-648a-4bbd-9ae1-d2eaaf594cc0", "share_network_subnet_id": "f53252f0-c2a9-4d7c-af41-1c6f3cfb3af3", "identifier": "4ef3507e-0513-4140-beda-f619ab30d424", "driver_options": { "opt1": "opt1_value" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-manage-response.json0000664000175000017500000000123000000000000025724 0ustar00zuulzuul00000000000000{ "share_server": { "id": "dd218d97-6b16-45b7-9b23-19681ccdec3a", "project_id": "5b23075b4b504261a5987b18588f86cf", "updated_at": null, "status": "manage_starting", "host": "myhost@mybackend", "share_network_name": "share-net-name", "share_network_id": "78cef6eb-648a-4bbd-9ae1-d2eaaf594cc0", "share_network_subnet_id": "f53252f0-c2a9-4d7c-af41-1c6f3cfb3af3", "created_at": "2019-03-06T11:59:41.000000", "backend_details": {}, "is_auto_deletable": false, "identifier": "4ef3507e-0513-4140-beda-f619ab30d424", "security_service_update_support": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-migration-cancel-request.json0000664000175000017500000000004000000000000027540 0ustar00zuulzuul00000000000000{ "migration_cancel":null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-migration-check-compatibility-request.json0000664000175000017500000000032000000000000032240 0ustar00zuulzuul00000000000000{ "migration_check": { "host": "foohost2@backend2", "preserve_snapshots": "True", "writable": "True", "nondisruptive": "True", "new_share_network_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-migration-check-compatibility-response.json0000664000175000017500000000077600000000000032425 0ustar00zuulzuul00000000000000{ "compatible": false, "requested_capabilities": { "writable": "True", "nondisruptive": "True", "preserve_snapshots": "True", "share_network_id": null, "host": "foohost2@backend2" }, "supported_capabilities": { "writable": true, "nondisruptive": false, "preserve_snapshots": true, "share_network_id": "1d04b755-649f-46a4-964c-be9f0395af13", "migration_cancel": true, "migration_get_progress": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-migration-complete-request.json0000664000175000017500000000004300000000000030126 0ustar00zuulzuul00000000000000{ "migration_complete": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-migration-complete-response.json0000664000175000017500000000011600000000000030275 0ustar00zuulzuul00000000000000{ "destination_share_server_id": "c2f71561-85e2-4ccb-a91a-e44f9ff6f7ef" } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-migration-get-progress-request.json0000664000175000017500000000004700000000000030743 0ustar00zuulzuul00000000000000{ "migration_get_progress": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-migration-get-progress-response.json0000664000175000017500000000023200000000000031105 0ustar00zuulzuul00000000000000{ "total_progress": 50, "task_state": "migration_driver_in_progress", "destination_share_server_id": "c2f71561-85e2-4ccb-a91a-e44f9ff6f7ef" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-migration-start-request.json0000664000175000017500000000032100000000000027452 0ustar00zuulzuul00000000000000{ "migration_start": { "host": "foohost2@backend2", "preserve_snapshots": "True", "writable": "True", "nondisruptive": "False", "new_share_network_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-reset-state-request.json0000664000175000017500000000007200000000000026571 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "active" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-show-details-response.json0000664000175000017500000000064500000000000027110 0ustar00zuulzuul00000000000000{ "details": { "username": "manila", "router_id": "4b62ce91-56c5-45c1-b0ef-8cbbe5dd34f4", "pk_path": "/opt/stack/.ssh/id_rsa", "subnet_id": "16e99ad6-5191-461c-9f34-ac84a39c3adb", "ip": "10.254.0.3", "instance_id": "75f2f282-af65-49ba-a7b1-525705b1bf1a", "public_address": 
"10.254.0.3", "service_port_id": "8ff21760-961e-4b83-a032-03fd559bb1d3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-show-response.json0000664000175000017500000000176100000000000025465 0ustar00zuulzuul00000000000000{ "share_server": { "status": "active", "backend_details": { "username": "manila", "router_id": "4b62ce91-56c5-45c1-b0ef-8cbbe5dd34f4", "pk_path": "/opt/stack/.ssh/id_rsa", "subnet_id": "16e99ad6-5191-461c-9f34-ac84a39c3adb", "ip": "10.254.0.3", "instance_id": "75f2f282-af65-49ba-a7b1-525705b1bf1a", "public_address": "10.254.0.3", "service_port_id": "8ff21760-961e-4b83-a032-03fd559bb1d3" }, "created_at": "2015-09-07T08:37:19.000000", "updated_at": "2015-09-07T08:52:15.000000", "share_network_name": "net_my", "host": "manila2@generic1", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "share_network_subnet_id": "f53252f0-c2a9-4d7c-af41-1c6f3cfb3af3", "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73", "security_service_update_support": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-server-unmanage-request.json0000664000175000017500000000005400000000000026124 0ustar00zuulzuul00000000000000{ "unmanage": { "force": "false" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-servers-list-response.json0000664000175000017500000000104400000000000025635 0ustar00zuulzuul00000000000000{ "share_servers": [ { "status": "active", "updated_at": "2015-09-07T08:52:15.000000", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "host": "manila2@generic1", "share_network_name": "net_my", "share_network_subnet_id": "f53252f0-c2a9-4d7c-af41-1c6f3cfb3af3", "project_id": 
"16e1ab15c35a457e9c2b2aa189f544e1", "id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73", "security_service_update_support": true } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-set-metadata-request.json0000664000175000017500000000006500000000000025400 0ustar00zuulzuul00000000000000{ "metadata": { "key1": "value1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-set-metadata-response.json0000664000175000017500000000026700000000000025552 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "key1": "value1", "new_metadata_key": "new_information", "key": "value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-show-instance-response.json0000664000175000017500000000120000000000000025747 0ustar00zuulzuul00000000000000{ "share_instance": { "status": "available", "progress": "100%", "share_id": "d94a8548-2079-4be0-b21c-0a887acd31ca", "availability_zone": "nova", "replica_state": null, "created_at": "2015-09-07T08:51:34.000000", "updated_at": "2015-09-07T08:52:20.000000", "cast_rules_to_readonly": false, "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "share_server_id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73", "host": "manila2@generic1#GENERIC1", "access_rules_status": "active", "id": "75559a8b-c90c-42a7-bda2-edbe86acfb7b" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-show-metadata-item-response.json0000664000175000017500000000006400000000000026666 0ustar00zuulzuul00000000000000{ "meta": { "project": "my_app" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-show-metadata-response.json0000664000175000017500000000011600000000000025730 0ustar00zuulzuul00000000000000{ "metadata": { "project": "my_app", "aim": "doc" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-show-network-subnet-metadata-item-response.json0000664000175000017500000000006400000000000031653 0ustar00zuulzuul00000000000000{ "meta": { "project": "my_app" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-show-response.json0000664000175000017500000000311100000000000024150 0ustar00zuulzuul00000000000000{ "share": { "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", "rel": "bookmark" } ], "availability_zone": "nova", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "export_locations": [], "share_server_id": "e268f4aa-d571-43dd-9ab3-f49ad06ffaef", "share_group_id": null, "snapshot_id": null, "id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", "size": 1, "share_type": "25747776-08e5-494f-ab40-a64b9d20d8f7", "share_type_name": "default", "export_location": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "metadata": { "project": "my_app", "aim": "doc" }, "status": "available", "progress": "100%", "description": "My custom share London", "host": "manila2@generic1#GENERIC1", "user_id": "66ffd308757e44b9a8bec381322b0b88", "access_rules_status": "active", "has_replicas": false, "replication_type": null, "task_state": null, "is_public": true, "snapshot_support": true, "name": "share_London", "created_at": "2015-09-18T10:25:24.000000", "share_proto": "NFS", "volume_type": "default" 
} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-transfer-accept-request.json0000664000175000017500000000014500000000000026107 0ustar00zuulzuul00000000000000{ "accept": { "auth_key": "d7ef426932068a33", "clear_access_rules": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-transfer-create-request.json0000664000175000017500000000017000000000000026111 0ustar00zuulzuul00000000000000{ "transfer": { "share_id": "29476819-28a9-4b1a-a21d-3b2d203025a0", "name": "test_transfer" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-transfer-create-response.json0000664000175000017500000000152500000000000026264 0ustar00zuulzuul00000000000000{ "transfer": { "id": "f21c72c4-2b77-445b-aa12-e8d1b44163a2", "created_at": "2022-09-06T08:17:43.629495", "name": "test_transfer", "resource_type": "share", "resource_id": "29476819-28a9-4b1a-a21d-3b2d203025a0", "auth_key": "406a2d67cdb09afe", "source_project_id": "714198c7ac5e45a4b785de732ea4695d", "destination_project_id": null, "accepted": false, "expires_at": "2022-09-06T08:22:43.629495", "links": [ { "rel": "self", "href": "http://192.168.48.129/shar/v2/share-transfer/f21c72c4-2b77-445b-aa12-e8d1b44163a2" }, { "rel": "bookmark", "href": "http://192.168.48.129/shar/share-transfer/f21c72c4-2b77-445b-aa12-e8d1b44163a2" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-transfer-show-response.json0000664000175000017500000000145100000000000025777 0ustar00zuulzuul00000000000000{ "transfer": { "id": "d2035732-d0c0-4380-a44c-f978a264ab1a", "created_at": "2022-09-07T01:12:29.000000", "name": "transfer1", "resource_type": 
"share", "resource_id": "29476819-28a9-4b1a-a21d-3b2d203025a0", "source_project_id": "714198c7ac5e45a4b785de732ea4695d", "destination_project_id": null, "accepted": false, "expires_at": "2022-09-07T01:17:29.000000", "links": [ { "rel": "self", "href": "http://192.168.48.129/shar/v2/share-transfer/d2035732-d0c0-4380-a44c-f978a264ab1a" }, { "rel": "bookmark", "href": "http://192.168.48.129/shar/share-transfer/d2035732-d0c0-4380-a44c-f978a264ab1a" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-transfers-list-detailed-response.json0000664000175000017500000000337000000000000027730 0ustar00zuulzuul00000000000000{ "transfers": [ { "id": "42b0fab4-df77-4f25-a958-5370e1c95ed2", "created_at": "2022-09-07T01:52:39.000000", "name": "transfer2", "resource_type": "share", "resource_id": "0fe7cf64-b879-4902-9d86-f80aeff12b06", "source_project_id": "714198c7ac5e45a4b785de732ea4695d", "destination_project_id": null, "accepted": false, "expires_at": "2022-09-07T01:57:39.000000", "links": [ { "rel": "self", "href": "http://192.168.48.129/shar/v2/share-transfer/42b0fab4-df77-4f25-a958-5370e1c95ed2" }, { "rel": "bookmark", "href": "http://192.168.48.129/shar/share-transfer/42b0fab4-df77-4f25-a958-5370e1c95ed2" } ] }, { "id": "506a7e77-42e7-4f33-ac36-1d1dd7f2b9af", "created_at": "2022-09-07T01:52:30.000000", "name": "transfer1", "resource_type": "share", "resource_id": "29476819-28a9-4b1a-a21d-3b2d203025a0", "source_project_id": "714198c7ac5e45a4b785de732ea4695d", "destination_project_id": null, "accepted": false, "expires_at": "2022-09-07T01:57:30.000000", "links": [ { "rel": "self", "href": "http://192.168.48.129/shar/v2/share-transfer/506a7e77-42e7-4f33-ac36-1d1dd7f2b9af" }, { "rel": "bookmark", "href": "http://192.168.48.129/shar/share-transfer/506a7e77-42e7-4f33-ac36-1d1dd7f2b9af" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-transfers-list-response.json0000664000175000017500000000237000000000000026156 0ustar00zuulzuul00000000000000{ "transfers": [ { "id": "02a948b4-671b-4c62-b13a-18d613cb4576", "resource_type": "share", "resource_id": "0fe7cf64-b879-4902-9d86-f80aeff12b06", "name": "transfer2", "links": [ { "rel": "self", "href": "http://192.168.48.129/shar/v2/share-transfer/02a948b4-671b-4c62-b13a-18d613cb4576" }, { "rel": "bookmark", "href": "http://192.168.48.129/shar/share-transfer/02a948b4-671b-4c62-b13a-18d613cb4576" } ] }, { "id": "a10209ff-b55d-4fed-9f63-abea53b6f107", "resource_type": "share", "resource_id": "29476819-28a9-4b1a-a21d-3b2d203025a0", "name": "transfer1", "links": [ { "rel": "self", "href": "http://192.168.48.129/shar/v2/share-transfer/a10209ff-b55d-4fed-9f63-abea53b6f107" }, { "rel": "bookmark", "href": "http://192.168.48.129/shar/share-transfer/a10209ff-b55d-4fed-9f63-abea53b6f107" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-type-create-request.json0000664000175000017500000000072300000000000025252 0ustar00zuulzuul00000000000000{ "share_type": { "extra_specs": { "replication_type": "readable", "driver_handles_share_servers": true, "mount_snapshot_support": false, "revert_to_snapshot_support": false, "create_share_from_snapshot_support": true, "snapshot_support": true }, "share_type_access:is_public": true, "name": "testing", "description": "share type description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-type-create-response.json0000664000175000017500000000243200000000000025417 0ustar00zuulzuul00000000000000{ "share_type": { "required_extra_specs": { "driver_handles_share_servers": true }, "share_type_access:is_public": true, "extra_specs": { 
"replication_type": "readable", "driver_handles_share_servers": "True", "mount_snapshot_support": "False", "revert_to_snapshot_support": "False", "create_share_from_snapshot_support": "True", "snapshot_support": "True" }, "id": "7fa1342b-de9d-4d89-bdc8-af67795c0e52", "name": "testing", "is_default": false, "description": "share type description" }, "volume_type": { "required_extra_specs": { "driver_handles_share_servers": true }, "share_type_access:is_public": true, "extra_specs": { "replication_type": "readable", "driver_handles_share_servers": "True", "mount_snapshot_support": "False", "revert_to_snapshot_support": "False", "create_share_from_snapshot_support": "True", "snapshot_support": "True" }, "id": "7fa1342b-de9d-4d89-bdc8-af67795c0e52", "name": "testing", "is_default": false, "description": "share type description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-type-grant-access-request.json0000664000175000017500000000013200000000000026353 0ustar00zuulzuul00000000000000{ "addProjectAccess": { "project": "e1284adea3ee4d2482af5ed214f3ad90" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-type-revoke-access-request.json0000664000175000017500000000013500000000000026536 0ustar00zuulzuul00000000000000{ "removeProjectAccess": { "project": "818a3f48dcd644909b3fa2e45a399a27" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-type-set-request.json0000664000175000017500000000007400000000000024601 0ustar00zuulzuul00000000000000{ "extra_specs": { "my_key": "my_value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/share-type-set-response.json0000664000175000017500000000007400000000000024747 0ustar00zuulzuul00000000000000{ "extra_specs": { "my_key": "my_value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-type-show-response.json0000664000175000017500000000151000000000000025130 0ustar00zuulzuul00000000000000{ "share_type": { "required_extra_specs": { "driver_handles_share_servers": "True" }, "share_type_access:is_public": true, "extra_specs": { "driver_handles_share_servers": "True" }, "id": "2780fc88-526b-464a-a72c-ecb83f0e3929", "name": "default-share-type", "is_default": true, "description": "manila share type" }, "volume_type": { "required_extra_specs": { "driver_handles_share_servers": "True" }, "share_type_access:is_public": true, "extra_specs": { "driver_handles_share_servers": "True" }, "id": "2780fc88-526b-464a-a72c-ecb83f0e3929", "name": "default-share-type", "is_default": true, "description": "manila share type" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-type-update-request.json0000664000175000017500000000021000000000000025260 0ustar00zuulzuul00000000000000{ "share_type": { "share_type_access:is_public": true, "name": "testing", "description": "share type description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-type-update-response.json0000664000175000017500000000243200000000000025436 0ustar00zuulzuul00000000000000{ "share_type": { "required_extra_specs": { "driver_handles_share_servers": true }, "share_type_access:is_public": true, "extra_specs": { "replication_type": "readable", "driver_handles_share_servers": "True", "mount_snapshot_support": "False", "revert_to_snapshot_support": "False", 
"create_share_from_snapshot_support": "True", "snapshot_support": "True" }, "id": "7fa1342b-de9d-4d89-bdc8-af67795c0e52", "name": "testing", "is_default": false, "description": "share type description" }, "volume_type": { "required_extra_specs": { "driver_handles_share_servers": true }, "share_type_access:is_public": true, "extra_specs": { "replication_type": "readable", "driver_handles_share_servers": "True", "mount_snapshot_support": "False", "revert_to_snapshot_support": "False", "create_share_from_snapshot_support": "True", "snapshot_support": "True" }, "id": "7fa1342b-de9d-4d89-bdc8-af67795c0e52", "name": "testing", "is_default": false, "description": "share type description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-types-default-list-response.json0000664000175000017500000000147400000000000026741 0ustar00zuulzuul00000000000000{ "share_type": { "required_extra_specs": { "driver_handles_share_servers": "True" }, "share_type_access:is_public": true, "extra_specs": { "driver_handles_share_servers": "True" }, "id": "420e6a31-3f3d-4ed7-9d11-59450372182a", "name": "default", "is_default": true, "description": "share type description" }, "volume_type": { "required_extra_specs": { "driver_handles_share_servers": "True" }, "share_type_access:is_public": true, "extra_specs": { "driver_handles_share_servers": "True" }, "id": "420e6a31-3f3d-4ed7-9d11-59450372182a", "name": "default", "is_default": true, "description": "share type description" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-types-extra-specs-list-response.json0000664000175000017500000000045200000000000027546 0ustar00zuulzuul00000000000000{ "extra_specs": { "replication_type": "readable", "driver_handles_share_servers": "True", "create_share_from_snapshot_support": "True", 
"revert_to_snapshot_support": "False", "mount_snapshot_support": "False", "snapshot_support": "True" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-types-list-access-response.json0000664000175000017500000000052200000000000026547 0ustar00zuulzuul00000000000000{ "share_type_access": [ { "share_type_id": "1732f284-401d-41d9-a494-425451e8b4b8", "project_id": "818a3f48dcd644909b3fa2e45a399a27" }, { "share_type_id": "1732f284-401d-41d9-a494-425451e8b4b8", "project_id": "e1284adea3ee4d2482af5ed214f3ad90" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-types-list-response.json0000664000175000017500000000451400000000000025315 0ustar00zuulzuul00000000000000{ "volume_types": [ { "required_extra_specs": { "driver_handles_share_servers": "True" }, "share_type_access:is_public": true, "extra_specs": { "driver_handles_share_servers": "True" }, "id": "420e6a31-3f3d-4ed7-9d11-59450372182a", "name": "default", "is_default": true, "description": "share type description" }, { "required_extra_specs": { "driver_handles_share_servers": "True" }, "share_type_access:is_public": true, "extra_specs": { "replication_type": "readable", "driver_handles_share_servers": "True", "mount_snapshot_support": "False", "revert_to_snapshot_support": "False", "create_share_from_snapshot_support": "True", "snapshot_support": "True" }, "id": "7fa1342b-de9d-4d89-bdc8-af67795c0e52", "name": "testing", "is_default": false, "description": "share type description" } ], "share_types": [ { "required_extra_specs": { "driver_handles_share_servers": "True" }, "share_type_access:is_public": true, "extra_specs": { "driver_handles_share_servers": "True" }, "id": "420e6a31-3f3d-4ed7-9d11-59450372182a", "name": "default", "is_default": true, "description": "share type description" }, { "required_extra_specs": 
{ "driver_handles_share_servers": "True" }, "share_type_access:is_public": true, "extra_specs": { "replication_type": "readable", "driver_handles_share_servers": "True", "mount_snapshot_support": "False", "revert_to_snapshot_support": "False", "create_share_from_snapshot_support": "True", "snapshot_support": "True" }, "id": "7fa1342b-de9d-4d89-bdc8-af67795c0e52", "name": "testing", "is_default": false, "description": "share type description" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-update-metadata-request.json0000664000175000017500000000020500000000000026063 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "new_metadata_key": "new_information" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-update-metadata-response.json0000664000175000017500000000020500000000000026231 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "new_metadata_key": "new_information" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-update-null-metadata-request.json0000664000175000017500000000003100000000000027030 0ustar00zuulzuul00000000000000{ "metadata": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-update-null-metadata-response.json0000664000175000017500000000003100000000000027176 0ustar00zuulzuul00000000000000{ "metadata": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-update-request.json0000664000175000017500000000016500000000000024312 0ustar00zuulzuul00000000000000{ 
"share": { "is_public": true, "display_description": "Changing the share description." } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/share-update-response.json0000664000175000017500000000271000000000000024456 0ustar00zuulzuul00000000000000{ "share": { "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", "rel": "bookmark" } ], "availability_zone": "nova", "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", "export_locations": [], "share_server_id": "e268f4aa-d571-43dd-9ab3-f49ad06ffaef", "share_group_id": null, "snapshot_id": null, "id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", "size": 1, "share_type": "25747776-08e5-494f-ab40-a64b9d20d8f7", "share_type_name": "default", "export_location": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "metadata": { "project": "my_app", "aim": "doc" }, "status": "error", "description": "Changing the share description.", "host": "manila2@generic1#GENERIC1", "task_state": null, "is_public": true, "snapshot_support": true, "name": "share_London", "created_at": "2015-09-18T10:25:24.000000", "share_proto": "NFS", "volume_type": "default", "user_id": "66ffd308757e44b9a8bec381322b0b88" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/shares-list-detailed-response.json0000664000175000017500000000702700000000000026111 0ustar00zuulzuul00000000000000{ "shares": [ { "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/shares/f45cc5b2-d1bb-4a3e-ba5b-5c4125613adc", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/f45cc5b2-d1bb-4a3e-ba5b-5c4125613adc", 
"rel": "bookmark" } ], "availability_zone": "nova", "share_network_id": "f9b2e754-ac01-4466-86e1-5c569424754e", "export_locations": [], "share_server_id": "87d8943a-f5da-47a4-b2f2-ddfa6794aa82", "share_group_id": null, "snapshot_id": null, "source_backup_id": null, "id": "f45cc5b2-d1bb-4a3e-ba5b-5c4125613adc", "size": 1, "share_type": "25747776-08e5-494f-ab40-a64b9d20d8f7", "share_type_name": "default", "export_location": null, "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "metadata": {}, "status": "error", "progress": null, "access_rules_status": "active", "description": "There is a share description.", "host": "manila2@generic1#GENERIC1", "task_state": null, "is_public": true, "snapshot_support": true, "user_id": "66ffd308757e44b9a8bec381322b0b88", "name": "my_share4", "has_replicas": false, "replication_type": null, "created_at": "2015-09-16T18:19:50.000000", "share_proto": "NFS", "volume_type": "default" }, { "links": [ { "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/shares/c4a2ced4-2c9f-4ae1-adaa-6171833e64df", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/c4a2ced4-2c9f-4ae1-adaa-6171833e64df", "rel": "bookmark" } ], "availability_zone": "nova", "share_network_id": "f9b2e754-ac01-4466-86e1-5c569424754e", "export_locations": [ "10.254.0.5:/shares/share-50ad5e7b-f6f1-4b78-a651-0812cef2bb67" ], "share_server_id": "87d8943a-f5da-47a4-b2f2-ddfa6794aa82", "snapshot_id": null, "source_backup_id": null, "id": "c4a2ced4-2c9f-4ae1-adaa-6171833e64df", "size": 1, "share_type": "25747776-08e5-494f-ab40-a64b9d20d8f7", "share_type_name": "default", "export_location": "10.254.0.5:/shares/share-50ad5e7b-f6f1-4b78-a651-0812cef2bb67", "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", "metadata": {}, "status": "available", "progress": "100%", "access_rules_status": "active", "description": "Changed description.", "host": "manila2@generic1#GENERIC1", "task_state": null, "is_public": true, 
"snapshot_support": true, "name": "my_share4", "has_replicas": false, "replication_type": null, "created_at": "2015-09-16T17:26:28.000000", "user_id": "66ffd308757e44b9a8bec381322b0b88", "share_proto": "NFS", "volume_type": "default" } ], "count": 10 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/shares-list-response.json0000664000175000017500000000222000000000000024326 0ustar00zuulzuul00000000000000{ "shares": [ { "id": "d94a8548-2079-4be0-b21c-0a887acd31ca", "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/shares/d94a8548-2079-4be0-b21c-0a887acd31ca", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/d94a8548-2079-4be0-b21c-0a887acd31ca", "rel": "bookmark" } ], "name": "My_share" }, { "id": "406ea93b-32e9-4907-a117-148b3945749f", "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/shares/406ea93b-32e9-4907-a117-148b3945749f", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/406ea93b-32e9-4907-a117-148b3945749f", "rel": "bookmark" } ], "name": "Share1" } ], "count": 10 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-actions-force-delete-request.json0000664000175000017500000000003500000000000027555 0ustar00zuulzuul00000000000000{ "force_delete": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-actions-reset-state-request.json0000664000175000017500000000007200000000000027460 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "error" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/snapshot-actions-unmanage-request.json0000664000175000017500000000003100000000000027006 0ustar00zuulzuul00000000000000{ "unmanage": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-create-request.json0000664000175000017500000000032000000000000025021 0ustar00zuulzuul00000000000000{ "snapshot": { "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "force": "True", "name": "snapshot_share1", "description": "Here is a snapshot of share Share1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-create-response.json0000664000175000017500000000163600000000000025202 0ustar00zuulzuul00000000000000{ "snapshot": { "status": "creating", "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "user_id": "5c7bdb6eb0504d54a619acf8375c08ce", "name": "snapshot_share1", "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "bookmark" } ], "created_at": "2015-09-07T11:50:39.756808", "description": "Here is a snapshot of share Share1", "share_proto": "NFS", "share_size": 1, "id": "6d221c1d-0200-461e-8d20-24b4776b9ddb", "project_id": "cadd7139bc3148b8973df097c0911016", "size": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-instance-actions-reset-state-request.json0000664000175000017500000000007600000000000031266 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "available" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/snapshot-instance-show-response.json0000664000175000017500000000111300000000000026507 0ustar00zuulzuul00000000000000{ "snapshot_instance": { "status": "available", "share_id": "618599ab-09a1-432d-973a-c102564c7fec", "share_instance_id": "8edff0cb-e5ce-4bab-aa99-afe02ed6a76a", "snapshot_id": "d447de19-a6d3-40b3-ae9f-895c86798924", "progress": "100%", "created_at": "2017-08-04T00:44:52.000000", "id": "275516e8-c998-4e78-a41e-7dd3a03e71cd", "provider_location": "/path/to/fake/snapshot/snapshot_d447de19_a6d3_40b3_ae9f_895c86798924_275516e8_c998_4e78_a41e_7dd3a03e71cd", "updated_at": "2017-08-04T00:44:54.000000" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-instances-list-response.json0000664000175000017500000000033100000000000026666 0ustar00zuulzuul00000000000000{ "snapshot_instances": [ { "status": "available", "snapshot_id": "d447de19-a6d3-40b3-ae9f-895c86798924", "id": "275516e8-c998-4e78-a41e-7dd3a03e71cd" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-instances-list-with-detail-response.json0000664000175000017500000000120000000000000031073 0ustar00zuulzuul00000000000000{ "snapshot_instances": [ { "status": "available", "share_id": "618599ab-09a1-432d-973a-c102564c7fec", "share_instance_id": "8edff0cb-e5ce-4bab-aa99-afe02ed6a76a", "snapshot_id": "d447de19-a6d3-40b3-ae9f-895c86798924", "progress": "100%", "created_at": "2017-08-04T00:44:52.000000", "id": "275516e8-c998-4e78-a41e-7dd3a03e71cd", "provider_location": "/path/to/fake/snapshot/snapshot_d447de19_a6d3_40b3_ae9f_895c86798924_275516e8_c998_4e78_a41e_7dd3a03e71cd", "updated_at": "2017-08-04T00:44:54.000000" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/snapshot-manage-request.json0000664000175000017500000000053000000000000025011 0ustar00zuulzuul00000000000000{ "snapshot": { "share_id": "dd6c5d35-9db1-4662-a7ae-8b52f880aeba", "provider_location": "4045fee5-4e0e-408e-97f3-15e25239dbc9", "name": "managed_snapshot", "description": "description_of_managed_snapshot", "driver_options": { "opt1": "opt1", "opt2": "opt2" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-manage-response.json0000664000175000017500000000174000000000000025163 0ustar00zuulzuul00000000000000{ "snapshot": { "id": "22de7000-3a32-4fe1-bd0c-38d03f93dec3", "share_id": "dd6c5d35-9db1-4662-a7ae-8b52f880aeba", "share_size": 1, "created_at": "2016-04-01T15:16:17.000000", "status": "manage_starting", "name": "managed_snapshot", "description": "description_of_managed_snapshot", "size": 1, "share_proto": "NFS", "user_id": "5c7bdb6eb0504d54a619acf8375c08ce", "project_id": "cadd7139bc3148b8973df097c0911016", "links": [ { "href": "http://127.0.0.1:8786/v2/907004508ef4447397ce6741a8f037c1/snapshots/22de7000-3a32-4fe1-bd0c-38d03f93dec3", "rel": "self" }, { "href": "http://127.0.0.1:8786/907004508ef4447397ce6741a8f037c1/snapshots/22de7000-3a32-4fe1-bd0c-38d03f93dec3", "rel": "bookmark" } ], "provider_location": "4045fee5-4e0e-408e-97f3-15e25239dbc9" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-set-metadata-request.json0000664000175000017500000000026700000000000026141 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "key1": "value1", "new_metadata_key": "new_information", "key": "value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/snapshot-set-metadata-response.json0000664000175000017500000000026700000000000026307 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "key1": "value1", "new_metadata_key": "new_information", "key": "value" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-show-metadata-item-response.json0000664000175000017500000000006400000000000027423 0ustar00zuulzuul00000000000000{ "meta": { "project": "my_app" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-show-metadata-response.json0000664000175000017500000000011600000000000026465 0ustar00zuulzuul00000000000000{ "metadata": { "project": "my_app", "aim": "doc" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-show-response.json0000664000175000017500000000163700000000000024720 0ustar00zuulzuul00000000000000{ "snapshot": { "status": "available", "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "user_id": "5c7bdb6eb0504d54a619acf8375c08ce", "name": "snapshot_share1", "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "bookmark" } ], "created_at": "2015-09-07T11:50:39.000000", "description": "Here is a snapshot of share Share1", "share_proto": "NFS", "share_size": 1, "id": "6d221c1d-0200-461e-8d20-24b4776b9ddb", "project_id": "cadd7139bc3148b8973df097c0911016", "size": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/api-ref/source/samples/snapshot-update-metadata-request.json0000664000175000017500000000020500000000000026620 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "new_metadata_key": "new_information" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-update-metadata-response.json0000664000175000017500000000020500000000000026766 0ustar00zuulzuul00000000000000{ "metadata": { "aim": "changed_doc", "project": "my_app", "new_metadata_key": "new_information" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-update-null-metadata-request.json0000664000175000017500000000003100000000000027565 0ustar00zuulzuul00000000000000{ "metadata": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-update-null-metadata-response.json0000664000175000017500000000003100000000000027733 0ustar00zuulzuul00000000000000{ "metadata": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-update-request.json0000664000175000017500000000025500000000000025047 0ustar00zuulzuul00000000000000{ "snapshot": { "display_name": "snapshot_Share1", "display_description": "I am changing a description also. 
Here is a snapshot of share Share1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshot-update-response.json0000664000175000017500000000170100000000000025212 0ustar00zuulzuul00000000000000{ "snapshot": { "status": "available", "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "name": "snapshot_Share1", "user_id": "5c7bdb6eb0504d54a619acf8375c08ce", "project_id": "cadd7139bc3148b8973df097c0911016", "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "bookmark" } ], "created_at": "2015-09-07T11:50:39.000000", "description": "I am changing a description also. Here is a snapshot of share Share1", "share_proto": "NFS", "share_size": 1, "id": "6d221c1d-0200-461e-8d20-24b4776b9ddb", "size": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshots-list-detailed-response.json0000664000175000017500000000376700000000000026655 0ustar00zuulzuul00000000000000{ "snapshots": [ { "status": "creating", "share_id": "d94a8548-2079-4be0-b21c-0a887acd31ca", "user_id": "5c7bdb6eb0504d54a619acf8375c08ce", "name": "snapshot_My_share", "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/086a1aa6-c425-4ecd-9612-391a3b1b9375", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/086a1aa6-c425-4ecd-9612-391a3b1b9375", "rel": "bookmark" } ], "created_at": "2015-09-07T11:55:09.000000", "description": "Here is a snapshot of share My_share", "share_proto": "NFS", "share_size": 1, "id": "086a1aa6-c425-4ecd-9612-391a3b1b9375", "project_id": "cadd7139bc3148b8973df097c0911016", "size": 1 
}, { "status": "available", "share_id": "406ea93b-32e9-4907-a117-148b3945749f", "user_id": "5c7bdb6eb0504d54a619acf8375c08ce", "name": "snapshot_share1", "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "bookmark" } ], "created_at": "2015-09-07T11:50:39.000000", "description": "Here is a snapshot of share Share1", "share_proto": "NFS", "share_size": 1, "id": "6d221c1d-0200-461e-8d20-24b4776b9ddb", "project_id": "cadd7139bc3148b8973df097c0911016", "size": 1 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/snapshots-list-response.json0000664000175000017500000000224000000000000025065 0ustar00zuulzuul00000000000000{ "snapshots": [ { "id": "086a1aa6-c425-4ecd-9612-391a3b1b9375", "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/086a1aa6-c425-4ecd-9612-391a3b1b9375", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/086a1aa6-c425-4ecd-9612-391a3b1b9375", "rel": "bookmark" } ], "name": "snapshot_My_share" }, { "id": "6d221c1d-0200-461e-8d20-24b4776b9ddb", "links": [ { "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "self" }, { "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/snapshots/6d221c1d-0200-461e-8d20-24b4776b9ddb", "rel": "bookmark" } ], "name": "snapshot_share1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/user-message-show-response.json0000664000175000017500000000206300000000000025453 0ustar00zuulzuul00000000000000{ "message": { "links": [ { "href": 
"http://192.168.122.180:8786/v2/2e3de76b49b444fd9dc7ca9f7048ce6b/messages/4b319d29-d5b7-4b6e-8e7c-8d6e53f3c3d5", "rel": "self" }, { "href": "http://192.168.122.180:8786/2e3de76b49b444fd9dc7ca9f7048ce6b/messages/4b319d29-d5b7-4b6e-8e7c-8d6e53f3c3d5", "rel": "bookmark" } ], "resource_id": "351cc796-2d79-4a08-b878-a8ed933b6b68", "message_level": "ERROR", "user_message": "allocate host: No storage could be allocated for this share request. Trying again with a different size or share type may succeed.", "expires_at": "2017-07-10T10:27:43.000000", "id": "4b319d29-d5b7-4b6e-8e7c-8d6e53f3c3d5", "created_at": "2017-07-10T10:26:43.000000", "detail_id": "002", "request_id": "req-24e7ccb6-a7d5-4ddd-a8e4-d8f72a4509c8", "project_id": "2e3de76b49b444fd9dc7ca9f7048ce6b", "resource_type": "SHARE", "action_id": "001" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/user-messages-list-response.json0000664000175000017500000000223400000000000025631 0ustar00zuulzuul00000000000000{ "messages": [ { "links": [ { "href": "http://192.168.122.180:8786/v2/2e3de76b49b444fd9dc7ca9f7048ce6b/messages/4b319d29-d5b7-4b6e-8e7c-8d6e53f3c3d5", "rel": "self" }, { "href": "http://192.168.122.180:8786/2e3de76b49b444fd9dc7ca9f7048ce6b/messages/4b319d29-d5b7-4b6e-8e7c-8d6e53f3c3d5", "rel": "bookmark" } ], "id": "4b319d29-d5b7-4b6e-8e7c-8d6e53f3c3d5", "resource_id": "351cc796-2d79-4a08-b878-a8ed933b6b68", "message_level": "ERROR", "user_message": "allocate host: No storage could be allocated for this share request. 
Trying again with a different size or share type may succeed.", "expires_at": "2017-07-10T10:27:43.000000", "created_at": "2017-07-10T10:26:43.000000", "detail_id": "002", "request_id": "req-24e7ccb6-a7d5-4ddd-a8e4-d8f72a4509c8", "project_id": "2e3de76b49b444fd9dc7ca9f7048ce6b", "resource_type": "SHARE", "action_id": "001" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/versions-get-version-response.json0000664000175000017500000000140100000000000026200 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "CURRENT", "updated": "2015-08-27T11:33:21Z", "links": [ { "href": "http://docs.openstack.org/", "type": "text/html", "rel": "describedby" }, { "href": "http://172.18.198.54:8786/v2/", "rel": "self" } ], "min_version": "2.0", "version": "2.15", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.share+json;version=1" } ], "id": "v2.0" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/samples/versions-index-response.json0000664000175000017500000000274300000000000025057 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "DEPRECATED", "updated": "2015-08-27T11:33:21Z", "links": [ { "href": "http://docs.openstack.org/", "type": "text/html", "rel": "describedby" }, { "href": "http://172.18.198.54:8786/v1/", "rel": "self" } ], "min_version": "", "version": "", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.share+json;version=1" } ], "id": "v1.0" }, { "status": "CURRENT", "updated": "2015-08-27T11:33:21Z", "links": [ { "href": "http://docs.openstack.org/", "type": "text/html", "rel": "describedby" }, { "href": "http://172.18.198.54:8786/v2/", "rel": "self" } ], "min_version": "2.0", "version": "2.15", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.share+json;version=1" 
} ], "id": "v2.0" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/scheduler-stats.inc0000664000175000017500000000572400000000000021516 0ustar00zuulzuul00000000000000.. -*- rst -*- Scheduler Stats - Storage Pools =============================== An administrator can list all back-end storage pools that are known to the scheduler service. List back-end storage pools ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/scheduler-stats/pools?pool={pool_name}&host={host_name}&backend={backend_name}&capabilities={capabilities}&share_type={share_type} Lists all back-end storage pools. If search options are provided, the pool list that is returned is filtered with these options. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - pool_name: backend_pool_query - host_name: backend_host_query - backend_name: backend_query - capabilities: backend_capabilities_query - share_type: share_type_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - backend: backend - host: backend_host - pool: pool - name: backend_name Response example ---------------- .. literalinclude:: samples/pools-list-response.json :language: javascript List back-end storage pools with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/scheduler-stats/pools/detail?pool={pool_name}&host={host_name}&backend={backend_name}&capabilities={capabilities}&share_type={share_type} Lists all back-end storage pools with details. If search options are provided, the pool list that is returned is filtered with these options. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - pool_name: backend_pool_query - host_name: backend_host_query - backend_name: backend_query - capabilities: backend_capabilities_query - share_type: share_type_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - pools: pools - name: backend_name - backend: backend - pool: pool - host: backend_host - capabilities: capabilities - qos: capability_qos - timestamp: timestamp - share_backend_name: capability_share_backend_name - server_pools_mapping: capability_server_pools_mapping - driver_handles_share_servers: capability_driver_handles_share_servers - driver_version: capability_driver_version - total_capacity_gb: capability_total_capacity_gb - free_capacity_gb: capability_free_capacity_gb - reserved_percentage: capability_reserved_percentage - vendor_name: capability_vendor_name - snapshot_support: capability_snapshot_support - replication_domain: capability_replication_domain - storage_protocol: capability_storage_protocol Response example ---------------- .. literalinclude:: samples/pools-list-detailed-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/security-services.inc0000664000175000017500000001770500000000000022076 0ustar00zuulzuul00000000000000.. -*- rst -*- Security services ================= You can create, update, view, and delete security services. A security service resource represents configuration information for clients for authentication and authorization (AuthN/AuthZ). For example, a share server will be the client for an existing security service such as LDAP, Kerberos, or Microsoft Active Directory. The Shared File Systems service supports three security service types: - ``ldap``. LDAP. - ``kerberos``. Kerberos. - ``active_directory``. Microsoft Active Directory. 
You can configure a security service with these options: - A DNS IP address. Some drivers may allow a comma separated list of multiple addresses, e.g. NetApp ONTAP. - An IP address or host name. - A domain. - An ou, the organizational unit. (available starting with API version 2.44) - A user or group name. - The password for the user, if you specify a user name. - A default AD site, optional (available starting with API version 2.76) A security service resource can also be given a user defined name and description. List security services ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/security-services Lists all security services. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - status: security_service_status - type: security_service_type - id: security_service_id - name: name Response example ---------------- .. literalinclude:: samples/security-services-list-response.json :language: javascript List security services with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/security-services/detail Lists all security services with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - status: security_service_status - id: security_service_id - project_id: project_id - type: security_service_type - name: name - description: description - dns_ip: security_service_dns_ip - user: security_service_user - password: security_service_password - domain: security_service_domain - ou: security_service_ou - server: security_service_server - default_ad_site: security_service_default_ad_site - updated_at: updated_at - created_at: created_at Response example ---------------- .. literalinclude:: samples/security-services-list-detailed-response.json :language: javascript Show security service details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/security-services/{security_service_id} Shows details for a security service. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - security_service_id: security_service_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - status: security_service_status - id: security_service_id - project_id: project_id - type: security_service_type - name: name - description: description - dns_ip: security_service_dns_ip - user: security_service_user - password: security_service_password - domain: security_service_domain - ou: security_service_ou - server: security_service_server - default_ad_site: security_service_default_ad_site - updated_at: updated_at - created_at: created_at Response example ---------------- .. literalinclude:: samples/security-service-show-response.json :language: javascript Create security service ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/security-services Creates a security service. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 422 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - type: security_service_type - name: name_request - description: description_request - dns_ip: security_service_dns_ip_request - user: security_service_user_request - password: security_service_password_request - domain: security_service_domain_request - ou: security_service_ou_request - server: security_service_server_request - default_ad_site: security_service_default_ad_site_request Request example --------------- .. literalinclude:: samples/security-service-create-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - status: security_service_status - id: security_service_id - project_id: project_id - type: security_service_type - name: name - description: description - dns_ip: security_service_dns_ip - user: security_service_user - password: security_service_password - domain: security_service_domain - ou: security_service_ou - server: security_service_server - default_ad_site: security_service_default_ad_site - updated_at: updated_at - created_at: created_at Response example ---------------- .. literalinclude:: samples/security-service-create-response.json :language: javascript Update security service ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/security-services/{security_service_id} Updates a security service. If the security service is in ``active`` state, you can update only the ``name`` and ``description`` attributes. A security service in ``active`` state is attached to a share network with an associated share server. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 422 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - security_service_id: security_service_id_path - type: security_service_type - name: name_request - description: description_request - dns_ip: security_service_dns_ip_request - user: security_service_user_request - password: security_service_password_request - domain: security_service_domain_request - ou: security_service_ou_request - server: security_service_server_request - default_ad_site: security_service_default_ad_site_request Request example --------------- .. literalinclude:: samples/security-service-update-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - status: security_service_status - id: security_service_id - project_id: project_id - type: security_service_type - name: name - description: description - dns_ip: security_service_dns_ip - user: security_service_user - password: security_service_password - domain: security_service_domain - ou: security_service_ou - server: security_service_server - default_ad_site: security_service_default_ad_site - updated_at: updated_at - created_at: created_at Response example ---------------- .. literalinclude:: samples/security-service-update-response.json :language: javascript Delete security service ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/security-services/{security_service_id} Deletes a security service. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - security_service_id: security_service_id_path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/services.inc0000664000175000017500000001040200000000000020214 0ustar00zuulzuul00000000000000.. 
-*- rst -*- Services ======== These APIs help in interacting with the Shared File Systems services, ``manila-scheduler``, ``manila-share`` and ``manila-data``. .. important:: For API versions 2.6 and prior, replace ``services`` in the URLs with ``os-services``. .. note:: Starting from API version 2.83, ``disabled`` field will be replaced by ``status`` field in response of enable or disable service request. Re-enabling a disabled service will automatically clear ``disable reason``. List services ~~~~~~~~~~~~~ .. rest_method:: GET /v2/services?host={host}&binary={binary}&zone={zone}&state={state}&status={status} Lists all services optionally filtered with the specified search options. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - host: service_host_query - binary: service_binary_query - zone: service_zone_query - state: service_state_query - status: service_status_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - services: services - id: service_id_response - status: service_status_response - binary: service_binary_response - zone: service_zone_response - host: service_host_response - state: service_state_response - updated_at: updated_at - disabled_reason: service_disable_reason_response - ensuring: service_ensuring_response Response example ---------------- .. literalinclude:: samples/services-list-response.json :language: javascript Enable service ~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/services/enable Enables a service. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - binary: service_enable_binary_request - host: service_enable_host_request Request example --------------- .. 
literalinclude:: samples/service-enable-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - host: service_enable_host_response - binary: service_binary_response - disabled: service_disabled_response - status: service_status_new_response - disabled_reason: service_disable_reason_response Response example ---------------- .. literalinclude:: samples/service-enable-response.json :language: javascript Disable service ~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/services/disable Disables a service. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - binary: service_disable_binary_request - host: service_disable_host_request - disabled_reason: service_disable_reason_request Request example --------------- .. literalinclude:: samples/service-disable-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - host: service_disable_host_response - binary: service_disable_binary_response - disabled: service_disabled_response - status: service_status_new_response - disabled_reason: service_disable_reason_response Response example ---------------- .. literalinclude:: samples/service-disable-response.json :language: javascript Ensure shares (Since API Version 2.86) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/services/ensure-shares Starts the ensure shares procedure for a manila-share binary. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - host: service_ensure_shares_host_request Request example --------------- .. 
literalinclude:: samples/service-ensure-shares-request.json :language: javascript Response parameters ------------------- There is no body content for the response. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-access-rule-metadata.inc0000664000175000017500000000316600000000000023466 0ustar00zuulzuul00000000000000.. -*- rst -*- Share access rule metadata (since API v2.45) ============================================ Updates, and unsets share access rule metadata. Update share access rule metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/share-access-rules/{access_id}/metadata .. versionadded:: 2.45 Updates the metadata for a share access rule. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - access_id: access_id_path - metadata: access_metadata Request example --------------- .. literalinclude:: samples/share-access-rules-update-metadata-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: access_metadata Response example ---------------- .. literalinclude:: samples/share-access-rules-update-metadata-response.json :language: javascript Unset share access rule metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-access-rules/{access_id}/metadata/{key} .. versionadded:: 2.45 Un-sets the metadata on a share access rule. To unset a metadata key value, specify only the key name in the URI. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - access_id: access_id_path - key: metadata_key_path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-access-rules.inc0000664000175000017500000000763400000000000022077 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _get-access-rules-after-2-45: Share access rules (since API v2.45) ==================================== Retrieve details about access rules .. note:: Starting from API version 2.82, access rule visibility can be restricted by a project user, or any user with "service" or "admin" roles. When restricted, the access_to and access_key fields will be redacted to other users. This redaction applies irrespective of the API version. Describe share access rule ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-access-rules/{access_id} .. versionadded:: 2.45 Retrieve details about a specified access rule. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - access_id: access_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - share_id: access_share_id - created_at: created_at - updated_at: updated_at - access_type: access_type - access_to: access_to - access_key: access_key_share_access_rules - state: state - access_level: access_level - id: access_rule_id - metadata: access_metadata Response example ---------------- .. literalinclude:: samples/share-access-rules-show-response.json :language: javascript List share access rules ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-access-rules?share_id={share-id} .. versionadded:: 2.45 Lists the share access rules on a share. .. note:: This API replaces the older :ref:`List share access rules ` API from version 2.45. .. 
note:: Starting from API version 2.82, access rule visibility can be restricted by a project user, or any user with "service" or "admin" roles. When restricted, the access_to and access_key fields will be redacted to other users. This redaction applies irrespective of the API version. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id_access_rules_query - metadata: metadata Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: access_metadata - access_type: access_type - access_key: access_key_share_access_rules - access_to: access_to - access_level: access_level - state: state - access_list: access_list - id: access_rule_id - created_at: created_at - updated_at: updated_at Response example ---------------- .. literalinclude:: samples/share-access-rules-list-response.json :language: javascript Update share access rule ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/share-access-rules/{access_id} .. versionadded:: 2.88 Update ``access_level`` of a specified access rule. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - access_id: access_id_path - access_level: access_level Request example --------------- .. literalinclude:: samples/share-access-rules-update-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - share_id: access_share_id - created_at: created_at - updated_at: updated_at - access_type: access_type - access_to: access_to - access_key: access_key_share_access_rules - state: state - access_level: access_level - id: access_rule_id - metadata: access_metadata Response example ---------------- .. literalinclude:: samples/share-access-rules-update-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-actions.inc0000664000175000017500000002706200000000000021143 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _get-access-rules-before-2-45: Share actions ============= Share actions include granting or revoking share access, listing the available access rules for a share, explicitly updating the state of a share, resizing a share and un-managing a share. As administrator, you can reset the state of a share and force- delete a share in any state. Use the ``policy.yaml`` file to grant permissions for this action to other roles. You can set the state of a share to one of these supported states: - ``available`` - ``error`` - ``creating`` - ``deleting`` - ``error_deleting`` If API version 1.0-2.6 is used then all share actions, defined below, should include prefix ``os-`` in top element of request JSON's body. For example: {"access_list": null} is valid for v2.7+. And {"os- access_list": null} is valid for v1.0-2.6 Grant access ~~~~~~~~~~~~ All manila shares begin with no access. Clients must be provided with explicit access via this API. To grant access, specify one of these supported share access levels: - ``rw``. Read and write (RW) access. - ``ro``. Read-only (RO) access. You must also specify one of these supported authentication methods: - ``ip``. Authenticates an instance through its IP address. The value specified should be a valid IPv4 or an IPv6 address, or a subnet in CIDR notation. 
A valid format is ``X:X:X:X:X:X:X:X``, ``X:X:X:X:X:X:X:X/XX``, ``XX.XX.XX.XX``, or ``XX.XX.XX.XX/XX``, etc. For example ``0.0.0.0/0`` or ``::/0``. .. important:: IPv6 based access is only supported with API version 2.38 and beyond. .. note:: Starting from API version 2.82, it is possible to lock the deletion, restrict the visibility of sensible fields of the access rules, and specify a reason for such locks while invoking the grant access API through the parameters ``lock_deletion``, ``lock_visibility`` and ``lock_reason`` respectively. - ``cert``. Authenticates an instance through a TLS certificate. Specify the TLS identity as the IDENTKEY. A valid value is any string up to 64 characters long in the common name (CN) of the certificate. The meaning of a string depends on its interpretation. - ``user``. Authenticates by a user or group name. A valid value is an alphanumeric string that can contain some special characters and is from 4 to 255 characters long. .. rest_method:: POST /v2/shares/{share_id}/action Grants access to a share. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - allow_access: allow_access - access_level: access_level - access_type: access_type - access_to: access_to - metadata: access_metadata_grant_access - lock_visibility: lock_visibility - lock_deletion: lock_deletion - lock_reason: resource_lock_lock_reason Request example --------------- .. literalinclude:: samples/share-actions-grant-access-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - share_id: access_share_id - created_at: created_at - updated_at: updated_at - access_type: access_type - access_to: access_to - access_key: access_key - access: access - access_level: access_level - id: access_rule_id - metadata: access_metadata Response example ---------------- .. literalinclude:: samples/share-actions-grant-access-response.json :language: javascript Revoke access ~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action The shared file systems service stores each access rule in its database and assigns it a unique ID. This ID can be used to revoke access after access has been requested. .. note:: In case the access rule had its deletion locked, it will be necessary to provide the ``unrestrict`` parameter in the revoke access request. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - deny_access: deny_access - access_id: access_id - unrestrict: unrestrict_access Request example --------------- .. literalinclude:: samples/share-actions-revoke-access-request.json :language: javascript List access rules (DEPRECATED) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. warning:: This API is deprecated starting with microversion 2.45 and requests to this API will fail with a 404 starting from microversion 2.45. Use :ref:`List share access rules ` API instead of this API from version 2.45. .. rest_method:: POST /v2/shares/{share_id}/action Lists access rules for a share. The Access ID returned is necessary to deny access. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - access_list: access_list Request example --------------- .. 
literalinclude:: samples/share-actions-list-access-rules-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - access_type: access_type - access_key: access_key - access_to: access_to - access_level: access_level - state: state - access_list: access_list - id: access_rule_id - created_at: created_at - updated_at: updated_at Response example ---------------- .. literalinclude:: samples/share-actions-list-access-rules-response.json :language: javascript Reset share state ~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action Administrator only. Explicitly updates the state of a share. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - reset_status: reset_status - status: share_status_request Request example --------------- .. literalinclude:: samples/share-actions-reset-state-request.json :language: javascript Force-delete share ~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action Administrator only. Force-deletes a share in any state. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - force_delete: share_force_delete Request example --------------- .. literalinclude:: samples/share-actions-force-delete-request.json :language: javascript Extend share ~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action Increases the size of a share. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - extend: extend - new_size: share_new_size - force: share_force_extend Request example --------------- .. literalinclude:: samples/share-actions-extend-request.json :language: javascript Shrink share ~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action Shrinks the size of a share. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - shrink: shrink - new_size: share_new_size Request example --------------- .. literalinclude:: samples/share-actions-shrink-request.json :language: javascript Unmanage share (since API v2.7) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action .. versionadded:: 2.7 Use this API to remove a share from the management of the Shared File Systems service without deleting the share. Administrator only. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Preconditions: - You should remove any snapshots and share replicas before attempting to unmanage a share. .. note:: Unmanaging shares that are created on top of share servers (i.e. created with share networks) is not supported prior to API version 2.49. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - unmanage: share_unmanage Request example --------------- .. literalinclude:: samples/share-actions-unmanage-request.json :language: javascript Response parameters ------------------- There is no body content for the response. 
Revert share to snapshot (since API v2.27) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action .. versionadded:: 2.27 Reverts a share to the specified snapshot, which must be the most recent one known to manila. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - snapshot_id: snapshot_id Request example --------------- .. literalinclude:: samples/share-actions-revert-to-snapshot-request.json :language: javascript Soft delete share (since API v2.69) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action .. versionadded:: 2.69 Soft delete a share to recycle bin. Preconditions - Share status must be ``available``, ``error`` or ``inactive`` - Share can't have any snapshot. - Share can't have a share group snapshot. - Share can't have dependent replicas. - You cannot soft delete share that already is in the Recycle Bin.. - You cannot soft delete a share that doesn't belong to your project. - You cannot soft delete a share is busy with an active task. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id Request example --------------- .. literalinclude:: samples/share-actions-soft-delete-request.json :language: javascript Restore share (since API v2.69) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action .. versionadded:: 2.69 Restore a share from recycle bin. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id Request example --------------- .. literalinclude:: samples/share-actions-restore-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-backups.inc0000664000175000017500000002125400000000000021130 0ustar00zuulzuul00000000000000.. -*- rst -*- Share backups (since API v2.80) =============================== Use the Shared File Systems service to make backups of shares. A share backup is a point-in-time, read-only copy of the data that is contained in a share. The APIs below allow controlling share backups. They are represented by a "backup" resource in the Shared File Systems service, and they can have user-defined metadata such as a name and description. You can create, restore, update, list and delete share backups. After you create a share backup, you can access backup and use it. You can also restore a backup into a share as long as certain criteria are met e.g. size. You can update a share backup to change its name or description. As administrator, you can also reset the state of a backup. Backup can be in one of the following states: - ``available`` - ``error`` - ``creating`` - ``deleting`` - ``restoring`` During a backup or restore operation, share can be in one of the following states: - ``available`` - ``backup_creating`` - ``backup_restoring`` - ``backup_restoring_error`` List share backups ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-backups .. versionadded:: 2.80 Lists all share backups. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 Request ------- .. 
rest_parameters:: parameters.yaml - share_id: backup_share_id_query - name~: name_inexact_query_versionless - description~: description_inexact_query_versionless - limit: limit - offset: offset - sort_key: sort_key_backup - sort_dir: sort_dir - status: backup_status_query - host: backup_host_query - topic: backup_topic_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: backup_id_response - share_id: backup_share_id - status: backup_status Response example ---------------- .. literalinclude:: samples/share-backups-list-response.json :language: javascript List share backups with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-backups/detail .. versionadded:: 2.80 Lists all share backups with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 Request ------- .. rest_parameters:: parameters.yaml - share_id: backup_share_id_query - name~: name_inexact_query_versionless - description~: description_inexact_query_versionless - limit: limit - offset: offset - sort_key: sort_key_backup - sort_dir: sort_dir - status: backup_status_query - host: backup_host_query - topic: backup_topic_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: backup_id_response - share_id: backup_share_id - status: backup_status - size: backup_size - availability_zone: backup_az - name: name - description: description - created_at: created_at - updated_at: updated_at - progress: backup_progress - restore_progress: backup_restore_progress Response example ---------------- .. literalinclude:: samples/share-backups-list-detailed-response.json :language: javascript Show share backup details ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-backups/{backup_id} .. versionadded:: 2.80 Shows details for a share backup. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 404 Request ------- .. rest_parameters:: parameters.yaml - backup_id: backup_id_request_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: backup_id_response - share_id: backup_share_id - status: backup_status - size: backup_size - availability_zone: backup_az - name: name - description: description - created_at: created_at - updated_at: updated_at - progress: backup_progress - restore_progress: backup_restore_progress Response example ---------------- .. literalinclude:: samples/share-backup-show-response.json :language: javascript Create share backup ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-backups .. versionadded:: 2.80 Creates a backup from a share. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 - 422 Request ------- .. rest_parameters:: parameters.yaml - share_id: backup_share_id - name: name_request - description: description_request - backup_options: backup_options_request Request example --------------- .. literalinclude:: samples/share-backup-create-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: backup_id_response - share_id: backup_share_id - status: backup_status - size: backup_size - availability_zone: backup_az - name: name - description: description - created_at: created_at - updated_at: updated_at - progress: backup_progress - restore_progress: backup_restore_progress Response example ---------------- .. literalinclude:: samples/share-backup-create-response.json :language: javascript Update share backup ~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/share-backups/{backup_id} .. versionadded:: 2.80 Updates a share backup. You can update these attributes: - ``display_name``, which changes the ``name`` of the share backup. 
- ``display_description``, which changes the ``description`` of the share backup. If you try to update other attributes, they retain their previous values. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - backup_id: backup_id_request_path - display_name: display_name_request - display_description: display_description_request Request example --------------- .. literalinclude:: samples/share-backup-update-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: backup_id_response - share_id: backup_share_id - status: backup_status - size: backup_size - availability_zone: backup_az - name: name - description: description - created_at: created_at - updated_at: updated_at - progress: backup_progress - restore_progress: backup_restore_progress Response example ---------------- .. literalinclude:: samples/share-backup-update-response.json :language: javascript Delete share backup ~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-backups/{backup_id} .. versionadded:: 2.80 Deletes a share backup. Preconditions - Share backup status must be ``available`` or ``error``. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - backup_id: backup_id_request_path Restore a share backup ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-backups/{backup_id}/action .. versionadded:: 2.80 Restores a share backup into original share. Preconditions - Share backup status must be ``available``. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - backup_id: backup_id_request_path Request example --------------- .. literalinclude:: samples/share-backup-restore-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - backup_id: backup_id_response - share_id: backup_share_id Response example ---------------- .. literalinclude:: samples/share-backup-restore-response.json :language: javascript Reset share backup status ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-backups/{backup_id}/action .. versionadded:: 2.80 Administrator only. Explicitly updates the state of a share backup. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id_request_path - status: backup_status_request Request example --------------- .. literalinclude:: samples/share-backup-reset-status-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-export-locations.inc0000664000175000017500000000443700000000000023016 0ustar00zuulzuul00000000000000.. -*- rst -*- Share export locations (since API v2.9) ======================================= Set of APIs used for viewing export locations of shares. These APIs allow retrieval of export locations belonging to non-active share replicas until API version 2.46. In and beyond API version 2.47, export locations of non-active share replicas can only be retrieved using the :ref:`Share Replica Export Locations APIs `. List export locations ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/shares/{share_id}/export_locations .. versionadded:: 2.9 Lists all export locations for a share. Response codes -------------- .. 
rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: export_location_id - share_instance_id: export_location_share_instance_id - path: export_location_path - is_admin_only: export_location_is_admin_only - preferred: export_location_preferred Response example ---------------- .. literalinclude:: samples/export-location-list-response.json :language: javascript Show single export location ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/shares/{share_id}/export_locations/​{export_location_id}​ .. versionadded:: 2.9 Show details of an export location belonging to a share. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - export_location_id: export_location_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: export_location_id - share_instance_id: export_location_share_instance_id - path: export_location_path - is_admin_only: export_location_is_admin_only - preferred: export_location_preferred - created_at: created_at - updated_at: updated_at Response example ---------------- .. literalinclude:: samples/export-location-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-group-snapshots.inc0000664000175000017500000002041600000000000022653 0ustar00zuulzuul00000000000000.. -*- rst -*- Share group snapshots (since API v2.31) ======================================= Use the Shared File Systems Service to make snapshots of share groups. 
A share group snapshot is a point-in-time, read-only copy of the data that is contained in a share group. You can create, update, and delete share group snapshots. After you create a share group snapshot, you can create a share group from it. You can update a share group snapshot to rename it, change its description, or update its state. As administrator, you can also reset the state of a group snapshot. Use the ``policy.yaml`` file to grant permissions for these actions to other roles. .. note:: Share Group Snapshot APIs are no longer considered experimental since API version 2.55. List share group snapshots ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-group-snapshots .. versionadded:: 2.31 Lists all share group snapshots. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - name: name_query - description: description_query - status: group_snapshot_status_query - share_group_id: share_group_id_query - limit: limit_query - offset: offset - sort_key: sort_key - sort_dir: sort_dir Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: group_snapshot_id - name: name - links: group_snapshot_links Response example ---------------- .. literalinclude:: samples/share-group-snapshots-list-response.json :language: javascript List share group snapshots with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-group-snapshots/detail .. versionadded:: 2.31 Lists all share group snapshots with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - name: name_query - description: description_query - status: group_snapshot_status_query - share_group_id: share_group_id_query - limit: limit_query - offset: offset - sort_key: sort_key - sort_dir: sort_dir Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: group_snapshot_id - project_id: project_id - status: group_snapshot_status_required - share_group_id: share_group_id - name: name - description: description - created_at: created_at - members: group_snapshot_members - links: group_snapshot_links Response example ---------------- .. literalinclude:: samples/share-group-snapshots-list-detailed-response.json :language: javascript List share group snapshots members ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-group-snapshots/{group_snapshot_id}/members .. versionadded:: 2.31 Lists all share group snapshots members. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_snapshot_id: group_snapshot_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: group_snapshot_id - created_at: created_at - project_id: project_id - size: snapshot_size - share_protocol: snapshot_share_protocol - name: name - share_group_snapshot_id: group_snapshot_id - share_id: snapshot_share_id Response example ---------------- .. literalinclude:: samples/share-group-snapshots-list-members-response.json :language: javascript Show share group snapshot details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-group-snapshots/{group_snapshot_id} .. versionadded:: 2.31 Shows details for a share group snapshot. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_snapshot_id: group_snapshot_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: group_snapshot_id - project_id: project_id - status: group_snapshot_status_required - share_group_id: share_group_id - name: name - description: description - created_at: created_at - members: group_snapshot_members - links: group_snapshot_links Response example ---------------- .. literalinclude:: samples/share-group-snapshot-show-response.json :language: javascript Create share group snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-group-snapshots .. versionadded:: 2.31 Creates a snapshot from a share. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - name: name_request - description: description_request - share_group_id: share_group_id Request example --------------- .. literalinclude:: samples/share-group-snapshot-create-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: group_snapshot_id - project_id: project_id - status: group_snapshot_status_required - share_group_id: share_group_id - name: name - description: description - created_at: created_at - members: group_snapshot_members - links: group_snapshot_links Response example ---------------- .. literalinclude:: samples/share-group-snapshot-create-response.json :language: javascript Reset share group snapshot state ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-group-snapshots/{group_snapshot_id}/action .. versionadded:: 2.31 Administrator only. Explicitly updates the state of a share group snapshot. 
Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_snapshot_id: group_snapshot_id_path - status: group_snapshot_status_required Request example --------------- .. literalinclude:: samples/snapshot-actions-reset-state-request.json :language: javascript Update share group snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/share-group-snapshots/{group_snapshot_id} .. versionadded:: 2.31 Updates a share group snapshot. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_snapshot_id: group_snapshot_id_path - name: name_request - description: description_request Request example --------------- .. literalinclude:: samples/snapshot-update-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: group_snapshot_id - project_id: project_id - status: group_snapshot_status_required - share_group_id: share_group_id - name: name - description: description - created_at: created_at - members: group_snapshot_members - links: group_snapshot_links Response example ---------------- .. literalinclude:: samples/share-group-snapshot-update-response.json :language: javascript Delete share group snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-group-snapshots/{group_snapshot_id} .. versionadded:: 2.31 Deletes a share group snapshot. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - group_snapshot_id: group_snapshot_id_path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-group-types.inc0000664000175000017500000002246000000000000021776 0ustar00zuulzuul00000000000000.. -*- rst -*- Share group types (since API v2.31) =================================== A share group type enables you to filter or choose back ends before you create a share group. You can set share group types as either public or private. By default a share group type is created as publicly accessible. Set ``share_group_type_access:is_public`` to ``False`` to make a share group type private. You can manage access to the private share group types for different projects. You can add access, remove access, and get information about access for a private share group type. Administrators can specify which `share type(s) <#experimental-apis>`_ a given group type may contain. If Administrators do not explicitly associate share types with a given share group type, the service will associate the share type configured as the ``default_share_type`` with the share group type. When creating a share group, the scheduler picks one of the back ends that match a combination of the extra specs in the specified share type(s) and share group type. Administrators can also set additional group extra specifications for a share group type for the following purposes: - Filter back ends by group scheduler. Specify these group extras specifications in this format: ``group_specs=value``. For example, ``consistent_snapshot_support=true``. .. note:: Share Group Type APIs are no longer considered experimental since API version 2.55. List share group types ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-group-types .. versionadded:: 2.31 Lists all share group types. Response codes -------------- .. 
rest_status_code:: success status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - is_public: is_public_query - group_specs: group_specs_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_group_type_id_required - is_public: share_group_type_is_public - share_types: share_types_1 - name: share_group_type_name - group_specs: group_specs_required - is_default: is_group_type_default Response example ---------------- .. literalinclude:: samples/share-group-types-list-response.json :language: javascript List default share group types ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-group-types/default .. versionadded:: 2.31 Lists default share group types. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_group_type_id_required - is_public: share_group_type_is_public - share_types: share_types_1 - name: share_group_type_name - group_specs: group_specs_required - is_default: is_group_type_default Response example ---------------- .. literalinclude:: samples/share-group-types-default-list-response.json :language: javascript List share group types extra specs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-group-types/{share_group_type_id}/group-specs .. versionadded:: 2.31 Lists the extra specifications for a share group type. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_type_id: share_group_type_id_required Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - group_specs: group_specs_required Response example ---------------- .. literalinclude:: samples/share-group-types-group-specs-list-response.json :language: javascript Create share group type ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-group-types .. versionadded:: 2.31 Creates a share group type. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_types: share_types_1 - name: share_group_type_name_request - group_specs: group_specs - is_public: share_group_type_is_public_request Request example --------------- .. literalinclude:: samples/share-group-type-create-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_group_type_id_required - group_specs: group_specs_required - name: share_group_type_name - share_types: share_types_1 - is_public: share_group_type_is_public - is_default: is_group_type_default Response example ---------------- .. literalinclude:: samples/share-group-type-create-response.json :language: javascript Show share group type access details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-group-types/{share_group_type_id}/access .. versionadded:: 2.31 Shows access details for a share group type. You can view access details for private share group types only. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_type_id: share_group_type_id_required Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - share_group_type_id: share_group_type_id_required - project_id: project_id_type_access Response example ---------------- .. literalinclude:: samples/share-group-types-list-access-response.json :language: javascript Set extra spec for share group type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-group-types/{share_group_type_id}/group-specs .. versionadded:: 2.31 Sets an extra specification for the share group type. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_type_id: share_group_type_id_required - group_specs: group_specs_required Request example --------------- .. literalinclude:: samples/share-group-type-set-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - group_specs: group_specs_required Response example ---------------- .. literalinclude:: samples/share-group-type-set-response.json :language: javascript Unset an group spec ~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-group-types/{share_group_type_id}/group-specs/{group_spec_key} .. versionadded:: 2.31 Unsets an extra specification for the share type. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_type_id: share_group_type_id_required - group_spec_key: group_spec_key Add share group type access ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-group-types/{share_group_type_id}/action .. versionadded:: 2.31 Adds share group type access for a project. You can add access to private share group types only. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_type_id: share_group_type_id_path - addProjectAccess: add_project_access - project: project_id_type_access_grant_request Request example --------------- .. literalinclude:: samples/share-group-type-grant-access-request.json :language: javascript Remove share group type access ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-group-types/{share_group_type_id}/action .. versionadded:: 2.31 Removes share group type access from a project. You can remove access from private share group types only. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_type_id: share_group_type_id_path - removeProjectAccess: remove_project_access - project: project_id_type_access_revoke_request Request example --------------- .. literalinclude:: samples/share-group-type-revoke-access-request.json :language: javascript Delete share group type ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-group-types/{share_group_type_id} .. versionadded:: 2.31 Deletes a share group type. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_type_id: share_group_type_id_path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-groups.inc0000664000175000017500000002221300000000000021013 0ustar00zuulzuul00000000000000.. 
-*- rst -*- Share groups (since API v2.31) ============================== The share groups enable you to create a group of volumes and manage them together. A project can put shares be used in the same application together in a share group, such as consistency group snapshot, clone, backup, migrate, replicate, retype, etc. Shares should be able to become a part of a share group only on share creation step. If share was created without provided ``share_group_id`` then this share won't be able to become a part of any share group. You can create a share group and associate it with multiple shares, list share groups, and show information for delete a share group. .. note:: Share Group APIs are no longer considered experimental since API version 2.55. The ``availability_zone_id`` and ``consistent_snapshot_support`` fields were added to ``share_group`` object since version 2.34. List share groups ~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-groups .. versionadded:: 2.31 Lists all share groups. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - name: name_query - description: description_query - status: share_group_status_query - share_server_id: share_server_id_query - snapshot_id: snapshot_id_query - host: backend_host_query - share_network_id: share_network_id_query - share_group_type_id: share_group_type_id_query - share_group_snapshot_id: source_share_group_snapshot_id_query - share_types: share_types_query - limit: limit_query - offset: offset - sort_key: sort_key - sort_dir: sort_dir - name~: name_inexact_query - description~: description_inexact_query Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_group_id - links: share_group_links - name: name - status: share_group_status - description: description Response example ---------------- .. literalinclude:: samples/share-groups-list-response.json :language: javascript List share groups with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-groups/detail .. versionadded:: 2.31 Lists all share groups with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - name: name_query - description: description_query - status: share_group_status_query - share_server_id: share_server_id_query - snapshot_id: snapshot_id_query - host: backend_host_query - share_network_id: share_network_id_query - share_group_type_id: share_group_type_id_query - share_group_snapshot_id: source_share_group_snapshot_id_query - share_types: share_types_query - limit: limit_query - offset: offset - sort_key: sort_key - sort_dir: sort_dir - name~: name_inexact_query - description~: description_inexact_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_group_id - name: name - created_at: created_at - status: share_group_status - description: description - project_id: project_id - host: backend_host - share_group_type_id: share_group_type_id_required - source_share_group_snapshot_id: source_share_group_snapshot_id_response - share_network_id: share_network_id - share_types: share_types_1 - links: share_group_links - availability_zone: availability_zone_id_share_group - consistent_snapshot_support: consistent_snapshot_support Response example ---------------- .. literalinclude:: samples/share-groups-list-detailed-response.json :language: javascript Show share group details ~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: GET /v2/share-groups/{share_group_id} .. versionadded:: 2.31 Shows details for a share group. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_id: share_group_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_group_id - name: name - created_at: created_at - status: share_group_status - description: description - project_id: project_id - host: backend_host - share_group_type_id: share_group_type_id_required - source_share_group_snapshot_id: source_share_group_snapshot_id_response - share_network_id: share_network_id - share_types: share_types_1 - links: share_group_links - availability_zone: availability_zone_id_share_group - consistent_snapshot_support: consistent_snapshot_support Response example ---------------- .. literalinclude:: samples/share-group-show-response.json :language: javascript Create share group ~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-groups .. versionadded:: 2.31 Creates a share group. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - name: name_request - description: description_request - share_types: share_types - share_group_type: share_group_type_id - share_network: share_network_id_request - source_share_group_snapshot: source_share_group_snapshot_id - availability_zone: availability_zone_id_share_group Request example --------------- .. literalinclude:: samples/share-group-create-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_group_id - name: name - created_at: created_at - status: share_group_status - description: description - project_id: project_id - host: share_group_host - share_group_type_id: share_group_type_id_required - source_share_group_snapshot_id: source_share_group_snapshot_id_response - share_network_id: share_network_id - share_types: share_types_1 - links: share_group_links - availability_zone: availability_zone_id_share_group - consistent_snapshot_support: consistent_snapshot_support Response example ---------------- .. literalinclude:: samples/share-group-create-response.json :language: javascript Reset share group state ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-groups/{share_group_id}/action .. versionadded:: 2.31 Administrator only. Explicitly updates the state of a share group. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_id: share_group_id_path - reset_status: reset_status - status: share_group_status Request example --------------- .. literalinclude:: samples/share-group-reset-state-request.json :language: javascript Update share group ~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/share-groups/{share_group_id} .. versionadded:: 2.31 Updates a share group. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_id: share_group_id_path - name: name_request - description: description_request Request example --------------- .. literalinclude:: samples/share-group-update-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_group_id - name: name - created_at: created_at - status: share_group_status - description: description - project_id: project_id - host: share_group_host - share_group_type_id: share_group_type_id_required - source_share_group_snapshot_id: source_share_group_snapshot_id - share_network_id: share_network_id - share_types: share_types_1 - links: share_group_links - availability_zone: availability_zone_id_share_group - consistent_snapshot_support: consistent_snapshot_support Response example ---------------- .. literalinclude:: samples/share-group-update-response.json :language: javascript Delete share group ~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-groups/{share_group_id} .. versionadded:: 2.31 Deletes a share group. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_group_id: share_group_id_path - force: share_force_delete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-instance-export-locations.inc0000664000175000017500000000433400000000000024614 0ustar00zuulzuul00000000000000.. -*- rst -*- Share instance export locations (since API v2.9) ================================================ Set of APIs used to view export locations of share instances. By default, these APIs are admin-only. Use the ``policy.yaml`` file to grant permissions for these actions to other roles. Lists all export locations for a share instance. Show details of an export location belonging to a share instance. List export locations ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share_instances/{share_instance_id}/export_locations .. versionadded:: 2.9 Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_instance_id: share_instance_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: export_location_id - share_instance_id: export_location_share_instance_id - path: export_location_path - is_admin_only: export_location_is_admin_only - preferred: export_location_preferred Response example ---------------- .. literalinclude:: samples/export-location-list-response.json :language: javascript Show single export location ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share_instances/{share_instance_id}/export_locations/{export_location_id} .. versionadded:: 2.9 Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_instance_id: share_instance_id - export_location_id: export_location_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: export_location_id - share_instance_id: export_location_share_instance_id - path: export_location_path - is_admin_only: export_location_is_admin_only - preferred: export_location_preferred - created_at: created_at - updated_at: updated_at Response example ---------------- .. literalinclude:: samples/export-location-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-instances.inc0000664000175000017500000001117300000000000021466 0ustar00zuulzuul00000000000000.. -*- rst -*- Share instances (since API v2.3) ================================ A share instance is an internal representation for a share. A share that is replicated or is in the process of being migrated is physically stored in multiple locations. 
Each of these individual locations is called an "instance" within the Shared File Systems service. End users need not be concerned with this internal representation. As an administrator, you can list, show information for, explicitly set the state of, and force-delete share instances. Use the ``policy.yaml`` file to grant permissions for these actions to other roles. List share instances ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share_instances .. versionadded:: 2.3 Lists all share instances. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - export_location_id: export_location_id_query - export_location_path: export_location_path_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - status: share_status_response - access_rules_status: access_rules_status - share_id: share_id_share_instances_response - progress: progress_share_instance - availability_zone: availability_zone - created_at: created_at - updated_at: updated_at - replica_state: replica_state - export_location: export_location - export_locations: export_locations - cast_rules_to_readonly: share_instance_cast_rules_to_readonly - share_network_id: share_network_id - share_server_id: share_server_id - host: host_resource_response - access_rules_status: access_rules_status - share_type_id: share_type_id - id: id_13 Response example ---------------- .. literalinclude:: samples/share-instances-list-response.json :language: javascript Show share instance details ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share_instances/{share_instance_id} .. versionadded:: 2.3 Shows details for a share instance. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - share_instance_id: share_instance_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - status: share_status_response - access_rules_status: access_rules_status - share_id: share_id_share_instances_response - progress: progress_share_instance - availability_zone: availability_zone - created_at: created_at - updated_at: updated_at - replica_state: replica_state - export_location: export_location - export_locations: export_locations - cast_rules_to_readonly: share_instance_cast_rules_to_readonly - share_network_id: share_network_id - share_server_id: share_server_id - host: host_resource_response - id: id_13 Response example ---------------- .. literalinclude:: samples/share-show-instance-response.json :language: javascript Reset share instance state ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share_instances/{share_instance_id}/action .. versionadded:: 2.3 Administrator only. Explicitly updates the state of a share instance. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_instance_id: share_instance_id - status: share_status_request Request example --------------- .. literalinclude:: samples/share-instance-actions-reset-state-request.json :language: javascript Force-delete share instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share_instances/{share_instance_id}/action .. versionadded:: 2.3 Administrator only. Force-deletes a share instance. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - share_instance_id: share_instance_id - force_delete: force_delete_2 Request example --------------- .. literalinclude:: samples/share-instance-actions-force-delete-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-metadata.inc0000664000175000017500000001066500000000000021264 0ustar00zuulzuul00000000000000.. -*- rst -*- Share metadata ============== Shows, sets, updates, and unsets share metadata. Show all share metadata ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/shares/{share_id}/metadata Shows all the metadata for a share, as key and value pairs. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response example ---------------- .. literalinclude:: samples/share-show-metadata-response.json :language: javascript Show share metadata item ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/shares/{share_id}/metadata/{key} Retrieves a specific metadata item from a share's metadata by its key. If the specified key does not represent a valid metadata item, the API will respond with HTTP 404. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - key: metadata_key_request Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_item Response example ---------------- .. 
literalinclude:: samples/share-show-metadata-item-response.json :language: javascript Set share metadata ~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/metadata Allows adding new metadata items as key-value pairs. This API will not delete pre-existing metadata items. If the request object contains metadata items that already exist, they will be updated with new values as specified in the request object. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - metadata: metadata_request Request example --------------- .. literalinclude:: samples/share-set-metadata-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response example ---------------- .. literalinclude:: samples/share-set-metadata-response.json :language: javascript Update share metadata ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/shares/{share_id}/metadata Replaces the metadata for a given share with the metadata (specified as key-value pairs) in the request object. All pre-existing metadata of the share will be deleted and replaced with the new metadata supplied. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - metadata: metadata_request Request example --------------- .. literalinclude:: samples/share-update-metadata-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response example ---------------- .. 
literalinclude:: samples/share-update-metadata-response.json :language: javascript To delete all existing metadata items on a given share, the request object needs to specify an empty metadata object: Request example --------------- .. literalinclude:: samples/share-update-null-metadata-request.json :language: javascript Response example ---------------- .. literalinclude:: samples/share-update-null-metadata-response.json :language: javascript Delete share metadata item ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/shares/{share_id}/metadata/{key} Deletes a single metadata item on a share, idetified by its key. If the specified key does not represent a valid metadata item, the API will respond with HTTP 404. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - key: metadata_key_request ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-migration.inc0000664000175000017500000001355400000000000021475 0ustar00zuulzuul00000000000000.. -*- rst -*- Share Migration (since API v2.22) ================================= The Share Migration API is an administrator-only experimental API that allows the invoker to select a destination pool to migrate a share to, while still allowing clients to access the source "share instance" during migration. Share migration is implemented in a 2-phase approach. The first phase of migration is when operations that take the longest are performed, such as data copying or replication. After first phase of data copying is complete, it is up to administrator to trigger the second phase, often referred to as switchover phase, which may perform operations such as final sync and deleting the source share instance. 
During the data copy phase, user can remain connected to the source, and may have to reconnect after the switchover phase. In order to migrate a share, manila may employ one of two mechanisms which are driver-assisted migration and host-assisted migration. - ``Driver-assisted migration``: This mechanism is intended to make use of driver optimizations to migrate shares between pools of the same storage vendor. This mechanism allows migrating shares nondisruptively while the source remains writable, preserving all filesystem metadata and snapshots. The migration workload is performed in the storage back end. - ``Host-assisted migration``: This mechanism is intended to migrate shares in an agnostic manner between two different pools, regardless of storage vendor. The implementation for this mechanism does not offer the same properties found in driver-assisted migration. In host-assisted migration, the source remains readable, snapshots must be deleted prior to starting the migration, filesystem metadata may be lost, and the clients will get disconnected by the end of migration. The migration workload is performed by the Data Service, which is a dedicated manila service for intensive data operations. These methods provide different capabilities and affect how efficiently the data copy and switchover are achieved. Generally speaking, driver-assisted migration is limited to homogenous storage backends and when available, is expected to be faster and more efficient than host-assisted migration. Driver-assisted migration occurs on the storage backend, while host-assisted migration occurs on the OpenStack nodes running the manila data service. When starting a migration, ``driver-assisted migration`` is attempted first. If the shared file system service detects it is not possible to perform the ``driver-assisted migration``, it proceeds to attempt ``host-assisted migration``. Possible use cases for data migration include: - Migrating shares along with snapshots. 
- Bring down a physical storage device for maintenance - Free up space in a thinly-provisioned back end. - Load balancing among share servers. - Retyping a share .. note:: Share Migration APIs are `experimental APIs <#experimental-apis>`_ . Start Migration ~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action .. versionadded:: 2.22 Initiates share migration. This API will initiate the share data copy to the new host. The copy operation is non-disruptive. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - share_id: share_id - force_host_assisted_migration: force_host_assisted_migration - preserve_snapshots: preserve_snapshots - preserve_metadata: preserve_metadata - nondisruptive: nondisruptive - writable: writable - new_share_type_id: new_share_type_id - new_share_network_id: new_share_network_id - host: host Request example --------------- .. literalinclude:: samples/share-migration-start-request.json :language: javascript Complete Migration ~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action .. versionadded:: 2.22 Completes share migration. This API will initiate the switch-over from the source to destination share. This operation can be disruptive. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - share_id: share_id Request example --------------- .. literalinclude:: samples/share-migration-complete-request.json :language: javascript Migration Get Process ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action .. versionadded:: 2.22 Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - share_id: share_id - project_id: project_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - details: migration_progress_details - total_progress: total_progress - task_state: task_state Request example --------------- .. literalinclude:: samples/share-migration-get-process-request.json :language: javascript Response_parameters ------------------- .. literalinclude:: samples/share-migration-get-process-response.json :language: javascript Cancel Migration ~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/{share_id}/action .. versionadded:: 2.22 Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - share_id: share_id - project_id: project_id Request example --------------- .. literalinclude:: samples/share-migration-cancel-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-network-subnets-metadata.inc0000664000175000017500000001276100000000000024433 0ustar00zuulzuul00000000000000.. -*- rst -*- Share network subnets metadata (since API v2.78) ================================================ Shows, sets, updates, and unsets share network subnets metadata. Show all share network subnets metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-networks/{share_network_id}/subnets/{share_network_subnet_id}/metadata Shows all share network subnet metadata in the given share network subnet. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - share_network_subnet_id: share_network_subnet_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response example ---------------- .. literalinclude:: samples/share-network-subnet-show-metadata-response.json :language: javascript Show share network subnet metadata item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-networks/{share_network_id}/subnets/{share_network_subnet_id}/metadata/{key} Retrieves a specific metadata item from a share network subnet's metadata by its key. If the specified key does not represent a valid metadata item, the API will respond with HTTP 404. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - share_network_subnet_id: share_network_subnet_id_path - key: metadata_key_request Response parameters ------------------- .. rest_parameters:: parameters.yaml - meta: metadata_item Response example ---------------- .. literalinclude:: samples/share-show-network-subnet-metadata-item-response.json :language: javascript Set share network subnet metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-networks/{share_network_id}/subnets/{share_network_subnet_id}/metadata Allows adding new metadata items as key-value pairs. This API will not delete pre-existing metadata items. If the request object contains metadata items that already exist, they will be updated with new values as specified in the request object. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - share_network_subnet_id: share_network_subnet_id_path - metadata: metadata_request Request example --------------- .. literalinclude:: samples/share-network-subnet-set-metadata-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response example ---------------- .. literalinclude:: samples/share-network-subnet-set-metadata-response.json :language: javascript Update share network subnet metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/share-networks/{share_network_id}/subnets/{share_network_subnet_id}/metadata Replaces the metadata for a given share network subnet with the metadata (specified as key-value pairs) in the request object. All pre-existing metadata of the share network subnet will be deleted and replaced with the new metadata supplied. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - share_network_subnet_id: share_network_subnet_id_path - metadata: metadata_request Request example --------------- .. literalinclude:: samples/share-network-subnet-update-metadata-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response example ---------------- .. literalinclude:: samples/share-network-subnet-update-metadata-response.json :language: javascript To delete all existing metadata items on a given share network subnet, the request object needs to specify an empty metadata object: Request example --------------- .. literalinclude:: samples/share-network-subnet-update-null-metadata-request.json :language: javascript Response example ---------------- .. 
literalinclude:: samples/share-network-subnet-update-null-metadata-response.json :language: javascript Delete share network subnet metadata item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-networks/{share_network_id}/subnets/{share_network_subnet_id}/metadata/{key} Deletes a single metadata item on a share network subnet, identified by its key. If the specified key does not represent a valid metadata item, the API will respond with HTTP 404. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - share_network_subnet_id: share_network_subnet_id_path - key: metadata_key_request ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-network-subnets.inc0000664000175000017500000001535200000000000022654 0ustar00zuulzuul00000000000000.. -*- rst -*- Share network subnets (since API v2.51) ======================================= A share network subnet stores network information to create and manage share servers. You can list all subnets that pertain to a share network, also create, delete and view a share network subnet. When you create a share network, you may optionally specify an associated neutron network, subnetwork and an availability zone. If you do not specify an availability zone, the subnet you are creating will have the availability zone field empty and this subnet will be considered default. The default subnet is recognized by the Shared File Systems service as a subnet that spans all the available storage availability zones. .. note:: A share network can have only one default subnet. Likewise, a share network cannot have more than one subnet in a given availability zone. 
For more information about supported plug-ins for share network subnets, see `Manila Network Plugins `_. A share network subnet resource has these attributes: - The IP block in Classless Inter-Domain Routing (CIDR) notation from which to allocate the network. - The IP version of the network. - The network type, which is ``vlan``, ``vxlan``, ``gre``, or ``flat``. - If the network uses segmentation, a segmentation identifier. For example, VLAN, VXLAN, and GRE networks use segmentation. - The availability zone, when ``null`` signifies that the share network subnet is available across all storage availability zones known to the Shared File Systems Service. List share network subnets ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-networks/{share_network_id}/subnets .. versionadded:: 2.51 Lists all share network subnets in the given share network. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_network_subnet_id - availability_zone: share_network_subnet_availability_zone - share_network_id: share_network_subnet_share_network_id - share_network_name: share_network_subnet_share_network_name - created_at: created_at - updated_at: updated_at - segmentation_id: share_network_subnet_segmentation_id - neutron_net_id: share_network_subnet_neutron_net_id - neutron_subnet_id: share_network_subnet_neutron_subnet_id - ip_version: share_network_subnet_ip_version - cidr: share_network_subnet_cidr - network_type: share_network_subnet_network_type - gateway: share_network_subnet_gateway - mtu: share_network_subnet_mtu Response example ---------------- .. 
literalinclude:: samples/share-network-subnet-list-response.json :language: javascript Show share network subnet details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-networks/{share_network_id}/subnets/{share_network_subnet_id} .. versionadded:: 2.51 Shows details of a share network subnet. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - share_network_subnet_id: share_network_subnet_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_network_subnet_id - neutron_net_id: share_network_subnet_neutron_net_id - neutron_subnet_id: share_network_subnet_neutron_subnet_id - network_type: share_network_subnet_network_type - segmentation_id: share_network_subnet_segmentation_id - cidr: share_network_subnet_cidr - ip_version: share_network_subnet_ip_version - created_at: created_at - updated_at: updated_at - gateway: share_network_subnet_gateway - mtu: share_network_subnet_mtu - availability_zone: share_network_subnet_availability_zone - share_network_id: share_network_subnet_share_network_id - share_network_name: share_network_subnet_share_network_name Response example ---------------- .. literalinclude:: samples/share-network-subnet-show-response.json :language: javascript Create share network subnet ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-networks/{share_network_id}/subnets .. versionadded:: 2.51 Creates a share network subnet in the given share network. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 404 - 409 - 500 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - neutron_net_id: neutron_net_id_request - neutron_subnet_id: neutron_subnet_id_request - availability_zone: availability_zone_request Request example --------------- .. literalinclude:: samples/share-network-subnet-create-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_network_subnet_id - neutron_net_id: share_network_subnet_neutron_net_id - neutron_subnet_id: share_network_subnet_neutron_subnet_id - network_type: share_network_subnet_network_type - segmentation_id: share_network_subnet_segmentation_id - cidr: share_network_subnet_cidr - ip_version: share_network_subnet_ip_version - created_at: created_at - updated_at: updated_at - gateway: share_network_subnet_gateway - mtu: share_network_subnet_mtu - availability_zone: share_network_subnet_availability_zone - share_network_id: share_network_subnet_share_network_id - share_network_name: share_network_subnet_share_network_name Response example ---------------- .. literalinclude:: samples/share-network-subnet-create-response.json :language: javascript Delete share network subnet ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-networks/{share_network_id}/subnets/{share_network_subnet_id} .. versionadded:: 2.51 Deletes a share network subnet. Preconditions - You cannot delete a share network subnet if it has shares created/exported on it. - You cannot delete a share network subnet if it has share servers with the flag ``is_auto_deletable`` set to False. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 404 - 409 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - share_network_subnet_id: share_network_subnet_id_path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-networks.inc0000664000175000017500000005067100000000000021361 0ustar00zuulzuul00000000000000.. -*- rst -*- Share networks ============== A share network resource stores network information to create and manage share servers. Shares created with share networks are exported on these networks with the help of share servers. You can create, update, view, and delete a share network. When you create a share network, you may optionally specify an associated neutron network and subnetwork. For more information about supported plug-ins for share networks, see `Manila Network Plugins `_. A share network resource has these attributes: - The IP block in Classless Inter-Domain Routing (CIDR) notation from which to allocate the network. - The IP version of the network. - The network type, which is ``vlan``, ``vxlan``, ``gre``, or ``flat``. - If the network uses segmentation, a segmentation identifier. For example, VLAN, VXLAN, and GRE networks use segmentation. A share network resource can also have a user defined name and description. .. note:: Since API version 2.51, a share network is allowed to span multiple subnets and the fields ``neutron_net_id``, ``neutron_subnet_id``, ``network_type``, ``cidr``, ``ip_version``, ``gateway``, ``segmentation_id`` and ``mtu`` were moved from the share network to the subnet. The share network subnet also contains a an attribute called ``availability_zone``. List share networks ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-networks Lists all share networks. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - name~: name_inexact_query - description~: description_inexact_query - name: name_query - description: description_query - created_since: created_since_query - created_before: created_before_query - security_service_id: security_service_query - nova_net_id: nova_net_id_query - neutron_net_id: neutron_net_id_query - neutron_subnet_id: neutron_subnet_id_query - network_type: network_type_query - segmentation_id: segmentation_id_query - cidr: cidr_query - ip_version: ip_version_query - offset: offset - limit: limit Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_network_id_share_networks_response - name: name Response example ---------------- .. literalinclude:: samples/share-networks-list-response.json :language: javascript List share networks with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-networks/detail Lists all share networks with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - name~: name_inexact_query - description~: description_inexact_query - created_since: created_since_query - created_before: created_before_query - nova_net_id: nova_net_id_query - neutron_net_id: neutron_net_id_query - neutron_subnet_id: neutron_subnet_id_query - network_type: network_type_query - segmentation_id: segmentation_id_query - cidr: cidr_query - ip_version: ip_version_query - offset: offset - limit: limit - security_service_id: security_service_query Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_network_id_share_networks_response - project_id: project_id - neutron_net_id: neutron_net_id - neutron_subnet_id: neutron_subnet_id - network_type: network_type - segmentation_id: segmentation_id - cidr: cidr - ip_version: ip_version - name: name - description: description - created_at: created_at - updated_at: updated_at - gateway: share_network_gateway - mtu: share_network_mtu - share_network_subnets: share_network_share_network_subnets - security_service_update_support: share_network_security_service_update_support - status: share_network_status Response example ---------------- .. literalinclude:: samples/share-networks-list-detailed-response.json :language: javascript .. include:: common/share-network-span-multiple-subnets-note.rst .. literalinclude:: samples/share-networks-list-detailed-response-with-subnets.json :language: javascript Show share network details ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-networks/{share_network_id} Shows details for a share network. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_network_id_share_networks_response - project_id: project_id - neutron_net_id: neutron_net_id - neutron_subnet_id: neutron_subnet_id - network_type: network_type - segmentation_id: segmentation_id - cidr: cidr - ip_version: ip_version - name: name - description: description - created_at: created_at - updated_at: updated_at - gateway: share_network_gateway - mtu: share_network_mtu - share_network_subnets: share_network_share_network_subnets - security_service_update_support: share_network_security_service_update_support - status: share_network_status Response example ---------------- .. 
literalinclude:: samples/share-network-show-response.json :language: javascript .. include:: common/share-network-span-multiple-subnets-note.rst .. literalinclude:: samples/share-network-show-response-with-subnets.json :language: javascript Create share network ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-networks Creates a share network. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 413 - 422 - 500 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - neutron_net_id: neutron_net_id_request - neutron_subnet_id: neutron_subnet_id_request - name: name_request - description: description_request - availability_zone: share_network_availability_zone_request Request example --------------- .. literalinclude:: samples/share-network-create-request.json :language: javascript .. note:: Since API version 2.51, an ``availability_zone`` can be specified with the share network creation request. In case you do not specify an avaiability zone, this field will be set to null and the subnet created will be considered a ``default`` subnet by the Shared File Systems service. You can have only one default subnet per share network. If you attempt to create another default subnet in a share network that already has a default one, the Shared File Systems Service will deny the operation. Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_network_id_share_networks_response - project_id: project_id - neutron_net_id: neutron_net_id - neutron_subnet_id: neutron_subnet_id - network_type: network_type - segmentation_id: segmentation_id - cidr: cidr - ip_version: ip_version - name: name - description: description - created_at: created_at - updated_at: updated_at - gateway: share_network_gateway - mtu: share_network_mtu - share_network_subnets: share_network_share_network_subnets - security_service_update_support: share_network_security_service_update_support - status: share_network_status Response example ---------------- .. literalinclude:: samples/share-network-create-response.json :language: javascript .. note:: Since API version 2.51, share networks are able to span multiple subnets, so when creating a share network, a new subnet will automatically be created by the Shared File Systems service and attached into the share network. .. literalinclude:: samples/share-network-create-response-with-subnets.json :language: javascript Add security service to share network ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-networks/{share_network_id}/action Adds a security service to a share network. .. note:: As of API version 2.63, manila allows adding security services to share networks that are being used if supported by the share backends. Before requesting to add a security service to a share network that is in use, make sure to use the `check add API `_. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - share_network_id: share_network_id_path - security_service_id: security_service_id Request example --------------- .. literalinclude:: samples/share-network-add-security-service-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_network_id_share_networks_response - project_id: project_id - neutron_net_id: neutron_net_id - neutron_subnet_id: neutron_subnet_id - network_type: network_type - segmentation_id: segmentation_id - cidr: cidr - ip_version: ip_version - name: name - description: description - created_at: created_at - updated_at: updated_at - gateway: share_network_gateway - mtu: share_network_mtu - share_network_subnets: share_network_share_network_subnets - security_service_update_support: share_network_security_service_update_support - status: share_network_status Response example ---------------- .. literalinclude:: samples/share-network-add-security-service-response.json :language: javascript .. include:: common/share-network-span-multiple-subnets-note.rst .. literalinclude:: samples/share-network-add-security-service-response-with-subnets.json :language: javascript Remove security service from share network ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-networks/{share_network_id}/action Removes a security service from a share network. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - security_service_id: share_network_security_service_id Request example --------------- .. literalinclude:: samples/share-network-remove-security-service-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_network_id_share_networks_response - project_id: project_id - neutron_net_id: neutron_net_id - neutron_subnet_id: neutron_subnet_id - network_type: network_type - segmentation_id: segmentation_id - cidr: cidr - ip_version: ip_version - name: name - description: description - created_at: created_at - updated_at: updated_at - gateway: share_network_gateway - mtu: share_network_mtu - security_service_update_support: share_network_security_service_update_support - status: share_network_status Response example ---------------- .. literalinclude:: samples/share-network-remove-security-service-response.json :language: javascript .. include:: common/share-network-span-multiple-subnets-note.rst .. literalinclude:: samples/share-network-remove-security-service-response-with-subnets.json :language: javascript Update share network ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/share-networks/{share_network_id} Updates a share network. Note that if the share network is used by any share server, you can update only the ``name`` and ``description`` attributes. .. note:: Since API version 2.51, can only able to update the ``neutron_net_id`` and ``neutron_subnet_id`` of the default subnet. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - name: name_request - description: description_request - neutron_net_id: neutron_net_id_request - neutron_subnet_id: neutron_subnet_id_request Request example --------------- .. literalinclude:: samples/share-network-update-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_network_id_share_networks_response - project_id: project_id - neutron_net_id: neutron_net_id - neutron_subnet_id: neutron_subnet_id - network_type: network_type - segmentation_id: segmentation_id - cidr: cidr - ip_version: ip_version - name: name - description: description - created_at: created_at - updated_at: updated_at - gateway: share_network_gateway - mtu: share_network_mtu - security_service_update_support: share_network_security_service_update_support - status: share_network_status Response example ---------------- .. literalinclude:: samples/share-network-update-response.json :language: javascript .. include:: common/share-network-span-multiple-subnets-note.rst .. literalinclude:: samples/share-network-update-response-with-subnets.json :language: javascript Delete share network ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-networks/{share_network_id} Deletes a share network. Preconditions - You cannot delete a share network if it has shares created/exported on it. - You cannot delete a share network if it has share groups created on it. - You cannot delete a share network if it has more than one share network subnet on it (since API version 2.51). Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path Update share network security service (since API v2.63) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-networks/{share_network_id}/action .. versionadded:: 2.63 Replaces a security service in a share network. Both current and the new security service must have the same type and must have the ``security_service_update_support`` capability set to ``True``. .. 
important:: Before calling the update share network security service API, make sure to check if the share backends within the share network can hold the operation, through the `check update API `_. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - current_security_service: current_share_network_security_service_id - new_security_service: new_share_network_security_service_id Request example --------------- .. literalinclude:: samples/share-network-update-security-service-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_network_id - project_id: project_id - neutron_net_id: neutron_net_id - neutron_subnet_id: neutron_subnet_id - network_type: network_type - segmentation_id: segmentation_id - cidr: cidr - ip_version: ip_version - name: name - description: description - created_at: created_at - updated_at: updated_at - gateway: share_network_gateway - mtu: share_network_mtu - share_network_subnets: share_network_share_network_subnets - security_service_update_support: share_network_security_service_update_support - status: share_network_status Response example ---------------- .. literalinclude:: samples/share-network-update-security-service-response.json :language: javascript .. _share-net-sec-service-update-check: Check share network security service update (since API v2.63) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-networks/{share_network_id}/action .. versionadded:: 2.63 Checks if an existing security service can be replaced on a share network that is in use. This operation must be triggered before an actual request to update a share network security service is requested. Response codes -------------- .. 
rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - current_service_id: current_share_network_security_service_id - new_service_id: new_share_network_security_service_id - reset_operation: reset_operation Request example --------------- .. literalinclude:: samples/share-network-update-security-service-check-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - operation: requested_operation_name - current_security_service: current_share_network_security_service_id - new_security_service: new_share_network_security_service_id - compatible: operation_is_compatible - requested_operation: requested_check_operation - hosts_check_result: hosts_check_result Response example ---------------- .. literalinclude:: samples/share-network-update-security-service-check-response.json :language: javascript Check share network security service add (since API v2.63) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-networks/{share_network_id}/action .. versionadded:: 2.63 Checks if a new security service can be added to a share network that's already in use. This operation must be triggered before an actual request to add a security service to an in use share network. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - security_service_id: current_share_network_security_service_id - reset_operation: reset_operation Request example --------------- .. literalinclude:: samples/share-network-add-security-service-check-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - operation: requested_operation_name - current_security_service: current_share_network_security_service_id - new_security_service: new_share_network_security_service_id - compatible: operation_is_compatible - requested_operation: requested_operation - hosts_check_result: hosts_check_result Response example ---------------- .. literalinclude:: samples/share-network-add-security-service-check-response.json :language: javascript Reset status (since API v2.63) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-networks/{share_network_id}/action .. versionadded:: 2.63 Resets a share network status. Administrator only. Explicitly updates the state of a share network. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request parameters ------------------ .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_network_id: share_network_id_path - status: share_network_status Request example --------------- .. literalinclude:: samples/share-network-reset-state-request.json :language: javascript Response parameters ------------------- There is no body content for the response. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-replica-export-locations.inc0000664000175000017500000000442500000000000024430 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _share_replica_export_locations: Share replica export locations (since API v2.47) ================================================ Set of APIs used to view export locations of share replicas. List export locations ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-replicas/{share_replica_id}/export-locations .. versionadded:: 2.47 Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_replica_id: share_replica_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: export_location_id - share_instance_id: export_location_share_instance_id - path: export_location_path - is_admin_only: export_location_is_admin_only - preferred: export_location_preferred_replicas - availability_zone: export_location_availability_zone - replica_state: share_replica_replica_state Response example ---------------- .. literalinclude:: samples/share-replica-export-location-list-response.json :language: javascript Show single export location ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-replicas/{share_replica_id}/export-locations/{export-location-id} .. versionadded:: 2.47 Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_replica_id: share_replica_id_path - export_location_id: export_location_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: export_location_id - share_instance_id: export_location_share_instance_id - path: export_location_path - is_admin_only: export_location_is_admin_only - preferred: export_location_preferred_replicas - availability_zone: export_location_availability_zone - replica_state: share_replica_replica_state - created_at: created_at - updated_at: updated_at Response example ---------------- .. literalinclude:: samples/share-replica-export-location-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-replicas.inc0000664000175000017500000002540200000000000021301 0ustar00zuulzuul00000000000000.. 
-*- rst -*- Share replicas (since API v2.11) ================================ Share replicas are the replicated copies of the existing share. You can use Share Replicas to sync data so that each share replica has an identical copy of the same share. Share replication can be used as a disaster recovery solution or as a load sharing mirroring solution. Manila supports replication of shares between different storage pools. These pools may be on different back-end storage systems or within the same back end, depending upon the replication style chosen, the capability of the driver and the configuration of back ends. To ensure that a secondary copy is scheduled to a distinct back end, you must specify the ``availability_zone`` attribute. .. note:: You can create a replicated share with the help of a share type that has an extra-spec ``replication_type`` specified with a valid replication style. Once a replicated share has been created, it always starts out with an ``active`` replica. You may then create secondary copies of the share. A secondary copy can be "promoted" to fail-over to becoming the ``active`` replica. To create a share that supports replication, the share type must specify one of these supported replication types: - writable Synchronously replicated shares where all replicas are writable. Promotion is not supported and not needed because all copies are already exported and can be accessed simultaneously. - readable Mirror-style replication with a primary (writable) copy and one or more secondary (read-only) copies which can become writable after a promotion. - dr (for Disaster Recovery) Generalized replication with secondary copies that are inaccessible until they are promoted to become the active replica. .. important:: The term active replica refers to the primary share. In writable style of replication, all replicas are active, and there could be no distinction of a primary share. 
In readable and dr styles of replication, a secondary replica may be referred to as passive, non-active or simply replica. Create share replica ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-replicas .. versionadded:: 2.11 Create a share replica for the share. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_replica_share_id - availability_zone: share_replica_az - share_network_id: share_replica_share_network_id - scheduler_hints: share_replica_scheduler_hints Request example --------------- .. literalinclude:: samples/share-replica-create-request.json :language: javascript .. note:: Since API version 2.72, the parameter ``share_network_id`` is added which was earlier supported but later deprecated from version 2.51. In case, the parameter is not specified, it will be inherited from its parent share, and the Shared File Systems service will automatically choose which share network subnet your share replica will be placed, according to the specified availability zone. Response parameters ------------------- .. rest_parameters:: parameters.yaml - share_id: share_replica_share_id - status: share_replica_status - cast_rules_to_readonly: share_replica_cast_rules_to_readonly - updated_at: updated_at - share_network_id: share_network_id - share_server_id: share_server_id - host: share_replica_host - id: share_replica_id - replica_state: share_replica_replica_state - created_at: created_at Response example ---------------- .. literalinclude:: samples/share-replica-create-response.json :language: javascript Promote share replica ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-replicas/{share_replica_id}/action .. versionadded:: 2.11 Promotes a replica to ``active`` replica state. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_replica_id: share_replica_id_path - quiesce_wait_time: share_replica_quiesce_wait_time Request example --------------- .. literalinclude:: samples/share-replica-promote-request.json :language: javascript Resync share replica ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-replicas/{share_replica_id}/action .. versionadded:: 2.11 Resync a replica with its ``active`` mirror. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_replica_id: share_replica_id_path Request example --------------- .. literalinclude:: samples/share-replica-resync-request.json :language: javascript List share replicas ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-replicas?share_id={share_id} .. versionadded:: 2.11 Lists share replicas. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - share_id: share_replica_share_id - status: share_replica_status - id: share_replica_id - replica_state: share_replica_replica_state Response example ---------------- .. literalinclude:: samples/share-replicas-list-response.json :language: javascript List share replicas with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-replicas/detail?share_id={share_id} .. versionadded:: 2.11 Lists share replicas with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id_replicas_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - share_id: share_replica_share_id - status: share_replica_status - cast_rules_to_readonly: share_replica_cast_rules_to_readonly - updated_at: updated_at - share_network_id: share_network_id - share_server_id: share_server_id - host: share_replica_host - id: share_replica_id - replica_state: share_replica_replica_state - created_at: created_at Response example ---------------- .. literalinclude:: samples/share-replicas-list-detail-response.json :language: javascript Show share replica ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-replicas/{share_replica_id} .. versionadded:: 2.11 Show a share replica. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_replica_id: share_replica_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - share_id: share_replica_share_id - status: share_replica_status - cast_rules_to_readonly: share_replica_cast_rules_to_readonly - updated_at: updated_at - share_network_id: share_network_id - share_server_id: share_server_id - host: share_replica_host - id: share_replica_id - replica_state: share_replica_replica_state - created_at: created_at Response example ---------------- .. literalinclude:: samples/share-replicas-show-response.json :language: javascript Reset status of the share replica ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-replicas/{share_replica_id}/action .. versionadded:: 2.11 Administrator only. Explicitly updates the ``status`` of a share replica. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_replica_id: share_replica_id_path - reset_status: reset_status - status: share_replica_status Request example --------------- .. literalinclude:: samples/share-replicas-reset-state-request.json :language: javascript Reset replica_state of the share replica ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-replicas/{share_replica_id}/action .. versionadded:: 2.11 Administrator only. Explicitly updates the ``replica state`` of a share replica. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_replica_id: share_replica_id_path - reset_replica_state: share_replica_reset_replica_state - replica_state: share_replica_replica_state Request example --------------- .. literalinclude:: samples/share-replicas-reset-replica-state-request.json :language: javascript Delete share replica ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-replicas/{share_replica_id} .. versionadded:: 2.11 Deletes a share replica. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 .. note:: The ``active`` replica cannot be deleted with this API. Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_replica_id: share_replica_id_path Force-delete share replica ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-replicas/{share_replica_id}/action .. versionadded:: 2.11 Administrator only. Force-deletes a share replica in any state. Use the ``policy.yaml`` file to grant permissions for this action to other roles. 
Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 .. note:: The ``active`` replica cannot be deleted with this API. Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_replica_id: share_replica_id_path - force_delete: share_replica_force_delete Request example --------------- .. literalinclude:: samples/share-replicas-force-delete-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-server-migration.inc0000664000175000017500000001523300000000000022775 0ustar00zuulzuul00000000000000.. -*- rst -*- Share Server Migration (since API v2.57) ======================================== The Share Server Migration API is an administrator-only experimental API that allows the invoker to select a destination backend to migrate a share server to. Share server migration uses a 2-phased approach. In the first phase of the migration, the data copying or replication operations are performed, so it's the longer phase. After the first phase is finished, the administrator can trigger the second phase, which consists in final syncs and making the new share server available to be used and the latter one inactive. During the data copy phase, source shares will remain available and writable if it was required by the administrator and supported by the share driver. After the second phase, users may need to reconnect to the shares affected by the migration. All shares belonging to a share server are migrated and their access rules are preserved through the migration. The share snapshots are going to be copied if specified by the administrator and/or supported by the driver. .. important:: In order to migrate a share server, the administrator must make sure that none of the shares on the server are replicated or in a share group. .. 
note:: Share Server Migration APIs are `experimental APIs <#experimental-apis>`_ . Possible uses for share server migration include: - Migrating a share server and all its shares and snapshots at once. - Bring down a physical storage device for maintenance. - Free up space in a thinly-provisioned back end. - Load balancing among backends. Share Server Migration Check Compatibility ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/share_servers/{share_server_id}/action .. versionadded:: 2.57 Checks if a share driver can handle a share server migration according to the specified destination host and ``new_share_network_id``, as well as the ``writable``, ``nondisruptive`` and ``preserve_snapshots`` flags. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - share_server_id: share_server_id - preserve_snapshots: preserve_snapshots_server_migration - nondisruptive: nondisruptive_server_migration - writable: writable_server_migration - new_share_network_id: new_share_network_id_server_migration - host: host_share_server_migration Response parameters ------------------- .. rest_parameters:: parameters.yaml - compatible: compatible - requested_capabilities: requested_capabilities - supported_capabilities: supported_capabilities Request example --------------- .. literalinclude:: samples/share-server-migration-check-compatibility-request.json :language: javascript Response example ---------------- .. literalinclude:: samples/share-server-migration-check-compatibility-response.json :language: javascript Start a Share Server Migration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/share_servers/{share_server_id}/action .. versionadded:: 2.57 Triggers a share server migration. 
This API will initiate the first phase of the share server migration to a new host. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - share_server_id: share_server_id - preserve_snapshots: preserve_snapshots_server_migration - nondisruptive: nondisruptive_server_migration - writable: writable_server_migration - new_share_network_id: new_share_network_id_server_migration - host: host_share_server_migration Request example --------------- .. literalinclude:: samples/share-server-migration-start-request.json :language: javascript Complete Share Server Migration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/share_servers/{share_server_id}/action .. versionadded:: 2.57 Completes share server migration. This API will initiate the switch-over from the source to destination share server. This operation can be disruptive. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id - share_server_id: share_server_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - total_progress: total_progress_server_migration - task_state: task_state_server_migration - destination_share_server_id: destination_share_server_id Request example --------------- .. literalinclude:: samples/share-server-migration-complete-request.json :language: javascript Response example ---------------- .. literalinclude:: samples/share-server-migration-complete-response.json :language: javascript Share Server Migration Get Progress ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/share_servers/{share_server_id}/action .. 
versionadded:: 2.57 Returns the completed percentage and the destination share server id of an ongoing share server migration. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 409 Request ------- .. rest_parameters:: parameters.yaml - share_server_id: share_server_id - project_id: project_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - total_progress: total_progress_server_migration - task_state: task_state_server_migration - destination_share_server_id: destination_share_server_id Request example --------------- .. literalinclude:: samples/share-server-migration-get-progress-request.json :language: javascript Response example ---------------- .. literalinclude:: samples/share-server-migration-get-progress-response.json :language: javascript Cancel Share Server Migration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/share_servers/{share_server_id}/action .. versionadded:: 2.57 Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - share_server_id: share_server_id - project_id: project_id Request example --------------- .. literalinclude:: samples/share-server-migration-cancel-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-servers.inc0000664000175000017500000002170100000000000021166 0ustar00zuulzuul00000000000000.. -*- rst -*- Share servers ============= A share server is created by multi-tenant back-end drivers where shares are hosted. For example, with the ``generic`` driver, shares are hosted on Compute VMs. Administrators can perform read and delete actions for share servers. 
An administrator can delete an active share server only if it contains no dependent shares. If an administrator deletes the share server, the Shared File Systems service creates a share server in response to a subsequent create share request. An administrator can use the ``policy.yaml`` file to grant permissions for share server actions to other roles. The status of a share server indicates its current state. After you successfully set up a share server, its status is ``active``. If errors occur during set up such as when server data is not valid, its status is ``error``. The possible share servers statuses are: **Share server statuses** +--------------+------------------------------------------------------------------+ | Status | Description | +--------------+------------------------------------------------------------------+ | ``active`` | Share server was successfully set up. | +--------------+------------------------------------------------------------------+ | ``error`` | The set up or deletion of the share server failed. | +--------------+------------------------------------------------------------------+ | ``deleting`` | The share server has no dependent shares and is being deleted. | +--------------+------------------------------------------------------------------+ | ``creating`` | The share server is being created on the back end with data from | | | the database. | +--------------+------------------------------------------------------------------+ List share servers ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-servers Lists all share servers. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_server_id - project_id: project_id - status: status_share_server_body - share_network_id: share_network_id_share_servers_response - share_network_name: share_network_name - host: host_share_server_body - updated_at: updated_at - share_network_subnet_id: share_network_subnet_id_share_server_body - security_service_update_support: share_server_security_service_update_support Response example ---------------- .. literalinclude:: samples/share-servers-list-response.json :language: javascript Show share server ~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-servers/{share_server_id} Show a share server's details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_server_id: share_server_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_server_id - project_id: project_id - status: status_share_server_body - backend_details: backend_details - share_network_id: share_network_id_share_servers_response - share_network_name: share_network_name - host: host_share_server_body - created_at: created_at - updated_at: updated_at - identifier: share_server_show_identifier - is_auto_deletable: share_server_show_is_auto_deletable - share_network_subnet_id: share_network_subnet_id_share_server_body - security_service_update_support: security_service_update_support - encryption_key_ref: encryption_key_ref_response Response example ---------------- .. literalinclude:: samples/share-server-show-response.json :language: javascript Show share server back end details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-servers/{share_server_id}/details Shows back end details of a share server. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_server_id: share_server_id Response parameters ------------------- Response parameters can differ based on the back end used. Each back end can store any key-value information that it requires. For example, the generic back end driver might store the router ID. Response example ---------------- .. literalinclude:: samples/share-server-show-details-response.json :language: javascript Delete share server ~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-servers/{share_server_id} Deletes a share server. An administrator can delete an active share server only if it contains no dependent shares. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_server_id: share_server_id Manage share server (since API v2.49) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-servers/manage .. versionadded:: 2.49 Manages a share server An administrator can bring a pre-existing share server if the back end driver is operating in ``driver_handles_share_servers=True`` mode. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 403 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - host: manage_host - identifier: identifier - share_network: share_network_id_manage_server_request - driver_options: driver_options - share_network_subnet_id: share_network_subnet_id_manage_server_request Request example --------------- .. literalinclude:: samples/share-server-manage-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_server_id - project_id: project_id - updated_at: updated_at - status: share_server_status - host: manage_host - share_network_name: share_network_name - share_network_id: share_network_id_share_servers_response - created_at: created_at - backend_details: backend_details - is_auto_deletable: is_auto_deletable - identifier: identifier - share_network_subnet_id: share_network_subnet_id_manage_server_body - security_service_update_support: security_service_update_support - encryption_key_ref: encryption_key_ref_response Response examples ----------------- .. literalinclude:: samples/share-server-manage-response.json :language: javascript Unmanage share server (since API v2.49) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-servers/{share_server_id}/action .. versionadded:: 2.49 Unmanages a share server An administrator can remove a share server from the Shared File System service's management if there are no associated shares that the service is aware of. The share server will not be torn down in the back end. Preconditions - Share server status must be either ``error``, ``manage_error``, ``active`` or ``unmanage_error``. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 404 Request parameters ------------------ .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_server_id: share_server_id - force: force - unmanage: share_server_unmanage Request example --------------- .. literalinclude:: samples/share-server-unmanage-request.json :language: javascript Response parameters ------------------- There is no body content for the response. Reset status (since API v2.49) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-servers/{share_server_id}/action .. versionadded:: 2.49 Resets a share server status Administrator only. Explicitly updates the state of a share server. 
Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 404 Request parameters ------------------ .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_server_id: share_server_id - status: share_server_status Request example --------------- .. literalinclude:: samples/share-server-reset-state-request.json :language: javascript Response parameters ------------------- There is no body content for the response. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-transfers.inc0000664000175000017500000001220500000000000021503 0ustar00zuulzuul00000000000000.. -*- rst -*- Share transfer (since API v2.77) ================================ Transfers a share across projects. Create a share transfer ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-transfers Initiates a share transfer from a source project namespace to a destination project namespace. **Preconditions** * The share ``status`` must be ``available`` * If the share has snapshots, those snapshots must be ``available`` * The share can not belong to share group Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - transfer: transfer - name: transfer_name - share_id: share_id_request Request Example --------------- .. literalinclude:: ./samples/share-transfer-create-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: transfer_id_in_body - created_at: created_at - name: transfer_name - resource_type: transfer_resource_type - resource_id: transfer_resource_id - auth_key: auth_key - source_project_id: project_id - destination_project_id: destination_project_id - accepted: accepted - expires_at: transfer_expires_at_body - links: links Response Example ---------------- .. literalinclude:: ./samples/share-transfer-create-response.json :language: javascript Accept a share transfer in the destination project namespace ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/share-transfers/{transfer_id}/accept Accepts a share transfer. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 403 - 404 - 413 Request ------- .. rest_parameters:: parameters.yaml - transfer_id: transfer_id - auth_key: auth_key - clear_access_rules: clear_access_rules Request Example --------------- .. literalinclude:: ./samples/share-transfer-accept-request.json :language: javascript List share transfers for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-transfers Lists share transfers. Response codes -------------- .. rest_status_code:: success status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - all_tenants: all_tenants_query - limit: limit_query - offset: offset - sort_key: sort_key_transfer - sort_dir: sort_dir - name: name_query - name~: name_inexact_query - resource_type: resource_type_query Response Parameters ------------------- .. rest_parameters:: parameters.yaml - transfers: transfers - id: transfer_id_in_body - resource_type: transfer_resource_type - resource_id: transfer_resource_id - name: transfer_name - links: links Response Example ---------------- .. literalinclude:: ./samples/share-transfers-list-response.json :language: javascript List share transfers and details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: GET /v2/share-transfers/detail Lists share transfers, with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - all_tenants: all_tenants_query - limit: limit_query - offset: offset - sort_key: sort_key_transfer - sort_dir: sort_dir Response Parameters ------------------- .. rest_parameters:: parameters.yaml - transfers: transfers - id: transfer_id_in_body - created_at: created_at - name: transfer_name - resource_type: transfer_resource_type - resource_id: transfer_resource_id - source_project_id: project_id - destination_project_id: destination_project_id - accepted: accepted - expires_at: transfer_expires_at_body - links: links Response Example ---------------- .. literalinclude:: ./samples/share-transfers-list-detailed-response.json :language: javascript Show share transfer detail ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/share-transfers/{transfer_id} Shows details for a share transfer. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 404 Request ------- .. rest_parameters:: parameters.yaml - transfer_id: transfer_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - id: transfer_id_in_body - created_at: created_at - name: transfer_name - resource_type: transfer_resource_type - resource_id: transfer_resource_id - source_project_id: project_id - destination_project_id: destination_project_id - accepted: accepted - expires_at: transfer_expires_at_body - links: links Response Example ---------------- .. literalinclude:: ./samples/share-transfer-show-response.json :language: javascript Delete a share transfer ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/share-transfers/{transfer_id} Deletes a share transfer. Response codes -------------- .. rest_status_code:: success status.yaml - 202 Request ------- .. 
rest_parameters:: parameters.yaml - transfer_id: transfer_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/share-types.inc0000664000175000017500000003266600000000000020655 0ustar00zuulzuul00000000000000.. -*- rst -*- Share types =========== A share type provides hints to the scheduler service to aid placement of new workloads. It contains extra specifications that can either match back end storage capabilities or provide instructions to the Shared File Systems service with regard to the workload being requested. A share type behaves in the same way as a Block Storage volume type behaves. See the `administration reference on share types `_ for more information. You can create publicly visible share type that are visible to all projects and users within the cloud, or make them private and control which projects can access them. When you issue a create a share type request, you can submit a request body with either a ``share_type`` or ``volume_type`` object. .. important:: The use of the ``volume_type`` object is deprecated but supported. It is recommended that you use the ``share_type`` object when you create a share type. No matter which object type you include in the request, the API creates both a ``volume_type`` object and a ``share_type`` object. Both objects have the same ID. When you issue a list share types request, the response shows both ``share_type`` and ``volume_type`` objects. You can set share types as either public or private. By default a share type is created as publicly accessible. Set ``share_type_access:is_public`` (``os-share-type-access:is_public`` for API versions 1.0-2.6) to ``False`` to make the share type private. You can manage the access to the private share types for the different projects. You can add access, remove access, and get information about access for a private share type. 
Administrators can create share types with these extra specifications that are used to filter back ends: - ``driver_handles_share_servers``. Required. Defines the driver mode for share server, or storage, life cycle management. The Shared File Systems service creates a share server for the export of shares. Set to ``True`` when the share driver manages or handles the share server life cycle. Set to ``False`` when an administrator rather than a share driver manages the share server life cycle. - ``snapshot_support``. Filters back ends by whether they do or do not support share snapshots. Set to ``True`` to find back ends that support share snapshots. Set to ``False`` to find back ends that do not support share snapshots. Administrators can also set additional extra specifications for a share type for the following purposes: - Filter back ends. Specify these unqualified extra specifications in this format: ``extra_spec=value``. For example, ``netapp_raid_type=raid4``. - Set data for the driver. Except for the special ``capabilities`` prefix, you specify these qualified extra specifications with its prefix followed by a colon: ``vendor:extra_spec=value``. For example, ``netapp:thin_provisioned=true``. The scheduler uses the special ``capabilities`` prefix for filtering. The scheduler can only create a share on a back end that reports capabilities that match the un-scoped extra-spec keys for the share type. For details, see `Capabilities and Extra-Specs `_. Each driver implementation determines which extra specification keys it uses. For details, see the documentation for the driver. An administrator can use the ``policy.yaml`` file to grant permissions for share type creation with extra specifications to other roles. List share types ~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/types?is_public={is_public}&extra-specs={extra-specs-as-dict} Lists all share types. Response codes -------------- .. rest_status_code:: success status.yaml - 200 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - is_public: is_public_query - extra_specs: extra_specs_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_type_id_body - name: share_type_name - required_extra_specs: required_extra_specs - extra_specs: extra_specs - share_type_access:is_public: share_type_access:is_public_body - description: share_type_description - is_default: is_default_type Response example ---------------- .. literalinclude:: samples/share-types-list-response.json :language: javascript List default share types ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/types/default Lists default share types. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_type_id_body - required_extra_specs: required_extra_specs - extra_specs: extra_specs - share_type_access:is_public: share_type_access:is_public_body - name: share_type_name - description: share_type_description - is_default: is_default_type Response example ---------------- .. literalinclude:: samples/share-types-default-list-response.json :language: javascript Show share type detail ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/types/{share_type_id} Shows details for a specified share type. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_type_id: share_type_id Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_type_id_body - required_extra_specs: required_extra_specs - extra_specs: extra_specs - share_type_access:is_public: share_type_access:is_public_body - name: share_type_name - description: share_type_description - is_default: is_default_type Response Example ---------------- .. literalinclude:: ./samples/share-type-show-response.json :language: javascript List extra specs ~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/types/{share_type_id}/extra_specs Lists the extra specifications for a share type. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_type_id: share_type_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - extra_specs: extra_specs Response example ---------------- .. literalinclude:: samples/share-types-extra-specs-list-response.json :language: javascript Create share type ~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/types Creates a share type. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - extra_specs: extra_specs_request_body - os-share-type-access:is_public: os-share-type-access:is_public - name: share_type_name - description: share_type_description_request Request example --------------- .. literalinclude:: samples/share-type-create-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_type_id_body - required_extra_specs: required_extra_specs - extra_specs: extra_specs - os-share-type-access:is_public: os-share-type-access:is_public - share_type_access:is_public: share_type_access:is_public - name: share_type_name - description: share_type_description - is_default: is_default_type Response example ---------------- .. literalinclude:: samples/share-type-create-response.json :language: javascript Show share type access details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/types/{share_type_id}/share_type_access Shows access details for a share type. You can view access details for private share types only. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_type_id: share_type_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - project_id: project_id_type_access - share_type_id: share_type_id_body Response example ---------------- .. literalinclude:: samples/share-types-list-access-response.json :language: javascript Set extra spec for share type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/types/{share_type_id}/extra_specs Sets an extra specification for the share type. Each driver implementation determines which extra specification keys it uses. For details, see `Capabilities and Extra-Specs <https://docs.openstack.org/manila/latest/admin/capabilities_and_extra_specs.html>`_ and documentation for your driver. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_type_id: share_type_id - extra_specs: extra_specs_request_body Request example --------------- .. 
literalinclude:: samples/share-type-set-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - extra_specs: extra_specs Response example ---------------- .. literalinclude:: samples/share-type-set-response.json :language: javascript Unset an extra spec ~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/types/{share_type_id}/extra_specs/{extra-spec-key} Unsets an extra specification for the share type. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_type_id: share_type_id - extra-spec-key: extra_spec_key_path Add share type access ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/types/{share_type_id}/action Adds share type access for a project. You can add access to private share types only. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_type_id: share_type_id - addProjectAccess: add_project_access - project: project_id_type_access_grant_request Request example --------------- .. literalinclude:: samples/share-type-grant-access-request.json :language: javascript Remove share type access ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/types/{share_type_id}/action Removes share type access from a project. You can remove access from private share types only. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_type_id: share_type_id - removeProjectAccess: remove_project_access - project: project_id_type_access_revoke_request Request example --------------- .. 
literalinclude:: samples/share-type-revoke-access-request.json :language: javascript Delete share type ~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/types/{share_type_id} Deletes a share type. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_type_id: share_type_id Update share type (since API v2.50) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/types/{share_type_id} .. versionadded:: 2.50 Update a share type. Share type extra-specs cannot be updated with this API. Please use the respective APIs to `set extra specs <#set-extra-spec-for-share-type>`_ or `unset extra specs <#unset-an-extra-spec>`_. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_type_id: share_type_id - name: share_type_name_request - share_type_access:is_public: share_type_access:is_public_update_request - description: share_type_description_update_request Request example --------------- .. literalinclude:: samples/share-type-update-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_type_id_body - required_extra_specs: required_extra_specs - extra_specs: extra_specs - share_type_access:is_public: share_type_access:is_public_body - name: share_type_name - description: share_type_description_body - is_default: is_default_type_body Response example ---------------- .. 
literalinclude:: samples/share-type-update-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/shares.inc0000664000175000017500000005753200000000000017675 0ustar00zuulzuul00000000000000.. -*- rst -*- Shares ====== A share is a remote, mountable file system. In the APIs below, a share resource is a representation of this remote file system within the Shared File Systems service. This resource representation includes useful metadata, communicating the characteristics of the remote file system as determined by the user and the Shared File Systems service. You can create a share and associate it with a network, list shares, and show information for, update, and delete a share. To create a share, specify one of these supported protocols: - ``NFS``. Network File System (NFS). - ``CIFS``. Common Internet File System (CIFS). - ``GLUSTERFS``. Gluster file system (GlusterFS). - ``HDFS``. Hadoop Distributed File System (HDFS). - ``CEPHFS``. Ceph File System (CephFS). - ``MAPRFS``. MapR File System (MAPRFS). You can also create snapshots of shares. To create a snapshot, you specify the ID of the share that you want to snapshot. A share has one of these status values: **Share statuses** +----------------------------------------+--------------------------------------------------------+ | Status | Description | +----------------------------------------+--------------------------------------------------------+ | ``backup_creating`` | The share is being backed up. | +----------------------------------------+--------------------------------------------------------+ | ``backup_restoring`` | The share is being restored from backup. | +----------------------------------------+--------------------------------------------------------+ | ``backup_restoring_error`` | An error occurred during share backup restore. 
| +----------------------------------------+--------------------------------------------------------+ | ``creating`` | The share is being created. | +----------------------------------------+--------------------------------------------------------+ | ``creating_from_snapshot`` | The share is being created from a parent snapshot. | +----------------------------------------+--------------------------------------------------------+ | ``deleting`` | The share is being deleted. | +----------------------------------------+--------------------------------------------------------+ | ``deleted`` | The share was deleted. | +----------------------------------------+--------------------------------------------------------+ | ``error`` | An error occurred during share creation. | +----------------------------------------+--------------------------------------------------------+ | ``error_deleting`` | An error occurred during share deletion. | +----------------------------------------+--------------------------------------------------------+ | ``available`` | The share is ready to use. | +----------------------------------------+--------------------------------------------------------+ | ``inactive`` | The share is inactive. | +----------------------------------------+--------------------------------------------------------+ | ``manage_starting`` | Share manage started. | +----------------------------------------+--------------------------------------------------------+ | ``manage_error`` | Share manage failed. | +----------------------------------------+--------------------------------------------------------+ | ``unmanage_starting`` | Share unmanage started. | +----------------------------------------+--------------------------------------------------------+ | ``unmanage_error`` | Share cannot be unmanaged. | +----------------------------------------+--------------------------------------------------------+ | ``unmanaged`` | Share was unmanaged. 
| +----------------------------------------+--------------------------------------------------------+ | ``extending`` | The extend, or increase, share size request was issued | | | successfully. | +----------------------------------------+--------------------------------------------------------+ | ``extending_error`` | Extend share failed. | +----------------------------------------+--------------------------------------------------------+ | ``shrinking`` | Share is being shrunk. | +----------------------------------------+--------------------------------------------------------+ | ``shrinking_error`` | Failed to update quota on share shrinking. | +----------------------------------------+--------------------------------------------------------+ | ``shrinking_possible_data_loss_error`` | Shrink share failed due to possible data loss. | +----------------------------------------+--------------------------------------------------------+ | ``migrating`` | Share is currently migrating. | +----------------------------------------+--------------------------------------------------------+ | ``migrating_to`` | Share is a migration destination. | +----------------------------------------+--------------------------------------------------------+ | ``replication_change`` | The share is undergoing a replication change. | +----------------------------------------+--------------------------------------------------------+ | ``reverting`` | Share is being reverted to a snapshot. | +----------------------------------------+--------------------------------------------------------+ | ``reverting_error`` | Share revert to snapshot failed. | +----------------------------------------+--------------------------------------------------------+ | ``awaiting_transfer`` | Share is being transferred to a different project's | | | namespace. | +----------------------------------------+--------------------------------------------------------+ List shares ~~~~~~~~~~~ .. 
rest_method:: GET /v2/shares Lists all shares. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - name: name_query - status: status_query - share_server_id: share_server_id_query - metadata: metadata_query - extra_specs: extra_specs_query - share_type_id: share_type_id_query - snapshot_id: snapshot_id_query - source_backup_id: source_backup_id_query - host: host_query - share_network_id: share_network_id_query - project_id: project_id_query - is_public: is_public_query - share_group_id: share_group_id_query - export_location_id: export_location_id_query - export_location_path: export_location_path_query - name~: name_inexact_query - description~: description_inexact_query - with_count: with_count_query - is_soft_deleted: is_soft_deleted_query - limit: limit - offset: offset - sort_key: sort_key - sort_dir: sort_dir - encryption_key_ref: encryption_key_ref_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_id_response - links: links - name: name - count: count Response example ---------------- .. literalinclude:: samples/shares-list-response.json :language: javascript List shares with details ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/shares/detail Lists all shares, with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - status: status_query - share_server_id: share_server_id_query - metadata: metadata_query - extra_specs: extra_specs_query - share_type_id: share_type_id_query - name: name_query - snapshot_id: snapshot_id_query - source_backup_id: source_backup_id_query - host: host_query - share_network_id: share_network_id_query - project_id: project_id_query - is_public: is_public_query - share_group_id: share_group_id_query - export_location_id: export_location_id_query - export_location_path: export_location_path_query - name~: name_inexact_query - description~: description_inexact_query - with_count: with_count_query - is_soft_deleted: is_soft_deleted_query - limit: limit - offset: offset - sort_key: sort_key - sort_dir: sort_dir - encryption_key_ref: encryption_key_ref_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_id_response - size: size_response - availability_zone: availability_zone - created_at: created_at - status: share_status_response - name: name - description: description - project_id: project_id - snapshot_id: snapshot_id_shares_response - source_backup_id: source_backup_id_shares_response - share_network_id: share_network_id - share_proto: share_proto - metadata: metadata - share_type: share_type_shares_response - links: links - is_public: is_public_shares_response - share_server_id: share_server_id - host: host_resource_response - snapshot_support: snapshot_support_share_capability - task_state: task_state - share_type_name: share_type_name - access_rules_status: access_rules_status - replication_type: replication_type_share_capability - has_replicas: has_replicas - user_id: share_user_id - create_share_from_snapshot_support: create_share_from_snapshot_support_share_capability - revert_to_snapshot_support: revert_to_snapshot_support_share_capability - share_group_id: share_group_id - 
source_share_group_snapshot_member_id: source_share_group_snapshot_member_id - mount_snapshot_support: mount_snapshot_support_share_capability - progress: progress_share_instance - count: count - volume_type: volume_type_shares_response - export_location: export_location - export_locations: export_locations - is_soft_deleted: is_soft_deleted_response - scheduled_to_be_deleted_at: scheduled_to_be_deleted_at_response - encryption_key_ref: encryption_key_ref_response Response example ---------------- .. literalinclude:: samples/shares-list-detailed-response.json :language: javascript Show share details ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/shares/{share_id} Shows details for a share. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_id_response - size: size_response - availability_zone: availability_zone - created_at: created_at - status: share_status_response - name: name - description: description - project_id: project_id - snapshot_id: snapshot_id_shares_response - share_network_id: share_network_id - share_proto: share_proto - metadata: metadata - share_type: share_type_shares_response - links: links - is_public: is_public_shares_response - share_server_id: share_server_id - host: host_resource_response - snapshot_support: snapshot_support_share_capability - task_state: task_state - share_type_name: share_type_name - access_rules_status: access_rules_status - replication_type: replication_type_share_capability - has_replicas: has_replicas - user_id: share_user_id - create_share_from_snapshot_support: create_share_from_snapshot_support_share_capability - revert_to_snapshot_support: revert_to_snapshot_support_share_capability - share_group_id: share_group_id - 
source_share_group_snapshot_member_id: source_share_group_snapshot_member_id - mount_snapshot_support: mount_snapshot_support_share_capability - progress: progress_share_instance - count: count - volume_type: volume_type_shares_response - export_location: export_location - export_locations: export_locations - is_soft_deleted: is_soft_deleted_response - scheduled_to_be_deleted_at: scheduled_to_be_deleted_at_response - encryption_key_ref: encryption_key_ref_response Response example ---------------- .. literalinclude:: samples/share-show-response.json :language: javascript List share instances ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/shares/{share_id}/instances .. versionadded:: 2.3 Lists instances of a share. A share instance is an internal representation for a share. A share that is replicated or is in the process of being migrated is physically stored in multiple locations. Each of these individual locations is called an "instance" within the Shared File Systems service. End users need not be concerned with this internal representation. As an administrator, you can list all instances of a share through this endpoint resource. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - status: share_status_response - access_rules_status: access_rules_status - share_id: share_id_share_instances_response - progress: progress_share_instance - availability_zone: availability_zone - created_at: created_at - replica_state: replica_state - export_location: export_location - export_locations: export_locations - cast_rules_to_readonly: share_instance_cast_rules_to_readonly - share_network_id: share_network_id - share_server_id: share_server_id - host: host_resource_response - access_rules_status: access_rules_status - share_type_id: share_type_id - id: id_13 - encryption_key_ref: encryption_key_ref_response Response example ---------------- .. literalinclude:: samples/share-instances-list-response.json :language: javascript Create share ~~~~~~~~~~~~ .. rest_method:: POST /v2/shares Creates a share. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_proto: share_proto - size: size_request - name: name_request - description: description_request - display_name: display_name_request - display_description: display_description_request - share_type: share_type_request - snapshot_id: snapshot_id_request - is_public: is_public_request - share_group_id: share_group_id_request - metadata: metadata - share_network_id: share_network_id_request - availability_zone: availability_zone_request - scheduler_hints: scheduler_hints - encryption_key_ref: encryption_key_ref_request Request example --------------- .. literalinclude:: samples/share-create-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_id_response - size: size_response - availability_zone: availability_zone - created_at: created_at - status: share_status_response - name: name - description: description - project_id: project_id - snapshot_id: snapshot_id_shares_response - share_network_id: share_network_id - share_proto: share_proto - metadata: metadata - share_type: share_type_shares_response - links: links - is_public: is_public_shares_response - share_server_id: share_server_id - host: host_resource_response - snapshot_support: snapshot_support_share_capability - task_state: task_state - share_type_name: share_type_name - access_rules_status: access_rules_status - replication_type: replication_type_share_capability - has_replicas: has_replicas - user_id: share_user_id - create_share_from_snapshot_support: create_share_from_snapshot_support_share_capability - revert_to_snapshot_support: revert_to_snapshot_support_share_capability - share_group_id: share_group_id - source_share_group_snapshot_member_id: source_share_group_snapshot_member_id - mount_snapshot_support: mount_snapshot_support_share_capability - progress: progress_share_instance - count: count - volume_type: volume_type_shares_response - export_location: export_location - export_locations: export_locations - is_soft_deleted: is_soft_deleted_response - scheduled_to_be_deleted_at: scheduled_to_be_deleted_at_response - encryption_key_ref: encryption_key_ref_response Response example ---------------- .. literalinclude:: samples/share-create-response.json :language: javascript Manage share (since API v2.7) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/shares/manage .. versionadded:: 2.7 Use this API to bring a share under the management of the Shared File Systems service. Administrator only. Use the ``policy.yaml`` file to grant permissions for this action to other roles. .. note:: Managing shares that are created on top of managed share servers (i.e. 
with parameter ``share_server_id``) is not supported prior to API version 2.49. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share: share - protocol: protocol - name: name_request - share_type: share_type_request - driver_options: driver_options - export_path: export_path - service_host: service_host - share_server_id: manage_share_server_id - is_public: is_public_manage_request - description: description_request Request example --------------- .. literalinclude:: samples/share-manage-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: share_id_response - size: size_response - availability_zone: availability_zone - created_at: created_at - status: share_status_response - name: name - description: description - project_id: project_id - snapshot_id: snapshot_id_shares_response - share_network_id: share_network_id - share_proto: share_proto - metadata: metadata - share_type: share_type_shares_response - links: links - is_public: is_public_shares_response - share_server_id: share_server_id - host: host_resource_response - snapshot_support: snapshot_support_share_capability - task_state: task_state - share_type_name: share_type_name - access_rules_status: access_rules_status - replication_type: replication_type_share_capability - has_replicas: has_replicas - user_id: managed_share_user_id - create_share_from_snapshot_support: create_share_from_snapshot_support_share_capability - revert_to_snapshot_support: revert_to_snapshot_support_share_capability - share_group_id: share_group_id - source_share_group_snapshot_member_id: source_share_group_snapshot_member_id - mount_snapshot_support: mount_snapshot_support_share_capability - progress: progress_share_instance - count: count - volume_type: 
volume_type_shares_response - export_location: export_location - export_locations: export_locations - is_soft_deleted: is_soft_deleted_response - scheduled_to_be_deleted_at: scheduled_to_be_deleted_at_response - encryption_key_ref: encryption_key_ref_response Response example ---------------- .. literalinclude:: samples/share-manage-response.json :language: javascript Update share ~~~~~~~~~~~~ .. rest_method:: PUT /v2/shares/{share_id} Updates a share. You can update these attributes: - ``display_name``, which also changes the ``name`` of the share. - ``display_description``, which also changes the ``description`` of the share. - ``is_public``. Changes the level of visibility. If you try to update other attributes, they retain their previous values. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id - is_public: is_public_request - display_name: display_name_request - display_description: display_description_request Request example --------------- .. literalinclude:: samples/share-update-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: share_id_response - size: size_response - availability_zone: availability_zone - created_at: created_at - status: share_status_response - name: name - description: description - project_id: project_id - snapshot_id: snapshot_id_shares_response - share_network_id: share_network_id - share_proto: share_proto - metadata: metadata - share_type: share_type_shares_response - links: links - is_public: is_public_shares_response - share_server_id: share_server_id - host: host_resource_response - snapshot_support: snapshot_support_share_capability - task_state: task_state - share_type_name: share_type_name - access_rules_status: access_rules_status - replication_type: replication_type_share_capability - has_replicas: has_replicas - user_id: share_user_id - create_share_from_snapshot_support: create_share_from_snapshot_support_share_capability - revert_to_snapshot_support: revert_to_snapshot_support_share_capability - share_group_id: share_group_id - source_share_group_snapshot_member_id: source_share_group_snapshot_member_id - mount_snapshot_support: mount_snapshot_support_share_capability - progress: progress_share_instance - count: count - volume_type: volume_type_shares_response - export_location: export_location - export_locations: export_locations - is_soft_deleted: is_soft_deleted_response - scheduled_to_be_deleted_at: scheduled_to_be_deleted_at_response - encryption_key_ref: encryption_key_ref_response Response example ---------------- .. literalinclude:: samples/share-update-response.json :language: javascript Delete share ~~~~~~~~~~~~ .. rest_method:: DELETE /v2/shares/{share_id} Deletes a share. Preconditions - Share status must be ``available``, ``error`` or ``inactive`` - You cannot already have a snapshot of the share. - You cannot already have a group snapshot of the share. - You cannot already have a replica of the share. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: share_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/snapshot-instances.inc0000664000175000017500000001034000000000000022216 0ustar00zuulzuul00000000000000.. -*- rst -*- Share snapshot instances (since API v2.19) ========================================== A share snapshot instance is an internal representation for a snapshot of a share. A single snapshot can have multiple snapshot instances if the parent share has multiple ``instances``. When a share is replicated or is in the process of being migrated, it can live in multiple places and each individual location is called an "instance", internally within the Shared File Systems service. By default administrators can list, show information for and explicitly set the state of share snapshot instances. Use the ``policy.yaml`` file to grant permissions for these actions to other roles. List share snapshot instances ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/snapshot-instances .. versionadded:: 2.19 Lists all share snapshot instances. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: snapshot_instance_id_response - snapshot_id: snapshot_id - status: snapshot_instance_status Response example ---------------- .. literalinclude:: samples/snapshot-instances-list-response.json :language: javascript List share snapshot instances with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/snapshot-instances/detail .. 
versionadded:: 2.19 Lists all share snapshot instances with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: snapshot_instance_id_response - snapshot_id: snapshot_id - created_at: created_at - updated_at: updated_at - status: snapshot_instance_status - share_id: share_id - share_instance_id: share_instance_id_1 - progress: progress - provider_location: snapshot_provider_location Response example ---------------- .. literalinclude:: samples/snapshot-instances-list-with-detail-response.json :language: javascript Show share snapshot instance details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/snapshot-instances/{snapshot_instance_id} .. versionadded:: 2.19 Shows details for a share snapshot instance. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_instance_id: snapshot_instance_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: snapshot_instance_id_response - snapshot_id: snapshot_id - created_at: created_at - updated_at: updated_at - status: snapshot_instance_status - share_id: share_id - share_instance_id: share_instance_id_1 - progress: progress - provider_location: snapshot_provider_location Response example ---------------- .. literalinclude:: samples/snapshot-instance-show-response.json :language: javascript Reset share snapshot instance state ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/snapshot-instances/{snapshot_instance_id}/action .. versionadded:: 2.19 Administrator only. 
Explicitly updates the state of a share snapshot instance. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_instance_id: snapshot_instance_id_path - status: snapshot_instance_status Request example --------------- .. literalinclude:: samples/snapshot-instance-actions-reset-state-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/snapshot-metadata.inc0000664000175000017500000001136000000000000022012 0ustar00zuulzuul00000000000000.. -*- rst -*- Snapshot metadata (Since API v2.73) =================================== Shows, sets, updates, and unsets snapshot metadata. Show all snapshot metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/snapshots/{snapshot_id}/metadata .. versionadded:: 2.73 Shows all the metadata for a snapshot, as key and value pairs. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response example ---------------- .. literalinclude:: samples/snapshot-show-metadata-response.json :language: javascript Show snapshot metadata item ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/snapshots/{snapshot_id}/metadata/{key} .. versionadded:: 2.73 Retrieves a specific metadata item from a snapshot's metadata by its key. If the specified key does not represent a valid metadata item, the API will respond with HTTP 404. Response codes -------------- .. 
rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - key: metadata_key_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_item Response example ---------------- .. literalinclude:: samples/snapshot-show-metadata-item-response.json :language: javascript Set snapshot metadata ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/snapshots/{snapshot_id}/metadata .. versionadded:: 2.73 Allows adding new metadata items as key-value pairs. This API will not delete pre-existing metadata items. If the request object contains metadata items that already exist, they will be updated with new values as specified in the request object. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - metadata: metadata_request Request example --------------- .. literalinclude:: samples/snapshot-set-metadata-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response example ---------------- .. literalinclude:: samples/snapshot-set-metadata-response.json :language: javascript Update snapshot metadata ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/snapshots/{snapshot_id}/metadata .. versionadded:: 2.73 Replaces the metadata for a given snapshot with the metadata (specified as key-value pairs) in the request object. All pre-existing metadata of the snapshot will be deleted and replaced with the new metadata supplied. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - metadata: metadata_request Request example --------------- .. literalinclude:: samples/snapshot-update-metadata-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response example ---------------- .. literalinclude:: samples/snapshot-update-metadata-response.json :language: javascript To delete all existing metadata items on a given snapshot, the request object needs to specify an empty metadata object: Request example --------------- .. literalinclude:: samples/snapshot-update-null-metadata-request.json :language: javascript Response example ---------------- .. literalinclude:: samples/snapshot-update-null-metadata-response.json :language: javascript Delete snapshot metadata item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/snapshots/{snapshot_id}/metadata/{key} .. versionadded:: 2.73 Deletes a single metadata item on a snapshot, idetified by its key. If the specified key does not represent a valid metadata item, the API will respond with HTTP 404. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - key: metadata_key_path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/snapshots.inc0000664000175000017500000002577600000000000020437 0ustar00zuulzuul00000000000000.. -*- rst -*- Share snapshots =============== Use the Shared File Systems service to make snapshots of shares. A share snapshot is a point-in-time, read-only copy of the data that is contained in a share. The APIs below allow controlling share snapshots. 
They are represented by a "snapshot" resource in the Shared File Systems service, and they can have user-defined metadata such as a name and description. You can create, manage, update, and delete share snapshots. After you create or manage a share snapshot, you can create a share from it. You can also revert a share to its most recent snapshot. You can update a share snapshot to rename it, change its description, or update its state to one of these supported states: - ``available`` - ``error`` - ``creating`` - ``deleting`` - ``error_deleting`` - ``manage_starting`` - ``manage_error`` - ``unmanage_starting`` - ``unmanage_error`` - ``restoring`` As administrator, you can also reset the state of a snapshot and force-delete a share snapshot in any state. Use the ``policy.yaml`` file to grant permissions for these actions to other roles. List share snapshots ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/snapshots Lists all share snapshots. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - name~: name_inexact_query - description~: description_inexact_query - with_count: with_count_snapshot_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: snapshot_id - name: name Response example ---------------- .. literalinclude:: samples/snapshots-list-response.json :language: javascript List share snapshots with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/snapshots/detail Lists all share snapshots with details. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all_tenants_query - name~: name_inexact_query - description~: description_inexact_query - with_count: with_count_snapshot_query Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: snapshot_id - status: snapshot_status - share_id: snapshot_share_id - name: name - description: description - created_at: created_at - share_proto: snapshot_share_protocol - share_size: snapshot_share_size - size: snapshot_size - project_id: snapshot_project_id - user_id: snapshot_user_id - provider_location: snapshot_provider_location_optional Response example ---------------- .. literalinclude:: samples/snapshots-list-detailed-response.json :language: javascript Show share snapshot details ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/snapshots/{snapshot_id} Shows details for a share snapshot. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: snapshot_id - status: snapshot_status - share_id: snapshot_share_id - name: name - description: description - created_at: created_at - share_proto: snapshot_share_protocol - share_size: snapshot_share_size - size: snapshot_size - project_id: snapshot_project_id - user_id: snapshot_user_id - provider_location: snapshot_provider_location_optional Response example ---------------- .. literalinclude:: samples/snapshot-show-response.json :language: javascript Create share snapshot ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/snapshots Creates a snapshot from a share. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 422 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: snapshot_share_id_request - force: force_snapshot_request - name: name_request - description: description_request - display_name: display_name_request - display_description: display_description_request Request example --------------- .. literalinclude:: samples/snapshot-create-request.json :language: javascript Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: snapshot_id - share_id: snapshot_share_id - status: snapshot_status - name: name - description: description - created_at: created_at - share_proto: snapshot_share_protocol - share_size: snapshot_share_size - provider_location: snapshot_provider_location_optional - size: snapshot_size - project_id: snapshot_project_id - user_id: snapshot_user_id Response example ---------------- .. literalinclude:: samples/snapshot-create-response.json :language: javascript Manage share snapshot (since API v2.12) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/snapshots/manage .. versionadded:: 2.12 Configures Shared File Systems to manage a share snapshot. .. note:: Managing snapshots of shares that are created on top of share servers (i.e. created with share networks) is not supported prior to API version 2.49. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - share_id: snapshot_manage_share_id - provider_location: snapshot_provider_location_request - name: name_request - display_name: display_name_request - description: description_request - display_description: display_description_request - driver_options: driver_options Request example --------------- .. literalinclude:: samples/snapshot-manage-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: snapshot_id - share_id: snapshot_share_id - status: snapshot_manage_status - name: name - description: description - created_at: created_at - share_proto: snapshot_share_protocol - share_size: snapshot_share_size - provider_location: snapshot_provider_location - size: snapshot_size - project_id: snapshot_project_id - user_id: snapshot_user_id Response example ---------------- .. literalinclude:: samples/snapshot-manage-response.json :language: javascript Unmanage share snapshot (since API v2.12) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/snapshots/{snapshot_id}/action .. versionadded:: 2.12 Configures Shared File Systems to stop managing a share snapshot. .. note:: Unmanaging snapshots of shares that are created on top of share servers (i.e. created with share networks) is not supported prior to API version 2.49. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - unmanage: snapshot_unmanage Request example --------------- .. literalinclude:: samples/snapshot-actions-unmanage-request.json :language: javascript Response parameters ------------------- There is no body content for the response. Reset share snapshot state ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/snapshots/{snapshot_id}/action Administrator only. Explicitly updates the state of a share snapshot. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - status: snapshot_status_request Request example --------------- .. 
literalinclude:: samples/snapshot-actions-reset-state-request.json :language: javascript Force-delete share snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/snapshots/{snapshot_id}/action Administrator only. Force-deletes a share snapshot in any state. Use the ``policy.yaml`` file to grant permissions for this action to other roles. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - force_delete: snapshot_force_delete Request example --------------- .. literalinclude:: samples/snapshot-actions-force-delete-request.json :language: javascript Update share snapshot ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/snapshots/{snapshot_id} Updates a share snapshot. You can update these attributes: - ``display_name``, which also changes the ``name`` of the share snapshot. - ``display_description``, which also changes the ``description`` of the share snapshot. If you try to update other attributes, they retain their previous values. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 422 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - display_name: display_name_request - display_description: display_description_request Request example --------------- .. literalinclude:: samples/snapshot-update-request.json :language: javascript Response parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: snapshot_id - status: snapshot_status - share_id: snapshot_share_id - name: name - description: description - created_at: created_at - share_proto: snapshot_share_protocol - share_size: snapshot_share_size - size: snapshot_size - project_id: snapshot_project_id - user_id: snapshot_user_id Response example ---------------- .. literalinclude:: samples/snapshot-update-response.json :language: javascript Delete share snapshot ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/snapshots/{snapshot_id} Deletes a share snapshot. Preconditions - Share snapshot status must be ``available`` or ``error``. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/status.yaml0000664000175000017500000000325100000000000020111 0ustar00zuulzuul00000000000000 200: default: | Request was successful. 201: default: | Request has been fulfilled and new resource created. 202: default: | Request is accepted, but processing may take some time. 203: default: | Returned information is not full set, but a subset. 204: default: | Request fulfilled but service does not return anything. 300: default: | The resource corresponds to more than one representation. 400: default: | Some content in the request was invalid. 401: default: | User must authenticate before making a request. 403: default: | Policy does not allow current user to do this operation. 404: default: | The requested resource could not be found. 405: default: | Method is not valid for this endpoint and resource. 409: default: | This resource has an action in progress that would conflict with this request. 413: default: | This operation cannot be completed. 
415: default: | The entity of the request is in a format not supported by the requested resource for the method. 422: default: | The entity of the request is in a format not processable by the requested resource for the method. 500: default: | Something went wrong with the service which prevents it from fulfilling the request. 501: default: | The service does not have the functionality required to fulfill this request. 503: default: | The service cannot handle the request right now. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/user-messages.inc0000664000175000017500000000675600000000000021175 0ustar00zuulzuul00000000000000.. -*- rst -*- User messages (since API 2.37) ============================== Lists, shows and deletes user messages. User messages are automatically created when an asynchronous action fails on a resource. In such situations an error is logged in the appropriate log file but end users may not have access to the log files. User messages can be used by users to get error details for failed actions. This is handy for example when creating shares - if a share creation fails because a scheduling filter doesn't find suitable back-end host for the share, this share will end up in error state, but from user messages API users can get details about the last executed filter which helps them identify the issue and perhaps re-attempt the creation request with different parameters. List user messages ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/messages .. versionadded:: 2.37 Lists all user messages. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - limit: limit - offset: offset - sort_key: sort_key_messages - sort_dir: sort_dir - action_id: action_id - detail_id: detail_id - message_level: message_level - project_id: project_id_messages - request_id: request_id - resource_id: resource_id - resource_type: resource_type - created_since: created_since - created_before: created_before Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: message_id_body - action_id: action_id_body - detail_id: detail_id_body - message_level: message_level_body - project_id: project_id_messages_body - request_id: request_id_body - resource_id: resource_id_body - resource_type: resource_type_body - message_members_links: message_members_links - expires_at : message_expires_at_body - created_at : created_at Response example ---------------- .. literalinclude:: samples/user-messages-list-response.json :language: javascript Show user message details ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/messages/{message_id} .. versionadded:: 2.37 Shows details for a user message. Response codes -------------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - message_id: message_id Response parameters ------------------- .. rest_parameters:: parameters.yaml - id: message_id_body - action_id: action_id_body - detail_id: detail_id_body - message_level: message_level_body - project_id: project_id_messages_body - request_id: request_id_body - resource_id: resource_id_body - resource_type: resource_type_body - message_links: message_links - expires_at : message_expires_at_body - created_at : created_at Response example ---------------- .. literalinclude:: samples/user-message-show-response.json :language: javascript Delete message ~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/messages/{message_id} .. 
versionadded:: 2.37 Deletes a user message. Response codes -------------- .. rest_status_code:: success status.yaml - 202 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - message_id: message_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/api-ref/source/versions.inc0000664000175000017500000000605700000000000020254 0ustar00zuulzuul00000000000000.. -*- rst -*- API versions ============ Lists information for all Shared File Systems API versions. Concepts ~~~~~~~~ In order to bring new features to users over time, the Shared File Systems API supports versioning. There are two kinds of versions in the Shared File Systems API: - ''major versions'', which have dedicated URLs - ''microversions'', which can be requested through the use of the ``X-OpenStack-Manila-API-Version`` header Read more about microversion guidelines that the service adheres to `here `_ See `A history of the Shared File Systems API versions `_ to view the evolution of the API and pick an appropriate version for API requests. List All Major Versions ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET / This fetches all the information about all known major API versions in the deployment. Links to more specific information will be provided for each API version, as well as information about supported min and max microversions. Response codes -------------- .. rest_status_code:: success status.yaml - 300 Response -------- .. rest_parameters:: parameters.yaml - versions: versions - id: version_id - updated: version_updated - status: version_status - links: links - media-types: version_media_types - version: version_max - min_version: version_min .. note:: The ``updated`` and ``media-types`` parameters in the response are vestigial and provide no useful information. They will probably be deprecated and removed in the future. 
Response Example ---------------- This demonstrates the expected response from a bleeding edge server that supports up to the current microversion. When querying OpenStack environments you will typically find the current microversion on the v2.1 API is lower than listed below. .. literalinclude:: samples/versions-index-response.json :language: javascript Show Details of Specific API Version ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /{api_version}/ This gets the details of a specific API at it's root. Nearly all this information exists at the API root, so this is mostly a redundant operation. Response codes -------------- .. rest_status_code:: success status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - api_version: api_version Response -------- .. rest_parameters:: parameters.yaml - version: version - id: version_id - status: version_status - links: links - version: version_max - updated: version_updated - min_version: version_min - media-types: version_media_types .. note:: The ``updated`` and ``media-types`` parameters in the response are vestigial and provide no useful information. They will probably be deprecated and removed in the future. Response Example ---------------- This is an example of a ``GET /v2/`` on a relatively current server. .. literalinclude:: samples/versions-get-version-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/bindep.txt0000664000175000017500000000243400000000000015063 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed for # install and tests; # see https://docs.openstack.org/infra/bindep/ for additional information. build-essential [platform:dpkg test] gcc [platform:rpm test] # gettext and graphviz are needed by doc builds only. For transition, # have them in both doc and test. # TODO(jaegerandi): Remove test once infra scripts are updated. 
gettext [!platform:suse doc test] gettext-runtime [platform:suse doc test] graphviz [doc test] libffi-dev [platform:dpkg] libffi-devel [platform:redhat] libffi48-devel [platform:suse] virtual/libffi [platform:gentoo] libssl-dev [platform:dpkg] openssl-devel [platform:rpm !platform:suse] libopenssl-devel [platform:suse !platform:rpm] locales [platform:debian] mariadb [platform:rpm] mariadb-server [platform:redhat platform:debian] mariadb-devel [platform:redhat] libmariadb-dev-compat [platform:debian] libmysqlclient-dev [platform:ubuntu] libmysqlclient-devel [platform:suse] libpq-dev [platform:dpkg] mysql-client [platform:dpkg !platform:debian] mysql-server [platform:dpkg !platform:debian] postgresql postgresql-client [platform:dpkg] postgresql-devel [platform:rpm] postgresql-server [platform:rpm] libxml2-dev [platform:dpkg test] libxslt-devel [platform:rpm test] libxslt1-dev [platform:dpkg test] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6296754 manila-21.0.0/contrib/0000775000175000017500000000000000000000000014516 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7576737 manila-21.0.0/contrib/ci/0000775000175000017500000000000000000000000015111 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/contrib/ci/post_test_hook.sh0000775000175000017500000002213700000000000020521 0ustar00zuulzuul00000000000000#!/bin/bash -xe # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. # First argument ($1) expects 'multibackend' as value for setting appropriate # tempest conf opts, all other values will assume singlebackend installation. SCRIPT_IS_DEPRECATED="Manila's pre_test_hook and post_test_hook scripts are DEPRECATED. Please use alternate tools to configure devstack's local.conf file or run tempest tests" sudo chown -R $USER:stack $BASE/new/tempest sudo chown -R $USER:stack $BASE/data/tempest sudo chmod -R o+rx $BASE/new/devstack/files # Import devstack functions 'iniset', 'iniget' and 'trueorfalse' source $BASE/new/devstack/functions export TEMPEST_CONFIG=${TEMPEST_CONFIG:-$BASE/new/tempest/etc/tempest.conf} # === Handle script arguments === # First argument is expected to contain value equal either to 'singlebackend' # or 'multibackend' that defines how many back-ends are used. BACK_END_TYPE=$1 # Second argument is expected to have codename of a share driver. DRIVER=$2 # Third argument is expected to contain either 'api' or 'scenario' values # that define test suites to be run. TEST_TYPE=$3 # Fourth argument is expected to be boolean-like and it should be 'true' # when PostgreSQL DB back-end is used and 'false' when MySQL. 
POSTGRES_ENABLED=$4 POSTGRES_ENABLED=$(trueorfalse True POSTGRES_ENABLED) if [[ "$BACK_END_TYPE" == "multibackend" ]]; then iniset $TEMPEST_CONFIG share multi_backend True # Set share backends names, they are defined within pre_test_hook export BACKENDS_NAMES="LONDON,PARIS" else export BACKENDS_NAMES="LONDON" fi iniset $TEMPEST_CONFIG share backend_names $BACKENDS_NAMES # Set two retries for CI jobs iniset $TEMPEST_CONFIG share share_creation_retry_number 2 # Suppress errors in cleanup of resources SUPPRESS_ERRORS=${SUPPRESS_ERRORS_IN_CLEANUP:-True} iniset $TEMPEST_CONFIG share suppress_errors_in_cleanup $SUPPRESS_ERRORS USERNAME_FOR_USER_RULES=${USERNAME_FOR_USER_RULES:-"manila"} PASSWORD_FOR_SAMBA_USER=${PASSWORD_FOR_SAMBA_USER:-$USERNAME_FOR_USER_RULES} # Enable feature tests: # Default options are as specified in tempest. RUN_MANILA_QUOTA_TESTS=${RUN_MANILA_QUOTA_TESTS:-True} RUN_MANILA_SHRINK_TESTS=${RUN_MANILA_SHRINK_TESTS:-True} RUN_MANILA_SNAPSHOT_TESTS=${RUN_MANILA_SNAPSHOT_TESTS:-True} RUN_MANILA_REVERT_TO_SNAPSHOT_TESTS=${RUN_MANILA_REVERT_TO_SNAPSHOT_TESTS:-False} RUN_MANILA_SG_TESTS=${RUN_MANILA_SG_TESTS:-${RUN_MANILA_CG_TESTS:-True}} RUN_MANILA_MANAGE_TESTS=${RUN_MANILA_MANAGE_TESTS:-True} RUN_MANILA_MANAGE_SNAPSHOT_TESTS=${RUN_MANILA_MANAGE_SNAPSHOT_TESTS:-False} RUN_MANILA_REPLICATION_TESTS=${RUN_MANILA_REPLICATION_TESTS:-False} RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS=${RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS:-False} RUN_MANILA_DRIVER_ASSISTED_MIGRATION_TESTS=${RUN_MANILA_DRIVER_ASSISTED_MIGRATION_TESTS:-False} RUN_MANILA_MOUNT_SNAPSHOT_TESTS=${RUN_MANILA_MOUNT_SNAPSHOT_TESTS:-False} RUN_MANILA_MIGRATION_WITH_PRESERVE_SNAPSHOTS_TESTS=${RUN_MANILA_MIGRATION_WITH_PRESERVE_SNAPSHOTS_TESTS:-False} RUN_MANILA_IPV6_TESTS=${RUN_MANILA_IPV6_TESTS:-False} MANILA_CONF=${MANILA_CONF:-/etc/manila/manila.conf} # Capabilitities CAPABILITY_CREATE_SHARE_FROM_SNAPSHOT_SUPPORT=${CAPABILITY_CREATE_SHARE_FROM_SNAPSHOT_SUPPORT:-True} 
MANILA_CONFIGURE_DEFAULT_TYPES=${MANILA_CONFIGURE_DEFAULT_TYPES:-True} if [[ -z "$MULTITENANCY_ENABLED" ]]; then # Define whether share drivers handle share servers or not. # Requires defined config option 'driver_handles_share_servers'. NO_SHARE_SERVER_HANDLING_MODES=0 WITH_SHARE_SERVER_HANDLING_MODES=0 # Convert backend names to config groups using lowercase translation CONFIG_GROUPS=${BACKENDS_NAMES,,} for CG in ${CONFIG_GROUPS//,/ }; do DRIVER_HANDLES_SHARE_SERVERS=$(iniget $MANILA_CONF $CG driver_handles_share_servers) if [[ $DRIVER_HANDLES_SHARE_SERVERS == False ]]; then NO_SHARE_SERVER_HANDLING_MODES=$((NO_SHARE_SERVER_HANDLING_MODES+1)) elif [[ $DRIVER_HANDLES_SHARE_SERVERS == True ]]; then WITH_SHARE_SERVER_HANDLING_MODES=$((WITH_SHARE_SERVER_HANDLING_MODES+1)) else echo "Config option 'driver_handles_share_servers' either is not defined or \ defined with improper value - '$DRIVER_HANDLES_SHARE_SERVERS'." exit 1 fi done if [[ $NO_SHARE_SERVER_HANDLING_MODES -ge 1 && $WITH_SHARE_SERVER_HANDLING_MODES -ge 1 || \ $NO_SHARE_SERVER_HANDLING_MODES -eq 0 && $WITH_SHARE_SERVER_HANDLING_MODES -eq 0 ]]; then echo 'Allowed only same driver modes for all backends to be run with Tempest job.' exit 1 elif [[ $NO_SHARE_SERVER_HANDLING_MODES -ge 1 ]]; then MULTITENANCY_ENABLED='False' elif [[ $WITH_SHARE_SERVER_HANDLING_MODES -ge 1 ]]; then MULTITENANCY_ENABLED='True' else echo 'Should never get here unless an error occurred.' exit 1 fi else MULTITENANCY_ENABLED=$(trueorfalse True MULTITENANCY_ENABLED) fi # Set multitenancy configuration for Tempest iniset $TEMPEST_CONFIG share multitenancy_enabled $MULTITENANCY_ENABLED if [[ "$MULTITENANCY_ENABLED" == "False" ]]; then # Using approach without handling of share servers we have bigger load for # volume creation in Cinder using Generic driver. So, reduce amount of # threads to avoid errors for Cinder volume creations that appear # because of lack of free space. 
MANILA_TEMPEST_CONCURRENCY=${MANILA_TEMPEST_CONCURRENCY:-8} iniset $TEMPEST_CONFIG auth create_isolated_networks False fi # let us control if we die or not set +o errexit cd $BASE/new/tempest export MANILA_TEMPEST_CONCURRENCY=${MANILA_TEMPEST_CONCURRENCY:-6} export MANILA_TESTS=${MANILA_TESTS:-'manila_tempest_tests.tests.api'} # Enable quota tests iniset $TEMPEST_CONFIG share run_quota_tests $RUN_MANILA_QUOTA_TESTS # Enable shrink tests iniset $TEMPEST_CONFIG share run_shrink_tests $RUN_MANILA_SHRINK_TESTS # Enable snapshot tests iniset $TEMPEST_CONFIG share run_snapshot_tests $RUN_MANILA_SNAPSHOT_TESTS # Enable revert to snapshot tests iniset $TEMPEST_CONFIG share run_revert_to_snapshot_tests $RUN_MANILA_REVERT_TO_SNAPSHOT_TESTS # Enable share group tests iniset $TEMPEST_CONFIG share run_share_group_tests $RUN_MANILA_SG_TESTS # Enable manage/unmanage tests iniset $TEMPEST_CONFIG share run_manage_unmanage_tests $RUN_MANILA_MANAGE_TESTS # Enable manage/unmanage snapshot tests iniset $TEMPEST_CONFIG share run_manage_unmanage_snapshot_tests $RUN_MANILA_MANAGE_SNAPSHOT_TESTS # Enable replication tests iniset $TEMPEST_CONFIG share run_replication_tests $RUN_MANILA_REPLICATION_TESTS # Enable migration tests iniset $TEMPEST_CONFIG share run_host_assisted_migration_tests $RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS iniset $TEMPEST_CONFIG share run_driver_assisted_migration_tests $RUN_MANILA_DRIVER_ASSISTED_MIGRATION_TESTS iniset $TEMPEST_CONFIG share run_migration_with_preserve_snapshots_tests $RUN_MANILA_MIGRATION_WITH_PRESERVE_SNAPSHOTS_TESTS # Enable mountable snapshots tests iniset $TEMPEST_CONFIG share run_mount_snapshot_tests $RUN_MANILA_MOUNT_SNAPSHOT_TESTS # Create share from snapshot support iniset $TEMPEST_CONFIG share capability_create_share_from_snapshot_support $CAPABILITY_CREATE_SHARE_FROM_SNAPSHOT_SUPPORT iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4 iniset $TEMPEST_CONFIG validation network_for_ssh ${PRIVATE_NETWORK_NAME:-"private"} if [ $(trueorfalse 
False MANILA_CONFIGURE_DEFAULT_TYPES) == True ]; then iniset $TEMPEST_CONFIG share default_share_type_name ${MANILA_DEFAULT_SHARE_TYPE:-default} fi ADMIN_DOMAIN_NAME=${ADMIN_DOMAIN_NAME:-"Default"} export OS_PROJECT_DOMAIN_NAME=$ADMIN_DOMAIN_NAME export OS_USER_DOMAIN_NAME=$ADMIN_DOMAIN_NAME source $BASE/new/devstack/openrc admin admin public_net_id=$(openstack network list --name $PUBLIC_NETWORK_NAME -f value -c ID ) iniset $TEMPEST_CONFIG network public_network_id $public_net_id # Set config to run IPv6 tests according to env var iniset $TEMPEST_CONFIG share run_ipv6_tests $RUN_MANILA_IPV6_TESTS if ! [[ -z "$OVERRIDE_IP_FOR_NFS_ACCESS" ]]; then # Set config to use specified IP as access rule on NFS scenario tests # in order to workaround multiple NATs between the VMs and the storage # controller iniset $TEMPEST_CONFIG share override_ip_for_nfs_access $OVERRIDE_IP_FOR_NFS_ACCESS fi echo "Manila service details" source $BASE/new/devstack/openrc admin admin manila service-list echo $SCRIPT_IS_DEPRECATED echo "Running tempest manila test suites" cd $BASE/new/tempest/ # List plugins in logs to enable debugging sudo -H -u $USER tempest list-plugins sudo -H -u $USER tempest run -r $MANILA_TESTS --concurrency=$MANILA_TEMPEST_CONCURRENCY RETVAL=$? cd - exit $RETVAL ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/contrib/ci/pre_test_hook.sh0000775000175000017500000001212300000000000020314 0ustar00zuulzuul00000000000000#!/bin/bash -xe # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside pre_test_hook function in devstack gate. # First argument ($1) expects boolean as value where: # 'False' means share driver will not handle share servers # 'True' means it will handle share servers. # Import devstack function 'trueorfalse', 'deprecated' source $BASE/new/devstack/functions deprecated "Manila's pre_test_hook and post_test_hook scripts are DEPRECATED. Please use alternate tools to configure devstack's local.conf file" localconf=$BASE/new/devstack/local.conf echo "[[local|localrc]]" >> $localconf echo "DEVSTACK_GATE_TEMPEST_ALLOW_TENANT_ISOLATION=1" >> $localconf echo "API_RATE_LIMIT=False" >> $localconf echo "VOLUME_BACKING_FILE_SIZE=22G" >> $localconf echo "CINDER_LVM_TYPE=thin" >> $localconf # Set DevStack's PYTHON3_VERSION variable if CI scripts specify it if [[ ! -z "$PYTHON3_VERSION" ]]; then echo "PYTHON3_VERSION=$PYTHON3_VERSION" >> $localconf fi # NOTE(mkoderer): switch to keystone v3 by default echo "IDENTITY_API_VERSION=3" >> $localconf # NOTE(vponomaryov): Set oversubscription ratio for Cinder LVM driver # bigger than 1.0, because in CI we do not need such small value. # It will allow us to avoid exceeding real capacity in CI test runs. echo "CINDER_OVERSUBSCRIPTION_RATIO=100.0" >> $localconf echo "MANILA_ENABLED_BACKENDS=london,paris" >> $localconf echo "MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST=${MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST:=False}" >> $localconf # === Handle script arguments === # First argument is expected to be a boolean-like value for DHSS. DHSS=$1 DHSS=$(trueorfalse True DHSS) # Second argument is expected to have codename of a share driver. DRIVER=$2 # Third argument is expected to contain value equal either to 'singlebackend' # or 'multibackend' that defines how many back-ends should be configured. 
BACK_END_TYPE=$3 echo "MANILA_OPTGROUP_london_driver_handles_share_servers=$DHSS" >> $localconf echo "MANILA_OPTGROUP_paris_driver_handles_share_servers=$DHSS" >> $localconf echo "MANILA_USE_SERVICE_INSTANCE_PASSWORD=True" >> $localconf echo "MANILA_USE_DOWNGRADE_MIGRATIONS=True" >> $localconf # Set MANILA_ADMIN_NET_RANGE for admin_network and data_service IP echo "MANILA_ADMIN_NET_RANGE=${MANILA_ADMIN_NET_RANGE:=10.2.5.0/24}" >> $localconf echo "MANILA_DATA_NODE_IP=${MANILA_DATA_NODE_IP:=$MANILA_ADMIN_NET_RANGE}" >> $localconf echo "MANILA_DATA_COPY_CHECK_HASH=${MANILA_DATA_COPY_CHECK_HASH:=True}" >> $localconf # Share Migration CI tests migration_continue period task interval echo "MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL=${MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL:=1}" >> $localconf # Share Server Migration CI tests migration_continue period task interval echo "MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL=${MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL:=10}" >> $localconf MANILA_SERVICE_IMAGE_ENABLED=${MANILA_SERVICE_IMAGE_ENABLED:-False} DEFAULT_EXTRA_SPECS=${DEFAULT_EXTRA_SPECS:-"'snapshot_support=True create_share_from_snapshot_support=True'"} if [[ "$DRIVER" == "windows" ]]; then MANILA_SERVICE_IMAGE_ENABLED=True echo "SHARE_DRIVER=manila.share.drivers.windows.windows_smb_driver.WindowsSMBDriver" >> $localconf fi echo "MANILA_SERVICE_IMAGE_ENABLED=$MANILA_SERVICE_IMAGE_ENABLED" >> $localconf if [[ "$MANILA_SERVICE_IMAGE_ENABLED" == True ]]; then echo "MANILA_SERVICE_IMAGE_URL=$MANILA_SERVICE_IMAGE_URL" >> $localconf echo "MANILA_SERVICE_IMAGE_NAME=$MANILA_SERVICE_IMAGE_NAME" >> $localconf fi echo "MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS=$DEFAULT_EXTRA_SPECS" >> $localconf echo "MANILA_CONFIGURE_DEFAULT_TYPES=${MANILA_CONFIGURE_DEFAULT_TYPES:-True}" >> $localconf # Enabling isolated metadata in Neutron is required because # Tempest creates isolated networks and created vm's in scenario tests don't # have access to Nova Metadata service. 
This leads to unavailability of # created vm's in scenario tests. echo "ENABLE_ISOLATED_METADATA=True" >> $localconf echo "TEMPEST_USE_TEST_ACCOUNTS=True" >> $localconf echo "TEMPEST_ALLOW_TENANT_ISOLATION=False" >> $localconf echo "TEMPEST_CONCURRENCY=${MANILA_TEMPEST_CONCURRENCY:-8}" >> $localconf MANILA_SETUP_IPV6=${MANILA_SETUP_IPV6:-False} echo "MANILA_SETUP_IPV6=${MANILA_SETUP_IPV6}" >> $localconf if [[ "$MANILA_SETUP_IPV6" == True ]]; then # When setting up proper IPv6 networks, we should do it ourselves so we can # use Neutron Dynamic Routing plugin with address scopes instead of the # regular Neutron DevStack configuration. echo "NEUTRON_CREATE_INITIAL_NETWORKS=False" >> $localconf echo "IP_VERSION=4+6" >> $localconf echo "MANILA_RESTORE_IPV6_DEFAULT_ROUTE=False" >> $localconf fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7576737 manila-21.0.0/contrib/share_driver_hooks/0000775000175000017500000000000000000000000020376 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/contrib/share_driver_hooks/README.rst0000664000175000017500000000626700000000000022100 0ustar00zuulzuul00000000000000Manila mount automation example using share driver hooks feature ================================================================ Manila has feature called 'share driver hooks'. Which allows to perform actions before and after driver actions such as 'create share' or 'access allow', also allows to do custom things on periodic basis. Here, we provide example of mount automation using this feature. This example uses OpenStack Zaqar project for sending notifications when operations 'access allow' and 'access deny' are performed. Server side hook will send notifications about changed access for shares after granting and prior to denying access. 
Possibilities of the mount automation example (consumer) -------------------------------------------------------- - Supports only 'NFS' protocol. - Supports only 'IP' rules. - Supports both levels of access - 'RW' and 'RO'. - Consume interval can be configured. - Allows to choose parent mount directory. Server side setup and run ------------------------- 1. Place files 'zaqarclientwrapper.py' and 'zaqar_notification.py' to dir %manila_dir%/manila/share/hooks. Then update manila configuration file with following options: :: [share_backend_config_group] hook_drivers = manila.share.hooks.zaqar_notification.ZaqarNotification enable_pre_hooks = True enable_post_hooks = True enable_periodic_hooks = False [zaqar] zaqar_auth_url = http://%ip_of_endpoint_with_keystone%:35357/v2.0/ zaqar_region_name = %name_of_region_optional% zaqar_username = foo_user zaqar_password = foo_tenant zaqar_project_name = foo_password zaqar_queues = manila_notification 2. Restart manila-share service. Consumer side setup and run --------------------------- 1. Place files 'zaqarclientwrapper.py' and 'zaqar_notification_example_consumer.py' to any dir on user machine, but they both should be in the same dir. 2. Make sure that following dependencies are installed: - PIP dependencies: - netaddr - oslo_concurrency - oslo_config - oslo_utils - python-zaqarclient - System libs that install 'mount' and 'mount.nfs' apps. 3. Create file with following options: :: [zaqar] # Consumer-related options sleep_between_consume_attempts = 7 mount_dir = "/tmp" expected_ip_addresses = 10.254.0.4 # Common options for consumer and server sides zaqar_auth_url = http://%ip_of_endpoint_with_keystone%:35357/v2.0/ zaqar_region_name = %name_of_region_optional% zaqar_username = foo_user zaqar_password = foo_tenant zaqar_project_name = foo_password zaqar_queues = manila_notification Consumer options descriptions: - 'sleep_between_consume_attempts' - wait interval between consuming notifications from message queue. 
- 'mount_dir' - parent mount directory that will contain all mounted shares as subdirectories. - 'expected_ip_addresses' - list of IP addresses that are expected to be granted access for. Could be either equal to or be part of a CIDR. Match triggers [un]mount operations. 4. Run consumer with following command: :: $ zaqar_notification_example_consumer.py --config-file path/to/config.conf 5. Now create NFS share and grant IP access to consumer by its IP address. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/contrib/share_driver_hooks/zaqar_notification.py0000664000175000017500000001147500000000000024644 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from oslo_utils import timeutils from manila import exception from manila.share import api from manila.share import hook from manila.share.hooks import zaqarclientwrapper # noqa CONF = zaqarclientwrapper.CONF LOG = log.getLogger(__name__) ZAQARCLIENT = zaqarclientwrapper.ZAQARCLIENT class ZaqarNotification(hook.HookBase): share_api = api.API() def _access_changed_trigger(self, context, func_name, access_rules_ids, share_instance_id): access = [self.db.share_access_get(context, rule_id) for rule_id in access_rules_ids] share_instance = self.db.share_instance_get(context, share_instance_id) share = self.share_api.get(context, share_id=share_instance.share_id) def rules_view(rules): result = [] for rule in rules: access_instance = None for ins in rule.instance_mappings: if ins.share_instance_id == share_instance_id: access_instance = ins break else: raise exception.InstanceNotFound( instance_id=share_instance_id) result.append({ 'access_id': rule.id, 'access_instance_id': access_instance.id, 'access_type': rule.access_type, 'access_to': rule.access_to, 'access_level': rule.access_level, }) return result is_allow_operation = 'allow' in func_name results = { 'share_id': share.share_id, 'share_instance_id': share_instance_id, 'export_locations': [ el.path for el in share_instance.export_locations], 'share_proto': share.share_proto, 'access_rules': rules_view(access), 'is_allow_operation': is_allow_operation, 'availability_zone': share_instance.availability_zone, } LOG.debug(results) return results def _execute_pre_hook(self, context, func_name, *args, **kwargs): LOG.debug("\n PRE zaqar notification has been called for " "method '%s'.\n", func_name) if func_name == "deny_access": LOG.debug("\nSending notification about denied access.\n") data = self._access_changed_trigger( context, func_name, kwargs.get('access_rules'), kwargs.get('share_instance_id'), ) self._send_notification(data) def _execute_post_hook(self, context, func_name, pre_hook_data, 
driver_action_results, *args, **kwargs): LOG.debug("\n POST zaqar notification has been called for " "method '%s'.\n", func_name) if func_name == "allow_access": LOG.debug("\nSending notification about allowed access.\n") data = self._access_changed_trigger( context, func_name, kwargs.get('access_rules'), kwargs.get('share_instance_id'), ) self._send_notification(data) def _send_notification(self, data): for queue_name in CONF.zaqar.zaqar_queues: ZAQARCLIENT.queue_name = queue_name message = { "body": { "example_message": ( "message generated at '%s'" % timeutils.utcnow()), "data": data, } } LOG.debug( "\n Sending message %(m)s to '%(q)s' queue using '%(u)s' user " "and '%(p)s' project.", { 'm': message, 'q': queue_name, 'u': CONF.zaqar.zaqar_username, 'p': CONF.zaqar.zaqar_project_name, } ) queue = ZAQARCLIENT.queue(queue_name) queue.post(message) def _execute_periodic_hook(self, context, periodic_hook_data, *args, **kwargs): LOG.debug("Periodic zaqar notification has been called. (Placeholder)") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/contrib/share_driver_hooks/zaqar_notification_example_consumer.py0000775000175000017500000001653100000000000030273 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import pprint import signal import sys import time import netaddr from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import timeutils opts = [ cfg.IntOpt( "consume_interval", default=5, deprecated_name="sleep_between_consume_attempts", help=("Time that script will sleep between requests for consuming " "Zaqar messages in seconds."), ), cfg.StrOpt( "mount_dir", default="/tmp", help="Directory that will contain all mounted shares." ), cfg.ListOpt( "expected_ip_addresses", default=[], help=("List of IP addresses that are expected to be found in access " "rules to trigger [un]mount operation for a share.") ), ] CONF = cfg.CONF def print_with_time(data): time = str(timeutils.utcnow()) print(time + " " + str(data)) def print_pretty_dict(d): pprint.pprint(d) def pop_zaqar_messages(client, queues_names): if not isinstance(queues_names, (list, set, tuple)): queues_names = (queues_names, ) try: user = client.conf['auth_opts']['options']['os_username'] project = client.conf['auth_opts']['options']['os_project_name'] messages = [] for queue_name in queues_names: queue = client.queue(queue_name) messages.extend([str(m.body) for m in queue.pop()]) print_with_time( "Received %(len)s message[s] from '%(q)s' " "queue using '%(u)s' user and '%(p)s' project." % { 'len': len(messages), 'q': queue_name, 'u': user, 'p': project, } ) return messages except Exception as e: print_with_time("Caught exception - %s" % e) return [] def signal_handler(signal, frame): print("") print_with_time("Ctrl+C was pressed. Shutting down consumer.") sys.exit(0) def parse_str_to_dict(string): if not isinstance(string, str): return string result = eval(string) return result def handle_message(data): """Handles consumed message. 
Expected structure of a message is following: {'data': { 'access_rules': [ { 'access_id': u'b28268b9-36c6-40d3-a485-22534077328f', 'access_instance_id': u'd137b2cb-f549-4141-9dd7-36b2789fb973', 'access_level': u'rw', 'access_state': u'active', 'access_to': u'7.7.7.7', 'access_type': u'ip', } ], 'availability_zone': u'nova', 'export_locations': [u'127.0.0.1:/path/to/nfs/share'], 'is_allow_operation': True, 'share_id': u'053eae9a-726f-4f7e-8502-49d7b1adf290', 'share_instance_id': u'dc33e554-e0b9-40f5-9046-c198716d73a0', 'share_proto': u'NFS' }} """ if 'data' in data.keys(): data = data['data'] valid_access = ( 'access_rules' in data and len(data['access_rules']) == 1 and data['access_rules'][0].get('access_type', '?').lower() == 'ip' and data.get('share_proto', '?').lower() == 'nfs' ) if valid_access: is_allow_operation = data['is_allow_operation'] export_location = data['export_locations'][0] if is_allow_operation: mount_share(export_location, data['access_to']) else: unmount_share(export_location, data['access_to']) else: print_with_time('Do nothing with above message.') def execute(cmd): try: print_with_time('Executing following command: \n%s' % cmd) cmd = cmd.split() stdout, stderr = processutils.execute(*cmd) if stderr: print_with_time('Got error: %s' % stderr) return stdout, stderr except Exception as e: print_with_time('Got following error: %s' % e) return False, True def is_share_mounted(mount_point): mounts, stderr = execute('mount') return mount_point in mounts def rule_affects_me(ip_or_cidr): if '/' in ip_or_cidr: net = netaddr.IPNetwork(ip_or_cidr) for my_ip in CONF.zaqar.expected_ip_addresses: if netaddr.IPAddress(my_ip) in net: return True else: for my_ip in CONF.zaqar.expected_ip_addresses: if my_ip == ip_or_cidr: return True return False def mount_share(export_location, access_to): data = { 'mount_point': os.path.join(CONF.zaqar.mount_dir, export_location.split('/')[-1]), 'export_location': export_location, } if (rule_affects_me(access_to) and not 
is_share_mounted(data['mount_point'])): print_with_time( "Mounting '%(export_location)s' share to %(mount_point)s.") execute('sudo mkdir -p %(mount_point)s' % data) stdout, stderr = execute( 'sudo mount.nfs %(export_location)s %(mount_point)s' % data) if stderr: print_with_time("Mount operation failed.") else: print_with_time("Mount operation went OK.") def unmount_share(export_location, access_to): if rule_affects_me(access_to) and is_share_mounted(export_location): print_with_time("Unmounting '%(export_location)s' share.") stdout, stderr = execute('sudo umount %s' % export_location) if stderr: print_with_time("Unmount operation failed.") else: print_with_time("Unmount operation went OK.") def main(): # Register other local modules cur = os.path.dirname(__file__) pathtest = os.path.join(cur) sys.path.append(pathtest) # Init configuration CONF(sys.argv[1:], project="manila_notifier", version=1.0) CONF.register_opts(opts, group="zaqar") # Import common config and Zaqar client import zaqarclientwrapper # Handle SIGINT signal.signal(signal.SIGINT, signal_handler) # Run consumer print_with_time("Consumer was successfully run.") while True: messages = pop_zaqar_messages( zaqarclientwrapper.ZAQARCLIENT, CONF.zaqar.zaqar_queues) if not messages: message = ("No new messages in '%s' queue[s] " "found." % ','.join(CONF.zaqar.zaqar_queues)) else: message = "Got following messages:" print_with_time(message) for message in messages: message = parse_str_to_dict(message) print_pretty_dict(message) handle_message(message) time.sleep(CONF.zaqar.consume_interval) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/contrib/share_driver_hooks/zaqarclientwrapper.py0000664000175000017500000000537000000000000024673 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from zaqarclient.queues import client as zaqar zaqar_notification_opts = [ cfg.StrOpt( "zaqar_username", help="Username that should be used for init of zaqar client.", ), cfg.StrOpt( "zaqar_password", secret=True, help="Password for user specified in opt 'zaqar_username'.", ), cfg.StrOpt( "zaqar_project_name", help=("Project/Tenant name that is owns user specified " "in opt 'zaqar_username'."), ), cfg.StrOpt( "zaqar_auth_url", default="http://127.0.0.1:35357/v2.0/", help="Auth url to be used by Zaqar client.", ), cfg.StrOpt( "zaqar_region_name", help="Name of the region that should be used. Optional.", ), cfg.StrOpt( "zaqar_service_type", default="messaging", help="Service type for Zaqar. Optional.", ), cfg.StrOpt( "zaqar_endpoint_type", default="publicURL", help="Type of endpoint to be used for init of Zaqar client. Optional.", ), cfg.FloatOpt( "zaqar_api_version", default=1.1, help="Version of Zaqar API to use. Optional.", ), cfg.ListOpt( "zaqar_queues", default=["manila_notification_qeueue"], help=("List of queues names to be used for sending Manila " "notifications. 
Optional."), ), ] CONF = cfg.CONF CONF.register_opts(zaqar_notification_opts, group='zaqar') ZAQARCLIENT = zaqar.Client( version=CONF.zaqar.zaqar_api_version, conf={ "auth_opts": { "backend": "keystone", "options": { "os_username": CONF.zaqar.zaqar_username, "os_password": CONF.zaqar.zaqar_password, "os_project_name": CONF.zaqar.zaqar_project_name, "os_auth_url": CONF.zaqar.zaqar_auth_url, "os_region_name": CONF.zaqar.zaqar_region_name, "os_service_type": CONF.zaqar.zaqar_service_type, "os_endpoint_type": CONF.zaqar.zaqar_endpoint_type, "insecure": True, }, }, }, ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7576737 manila-21.0.0/devstack/0000775000175000017500000000000000000000000014662 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/README.rst0000664000175000017500000000127400000000000016355 0ustar00zuulzuul00000000000000====================== Enabling in Devstack ====================== We can enable the manila service in DevStack. For details, please refer to `development-environment-devstack`_, the following steps can be used as a quickstart reference: 1. Download DevStack 2. Add this repo as an external repository:: > cat local.conf [[local|localrc]] # Enable manila enable_plugin manila https://opendev.org/openstack/manila # Enable manila ui in the dashboard enable_plugin manila-ui https://opendev.org/openstack/manila-ui 3. run ``stack.sh`` .. 
_development-environment-devstack: https://docs.openstack.org/manila/latest/contributor/development-environment-devstack.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/apache-manila.template0000664000175000017500000000147500000000000021106 0ustar00zuulzuul00000000000000Listen %PORT% LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" manila_combined WSGIDaemonProcess manila-api processes=%APIWORKERS% threads=2 user=%USER% display-name=%{GROUP} WSGIProcessGroup manila-api WSGIScriptAlias / %MANILA_BIN_DIR%/manila-wsgi WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On = 2.4> ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/manila_api.log CustomLog /var/log/%APACHE_NAME%/manila_api_access.log manila_combined = 2.4> Require all granted Order allow,deny Allow from all ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6296754 manila-21.0.0/devstack/files/0000775000175000017500000000000000000000000015764 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7576737 manila-21.0.0/devstack/files/debs/0000775000175000017500000000000000000000000016701 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/files/debs/manila0000664000175000017500000000000500000000000020060 0ustar00zuulzuul00000000000000lvm2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7576737 manila-21.0.0/devstack/files/rpms/0000775000175000017500000000000000000000000016745 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/devstack/files/rpms/manila0000664000175000017500000000000500000000000020124 0ustar00zuulzuul00000000000000lvm2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7576737 manila-21.0.0/devstack/files/rpms-suse/0000775000175000017500000000000000000000000017722 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/files/rpms-suse/manila0000664000175000017500000000000500000000000021101 0ustar00zuulzuul00000000000000lvm2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/plugin.sh0000775000175000017500000015645300000000000016535 0ustar00zuulzuul00000000000000#!/bin/bash # Plugin file for enabling manila services # ---------------------------------------- # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace # Entry Points # ------------ function _clean_share_group { local vg=$1 local vg_prefix=$2 # Clean out existing shares for lv in `sudo lvs --noheadings -o lv_name $vg`; do # vg_prefix prefixes the LVs we want if [[ "${lv#$vg_prefix}" != "$lv" ]]; then sudo umount -f $MANILA_MNT_DIR/$lv sudo lvremove -f $vg/$lv sudo rm -rf $MANILA_MNT_DIR/$lv fi done } function _clean_manila_lvm_backing_file { local vg=$1 # if there is no logical volume left, it's safe to attempt a cleanup # of the backing file if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then # if the backing physical device is a loop device, it was probably setup by devstack VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1 }') if [[ -n "$VG_DEV" ]]; then sudo losetup -d $VG_DEV rm -f $DATA_DIR/${vg}-backing-file fi fi } function _clean_zfsonlinux_data { for filename in "$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/*; do if [[ $(sudo zpool list | grep $filename) ]]; then echo "Destroying zpool named 
$filename" sudo zpool destroy -f $filename file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR$filename" echo "Destroying file named $file" rm -f $file fi done } function _clean_ip_tables { for ipcmd in iptables ip6tables; do # cleanup rules in the "manila-storage" chain sudo $ipcmd -S -v | sed "s/-c [0-9]* [0-9]* //g" | \ grep "manila-storage" | grep "\-A" | sed "s/-A/-D/g" | \ awk -v ipcmd="$ipcmd" '{print "sudo " ipcmd,$0}' | bash # cleanup the "manila-storage" chain sudo $ipcmd -S -v | sed "s/-c [0-9]* [0-9]* //g" | \ grep "manila-storage" | grep "\-N" | sed "s/-N/-X/g" | \ awk -v ipcmd="$ipcmd" '{print "sudo " ipcmd,$0}' | bash done } # cleanup_manila - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_manila { # All stuff, that are created by share drivers will be cleaned up by other services. _clean_share_group $SHARE_GROUP $SHARE_NAME_PREFIX _clean_manila_lvm_backing_file $SHARE_GROUP _clean_zfsonlinux_data _clean_ip_tables remove_uwsgi_config "$MANILA_UWSGI_CONF" "$MANILA_WSGI" } # configure_backends - Configures backends enabled by MANILA_ENABLED_BACKENDS function configure_backends { # Configure MANILA_ENABLED_BACKENDS backends for group_name in $(echo $MANILA_ENABLED_BACKENDS | sed "s/,/ /g"); do iniset $MANILA_CONF $group_name share_driver $SHARE_DRIVER iniset $MANILA_CONF $group_name share_backend_name ${group_name^^} iniset $MANILA_CONF $group_name path_to_public_key $MANILA_PATH_TO_PUBLIC_KEY iniset $MANILA_CONF $group_name path_to_private_key $MANILA_PATH_TO_PRIVATE_KEY iniset $MANILA_CONF $group_name service_image_name $MANILA_SERVICE_IMAGE_NAME iniset $MANILA_CONF $group_name service_instance_user $MANILA_SERVICE_INSTANCE_USER iniset $MANILA_CONF $group_name driver_handles_share_servers True if [ "$SHARE_DRIVER" == $MANILA_CONTAINER_DRIVER ]; then iniset $MANILA_CONF $group_name network_api_class $MANILA_NETWORK_API_CLASS iniset $MANILA_CONF $group_name neutron_host_id 
$(hostname) iniset $MANILA_CONF $group_name neutron_vnic_type $MANILA_NEUTRON_VNIC_TYPE fi if [ $(trueorfalse False MANILA_USE_SERVICE_INSTANCE_PASSWORD) == True ]; then iniset $MANILA_CONF $group_name service_instance_password $MANILA_SERVICE_INSTANCE_PASSWORD fi if [ "$SHARE_DRIVER" == "manila.share.drivers.generic.GenericShareDriver" ]; then iniset $MANILA_CONF $group_name ssh_conn_timeout $MANILA_SSH_TIMEOUT fi done } # set_config_opts - this allows to set any config opt to any config group, # parsing env vars by prefix special 'MANILA_OPTGROUP_'. function set_config_opts { # expects only one param - name of config group(s) as list separated by commas GROUP_NAMES=$1 if [[ -n "$GROUP_NAMES" ]]; then for be in ${GROUP_NAMES//,/ }; do # get backend_specific opt values prefix=MANILA_OPTGROUP_$be\_ ( set -o posix ; set ) | grep ^$prefix | while read -r line ; do # parse it to opt names and values opt=${line#$prefix} opt_name=${opt%%=*} opt_value=${opt##*=} iniset $MANILA_CONF $be $opt_name $opt_value done done fi } # set_cinder_quotas - Sets Cinder quotas, that is useful for generic driver, # which uses Cinder volumes and snapshots. function set_cinder_quotas { # Update Cinder configuration to make sure default quotas are enough # for Manila using Generic driver with parallel testing. if is_service_enabled cinder; then if [[ ! "$CINDER_CONF" ]]; then CINDER_CONF=/etc/cinder/cinder.conf fi iniset $CINDER_CONF DEFAULT quota_volumes 50 iniset $CINDER_CONF DEFAULT quota_snapshots 50 iniset $CINDER_CONF DEFAULT quota_gigabytes 1000 fi } function set_backend_availability_zones { ENABLED_BACKENDS=$1 echo_summary "Setting up backend_availability_zone option \ for any enabled backends that do not use the Generic driver and have \ not been set previously. Availability zones for the Generic driver \ must coincide with those created for Nova and Cinder." 
local zonenum generic_driver='manila.share.drivers.generic.GenericShareDriver' for BE in ${ENABLED_BACKENDS//,/ }; do share_driver=$(iniget $MANILA_CONF $BE share_driver) az=$(iniget $MANILA_CONF $BE backend_availability_zone) if [[ -z $az && $share_driver != $generic_driver ]]; then zone="manila-zone-$((zonenum++))" iniset $MANILA_CONF $BE backend_availability_zone $zone fi done } # configure_manila - Set config files, create data dirs, etc function configure_manila { if [[ ! -d $MANILA_CONF_DIR ]]; then sudo mkdir -p $MANILA_CONF_DIR fi sudo chown $STACK_USER $MANILA_CONF_DIR # Set the paths of certain binaries MANILA_ROOTWRAP=$(get_rootwrap_location manila) # If Manila ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $MANILA_ROOTWRAP ROOTWRAP_MANILA_SUDOER_CMD="$MANILA_ROOTWRAP" if [[ -d $MANILA_DIR/etc/manila/rootwrap.d ]]; then # Wipe any existing rootwrap.d files first if [[ -d $MANILA_CONF_DIR/rootwrap.d ]]; then sudo rm -rf $MANILA_CONF_DIR/rootwrap.d fi # Deploy filters to /etc/manila/rootwrap.d sudo mkdir -m 755 $MANILA_CONF_DIR/rootwrap.d sudo cp $MANILA_DIR/etc/manila/rootwrap.d/*.filters $MANILA_CONF_DIR/rootwrap.d sudo chown -R root:root $MANILA_CONF_DIR/rootwrap.d sudo chmod 644 $MANILA_CONF_DIR/rootwrap.d/* # Set up rootwrap.conf, pointing to /etc/manila/rootwrap.d sudo cp $MANILA_DIR/etc/manila/rootwrap.conf $MANILA_CONF_DIR/ sudo sed -e "s:^filters_path=.*$:filters_path=$MANILA_CONF_DIR/rootwrap.d:" -i $MANILA_CONF_DIR/rootwrap.conf sudo chown root:root $MANILA_CONF_DIR/rootwrap.conf sudo chmod 0644 $MANILA_CONF_DIR/rootwrap.conf # Specify rootwrap.conf as first parameter to manila-rootwrap MANILA_ROOTWRAP="$MANILA_ROOTWRAP $MANILA_CONF_DIR/rootwrap.conf" ROOTWRAP_MANILA_SUDOER_CMD="$MANILA_ROOTWRAP *" fi TEMPFILE=`mktemp` echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_MANILA_SUDOER_CMD" >$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/manila-rootwrap cp 
$MANILA_DIR/etc/manila/api-paste.ini $MANILA_API_PASTE_INI # Remove old conf file if exists rm -f $MANILA_CONF configure_keystone_authtoken_middleware $MANILA_CONF manila iniset $MANILA_CONF DEFAULT auth_strategy keystone iniset $MANILA_CONF DEFAULT debug True iniset $MANILA_CONF DEFAULT scheduler_driver $MANILA_SCHEDULER_DRIVER iniset $MANILA_CONF DEFAULT share_name_template ${SHARE_NAME_PREFIX}%s iniset $MANILA_CONF DATABASE connection `database_connection_url manila` iniset $MANILA_CONF DATABASE max_pool_size 40 iniset $MANILA_CONF DEFAULT api_paste_config $MANILA_API_PASTE_INI iniset $MANILA_CONF DEFAULT rootwrap_config $MANILA_CONF_DIR/rootwrap.conf iniset $MANILA_CONF DEFAULT osapi_share_extension manila.api.contrib.standard_extensions iniset $MANILA_CONF DEFAULT state_path $MANILA_STATE_PATH # Note: Sample share types will still be created if the below is False if [ $(trueorfalse False MANILA_CONFIGURE_DEFAULT_TYPES) == True ]; then iniset $MANILA_CONF DEFAULT default_share_type $MANILA_DEFAULT_SHARE_TYPE iniset $MANILA_CONF DEFAULT default_share_group_type $MANILA_DEFAULT_SHARE_GROUP_TYPE fi if ! [[ -z $MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL ]]; then iniset $MANILA_CONF DEFAULT migration_driver_continue_update_interval $MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL fi if ! [[ -z $MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL ]]; then iniset $MANILA_CONF DEFAULT server_migration_driver_continue_update_interval $MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL fi if ! [[ -z $MANILA_CREATE_BACKUP_CONTINUE_TASK_INTERVAL ]]; then iniset $MANILA_CONF DEFAULT driver_backup_continue_update_interval $MANILA_CREATE_BACKUP_CONTINUE_TASK_INTERVAL fi if ! [[ -z $MANILA_RESTORE_BACKUP_CONTINUE_TASK_INTERVAL ]]; then iniset $MANILA_CONF DEFAULT driver_restore_continue_update_interval $MANILA_RESTORE_BACKUP_CONTINUE_TASK_INTERVAL fi if ! 
[[ -z $MANILA_DEFERRED_DELETE_TASK_INTERVAL ]]; then iniset $MANILA_CONF DEFAULT periodic_deferred_delete_interval $MANILA_DEFERRED_DELETE_TASK_INTERVAL fi if ! [[ -z $MANILA_DATA_COPY_CHECK_HASH ]]; then iniset $MANILA_CONF DEFAULT check_hash $MANILA_DATA_COPY_CHECK_HASH fi iniset $MANILA_CONF DEFAULT enabled_share_protocols $MANILA_ENABLED_SHARE_PROTOCOLS iniset $MANILA_CONF oslo_concurrency lock_path $MANILA_LOCK_PATH iniset $MANILA_CONF DEFAULT wsgi_keep_alive False iniset $MANILA_CONF DEFAULT lvm_share_volume_group $SHARE_GROUP # Set the replica_state_update_interval iniset $MANILA_CONF DEFAULT replica_state_update_interval $MANILA_REPLICA_STATE_UPDATE_INTERVAL # Set the use_scheduler_creating_share_from_snapshot iniset $MANILA_CONF DEFAULT use_scheduler_creating_share_from_snapshot $MANILA_USE_SCHEDULER_CREATING_SHARE_FROM_SNAPSHOT if is_service_enabled neutron; then configure_keystone_authtoken_middleware $MANILA_CONF neutron neutron fi if is_service_enabled nova; then configure_keystone_authtoken_middleware $MANILA_CONF nova nova fi if is_service_enabled cinder; then configure_keystone_authtoken_middleware $MANILA_CONF cinder cinder fi if is_service_enabled glance; then configure_keystone_authtoken_middleware $MANILA_CONF glance glance fi if [ ! $MANILA_ENABLED_BACKENDS ]; then # MANILA_ENABLED_BACKENDS is a required option echo -"No configured backends, please set a value to MANILA_ENABLED_BACKENDS" exit 1 fi if is_service_enabled barbican; then configure_keystone_authtoken_middleware $MANILA_CONF barbican barbican iniset $MANILA_CONF barbican barbican_endpoint_type $BARBICAN_ENDPOINT_TYPE iniset $MANILA_CONF barbican auth_endpoint $BARBICAN_KEYSTONE_ENDPOINT iniset $MANILA_CONF key_manager backend $KEY_MANAGER_BACKEND fi configure_backends iniset $MANILA_CONF DEFAULT enabled_share_backends $MANILA_ENABLED_BACKENDS if [ ! 
-f $MANILA_PATH_TO_PRIVATE_KEY ]; then ssh-keygen -N "" -t $MANILA_KEY_FORMAT -f $MANILA_PATH_TO_PRIVATE_KEY; fi iniset $MANILA_CONF DEFAULT manila_service_keypair_name $MANILA_SERVICE_KEYPAIR_NAME REAL_MANILA_SERVICE_PORT=$MANILA_SERVICE_PORT if is_service_enabled tls-proxy; then # Set the protocol to 'https', update the endpoint base and set the default port MANILA_SERVICE_PROTOCOL="https" MANILA_ENDPOINT_BASE="${MANILA_ENDPOINT_BASE/http:/https:}" REAL_MANILA_SERVICE_PORT=$MANILA_SERVICE_PORT_INT # Set the service port for a proxy to take the original iniset $MANILA_CONF DEFAULT osapi_share_listen_port $REAL_MANILA_SERVICE_PORT iniset $MANILA_CONF oslo_middleware enable_proxy_headers_parsing True fi iniset_rpc_backend manila $MANILA_CONF DEFAULT setup_logging $MANILA_CONF MANILA_CONFIGURE_GROUPS=${MANILA_CONFIGURE_GROUPS:-"$MANILA_ENABLED_BACKENDS"} set_config_opts $MANILA_CONFIGURE_GROUPS set_config_opts DEFAULT set_backend_availability_zones $MANILA_ENABLED_BACKENDS write_uwsgi_config "$MANILA_UWSGI_CONF" "$MANILA_WSGI" "/share" "" "manila-api" if [[ "$MANILA_ENFORCE_SCOPE" == True ]] ; then iniset $MANILA_CONF oslo_policy enforce_scope true iniset $MANILA_CONF oslo_policy enforce_new_defaults true OS_CLOUD="devstack-admin" fi } function create_manila_service_keypair { if is_service_enabled nova; then local keypair_exists=$( openstack --os-cloud devstack-admin keypair list | grep " $MANILA_SERVICE_KEYPAIR_NAME " ) if [[ -z $keypair_exists ]]; then openstack --os-cloud devstack-admin keypair create $MANILA_SERVICE_KEYPAIR_NAME --public-key $MANILA_PATH_TO_PUBLIC_KEY fi fi } function is_driver_enabled { driver_name=$1 for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do share_driver=$(iniget $MANILA_CONF $BE share_driver) if [ "$share_driver" == "$driver_name" ]; then return 0 fi done return 1 } # create_service_share_servers - creates service Nova VMs, one per generic # driver, and only if it is configured to mode without handling of share servers. 
function create_service_share_servers {
    # Walk every enabled backend; only Generic-driver backends get any work
    # here.  For DHSS=False backends a service VM is booted (or reused) and
    # wired into the backend config; for DHSS=True backends an optional admin
    # network/subnet is created once and referenced by each backend.
    created_admin_network=false
    for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do
        driver_handles_share_servers=$(iniget $MANILA_CONF $BE driver_handles_share_servers)
        share_driver=$(iniget $MANILA_CONF $BE share_driver)
        generic_driver='manila.share.drivers.generic.GenericShareDriver'
        if [[ $share_driver == $generic_driver ]]; then
            if [[ $(trueorfalse False driver_handles_share_servers) == False ]]; then
                # DHSS=False: the driver will not create share servers
                # itself, so boot one service VM per backend.  Idempotent:
                # an existing VM with the expected name is reused.
                vm_name='manila_service_share_server_'$BE
                local vm_exists=$( openstack --os-cloud devstack-admin server list --all-projects | grep " $vm_name " )
                if [[ -z $vm_exists ]]; then
                    private_net_id=$(openstack --os-cloud devstack-admin network show $PRIVATE_NETWORK_NAME -f value -c id)
                    # --wait blocks until the server is active; the whole
                    # create is capped at 120s by timeout(1).
                    vm_id=$(timeout 120 openstack --os-cloud devstack-admin server create $vm_name \
                        --flavor $MANILA_SERVICE_VM_FLAVOR_NAME \
                        --image $MANILA_SERVICE_IMAGE_NAME \
                        --nic net-id=$private_net_id \
                        --security-group $MANILA_SERVICE_SECGROUP \
                        --key-name $MANILA_SERVICE_KEYPAIR_NAME \
                        --wait \
                        | grep ' id ' | get_field 2)
                else
                    vm_id=$(openstack --os-cloud devstack-admin server show $vm_name -f value -c id)
                fi
                # Expose the VM through a freshly allocated floating IP and
                # point this backend's service-instance options at it.
                floating_ip=$(openstack --os-cloud devstack-admin floating ip create $PUBLIC_NETWORK_NAME --subnet $PUBLIC_SUBNET_NAME | grep 'floating_ip_address' | get_field 2)
                openstack --os-cloud devstack-admin server add floating ip $vm_id $floating_ip
                iniset $MANILA_CONF $BE service_instance_name_or_id $vm_id
                iniset $MANILA_CONF $BE service_net_name_or_ip $floating_ip
                iniset $MANILA_CONF $BE tenant_net_name_or_ip $PRIVATE_NETWORK_NAME
            else
                if is_service_enabled neutron; then
                    if ! [[ -z $MANILA_ADMIN_NET_RANGE ]]; then
                        # DHSS=True: create the shared admin_net/admin_subnet
                        # pair only once ($created_admin_network flag), then
                        # reference it from every DHSS=True backend.
                        if [ $created_admin_network == false ]; then
                            project_id=$(openstack --os-cloud devstack-admin project show $SERVICE_PROJECT_NAME -c id -f value)
                            local admin_net_id=$( openstack --os-cloud devstack-admin network show admin_net -f value -c id )
                            if [[ -z $admin_net_id ]]; then
                                openstack --os-cloud devstack-admin network create admin_net --project $project_id
                                admin_net_id=$(openstack --os-cloud devstack-admin network show admin_net -f value -c id)
                            fi
                            local admin_subnet_id=$( openstack --os-cloud devstack-admin subnet show admin_subnet -f value -c id )
                            if [[ -z $admin_subnet_id ]]; then
                                openstack --os-cloud devstack-admin subnet create admin_subnet --project $project_id --ip-version 4 --network $admin_net_id --gateway None --subnet-range $MANILA_ADMIN_NET_RANGE
                                admin_subnet_id=$(openstack --os-cloud devstack-admin subnet show admin_subnet -f value -c id)
                            fi
                            created_admin_network=true
                        fi
                        iniset $MANILA_CONF $BE admin_network_id $admin_net_id
                        iniset $MANILA_CONF $BE admin_subnet_id $admin_subnet_id
                    fi
                fi
            fi
        fi
    done
    configure_data_service_generic_driver
}

function configure_data_service_generic_driver {
    # Set DEFAULT/data_node_access_ips based on the FIRST enabled backend's
    # driver mode, but only when that backend uses the Generic driver:
    # DHSS=False uses the public network gateway, DHSS=True uses
    # $MANILA_DATA_NODE_IP when it is set.
    enabled_backends=(${MANILA_ENABLED_BACKENDS//,/ })
    share_driver=$(iniget $MANILA_CONF ${enabled_backends[0]} share_driver)
    generic_driver='manila.share.drivers.generic.GenericShareDriver'
    if [[ $share_driver == $generic_driver ]]; then
        driver_handles_share_servers=$(iniget $MANILA_CONF ${enabled_backends[0]} driver_handles_share_servers)
        if [[ $(trueorfalse False driver_handles_share_servers) == False ]]; then
            iniset $MANILA_CONF DEFAULT data_node_access_ips $PUBLIC_NETWORK_GATEWAY
        else
            if ! [[ -z $MANILA_DATA_NODE_IP ]]; then
                iniset $MANILA_CONF DEFAULT data_node_access_ips $MANILA_DATA_NODE_IP
            fi
        fi
    fi
}

# create_manila_service_flavor - creates the flavor that will be used by
# backends with a configured generic driver to boot Nova VMs with.
function create_manila_service_flavor {
    # Create the Nova flavor used to boot Generic-driver service VMs.
    # Idempotent: skipped when a flavor with the configured name exists.
    if is_service_enabled nova; then
        local flavor_exists=$( openstack --os-cloud devstack-admin flavor list | grep " $MANILA_SERVICE_VM_FLAVOR_NAME " )
        if [[ -z $flavor_exists ]]; then
            # Create flavor for Manila's service VM.
            # NOTE: the duplicated '--os-cloud devstack-admin' global option
            # was dropped; it was redundantly passed twice.
            openstack --os-cloud devstack-admin flavor create \
                $MANILA_SERVICE_VM_FLAVOR_NAME \
                --id $MANILA_SERVICE_VM_FLAVOR_REF \
                --ram $MANILA_SERVICE_VM_FLAVOR_RAM \
                --disk $MANILA_SERVICE_VM_FLAVOR_DISK \
                --vcpus $MANILA_SERVICE_VM_FLAVOR_VCPUS
        fi
    fi
}

# create_manila_service_image - creates image, that will be used by backends
# with configured generic driver to boot Nova VMs from.
function create_manila_service_image {
    # Register the Manila service image in Glance when Nova and the Glance
    # API are enabled.  Idempotent: skipped when the image already exists.
    if is_service_enabled nova g-api; then
        TOKEN=$(openstack --os-cloud devstack-admin token issue -c id -f value)
        local image_exists=$( openstack --os-cloud devstack-admin image list | grep " $MANILA_SERVICE_IMAGE_NAME " )
        if [[ -z $image_exists ]]; then
            # Download Manila's image
            upload_image $MANILA_SERVICE_IMAGE_URL $TOKEN
        fi
    fi
}

# _manila_ensure_secgroup_rule - adds one rule to $MANILA_SERVICE_SECGROUP
# unless 'rule list' output already matches $1 (a grep pattern); the
# remaining arguments are passed to 'security group rule create'.
function _manila_ensure_secgroup_rule {
    local pattern=$1
    shift
    if ! openstack --os-cloud devstack-admin security group rule list $MANILA_SERVICE_SECGROUP | grep -q "$pattern"; then
        openstack --os-cloud devstack-admin security group rule create $MANILA_SERVICE_SECGROUP "$@"
    fi
}

# create_manila_service_secgroup - creates security group that is used by
# Nova VMs when generic driver is configured.
function create_manila_service_secgroup {
    # Create the secgroup itself, then poll (up to 30s) until it is visible.
    if ! openstack --os-cloud devstack-admin security group list | grep -q $MANILA_SERVICE_SECGROUP; then
        openstack --os-cloud devstack-admin security group create $MANILA_SERVICE_SECGROUP --description "$MANILA_SERVICE_SECGROUP description"
        if ! timeout 30 sh -c "while ! openstack --os-cloud devstack-admin security group list | grep -q $MANILA_SERVICE_SECGROUP; do sleep 1; done"; then
            echo "Security group not created"
            exit 1
        fi
    fi
    # Configure Security Group Rules: ICMP, SSH (22), NFS (2049 tcp/udp)
    # and SMB/CIFS (445 tcp/udp, 137-139 tcp/udp).  Each rule is only
    # added when no matching rule is already listed.
    # NOTE(review): the 137-139 rules grep for " ... 139 " but create a
    # 137:139 range, mirroring the original copy-paste checks — confirm the
    # grep patterns still match the CLI's port-range output format.
    _manila_ensure_secgroup_rule "icmp" --protocol icmp
    _manila_ensure_secgroup_rule " tcp .* 22 " --protocol tcp --dst-port 22
    _manila_ensure_secgroup_rule " tcp .* 2049 " --protocol tcp --dst-port 2049
    _manila_ensure_secgroup_rule " udp .* 2049 " --protocol udp --dst-port 2049
    _manila_ensure_secgroup_rule " udp .* 445 " --protocol udp --dst-port 445
    _manila_ensure_secgroup_rule " tcp .* 445 " --protocol tcp --dst-port 445
    _manila_ensure_secgroup_rule " tcp .* 139 " --protocol tcp --dst-port 137:139
    _manila_ensure_secgroup_rule " udp .* 139 " --protocol udp --dst-port 137:139
    # List secgroup rules
    openstack --os-cloud devstack-admin security group rule list $MANILA_SERVICE_SECGROUP
}

# create_manila_accounts - Set up common required manila accounts
function create_manila_accounts {
    create_service_user "manila"

    # Set up the v1 service and endpoint (project_id still required in v1)
    get_or_create_service "manila" "share" "Manila Shared Filesystem Service"
    get_or_create_endpoint "share" "$REGION_NAME" \
        "$MANILA_ENDPOINT_BASE/v1/\$(project_id)s"

    # Set up Manila v2 service and endpoint - as of microversion 2.60,
    # project_id is no longer necessary in the v2 endpoint
    get_or_create_service "manilav2" "sharev2" "Manila Shared Filesystem Service V2"
    get_or_create_endpoint "sharev2" "$REGION_NAME" \
        "$MANILA_ENDPOINT_BASE/v2"

    # Set up Manila legacy v2 service and endpoint - as of microversion 2.60,
    # project_id is no longer necessary in the v2 endpoint
    get_or_create_service "manilav2_legacy" "sharev2_legacy" "Manila Shared Filesystem Service V2 (Legacy 2.0)"
    get_or_create_endpoint "sharev2_legacy" "$REGION_NAME" \
        "$MANILA_ENDPOINT_BASE/v2/\$(project_id)s"

    # Set up an endpoint for "shared-file-system" - this is necessary to
    # standardize a naming for the v2 API and for the openstacksdk.
    # See: https://specs.openstack.org/openstack/service-types-authority/
    get_or_create_service "shared-file-system" "shared-file-system" "Manila Shared Filesystem Service v2 API (alias of the sharev2 service)"
    get_or_create_endpoint "shared-file-system" "$REGION_NAME" \
        "$MANILA_ENDPOINT_BASE/v2"
}

# create_default_share_group_type - create share group type that will be set as default.
function create_default_share_group_type {
    # Create the default share group type (backed by the default share type)
    # unless it already exists, then apply optional group specs.
    local existing_group_type
    existing_group_type=$( openstack --os-cloud $OS_CLOUD share group type list | grep " $MANILA_DEFAULT_SHARE_GROUP_TYPE " )
    if [[ -z $existing_group_type ]]; then
        openstack --os-cloud $OS_CLOUD share group type create $MANILA_DEFAULT_SHARE_GROUP_TYPE $MANILA_DEFAULT_SHARE_TYPE
    fi
    if [[ -n $MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS ]]; then
        openstack --os-cloud $OS_CLOUD share group type set $MANILA_DEFAULT_SHARE_GROUP_TYPE --group-specs $MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS
    fi
}

# create_default_share_type - create share type that will be set as default
# if $MANILA_CONFIGURE_DEFAULT_TYPES is set to True, if set to False, the share
# type identified by $MANILA_DEFAULT_SHARE_TYPE is still created, but not
# configured as default.
function create_default_share_type {
    # The type's driver_handles_share_servers value mirrors the first
    # enabled backend's setting.
    enabled_backends=(${MANILA_ENABLED_BACKENDS//,/ })
    driver_handles_share_servers=$(iniget $MANILA_CONF ${enabled_backends[0]} driver_handles_share_servers)
    local existing_type
    existing_type=$( openstack --os-cloud $OS_CLOUD share type list | grep " $MANILA_DEFAULT_SHARE_TYPE " )
    # Nothing to do when the type is already registered.
    if [[ -n $existing_type ]]; then
        return
    fi
    local create_args="$MANILA_DEFAULT_SHARE_TYPE $driver_handles_share_servers"
    if [[ -n $MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS ]]; then
        create_args+=" --extra-specs $MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS"
    fi
    openstack --os-cloud $OS_CLOUD share type create $create_args
}

# create_custom_share_types - create share types suitable for both possible
# driver modes with names "dhss_true" and "dhss_false".
function create_custom_share_types { local command_args="dhss_true True" if [[ $MANILA_DHSS_TRUE_SHARE_TYPE_EXTRA_SPECS ]]; then command_args="$command_args --extra-specs $MANILA_DHSS_TRUE_SHARE_TYPE_EXTRA_SPECS" fi openstack --os-cloud $OS_CLOUD share type create $command_args command_args="dhss_false False" if [[ $MANILA_DHSS_FALSE_SHARE_TYPE_EXTRA_SPECS ]]; then command_args="$command_args --extra-specs $MANILA_DHSS_FALSE_SHARE_TYPE_EXTRA_SPECS" fi openstack --os-cloud $OS_CLOUD share type create $command_args } # configure_backing_file - Set up backing file for LVM function configure_backing_file { sudo vgscan if ! sudo vgs $SHARE_GROUP; then if [ "$CONFIGURE_BACKING_FILE" = "True" ]; then SHARE_BACKING_FILE=${SHARE_BACKING_FILE:-$DATA_DIR/${SHARE_GROUP}-backing-file} # Only create if the file doesn't already exists [[ -f $SHARE_BACKING_FILE ]] || truncate -s $SHARE_BACKING_FILE_SIZE $SHARE_BACKING_FILE DEV=`sudo losetup -f --show $SHARE_BACKING_FILE` else DEV=$SHARE_BACKING_FILE fi # Only create if the loopback device doesn't contain $SHARE_GROUP if ! sudo vgs $SHARE_GROUP; then sudo vgcreate $SHARE_GROUP $DEV; fi fi mkdir -p $MANILA_STATE_PATH/shares mkdir -p /tmp/shares } # init_manila - Initializes database and creates manila dir if absent function init_manila { if is_service_enabled $DATABASE_BACKENDS; then # (re)create manila database recreate_database manila $MANILA_BIN_DIR/manila-manage db sync if [[ $(trueorfalse False MANILA_USE_DOWNGRADE_MIGRATIONS) == True ]]; then # Use both - upgrade and downgrade migrations to verify that # downgrade migrations do not break structure of Manila database. 
$MANILA_BIN_DIR/manila-manage db downgrade $MANILA_BIN_DIR/manila-manage db sync fi # Display version as debug-action (see bug/1473400) $MANILA_BIN_DIR/manila-manage db version fi if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then if is_service_enabled m-shr; then # Configure a default volume group called '`lvm-shares`' for the share # service if it does not yet exist. If you don't wish to use a file backed # volume group, create your own volume group called ``stack-volumes`` before # invoking ``stack.sh``. # # By default, the backing file is 8G in size, and is stored in ``/opt/stack/data``. configure_backing_file fi elif [ "$SHARE_DRIVER" == $MANILA_CONTAINER_DRIVER ]; then if is_service_enabled m-shr; then SHARE_GROUP=$MANILA_CONTAINER_VOLUME_GROUP_NAME configure_backing_file fi elif [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then if is_service_enabled m-shr; then mkdir -p $MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR file_counter=0 MANILA_ZFSONLINUX_SERVICE_IP=${MANILA_ZFSONLINUX_SERVICE_IP:-"127.0.0.1"} for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do if [[ $file_counter == 0 ]]; then # NOTE(vponomaryov): create two pools for first ZFS backend # to cover different use cases that are supported by driver: # - Support of more than one zpool for share backend. # - Support of nested datasets. 
local first_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/alpha local second_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/betta truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $first_file truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $second_file sudo zpool create alpha $first_file sudo zpool create betta $second_file # Create subdir (nested dataset) for second pool sudo zfs create betta/subdir iniset $MANILA_CONF $BE zfs_zpool_list alpha,betta/subdir elif [[ $file_counter == 1 ]]; then local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/gamma truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file sudo zpool create gamma $file iniset $MANILA_CONF $BE zfs_zpool_list gamma else local filename=file"$file_counter" local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/"$filename" truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file sudo zpool create $filename $file iniset $MANILA_CONF $BE zfs_zpool_list $filename fi iniset $MANILA_CONF $BE zfs_share_export_ip $MANILA_ZFSONLINUX_SHARE_EXPORT_IP iniset $MANILA_CONF $BE zfs_service_ip $MANILA_ZFSONLINUX_SERVICE_IP iniset $MANILA_CONF $BE zfs_dataset_creation_options $MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS iniset $MANILA_CONF $BE zfs_use_ssh $MANILA_ZFSONLINUX_USE_SSH iniset $MANILA_CONF $BE zfs_ssh_username $MANILA_ZFSONLINUX_SSH_USERNAME iniset $MANILA_CONF $BE replication_domain $MANILA_ZFSONLINUX_REPLICATION_DOMAIN iniset $MANILA_CONF $BE driver_handles_share_servers False let "file_counter=file_counter+1" done # Install the server's SSH key in our known_hosts file eval STACK_HOME=~$STACK_USER ssh-keyscan ${MANILA_ZFSONLINUX_SERVICE_IP} >> $STACK_HOME/.ssh/known_hosts # If the server is this machine, setup trust for ourselves (otherwise you're on your own) if [ "$MANILA_ZFSONLINUX_SERVICE_IP" = "127.0.0.1" ] || [ "$MANILA_ZFSONLINUX_SERVICE_IP" = "localhost" ] ; then # Trust our own SSH keys eval SSH_USER_HOME=~$MANILA_ZFSONLINUX_SSH_USERNAME cat $STACK_HOME/.ssh/*.pub >> $SSH_USER_HOME/.ssh/authorized_keys 
# Give ssh user sudo access echo "$MANILA_ZFSONLINUX_SSH_USERNAME ALL=(ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers > /dev/null iniset $MANILA_CONF DEFAULT data_node_access_ips $MANILA_ZFSONLINUX_SERVICE_IP fi fi fi } # check_nfs_kernel_service_state_ubuntu- Make sure nfsd is running function check_nfs_kernel_service_state_ubuntu { # (aovchinnikov): Workaround for nfs-utils bug 1052264 if [[ $(sudo service nfs-kernel-server status &> /dev/null || echo 'fail') == 'fail' ]]; then echo "Apparently nfsd is not running. Trying to fix that." sudo mkdir -p "/media/nfsdonubuntuhelper" # (aovchinnikov): shell wrapping is needed for cases when a file to be written # is owned by root. sudo sh -c "echo '/media/nfsdonubuntuhelper 127.0.0.1(ro)' >> /etc/exports" sudo service nfs-kernel-server start fi if [[ $(sudo service nfs-kernel-server status &> /dev/null || echo 'fail') == 'fail' ]]; then echo "Failed to start nfsd. Exiting." exit 1 fi } function _install_nfs_and_samba { if is_ubuntu; then install_package nfs-kernel-server nfs-common samba check_nfs_kernel_service_state_ubuntu elif is_fedora; then install_package nfs-utils samba sudo systemctl enable smb.service sudo systemctl start smb.service sudo systemctl enable nfs-server.service sudo systemctl start nfs-server.service elif is_suse; then install_package nfs-kernel-server nfs-utils samba else echo "This distro is not supported. Skipping step of NFS and Samba installation." 
fi } # install_manilaclient - Collect source and prepare # In order to install from git, add LIBS_FROM_GIT="python-manilaclient" # to local.conf function install_manilaclient { if use_library_from_git "python-manilaclient"; then git_clone $MANILACLIENT_REPO $MANILACLIENT_DIR $MANILACLIENT_BRANCH setup_develop $MANILACLIENT_DIR else pip_install python-manilaclient fi if [[ "$GLOBAL_VENV" == "True" ]]; then sudo ln -sf /opt/stack/data/venv/bin/manila /usr/local/bin fi } # install_manila - Collect source and prepare function install_manila { setup_develop $MANILA_DIR if is_service_enabled m-shr; then if [[ ! $(systemctl is-active nfs-ganesha.service) == 'active' ]] ; then if [ "$SHARE_DRIVER" != "manila.share.drivers.cephfs.driver.CephFSDriver" ] ; then _install_nfs_and_samba fi fi if [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then if [[ $(sudo zfs list &> /dev/null && sudo zpool list &> /dev/null || echo 'absent') == 'absent' ]]; then # ZFS not found, try to install it if is_ubuntu; then sudo apt-get install -y zfsutils-linux else echo "Manila Devstack plugin supports installation "\ "of ZFS packages only for 'Ubuntu' distros. "\ "Please, install it first by other means or add its support "\ "for your distro." exit 1 fi sudo modprobe zfs sudo modprobe zpool fi check_nfs_kernel_service_state_ubuntu elif [ "$SHARE_DRIVER" == $MANILA_CONTAINER_DRIVER ]; then if is_ubuntu; then echo "Installing docker...." install_docker_ubuntu echo "Importing docker image" import_docker_service_image_ubuntu elif is_fedora; then echo "Installing docker...." install_docker_fedora echo "Importing docker image" # TODO(tbarron): See if using a fedora container image # is faster/smaller because of fewer extra dependencies. import_docker_service_image_ubuntu else echo "Manila Devstack plugin does not support Container Driver on"\ " distros other than Ubuntu or Fedora." 
exit 1 fi fi fi } #configure_samba - Configure node as Samba server function configure_samba { if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then # TODO(vponomaryov): add here condition for ZFSonLinux driver too # when it starts to support SAMBA samba_daemon_name=smbd if is_service_enabled m-shr; then if is_fedora; then samba_daemon_name=smb fi sudo service $samba_daemon_name restart || echo "Couldn't restart '$samba_daemon_name' service" fi if [[ -e /usr/share/samba/smb.conf ]]; then sudo cp /usr/share/samba/smb.conf $SMB_CONF fi sudo chown $STACK_USER -R /etc/samba iniset $SMB_CONF global include registry iniset $SMB_CONF global security user if [ ! -d "$SMB_PRIVATE_DIR" ]; then sudo mkdir $SMB_PRIVATE_DIR sudo touch $SMB_PRIVATE_DIR/secrets.tdb fi for backend_name in ${MANILA_ENABLED_BACKENDS//,/ }; do iniset $MANILA_CONF $backend_name driver_handles_share_servers False iniset $MANILA_CONF $backend_name lvm_share_export_ips $MANILA_LVM_SHARE_EXPORT_IPS done iniset $MANILA_CONF DEFAULT data_node_access_ips $HOST_IP fi } # start_manila_api - starts manila API services and checks its availability function start_manila_api { echo "Deploying with UWSGI" run_process m-api "$(which uwsgi) --ini $MANILA_UWSGI_CONF --procname-prefix manila-api" echo "Waiting for Manila API to start..." if ! wait_for_service $SERVICE_TIMEOUT $MANILA_ENDPOINT_BASE; then die $LINENO "Manila API did not start" fi # Start proxies if enabled # # If tls-proxy is enabled, a generic http-services-tls-proxy will be set up # to handle tls-termination to manila as well as all the other https # services, we don't need to create our own. 
if is_service_enabled tls-proxy; then start_tls_proxy manila '*' $MANILA_SERVICE_PORT $MANILA_SERVICE_HOST $MANILA_SERVICE_PORT_INT fi } # start_rest_of_manila - starts non-api manila services function start_rest_of_manila { run_process m-shr "$MANILA_BIN_DIR/manila-share --config-file $MANILA_CONF" run_process m-sch "$MANILA_BIN_DIR/manila-scheduler --config-file $MANILA_CONF" run_process m-dat "$MANILA_BIN_DIR/manila-data --config-file $MANILA_CONF" } # start_manila - start all manila services. This function is kept for compatibility # reasons with old approach. function start_manila { start_manila_api start_rest_of_manila } # stop_manila - Stop running processes function stop_manila { local serv # Kill all other manila processes for serv in m-api m-sch m-shr m-dat; do stop_process $serv done } # update_tempest - Function used for updating Tempest config if Tempest service enabled function update_tempest { if is_service_enabled tempest; then if [[ "$(trueorfalse False MANILA_SETUP_IPV6)" == "True" ]]; then # The public network was created by us, so set it explicitly in # tempest.conf public_net_id=$(openstack --os-cloud devstack-admin network list --name $PUBLIC_NETWORK_NAME -f value -c ID ) iniset $TEMPEST_CONFIG network public_network_id $public_net_id fi TEMPEST_CONFIG=${TEMPEST_CONFIG:-$TEMPEST_DIR/etc/tempest.conf} if [ $(trueorfalse False MANILA_USE_SERVICE_INSTANCE_PASSWORD) == True ]; then iniset $TEMPEST_CONFIG share image_password $MANILA_SERVICE_INSTANCE_PASSWORD fi iniset $TEMPEST_CONFIG share image_with_share_tools $MANILA_SERVICE_IMAGE_NAME iniset $TEMPEST_CONFIG enforce_scope manila "$MANILA_ENFORCE_SCOPE" # If testing a stable branch, we need to ensure we're testing with supported # API micro-versions; so set the versions from code if we're not testing the # master branch. If we're testing master, we'll allow manila-tempest-plugin # (which is branchless) tell us what versions it wants to test. 
if [[ "$TARGET_BRANCH" != "master" ]]; then # Grab the supported API micro-versions from the code _DEFAULT_MIN_VERSION=$(openstack --os-cloud devstack versions show --service sharev2 -c 'Min Microversion' --status CURRENT -f value) _DEFAULT_MAX_VERSION=$(openstack --os-cloud devstack versions show --service sharev2 -c 'Max Microversion' --status CURRENT -f value) # Override the *_api_microversion tempest options if present MANILA_TEMPEST_MIN_API_MICROVERSION=${MANILA_TEMPEST_MIN_API_MICROVERSION:-$_DEFAULT_MIN_VERSION} MANILA_TEMPEST_MAX_API_MICROVERSION=${MANILA_TEMPEST_MAX_API_MICROVERSION:-$_DEFAULT_MAX_VERSION} # Set these options in tempest.conf iniset $TEMPEST_CONFIG share min_api_microversion $MANILA_TEMPEST_MIN_API_MICROVERSION iniset $TEMPEST_CONFIG share max_api_microversion $MANILA_TEMPEST_MAX_API_MICROVERSION fi fi } function install_docker_ubuntu { sudo apt-get update install_package apparmor install_package docker.io } function install_docker_fedora { install_package docker sudo systemctl enable docker sudo systemctl start docker } function download_image { local image_url=$1 local image image_fname image_fname=`basename "$image_url"` if [[ $image_url != file* ]]; then # Downloads the image (uec ami+akistyle), then extracts it. if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then wget --progress=dot:giga -c $image_url -O $FILES/$image_fname if [[ $? -ne 0 ]]; then echo "Not found: $image_url" return fi fi image="$FILES/${image_fname}" else # File based URL (RFC 1738): ``file://host/path`` # Remote files are not considered here. # unix: ``file:///home/user/path/file`` # windows: ``file:///C:/Documents%20and%20Settings/user/path/file`` image=$(echo $image_url | sed "s/^file:\/\///g") if [[ ! 
-f $image || "$(stat -c "%s" $image)" == "0" ]]; then echo "Not found: $image_url" return fi fi } function import_docker_service_image_ubuntu { GZIPPED_IMG_NAME=`basename "$MANILA_DOCKER_IMAGE_URL"` IMG_NAME_LOAD=${GZIPPED_IMG_NAME%.*} LOCAL_IMG_NAME=${IMG_NAME_LOAD%.*} if [[ "$(sudo docker images -q $LOCAL_IMG_NAME)" == "" ]]; then download_image $MANILA_DOCKER_IMAGE_URL # Import image in Docker gzip -d $FILES/$GZIPPED_IMG_NAME sudo docker load --input $FILES/$IMG_NAME_LOAD fi } function remove_docker_service_image { sudo docker rmi $MANILA_DOCKER_IMAGE_ALIAS } function install_libraries { if [ $(trueorfalse True RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS) == True ]; then if is_ubuntu; then install_package nfs-common else install_package nfs-utils fi fi } function allow_host_ports_for_share_mounting { if [[ $MANILA_ENABLED_SHARE_PROTOCOLS =~ NFS ]]; then # 111 and 2049 are for rpcbind and NFS # Other ports are for NFSv3 statd, mountd and lockd daemons MANILA_TCP_PORTS=(2049 111 32803 892 875 662) MANILA_UDP_PORTS=(111 32769 892 875 662) fi if [[ $MANILA_ENABLED_SHARE_PROTOCOLS =~ CEPHFS ]]; then # clients need access to the ceph daemons MANILA_TCP_PORTS=(${MANILA_TCP_PORTS[*]} 6789 6800:7300) fi if [[ -v MANILA_TCP_PORTS || -v MANILA_UDP_PORTS ]]; then for ipcmd in iptables ip6tables; do sudo $ipcmd -N manila-storage sudo $ipcmd -I INPUT 1 -j manila-storage for port in ${MANILA_TCP_PORTS[*]}; do sudo $ipcmd -A manila-storage -m tcp -p tcp --dport $port -j ACCEPT done for port in ${MANILA_UDP_PORTS[*]}; do sudo $ipcmd -A manila-storage -m udp -p udp --dport $port -j ACCEPT done done fi } function setup_ipv6 { # This will fail with multiple default routes and is not needed in CI # but may be useful when developing with devstack locally if [ $(trueorfalse False MANILA_RESTORE_IPV6_DEFAULT_ROUTE) == True ]; then # save IPv6 default route to add back later after enabling forwarding local default_route=$(ip -6 route | grep default | cut -d ' ' -f1,2,3,4,5) fi # make sure 
those system values are set sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0 sudo sysctl -w net.ipv6.conf.all.accept_ra=2 sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Disable in-band as our communication is only internal sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE other_config:disable-in-band=true # Create address scopes and subnet pools openstack --os-cloud devstack-admin address scope create --share --ip-version 4 scope-v4 openstack --os-cloud devstack-admin address scope create --share --ip-version 6 scope-v6 openstack --os-cloud devstack-admin subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --address-scope scope-v4 --default --share openstack --os-cloud devstack-admin subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --address-scope scope-v6 --default --share # Create example private network and router openstack --os-cloud devstack-admin router create $Q_ROUTER_NAME openstack --os-cloud devstack-admin network create $PRIVATE_NETWORK_NAME openstack --os-cloud devstack-admin subnet create --ip-version 6 --use-default-subnet-pool --ipv6-address-mode $IPV6_ADDRESS_MODE --ipv6-ra-mode $IPV6_RA_MODE --network $PRIVATE_NETWORK_NAME $IPV6_PRIVATE_SUBNET_NAME openstack --os-cloud devstack-admin subnet create --ip-version 4 --use-default-subnet-pool --network $PRIVATE_NETWORK_NAME $PRIVATE_SUBNET_NAME openstack --os-cloud devstack-admin router add subnet $Q_ROUTER_NAME $IPV6_PRIVATE_SUBNET_NAME openstack --os-cloud devstack-admin router add subnet $Q_ROUTER_NAME $PRIVATE_SUBNET_NAME # Create public network openstack --os-cloud devstack-admin network create $PUBLIC_NETWORK_NAME --external --default --provider-network-type flat --provider-physical-network $PUBLIC_PHYSICAL_NETWORK local public_gateway_ipv6=$(openstack --os-cloud devstack-admin subnet create $IPV6_PUBLIC_SUBNET_NAME --ip-version 6 --network $PUBLIC_NETWORK_NAME 
--subnet-pool $SUBNETPOOL_NAME_V6 --no-dhcp -c gateway_ip -f value) local public_gateway_ipv4=$(openstack --os-cloud devstack-admin subnet create $PUBLIC_SUBNET_NAME --ip-version 4 --network $PUBLIC_NETWORK_NAME --subnet-range $FLOATING_RANGE --no-dhcp -c gateway_ip -f value) # Set router to use public network openstack --os-cloud devstack-admin router set --external-gateway $PUBLIC_NETWORK_NAME $Q_ROUTER_NAME # Configure interfaces due to NEUTRON_CREATE_INITIAL_NETWORKS=False local ipv4_cidr_len=${FLOATING_RANGE#*/} sudo ip -6 addr add "$public_gateway_ipv6"/$SUBNETPOOL_SIZE_V6 dev $PUBLIC_BRIDGE sudo ip addr add "$public_gateway_ipv4"/"$ipv4_cidr_len" dev $PUBLIC_BRIDGE # Enabling interface is needed due to NEUTRON_CREATE_INITIAL_NETWORKS=False sudo ip link set $PUBLIC_BRIDGE up if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then for backend_name in ${MANILA_ENABLED_BACKENDS//,/ }; do iniset $MANILA_CONF $backend_name lvm_share_export_ips $public_gateway_ipv4,$public_gateway_ipv6 done iniset $MANILA_CONF DEFAULT data_node_access_ips $public_gateway_ipv4 fi if [ "$SHARE_DRIVER" == "manila.share.drivers.cephfs.driver.CephFSDriver" ]; then for backend_name in ${MANILA_ENABLED_BACKENDS//,/ }; do iniset $MANILA_CONF $backend_name cephfs_ganesha_export_ips $public_gateway_ipv4,$public_gateway_ipv6 done iniset $MANILA_CONF DEFAULT data_node_access_ips $public_gateway_ipv4 fi # install Quagga for setting up the host routes dynamically install_package quagga # set Quagga daemons ( echo "zebra=yes" echo "bgpd=yes" echo "ospfd=no" echo "ospf6d=no" echo "ripd=no" echo "ripngd=no" echo "isisd=no" echo "babeld=no" ) | sudo tee /etc/quagga/daemons > /dev/null # set Quagga zebra.conf ( echo "hostname dsvm" echo "password openstack" echo "log file /var/log/quagga/zebra.log" ) | sudo tee /etc/quagga/zebra.conf > /dev/null # set Quagga vtysh.conf ( echo "service integrated-vtysh-config" echo "username quagga nopassword" ) | sudo tee /etc/quagga/vtysh.conf > 
/dev/null # set Quagga bgpd.conf ( echo "log file /var/log/quagga/bgpd.log" echo "bgp multiple-instance" echo "router bgp 200" echo " bgp router-id 1.2.3.4" echo " neighbor $public_gateway_ipv6 remote-as 100" echo " neighbor $public_gateway_ipv6 passive" echo " address-family ipv6" echo " neighbor $public_gateway_ipv6 activate" echo "line vty" echo "debug bgp events" echo "debug bgp filters" echo "debug bgp fsm" echo "debug bgp keepalives" echo "debug bgp updates" ) | sudo tee /etc/quagga/bgpd.conf > /dev/null # Quagga logging sudo mkdir -p /var/log/quagga sudo touch /var/log/quagga/zebra.log sudo touch /var/log/quagga/bgpd.log sudo chown -R quagga:quagga /var/log/quagga GetOSVersion QUAGGA_SERVICES="zebra bgpd" if [[ is_ubuntu && "$os_CODENAME" == "xenial" ]]; then # In Ubuntu Xenial, the services bgpd and zebra are under # one systemd unit: quagga QUAGGA_SERVICES="quagga" elif is_fedora; then # Disable SELinux rule that conflicts with Zebra sudo setsebool -P zebra_write_config 1 fi sudo systemctl enable $QUAGGA_SERVICES sudo systemctl restart $QUAGGA_SERVICES # log the systemd status sudo systemctl status $QUAGGA_SERVICES # This will fail with mutltiple default routes and is not needed in CI # but may be useful when developing with devstack locally if [ $(trueorfalse False MANILA_RESTORE_IPV6_DEFAULT_ROUTE) == True ]; then # add default IPv6 route back if ! [[ -z $default_route ]]; then # "replace" should ignore "RTNETLINK answers: File exists" # error if the route wasn't flushed by the bgp setup we did earlier. 
sudo ip -6 route replace $default_route fi fi } function setup_bgp_for_ipv6 { public_gateway_ipv6=$(openstack --os-cloud devstack-admin subnet show ipv6-public-subnet -c gateway_ip -f value) openstack --os-cloud devstack-admin bgp speaker create --ip-version 6 --local-as 100 bgpspeaker openstack --os-cloud devstack-admin bgp speaker add network bgpspeaker $PUBLIC_NETWORK_NAME openstack --os-cloud devstack-admin bgp peer create --peer-ip $public_gateway_ipv6 --remote-as 200 bgppeer openstack --os-cloud devstack-admin bgp speaker add peer bgpspeaker bgppeer } # Main dispatcher if [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Manila Client" install_manilaclient echo_summary "Installing Manila" install_manila set_cinder_quotas elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Manila" configure_manila echo_summary "Initializing Manila" init_manila echo_summary "Installing extra libraries" install_libraries echo_summary "Creating Manila entities for auth service" create_manila_accounts # Cinder config update if is_service_enabled cinder && [[ -n "$CINDER_OVERSUBSCRIPTION_RATIO" ]]; then CINDER_CONF=${CINDER_CONF:-/etc/cinder/cinder.conf} CINDER_ENABLED_BACKENDS=$(iniget $CINDER_CONF DEFAULT enabled_backends) for BN in ${CINDER_ENABLED_BACKENDS//,/ }; do iniset $CINDER_CONF $BN lvm_max_over_subscription_ratio $CINDER_OVERSUBSCRIPTION_RATIO done iniset $CINDER_CONF DEFAULT max_over_subscription_ratio $CINDER_OVERSUBSCRIPTION_RATIO fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then if is_service_enabled nova; then echo_summary "Creating Manila service flavor" create_manila_service_flavor echo_summary "Creating Manila service security group" create_manila_service_secgroup fi # Skip image downloads when disabled. # This way vendor Manila driver CI tests can skip # this potentially long and unnecessary download. 
if [ "$MANILA_SERVICE_IMAGE_ENABLED" = "True" ]; then echo_summary "Creating Manila service image" create_manila_service_image else echo_summary "Skipping download of Manila service image" fi if is_service_enabled nova; then echo_summary "Creating Manila service keypair" create_manila_service_keypair fi echo_summary "Configure Samba server" configure_samba echo_summary "Configuring IPv6" if [ $(trueorfalse False MANILA_SETUP_IPV6) == True ]; then setup_ipv6 fi echo_summary "Starting Manila API" start_manila_api # Workaround for bug #1660304 if [ "$SHARE_DRIVER" != "manila.share.drivers.generic.GenericShareDriver" ]; then echo_summary "Starting rest of Manila services - scheduler, share and data" start_rest_of_manila fi echo_summary "Creating Manila default share type" create_default_share_type echo_summary "Creating Manila default share group type" create_default_share_group_type echo_summary "Creating Manila custom share types" create_custom_share_types echo_summary "Manila UI is no longer enabled by default. \ Add enable_plugin manila-ui https://opendev.org/openstack/manila-ui \ to your local.conf file to enable Manila UI" elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then ########################################################################### # NOTE(vponomaryov): Workaround for bug #1660304 # We are able to create Nova VMs now only when last Nova step is performed # which is registration of cell0. It is registered as last action in # "post-extra" section. if is_service_enabled nova; then echo_summary "Creating Manila service VMs for generic driver \ backends for which handlng of share servers is disabled." 
create_service_share_servers fi if [ "$SHARE_DRIVER" == "manila.share.drivers.generic.GenericShareDriver" ]; then echo_summary "Starting rest of Manila services - scheduler, share and data" start_rest_of_manila fi ########################################################################### echo_summary "Update Tempest config" update_tempest if [[ "$(trueorfalse False MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST)" == "True" ]]; then echo_summary "Allowing IPv4 and IPv6 access to NAS ports on the host" allow_host_ports_for_share_mounting fi if [[ "$(trueorfalse False MANILA_SETUP_IPV6)" == "True" ]]; then # Now that all plugins are loaded, setup BGP echo_summary "Setting up BGP speaker to advertise routes to project networks" setup_bgp_for_ipv6 fi fi if [[ "$1" == "unstack" ]]; then stop_manila cleanup_manila fi if [[ "$1" == "clean" ]]; then stop_manila cleanup_manila sudo rm -rf /etc/manila fi # Restore xtrace $XTRACE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/settings0000664000175000017500000002466500000000000016462 0ustar00zuulzuul00000000000000# Setting configuration file for manila services # ---------------------------------------------- # 1) It is possible to set any custom opt to any config group using following: # $ export MANILA_OPTGROUP_foo_bar=value # where 'foo' is name of config group and 'bar' is name of option. # # 2) 'MANILA_CONFIGURE_GROUPS' contains list of config group names used to create # config groups, but 'MANILA_ENABLED_BACKENDS' is used to set config groups as # Manila share back ends. Both can be set like following: # $ export MANILA_ENABLED_BACKENDS=foo,bar # where 'foo' and 'bar' are names of config groups with opts for some share # drivers. By default they are equal. Also be attentive, if you modify both, # make sure 'MANILA_CONFIGURE_GROUPS' contains all values from # 'MANILA_ENABLED_BACKENDS'. 
# DEFAULT group is always defined, no need to specify it within 'MANILA_CONFIGURE_GROUPS'. # # 3) 'CINDER_OVERSUBSCRIPTION_RATIO' - manila devstack-plugin env var that is # useful for all share drivers that use Cinder. If it is set, then it will be # applied for two Cinder options: 'max_over_subscription_ratio' and # 'lvm_max_over_subscription_ratio'. Should be float. Example: # CINDER_OVERSUBSCRIPTION_RATIO=20.0 define_plugin manila # Defaults # -------- OS_CLOUD=${OS_CLOUD:-"devstack-admin"} MANILA_GIT_BASE=${MANILA_GIT_BASE:-https://opendev.org} MANILA_REPO_ROOT=${MANILA_REPO_ROOT:-openstack} MANILACLIENT_REPO=${MANILA_GIT_BASE}/${MANILA_REPO_ROOT}/python-manilaclient MANILACLIENT_BRANCH=${MANILACLIENT_BRANCH:-master} # Set up default directories MANILA_DIR=${MANILA_DIR:=$DEST/manila} MANILA_LOCK_PATH=${MANILA_LOCK_PATH:=$OSLO_LOCK_PATH} MANILA_LOCK_PATH=${MANILA_LOCK_PATH:=$MANILA_DIR/manila_locks} MANILACLIENT_DIR=${MANILACLIENT_DIR:=$DEST/python-manilaclient} MANILA_STATE_PATH=${MANILA_STATE_PATH:=$DATA_DIR/manila} MANILA_CONF_DIR=${MANILA_CONF_DIR:-/etc/manila} MANILA_CONF=$MANILA_CONF_DIR/manila.conf MANILA_API_PASTE_INI=$MANILA_CONF_DIR/api-paste.ini # barbican defaults BARBICAN_ENDPOINT_TYPE=${BARBICAN_ENDPOINT_TYPE:-internal} KEY_MANAGER_BACKEND=${KEY_MANAGER_BACKEND:-barbican} BARBICAN_KEYSTONE_ENDPOINT=${BARBICAN_KEYSTONE_ENDPOINT:-$KEYSTONE_SERVICE_URI} # Set this to False to leave "default_share_type" and # "default_share_group_type" configuration options empty. MANILA_CONFIGURE_DEFAULT_TYPES=${MANILA_CONFIGURE_DEFAULT_TYPES:-True} MANILA_DEFAULT_SHARE_TYPE=${MANILA_DEFAULT_SHARE_TYPE:-default} # MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS is expected to contain extra specs key-value pairs, # that should be assigned to default share type. Both - qualified and unqualified extra specs are supported. # Pairs are separated by spaces, value is assigned to key using sign of equality. 
Examples: # MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='foo=bar' # MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='foo=bar quuz=xyzzy' # MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='foo=bar quuz=xyzzy fakeprefix:baz=waldo' MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS=${MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS:-''} MANILA_DHSS_TRUE_SHARE_TYPE_EXTRA_SPECS=${MANILA_DHSS_TRUE_SHARE_TYPE_EXTRA_SPECS:-$MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS} MANILA_DHSS_FALSE_SHARE_TYPE_EXTRA_SPECS=${MANILA_DHSS_FALSE_SHARE_TYPE_EXTRA_SPECS:-$MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS} # Share groups and their specs MANILA_DEFAULT_SHARE_GROUP_TYPE=${MANILA_DEFAULT_SHARE_GROUP_TYPE:-default} # MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS is expected to contain key-value pairs, # that should be assigned to default share group type. Both - qualified and unqualified specs are supported. # Pairs are separated by spaces, value is assigned to key using sign of equality. Examples: # MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS='foo=bar' # MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS='foo=bar quuz=xyzzy' # MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS='foo=bar quuz=xyzzy fakeprefix:baz=waldo' MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS=${MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS:-''} # Public facing bits MANILA_SERVICE_HOST=${MANILA_SERVICE_HOST:-$SERVICE_HOST} MANILA_SERVICE_PORT=${MANILA_SERVICE_PORT:-8786} MANILA_SERVICE_PORT_INT=${MANILA_SERVICE_PORT_INT:-18786} MANILA_SERVICE_PROTOCOL=${MANILA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} MANILA_ENDPOINT_BASE=$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$MANILA_SERVICE_PORT # Support entry points installation of console scripts if [[ -d $MANILA_DIR/bin ]]; then MANILA_BIN_DIR=$MANILA_DIR/bin else MANILA_BIN_DIR=$(get_python_exec_prefix) fi # Common opts SHARE_NAME_PREFIX=${SHARE_NAME_PREFIX:-share-} MANILA_ENABLED_SHARE_PROTOCOLS=${ENABLED_SHARE_PROTOCOLS:-"NFS,CIFS"} MANILA_ENABLED_BACKENDS=${MANILA_ENABLED_BACKENDS:-generic1,generic2} 
MANILA_SCHEDULER_DRIVER=${MANILA_SCHEDULER_DRIVER:-manila.scheduler.drivers.filter.FilterScheduler} MANILA_SERVICE_SECGROUP="manila-service" # Following env var defines whether to apply downgrade migrations setting up DB or not. # If it is set to False, then only 'upgrade' migrations will be applied. # If it is set to True, then will be applied 'upgrade', 'downgrade' and 'upgrade' # migrations again. MANILA_USE_DOWNGRADE_MIGRATIONS=${MANILA_USE_DOWNGRADE_MIGRATIONS:-"False"} MANILA_WSGI=manila.wsgi.api:application MANILA_UWSGI_CONF=$MANILA_CONF_DIR/manila-uwsgi.ini MANILA_ENDPOINT_BASE=$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST/share # Common info for Generic driver(s) SHARE_DRIVER=${SHARE_DRIVER:-manila.share.drivers.generic.GenericShareDriver} eval USER_HOME=~ MANILA_KEY_FORMAT=${MANILA_KEY_FORMAT:-"ecdsa"} MANILA_PATH_TO_PUBLIC_KEY=${MANILA_PATH_TO_PUBLIC_KEY:-"$USER_HOME/.ssh/id_${MANILA_KEY_FORMAT}.pub"} MANILA_PATH_TO_PRIVATE_KEY=${MANILA_PATH_TO_PRIVATE_KEY:-"$USER_HOME/.ssh/id_${MANILA_KEY_FORMAT}"} MANILA_SERVICE_KEYPAIR_NAME=${MANILA_SERVICE_KEYPAIR_NAME:-"manila-service"} MANILA_SERVICE_INSTANCE_USER=${MANILA_SERVICE_INSTANCE_USER:-"manila"} MANILA_SERVICE_IMAGE_URL=${MANILA_SERVICE_IMAGE_URL:-"http://tarballs.openstack.org/manila-image-elements/images/manila-service-image-master.qcow2"} MANILA_SERVICE_IMAGE_NAME=${MANILA_SERVICE_IMAGE_NAME:-"manila-service-image-master"} MANILA_USE_SCHEDULER_CREATING_SHARE_FROM_SNAPSHOT=${MANILA_USE_SCHEDULER_CREATING_SHARE_FROM_SNAPSHOT:-"False"} # Third party CI Vendors should set this to false to skip the service image download MANILA_SERVICE_IMAGE_ENABLED=$(trueorfalse True MANILA_SERVICE_IMAGE_ENABLED) MANILA_USE_SERVICE_INSTANCE_PASSWORD=${MANILA_USE_SERVICE_INSTANCE_PASSWORD:-"False"} MANILA_SERVICE_INSTANCE_PASSWORD=${MANILA_SERVICE_INSTANCE_PASSWORD:-"manila"} MANILA_SERVICE_VM_FLAVOR_REF=${MANILA_SERVICE_VM_FLAVOR_REF:-100} 
MANILA_SERVICE_VM_FLAVOR_NAME=${MANILA_SERVICE_VM_FLAVOR_NAME:-"manila-service-flavor"} MANILA_SERVICE_VM_FLAVOR_RAM=${MANILA_SERVICE_VM_FLAVOR_RAM:-512} MANILA_SERVICE_VM_FLAVOR_DISK=${MANILA_SERVICE_VM_FLAVOR_DISK:-5} MANILA_SERVICE_VM_FLAVOR_VCPUS=${MANILA_SERVICE_VM_FLAVOR_VCPUS:-1} # Enable this option when using a storage backend that is on the same host # as the devstack host, these iptable rules are necessary to allow mounting # shares from the host MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST=${MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST:-False} # Options for configuration of LVM share driver SHARE_BACKING_FILE_SIZE=${SHARE_BACKING_FILE_SIZE:-8400M} SHARE_GROUP=${SHARE_GROUP:-lvm-shares} MANILA_MNT_DIR=${MANILA_MNT_DIR:=$MANILA_STATE_PATH/mnt} SMB_CONF=${SMB_CONF:-/etc/samba/smb.conf} SMB_PRIVATE_DIR=${SMB_PRIVATE_DIR:-/var/lib/samba/private} CONFIGURE_BACKING_FILE=${CONFIGURE_BACKING_FILE:-"True"} MANILA_LVM_SHARE_EXPORT_IPS=${MANILA_LVM_SHARE_EXPORT_IPS:-$HOST_IP} # Options for replication MANILA_REPLICA_STATE_UPDATE_INTERVAL=${MANILA_REPLICA_STATE_UPDATE_INTERVAL:-300} # Options for configuration of ZFSonLinux driver # 'MANILA_ZFSONLINUX_ZPOOL_SIZE' defines size of each zpool. That value # will be used for creation of sparse files. 
MANILA_ZFSONLINUX_ZPOOL_SIZE=${MANILA_ZFSONLINUX_ZPOOL_SIZE:-"30G"} MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR=${MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR:-"/opt/stack/data/manila/zfsonlinux"} MANILA_ZFSONLINUX_SHARE_EXPORT_IP=${MANILA_ZFSONLINUX_SHARE_EXPORT_IP:-$HOST_IP} MANILA_ZFSONLINUX_SERVICE_IP=${MANILA_ZFSONLINUX_SERVICE_IP:-$HOST_IP} MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS=${MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS:-"compression=gzip"} MANILA_ZFSONLINUX_USE_SSH=${MANILA_ZFSONLINUX_USE_SSH:-"False"} MANILA_ZFSONLINUX_SSH_USERNAME=${MANILA_ZFSONLINUX_SSH_USERNAME:-$STACK_USER} # If MANILA_ZFSONLINUX_REPLICATION_DOMAIN is set to empty value then # Manila will consider replication feature as disabled for ZFSonLinux share driver. MANILA_ZFSONLINUX_REPLICATION_DOMAIN=${MANILA_ZFSONLINUX_REPLICATION_DOMAIN:-"ZFSonLinux"} # Container Driver MANILA_CONTAINER_DRIVER=${MANILA_CONTAINER_DRIVER:-"manila.share.drivers.container.driver.ContainerShareDriver"} MANILA_DOCKER_IMAGE_ALIAS=${MANILA_DOCKER_IMAGE_ALIAS:-"manila_docker_image"} MANILA_CONTAINER_VOLUME_GROUP_NAME=${MANILA_CONTAINER_VOLUME_GROUP_NAME:-"manila_docker_volumes"} # (aovchinnikov): This location is temporary and will be changed to a # permanent one as soon as possible. 
MANILA_DOCKER_IMAGE_URL=${MANILA_DOCKER_IMAGE_URL:-"https://github.com/a-ovchinnikov/manila-image-elements-lxd-images/releases/download/0.1.0/manila-docker-container.tar.gz"} # Network Plugin MANILA_NETWORK_API_CLASS=${MANILA_NETWORK_API_CLASS:-"manila.network.neutron.neutron_network_plugin.NeutronBindNetworkPlugin"} MANILA_NEUTRON_VNIC_TYPE=${MANILA_NEUTRON_VNIC_TYPE:-"normal"} # SSH TIMEOUT MANILA_SSH_TIMEOUT=${MANILA_SSH_TIMEOUT:-180} # Share and snapshot deferred deletion task interval MANILA_DEFERRED_DELETE_TASK_INTERVAL=${MANILA_DEFERRED_DELETE_TASK_INTERVAL:-10} # Admin Network setup MANILA_ADMIN_NET_RANGE=${MANILA_ADMIN_NET_RANGE:=10.2.5.0/24} # Data Service IP configuration MANILA_DATA_NODE_IP=${MANILA_DATA_NODE_IP:=$MANILA_ADMIN_NET_RANGE} # Data Service copy validation MANILA_DATA_COPY_CHECK_HASH=${MANILA_DATA_COPY_CHECK_HASH:=True} # Manila IPv6 Setup flag MANILA_SETUP_IPV6=${MANILA_SETUP_IPV6:=False} MANILA_RESTORE_IPV6_DEFAULT_ROUTE=${MANILA_RESTORE_IPV6_DEFAULT_ROUTE:=True} # This option controls whether or not to enforce scope when evaluating policies. Learn more: # https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope MANILA_ENFORCE_SCOPE=$(trueorfalse False MANILA_ENFORCE_SCOPE) # Enable manila services # ---------------------- # We have to add Manila to enabled services for screen_it to work # It consists of 4 parts: m-api (API), m-shr (Share), m-sch (Scheduler) # and m-dat (Data). 
enable_service manila enable_service m-api enable_service m-shr enable_service m-sch enable_service m-dat ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7576737 manila-21.0.0/devstack/upgrade/0000775000175000017500000000000000000000000016311 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/upgrade/resources.sh0000775000175000017500000005004100000000000020662 0ustar00zuulzuul00000000000000#!/bin/bash set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $TOP_DIR/openrc admin demo set -o xtrace ################################# Settings #################################### # Access rules data specific to first enabled backend. MANILA_GRENADE_ACCESS_TYPE=${MANILA_GRENADE_ACCESS_TYPE:-"ip"} MANILA_GRENADE_ACCESS_TO=${MANILA_GRENADE_ACCESS_TO:-"0.0.0.0/0"} # Network information that will be used in case DHSS=True driver is used # with non-single-network-plugin. MANILA_GRENADE_NETWORK_NAME=${MANILA_GRENADE_NETWORK_NAME:-"private"} MANILA_GRENADE_SUBNET_NAME=${MANILA_GRENADE_SUBNET_NAME:-"private-subnet"} # Timeout that will be used for share creation wait operation. 
MANILA_GRENADE_WAIT_STEP=${MANILA_GRENADE_WAIT_STEP:-"4"} MANILA_GRENADE_WAIT_TIMEOUT=${MANILA_GRENADE_WAIT_TIMEOUT:-"300"} MANILA_GRENADE_SHARE_NETWORK_NAME=${MANILA_GRENADE_SHARE_NETWORK_NAME:-"manila_grenade_share_network"} MANILA_GRENADE_SHARE_TYPE_NAME=${MANILA_GRENADE_SHARE_TYPE_NAME:-"manila_grenade_share_type"} MANILA_GRENADE_SHARE_NAME=${MANILA_GRENADE_SHARE_NAME:-"manila_grenade_share"} MANILA_GRENADE_SHARE_SNAPSHOT_NAME=${MANILA_GRENADE_SHARE_SNAPSHOT_NAME:-"manila_grenade_share_snapshot"} # Extra specs that will be set for newly created share type MANILA_GRENADE_SHARE_TYPE_SNAPSHOT_SUPPORT_EXTRA_SPEC=${MANILA_GRENADE_SHARE_TYPE_SNAPSHOT_SUPPORT_EXTRA_SPEC:-"True"} MANILA_GRENADE_SHARE_TYPE_CREATE_SHARE_FROM_SNAPSHOT_SUPPORT_EXTRA_SPEC=${MANILA_GRENADE_SHARE_TYPE_CREATE_SHARE_FROM_SNAPSHOT_SUPPORT_EXTRA_SPEC:-"True"} MANILA_GRENADE_SHARE_TYPE_REVERT_TO_SNAPSHOT_SUPPORT_EXTRA_SPEC=${MANILA_GRENADE_SHARE_TYPE_REVERT_TO_SNAPSHOT_SUPPORT_EXTRA_SPEC:-"True"} MANILA_GRENADE_SHARE_TYPE_MOUNT_SNAPSHOT_SUPPORT_EXTRA_SPEC=${MANILA_GRENADE_SHARE_TYPE_MOUNT_SNAPSHOT_SUPPORT_EXTRA_SPEC:-"True"} MANILA_CONF_DIR=${MANILA_CONF_DIR:-/etc/manila} MANILA_CONF=$MANILA_CONF_DIR/manila.conf ################################ Scenarios #################################### function scenario_1_do_share_with_rules_and_metadata { # NOTE(vponomaryov): nova-network with DHSS=True drivers is not supported # by this scenario. 
enabled_share_backends=$(iniget $MANILA_CONF DEFAULT enabled_share_backends) backend=$( echo $enabled_share_backends | cut -d',' -f 1 ) enabled_share_protocols=$(iniget $MANILA_CONF DEFAULT enabled_share_protocols) share_protocol=$( echo $enabled_share_protocols | cut -d',' -f 1 ) driver_handles_share_servers=$(iniget $MANILA_CONF $backend driver_handles_share_servers) create_share_cmd="manila create $share_protocol 1 " create_share_cmd+="--share-type $MANILA_GRENADE_SHARE_TYPE_NAME " create_share_cmd+="--name $MANILA_GRENADE_SHARE_NAME" resource_save manila share_protocol $share_protocol if [[ $(trueorfalse False driver_handles_share_servers) == True ]]; then share_driver=$(iniget $MANILA_CONF $backend share_driver) generic_driver='manila.share.drivers.generic.GenericShareDriver' windows_driver='manila.share.drivers.windows.windows_smb_driver.WindowsSMBDriver' network_plugin=$(iniget $MANILA_CONF $backend network_plugin) share_network_cmd="manila share-network-create " share_network_cmd+="--name $MANILA_GRENADE_SHARE_NETWORK_NAME" if is_service_enabled neutron; then if [[ $share_driver == $generic_driver || \ $share_driver == $windows_driver || \ ! $network_plugin =~ 'Single' || \ ! 
$network_plugin =~ 'Standalone' ]]; then net_id=$(openstack network show $MANILA_GRENADE_NETWORK_NAME -c id -f value) subnet_id=$(openstack subnet show $MANILA_GRENADE_SUBNET_NAME -c id -f value) share_network_cmd+=" --neutron-net $net_id --neutron-subnet $subnet_id" fi else echo 'Neutron service is disabled, creating empty share-network' fi create_share_cmd+=" --share-network $MANILA_GRENADE_SHARE_NETWORK_NAME" resource_save manila share_network $MANILA_GRENADE_SHARE_NETWORK_NAME else resource_save manila share_network 'None' fi # Create share-network eval $share_network_cmd # Create share-type manila type-create \ $MANILA_GRENADE_SHARE_TYPE_NAME \ $driver_handles_share_servers \ --snapshot_support $MANILA_GRENADE_SHARE_TYPE_SNAPSHOT_SUPPORT_EXTRA_SPEC \ --create_share_from_snapshot_support $MANILA_GRENADE_SHARE_TYPE_CREATE_SHARE_FROM_SNAPSHOT_SUPPORT_EXTRA_SPEC \ --revert_to_snapshot_support $MANILA_GRENADE_SHARE_TYPE_REVERT_TO_SNAPSHOT_SUPPORT_EXTRA_SPEC \ --mount_snapshot_support $MANILA_GRENADE_SHARE_TYPE_MOUNT_SNAPSHOT_SUPPORT_EXTRA_SPEC # Create share eval $create_share_cmd # Wait for share creation results wait_timeout=$MANILA_GRENADE_WAIT_TIMEOUT available='false' while (( wait_timeout > 0 )) ; do current_status=$( manila show $MANILA_GRENADE_SHARE_NAME | \ grep " status " | get_field 2 ) if [[ $current_status == 'available' ]]; then available='true' break elif [[ $current_status == 'creating' ]]; then ((wait_timeout-=$MANILA_GRENADE_WAIT_STEP)) sleep $MANILA_GRENADE_WAIT_STEP elif [[ $current_status == 'error' ]]; then die $LINENO "Share is in 'error' state." else die $LINENO "Should never reach this line." fi done if [[ $available == 'true' ]]; then echo "Share has been created successfully." else die $LINENO "Share timed out to reach 'available' status." 
fi # grab the export location export_path=$(manila share-export-location-list \ $MANILA_GRENADE_SHARE_NAME | grep ":/" | \ cut -d "|" -f 3 | head -1) resource_save manila export_path $export_path # Create some metadata manila metadata $MANILA_GRENADE_SHARE_NAME set gre=nade # Add access rules manila access-allow $MANILA_GRENADE_SHARE_NAME \ $MANILA_GRENADE_ACCESS_TYPE $MANILA_GRENADE_ACCESS_TO # Wait for access rule creation results wait_timeout=$MANILA_GRENADE_WAIT_TIMEOUT active='false' while (( wait_timeout > 0 )) ; do current_state=$( manila access-list $MANILA_GRENADE_SHARE_NAME | \ grep " $MANILA_GRENADE_ACCESS_TO " | get_field 5 ) case $current_state in active) active='true' break;; creating|new|queued_to_apply|applying) ((wait_timeout-=$MANILA_GRENADE_WAIT_STEP)) sleep $MANILA_GRENADE_WAIT_STEP;; error) die $LINENO "Failed to create access rule.";; *) die $LINENO "Should never reach this line.";; esac done if [[ $active == 'true' ]]; then echo "Access rule has been created successfully." else die $LINENO "Access rule timed out to reach 'active' state." fi } function scenario_1_verify_share_with_rules_and_metadata { share_status=$(manila show $MANILA_GRENADE_SHARE_NAME | \ grep " status " | get_field 2) if [[ $share_status != "available" ]]; then die $LINENO "Share status is not 'available'. It is $share_status" fi rule_state=$(manila access-list $MANILA_GRENADE_SHARE_NAME | \ grep " $MANILA_GRENADE_ACCESS_TO " | get_field 5) if [[ $rule_state != "active" ]]; then die $LINENO "Share rule state is not 'active'. It is $rule_state" fi metadata=$(manila metadata-show $MANILA_GRENADE_SHARE_NAME | \ grep 'gre' | get_field 2) if [[ $metadata != "nade" ]]; then die $LINENO "Share metadata is not 'gre=nade'. 
It is gre=$metadata" fi } function scenario_1_destroy_share_with_rules_and_metadata { manila delete $MANILA_GRENADE_SHARE_NAME wait_timeout=$MANILA_GRENADE_WAIT_TIMEOUT found='true' while (( wait_timeout > 0 )) ; do share_status=$( manila list --columns id,name,status | \ grep $MANILA_GRENADE_SHARE_NAME | get_field 3) if [[ -z $share_status ]]; then found='false' break elif [[ $share_status == 'deleting' ]]; then ((wait_timeout-=$MANILA_GRENADE_WAIT_STEP)) sleep $MANILA_GRENADE_WAIT_STEP elif [[ $share_status == 'error_deleting' ]]; then die $LINENO "Share failed to be deleted." else die $LINENO "Should never reach this line." fi done if [[ $found == 'true' ]]; then die $LINENO "Share timed out to be deleted." else echo "Share has been deleted successfully." fi share_network=$(resource_get manila share_network) if [[ -n $share_network && $share_network != 'None' ]]; then manila share-network-delete $MANILA_GRENADE_SHARE_NETWORK_NAME fi manila type-delete $MANILA_GRENADE_SHARE_TYPE_NAME } ##### function scenario_2_do_attach_ss_to_sn { manila security-service-create \ ldap \ --name fake_ss_name \ --description fake_ss_description \ --dns-ip fake_dns_ip \ --server fake_server \ --domain fake_domain \ --user fake_user \ --password fake_password manila share-network-create \ --name fake_sn_name \ --description fake_sn_description \ --neutron-net-id fake_net \ --neutron-subnet-id fake_subnet manila share-network-security-service-add fake_sn_name fake_ss_name } function scenario_2_verify_attach_ss_to_sn { attached_security_service=$(\ manila share-network-security-service-list fake_sn_name | \ grep "fake_ss_name") if [[ -z $attached_security_service ]] ; then die $LINENO "Security service 'fake_ss_name' is not attached "\ "to share-network 'fake_sn_name'." fi function assert { actual=$(manila $1 $2 | grep " $3 " | get_field 2) if [[ $actual != $4 ]]; then die $LINENO "Field $3 for command $1 with arg $2 has "\ "value $actual, but $4 is expected." 
fi } assert share-network-show fake_sn_name description fake_sn_description # From API version 2.51, share-network-show command doesn't have # neutron_net_id and neutron_subnet_id, that information is in # "share-network-subnets" assert "--os-share-api-version 2.50 share-network-show" fake_sn_name neutron_net_id fake_net assert "--os-share-api-version 2.50 share-network-show" fake_sn_name neutron_subnet_id fake_subnet share_network_subnets=$(manila share-network-show fake_sn_name | grep share_network_subnets) if [[ ! -z "$share_network_subnets" ]]; then neutron_net_id=$(echo $share_network_subnets | tr ',' '\n' | grep neutron_net_id | cut -d "'" -f4) neutron_subnet_id=$(echo $share_network_subnets | tr ',' '\n' | grep neutron_subnet_id | cut -d "'" -f4) if [[ $neutron_net_id != fake_net ]]; then die $LINENO "Neutron net ID for share network isn't fake_net, it is $neutron_net_id" fi if [[ $neutron_subnet_id != fake_subnet ]]; then die $LINENO "Neutron subnet ID for share network isn't fake_subnet, it is $neutron_subnet_id" fi fi assert security-service-show fake_ss_name description fake_ss_description assert security-service-show fake_ss_name dns_ip fake_dns_ip assert security-service-show fake_ss_name server fake_server assert security-service-show fake_ss_name domain fake_domain assert security-service-show fake_ss_name user fake_user assert security-service-show fake_ss_name password fake_password } function scenario_2_destroy_attach_ss_to_sn { manila share-network-delete fake_sn_name manila security-service-delete fake_ss_name } ##### function scenario_3_do_quotas { current_shares_quota=$(manila quota-show --tenant fake | \ grep " shares " | get_field 2) ((new_shares_quota=$current_shares_quota + 5)) manila quota-update fake --shares $new_shares_quota resource_save manila quota $new_shares_quota } function scenario_3_verify_quotas { shares_quota=$(manila quota-show --tenant fake | \ grep " shares " | get_field 2) expected=$(resource_get manila quota) if [[ 
$shares_quota != $expected ]] ; then die $LINENO "Shares quota for 'fake' tenant is expected "\ "as $expected but it is $shares_quota." fi } function scenario_3_destroy_quotas { manila quota-delete --tenant fake } ##### function scenario_4_do_private_share_types { manila type-create ${MANILA_GRENADE_SHARE_TYPE_NAME}_scenario4 false \ --is-public false manila type-access-add ${MANILA_GRENADE_SHARE_TYPE_NAME}_scenario4 \ $(openstack project show demo -c id -f value) } function scenario_4_verify_private_share_types { share_type_visibility=$(manila type-list --all \ --columns name,visibility | \ grep ${MANILA_GRENADE_SHARE_TYPE_NAME}_scenario4 | get_field 2) if [[ $share_type_visibility != 'private' ]] ; then die $LINENO "Visibility of share type "\ "${MANILA_GRENADE_SHARE_TYPE_NAME}_scenario4 is not "\ "'private'. It is $share_type_visibility" fi project_id=$(openstack project show demo -c id -f value) access=$(manila type-access-list \ ${MANILA_GRENADE_SHARE_TYPE_NAME}_scenario4 | grep $project_id) if [[ -z $access ]]; then die $LINENO "Expected $project_id project ID is not found in list "\ "of allowed projects of "\ "${MANILA_GRENADE_SHARE_TYPE_NAME}_scenario4 share type." 
fi } function scenario_4_destroy_private_share_types { manila type-delete ${MANILA_GRENADE_SHARE_TYPE_NAME}_scenario4 } ##### function scenario_5_do_share_snapshot { if [[ $(trueorfalse True MANILA_GRENADE_SHARE_TYPE_SNAPSHOT_SUPPORT_EXTRA_SPEC) == True ]]; then # Create share snapshot manila snapshot-create $MANILA_GRENADE_SHARE_NAME \ --name $MANILA_GRENADE_SHARE_SNAPSHOT_NAME resource_save manila share_snapshot $MANILA_GRENADE_SHARE_SNAPSHOT_NAME # Wait for share snapshot creation results wait_timeout=$MANILA_GRENADE_WAIT_TIMEOUT available='false' while (( wait_timeout > 0 )) ; do current_status=$( manila snapshot-show $MANILA_GRENADE_SHARE_SNAPSHOT_NAME | \ grep " status " | get_field 2 ) if [[ $current_status == 'available' ]]; then available='true' break elif [[ $current_status == 'creating' ]]; then ((wait_timeout-=$MANILA_GRENADE_WAIT_STEP)) sleep $MANILA_GRENADE_WAIT_STEP elif [[ $current_status == 'error' ]]; then die $LINENO "Share snapshot is in 'error' state." else die $LINENO "Should never reach this line." fi done if [[ $available == 'true' ]]; then echo "Share snapshot has been created successfully." else die $LINENO "Share snapshot timed out to reach 'available' status." fi else echo "Skipping scenario '5' with creation of share snapshot." fi } function scenario_5_verify_share_snapshot { if [[ $(trueorfalse True MANILA_GRENADE_SHARE_TYPE_SNAPSHOT_SUPPORT_EXTRA_SPEC) == True ]]; then # Check that source share ID is set share_id_in_snapshot=$( manila snapshot-show \ $MANILA_GRENADE_SHARE_SNAPSHOT_NAME \ | grep "| share_id " | get_field 2 ) if [[ -z $share_id_in_snapshot ]]; then die $LINENO "Source share ID is not set." fi # Check that snapshot's source share ID is correct share_id=$( manila show $MANILA_GRENADE_SHARE_NAME \ | grep "| id " | get_field 2 ) if [[ $share_id != $share_id_in_snapshot ]]; then die $LINENO "Actual source share ID '$share_id_in_snapshot' is not "\ "equal to expected value '$share_id'." 
fi # Check presence of expected columns in snapshot view snapshot_output=$( manila snapshot-show $MANILA_GRENADE_SHARE_SNAPSHOT_NAME ) for snapshot_column in 'id' 'provider_location' 'name' 'size' 'export_locations'; do echo $snapshot_output | grep "| $snapshot_column " if [[ $? != 0 ]]; then die $LINENO "'$snapshot_column' column was not found in output '$snapshot_output'" fi done fi } function scenario_5_destroy_share_snapshot { if [[ $(trueorfalse True MANILA_GRENADE_SHARE_TYPE_SNAPSHOT_SUPPORT_EXTRA_SPEC) == True ]]; then manila snapshot-delete $MANILA_GRENADE_SHARE_SNAPSHOT_NAME wait_timeout=$MANILA_GRENADE_WAIT_TIMEOUT found='true' while (( wait_timeout > 0 )) ; do snapshot_status=$( manila snapshot-list --columns id,name,status | \ grep $MANILA_GRENADE_SHARE_SNAPSHOT_NAME | get_field 3) if [[ -z $snapshot_status ]]; then found='false' break elif [[ $snapshot_status == 'deleting' ]]; then ((wait_timeout-=$MANILA_GRENADE_WAIT_STEP)) sleep $MANILA_GRENADE_WAIT_STEP elif [[ $snapshot_status == 'error_deleting' ]]; then die $LINENO "Share snapshot failed to be deleted." else die $LINENO "Should never reach this line." fi done if [[ $found == 'true' ]]; then die $LINENO "Share snapshot timed out to be deleted." else echo "Share snapshot has been deleted successfully." 
fi fi } ##### function scenario_6_do_share_mount_and_write_data { mkdir -p /tmp/manila-share export_path=$(resource_get manila export_path) share_protocol=$(resource_get manila share_protocol| awk '{print tolower($0)}') sudo mount -t $share_protocol $export_path /tmp/manila-share test_msg="Hello from the past" echo $test_msg | sudo tee /tmp/manila-share/testfile && sudo sync } function scenario_6_verify_share_mount_and_read_data { export_path=$(resource_get manila export_path) share_is_mounted=$(sudo mount | grep $export_path) [[ -z $share_is_mounted ]] && die $LINENO "Share $export_path is not mounted" read_data=$(sudo cat /tmp/manila-share/testfile|xargs) if [[ $read_data == "Hello from the past" ]]; then echo "Share data remains unmodified." else die $LINENO "Share data does not match what was written before upgrade." fi } function scenario_6_destroy_share_mount { export_path=$(resource_get manila export_path) sudo umount -f /tmp/manila-share } ################################# Main logic ################################## function create { scenario_1_do_share_with_rules_and_metadata scenario_2_do_attach_ss_to_sn scenario_3_do_quotas scenario_4_do_private_share_types scenario_5_do_share_snapshot scenario_6_do_share_mount_and_write_data echo "Manila 'create': SUCCESS" } function verify { scenario_1_verify_share_with_rules_and_metadata scenario_2_verify_attach_ss_to_sn scenario_3_verify_quotas scenario_4_verify_private_share_types scenario_5_verify_share_snapshot scenario_6_verify_share_mount_and_read_data echo "Manila 'verify': SUCCESS" } function destroy { scenario_6_destroy_share_mount scenario_5_destroy_share_snapshot scenario_1_destroy_share_with_rules_and_metadata scenario_2_destroy_attach_ss_to_sn scenario_3_destroy_quotas scenario_4_destroy_private_share_types echo "Manila 'destroy': SUCCESS" } function verify_noapi { scenario_6_verify_share_mount_and_read_data } ################################# Dispatcher ################################## case $1 in 
"create") create ;; "verify_noapi") verify_noapi ;; "verify") verify ;; "destroy") destroy ;; "force_destroy") set +o errexit destroy ;; esac ############################################################################### ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/upgrade/settings0000664000175000017500000000066400000000000020102 0ustar00zuulzuul00000000000000#!/bin/bash register_project_for_upgrade manila register_db_to_save manila export BASE_RUN_SMOKE=False export TARGET_RUN_SMOKE=False # NOTE(vponomaryov): stable client is used for keeping scenarios stable # so they are not broken by changed CLI views. devstack_localrc base MANILACLIENT_BRANCH="stable/ussuri" devstack_localrc target MANILACLIENT_BRANCH="stable/ussuri" devstack_localrc target MANILA_USE_DOWNGRADE_MIGRATIONS=False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/upgrade/shutdown.sh0000775000175000017500000000072000000000000020522 0ustar00zuulzuul00000000000000#!/bin/bash # # set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory # Locate the manila plugin and get its functions MANILA_DEVSTACK_DIR=$(dirname $(dirname $0)) source $MANILA_DEVSTACK_DIR/plugin.sh set -o xtrace stop_manila # Ensure everything is stopped ensure_services_stopped manila-api manila-share manila-scheduler manila-data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/devstack/upgrade/upgrade.sh0000775000175000017500000000421500000000000020301 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-manila`` echo "*********************************************************************" echo "Begin $0" echo 
"*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "*********************************************************************" echo "ERROR: Abort $0" echo "*********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Upgrade Manila # ============== # Locate manila devstack plugin, the directory above the # grenade plugin. MANILA_DEVSTACK_DIR=$(dirname $(dirname $0)) # Get functions from current DevStack source $TARGET_DEVSTACK_DIR/functions source $TARGET_DEVSTACK_DIR/lib/tls source $TARGET_DEVSTACK_DIR/stackrc source $(dirname $(dirname $BASH_SOURCE))/settings source $(dirname $(dirname $BASH_SOURCE))/plugin.sh # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following allowing as the install occurs. 
set -o xtrace # Save current config files for posterity [[ -d $SAVE_DIR/etc.manila ]] || cp -pr $MANILA_CONF_DIR $SAVE_DIR/etc.manila # Install the target manila install_manila # calls upgrade-manila for specific release upgrade_project manila $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # Migrate the database $MANILA_BIN_DIR/manila-manage db sync || die $LINENO "DB migration error" start_manila # Don't succeed unless the services come up ensure_services_started manila-api manila-share manila-scheduler manila-data set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7616737 manila-21.0.0/doc/0000775000175000017500000000000000000000000013623 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/README.rst0000664000175000017500000000154100000000000015313 0ustar00zuulzuul00000000000000======================= Manila Development Docs ======================= Files under this directory tree are used for generating the documentation for the manila source code. Developer documentation is built to: https://docs.openstack.org/manila/latest/ Tools ===== Sphinx The Python Sphinx package is used to generate the documentation output. Information on Sphinx, including formatting information for RST source files, can be found in the `Sphinx online documentation `_. Graphviz Some of the diagrams are generated using the ``dot`` language from Graphviz. See the `Graphviz documentation `_ for Graphviz and dot language usage information. Building Documentation ====================== Doc builds are performed using tox with the ``docs`` target:: % cd .. 
% tox -e docs ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7616737 manila-21.0.0/doc/ext/0000775000175000017500000000000000000000000014423 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/ext/__init__.py0000664000175000017500000000000000000000000016522 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/requirements.txt0000664000175000017500000000030200000000000017102 0ustar00zuulzuul00000000000000openstackdocstheme>=2.2.1 # Apache-2.0 reno>=3.1.0 # Apache-2.0 sphinx>=2.1.1 # BSD os-api-ref>=1.4.0 # Apache-2.0 ddt>=1.0.1 # MIT fixtures>=3.0.0 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7616737 manila-21.0.0/doc/source/0000775000175000017500000000000000000000000015123 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7616737 manila-21.0.0/doc/source/_extra/0000775000175000017500000000000000000000000016405 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/_extra/.htaccess0000664000175000017500000000013500000000000020202 0ustar00zuulzuul00000000000000RedirectMatch 301 ^/drivers/emc-isilon-driver.html$ /drivers/dell-emc-powerscale-driver.html ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7736735 manila-21.0.0/doc/source/admin/0000775000175000017500000000000000000000000016213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/doc/source/admin/capabilities_and_extra_specs.rst0000664000175000017500000003566300000000000024635 0ustar00zuulzuul00000000000000.. _capabilities_and_extra_specs: Capabilities and Extra-Specs ============================ Cloud Administrators create :ref:`shared_file_systems_share_types` with extra-specs to: - influence the scheduler's decision to place new shares, and - instruct the Shared File System service or its storage driver/s to perform certain special actions with respect to the users' shares. As an administrator, you can choose a descriptive name or provide good descriptions for your share types to convey the share type capabilities to end users. End users can view standard ``tenant-visible`` extra-specs that can let them seek required behavior and automate their applications accordingly. By design, however, all other extra-specs of a share type are not exposed to non-privileged users. Types of Extra-Specs -------------------- The Shared File Systems service back-end storage drivers offer a wide range of capabilities. The variation in these capabilities allows cloud administrators to provide a storage service catalog to their end users. Share type extra-specs tie-in with these capabilities. Some back-end capabilities are very specific to a storage system, and are opaque to the Shared File System service or the end users. These capabilities are invoked with the help of "scoped" extra-specs. Using scoped extra-specs is a way to provide programmatic directives to the concerned storage driver to do something during share creation or share manipulation. You can learn about the opaque capabilities through driver documentation and configure these capabilities within share types as scoped extra-specs (e.g.: hpe3par:nfs_options). The Shared File System service scheduler ignores scoped extra-specs during its quest to find the right back end to provision shares. There are some back-end capabilities in manila that do matter to the scheduler. 
For our understanding, lets call these non-scoped or non-opaque capabilities. All non-scoped capabilities can be directly used as share types extra-specs. They are considered by the scheduler’s capabilities filter (and any custom filter defined by deployers). You can get a list of non-scoped capabilities from the scheduler by using: .. code-block:: console $ manila pool-list --detail The non-scoped capabilities can be of three types: - **Capabilities pertaining to a specific back end storage system driver**: For example, *huawei_smartcache*. No Shared File System service API relies on non-opaque back end specific capabilities. - **Common capabilities that are not visible to end users**: The manila community has standardized some cross-platform capabilities like *thin_provisioning*, *dedupe*, *compression*, *qos*, *ipv6_support* and *ipv4_support*. Values of these options do not matter to any Shared File System service APIs; however, they can signify something to the manila services themselves. For example when a back end supports thin_provisioning, the scheduler service performs over-provisioning, and if a back end does not report *ipv6_support* as True, the share-manager service drops IPv6 access rules before invoking the storage driver to update access rules. - **Common capabilities that are visible to end users**: Some capabilities affect functionality exposed via the Shared File System service API. For example, not all back ends support snapshots, and even if they do, they may not support all of the snapshot operations. For example, cloning snapshots into new shares, reverting shares in-place to snapshots, etc. The support for these capabilities determines whether users would be able to perform certain control-plane operations with manila. For example, a back end driver may report *snapshot_support=True* allowing end users to create share snapshots, however, the driver can report *create_share_from_snapshot_support=False*. 
This reporting allows cloud administrators to create share types that support snapshots but not creating shares from snapshots. When a user uses such a share type, they will not be able to clone snapshots into new shares. Tenant-visible capabilities aid manila in validating requests and failing fast on requests it cannot accommodate. They also help level set the user expectations on some failures. For example, if snapshot_support is set to False on the share type, since users can see this, they will not invoke the create snapshot API, and even if they do, they will understand the HTTP 400 (and error message) in better context. .. important:: All extra-specs are optional, except one: *driver_handles_share_servers*. Scheduler's treatment of non-scoped extra specs ----------------------------------------------- The CapabilitiesFilter in the Shared File System scheduler uses the following for matching operators: * No operator This defaults to doing a python ==. Additionally it will match boolean values. * **<=, >=, ==, !=** This does a float conversion and then uses the python operators as expected. * **** This either chooses a host that has partially matching string in the capability or chooses a host if it matches any value in a list. For example, if " sse4" is used, it will match a host that reports capability of "sse4_1" or "sse4_2". * **** This chooses a host that has one of the items specified. If the first word in the string is , another and value pair can be concatenated. Examples are " 3", " 3 5", and " 1 3 7". This is for string values only. * **** This chooses a host that matches a boolean capability. An example extra-spec value would be " True". * **=** This does a float conversion and chooses a host that has equal to or greater than the resource specified. This operator behaves this way for historical reasons. * **s==, s!=, s>=, s>, s<=, s<** The "s" indicates it is a string comparison. 
These choose a host that satisfies the comparison of strings in capability and specification. For example, if "capabilities:replication_type s== dr", a host that reports replication_type of "dr" will be chosen. If "share_backend_name s!= cephfs" is used, any host not named "cephfs" can be chosen. For vendor-specific non-scoped capabilities (which need to be visible to the scheduler), drivers are recommended to use the vendor prefix followed by an underscore. This is not a strict requirement, but can provide a consistent look along-side the scoped extra-specs and will be a clear indicator of vendor capabilities vs. common capabilities. Common Capabilities ------------------- Common capabilities apply to multiple backends. Like all other backend reported capabilities, these capabilities can be used verbatim as extra_specs in share types used to create shares. Share type common capability extra-specs that are visible to end users: ----------------------------------------------------------------------- * **driver_handles_share_servers** is a special, required common capability. When set to True, the scheduler matches requests with back ends that can isolate user workloads with dedicated share servers exporting shares on user provided share networks. * **snapshot_support** indicates whether snapshots are supported for shares created on the pool/backend. When administrators do not set this capability as an extra-spec in a share type, the scheduler can place new shares of that type in pools without regard for whether snapshots are supported, and those shares will not support snapshots. * **create_share_from_snapshot_support** indicates whether a backend can create a new share from a snapshot. When administrators do not set this capability as an extra-spec in a share type, the scheduler can place new shares of that type in pools without regard for whether creating shares from snapshots is supported, and those shares will not support creating shares from snapshots. 
* **revert_to_snapshot_support** indicates that a driver is capable of reverting a share in place to its most recent snapshot. When administrators do not set this capability as an extra-spec in a share type, the scheduler can place new shares of that type in pools without regard for whether reverting shares to snapshots is supported, and those shares will not support reverting shares to snapshots. * **mount_snapshot_support** indicates that a driver is capable of exporting share snapshots for mounting. Users can provide and revoke access to mountable snapshots just like they can with their shares. * **replication_type** indicates the style of replication supported for the backend/pool. This extra_spec will have a string value and could be one of :term:`writable`, :term:`readable` or :term:`dr`. `writable` replication type involves synchronously replicated shares where all replicas are writable. Promotion is not supported and not needed. `readable` and `dr` replication types involve a single `active` or `primary` replica and one or more `non-active` or secondary replicas per share. In `readable` type of replication, `non-active` replicas have one or more export_locations and can thus be mounted and read while the `active` replica is the only one that can be written into. In `dr` style of replication, only the `active` replica can be mounted, read from and written into. * **availability_zones** indicates a comma separated list of availability zones that can be used for provisioning. Users can always provide a specific availability zone during share creation, and they will receive a synchronous failure message if they attempt to create a share in an availability zone that the share type does not permit. If you do not set this extra-spec, the share type is assumed to be serviceable in all availability zones known to the Shared File Systems service. * **mount_point_name_support** whether a custom export location could be specified during share creation. 
To enable users to specify a custom mount point for their shares, administrators must set this extra-specification in the share type to True. They must also provide an extra-spec named ``provisioning:mount_point_prefix``. The service will use this prefix in conjunction with the mount point name provided by end users during share creation. When ``provisioning:mount_point_prefix`` is not set on a share type, but ``mount_point_name_support`` is enabled, the share's export location will be prefixed with the ``project_id``. However, shares created with a ``project_id`` prefix are not eligible for transfer. For these shares to be transferred to a different project, the admin will need to manually unmount them from the current project and mount them to the target project. Share type common capability extra-specs that are not visible to end users: --------------------------------------------------------------------------- * **dedupe** indicates that a backend/pool can provide shares using some deduplication technology. The default value of the dedupe capability (if a driver doesn't report it) is False. Drivers can support both dedupe and non-deduped shares in a single storage pool by reporting ``dedupe=[True, False]``. You can make a share type use deduplication by setting this extra-spec to ' True', or prevent it by setting this extra-spec to ' False'. * **compression** indicates that a backend/pool can provide shares using some compression technology. The default value of the compression capability (if a driver doesn't report it) is False. Drivers can support compressed and non-compressed shares in a single storage pool by reporting ``compression=[True, False]``. You can make a share type use compression by setting this extra-spec to ' True', or prevent it by setting this extra-spec to ' False'. * **thin_provisioning** can be enabled where shares will not be guaranteed space allocations and overprovisioning will be enabled. This capability defaults to False. 
Back ends/pools that support thin provisioning report True for this capability. Administrators can make a share type use thin provisioned shares by setting this extra-spec to ' True'. If a driver reports thin_provisioning=False (the default) then it's assumed that the driver is doing thick provisioning and overprovisioning is turned off. A driver can support thin provisioned and thick provisioned shares in the same pool by reporting ``thin_provisioning=[True, False]``. To provision a thick share on a back end that supports both thin and thick provisioning, set one of the following in extra specs: :: {'thin_provisioning': 'False'} {'thin_provisioning': ' False'} {'capabilities:thin_provisioning': 'False'} {'capabilities:thin_provisioning': ' False'} * **qos** indicates that a backend/pool can provide shares using some QoS (Quality of Service) specification. The default value of the qos capability (if a driver doesn't report it) is False. You can make a share type use QoS by setting this extra-spec to ' True' and also setting the relevant QoS-related extra specs for the drivers being used. Administrators can prevent a share type from using QoS by setting this extra-spec to ' False'. Different drivers have different ways of specifying QoS limits (or guarantees) and this extra spec merely allows the scheduler to filter by pools that either have or don't have QoS support enabled. * **ipv4_support** indicates whether a back end can create a share that can be accessed via IPv4 protocol. If administrators do not set this capability as an extra-spec in a share type, the scheduler can place new shares of that type in pools without regard for whether IPv4 is supported. * **ipv6_support** - indicates whether a back end can create a share that can be accessed via IPv6 protocol. If administrators do not set this capability as an extra-spec in a share type, the scheduler can place new shares of that type in pools without regard for whether IPv6 is supported. 
* **provisioning:max_share_size** can set the max size of share, the value must be an integer and greater than 0. If administrators set this capability as an extra-spec in a share type, the size of share created with the share type can not be greater than the specified value. * **provisioning:min_share_size** can set the min size of share, the value must be an integer and greater than 0. If administrators set this capability as an extra-spec in a share type, the size of share created with the share type can not be less than the specified value. * **provisioning:max_share_extend_size** can set the max size of share extend, the value must be an integer and greater than 0. If administrators set this capability as an extra-spec in a share type, the size of share extended with the share type can not be greater than the specified value. This capability is ignored for regular users and the "provisioning:max_share_size" is the only effective limit. * **provisioning:mount_point_prefix** can set prefix for human readable mount_point_name, the value must be a string containing ASCII alphabets and optionally, the underscore character. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/container_driver.rst0000664000175000017500000001064500000000000022310 0ustar00zuulzuul00000000000000.. Copyright 2016 Mirantis Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Container Driver ================ The Container driver provides a lightweight solution for share servers management. It allows to use Docker containers for hosting userspace shared file systems services. Supported operations -------------------- - Create CIFS share; - Delete CIFS share; - Allow user access to CIFS share; - Deny user access to CIFS share; - Extend CIFS share. Restrictions ------------ - Current implementation has been tested only on Ubuntu. Devstack plugin won't work on other distributions however it should be possible to install prerequisites and set the driver up manually; - The only supported protocol is CIFS; - The following features are not implemented: * Manage/unmanage share; * Shrink share; * Create/delete snapshots; * Create a share from a snapshot; * Manage/unmanage snapshots. Known problems -------------- - May demonstrate unstable behaviour when running concurrently. It is strongly suggested that the driver should be used with extreme care in cases other than building lightweight development and testing environments. Setting up container driver with devstack ----------------------------------------- The driver could be set up via devstack. This requires the following update to local.conf: .. code-block:: ini enable_plugin manila https://opendev.org/openstack/manila MANILA_ENABLED_BACKENDS=london MANILA_OPTGROUP_london_driver_handles_share_servers=True MANILA_OPTGROUP_london_neutron_host_id= SHARE_DRIVER=manila.share.drivers.container.driver.ContainerShareDriver SHARE_BACKING_FILE_SIZE= MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=false' where is change reference, which could be copied from gerrit web-interface, is the name of the host with running neutron Setting Container Driver Up Manually ------------------------------------ This section describes steps needed to be performed to set the driver up manually. The driver has been tested on Ubuntu 14.04, thus in case of any other distribution package names might differ. 
The following packages must be installed: - docker.io One can verify if the package is installed by issuing ``sudo docker info`` command. In case of normal operation it should return docker usage statistics. In case it fails complaining on inaccessible socket try installing ``apparmor``. Please note that docker usage requires superuser privileges. After docker is successfully installed a docker image containing necessary packages must be provided. Currently such image could be downloaded from https://github.com/a-ovchinnikov/manila-image-elements-lxd-images/releases/download/0.1.0/manila-docker-container.tar.gz The image has to be unpacked but not untarred. This could be achieved by running 'gzip -d ' command. Resulting tar-archive of the image could be uploaded to docker via .. code-block:: console sudo docker load --input If the previous command finished successfully you will be able to see the image in the image list: .. code-block:: console sudo docker images The driver expects to find a folder /tmp/shares on the host where it is running as well as a logical volume group "manila_docker_volumes". When installing the driver manually one must make sure that 'brctl' and 'docker' commands are present in the /etc/manila/rootwrap.d/share.filters and could be executed as root. Finally to use the driver one must add a backend to the config file containing the following settings: .. code-block:: ini driver_handles_share_servers = True share_driver = manila.share.drivers.container.driver.ContainerShareDriver neutron_host_id = where is the name of the host running neutron. (In case of single VM devstack it is VM's name). After restarting manila services you should be able to use the driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/dell_emc_powerscale_driver.rst0000664000175000017500000000621100000000000024310 0ustar00zuulzuul00000000000000.. 
Copyright (c) 2015 EMC Corporation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. PowerScale Driver ================= The EMC manila driver framework (EMCShareDriver) utilizes Dell storage products to provide shared filesystems to OpenStack. The EMC manila driver is a plugin based driver which is designed to use different plugins to manage different Dell storage products. The PowerScale manila driver is a plugin for the EMC manila driver framework which allows manila to interface with an PowerScale backend to provide a shared filesystem. The EMC driver framework with the PowerScale plugin is referred to as the "PowerScale Driver" in this document. This PowerScale Driver interfaces with an PowerScale cluster via the REST PowerScale Platform API (PAPI) and the RESTful Access to Namespace API (RAN). 
Requirements ------------ - PowerScale cluster running OneFS 9.10 or higher Supported Operations -------------------- The following operations are supported on an PowerScale cluster: * Create CIFS/NFS Share * Delete CIFS/NFS Share * Allow CIFS/NFS Share access * Only IP access type is supported for NFS and CIFS * Only RW access supported * Deny CIFS/NFS Share access * Create snapshot * Delete snapshot * Create share from snapshot * Extend share Backend Configuration --------------------- The following parameters need to be configured in the manila configuration file for the PowerScale driver: * share_driver = manila.share.drivers.dell_emc.driver.EMCShareDriver * driver_handles_share_servers = False * emc_share_backend = powerscale * emc_nas_server = * emc_nas_server_port = * emc_nas_login = * emc_nas_password = * emc_nas_root_dir = Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Restrictions ------------ The PowerScale driver has the following restrictions: - Only IP access type is supported for NFS and CIFS. - Only FLAT network is supported. The :mod:`manila.share.drivers.dell_emc.driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.dell_emc.driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.drivers.dell_emc.plugins.powerscale.powerscale` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.dell_emc.plugins.powerscale.powerscale :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/emc_vnx_driver.rst0000664000175000017500000003210700000000000021762 0ustar00zuulzuul00000000000000.. Copyright (c) 2014 EMC Corporation All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. VNX Driver ========== EMC manila driver framework (EMCShareDriver) utilizes the EMC storage products to provide the shared filesystems to OpenStack. The EMC manila driver is a plugin based driver which is designed to use different plugins to manage different EMC storage products. VNX plugin is the plugin which manages the VNX to provide shared filesystems. EMC driver framework with VNX plugin is referred to as VNX driver in this document. This driver performs the operations on VNX by XMLAPI and the File command line. Each backend manages one Data Mover of VNX. Multiple manila backends need to be configured to manage multiple Data Movers. .. note:: Dell EMC VNX driver has been deprecated and will be removed in a future release Requirements ------------ - VNX OE for File version 7.1 or higher. - VNX Unified, File only, or Gateway system with single storage backend. - The following licenses should be activated on VNX for File: * CIFS * NFS * SnapSure (for snapshot) * ReplicationV2 (for create share from snapshot) Supported Operations -------------------- The following operations will be supported on VNX array: - Create CIFS/NFS Share - Delete CIFS/NFS Share - Allow CIFS/NFS Share access * Only IP access type is supported for NFS. * Only user access type is supported for CIFS. 
- Deny CIFS/NFS Share access - Create snapshot - Delete snapshot - Create share from snapshot While the generic driver creates shared filesystems based on Cinder volumes attached to Nova VMs, the VNX driver performs similar operations using the Data Movers on the array. Pre-Configurations on VNX ------------------------- 1. Enable Unicode on Data mover VNX driver requires that the Unicode is enabled on Data Mover. CAUTION: After enabling Unicode, you cannot disable it. If there are some filesystems created before Unicode is enabled on the VNX, consult the storage administrator before enabling Unicode. To check the Unicode status on Data Mover, use the following VNX File command on VNX control station: server_cifs | head where: mover_name = Check the value of `I18N mode` field. UNICODE mode is shown as `I18N mode = UNICODE` To enable the Unicode for Data Mover: uc_config -on -mover where: mover_name = Refer to the document `Using International Character Sets on VNX for File` on [EMC support site](https://support.emc.com) for more information. 2. Enable CIFS service on Data Mover Ensure the CIFS service is enabled on the Data Mover which is going to be managed by VNX driver. To start the CIFS service, use the following command: server_setup -Protocol cifs -option start [=] where: = [=] = Note: If there is 1 GB of memory on the Data Mover, the default is 96 threads; however, if there is over 1 GB of memory, the default number of threads is 256. To check the CIFS service status, use this command: server_cifs | head where: = The command output will show the number of CIFS threads started. 3. NTP settings on Data Mover VNX driver only supports CIFS share creation with share network which has an Active Directory security-service associated. Creating CIFS share requires that the time on the Data Mover is in sync with the Active Directory domain so that the CIFS server can join the domain. Otherwise, the domain join will fail when creating share with this security service. 
There is a limitation that the time of the domains used by security-services even for different tenants and different share networks should be in sync. Time difference should be less than 10 minutes. It is recommended to set the NTP server to the same public NTP server on both the Data Mover and domains used in security services to ensure the time is in sync everywhere. Check the date and time on Data Mover: server_date where: mover_name = Set the NTP server for Data Mover: server_date timesvc start ntp [ ...] where: mover_name = host = Note: The host must be running the NTP protocol. Only 4 host entries are allowed. 4. Configure User Mapping on the Data Mover Before creating CIFS share using VNX driver, you must select a method of mapping Windows SIDs to UIDs and GIDs. EMC recommends using usermapper in single protocol (CIFS) environment which is enabled on VNX by default. To check usermapper status, use this command syntax: server_usermapper where: = If usermapper is not started, the following command can be used to start the usermapper: server_usermapper -enable where: = For multiple protocol environment, refer to `Configuring VNX User Mapping` on [EMC support site](https://support.emc.com) for additional information. 5. Network Connection In the current release, the share created by VNX driver uses the first network device (physical port on NIC) of Data Mover to access the network. Go to Unisphere to check the device list: Settings -> Network -> Settings for File (Unified system only) -> Device. Backend Configuration --------------------- The following parameters need to be configured in `/etc/manila/manila.conf` for the VNX driver: emc_share_backend = vnx emc_nas_server = emc_nas_password = emc_nas_login = emc_nas_server_container = emc_nas_pool_name = emc_interface_ports = share_driver = manila.share.drivers.dell_emc.driver.EMCShareDriver driver_handles_share_servers = True - `emc_share_backend` is the plugin name. Set it to `vnx` for the VNX driver. 
- `emc_nas_server` is the control station IP address of the VNX system to be managed. - `emc_nas_password` and `emc_nas_login` fields are used to provide credentials to the VNX system. Only local users of VNX File is supported. - `emc_nas_server_container` field is the name of the Data Mover to serve the share service. - `emc_nas_pool_name` is the pool name user wants to create volume from. The pools can be created using Unisphere for VNX. - `emc_interface_ports` is comma separated list specifying the ports(devices) of Data Mover that can be used for share server interface. Members of the list can be Unix-style glob expressions (supports Unix shell-style wildcards). This list is optional. In the absence of this option, any of the ports on the Data Mover can be used. - `driver_handles_share_servers` must be True, the driver will choose a port from port list which configured in emc_interface_ports. Restart of :term:`manila-share` service is needed for the configuration changes to take effect. IPv6 support ------------ IPv6 support for VNX driver is introduced in Queens release. The feature is divided into two parts: 1. The driver is able to manage share or snapshot in the Neutron IPv6 network. 2. The driver is able to connect VNX management interface using its IPv6 address. Pre-Configurations for IPv6 support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following parameters need to be configured in `/etc/manila/manila.conf` for the VNX driver: network_plugin_ipv6_enabled = True - `network_plugin_ipv6_enabled` indicates IPv6 is enabled. If you want to connect VNX using IPv6 address, you should configure IPv6 address by `nas_cs` command for VNX and specify the address in `/etc/manila/manila.conf`: emc_nas_server = Snapshot support ---------------- In the Mitaka and Newton release of OpenStack, Snapshot support is enabled by default for a newly created share type. 
Starting with the Ocata release, the snapshot_support extra spec must be set to True in order to allow snapshots for a share type. If the 'snapshot_support' extra_spec is omitted or if it is set to False, users would not be able to create snapshots on shares of this share type. The feature is divided into two parts: 1. The driver is able to create/delete snapshot of share. 2. The driver is able to create share from snapshot. Pre-Configurations for Snapshot support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following extra specifications need to be configured with share type. - snapshot_support = True - create_share_from_snapshot_support = True For new share type, these extra specifications can be set directly when creating share type: .. code-block:: console manila type-create --snapshot_support True --create_share_from_snapshot_support True ${share_type_name} True Or you can update already existing share type with command: .. code-block:: console manila type-key ${share_type_name} set snapshot_support=True manila type-key ${share_type_name} set create_share_from_snapshot_support=True To snapshot a share and create share from the snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Firstly, you need create a share from share type that has extra specifications(snapshot_support=True, create_share_from_snapshot_support=True). Then snapshot the share with command: .. code-block:: console manila snapshot-create ${source_share_name} --name ${target_snapshot_name} --description " " After creating the snapshot from previous step, you can create share from that snapshot. Use command: .. code-block:: console manila create nfs 1 --name ${target_share_name} --metadata source=snapshot --description " " --snapshot-id ${source_snapshot_id} Restrictions ------------ The VNX driver has the following restrictions: - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. - Only FLAT network and VLAN network are supported. 
- VLAN network is supported with limitations. The Neutron subnets in different VLANs that are used to create share networks cannot have overlapped address spaces. Otherwise, VNX may have a problem to communicate with the hosts in the VLANs. To create shares for different VLANs with same subnet address, use different Data Movers. - The 'Active Directory' security service is the only supported security service type and it is required to create CIFS shares. - Only one security service can be configured for each share network. - Active Directory domain name of the 'active_directory' security service should be unique even for different tenants. - The time on Data Mover and the Active Directory domains used in security services should be in sync (time difference should be less than 10 minutes). It is recommended to use same NTP server on both the Data Mover and Active Directory domains. - On VNX the snapshot is stored in the SavVols. VNX system allows the space used by SavVol to be created and extended until the sum of the space consumed by all SavVols on the system exceeds the default 20% of the total space available on the system. If the 20% threshold value is reached, an alert will be generated on VNX. Continuing to create snapshot will cause the old snapshot to be inactivated (and the snapshot data to be abandoned). The limit percentage value can be changed manually by storage administrator based on the storage needs. Administrator is recommended to configure the notification on the SavVol usage. Refer to `Using VNX SnapSure` document on [EMC support site](https://support.emc.com) for more information. - VNX has limitations on the overall numbers of Virtual Data Movers, filesystems, shares, checkpoints, and etc. Virtual Data Mover(VDM) is created by the VNX driver on the VNX to serve as the manila share server. Similarly, filesystem is created, mounted, and exported from the VDM over CIFS or NFS protocol to serve as the manila share. 
The VNX checkpoint serves as the manila share snapshot. Refer to the `NAS Support Matrix` document on [EMC support site](https://support.emc.com) for the limitations and configure the quotas accordingly. The :mod:`manila.share.drivers.dell_emc.driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.dell_emc.driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.drivers.dell_emc.plugins.vnx.connection` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.dell_emc.plugins.vnx.connection :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/export_location_metadata.rst0000664000175000017500000000343300000000000024021 0ustar00zuulzuul00000000000000Export Location Metadata ======================== Manila shares can have one or more export locations. The exact number depends on the driver and the storage controller, and there is no preference for more or fewer export locations. Usually drivers create an export location for each physical network interface through which the share can be accessed. Because not all export locations have the same qualities, Manila allows drivers to add additional keys to the dict returned for each export location when a share is created. The share manager stores these extra keys and values in the database and they are available to the API service, which may expose them through the REST API or use them for filtering. Metadata Keys ------------- Only keys defined in this document are valid. Arbitrary driver-defined keys are not allowed. The following keys are defined: * `is_admin_only` - May be True or False. Defaults to False. Indicates that the export location exists for administrative purposes. 
If is_admin_only=True, then the export location is hidden from non-admin users calling the REST API. Also, these export locations are assumed to be reachable directly from the admin network, which is important for drivers that support share servers and which have some export locations only accessible to tenants. * `preferred` - May be True or False. Defaults to False. Indicates that clients should prefer to mount this export location over other export locations that are not preferred. This may be used by drivers which have fast/slow paths to indicate to clients which paths are faster. It could be used to indicate a path is preferred for another reason, as long as the reason isn't one that changes over the life of the manila-share service. This key is always visible through the REST API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/generic_driver.rst0000664000175000017500000002654200000000000021745 0ustar00zuulzuul00000000000000.. Copyright 2014 Mirantis Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Generic approach for share provisioning ======================================= The Shared File Systems service can be configured to use Nova VMs and Cinder volumes. Using this driver, Manila will use SSH to configure the shares on the service virtual machine instance. The following options may be specified in the manila.conf configuration file: .. 
code-block:: ini # User in service instance that will be used for authentication. # (string value) #service_instance_user = # Password for service instance user. (string value) #service_instance_password = # Path to host's private key. (string value) #path_to_private_key = # Maximum time in seconds to wait for creating service instance. # (integer value) #max_time_to_build_instance = 300 # Block SSH connection to the service instance from other networks # than service network. (boolean value) #limit_ssh_access = false Additionally, this driver supports both ``DHSS=False`` and ``DHSS=True``. Depending on which one you use, you need to specify different configuration options in your manila.conf configuration file. - With ``DHSS=False``: .. code-block:: ini # Name or ID of service instance in Nova to use for share exports. # Used only when share servers handling is disabled. (string value) #service_instance_name_or_id = # Can be either name of network that is used by service instance # within Nova to get IP address or IP address itself (either IPv4 or # IPv6) for managing shares there. Used only when share servers # handling is disabled. (host address value) #service_net_name_or_ip = # Can be either name of network that is used by service instance # within Nova to get IP address or IP address itself (either IPv4 or # IPv6) for exporting shares. Used only when share servers handling is # disabled. (host address value) #tenant_net_name_or_ip = - With ``DHSS=True``: .. code-block:: ini # Name of image in Glance, that will be used for service instance # creation. Only used if driver_handles_share_servers=True. (string # value) #service_image_name = manila-service-image # Name of service instance. Only used if # driver_handles_share_servers=True. (string value) #service_instance_name_template = manila_service_instance_%s # Keypair name that will be created and used for service instances. # Only used if driver_handles_share_servers=True. 
(string value) #manila_service_keypair_name = manila-service # Path to hosts public key. Only used if # driver_handles_share_servers=True. (string value) #path_to_public_key = ~/.ssh/id_rsa.pub # Security group name, that will be used for service instance # creation. Only used if driver_handles_share_servers=True. (string # value) #service_instance_security_group = manila-service # ID of flavor, that will be used for service instance creation. Only # used if driver_handles_share_servers=True. (string value) #service_instance_flavor_id = 100 # Name of manila service network. Used only with Neutron. Only used if # driver_handles_share_servers=True. (string value) #service_network_name = manila_service_network # CIDR of manila service network. Used only with Neutron and if # driver_handles_share_servers=True. (string value) #service_network_cidr = 10.254.0.0/16 # This mask is used for dividing service network into subnets, IP # capacity of subnet with this mask directly defines possible amount # of created service VMs per tenant's subnet. Used only with Neutron # and if driver_handles_share_servers=True. (integer value) #service_network_division_mask = 28 # Module path to the Virtual Interface (VIF) driver class. This option # is used only by drivers operating in # `driver_handles_share_servers=True` mode that provision OpenStack # compute instances as share servers. This option is only supported # with Neutron networking. Drivers provided in tree work with Linux # Bridge (manila.network.linux.interface.BridgeInterfaceDriver) and # OVS (manila.network.linux.interface.OVSInterfaceDriver). If the # manila-share service is running on a host that is connected to the # administrator network, a no-op driver # (manila.network.linux.interface.NoopInterfaceDriver) may be used. # (string value) #interface_driver = manila.network.linux.interface.OVSInterfaceDriver # Attach share server directly to share network. Used only with # Neutron and if driver_handles_share_servers=True. 
(boolean value) #connect_share_server_to_tenant_network = false # ID of neutron network used to communicate with admin network, to # create additional admin export locations on. (string value) #admin_network_id = # ID of neutron subnet used to communicate with admin network, to # create additional admin export locations on. Related to # 'admin_network_id'. (string value) #admin_subnet_id = Configuring the right options depends on the network layout of your setup, see next section for more details. Network configurations ---------------------- If using ``DHSS=True``, there are two possible network configurations that can be chosen for share provisioning using this driver: - Service VM (SVM) has one NIC connected to a network that connects to a public router. This is, the service VM will be connected to a static administrative network created beforehand by an administrator. This approach is valid in 'flat' network topologies, where a single Neutron network is defined for all projects (no tenant networks). - Service VM has two NICs, first one connected to service network, second one connected directly to user's network. This is, in a tenant-networks-enabled Neutron deployment, manila will create a dedicated network for the share. Depending on the setup, specific configuration options are required in the manila.conf file. In particular, if you are using only a static administrative network, you need the following: .. code-block:: ini driver_handles_share_servers = True connect_share_server_to_tenant_network = True admin_network_id = admin_subnet_id = # Module path to the Virtual Interface (VIF) driver class. This option # is used only by drivers operating in # `driver_handles_share_servers=True` mode that provision OpenStack # compute instances as share servers. This option is only supported # with Neutron networking. 
Drivers provided in tree work with Linux # Bridge (manila.network.linux.interface.BridgeInterfaceDriver) and # OVS (manila.network.linux.interface.OVSInterfaceDriver). If the # manila-share service is running on a host that is connected to the # administrator network, a no-op driver # (manila.network.linux.interface.NoopInterfaceDriver) may be used. # (string value) interface_driver = manila.network.linux.interface.NoopInterfaceDriver Requirements for service image ------------------------------ - Linux based distro - NFS server - Samba server >=3.2.0, that can be configured by data stored in registry - SSH server - Two net interfaces configured to DHCP (see network approaches) - 'exportfs' and 'net conf' libraries used for share actions - Following files will be used, so if their paths differ one needs to create at least symlinks for them: * /etc/exports (permanent file with NFS exports) * /var/lib/nfs/etab (temporary file with NFS exports used by 'exportfs') * /etc/fstab (permanent file with mounted filesystems) * /etc/mtab (temporary file with mounted filesystems used by 'mount') Supported shared filesystems ---------------------------- - NFS (access by IP) - CIFS (access by IP) Known restrictions ------------------ - One of Nova's configurations only allows 26 shares per server. This limit comes from the maximum number of virtual PCI interfaces that are used for block device attaching. There are 28 virtual PCI interfaces, in this configuration, two of them are used for server needs and other 26 are used for attaching block devices that are used for shares. - Juno version works only with Neutron. Each share should be created with neutron-net and neutron-subnet IDs provided via share-network entity. - Juno version handles security group, flavor, image, keypair for Nova VM and also creates service networks, but does not use availability zones for Nova VMs and volume types for Cinder block devices. 
- Juno version does not use security services data provided with share-network. These data will be just ignored. - Liberty version adds a share extend capability. Share access will be briefly interrupted during an extend operation. - Liberty version adds a share shrink capability, but this capability is not effective because generic driver shrinks only filesystem size and doesn't shrink the size of Cinder volume. - Modifying network-related configuration options, such as ``service_network_cidr`` or ``service_network_division_mask``, after manila has already created some shares using those options is not supported. - One of the limitations that severely affects availability in the cloud is the Single Point of Failure (SPOF) issue. The driver uses a Nova VM as its NAS (NFS/CIFS) server. If/When the server goes down, there is no way to continue serving data. Due to this SPOF, today's open source SVM solutions for multi-tenant manila service do not really constitute a viable alternative to using proprietary, vendor-supplied storage arrays or appliances that combine per-tenant virtualization and solid HA recovery mechanisms. They are useful as objects of reference and study but are not acceptable to operators of real life clouds whose customers will not tolerate having to wait for manual intervention to recover from unpredictable storage data path outages. - The generic driver assumes the manila-share service is running on a node where there is an integration bridge where it can plug in the service VM (nova instance in this case). This condition does not hold in a common deployment topology where manila-share is run on a controller node and networking services are run on a separate dedicated node. Using Windows instances ~~~~~~~~~~~~~~~~~~~~~~~ While the generic driver only supports Linux instances, you may use the Windows SMB driver when Windows VMs are preferred. For more details, please check out the following page: :ref:`windows_smb_driver`. .. 
note:: This driver has been deprecated in 18.0.0 release. The :mod:`manila.share.drivers.generic` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.generic :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.drivers.service_instance` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.service_instance :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/glusterfs_driver.rst0000664000175000017500000001712200000000000022341 0ustar00zuulzuul00000000000000.. Copyright 2015 Red Hat, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. GlusterFS driver ================ GlusterFS driver uses GlusterFS, an open source distributed file system, as the storage backend for serving file shares to manila clients. .. note:: This driver has been deprecated in 18.0.0 release. Supported shared filesystems ---------------------------- - NFS (access by IP) Supported Operations -------------------- - Create share - Delete share - Allow share access (rw) - Deny share access - With volume layout: - Create snapshot - Delete snapshot - Create share from snapshot Requirements ------------ - Install glusterfs-server package, version >= 3.5.x, on the storage backend. 
- Install NFS-Ganesha, version >=2.1, if using NFS-Ganesha as the NFS server for the GlusterFS backend. - Install glusterfs and glusterfs-fuse package, version >=3.5.x, on the manila host. - Establish network connection between the manila host and the storage backend. Manila driver configuration setting ----------------------------------- The following parameters in the manila's configuration file need to be set: - `share_driver` = manila.share.drivers.glusterfs.GlusterfsShareDriver The following configuration parameters are optional: - `glusterfs_nfs_server_type` = - `glusterfs_share_layout` = ; cf. :ref:`glusterfs_layouts` - `glusterfs_path_to_private_key` = - `glusterfs_server_password` = If Ganesha NFS server is used (``glusterfs_nfs_server_type = Ganesha``), then by default the Ganesha server is supposed to run on the manila host and is managed by local commands. If it's deployed somewhere else, then it's managed via ssh, which can be configured by the following parameters: - `glusterfs_ganesha_server_ip` - `glusterfs_ganesha_server_username` - `glusterfs_ganesha_server_password` In lack of ``glusterfs_ganesha_server_password`` ssh access will fall back to key based authentication, using the key specified by ``glusterfs_path_to_private_key``, or, in lack of that, a key at one of the OpenSSH-style default key locations (*~/.ssh/id_{r,d,ecd}sa*). Layouts have also their set of parameters, see :ref:`glusterfs_layouts` about that. .. _glusterfs_layouts: Layouts ------- New in Liberty, multiple share layouts can be used with glusterfs driver. A layout is a strategy of allocating storage from GlusterFS backends for shares. Currently there are two layouts implemented: - `directory mapped layout` (or `directory layout`, or `dir layout` for short): a share is backed by top-level subdirectories of a given GlusterFS volume. Directory mapped layout is the default and backward compatible with Kilo. 
The following setting explicitly specifies its usage: ``glusterfs_share_layout = layout_directory.GlusterfsDirectoryMappedLayout``. Options: - `glusterfs_target`: address of the volume that hosts the directories. If it's of the format `:/`, then the manila host is expected to be part of the GlusterFS cluster of the volume and GlusterFS management happens through locally calling the ``gluster`` utility. If it's of the format `@:/`, then we ssh to `@` to execute ``gluster`` (`` is supposed to have administrative privileges on ``). - `glusterfs_mount_point_base` = (optional; defaults to *$state_path*\ ``/mnt``, where *$state_path* defaults to ``/var/lib/manila``) Limitations: - directory layout does not support snapshot operations. - `volume mapped layout` (or `volume layout`, or `vol layout` for short): a share is backed by a whole GlusterFS volume. Volume mapped layout is new in Liberty. It can be chosen by setting ``glusterfs_share_layout = layout_volume.GlusterfsVolumeMappedLayout``. Options (required): - `glusterfs_servers` - `glusterfs_volume_pattern` Volume mapped layout is implemented as a common backend of the glusterfs and glusterfs-native drivers; see the description of these options in :doc:`glusterfs_native_driver`: :ref:`gluster_native_manila_conf`. Gluster NFS with volume mapped layout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A special configuration choice is :: glusterfs_nfs_server_type = Gluster glusterfs_share_layout = layout_volume.GlusterfsVolumeMappedLayout that is, Gluster NFS used to export whole volumes. All other GlusterFS backend configurations (including GlusterFS set up with glusterfs-native) require the ``nfs.export-volumes = off`` GlusterFS setting. Gluster NFS with volume layout requires ``nfs.export-volumes = on``. ``nfs.export-volumes`` is a *cluster-wide* setting, so a given GlusterFS cluster cannot host a share backend with Gluster NFS + volume layout and other share backend configurations at the same time. 
There is another caveat with ``nfs.export-volumes``: setting it to ``on`` without enough care is a security risk, as the default access control for the volume exports is "allow all". For this reason, while the ``nfs.export-volumes = off`` setting is automatically set by manila for all other share backend configurations, ``nfs.export-volumes = on`` is *not* set by manila in case of a Gluster NFS with volume layout setup. It's left to the GlusterFS admin to make this setting in conjunction with the associated safeguards (that is, for those volumes of the cluster which are not used by manila, access restrictions have to be manually configured through the ``nfs.rpc-auth-{allow,reject}`` options). Known Restrictions ------------------ - The driver does not support network segmented multi-tenancy model, but instead works over a flat network, where the tenants share a network. - If NFS Ganesha is the NFS server used by the GlusterFS backend, then the shares can be accessed by NFSv3 and v4 protocols. However, if Gluster NFS is used by the GlusterFS backend, then the shares can only be accessed by NFSv3 protocol. - All manila shares, which map to subdirectories within a GlusterFS volume, are currently created within a single GlusterFS volume of a GlusterFS storage pool. - The driver does not provide read-only access level for shares. - Assume that share S is exported through Gluster NFS, and tenant machine T has mounted S. If at this point access of T to S is revoked through `access-deny`, the pre-existing mount will be still usable and T will still be able to access the data in S as long as that mount is in place. (This violates the principle *Access deny should always result in immediate loss of access to the share*, see http://lists.openstack.org/pipermail/openstack-dev/2015-July/069109.html.) The :mod:`manila.share.drivers.glusterfs` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.share.drivers.glusterfs :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/glusterfs_native_driver.rst0000664000175000017500000001462200000000000023711 0ustar00zuulzuul00000000000000.. Copyright 2015 Red Hat, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. GlusterFS Native driver ======================= GlusterFS Native driver uses GlusterFS, an open source distributed file system, as the storage backend for serving file shares to manila clients. A manila share is a GlusterFS volume. This driver uses flat-network (share-server-less) model. Instances directly talk with the GlusterFS backend storage pool. The instances use 'glusterfs' protocol to mount the GlusterFS shares. Access to each share is allowed via TLS Certificates. Only the instance which has the TLS trust established with the GlusterFS backend can mount and hence use the share. Currently only 'rw' access is supported. .. note:: This driver has been deprecated in 18.0.0 release. Network Approach ---------------- L3 connectivity between the storage backend and the host running the manila share service should exist. 
Supported shared filesystems ---------------------------- - GlusterFS (share protocol: ``glusterfs``, access by TLS certificates (``cert`` access type)) Multi-tenancy model ------------------- The driver does not support network segmented multi-tenancy model. Instead multi-tenancy is supported using tenant specific TLS certificates. Supported Operations -------------------- - Create share - Delete share - Allow share access (rw) - Deny share access - Create snapshot - Delete snapshot - Create share from snapshot Requirements ------------ - Install glusterfs-server package, version >= 3.6.x, on the storage backend. - Install glusterfs and glusterfs-fuse package, version >=3.6.x, on the manila host. - Establish network connection between the manila host and the storage backend. .. _gluster_native_manila_conf: Manila driver configuration setting ----------------------------------- The following parameters in manila's configuration file need to be set: - `share_driver` = manila.share.drivers.glusterfs.glusterfs_native.GlusterfsNativeShareDriver - `glusterfs_servers` = List of GlusterFS servers which provide volumes that can be used to create shares. The servers are expected to be of distinct Gluster clusters (ie. should not be gluster peers). Each server should be of the form ``[@]``. The optional ``@`` part of the server URI indicates SSH access for cluster management (see related optional parameters below). If it is not given, direct command line management is performed (ie. manila host is assumed to be part of the GlusterFS cluster the server belongs to). - `glusterfs_volume_pattern` = Regular expression template used to filter GlusterFS volumes for share creation. The regex template can contain the #{size} parameter which matches a number (sequence of digits) and the value shall be interpreted as size of the volume in GB. 
Examples: ``manila-share-volume-\d+$``, ``manila-share-volume-#{size}G-\d+$``; with matching volume names, respectively: *manila-share-volume-12*, *manila-share-volume-3G-13*". In latter example, the number that matches ``#{size}``, that is, 3, is an indication that the size of volume is 3G. The following configuration parameters are optional: - `glusterfs_mount_point_base` = - `glusterfs_path_to_private_key` = - `glusterfs_server_password` = Host and backend configuration ------------------------------ - SSL/TLS should be enabled on the I/O path for GlusterFS servers and volumes involved (ie. ones specified in ``glusterfs_servers``), as described in https://docs.gluster.org/en/latest/Administrator%20Guide/SSL/. (Enabling SSL/TLS for the management path is also possible but not recommended currently.) - The manila host should be also configured for GlusterFS SSL/TLS (ie. `/etc/ssl/glusterfs.{pem,key,ca}` files has to be deployed as the above document specifies). - There is a further requirement for the CA-s used: the set of CA-s involved should be consensual, ie. `/etc/ssl/glusterfs.ca` should be identical across all the servers and the manila host. - There is a further requirement for the common names (CN-s) of the certificates used: the certificates of the servers should have a common name starting with `glusterfs-server`, and the certificate of the host should have common name starting with `manila-host`. - To support snapshots, bricks that consist the GlusterFS volumes used by manila should be thinly provisioned LVM ones (cf. https://gluster.readthedocs.org/en/latest/Administrator%20Guide/Managing%20Snapshots/). Known Restrictions ------------------ - GlusterFS volumes are not created on demand. A pre-existing set of GlusterFS volumes should be supplied by the GlusterFS cluster(s), conforming to the naming convention encoded by ``glusterfs_volume_pattern``. 
However, the GlusterFS endpoint is allowed to extend this set any time (so manila and GlusterFS endpoints are expected to communicate volume supply/demand out-of-band). ``glusterfs_volume_pattern`` can include a size hint (with ``#{size}`` syntax), which, if present, requires the GlusterFS end to indicate the size of the shares in GB in the name. (On share creation, manila picks volumes *at least* as big as the requested one.) - Certificate setup (aka trust setup) between instance and storage backend is out of band of manila. - For manila to use GlusterFS volumes, the name of the trashcan directory in GlusterFS volumes must not be changed from the default. The :mod:`manila.share.drivers.glusterfs.glusterfs_native.GlusterfsNativeShareDriver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.glusterfs.glusterfs_native :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/gpfs_driver.rst0000664000175000017500000000656600000000000021274 0ustar00zuulzuul00000000000000.. Copyright 2015 IBM Corp. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. GPFS Driver =========== GPFS driver uses IBM General Parallel File System (GPFS), a high-performance, clustered file system, developed by IBM, as the storage backend for serving file shares to the manila clients. 
Supported shared filesystems ---------------------------- - NFS (access by IP) Supported Operations -------------------- - Create NFS Share - Delete NFS Share - Create Share Snapshot - Delete Share Snapshot - Create Share from a Share Snapshot - Allow NFS Share access * Currently only 'rw' access level is supported - Deny NFS Share access Requirements ------------ - Install GPFS with server license, version >= 2.0, on the storage backend. - Install Kernel NFS or Ganesha NFS server on the storage backend servers. - If using Ganesha NFS, currently NFS Ganesha v1.5 and v2.0 are supported. - Create a GPFS cluster and create a filesystem on the cluster, that will be used to create the manila shares. - Enable quotas for the GPFS file system (`mmchfs -Q yes`). - Establish network connection between the manila host and the storage backend. Manila driver configuration setting ----------------------------------- The following parameters in the manila configuration file need to be set: - `share_driver` = manila.share.drivers.ibm.gpfs.GPFSShareDriver - `gpfs_share_export_ip` = - If the backend GPFS server is not running on the manila host machine, the following options are required to SSH to the remote GPFS backend server: - `gpfs_ssh_login` = and one of the following settings is required to execute commands over SSH: - `gpfs_ssh_private_key` = - `gpfs_ssh_password` = The following configuration parameters are optional: - `gpfs_mount_point_base` = - `gpfs_nfs_server_type` = - `gpfs_nfs_server_list` = - `gpfs_ssh_port` = Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Known Restrictions ------------------ - The driver does not support a segmented-network multi-tenancy model but instead works over a flat network where the tenants share a network. 
- While using remote GPFS node, with Ganesha NFS, 'gpfs_ssh_private_key' for remote login to the GPFS node must be specified and there must be a passwordless authentication already setup between the manila share service and the remote GPFS node. The :mod:`manila.share.drivers.ibm.gpfs` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.ibm.gpfs :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/group_capabilities_and_extra_specs.rst0000664000175000017500000000367000000000000026042 0ustar00zuulzuul00000000000000.. _group_capabilities_and_extra_specs: Group Capabilities and group-specs ================================== Manila Administrators create share group types with :ref:`shared_file_systems_share_types` and group-specs to allow users to request a group type of share group to create. The Administrator chooses a name for the share group type and decides how to communicate the significance of the different share group types in terms that the users should understand or need to know. By design, most of the details of a share group type (the extra- specs) are not exposed to users -- only Administrators. Share group Types ----------------- Refer to the manila client command-line help for information on how to create a share group type and set "share types", "group-spec" key/value pairs for a share group type. Group-Specs ----------- The group specs contains the group capabilities, similar to snapshot_support in share types. Users know what a group can do from group specs. The group specs is an exact match requirement in share group filter (such as ConsistentSnapshotFilter). When the ConsistentSnapshotFilter is enabled (it is enabled by default), the scheduler will only create a share group on a backend that reports capabilities that match the share group type's group-spec keys. 
Common Group Capabilities ------------------------- For group capabilities that apply to multiple backends a common capability can be created. Like all other backend reported group capabilities, these group capabilities can be used verbatim as group_specs in share group types used to create share groups. * `consistent_snapshot_support` - indicates that a backend can enable you to create snapshots at the exact same point in time from multiple shares. The default value of the consistent_snapshot_support capability (if a driver doesn't report it) is None. Administrators can make a share group type use consistent snapshot support by setting this group-spec to 'host'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/hdfs_native_driver.rst0000664000175000017500000000610600000000000022615 0ustar00zuulzuul00000000000000.. Copyright 2015 Intel, Corp. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. HDFS native driver ================== HDFS native driver is a plugin based on the OpenStack manila service, which uses Hadoop distributed file system (HDFS), a distributed file system designed to hold very large amounts of data, and provide high-throughput access to the data. A manila share in this driver is a subdirectory in hdfs root directory. Instances talk directly to the HDFS storage backend with 'hdfs' protocol. 
And access to each share is allowed by user based access type, which is aligned with HDFS ACLs to support access control of multiple users and groups. Network configuration --------------------- The storage backend and manila hosts should be in a flat network, otherwise, the L3 connectivity between them should exist. Supported shared filesystems ---------------------------- - HDFS (authentication by user) Supported Operations -------------------- - Create HDFS share - Delete HDFS share - Allow HDFS Share access * Only support user access type * Support level of access (ro/rw) - Deny HDFS Share access - Create snapshot - Delete snapshot - Create share from snapshot - Extend share Requirements ------------ - Install HDFS package, version >= 2.4.x, on the storage backend - To enable access control, the HDFS file system must have ACLs enabled - Establish network connection between the manila host and storage backend Manila driver configuration --------------------------- - `share_driver` = manila.share.drivers.hdfs.hdfs_native.HDFSNativeShareDriver - `hdfs_namenode_ip` = the IP address of the HDFS namenode, and only single namenode is supported now - `hdfs_namenode_port` = the port of the HDFS namenode service - `hdfs_ssh_port` = HDFS namenode SSH port - `hdfs_ssh_name` = HDFS namenode SSH login name - `hdfs_ssh_pw` = HDFS namenode SSH login password, this parameter is not necessary, if the following `hdfs_ssh_private_key` is configured - `hdfs_ssh_private_key` = Path to the HDFS namenode private key to ssh login Known Restrictions ------------------ - This driver does not support network segmented multi-tenancy model. Instead multi-tenancy is supported by the tenant specific user authentication - Only support for single HDFS namenode in Kilo release The :mod:`manila.share.drivers.hdfs.hdfs_native` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.share.drivers.hdfs.hdfs_native :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/hitachi_hnas_driver.rst0000664000175000017500000005214300000000000022747 0ustar00zuulzuul00000000000000.. Copyright 2016 Hitachi Data Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================================================= Hitachi NAS Platform File Services Driver for OpenStack ======================================================= ------------------ Driver Version 3.0 ------------------ Hitachi NAS Platform Storage Requirements ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This Hitachi NAS Platform File Services Driver for OpenStack provides support for Hitachi NAS Platform (HNAS) models 3080, 3090, 4040, 4060, 4080 and 4100 with NAS OS 12.2 or higher. Before configuring the driver, ensure the HNAS has at least: - 1 storage pool (span) configured. - 1 EVS configured. - 1 file system in this EVS, created without replication target option and should be in mounted state. It is recommended to disable auto-expansion, because the scheduler uses the current free space reported by the file system when creating shares. - 1 Management User configured with "supervisor" permission level. - Hitachi NAS Management interface should be reachable from manila-share node. 
Also, if the driver is going to create CIFS shares, either LDAP servers or domains must be configured previously in HNAS to provide the users and groups. Supported Operations ~~~~~~~~~~~~~~~~~~~~ The following operations are supported in this version of Hitachi NAS Platform File Services Driver for OpenStack: - Create and delete CIFS and NFS shares; - Extend and shrink shares; - Manage rules to shares (allow/deny access); - Allow and deny share access; - ``IP`` access type supported for ``NFS`` shares; - ``User`` access type supported for ``CIFS`` shares; - Both ``RW`` and ``RO`` access level are supported for NFS and CIFS shares; - Manage and unmanage shares; - Create and delete snapshots; - Create shares from snapshots. Driver Configuration ~~~~~~~~~~~~~~~~~~~~ This document contains the installation and user guide of the Hitachi NAS Platform File Services Driver for OpenStack. Although mentioning some Shared File Systems service operations and HNAS commands, both are not in the scope of this document. Please refer to their own guides for details. Before configuring the driver, make sure that the nodes running the manila-share service have access to the HNAS management port, and compute and network nodes have access to the data ports (EVS IPs or aggregations). The driver configuration can be summarized in the following steps: #. Configure HNAS parameters on ``manila.conf``; #. Prepare the network ensuring all OpenStack-HNAS connections mentioned above; #. Configure/create share type; #. Restart the services; #. Configure OpenStack networks. 
Step 1 - HNAS Parameters Configuration ************************************** The following parameters need to be configured in the [DEFAULT] section of ``/etc/manila/manila.conf``: +----------------------------+------------------------------------------------+ | **Option** | **Description** | +============================+================================================+ | enabled_share_backends | Name of the section on ``manila.conf`` used to | | | specify a backend. For example: | | | *enabled_share_backends = hnas1* | +----------------------------+------------------------------------------------+ | enabled_share_protocols | Specify a list of protocols to be allowed for | | | share creation. This driver version supports | | | NFS and/or CIFS. | +----------------------------+------------------------------------------------+ The following parameters need to be configured in the [backend] section of ``/etc/manila/manila.conf``: +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | **Option** | **Description** | +=================================================+=====================================================================================================+ | share_backend_name | A name for the backend. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | share_driver | Python module path. For this driver **this must be**: | | | *manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver* | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | driver_handles_share_servers | Driver working mode. For this driver **this must be**: | | | *False*. 
| +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_ip | HNAS management interface IP for communication between manila-share node and HNAS. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_user | This field is used to provide user credential to HNAS. Provided management user must have | | | "supervisor" level. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_password | This field is used to provide password credential to HNAS. | | | Either hitachi_hnas_password or hitachi_hnas_ssh_private_key must be set. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_ssh_private_key | Set this parameter with RSA/DSA private key path to allow the driver to connect into HNAS. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_evs_id | ID from EVS which this backend is assigned to (ID can be listed by CLI "evs list" | | | or EVS Management in HNAS Interface). | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_evs_ip | EVS IP for mounting shares (this can be listed by CLI "evs list" or EVS Management in HNAS | | | interface). 
| +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_file_system_name | Name of the file system in HNAS, located in the specified EVS. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_cluster_admin_ip0* | If HNAS is in a multi-farm (one SMU managing multiple HNAS) configuration, set this parameter with | | | the IP of the cluster's admin node. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_stalled_job_timeout* | Tree-clone-job commands are used to create snapshots and create shares from snapshots. | | | This parameter sets a timeout (in seconds) to wait for jobs to complete. Default value is | | | 30 seconds. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_driver_helper* | Python module path for the driver helper. For this driver, it should use (default value): | | | *manila.share.drivers.hitachi.hnas.ssh.HNASSSHBackend* | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | hitachi_hnas_allow_cifs_snapshot_while_mounted* | By default, CIFS snapshots are not allowed to be taken while the share has clients connected | | | because point-in-time replica cannot be guaranteed for all files. This parameter can be set | | | to *True* to allow snapshots to be taken while the share has clients connected. **WARNING**: | | | Setting this parameter to *True* might cause inconsistent snapshots on CIFS shares. Default | | | value is *False*. 
| +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ \* Non mandatory parameters. Below is an example of a valid configuration of HNAS driver: .. code-block:: ini [DEFAULT]`` ... enabled_share_backends = hitachi1 enabled_share_protocols = CIFS,NFS ... [hitachi1] share_backend_name = HITACHI1 share_driver = manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver driver_handles_share_servers = False hitachi_hnas_ip = 172.24.44.15 hitachi_hnas_user = supervisor hitachi_hnas_password = supervisor hitachi_hnas_evs_id = 1 hitachi_hnas_evs_ip = 10.0.1.20 hitachi_hnas_file_system_name = FS-Manila Step 2 - Prepare the Network **************************** In the driver mode used by Hitachi NAS Platform File Services Driver for OpenStack, driver_handles_share_servers (DHSS) as False, the driver does not handle network configuration, it is up to the administrator to configure it. It is mandatory that HNAS management interface is reachable from a manila-share node through admin network, while the selected EVS data interface is reachable from OpenStack Cloud, such as through neutron flat networking. Here is a step-by-step of an example configuration: | **Manila-Share Node:** | **eth0**: Admin Network, can ping HNAS management interface. | **eth1**: Data Network, can ping HNAS EVS IP (data interface). This interface is only required if you plan to use Share Migration. | **Network Node and Compute Nodes:** | **eth0**: Admin Network, can ping HNAS management interface. | **eth1**: Data Network, can ping HNAS EVS IP (data interface). The following image represents the described scenario: .. image:: /images/rpc/hds_network.jpg :width: 60% Run in **Network Node**: .. 
code-block:: console $ sudo ifconfig eth1 0 $ sudo ovs-vsctl add-br br-eth1 $ sudo ovs-vsctl add-port br-eth1 eth1 $ sudo ifconfig eth1 up Edit */etc/neutron/plugins/ml2/ml2_conf.ini* (default directory), change the following settings as follows in their respective tags: .. code-block:: ini [ml2] type_drivers = flat,vlan,vxlan,gre mechanism_drivers = openvswitch [ml2_type_flat] flat_networks = physnet1,physnet2 [ml2_type_vlan] network_vlan_ranges = physnet1:1000:1500,physnet2:2000:2500 [ovs] bridge_mappings = physnet1:br-ex,physnet2:br-eth1 You may have to repeat the last line above in another file in the Compute Node, if it exists is located in: */etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini*. Create a route in HNAS to the tenant network. Please make sure multi-tenancy is enabled and routes are configured per EVS. Use the command "route-net-add" in HNAS console, where the network parameter should be the tenant's private network, while the gateway parameter should be the flat network gateway and the "console-context --evs" parameter should be the ID of EVS in use, such as in the following example: .. code-block:: console $ console-context --evs 3 route-net-add --gateway 192.168.1.1 10.0.0.0/24 Step 3 - Share Type Configuration ********************************* Shared File Systems service requires that the share type includes the driver_handles_share_servers extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers capability. For the Hitachi NAS Platform File Services Driver for OpenStack this must be set to False. .. 
code-block:: console $ manila type-create hitachi False Additionally, the driver also reports the following common capabilities that can be specified in the share type: +----------------------------+----------------------------------------------+ | **Capability** | **Description** | +============================+==============================================+ | thin_provisioning = True | All shares created on HNAS are always thin | | | provisioned. So, if you set it, the value | | | **must be**: *True*. | +----------------------------+----------------------------------------------+ | dedupe = True/False | HNAS supports deduplication on its file | | | systems and the driver will report | | | *dedupe=True* if it is enabled on the file | | | system being used. To use it, go to HNAS and | | | enable the feature on the file system used. | +----------------------------+----------------------------------------------+ To specify a common capability on the share type, use the *type-key* command, for example: .. code-block:: console $ manila type-key hitachi set dedupe=True Step 4 - Restart the Services ***************************** Restart all Shared File Systems services (manila-share, manila-scheduler and manila-api) and neutron services (neutron-\*). This step is specific to your environment. If you are running in devstack for example, you have to log into screen (``screen -r``), stop the process (``Ctrl^C``) and run it again. If you are running it in a distro like RHEL, a systemd command (for example *systemctl restart manila-api*) is used to restart the service. 
Step 5 - Configure OpenStack Networks ************************************* In Neutron Controller it is necessary to create a network, a subnet and to add this subnet interface to a router: Create a network to the given tenant (demo), providing the DEMO_ID (this can be fetched using *keystone tenant-list*), a name for the network, the name of the physical network over which the virtual network is implemented and the type of the physical mechanism by which the virtual network is implemented: .. code-block:: console $ neutron net-create --tenant-id hnas_network --provider:physical_network=physnet2 --provider:network_type=flat Create a subnet to same tenant (demo), providing the DEMO_ID (this can be fetched using *keystone tenant-list*), the gateway IP of this subnet, a name for the subnet, the network ID created on previously step (this can be fetched using *neutron net-list*) and CIDR of subnet: .. code-block:: console $ neutron subnet-create --tenant-id --gateway --name hnas_subnet Finally, add the subnet interface to a router, providing the router ID and subnet ID created on previously step (can be fetched using *neutron subnet-list*): .. code-block:: console $ neutron router-interface-add Manage and Unmanage Shares ~~~~~~~~~~~~~~~~~~~~~~~~~~ Manila has the ability to manage and unmanage shares. If there is a share in the storage and it is not in OpenStack, you can manage that share and use it as a manila share. Hitachi NAS Platform File Services Driver for OpenStack use virtual-volumes (V-VOLs) to create shares. Only V-VOLs with a quota limit can be used by the driver, also, they must be created or moved inside the directory '/shares/' and exported (as NFS or CIFS shares). The unmanage operation only unlinks the share from OpenStack, preserving all data in the share. To **manage** shares use: .. 
code-block:: console $ manila manage [--name ] [--description ] [--share_type ] [--driver_options [ [ ...]]] Where: +------------------+----------------------------------------------------------+ | Parameter | Description | +==================+==========================================================+ | | Manila host, backend and share name. For example | | service_host | ubuntu\@hitachi1#HITACHI1. The available hosts can be | | | listed with the command: *manila pool-list* (admin only).| +------------------+---------------------+------------------------------------+ | protocol | NFS or CIFS protocols are currently supported. | +------------------+----------------------------------------------------------+ | export_path | The export path of the share. | | | For example: *172.24.44.31:/shares/some_share_id* | +------------------+----------------------------------------------------------+ To **unmanage** a share use: .. code-block:: console $ manila unmanage Where: +------------------+---------------------------------------------------------+ | Parameter | Description | +==================+=========================================================+ | share_id | Manila ID of the share to be unmanaged. This list can | | | be fetched with: *manila list*. | +------------------+---------------------+-----------------------------------+ Additional Notes ~~~~~~~~~~~~~~~~ - HNAS has some restrictions about the number of EVSs, file systems, virtual-volumes and simultaneous SSC connections. Check the manual specification for your system. - Shares and snapshots are thin provisioned. It is reported to manila only the real used space in HNAS. Also, a snapshot does not initially take any space in HNAS, it only stores the difference between the share and the snapshot, so it grows when share data is changed. - Admins should manage the tenant's quota (*manila quota-update*) to control the backend usage. 
- By default, CIFS snapshots are disabled when the share is mounted, since it uses tree-clone to create snapshots and does not guarantee point-in-time replicas when the source directory tree is changing, also, changing permissions to *read-only* does not affect already mounted shares. So, enable it if your source directory can be static while taking snapshots. Currently, it affects only CIFS protocol. For more information check the tree-clone feature in HNAS with *man tree-clone*. The :mod:`manila.share.drivers.hitachi.hnas.driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.hitachi.hnas.driver :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/hpe_3par_driver.rst0000664000175000017500000002665200000000000022034 0ustar00zuulzuul00000000000000.. Copyright 2015 Hewlett Packard Development Company, L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. HPE 3PAR Driver for OpenStack Manila ==================================== The HPE 3PAR manila driver provides NFS and CIFS shared file systems to OpenStack using HPE 3PAR's File Persona capabilities. .. note:: In OpenStack releases prior to Mitaka this driver was called the HP 3PAR driver. 
The Liberty configuration reference can be found at: http://docs.openstack.org/liberty/config-reference/content/hp-3par-share-driver.html For information on HPE 3PAR Driver for OpenStack Manila, refer to `content kit page `_. Supported Operations -------------------- The following operations are supported with HPE 3PAR File Persona: - Create/delete NFS and CIFS shares * Shares are not accessible until access rules allow access - Allow/deny NFS share access * IP access rules are required for NFS share access - Allow/deny CIFS share access * CIFS shares require user access rules. * User access requires a 3PAR local or AD user (LDAP is not yet supported) - Create/delete snapshots - Create shares from snapshots Share networks are not supported. Shares are created directly on the 3PAR without the use of a share server or service VM. Network connectivity is setup outside of manila. Requirements ------------ On the system running the manila share service: - python-3parclient 4.2.0 or newer from PyPI. 
On the HPE 3PAR array: - HPE 3PAR Operating System software version 3.2.1 MU3 or higher - The array class and hardware configuration must support File Persona Pre-Configuration on the HPE 3PAR --------------------------------- - HPE 3PAR File Persona must be initialized and started (:code:`startfs`) - A File Provisioning Group (FPG) must be created for use with manila - A Virtual File Server (VFS) must be created for the FPG - The VFS must be configured with an appropriate share export IP address - A local user in the Administrators group is needed for CIFS shares Backend Configuration --------------------- The following parameters need to be configured in the manila configuration file for the HPE 3PAR driver: - `share_backend_name` = - `share_driver` = manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver - `driver_handles_share_servers` = False - `hpe3par_fpg` = - `hpe3par_share_ip_address` = - `hpe3par_san_ip` = - `hpe3par_api_url` = <3PAR WS API Server URL> - `hpe3par_username` = <3PAR username with the 'edit' role> - `hpe3par_password` = <3PAR password for the user specified in hpe3par_username> - `hpe3par_san_login` = - `hpe3par_san_password` = - `hpe3par_debug` = - `hpe3par_cifs_admin_access_username` = - `hpe3par_cifs_admin_access_password` = - `hpe3par_cifs_admin_access_domain` = - `hpe3par_share_mount_path` = The `hpe3par_share_ip_address` must be a valid IP address for the configured FPG's VFS. This IP address is used in export locations for shares that are created. Networking must be configured to allow connectivity from clients to shares. `hpe3par_cifs_admin_access_username` and `hpe3par_cifs_admin_access_password` must be provided to delete nested CIFS shares. If they are not, the share contents will not be deleted. `hpe3par_cifs_admin_access_domain` and `hpe3par_share_mount_path` can be provided for additional configuration. Restart of :term:`manila-share` service is needed for the configuration changes to take effect. 
Backend Configuration for AD user --------------------------------- The following parameters need to be configured through HPE 3PAR CLI to access file share using AD. Set authentication parameters:: $ setauthparam ldap-server IP_ADDRESS_OF_AD_SERVER $ setauthparam binding simple $ setauthparam user-attr AD_DOMAIN_NAME\\ $ setauthparam accounts-dn CN=Users,DC=AD,DC=DOMAIN,DC=NAME $ setauthparam account-obj user $ setauthparam account-name-attr sAMAccountName $ setauthparam memberof-attr memberOf $ setauthparam super-map CN=AD_USER_GROUP,DC=AD,DC=DOMAIN,DC=NAME Verify new authentication parameters set as expected:: $ showauthparam Verify AD users set as expected:: $ checkpassword AD_USER Command result should show ``user AD_USER is authenticated and authorized`` message on successful configuration. Add 'ActiveDirectory' in authentication providers list:: $ setfs auth ActiveDirectory Local Verify authentication provider list shows 'ActiveDirectory':: $ showfs -auth Set/Add AD user on FS:: $ setfs ad –passwd PASSWORD AD_USER AD_DOMAIN_NAME Verify FS user details:: $ showfs -ad Example of using AD user to access CIFS share --------------------------------------------- Pre-requisite: - Share type should be configured for 3PAR backend Create a CIFS file share with 2GB of size:: $ manila create --name FILE_SHARE_NAME --share-type SHARE_TYPE CIFS 2 Check file share created as expected:: $ manila show FILE_SHARE_NAME Configuration to provide share access to AD user:: $ manila access-allow FILE_SHARE_NAME user AD_DOMAIN_NAME\\\\AD_USER --access-level rw Check users permission set as expected:: $ manila access-list FILE_SHARE_NAME The AD_DOMAIN_NAME\\AD_USER must be listed in access_to column and should show active in its state column as result of this command. Network Approach ---------------- Connectivity between the storage array (SSH/CLI and WSAPI) and the manila host is required for share management. 
Connectivity between the clients and the VFS is required for mounting and using the shares. This includes: - Routing from the client to the external network - Assigning the client an external IP address (e.g., a floating IP) - Configuring the manila host networking properly for IP forwarding - Configuring the VFS networking properly for client subnets Share Types ----------- When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the `default_share_type` set in the manila configuration file is used. Manila requires that the share type includes the `driver_handles_share_servers` extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers (share networks) capability. For the HPE 3PAR driver, this must be set to False. Another common manila extra-spec used to determine where a share is created is `share_backend_name`. When this extra-spec is defined in the share type, the share will be created on a backend with a matching share_backend_name. The HPE 3PAR driver automatically reports capabilities based on the FPG used for each backend. Share types with extra specs can be created by an administrator to control which share types are allowed to use FPGs with or without specific capabilities. The following extra-specs are used with the capabilities filter and the HPE 3PAR driver: - `hpe3par_flash_cache` = ' True' or ' False' - `thin_provisioning` = ' True' or ' False' - `dedupe` = ' True' or ' False' `hpe3par_flash_cache` will be reported as True for backends that have 3PAR's Adaptive Flash Cache enabled. `thin_provisioning` will be reported as True for backends that use thin provisioned volumes. FPGs that use fully provisioned volumes will report False. Backends that use thin provisioning also support manila's over-subscription feature. `dedupe` will be reported as True for backends that use deduplication technology. 
Scoped extra-specs are used to influence vendor-specific implementation details. Scoped extra-specs use a prefix followed by a colon. For HPE 3PAR these extra-specs have a prefix of `hpe3par`. For HP 3PAR these extra-specs have a prefix of `hp3par`. The following HPE 3PAR extra-specs are used when creating CIFS (SMB) shares: - `hpe3par:smb_access_based_enum` = true or false - `hpe3par:smb_continuous_avail` = true or false - `hpe3par:smb_cache` = off, manual, optimized or auto `smb_access_based_enum` (Access Based Enumeration) specifies if users can see only the files and directories to which they have been allowed access on the shares. The default is `false`. `smb_continuous_avail` (Continuous Availability) specifies if SMB3 continuous availability features should be enabled for this share. If not specified, the default is `true`. This setting will be ignored with hp3parclient 3.2.1 or earlier. `smb_cache` specifies client-side caching for offline files. Valid values are: * `off`: The client must not cache any files from this share. The share is configured to disallow caching. * `manual`: The client must allow only manual caching for the files open from this share. * `optimized`: The client may cache every file that it opens from this share. Also, the client may satisfy the file requests from its local cache. The share is configured to allow automatic caching of programs and documents. * `auto`: The client may cache every file that it opens from this share. The share is configured to allow automatic caching of documents. * If this is not specified, the default is `manual`. 
The following HPE 3PAR extra-specs are used when creating NFS shares: - `hpe3par:nfs_options` = Comma separated list of NFS export options The NFS export options have the following limitations: * `ro` and `rw` are not allowed (manila will determine the read-only option) * `no_subtree_check` and `fsid` are not allowed per HPE 3PAR CLI support * `(in)secure` and `(no_)root_squash` are not allowed because the HPE 3PAR driver controls those settings All other NFS options are forwarded to the HPE 3PAR as part of share creation. The HPE 3PAR will do additional validation at share creation time. Refer to HPE 3PAR CLI help for more details. Delete Nested Shares -------------------- When a nested share is deleted (nested shares will be created when ``hpe_3par_fstore_per_share`` is set to ``False``), the file tree also attempts to be deleted. With NFS shares, there is no additional configuration that needs to be done. For CIFS shares, ``hpe3par_cifs_admin_access_username`` and ``hpe3par_cifs_admin_access_password`` must be provided. If they are omitted, the original functionality is honored and the file tree remains untouched. ``hpe3par_cifs_admin_access_domain`` and ``hpe3par_share_mount_path`` can also be specified to create further customization. The :mod:`manila.share.drivers.hpe.hpe_3par_driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.hpe.hpe_3par_driver :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/huawei_nas_driver.rst0000664000175000017500000002245200000000000022450 0ustar00zuulzuul00000000000000.. Copyright (c) 2015 Huawei Technologies Co., Ltd. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Huawei Driver ============= Huawei NAS Driver is a plugin based the OpenStack manila service. The Huawei NAS Driver can be used to provide functions such as the share and snapshot for virtual machines(instances) in OpenStack. Huawei NAS Driver enables the OceanStor V3 series V300R002 storage system to provide only network filesystems for OpenStack. Requirements ------------ - The OceanStor V3 series V300R002 storage system. - The following licenses should be activated on V3 for File: * CIFS * NFS * HyperSnap License (for snapshot) Supported Operations -------------------- The following operations is supported on V3 storage: - Create CIFS/NFS Share - Delete CIFS/NFS Share - Allow CIFS/NFS Share access * IP and USER access types are supported for NFS(ro/rw). * Only USER access type is supported for CIFS(ro/rw). - Deny CIFS/NFS Share access - Create snapshot - Delete snapshot - Manage CIFS/NFS share - Support pools in one backend - Extend share - Shrink share - Support multi RestURLs() - Support multi-tenancy - Ensure share - Create share from snapshot - Support QoS Pre-Configurations on Huawei ---------------------------- 1. Create a driver configuration file. The driver configuration file name must be the same as the manila_huawei_conf_file item in the manila_conf configuration file. 2. Configure Product. Product indicates the storage system type. 
For the OceanStor V3 series V300R002 storage systems, the driver configuration file is as follows: :: V3 x.x.x.x abc;CTE0.A.H1 https://x.x.x.x:8088/deviceManager/rest/; https://x.x.x.x:8088/deviceManager/rest/ xxxxxxxxx xxxxxxxxx xxxxxxxxx 64 3 60 x.x.x.x xxxxxxxxx xxxxxxxxx - `Product` is a type of a storage product. Set it to `V3`. - `LogicalPortIP` is an IP address of the logical port. - `Port` is a port name list of bond port or ETH port, used to create vlan and logical port. Multi Ports can be configured in (separated by ";"). If is not configured, then will choose an online port on the array. - `RestURL` is an access address of the REST interface. Multi RestURLs can be configured in (separated by ";"). When one of the RestURL failed to connect, driver will retry another automatically. - `UserName` is a user name of an administrator. - `UserPassword` is a password of an administrator. - `StoragePool` is a name of a storage pool to be used. - `SectorSize` is the size of the disk blocks, optional value can be "4", "8", "16", "32" or "64", and the units is KB. If "sectorsize" is configured in both share_type and xml file, the value of sectorsize in the share_type will be used. If "sectorsize" is configured in neither share_type nor xml file, huawei storage backends will provide a default value(64) when creating a new share. - `WaitInterval` is the interval time of querying the file system status. - `Timeout` is the timeout period for waiting command execution of a device to complete. - `NFSClient\IP` is the backend IP in admin network to use for mounting NFS share. - `CIFSClient\UserName` is the backend user name in admin network to use for mounting CIFS share. - `CIFSClient\UserPassword` is the backend password in admin network to use for mounting CIFS share. Backend Configuration --------------------- Modify the `manila.conf` manila configuration file and add share_driver and manila_huawei_conf_file items. 
Example for configuring a storage system: - `share_driver` = manila.share.drivers.huawei.huawei_nas.HuaweiNasDriver - `manila_huawei_conf_file` = /etc/manila/manila_huawei_conf.xml - `driver_handles_share_servers` = True or False .. note:: - If `driver_handles_share_servers` is True, the driver will choose a port in to create vlan and logical port for each tenant network. And the share type with the DHSS extra spec should be set to True when creating shares. - If `driver_handles_share_servers` is False, then will use the IP in . Also the share type with the DHSS extra spec should be set to False when creating shares. Restart of manila-share service is needed for the configuration changes to take effect. Share Types ----------- When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the `default_share_type` set in the manila configuration file is used. Manila requires that the share type includes the `driver_handles_share_servers` extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers (share networks) capability. For the Huawei driver, this must be set to False. To create a share on a backend with a specific type of disks, include the `huawei_disk_type` extra-spec in the share type. Valid values for this extra-spec are 'ssd', 'sas', 'nl_sas' or 'mix'. This share will be created on a backend with a matching disk type. Another common manila extra-spec used to determine where a share is created is `share_backend_name`. When this extra-spec is defined in the share type, the share will be created on a backend with a matching share_backend_name. Manila "share types" may contain qualified extra-specs, -extra-specs that have significance for the backend driver and the CapabilityFilter. 
This commit makes the Huawei driver report the following boolean capabilities: - capabilities:dedupe - capabilities:compression - capabilities:thin_provisioning - capabilities:huawei_smartcache * huawei_smartcache:cachename - capabilities:huawei_smartpartition * huawei_smartpartition:partitionname - capabilities:qos * qos:maxIOPS * qos:minIOPS * qos:minbandwidth * qos:maxbandwidth * qos:latency * qos:iotype - capabilities:huawei_sectorsize The scheduler will choose a host that supports the needed capability when the CapabilityFilter is used and a share type uses one or more of the following extra-specs: - capabilities:dedupe=' True' or ' False' - capabilities:compression=' True' or ' False' - capabilities:thin_provisioning=' True' or ' False' - capabilities:huawei_smartcache=' True' or ' False' * huawei_smartcache:cachename=test_cache_name - capabilities:huawei_smartpartition=' True' or ' False' * huawei_smartpartition:partitionname=test_partition_name - capabilities:qos=' True' or ' False' * qos:maxIOPS=100 * qos:minIOPS=10 * qos:maxbandwidth=100 * qos:minbandwidth=10 * qos:latency=10 * qos:iotype=0 - capabilities:huawei_sectorsize=' True' or ' False' * huawei_sectorsize:sectorsize=4 - huawei_disk_type='ssd' or 'sas' or 'nl_sas' or 'mix' `thin_provisioning` will be reported as [True, False] for Huawei backends. `dedupe` will be reported as [True, False] for Huawei backends. `compression` will be reported as [True, False] for Huawei backends. `huawei_smartcache` will be reported as [True, False] for Huawei backends. Adds SSDs into a high-speed cache pool and divides the pool into multiple cache partitions to cache hotspot data in random and small read I/Os. `huawei_smartpartition` will be reported as [True, False] for Huawei backends. Add share to the smartpartition named 'test_partition_name'. Allocates cache resources based on service characteristics, ensuring the quality of critical services. 
`qos` will be reported as True for backends that use QoS (Quality of Service) specification. `huawei_sectorsize` will be reported as [True, False] for Huawei backends. `huawei_disk_type` will be reported as "ssd", "sas", "nl_sas" or "mix" for Huawei backends. Restrictions ------------ The Huawei driver has the following restrictions: - IP and USER access types are supported for NFS. - Only LDAP domain is supported for NFS. - Only USER access type is supported for CIFS. - Only AD domain is supported for CIFS. The :mod:`manila.share.drivers.huawei.huawei_nas` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.huawei.huawei_nas :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/index.rst0000664000175000017500000000733400000000000020063 0ustar00zuulzuul00000000000000.. _shared_file_systems_intro: =========== Admin Guide =========== Shared File Systems service provides a set of services for management of shared file systems in a multi-project cloud environment. The service resembles OpenStack block-based storage management from the OpenStack Block Storage service project. With the Shared File Systems service, you can create a remote file system, mount the file system on your instances, and then read and write data from your instances to and from your file system. The Shared File Systems service serves same purpose as the Amazon Elastic File System (EFS) does. The Shared File Systems service can run in a single-node or multiple node configuration. The Shared File Systems service can be configured to provision shares from one or more back ends, so it is required to declare at least one back end. Shared File System service contains several configurable components. 
It is important to understand these components: * Share networks * Shares * Multi-tenancy * Back ends The Shared File Systems service consists of four types of services, most of which are similar to those of the Block Storage service: - ``manila-api`` - ``manila-data`` - ``manila-scheduler`` - ``manila-share`` Installation of first three - ``manila-api``, ``manila-data``, and ``manila-scheduler`` is common for almost all deployments. But configuration of ``manila-share`` is backend-specific and can differ from deployment to deployment. .. toctree:: :maxdepth: 1 shared-file-systems-key-concepts.rst shared-file-systems-share-management.rst shared-file-systems-share-types.rst shared-file-systems-quotas.rst shared-file-systems-share-group-types.rst shared-file-systems-share-groups.rst shared-file-systems-snapshots.rst shared-file-systems-share-server-management.rst shared-file-systems-security-services.rst shared-file-systems-share-migration.rst shared-file-systems-share-replication.rst shared-file-systems-multi-backend.rst shared-file-systems-healthcheck.rst shared-file-systems-networking.rst shared-file-systems-troubleshoot.rst shared-file-systems-profiling.rst shared-file-systems-upgrades.rst shared-file-systems-share-revert-to-snapshot.rst shared-file-systems-share-server-migration.rst share_mount_point_name.rst share_back_ends_feature_support_mapping capabilities_and_extra_specs group_capabilities_and_extra_specs export_location_metadata shared-file-systems-share-backup-management.rst shared-file-systems-services-manage.rst Supported share back ends ------------------------- The manila share service must be configured to use drivers for one or more storage back ends, as described in general terms below. See the drivers section in the `Configuration Reference `_ for detailed configuration options for each back end. .. 
toctree:: :maxdepth: 3 container_driver zfs_on_linux_driver netapp_cluster_mode_driver emc_vnx_driver dell_emc_powerscale_driver ../configuration/shared-file-systems/drivers/dell-emc-unity-driver ../configuration/shared-file-systems/drivers/dell-emc-powerstore-driver ../configuration/shared-file-systems/drivers/dell-emc-powerflex-driver generic_driver glusterfs_driver glusterfs_native_driver ../configuration/shared-file-systems/drivers/cephfs_driver gpfs_driver huawei_nas_driver hdfs_native_driver hitachi_hnas_driver hpe_3par_driver infortrend_driver macrosan_driver purestorage_flashblade_driver tegile_driver nexentastor5_driver ../configuration/shared-file-systems/drivers/windows-smb-driver zadara_driver ../configuration/shared-file-systems/drivers/vastdata_driver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/infortrend_driver.rst0000664000175000017500000000613400000000000022476 0ustar00zuulzuul00000000000000.. Copyright (c) 2019 Infortrend Technologies Co., Ltd. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Infortrend Driver for OpenStack Manila ====================================== The `Infortrend `__ Manila driver provides NFS and CIFS shared file systems to Openstack. Requirements ------------ - The EonStor GS/GSe series Fireware version 139A23 Supported shared filesystems and operations ------------------------------------------- This driver supports NFS and CIFS shares. 
The following operations are supported: - Create CIFS/NFS Share - Delete CIFS/NFS Share - Allow CIFS/NFS Share access * Only IP access type is supported for NFS (ro/rw). * Only USER access type is supported for CIFS (ro/rw). - Deny CIFS/NFS Share access - Manage a share. - Unmanage a share. - Extend a share. - Shrink a share. Backend Configuration --------------------- The following parameters need to be configured in the manila configuration file for the Infortrend driver: - `share_backend_name` = - `share_driver` = manila.share.drivers.infortrend.driver.InfortrendNASDriver - `driver_handles_share_servers` = False - `infortrend_nas_ip` = - `infortrend_nas_user` = - `infortrend_nas_password` = - `infortrend_share_pools` = - `infortrend_share_channels` = Share Types ----------- When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the `default_share_type` set in the manila configuration file is used. Manila requires that the share type includes the `driver_handles_share_servers` extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers (share networks) capability. For the Infortrend driver, this must be set to False. Back-end configuration example ------------------------------ .. 
code-block:: ini [DEFAULT] enabled_share_backends = ift-manila enabled_share_protocols = NFS, CIFS [ift-manila] share_backend_name = ift-manila share_driver = manila.share.drivers.infortrend.driver.InfortrendNASDriver driver_handles_share_servers = False infortrend_nas_ip = FAKE_IP infortrend_nas_user = FAKE_USER infortrend_nas_password = FAKE_PASS infortrend_share_pools = pool-1, pool-2 infortrend_share_channels = 0, 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/macrosan_driver.rst0000664000175000017500000000662600000000000022135 0ustar00zuulzuul00000000000000.. Copyright (c) 2022 Macrosan Technologies Co., Ltd. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================================== Macrosan Driver for OpenStack Manila ==================================== The `Macrosan `__ driver provides NFS and CIFS shared file systems to Openstack. Requirements ------------ - The following service should be enabled on NAS system: * CIFS * NFS Supported Operations -------------------- The following operations are supported: - Create CIFS/NFS Share - Delete CIFS/NFS Share - Allow CIFS/NFS Share access * Only IP access type is supported for NFS (ro/rw). * Only USER access type is supported for CIFS (ro/rw). - Deny CIFS/NFS Share access - Extend a share. - Shrink a share. 
Backend Configuration --------------------- The following parameters need to be configured in the [DEFAULT] section of manila configuration (/etc/manila/manila.conf): - `enabled_share_backends` - Name of the section on manila.conf used to specify a backend i.e. *enabled_share_backends = macrosan* - `enabled_share_protocols` - Specify a list of protocols to be allowed for share creation. The VPSA driver support the following options: *NFS* or *CIFS* or *NFS, CIFS* The following parameters need to be configured in the [backend] section of manila configuration (/etc/manila/manila.conf): - `share_backend_name` = - `share_driver` = manila.share.drivers.macrosan.macrosan_nas.MacrosanNasDriver - `driver_handles_share_servers` = False - `macrosan_nas_ip` = - `macrosan_nas_port` = - `macrosan_nas_user` = - `macrosan_nas_password` = - `macrosan_share_pools` = Share Types ----------- When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the `default_share_type` set in the manila configuration file is used. Manila requires that the share type includes the `driver_handles_share_servers` extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers (share networks) capability. For the Macrosan driver, this must be set to False. Back-end configuration example ------------------------------ .. 
code-block:: ini [DEFAULT] enabled_share_backends = macrosan enabled_share_protocols = NFS, CIFS [macrosan] share_backend_name = MACROSAN share_driver = manila.share.drivers.macrosan.macrosan_nas.MacrosanNasDriver driver_handles_share_servers = False macrosan_nas_ip = FAKE_IP macrosan_nas_port = 8443 macrosan_nas_user = FAKE_USER macrosan_nas_password = FAKE_PASSWORD macrosan_share_pools = fake_pool1, fake_pool2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/netapp_cluster_mode_driver.rst0000664000175000017500000001407500000000000024363 0ustar00zuulzuul00000000000000.. Copyright 2014 Mirantis Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NetApp Clustered Data ONTAP =========================== The Shared File Systems service can be configured to use NetApp Clustered Data ONTAP (cDOT) version 8.2 and later. The driver can work with two types of pools: FlexGroup and FlexVol. By default, it only works with FlexVol, if desired, the FlexGroup pool can be enabled together or standalone. FlexGroup pool requires ONTAP version 9.8 or later. Supported Operations -------------------- The following operations are supported on Clustered Data ONTAP: - Create CIFS/NFS Share - Delete CIFS/NFS Share - Allow NFS Share access * IP access type is supported for NFS. * Read/write and read-only access are supported for NFS. - Allow CIFS Share access * User access type is supported for CIFS. 
* Read/write access is supported for CIFS. - Deny CIFS/NFS Share access - Create snapshot - Delete snapshot - Create share from snapshot - Extend share - Shrink share - Manage share - Unmanage share - Create consistency group - Delete consistency group - Create consistency group from CG snapshot - Create CG snapshot - Delete CG snapshot - Create a replica (DHSS=False) - Promote a replica (DHSS=False) - Delete a replica (DHSS=False) - Update a replica (DHSS=False) - Create a replicated snapshot (DHSS=False) - Delete a replicated snapshot (DHSS=False) - Update a replicated snapshot (DHSS=False) - Migrate share - Migrate share server - Create share backup - Restore share backup - Delete share backup .. note:: The operations are not fully supported configuring FlexGroup pool: - Consistency group operations are only supported configuring the driver without any FlexGroup pool. - For FlexGroup share, create more than one replica is only allowed with ONTAP 9.9.1 and newer. - Migration of FlexGroup shares is not allowed. - Migration of share servers containing FlexGroup share is not allowed. .. note:: :term:`DHSS` is abbreviated from `driver_handles_share_servers`. Supported Operating Modes ------------------------- The cDOT driver supports both 'driver_handles_share_servers' (:term:`DHSS`) modes. If 'driver_handles_share_servers' is True, the driver will create a storage virtual machine (SVM, previously known as vServers) for each unique tenant network and provision each of a tenant's shares into that SVM. This requires the user to specify both a share network as well as a share type with the DHSS extra spec set to True when creating shares. If 'driver_handles_share_servers' is False, the manila admin must configure a single SVM, along with associated LIFs and protocol services, that will be used for provisioning shares. The SVM is specified in the manila config file. 
Network approach ---------------- L3 connectivity between the storage cluster and manila host must exist, and VLAN segmentation may be configured. All of manila's network plug-ins are supported with the cDOT driver. Supported shared filesystems ---------------------------- - NFS (access by IP address or subnet) - CIFS (authentication by user) Required licenses ----------------- - NFS - CIFS - FlexClone Known restrictions ------------------ - For CIFS shares an external Active Directory (AD) service is required. The AD details should be provided via a manila security service that is attached to the specified share network. - Share access rules for CIFS shares may be created only for existing users in Active Directory. - The time on external security services and storage must be synchronized. The maximum allowed clock skew is 5 minutes. - cDOT supports only flat and VLAN network segmentation types. How to take backup for NetApp shares ------------------------------------ Starting from 2024.1, a concept named ``backup_type`` has been introduced. At present, it has been implemented for NetApp driver. The ``backup_type`` is a construct which consists of backup specific configuration parameters such as ``backup_type_name``, ``netapp_backup_backend_section_name``, ``netapp_backup_vserver``, ``netapp_backup_share``, ``netapp_snapmirror_job_timeout``. .. note:: The sample config will look like this: ``eng_data_backup`` is the backup_type here.:: [eng_data_backup] netapp_backup_backend_section_name = ontap2 netapp_backup_vserver = backup_vserver_name netapp_backup_volume = backup_volume_name_inside_vserver netapp_snapmirror_job_timeout = 180 [nas_storage] vendor_name = NetApp share_driver = manila.share.drivers.netapp.common.NetAppDriver driver_handles_share_servers = False netapp_login = admin .... .... 
enabled_backup_types = eng_data_backup If the option ``netapp_backup_volume`` is not specified, the backup volume (destination volume) would be created automatically by the driver inside the vserver. The options "netapp_backup_vserver" and "netapp_backup_volume" are optional and it works as below: In case of "driver_handles_share_servers=true", "netapp_backup_vserver" and "netapp_backup_volume" will be created by driver on "backend" mentioned under backup type stanza. In case of "driver_handles_share_servers=false", it will use the existing vserver of the "backend" mentioned under backup type stanza and will create the new volume. The :mod:`manila.share.drivers.netapp.common.py` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.netapp.common :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/nexentastor5_driver.rst0000664000175000017500000000635700000000000022772 0ustar00zuulzuul00000000000000.. Copyright 2019 Nexenta by DDN, Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NexentaStor5 Driver for OpenStack Manila ======================================== The `NexentaStor5 `__ Manila driver provides NFS shared file systems to OpenStack. 
Requirements ------------ - The NexentaStor 5.1 or newer Supported shared filesystems and operations ------------------------------------------- This driver supports NFS shares. The following operations are supported: - Create NFS Share - Delete NFS Share - Allow NFS Share access * Only IP access type is supported for NFS (ro/rw). - Deny NFS Share access - Manage a share. - Unmanage a share. - Extend a share. - Shrink a share. - Create snapshot - Revert to snapshot - Delete snapshot - Create share from snapshot Backend Configuration --------------------- The following parameters need to be configured in the manila configuration file for the NexentaStor5 driver: - `share_backend_name` = - `share_driver` = manila.share.drivers.nexenta.ns5.nexenta_nas.NexentaNasDriver - `driver_handles_share_servers` = False - `nexenta_nas_host` = - `nexenta_user` = - `nexenta_password` = - `nexenta_pool` = - `nexenta_rest_addresses` = - `nexenta_folder` = - `nexenta_nfs` = True Share Types ----------- When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the `default_share_type` set in the manila configuration file is used. Manila requires that the share type includes the `driver_handles_share_servers` extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers (share networks) capability. For the NexentaStor driver, this extra-spec's value must be set to False. Restrictions ------------ - Only IP share access control is allowed for NFS shares. Back-end configuration example ------------------------------ .. 
code-block:: ini [DEFAULT] enabled_share_backends = NexentaStor5 [NexentaStor5] share_backend_name = NexentaStor5 driver_handles_share_servers = False nexenta_folder = manila share_driver = manila.share.drivers.nexenta.ns5.nexenta_nas.NexentaNasDriver nexenta_rest_addresses = 10.3.1.1,10.3.1.2 nexenta_nas_host = 10.3.1.10 nexenta_rest_port = 8443 nexenta_pool = pool1 nexenta_nfs = True nexenta_user = admin nexenta_password = secret_password nexenta_thin_provisioning = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/purestorage_flashblade_driver.rst0000664000175000017500000002464000000000000025033 0ustar00zuulzuul00000000000000.. Copyright 2021 Pure Storage Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================================================== Pure Storage FlashBlade Driver for OpenStack Manila =================================================== The Pure Storage FlashBlade Manila driver provides NFS shared file systems to OpenStack using Pure Storage's FlashBlade native filesystem capabilities. Supported Operations ~~~~~~~~~~~~~~~~~~~~ The following operations are supported with Pure Storage FlashBlade: - Create/delete NFS shares * Shares are not accessible until access rules allow access - Allow/deny NFS share access * IP access rules are required for NFS share access - Create/delete snapshots - Expand and Shrink shares - Revert to Snapshot Share networks are not supported. 
Shares are created directly on the FlashBlade without the use of a share server or service VM. Network connectivity is setup outside of Manila. General Requirements ~~~~~~~~~~~~~~~~~~~~ On the system running the Manila share service: - purity_fb 1.12.1 or newer from PyPI. On the Pure Storage FlashBlade: - Purity//FB Operating System software version 2.3.0 or higher Network Requirements ~~~~~~~~~~~~~~~~~~~~ Connectivity between the FlashBlade (REST) and the manila host is required for share management. Connectivity between the clients and the FlashBlade is required for mounting and using the shares. This includes: - Routing from the client to the external network - Assigning the client an external IP address (e.g., a floating IP) - Configuring the manila host networking properly for IP forwarding - Configuring the FlashBlade networking properly for client subnets Driver Configuration ~~~~~~~~~~~~~~~~~~~~ Before configuring the driver, make sure the following networking requirements have been met: - A management subnet must be accessible from the system running the Manila share services - A data subnet must be accessible from the system running the Nova compute services - An API token must be available for a user with administrative privileges Perform the following steps: #. Configure the Pure Storage FlashBlade parameters in `manila.conf` #. Configure/create a share type #. Restart the services It is also assumed that the OpenStack networking has been confiured correctly. Step 1 - FlashBlade Parameters configuration ******************************************** The following parameters need to be configured in the [DEFAULT] section of `/etc/manila/manila.conf`: +----------------------------+------------------------------------------------+ | **Option** | **Description** | +============================+================================================+ | enabled_share_backends | Name of the section on ``manila.conf`` used to | | | specify a backend. 
For example: | | | *enabled_share_backends = flashblade* | +----------------------------+------------------------------------------------+ | enabled_share_protocols | Specify a list of protocols to be allowed for | | | share creation. This driver version only | | | supports NFS | +----------------------------+------------------------------------------------+ The following parameters need to be configured in the [backend] section of ``/etc/manila/manila.conf``: +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | **Option** | **Description** | +=================================================+=====================================================================================================+ | share_backend_name | A name for the backend. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | share_driver | Python module path. For this driver **this must be**: | | | *manila.share.drivers.purestorage.flashblade.FlashBladeShareDriver* | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | driver_handles_share_servers | Driver working mode. For this driver **this must be**: | | | *False*. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | flashblade_mgmt_vip | The name (or IP address) for the Pure Storage FlashBlade storage system management VIP. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | flashblade_data_vip | The names (or IP address) for the Pure Storage FlashBlade storage system data VIPs. 
| +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | flashblade_api | API token for an administrative user account | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ | flashblade_eradicate (Optional) | When enabled, all FlashBlade file systems and snapshots will be eradicated at the time | | | of deletion in Manila. Data will NOT be recoverable after a delete with this set to True! | | | When disabled, file systems and snapshots will go into pending eradication state and can be | | | recovered. Default value is *True*. | +-------------------------------------------------+-----------------------------------------------------------------------------------------------------+ Below is an example of a valid configuration of the FlashBlade driver: .. code-block:: ini [DEFAULT] ... enabled_share_backends = flashblade enabled_share_protocols = NFS ... [flashblade] share_backend_name = flashblade share_driver = manila.share.drivers.purestorage.flashblade.FlashBladeShareDriver driver_handles_share_servers = False flashblade_mgmt_vip = 1.2.3.4 flashblade_data_vip = 1.2.3.5,1.2.3.6 flashblade_api = T-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Step 2 - Share Type Configuration ********************************* Shared File Systems service requires that the share type includes the driver_handles_share_servers extra-spec. This ensures that the share will be created on a backend that supports the requested driver_handles_share_servers capability. For the Pure Storage FlashBlade Driver for OpenStack this must be set to False. .. 
code-block:: console $ manila type-create flashblade False Additionally, the driver also reports the following common capabilities that can be specified in the share type: +----------------------------------+------------------------------------------------------+ | **Capability** | **Description** | +==================================+======================================================+ | thin_provisioning = True | All shares created on FlashBlade are always thin | | | provisioned. If you set it this, the value | | | **must be**: *True*. | +----------------------------------+------------------------------------------------------+ | snapshot_support = True/False | FlashBlade supports share snapshots. | | | If you set this, the value **must be**: *True*. | +----------------------------------+------------------------------------------------------+ | revert_to_snapshot = True/False | FlashBlade supports reverting a share to the latest | | | available snapshot. If you set this, the value | | | **must be**: *True*. | +----------------------------------+------------------------------------------------------+ To specify a common capability on the share type, use the *type-key* command, for example: .. code-block:: console $ manila type-key flashblade set snapshot_support=True $ manila type-key flashblade set revert_to_snapshot=True Step 3 - Restart the Services ***************************** Restart all Shared File Systems services (manila-share, manila-scheduler and manila-api). This step is specific to your environment. for example, `systemctl restart @manila-shr` is used to restart the share service. The :mod:`manila.share.drivers.purestorage.flashblade` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.share.drivers.purestorage.flashblade :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/share_back_ends_feature_support_mapping.rst0000664000175000017500000017663000000000000027077 0ustar00zuulzuul00000000000000.. Copyright 2015 Mirantis Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _share_back_ends_feature_support_mapping: Manila share features support mapping ===================================== Here we provide information on support of different share features by different share drivers. Column values contain the OpenStack release letter when a feature was added to the driver. Column value "?" means that this field requires an update with current information. Column value "-" means that this feature is not currently supported. 
Mapping of share drivers and share features support --------------------------------------------------- +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Driver name | create delete share | manage unmanage share | extend share | shrink share | create delete snapshot | create share from snapshot | manage unmanage snapshot | revert to snapshot | mountable snapshot | +========================================+=======================+=======================+==========================+==========================+========================+===================================+==========================+====================+====================+ | ZFSonLinux | M | N | M | M | M | M | N | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Container | N | \- | N | \- | \- | \- | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Generic (Cinder as back-end) | J | K | L | L | J | J | M | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | NetApp Clustered Data ONTAP | J | L | L | L | J |same pool (J), across back ends (U)| N | O | \- | 
+----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Dell PowerMax | O | \- | O | \- | O | O | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Dell VNX | J | \- | \- | \- | J | J | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Dell Unity | N | U | N | S | N | N | U | S | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Dell PowerScale | K | \- | M | \- | K | K | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Dell PowerStore | B | \- | B | B | B | B | \- | B | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Dell PowerFlex | B | \- | B | \- | B | \- | \- | \- | \- | 
+----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | GlusterFS | J | \- | directory layout (T) | directory layout (T) | volume layout (L) | volume layout (L) | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | GlusterFS-Native | J | \- | \- | \- | K | L | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | HDFS | K | \- | M | \- | K | K | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Hitachi HNAS | L | L | L | M | L | L | O | O | O | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Hitachi HSP | N | N | N | N | \- | \- | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | 
HPE 3PAR | K | \- | \- | \- | K | K | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Huawei | K | L | L | L | K | M | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | IBM GPFS | K | O | L | \- | K | K | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | INFINIDAT | Q | \- | Q | \- | Q | Q | \- | Q | Q | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | INSPUR AS13000 | R | \- | R | \- | R | R | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | INSPUR InStorage | T | \- | T | \- | \- | \- | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Infortrend | T 
| T | T | T | \- | \- | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Macrosan | Z | \- | Z | Z | \- | \- | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | LVM | M | \- | M | \- | M | M | \- | O | O | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Quobyte | K | \- | M | M | \- | \- | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Windows SMB | L | L | L | L | L | L | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Oracle ZFSSA | K | N | M | M | K | K | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | CephFS | M | D (2024.2) | M | M | M | W | 
D (2024.2) | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Tegile | M | \- | M | M | M | M | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | NexentaStor4 | N | \- | N | \- | N | N | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | NexentaStor5 | N | T | N | N | N | N | \- | T | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | MapRFS | O | O | O | O | O | O | O | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | QNAP | O | O | O | \- | O | O | O | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Pure Storage FlashBlade | X | \- | X | X | X | \- | \- | X | \- 
| +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ | Vastdata | D | \- | D | D | D | \- | \- | \- | \- | +----------------------------------------+-----------------------+-----------------------+--------------------------+--------------------------+------------------------+-----------------------------------+--------------------------+--------------------+--------------------+ Mapping of share drivers and share access rules support ------------------------------------------------------- +----------------------------------------+--------------------------------------------------------------------------+------------------------------------------------------------------------+ | | Read & Write | Read Only | + Driver name +--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | | IPv4 | IPv6 | USER | Cert | CephX | IPv4 | IPv6 | USER | Cert | CephX | +========================================+==============+==============+================+============+==============+==============+==============+================+============+============+ | ZFSonLinux | NFS (M) | \- | \- | \- | \- | NFS (M) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Container | \- | \- | CIFS (N) | \- | \- | \- | \- | CIFS (N) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Generic (Cinder as back-end) | NFS,CIFS (J) | \- | \- | \- | \- | NFS (K) | \- | \- | \- | \- 
| +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | NetApp Clustered Data ONTAP | NFS (J) | NFS (Q) | CIFS (J) | \- | \- | NFS (K) | NFS (Q) | CIFS (M) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Dell PowerMax | NFS (O) | NFS (R) | CIFS (O) | \- | \- | NFS (O) | NFS (R) | CIFS (O) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Dell VNX | NFS (J) | NFS (Q) | CIFS (J) | \- | \- | NFS (L) | NFS (Q) | CIFS (L) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Dell Unity | NFS (N) | NFS (Q) | CIFS (N) | \- | \- | NFS (N) | NFS (Q) | CIFS (N) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Dell PowerScale | NFS,CIFS (K) | \- | CIFS (M) | \- | \- | NFS (M) | \- | CIFS (M) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Dell PowerStore | NFS (B) | \- | CIFS (B) | \- | \- | NFS (B) | \- | CIFS (B) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Dell PowerFlex | NFS (B) | \- | \- | \- | \- | NFS (B) | \- | 
\- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | GlusterFS | NFS (J) | \- | \- | \- | \- | \- | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | GlusterFS-Native | \- | \- | \- | J | \- | \- | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | HDFS | \- | \- | HDFS(K) | \- | \- | \- | \- | HDFS(K) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Hitachi HNAS | NFS (L) | \- | CIFS (N) | \- | \- | NFS (L) | \- | CIFS (N) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Hitachi HSP | NFS (N) | \- | \- | \- | \- | NFS (N) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | HPE 3PAR | NFS,CIFS (K) | \- | CIFS (K) | \- | \- | \- | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Huawei | NFS (K) | \- |NFS (M),CIFS (K)| \- | \- | NFS (K) | \- |NFS (M),CIFS (K)| \- | \- | 
+----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | LVM | NFS (M) | NFS (P) | CIFS (M) | \- | \- | NFS (M) | NFS (P) | CIFS (M) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Quobyte | NFS (K) | \- | \- | \- | \- | NFS (K) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Windows SMB | \- | \- | CIFS (L) | \- | \- | \- | \- | CIFS (L) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | IBM GPFS | NFS (K) | \- | \- | \- | \- | NFS (K) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | INFINIDAT | NFS (Q) | \- | \- | \- | \- | NFS (Q) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | INSPUR AS13000 | NFS (R) | \- | CIFS (R) | \- | \- | NFS (R) | \- | CIFS (R) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | INSPUR InStorage | NFS (T) | \- | CIFS (T) | \- | \- | NFS (T) | \- | CIFS (T) | \- | \- | 
+----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Infortrend | NFS (T) | \- | CIFS (T) | \- | \- | NFS (T) | \- | CIFS (T) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Macrosan | NFS (Z) | \- | CIFS (Z) | \- | \- | NFS (Z) | \- | CIFS (Z) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Oracle ZFSSA | NFS,CIFS(K) | \- | \- | \- | \- | \- | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | CephFS | NFS (P) | NFS (T) | \- | \- | CEPHFS (M) | NFS (P) | NFS (T) | \- | \- | CEPHFS (N) | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Tegile | NFS (M) | \- |NFS (M),CIFS (M)| \- | \- | NFS (M) | \- |NFS (M),CIFS (M)| \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | NexentaStor4 | NFS (N) | \- | \- | \- | \- | NFS (N) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | NexentaStor5 | NFS (N) | T | \- | \- | \- | NFS (N) | T | \- | \- | \- | 
+----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | MapRFS | \- | \- | MapRFS(O) | \- | \- | \- | \- | MapRFS(O) | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | QNAP | NFS (O) | \- | \- | \- | \- | NFS (O) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Pure Storage FlashBlade | NFS (X) | \- | \- | \- | \- | NFS (X) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ | Vastdata | NFS (D) | \- | \- | \- | \- | NFS (D) | \- | \- | \- | \- | +----------------------------------------+--------------+--------------+----------------+------------+--------------+--------------+--------------+----------------+------------+------------+ Mapping of share drivers and security services support ------------------------------------------------------ +----------------------------------------+------------------+-----------------+------------------+ | Driver name | Active Directory | LDAP | Kerberos | +========================================+==================+=================+==================+ | ZFSonLinux | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Container | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Generic (Cinder as back-end) | \- | \- | \- | 
+----------------------------------------+------------------+-----------------+------------------+ | NetApp Clustered Data ONTAP | J | J | J | +----------------------------------------+------------------+-----------------+------------------+ | Dell PowerMax | O | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Dell VNX | J | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Dell Unity | N | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | EMC PowerScale | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Dell PowerStore | B | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Dell PowerFlex | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | GlusterFS | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | GlusterFS-Native | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | HDFS | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Hitachi HNAS | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Hitachi HSP | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | HPE 3PAR | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Huawei | M | M | \- | +----------------------------------------+------------------+-----------------+------------------+ | LVM | \- | \- | \- | 
+----------------------------------------+------------------+-----------------+------------------+ | Quobyte | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Windows SMB | L | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | IBM GPFS | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | INFINIDAT | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | INSPUR AS13000 | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | INSPUR InStorage | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Infortrend | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Macrosan | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Oracle ZFSSA | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | CephFS | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Tegile | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | NexentaStor4 | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | NexentaStor5 | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | MapRFS | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | QNAP | \- | \- | \- | 
+----------------------------------------+------------------+-----------------+------------------+ | Pure Storage FlashBlade | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ | Vastdata | \- | \- | \- | +----------------------------------------+------------------+-----------------+------------------+ Mapping of share drivers and common capabilities ------------------------------------------------ More information: :ref:`capabilities_and_extra_specs` +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Driver name | DHSS=True | DHSS=False | dedupe | compression | thin_provisioning | thick_provisioning | qos | create share from snapshot | revert to snapshot | mountable snapshot | ipv4_support | ipv6_support | multiple subnets per AZ | mount point name support | +========================================+===========+============+========+=============+===================+====================+=====+============================+====================+====================+==============+==============+=========================+==========================+ | ZFSonLinux | \- | M | M | M | M | \- | \- | M | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Container | N | \- | \- | \- | \- | N | \- | \- | \- | \- | P | \- | Y | \- | 
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Generic (Cinder as back-end) | J | K | \- | \- | \- | L | \- | J | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | NetApp Clustered Data ONTAP | J | K | M | M | M | L | P | J | O | \- | P | Q | \- | Y | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Dell PowerMax | O | \- | \- | \- | \- | \- | \- | O | \- | \- | P | R | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Dell VNX | J | \- | \- | \- | \- | L | \- | J | \- | \- | P | Q | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Dell Unity | N | T | \- | \- | N | \- | \- | N | S | \- | P | Q | \- | \- | 
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Dell PowerScale | \- | K | \- | \- | \- | L | \- | K | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Dell PowerStore | \- | B | \- | \- | B | \- | \- | B | B | \- | B | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Dell PowerFlex | \- | B | \- | \- | B | \- | \- | \- | \- | \- | B | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | GlusterFS | \- | J | \- | \- | \- | L | \- | volume layout (L) | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | GlusterFS-Native | \- | J | \- | \- | \- | L | \- | L | \- | \- | P | \- | \- | \- | 
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | HDFS | \- | K | \- | \- | \- | L | \- | K | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Hitachi HNAS | \- | L | N | \- | L | \- | \- | L | O | O | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Hitachi HSP | \- | N | \- | \- | N | \- | \- | \- | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | HPE 3PAR | L | K | L | \- | L | L | \- | K | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Huawei | M | K | L | L | L | L | M | M | \- | \- | P | \- | \- | \- | 
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | INFINIDAT | \- | Q | \- | \- | Q | Q | \- | Q | Q | Q | Q | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Infortrend | \- | T | \- | \- | \- | \- | \- | \- | \- | \- | T | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | LVM | \- | M | \- | \- | \- | M | \- | K | O | O | P | P | \- | Y | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Macrosan | \- | Z | \- | \- | \- | Z | \- | \- | \- | \- | Z | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Quobyte | \- | K | \- | \- | \- | L | \- | M | \- | \- | P | \- | \- | \- | 
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Windows SMB | L | L | \- | \- | \- | L | \- | \- | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | IBM GPFS | \- | K | \- | \- | \- | L | \- | L | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Oracle ZFSSA | \- | K | \- | \- | \- | L | \- | K | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | CephFS | \- | M | \- | \- | \- | M | \- | \- | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Tegile | \- | M | M | M | M | \- | \- | M | \- | \- | P | \- | \- | \- | 
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | NexentaStor4 | \- | N | N | N | N | N | \- | N | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | NexentaStor5 | \- | N | \- | N | N | N | \- | N | T | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | MapRFS | \- | N | \- | \- | \- | N | \- | O | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | QNAP | \- | O | Q | Q | O | Q | \- | O | \- | \- | P | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | INSPUR AS13000 | \- | R | \- | \- | R | \- | \- | R | \- | \- | R | \- | \- | \- | 
+----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | INSPUR InStorage | \- | T | \- | \- | \- | T | \- | \- | \- | \- | T | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Pure Storage FlashBlade | \- | X | \- | \- | X | \- | \- | \- | X | \- | X | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ | Vastdata | \- | D | \- | \- | \- | \- | \- | \- | \- | \- | D | \- | \- | \- | +----------------------------------------+-----------+------------+--------+-------------+-------------------+--------------------+-----+----------------------------+--------------------+--------------------+--------------+--------------+-------------------------+--------------------------+ .. 
note:: The common capability reported by back ends differs from some names seen in the above table: * `DHSS` is reported as ``driver_handles_share_servers`` (See details for :term:`DHSS`) * `create share from snapshot` is reported as ``create_share_from_snapshot_support`` * `multiple subnets per AZ` is reported as ``multiple_subnets_per_availability_zone`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/share_mount_point_name.rst0000664000175000017500000000636600000000000023505 0ustar00zuulzuul00000000000000.. _shared_mount_point_name: Mount Point Name Support ======================== The Shared File Systems service supports user defined mount point names. This feature allows users to specify a custom `mount_point_name` during share creation, which will be reflected in the share's export location. However, for this feature to be available to users, administrators must enable an extra-spec in the share type, `mount_point_name_support`, and set an extra-spec named `provisioning:mount_point_prefix`. If the `provisioning:mount_point_prefix` extra-spec is not set, the `project_id` from the `RequestContext` is used as a fallback. However, using the `project_id` as a prefix can restrict the transfer of share ownership, and manual action is required to facilitate the transfer. .. note:: - In order to use this feature, the available backend in your deployment must have support for it. The list of backends that support this feature in Manila can be found in the :doc:`share_back_ends_feature_support_mapping`. - This feature is only available in API version 2.84 and beyond. - The extra-spec `mount_point_name_support` is required for this feature to work. - When the `project_id` is used as a prefix due to the absence of the `provisioning:mount_point_prefix` extra-spec, the transfer of share ownership may be restricted, and manual action may be required.
Administrator Guide =================== 1. **Configuring `mount_point_name_support` and `provisioning:mount_point_prefix`** .. code-block:: bash openstack share type set --extra-spec \ mount_point_name_support="<is> True" provisioning:mount_point_prefix=<prefix> <share_type> Replace ``<share_type>`` with the name of the share type you are configuring, and ``<prefix>`` with the desired prefix. The ``<prefix>`` should be a string containing ASCII letters and, optionally, the underscore character. 2. **Default Behavior and Security Considerations** If `provisioning:mount_point_prefix` is not set, the system will use the `project_id` as the default prefix for the `mount_point_name`. However, be aware that setting `provisioning:mount_point_prefix` to a constant string and sharing the share type with multiple projects could potentially leak information about the existence of other shares. This could be considered a security hole and should be avoided. 3. **Share Transfer** During a share transfer, if `provisioning:mount_point_prefix` contains a `project_id`, the system will refuse to perform the transfer and return an HTTP 400 error. This indicates that the share has some project identity that requires administrator intervention. To perform the transfer, administrators will need to unmanage the share from the current project and manage it into the target project. 4. **Constructing `mount_point_name`** The `mount_point_name` is constructed by combining the prefix from the share type (set by `provisioning:mount_point_prefix`) and the suffix provided by the user. This combined string must be ASCII alphanumeric, allowing only underscores as special characters. If this validation fails, the system will log an error and return a message indicating that the `mount_point_name` is not appropriate.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-crud-share.rst0000664000175000017500000014472300000000000024703 0ustar00zuulzuul00000000000000.. _shared_file_systems_crud_share: ====================== Share basic operations ====================== General concepts ---------------- To create a file share, and access it, the following general concepts are prerequisite knowledge: #. To create a share, use :command:`openstack share create` command and specify the required arguments: the size of the share and the shared file system protocol. ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, ``CephFS`` or ``MAPRFS`` share file system protocols are supported. #. You can also optionally specify the share network and the share type. #. After the share becomes available, use the :command:`openstack share show` command to get the share export locations. #. After getting the share export locations, you can create an :ref:`access rule ` for the share, mount it and work with files on the remote file system. There are big number of the share drivers created by different vendors in the Shared File Systems service. As a Python class, each share driver can be set for the :ref:`back end ` and run in the back end to manage the share operations. Initially there are two driver modes for the back ends: * no share servers mode * share servers mode Each share driver supports one or two of possible back end modes that can be configured in the ``manila.conf`` file. 
The configuration option ``driver_handles_share_servers`` in the ``manila.conf`` file sets the share servers mode or no share servers mode, and defines the driver mode for share storage lifecycle management: +------------------+-------------------------------------+--------------------+ | Mode | Config option | Description | +==================+=====================================+====================+ | no share servers | driver_handles_share_servers = False| An administrator | | | | rather than a share| | | | driver manages the | | | | bare metal storage | | | | with some net | | | | interface instead | | | | of the presence of | | | | the share servers. | +------------------+-------------------------------------+--------------------+ | share servers | driver_handles_share_servers = True | The share driver | | | | creates the share | | | | server and manages,| | | | or handles, the | | | | share server life | | | | cycle. | +------------------+-------------------------------------+--------------------+ It is :ref:`the share types ` which have the extra specifications that help scheduler to filter back ends and choose the appropriate back end for the user that requested to create a share. The required extra boolean specification for each share type is ``driver_handles_share_servers``. As an administrator, you can create the share types with the specifications you need. For details of managing the share types and configuration the back ends, see :ref:`shared_file_systems_share_types` and :ref:`shared_file_systems_multi_backend` documentation. You can create a share in two described above modes: * in a no share servers mode without specifying the share network and specifying the share type with ``driver_handles_share_servers = False`` parameter. See subsection :ref:`create_share_in_no_share_server_mode`. * in a share servers mode with specifying the share network and the share type with ``driver_handles_share_servers = True`` parameter. 
See subsection :ref:`create_share_in_share_server_mode`. .. _create_share_in_no_share_server_mode: Create a share in no share servers mode --------------------------------------- To create a file share in no share servers mode, you need to: #. To create a share, use :command:`openstack share create` command and specify the required arguments: the size of the share and the shared file system protocol. ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, ``CephFS`` or ``MAPRFS`` share file system protocols are supported. #. You should specify the :ref:`share type ` with ``driver_handles_share_servers = False`` extra specification. #. You must not specify the ``share network`` because no share servers are created. In this mode the Shared File Systems service expects that administrator has some bare metal storage with some net interface. #. The :command:`openstack share create` command creates a share. This command does the following things: * The :ref:`manila-scheduler ` service will find the back end with ``driver_handles_share_servers = False`` mode due to filtering the extra specifications of the share type. * The share is created using the storage that is specified in the found back end. #. After the share becomes available, use the :command:`openstack share show` command to get the share export locations. In the example to create a share, the created already share type named ``dhss_false`` with ``driver_handles_share_servers = False`` extra specification is used. Check share types that exist, run: .. 
code-block:: console $ openstack share type list +----------+----------+------------+------------+----------------------+----------------------+-------------+ | ID | Name | Visibility | Is Default | Required Extra Specs | Optional Extra Specs | Description | +----------+----------+------------+------------+----------------------+----------------------+-------------+ | 807e5cd7 | default | public | True | driver_handles_share | snapshot_support : | None | | -a0e7- | | | | _servers : True | True | | | 4912- | | | | | create_share_from_sn | | | 8f7d- | | | | | apshot_support : | | | 352512ce | | | | | True | | | 51c3 | | | | | revert_to_snapshot_s | | | | | | | | upport : True | | | | | | | | mount_snapshot_suppo | | | | | | | | rt : True | | | d57dfcb5 | dhss_fal | public | False | driver_handles_share | snapshot_support : | None | | -3026- | se | | | _servers : False | True | | | 4018- | | | | | create_share_from_sn | | | be87- | | | | | apshot_support : | | | 3d7ca511 | | | | | True | | | 60cc | | | | | revert_to_snapshot_s | | | | | | | | upport : True | | | | | | | | mount_snapshot_suppo | | | | | | | | rt : True | | | a5e531e6 | dhss_tru | public | False | driver_handles_share | snapshot_support : | None | | -8a89- | e | | | _servers : True | True | | | 4333- | | | | | create_share_from_sn | | | 9920- | | | | | apshot_support : | | | 59cd420d | | | | | True | | | 4f79 | | | | | revert_to_snapshot_s | | | | | | | | upport : True | | | | | | | | mount_snapshot_suppo | | | | | | | | rt : True | | +----------+----------+------------+------------+----------------------+----------------------+-------------+ Create a private share with ``dhss_false`` share type, NFS shared file system protocol, and size 1 GB: .. 
code-block:: console $ openstack share create nfs 1 --name Share1 --description "My share" --share-type dhss_false +---------------------------------------+--------------------------------------+ | Field | Value | +---------------------------------------+--------------------------------------+ | id | c1de2cdc-2ccf-4e8d-afe9-b25c84bf3953 | | size | 1 | | availability_zone | None | | created_at | 2025-04-05T22:05:29.343767 | | status | creating | | name | Share1 | | description | My share | | project_id | c0bc204890ad428796f364b677a8516b | | snapshot_id | None | | share_network_id | None | | share_proto | NFS | | metadata | {} | | share_type | d57dfcb5-3026-4018-be87-3d7ca51160cc | | volume_type | dhss_false | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_false | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | c5d0c19aae6e4484a41e241f0d8b04fb | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | None | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | | +---------------------------------------+--------------------------------------+ New share ``Share1`` should have a status ``available``: .. 
code-block:: console $ openstack share show Share1 +---------------------------------------+------------------------------------------+ | Field | Value | +---------------------------------------+------------------------------------------+ | id | c1de2cdc-2ccf-4e8d-afe9-b25c84bf3953 | | size | 1 | | availability_zone | manila-zone-1 | | created_at | 2025-04-05T22:05:29.343767 | | status | available | | name | Share1 | | description | My share | | project_id | c0bc204890ad428796f364b677a8516b | | snapshot_id | None | | share_network_id | None | | share_proto | NFS | | share_type | d57dfcb5-3026-4018-be87-3d7ca51160cc | | volume_type | dhss_false | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_false | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | c5d0c19aae6e4484a41e241f0d8b04fb | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | 100% | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | manila@paris#shares | | export_locations | | | | id = | | | 30d8ad5a-05b2-401a-9dbd-caf496f4ab12 | | | path = 11.0.0.11:/shares/share_c1de2 | | | cdc_2ccf_4e8d_afe9_b25c84bf3953_86ef2 | | | fc0_acbe_444c_888a_c52c05242dce | | | preferred = False | | | metadata = {} | | | share_instance_id = | | | 86ef2fc0-acbe-444c-888a-c52c05242dce | | | is_admin_only = True | | | id = | | | acdd47f6-aef5-4d3b-86b2-db7d73d4bbfe | | | path = 10.0.0.10:/shares/share_c1de2 | | | cdc_2ccf_4e8d_afe9_b25c84bf3953_86ef2 | | | fc0_acbe_444c_888a_c52c05242dce | | | preferred = True | | | metadata = {} | | | share_instance_id = | | | 86ef2fc0-acbe-444c-888a-c52c05242dce | | | is_admin_only = False | | | id = | | | 224f223f-6dea-4e08-92c5-66de161cf43d | | | path = 
10.0.0.20:shares/share_c1de2 | | | cdc_2ccf_4e8d_afe9_b25c84bf3953_86ef2 | | | fc0_acbe_444c_888a_c52c05242dce | | | preferred = False | | | metadata = {} | | | share_instance_id = | | | 86ef2fc0-acbe-444c-888a-c52c05242dce | | | is_admin_only = False | | properties | | +---------------------------------------+------------------------------------------+ .. _create_share_in_share_server_mode: Create a share in share servers mode ------------------------------------ To create a file share in share servers mode, you need to: #. To create a share, use :command:`openstack share create` command and specify the required arguments: the size of the share and the shared file system protocol. ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, ``CephFS`` or ``MAPRFS`` share file system protocols are supported. #. You should specify the :ref:`share type ` with ``driver_handles_share_servers = True`` extra specification. #. You should specify the :ref:`share network `. #. The :command:`openstack share create` command creates a share. This command does the following things: * The :ref:`manila-scheduler ` service will find the back end with ``driver_handles_share_servers = True`` mode due to filtering the extra specifications of the share type. * The share driver will create a share server with the share network. For details of creating the resources, see the `documentation `_ of the specific share driver. #. After the share becomes available, use the :command:`manila show` command to get the share export location. In the example to create a share, the default share type and the already existing share network are used. .. note:: There is no default share type just after you started manila as the administrator. See :ref:`shared_file_systems_share_types` to create the default share type. To create a share network, use :ref:`shared_file_systems_share_networks`. Check share networks that exist, run: .. 
code-block:: console $ openstack share network list +--------------------------------------+-------+ | ID | Name | +--------------------------------------+-------+ | 1e0b9a80-2bce-4244-9da4-f8589c6bd56b | mynet | +--------------------------------------+-------+ Create a public share with ``my_share_net`` network, ``default`` share type, NFS shared file system protocol, and size 1 GB: .. code-block:: console $ openstack share create nfs 1 \ --name "Share2" \ --description "My second share" \ --share-type default \ --share-network my_net \ --metadata aim=testing \ --public +---------------------------------------+--------------------------------------+ | Property | Value | +---------------------------------------+--------------------------------------+ | id | a37c3d1d-023f-4fcf-b640-3dbbb3e89193 | | size | 1 | | availability_zone | None | | created_at | 2025-04-05T22:25:51.609837 | | status | creating | | name | Share2 | | description | My second share | | project_id | c0bc204890ad428796f364b677a8516b | | snapshot_id | None | | share_network_id | 1e0b9a80-2bce-4244-9da4-f8589c6bd56b | | share_proto | NFS | | metadata | {'aim': 'testing'} | | share_type | 807e5cd7-a0e7-4912-8f7d-352512ce51c3 | | is_public | True | | snapshot_support | True | | task_state | None | | share_type_name | default | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | c5d0c19aae6e4484a41e241f0d8b04fb | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | None | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | | +---------------------------------------+--------------------------------------+ The share also can be created from a share snapshot. For details, see :ref:`shared_file_systems_snapshots`. 
See the share in a share list: .. code-block:: console $ openstack share list +--------------------------------------+----------------+------+-------------+--------------+-----------+-----------------+----------------------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+----------------+------+-------------+--------------+-----------+-----------------+----------------------+-------------------+ | a37c3d1d-023f-4fcf-b640-3dbbb3e89193 | Share2 | 1 | NFS | available | True | default | manila@lima#shares | manila-zone-1 | | c1de2cdc-2ccf-4e8d-afe9-b25c84bf3953 | Share1 | 1 | NFS | available | False | dhss_false | manila@paris#shares | manila-zone-1 | +--------------------------------------+----------------+------+-------------+--------------+-----------+-----------------+----------------------+-------------------+ Check the share status and see the share export locations. After ``creating`` status share should have status ``available``: .. 
code-block:: console $ openstack share show Share2 +---------------------------------------+------------------------------------------+ | Field | Value | +---------------------------------------+------------------------------------------+ | id | a37c3d1d-023f-4fcf-b640-3dbbb3e89193 | | size | 1 | | availability_zone | manila-zone-1 | | created_at | 2025-04-05T22:25:51.609837 | | status | available | | name | Share2 | | description | My second share | | project_id | c0bc204890ad428796f364b677a8516b | | snapshot_id | None | | share_network_id | 1e0b9a80-2bce-4244-9da4-f8589c6bd56b | | share_proto | NFS | | share_type | 807e5cd7-a0e7-4912-8f7d-352512ce51c3 | | volume_type | default | | is_public | True | | snapshot_support | True | | task_state | None | | share_type_name | default | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | c5d0c19aae6e4484a41e241f0d8b04fb | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | None | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | manila@lima#shares | | export_locations | | | | id = | | | aeac5f3e-60e3-461c-8ca8-6696e0f59f39 | | | path = 12.0.0.12:/shares/share_cdc_2c | | | cf_4e8d_afe9_b25c84bf3953_86ef2 | | | 789-f1f5-4171-9e43-3afabddf8b5f | | | preferred = False | | | metadata = {} | | | share_instance_id = | | | 86ef2fc0-acbe-444c-888a-c52c05242dce | | | is_admin_only = True | | | id = | | | 965aa536-9ba4-4f8b-9ddd-a6a916968597 | | | path = 10.0.0.10:/shares/share_cdc_2c | | | cf_4e8d_afe9_b25c84bf3953_86ef2 | | | 789-f1f5-4171-9e43-3afabddf8b5f | | | preferred = True | | | metadata = {} | | | share_instance_id = | | | 86ef2fc0-acbe-444c-888a-c52c05242dce | | | is_admin_only = False | | | id = | | | 224f223f-6dea-4e08-92c5-66de161cf43d | | | 
path = 10.0.0.20:/shares/share_cdc_2c | | | cf_4e8d_afe9_b25c84bf3953_86ef2 | | | 789-f1f5-4171-9e43-3afabddf8b5f | | | preferred = False | | | metadata = {} | | | share_instance_id = | | | 86ef2fc0-acbe-444c-888a-c52c05242dce | | properties | aim='testing' | +---------------------------------------+------------------------------------------+ ``is_public`` defines the level of visibility for the share: whether other projects can or cannot see the share. By default, the share is private. Update share ------------ Update the name, or description, or level of visibility for all projects for the share if you need: .. code-block:: console $ openstack share set Share2 --description "My second share. Updated" --public False $ openstack share show Share2 +---------------------------------------+--------------------------------------+ | Field | Value | +---------------------------------------+--------------------------------------+ | id | a37c3d1d-023f-4fcf-b640-3dbbb3e89193 | | size | 1 | | availability_zone | manila-zone-1 | | created_at | 2025-04-05T22:25:51.609837 | | status | available | | name | Share2 | | description | My second share. 
Updated | | project_id | c0bc204890ad428796f364b677a8516b | | snapshot_id | None | | share_network_id | 1e0b9a80-2bce-4244-9da4-f8589c6bd56b | | share_proto | NFS | | share_type | 807e5cd7-a0e7-4912-8f7d-352512ce51c3 | | volume_type | default | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | default | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | c5d0c19aae6e4484a41e241f0d8b04fb | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | None | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | manila@lima#shares | | export_locations | | | properties | aim='testing' | +---------------------------------------+--------------------------------------+ A share can have one of these status values: +-----------------------------------+-----------------------------------------+ | Status | Description | +===================================+=========================================+ | creating | The share is being created. | +-----------------------------------+-----------------------------------------+ | deleting | The share is being deleted. | +-----------------------------------+-----------------------------------------+ | error | An error occurred during share creation.| +-----------------------------------+-----------------------------------------+ | error_deleting | An error occurred during share deletion.| +-----------------------------------+-----------------------------------------+ | available | The share is ready to use. | +-----------------------------------+-----------------------------------------+ | manage_starting | Share manage started. 
| +-----------------------------------+-----------------------------------------+ | manage_error | Share manage failed. | +-----------------------------------+-----------------------------------------+ | unmanage_starting | Share unmanage started. | +-----------------------------------+-----------------------------------------+ | unmanage_error | Share cannot be unmanaged. | +-----------------------------------+-----------------------------------------+ | unmanaged | Share was unmanaged. | +-----------------------------------+-----------------------------------------+ | extending | The extend, or increase, share size | | | request was issued successfully. | +-----------------------------------+-----------------------------------------+ | extending_error | Extend share failed. | +-----------------------------------+-----------------------------------------+ | shrinking | Share is being shrunk. | +-----------------------------------+-----------------------------------------+ | shrinking_error | Failed to update quota on share | | | shrinking. | +-----------------------------------+-----------------------------------------+ | shrinking_possible_data_loss_error| Shrink share failed due to possible data| | | loss. | +-----------------------------------+-----------------------------------------+ | migrating | Share migration is in progress. | +-----------------------------------+-----------------------------------------+ .. _share_metadata: Share metadata -------------- If you want to set the metadata key-value pairs on the share, run: .. code-block:: console $ openstack share set Share2 --property project=my_abc Get all metadata key-value pairs of the share: .. 
code-block:: console $ openstack share show -c properties Share2 +------------+------------------------------------------------------+ | Field | Value | +------------+------------------------------------------------------+ | properties | aim='testing', deadline='01/20/16', project='my_abc' | +------------+------------------------------------------------------+ You can update the metadata: .. code-block:: console $ openstack share set Share2 --proper deadline='01/30/16' $ openstack share show -c properties Share2 +------------+------------------------------------------------------+ | Field | Value | +------------+------------------------------------------------------+ | properties | aim='testing', deadline='01/30/16', project='my_abc' | +------------+------------------------------------------------------+ You also can unset the metadata using **openstack share unset --property **. .. note:: In case you want to prevent certain metadata key-values to be manipulated by less privileged users, you can provide a list of such keys through the admin only metadata configuration option listed in the :ref:`additional configuration options page `. In case you want to pass certain metadata key-values to be consumed by share drivers, you can provide a list of such keys through the driver updatable metadata configuration option listed in the :ref:`additional configuration options page `. Reset share state ----------------- As administrator, you can reset the state of a share. Use **openstack share set --status** command to reset share state, where ``state`` indicates which state to assign the share. Options include ``available``, ``error``, ``creating``, ``deleting``, ``error_deleting`` states. .. 
code-block:: console $ openstack share set Share2 --status deleting $ openstack share show Share2 +---------------------------------------+--------------------------------------+ | Field | Value | +---------------------------------------+--------------------------------------+ | id | a37c3d1d-023f-4fcf-b640-3dbbb3e89193 | | size | 1 | | availability_zone | manila-zone-1 | | created_at | 2025-04-05T22:25:51.609837 | | status | deleting | | name | Share2 | | description | My second share. Updated | | project_id | c0bc204890ad428796f364b677a8516b | | snapshot_id | None | | share_network_id | 1e0b9a80-2bce-4244-9da4-f8589c6bd56b | | share_proto | NFS | | share_type | 807e5cd7-a0e7-4912-8f7d-352512ce51c3 | | volume_type | default | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | default | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | c5d0c19aae6e4484a41e241f0d8b04fb | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | None | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | manila@lima#shares | | export_locations | | | properties | deadline='01/30/16' | +---------------------------------------+--------------------------------------+ Delete and force-delete share ----------------------------- You also can force-delete a share. The shares cannot be deleted in transitional states. The transitional states are ``creating``, ``deleting``, ``managing``, ``unmanaging``, ``migrating``, ``extending``, and ``shrinking`` statuses for the shares. Force-deletion deletes an object in any state. Use the ``policy.yaml`` file to grant permissions for this action to other roles. .. tip:: The configuration file ``policy.yaml`` may be used from different places. 
The path ``/etc/manila/policy.yaml`` is one of expected paths by default. Use **openstack share delete ** command to delete a specified share: .. code-block:: console $ openstack share delete %share_name_or_id% .. code-block:: console $ openstack share delete %share_name_or_id% --share-group %share-group-id% .. code-block:: console $ openstack share delete Share2 Print the list of all shares for all projects: .. code-block:: console $ openstack share delete --force Share2 $ openstack share list --all +--------------------------------------+----------------+------+-------------+--------------+-----------+-----------------+------------------------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+----------------+------+-------------+--------------+-----------+-----------------+------------------------+-------------------+ | c1de2cdc-2ccf-4e8d-afe9-b25c84bf3953 | Share1 | 1 | NFS | available | False | default | manila@paris#shares | manila-zone-1 | +--------------------------------------+----------------+------+-------------+--------------+-----------+-----------------+------------------------+-------------------+ .. _access_to_share: Manage access to share ---------------------- The Shared File Systems service allows to grant or deny access to a specified share, and list the permissions for a specified share. To grant or deny access to a share, specify one of these supported share access levels: - **rw**. Read and write (RW) access. This is the default value. - **ro**. Read-only (RO) access. You must also specify one of these supported authentication methods: - **ip**. Authenticates an instance through its IP address. A valid format is ``XX.XX.XX.XX`` or ``XX.XX.XX.XX/XX``. For example ``0.0.0.0/0``. - **user**. Authenticates by a specified user or group name. 
A valid value is an alphanumeric string that can contain some special characters and is from 4 to 32 characters long. - **cert**. Authenticates an instance through a TLS certificate. Specify the TLS identity as the IDENTKEY. A valid value is any string up to 64 characters long in the common name (CN) of the certificate. The meaning of a string depends on its interpretation. - **cephx**. Ceph authentication system. Specify the Ceph auth ID that needs to be authenticated and authorized for share access by the Ceph back end. A valid value must be non-empty, consist of ASCII printable characters, and not contain periods. Try to mount NFS share with export path ``10.0.0.10:/shares/share_cdc_2ccf_4e8d_afe9_b25c84bf3953_86ef2789-f1f5-4171-9e43-3afabddf8b5f`` on the node with IP address ``10.0.0.13``: .. code-block:: console $ sudo mount -v -t nfs 10.0.0.10:/shares/share_cdc_2ccf_4e8d_afe9_b25c84bf3953_86ef2789-f1f5-4171-9e43-3afabddf8b5f /mnt/ mount.nfs: timeout set for Tue Oct 6 10:37:23 2015 mount.nfs: trying text-based options 'vers=4,addr=10.0.0.10,clientaddr=10.0.0.13' mount.nfs: mount(2): Permission denied mount.nfs: access denied by server while mounting 10.0.0.10:/shares/share_cdc_2ccf_4e8d_afe9_b25c84bf3953_86ef2789-f1f5-4171-9e43-3afabddf8b5f An error message "Permission denied" appeared, so you are not allowed to mount a share without an access rule. Allow access to the share with ``ip`` access type and ``10.0.2.13`` IP address: .. 
code-block:: console $ openstack share access create Share1 ip 10.0.2.13 --access-level rw +--------------+--------------------------------------+ | Field | Value | +--------------+--------------------------------------+ | id | 56d344c5-95cb-477b-bf33-39f6e9b43edf | | share_id | c1de2cdc-2ccf-4e8d-afe9-b25c84bf3953 | | access_level | rw | | access_to | 10.0.2.13 | | access_type | ip | | state | queued_to_apply | | access_key | None | | created_at | 2025-04-05T23:44:31.165395 | | updated_at | None | | properties | | +--------------+--------------------------------------+ Try to mount a share again. This time it is mounted successfully: .. code-block:: console $ sudo mount -v -t nfs 10.0.0.10:/shares/share_cdc_2ccf_4e8d_afe9_b25c84bf3953_86ef2789-f1f5-4171-9e43-3afabddf8b5f /mnt/ .. note:: Different share features are supported by different share drivers. For the example, the Generic driver with the Block Storage service as a back-end doesn't support ``user`` and ``cert`` authentications methods. For details of supporting of features by different drivers, see `Manila share features support mapping `_. .. tip:: Starting from the 2023.2 (Bobcat) release, in case you want to restrict the visibility of the sensitive fields (``access_to`` and ``access_key``), or avoid the access rule being deleted by other users, you can specify ``--lock-visibility`` and ``--lock-deletion`` in the Manila OpenStack command for creating access rules. A reason (``--lock-reason``) can also be provided. Only the user that placed the lock, system administrators and services will be able to view sensitive fields of, or manipulate such access rules by virtue of default RBAC. In case the deletion of the access rule was locked, Manila will also place an additional lock on the share, to ensure it will not be deleted and cause disconnections. To verify that the access rules (ACL) were configured correctly for a share, you list permissions for a share: .. 
code-block:: console $ openstack share access list Share1 +--------------------------------------+-------------+-----------+--------------+--------+------------+----------------------------+-------------------------+ | ID | Access Type | Access To | Access Level | State | Access Key | Created At | Updated At | +--------------------------------------+-------------+-----------+--------------+--------+------------+----------------------------+-------------------------+ | 56d344c5-95cb-477b-bf33-39f6e9b43edf | ip | 10.0.0.13 | rw | active | None | 2025-04-05T23:44:31.165395 | 2025-04-05T23:45:50.780 | +--------------------------------------+-------------+-----------+--------------+--------+------------+----------------------------+-------------------------+ Deny access to the share and check that deleted access rule is absent in the access rule list: .. code-block:: console $ openstack share access delete Share1 56d344c5-95cb-477b-bf33-39f6e9b43edf .. note:: Starting from the 2023.2 (Bobcat) release, it is possible to prevent the deletion of an access rule. In case the deletion was locked, the ``--unrestrict`` argument from the Manila's OpenStack Client must be used in the request to revoke the access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-healthcheck.rst0000664000175000017500000001631000000000000025077 0ustar00zuulzuul00000000000000============ Healthchecks ============ The health of a the Shared File Systems API service can be determined with the help of a "healthcheck" middleware. This middleware is enabled by default with the `api-paste`_ file that is packaged with the software. There is hence a ``/healthcheck`` endpoint that responds to GET requests with HTTP 200 "OK" as the body if the API service is functional. If the API service is not functional, the response is HTTP 503 "Service Unavailable". 
This ``/healthcheck`` endpoint can be polled by load balancers to determine service availability. The end point behaves very similar to `mod_status` in apache. A sample configuration that can be added to the `api-paste`_ file is as follows. .. _api-paste: ../configuration/shared-file-systems/samples/api-paste.ini.html .. code-block:: [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /etc/manila/healthcheck_disable detailed = False Example healthcheck requests and responses: .. code-block:: $ curl -i -X GET http://203.0.113.30/share/healthcheck HTTP/1.1 200 OK Date: Wed, 20 Mar 2024 23:00:19 GMT Server: Apache/2.4.52 (Ubuntu) Content-Type: text/plain; charset=UTF-8 Content-Length: 2 Connection: close Vary: Accept-Encoding OK $ curl -i -X GET http://203.0.113.30/share/healthcheck -H "Accept: application/json" HTTP/1.1 200 OK Date: Wed, 20 Mar 2024 23:01:08 GMT Server: Apache/2.4.52 (Ubuntu) Content-Type: application/json Content-Length: 62 Connection: close { "detailed": false, "reasons": [ "OK" ] } $ curl -i -X GET http://203.0.113.30/share/healthcheck -H "Accept: text/html" HTTP/1.1 200 OK Date: Wed, 20 Mar 2024 23:02:27 GMT Server: Apache/2.4.52 (Ubuntu) Content-Type: text/html; charset=UTF-8 Content-Length: 239 Connection: close Vary: Accept-Encoding Healthcheck Status

Result of 1 checks:

Reason
OK

A "detail" response can be sought if ``detailed`` is set to ``True`` in the ``[app:healthcheck]`` section of the api paste configuration file. This is not done by default. .. code-block:: $ curl -i -X GET http://203.0.113.30/share/healthcheck -H "Accept: application/json" HTTP/1.1 200 OK Date: Wed, 20 Mar 2024 23:06:19 GMT Server: Apache/2.4.52 (Ubuntu) Content-Type: application/json Content-Length: 4177 Connection: close { "detailed": true, "gc": { "counts": [ 400, 5, 0 ], "threshold": [ 700, 10, 10 ] }, "greenthreads": [ " File \"/opt/stack/data/venv/lib/python3.10/site-packages/paste/urlmap.py\", line 216, in __call__\n return app(environ, start_response)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/webob/dec.py\", line 129, in __call__\n resp = self.call_func(req, *args, **kw)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/webob/dec.py\", line 193, in call_func\n return self.func(req, *args, **kwargs)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/oslo_middleware/base.py\", line 121, in __call__\n response = self.process_request(req)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/webob/dec.py\", line 146, in __call__\n return self.call_func(req, *args, **kw)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/webob/dec.py\", line 193, in call_func\n return self.func(req, *args, **kwargs)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/oslo_middleware/healthcheck/__init__.py\", line 582, in process_request\n body, content_type = functor(results, healthy)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/oslo_middleware/healthcheck/__init__.py\", line 510, in _make_json_response\n body['greenthreads'] = self._get_greenstacks()\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/oslo_middleware/healthcheck/__init__.py\", line 464, in _get_greenstacks\n traceback.print_stack(gt.gr_frame, file=buf)\n" ], "now": "2024-03-20 23:06:19.907279", "platform": 
"Linux-5.15.0-91-generic-x86_64-with-glibc2.35", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "reasons": [ { "class": "HealthcheckResult", "details": "Path '/etc/manila/healthcheck_disable' was not found", "reason": "OK" } ], "threads": [ " File \"/usr/lib/python3.10/threading.py\", line 973, in _bootstrap\n self._bootstrap_inner()\n File \"/usr/lib/python3.10/threading.py\", line 1016, in _bootstrap_inner\n self.run()\n File \"/usr/lib/python3.10/threading.py\", line 953, in run\n self._target(*self._args, **self._kwargs)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/tooz/coordination.py\", line 208, in _beat_forever_until_stopped\n self._dead.wait(has_to_sleep_for / 2.0)\n File \"/usr/lib/python3.10/threading.py\", line 607, in wait\n signaled = self._cond.wait(timeout)\n File \"/usr/lib/python3.10/threading.py\", line 324, in wait\n gotit = waiter.acquire(True, timeout)\n", " File \"/opt/stack/data/venv/lib/python3.10/site-packages/paste/urlmap.py\", line 216, in __call__\n return app(environ, start_response)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/webob/dec.py\", line 129, in __call__\n resp = self.call_func(req, *args, **kw)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/webob/dec.py\", line 193, in call_func\n return self.func(req, *args, **kwargs)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/oslo_middleware/base.py\", line 121, in __call__\n response = self.process_request(req)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/webob/dec.py\", line 146, in __call__\n return self.call_func(req, *args, **kw)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/webob/dec.py\", line 193, in call_func\n return self.func(req, *args, **kwargs)\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/oslo_middleware/healthcheck/__init__.py\", line 582, in process_request\n body, content_type = functor(results, healthy)\n File 
\"/opt/stack/data/venv/lib/python3.10/site-packages/oslo_middleware/healthcheck/__init__.py\", line 511, in _make_json_response\n body['threads'] = self._get_threadstacks()\n File \"/opt/stack/data/venv/lib/python3.10/site-packages/oslo_middleware/healthcheck/__init__.py\", line 452, in _get_threadstacks\n traceback.print_stack(stack, file=buf)\n" ] } You may disable the healthcheck endpoint dynamically by creating a file called ``/etc/manila/healthcheck_disable``. The name of this file can be customized with the configuration option ``disable_by_file_path`` in the ``[app:healthcheck]`` section of the api paste configuration file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-key-concepts.rst0000664000175000017500000001141500000000000025241 0ustar00zuulzuul00000000000000.. _shared_file_systems_key_concepts: ============ Key concepts ============ Share ~~~~~ In the Shared File Systems service ``share`` is the fundamental resource unit allocated by the Shared File System service. It represents an allocation of a persistent, readable, and writable filesystems. Compute instances access these filesystems. Depending on the deployment configuration, clients outside of OpenStack can also access the filesystem. .. note:: A ``share`` is an abstract storage object that may or may not directly map to a "share" concept from the underlying storage provider. See the description of ``share instance`` for more details. Share instance ~~~~~~~~~~~~~~ This concept is tied with ``share`` and represents created resource on specific back end, when ``share`` represents abstraction between end user and back-end storages. In common cases, it is one-to-one relation. 
One single ``share`` has more than one ``share instance`` in two cases: - When ``share migration`` is being applied - When ``share replication`` is enabled Therefore, each ``share instance`` stores information specific to real allocated resource on storage. And ``share`` represents the information that is common for ``share instances``. A user with ``member`` role will not be able to work with it directly. Only a user with ``admin`` role has rights to perform actions against specific share instances. Snapshot ~~~~~~~~ A ``snapshot`` is a point-in-time, read-only copy of a ``share``. You can create ``Snapshots`` from an existing, operational ``share`` regardless of whether a client has mounted the file system. A ``snapshot`` can serve as the content source for a new ``share``. Specify the **Create from snapshot** option when creating a new ``share`` on the dashboard. Storage Pools ~~~~~~~~~~~~~ With the Kilo release of OpenStack, Shared File Systems can use ``storage pools``. The storage may present one or more logical storage resource pools that the Shared File Systems service will select as a storage location when provisioning ``shares``. Share Type ~~~~~~~~~~ ``Share type`` is an abstract collection of criteria used to characterize ``shares``. They are most commonly used to create a hierarchy of functional capabilities. This hierarchy represents tiered storage services levels. For example, an administrator might define a premium ``share type`` that indicates a greater level of performance than a basic ``share type``. Premium represents the best performance level. Share Access Rules ~~~~~~~~~~~~~~~~~~ ``Share access rules`` define which users can access a particular ``share``. For example, administrators can declare rules for NFS shares by listing the valid IP networks which will access the ``share``. List the IP networks in CIDR notation. Security Services ~~~~~~~~~~~~~~~~~ ``Security services`` allow granular client access rules for administrators. 
They can declare rules for authentication or authorization to access ``share`` content. External services including LDAP, Active Directory, and Kerberos can be declared as resources. Examine and consult these resources when making an access decision for a particular ``share``. You can associate ``Shares`` with multiple security services, but only one service per one type. Share Networks ~~~~~~~~~~~~~~ A ``share network`` is an object that defines a relationship between a project network and subnet, as defined in an OpenStack Networking service or Compute service. The ``share network`` is also defined in ``shares`` created by the same project. A project may find it desirable to provision ``shares`` such that only instances connected to a particular OpenStack-defined network have access to the ``share``. Also, ``security services`` can be attached to ``share networks``, because most of auth protocols require some interaction with network services. The Shared File Systems service has the ability to work outside of OpenStack. That is due to the ``StandaloneNetworkPlugin``. The plugin is compatible with any network platform, and does not require specific network services in OpenStack like Compute or Networking service. You can set the network parameters in the ``manila.conf`` file. Share Servers ~~~~~~~~~~~~~ A ``share server`` is a logical entity that hosts the shares created on a specific ``share network``. A ``share server`` may be a configuration object within the storage controller, or it may represent logical resources provisioned within an OpenStack deployment used to support the data path used to access ``shares``. ``Share servers`` interact with network services to determine the appropriate IP addresses on which to export ``shares`` according to the related ``share network``. The Shared File Systems service has a pluggable network model that allows ``share servers`` to work with different implementations of the Networking service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-manage-and-unmanage-share.rst0000664000175000017500000003270500000000000027523 0ustar00zuulzuul00000000000000.. _shared_file_systems_manage_and_unmanage_share: ========================= Manage and unmanage share ========================= To ``manage`` a share means that an administrator, rather than a share driver, manages the storage lifecycle. This approach is appropriate when an administrator already has the custom non-manila share with its size, shared file system protocol, and export path, and an administrator wants to register it in the Shared File System service. To ``unmanage`` a share means to unregister a specified share from the Shared File Systems service. Administrators can revert an unmanaged share to managed status if needed. .. _unmanage_share: Unmanage a share ---------------- .. note:: The ``unmanage`` operation is not supported for shares that were created on top of share servers and created with share networks until Shared File Systems API version ``2.49`` (Stein/Manila 8.0.0 release). .. important:: Shares that have dependent snapshots or share replicas cannot be removed from the Shared File Systems service unless the snapshots have been removed or unmanaged and the share replicas have been removed. Unmanaging a share removes it from the management of the Shared File Systems service without deleting the share. It is a non-disruptive operation and existing clients are not disconnected, and the functionality is aimed at aiding infrastructure operations and maintenance workflows. To unmanage a share, run the :command:`openstack share abandon` command. Then try to print the information about the share. The returned result should indicate that Shared File Systems service won't find the share: .. 
code-block:: console $ openstack share abandon share_for_docs $ openstack share show share_for_docs ERROR: No share with a name or ID of 'share_for_docs' exists. .. _manage_share: Manage a share -------------- .. note:: The ``manage`` operation is not supported for shares that are exported on share servers via share networks until Shared File Systems API version ``2.49`` (Stein/Manila 8.0.0 release). .. note:: From API version 2.53, if the requester specifies a share type containing a ``replication_type`` extra spec while managing a share, manila quota system will reserve and consume resources for two additional quotas: ``share_replicas`` and ``replica_gigabytes``. From API version 2.62, manila quota system will validate size of the share against ``per_share_gigabytes`` quota. To register the non-managed share in the File System service, run the :command:`openstack share adopt` command: .. code-block:: console openstack share adopt [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width ] [--fit-width] [--print-empty] [--name ] [--description ] [--share-type ] [--driver-options [ ...]] [--public] [--share-server-id ] [--wait] The positional arguments are: - service_host. The manage-share service host in ``host@backend#POOL`` format, which consists of the host name for the back end, the name of the back end, and the pool name for the back end. - protocol. The Shared File Systems protocol of the share to manage. Valid values are NFS, CIFS, GlusterFS, HDFS or MAPRFS. - export_path. The share export path in the format appropriate for the protocol: - NFS protocol. 10.0.0.1:/foo_path. - CIFS protocol. \\\\10.0.0.1\\foo_name_of_cifs_share. - HDFS protocol. hdfs://10.0.0.1:foo_port/foo_share_name. - GlusterFS. 10.0.0.1:/foo_volume. - MAPRFS. maprfs:///share-0 -C -Z -N foo. The optional arguments are: - name. The name of the share that is being managed. - share_type. The share type of the share that is being managed. 
If not specified, the service will try to manage the share with the configured default share type. - share_server_id. must be provided to manage shares within share networks. This argument can only be used with File Systems API version ``2.49`` (Stein/Manila 8.0.0 release) and beyond. - driver_options. An optional set of one or more key and value pairs that describe driver options. As a result, a special share type named ``for_managing`` was used in example. To manage share, run: .. code-block:: console $ openstack share adopt \ manila@saopaulo#shares \ nfs \ 10.0.0.10:/shares/share_e113729a_8da4_45f3_bbbf_0014f_350380c_c4b06060_9c56_459e_9219_b86a0777054b \ --name share_for_docs \ --description "We manage share." \ --share-type default +-------------------------------------+--------------------------------------+ | Field | Value | +-------------------------------------+--------------------------------------+ | id | 8b3aa39d-e07f-4255-82ac-f6f56565a725 | | size | None | | availability_zone | None | | created_at | 2025-04-03T10:57:19.230793 | | status | manage_starting | | name | share_for_docs | | description | We manage share. 
| | project_id | c0bc204890ad428796f364b677a8516b | | snapshot_id | None | | share_network_id | None | | share_proto | NFS | | metadata | {} | | share_type | 807e5cd7-a0e7-4912-8f7d-352512ce51c3 | | volume_type | default | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | default | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | c5d0c19aae6e4484a41e241f0d8b04fb | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_ | None | | id | | | mount_snapshot_support | True | | progress | None | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | manila@saopaulo#shares | +-------------------------------------+--------------------------------------+ Check that the share is available: .. code-block:: console $ openstack share show share_for_docs +-------------------------------------+--------------------------------------+ | Field | Value | +-------------------------------------+--------------------------------------+ | id | 8b3aa39d-e07f-4255-82ac-f6f56565a725 | | size | 1 | | availability_zone | manila-zone-1 | | created_at | 2025-04-03T10:57:19.230793 | | status | available | | name | share_for_docs | | description | We manage share. 
| | project_id | c0bc204890ad428796f364b677a8516b | | snapshot_id | None | | share_network_id | None | | share_proto | NFS | | share_type | 807e5cd7-a0e7-4912-8f7d-352512ce51c3 | | volume_type | default | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | default | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | c5d0c19aae6e4484a41e241f0d8b04fb | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_ | None | | id | | | mount_snapshot_support | True | | progress | 100% | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | manila@saopaulo#shares | | export_locations | | | | id = | | | ba4ad0cd-6d25-422f-97f6-a1bc383ae49d | | | path = 11.0.0.11:/shares/share_e1137 | | | 29a_8da4_45f3_bbbf_0014f350380c_c4b0 | | | 6060_9c56_459e_9219_b86a0777054b | | | preferred = False | | | metadata = {} | | | share_instance_id = | | | c4b06060-9c56-459e-9219-b86a0777054b | | | is_admin_only = True | | | id = | | | c525a3aa-b52a-4565-acf3-aacaca1167ec | | | path = 10.0.0.10:/shares/share_e1137 | | | 29a_8da4_45f3_bbbf_0014f350380c_c4b0 | | | 6060_9c56_459e_9219_b86a0777054b | | | preferred = True | | | metadata = {} | | | share_instance_id = | | | c4b06060-9c56-459e-9219-b86a0777054b | | | is_admin_only = False | | | id = | | | b5c26041-eba0-415d-8bda-f46ca67a55b9 | | | path = 10.0.0.20:/shares/share_e1137 | | | 29a_8da4_45f3_bbbf_0014f350380c_c4b0 | | | 6060_9c56_459e_9219_b86a0777054b | | | preferred = False | | | metadata = {} | | | share_instance_id = | | | c4b06060-9c56-459e-9219-b86a0777054b | | | is_admin_only = False | | properties | | +-------------------------------------+--------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/doc/source/admin/shared-file-systems-manage-and-unmanage-snapshot.rst0000664000175000017500000001070400000000000030253 0ustar00zuulzuul00000000000000.. _shared_file_systems_manage_and_unmanage_snapshot: ================================== Manage and unmanage share snapshot ================================== To ``manage`` a share snapshot means that an administrator, rather than a share driver, manages the storage lifecycle. This approach is appropriate when an administrator manages share snapshots outside of the Shared File Systems service and wants to register it with the service. To ``unmanage`` a share snapshot means to unregister a specified share snapshot from the Shared File Systems service. Administrators can revert an unmanaged share snapshot to managed status if needed. .. _unmanage_share_snapshot: Unmanage a share snapshot ------------------------- The ``unmanage`` operation is not supported for shares that were created on top of share servers and created with share networks. The Share service should have the option ``driver_handles_share_servers = False`` set in the ``manila.conf`` file. To unmanage managed share snapshot, run the :command:`manila snapshot-unmanage ` command. Then try to print the information about the share snapshot. The returned result should indicate that Shared File Systems service won't find the share snapshot: .. code-block:: console $ manila snapshot-unmanage my_test_share_snapshot $ manila snapshot-show my_test_share_snapshot ERROR: No sharesnapshot with a name or ID of 'my_test_share_snapshot' exists. .. _manage_share_snapshot: Manage a share snapshot ----------------------- To register the non-managed share snapshot in the File System service, run the :command:`manila snapshot-manage` command: .. code-block:: console manila snapshot-manage [--name ] [--description ] [--driver_options [ [ ...]]] The positional arguments are: - share. Name or ID of the share. - provider_location. 
Provider location of the share snapshot on the backend. The ``driver_options`` is an optional set of one or more key and value pairs that describe driver options. To manage share snapshot, run: .. code-block:: console $ manila snapshot-manage \ 9ba52cc6-c97e-4b40-8653-4bcbaaf9628d \ 4d1e2863-33dd-4243-bf39-f7354752097d \ --name my_test_share_snapshot \ --description "My test share snapshot" \ +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | status | manage_starting | | share_id | 9ba52cc6-c97e-4b40-8653-4bcbaaf9628d | | user_id | d9f4003655c94db5b16c591920be1f91 | | description | My test share snapshot | | created_at | 2016-07-25T04:49:42.600980 | | size | None | | share_proto | NFS | | provider_location | 4d1e2863-33dd-4243-bf39-f7354752097d | | id | 89c663b5-026d-45c7-a43b-56ef0ba0faab | | project_id | aaa33a0ca4324965a3e65ae47e864e94 | | share_size | 1 | | name | my_test_share_snapshot | +-------------------+--------------------------------------+ Check that the share snapshot is available: .. 
code-block:: console $ manila snapshot-show my_test_share_snapshot +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | status | available | | share_id | 9ba52cc6-c97e-4b40-8653-4bcbaaf9628d | | user_id | d9f4003655c94db5b16c591920be1f91 | | description | My test share snapshot | | created_at | 2016-07-25T04:49:42.000000 | | size | 1 | | share_proto | NFS | | provider_location | 4d1e2863-33dd-4243-bf39-f7354752097d | | id | 89c663b5-026d-45c7-a43b-56ef0ba0faab | | project_id | aaa33a0ca4324965a3e65ae47e864e94 | | share_size | 1 | | name | my_test_share_snapshot | +-------------------+--------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-multi-backend.rst0000664000175000017500000000376400000000000025364 0ustar00zuulzuul00000000000000.. _shared_file_systems_multi_backend: =========================== Multi-storage configuration =========================== The Shared File Systems service can provide access to one or more file storage back ends. In general, the workflow with multiple back ends looks similar to the Block Storage service one. Using ``manila.conf``, you can spawn multiple share services. To do it, you should set the `enabled_share_backends` flag in the ``manila.conf`` file. This flag defines the comma-separated names of the configuration stanzas for the different back ends. One name is associated to one configuration group for a back end. The following example runs three configured share services: .. 
code-block:: ini [DEFAULT] enabled_share_backends=backendEMC1,backendGeneric1,backendNetApp [backendGeneric1] share_driver=manila.share.drivers.generic.GenericShareDriver share_backend_name=one_name_for_two_backends service_instance_user=ubuntu_user service_instance_password=ubuntu_user_password service_image_name=ubuntu_image_name path_to_private_key=/home/foouser/.ssh/id_rsa path_to_public_key=/home/foouser/.ssh/id_rsa.pub [backendEMC1] share_driver=manila.share.drivers.emc.driver.EMCShareDriver share_backend_name=backendEMC2 emc_share_backend=vnx emc_nas_server=1.1.1.1 emc_nas_password=password emc_nas_login=user emc_nas_server_container=server_3 emc_nas_pool_name="Pool 2" [backendNetApp] share_driver = manila.share.drivers.netapp.common.NetAppDriver driver_handles_share_servers = True share_backend_name=backendNetApp netapp_login=user netapp_password=password netapp_server_hostname=1.1.1.1 netapp_root_volume_aggregate=aggr01 To spawn separate groups of share services, you can use separate configuration files. If it is necessary to control each back end in a separate way, you should provide a single configuration file per each back end. .. toctree:: shared-file-systems-scheduling.rst shared-file-systems-services-manage.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-network-plugins.rst0000664000175000017500000001246700000000000026015 0ustar00zuulzuul00000000000000.. _shared_file_systems_network_plugins: ================ Network plug-ins ================ The Shared File Systems service architecture defines an abstraction layer for network resource provisioning and allowing administrators to choose from a different options for how network resources are assigned to their projects' networked storage. There are a set of network plug-ins that provide a variety of integration approaches with the network services that are available with OpenStack. 
What is a network plugin in Manila? ----------------------------------- A network plugin is a python class that uses a specific facility (e.g. Neutron network) to provide network resources to the :term:`manila-share` service. When to use a network plugin? ----------------------------- A Manila `share driver` may be configured in one of two modes, where it is managing the lifecycle of `share servers` on its own or where it is merely providing storage resources on a pre-configured share server. This mode is defined using the boolean option `driver_handles_share_servers` in the Manila configuration file. A network plugin is only useful when a driver is handling its own share servers. .. note:: Not all share drivers support both modes. Each driver must report which mode(s) it supports to the manila-share service. When `driver_handles_share_servers` is set to `True`, a share driver will be called to create share servers for shares using information provided within a `share network`. This information will be provided to one of the enabled network plugins that will handle reservation, creation and deletion of network resources including `IP addresses` and `network interfaces`. The Shared File Systems service may need a network resource provisioning if share service with specified driver works in mode, when a share driver manages lifecycle of share servers on its own. This behavior is defined by a flag ``driver_handles_share_servers`` in share service configuration. When ``driver_handles_share_servers`` is set to ``True``, a share driver will be called to create share servers for shares using information provided within a share network. This information will be provided to one of the enabled network plug-ins that will handle reservation, creation and deletion of network resources including IP addresses and network interfaces. What network plug-ins are available? 
------------------------------------ There are three network plug-ins and three python classes in the Shared File Systems service: #. Network plug-in for using the OpenStack Networking service. It allows to use any network segmentation that the Networking service supports. It is up to each share driver to support at least one network segmentation type. a) ``manila.network.neutron.neutron_network_plugin.NeutronNetworkPlugin``. This is a default network plug-in. It requires the ``neutron_net_id`` property and the ``neutron_subnet_id`` property to be provided when creating the share network that will be used for the creation of share servers. The user may define any number of share networks corresponding to the various physical network segments in a project environment. .. note:: When a share network contains a router provider network with multiple segments, the ``neutron_physical_net_name`` option should be additionally configured in ``manila.conf``. b) ``manila.network.neutron.neutron_network_plugin.NeutronSingleNetworkPlugin``. This is a simplification of the previous case. It accepts values for ``neutron_net_id`` and ``neutron_subnet_id`` from the ``manila.conf`` configuration file and uses one network for all shares. c) ``manila.network.neutron.neutron_network_plugin.NeutronBindNetworkPlugin``. This driver waits for active binding and fails if a Neutron port can't be bound or an error occurs. This plugin is useful for agent based binding (like OVS with docker driver) and fabric binding where real hardware reconfiguration is taking place. The existing ``NeutronBindSingleNetworkPlugin`` is a combination of `1b` and `1c`. When only a single network is needed, the NeutronSingleNetworkPlugin (1.b) is a simple solution. Otherwise NeutronNetworkPlugin (1.a) should be chosen. #. Network plug-in for specifying networks independently from OpenStack networking services. a) ``manila.network.standalone_network_plugin.StandaloneNetworkPlugin``. 
This plug-in uses a pre-existing network that is available to the manila-share host. This network may be handled either by OpenStack or be created independently by any other means. The plug-in supports any type of network - flat and segmented. As above, it is completely up to the share driver to support the network type for which the network plug-in is configured. .. note:: The ip version of the share network is defined by the flags of ``network_plugin_ipv4_enabled`` and ``network_plugin_ipv6_enabled`` in the ``manila.conf`` configuration since Pike. The ``network_plugin_ipv4_enabled`` default value is set to True. The ``network_plugin_ipv6_enabled`` default value is set to False. If ``network_plugin_ipv6_enabled`` option is True, the value of ``network_plugin_ipv4_enabled`` will be ignored, it means to support both IPv4 and IPv6 share network. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-networking.rst0000664000175000017500000000107000000000000025020 0ustar00zuulzuul00000000000000.. _shared_file_systems_networking: ========== Networking ========== Unlike the OpenStack Block Storage service, the Shared File Systems service must connect to the Networking service. The share service requires the option to self-manage share servers. For client authentication and authorization, you can configure the Shared File Systems service to work with different network authentication services, like LDAP, Kerberos protocols, or Microsoft Active Directory. .. toctree:: shared-file-systems-share-networks.rst shared-file-systems-network-plugins.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-profiling.rst0000664000175000017500000002057100000000000024631 0ustar00zuulzuul00000000000000.. 
_shared_file_systems_profiling: ========================================== Profiling the Shared File Systems service ========================================== Profiler ^^^^^^^^ The detailed description of the profiler and its config options is available at `Profiler docs `_. Using Profiler ^^^^^^^^^^^^^^ To start profiling Manila code, the following steps have to be taken: #. Add the following lines to the ``/etc/manila/manila.conf`` file (the profiling is disabled by default). .. code-block:: console [profiler] connection_string = redis://localhost:6379 hmac_keys = SECRET_KEY trace_sqlalchemy = True enabled = True Examples of possible values for ``connection_string`` option: * ``messaging://`` - use oslo_messaging driver for sending spans. * ``redis://127.0.0.1:6379`` - use redis driver for sending spans. * ``mongodb://127.0.0.1:27017`` - use mongodb driver for sending spans. * ``elasticsearch://127.0.0.1:9200`` - use elasticsearch driver for sending spans. * ``jaeger://127.0.0.1:6831`` - use jaeger tracing as driver for sending spans. #. Restart all manila services and keystone service. #. To verify profiler with manilaclient, run any command with ``--profile .`` The key (e.g. SECRET_KEY) should be one of the ``hmac_keys`` mentioned in manila.conf. To generate correct profiling information across all services at least one key needs to be consistent between OpenStack projects. .. 
code-block:: console $ manila --profile SECRET_KEY create NFS 1 --name Share1 --share-network testNetwork --share-type dhss_true +---------------------------------------+--------------------------------------+ | Property | Value | +---------------------------------------+--------------------------------------+ | id | 9703da88-25ba-41e6-827d-a6932f708dd4 | | size | 1 | | availability_zone | None | | created_at | 2021-02-23T11:21:38.000000 | | status | creating | | name | Share1 | | description | None | | project_id | c67b2fd35b054060971d28cf654ee92a | | snapshot_id | None | | share_network_id | 03754c58-1456-497f-b7d6-8f36a4d644f0 | | share_proto | NFS | | metadata | {} | | share_type | 5b1a4133-371c-4583-a801-f2b6e1ae102d | | is_public | False | | snapshot_support | False | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 7ecd60ddae1448b79449dc6434460eaf | | create_share_from_snapshot_support | False | | revert_to_snapshot_support | False | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | False | | progress | None | | share_server_id | None | | host | | +---------------------------------------+--------------------------------------+ Profiling trace ID: 1705dfd8-e45a-46cd-b0e2-2e40fd9e5f22 To display trace use next command: osprofiler trace show --html 1705dfd8-e45a-46cd-b0e2-2e40fd9e5f22 #. To verify profiler with openstackclient, run any command with ``--os-profile ``. .. 
code-block:: console $ openstack --os-profile SECRET_KEY share create NFS 1 --name Share2 --share-network testNetwork --share-type dhss_true +---------------------------------------+--------------------------------------+ | Field | Value | +---------------------------------------+--------------------------------------+ | access_rules_status | active | | availability_zone | None | | create_share_from_snapshot_support | False | | created_at | 2021-02-23T11:23:41.000000 | | description | None | | has_replicas | False | | host | | | id | 78a19734-394f-4967-9671-c226df00a023 | | is_public | False | | metadata | {} | | mount_snapshot_support | False | | name | Share2 | | progress | None | | project_id | c67b2fd35b054060971d28cf654ee92a | | replication_type | None | | revert_to_snapshot_support | False | | share_group_id | None | | share_network_id | 03754c58-1456-497f-b7d6-8f36a4d644f0 | | share_proto | NFS | | share_server_id | None | | share_type | 5b1a4133-371c-4583-a801-f2b6e1ae102d | | share_type_name | dhss_true | | size | 1 | | snapshot_id | None | | snapshot_support | False | | source_share_group_snapshot_member_id | None | | status | creating | | task_state | None | | user_id | 7ecd60ddae1448b79449dc6434460eaf | | volume_type | dhss_true | +---------------------------------------+--------------------------------------+ Trace ID: 0ca7ce01-36a9-481c-8b3d-263a3b5caa35 Short trace ID for OpenTracing-based drivers: 8b3d263a3b5caa35 Display trace data with command: osprofiler trace show --html 0ca7ce01-36a9-481c-8b3d-263a3b5caa35 #. To display the trace date in HTML format, run below command. .. 
code-block:: console $ osprofiler trace show --html 0ca7ce01-36a9-481c-8b3d-263a3b5caa35 --connection-string redis://localhost:6379 --out /opt/stack/output.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-quotas.rst0000664000175000017500000002425400000000000024156 0ustar00zuulzuul00000000000000.. _shared_file_systems_quotas: ================= Quotas and limits ================= Limits are usage restrictions imposed on consumers of the Shared File Systems service (manila). These limits can be of two types: * Limits on resource consumption (also referred to as ``quotas``) * Limits on usage of APIs (also referred to as ``rate-limits``) Administrators can setup and manipulate these limits at any point of time. Users can query their rate limits and quotas. If an administrator does not set up limits explicitly, the service does not impose any rate limits but it enforces default resource limits (also referred to as ``default quotas``). Users can query their absolute limits using the :command:`openstack share limits show --absolute` command. .. 
code-block:: console $ openstack share limits show --absolute +------------------------------+-------+ | Name | Value | +------------------------------+-------+ | maxTotalShares | 50 | | maxTotalShareSnapshots | 50 | | maxTotalShareGigabytes | 1000 | | maxTotalSnapshotGigabytes | 1000 | | maxTotalShareNetworks | 10 | | maxTotalShareGroups | 50 | | maxTotalShareGroupSnapshots | 50 | | maxTotalShareReplicas | 100 | | maxTotalReplicaGigabytes | 1000 | | maxTotalShareBackups | 10 | | maxTotalBackupGigabytes | 1000 | | totalSharesUsed | 2 | | totalShareSnapshotsUsed | 0 | | totalShareGigabytesUsed | 2 | | totalSnapshotGigabytesUsed | 0 | | totalShareNetworksUsed | 0 | | totalShareGroupsUsed | 0 | | totalShareGroupSnapshotsUsed | 0 | | totalShareReplicasUsed | 0 | | totalReplicaGigabytesUsed | 0 | | totalShareBackupsUsed | 0 | | totalBackupGigabytesUsed | 0 | +------------------------------+-------+ API Rate Limits ~~~~~~~~~~~~~~~ API Rate limits control the frequency at which users can make specific API requests. Administrators can use rate limiting on the type and number of API calls that can be made in a specific time interval. For example, a rate limit can control the number of ``GET`` requests processed during a one-minute period. To set API rate limits, copy and modify the ``etc/manila/api-paste.ini`` file. You need to restart ``manila-api`` service after you edit the ``api-paste.ini`` file. .. code-block:: ini [filter:ratelimit] paste.filter_factory = manila.api.v1.limits:RateLimitingMiddleware.factory limits = (POST, "*/shares", ^/shares, 120, MINUTE);(PUT, "*/shares", .*, 120, MINUTE);(DELETE, "*", .*, 120, MINUTE) Also, add the ``ratelimit`` to ``noauth`` and ``keystone`` parameters in the ``[composite:openstack_share_api]`` and ``[composite:openstack_share_api_v2]`` groups. .. 
code-block:: ini [composite:openstack_share_api] use = call:manila.api.middleware.auth:pipeline_factory noauth = cors faultwrap ssl ratelimit sizelimit noauth api keystone = cors faultwrap ssl ratelimit sizelimit authtoken keystonecontext api keystone_nolimit = cors faultwrap ssl sizelimit authtoken keystonecontext api [composite:openstack_share_api_v2] use = call:manila.api.middleware.auth:pipeline_factory noauth = cors faultwrap ssl ratelimit sizelimit noauth apiv2 keystone = cors faultwrap ssl ratelimit sizelimit authtoken keystonecontext apiv2 keystone_nolimit = cors faultwrap ssl sizelimit authtoken keystonecontext apiv2 Finally, set the ``[DEFAULT]/api_rate_limit`` parameter in ``manila.conf`` to ``True``. .. code-block:: ini [DEFAULT] api_rate_limit=True To see the rate limits, run: .. code-block:: console $ openstack share limits show --rate +--------+----------+------------+-------+-----------+--------+----------------------+ | Verb | Regex | URI | Value | Remaining | Unit | Next Available | +--------+----------+------------+-------+-----------+--------+----------------------+ | POST | ^/shares | "*/shares" | 120 | 120 | MINUTE | 2025-02-25T02:15:39Z | | PUT | .* | "*/shares" | 120 | 120 | MINUTE | 2025-02-25T02:15:39Z | | DELETE | .* | "*" | 120 | 120 | MINUTE | 2025-02-25T02:15:39Z | +--------+----------+------------+-------+-----------+--------+----------------------+ Default Resource Quotas ~~~~~~~~~~~~~~~~~~~~~~~ It is possible to set limits on the number of ``shares``, ``snapshots``, ``share-networks``, ``share_groups`` (requires API version 2.40), ``share_group_snapshots`` (requires API version 2.40) and ``share_replicas`` (requires API version 2.53). Alongside limits can also be set on capacity with ``gigabytes`` (total size of shares allowed), ``snapshot-gigabytes`` (total size of snapshots allowed), ``replica_gigabytes`` (requires API version 2.53) or ``per_share_gigabytes`` (requires API version 2.62). 
If these resource quotas are not set by an administrator, default quotas that are hardcoded in the service will apply. To view these default quotas, the administrator can use the :command:`openstack share quota show –class default` command: .. code-block:: console $ openstack share quota show %project_id% --defaults +-----------------------+----------------------------------+ | Field | Value | +-----------------------+----------------------------------+ | backup_gigabytes | 1000 | | backups | 10 | | gigabytes | 1000 | | id | a0ce678da60e4ca18010016d44ee6e83 | | per_share_gigabytes | -1 | | replica_gigabytes | 1000 | | share_group_snapshots | 50 | | share_groups | 50 | | share_networks | 10 | | share_replicas | 100 | | shares | 50 | | snapshot_gigabytes | 1000 | | snapshots | 50 | +-----------------------+----------------------------------+ Administrators can modify default quotas with the :command:`openstack share quota set --class default` command: .. code-block:: console openstack share quota set --class default --shares 30 --snapshots 50 --share-groups 15 Alternatively, you can also specify these defaults via the ``manila.conf``. The following is an example: .. code-block:: ini [quota] shares = 30 share_gigabytes = 10000 share_networks = 50 share_snapshots = 100 .. important:: Default quotas specified via the API will always take precedence over any defaults applied via ``manila.conf``. Therefore it is recommended to always use the API when creating or manipulating default quotas. Custom quotas ~~~~~~~~~~~~~ The administrator can customize quotas for a specific project, or for a specific user within a project context, or for a share type used by users of a project. To list the quotas for a project or user, use the :command:`openstack share quota show` command. If you specify the optional ``--user`` parameter, you get the quotas for this user in the specified project. If you omit this parameter, you get the quotas for the specified project. 
If there are no overrides, the quotas shown will match the defaults. .. note:: The Shared File Systems service does not perform mapping of usernames and project names to IDs. Provide only ID values to get correct setup of quotas. Setting it by names you set quota for nonexistent project/user. In case quota is not set explicitly by project/user ID, The Shared File Systems service just applies default quotas. .. code-block:: console $ openstack share quota show %project_id% --user %user_id% +-----------------------+----------------------------------+ | Field | Value | +-----------------------+----------------------------------+ | backup_gigabytes | 1000 | | backups | 10 | | gigabytes | 1000 | | id | a0ce678da60e4ca18010016d44ee6e83 | | per_share_gigabytes | -1 | | replica_gigabytes | 1000 | | share_group_snapshots | 50 | | share_groups | 50 | | share_networks | 10 | | share_replicas | 100 | | shares | 50 | | snapshot_gigabytes | 1000 | | snapshots | 50 | +-----------------------+----------------------------------+ These quotas can be updated with the :command:`openstack share quota set` command. .. code-block:: console $ openstack share quota set %project_id% --user %user_id% --shares 49 --snapshots 49 The service will prevent the quota being set lower than the current consumption. However, a quota update can still be made if necessary with the``force`` key. .. code-block:: console $ openstack share quota set %project_id% --shares 51 --snapshots 51 --force The administrator can also update the quotas for a specific share type. Share Type quotas cannot be set for individual users within a project. They can only be applied across all users of a particular project. .. code-block:: console $ openstack share quota set %project_id% --share-type %share_type_id% To revert quotas to default for a project or for a user, simply delete the quota that has been set: .. 
code-block:: console $ openstack share quota delete %project_id% --user %user_id% Share type quotas can be reverted in the same way. Except, Share Type quotas can not be set for individual users within a project, so they cannot be unset either. .. code-block:: console $ openstack share quota delete %project_id% --share-type %share_type_id% ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-scheduling.rst0000664000175000017500000000300100000000000024752 0ustar00zuulzuul00000000000000.. _shared_file_systems_scheduling: ========== Scheduling ========== The Shared File Systems service uses a scheduler to provide unified access for a variety of different types of shared file systems. The scheduler collects information from the active shared services, and makes decisions such as what shared services will be used to create a new share. To manage this process, the Shared File Systems service provides Share types API. A share type is a list from key-value pairs called extra-specs. The scheduler uses required and un-scoped extra-specs to look up the shared service most suitable for a new share with the specified share type. For more information about extra-specs and their type, see `Capabilities and Extra-Specs `_ section in developer documentation. The general scheduler workflow: #. Share services report information about their existing pool number, their capacities, and their capabilities. #. When a request on share creation arrives, the scheduler picks a service and pool that best serves the request, using share type filters and back end capabilities. If back end capabilities pass through, all filters request the selected back end where the target pool resides. #. The share driver receives a reply on the request status, and lets the target pool serve the request as the scheduler instructs. 
The scoped and un-scoped share types are available for the driver implementation to use as needed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-security-services.rst0000664000175000017500000002755400000000000026340 0ustar00zuulzuul00000000000000.. _shared_file_systems_security_services: ================= Security services ================= A security service stores client configuration information used for authentication and authorization (AuthN/AuthZ). For example, a share server will be the client for an existing service such as LDAP, Kerberos, or Microsoft Active Directory. You can associate a share with one to three security service types: - ``ldap``: LDAP. - ``kerberos``: Kerberos. - ``active_directory``: Microsoft Active Directory. You can configure a security service with these options: - A DNS IP address. - An IP address or host name. - A domain. - A user or group name. - The password for the user, if you specify a user name. You can add the security service to the :ref:`share network `. To create a security service, specify the security service type, a description of a security service, DNS IP address used inside project's network, security service IP address or host name, domain, security service user or group used by project, and a password for the user. The share name is optional. Create a ``ldap`` security service: .. 
code-block:: console $ openstack share security service create ldap \ --dns-ip 8.8.8.8 --server 10.254.0.3 \ --name my_ldap_security_service +-----------------+--------------------------------------+ | Field | Value | +-----------------+--------------------------------------+ | id | 266d7c94-db18-47af-b6db-0c3a663e39f5 | | name | my_ldap_security_service | | type | ldap | | status | new | | created_at | 2025-04-04T12:44:17.131358 | | updated_at | None | | description | None | | dns_ip | 8.8.8.8 | | server | 10.254.0.3 | | domain | None | | user | None | | password | None | | project_id | ae096acaa6ce4a3bb4f5a7f7a324514c | | ou | None | | default_ad_site | None | +-----------------+--------------------------------------+ To create ``kerberos`` security service, run: .. code-block:: console $ openstack share security service create kerberos \ --server 10.254.0.3 --user demo --password secret \ --name my_kerberos_security_service \ --description "Kerberos security service" +-----------------+--------------------------------------+ | Field | Value | +-----------------+--------------------------------------+ | id | a6b3634d-63ba-460d-b506-bde475d9c634 | | name | my_kerberos_security_service | | type | kerberos | | status | new | | created_at | 2025-04-04T12:52:24.537002 | | updated_at | None | | description | Kerberos security service | | dns_ip | None | | server | 10.254.0.3 | | domain | None | | user | demo | | password | secret | | project_id | ae096acaa6ce4a3bb4f5a7f7a324514c | | ou | None | | default_ad_site | None | +-----------------+--------------------------------------+ To see the list of created security service use :command:`openstack share security service list`: .. 
code-block:: console $ openstack share security service list +--------------------------------------+------------------------------+--------+----------+ | ID | Name | Status | Type | +--------------------------------------+------------------------------+--------+----------+ | 266d7c94-db18-47af-b6db-0c3a663e39f5 | my_ldap_security_service | new | ldap | | a6b3634d-63ba-460d-b506-bde475d9c634 | my_kerberos_security_service | new | kerberos | +--------------------------------------+------------------------------+--------+----------+ You can add a security service to the existing :ref:`share network `, which is not yet used (a ``share network`` not associated with a share). Add a security service to the share network with ``openstack share network set --new-security-service`` specifying share network and security service. The command returns information about the security service. You can see view new attributes and ``share_networks`` using the associated share network ID. .. code-block:: console $ openstack share network set share_net2 \ --new-security-service my_ldap_security_service $ openstack share security service show my_ldap_security_service +-----------------+-------------------------------------------+ | Property | Value | +-----------------+-------------------------------------------+ | id | 266d7c94-db18-47af-b6db-0c3a663e39f5 | | name | my_ldap_security_service | | type | ldap | | status | new | | created_at | 2025-04-04T12:44:17.131358 | | updated_at | None | | description | None | | dns_ip | 8.8.8.8 | | server | 10.254.0.3 | | domain | None | | user | None | | password | None | | project_id | ae096acaa6ce4a3bb4f5a7f7a324514c | | ou | None | | default_ad_site | None | | share_networks | [u'6d36c41f-d310-4aff-a0c2-ffd870e91cab'] | +----------------+--------------------------------------------+ It is possible to see the list of security services associated with a given share network. List security services for ``share_net2`` share network with: .. 
code-block:: console $ openstack share network show share_net2 +-----------------------------------+------------------------------------------------------------+ | Field | Value | +-----------------------------------+------------------------------------------------------------+ | id | 6d36c41f-d310-4aff-a0c2-ffd870e91cab | | name | share_net2 | | project_id | ae096acaa6ce4a3bb4f5a7f7a324514c | | created_at | 2025-04-03T12:34:12.211349 | | updated_at | None | | description | None | | status | active | | security_service_update_support | True | | network_allocation_update_support | True | | share_network_subnets | | | | id = 55916458-1272-4d41-95d9-b1bfbc2e2da1 | | | availability_zone = None | | | created_at = 2025-04-08T21:27:22.735925 | | | updated_at = None | | | segmentation_id = None | | | neutron_net_id = None | | | neutron_subnet_id = None | | | ip_version = None | | | cidr = None | | | network_type = None | | | mtu = None | | | gateway = None | | | properties = | | security_services | | | | security_service_name = my_ldap_security_service | | | security_service_id = 266d7c94-db18-47af-b6db-0c3a663e39f5 | +-----------------------------------+------------------------------------------------------------+ You also can dissociate a security service from the share network and confirm that the security service now has an empty list of share networks: .. 
code-block:: console $ openstack share network unset --security-service my_ldap_security_service share_net2 $ openstack share security service show my_ldap_security_service +-----------------+--------------------------------------+ | Property | Value | +-----------------+--------------------------------------+ | id | 266d7c94-db18-47af-b6db-0c3a663e39f5 | | name | my_ldap_security_service | | type | ldap | | status | new | | created_at | 2025-04-04T12:44:17.131358 | | updated_at | None | | description | None | | dns_ip | 8.8.8.8 | | server | 10.254.0.3 | | domain | None | | user | None | | password | None | | project_id | ae096acaa6ce4a3bb4f5a7f7a324514c | | ou | None | | default_ad_site | None | | share_networks | [] | +-----------------+--------------------------------------+ The Shared File Systems service allows you to update a security service field using :command:`openstack share security service set` command with optional arguments such as ``--dns-ip``, ``--server``, ``--domain``, ``--ou``, ``server``, ``default_ad_site``, ``--user``, ``--password``, ``--name``, or ``--description`` and a required ``security-service`` argument. To remove a security service not associated with any share networks run: .. code-block:: console $ openstack share security service delete my_ldap_security_service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-services-manage.rst0000664000175000017500000000411400000000000025704 0ustar00zuulzuul00000000000000.. _shared_file_systems_services_manage.rst: ====================== Manage shares services ====================== The Shared File Systems service provides API that allows to manage running share services (`Share services API `_). Using the :command:`manila service-list` command, it is possible to get a list of all kinds of running services. 
To select only share services, you can pick items that have field ``binary`` equal to ``manila-share``. Also, you can enable or disable share services using raw API requests. Disabling means that share services are excluded from the scheduler cycle and new shares will not be placed on the disabled back end. However, shares from this service stay available. With 2024.2 release, admin can schedule share on disabled back end using ``only_host`` scheduler hint. Recalculating the shares' export location ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Until the 2024.2 release, recalculating a share's export location required restarting the corresponding manila-share manager service. This action triggered the backend driver's "ensure shares" operation, which would execute a series of steps to update the export locations. Starting with the 2024.2 release, as an administrator, you can initiate export location recalculation without restarting the manila-share service. This can now be done directly through the "ensure shares" API. It is possible to start the ensure shares procedure even if a service is already running it. To start ensure shares on a given manila-share binary, run the :command:`openstack share service ensure shares` command: .. code-block:: console $ openstack share service ensure shares .. note:: When this command is issued, the ``manila-share`` manager will by default change the status of the shares to ``ensuring``, unless the :ref:`common configuration option ` named ``update_shares_status_on_ensure`` is changed to ``False``. .. note:: The service will have its ``ensuring`` field set to ``True`` while this operation is still in progress. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-backup-management.rst0000664000175000017500000003641500000000000027323 0ustar00zuulzuul00000000000000.. 
_shared-file-systems-share-backup-management: ======================= Share backup management ======================= Share backup management is the feature that provides the capability to create a backup for the given share, restore a backup, and delete a backup. It is a valuable feature for most shared file system users, especially for NAS users. Use cases ~~~~~~~~~ As an administrator, you may want to backup and restore your share so that archival can be made simpler and you can bring back the old data whenever required. It includes: * Create a backup * Delete a backup * Restore a backup in specified share Backup/Restore workflows ~~~~~~~~~~~~~~~~~~~~~~~~ Starting from 2023.2, a generic approach for backing up shares through the manila data service has been implemented where the backup of the shares can be stored on a NFS path which are mounted on control nodes. This driver matches the workflows of cinder NFSBackupDriver and thus it helps users with less learning time, and provides the basic backup ability. The vendor that supports NFS, must provide space for NFS to interconnect with NFS backup drivers. The implementation of NFS backup driver will be generic though. The backup process for this driver consists of: * Make sure share is in available state and not busy. * Allow read access to share and write access to backup share. * Mount the share and backend driver's share(i.e. backup share) to the data service node. * Copy data from share to backup share. * Unmount the share and backup share. * Deny access to share and backup share. For the generic NFS backup approach, only one backup backend is allowed for simplicity, at the moment. By default no backup driver will be enabled. To enable the backup driver, use the below configurations in manila.conf .. 
code-block:: console backup_driver = manila.data.drivers.nfs.NFSBackupDriver backup_mount_export = :/ backup_mount_options = '-o vers=',minorversion=1 New status for backup and share: * backup * creating * available * deleting * deleted * error_deleting * backup_restoring * error * share * backing_creating * backup_restoring * backup_restoring_error During backup, share will be marked as busy and other operations on share such as delete, soft_delete, migration, extend, shrink, ummanage, revert_to_snapshot, crate_snapshot, create_replica etc can not be performed unless share becomes available. Finally, whether or not the share is successfully backed up, the state of the share is rolled back to the available state. In case the backup fails, share task_state will contain the failure information. Also, failure message will be recorded. New clean up actions: The backup and restore actions could break when service is down, so new clean up action will be added to reset the status and clean temporary files (if involved). New quotas for backup : * ``quota_backups``: indicate the share backups allowed per project. * ``quota_backup_gigabytes``: indicate the total amount of storage, in gigabytes, allowed for backups per project. Using the backup APIs (CLI): ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The commands to interact with the share backup API are: * ``openstack share backup create``: It creates a backup for the share on the NFS path. The backup becomes creating and it becomes availabe when the backup is completed. .. 
code-block:: console $ openstack share backup create --help # to see the help of all the available options $ openstack share backup create --name manila_backup1 25a6f80e-306e-4bb8-ad27-cf6800955228 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | availability_zone | manila-zone-0 | | created_at | 2024-03-21T12:49:35.719214 | | description | None | | host | None | | id | c2022366-0701-44d2-b48b-aa95a666efa5 | | name | manila_backup1 | | progress | 0 | | restore_progress | 0 | | share_id | 25a6f80e-306e-4bb8-ad27-cf6800955228 | | size | 1 | | status | creating | | topic | None | | updated_at | None | +-------------------+--------------------------------------+ * ``openstack share backup list``: It prints the current status of the backup. It is set to ``available`` if all operations succeeded. .. code-block:: console $ openstack share backup list +--------------------------------------+----------------+--------------------------------------+-----------+ | ID | Name | Share ID | Status | +--------------------------------------+----------------+--------------------------------------+-----------+ | c2022366-0701-44d2-b48b-aa95a666efa5 | manila_backup1 | 25a6f80e-306e-4bb8-ad27-cf6800955228 | available | +--------------------------------------+----------------+--------------------------------------+-----------+ $ * ``openstack share backup show``: It obtains the latest information of the backup. .. 
code-block:: console $ openstack share backup show c2022366-0701-44d2-b48b-aa95a666efa5 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | availability_zone | manila-zone-0 | | created_at | 2024-03-21T12:49:36.000000 | | description | None | | host | vm.openstack.opendev.com | | id | c2022366-0701-44d2-b48b-aa95a666efa5 | | name | manila_backup1 | | progress | 100 | | restore_progress | 0 | | share_id | 25a6f80e-306e-4bb8-ad27-cf6800955228 | | size | 1 | | status | available | | topic | manila-data | | updated_at | 2024-03-21T12:50:07.000000 | +-------------------+--------------------------------------+ $ * ``openstack share backup set``: It sets the name and description for the backup. .. code-block:: console $ openstack share backup set c2022366-0701-44d2-b48b-aa95a666efa5 --name "new_name" --description "backup_taken_on_march_21" .. note:: This command has no output. .. code-block:: console $ openstack share backup show c2022366-0701-44d2-b48b-aa95a666efa5 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | availability_zone | manila-zone-0 | | created_at | 2024-03-21T12:49:36.000000 | | description | backup_taken_on_march_21 | | host | vm.openstack.opendev.com | | id | c2022366-0701-44d2-b48b-aa95a666efa5 | | name | new_name | | progress | 100 | | restore_progress | 0 | | share_id | 25a6f80e-306e-4bb8-ad27-cf6800955228 | | size | 1 | | status | available | | topic | manila-data | | updated_at | 2024-03-21T12:57:09.000000 | +-------------------+--------------------------------------+ Using the backup APIs (REST): ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ APIs will be experimental, until some cycles of testing, and the eventual graduation of them. 
You can refer to this link for more information `REST API Support `_ Backup/Restore via backup types (Vendor specific) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are use cases where individual storage vendors/drivers might have a robust solution in their own storage to back up the data. When such features are available in the storage, the individual drivers can be enhanced to build their own backup solutions by extending the existing manila backup drivers with the use of backup types. Thus, shares created in Manila on such storage can be easily backed up via vendor-specific solutions. .. note:: `backup_type` was added to backup API responses in version 2.85. Starting from 2024.1, a concept named ``backup_type`` has been introduced. This is needed for creating backups with third party drivers, in case an implementation is available. The ``backup_type`` is a construct which should have backup specific parameters such as ``backup_type_name``. .. note:: The sample config will look like this: ``eng_data_backup`` is the backup_type here.:: [eng_data_backup] backup_type_name=my_backup [nas_storage] enabled_backup_types = eng_data_backup Backup/Restore workflows via backup type: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Users can create, delete and restore backups on vendor-specific storage using backup_type. .. note:: Before using this feature, you need to check with your storage partner for the availability of this feature in Manila drivers. The workflow of creating, viewing, restoring and deleting backups is captured below for user reference. ..
code-block:: console $ openstack share backup list +--------------------------------------+-------+--------------------------------------+-----------+ | ID | Name | Share ID | Status | +--------------------------------------+-------+--------------------------------------+-----------+ | 8a9b3ce0-23bb-4923-b8ce-d0dd1f56b2b8 | test4 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | available | +--------------------------------------+-------+--------------------------------------+-----------+ $ openstack share backup create --name test5 --backup-options backup_type=eng_data_backup source_share +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | availability_zone | manila-zone-0 | | backup_type | backup_type1 | | created_at | 2024-03-11T18:15:32.183982 | | description | None | | host | vm.openstack.opendev.com@nas_storage | | id | 4b468327-d03f-4df7-97ef-c5230b5beafc | | name | test5 | | progress | 0 | | restore_progress | 0 | | share_id | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | | size | 1 | | status | creating | | topic | None | | updated_at | None | +-------------------+--------------------------------------+ $ openstack share backup list +--------------------------------------+-------+--------------------------------------+-----------+ | ID | Name | Share ID | Status | +--------------------------------------+-------+--------------------------------------+-----------+ | 4b468327-d03f-4df7-97ef-c5230b5beafc | test5 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | creating | | 8a9b3ce0-23bb-4923-b8ce-d0dd1f56b2b8 | test4 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | available | +--------------------------------------+-------+--------------------------------------+-----------+ $ openstack share backup show test5 +-------------------+------------------------------------------------+ | Field | Value | +-------------------+------------------------------------------------+ | availability_zone | manila-zone-0 
| | backup_type | backup_type1 | | created_at | 2024-03-11T18:15:32.000000 | | description | None | | host | scs000215254-1.nb.openenglab.netapp.com@ontap1 | | id | 4b468327-d03f-4df7-97ef-c5230b5beafc | | name | test5 | | progress | 0 | | restore_progress | 0 | | share_id | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | | size | 1 | | status | creating | | topic | manila-share | | updated_at | 2024-03-11T18:15:32.000000 | +-------------------+------------------------------------------------+ $ openstack share backup list +--------------------------------------+-------+--------------------------------------+-----------+ | ID | Name | Share ID | Status | +--------------------------------------+-------+--------------------------------------+-----------+ | 4b468327-d03f-4df7-97ef-c5230b5beafc | test5 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | available | | 8a9b3ce0-23bb-4923-b8ce-d0dd1f56b2b8 | test4 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | available | +--------------------------------------+-------+--------------------------------------+-----------+ $ openstack share backup restore test4 $ openstack share backup list +--------------------------------------+-------+--------------------------------------+-----------+ | ID | Name | Share ID | Status | +--------------------------------------+-------+--------------------------------------+-----------+ | 4b468327-d03f-4df7-97ef-c5230b5beafc | test5 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | available | | 8a9b3ce0-23bb-4923-b8ce-d0dd1f56b2b8 | test4 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | restoring | +--------------------------------------+-------+--------------------------------------+-----------+ $ openstack share backup delete test5 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-group-types.rst0000664000175000017500000002174000000000000026235 0ustar00zuulzuul00000000000000.. 
_shared_file_systems_share_group_types: ================= Share group types ================= Share group types are types for share groups just like :ref:`share types for shares`. A group type is associated with group specs similar to the way extra specs are associated with a share type. A share group type aids the scheduler to filter or choose back ends when you create a share group and to set any backend specific parameters on the share group. Any driver that can perform a group operation in an advantaged way may report that as a group capability, such as: * Ordered writes * Consistent snapshots * Group replication * Group backup Share group types may contain group specs corresponding to the group capabilities reported by the backends. A group capability applies across all the shares inside the share group, for example, a backend may support `consistent_snapshot_support`, and using this group type extra spec in the group type will allow scheduling share groups onto that backend. Any time a snapshot of the group is initiated, a crash consistent simultaneous snapshot of all the constituent shares is taken. Shares in a share group may each have different share types because they can each be on separate pools, have different capabilities and perhaps end users can even be billed differently for using each of them. To allow for this possibility, one or more share types can be associated with a group type. The admin also specifies which share type(s) a given group type may contain. At least one share type must be provided to create a share group type. When a user creates a share group, the scheduler creates the group on one of the backends that match the specified share type(s) and share group type. In the Shared File Systems configuration file ``manila.conf``, the administrator can set the share group type used by default for the share group creation. To create a share group type, use the :command:`openstack share group type create` command as: ..
code-block:: console openstack share group type create [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width ] [--fit-width] [--print-empty] [--group-specs [ ...]] [--public ] [ ...] Where the ``name`` is the share group type name and ``--public`` defines the level of the visibility for the share group type. One share group can include multiple ``share-types``. ``--group-specs`` are the extra specifications used to filter back ends. .. note:: The extra specifications set in the share group types are explained further in :ref:`shared_file_systems_scheduling`. Administrators can create share group types with these extra specifications for the back ends filtering. An administrator can use the ``policy.yaml`` file to grant permissions for share group type creation with extra specifications to other roles. You set a share group type to private or public and :ref:`manage the access` to the private share group types. By default a share group type is created as publicly accessible. Set ``--public`` to ``False`` to make the share group type private. Share group type operations --------------------------- To create a new share group type you need to specify the name of the new share group type and existing share types. The new share group type can also be public. One share group can include multiple share types. .. 
code-block:: console $ openstack share group type create group_type_for_cg default --public True +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ | id | cd7173f2-93f9-4977-aa55-eb8884333a07 | | name | group_type_for_cg | | share_types | c069126a-2d87-4bbb-a395-1dc5a5ac5d96 | | visibility | public | | is_default | False | | group_specs | | +-------------+--------------------------------------+ $ openstack share group type list +--------------------------------------+-------------------+--------------------------------------+------------+------------+-------------+ | ID | Name | Share Types | Visibility | Is Default | Group Specs | +--------------------------------------+-------------------+--------------------------------------+------------+------------+-------------+ | cd7173f2-93f9-4977-aa55-eb8884333a07 | group_type_for_cg | c069126a-2d87-4bbb-a395-1dc5a5ac5d96 | public | False | | +--------------------------------------+-------------------+--------------------------------------+------------+------------+-------------+ You can set extra specifications for a share group type using **openstack share group type set --group-specs ** command. .. code-block:: console $ openstack share group type set group_type_for_cg --group-specs consistent_snapshot_support=host It is also possible to view a list of current share group types and extra specifications: .. 
code-block:: console $ openstack share group type list +--------------------------------------+-------------------+--------------------------------------+------------+------------+------------------------------------+ | ID | Name | Share Types | Visibility | Is Default | Group Specs | +--------------------------------------+-------------------+--------------------------------------+------------+------------+------------------------------------+ | cd7173f2-93f9-4977-aa55-eb8884333a07 | group_type_for_cg | c069126a-2d87-4bbb-a395-1dc5a5ac5d96 | public | False | consistent_snapshot_support : host | +--------------------------------------+-------------------+--------------------------------------+------------+------------+------------------------------------+ Use :command:`openstack share group type unset ` to unset one or more extra specifications. .. code-block:: console $ openstack share group type unset test_group_type mount_snapshot_support A public or private share group type can be deleted with the :command:`openstack share group type delete ` command. .. _share_group_type_access: Share group type access ----------------------- You can manage access to a private share group type for different projects. Administrators can provide access, revoke access, and retrieve information about access for a specified private share group type. Create a private group type: .. code-block:: console $ openstack share group type create my_type1 default --public False +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ | id | 0c488ca6-8843-4313-ba2b-cc33acb2af73 | | name | my_type1 | | share_types | c069126a-2d87-4bbb-a395-1dc5a5ac5d96 | | visibility | private | | is_default | False | | group_specs | | +-------------+--------------------------------------+ .. note:: If you run :command:`openstack share group type list` both public and private share group types appear. 
Grant access to created private type for a demo and alt_demo projects by providing their IDs: .. code-block:: console $ openstack share group type access create my_type1 63ce0a1452384fce9edb0189425ea0e2 $ openstack share group type access create my_type1 d274cfc59e2543d38aa223af4f5eb327 To view information about access for a private share group type, use the command :command:`openstack share group type access list my_type1`: .. code-block:: console $ openstack share group type access list my_type1 +----------------------------------+ | Project ID | +----------------------------------+ | 63ce0a1452384fce9edb0189425ea0e2 | | d274cfc59e2543d38aa223af4f5eb327 | +----------------------------------+ After granting access to the share group type, the target project can see the share group type in the list, and create private share groups. To deny access for a specified project, use :command:`openstack share group type access delete ` command. .. code-block:: console $ openstack share group type access delete my_type1 b0fa13353e594d6f809dfa405fedc46a ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-groups.rst0000664000175000017500000004322500000000000025260 0ustar00zuulzuul00000000000000.. Copyright (c) 2017 Jun Zhong Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============ Share groups ============ Share group support is available in Manila since the Ocata release. 
A share group is a group of shares upon which users can perform group based operations, such as taking a snapshot together. This framework is meant to allow migrating or replicating a group of shares in unison in future releases of manila. Support currently exists for creating group types and group specs, creating groups of shares, and creating snapshots of groups. These group operations can be performed using the command line client. To create a share group, and access it, the following general concepts are prerequisite knowledge: #. To create a share group, use :command:`manila share-group-create` command. #. You can specify the ``share-network``, :ref:`share group type `, ``source-share-group-snapshot``, ``availability-zone``, :ref:`share type `. #. After the share group becomes available, use the :command:`manila create` command to create a share within the share group. .. note:: A share group is limited to a single backend, i.e. all shares created within a particular share group end up on the same backend. If the backend supports pools, the shares may be created within separate pools. So this feature is apt for those that would like co-locality of different shares. Actions on a share group ~~~~~~~~~~~~~~~~~~~~~~~~ A few actions, such as extend & shrink, are inherently applicable only to individual shares. One could theoretically apply extend to a group, increasing the size of each member, but this would not be a use-case covered initially. Any actions in this category must remain available to group members, and other actions such as taking snapshots of group members can be allowed, but actions such as migration or replication would be available only at the group level and not on its members. 
====================== ======================================================== Share Action Share Group Action ====================== ======================================================== Create (share type) Create (share types, group type) Delete Delete (group) Snapshot Snapshot (may or may not be a consistent group snapshot) Create from snapshot Create from group snapshot Clone Clone group (and all members) (planned) Replicate Replicate (planned) Migrate Migrate (planned) Extend/shrink N/A ====================== ======================================================== Creating a share with share group ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Creating a share group type --------------------------- In this example, we will create a new share group type and specify the `consistent_snapshot_support` as an group-spec within the share-group-type-create being used. Use the :command:`manila type-list` command to get a share type. Then use the share type to create a share group type. .. code-block:: console $ manila type-list +--------------------------------------+---------------------+------------+------------+--------------------------------------+-----------------------------+ | ID | Name | visibility | is_default | required_extra_specs | optional_extra_specs | +--------------------------------------+---------------------+------------+------------+--------------------------------------+-----------------------------+ | ee6287aa-448b-432b-a928-41ce9d8e149f | default_share_type | public | - | driver_handles_share_servers : False | | +--------------------------------------+---------------------+------------+------------+--------------------------------------+-----------------------------+ Use the :command:`manila share-group-type-create` command to create a new share group type. Specify the name and share types. .. 
code-block:: console $ manila share-group-type-create group_type_for_cg default_share_type +------------+--------------------------------------+ | Property | Value | +------------+--------------------------------------+ | is_default | - | | ID | cfe42f20-d13e-4348-9370-f0763e426db3 | | Visibility | public | | Name | group_type_for_cg | +------------+--------------------------------------+ Use the :command:`manila share-group-type-key` command to set a group-spec to the share group type. .. code-block:: console $ manila share-group-type-key group_type_for_cg set consistent_snapshot_support=host .. note:: This command has no output. To verify the group-spec, use the :command:`manila share-group-type-specs-list` command and specify the share group type's name or ID as a parameter. Creating a share group ---------------------- Use the :command:`manila share-group-create` command to create a share group. Specify the share group type that we created. .. code-block:: console $ manila share-group-create --share-group-type group_type_for_cg +--------------------------------+--------------------------------------+ | Property | Value | +--------------------------------+--------------------------------------+ | status | creating | | description | None | | created_at | 2017-09-11T02:08:52.319921 | | source_share_group_snapshot_id | None | | share_network_id | None | | share_server_id | None | | host | None | | share_group_type_id | cfe42f20-d13e-4348-9370-f0763e426db3 | | project_id | 87ba30b5315c40ec8ec5e3346112eae4 | | share_types | ee6287aa-448b-432b-a928-41ce9d8e149f | | id | ecf78d45-546a-48df-a969-c153e68f0376 | | name | None | +--------------------------------+--------------------------------------+ .. note:: One share group can include multiple share types. The share types are going to be inherited directly from the share group type. Use the :command:`manila share-group-show` command to retrieve details of the share. Specify the share ID or name as a parameter. .. 
code-block:: console $ manila share-group-show ecf78d45-546a-48df-a969-c153e68f0376 +--------------------------------+-------------------------------------------+ | Property | Value | +--------------------------------+-------------------------------------------+ | status | available | | description | None | | created_at | 2017-09-11T02:08:53.000000 | | source_share_group_snapshot_id | None | | share_network_id | None | | share_server_id | None | | host | ubuntu@generic2#test_pool | | share_group_type_id | cfe42f20-d13e-4348-9370-f0763e426db3 | | project_id | 87ba30b5315c40ec8ec5e3346112eae4 | | share_types | ee6287aa-448b-432b-a928-41ce9d8e149f | | id | ecf78d45-546a-48df-a969-c153e68f0376 | | name | None | +--------------------------------+-------------------------------------------+ Create a share with the share group ----------------------------------- Use the :command:`manila create` command to create a share. Specify the share protocol, size, share group type and the share name. .. 
code-block:: console $ manila create NFS 1 --share-group ecf78d45-546a-48df-a969-c153e68f0376 --name test_group_share_1 +---------------------------------------+-------------------------------------------+ | Property | Value | +---------------------------------------+-------------------------------------------+ | status | creating | | share_type_name | default_share_type | | description | None | | availability_zone | None | | share_network_id | None | | share_server_id | None | | share_group_id | ecf78d45-546a-48df-a969-c153e68f0376 | | host | ubuntu@generic2#test_pool | | revert_to_snapshot_support | False | | access_rules_status | active | | snapshot_id | None | | create_share_from_snapshot_support | False | | is_public | False | | task_state | None | | snapshot_support | False | | id | 21997eaf-712e-433e-8872-4ff085683657 | | size | 1 | | source_share_group_snapshot_member_id | None | | user_id | b7f2c522a5644a83b78b3f61f50c6d71 | | name | test_group_share_1 | | share_type | ee6287aa-448b-432b-a928-41ce9d8e149f | | has_replicas | False | | replication_type | None | | created_at | 2017-09-11T02:28:16.000000 | | share_proto | NFS | | mount_snapshot_support | False | | project_id | 87ba30b5315c40ec8ec5e3346112eae4 | | metadata | {} | +---------------------------------------+-------------------------------------------+ Create another share with a same share group, and named 'test_group_share_2'. .. 
code-block:: console $ manila create NFS 1 --share-group ecf78d45-546a-48df-a969-c153e68f0376 --name test_group_share_2 +---------------------------------------+-------------------------------------------+ | Property | Value | +---------------------------------------+-------------------------------------------+ | status | creating | | share_type_name | default_share_type | | description | None | | availability_zone | None | | share_network_id | None | | share_server_id | None | | share_group_id | ecf78d45-546a-48df-a969-c153e68f0376 | | host | ubuntu@generic2#test_pool | | revert_to_snapshot_support | False | | access_rules_status | active | | snapshot_id | None | | create_share_from_snapshot_support | False | | is_public | False | | task_state | None | | snapshot_support | False | | id | 8d34a9a3-3b8c-4771-af2c-66c78fe1e0b1 | | size | 1 | | source_share_group_snapshot_member_id | None | | user_id | b7f2c522a5644a83b78b3f61f50c6d71 | | name | test_group_share_2 | | share_type | ee6287aa-448b-432b-a928-41ce9d8e149f | | has_replicas | False | | replication_type | None | | created_at | 2017-09-11T21:01:36.000000 | | share_proto | NFS | | mount_snapshot_support | False | | project_id | 87ba30b5315c40ec8ec5e3346112eae4 | | metadata | {} | +---------------------------------------+-------------------------------------------+ Creating a share group snapshot ------------------------------- Create a share group sanpshot of the share group Use the :command:`manila share-group-snapshot-create` command to create a share group snapshot. Specify the share group ID or name. .. 
code-block:: console $ manila share-group-snapshot-create ecf78d45-546a-48df-a969-c153e68f0376 +----------------+--------------------------------------+ | Property | Value | +----------------+--------------------------------------+ | status | creating | | name | None | | created_at | 2017-09-11T21:04:54.612737 | | share_group_id | ecf78d45-546a-48df-a969-c153e68f0376 | | project_id | 87ba30b5315c40ec8ec5e3346112eae4 | | id | ac387240-08dc-4b23-80f6-ffc481e6c87a | | description | None | +----------------+--------------------------------------+ Show the members of the share group snapshot Use the :command:`manila share-group-snapshot-list-members` command to see all share members of the share group snapshot. Specify the share group snapshot ID or name. .. code-block:: console $ manila share-group-snapshot-list-members ac387240-08dc-4b23-80f6-ffc481e6c87a +--------------------------------------+------+ | Share ID | Size | +--------------------------------------+------+ | 21997eaf-712e-433e-8872-4ff085683657 | 1 | | 8d34a9a3-3b8c-4771-af2c-66c78fe1e0b1 | 1 | +--------------------------------------+------+ Show the details of the share group snapshot .. code-block:: console $ manila share-group-snapshot-show ac387240-08dc-4b23-80f6-ffc481e6c87a +----------------+--------------------------------------+ | Property | Value | +----------------+--------------------------------------+ | status | available | | name | None | | created_at | 2017-09-11T21:04:55.000000 | | share_group_id | ecf78d45-546a-48df-a969-c153e68f0376 | | project_id | 87ba30b5315c40ec8ec5e3346112eae4 | | id | ac387240-08dc-4b23-80f6-ffc481e6c87a | | description | None | +----------------+--------------------------------------+ Deleting share groups --------------------- Use the :command:`manila share-group-delete ` to delete share groups. Deleting share group snapshots ------------------------------ Use the :command:`manila share-group-snapshot-delete ` to delete a share group snapshot. ..
important:: Before attempting to delete a share group or a share group snapshot, make sure that all its constituent shares and snapshots were deleted. Users will need to delete share group snapshots before attempting to delete shares within a share group or the group itself. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-management.rst0000664000175000017500000000240200000000000026046 0ustar00zuulzuul00000000000000.. _shared_file_systems_share_management: ================ Share management ================ A share is a remote, mountable file system. You can mount a share to and access a share from several hosts by several users at a time. You can create a share and associate it with a network, list shares, and show information for, update, and delete a specified share. You can also create snapshots of shares. To create a snapshot, you specify the ID of the share that you want to snapshot. The shares are based on one of the supported Shared File Systems protocols: * *NFS*. Network File System (NFS). * *CIFS*. Common Internet File System (CIFS). * *GLUSTERFS*. Gluster file system (GlusterFS). * *HDFS*. Hadoop Distributed File System (HDFS). * *CEPHFS*. Ceph File System (CephFS). * *MAPRFS*. MapR File System (MAPRFS). The Shared File Systems service provides a set of drivers that enable you to use various network file storage devices, instead of the base implementation. That is the real purpose of the Shared File Systems service in production. ..
toctree:: shared-file-systems-crud-share.rst shared-file-systems-manage-and-unmanage-share.rst shared-file-systems-manage-and-unmanage-snapshot.rst shared-file-systems-share-resize.rst shared-file-systems-quotas.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-migration.rst0000664000175000017500000003220300000000000025724 0ustar00zuulzuul00000000000000.. _shared_file_systems_share_migration: =============== Share migration =============== Share migration is the feature that migrates a share between different storage pools. Use cases ~~~~~~~~~ As an administrator, you may want to migrate your share from one storage pool to another for several reasons. Examples include: * Maintenance or evacuation * Evacuate a back end for hardware or software upgrades * Evacuate a back end experiencing failures * Evacuate a back end which is tagged end-of-life * Optimization * Defragment back ends to empty and be taken offline to conserve power * Rebalance back ends to maximize available performance * Move data and compute closer together to reduce network utilization and decrease latency or increase bandwidth * Moving shares * Migrate from old hardware generation to a newer generation * Migrate from one vendor to another Migration workflows ~~~~~~~~~~~~~~~~~~~ Moving shares across different storage pools is generally expected to be a disruptive operation that disconnects existing clients when the source ceases to exist. For this reason, share migration is implemented in a 2-phase approach that allows the administrator to control the timing of the disruption. The first phase performs data copy while users retain access to the share. When copying is complete, the second phase may be triggered to perform a switchover that may include a last sync and deleting the source, generally requiring users to reconnect to continue accessing the share. 
In order to migrate a share, one of two possible mechanisms may be employed, which provide different capabilities and affect how the disruption occurs with regards to user access during data copy phase and disconnection during switchover phase. Those two mechanisms are: * Driver-assisted migration: This mechanism is intended to make use of driver optimizations to migrate shares between pools of the same storage vendor. This mechanism allows migrating shares nondisruptively while the source remains writable, preserving all filesystem metadata and snapshots. The migration workload is performed in the storage back end. * Host-assisted migration: This mechanism is intended to migrate shares in an agnostic manner between two different pools, regardless of storage vendor. The implementation for this mechanism does not offer the same properties found in driver-assisted migration. In host-assisted migration, the source remains readable, snapshots must be deleted prior to starting the migration, filesystem metadata may be lost, and the clients will get disconnected by the end of migration. The migration workload is performed by the Data Service, which is a dedicated manila service for intensive data operations. When starting a migration, driver-assisted migration is attempted first. If the shared file system service detects it is not possible to perform the driver-assisted migration, it proceeds to attempt host-assisted migration. Using the migration APIs ~~~~~~~~~~~~~~~~~~~~~~~~ The commands to interact with the share migration API are: * ``migration_start``: starts a migration while retaining access to the share. Migration is paused and waits for ``migration_complete`` invocation when it has copied all data and is ready to take down the source share. .. code-block:: console $ manila migration-start share_1 ubuntu@generic2#GENERIC2 --writable False --preserve-snapshots False --preserve-metadata False --nondisruptive False .. note:: This command has no output. 
* ``migration_complete``: completes a migration, removing the source share and setting the destination share instance to ``available``. .. code-block:: console $ manila migration-complete share_1 .. note:: This command has no output. * ``migration_get_progress``: obtains migration progress information of a share. .. code-block:: console $ manila migration-get-progress share_1 +----------------+--------------------------+ | Property | Value | +----------------+--------------------------+ | task_state | data_copying_in_progress | | total_progress | 37 | +----------------+--------------------------+ * ``migration_cancel``: cancels an in-progress migration of a share. .. code-block:: console $ manila migration-cancel share_1 .. note:: This command has no output. The parameters -------------- To start a migration, an administrator should specify several parameters. Among those, two of them are key for the migration. * ``share``: The share that will be migrated. * ``destination_pool``: The destination pool to which the share should be migrated to, in format host@backend#pool. Several other parameters, referred to here as ``driver-assisted parameters``, *must* be specified in the ``migration_start`` API. They are: * ``preserve_metadata``: whether preservation of filesystem metadata should be enforced for this migration. * ``preserve_snapshots``: whether preservation of snapshots should be enforced for this migration. * ``writable``: whether the source share remaining writable should be enforced for this migration. * ``nondisruptive``: whether it should be enforced to keep clients connected throughout the migration. Specifying any of the boolean parameters above as ``True`` will disallow a host-assisted migration. In order to appropriately move a share to a different storage pool, it may be required to change one or more share properties, such as the share type, share network, or availability zone. 
To accomplish this, use the optional parameters: * ``new_share_type_id``: Specify the ID of the share type that should be set in the migrated share. * ``new_share_network_id``: Specify the ID of the share network that should be set in the migrated share. If driver-assisted migration should not be attempted, you may provide the optional parameter: * ``force_host_assisted_migration``: whether driver-assisted migration attempt should be skipped. If this option is set to ``True``, all driver-assisted options must be set to ``False``. Configuration ~~~~~~~~~~~~~ For share migration to work in the cloud, there are several configuration requirements that need to be met: For driver-assisted migration: it is necessary that the configuration of all back end stanzas is present in the file manila.conf of all manila-share nodes. Also, network connectivity between the nodes running manila-share service and their respective storage back ends is required. For host-assisted migration: it is necessary that the Data Service (manila-data) is installed and configured in a node connected to the cloud's administrator network. The drivers pertaining to the source back end and destination back end involved in the migration should be able to provide shares that can be accessed from the administrator network. This can easily be accomplished if the driver supports ``admin_only`` export locations, else it is up to the administrator to set up means of connectivity. In order for the Data Service to mount the source and destination instances, it must use manila share access APIs to grant access to mount the instances. The access rule type varies according to the share protocol, so there are a few config options to set the access value for each type: * ``data_node_access_ips``: For IP-based access type, provide one or more administrator network IP addresses of the host running the Data Service. For NFS shares, drivers should always add rules with the "no_root_squash" property. 
* ``data_node_access_cert``: For certificate-based access type, provide the value of the certificate name that grants access to the Data Service. * ``data_node_access_admin_user``: For user-based access type, provide the value of a username that grants access and administrator privileges to the files in the share. * ``data_node_mount_options``: Provide the value of a mapping of protocol name to respective mount options. The Data Service makes use of mount command templates that by default have a dedicated field for inserting the mount options parameter. The default value for this config option already includes the username and password parameters for CIFS shares and the NFS v3 enforcing parameter for NFS shares. * ``mount_tmp_location``: Provide the value of a string representing the path where the share instances used in migration should be temporarily mounted. The default value is ``/tmp/``. * ``check_hash``: This boolean config option value determines whether the hash of all files copied in migration should be validated. Setting this option increases the time it takes to migrate files, and is recommended for ultra-dependable systems. It defaults to disabled. The configuration options above pertain only to the Data Service and should be defined in the ``DEFAULT`` group of the ``manila.conf`` configuration file. Also, the Data Service node must have all the protocol-related libraries pre-installed to be able to run the mount commands for each protocol. You may need to change some driver-specific configuration options from their default value to work with specific drivers. If so, they must be set under the driver configuration stanza in ``manila.conf``. See a detailed description for each one below: * ``migration_ignore_files``: Provide the value as a list containing the names of files or folders to be ignored during migration for a specific driver. The default value is a list containing only the ``lost+found`` folder. 
* ``share_mount_template``: Provide a string that defines the template for the mount command for a specific driver. The template should contain the following entries to be formatted by the code: * proto: The share protocol. Automatically formatted by the Data Service. * options: The mount options to be formatted by the Data Service according to the data_node_mount_options config option. * export: The export path of the share. Automatically formatted by the Data Service with the share's ``admin_only`` export location. * path: The path to mount the share. Automatically formatted by the Data Service according to the mount_tmp_location config option. The default value for this config option is:: mount -vt %(proto)s %(options)s %(export)s %(path)s. * ``share_unmount_template``: Provide the value of a string that defines the template for the unmount command for a specific driver. The template should contain the path of where the shares are mounted, according to the ``mount_tmp_location`` config option, to be formatted automatically by the Data Service. The default value for this config option is:: umount -v %(path)s * ``protocol_access_mapping``: Provide the value of a mapping of access rule type to protocols supported. The default value specifies IP and user based access types mapped to NFS and CIFS respectively, which are the combinations supported by manila. If a certain driver uses a different protocol for IP or user access types, or is not included in the default mapping, it should be specified in this configuration option. Other remarks ~~~~~~~~~~~~~ * There is no need to manually add any of the previously existing access rules after a migration is complete, they will be persisted on the destination after the migration. * Once migration of a share has started, the user will see the status ``migrating`` and it will block other share actions, such as adding or removing access rules, creating or deleting snapshots, resizing, among others. 
* The destination share instance export locations, although they may exist from the beginning of a host-assisted migration, are neither visible nor accessible as access rules cannot be added. * During a host-assisted migration, an access rule granting access to the Data Service will be added and displayed by querying the ``access-list`` API. This access rule should not be tampered with; otherwise, it will cause the migration to fail. * Resources allocated are cleaned up automatically when a migration fails, except if this failure occurs during the second phase of a driver-assisted migration. Each step in migration is saved to the field ``task_state`` present in the Share model. If for any reason the state is not set to ``migration_error`` during a failure, it will need to be reset using the ``reset-task-state`` API. * It is advised that the node running the Data Service is well secured, since it will be mounting shares with highest privileges, temporarily exposing user data to whoever has access to this node. * The two mechanisms of migration are affected differently by service restarts: * If performing a host-assisted migration, all services may be restarted except for the manila-data service when performing the copy (the ``task_state`` field value starts with ``data_copying_``). In other steps of the host-assisted migration, both the source and destination manila-share services should not be restarted. * If performing a driver-assisted migration, the migration is affected minimally by service restarts if the ``task_state`` is ``migration_driver_in_progress``, while the copy is being done in the back end. Otherwise, the source and destination manila-share services should not be restarted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-networks.rst0000664000175000017500000005261400000000000025617 0ustar00zuulzuul00000000000000.. 
_shared_file_systems_share_networks: ============== Share networks ============== Share networks are essential to allow end users a path to hard multi-tenancy. When backed by isolated networks, the Shared File Systems service can guarantee hard network path isolation for the users' shares. Users can be allowed to designate their project networks as share networks. When a share network is provided during share creation, the share driver sets up a virtual share server (NAS server) on the share network and exports shares using this NAS server. The share server itself is abstracted away from the user. You must ensure that the storage system can connect the share servers it provisions to the networks users can use as their share networks. .. note:: Not all shared file systems storage backends support share networks. Share networks can only be used when using a share type that has the specification ``driver_handles_share_servers=True``. To see what storage back ends support this specification, refer to the :doc:`share_back_ends_feature_support_mapping`. How to create share network ~~~~~~~~~~~~~~~~~~~~~~~~~~~ To list networks in a project, run: .. code-block:: console $ openstack network list +--------------+---------+--------------------+ | ID | Name | Subnets | +--------------+---------+--------------------+ | bee7411d-... | public | 884a6564-0f11-... | | | | e6da81fa-5d5f-... | | 5ed5a854-... | private | 74dcfb5a-b4d7-... | | | | cc297be2-5213-... | +--------------+---------+--------------------+ A share network stores network information that share servers can use where shares are hosted. You can associate a share with a single share network. You must always specify a share network when creating a share with a share type that requests hard multi-tenancy, i.e., has extra-spec 'driver_handles_share_servers=True'. For more information about supported plug-ins for share networks, see :ref:`shared_file_systems_network_plugins`. 
A share network has these attributes: - The IP block in Classless Inter-Domain Routing (CIDR) notation from which to allocate the network. - The IP version of the network. - The network type, which is `vlan`, `vxlan`, `gre`, or `flat`. If the network uses segmentation, a segmentation identifier. For example, VLAN, VXLAN, and GRE networks use segmentation. To create a share network with private network and subnetwork, run: .. code-block:: console $ manila share-network-create --neutron-net-id 5ed5a854-21dc-4ed3-870a-117b7064eb21 \ --neutron-subnet-id 74dcfb5a-b4d7-4855-86f5-a669729428dc --name my_share_net \ --description "My first share network" --availability-zone manila-zone-0 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | name | my_share_net | | segmentation_id | None | | created_at | 2015-09-24T12:06:32.602174 | | neutron_subnet_id | 74dcfb5a-b4d7-4855-86f5-a669729428dc | | updated_at | None | | network_type | None | | neutron_net_id | 5ed5a854-21dc-4ed3-870a-117b7064eb21 | | ip_version | None | | cidr | None | | project_id | 20787a7ba11946adad976463b57d8a2f | | id | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | | description | My first share network | +-------------------+--------------------------------------+ The ``segmentation_id``, ``cidr``, ``ip_version``, and ``network_type`` share network attributes are automatically set to the values determined by the network provider. .. note:: You are able to specify the parameter ``availability_zone`` only with API versions >= 2.51. From the version 2.51, a share network is able to span multiple subnets in different availability zones. The network parameters ``neutron_net_id``, ``neutron_subnet_id``, ``segmentation_id``, ``cidr``, ``ip_version``, ``network_type``, ``gateway`` and ``mtu`` were moved to the share network subnet and no longer pertain to the share network. 
If you do not specify an availability zone during the share network creation, the created subnet will be considered default by the Shared File Systems Service. A default subnet is expected to be reachable from all availability zones in the cloud. .. note:: Since API version 2.63, the share network will have two additional fields: ``status`` and ``security_service_update_support``. The former indicates the current status of a share network, and the latter informs if all the share network's resources can hold updating or adding security services after they are already deployed. To check the network list, run: .. code-block:: console $ manila share-network-list +--------------------------------------+--------------+ | id | name | +--------------------------------------+--------------+ | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | my_share_net | +--------------------------------------+--------------+ If you configured the generic driver with ``driver_handles_share_servers = True`` (with the share servers) and already had previous operations in the Shared File Systems service, you can see ``manila_service_network`` in the neutron list of networks. This network was created by the generic driver for internal use. .. code-block:: console $ openstack network list +--------------+------------------------+--------------------+ | ID | Name | Subnets | +--------------+------------------------+--------------------+ | 3b5a629a-e...| manila_service_network | 4f366100-50... | | bee7411d-... | public | 884a6564-0f11-... | | | | e6da81fa-5d5f-... | | 5ed5a854-... | private | 74dcfb5a-b4d7-... | | | | cc297be2-5213-... | +--------------+------------------------+--------------------+ You also can see detailed information about the share network including ``network_type``, and ``segmentation_id`` fields: .. 
code-block:: console $ openstack network show manila_service_network +---------------------------+--------------------------------------+ | Field | Value | +---------------------------+--------------------------------------+ | admin_state_up | UP | | availability_zone_hints | | | availability_zones | nova | | created_at | 2016-12-13T09:31:30Z | | description | | | id | 3b5a629a-e7a1-46a3-afb2-ab666fb884bc | | ipv4_address_scope | None | | ipv6_address_scope | None | | mtu | 1450 | | name | manila_service_network | | port_security_enabled | True | | project_id | f6ac448a469b45e888050cf837b6e628 | | provider:network_type | vxlan | | provider:physical_network | None | | provider:segmentation_id | 73 | | revision_number | 7 | | router:external | Internal | | shared | False | | status | ACTIVE | | subnets | 682e3329-60b0-440f-8749-83ef53dd8544 | | tags | [] | | updated_at | 2016-12-13T09:31:36Z | +---------------------------+--------------------------------------+ You also can add and remove the security services from the share network. For more detail, see :ref:`shared_file_systems_security_services`. How to reset the state of a share network (Since API version 2.63) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To reset the state of a given share network, run: .. code-block:: console $ manila share-network-reset-state manila_service_network --state active ============================================== Share network subnets (Since API version 2.51) ============================================== Share network subnet is an entity that stores network data from the OpenStack Networking service. A share network can span multiple share network subnets in different availability zones. How to create share network subnet ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When you create a share network, a primary share network subnet is automatically created. The share network subnet stores network information that share servers can use where shares are hosted. 
If a share network subnet is not assigned to a specific availability zone, it is considered to be available across all availability zones. Such a subnet is referred to as ``default`` subnet. A share network can have only one default subnet. However, having a default subnet is not necessary. A share can be associated with only one share network. To list share networks in a project, run: .. code-block:: console $ manila share-network-list +--------------------------------------+-----------------------+ | id | name | +--------------------------------------+-----------------------+ | 483a9787-5116-48b2-bd89-473022fad060 | sharenetwork1 | | bcb9c650-a501-410d-a418-97f28b8ab61a | sharenetwork2 | +--------------------------------------+-----------------------+ You can attach any number of share network subnets into a share network. However, only one share network subnet is allowed per availability zone in a given share network. If you try to create another subnet in a share network that already contains a subnet in a specific availability zone, the operation will be denied. To create a share network subnet in a specific share network, run: .. 
code-block:: console $ manila share-network-subnet-create sharenetwork1 \ --availability-zone manila-zone-0 \ --neutron-net-id 5ed5a854-21dc-4ed3-870a-117b7064eb21 \ --neutron-subnet-id 74dcfb5a-b4d7-4855-86f5-a669729428dc +--------------------+--------------------------------------+ | Property | Value | +--------------------+--------------------------------------+ | id | 20f3cd2c-0faa-4b4b-a00a-4f188eb1cf38 | | availability_zone | manila-zone-0 | | share_network_id | 483a9787-5116-48b2-bd89-473022fad060 | | share_network_name | sharenetwork1 | | created_at | 2019-12-03T00:37:30.000000 | | segmentation_id | None | | neutron_subnet_id | 74dcfb5a-b4d7-4855-86f5-a669729428dc | | updated_at | None | | neutron_net_id | 5ed5a854-21dc-4ed3-870a-117b7064eb21 | | ip_version | None | | cidr | None | | network_type | None | | mtu | None | | gateway | None | +--------------------+--------------------------------------+ To list all the share network subnets of a given share network, you need to show the share network, and then all subnets will be displayed, as shown below: .. 
code-block:: console $ manila share-network-show sharenetwork1 +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | id | 483a9787-5116-48b2-bd89-473022fad060 | | name | sharenetwork1 | | project_id | 58ff89e14f9245d7843b8cf290525b5b | | created_at | 2019-12-03T00:16:39.000000 | | updated_at | 2019-12-03T00:31:58.000000 | | description | None | | share_network_subnets | [{'id': '20f3cd2c-0faa-4b4b-a00a-4f188eb1cf38', 'availability_zone': 'manila-zone-0', 'created_at': '2019-12-03T00:37:30.000000', 'updated_at': None, 'segmentation_id': None, 'neutron_net_id': '5ed5a854-21dc-4ed3-870a-117b7064eb21', 
'neutron_subnet_id': '74dcfb5a-b4d7-4855-86f5-a669729428dc', 'ip_version': None, 'cidr': None, 'network_type': None, 'mtu': None, 'gateway': None}, {'id': '8b532c15-3ac7-4ea1-b1bc-732614a82313', 'availability_zone': None, 'created_at': '2019-12-03T00:16:39.000000', 'updated_at': None, 'segmentation_id': None, 'neutron_net_id': None, 'neutron_subnet_id': None, 'ip_version': None, 'cidr': None, 'network_type': None, 'mtu': None, 'gateway': None}] | +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ To show a specific share network subnet, run: .. 
code-block:: console $ manila share-network-subnet-show sharenetwork1 20f3cd2c-0faa-4b4b-a00a-4f188eb1cf38 +--------------------+--------------------------------------+ | Property | Value | +--------------------+--------------------------------------+ | id | 20f3cd2c-0faa-4b4b-a00a-4f188eb1cf38 | | availability_zone | manila-zone-0 | | share_network_id | 483a9787-5116-48b2-bd89-473022fad060 | | share_network_name | sharenetwork1 | | created_at | 2019-12-03T00:37:30.000000 | | segmentation_id | None | | neutron_subnet_id | 74dcfb5a-b4d7-4855-86f5-a669729428dc | | updated_at | None | | neutron_net_id | 5ed5a854-21dc-4ed3-870a-117b7064eb21 | | ip_version | None | | cidr | None | | network_type | None | | mtu | None | | gateway | None | +--------------------+--------------------------------------+ To delete a share network subnet, run: .. code-block:: console $ manila share-network-subnet-delete sharenetwork1 20f3cd2c-0faa-4b4b-a00a-4f188eb1cf38 If you want to remove a share network subnet, make sure that no other resource is using the subnet, otherwise the Shared File Systems Service will deny the operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-replication.rst0000664000175000017500000010011600000000000026243 0ustar00zuulzuul00000000000000.. _shared_file_systems_share_replication: ================= Share replication ================= Replication of data has a number of use cases in the cloud. One use case is High Availability of the data in a shared file system, used for example, to support a production database. Another use case is ensuring Data Protection; i.e being prepared for a disaster by having a replication location that will be ready to back up your primary data source. 
The Shared File System service supports user facing APIs that allow users to create shares that support replication, add and remove share replicas and manage their snapshots and access rules. Three replication types are currently supported and they vary in the semantics associated with the primary share and the secondary copies. .. important:: **Share replication** is an **experimental** Shared File Systems API in the Mitaka release. Contributors can change or remove the experimental part of the Shared File Systems API in further releases without maintaining backward compatibility. Experimental APIs have an ``X-OpenStack-Manila-API-Experimental: true`` header in their HTTP requests. Replication types supported ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before using share replication, make sure the Shared File System driver that you are running supports this feature. You can check it in the ``manila-scheduler`` service reports. The ``replication_type`` capability reported can have one of the following values: writable The driver supports creating ``writable`` share replicas. All share replicas can be accorded read/write access and would be synchronously mirrored. readable The driver supports creating ``read-only`` share replicas. All secondary share replicas can be accorded read access. Only the primary (or ``active`` share replica) can be written into. dr The driver supports creating ``dr`` (abbreviated from Disaster Recovery) share replicas. A secondary share replica is inaccessible until after a ``promotion``. None The driver does not support Share Replication. .. note:: The term ``active`` share replica refers to the ``primary`` share. In ``writable`` style of replication, all share replicas are ``active``, and there could be no distinction of a ``primary`` share. In ``readable`` and ``dr`` styles of replication, a ``secondary`` share replica may be referred to as ``passive``, ``non-active`` or simply, ``replica``. 
Configuration ~~~~~~~~~~~~~ Two new configuration options have been introduced to support Share Replication. replica_state_update_interval Specify this option in the ``DEFAULT`` section of your ``manila.conf``. The Shared File Systems service requests periodic update of the `replica_state` of all ``non-active`` share replicas. The update occurs with respect to an interval corresponding to this option. If it is not specified, it defaults to 300 seconds. replication_domain Specify this option in the backend stanza when using a multi-backend style configuration. The value can be any ASCII string. Two backends that can replicate between each other would have the same ``replication_domain``. This comes from the premise that the Shared File Systems service expects Share Replication to be performed between symmetric backends. This option is *required* for using the Share Replication feature. Health of a share replica ~~~~~~~~~~~~~~~~~~~~~~~~~ Apart from the ``status`` attribute, share replicas have the ``replica_state`` attribute to denote the state of data replication on the storage backend. The ``primary`` share replica will have its `replica_state` attribute set to `active`. The ``secondary`` share replicas may have one of the following as their ``replica_state``: in_sync The share replica is up to date with the ``active`` share replica (possibly within a backend-specific ``recovery point objective``). out_of_sync The share replica is out of date (all new share replicas start out in this ``replica_state``). error When the scheduler fails to schedule this share replica or some potentially irrecoverable error occurred with regard to updating data for this replica. Promotion or failover ~~~~~~~~~~~~~~~~~~~~~ For ``readable`` and ``dr`` types of replication, we refer to the task of switching a `non-active` share replica with the ``active`` replica as `promotion`. 
For the ``writable`` style of replication, promotion does not make sense since all share replicas are ``active`` (or writable) at all times. The `status` attribute of the non-active replica being promoted will be set to ``replication_change`` during its promotion. This has been classified as a ``busy`` state and thus API interactions with the share are restricted while one of its share replicas is in this state. Share replication workflows ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following examples have been implemented with the ZFSonLinux driver that is a reference implementation in the Shared File Systems service. It operates in ``driver_handles_share_servers=False`` mode and supports the ``readable`` type of replication. In the example, we assume a configuration of two Availability Zones [1]_, called `availability_zone_1` and `availability_zone_2`. Since the Train release, some drivers operating in ``driver_handles_share_server=True`` mode support share replication. Multiple availability zones are not necessary to use the replication feature. However, the use of an availability zone as a ``failure domain`` is encouraged. Pay attention to the network configuration for the ZFS driver. Here, we assume a configuration of ``zfs_service_ip`` and ``zfs_share_export_ip`` from two separate networks. The service network is reachable from the host where the ``manila-share`` service is running. The share export IP is from a network that allows user access. See `Configuring the ZFSonLinux driver `_ for information on how to set up the ZFSonLinux driver. Creating a share that supports replication ------------------------------------------ Create a new share type and specify the `replication_type` as an extra-spec within the share-type being used. Use the :command:`manila type-create` command to create a new share type. Specify the name and the value for the extra-spec ``driver_handles_share_servers``. .. 
code-block:: console $ manila type-create readable_type_replication False +----------------------+--------------------------------------+ | Property | Value | +----------------------+--------------------------------------+ | required_extra_specs | driver_handles_share_servers : False | | Name | readable_type_replication | | Visibility | public | | is_default | - | | ID | 3b3ee3f7-6e43-4aa1-859d-0b0511c43074 | | optional_extra_specs | snapshot_support : True | +----------------------+--------------------------------------+ Use the :command:`manila type-key` command to set an extra-spec to the share type. .. code-block:: console $ manila type-key readable_type_replication set replication_type=readable .. note:: This command has no output. To verify the extra-spec, use the :command:`manila extra-specs-list` command and specify the share type's name or ID as a parameter. Create a share with the share type Use the :command:`manila create` command to create a share. Specify the share protocol, size and the availability zone. .. 
code-block:: console $ manila create NFS 1 --share_type readable_type_replication --name my_share --description "This share will have replicas" --az availability_zone_1 +-----------------------------+--------------------------------------+ | Property | Value | +-----------------------------+--------------------------------------+ | status | creating | | share_type_name | readable_type_replication | | description | This share will have replicas | | availability_zone | availability_zone_1 | | share_network_id | None | | share_server_id | None | | share_group_id | None | | host | | | access_rules_status | active | | snapshot_id | None | | is_public | False | | task_state | None | | snapshot_support | True | | id | e496ed61-8f2e-436b-b299-32c3e90991cc | | size | 1 | | name | my_share | | share_type | 3b3ee3f7-6e43-4aa1-859d-0b0511c43074 | | has_replicas | False | | replication_type | readable | | created_at | 2016-03-29T20:22:18.000000 | | share_proto | NFS | | project_id | 48a5ca76ac69405e99dc1c13c5195186 | | metadata | {} | +-----------------------------+--------------------------------------+ .. note:: If you are creating a share with the share type specification ``driver_handles_share_servers=True``, the share network parameter is required for the operation to be performed. Use the :command:`manila show` command to retrieve details of the share. Specify the share ID or name as a parameter. .. 
code-block:: console $ manila show my_share +-----------------------------+--------------------------------------------------------------------+ | Property | Value | +-----------------------------+--------------------------------------------------------------------+ | status | available | | share_type_name | readable_type_replication | | description | This share will have replicas | | availability_zone | availability_zone_1 | | share_network_id | None | | export_locations | | | | path = | | |10.32.62.26:/alpha/manila_share_38efc042_50c2_4825_a6d8_cba2a8277b28| | | preferred = False | | | is_admin_only = False | | | id = e1d754b5-ec06-42d2-afff-3e98c0013faf | | | share_instance_id = 38efc042-50c2-4825-a6d8-cba2a8277b28 | | | path = | | |172.21.0.23:/alpha/manila_share_38efc042_50c2_4825_a6d8_cba2a8277b28| | | preferred = False | | | is_admin_only = True | | | id = 6f843ecd-a7ea-4939-86de-e1e01d9e8672 | | | share_instance_id = 38efc042-50c2-4825-a6d8-cba2a8277b28 | | share_server_id | None | | share_group_id | None | | host | openstack4@zfsonlinux_1#alpha | | access_rules_status | active | | snapshot_id | None | | is_public | False | | task_state | None | | snapshot_support | True | | id | e496ed61-8f2e-436b-b299-32c3e90991cc | | size | 1 | | name | my_share | | share_type | 3b3ee3f7-6e43-4aa1-859d-0b0511c43074 | | has_replicas | False | | replication_type | readable | | created_at | 2016-03-29T20:22:18.000000 | | share_proto | NFS | | project_id | 48a5ca76ac69405e99dc1c13c5195186 | | metadata | {} | +-----------------------------+--------------------------------------------------------------------+ .. note:: When you create a share that supports replication, an ``active`` replica is created for you. You can verify this with the :command:`manila share-replica-list` command. From API version 2.53, when creating a replicated share, the manila quota system will reserve and consume resources for two additional quotas: ``share_replicas`` and ``replica_gigabytes``. 
Creating and promoting share replicas ------------------------------------- Create a share replica Use the :command:`manila share-replica-create` command to create a share replica. Specify the share ID or name as a parameter. You may optionally provide the `availability_zone` or the `scheduler_hints`. For the scheduler hints, the value of ``only_host`` parameter must be a manila-share service host in ``host@backend#POOL`` format. .. code-block:: console $ manila share-replica-create my_share --az availability_zone_2 --scheduler_hints only_host=openstack4@zfsonlinux_2#beta +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | status | creating | | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | | availability_zone | availability_zone_2 | | created_at | 2016-03-29T20:24:53.148992 | | updated_at | None | | share_network_id | None | | share_server_id | None | | host | | | replica_state | None | | id | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | +-------------------+--------------------------------------+ See details of the newly created share replica .. note:: Since API version 2.51 (Train release), a share network is able to span multiple subnets in different availability zones. So, when using a share type with specification ``driver_handles_share_servers=True``, users must ensure that the share network has a subnet in the availability zone that they desire the share replica to be created in. .. note:: Scheduler hints are available only for API version >= 2.67. Use the :command:`manila share-replica-show` command to see details of the newly created share replica. Specify the share replica's ID as a parameter. .. 
code-block:: console $ manila share-replica-show 78a5ef96-6c36-42e0-b50b-44efe7c1807e +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | status | available | | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | | availability_zone | availability_zone_2 | | created_at | 2016-03-29T20:24:53.000000 | | updated_at | 2016-03-29T20:24:58.000000 | | share_network_id | None | | share_server_id | None | | host | openstack4@zfsonlinux_2#beta | | replica_state | in_sync | | id | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | +-------------------+--------------------------------------+ See all replicas of the share Use the :command:`manila share-replica-list` command to see all the replicas of the share. Specify the share ID or name as an optional parameter. .. code-block:: console $ manila share-replica-list --share-id my_share +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ | ID | Status | Replica State | Share ID | Host | Availability Zone | Updated At | +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ | 38efc042-50c2-4825-a6d8-cba2a8277b28 | available | active | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_1#alpha | availability_zone_1 | 2016-03-29T20:22:19.000000 | | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | available | in_sync | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_2#beta | availability_zone_2 | 2016-03-29T20:24:58.000000 | +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ Promote the secondary share replica to be the new active 
replica Use the :command:`manila share-replica-promote` command to promote a non-active share replica to become the ``active`` replica. Specify the non-active replica's ID as a parameter. .. code-block:: console $ manila share-replica-promote 78a5ef96-6c36-42e0-b50b-44efe7c1807e .. note:: This command has no output. The promotion may take time. During the promotion, the ``replica_state`` attribute of the share replica being promoted will be set to ``replication_change``. .. code-block:: console $ manila share-replica-list --share-id my_share +--------------------------------------+-----------+--------------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ | ID | Status | Replica State | Share ID | Host | Availability Zone | Updated At | +--------------------------------------+-----------+--------------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ | 38efc042-50c2-4825-a6d8-cba2a8277b28 | available | active | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_1#alpha | availability_zone_1 | 2016-03-29T20:32:19.000000 | | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | available | replication_change | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_2#beta | availability_zone_2 | 2016-03-29T20:32:19.000000 | +--------------------------------------+-----------+--------------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ Once the promotion is complete, the ``replica_state`` will be set to ``active``. .. 
code-block:: console $ manila share-replica-list --share-id my_share +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ | ID | Status | Replica State | Share ID | Host | Availability Zone | Updated At | +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ | 38efc042-50c2-4825-a6d8-cba2a8277b28 | available | in_sync | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_1#alpha | availability_zone_1 | 2016-03-29T20:32:19.000000 | | 78a5ef96-6c36-42e0-b50b-44efe7c1807e | available | active | e496ed61-8f2e-436b-b299-32c3e90991cc | openstack4@zfsonlinux_2#beta | availability_zone_2 | 2016-03-29T20:32:19.000000 | +--------------------------------------+-----------+---------------+--------------------------------------+-------------------------------+---------------------+----------------------------+ Access rules ------------ Create an IP access rule for the share Use the :command:`manila access-allow` command to add an access rule. Specify the share ID or name, protocol and the target as parameters. .. code-block:: console $ manila access-allow my_share ip 0.0.0.0/0 --access-level rw +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | | access_type | ip | | access_to | 0.0.0.0/0 | | access_level | rw | | state | new | | id | 8b339cdc-c1e0-448f-bf6d-f068ee6e8f45 | +--------------+--------------------------------------+ .. note:: Access rules are not meant to be different across the replicas of the share. However, as per the type of replication, drivers may choose to modify the access level prescribed. 
In the above example, even though read/write access was requested for the share, the driver will provide read-only access to the non-active replica to the same target, because of the semantics of the replication type: ``readable``. However, the target will have read/write access to the (currently) non-active replica when it is promoted to become the ``active`` replica. The :command:`manila access-deny` command can be used to remove a previously applied access rule. List the export locations of the share Use the :command:`manila share-export-location-list` command to list the export locations of a share. .. code-block:: console $ manila share-export-location-list my_share +--------------------------------------+---------------------------------------------------------------------------+-----------+ | ID | Path | Preferred | +--------------------------------------+---------------------------------------------------------------------------+-----------+ | 3ed3fbf5-2fa1-4dc0-8440-a0af72398cb6 | 10.32.62.21:/beta/subdir/manila_share_78a5ef96_6c36_42e0_b50b_44efe7c1807e| False | | 6f843ecd-a7ea-4939-86de-e1e01d9e8672 | 172.21.0.23:/alpha/manila_share_38efc042_50c2_4825_a6d8_cba2a8277b28 | False | | e1d754b5-ec06-42d2-afff-3e98c0013faf | 10.32.62.26:/alpha/manila_share_38efc042_50c2_4825_a6d8_cba2a8277b28 | False | | f3c5585f-c2f7-4264-91a7-a4a1e754e686 | 172.21.0.29:/beta/subdir/manila_share_78a5ef96_6c36_42e0_b50b_44efe7c1807e| False | +--------------------------------------+---------------------------------------------------------------------------+-----------+ Identify the export location corresponding to the share replica on the user accessible network and you may mount it on the target node. .. note:: As an administrator, you can list the export locations for a particular share replica by using the :command:`manila share-instance-export-location-list` command and specifying the share replica's ID as a parameter.
Snapshots --------- Create a snapshot of the share Use the :command:`manila snapshot-create` command to create a snapshot of the share. Specify the share ID or name as a parameter. .. code-block:: console $ manila snapshot-create my_share --name "my_snapshot" +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | status | creating | | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | | user_id | 5c7bdb6eb0504d54a619acf8375c08ce | | description | None | | created_at | 2016-03-29T21:14:03.000000 | | share_proto | NFS | | provider_location | None | | id | 06cdccaf-93a0-4e57-9a39-79fb1929c649 | | project_id | cadd7139bc3148b8973df097c0911016 | | size | 1 | | share_size | 1 | | name | my_snapshot | +-------------------+--------------------------------------+ Show the details of the snapshot Use the :command:`manila snapshot-show` to view details of a snapshot. Specify the snapshot ID or name as a parameter. .. code-block:: console $ manila snapshot-show my_snapshot +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | status | available | | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | | user_id | 5c7bdb6eb0504d54a619acf8375c08ce | | description | None | | created_at | 2016-03-29T21:14:03.000000 | | share_proto | NFS | | provider_location | None | | id | 06cdccaf-93a0-4e57-9a39-79fb1929c649 | | project_id | cadd7139bc3148b8973df097c0911016 | | size | 1 | | share_size | 1 | | name | my_snapshot | +-------------------+--------------------------------------+ .. note:: The ``status`` attribute of a snapshot will transition from ``creating`` to ``available`` only when it is present on all the share replicas that have their ``replica_state`` attribute set to ``active`` or ``in_sync``. 
Likewise, the ``replica_state`` attribute of a share replica will transition from ``out_of_sync`` to ``in_sync`` only when all ``available`` snapshots are present on it. Planned failovers ----------------- As an administrator, you can use the :command:`manila share-replica-resync` command to attempt to sync data between ``active`` and ``non-active`` share replicas of a share before promotion. This will ensure that share replicas have the most up-to-date data and their relationships can be safely switched. .. code-block:: console $ manila share-replica-resync 38efc042-50c2-4825-a6d8-cba2a8277b28 .. note:: This command has no output. Updating attributes ------------------- If an error occurs while updating data or replication relationships (during a ``promotion``), the Shared File Systems service may not be able to determine the consistency or health of a share replica. It may require administrator intervention to make any fixes on the storage backend as necessary. In such a situation, state correction within the Shared File Systems service is possible. As an administrator, you can: Reset the ``status`` attribute of a share replica Use the :command:`manila share-replica-reset-state` command to reset the ``status`` attribute. Specify the share replica's ID as a parameter and use the ``--state`` option to specify the state intended. .. code-block:: console $ manila share-replica-reset-state 38efc042-50c2-4825-a6d8-cba2a8277b28 --state=available .. note:: This command has no output. Reset the ``replica_state`` attribute Use the :command:`manila share-replica-reset-replica-state` command to reset the ``replica_state`` attribute. Specify the share replica's ID and use the ``--state`` option to specify the state intended. .. code-block:: console $ manila share-replica-reset-replica-state 38efc042-50c2-4825-a6d8-cba2a8277b28 --state=out_of_sync .. note:: This command has no output. 
Force delete a specified share replica in any state Use the :command:`manila share-replica-delete` command with the '--force' key to remove the share replica, regardless of the state it is in. .. code-block:: console $ manila share-replica-show 9513de5d-0384-4528-89fb-957dd9b57680 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | status | error | | share_id | e496ed61-8f2e-436b-b299-32c3e90991cc | | availability_zone | availability_zone_1 | | created_at | 2016-03-30T01:32:47.000000 | | updated_at | 2016-03-30T01:34:25.000000 | | share_network_id | None | | share_server_id | None | | host | openstack4@zfsonlinux_1#alpha | | replica_state | out_of_sync | | id | 38efc042-50c2-4825-a6d8-cba2a8277b28 | +-------------------+--------------------------------------+ $ manila share-replica-delete --force 38efc042-50c2-4825-a6d8-cba2a8277b28 .. note:: This command has no output. Use the ``policy.yaml`` file to grant permissions for these actions to other roles. Deleting share replicas ----------------------- Use the :command:`manila share-replica-delete` command with the share replica's ID to delete a share replica. .. code-block:: console $ manila share-replica-delete 38efc042-50c2-4825-a6d8-cba2a8277b28 .. note:: This command has no output. .. note:: You cannot delete the last ``active`` replica with this command. You should use the :command:`manila delete` command to remove the share. .. [1] When running in a multi-backend configuration, until the Stein release, deployers could only configure one Availability Zone per manila configuration file. This is achieved with the option ``storage_availability_zone`` defined under the ``[DEFAULT]`` section. Beyond the Stein release, the option ``backend_availability_zone`` can be specified in each back end stanza. 
The value of this configuration option will override any configuration of the ``storage_availability_zone`` from the ``[DEFAULT]`` section. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-resize.rst0000664000175000017500000001662100000000000025242 0ustar00zuulzuul00000000000000.. _shared_file_systems_share_resize: ============ Resize share ============ For most drivers, resizing the share is a safe operation. If you want to be sure that your data is safe, you can back up a share by creating a snapshot of it. You can extend and shrink the share with the :command:`openstack share resize` command, and specify the share with the new size that does not exceed the quota. For details, see :ref:`Quotas and Limits `. You also cannot shrink share size to 0 or to a greater value than the current share size. .. note:: From API version 2.53, when extending a replicated share, the manila quota system will reserve and consume resources for two additional quotas: ``share_replicas`` and ``replica_gigabytes``. This request will fail if there are no available quotas to extend the share and all of its share replicas. While extending, the share has an ``extending`` status. This means that the increase share size request was issued successfully. To extend the share and check the result, run: ..
code-block:: console $ openstack share resize docs_resize 2 $ openstack share show docs_resize +---------------------------------------+---------------------------------------+ | Property | Value | +---------------------------------------+---------------------------------------+ | id | a3454cf1-bb1d-4e4d-a8e4-a3881c593720 | | size | 2 | | availability_zone | manila-zone-0 | | created_at | 2024-09-26T14:53:18.153832 | | status | extending | | name | docs_resize | | description | None | | project_id | 1f31ee1c3e3c443bbf9aee5684456daa | | snapshot_id | None | | share_network_id | None | | share_proto | NFS | | metadata | {} | | share_type | 303f0a73-711e-4beb-a4f7-a60acc1d588e | | is_public | True | | snapshot_support | True | | task_state | None | | share_type_name | default | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | b47d81c8c8c74ea3a7c13461f30ad5ed | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | False | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | False | | progress | 100% | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | host@backend1#poolA | +---------------------------------------+---------------------------------------+ While shrinking, the share has a ``shrinking`` status. This means that the decrease share size request was issued successfully. To shrink the share and check the result, run: .. 
code-block:: console $ openstack share resize docs_resize 1 $ openstack share show docs_resize +---------------------------------------+---------------------------------------+ | Property | Value | +---------------------------------------+---------------------------------------+ | id | a3454cf1-bb1d-4e4d-a8e4-a3881c593720 | | size | 1 | | availability_zone | manila-zone-0 | | created_at | 2024-09-26T14:53:18.153832 | | status | shrinking | | name | docs_resize | | description | None | | project_id | 1f31ee1c3e3c443bbf9aee5684456daa | | snapshot_id | None | | share_network_id | None | | share_proto | NFS | | metadata | {'__mount_options': 'fs=cephfs'} | | share_type | 303f0a73-711e-4beb-a4f7-a60acc1d588e | | is_public | True | | snapshot_support | True | | task_state | None | | share_type_name | default | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | b47d81c8c8c74ea3a7c13461f30ad5ed | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | False | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | False | | progress | 100% | | is_soft_deleted | False | | scheduled_to_be_deleted_at | None | | source_backup_id | None | | share_server_id | None | | host | host@backend1#poolA | +---------------------------------------+---------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-revert-to-snapshot.rst0000664000175000017500000000335600000000000027526 0ustar00zuulzuul00000000000000.. _shared_file_systems_share_revert_to_snapshot: ======================== Share revert to snapshot ======================== To revert a share to the latest available snapshot, use the :command:`manila revert-to-snapshot`. .. note:: - In order to use this feature, the available backend in your deployment must have support for it. 
The list of backends that support this feature in the manila can be found in the :doc:`share_back_ends_feature_support_mapping`. - This feature is only available in API version 2.27 and beyond. To create shares that are revertible, the share type used must contain the extra-spec ``revert_to_snapshot_support`` set to ``True``. The default value for this is ``False``. - The revert operation can only be performed to the most recent available snapshot of the share known to manila. If revert to an earlier snapshot is desired, later snapshots must explicitly be deleted. In order to determine the most recent snapshot, the ``created_at`` field on the snapshot object is used. While reverting, the share is in ``reverting`` status and the snapshot is in ``restoring`` status. After a successful restoration, the share and snapshot states will again be set to ``available``. If the restoration fails the share will be set to ``reverting_error`` state and the snapshot will be set to ``available``. When a replicated share is reverted, the share becomes ready to be used only when all ``active`` replicas have been reverted. All secondary replicas will remain in ``out-of-sync`` state until they are consistent with the ``active`` replicas. To revert a share to a snapshot, run: .. code-block:: console $ manila revert-to-snapshot 14ee8575-aac2-44af-8392-d9c9d344f392 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-server-management.rst0000664000175000017500000002214600000000000027360 0ustar00zuulzuul00000000000000.. _shared_file_systems_share_server_management: ============= Share servers ============= A share server is a resource created by the Shared File Systems service when the driver is operating in the `driver_handles_share_servers = True` mode. A share server exports users' shares, manages their exports and access rules. 
Share servers are abstracted away from end users. Drivers operating in `driver_handles_share_servers = True` mode manage the lifecycle of these share servers automatically. Administrators can however remove the share servers from the management of the Shared File Systems service without destroying them. They can also bring in existing share servers under the Shared File Systems service. They can list all available share servers and update their status attribute. They can delete a specific share server if it has no dependent shares. ======================= Share server management ======================= To ``manage`` a share server means that when the driver is operating in the ``driver_handles_share_servers = True`` mode, the administrator can bring a pre-existing share server under the management of the Shared File Systems service. To ``unmanage`` means that the administrator is able to unregister an existing share server from the Shared File Systems service without deleting it from the storage back end. To be unmanaged, the referred share server cannot have any shares known to the Shared File Systems service. Manage a share server --------------------- To bring a share server under the Shared File Systems service, use the :command:`manila share-server-manage` command: .. code-block:: console manila share-server-manage [--driver_options [ [ ...]]] [--share_network_subnet ]] The positional arguments are: - host. The manage-share service host in ``host@backend`` format, which consists of the host name for the back end and the name of the back end. - share_network. The share network where the share server is contained. - identifier. The identifier of the share server on the back end storage. The ``driver_options`` is an optional set of one or more driver-specific metadata items as key and value pairs. The specific key-value pairs necessary vary from driver to driver. Consult the driver-specific documentation to determine if any specific parameters must be supplied.
Ensure that the share type has the ``driver_handles_share_servers = True`` extra-spec. The ``share_network_subnet`` is an optional parameter which was introduced in Train release. Due to a change in the share networks structure, a share network no longer contains the following attributes: ``neutron_net_id``, ``neutron_subnet_id``, ``gateway``, ``mtu``, ``network_type``, ``ip_version``, ``segmentation_id``. These attributes now pertain to the share network subnet entity, and a share network can span multiple share network subnets in different availability zones. If you do not specify a share network subnet, the Shared File Systems Service will choose the default one (which does not pertain to any availability zone). If using an OpenStack Networking (Neutron) based plugin, ensure that: - There are some ports created, which correspond to the share server interfaces. - The correct IP addresses are allocated to these ports. - ``manila:share`` is set as the owner of these ports. To manage a share server, run: .. code-block:: console $ manila share-server-manage \ manila@paris \ share_net_test \ backend_server_1 \ +--------------------+------------------------------------------+ | Property | Value | +--------------------+------------------------------------------+ | id | 441d806f-f0e0-4c90-b7e2-a553c6aa76b2 | | project_id | 907004508ef4447397ce6741a8f037c1 | | updated_at | None | | status | manage_starting | | host | manila@paris | | share_network_name | share_net_test | | share_network_id | c895fe26-92be-4152-9e6c-f2ad230efb13 | | created_at | 2019-04-25T18:25:23.000000 | | backend_details | {} | | is_auto_deletable | False | | identifier | backend_server_1 | +--------------------+------------------------------------------+ .. note:: The ``is_auto_deletable`` property is used by the Shared File Systems service to identify a share server that can be deleted by internal routines. 
The service can automatically delete share servers if there are no shares associated with them. To delete a share server when the last share is deleted, set the option: ``delete_share_server_with_last_share``. If a scheduled cleanup is desired instead, ``automatic_share_server_cleanup`` and ``unused_share_server_cleanup_interval`` options can be set. Only one of the cleanup methods can be used at one time. Any share server that has a share unmanaged from it cannot be automatically deleted by the Shared File Systems service. The same is true for share servers that have been managed into the service. Cloud administrators can delete such share servers manually if desired. Unmanage a share server ----------------------- To ``unmanage`` a share server, run :command:`manila share-server-unmanage `. .. code-block:: console $ manila share-server-unmanage 441d806f-f0e0-4c90-b7e2-a553c6aa76b2 $ manila share-server-show 441d806f-f0e0-4c90-b7e2-a553c6aa76b2 ERROR: Share server 441d806f-f0e0-4c90-b7e2-a553c6aa76b2 could not be found. Reset the share server state ---------------------------- As an administrator, you are able to reset a share server state. To reset the state of a share server, run :command:`manila share-server-reset-state --state `. The positional arguments are: - share-server. The share server name or id. - state. The state to be assigned to the share server. The options are: - ``active`` - ``error`` - ``deleting`` - ``creating`` - ``managing`` - ``unmanaging`` - ``unmanage_error`` - ``manage_error`` List share servers ------------------ To list share servers, run :command:`manila share-server-list` command: .. code-block:: console manila share-server-list [--host ] [--status ] [--share-network ] [--project-id ] [--columns ] All the arguments above are optional. They can be used to filter share servers. The options to filter: - host. Shows all the share servers pertaining to the specified host. - status. Shows all the share servers that are in the specified status.
- share_network. Shows all the share servers that pertain in the same share network. - project_id. Shows all the share servers pertaining to the same project. - columns. The administrator specifies which columns to display in the result of the list operation. .. code-block:: console $ manila share-server-list +--------------------------------------+--------------+--------+----------------+----------------------------------+------------+ | Id | Host | Status | Share Network | Project Id | Updated_at | +--------------------------------------+--------------+--------+----------------+----------------------------------+------------+ | 441d806f-f0e0-4c90-b7e2-a553c6aa76b2 | manila@paris | active | share_net_test | fd6d30efa5ff4c99834dc0d13f96e8eb | None | +--------------------------------------+--------------+--------+----------------+----------------------------------+------------+ =========================================== Share server limits (Since Wallaby release) =========================================== Since Wallaby release, it is possible to specify limits for share servers size and amount of instances. It helps administrators to provision their resources in the cloud system and balance the share servers' size. If a value is not configured, there is no behavioral change and manila will consider it as unlimited. Then, will reuse share servers regardless their size and amount of built instances. - ``max_share_server_size``: Maximum sum of gigabytes a share server can have considering all its share instances and snapshots. - ``max_shares_per_share_server``: Maximum number of share instances created in a share server. .. note:: If one of these limits is reached during a request that requires a share server to be provided, manila will create a new share server to place such request. .. note:: The limits can be ignored when placing a new share created from parent snapshot in the same host as the parent. 
For this scenario, the share server must be the same, so it does not take the limit into account, reusing the share server anyway. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-share-server-migration.rst0000664000175000017500000004652200000000000027241 0ustar00zuulzuul00000000000000.. _shared_file_systems_share_server_migration: ====================== Share server migration ====================== Share server migration is a functionality that lets administrators migrate a share server, and all its shares and snapshots, to a new destination. As with share migration, a 2-phase approach was implemented for share server migration, which allows administrators to control the right time to complete the operation, which usually results in client disruption. The process of migrating a share server involves different operations over the share server, but can be achieved by invoking two main operations: "start" and "complete". You'll need to begin with the "start" operation and wait until the service has completed the first phase of the migration to call the "complete" operation. When a share server is undergoing the first phase, it's possible to choose to "cancel" it, or get a report of the progress. A new operation called "migration check" is available to assist on a pre-migration phase, by validating within the destination host whether or not the migration can be completed, providing an output with the compatible capabilities supported by the driver. Share server migration is driven by share drivers, which means that both source and destination backends must support this functionality, and the driver must provide such operation in an efficient way. Server migration workflows ~~~~~~~~~~~~~~~~~~~~~~~~~~ Before actually starting the migration, you can use the operation :ref:`migration_check ` to verify if the destination host and the requested capabilities are supported by the driver.
If the answer is ``compatible`` equal to ``True``, you can proceed with the migration process, otherwise you'll need to identify the conflicting parameters or, in more complex scenarios, search for messages directly in the manila logs. The available capabilities are: ``writable``, ``nondisruptive``, ``preserve_snapshots`` and ``new_share_network_id``, which are detailed in :ref:`shared_file_systems_share_server_migration_parameters`. The migration process starts by invoking the :ref:`migration_start ` operation for a given share server. This operation will start the first phase of the migration that copies all data, from source to destination, including all shares, their access rules and even snapshots if supported by the driver controlling the destination host. For all ongoing migrations, you can optionally request the current status of a share server migration using :ref:`migration_get_progress ` operation to retrieve the total progress of the data copy and its current task state. If supported by the driver, you can also cancel this operation by issuing :ref:`migration_cancel ` and wait until all status become ``active`` and ``available`` again. After completing the data copy, the first phase is completed and the next operation, :ref:`migration_complete `, can be initiated to finish the migration. The :ref:`migration_complete ` operation usually disrupts clients access, since the export locations of the shares will change. The new export locations will be derived from the new share server that is provisioned at the destination, which is instantiated with distinct network allocations. A new field ``task_state`` is available in the share server model to help track which operation is being executed during this process. The following tables show, for each phase, the expected ``task_state``, along with their order of execution and a brief description of the actions that are being executed in the back end. .. 
Along with the share server migration progress (in percentage) and the current task state, the API also provides the destination share server ID. Alternatively, you may check the destination share server ID by querying the share server for a ``source_share_server_id`` set to the ID of the share server being migrated. During the entire migration process, the source share server will remain with ``server_migrating`` status while the destination share server will remain with ``server_migrating_to`` status.
since it's not possible to infer whether they are working properly, nor the current status of the migration.
table:: **Share server migration states - migration cancel** ============ ================================ ========================================================================================================================================= Sequence *task_state* Description ============ ================================ ========================================================================================================================================= 1 migration_cancel_in_progress The destination host started the cancel process. It will remain in this state until the driver finishes all tasks that are in progress. 2 migration_cancelled The migration was successfully cancelled. ============ ================================ ========================================================================================================================================= If an error occurs during the migration cancel operation, the source share server has its status reverted to ``active`` again, while the destination server has its status updated to ``error``. Both share servers will have their ``task_state`` set to ``migration_error``. All shares and snapshots have their statuses updated to ``available``. Using share server migration CLI ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The available commands to interact with the share server migration API are the following: .. _share_server_migration_check_cli: * ``migration_check``: call a migration check operation to validate if the provided destination host is compatible with the requested operation and its parameters. The output shows if the destination host is compatible or not and the migration capabilities supported by the back end. .. 
Back ends might use this operation to do many other validations with regard to storage compatibility, free space checks, share-type extra-specs validations, and so on.
The current migration state and progress can be retrieved using the ``migration-get-progress`` command.
If the driver doesn't support preserving snapshots but at least one share has a snapshot, the operation will fail and you will need to manually remove the remaining snapshots before proceeding.
Some drivers may provide driver-specific configuration options that can be changed to adapt to specific workloads.
For this reason, the destination share server will always be updated to ``error`` if any failure occurs.
All shares are associated with a share type. Share types are akin to ``flavors`` in the OpenStack Compute service (nova), or ``volume types`` in the OpenStack Block Storage service (cinder), or ``storage classes`` in Kubernetes. You can allow a share type to be accessible to all users in your cloud if you wish. You can also create private share types that allow only users belonging to certain OpenStack projects to access them. You can have an unlimited number of share types in your cloud, but for practical purposes, you may want to create only a handful of publicly accessible share types. Each share type is an object that encompasses ``extra-specs`` (extra specifications). These extra-specs can map to storage back-end capabilities, or can be directives to the service. Consider for example, offering three share types in your cloud to map to "service levels": +--------+--------------------------------------------------------------------------------------------------+ | Type | Capabilities/Instructions | +========+==================================================================================================+ | Gold | Allow creating snapshots, reverting to snapshots and share replication, "thick" provision shares | +--------+--------------------------------------------------------------------------------------------------+ | Silver | Allow creating snapshots, "thin" provision shares | +--------+--------------------------------------------------------------------------------------------------+ | Bronze | Don't allow creating snapshots, "thin" provision shares | +--------+--------------------------------------------------------------------------------------------------+ Capabilities or instructions such as the ones above are coded as extra-specs that your users and the Shared File System service understand. Users in OpenStack projects can see all public share types along with private share types that are made accessible to them. 
When shares are created on a share network, users can be sure they have their own isolated "share servers" that export their shares on the share network, with the ability to plug into user-determined authentication domains ("security services").
important:: If you do not create and configure a default share type, users *must* specify a valid share type during share creation, or share creation requests will fail. To configure the default share type, edit the ``manila.conf`` file, and set the configuration option [DEFAULT]/default_share_type. You must then create a share type, using :command:`manila type-create`: .. code-block:: console manila type-create [--is_public ] [--description ] [--extra-specs ] where: - ``name`` is the share type name - ``is_public`` defines the visibility for the share type (true/false) - ``description`` is a free form text field to describe the characteristics of the share type for your users' benefit - ``extra-specs`` defines a comma separated set of key=value pairs of optional extra specifications - ``spec_driver_handles_share_servers`` is the mandatory extra-spec (true/false) Share type operations --------------------- To create a new share type you need to specify the name of the new share type. You also require an extra spec ``driver_handles_share_servers``. The new share type can be public or private. .. 
$ manila type-create default-shares False \
code-block:: console $ source demorc $ manila type-list +--------------------------------------+-----------------------------------+------------+------------+--------------------------------------+--------------------------------------------+---------------------------------------------------------+ | ID | Name | visibility | is_default | required_extra_specs | optional_extra_specs | Description | +--------------------------------------+-----------------------------------+------------+------------+--------------------------------------+--------------------------------------------+---------------------------------------------------------+ | cf1f92ec-4d0a-4b79-8f18-6bb82c22840a | default-shares | public | - | driver_handles_share_servers : False | snapshot_support : False | Default share type for the cloud, no fancy capabilities | +--------------------------------------+-----------------------------------+------------+------------+--------------------------------------+--------------------------------------------+---------------------------------------------------------+ $ manila type-show default-shares +----------------------+---------------------------------------------------------+ | Property | Value | +----------------------+---------------------------------------------------------+ | id | cf1f92ec-4d0a-4b79-8f18-6bb82c22840a | | name | default-shares | | visibility | public | | is_default | NO | | description | Default share type for the cloud, no fancy capabilities | | required_extra_specs | driver_handles_share_servers : False | | optional_extra_specs | snapshot_support : False | | | create_share_from_snapshot_support : False | | | revert_to_snapshot_support : False | | | mount_snapshot_support : False | +----------------------+---------------------------------------------------------+ You can set or unset extra specifications for a share type using **manila type-key set ** command. .. 
To see private share types, run :command:`manila type-list --all`.
To view information about access for the private share type ``my_type1``:
code-block:: console $ manila snapshot-create Share1 --name Snapshot1 --description "Snapshot of Share1" +-------------+--------------------------------------+ | Property | Value | +-------------+--------------------------------------+ | status | creating | | share_id | aca648eb-8c03-4394-a5cc-755066b7eb66 | | user_id | 5c7bdb6eb0504d54a619acf8375c08ce | | description | Snapshot of Share1 | | created_at | 2015-09-25T05:27:38.000000 | | size | 1 | | share_proto | NFS | | id | 962e8126-35c3-47bb-8c00-f0ee37f42ddd | | project_id | cadd7139bc3148b8973df097c0911016 | | share_size | 1 | | name | Snapshot1 | +-------------+--------------------------------------+ Update snapshot name or description if needed: .. code-block:: console $ manila snapshot-rename Snapshot1 Snapshot_1 --description "Snapshot of Share1. Updated." Check that status of a snapshot is ``available``: .. code-block:: console $ manila snapshot-show Snapshot1 +-------------+--------------------------------------+ | Property | Value | +-------------+--------------------------------------+ | status | available | | share_id | aca648eb-8c03-4394-a5cc-755066b7eb66 | | user_id | 5c7bdb6eb0504d54a619acf8375c08ce | | name | Snapshot1 | | created_at | 2015-09-25T05:27:38.000000 | | share_proto | NFS | | id | 962e8126-35c3-47bb-8c00-f0ee37f42ddd | | project_id | cadd7139bc3148b8973df097c0911016 | | size | 1 | | share_size | 1 | | description | Snapshot of Share1 | +-------------+--------------------------------------+ To create a copy of your data from a snapshot, use :command:`manila create` with key ``--snapshot-id``. This creates a new share from an existing snapshot. Create a share from a snapshot and check whether it is available: .. code-block:: console $ manila create nfs 1 --name Share2 --metadata source=snapshot --description "Share from a snapshot." 
--snapshot-id 962e8126-35c3-47bb-8c00-f0ee37f42ddd +-----------------------------+--------------------------------------+ | Property | Value | +-----------------------------+--------------------------------------+ | status | None | | share_type_name | default | | description | Share from a snapshot. | | availability_zone | None | | share_network_id | None | | export_locations | [] | | share_server_id | None | | share_group_id | None | | host | None | | snapshot_id | 962e8126-35c3-47bb-8c00-f0ee37f42ddd | | is_public | False | | task_state | None | | snapshot_support | True | | id | b6b0617c-ea51-4450-848e-e7cff69238c7 | | size | 1 | | name | Share2 | | share_type | c0086582-30a6-4060-b096-a42ec9d66b86 | | created_at | 2015-09-25T06:25:50.240417 | | export_location | None | | share_proto | NFS | | project_id | 20787a7ba11946adad976463b57d8a2f | | metadata | {u'source': u'snapshot'} | +-----------------------------+--------------------------------------+ $ manila show Share2 +-----------------------------+-------------------------------------------+ | Property | Value | +-----------------------------+-------------------------------------------+ | status | available | | share_type_name | default | | description | Share from a snapshot. 
| | availability_zone | nova | | share_network_id | 5c3cbabb-f4da-465f-bc7f-fadbe047b85a | | export_locations | 10.254.0.3:/shares/share-1dc2a471-3d47-...| | share_server_id | 41b7829d-7f6b-4c96-aea5-d106c2959961 | | share_group_id | None | | host | manila@generic1#GENERIC1 | | snapshot_id | 962e8126-35c3-47bb-8c00-f0ee37f42ddd | | is_public | False | | task_state | None | | snapshot_support | True | | id | b6b0617c-ea51-4450-848e-e7cff69238c7 | | size | 1 | | name | Share2 | | share_type | c0086582-30a6-4060-b096-a42ec9d66b86 | | created_at | 2015-09-25T06:25:50.000000 | | share_proto | NFS | | project_id | 20787a7ba11946adad976463b57d8a2f | | metadata | {u'source': u'snapshot'} | +-----------------------------+-------------------------------------------+ By default, the Shared File Systems service will place the new share in the source share's pool, unless a different destination availability zone is provided by the user, using the key ``--availability-zone``. Starting from Ussuri release, a new filter and weigher were added to the scheduler to enhance the selection of a destination pool when creating shares from snapshot. Drivers that support creating shares from snapshots across back ends also need the back end configuration option ``replication_domain`` to be specified. This option can be an arbitrary string. As an administrator, you are expected to determine which back ends are compatible to copy data between each other. Once you have identified these back ends, configure ``replication_domain`` in their respective configuration sections to the same string. Refer to the :ref:`feature support mapping ` for identifying which back ends support this feature. The use of scheduler when creating share from a snapshot must be enabled using the configuration flag ``[DEFAULT]/use_scheduler_creating_share_from_snapshot``. This option is disabled by default. .. 
note:: When combining both ``--snapshot-id`` and ``--availability-zone`` keys, you'll need to make sure that the configuration flag ``[DEFAULT]/use_scheduler_creating_share_from_snapshot`` is enabled, or the operation will be denied when source and destination availability zones are different. You can soft-delete a snapshot using :command:`manila snapshot-delete `. If a snapshot is in busy state, and during the delete an ``error_deleting`` status appeared, administrator can force-delete it or explicitly reset the state. Use :command:`snapshot-reset-state [--state ] ` to update the state of a snapshot explicitly. A valid value of a status are ``available``, ``error``, ``creating``, ``deleting``, ``error_deleting``. If no state is provided, the ``available`` state will be used. Use :command:`manila snapshot-force-delete ` to force-delete a specified share snapshot in any state. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-troubleshoot.rst0000664000175000017500000000605700000000000025374 0ustar00zuulzuul00000000000000.. _shared_file_systems_troubleshoot: ======================================== Troubleshoot Shared File Systems service ======================================== Failures in Share File Systems service during a share creation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Problem ------- New shares can enter ``error`` state during the creation process. Solution -------- #. Make sure, that share services are running in debug mode. If the debug mode is not set, you will not get any tips from logs how to fix your issue. #. Find what share service holds a specified share. To do that, run command :command:`manila show ` and find a share host in the output. Host uniquely identifies what share service holds the broken share. #. Look thought logs of this share service. Usually, it can be found at ``/etc/var/log/manila-share.log``. 
This log should contain kind of traceback with extra information to help you to find the origin of issues. No valid host was found ~~~~~~~~~~~~~~~~~~~~~~~ Problem ------- If a share type contains invalid extra specs, the scheduler will not be able to locate a valid host for the shares. Solution -------- To diagnose this issue, make sure that scheduler service is running in debug mode. Try to create a new share and look for message ``Failed to schedule create_share: No valid host was found.`` in ``/etc/var/log/manila-scheduler.log``. To solve this issue look carefully through the list of extra specs in the share type, and the list of share services reported capabilities. Make sure that extra specs are pointed in the right way. Created share is unreachable ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Problem ------- By default, a new share does not have any active access rules. Solution -------- To provide access to new share, you need to create appropriate access rule with the right value. The value must defines access. Service becomes unavailable after upgrade ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Problem ------- After upgrading the Shared File Systems service from version v1 to version v2.x, you must update the service endpoint in the OpenStack Identity service. Otherwise, the service may become unavailable. Solution -------- #. To get the service type related to the Shared File Systems service, run: .. code-block:: console # openstack endpoint list # openstack endpoint show You will get the endpoints expected from running the Shared File Systems service. #. Make sure that these endpoints are updated. Otherwise, delete the outdated endpoints and create new ones. Failures during management of internal resources ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Problem ------- The Shared File System service manages internal resources effectively. Administrators may need to manually adjust internal resources to handle failures. 
Solution -------- Some drivers in the Shared File Systems service can create service entities, like servers and networks. If it is necessary, you can log in to project ``service`` and take manual control over it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/shared-file-systems-upgrades.rst0000664000175000017500000001365400000000000024456 0ustar00zuulzuul00000000000000Upgrading the Shared File System service ======================================== This document outlines steps and notes for operators for reference when upgrading their Shared File System service (manila) from previous versions of OpenStack. The service aims to provide a minimal downtime upgrade experience. Since the service does not operate in the data plane, the accessibility of any provisioned resources such as shares, share snapshots, share groups, share replicas, share servers, security services and share networks will not be affected during an upgrade. Clients can continue to actively use these resources while the service control plane is being upgraded. Plan the upgrade ---------------- It is highly recommended that you: * update the Shared File System service to the latest code from the release you are currently using. * read the `Shared File System service release notes `_ for the release that you intended to upgrade to. Pay special attention to the deprecations and upgrade notes. * consider the impact of the service control plane upgrade to your cloud's users. The upgrade process interrupts provisioning of new shared file systems and associated resources. It also prevents management operations on existing shared file systems and associated resources. Data path access to shared file systems will remain uninterrupted. * take a backup of the shared file system service database so you can rollback any failed upgrades to a previous version of the software. 
Although the ``manila-manage`` command offers a database downgrade command, it is not supported for production use. The only way to recover from a failed update is to restore the database from a backup. * identify your Shared File System service back end storage systems/solutions and their drivers. Ensure that the version of each storage system is supported by the respective driver in the target release. If you're using a storage solution from a third party vendor, consult their product pages to determine if the solution is supported by the release of OpenStack that you are upgrading to. Many vendors publish a support matrix either within this service administration guide, or on their own websites. If you find an incompatibility, stop, and determine if you have to upgrade the storage solution first. * develop an upgrade procedure and assess it thoroughly by using a test environment similar to your production environment. Graceful service shutdown ------------------------- Shared File System service components (scheduler, share-manager, data-manager) are python processes listening for messages on a AMQP queue. When the operator sends SIGTERM signal to the process, they stop getting new work from the queue, complete any outstanding work and then terminate. Database Migration ------------------ The Shared File System service only supports cold upgrades, meaning that the service plane is expected to be down during the database upgrade. Database upgrades include schema changes as well as data migrations to accommodate newer versions of the schema. Once upgraded, downgrading the database is not supported. When the database has been upgraded, older services may misbehave when accessing database objects, so ensure all ``manila-*`` services are down before you upgrade the database. 
Prune deleted database rows --------------------------- Shared File System service resources are soft deleted in the database, so users are able to track instances in the DB that are created and destroyed in production. Soft-deletion also helps cloud operators adhere to data retention policies. Not purging soft-deleted entries affects DB performance as indices grow very large and data migrations take longer as there is more data to migrate. It is recommended that you prune the service database before upgrading to prevent unnecessary data migrations. Pruning permanently deletes soft deleted database records. .. code:: manila-manage db purge Upgrade procedure ----------------- #. Ensure you're running the latest Shared File System service packages for the OpenStack release that you currently use. #. Run the ``manila-status upgrade check`` command to validate that the service is ready for upgrade. #. Backup the manila database #. Gracefully stop all Shared File System service processes. We recommend in this order: manila-api, manila-scheduler, manila-share and manila-data. .. note:: The manila-data service may be processing time consuming data migrations. Shutting it down will interrupt any ongoing migrations, and these will not be automatically started when the service comes back up. You can check the status on ongoing migrations with ``manila migration-get-progress`` command; issue ``manila migration-complete`` for any ongoing migrations that have completed their data copy phase. #. Upgrade all the service packages. If upgrading from distribution packages, your system package manager is expected to handle this automatically. #. Fix any deprecated configuration options used. #. Fix any deprecated api policies used. #. Run ``manila-manage db sync`` from any node with the latest manila packages. #. Start all the Shared File System service processes. #. Inspect the ``services`` by running ``manila service-list``. 
If there are any orphaned records, run ``manila-manage service cleanup`` to delete them. Upgrade testing --------------- The Shared File System service code is continually tested for upgrade from a previous release to the current release using `Grenade `_. Grenade is an OpenStack test harness project that validates upgrade scenarios between releases. It uses DevStack to initially perform a base OpenStack install and then upgrade to a target version. Tests include the creation of a variety of Shared File System service resources on the prior release, and verification for their existence and functionality after the upgrade. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/tegile_driver.rst0000664000175000017500000001613500000000000021577 0ustar00zuulzuul00000000000000.. Copyright (c) 2016 Tegile Systems Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Tegile Driver ============= The Tegile Manila driver uses Tegile IntelliFlash Arrays to provide shared filesystems to OpenStack. The Tegile Driver interfaces with a Tegile Array via the REST API. Requirements ------------ - Tegile IntelliFlash version 3.5.1 - For using CIFS, Active Directory must be configured in the Tegile Array. 
Supported Operations -------------------- The following operations are supported on a Tegile Array: * Create CIFS/NFS Share * Delete CIFS/NFS Share * Allow CIFS/NFS Share access * Only IP access type is supported for NFS * USER access type is supported for NFS and CIFS * RW and RO access supported * Deny CIFS/NFS Share access * IP access type is supported for NFS * USER access type is supported for NFS and CIFS * Create snapshot * Delete snapshot * Extend share * Shrink share * Create share from snapshot Backend Configuration --------------------- The following parameters need to be configured in the [DEFAULT] section of */etc/manila/manila.conf*: +-----------------------------------------------------------------------------------------------------------------------------------+ | [DEFAULT] | +============================+======================================================================================================+ | **Option** | **Description** | +----------------------------+-----------+------------------------------------------------------------------------------------------+ | enabled_share_backends | Name of the section on manila.conf used to specify a backend. | | | E.g. *enabled_share_backends = tegileNAS* | +----------------------------+------------------------------------------------------------------------------------------------------+ | enabled_share_protocols | Specify a list of protocols to be allowed for share creation. For Tegile driver this can be: | | | *NFS* or *CIFS* or *NFS, CIFS*. 
| +----------------------------+------------------------------------------------------------------------------------------------------+ The following parameters need to be configured in the [backend] section of */etc/manila/manila.conf*: +-------------------------------------------------------------------------------------------------------------------------------------+ | [tegileNAS] | +===============================+=====================================================================================================+ | **Option** | **Description** | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | share_backend_name | A name for the backend. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | share_driver | Python module path. For Tegile driver this must be: | | | *manila.share.drivers.tegile.tegile.TegileShareDriver*. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | driver_handles_share_servers| DHSS, Driver working mode. For Tegile driver **this must be**: | | | *False*. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | tegile_nas_server | Tegile array IP to connect from the Manila node. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | tegile_nas_login | This field is used to provide username credential to Tegile array. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ | tegile_nas_password | This field is used to provide password credential to Tegile array. 
| +-------------------------------+-----------------------------------------------------------------------------------------------------+ | tegile_default_project | This field can be used to specify the default project in Tegile array where shares are created. | | | This field is optional. | +-------------------------------+-----------------------------------------------------------------------------------------------------+ Below is an example of a valid configuration of Tegile driver: | ``[DEFAULT]`` | ``enabled_share_backends = tegileNAS`` | ``enabled_share_protocols = NFS,CIFS`` | ``[tegileNAS]`` | ``driver_handles_share_servers = False`` | ``share_backend_name = tegileNAS`` | ``share_driver = manila.share.drivers.tegile.tegile.TegileShareDriver`` | ``tegile_nas_server = 10.12.14.16`` | ``tegile_nas_login = admin`` | ``tegile_nas_password = password`` | ``tegile_default_project = financeshares`` Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Restrictions ------------ The Tegile driver has the following restrictions: - IP access type is supported only for NFS. - Only FLAT network is supported. The :mod:`manila.share.drivers.tegile.tegile` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.tegile.tegile :noindex: :members: :undoc-members: :show-inheritance: :exclude-members: TegileAPIExecutor, debugger ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/zadara_driver.rst0000664000175000017500000001121500000000000021562 0ustar00zuulzuul00000000000000.. Copyright (c) 2021 Zadara Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================================= Zadara VPSA Driver for OpenStack Manila ======================================= `Zadara’s `__ Virtual Private Storage Array (VPSA) is the first software defined, Enterprise-Storage-as-a-Service. It is an elastic and private block and file storage system which provides enterprise-grade data protection and data management storage services. Manila VPSA driver provides a seamless management capabilities for VPSA volumes, in this case, NFS & SMB volumes without losing the added value provided by the VPSA Storage Array/Flash-Array. Requirements ------------ - VPSA Storage Array/Flash-Array running version 20.12 or higher. - Networking preparation - the Zadara VPSA driver for Manila support DHSS=False (driver_handles_share_servers), the driver does not handle the network configuration, it is up to the administrator to ensure connectivity from a manila-share node and the Openstack cloud to the VPSA Front-End network (such as neutron flat/VLAN network). Supported shared filesystems and operations ------------------------------------------- Share file system supported ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - SMB (CIFS) - NFS Supported operations ~~~~~~~~~~~~~~~~~~~~ The following operations are supported: - Create a share. - Delete a share. - Extend a share. - Create a snapshot. - Delete a snapshot. - Create a share from snapshot. - Allow share access. - Manage a share. .. 
note:: - Only IP access type is supported - Both RW and RO access levels supported Backend Configuration ~~~~~~~~~~~~~~~~~~~~~ The following parameters need to be configured in the [DEFAULT] section of manila configuration (/etc/manila/manila.conf): - `enabled_share_backends` = Name of the section on manila.conf used to specify a backend i.e. *enabled_share_backends = zadaravpsa* - `enabled_share_protocols` - Specify a list of protocols to be allowed for share creation. The VPSA driver support the following options: *NFS* or *CIFS* or *NFS, CIFS* The following parameters need to be configured in the [backend] section of manila configuration (/etc/manila/manila.conf): Driver options -------------- - `zadara_vpsa_host` = - `zadara_vpsa_port` = - `zadara_vpsa_use_ssl` = - `zadara_driver_ssl_cert_path` = - `zadara_vpsa_poolname` - - `zadara_vol_encrypt` = - `zadara_gen3_vol_compress` = - `zadara_share_name_template` = - `zadara_share_snap_name_template` = - `driver_handles_share_servers` = - `share_driver` = manila.share.drivers.zadara.zadara.ZadaraVPSAShareDriver Back-end configuration example ------------------------------ .. code-block:: ini [DEFAULT] enabled_share_backends = zadaravpsa enabled_share_protocols = NFS,CIFS [zadaravpsa] driver_handles_share_servers = False zadara_vpsa_host = vsa-00000010-mycloud.zadaravpsa.com zadara_vpsa_port = 443 zadara_access_key = MYSUPERSECRETACCESSKEY zadara_vpsa_poolname = pool-00010001 share_backend_name = zadaravpsa zadara_vpsa_use_ssl = true share_driver = manila.share.drivers.zadara.zadara.ZadaraVPSAShareDriver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/admin/zfs_on_linux_driver.rst0000664000175000017500000001356100000000000023043 0ustar00zuulzuul00000000000000.. Copyright (c) 2016 Mirantis Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ZFS (on Linux) Driver ===================== Manila ZFSonLinux share driver uses ZFS filesystem for exporting NFS shares. Written and tested using Linux version of ZFS. Requirements ------------ * 'NFS' daemon that can be handled via "exportfs" app. * 'ZFS' filesystem packages, either Kernel or FUSE versions. * ZFS zpools that are going to be used by Manila should exist and be configured as desired. Manila will not change zpool configuration. * For remote ZFS hosts according to manila-share service host SSH should be installed. * For ZFS hosts that support replication: * SSH access for each other should be passwordless. * Service IP addresses should be available by ZFS hosts for each other. Supported Operations -------------------- The following operations are supported: * Create NFS Share * Delete NFS Share * Manage NFS Share * Unmanage NFS Share * Allow NFS Share access * Only IP access type is supported for NFS * Both access levels are supported - 'RW' and 'RO' * Deny NFS Share access * Create snapshot * Delete snapshot * Manage snapshot * Unmanage snapshot * Create share from snapshot * Extend share * Shrink share * Replication (experimental): * Create/update/delete/promote replica operations are supported * Share migration (experimental) Possibilities ------------- * Any amount of ZFS zpools can be used by share driver. * Allowed to configure default options for ZFS datasets that are used for share creation. 
* Any amount of nested datasets is allowed to be used. * All share replicas are read-only, only active one is RW. * All share replicas are synchronized periodically, not continuously. So, status 'in_sync' means latest sync was successful. Time range between syncs equals to value of config global opt 'replica_state_update_interval'. * Driver is able to use qualified extra spec 'zfsonlinux:compression'. It can contain any value that is supported by used ZFS app. But if it is disabled via config option with value 'compression=off', then it will not be used. Restrictions ------------ The ZFSonLinux share driver has the following restrictions: * Only IP access type is supported for NFS. * Only FLAT network is supported. * 'Promote share replica' operation will switch roles of current 'secondary' replica and 'active'. It does not make more than one active replica available. * 'SaMBa' based sharing is not yet implemented. * 'Thick provisioning' is not yet implemented. Known problems -------------- * 'Promote share replica' operation will make ZFS filesystem that became secondary as RO only on NFS level. On ZFS level system will stay mounted as was - RW. Backend Configuration --------------------- The following parameters need to be configured in the manila configuration file for the ZFSonLinux driver: * share_driver = manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver * driver_handles_share_servers = False * replication_domain = custom_str_value_as_domain_name * if empty, then replication will be disabled * if set then will be able to be used as replication peer for other backend with same value. * zfs_share_export_ip = * zfs_service_ip = * zfs_zpool_list = zpoolname1,zpoolname2/nested_dataset_for_zpool2 * can be one or more zpools * can contain nested datasets * zfs_dataset_creation_options = * readonly,quota,sharenfs and sharesmb options will be ignored * zfs_dataset_name_prefix = * Prefix to be used in each dataset name. 
* zfs_dataset_snapshot_name_prefix = * Prefix to be used in each dataset snapshot name. * zfs_use_ssh = * set 'False' if ZFS located on the same host as 'manila-share' service * set 'True' if 'manila-share' service should use SSH for ZFS configuration * zfs_ssh_username = * required for replication operations * required for SSH'ing to ZFS host if 'zfs_use_ssh' is set to 'True' * zfs_ssh_user_password = * password for 'zfs_ssh_username' of ZFS host. * used only if 'zfs_use_ssh' is set to 'True' * zfs_ssh_private_key_path = * used only if 'zfs_use_ssh' is set to 'True' * zfs_share_helpers = NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper * Approach for setting up helpers is similar to various other share driver * At least one helper should be used. * zfs_replica_snapshot_prefix = * Prefix to be used in dataset snapshot names that are created by 'update replica' operation. * zfs_migration_snapshot_prefix = * Prefix to be used in dataset snapshot names that are created for 'migration' operation. Restart of :term:`manila-share` service is needed for the configuration changes to take effect. The :mod:`manila.share.drivers.zfsonlinux.driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.zfsonlinux.driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.drivers.zfsonlinux.utils` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.share.drivers.zfsonlinux.utils :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7736735 manila-21.0.0/doc/source/cli/0000775000175000017500000000000000000000000015672 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/cli/index.rst0000664000175000017500000000130400000000000017531 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Command Line Interface ---------------------- .. toctree:: :maxdepth: 1 manila manila-manage manila-status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/cli/manila-manage.rst0000664000175000017500000001244000000000000021114 0ustar00zuulzuul00000000000000============= manila-manage ============= ------------------------------------- control and manage shared filesystems ------------------------------------- :Author: openstack-discuss@lists.openstack.org :Copyright: OpenStack LLC :Manual section: 1 :Manual group: shared filesystems SYNOPSIS ======== manila-manage [] DESCRIPTION =========== manila-manage controls shared filesystems service. 
More information about OpenStack Manila is at https://wiki.openstack.org/wiki/Manila OPTIONS ======= The standard pattern for executing a manila-manage command is: ``manila-manage []`` For example, to obtain a list of all hosts: ``manila-manage host list`` Run without arguments to see a list of available command categories: ``manila-manage`` Categories are shell, logs, service, db, host, version, config, share, and share_server. Global Options ============== ``--config-dir DIR`` Path to a config directory to pull `*.conf` files from. The set is parsed after `--config-file` arguments. ``--config-file PATH`` Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. ``--debug, -d`` Set logging level to DEBUG instead of the default INFO level. ``--log-config-append PATH`` Append a logging configuration file. If set, other logging options are ignored. ``--log-date-format DATE_FORMAT`` Defines the format string for %(asctime)s in log records. ``--log-dir LOG_DIR`` The base directory used for relative log_file paths. ``--log-file PATH`` Name of the log file to send logging output to. ``--use-journal / --nouse-journal`` Enable or disable journald for logging. ``--use-json / --nouse-json`` Enable or disable JSON formatting for logs. ``--use-syslog / --nouse-syslog`` Enable or disable syslog for logging. ``--watch-log-file / --nowatch-log-file`` Monitor log file changes. ``--shell_completion SHELL_COMPLETION`` Display a shell completion script (allowed values: bash, zsh). ``--state_path STATE_PATH`` Top-level directory for maintaining Manila's state. ``--syslog-log-facility SYSLOG_LOG_FACILITY`` Syslog facility to receive log lines. ``--version`` Show program version and exit. Manila Db ~~~~~~~~~ ``manila-manage db version`` Print the current database version. ``manila-manage db sync`` Sync the database up to the most recent version. This is the standard way to create the db as well. 
``manila-manage db downgrade `` Downgrade database to given version. ``manila-manage db stamp `` Stamp database with given version. ``manila-manage db revision `` Generate new migration. ``manila-manage db purge `` Purge deleted rows older than a given age from Manila database tables. If age_in_days is not given or is specified as 0 all available rows will be deleted. Manila Logs ~~~~~~~~~~~ ``manila-manage logs errors`` Displays Manila errors from log files. ``manila-manage logs syslog `` Displays Manila alerts from syslog. Manila Shell ~~~~~~~~~~~~ ``manila-manage shell bpython`` Starts a new bpython shell. ``manila-manage shell ipython`` Starts a new ipython shell. ``manila-manage shell python`` Starts a new python shell. ``manila-manage shell run`` Starts a new shell using python. ``manila-manage shell script `` Runs the named script from the specified path with flags set. Manila Host ~~~~~~~~~~~ ``manila-manage host list`` Returns list of running Manila hosts. Manila Config ~~~~~~~~~~~~~ ``manila-manage config list`` Returns list of currently set config options and their values. Manila Service ~~~~~~~~~~~~~~ ``manila-manage service list [--format_output table|json|yaml]`` Returns list of Manila services. Output format can be modified using `--format_output`. It can be `table`, `json`, or `yaml`. Defaults to `table`. Manila Version ~~~~~~~~~~~~~~ ``manila-manage version list`` Returns list of versions. Manila Share ~~~~~~~~~~~~ ``manila-manage share delete `` Deletes a specific share by ID. ``manila-manage share update_host [-h] --currenthost CURRENTHOST --newhost NEWHOST [--force FORCE]`` Update the ``host`` attribute within a share. This can be used to alter existing share records if the backend or host name has been updated in manila configuration. A fully qualified host string is of the format 'HostA@BackendB#PoolC'. Provide only the host name (ex: 'HostA') to update the hostname part of the host string. 
Provide only the host name and backend name (ex: 'HostA@BackendB') to update the host and backend names. ``--force`` parameter can be used to skip validations. Manila Share Server ~~~~~~~~~~~~~~~~~~~ ``manila-manage share_server update_share_server_capabilities [-h] --share_servers SHARE_SERVERS --capabilities CAPABILITIES [--value VALUE]`` Set share server boolean capabilities such as `security_service_update_support` and `network_allocation_update_support`. FILES ===== The manila-manage.conf file contains configuration information in the form of parameter settings. BUGS ==== * Manila's bug tracker is on Launchpad. You can view current bugs and file new bugs on `OpenStack Manila Bug Tracker `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/cli/manila-status.rst0000664000175000017500000000334500000000000021213 0ustar00zuulzuul00000000000000============= manila-status ============= Synopsis ======== :: manila-status [] Description =========== :program:`manila-status` is a tool that provides routines for checking the status of a Manila deployment. Options ======= The standard pattern for executing a :program:`manila-status` command is:: manila-status [] Run without arguments to see a list of available command categories:: manila-status Categories are: * ``upgrade`` Detailed descriptions are below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: manila-status upgrade These sections describe the available categories and arguments for :program:`manila-status`. Upgrade ~~~~~~~ .. _manila-status-checks: ``manila-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. This command expects to have complete configuration and access to databases and services. **Return Codes** .. 
list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **8.0.0 (Stein)** * Placeholder to be filled in with checks as they are added in Stein. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/cli/manila.rst0000664000175000017500000026770400000000000017705 0ustar00zuulzuul00000000000000.. ################################################### .. ## WARNING ###################################### .. ############## WARNING ########################## .. ########################## WARNING ############## .. ###################################### WARNING ## .. ################################################### .. ################################################### .. ## .. This file is tool-generated. Do not edit manually. .. http://docs.openstack.org/contributor-guide/ .. doc-tools/cli-reference.html .. ## .. ## WARNING ###################################### .. ############## WARNING ########################## .. ########################## WARNING ############## .. ###################################### WARNING ## .. ################################################### ======================================================== Shared File Systems service (manila) command-line client ======================================================== The manila client is the command-line interface (CLI) for the Shared File Systems service (manila) API and its extensions. This chapter documents :command:`manila` version ``1.16.0``. 
For help on a specific :command:`manila` command, enter: .. code-block:: console $ manila help COMMAND .. _manila_command_usage: manila usage ~~~~~~~~~~~~ .. code-block:: console usage: manila [--version] [-d] [--os-cache] [--os-reset-cache] [--os-user-id ] [--os-username ] [--os-password ] [--os-tenant-name ] [--os-project-name ] [--os-tenant-id ] [--os-project-id ] [--os-user-domain-id ] [--os-user-domain-name ] [--os-project-domain-id ] [--os-project-domain-name ] [--os-auth-url ] [--os-region-name ] [--os-token ] [--bypass-url ] [--service-type ] [--service-name ] [--share-service-name ] [--endpoint-type ] [--os-share-api-version ] [--os-cacert ] [--retries ] [--os-cert ] ... **Subcommands:** ``absolute-limits`` Print a list of absolute limits for a user. ``access-allow`` Allow access to the share. ``access-deny`` Deny access to a share. ``access-list`` Show access list for share. ``api-version`` Display the API version information. ``availability-zone-list`` List all availability zones. ``create`` Creates a new share (NFS, CIFS, CephFS, GlusterFS or HDFS). ``credentials`` Show user credentials returned from auth. ``delete`` Remove one or more shares. ``endpoints`` Discover endpoints that get returned from the authenticate services. ``extend`` Increases the size of an existing share. ``extra-specs-list`` Print a list of current 'share types and extra specs' (Admin Only). ``force-delete`` Attempt force-delete of share, regardless of state (Admin only). ``list`` List NAS shares with filters. ``manage`` Manage share not handled by Manila (Admin only). ``message-delete`` Remove one or more messages. ``message-list`` Lists all messages. ``message-show`` Show message's details. ``metadata`` Set or delete metadata on a share. ``metadata-show`` Show metadata of given share. ``metadata-update-all`` Update all metadata of a share. ``migration-cancel`` Cancels migration of a given share when copying (Admin only, Experimental). 
``migration-complete`` Completes migration for a given share (Admin only, Experimental). ``migration-get-progress`` Gets migration progress of a given share when copying (Admin only, Experimental). ``migration-start`` Migrates share to a new host (Admin only, Experimental). ``pool-list`` List all backend storage pools known to the scheduler (Admin only). ``quota-class-show`` List the quotas for a quota class. ``quota-class-update`` Update the quotas for a quota class (Admin only). ``quota-defaults`` List the default quotas for a tenant. ``quota-delete`` Delete quota for a tenant/user. The quota will revert back to default (Admin only). ``quota-show`` List the quotas for a tenant/user. ``quota-update`` Update the quotas for a tenant/user (Admin only). ``rate-limits`` Print a list of rate limits for a user. ``reset-state`` Explicitly update the state of a share (Admin only). ``reset-task-state`` Explicitly update the task state of a share (Admin only, Experimental). ``revert-to-snapshot`` Revert a share to the specified snapshot. ``security-service-create`` Create security service used by tenant. ``security-service-delete`` Delete one or more security services. ``security-service-list`` Get a list of security services. ``security-service-show`` Show security service. ``security-service-update`` Update security service. ``service-disable`` Disables 'manila-share' or 'manila-scheduler' services (Admin only). ``service-enable`` Enables 'manila-share' or 'manila-scheduler' services (Admin only). ``service-list`` List all services (Admin only). ``share-export-location-list`` List export locations of a given share. ``share-export-location-show`` Show export location of the share. ``share-group-create`` Creates a new share group (Experimental). ``share-group-delete`` Remove one or more share groups (Experimental). ``share-group-list`` List share groups with filters (Experimental). 
``share-group-reset-state`` Explicitly update the state of a share group (Admin only, Experimental). ``share-group-show`` Show details about a share group (Experimental). ``share-group-snapshot-create`` Creates a new share group snapshot (Experimental). ``share-group-snapshot-delete`` Remove one or more share group snapshots (Experimental). ``share-group-snapshot-list`` List share group snapshots with filters (Experimental). ``share-group-snapshot-list-members`` List members of a share group snapshot (Experimental). ``share-group-snapshot-reset-state`` Explicitly update the state of a share group snapshot (Admin only, Experimental). ``share-group-snapshot-show`` Show details about a share group snapshot (Experimental). ``share-group-snapshot-update`` Update a share group snapshot (Experimental). ``share-group-type-access-add`` Adds share group type access for the given project (Admin only). ``share-group-type-access-list`` Print access information about a share group type (Admin only). ``share-group-type-access-remove`` Removes share group type access for the given project (Admin only). ``share-group-type-create`` Create a new share group type (Admin only). ``share-group-type-delete`` Delete a specific share group type (Admin only). ``share-group-type-key`` Set or unset group_spec for a share group type (Admin only). ``share-group-type-list`` Print a list of available 'share group types'. ``share-group-type-specs-list`` Print a list of 'share group types specs' (Admin Only). ``share-group-update`` Update a share group (Experimental). ``share-instance-export-location-list`` List export locations of a given share instance. ``share-instance-export-location-show`` Show export location for the share instance. ``share-instance-force-delete`` Force-delete the share instance, regardless of state (Admin only). ``share-instance-list`` List share instances (Admin only). ``share-instance-reset-state`` Explicitly update the state of a share instance (Admin only). 
``share-instance-show`` Show details about a share instance (Admin only). ``share-network-create`` Create description for network used by the tenant. ``share-network-delete`` Delete one or more share networks. ``share-network-list`` Get a list of network info. ``share-network-security-service-add`` Associate security service with share network. ``share-network-security-service-list`` Get list of security services associated with a given share network. ``share-network-security-service-remove`` Dissociate security service from share network. ``share-network-show`` Get a description for network used by the tenant. ``share-network-update`` Update share network data. ``share-replica-create`` Create a share replica (Experimental). ``share-replica-delete`` Remove one or more share replicas (Experimental). ``share-replica-list`` List share replicas (Experimental). ``share-replica-promote`` Promote specified replica to 'active' replica_state (Experimental). ``share-replica-reset-replica-state`` Explicitly update the 'replica_state' of a share replica (Experimental). ``share-replica-reset-state`` Explicitly update the 'status' of a share replica (Experimental). ``share-replica-resync`` Attempt to update the share replica with its 'active' mirror (Experimental). ``share-replica-show`` Show details about a replica (Experimental). ``share-server-delete`` Delete one or more share servers (Admin only). ``share-server-details`` Show share server details (Admin only). ``share-server-list`` List all share servers (Admin only). ``share-server-show`` Show share server info (Admin only). ``show`` Show details about a NAS share. ``shrink`` Decreases the size of an existing share. ``snapshot-access-allow`` Allow read only access to a snapshot. ``snapshot-access-deny`` Deny access to a snapshot. ``snapshot-access-list`` Show access list for a snapshot. ``snapshot-create`` Add a new snapshot. ``snapshot-delete`` Remove one or more snapshots. 
``snapshot-export-location-list`` List export locations of a given snapshot. ``snapshot-export-location-show`` Show export location of the share snapshot. ``snapshot-force-delete`` Attempt force-deletion of one or more snapshots. Regardless of the state (Admin only). ``snapshot-instance-export-location-list`` List export locations of a given snapshot instance. ``snapshot-instance-export-location-show`` Show export location of the share instance snapshot. ``snapshot-instance-list`` List share snapshot instances. ``snapshot-instance-reset-state`` Explicitly update the state of a share snapshot instance. ``snapshot-instance-show`` Show details about a share snapshot instance. ``snapshot-list`` List all the snapshots. ``snapshot-manage`` Manage share snapshot not handled by Manila (Admin only). ``snapshot-rename`` Rename a snapshot. ``snapshot-reset-state`` Explicitly update the state of a snapshot (Admin only). ``snapshot-show`` Show details about a snapshot. ``snapshot-unmanage`` Unmanage one or more share snapshots (Admin only). ``type-access-add`` Adds share type access for the given project (Admin only). ``type-access-list`` Print access information about the given share type (Admin only). ``type-access-remove`` Removes share type access for the given project (Admin only). ``type-create`` Create a new share type (Admin only). ``type-delete`` Delete one or more specific share types (Admin only). ``type-key`` Set or unset extra_spec for a share type (Admin only). ``type-list`` Print a list of available 'share types'. ``unmanage`` Unmanage share (Admin only). ``update`` Rename a share. ``bash-completion`` Print arguments for bash_completion. Prints all of the commands and options to stdout so that the manila.bash_completion script doesn't have to hard code them. ``help`` Display help about this program or one of its subcommands. ``list-extensions`` List all the os-api extensions that are available. .. 
_manila_command_options: manila optional arguments ~~~~~~~~~~~~~~~~~~~~~~~~~ ``--version`` show program's version number and exit ``-d, --debug`` Print debugging output. ``--os-cache`` Use the auth token cache. Defaults to ``env[OS_CACHE]``. ``--os-reset-cache`` Delete cached password and auth token. ``--os-user-id `` Defaults to env [OS_USER_ID]. ``--os-username `` Defaults to ``env[OS_USERNAME]``. ``--os-password `` Defaults to ``env[OS_PASSWORD]``. ``--os-tenant-name `` Defaults to ``env[OS_TENANT_NAME]``. ``--os-project-name `` Another way to specify tenant name. This option is mutually exclusive with --os-tenant-name. Defaults to ``env[OS_PROJECT_NAME]``. ``--os-tenant-id `` Defaults to ``env[OS_TENANT_ID]``. ``--os-project-id `` Another way to specify tenant ID. This option is mutually exclusive with --os-tenant-id. Defaults to ``env[OS_PROJECT_ID]``. ``--os-user-domain-id `` OpenStack user domain ID. Defaults to ``env[OS_USER_DOMAIN_ID]``. ``--os-user-domain-name `` OpenStack user domain name. Defaults to ``env[OS_USER_DOMAIN_NAME]``. ``--os-project-domain-id `` Defaults to ``env[OS_PROJECT_DOMAIN_ID]``. ``--os-project-domain-name `` Defaults to ``env[OS_PROJECT_DOMAIN_NAME]``. ``--os-auth-url `` Defaults to ``env[OS_AUTH_URL]``. ``--os-region-name `` Defaults to ``env[OS_REGION_NAME]``. ``--os-token `` Defaults to ``env[OS_TOKEN]``. ``--bypass-url `` Use this API endpoint instead of the Service Catalog. Defaults to ``env[OS_MANILA_BYPASS_URL]``. ``--service-type `` Defaults to compute for most actions. ``--service-name `` Defaults to ``env[OS_MANILA_SERVICE_NAME]``. ``--share-service-name `` Defaults to ``env[OS_MANILA_SHARE_SERVICE_NAME]``. ``--endpoint-type `` Defaults to ``env[OS_MANILA_ENDPOINT_TYPE]`` or publicURL. ``--os-share-api-version `` Accepts 1.x to override default to ``env[OS_SHARE_API_VERSION]``. ``--os-cacert `` Specify a CA bundle file to use in verifying a TLS (https) server certificate. Defaults to ``env[OS_CACERT]``. 
``--retries `` Number of retries. ``--os-cert `` Defaults to ``env[OS_CERT]``. .. _manila_absolute-limits: manila absolute-limits ---------------------- .. code-block:: console usage: manila absolute-limits Print a list of absolute limits for a user. .. _manila_access-allow: manila access-allow ------------------- .. code-block:: console usage: manila access-allow [--access-level ] Allow access to the share. **Positional arguments:** ```` Name or ID of the NAS share to modify. ```` Access rule type (only "ip", "user"(user or group), "cert" or "cephx" are supported). ```` Value that defines access. **Optional arguments:** ``--access-level , --access_level `` Share access level ("rw" and "ro" access levels are supported). Defaults to rw. .. _manila_access-deny: manila access-deny ------------------ .. code-block:: console usage: manila access-deny Deny access to a share. **Positional arguments:** ```` Name or ID of the NAS share to modify. ```` ID of the access rule to be deleted. .. _manila_access-list: manila access-list ------------------ .. code-block:: console usage: manila access-list [--columns ] Show access list for share. **Positional arguments:** ```` Name or ID of the share. **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "access_type,access_to". .. _manila_api-version: manila api-version ------------------ .. code-block:: console usage: manila api-version Display the API version information. .. _manila_availability-zone-list: manila availability-zone-list ----------------------------- .. code-block:: console usage: manila availability-zone-list [--columns ] List all availability zones. **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. _manila_create: manila create ------------- .. 
code-block:: console usage: manila create [--snapshot-id ] [--name ] [--metadata [ [ ...]]] [--share-network ] [--description ] [--share-type ] [--public] [--availability-zone ] [--share-group ] Creates a new share (NFS, CIFS, CephFS, GlusterFS or HDFS). **Positional arguments:** ```` Share protocol (NFS, CIFS, CephFS, GlusterFS or HDFS). ```` Share size in GiB. **Optional arguments:** ``--snapshot-id , --snapshot_id `` Optional snapshot ID to create the share from. (Default=None) ``--name `` Optional share name. (Default=None) ``--metadata [ [ ...]]`` Metadata key=value pairs (Optional, Default=None). ``--share-network , --share_network `` Optional network info ID or name. ``--description `` Optional share description. (Default=None) ``--share-type , --share_type , --volume-type , --volume_type `` Optional share type. Use of optional volume type is deprecated. (Default=None) ``--public`` Level of visibility for share. Defines whether other tenants are able to see it or not. ``--availability-zone , --availability_zone , --az `` Availability zone in which share should be created. ``--share-group , --share_group , --group `` Optional share group name or ID in which to create the share (Experimental, Default=None). .. _manila_credentials: manila credentials ------------------ .. code-block:: console usage: manila credentials Show user credentials returned from auth. .. _manila_delete: manila delete ------------- .. code-block:: console usage: manila delete [--share-group ] [ ...] Remove one or more shares. **Positional arguments:** ```` Name or ID of the share(s). **Optional arguments:** ``--share-group , --share_group , --group `` Optional share group name or ID which contains the share (Experimental, Default=None). .. _manila_endpoints: manila endpoints ---------------- .. code-block:: console usage: manila endpoints Discover endpoints that get returned from the authenticate services. .. _manila_extend: manila extend ------------- .. 
code-block:: console usage: manila extend [--wait] [--force] Increases the size of an existing share. **Positional arguments:** ```` Name or ID of share to extend. ```` New size of share, in GiBs. **Optional arguments:** ``--wait`` Wait for share extension. ``--force`` Extend share directly and not go through scheduler. .. _manila_extra-specs-list: manila extra-specs-list ----------------------- .. code-block:: console usage: manila extra-specs-list [--columns ] Print a list of current 'share types and extra specs' (Admin Only). **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. _manila_force-delete: manila force-delete ------------------- .. code-block:: console usage: manila force-delete [ ...] Attempt force-delete of share, regardless of state (Admin only). **Positional arguments:** ```` Name or ID of the share(s) to force delete. .. _manila_list: manila list ----------- .. code-block:: console usage: manila list [--all-tenants [<0|1>]] [--name ] [--status ] [--share-server-id ] [--metadata [ [ ...]]] [--extra-specs [ [ ...]]] [--share-type ] [--limit ] [--offset ] [--sort-key ] [--sort-dir ] [--snapshot ] [--host ] [--share-network ] [--project-id ] [--public] [--share-group ] [--columns ] List NAS shares with filters. **Optional arguments:** ``--all-tenants [<0|1>]`` Display information from all tenants (Admin only). ``--name `` Filter results by name. ``--status `` Filter results by status. ``--share-server-id , --share-server_id , --share_server-id , --share_server_id `` Filter results by share server ID (Admin only). ``--metadata [ [ ...]]`` Filters results by a metadata key and value. OPTIONAL: Default=None. ``--extra-specs [ [ ...]], --extra_specs [ [ ...]]`` Filters results by an extra specs key and value of share type that was used for share creation. OPTIONAL: Default=None. 
``--share-type , --volume-type , --share_type , --share-type-id , --volume-type-id , --share-type_id , --share_type-id , --share_type_id , --volume_type , --volume_type_id `` Filter results by a share type id or name that was used for share creation. ``--limit `` Maximum number of shares to return. OPTIONAL: Default=None. ``--offset `` Set offset to define start point of share listing. OPTIONAL: Default=None. ``--sort-key , --sort_key `` Key to be sorted, available keys are ('id', 'status', 'size', 'host', 'share_proto', 'availability_zone', 'user_id', 'project_id', 'created_at', 'updated_at', 'display_name', 'name', 'share_type_id', 'share_type', 'share_network_id', 'share_network', 'snapshot_id', 'snapshot'). OPTIONAL: Default=None. ``--sort-dir , --sort_dir `` Sort direction, available values are ('asc', 'desc'). OPTIONAL: Default=None. ``--snapshot `` Filter results by snapshot name or id, that was used for share. ``--host `` Filter results by host. ``--share-network , --share_network `` Filter results by share-network name or id. ``--project-id , --project_id `` Filter results by project id. Useful with set key '--all-tenants'. ``--public`` Add public shares from all tenants to result. ``--share-group , --share_group , --group `` Filter results by share group name or ID (Experimental, Default=None). ``--columns `` Comma separated list of columns to be displayed example --columns "export_location,is public". .. _manila_list-extensions: manila list-extensions ---------------------- .. code-block:: console usage: manila list-extensions List all the os-api extensions that are available. .. _manila_manage: manila manage ------------- .. code-block:: console usage: manila manage [--name ] [--description ] [--share_type ] [--driver_options [ [ ...]]] [--public] Manage share not handled by Manila (Admin only). **Positional arguments:** ```` manage-share service host: some.host@driver#pool. ```` Protocol of the share to manage, such as NFS or CIFS. 
```` Share export path, NFS share such as: 10.0.0.1:/example_path, CIFS share such as: \\\\10.0.0.1\\example_cifs_share. **Optional arguments:** ``--name `` Optional share name. (Default=None) ``--description `` Optional share description. (Default=None) ``--share_type , --share-type `` Optional share type assigned to share. (Default=None) ``--driver_options [ [ ...]], --driver-options [ [ ...]]`` Driver option key=value pairs (Optional, Default=None). ``--public`` Level of visibility for share. Defines whether other tenants are able to see it or not. Available only for microversion >= 2.8. .. _manila_message-delete: manila message-delete ---------------------- .. code-block:: console usage: manila message-delete [ ...] Remove one or more messages. **Positional arguments:** ```` ID of the message(s). .. _manila_message-list: manila message-list ---------------------- .. code-block:: console usage: manila message-list [--resource_id ] [--resource_type ] [--action_id ] [--detail_id ] [--request_id ] [--level ] [--limit ] [--offset ] [--sort-key ] [--sort-dir ] [--columns ] [--since ] [--before ] Lists all messages. **Optional arguments:** ``--resource_id , --resource-id , --resource `` Filters results by a resource uuid. (Default=None). ``--resource_type , --resource-type `` Filters results by a resource type. (Default=None). Example: "manila message-list --resource_type share" ``--action_id , --action-id , --action `` Filters results by action id. (Default=None). ``--detail_id , --detail-id , --detail `` Filters results by detail id. (Default=None). ``--request_id , --request-id , --request `` Filters results by request id. (Default=None). ``--level , --message_level , --message-level `` Filters results by the message level. (Default=None). Example: "manila message-list --level ERROR". ``--limit `` Maximum number of messages to return. (Default=None) ``--offset `` Start position of message listing. 
``--sort-key , --sort_key `` Key to be sorted, available keys are ('id', 'project_id', 'request_id', 'resource_type', 'action_id', 'detail_id', 'resource_id', 'message_level', 'expires_at', 'request_id', 'created_at'). (Default=desc). ``--sort-dir , --sort_dir `` Sort direction, available values are ('asc', 'desc'). OPTIONAL: Default=None. ``--columns `` Comma separated list of columns to be displayed example --columns "resource_id, user_message". ``--since `` Return only user messages created since given date. The date format must be conforming to ISO8601. Available only for microversion >= 2.52. ``--before `` Return only user messages created before given date. The date format must be conforming to ISO8601. Available only for microversion >= 2.52. .. _manila_message-show: manila message-show ---------------------- .. code-block:: console usage: manila message-show Show details about a message. **Positional arguments:** ```` ID of the message. .. _manila_metadata: manila metadata --------------- .. code-block:: console usage: manila metadata [ ...] Set or delete metadata on a share. **Positional arguments:** ```` Name or ID of the share to update metadata on. ```` Actions: 'set' or 'unset'. ```` Metadata to set or unset (key is only necessary on unset). .. _manila_metadata-show: manila metadata-show -------------------- .. code-block:: console usage: manila metadata-show Show metadata of given share. **Positional arguments:** ```` Name or ID of the share. .. _manila_metadata-update-all: manila metadata-update-all -------------------------- .. code-block:: console usage: manila metadata-update-all [ ...] Update all metadata of a share. **Positional arguments:** ```` Name or ID of the share to update metadata on. ```` Metadata entry or entries to update. .. _manila_migration-cancel: manila migration-cancel ----------------------- .. code-block:: console usage: manila migration-cancel Cancels migration of a given share when copying (Admin only, Experimental). 
**Positional arguments:** ```` Name or ID of share to cancel migration. .. _manila_migration-complete: manila migration-complete ------------------------- .. code-block:: console usage: manila migration-complete Completes migration for a given share (Admin only, Experimental). **Positional arguments:** ```` Name or ID of share to complete migration. .. _manila_migration-get-progress: manila migration-get-progress ----------------------------- .. code-block:: console usage: manila migration-get-progress Gets migration progress of a given share when copying (Admin only, Experimental). **Positional arguments:** ```` Name or ID of the share to get share migration progress information. .. _manila_migration-start: manila migration-start ---------------------- .. code-block:: console usage: manila migration-start [--force_host_assisted_migration ] --preserve-metadata --preserve-snapshots --writable --nondisruptive [--new_share_network ] [--new_share_type ] Migrates share to a new host (Admin only, Experimental). **Positional arguments:** ```` Name or ID of share to migrate. ```` Destination host where share will be migrated to. Use the format 'host@backend#pool'. **Optional arguments:** ``--force_host_assisted_migration , --force-host-assisted-migration `` Enforces the use of the host-assisted migration approach, which bypasses driver optimizations. Default=False. ``--preserve-metadata , --preserve_metadata `` Enforces migration to preserve all file metadata when moving its contents. If set to True, host-assisted migration will not be attempted. ``--preserve-snapshots , --preserve_snapshots `` Enforces migration of the share snapshots to the destination. If set to True, host-assisted migration will not be attempted. ``--writable `` Enforces migration to keep the share writable while contents are being moved. If set to True, host-assisted migration will not be attempted. ``--nondisruptive `` Enforces migration to be nondisruptive. 
If set to True, host-assisted migration will not be attempted. ``--new_share_network , --new-share-network `` Specify the new share network for the share. Do not specify this parameter if the migrating share has to be retained within its current share network. ``--new_share_type , --new-share-type `` Specify the new share type for the share. Do not specify this parameter if the migrating share has to be retained with its current share type. .. _manila_pool-list: manila pool-list ---------------- .. code-block:: console usage: manila pool-list [--host ] [--backend ] [--pool ] [--columns ] [--detail] [--share-type ] List all backend storage pools known to the scheduler (Admin only). **Optional arguments:** ``--host `` Filter results by host name. Regular expressions are supported. ``--backend `` Filter results by backend name. Regular expressions are supported. ``--pool `` Filter results by pool name. Regular expressions are supported. ``--columns `` Comma separated list of columns to be displayed example --columns "name,host". ``--detail, --detailed`` Show detailed information about pools. (Default=False) ``--share-type , --share_type , --share-type-id , --share_type_id `` Filter results by share type name or ID. (Default=None)Available only for microversion >= 2.23. .. _manila_quota-class-show: manila quota-class-show ----------------------- .. code-block:: console usage: manila quota-class-show List the quotas for a quota class. **Positional arguments:** ```` Name of quota class to list the quotas for. .. _manila_quota-class-update: manila quota-class-update ------------------------- .. code-block:: console usage: manila quota-class-update [--shares ] [--snapshots ] [--gigabytes ] [--snapshot-gigabytes ] [--share-networks ] [--share-groups ] [--share-group-snapshots ] Update the quotas for a quota class (Admin only). **Positional arguments:** ```` Name of quota class to set the quotas for. **Optional arguments:** ``--shares `` New value for the "shares" quota. 
``--snapshots `` New value for the "snapshots" quota. ``--gigabytes `` New value for the "gigabytes" quota. ``--snapshot-gigabytes , --snapshot_gigabytes `` New value for the "snapshot_gigabytes" quota. ``--share-networks , --share_networks `` New value for the "share_networks" quota. ``--share-groups , --share_groups `` New value for the "share_groups" quota. ``--share-group-snapshots , --share_group_snapshots `` New value for the "share_group_snapshots" quota. .. _manila_quota-defaults: manila quota-defaults --------------------- .. code-block:: console usage: manila quota-defaults [--tenant ] List the default quotas for a tenant. **Optional arguments:** ``--tenant `` ID of tenant to list the default quotas for. .. _manila_quota-delete: manila quota-delete ------------------- .. code-block:: console usage: manila quota-delete [--tenant ] [--user ] [--share-type ] Delete quota for a tenant/user. The quota will revert back to default (Admin only). **Optional arguments:** ``--tenant `` ID of tenant to delete quota for. ``--user `` ID of user to delete quota for. ``--share-type , --share_type `` UUID or name of a share type to set the quotas for. Optional. Mutually exclusive with '--user-id'. Available only for microversion >= 2.39 .. _manila_quota-show: manila quota-show ----------------- .. code-block:: console usage: manila quota-show [--tenant ] [--user ] [--share-type ] [--detail] List the quotas for a tenant/user. **Optional arguments:** ``--tenant `` ID of tenant to list the quotas for. ``--user `` ID of user to list the quotas for. ``--share-type , --share_type `` UUID or name of a share type to set the quotas for. Optional. Mutually exclusive with '--user-id'. Available only for microversion >= 2.39 ``--detail`` Optional flag to indicate whether to show quota in detail. Default false, available only for microversion >= 2.25. .. _manila_quota-update: manila quota-update ------------------- .. 
code-block:: console usage: manila quota-update [--user ] [--shares ] [--snapshots ] [--gigabytes ] [--snapshot-gigabytes ] [--share-networks ] [--share-groups ] [--share-group-snapshots ] [--share-type ] [--force] Update the quotas for a tenant/user (Admin only). **Positional arguments:** ```` UUID of tenant to set the quotas for. **Optional arguments:** ``--user `` ID of user to set the quotas for. ``--shares `` New value for the "shares" quota. ``--snapshots `` New value for the "snapshots" quota. ``--gigabytes `` New value for the "gigabytes" quota. ``--snapshot-gigabytes , --snapshot_gigabytes `` New value for the "snapshot_gigabytes" quota. ``--share-networks , --share_networks `` New value for the "share_networks" quota. ``--share-groups , --share_groups `` New value for the "share_groups" quota. ``--share-group-snapshots , --share_group_snapshots `` New value for the "share_group_snapshots" quota. ``--share-type , --share_type `` UUID or name of a share type to set the quotas for. Optional. Mutually exclusive with '--user-id'. Available only for microversion >= 2.39 ``--force`` Whether force update the quota even if the already used and reserved exceeds the new quota. .. _manila_rate-limits: manila rate-limits ------------------ .. code-block:: console usage: manila rate-limits [--columns ] Print a list of rate limits for a user. **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "verb,uri,value". .. _manila_reset-state: manila reset-state ------------------ .. code-block:: console usage: manila reset-state [--state ] Explicitly update the state of a share (Admin only). **Positional arguments:** ```` Name or ID of the share to modify. **Optional arguments:** ``--state `` Indicate which state to assign the share. Options include available, error, creating, deleting, error_deleting. If no state is provided, available will be used. .. 
_manila_reset-task-state: manila reset-task-state ----------------------- .. code-block:: console usage: manila reset-task-state [--task-state ] Explicitly update the task state of a share (Admin only, Experimental). **Positional arguments:** ```` Name or ID of the share to modify. **Optional arguments:** ``--task-state , --task_state , --state `` Indicate which task state to assign the share. Options include migration_starting, migration_in_progress, migration_completing, migration_success, migration_error, migration_cancelled, migration_driver_in_progress, migration_driver_phase1_done, data_copying_starting, data_copying_in_progress, data_copying_completing, data_copying_completed, data_copying_cancelled, data_copying_error. If no value is provided, None will be used. .. _manila_revert-to-snapshot: manila revert-to-snapshot ------------------------- .. code-block:: console usage: manila revert-to-snapshot Revert a share to the specified snapshot. **Positional arguments:** ```` Name or ID of the snapshot to restore. The snapshot must be the most recent one known to manila. .. _manila_security-service-create: manila security-service-create ------------------------------ .. code-block:: console usage: manila security-service-create [--dns-ip ] [--server ] [--domain ] [--user ] [--password ] [--name ] [--description ] Create security service used by tenant. **Positional arguments:** ```` Security service type: 'ldap', 'kerberos' or 'active_directory'. **Optional arguments:** ``--dns-ip `` DNS IP address used inside tenant's network. ``--server `` Security service IP address or hostname. ``--domain `` Security service domain. ``--user `` Security service user or group used by tenant. ``--password `` Password used by user. ``--name `` Security service name. ``--description `` Security service description. .. _manila_security-service-delete: manila security-service-delete ------------------------------ .. code-block:: console usage: manila security-service-delete [ ...] 
Delete one or more security services. **Positional arguments:** ```` Name or ID of the security service(s) to delete. .. _manila_security-service-list: manila security-service-list ---------------------------- .. code-block:: console usage: manila security-service-list [--all-tenants [<0|1>]] [--share-network ] [--status ] [--name ] [--type ] [--user ] [--dns-ip ] [--server ] [--domain ] [--detailed [<0|1>]] [--offset ] [--limit ] [--columns ] Get a list of security services. **Optional arguments:** ``--all-tenants [<0|1>]`` Display information from all tenants (Admin only). ``--share-network , --share_network `` Filter results by share network id or name. ``--status `` Filter results by status. ``--name `` Filter results by name. ``--type `` Filter results by type. ``--user `` Filter results by user or group used by tenant. ``--dns-ip , --dns_ip `` Filter results by DNS IP address used inside tenant's network. ``--server `` Filter results by security service IP address or hostname. ``--domain `` Filter results by domain. ``--detailed [<0|1>]`` Show detailed information about filtered security services. ``--offset `` Start position of security services listing. ``--limit `` Number of security services to return per request. ``--columns `` Comma separated list of columns to be displayed example --columns "name,type". .. _manila_security-service-show: manila security-service-show ---------------------------- .. code-block:: console usage: manila security-service-show Show security service. **Positional arguments:** ```` Security service name or ID to show. .. _manila_security-service-update: manila security-service-update ------------------------------ .. code-block:: console usage: manila security-service-update [--dns-ip ] [--server ] [--domain ] [--user ] [--password ] [--name ] [--description ] Update security service. **Positional arguments:** ```` Security service name or ID to update. 
**Optional arguments:** ``--dns-ip `` DNS IP address used inside tenant's network. ``--server `` Security service IP address or hostname. ``--domain `` Security service domain. ``--user `` Security service user or group used by tenant. ``--password `` Password used by user. ``--name `` Security service name. ``--description `` Security service description. .. _manila_service-disable: manila service-disable ---------------------- .. code-block:: console usage: manila service-disable Disables 'manila-share' or 'manila-scheduler' services (Admin only). **Positional arguments:** ```` Host name as 'example_host@example_backend'. ```` Service binary, could be 'manila-share' or 'manila-scheduler'. .. _manila_service-enable: manila service-enable --------------------- .. code-block:: console usage: manila service-enable Enables 'manila-share' or 'manila-scheduler' services (Admin only). **Positional arguments:** ```` Host name as 'example_host@example_backend'. ```` Service binary, could be 'manila-share' or 'manila-scheduler'. .. _manila_service-list: manila service-list ------------------- .. code-block:: console usage: manila service-list [--host ] [--binary ] [--status ] [--state ] [--zone ] [--columns ] List all services (Admin only). **Optional arguments:** ``--host `` Name of host. ``--binary `` Service binary. ``--status `` Filter results by status. ``--state `` Filter results by state. ``--zone `` Availability zone. ``--columns `` Comma separated list of columns to be displayed example --columns "id,host". .. _manila_share-export-location-list: manila share-export-location-list --------------------------------- .. code-block:: console usage: manila share-export-location-list [--columns ] List export locations of a given share. **Positional arguments:** ```` Name or ID of the share. **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "id,host,status". .. 
_manila_share-export-location-show: manila share-export-location-show --------------------------------- .. code-block:: console usage: manila share-export-location-show Show export location of the share. **Positional arguments:** ```` Name or ID of the share. ```` ID of the share export location. .. _manila_share-group-create: manila share-group-create ------------------------- .. code-block:: console usage: manila share-group-create [--name ] [--description ] [--share-types ] [--share-group-type ] [--share-network ] [--source-share-group-snapshot ] [--availability-zone ] Creates a new share group (Experimental). **Optional arguments:** ``--name `` Optional share group name. (Default=None) ``--description `` Optional share group description. (Default=None) ``--share-types , --share_types `` Comma-separated list of share types. (Default=None) ``--share-group-type , --share_group_type , --type `` Share group type name or ID of the share group to be created. (Default=None) ``--share-network , --share_network `` Specify share network name or id. ``--source-share-group-snapshot , --source_share_group_snapshot `` Optional share group snapshot name or ID to create the share group from. (Default=None) ``--availability-zone , --availability_zone , --az `` Optional availability zone in which group should be created. (Default=None) .. _manila_share-group-delete: manila share-group-delete ------------------------- .. code-block:: console usage: manila share-group-delete [--force] [ ...] Remove one or more share groups (Experimental). **Positional arguments:** ```` Name or ID of the share_group(s). **Optional arguments:** ``--force`` Attempt to force delete the share group (Default=False) (Admin only). .. _manila_share-group-list: manila share-group-list ----------------------- .. 
code-block:: console usage: manila share-group-list [--all-tenants [<0|1>]] [--name ] [--status ] [--share-server-id ] [--share-group-type ] [--snapshot ] [--host ] [--share-network ] [--project-id ] [--limit ] [--offset ] [--sort-key ] [--sort-dir ] [--columns ] List share groups with filters (Experimental). **Optional arguments:** ``--all-tenants [<0|1>]`` Display information from all tenants (Admin only). ``--name `` Filter results by name. ``--status `` Filter results by status. ``--share-server-id , --share-server_id , --share_server-id , --share_server_id `` Filter results by share server ID (Admin only). ``--share-group-type , --share-group-type-id , --share_group_type , --share_group_type_id `` Filter results by a share group type ID or name that was used for share group creation. ``--snapshot `` Filter results by share group snapshot name or ID that was used to create the share group. ``--host `` Filter results by host. ``--share-network , --share_network `` Filter results by share-network name or ID. ``--project-id , --project_id `` Filter results by project ID. Useful with set key '--all-tenants'. ``--limit `` Maximum number of share groups to return. (Default=None) ``--offset `` Start position of share group listing. ``--sort-key , --sort_key `` Key to be sorted, available keys are ('id', 'name', 'status', 'host', 'user_id', 'project_id', 'created_at', 'availability_zone', 'share_network', 'share_network_id', 'share_group_type', 'share_group_type_id', 'source_share_group_snapshot_id'). Default=None. ``--sort-dir , --sort_dir `` Sort direction, available values are ('asc', 'desc'). OPTIONAL: Default=None. ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. _manila_share-group-reset-state: manila share-group-reset-state ------------------------------ .. code-block:: console usage: manila share-group-reset-state [--state ] Explicitly update the state of a share group (Admin only, Experimental). 
**Positional arguments:** ```` Name or ID of the share group to modify. **Optional arguments:** ``--state `` Indicate which state to assign the share group. Options include available, error, creating, deleting, error_deleting. If no state is provided, available will be used. .. _manila_share-group-show: manila share-group-show ----------------------- .. code-block:: console usage: manila share-group-show Show details about a share group (Experimental). **Positional arguments:** ```` Name or ID of the share group. .. _manila_share-group-snapshot-create: manila share-group-snapshot-create ---------------------------------- .. code-block:: console usage: manila share-group-snapshot-create [--name ] [--description ] Creates a new share group snapshot (Experimental). **Positional arguments:** ```` Name or ID of the share group. **Optional arguments:** ``--name `` Optional share group snapshot name. (Default=None) ``--description `` Optional share group snapshot description. (Default=None) .. _manila_share-group-snapshot-delete: manila share-group-snapshot-delete ---------------------------------- .. code-block:: console usage: manila share-group-snapshot-delete [--force] [ ...] Remove one or more share group snapshots (Experimental). **Positional arguments:** ```` Name or ID of the share group snapshot(s) to delete. **Optional arguments:** ``--force`` Attempt to force delete the share group snapshot(s) (Default=False) (Admin only). .. _manila_share-group-snapshot-list: manila share-group-snapshot-list -------------------------------- .. code-block:: console usage: manila share-group-snapshot-list [--all-tenants [<0|1>]] [--name ] [--status ] [--share-group-id ] [--limit ] [--offset ] [--sort-key ] [--sort-dir ] [--detailed DETAILED] [--columns ] List share group snapshots with filters (Experimental). **Optional arguments:** ``--all-tenants [<0|1>]`` Display information from all tenants (Admin only). ``--name `` Filter results by name. 
``--status `` Filter results by status. ``--share-group-id , --share_group_id `` Filter results by share group ID. ``--limit `` Maximum number of share group snapshots to return. (Default=None) ``--offset `` Start position of share group snapshot listing. ``--sort-key , --sort_key `` Key to be sorted, available keys are ('id', 'name', 'status', 'host', 'user_id', 'project_id', 'created_at', 'share_group_id'). Default=None. ``--sort-dir , --sort_dir `` Sort direction, available values are ('asc', 'desc'). OPTIONAL: Default=None. ``--detailed DETAILED`` Show detailed information about share group snapshots. ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. _manila_share-group-snapshot-list-members: manila share-group-snapshot-list-members ---------------------------------------- .. code-block:: console usage: manila share-group-snapshot-list-members [--columns ] List members of a share group snapshot (Experimental). **Positional arguments:** ```` Name or ID of the share group snapshot. **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. _manila_share-group-snapshot-reset-state: manila share-group-snapshot-reset-state --------------------------------------- .. code-block:: console usage: manila share-group-snapshot-reset-state [--state ] Explicitly update the state of a share group snapshot (Admin only, Experimental). **Positional arguments:** ```` Name or ID of the share group snapshot. **Optional arguments:** ``--state `` Indicate which state to assign the share group snapshot. Options include available, error, creating, deleting, error_deleting. If no state is provided, available will be used. .. _manila_share-group-snapshot-show: manila share-group-snapshot-show -------------------------------- .. code-block:: console usage: manila share-group-snapshot-show Show details about a share group snapshot (Experimental). 
**Positional arguments:** ```` Name or ID of the share group snapshot. .. _manila_share-group-snapshot-update: manila share-group-snapshot-update ---------------------------------- .. code-block:: console usage: manila share-group-snapshot-update [--name ] [--description ] Update a share group snapshot (Experimental). **Positional arguments:** ```` Name or ID of the share group snapshot to update. **Optional arguments:** ``--name `` Optional new name for the share group snapshot. (Default=None) ``--description `` Optional share group snapshot description. (Default=None) .. _manila_share-group-type-access-add: manila share-group-type-access-add ---------------------------------- .. code-block:: console usage: manila share-group-type-access-add Adds share group type access for the given project (Admin only). **Positional arguments:** ```` Share group type name or ID to add access for the given project. ```` Project ID to add share group type access for. .. _manila_share-group-type-access-list: manila share-group-type-access-list ----------------------------------- .. code-block:: console usage: manila share-group-type-access-list Print access information about a share group type (Admin only). **Positional arguments:** ```` Filter results by share group type name or ID. .. _manila_share-group-type-access-remove: manila share-group-type-access-remove ------------------------------------- .. code-block:: console usage: manila share-group-type-access-remove Removes share group type access for the given project (Admin only). **Positional arguments:** ```` Share group type name or ID to remove access for the given project. ```` Project ID to remove share group type access for. .. _manila_share-group-type-create: manila share-group-type-create ------------------------------ .. code-block:: console usage: manila share-group-type-create [--is_public ] Create a new share group type (Admin only). **Positional arguments:** ```` Name of the new share group type. 
```` Comma-separated list of share type names or IDs. **Optional arguments:** ``--is_public , --is-public `` Make type accessible to the public (default true). .. _manila_share-group-type-delete: manila share-group-type-delete ------------------------------ .. code-block:: console usage: manila share-group-type-delete Delete a specific share group type (Admin only). **Positional arguments:** ```` Name or ID of the share group type to delete. .. _manila_share-group-type-key: manila share-group-type-key --------------------------- .. code-block:: console usage: manila share-group-type-key [ [ ...]] Set or unset group_spec for a share group type (Admin only). **Positional arguments:** ```` Name or ID of the share group type. ```` Actions: 'set' or 'unset'. ```` Group specs to set or unset (key is only necessary on unset). .. _manila_share-group-type-list: manila share-group-type-list ---------------------------- .. code-block:: console usage: manila share-group-type-list [--all] [--columns ] Print a list of available 'share group types'. **Optional arguments:** ``--all`` Display all share group types (Admin only). ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. _manila_share-group-type-specs-list: manila share-group-type-specs-list ---------------------------------- .. code-block:: console usage: manila share-group-type-specs-list [--columns ] Print a list of 'share group types specs' (Admin Only). **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. _manila_share-group-update: manila share-group-update ------------------------- .. code-block:: console usage: manila share-group-update [--name ] [--description ] Update a share group (Experimental). **Positional arguments:** ```` Name or ID of the share group to update. **Optional arguments:** ``--name `` Optional new name for the share group. 
(Default=None) ``--description `` Optional share group description. (Default=None) .. _manila_share-instance-export-location-list: manila share-instance-export-location-list ------------------------------------------ .. code-block:: console usage: manila share-instance-export-location-list [--columns ] List export locations of a given share instance. **Positional arguments:** ```` Name or ID of the share instance. **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "id,host,status". .. _manila_share-instance-export-location-show: manila share-instance-export-location-show ------------------------------------------ .. code-block:: console usage: manila share-instance-export-location-show Show export location for the share instance. **Positional arguments:** ```` Name or ID of the share instance. ```` ID of the share instance export location. .. _manila_share-instance-force-delete: manila share-instance-force-delete ---------------------------------- .. code-block:: console usage: manila share-instance-force-delete [ ...] Force-delete the share instance, regardless of state (Admin only). **Positional arguments:** ```` Name or ID of the instance(s) to force delete. .. _manila_share-instance-list: manila share-instance-list -------------------------- .. code-block:: console usage: manila share-instance-list [--share-id ] [--columns ] List share instances (Admin only). **Optional arguments:** ``--share-id , --share_id `` Filter results by share ID. ``--columns `` Comma separated list of columns to be displayed example --columns "id,host,status". .. _manila_share-instance-reset-state: manila share-instance-reset-state --------------------------------- .. code-block:: console usage: manila share-instance-reset-state [--state ] Explicitly update the state of a share instance (Admin only). **Positional arguments:** ```` Name or ID of the share instance to modify. 
**Optional arguments:** ``--state `` Indicate which state to assign the instance. Options include available, error, creating, deleting, error_deleting, migrating, migrating_to. If no state is provided, available will be used. .. _manila_share-instance-show: manila share-instance-show -------------------------- .. code-block:: console usage: manila share-instance-show Show details about a share instance (Admin only). **Positional arguments:** ```` Name or ID of the share instance. .. _manila_share-network-create: manila share-network-create --------------------------- .. code-block:: console usage: manila share-network-create [--neutron-net-id ] [--neutron-subnet-id ] [--name ] [--description ] Create description for network used by the tenant. **Optional arguments:** ``--neutron-net-id , --neutron-net_id , --neutron_net_id , --neutron_net-id `` Neutron network ID. Used to set up network for share servers. ``--neutron-subnet-id , --neutron-subnet_id , --neutron_subnet_id , --neutron_subnet-id `` Neutron subnet ID. Used to set up network for share servers. This subnet should belong to specified neutron network. ``--name `` Share network name. ``--description `` Share network description. .. _manila_share-network-delete: manila share-network-delete --------------------------- .. code-block:: console usage: manila share-network-delete [ ...] Delete one or more share networks. **Positional arguments:** ```` Name or ID of share network(s) to be deleted. .. _manila_share-network-list: manila share-network-list ------------------------- .. code-block:: console usage: manila share-network-list [--all-tenants [<0|1>]] [--project-id ] [--name ] [--created-since ] [--created-before ] [--security-service ] [--neutron-net-id ] [--neutron-subnet-id ] [--network-type ] [--segmentation-id ] [--cidr ] [--ip-version ] [--offset ] [--limit ] [--columns ] Get a list of network info. **Optional arguments:** ``--all-tenants [<0|1>]`` Display information from all tenants (Admin only). 
``--project-id , --project_id `` Filter results by project ID. ``--name `` Filter results by name. ``--created-since , --created_since `` Return only share networks created since given date. The date is in the format 'yyyy-mm-dd'. ``--created-before , --created_before `` Return only share networks created until given date. The date is in the format 'yyyy-mm-dd'. ``--security-service , --security_service `` Filter results by attached security service. ``--neutron-net-id , --neutron_net_id , --neutron_net-id , --neutron-net_id `` Filter results by neutron net ID. ``--neutron-subnet-id , --neutron_subnet_id , --neutron-subnet_id , --neutron_subnet-id `` Filter results by neutron subnet ID. ``--network-type , --network_type `` Filter results by network type. ``--segmentation-id , --segmentation_id `` Filter results by segmentation ID. ``--cidr `` Filter results by CIDR. ``--ip-version , --ip_version `` Filter results by IP version. ``--offset `` Start position of share networks listing. ``--limit `` Number of share networks to return per request. ``--columns `` Comma separated list of columns to be displayed example --columns "id". .. _manila_share-network-security-service-add: manila share-network-security-service-add ----------------------------------------- .. code-block:: console usage: manila share-network-security-service-add Associate security service with share network. **Positional arguments:** ```` Share network name or ID. ```` Security service name or ID to associate with. .. _manila_share-network-security-service-list: manila share-network-security-service-list ------------------------------------------ .. code-block:: console usage: manila share-network-security-service-list [--columns ] Get list of security services associated with a given share network. **Positional arguments:** ```` Share network name or ID. **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. 
_manila_share-network-security-service-remove: manila share-network-security-service-remove -------------------------------------------- .. code-block:: console usage: manila share-network-security-service-remove Dissociate security service from share network. **Positional arguments:** ```` Share network name or ID. ```` Security service name or ID to dissociate. .. _manila_share-network-show: manila share-network-show ------------------------- .. code-block:: console usage: manila share-network-show Get a description for network used by the tenant. **Positional arguments:** ```` Name or ID of the share network to show. .. _manila_share-network-update: manila share-network-update --------------------------- .. code-block:: console usage: manila share-network-update [--neutron-net-id ] [--neutron-subnet-id ] [--name ] [--description ] Update share network data. **Positional arguments:** ```` Name or ID of share network to update. **Optional arguments:** ``--neutron-net-id , --neutron-net_id , --neutron_net_id , --neutron_net-id `` Neutron network ID. Used to set up network for share servers. This option is deprecated and will be rejected in newer releases of OpenStack Manila. ``--neutron-subnet-id , --neutron-subnet_id , --neutron_subnet_id , --neutron_subnet-id `` Neutron subnet ID. Used to set up network for share servers. This subnet should belong to specified neutron network. ``--name `` Share network name. ``--description `` Share network description. .. _manila_share-replica-create: manila share-replica-create --------------------------- .. code-block:: console usage: manila share-replica-create [--availability-zone ] [--share-network ] Create a share replica (Experimental). **Positional arguments:** ```` Name or ID of the share to replicate. **Optional arguments:** ``--availability-zone , --availability_zone , --az `` Optional Availability zone in which replica should be created. ``--share-network , --share_network `` Optional network info ID or name. .. 
_manila_share-replica-delete: manila share-replica-delete --------------------------- .. code-block:: console usage: manila share-replica-delete [--force] [ ...] Remove one or more share replicas (Experimental). **Positional arguments:** ```` ID of the share replica. **Optional arguments:** ``--force`` Attempt to force deletion of a replica on its backend. Using this option will purge the replica from Manila even if it is not cleaned up on the backend. Defaults to False. .. _manila_share-replica-list: manila share-replica-list ------------------------- .. code-block:: console usage: manila share-replica-list [--share-id ] [--columns ] List share replicas (Experimental). **Optional arguments:** ``--share-id , --share_id , --si `` List replicas belonging to share. ``--columns `` Comma separated list of columns to be displayed example --columns "replica_state,id". .. _manila_share-replica-promote: manila share-replica-promote ---------------------------- .. code-block:: console usage: manila share-replica-promote Promote specified replica to 'active' replica_state (Experimental). **Positional arguments:** ```` ID of the share replica. .. _manila_share-replica-reset-replica-state: manila share-replica-reset-replica-state ---------------------------------------- .. code-block:: console usage: manila share-replica-reset-replica-state [--replica-state ] Explicitly update the 'replica_state' of a share replica (Experimental). **Positional arguments:** ```` ID of the share replica to modify. **Optional arguments:** ``--replica-state , --replica_state , --state `` Indicate which replica_state to assign the replica. Options include in_sync, out_of_sync, active, error. If no state is provided, out_of_sync will be used. .. _manila_share-replica-reset-state: manila share-replica-reset-state -------------------------------- .. code-block:: console usage: manila share-replica-reset-state [--state ] Explicitly update the 'status' of a share replica (Experimental). 
**Positional arguments:** ```` ID of the share replica to modify. **Optional arguments:** ``--state `` Indicate which state to assign the replica. Options include available, error, creating, deleting, error_deleting. If no state is provided, available will be used. .. _manila_share-replica-resync: manila share-replica-resync --------------------------- .. code-block:: console usage: manila share-replica-resync Attempt to update the share replica with its 'active' mirror (Experimental). **Positional arguments:** ```` ID of the share replica to resync. .. _manila_share-replica-show: manila share-replica-show ------------------------- .. code-block:: console usage: manila share-replica-show Show details about a replica (Experimental). **Positional arguments:** ```` ID of the share replica. .. _manila_share-server-delete: manila share-server-delete -------------------------- .. code-block:: console usage: manila share-server-delete [ ...] Delete one or more share servers (Admin only). **Positional arguments:** ```` ID of the share server(s) to delete. .. _manila_share-server-details: manila share-server-details --------------------------- .. code-block:: console usage: manila share-server-details Show share server details (Admin only). **Positional arguments:** ```` ID of share server. .. _manila_share-server-list: manila share-server-list ------------------------ .. code-block:: console usage: manila share-server-list [--host ] [--status ] [--share-network ] [--project-id ] [--columns ] List all share servers (Admin only). **Optional arguments:** ``--host `` Filter results by name of host. ``--status `` Filter results by status. ``--share-network `` Filter results by share network. ``--project-id `` Filter results by project ID. ``--columns `` Comma separated list of columns to be displayed example --columns "id,host,status". .. _manila_share-server-show: manila share-server-show ------------------------ .. 
code-block:: console usage: manila share-server-show Show share server info (Admin only). **Positional arguments:** ```` ID of share server. .. _manila_show: manila show ----------- .. code-block:: console usage: manila show Show details about a NAS share. **Positional arguments:** ```` Name or ID of the NAS share. .. _manila_shrink: manila shrink ------------- .. code-block:: console usage: manila shrink Decreases the size of an existing share. **Positional arguments:** ```` Name or ID of share to shrink. ```` New size of share, in GiBs. .. _manila_snapshot-access-allow: manila snapshot-access-allow ---------------------------- .. code-block:: console usage: manila snapshot-access-allow Allow read only access to a snapshot. **Positional arguments:** ```` Name or ID of the share snapshot to allow access to. ```` Access rule type (only "ip", "user"(user or group), "cert" or "cephx" are supported). ```` Value that defines access. .. _manila_snapshot-access-deny: manila snapshot-access-deny --------------------------- .. code-block:: console usage: manila snapshot-access-deny [ ...] Deny access to a snapshot. **Positional arguments:** ```` Name or ID of the share snapshot to deny access to. ```` ID(s) of the access rule(s) to be deleted. .. _manila_snapshot-access-list: manila snapshot-access-list --------------------------- .. code-block:: console usage: manila snapshot-access-list [--columns ] Show access list for a snapshot. **Positional arguments:** ```` Name or ID of the share snapshot to list access of. **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "access_type,access_to". .. _manila_snapshot-create: manila snapshot-create ---------------------- .. code-block:: console usage: manila snapshot-create [--force ] [--name ] [--description ] Add a new snapshot. **Positional arguments:** ```` Name or ID of the share to snapshot. 
**Optional arguments:** ``--force `` Optional flag to indicate whether to snapshot a share even if it's busy. (Default=False) ``--name `` Optional snapshot name. (Default=None) ``--description `` Optional snapshot description. (Default=None) .. _manila_snapshot-delete: manila snapshot-delete ---------------------- .. code-block:: console usage: manila snapshot-delete [ ...] Remove one or more snapshots. **Positional arguments:** ```` Name or ID of the snapshot(s) to delete. .. _manila_snapshot-export-location-list: manila snapshot-export-location-list ------------------------------------ .. code-block:: console usage: manila snapshot-export-location-list [--columns ] List export locations of a given snapshot. **Positional arguments:** ```` Name or ID of the snapshot. **Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "id,path". .. _manila_snapshot-export-location-show: manila snapshot-export-location-show ------------------------------------ .. code-block:: console usage: manila snapshot-export-location-show Show export location of the share snapshot. **Positional arguments:** ```` Name or ID of the snapshot. ```` ID of the share snapshot export location. .. _manila_snapshot-force-delete: manila snapshot-force-delete ---------------------------- .. code-block:: console usage: manila snapshot-force-delete [ ...] Attempt force-deletion of one or more snapshots. Regardless of the state (Admin only). **Positional arguments:** ```` Name or ID of the snapshot(s) to force delete. .. _manila_snapshot-instance-export-location-list: manila snapshot-instance-export-location-list --------------------------------------------- .. code-block:: console usage: manila snapshot-instance-export-location-list [--columns ] List export locations of a given snapshot instance. **Positional arguments:** ```` Name or ID of the snapshot instance. 
**Optional arguments:** ``--columns `` Comma separated list of columns to be displayed example --columns "id,path,is_admin_only". .. _manila_snapshot-instance-export-location-show: manila snapshot-instance-export-location-show --------------------------------------------- .. code-block:: console usage: manila snapshot-instance-export-location-show Show export location of the share instance snapshot. **Positional arguments:** ```` ID of the share snapshot instance. ```` ID of the share snapshot instance export location. .. _manila_snapshot-instance-list: manila snapshot-instance-list ----------------------------- .. code-block:: console usage: manila snapshot-instance-list [--snapshot ] [--columns ] [--detailed ] List share snapshot instances. **Optional arguments:** ``--snapshot `` Filter results by share snapshot ID. ``--columns `` Comma separated list of columns to be displayed example --columns "id". ``--detailed `` Show detailed information about snapshot instances. (Default=False) .. _manila_snapshot-instance-reset-state: manila snapshot-instance-reset-state ------------------------------------ .. code-block:: console usage: manila snapshot-instance-reset-state [--state ] Explicitly update the state of a share snapshot instance. **Positional arguments:** ```` ID of the snapshot instance to modify. **Optional arguments:** ``--state `` Indicate which state to assign the snapshot instance. Options include available, error, creating, deleting, error_deleting. If no state is provided, available will be used. .. _manila_snapshot-instance-show: manila snapshot-instance-show ----------------------------- .. code-block:: console usage: manila snapshot-instance-show Show details about a share snapshot instance. **Positional arguments:** ```` ID of the share snapshot instance. .. _manila_snapshot-list: manila snapshot-list -------------------- .. 
code-block:: console usage: manila snapshot-list [--all-tenants [<0|1>]] [--name ] [--status ] [--share-id ] [--usage [any|used|unused]] [--limit ] [--offset ] [--sort-key ] [--sort-dir ] [--columns ] List all the snapshots. **Optional arguments:** ``--all-tenants [<0|1>]`` Display information from all tenants (Admin only). ``--name `` Filter results by name. ``--status `` Filter results by status. ``--share-id , --share_id `` Filter results by source share ID. ``--usage [any|used|unused]`` Either filter or not snapshots by its usage. OPTIONAL: Default=any. ``--limit `` Maximum number of share snapshots to return. OPTIONAL: Default=None. ``--offset `` Set offset to define start point of share snapshots listing. OPTIONAL: Default=None. ``--sort-key , --sort_key `` Key to be sorted, available keys are ('id', 'status', 'size', 'share_id', 'user_id', 'project_id', 'progress', 'name', 'display_name'). Default=None. ``--sort-dir , --sort_dir `` Sort direction, available values are ('asc', 'desc'). OPTIONAL: Default=None. ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. _manila_snapshot-manage: manila snapshot-manage ---------------------- .. code-block:: console usage: manila snapshot-manage [--name ] [--description ] [--driver_options [ [ ...]]] Manage share snapshot not handled by Manila (Admin only). **Positional arguments:** ```` Name or ID of the share. ```` Provider location of the snapshot on the backend. **Optional arguments:** ``--name `` Optional snapshot name (Default=None). ``--description `` Optional snapshot description (Default=None). ``--driver_options [ [ ...]], --driver-options [ [ ...]]`` Optional driver options as key=value pairs (Default=None). .. _manila_snapshot-rename: manila snapshot-rename ---------------------- .. code-block:: console usage: manila snapshot-rename [--description ] [] Rename a snapshot. **Positional arguments:** ```` Name or ID of the snapshot to rename. 
```` New name for the snapshot. **Optional arguments:** ``--description `` Optional snapshot description. (Default=None) .. _manila_snapshot-reset-state: manila snapshot-reset-state --------------------------- .. code-block:: console usage: manila snapshot-reset-state [--state ] Explicitly update the state of a snapshot (Admin only). **Positional arguments:** ```` Name or ID of the snapshot to modify. **Optional arguments:** ``--state `` Indicate which state to assign the snapshot. Options include available, error, creating, deleting, error_deleting. If no state is provided, available will be used. .. _manila_snapshot-show: manila snapshot-show -------------------- .. code-block:: console usage: manila snapshot-show Show details about a snapshot. **Positional arguments:** ```` Name or ID of the snapshot. .. _manila_snapshot-unmanage: manila snapshot-unmanage ------------------------ .. code-block:: console usage: manila snapshot-unmanage [ ...] Unmanage one or more share snapshots (Admin only). **Positional arguments:** ```` Name or ID of the snapshot(s). .. _manila_type-access-add: manila type-access-add ---------------------- .. code-block:: console usage: manila type-access-add Adds share type access for the given project (Admin only). **Positional arguments:** ```` Share type name or ID to add access for the given project. ```` Project ID to add share type access for. .. _manila_type-access-list: manila type-access-list ----------------------- .. code-block:: console usage: manila type-access-list Print access information about the given share type (Admin only). **Positional arguments:** ```` Filter results by share type name or ID. .. _manila_type-access-remove: manila type-access-remove ------------------------- .. code-block:: console usage: manila type-access-remove Removes share type access for the given project (Admin only). **Positional arguments:** ```` Share type name or ID to remove access for the given project. 
```` Project ID to remove share type access for. .. _manila_type-create: manila type-create ------------------ .. code-block:: console usage: manila type-create [--snapshot_support ] [--create_share_from_snapshot_support ] [--revert_to_snapshot_support ] [--mount_snapshot_support ] [--extra-specs [ [ ...]]] [--is_public ] Create a new share type (Admin only). **Positional arguments:** ```` Name of the new share type. ```` Required extra specification. Valid values are 'true'/'1' and 'false'/'0'. **Optional arguments:** ``--snapshot_support , --snapshot-support `` Boolean extra spec used for filtering of back ends by their capability to create share snapshots. ``--create_share_from_snapshot_support , --create-share-from-snapshot-support `` Boolean extra spec used for filtering of back ends by their capability to create shares from snapshots. ``--revert_to_snapshot_support , --revert-to-snapshot-support `` Boolean extra spec used for filtering of back ends by their capability to revert shares to snapshots. (Default is False). ``--mount_snapshot_support , --mount-snapshot-support `` Boolean extra spec used for filtering of back ends by their capability to mount share snapshots. (Default is False). ``--extra-specs [ [ ...]], --extra_specs [ [ ...]]`` Extra specs key and value of share type that will be used for share type creation. OPTIONAL: Default=None. example --extra-specs thin_provisioning=' True', replication_type=readable. ``--is_public , --is-public `` Make type accessible to the public (default true). .. _manila_type-delete: manila type-delete ------------------ .. code-block:: console usage: manila type-delete [ ...] Delete one or more specific share types (Admin only). **Positional arguments:** ```` Name or ID of the share type(s) to delete. .. _manila_type-key: manila type-key --------------- .. code-block:: console usage: manila type-key [ [ ...]] Set or unset extra_spec for a share type (Admin only). 
**Positional arguments:** ```` Name or ID of the share type. ```` Actions: 'set' or 'unset'. ```` Extra_specs to set or unset (key is only necessary on unset). .. _manila_type-list: manila type-list ---------------- .. code-block:: console usage: manila type-list [--all] [--columns ] Print a list of available 'share types'. **Optional arguments:** ``--all`` Display all share types (Admin only). ``--columns `` Comma separated list of columns to be displayed example --columns "id,name". .. _manila_unmanage: manila unmanage --------------- .. code-block:: console usage: manila unmanage Unmanage share (Admin only). **Positional arguments:** ```` Name or ID of the share(s). .. _manila_update: manila update ------------- .. code-block:: console usage: manila update [--name ] [--description ] [--is-public ] Rename a share. **Positional arguments:** ```` Name or ID of the share to rename. **Optional arguments:** ``--name `` New name for the share. ``--description `` Optional share description. (Default=None) ``--is-public , --is_public `` Public share is visible for all tenants. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/conf.py0000664000175000017500000002013500000000000016423 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # manila documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. 
# # This file is execfile()d with the current directory set # to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import eventlet import sys import os # NOTE(dims): monkey patch subprocess to prevent failures in latest eventlet # See https://github.com/eventlet/eventlet/issues/398 try: eventlet.monkey_patch(subprocess=True) except TypeError: pass # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'openstackdocstheme', 'oslo_config.sphinxconfiggen', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', ] config_generator_config_file = ( '../../etc/oslo-config-generator/manila.conf') sample_config_basename = '_static/manila' policy_generator_config_file = ( '../../etc/manila/manila-policy-generator.conf') sample_policy_basename = '_static/manila' # openstackdocstheme options openstackdocs_repo_name = 'openstack/manila' openstackdocs_pdf_link = True openstackdocs_bug_project = 'manila' openstackdocs_bug_tag = 'docs' todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. 
# source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2010-present, Manila contributors' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [ 'api_ext/rst_extension_template', 'installer', ] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['manila.'] # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('cli/manila-manage', 'manila-manage', 'Cloud controller fabric', ['OpenStack'], 1), ('cli/manila-status', 'manila-status', 'Cloud controller fabric', ['OpenStack'], 1), ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "show_other_versions": "True", } # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any paths that contain "extra" files, such as .htaccess. html_extra_path = ['_extra'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. 
# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'maniladoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-manila.tex', 'Manila Developer Documentation', 'Manila contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. 
# latex_use_modindex = True latex_domain_indices = False latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', 'maxlistdepth': 10, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7736735 manila-21.0.0/doc/source/configuration/0000775000175000017500000000000000000000000017772 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7776735 manila-21.0.0/doc/source/configuration/figures/0000775000175000017500000000000000000000000021436 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/figures/hds_network.jpg0000664000175000017500000047404200000000000024502 0ustar00zuulzuul00000000000000JFIF& }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?( ( ( ( ( ( ( ( zt&;h~i,@AW!#Vr:-y$,eiU<)ibn9^xfLjebl m`e-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@AG<6v>A4i TwmkviO2D@֓U)AuޑL xU,h ( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@P@ψKmo2dp_fR05+Rogy9.%

Hu6%( ( ( ( ( ( ( (; }=KKA<@ g2FI!`bGI$HUtt`22`H (P@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( cTG羛 c]Ŝ* pH .PϷS^]Hdw.zʊ9ڈ$j8TUQ P@P@|mSWO- QcjĿ|HWGUh7]">&Χk^4|I\/3p oԿeOڍ?>T1D]\jO<8LH>ܻ3d 'g??ƀuG oc@: F7QGGXvѣ#M,hT;?4ê?hS?N4)'g??ƀuG oc@: F7QGGXvѣ#M,hT;?4O<;H{ > KkCRG%b_ ?i|>%Үo/<xP_'>'Ŧ6/<;iZ}Z&KhBEM^t-Vaf@P@C2Ocg3 *PZ ( ( ( ( ( ( ( ( /7@P@P@P@P@P@P@P@x3qŦ~N{.^(h ( (c<(?e_RWe@?h߳-y'RW =<\[zn<~~jn#~|t;? %X~)|/Wx'Y,^?Vҵⵢ-3Ncr yAf} HGj@|E⟁Z޺,5OԵa]_:kgmqra!|/|D{^ׁe&<i'7èh!߈K]{ /[K ( (GԆVR^~e|<*e`,P=dpP@P@P@P@P@P@P@P@G ?n_ol?? ( ( ( ( ( ( U7z9$_d2*mQP ( ( 8cU%q@TP@(QgT@L+k7k[}?ο<_|,Ӽ9jZ| F֠ڌ\> 얌 y*iZhothJ_ڃr>s ]/W|;Vӣzvy,5-s:}j| Ft&EX c%o,fu)z4XYҧ>7Vf.Hf.٧ͱ~'XqozJG⹞㇌xڄvx/Zuɨ6M߇6Z7("w9j߲|_ӭ7[B=/h^֛akqhWQr?(?_eZ8( ( (>\4f$zcHQG#tǓ( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PI$NI 9%:%I&}+هֻ{|ҵjW\jy5ԓ4\KUW OeLE~x ? l~ҟڛq3f4!Nmy?>i+Ot&(Ou˄ kO}ڟέ~>o]'/l0|I-m ˛[X(1Lڇ UoڇwL|I4۟ٷ_f[#֓x5^u? ޅ tMڴIO!s_=x/)_4t?x߉5]}BYn5ψO^)*^j{krWG /_ğKe.Wt4OO $xVZ/x|[BDm5ojWng?1b|;ٯG~do؟׆#_:kF oR< 7ĭĞy—}-Wº=@++\ }@P@P@ܜk#<=A?@@P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( (>_ ( (>s-#s~&g+'_)I<l( ( ( ( ('ŧcf "O 2~)x݀xUt%X0 *AE}@P@P@c? ( ( ( ( ( ( ( ( (~ܿl~P@P@P@P@P@P@P@P@P@:~|S:|,Dƾ0'4k&uq|;oe7aĶ,~𭦛amSPkKx6M7|6?‘ܧ| CKƥuiCfw6O΢`MGT|ۇڀ ( (=CP@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( (( ( ( ( 纚;{himv=x'Tb&?(?V4z?c[Ytpo>|#xȎɫeOWPmj:6O7^f\fʸu5yTq8TTxڰ=qOԹ?]%2/\*siR ڒhTm?kaSlR뫛Sxq"?hu7/:g/~|=a}OX^]%OBM kf\3Cp_Q8̧dGQc%NR|5[񟈜'ḳ8TZYF*pr%v?/( [_aOQY~ntx6Wzwic`6mKxm%ZƵWǂpWΕ)R1ymkiШeB1:>*|F{Wk=׉Z/菉.U^nѣ7|e*~ !2#E3JtkQ֠u(_$1#/gˊjptxmdX,cK9fþ\/ZtF5~F~ˏ1|+G)񌸟 (eCsl6U,R}^T*֖"8ueN_r/v??$/G.{Pմ [S[GY-u=L-VWW ]]գX2ε8r^**uƥ9&ԡ5(f֫fmb0c_ ^p:5`M8ԧ(-4iRg ?/hxW}ſ io>t6'K2ؽḴI',27_iٻu1<;^eUT[sԌ%*\ɭ2OH(] 5_p?$y?Z!?_yH_EuZ]x'/? Qf;jB.?m ( ( wPP@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( (/Lj|!g> Px>>/xiR4_Z|&BYs=#<*Pz/iwxM?ߵ;O+yeo:rh%Zſ|uhAk7OBcø(VWT9,3Zқ?d*`v&g͕GȅP@U|?k['_ߋ8yq勏x-#V{Zya'GeۍxO"RyXY6<]:ػ-pxwW;mQn>!PY siGن-Pyl>߳=oWIjֵi㿊.JA񖧧vzN8l,p8LO|- *x.'8:r)C2T"_~~$?؟ğ'U?' ? (CK?'A7LVx~W@P@P@c? 
( ( ( ( ( ( ( ( (~ܿl~P@P@P@P@P@P@P@P@P@fwH}OkT&"xTXt'xH䍊igpYϫu0Teu&ۻJeLj'c?~.o^Q֥ѧF4hT t"٣EgX>.%'ZU}C\5j>#$yơxI$Ha݄+~XF8rH:9oN{upg*iٸ:i̺}UOr#.$i#N.KP -ԍqek;c 5jLMӆ|<9 0[MHmBfR}ԟ'Gz cڟW7SOB]5%+ƣA_i.&hNI:X )+s/gbYPTqr\<`. 9&rU<ҭlNK~Ra:WhG,EJQcIX+NWC<]k G`?׏AC4v2Mj^ 4ۣ_:\NsfKPn^qbK3_TZ<" u*jX'chԚ_Z0><9n妱O5ddzVyoet&I;U?i_ЋT\E&gVn^VVxw 3xWja(a ړ(ߊ?T_[N_ ់ux? <1uwB|Sm'k_ x~{Ҡ8n.{Csu;#<4|&[NesLaUq)bkVڕYE7[݌Oc9sO<:St2e &*hrQԪ_L]w(EZ/Fa4 jZ5K pxp#ĻCZ(?':ui4۔*Ir=|M/b2x~K[ ,|R]Jxc1Ǥ\ NsC8>ibJMVkV3?v[_ Ujy&snU {P\oE.o>-"k?W?eo>&h4zC:އy״9,-u]?^Q{b+ʳn+^'"~CWi`Ij֎!PuaJ.Jp' p_yqp|&iᇥWpΜbg(?iCPaڟQh.6|!|1|H~ki{M6 yuy#Ӯ]YoH"~kQ610R \ƋjcKjrN3riÎ4YE8 ҃1 Rx)USJua]I46sB.d3Ə9|ItPeGt7pUqhP@P@P|7ÿ=B ( ( ( ( ( ( ( ( /7@P@P@P@P@P@P@P@|@P@P@x_c&glٝ?G_SZ%ɸȞ+<9JF(>_~n_>"%KPe.d>Ex2-RYa7Iҕ&{e xļY|bŨ}y`50񎒌}S}lV3QRP4pY!g>OMKQ9ٯ1UY~ڞIv*Pr/ (CK?'A?GO3|wY!_P@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@Pgu [F?^tO#5kxw՜~VGqeYO52+$nRDx#kBl5z8=YaӯB98TZJUiYt2Z$֨p،.B(R̮xK4*X+sXiԖ Fus獾\U|F73eʤ8dyگ[ IZ*.tX\e]n͟?_Qosr k.KQ$dYh~oT8+^iJ^ڽ_N8M6nkk=~ *0-|6URv<6S ;|gWׁ.#A/1ěk]>MǕ&ߴ&`Upk:5捿II4-q9+s^anIM9eK^៬Eqo4 >>M |M x^92@5oqRl\d,"$y4ѦiR N3KҌeG!Ǹ2ViM;:M _4OJf',Cҿi_wֿ i޵t|=1' ;Lo5FykQxW9C^7î#7ZPcÕJPp_.SRIʖQYǤӺugJ5ʕG>qS/֩x}/_?êT㇥^ߝs?DvM|?J莮z ~KT#_.֑xXy, \;Y;Nu(ԧ40+&-/dy]7_q#f(˱4j{B6Xݧ KC޵s $f<{q! A9[^Vj!fpm EIGe(xC01V,nVm{-)][ލ즢G+Ŭ,*Ng'8 [.FiKckÕKk?K(PH?ߵ%:XC\|ؐ|+c,|콯דrZ^ ׵?SgA;כ2t~-qZΪ|ևNECHZC:Io:֧ Z#+?SF7${$PIϊ02MI77xdjE6x> ӌ+eHmibbҷ$`ob%~ٚƍoq4Kx\@x#D,cwn7Gi$X(o-*Y=!*Ғ(2Y!X&Ѷұ/:A>.u1=9ww"Ƈm|Kv-eE^[^ 墧qUh|FZ*SUayDg:,^`:VR'(IsJ|\G֦T t+F|VG_mJZtjcp)ԍ5 u}innn~yioiBK,3<;3f%5c1Qs<1RQVmJ1KD%Z# )Os93)MR'w)Im5!_?׿huG$?2_&o֫O|%:?=X;Pբ__AZmţn$^+ /u4θ5c1N4(g[N,. 
Rժ:tYWVԬ׌02py]`3MNXWeYl5lv/FhvV188_fzW3?h|)|_xwF?OW5ox;֚$Ƨcgt˩H,M㻉{𙤸G6rʸ~'g8hRӡ|4iԛ"ڌJ~QN- q^IvI=ȳ0،~Tbysb+R7) ~jigHP@߂Nw㵲?g%AZ%Z|[bvr7S8S'.כGRnUhNU$y5y6{9O>hɳM$uq'~-ۗ7ﶛǞ]_Y3es뼿Ů/'}Y+_kY/"/!6ږs Sd~Pu!OarL ;\|F|wF.zz3|QI)8.+8qjGy+bgYݚP@Z?O'L2.wH|/s[۾r$P` (CK?'AGO3|wY!_P@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@P@Pg0nீR_mFi`u?{4v |S[':|%sR%*y֞ ;xCH>.fc`r\괱S,4JG#Tl;v???Z׏_(m;S}cxZfϧ0XZ+[ik,QM'p~oY0thsU8<'C)G*4/k$͇5[/f|=A>.|P晎#r{UZ88BY we㋨E7__$ /x^W5)lSce#ڌj.}kNJ,$M)*Ͳ]n :UMN: SMJj(ե4Rioy/&02T:lM9Sj5ika(b)9ѯIƭΜ'@P@P@__~1x@qs >Qԯ$#t8kckGSM좞Qx`._ӄVYNOݧN U*)1~Qcrl'2̱,6 JUkTi pWZ(ѦVN2T};ZG ]iW){em-m!.&)4MfI6&R%ŴѧxCB'ZJ?[f U\^55iS&&Ux ^#8,-ΌJp2lAN>짇Bt*UR(I/ (CK?'AGO3|wY!_P@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@P@P37Ѿ# C'h&N,E ZFZy=X7ǯmÿ|"CZj  }E ߋC㏇1_[FŇx)F3eЍWC+TpaeK1}pa:Tɇ3ow1مKya[|2d}EZdq$vh guO4{?üPG>'vs*[ Ε̖~>5fėp\hַͭM,De]f!v3 p-\_⧉3cJ%Jɜ 7ax짏-2w=MhTyz.q0Y!8Aѿ?tV?E fuE?$׈l {Y$O.] 0|MnV{&{$6H~)8#\lwT8^,$׸xFm42|$|S~WYlV+Ԋt!O5RTp KV`o{Yü_>|L<\-,˙aynE|u2 qe$SZOutfi.5W3~%hYa&.VtR_J_koCx)+Mo<-?ڠ/71vYď yjQG&j\E{qz2Gg (q6udhp ƥHbsm?5L^adpM:J5#4ٟ )9SƼwCvTj`w.h1Ʀ&VR^SOR|q _ c_ aχ+[oo%[ya~b6=XɸVxO UTs/sN4КFG&ԳNxlOg^vg8.pdxn"E 8EX*qX:Xfx= jթ'S)-z3gl<̍ڸL-LGaO x/KӥPPXSNQlUzp~6chRi^mV!Zq{j1W۩I|r^"+/5#_gS}?Se/E_W'_k/0Y8/o*|Eu؜OB?`'M|Oy濵G}ߋ_O/uOCe/|e֧cHOMy7I6k]GYda֬NQKxW =hgYWa*zM, ʫ<Ò,jǚWIQHq<8,6Ừ [Wa3s.TliJ+´״-F8ptULbH< (] 5_p?$y?Z!?_yH_EuZ]x'/? Qf;jB.?m ( ( wPP@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( ( ( )e=G }|<״O _!|IxA_M_@, kMuV#WGsOgPG MrkVR8bUZ0SF|+˚WeO<_%*74?>%\8/Lf/[z?վ'GįY2_x7c5?߈?>%? S6mYx~~ѿ?>Gb~Ϟ",i|>kk>56U/w6&x[,xZu]8y19a3<#G*j8%%Jt(R'9υm_x\[V/(hd?z_W?~EV7P).Gg_ˤ77Iies;' +9ekgVʣ^>8e{HRt骭|.soGj}"8:V=eR<~igujʂƩӜY!!c^*6(5;_[k1kkk1_GE%z^v2܁0zpN 3𗎒N9aC.JtE:N8[K+ŴXx<2CNhbx\*񪦪\O}x;*Y/><ؿm9߁hq' RhiqE߿O}x;*>#݋?/~طa+P_|MW.ۤ6[Cnã}sUTC\-m" BÅkxUdV8iae_$΃efsjq/Þ+'s8rf*us2ʽ4a8fS*{?*xFNgma㨿k??*:@.Ug %WY0R~9K>(?W<^˓NK[C!ҏћGxgO}(*{^o{s_S/oPwqt>,iܗ Ul:1 :vQ^ ; m<o[C^>5}#/m]nXjf<t?}rvײ?? 
`_Hh W)'/ŏ?A?m3~O֫~O'A@Qu#{և_4O*?oZˏB ( (=CP@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( (( ( ( (_ })'oxFok~\bx~ZJzeݽ'JTxw%bu(b9U:1oԄ}=̟CQ,sGϨ2y'C4o ,%B8Tk=?^"W Y^$c׾ |M\>*MR|Bu5.&N/,<^y+c x*^9I<'21Kr{5Ia̫V/YXXJx)Wڹ7'Soocoq?9)8k 7Ao7 ]Qn'^</! EGs_hIe d~#ꏫq?9)p_/;A濵&/Um|mcO)yn\lUx,nt 8ZǼ,~%,Tiq~?/`VU[C0 ]4⽯u9GUu-guK 3G2rdI ]Vq9aKrLbH< (] 5_p?$y?Z!Uaa}Yiz]ޥWvvam5oieein\]]\IG$"E3>HR՜)ӧ TR!NNS&F)RJ)6HZ*SF֫8RJ%RZ$ t9MFrI6?'G@w:|Wez֞#բo.֩c(:|Ɇzl54x:'[ޥ*ޱrsN5ڱt4ӯB[n "rc7 Me,9)a%3XTqupӍL4+K]/It{EѼ1XAoloIEM4`4DHUUUUF.\N*\N&Vz'VjwJ*9NsRmapj, 8l.:z`aJQ:tEE-%btP@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@P@P 7ot-K > a{[zGy=Ŧ躍ŋYj}/D~jk(gf62us,5W8W:\e*RN8/ΕHg-nCY}<5lnMdx-,d*ԭCJ 5eMSJMm4|[5MsSuk 5bTյmSGPMOPQo#Mu{}{u47ww2qsq,M#xWJ:th҄)RK:tӊ)ӄ2BJ0RbI$qWzjV^έjժέZgJu*Ԝ:'')ɹI(#{ EvOpPĵFF?@*HQ<*?CѥO? =`7_k pPĵki|§ u㯊&~'G˟xw#|.&7Yx&Ǻ-F[xίjMma5ryOE?o{Y?^ &:xēдNa4-Z~oa7x#.;k214)/bWaSXZEJ)RRGd5T?f]Зq/eeʟ5j򋥆^)~~L/>02j5Fo-WW­ # Dy]ГĚų܃>j2Φ UiTLR hSXh FJ6ZyTUg qNc]S8T{WjX8ڥ 7%NT匝9t(Ֆ?r,#I$M~V~G@P@P@ 3pP ( ( ( ( ( ( ( (?AGv>P@P@P@P@P@P@P@/@P@P@P@Nj>|(k?> |ih7g/kKXS[/oapW-Ś ob?: WI0Q+)@ 7ѱ~͟c?Ͽ@D ? …ؿf1CGRgx??n ??Bl_g_j)3N+מ+b5y}#Z"NHF9UO!I1bX[˰x\?`pj^ҤT(B*~ҭI՟,5IrmsA@P@P@P|7ÿ=B ( ( ( ( ( ( ( ( /7@P@P@P@P@P@P@P@|@P@P@P@P@P@P@P@P@P@c? ( ( ( ( ( ( ( ( (~ܿl~P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@P@P@P@P@> xᾑ~4&m#1O{cgQI/owbg}@@P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( (>_ ( ( ( ( ( ? ?hO:gW㿋5/h:_m't|:-^Ӭ|=X\xN*(G¿7E(HG߅A|$#">+|#h#hqraxc-1y|'m[DҴ[Z85=Oׯ5+AvP@c? 
( ( ( ( ( ( ( ( (~ܿl~P@P@P@P@P@P@P@$f,nTG ( {|s_?g7h~'_QXK_ Y^7y=`v(g TUߍ=_~k#kOգt55_>)3x_C/O׾(?<o^xcḀ:[_>߆o&ƍ7ƍXß?Ɵ.kh<--[Z?|-X_i;/?[|TO'Qu&?O' /ŏ??^3xn 𶥪Pc>|=A|Q*>&~Пa(ѿe~)|#oN/sx^05U|c^_-6Ú΢#GmWKxĿ|#c_ǿwg;|@wwZlvXj_P5!KM[Px "?kg>P|85Yk NyhIT3sr4]@8>?~^ÿNVOZx0~ ;Ǿ Uo |MJ-D3ZXOt'wo?j/_>"#CO^!ij:fcg/+IM/a5 5bv>9?ƿoş_@NJkiI|+\ޏxgÞ*xȒ{ SI.uM;VNLԢ r__|AoC'"_o SG_K]/H}鱵>:f\i)>?)+PhY8_Ȓ > ܻr]c}q@P@~ğ>~J߂_~9K~$]ּ5o }UsE_<ug 9ڇwn w}atb7?bE6 uڧW_ug^s|&~#hW^-] {e?ÉnW'zĠ' '}/#O?Mo'ˤGV|W챤j?(gT_>x7׺?-I|c[]}b#6ey~>qO//?txG|A/| |ACZ6㧆)g⨏Q4oj~!c=d/&O5=#׾i͡|NG/g/Tk7ڕ 5wǿK?~I>T߯|)^#NJIxtm#g<]xZSľ<|?׍u:-6hW[&&?A`{ _~Ʒ 'h5/xV'DߊQ<煾'O? ǦVV rR/ƿ}@@P@{,MlX=ڰpoߩ ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@Psw|GLp@P@6yQo ~ʿ(ʀ ?x_^, Bw|MC_~>7Í%?[>9Eqyu36wwmR-PcM cU?VO|IayQ/:|g1 h~!7?]x;W?:5mv[?D/*W5kGգӿ߲sya33Ws\hxB_u)=cL _k7 % YZ?3/E><)smO ~Ο h;]~&|iG|Wo  ?xJ7˫^7{k߇W~?|c?k~˿?KOXѾ ԚMI*GѬ|;F;BM7zT:CEPÏ?o(_tJהP@J06աz o>&iPo ~1W&>2_L_?|GwǁYzwĿZ?luxOf~!xũQkq[(ҵ 9q[BuOU#|{j__7D?rjG?هO?,I|43|Mk~[? Yo пo__?m/].ןuKk<kT{|Q~%EI5|,Ivpƿ%8Ҿ!4!IPx㇇Ŀ> #B?'_~),3⿊߆|(ls^Oiv?m;g[\`t"?kg>!N @Ow noti Gʬ@ЊTUP 8@ @P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( ( U$l#ݧۥ6NǦ3ݰ<%P@x엣i?q~4tJ> osFn&'O?a?oC{G~ x%ӭxAX>s,Vv "z_amIg5~?gOͥo Uq%Z,$>"&"5(u5{oѢ[ ßJO>>(? <W2_kZxW|3隷K=Eu]꺂kظr?d?;_#`>=jzE~4|mJGKdžۍ> (*zc|?~͗Pj8 `|-Fn:=ߗ{xǚ[8[^ >x/녺_ ]V]·?%˦z|%B'o#LdW W5-+R6jRiΓ&  UcG]}g|m技b:;o4TX~V?u߷&wh*36E@1Uw*?ኬoLP;7ӯ`_ڗ03kؿo,Wx緓Xu(Ԥɶh$h@>W/}Y\7yoޟg"d'!Gxs?@"d?!Gxs?@"d?!Gxs?@"d?!G~DwO?Joڣ5z(oC:߇|Ei~4C;Xүoݥ_ig?X\˺F<[ ™l=j+Cx[[_?hkt>MD}-)ubxS*Vi{jR|27)K1U۴RKD},%@q[.W&n EocӥBVjΜyTRW'?!Gxs?@ <% 2,YnX~*@uzYZFǷꡛ2I.P@P@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( ( (w?cnDι?>_hP@P@P@P1(|?P| H?CH >~_>5G?i'oh }P@P@P@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( ( ( ( ("._OP@P@P@ ?k?+B?70GR7ïpyjtx{$~QuZcC)/?pG_h3P@P@P@P@P@P@P@P@P@P@G ?n_ol?? 
( ( ( ( ( ( ( ( ( .p_H?_Kh9fP@P@P@~.JL3;a8?CuZ]CVbFv0~@P@P@P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( ( ( ( (?/e:-~@P@P@P@pAR//#L#|:XN'7P!~֠GGPզ4>"ݣ' u0P@P@P@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( ( (w?cnDι?>_hP@P@P@P1(|?P| H?CH >~_>5G?i'oh }P@P@P@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( ( ( ( ("._OP@P@P@ ?k?+B?70GR7ïpyjtx{$~QuZcC)/?pG_h3P@P@P@P@P@P@P@P@P@P@G ?n_ol?? ( ( ( ( ( ( ( ( ( .p_H?_Kh9fP@P@P@~.JL3;a8?CuZ]CVbFv0~@P@P@P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( ( ( ( (?/e:-~@P@P@P@pAR//#L#|:XN'7P!~֠GGPզ4>"ݣ' u0P@P@P@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( ( (w?cnDι?>_hP@P@P@P1(|?P| H?CH >~_>5G?i'oh }P@P@P@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( ( ( ( ("._OP@P@P@ ?k?+B?70GR7ïpyjt=?(ZcC)/_G_f3P@P@P@P@P@P@P@P@P@P@G ?n_ol??J|I~?- ~'xេxw1_'!xǟ,p[xjBmfou] ɧzxC%O~:O[4ý(~.h?,7{ {?~zg퍥wߍc[j^}{Bÿ<9{w OL A)ἒ_o? ]~/Ÿ/tG=G/ |VTt>aӯu hFȓƚk"ҼC>~_  E|Zk~|i- >"6.u|G~'iޭx[K>JԬm">/f>+/ s oٓWCּcxKOE]4="X}=uOGonYtH%ßBu֚mgw>O~ȿ -KC]xcğ=eAwhyOү+-[H #o|UK~@oMKN MF Ԛ't[97kmo(Ŀ|dO^߅;ï >ơb1,B᷏Tu]>-@u~rT'Y_oGx_GſG>;ž<[>ov@~ҟhۛ_|X7Ÿ(= oWMF⮥VXh YW=ޯxZx./u41q|+~~!eqY|5QCGGxgO4_) j:pk~m,k__[M~F6_WĽO_;Y.s|<iw-.P/ia%]c{anxG|~[go<#]$W5~&0> <3K^ -WS@='+a|_oY7oV 0z0|'{3<_Un.OJ,CǭQB~@?g5|B ( ( ( ( ( .p_H?_Kh9fP@P@P@~.JCV#gw)8ƺ<=?(ZcC)/?G_h3P@P@P@P@P@P@P@P@P@P@G ?n_ol?>?_%ꐹ S?P|q]`>kگoѥѺ+375I>*4ohz|wMԵ A-m|E=z~Ofja` +߶c ~Ԟ=?hkp]oGk>|xQz]>ၼMJߵ&kV`N~V/~~_g~u&㟉:~?1:GIu.u; O|aC-ZD"Z/&71pj)0<(xo^Wx/h? =(д;]OB5mcqz|Z^cdN~V/~~_g~u&㟉:~?1:GIu.u; O|aC-ZDKc_xuŁY_-ݿ| vgմvmBLZxnlB\T,74 /cEc* |_?k?wm{G֫~7~'8|S .[^{_v??֩jJ?gF;r|lwwgǟ}3^,$vv>lV>'}/O/-@?PzƟWϊe*W&r||^>Joյώ_^⟆zƍ7+[2)xwž,ܺ펱M+s|AᏊ<_ģVT-9oՃ77|]0m߂߲oG'7_hP@P@P@P1(|?P| H?i?~/(|P|k#LOH:C( ( ( ( ( ( ( ( ( ( (:;PQrݳaϴ^7o?i/Y|>w| <^爾x?ľ /%V-F񽞟i^+Ӽ?v/x2=q<? zW7oAG*C|"澀n؃'TĆE}ݿO ?b+ߴ7hk |S _<7vӮ|UMU,ijZW|oU>%t˿ xzׇ?A ( ( ( ( ("._OP@P@P@ͿWŸ_\>ږ\"|⧄{Jbf _vŞ}cQu )f *d|Д)G*4~Cӎrs[TϠ熼Axx9V.YmdSC)>h޶U*QV1aЮG$RJ!ԯA2Y[rm?g.i4NMnL? 
( ( ( ( ( ( ( ( ( ( /7@?Gso&_cSd_  gC'l/ DOj.:Uɘ?߅_P ] 8~>!i@<q.+t|BҀx'H]WͥO9 J?s!w_+6?B¿WG/m(?߅_P ] 8~>!i@<q.+t|BҀx'H]WͥO9 J?s!w_+6?B¿WG/m(?߅_PyU g GwJG=3B1izK&$gDY*u3m]P=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@#E_۟2/=$'v)$)ǯWkO O{/m~/x"?B;l:X~=`|o^FiWytFiWytFiWytFiWytFiWytFiWytFiWytFiWyt_,ux>2phMW*Wc?3}S\^OKѴm.Q5by(MIƆlMy(8ѡNUjIF)MB2j1Mlr0\&ܢkmde էB G: 4cS^%VNnsb\O_/W×1 |3 7[Υ⯅_?{O%^x~Vk;}fKt}FNUUӮ^1,>wgj̲P^XitQj)ҫ(Ռd҅jo+;{}gqx/ܪuO/1U]| jc)br\)MT =`|o^FiWytFiWytG|g/=ߏ67f k'Mpxf_jºeύ|E}VSu}K\kPʘjYߊpٚSBxz  VEIҩ(EX4V.^.<O/θ"R晶_p='&fykД*Z9FN1US(?P꿇>| #x?FgG<Z WPдO:]۵KJ춱}mQG N4piP RNÚNrciJRvJm[gqfsg;5q>cqƕ<=mT^J|%J:q!o0?J7˭Nүүүүүүүүүүүүx7Xwg߇dϋZO5vm5gOjz;3̰F a*X\?'Q֧B5)*h6}_pWgp;>> -lVN~_1\؜]JXz\,"#(*p7/xC>>.xF}j )iٚ晩hf˩SmTS,̰ a*\GS'MVBwE)MjҺѦo\Cf|XJX'qX|m:_~4p*౸zԗ$ӄo{H*.ϔ{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.B,G=7_n +,>g GY9x_;#w3v@@P@P@P@P@P@P@P@P@P@P@P@P@|]"w~eߵ'WğN%?gy<%G(gTw_Ob?.?hNgSg_!@?rg^ ow>{v~ x~/.~|9}fQ kAek * {5[Z]xOwČ겡Rh*Z apتS+:xZRQRIڸBrS2P\1x,.BscթUNx\^y:SgRe[CP?Oſ&Ex6Qnk qj74C_;[+rPg8;1f7cU15aZ<4g$(U\ܜcWgG.xWpy~''`0s,$sZ3֣GR<,(ӫVHʬL€ ( ( (8cP^֠)_i?~'(|P|k#LOH:C( ( ( ( ( ( (?AGv>P@P@P@P@P@P@P@P@P@P@P@P@wQ{ٗ~ԟu^/;ؗ4 oSj7OQ߳|g?Nuy8k͞MOӥ~  ?k?+B?70GR7ïpyjtx{$~QuZcC)/?pG_h3P@P@P@pAR//#L#|:XN'7P!~֠GGPզ4>"ݣ' u0P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( ( ( ( ( ( ( ().?Gx^$w>.i5</֧*??/Gwo=gJ 9q5@?p<Z3?J4 (s>~ן@> W$~otZ`韡o_w ?d?HƇR7_f ( ( (??CytZ|__GOզFuwwNo?CA?Lh}1?#|EGXN?h` ( ( ( ( ( ( /7@>%x7gg5g=Z_J 69 _Wg" ;Hҭo5=Br!O>>6m7*yI_Vo9 7),2Ib<5"eBykCEOO9 ?߱?k/rϿc_>+*~_(~ƿ:|WTP>tY}_S!@<5"eBykCEOO9 ?߱?k/rϿc_>+*~_(~ƿ:|WTP>tY}_S!@{~;;Yை KtmG&WֱeM, ]iV7؋Wxi> ( ( ( ( ( ( ( E(˿jOQ:?ϿK sxKg7ʏ5 QG'Ҿ3Ÿ'q:\M~<5f&֧ҿM?B \?j~!?V#gw)q8ƺ<=?(1a8`? 
( ( (8cP^֠)_i?~'(|P|k#LOH:C( ( ( ( ( ( (?AGv>ާ6𷂵{+-CMyx7] ^>'ڵ<H xVFLn,t{Nk{Okn[^^\Ciii 7WW2\O+,P JK,g_گgh hZs[?/1rh:Ms^te4GOKlo $pOt hWitK>$'u+-@Mk湬jS[iNqoeaeo5A(?o >%!$.bTе{mJONKIg4'xW~>'|67g^U–~(#Ś[].__Gԯyh澼(t)i&𿉴7>'+-k@YCh懬i\ڶi6naqqeeq լ,r0Q}q*>gi7>A όլ%)ZjNLJ$J:( ( ( ( ( ( ( (_̻xIĹ?LWO'sxZ\P~"+<)wcG_lojx+d(8cP^֠)_i?~'(|P|k#LOH:C( ( ( \?j~!?V#gw)q8ƺ<=?(1a8`? ( ( ( ( ( ( (~ܿl}:EtObcs⏍Ph^ Xᯆ|__x~C- /!Og 'H|>׿SB'_ kz{'>7hU^7> h/j\ܦ CG5EtO.?_?'C0xOXxO_4}|D׼a~Hľ"S|=_ռ/uxcPĀ/~ͿO69ُ_ :)q1ƞ%kG֩(u_[~ v[/<CĖ>}?ෞ$xo_+_''-h Sɟ G~ i"x#d c|*+xZ8֯a];g]wY4&:mu\k4F#4QwVLYxa= u{'M,c׿wa63Gu{j7fD,hx :N/~W+i]7uws#?/ JB}Yj6:^pK(/S?]nG{:ǯ>k?Ϋ'ǟ35iĻ__z_,16mGH{PbV__3%|^?ҟg}hii<`~" / ` }ã @Ÿ+ G&Ÿ_🄿io+OSxO?>߲v^<-@|'O>)J4 _Sմm?X:<5~w Oо _k>xQ1k 3k-}#WRjZ,Z\/:z_~}|V?wj xf o~| |e+V~:v__<'N?|9oе_QKOŦ`K &3x5 ( ( ( ( ( ( ( (_̻xIĹ?LWO'sxZ\P~"+<)wcG_lojx+d(8cP^֠)_i?~'(|P|k#LOH:C( ( ( \?j~!?V#gw)q8ƺ<=?(1a8`? ( ( ( ( ( ( (~ܿl}:Et_P%x)g?W,xZ־Ygo4 A6Ѵ k&K[ ac >,57[f?Oc_؟|;+ g=kZV.?tM6'[>k_FFSgG<]er7.üwy꿳;3Gn|__cfO2:ߏW ~ Sτ,-,/7w/og? >Ykæ>8|L #?,_ C\al}?q@T~IP~+x%x_aY /fX,߰W>_Om<?_7; N?? { 7&_|k?cfaW~P ?%O'ß)O_Eg->/់h*ڳ? 
>|>x;zƚT~5ߎx:ǟx{BZ<\ڽ:$79j GeSd8oX?h o_߀־|Tcictk'$+M4 Iao^iP[Zj\]C'"p]c֍bž|^\cկŖ> ?|6>?w'fz+U?|e0~ fj:vxgº|,.|K<%xĞ +Xu/]3U<=۟io ſWk~>2/\?/>QrDu+~#GB4|9h m[[o#f"X:Gc-x/~ |9Y_AtwGW[~¿',|13~˾ iM!>kk5 o,>'~O&%Z|A3LMͮx+=l*7Ə|!ր?@( ( ( ( ( ( ( (>.;oo2ړTwO's\3_<?jrs Bw|f;tIo_O' ٳɿ3tOᐠ??CytZ|__GOզFuwwNo?CA?Lh}1?#|EGXN?h` ( ( (s>~ן@> W$~otZ`韡o_w ?d?HƇR7_f ( ( ( ( ( ( (:;PQrݳaϴC |{>^9O ᯊ>Եx]>Ykwo=5o _Z?Bh<~Z%m*~͗4oq'3vOx?EMl;iʼnx35?wg8j?k4٧%pdi߳O,KӿfX?P ]ѧ~?/'Nb^ @6OwF4ļl;iʼnx35?wg8j?k4٧%pdi߳O,KӿfX?P ]ѧ~?/'Nb^ @6OwF4ļ:~Һ%?h-_gxkѾ1 >,|oh^湫d7-ιxf c^ޙxO>W?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,h>?dIgZYS0u_ kxImKH,m@ף&7!pTʔ¾ B4bhNEh'+)V|oҿc%1O+r-,v&i9fueU*ե,0*8ӥSJC _ocXٟ _ocX _ocXƾ gO|[ڇ(h״gW//nnwsysk;k_ɵiN FS0' F)Nٞ %v~7'؊ԨQVHRnhGciJ1Wji-ZA|'?gZ '5+FKd^^[}:gxE*;P3%IJ5f~G*grURiOf(˖Q]OT _ocXa _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocXD~4^#b;ό nss`_[i~$A> |Mώ7ggf슒W,ud+aƤ4 68_鸧qj_j?I/ uex eҩ -t1ywX e\>OQ#Z:CK79 | %|j)0Au·"٦qàGexK"Ş#M|"xgTIb1+ԅ9XQm>vVY㿈9W~+q_d<%4Qw(ɪJ>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>~>,?#^%D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xwH<;ςo$O@H/7_w߁Sco=;Z3X{ _8G`a^U־{RuksssOK⸗kXrYgԿ9=;.noe콄m''@'G௷? t[I#P:|-ÿ(>D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(o /Cn{#7*lm?'U Cfz]_Kc\?~#xhL 0_P+ʿ֯~jTy>nnnwI~TWUK,''c}e콗>D| "~?p> ?O?8xwH<;ςo$O@To Ka_ǿh: [~6Ӵɳ'4qi{1uçWw콶FIjqU4%FgrD| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xw~- Uhmߴ~†MJh]c+ilˇopoTi S%yWZJ/'խI?O/S_cYʩeRXo̹?|H<;ς\o$O@@'G t[I#P:|-ÿ(?`f)G{A-D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D|ߴ|>||\~||xPeo ]=gk|>//|I< nx k7'gn-&kactq0*J 5n~gNMC/+*wG8ox+<._<-)cq}D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xw`~۟C ?WFh|}oÍ;"l? `?iO`4>_ھf( ( ( ( ( ( .ПWo~1?Iޙ~_?Z3B ( ( ( ( .ПWo~1?Iޙ~_?Z3B (?>?>h%e.l>ROx~ WQʶφKizBO-o-hiV5¬aEN*J5hajգQ&N#RZQM;~YOUf'Z#WR`3<8:(ᱸM|.& *ԧ8ɣ Bw|f;tIoѿO' ٳɿ3tOᐠ ( ( .ПWo~1?Iޙ~_?Z3B (?8 .X7ú߄u9? 
^5 VFRѯ4ǚhEB fUQ:w Tf8:u9Ss)S$Q2sL&eټ8mc10Y<q7 O1ҩ aqh¬hiSMF#%ƺ#p}1?#|EGXN?h` ( (>.;oo2ړTwO's\3_<?jrs Bw|f;tIo_O' ٳɿ3tOᐠ ( ( (?AGv>P@P@P@P@P@P@9 Mߗ.i5</֧*??/Gwo=gJ 9q5@?p<Z3?J4 ( ( ( ݦCt=y|E4( \?j~!?V#gw)q8ƺ<=?(1a8`? ( ( E(˿jOQ:?ϿK sxKg7ʏ5 QG'Ҿ3Ÿ'q:\M~<5f&֧ҿM?B ( ( ( /7@P@P@P@P@P@P@_hO+7~^jLLh?-~@P@P@P@P@_hO+7~^jLLh?-~@PR/Gwf]RxI$}b\Pkx+'_9NT~._z??G~Ε;#Prk6y7~ן@> W$~otZ`韡o_w ?d?HƇR7_f ( ( ().?Gx^$w>.i5</֧*??/Gwo=gJ 9q5@?p<Z3?J4 ( ( ( (~ܿl~P@P@P@P@P@P@]?zc32g h9fP@P@P@P@]?zc32g h9fP@H߷wI;U'sOA~O 9Q;3D:WxS$7XAˏ':WP@P@P@_hO+7~^jLLh?-~@P1(|?P| H?CH >~_>5G?i'oh }P@P@PR/Gwf]RxI$}b\Pkx+'_9NT~._z??G~Ε;#Prk6y7.;oo2ړTwO's\3_<?jrs Bw|f;tIo_O' ٳɿ3tOᐠ ( ( .ПWo~1?Iޙ~_?Z3B (8cP^֠)_i?~'(|P|k#LOH:C( ( (_̻xIĹ?LWO'sxZ\P~"+<)wcG_lojx+d( ( ( (:;PQrݳaϴ@P@P@P@P@P@PuBO͞AP@P@P@P@PuBO͞AP@|]"w~eߵ'WğN%?gy<%G(gTw_Ob?.?hNgSg_!@P@P@]?zc32g h9fP@pAR//#L#|:XN'7P!~֠GGPզ4>"ݣ' u0P@P@P@H߷wI;U'sOA~O 9Q;3D:WxS$7XAˏ':WP@P@P@P@?twfh ( ( ( ( ( ( ݦCt=y|E4( ( ( ( ( ݦCt=y|E4( E(˿jOQ:?ϿK sxKg7ʏ5 QG'Ҿ3Ÿ'q:\M~<5f&֧ҿM?B ( ( (B!_iDŽ?P'zgeEA ks (??CytZ|__GOզFuwwNo?CA?Lh}1?#|EGXN?h` ( (>.;oo2ړTwO's\3_<?jrs Bw|f;tIo_O' ٳɿ3tOᐠ ( ( (?AGv>P@P@P@P@P@P@9 Mߗgb-.i5|/֧*?_jW~>k.eo߳3ne#A0^2I'<)wcG_lojx3/Nkd?3/NhĿ;E?K$?LKA/Z?3/NhĿ;E?K$?LKA/Z?3/NhĿ;E?K$??o4ZԴo럳kͼh/|R۝6p1DŽ?P'zgeEA 3/NksLKA/Z?3/NhĿ;E?K$??^&5$_gy{[M +֑̏i_H1"+|3AL3;a8?&4$gg{C ˏ֒m?i_r-s#|q <=?(1a8`Ŀ;E??K$?LKA/Z?3/NhĿ;E?K$?LKA/Z?3/NhĿ;E?K$?LKA/Z \?%چn?cfSK)~ x$]lܬg*A?ϿK sxKg7ʏǯ5_r4(_x__Ï~ǾO9x>~0@v5m;6MjCѦ4]4o:u| uEK3Ag@N" w ,+&|T1NYO LN+{O/sXT_X75~|MdOm}Of ۟?Xױc>??i uEK3A?&I1[+-3s\IpeH&epɸG,9 HB/cVӴ9~|IҴMM@ f5?_-mT}hiZ:捤ki@P@P@P@ xGWh ig ^g?_xAw{]sHyI59| YtZO+]M}WU֮uMHG:P uEK3A̿'GmWI; wϋ G>*hw'ZN'~WxO]'ս}orr{*t/K mmo?H>&WX\'U쟬}g_{eW4G:WG9/Ch,?u  |n`῍+;߳-N7Լ/x2O^Ou qwu֍隝 >%Հ9A|U%8'{OÚG<3_x7i:-Xa<'~Cd֭=m#NKC[w_T`3??*/_|A!t y[to_i<+OR4{jVVZ6&]G -.? ~)B7 T-bhΌd͸&Z q'B'9O8L6+}[s6eB#N_aV{*{9Kqe~,?oO'eAȞ6|O7YwV}k¾ Ɵl|?sF͊60mdU/&8/ `jpקW<\|u(Т⽜!Uj黹kk%4xŞx8+)eauXiᰙkBOqU}թ˖Ҵ/QN~fh/~U[|$ w?K.$82K8ed#Kyh |On1XiO~ſ $Z&߇~ xt 3Z/̖ڶsiz>4BsF5o`_ (>@a/~τ~&'ƺn_jr^ W6??i uEK3A? 
s_3> ~Y|@Z3s~4_s8ԝ+g/+ç-hG#:j"Z_pW4~:% |7⛍1żIǮ%(uA_V9Gwkh*£R___٫c/ SO"?ں o_ x]x &>;?LOu/Y}{a7?4I6WV/[K*%GIVUԯwv_Q+[_2Uj{JrÖ1kg^%?_RYP35ïT"v|M?Ǡ8eՏF;ܦkk(|ma^Iv%XKgquY^i:Ɨqs]RzŅv77wFI( xWU𞥨iF]ՐԬilMݭͨ/A4[n\~3RRel.au^Lqy\9^+ׯN*xOQE{8B<4"wrKh Z0AtBoۑV++)YO>AG PP@P@P@|}A-nbۢVʈ-ِY̟ޕtP@P~տk/_E@ƛ?jXx:O Z,o|1|?CvAj?߈}gߴψg |/{_gBk;{ wXx}o7D/Zfm;Ekm}։iq5Ŏx@-"{ɬ4k9ooc ~<9@hP@P@P@P*??goVROǯLm7w>:_|  ~о#W.>._M&ԬEjxB/M@PC k O`B>;Ҽ.wg |<2G߇;ᮯik>mSڴŭwwwJm'#UτdٷVu☿eW|IďjZ?KLJ^غ> d|9impf>W~z|}rGࣞ_O~Ͽ o|1>.cEG?thTď _x1v%>/7~/\z~}?Ó÷Z%ίieE&6oamـ|J{7eޚoC a>+O5k jdǬ躇u;k6Z5 ;/ɻ'f?R/?V\[u~3|S/Ckw/bKVѼ%^+,.t$~:WETPmF oX^O*o's~xa_u6[%-V;,b#k f3x|8nϟocIw7㖵/=to>#K? h mvO}~3']54 ¶_KjZU~YP)kZK@?m??KN( (Tɀ~-T?h_o|u*!gg  !/;|G\|] \MXE3]i,_Κ?M:?# _.|w7x]co?^yexw[?]_? |4ڧh5k[Enf +O) Gkxǟ >n1~77گ>2+߉4ԴW>a9?u{|`r|2X?o?WE>$G< ڟT~k//:c|]Dž9_<-y&t5]bxg7Ifz2~ֿuS|K}h_Co1o h_5><)e,''nK_J%ʋ\MNm.OM[ a'~?e&_B|o |,~.ωI?Z7|1]C}gEm?{yjF) M Ŀ|{sI΋[ke;oO |ֵA4K?#]2WֺLJ5JkhVR?kطX?c ʿ.u?bOثO|g|QOk1 l#g.翷_q_}o_ [~~uo+xR47SK-i6/jiEPMu}B}<O%4x'D_6>x>+)AMcwMW#|^GEt~x{QX5MSX}q^ &WW@թ (< \|bw*4xRǞ?|Wj~c Yuv׺mص++kjwv7>_wm<$N,?_jC0f_/:$׃%_{/쭣xKGмWX\3Iu&Vڍ(4UO[,''>Q ¿ */m<oK{:[9EW:wXF@-g8;qY?nēxo-k_zߌ|F~ѾRx5ڔM  Jf(=#k%(ǯ Q@վ+>|G?3_m{DsI} Ŗ>Y+]GK_/S|e'>1|*5i,3[h]QKKn,t]KĚiMa]Y{x5̠ cB ( ( (9[Pk oZ[ fL0 {*FB躀 ( ( (>`bXcĒ}I9' (<߅5| o^ Эh5x?X4HmIo (b ^Yyf@~:O~:g/7coĺw(|?~xQ[ ({cZ_JiޑxU:Vkt?π!Z-<-Y8fkMo_{j7_\~'k=RJ5-O k 1]~1NCq oĘ!w _G#}?ob _٤~a(eJ<a wG?tF 6O0lF %$i4"J;{JsnxJ7ndkk_Oc/;ßqA~߲ſث_4ggo?Y⏏7 x[ŚjVzM;S^iookf?pdoN/ q*|"f|>xbmO;y}u]{{{xX]՗O}WS[+ko&0SFn-?hwoߵ59OD><~߳GǏ~Rss/?į'4V7u53R:~j?;n_)z ޡ{Hԯ?!g/'{0?[¯Yn^ ?f|R]GNeGjQAsx G/2+;e@oL~ſw1o)o쟠jW)R_>Qf>(2{y z4&sk&GgJd A,|,a?Fi M%x'wm? 
K5F5Hʺ9f5MO$*ROfO.tS8^焣{ZJ?;t?G;j?'OڋQڟh[^&|iZi|P1⯄>%zV *Q—wVVWf\X]2O'g#N~+xƟ>+|V⏎4_|V_Z<{{GVj chv2&i]{Ωxj)njjG|~oxS^'x3_Þ$ޕx]:4+?$k:G$뿱> o kgc5G}E ~zֳ_ks_qK*K#{TpQs kS/Q~ D V_Hx" 5tOO/E'P>"Di94mI]I%mf)?_g ?>hcx5ucF`6E$٧9tݪBPm])U{4?9>$~Ÿ S?'?B&Z෌C?~ }^/.|QjsI[I-Y'/>7ߌvCSG x+ww .O쿅?|MЮoo4ɵ>tP*[k#q:][inl|SkV^f!?"hA|Q᫭'ZơoH@> (>D><~߳GǏ~Rss/?į'4V7u53R:~j?;n_)z ޡ{Hԯ?!g/'{0?[¯Yn^ ?f|R]GNeGjQAsx G/2+;e@oL~ſw1o)o쟠jW)R_>Qf>(2{y z4&sk&GgJd A,|,a?Fi M%x'wm? K5F5Hʺ9f5MO$*ROfO.tS8^焣{ZJ?;t?G;j?'OڋQڟh[^&|iZi|P1⯄>%zV *Q—wVVWf\X]2O'g#N~+xƟ>+|V⏎4_|V_Z<{{GVj chv2&i]{Ωx }WtR_ ^/ ~2x^ N(+z#^vX>OD5i:4϶B[0~5nog _ǀo? *_ ?ú;~>.|?i1|Sj=+QT@n/%Ѩ jCy |ٟR/;Zs?^k`f;FsX۠j馻PJ2gFKx8]]];5t<1_؇JQO _RKK6~.|Z eLj|M/u]^vr_mN>ķOݏ߶죦x#n3'_h~7o,e}BONE?+뚕Aun?\f?>~<)+P5m#]ծtZKլmSxJV/QѵUuhޙZQj( o~|y/G;]x #xSBhm|EcH {!&6h1ydDV`G$뿱> o kgc5G}E ~zֳ_ks_qK*K#{TpQs kS/Q~ D V_Hx" 5tOO/E'P>"Di94mI]I%mf)?_g ?>hcx5ucF`6E$٧9tݪBPm])U{4?<|5>98qgߴ+|[uOxvSj(p'Yi<פӵ1-曨lFg߲/iW_s׆&h!KX5_>'5Yt;[7u;m?dmN~Z|w[Ě핗|YOw6jdmw'x`V]5K0jTque )QVe(ʟubƼD˂9Sq1Yq&|&Xzx*WPEIb«Wg> F_Otd?~aG7\D9OƬ!:B ( o> ^x5Ս'7M帓5k;ga xG߄I# _Zd"ǶmkLAgFҵ=B?0VEGg9u,W/}KKoi8ԭFݥVq?kJOjVmU+94pR]tڝ>#^z兝װY]4# #UĺqG20#n+)rx_M ?7-WG%xw@?<߉0g&ow-<}_<7~Qwdžcc|7V  oa5,σ?՟e|W=/SWZ>_7[/']WĺqG20#&낿)爿&o!>N(C7\D9O`qy62Nib8m<>zap،F&0thЫRq/?~~7MG~#r/M+\LJ>kvռV:T~(m %^Io f!73Kp^$b/ #gSZݿv6sIxj́%m/7e|BԼ6)j:Ňxw@ԏOSþ 4(Im;=GM_=ebQa!a_7FpuӥfjԧM9Nԟ_ 8KW/d0gS 牎]0jBY*Q ZZT ( |C犅ψ|sOj6RG4o\XxB\_kzi]ɌAvyWJZ)ѣNuUiҥJRIB 9I1NRm$2Z\F"*(SZT*4iSJjq:pr%7&[ ko>d& |55-K5 2QêqyO\a(c1X)^<Q,]?NI-]/i)VE Џ^{)9~ pym,pePZ_(ӫg:.' }oG7?u^K!b< k'Gs jKlf>&qO z_f?]_ UnEp/꯳3grxoo7֪M4# #U.QĊ?}&/> F_O%ӊ?y1M5?Sm?زeeeo  m-mXmjCE 1"Gh%ӊ?y1M5?S?෿N ZS?cߊ:q-x7ud]{fyfGG%$<~&낿)O[/']Q<~&낿)o; = G/~?|#M7DS⏂|o};?_ j:D?c#3.?Lq?ы`8(bVUkUiSaG.RN(C~Qu_MqOĹoM~ǿJ+COo+;d]{{dU)$Y0N(C7\D9O |^xC⮥;OC~#5-V/Ge~mڵ'K}CRxR cq7a3֥1yd13`^,&PxK ^SjHJ^֝XSׂ8 ƹv0YV? 
y;)g50t,+ FmG.|n꾿%ӊ?y1u_MqOώV?&_A*|Rd|Ak4ou K jĶ6k}54'kl{T^ڗ0/Q/kHTKn} }5'xgk8%Ϟ[yE:3,6`5,ilj܍F7}3 ᯢ VM{\]LW=_Ws̵\]oo/z~g <=^[W14+.ef^ESj4XyCS|ܰ[/']WĺqG20# k# aӍHc/Uє2@ "N(C7\D9O<189Q0F*^Ymld0J5k (`ԔԚ䟳J4> [YFSap53yn.c2,*f80%){YBUЭnzlպ3$Jʹ**Wg:( Qگ ( ( (( (]Gi^X:6gqkZyo6g^j:w$VVv<*$p$B#0TשNsZN*PJ*M9MF1ri$ٕjpj1iPB֯Z)QJ\TV)ӄS9()4 'Ʒa-W3ѼcMg WI6n~ Fy)eG{PsrJuԮڸ^oE/7obJpěpcb^Jy %V.OޖmCmGQ5BVկ5MWTu=OQPu 幻K$yY]vc;N:4J!J(F:tSNQ!BJ1RbI$UVZթZjZYʥZjIΥZ&T69ɹJMMJ( ( (%CGEUkzOطık|cE%Ҿ -FSzuGHN nt˛=73^_[ |:CCo<3? *{ο"G kV2xAͼ[aNa|sXN3lʶQ3NS2T;{:JRJ*թ\yr'gw ptS9*ؚMRi1iaPK ( >(|Nc+ko4L׵>vRG<7$|C)#t[ۇI V2(r\5ŽYc߻JtJUkTvB.Je pV~q.GyV#:eeׯ+JF =(ޮ'VRЅJ|i6_(p/vٝAaKpLbfaq$qi1?2uCo_~e\ y͸%8qeߗ/R)ʴ^Z*I{ xd*w~?,FK{|[9ac>\9{sfIyVYBrYi:_!P@P@PO? <-m~7 ԇ]eoxt+ΗiYxKBN^~UƷci<9{{x313dVqa+)s*?UsWV4a9Bk)b?8g#ʼ.^"q3q83xTx;p?y᪪2U+}?JaLmml,`(IpUaq =]a7)JMΥJrVܧV9Royq&mybc<ʿ 1xNsh0TaJ(F4ѧңJN1"_ͧ P@>8_]{"̼lrwjSoxz-vKxnY6w֞ps9<$TgiT(a^bt(ì+Se r-|uę,Jck . 3jJ*U|`/o<8~~v7x~iw7񐵛}SXqտ>-V8VK rn Oy?jCXG(TrSW3_YҟsxZ_' )U=BW#9Mݧ 7,3PJ8S^U? ( ( (kJ>.S'o3?=*h?gWs3č?|B\^|$pIZfomausL4\uRj<;}˪YJa,ּ\*hNQG&~"Гx[/c|ڕiF.2 5źN[|!ЯUq*ןR//R sS!z^Ge]7,w#i&V.umBok+O 2L*(ѧ8:|n*Q{lf.Y]B)R)S#s:3g6]<]ղ céK7)ԩ*Nz*K[C("/tDּUsF<5aqxž%mtoJugi1ʀpXl6#Pb*F = sZIF:pR'Slcpyv,xaiέjet.Qo.m棪x[s}K \7<{kPG'RKxOj@,!vk_!C/ҥƮZlKIFyOДIQ{+V'<;[z8& ~>:TMF_nʑ>ױ 1͌Kq,,<4L$K#Ydr$;wrYؖbI&QJ1J1I$I%dZ$-?%)JR)7)JMJMMzoVGL ( (bTWx"G|QA|wV84xZ.tcKkw+|mݵ2\3C yxl$ܫq/OkV1`ptR4Ekq7bI5%%6[(keaa:U!I~Zgsxqk6_$P.? kEܟ桨oT핻ϨھBMugq~w¹|I{<h?utOoڞ.҆(VVCd>X#7GRdx˿?~5??Ϳ?j6_ fZ_Lw4ƿGG#22OYFA_ I.Q(6@I?h?3k)2 O~#NYSÊV9C[:s|G\\]J >߫jޟp>1/5LgHc`*^7\W ]G=Ex_hɺx,"+>i}g4rD6#:ׯCJ/uYx^4ɴO*>'Wop|o#7GRdx˿?~5??Ϳ?j6_ fZ_Lw4ƿGG#22OYFA_ I.Q(6@I?h?3k)2 e\jN5GRyU_'_d|g뀆N,AoH_;>7m WO.Ee7a2|,9aT9>jԲuk֛u*i^NQc6s?sS9^z-*4hpԮ.*|)Ԕ/ k]//ChHlWHGG`j]݉TE '|1s~ ?aM11|eso~s{-◈,-ͮl0iWw:Gxqx׫tr3,L_C.9Ek^WˆԽIUp?Gì,0d8T,?5ޮuğ8Lx,&_0xirZIխ^S/ +wW:`3 3?3/+@qBg`3 0̿_J? +wW:>/Q32~7 +&x _ 0QG$?Й'uC/ EL?3/+@qBg`3 0̿_J? +wW:>/Q32~7 +&x _ 0QG$?Й'uC/ EL?3/+@qBg`3 0̿_J? 
+wW:>/Q32~7 +k@| x/^?m5ʿ $׃niIτүUg[ekX k8SCx.TqOO-| 岲c{u'M9'χ_8-xX2 j:xL\lÌs^TrLg(5ӊ:qNi{<^+uZ|F*7Oæx{CO0iV @_b1oc }Oiea)¹ e/:f8 u񘙷'9|ԜiBIwqmǙs(!ɲU<%ʨ]ar8+uU2RMRK_ fw'V$VWeN U9'N>udNe?:4?<Ǐ_[=ZF>45Xb@ѼZ>iQ[jiZpJ\e4I8']t&T [,V0gPUR`ixWʭmziŸ◊\on.Tk U*T[~UTK euJSR ~83N_{A_(ɟff_/%V;^f (aпX#|]->en4J}L!@? NxK19Y/ N7 JXP[UΤ[TRYM51V'<׫b<4 Ezէ**Q֯VrmέYΥI=e9JNW? ?6EwK:Ohm}ݣ9#PQt| ?R:%G_*̿+?=E7rw>? [GU$GB?Ve͡-qC?R:%?uD /m~.kx /_Qï&0Uh{w\o|şI|2"~1¬C#ߋZ'pॿu^,K1#/f_]9?;-g_ !_La2!)oW?eE:c Y6GOKY/b(G_*̿?=E7rw>? [GU$GB?Ve͡-qC?R:%?uD /m~.kx /_Qï&0Uh{w\o|şI|2"~1¬C#ߋZ'qU2i|@CŞ-wxU[[Uugy;Mҭt y䳱bGD|/:aipN"a'N0\*Jueʤ9rkv}YkYqcGVWS2tVTaN'8R:tx)(^O fw'V$VWeN U9GI뎿@( +g/ߴ|_?𽏇| }io|sm[ʺ<-jR]u=A{7%ExuydYma+O1OafjWM:kt,wmx/wdy\<.[R~N2,*18_=IYͨ*1_)oW?eE}B?Veͧ]9?;-g_ !_La2!)oW?eE:c Y6GOKY/b(G_*̿?=E7rw>? [GU$GB?Ve͡-qC?R:%?uD /m~.kx /_Qï&0Uh{w\o|şI|2"~1¬C#ߋZ'pॿu^,K1#/f_]9?;-g_ !_La2!)oW?eE:c Y6GOKY/b(G_*̿?=E7rw]xjO$oO|z|B4WO- F0hWz֗,>t[{IuKKKW;nm 8SgnpwNnjiFx r/vROv}+x LOgXҶo Fu)jyєpt0 V*Zg-[[Fr?O?`>ouտ~8+?TϷN?[pp!?)\q@P@?r/U@uP@P@P@|@P@c'KOw+4;q_"r^!^~ ,ᦍcKU~7S|=i>*!'¯vWEċei4=o]O4=X$ _7xĞ)|s{ip΃#w>uq?<k_<ÃqasJy#4kQ6QF'81Y2k #pOx -|vI[!Sd$MNF8ƩkJNRx;<qZ\28JPZ-8BykyFXEG*~΄8SV/5Z+#✻q-Hѧ[WCΥ<#% _k)ԭB\f+~ؿ( Q<B~ǞkKU yxok|o i=o^-ӵ?M 74Ͳ:%BZr/-Ĝx6*֌Vuo>W<q5ԣM*06iqH(v'2:W_ 11B?<3~_5gGO7OPx{-0h7 RVSUԯlo? ?dc 0 ydO2TTω,E ӭx|4gK JZξ&?qS߁.Wf|9q٭c3Pl+9x:Ѝ7R\>~b_Z 2`&_;GI$wwWE\׈?aޭ^W.c~k? t-wǟ a WŅ; 뺫k:ׇ4Xrx *ysQc u%i8aZ,w[WRhm|a쟌|,PƮeg^SeӝjPb(KF,ti 9ZܰJ**~f?K/zO9,AyE+xo]kz>6:,,OdANJuG|<Lӵay:.>U8gu2 R0U14byp8L<,V6~Ҽ#*s|AxOye?9yJ:QT{,Fgp(bqYm?g8UN8,:/ 7?j&UK˧g֣a^+mX׵?3Z_Cs©#>9tX74_x03X "U1L]:.a1u)Ucu)taF,V te pgYS>K,~f?W;B .~u:t1z,bIRaKMj؇dkO~%~տ4R?o~>!vw|)K~/ _w^E2.9ϥ¾)'^(nyF\4hС=BxoᏈ_ƹ: ?u/j )|Um/:6K?>*9}>|+~ |oO e7pIps>:˱ˡF?O (JrGQPPF0bؿdN s3!N9_<ڮO,MoBi%#XX)Uu!1Jf۪hb{)?׀c|:DO]פDtQ_~ IUGT'< &bxnϫFy𸶣 x(5EFa[c%VQu)b*?/8>N fXcɳ)NܪG/(BrpJg5iiBHGR6OffukX,7c05|E*Ur*3U15h ]XQt+V6ں?F̿sqxl-z\wqTpt1iSQ.ZqPwJE;IjO(54? 
h=_C!<Ӵ&xŏ7|?h735KR.5Oĝ4>A𮵡~8K"ນunk< 1ptuT%<#VROm'^T"B\KjY#-upƶYfa=\~/BmNWpMէx<7c~Þ:z,bBW<).?tO^YO/~%i/_5MV)_GY x:!:vXp]wc9> /S50PTሯJ#,UYx| Y_Oe]G!̤Yld_K*U{)01CF<\"oxسg| FxKdƗE~jzxOCź[k;_|Cx~/>V7g_ Y^k=c?7^*c͇XʔE,=zTgZtkrl"J_*QUx*a|6"\!jx\>; _ }:/)'}CC7#߱֘֟дh6cM_U2aRˤ8״KueMβ YH}KkzӊyT*'o8siO2xTeY2Y2MQ*J2:T3δuUzJ&??j/[j*2O]|iѩB< h ,\7F>5Hqxėou+7=W'mœxRĹRR҇+c0:tRxʕ-\W `i)OWa5*)cS,k]qϚJVCGY\-zx }~`s(WNJܷPuG_hP@G?eH.< oMEq#¾ЬB[]/ӴW縖+kxs4PG$c\1qX-*`Tm1vS[sey CS=|N&iQڌy$+rSk#.~˞<g ܞ,wMog>xFG~̺X_&9il|3}\j.sx~k֫%, c,E%g,Dq.0hҨㅧOZlD?x&k |MT#z8>YakI`rFx0؜6{ ]^)7?bSž$ώ_ÝWG2|c(>,xCT4zω#Hmt w\⇋ C2.:q&MF3ʱ8Jj?ӄ(J$!JeźiSRt(0oJY>'1"N{xw ^baKԩ[8NL+֩ m\e~|o'~zoĿx,Wkw9c&O:,GGB{s6xu=CI4 /#n 3 J 6kDRexl$JTS89JkbVss?_|#ps¹>;lV3"ͺq3f>1P^fK aVt0P ql~ԷO_ m@84ٓoK ։tvznh~t_沈杨àYմ1^_aJaa/H̥Ӕ%.ycgMT*5u_O ɖYM[ y.XVXx;Te|+޹o>/|2>.7dxa2|F"10|t[:W',4qJ5)׭Nq[VtRҩSK}sQJ0u_!Ǽqb֮Ep}LSBvź[?Eo  5MO}}\ g`y:od/".cM':4)j:x, '05j,"~,{>_JJ&JJlJ W_7@ > K}rWߋ<ť05 k-g 5NGn]h4xs^-7 03Nyvup%:WJ\j8.T0V*^/ar*qiVmyV!G ƧuAVk SSFJ\³傤[ᦟS??k> _{B(w/u [(!<1nu+Zu_x[s:>2NYn;3zlDq933Yt:l0&tRKRx|D__b>$g9e)a!V8\v$g ,M|7],}ya,Vt*%OZ?ox7Wgߎ>?3Xs65v^񟄮/a<7_Z{j$.BHwn:aFXY.m W/4kQRnUC%[Rxyx3(ʥ\^O,nCԊR'C8ʆ)SONT?kJ4{V&^_Mw4? եCQ??7+I&=kiײhw]#Pfl]ogkMT6ڭ\ֲȳArۤcrIp~QAԂSpirJQhV_P:#RI5 UӜoQHZ^ gؘҨ;Bu :4b3 S$jV:PR9i@p(_gNSE3O?wxauI~|%cW; [|=//%Ξ"-5yOp32n+9n+zX'kQQJ^ί>bQЩajY L&'7|ŪfU*=.~հX*V&3^ i_|_|1b|Aw^?u?>8|^#>^Ro mľ>':շfqwxqr8i Qq,pH9F ֧'Zx5jW/+eKxW̰Y upx|%^1KV+Bqc0aK%|%Fp_oxB[x=݆Im~"x_~!4iy[Lӯ_|T'Ym/iR0t0rQX"jpJXFHѡNIT/٨}GO8㼫.⿲7/1X"˳+a(U$5#BҞ#Z.TS uK'(G_05_Ɯu{^OY*? mNTu Uo [.|o9#QFA/w;b,5OWZ?^ΖgTⰎ2IШ^gN,>.3^{S 086\G+k.kRqќgF*iZ֭_:^27!$@e[w/oڻ]*[\QA7>~6~ͩd[ρOi/E44[=G׾!S:nZ-wG%apqf/BshœѡӍ,4珧*ЧNN" 39x{X:4ȳ T!Ju_NWVKJJ<=\>?V_/8пc~i\T4KS▷oxUr? 
1W&{J q^o,G."*:~Ve*0r ѧ,= .x96;|C,5 έ%,Ƅ2'& ,;*R'C EXʑo;Ge~f'ïJ'_"sYDPG;αΦeF.3Z2K XJeg{|]:M/D!~?K+-[>.-mcvzv'ş׺qxdž|oMKCW,G:=i<ѕ|1F0R)n5Q(Wq~K ʗp^"8\/sֿRQԬcN}gGN8=*_Q|"<~vJN -Ʌ-cQ꟔I5ۿ5Rڃ^5k5izį $V#i2ltۗY*<Rk;RJuk؊)&Z)ҧRqSyu ؏ U+NfO5/޿ڴ/C|hͨˢkV'VKf`?& :}jibx4E rנd55ZZN\qL>ب|U|l1pK|6*8~#b!WPtSS 8Z|w?g,a{~-wtyK^%ͦ\Eh?|Cxwƴ{-7Ŵ?zƟYo |A̲^$[HҮ~e*+ʬ|bxØ(N_NU+A\4)':|:[g{Cٞ>#Ƭ fٓQ_o|Koțm =[;I&AegOo N;6xlFcЭKSq#^ҍㅄ1|<le`c0O1q8z Srx\ [N}iф<6..?W׍n{Ļֵ|3eKY\ip8b0<ʮ-ўa*:8_ԛpج)-qQ&|W?bWh3/*πߴǎ~~ʿ |;H_߬x oiV<cIܴ>4jK.o;O"᜿ <lj(+c\p|FS5<#IJn1ž-ӷ5:m5TW3<mpv# C-Ԇ hʞ:5k8AQ˕JVq9ԥV0?q~hz*-S} x?u|NqkfMmgŞI] T_8 <|KU7kNXV-ω^uqWs%d_JO q͡.WB+q iue) J1OXXPJVTE|q_ї|QJ8%~֟|5Q[ݴVZ{h x~X5 bZ|S | :ϋ"8w(j'Xr*1xG' i\>2Z|ЩWڸjT K:/aV1 -[qQT1xل 5T*Qڮ"t#<1?? j}.F|3 Z!oTE?I ~ GFU͖}L32̷/>+&&i{[S(ӕ,ʣI]J柤' q&M.qgr"X3?q<aN5%K:J\N2N_'M}ٓUh~5O x u㷉|-|[Zo+ƩsV1b}gU[~x,3χՇϳ,eR1jU]4q'yVq®__nYpՀřpQ(䩀u\ƬSp1԰pMG .x꟱>|wms]}z4[Ƶ4|96m<1gu#;_kīmqׂ2apXSfѯRT0:q[NTq#N3b*cis3V}xQc,*Xj5+¬r(Ũ0֕8mT1 Y|_K"_ϋP?YLeOĈ|;/k=~Y!{~{g- źZ3-qu~*_^W V "UQӧL5jqT)^{W,W ^/IN\G;{z8uQ9eZWھo3/gB&xw7c<ڎGug |!A]}|@jCֿ<+Xfy|!^^*1ův 8׆ uVpǎe~5pA9A'9C/XGifkVE)G S1PSw[#Zo '6c:9>f ;鵿k ^=^<_D)K%Ąws-A{:uq'\+s8j8&lV"IX\5=WQa:% TN?g|q` KFpU|v6hpsMӡF51iR#? av#jw-ɓPTyLkO vyrn5 JWu~5L׍yy*4ԩe rn uN +T'(רN(ѥqGAO%"b+8WͳZQfWZNJr JwZ1GP!š@ ⶗\x#Rij~$nj~+K> \ֱxRO,βÈp]ZSNД#j'QWRxaӺnpe<#,;]Wx-2YT>Ҝh5nHׯ<\SNU??OiJu>!7E#7~++^卢ƞeƹX>*iXjvחڇSm|\gX"X*QVGxa[*1Wn |%O 8`5J"ocN7 Al K'wZnSZ >:.mvϠAO|`, 3Rҵm<|O\Pߊ0RgC&S?1)cMІ(N9R^ SI=SׂsN*q1F6tV.O _I:lm/2|e42f^e.siciY4({\:wi/"82sl7\V~I~X t|BJ)Jpi|.u{ {#)|vwx: |B^[hau;WXԼ%t >omd8Y|6L-|l'<oy}4&XJ|i$˸ rc5e1XJ_4|AʝzU#Z3p1f4)G/_(|eXGyh/)bOq'/4|K57*ij5}&3G >%) ZUj\^5a^8JO2N7^q6qOǃhe߱Nx>`pOV l>.6xZ2?Bs>5 V|x^/]k_y4o67|m umcm>{qg`83m(b#5H}bg*~Ւ)BsNWpL[kxcQ熼em?Z~۞8K qxV-!fa36HMPo-~O5U87i7 ZqK qT!AUK7^<ĮY/05,ʆg:9v],e,L/ JXIbjb] :҇?E e|e |TҼ' XkcS|t.~$xCSCP'/~C M_ Yf W 5tgSa? 
QpX:xN*Ӕh›Jhk)gy@/uk+Qq&/ ۷LXo+d"8佸E Z PI."-)\Ŭjwi޾%x\n_)6^?{Ғ|rUbҔTkUf>'g<#¹uX5u&fU=:5a% Jxj~oßxO_ Xx/ǀ{}Ŧ[dY^[FfSԯgu 뛛˛3,YeckJ'Uz%䒌#)ӄcN8Ɲ8WunA`rl KexaxJ)R9ʥZ%:uju')== :˦]Py MѼMJ._O]+W_l~ݓ{:^K)!֒Z\E3l1\Ex08UJ1`%¥hޭKFW*jXJ?3UpY,s L#9sH>zp򟹇`^*qiJ1RvԌ1?`/?cO gWmmFAƿ^)׵=W_t6֩ 'kо!>!59#U/8p:4s.uhÖ^zSZJuC5w{Z/MI&+1xb1?Ǝ".|!0jX\VYRO Nq_TRXJ?G37|q_A9g@~%hτ%jZ~|{xQ#Ϧ h!GI0+LЇ.xyVcJjR Z1F^έ1Z\gfobj{(`)gJi֣:jNx.Q,ol<)i> k>[xtRFkMCzjk+*]s?ڝv\FO?X?hh`*K1F8*^aZ Rƌ'#(!dҼ!ax[2|4qw83߳Z9&(_O+oTҡ/F'nEGBъmmc'IT֣UIlMzX灘\ ORTjiT 6E:)5R5p1RrVB ߈>#+~xÿ4O7ƾݦx@EX)EW~d3epeIn!hS_3*2N T4FSiQQ}bN[% SS;⪵abZxS8b0|c?G'k_M|BbNki3~V?4[/V:K˫|_+}FFxum{Mih3?B[" "{%RXK݅:i+ZZ6噆x<^*TOLj x7>4Zϊ"8L],TӼmNY~;)WGdyou,wO'ͱ)+iVi޶"ֻaVә~c,*2G FfS͗NmFӕiVƱ1{{|F3|h-A棫qxL0ɟ~.d&#?|GCjt9lx7ϏrRb34KCVSJ2^8L7^mQ.igIWS>N`iq8 1ŷ(KL†I1ut_߰>7x ~a_ԊK&Ǡǫk 9(Ddn]߇,sƘ ¹^ i3˱q0M*:40iS;Eฏ3C}O60udᾭUVbq-?¬n*'5 tTڷWJ`>ſe{ĺO[TajMHk~.xº\:桧7~+`S*9ng_X|ENWXy4꺘_Ood\0hs|O6fjqⰘ_bf'AQjITT>ɮ+X~?SxeM@F/dN'VdOF9KK|UHK<k!h id_xW6O2+c\%ean`YkΦLhBOYO*c*u$y?d| ƟwφRx[,wV|o᷀4|&mf).!|zxi:x9եaOBt)CZjxj+IƆR_e~|?MGNѣּ?5OࣀF>&iQ)S 4ޖ#*Ӓ'Ne,4CtS8fhTkjLhBw`/io25#o?xMkx86io2j uO x{6M}.l>!i)&nLWeR\Ɲx:#%<.%b׃*Z8>TsFwOʥG(2RXlNT;q8JM|$t/\dg+i>.>'~ӟ]tH@oxy_4m:[([(c2,.>׈0|U\[Kҧ弹1Uj0NSN?OMPLn+C-b3WjʦxOSV0nhO_[}2-j<=xčKoMk55WO&}2n&4ϱ G[2I&XKb*bV2 ԩJj|O*~,4]}(5ʸ\gdIK NXRvgR/+5F|6/(YKJmև#ljdO??xÞҿh?٫_(<7]B׭O^ ֬n toN ZŦoxu|C$ Io[q&_4\U NV_ 58eХRI0MJ[^|x?589v-ƌhѡ oI{F*ф%R U{Hb(`x G:K]FQh%a,&. ]cbQsZ[|_?5o3M<)q|_~~$|5 =喝*G;>i5kxL󬯞zc8p]N}^椫*aUѫJ"  )*#e\gyOcfYj-ľJ*MjQSiWZUcYTOq'~<)>&gYд+nfAַ~L/5-P.Kx5M6n*gd:^eP4塊RhiI7$O5oԧ`0gy6-u^)X_'<7NUA+/?_~1FO ~((s775WMt_|e]mh>2~]]ǪXYX^+K򇉼EC31_GCJ+apT>VqVXJ2xܗ&+:#2+X\n>.AO ~&pux>hF&eqdF6}` F VHz7POC\VK᜛J2UX4B5Nthƾp<_7ke8>'YRIx8[m#KM/ZxRi?N|Oei`w%Σ(SZ>qQXa r*9d*wŵx/l8 ˋx'5Wxl)u /+HsFu+VP_~c+N -^;ۆ?;:<_gO;{Z֫sXZ qus,q(eEfxlקahԯV*4iEΥIܮ?`17 j~;K aVb+MSJ#9%ӫi&; Ɩ/?j/s_ڭ< 5e`*;F⩨jqh~O$I_P@~(ȿW[]IP@P@P@P@#qg/3>8W_HʜorC[{ćOm#_؞:YKO+:! 
0?<5èxc_{V5M{M>xLJ7`sL=?e0^RT141XxRb:SFe'cR a<WL{:j (B0ѫʝ)S~4(OGմe2|ey^C|Pon{lMzu&aφ}OH.4_xs ̳,v#?aj*j5|*SW 'NIJ1 ΄K=Ubr|-pJT16.?5ƭRӭ9iEږ&lo||)o "W\-/>~ )L>`>#.]lB5Yn~ga?O;ŵž/ `cKf4a HAVQ uVIДJ/ܷ)dwe'VV*V:zP㉅eJ'~+a[Gu xlx#χt/H߄^Y!3W X^fs-֟'>$ֵ? q9nkX6{1j5*U)BI:J5+4YF)_xwY\-r ܢE”UVBkΎ9:srm໿LAx,-?5ox\-xzRm%_^H,AaoiN];x<}l/1x.KVN # U*S*nrʰ.kax8 ]li<=L|ic!FOQ&VPVlUڔn<>{hx/ /_|OnA#O|?ÏHִ/OH/43W\+QѬ~3Q(Սh.qt+*7s2K*{*|k-ᾙk ~⦁`v{gaX0\k[^jwZv$CJ%l|9fsʰu& |l?#㯈-?ǟxNvNR6't)# [xf]4_h -| [i:7_8.eS8xS(8֡e[q49,d8ӝ.INœ0SIJIO57>qoK|UpxL¬kFYtUY֥_IZ.Fi)7]pW>SU*}=47h >&/>&#*# \/໏'uaNkb#G(VBS:Sķֽ#¢V>KHSRqy <q9hT֤i!^1/~55'T&94mJihfbl?2Gi^[ekeY]h8Jj J.b)T9^u*Ju*JSslxöw/_q Ʒo \z߉O>,񎽫k,t.m^i:6x[ 90iṞR=< qPBZWہ~&xŞ&Sc2*5Uxdsር *َ&LeJ7~ 40Е,7{_9}7P/3I>57Vo.5WO*Ju׬4K{(%\1fjg/1X_9( R4'GMƼ5xR(VVgVR 2<b)prQN)Y|XhaT*phќ(4/|{K׎GĽw ψzϏhG'u":mMEXhVt_K_ dy-nt?1pqԱ ǺT ־0is+duVkyyk:ZOm1h5V?|sن)< L5mHEt1BQAףRI_Y_lp 9g |̨qu*JѫCKIUXlNыQ᷉~7ekx?WԆmoDx,Ze4o 9-#A݌ oi\+d8 +`qscT157R:NXm4N)Si9~ž%qw(3:3([*<6LTU,:eTyի5ZNH(o_))-ޙ+G_XG>~ ~Ԟ><]apx^ѯ~:~ sJTCb#Pj(R 8ѝ*8(⽬UjET> 4kV3:ag>+?Fdb)ץJJRRkU3,Ꮙ0eUEL.5uTa>=_o/hFE? j?ul<74_V{jZ^V=:dT'. C(R)Zӕ,rTi/,_™.67 3c1XHͦ ”(Jkf8)/F[o6|J|Z7~̿ +7o7 wjַ=GH; xW`mtzw捣j:epggqˢc"N8z0tc/yPh9{ҧ&}o`0po'S6xg*Y5%IcXxx´b>]OU:5'쑨_5ZKw_ Dk,F֚8ƻk>K~UwpbjB-\]5}uMIӼ/Jp7B'R9qS [_xz8Zp\$Z1he:ziR:o;q0<%)q26.Uqxu sPЧ燗0z0GRdקJ=o(nkls/|L,I,|/O 1COxKtM:=r[MVv>†UjpwBrN6z18m׌a/<bw&DžRe|1ggi5mthLz^ 7FUZC b׼Ox^%woÆrpL2(f'¶:,4,40^ U[ )Ux|S9wżKx,Q^2U26YJԕ 5.X<7=П NK%pzur4M:sJ1FiTm9Sx)SӝZug+s E˃qkPiF6ӄaGS:ӭIEc ԠJZ0ſcNO5xY׵)Q8j-[\n;S5wĺ޸Qu Yc{96x_8p.' $ 8EFx؊JU*U(ь)ӥAR<ƾ=ޯ;2SXt{m7OjN(g,E,Lg0jy֝6|s*2t~Q8VBYzqVΜQS*NNJqSTS5b>R"^G| g?_.h>۬$⏊~=N]~>$~| DJЭu-9P,aCKWф`)ӭ^NaO18PJ,=JTDT<1_W;(|Ix%Rl> TNU 14kb+bjAIA:MZ[imII1( (@BٰzL= .<> J = 0TQJ *TScE$?xV?cqXezV+RUk15JjMTV9MRmݟ?fϋ?ÿ$GO>:E)y./ hƯjkˤsEx佻1MqYeTgq/QBcaqPb-wBtUGNB37?g? 
|4x7P5H>*n.u-3J<)ZXkS ZhPxNӵ^+:G4_;<x5\CUbiTkK ,\eN7dJ:T{x7L/2)dr<u0(:seL8rT^jU9[ ZUzMYUׄ>(|9V 5k;6ծ5kiega,piֶ}aͳ6K+j?+P,W4U9a*\gI{:jpMsYCÒ"r8D԰^vB n:XLdc[*҅\=Zy^8Zzo_?%Bo ~ hK'{;FGloxP5O|s^Amk&mi>ož9V/3|jf%Ɉ ;u#IWIT^U JlpW:0YCF ERiamFia,J*Te+G W._~ݟ .-5;7 ; U&x* q'ۼok5鑙9׵]>nL fQ|o__Yob%1k8NtWt.UUaiNujJHS*sJ)SOZ8# />V^b#ա.jZJ8j3QKNjJeշK?ggg g|=i ,?a=#ՕiJ|AG#2Wẕq.q1pyo-\ ZènT*G (cB F/+?o幆oC3⾳TqQ9Ba^^ʌ0:Xh8mkGf4E?h:4O e ]\#A7 Mqpl/oWڧu?kڦ{5 p1JLQC83Y*uBQT(ʯjӪ֭^jTC8Ǝ4'K9#`rF>Ά'NoNP^O \Ӽ;mEH'H%HuZxᅭ SHƫY^ Z\ag&e} qGj,FLqͧ<5JI{<ƕjJ1<]b2" βGe SN.4eda%*\ß Cs~|u-?V3_ jr>:S K}{jZOywkWf^pXC9gͱ18Y_CUamJ5VJ# jKP/F:s 5' ~E`13rf8ZE•WF,=)եB=&U #x^Ywkյz^+mBsW(>8\^$.<'x/8oe.ZrJ#VukPraBBtN0,>ѥGapXJrTjJu*Nz**ի?ן:>/$ULֵwt}Fҭ'5m[S/3M[ᵳIn%cyT>yZnW 4(pVZ TRԩRrj0!)MF1M?J^q F#ф֯^)ӥF8':jԔaNNSbi/ ĞJusc-oX|gILSSh[K;kYφt+jv*y6KZ\sSq6&]tڋar_ZjXCFci>H{ Z <Kr?Mళt"7ݙ3,$$IJld<GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxWk/wܟOI |}uxZcYx,~ "?\7~2%滠kc|Wq&u8g߯cU xe[\/(ʤ`aYU>wg&K~1feVf^cn%oWVT3T){Zg?gF:PA(8W;z_?pĺx7DwlJq8sԜNoh+#< ఙv p|=J = nYNOgF!Vs%rZ: ( QI5 H`3XBhHi 9KXM?x @~Qҏ鿳 o,!+6{u ICLGЯ6YCf:KI'Qˊ?b8|-Y>%'Ïq/?%Ѥm?(1kB,aYCaඟ 5_ mϰI\m,Oi?[O?qGLGЯ6ĤqC6ÖEçd4-~&#WE?R|8"l?_O?b\Qƿ+o"Ϳ)>덿6/' iG.(_f?urȿtFOأq [o}JO:o9d_ :KI'Qˊ?b8|-Y>%'Ïq/?%Ѥm?(1kB,aYCaඟ 5_ mϰI\m,{o$=?I7~nZiϩO?;`]q;aIƿ+o"Ϳ)>덿6~S+ޛ+P)=/g5b|?<)<={1zT5ծ5/QѬ>%ΡJ9> È,~etO1O[ u&tJU(ЫZ:q4T*=?~~ʸK'|5kSrex|v5afU0tTU40h*Yϒ*T7 CB (ۛh_gX~ޟ~7xN^㯇Xм!gxGS!<mex:xY[cKңЦ[2<A`2<}\n0ֱmOk:\LZ05.i'9~ ƟG~ &̸78fSC9~.irLexPҝN|EKՔH8?çd4-~&#WEo?urȿtFOأq [o}JO:o9d_ :KI'Qˊ?b8|-Y>%'Ïq/?%Ѥm?(1kB,aYCaඟ 5_ mϰI\m,Oi?[O?qGLGЯ6ĤqC6ÖEçd4-~&#WE?R|8"l?_O?b\Qƿ+o"Ϳ)>덿6/' iG.(_f?urȿtFOأq [o}JO:o9d_ :KI'Qˊ?b8|-Y>%'Ïq/?%Ѥm?(1kB,aYCgsao| O+ƿIeSþ|)X5|.q|2 gW֛n>)ВpYi=c{Ƹ6/2T``9rc0Սj ʢJ0j+ }? 
xq`reKN|_aOFr )J4NpX7 NI4,I#3#gwcfcXO$Ÿ  ( Qگ ( ( (( 'u +5 I?EfI-<x>#1?ٞ慡ڢ .V1@P@P@P@|`Ais&~YW*Z ω++"u-O^wi&h9/$w#,嘒MzP@x&c( &~c'_5|i| <]_3Tu kpizʅtM4/+LlmҀ ( ( (?ॺWB{(.?OᠵO7~ h0i?GmqCrih'౾ǿk |1x[ OZG<7'o~/>"/|!Ҽ0_|M_ˣ:k֎4n 꿴;=\6Ǹ>47ྏusJ?gGѡ]Iu]WMӑZ@4)L~_|[χ? ~;?ᖝ.߆ Ox0/9kzI^c{ipG$ɝBFSIZVI~>PusOwjL'_K{4-q/ j:ީޒG2}gh2}y]~Gfu 4_/` EllB7߳_g߆~4Q?JE{Xu{g{x.ai.#b%>$7CƟGϏ^k{ ?*G?k?~,ϟ |-}O7HUkGNѵ0^>x|{O*g"wsh_qxHbC|chvu%Ζ72*I^/g/>{4ߋzP_jV+^zνeo+=+ #Ԯ|;& ˬ]g ?_xz=?W 1w!Z|!}~W~Ο g?V ]CI;LFW;1o—'u5Դ𿆠Mi f?d|'LzWij7 u?xVs[Ok2hXqY\|9vWn-g__xwE>"Ӽ!|K׋4o^>xw>=Oj׭wl!}D鿃\/sBƁCx{Yh ޡazz>!UXhΙ/D#ӓ{Tmo+|L@o߱?|"5_|W|*&4Kϊ'5škZ֡o.,~=k X<6[e [gu)'|^~$K@1 .Q ծm.G > C ƶ)MeDGO?wďL{]>|]Q^o xHz  5sυ5}GAQ#%o/ x_߆c8]O|?ahZ|QaizƁmV4cWdԀ?A E } ( w ?&oSv9*(7 @g >|W;7*xc~+P_5?UoZƑc5ޱښG{vPgjv_tO'm߳_4_>#|;[Kŏ))|9':&᥸$kyBCyq~_J~|J #LxWG hzڮ|Gi F; K7O4I%6ַ3{Fy~>3~η{_?|EٟDh|o X.~iBRmoN4dl/)nmO Ξ?QĿ+>KaCWQ;q=ީӦ}_~ /mnI_?c/٫OIq].>s 39vixbEԬ{»X оZ}3f?^|aߵWSO3Ĩu>*Nwi~h| OţEլ@Vilu(n5=:[Y}|l2~ iO G>m/|Kc7_{7?7izT<'+MV "wYkEO ֯o60w3q_ ~)6dω|A75~˿4_W<: ieӯO'^)-[Iu}>uKZVca'G4o:kfwö(o_?Ǟ(6smj ~c:bҮ}+V#Ҭ KK:\ W?쿴ׅ5o\Ҽ1|]> xֵzz6]gIE!ePjV7u Ӏg|)78|G78KDO|h:H'څf|)i࿊:^:ooxSFnmFK`b89<]{x?Qώ<#ߍ?f_:OOY²gXZܰ . 
\IUk`>^|7{_ kkƏ~//~|b"wx>8LJ45sO)ςa%&Yd&a?ę?"|PM">|++᷈|Gi,چ Cx ˟v+m6R{?<{|-:_٢ogQ藞'?xk+4o D+4]j% /~ SxY z-66}H|!?xUOǺ47*W|oGG|'nZ\i:RxSCxRn`osi|>?̿`ĚM Ɩ_uMG U~ݟy_:|c.>v/ aF'j̪yWCثxnYA&0]KO~~߶ׂ PMPG}_\sɞ?O;Cτ~>>:/,?i_Wេ~Ԟ*?f7O?+ӭ??x Su^5~~ßs[)S;#_~K1oxH?k|F6| ӴZ _ٷ෉?e~_P#gO7$+ _>L|ogL$K{猴E1|_ >4Z?WŏK ;{$/캳kX$`#tK|F8O7_ i1Z|J վ1z-x]eƟ=NĿe?e?6\f5վHk/W?MǍ-~xFmcQ bXп<+ jdzb}];^8]i5_~?S 죫߱/tM>$| ?kľ >YkKhfb5?Sᧉ~ 9^:Wě~4}3>7}⟅>+Լu6o/_uHt D̟mhP@_o'd PԟdP j߶/1N2~Ҿ8?ewaq|Ct{PݾG xVPԟN[]{NXxAtF pJ|~>3)3L@~ |fxO-KڞsxsAM:.,*Q@_Ij,' 1C@k>:@|G/{~/ ZSjtk὾w=" 3_mZN,v[<vH8ac?"㟌~ U~xb_ 7qZ꺎xRW"tD_@GΉ?|cA|*OKgh/t]r΍ NZƛ&?.d/'o>Sgᯌg|;Έ|?|;vzn|&4O`k|s_hWzOv.~?N]G ه9 7/>o>뿴umtN|Hwi7rx^;~^|l>STco h_~&xFEͿ!?c^8P|lDžh__4ߍTύZ5|j#4/^qaCPnxL4B;]M]k E } ( w ?&oSv9*(?AAL9P< ~?KaOڛ $DeV2h ?h?z;R4/l4f6cRn~:ɯ_R-?koo1OS7{o~,n_>%-6?|S>3:.=_ >.CV׵}6v:pN𿅿_Og┟o?-~_:oGD7>9k|J, -4kqGE~x|1G/1;׼xƚ*|Qyil,[ht'|WU]7Kzx>wTP|+w c:m ~-64k vCk][HԒ'Zkx?iRYo$C`/?2.ǎ4?>g嗄{cI֮K=JľJm'-e|9kjQ>*`ߍ? ~YWsŏ &𥯊~!x?@.xSjd3?jvo~֞lIеk_l-2Q_~80FqZx[X>\'7PJ ??W:R (. qw-[G4{KS>Q?o1SO~7' 3D:"?]t?Xx / '~'Y{O驮_Aϯh~X@<}3 ߷z?v/5e[˨oLV퇆4{OivZWe\?](l)wO|a_ OzO {It=Śo<)iU6/.5k3~+?kInE =M :",q4M[OV(]ͼ[|8j<<)CCx_Oϯ|#%l5ǿ]SF>ѬAszޕi_ikp@߲i7Yهᾫ<+Y+1{MD_/UBxt]~$kA`Gi^ψ_č?~g/ ;xY|=Ūeox.ZTex7Wz~Fb"?-oWojѼm?>.>=<~ssŸ7&-4o?jƹm{O!fЬUk7>yf^j< GO~֙eQ|j /i֟ 4 k]*XҼM5펅wހ4Am Vooq FC 1"QET8U4E ( &|*= ( (9PQݪa : ( ( (>_ (mȱ\~u~P@P@P@{&Pٻkkjdm' @p (bmigzbA_!8wgv P_o:*/?Ttu8ʰ v (>?`%'~ߴj/?5 k'GAd5x+ ?z~ >  ( 5iswǟf7 }>U:<ٛ#ܻ`sҀ= (>#|SHv>aKχ$Tp' ( ( ( (?3kvN#IT7]#q 4P@Jx`/$GGQԣ OVz ( ( ( '#G̟ OÖ8U($p'@@P@P((@P@P@P@/@P~Ɲ!xBk}[ލx/s{@Ьso^@u[hִ y5owW~ u6eĐ$2|C)7/RbH .m Ksx>O xL4 <?tM+L94k4B㵶,@m@P{K[[k{+y/,6ex'xTxYXړnvoO~џ]s_7 >"sZ]oqwt^M?þ2ӮsPЬ}7Z+}_~!i6x^;xDaxO|mxXLj9u],C-G>?/hP@P@~(ȿW[]IP@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P((@P@P@P@/@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@~(ȿW[]IP@P@P@+#20!=C)J( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (OFB? 
7쮀?l/c"<@?( ( ( si[Hy ݹIC$OpH?@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@~9:sXnb[.{,<)P@P@P@P3u #ڗ۞C|C$RE` bG"ta؃Ђ$h*( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (; xfMb.Pl&<<.Q?@`?l?: ,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~οX(5j):dΉ:~",`E`b @< Aʼn=O'wOY"8|Oe3?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@E(iH_ Md|_QD?|B}K)&ট ٢ghHf^@3+/PA (OأBq߳"(*"F @q'c~d|Q ï;'W7Z|']_GW]hfyVVW3././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/figures/hsp_network.png0000664000175000017500000045750100000000000024523 0ustar00zuulzuul00000000000000PNG  IHDRm9sBITOsRGBgAMA a pHYs+tEXtSoftwaregnome-screenshot>qIDATx^Uio&7 BB XWEE}uu~{Aۺ(TTRCBznn{۴yI.1@Ƚ Is9sfys3xy<σ mhCІ6 m#0 0 0vaaaXɭal#ʏZӉPP)|x<>pS( &al=Ln csOr~)\˲ IiGx uYIr(׍%/A6 ØLn c3#/{Iz.RD)-WJ0 P[^Zfm15mĦ>je)ԖrC`n mEew0v V6B ^A[۲WPԊVox (dI-keidwral-Ln cQ t:vNh7Q $IJ ̖T`4k`{WH).jj=-+?g ØNLn c+yY%N[ѐֱNL.e66Sׄz.Rp˪ 3brktbrk[R\K ԗ捏*#Jmuɋ[R RyDòd|GCǢ׉Z^OqaS0?ZoaG/RIj9qQƶ$ޡ<,KwD$uFtk\l6xpd׋aӍɭal*K%^֯h^RUES5Qrwe[m^аQ^etY[US(u 0 [ HJѓЪ*W%WRr}oBX"RP$^]t~_bxxWv%ZgkRr@]P?7 9\{LVuK6P1LFsZO2}Dkd``f!>} wkt%e u| ۾c|6 ø?V@_b.Ŷ;#~$[N!*y{'?pf̘J$!$5i'7-e.r/^\kD^3=b rieO*p\!*u5(ů̿,XN8]GƲtwGCi1고RF敂[Sp#a<al,²>GNvʕi8c.81n[ҭ*rW\au5ǣ eedvmTb\txS)~>xի^o-rz;"J>ײ(EVMal)v1~׎*@gxғkHj'@~Y 헁+a1^6\=UTImJ-vUTb"(Pܿkn$ S|7Qw#Ru\\M9 0wp0 IzGdI3]w݅>W$EǹuKۤE)Jc3'YJ5|3C!ZSUw qrkA%)NnC0 ˟魇k[T&(_κE]YJB'Drx#mR*-Zޘ[Եw,Rj%[MNQ']3:z)y<00MCt/mw$$$Jl%Zzǘ52mR{ ӓ mqSN0I2z]dL+%NcM[m%]tUv/nӚV*z1Oy\ƇO3gAדi0-0Z_O4B)TBԫ>פŒq™!uJ!Myu;[KYr V^$EE"bn[ "0:G}%\\Mljw"=ƴ2Z.7 
RbS@_ei*4:|K_#x{q5CJ 1?_1xcǷ)_g8q'3*G#0`†1E*~N >Ɔ;Ez-FUy `&\c4*"Wx(=8| ``-_ +|m!CVz;38%ʰ|l:-3}~B[0[Øʒ۲1ݨd/D' "y?1~v/V5)H= `]knK_*wp7aٸ?ccNZ]%>9ƆF0K)1b?Z [{@4 c4/dckTvr|ÿ>8=KBԑ>g"CWsu'= {f́_RCY;oկ~ .D9}Mk0a+0YfhrkT05i@r[+ xH%z{9ssٮDjs7u◾Cy3wko] ~[կ_jd=5ߖr'˗,-x;߉w>֍j|#pܑH1ݔUM0-0}!o%(TPBB1O|E.WG*>2$ŭy/z!~_7 |xNFVQa095ܤ3aGCPnWMYRϏ1ULn c X_e0ݤJ5tZk4P41k f4׬Pj)EZ`]+]7{'u^{?/}Kpwq9>O j9 _K.?9sn]έaS0_VvsmҪ/QÕ]BuKzIW zZӟ4Z1t?]8]jNBu-~ga?§V s5(??1 c:05 c''km#c R+ J.f j/xgBww7Λ}s˖Yxg?dWuѬWxۢSޢZٳ 0kLn QW "L ;B: ^j*j)}mxo{yUDQb\500Ї|'O 5n D9QB^Nv%&a3^n cJqt:upwQ/| 8Ӱ>L,F.]7^AZ N{s1N<C  T9k;݆;aeuBE7@;(:fQ9zoۖ -ܲۙ(p/al VrkƎeT]lh6$?UGe3JOz {Gc>;a֪8/k~sBllc5 0?v6i@%N ;Sm^^,]+m3վ* PEWslTUT)(jZ 0燮 Gw/'8kܭJni" mS,xK 015 cR%Xa:8,nZGԣT̉2l` .}ƔA[9Ԗ7vjU^ doqm0{@RUJR[vZal}*1W\aɭa}ͦI\ƶAR+fΜ΅Jmaa#&a+ewjVo/^j"]t){owTW͚a0Djlv 7V%'z)|(oˇrߎ?9mr0 15 ^Qi^Qz*n^p$QeuAUB[楆Wƅ^nZy[0 ؁N cHFAM$wR[n׿5֬Y#<tf͚n'[WߪPpM7k_v…n3frn0`rkSdg[f̘˗cܹws9Xh/ݙ|͟?wFFFKߊ+Y05 c:15)˭n:&uZOUt*mOj2\߸($y :*+*ݙ&15 c:a'WzD,|/;m-@yb88'ܜR2\(mWNoL /+R"exUnbۀ§e"K^Jq 0M0al1*QۙKnr'W={yIp5sA? 
?^{Z{^4c/Sk+8tX)؀h} YɭaӅɭaLmAQ2*ʂ{ǧ[/V\w0L`U"W[i[7'Ln ØNLn cn 2Nn%vcj`)iWGFM\I I7.x$on-[0MbrkSv[@aU=V7Pϙ)d WJJ79 }wHarkqϘ1VVY_y*%OCJo5火:ͿϽűe9ӱ^ʄgEDkcnTf;00G7Em!d| 9M/UB$IZ9--P^B27ewXlbCN%4{s^#.gTâɵ:h\skWbX@ʙ;8&aL'&1ELn>z]1{ʕrf Dr<nYjZ & MOlS뉻/SuQΚD(5.UR̳aiuMtm4VR4wDLn ØNLn cn}$cjJˉ$ˑcxz}is[(mR|ەA;jSص{!XZxqJoMn 0[Ø"&ޥ]JY-˵V ::,')@BFZ̧sY8\Yc(*;|ZYU1Ln 06[Ø"&[o=*ݘ6z)S\^0 15)breHXROH 1ԩj^{jYo`yU %C th|yPA'!‰=m\bpU9BxW9\1(>Ӛq$sE1xޓOxg?)t8`, >eYFQ(qѨnsnj%xOw`]s6BmF 6o =tlnTT"Ơ 2N1by1V`cw ƛ@PO|,Ou+5 c:[0T!BdS n.MR$UV9>G |]=T~Fr/=DtZ'nEPW j}8 ,eJZ]ǡ!{5 ^Օx˞7Xr*?]} &1{uUWZL?؟ai0nRǔbZ\ǔqQ|ZZ9׸E}X7E^sr0N+r9wpJKxVV4bѣ>%F_<=&q/nE;L?;T_w괢JAZLa6Nߎ1122~Z/Vюhο:|cUDUϕvF/ݟ[Jl@5f8urߘ\mrZ&I ~j^|ʉ8koF9.2G@qEΛq+"m-AMMn 06ɭaL-CYSj$F$Lah `4⎡qϙ?Õw܅-Oeq;CRXT% PoeVxXC5[ȧpb:i\hz򸸯ev6A9KWԣ.Cx# ~<3^q:zU+J4▜'?~ q.s1>֤tcBMmUΛ<.6{w.s4 y jY1IviG'x s-ƥj!53 '1=qrƏ"50Uj/Dr;Qd_Gϥ2NmHPl6rɭ]hw|Ln ØNLn cnT5WrE548X2W)}\/aVF%S檆sw[)^9>y^L&#} Ӧ yFqG̛wLNюɭaӉ5f} rMܾ[WAPipbh1O4s\Mj 14abZۥ&/s K"Ȉ n[p ܕDLAcQP1KVlN~幌ֲ:D0 VrkSd.?{\c6Œ#C(vy%qǼH]ziniN)A$?Z/~ X;JP/oqO|/.Wb4D ^. $ ^)-h2׭ox cjG@}=[ъ{NUiUkj<8syS7ué+s\sgEI+ 1媫E(G3ϵҚk~ sBǘd][[0MarkSdg[WRz)sN$g<7)ۻ3n5}0cƺ1VݹWݶXvj^*GtU{utlpw[$Uǫ[-@ Snag8m*ɭɭaӅɭaL]nڢBڛ :e9sF)w e/.-n׆k^c\/eE5 !'=I׵qK lXr3)Ъ+^)y] Jd7ԒCÑhx^^ҡ*D NDo pQ{wUkalkLn ø|j^R-(z* rK/Ak! sNP^kG}U)KhT>W!ﺓ틻vaSCzƩ QL8\T"\ZB U-A$d ^tz۰f]x{0[qHU BMsV)CV)8GyaƶDEaHISx~DfSf 切WVkrE JLDs< 0^`ZA;bϣrØd{_B# 8C C ]P.}RØâ~a7:Z&X5{яG>f4dLNJoe ͝$yŤa 15 ^q/${)%R8I#Y1$^BMfH)i@U0㶮4ړ2$!iJ'.je)KBK | /)aMCrCHG]QSZaպ]zZ㱨ߵQ%V+d)QscXa\9ӣBHr9Ky,<gjndžaӁM øW|^*vRT5Q3.wӞYx۳/7Czm4*1Q*R#ށB!* \?V(p%IVje31|8L(P,"x)5. 
C].z+Ox1?Y=lRZ,&-A̖Wc̡d7(=im2fPUpIlHKj: n(u\.s 0 Ø*aL[:i:Xo3_97݀O}.psn.jkd:QW{XKѡ0Bʄ&[ Q=)>JRX*׍(!Zab=Jc#|jDp̣xcÝ ^Ş=oNt=*(W@Ҭ0Cs]g0:j}0/y:h0̕sCn6p8EW!_qֵ5fT05)s˭:q\EߨvooD71Eg~0yV12ªuXz-VX|2,Z/ʎNcR_*nS׮{V^dj=d\PoRgQ>8Q.I,{̉Lc'C%ia80lǷ^OR[}1Uwʭ:\=\U]p+?s0)arkSdg['OyJn *3sm&vqQ|z\F]T3>l=GA6#,2gΝ~Zq81t^i"16<ʕcXnqNlfX9nbh6CuDNɎMO/1N/F ]/Kn{eNHXcݚaܾv^w\yO®<>jݪpĹ˃Bkh:5FyMMnɭaS0N-;W FۤƬx_,GuT"(w \|dFCtRbFm0{]>CkQsJ6UM3Lf8 _ÃMc `{0=t/`.Ϛ5øpɢ!,boѡvzT>:UJ8MR)$Ksxi[e@g׀A۬.g->>zC8_N|.;UЊ^$)umZ3frKLn Ø &1Ev[ɕ6A;]+gPD%]*Ud+u-~z/^*9AAy|wnTlaZ/("ƹ['=P< GcvUcͥ^$aNkiBid;crE2ǑQiGžFΣHWט昂1a6 5C<׭h-4P 04G?}X8{&nc~ (1Ⰺj cLnɭaS0N-EXrOfrS kJC5%9tS<)vc㾨#Y0<O>jkpoq,ĘQdͣ{n?xX"z`&zv2(D)rě ̺xNuZkYx~/Ab?F6QcRvg|>7 xԑo7,wϽޱ-B)OӶ-15 c*ٙ6@֐r 6E57{^/ūСz*1q='oK#VCW1UW9, P?^'7W< ǻ+uZc&[11ӐW₀<&i<($5IBQe&U?*MYQ"ɕn] =S1UǸPOx ň)IҤsQ)82"l _[ymڱvnɭaSA2aDP{B>xAG%k-=ʥ帶sBJׁZ"Аr*aUeLJQXNQUSN¯~>=HCqNaCM)uܲ:wꌃ4M)i၌)V:qQs"I~).s͓1,BiZw0}=K/A€˻=D*T: h5ȪjuyL]S*0 0ɭa^QblF,!UsVjU EQq^GxGLyUc2j0: $13Yx lkܻJ=9=i Tm_WK(cx:YWRxE-2HDZWg>a}=>0$c| )V-0 c: c/TܣR\NJPCv﹗܄sF{U-/;Tڧ%C \/K(̿lXqi=/S ǓO<+qu7]+o\v9(1jܷUG(QLLs(%jw}B +ץLp4:[Ш)߻ϙC>l_~~1 -X} Hb׵20ɭaL[n3$^51 v QI_?(+zCWgRʢ< CWja`> لH $z+yŞ3^;GЗG[_ Ÿ+K/Jݏ8mO(iT8>%LǪ[Z3n{^n1NT0Tn#Qe::O8<=PZoc '+qkN=k{uC-WU0arkSdMJ>Dy,Zb@m`ي!\t)rj\pe0 XNJR/f/EF%%# B0(US?]\t2DIA q>ȃ? ފ.Xp1,]9;5CX;FǠjoJȠ}AKt.Դl㣨P٬ n}2fVR`] @_:˯NܱV,YWCC/xc/bjFG+hܚ15Ln cr㓈rWK)2T+L^ Q MD0ypob㱇8x/E` 7e6j2U#T5MK_~ExKoX `fkj8:hWDV 'GbF@B5:UCXj5֭t]j#7MwJ [U0sLc9u,eW7)qU3ۋt ִ>a bZ&eǴ? 
oxg̴$C-crKLn Ø &1EvfSfs_KUрqx +jUV-UMjuEc"<ļehuz%1Vs<VUE5V[iy}}!xl^q>&I>6G1g¹s0YG=an_pay50YCguܩcKq4'Xz - ׬E'0ҥwR,Z(^GSX=d *XO=d|g~zF>Ƙv[[0ɭaLZnywP=P>  V8FL+J#~QOb +\KXZs/&\(AQG<zE p|[8u97 keFGyRnEN}Gubk=zF/(jWmX6_qjiJLGR@ g1(5^buW#hR(zMԪ̴iǞ{ܵ0arkSdg[\V(mۘ.*Ȥr>ݒJ唷8OE.JU' bP=~\qڀeӿ$ICmZZq}+O}ur98@bDČ_mҺզm3TȸWM]cMB-P- (\BT"s[b#t_ɘҬ a:=˲2QQ}IX|gaP ?PՙɭɭaSsalaZ*|Uc= bUs= -0r|UiP)jT62.s8G3to]5VN~WSoz%>􆗣hlh5jĕQ fPuzzU V01 꼡Q8RJupm=Cʧq%(dRAA<uҠyjW͌h MkHU"R\kz4s;ڮ :Z&F'A׫1-a15 ^Q$Wh09H*UQP"HQx$ fEQ>‹osT1f^NL)ݚ!TZs:n*LCRΙ[Q% yB C1 %:6OΙ$ `+4 ØLn x(TOU8E+MqM7K߄/^u(D_* ʊT)i %M}_!W7U]CEP)šU16F-QsTzRnc?@+[r]Ia1ULn x@PjE =NY Q`z o~); }W<4G=SW]zSYK9 UNW ߏG[ D *Uu= NӃ֜ҭ SmJl5՝_6 06ɭa :Y?@ۯe#_U_܁]t+ځJ: mNUGN(醩)c|z2Ռ\)flZw}/7֋j A%wUK kIjRp}. s* 5H+\na1uLn x@RaFa MїvHFkGTaƺ4HXGZ+Jg^c(w5zrؐ KV/8/k'{-9 ba&JnDc[t+)qC׃_U;!Gȡz"'1R7)k MnS3rg/O\>ZsgV. W7=^A۩6a1eLn x@P)f/kCDh yX2ŧx?(t'WphcF¼z ˛kh8Aڋ];3*qpL[ }J ,FvQI;0]ZZE5#uz 1X'<'نaƦN c=mEQFK06ׯqڻ> 1yK0tr>jB#ga`p]qٿ܂!X^swҐ9B[$ns)64bv8!VZ٪έSuu\^*sxZ}vpQ'٧p7; Ƈ2Tǭb81Ln cn=Y8Hrw}k*!ߵgPCQcIO:G…8˖7/ߌw X0$MLu^MnQCv1ۀr-*s5A9p?uo pV^?߰ђ5{`q,@1[brkT05)bre!33pz籢RŌ`6:6ER>ʮ:>HP1Q8 wzn5V NrJa1>T5g"Z]PSf褕w9=1vYU;~ {BA>9@ oKߛn__B\y(zTBY䖘1Ln cn))e&ÀϦܮ" u3?)% ]:Jyϕ>LnMn Ø.Ln cnTF'k&%Hnsr؁հ! a&x k"os'G%"FVTкWPPՀۏQ2(r)Bƺӭ+k_/ %˽y$ҥiFeA ~NT( /LnMn Ø. aƶgI$L~<񤣱> .׬bw.u_0_}}^ {r|wwCuyh2Ygb.M XK~#B~D㙧Zں\(7Pk~Jƭ^xVc2 0VrkSJn u&^(+N?ܸx9~ an^;ct8KmVR5*= u缹G?o_t=>vΕN\U+\4j!Hpg^QrE7> XhC>[+5 c05)breHnSչ$v'_\vխ˺i\7W`2y)~G*CE樨JP՚ C$A<ȇ_On&O!eYe!$Nn0{Zf`rktb x$g9W_3gb]wh̚9YY0s971trs<3w4o.2ws/@8kwDE>{kl򘍣s|C&B8#s© CV=܌Jxy )0 cb%1EvP 7蛁kVww}BϋpҾ 0#1Rk)2 ?US\Zw,,ocVZ vΟMn@Ln ØN 01005DqYRnx}YM'-PswtC&ܐe }g>2}>_jrbrktbrkSvȽYa9Ww)g6>O}e)( i[mh-AbWܾ&Š2^Rջ-ZK^Cot{ͥؾEWYk $&aL'&1ELnXQOcuc~ pp˲Qd]eAj0B<ӣ Fa)$U%cqT$ %G/ ~ɭaݿ) 0Y^C0n* md 2+a}v`p¼`f{5p3aP}]Q6Zq˝K&Ry],Ie)U`p<.Narנ%PY?кaXɭaL+2$s9J/]:vﮍB!:t2^" .MRoBYnRzMH݇kLܦͽKaVEv'@?BT+ןFɸ/I\⺐V#Fm#b:3E/ãwޚ_R S[TRL.+5 c:15)bre:Y:!)Nn}BSQ0Fq~* @&p.љ\< U? 
|~7:/'S-Z3!g NTC+[+)ݺc8띯CjW[RuL.%ߝ[0[Ø"&[=Q{_ |#_݅SJ+S6%2AD) )jLx M{Z3>>w]J3&zAF%Y[$Qs(#QxK F2o֬zHOur$$ɭaӉɭaL-}ӯ>./X"ܪ^,_NjwyԖ-6y+5jȕΩ7qϞw9N7r3*0XUe\sT10}ifgG/{ xCE?F0b?q$B1V uL1[0[Ø"&[= o= * qih6έ(~/TA]&\WN5θo*r tV2*\ .)xHhT+uZlqoxIs:4 cj53 Uj\dИ112In49}SgzK6On)^榗  ӗ %6O( vK~x%$j"Q;Ҕ'BcF-szNz>UF.ҤsEWªӗYɭa=`rkSv˸'==saBj/jn^4JG%jVMyivWο_ylozZ>3J~ʐƍ)n00Qbס¸_ǭmy'>mF )oJցŮ h6y2ZP.9u -ڋU:pT|wG CMTا i:u|.AkOO8Yk UNʛJlKV)+q "u4crktbrkSvȂC;MkZx>JlܺY0,E* W.H)ZO3i'Kzo@&=i6¡vjjԠQzdžQ[;^Jw׼{҇^+C@=DmMHl!RZx05 c:15)brem/5foKV(m lBc"S'>-6fP'/.P9xt%&&aL&1ELnCC&ZEmfͽ5Y8 Pa7㓗&qVGLwG[uIS#R֍ )a #ve~|laXC2Mn ØFLn cna#ZYۋQܦE_[nJ&ST] T2kK[X%LL7D[տ-Jn赻WBq^"QhA6 x,zKPU[ve[erkrk`rkSv]A!uAӋS*#Pp%II-Ʀ - +ĻhsA!WOT5YņS\ǧ8M1Ln cn1Ů.խMxYJzRɦOI]Y5ũq!9tZɰz N^&. 6NJ%R'Z\G04QLQtZCR?#&aL'0 c#YPC|*JHNW}nX61b۫[6^ѲJD%!ևjq׉2 yQ[Jhq'8r0 cz[Ø"VreSXFDݔ'2H;ٜ~7F*X9U1]%EyrqM"[^&Jn ØNLn cnY։NڧڢTWUj$iy1͍Gmj37pzNn_ `\ $Zχɭa) 1 B -:$,Nl11ʍOLolxsS⭠.x]|fpP[J^Sb,~mgFaƴ`%1E01Xɭaa`rkaa4aa; &aaNɭaa`rkaa4aa; &aaNɭa::؜tL,w}lk}O57487,+ݰ4P$h=F'MS6s0 0HCYv=t]̘1=FC]!yH!Pr7[:w2ݴs=%\w"o{ĥ5G%#Bd^cH7wGg1epb<ƓG NWqB]s_c'㾕ʷqs|4T*r?t˴;t>ah=E H%e;rN(z*.2Ċ&p5D98V$|S)7ΏBv=o%ibMI_!3u։h_0CJ@i}k)h)[{Z.lc̊Y)%En/-:<[K )ؒ ]fÓdΏDZG]:zޮRMn Ø &1 Z-'%;◲g֬YXz5*w`…klQ&1/ʛΦrnIlrÉX_[H hP~]h3\鬶j28ɝZR&4[qs 7CK&k"&H͸s1~_aݺ!/u^Z=H" {al &1ʏ~WIvG[Oq%]w9QI`}y:Y=ziҷA m_q W2_3UxA!*-~LKv07Z7_#By] mg/eg}[1N9u\ק"Ot+ ͦɭa[ug3'K$E]N;͉mxhS >8.;Ҳk@*Ɣ צ7iIijQ 1nE5O8ơPqq^ql[ c׾|Un4L[ȆcZz <97⸸R[(_0 cK15)P2 _k׮ tLi#;()bJ[@.dȹX34jvI!zZ'UY&12ʕZHTXN+ |i;qx-vUB9}}NYMRn5[f|m-=nb汤Q}8 DiJW*K&.4Dt9ܶ ~N^thw0bJ$Ee!z z+-˝l+-3[XL`l|B$NQJF/+JdrOIҪc| p /th1oFǻ{̋isPHyL*iN(:ɹ.Z>cQXuR%Wi^(Ӣ)maxgu5HЊ7ꇇ)>3HhQbv⥩Rk8p5עK׫T#߄zt?iNYjrE)0=*H&4+mBʗРi-{+^#<q%Sʸ#SuTԕE/-ST+3x< \y|C2U\zgJY?Cal[.bS,aJ靇^CWxOŬYqeW{g~k`rR(k֮ͷ-pv0:ªU\Zh46֮YCAv˖-[?J${S*lpwr1,_8CVewbXzMQBPUNS}LQ3W(׺W]`.N xPQUc7>6GuݺuNʔ^~\ڵLŋ]Tڭ7p??G<Rzm̏!Wc|l\^Sf/ 7trw.+V,ǐju>o!+ʧ[3 Ø &1EvR'X[)||7w˕"|ͮRbnZ<٧G?x r ;x)8ѓyO\Sip~8|UUP_r !g|O|:vkg?ЏttI8GsG眊wrnԽNlҮNV .-\ǟp~8c"˿'я~ ã=~{Dqq~QW[qvT]&aL&1TTʭĶr궺_Sx㍸;`nBc# Zg>d]o|?K/a]?_%Ka]wŗ%\|N|; 
n7tZcX}R>Aٽ ~8q/~V,?;/{9<Ѹʫ}nzGIE?qҪo}[GoFẫ_7/gƉ>9f=0>K-;;}ǯx\S ֆϒɭaӁɭaLRh5TRx7Ez8Q!o['Oy]/~p[hhGpſG; qa!> ƫz?8'{ձUn/8f{)Z A!'>o{?!>ɏ ά^2SOx+7DƌGGy̕hV^BP;4u~U {ͯ}yv}x솷0qzxsv-j >r%cԃfν%oO5Ҝ;{p#c5+(mu-\[PkW 2_>ƿ.>$~fz [ݔ)q5IܖUσ%FU0 Ø*&1KZ[m%/OنFƻ8䐇&?;?5A^btt:8묳q~^:]'y>ωzK;m3}Sp]i @hӃnxj!(zm/ꉪ?D!:2|ȡap cH.hتp;?8P/vURڋ_b߃3wUt},qQ|,[v'qNzƟMk 壆'aƖ`rkS֗%'jΨp--:ق ԧ>~ֳG3f"rf-C- 8sdR hr{`}wMu䗿vysNu/jw~~1cJ^Ղ8fĬysq-7K/g𨣏7߂5C~+.ftjR^Qy8ْ|阸2uW ,%KņL>W}> Ǻ!Jv~WuTB0̙3wݹG⡇?^TmC==11<SO1q=[M/͘9åq{@y:sQ~f&H~N ~05)/⍿'ĺQS(!juFsdhԦlU'iOmf5w.^ <9A^U8aͤ绗gGyh6<{O:IꞾouz7;bDZqVj&*:Aua2:߇nM!UImM%=[%2='W\> ϩI_9Xh\U"8}{D|꼪G,__]p!ko /wn6 )z֭?8ەw"=,[zΡg/ē0|[@oe_>b[K6//v Y,R}Ja~ tmK k ɠ;fb¤щ·)JYi P!̜:*UA.]#j7בXl8yCY%pe)d69U:mIo}&`Ǝɭal^~iK\?_ eDPa{@i$m5 vGKU2O*(U]')W#!q'S)DWP.-$ӡ[IpwndS,*yQEbCw;[QLOsUz7Ӵ!BgAO!hs`1VB_l:ѕdlOr7%z*^T\RXAݲ88Hyk=fuZ4K>jkuž%sp2 {UT֗jPJn1orП~6 H9rԭnlM1z6o (N#\k׼=\R@ ׼Jatb/VfGL~"ޘRtKFJ߷-B:*:"uovO<2cI(%ȱ[ (:viIl PrUP((ԥƂjٲ)b@DEi,[$)Q`;ǒ34  C5Av1vQ'bN7C `|CaH%,o躙`R:E)BuhX0 c[b҆AH- ֵ:.)0mɭalC&@_PJn):'I(b*ZPzM=,cȋ* z k9ANǬ$1<2(h\59ſnAB)upÙӘD,|+߸ 0`)}<7oGy!Sgj"s|G%|t{Ct? aEn(}H}G-*1Bw'T5p?Lḧ_ $Z&ypr0 ؞05I¸視2"~FGo|m;.<[)|tɵ|n[]?G>GFン0 cV.|{DnW%TDum9{$u&TZ80p j2U:.Xi?2z 3,x$e:< C7.&gr[MQAi1*2o&swM7[V̜9sqQ`rkF-o'>ɟDK\gx|P4sjÀH6HdMu{%s?9hr)P@k_JTeZdOmSGB9h[[h=ɽy(RFC"}qy*MO~1 0 0 6)X'pɗB)R5)F2cx(0('|8\Sa 'ڵk]|j*X%֭s\G2eSSť8s__ݲҬ)-PhQaaa܃J6%qڮ~ITRJz}sG>ի-[J'JYn7:>}/~1:oc>rWf(oC#RIkşG|__  $nTY. w\_%dߢܮurY7 ]1נ%LVUHD(q uFj\ms30o/5/㪫.?fA>Lݒ{z00 0 g*!RbVw3Bz6G; \w ÚpEe+r%>я;g~KW_Eu\?矇/=>y/}7r?o`x*DCr^Ws?=g㏗\wg>ko]dBe-'f[ī κt Oi q7㓟$>V c~{yNf׎ 7ŋy,1~pÒ?荴v*|k_~ ý6۵љg[n÷(ovPS??9׸k UCLh 0 0 ?eIq,Kyv.OkzX1W< ڑ|ƌyy̾ZGN Οë'<8~A^¼1Pͽj>/O|qwSW]uU>y_ϫzV/x{<ӑ<̓e;R}_7勗ޖ/c^~G /ϟSFWF~a'zvH~AK_ί_t[^k6>?oM>|C@QU+jZ9׍ kӒMozO`h|-L[V 0AW;=/Cv/K4sU/^Wbxx~qyA{#O:ēo|xӞ%nFtmV__G?q\s5xs罘;w.71wp6¨c-߀G?Ľ6o}k'>J_WpcC'Taai6֥Ԝ&) @фX'Q[`[.ڵC%`9>'?v)KT9x'>sgS 5dRttZٳp|'Fvʼ{ի_</e~ƺø/AREP̙^'o~vE(;$Z7BQLz0 0 06=TpW$Ea&J/ 83O! 
|r;\3(4}.@rAa֋b Zۄ͘nPp뮤ullifFIQ({l]vCB[֐rÐp])O|3GW^W7ǟ#=p%AIx__{Ncj1Ӫf RJ*!p{cpf?~qYs0B6 0 0MʭR§Z+{L-Jfz(Og}D~+e###rN{2Mo ?R촓fiRZ)j NTZs2$8Q`||zS Z6| Еx[Glⳟ M 2ơ,:G~L|3ŵ^z+u^#<úut8c)ls6w[<9^7(+˜u pT*i>/{)ӝLwEq1>j$Q -G{~ HS5 0k6)bH*QCJW N>'=}cW­sgvdw?\u_cE3{4Pd5i aPq(BsBZT)T#5-w|W/5oEUijuTup$͐+ME\*!p+]xmT}~+?GHqx'b]w╯zN: l~7f_ Ӈq=g,Ë_/uQg x?s/' ZwXQRNj_k@U0 0{ǣn I>#=UԻUGI)QTamF)UIjUZJ 3Q&qfHBuիnY-B" 7z2nEWm6wiiG1{\tB.wG\9sa67B)qR$cCgu<msǰhb̙gAyMz"ԣA$uu6AM<<4O0ka@%c^0o:_r+^B [˯ b@/144.Ċu;ȃSns SL{s~՜F.Qep\uU.O ?:>p&DWTۼό Ƭ#U|3fsioD? Si}iQR U/YQE$+bap~FA $Q~o4{h%=YAE+ӄJmd>vD]ߤ i? Q%N0EPA)A 6WnMqZ8aQõ:ŒG5'S;zUJ)+=$cZ|vzm7415jz*"MǟԘ&z!9"yNxi|g^pbx 05{0 㟗{1np(O14C'|vUG(ƪ+ŶCWL]򋂒\QEGUX/`R0(<W (YuMUjg+SՃӢ<-Zjv[/q}-Otcq}t(vjo@ŗKW% 9mq9\iWbHq/!wR'kUdbtѯ/5b~%%NgZZq?ơF'`W).opC/x9 svb&f9:i?(lb֤cJLᦲry΃/Լͧ11`lL:=OӤb0\ƶfPNևlRnlsUbVWz'J+ˠhp>W^ NN=R̍«6c]fsQa/GI6PuRUQZ)5ʨXqT*ǩLG)&UJ@= Umx#I\VgFVA_߀KRڠЪ)1R O9$̀q1$jߖE羘.̿91C?JVV贞)iTs4s5FqED(ڻyQk hoC;& ;pPy6Yѡ`yO%Ls]Aʏ2L: ssvO2?;~|hsp3'] ^ -K?\W>W$xϋ>#<9wnS)|x9ӇFuTll_*b F9%ς].sN{~wMpmWtF9˙n8^YF^jC&%hCI3 |>샵kb޼yklJC\LUI_Wx?6;~+C?,Z}WY;U~pz|`RUvZx"ȇDs&()biUA!S-%7lc?xN% UВh!y] a+]PŜ*D40QoM!8S(r%Pzu-s..c{A:)fv]!=5F(*I41R}R~Ƽt>SP2!?}Lܵc!/U: 5.5*W[ٱf+_ 7׸xfbz|Rf$@볇qlwImߚQ܊6>q߱Q@e6=TjkaI#dwB9gF'2ρV'k8<<ٺF .P|=hj9'U僾KקeEmǮ9b~› >oFy#)/ lWsnaG3U+'w|؇>ۏ0yey ^8r{G kcsHnҚWժBhl7_`I>iď:?׼a_+]#c SlwkDEPRE;8ZAwsGv;sx-ZK%K,:lRn% Az+뮪ɭf^(zxdуsFX `mO׍ P'N4C62_G렟.*TNhf6? < gKઐRl`"Pn)$3UM4Vsy׏Z ơCJؽ (幋y“ʵ7p ]r'V0H wܺ mUT_cAU GR=u2X2a]lj&Usx9!{U\ps >|!ڸ̙3gdrVn}+5g)e7r_%d)|`7@Rm_1ARNJʛڳdOjH9r. ~qorCZ'JqfjfETWEd{ZO%mϐ7a ??<2.W! s%ؾ*OOJeObpm,þ"&{';9^Fh5n%==o*̧Uh 0{eUΙWŭ喋Sonj;( ]+j[)axKhnkX 5ԋU A%.)N&(Ayh]xc-\<Ш>OZPt]3r^A㺎}#mKrT?{WhGqkן=D@PZ8"RHqiEk @wߙ}7yڦLv3s3;;#I_NP lª|A/پyN3"3-n_ΊgnܬpX?*ҒU?P?'QxR{ $c8Y i*B'hZ쿈~fw=_H B+`Ww<.kwP2 @ cBKP+P,Άly,JCҗ]z`ȧAO?!A!-NB5V(\őY?eeT+x? 
\˫~"$P:kh 2]~.c{ˡ=_bjL#۟[RBBpb2(̡fQ,Yڧ(g5Qӫ{KR9LҕE:@玊<';%Qo*LGƂ~-mBO~ i?=&q!DOVB8e߽ǖW.;dy?6(mg{HoƊ[%UtzW9tECˀg|IRL^ vEziۤㄶ yеQCCCCCCCC@[ ~Mn544444444 4;xԥ̳.Owtr&_2>Ku4OXc?Cn&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhroɭF&jhhhhhhhhhrQmʁY6B5`~ C_!f0 $L.|a| B)V.C84[K -]Hr TY,E> jŊ(j \!:\F(o_FcŃTz묧Ã:c(E?ѥ?aPZCHq> B;]qT vAR O>I"oOE҄0{eLX4|Ib94,' "7yzȱ8} CBȯIr+(B8Iϯ,a!s9|m`N}31᥇.e@)dRw֗*ja >S(9W~/!%Vp]U>u`&CC '6~^P{H_ܕW G9Vn߄ڏ˪V,14]ƐWzemRQ[!A,"_[ܦHn?`t?$oИAmH'NlQx ERJrk pbE})˓1k8GQ0d 1fmPNHoU8?ߒvFWEwz܊[Wzˬ[5D/_]JOQBɤS_\ׯ.?w=E}vY/YﻮKGR$Wne+_C4] -!\.#˩\!_JBȗm8$HS"$u|Kym,'=1v$f2B:`ҳ[2qŧu,&0MYWey= =N&:)C=g yg 0a@FcY UʲbtFY[hlk$-B:-4-E9y B===ʶU~umƲ&B`EEjB _M0ߒMPFjR]9ٿQcJr![W<.=qy2 P9^S%5,.I~4#yTb!@/l$IhX`[#YY_eQ {\lkԪ@jeOJoz'z|^ݚ.c(^xRbI\r(W٫׭'hLL~5c$* f 4iYt26V[lT5$HpTk|u!(2za2j<F(2zc1X`P,i Xu)ߚó2޴萒Bnu];@ltI:-J9&%e+M[-R1ƫ&{Wl>Lǿ'H[y"& R<$Ye$ؔX yFY: H"HL!-P,XR 8Rj,[Hy+g}\K\OheX(58CP~ Rf ,rnd/s"kC]VRBd+e7u2€ɭSoCG4Gg8XB]U}?? o]Bu[#)z7 e__?Z_B*z)OHF@Ѭ7qJ!ԶLX ێ1p{PK jAFfpe3芻ȧ5dPIBV}dS),H~%y&[L&jHDE4-TrN&.`*=C #0x`477c0`FAcXwMq/<5gF>MC0~X<C 4Fرc1tPP$lV/D"tR㿇T uKi m&H}tl.On'M1;<=GMzd:,ɗ@,W㿃Y)C@F>~gK/Q/6iul |t͸-!]-#Gṧiȓy6Wtܨ?TbfϞW^y"C(ÉH`=FNHHTIx<$8^Ň}LҺ"9HeVXLJo0y\+sf>wM2~Xc .r ٭j,;b \o6E<܄>0zErT+TtmTP'@k}O\G?GO>/*Ӻ¨Ԓ |ȶ2 ]8f>=bEaa]w%&My%XfO^SyvEP-;.![Bv14@*H D|ȭLEA|(SFF,$tBiHubJ=1t@+D$tAA@i+lw2$LVHv5RrڍS""-^B{{;rKsҐT5NʝwO>a[oK/LɲP[o<}kLۥ,FD"~u饰(6Q.GgW'pg`!qmn_>+m4yQJ/L +F 5Q(Q|@j^-h([d no*D=L - T{nĽz\w̓O?=pw{n~s93gVշxSOz(yY*Apqի(G\/G @괜};u'xgLGUIEd86r?ķ;^}]Gwq7jq8. }x(t`|Y0~L'N9T|{P0d7Xg _ԻO4B:t|(\'?N;txAx ΂C=~*dTOC̳୩ABicʔbж +SBT1<(@dZ,N<op?O~c?}tʩj=}W ƼypGo~;{c=p駫YUr6+CQ'jC׮BlU& $(DP{2JǙk$8`oͷ_Ë/=_x!~k2j&/?0,D:;nM[,yLcvM3Oa ;(K>d,dX8ӰєM³.PmBOwjfPx%i$4?m 6]w H[IXԯ u-jjX́i`jʮO"{:Q)q嗟o^{O= {/8q" Bt*h̦a;N4{{g},N[oLmWqWBn`։&KȭlD "BcM55 <$ܡc#UԀd.Wx O>yU_#E<ɪWIfhfqD)q'͊! 
tX˵&7a#3ATMnftvŐ5"E_ݱa@ˆmj ǩ& nV=c%dU5)b;cvGg]kdڕRזwlm?7WVz:]QDV5$,BY #J.c`UW瞏<WrԂ*|!(?BTAZFSS3$;XrI8N|!F`4̝ I&I^Imthj*eά@VNN7(ZGÆN+I"PnX5ɓaRԇWj/r5Nҭl!s\zY^'rY7܅mBd&?m\lBR[7> &$}/gcPI]8j(gO|x0u{taʼw^ՇK*Q"s (uOԯPzgYg]7]#&a`Dk`%44$IE^MQP(z:{ވSn"dX!(9ͶE8Bzl R֋u20/$z5ARIouAf%%ҋd L8Wެ$^6=8T.Byb>Icׇ뮻V`$I7o)6dsl&XoZTJ1 k!V_}1 &NBpcbͰ5jPXm5p?T~lW!2/f>ᅬ5"m+U5nb52M<#8"cy˘J/H+E0"$o1e=>lP*ԫIGI}Qt\ hQ3rLV^_)-cw#fm/!z}7[GӑdU6šd& úS6$nnfBRprRhU&b'5Jv&L 1u[[nn5jZJYVʚz}!CX ʙ, S@i,{|']ʻcUWP<# E^a) ET'R+|Կ:/ +SN-7߄BBpPsS,否!$6&a !.ul|Fx^ȉ I2[/t²s xEPmW Q%镅ЁeQ$Dٳf;ı}VҎ'Q)2Z|b|'p&M6*2eLdJ&}8$({ HH\#SnqHK]p⼶NzBU:,Xt7gdX8o5^zmTOe1$i#_IfQ#X)mƫ:SvE$EBPbAϡ!Qe@# RYADSc{Jo❬Ӄ R+ɵ4-a!%|񟼅Ћ8Dv)b|m,FX-#Pe|zmʪƃNr*)@"VCP.s2|vNMY7QUYTCmW WD;j@rjiz96D4na !id4$ȫ3Ϩʶv{E aZ.h,S+n >!)lџ|tKԉΏv"$q8<"<&B# r'gBmȑ#1sLǞhm@ C˸a}D"TZx ѹu tdO$cL5EƐQ(XS82d:K2\)Fq)JcC[Mn5&[Q[ Z*[HfeC WP*$U^XS ?|ϰj9N M(]HvOHk2줃"uPo,3E>߆d.4eV>,#GBUYO(=J*[c>K/_=IT5 km!X}G5,M c2`3m0D$#FGy,!DrQ\ sX|4K<%1_%q=5}$DDVÑnhANkG3 >okDbMd7#=saa wR"ͨfK9D-?Y=}%"0C$L_ StY$-nA!Rmy@$^OrO# !z&us8{?)&#E = 5IUCc" 0l&(sVrkդ*ep(JȦho댖~̈́rzD}QRoL8W$&#F!b !*O 3DuzcQ9ʳ!4Vc HzQ"hr"[IYfI^ɓmMӕ"9+8Aqۧq0z*htR!5#ѐyCҍJ+Y&H/cOb{S՘ݍ ƠV6[&ѰX%= 0kV`(-9VY묷.O @=d}}&gBsy=eX 5%HK$_n3!wsXrHd_Slek,ǐ2'eWK(ZcK\ 鉫}K#Ҿ:/Çm-q®:]z"ߜ@GgBiz`I>iDXto%KVHdFޢW}RL9 7βL}86_.D$J9ɶF"\->)#c.uZVïd+SI:r.&ɧ3K/:/;1jr9ejQh):s|>=e=2F=Wg¯2,V]yz, 2NsOmhp6  , ?ׁ w`P~KfQN:@囧#]fqsWhrɭv1V$r2BzTUoꕑc1􈙌#Ն^f(c|Ta0#Pzm,4@n<2^BxC5(@Eʵ.9Ϟ-mȭE X$eU[ uSjS!/"GE򤟩4zK"$( MyT{!V̛F hD =mrTrnTT 4uU2nUtG$YIp]:"&:u_ HXG l7+Y|2dH*j RArg5d)Qi;ȫ ֈncaFꣅSVzG`3%/R J:NHϵu8g},UH]v*  Eh| 1tDx%- Xv;Ͳ4e/zo;U8mo@;oX2K}cPXE7~aNj3|jwhlj1nL3A'BJHeވ0풔34Vc hr+ ##D'j%DMݑsQϭqQjUZ" 5@^#r?x22ڮi%^* N *$=R6b|$WIJxc\yo E*SVuCskR='L"&) " WV)O% )E/<؋/|v!"cU'[%}9.?ID2#rgt ?D-#D $"םoߔ\PU9> L$A-_KW2%CEPR(cQ+3OA)"VuݮFe=l@xW!?Q,>!݃䩣e'jǝ_yEv# ࣉ rϥEAj垎. 
0DNLJ[KKe R@r+f.+-ƓߝCK: c>ͽn[ndEY)i2QDY.Reh<<BגQ+?S knϿ9U9sQo= 5J!uYd}D+YLBtb JK䤃c]Y3єN ϐV[>Jm܊]I?uVgyVbY0Gsp^z %4 TOC/g LWzWUnEbH)13>IcX"k4 :Szr$1/}k!療;C{p A%|)>S|4xET2J.V,r n0?>7 ƌbCtI/?QmQ ˨@Bf0SbBd(R)@%B)OUCP!(Lgdco}G-׾%:K\8Kb }l g(gNҥODBr f5d>blj1yPe,2qu\yRQBmLzMH@ `Ei^M?<7ґae'J6EĴ/*h_ !0ɲJʥ@GΕPv CICF}!ÜyIԃ\7 >M܀XQ[;IJ"B/GbT<{m!^YhR+dwWz~5%TRit iX4t2׿L4FH'\'"eFG8%:=Q_̴IF|=(^B"˫z8ɩG$\x *vT^\p*&m>!pRjլQ@XK lU AդVaD2&l#>(ظ⇷>A=ftQ3K=)1jGsV,GH-ޓ_]#[9#y'.2_oT* KG"=}oqJأX'o+lӄ)%f`T4~n#V?_|t(61VwW7,WbgrY>-UYB76n/8׷ヒvĶωqhPC|Q%nXS#I֒-K Wy.!,^/k,nrͿ7/$Y,E۾~,(MVnO{Mk> Sp΍QMշ'˃@E&xYgJY-l0рAq䏯mϼ j8 #p`z.슃xFj A9}ܧ/8$Ϟ&=0چ>iNZ^K3ls7Ѷ r)qjpMEL'c1a<0w%N q#) KZ[w 3"\"jȱ<~ ymk<ݵ `yB]qP=v!˯^}˸~w>Ꝑli:Wfo+u}u}ӈYܒ '}1_ڡ2a=NJ>a Ic1(#{< uK7`>_݅oﶗ ł\tVe*U!2a },]` QPGyR U T]!quYRԓ4jaz6L4 ($2~ U~lD'Nt܈R?L%:>]y֖OcnZHŒeBqS'y# 1wֱlqϠ`(\MfДId O")Bv׋0/bt q)i"fQQyNΫғ Kodtls[ ]y35=x\Ld&\̳|eBF[Ɛڬ#qd$$2O:|<9a5aȸF_^Swj W}2E]Y{Ed r診^]G^<(X}<鿃.s\e&ϭ'Tg3ZoULS|oɬgg0%knd6eͥeBMƘj\7VKIxYxw\?-gk'jnjQYSPiI}*s.Yx\R\d"'KVn EĜ8 $y?ITB75vhet?@[UaF _y6gh1|*7;1+<8nB!`c~[θާ1G[%FéHh2$C^,j)MP1+I&֨ =jJ[*C澌%q4[1.-0IkCC5&vDda]潇.6"͜*Jj+!fIz8”*td|?<6x]dkᔓVe£RUt'|t$\?T0Ѡ$J Je\\S@^Q^~,H-lDt>Aa-;6z `I M41Jc;2tEP}fS+%0--8 2K$/Cg[B:3Dt¥B~L(?YuN}qtqp$Ql" zcv3(0g@'2 yL:֡b5bA now4 tlBcH4U z9|ijP3>~xˣC9Wҍ%Sx,7 %e@E"ucnIX2DKH%IU{aA'CBtO/N3(kJt7,3$gU` jqqNb).|RC4LbVeg +l-%$bn`D"gy3 3X+~_VY>AHbW7 "TQTAo\(˺%-Ȇr]2(QBc !d 2C@bF5L_A]VF3磝Ҋ'2dZDK@^I}?몡BC^!-BѠȸd[VjNfYHΪ(%" $4Պ|nYB5 4X%622ɨPTzCAQ4x?gx U LY䕪ĽYb <+@c&A[ vZk4.:e?4l:{;G:( ) 1}˥󿃤Q"=l! N]$ d$14a[s:p >e\7f,[08Ӳ;4H N23X=kߟgp[}5}Y OC>|6%啰Eb!S¹&K޻gYL[p:OSiG`*eZ']q30xE!FU:?s10[QRI`HrƣWIi7eq"HsȋOn7`ĨRO\IcMyVLk2 U<G}`-UFpx_ۤ{,WNKSĽ꿗stđhЙG5 L)dO N,WVG;\CecTeYFUQ#NF?hZA -mE5Oᑷ?ƙW߀{}aj: h*p¢`[ً[iS>aѹh_Ҵ$^l?mQU֜a$Y|$!;tOk nZ߿AQ /8,IBAzʏՃ#!%CK)UNh3Sg_?/}:v)7¥ob  OJ~ !s. ?]8ũ[_7~}ׂhNGɠ̗1\ZM7W󮂕j?ReHUik΢ }tIS!䋪'"4d=Y6Ok55aa+.Whʱ-.0j6ƕ SIzCT}X2UE>F[sH&(99t8#p(=t}~3թR谔c l1Vʒ>zW;@f_/{1tl6)8f< DZԼuT/yN%_!_ d_EHJn<$:YYVk71(cAcƑy$G=21^kJ! S6W:hl}YBjU- !29? 
~bMY(u0'WA,HbVL羚tOX43$,?ÜNLye Jjy9xxgf\yJ#Wpa`r0!+$I`~~(Q FYOAGEV+ D>|&SaZS /16?-pAJV9FD#!Cլj:2^Zӯ"/9-z]hi0`̘K>m8;0uz,cʶfVTwoVZ#KOAclE3#өY?(z}PR@2풽jB5D"%}}6~{'@ouUv%/_ե7tiq5 IGhdnUC5yO,`oyݸ׷s$)l kVVO^Elm.^"`PՐ>)Gח旿d|#`:$BV}O~ lz]kxr )/@T|? da-Or+cjbq}xpo9BqQ-~jHsܚ PʨaISRo(SҶl, ]qS^.:]ea /a~H/47L˯&UۛGEk>Zw'lƘT\a6(l 2v4Vc hr+ u\NY(Wh&d h8-Ѭ{izV,r+9etKK\.:Iyo^̜GOlk$=~6 Y(0Ϫ WWjΦq٣.~N,rʸa*̲xᥙxi_s EXhRz[2$d'r,P3_jd.!y㙲a#W~kn+f\$LD'> ;Y[+}k%qDdup m.F:>.v+g C"hrɭv1a 6Ѐ\I$ዹ.i7G|.'/\_0DciJ4I˜>oCTInJ<@hl255JLb:>H"m 0_)Pˋ̣^1 1cAZe#(=}&+O+=>"BOE%S3ʇmrQ:$ :z>l 0~ouqȵU5QN*&j,MncE!d3Nf|P֍7k z̦:y!:IC"H`ѦSW*Vٞ^mwYG"HX&뿋<MnC[Mn5&[{u5!)iȊPdMxڞX4h#rRAI L4z] qꖛ}>'T0A |Iİ1>Y2,!8.{ m2sIpmFREH)"A=`H!z) YAHǒY2ey@V щj( Zq+/.Y?30nUI(Dz,7Gғcљ`F#ACIr/1( "7!W6o 5[iEuDn̊\iixm?W qPsVOH]PN݁]Fp=;ˣZBa^_MnCۯa_C( zwm_Jd+R!LXVaV*f2hwRhgd6QF.͡"\C5FAzyHzU+)KCg,ꭁ T6שF,U%%5 =۠& |S#id~Q]H&%X yy^A@^Ej|%!i-Rl|'IC@BB׌J i< 8Q"1)KHe7I"?Xf=!z8ađ`pq(DGEґ,ds$m "w2$MѸZKʘlW,e3VHzeۑKq[LDۇ$|v#:Gd$ymŲLȃ:"rRe/E@D71u u}PucOxj_q!QeE$__ٞob &cL] ҙM -e4THrzs#r-[m544JBsbIiɲf}e'GeR O/aa K40"ɉ l11MGZq:P)я#fgPrn4e̓Ui'I0fV1\&fi_3(En7lqIdbmp…0+U$2#&wڱH۴ 1$s]+]K%K7(pZZ/Jo3ɂ kRyd`Y~5&B a)|Ȏ"%=o33$n0IB@1F](g%h`[Iy_ݚyU4"xq&-sٖe8l?gX],Y PƟ¼'C~{~ݠ60!j6N7kM>ո گeJ2L=O=Z7#L e6$2F}Zk %y#\V^L]7[{l>n hJ'Q'vX}`;7m)V: >yo*JVfAZ15vpvq-0w/apа(6il1~ r d̚/DۜH640iiu#q7`ͦΏ?[O@ҕ%YKV*p]R] 2O[} &LOBA3(%jLkZhC2adذHu]Od$'Y9fRxj,}mg,*m,$RFd,Y~|!@zIzx.H-6mOY#>}擤DV O 317aՈrPȩj5;l(jmv18qw[ntՅᅢTA'맬ci"E8.YΛyIj } +R#%%Huk:M} ٶl(Iw ᾌtP6C}ixevb^(ve yyjZ4z24 [[txBJ2xuN8|m14'oz#d :% Ϻtl+߂fY~/LJrvE r⨝`][gu:=f<45H3SS4)F7cՇcTioF:d# vqn֟)C7>kj|F1C]bFzØ.hs/h6`3MՖ(zF4&UtteNcǡr-K3;2ov-oY?Y?uyUWg /j_eQ?}@ۥ&[a>>-$g45d73B8v N= ?z8n%<~jh ws "!9$ cXwL5n*VيkN8C¯!;!m[$܀2.>j?j$E"i4YR$n!6֛1f5y 8rMqAcp-`o}= hF}%c(C6VC2g 1"ބ?yF2]Z#ކo*#ڄp}O`5 IB$P]i C[> zs(Ke襨A!f[HʋsYckU5$NGr)% t8L[m6bc|(꘱iIE!6eGo!y#lؔAsH7w?tL+kɁybe Ie6a')qGVa4%c7\+kEc6o,mCLňf ?!m50qz^^9!:5svXkP)bñVk|XE`7ij G3O;l!~rv(U$T"@5(*l4s$#8pP%O7Dw5j<QΩr!^I;Ԃ‚LHq&XV c'‭AC~ 묍YEsv'lnl@Ef̞645Er7yx~q>$acͱ+ቻnG~7hx Zخ[`I1yL Rtt=z$2 
|+氾؃ƨzЁ܌kM{ XY$܁ iQp Mr݀M70ֻ/Q6e#e9n6Bn_E*Ƅb ?MM@r[ݾ!jX05+ˡPEn54rǬe#OQapC? ͽwŌ4|;1rH\ PyIr{= ccŔ詆XoUp߅cxt~xXƕ=n-V^o29t}̌ѡ` 0?kuv=Gxe3;G#:.l22'{*._>".{-o~kyp`ګ{0 ? o2;CK=xWqxA$ZXugO[o9 p/'p/}e +p&0:B?2NG婇E1tP3~չ^zgZ+=?"vxQ-a 5Ny8a0e8vM~@$2_Tz>-qDR,MS&A"#]IIct-e\H,;.ʹ!76+Ϲ r?';D/ԋ8x8xU0uY]JAZpO.YoyAl5谝C`-v $~1x[ycdϭV Ș/+͏Vġ-x_` `ə&xoc=A^}O]7sfL;bwc%|T{1yK91#Q.ο@b3\%+՝^B)S=2OLWJJ+444JB7ӄg_nz>|E'?FJ+DY̴੶.|c7Nfxvӣ_⚿+z%[nkt| >~Y\zh\y [!.·vC7Y ;1Ix8??܀QxsxR~7fc|눃̴w`P832Y\qN{7l$IQ76`^\@56>J)Gb.e?4A'eȩ$B7}B&͞_y",y]LP_:Ѭ5Ibq^7݄;|C<~<}'FCs˷In>x%pࡸipObCˠA`w"XOコGq(z~n1S~ٷ pv#Eپ8ϟ$JgqfE0u.tݎ {;(Hzxi|+?GX ƻ{>Ə^~{5HO-҂1*!vot? @ۈ[Nٍn7p-@Nl^{},n;xյc5=#dz wh1W\z羊+c@MA+~\3,Z2/!63>}:K7eamL'X {>b ίɶ3Nx Qk4F%9eHb{6bd<{!@5;U+\!6 8S0a ݾ}G; 0>JV {;V3'wa#P<alҵHt$wqwFRƣ/_7#VCR(Xqڏ:58̓pwOVGظMpϢۋiྷ^E pG\|\y}Ɔ4~s^ WJW1v$4}*9+ؕ`njƩ{F#RZFKQ%_iZ#{=x%iIlT=}R5=m ?8|]y|[kcGG(מo+r$gq נ1J:h*I# :'j^)vΩW 6NHLGQ41t*;бgw6&KtKoMkǟf2=Lx`x!tZ*fZ#S {&\7~IL03,EtB>rWWhH 8lwg5~r5h*?K0R;g)1dz8F'ޛ5b&Rߚ1#_1vx#y|:W0G2a:Pc[;on0 WBep|`ʫ0By3*9\"`y {A}J'F>t"ԏCZq́{J]a+UTنjh7hr񕄼dUi-Z'?~sfC2YLT"a7?E6!rp^]7f*$|.F3x' c{ aP'Q+H/1 +֌ H jBgk3Z7؎'\ x7Pu txa%ȧab9Ln ^|{: 7|73IL+f0<,jڬ)뭋693g!3 UDQz !^fQ0b) uF쎍2ߝ4$Bjߨ4[Lf_]&p#>t0*grВ7T2> o ,>UPq|6'MQ5?(ʇk'ʢlȑ[984w"-3XBfd`ݸHӰ`'Zc/bf`=\qfU[ qѿlI #aґcQ[Oc~nm0PbDUmԯg?A+NAyD81]>DUŘFؽRSAF&_Ij$H\cS$8oÛ<4XsH'6SXmꨒmdASW4\17" Qcp&A$lu]Gq?2YWy$GJ.DǬVDKi8ƳazvSH`mha>S0  ǠЀ&#CbTBͤAv+k=ʓ{ $V\"2fȗ:f5a9 ~ 7FǁY_*dщ$ ~M2t܀4gȓm9cID]]AU Q50`*"=}8Vo/`y`7Ig'1Ax5J"l.yY橼F^t!&h1l:b9r 9MͧJ =c4f,ԇ4B?U#]]apǭ:`G11 JZd_q Wa 44jӍ_12dc~+i28me)랍=? 
jAGҔQsZw9z뭊ஹpڧc`o.& k[z^m)^ e (aB?Ξ_P$I]fX4b/l0~m3+,$ &+y>#Ô _.digO=}w{T4: TVǔq 8`~6x8vrs,%#-6k> kgwCI4MG[ Fй2ol5}:3X,2T̡N!yUO>?aU9bJs3nVVuti* i71^pˣx׳-\x8yQzqZ7')R$.< .|fw^Q4SSm܆K HÍ=o yoc1qѷFS:n\_㌟^I뮅}l :Y> t9 wanODBߤ1HP涕It]8~@L9aO|鬔+|y2_֐9e1!`(4,IG ,?ęOBF;ocItq|bxg4mNLSx׺ñ8~qݝDOƆo3Nڞ}CkO(}:['bj$Gw^BPہ4PC$;l-nm8jMʽPqq&kaUVB`qٯyw'>yUt8 Mr "8 =.:|"7 sK5fĶj KsHiIX,[~H:Aqg 'F}v4eJp̄ϭ//H?teA]f2xp/㭅jKϻ!Ŏo!l,Nģϼ .{NǧԱH;bv`Ic# w't<^8}spstc]7иg_|[ v}Glņzi4V*H*D]?o9ZHh=xo*Xsp.[` P9a۬G5U.Jsι8ร0`x3z. P:+c=ѧ5:ǣ#7d bEz]k]M?PYl!hsUtQ7~vExe"_ vxp57*k{k24N:0d&hl><", ޟ}뭻2b'YwUxq rM#ᐜf5x`` 1G7%ɔ8~$#)3Te. 3ZH|%陕dgMIrpR-xOԴ)! ;]„qImh15V$s˯?Hr8[k$ӱ,n8c*N  0yXcJ8`)8b1]6G p1a`s \6ÖoS:,FFlf=1l]٘:d/?]w;W]g~ h::Ü"v2'P^wyt_1j eUB%mX-]NOr[6N !waSهG˩"kY1!2zK\H EgK4dϾ.ޛ1K j| R~- tAWcñ8=l ,§=y?{~>jnզ80j'>Wd)bUƢ4o&gfSh)ᔳ/A%;¹~W >zy$l;Z |ȮXu}袣ě0j >[l)F nP`olJooaMjgOT{2ُx k#t\:#Nm:QO 俩G"7cGR+U"IÑ!I,Wvb<ǒ齕:!ϩq"Ucz!P(Z]yXe3 JZ-#Gṧi8d/ߐac -)CL,c~&Y4UlYKN9kfpl3틾>sH$(̚v;m뽸W7bqh : ІIdu8grx'17؝GcS C<>j^}[М43E:ac&;FRDX!7ě~7zhJnd6hs7HA \{h1 PTf6cY.k"aszcB&7ƺ"UEO>.-RS#E$=WfsKGQm`ldUaט>OzOG~ H%>[4 =0TRkGwpeב@6\=nbwOUVF9Cs"F)G6jyDb]26@:HeIX3}-,4l8 ? ֧L$mMձ W^RSc"aYè~+kM$UU!6jVmJ(n hn7=.ԏW0eQۇ`LA=Tc[~ aH\, קD];;a;Pf` c5l#+sL=T@mkXk\X`Pg0X CC!s$2I:"|2ym䥣>`]pĎS`q.N< HrWa|F'$ajv9q,l5;oLION}dVkc $;o^}c|˟:j%Ckoäc|WbO8jreMC ʥB\`='a&Ng6v@9jol2q]9I7/,xy˟ X` x-H<9|?nzH@rrj4lnj93pǟSɬMHYQc9xQstJ2夃iPe'aIanK> $AeZXCo4c$f /45!tY']U3`y m/'Wb:kQC1y u,+/|GFdX'] N|ΘC.-֛ [8=5Ri$i?O@qɁ`Ǟӿ6"֑GDwȐI+&.46b~y['(2݀-6t,(S@"}֖Օd I<(teA&UqFQ3AUD˸X>LH6L~Q)/$ yAD䴚BCF >ue˲=Gp3)*uCͪpUmö$sѢ#єh_v OrSc嚉b~@!{6]>uR+6HL8 xaHg1]rI08w:Hl=RJ?ީXu<!*C9K= PVI:eȭ:ʠ<3u.4tw$nBґ`$-tPeRcOBX`ZQ-`|:*In֧H AYk2nҁ$c9K!3FJRoYgu? 
"0LYY4 B̃+zdRA+,d#b`Sڐ8AT`[Ig͜T ˴0AfMLS›3fK~vý?8k7'eeAUֽou&_/r5^+ 1N-"YM,\$ky F3R$"v Tߘ˶`&BsIa*Y -"H=rlbE9g p+ jCa!K 4si$tB*sĒ6ni4b4@ $]jwʣbI찓Faqɐ-ECiLF/r(;NX@@Z}>-&1޺FIpaastg;BJ=$tB_ъ - \!Ẏd?5:,Sˆ*eNX*r(1CFS+d&IBY`f =pHL1N'GBK88Zr"I47«ԇZ]tIb}tHLJ,ec!5dh2]Yg4I P\NZN92HD~ÄAY\}`APBNMU%GEQr@g}gt˴e!aT,>3Fqǒ-$F:0#Ԝh_jE)ejj$EQmAˀ.M3\wJgdkskU"ѝE`>(d6 Ed*Pȡ)BaR0N!A=r} qF7j%[nʰ܄-AG΃K0]:e*OMg6@N݄y$[j(KנՄ,!0IeH+I2zk]vƶ&t-Tm)+}{lV+5U/ɠPyv?LiЈ$]4!E1Ɵ+2 H2m> ITƍ$P% ,x i =J̡J)'Z#-P@$~ /szUޝּL#VQQ̫O2L9:^#s_>2R[3t#w"Y5 sG"AWRC dpG@_hM4Y6x$<"(=-.y~XñFTJF&9(C5P HX>FI9 r$9.2(O.D'dNƩC4 F~e q$$2ޱL}+d#Nˇx@OK䵄*gze˃07˨TU1>#O$7y{޸LKUscF8r1 pBX;asDa(p{c #< cБQLqX=A C,$Wn Ic])WeR7nb7ZXv'2b&Ӡn0ONN"nĹ)C:n0uKHpI~Iu ֤Q@7LӉx@N*&((4^;dנPz7qlݢxU?kBz ͱFyz 򟔁3$LI̤wRPc~!$l|J2#)fhSarV@'!yKK`˘FiH>l䱘8 *o4+v&((PL!9ў\+}hy@fFq5zZeqZH2,t@\Gq}IHh{HlS a 41e'rHlX~9;8Ɍ~ :zS'Cl_[HDetq% )yFLZmDVl5AYdgۉ-Z:P%$$v Ewx̕oy [pS6z:zԪmM빔 cBB|!EWϤQq"Z\I,iSVY(q'Ip\B@>Z]z 'ek$nD zJɮlGݗefYZI,M[B$=UMm cEGtGfY,W] |x\JXGnd*i3AusyX3CM=hyNe yqlTȈRl|5܎S'R>ɭxI)f]gzRNrE2U}y\BȤįM~hd/I L$m/K`KJ&HҢy񀤡`ex\n+uz8rPR"g[lt:rXep ~JF+JH`Ũ-1>: #)iى&ɋCb'yLyQ_m+-.iHȜ#bixLzT#=8J I|+K\6(,,CzoSz%]5+zJb2xDRJ^"B{dɭC7ybѕh@2q)ɫz[&*6YT /&RQyEOɒ9] ZGz$\G,I -gqy Sɋ,`YK}R$Is*r^~*gP $\&Gie:UKyxn-O.ΊJJyPrHKXtcIO*QkAthNˇU]y8}dHX-TʌRtSIܔTC'w 0x:%;DTV-*r'FNI:1MuڗEVA!t>wq<Gfبy]H)Do,mDuiZ&+"hrɭv1,r$r~4N;8`1aX8I@L AU¢Lb8F^3aF^ j!r pIra#9&;Ѱ˲1EDiܙ e,yL^KnDdg\D7("cr/:'BCfz},E>]|Xtje@A%(·ݏFjr4]q \'o-ڪjmyGZzd, 2֕FSEdHɖ()#"Qz2?kkMi2TP-!MѳGWb!F A5܈E=I#aU6:juh-nG%OVxBy|/=/?6X/S\@b+eQIH;)ψLɄ Kx|vHe{`ؠ!te%QG)AȜG7}$%yx':+=R9DW݂Kϙ,ԛBȴ|T]uB {DgTYqPߒW2I*g4e װ`v^ђǶ3>KI.Fw{eH2,ŲϺ-KioǟtZ[`v@ː!H1ntVBf%JQ8rZVo<@޷TyWΨk<0!'uR-GoR˒㎤z?xRbVErIyNJꞸzn՝xEɃcx>3 Ⱦ{pNmx2~=4~_AۯJt-/WYxbH\βVO? 
$OKѯ:Lmdg14=*N5@S<}vXiPIn8`ʤýe*]kyZ }[*'A \l7O" S&XHIha &e)-8o9'eڮW$kEzIdA+=|K'UVC8s'wDZE7$TEr%Q< w^ٓɊ=b஻}>cDsV,RuT9IPq:"AbYD%!i:Hg؊#)#w(#+ɖPԭx mDr?u\.JSt1y N̕,A\?ɑ|Z7 lNǭgҌR.62$)rn54mi44Z>SlͶ(JHїb_}59wJ,IP#P%D->R|\=4aR`Zk⩧Bİn8ضz;"P,"c}bEP??>AB"iK7"L_?%gcGx<%\.GgTKe޴Jdm1W oqyz ~P}"pwukmȒa?)=BH:7O>VV Y;4Jͭ*_WF[Qm1DC%=Wƣ>Q(HD`]Į.E$V2&PD!"NcK!.}jS+ԡH͔ۥ :eY`"a#hES^4HDzVOB^u=Z2,h" .C4vwQ'8R/d_!cƌQE :C[$d]}0a 5y}H+fDȩ)hԘB!˾}*!GaI$QOpdcaDcC\=(˄FPF9ZH]S&_m<12]Ʒ "SolЫ #ȏ+>NHY\!DM8tXJ}BE|?gہ(Dsi'A,NCiqD"zݐ3fC]?R~_ghrL8n^VdL^=v{^d[ܸ1ZPW=٥1A}0Ј*@H@_!F"Q wPDV [y( C_ 6bCzoHIOފދ'.2}qdDdռ U4?Kf4^GdY NE[Z5w+E$_9Y6< Bχ,)5y]hwQk?Ӿ}HBM:* vE"EĆ^+6~ XqQZ A:H֯MN l@9ϷN;sy)tQjr?10DGhq;_ R*,Icz&!o3[xӠK9'%4&,WȏQ %/%[՟RZf)_!#BCprkNZ GDGzҥ,U9Q@"--nc&6e"yuݐKlr) WM/":FycYԴ-˽IQ 'lrVif e-ҠjG(D*)Tѕ265tNm)'=KW[W*e4|o,VQ j فQȇGvv|_k %OW$zElcF]S஘j*#c0Yѡe&σ1R:󹆒_ŨIۤaNb8oތ$`iWWK$Bۂ~]W(/^l!k*$̟?߸Uc̙&}4W^=nQ)밬>h=ß.&u{DKfZQ6_odݧ_:aOy^]2v:kxQGap`4\6ɶipQr# kaa19XrkNfiYdBG>8/Xn"RC)%{ 7?_l~~CЧ>iCb$"R_AduM[Y@(0NiY'Bwx*[^G$Y0S%1?i،0slZ`Ku1hV +wgE<4$оODy(UԽDVC+TBSV\qI[{Η/iⵯ}-|]d o=lҮ 3Uc^oyM;S̆2>p?goK@R^Z-;dwx:|sx>ol3V%vYyâK|^w D]]DnȀ_yKl-,Vb9LR"GSI蓽k}Pva}2>W5x˛ތ6/9l~\zɟeU bƌ[׽l1وᮻ2?c=F%7'S?/Qd$ z+݋~!.FyCng|Kgcٲp 8񖷼p *wy;lV {[µ7$QOީF!yԪW=X&'?2+9T*]X`z=[l]wzkp$񏝆W+ Qu? W=W]upGp%3 ^җOdcfS\tE c=nim8Lcl IG8H[툝v z*gଌ"7Sjrۍ5 5yYַ//Wl ݑ<.Z|^ՁYFW/~8w㠃[m<~5.q>F_CCp;_J|f= gۃM7>|'n=U˺7@oSdxqaFt;6+_*׿1|g0cϽ¹_!aaa1yXrknNbpGe@;Eq͵W,삍H 㪿_3>{YHXda%v`]wo S6Fc-ƙg|7t1xN_n{Cgwxnymq9_#c|O8_4+ETQ}Xyo4dxKހ$ _jnKw㟱&[aͷ]i,=}c2-0eW^zbͷDgg!˛m<}ݸ[qw ~Ń~U;u<=(r)wދ]s=8c٘oᒋ/%#cY2B~,Y6%_+6M楋KG7}N<$h.x KHjÂo7n!u8KSNnxn_Il{ù瞇|Xw37m|ݸ'?99򾷣_~K|#!kh$c}pV a­k~◘&x\y >݇y<~<}}G|DZ %-,,&%&H<*s}MۮNϞe⪫;ͪO{*vc7|kFWw'FFp8xlF3я݉pu| x?Oy'O?lw {'.Bx M_}K1O|N)~G&O5df~aw] B&ۋ~~VBh̼Gᦛn2n(Dl_fgI6Zc$eOH6섏|쳸qy_77Gߎ$UzҪAn <07̀CYQ/ceG_,W{УbRh?83?_^|ꓟ6}l%̚suɏ-oy.Z d#)M2)ͤhhiL؇qeużqѬs˺ͺg?cNYg~x^ct0čW_I7uo:ʩͶo=X^w}7uFW97~_Gpb3.#f!YfTށ7=#7e?6wЋ^/>ޏ:w Ò[uzsɆ_b4_ُ\z)x ۨGʹR'yy׼t=RfR83gj$ǐFifjLc٦`R1]." 
o8̐) k=paa=0c~k_QT:x|{W qÍ7p.(GGm\ W{ghL^MFJP+Qi2e.U|d 5la=4'</q-FՄ}?Aϣ 뽖;Htu=QW:a-{/{ tbl4a vqAXz()?G[]^c5`bL %6oޣ`tt@V߉SN~7>ϘCK[m z͆dv4E:'UݙLSN2<| *JwYXt1b6iѬVGX;:IY;Qx-,,&[c,I`h,D *Z5ƛl CGbsQ5 e|1STT`p`%YhQDxRh#>oMsfcYg}s!o g}N?LgsYm]P#fNc$j˭l`)_@eIWDr&]-WI!v-}"J0MedRn=UrT]gp==52CF;;{Dсk Ale\wƒ|ZִP==]f1aˈFj{`F'fYs6P~3<أw7mm5k_0m4T+5 3 ѿqffgl;Wc B&x ـӜ9m5\/7t76d3DI λqw3z;$HxJ4, mf$ԗ|_ j٩U4Gy:f`?s6̧;+L,o'Y Yo| `mAOoH!%kZXlX'9/5,ng'##acC }D'j e]$W^iȄ,"-N' <ЃओޅKciT7`IEn:/,8 w`fQٖDEzvcL _Qiq˭bYs Tk8_k^*|oo[CpAj] ga^~Қ:1fo!^yx^}B[ayYg1/wnh"{牸K$IҌ-6jq;lȇ?=vY$<W}1#5BuTu}V VS[VrdIB >qG߅/xӛ߄G?;`}c8wP_/{kM7YfCoL;M嫙uQO!l C2 pi44,?d$uTب7n,_akp[CD$OZ4Es;,Z| vm|K_ow00 e`'Ať9ms ɉFG[h)تO"11RO"+/BUƳW|^'KE,4.ڑU:H>}r5D,4nx+.G6dYV#NiQprMuL['ML Jrޟ?\\z x^ڟvK/*B2>஻azzl>L6#8/L8L'nz7~G;]GlŖPc\y {[Ab,t};H|'v^/3@"Ss0m^?_zS2dmH>t{0~?i?&)ɣt?d);gxDZo[b;lL8lH~$~czpYů~G oĞ{yI'd?_vjCů}={(P %k[> Ne^w$tuvv|-[EgGNy{V[7^kU^5TK"c)R@i~Gά&Td4TkS.mza]s9O;tC֤ʞ2v%%+6Xi0H>3+Va5W SQ68Z@{"i_]DuVQ$vU?Gf$1RHq46MTyI\3)+tExu8䩑!#c=?\_[L[uOWa9 nHyu67 Z!Lȣ"C*dqSæ_>1G?T̪e؟QO:6al(X/`#IFt2,y~|;{ܯ|_yJPX_c6=WYyꆺx%K4TT$#lq8 J㻁Y4TK7Hr+R{Lw]2og @6$ϙ(*pkph_jrwɬme5 R~/B'JtXX|a4dr˩*|ϻ) &KO1*RK`҄NҔ$PUVBVwe-rtEV9>UY !ֳBD 5/- bxkDH/pT]{"EVA1yc "4sщZ)jKb$9 yR6_^_׋ H똲}CyMS07G.rQDV*Ӡ55%JgW%=1},T,,=/ 5 {Y"6v-)dz:TVRȏ $ړ%5Pz&Gqn<_*jV['sƐJ>VI_Gԁ$EMYtEj X^}k>0?YRCxeASC'݈JrHqs0_(WNA{%NSMAl[lLonQ@譚/!xPz.?>OLQy21 !,;KbACej%aaa1yZX{_W0hLHMM!㯋r0aP؄LvO1Vª~xsQp&u+b_dV&?b[u,lkŸiȊȦE]yZO`YhꑗaYkE֜(lLFT0Mc5:Dr1eB50* 6sHUԯ~tc+\SbcrZf={ʽ/ۭ{fJŌ)*{+<%EQ-,9H[kȯẸE<{ {pu5seXU8Qr{V|eL_O9u 10DigRNKRGD8έgp@ȐINV57Ȭk+(A#ϣ"`no9r'\cAQDr42nZv[<Խ9mS̞`:wK~d~$dVUsGU3?\k Ri }uo(ܹ Ubʛb@ZZ !&IxL_4 ˈʱ!R\_B$Q?\Iq\r+flP6~g*+B{~fyPOe#)}ݽTw-jj*,)zMդY-t.c#:[5'lkdhr@L7'J*fb%@s(V^.X NS3T/ɟ+'p(XjՇVa3sQm63T-,9<F j(YWPK6Y%-?+2Mqm˸iq[V\_rS䲲I&}f5VLJ~3f0k3de(R.K[)vYN0\H0LtmUR^(mus"=r}6CӧOgYYf&S]d2%zxnIiwV$T>1sj mAt-,WXrkNb-4+hU()C!_(x+(=D}Q%/16CmXXXXXXX(t, 2yc~(l uZ}=a$PTJOE%Ѿ͐ ]ilq Mr[*̾*JaյX{ J__-ڰ߄ZEe@ 2FI_zZ[zb’VV jk SPOV[-Hn!UكtDYzok~ 
V>uĶ8/)YXXXXXXYF*!:_Ug(sxs҄Ar[bJpMWaϗ!,wcpRNPJܗ&Ÿ (y8eӭ!*;ۍMcǴ.MQq S_^j#ܼoK旅V ݵ6g&=eEЏC[rkZb jЗi`Rgz5&QD^m:Pa!<2 Q<|,GI[ cZڥ "an~RD$1:fa>l lSV]>*䮣Y/}_\]% SS -XWaɭAD+cb-5)9@ +X׿V ._זdCZHSN s"mIIy%P"tpо7/ĝ= ߯O3.::ՋW?vu[tu@O :cV5̒[uZXXXXX.H6H\tH@=u1H{- y@薐,H5%hVH.K$:fZA6b;{n^JȐGD?͋p]aq -uG^gxspv̐9uiD>˭,[XXXXXXXXପ黢:V4IA#f` Μ_y^ 2>[>I4`QqBJIͤU<3oޅV 'iۦf(VrkZ[ CzSm$>^~ Gᡇc 60oM-q$n54(A6 JdIX q{mCRbnˮA?)pϢ%֐7NhݲBd9{tt`FgYFd@D1|V|Kn-V ZXXXXXL9gKG/\O8 /aKr2TbYr>AVЌ3,b<8o<nϛNG|珸eԵD~6,}<%\<9XRAzr`ԃD4mY 3bU-V3{y 6 = 2Հ:ߵb݃%f흃ms&6vsجMzjآ}xAM":Th4tَňc௷=}h]ԃ =y}ܿ3r~|Fcxn91<ڜܪ-ɶ Hytt1vч~]ز#FwgX %Kn-,,,,,&MUK?_3 J>4ja x}$[l)vvͦ; *я p ҁHLJ/oⶃ NFo<{d}a\#Q~%YLrFڋ(Ў4 U*.Ø.NHp+!RGj_o) %,<7˨ nRb0#$!J $uT+-l>u=w%ҁ[:KqEa\y'uޮff6BM)P GI>6; [pfbk̙;핸򛀎4Hx[^Op1 Izh|\ƴ6.:3ؠ1$Z?sKn-V ZXXXXXLқMnrwGmqo¤ARHt$>܄#FYrsw‡}hK/X:8[`jq.NQF: } ]gaj ,EOg'Y&z- F@DV<­U0MC;Ffǿ Z{b$ĭua<C~2A.1ui!v3];576GYCՆH,Tj54G0z+x>-YGKPѭZ4#^>ywp$ 0B\12'EJ]p6/}?{/p<\4Řo|[zda5Qb݃%=ƅ܄7JZ!DP ktWBh@X H"Sj$i@" _T*3io3y6 [|Ǘ`hN鮳(>(F8_ݍ-7ϙ-S)H뎩; C)Z8?GN:ttcA סZkb4`Wu76n;|o K j6J D栫ѥ$m8˝n :Q]qacF|F_|5?cӇѴQ .GnfuݷaL$z{Jl,l9w.6у]ؠsHy H|C~VڀoMe ({z*%L`zOF{[-oבio?R|Owy/ v9%, -)RbYyÏ!n=]o.M>yAZ=D5KJ`F'c_n%6+JIg2Knj=zQ-ubtȢ&$6zgva[o /pӜ.@8Ȩ[[|`sZwbxtC#|> ::*@'afʼ/J34bG}߂%b 6,AW\IΪpn2>Hϱꯟ _qjE9%Kn-,,,,,&Ɠ[-C^4UiVY ,dz3sc=)%]b1c `HԁhihXG5|̹T5hhL tpՕ?u5p0ɭY"EѪ,n~僨ӯw-!E ,,,,,,,,, Ĉ߃$LF+H I" ?n-Fy2l!eF@DZqD"0PKkaYǦMc#,K7xe/o nf-F (ZmH.irj0ZM{ܠzRV׈\x qeX]%WXcXrkaaaaaaaUHJd0EE$rDRt]Ql&t:ҍ8مRWnd!i6I=߶FP"hHH>[@}PTvrj:EHZA$XcYacy5OO~%k`a#n7!it%YN%Nޅ!vFڙ̫H*9M_+":? 
IDN{$DGMtmwu@#"9$VI~v)z[)J!qbIhPZ'?%4"X}״^sfVQrGx-2̶ ?YɣfyCD~_>$Q쒰VȜ+$#u^n8cR'Id=>O\iLҜ %}§~RVoȚ\׏cԢ6%Vu}kOH,,,,,,,,aKm,\r[_VuLI,dK9U~1pJ$!M+5 x"+a6rm]sYV6|n5?ovJHvEu8=6*zZ#3ώF9k ,x\H!q3}4]N4d1AH^t$nmrHX25RVC`)䕹$r-.Aå*FJ5* %yN(mn9 Eیa 'Hz{(KYJ.ÏCtdn½6[ 5ozCGo:$VB3fDLFVF&de[R DkX.;$ɩK*«E&pWJ  ?ޒwдbcDM5kg,|6-(WW@GW^s4,i<Ò[ 5ʪKp_s6)iZ.%PPbWS :< -'!|$msf2 }G+e6;1(u ǿ#n`0MZA{9Yۚwwd!ia@L NQ 5͗M4cm|uɸ͏sB^ZHa]Zl;F4Dql5taZ,X=hWD JHN I?yx3<[˚>"tt/Ri|YkE!'ɚD;.gL-gB Cjfz0hڀ2)ZUs8p[3J;HcrXYzYO$1,X-Xrkaaaaaz\u3ぺت,#yd^W#ԯayxcGq!8ehHu.n8͡ :*U4 RVk£6%un@ jEf {`m6N,mW=~K4M1!W^zr>ǰb`ɭAA)uX g QW6]=!]"\ww?F11<@F3!L|TbHoq-@B2p=5dt$:ftv` gc`fad2Mt.^;c+s7szb<#ĶsSJ`N"tDwɇgpVrl$~ n _A3P3"9u4CĘ.'),Xmh>Ij %5s$cRxY WPCԲ<*}N&z#uUV͢$yY"Kw .-9 X hgfԬ}}f RM"6|Ĭcaaaaaaal[+'AEVuGN[zg bG>y͒[ gZ)3~Jx'-r˃TDzك%HSЌdtlT$hкz7',${"42Gqb2tHZJse8R+"[&HI#խyFM04V,,,,,,,&%Oe ԪfR"&uJww'>O?»O~KWpQo\3<YX~ӟb-W<|{üyPxcl\6Z+)޵ ,O"|s HR*~ą^/5x׿5|F ChZ;I'cv!QjZa nCC$0R"~XXXr ș-96mϖA ]]|U2- ֩ h4h6188ux:̌ Z676Lj3``l>}4AtPcɬY3u!"zn.]5VXbeꉾzП:ֵB_ %J_-69.mu!Чb*U@Uo#pB,^###i+S[jqǭm%vp޷áYhmoW_6ȷg|>8]\r0}nO~ϛƩ>_]c1oW>?_'llԚ|4x;a^f"EXXXXX<{; ҍҡ}}}1m4lVq1cƌifAJg?U) 8>+`3 U!*w]|G׾5khEERQR" ?u}݃!3[ju\rtnT9.zsgCw ;x[߁o},}W nrtm|_9|>}k8e/7\93v߮3z*sQxކ}{%tbЁ^TTחC뮻?0sL3CK|aTnXdyϺc`3 -cO"_~YawFCTDd/*X[]/ZqE/3j vA) — TJ ~xƹ!{?/?opÏO~լ뮺?oϞg>Y<~tt[_*%&mK5N8簂%Ԇtba/wyA8tM7?96pCϦnj H=#%ZA'>7 r-i*DD :V%վ "'wEe|n:&"aXnb<ʕ:vmqH='Fu`4PRA28I]$nɗSgeKѩUЦs1 VVidaaaa1u1DT}k'%ғEVj|;fmpG72~& ª WX\j|wM-HGF/$l#J*R+i 53 "1@wW j$.j:\$qi$'u=|tqᠳiʑʂkzArnS҃㻠XP$U/x "ʍo'?iIk_׭qd6Vb$wߍ[nv9X[AlmyeK.g?YQឞ1Af8*:]P:~_ܹsqᇛcMb݄t٣>jl %ek!?k]Kn,Y78묳 1TQ%ewm@g_ok:³6 k/3NdIilaaaaaTpWO\n֒[I]w/ N>ds^8I۵b-\u d~6!kqooI }Ymg% tDzY*|}nCrkPXX}8ꨣLőuVD K*ٺ\M_&.BY MQ^kYk7`JO ȭfKip]X`ÿ UMWd +,guKA"E0ElE,gS!r Oi?Dpo?z`b~I@FdU wqzc*WaվH O­kٔ(Yi,,,,,MHg*mmJ?x=n1ua$ %+Ux_*XXXXXXX<. 
3"@SO5Ez>,$TTqdUVCk[Aӂ0ui}_m %]]v(,EYp!'t]+[XXXXXX<>VZ@fKˍHԆ%*C1IV hNVU%A g"w[Sf͚eK/[^}n6,$T zb-X |,,,,,,,}HOK+ȭ #ٳ\ԅI"j je2,*GAp-xnPUg.Qڰv5 ߼y0}k+mP[ m Kn'u>We(A__U j9ypA\Yp-.,DhUї-A\ "f[A[+ KnWVaJF*Aے۩ Kn'h[tOX!Dj [ kaaaaaaaa1e`ɭŔ%SZPwbW=WXD.cL$&gUwPܷ_ڄ7;ȟb%&֤'{U~+cuc]M5n;YW7 ܷ~)N8 ֖4,2b[ )Kn-MhuLpNǓڀ*T.*x*(O*diO+k& דȚLuZXǰbDZܤe5~45&.5[ciڌjdM6ʺad5~=opQ,,,]XrknBK(Ldy )D>h'gO 3)D2ѽO-k&pя;qVU% &oڀB2Q<:4yY5O%B,,,]XrkBJt5DbL*扬:g& ȏ'y̟K(e"yn @~OǓɓfN8(O%C~[XXXrk™oukѧG4?iq֜V5'W>k7ٴwM6S,~T*O.ڍ!}duEy-R,,, ,XG[&#ZXF&gU;V'NgJ&JDHo%:KΓǂ]C0vUAqM|&%Cǂp-x</8,[Wog%!|xx~ww?3.C=om$I[0G|{He0v\%_ 3NaY&.qxIT^q+pE*H^rU/ >˓k^Aӧ]aF)CG:I)%4G{Jٕw&YlT"M4J9e*JLQF2R0R Q 1E7tFr,#(L]T]c9S_J[bЉī2N$awxN3qbZ)wUo!QL~I+$ WDWvF&^|L^˲,%F-|2ֱHd"<(+jҁitPZx@7/6ܻD`ir/l"y`{銆Ji53,3\Q*yZQߐG&.9_ 5<6@9Oc#!Xn 7> W=yGy噼gM4,Ò[Kn-&%DnI ۄZqZHX߾?^Fr;Jئu %X(KЭ Ů/߹_4s)r sCnE" %[oio-oc%[ lWo}!ۼǨӚ!OLk^yŶ7X7H[wpEècI'Ɏ$=y Yq9<,X:^A3"X5/&eX5Y'GW};ٕ|/^+$:s }ww7"a2-]"lĴ!ɜwyܖGpxk,~|%rr+&+vCpU> ?VN6"+ݏm+(r,GV0lԂfVteKpA(GyFi9eUVj=MLǺW)s2_.-1>Q4*z)d/-||F$m&aɭ%gR!U+_kGJdS믽wxon;FtR 'l;*UAeIr+k/n1F' wtڟ_ i]@nBm>Wm#8-p+6E7o=|%=- ӌqv@> +cyxoBP%aִ*$QB%` B}Z{q.EMxHrKo~, /S\ƧU/gc C+Ӱtϫy)J"Kƫg"W-/ Lfv7H^̗(dê@Pc$>1nOʝcIʡTϘon0{ }h Z`7t̊{N)~2kd>C 9 AnYF u0JP$è{8Ƿw.F; WE`’:h@d4P6tA׊S4iL(e~%dƪcؔ뚉FHܬsO9;qF&S-3XrRfɭŪـmPV ܖ2㸯 Knt#|/ 6L_oғճbT&EʼV(] ǪÆ!tTI*NmuT,|[$;H+ȭz ]'L2opg!yKqycAvz!aSƩH|ҿ}EvP$ガۊ[#8SB>jȭ<3-?%w ]z?۱`ޫlȆO>bfaz[FdU[ $m %q=$L#o>hiS3s)k`5 O-Lt{mW7u5.owd8ߘ/Lu c란 `~[v#A- H R ?/#mj&;]ezhluK?5l6l:gCLLTp;)膼H׋s+d1mc#LFtn7QS.ÈP}~-XSbyKIrGfVYkRCud^5Zk>skDLɈܒpuDHM4IyDˌ"C* &kWPVFw̠e4zNR 1Lm8i~s8Pf1R[&i° @Jyf6m?C=t0I04H!2YxZb߇ 31]uF[c1.O)r!L3SbjJCLDYyElԏ92b# Ø8+t>c8=AR3 G^Iǘ /OIIp0?Yb߉[F,8Lai'><1,,C4 rѡѓ:5T<ÆW|DL(maQz (|MD|i7԰YQ )aMJ1&nUD}}NW0ƋϫH:11J >0 To`T?nЏ3?KFnb(ax^ZgRߪE_5Vai &}) CO'CBr=s]TCjO$GN[R>z8u}sMSeI|ڣ8eNJeOUO$?s<#+c" Edͭ2_ 1k ?)#scLJP.CWq¿ژJ6q{x@*ʝ:{J7S F5ַUW_PC3F/ /ʪ h{U\yWҤW6Yƞ0ct Y`ȋ`x/#*&܌E)A_A;f+2"c|ꕍJ{n LͭIuMyHs8w'iUMC zy|[QF(İOBHկ?ӧx$HAd/Yȸ'?S]9_,.j[`n <+rMӃ]|F)r`z]Ee IP"I~~KV'j:[ѧx6#(=iLɡ-*ycVٕ61 
S'siGi^{S)+ˊrBV>E/MEC-\w<\χ`8Fij 3M^^LnJ's/w)?Zz­3Kn-hn|~XH"#+ c~BEN tϭ[}ꊱʲۇK|8.6L m/Agngz?݋}" MTzi%BC3&kAʚaSQxoH eS[ьǫMrWjʵ'f}ϗu!LKR}o5U!0Kba>2޺Ɉ~jM_ZB+jcn3 g.| =r|[0v0<3t-$$/Y| 9a#DqhQ"0e|j_ק_c5c& ,bˁ7VCL4Kbް'YR\ fI]Rn"]uw#ZhA7ߘwS)iBde=v -hϯ{νx7^p@t9JbX2ެM¦cY8بbLc~<TNZu*M6T4O:PsCSM~j9=Q]ľ,Ė7~7!̚&Tf똓a*g )B *Z zf2i,M_3ݻ6l[-;|pބHl_ iD1٥-N)T9a27X*ڊ;EL麡jp9sQ:`Dn?D=LTgzNc#/wV fp}{xgoA;" 'aY w$a8ĝDV4(Z>pPTLA )]yh!Ⱦ[5^CB1E2-Bf,c騜)V$ND^r42 qנ]6G~b|W6?sɝзoFx8^4ug8yC%͘Z)S|0ChP U3 GN#PKLbѾ*7˿St72O#-UUSWmuPy4}I4}HYE)4 ɯVf:kfV1u⾸uUl4\-x&x4 pn٠ӝ@ P}.[PY{DBʣVdo7d#MX&R[g-,,T,,63B }UG ~/8 ,IvF<,DXլ0mxk\<2|m I&IR\'L8o}2 j 2|qyG#|ܟkC\uO!W`F@R&ɝI?Bucc.ZxO.}zfBP!δT,9)[aα%_ޜ,{FF)|~Fn:Jϕ%0:6@+A+26jhPʦ[ѪveyW>7&=ב.cɔV ԭC9V4,,,[Xrk@}cI0Sx}R3+ nk@/r3y_4~6t>{rĥIYF4łF(@Ttm,i[)>=D&_>'6 E`N-Mt1zZ#n5DN 9hV7FR-22tƏDvb(PR#G_VNJ=5A(y:K/) cDz؃.$%i0]fIi! ZZ!Eo`fi<,F 9!d,4P"=pތv4ov Y4SY'qCx8}qwDPOdMMuZ$9]Ohy8c*o"QdYmFLD6f;1l?ް=N;v/t<0Ɨsnyqѧy2ѭYi;2|{7?>g_|nttCyh[RK ?qNtù_=1@T Pl%Ab6yNֱJna1aɭzuGȨi2V0mIg|i`U7o=>W=b|㤝qK6E?.{r~Q dh]^4-%dmF\ClAiP?Rt ݁O}{ [ř'B|ivK{N|JΡ 0B&DngHu cyn,EX!*%$r188 +qmKMb+|[#öO o}ٶ<8 ݉% !l |UhɎ i0ƥӔ]- oc,t| ~r<ooḗ7N|6س?q7O~o}Y']7ctzMu64-訙oHXX b=1*6#d>IՁV aڅG:/‘Fn/AbwU 70^b-zN~q=o\V/RICKoWnŨ_xy]7 ARY23%!I]h3f QaFBVG1J"?صnnl3exw޻_pၸz@Diy[g&3e: (ju+ yl# #T*p`tOɥ787?=K:{E &馷h}3 0BȄaBH,^DgȎY13Ag4 *KP F/Daat\c|6v-wݝ{`6#b]hhᆴNHHjޕ̬K4dͿژnaÒ[)&\7DA9*Ԩ,JTl! 
=4mL= C.ֿIևo!jhe$$g `bV [ eځ>WӁ͙1pw$ 2z32j)ȧZ^Su Ӷ>/%cCRmKpgta< :IktOPS/2ҩ>dmBf`cAlN2!R3C +QZ2] p3@l0+᲻XR>JQ~s-a5M(J-[2Zy*<?vjM,4KK53ݒA{hՠV4ؐ`-mT =F*~{pl B3D(s3Doduה[͡M13&LVͻgL  5 jT,,R 53IHZ$J7 p|26H|Kn>40Hf=*J{B}&}8Net&()Xy mN$MVh5$HYUCfFPFݭq%iLȬǴЧa@,M/Q i!R<\4ωcx2i61[7!A G9LSsp^X U^`uQD0S{f$_A/j[OaÄץ"2Opީ-@NOQX?)LZߣ gNB5e)fCU]D"46}ixAbcMGs$(GMǏ<MqU8$OCpqgaaoxZXLYH-\92pA9:CD&߅PN棣Ob׽ND~nYa51:jUpTs:$ +ID!MH,~F/8f<6nV'5YY/!SߐB雩ԀXנ033 Kr:H"QrKf22ec!ཕ$C9H2GU$lRf0+˔+J'FԈi3]j7RR6 @FLyNK5ecI2=pٸ滢ED"yub[Fp@hMY!F$hZ1+ DċJAT *H(u/0V&| /λ%/ eVe9[uc!by0@r'n0)m&!I0'e1޼ :.0|6('F `#$#Z4 ZTENBńO0SYnI5[K-6Zt!M2i LSMgN}w5A)<ƜyF>m\`Wy5 x'6%[zH5!yU\%MGn!>˷s%8q]:*TE͆G-TM-,8bnqtvK_ykJ֩5,#YuW#4f;mfƴ l@ԽNk7"7FHnD,A?TֳwQ*"c+s-,, Zb=QR҆;H6Y}eeyRyQo ݸdZG^K#F&0Jr&-kܱݯv9fRDpjzX:i@ ,TZ>NeTA.')N}ԑ[wbf]Y%' D@k5yiVT2c e6>e{m&Xr+ƑD(5y E%P! F嬉dIGp |/M*=r8 2d$U3!O«U{y¾x&}][-EU 1ό͋rd`{PC&\U:qf#|s~ZXN-7}9*,Dob8tQ/^"YYj#Ш3U Gh#sx[cA%EEsf*=wųsh]@~C$"iRƆőmsNy^F$u3LQ KP a-|c7\lgb&>ݫo˟'=*E#wNO}`uY)>!8]h3KsKn-h8T&"% |FY\%mtQj`N7^ft$8`HW>!L[BJ!R&b) N3 `=-`Dq hpG-j8=Ϝn7`+bJjsƨ#T0aAr' XFb7֑$ONl XfHúFn^3r'%Og4WvxލBlUpk^{0L':QXV=*[ΡUHhf[6<3%Ԇx(mȰym\C{gŨUhl۳ =(N:E8Eb3&XG@>k'yLsBI],X`ɭZTZYWQD}evMZOŕ:$YZK^!@n$Xy_T|NByBmJ URҠCw1:2fUʘY1RƦHR[ᠩ*bn#c!VEo X4L|!*FRR 3%[KGx4ilѭ\!@t5;(CE3-qa1z&eCYZ]TI:HHd;HvK|vOy.JA>2x}TLj)< *.OW*N4B;дZ1S73vIڐ=̓:E\$Le9/*yzu!g͌eS5Nr\bJP博+OB3 ʬbze,f^^?I2#/=3eyUW}3-fcFsN  |өńMaURY-ūs,TlŴ2)|nlPKjcnϱxieク 5\L?1In[ Љn5 ˫$?jj6#W-̲4ol[XX^lGJ^$"|n4fObCѬKU&JID #LRi8zg%:>0it҅ LkPY|/ʂ8(J Yy0'gVgy2{DpiRjYW!Ct0ytD1+lLe)J+JoDJ"Uc]J'3#= e:uE01_L6/%FK$LIm4ݩvaU:RtRR)C&==!xQY&q6ظ Y'[l#Y /|(RƟDؠ5ɗRfY0~L4[\e4*wcoCe^וZMnbӢ`9Q|GGF֑{3 5Q FQ ԑrEex2"mQv_ZΠZ@E4uQDiJ8#Id#$mI_N7DQ.#!F@%EeHIsxE~Zl蓻riٺ 8gD3Ekj0T}Q`^2٘adb8[(r 0-0M E0m}0iS te06IG/#,K,|nJIx-FYsIT2݇6ҠNZH2Cl=FUKDhY/7i>Gb|}gD RQŠ+rxvל&0eeZ*nˢBe-6 ^;`9 M(J#m~^m*'^Y*=KO>)*k200jG#5RT{FEϫaOW*{1G~O͋0Rۘ$%,TRDgt}hc/"bj?Q4*4yIOE  S\՗Xa2)7F2rŖzKOcF%AW: Wʨ=o|ݾ䷁8Hy?{?~aϽ %3N0#D8>WOWM,O*o+njϜRr%(M2gUz+JZDi\iƍ«3ykw?cz)<G敐HӘ]9t/~ʧqt\12FnE5e2P#|.h5BGuA&ڣq g&\xG 
a~Bd|ޚu\VU_cY]W>ؤIt S_i4'Y' 8c : boNiFxh ԉKw$IqZrk1,]P!̕XQRj@T}@& d~㬟t PhuD&ʵN9ޣkYzE$"!>3#+2=|1~F?_Af YɏkR25E`Th ױqe;rwrto![(ݧ""svur:O=q mUѮNCwf}6FZFQCBFO#?Dl0H+tKDnZrWIJ<qS^x?Z tiKG:~Hp0h,Ōd>.諰 nGٿ-;saY`#J$q3]c..F!Rn 1Qm䉡N__YV3ͳd]{SOݼ +-t,kb<٠dއtS"9kc,'Vd19&KnUrDn{pGQuBBG5Dz&"H^EAE)"t "O-=_y}wݷɗ5 6'om;3;3"*KṠ3]lp/B=Abjj0}QNܲwnwc3,C@)J̾mɞ%AI 輩OfƇ9Sj5usa@ҡ,j0nt|g^sx-#sÑۏ*=IĊ6Kk!B N\k}oM*r)«7Yyy4RĊOXc0ҬxI5n1avJ讔Ho| fMޏTC:4xlש*E$"QT!c-L4Y`䏪4ơ-:cbt𺘨'M |ԶN-^y70~Øv[#.Y&ì#;QIcRܓdf (7,DS}8x$%i(O'F0_ih˓.KȯMV (Bl eM QL|Y-=+%X殧ě=;M汖-גfň+ʭ8o!߉f `]Cһeƈw CrNG3`tg(=F^+R(_+ :"I_?CVNYoUegk/ oq]/禡\.![Ȣ̴K2y1+ꊧAc;m?n= J@iPށ:́ʊebYy:bBԭao^[$2諁湮2}!)l?~k,'hkkwvXȭ##?Tn40JsԴP)۠dZ,b*APdhV!)XJIe.&;NN)fӪ1qj4B+5A#-P!Wa~lvdT% R$WdCT̈TeNiD)9c:*ï0kyXijB0~jּ1{oNHOrM|luyiS{͂"[bNR6ae[WN!{6ʀ'pcƲij$:F$r5VX }qy %*wcqAǴ]yE`-¨6 +QlƚeGrq‘[GnGn?~J )*LMP0gujL9ͥE'RvR""UN.DM*_T9<^/N~CPJ,ZuHj6PJw)K) YC= S*,ay_,HK?EZM#|_lb gCiϱq\^;I8 KB跙Z34ha? E%T6CW0?8dl )喤"iu dOĭJ#x6_&,[Ո(ІzZD@UdݖU?Y%fud L\]AѼ3c.}>Lɟ頤47[GyyyE:AJXdeV֛%JNERWd~I=+~;?P ,3Q$ֺI9䯚 h;Y Lԙtj Es\˂+![wm֌Vyo/ޗ#*z#-%K(MdXY+&j OQAT DB(MloƛʹTšD^,|\1|,ƪ~AYEXǢ5(\2z$IJP ZbI #)_MK%H5s|eLrW[@r$L$- >^apHD^D<#4Xt'ݍ.0ZV[YD2Ht2J3Ӕ;ٰ\I3DjrHYFʄ䊴e*K$+-QyubYoV[B~ =R=0VueV^>j9_F,1OiིHl_pXfvP٠50~ un.e^&DN: 1֢w=sp:.~uD:PC5aiRcǡȶE_ؾ4&3@-B6,T`"T=HXR:F"UT2=a$:iU4HTx_A pU4B^4˅z qpX0 F y0,˭zI⩢tmf$ T* I6D{yFR32,@U!#AȐgmIIdA&&w@>z|,QJ1購zBL3]9y:#u? >9VGb833^"}YY ߅ /V HW>"""$ƭS 5[pԬy bg'5RX:aZHMf!0NPbcLˏһsIqa,k=^. 
D5kDՠ7YmUTm3,e[ӧY }YmeL2׽U'd|Td՗x^vSʴ#ϠrV%_^26Wuf\5Wu hFsˬ'(5&2|gU0+!qgd.mS>>zqpX!{ 04HKiHOBR 2$yR"j;] }X@@$ >?МQ)x$ZֶYdVEq1M`f/2-$$੔f ` "N{({)#nEjdZ"$=eo^zOWsZrjTDߔ@4$^Gs(z) ;b|ӔYY2bJ2RFуb4PE/HوF?e +&L37$;O K@٤,1)}ʇP_80oti Yԛ(t>4H"o>n;V+S~<+b.Y^&jej6wyt5H?4TcD-j?kРx9b&ۀk*Wݐou[7 n-aჾ$Ƴ"(P3j4>U> \;ϙ%RᕣVƴ7 (GǠxf*ہfZ2@>ͽ ϲ!bH$2ϕgg/6Y4 |ܵFlj$nEbdA2~rmmfG^÷ox2_$Ju1lGDUX7R̒ ENydMl6ʴ2#e"ˡ_n!a{mMҚLR d9-P,TL&es8Ө[Į:':adw*Lv۟_s?DԋO6<6CBD85_07qgBx%HV1EvsˎD?'tv.ƌc+te ޡSvXxfK`\fKpv>‡WzܔHEAX3в/7ϼ-k KxDf~6yrhl#"УF"iM^%GyV]sf6;L3"c$ 6[m5{t V|D"9ZhBL[rN6+` B_2xb Gc-c෣&a CXC1jD'ɯs)7]s|WXea3 v=vrM:ƚVyc@dK$ި>HdpߞŒFTe-^"d$"*MYhk_E#>WYRE$2I63 ۭ, +r`MQfCPe)sY،1U<)5\x3 嘗Cf3jXah;"k 'Fb4:xp֑[yۅFnTGcrO"jiTyNuׄqE^o8*6G$"|MW. H2R|4 X)j?%{{k$|ق\4ڟ:q52R40P30(]'dq;aFDYkh2*2%7܃ߘ fL9/gD<ҍ~[>ѤlI>Dl)ifa_3~՚,$,$ncw>͆S+Ei^sQ]O+n7S ʠ@srEaCSYh&e))] tF|>;fftBp>b!,NZ[N˳+{۶"r#HmD_[_S^~w*~uՍx䡜R2iA@m9(#uQȈ&_{Am~Rr+ ac9`e-v@db=w46[s]ȭ## On #OZSsW^2)]ޣi3݄͆dlNUa*M1 *y균BO:8DOB&.'k[=B̐v-M$7- {DRZKnrX Cy˨>+<eоj𗞠{$#ݣx`m SoI;G(d}YzWj3+$$AL r \7HߣλK;`聉;byͷW"RdxFnm; aw-hztiMnNSm_%IDAT&'ώ4ʸT#Y,hR3s[ ]2iLT;dҡ-7ߺNr>ۜ" "疰[GnGn>Jk>XA%$ Lʿ\Q6~RѧnBi !*(^eK_k0(Q}4)R>d4e_J:" =ULU>"qv6ⓤl^S)4MͰ_E4iNVҲ" }TR%/ Y֯WQ*RlUSb܊ eA˭rU)Ig &SLVKS,_-6+Yϻ5HL.4d/?g4/Hb{d9\#Jޤ0w`UGYF%6ǨD"3Yl^<4[]S)LHu+ʮUTV Tʷť/rMbg졽9w֠ZBΡ} ZGn,:,k-M*u?Rixa0uYۨT6SAR3nd+@1 4m FDk"YPftקx/;IQMG 6>7"ewZ˙2N6(YM=2R ,Q5ʩ.I2CQjiLtNo2)#8hR_UߤBd S"ZK$i \:)7BEзIcr͵!-k(z }`J^Ip5fMIDJ_%P4sH..M>K>4;]|:3abM8J[Rm[ryɲOGyXꝸv7n>2Ҭz,HY~+Tʨ =yojԾ,+v-I4bAZ 'GYd,PIYsW'%QjU*5*)`1&` 2ƓRye#'EE"Wn4益פ75d9T6HҴVJkp`|)f%?\FI!J7GYp(jyb$"jP)naܸ/+yKr%Ȱ-}gU)j=@C<$XXLwH/=}%ȲW21+PJ+5a>f;9+Q˴B.Z9vN:iMkgzhY-6> q!g94HQe6 BZ,S+jfuZf}UC\nX) m/W`NjLqgeTn#M`MeeQfW;88,X8rH"QmdұH.$ͦQ1DdFkj.\*:y}OI>m/j}0P//k/c9F3hY*jոj))x }Հl3&$^"laB73&r%bkCk" Ӭ,+n#1QMnM-?3oVmfFrL$`II$jfd=y# )Ζj<:k[6~1R/BwXM-ge`-0si#+uFupX(Nval]U6e[o7Ŵ)4|HjyP85|L[$8lPY)L&Ik *7i=wv"2 bMeJ2͓utvua0rr6JPU^АW&A+ FL}9L<Ɏ VMj2ZQ`Z^TSZ4)$@N_ mAO"+\ʝQAGaC'?]!Jd(z6=<b_?5bD5ӄ6+8ū*$mi ;P˿gqeR+9܍1 T*(.S5[ as| (B0ԐUي裏wq3.Y. 
.ؔyꆡfZ]W.p嗪ܳ-5"5˂ ^I)Y;:0h`'Ys|L8[6*n@pvсޕ}7qyᰯ~ }$ꤩ(5Tb-G-,o]Hjԋ^F's%FJ{)>}$ҿ6\cf[aܸȷwA[[;0XeUP*WlАdI$LuV {lHVAD@bo#! %qh XZ(v-UInk[ޖ )l>^㟢{$(i?8s=(b;@5d?RHQvU=Y%@Y/QHjm]IZȵIڈD8DW.MU9; kvaLyw(@VĤ7 l6XyJ$F`m&#Uj:M-B+Z'i6CׇJ[sξg䰺BAo{챇{)P]о5?7"ZZ~!~Ҭ?$ eWDw]4͈{I Vg!F|$oNƗΟ-já|e_S(WfCU̞~tw !eΐةO~*,{YR"Y9u*C' "C}Z>mY>S/T)N8D9o.+KH"ꫯK/->;DMhat\eu8%Z}W6]1n2JF=|1a #|Ys$ڪjN\Vcc72dv}VJ;rd% Kr+?W^9&2̐x͘>nSN!-h US`|$rwP%y칡B^ֶ* ^}U#ZڕFd? 3؋8}SҼLV Qntڪ' bĎw$`znƫ,{)^DX,VVyYnU?OT.vW^yoYQ5θ:eՉڵ IYT^'1XLt"UIn=χK5駟6r]UOn;ȭbrl))r)~.e ۛ^{wE64$C"f[ ɴFthTa{W7 m*t.*/⸣&o)S 6I8dF$v˚'ى*H6=\z'P\q&癕_-kO[o-~=&l+[P;`O)էr([<1-Y7R_{}~/Ldi4Ddeԕr̐Qi/ME%,yón :[cFo =w&`Od9iyCdaQ\l^*XXIz{9>//#QmP* x3q}/۩Ӭ]]H#d(*b +ic=SN&.gipgg>\{~I޾'z ><_/`wQW_Eb#n682;yLE|zŶxԢȶv " 5H,7Sy*cOP$m%p1|pđG5v.n&˸J":{i<}?1zYPYEd55)E܊ԪƝ/"%ʲEAy*#|( C2 /n#uX -ŝ(x$B{ώ}qI>3>ؓ8N;l?,HtӍ8gLDK3"pj{3p Y-i,o-`a!7?i|)?WI"S,z"T1rѿY`Q,W=O£=ai'N^{ w_z/[oA:I״Y2F6p}um3f%'|=|:F5Z]~HEEJ#KpIO& ,\~|ZFhڀ]3|z-0bQ}ijDXE4% 5($ 7hTT$H"DV>Wo47r$5SUSm8kvM".M[}o*͏wذaucDO1eXVcXGSE :l}_ pff}r<<_4 slΒ|8ob/썫\r|'3|⊄ KN[[;:;1g wUX ~?> fs)*i^.`l5$ Pp[:,"ޱe$aFNK aG&6 AC N-]_㕗_SF3d\"P3ۻ8щ^3"%b)Ive )M^ϝƵ71;S$Wj{=kL8s0sFozfΜmi\,x+%?"NFJ2 (8M[ǩ}/ $uʬ^0sfnOxYn+/: r K-PKuNy4>ˮ]$7xu(6;%+rkpŕWCw \c1{A# * ^y?p!V#Ug0r&(;Z`WFWg;V]eMXqq//#!#Zj0~p&l( Z!!хeVXo! rO4bHZt^@򋔇lB;67ެn4v}*. 
y{cttO=!;~0ld-IvWGR6Obڇog VgiƦST.jׯ&-9h06lS >KW]D1t8_oC,̲8#qbxw7N8[m@{6)gaK[movzK L>'x4=Ql\)w \\ڮ+?r:R'B ]7~sXnkbyž{}O=$ypq1n-ki˧rȲ-Xo&ѦXc5Ŧj`d|ZuBSeGհɎ{}&L$#Mi+Z!XYxtm>3S_5j;>n_[2:CNB>ǃcV<sX|qi6Fɓ'ǽu RFV_}uL8,3v| T"fT'apg'r^'J3IڳɫAw79t;֎Z}C8 Mc$wc9zMx.B"5a`ӡ\KN j(ԁŒ혙HNsu!IbhWSq2 Լ[zo~[f_ K~<5lm5+Ev~Vs`4]/XRp󺈈7<`#CNZpmYf6Vf ]hC K@{$AA~pIp|b cJr"l֠Kɫ=<r(9TLFiHuhMF¬lLd11 bQZ_~ڟ99f5C꼩LY ̒~]Oaw2^ ܮ:кi+"#9f;({isk(-rOqf[Vf M|eIN5;EʖxsYHUZ-W,Y"!x+hKJ"bzߋ"Ӫ~;nMRiS ~QVk_$cʱ&6y*Pi (5%Y0O*VS{Sj,*=ޫVd:m:|vU`)(_$ GK+xqLlg<,yJ1,c&u,$Ffy^ۘ zC3B%I≤8H2(q:t(M:0AGk}RzSVK=42Jh֒ا56T AsǣT XZK.2ְ/Rڷj+|{߳)$ǹBDu狸⋐(P)6^.:Kfn꘿cב!D>#8l*#^h \T"Ds94m*ȱ# fQ\7%x5D$ǟ##U6ET*q]{fv|QD2ǭO,Li>9V yiIZ8_s Hdof$C9WܒJ?|7Ξуw6j$hPr};Գ6|syfʩ4uTFZ`jroZ`}1Rd!!,qq`=l/1gV܂f2Xj̅&Di䊮jwSZi%ZѪ:ohz<P}O~b!YSg^N)oÑD|,J \Ԁ]i+|D"ۏ_5gR8k1|@ArؑB:ț25.);jPf&d@վDbB<[z \bo 7)PQ5AVd/&m; "P?/R^C5W\[1R/j۸ӝ@'eMҡtàAi$ M a_kcC`?/fN,bGڳL6JQJg-v tAAuP '<$<ɏK@{$cvyg<;Q^$ÂH/ʓ%1;("z= NeOYj?ifGϖKĢ`yTCt*Hʃ>wޒ0$}ӐTڀ#j\y|ҵ*!C<)*)ܵmAՈݧAG#ţO`Ia`<ɻKd\P[YtE_$oce3;S)ڤi%iyx4J+=Oϐ,<#:'A#@P/\é}{1yt/.;瞞VG-PI$ѵrުJi9ו j4XiP*Ux/;j|(*A:>6[oE_ĢMJ**a3 h{1hB+~8YgzIW'Z_t<'nm !& ׀_}rH,"[_gGbdB Q&v NY|2RM8Đ$~I 36<[r#K j\V^%W<7awB1 ?뮽q}s;J,<̓wWᕗ_‰'x n[Pކ /?0=h}xgC_:N;;x\o.b<83qW|7J5);|_7xSxg.LE^|y{ko }zsG.<<#ٲӖ+In+l?b!kI RO=tvvu׊accU䓗>=%=tzi$+UDEl5A2Tj0Lwdcq5l[anP0>:}B63eRv[<8 D<>A5e(.-Dl'y ˬʭ*F=.4*G3dHGe+6ڄd [_㾎 7Xj\pRLG>w =eH9`Šm]`DL4!"hMV]x,]v` 0|RpDr묽6 Ȓ|ʟuLRDrVd/A\<SMnfN. 
[mZT0 Mʵ_@ϓN_ .t  iNUeַ*=AU"g]x4tT"HRMQi ,l50mZȹgYR>FHĴjRe@%HO~m Tv;7=V[K"Lvie28؞i4~ k _:Y4 l٦K}Yud|Z,E}y4Y"M?|N8qCZ!wWdӘ6sΘ#i䮳|i!fG{Pءu4J=`^%i0e A_~^z0x6C\M1Wf-XkA8|w.~-y?/6ԥU nԹB2IC0Nnɷv`[+2Bki:dy½*@m}*X@\ IPEq;,*h ,#}5kD(`nH%ߎ͂2(Yô!H"?1jX 9%8BJY-uvŲCWuL"i>hTCqr 1e+mqY?gv Ip[l ~F(wcw\x5k 6H\+)b eZzW}-T+Ozg|;8D.V[m V\a4:5n 'pY8_ љ@44r$$w|+%|bWkc?sxGgyf͓+:k־ xOg'+ɚD-G?—% ]2Uw7|3Nj *z:ߪp "KF#} g|~~Ə_Wq衇R7>!<ۈt:VM 7Y!K٤ːj.Br-5j Z rWĉqya=4cvu1W^n ^z)VZi%x8soM5uݾiňtdHt{ZAsα4ÒGn?$TTϞVhL'KRknv[X:S+Ͼ@3r,]::SYsһJo 2\aI#0Z/^s_o֬O;B 988888|85E"xZA=#Fbb̘I:͠^p՜˭ ,XzQM2h9- Who7[f2Y $JfD|sɓGYLe͕V& 6lKCUe2:8[Ӓ*"]bmJyZ2)x֛Py];Nv"B& nCetjk%iVNmyr <1ﴠ:)ęk{Gg ܗ;Q護O?;~K ]68CAk"'1-ЛzkuRVeW _}MWa u._2BD\Vs5(_|Do>'u`ڂc| {'abBߘ^['j~ !dd TK@Ko`V%"MrzUgxyx'&Z]s}+Hy_e7f]BjJ@ I촙s㝑+ ?KE^?, lsGTJ~yfC8jF, m#b˯㵔Ȓ5Ie Wu`<`-x\Wr@7q$6&YBzՎ$XQvu R 9e5uO\%˸\?/b$rjz4HobZ J',\U͆YDg5 ?+hPHY (N;(Y1>3]UV.Ud\i>7}!|W:ѣ: 8rఄCp`0 =D988888888,6pa# uppppppppXlȭbGn8r[:88888888,6pa# uppppppppXlȭbGn8r[:88888888,6pa V:g?cs#tʂC<9_ s<<0+q..ٱun!9/0s+|V&򐊔Iҁ.Cil6G;#afeV2J#/CL3y>+7+D_#⯄ C}2bZ GFTBWWe=u!?gTqݡ_?p-R+u `}4 /;N#z ɰY+.uǝToMO<A>Oyey>%>\Tu&bpPS;,r &^s>r?ʫ-:6,eV{G9}8*)L|W/zK3:#doC1sńNEիbK/moBMfےӍ̢e澷~'qp_Vf ݶ ldk ( JSGbQ]V;/RȃozJ۳Dq\u霡QpPج,nVdX?[Ǔ`%A+.V\g4 ɓqM7a„ Ft YyZnz=SO<'xx2> ,r b4 δX]CX9 FӖOc\4x)(ZxHOFye]C,scsHl($ k @65)C={a8$Yn?8+rCEYnGp[ jYngcYre]54jN"hf4%_hqII9RBu@,Z"26j +Tsl}rV2 m=k^.+0kI#;]c6$ IfLG{{IWunIZ=٦W*D@hfQTs!I&/2!jIox ʓqˏX~"Ld\fc|VY[0&E :mmh+?}j]?^|J}(syTNèџ@&YY(BNVN׉K/?A߲*6Ȯe1n6=4y,e e?۱J+koi9?̾ Bi Mg|^,j =4~L ^r4,ܔ&%%)Tj}=3=[$ڳgsvҤ)?1v{0Y'<⫇N"_%yInCL4e fLNӋH6=̘>HeGóLǝghv#I^>_c;ѣW0 _[[=AH3ˤHNi+'hG!?=5IƸ!ifS`<::$ }x%#zN6,7aeޣ:N?z2m y?vM,& -KV@r-FԹ ʹȳ`|^Ŕ1-*VcJ5aKֹA b r+ǂHDI}39wucgQ.qmw{~~Ob},J m2/|w3M7|_җOg-]w]pxGqW+{.2xmƀ1c`VǏrLDR1tRhٹObZZ?) 
c,"uMx`vk*~_qe Ͻ_^9?F=ㄓN—4qw<N>$ZfY j4>#;Sf9c:nDrA/y?$y1MH:8888888<ɭԀM[Q]plbB>~$k)R#:ӏO܅>r<ӸQKooWqq'mH# e}̓K#`S}iDC3l@R8x`䰑Xș0y Ǧ[~~Lro <=:}b`1ƃ>'3gc 72n큯~P|xͅAwx>f>ZOYm6q/!bM/0"=&D |ШlXZg}5WWE| Gb+kp;N>&X{1ݩx'1)Az5L.1n0yL0b \+qg{9*.jx=}Xq㯏?F1?k~uե'͍A_D_"ԩD\}mwppppppX0o*hAszd&v~p׎OϻZ РAPcǮVZYYN9GfpYg2nw,jk;츓hܤIlU܊ܮsSS0Md2<$=3g`V>x?YH{iL2rZ=a{|dRSPF=^`.]S. ;qE ZXXiHܗ˜0<^}UvI)HkGܿߝsppppppX7MRqo]&-,O<_4|4UԦix4ʕ>2?O}S$|瞋]; ⥗&~ o*~_kB2em?oW'.c=]p!Xiez~OX/sÔIŞvذa6AX$c8p}{rF 1Y{3\~eI"jUœXߏu^ddW8t|n >8㉧$!'Q&:u9P$cIB6vmo+=cÍ6ĠAX`6W/3^O;Aq:d -$!Is'-^'{<nln!ƿeFTJfaq"KZ$Y[@s6)lM1f/m!1+ Ʈ7XpǞx㍰a73Ֆ[OշDd o +`ڴi$[cmƍ_M6ل/|qsOP()m@f_oWgypolV2x=4:ڋ3ig??' 7kVڮ. 29l~7g<ӛoݔGˮ__t=3|aCfa5VB湥CzbG÷} ?9?Ƚ"n@lHώUT&Ib;jaHXE!kZx GZG$1ÐJ{yM R&09KE DO1*ə&yoJΕ%9қ,>r׋h@&Ւ[SehNZ> ή)Z &TCZat>~.YĂTL mqׂV@P&5Z,'THS( AE|y HC/xx}|-ۚXv׷!)*_ 7:?t?|u ň7dE|?i z ɰɲNFa7Ʀ' Jur;u 'dmUknU_2_;`QrNx=,5IKʆa& "}v [4F&B2Joo@G _*&Xr]4龔M 3itAlaBiVϗr׳UGW{Zm׈ż򜲡fLfJ)z ᣅO?za jk}kUh9@s$$q%hLZ2"ޱ-r@iThj;OmBr@'ŀd0YӜBȜοoOr7^=ǂ,s7ۙ "XYd63t`V~sBlHf9z-̈́bhD榣"RG cMr_VH;cqj&xV0$)3EJ7O2$[4ޭF ~K9Xs# qa+ƨy!!rۤ@r{/ #!Ar@:[1B]~$*&c"i"k# jA>!"lgs$ tXH~㟌Jd3& F (9edG]ս" V,:͍ދiƈ#<2>`2k1uH7IXg$QJ$7eMѮ YDIKhx"eEq\y4wiDRbԲŁɽPŧtiV|TyZ+oSyN{)+bm!Tu.vhΓ*o`|$I+Ah!(.u(LNPg [/9}n;LDx>8Tgj x^F/-@uA|'Wc @l NFT1d(j@Y3cyםe6 4}qa\5lqlh;$8?݇L=Ռ(՗5O=6ƾW]"`=FC6mCD43j,xĮB sO}$\uҥŧqtuBiPs[D盺?R$0b`ƴ}oI)- #'~ҼcP:ɴgSiu]\d>x]E':V}:j麹dTvh3 X}9XB"aIʽ2)WHYG;`"Y{=v" =wKre dycx̟l:pG"WZԋ({<@ DnàvWo\v v|1(n!T]˥؁2AQ_!Ș'*b+˭VW}?InNiT~{{WpXH+Ç2?$Ħ"+q{m &FSG9FQ$]Ɍd"i>T&,[u-J5_(ru<6z},"WaL\5@K(T*LYdc]Ӹcjf B?hERϺ^'X7ckR NhJʸ# /f Ϡ jǸA(TqO/^ژ:~UXƅE 3rLVh4i :;'InԭթNH&`x^u2b+A*l;f|xݗᵨOצ9? 
OuG y 6R#Ĵ:q뛯|8<" b+d=YhÄSA׫$Ft v׬MHEZg_Yo -̢53E2iwkRwᣁQW3njtoP*K%XHei@%;Ȱ;wSj,Aջㆳ{+o z۲diHy3zZaXCm(D nvd\|q;`=>Qb~pΕʎ|GW3O!yIv} si"'f, tͨARAG*!;-XŏH|S6dQBVվ0~xsQIn\D~6<u٨<ԨHmـD$d 1hVw=^~ 3J eF[*^͖qx2a, qG)fkh+_ u7!bj@g' j7Jzp8"*qͰnB5kB q=z>bV"dFӬU,ibzFezQpaVl&YFo P |[YDKqߚwPSokGǨ}CL2%MCc!AMpXf\۶F ,zUٶ6d}A fNAhSX֣~{}pCq# ;9vGǔNtW55l6Mz+n_,spC_@aTrFlu3#{eqJo jˌY ]ˌFNQДVRT1ma_ +S(XL1I`"B;2hW<]AN22`6C.nWupX4B6REoy><fH]QÑ[`_eY<~vm|9T*@kj@ٸ#&3}ih ߲LGKE![ %(S؇!i /&`/ "Mց^ $[ʟ^;88,ADMINIBM9292 2017:06:14 14:43:022017:06:14 14:43:02ADMINIBM http://ns.adobe.com/xap/1.0/ 2017-06-14T14:43:02.921ADMINIBM C   '!%"."%()+,+ /3/*2'*+*C  ***************************************************M" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?F(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((G T&Hd"ew`Ü`8s <)/k_|yL7Ǯ̽@ rxa@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@@ {ž wQ&pdn@k> \׺i6F&q́8V}yEy)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W›@a@Ey)?yQ o?4W|(t>&Y]FI?'`*@&]o\nihbxܧ z ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (w<(ZűjSDM>[ *{AO⪎ ZrϦۻ+9f )8gQ?AmWKQcS??!ٞExOAO1tUdv'[UƧTkj:_*2vgQ^+e~CM('S^o[-.[[dF+(J\)+QKNѝEQ\PQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEcx__r^Fg=ϰzɨe%vZu7DIݤ꜖o (ƀyR]^LHr?Sp*:iKzDoZ KJn|E(7S"VPG׫y~4_ zmR}#=2k_ᦻV"麃\*rE&nX{(~_P^z_--+@OWQO|ռK7S"ҿJ(0o#!Mon;+|?tvc[}M>OA55gNnӥ^ݐuqG5L6Ҟ>WևQT4M^sGm p~׎NM]QHaEPEPEPEPEPEPEPEPEPEPEPEr&񽦅+Z[/F zn??N\wKG`Hc]TjSJzEx&&ԿG&&Կ[gK=h^5o GQ.VS#5៲ַ =hq׷|P۶*xS+>3+) Ao?b^KXgᛃaU6j?wA] ?xR@c$~UB.rQ]I"ɵZ 7248}y+YyK{o$v6ʈddV?%݉fv9f''8Rj[íT"q<u&N2-/7\(jZ(-/7\(jZ(-/7\(jZ(2eЂ?|0)ʲgiu`ŸQK:q"}dScq$aN#~\U?r7`=2++~\xj>F!EW)QEQEQEQEQEQEQEQEQEQEm\~@ "9>?h =Rk][~'|Pvw5v(>8>um'ňƝ ,;<ŝI>T_e[^LJ--_/Q%%}xE| 5 ho<"dCBZ*/!01CrQhMG^R62T fm0y1H#Ev_ I/v6Iת 
Y}ntSaEW!QEQEQEQEQEQEQEQEW'AJZ|kw| pn#d~$q\犵-6q&)`Wqtn͟cKQa~GMrcyfy$swcNy?/or?exժ{Y2Z*/'4}?/ou1ЖfOK[h4%2z_Gٓ5 h̟ޗdƍCBZ*K"v\,MA=ES2v`5D^'m5-$ /訫'o**ncl፤| ?ƗxЖ_62Hnts,YRH%AgCN륯<9ke|v.a!Y(P3'sEpz/ gY.SPs<Ng*GkJȹxE-29 wtWh_<;kzNY7>]E}s);NH ;?SĽoDӼScღ[E.e}iW%73B9@Ey~)@ςk3}oRnI dbm`1yö^"}6M?Z{HƜYfgSݜ##J+'?k_ҥ5QӬ,[#s>ARBkjxF@꤀q9E\((((((((((((((()f 2I=(k~'_F|Qóa pRxzĖ\m2U'_@:TUWJU$y8Td8]}{}u<o=kbCB/xG_ةhPЋv/Q*Z(4"01>*ZB=Da1)PКlr 0S(8ijZ+R@_~k?^^s?5oF_C0(:(((((((((m$ 31w&OVk#GA.qHHя'rjEYڶ&eϠB\6J</q-1.no/&Xi2;y?MKEz6VG?M?_5-EyEOF?MKEEyEOF?MKEEyEI:CTP++d ЊZA?)?֧e&Aopz~5EsBt:8^ȫի~'xK"Vv{/ࠢ+ ( ( ( ( ( ( ( ~eD0gy'ϊ}"HGk|<J17 NHy$ffbYKԓED;swU~~䤮/Puʆn{BGϨ ?2椈yC䴗=ؔ$;O:OO*;gMKғz$lESX˫jr+>VFm!U:OO*:OO*o;>cm!QcΓ}qQh&/J-خY7#TGU#~yY?ƅcS,-5ȍYvG2R2_b3 -~cg{Jv~s 0XNחb(IR1NS? <^QG#dM7kRM7kR-~EE]մws[MѴo0q _QWm_54O>h6xy!xj]iM֎QX[b'<]S½/Hͽċ$Z\ʏ"+eU$uf+ky'"%/$0UE$z;וMچjt;]葹ʓ.J8Px9 <]]'Zޯak t%]˷kʜeL9x]f|cዋ}>ui/.'dS =ޕ2 E-y ǁDOˆby`mL @X7|܃sP|D=GZo["y 23|O|݇@-?xzMx^ėwli1^@$(gO^c [M_CUlϮ[-toojYxwCY VvqfuB(@c.~6=σ;exxaRck ]rs=;fx㏈zM4m%]EX\/{w{&Ch:Ȑ_[q` 3jdxRY/,>!n#0pX$a vz((((((((((((((((Weck#G]=q?_RtV)1ڥ&iE5"."(dzI+f8\#J߫+ bʄ.QI8h_5_v`f_E's<˹q_cbI8h_4oee>Ryw?/<?Mo%Aϯ?-._4GR:]8|;͉ʱX{J&F%E5Ds-z(Y?n?V[q]-+g9kѫ~\}Q\PQEQEQEQEQEQEQEQEQE Hr+ܱt!oQW~T(taBkE"QZQJSK:v*__2ϴ[od}X?Oƌ>~k5k{voc2@Uˠb3c}+F- y毜KvqA繕H';+2,ZNݎC2ϴ}k- ea 9KђUPvyrKgw-6vH q}:<1W:y1,-vZ9 >~k4f__tW7ɏk3/I݃nV.?q.U4[ؾFIo~ne=`mؽLz*8*@T?Bh2ׄ2*j򟆿8^ȫի ((;B(((((((~h^^sS?4oq⮼t9qY|R.͒vE\0U?Z_)UgT x\T Dǥc K_p$~o ԵԵˑ7~G\KQ7~G\K@Eov-~EE]|'>Ľ>Ua&7{ 8I*=I4O>{w[m-Ε,T6:n rJ%<)tpMcvpq92~!٨i/͢[i1M[].@rg$ `B;h ˫ۻky呒k@P.Xa#5oqǮ6:DKF0BzTe̖r\֓=h@m1/tJsPo?M ^*xWTD-(7r 9<ך >245Ť66$zHoF-EޣZ[wH19miW=(nWJ(XdmuffK 6׷-팳60 _6K-6o< Ƙ0P} HSKҬtªZMn@B01ێ*;O hV p:.l.X'"yTlB(bO>]x~?ɢƳO}DW) .03_-ޭoCiso@fUrQ⺖gJM-76d61LcxM 5oo V$0B#5 `(%Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@q?_Rm\O/?Tц4}L1'gA5r|-V渗䏩 ( MmN}9kt8Bq_=NQn> b]egn 9?m6Mo/1}I޺%^g ?g_+xiE;AK^|x)fݾlMc*g2]ޏxWsrףW ĭ6P:WXЮv}|]g,ï?xx)Vm.4g^% [M(-c:mKĿ$Z'G5 .?M{m_~4^ZlG//?#^% @׊-w? 
P^% ,jhMU?Ꮘ#N1@Ex4=o-Q G 6thhy E ?lKZ+Ŀuo SwB@:%8{m_Ԟ?_rj'<nYvSvzu5ixPEQiCT0n 'lh>MΙ/OylN#r:o|JOI\d6FOJv m@h9LX)מOSTN1rWo%עԤ{k+!ݕЪVo?/7IHuGlj",E*@rl1'4WR\C('1!FT21h6&[K3p.m"m sO-~oL=.9+{Vll|& u!=|Ѥ9\|qBgΑ[H[8(X7!uaiq~aK*%8*%GsGK?C/ NHP \9f[$'E2hFbr/㌶39_${h9ǽk|Ce3RС-tń3_.eW+`ʤ#=k>j:okxT鼇EDO w4ec{ ,#חw.uhRD ܖc 0#QM$1E :uQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQER~EK]q?_RF0^|-V꜁VAFojQy,yl i>T#t$owbzkVv#hۀr0G>KOG"¾qa AWõg5SZhS9 [` #O׭HILeG)StI9EM?lߦ eY}??~.&$6NTiFfWO(#)ێA?7>ݫ#OG"gY~!0}Σ2y' zjOG"¡wpˎ# e8螫\_?Zw"¢,f*UUv=NzW1N\%sf5"z-H1I&_oFWU?jZ=(AKQCjZ+g9kѫ~\}Q\PQEQEyޛ&x4usirK18 9 FAr[9Ե1X*נxoW|)Z,uϨg(?p|BGn&QQ 7o? g~R=6TPo?<7E]J嶤y?K⦍jvqe W+F OzyOѿ󊺰i:͊mQ?OjU?Z_#2Q\?"?)a34AK-8u<0O:3}tvz퍴vWqmfMswGl-m/Xki* :R8yK{uލ޽2׵u Šς$AȡE#U6gbE01`;ܒ8Heh)Xw9$6r孭&"ERbUpX9^+Oc])/᧢f=Re9h_N$4{X;@ܫ}ҶtB+dtϟ02I =J;ۋ4XvwFqֳU]7ڒpw=L˝b`LcUi鹀juk)p߸J_iIe\ 15w])ݻ8YV8wMhT/v;]qFMaRc $p,hAN95=~z0Vm#!Sۿ*K7~G\KQ7~G\K^M7kRM7kR-~EE]q? _QWm_54O>EW9QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEWKE_-vR~EK]oGziEW8QEQEQEQEQEDs-Ds- "mt Ե?n?V;;-z5y¿띿wy]?P+ ( >#x}:ojE6KCԭ|#  {:(>xz]njuQx-TiR,T#8x>0ֽ7E4}An]CST##ʌ>h,? 
D_wO OXyo5TV $nd,<ylFӔ$t+((((((m?ʥ?ʥ[* (rIQ*@j#\au"ѴpEt־ ^m9e7Y·>*Ip2qcG4׫iN_fiS^=dfQE蜡EPQZǜ?KQZǜ?..j+C'*Z:Ck#/Z2ׄ2*j1g (NТ޺LjmϡT(>_U$mږ+p ~|S ?[M7wB2>^ECYXqL>yHhݛhOcFE_?g?i2Z;z%io]~7MtZ6+hV5 9f+?[?fk}{U5i4ocxW@9hx{Exa{?(((~h^^sS?4oq⮼t9quϹnR2 $ Ŏkr|EzXkhzsC-g]M KTF F{Nh@HFz3YhxOG'1df+]迵׽Dhh^>4[spOʸ~Wt^|m,+vo+돑"oZoZ7~G\KQ7~G\K@Eov-~EE]|'>Q\EPEPEPEPEPEPEPEPEPEPEPEPEPEPEv>ֵ"@,t{=iQ^= (<,5ށoMI|vܝ8x]i6sj}OR  jN[9ʏ`0 ڊ_7jq Xc6>^?~ xr94x$M:!hܶ=IWNh|Iu+ú7׶8qH~.\tLtmU6:TlF)"'ӱPQ^n| +E+mNRFy/lC $#:/ůZ7!K:Kd+r[#r3@\|UE jhõʬ2<`s^+'O$dn I|8AbWx=_/kEu Vϊ_/k hb?/C(c(((((?#o֥?#o֥PAZmt Դ gw?5oF9WsrףW?/OGQErAEPEPEPEPEPEPEPEPEP6s_RVs_R֭{QL vg tu/A OW_S |lVgt՟ٺ<)Č6Gr ,:Xrn﷮!4#EW…QL8*8*]C\?!R=NK{qa=k~'VXyV3~5i =w2ׄ2*9T$gROJoZ̞ӭ)$qLz!ĿzԀ5->&U{HRIO/K+}?Ug{G~Vhg0عEo=Ghl._>[h<?E9aϤThdw 0*U`NAj4: 8@?־ ͱ΅wͥ>g?pj*˭EWDs-Ds- "oZoZ=/o**["jO}1 (Š((((((((((((((xv_ XЭXfhw}}'e~lt=&h|Bt%:D2 +OxN8=#\c8zOxUBm/#Ho1d'?2v)Xm'L";Ki4k$\pddtR/ot\Vb(}tI̍ F09-35Ey]ExvLt4)m;sdžm{כxGRKUwvE$VRFT1RrO#%p@^59 $Uq N8РmE|qUBՎm|e $yNIÌd'?/a-GR|QmC|Ztv6DbcH@}'jv6uR6vpɴQp' ˱-KM<۝Rjq26Pxtj|5ֹ%7ROY`yRFY:UydXFwrUFKM KanFBΪF qjfb* JI2֬&KXnVWFp;Oj$jpf`A]5ƩYjs,NcbF2Ãצ^i{saK$ϖ0ϧތz׭{ksөKA QO{utnnfAHXk&TӒ(&;db@?^2: ƹhzAݥuYn$K{itE&V J} {e5ʴgx5'$ bӤd:Gc?Oz]O!&@ƻs{g'8Ȫ^”VʫUh0mQ\EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPh- CDz_WKW콻J:]S1<01cs/H序ʖS,Nؕg=I ¿g+%Mn~gcHlQF NF;<]Oš8ZIx:y>y5O/ kwwYn_Rq|EѲvHHjψk+뗖s^ـ>t<78\v¿oo_y;,{[b+ `+kAޓ{4K_wts/Wp8{Pq?_Rm\O/?Tц4}L1R1UFX(5"njܸFnN&UxA;E$ݞ ),;W~m>[ϼ_>yq Gc'm_iRZ,!5~U9Vj/"դ76wWE>JNWu '/mcE&;*琼zq_o\g%9}}dx]YiմՒQ<tZ5ެ= ,!VQ)#a=iG 9K?pys%| >m>"[ 11';OU=i r]\>&vGa(pɵ?#o֥?#o֥H[q]-Eۏ?h@_~k?^^s?5oF_C0(:(((((((((m?ʥ?ʥ[* NK)yu^MMVԼf=[=B sJQrI͟Ś_InKE( Hp =EW\aX*N. 
2l#:W]ou-| 7d}VjdsUd>"i-de^H班s*_z7%-.;LiQ)} W^SN@+j>V}rYm7z-ϧ5_;\Ni FeA뵹lS֭Twؗ mKW'Y+m\`U:n^]-rw^J<#'=#\ 4Vsl$ gK>s*\ɲie럛=*x+A%~۲ǁWn->GRyJ.u3wȷ F |{=L] |W!RoI;qT YGO Ӳ7lssTOVZ[y ;P> /_XҾ/]t-`-]9$+ S}/RV672;pzU5i>km2J$gnn^cq9UVuFJַ?#^.տ TqVAk<^%pQE~~fDs-Ds- "oZoZ=/o**["jO}1 (Š(((((((((((((((%me*pBIL HՔ|^ OMNot{GCԥf$bBwnSq@b'O{:uwk:TSUK$Ϝw'<ȟ];—WvQKimo"HжYP+bo%Ӭ|x_Q"Ӧ]. >3kf_hW|l3Y);x?]_2ÖvJkq!o9vJGjqkY|'Ϩ6."Xe"8#q@H#^ =4o+P&='F%JKdXwkOI"wgӱpD4$ߎ٧?T1v>:D ^$Cۨz֪x>l6:DLizJ}j?^(Γ~HtH޶Yx7o* X'H99 {Q@q?_Rm\O/?Tц4}L1G>-? U[O菩% CE&nm YX&bHIv3T;_O eYn-dbÎOLjk *[i!g4xf-Ǡ8<׋ *QOܽOnufCRLa` I'uk9dH<~[򏼠s׭3JK ["V7@Tm;ֱj z;/Nq4\ nIW g"FZMLNi㶾}2CqF#Si(8RG%Zeu%l6p~@}Jy"kʭNBIfX#'wsǓbĉ119 ) xz+a}_Pڿo¨/? +,/3|W'"5j&5kcb(AKQCjZ3WsrףW+g9kѫqǗ '#u ((/xEmɍ$'E'5^K7G?'/ڷy^o;7ng8Ÿ{1Gys y HnqWpdg/]7,7zn5:nK nFN~\MM=SYh^[ۣ-M9S* b>fTRBNGE4鋨qYOvG('ʞG$WῈ k֝hNw['U;eLeCcc^1.}OF=+]7Z31 ?R}Fx/~9x 7CMJS\Oc;RKmxnze [4I/\ma2" SX0Ю_̺1:r䏛O˚5_iHsꚞEΕ4s,CC#e0qym>ܟkIg-@Ow?P5 ԢYUwK 2w~qހ;Wۧ _cMdtj~)+ºh&#7MU=~b8 9^e /!ci۷˜{f:m$~<iZB:D!c=7C(p$jt` =E-`xP>W]Yal0Pn<1ǓַOj+OkVʽµtmb*QbQG' TTTyQRFlxYu3vn )Er29o> `JpYf"@%o#m?) 
0pqW,ӛ>glqXx+F#t%/yi,j\X82r:>/-^X-շI3ʭ# }G7bɫ9/KAj%ȝo&V#\KKm_M*ɕ#![7nsQI!ܰ+34cAx?&tN?3ԇ,`WO0+¢8*8*]C\?!RW?tOTuWG?_zyO_d dUcwQEpEPEPEPEPEPEPEP^sS?4oqF97sqW^Kpe;$jeX)ȯG2ᏣLs UyAQT 4?W]/^(4?RUj0ZϧGG٠1V_sz/QT~}(Uj0ZϧܳG۞9'ц33z{RQW1Ns)#)sIxyw, cQEGΑ7~G\KQ7~G\KH?#o֥?#o֥K["j~ȿ_ھkizG }(sp(((((((((((((((((((('*Z"0Ə#2<ſAjU|[uUˍ%#db$i8c4VMfY.60r9QZڊ(r0VHWJg=/OT{ukJg5EuzMX I\+Sms r̸Kux/ jz2[FT|gռ-[|"-=yz_ mLdr̓C io$ʣqc'ک,,,b/,Qqaz7^h+;(ӻ_Nflɥ\<+NX nxɪI6B$̉h-Fv@Za$\qQjaOT{BLJ~ oZoZf?"mt Ե?n?V;;-z5y¿띿wy]?P+ ( ּ5]:Mb]>x[ N[IG_?}ȃqOGƭJ*ʣ8?Qw܈>44}/i[jz({pȾT*a,YYK)=F:SWy j1Gw?2ϨSi-ԵԵԵԴ _QWm\OTUWb/SQEnQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQER~EK]q?_RF0^b)Q{**kĒEok\%a%R2r.U3|UAOF7ڋi/.@&:qTms;]}ϸ7\s5h?Czzb\SAmI73L1̣ysϭ: q d-鞝++G٠1ZWнѱ6q2m7ʹ 2d-k+G٠1|CުD8TA;*rrؑ o٠!'NׁBZ|&uTa}.B&5j&5kώ"mt Ե?n?V;;-z5y¿띿wy]?P+ ( ( ( |zwqJefG;3)=ب-|]ŚRѣZ-BVI6$H%98- i4RMwopn 7.3Ny_p(h|/Ϧ+kbn\ ʲjc(fS ("eZR}x?2 ( ("oZoZCjZ+RЁ¿띿_~k?^_;Uh 6f]($XYI^ 63tƥT{S(lu Oilvv4 9cBMW֗6Ew.=+pO8IMPo.dӻEUQ GҩGSm=mړy^Q8ž秶3ɬ-C6~THg3I chb288R,7]ݕd$=Gr3g1SYZ4J$ؒЩOOJ&wh5GǑ9qU/4ׯO/8 .іkx>F\`zETg6'طe9:R-c]IZZ_ {wZgٺAq~dQEtF**Rr*+Oj+OԞsDЅKQ\?!R:__d dU? 
q%ёWW;=PQEvQ@Q@Q@Q@Q@Q@Q@yOѿ~h]x?r/SlE+ƿ ~`n&k'%brM:+ѧNd:IԓDs-Ds-QM7kRM7kR-~EE]q_ ѯ؏b/SQEnQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQER~EK]q?_RF0^QEjv6zZ)d|-cg~#e>ѐ7npIhmcKr0m9˦XZYdyP1ּa_G9D-lM̴6\#]ʒ鶫5ma._R-%Hpp$}{W%gem)z<U/,w{mSv:gx.[a)ko5_&X XpǷw^,4-˽' BxccHe K<..dI ,ӊ>/+-; tؖyfYlr>{m$e|Bqݰ'ָjm";j7e%$`sb$]L7D iv_XVK$ė$QAe[[Yر?RiRg,rm7~G\KQ7~G\KL(AKQCk[CЯ|Cx LF0"/qyˌe9Z'gRaDxO wWLӭ2 5+ s$~jfOiQϹT((((((((((<kZ<kZձppûo꛱d53Mk&js;H5xz%kh} Ҿ%'9.camܥ_4<9trK-cr 0ݧ1zYtF/ϰ`=rNRo_SHԩ򥧡kxrݮ>[gsnθ13on*,mF[xgYncPBlINGZjzu6́bR۸;5N[.kY CJJO]ujZ]λY|+n&ᱷ5X<0LBϯQSźv5!vw7~^%ލEK@H]QyVw]'VЬm/m X EW*60ǎAX #NѦݠ˟r) x\ҳ,|Q'tEYǩi$-^O5-s 3VU{U;#J5M2I͍MñwqxOh<5]"΁v/zsϭVK,]da[ӧ8IYJRK_B*( ?#o֥?#o֥M7kR1 dju6\' x:ǹCmXߠ8#"S8yjEͭ 2M$~[4Q_;)97&} bAET((((((((((((((((((((+"j~)ȿa_7a 4+鏜 bx]zc}*:UfG UpA&2X\L/cʠ?)#OxYƗMu[l$[ș9+'8$~Uwnŭ.aP B@ټQikܮҤ [?p…H'?jr{tM鏢j1ͦ -BJػid6 瞂Q֮!Rk erGN+źͻC=mDQp9$7㧥sfy&kpx!tc+FUCI/=6U}^Ŭ+^[0c9g 6 Qʭ~b/r3۽yZ^;?PLԬZ &$`JŸ e<5Z}[j7?mVeG.:§KsWt\6WFX SJ+ww <kZ<k.p @"`º}*RiyvTsץX}~aR0>/zyx\' Ǟe{}6;(PBQGX=}((((((((~h^^sS?4oq⮼t9q(3Š( 7y[v~xO]Yt"{Xw|$vO5C/q0m.UzWG/-JѢM*LJ ׵r֌ܢ((Hfkm:t[:bI-6¹?1lf]}JT7"GWnE-oVIЭ[QIs#S #ujkCuIXʚ_3@_ZoĿ]CGXzn 6vGڙ f'_s\Kjfgpw)H=+wmBWoܢ ]4lècS ]Q$ ĞSť=*Όp (N@(&5ijG2M!!v":״;=a>G냕?5 OXqGQO nq l3OwW&>eV,mo5+oi೵fq_ѵyMEn_)$n>WSN Gr鴦44Nšѭm0IgBÿ_94iDֈ##[Al'kvgN{=+E&dVm#;fܶZy2_<+y9/t_NxY;}CYX4wfim6Qo CYgk (=rpyoZfSڲX#Yy3P?ږ,}~oʨݜsz`u_K;&ilm,#5O;{ kW\Tl-ZG=k6cM% OsQE#0E,u&2:_iv_s 6e]`5눋*FUF? V%4w76)Gc'jkuJp(:(((((((((((lJծmd\\O^?uNںK,{ym3Y\2[Ncv<6"5`E R2R1*ʰpp=Q]G)/tZ[C5@ũZ?{s-n o4qfe6ߕ~+/{oMNϭ+?w׉.4ZK(sFt{@p*Xg׵,O4"vfL|3 gQSl}?֝_hj[y@~11ǯy]jڋM{ vLƘPT1Vp꛾3]VQ3L­,pƥm-t6I)[iHuf<c|3%lN*c浼lnUP-[>VAӺ|ܴ_\ZOca1Eg՚(-g(@QEQEQEQEQEQEQEQE|QgX&h_EwuWSӠմn1N[GqZѩ)2iQWuBZy(YJ2S\EمQTI.Ru& WO PD-%J"³4*2I=1OQ׶~ Py&q=>H+%3qvW|aH8]sϧ9fߍC<<_ּ=mk<[CO!OR{e׃ [s!WO Snx7t1>N6 I̸x|6p?Ox؋yui )G;w~+DRVi* w}Fk7sSϥC? ǡChCpfY_,ỊM{)5rb,br횣~g`%^[ۥeTF9OZ6 ]tQEvE5Υ}Fk>ꎀxzmE]''du -Mf$0A,ۏ~b2;E+({c`+RgSrGP騰+p(((((((((((((((>}[A&\j+dvPcµ8 vnQ^} ? 
jX!SU4e&z cږsϕݮƜɛU=Zm;IhSr[ǝAq_ %r %X *ᖱZ^XΝRu46ʑYVYVLK);QWKZ}סJKZ{ ?L?|C=4ejZiT,l!Cju?O5]Qdeµ?"O7U4kY˧{cy>u?O5V>Zޙ5^KÕtvJXLUݿ aµ?"O7G+_M3$uxY"}Kyry9ž?*بtrAXV!gIkDnn<s,i]JUG*0?X!SU1^_y&b|4׋$j/rH+~XiSܛcC؅'ܓu?O5]5Rf&t z#aZ!MFԨNDV>^P'Ǖv?*MI-VFZLT:;=S\:jQ^} ? jX!SU4g&z {ھ-otJ3v_tvqwkg%Ц䷏;>*BQ|4rͨ6wVrYM2n{y33r>.mY,<+{X\B_i'z}h%'ʁ%vuW:''A9?_4G4V_u[gI[2m.blwG_G7^ 4ݦvA‘YәZč fVD,:p_u?O5R[_(J2aA=U D{hIV?cn'~7 ڹv6(?i%ƮҹVH7e:+W5 ? j)]~JbA'A9?ncږs:';I> X_sЖ6N{:G#?t(W?? 'ޣ[]x6kH_lc1밸TȅV tq䒺_G"q/u[gI[2m.blwG_gږsZw.u 3[so>Eki_W??𫶚}[%LBoX!ST:'t5d*ߓ=ƽ}O&\i Tv\cʭI fGt/M# x.Isr5\ ( 72ƞԥTr SRc^} ? jX!SU~§gQ[E+bg@̇:V^^P'Ǖv?*MdnEeUѴ&&-~W% ? j4%tDF.+ϿbxOV]:_VL1GnPutf愪/ɝeOVNn.l佚ܖt`F{6Ye4ɹϡʳ.bھ5ou8B+ K$Oe:'u7V L+ϿbxOW[VYVLK);QJTv8ԌEc:嗈4> "{{i}p~dVč fVD,:CV.Q^} ? jX!SU4e&z [_(J2aA=W{Q(J[u2V6ڍ\Dy>υڤhĠ}9S]'cn'~7 O_hVIhJY ݔpZӝZr僱HRi+*'Q  /,OЃr?bxOWOoe«_?;9]<xOlVvi\Wq~cOR9PɆUQ:'}O/m3h>u?O5]5Rf&t z#D[QXvy{ COHUdL5rMZZ.[d̼?RqijI(((((((((OVEu"Mf$؛|ʻJcK^ [RI~ʫi/Tz QXQ@Q@Q@Q@Q@Q@Q@Q@s~vZ.+o.kUӅtQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQESп +R4QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEW7}%OB.knJ`]KGIEPEPEPEPEPEPEPEPEP^}wkϵ.x7=mGwjQ4QEblQEQEQEQEQEQEQEQE}AɤMCKk^›#W:Ҝ[fD䗩r( ( ( ( ( ( ( ( ( "_8\WI\߀]֫ ((((((((fTRBNk/ __ +/7d[}ɟM@TU]CSm~Ӫ[X's2ƻe>j ( * ]:[BWtO DAXI \=,2x䍃+ހEPEgj!tCjh˔wqNV:UtU⤊EPEPEPF#I QEQUouK 5Qk q<ʆi.Oǰ( )MHCI*I=?H?(xnLԭ|AOcj@6znppGSVXEV{HL@J.rI.QEQEb? i^۽BLm-ڢC(*r PEPE"ʐFH= ( ( ( ( ( ( ( ( _Կ}tSп (((((((((+0V56lNʭ?봨+p4ec,7j'ؙEJ$* ( ( ( ( ( ( ( ( ǹG'~ѻg=[NMZZ.[d̼?UF2rQRPQEQEQEQEQEQEQEQEW7?n5J"_8\PIEPEPEPEPEPEPEPxV+k"-R 5{b,6b`_wxH߈/|3?vuY.%EH߯_k:}; .f;D%a=?" W=$txB.m_F\눚3YIny|Ru/iMyuiv;pTI]펗mj 9a@A2xc8=]OŻH_aU4AWo -'u2-CT<XL]}= # >]*]= ,χlZD)-{X(n/`Įx8=8(<?|0 N|RuXBpc_/یg^٠D?{7_xOw&֣\\[n *׆$׆umWRFCgMR뗶d]Ǒ\5]lzw6rcBۀrzgkNoe C&+ !t`>wٴa g@&Uψk?~۟Q%q3lc9VYv35&xNpGb:|Ak8վ0K_9E!24g/;1`8m_wbAb2yx=Ɵwׄ7umjkb-VnVH[YѮt ~%m>@ѓ29\Oד:Va6/>Q$h;#*x#z?zw&ss7}]J;4Gl [?0 s@?覯(k  !e_vֽ_! M\7ŸKQQC-jM>K4_>@еMK[$m'%U}+קK? 9lb^kj|bltgoci DƙI¨d~u/,y|NԦ O]x]2O*]kRw;qڷ|koA{5=6Q v݈ʞ8*8n֏&>Z ,sw. 
Wm?j~ռKbuXnAǔmHPQ^[WAa)_0/|9J j5?a\b)mW g{LmƯ{u GI&ßm A=\狾xF[n ц) M2>q]|mkOI8:ZA7-!gvz⸟\W -Gpn"c-aÓs^{x<sqq!_!YSz'#jo5 5 2q̮0Nz 늎oS'_jVՄ[ H\®wIVmoYUuȮ~M vr*^o?? 5_(:$SkZR[eR-nQEy)c+ttm&Nit؍!%~rfH|;7/Lg?\մu&Ni56WZB=?ׇS-/?T_4W-4mZQyz,!R} VXF#DS]ˋZQY~Ӭ$HULO ?\>=MQrft:J+ϿMx{5?G)c*i7j=DŽ|'m<t줂8'<3g?Yt~Kxy} /.[Sb+A\;Ar cI('%=?ׇS|G5O\߀]֫ -[khBJā|95jH` 8oSYhkY~"tӯ8YB6GMx{5?WyD44Wšj?"|=H֬*2ЮE7v__X'YESմu&Ni4i:d:6oZ lF{ KkrΑ]YdT+ \5EhN3r}#Ey)c+|5a$Bf~R`e6oEc>A:XgT`DV+sm,HYPbB$ׇSšj?"߇.j=]J 0AV4 ޑ ;v, -h1Nzh8Xsj$r%bXjֿflmʹd]ÐpZ`j n+q say>7'`*?NŖAq $cEk;˖:]jTkc,>@0#vovktzxpͧkmo%dG>+5E=?߇>j&{)t? -[=F^PԢea:]ri`{H {]8X~6;ӧ@'u sV?k]%&urа Y= N\kQO4MѴ7cHw}vҨ[khBJā tMgVnkl*-'$W:MW6ޑc*}ylnt;GE9Z±' Mx{5?G)c*i7j(7K NuU[%Qkҭmmc;x"c$= I!Ѵ}:դxmb4#U?Fl-͟~OptsvWz]h-ׄ9{F@/]Ke,41\F *V[V!t:Hncd  k {!I?u\tGЖE48Js HS ϠZ:֕c6n1 F?r2:S^OU'njnd N4$TNLҡxk(䑥tcVsՈP2Os֫xoCu( CFa{Ty#MFGW![i`rBʅ#kI]:I]4 d|95hy%3^BmSē伋;ɩ}2I˽ӭ.4cxvF1@i,}F{(P'AKq*Ϥj.vRAX0W2z+O-Mak}+"AW]HUHC}v,o:?%{ "Xy^3ҹ^l/ ms $$ƻcp=+|5a$Bf~Tzl?iηo#?{_CK]Jm oLaScIT\0"&m4+{+dD@-+sm,HYPb/S^OS'%𫝖MΕXOtA#i Sx'u_Ht .=Cvﵥb\s+ kmswÀe?ܫ߇9/vWNw[_Zyi­$;#+LRQKudS,jB2Zfßl ɼ<qYic]n^}2MR=JKf3wM Q%C {gjO;[A-KS($r s_=?#5"Rz/+ϿMx{5?[6;ӧ@'u sQ(KI~Rz:J+/^.uG :hXZ𭵴P!%b@O\KkrJ+վέq]_9(>CTMx{5?ZӶ?TEy)c+ttm&Nit؍!2]EEͿyXEc3g o$>lk|c3Ʈjd:Γq]4 lv{d+GwgrS^OQ kӖ~L/_Կ}g? tMVmnkwދ$T| +s+G .k!Q%r֪QEewWNi!Wg 3?@+?|Ysq4QDNyʚ5-(>5E=?߇9/+iJv3T ͟gsy,OA4omM( ss,wː%'?ܢ*/v®wWšj?"S^OUo ?z +mmIX"b 5x%/櫰(@2k%mMu6(x~ĺKiM,`# r_=?`׼"R~+ϿMx{5?ZsxkV]FIW袛;i/JU/d:Γq]4 lv{d42IӭZG6#HAb=g5rX׃3֯~%՛QfEB/z75;+ VYܥNp'oﭻNM+is+(((((((((PV-n+}D:,$P}7ܙI@G4Oi G#'OuW#@m?S(sD6{:+sD69OT =u9OT ?? *p? *h?w{]Er?h?G4Oi G=㮢G4Oi G#'OQ\#'OQx먮GQ@m?S({uEۯ jph?°<hW .b5]EAkd'`N? =Er?h?G4Oi G=㮢G4Oi G#'OQ\#'OQx먮GQ@m?S({uW#@m?S(sD6{:+sD69OT =u9OT ?? *p? *h?w{]Er?h?G4Oi G=㮢G4Oi G#'OQ\#'OQx먮GQ@m?S({uW#@m?S(sD6{:+sD69OT =u9OT ?? *p? *h?w{]Er?h?G4Oi G=㮢G4Oi G#'OW7}%OB.j#'O Gxb#m*>̘$Miu?W#@m?S(sD6{:+sD69OT =u9OT ?? *p? *h?w{]Er?h?G4Oi G=㮢G4Oi G#'OQ\#'OQx먮GQ@m?S({uW#@m?S(sD6{:+sD69OT =u9OT ?? *p? 
*h?w{]Er?h?G4Oi G=㮢G4Oi G#'OQ\#'OQx먮GQ@m?S({uW#@m?S(sD6{:+sD69OT =u9OT ?? *p*zu/cU? *`^h84iWd"kLcpMG4Oi G#'OQ\#'OQx먮GQ@m?S({uW#@m?S(sD6{:+sD69OT =u9OT ?? *p? *h?w{]Er?h?G4Oi G=㮢G4Oi G#'OQ\#'OQx먮GQogk6GƺJ_Կ}tQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQ\VY.pb2!N:p}0nI(r'8.R7ukM6{ FU]f/SVAq!ZLJIGi^I1&hѷ*Yag9?ag9J*wdhT[+z?u[闰K'e^ImY6稯?]hT[(+(:v;#ag9?ag9J(Υ݇N?Z(Rc1VׯYC.= wWi:4Hrѷd'=px-| h;41r嚱TQEyQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQ\ߍ|J-1R|ώH3\k[f.rȷnR:"y*!de fi$y%vG;WOM/Vx1tGw RQquZ浭\jN3?WM<5*O S"Ei2-/7\(jZ+s-/7\(jZ(-/7\(jZ(-/M󎰩vL0*j(4]u^ʸa2bZ9>whZFӰaʐpAihvo^[)պ7xAֵk%>:QEIAEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPYψ ڍGaF裟ǥG}y<=Q$DpFOs}xR]^Jr7S=j]m^';ۏjFDg;Ě߈uz@đy嶀I';GR}; ϢzxZT4VSRЋu&/9ȷ\(hq<h\(hq<h\(hq<h:‡'ZKpѱiE* 4jP̒\猣EM@C's ?Ҧ[o]0DQE0=¿'hZլ ȝׄ-kVN>? ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((̾'4퍦~Hm̸v#dMquJ?FK\}&ZO;֐QEsQ@Q@Q@Q@Q@qup?>?MQ\?!Rmt? TmZ(xWD kZ_oEjgGAETQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEyOįcdWW+G/-rODWQEuEPEPEPEPEPW?tOT]B-.*)CKQM`h`{gNѿY^;F?Z֭|&}D~QEIAEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPJ?FK\uq"ђ)_IOAEWQQEQEQEQEQEEsDЅKQ\?!Rmt? TmZ(xWD kZ_oEjgGAETQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEyOįcdWW+G/-rODWQEuEPEPEPEPEPW?tOT]B-.*)CKQM`h`{gNѿY^;F?Z֭|&}D~QEIAEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPJ?FK\uq"ђ)_IOAEae7F8TE,O 9QS\\J#B7 =pjO`[QL(((j+C'*Z]CTSAjmt? 
PE ȝׄ-kV@֙A>H;lwchrQ4W%Ey^;G^;@EpOu_3ғP yIɑgF`1'9z'((((((((((((((((((((((((((((((((((((((((((((((((((((((()#J%xE%R ;4<#~nnEh@ְ͆wݭ̈́ 1y Gֶx8S,QjچjR1W<ɻ?CDpji )EG<(&üaЁ?ӴU@:+d_++7#_?ғʕy#-ʌV_nӵQ, $w=]y7(/Irl%'$.5qU'I9Q\HQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE?q"ђ)]_įcdWa?q_ƐQEs=R]|ove`2/_ 8f)J%֗o$- 3ɶHXdtx8͢[ Õkx>Sڪ_pJnߙ5u!i֌_SY4zW0>m#'=#ץ X[4/kCd5݌(y $YN2px=IK;$V̥aۡ|g7^γZn\1$zG}ޛ]W;B#`qun#5Ć6$`kV<;-Ҽy!(^8p7qv+j21 (1"T?BiuQMZo-CKES<+"v^赭Z7xAֵk3#~*\?U pyU\軑BnQ阻wW 7(#zWg3s`FHWKEvd[?4ou ?- i}S#5-2IccցwSK"V#\9q!#+g (((((((((((((((((((((((((((((((((((((((((((((((((((((~%xE%R_8^k'⿍ + ( ( ( ( ("T?BiuQMZo-CKES<+"v^赭Z7xAֵk3#~*\?UwiQ]G8QEQEQEQEQE]B-EsDЅKKt o-RSAj"Z(_oEjWNѿ_'?Qoa]hk6RUѶB>Wuѫc?^ 3_%޸?gT}'fC݈١ɨ}?/oIG&C=j @<vyQc؂?u$oΓ̙;}ݿ4hw'/rl$?.zy7uGN~.5cwQEqEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPJ?FK\uq"ђ)_IOAEWQQEQEQEQEQEEsDЅKQ\?!Rmt? TmZ(xWD kZ_oEjgGGT8~h\%} 3QEvEPEPWG?_zyO_d dUQ\'hQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE?q"ђ)]_įcdWa?q_ƐQEsQ@Q@Q@Q@Q@\?!RW?tOT@[o]-E7%l;AfPr; XZ!;anG`:jѥfmF˕Y?Eb [[#U(nѭ#F}aya A*c/gA5恁bjXб# } ]|kF*5V\ٞEz|9)%l䄞\H~Mh~KB?eOS]0Lx f0/vAOᆎy?AO|GY]s^ÿǏ2i/,t'M`:u?D7]k9f0(9|8-Zż[}ʻL p+袼]YӥMR* (5 ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (gGQ^ J}[KStG>[J+V_︿t j_n<ҊUW?/7G*+Z|54/nA-O J}0o#)QZIR(C#Asz_+eIH+|9hy:mG# 4K{tSKWe6d'͠=o:usܟ_k9T=hAB*1 ((((((((((((((((((((((((((((((((((././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/index.rst0000664000175000017500000000063200000000000021634 0ustar00zuulzuul00000000000000============= Configuration ============= .. 
toctree:: :maxdepth: 1 shared-file-systems/overview shared-file-systems/api.rst shared-file-systems/drivers.rst shared-file-systems/log-files.rst shared-file-systems/config-options.rst shared-file-systems/samples/index.rst The Shared File Systems service works with many different drivers that you can configure by using these instructions. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7776735 manila-21.0.0/doc/source/configuration/shared-file-systems/0000775000175000017500000000000000000000000023662 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/api.rst0000664000175000017500000000045000000000000025164 0ustar00zuulzuul00000000000000===================================== Shared File Systems API configuration ===================================== Configuration options ~~~~~~~~~~~~~~~~~~~~~ The following options allow configuration of the APIs that Shared File Systems service supports. .. include:: ../tables/manila-api.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/config-options.rst0000664000175000017500000000113200000000000027347 0ustar00zuulzuul00000000000000================== Additional options ================== These options can also be set in the ``manila.conf`` file. .. include:: ../tables/manila-ca.inc .. include:: ../tables/manila-common.inc .. include:: ../tables/manila-compute.inc .. include:: ../tables/manila-ganesha.inc .. include:: ../tables/manila-hnas.inc .. include:: ../tables/manila-quota.inc .. include:: ../tables/manila-redis.inc .. include:: ../tables/manila-san.inc .. include:: ../tables/manila-scheduler.inc .. include:: ../tables/manila-share.inc .. include:: ../tables/manila-tegile.inc .. 
include:: ../tables/manila-winrm.inc ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7816734 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/0000775000175000017500000000000000000000000025340 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/cephfs_driver.rst0000664000175000017500000006377300000000000030735 0ustar00zuulzuul00000000000000.. Copyright 2016 Red Hat, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============= CephFS driver ============= The CephFS driver enables manila to export shared filesystems backed by Ceph's File System (CephFS) using either the Ceph network protocol or NFS protocol. Guests require a native Ceph client or an NFS client in order to mount the filesystem. When guests access CephFS using the native Ceph protocol, access is controlled via Ceph's cephx authentication system. If a user requests share access for an ID, Ceph creates a corresponding Ceph auth ID and a secret key if they do not already exist, and authorizes the ID to access the share. The client can then mount the share using the ID and the secret key. 
To learn more about configuring Ceph clients to access the shares created using this driver, please see the `Ceph documentation`_ And when guests access CephFS through NFS, an NFS-Ganesha server (or CephFS NFS service) mediates access to CephFS. The driver enables access control by managing the NFS-Ganesha server's exports. Supported Operations ~~~~~~~~~~~~~~~~~~~~ The following operations are supported with CephFS backend: - Create, delete, update and list share - Allow/deny access to share * Only ``cephx`` access type is supported for CephFS native protocol. * Only ``ip`` access type is supported for NFS protocol. * ``read-only`` and ``read-write`` access levels are supported. - Extend/shrink share - Manage/unmanage shares - Manage/unmanage share snapshots - Create, delete, update and list snapshot - Create, delete, update and list share groups - Delete and list share group snapshots .. important:: Share group snapshot creation is no longer supported in mainline CephFS. This feature has been removed from manila W release. Prerequisites ~~~~~~~~~~~~~ .. important:: A manila share backed by CephFS is only as good as the underlying filesystem. Take care when configuring your Ceph cluster, and consult the latest guidance on the use of CephFS in the `Ceph documentation`_. Ceph testing matrix ------------------- As Ceph and Manila continue to grow, it is essential to test and support combinations of releases supported by both projects. However, there is little community bandwidth to cover all of them. For simplicity sake, we are focused on testing (and therefore supporting) the current Ceph active releases. Check out the list of Ceph active releases `here `_. Below is the current state of testing for Ceph releases with this project. Adjacent components such as `devstack-plugin-ceph `_ and `tripleo `_ are added to the table below. 
Contributors to those projects can determine what versions of ceph are tested and supported with manila by those components; however, their state is presented here for ease of access. +-----------------------+---------+----------------------+ | OpenStack release | Manila | devstack-plugin-ceph | +=======================+=========+======================+ +-----------------------+---------+----------------------+ | 2024.1 ("Caracal") | Reef | Reef | +-----------------------+---------+----------------------+ | 2024.2 ("Dalmatian") | Reef | Reef | +-----------------------+---------+----------------------+ | 2025.1 ("Epoxy") | Squid | Squid | +-----------------------+---------+----------------------+ | 2025.2 ("Flamingo") | Squid | Squid | +-----------------------+---------+----------------------+ Additionally, it is expected that the version of the Ceph client available to manila is aligned with the Ceph server version. Mixing server and client versions is strongly unadvised. In case of using the NFS Ganesha driver, it's also a good practice to use the versions that align with the Ceph version of choice. Common Prerequisites -------------------- - A Ceph cluster with a filesystem configured (See `Create ceph filesystem`_ on how to create a filesystem.) - ``python3-rados`` and ``python3-ceph-argparse`` packages installed in the servers running the :term:`manila-share` service. - Network connectivity between your Ceph cluster's public network and the servers running the :term:`manila-share` service. For CephFS native shares ------------------------ - Ceph client installed in the guest - Network connectivity between your Ceph cluster's public network and guests. See :ref:`security_cephfs_native`. For CephFS NFS shares --------------------- There are two ways for the CephFS driver to provision and export CephFS shares via NFS. Both ways involve the user space NFS service, NFS-Ganesha. 
Since the Quincy release of Ceph, there is support to create and manage an NFS-Ganesha based "ceph nfs" service. This service can be clustered, i.e., it can have one or more active NFS services working in tandem to provide high availability. You can also optionally deploy an ingress service to front-end this cluster natively using ceph's management commands. Doing this allows ease of management of an NFS service to serve CephFS shares securely as well provides an active/active high availability configuration for it which may be highly desired in production environments. Please `follow the ceph documentation `_ for instructions to deploy a cluster with necessary configuration. With an NFS cluster, the CephFS driver uses Ceph mgr APIs to create and manipulate exports when share access rules are created and deleted. The CephFS driver can also work with Manila's in-built NFS-Ganesha driver to interface with an independent, standalone NFS-Ganesha service that is not orchestrated via Ceph. Unlike when under Ceph's management, the high availability of the NFS server must be externally managed. Typically deployers use Pacemaker/Corosync for providing active/passive availability for such a standalone NFS-Ganesha service. See `the NFS-Ganesha documentation `_ for more information. The CephFS driver can be configured to store the NFS recovery data in a RADOS pool to facilitate the server's recovery if the service is shut down and respawned due to failures/outages. Since the Antelope (2023.1) release of OpenStack Manila, we recommend the use of ceph orchestrator deployed NFS service. The use of a standalone NFS-Ganesha service is deprecated as of the Caracal release (2024.1) and support will be removed in a future release. The CephFS driver does not specify an NFS protocol version when setting up exports. This is to allow the deployer to configure the appropriate NFS protocol version/s directly in NFS-Ganesha configuration. 
NFS-Ganesha enables both NFS version 3 and version 4.x by virtue of default configuration. Please note that there are many differences at the protocol level between NFS versions. Many deployers enable only NFS version 4.1 (and beyond) to take advantage of enhancements in locking, security and ease of port management. Be aware that not all clients support the latest versions of NFS. The pre-requisites for NFS are: - NFS client installed in the guest. - Network connectivity between your Ceph cluster's public network and NFS-Ganesha service. - Network connectivity between your NFS-Ganesha service and the client mounting the manila share. - Appropriate firewall rules to permit port access between the clients and the NFS-Ganesha service. If you're deploying a standalone NFS-Ganesha service, we recommend using the latest version of NFS-Ganesha. The server must be deployed with at least NFS-Ganesha version 3.5. .. _authorize_ceph_driver: Authorizing the driver to communicate with Ceph ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Capabilities required for the Ceph manila identity have changed from the Wallaby release. The Ceph manila identity configured no longer needs any MDS capability. The MON and OSD capabilities can be reduced as well. However new MGR capabilities are now required. If not accorded, the driver cannot communicate to the Ceph Cluster. .. important:: The driver in the Wallaby (or later) release requires a Ceph identity with a different set of Ceph capabilities when compared to the driver in a pre-Wallaby release. When upgrading to Wallaby you'll also have to update the capabilities of the Ceph identity used by the driver (refer to `Ceph user capabilities docs `_) E.g. 
a native driver that already uses `client.manila` Ceph identity, issue command `ceph auth caps client.manila mon 'allow r' mgr 'allow rw'` If you are deploying the CephFS driver with Native CephFS or using an NFS service deployed with ceph management commands, the auth ID should be set as follows: .. code-block:: console ceph auth get-or-create client.manila -o manila.keyring \ mgr 'allow rw' \ mon 'allow r' If you're deploying the CephFS NFS driver with a standalone NFS-Ganesha service, we use a specific pool to store exports (configurable with the config option "ganesha_rados_store_pool_name"). The `client.manila` ceph user requires permission to access this pool. So, the auth ID should be set as follows: .. code-block:: console ceph auth get-or-create client.manila -o manila.keyring \ osd 'allow rw pool=" \ mgr 'allow rw' \ mon 'allow r' ``manila.keyring``, along with your ``ceph.conf`` file, will then need to be placed on the server running the :term:`manila-share` service. .. important:: To communicate with the Ceph backend, a CephFS driver instance (represented as a backend driver section in manila.conf) requires its own Ceph auth ID that is not used by other CephFS driver instances running in the same controller node. In the server running the :term:`manila-share` service, you can place the ``ceph.conf`` and ``manila.keyring`` files in the /etc/ceph directory. Set the same owner for the :term:`manila-share` process and the ``manila.keyring`` file. Add the following section to the ``ceph.conf`` file. .. code-block:: ini [client.manila] client mount uid = 0 client mount gid = 0 log file = /opt/stack/logs/ceph-client.manila.log admin socket = /opt/stack/status/stack/ceph-$name.$pid.asok keyring = /etc/ceph/manila.keyring It is advisable to modify the Ceph client's admin socket file and log file locations so that they are co-located with manila services's pid files and log files respectively. 
Enabling snapshot support in Ceph backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From Ceph Nautilus, all new filesystems created on Ceph have snapshots enabled by default. If you've upgraded your ceph cluster and want to enable snapshots on a pre-existing filesystem, you can do so: .. code-block:: console ceph fs set {fs_name} allow_new_snaps true Configuring CephFS backend in manila.conf ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Configure CephFS native share backend in manila.conf ---------------------------------------------------- Add CephFS to ``enabled_share_protocols`` (enforced at manila api layer). In this example we leave NFS and CIFS enabled, although you can remove these if you will only use a CephFS backend: .. code-block:: ini enabled_share_protocols = NFS,CIFS,CEPHFS Create a section like this to define a CephFS native backend: .. code-block:: ini [cephfsnative1] driver_handles_share_servers = False share_backend_name = CEPHFSNATIVE1 share_driver = manila.share.drivers.cephfs.driver.CephFSDriver cephfs_conf_path = /etc/ceph/ceph.conf cephfs_protocol_helper_type = CEPHFS cephfs_auth_id = manila cephfs_cluster_name = ceph cephfs_filesystem_name = cephfs Set ``driver-handles-share-servers`` to ``False`` as the driver does not manage the lifecycle of ``share-servers``. For the driver backend to expose shares via the native Ceph protocol, set ``cephfs_protocol_helper_type`` to ``CEPHFS``. Then edit ``enabled_share_backends`` to point to the driver's backend section using the section name. In this example we are also including another backend ("generic1"), you would include whatever other backends you have configured. Finally, edit ``cephfs_filesystem_name`` with the name of the Ceph filesystem (also referred to as a CephFS volume) you want to use. If you have more than one Ceph filesystem in the cluster, you need to set this option. .. 
important:: For Native CephFS shares, the backing ``cephfs_filesystem_name`` is visible to end users through the ``__mount_options`` metadata. Make sure to add the ``__mount_options`` metadata key to the list of admin only modifiable metadata keys (``admin_only_metadata``), as explained in the :ref:`additional configuration options page `. .. code-block:: ini enabled_share_backends = generic1, cephfsnative1 Configure CephFS NFS share backend in manila.conf ------------------------------------------------- .. note:: Prior to configuring the Manila CephFS driver to use NFS, you must have installed and configured NFS-Ganesha. If you're using ceph orchestrator to create the NFS-Ganesha service and manage it alongside ceph, refer to the Ceph documentation on how to setup this service. If you're using an independently deployed standalone NFS-Ganesha service, refer to the `NFS-Ganesha setup guide <../contributor/ganesha.html#nfs-ganesha-configuration>`_. Add NFS to ``enabled_share_protocols`` if it's not already there: .. code-block:: ini enabled_share_protocols = NFS,CIFS,CEPHFS Create a section to define a CephFS NFS share backend. The following is an example for using a ceph orchestrator deployed NFS service: .. code-block:: ini [cephfsnfs1] driver_handles_share_servers = False share_backend_name = CEPHFSNFS1 share_driver = manila.share.drivers.cephfs.driver.CephFSDriver cephfs_protocol_helper_type = NFS cephfs_conf_path = /etc/ceph/ceph.conf cephfs_auth_id = manila cephfs_cluster_name = ceph cephfs_filesystem_name = cephfs cephfs_nfs_cluster_id = mycephfsnfscluster The following is an example for using an independently deployed standalone NFS-Ganesha service: .. 
code-block:: ini [cephfsnfs1] driver_handles_share_servers = False share_backend_name = CEPHFSNFS1 share_driver = manila.share.drivers.cephfs.driver.CephFSDriver cephfs_protocol_helper_type = NFS cephfs_conf_path = /etc/ceph/ceph.conf cephfs_auth_id = manila cephfs_cluster_name = ceph cephfs_filesystem_name = cephfs cephfs_ganesha_server_is_remote= False cephfs_ganesha_server_ip = 172.24.4.3 ganesha_rados_store_enable = True ganesha_rados_store_pool_name = cephfs_data The following options are set in the driver backend sections above: * ``driver-handles-share-servers`` to ``False`` as the driver does not manage the lifecycle of ``share-servers``. * ``cephfs_protocol_helper_type`` to ``NFS`` to allow NFS protocol access to the CephFS backed shares. * ``ceph_auth_id`` to the ceph auth ID created in :ref:`authorize_ceph_driver`. * ``cephfs_nfs_cluster_id`` - Use this option with a ceph orchestrator deployed clustered NFS service. Set it to the name of the cluster created with the ceph orchestrator. * ``cephfs_ganesha_server_is_remote`` - Use this option with a standalone NFS-Ganesha service. Set it to False if the NFS-ganesha server is co-located with the :term:`manila-share` service. If the NFS-Ganesha server is remote, then set the options to ``True``, and set other options such as ``cephfs_ganesha_server_ip``, ``cephfs_ganesha_server_username``, and ``cephfs_ganesha_server_password`` (or ``cephfs_ganesha_path_to_private_key``) to allow the driver to manage the NFS-Ganesha export entries over SSH. * ``cephfs_ganesha_server_ip`` - Use this option with a standalone NFS-Ganesha service. Set it to the ganesha server IP address. It is recommended to set this option even if the ganesha server is co-located with the :term:`manila-share` service. * ``ganesha_rados_store_enable`` - Use this option with a standalone NFS-Ganesha service. Set it to True or False. Setting this option to True allows NFS Ganesha to store exports and its export counter in Ceph RADOS objects. 
We recommend setting this to True and using a RADOS object since it is useful for highly available NFS-Ganesha deployments to store their configuration efficiently in an already available distributed storage system. * ``ganesha_rados_store_pool_name`` - Use this option with a standalone NFS-Ganesha service. Set it to the name of the RADOS pool you have created for use with NFS-Ganesha. Set this option only if also setting the ``ganesha_rados_store_enable`` option to True. If you want to use one of the backend CephFS's RADOS pools, then using CephFS's data pool is preferred over using its metadata pool. Edit ``enabled_share_backends`` to point to the driver's backend section using the section name, ``cephfsnfs1``. Finally, edit ``cephfs_filesystem_name`` with the name of the Ceph filesystem (also referred to as a CephFS volume) you want to use. If you have more than one Ceph filesystem in the cluster, you need to set this option. .. code-block:: ini enabled_share_backends = generic1, cephfsnfs1 Space considerations ~~~~~~~~~~~~~~~~~~~~ The CephFS driver reports total and free capacity available across the Ceph cluster to manila to allow provisioning. All CephFS shares are thinly provisioned, i.e., empty shares do not consume any significant space on the cluster. The CephFS driver does not allow controlling oversubscription via manila. So, as long as there is free space, provisioning will continue, and eventually this may cause your Ceph cluster to be over provisioned and you may run out of space if shares are being filled to capacity. It is advised that you use Ceph's monitoring tools to monitor space usage and add more storage when required in order to honor space requirements for provisioned manila shares. You may use the driver configuration option ``reserved_share_percentage`` to prevent manila from filling up your Ceph cluster, and allow existing shares to grow. 
Creating shares ~~~~~~~~~~~~~~~ Create CephFS native share -------------------------- The default share type may have ``driver_handles_share_servers`` set to True. Configure a share type suitable for CephFS native share: .. code-block:: console openstack share type create cephfsnativetype false openstack share type set cephfsnativetype --extra-specs vendor_name=Ceph storage_protocol=CEPHFS Then create a share, .. code-block:: console openstack share create --share-type cephfsnativetype --name cephnativeshare1 cephfs 1 Note the export location of the share: .. code-block:: console openstack share export location list cephnativeshare1 The export location of the share contains the Ceph monitor (mon) addresses and ports, and the path to be mounted. It is of the form, ``{mon ip addr:port}[,{mon ip addr:port}]:{path to be mounted}`` Create CephFS NFS share ----------------------- Configure a share type suitable for CephFS NFS share: .. code-block:: console openstack share type create cephfsnfstype false openstack share type set cephfsnfstype --extra-specs vendor_name=Ceph storage_protocol=NFS Then create a share: .. code-block:: console openstack share create --share-type cephfsnfstype --name cephnfsshare1 nfs 1 Note the export location of the share: .. code-block:: console openstack share export location list cephnfsshare1 The export location of the share contains the IP address of the NFS-Ganesha server and the path to be mounted. It is of the form, ``{NFS-Ganesha server address}:{path to be mounted}`` Managing existing shares and snapshots ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Starting from the 2024.2 (Dalmatian) release, it is possible to bring both CephFS Native and NFS shares (subvolumes) that already exist in the Ceph Storage under Manila's management. 
The workflow will be slightly different when compared to the original approach: - The :ref:`share adoption process` should be used, but the name of the subvolume must be supplied as the ``export path`` parameter, so that the driver can appropriately locate the subvolume and manage it. - The subvolume's ``subvolume_mode`` will not be overwritten by manila. - The subvolumes will not be renamed in the Ceph Storage, and at the end of the manage operation, their names will be preserved. - In case the subvolume has its current allocated size (``bytes_quota``) set as infinite, please make sure specify a new ``size`` within the ``driver_options`` parameter. The driver will attempt to resize the share using the provided size and will fail if it is not enough. - While managing CephFS Snapshots, the CephFS driver will consider the size of the parent share and account it as the snapshot's quota. Allowing access to shares ~~~~~~~~~~~~~~~~~~~~~~~~~ Allow access to CephFS native share ----------------------------------- Allow Ceph auth ID ``alice`` access to the share using ``cephx`` access type. .. code-block:: console openstack share access create cephnativeshare1 cephx alice Note the access status, and the access/secret key of ``alice``. .. code-block:: console openstack share access list cephnativeshare1 Allow access to CephFS NFS share -------------------------------- Allow a guest access to the share using ``ip`` access type. .. code-block:: console openstack share access create cephnfsshare1 ip 172.24.4.225 Mounting CephFS shares ~~~~~~~~~~~~~~~~~~~~~~ .. note:: The cephfs filesystem name will be available in the ``__mount_options`` share's metadata. Mounting CephFS native share using FUSE client ---------------------------------------------- Using the secret key of the authorized ID ``alice`` create a keyring file, ``alice.keyring`` like: .. 
code-block:: ini [client.alice] key = AQA8+ANW/4ZWNRAAOtWJMFPEihBA1unFImJczA== Using the mon IP addresses from the share's export location, create a configuration file, ``ceph.conf`` like: .. code-block:: ini [client] client quota = true mon host = 192.168.1.7:6789, 192.168.1.8:6789, 192.168.1.9:6789 Finally, mount the filesystem, substituting the filenames of the keyring and configuration files you just created, and substituting the path to be mounted from the share's export location: .. code-block:: console sudo ceph-fuse ~/mnt \ --id=alice \ --conf=./ceph.conf \ --keyring=./alice.keyring \ --client-mountpoint=/volumes/_nogroup/4c55ad20-9c55-4a5e-9233-8ac64566b98c Mounting CephFS native share using Kernel client ------------------------------------------------ If you have the ``ceph-common`` package installed in the client host, you can use the kernel client to mount CephFS shares. .. important:: If you choose to use the kernel client rather than the FUSE client the share size limits set in manila may not be obeyed in versions of kernel older than 4.17 and Ceph versions older than mimic. See the `quota limitations documentation`_ to understand CephFS quotas. The mount command is as follows: .. code-block:: console mount -t ceph {mon1 ip addr}:6789,{mon2 ip addr}:6789,{mon3 ip addr}:6789:/ \ {mount-point} -o name={access-id},secret={access-key} With our earlier examples, this would be: .. code-block:: console mount -t ceph 192.168.1.7:6789, 192.168.1.8:6789, 192.168.1.9:6789:/ \ /volumes/_nogroup/4c55ad20-9c55-4a5e-9233-8ac64566b98c \ -o name=alice,secret='AQA8+ANW/4ZWNRAAOtWJMFPEihBA1unFImJczA==' Mount CephFS NFS share using NFS client --------------------------------------- In the guest, mount the share using the NFS client and knowing the share's export location. .. 
code-block:: ini sudo mount -t nfs 172.24.4.3:/volumes/_nogroup/6732900b-32c1-4816-a529-4d6d3f15811e /mnt/nfs/ Known restrictions ~~~~~~~~~~~~~~~~~~ - A CephFS driver instance, represented as a backend driver section in manila.conf, requires a Ceph auth ID unique to the backend Ceph Filesystem. Using a non-unique Ceph auth ID will result in the driver unintentionally evicting other CephFS clients using the same Ceph auth ID to connect to the backend. - Snapshots are read-only. A user can read a snapshot's contents from the ``.snap/{manila-snapshot-id}_{unknown-id}`` folder within the mounted share. Security ~~~~~~~~ - Each share's data is mapped to a distinct Ceph RADOS namespace. A guest is restricted to access only that particular RADOS namespace. https://docs.ceph.com/en/latest/cephfs/file-layouts/ .. _security_cephfs_native: Security with CephFS native share backend ----------------------------------------- As the guests need direct access to Ceph's public network, CephFS native share backend is suitable only in private clouds where guests can be trusted. .. _Ceph documentation: https://docs.ceph.com/en/latest/cephfs/ .. _Create ceph filesystem: https://docs.ceph.com/en/latest/cephfs/createfs/ .. _limitations on snapshots: https://docs.ceph.com/en/latest/dev/cephfs-snapshots/ .. _quota limitations documentation: https://docs.ceph.com/en/latest/cephfs/quota/#limitations Configuration Reference ----------------------- .. include:: ../../tables/manila-cephfs.inc The :mod:`manila.share.drivers.cephfs.driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
automodule:: manila.share.drivers.cephfs.driver :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/dell-emc-powerflex-driver.rst0000664000175000017500000001564100000000000033065 0ustar00zuulzuul00000000000000========================= Dell EMC PowerFlex driver ========================= The Dell EMC Shared File Systems service driver framework (EMCShareDriver) utilizes the Dell EMC storage products to provide the shared file systems to OpenStack. The Dell EMC driver is a plug-in based driver which is designed to use different plug-ins to manage different Dell EMC storage products. The PowerFlex SDNAS plug-in manages the PowerFlex system to provide shared filesystems. The Dell EMC driver framework with the PowerFlex SDNAS plug-in is referred to as the PowerFlex SDNAS driver in this document. The PowerFlex SDNAS driver can be used to provide functions such as share and snapshot for instances. The PowerFlex SDNAS driver enables the PowerFlex 4.x storage system to provide file system management through REST API operations to OpenStack. Requirements ------------ - PowerFlex 4.x storage system - SDNAS cluster registrated with SDNAS Gateway. Supported shared filesystems and operations ------------------------------------------- The driver suppors NFS shares only. The following operations are supported: * Create a share. * Delete a share. * Allow share access. * Deny share access. * Extend a share. * Create a snapshot. * Delete a snapshot. Driver configuration -------------------- Edit the ``manila.conf`` file, which is usually located under the following path ``/etc/manila/manila.conf``. * Add a section for the PowerFlex SDNAS driver backend. * Under the ``[DEFAULT]`` section, set the ``enabled_share_backends`` parameter with the name of the new backend section. 
* Configure the driver backend section with the parameters below. .. code-block:: ini share_driver = manila.share.drivers.dell_emc.driver.EMCShareDriver emc_share_backend = powerflex dell_nas_backend_host = dell_nas_backend_port = dell_nas_server = dell_nas_login = dell_nas_password = powerflex_storage_pool = powerflex_protection_domain = share_backend_name = powerflex dell_ssl_cert_verify = dell_ssl_certificate_path = Where: +---------------------------------+----------------------------------------------------+ | **Parameter** | **Description** | +=================================+====================================================+ | ``share_driver`` | Full path of the EMCShareDriver used to enable | | | the plugin. | +---------------------------------+----------------------------------------------------+ | ``emc_share_backend`` | The plugin name. Set it to `powerflex` to | | | enable the PowerFlex SDNAS driver. | +---------------------------------+----------------------------------------------------+ | ``dell_nas_backend_host`` | The management IP of the PowerFlex system. | +---------------------------------+----------------------------------------------------+ | ``dell_nas_backend_port`` | The port number used for secured connection. | | | 443 by default if not provided. | +---------------------------------+----------------------------------------------------+ | ``dell_nas_server`` | The name of the NAS server within the | | | PowerFlex system. | +---------------------------------+----------------------------------------------------+ | ``dell_nas_login`` | The login to use to connect to the PowerFlex | | | system. It must have administrator privileges. | +---------------------------------+----------------------------------------------------+ | ``dell_nas_password`` | The password associated with the login. 
| +---------------------------------+----------------------------------------------------+ | ``powerflex_storage_pool`` | The name of the storage pool within the | | | PowerFlex system. | +---------------------------------+----------------------------------------------------+ | ``powerflex_protection_domain`` | The name of the protection domain within the | | | PowerFlex system. | +---------------------------------+----------------------------------------------------+ | ``share_backend_name`` | The name of the backend which provides shares. | | | Must be set to powerflex | +---------------------------------+----------------------------------------------------+ | ``dell_ssl_cert_verify`` | Boolean to enable the usage of SSL certificates. | | | False is the default value. | +---------------------------------+----------------------------------------------------+ | ``dell_ssl_certificate_path`` | Full path to SSL certificates. | | | Applies only when the usage of SSL certificate is | | | enabled. | +---------------------------------+----------------------------------------------------+ Restart of manila-share service is needed for the configuration changes to take effect. Required operations prior to any usage -------------------------------------- A new share type needs to be created before going further. .. code-block:: console $ openstack share type create powerflex False Map this share type to the backend section configured in Manila .. code-block:: console $ openstack share type set --extra_specs share_backend_name=powerflex powerflex Specific configuration for Snapshot support ------------------------------------------- The following extra specifications need to be configured with share type. - snapshot_support = True For new share type, these extra specifications can be set directly when creating share type: .. 
code-block:: console $ openstack share type create --extra_specs snapshot_support=True powerflex False Or you can update already existing share type with command: .. code-block:: console $ openstack share type set --extra_specs snapshot_support=True powerflex Known restrictions ------------------ The PowerFlex SDNAS driver has the following restrictions. - Minimum size 3GiB. - Only NFS protocol is supported. - Only DHSS=False is supported ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/dell-emc-powermax-driver.rst0000664000175000017500000004414300000000000032713 0ustar00zuulzuul00000000000000======================== Dell EMC PowerMax Plugin ======================== The Dell EMC Shared File Systems service driver framework (EMCShareDriver) utilizes the Dell EMC storage products to provide the shared file systems to OpenStack. The Dell EMC driver is a plug-in based driver which is designed to use different plug-ins to manage different Dell EMC storage products. The PowerMax plug-in manages the PowerMax to provide shared file systems. The Dell EMC driver framework with the PowerMax plug-in is referred to as the PowerMax driver in this document. This driver performs the operations on PowerMax eNAS by XMLAPI and the file command line. Each back end manages one Data Mover of PowerMax. Multiple Shared File Systems service back ends need to be configured to manage multiple Data Movers. Requirements ~~~~~~~~~~~~ - PowerMax eNAS OE for File version 8.1 or higher - PowerMax Unified or File only - The following licenses should be activated on PowerMax for File: - CIFS - NFS - SnapSure (for snapshot) - ReplicationV2 (for create share from snapshot) Supported shared file systems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports CIFS and NFS shares. The following operations are supported: - Create a share. - Delete a share. 
- Allow share access. Note the following limitations: - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. - Deny share access. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. While the generic driver creates shared file systems based on cinder volumes attached to nova VMs, the PowerMax driver performs similar operations using the Data Movers on the array. Pre-configurations on PowerMax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Configure a storage pool There is a one to one relationship between a storage pool in embedded NAS to a storage group on the PowerMax. The best way to provision storage for file is from the Unisphere for PowerMax UI rather than eNAS UI. Go to :menuselection:`{array} > SYSTEM > FIle` and under :menuselection:`Actions` click :menuselection:`PROVISION STORAGE FOR FILE` .. note:: When creating a new storage group you have the ability to assign a service level e.g. Diamond and disable compression/deduplication which is enabled by default. To pick up the newly created storage pool in the eNAS UI, go to :menuselection:`{Control Station} > Storage > Storage Configuration > Storage Pools` and under :menuselection:`File Storage` click :menuselection:`Rescan Storage Systems` or on the command line: .. code-block:: console $ nas_diskmark -mark -all -discovery y -monitor y The new storage pool should now appear in the eNAS UI #. Make sure you have the appropriate licenses .. code-block:: console $ nas_license -l key status value site_key online xx xx xx xx nfs online cifs online snapsure online replicatorV2 online filelevelretention online #. Enable CIFS service on Data Mover. Ensure the CIFS service is enabled on the Data Mover which is going to be managed by PowerMax driver. To start the CIFS service, use the following command: .. code-block:: console $ server_setup -Protocol cifs -option start [=] # movername = name of the Data Mover # n = number of threads for CIFS users .. 
note:: If there is 1 GB of memory on the Data Mover, the default is 96 threads. However, if there is over 1 GB of memory, the default number of threads is 256. To check the CIFS service status, use the following command: .. code-block:: console $ server_cifs | head # movername = name of the Data Mover The command output will show the number of CIFS threads started. #. NTP settings on Data Mover. PowerMax driver only supports CIFS share creation with share network which has an Active Directory security-service associated. Creating CIFS share requires that the time on the Data Mover is in sync with the Active Directory domain so that the CIFS server can join the domain. Otherwise, the domain join will fail when creating a share with this security service. There is a limitation that the time of the domains used by security-services, even for different tenants and different share networks, should be in sync. Time difference should be less than 5 minutes. .. note:: If there is a clock skew then you may see the following error "The local machine and the remote machine are not synchronized. Kerberos protocol requires a synchronization of both participants within the same 5 minutes". To fix this error you must make sure the times of the eNas controller host and the Domain Controller or within 5 minutes of each other. You must be root to change the date of the eNas control station. Check also that your time zones coincide. We recommend setting the NTP server to the same public NTP server on both the Data Mover and domains used in security services to ensure the time is in sync everywhere. Check the date and time on Data Mover with the following command: .. code-block:: console $ server_date # movername = name of the Data Mover Set the NTP server for Data Mover with the following command: .. code-block:: console $ server_date timesvc start ntp [ ...] # movername = name of the Data Mover # host = IP address of the time server host .. 
note:: The host must be running the NTP protocol. Only 4 host entries are allowed. #. Configure User Mapping on the Data Mover. Before creating CIFS share using PowerMax driver, you must select a method of mapping Windows SIDs to UIDs and GIDs. DELL EMC recommends using usermapper in single protocol (CIFS) environment which is enabled on PowerMax eNAS by default. To check usermapper status, use the following command syntax: .. code-block:: console $ server_usermapper # movername = name of the Data Mover If usermapper does not start, use the following command to start the usermapper: .. code-block:: console $ server_usermapper -enable # movername = name of the Data Mover For a multiple protocol environment, refer to Configuring PowerMax eNAS User Mapping on `EMC support site `_ for additional information. #. Configure network connection. Find the network devices (physical port on NIC) of the Data Mover that has access to the share network. To check the device list on the eNAS UI go to :menuselection:`{Control Station} > Settings > Network > Devices`. or on the command line: .. code-block:: console $ server_sysconfig server_2 -pci server_2 : PCI DEVICES: On Board: VendorID=0x1120 DeviceID=0x1B00 Controller 0: scsi-0 IRQ: 32 0: scsi-16 IRQ: 33 0: scsi-32 IRQ: 34 0: scsi-48 IRQ: 35 Broadcom 10 Gigabit Ethernet Controller 0: fxg-3-0 IRQ: 36 speed=10000 duplex=full txflowctl=disable rxflowctl=disable Link: Up 0: fxg-3-1 IRQ: 38 speed=10000 duplex=full txflowctl=disable rxflowctl=disable Link: Down Back-end configurations ~~~~~~~~~~~~~~~~~~~~~~~ .. note:: The following deprecated tags will be removed in the T release: - emc_nas_server_container - emc_nas_pool_names - emc_interface_ports The following parameters need to be configured in the ``/etc/manila/manila.conf`` file for the PowerMax driver: .. 
code-block:: ini emc_share_backend = powermax emc_nas_server = emc_nas_password = emc_nas_login = driver_handles_share_servers = True powermax_server_container = powermax_share_data_pools = share_driver = manila.share.drivers.dell_emc.driver.EMCShareDriver powermax_ethernet_ports = emc_ssl_cert_verify = True emc_ssl_cert_path = share_backend_name = - `emc_share_backend` The plug-in name. Set it to ``powermax`` for the PowerMax driver. Other values are ``powerscale``, ``vnx`` and ``unity``. - `emc_nas_server` The control station IP address of the PowerMax system to be managed. - `emc_nas_password` and `emc_nas_login` The fields that are used to provide credentials to the PowerMax system. Only local users of PowerMax File is supported. - `driver_handles_share_servers` PowerMax only supports True, where the share driver handles the provisioning and management of the share servers. - `powermax_server_container` Name of the Data Mover to serve the share service. - `powermax_share_data_pools` Comma separated list specifying the name of the pools to be used by this back end. Do not set this option if all storage pools on the system can be used. Wild card character is supported. Examples: pool_1, pool_*, * - `powermax_ethernet_ports (optional)` Comma-separated list specifying the ports (devices) of Data Mover that can be used for share server interface. Do not set this option if all ports on the Data Mover can be used. Wild card character is supported. Examples: fxg-9-0, fxg-_*, * - `emc_ssl_cert_verify (optional)` By default this is True, setting it to False is not recommended - `emc_ssl_cert_path (optional)` The path to the This must be set if emc_ssl_cert_verify is True which is the recommended configuration. See ``SSL Support`` section for more details. - `share_backend_name` The backend name for a given driver implementation. Restart of the ``manila-share`` service is needed for the configuration changes to take effect. SSL Support ----------- #. 
Run the following on eNas Control Station, to display the CA certification for the active CS. .. code-block:: console $ /nas/sbin/nas_ca_certificate -display .. warning:: This cert will be different for the secondary CS so if there is a failover a different certificate must be used. #. Copy the contents and create a file with a .pem extention on your manila host. .. code-block:: ini -----BEGIN CERTIFICATE----- the cert contents are here -----END CERTIFICATE----- #. To verify the cert by running the following and examining the output: .. code-block:: console $ openssl x509 -in test.pem -text -noout .. code-block:: ini Certificate: Data: Version: 3 (0x2) Serial Number: xxxxxx Signature Algorithm: sha1WithRSAEncryption Issuer: O=VNX Certificate Authority, CN=xxx Validity Not Before: Feb 27 16:02:41 2019 GMT Not After : Mar 4 16:02:41 2024 GMT Subject: O=VNX Certificate Authority, CN=xxxxxx Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (2048 bit) Modulus: xxxxxx Exponent: xxxxxx X509v3 extensions: X509v3 Subject Key Identifier: xxxxxx X509v3 Authority Key Identifier: keyid:xxxxx DirName:/O=VNX Certificate Authority/CN=xxxxxx serial:xxxxx X509v3 Basic Constraints: CA:TRUE X509v3 Subject Alternative Name: DNS:xxxxxx, DNS:xxxxxx.localdomain, DNS:xxxxxxx, DNS:xxxxx Signature Algorithm: sha1WithRSAEncryption xxxxxx #. As it is the capath and not the cafile that is expected, copy the file to either new directory or an existing directory (where other .pem files exist). #. Run the following on the directory .. code-block:: console $ c_rehash $PATH_TO_CERTS #. Update manila.conf with the directory where the .pem exists. .. code-block:: ini emc_ssl_cert_path = /path_to_certs/ #. Restart manila services. Snapshot Support ~~~~~~~~~~~~~~~~ Snapshot support is disabled by default, so in order to allow shapshots for a share type, the ``snapshot_support`` extra spec must be set to True. 
Creating a share from a snapshot is also disabled by default so ``create_share_from_snapshot_support`` must also be set to True if this functionality is required. For a new share type: .. code-block:: console $ manila type-create --snapshot_support True \ --create_share_from_snapshot_support True \ ${share_type_name} True For an existing share type: .. code-block:: console $ manila type-key ${share_type_name} \ set snapshot_support=True $ manila type-key ${share_type_name} \ set create_share_from_snapshot_support=True To create a snapshot from a share where snapshot_support=True: .. code-block:: console $ manila snapshot-create ${source_share_name} --name ${target_snapshot_name} To create a target share from a snapshot where create_share_from_snapshot_support=True: .. code-block:: console $ manila create cifs 3 --name ${target_share_name} \ --share-network ${share_network} \ --share-type ${share_type_name} \ --metadata source=snapshot \ --snapshot-id ${snapshot_id} IPv6 support ~~~~~~~~~~~~ IPv6 support for PowerMax Manila driver was introduced in Rocky release. The feature is divided into two parts: #. The driver is able to manage share or snapshot in the Neutron IPv6 network. #. The driver is able to connect PowerMax management interface using its IPv6 address. Pre-Configurations for IPv6 support ----------------------------------- The following parameters need to be configured in ``/etc/manila/manila.conf`` for the PowerMax driver: .. code-block:: ini network_plugin_ipv6_enabled = True If you want to connect to the eNAS controller using an IPv6 address, specify the address in ``/etc/manila/manila.conf``: .. code-block:: ini emc_nas_server = Restrictions ~~~~~~~~~~~~ The PowerMax driver has the following restrictions: - Only ``driver_handles_share_servers`` equals True is supported. - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. - Only FLAT network and VLAN network are supported. 
- VLAN network is supported with limitations. The neutron subnets in different VLANs that are used to create share networks cannot have overlapped address spaces. Otherwise, PowerMax may have a problem to communicate with the hosts in the VLANs. To create shares for different VLANs with same subnet address, use different Data Movers. - The **Active Directory** security service is the only supported security service type and it is required to create CIFS shares. - Only one security service can be configured for each share network. - The domain name of the ``active_directory`` security service should be unique even for different tenants. - The time on the Data Mover and the Active Directory domains used in security services should be in sync (time difference should be less than 10 minutes). We recommended using same NTP server on both the Data Mover and Active Directory domains. - On eNAS, the snapshot is stored in the SavVols. eNAS system allows the space used by SavVol to be created and extended until the sum of the space consumed by all SavVols on the system exceeds the default 20% of the total space available on the system. If the 20% threshold value is reached, an alert will be generated on eNAS. Continuing to create snapshot will cause the old snapshot to be inactivated (and the snapshot data to be abandoned). The limit percentage value can be changed manually by storage administrator based on the storage needs. We recommend the administrator configures the notification on the SavVol usage. Refer to Using eNAS SnapSure document on `EMC support site `_ for more information. - eNAS has limitations on the overall numbers of Virtual Data Movers, filesystems, shares, and checkpoints. Virtual Data Mover(VDM) is created by the eNAS driver on the eNAS to serve as the Shared File Systems service share server. Similarly, the filesystem is created, mounted, and exported from the VDM over CIFS or NFS protocol to serve as the Shared File Systems service share. 
The eNAS checkpoint serves as the Shared File Systems service share snapshot. Refer to the NAS Support Matrix document on `EMC support site `_ for the limitations and configure the quotas accordingly. Other Remarks ~~~~~~~~~~~~~ - eNAS ``nas_quotas`` should not be confused with OpenStack manila quotas. The former edits quotas for mounted file systems, and displays a listing of quotas and disk usage at the file system level (by the user, group, or tree), or at the quota-tree level (by the user or group). ``nas_quotas`` also turns quotas on and off, and clears quotas records for a file system, quota tree, or a Data Mover. Refer to PowerMax eNAS CLI Reference guide on `EMC support site `_ for additional information. ``OpenStack manila quotas`` delimit the number of shares, snapshots etc. a user can create. .. code-block:: console $ manila quota-show --tenant --user +-----------------------+-------+ | Property | Value | +-----------------------+-------+ | share_groups | 50 | | gigabytes | 1000 | | snapshot_gigabytes | 1000 | | share_group_snapshots | 50 | | snapshots | 50 | | shares | 50 | | share_networks | 10 | +-----------------------+-------+ Driver options ~~~~~~~~~~~~~~ Configuration options specific to this driver: .. include:: ../../tables/manila-powermax.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/dell-emc-powerscale-driver.rst0000664000175000017500000000444500000000000033216 0ustar00zuulzuul00000000000000====================== Dell PowerScale driver ====================== The EMC Shared File Systems driver framework (EMCShareDriver) utilizes Dell storage products to provide shared file systems to OpenStack. The EMC driver is a plug-in based driver which is designed to use different plug-ins to manage different Dell storage products. 
The PowerScale driver is a plug-in for the EMC framework which allows the Shared File Systems service to interface with a PowerScale back end to provide a shared filesystem. The EMC driver framework with the PowerScale plug-in is referred to as the ``PowerScale Driver`` in this document. This PowerScale Driver interfaces with a PowerScale cluster via the REST PowerScale Platform API (PAPI) and the RESTful Access to Namespace API (RAN). Requirements ~~~~~~~~~~~~ - PowerScale cluster running OneFS 9.10 or higher Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports CIFS and NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. Note the following limitations: - Only IP access type is supported. - Only read-write access is supported. - Deny share access. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. - Ensure shares. Back end configuration ~~~~~~~~~~~~~~~~~~~~~~ The following parameters need to be configured in the Shared File Systems service configuration file for the PowerScale driver: .. code-block:: ini share_driver = manila.share.drivers.emc.driver.EMCShareDriver emc_share_backend = powerscale emc_nas_server = emc_nas_login = emc_nas_password = Thin Provisioning ~~~~~~~~~~~~~~~~~ PowerScale systems have thin provisioning enabled by default. Add the parameter below to set an advisory limit. .. code-block:: ini powerscale_threshold_limit = Restrictions ~~~~~~~~~~~~ The PowerScale driver has the following restrictions: - Only IP access type is supported for NFS and CIFS. - Only FLAT network is supported. - Quotas are not yet supported. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. 
include:: ../../tables/manila-emc.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/dell-emc-powerstore-driver.rst0000664000175000017500000001701300000000000033256 0ustar00zuulzuul00000000000000=========================== Dell EMC PowerStore Plugin =========================== The Dell EMC Shared File Systems service driver framework (EMCShareDriver) utilizes the Dell EMC storage products to provide the shared file systems to OpenStack. The Dell EMC driver is a plug-in based driver which is designed to use different plug-ins to manage different Dell EMC storage products. The PowerStore plug-in manages the PowerStore to provide shared file systems. The Dell EMC driver framework with the PowerStore plug-in is referred to as the PowerStore driver in this document. This driver performs the operations on PowerStore through RESTful APIs. Each backend manages one PowerStore storage system. Configure multiple Shared File Systems service backends to manage multiple PowerStore systems. Requirements ------------ - PowerStore version 3.0 or higher. - PowerStore File is enabled. Supported shared filesystems and operations ------------------------------------------- The driver supports NFS shares and CIFS shares. The following operations are supported. - Create a share. - Delete a share. - Allow share access. - Deny share access. - Extend a share. - Shrink a share. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. - Revert a share to a snapshot. Driver configuration -------------------- Edit the configuration file ``/etc/manila/manila.conf``. * Add a section for the PowerStore driver backend. * Under the ``[DEFAULT]`` section, set the ``enabled_share_backends`` parameter with the name of the new backend section. * Configure the driver backend section with the parameters below. .. 
code-block:: ini share_driver = manila.share.drivers.dell_emc.driver.EMCShareDriver emc_share_backend = powerstore dell_nas_backend_host = dell_nas_server = dell_ad_domain = dell_nas_login = dell_nas_password = share_backend_name = dell_ssl_cert_verify = True/False dell_ssl_cert_path = Where: +---------------------------------+----------------------------------------------------+ | **Parameter** | **Description** | +=================================+====================================================+ | ``share_driver`` | Full path of the EMCShareDriver used to enable | | | the plugin. | +---------------------------------+----------------------------------------------------+ | ``emc_share_backend`` | The plugin name. Set it to `powerstore` to | | | enable the PowerStore driver. | +---------------------------------+----------------------------------------------------+ | ``dell_nas_backend_host`` | The management IP of the PowerStore system. | +---------------------------------+----------------------------------------------------+ | ``dell_nas_server`` | The name of the NAS server in the | | | PowerStore system. | +---------------------------------+----------------------------------------------------+ | ``dell_ad_domain`` | The name of the Active Directory Domain. | | | Only applicable when the SMB server joins | | | to the Active Directory Domain. | +---------------------------------+----------------------------------------------------+ | ``dell_nas_login`` | The login to use to connect to the PowerStore | | | system. It must have administrator privileges. | +---------------------------------+----------------------------------------------------+ | ``dell_nas_password`` | The password associated with the login. | +---------------------------------+----------------------------------------------------+ | ``share_backend_name`` | The share backend name for a given driver | | | implementation. 
| +---------------------------------+----------------------------------------------------+ | ``dell_ssl_cert_verify`` | The https client validates the SSL certificate of | | | the PowerStore endpoint. Optional. | | | Value: True or False. | | | Default: False. | +---------------------------------+----------------------------------------------------+ | ``dell_ssl_cert_path`` | The path to PowerStore SSL certificate on | | | Manila host. Optional. | +---------------------------------+----------------------------------------------------+ Restart of ``manila-share`` service is needed for the configuration changes to take effect. Pre-configurations for share support (DHSS=False) -------------------------------------------------- To create a file share in this mode, you need to: #. Create NAS server with network interface in PowerStore system. #. Set 'dell_nas_server' in ``/etc/manila/manila.conf``: .. code-block:: ini dell_nas_server = #. Create the share type with driver_handles_share_servers = False extra specification: .. code-block:: console $ openstack share type create ${share_type_name} False #. Map this share type to the share backend name .. code-block:: console $ openstack share type set ${share_type_name} \ --extra-specs share_backend_name=${share_backend_name} #. Create NFS share. .. code-block:: console $ openstack share create NFS ${size} --name ${share_name} --share-type ${share_type_name} Pre-configurations for snapshot support --------------------------------------- The driver can: - create/delete a snapshot - create a share from a snapshot - revert a share to a snapshot The following extra specifications need to be configured with share type. - snapshot_support = True - create_share_from_snapshot_support = True - revert_to_snapshot_support = True For new share type, these extra specifications can be set directly when creating share type: .. 
code-block:: console $ openstack share type create ${share_type_name} False \ --snapshot-support=True \ --create-share-from-snapshot-support=True \ --revert-to-snapshot-support=True Or you can update already existing share type with command: .. code-block:: console $ openstack share type set ${share_type_name} \ --extra-specs snapshot_support=True \ create_share_from_snapshot_support=True \ revert_to_snapshot_support=True Known restrictions ------------------ The PowerStore driver has the following restrictions. - Minimum share size is 3GiB. - Only IP access type is supported for NFS shares. - Only user access type is supported for CIFS shares. - Only DHSS=False is supported. - Modification of CIFS share access is supported in PowerStore 3.5 and above. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/dell-emc-unity-driver.rst0000664000175000017500000004127000000000000032217 0ustar00zuulzuul00000000000000===================== Dell EMC Unity driver ===================== The EMC Shared File Systems service driver framework (EMCShareDriver) utilizes the EMC storage products to provide the shared file systems to OpenStack. The EMC driver is a plug-in based driver which is designed to use different plug-ins to manage different EMC storage products. The Unity plug-in manages the Unity system to provide shared filesystems. The EMC driver framework with the Unity plug-in is referred to as the Unity driver in this document. This driver performs the operations on Unity through RESTful APIs. Each backend manages one Storage Processor of Unity. Configure multiple Shared File Systems service backends to manage multiple Unity systems. Requirements ------------ - Unity OE 4.1.x or higher. - StorOps 1.1.0 or higher is installed on Manila node. 
- Following licenses are activated on Unity: * CIFS/SMB Support * Network File System (NFS) * Thin Provisioning * Fiber Channel (FC) * Internet Small Computer System Interface (iSCSI) Supported shared filesystems and operations ------------------------------------------- In detail, users are allowed to do following operation with EMC Unity Storage Systems. * Create/delete a NFS share. * Create/delete a CIFS share. * Extend the size of a share. * Shrink the size of a share. * Modify the host access privilege of a NFS share. * Modify the user access privilege of a CIFS share. * Create/Delete snapshot of a share. * Create a new share from snapshot. * Revert a share to a snapshot. * Manage/Unmanage a share server. * Manage/Unmanage a share. * Manage/Unmanage a snapshot. Supported Network Topologies ---------------------------- * Flat This type is fully supported by Unity share driver, however flat networks are restricted due to the limited number of tenant networks that can be created from them. * VLAN We recommend this type of network topology in Manila. In most use cases, VLAN is used to isolate the different tenants and provide an isolated network for each tenant. To support this function, an administrator needs to set a slot connected with Unity Ethernet port in ``Trunk`` mode or allow multiple VLANs from the slot. * VXLAN Unity native VXLAN is still unavailable. However, with the `HPB `_ (Hierarchical Port Binding) in Networking and Shared file system services, it is possible that Unity co-exists with VXLAN enabled network environment. Pre-Configurations ------------------ On Manila Node ~~~~~~~~~~~~~~~ Python library ``storops`` is required to run Unity driver. Install it with the ``pip`` command. You may need root privilege to install python libraries. .. code-block:: console $ pip install storops On Unity System ~~~~~~~~~~~~~~~~ #. Configure system level NTP server. Open ``Unisphere`` of your Unity system and navigate to: .. 
code-block:: console Unisphere -> Settings -> Management -> System Time and NTP Select ``Enable NTP synchronization`` and add your NTP server(s). The time on the Unity system and the Active Directory domains used in security services should be in sync. We recommend using the same NTP server on both the Unity system and Active Directory domains. #. Configure system level DNS server. Open ``Unisphere`` of your Unity system and navigate to: .. code-block:: console Unisphere -> Settings -> Management -> DNS Server Select ``Configure DNS server address manually`` and add your DNS server(s). Backend configurations ---------------------- Following configurations need to be configured in `/etc/manila/manila.conf` for the Unity driver. .. code-block:: ini share_driver = manila.share.drivers.dell_emc.driver.EMCShareDriver emc_share_backend = unity emc_nas_server = emc_nas_login = emc_nas_password = unity_server_meta_pool = unity_share_data_pools = unity_ethernet_ports = driver_handles_share_servers = True/False unity_share_server = report_default_filter_function = True/False - `emc_share_backend` The plugin name. Set it to `unity` for the Unity driver. - `emc_nas_server` The management IP for Unity. - `unity_server_meta_pool` The name of the pool to persist the meta-data of NAS server. This option is required. - `unity_share_data_pools` Comma separated list specifying the name of the pools to be used by this backend. Do not set this option if all storage pools on the system can be used. Wild card character is supported. Examples: .. code-block:: ini # Only use pool_1 unity_share_data_pools = pool_1 # Only use pools whose name stars from pool_ unity_share_data_pools = pool_* # Use all pools on Unity unity_share_data_pools = * - `unity_ethernet_ports` Comma separated list specifying the ethernet ports of Unity system that can be used for share. Do not set this option if all ethernet ports can be used. Wild card character is supported. 
Both the normal ethernet port and link aggregation port can be used by Unity share driver. Examples: .. code-block:: ini # Only use spa_eth1 unity_ethernet_ports = spa_eth1 # Use port whose name stars from spa_ unity_ethernet_ports = spa_* # Use all Link Aggregation ports unity_ethernet_ports = sp*_la_* # Use all available ports unity_ethernet_ports = * - `driver_handles_share_servers` Unity driver requires this option to be as `True` or `False`. Need to set `unity_share_server` when the value is `False`. - `unity_share_server` One of NAS server names in Unity, it is used for share creation when the driver is in `DHSS=False` mode. - `report_default_filter_function` Whether or not report default filter function. Default value is False. However, this value will be changed to True in a future release to ensure compliance with design expectations in Manila. So we recommend always setting this option in your deployment to True or False per your desired behavior. Restart of :term:`manila-share` service is needed for the configuration changes to take effect. Supported MTU size ------------------ Unity currently only supports 1500 and 9000 as the mtu size, the user can change the above mtu size from Unity Unisphere: #. In the Unisphere, go to `Settings`, `Access`, and then `Ethernet`. #. Double click the ethernet port. #. Select the `MTU` size from the drop down list. The Unity driver will select the port where mtu is equal to the mtu of share network during share server creation. IPv6 support ------------ IPv6 support for Unity driver is introduced in Queens release. The feature is divided into two parts: #. The driver is able to manage share or snapshot in the Neutron IPv6 network. #. The driver is able to connect Unity management interface using its IPv6 address. 
Pre-Configurations for IPv6 support ----------------------------------- The following parameters need to be configured in `/etc/manila/manila.conf` for the Unity driver: network_plugin_ipv6_enabled = True - `network_plugin_ipv6_enabled` indicates IPv6 is enabled. If you want to connect Unity using IPv6 address, you should configure IPv6 address by `/net/if/mgmt` uemcli command, `mgmtInterfaceSettings` RESTful api or the system settings of Unity GUI for Unity and specify the address in `/etc/manila/manila.conf`: emc_nas_server = Supported share creation in mode that driver does not create and destroy share servers (DHSS=False) --------------------------------------------------------------------------------------------------- To create a file share in this mode, you need to: #. Create NAS server with network interface in Unity system. #. Set 'driver_handles_share_servers=False' and 'unity_share_server' in ``/etc/manila/manila.conf``: .. code-block:: ini driver_handles_share_servers = False unity_share_server = #. Specify the share type with driver_handles_share_servers = False extra specification: .. code-block:: console $ manila type-create ${share_type_name} False #. Create share. .. code-block:: console $ manila create ${share_protocol} ${size} --name ${share_name} --share-type ${share_type_name} .. note:: Do not specify the share network in share creation command because no share servers will be created. Driver will use the unity_share_server specified for share creation. Snapshot support ---------------- In the Mitaka and Newton release of OpenStack, Snapshot support is enabled by default for a newly created share type. Starting with the Ocata release, the snapshot_support extra spec must be set to True in order to allow snapshots for a share type. If the 'snapshot_support' extra_spec is omitted or if it is set to False, users would not be able to create snapshots on shares of this share type. The feature is divided into two parts: 1. 
The driver is able to create/delete snapshot of share. 2. The driver is able to create share from snapshot. Pre-Configurations for Snapshot support --------------------------------------- The following extra specifications need to be configured with share type. - snapshot_support = True - create_share_from_snapshot_support = True For new share type, these extra specifications can be set directly when creating share type: .. code-block:: console $ manila type-create --snapshot_support True --create_share_from_snapshot_support True ${share_type_name} True Or you can update already existing share type with command: .. code-block:: console $ manila type-key ${share_type_name} set snapshot_support=True $ manila type-key ${share_type_name} set create_share_from_snapshot_support=True To snapshot a share and create share from the snapshot ------------------------------------------------------ Firstly, you need create a share from share type that has extra specifications (snapshot_support=True, create_share_from_snapshot_support=True). Then snapshot the share with command: .. code-block:: console $ manila snapshot-create ${source_share_name} --name ${target_snapshot_name} --description " " After creating the snapshot from previous step, you can create share from that snapshot. Use command: .. code-block:: console $ manila create nfs 1 --name ${target_share_name} --metadata source=snapshot --description " " --snapshot-id ${source_snapshot_id} To manage an existing share server ---------------------------------- To manage a share server existing in Unity System, you need to: #. Create network, subnet, port (ip address of nas server in Unity system) and share network in OpenStack. .. 
code-block:: console $ openstack network create ${network_name} --provider-network-type ${network_type} $ openstack subnet create ${subnet_name} --network ${network_name} --subnet-range ${subnet_range} $ openstack port create --network ${network_name} --fixed-ip subnet=${subnet_name},ip-address=${ip address} \ ${port_name} --device-owner=manila:share $ manila share-network-create --name ${share_network_name} --neutron-net-id ${network_name} \ --neutron-subnet-id ${subnet_name} #. Manage the share server in OpenStack: .. code-block:: console $ manila share-server-manage ${host} ${share_network_name} ${identifier} .. note:: '${identifier}' is the nas server name in Unity system. To un-manage a Manila share server ---------------------------------- To unmanage a share server existing in OpenStack: .. code-block:: console $ manila share-server-unmanage ${share_server_id} To manage an existing share --------------------------- To manage a share existing in Unity System: - In DHSS=True mode Need make sure the related share server is existing in OpenStack, otherwise need to manage share server first (check the step of 'Supported Manage share server'). .. code-block:: console $ manila manage ${service_host} ${protocol} '${export_path}' --name ${share_name} --driver_options size=${share_size} \ --share_type ${share_type} --share_server_id ${share_server_id} .. note:: '${share_server_id}' is the id of share server in OpenStack. '${share_type}' should have the property 'driver_handles_share_servers=True'. - In DHSS=False mode .. code-block:: console $ manila manage ${service_host} ${protocol} '${export_path}' --name ${share_name} --driver_options size=${share_size} \ --share_type ${share_type} .. note:: '${share_type}' should have the property 'driver_handles_share_servers=False'. To un-manage a Manila share --------------------------- To unmanage a share existing in OpenStack: .. 
code-block:: console $ manila unmanage ${share_id} To manage an existing share snapshot ------------------------------------ To manage a snapshot existing in Unity System, you need make sure the related share instance is existing in OpenStack, otherwise need to manage share first (check the step of 'Supported Manage share'). .. code-block:: console $ manila snapshot-manage --name ${name} ${share_name} ${provider_location} --driver_options size=${snapshot_size} .. note:: '${provider_location}' is the snapshot name in Unity system. '${share_name}' is the share name or id in OpenStack. To un-manage a Manila share snapshot ------------------------------------ To unmanage a snapshot existing in OpenStack: .. code-block:: console $ manila snapshot-unmanage ${snapshot_id} Supported security services --------------------------- Unity share driver provides ``IP`` based authentication method support for ``NFS`` shares and ``user`` based authentication method for ``CIFS`` shares respectively. For ``CIFS`` share, Microsoft Active Directory is the only supported security service. .. _unity_file_io_load_balance: IO Load balance --------------- The Unity driver automatically distributes the file interfaces per storage processor based on the option ``unity_ethernet_ports``. This balances IO traffic. The recommended configuration for ``unity_ethernet_ports`` specifies balanced ports per storage processor. For example: .. code-block:: ini # Use eth2 from both SPs unity_ethernet_ports = spa_eth2, spb_eth2 Default filter function ----------------------- Unity does not support the file system creation with size smaller than 3GB, if the size of share user create is smaller than 3GB, Unity driver will supplement the size to 3GB in Unity. Unity driver implemented the get_default_filter_function API to report the default filter function, if the share size is smaller than 3GB, Manila will not schedule the share creation to Unity backend. 
Unity driver provides an option ``report_default_filter_function`` to disable or enable the filter function reporting, the default value is disabled. Restrictions ------------ The Unity driver has following restrictions. - EMC Unity does not support the same IP in different VLANs. - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. API Implementations ------------------- Following driver features are implemented in the plugin. * create_share: Create a share and export it based on the protocol used (NFS or CIFS). * create_share_from_snapshot: Create a share from a snapshot - clone a snapshot. * delete_share: Delete a share. * extend_share: Extend the maximum size of a share. * shrink_share: Shrink the minimum size of a share. * create_snapshot: Create a snapshot for the specified share. * delete_snapshot: Delete the snapshot of the share. * update_access: recover, add or delete user/host access to a share. * allow_access: Allow access (read write/read only) of a user to a CIFS share. Allow access (read write/read only) of a host to a NFS share. * deny_access: Remove access (read write/read only) of a user from a CIFS share. Remove access (read write/read only) of a host from a NFS share. * ensure_share: Check whether share exists or not. * update_share_stats: Retrieve share related statistics from Unity. * get_network_allocations_number: Returns number of network allocations for creating VIFs. * setup_server: Set up and configures share server with given network parameters. * teardown_server: Tear down the share server. * revert_to_snapshot: Revert a share to a snapshot. * get_default_filter_function: Report a default filter function. Driver options -------------- Configuration options specific to this driver: .. 
include:: ../../tables/manila-unity.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/dell-emc-vnx-driver.rst0000664000175000017500000002352400000000000031664 0ustar00zuulzuul00000000000000=================== Dell EMC VNX driver =================== The EMC Shared File Systems service driver framework (EMCShareDriver) utilizes the EMC storage products to provide the shared file systems to OpenStack. The EMC driver is a plug-in based driver which is designed to use different plug-ins to manage different EMC storage products. The VNX plug-in is the plug-in which manages the VNX to provide shared filesystems. The EMC driver framework with the VNX plug-in is referred to as the VNX driver in this document. This driver performs the operations on VNX by XMLAPI and the file command line. Each back end manages one Data Mover of VNX. Multiple Shared File Systems service back ends need to be configured to manage multiple Data Movers. .. note:: Dell EMC VNX driver has been deprecated and will be removed in a future release Requirements ~~~~~~~~~~~~ - VNX OE for File version 7.1 or higher - VNX Unified, File only, or Gateway system with a single storage back end - The following licenses should be activated on VNX for File: - CIFS - NFS - SnapSure (for snapshot) - ReplicationV2 (for create share from snapshot) Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports CIFS and NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. Note the following limitations: - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. - Deny share access. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. 
While the generic driver creates shared filesystems based on cinder volumes attached to nova VMs, the VNX driver performs similar operations using the Data Movers on the array. Pre-configurations on VNX ~~~~~~~~~~~~~~~~~~~~~~~~~ #. Enable unicode on Data Mover. The VNX driver requires that the unicode is enabled on Data Mover. .. warning:: After enabling Unicode, you cannot disable it. If there are some filesystems created before Unicode is enabled on the VNX, consult the storage administrator before enabling Unicode. To check the Unicode status on Data Mover, use the following VNX File command on the VNX control station:: server_cifs | head # mover_name = Check the value of I18N mode field. UNICODE mode is shown as ``I18N mode = UNICODE``. To enable the Unicode for Data Mover:: uc_config -on -mover # mover_name = Refer to the document Using International Character Sets on VNX for File on `EMC support site `_ for more information. #. Enable CIFS service on Data Mover. Ensure the CIFS service is enabled on the Data Mover which is going to be managed by VNX driver. To start the CIFS service, use the following command:: server_setup -Protocol cifs -option start [=] # mover_name = # n = .. note:: If there is 1 GB of memory on the Data Mover, the default is 96 threads; however, if there is over 1 GB of memory, the default number of threads is 256. To check the CIFS service status, use this command:: server_cifs | head # mover_name = The command output will show the number of CIFS threads started. #. NTP settings on Data Mover. VNX driver only supports CIFS share creation with share network which has an Active Directory security-service associated. Creating CIFS share requires that the time on the Data Mover is in sync with the Active Directory domain so that the CIFS server can join the domain. Otherwise, the domain join will fail when creating share with this security service. 
There is a limitation that the time of the domains used by security-services even for different tenants and different share networks should be in sync. Time difference should be less than 10 minutes. It is recommended to set the NTP server to the same public NTP server on both the Data Mover and domains used in security services to ensure the time is in sync everywhere. Check the date and time on Data Mover:: server_date # mover_name = Set the NTP server for Data Mover:: server_date timesvc start ntp [ ...] # mover_name = # host = .. note:: The host must be running the NTP protocol. Only 4 host entries are allowed. #. Configure User Mapping on the Data Mover. Before creating CIFS share using VNX driver, you must select a method of mapping Windows SIDs to UIDs and GIDs. EMC recommends using usermapper in single protocol (CIFS) environment which is enabled on VNX by default. To check usermapper status, use this command syntax:: server_usermapper # movername = If usermapper is not started, the following command can be used to start the usermapper:: server_usermapper -enable # movername = For a multiple protocol environment, refer to Configuring VNX User Mapping on `EMC support site `_ for additional information. #. Network Connection. Find the network devices (physical port on NIC) of Data Mover that has access to the share network. Go to :guilabel:`Unisphere` to check the device list: :menuselection:`Settings > Network > Settings for File (Unified system only) > Device`. Back-end configurations ~~~~~~~~~~~~~~~~~~~~~~~ The following parameters need to be configured in the ``/etc/manila/manila.conf`` file for the VNX driver: .. code-block:: ini emc_share_backend = vnx emc_nas_server = emc_nas_password = emc_nas_login = vnx_server_container = vnx_share_data_pools = share_driver = manila.share.drivers.emc.driver.EMCShareDriver vnx_ethernet_ports = - `emc_share_backend` The plug-in name. Set it to ``vnx`` for the VNX driver. 
- `emc_nas_server` The control station IP address of the VNX system to be managed. - `emc_nas_password` and `emc_nas_login` The fields that are used to provide credentials to the VNX system. Only local users of VNX File is supported. - `vnx_server_container` Name of the Data Mover to serve the share service. - `vnx_share_data_pools` Comma separated list specifying the name of the pools to be used by this back end. Do not set this option if all storage pools on the system can be used. Wild card character is supported. Examples: pool_1, pool_*, * - `vnx_ethernet_ports` Comma separated list specifying the ports (devices) of Data Mover that can be used for share server interface. Do not set this option if all ports on the Data Mover can be used. Wild card character is supported. Examples: spa_eth1, spa_*, * Restart of the ``manila-share`` service is needed for the configuration changes to take effect. Restrictions ~~~~~~~~~~~~ The VNX driver has the following restrictions: - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. - Only FLAT network and VLAN network are supported. - VLAN network is supported with limitations. The neutron subnets in different VLANs that are used to create share networks cannot have overlapped address spaces. Otherwise, VNX may have a problem to communicate with the hosts in the VLANs. To create shares for different VLANs with same subnet address, use different Data Movers. - The ``Active Directory`` security service is the only supported security service type and it is required to create CIFS shares. - Only one security service can be configured for each share network. - Active Directory domain name of the 'active\_directory' security service should be unique even for different tenants. - The time on Data Mover and the Active Directory domains used in security services should be in sync (time difference should be less than 10 minutes). 
It is recommended to use same NTP server on both the Data Mover and Active Directory domains. - On VNX the snapshot is stored in the SavVols. VNX system allows the space used by SavVol to be created and extended until the sum of the space consumed by all SavVols on the system exceeds the default 20% of the total space available on the system. If the 20% threshold value is reached, an alert will be generated on VNX. Continuing to create snapshot will cause the old snapshot to be inactivated (and the snapshot data to be abandoned). The limit percentage value can be changed manually by storage administrator based on the storage needs. Administrator is recommended to configure the notification on the SavVol usage. Refer to Using VNX SnapSure document on `EMC support site `_ for more information. - VNX has limitations on the overall numbers of Virtual Data Movers, filesystems, shares, checkpoints, etc. Virtual Data Mover(VDM) is created by the VNX driver on the VNX to serve as the Shared File Systems service share server. Similarly, filesystem is created, mounted, and exported from the VDM over CIFS or NFS protocol to serve as the Shared File Systems service share. The VNX checkpoint serves as the Shared File Systems service share snapshot. Refer to the NAS Support Matrix document on `EMC support site `_ for the limitations and configure the quotas accordingly. Driver options ~~~~~~~~~~~~~~ Configuration options specific to this driver: .. include:: ../../tables/manila-vnx.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/generic-driver.rst0000664000175000017500000000650700000000000031007 0ustar00zuulzuul00000000000000======================================= Generic approach for share provisioning ======================================= The Shared File Systems service can be configured to use Compute VMs and Block Storage service volumes. 
There are two modules that handle them in the Shared File Systems service: - The ``service_instance`` module creates VMs in Compute with a predefined image called ``service image``. This module can be used by any driver for provisioning of service VMs to be able to separate share resources among tenants. - The ``generic`` module operates with Block Storage service volumes and VMs created by the ``service_instance`` module, then creates shared filesystems based on volumes attached to VMs. Network configurations ~~~~~~~~~~~~~~~~~~~~~~ Each driver can handle networking in its own way, see: https://wiki.openstack.org/wiki/manila/Networking. One of the two possible configurations can be chosen for share provisioning using the ``service_instance`` module: - Service VM has one network interface from a network that is connected to a public router. For successful creation of a share, the user network should be connected to a public router, too. - Service VM has two network interfaces, the first one is connected to the service network, the second one is connected directly to the user's network. Requirements for service image ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Linux based distro - NFS server - Samba server >= 3.2.0, that can be configured by data stored in registry - SSH server - Two network interfaces configured to DHCP (see network approaches) - ``exportfs`` and ``net conf`` libraries used for share actions - The following files will be used, so if their paths differ one needs to create at least symlinks for them: - ``/etc/exports``: permanent file with NFS exports. - ``/var/lib/nfs/etab``: temporary file with NFS exports used by ``exportfs``. - ``/etc/fstab``: permanent file with mounted filesystems. - ``/etc/mtab``: temporary file with mounted filesystems used by ``mount``. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports CIFS and NFS shares. The following operations are supported: - Create a share. 
- Delete a share. - Allow share access. Note the following limitations: - Only IP access type is supported for NFS and CIFS. - Deny share access. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. - Extend a share. - Shrink a share. Known restrictions ~~~~~~~~~~~~~~~~~~ - One of nova's configurations only allows 26 shares per server. This limit comes from the maximum number of virtual PCI interfaces that are used for block device attaching. There are 28 virtual PCI interfaces, in this configuration, two of them are used for server needs and the other 26 are used for attaching block devices that are used for shares. Using Windows instances ~~~~~~~~~~~~~~~~~~~~~~~ While the generic driver only supports Linux instances, you may use the Windows SMB driver when Windows instances are preferred. For more details, please check out the following page: :ref:`windows_smb_driver`. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to this driver. .. include:: ../../tables/manila-generic.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/glusterfs-driver.rst0000664000175000017500000000501300000000000031400 0ustar00zuulzuul00000000000000================ GlusterFS driver ================ GlusterFS driver uses GlusterFS, an open source distributed file system, as the storage back end for serving file shares to the Shared File Systems clients. .. note:: This driver has been deprecated in 18.0.0 release. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. Note the following limitations: - Only IP access type is supported - Only read-write access is supported. - Deny share access. 
Requirements ~~~~~~~~~~~~ - Install glusterfs-server package, version >= 3.5.x, on the storage back end. - Install NFS-Ganesha, version >=2.1, if using NFS-Ganesha as the NFS server for the GlusterFS back end. - Install glusterfs and glusterfs-fuse package, version >=3.5.x, on the Shared File Systems service host. - Establish network connection between the Shared File Systems service host and the storage back end. Shared File Systems service driver configuration setting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following parameters in the Shared File Systems service's configuration file ``manila.conf`` need to be set: .. code-block:: ini share_driver = manila.share.drivers.glusterfs.GlusterfsShareDriver If the back-end GlusterFS server runs on the Shared File Systems service host machine: .. code-block:: ini glusterfs_target = :/ If the back-end GlusterFS server runs remotely: .. code-block:: ini glusterfs_target = @:/ Known restrictions ~~~~~~~~~~~~~~~~~~ - The driver does not support network segmented multi-tenancy model, but instead works over a flat network, where the tenants share a network. - If NFS Ganesha is the NFS server used by the GlusterFS back end, then the shares can be accessed by NFSv3 and v4 protocols. However, if Gluster NFS is used by the GlusterFS back end, then the shares can only be accessed by NFSv3 protocol. - All Shared File Systems service shares, which map to subdirectories within a GlusterFS volume, are currently created within a single GlusterFS volume of a GlusterFS storage pool. - The driver does not provide read-only access level for shares. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. 
include:: ../../tables/manila-glusterfs.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/glusterfs-native-driver.rst0000664000175000017500000001054200000000000032667 0ustar00zuulzuul00000000000000======================= GlusterFS Native driver ======================= GlusterFS Native driver uses GlusterFS, an open source distributed file system, as the storage back end for serving file shares to Shared File Systems service clients. A Shared File Systems service share is a GlusterFS volume. This driver uses flat-network (share-server-less) model. Instances directly talk with the GlusterFS back end storage pool. The instances use ``glusterfs`` protocol to mount the GlusterFS shares. Access to each share is allowed via TLS Certificates. Only the instance which has the TLS trust established with the GlusterFS back end can mount and hence use the share. Currently only ``read-write (rw)`` access is supported. .. note:: This driver has been deprecated in 18.0.0 release. Network approach ~~~~~~~~~~~~~~~~ L3 connectivity between the storage back end and the host running the Shared File Systems share service should exist. Multi-tenancy model ~~~~~~~~~~~~~~~~~~~ The driver does not support network segmented multi-tenancy model. Instead multi-tenancy is supported using tenant specific TLS certificates. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports GlusterFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. Note the following limitations: - Only access by TLS Certificates (``cert`` access type) is supported. - Only read-write access is supported. - Deny share access. - Create a snapshot. - Delete a snapshot. Requirements ~~~~~~~~~~~~ - Install glusterfs-server package, version >= 3.6.x, on the storage back end. 
- Install glusterfs and glusterfs-fuse package, version >= 3.6.x, on the Shared File Systems service host. - Establish network connection between the Shared File Systems service host and the storage back end. Shared File Systems service driver configuration setting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following parameters in the Shared File Systems service's configuration file need to be set: .. code-block:: ini share_driver = manila.share.drivers.glusterfs_native.GlusterfsNativeShareDriver glusterfs_servers = glustervolserver glusterfs_volume_pattern = manila-share-volume-\d+$ The parameters are: ``glusterfs_servers`` List of GlusterFS servers which provide volumes that can be used to create shares. The servers are expected to be of distinct Gluster clusters, so they should not be Gluster peers. Each server should be of the form ``[@]``. The optional ``@`` part of the server URI indicates SSH access for cluster management (see related optional parameters below). If it is not given, direct command line management is performed (the Shared File Systems service host is assumed to be part of the GlusterFS cluster the server belongs to). ``glusterfs_volume_pattern`` Regular expression template used to filter GlusterFS volumes for share creation. The regular expression template can contain the ``#{size}`` parameter which matches a number and the value will be interpreted as size of the volume in GB. Examples: ``manila-share-volume-\d+$``, ``manila-share-volume-#{size}G-\d+$``; with matching volume names, respectively: ``manila-share-volume-12``, ``manila-share-volume-3G-13``. In the latter example, the number that matches ``#{size}``, which is 3, is an indication that the size of volume is 3 GB. On share creation, the Shared File Systems service picks volumes at least as large as the requested one. When setting up GlusterFS shares, note the following: - GlusterFS volumes are not created on demand. 
A pre-existing set of GlusterFS volumes should be supplied by the GlusterFS cluster(s), conforming to the naming convention encoded by ``glusterfs_volume_pattern``. However, the GlusterFS endpoint is allowed to extend this set any time, so the Shared File Systems service and GlusterFS endpoints are expected to communicate volume supply and demand out-of-band. - Certificate setup, also known as trust setup, between instance and storage back end is out of band of the Shared File Systems service. - For the Shared File Systems service to use GlusterFS volumes, the name of the trashcan directory in GlusterFS volumes must not be changed from the default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/hdfs-native-driver.rst0000664000175000017500000000424500000000000031600 0ustar00zuulzuul00000000000000================== HDFS native driver ================== The HDFS native driver is a plug-in for the Shared File Systems service. It uses Hadoop distributed file system (HDFS), a distributed file system designed to hold very large amounts of data, and provide high-throughput access to the data. A Shared File Systems service share in this driver is a subdirectory in the hdfs root directory. Instances talk directly to the HDFS storage back end using the ``hdfs`` protocol. Access to each share is allowed by user based access type, which is aligned with HDFS ACLs to support access control of multiple users and groups. Network configuration ~~~~~~~~~~~~~~~~~~~~~ The storage back end and Shared File Systems service hosts should be in a flat network, otherwise L3 connectivity between them should exist. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports HDFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. 
Note the following limitations: - Only user access type is supported. - Deny share access. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. Requirements ~~~~~~~~~~~~ - Install HDFS package, version >= 2.4.x, on the storage back end. - To enable access control, the HDFS file system must have ACLs enabled. - Establish network connection between the Shared File Systems service host and storage back end. Shared File Systems service driver configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable the driver, set the ``share_driver`` option in file ``manila.conf`` and add other options as appropriate. .. code-block:: ini share_driver = manila.share.drivers.hdfs.hdfs_native.HDFSNativeShareDriver Known restrictions ~~~~~~~~~~~~~~~~~~ - This driver does not support network segmented multi-tenancy model. Instead multi-tenancy is supported by the tenant specific user authentication. - Only support for single HDFS namenode in Kilo release. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. include:: ../../tables/manila-hdfs.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/hitachi-hnas-driver.rst0000664000175000017500000004062500000000000031732 0ustar00zuulzuul00000000000000========================= Hitachi NAS (HNAS) driver ========================= The HNAS driver provides NFS Shared File Systems to OpenStack. Requirements ~~~~~~~~~~~~ - Hitachi NAS Platform Models 3080, 3090, 4040, 4060, 4080, and 4100. - HNAS/SMU software version is 12.2 or higher. - HNAS configuration and management utilities to create a storage pool (span) and an EVS. - GUI (SMU). - SSC CLI. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports NFS and CIFS shares. 
The following operations are supported: - Create a share. - Delete a share. - Allow share access. - Deny share access. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. - Revert a share to a snapshot. - Extend a share. - Manage a share. - Unmanage a share. - Shrink a share. - Mount snapshots. - Allow snapshot access. - Deny snapshot access. - Manage a snapshot. - Unmanage a snapshot. Driver options ~~~~~~~~~~~~~~ This table contains the configuration options specific to the share driver. .. include:: ../../tables/manila-hds_hnas.inc Pre-configuration on OpenStack deployment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Install the OpenStack environment with manila. See the `OpenStack installation guide `_. #. Configure the OpenStack networking so it can reach HNAS Management interface and HNAS EVS Data interface. .. note :: In the driver mode used by HNAS Driver (DHSS = ``False``), the driver does not handle network configuration, it is up to the administrator to configure it. * Configure the network of the manila-share node network to reach HNAS management interface through the admin network. * Configure the network of the Compute and Networking nodes to reach HNAS EVS data interface through the data network. * Example of networking architecture: .. figure:: ../../figures/hds_network.jpg :width: 60% :align: center :alt: Example networking scenario * Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file and update the following settings in their respective tags. In case you use linuxbridge, update bridge mappings at linuxbridge section: .. important :: It is mandatory that HNAS management interface is reachable from the Shared File System node through the admin network, while the selected EVS data interface is reachable from OpenStack Cloud, such as through Neutron flat networking. .. 
code-block:: ini [ml2] type_drivers = flat,vlan,vxlan,gre mechanism_drivers = openvswitch [ml2_type_flat] flat_networks = physnet1,physnet2 [ml2_type_vlan] network_vlan_ranges = physnet1:1000:1500,physnet2:2000:2500 [ovs] bridge_mappings = physnet1:br-ex,physnet2:br-eth1 You may have to repeat the last line above in another file on the Compute node, if it exists it is located in: ``/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini``. * In case openvswitch for neutron agent, run in network node: .. code-block:: console # ifconfig eth1 0 # ovs-vsctl add-br br-eth1 # ovs-vsctl add-port br-eth1 eth1 # ifconfig eth1 up * Restart all neutron processes. #. Create the data HNAS network in OpenStack: * List the available projects: .. code-block:: console $ openstack project list * Create a network to the given project (DEMO), providing the project name, a name for the network, the name of the physical network over which the virtual network is implemented, and the type of the physical mechanism by which the virtual network is implemented: .. code-block:: console $ openstack network create --project DEMO \ --provider-network-type flat \ --provider-physical-network physnet2 hnas_network * Optional: List available networks: .. code-block:: console $ openstack network list * Create a subnet to the same project (DEMO), the gateway IP of this subnet, a name for the subnet, the network name created before, and the CIDR of subnet: .. code-block:: console $ openstack subnet create --project DEMO --gateway GATEWAY \ --subnet-range SUBNET_CIDR --network NETWORK HNAS_SUBNET * Optional: List available subnets: .. code-block:: console $ openstack subnet list * Add the subnet interface to a router, providing the router name and subnet name created before: .. code-block:: console $ openstack router add subnet SUBNET ROUTER Pre-configuration on HNAS ~~~~~~~~~~~~~~~~~~~~~~~~~ #. Create a file system on HNAS. See the `Hitachi HNAS reference `_. .. 
important:: Make sure that the filesystem is not created as a replication target. For more information, refer to the official HNAS administration guide. #. Prepare the HNAS EVS network. * Create a route in HNAS to the project network: .. code-block:: console $ console-context --evs route-net-add \ --gateway .. important:: Make sure multi-tenancy is enabled and routes are configured per EVS. .. code-block:: console $ console-context --evs 3 route-net-add --gateway 192.168.1.1 \ 10.0.0.0/24 #. Configure the CIFS security. * Before using CIFS shares with the HNAS driver, make sure to configure a security service in the back end. For details, refer to the `Hitachi HNAS reference `_. Back end configuration ~~~~~~~~~~~~~~~~~~~~~~ #. Configure HNAS driver. * Configure HNAS driver according to your environment. This example shows a minimal HNAS driver configuration: .. code-block:: ini [DEFAULT] enabled_share_backends = hnas1 enabled_share_protocols = NFS,CIFS [hnas1] share_backend_name = HNAS1 share_driver = manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver driver_handles_share_servers = False hitachi_hnas_ip = 172.24.44.15 hitachi_hnas_user = supervisor hitachi_hnas_password = supervisor hitachi_hnas_evs_id = 1 hitachi_hnas_evs_ip = 10.0.1.20 hitachi_hnas_file_system_name = FS-Manila hitachi_hnas_cifs_snapshot_while_mounted = True .. note:: The ``hds_hnas_cifs_snapshot_while_mounted`` parameter allows snapshots to be taken while CIFS shares are mounted. This parameter is set to ``False`` by default, which prevents a snapshot from being taken if the share is mounted or in use. #. Optional. HNAS multi-backend configuration. * Update the ``enabled_share_backends`` flag with the names of the back ends separated by commas. * Add a section for every back end according to the example bellow: .. 
code-block:: ini [DEFAULT] enabled_share_backends = hnas1,hnas2 enabled_share_protocols = NFS,CIFS [hnas1] share_backend_name = HNAS1 share_driver = manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver driver_handles_share_servers = False hitachi_hnas_ip = 172.24.44.15 hitachi_hnas_user = supervisor hitachi_hnas_password = supervisor hitachi_hnas_evs_id = 1 hitachi_hnas_evs_ip = 10.0.1.20 hitachi_hnas_file_system_name = FS-Manila1 hitachi_hnas_cifs_snapshot_while_mounted = True [hnas2] share_backend_name = HNAS2 share_driver = manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver driver_handles_share_servers = False hitachi_hnas_ip = 172.24.44.15 hitachi_hnas_user = supervisor hitachi_hnas_password = supervisor hitachi_hnas_evs_id = 1 hitachi_hnas_evs_ip = 10.0.1.20 hitachi_hnas_file_system_name = FS-Manila2 hitachi_hnas_cifs_snapshot_while_mounted = True #. Disable DHSS for HNAS share type configuration: .. note:: Shared File Systems requires that the share type includes the ``driver_handles_share_servers`` extra-spec. This ensures that the share will be created on a back end that supports the requested ``driver_handles_share_servers`` capability. .. code-block:: console $ manila type-create hitachi False #. Optional: Add extra-specs for enabling HNAS-supported features: * These commands will enable various snapshot-related features that are supported in HNAS. .. code-block:: console $ manila type-key hitachi set snapshot_support=True $ manila type-key hitachi set mount_snapshot_support=True $ manila type-key hitachi set revert_to_snapshot_support=True $ manila type-key hitachi set create_share_from_snapshot_support=True * To specify which HNAS back end will be created by the share, in case of multiple back end setups, add an extra-spec for each share-type to match a specific back end. Therefore, it is possible to specify which back end the Shared File System service will use when creating a share. .. 
code-block:: console $ manila type-key hitachi set share_backend_name=hnas1 $ manila type-key hitachi2 set share_backend_name=hnas2 #. Restart all Shared File Systems services (``manila-share``, ``manila-scheduler`` and ``manila-api``). Share migration ~~~~~~~~~~~~~~~ Extra configuration is needed for allowing shares to be migrated from or to HNAS. In the OpenStack deployment, the manila-share node needs an additional connection to the EVS data interface. Furthermore, make sure to add ``hitachi_hnas_admin_network_ip`` to the configuration. This should match the value of ``data_node_access_ips``. For more in-depth documentation, refer to the `share migration documents `_ Manage and unmanage shares ~~~~~~~~~~~~~~~~~~~~~~~~~~ Shared File Systems has the ability to manage and unmanage shares. If there is a share in the storage and it is not in OpenStack, you can manage that share and use it as a Shared File Systems share. Administrators have to make sure the exports are under the ``/shares`` folder beforehand. HNAS drivers use virtual-volumes (V-VOL) to create shares. Only V-VOL shares can be used by the driver, and V-VOLs must have a quota limit. If the NFS export is an ordinary FS export, it is not possible to use it in Shared File Systems. The unmanage operation only unlinks the share from Shared File Systems, all data is preserved. Both manage and unmanage operations are non-disruptive by default, until access rules are modified. To **manage** a share, use: .. code-block:: console $ manila manage [--name ] [--description ] [--share_type ] [--driver_options [ [ ...]]] [--public] Where: +--------------------+------------------------------------------------------+ | **Parameter** | **Description** | +====================+======================================================+ | | Manila host, back end and share name. For example, | | ``service_host`` | ``ubuntu@hitachi1#hsp1``. 
The available hosts can | | | be listed with the command: ``manila pool-list`` | | | (admin only). | +--------------------+------------------------------------------------------+ | ``protocol`` | Protocol of share to manage, such as NFS or CIFS. | +--------------------+------------------------------------------------------+ | ``export_path`` | Share export path. | | | For NFS: ``10.0.0.1:/shares/share_name`` | | | | | | For CIFS: ``\\10.0.0.1\share_name`` | +--------------------+------------------------------------------------------+ .. note:: For NFS exports, ``export_path`` **must** include ``/shares/`` after the target address. Trying to reference the share name directly or under another path will fail. .. note:: For CIFS exports, although the shares will be created under the ``/shares/`` folder in the back end, only the share name is needed in the export path. It should also be noted that the backslash ``\`` character has to be escaped when entered in Linux terminals. For additional details, refer to ``manila help manage``. To **unmanage** a share, use: .. code-block:: console $ manila unmanage Where: +------------------+---------------------------------------------------------+ | **Parameter** | **Description** | +==================+=========================================================+ | ``share`` | ID or name of the share to be unmanaged. A list of | | | shares can be fetched with ``manila list``. | +------------------+---------------------------------------------------------+ Manage and unmanage snapshots ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Shared File Systems service also has the ability to manage share snapshots. Existing HNAS snapshots can be managed, as long as the snapshot directory is located in ``/snapshots/share_ID``. New snapshots created through the Shared File Systems service are also created according to this specific folder structure. To **manage** a snapshot, use: .. 
code-block:: console $ manila snapshot-manage [--name ] [--description ] [--driver_options [ [ ...]]] Where: +------------------------+-------------------------------------------------+ | **Parameter** | **Description** | +========================+=================================================+ | ``share`` | ID or name of the share to be managed. A list | | | of shares can be fetched with ``manila list``. | +------------------------+-------------------------------------------------+ | ``provider_location`` | Location of the snapshot on the back end, such | | | as ``/snapshots/share_ID/snapshot_ID``. | +------------------------+-------------------------------------------------+ | ``--driver_options`` | Driver-related configuration, passed such as | | | ``size=10``. | +------------------------+-------------------------------------------------+ .. note:: The mandatory ``provider_location`` parameter uses the same syntax for both NFS and CIFS shares. This is only the case for snapshot management. .. note:: The ``--driver_options`` parameter ``size`` is **required** for the HNAS driver. Administrators need to know the size of the to-be-managed snapshot beforehand. .. note:: If the ``mount_snapshot_support=True`` extra-spec is set in the share type, the HNAS driver will automatically create an export when managing a snapshot if one does not already exist. To **unmanage** a snapshot, use: .. code-block:: console $ manila snapshot-unmanage Where: +---------------+--------------------------------+ | **Parameter** | **Description** | +===============+================================+ | ``snapshot`` | Name or ID of the snapshot(s). | +---------------+--------------------------------+ Additional notes ~~~~~~~~~~~~~~~~ * HNAS has some restrictions about the number of EVSs, filesystems, virtual-volumes, and simultaneous SSC connections. Check the manual specification for your system. * Shares and snapshots are thin provisioned. 
Only the space actually used in HNAS is reported to the Shared File Systems service.
This determines the limit of shares the driver can provide. - The Hitachi HSP file systems must have at least 128 GB. This means that all shares created by Shared File Systems service should have 128 GB or more. .. note:: The driver has an internal filter function that accepts only requests for shares size greater than or equal to 128 GB, otherwise the request will fail or be redirected to another available storage backend. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. include:: ../../tables/manila-hds_hsp.inc Network approach ~~~~~~~~~~~~~~~~ .. note:: In the driver mode used by HSP Driver (DHSS = ``False``), the driver does not handle network configuration, it is up to the administrator to configure it. * Configure the network of the manila-share, Compute and Networking nodes to reach HSP interface. For this, your provider network should be capable of reaching HSP Cluster-Virtual-IP. These connections are mandatory so nova instances are capable of accessing shares provided by the backend. * The following image represents a valid scenario: .. image:: ../../figures/hsp_network.png :width: 60% .. note:: To HSP, the Virtual IP is the address through which clients access shares and the Shared File Systems service sends commands to the management interface. This IP can be checked in HSP using its CLI: .. code-block:: console $ hspadm ip-address list Back end configuration ~~~~~~~~~~~~~~~~~~~~~~ #. Configure HSP driver according to your environment. This example shows a valid HSP driver configuration: .. code-block:: ini [DEFAULT] # ... enabled_share_backends = hsp1 enabled_share_protocols = NFS # ... [hsp1] share_backend_name = HITACHI1 share_driver = manila.share.drivers.hitachi.hsp.driver.HitachiHSPDriver driver_handles_share_servers = False hitachi_hsp_host = 172.24.47.190 hitachi_hsp_username = admin hitachi_hsp_password = admin_password #. Configure HSP share type. .. 
note:: Shared File Systems service requires that the share type includes the ``driver_handles_share_servers`` extra-spec. This ensures that the share will be created on a backend that supports the requested ``driver_handles_share_servers`` capability. Also, ``snapshot_support`` extra-spec should be provided if its value differs from the default value (``True``), as this driver version that currently does not support snapshot operations. For this driver both extra-specs must be set to ``False``. .. code-block:: console $ manila type-create --snapshot_support False hsp False #. Restart all Shared File Systems services (``manila-share``, ``manila-scheduler`` and ``manila-api``). Manage and unmanage shares ~~~~~~~~~~~~~~~~~~~~~~~~~~ The Shared File Systems service has the ability to manage and unmanage shares. If there is a share in the storage and it is not in OpenStack, you can manage that share and use it as a Shared File Systems share. Previous access rules are not imported by manila. The unmanage operation only unlinks the share from OpenStack, preserving all data in the share. In order to manage a HSP share, it must adhere to the following rules: - File system and share name must not contain spaces. - Share name must not contain backslashes (`\\`). To **manage** a share use: .. code-block:: console $ manila manage [--name ] [--description ] [--share_type ] [--driver_options [ [ ...]]] Where: +--------------------+------------------------------------------------------+ | **Parameter** | **Description** | +====================+======================================================+ | | Manila host, backend and share name. For example, | | ``service_host`` | ``ubuntu@hitachi1#hsp1``. The available hosts can | | | be listed with the command: ``manila pool-list`` | | | (admin only). | +--------------------+---------------------+--------------------------------+ | ``protocol`` | Must be **NFS**, the only supported protocol in this | | | driver version. 
| +--------------------+------------------------------------------------------+ | ``export_path`` | The Hitachi Hyper Scale-Out Platform export path of | | | the share, for example: | | | ``172.24.47.190:/some_share_name`` | +--------------------+------------------------------------------------------+ | To **unmanage** a share use: .. code-block:: console $ manila unmanage Where: +------------------+---------------------------------------------------------+ | **Parameter** | **Description** | +==================+=========================================================+ | ``share`` | ID or name of the share to be unmanaged. This list can | | | be fetched with: ``manila list``. | +------------------+---------------------+-----------------------------------+ Additional notes ~~~~~~~~~~~~~~~~ - Shares are thin provisioned. It is reported to manila only the real used space in HSP. - Administrators should manage the tenant's quota (``manila quota-update``) to control the backend usage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/hpe-3par-share-driver.rst0000664000175000017500000006201400000000000032105 0ustar00zuulzuul00000000000000==================================== HPE 3PAR Driver for OpenStack Manila ==================================== The HPE 3PAR driver provides NFS and CIFS shared file systems to OpenStack using HPE 3PAR's File Persona capabilities. For information on HPE 3PAR Driver for OpenStack Manila, refer to `content kit page `_. 
HPE 3PAR File Persona Software Suite concepts and terminology ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The software suite comprises the following managed objects: - File Provisioning Groups (FPGs) - Virtual File Servers (VFSs) - File Stores - File Shares The File Persona Software Suite is built upon the resilient mesh-active architecture of HPE 3PAR StoreServ and benefits from HPE 3PAR storage foundation of wide-striped logical disks and autonomic ``Common Provisioning Groups (CPGs)``. A CPG can be shared between file and block to create the File Shares or the logical unit numbers (LUNs) to provide true convergence. ``A File Provisioning Group (FPG)`` is an instance of the HPE intellectual property Adaptive File System. It controls how files are stored and retrieved. Each FPG is transparently constructed from one or multiple Virtual Volumes (VVs) and is the unit for replication and disaster recovery for File Persona Software Suite. There are up to 16 FPGs supported on a node pair. ``A Virtual File Server (VFS)`` is conceptually like a server. As such, it presents virtual IP addresses to clients, participates in user authentication services, and can have properties for such things as user/group quota management and antivirus policies. Up to 16 VFSs are supported on a node pair, one per FPG. ``File Stores`` are the slice of a VFS and FPG at which snapshots are taken, capacity quota management can be performed, and antivirus scan service policies customized. There are up to 256 File Stores supported on a node pair, 16 File Stores per VFS. ``File Shares`` are what provide data access to clients via SMB, NFS, and the Object Access API, subject to the share permissions applied to them. Multiple File Shares can be created for a File Store and at different directory levels within a File Store. Supported shared filesystems ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports CIFS and NFS shares. Operations supported ~~~~~~~~~~~~~~~~~~~~ - Create a share. 
– Share is not accessible until access rules allow access. - Delete a share. - Allow share access. Note the following limitations: – IP access rules are required for NFS share access. – User access rules are not allowed for NFS shares. – User access rules are required for SMB share access. – User access requires a File Persona local user for SMB shares. – Shares are read/write (and subject to ACLs). - Deny share access. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. - Extend a share. - Shrink a share. - Share networks. HPE 3PAR File Persona driver can be configured to work with or without share networks. When using share networks, the HPE 3PAR driver allocates an FSIP on the back end FPG (VFS) to match the share network's subnet and segmentation ID. Security groups associated with share networks are ignored. Operations not supported ~~~~~~~~~~~~~~~~~~~~~~~~ - Manage and unmanage - Manila Experimental APIs (consistency groups, replication, and migration) were added in Mitaka but have not yet been implemented by the HPE 3PAR File Persona driver. Requirements ~~~~~~~~~~~~ On the OpenStack host running the Manila share service: - python-3parclient version 4.2.0 or newer from PyPI. On the HPE 3PAR array: - HPE 3PAR Operating System software version 3.2.1 MU3 or higher. - The array class and hardware configuration must support File Persona. Pre-configuration on the HPE 3PAR StoreServ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following HPE 3PAR CLI commands show how to set up the HPE 3PAR StoreServ to use File Persona with OpenStack Manila. HPE 3PAR File Persona must be initialized, and started on the HPE 3PAR storage. .. 
code-block:: console cli% startfs 0:2:1 1:2:1 cli% setfs nodeip -ipaddress 10.10.10.11 -subnet 255.255.240.0 0 cli% setfs nodeip -ipaddress 10.10.10.12 -subnet 255.255.240.0 1 cli% setfs dns 192.168.8.80,127.127.5.50 foo.com,bar.com cli% setfs gw 10.10.10.10 - A File Provisioning Group (FPG) must be created for use with the Shared File Systems service. .. code-block:: console cli% createfpg examplecpg examplefpg 18T - A Virtual File Server (VFS) must be created on the FPG. - The VFS must be configured with an appropriate share export IP address. .. code-block:: console cli% createvfs -fpg examplefpg 10.10.10.101 255.255.0.0 examplevfs - A local user in the Administrators group is needed for CIFS (SMB) shares. .. code-block:: console cli% createfsgroup fsusers cli% createfsuser –passwd -enable true -grplist Users,Administrators –primarygroup fsusers fsadmin - The WSAPI with HTTP and/or HTTPS must be enabled and started. .. code-block:: console cli% setwsapi -https enable cli% startwsapi HPE 3PAR shared file system driver configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Install the python-3parclient python package on the OpenStack Block Storage system: .. code-block:: console $ pip install 'python-3parclient>=4.0,<5.0' - Manila configuration file The Manila configuration file (typically ``/etc/manila/manila.conf``) defines and configures the Manila drivers and backends. After updating the configuration file, the Manila share service must be restarted for changes to take effect. - Enable share protocols To enable share protocols, an optional list of supported protocols can be specified using the ``enabled_share_protocols`` setting in the ``DEFAULT`` section of the ``manila.conf`` file. The default is ``NFS, CIFS`` which allows both protocols supported by HPE 3PAR (NFS and SMB). Where Manila uses the term ``CIFS``, HPE 3PAR uses the term ``SMB``. 
Use the ``enabled_share_protocols`` option if you want to only provide one type of share (for example, only NFS) or if you want to explicitly avoid the introduction of other protocols that can be added for other drivers in the future. - Enable share back ends In the ``[DEFAULT]`` section of the Manila configuration file, use the ``enabled_share_backends`` option to specify the name of one or more back-end configuration sections to be enabled. To enable multiple back ends, use a comma-separated list. .. note:: The name of the backend's configuration section is used (which may be different from the ``share_backend_name`` value) - Configure each back end For each back end, a configuration section defines the driver and back end options. These include common Manila options, as well as driver-specific options. The following ``Driver options`` section describes the parameters that need to be configured in the Manila configuration file for the HPE 3PAR driver. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. include:: ../../tables/manila-hpe3par.inc HPE 3PAR Manila driver configuration example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following parameters shows a sample subset of the ``manila.conf`` file, which configures two backends and the relevant ``[DEFAULT]`` options. A real configuration would include additional ``[DEFAULT]`` options and additional sections that are not discussed in this document. In this example, the backends are using different FPGs on the same array: .. 
code-block:: ini [DEFAULT] enabled_share_backends = HPE1,HPE2 enabled_share_protocols = NFS,CIFS default_share_type = default [HPE1] share_backend_name = HPE3PAR1 share_driver = manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver driver_handles_share_servers = False max_over_subscription_ratio = 1 hpe3par_fpg = examplefpg,10.10.10.101 hpe3par_san_ip = 10.20.30.40 hpe3par_api_url = https://10.20.30.40:8080/api/v1 hpe3par_username = hpe3par_password = hpe3par_san_login = hpe3par_san_password = hpe3par_debug = False hpe3par_cifs_admin_access_username = hpe3par_cifs_admin_access_password = [HPE2] share_backend_name = HPE3PAR2 share_driver = manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver driver_handles_share_servers = False max_over_subscription_ratio = 1 hpe3par_fpg = examplefpg2,10.10.10.102 hpe3par_san_ip = 10.20.30.40 hpe3par_api_url = https://10.20.30.40:8080/api/v1 hpe3par_username = hpe3par_password = hpe3par_san_login = hpe3par_san_password = hpe3par_debug = False hpe3par_cifs_admin_access_username = hpe3par_cifs_admin_access_password = Network approach ~~~~~~~~~~~~~~~~ Network connectivity between the storage array (SSH/CLI and WSAPI) and the Manila host is required for share management. Network connectivity between the clients and the VFS is required for mounting and using the shares. This includes: - Routing from the client to the external network. - Assigning the client an external IP address, for example a floating IP. - Configuring the Shared File Systems service host networking properly for IP forwarding. - Configuring the VFS networking properly for client subnets. - Configuring network segmentation, if applicable. In the OpenStack Kilo release, the HPE 3PAR driver did not support share networks. Share access from clients to HPE 3PAR shares required external network access (external to OpenStack) and was set up and configured outside of Manila. 
In the OpenStack Liberty release, the HPE 3PAR driver could run with or without share networks. The configuration option ``driver_handles_share_servers``( ``True`` or ``False`` ) indicated whether share networks could be used. When set to ``False``, the HPE 3PAR driver behaved as described earlier for Kilo. When set to ``True``, the share network's subnet, segmentation ID and IP address range were used to allocate an FSIP on the HPE 3PAR. There is a limit of four FSIPs per VFS. For clients to communicate with shares via this FSIP, the client must have access to the external network using the subnet and segmentation ID of the share network. For example, the client must be routed to the neutron provider network with external access. The Manila host networking configuration and network switches must support the subnet routing. If the VLAN segmentation ID is used, communication with the share will use the FSIP IP address. Neutron networking is required for HPE 3PAR share network support. Flat and VLAN provider networks are supported, but the HPE 3PAR driver does not support share network security groups. Share access ~~~~~~~~~~~~ A share that is mounted before access is allowed can appear to be an empty read-only share. After granting access, the share must be remounted. - IP access rules are required for NFS. - SMB shares require user access rules. With the proper access rules, share access is not limited to the OpenStack environment. Access rules added via Manila or directly in HPE 3PAR CLI can be used to allow access to clients outside of the stack. The HPE 3PAR VFS/FSIP settings determine the subnets visible for HPE 3PAR share access. - IP access rules To allow IP access to a share in the horizon UI, find the share in the Project|Manage Compute|Shares view. Use the ``Manage Rules`` action to add a rule. Select IP as the access type, and enter the external IP address (for example, the floating IP) of the client in the ``Access to`` box. 
You can also use the command line to allow IP access to a share in the horizon UI with the command: .. code-block:: console $ manila access-allow ip - User access rules To allow user access to a share in the horizon UI, find the share in the Project|Manage Compute|Shares view. Use the ``Manage Rules`` action to add a rule. Select user as the access type and enter user name in the ``Access to`` box. You can also use the command line to allow user access to a share in the horizon UI with the command: .. code-block:: console $ manila access-allow user The user name must be an HPE 3PAR user. Share access is different from file system permissions, for example, ACLs on files and folders. If a user wants to read a file, the user must have at least read permissions on the share and an ACL that grants him read permissions on the file or folder. Even with full control share access, it does not mean every user can do everything due to the additional restrictions of the folder ACLs. To modify the file or folder ACLs, allow access to an HPE 3PAR File Persona local user that is in the administrator's group and connect to the share using that user's credentials. Then, use the appropriate mechanism to modify the ACL or permissions to allow different access than what is provided by default. .. _Share types: Share types ~~~~~~~~~~~ When creating a share, a share type can be specified to determine where and how the share will be created. If a share type is not specified, the ``default_share_type`` set in the Shared File Systems service configuration file is used. Manila share types are a type or label that can be selected at share creation time in OpenStack. These types can be created either in the ``Admin`` horizon UI or using the command line, as follows: .. code-block:: console $ manila --os-username admin --os-tenant-name demo type-create –is_public false false The ```` is the name of the new share type. False at the end specifies ``driver_handles_share_servers=False``. 
The ``driver_handles_share_servers`` setting in the share type needs to match the setting configured for the back end in the ``manila.conf`` file. ``is_public`` is used to indicate whether this share type is applicable to all tenants or will be assigned to specific tenants. ``--os-username admin --os-tenant-name demo`` are only needed if your environment variables do not specify the desired user and tenant. For share types that are not public, use Manila ``type-access-add`` to assign the share type to a tenant. - Using share types to require share networks The Shared File Systems service requires that the share type include the ``driver_handles_share_servers`` extra-spec. This ensures that the share is created on a back end that supports the requested ``driver_handles_share_servers`` (share networks) capability. From the Liberty release forward, both ``True`` and ``False`` are supported. The ``driver_handles_share_servers`` setting in the share type must match the setting in the back end configuration. - Using share types to select backends by name Administrators can optionally specify that a particular share type be explicitly associated with a single back end (or group of backends) by including the extra spec share_backend_name to match the name specified within the ``share_backend_name`` option in the back end configuration. When a share type is not selected during share creation, the default share type is used. To prevent creating these shares on any back end, the default share type needs to be specific enough to find appropriate default backends (or to find none if the default should not be used). The following example shows how to set share_backend_name for a share type. .. code-block:: console $ manila --os-username admin --os-tenant-name demo type-key set share_backend_name=HPE3PAR2 - Using share types to select backends with capabilities The HPE 3PAR driver automatically reports capabilities based on the FPG used for each back end. 
An administrator can create share types with extra specs, which controls share types that can use FPGs with or without specific capabilities. With the OpenStack Liberty release or later, below section shows the extra specs used with the capabilities filter and the HPE 3PAR driver: ``hpe3par_flash_cache`` When the value is set to `` True`` (or `` False``), shares of this type are only created on a back end that uses HPE 3PAR Adaptive Flash Cache. For Adaptive Flash Cache, the HPE 3PAR StoreServ Storage array must meet the following requirements: - Adaptive Flash Cache enabled - Available SSDs - Adaptive Flash Cache must be enabled on the HPE 3PAR StoreServ Storage array. This is done with the following CLI command: .. code-block:: console cli% createflashcache ```` must be in 16 GB increments. For example, the below command creates 128 GB of Flash Cache for each node pair in the array. .. code-block:: console cli% createflashcache 128g - Adaptive Flash Cache must be enabled for the VV set used by an FPG. For example, ``setflashcache vvset:``. The VV set name is the same as the FPG name. .. note:: This setting affects all shares in that FPG (on that back end). ``Dedupe`` When the value is set to `` True`` (or `` False``), shares of this type are only created on a back end that uses deduplication. For HPE 3PAR File Persona, the provisioning type is determined when the FPG is created. Using the ``createfpg –tdvv`` option creates an FPG that supports both dedupe and thin provisioning. The thin deduplication must be enabled to use the tdvv option. ``thin_provisioning`` When the value is set to `` True`` (or `` False``), shares of this type are only created on a back end that uses thin (or full) provisioning. For HPE 3PAR File Persona, the provisioning type is determined when the FPG is created. By default, FPGs are created with thin provisioning. 
The capacity filter uses the total provisioned space and configured ``max_oversubscription_ratio`` when filtering and weighing backends that use thin provisioning. - Using share types to influence share creation options Scoped extra-specs are used to influence vendor-specific implementation details. Scoped extra-specs use a prefix followed by a colon. For HPE 3PAR, these extra specs have a prefix of hpe3par. The following HPE 3PAR extra-specs are used when creating CIFS (SMB) shares: ``hpe3par:smb_access_based_enum`` ``smb_access_based_enum`` (Access Based Enumeration) specifies if users can see only the files and directories to which they have been allowed access on the shares. Valid values are ``True`` or ``False``. The default is ``False``. ``hpe3par:smb_continuous_avail`` ``smb_continuous_avail`` (Continuous Availability) specifies if continuous availability features of SMB3 should be enabled for this share. Valid values are ``True`` or ``False``. The default is ``True``. ``hpe3par:smb_cache`` ``smb_cache`` specifies client-side caching for offline files. The default value is ``manual``. Valid values are: - ``off`` — the client must not cache any files from this share. The share is configured to disallow caching. - ``manual`` — the client must allow only manual caching for the files open from this share. - ``optimized`` — the client may cache every file that it opens from this share. Also, the client may satisfy the file requests from its local cache. The share is configured to allow automatic caching of programs and documents. - ``auto`` — the client may cache every file that it opens from this share. The share is configured to allow automatic caching of documents. When creating NFS shares, the following HPE 3PAR extra-specs are used: ``hpe3par:nfs_options`` Comma separated list of NFS export options. 
The NFS export options have the following limitations: ``ro`` and ``rw`` are not allowed (will be determined by the driver) ``no_subtree_check`` and ``fsid`` are not allowed per HPE 3PAR CLI support ``(in)secure`` and ``(no_)root_squash`` are not allowed because the HPE 3PAR driver controls those settings All other NFS options are forwarded to the HPE 3PAR as part of share creation. The HPE 3PAR performs additional validation at share creation time. For details, see the HPE 3PAR CLI help. Implementation characteristics ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Shares from snapshots - When a share is created from a snapshot, the share must be deleted before the snapshot can be deleted. This is enforced by the driver. - A snapshot of an empty share will appear to work correctly, but attempting to create a share from an empty share snapshot may fail with an ``NFS Create export`` error. - HPE 3PAR File Persona snapshots are for an entire File Store. In Manila, they appear as snapshots of shares. A share sub-directory is used to give the appearance of a share snapshot when using ``create share from snapshot`` . - Snapshots - For HPE 3PAR File Persona, snapshots are per File Store and not per share. So, the HPE 3PAR limit of 1024 snapshots per File Store results in a Manila limit of 1024 snapshots per tenant on each back end FPG. - Before deleting a share, you must delete its snapshots. This is enforced by Manila. For HPE 3PAR File Persona, this also kicks off a snapshot reclamation. - Size enforcement Manila users create shares with size limits. HPE 3PAR enforces size limits by using File Store quotas. When using ``hpe3par_fstore_per_share``= ``True``(the non-default setting) there is only one share per File Store, so the size enforcement acts as expected. When using ``hpe3par_fstore_per_share`` = ``False`` (the default), the HPE 3PAR Manila driver uses one File Store for multiple shares. 
In this case, the size of the File Store limit is set to the cumulative limit of its Manila share sizes. This can allow one tenant share to exceed the limit and affect the space available for the same tenant's other shares. One tenant cannot use another tenant's File Store. - File removal When shares are removed and the ``hpe3par_fstore_per_share``=``False`` setting is used (the default), files may be left behind in the File Store. Prior to Mitaka, removal of obsolete share directories and files that have been stranded would require tools outside of OpenStack/Manila. In Mitaka and later, the driver mounts the File Store to remove the deleted share's subdirectory and files. For SMB/CIFS share, it requires the ``hpe3par_cifs_admin_access_username`` and ``hpe3par_cifs_admin_access_password`` configuration. If the mount and delete cannot be performed, an error is logged and the share is deleted in Manila. Due to the potential space held by leftover files, File Store quotas are not reduced when shares are removed. - Multi-tenancy - Network The ``driver_handles_share_servers`` configuration setting determines whether share networks are supported. When ``driver_handles_share_servers`` is set to ``True``, a share network is required to create a share. The administrator creates share networks with the desired network, subnet, IP range, and segmentation ID. The HPE 3PAR is configured with an FSIP using the same subnet and segmentation ID and an IP address allocated from the neutron network. Using share network-specific IP addresses, subnets, and segmentation IDs give the appearance of better tenant isolation. Shares on an FPG, however, are accessible via any of the FSIPs (subject to access rules). Back end filtering should be used for further separation. - Back end filtering A Manila HPE 3PAR back end configuration refers to a specific array and a specific FPG. With multiple backends and multiple tenants, the scheduler determines where shares will be created. 
In a scenario where an array or back end needs to be restricted to one or more specific tenants, share types can be used to influence the selection of a back end. For more information on using share types, see `Share types`_ . - Tenant limit The HPE 3PAR driver uses one File Store per tenant per protocol in each configured FPG. When only one back end is configured, this results in a limit of eight tenants (16 if only using one protocol). Use multiple back end configurations to introduce additional FPGs on the same array to increase the tenant limit. When using share networks, an FSIP is created for each share network (when its first share is created on the back end). The HPE 3PAR supports 4 FSIPs per FPG (VFS). One of those 4 FSIPs is reserved for the initial VFS IP, so the share network limit is 48 share networks per node pair. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/huawei-nas-driver.rst0000664000175000017500000000713000000000000031425 0ustar00zuulzuul00000000000000============= Huawei driver ============= Huawei NAS driver is a plug-in based on the Shared File Systems service. The Huawei NAS driver can be used to provide functions such as the share and snapshot for virtual machines, or instances, in OpenStack. Huawei NAS driver enables the OceanStor V3 series V300R002 storage system to provide only network filesystems for OpenStack. Requirements ~~~~~~~~~~~~ - The OceanStor V3 series V300R002 storage system. - The following licenses should be activated on V3 for File: CIFS, NFS, HyperSnap License (for snapshot). Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports CIFS and NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. Note the following limitations: - Only IP access type is supported for NFS. 
- Only user access is supported for CIFS. - Deny share access. - Create a snapshot. - Delete a snapshot. - Support pools in one backend. - Extend a share. - Shrink a share. - Create a replica. - Delete a replica. - Promote a replica. - Update a replica state. Pre-configurations on Huawei ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Create a driver configuration file. The driver configuration file name must be the same as the ``manila_huawei_conf_file`` item in the ``manila_conf`` configuration file. #. Configure the product. Product indicates the storage system type. For the OceanStor V3 series V300R002 storage systems, the driver configuration file is as follows: .. code-block:: xml V3 x.x.x.x https://x.x.x.x:8088/deviceManager/rest/ xxxxxxxxx xxxxxxxxx xxxxxxxxx xxxxxxxxx 3 60 The options are: - ``Product`` is a type of storage product. Set it to ``V3``. - ``LogicalPortIP`` is the IP address of the logical port. - ``RestURL`` is an access address of the REST interface. Multiple RestURLs can be configured in ````, separated by ";". The driver will automatically retry another ``RestURL`` if one fails to connect. - ``UserName`` is the user name of an administrator. - ``UserPassword`` is the password of an administrator. - ``Thin_StoragePool`` is the name of a thin storage pool to be used. - ``Thick_StoragePool`` is the name of a thick storage pool to be used. - ``WaitInterval`` is the interval time of querying the file system status. - ``Timeout`` is the timeout period for waiting command execution of a device to complete. Back end configuration ~~~~~~~~~~~~~~~~~~~~~~ Modify the ``manila.conf`` Shared File Systems service configuration file and add ``share_driver`` and ``manila_huawei_conf_file`` items. Here is an example for configuring a storage system: .. 
code-block:: ini share_driver = manila.share.drivers.huawei.huawei_nas.HuaweiNasDriver manila_huawei_conf_file = /etc/manila/manila_huawei_conf.xml driver_handles_share_servers = False Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. include:: ../../tables/manila-huawei.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/ibm-spectrumscale-driver.rst0000664000175000017500000001243200000000000033004 0ustar00zuulzuul00000000000000=============================== IBM Spectrum Scale share driver =============================== IBM Spectrum Scale is a flexible software-defined storage product that can be deployed as high-performance file storage or a cost optimized large-scale content repository. IBM Spectrum Scale, previously known as IBM General Parallel File System (GPFS), is designed to scale performance and capacity with no bottlenecks. IBM Spectrum Scale is a cluster file system that provides concurrent access to file systems from multiple nodes. The storage provided by these nodes can be direct attached, network attached, SAN attached, or a combination of these methods. Spectrum Scale provides many features beyond common data access, including data replication, policy based storage management, and space efficient file snapshot and clone operations. Supported shared filesystems and operations (NFS shares only) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Spectrum Scale share driver supports NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. - Only IP access type is supported. - Both RW & RO access level is supported. - Deny share access. - Create a share snapshot. - Delete a share snapshot. - Create a share from a snapshot. - Extend a share. - Manage a share. - Unmanage a share. 
Requirements ~~~~~~~~~~~~ Spectrum Scale must be installed and a cluster must be created that includes one or more storage nodes and protocol server nodes. The NFS server running on these nodes is used to export shares to storage consumers in OpenStack virtual machines or even to bare metal storage consumers in the OpenStack environment. A file system must also be created and mounted on these nodes before configuring the manila service to use Spectrum Scale storage. For more details, refer to `Spectrum Scale product documentation `_. Spectrum Scale supports two ways of exporting data through NFS with high availability. #. CES (which uses Ganesha NFS) * This is provided inherently by the protocol support in Spectrum Scale and is a recommended method for NFS access. #. CNFS (which uses kernel NFS) For more information on NFS support in Spectrum Scale, refer to `Protocol support in Spectrum Scale `_ and `NFS Support overview in Spectrum Scale `_. The following figure is an example of Spectrum Scale architecture with OpenStack services: .. figure:: ../../figures/openstack-spectrumscale-setup.JPG :width: 90% :align: center :alt: OpenStack with Spectrum Scale Setup Quotas should be enabled for the Spectrum Scale filesystem to be exported through NFS using Spectrum Scale share driver. Use the following command to enable quota for a filesystem: .. code-block:: console $ mmchfs -Q yes Limitation ~~~~~~~~~~ Spectrum Scale share driver currently supports creation of NFS shares in the flat network space only. For example, the Spectrum Scale storage node exporting the data should be in the same network as that of the Compute VMs which mount the shares acting as NFS clients. Driver configuration ~~~~~~~~~~~~~~~~~~~~ Spectrum Scale share driver supports creation of shares using both NFS servers (Ganesha using Spectrum Scale CES/Kernel NFS). For both the NFS server types, you need to set the ``share_driver`` in the ``manila.conf`` as: .. 
code-block:: ini share_driver = manila.share.drivers.ibm.gpfs.GPFSShareDriver Spectrum Scale CES (NFS Ganesha server) --------------------------------------- To use Spectrum Scale share driver in this mode, set the ``gpfs_share_helpers`` in the ``manila.conf`` as: .. code-block:: ini gpfs_share_helpers = CES=manila.share.drivers.ibm.gpfs.CESHelper Following table lists the additional configuration options which are used with this driver configuration. .. include:: ../../tables/manila-spectrumscale_ces.inc .. note:: Configuration options related to ssh are required only if ``is_gpfs_node`` is set to ``False``. Spectrum Scale Clustered NFS (Kernel NFS server) ------------------------------------------------ To use Spectrum Scale share driver in this mode, set the ``gpfs_share_helpers`` in the ``manila.conf`` as: .. code-block:: ini gpfs_share_helpers = KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper Following table lists the additional configuration options which are used with this driver configuration. .. include:: ../../tables/manila-spectrumscale_knfs.inc .. note:: Configuration options related to ssh are required only if ``is_gpfs_node`` is set to ``False``. Share creation steps ~~~~~~~~~~~~~~~~~~~~ Sample configuration -------------------- .. code-block:: ini [gpfs] share_driver = manila.share.drivers.ibm.gpfs.GPFSShareDriver gpfs_share_export_ip = x.x.x.x gpfs_mount_point_base = /ibm/gpfs0 gpfs_nfs_server_type = CES is_gpfs_node = True gpfs_share_helpers = CES=manila.share.drivers.ibm.gpfs.CESHelper share_backend_name = GPFS driver_handles_share_servers = False Create GPFS share type and set extra spec ----------------------------------------- .. 
code-block:: console $ manila type-create --snapshot_support True \ --create_share_from_snapshot_support True gpfs False $ manila type-key gpfs set share_backend_name=GPFS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/infinidat-share-driver.rst0000664000175000017500000001226500000000000032436 0ustar00zuulzuul00000000000000================================ INFINIDAT InfiniBox Share driver ================================ The INFINIDAT Share driver provides support for managing filesystem shares on the INFINIDAT InfiniBox storage systems. This section explains how to configure the INFINIDAT driver. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create and delete filesystem shares. - Ensure filesystem shares. - Extend a share. - Create and delete filesystem snapshots. - Create a share from a share snapshot. - Revert a share to its snapshot. - Mount a snapshot. - Set access rights to shares and snapshots. Note the following limitations: - Only IP access type is supported. - Both RW & RO access levels are supported. External package installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver requires the ``infinisdk`` package for communicating with InfiniBox systems. Install the package from PyPI using the following command: .. code-block:: console $ pip install infinisdk Setting up the storage array ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a storage pool object on the InfiniBox array in advance. The storage pool will contain shares managed by OpenStack. Refer to the InfiniBox manuals for details on pool management. Driver configuration ~~~~~~~~~~~~~~~~~~~~ Edit the ``manila.conf`` file, which is usually located under the following path ``/etc/manila/manila.conf``. * Add a section for the INFINIDAT driver back end. * Under the ``[DEFAULT]`` section, set the ``enabled_share_backends`` parameter with the name of the new back-end section. 
Configure the driver back-end section with the parameters below. * Configure the driver name by setting the following parameter: .. code-block:: ini share_driver = manila.share.drivers.infinidat.infinibox.InfiniboxShareDriver * Configure the management IP of the InfiniBox array by adding the following parameter: .. code-block:: ini infinibox_hostname = InfiniBox management IP * Configure SSL support for InfiniBox management API: We recommend enabling SSL support for InfiniBox management API. Refer to the InfiniBox manuals for details on security management. Configure SSL options by adding the following parameters: .. code-block:: ini infinidat_use_ssl = true/false infinidat_suppress_ssl_warnings = true/false These parameters default to ``false``. * Configure user credentials: The driver requires an InfiniBox user with administrative privileges. We recommend creating a dedicated OpenStack user account that holds a pool admin user role. Refer to the InfiniBox manuals for details on user account management. Configure the user credentials by adding the following parameters: .. code-block:: ini infinibox_login = Infinibox management login infinibox_password = Infinibox management password * Configure the name of the InfiniBox pool by adding the following parameter: .. code-block:: ini infinidat_pool_name = Pool as defined in the InfiniBox * Configure the name of the InfiniBox NAS network space by adding the following parameter: .. code-block:: ini infinidat_nas_network_space_name = Network space as defined in the InfiniBox * The back-end name is an identifier for the back end. We recommend using the same name as the name of the section. Configure the back-end name by adding the following parameter: .. code-block:: ini share_backend_name = back-end name * Thin provisioning: The INFINIDAT driver supports creating thin or thick provisioned filesystems. Configure thin or thick provisioning by adding the following parameter: .. 
code-block:: ini infinidat_thin_provision = true/false This parameter defaults to ``true``. * Controls access to the ``.snapshot`` directory: .. code-block:: ini infinidat_snapdir_accessible = true/false By default, each share allows access to its own ``.snapshot`` directory, which contains files and directories of each snapshot taken. To restrict access to the ``.snapshot`` directory on the client side, this option should be set to ``false``. This parameter defaults to ``true``. * Controls visibility of the ``.snapshot`` directory: .. code-block:: ini infinidat_snapdir_visible = true/false By default, each share contains the ``.snapshot`` directory, which is hidden on the client side. To make the ``.snapshot`` directory visible, this option should be set to ``true``. This parameter defaults to ``false``. Configuration example ~~~~~~~~~~~~~~~~~~~~~ .. code-block:: ini [DEFAULT] enabled_share_backends = infinidat-pool-a [infinidat-pool-a] share_driver = manila.share.drivers.infinidat.infinibox.InfiniboxShareDriver share_backend_name = infinidat-pool-a driver_handles_share_servers = false infinibox_hostname = 10.1.2.3 infinidat_use_ssl = true infinidat_suppress_ssl_warnings = true infinibox_login = openstackuser infinibox_password = openstackpass infinidat_pool_name = pool-a infinidat_nas_network_space_name = nas_space infinidat_thin_provision = true infinidat_snapdir_accessible = true infinidat_snapdir_visible = false Driver options ~~~~~~~~~~~~~~ Configuration options specific to this driver: .. 
include:: ../../tables/manila-infinidat.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/infortrend-nas-driver.rst0000664000175000017500000000357100000000000032322 0ustar00zuulzuul00000000000000======================== Infortrend Manila driver ======================== The `Infortrend `__ Manila driver provides NFS and CIFS shared file systems to OpenStack. Requirements ~~~~~~~~~~~~ To use the Infortrend Manila driver, the following items are required: - GS/GSe Family firmware version v73.1.0-4 and later. - Configure at least one channel for shared file systems. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This driver supports NFS and CIFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. Note the following limitations: - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. - Deny share access. - Manage a share. - Unmanage a share. - Extend a share. - Shrink a share. Restrictions ~~~~~~~~~~~~ The Infortrend manila driver has the following restrictions: - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. - Only file-level data service channel can offer the NAS service. Driver configuration ~~~~~~~~~~~~~~~~~~~~ On ``manila-share`` nodes, set the following in your ``/etc/manila/manila.conf``, and use the following options to configure it: Driver options -------------- .. include:: ../../tables/manila-infortrend.inc Back-end configuration example ------------------------------ .. 
code-block:: ini [DEFAULT] enabled_share_backends = ift-manila enabled_share_protocols = NFS, CIFS [ift-manila] share_backend_name = ift-manila share_driver = manila.share.drivers.infortrend.driver.InfortrendNASDriver driver_handles_share_servers = False infortrend_nas_ip = FAKE_IP infortrend_nas_user = FAKE_USER infortrend_nas_password = FAKE_PASS infortrend_share_pools = pool-1, pool-2 infortrend_share_channels = 0, 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/lvm-driver.rst0000664000175000017500000000464000000000000030165 0ustar00zuulzuul00000000000000================ LVM share driver ================ The Shared File Systems service can be configured to use LVM share driver. LVM share driver relies solely on LVM running on the same host with manila-share service. It does not require any services not related to the Shared File Systems service to be present to work. Prerequisites ~~~~~~~~~~~~~ The following packages must be installed on the same host with manila-share service: - NFS server - Samba server >= 3.2.0 - LVM2 >= 2.02.66 Services must be up and running, ports used by the services must not be blocked. A node with manila-share service should be accessible to share service users. LVM should be preconfigured. By default, LVM driver expects to find a volume group named ``lvm-shares``. This volume group will be used by the driver for share provisioning. It should be managed by node administrator separately. Shared File Systems service driver configuration setting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use the driver, one should set up a corresponding back end. A driver must be explicitly specified as well as export IP address. A minimal back-end specification that will enable LVM share driver is presented below: .. 
code-block:: ini [LVM_sample_backend] driver_handles_share_servers = False share_driver = manila.share.drivers.lvm.LVMShareDriver lvm_share_export_ips = 1.2.3.4 In the example above, ``lvm_share_export_ips`` is the address to be used by clients for accessing shares. In the simplest case, it should be the same as host's address. The option allows configuring more than one IP address as a comma separated string. Supported shared file systems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports CIFS and NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. Note the following limitations: - Only IP access type is supported for NFS. - Deny share access. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. - Extend a share. Known restrictions ~~~~~~~~~~~~~~~~~~ - LVM driver should not be used on a host running Neutron agents, simultaneous usage might cause issues with share deletion (shares will not get deleted from volume groups). Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to this driver. .. include:: ../../tables/manila-lvm.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/maprfs-native-driver.rst0000664000175000017500000000766600000000000032156 0ustar00zuulzuul00000000000000==================== MapRFS native driver ==================== MapR-FS native driver is a plug-in based on the Shared File Systems service and provides high-throughput access to the data on MapR-FS distributed file system, which is designed to hold very large amounts of data. A Shared File Systems service share in this driver is a volume in MapR-FS. Instances talk directly to the MapR-FS storage backend via the (mapr-posix) client. To mount a MapR-FS volume, the MapR POSIX client is required. 
Access to each share is allowed by user and group based access type, which is aligned with MapR-FS ACEs to support access control for multiple users and groups. If user name and group name are the same, the group access type will be used by default. For more details, see `MapR documentation `_. Network configuration ~~~~~~~~~~~~~~~~~~~~~ The storage backend and Shared File Systems service hosts should be in a flat network. Otherwise, the L3 connectivity between them should exist. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports MapR-FS shares. The following operations are supported: - Create MapR-FS share. - Delete MapR-FS share. - Allow MapR-FS Share access. - Only support user and group access type. - Support level of access (ro/rw). - Deny MapR-FS Share access. - Update MapR-FS Share access. - Create snapshot. - Delete snapshot. - Create share from snapshot. - Extend share. - Shrink share. - Manage share. - Unmanage share. - Manage snapshot. - Unmanage snapshot. - Ensure share. Requirements ~~~~~~~~~~~~ - Install MapR core packages, version >= 5.2.x, on the storage backend. - To enable snapshots, the MapR cluster should have at least M5 license. - Establish network connection between the Shared File Systems service hosts and storage backend. - Obtain a `ticket `_ for user who will be used to access MapR-FS. Back end configuration (manila.conf) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add MapR-FS protocol to ``enabled_share_protocols``: .. code-block:: ini enabled_share_protocols = MAPRFS Create a section for MapR-FS backend. Example: .. code-block:: ini [maprfs] driver_handles_share_servers = False share_driver = manila.share.drivers.maprfs.maprfs_native.MapRFSNativeShareDriver maprfs_clinode_ip = example maprfs_ssh_name = mapr maprfs_ssh_pw = mapr share_backend_name = maprfs Set ``driver-handles-share-servers`` to ``False`` as the driver does not manage the lifecycle of ``share-servers``. 
Add driver backend to ``enabled_share_backends``: .. code-block:: ini enabled_share_backends = maprfs Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to this driver. .. include:: ../../tables/manila-maprfs.inc Known restrictions ~~~~~~~~~~~~~~~~~~ This driver does not handle user authentication, no tickets or users are created by this driver. This means that when 'access_allow' or 'update_access' is called, this will have no effect without providing tickets to users. Share metadata ~~~~~~~~~~~~~~ MapR-FS shares can be created by specifying additional options. Metadata is used for this purpose. Every metadata option with ``_`` prefix is passed to MapR-FS volume. For example, to specify advisory volume quota add ``_advisoryquota=10G`` option to metadata: .. code-block:: console $ manila create MAPRFS 1 --metadata _advisoryquota=10G If you need to create a share with your custom backend name or export location instead of uuid, you can specify ``_name`` and ``_path`` options: .. code-block:: console $ manila create MAPRFS 1 --metadata _name=example _path=/example .. WARNING:: Specifying invalid options will cause an error. The list of allowed options depends on mapr-core version. See `volume create `_ for more information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/netapp-cluster-mode-driver.rst0000664000175000017500000000371500000000000033261 0ustar00zuulzuul00000000000000================================== NetApp Clustered Data ONTAP driver ================================== The Shared File Systems service can be configured to use NetApp clustered Data ONTAP version 8. Network approach ~~~~~~~~~~~~~~~~ L3 connectivity between the storage cluster and Shared File Systems service host should exist, and VLAN segmentation should be configured. 
The clustered Data ONTAP driver creates storage virtual machines (SVM, previously known as vServers) as representations of the Shared File Systems service share server interface, configures logical interfaces (LIFs) and stores shares there. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports CIFS and NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. Note the following limitations: - Only IP access type is supported for NFS. - Only user access type is supported for CIFS. - Deny share access. - Create a snapshot. - Delete a snapshot. - Create a share from a snapshot. - Extend a share. - Shrink a share. - Create a consistency group. - Delete a consistency group. - Create a consistency group snapshot. - Delete a consistency group snapshot. Required licenses ~~~~~~~~~~~~~~~~~ - NFS - CIFS - FlexClone Known restrictions ~~~~~~~~~~~~~~~~~~ - For CIFS shares an external active directory service is required. Its data should be provided via security-service that is attached to used share-network. - Share access rule by user for CIFS shares can be created only for existing user in active directory. - To be able to configure clients to security services, the time on these external security services and storage should be synchronized. The maximum allowed clock skew is 5 minutes. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. include:: ../../tables/manila-netapp.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/nexentastor5-driver.rst0000664000175000017500000000413000000000000032020 0ustar00zuulzuul00000000000000=================== NexentaStor5 Driver =================== Nexentastor5 can be used as a storage back end for the OpenStack Shared File System service. 
Shares in the Shared File System service are mapped 1:1 to Nexentastor5 filesystems. Access is provided via NFS protocol and IP-based authentication. Network approach ~~~~~~~~~~~~~~~~ L3 connectivity between the storage back end and the host running the Shared File Systems share service should exist. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports NFS shares. The following operations are supported: - Create NFS share - Delete share - Extend share - Shrink share - Allow share access Note the following limitation: * Only IP based access is supported (ro/rw). - Deny share access - Create snapshot - Revert to snapshot - Delete snapshot - Create share from snapshot - Manage share - Unmanage share Requirements ~~~~~~~~~~~~ - NexentaStor 5.x Appliance pre-provisioned and licensed - Pool and parent filesystem configured (this filesystem will contain all manila shares) Restrictions ~~~~~~~~~~~~ - Only IP share access control is allowed for NFS shares. Configuration ~~~~~~~~~~~~~~ .. code-block:: ini enabled_share_backends = NexentaStor5 Create the new back end configuration section, in this case named ``NexentaStor5``: .. code-block:: ini [NexentaStor5] share_backend_name = NexentaStor5 driver_handles_share_servers = False nexenta_folder = manila share_driver = manila.share.drivers.nexenta.ns5.nexenta_nas.NexentaNasDriver nexenta_rest_addresses = 10.3.1.1,10.3.1.2 nexenta_nas_host = 10.3.1.10 nexenta_rest_port = 8443 nexenta_pool = pool1 nexenta_nfs = True nexenta_user = admin nexenta_password = secret_password nexenta_thin_provisioning = True More information can be found at the `Nexenta documentation webpage `. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. 
include:: ../../tables/manila-nexentastor5.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/purestorage-flashblade-driver.rst0000664000175000017500000000722200000000000034011 0ustar00zuulzuul00000000000000============================== Pure Storage FlashBlade driver ============================== The Pure Storage FlashBlade driver provides support for managing filesystem shares on the Pure Storage FlashBlade storage systems. The driver is compatible with Pure Storage FlashBlades that support REST API version 1.6 or higher (Purity//FB v2.3.0 or higher). This section explains how to configure the FlashBlade driver. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create and delete NFS shares. - Extend/Shrink a share. - Create and delete filesystem snapshots (No support for create-from or mount). - Revert to Snapshot. - Both RW and RO access levels are supported. - Set access rights to NFS shares. Note the following limitations: - Only IP (for NFS shares) access types are supported. External package installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver requires the ``purity_fb`` package for communicating with FlashBlade systems. Install the package from PyPI using the following command: .. code-block:: console $ pip install purity_fb Driver configuration ~~~~~~~~~~~~~~~~~~~~ Edit the ``manila.conf`` file, which is usually located under the following path ``/etc/manila/manila.conf``. * Add a section for the FlashBlade driver back end. * Under the ``[DEFAULT]`` section, set the ``enabled_share_backends`` parameter with the name of the new back-end section. Configure the driver back-end section with the parameters below. * Configure the driver name by setting the following parameter: .. 
code-block:: ini share_driver = manila.share.drivers.purestorage.flashblade.FlashBladeShareDriver * Configure the management and data VIPs of the FlashBlade array by adding the following parameters: .. code-block:: ini flashblade_mgmt_vip = FlashBlade management VIP flashblade_data_vip = FlashBlade data VIP(s) * Configure user credentials: The driver requires a FlashBlade user with administrative privileges. We recommend creating a dedicated OpenStack user account that holds an administrative user role. Refer to the FlashBlade manuals for details on user account management. Configure the user credentials by adding the following parameters: .. code-block:: ini flashblade_api = FlashBlade API token for admin-privileged user * (Optional) Configure File System and Snapshot Eradication: When this option is enabled, all FlashBlade file systems and snapshots will be eradicated at the time of deletion in Manila. Data will NOT be recoverable after a delete with this set to True! When disabled, file systems and snapshots will go into pending eradication state and can be recovered. Recovery of these pending eradication snapshots cannot be accomplished through Manila. These snapshots will self-eradicate after 24 hours unless manually restored. The default setting is True. .. code-block:: ini flashblade_eradicate = { True | False } * The back-end name is an identifier for the back end. We recommend using the same name as the name of the section. Configure the back-end name by adding the following parameter: .. code-block:: ini share_backend_name = back-end name Configuration example ~~~~~~~~~~~~~~~~~~~~~ .. 
code-block:: ini [DEFAULT] enabled_share_backends = flashblade-1 [flashblade-1] share_driver = manila.share.drivers.purestorage.flashblade.FlashBladeShareDriver share_backend_name = flashblade-1 driver_handles_share_servers = false flashblade_mgmt_vip = 10.1.2.3 flashblade_data_vip = 10.1.2.4 flashblade_api = pureuser API Driver options ~~~~~~~~~~~~~~ Configuration options specific to this driver: .. include:: ../../tables/manila-purestorage-flashblade.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/quobyte-driver.rst0000664000175000017500000000412500000000000031055 0ustar00zuulzuul00000000000000============== Quobyte Driver ============== Quobyte can be used as a storage back end for the OpenStack Shared File System service. Shares in the Shared File System service are mapped 1:1 to Quobyte volumes. Access is provided via NFS protocol and IP-based authentication. The Quobyte driver uses the Quobyte API service. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The drivers supports NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. Note the following limitations: - Only IP access type is supported. - Deny share access. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. include:: ../../tables/manila-quobyte.inc Configuration ~~~~~~~~~~~~~~ To configure Quobyte access for the Shared File System service, a back end configuration section has to be added in the ``manila.conf`` file. Add the name of the configuration section to ``enabled_share_backends`` in the ``manila.conf`` file. For example, if the section is named ``Quobyte``: .. code-block:: ini enabled_share_backends = Quobyte Create the new back end configuration section, in this case named ``Quobyte``: .. 
code-block:: ini [Quobyte] share_driver = manila.share.drivers.quobyte.quobyte.QuobyteShareDriver share_backend_name = QUOBYTE quobyte_api_url = http://api.myserver.com:1234/ quobyte_delete_shares = False quobyte_volume_configuration = BASE quobyte_default_volume_user = myuser quobyte_default_volume_group = mygroup The section name must match the name used in the ``enabled_share_backends`` option described above. The ``share_driver`` setting is required as shown, the other options should be set according to your local Quobyte setup. Other security-related options are: .. code-block:: ini quobyte_api_ca = /path/to/API/server/verification/certificate quobyte_api_username = api_user quobyte_api_password = api_user_pwd Quobyte support can be found at the `Quobyte support webpage `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/vastdata_driver.rst0000664000175000017500000000672500000000000031266 0ustar00zuulzuul00000000000000==================================== Vastdata Share Driver ==================================== VAST Share Driver integrates OpenStack with `VAST Data `__'s Storage System. Shares in the Shared File System service are mapped to directories on VAST, and are accessed via NFS protocol using a Virtual IP Pool. Supported shared filesystems ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports NFS shares. Operations supported ~~~~~~~~~~~~~~~~~~~~ The driver supports NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. - Deny share access. - Extend a share. - Shrink a share. Requirements ~~~~~~~~~~~~ - The Trash Folder Access functionality must be enabled on the VAST cluster. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the share driver. .. 
include:: ../../tables/manila-vastdata.inc VAST Share Driver configuration example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following example shows parameters in the ``manila.conf`` file that are used to configure VAST Share Driver. They include two options under ``[DEFAULT]`` and parameters under ``[vast]``. Note that a real ``manila.conf`` file would also include other parameters that are not specific to VAST Share Driver. .. code-block:: ini [DEFAULT] enabled_share_backends = vast enabled_share_protocols = NFS [vast] share_driver = manila.share.drivers.vastdata.driver.VASTShareDriver share_backend_name = vast driver_handles_share_servers = False snapshot_support = True vast_mgmt_host = {vms_ip} vast_mgmt_port = {vms_port} vast_mgmt_user = {mgmt_user} vast_mgmt_password = {mgmt_password} vast_api_token = {vast_api_token} vast_vippool_name = {vip_pool} vast_root_export = {root_export} Restart of the ``manila-share`` service is needed for the configuration changes to take effect. Pre-configurations for share support -------------------------------------------------- To create a file share, you need to: Create the share type: .. code-block:: console openstack share type create ${share_type_name} False \ --extra-specs share_backend_name=${share_backend_name} Create an NFS share: .. code-block:: console openstack share create NFS ${size} --name ${share_name} --share-type ${share_type_name} Pre-Configurations for Snapshot support -------------------------------------------------- The share type must have the following parameter specified: - snapshot_support = True You can specify it when creating a new share type: .. code-block:: console openstack share type create ${share_type_name} false \ --snapshot-support=true \ --extra-specs share_backend_name=${share_backend_name} Or you can add it to an existing share type: .. 
code-block:: console openstack share type set ${share_type_name} --extra-specs snapshot_support=True To snapshot a share and create share from the snapshot ------------------------------------------------------ Create a share using a share type with snapshot_support=True. Then, create a snapshot of the share using the command: .. code-block:: console openstack share snapshot create ${source_share_name} --name ${target_snapshot_name} The :mod:`manila.share.drivers.vastdata.driver` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.vastdata.driver :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/windows-smb-driver.rst0000664000175000017500000000531100000000000031634 0ustar00zuulzuul00000000000000.. _windows_smb_driver: Windows SMB driver ================== While the generic driver only supports Linux instances, you may use the Windows SMB driver when Windows VMs are preferred. This driver extends the generic one in order to provide Windows instance support. It can integrate with Active Directory domains through the Manila security service feature, which can ease access control. Although Samba is a great SMB share server, Windows instances may provide improved SMB 3 support. Limitations ----------- - ip access rules are not supported at the moment, only user based ACLs may be used - SMB (also known as CIFS) is the only supported share protocol - although it can handle Windows VMs, Manila cannot run on Windows at the moment. The VMs on the other hand may very well run on Hyper-V, KVM or any other hypervisor supported by Nova. Prerequisites ------------- This driver requires a Windows Server image having cloudbase-init installed. Cloudbase-init is the de-facto standard tool for initializing Windows VMs running on OpenStack. 
The driver relies on it to do tasks such as: - configuring WinRM access using password or certificate based authentication - network configuration - setting the host name .. note:: This driver was initially developed with Windows Nano Server in mind. Unfortunately, Microsoft no longer supports running Nano Servers on bare metal or virtual machines, for which reason you may want to use Windows Server Core images. Configuring ----------- Below is a config sample that enables the Windows SMB driver. .. code-block:: ini [DEFAULT] manila_service_keypair_name = manila-service enabled_share_backends = windows_smb enabled_share_protocols = CIFS [windows_smb] service_net_name_or_ip = private tenant_net_name_or_ip = private share_mount_path = C:/shares # The driver can either create share servers by itself # or use existing ones. driver_handles_share_servers = True service_instance_user = Admin service_image_name = ws2016 # nova get-password may be used to retrieve passwords generated # by cloudbase-init and encrypted with the public key. path_to_private_key = /etc/manila/ssh/id_rsa path_to_public_key = /etc/manila/ssh/id_rsa.pub winrm_cert_pem_path = /etc/manila/ssl/winrm_client_cert.pem winrm_cert_key_pem_path = /etc/manila/ssl/winrm_client_cert.key # If really needed, you can use password based authentication as well. winrm_use_cert_based_auth = True winrm_conn_timeout = 40 max_time_to_build_instance = 900 share_backend_name = windows_smb share_driver = manila.share.drivers.windows.windows_smb_driver.WindowsSMBDriver service_instance_flavor_id = 100 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/zfs-on-linux-driver.rst0000664000175000017500000001235600000000000031743 0ustar00zuulzuul00000000000000===================== ZFS (on Linux) driver ===================== Manila ZFSonLinux share driver uses ZFS file system for exporting NFS shares. 
Written and tested using Linux version of ZFS. Requirements ~~~~~~~~~~~~ - NFS daemon that can be handled through ``exportfs`` app. - ZFS file system packages, either Kernel or FUSE versions. - ZFS zpools that are going to be used by Manila should exist and be configured as desired. Manila will not change zpool configuration. - For remote ZFS hosts according to manila-share service host SSH should be installed. - For ZFS hosts that support replication: - SSH access for each other should be passwordless. - Service IP addresses should be available by ZFS hosts for each other. Supported shared filesystems and operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports NFS shares. The following operations are supported: - Create a share. - Delete a share. - Allow share access. - Only IP access type is supported. - Both access levels are supported - ``RW`` and ``RO``. - Deny share access. - Bring an existing ZFSOnLinux share under the shared file system service (Managing a share) - Remove a ZFSOnLinux share from the shared file system service without deleting it (Unmanaging a share) - Create a snapshot. - Delete a snapshot. - Bring an existing ZFSOnLinux snapshot under the shared file system service (Managing a snapshot) - Remove a ZFSOnLinux snapshot from the shared file system service without deleting it (Unmanaging a snapshot) - Create a share from snapshot. - Extend a share. - Shrink a share. - Share replication (experimental): - Create, update, delete, and promote replica operations are supported. Possibilities ~~~~~~~~~~~~~ - Any amount of ZFS zpools can be used by share driver. - Allowed to configure default options for ZFS datasets that are used for share creation. - Any amount of nested datasets is allowed to be used. - All share replicas are read-only, only active one is read-write. - All share replicas are synchronized periodically, not continuously. Status ``in_sync`` means latest sync was successful. 
Time range between syncs equals the value of the ``replica_state_update_interval`` configuration global option. - Driver can use qualified extra spec ``zfsonlinux:compression``. It can contain any value that ZFS app supports. But if it is disabled through the configuration option with the value ``compression=off``, then it will not be used. Restrictions ~~~~~~~~~~~~ The ZFSonLinux share driver has the following restrictions: - Only IP access type is supported for NFS. - Only FLAT network is supported. - ``Promote share replica`` operation will switch roles of current ``secondary`` replica and ``active``. It does not make more than one active replica available. - The below items are not yet implemented: - ``SaMBa`` based sharing. - ``Thick provisioning`` capability. Known problems ~~~~~~~~~~~~~~ - ``Promote share replica`` operation will make ZFS file system that became secondary as RO only on NFS level. On ZFS level system will stay mounted as was - RW. Back-end configuration ~~~~~~~~~~~~~~~~~~~~~~ The following parameters need to be configured in the manila configuration file for back-ends that use the ZFSonLinux driver: - ``share_driver`` = manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver - ``driver_handles_share_servers`` = False - ``replication_domain`` = custom_str_value_as_domain_name - If empty, then replication will be disabled. - If set, then will be able to be used as replication peer for other back ends with the same value. - ``zfs_share_export_ip`` = - ``zfs_service_ip`` = - ``zfs_zpool_list`` = zpoolname1,zpoolname2/nested_dataset_for_zpool2 - Can be one or more zpools. - Can contain nested datasets. - ``zfs_dataset_creation_options`` = - readonly, quota, sharenfs and sharesmb options will be ignored. - ``zfs_dataset_name_prefix`` = - Prefix to be used in each dataset name. - ``zfs_dataset_snapshot_name_prefix`` = - Prefix to be used in each dataset snapshot name.
- ``zfs_use_ssh`` = - Set ``False`` if ZFS located on the same host as `manila-share` service. - Set ``True`` if `manila-share` service should use SSH for ZFS configuration. - ``zfs_ssh_username`` = - Required for replication operations. - Required for SSH'ing to ZFS host if ``zfs_use_ssh`` is set to ``True``. - ``zfs_ssh_user_password`` = - Password for ``zfs_ssh_username`` of ZFS host. - Used only if ``zfs_use_ssh`` is set to ``True``. - ``zfs_ssh_private_key_path`` = - Used only if ``zfs_use_ssh`` is set to ``True``. - ``zfs_share_helpers`` = NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper - Approach for setting up helpers is similar to various other share drivers. - At least one helper should be used. - ``zfs_replica_snapshot_prefix`` = - Prefix to be used in dataset snapshot names that are created by ``update replica`` operation. Driver options ~~~~~~~~~~~~~~ .. include:: ../../tables/manila-zfs.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers/zfssa-manila-driver.rst0000664000175000017500000001113600000000000031752 0ustar00zuulzuul00000000000000=================================== Oracle ZFS Storage Appliance driver =================================== The Oracle ZFS Storage Appliance driver, version 1.0.0, enables the Oracle ZFS Storage Appliance (ZFSSA) to be used seamlessly as a shared storage resource for the OpenStack File System service (manila). The driver provides the ability to create and manage NFS and CIFS shares on the appliance, allowing virtual machines to access the shares simultaneously and securely. Requirements ~~~~~~~~~~~~ Oracle ZFS Storage Appliance Software version 2013.1.2.0 or later. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create NFS and CIFS shares. - Delete NFS and CIFS shares. - Allow or deny IP access to NFS shares. - Create snapshots of a share. - Delete snapshots of a share.
- Create share from snapshot. Restrictions ~~~~~~~~~~~~ - Access to CIFS shares are open and cannot be changed from manila. - Version 1.0.0 of the driver only supports Single SVM networking mode. Appliance configuration ~~~~~~~~~~~~~~~~~~~~~~~ #. Enable RESTful service on the ZFSSA Storage Appliance. #. Create a new user on the appliance with the following authorizations:: scope=stmf - allow_configure=true scope=nas - allow_clone=true, allow_createProject=true, allow_createShare=true, allow_changeSpaceProps=true, allow_changeGeneralProps=true, allow_destroy=true, allow_rollback=true, allow_takeSnap=true You can create a role with authorizations as follows:: zfssa:> configuration roles zfssa:configuration roles> role OpenStackRole zfssa:configuration roles OpenStackRole (uncommitted)> set description="OpenStack Manila Driver" zfssa:configuration roles OpenStackRole (uncommitted)> commit zfssa:configuration roles> select OpenStackRole zfssa:configuration roles OpenStackRole> authorizations create zfssa:configuration roles OpenStackRole auth (uncommitted)> set scope=stmf zfssa:configuration roles OpenStackRole auth (uncommitted)> set allow_configure=true zfssa:configuration roles OpenStackRole auth (uncommitted)> commit You can create a user with a specific role as follows:: zfssa:> configuration users zfssa:configuration users> user cinder zfssa:configuration users cinder (uncommitted)> set fullname="OpenStack Manila Driver" zfssa:configuration users cinder (uncommitted)> set initial_password=12345 zfssa:configuration users cinder (uncommitted)> commit zfssa:configuration users> select cinder set roles=OpenStackRole #. Create a storage pool. An existing pool can also be used if required. You can create a pool as follows:: zfssa:> configuration storage zfssa:configuration storage> config pool zfssa:configuration storage verify> set data=2 zfssa:configuration storage verify> done zfssa:configuration storage config> done #. Create a new project. 
You can create a project as follows:: zfssa:> shares zfssa:shares> project proj zfssa:shares proj (uncommitted)> commit #. Create a new or use an existing data IP address. You can create an interface as follows:: zfssa:> configuration net interfaces ip zfssa:configuration net interfaces ip (uncommitted)> set v4addrs=127.0.0.1/24 v4addrs = 127.0.0.1/24 (uncommitted) zfssa:configuration net interfaces ip (uncommitted)> set links=vnic1 links = vnic1 (uncommitted) zfssa:configuration net interfaces ip (uncommitted)> set admin=false admin = false (uncommitted) zfssa:configuration net interfaces ip (uncommitted)> commit It is required that both interfaces used for data and management are configured properly. The data interface must be different from the management interface. #. Configure the cluster. If a cluster is used as the manila storage resource, the following verifications are required: - Verify that both the newly created pool and the network interface are of type singleton and are not locked to the current controller. This approach ensures that the pool and the interface used for data always belong to the active controller, regardless of the current state of the cluster. - Verify that the management IP, data IP and storage pool belong to the same head. .. note:: A short service interruption occurs during failback or takeover, but once the process is complete, manila should be able to access the pool through the data IP. Driver options ~~~~~~~~~~~~~~ The Oracle ZFSSA driver supports these options: .. include:: ../../tables/manila-zfssa.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/drivers.rst0000664000175000017500000000450500000000000026076 0ustar00zuulzuul00000000000000.. _share_drivers: ============= Share drivers ============= .. sort by the drivers by open source software .. and the drivers for proprietary components .. 
toctree:: :maxdepth: 1 drivers/generic-driver.rst drivers/cephfs_driver.rst drivers/dell-emc-powerflex-driver.rst drivers/dell-emc-powermax-driver.rst drivers/dell-emc-unity-driver.rst drivers/dell-emc-vnx-driver.rst drivers/dell-emc-powerstore-driver.rst drivers/glusterfs-driver.rst drivers/glusterfs-native-driver.rst drivers/hdfs-native-driver.rst drivers/lvm-driver.rst drivers/zfs-on-linux-driver.rst drivers/zfssa-manila-driver.rst drivers/dell-emc-powerscale-driver.rst drivers/hitachi-hnas-driver.rst drivers/hitachi-hsp-driver.rst drivers/hpe-3par-share-driver.rst drivers/huawei-nas-driver.rst drivers/ibm-spectrumscale-driver.rst drivers/infinidat-share-driver.rst drivers/infortrend-nas-driver.rst drivers/maprfs-native-driver.rst drivers/netapp-cluster-mode-driver.rst drivers/quobyte-driver.rst drivers/windows-smb-driver.rst drivers/nexentastor5-driver.rst drivers/purestorage-flashblade-driver.rst drivers/vastdata_driver.rst To use different share drivers for the Shared File Systems service, use the parameters described in these sections. The Shared File Systems service can handle multiple drivers at once. The configuration for all of them follows a common paradigm: #. In the configuration file ``manila.conf``, configure the option ``enabled_share_backends`` with the list of names for your configuration. For example, if you want to enable two drivers and name them ``Driver1`` and ``Driver2``: .. code-block:: ini [DEFAULT] # ... enabled_share_backends = Driver1 Driver2 #. Configure a separate section for each driver using these names. You need to define in each section at least the option ``share_driver`` and assign it the value of your driver. In this example it is the generic driver: .. code-block:: ini [Driver1] share_driver = manila.share.drivers.generic.GenericShareDriver # ... [Driver2] share_driver = manila.share.drivers.generic.GenericShareDriver # ... The share drivers are included in the `Shared File Systems repository `_.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/log-files.rst0000664000175000017500000000157000000000000026300 0ustar00zuulzuul00000000000000===================================== Log files used by Shared File Systems ===================================== The corresponding log file of each Shared File Systems service is stored in the ``/var/log/manila/`` directory of the host on which each service runs. .. list-table:: Log files used by Shared File Systems services :header-rows: 1 * - Log file - Service/interface (for CentOS, Fedora and Red Hat Enterprise Linux) - Service/interface (for Ubuntu and Debian) * - ``api.log`` - ``openstack-manila-api`` - ``manila-api`` * - ``manila-manage.log`` - ``manila-manage`` - ``manila-manage`` * - ``scheduler.log`` - ``openstack-manila-scheduler`` - ``manila-scheduler`` * - ``share.log`` - ``openstack-manila-share`` - ``manila-share`` * - ``data.log`` - ``openstack-manila-data`` - ``manila-data`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/overview.rst0000664000175000017500000001051000000000000026257 0ustar00zuulzuul00000000000000=============================================== Introduction to the Shared File Systems service =============================================== The Shared File Systems service provides shared file systems that Compute instances can consume. The overall Shared File Systems service is implemented via the following specific services: manila-api A WSGI app that authenticates and routes requests throughout the Shared File Systems service. It supports the OpenStack APIs. manila-data A standalone service whose purpose is to receive requests, process data operations with potentially long running time such as copying, share migration or backup. 
manila-scheduler Schedules and routes requests to the appropriate share service. The scheduler uses configurable filters and weighers to route requests. The Filter Scheduler is the default and enables filters on things like Capacity, Availability Zone, Share Types, and Capabilities as well as custom filters. manila-share Manages back-end devices that provide shared file systems. A manila-share service can run in one of two modes, with or without handling of share servers. Share servers export file shares via share networks. When share servers are not used, the networking requirements are handled outside of Manila. The Shared File Systems service contains the following components: **Back-end storage devices** The Shared File Systems service requires some form of back-end shared file system provider that the service is built on. The reference implementation uses the Block Storage service (Cinder) and a service VM to provide shares. Additional drivers are used to access shared file systems from a variety of vendor solutions. **Users and tenants (projects)** The Shared File Systems service can be used by many different cloud computing consumers or customers (tenants on a shared system), using role-based access assignments. Roles control the actions that a user is allowed to perform. In the default configuration, most actions do not require a particular role unless they are restricted to administrators, but this can be configured by the system administrator in the appropriate ``policy.yaml`` file that maintains the rules. A user's access to manage particular shares is limited by tenant. Guest access to mount and use shares is secured by IP and/or user access rules. Quotas used to control resource consumption across available hardware resources are per tenant. For tenants, quota controls are available to limit: - The number of shares that can be created. - The number of gigabytes that can be provisioned for shares. - The number of share snapshots that can be created.
- The number of gigabytes that can be provisioned for share snapshots. - The number of share networks that can be created. - The number of share groups that can be created. - The number of share group snapshots that can be created. - The number of share replicas that can be created. - The number of gigabytes that can be provisioned for share replicas. - The number of gigabytes that can be provisioned for each share. You can revise the default quota values with the Shared File Systems CLI, so the limits placed by quotas are editable by admin users. **Shares, snapshots, and share networks** The basic resources offered by the Shared File Systems service are shares, snapshots and share networks: **Shares** A share is a unit of storage with a protocol, a size, and an access list. Shares are the basic primitive provided by Manila. All shares exist on a backend. Some shares are associated with share networks and share servers. The main protocols supported are NFS and CIFS, but other protocols are supported as well. **Snapshots** A snapshot is a point in time copy of a share. Snapshots can only be used to create new shares (containing the snapshotted data). Shares cannot be deleted until all associated snapshots are deleted. **Share networks** A share network is a tenant-defined object that informs Manila about the security and network configuration for a group of shares. Share networks are only relevant for backends that manage share servers. A share network contains a security service and network/subnet. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7856734 manila-21.0.0/doc/source/configuration/shared-file-systems/samples/0000775000175000017500000000000000000000000025326 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/samples/api-paste.ini.rst0000664000175000017500000000033700000000000030524 0ustar00zuulzuul00000000000000============= api-paste.ini ============= The shared file systems service stores its API configuration settings in the ``api-paste.ini`` file. .. literalinclude:: ../../../../../etc/manila/api-paste.ini :language: ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/samples/index.rst0000664000175000017500000000054400000000000027172 0ustar00zuulzuul00000000000000====================================================== Shared File Systems service sample configuration files ====================================================== All the files in this section can be found in ``/etc/manila``. .. toctree:: :maxdepth: 1 manila.conf.rst api-paste.ini.rst rootwrap.conf.rst policy.rst sample_policy.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/samples/manila.conf.rst0000664000175000017500000000103400000000000030243 0ustar00zuulzuul00000000000000=========== manila.conf =========== The ``manila.conf`` file is installed in ``/etc/manila`` by default. When you manually install the Shared File Systems service, the options in the ``manila.conf`` file are set to default values. The ``manila.conf`` file contains most of the options needed to configure the Shared File Systems service. .. only:: html .. 
literalinclude:: ../../../_static/manila.conf.sample :language: ini .. only:: latex See the online version of this documentation for the full config file example. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/samples/policy.rst0000664000175000017500000000127700000000000027366 0ustar00zuulzuul00000000000000==================== Policy configuration ==================== .. warning:: JSON formatted policy file is deprecated since Manila 12.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html Configuration ~~~~~~~~~~~~~ .. only:: html The following is an overview of all available policies in Manila. .. show-policy:: :config-file: etc/manila/manila-policy-generator.conf .. only:: latex See the online version of this documentation for the list of available policies in Manila. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/samples/rootwrap.conf.rst0000664000175000017500000000045700000000000030667 0ustar00zuulzuul00000000000000============= rootwrap.conf ============= The ``rootwrap.conf`` file defines configuration values used by the ``rootwrap`` script when the Shared File Systems service must escalate its privileges to those of the root user. .. 
literalinclude:: ../../../../../etc/manila/rootwrap.conf :language: ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/shared-file-systems/samples/sample_policy.rst0000664000175000017500000000231300000000000030717 0ustar00zuulzuul00000000000000==================== Manila Sample Policy ==================== .. warning:: JSON formatted policy file is deprecated since Manila 12.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html The following is a sample Manila policy file that has been auto-generated from default policy values in code. If you're using the default policies, then the maintenance of this file is not necessary. It is here to help explain which policy operations protect specific Manila API, but it is not suggested to copy and paste into a deployment unless you're planning on providing a different policy for an operation that is not the default. For instance, if you want to change the default value of "share:create", you only need to keep this single rule in your policy config file (**/etc/manila/policy.yaml**). .. only:: html .. literalinclude:: ../../../_static/manila.policy.yaml.sample :language: ini .. only:: latex See the online version of this documentation for the sample file (``manila.policy.yaml.sample``). 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.7936733 manila-21.0.0/doc/source/configuration/tables/0000775000175000017500000000000000000000000021244 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-api.inc0000664000175000017500000000635700000000000023762 0ustar00zuulzuul00000000000000.. _manila-api: .. list-table:: Description of API configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``admin_network_config_group`` = ``None`` - (String) If share driver requires to setup admin network for share, then define network plugin config options in some separate config group and set its name here. Used only with another option 'driver_handles_share_servers' set to 'True'. * - ``admin_network_id`` = ``None`` - (String) ID of neutron network used to communicate with admin network, to create additional admin export locations on. * - ``admin_subnet_id`` = ``None`` - (String) ID of neutron subnet used to communicate with admin network, to create additional admin export locations on. Related to 'admin_network_id'. * - ``api_paste_config`` = ``api-paste.ini`` - (String) File name for the paste.deploy config for manila-api. * - ``api_rate_limit`` = ``True`` - (Boolean) Whether to rate limit the API. * - ``db_backend`` = ``sqlalchemy`` - (String) The backend to use for database. * - ``max_header_line`` = ``16384`` - (Integer) Maximum line size of message headers to be accepted. Option max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs). * - ``osapi_max_limit`` = ``1000`` - (Integer) The maximum number of items returned in a single response from a collection resource. 
* - ``osapi_share_base_URL`` = ``None`` - (String) Base URL to be presented to users in links to the Share API * - ``osapi_share_ext_list`` = - (List) Specify list of extensions to load when using osapi_share_extension option with manila.api.contrib.select_extensions. * - ``osapi_share_extension`` = ``manila.api.contrib.standard_extensions`` - (List) The osapi share extensions to load. * - ``osapi_share_listen`` = ``::`` - (String) IP address for OpenStack Share API to listen on. * - ``osapi_share_listen_port`` = ``8786`` - (Port number) Port for OpenStack Share API to listen on. * - ``osapi_share_workers`` = ``1`` - (Integer) Number of workers for OpenStack Share API service. * - ``share_api_class`` = ``manila.share.api.API`` - (String) The full class name of the share API class to use. * - ``volume_api_class`` = ``manila.volume.cinder.API`` - (String) The full class name of the Volume API class to use. * - ``volume_name_template`` = ``manila-share-%s`` - (String) Volume name template. * - ``volume_snapshot_name_template`` = ``manila-snapshot-%s`` - (String) Volume snapshot name template. * - **[oslo_middleware]** - * - ``enable_proxy_headers_parsing`` = ``False`` - (Boolean) Whether the application is behind a proxy or not. This determines if the middleware should parse the headers or not. * - ``max_request_body_size`` = ``114688`` - (Integer) The maximum body size for each request, in bytes. * - ``secure_proxy_ssl_header`` = ``X-Forwarded-Proto`` - (String) DEPRECATED: The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by a SSL termination proxy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-ca.inc0000664000175000017500000000141100000000000023556 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. 
It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-ca: .. list-table:: Description of Certificate Authority configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``osapi_share_use_ssl`` = ``False`` - (Boolean) Wraps the socket in a SSL context if True is set. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-cephfs.inc0000664000175000017500000000551600000000000024455 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-cephfs: .. list-table:: Description of CephFS share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``cephfs_auth_id`` = manila - (String) The name of the ceph auth identity to use. * - ``cephfs_cluster_name`` = ceph - (String) The name of the cluster in use, if it is not the default ('ceph'). * - ``cephfs_conf_path`` = /etc/ceph/ceph.conf - (String) Fully qualified path to the ceph.conf file. * - ``cephfs_protocol_helper_type`` = CEPHFS - (String) The type of protocol helper to use. Default is CEPHFS. 
* - ``cephfs_ganesha_server_is_remote`` = False - (Boolean) Whether the NFS-Ganesha server is remote to the driver. * - ``cephfs_ganesha_server_ip`` = None - (String) The IP address of the NFS-Ganesha server. * - ``cephfs_protocol_helper_type`` = CEPHFS - (String) The type of protocol helper to use. Default is CEPHFS. * - ``cephfs_ganesha_server_username`` = root - (String) The username to authenticate as in the remote NFS-Ganesha server host. * - ``cephfs_ganesha_path_to_private_key`` = None - (String) The path of the driver host's private SSH key file. * - ``cephfs_ganesha_server_password`` = None - (String) The password to authenticate as the user in the remote Ganesha server host. This is not required if 'cephfs_ganesha_path_to_private_key' is configured. * - ``cephfs_ganesha_export_ips`` = [] - (String) List of IPs to export shares. If not supplied, then the value of 'cephfs_ganesha_server_ip' will be used to construct share export locations. * - ``cephfs_volume_mode`` = 755 - (String) The read/write/execute permissions mode for CephFS volumes, snapshots, and snapshot groups expressed in Octal as with linux 'chmod' or 'umask' commands. * - ``cephfs_filesystem_name`` = None - (String) The name of the filesystem to use, if there are multiple filesystems in the cluster." * - ``cephfs_ensure_all_shares_salt`` = manila_cephfs_reef_caracal - (String) Provide a unique string value to make the driver ensure all of the shares it has created during startup. Ensuring would re-export shares and this action isn't always required, unless something has been administratively modified on CephFS. * - ``cephfs_nfs_cluster_id`` = None - (String) The ID of the NFS cluster to use. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-common.inc0000664000175000017500000002170100000000000024467 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. 
It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-common: .. list-table:: Description of Common configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``check_hash`` = ``False`` - (Boolean) Chooses whether hash of each file should be checked on data copying. * - ``client_socket_timeout`` = ``900`` - (Integer) Timeout for client connections socket operations. If an incoming connection is idle for this number of seconds it will be closed. A value of '0' means wait forever. * - ``compute_api_class`` = ``manila.compute.nova.API`` - (String) The full class name of the Compute API class to use. * - ``data_access_wait_access_rules_timeout`` = ``180`` - (Integer) Time to wait for access rules to be allowed/denied on backends when migrating a share (seconds). * - ``data_manager`` = ``manila.data.manager.DataManager`` - (String) Full class name for the data manager. * - ``data_node_access_admin_user`` = ``None`` - (String) The admin user name registered in the security service in order to allow access to user authentication-based shares. * - ``data_node_access_cert`` = ``None`` - (String) The certificate installed in the data node in order to allow access to certificate authentication-based shares. * - ``data_node_access_ips`` = ``None`` - (String) A list of the IPs of the node interface connected to the admin network. Used for allowing access to the mounting shares. Default is []. * - ``data_node_mount_options`` = ``{}`` - (Dict) Mount options to be included in the mount command for share protocols. 
Use dictionary format, example: {'nfs': '-o nfsvers=3', 'cifs': '-o user=foo,pass=bar'} * - ``data_topic`` = ``manila-data`` - (String) The topic data nodes listen on. * - ``enable_new_services`` = ``True`` - (Boolean) Services to be added to the available pool on create. * - ``fatal_exception_format_errors`` = ``False`` - (Boolean) Whether to make exception message format errors fatal. * - ``filter_function`` = ``None`` - (String) String representation for an equation that will be used to filter hosts. * - ``host`` = ```` - (String) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address. * - ``max_over_subscription_ratio`` = ``20.0`` - (Floating point) Float representation of the over subscription ratio when thin provisioning is involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times the total physical capacity. If the ratio is 10.5, it means provisioned capacity can be 10.5 times the total physical capacity. A ratio of 1.0 means provisioned capacity cannot exceed the total physical capacity. A ratio lower than 1.0 is invalid. * - ``memcached_servers`` = ``None`` - (List) Memcached servers or None for in process cache. * - ``monkey_patch`` = ``False`` - (Boolean) Whether to log monkey patching. * - ``monkey_patch_modules`` = - (List) List of modules or decorators to monkey patch. * - ``mount_tmp_location`` = ``/tmp/`` - (String) Temporary path to create and mount shares during migration. * - ``my_ip`` = ```` - (String) IP address of this host. * - ``num_shell_tries`` = ``3`` - (Integer) Number of times to attempt to run flakey shell commands. * - ``periodic_fuzzy_delay`` = ``60`` - (Integer) Range of seconds to randomly delay when starting the periodic task scheduler to reduce stampeding. (Disable by setting to 0) * - ``periodic_hooks_interval`` = ``300.0`` - (Floating point) Interval in seconds between execution of periodic hooks. Used when option 'enable_periodic_hooks' is set to True. 
Default is 300. * - ``periodic_interval`` = ``60`` - (Integer) Seconds between running periodic tasks. * - ``replica_state_update_interval`` = ``300`` - (Integer) This value, specified in seconds, determines how often the share manager will poll for the health (replica_state) of each replica instance. * - ``replication_domain`` = ``None`` - (String) A string specifying the replication domain that the backend belongs to. This option needs to be specified the same in the configuration sections of all backends that support replication between each other. If this option is not specified in the group, it means that replication is not enabled on the backend. * - ``report_interval`` = ``10`` - (Integer) Seconds between nodes reporting state to datastore. * - ``reserved_share_percentage`` = ``0`` - (Integer) The percentage of backend capacity reserved. * - ``rootwrap_config`` = ``None`` - (String) Path to the rootwrap configuration file to use for running commands as root. * - ``service_down_time`` = ``60`` - (Integer) Maximum time since last check-in for up service. * - ``smb_template_config_path`` = ``$state_path/smb.conf`` - (String) Path to smb config. * - ``sql_idle_timeout`` = ``3600`` - (Integer) Timeout before idle SQL connections are reaped. * - ``sql_max_retries`` = ``10`` - (Integer) Maximum database connection retries during startup. (setting -1 implies an infinite retry count). * - ``sql_retry_interval`` = ``10`` - (Integer) Interval between retries of opening a SQL connection. * - ``sqlite_clean_db`` = ``clean.sqlite`` - (String) File name of clean sqlite database. * - ``sqlite_db`` = ``manila.sqlite`` - (String) The filename to use with sqlite. * - ``sqlite_synchronous`` = ``True`` - (Boolean) If passed, use synchronous mode for sqlite. * - ``state_path`` = ``/var/lib/manila`` - (String) Top-level directory for maintaining manila's state. * - ``storage_availability_zone`` = ``nova`` - (String) Availability zone of this node. 
* - ``tcp_keepalive`` = ``True`` - (Boolean) Sets the value of TCP_KEEPALIVE (True/False) for each server socket. * - ``tcp_keepalive_count`` = ``None`` - (Integer) Sets the value of TCP_KEEPCNT for each server socket. Not supported on OS X. * - ``tcp_keepalive_interval`` = ``None`` - (Integer) Sets the value of TCP_KEEPINTVL in seconds for each server socket. Not supported on OS X. * - ``tcp_keepidle`` = ``600`` - (Integer) Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X. * - ``until_refresh`` = ``0`` - (Integer) Count of reservations until usage is refreshed. * - ``use_forwarded_for`` = ``False`` - (Boolean) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. * - ``wsgi_keep_alive`` = ``True`` - (Boolean) If False, closes the client socket connection explicitly. Setting it to True to maintain backward compatibility. Recommended setting is set it to False. * - ``admin_only_metadata`` = ``__affinity_same_host,__affinity_different_host`` - (List) The affinity keys are default to ensure backwards compatibility. Update the list with metadata items that should only be manipulated by people allowed by the "update_admin_only_metadata" policy. * - ``driver_updatable_metadata`` = ``None`` - (List) Metadata keys that will decide which share metadata can be passed to share drivers as part of metadata create/update operations. * - ``driver_updatable_subnet_metadata`` = ``None`` - (List) Metadata keys that will decide which share network_subnet_metadata can be passed to share drivers as part of metadata create/update operations. * - **[coordination]** - * - ``backend_url`` = ``file://$state_path`` - (String) The back end URL to use for distributed coordination. * - **[healthcheck]** - * - ``backends`` = - (List) Additional backends that can perform health checks and report that information back as part of a request. 
* - ``detailed`` = ``False`` - (Boolean) Show more detailed information as part of the response * - ``disable_by_file_path`` = ``None`` - (String) Check the presence of a file to determine if an application is running on a port. Used by DisableByFileHealthcheck plugin. * - ``disable_by_file_paths`` = - (List) Check the presence of a file based on a port to determine if an application is running on a port. Expects a "port:path" list of strings. Used by DisableByFilesPortsHealthcheck plugin. * - ``path`` = ``/healthcheck`` - (String) DEPRECATED: The path to respond to healtcheck requests on. * - ``update_shares_status_on_ensure`` = ``True`` - (Boolean) Defines whether Manila should update the status of the shares when ensuring shares or not. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-compute.inc0000664000175000017500000000114100000000000024647 0ustar00zuulzuul00000000000000.. Note: this file is actually not auto-generated. See https://bugs.launchpad.net/manila/+bug/1713062. .. _manila-compute: .. list-table:: Description of Compute configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[nova]** - * - ``api_microversion`` = ``2.10`` - (String) Version of Nova API to be used. * - ``endpoint_type`` = ``publicURL`` - (String) Endpoint type to be used with nova client calls. * - ``region_name`` = ``None`` - (String) Region name for connecting to nova. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-emc.inc0000664000175000017500000000266400000000000023752 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. 
The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-emc: .. list-table:: Description of EMC share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``emc_nas_login`` = ``None`` - (String) User name for the EMC server. * - ``emc_nas_password`` = ``None`` - (String) Password for the EMC server. * - ``emc_nas_root_dir`` = ``None`` - (String) The root directory where shares will be located. * - ``emc_nas_server`` = ``None`` - (String) EMC server hostname or IP address. * - ``emc_nas_server_container`` = ``None`` - (String) DEPRECATED: Storage processor to host the NAS server. Obsolete. Unity driver supports nas server auto load balance. * - ``emc_nas_server_port`` = ``8080`` - (Port number) Port number for the EMC server. * - ``emc_nas_server_secure`` = ``True`` - (Boolean) Use secure connection to server. * - ``emc_share_backend`` = ``None`` - (String) Share backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-ganesha.inc0000664000175000017500000000270000000000000024603 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-ganesha: .. 
list-table:: Description of Ganesha configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``ganesha_config_dir`` = ``/etc/ganesha`` - (String) Directory where Ganesha config files are stored. * - ``ganesha_config_path`` = ``$ganesha_config_dir/ganesha.conf`` - (String) Path to main Ganesha config file. * - ``ganesha_db_path`` = ``$state_path/manila-ganesha.db`` - (String) Location of Ganesha database file. (Ganesha module only.) * - ``ganesha_export_dir`` = ``$ganesha_config_dir/export.d`` - (String) Path to directory containing Ganesha export configuration. (Ganesha module only.) * - ``ganesha_export_template_dir`` = ``/etc/manila/ganesha-export-templ.d`` - (String) Path to directory containing Ganesha export block templates. (Ganesha module only.) * - ``ganesha_service_name`` = ``ganesha.nfsd`` - (String) Name of the ganesha nfs service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-generic.inc0000664000175000017500000004123100000000000024613 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-generic: .. list-table:: Description of Generic share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``connect_share_server_to_tenant_network`` = ``False`` - (Boolean) Attach share server directly to share network. Used only with Neutron and if driver_handles_share_servers=True. 
* - ``container_volume_group`` = ``manila_docker_volumes`` - (String) LVM volume group to use for volumes. This volume group must be created by the cloud administrator independently from manila operations. * - ``driver_handles_share_servers`` = ``None`` - (Boolean) There are two possible approaches for share drivers in Manila. First is when share driver is able to handle share-servers and second when not. Drivers can support either both or only one of these approaches. So, set this opt to True if share driver is able to handle share servers and it is desired mode else set False. It is set to None by default to make this choice intentional. * - ``goodness_function`` = ``None`` - (String) String representation for an equation that will be used to determine the goodness of a host. * - ``interface_driver`` = ``manila.network.linux.interface.OVSInterfaceDriver`` - (String) Vif driver. Used only with Neutron and if driver_handles_share_servers=True. * - ``manila_service_keypair_name`` = ``manila-service`` - (String) Keypair name that will be created and used for service instances. Only used if driver_handles_share_servers=True. * - ``max_time_to_attach`` = ``120`` - (Integer) Maximum time to wait for attaching cinder volume. * - ``max_time_to_build_instance`` = ``300`` - (Integer) Maximum time in seconds to wait for creating service instance. * - ``max_time_to_create_volume`` = ``180`` - (Integer) Maximum time to wait for creating cinder volume. * - ``max_time_to_extend_volume`` = ``180`` - (Integer) Maximum time to wait for extending cinder volume. * - ``ovs_integration_bridge`` = ``br-int`` - (String) Name of Open vSwitch bridge to use. * - ``path_to_private_key`` = ``None`` - (String) Path to host's private key. * - ``path_to_public_key`` = ``~/.ssh/id_rsa.pub`` - (String) Path to hosts public key. Only used if driver_handles_share_servers=True. * - ``protocol_access_mapping`` = ``{'ip': ['nfs'], 'user': ['cifs']}`` - (Dict) Protocol access mapping for this backend. 
Should be a dictionary comprised of {'access_type1': ['share_proto1', 'share_proto2'], 'access_type2': ['share_proto2', 'share_proto3']}. * - ``service_image_name`` = ``manila-service-image`` - (String) Name of image in Glance, that will be used for service instance creation. Only used if driver_handles_share_servers=True. * - ``service_instance_flavor_id`` = ``100`` - (String) ID of flavor, that will be used for service instance creation. Only used if driver_handles_share_servers=True. * - ``service_instance_name_or_id`` = ``None`` - (String) Name or ID of service instance in Nova to use for share exports. Used only when share servers handling is disabled. * - ``service_instance_name_template`` = ``%s`` - (String) Name of service instance. Only used if driver_handles_share_servers=True. * - ``service_instance_network_helper_type`` = ``neutron`` - (String) DEPRECATED: Used to select between neutron and nova helpers when driver_handles_share_servers=True. Obsolete. This option isn't used any longer because nova networking is no longer supported. * - ``service_instance_password`` = ``None`` - (String) Password for service instance user. * - ``service_instance_security_group`` = ``manila-service`` - (String) Security group name, that will be used for service instance creation. Only used if driver_handles_share_servers=True. * - ``service_instance_smb_config_path`` = ``$share_mount_path/smb.conf`` - (String) Path to SMB config in service instance. * - ``service_instance_user`` = ``None`` - (String) User in service instance that will be used for authentication. * - ``service_net_name_or_ip`` = ``None`` - (String) Can be either name of network that is used by service instance within Nova to get IP address or IP address itself for managing shares there. Used only when share servers handling is disabled. * - ``service_network_cidr`` = ``10.254.0.0/16`` - (String) CIDR of manila service network. Used only with Neutron and if driver_handles_share_servers=True. 
* - ``service_network_division_mask`` = ``28`` - (Integer) This mask is used for dividing service network into subnets, IP capacity of subnet with this mask directly defines possible amount of created service VMs per tenant's subnet. Used only with Neutron and if driver_handles_share_servers=True. * - ``service_network_name`` = ``manila_service_network`` - (String) Name of manila service network. Used only with Neutron. Only used if driver_handles_share_servers=True. * - ``share_helpers`` = ``CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess, NFS=manila.share.drivers.helpers.NFSHelper`` - (List) Specify list of share export helpers. * - ``share_mount_path`` = ``/shares`` - (String) Parent path in service instance where shares will be mounted. * - ``share_mount_template`` = ``mount -vt %(proto)s %(options)s %(export)s %(path)s`` - (String) The template for mounting shares for this backend. Must specify the executable with all necessary parameters for the protocol supported. 'proto' template element may not be required if included in the command. 'export' and 'path' template elements are required. It is advisable to separate different commands per backend. * - ``share_unmount_template`` = ``umount -v %(path)s`` - (String) The template for unmounting shares for this backend. Must specify the executable with all necessary parameters for the protocol supported. 'path' template element is required. It is advisable to separate different commands per backend. * - ``share_volume_fstype`` = ``ext4`` - (String) Filesystem type of the share volume. * - ``tenant_net_name_or_ip`` = ``None`` - (String) Can be either name of network that is used by service instance within Nova to get IP address or IP address itself for exporting shares. Used only when share servers handling is disabled. * - ``volume_name_template`` = ``manila-share-%s`` - (String) Volume name template. * - ``volume_snapshot_name_template`` = ``manila-snapshot-%s`` - (String) Volume snapshot name template. 
* - **[glance]** - * - ``api_microversion`` = ``2`` - (String) Version of Glance API to be used. * - ``region_name`` = ``RegionOne`` - (String) Region name for connecting to glance. * - ``auth_url`` = ``None`` - (String) Authentication URL * - ``auth_type`` = ``None`` - (String) Authentication type to load * - ``cafile`` = ``None`` - (String) PEM encoded Certificate Authority to use when verifying HTTPs connections. * - ``certfile`` = ``None`` - (String) PEM encoded client certificate cert file * - ``collect_timing`` = ``false`` - (Boolean) Collect per-API call timing information. * - ``default_domain_id`` = ``None`` - (String) Optional domain ID to use with v3 and v2 parameters. It will be used for both the user and project domain in v3 and ignored in v2 authentication * - ``default_domain_name`` = ``None`` - (String) Optional domain name to use with v3 API and v2 parameters. It will be used for both the user and project domain in v3 and ignored in v2 authentication. * - ``domain_id`` = ``None`` - (String) Domain ID to scope to * - ``domain_name`` = ``None`` - (String) Domain name to scope to * - ``insecure`` = ``false`` - (Boolean) Verify HTTPS connections. * - ``keyfile`` = ``None`` - (String) PEM encoded client certificate key file * - ``password`` = ``None`` - (String) User's password. * - ``project_domain_id`` = ``None`` - (String) Domain ID containing project * - ``project_domain_name`` = ``None`` - (String) Domain name containing project * - ``project_id`` = ``None`` - (String) Project ID to scope to * - ``project_name`` = ``None`` - (String) Project name to scope to * - ``split_loggers`` = ``false`` - (Boolean) Log requests to multiple loggers. 
* - ``system_scope`` = ``None`` - (String) Scope for system operations * - ``timeout`` = ``None`` - (Integer) Timeout value for http requests * - ``trust_id`` = ``None`` - (String) Trust ID * - ``user_domain_id`` = ``None`` - (String) User's domain id * - ``user_domain_name`` = ``None`` - (String) User's domain name * - ``user_id`` = ``None`` - (String) User id * - ``username`` = ``None`` - (String) Username * - **[cinder]** - * - ``cross_az_attach`` = ``True`` - (Boolean) Allow attaching between instances and volumes in different availability zones. * - ``http_retries`` = ``3`` - (Integer) Number of cinderclient retries on failed HTTP calls. * - ``endpoint_type`` = ``publicURL`` - (String) Endpoint type to be used with cinder client calls. * - ``region_name`` = ``RegionOne`` - (String) Region name for connecting to cinder. * - ``auth_url`` = ``None`` - (String) Authentication URL * - ``auth_type`` = ``None`` - (String) Authentication type to load * - ``cafile`` = ``None`` - (String) PEM encoded Certificate Authority to use when verifying HTTPs connections. * - ``certfile`` = ``None`` - (String) PEM encoded client certificate cert file * - ``collect_timing`` = ``false`` - (Boolean) Collect per-API call timing information. * - ``default_domain_id`` = ``None`` - (String) Optional domain ID to use with v3 and v2 parameters. It will be used for both the user and project domain in v3 and ignored in v2 authentication * - ``default_domain_name`` = ``None`` - (String) Optional domain name to use with v3 API and v2 parameters. It will be used for both the user and project domain in v3 and ignored in v2 authentication. * - ``domain_id`` = ``None`` - (String) Domain ID to scope to * - ``domain_name`` = ``None`` - (String) Domain name to scope to * - ``insecure`` = ``false`` - (Boolean) Verify HTTPS connections. * - ``keyfile`` = ``None`` - (String) PEM encoded client certificate key file * - ``password`` = ``None`` - (String) User's password. 
* - ``project_domain_id`` = ``None`` - (String) Domain ID containing project * - ``project_domain_name`` = ``None`` - (String) Domain name containing project * - ``project_id`` = ``None`` - (String) Project ID to scope to * - ``project_name`` = ``None`` - (String) Project name to scope to * - ``split_loggers`` = ``false`` - (Boolean) Log requests to multiple loggers. * - ``system_scope`` = ``None`` - (String) Scope for system operations * - ``timeout`` = ``None`` - (Integer) Timeout value for http requests * - ``trust_id`` = ``None`` - (String) Trust ID * - ``user_domain_id`` = ``None`` - (String) User's domain id * - ``user_domain_name`` = ``None`` - (String) User's domain name * - ``user_id`` = ``None`` - (String) User id * - ``username`` = ``None`` - (String) Username * - **[neutron]** - * - ``url`` = ``http://127.0.0.1:9696`` - (String) URL for connecting to neutron. * - ``url_timeout`` = ``30`` - (Integer) Timeout value for connecting to neutron in seconds. * - ``auth_strategy`` = ``keystone`` - (String) Auth strategy for connecting to neutron in admin context. * - ``endpoint_type`` = ``publicURL`` - (String) Endpoint type to be used with neutron client calls. * - ``region_name`` = ``None`` - (String) Region name for connecting to neutron in admin context. * - ``auth_url`` = ``None`` - (String) Authentication URL * - ``auth_type`` = ``None`` - (String) Authentication type to load * - ``cafile`` = ``None`` - (String) PEM encoded Certificate Authority to use when verifying HTTPs connections. * - ``certfile`` = ``None`` - (String) PEM encoded client certificate cert file * - ``collect_timing`` = ``false`` - (Boolean) Collect per-API call timing information. * - ``default_domain_id`` = ``None`` - (String) Optional domain ID to use with v3 and v2 parameters. It will be used for both the user and project domain in v3 and ignored in v2 authentication * - ``default_domain_name`` = ``None`` - (String) Optional domain name to use with v3 API and v2 parameters. 
It will be used for both the user and project domain in v3 and ignored in v2 authentication. * - ``domain_id`` = ``None`` - (String) Domain ID to scope to * - ``domain_name`` = ``None`` - (String) Domain name to scope to * - ``insecure`` = ``false`` - (Boolean) Verify HTTPS connections. * - ``keyfile`` = ``None`` - (String) PEM encoded client certificate key file * - ``password`` = ``None`` - (String) User's password. * - ``project_domain_id`` = ``None`` - (String) Domain ID containing project * - ``project_domain_name`` = ``None`` - (String) Domain name containing project * - ``project_id`` = ``None`` - (String) Project ID to scope to * - ``project_name`` = ``None`` - (String) Project name to scope to * - ``split_loggers`` = ``false`` - (Boolean) Log requests to multiple loggers. * - ``system_scope`` = ``None`` - (String) Scope for system operations * - ``timeout`` = ``None`` - (Integer) Timeout value for http requests * - ``trust_id`` = ``None`` - (String) Trust ID * - ``user_domain_id`` = ``None`` - (String) User's domain id * - ``user_domain_name`` = ``None`` - (String) User's domain name * - ``user_id`` = ``None`` - (String) User id * - ``username`` = ``None`` - (String) Username * - **[nova]** - * - ``api_microversion`` = ``2.10`` - (String) Version of Nova API to be used. * - ``endpoint_type`` = ``publicURL`` - (String) Endpoint type to be used with nova client calls. * - ``region_name`` = ``None`` - (String) Region name for connecting to nova. * - ``auth_url`` = ``None`` - (String) Authentication URL * - ``auth_type`` = ``None`` - (String) Authentication type to load * - ``cafile`` = ``None`` - (String) PEM encoded Certificate Authority to use when verifying HTTPs connections. * - ``certfile`` = ``None`` - (String) PEM encoded client certificate cert file * - ``collect_timing`` = ``false`` - (Boolean) Collect per-API call timing information. * - ``default_domain_id`` = ``None`` - (String) Optional domain ID to use with v3 and v2 parameters. 
It will be used for both the user and project domain in v3 and ignored in v2 authentication * - ``default_domain_name`` = ``None`` - (String) Optional domain name to use with v3 API and v2 parameters. It will be used for both the user and project domain in v3 and ignored in v2 authentication. * - ``domain_id`` = ``None`` - (String) Domain ID to scope to * - ``domain_name`` = ``None`` - (String) Domain name to scope to * - ``insecure`` = ``false`` - (Boolean) Verify HTTPS connections. * - ``keyfile`` = ``None`` - (String) PEM encoded client certificate key file * - ``password`` = ``None`` - (String) User's password. * - ``project_domain_id`` = ``None`` - (String) Domain ID containing project * - ``project_domain_name`` = ``None`` - (String) Domain name containing project * - ``project_id`` = ``None`` - (String) Project ID to scope to * - ``project_name`` = ``None`` - (String) Project name to scope to * - ``split_loggers`` = ``false`` - (Boolean) Log requests to multiple loggers. * - ``system_scope`` = ``None`` - (String) Scope for system operations * - ``timeout`` = ``None`` - (Integer) Timeout value for http requests * - ``trust_id`` = ``None`` - (String) Trust ID * - ``user_domain_id`` = ``None`` - (String) User's domain id * - ``user_domain_name`` = ``None`` - (String) User's domain name * - ``user_id`` = ``None`` - (String) User id * - ``username`` = ``None`` - (String) Username ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-glusterfs.inc0000664000175000017500000000552200000000000025220 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. 
Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-glusterfs: .. list-table:: Description of GlusterFS share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``glusterfs_ganesha_server_ip`` = ``None`` - (String) Remote Ganesha server node's IP address. * - ``glusterfs_ganesha_server_password`` = ``None`` - (String) Remote Ganesha server node's login password. This is not required if 'glusterfs_path_to_private_key' is configured. * - ``glusterfs_ganesha_server_username`` = ``root`` - (String) Remote Ganesha server node's username. * - ``glusterfs_mount_point_base`` = ``$state_path/mnt`` - (String) Base directory containing mount points for Gluster volumes. * - ``glusterfs_nfs_server_type`` = ``Gluster`` - (String) Type of NFS server that mediate access to the Gluster volumes (Gluster or Ganesha). * - ``glusterfs_path_to_private_key`` = ``None`` - (String) Path of Manila host's private SSH key file. * - ``glusterfs_server_password`` = ``None`` - (String) Remote GlusterFS server node's login password. This is not required if 'glusterfs_path_to_private_key' is configured. * - ``glusterfs_servers`` = - (List) List of GlusterFS servers that can be used to create shares. Each GlusterFS server should be of the form [remoteuser@], and they are assumed to belong to distinct Gluster clusters. * - ``glusterfs_share_layout`` = ``None`` - (String) Specifies GlusterFS share layout, that is, the method of associating backing GlusterFS resources to shares. * - ``glusterfs_target`` = ``None`` - (String) Specifies the GlusterFS volume to be mounted on the Manila host. It is of the form [remoteuser@]:. 
* - ``glusterfs_volume_pattern`` = ``None`` - (String) Regular expression template used to filter GlusterFS volumes for share creation. The regex template can optionally (ie. with support of the GlusterFS backend) contain the #{size} parameter which matches an integer (sequence of digits) in which case the value shall be interpreted as size of the volume in GB. Examples: "manila-share-volume-\d+$", "manila-share-volume-#{size}G-\d+$"; with matching volume names, respectively: "manila-share-volume-12", "manila-share-volume-3G-13". In latter example, the number that matches "#{size}", that is, 3, is an indication that the size of volume is 3G. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-hdfs.inc0000664000175000017500000000237100000000000024125 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-hdfs: .. list-table:: Description of HDFS share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``hdfs_namenode_ip`` = ``None`` - (String) The IP of the HDFS namenode. * - ``hdfs_namenode_port`` = ``9000`` - (Port number) The port of HDFS namenode service. * - ``hdfs_ssh_name`` = ``None`` - (String) HDFS namenode ssh login name. * - ``hdfs_ssh_port`` = ``22`` - (Port number) HDFS namenode SSH port. * - ``hdfs_ssh_private_key`` = ``None`` - (String) Path to HDFS namenode SSH private key for login. 
* - ``hdfs_ssh_pw`` = ``None`` - (String) HDFS namenode SSH login password, This parameter is not necessary, if 'hdfs_ssh_private_key' is configured. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-hds_hnas.inc0000664000175000017500000000522700000000000024773 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-hds_hnas: .. list-table:: Description of HDS NAS share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``hitachi_hnas_admin_network_ip`` = ``None`` - (String) Specify IP for mounting shares in the Admin network. * - ``hitachi_hnas_allow_cifs_snapshot_while_mounted`` = ``False`` - (Boolean) By default, CIFS snapshots are not allowed to be taken when the share has clients connected because consistent point-in-time replica cannot be guaranteed for all files. Enabling this might cause inconsistent snapshots on CIFS shares. * - ``hitachi_hnas_cluster_admin_ip0`` = ``None`` - (String) The IP of the clusters admin node. Only set in HNAS multinode clusters. * - ``hitachi_hnas_driver_helper`` = ``manila.share.drivers.hitachi.hnas.ssh.HNASSSHBackend`` - (String) Python class to be used for driver helper. * - ``hitachi_hnas_evs_id`` = ``None`` - (Integer) Specify which EVS this backend is assigned to. * - ``hitachi_hnas_evs_ip`` = ``None`` - (String) Specify IP for mounting shares. 
* - ``hitachi_hnas_file_system_name`` = ``None`` - (String) Specify file-system name for creating shares. * - ``hitachi_hnas_ip`` = ``None`` - (String) HNAS management interface IP for communication between Manila controller and HNAS. * - ``hitachi_hnas_password`` = ``None`` - (String) HNAS user password. Required only if private key is not provided. * - ``hitachi_hnas_ssh_private_key`` = ``None`` - (String) RSA/DSA private key value used to connect into HNAS. Required only if password is not provided. * - ``hitachi_hnas_stalled_job_timeout`` = ``30`` - (Integer) The time (in seconds) to wait for stalled HNAS jobs before aborting. * - ``hitachi_hnas_user`` = ``None`` - (String) HNAS username Base64 String in order to perform tasks such as create file-systems and network interfaces. * - **[hnas1]** - * - ``share_backend_name`` = ``None`` - (String) The backend name for a given driver implementation. * - ``share_driver`` = ``manila.share.drivers.generic.GenericShareDriver`` - (String) Driver to use for share creation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-hds_hsp.inc0000664000175000017500000000161100000000000024625 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-hds_hsp: .. 
list-table:: Description of HDS HSP share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[hsp1]** - * - ``share_backend_name`` = ``None`` - (String) The backend name for a given driver implementation. * - ``share_driver`` = ``manila.share.drivers.generic.GenericShareDriver`` - (String) Driver to use for share creation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-hnas.inc0000664000175000017500000000145000000000000024127 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-hnas: .. list-table:: Description of hnas configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``hitachi_hnas_driver_helper`` = ``manila.share.drivers.hitachi.hnas.ssh.HNASSSHBackend`` - (String) Python class to be used for driver helper. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-hpe3par.inc0000664000175000017500000000411100000000000024535 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. 
Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-hpe3par: .. list-table:: Description of HPE 3PAR share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``hpe3par_api_url`` = - (String) 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1 * - ``hpe3par_cifs_admin_access_domain`` = ``LOCAL_CLUSTER`` - (String) File system domain for the CIFS admin user. * - ``hpe3par_cifs_admin_access_password`` = - (String) File system admin password for CIFS. * - ``hpe3par_cifs_admin_access_username`` = - (String) File system admin user name for CIFS. * - ``hpe3par_debug`` = ``False`` - (Boolean) Enable HTTP debugging to 3PAR * - ``hpe3par_fpg`` = ``None`` - (Unknown) The File Provisioning Group (FPG) to use * - ``hpe3par_fstore_per_share`` = ``False`` - (Boolean) Use one filestore per share * - ``hpe3par_password`` = - (String) 3PAR password for the user specified in hpe3par_username * - ``hpe3par_require_cifs_ip`` = ``False`` - (Boolean) Require IP access rules for CIFS (in addition to user) * - ``hpe3par_san_ip`` = - (String) IP address of SAN controller * - ``hpe3par_san_login`` = - (String) Username for SAN controller * - ``hpe3par_san_password`` = - (String) Password for SAN controller * - ``hpe3par_san_ssh_port`` = ``22`` - (Port number) SSH port to use with SAN * - ``hpe3par_share_mount_path`` = ``/mnt/`` - (String) The path where shares will be mounted when deleting nested file trees. * - ``hpe3par_username`` = - (String) 3PAR username with the 'edit' role ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-huawei.inc0000664000175000017500000000145600000000000024466 0ustar00zuulzuul00000000000000.. 
Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-huawei: .. list-table:: Description of Huawei share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``manila_huawei_conf_file`` = ``/etc/manila/manila_huawei_conf.xml`` - (String) The configuration file for the Manila Huawei driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-infinidat.inc0000664000175000017500000000353100000000000025145 0ustar00zuulzuul00000000000000.. _manila-infinidat: .. list-table:: Description of INFINIDAT InfiniBox share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``infinibox_hostname`` = ``None`` - (String) The name (or IP address) for the INFINIDAT Infinibox storage system. * - ``infinidat_use_ssl`` = ``False`` - (Boolean) Enable SSL communication to access the INFINIDAT Infinibox storage system. * - ``infinidat_suppress_ssl_warnings`` = ``False`` - (Boolean) Suppress requests library SSL certificate warnings. * - ``infinibox_login`` = ``None`` - (String) Administrative user account name used to access the INFINIDAT Infinibox storage system. * - ``infinibox_password`` = ``None`` - (String) Password for the administrative user account specified in the infinibox_login option. * - ``infinidat_pool_name`` = ``None`` - (String) Name of the pool from which volumes are allocated. 
* - ``infinidat_nas_network_space_name`` = ``None`` - (String) Name of the NAS network space on the INFINIDAT InfiniBox. * - ``infinidat_thin_provision`` = ``True`` - (Boolean) Use thin provisioning. * - ``infinidat_snapdir_accessible`` = ``True`` - (Boolean) Controls access to the ``.snapshot`` directory. By default, each share allows access to its own ``.snapshot`` directory, which contains files and directories of each snapshot taken. To restrict access to the ``.snapshot`` directory, this option should be set to ``False``. * - ``infinidat_snapdir_visible`` = ``False`` - (Boolean) Controls visibility of the ``.snapshot`` directory. By default, each share contains the ``.snapshot`` directory, which is hidden on the client side. To make the ``.snapshot`` directory visible, this option should be set to ``True``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-infortrend.inc0000664000175000017500000000275400000000000025360 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-infortrend: .. list-table:: Description of Infortrend Manila driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``infortrend_nas_ip`` = ``None`` - (String) Infortrend NAS ip. It is the ip for management. * - ``infortrend_nas_user`` = ``manila`` - (String) Infortrend NAS username. * - ``infortrend_nas_password`` = ``None`` - (String) Password for the Infortrend NAS server. 
This is not necessary if infortrend_nas_ssh_key is set. * - ``infortrend_nas_ssh_key`` = ``None`` - (String) SSH key for the Infortrend NAS server. This is not necessary if infortrend_nas_password is set. * - ``infortrend_share_pools`` = ``None`` - (String) Infortrend nas pool name list. It is separated with comma. * - ``infortrend_share_channels`` = ``None`` - (String) Infortrend channels for file service. It is separated with comma. * - ``infortrend_cli_timeout`` = ``30`` - (Integer) CLI timeout in seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-lvm.inc0000664000175000017500000000253500000000000024001 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-lvm: .. list-table:: Description of LVM share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``lvm_share_export_ips`` = ``None`` - (String) List of IPs to export shares belonging to the LVM storage driver. * - ``lvm_share_export_root`` = ``$state_path/mnt`` - (String) Base folder where exported shares are located. * - ``lvm_share_helpers`` = ``CIFS=manila.share.drivers.helpers.CIFSHelperUserAccess, NFS=manila.share.drivers.helpers.NFSHelper`` - (List) Specify list of share export helpers. * - ``lvm_share_mirrors`` = ``0`` - (Integer) If set, create LVMs with multiple mirrors. Note that this requires lvm_mirrors + 2 PVs with available space. 
* - ``lvm_share_volume_group`` = ``lvm-shares`` - (String) Name for the VG that will contain exported shares. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-maprfs.inc0000664000175000017500000000317700000000000024476 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-maprfs: .. list-table:: Description of MapRFS share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``maprfs_base_volume_dir`` = ``/`` - (String) Path in MapRFS where share volumes must be created. * - ``maprfs_cldb_ip`` = ``None`` - (List) The list of IPs or hostnames of CLDB nodes. * - ``maprfs_clinode_ip`` = ``None`` - (List) The list of IPs or hostnames of nodes where mapr-core is installed. * - ``maprfs_rename_managed_volume`` = ``True`` - (Boolean) Specify whether existing volume should be renamed when start managing. * - ``maprfs_ssh_name`` = ``mapr`` - (String) Cluster admin user ssh login name. * - ``maprfs_ssh_port`` = ``22`` - (Port number) CLDB node SSH port. * - ``maprfs_ssh_private_key`` = ``None`` - (String) Path to SSH private key for login. * - ``maprfs_ssh_pw`` = ``None`` - (String) Cluster node SSH login password, This parameter is not necessary, if 'maprfs_ssh_private_key' is configured. * - ``maprfs_zookeeper_ip`` = ``None`` - (List) The list of IPs or hostnames of ZooKeeper nodes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-netapp.inc0000664000175000017500000000705100000000000024470 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-netapp: .. list-table:: Description of NetApp share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``netapp_aggregate_name_search_pattern`` = ``(.*)`` - (String) Pattern for searching available aggregates for provisioning. * - ``netapp_enabled_share_protocols`` = ``nfs3, nfs4.0`` - (List) The NFS protocol versions that will be enabled. Supported values include nfs3, nfs4.0, nfs4.1. This option only applies when the option driver_handles_share_servers is set to True. * - ``netapp_lif_name_template`` = ``os_%(net_allocation_id)s`` - (String) Logical interface (LIF) name template * - ``netapp_login`` = ``None`` - (String) Administrative user account name used to access the storage system. * - ``netapp_password`` = ``None`` - (String) Password for the administrative user account specified in the netapp_login option. * - ``netapp_port_name_search_pattern`` = ``(.*)`` - (String) Pattern for overriding the selection of network ports on which to create Vserver LIFs. * - ``netapp_root_volume`` = ``root`` - (String) Root volume name. * - ``netapp_root_volume_aggregate`` = ``None`` - (String) Name of aggregate to create Vserver root volumes on. 
This option only applies when the option driver_handles_share_servers is set to True. * - ``netapp_server_hostname`` = ``None`` - (String) The hostname (or IP address) for the storage system. * - ``netapp_server_port`` = ``None`` - (Port number) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS. * - ``netapp_snapmirror_quiesce_timeout`` = ``3600`` - (Integer) The maximum time in seconds to wait for existing snapmirror transfers to complete before aborting when promoting a replica. * - ``netapp_storage_family`` = ``ontap_cluster`` - (String) The storage family type used on the storage system; valid values include ontap_cluster for using clustered Data ONTAP. * - ``netapp_trace_flags`` = ``None`` - (String) Comma-separated list of options that control which trace info is written to the debug logs. Values include method and api. * - ``netapp_transport_type`` = ``http`` - (String) The transport protocol used when communicating with the storage system or proxy server. Valid values are http or https. * - ``netapp_volume_move_cutover_timeout`` = ``3600`` - (Integer) The maximum time in seconds to wait for the completion of a volume move operation after the cutover was triggered. * - ``netapp_volume_name_template`` = ``share_%(share_id)s`` - (String) NetApp volume name template. * - ``netapp_volume_snapshot_reserve_percent`` = ``5`` - (Integer) The percentage of share space set aside as reserve for snapshot usage; valid values range from 0 to 90. * - ``netapp_vserver_name_template`` = ``os_%s`` - (String) Name template to use for new Vserver. When using CIFS protocol make sure to not configure characters illegal in DNS hostnames. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-nexentastor5.inc0000664000175000017500000000512000000000000025633 0ustar00zuulzuul00000000000000.. _manila-nexentastor5: .. list-table:: Description of NexentaStor5 configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``nexenta_rest_addresses`` = ``None`` - (List) One or more comma delimited IP addresses for management communication with NexentaStor appliance. * - ``nexenta_rest_port`` = ``8443`` - (Integer) Port to connect to Nexenta REST API server. * - ``nexenta_use_https`` = ``True`` - (Boolean) Use HTTP secure protocol for NexentaStor management REST API connections. * - ``nexenta_user`` = ``admin`` - (String) User name to connect to Nexenta SA. * - ``nexenta_password`` = ``None`` - (String) Password to connect to Nexenta SA. * - ``nexenta_pool`` = ``pool1`` - (String) Pool name on NexentaStor. * - ``nexenta_nfs`` = ``True`` - (Boolean) Defines whether share over NFS is enabled. * - ``nexenta_ssl_cert_verify`` = ``False`` - (Boolean) Defines whether the driver should check ssl cert. * - ``nexenta_rest_connect_timeout`` = ``30`` - (Float) Specifies the time limit (in seconds), within which the connection to NexentaStor management REST API server must be established. * - ``nexenta_rest_read_timeout`` = ``300`` - (Float) Specifies the time limit (in seconds), within which NexentaStor management REST API server must send a response. * - ``nexenta_rest_backoff_factor`` = ``1`` - (Float) Specifies the backoff factor to apply between connection attempts to NexentaStor management REST API server. * - ``nexenta_rest_retry_count`` = ``5`` - (Integer) Specifies the number of times to repeat NexentaStor management REST API call in case of connection errors and NexentaStor appliance EBUSY or ENOENT errors. 
* - ``nexenta_nas_host`` = ``None`` - (Hostname) Data IP address of Nexenta storage appliance. * - ``nexenta_mount_point_base`` = ``$state_path/mnt`` - (String) Base directory that contains NFS share mount points. * - ``nexenta_share_name_prefix`` = ``share-`` - (String) Nexenta share name prefix. * - ``nexenta_folder`` = ``folder`` - (String) Parent folder on NexentaStor. * - ``nexenta_dataset_compression`` = ``on`` - (String) Compression value for new ZFS folders. * - ``nexenta_thin_provisioning`` = ``True`` - (Boolean) If True shares will not be space guaranteed and overprovisioning will be enabled. * - ``nexenta_dataset_record_size`` = ``131072`` - (Integer) Specifies a suggested block size in for files in a file system. (bytes) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-powermax.inc0000664000175000017500000000212100000000000025034 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-powermax: .. list-table:: Description of Dell EMC PowerMax share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``powermax_ethernet_ports`` = ``None`` - (List) Comma separated list of ports that can be used for share server interfaces. Members of the list can be Unix-style glob expressions. * - ``powermax_server_container`` = ``None`` - (String) Data mover to host the NAS server. 
* - ``powermax_share_data_pools`` = ``None`` - (List) Comma separated list of pools that can be used to persist share data. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-purestorage-flashblade.inc0000664000175000017500000000142100000000000027617 0ustar00zuulzuul00000000000000.. _manila-purestorage-flashblade: .. list-table:: Description of Pure Storage FlashBlade share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``flashblade_mgmt_vip`` = ``None`` - (String) The name (or IP address) for the Pure Storage FlashBlade storage system management port. * - ``flashblade_data_vip`` = ``None`` - (String) The names (or IP address) for the Pure Storage FlashBlade storage system data ports. * - ``flashblade_api`` = ``None`` - (String) API token for an administrative level user account. * - ``flashblade_eradicate`` = ``True`` - (Boolean) Enable or disable filesystem and snapshot eradication on delete. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-quobyte.inc0000664000175000017500000000271200000000000024670 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-quobyte: .. 
list-table:: Description of Quobyte share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``quobyte_api_ca`` = ``None`` - (String) The X.509 CA file to verify the server cert. * - ``quobyte_api_password`` = ``quobyte`` - (String) Password for Quobyte API server * - ``quobyte_api_url`` = ``None`` - (String) URL of the Quobyte API server (http or https) * - ``quobyte_api_username`` = ``admin`` - (String) Username for Quobyte API server. * - ``quobyte_default_volume_group`` = ``root`` - (String) Default owning group for new volumes. * - ``quobyte_default_volume_user`` = ``root`` - (String) Default owning user for new volumes. * - ``quobyte_delete_shares`` = ``False`` - (Boolean) Actually deletes shares (vs. unexport) * - ``quobyte_volume_configuration`` = ``BASE`` - (String) Name of volume configuration used for new shares. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-quota.inc0000664000175000017500000000431500000000000024332 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-quota: .. list-table:: Description of Quota configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[quota]** - * - ``max_age`` = ``0`` - (Integer) Number of seconds between subsequent usage refreshes. * - ``max_gigabytes`` = ``10000`` - (Integer) Maximum number of volume gigabytes to allow per host. 
* - ``driver`` = ``manila.quota.DbQuotaDriver`` - (String) Default driver to use for quota checks. * - ``gigabytes`` = ``1000`` - (Integer) Number of share gigabytes allowed per project. * - ``share_networks`` = ``10`` - (Integer) Number of share-networks allowed per project. * - ``shares`` = ``50`` - (Integer) Number of shares allowed per project. * - ``snapshot_gigabytes`` = ``1000`` - (Integer) Number of snapshot gigabytes allowed per project. * - ``snapshots`` = ``50`` - (Integer) Number of share snapshots allowed per project. * - ``share_groups`` = ``50`` - (Integer) Number of share groups allowed. * - ``share_group_snapshots`` = ``50`` - (Integer) Number of share group snapshots allowed. * - ``reservation_expire`` = ``86400`` - (Integer) Number of seconds until a reservation expires. * - ``backups`` = ``10`` - (Integer) Number of share backups allowed per project. * - ``backup_gigabytes`` = ``1000`` - (Integer) Total amount of storage, in gigabytes, allowed for backups per project. * - ``per_share_gigabytes`` = ``-1`` - (Integer) Max size allowed per share, in gigabytes. * - ``replica_gigabytes`` = ``1000`` - (Integer) Number of replica gigabytes allowed per project. * - ``share_replicas`` = ``100`` - (Integer) Number of share-replicas allowed per project. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-redis.inc0000664000175000017500000000313100000000000024302 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-redis: .. 
list-table:: Description of Redis configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[matchmaker_redis]** - * - ``check_timeout`` = ``20000`` - (Integer) Time in ms to wait before the transaction is killed. * - ``host`` = ``127.0.0.1`` - (String) DEPRECATED: Host to locate redis. Replaced by [DEFAULT]/transport_url * - ``password`` = - (String) DEPRECATED: Password for Redis server (optional). Replaced by [DEFAULT]/transport_url * - ``port`` = ``6379`` - (Port number) DEPRECATED: Use this port to connect to redis host. Replaced by [DEFAULT]/transport_url * - ``sentinel_group_name`` = ``oslo-messaging-zeromq`` - (String) Redis replica set name. * - ``sentinel_hosts`` = - (List) DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g., [host:port, host1:port ... ] Replaced by [DEFAULT]/transport_url * - ``socket_timeout`` = ``10000`` - (Integer) Timeout in ms on blocking socket operations. * - ``wait_timeout`` = ``2000`` - (Integer) Time in ms to wait between connection attempts. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-san.inc0000664000175000017500000000166200000000000023764 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-san: .. 
list-table:: Description of SAN configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``ssh_conn_timeout`` = ``60`` - (Integer) Backend server SSH connection timeout. * - ``ssh_max_pool_conn`` = ``10`` - (Integer) Maximum number of connections in the SSH pool. * - ``ssh_min_pool_conn`` = ``1`` - (Integer) Minimum number of connections in the SSH pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-scheduler.inc0000664000175000017500000000410200000000000025151 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-scheduler: .. list-table:: Description of Scheduler configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``capacity_weight_multiplier`` = ``1.0`` - (Floating point) Multiplier used for weighing share capacity. Negative numbers mean to stack vs spread. * - ``pool_weight_multiplier`` = ``1.0`` - (Floating point) Multiplier used for weighing pools which have existing share servers. Negative numbers mean to spread vs stack. * - ``scheduler_default_filters`` = ``AvailabilityZoneFilter, CapacityFilter, CapabilitiesFilter, DriverFilter, ShareReplicationFilter`` - (List) Which filter class names to use for filtering hosts when not specified in the request. * - ``scheduler_default_weighers`` = ``CapacityWeigher, GoodnessWeigher`` - (List) Which weigher class names to use for weighing hosts. 
* - ``scheduler_driver`` = ``manila.scheduler.drivers.filter.FilterScheduler`` - (String) Default scheduler driver to use. * - ``scheduler_host_manager`` = ``manila.scheduler.host_manager.HostManager`` - (String) The scheduler host manager class to use. * - ``scheduler_json_config_location`` = - (String) Absolute path to scheduler configuration JSON file. * - ``scheduler_manager`` = ``manila.scheduler.manager.SchedulerManager`` - (String) Full class name for the scheduler manager. * - ``scheduler_max_attempts`` = ``3`` - (Integer) Maximum number of attempts to schedule a share. * - ``scheduler_topic`` = ``manila-scheduler`` - (String) The topic scheduler nodes listen on. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-share.inc0000664000175000017500000001306600000000000024306 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-share: .. list-table:: Description of Share configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``automatic_share_server_cleanup`` = ``True`` - (Boolean) If set to True, then Manila will delete all share servers which were unused more than specified time .If set to False - automatic deletion of share servers will be disabled. * - ``backlog`` = ``4096`` - (Integer) Number of backlog requests to configure the socket with. * - ``default_share_group_type`` = ``None`` - (String) Default share group type to use. 
* - ``default_share_type`` = ``None`` - (String) Default share type to use. * - ``delete_share_server_with_last_share`` = ``False`` - (Boolean) Whether share servers will be deleted on deletion of the last share. * - ``driver_handles_share_servers`` = ``None`` - (Boolean) There are two possible approaches for share drivers in Manila. First is when share driver is able to handle share-servers and second when not. Drivers can support either both or only one of these approaches. So, set this opt to True if share driver is able to handle share servers and it is desired mode else set False. It is set to None by default to make this choice intentional. * - ``enable_periodic_hooks`` = ``False`` - (Boolean) Whether to enable periodic hooks or not. * - ``enable_post_hooks`` = ``False`` - (Boolean) Whether to enable post hooks or not. * - ``enable_pre_hooks`` = ``False`` - (Boolean) Whether to enable pre hooks or not. * - ``enabled_share_backends`` = ``None`` - (List) A list of share backend names to use. These backend names should be backed by a unique [CONFIG] group with its options. * - ``enabled_share_protocols`` = ``NFS, CIFS`` - (List) Specify list of protocols to be allowed for share creation. Available values are '('NFS', 'CIFS', 'GLUSTERFS', 'HDFS', 'CEPHFS', 'MAPRFS')' * - ``executor_thread_pool_size`` = ``64`` - (Integer) Size of executor thread pool. * - ``hook_drivers`` = - (List) Driver(s) to perform some additional actions before and after share driver actions and on a periodic basis. Default is []. * - ``migration_create_delete_share_timeout`` = ``300`` - (Integer) Timeout for creating and deleting share instances when performing share migration (seconds). * - ``migration_driver_continue_update_interval`` = ``60`` - (Integer) This value, specified in seconds, determines how often the share manager will poll the driver to perform the next step of migration in the storage backend, for a migrating share. 
network config options. If not set, the share backend's config group will be used. If an option is not found within provided group, then 'DEFAULT' group will be used for search of option.
The reclamation function is run every 10 minutes and delete share servers which were unused more than unused_share_server_cleanup_interval option defines. This value reflects the shortest time Manila will wait for a share server to go unutilized before deleting it. * - ``use_scheduler_creating_share_from_snapshot`` = ``False`` - (Boolean) If set to False, then share creation from snapshot will be performed on the same host. If set to True, then scheduling step will be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/configuration/tables/manila-spectrumscale_ces.inc0000664000175000017500000000352500000000000026707 0ustar00zuulzuul00000000000000.. Warning: Do not edit this file. It is automatically generated from the software project's code and your changes will be overwritten. The tool to generate this file lives in openstack-doc-tools repository. Please make any changes needed in the code, then run the autogenerate-config-doc tool from the openstack-doc-tools repository, or ask for help on the documentation mailing list, IRC channel or meeting. .. _manila-spectrumscale_ces: .. list-table:: Description of IBM Spectrum Scale CES share driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``gpfs_mount_point_base`` = ``$state_path/mnt`` - (String) Base folder where exported shares are located. * - ``gpfs_nfs_server_type`` = ``CES`` - (String) NFS Server type. Valid choices are "CES" (Ganesha NFS) or "KNFS" (Kernel NFS). * - ``gpfs_share_export_ip`` = ``None`` - (Host address) IP to be added to GPFS export string. * - ``gpfs_share_helpers`` = ``KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper, CES=manila.share.drivers.ibm.gpfs.CESHelper`` - (List) Specify list of share export helpers. * - ``gpfs_ssh_login`` = ``None`` - (String) GPFS server SSH login name. 
True: when Manila services are running on one of the Spectrum Scale nodes. False: when Manila services are not running on any of the Spectrum Scale nodes.
True: when Manila services are running on one of the Spectrum Scale nodes. False: when Manila services are not running on any of the Spectrum Scale nodes.
One of NAS server names in Unity, it is used for share creation when the driver is in ``DHSS=False`` mode.
(String) Hostname or IP address of the VAST storage system management VIP.
(String) Path to the x509 certificate used for accessing the service instance.
* - ``winrm_retry_interval`` = ``5`` - (Integer) WinRM retry interval in seconds. * - ``winrm_use_cert_based_auth`` = ``False`` - (Boolean) Use x509 certificates in order to authenticate to the service instance.
* - ``zfs_use_ssh`` = ``False`` - (Boolean) Whether to use SSH for accessing the remote ZFS storage host. Optional.
(String) ZFSSA management authorized user password.
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8016732 manila-21.0.0/doc/source/contributor/0000775000175000017500000000000000000000000017475 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/adding_release_notes.rst0000664000175000017500000001516500000000000024375 0ustar00zuulzuul00000000000000.. _adding_release_notes: Release Notes ============= What are release notes? ~~~~~~~~~~~~~~~~~~~~~~~ Release notes are important for change management within manila. Since manila follows a release cycle with milestones, release notes provide a way for the community and users to quickly grasp what changes occurred within a development milestone. To the OpenStack release management and documentation teams, release notes are a way to compile changes per milestone. These notes are published on the `OpenStack Releases website `_. Automated tooling is built around ``releasenotes`` and they get appropriately handled per release milestone, including any back-ports to stable releases. What needs a release note? 
~~~~~~~~~~~~~~~~~~~~~~~~~~ * Changes that impact an upgrade, most importantly, those that require a deployer to take some action while upgrading * API changes * New APIs * Changes to the response schema of existing APIs * Changes to request/response headers * Non-trivial API changes such as response code changes from 2xx to 4xx * Deprecation of APIs or response fields * Removal of APIs * A new feature is implemented, such as a new core feature in manila, driver support for an existing manila feature or a new driver * An existing feature is deprecated * An existing feature is removed * Behavior of an existing feature has changed in a discernible way to an end user or administrator * Backend driver interface changes * A security bug is fixed * New configuration option is added What does not need a release note? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * A code change that doesn't change the general behavior of any feature such as code refactor or logging changes. One case of this could be the exercise that all drivers went through by removing ``allow_access`` and ``deny_access`` interfaces in favor of an ``update_access`` interface as suggested in the Mitaka release. * Tempest or unit test coverage enhancement * Changes to response message with API failure codes 4xx and 5xx * Any change submitted with a justified TrivialFix flag added in the commit message * Adding or changing documentation within in-tree documentation guides How do I add a release note? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use `Reno `_ to create and manage release notes. The new subcommand combines a random suffix with a "slug" value to make the new file with a unique name that is easy to identify again later. To create a release note for your change, use: .. code-block:: console $ reno new slug-goes-here If reno is not installed globally on your system, you can use a tox environment in manila: .. code-block:: console $ tox -e newnote -- slug-goes-here .. 
note:: When you are adding a bug-fix reno, name your file using the template: "bug-<launchpad-bug-id>-slug-goes-here".
Dos and Don'ts ~~~~~~~~~~~~~~ * Release notes need to be succinct. Short and unambiguous descriptions are preferred * Write in past tense, unless you are writing an imperative statement * Do not have blank sections in the file * Do not include code or links * Avoid special rst formatting unless absolutely necessary * Always prefer including a release note in the same patch * Release notes are not a replacement for developer/user/admin documentation * Release notes are not a way of conveying behavior of any features or usage of any APIs * Limit a release note to fewer than 2-3 lines per change per section * OpenStack prefers atomic changes. So remember that your change may need the fewest sections possible * General writing guidelines can be found `here `_ * Proofread your note. Pretend you are a user or a deployer who is reading the note after a milestone or a release has been cut Examples ~~~~~~~~ The following need only be considered as directions for formatting. They are **not** fixes or features in manila. * *fix-failing-automount-23aef89a7e98c8.yaml* .. code-block:: yaml --- deprecations: - displaying mount options via the array listing API is deprecated. fixes: - users can mount shares on debian systems with kernel version 32.2.41.* with share-mount API * *add-librsync-backup-plugin-for-m-bkup-41cad17c1498a3.yaml* .. code-block:: yaml --- features: - librsync support added for NFS incremental backup upgrade: - Copy new rootwrap.d/librsync.filters file into /etc/manila/rootwrap.d directory. issues: - librsync has not been tested thoroughly in all operating systems that manila is qualified for. m-bkup is an experimental feature. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/addmethod.openstackapi.rst0000664000175000017500000000543600000000000024650 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 OpenStack LLC All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _adding-a-new-api: Adding a Method to the OpenStack Manila API =========================================== The interface to manila is a RESTful API. REST stands for Representational State Transfer and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed. Manila's API aims to conform to the `guidelines `_ set by OpenStack API SIG. Routing ------- To map URLs to controllers+actions, manila uses the Routes package. See the `routes package documentation `_ for more information. URLs are mapped to "action" methods on "controller" classes in ``manila/api//router.py``. These are two methods of the routes package that are used to perform the mapping and the routing: - mapper.connect() lets you map a single URL to a single action on a controller. - mapper.resource() connects many standard URLs to actions on a controller. Controllers and actions ----------------------- Controllers live in ``manila/api/v1`` and ``manila/api/v2``. See ``manila/api/v1/shares.py`` for an example. Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. Actions return a dictionary, and wsgi.Controller serializes that to JSON. 
Faults ------ If you need to return a non-200, you should return faults.Fault(webob.exc .HTTPNotFound()) replacing the exception as appropriate. Evolving the API ---------------- The ``v1`` version of the manila API has been deprecated. The ``v2`` version of the API supports micro versions. So all changes to the v2 API strive to maintain stability at any given API micro version, so consumers can safely rely on a specific micro version of the API never to change the request and response semantics. Read more about :doc:`API Microversions ` to understand how stability and backwards compatibility are maintained. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/apache-httpd.rst0000664000175000017500000000653300000000000022600 0ustar00zuulzuul00000000000000==================================== Running manila API with a web server ==================================== As part of the `community goals for Pike`_, manila has packaged a wsgi script entrypoint that allows you to run it with a real web server like Apache HTTPD or NGINX. This doc shows a sample of deploying manila with uwsgi. Installing the API via uwsgi ----------------------------- For this deployment we use uwsgi as a web server bound to a random local port. Then we configure apache using mod_proxy to forward all incoming requests on the specified endpoint to that local webserver. This has the advantage of letting apache manage all inbound http connections, but allowing uwsgi to run the python code. This also means that when we make changes to manila code or configuration we don't need to restart all of apache (which may be running other services as well) and just need to restart the local uwsgi daemon. The httpd/ directory contains sample files for configuring HTTPD to run manila under uwsgi. To use sample configs, simply copy `httpd/uwsgi-manila.conf` to the appropriate location for your apache server. 
On RHEL/CentOS/Fedora it is:: /etc/httpd/conf.d/uwsgi-manila.conf On Debian/Ubuntu it is:: /etc/apache2/sites-available/uwsgi-manila.conf Enable mod_proxy by running ``sudo a2enmod proxy`` On Ubuntu/Debian systems enable the site using the a2ensite tool:: sudo a2ensite /etc/apache2/sites-available/uwsgi-manila.conf This is not required on RHEL/CentOS/Fedora systems. Start or restart HTTPD/Apache2 to pick up the new configuration. Now we have to configure and start the uwsgi service. Copy the `httpd/manila-uwsgi.ini` file to `/etc/manila`. Update the file to match your system configuration (i.e. tweak the number of processes and threads). Install uwsgi. On RHEL/CentOS/Fedora:: sudo dnf install uwsgi-plugin-python3 On Ubuntu/Debian:: sudo apt-get install uwsgi-plugin-python3 And start the manila server using uwsgi:: uwsgi --ini /etc/manila/manila-uwsgi.ini .. NOTE:: In the sample configs port 51999 is used; this is a randomly selected number. Installing the API via mod_wsgi ------------------------------- The httpd/ directory contains sample files for configuring HTTPD to run manila API via mod_wsgi. To use sample configs, simply copy `httpd/mod_wsgi-manila.conf` to the appropriate location for your apache server. On RHEL/CentOS/Fedora it is:: /etc/httpd/conf.d/mod_wsgi-manila.conf On Debian/Ubuntu it is:: /etc/apache2/sites-available/mod_wsgi-manila.conf On Ubuntu/Debian systems enable the site using the a2ensite tool:: sudo a2ensite /etc/apache2/sites-available/mod_wsgi-manila.conf This is not required on RHEL/CentOS/Fedora systems. Start or restart HTTPD/Apache2 to pick up the new configuration. .. NOTE:: manila's primary configuration file (etc/manila.conf) and the PasteDeploy configuration file (etc/manila-paste.ini) must be readable to httpd in one of the default locations described in Configuring Manila. 
Access Control -------------- If you are running with Linux kernel security module enabled (for example SELinux or AppArmor), make sure that the configuration file has the appropriate context to access the linked file. .. _community goals for Pike: https://governance.openstack.org/tc/goals/pike/deploy-api-in-wsgi.html#control-plane-api-endpoints-deployment-via-wsgi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/api_microversion_dev.rst0000664000175000017500000002535300000000000024445 0ustar00zuulzuul00000000000000API Microversions ================= Background ---------- Manila uses a framework we called 'API Microversions' for allowing changes to the API while preserving backward compatibility. The basic idea is that a user has to explicitly ask for their request to be treated with a particular version of the API. So breaking changes can be added to the API without breaking users who don't specifically ask for it. This is done with an HTTP header ``X-OpenStack-Manila-API-Version`` which is a monotonically increasing semantic version number starting from ``1.0``. If a user makes a request without specifying a version, they will get the ``DEFAULT_API_VERSION`` as defined in ``manila/api/openstack/api_version_request.py``. This value is currently ``2.0`` and is expected to remain so for quite a long time. The Nova project was the first to implement microversions. For full details please read Nova's `Kilo spec for microversions `_ When do I need a new Microversion? ---------------------------------- A microversion is needed when the contract to the user is changed. 
The user contract covers many kinds of information such as: - the Request - the list of resource urls which exist on the server Example: adding a new shares/{ID}/foo which didn't exist in a previous version of the code - the list of query parameters that are valid on urls Example: adding a new parameter ``is_yellow`` servers/{ID}?is_yellow=True - the list of query parameter values for non free form fields Example: parameter filter_by takes a small set of constants/enums "A", "B", "C". Adding support for new enum "D". - new headers accepted on a request - the Response - the list of attributes and data structures returned Example: adding a new attribute 'locked': True/False to the output of shares/{ID} - the allowed values of non free form fields Example: adding a new allowed ``status`` to shares/{ID} - the list of status codes allowed for a particular request Example: an API previously could return 200, 400, 403, 404 and the change would make the API now also be allowed to return 409. - changing a status code on a particular response Example: changing the return code of an API from 501 to 400. - new headers returned on a response The following flow chart attempts to walk through the process of "do we need a microversion". .. graphviz:: digraph states { label="Do I need a microversion?" 
silent_fail[shape="diamond", style="", label="Did we silently fail to do what is asked?"]; ret_500[shape="diamond", style="", label="Did we return a 500 before?"]; new_error[shape="diamond", style="", label="Are we changing what status code is returned?"]; new_attr[shape="diamond", style="", label="Did we add or remove an attribute to a payload?"]; new_param[shape="diamond", style="", label="Did we add or remove an accepted query string parameter or value?"]; new_resource[shape="diamond", style="", label="Did we add or remove a resource url?"]; no[shape="box", style=rounded, label="No microversion needed"]; yes[shape="box", style=rounded, label="Yes, you need a microversion"]; no2[shape="box", style=rounded, label="No microversion needed, it's a bug"]; silent_fail -> ret_500[label="no"]; silent_fail -> no2[label="yes"]; ret_500 -> no2[label="yes [1]"]; ret_500 -> new_error[label="no"]; new_error -> new_attr[label="no"]; new_error -> yes[label="yes"]; new_attr -> new_param[label="no"]; new_attr -> yes[label="yes"]; new_param -> new_resource[label="no"]; new_param -> yes[label="yes"]; new_resource -> no[label="no"]; new_resource -> yes[label="yes"]; {rank=same; yes new_attr} {rank=same; no2 ret_500} {rank=min; silent_fail} } **Footnotes** [1] - When fixing 500 errors that previously caused stack traces, try to map the new error into the existing set of errors that API call could previously return (400 if nothing else is appropriate). Changing the set of allowed status codes from a request is changing the contract, and should be part of a microversion. The reason why we are so strict on contract is that we'd like application writers to be able to know, for sure, what the contract is at every microversion in manila. If they do not, they will need to write conditional code in their application to handle ambiguities. When in doubt, consider application authors. If it would work with no client side changes on both manila versions, you probably don't need a microversion. 
If, on the other hand, there is any ambiguity, a microversion is probably needed. In Code ------- In ``manila/api/openstack/wsgi.py`` we define an ``@api_version`` decorator which is intended to be used on top-level Controller methods. It is not appropriate for lower-level methods. Some examples: Adding a new API method ~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``X-OpenStack-Manila-API-Version`` of >= ``2.4``. If they had specified a lower version (or not specified it and received the default of ``2.1``) the server would respond with ``HTTP/404``. Removing an API method ~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.1", "2.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``X-OpenStack-Manila-API-Version`` of <= ``2.4``. If ``2.5`` or later is specified the server will respond with ``HTTP/404``. Changing a method's behaviour ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("2.1", "2.3") def my_api_method(self, req, id): .... method_1 ... @wsgi.Controller.api_version("2.4") # noqa def my_api_method(self, req, id): .... method_2 ... If a caller specified ``2.1``, ``2.2`` or ``2.3`` (or received the default of ``2.1``) they would see the result from ``method_1``, ``2.4`` or later ``method_2``. It is vital that the two methods have the same name, so the second of them will need ``# noqa`` to avoid failing flake8's ``F811`` rule. 
The two methods may be different in any kind of semantics (schema validation, return values, response codes, etc) A method with only small changes between versions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A method may have only small changes between microversions, in which case you can decorate a private method:: @api_version("2.1", "2.4") def _version_specific_func(self, req, arg1): pass @api_version(min_version="2.5") # noqa def _version_specific_func(self, req, arg1): pass def show(self, req, id): .... common stuff .... self._version_specific_func(req, "foo") .... common stuff .... A change in schema only ~~~~~~~~~~~~~~~~~~~~~~~ If there is no change to the method, only to the schema that is used for validation, you can add a version range to the ``validation.schema`` decorator:: @wsgi.Controller.api_version("2.1") @validation.schema(dummy_schema.dummy, "2.3", "2.8") @validation.schema(dummy_schema.dummy2, "2.9") def update(self, req, id, body): .... This method will be available from version ``2.1``, validated according to ``dummy_schema.dummy`` from ``2.3`` to ``2.8``, and validated according to ``dummy_schema.dummy2`` from ``2.9`` onward. When not using decorators ~~~~~~~~~~~~~~~~~~~~~~~~~ When you don't want to use the ``@api_version`` decorator on a method or you want to change behaviour within a method (say it leads to simpler or simply a lot less code) you can directly test for the requested version with a method as long as you have access to the api request object (commonly called ``req``). Every API method has an api_version_request object attached to the req object and that can be used to modify behaviour based on its value:: def index(self, req): req_version = req.api_version_request if req_version.matches("2.1", "2.5"): ....stuff.... elif req_version.matches("2.6", "2.10"): ....other stuff.... elif req_version > api_version_request.APIVersionRequest("2.10"): ....more stuff..... 
The first argument to the matches method is the minimum acceptable version and the second is maximum acceptable version. A specified version can be null:: null_version = APIVersionRequest() If the minimum version specified is null then there is no restriction on the minimum version, and likewise if the maximum version is null there is no restriction the maximum version. Alternatively a one sided comparison can be used as in the example above. Other necessary changes ----------------------- If you are adding a patch which adds a new microversion, it is necessary to add changes to other places which describe your change: * Update ``REST_API_VERSION_HISTORY`` in ``manila/api/openstack/api_version_request.py`` * Update ``_MAX_API_VERSION`` in ``manila/api/openstack/api_version_request.py`` * Add a verbose description to ``manila/api/openstack/rest_api_version_history.rst``. There should be enough information that it could be used by the docs team for release notes. * Update the expected versions in affected tests. Allocating a microversion ------------------------- If you are adding a patch which adds a new microversion, it is necessary to allocate the next microversion number. Except under extremely unusual circumstances and this would have been mentioned in the blueprint for the change, the minor number of ``_MAX_API_VERSION`` will be incremented. This will also be the new microversion number for the API change. It is possible that multiple microversion patches would be proposed in parallel and the microversions would conflict between patches. This will cause a merge conflict. We don't reserve a microversion for each patch in advance as we don't know the final merge order. Developers may need over time to rebase their patch calculating a new version number as above based on the updated value of ``_MAX_API_VERSION``. 
Testing Microversioned API Methods ---------------------------------- Testing a microversioned API method is very similar to a normal controller method test, you just need to add the ``X-OpenStack-Manila-API-Version`` header, for example:: req = fakes.HTTPRequest.blank('/testable/url/endpoint') req.headers = {'X-OpenStack-Manila-API-Version': '2.2'} req.api_version_request = api_version.APIVersionRequest('2.6') controller = controller.TestableController() res = controller.index(req) ... assertions about the response ... ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/api_microversion_history.rst0000664000175000017500000000011000000000000025350 0ustar00zuulzuul00000000000000.. include:: ../../../manila/api/openstack/rest_api_version_history.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/architecture.rst0000664000175000017500000000600400000000000022711 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. Copyright 2014 Mirantis, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Manila System Architecture ========================== The Shared File Systems service is intended to be run on one or more nodes. 
Manila uses a sql-based central database that is shared by all manila services in the system. The amount and depth of the data fits into a sql database quite well. For small deployments this seems like an optimal solution. For larger deployments, and especially if security is a concern, manila will be moving towards multiple data stores with some kind of aggregation system. Components ---------- Below you will find a brief explanation of the different components. :: /- ( LDAP ) [ Auth Manager ] --- | \- ( DB ) | | | [ Web Dashboard ]- manilaclient -[ manila-api ] -- < AMQP > -- [ manila-scheduler ] -- [ manila-share ] -- ( shared filesystem ) | | | | | < REST > * DB: sql database for data storage. Used by all components (LINKS NOT SHOWN) * Web Dashboard: external component that talks to the api, implemented as a plugin to the OpenStack Dashboard (Horizon) project, source is `here `_. * :term:`manila-api` * Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system. * :term:`manila-scheduler` * :term:`manila-share` Further Challenges ------------------ * More efficient share/snapshot size calculation * Create a notion of "attached" shares with automation of mount operations * Allow admin-created share-servers and share-networks to be used by multiple tenants * Support creation of new subnets for share servers (to connect VLANs with VXLAN/GRE/etc) * Gateway mediated networking model with NFS-Ganesha * Add support for more backends ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/auth.rst0000664000175000017500000000263300000000000021174 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. Copyright 2014 Mirantis, Inc. 
All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _auth: Authentication and Authorization ================================ The :mod:`manila.quota` Module ------------------------------ .. automodule:: manila.quota :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.policy` Module ------------------------------- .. automodule:: manila.policy :noindex: :members: :undoc-members: :show-inheritance: System limits ------------- The following limits need to be defined and enforced: * Maximum cumulative size of shares and snapshots (GB) * Total number of shares * Total number of snapshots * Total number of share networks ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/commit_message_tags.rst0000664000175000017500000000520300000000000024241 0ustar00zuulzuul00000000000000.. _commit_message_tags: Using Commit Message Tags in Manila =================================== When writing git commit messages for code submissions into manila, it can be useful to provide tags in the message for both human consumption as well as linking to other external resources, such as Launchpad. Each tag should be placed on a separate line. The following tags are used in manila. - **APIImpact** - Use this tag when the code change modifies a public HTTP API interface. This tag indicates that the patch creates, changes, or deletes a public API interface or changes its behavior. 
The tag may be followed by a reason beginning on the next line. If you are touching manila's API layer and you are unsure if your change has an impact on the API, use this tag anyway. - **Change-id** - This tag is automatically generated by a Gerrit hook and is a unique hash that describes the change. This hash should not be changed when rebasing as it is used by Gerrit to keep track of the change. - **Closes-Bug: | Partial-Bug: | Related-Bug:** *<#launchpad_bug_id>* - These tags are used when the change closes, partially closes, or relates to the bug referenced by the Launchpad bug ID respectively. This will automatically generate a link to the bug in Launchpad for easy access for reviewers. - **DocImpact** - Use this tag when the code change requires changes or updates to documentation in order to be understood. This tag can also be used if the documentation is provided along with the patch itself. This will also generate a Launchpad bug in manila for triaging and tracking. Refer to the section on :ref:`documenting_your_work` to understand where to add documentation. - **Implements: | Partially Implements:** *blueprint * - Use this tag when a change implements or partially implements the given blueprint in Launchpad. This will automatically generate a link to the blueprint in Gerrit for easy access for reviewers. - **TrivialFix** - This tag is used for a trivial issue, such as a typo, an unclear log message, or a simple code refactor that does not change existing behavior which does not require the creation of a separate bug or blueprint in Launchpad. Make sure that the **Closes-Bug**, **Partial-Bug**, **Related-Bug**, **blueprint**, and **Change-id** tags are at the very end of the commit message. The Gerrit hooks will automatically put the hash at the end of the commit message. For more information on tags and some examples of good commit messages, refer to the GitCommitMessages_ documentation. .. 
_GitCommitMessages: https://wiki.openstack.org/wiki/GitCommitMessages#Including_external_references ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/contributing.rst0000664000175000017500000002657700000000000022757 0ustar00zuulzuul00000000000000============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below will cover the more project specific information you need to get started with Manila (Shared File System service). Where is the code? ~~~~~~~~~~~~~~~~~~ manila | The OpenStack Shared File System Service | code: https://opendev.org/openstack/manila | docs: https://docs.openstack.org/manila/ | api-ref: https://docs.openstack.org/api-ref/shared-file-system | release model: https://releases.openstack.org/reference/release_models.html#cycle-with-rc | Launchpad: https://launchpad.net/manila python-manilaclient | Python client library for the OpenStack Shared File System Service API; includes standalone CLI shells and OpenStack client plugin and shell | code: https://opendev.org/openstack/python-manilaclient | docs: https://docs.openstack.org/python-manilaclient | release model: https://releases.openstack.org/reference/release_models.html#cycle-with-intermediary | Launchpad: https://launchpad.net/python-manilaclient manila-ui | OpenStack dashboard plugin for the Shared File System Service | code: https://opendev.org/openstack/manila-ui | docs: https://docs.openstack.org/manila-ui | release model: https://releases.openstack.org/reference/release_models.html#cycle-with-intermediary | Launchpad: https://launchpad.net/manila-ui manila-tempest-plugin | An 
OpenStack test integration (tempest) plugin containing API and scenario tests for the Shared File System Service | code: https://opendev.org/openstack/manila-tempest-plugin | release model: https://releases.openstack.org/reference/release_models.html#cycle-automatic | Launchpad: https://launchpad.net/manila manila-image-elements | A Disk Image Builder project with scripts to build a bootable Linux image for testing and use by some Shared File System Service storage drivers including the Generic Driver | code: https://opendev.org/openstack/manila-image-elements | release model: no releases | Launchpad: https://launchpad.net/manila-image-elements manila-test-image | A project with scripts to create a Buildroot based image to create a small bootable Linux image, primarily for the purposes of testing Manila | code: https://opendev.org/openstack/manila-test-image | images: https://tarballs.opendev.org/openstack/manila-test-image/ | release model: no releases | Launchpad: https://launchpad.net/manila-test-image manila-specs | Design Specifications for the Shared File System service | code: https://opendev.org/openstack/manila-specs | published specs: https://specs.openstack.org/openstack/manila-specs/ | release model: no releases | Launchpad: https://launchpad.net/manila See the ``CONTRIBUTING.rst`` file in each code repository for more information about contributing to that specific deliverable. Additionally, you should look over the docs links above; most components have helpful developer information specific to that deliverable. Manila and its associated projects follow a coordinated release alongside other OpenStack projects. Development cycles are code named. See the `OpenStack Releases website`_ for names and schedules of the current, past and future development cycles. Communication ~~~~~~~~~~~~~ IRC --- The team uses `IRC `_ extensively for communication and coordination of project activities. The IRC channel is ``#openstack-manila`` on OFTC. 
If you'd like a quick and dirty guide to get connected with the community, refer to `this page `_. Contributors work in various timezones across the world; so many of them run IRC Bouncers and appear to be always online. If you ping someone, or raise a question on the IRC channel, someone will get back to you when they are back on their computer. Additionally, the community IRC channel is logged, so if you ask a question when no one is around, you can `check the log `_ to see if it has been answered. Team Meetings ------------- We host a one-hour IRC based community meeting every Thursday at 1500 UTC on ``#openstack-meeting-alt`` channel. See the `OpenStack meetings page `_ for the most up-to-date meeting information and for downloading the ICS file to integrate this slot with your calendar. The community meeting is a good opportunity to gather the attention of multiple contributors synchronously. If you wish to do so, add a meeting topic along with your IRC nick to the `Meeting agenda `_. Mailing List ------------ In addition to IRC, the team uses the `OpenStack Discuss Mailing List`_ for development discussions. This list is meant for communication about all things developing OpenStack; so we also use this list to engage with contributors across projects, and make any release cycle announcements. Since it is a wide distribution list, the use of subject line tags is encouraged to make sure you reach the right people. Prefix the subject line with ``[manila]`` when sending email that concern Manila on this list. Other Communication Avenues --------------------------- Contributors gather at least once per release at the `OpenDev Project Team Gathering `_ to discuss plans for an upcoming development cycle. This is usually where developers pool ideas and brainstorm features and bug fixes. We have had both virtual, and in-person Project Technical Gathering events in the past. 
Before every such event, we gather opinions from the community via IRC Meetings and the Mailing list on planning these Project Technical Gatherings. We make extensive use of `Etherpads `_. You can find some of them that the team used in the past `in the project Wiki `_. To share code snippets or logs, we use `PasteBin `_. .. _contacting-the-core-team: Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~ When you contribute patches, your change will need to be approved by one or more `maintainers (collectively known as the "Core Team") `_. We're always looking for more maintainers! If you're looking to help maintain Manila, express your interest to the existing core team. We have mentored many individuals for one or more development cycles and added them to the core team. Any new core reviewer needs to be nominated to the team by an existing core reviewer by making a proposal on `OpenStack Discuss Mailing List`_. Other maintainers and contributors can then express their approval or disapproval by responding to the proposal. If there is a decision, the project team lead will add the concerned individual to the core reviewers team. An example proposal is `here. `_ New Feature Planning ~~~~~~~~~~~~~~~~~~~~ If you'd like to propose a new feature, do so by `creating a blueprint on Launchpad. `_ For significant changes we might require a design specification. .. _features-that-require-spefication: Feature changes that need a specification include: -------------------------------------------------- - Adding new API methods - Substantially modifying the behavior of existing API methods - Adding a new database resource or modifying existing resources - Modifying a share back end driver interface, thereby affecting all share back end drivers What doesn't need a design specification: ----------------------------------------- - Making trivial (backwards compatible) changes to the behavior of an existing API method. 
Examples include adding a new field to the response schema of an existing method, or introducing a new query parameter. See :doc:`api_microversion_dev` on how Manila APIs are versioned. - Adding new share back end drivers or modifying share drivers, without affecting the share back end driver interface - Adding or changing tests After filing a blueprint, if you're in doubt whether to create a design specification, contact the maintainers. Design specifications are tracked in the `Manila Specifications `_ repository and are published on the `OpenStack Project Specifications website. `_ Refer to the `specification template `_ to structure your design spec. Specifications and new features have deadlines. Usually, specifications for an upcoming release are frozen midway into the release development cycle. To determine the exact deadlines, see the published release calendars by navigating to the specific release from the `OpenStack releases website`_. Task Tracking ~~~~~~~~~~~~~ - We track our bugs in Launchpad: https://bugs.launchpad.net/manila If you're looking for some smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag - We track future features as blueprints on Launchpad: https://blueprints.launchpad.net/manila - Unimplemented specifications are tracked here: https://specs.openstack.org/openstack/manila-specs/#unimplemented-specs These specifications need a new owner. If you're interested to pick them up and drive them to completion, you can update the corresponding blueprint and get in touch with the project maintainers for help Reporting a Bug ~~~~~~~~~~~~~~~ You found an issue and want to make sure we are aware of it? You can do so on `Launchpad `_. Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ When you submit your change through Gerrit, a number of automated Continuous Integration tests are run on your change. A change must receive a +1 vote from the `OpenStack CI system `_ in order for it to be merge-worthy. 
If these tests are failing and you can't determine why, contact the maintainers. See the :doc:`manila-review-policy` to understand our code review conventions. Generally, reviewers look at new code submissions pro-actively; if you do not have sufficient attention to your change, or are looking for help, do not hesitate to jump into the team's IRC channel, or bring our attention to your issue during a community meeting. The core team would prefer to have an open discussion instead of a one-on-one/private chat. Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ A `project team lead `_ is elected from the project contributors each cycle. Manila Project specific responsibilities for a lead are listed in the :doc:`project-team-lead`. .. _OpenStack Releases website: .. _OpenStack Discuss Mailing List: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss .. _Manila Project Team Lead guide: ../project-team-lead.rst .. _API Microversions: ../api_microversion_dev.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/database.rst0000664000175000017500000000415000000000000021773 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
The Database Layer ================== The :mod:`manila.db.api` Module ------------------------------- .. automodule:: manila.db.api :noindex: :members: :undoc-members: :show-inheritance: The Sqlalchemy Driver --------------------- The :mod:`manila.db.sqlalchemy.api` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.db.sqlalchemy.api :noindex: The :mod:`manila.db.sqlalchemy.models` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.db.sqlalchemy.models :noindex: :members: :undoc-members: :show-inheritance: Tests ----- Tests are lacking for the db api layer and for the sqlalchemy driver. Failures in the drivers would be detected in other test cases, though. DB migration revisions ---------------------- If a DB schema needs to be updated, a new DB migration file needs to be added in ``manila/db/migrations/alembic/versions``. To create such a file it's possible to use ``manila-manage db revision`` or the corresponding tox command:: tox -e dbrevision "change_foo_table" In addition every migration script must be tested. See examples in ``manila/tests/db/migrations/alembic/migrations_data_checks.py``. .. note:: When writing database migrations that create tables with unique constraints or foreign keys, please ensure that the ``mysql_charset`` matches the referenced table. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/development-environment-devstack.rst0000664000175000017500000002311600000000000026720 0ustar00zuulzuul00000000000000.. Copyright 2016 Red Hat, Inc. All Rights Reserved. not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. .. _setting-up-manila-devstack: Setting up a development environment with devstack ================================================== This page describes how to setup a working development environment that can be used in deploying ``manila`` and ``manila-ui`` on latest releases of Ubuntu, Fedora or CentOS. These instructions assume you are already familiar with git. We recommend using devstack to develop and test code changes to ``manila`` and/or ``manila-ui``, in order to simply evaluate the manila and/or project. Devstack is a shell script to build complete OpenStack development environments on a virtual machine. If you are not familiar with devstack, these pages can give you context: * `Testing Changes with DevStack `_ * `Devstack project documentation `_ Be aware that ``manila`` and ``manila-ui`` are not enabled in devstack by default; you will need to add a few lines to the devstack ``local.conf`` file to let devstack deploy and configure ``manila`` and ``manila-ui`` on your virtual machine. .. note:: If you do not intend to deploy with the OpenStack Dashboard (horizon) service, you can ignore instructions about enabling ``manila-ui``. Getting devstack ---------------- Start by cloning the devstack repository:: git clone https://opendev.org/openstack/devstack Change to devstack directory:: cd devstack/ You're now on ``master`` branch of devstack, switch to the branch you want to test or develop against. Sample local.conf files that get you started -------------------------------------------- Now that you have cloned the devstack repository, you need to configure devstack before deploying it. This is done with a ``local.conf`` file. For manila, the local.conf file can also determine which back end(s) are set up. 
The choice of back end(s) is important because there are optional API features in Manila `that are not supported by some drivers <../admin/share_back_ends_feature_support_mapping.html>`_. .. caution:: When using devstack with the below configurations, be aware that you will be setting up with node local storage. The `LVM`, `Generic`, `ZFSOnLinux` drivers have not been developed for production use. They exist to provide a vanilla development and testing environment for manila contributors. DHSS=False (`driver_handles_share_servers=False`) mode: ````````````````````````````````````````````````````````` This is the easier mode for new contributors. Manila share back-end drivers that operate in ``driver_handles_share_servers=False`` mode do not allow creating shares on private project networks. On the resulting stack, all manila shares created by you are exported on the host network and hence are accessible to any compute resource (e.g.: virtual machine, baremetal, container) that is able to reach the devstack host. * :download:`LVM driver ` * :download:`ZFSOnLinux driver ` * :download:`CEPHFS driver ` DHSS=True (`driver_handles_share_servers=True`) mode: ``````````````````````````````````````````````````````` You may use the following setups if you are familiar with manila, and would like to test with the project (tenant) isolation that manila provides on the network and data path. Manila share back-end drivers that operate in ``driver_handles_share_servers=True`` mode create shares on isolated project networks if told to do so. On the resulting stack, when creating a share, you must specify a share network to export the share to, and the share will be accessible to any compute resource (e.g.: Virtual machine, baremetal, containers) that is able to reach the share network you indicated. 
Typically, new contributors take a while to understand OpenStack networking, and we recommend that you familiarize yourself with the ``DHSS=False`` mode setup before attempting ``DHSS=True``. * :download:`Generic driver ` * :download:`Container driver ` Using a dummy back end driver ````````````````````````````` If you're absolutely new to manila code development, you may want to skip a real storage driver altogether and attempt a development environment that abstracts the back end storage layer. This could also be the situation if you're building API integrations such as CLI, UI or SDK clients. Here, you probably don't care about restrictions that individual back end choices bring you such as their lack of support for optional API features. Manila ships a fake backend driver called "Dummy Driver" that supports all API features and is capable of operating in both DHSS modes. You may use the following `local .conf` sample to bootstrap your devstack with a "Dummy" driver. Do remember however that you cannot really *use* the resources that are provisioned by this driver. * :download:`Dummy driver ` Building your devstack ---------------------- * Copy the appropriate sample local.conf file into the devstack folder on your virtual machine, make sure to name it ``local.conf`` * Make sure to read inline comments and customize values where necessary * If you would like to run minimal services in your stack, or allow devstack to bootstrap tempest testing framework for you, see :ref:`more-customization` * Finally, run the ``stack.sh`` script from within the devstack directory. We recommend that your run this inside a screen or tmux session because it could take a while:: ./stack.sh * After the script completes, you should have manila services running. 
You can verify that the services are running with the following commands:: $ systemctl status devstack@m-sch $ systemctl status devstack@m-shr $ systemctl status devstack@m-dat * By default, devstack sets up manila-api behind apache. The service name is ``httpd`` on Red Hat based systems and ``apache2`` on Debian based systems. * You may also use your "demo" credentials to invoke the command line clients:: $ source DEVSTACK_DIR/openrc admin demo $ manila service-list * The logs are accessible through ``journalctl``. The following commands let you query logs. You may use the ``-f`` option to tail these logs:: $ journalctl -a -o short-precise --unit devstack@m-sch $ journalctl -a -o short-precise --unit devstack@m-shr $ journalctl -a -o short-precise --unit devstack@m-dat * If running behind apache, the manila-api logs will be in ``/var/log/httpd/manila_api.log`` (Red Hat) or in ``/var/log/apache2/manila_api.log`` (Debian). * Manila UI will now be available through OpenStack Horizon; look for the Shares tab under Project > Share. .. _more-customization: More devstack customizations ---------------------------- Testing branches and changes submitted for review ````````````````````````````````````````````````` To test a patch in review:: enable_plugin manila https://opendev.org/openstack/manila If the ref is from review.opendev.org, it is structured as:: refs/changes/// For example, if you want to test patchset 4 of https://review.opendev.org/#/c/614170/, you can provide this in your ``local.conf``:: enable_plugin manila https://opendev.org/openstack/manila refs/changes/70/614170/4 ref can also simply be a stable branch name, for example:: enable_plugin manila https://opendev.org/openstack/manila stable/train Limiting the services enabled in your stack ```````````````````````````````````````````` Manila needs only a message queue (rabbitmq) and a database (mysql, postgresql) to operate. 
Additionally, keystone service provides project administration if necessary, all other OpenStack services are not necessary to set up a basic test system. [#f1]_ [#f2]_ You can add the following to your ``local.conf`` to deploy your stack in a minimal fashion. This saves you a lot of time and resources, but could limit your testing:: ENABLED_SERVICES=key,mysql,rabbit,tempest,manila,m-api,m-sch,m-shr,m-dat Optionally, you can deploy with Manila, Nova, Neutron, Glance and Tempest:: ENABLED_SERVICES=key,mysql,rabbit,tempest,g-api ENABLED_SERVICES+=n-api,n-cpu,n-cond,n-sch,n-crt,placement-api,placement-client ENABLED_SERVICES+=q-svc,q-dhcp,q-meta,q-l3,q-agt ENABLED_SERVICES+=tempest You can also enable ``tls-proxy`` with ``ENABLED_SERVICES`` to allow devstack to use Apache and setup a TLS proxy to terminate TLS connections. Using tls-proxy secures all OpenStack service API endpoints and inter-service communication on your devstack. Bootstrapping Tempest ````````````````````` Add the following options in your ``local.conf`` to set up tempest:: ENABLE_ISOLATED_METADATA=True TEMPEST_USE_TEST_ACCOUNTS=True TEMPEST_ALLOW_TENANT_ISOLATION=False TEMPEST_CONCURRENCY=8 .. [#f1] The Generic driver cannot be run without deploying Cinder, Nova, Glance and Neutron. .. [#f2] You must enable Horizon to use manila-ui. Horizon will not work well when Nova, Cinder, Glance and Neutron are not enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/development.environment.rst0000664000175000017500000001312400000000000025115 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Setting Up a Development Environment ==================================== This page describes how to setup a working Python development environment that can be used in developing manila on Ubuntu, Fedora or Mac OS X. These instructions assume you're already familiar with git. Refer to `Getting the code`_ for additional information. .. _Getting the code: http://wiki.openstack.org/GettingTheCode Following these instructions will allow you to run the manila unit tests. If you want to be able to run manila (i.e., create NFS/CIFS shares), you will also need to install dependent projects: nova, neutron, cinder and glance. For this purpose 'devstack' project can be used (A documented shell script to build complete OpenStack development environments). You can check out `Setting up a development environment with devstack`_ for instructions on how to enable manila on devstack. .. _Setting up a development environment with devstack: https://docs.openstack.org/manila/latest/contributor/development-environment-devstack.html Virtual environments -------------------- Manila development uses `virtualenv `__ to track and manage Python dependencies while in development and testing. This allows you to install all of the Python package dependencies in a virtual environment or "virtualenv" (a special subdirectory of your manila directory), instead of installing the packages at the system level. .. note:: Virtualenv is useful for running the unit tests, but is not typically used for full integration testing or production usage. Linux Systems ------------- .. 
note:: This section is tested for manila on Ubuntu and Fedora-based distributions. Feel free to add notes and change according to your experiences or operating system. Install the prerequisite packages. - On Ubuntu/Debian:: sudo apt-get install python-dev libssl-dev python-pip \ libmysqlclient-dev libxml2-dev libxslt-dev libpq-dev git \ git-review libffi-dev gettext graphviz libjpeg-dev - On RHEL8/Centos8:: sudo dnf install openssl-devel python3-pip mysql-devel \ libxml2-devel libxslt-devel postgresql-devel git git-review \ libffi-devel gettext graphviz gcc libjpeg-turbo-devel \ python3-tox python3-devel python3 .. note:: If using RHEL and dnf reports "No package python3-pip available" and "No package git-review available", use the EPEL software repository. Instructions can be found at ``_. - On Fedora 22 and higher:: sudo dnf install python-devel openssl-devel python-pip mysql-devel \ libxml2-devel libxslt-devel postgresql-devel git git-review \ libffi-devel gettext graphviz gcc libjpeg-turbo-devel \ python-tox python3-devel python3 .. note:: Additionally, if using Fedora 23, ``redhat-rpm-config`` package should be installed so that development virtualenv can be built successfully. Mac OS X Systems ---------------- Install virtualenv:: sudo easy_install virtualenv Check the version of OpenSSL you have installed:: openssl version If you have installed OpenSSL 1.0.0a, which can happen when installing a MacPorts package for OpenSSL, you will see an error when running ``manila.tests.auth_unittest.AuthTestCase.test_209_can_generate_x509``. The stock version of OpenSSL that ships with Mac OS X 10.6 (OpenSSL 0.9.8l) or Mac OS X 10.7 (OpenSSL 0.9.8r) works fine with manila. Getting the code ---------------- Grab the code:: git clone https://opendev.org/openstack/manila cd manila Running unit tests ------------------ The preferred way to run the unit tests is using ``tox``. 
Tox executes tests in isolated environment, by creating separate virtualenv and installing dependencies from the ``requirements.txt`` and ``test-requirements.txt`` files, so the only package you install is ``tox`` itself:: sudo pip install tox Run the unit tests with:: tox -e py{python-version} Example:: tox -epy36 See :doc:`unit_tests` for more details. .. _virtualenv: Manually installing and using the virtualenv -------------------------------------------- You can also manually install the virtual environment:: tox -epy36 --notest This will install all of the Python packages listed in the ``requirements.txt`` file into your virtualenv. To activate the Manila virtualenv you can run:: $ source .tox/py36/bin/activate To exit your virtualenv, just type:: $ deactivate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running:: $ tox -e venv -- Contributing Your Work ---------------------- Once your work is complete you may wish to contribute it to the project. Manila uses the Gerrit code review system. For information on how to submit your branch to Gerrit, see GerritWorkflow_. .. _GerritWorkflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/documenting_your_work.rst0000664000175000017500000002077500000000000024676 0ustar00zuulzuul00000000000000.. _documenting_your_work: ===================== Documenting your work ===================== As with most OpenStack services and libraries, manila suffers from appearing very complicated to understand, develop, deploy, administer and use. As OpenStack developers working on manila, our responsibility goes beyond introducing new features and maintaining existing features. We ought to provide adequate documentation for the benefit of all kinds of audiences. 
The guidelines below will explain how you can document (or maintain documentation for) new (or existing) features and bug fixes in the core manila project and other projects that are part of the manila suite. Where to add documentation? ~~~~~~~~~~~~~~~~~~~~~~~~~~~ OpenStack User Guide -------------------- - Any documentation targeted at end users of manila in OpenStack needs to go here. This contains high level information about any feature as long as it is available on ``python-manilaclient`` and/or ``manila-ui``. - If you develop an end user facing feature, you need to provide an overview, use cases and example work-flows as part of this documentation. - The source files for the user guide live in manila's code tree. - **Link**: `User guide `_ OpenStack Administrator Guide ----------------------------- - Documentation for administrators of manila deployments in OpenStack clouds needs to go here. - Document instructions for administrators to perform necessary set up for utilizing a feature, along with managing and troubleshooting manila when the feature is used. - Relevant configuration options may be mentioned here briefly. - The source files for the administrator guide live in manila's code tree. - **Link**: `Administrator guide `_ OpenStack Configuration Reference --------------------------------- - Instructions regarding configuration of different manila back ends need to be added in this document. - The configuration reference also contains sections where manila's configuration options are auto-documented. - It contains sample configuration files for using manila with various configuration options. - If you are a driver maintainer, please ensure that your driver and all of its relevant configuration is documented here. - The source files for the configuration guide live in manila's code tree. 
- **Link**: `Manila release configuration reference `_ OpenStack Installation Tutorial ------------------------------- - Instructions regarding setting up manila on OpenStack need to be documented here. - This tutorial covers step-by-step deployment of OpenStack services using a functional example architecture suitable for new users of OpenStack with sufficient Linux experience. - The instructions are written with reference to different distributions. - The source files for this tutorial live in manila's code tree. - **Link**: `Draft installation tutorial `_ OpenStack API Reference ----------------------- - When you add or change a REST API in manila, you will need to add or edit descriptions of the API, request and response parameters, microversions and expected HTTP response codes as part of the API reference. - For releases prior to Newton, the API reference was maintained in `Web Application Description Language (WADL) `_ in the `api-site `_ project. - Since the Newton release, manila's API reference is maintained in-tree in custom YAML/JSON format files. - **Link**: `REST API reference of the Shared File Systems Project v2.0 `_ Manila Developer Reference -------------------------- - When working on a feature in manila, provide judicious inline documentation in the form of comments and docstrings. Code is our best developer reference. - Driver entry-points must be documented with docstrings explaining the expected behavior from a driver routine. - Apart from inline documentation, further developer facing documentation will be necessary when you are introducing changes that will affect vendor drivers, consumers of the manila database and when building a utility in manila that can be consumed by other developers. - The developer reference for manila is maintained in-tree. - Feel free to use it as a sandbox for other documentation that does not live in manila's code-tree. 
- **Link**: `Manila developer reference `_ OpenStack Security Guide ------------------------ - Any feature that has a security impact needs to be documented here. - In general, administrators will follow the guidelines regarding best practices of setting up their manila deployments with this guide. - Any changes to ``policy.yaml`` based authorization, share network related security, ``access`` to manila resources, tenant and user related information needs to be documented here. - **Link**: `Security guide `_ - **Repository**: The security guide is maintained within the `OpenStack Security-doc project `_ OpenStack Command Line Reference -------------------------------- - Help text provided in the ``python-manilaclient`` is extracted into this document automatically. - No manual corrections are allowed on this repository; make necessary corrections in the ``python-manilaclient`` repository." - **Link**: `Manila CLI reference `_. Important things to note ~~~~~~~~~~~~~~~~~~~~~~~~ - When implementing a new feature, use appropriate Commit Message Tags (:ref:`commit_message_tags`). - Using the ``DocImpact`` flag in particular will create a ``[doc]`` bug under the `manila project in launchpad `_. When your code patch merges, assign this bug to yourself and track your documentation changes with it. - When writing documentation outside of manila, use either a commit message header that includes the word ``Manila`` or set the topic of the change-set to ``manila-docs``. This will make it easy for manila reviewers to find your patches to aid with a technical content review. - When writing documentation in user/admin/config/api/install guides, *always* refer to the project with its service name: ``Shared File Systems service`` and not the service type (``share``) or the project name (``manila``). - Follow documentation styles prescribed in the `OpenStack Documentation Contributor Guide `_. Pay heed to the `RST formatting conventions `_ and `Writing style `_. 
- Use CamelCase to spell out `OpenStack` and sentence casing to spell out service types, ex: `Shared File Systems service` and lower case to spell out project names, ex: `manila` (except when the project name is in the beginning of a sentence or a title). - **ALWAYS** use a first party driver when documenting a feature in the user or administrator guides. Provide cross-references to configuration reference sections to lead readers to detailed setup instructions for these drivers. - The manila developer reference, the OpenStack user guide, administrator reference, API reference and security guide are always *current*, i.e, get built with every commit in the respective codebase. Therefore, documentation added here need not be backported to previous releases. - You may backport changes to some documentation such as the configuration reference and the installation guide. - **Important "documentation" that isn't really documentation** - ``specs`` and ``release notes`` are *NOT* documentation. A specification document is written to initiate a dialogue and gather feedback regarding the design of a feature. Neither developers nor users will regard a specification document as official documentation after a feature has been implemented. Release notes (:ref:`adding_release_notes`) allow for gathering release summaries and they are not used to understand, configure, use or troubleshoot any manila feature. - **Less is not more, more is more** - Always add detail when possible. The health and maturity of our community is reflected in our documentation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/driver_filter_goodness_weigher.rst0000664000175000017500000002611200000000000026504 0ustar00zuulzuul00000000000000.. 
_driver_filter_goodness_weigher: ========================================================== Configure and use driver filter and weighing for scheduler ========================================================== OpenStack manila enables you to choose a share back end based on back-end specific properties by using the DriverFilter and GoodnessWeigher for the scheduler. The driver filter and weigher scheduling can help ensure that the scheduler chooses the best back end based on requested share properties as well as various back-end specific properties. What is driver filter and weigher and when to use it ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver filter and weigher give you the ability to more finely control how manila scheduler chooses the best back end to use when handling a share provisioning request. One example scenario where using the driver filter and weigher can be if a back end that utilizes thin-provisioning is used. The default filters use the ``free capacity`` property to determine the best back end, but that is not always perfect. If a back end has the ability to provide a more accurate back-end specific value, it can be used as part of the weighing process to find the best possible host for a new share. Some more examples of the use of these filters could be with respect to back end specific limitations. For example, some back ends may be limited by the number of shares that can be created on them, or by the minimum or maximum size allowed per share or by the fact that provisioning beyond a particular capacity affects their performance. The driver filter and weigher can provide a way for these limits to be accounted for during scheduling. Defining your own filter and goodness functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can define your own filter and goodness functions through the use of various capabilities that manila exposes. 
Capabilities exposed include information about the share request being made, ``share_type`` settings, and back-end specific information about drivers. All of these allow for a lot of control over how the ideal back end for a share request will be decided. The ``filter_function`` option is a string defining a function that will determine whether a back end should be considered as a potential candidate by the scheduler. The ``goodness_function`` option is a string defining a function that will rate the quality of the potential host (0 to 100, 0 lowest, 100 highest). .. important:: The driver filter and weigher will use default values for filter and goodness functions for each back end if you do not define them yourself. If complete control is desired then a filter and goodness function should be defined for each of the back ends in the ``manila.conf`` file. Supported operations in filter and goodness functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Below is a table of all the operations currently usable in custom filter and goodness functions created by you: +--------------------------------+-------------------------+ | Operations | Type | +================================+=========================+ | +, -, \*, /, ^ | standard math | +--------------------------------+-------------------------+ | not, and, or, &, \|, ! | logic | +--------------------------------+-------------------------+ | >, >=, <, <=, ==, <>, != | equality | +--------------------------------+-------------------------+ | +, - | sign | +--------------------------------+-------------------------+ | x ? a : b | ternary | +--------------------------------+-------------------------+ | abs(x), max(x, y), min(x, y) | math helper functions | +--------------------------------+-------------------------+ .. caution:: Syntax errors in filter or goodness strings are thrown at a share creation time. 
Available capabilities when creating custom functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are various properties that can be used in either the ``filter_function`` or the ``goodness_function`` strings. The properties allow access to share info, qos settings, extra specs, and so on. The following capabilities are currently available for use: Host capabilities for a back end -------------------------------- host The host's name share\_backend\_name The share back end name vendor\_name The vendor name driver\_version The driver version storage\_protocol The storage protocol qos Boolean signifying whether QoS is supported total\_capacity\_gb The total capacity in gibibytes allocated\_capacity\_gb The allocated capacity in gibibytes free\_capacity\_gb The free capacity in gibibytes reserved\_percentage The reserved storage percentage driver\_handles\_share\_server The driver mode used by this host thin\_provisioning Whether or not thin provisioning is supported by this host updated Last time this host's stats were updated dedupe Whether or not dedupe is supported by this host compression Whether or not compression is supported by this host snapshot\_support Whether or not snapshots are supported by this host replication\_domain The replication domain of this host replication\_type The replication type supported by this host provisioned\_capacity\_gb The provisioned capacity of this host in gibibytes pools This host's storage pools max\_over\_subscription\_ratio This hosts's over subscription ratio for thin provisioning Capabilities specific to a back end ----------------------------------- These capabilities are determined by the specific back end you are creating filter and goodness functions for. Some back ends may not have any capabilities available here. 
Requested share capabilities ---------------------------- availability\_zone\_id ID of the availability zone of this share share\_network\_id ID of the share network used by this share share\_server\_id ID of the share server of this share host Host name of this share is\_public Whether or not this share is public snapshot\_support Whether or not snapshots are supported by this share status Status for the requested share share\_type\_id The share type ID share\_id The share ID user\_id The share's user ID project\_id The share's project ID id The share instance ID replica\_state The share's replication state replication\_type The replication type supported by this share snapshot\_id The ID of the snapshot of which this share was created from size The size of the share in gibibytes share\_proto The protocol of this share metadata General share metadata The most used capability from this list will most likely be the ``size``. Extra specs for the requested share type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ View the available properties for share types by running: .. code-block:: console $ manila extra-specs-list Driver filter and weigher usage examples ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Below are examples for using the filter and weigher separately, together, and using driver-specific properties. Example ``manila.conf`` file configuration for customizing the filter function: .. code-block:: ini [default] enabled_backends = generic1, generic2 [generic1] share_driver = manila.share.drivers.generic.GenericShareDriver share_backend_name = GENERIC1 filter_function = "share.size < 10" [generic2] share_driver = manila.share.drivers.generic.GenericShareDriver share_backend_name = GENERIC2 filter_function = "share.size >= 10" The above example will filter share to different back ends depending on the size of the requested share. Shares with a size less than 10 GB are sent to generic1 and shares with a size greater than or equal to 10 GB are sent to generic2. 
Example ``manila.conf`` file configuration for customizing the goodness function: .. code-block:: ini [default] enabled_backends = generic1, generic2 [generic1] share_driver = manila.share.drivers.generic.GenericShareDriver share_backend_name = GENERIC1 goodness_function = "(share.size < 5) ? 100 : 50" [generic2] share_driver = manila.share.drivers.generic.GenericShareDriver share_backend_name = GENERIC2 goodness_function = "(share.size >= 5) ? 100 : 25" The above example will determine the goodness rating of a back end based on the requested share's size. The example shows how the ternary if statement can be used in a filter or goodness function. If a requested share is of size 10 GB then generic1 is rated as 50 and generic2 is rated as 100. In this case generic2 wins. If a requested share is of size 3 GB then generic1 is rated 100 and generic2 is rated 25. In this case generic1 would win. Example ``manila.conf`` file configuration for customizing both the filter and goodness functions: .. code-block:: ini [default] enabled_backends = generic1, generic2 [generic1] share_driver = manila.share.drivers.generic.GenericShareDriver share_backend_name = GENERIC1 filter_function = "stats.total_capacity_gb < 500" goodness_function = "(share.size < 25) ? 100 : 50" [generic2] share_driver = manila.share.drivers.generic.GenericShareDriver share_backend_name = GENERIC2 filter_function = "stats.total_capacity_gb >= 500" goodness_function = "(share.size >= 25) ? 100 : 75" The above example combines the techniques from the first two examples. The best back end is now decided based on the total capacity of the back end and the requested share's size. Example ``manila.conf`` file configuration for accessing driver specific properties: .. 
code-block:: ini [default] enabled_backends = example1, example2, example3 [example1] share_driver = manila.share.drivers.example.ExampleShareDriver share_backend_name = EXAMPLE1 filter_function = "share.size < 5" goodness_function = "(capabilities.provisioned_capacity_gb < 30) ? 100 : 50" [example2] share_driver = manila.share.drivers.example.ExampleShareDriver share_backend_name = EXAMPLE2 filter_function = "shares.size < 5" goodness_function = "(capabilities.provisioned_capacity_gb < 80) ? 100 : 50" [example3] share_driver = manila.share.drivers.example.ExampleShareDriver share_backend_name = EXAMPLE3 goodness_function = "55" The above is an example of how back-end specific capabilities can be used in the filter and goodness functions. In this example, the driver has a ``provisioned_capacity_gb`` capability that is being used to determine which back end gets used during a share request. In the above example, ``example1`` and ``example2`` will handle share requests for all shares with a size less than 5 GB. ``example1`` will have priority until the provisioned capacity of all shares on it hits 30 GB. After that, ``example2`` will have priority until the provisioned capacity of all shares on it hits 80 GB. ``example3`` will collect all shares greater or equal to 5 GB as well as all shares once ``example1`` and ``example2`` lose priority. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/driver_requirements.rst0000664000175000017500000003541700000000000024337 0ustar00zuulzuul00000000000000.. Copyright (c) 2015 Hitachi Data Systems All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Manila minimum requirements and features ======================================== In order for a driver to be accepted into manila code base, there are certain minimum requirements and features that must be met, in order to ensure interoperability and standardized manila functionality among cloud providers. At least one driver mode (:term:`DHSS` true/false) -------------------------------------------------- Driver modes determine if the driver is managing network resources (:term:`DHSS` = true) in an automated way, in order to segregate tenants and private networks by making use of manila Share Networks, or if it is up to the administrator to manually configure all networks (:term:`DHSS` = false) and be responsible for segregation, if that is desired. At least one driver mode must be supported. In :term:`DHSS` = true mode, Share Server entities are used, so the driver must implement functions that setup and teardown such servers. At least one file system sharing protocol ----------------------------------------- In order to serve shares as a shared file system service, the driver must support at least one file system sharing protocol, which can be a new protocol or one of the currently supported protocols. The current list of supported protocols is as follows: - NFS - CIFS - GlusterFS - HDFS - MapRFS - CephFS Access rules ------------ Access rules control how shares are accessible, by whom, and what the level of access is. Access rule operations include allowing access and denying access to a given share. The authentication type should be based on IP, User and/or Certificate. 
Drivers must support read-write and read-only access levels for each supported protocol, either through individual access rules or separate export locations. Shares ------ Share servicing is the core functionality of a shared file system service, so a driver must be able to create and delete shares. Share extending --------------- In order to best satisfy cloud service requirements, shares must be elastic, so drivers must implement a share extend function that allows shares' size to be increased. Capabilities ------------ In order for manila to function accordingly to the driver being used, the driver must provide a set of information to manila, known as capabilities. Share driver can use Share type extra-specs (scoped and un-scoped) to serve new shares. See :doc:`../admin/capabilities_and_extra_specs` for more information. At a minimum your driver must report: - share_backend_name: a name for the backend; - driver_handles_share_servers: driver mode, whether this driver instance handles share servers, possible values are true or false; - vendor_name: driver vendor name; - driver_version: current driver instance version; - storage_protocol: list of shared file system protocols supported by this driver instance; - total_capacity_gb: total amount of storage space provided, in GB; - free_capacity_gb: amount of storage space available for use, in GB; - reserved_percentage: percentage of total storage space to be kept from being used. 
Certain features, if supported by drivers, need to be reported in order to function correctly in manila, such as: - dedupe: whether the backend supports deduplication; - compression: whether the backend supports compressed shares; - thin_provisioning: whether the backend is overprovisioning shares; - pools: list of storage pools managed by this driver instance; - qos: whether the backend supports quality of service for shares; - replication_domain: string specifying a common group name for all backends that can replicate between each other; - replication_type: string specifying the type of replication supported by the driver. Can be one of ('readable', 'writable' or 'dr'). - security_service_update_support: boolean specifying whether the driver supports updating or adding security services in an already deployed share server. It defaults to ``False``. Below is an example of drivers with multiple pools. "my" is used as an example vendor prefix: :: { 'driver_handles_share_servers': 'False', #\ 'share_backend_name': 'My Backend', # backend level 'vendor_name': 'MY', # mandatory/fixed 'driver_version': '1.0', # stats & capabilities 'storage_protocol': 'NFS_CIFS', #/ #\ 'my_capability_1': 'custom_val', # "my" optional vendor 'my_capability_2': True, # stats & capabilities #/ 'pools': [ {'pool_name': 'thin-dedupe-compression pool', #\ 'total_capacity_gb': 500, # mandatory stats for 'free_capacity_gb': 230, # pools 'reserved_percentage': 0, #/ #\ 'dedupe': True, # common capabilities 'compression': True, # 'snapshot_support': True, # 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'qos': True, # this backend supports QoS 'thin_provisioning': True, # 'max_over_subscription_ratio': 10, # (mandatory for thin) 'provisioned_capacity_gb': 270, # (mandatory for thin) # # 'replication_type': 'dr', # this backend supports # replication_type 'dr' #/ 'my_dying_disks': 100, #\ 'my_super_hero_1': 'Hulk', # "my" optional vendor 'my_super_hero_2': 
'Spider-Man', # stats & capabilities #/ #\ # can replicate to other 'replication_domain': 'asgard', # backends in # replication_domain 'asgard' #/ 'ipv4_support': True, 'ipv6_support': True, 'security_service_update_support': False, }, {'pool_name': 'thick pool', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'qos': False, 'snapshot_support': True, 'create_share_from_snapshot_support': False, # this pool does not # allow creating # shares from # snapshots 'revert_to_snapshot_support': True, 'reserved_percentage': 0, 'dedupe': False, 'compression': False, 'thin_provisioning': False, 'replication_type': None, 'my_dying_disks': 200, 'my_super_hero_1': 'Batman', 'my_super_hero_2': 'Robin', 'ipv4_support': True, 'ipv6_support': True, 'security_service_update_support': False, }, ] } Continuous Integration systems ------------------------------ Every driver vendor must supply a CI system that tests its drivers continuously for each patch submitted to OpenStack gerrit. This allows for better QA and quicker response and notification for driver vendors when a patch submitted affects an existing driver. The CI system must run all applicable tempest tests, test all patches Zuul has posted +1 and post its test results. .. note:: for more information please see http://docs.openstack.org/infra/system-config/third_party.html Unit tests ---------- All drivers submitted must be contemplated with unit tests covering at least 90% of the code, preferably 100% if possible. Unit tests must use mock framework and be located in-tree using a structure that mirrors the functional code, such as directory names and filenames. See template below: :: manila/[tests/]path/to/brand/new/[test_]driver.py Documentation ------------- Drivers submitted must provide and maintain related documentation on openstack-manuals, containing instructions on how to properly install and configure. The intended audience for this manual is cloud operators and administrators. 
Also, driver maintainers must update the manila share features support mapping documentation found at https://docs.openstack.org/manila/latest/admin/share_back_ends_feature_support_mapping.html Manila optional requirements and features since Mitaka ====================================================== Additional to the minimum required features supported by manila, other optional features can be supported by drivers as they are already supported in manila and can be accessed through the API. Snapshots --------- Share Snapshots allow for data respective to a particular point in time to be saved in order to be used later. In manila API, share snapshots taken can only be restored by creating new shares from them, thus the original share remains unaffected. If Snapshots are supported by drivers, they must be crash-consistent. Managing/Unmanaging shares -------------------------- If :term:`DHSS` = false mode is used, then drivers may implement a function that supports reading existing shares in the backend that were not created by manila. After the previously existing share is registered in manila, it is completely controlled by manila and should not be handled externally anymore. Additionally, a function that de-registers such shares from manila but do not delete from backend may also be supported. Share shrinking --------------- Manila API supports share shrinking, thus a share can be shrunk in a similar way it can be extended, but the driver is responsible for making sure no data is compromised. Share ensuring -------------- In some situations, such as when the driver is restarted, manila attempts to perform maintenance on created shares, on the purpose of ensuring previously created shares are available and being serviced correctly. The driver can implement this function by checking shares' status and performing maintenance operations if needed, such as re-exporting. 
Manila experimental features since Mitaka ========================================= Some features are initially released as experimental and can be accessed by including specific additional HTTP Request headers. Those features are not recommended for production cloud environments while in experimental stage. Share Migration --------------- Shares can be migrated between different backends and pools. Manila implements migration using an approach that works for any manufacturer, but driver vendors can implement a better optimized migration function for when migration involves backends or pools related to the same vendor. Share Groups (since Ocata) -------------------------- The share groups provides the ability to manage a group of shares together. This feature is implemented at the manager level, every driver gets this feature by default. If a driver wants to override the default behavior to support additional functionalities such as consistent group snapshot, the driver vendors may report this capability as a group capability, such as: Ordered writes, Consistent snapshots, Group replication. Drivers need to report group capabilities as part of the updated stats (e.g. capacity) and filled in 'share_group_stats' node for their back end. Share group type group-specs (scoped and un-scoped) are available for the driver implementation to use as-needed. Below is an example of the share stats payload from the driver having multiple pools and group capabilities. 
"my" is used as an example vendor prefix: :: { 'driver_handles_share_servers': 'False', #\ 'share_backend_name': 'My Backend', # backend level 'vendor_name': 'MY', # mandatory/fixed 'driver_version': '1.0', # stats & capabilities 'storage_protocol': 'NFS_CIFS', #/ #\ 'my_capability_1': 'custom_val', # "my" optional vendor 'my_capability_2': True, # stats & capabilities #/ 'share_group_stats': { #\ 'my_group_capability_1': 'custom_val', # "my" optional vendor 'my_group_capability_2': True, # stats & group capabilities #/ 'consistent_snapshot_support': 'host', #\ # common group capabilities #/ }, ] } .. note:: for more information please see :doc:`../admin/group_capabilities_and_extra_specs` Share Replication ----------------- Replicas of shares can be created for either data protection (for disaster recovery) or for load sharing. In order to utilize this feature, drivers must report the ``replication_type`` they support as a capability and implement necessary methods. More details can be found at: :doc:`../admin/shared-file-systems-share-replication` Update "used_size" of shares ---------------------------- Drivers can update, for all the shares created on a particular backend, the consumed space in GiB. While the polling interval for drivers to update this information is configurable, drivers can choose to submit cached information as necessary, but specify a time at which this information was "gathered_at". Share Server Migration (Since Victoria) --------------------------------------- Shares servers can be migrated between different backends. Driver vendors need to implement the share server migration functions in order to migrate share servers in an efficient way. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/experimental_apis.rst0000664000175000017500000000651600000000000023750 0ustar00zuulzuul00000000000000Experimental APIs ================= Background ---------- Manila uses API microversions to allow natural evolution of its REST APIs over time. But microversions alone cannot solve the question of how to ship APIs that are experimental in nature, are expected to change at any time, and could even be removed entirely without a typical deprecation period. In conjunction with microversions, manila has added a facility for marking individual REST APIs as experimental. To call an experimental API, clients must include a specific HTTP header, ``X-OpenStack-Manila-API-Experimental``, with a value of ``True``. If a user calls an experimental API without including the experimental header, the server would respond with ``HTTP/404``. This forces the client to acknowledge the experimental status of the API and prevents anyone from building an application around a manila feature without realizing the feature could change significantly or even disappear. On the other hand, if a request is made to a non-experimental manila API with ``X-OpenStack-Manila-API-Experimental: True``, the server would respond as if the header had not been included. This is a convenience mechanism, as it allows the client to specify both the requested API version as well as the experimental header (if desired) in one place instead of having to set the headers separately for each API call (although that would be fine, too). When do I need to set an API experimental? 
------------------------------------------ An API should be marked as experimental if any of the following is true: - the API is not yet considered a stable, core API - the API is expected to change in later releases - the API could be removed altogether if a feature is redesigned - the API controls a feature that could change or be removed When do I need to remove the experimental annotation from an API? ----------------------------------------------------------------- When the community is satisfied that an experimental feature and its APIs have had sufficient time to gather and incorporate user feedback to consider it stable, which could be one or more OpenStack release cycles, any relevant APIs must be re-released with a microversion bump and without the experimental flag. The maturation period can vary between features, but experimental is NOT a stable state, and an experimental feature should not be left in that state any longer than necessary. Because experimental APIs have no conventional deprecation period, the manila core team may optionally choose to remove any experimental versions of an API at the same time that a microversioned stable version is added. In Code ------- The ``@api_version`` decorator defined in ``manila/api/openstack/wsgi.py``, which is used for specifying API versions on top-level Controller methods, also allows for tagging an API as experimental. For example: In the controller class:: @wsgi.Controller.api_version("2.4", experimental=True) def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``X-OpenStack-Manila-API-Version`` of >= ``2.4``. and had also included ``X-OpenStack-Manila-API-Experimental: True``. If they had specified a lower version (or not specified it and received a lower default version), or if they had failed to include the experimental header, the server would respond with ``HTTP/404``. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/fakes.rst0000664000175000017500000000343400000000000021324 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Fake Drivers ============ When the real thing isn't available and you have some development to do these fake implementations of various drivers let you get on with your day. The :mod:`fake_compute` Module ------------------------------ .. automodule:: manila.tests.fake_compute :noindex: :members: :undoc-members: :show-inheritance: The :mod:`fake_driver` Module ----------------------------- .. automodule:: manila.tests.fake_driver :noindex: :members: :undoc-members: :show-inheritance: The :mod:`fake_network` Module ------------------------------ .. automodule:: manila.tests.fake_service_instance :noindex: :members: :undoc-members: :show-inheritance: The :mod:`fake_utils` Module ---------------------------- .. automodule:: manila.tests.fake_utils :noindex: :members: :undoc-members: :show-inheritance: The :mod:`fake_volume` Module ------------------------------ .. 
automodule:: manila.tests.fake_volume :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/ganesha.rst0000664000175000017500000003117100000000000021640 0ustar00zuulzuul00000000000000.. Copyright 2015 Red Hat, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ganesha Library =============== The Ganesha Library provides base classes that can be used by drivers to provision shares via NFS (NFSv3 and NFSv4), utilizing the NFS-Ganesha NFS server. Supported operations -------------------- - Allow NFS Share access - Only IP access type is supported. - Deny NFS Share access Supported manila drivers ------------------------ - CephFS driver uses ``ganesha.GaneshaNASHelper2`` library class - GlusterFS driver uses ``ganesha.GaneshaNASHelper`` library class Requirements ------------ - Preferred: `NFS-Ganesha `_ v2.4 or later, which allows dynamic update of access rules. Use with manila's ``ganesha.GaneshaNASHelper2`` class as described later in :ref:`using_ganesha_library`. (or) `NFS-Ganesha `_ v2.5.4 or later that allows dynamic update of access rules, and can make use of highly available Ceph RADOS (distributed object storage) as its shared storage for NFS client recovery data, and exports. Use with Ceph v12.2.2 or later, and ``ganesha.GaneshaNASHelper2`` library class in manila Queens release or later. 
- For use with limitations documented in :ref:`ganesha_known_issues`: `NFS-Ganesha `_ v2.1 to v2.3. Use with manila's ``ganesha.GaneshaNASHelper`` class as described later in :ref:`using_ganesha_library`. NFS-Ganesha configuration ------------------------- The library has just modest requirements against general NFS-Ganesha (in the following: Ganesha) configuration; a best effort was made to remain agnostic towards it as much as possible. This section describes the few requirements. Note that Ganesha's concept of storage backend modules is called FSAL ("File System Abstraction Layer"). The FSAL the driver intends to leverage needs to be enabled in Ganesha config. Beyond that (with default manila config) the following line is needed to be present in the Ganesha config file (that defaults to /etc/ganesha/ganesha.conf): ``%include /etc/ganesha/export.d/INDEX.conf`` The above paths can be customized through manila configuration as follows: - `ganesha_config_dir` = toplevel directory for Ganesha configuration, defaults to /etc/ganesha - `ganesha_config_path` = location of the Ganesha config file, defaults to ganesha.conf in `ganesha_config_dir` - `ganesha_export_dir` = directory where manila generated config bits are stored, defaults to `export.d` in `ganesha_config_dir`. The following line is required to be included (with value expanded) in the Ganesha config file (at `ganesha_config_path`): ``%include /INDEX.conf`` In versions 2.5.4 or later, Ganesha can store NFS client recovery data in Ceph RADOS, and also read exports stored in Ceph RADOS. These features are useful to make Ganesha server that has access to a Ceph (luminous or later) storage backend, highly available. 
The Ganesha library class `GaneshaNASHelper2` (in manila Queens or later) allows you to store Ganesha exports directly in a shared storage, RADOS objects, by setting the following manila config options in the driver section: - `ganesha_rados_store_enable` = 'True' to persist Ganesha exports and export counter in Ceph RADOS objects - `ganesha_rados_store_pool_name` = name of the Ceph RADOS pool to store Ganesha exports and export counter objects - `ganesha_rados_export_index` = name of the Ceph RADOS object used to store a list of export RADOS object URLs (defaults to 'ganesha-export-index') Check out the `cephfs_driver` documentation for an example driver section that uses these options. To allow Ganesha to read from RADOS objects add the below code block in ganesha's configuration file, substituting values per your setup. .. code-block:: console # To read exports from RADOS objects RADOS_URLS { ceph_conf = "/etc/ceph/ceph.conf"; userid = "admin"; } # Replace with actual pool name, and export index object %url rados:/// # To store client recovery data in the same RADOS pool NFSv4 { RecoveryBackend = "rados_kv"; } RADOS_KV { ceph_conf = "/etc/ceph/ceph.conf"; userid = "admin"; # Replace with actual pool name pool = ; } For a fresh setup, make sure to create the Ganesha export index object as an empty object before starting the Ganesha server. .. code-block:: console echo | sudo rados -p ${GANESHA_RADOS_STORE_POOL_NAME} put ganesha-export-index - Further Ganesha related manila configuration -------------------------------------------- There are further Ganesha related options in manila (which affect the behavior of Ganesha, but do not affect how to set up the Ganesha service itself). These are: - `ganesha_service_name` = name of the system service representing Ganesha, defaults to ganesha.nfsd - `ganesha_db_path` = location of on-disk database storing permanent Ganesha state, e.g. 
an export ID counter to generate export IDs for shares (or) When `ganesha_rados_store_enabled` is set to True, the ganesha export counter is stored in a Ceph RADOS object instead of in a SQLite database local to the manila driver. The counter can be optionally configured with, `ganesha_rados_export_counter` = name of the Ceph RADOS object used as the Ganesha export counter (defaults to 'ganesha-export-counter') - `ganesha_export_template_dir` = directory from where Ganesha loads export customizations (cf. "Customizing Ganesha exports"). .. _using_ganesha_library: Using Ganesha Library in drivers -------------------------------- A driver that wants to use the Ganesha Library has to inherit from ``driver.GaneshaMixin``. The driver has to contain a subclass of ``ganesha.GaneshaNASHelper2``, instantiate it along with the driver instance and delegate ``update_access`` method to it (when appropriate, i.e., when ``access_proto`` is NFS). .. note:: You can also subclass ``ganesha.GaneshaNASHelper``. It works with NFS-Ganesha v2.1 to v2.3 that doesn't support dynamic update of exports. To update access rules without having to restart NFS-Ganesha server, the class manipulates exports created per share access rule (rather than per share) introducing limitations documented in :ref:`ganesha_known_issues`. In the following we explain what has to be implemented by the ``ganesha.GaneshaNASHelper2`` subclass (to which we refer as "helper class"). Ganesha exports are described by so-called *Ganesha export blocks* (introduced in the 2.* release series), that is, snippets of Ganesha config specifying key-pair values. The Ganesha Library generates sane default export blocks for the exports it manages, with one thing left blank, the so-called *FSAL subblock*. The helper class has to implement the ``_fsal_hook`` method which returns the FSAL subblock (in Python represented as a dict with string keys and values). 
It has one mandatory key, ``Name``, to which the value should be the name of the FSAL (eg.: ``{"Name": "CEPH"}``). Further content of it is optional and FSAL specific. Customizing Ganesha exports --------------------------- As noted, the Ganesha Library provides sane general defaults. However, the driver is allowed to: - customize defaults - allow users to customize exports The config format for Ganesha Library is called *export block template*. They are syntactically either Ganesha export blocks, (please consult the Ganesha documentation about the format), or isomorphic JSON (as Ganesha export blocks are by-and-large equivalent to arrayless JSON), with two special placeholders for values: ``@config`` and ``@runtime``. ``@config`` means a value that shall be filled from manila config, and ``@runtime`` means a value that's filled at runtime with dynamic data. As an example, we show the library's defaults in JSON format (also valid Python literal): :: { "EXPORT": { "Export_Id": "@runtime", "Path": "@runtime", "FSAL": { "Name": "@config" }, "Pseudo": "@runtime", "SecType": "sys", "Tag": "@runtime", "CLIENT": { "Clients": "@runtime", "Access_Type": "RW" }, "Squash": "None" } } The Ganesha Library takes these values from *manila/share/drivers/ganesha/conf/00-base-export-template.conf* where the same data is stored in Ganesha conf format (also supplied with comments). 
For customization, the driver has to extend the ``_default_config_hook`` method as follows: - take the result of the super method (a dict representing an export block template) - set up another export block dict that include your custom values, either by - using a predefined export block dict stored in code - loading a predefined export block from the manila source tree - loading an export block from an user exposed location (to allow user configuration) - merge the two export block dict using the ``ganesha_utils.patch`` method - return the result With respect to *loading export blocks*, that can be done through the utility method ``_load_conf_dir``. Known Restrictions ------------------ - The library does not support network segmented multi-tenancy model but instead works over a flat network, where the tenants share a network. .. _ganesha_known_issues: Known Issues ------------ Following issues concern only users of `ganesha.GaneshaNASHelper` class that works with NFS-Ganesha v2.1 to v2.3. - The export location for shares of a driver that uses the Ganesha Library will be of the format ``:/share-``. However, this is incomplete information, because it pertains only to NFSv3 access, which is partially broken. NFSv4 mounts work well but the actual NFSv4 export paths differ from the above. In detail: - The export location is usable only for NFSv3 mounts. - The export location works only for the first access rule that's added for the given share. Tenants that should be allowed to access according to a further access rule will be refused (cf. https://bugs.launchpad.net/manila/+bug/1513061). - The share is, however, exported through NFSv4, just on paths that differ from the one indicated by the export location, namely at: ``:/share---``, where ```` ranges over the ID-s of access rules of the share (and the export with ```` is accessible according to the access rule of that ID). - NFSv4 access also works with pseudofs. 
That is, the tenant can do a v4 mount of``:/`` and access the shares allowed for her at the respective ``share---`` subdirectories. Deployment considerations ------------------------- When using `NFS-Ganesha`_ v2.4 or later and manila's ``ganesha.GaneshaNASHelper2`` class, dynamic export of access rules is implemented by using the `dbus-send`_ command to signal `NFS-Ganesha`_ to update its exports. The `dbus-send`_ command is executed on the host where `NFS-Ganesha`_ runs. This may be the same host where the :term:`manila-share` service runs, or it may be remote to :term:`manila-share` depending on how the relevant driver has been configured. Either way, the `dbus-send`_ command and `NFS-Ganesha`_ must be able to communicate over an *abstract socket* and *must be in the same namespace*. Consequently, if you deploy `NFS-Ganesha`_ in a container you likely should run the container in the host namespace (e.g. 'docker run --net=host ...') rather than in its own network namespace. For details, see this `article `_. The :mod:`manila.share.drivers.ganesha` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.drivers.ganesha :noindex: :members: :undoc-members: :show-inheritance: .. _NFS-Ganesha: https://github.com/nfs-ganesha/nfs-ganesha/wiki .. _dbus-send: https://dbus.freedesktop.org/doc/dbus-send.1.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/gerrit.rst0000664000175000017500000000106300000000000021523 0ustar00zuulzuul00000000000000.. _code-reviews-with-gerrit: Code Reviews with Gerrit ======================== Manila uses the `Gerrit`_ tool to review proposed code changes. The review site is https://review.opendev.org. Gerrit is a complete replacement for Github pull requests. `All Github pull requests to the manila repository will be ignored`. See the `Development Workflow`_ for more detailed documentation on how to work with Gerrit. .. 
_Gerrit: http://code.google.com/p/gerrit .. _Development Workflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/guru_meditation_report.rst0000664000175000017500000001042000000000000025016 0ustar00zuulzuul00000000000000.. Copyright (c) 2017 Fiberhome Telecommunication Technologies Co.,LTD All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Manila contains a mechanism whereby developers and system administrators can generate a report about the state of a running Manila executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *SIGUSR1/SIGUSR2* signal to any Manila process with support (see below). The *GMR* will then output to standard error for that particular process. For example, suppose that ``manila-api`` has process id ``8675``, and was run with ``2>/var/log/manila/manila-api-err.log``. Then, ``kill -SIGUSR1 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/manila/manila-api-err.log``. It could save these reports to a well known directory for later analysis by the sysadmin or automated bug analysis tools. 
To configure *GMR* you have to add the following section to manila.conf: [oslo_reports] log_dir = '/path/to/logs/dir' There is another way to trigger the generation of a report: the user should add a configuration in Manila's conf file:: [oslo_reports] file_event_handler=['The path to a file to watch for changes to trigger ' 'the reports, instead of signals. Setting this option ' 'disables the signal trigger for the reports.'] file_event_handler_interval=['How many seconds to wait between polls when ' 'file_event_handler is set, default value ' 'is 1'] A *GMR* can be generated by "touch"ing the file which was specified in file_event_handler. The *GMR* will then output to standard error for that particular process. For example, suppose that ``manila-api`` was run with ``2>/var/log/manila/manila-api-err.log``, and the file path is ``/tmp/guru_report``. Then, ``touch /tmp/guru_report`` will trigger the Guru Meditation report to be printed to ``/var/log/manila/manila-api-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module (currently residing in oslo.reports), as well as the Manila version module: .. 
code-block:: python from oslo_reports import guru_meditation_report as gmr from manila import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation about oslo.reports: `oslo.reports `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/i18n.rst0000664000175000017500000000274000000000000021011 0ustar00zuulzuul00000000000000Internationalization ==================== Manila uses `gettext `_ so that user-facing strings appear in the appropriate language in different locales. Beginning with the Pike series, OpenStack no longer supports log translation. It is not useful to add translation instructions to new code, and the instructions can be removed from old code. Other user-facing strings, e.g. in exception messages, should be translated. To use gettext, make sure that the strings passed to the logger are wrapped in a ``_()`` function call. For example:: msg = _("Share group %s not found.") % share_group_id raise exc.HTTPNotFound(explanation=msg) Do not use ``locals()`` for formatting messages because: 1. It is not as clear as using explicit dicts. 2. It could produce hidden errors during refactoring. 3. Changing the name of a variable causes a change in the message. 4. It creates a lot of otherwise unused variables. 
If you do not follow the project conventions, your code may cause the LocalizationTestCase.test_multiple_positional_format_placeholders test to fail in manila/tests/test_localization.py. The ``_()`` function is brought into the global scope by doing:: from manila.openstack.common import gettextutils gettextutils.install("manila") These lines are needed in any toplevel script before any manila modules are imported. If this code is missing, it may result in an error that looks like:: NameError: name '_' is not defined ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/index.rst0000664000175000017500000000437500000000000021347 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Contributor/Developer Guide =========================== In this section you will find information helpful for contributing to manila. Basic Information ----------------- .. toctree:: :maxdepth: 3 contributing Programming HowTos and Tutorials -------------------------------- .. 
toctree:: :maxdepth: 3 new_feature_workflow development.environment development-environment-devstack apache-httpd unit_tests tempest_tests addmethod.openstackapi documenting_your_work adding_release_notes commit_message_tags guru_meditation_report user_messages ganesha Background Concepts for manila ------------------------------ .. toctree:: :maxdepth: 3 architecture threading i18n rpc driver_requirements pool-aware-manila-scheduler Other Resources --------------- .. toctree:: :maxdepth: 3 launchpad gerrit manila-review-policy project-team-lead irc-quick-and-dirty API Reference ------------- .. toctree:: :maxdepth: 3 Manila API v2 Reference api_microversion_dev api_microversion_history experimental_apis Module Reference ---------------- .. toctree:: :maxdepth: 3 intro services database share share_hooks auth scheduler fakes manila share_replication driver_filter_goodness_weigher share_migration share_server_migration .. only:: html Indices and tables ------------------ * :ref:`genindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/intro.rst0000664000175000017500000000361000000000000021362 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Introduction to the Shared File Systems service =========================================================== Manila is the file share service project for OpenStack. 
Manila provides the management of file shares for example, NFS and CIFS as a core service to OpenStack. Manila works with a variety of proprietary backend storage arrays and appliances, with open source distributed filesystems, as well as with a base Linux NFS or Samba server. There are a number of concepts that will help in better understanding of the solutions provided by manila. One aspect can be to explore the different service possibilities provided by manila. Manila, depending on the driver, requires the user by default to create a share network using neutron-net-id and neutron-subnet-id (GlusterFS native driver does not require it). After creation of the share network, the user can proceed to create the shares. Users in manila can configure multiple back-ends just like Cinder. Manila has a share server assigned to every tenant. This is the solution for all back-ends except for GlusterFS. The customer in this scenario is prompted to create a share server using neutron net-id and subnet-id before even trying to create a share. The current low-level services available in manila are: - :term:`manila-api` - :term:`manila-scheduler` - :term:`manila-share` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/irc-quick-and-dirty.rst0000664000175000017500000000663200000000000024016 0ustar00zuulzuul00000000000000Communicating with the manila team over IRC =========================================== This is a quick-and-dirty, opinionated, "how-to" on connecting from `Matrix `_ to the OFTC IRC server to chat with the #openstack-manila team. You don't need this method if you're willing to use a regular IRC client, or a hosted IRC service (like `IRC Cloud `_). However, most new contributors struggle with the concept of staying connected, and continuing conversations with contributors across timezones. So this guide presents a simpler alternative. 
The longer, definitive guide to IRC for OpenStack is hosted in the `OpenStack Contributor Guide. `_ Step 1: Register a "nick" on OFTC --------------------------------- The #openstack-manila channel is hosted on the OFTC IRC server. You're going to need a registered username on this server to speak to other users on this channel. * Navigate to OFTC's web interface: https://webchat.oftc.net/ * Here, you'll be able to choose a Nickname and join channels straight away. * We recommend picking out a Nickname that's easy to remember. We highly recommend picking this Nickname as your `Launchpad user ID `_ and `OpenDev Gerrit user ID `_. It makes things far less confusing! * Enter the Nickname you pick in the interface, you don't need to specify any channels. Click "Connect" * In the webpage that opens, enter the following, by picking your own and :: /msg NickServ REGISTER * Remember to set a secure password that isn't shared with any other digital account. The email you enter will be used to recover the password if you ever forget it. Step 2: Register on Element --------------------------- `Element `_ is a popular Matrix client. You can find a `handy desktop client or mobile app `_ for it too. * Navigate to Element's web interface: https://app.element.io/#/welcome * Click on "Create Account" * You must pick "matrix.org" as your home server. You may use a sign in provider (such as google.com, github.com, gitlab.com) if you have an account with any of these, or, register with your email directly and set a password. * You’ll be asked to accept the terms and conditions of the service. * If you are registering an account via email, you will be prompted to verify your email address. Step 3: Join the #openstack-manila channel ------------------------------------------ * On Element, start a chat with `@oftc-irc:matrix.org` * The following commands are entered into this chat window. 
* Set your username to the nickname you registered in Step 1:: !username * Provide your password:: !storepass * Log in by issuing:: !reconnect * Join the #openstack-manila channel:: !join #openstack-manila * Repeat the above step for any channel you'd like to join on OFTC Step 4: Chatting across timezones --------------------------------- * Be aware that community members may appear "online", but might actually not be at their computers. So messages that you send them will not be received until they return to their computers. * So be sure view `logs of the official OpenStack OFTC channels `_ in case you started a conversation that picked up asynchronously. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/launchpad.rst0000664000175000017500000000315500000000000022172 0ustar00zuulzuul00000000000000Project hosting with Launchpad ============================== `Launchpad`_ hosts the manila project. The manila project homepage on Launchpad is http://launchpad.net/manila. Launchpad credentials --------------------- Creating a login on Launchpad is important even if you don't use the Launchpad site itself, since Launchpad credentials are used for logging in on several OpenStack-related sites. These sites include: * `Wiki`_ * Gerrit (see :doc:`gerrit`) Mailing list ------------ The mailing list email is ``openstack@lists.launchpad.net``. This is a common mailing list across the OpenStack projects. To participate in the mailing list: #. Join the `Manila Team`_ on Launchpad. #. Subscribe to the list on the `OpenStack Team`_ page on Launchpad. The mailing list archives are at https://lists.launchpad.net/openstack. Bug tracking ------------ Report manila bugs at https://bugs.launchpad.net/manila Feature requests (Blueprints) ----------------------------- Manila uses Launchpad Blueprints to track feature requests. Blueprints are at https://blueprints.launchpad.net/manila. 
Technical support (Answers) --------------------------- Manila uses Launchpad Answers to track manila technical support questions. The manila Answers page is at https://answers.launchpad.net/manila. Note that the `OpenStack Forums`_ (which are not hosted on Launchpad) can also be used for technical support requests. .. _Launchpad: http://launchpad.net .. _Wiki: http://wiki.openstack.org .. _Manila Team: https://launchpad.net/~manila .. _OpenStack Team: https://launchpad.net/~openstack .. _OpenStack Forums: http://forums.openstack.org/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/manila-review-policy.rst0000664000175000017500000001163700000000000024274 0ustar00zuulzuul00000000000000.. _manila-review-policy: Manila team code review policy ============================== Peer code review and the OpenStack Way ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Manila adheres to the `OpenStack code review policy and guidelines `_. Similar to other projects hosted on `opendev.org `_, all of manila's code is curated and maintained by a small group of individuals called the "core team". The `primary core team `_ consists of members from diverse affiliations. There are special core teams such as the `manila release core team `_ and the `manila stable maintenance core team `_ that have specific roles as the names suggest. To make a code change in openstack/manila or any of the associated code repositories (openstack/manila-image-elements, openstack/manila-specs, openstack/manila-tempest-plugin, openstack/manila-test-image, openstack/manila-ui and openstack/python-manilaclient), contributors need to follow the :ref:`Code Submission Process ` and upload their code on the `OpenStack Gerrit `_ website. They can then seek reviews by adding individual members of the `manila core team `_ or alert the entire core team by inviting the Gerrit group "manila-core" to the review. 
Anyone with a membership to the OpenStack Gerrit system may review the code change. However, only the core team can accept and merge the code change. Reviews from contributors outside the core team are encouraged. Reviewing code meticulously and often is a pre-requisite for contributors aspiring to join the core reviewer team. One or more core reviewers will take cognizance of the contribution and provide feedback, or accept the code. For the submission to be accepted, it will need a minimum of one Code-Review:+2 and one Workflow:+1 votes, along with getting a Verified:+1 vote from the CI system. If no core reviewer pays attention to a code submission, feel free to remind the team on the #openstack-manila IRC channel on irc.oftc.net. [#]_ [#]_ Core code review guidelines ~~~~~~~~~~~~~~~~~~~~~~~~~~~ By convention rather than rule, we require that a minimum of two code reviewers provide a Code-Review:+2 vote on each code submission before it is given a Workflow:+1 vote. Having two core reviewers approve a change adds diverse perspective, and is extremely valuable in case of: - Feature changes in the manila service stack - Changes to configuration options - Addition of new tests or significant test bug-fixes in manila-tempest-plugin - New features to manila-ui, manila-test-image, manila-image-elements - Bug fixes Trivial changes --------------- Trivial changes are: - Continuous Integration (CI) system break-fixes that are simple, i.e.: - No job or test is being deleted - Change does not break third-party CI - Documentation changes, especially typographical fixes and grammar corrections. - Automated changes generated by tooling - translations, lower-requirements changes, etc. We do not need two core reviewers to approve trivial changes. Affiliation of core reviewers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Previously, the manila core team informally enforced a code review convention that each code change be reviewed and merged by reviewers of different affiliations. 
This was followed because the OpenStack Technical Committee used the diversity of affiliation of the core reviewer team as a metric for maturity of the project. However, since the Rocky release cycle, the TC has changed its view on the subject [#]_ [#]_. We believe this is a step in the right direction. While there is no strict requirement that two core reviewers accepting a code change have different affiliations. Other things being equal, we will continue to informally encourage organizational diversity by having core reviewers from different organizations. Core reviewers have the professional responsibility of avoiding conflicts of interest. Vendor code and review ~~~~~~~~~~~~~~~~~~~~~~ All code in the manila repositories is open-source and anyone can submit changes to these repositories as long as they seek to improve the code base. Manila supports over 30 vendor storage systems, and many of these vendors participate in the development and maintenance of their drivers. To the extent possible, core reviewers will seek out driver maintainer feedback on code changes pertaining to vendor integrations. References ~~~~~~~~~~ .. [#] Getting started with IRC: https://docs.openstack.org/contributors/common/irc.html .. [#] IRC guidelines: https://docs.openstack.org/infra/manual/irc.html .. [#] TC Report 18-28: https://anticdent.org/tc-report-18-28.html .. [#] TC vote to remove team diversity tags: https://review.opendev.org/#/c/579870/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/manila.rst0000664000175000017500000000362000000000000021471 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Common and Misc Libraries ========================= Libraries common throughout manila or just ones that haven't yet been categorized in depth. The :mod:`manila.context` Module -------------------------------- .. automodule:: manila.context :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.exception` Module ---------------------------------- .. automodule:: manila.exception :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.test` Module ----------------------------- .. automodule:: manila.test :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.utils` Module ------------------------------ .. automodule:: manila.utils :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.wsgi` Module ----------------------------- .. automodule:: manila.wsgi :noindex: :members: :undoc-members: :show-inheritance: Tests ----- The :mod:`test_exception` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.tests.test_exception :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/new_feature_workflow.rst0000664000175000017500000002176400000000000024477 0ustar00zuulzuul00000000000000.. Copyright 2023 Red Hat, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Proposing new features ====================== Planning and working on new features is a hard task. This documentation suggests a workflow and highlights what is expected, suggested and required when working on new features in the OpenStack Manila project. Planning the feature ~~~~~~~~~~~~~~~~~~~~ Features should always start with planning. It is important to start by discussing the problems and possible solutions with the community, bringing up the use cases it will cover, corner cases and alternative approaches, so we suggest the following process: Registering a blueprint ----------------------- When starting a new feature, you should file a blueprint in the OpenStack Manila `Launchpad tracker `_. This blueprint should have a brief description of the feature, and it will be used to track all changes proposed to the implementation, including the Manila core changes, functional tests and the OpenStack client changes. One example blueprint would be the share server migration `blueprint `_. Discussing the feature during the PTG ------------------------------------- The OpenStack PTG is a very good timing to discuss new features, as the upstream community is focused on planning and shaping the upcoming release. So it is encouraged that you host a topic during the PTG to talk about the design of the feature and have the community helping you shape it. In case you missed the PTG deadline to bring up such features, you can also add a topic to the Manila community weekly IRC meeting `agenda `_ and request feedback from the community. 
As a result, you can get different perspectives on the design of the feature and raise awareness, so there are no surprises when the feature is being proposed. One other outcome of this discussion is determining the necessity of a specification. The community will use :ref:`pre-defined ` factors to decide if you need a specification or not. Writing a specification ----------------------- After determining if a specification is necessary for your proposed feature, you will need to write in more details about the problem you are trying to solve, the use cases and all the impact this change will have in terms of API changes, database, security and other aspects. The specification will be reviewed by different people, and this process is crucial for hashing out details. Please check the OpenStack Manila `example specification `_ and follow its guidelines. If you are working on a smaller feature, you may submit a "lite spec". Please follow `this example `_. Working on the implementation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is when the coding happens. You will work on the feature and the code you write must follow the OpenStack coding `guidelines `_. You can find examples of database migrations, driver implementations, RPC APIs and many other changes in the Manila code base. As our code has been worked on and improved over the years, there is a high chance that someone else implemented something similar to what you are doing now, so keeping the consistency with feature implementation is very important. In case you are making changes to the API or adding new APIs, please read the :ref:`Manila API contributor docs `. Development environment ----------------------- So you have worked on your changes and would like to test them? :ref:`Devstack ` is likely the easiest way to go. With devstack, you can have OpenStack installed quickly. You can chose to install it using the Manila Dummy Driver or another backend driver of your choice. 
SDK and OpenStack client ------------------------ If the feature changes the Manila API, please ensure that support these changes through the SDK bindings and OpenStack client in the `python-manilaclient repository `_. Unit tests ---------- All new changes you implement must be covered by unit tests. We have a Zuul job that will always check the coverage percentage of the changes and the reviewers will also be actively looking at it. .. important:: Please :ref:`run the unit tests locally ` to ensure that your tests are passing before submitting them to the upstream gerrit. Tempest tests ------------- All API changes must be tested with negative and positive tests within the `manila-tempest-plugin repository `_. You can use your development environment to :ref:`run such tests `, and ensure your changes don't break existing functional tests. Documenting your work --------------------- :ref:`Documenting your work ` is very important. `API documentation `_ and make changes where necessary. Also, you must include a release notes with your change. On devstack you may use `tox -e reno new insert-title-here` to generate a release note." Collaborative review sessions ----------------------------- `Collaborative review sessions `_ are a good way to speed up the review process. It is encouraged that you propose one as early as possible. In the session, you can walk through the key aspects of the changes you are working on and explain some decisions you took during the implementation. It has proven to be very valuable to both change owners and reviewers. To schedule, please bring it up during the Manila upstream weekly meeting or send an email using the manila tag to OpenStack discuss mailing list. Complying to the deadlines ~~~~~~~~~~~~~~~~~~~~~~~~~~ The deadlines are defined in the official `OpenStack release schedule `_. The Manila team also defines some extra project specific deadlines. Below, we have specified what is expected from you in each of these deadlines. 
Manila spec freeze ------------------ All specifications must be merged prior to this date. Manila feature proposal freeze ------------------------------ New features must be submitted to gerrit before this deadline. The core, client and tempest changes must be available on gerrit, but it does not mean the changes should be merged by this deadline. Manila new driver deadline -------------------------- By the end of the week all new backend drivers for Manila must be substantially complete, with unit tests, and passing 3rd party CI. Drivers do not have to actually merge until feature freeze. Feature freeze -------------- The client release follows a different timeline from the core component, so the client changes must be merged prior to this deadline, as defined in the release schedule. The client must contain unit and functional tests. .. important:: API changes must have the documentation updated in the same change as the entire feature change. All features and new drivers must be merged by the feature freeze date. In case you need extra time for the Manila core change, please reach out to the team's PTL during an upstream weekly meeting, and we can discuss a possible feature freeze exception, considering if the comments were resolved in a reasonable amount of time and if the change is already in a good shape to be merged. Acceptance criteria ~~~~~~~~~~~~~~~~~~~ - Changes were proposed in time and module deadlines were respected - The code introduced or changed is covered by unit tests - New functional tests were proposed and reports are positive - API changes are correct and not introducing backwards compatible changes - API changes are documented Additional tips ~~~~~~~~~~~~~~~ - Remember to join the upstream meetings often - Make sure to use the :ref:`commit message tags ` in your changes. - Submit the changes upstream as early as possible. - Remember to run pep8, unit tests and coverage locally before you submit your changes to gerrit. 
- Ensure you keep a review discipline. The best way to have reviewers looking at your change is to also provide reviews to other people's changes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/pool-aware-manila-scheduler.rst0000664000175000017500000001750300000000000025516 0ustar00zuulzuul00000000000000Pool-Aware Scheduler Support ============================ https://blueprints.launchpad.net/manila/+spec/dynamic-storage-pools Manila currently sees each share backend as a whole, even if the backend consists of several smaller pools with totally different capabilities and capacities. Extending manila to support storage pools within share backends will make manila scheduling decisions smarter as it now knows the full set of capabilities of a backend. Problem Description ------------------- The provisioning decisions in manila are based on the statistics reported by backends. Any backend is assumed to be a single discrete unit with a set of capabilities and single capacity. In reality this assumption is not true for many storage providers, as their storage can be further divided or partitioned into pools to offer completely different sets of capabilities and capacities. That is, there are storage backends which are a combination of storage pools rather than a single homogeneous entity. Usually shares/snapshots can't be placed across pools on such backends. In the current implementation, an attempt is made to map a single backend to a single storage controller, and the following problems may arise: * After the scheduler selects a backend on which to place a new share, the backend may have to make a second decision about where to place the share within that backend. This logic is driver-specific and hard for admins to deal with. * The capabilities that the backend reports back to the scheduler may not apply universally. 
A single backend may support both SATA and SSD-based storage, but perhaps not at the same time. Backends need a way to express exactly what they support and how much space is consumed out of each type of storage. Therefore, it is important to extend manila so that it is aware of storage pools within each backend and can use them as the finest granularity for resource placement. Proposed change --------------- A pool-aware scheduler will address the need for supporting multiple pools from one storage backend. Terminology ----------- Pool A logical concept to describe a set of storage resources that can be used to serve core manila requests, e.g. shares/snapshots. This notion is almost identical to manila Share Backend, for it has similar attributes (capacity, capability). The difference is that a Pool may not exist on its own; it must reside in a Share Backend. One Share Backend can have multiple Pools but Pools do not have sub-Pools (meaning even if they have them, sub-Pools do not get to exposed to manila, yet). Each Pool has a unique name in the Share Backend namespace, which means a Share Backend cannot have two pools using same name. Design ------ The workflow in this change is simple: 1) Share Backends report how many pools and what those pools look like and are capable of to scheduler; 2) When request comes in, scheduler picks a pool that fits the need best to serve the request, it passes the request to the backend where the target pool resides; 3) Share driver gets the message and lets the target pool serve the request as scheduler instructed. To support placing resources (share/snapshot) onto a pool, these changes will be made to specific components of manila: 1. Share Backends reporting capacity/capabilities at pool level; 2. Scheduler filtering/weighing based on pool capacity/capability and placing shares/snapshots to a pool of a certain backend; 3. Record which backend and pool a resource is located on. 
Data model impact ----------------- No DB schema change involved, however, the host field of Shares table will now include pool information but no DB migration is needed. Original host field of Shares: ``HostX@BackendY`` With this change: ``HostX@BackendY#pool0`` REST API impact --------------- With pool support added to manila, there is an awkward situation where we require admin to input the exact location for shares to be imported, which must have pool info. But there is no way to find out what pools are there for backends except looking at the scheduler log. That causes a poor user experience and thus is a problem from the User's Point of View. This change simply adds a new admin-api extension to allow admin to fetch all the pool information from scheduler cache (memory), which closes the gap for end users. This extension provides two level of pool information: names only or detailed information: Pool name only: GET http://MANILA_API_ENDPOINT/v1/TENANT_ID/scheduler-stats/pools Detailed Pool info: GET http://MANILA_API_ENDPOINT/v1/TENANT_ID/scheduler-stats/pools/detail Security impact --------------- N/A Notifications impact -------------------- Host attribute of shares now includes pool information in it, consumer of notification can now extend to extract pool information if needed. Admin impact ------------ Administrators now need to suffix commands with ``#pool`` to manage shares. Other end user impact --------------------- No impact visible to the end user directly, but administrators now need to prefix commands that refer to the backend host with the concatenation of the hashtag (``#``) sign and the name of the pool (e.g. ``#poolName``) to manage shares. Other impacts might include scenarios where if a backend does not expose pools, the backend name is used as the pool name. For instance, ``HostX@BackendY#BackendY`` would be used when the driver does not expose pools. 
Performance Impact ------------------ The size of RPC message for each share stats report will be bigger than before (linear to the number of pools a backend has). It should not really impact the RPC facility in terms of performance and even if it did, pure text compression should easily mitigate this problem. Developer impact ---------------- For those share backends that would like to expose internal pools to manila for more flexibility, developers should update their drivers to include all pool capacities and capabilities in the share stats it reports to scheduler. Share backends without multiple pools do not need to change their implementation. Below is an example of new stats message having multiple pools: :: { 'share_backend_name': 'My Backend', #\ 'vendor_name': 'OpenStack', # backend level 'driver_version': '1.0', # mandatory/fixed 'storage_protocol': 'NFS/CIFS', #- stats&capabilities 'active_shares': 10, #\ 'IOPS_provisioned': 30000, # optional custom 'fancy_capability_1': 'eat', # stats & capabilities 'fancy_capability_2': 'drink', #/ 'pools': [ {'pool_name': '1st pool', #\ 'total_capacity_gb': 500, # mandatory stats for 'free_capacity_gb': 230, # pools 'allocated_capacity_gb': 270, # | 'qos': True, # | 'reserved_percentage': 0, #/ 'dying_disks': 100, #\ 'super_hero_1': 'spider-man', # optional custom 'super_hero_2': 'flash', # stats & capabilities 'super_hero_3': 'neoncat' #/ }, {'pool_name': '2nd pool', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'qos': False, 'reserved_percentage': 0, 'dying_disks': 200, 'super_hero_1': 'superman', 'super_hero_2': ' ', 'super_hero_2': 'Hulk', } ] } Documentation Impact -------------------- Documentation impact for changes in manila are introduced by the API changes. Also, doc changes are needed to append pool names to host names. Driver changes may also introduce new configuration options which would lead to Doc changes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/project-team-lead.rst0000664000175000017500000001747000000000000023535 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Manila Project Team Lead guide ============================== A `project team lead `_ for Manila is elected from the project contributors. A candidate for PTL needn't be a core reviewer on the team, but, must be a contributor, and be familiar with the project to lead the project through its release process. If you would like to be a core reviewer begin by :ref:`contacting-the-core-team`. All the responsibilities below help us in maintaining the project. A project team lead can perform any of these or delegate tasks to other contributors. 
General Responsibilities ------------------------ * Ensure manila meetings have a chair * https://opendev.org/opendev/irc-meetings/src/branch/master/meetings/manila-team-meeting.yaml * Update the team people wiki * https://wiki.openstack.org/wiki/Manila#People Release cycle activities ------------------------ * Get acquainted with the release schedule and set Project specific milestones in the `OpenStack Releases repository `_ * Example: https://releases.openstack.org/victoria/schedule.html * Ensure the Manila `Cross Project Liaisons `_ are aware of their duties and are plugged into the respective areas * Acknowledge `community wide cycle goals `_ and find leaders and coordinate with the goal liaisons * Plan team activities such as: * ``Documentation day/s`` to groom documentation bugs and re-write release cycle docs * ``Bug Triage day/s`` to ensure the bug backlog is well groomed * ``Bug Squash day/s`` to close bugs * ``Collaborative Review meeting/s`` to perform a high-touch review of a code submission over a synchronous call * Milestone driven work: * ``Milestone-1``: - Request a release for the python-manilaclient and manila-ui - Retarget any bugs whose fixes missed Milestone-1 * ``Milestone-2``: - Retarget any bugs whose fixes missed Milestone-2 - Create a review priority etherpad and share it with the community and have reviewers sign up * ``Milestone-3``: - Groom the release notes for python-manilaclient and add a 'prelude' section describing the most important changes in the release - Request a final cycle release for python-manilaclient - Retarget any bugs whose fixes missed Milestone-3 - Grant/Deny any Feature Freeze Exception Requests - Update task trackers for Community Wide Goals - Write the cycle-highlights in marketing-friendly sentences and propose to the openstack/releases repo. 
Usually based on reno prelude but made more readable and friendly * Example: https://review.opendev.org/717801/ - Create the launchpad series and milestones for the next cycle in manila, python-manilaclient and manila-ui. Examples: * manila: https://launchpad.net/manila/ussuri * python-manilaclient: https://launchpad.net/python-manilaclient/ussuri * manila-ui: https://launchpad.net/manila-ui/ussuri * ``Before RC-1``: - Groom the release notes for manila-ui and add a 'prelude' section describing the most important changes in the release - Request a final cycle release for manila-ui - Groom the release notes for manila, add a 'prelude' section describing the most important changes in the release - Mark bugs as {release}-rc-potential bugs in launchpad, ensure they are targeted and addressed by RC * ``RC-1``: - Request a RC-1 release for manila - Request a final cycle tagged release for manila-tempest-plugin - Ensure all blueprints for the release have been marked "Implemented" or are re-targeted * ``After RC-1``: - Close the currently active series on Launchpad for manila, python-manilaclient and manila-ui and set the "Development Focus" to the next release. Alternatively, you can switch this on the series page by setting the next release to “active development” - Set the last series status in each of these projects to “current stable branch release” - Set the previous release's series status to “supported” - Move any Unimplemented specs in `the specs repo `_ to "Unimplemented" - Create a new specs directory in the specs repo for the next cycle so people can start proposing new specs * You should NOT plan to have more than one RC. 
RC2 should only happen if there was a mistake and something was missed for RC-1, or a new regression was discovered * Periodically during the release: * ``Every Week``: - Coordinate the weekly Community Meeting agenda - Coordinate with the Bug Czar and ensure bugs are properly triaged - Check whether any bug-fixes must be back-ported to older stable releases * ``Every 3 weeks``: - Ensure stable branch releases are proposed in case there are any release worthy changes. If there are only documentation or CI/test related fixes, no release for that branch is necessary * To request a release of any manila deliverable: * ``git checkout {branch-to-release-from}`` * ``git log --no-merges {last tag}..`` * Examine commits that will go into the release and use it to decide whether the release is a major, minor, or revision bump according to semver * Then, propose the release with version according to semver x.y.z * X - backward-incompatible changes * Y - features * Z - bug fixes * Use the ``new-release`` command to generate the release * https://releases.openstack.org/reference/using.html#using-new-release-command .. note:: When proposing new releases, ensure that the releases for newer branches are proposed and accepted in the order of the most recent branch to the older. Project Team Gathering ---------------------- * Create etherpads for PTG planning, cycle retrospective and PTG discussions and announce the Planning etherpad to the community members via the Manila community meeting as well as the `OpenStack Discuss Mailing List` * `Example PTG Planning Etherpad `_ * `Example Retrospective Etherpad `_ * `Example PTG Discussions Etherpad `_ * If the PTG is a physical event, gather an estimate of attendees and request the OpenDev Foundation staff for appropriate meeting space. Ensure the sessions are remote attendee friendly. 
Coordinate A/V logistics * Set discussion schedule and find an owner to run each proposed discussion at the PTG * All sessions must be recorded, nominate note takers for each discussion * Sign up for group photo at the PTG (if applicable) * After the event, send PTG session summaries and the meeting recording to the `OpenStack Discuss Mailing List` Summit ------ * Prepare the project update presentation. Enlist help of others * Prepare the on-boarding session materials. Enlist help of others .. _OpenStack Discuss Mailing List: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss .. _contacting the core team: contributing#contacting-the-core-team ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/rpc.rst0000664000175000017500000003212300000000000021014 0ustar00zuulzuul00000000000000.. Copyright (c) 2010 Citrix Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. AMQP and manila =============== AMQP is the messaging technology chosen by the OpenStack cloud. The AMQP broker, either RabbitMQ or Qpid, sits between any two manila components and allows them to communicate in a loosely coupled fashion. 
More precisely, manila components (the compute fabric of OpenStack) use Remote Procedure Calls (RPC hereinafter) to communicate to one another; however such a paradigm is built atop the publish/subscribe paradigm so that the following benefits can be achieved: * Decoupling between client and servant (such as the client does not need to know where the servant's reference is). * Full a-synchronism between client and servant (such as the client does not need the servant to run at the same time of the remote call). * Random balancing of remote calls (such as if more servants are up and running, one-way calls are transparently dispatched to the first available servant). Manila uses direct, fanout, and topic-based exchanges. The architecture looks like the one depicted in the figure below: .. image:: /images/rpc/arch.png :width: 60% .. Manila implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshaling and unmarshaling of messages into function calls. Each manila service (for example Compute, Volume, etc.) create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Manila-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise is acts as publisher only. Manila RPC Mappings ------------------- The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. 
Every manila component connects to the message broker and, depending on its personality (for example a compute node or a network node), may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Compute, Volume or Network). Invokers and Workers do not actually exist in the manila object model, but we are going to use them as an abstraction for sake of clarity. An Invoker is a component that sends messages in the queuing system via two operations: 1) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and reply accordingly to rcp.call operations. Figure 2 shows the following internal elements: * Topic Publisher: A Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher connects always to the same topic-based exchange; its life-cycle is limited to the message delivery. * Direct Consumer: A Direct Consumer comes to life if (an only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations). * Topic Consumer: A Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. 
Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host'). * Direct Publisher: A Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message. * Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by Qpid or RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a message broker node will have only one topic-based exchange for every topic in manila. * Direct Exchange: This is a routing table that is created during rpc.call operations; there are many instances of this kind of exchange throughout the life-cycle of a message broker node, one for each rpc.call invoked. * Queue Element: A Queue is a message bucket. Messages are kept in the queue until a Consumer (either Topic or Direct Consumer) connects to the queue and fetch it. Queues can be shared or can be exclusive. Queues whose routing key is 'topic' are shared amongst Workers of the same personality. .. image:: /images/rpc/rabt.png :width: 60% .. RPC Calls --------- The diagram below shows the message flow during an rpc.call operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system; immediately before the publishing operation, a Direct Consumer is instantiated to wait for the response message. 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic.host') and passed to the Worker in charge of the task. 3. 
Once the task is completed, a Direct Publisher is allocated to send the response message to the queuing system. 4. Once the message is dispatched by the exchange, it is fetched by the Direct Consumer dictated by the routing key (such as 'msg_id') and passed to the Invoker. .. image:: /images/rpc/flow1.png :width: 60% .. RPC Casts --------- The diagram below the message flow during an rp.cast operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system. 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: /images/rpc/flow2.png :width: 60% .. AMQP Broker Load ---------------- At any given time the load of a message broker node running either Qpid or RabbitMQ is function of the following parameters: * Throughput of API calls: The number of API calls (more precisely rpc.call ops) being served by the OpenStack cloud dictates the number of direct-based exchanges, related queues and direct consumers connected to them. * Number of Workers: There is one queue shared amongst workers with the same personality; however there are as many exclusive queues as the number of workers; the number of workers dictates also the number of routing keys within the topic-based exchange, which is shared amongst all workers. The figure below shows the status of a RabbitMQ node after manila components' bootstrap in a test environment. Exchanges and queues being created by manila components are: * Exchanges 1. manila (topic exchange) * Queues 1. compute.phantom (phantom is hostname) 2. compute 3. network.phantom (phantom is hostname) 4. network 5. share.phantom (phantom is hostname) 6. share 7. scheduler.phantom (phantom is hostname) 8. scheduler .. image:: /images/rpc/state.png :width: 60% .. RabbitMQ Gotchas ---------------- Manila uses Kombu to connect to the RabbitMQ environment. 
Kombu is a Python library that in turn uses AMQPLib, a library that implements the standard AMQP 0.8 at the time of writing. When using Kombu, Invokers and Workers need the following parameters in order to instantiate a Connection object that connects to the RabbitMQ server (please note that most of the following material can be also found in the Kombu documentation; it has been summarized and revised here for sake of clarity): * Hostname: The hostname to the AMQP server. * Userid: A valid username used to authenticate to the server. * Password: The password used to authenticate to the server. * Virtual_host: The name of the virtual host to work with. This virtual host must exist on the server, and the user must have access to it. Default is "/". * Port: The port of the AMQP server. Default is 5672 (amqp). The following parameters are default: * Insist: Insist on connecting to a server. In a configuration with multiple load-sharing servers, the Insist option tells the server that the client is insisting on a connection to the specified server. Default is False. * Connect_timeout: The timeout in seconds before the client gives up connecting to the server. The default is no timeout. * SSL: use SSL to connect to the server. The default is False. More precisely Consumers need the following parameters: * Connection: The above mentioned Connection object. * Queue: Name of the queue. * Exchange: Name of the exchange the queue binds to. * Routing_key: The interpretation of the routing key depends on the value of the exchange_type attribute. * Direct exchange: If the routing key property of the message and the routing_key attribute of the queue are identical, then the message is forwarded to the queue. * Fanout exchange: messages are forwarded to the queues bound the exchange, even if the binding does not have a key. 
* Topic exchange: If the routing key property of the message matches the routing key of the key according to a primitive pattern matching scheme, then the message is forwarded to the queue. The message routing key then consists of words separated by dots (".", like domain names), and two special characters are available; star ("") and hash ("#"). The star matches any word, and the hash matches zero or more words. For example ".stock.#" matches the routing keys "usd.stock" and "eur.stock.db" but not "stock.nasdaq". * Durable: This flag determines the durability of both exchanges and queues; durable exchanges and queues remain active when a RabbitMQ server restarts. Non-durable exchanges/queues (transient exchanges/queues) are purged when a server restarts. It is worth noting that AMQP specifies that durable queues cannot bind to transient exchanges. Default is True. * Auto_delete: If set, the exchange is deleted when all queues have finished using it. Default is False. * Exclusive: exclusive queues (such as non-shared) may only be consumed from by the current connection. When exclusive is on, this also implies auto_delete. Default is False. * Exchange_type: AMQP defines several default exchange types (routing algorithms) that covers most of the common messaging use cases. * Auto_ack: acknowledgment is handled automatically once messages are received. By default auto_ack is set to False, and the receiver is required to manually handle acknowledgment. * No_ack: It disable acknowledgment on the server-side. This is different from auto_ack in that acknowledgment is turned off altogether. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. * Auto_declare: If this is True and the exchange name is set, the exchange will be automatically declared at instantiation. Auto declare is on by default. 
Publishers specify most of the parameters of Consumers
Pay attention to the storage protocol configuration to run # the cephfs driver with either the native CEPHFS protocol or the NFS # protocol ####################################################################### [[local|localrc]] ADMIN_PASSWORD=secret DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD DEST=/opt/stack DATA_DIR=/opt/stack/data LOGFILE=/opt/stack/devstacklog.txt # Enabling manila services LIBS_FROM_GIT=python-manilaclient enable_plugin manila https://opendev.org/openstack/manila enable_plugin manila-ui https://opendev.org/openstack/manila-ui enable_plugin manila-tempest-plugin https://opendev.org/openstack/manila-tempest-plugin # Enabling ceph enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph ENABLE_CEPH_MANILA=True # IMPORTANT - Comment out / remove the following line to use # the CEPH driver with the native CEPHFS protocol MANILA_CEPH_DRIVER=cephfsnfs # CEPHFS backend options SHARE_DRIVER=manila.share.drivers.cephfs.driver.CephFSDriver MANILA_SERVICE_IMAGE_ENABLED=False MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True' MANILA_CONFIGURE_DEFAULT_TYPES=True # Required for mounting shares MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/samples/container_local.conf0000664000175000017500000000235000000000000025144 0ustar00zuulzuul00000000000000###################################################################### # This local.conf sets up Devstack with manila enabling the Container # driver that uses Docker and operates in # driver_handles_share_services=True mode ####################################################################### [[local|localrc]] ADMIN_PASSWORD=secret DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD DEST=/opt/stack DATA_DIR=/opt/stack/data 
LOGFILE=/opt/stack/devstacklog.txt # Enabling manila services LIBS_FROM_GIT=python-manilaclient enable_plugin manila https://opendev.org/openstack/manila enable_plugin manila-ui https://opendev.org/openstack/manila-ui enable_plugin manila-tempest-plugin https://opendev.org/openstack/manila-tempest-plugin # Container Backend config options MANILA_SERVICE_IMAGE_ENABLED=False SHARE_DRIVER=manila.share.drivers.container.driver.ContainerShareDriver MANILA_ENABLED_BACKENDS=vienna,prague MANILA_OPTGROUP_vienna_driver_handles_share_servers=True MANILA_OPTGROUP_prague_driver_handles_share_servers=True MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=false' MANILA_CONFIGURE_DEFAULT_TYPES=True # Required for mounting shares MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/samples/dummy_local.conf0000664000175000017500000000713400000000000024322 0ustar00zuulzuul00000000000000################################################################# # This local.conf sets up Devstack with manila enabling the Dummy # driver which operates in driver_handles_share_services=False # and driver_handles_share_services=True modes via four distinct # backends. It's important to remember that this driver does # not provision real storage. 
################################################################# [[local|localrc]] ADMIN_PASSWORD=secret DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD DEST=/opt/stack DATA_DIR=/opt/stack/data LOGFILE=/opt/stack/devstacklog.txt # Enabling manila services LIBS_FROM_GIT=python-manilaclient enable_plugin manila https://opendev.org/openstack/manila enable_plugin manila-ui https://opendev.org/openstack/manila-ui enable_plugin manila-tempest-plugin https://opendev.org/openstack/manila-tempest-plugin # Dummy Backends config options SHARE_DRIVER=manila.tests.share.drivers.dummy.DummyDriver MANILA_CONFIGURE_GROUPS=buenosaires,saopaulo,lima,bogota,membernet,adminnet MANILA_CONFIGURE_DEFAULT_TYPES=true MANILA_SERVICE_IMAGE_ENABLED=false MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL=1 MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL=10 MANILA_REPLICA_STATE_UPDATE_INTERVAL=10 MANILA_DEFERRED_DELETE_TASK_INTERVAL=10 MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True create_share_from_snapshot_support=True revert_to_snapshot_support=True mount_snapshot_support=True' MANILA_ENABLED_BACKENDS=buenosaires,saopaulo,lima,bogota MANILA_OPTGROUP_buenosaires_driver_handles_share_servers=false MANILA_OPTGROUP_buenosaires_replication_domain=DUMMY_DOMAIN MANILA_OPTGROUP_buenosaires_share_backend_name=ALPHA MANILA_OPTGROUP_buenosaires_share_driver=manila.tests.share.drivers.dummy.DummyDriver MANILA_OPTGROUP_saopaulo_driver_handles_share_servers=false MANILA_OPTGROUP_saopaulo_replication_domain=DUMMY_DOMAIN MANILA_OPTGROUP_saopaulo_share_backend_name=BETA MANILA_OPTGROUP_saopaulo_share_driver=manila.tests.share.drivers.dummy.DummyDriver MANILA_OPTGROUP_lima_driver_handles_share_servers=true MANILA_OPTGROUP_lima_network_config_group=membernet MANILA_OPTGROUP_lima_share_backend_name=GAMMA MANILA_OPTGROUP_lima_share_driver=manila.tests.share.drivers.dummy.DummyDriver MANILA_OPTGROUP_lima_admin_network_config_group=membernet 
MANILA_OPTGROUP_bogota_share_driver=manila.tests.share.drivers.dummy.DummyDriver MANILA_OPTGROUP_bogota_driver_handles_share_servers=False MANILA_OPTGROUP_bogota_share_backend_name=DELTA MANILA_OPTGROUP_bogota_replication_domain=DUMMY_DOMAIN MANILA_OPTGROUP_membernet_network_api_class=manila.network.standalone_network_plugin.StandaloneNetworkPlugin MANILA_OPTGROUP_membernet_network_plugin_ipv4_enabled=true MANILA_OPTGROUP_membernet_standalone_network_plugin_allowed_ip_ranges=10.0.0.10-10.0.0.209 MANILA_OPTGROUP_membernet_standalone_network_plugin_gateway=10.0.0.1 MANILA_OPTGROUP_membernet_standalone_network_plugin_mask=24 MANILA_OPTGROUP_membernet_standalone_network_plugin_network_type=vlan MANILA_OPTGROUP_membernet_standalone_network_plugin_segmentation_id=1010 MANILA_OPTGROUP_adminnet_network_api_class=manila.network.standalone_network_plugin.StandaloneNetworkPlugin MANILA_OPTGROUP_adminnet_standalone_network_plugin_gateway=11.0.0.1 MANILA_OPTGROUP_adminnet_standalone_network_plugin_mask=24 MANILA_OPTGROUP_adminnet_standalone_network_plugin_network_type=vlan MANILA_OPTGROUP_adminnet_standalone_network_plugin_segmentation_id=1011 MANILA_OPTGROUP_adminnet_standalone_network_plugin_allowed_ip_ranges=11.0.0.10-11.0.0.19,11.0.0.30-11.0.0.39,11.0.0.50-11.0.0.199 MANILA_OPTGROUP_adminnet_network_plugin_ipv4_enabled=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/samples/generic_local.conf0000664000175000017500000000303400000000000024576 0ustar00zuulzuul00000000000000###################################################################### # This local.conf sets up Devstack with manila enabling the Generic # driver that uses Cinder to provide back-end storage and Nova to # serve storage virtual machines (share servers) in the tenant's domain. 
# This driver operates in driver_handles_share_services=True mode ####################################################################### [[local|localrc]] ADMIN_PASSWORD=secret DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD DEST=/opt/stack DATA_DIR=/opt/stack/data LOGFILE=/opt/stack/devstacklog.txt # Enabling manila services LIBS_FROM_GIT=python-manilaclient enable_plugin manila https://opendev.org/openstack/manila enable_plugin manila-ui https://opendev.org/openstack/manila-ui enable_plugin manila-tempest-plugin https://opendev.org/openstack/manila-tempest-plugin # Generic Back end config options SHARE_DRIVER=manila.share.drivers.generic.GenericShareDriver MANILA_ENABLED_BACKENDS=tokyo,shanghai MANILA_OPTGROUP_tokyo_driver_handles_share_servers=True MANILA_OPTGROUP_shanghai_driver_handles_share_servers=True MANILA_OPTGROUP_tokyo_connect_share_server_to_tenant_network=True MANILA_OPTGROUP_shanghai_connect_share_server_to_tenant_network=True MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True create_share_from_snapshot_support=True' MANILA_CONFIGURE_DEFAULT_TYPES=True # Storage Virtual Machine settings for the generic driver MANILA_SERVICE_IMAGE_ENABLED=True MANILA_USE_SERVICE_INSTANCE_PASSWORD=True././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/samples/lvm_local.conf0000664000175000017500000000246400000000000023766 0ustar00zuulzuul00000000000000################################################################ # This local.conf sets up Devstack with manila enabling the LVM # driver which operates in driver_handles_share_services=False # mode ################################################################ [[local|localrc]] ADMIN_PASSWORD=secret DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD DEST=/opt/stack DATA_DIR=/opt/stack/data 
LOGFILE=/opt/stack/devstacklog.txt # Enabling manila services LIBS_FROM_GIT=python-manilaclient enable_plugin manila https://opendev.org/openstack/manila enable_plugin manila-ui https://opendev.org/openstack/manila-ui enable_plugin manila-tempest-plugin https://opendev.org/openstack/manila-tempest-plugin # LVM Backend config options MANILA_SERVICE_IMAGE_ENABLED=False SHARE_DRIVER=manila.share.drivers.lvm.LVMShareDriver MANILA_ENABLED_BACKENDS=chicago,denver MANILA_OPTGROUP_chicago_driver_handles_share_servers=False MANILA_OPTGROUP_denver_driver_handles_share_servers=False SHARE_BACKING_FILE_SIZE=32000M MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True create_share_from_snapshot_support=True revert_to_snapshot_support=True mount_snapshot_support=True' MANILA_CONFIGURE_DEFAULT_TYPES=True # Required for mounting shares MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/samples/zfsonlinux_local.conf0000664000175000017500000000252000000000000025400 0ustar00zuulzuul00000000000000###################################################################### # This local.conf sets up Devstack with manila enabling the ZFSOnLinux # driver which operates in driver_handles_share_services=False # mode ####################################################################### [[local|localrc]] ADMIN_PASSWORD=secret DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD DEST=/opt/stack DATA_DIR=/opt/stack/data LOGFILE=/opt/stack/devstacklog.txt # Enabling manila services LIBS_FROM_GIT=python-manilaclient enable_plugin manila https://opendev.org/openstack/manila enable_plugin manila-ui https://opendev.org/openstack/manila-ui enable_plugin manila-tempest-plugin https://opendev.org/openstack/manila-tempest-plugin # ZfsOnLinux Back end config options MANILA_SERVICE_IMAGE_ENABLED=False 
SHARE_DRIVER=manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver MANILA_ENABLED_BACKENDS=bangalore,mumbai MANILA_OPTGROUP_bangalore_driver_handles_share_servers=False MANILA_OPTGROUP_mumbai_driver_handles_share_servers=False MANILA_REPLICA_STATE_UPDATE_INTERVAL=60 MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS='snapshot_support=True create_share_from_snapshot_support=True replication_type=readable' MANILA_CONFIGURE_DEFAULT_TYPES=True # Required for mounting shares MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/scheduler.rst0000664000175000017500000001371100000000000022210 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Scheduler ========= The :mod:`manila.scheduler.manager` Module ------------------------------------------ .. automodule:: manila.scheduler.manager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.base_handler` Module ----------------------------------------------- .. automodule:: manila.scheduler.base_handler :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.host_manager` Module ----------------------------------------------- .. 
automodule:: manila.scheduler.host_manager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.rpcapi` Module ----------------------------------------- .. automodule:: manila.scheduler.rpcapi :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.scheduler_options` Module ---------------------------------------------------- .. automodule:: manila.scheduler.scheduler_options :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.drivers.filter` Module ------------------------------------------------- .. automodule:: manila.scheduler.drivers.filter :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.drivers.base` Module ----------------------------------------------- .. automodule:: manila.scheduler.drivers.base :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.drivers.chance` Module ------------------------------------------------- .. automodule:: manila.scheduler.drivers.chance :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.drivers.simple` Module ------------------------------------------------- .. automodule:: manila.scheduler.drivers.simple :noindex: :members: :undoc-members: :show-inheritance: Scheduler Filters ================= The :mod:`manila.scheduler.filters.availability_zone` Filter ------------------------------------------------------------ .. automodule:: manila.scheduler.filters.availability_zone :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.filters.base` Filter ----------------------------------------------- .. automodule:: manila.scheduler.filters.base :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.filters.base_host` Filter ---------------------------------------------------- .. 
automodule:: manila.scheduler.filters.base_host :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.filters.capabilities` Filter ------------------------------------------------------- .. automodule:: manila.scheduler.filters.capabilities :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.filters.capacity` Filter --------------------------------------------------- .. automodule:: manila.scheduler.filters.capacity :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.filters.extra_specs_ops` Filter ---------------------------------------------------------- .. automodule:: manila.scheduler.filters.extra_specs_ops :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.filters.ignore_attempted_hosts` Filter ----------------------------------------------------------------- .. automodule:: manila.scheduler.filters.ignore_attempted_hosts :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.filters.json` Filter ----------------------------------------------- .. automodule:: manila.scheduler.filters.json :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.filters.retry` Filter ------------------------------------------------ .. automodule:: manila.scheduler.filters.retry :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.filters.share_replication` Filter ------------------------------------------------------------ .. automodule:: manila.scheduler.filters.share_replication :noindex: :members: :undoc-members: :show-inheritance: Scheduler Weighers ================== The :mod:`manila.scheduler.weighers.base` Weigher ------------------------------------------------- .. automodule:: manila.scheduler.weighers.base :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.weighers.base_host` Weigher ------------------------------------------------------ .. 
automodule:: manila.scheduler.weighers.base_host :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.weighers.capacity` Weigher ----------------------------------------------------- .. automodule:: manila.scheduler.weighers.capacity :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.scheduler.weighers.pool` Weigher ------------------------------------------------- .. automodule:: manila.scheduler.weighers.pool :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/services.rst0000664000175000017500000000457100000000000022061 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _service_manager_driver: Services, Managers and Drivers ============================== The responsibilities of Services, Managers, and Drivers, can be a bit confusing to people that are new to manila. This document attempts to outline the division of responsibilities to make understanding the system a little bit easier. Currently, Managers and Drivers are specified by flags and loaded using utils.load_object(). This method allows for them to be implemented as singletons, classes, modules or objects. 
As long as the path specified by the flag leads to an object (or a callable that returns an object) that responds to getattr, it should work as a manager or driver. The :mod:`manila.service` Module -------------------------------- .. automodule:: manila.service :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.manager` Module -------------------------------- .. automodule:: manila.manager :noindex: :members: :undoc-members: :show-inheritance: Implementation-Specific Drivers ------------------------------- A manager will generally load a driver for some of its tasks. The driver is responsible for specific implementation details. Anything running shell commands on a host, or dealing with other non-python code should probably be happening in a driver. Drivers should minimize touching the database, although it is currently acceptable for implementation specific data. This may be reconsidered at some point. It usually makes sense to define an Abstract Base Class for the specific driver (i.e. VolumeDriver), to define the methods that a different driver would need to implement. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/share.rst0000664000175000017500000000226400000000000021335 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. Shared Filesystems ================== The :mod:`manila.share.manager` Module -------------------------------------- .. automodule:: manila.share.manager :noindex: :members: :undoc-members: :show-inheritance: The :mod:`manila.share.driver` Module ------------------------------------- .. automodule:: manila.share.driver :noindex: :members: :undoc-members: :show-inheritance: :exclude-members: FakeAOEDriver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/share_hooks.rst0000664000175000017500000000654100000000000022542 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Manila share driver hooks ========================= Manila share driver hooks are designed to provide additional possibilities for each :term:`manila-share` service; such as any kind of notification and additional actions before and after share driver calls. Possibilities ------------- - Perform actions before some share driver method calls. - Perform actions after some share driver method calls with results of driver call and preceding hook call. - Call additional 'periodic' hook each 'N' ticks. - Possibility to update results of driver's action by post-running hook. Features -------- - Errors in hook execution can be suppressed. - Any hook can be disabled. 
- Any amount of hook instances can be run at once for each manila-share service. Limitations ----------- - Hooks approach is not asynchronous. That is, if we run hooks, and especially, more than one hook instance, then all of them will be executed in one thread. Implementation in share drivers ------------------------------- Share drivers can [re]define method `get_periodic_hook_data` that runs with each execution of 'periodic' hook and receives list of shares (as parameter) with existing access rules. So, each share driver, for each of its shares can add/update some information that will be used then in the periodic hook. What is required for writing new 'hook' implementation? ------------------------------------------------------- All implementations of 'hook' interface are expected to be in 'manila/share/hooks'. Each implementation should inherit class 'manila.share.hook:HookBase' and redefine its abstract methods. How to use 'hook' implementations? ---------------------------------- Just set config option 'hook_drivers' in driver's config group. For example:: [MY_DRIVER] hook_drivers=path.to:FooClass,path.to:BarClass Then all classes defined above will be initialized. In the same config group, any config option of hook modules can be redefined too. .. note:: More info about common config options for hooks can be found in module `manila.share.hook` Driver methods that are wrapped with hooks ------------------------------------------ - allow_access - create_share_instance - create_snapshot - delete_share_instance - delete_share_server - delete_snapshot - deny_access - extend_share - init_host - manage_share - publish_service_capabilities - shrink_share - unmanage_share - create_share_replica - promote_share_replica - delete_share_replica - update_share_replica - create_replicated_snapshot - delete_replicated_snapshot - update_replicated_snapshot Above list with wrapped methods can be extended in future. 
The :mod:`manila.share.hook.py` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: manila.share.hook :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/share_migration.rst0000664000175000017500000004115700000000000023412 0ustar00zuulzuul00000000000000.. Copyright (c) 2016 Hitachi Data Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============== Share Migration =============== As of the Ocata release of OpenStack, :term:`manila` supports migration of shares across different pools through an experimental API. Since it was first introduced, several enhancements have been made through the subsequent releases while still in experimental state. This developer document reflects the latest version of the experimental Share Migration API. Feature definition ~~~~~~~~~~~~~~~~~~ The Share Migration API is an administrator-only experimental API that allows the invoker to select a destination pool to migrate a share to, while still allowing clients to access the source share instance during the migration. Migration of data is generally expected to be disruptive for users accessing the source, because at some point it will cease to exist. For this reason, the share migration feature is implemented in a 2-phase approach, for the purpose of controlling the timing of that expected disruption of migrating shares. 
The first phase of migration is when operations that take the longest are performed, such as data copying or replication. After the first pass of data copying is complete, it is up to the administrator to trigger the second phase, often referred to as switchover phase, which may perform operations such as a last sync and deleting the source share instance. During the data copy phase, users remain connected to the source, and may have to reconnect after the switchover phase. In order to migrate a share, manila may employ one of two mechanisms which provide different capabilities and affect how the disruption occurs with regards to user access during data copy phase and disconnection during switchover phase. Those two mechanisms are: **driver-assisted migration** This mechanism uses the underlying driver running in the manila-share service node to coordinate the migration. The migration itself is performed directly on the storage. In order for this mechanism to be used, it requires the driver to implement this functionality, while also requiring that the driver which manages the destination pool is compatible with driver-assisted migration. Typically, drivers would be able to assist migration of shares within storage systems from the same vendor. It is likely that this will be the most efficient and reliable mechanism to migrate a given share, as the storage back end may be able to migrate the share while remaining writable, preserving all file system metadata, snapshots, and possibly perform this operation non-disruptively. When this mechanism cannot be used, the host-assisted migration will be attempted. **host-assisted migration** This mechanism uses the Data Service (manila-data) to copy the source share's data to a new destination share created in the given destination pool. 
For this mechanism to work, it is required that the Data Service is properly configured in the cloud environment and the migration operation for the source share's protocol and access rule type combination is supported by the Data Service. This is the most suited mechanism to migrate shares when the two pools are from different storage vendors. Given that this mechanism is a rough copy of files and the back ends are unaware that their share contents are being copied over, the optimizations found in the driver-assisted migration are not present here, thus the source share remains read-only, snapshots cannot be transferred, some file system metadata such as permissions and ownership may be lost, and users are expected to be disconnected by the end of migration. Note that during a share migration, access rules cannot be added or removed. As of Ocata release, this feature allows several concurrent migrations (driver-assisted or host-assisted) to be performed, having a best-effort type of scalability. API description ~~~~~~~~~~~~~~~ The migration of a share is started by invoking the ``migration_start`` API. The parameters are: **share** The share to be migrated. This parameter is mandatory. **destination** The destination pool in ``host@backend#pool`` representation. This parameter is mandatory. **force_host_assisted_migration** Forces the host-assisted mechanism to be used, thus using the Data Service to copy data across back ends. This parameter value defaults to `False`. When set to `True`, it skips the driver-assisted approach which would otherwise be attempted first. This parameter is optional. **preserve_metadata** Specifies whether migration should enforce the preservation of all file system metadata. When this behavior is expected (i.e, this parameter is set to `True`) and drivers are not capable of ensuring preservation of file system metadata, migration will result in an error status. 
As of Ocata release, host-assisted migration cannot provide any guarantees of preserving file system metadata. This parameter is mandatory. **preserve_snapshots** Specifies whether migration should enforce the preservation of all existing snapshots at the destination. In other words, the existing snapshots must be migrated along with the share data. When this behavior is expected (i.e, this parameter is set to `True`) and drivers are not capable of migrating the snapshots, migration will result in an error status. As of Ocata release, host-assisted migration cannot provide this capability. This parameter is mandatory. **nondisruptive** Specifies whether migration should only be performed without disrupting clients during migration. For such, it is also expected that the export location does not change. When this behavior is expected (i.e, this parameter is set to `True`) and drivers are not capable of allowing the share to remain accessible through the two phases of the migration, migration will result in an error status. As of Ocata release, host-assisted migration cannot provide this capability. This parameter is mandatory. **writable** Specifies whether migration should only be performed if the share can remain writable. When this behavior is expected (i.e, this parameter is set to `True`) and drivers are not capable of allowing the share to remain writable, migration will result in an error status. If drivers are not capable of performing a nondisruptive migration, manila will ensure that the share will remain writable through the data copy phase of migration. However, during the switchover phase the share will be re-exported at the destination, causing the share to be rendered inaccessible for the duration of this phase. As of Ocata release, host-assisted migration cannot provide this capability. This parameter is mandatory. 
**new_share_type** If willing to retype the share so it can be allocated in the desired destination pool, the invoker may supply a new share type to be used. This is often suited when the share is to be migrated to a pool which operates in the opposite driver mode. This parameter is optional. **new_share_network** If willing to change the share's share-network so it can be allocated in the desired destination pool, the invoker may supply a new share network to be used. This is often suited when the share is to be migrated to a pool which operates in a different availability zone or managed by a driver that handles share servers. This parameter is optional. After started, a migration may be cancelled through the ``migration_cancel`` API, have its status obtained through the ``migration_get_progress`` API, and completed through the ``migration_complete`` API after reaching a certain state (see ``Workflows`` section below). Workflows ~~~~~~~~~ Upon invoking ``migration_start``, several validations will be performed by the API layer, such as: * If supplied API parameters are valid. * If the share does not have replicas. * If the share is not member of a share group. * If the access rules of the given share are not in error status. * If the driver-assisted parameters specified do not conflict with `force_host_assisted_migration` parameter. * If `force_host_assisted_migration` parameter is set to True while snapshots do not exist. * If share status is `available` and is not busy with other tasks. * If the destination pool chosen to migrate the share to exists and is running. * If share service or Data Service responsible for performing the migration exists and is running. * If the combination of share network and share type resulting is compatible with regards to driver modes. If any of the above validations fail, the API will return an error. 
Otherwise, the `task_state` field value will transition to `migration_starting` and the share's status will transition to `migrating`. Past this point, all validations, state transitions and errors will not produce any notifications to the user. Instead, the given share's `task_state` field value will transition to `migration_error`. Following API validation, the scheduler will validate if the supplied destination is compatible with the desired share type according to the pool's capabilities. If this validation fails, the `task_state` field value will transition to `migration_error`. The scheduler then invokes the source share pool's manager to proceed with the migration, transitioning the `task_state` field value to `migration_in_progress`. If `force-host-assisted-migration` API parameter is not set, then a driver-assisted migration will be attempted first. Note that whichever mechanism is employed, there will be a new share instance created in the database, referred to as the "destination instance", with a status field value `migrating_to`. This share instance will not have its export location displayed during migration and will prevail instead of the original instance database entry when migration is complete. Driver-assisted migration data copy phase ----------------------------------------- A share server will be created as needed at the destination pool. Then, the share server details are provided to the driver to report the set of migration capabilities for this destination. If the API parameters `writable`, `nondisruptive`, `preserve_metadata` and `preserve_snapshots` are satisfied by the reported migration capabilities, the `task_state` field value transitions to `migration_driver_starting` and the driver is invoked to start the migration. The driver's migration_start method should start a job in the storage back end and return, allowing the `task_state` field value to transition to `migration_driver_in_progress`. 
If any of the API parameters described previously are not satisfied, or the driver raises an exception in `migration_start`, the driver-assisted migration ends setting the `task_state` field value to `migration_error`, all allocated resources will be cleaned up and migration will proceed to the host-assisted migration mechanism. Once the `migration_start` driver method succeeds, a periodic task that checks for shares with `task_state` field value `migration_driver_in_progress` will invoke the driver's `migration_continue` method, responsible for executing the next steps of migration until the data copy phase is completed, transitioning the `task_state` field value to `migration_driver_phase1_done`. If this step fails, the `task_state` field value transitions to `migration_error` and all allocated resources will be cleaned up. Host-assisted migration data copy phase --------------------------------------- A new share will be created at the destination pool and the source share's access rules will be changed to read-only. The `task_state` field value transitions to `data_copying_starting` and the Data Service is then invoked to mount both shares and copy data from the source to the destination. In order for the Data Service to mount the shares, it will ask the storage driver to allow access to the node where the Data Service is running. It will then attempt to mount the shares via their respective administrator-only export locations that are served in the administrator network when available, otherwise the regular export locations will be used. In order for the access and mount procedures to succeed, the administrator-only export location must be reachable from the Data Service and the access parameter properly configured in the Data Service configuration file. For instance, a NFS share should require an IP configuration, whereas a CIFS share should require a username credential. 
Those parameters should be previously set in the Data Service configuration file by the administrator. The data copy routine runs commands as root user for the purpose of setting the correct file metadata to the newly created files at the destination share. It can optionally verify the integrity of all files copied through a configuration parameter. Once copy is completed, the shares are unmounted, their access from the Data Service are removed and the `task_state` field value transitions to `data_copying_completed`, allowing the switchover phase to be invoked. Share migration switchover phase -------------------------------- When invoked, the `task_state` field value transitions to `migration_completing`. Whichever migration mechanism is used, the source share instance is deleted and the access rules are applied to the destination share instance. In the driver-assisted migration, the driver is first invoked to perform a final sync. The last step is to update the share model's optional capability fields, such as `create_share_from_snapshot_support`, `revert_to_snapshot_support` and `mount_snapshot_support`, according to the `new_share_type`, if it had been specified when the migration was initiated. At last, the `task_state` field value transitions to `migration_success`. If the `nondisruptive` driver-assisted capability is not supported or the host-assisted migration mechanism is used, the export location will change and clients will need to remount the share. Driver interfaces ~~~~~~~~~~~~~~~~~ All drivers that implement the driver-assisted migration mechanism should be able to perform all required steps from the source share instance back end within the implementation of the interfaces listed in the section below. Those steps include: * Validating compatibility and connectivity between the source and destination back end; * Start the migration job in the storage back end. 
Return after the job request has been submitted; * Subsequent invocations to the driver to monitor the job status, cancel it and obtain its progress in percentage value; * Complete migration by performing a last sync if necessary and delete the original share from the source back end. For host-assisted migration, drivers may override some methods defined in the base class in case it is necessary to support it. Additional notes ~~~~~~~~~~~~~~~~ * In case of an error in the storage back end during the execution of the migration job, the driver should raise an exception within the ``migration_continue`` method. * If the manila-share service is restarted during a migration, in case it is a driver-assisted migration, the driver's ``migration_continue`` will be invoked continuously with an interval configured in the share manager service (``migration_driver_continue_interval``). The invocation will stop when the driver finishes the data copy phase. In case of host-assisted migration, the migration job is disrupted only if the manila-data service is restarted. In such event, the migration has to be restarted from the beginning. * To be compatible with host-assisted migration, drivers must also support the ``update_access`` interface, along with its `recovery mode` mechanism. Share Migration driver-assisted interfaces: ------------------------------------------- .. autoclass:: manila.share.driver.ShareDriver :noindex: :members: migration_check_compatibility, migration_start, migration_continue, migration_complete, migration_cancel, migration_get_progress Share Migration host-assisted interfaces: ----------------------------------------- .. autoclass:: manila.share.driver.ShareDriver :noindex: :members: connection_get_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/share_replication.rst0000664000175000017500000003331100000000000023723 0ustar00zuulzuul00000000000000.. 
Copyright (c) 2016 Goutham Pacha Ravi Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Share Replication ================= As of the Mitaka release of OpenStack, :term:`manila` supports replication of shares between different pools for drivers that operate with ``driver_handles_share_servers=False`` mode. These pools may be on different backends or within the same backend. This feature can be used as a disaster recovery solution or as a load sharing mirroring solution depending upon the replication style chosen, the capability of the driver and the configuration of backends. This feature assumes and relies on the fact that share drivers will be responsible for communicating with ALL storage controllers necessary to achieve any replication tasks, even if that involves sending commands to other storage controllers in other Availability Zones (or AZs). End users would be able to create and manage their replicas, alongside their shares and snapshots. Storage availability zones and replication domains ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Replication is supported within the same availability zone, but in an ideal solution, an Availability Zone should be perceived as a single failure domain. So this feature provides the most value in an inter-AZ replication use case. The ``replication_domain`` option is a backend specific StrOpt option to be used within ``manila.conf``. The value can be any ASCII string. 
Two backends that can replicate between each other would have the same ``replication_domain``. This comes from the premise that manila expects Share Replication to be performed between backends that have similar characteristics. When scheduling new replicas, the scheduler takes into account the ``replication_domain`` option to match similar backends. It also ensures that only one replica can be scheduled per pool. When backends report multiple pools, manila would allow for replication between two pools on the same backend. The ``replication_domain`` option is meant to be used in conjunction with the ``storage_availability_zone`` (or back end specific ``backend_availability_zone``) option to utilize this solution for Data Protection/Disaster Recovery. Replication types ~~~~~~~~~~~~~~~~~ When creating a share that is meant to have replicas in the future, the user will use a ``share_type`` with an extra_spec, :term:`replication_type` set to a valid replication type that manila supports. Drivers must report the replication type that they support as the :term:`replication_type` capability during the ``_update_share_stats()`` call. Three types of replication are currently supported: **writable** Synchronously replicated shares where all replicas are writable. Promotion is not supported and not needed. **readable** Mirror-style replication with a primary (writable) copy and one or more secondary (read-only) copies which can become writable after a promotion. **dr (for Disaster Recovery)** Generalized replication with secondary copies that are inaccessible until they are promoted to become the ``active`` replica. .. note:: The term :term:`active` replica refers to the ``primary`` share. In :term:`writable` style of replication, all replicas are :term:`active`, and there could be no distinction of a ``primary`` share. In :term:`readable` and :term:`dr` styles of replication, a ``secondary`` replica may be referred to as ``passive``, ``non-active`` or simply ``replica``. 
Health of a share replica ~~~~~~~~~~~~~~~~~~~~~~~~~ Apart from the ``status`` attribute, share replicas have the :term:`replica_state` attribute to denote the state of the replica. The ``primary`` replica will have its :term:`replica_state` attribute set to :term:`active`. A ``secondary`` replica may have one of the following values as its :term:`replica_state`: **in_sync** The replica is up to date with the active replica (possibly within a backend specific :term:`recovery point objective`). **out_of_sync** The replica has gone out of date (all new replicas start out in this :term:`replica_state`). **error** When the scheduler failed to schedule this replica or some potentially irrecoverable damage occurred with regard to updating data for this replica. Manila requests periodic update of the :term:`replica_state` of all non-active replicas. The update occurs with respect to an interval defined through the ``replica_state_update_interval`` option in ``manila.conf``. Administrators have an option of initiating a ``resync`` of a secondary replica (for :term:`readable` and :term:`dr` types of replication). This could be performed before a planned failover operation in order to have the most up-to-date data on the replica. Promotion ~~~~~~~~~ For :term:`readable` and :term:`dr` styles, we refer to the task of switching a ``non-active`` replica with the :term:`active` replica as `promotion`. For the :term:`writable` style of replication, promotion does not make sense since all replicas are :term:`active` (or writable) at all given points of time. The ``status`` attribute of the non-active replica being promoted will be set to :term:`replication_change` during its promotion. This has been classified as a ``busy`` state and hence API interactions with the share are restricted while one of its replicas is in this state. Promotion of replicas with :term:`replica_state` set to ``error`` may not be fully supported by the backend. 
However, manila allows the action as an administrator feature and such an attempt may be honored by backends if possible. When multiple replicas exist, multiple replication relationships between shares may need to be redefined at the backend during the promotion operation. If the driver fails at this stage, the replicas may be left in an inconsistent state. The share manager will set all replicas to have the ``status`` attribute set to ``error``. Recovery from this state would require administrator intervention. Snapshots ~~~~~~~~~ If the driver supports snapshots, the replication of a snapshot is expected to be initiated simultaneously with the creation of the snapshot on the :term:`active` replica. Manila tracks snapshots across replicas as separate snapshot instances. The aggregate snapshot object itself will be in ``creating`` state until it is ``available`` across all of the share's replicas that have their :term:`replica_state` attribute set to :term:`active` or ``in_sync``. Therefore, for a driver that supports snapshots, the definition of being ``in_sync`` with the primary is not only that data is ensured (within the :term:`recovery point objective`), but also that any 'available' snapshots on the primary are ensured on the replica as well. If the snapshots cannot be ensured, the :term:`replica_state` *must* be reported to manila as being ``out_of_sync`` until the snapshots have been replicated. When a snapshot instance has its ``status`` attribute set to ``creating`` or ``deleting``, manila will poll the respective drivers for a status update. As described earlier, the parent snapshot itself will be ``available`` only when its instances across the :term:`active` and ``in_sync`` replicas of the share are ``available``. The polling interval will be the same as ``replica_state_update_interval``. Access Rules ~~~~~~~~~~~~ Access rules are not meant to be different across the replicas of the share. 
Manila expects drivers to handle these access rules effectively depending on the style of replication supported. For example, the :term:`dr` style of replication does mean that the non-active replicas are inaccessible, so if read-write rules are expected, then the rules should be applied on the :term:`active` replica only. Similarly, drivers that support :term:`readable` replication type should apply any read-write rules as read-only for the non-active replicas. Drivers will receive all the access rules in ``create_replica``, ``delete_replica`` and ``update_replica_state`` calls and have ample opportunity to reconcile these rules effectively across replicas. Understanding Replication Workflows ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Creating a share that supports replication ------------------------------------------ Administrators can create a share type with extra-spec :term:`replication_type`, matching the style of replication the desired backend supports. Users can use the share type to create a new share that allows/supports replication. A replicated share always starts out with one replica, the ``primary`` share itself. The :term:`manila-scheduler` service will filter and weigh available pools to find a suitable pool for the share being created. In particular, * The ``CapabilityFilter`` will match the :term:`replication_type` extra_spec in the request share_type with the ``replication_type`` capability reported by a pool. * The ``ShareReplicationFilter`` will further ensure that the pool has a non-empty ``replication_domain`` capability being reported as well. * The ``AvailabilityZoneFilter`` will ensure that the availability_zone requested matches with the pool's availability zone. Creating a replica ------------------ The user has to specify the share name/id of the share that is supposed to be replicated and optionally an availability zone for the replica to exist in. The replica inherits the parent share's share_type and associated extra_specs. 
Scheduling of the replica is similar to that of the share. * The `ShareReplicationFilter` will ensure that the pool is within the same ``replication_domain`` as the :term:`active` replica and also ensures that the pool does not already have a replica for that share. Drivers supporting :term:`writable` style **must** set the :term:`replica_state` attribute to :term:`active` when the replica has been created and is ``available``. Deleting a replica ------------------ Users can remove replicas that have their `status` attribute set to ``error``, ``in_sync`` or ``out_of_sync``. They could even delete an :term:`active` replica as long as there is another :term:`active` replica (as could be the case with `writable` replication style). Before the ``delete_replica`` call is made to the driver, an update_access call is made to ensure access rules are safely removed for the replica. Administrators may also ``force-delete`` replicas. Any driver exceptions will only be logged and not re-raised; the replica will be purged from manila's database. Promoting a replica ------------------- Users can promote replicas that have their :term:`replica_state` attribute set to ``in_sync``. Administrators can attempt to promote replicas that have their :term:`replica_state` attribute set to ``out_of_sync`` or ``error``. During a promotion, if the driver raises an exception, all replicas will have their `status` attribute set to `error` and recovery from this state will require administrator intervention. Resyncing a replica ------------------- Prior to a planned failover, an administrator could attempt to update the data on the replica. The ``update_replica_state`` call will be made during such an action, giving drivers an opportunity to push the latest updates from the `active` replica to the secondaries. Creating a snapshot ------------------- When a user takes a snapshot of a share that has replicas, manila creates as many snapshot instances as there are share replicas. 
These snapshot instances all begin with their `status` attribute set to `creating`. The driver is expected to create the snapshot of the ``active`` replica and then begin to replicate this snapshot as soon as the :term:`active` replica's snapshot instance is created and becomes ``available``. Deleting a snapshot ------------------- When a user deletes a snapshot, the snapshot instances corresponding to each replica of the share have their ``status`` attribute set to ``deleting``. Drivers must update their secondaries as soon as the :term:`active` replica's snapshot instance is deleted. Driver Interfaces ~~~~~~~~~~~~~~~~~ As part of the ``_update_share_stats()`` call, the base driver reports the ``replication_domain`` capability. Drivers are expected to update the :term:`replication_type` capability. Drivers must implement the methods enumerated below in order to support replication. ``promote_replica``, ``update_replica_state`` and ``update_replicated_snapshot`` need not be implemented by drivers that support the :term:`writable` style of replication. The snapshot methods ``create_replicated_snapshot``, ``delete_replicated_snapshot`` and ``update_replicated_snapshot`` need not be implemented by a driver that does not support snapshots. Each driver request is made on a specific host. Create/delete operations on secondary replicas are always made on the destination host. Create/delete operations on snapshots are always made on the :term:`active` replica's host. ``update_replica_state`` and ``update_replicated_snapshot`` calls are made on the host that the replica or snapshot resides on. Share Replica interfaces: ------------------------- .. autoclass:: manila.share.driver.ShareDriver :members: create_replica, delete_replica, promote_replica, update_replica_state Replicated Snapshot interfaces: ------------------------------- .. 
autoclass:: manila.share.driver.ShareDriver :noindex: :members: create_replicated_snapshot, delete_replicated_snapshot, update_replicated_snapshot ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/share_server_migration.rst0000664000175000017500000002556200000000000025002 0ustar00zuulzuul00000000000000====================== Share Server Migration ====================== As of the Victoria release of OpenStack, Manila supports migration of share servers across different pools through an experimental API. This developer document reflects the latest version of the experimental Share Server Migration API. Feature definition ~~~~~~~~~~~~~~~~~~ The Share Server Migration API is an administrator-only API that allows the invoker to select a destination backend to migrate a share server to, while still allowing clients to access the source share server resources during the migration. Migration of data is expected to be disruptive for users accessing the source, because at some point it will cease to exist. For this reason, the share server migration feature is implemented in a 2-phase approach, for the purpose of controlling the timing of that expected disruption of migrating share servers. The first phase of the migration is when operations that take the longest are performed, such as data copying or replication. After the first phase of data copying is complete, it is up to the administrator to trigger the second phase, often referred to as switchover phase, which may perform operations such as a last sync and changing the source share server to inactive. During the data copy phase, users remain connected to the source, and may have to reconnect after the switchover phase. Share server migration only supports driver-assisted migration. This mechanism uses the underlying driver running in the manila-share service node to coordinate the migration. 
The migration is performed directly in the storage. In order to use this mechanism, the driver should implement this functionality. Also, the driver managing the destination back end should support driver-assisted migration. Typically, drivers would be able to assist migration of share servers within storage systems from the same vendor. It is likely that this will be the most efficient and reliable mechanism to migrate a given share server, as the storage back end may be able to migrate the share server while remaining writable, snapshots, and possibly perform this operation non-disruptively. Note that during a share server migration, access rules cannot be added or removed. Also, it is not possible to modify existent access rules for shares and share snapshots created upon the share server being migrated. API description ~~~~~~~~~~~~~~~ The migration of a share server is started by invoking the ``migration_start`` API. The parameters are: **share_server_id** The share server to be migrated. This parameter is mandatory. **destination** The destination backend in ``host@backend`` representation. This parameter is mandatory. **preserve_snapshots** Specifies whether migration should enforce the preservation of all existing snapshots at the destination. In other words, the existing snapshots must be migrated along with the share server data. When this behavior is expected (i.e, this parameter is set to `True`) and drivers are not capable of migrating the snapshots, migration will result in an error status. This parameter is mandatory. **nondisruptive** Specifies whether the migration should only be performed without disrupting clients during migration. For such, it is also expected that the export location does not change. When this behavior is expected (i.e, this parameter is set to `True`) and drivers are not capable of allowing the share server shares to remain accessible through the two phases of the migration, migration will result in an error status. 
This parameter is mandatory. **writable** Specifies whether migration should only be performed if the share server shares can remain writable. When this behavior is expected (i.e, this parameter is set to `True`) and drivers are not capable of allowing the share server shares to remain writable, migration will result in an error status. If drivers are not capable of performing a nondisruptive migration, manila will ensure that the share server shares will remain writable through the data copy phase of migration. However, during the switchover phase the shares will be re-exported at the destination, causing the share to be rendered inaccessible for the duration of this phase. This parameter is mandatory. **new_share_network_id** If willing to change the share server's share-network so it can be allocated in the desired destination backend, the invoker may supply a new share network to be used. This is often suited when the share server is to be migrated to a backend which operates in a different availability zone or managed by a driver that handles share servers. This parameter is optional. After started, a migration may be cancelled through the ``migration_cancel`` API, have its status obtained through the ``migration_get_progress`` API, and completed through the ``migration_complete`` API after reaching a certain state (see ``Workflows`` section below). Workflows ~~~~~~~~~ Upon invoking ``migration_start``, several validations will be performed by the API layer, such as: * If supplied API parameters are valid. * If share server status is `active`. * If there are share groups related to the share server. * If a new share network id was provided and is compatible with the destination. * If a new host and share network id were provided and they're different from the source share server. * If the share server to be migrated serves as destination to another share server. * If all the availability zones match with all shares' share types within the share server. 
* If the share server's shares do not have replicas. * If the share server's shares are not members of a share group. * If the access rules of the given share server's shares are not in error status. * If the snapshots of all share server shares are in `available` state. * If the destination backend chosen to migrate the share server to exists, as well as it and its share service are running. If any of the above validations fail, the API will return an error. Otherwise, the `task_state` field value will transition to `migration_starting` and the share server's status will transition to `server_migrating`. Past this point, all validations, state transitions and errors will not produce any notifications to the user. Instead, the given share server's `task_state` field value will transition to `migration_error`. Right after the API validations, a driver call will be performed in the destination backend in order to validate if the destination host is compatible within the requested operation. The driver will then determine the compatibility between source and destination hosts for the share server migration. A new share server will be created in the database, referred to as the "destination share server", with a status field value `server_migrating_to`. Share server migration data copy phase -------------------------------------- A share server will be created as needed at the destination backend. Then, the share server details are provided to the driver to report the set of migration capabilities for this destination. If the API parameters `writable`, `nondisruptive` and `preserve_snapshots` are satisfied by the reported migration capabilities, the `task_state` field value transitions to `migration_driver_starting` and the driver is invoked to start the migration. 
The driver's ``share_server_migration_start`` method should start a job in the storage back end and return, allowing the `task_state` field value to transition to `migration_driver_in_progress`. If any of the API parameters described previously are not satisfied, or the driver raises an exception in `share_server_migration_start`, the migration ends, setting the `task_state` field value to `migration_error`, and the created share server will have its status set to error. Once the ``share_server_migration_start`` driver method succeeds, a periodic task that checks for share servers with `task_state` field value `migration_driver_in_progress` will invoke the driver's ``share_server_migration_continue`` method, responsible for executing the next steps of migration until the data copy phase is completed, transitioning the `task_state` field value to `migration_driver_phase1_done`. If this step fails, the `task_state` field value transitions to `migration_error` and all allocated resources will be cleaned up. Share server migration switchover phase --------------------------------------- When invoked, the `task_state` field value transitions to `migration_completing`. In this phase, these operations will happen: * The source share instances are deleted * The source share server will have its status set to inactive * The access rules are applied to the shares of the destination share server * A final sync is also performed. At last, the `task_state` field value transitions to `migration_success`. If the `nondisruptive` capability is not supported, the export locations will change and clients will need to remount the shares. Driver interfaces ~~~~~~~~~~~~~~~~~ All drivers that implement the migration mechanism should be able to perform all required steps from the source share server back end within the implementation of the interfaces listed in the section below. 
Those steps include: * Validating compatibility and connectivity between the source and destination back end; * Start the migration job in the storage back end. Return after the job request has been submitted; * Subsequent invocations to the driver to monitor the job status. * Complete migration by performing a last sync if necessary and delete the original shares from the source back end. .. note:: The implementation of the ``share_server_migration_cancel`` and ``share_server_migration_get_progress`` operations is not mandatory. If the driver is able to perform such operations, make sure to set ``share_server_migration_cancel`` and ``share_server_migration_get_progress`` equal to ``True`` in the response of the ``share_server_migration_check`` operation. Additional notes ~~~~~~~~~~~~~~~~ * In case of an error in the storage back end during the execution of the migration job, the driver should raise an exception within the ``share_server_migration_continue`` method. * If the manila-share service is restarted during a migration, the driver's ``share_server_migration_continue`` will be invoked periodically with an interval configured in the share manager service (``share_server_migration_driver_continue_interval``). The invocation will stop when the driver finishes the data copy phase. Share Server Migration interfaces: ---------------------------------- .. autoclass:: manila.share.driver.ShareDriver :noindex: :members: share_server_migration_check_compatibility, share_server_migration_start, share_server_migration_continue, share_server_migration_complete, share_server_migration_cancel, share_server_migration_get_progress ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/tempest_tests.rst0000664000175000017500000001366100000000000023141 0ustar00zuulzuul00000000000000.. 
_installing-tempest-tests: Tempest Tests ============= Manila's functional API and scenario tests are in the `manila tempest plugin `_ repository. Installation of plugin to tempest --------------------------------- Tempest plugin installation is common for all its plugins and detailed information can be found in its `docs`_. In simple words: if you have installed manila project on the same machine as tempest, then tempest will find it. In case the plugin is not installed (see the verification steps below), you can clone and install it yourself. .. code-block:: console $ git clone https://opendev.org/openstack/manila-tempest-plugin $ pip install -e manila-tempest-plugin .. _docs: https://docs.openstack.org/tempest/latest/plugin.html#using-plugins Verifying installation ---------------------- To verify that the plugin is installed on your system, run the following command and find "manila_tests" in its output. .. code-block:: console $ tempest list-plugins Alternatively, or to double-check, list all the tests available on the system and find manila tests in it. .. code-block:: console $ tempest run -l Configuration of manila-related tests in tempest.conf ----------------------------------------------------- All config options for manila are defined in `manila_tempest_tests/config.py`_ module. They can be set/redefined in ``tempest.conf`` file. Here is a configuration example: .. 
code-block:: ini [service_available] manila = True [share] # Capabilities capability_storage_protocol = NFS capability_snapshot_support = True capability_create_share_from_snapshot_support = True backend_names = Backendname1,BackendName2 backend_replication_type = readable # Enable/Disable test groups multi_backend = True multitenancy_enabled = True enable_protocols = nfs,cifs,glusterfs,cephfs enable_ip_rules_for_protocols = nfs enable_user_rules_for_protocols = cifs enable_cert_rules_for_protocols = glusterfs enable_cephx_rules_for_protocols = cephfs username_for_user_rules = foouser enable_ro_access_level_for_protocols = nfs run_quota_tests = True run_extend_tests = True run_shrink_tests = True run_snapshot_tests = True run_replication_tests = True run_migration_tests = True run_manage_unmanage_tests = True run_manage_unmanage_snapshot_tests = True .. note:: None of existing share drivers support all features. So, make sure that share backends really support features you enable in config. See the :doc:`../admin/share_back_ends_feature_support_mapping` to see what features are supported by the back end that you are testing. Running tests ------------- To run tests, it is required to install `pip`_, `tox`_ and `virtualenv`_ packages on host machine. Then run following command from tempest root directory: .. code-block:: console $ tempest run -r manila_tempest_tests.tests.api or to run only scenario tests: .. code-block:: console $ tempest run -r manila_tempest_tests.tests.scenario .. _pip: https://pypi.org/project/pip/ .. _tox: https://pypi.org/project/tox/ .. _virtualenv: https://pypi.org/project/virtualenv Running a subset of tests based on test location ------------------------------------------------ Instead of running all tests, you can specify an individual directory, file, class, or method that contains test code. To run the tests in the ``manila_tempest_tests/tests/api/admin`` directory: .. 
code-block:: console $ tempest run -r manila_tempest_tests.tests.api.admin To run the tests in the ``manila_tempest_tests/tests/api/admin/test_admin_actions.py`` module: .. code-block:: console $ tempest run -r manila_tempest_tests.tests.api.admin.test_admin_actions To run the tests in the `AdminActionsTest` class in ``manila_tempest_tests/tests/api/admin/test_admin_actions.py`` module: .. code-block:: console $ tempest run -r manila_tempest_tests.tests.api.admin.test_admin_actions.AdminActionsTest To run the `AdminActionsTest.test_reset_share_state` test method in ``manila_tempest_tests/tests/api/admin/test_admin_actions.py`` module: .. code-block:: console $ tempest run -r manila_tempest_tests.tests.api.admin.test_admin_actions.AdminActionsTest.test_reset_share_state Running a subset of tests based on service involvement ------------------------------------------------------ To run the tests that require only `manila-api` service running: .. code-block:: console $ tempest run -r \ \(\?\=\.\*\\\[\.\*\\bapi\\b\.\*\\\]\) \ \(\^manila_tempest_tests.tests.api\) To run the tests that require all manila services running, but intended to test API behaviour: .. code-block:: console $ tempest run -r \ \(\?\=\.\*\\\[\.\*\\b\(api\|api_with_backend\)\\b\.\*\\\]\) \ \(\^manila_tempest_tests.tests.api\) To run the tests that require all manila services running, but intended to test back-end (manila-share) behaviour: .. code-block:: console $ tempest run -r \ \(\?\=\.\*\\\[\.\*\\bbackend\\b\.\*\\\]\) \ \(\^manila_tempest_tests.tests.api\) Running a subset of positive or negative tests ---------------------------------------------- To run only positive tests, use following command: .. code-block:: console $ tempest run -r \ \(\?\=\.\*\\\[\.\*\\bpositive\\b\.\*\\\]\) \ \(\^manila_tempest_tests.tests.api\) To run only negative tests, use following command: .. 
code-block:: console $ tempest run -r \ \(\?\=\.\*\\\[\.\*\\bnegative\\b\.\*\\\]\) \ \(\^manila_tempest_tests.tests.api\) To run only positive API tests, use following command: .. code-block:: console $ tempest run -r \ \(\?\=\.\*\\\[\.\*\\bpositive\\b\.\*\\\]\) \ \(\?\=\.\*\\\[\.\*\\bapi\\b\.\*\\\]\) \ \(\^manila_tempest_tests.tests.api\) .. _manila_tempest_tests/config.py: https://opendev.org/openstack/manila-tempest-plugin/src/branch/master/manila_tempest_tests/config.py ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/threading.rst0000664000175000017500000000523000000000000022174 0ustar00zuulzuul00000000000000Threading model =============== All OpenStack services use *green thread* model of threading, implemented through using the Python `eventlet `_ and `greenlet `_ libraries. Green threads use a cooperative model of threading: thread context switches can only occur when specific eventlet or greenlet library calls are made (e.g., sleep, certain I/O calls). From the operating system's point of view, each OpenStack service runs in a single thread. The use of green threads reduces the likelihood of race conditions, but does not completely eliminate them. In some cases, you may need to use the ``@utils.synchronized(...)`` decorator to avoid races. In addition, since there is only one operating system thread, a call that blocks that main thread will block the entire process. Yielding the thread in long-running tasks ----------------------------------------- If a code path takes a long time to execute and does not contain any methods that trigger an eventlet context switch, the long-running thread will block any pending threads. This scenario can be avoided by adding calls to the eventlet sleep method in the long-running code path. 
The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: from eventlet import greenthread ... greenthread.sleep(0) In current code, time.sleep(0) does the same thing as greenthread.sleep(0) if time module is patched through eventlet.monkey_patch(). To be explicit, we recommend contributors to use ``greenthread.sleep()`` instead of ``time.sleep()``. MySQL access and eventlet ------------------------- There are some MySQL DB API drivers for oslo.db, like `PyMySQL`_, MySQL-python, etc. PyMySQL is the default MySQL DB API driver for oslo.db, and it works well with eventlet. MySQL-python uses an external C library for accessing the MySQL database. Since eventlet cannot use monkey-patching to intercept blocking calls in a C library, queries to the MySQL database will block the main thread of a service. The Diablo release contained a thread-pooling implementation that did not block, but this implementation resulted in a `bug`_ and was removed. See this `mailing list thread`_ for a discussion of this issue, including a discussion of the `impact on performance`_. .. _bug: https://bugs.launchpad.net/manila/+bug/838581 .. _mailing list thread: https://lists.launchpad.net/openstack/msg08118.html .. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html .. _PyMySQL: https://wiki.openstack.org/wiki/PyMySQL_evaluation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/unit_tests.rst0000664000175000017500000000455600000000000022442 0ustar00zuulzuul00000000000000.. _running-unit-tests: Unit Tests ========== Manila contains a suite of unit tests, in the manila/tests directory. Any proposed code change will be automatically rejected by the OpenStack Zuul server if the change causes unit test failures. 
Running the tests ----------------- To run all unit tests simply run:: tox This will create a virtual environment [#f1]_, load all the packages from test-requirements.txt and run all unit tests as well as run flake8 and hacking checks against the code. You may run individual test targets, for example only unit tests, by running:: tox -e py3 Note that you can inspect the tox.ini file to get more details on the available options and what the test run does by default. Running a subset of tests ------------------------- Instead of running all tests, you can specify an individual directory, file, class, or method that contains test code. To run the tests in the ``manila/tests/scheduler`` directory:: tox -epy3 -- manila.tests.scheduler To run the tests in the `ShareManagerTestCase` class in ``manila/tests/share/test_manager.py``:: tox -epy3 -- manila.tests.share.test_manager.ShareManagerTestCase To run the `ShareManagerTestCase::test_share_manager_instance` test method in ``manila/tests/share/test_manager.py``:: tox -epy3 -- manila.tests.share.test_manager.ShareManagerTestCase.test_share_manager_instance For more information on these options and details about stestr, please see the `stestr documentation `_. Database Setup -------------- Some unit tests will use a local database. You can use ``tools/test-setup.sh`` to set up your local system the same way as it's setup in the CI environment. Gotchas ------- **Running Tests from Shared Folders** If you are running the unit tests from a shared folder, you may see tests start to fail or stop completely as a result of Python lockfile issues [#f2]_. You can get around this by manually setting or updating the following line in ``manila/tests/conf_fixture.py``:: FLAGS['lock_path'].SetDefault('/tmp') Note that you may use any location (not just ``/tmp``!) as long as it is not a shared folder. .. rubric:: Footnotes .. [#f1] See :doc:`development.environment` for more details about the use of virtualenv. .. 
[#f2] See Vish's comment in this bug report: https://bugs.launchpad.net/manila/+bug/882933 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/contributor/user_messages.rst0000664000175000017500000000365300000000000023103 0ustar00zuulzuul00000000000000User Messages ============= User messages are a way to inform users about the state of asynchronous operations. One example would be notifying the user of why a share provisioning request failed. These messages can be requested via the `/messages` API. All user visible messages must be defined in the permitted messages module in order to prevent sharing sensitive information with users. Example message generation:: from manila import context from manila.message import api as message_api from manila.message import message_field self.message_api = message_api.API() context = context.RequestContext() project_id = '6c430ede-9476-4128-8838-8d3929ced223' share_id = 'f292cc0c-54a7-4b3b-8174-d2ff82d87008' self.message_api.create( context, message_field.Actions.CREATE, project_id, resource_type=message_field.Resource.SHARE, resource_id=share_id, detail=message_field.Detail.NO_VALID_HOST) Will produce the following:: GET /v2/6c430ede-9476-4128-8838-8d3929ced223/messages { "messages": [ { "id": "5429fffa-5c76-4d68-a671-37a8e24f37cf", "action_id": "001", "detail_id": "002", "user_message": "create: No storage could be allocated for this share " "request. Trying again with a different size " "or share type may succeed.", "message_level": "ERROR", "resource_type": "SHARE", "resource_id": "f292cc0c-54a7-4b3b-8174-d2ff82d87008", "created_at": "2015-08-27T09:49:58-05:00", "expires_at": "2015-09-26T09:49:58-05:00", "request_id": "req-936666d2-4c8f-4e41-9ac9-237b43f8b848", } ] } The Message API Module ---------------------- .. automodule:: manila.message.api :noindex: :members: :undoc-members: The Permitted Messages Module ----------------------------- .. 
automodule:: manila.message.message_field :noindex: :members: :undoc-members: :show-inheritance: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6336753 manila-21.0.0/doc/source/images/0000775000175000017500000000000000000000000016370 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.809673 manila-21.0.0/doc/source/images/rpc/0000775000175000017500000000000000000000000017154 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/images/rpc/arch.png0000664000175000017500000006410200000000000020602 0ustar00zuulzuul00000000000000PNG  IHDRp HsRGB@} pHYs+tEXtSoftwareMicrosoft Office5qgIDATx \gy9g3q[j[ Xhq()X)ϴ~=:3mVqhV9xTy2^)Ea| #p)SK43:v Y9KH0wo{[?sU}~~o@vT \*!9 Ge#qUm, q }}ΛgtwY׽o@XɖF.ʶ[s+n^-;uWnhxbV>Mk6u9' N@ל7lt̗-'e#p鶹J!p&py\[n nWv} Z_+e+ufzC p \kdӆDo2 3K`N|7ls8FuI{+7nSx^9[k~@#p5뮹~n>KDޥ%iUӿy˹m̻eV p~pL[$Nn p p@ΥYuT>bs屺)7xJ^K[Usuݪ{_* n w9nI LMU@U_"U&V'dZ`KBDdE 7ɀҎLiI'p@'peI+bg[mGs@% \'py ѼymZR*N˜@8f89 W+rfDlۦZuͤwv\xWp2U蜛ULĭ2 nl\~p0ǁ&˾yt+pGۛ}g?W̞<8@8@=j-Q:ArU?Շ[g p T}"CT^(z~  p@յ@1_[ p{@-zV1UL"ކ#pR%ai8 pRU'a zi.S8@*:^}z p A7˻i p7WΛ6c#ru 1ۙ p{muM6, p}K;-o~7Ьey(]y啟߼yM6u#MlܸBaEٲe˅e{݈e?VN @zg۫_պ|辞Y۽5eNc{ ԓߖ^#Cx6}fVk>}K'}ǚA-2(/N$\2"m"[ܿ7i]DD$:vׯ4?~y8Ν;|'Zrk׮g4z'4 "6rOiYN4+p"\d_\+%Ѭ_8vX+5 /Id[oȨDgff>&QEN&f;ԗZr8yԏOY^ "q"t C{qD٤?Y$n?Vx3g&Bڽ$xѦD/YEf9`ed5|aJ4<ۥitff{ed+++S!mq,,ѹ 6|5$i8?9+^ \L 5"p i$VH&Qϛm 5u"pC8)#3%ږihfƍ;33+t0i'DD,Ҭj3 pC8I*#He$|#- LӼn!mذᖫ&e鸼$m۶F"w.pMdJR4m8҈ pm.˽okv…|gɈLKRK'y_G;xky,p~G$Ft2G~kuLC:8 oSO=o^'"^{G-4'9 C qeQ%pRtΝߑ/p"k?pGuz;8oh2 \;KIKS9an@`lN*(IxE^D>O~Z%J"`"Pmּ뮻ɖlKǮ#O8(p|'}{ُ/p;ɶ*~տΗemn$d*iFC8)%B'5$tiN|}INEeEt/>*hNoX񿇼~NU>w_]W?틧ø(\q!p C^L8۔ (Ӵ Q#=&LY_"W*p?X+ UwwmfڿIh]Y}I9WnýkEnx厛>!T0q*]`dw{m& )P,$PYKfm3 *E| nq/I["0'&IM@8nLNRsܟ>ſ ?Kj^s5gu4 NrW|;Y6 
ZT&oe]F>ڪv?ML.#?wDDeLC8Pꫯڇ>u}x%ͪ"pqA^w[n&&pZo>ڣVn|9:"HVd2Mee85]G  mтvK]F~|}wҮ~+eV#۶mDZ^2[ٴiol (p*T6zr F+оty.9m7q7odPXo\o/i.ݵk׳ҿ0{*pފȑ/V>2ҸmHPB?,GD佢Q9v@&Ñe˖;\YY^裏6_>i&붏  pU"cڟL3-IGT"/mIA86;;333ۿ.ٳڦS+|Y8ی*BMPjI?7`#A'E?!Y@Ñ㒈]c=6u9IA6I0= gY1d-fNѮpFʺ2@͛(͆RjRs{= ]tыGI`"Niis BF V"pMj\m:YF[v/!p=TvHmOU*J:{MqܹL[#G{KXS}D̀)Q.jd<4iδD4凊.7wkojeIKyt^~/ wC:{lNSqGDHF "K0^ܫ|i %.97rH~ 8bO+5,8@Oщ%AIK/ȔE#y"ocƍ?KsE{pߋDDbǢ pT \*;I*8ynZ}^t[lމV%W>o[},[G88Fi81Ras2VM Y[u(鲡&T'sx{ߗiYÅW`Qp \vC8@{INiڌUEyiщޢ׬}hBE8@`.ois(N'%h?~NnW$:"0U'$r$B*aÎ Ҳˋ2)(F%mwF88@C@*@88fd~^@18M+h=AU/IL#7-' F88^ $ -KWy!-B8@"oEɼ4d CN^͗VOV lKk2)VkW~J}8,,F^!pPNu ҈H/,cבZ|^dL[ѓ:WvlQ{ݞJLSe}9D8Pi\ؠͬPDOh//u:WHTL5Gnr p-^ɰSe5jהvg?/+g*zU|c+p*AI͇$ p p8mT4Gr"EVO~6XN4=lDC8  p pS#p[_~ |7{CC8mx)y%N<98@88@8@8@C8@8Bo} rȮ/8@8@ƙNn]E`[:Z0wח8#(E4.(`Q]Ku[K>!\X͸9'ry..nl!7n+=3\}N͵uwmo+v'!p#*޴~>(镈^Q7h{wuf*!v?>ќT~;"q~,>r躪F!p.VMs9#m }IXAY uULd~FT("p!pOv_Y$nNVąXK/яrG:9بX}YIeY[nL)y*&bWw&C8@5ڇc/ymjΛVH#p'oU|ZUrQ5g}e'e8õMWqb`6ֽ抁\4mO/Bt p7>ק^hom>_|8w$>TupYv?7ѷ{ K&nIB94}@%#y(UmjU~o6@%.%`=0m]Wn14mޔܳL{?g!p I{aLCk˵'8Oa+plN8K2_85B^@D$fT^!p!p@q-H!p7zH Vd62CR6(%&&ͨڔ*5!p_5nڌM2}%/\&ӉQ]G%ǹ #8C\jMYVݘ*unN.H3"}ߤG8CF=D`YĹ*SN ,WbޫĐY_$ \`4\s/P8@8.k0ebaJej U3F :u]}As w'7绻kG߿3_U/I+z!p-vFiIX%_gmfJeB hVtpe?3rkK7\>/V`5w GW%&Q7i: !pHwвYm/H['` ߤ>pKȽُf^Pm:#pZ'ͧ0ivg=0=5mЭMAETڤߛD$H/8@8?\޳\ 0NW4P .p \ tX'pF?\M/:^ZJKGф!p7뀤0^n 93d"knE~]n[Ծmf=1^moe+__ovN8@xy>CC\*.iK0_u@E|gZN7%))8i T7K?88nn2'TL9MsUf7ɼ38[ӑ&TCv 6%z@FμnM/j8#y p\ّؗQ!p]cΘݫyNw|g4ij?MK=d9"9N1 ppz8M#"zM!pgvlnv pЗKӈhTȀ!pK$n;%pZUp*s!p\q-Aj'HN襤ŞD8TN"nkŞg 8HEN8s78kځ=KG9NJC8SH  Bഘ ͨA !pd }K2 p!pC^@`(gn!p\Qqn8&#Ry!pK{ p0pc*!p\_4I{ B C8NqN8; C8|&i/ p0XKMQ2 8@8 K{s8@8I{Cːg( p\F!peH(-pg+#=} :#i/ pq'ه'#CǾt_s۫_N}7qGs.wi I{C8h{-Y&&BҒ8_"p.iiJfϱ&??Z}0M"u<[o ܑ2'p$m/,w&m#ozs)?ҝw4Y8;-&Gܴ)5Ko=i굛e~/} 8䤥TYri~"y{ byڟ9w'#{N8NxC8DZG&VtsS|'DN?=KGA8!q8‰O Y8^$E8!q8 騒s(!pC#pS!prHڋ!pC#S!p6sBw<8=7'ed@Zfڐ4 Z 3fg7~~מ"25.ud8!q8m*6o/$GG8!űMexMC^/BhNI-g>B\wHdE4q> 8|AhN4x=\+Y&!pAn*eM)ZEc/&GN8c!pm\nZ"p pMn`By ceEhB 
5q|p؄:M%85Mj}m$pc/ѵ2=)g^U=T\)amho{8ܚ/`~6v8;cn"cZbIͮyzqY֗8%)C$uȴ<385UEl3vG'Ix;=w FT-є5^3sQysЭSX0f4%?Jcs}]G?xg=pxi}w.E!p5iuhSmNS;7kg?7>֍S+h.$#p]nx˭."nP~tQ \hQ^q+ѫLkȱ1;:Y+\c]tAC8n v/nJ?ZOG|نVY}mC b6`f`ќJf;%nzNj%Jq;n|9^Cw'|I~w>̍{K$`c-p˧N)Rg+ _DRJF&T=4ݧD⒍m٨8]?TTM]7 ,8[0 70ȼ<.pI&{ jXϻ>:&C*IQ~pi' =v3MxY~EX_ܠFZyMV\Ɋ ڞ~TtZ{j.i7d>[i+/yilBO/6 7QgC=HRG$1lG.t3& K8˻6Q9<5MoH4 kO'?hD'.  _)/˽{6&hGM 9!\j 7Z4{sSFɛ]= \^ q֎1Vgf#֎#)'&vΗa$$=$ä"8}3FB{l4=_CWX~ŏN?\wMUݼm:Pň2.cCCAͺi$mGNE"JO}j2/&%M[KZ5Z 'fIzh]j%)6{.kIe_n˖󣽡h} @F"pClBuIMX>OBQ4tqY"նoM9% 5}jGAH \:ǹ8:Bk7W;6@/^ky5t}k.A4Ku¯Gg`;%pk"PRӈu칦sPkz[)ϵӈlٲiW2^JV K7ȒY!p}( T;SqM~"P h}ŴSd/5o{sE71d_I()AJ!}#ceͩr~K42Y;6mܱ{MȡLIe\ۚ^Y1 M |nu lbP\oB6RPs?tNjtzct?Cǰ} i)Nsv 3gW$p"#p f "p_#6!*~Iuա=iKLisD- F[?+NEfԁK78/I2)O85߱=)GD$č"Ljw؅dI>rX:(AaN4!$R p4C 鹥N8ӇͥX^lr(>oR)PscoSih6srIlS$5}gG&vJ8V=?A/pXz/t"ycr!p pCS4 дM 1G*IC NK)-=*Eo~6 &ˮ:{ pU3i^$%ErmʵzٶowF`h7L[1{@H oC:9^{j@7h:$NqcMu.n8 bP#!p9j"uE".x!p p,KsRk IjlWN /͸}u"pMY?IBe۽J*<8='p˒,4'[^#wv]nٚ~6N$p~~^j(#pC#pc)p:9%2Vp9SJZ4T*Uv%+O~Ύ:Wi ?G+lY>zqc!p)˵'v]ۋ%7qv'+Ez!t_!S1Gk%#8!q8PG)pK!p{=^{ٗ8'#G8X rT5P;=^.'\rT-&tϒi3h鶚K\ŕq:^8%7M1gRO :+TCٗ3iZ}8!q8QS*[n"NQ;}٬iH:8+uݗ?AL/<8=7K}Hj8/89.]M`:˝+nn :x9(J4"pKrjϱf&dt_qPrILkx厛>>G? :? C8sx87 .'ܹI.p}}GNm_<OC8w&ۑ]F/t"V.p:y9םǞB8[NCk{%/ pMMpҜ!p/ps+.!( pH{ght5nj7*;}͋.!i2%-U4=ѽ,i e{ρIõ~C 86߄1g 7ZK?C8J8;\; -< p7'ͫړT~er!p!p!pA'H!p!p8y3C8@;OʨT8CC#p˒C8@+pC8@\\]=!5R8@8C8.+TqC8C2"p.'ܹI(p!pMh+p85_C8! RmGtΞᜁn8C8=#"y!p .;wPR9oC8oeDNJNI+ppB󩧞 !p%Eͻo^}#p!pIݵؗs8n$6^dC8k{p!p7=\;"p!p\FN[99C*pZ5=!p@q8}}ӟnEgy4"!p2wv z<97Tѧ|>8qC8D^*pBN8@ DBR!puY[y78CpKy|8nh8|8C>Sι7pm/7Ji!pu}/G<7p MIE97pӺ6&B8n=G|I88Cz:q> \$&i*!p=.'9  E%?%'Є!p|4  p.r3S!psiIy!p p8)%8C"p2Uj5C8.5;9 7ț: "tRJ C8~,b? 
p@ΎFѧ"sA8󼾻wr -@ ܉'V+3ڨ!p6 pܘ DDz-99kG/@TN1< b@8K9'7 3w00o* RZ>!p@nF#\y啟߼yM6>7} Š[7oٲ傿e]l_9cZNFJ7k]}G8Cz>ˈT1ȬEK 1331.K/G*[z{m>-8}@Ξ?}A'?'ᵯ}"yߎNFl4/8}1 C8.\ 2!p"<lr/ȚҭGoٳǏ_kwDJ'El C8K 8`,E܈}O<^+++wnݺLC!p p-p.'9iN圃\.{[ltN:gle~ ۔biH⾙ Cos˜smذo{yE[iwĉ־5tƿ뮻n:yIF$C8@_$ᜃTNFJn߾z]gO&#'m-MgH[GyH@&_9ZrAj'?|TNNOSaבmؔv<ɲ2]~R%?]]_hX7F@88)%8 ^۶lrxR􏋋hYђyϙ|yt;S>%MLҧMiTqWfY8i%S%M"1Q#v?>coMm''$p{h(N% FgDb(jh+Kj%}6mWc+RC , \*x3{jie 9[ ,X'LW[_"^ *0i Q]UHJ6v"p=x0Μ9TMkN埡Y{GDÊXGT#i Uqϲ^rބۄڍ٨,/͠@麡"~tivW۾vJc*M4\or=5] ~2M8nNkA%L{5o:UmTt_ĹfčZA,C8FFo'H mSdsѰ_>|ͫ LTn"p pFj;$3gaBb?\J΍lU}fN#p  `@90l[pѪbH.h{BWksfCN _!_7=?҃!pr= ]CZyLҊʵrwm$o8䄻M0VpQ p!p^""z~0d!L~.jC&V91|Y?si=lʀWJ`F8`:سT=\} ps*޴F(Ȁ_BC&õ=!p8|V>0ꕽ*xӋ*1)s#{cwߪI[rIx~ {[hBOyh pc#p/_eN8n:PKF*Fƴ"&-!V4/M;Vn(8}yߵn=,7~\B%D:#D$!pEom#I m.gWer~=++yR;MpKѵsC%m!Q*y͐K WFLkAD81vSInzMkZ-f]ҡSw1Aw(֯k-W?p,K^85r$2eU쳑$On[ŀ)cdۊr@?!p*)&S`}7iF iM )@qC8 #p]CCFD8 #-vDKЈPzaK`3"r.an +g3]Mo. W\knre?ݚ'?݇08D~E}`:@Q;,׎uMpq ܺT^z7 foa/Fz\ Y i{>x& :¢W̾`*鶶iTCuL%[!nX~KKtT~tэLjɖ`k{w/מ@.gZ px=$'u*qO⋝\ޥյ*L\;" p\ZWtѰ7=Xʦ1T;9 Y p\KrM!p!,j{׊dõ=!p@V%7 pA2Hu@[팮.oI _҆ pxmCzKRvWf9+.j48 #@^#`TimTVk2_3mƑɢM\|?8I@*2*p~Ky侥?Mi_I؛ E\ݒWdWLbޒMŅ&u˗#u?*脳A_k_D3;ŗ]/BCG{Cs۫_7qGDN[/qU'Fk}"勞^qrXtSWt4B\g;|>O4b !}l}UZR'dµ,C\ƒF#_dɫaZJhYhKhUJlǟ^ @,r ܧ!pܿ/Vd̓eykeBӺn'l3MJjy'K5} g=fڼ[?ׅ8@ 8 .Y(֏?[)e5z=ѓn :VBr& :_ewM]'\ ڢ67秊=yf'oz͛gE p0Q{j" șHO͎"jg<=0]F$LI5g?vN'-#h4yr6gϘռFE7dRZylgA G͈)tU/O4O}="4"d6Y i+.2$Z:Me+6jnNӁz]ƒ\ p@H8'oʏw'=*IF▵"'k.VDԴ֧iM.{Y2U[G p}kxN%ITZn?}s}FΏMGFJ1˭ۏC#|kh<jB{8~MqҴګte pN/מ |(uNH>[)ҦJګUrfQ$gbuVNu`0HI8DoXJ *V*Gqԋv/2HU3q۝h9̴I b@8N%=1WRDlXR34CJ3O+qq42'ےmE |Yn(.~IJ,֢Il|ش !pɸ (sD(B7d)o=]+Y3yBhrUL~K[{ V ~t݊Nfy}ُ-gߨ/ p\ % 1 RrL%Z*:TwjEtb pyj-_ "QrHڤ91t*p1 杘j4ڊ%]\y>%BǤ~?`k/>k1.Egͩ +yNi m Hu"p%;xҴlL4[ݿ0B}5eUL_ӄ`G2&CElM8y:?:'1{~f+ !pU#ju#ꁁeoz$팋pi$oeJ&4[>4ɵ-?biHFMAI71|-xK5@:,l3Utb4I!& p5aOqvZd7qkJJV?h7 >.޼Ri[ެIDͭR;"LCH`puN;)iSMC/[\̀B׶|ݦFBQ=]&TP/0x봫ڍ:uH8_hqDGʶ{T.3f _i^nG>ډ b%UY22M뚪 ٮz}::O+Ifa{j?_ه~^ !p0T\(b'pzz>|R3 0Xay9SJZ%R:aD"pON#zf[XvL6]ͯw7.^\ oFv҄$I%WnIQ? 
$z%BBbASw&Y ^n'q~t7oƔG}tpM YHiz3{w#pzMvK=~H^ezopnuΛy @I[:r[ v8?YlWgж6ӆEF`0I8'_NuМqOC畴ļyjKbed6nW^[6Yy.|v"76: Ok'\IT V8$È!p0D1M1 BR^e.N~bM5P՝-[>k (SkQ8JfE%]].onFnjqi8mF$e!Tl׍:3#78mfИ^"%/WrQ ̳ͯ"pWiCҬ ^TfXJ ;D"K''"[}N viWN89I7o_ p7eyC`JGp uy]0rVItZ=$Q&lnޭS67om5 FmE3h\#p7ßo?zAo$y:\/oRm7'_e7Nk-SԨٶOW4Ӌ^@z"^*p!pϧrd@Ϧe +.!p%`G|٬2b.=8C`*k%HR?=Ȱ*# p&&[\i|hٸ}pQ7< KM3kn%ifeVs+*2O-*P!pW}*3׈@r7DElNmZC5MUdyM)mO_8`M FTD$7+y5:~mC8ۦSur\HMp~'lZFLK,BԡO-:ge04MZp5F+^\[k/_pڥ bѽ1ZRKrA \R=A]._wQI>! pᇌVTh fS[0^-Y#\gHe\-SCb(Ҥ5-4 p\zurȪi(pu͆)VG|Yo{fl 'pYH6Nde4MQ$ FIW4Q8/Z{[#gG+FCj_9mJ@Cĩt "j2 >P!Sl:*U0.JRu=:;@%ai12`͇:1+(`Gq$^>j~j6Tt 5"78>pc+0xٲ#OYx}+r!Ls8@8ٰeȹ A!p0 pTY~ e+njޤh/+Y4B)O}l#f+es-俜K9 ^b.꼙em-ՆB'gJ}/󾧢"NEWaQmO@8 [:n~/mTU'V4-}?w1f%JDI܂}rZU.H p(J[ɫP7Fl5-YjxߣNO %\MHa@8)._54ڧ%/JsM߹$R\U/ -m.e2=+RX=n0h ̜rk.ЯnMG4& N5Iӯ|'ܢ,8`XXz6Ħ_˙A%AOt|zW5uui~\L/@8Q \tL@lsZ[i޴Jq9,|@8 \3v) px0 8@@^BS$q$p;D`d<8IQSi3IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/images/rpc/arch.svg0000664000175000017500000004405000000000000020615 0ustar00zuulzuul00000000000000 Page-1 Box.8 Compute Compute Box.2 Volume Storage VolumeStorage Box Auth Manager Auth Manager Box.4 Cloud Controller CloudController Box.3 API Server API Server Box.6 Object Store ObjectStore Box.7 Node Controller NodeController Dynamic connector Dynamic connector.11 Dynamic connector.12 http http Circle Manila-Manage Manila-Manage Circle.15 Euca2ools Euca2ools Dynamic connector.16 Dynamic connector.17 Sheet.15 Project User Role Network VPN ProjectUserRoleNetworkVPN Sheet.16 VM instance Security group Volume Snapshot VM image IP address... 
VM instanceSecurity groupVolumeSnapshotVM imageIP addressSSH keyAvailability zone Box.20 Network Controller Network Controller Box.5 Storage Controller Storage Controller Dot & arrow Dot & arrow.14 Dynamic connector.13 Sheet.22 AMQP AMQP Sheet.23 AMQP AMQP Sheet.24 AMQP AMQP Sheet.25 REST REST Sheet.26 local method local method Sheet.27 local method local method Sheet.28 local method local method ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/images/rpc/flow1.png0000664000175000017500000012002600000000000020713 0ustar00zuulzuul00000000000000PNG  IHDRYiásRGBgAMA a cHRMz&u0`:pQ< pHYs&?IDATx^ ?pEPWJHb"nqGçDTB *"ʧĨ QED%cHL&1Im=3=<̽oWw_SU P*@T P*@T P*@T P*@T P*@T P*@T g}N۷oFm3Q̶xn+[)G!{84T P* mm}馛}|?1r;Z5e @v7Q^=ˇy7h555QCe\< *@PdžnD1WKW>  <6o~;wf 7VzMxT P*@@eP-|166P zٹ|Դv5T P*@@;v\cF Zllllh+WG;v9ϝ ύ P*@Ȏ=6d?~`6666.ḩb鮭1;kJT MMѥ-ו!llllͷM4bf |T dCF6Gh6666.]RU&]Ґ-gWebӿoZ8+C[m:.6666 r;O|VUOl ^,2fMNj0NȓC1X.&7S| _ ŹeI&fT P*Z'4<h}Fw33~*+@e T P*V7^8e N"iRmQ`.yT T@۶>XtE$FKoY=@lllnnMuhYLړrdTfW?}4m0 L9[mFKjXT P*@,W @dmQ=aKT P hhM'mmQ=]u.%?T $@> CғњGT `llllDlϮ=OQ1eOZK.grDe*@@h磾O`Nx# jyE!F"ˠT  `u ( kdlvEl F@> `"md ,V8z`x4*@@F =2DyxjX^ u{(A@ 6666i :u9N9 Ϩ^DQ6;eP*P4r)DѱKߵ[=աCL뮻s#)_9/QF5L= N/`25휅8 %YW SF$}Pkk/]w\>ǏWs{E]䔿K.jΜ9Ny0uSܦ͜g5֩'` N 3Q*@Z+)Pf>B@K&+ ` X> U(@e66H ` ^lp)JoXa&&I,6,` l޻5uVmm 6@%ֹK |`vW>T dBD פ==<lkXl&z*I%`x$*@r(@"mKQũ`Ū\zj~΢k>;>v!euh%&G lb}|/-_#a(@"mKc'Z9`[n%!X Ib/e=8B)r8hօa>C9U&_Y"GM{QxoXlnXl;ĄG+)i3g{ `3FG\_LwNG9KYiC u A CJ,oll,Pk`y[=a/+y&b?nf7JVs`zX,(˶g/׏KB`c!OSS oL}77*E9/P AN{@lj/ +R᥽aONNXl̨O)2Eaq)@KYK7y x Xv(edzYhzQj^as"'Ӌm⽕j6{" `pX*@GD׎e$K8'B&$N,!L4<#9ݳiEVU"*ٸSz# ɉ‰qd\ d ` 97pl,@ R:N{B'b+ rK[!¶E(|}ӌ~YS T " m/ 66H `F&²(1+[ <6USF` Kا=\XP: .ix^,>TV!mvnցrecK Ј1kk7m#*eZQ&ʐ|?VQlJ#̱ $0Ɉ NW>y|',[x/ {Y7yX7 4'X*W9A@,ʇAl/^&pF{PyL$ vɉ`vǝv"U_W>װ?`5g&&2[[ "l(pP~0><sHX,; vQCd=@Գ>I! 
!NJLµ|靧1B\h.8[Bm20dX.~/VC&sL@_^?")OnMuܹsziV%c5l Xly`^X+}b?٬+Kk/_'Qs6b{YbЉ2n:_f5Gƽ^BlN |f9~^W_Z.L 0H%zt.%p}?|RZ`4hP)OϭVSΈ^Y̋PO,Ao"jVg_y]_{냊 Wypd?˟^K^foCHfyvR/y>\]|W(,-7So}OX)C`cMyx] JPڵkWuM7zl{U޽K eYVWГK%֧e,<GVB!M@yڰ}k2-+Ʒ*;h&c.s\,ݲu+6N!,:KSz_:tu/{7Ϩ,k3:6g!<6 J^Q `voB_%E߉Gɱfn VR|ئ7ooг> S]` -mn=hQ8W,#کS'u 7իWGLݾ8~5$IL]u&` 9E?,Ҍv9cccc`mlgYs|,l[ظDf]}!`kj@ݙ@Xޠ8#F7xC^5Gj|숑VYr,Ko2E4+<ƕ:GOP9Sc3ks=!K؄*!:ֆ ^Wm~{Yj˱#fh8fxxe^!,:d,s;ҋ/=Xc|lguf jKKZyd`Ye#|a2k^ g dR&.Z֞1uAqƈVhpFaÖ EA5 ` Q.eCYW6lO'xBׯ@` oT;w.,r|l4,_NMMMS'L㐠8`j6o|7~CB6M<[>Kyr2eNuvkypw&e(?ͺ0-028i9,h}G1z~59c&o=8;Z $` X,Ee <럸\uH6QQ9@Lo@10ȋQvC2E7)PjzPy emB|^f} ,Va"(,G?zմin<@Ϫ?uMX?~SuMx,˞8J0 pg{FB|@s9$m(6Ӌ[ 4weJXt%- 8f5!=>SG/H\pBˠ6c)t^NH]pZh0aѣ6lZjU]!=sLէOj)Y, ` 6a_`ˁOMϪYF_ޠyoxXMFJ%vvi'/aaE(gl_ji9ۗS%وfmƎ>裺qƵ{ҩUz\QD%`s֓t6nJքz,f*L!)komӠR]v)o7=ǥL S0zY_ kN%X?)i{+;6'NrYM(A:czV2i V~McA~G{,pPE<&@+chM>+p6]l6M+B,8_Bul^}ݞVZt:=k_׺_޹Ոd٢l{k`c ?|?8qBh&7L m*Ui}>W{fcmw rqb@(<+~V&C۱ iXl-RvK{G=?\}]۶׿ZmV\,XShNX l~(seHHr%؉[US$Ne#ٳgP_)Mjv*+xq;?\T|I]ɠDXg紟 6fVZGuO\+C3^a3Vj>GGqSj-(XLd{LBًLdzq,n֘{Kl{'ڴU+Z1;0v[O<PI,E6fgHu FDѮ(^BӜI[/azZ,,$N2iI1&b(Ij'OP͜;B"c`1h=uu oxٳ:KCM:U}駉>LV?O?.Rl (le=f[Y2 Nv{>)[`3i륇x4.vj,.e`ᕑL)3 與՜^F.Ky8n~XoȌwy%胔rP1Ly*q#ۥI]r%&q1B,X,f&~ bR(f ?k^Ń>H `+[l}bAUk5x| )clSk6h S4@Sk ԬԼhaRX3,Nt CSR͚+[miُ[sT@dW]oT2lX`1 6mxa]ӏ5vpCue)BZz>}ݻ:՛oQ%_y['pLM'zȞQ=.oGNGc+@@v4b,jf?26.2`Y6PȎ! p^s5j 6 v9 ;n8oJX?O>PW]uٳ3fn,B}MqG^x {I 7<Փ|؉nYróJasGʳ}69?֩&:L `M`v\xmLM,Ʊ9 8R+m'zrFS%mYaEK#;!wɘ+Iخ`-ZcȧB|g-,oHou6 @f><.k{c]|X'S$39* `fyϣv"9x6lvI9! p9?mzBWP R gV:&w}j;=&MZ}քZl^HJ]ClcKmaQB<^rG km7l!xTk{zD0Dy_]#bRW=.5%wXFtjTsϩoQ=%,}: /r `}% 7:l0Ld/\O>K%BI7ݦ `ǃYW h.Ab;|E8&62W`su9~2mSH\xM]#t11BI{uPA,p}ꩧu1qSW\qE}#lP t uh:I{EݫJ8!IB o~ҞY<[wʖ !~]wݥ0$,cilKՏVڴ|=Iױǝ:q&K-7U+-3 ^Z$OUM+9~vjY!ZR9x~ͬ?fEVj`]ղN=Ƿf`1#0f6w3br0h E~|W_uޝ[~s`TuI{@8kv;YDL{:PCnnԬY2q輪.eK,\PaJXĉ` ̩ |ro+&YXշ~[]ry-e= u 7WꤣVWn-z"\x^7*[D8`zi>a귖jr^(/x~/d+o_Wc`csqw眣~zjwb‹uy]?6xaO<(;h5iXz<_l /&r lNeoNBXmN@"+0o9H"#)a~+!dIʖr\lC9&dC sT'z`. 
ZWj/j,ׄ}/}%@I~!P`'( aHXJ `S٧  cõSl6 T  d`1k5& ʨ: 96hmT*`cK 1ʕ+ը#=//'NPC[MK:~D߫N;[>yd V`|Me,,a  P/ꕵ6@fT d@,`PBY]֞؄Jke,$-C1#>*`c^ucZW{3'ք׫/l-{ԀexPuܸq(V1v"v;o,6.ǡ%`'% d`-&0(o VJPl{Rs0m/ z'[Y1<:ýz=X@묣~{UWy XK8&f ɘAy޼y^3^r= ɢ v@dwM 41@ DK:hq\mu_y&lnze+ȏ0`^w9AXWgl.2KXQu`_"bҦFH0†ixⷻᄏٓĽvU=kR`Xjc`W ^I[Hx)ݧErA2> `V,Ac`1v/`++kڳ'BmsgKXV:D&:L `T6,KƸժ !~%/:u^a}hu\U.S/f̘+ 0+/y`A)\/sZ½D6U];Lu : lX,[-RE0rh=t=>o_o&cdXٰc1GP \Cu\"D&:L `T6,KƐ,wq5g!;ݛmsgϮ'N&r_x)`Lt]j ܧ5$:X'QPF%` _c[;"P8eo A`u]^X,V\.o̘1jСjN.`"zT\>`l"<6,K0F :먣:J=c>sjᄏ6hyI{_&zgJK<}-D,Bh"ZTL>׏X'K:2m׮/G1eOQ9j="K%Vkxbc?d{b %KZ,ւuI#>[ >'O5ar.ӳ uM`v?lzEkn׎dFMMMSӭ}IN & 6 #,c.Z#`[&kvt-UW d{s56hmVs;kޚ=&[nEmmCLX3&lz lzE <Յd%`Cy?s`a'% DFAmQ Ɣ! pj0%m2O VсN:Cm.%Wbvߠ5Yz"P=O#}xc|kBMu5^H[/Ν;7t"`ueOMj{Z\"φn赕]o K6m8CYaA,֩s< I\JuW2JgDlo'~gtJ~V<4uV؇5 n>NQ=PݻP.D1%go ]tS~9s>vs᭭@~d-lgd$FMRmh d(Hz?~Ho\,:F=gyt7zc`` 7n,`#|Pa2(oϽD"R##a$x'&kJ`wd$FMRmh \bL:В7vM7U^{4N_.אq~4G?<$XeKM!`-ZW`jP>lF=K'S{>%; 1t\zm"4rԩjžiҤIb9z)-:vTM Ξ=[U>1mOd;HH{ ` 삣TKmma)9d}hS`U,ŀF$@$BnMB{O~ /԰ ̘FpҩgK\yN%`Gb'% `Sr!rR l IpI(,vv:rHhѢRz衇=T] E%lw@: dVXlj;O=JXO?* >LX;s/Ǟc|~H%`jBDZ9QuWp+ !:q-c3KYi/&Fq ΪVTKM=ar]wۣ| kΟ?K;m4/r-jРAxj< ٝxkqݻW/cܔ' vE͙3ǩh0uS{0ԠO;UhW=E[|X~;C(„G7 ӳ{SRVJ%AXlf ALԱc|jƌva "ZtjQk2si4(=HQZ $bx_x,`+Ʒa/+y6p N 3Q+@%f .Tc۴i*+\GH.Xl6v V?Pe,[Z?~fr#QH `3҉b#:vUzt^&))!m!` Y-[%xXl߬9TK4aR=[n{52}.iˤC%`sh]?%X{<+æU6'r2&[yG\O\=oAKHm\C%` ɛihXCXթGG="VEd D/PI֐E>7'63<#9ՠ)WK%`Xl򖂦%I7H)X ե@<+$Q,'t,"ٳrb˕W uyh*!غK:O'Ym XlV ~av%_f! 
u<+[McOTMiއ[;/C6 x|+p>=+5&6p ` ~kyeWk& i:6Qq 6.?)Q$%W"hiDv%ʎeep!` a˵WvmX+∣쁕tE޻͐{ĥBǥ.-X,@mK~۶m} yy>6ɲjT[ܝ TP^玘gT>)l6V}s٩.Cs?^)/2.]9s8 a6Ե}GB`szE]̉ -ey3؟`k`{myfW^:"최cfdĒZ41/]wA 1k-t~_tHtqM'l&|1t`R(ס [XdQXg#} l=`kR;6U˼؄'ݢ7mC؆e;;%lΩeCg/vcer,f~۰eoX,n..f9ucP-ͲN;_ٯ^(H6#z߾aa ?,]gu`hhKCP=IpTlI}&W= s`/6;jO$[l Ap.+0[I󵽺c;ز]gMn ݐl uu ZkOkz!c 3ܳ(9r|Am/7kn󋜲˗')yUR>g-l\~R6%"'@ ?_(;?fg 5y3T:U̐S@]FGǬ ?`K^U RtGlkQ F*aQ@K&ZONvq ն4\sáܾ.לyJ` JQ n zI6/4 `!*/ZK.04엲Ug_AHC4\ԁh1;ZVe9:yclϘ҉TW4e˸]s*3#|MPm28xq,bs68$`iģ@oB' z,x`74J$,4$sOTlB~@3ʸF|WX 5={fDž}e ~G2UfD.!ɘYׄF\(KB]rm85˖I!eH'.0.,Vf/tRGݾ9I$L`^fX˯ I_]Ji)ZJ`LrQ6fXVū߄Dk8ZZ~%dva6K9fÝέs˭Ejau~E9] F=>7:(:Ko3%%cP+"\p#{d#P)@%:r"`g;$ű`\Tnrbx[t%`sfOz:Xbu\1Hok.@~${"'&Ƕqc>R&kdt=Fܟ C,67KJuZ` 032gaBSCŒT ` XF#$B[k<}78Yɤ`ՇP*X,F@a_ux%.x<<*X,F@#` ʤ`3uϲTn ` 9K$݁xŞL#ueM%֭O Xd@ `[,f11@YTZ>hnO?T&yYK=uz^$Z8@IMX4st#G}~_%ݡO?vX`;wD͙3'0~j뭷'eW577;tM"ϥa}3w ` Q,l[ظDf]Ϊj?_*Ph!`Kr xw}4 3X{q^Sw+0ͷU;찓S{mvmwoS>#Lw=(+A%F htqnd2`љG)26X4bsf}#7ܷ9N: vxlXR&'6zV*V xl@%`nlf"fPk~cNMw/W3$Ya%`i2 0WYGʳё~-y9),i1YtuL1ј \j:9ԒqˀϢma)9d}hS|@6wQq/}&q\sj~͸K6/\t`MR`ibP /%.yd:+!1CMϤ{Z ͨ^:q@cO0Y;Y=H$ʶL6U~qQ>c}<灜,F>\ruvꀲ$ïiX,*@^JUIxuV4`Ő4biR?㽖o1 ^-QmUvh]Q"yt%4CʳNj#(7^W.+k2K=L Ica_9>`O֖{qKϢ䥔]5^]WK-9-F.O04=koڍ 6RX{oԣ\x1 6_Au[!ߘSR=96/H.HƫC%`Mxq= ^Q3$_/h3Q,3x|À[,7vHzuʥR¤;ӝr2VQ9xA;#` oXۀ@WՄVs `즷Ƌ0»k$n[ףv'<X'[j5)'3Q`K%>ZDȃY,#$i$X" !MViV_ tۃtl}7@lS(@MϵCM&@ hh*xƷt\/"ꫯzU3g]ۭ[w5GAW `[_@p1pq,X6C4Yg^{5oWLSNU{.l>+U˸ΛqKϢ䥔]5^]WKJhg`Xu=|b\a;*][nXa fRby)eW$e$!` /qypX.=0&`b؃7!I'Kw}j*u:@NTeRjrɌTjL]WKqJe@9JfS 0}Yzʕ+= 6C,„;d&*y)XDs$*wF%^2/UY3_2Jչ&%p=蠃ŋ{epbJ(ǁ>qyQdK Թ8d&*y)XDs4Mkt8s^6<":L `T6=ȠqnC]:aʤwkA#`ix^n{`:u}<Л?[sN뙐Ȝ`LQLe3Pbʮ4:,KeHi0駟V .T\sѣ+\5g;73$c)L<_,Z!:L `T6؅{vS45Ël,4kQi<=묳fmƎ>裺yۯݺuWnӖ)֩ `4х.DTtmrj13`#1uE.Ԋ+26[mm 5m>sjܹꨣb5k5UmŸ]}WfNzCu#Xm`tav [N-L+]h'zЪf;k5 ^-YD=jwVV/_Rt7頃Ju#'uFf(Ѡ}KuenZL%!<$ڞra ,H0Bq6 /Zl0a|~3[o?n kՎ=ZuС=ղ9Ku"ا Ff(Ѡqz`aFNhY0NM,ߝ gXԘeԅ#Gag=20uOv;1/gaPIjX@ !~ǔ,b|,@kbq7|Svi%mll"MDu2Ψ F NS[<Zn6D'LQm߷袋7tQC 'ekN ><0_|(wwT{vʋaZ_S~8~66P `^_1V3g_Z>CJ ۩&jk#+@` l[}L؂XBSAFcgm{ɓ'do[#SO;ep 
jСNy)L]s"to7tF>|~3^O4D+WT/~;?p[5=j/=~{iso` n`f"'zސr1 _0PrĕXxQキj?MJ' W_}kVu]v'|RkظKKX~[ptd6786{BLK%L9sǏ~aQb<[n^L:T&y"ڟeMtZiYF8eol?` Q֞|VXlZcZO:HuzyW}wۭbx`o7… ՠAڣh"駟ƞx'|R͘1|#`26߷'K'` )5{ yT]!K\s52ط~[&@;{ァ>0ٳլYc.K- 5סɞyG;)PCc;Sc1[cShhvN1]w.zj/+TݺuSƍSΨqߘkV ) 0ŘvKH\pA Vy7|qEhzTK,=9PnB h$NmUuNKXz&"ƼV DOzիzƪV>s\.>uXXlKCm ({x^ u<*#bO?_(õ^(z`遭g+{`yu 7Uf!~)Z|~ k9XLGycbw_o,_sp>Zdzg }w|YfLsFl;L!Ee3fFKI٢# osuJSƍᑕ Dxb<|)o.6K(a|4郺`c Klj<}SCetnf_/-YX\=?ϥY{F>80A(!/"^2:ۭ O3¾])+v[+մcAasǩ<lrI=THm-7xL rcFv#첪ڇmõ^(z`遭g㱃=X,fuZUӞX٦1|x(|wG,^dL淀+f2Lflٻ ۙXۙ;4ɨ?ؾvWÖKM>Ǒ LxEZm6 n{ W\qECѬSsL,=@%s=Տ=2:V&x /k^577:,^]V,b -f6&/;EulM{`ќ? U28[8kL`0Űe~, $lk޸>lN |Zv{ pXz`xl7,bM^xA-[[FcFbxa%wNo&sXYFǜ * [wL t"\?BXn{Υ'ծE@?TVV6Ӌ+b3c;I^8`kki߻kߜ.+y!9'qj}`遥<sO5.IJp[l `NА -JmW[4̚:^D, IO$? %)tOʀVs.:>qi5oݶ>B}skghK߄hbtv=MiF='% 4znB6?G)iڕP▥qZ2dp l|XlF}[jNag)lg>Fcam G(cB죈j8>V{m{oo~*/#J'tTD: lqEt&͂ծcLضE$oYuZ]{e>,FkBME7 m4zc^g=:va)ST{n OcoyeBrwV#< >"D\`1YӱǝzO']ho:c8#0-}K.JqXLi̩v( uYF{TwrK 53ACpe{`Q@<*D5 %KI5V@cnO}Z/E慓& 8&hv]A G՞W=#ӽf7\P0!$f͊ev{ Hf~@_xD;oك%Nb Vxִ` +Mg7X؟~a?w`+٥vbNJV&?=Ys~Z34ﭔyCg־pӸae4es;i_]+C'`[Ծ}o pBi έ^y$NǏW[n:g!AŋYzꙇpS6gĉLĘ燂XXx\`ًὝ={5k1cׯ"%e=Qwbf' v 0'rǹXN䚑vx1% RM\,l[ظDL`1 )7,b? ! ~RDʉ` )~P b,{+.\P=3 PxUWJ+c#9D;K0]OtO:نyF3&rI!xW A@ .kqgf9@+ٹ)!ftvmL /EB[b$D0Wj%DA7Frùx`˅d70|(țrӘgKm,!K,=yqԱ%/G=iU<2:AK4^1 0 5L%` IXaksTMY`a,6ٹoRzL6^JX\$Vbm+Ltxk"dr,}mLcr>oܲy RcӻE,=l<ݕSR bs9`L^vB,to[{cefG}v}`j!WQb} SnY\z`km?sT/qޠ3\።z;3xL픩37t->{챪I-K&[`m]tZ+a'qbX~*}4I8U ٕhC`ـل%"obs GtɜX %M}o V2$|юAWܟK,=9pɘ+Ucz~Ue=Sp^Y%whƛ]WmoW]Lb|]?c0K 3 lP Dl#eiQȋy[f16㶷 ڷu/QנZ`W6yU78O tKlP`oi-[2'yj;vl(y晪^J:dNgFe|9ڸ5XFyD%`T>6V6` --Sy(FXlLjHcRXV[mtRc~U 0/h;Hy=M\66dLgIl3s%tgX,{G` !fq&oWuةw=$O.aHWRƬY<;wnD͓%`uoܐ S+݇Cc3iFɢبˋרgb|l3֎x8u^;b`u]>j&Ϙ1A}rbKUsF3hYg@!?yRKrF!u`(z1>vq7V]wK$N:g)ƾXC m5'k2s!` R9,K%`+}A]!Cr bCwqGջwoή9uF}矌UYS|w dqw} kVJ&b6>#S}Ob3gz Om5IVÄ|X'CK%:*D%` S#FrJ?>'jOG8E5յl׼g ?['冩kpNUNxF}=&tС~xb$o;,Vnu_zKM%`ng"` بdoŋj|j.z=.:Z_kƮu"$q+m` n,td3S/]AVKmiC{C*8q-Ԥ̬zz2Y3/]7[nnP-*kj&?b9t饗M7ݴt!8ݰϣ':Y Xl 6.?)QYףYؔU0! 
[lYXMs`u˞y?X҆'^i3g{7H>}7cq<X$~_]v٥t c\n֩'`XFL)@- 2:ɵlpU%C{;`asakU۶m=\GzQGy橅 iӦyr K`r7N;+n^4p%:؂۷)9"عbʦ' sX~e6ئ6Šk{I%8K.D-X`t{~SOm5#/X'"R}_Wn)CN-2@Tۨa0eR5nȲ L`4Cf# a=0.CAn P/c6'.ƴBlp)yin-UW db u7~ f8מ={= m|IՍdD td9[-LlN)@c5V`RP&y„F:3hWO F H3 ]2R/9:<:u2^q<(Ko1ҠvS%w-^{ ϟ?_M:X#M4Il06W N}xIdtd994bZ~r6g*ht ~0} 7r[ hխE& ˱2BM/pjlݬ,o3KzmڴQ'|2ed;cK\gϘqlyV P:(@5h0cʱ /H.HƫCm} si)7=W*2Cih+PNJ]q%pXewv1,rΙ]㵋 `3n)YT ` BLa2Қ3%']p6P6@͸gQRʮ/H.HƫC%~F4o+Xs-3)'YW`^NuuN9 +@^ ( ʝ` ,Bl1:Hc`)wr,μf :Ru9N9 +@^ (CnHSX'3$39*@u*lMIʵX,֘6mm 6@uN21XG dQ,Ke`HUu{t*(#:GX'QPF*5Jeѓ666&G:`swIzBغσSx ` d `5P:ؔ]> Z: ~P sF=L0a&z-c&/m嫔:[vx)]ʽΩ6$g7kac P:YeYi<\!N&F/Giװ*Q60 P*v +cy'ּAmIaʼnn)=?tw'a'e|Mx8'*uR''`e ^*6 ]*Hȧ;%xZNOx-\͹s*{~cebxi!T7X/a_%| r̝uǐmm,)rpa_M;2D&v"ʅ}o!#яaP*PAj7\M `0@,XM煰RLXuugoːh9[xd v9AфvDo=&'8,d3IW@5+q02nլ<0dxQ y@`{zz`ۇKe``` `g DPcQS  ՜c QeWʰ!:SgT TP/A`|$ DEB 1`"5Z荠Gmmmm[ ,%RPwt4Zֶmq~\jjp#rOT P)9(jcxmmm5asFgc;)ed ъ%iU&]1<|t.y&0cԕֻ ء7nTo12~Z(cN~cBfI0;1Ca=rN86cHPWoy|f⑮5 s| 6IBRh]0#QNj,5a9;hf#n3Gō$J(׬ B5Ȣ79K Ģa% It9{$@&x@E{醲r^ԡ>2q<_<6aĊ l/}>(+M}ӡ Ûd,Va^Sl͡t0ZU)KhfJ'8 85!֮jY lOL^s"ۀx#]`v:c[KYI{ҩ>С=HPA] V+`2ZE%I|J]cx6OaS˛3, רa-;fWUv_2txzA{v",&J7ؠßuXUĝR\$gGuŎ =,;,E 3F}ze(f<8Dx-iFFN%F+I~KC&bpxH(`M[ۨT_$V }f>^|^J(5GnfWZ?M{fE7جH;E.Cx(@ `Qaإ=-"uOM;a|6l70m\q,F7AkB,SA y`f|['Vn80BM&aĵ< ޱc/ḩi1X| ++@~~]vd+Z< M"=S#foa`gͺӨ vʝK8Gi㧍YI[hwUNnP;ҩc yÌY{ 9aT9?wh⍓E3?&%%@mQ;l5ss6ӛ +6XIJy~}.1:[5+ǐ}Źoh2=4bsbxX&+_2y Y$jM1dB͘YWd\1qƝ:=rɧsL,'|B8hI8cH \s?cI1ܜ0Eډw]רQ f 7n@y[d{tÔ)y,n^?Tmvxd̕0l#7@=u#?2k:\-|1{| =T> 4gd JԞ v;V Gyrؖ@,uَ`˹͖lf8/V43Vǽ³ A`ޤaD #j˱7IvFzZWa΃ym^ml\Z!?S}zcD 2\кm.GƳ LaQdHoaʌ+ohŸ(˥T D@S 8E'.`, P*P'z6:UY*VsfcEwC[1ݧ {ъ^1GT P*@Q L;X!axLbn$?8TǢT P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@fĬXi\ẏSODT P*@@)%,uy֟eO ([9 ,>AL,K1d|rJ'\ʋݨT P*@FF@+jKj!ؕM]SΥ`fQOW~f+=ʍ. o, P*@T P0=Â7@(ҮJ+N~y ƒP,M^m 6]rKuQyT P*@ȼBvb]O$EpXxŻX),X~}P%@#1`d*@T P*@@%d Wp B~h,N~gy`_6 8Ts8=h@.QGW(:+?`ECeV5BrQ~pHnT P*@T +P `l(ȘN'߆#6!$^H? 
3cL˘ScHMhϻ̺PNyhtuL5.Wwk?Zü[`%4Zݢ{1AT P*@T P_,\+J .M57˒ c?+k1pi?J1 jX_{WlzM-5?~!2|u2CŇ%T P*@TYs s< RBfz<()Pm' c6t1ߣXYmmv9WW>B)c'Ow:.۠\Q*@T P*@JKRUni9L`]ARH3+^a36 m ٨6hvg?5V4)w [ m% njT P*@T Psd9,V/i{* r". e{/XZ a~\",`yZG8f {рl5CT P*@(M$ڢ شZ[ !A;JwY:*E9rΑ۩T P*@ Txx[.&,03VF偅h8o$ X5k^.;78@2O /Pgc%5F@ J=b3]onkkyubM P*@T P(UZRԘ=gSpr8rk"O]}ʕe ǖ8 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.56 Direct Consumer DirectConsumer Sheet.57 Invoker (e.g. api) Invoker(e.g. 
api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.60 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.64 rpc.call (topic.host) rpc.call(topic.host) Sheet.63 Sheet.66 Sheet.67 Sheet.68 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/images/rpc/flow2.png0000664000175000017500000007367200000000000020732 0ustar00zuulzuul00000000000000PNG  IHDRWcsRGBgAMA a cHRMz&u0`:pQ< pHYs&?w#IDATx^ $E!CfoA@<("Ȣ0(=,.l31 #8 ( "*SdVFUeUETu]y9OƲj|PPPPPPPPPPPPPPPPPPPPPPPP`p ,v۩fϞ}Zkuc6 `fͺqbbҩ}+(((+ h=rbo1cpЋyq'.3o64l ?8 3>?%l-~~|xu{Ofzq(((c y|!/`eClF;X+ˆX, dfMܿWF c60~6pg*}xkfdۢQp@@@U9*oxv m݀GY:wqΜq-m3@@@3g/y٢ݿx^lmn)6xӥ8 h*~]ț?n<;6 ܳb-|ێfK] d̙kj_+v>_z /3w]6EV!T2o[[_7wW\3MiPJa^[džM45C`["MNmFܒݖoo]hGIPP 3"?&pU2^U̞=mHHADǵ)Ǖ|%l`«ޓ@.(0j 8@6 `M;P/`epD`q1@+6帒 `@u`q1@$`q8l`6p+[bK.B_>eWDw6K<;Ц`؁>r\ P Xe5b{>Q۬Yw޹6;Xtݍ6ڨX*n`ATz7lT7޸2lb酗<;AvM; } @ Š[3޶N*N9"+V&9sԦS#8XhQTZ%dMe˖Ew/tg}.Dh!f# =>D(xCPIͰOWl5 =>D(,3(:mk,a"gm"o@Woh!&l(Xvn`d*SNCB @& G |;X6fqP`XvPOA-XW0|l(XvZϞ`+5ڳE |i^m`siTΑأYRn!.ndV}_p@w0ox Ālˠ @ 0o9~vizɔbw(M`;NA>~i#y Mj`1j (Q?tC1 Q"# {{[+[AW`, _σ*|~jš{rקl,pѯ|-X6نp8{=-|t7cGTZ}ݏC0T!v8ݰ*HP<,uX #rm 0Hi*ES} VitLii=,<]y8*$}g6ƶX4}.Xi9vݔS|'6nU;@`w4zv  6]lٸ:`:;zQ) Kl0u+.UX%iS6_{hR3,X`'EءLg,s[ه5xVj ֑lj4UZyO|譥O-i >5ԒO VJE`>Nv@!r]/G寫y]\+SVOՌ,6DN,XF 2gDOڎcT 5gt`l`V`ʩ}3U.#f*^XEPe~MGbYlp/KZ >`dׇK< [_:]" l]\w;gyYl1yl)lhXucRk)  *{3ܾmU* .mvetF`#}#P  o,60@`a1ljeC4f-a @gm0|Ɇy_Y__Ua=XϏ8relv8߉-i މ(#ƪO]NX`bor;Ϻ*VXQ/̙SN 8bѢEQihM6)-[^u zCGߨ}*)\ݡGXk?y+vgT+Cl6/n[~<#?O[m3=度iS66v"O}vD`31 ;>u=;`؁l҆d6&NYD`g}WݾG֛= y}F.^Hw^*kƆz^5 9`}ē Mz+ !ύ!{ݒ73fhu=jm 1֡`N1u뢬ZNb\ډޯ둼Z7Thk]8QH$j `OVTl5~Xv /)nA \\sbM7?ˮX 2uescu[cWv*j==-|* ^}i+ՑM܂{s&ٲ`pb= XtMSN)f͚:xU' `XV, `Zu w/mU6JuȡizI|_]wÖS?:`&s-?ox+_9 4-GpL, ڄwv{n: ejaw(惮޹уtLinU_oDQ^_5-?-|[9²*d xfLquQl,.LaVVk;裋}{ŏ~MKt(:k;W\}]_LO lmY9&?un7Si xRZ㣎YJ ,|yj1;m:_u;SkJcmʧ!6>.2/7MDqmʍ\@FN̄]xw,?qvy9ldzK dI^u,H{&lD@A`؝X(4X] X]5A _@qܪ,~&Uy4 ;@PXo C w]3(~t?(N8UǪ;φn`XV)0[ 9}cfl_R׺[:=_wN5 8y= 
k9{a$?y[|[-Y5. -HK`ս7˲U.U9eY䶮 q-:\ Gݟ`XVAjۮ?_owvֳv߳[f~#i, ^6\VﶺI..mX$O!]+['[ [x;v$;IKZ~F\6`]x֬Pjos9f31,;j-iW2:;16&Mc,݊V۹#Jg+t g6ەIw[w>vu`;4l=j-r _ַg۴ϛ鷿t, ¶,Vhg ~Վ2q:ơ3[uuMOs4I6ѓ/6 qjIOwQp~s;NvVBu Ewu #29t v ^9`#ql{yu5,N??6Nôjqn*_~%}tܺ+?c!6Dޮue~[K^奁mMR3q`;؛okE.6+z;W}g:G, {ۺ6wtϷa=ٓ8 b|"1cF.vdIe][@VbӪ}86ŕFF&J`bwܾ/ ܟԧ7߼ws[v gS`o[o-.]W{zhh㪼ہ>;5fW݌>w:*>6t}0n~~T|h97Yy>bw.MnZ;׿6Eciv)lT&1nxcxWF{+/8*]e^9,ԁ`h`HiIoV݄} {&8뮻Z׼5nşgGyd_z.myyqt`_ [#f]M>[B\fH&%wp1wpM:Iu`6*@m6p0vHť`08䠃M]zqM&&JV3^[<.絢oiO<5.VewM9/n'ظT,+SB<*k]czΚv݇u}v `È߭8URe)Yzأr9:sTj{^Z?я[ne񖷼noۡlńoA,ASN| mM`؞V]dqf7Ty1]ݬdW`4~bw,ji:E`؎i'vbm`⁑Qn|[Zu+V(4_l%\2pXmFm`7Z1q`v~;wSq*]jbNj徏w1Vi3 ,>ZV3 c`-??}<i2gIfoWn۟r-a⧻gf`w~_nyI4ֺ[V{}?я ->/_PT>C5\Ӛ-,5՚_-|H`q¿wSC>lt3}KMȿk~gdK'm#n}®7tf# ft/v[-zscƦfaB(5X,˷*mlo`WU/V3^tgr$C'>Qlu!=_M^xÑG7Z\V]xӟ]vYV[{xxG_mV|K_j;ëM` 6on2`iy`Ѥˇ-O E[ [@:}mKj4$t7usbx zEakF9uM'`<a-[FcXΗ&)m=Z:lc^׬FH?m s=7`?ϊw-G>҂^7{ō7ؚL (E~tS}AD`}o+v;mfޝ-?uX]}n1񲚨hǝvx٘u`ΟJ1B:V5+sY8d}v!>؊r!Ů/fʕ+[}kl"cx=}_p*?|i]vݭ}㗶)ʩ:"׹گs۵_O۵_ ԔʢoS:uR2~,04- k`6b|MdZ |/6hbxn:[r@;Z6 `52:6LGfwON27*vi? 
MOj!7Dl}ۏ WL;QA F-iw4 @:fI+[WS׳4VVQY<5+_K n{ylt; FKE¾&=rɋݶ,mE'fo3`:viy:hV_vQIEF{ꢐݼC2wⷯ=-;8̍ύu[o-^W/]sU^VVbeV}of ifWu[3+2f{!ROG>kp((RTZT p-@Wf8.BoOgye SNFlT$QbwUGqr>9+M|x57UvBO}W<د녺oS,,;lE{f3gƝF`Ս8t" .lu"<#ٍMu'7`Bܐ558֚+/d ^ Bu{EOSVSN9xH O(:vZ˖-ku+^hQUXU:E^SX?~KwQHgPhm|G7?ܰp6lsvFlT$D6 t@OL|;GL$, d.jfb-7)JNU׬XM5]-RuוºunVd_6m/6[o?>;;wnZU)f~>ǭ 1U&cy i5]ֈ tnl8fVleyB>jRzXur:ֆ>sXո;Vڰe0)ϘtLdlWI?mMk9_\wKtI'x`5{Y)Q*žzVf$ֶ/(]}n<7tbDhS2k[+8 x ǜ*u-MdyҔN_WQ\g]O׳Q_>JdTMR nhHԲ9ro[D '|r曯 o^Ʀn]sܶk 2<lOy`&Nr`vmw `nbqد~n@ws7Io|cq}Cnp X߾Jt6VE[n&J6R+-IcqT;G"oݷ2/cݖ_x-) 2:ͷgfVa7Q ֻzm.== UV?(x`uYlf42`nߟ uS Xt~O8a.7u\<\o\lRnO&UeA0Vp&4QS]v#A㨕l~cٸ~\'25 ZgncnW]y{c]S\W`Ea7sT= 4r a57֕=5=96S+2NX=~Գ;I .ݒRU`$V0qT9yw6??:BtڧS0ky:}]YMj]>lpۉS^+_Jk)+~-=(~mwn|yrJCه)uh5~]_?k6lzl DIiTR'lt; FKEBWXBlQXQi`} yj_Z 뀳 >R^{)Y`m?]ϋ\4nWp@kb[dϫ9ն&X[sun8[ Q>Ѿ- k8'IPl"6X &(*Hֵp AuÞA6|بνVպoU]vokp*nU Vwviuk_Z+n,PPWU Ѯ%ڼ$Dj+dQbJԠ]Vk]` f{6;1لu`5A&qRv1JǻtT5Qԗ_k'ԧ*elu댓-l=?;J^6*`Җu 2,4е_E`mlٌ51P9k]٦b +n//|akYR?<ȖEQ_ᆱj&6ؠַ؛nmw AZ+Z{7^{m/}>WlF;HG]{Zcy]UohF|;M(P$Zw`fIlV&`R:jZ:LON4udPY-}Cn=\w`EJ7qcZ"׺I5לؽ]w^qnҪf0~'":GalK` LTomm&G`󵁑XE誺m&`עekhׅ؟MX{[ɚ`zs=*vw^zn?U]˺ھ<;ZGV3 ˶Tv酗T.ޠQ'|;M(kYl,Yv=m׭^"^Lbtl0[;TMU `b+UXiok.g} `5V٪m]Y@+zЋ^RVv1c8mW\}ݦz)l49 4 vr> ;y>~,"\=8j&}[u.Nq9裋]mXgDQU۠Wo^w->ymvs6s95|;M(kB8.J=l}[dAt>v[kVagQ?kt^wZ=uZ_lٲnÚIsλ4ŽM7%;Qxvp7WB))aS`؂lO(9^wuŖ믿J`8@aZ{v밺 enmM'yҾ7߼&W׿^̝95{~vk__r1Tw5Io}G݂rb"\NFYWO6>åNMBVllPޙZL t\7uccWz3ֈdKW@VݎMTvL{5״R\v~g5ɷ*`ϟl"uC9>4WݹM/^?);U|A]Sk]WnV@V/| {\p \WGث;j7 lZ1ȺkѮ- k`D|'`Bzg `o𗽬x/1~Ӌ\%X8)I&\vL{UW~{ M>Yp-6`"! 1 ;.`iMZFon*馛9._ܚj;5i-^vemӆy`Fl_ZݘΗֹ6ڨ5C0˺ASN| vhHblciG-e%aݠz;\xqq׷V2}+635U*iVmt\u39G AgU>+U w +l:uhע pW;-[ &}BFmCyM %m\D`_Yubu7I@v]7~Oo~X`떷{k2'U>E{]K`nD`Qqnɛ9s>N*e={[ D*2ΜnY^O?[O84/u?uMU/:4 >c\g`~O,t奁mMB0۝3K 4vaf![5<Κ՚xG|}nkwL `?϶Ƶv1nA`_M'yl .q>m lMQœ6U%o = `\N`sW]uUkv[^gZ `/B0/~>Gz6 2+<.J Mnʌն9e y.!S Fba:1c,[G]h X7qYkPsNum^}]mx`q콐7`N67xwxXY87am'a4[Mx FA>[o}&umOkNjcER;ݎ=BKLklq. 
cy4lQ\!Tk&rР튫+ g>Ph9q(*pXZ|WFoZvivQ]`t,5 ,&pZ؄+'â@;XM̤H⨣j-6E]T\q_nZgT0`X6C!"' ߼kEqWn*nFڟsF-6@%ՏRPu|`${Vk= 1C:3U,;Gw=*jYV阺+w?VaEzu[´lOB WNE`dN4jN;q{_~yk[ti{NfK/${L͙dyXkHYUn~b~}JouUzKc)w/`|Q*o,ls_Fmb#"߀O|-/~X'!؄+'âl6wy&`Rbm/vK̝;zYZ)7ڔ` |rlتqTE[ `n:n`X6`Um V0 fړa}' :0v, &dx۴ H ƥPFO`kK^V;m.X;[3'SPP`krlFS鸓Fb;GFnn;.^]uםUX6__̙36qE'&˖-۽SuZ?g4Nfp۩lm5XـN͝7w{{'C?v|p_4>˝``ky׊kmQo^xI60RDKx\qK_(S"1}ф^Ko`pnPAV GyvAk9. r)lQ[\yُ&J> ҅XA} ]k`O]sT\'| ,׺wڅ`8Ku $N` j0#@V [#$cepgt^x*?k)NAVٹo}ؾ `>Uÿ0ϪkuU~G>ƏVՃPVŮ>l&`~`ؑmHnևW]u /8GfIoM&dzk-2}n TemP; /mU`Ď۠ EkX#mZٷq5*.ku҅Xl۷ozƿײzu5xViUvl~`5  d +m/k]15 NP +bM*  2 |4ТzR??5>)x;Tf+lS:ǠJe+Ҕu!6k*]̵bGAu>CmοgrڏUꃿiaew; CgI6g*-Pr>XFw&zԤ|mIaQCȮe=бAaYuOv_M i 2gG=IOŽiT`k]2'aΠz;ԢeN5дb}K;ԶVUCiPH(E VW~i`)q>{`17y}ز/|t;~({MُC˗.kFjPaztY]~|,M>r/ ZwRWo3`bB`AߘAY;X ZUECMFۢcPU^NʉkʁPՏd???mwV^Н-(, 8$XlDmMLv=n,nXo`^?vZ U >M}*'a4[6 E-c%"a~4vzviҮZN" Eu~8 l *Ch?xCbC?~@<o?l?`ӪK&x^#@ ?.Fp.l\8~ +ˢ\-I_m4c[5 %5'F:f~:'x`˟:2wu{X{&h#P$v*1[`XcѶ^#l6&,_hCX4~7ܘ?35e͵? v_VkUVUׅαz`.V^~7eHit`؄|Q)J_"6PSIMh?HcN"zYuQ׻.ģ`~ZW? ҇il=аQjf1,kFlLmx{W~LVk5y9az+[/@?tB` (E; t`gRu@~-nz s{z6Yc˺c`;pO#ilT$PQ:"XcT4L>X6m&Ig `.g^1#c7=| g' ׉gج{ {Όq q:*N]e2rf]H3RX6y&U `Vl΁Nz =xgY)jȩ]STf^YI:̓W`b`ؑ&R>܍Clb~s+ 10w 3:ͨ2(* WA6\k` |Aq|^>lۋwnz~wL o6p j36(0dXc<.q8RC]`5qCn.l$,{0R Y2dCy @ pZ3ywu4R,Q[|H[.-ǁv6mҿ 6;suBJ(hk*}MS~GP̎#_H.^XlYԶ_|qmڳ:M~^{V7p3ΈJީQ:-Oq&>{כ=|ZZX)56 H)0{9w֥su|Ӡ n酗g[m>s7 oYN~…SJ哷*vQ7|tYW\}]6",P<`.6n֦Ȭ"8:ǠN\EJu҇cG혎oWޚ!tct]+gg]}6&Tcڴ . я QP(8 paw[1HU_ȭR4-s$ʣtSl׷%G4lYذˮҰ۱`1P*EU}ؕ"ԇ)lyX޺2PZֆݬ꒺l`%߄]AhP]Ɯ[ !$[ Sӵ+U?6 p elt loB6ߺ +>~2g #uk -16 btk?ln'm#֋z3尽]g`|]%Wt>Nq* g!dʢÏj Nu S12 %+SԺmCz ]`]&vެYn9s iPu||o};w-a&qH>eXyjMo$Q~e'n~7L!iL??S31TY{yfMh=?{beh  GZ+-;،+/k\""-'3)1GflE;met옮<:gxGX~2:ue}O)_Rs k7ۏ@gGyT}6U6FgѩI ,s&(M4%RS57XBiCgvX6U&ӧrp^t `5X{Lo'6Q`<6ɰh,J0Y8u 짳ډvkAup|կInQ fe}vnLԢ3*Iܰ;-{T6]콡rW/ ܦ )hdz fQ`A6ɰh,;AV`Թq퇓XgSe*sqֺǺ]m4'Ͷ=sַƆzaxn\DMΦ6a^mMe`݆6cߧ6Bg#ȹ+&\l•aXvHK?:ol8qA'󗪒f7F-YgYb7}ʹU+ʆTMtoTpv #ȹ+&\l•aXESsG<6X89} d2-k" #Uaڰ|׆ȾVm06C"@`oO=Ɂ, @lecam~7\ְpWl8N $Y}r 4 C`gP؄MMr2, /}:w6 -Yڃut* ' `OD`Iʺ{? 
fqPdl)w'8l 2:u<:#}$q>+E^Vllb#wjkα ,kYT\U?) ֵíRqf@3 ;p/kEz%K k@je4ialB y~fkww6ͱZ~~gX:P5.3lxzd*{^tj@l֡GR`>K 4elĵ^UKTuNUxS?i:N֕]^M^M`.3|PW^}ΟG`Xl 1nƃZW9MA `)6Z*(b"(0& , ZiFiU_k.F{QlT$`@), `@6F;YlT$`@)p*2<%¹)6 h5 3!f(0> 8rt. - QPPS=(Nl`X6FlT$D@@qo {u5l -`] 6Z* 6 h(3\яϷT.iE/&Q6 m=- Q _滢a(Р,r2e~lnhH*[wեNMBV@;@i*Y/ 96-gm-+&\yl•aX5 dΎ7em#ȹ+&\l•aX`lh ="l5&\9 `q\v\^BC4 ="l5&\9 `Xm#ȹ+&\l•aXǵa5' nlE]6` , `@6fQ`A6ɰh,ڰJa6F{D)I`6ɰh, 6 4llGtK9/:5 QZ6aC~Ghy)8 ;G(?Olw`.3|PW^3?ُ, 6 4ll#FKEXLDqmq%z{ 0w`(6Z* `Xla`,6Z* X )0s:޶nܣ(H 6 tclwFKEr'P PF''sl(6}`"! x 86 `MbRPPXLe>bRPPX>8MEpȇh 6 .- Q _&\ȷT.:86 flt[FKEBWz 4LyIl _`h6Z*@ %OX6_G'lnhH*[w)\)Slh5 &l"l•aъ ܗ":86 fltS FKE㓨lkة`qSs)6 kl[tK9#:5 QZ6a` ,R:6 4llE]6` 8 ;DQw]S6fQ`A6ɰh, 6 4llE]6` 8 ;MEpȇh 6 zD9w؄kMr2, ,6 ` GDsWM؄+'â,kÎ+Q|fuה zD9w؄kMr2, ,6 ` GDsWM؄+'â,kÎkS! klG4/:% Ql&\9 `Xmr)A^`{UOUA|P XǵaǕYQ3ꎺkhe b(N`Xl6`"aD`1Xǵaǵ 6ڋ`"! H`Xl6`"! H1f\ۖߍװG)uGa]RF}(c۽v6 jlFKEB@@`qqlhh(((e.6 h(((qm*C>D|mv1hH*0ኾ4SrHS(Sw6 m=- Q _滢a(Р,pj0&|mnhH*[wD|mvhHb(g3gm9lmvhHb(R`S7Al.hHX TB@QN<$6 - QPPSQɧl 5`] 6Z* 8ԜF`@ 6FlT$D@@el`lFKEBW W->%G4 1Q6;F(]AhPv4FR6`"! 
oQ`qzSpz)v >- Q _|.ŒbQ&v4FR6%`"aVL*2-w`qzSpz)v 4- kXOR iK`j zlvEKEB6_`K,ч%4Rp) ]4 M؄+'â, b6а zD9w؄kMr2, 6^ >`3(r pdX4`Xl6C"p  WNE`X׆T"@h$60<`3(r pdX4`Xl6C"p  WNE`X׆W^Ëz=ڧblE]6` , `@6F{D)I`6ɰh,ڰJrglG¥ozUU>?mnk6_e*`Xla`=a]W巶_9^MF[ SJt6^Qga=E3yw5^ ѮN}YK/C5}((0: spF@@HRzhglM=B@zJvW60j6 ((0,Nw6ω"B\#?XQsl PbP *Zt=lQv yrk\ P`$`qǴ~il`x6&Ѵm\P= ˂4>v r@UVSq{!og%#o\Š|reg`d=6 # ݊~j+eiTlT `OFouG6|9h>֥€X?_kܣmRz][ūAH%+zhgl*!U]TS;Ǣa[j+ >762,)_23.4J wa _ayv^av{q spFرs-kA,ł$,jc>~1x<}P1cǮaU2E t=p/{X(塏;Em-=O *@`lMo,j(U~da@kUBǂ/CJgk t ?+Շb[&M+)ڋ@5XZʻW;eׂؕ2\nVtp̬e%%;M7K$@is |~tR Ee_0M@^CVŁ5;F}xZ^f蛆KD3ކ<-~WfS"6Gvx6•QP9z8h `fl3msRգ/ z*Wt{6uXa6FL " @[XQsl ct(Yz >ۯway>`T93LKul͓G)fMLu8|sl`Tl)A@@9s؇pGŁ>el`x6S"@@5?%Çh `b8H>((((7pЋl yZkpZ+2F@@@[oJ8|60*6pu?eE@&ݭ a6н lrB[@@Q@ XUwª@ M:R`a7C;lgm݅!P0A_t '?Y>>9&+[]rBVT־cؿ?]4?JcW3ohƬYg{@l̘1M Kdռl*(چލ=t(eЧ_d^,UWS1u_9:麖Fy?je4q5&_5 v{p޺s ݰl`\m@_gF-'[}nq*~oY ܉Q4Y) '?xE#vUH~A_re+*i #5:a7 `@6:qoC&&TO@h`%(0{Ծگsѷ҄ gst;u$P!K^,UXh'k%å `>zX(=9ݩY)p-}bl>rE3gu5fq r (h60>/zr@>o|_Wi /-;ς;~9]ߘon `~ G=P;׺ۋ?K#g6k֬/Bt@34qq*'dO@P)Pϕ#*>\Tn:CW Au!u` EqrĸWxœrk'քQr {VE&gVrW/u2<^~9{DRbi]oAu/Gk;"?ֺI H ÞN;v!w l wa޼ ַ f>5ȞXom `AZ(Pu~[7a_pv'u$Y{u=<&FS`bb6b.U@,clso>G8g-j"h+R >6螀6/ZY4պ6QAm,-fX$@(, tjz0zUVČՃ]"GSmgϞ}Q#6 `g[̚xx?k4p.{*Njd0:[ClKP镶]/&ݧV\6l?z䍍-ln6Ef=s:akq^ D +GFva@.6pGO͞Vrkۄ}(i'`0,CO ,rփ&<ձ0O|529ul FO͝q+PS(ؼl79y+pZ᭷vMǝTh{;=`3V6 `y#^{[{u~"wxϻ٢S~3zukw=SwͲj_y(( * :-vߓlh `@v6`%SuG2zϞMS6LWA5PPPPPFu޴~WON.a&/k=? G3=AJsf2|PPPPPP` N­Bi=miXtLZBB+L^`*AYݢ~(n;qjoaܮAꤓ:V9PPPPPd Mĺ{X^Gi5T ([*u<:VIaewv F5A@@@@h\XEa=40w/~RFR ؘA,oyZDW=2Vb(((((zeQH[UUs= NFi@Ri>eZ:++#ۃqr* @^ vq:WA}WMQ(F+#k{:Yqm:f+ʤtK*ZG]ں!D|UNXi2_YX=>Wi.~:LU#0QPPPPW `}5ˢ|u  ,iװ|>U3UZ2{SY>v~̤Nu?Wj`rZ^~ުVki.u _QPPPPPT*U,<c?!G] ȧcS+'^VkyYe,Y>e[$Q_O!l[}?B\hGM&&E@@@@@U0@3X#ief@,i08a@Vآ&V)ӲX,[qP76.jݮ1 t@ZӪe윲n"M>`}.ezuQ=\~t8N].' 
ehv5u1FԕE"{ʫ@cN@[ǫ`} jϱ"޷p|o: q(((((+PUi죁:+P궚 J8+Ֆ rݏEЯ: ӖMskPPPPPPU[5VҺѵnWuޕ]4xentK;QU7]vV_X+lB\C@@@@@(PXժbl/j5*j]ҲTU*w6҄P\UUePz®nY]lvi/Fa];yY]@PPPPPLv[Yl7pXc(\ZzKmUe͇*ѪL++ge5mMTf@Դ@418!DZM[іQ xPPPPPPQ.d>뎪og0vXAct MYwU,bە߇[rVu:Oԅ״y*+}+:gj̮3YQ5u:_*CcUzv6tW:+WF:ڪ4`UnHxkn((((((*Gہa\((((((c@UW1E@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@1Pߎő>7IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/images/rpc/flow2.svg0000664000175000017500000005775000000000000020744 0ustar00zuulzuul00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Sheet.36 Worker (e.g. compute) Worker(e.g. compute) Rectangle.57 Sheet.57 Invoker (e.g. api) Invoker(e.g. 
api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.63 rpc.cast(topic) rpc.cast(topic) Sheet.64 Sheet.65 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/images/rpc/hds_network.jpg0000664000175000017500000047404200000000000022220 0ustar00zuulzuul00000000000000JFIF& }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?( ( ( ( ( ( ( ( zt&;h~i,@AW!#Vr:-y$,eiU<)ibn9^xfLjebl m`e-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@&~%I-_'aȴg_ 䝇" %v@AG<6v>A4i TwmkviO2D@֓U)AuޑL xU,h ( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@P@ψKmo2dp_fR05+Rogy9.%

Hu6%( ( ( ( ( ( ( (; }=KKA<@ g2FI!`bGI$HUtt`22`H (P@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( cTG羛 c]Ŝ* pH .PϷS^]Hdw.zʊ9ڈ$j8TUQ P@P@|mSWO- QcjĿ|HWGUh7]">&Χk^4|I\/3p oԿeOڍ?>T1D]\jO<8LH>ܻ3d 'g??ƀuG oc@: F7QGGXvѣ#M,hT;?4ê?hS?N4)'g??ƀuG oc@: F7QGGXvѣ#M,hT;?4O<;H{ > KkCRG%b_ ?i|>%Үo/<xP_'>'Ŧ6/<;iZ}Z&KhBEM^t-Vaf@P@C2Ocg3 *PZ ( ( ( ( ( ( ( ( /7@P@P@P@P@P@P@P@x3qŦ~N{.^(h ( (c<(?e_RWe@?h߳-y'RW =<\[zn<~~jn#~|t;? %X~)|/Wx'Y,^?Vҵⵢ-3Ncr yAf} HGj@|E⟁Z޺,5OԵa]_:kgmqra!|/|D{^ׁe&<i'7èh!߈K]{ /[K ( (GԆVR^~e|<*e`,P=dpP@P@P@P@P@P@P@P@G ?n_ol?? ( ( ( ( ( ( U7z9$_d2*mQP ( ( 8cU%q@TP@(QgT@L+k7k[}?ο<_|,Ӽ9jZ| F֠ڌ\> 얌 y*iZhothJ_ڃr>s ]/W|;Vӣzvy,5-s:}j| Ft&EX c%o,fu)z4XYҧ>7Vf.Hf.٧ͱ~'XqozJG⹞㇌xڄvx/Zuɨ6M߇6Z7("w9j߲|_ӭ7[B=/h^֛akqhWQr?(?_eZ8( ( (>\4f$zcHQG#tǓ( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PI$NI 9%:%I&}+هֻ{|ҵjW\jy5ԓ4\KUW OeLE~x ? l~ҟڛq3f4!Nmy?>i+Ot&(Ou˄ kO}ڟέ~>o]'/l0|I-m ˛[X(1Lڇ UoڇwL|I4۟ٷ_f[#֓x5^u? ޅ tMڴIO!s_=x/)_4t?x߉5]}BYn5ψO^)*^j{krWG /_ğKe.Wt4OO $xVZ/x|[BDm5ojWng?1b|;ٯG~do؟׆#_:kF oR< 7ĭĞy—}-Wº=@++\ }@P@P@ܜk#<=A?@@P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( (>_ ( (>s-#s~&g+'_)I<l( ( ( ( ('ŧcf "O 2~)x݀xUt%X0 *AE}@P@P@c? ( ( ( ( ( ( ( ( (~ܿl~P@P@P@P@P@P@P@P@P@:~|S:|,Dƾ0'4k&uq|;oe7aĶ,~𭦛amSPkKx6M7|6?‘ܧ| CKƥuiCfw6O΢`MGT|ۇڀ ( (=CP@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( (( ( ( ( 纚;{himv=x'Tb&?(?V4z?c[Ytpo>|#xȎɫeOWPmj:6O7^f\fʸu5yTq8TTxڰ=qOԹ?]%2/\*siR ڒhTm?kaSlR뫛Sxq"?hu7/:g/~|=a}OX^]%OBM kf\3Cp_Q8̧dGQc%NR|5[񟈜'ḳ8TZYF*pr%v?/( [_aOQY~ntx6Wzwic`6mKxm%ZƵWǂpWΕ)R1ymkiШeB1:>*|F{Wk=׉Z/菉.U^nѣ7|e*~ !2#E3JtkQ֠u(_$1#/gˊjptxmdX,cK9fþ\/ZtF5~F~ˏ1|+G)񌸟 (eCsl6U,R}^T*֖"8ueN_r/v??$/G.{Pմ [S[GY-u=L-VWW ]]գX2ε8r^**uƥ9&ԡ5(f֫fmb0c_ ^p:5`M8ԧ(-4iRg ?/hxW}ſ io>t6'K2ؽḴI',27_iٻu1<;^eUT[sԌ%*\ɭ2OH(] 5_p?$y?Z!?_yH_EuZ]x'/? Qf;jB.?m ( ( wPP@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( (/Lj|!g> Px>>/xiR4_Z|&BYs=#<*Pz/iwxM?ߵ;O+yeo:rh%Zſ|uhAk7OBcø(VWT9,3Zқ?d*`v&g͕GȅP@U|?k['_ߋ8yq勏x-#V{Zya'GeۍxO"RyXY6<]:ػ-pxwW;mQn>!PY siGن-Pyl>߳=oWIjֵi㿊.JA񖧧vzN8l,p8LO|- *x.'8:r)C2T"_~~$?؟ğ'U?' ? (CK?'A7LVx~W@P@P@c? 
( ( ( ( ( ( ( ( (~ܿl~P@P@P@P@P@P@P@P@P@fwH}OkT&"xTXt'xH䍊igpYϫu0Teu&ۻJeLj'c?~.o^Q֥ѧF4hT t"٣EgX>.%'ZU}C\5j>#$yơxI$Ha݄+~XF8rH:9oN{upg*iٸ:i̺}UOr#.$i#N.KP -ԍqek;c 5jLMӆ|<9 0[MHmBfR}ԟ'Gz cڟW7SOB]5%+ƣA_i.&hNI:X )+s/gbYPTqr\<`. 9&rU<ҭlNK~Ra:WhG,EJQcIX+NWC<]k G`?׏AC4v2Mj^ 4ۣ_:\NsfKPn^qbK3_TZ<" u*jX'chԚ_Z0><9n妱O5ddzVyoet&I;U?i_ЋT\E&gVn^VVxw 3xWja(a ړ(ߊ?T_[N_ ់ux? <1uwB|Sm'k_ x~{Ҡ8n.{Csu;#<4|&[NesLaUq)bkVڕYE7[݌Oc9sO<:St2e &*hrQԪ_L]w(EZ/Fa4 jZ5K pxp#ĻCZ(?':ui4۔*Ir=|M/b2x~K[ ,|R]Jxc1Ǥ\ NsC8>ibJMVkV3?v[_ Ujy&snU {P\oE.o>-"k?W?eo>&h4zC:އy״9,-u]?^Q{b+ʳn+^'"~CWi`Ij֎!PuaJ.Jp' p_yqp|&iᇥWpΜbg(?iCPaڟQh.6|!|1|H~ki{M6 yuy#Ӯ]YoH"~kQ610R \ƋjcKjrN3riÎ4YE8 ҃1 Rx)USJua]I46sB.d3Ə9|ItPeGt7pUqhP@P@P|7ÿ=B ( ( ( ( ( ( ( ( /7@P@P@P@P@P@P@P@|@P@P@x_c&glٝ?G_SZ%ɸȞ+<9JF(>_~n_>"%KPe.d>Ex2-RYa7Iҕ&{e xļY|bŨ}y`50񎒌}S}lV3QRP4pY!g>OMKQ9ٯ1UY~ڞIv*Pr/ (CK?'A?GO3|wY!_P@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@Pgu [F?^tO#5kxw՜~VGqeYO52+$nRDx#kBl5z8=YaӯB98TZJUiYt2Z$֨p،.B(R̮xK4*X+sXiԖ Fus獾\U|F73eʤ8dyگ[ IZ*.tX\e]n͟?_Qosr k.KQ$dYh~oT8+^iJ^ڽ_N8M6nkk=~ *0-|6URv<6S ;|gWׁ.#A/1ěk]>MǕ&ߴ&`Upk:5捿II4-q9+s^anIM9eK^៬Eqo4 >>M |M x^92@5oqRl\d,"$y4ѦiR N3KҌeG!Ǹ2ViM;:M _4OJf',Cҿi_wֿ i޵t|=1' ;Lo5FykQxW9C^7î#7ZPcÕJPp_.SRIʖQYǤӺugJ5ʕG>qS/֩x}/_?êT㇥^ߝs?DvM|?J莮z ~KT#_.֑xXy, \;Y;Nu(ԧ40+&-/dy]7_q#f(˱4j{B6Xݧ KC޵s $f<{q! A9[^Vj!fpm EIGe(xC01V,nVm{-)][ލ즢G+Ŭ,*Ng'8 [.FiKckÕKk?K(PH?ߵ%:XC\|ؐ|+c,|콯דrZ^ ׵?SgA;כ2t~-qZΪ|ևNECHZC:Io:֧ Z#+?SF7${$PIϊ02MI77xdjE6x> ӌ+eHmibbҷ$`ob%~ٚƍoq4Kx\@x#D,cwn7Gi$X(o-*Y=!*Ғ(2Y!X&Ѷұ/:A>.u1=9ww"Ƈm|Kv-eE^[^ 墧qUh|FZ*SUayDg:,^`:VR'(IsJ|\G֦T t+F|VG_mJZtjcp)ԍ5 u}innn~yioiBK,3<;3f%5c1Qs<1RQVmJ1KD%Z# )Os93)MR'w)Im5!_?׿huG$?2_&o֫O|%:?=X;Pբ__AZmţn$^+ /u4θ5c1N4(g[N,. 
Rժ:tYWVԬ׌02py]`3MNXWeYl5lv/FhvV188_fzW3?h|)|_xwF?OW5ox;֚$Ƨcgt˩H,M㻉{𙤸G6rʸ~'g8hRӡ|4iԛ"ڌJ~QN- q^IvI=ȳ0،~Tbysb+R7) ~jigHP@߂Nw㵲?g%AZ%Z|[bvr7S8S'.כGRnUhNU$y5y6{9O>hɳM$uq'~-ۗ7ﶛǞ]_Y3es뼿Ů/'}Y+_kY/"/!6ږs Sd~Pu!OarL ;\|F|wF.zz3|QI)8.+8qjGy+bgYݚP@Z?O'L2.wH|/s[۾r$P` (CK?'AGO3|wY!_P@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@P@Pg0nீR_mFi`u?{4v |S[':|%sR%*y֞ ;xCH>.fc`r\괱S,4JG#Tl;v???Z׏_(m;S}cxZfϧ0XZ+[ik,QM'p~oY0thsU8<'C)G*4/k$͇5[/f|=A>.|P晎#r{UZ88BY we㋨E7__$ /x^W5)lSce#ڌj.}kNJ,$M)*Ͳ]n :UMN: SMJj(ե4Rioy/&02T:lM9Sj5ika(b)9ѯIƭΜ'@P@P@__~1x@qs >Qԯ$#t8kckGSM좞Qx`._ӄVYNOݧN U*)1~Qcrl'2̱,6 JUkTi pWZ(ѦVN2T};ZG ]iW){em-m!.&)4MfI6&R%ŴѧxCB'ZJ?[f U\^55iS&&Ux ^#8,-ΌJp2lAN>짇Bt*UR(I/ (CK?'AGO3|wY!_P@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@P@P37Ѿ# C'h&N,E ZFZy=X7ǯmÿ|"CZj  }E ߋC㏇1_[FŇx)F3eЍWC+TpaeK1}pa:Tɇ3ow1مKya[|2d}EZdq$vh guO4{?üPG>'vs*[ Ε̖~>5fėp\hַͭM,De]f!v3 p-\_⧉3cJ%Jɜ 7ax짏-2w=MhTyz.q0Y!8Aѿ?tV?E fuE?$׈l {Y$O.] 0|MnV{&{$6H~)8#\lwT8^,$׸xFm42|$|S~WYlV+Ԋt!O5RTp KV`o{Yü_>|L<\-,˙aynE|u2 qe$SZOutfi.5W3~%hYa&.VtR_J_koCx)+Mo<-?ڠ/71vYď yjQG&j\E{qz2Gg (q6udhp ƥHbsm?5L^adpM:J5#4ٟ )9SƼwCvTj`w.h1Ʀ&VR^SOR|q _ c_ aχ+[oo%[ya~b6=XɸVxO UTs/sN4КFG&ԳNxlOg^vg8.pdxn"E 8EX*qX:Xfx= jթ'S)-z3gl<̍ڸL-LGaO x/KӥPPXSNQlUzp~6chRi^mV!Zq{j1W۩I|r^"+/5#_gS}?Se/E_W'_k/0Y8/o*|Eu؜OB?`'M|Oy濵G}ߋ_O/uOCe/|e֧cHOMy7I6k]GYda֬NQKxW =hgYWa*zM, ʫ<Ò,jǚWIQHq<8,6Ừ [Wa3s.TliJ+´״-F8ptULbH< (] 5_p?$y?Z!?_yH_EuZ]x'/? Qf;jB.?m ( ( wPP@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( ( ( )e=G }|<״O _!|IxA_M_@, kMuV#WGsOgPG MrkVR8bUZ0SF|+˚WeO<_%*74?>%\8/Lf/[z?վ'GįY2_x7c5?߈?>%? S6mYx~~ѿ?>Gb~Ϟ",i|>kk>56U/w6&x[,xZu]8y19a3<#G*j8%%Jt(R'9υm_x\[V/(hd?z_W?~EV7P).Gg_ˤ77Iies;' +9ekgVʣ^>8e{HRt骭|.soGj}"8:V=eR<~igujʂƩӜY!!c^*6(5;_[k1kkk1_GE%z^v2܁0zpN 3𗎒N9aC.JtE:N8[K+ŴXx<2CNhbx\*񪦪\O}x;*Y/><ؿm9߁hq' RhiqE߿O}x;*>#݋?/~طa+P_|MW.ۤ6[Cnã}sUTC\-m" BÅkxUdV8iae_$΃efsjq/Þ+'s8rf*us2ʽ4a8fS*{?*xFNgma㨿k??*:@.Ug %WY0R~9K>(?W<^˓NK[C!ҏћGxgO}(*{^o{s_S/oPwqt>,iܗ Ul:1 :vQ^ ; m<o[C^>5}#/m]nXjf<t?}rvײ?? 
`_Hh W)'/ŏ?A?m3~O֫~O'A@Qu#{և_4O*?oZˏB ( (=CP@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( (( ( ( (_ })'oxFok~\bx~ZJzeݽ'JTxw%bu(b9U:1oԄ}=̟CQ,sGϨ2y'C4o ,%B8Tk=?^"W Y^$c׾ |M\>*MR|Bu5.&N/,<^y+c x*^9I<'21Kr{5Ia̫V/YXXJx)Wڹ7'Soocoq?9)8k 7Ao7 ]Qn'^</! EGs_hIe d~#ꏫq?9)p_/;A濵&/Um|mcO)yn\lUx,nt 8ZǼ,~%,Tiq~?/`VU[C0 ]4⽯u9GUu-guK 3G2rdI ]Vq9aKrLbH< (] 5_p?$y?Z!Uaa}Yiz]ޥWvvam5oieein\]]\IG$"E3>HR՜)ӧ TR!NNS&F)RJ)6HZ*SF֫8RJ%RZ$ t9MFrI6?'G@w:|Wez֞#բo.֩c(:|Ɇzl54x:'[ޥ*ޱrsN5ڱt4ӯB[n "rc7 Me,9)a%3XTqupӍL4+K]/It{EѼ1XAoloIEM4`4DHUUUUF.\N*\N&Vz'VjwJ*9NsRmapj, 8l.:z`aJQ:tEE-%btP@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@P@P 7ot-K > a{[zGy=Ŧ躍ŋYj}/D~jk(gf62us,5W8W:\e*RN8/ΕHg-nCY}<5lnMdx-,d*ԭCJ 5eMSJMm4|[5MsSuk 5bTյmSGPMOPQo#Mu{}{u47ww2qsq,M#xWJ:th҄)RK:tӊ)ӄ2BJ0RbI$qWzjV^έjժέZgJu*Ԝ:'')ɹI(#{ EvOpPĵFF?@*HQ<*?CѥO? =`7_k pPĵki|§ u㯊&~'G˟xw#|.&7Yx&Ǻ-F[xίjMma5ryOE?o{Y?^ &:xēдNa4-Z~oa7x#.;k214)/bWaSXZEJ)RRGd5T?f]Зq/eeʟ5j򋥆^)~~L/>02j5Fo-WW­ # Dy]ГĚų܃>j2Φ UiTLR hSXh FJ6ZyTUg qNc]S8T{WjX8ڥ 7%NT匝9t(Ֆ?r,#I$M~V~G@P@P@ 3pP ( ( ( ( ( ( ( (?AGv>P@P@P@P@P@P@P@/@P@P@P@Nj>|(k?> |ih7g/kKXS[/oapW-Ś ob?: WI0Q+)@ 7ѱ~͟c?Ͽ@D ? …ؿf1CGRgx??n ??Bl_g_j)3N+מ+b5y}#Z"NHF9UO!I1bX[˰x\?`pj^ҤT(B*~ҭI՟,5IrmsA@P@P@P|7ÿ=B ( ( ( ( ( ( ( ( /7@P@P@P@P@P@P@P@|@P@P@P@P@P@P@P@P@P@c? ( ( ( ( ( ( ( ( (~ܿl~P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@z;o( ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@PP@P@P@P@P@P@> xᾑ~4&m#1O{cgQI/owbg}@@P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( (>_ ( ( ( ( ( ? ?hO:gW㿋5/h:_m't|:-^Ӭ|=X\xN*(G¿7E(HG߅A|$#">+|#h#hqraxc-1y|'m[DҴ[Z85=Oׯ5+AvP@c? 
( ( ( ( ( ( ( ( (~ܿl~P@P@P@P@P@P@P@$f,nTG ( {|s_?g7h~'_QXK_ Y^7y=`v(g TUߍ=_~k#kOգt55_>)3x_C/O׾(?<o^xcḀ:[_>߆o&ƍ7ƍXß?Ɵ.kh<--[Z?|-X_i;/?[|TO'Qu&?O' /ŏ??^3xn 𶥪Pc>|=A|Q*>&~Пa(ѿe~)|#oN/sx^05U|c^_-6Ú΢#GmWKxĿ|#c_ǿwg;|@wwZlvXj_P5!KM[Px "?kg>P|85Yk NyhIT3sr4]@8>?~^ÿNVOZx0~ ;Ǿ Uo |MJ-D3ZXOt'wo?j/_>"#CO^!ij:fcg/+IM/a5 5bv>9?ƿoş_@NJkiI|+\ޏxgÞ*xȒ{ SI.uM;VNLԢ r__|AoC'"_o SG_K]/H}鱵>:f\i)>?)+PhY8_Ȓ > ܻr]c}q@P@~ğ>~J߂_~9K~$]ּ5o }UsE_<ug 9ڇwn w}atb7?bE6 uڧW_ug^s|&~#hW^-] {e?ÉnW'zĠ' '}/#O?Mo'ˤGV|W챤j?(gT_>x7׺?-I|c[]}b#6ey~>qO//?txG|A/| |ACZ6㧆)g⨏Q4oj~!c=d/&O5=#׾i͡|NG/g/Tk7ڕ 5wǿK?~I>T߯|)^#NJIxtm#g<]xZSľ<|?׍u:-6hW[&&?A`{ _~Ʒ 'h5/xV'DߊQ<煾'O? ǦVV rR/ƿ}@@P@{,MlX=ڰpoߩ ( ( ( ( ( ( ( (:;PQrݳaϴ@P@P@P@P@P@P@Psw|GLp@P@6yQo ~ʿ(ʀ ?x_^, Bw|MC_~>7Í%?[>9Eqyu36wwmR-PcM cU?VO|IayQ/:|g1 h~!7?]x;W?:5mv[?D/*W5kGգӿ߲sya33Ws\hxB_u)=cL _k7 % YZ?3/E><)smO ~Ο h;]~&|iG|Wo  ?xJ7˫^7{k߇W~?|c?k~˿?KOXѾ ԚMI*GѬ|;F;BM7zT:CEPÏ?o(_tJהP@J06աz o>&iPo ~1W&>2_L_?|GwǁYzwĿZ?luxOf~!xũQkq[(ҵ 9q[BuOU#|{j__7D?rjG?هO?,I|43|Mk~[? Yo пo__?m/].ןuKk<kT{|Q~%EI5|,Ivpƿ%8Ҿ!4!IPx㇇Ŀ> #B?'_~),3⿊߆|(ls^Oiv?m;g[\`t"?kg>!N @Ow noti Gʬ@ЊTUP 8@ @P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( ( U$l#ݧۥ6NǦ3ݰ<%P@x엣i?q~4tJ> osFn&'O?a?oC{G~ x%ӭxAX>s,Vv "z_amIg5~?gOͥo Uq%Z,$>"&"5(u5{oѢ[ ßJO>>(? <W2_kZxW|3隷K=Eu]꺂kظr?d?;_#`>=jzE~4|mJGKdžۍ> (*zc|?~͗Pj8 `|-Fn:=ߗ{xǚ[8[^ >x/녺_ ]V]·?%˦z|%B'o#LdW W5-+R6jRiΓ&  UcG]}g|m技b:;o4TX~V?u߷&wh*36E@1Uw*?ኬoLP;7ӯ`_ڗ03kؿo,Wx緓Xu(Ԥɶh$h@>W/}Y\7yoޟg"d'!Gxs?@"d?!Gxs?@"d?!Gxs?@"d?!G~DwO?Joڣ5z(oC:߇|Ei~4C;Xүoݥ_ig?X\˺F<[ ™l=j+Cx[[_?hkt>MD}-)ubxS*Vi{jR|27)K1U۴RKD},%@q[.W&n EocӥBVjΜyTRW'?!Gxs?@ <% 2,YnX~*@uzYZFǷꡛ2I.P@P@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( ( (w?cnDι?>_hP@P@P@P1(|?P| H?CH >~_>5G?i'oh }P@P@P@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( ( ( ( ("._OP@P@P@ ?k?+B?70GR7ïpyjtx{$~QuZcC)/?pG_h3P@P@P@P@P@P@P@P@P@P@G ?n_ol?? 
( ( ( ( ( ( ( ( ( .p_H?_Kh9fP@P@P@~.JL3;a8?CuZ]CVbFv0~@P@P@P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( ( ( ( (?/e:-~@P@P@P@pAR//#L#|:XN'7P!~֠GGPզ4>"ݣ' u0P@P@P@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( ( (w?cnDι?>_hP@P@P@P1(|?P| H?CH >~_>5G?i'oh }P@P@P@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( ( ( ( ("._OP@P@P@ ?k?+B?70GR7ïpyjtx{$~QuZcC)/?pG_h3P@P@P@P@P@P@P@P@P@P@G ?n_ol?? ( ( ( ( ( ( ( ( ( .p_H?_Kh9fP@P@P@~.JL3;a8?CuZ]CVbFv0~@P@P@P@P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( ( ( ( (?/e:-~@P@P@P@pAR//#L#|:XN'7P!~֠GGPզ4>"ݣ' u0P@P@P@P@P@P@P@P@P@P@P@?twfh ( ( ( ( ( ( ( ( ( (w?cnDι?>_hP@P@P@P1(|?P| H?CH >~_>5G?i'oh }P@P@P@P@P@P@P@P@P@P@P((ٿg ( ( ( ( ( ( ( ( ( ("._OP@P@P@ ?k?+B?70GR7ïpyjt=?(ZcC)/_G_f3P@P@P@P@P@P@P@P@P@P@G ?n_ol??J|I~?- ~'xេxw1_'!xǟ,p[xjBmfou] ɧzxC%O~:O[4ý(~.h?,7{ {?~zg퍥wߍc[j^}{Bÿ<9{w OL A)ἒ_o? ]~/Ÿ/tG=G/ |VTt>aӯu hFȓƚk"ҼC>~_  E|Zk~|i- >"6.u|G~'iޭx[K>JԬm">/f>+/ s oٓWCּcxKOE]4="X}=uOGonYtH%ßBu֚mgw>O~ȿ -KC]xcğ=eAwhyOү+-[H #o|UK~@oMKN MF Ԛ't[97kmo(Ŀ|dO^߅;ï >ơb1,B᷏Tu]>-@u~rT'Y_oGx_GſG>;ž<[>ov@~ҟhۛ_|X7Ÿ(= oWMF⮥VXh YW=ޯxZx./u41q|+~~!eqY|5QCGGxgO4_) j:pk~m,k__[M~F6_WĽO_;Y.s|<iw-.P/ia%]c{anxG|~[go<#]$W5~&0> <3K^ -WS@='+a|_oY7oV 0z0|'{3<_Un.OJ,CǭQB~@?g5|B ( ( ( ( ( .p_H?_Kh9fP@P@P@~.JCV#gw)8ƺ<=?(ZcC)/?G_h3P@P@P@P@P@P@P@P@P@P@G ?n_ol?>?_%ꐹ S?P|q]`>kگoѥѺ+375I>*4ohz|wMԵ A-m|E=z~Ofja` +߶c ~Ԟ=?hkp]oGk>|xQz]>ၼMJߵ&kV`N~V/~~_g~u&㟉:~?1:GIu.u; O|aC-ZD"Z/&71pj)0<(xo^Wx/h? =(д;]OB5mcqz|Z^cdN~V/~~_g~u&㟉:~?1:GIu.u; O|aC-ZDKc_xuŁY_-ݿ| vgմvmBLZxnlB\T,74 /cEc* |_?k?wm{G֫~7~'8|S .[^{_v??֩jJ?gF;r|lwwgǟ}3^,$vv>lV>'}/O/-@?PzƟWϊe*W&r||^>Joյώ_^⟆zƍ7+[2)xwž,ܺ펱M+s|AᏊ<_ģVT-9oՃ77|]0m߂߲oG'7_hP@P@P@P1(|?P| H?i?~/(|P|k#LOH:C( ( ( ( ( ( ( ( ( ( (:;PQrݳaϴ^7o?i/Y|>w| <^爾x?ľ /%V-F񽞟i^+Ӽ?v/x2=q<? zW7oAG*C|"澀n؃'TĆE}ݿO ?b+ߴ7hk |S _<7vӮ|UMU,ijZW|oU>%t˿ xzׇ?A ( ( ( ( ("._OP@P@P@ͿWŸ_\>ږ\"|⧄{Jbf _vŞ}cQu )f *d|Д)G*4~Cӎrs[TϠ熼Axx9V.YmdSC)>h޶U*QV1aЮG$RJ!ԯA2Y[rm?g.i4NMnL? 
( ( ( ( ( ( ( ( ( ( /7@?Gso&_cSd_  gC'l/ DOj.:Uɘ?߅_P ] 8~>!i@<q.+t|BҀx'H]WͥO9 J?s!w_+6?B¿WG/m(?߅_P ] 8~>!i@<q.+t|BҀx'H]WͥO9 J?s!w_+6?B¿WG/m(?߅_PyU g GwJG=3B1izK&$gDY*u3m]P=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@=`|o@#E_۟2/=$'v)$)ǯWkO O{/m~/x"?B;l:X~=`|o^FiWytFiWytFiWytFiWytFiWytFiWytFiWytFiWyt_,ux>2phMW*Wc?3}S\^OKѴm.Q5by(MIƆlMy(8ѡNUjIF)MB2j1Mlr0\&ܢkmde էB G: 4cS^%VNnsb\O_/W×1 |3 7[Υ⯅_?{O%^x~Vk;}fKt}FNUUӮ^1,>wgj̲P^XitQj)ҫ(Ռd҅jo+;{}gqx/ܪuO/1U]| jc)br\)MT =`|o^FiWytFiWytG|g/=ߏ67f k'Mpxf_jºeύ|E}VSu}K\kPʘjYߊpٚSBxz  VEIҩ(EX4V.^.<O/θ"R晶_p='&fykД*Z9FN1US(?P꿇>| #x?FgG<Z WPдO:]۵KJ춱}mQG N4piP RNÚNrciJRvJm[gqfsg;5q>cqƕ<=mT^J|%J:q!o0?J7˭Nүүүүүүүүүүүүx7Xwg߇dϋZO5vm5gOjz;3̰F a*X\?'Q֧B5)*h6}_pWgp;>> -lVN~_1\؜]JXz\,"#(*p7/xC>>.xF}j )iٚ晩hf˩SmTS,̰ a*\GS'MVBwE)MjҺѦo\Cf|XJX'qX|m:_~4p*౸zԗ$ӄo{H*.ϔ{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.{H*.B,G=7_n +,>g GY9x_;#w3v@@P@P@P@P@P@P@P@P@P@P@P@P@|]"w~eߵ'WğN%?gy<%G(gTw_Ob?.?hNgSg_!@?rg^ ow>{v~ x~/.~|9}fQ kAek * {5[Z]xOwČ겡Rh*Z apتS+:xZRQRIڸBrS2P\1x,.BscթUNx\^y:SgRe[CP?Oſ&Ex6Qnk qj74C_;[+rPg8;1f7cU15aZ<4g$(U\ܜcWgG.xWpy~''`0s,$sZ3֣GR<,(ӫVHʬL€ ( ( (8cP^֠)_i?~'(|P|k#LOH:C( ( ( ( ( ( (?AGv>P@P@P@P@P@P@P@P@P@P@P@P@wQ{ٗ~ԟu^/;ؗ4 oSj7OQ߳|g?Nuy8k͞MOӥ~  ?k?+B?70GR7ïpyjtx{$~QuZcC)/?pG_h3P@P@P@pAR//#L#|:XN'7P!~֠GGPզ4>"ݣ' u0P@P@P@P@P@P@P@~ۗ[}( ( ( ( ( ( ( ( ( ( ( ( ().?Gx^$w>.i5</֧*??/Gwo=gJ 9q5@?p<Z3?J4 (s>~ן@> W$~otZ`韡o_w ?d?HƇR7_f ( ( (??CytZ|__GOզFuwwNo?CA?Lh}1?#|EGXN?h` ( ( ( ( ( ( /7@>%x7gg5g=Z_J 69 _Wg" ;Hҭo5=Br!O>>6m7*yI_Vo9 7),2Ib<5"eBykCEOO9 ?߱?k/rϿc_>+*~_(~ƿ:|WTP>tY}_S!@<5"eBykCEOO9 ?߱?k/rϿc_>+*~_(~ƿ:|WTP>tY}_S!@{~;;Yை KtmG&WֱeM, ]iV7؋Wxi> ( ( ( ( ( ( ( E(˿jOQ:?ϿK sxKg7ʏ5 QG'Ҿ3Ÿ'q:\M~<5f&֧ҿM?B \?j~!?V#gw)q8ƺ<=?(1a8`? 
( ( (8cP^֠)_i?~'(|P|k#LOH:C( ( ( ( ( ( (?AGv>ާ6𷂵{+-CMyx7] ^>'ڵ<H xVFLn,t{Nk{Okn[^^\Ciii 7WW2\O+,P JK,g_گgh hZs[?/1rh:Ms^te4GOKlo $pOt hWitK>$'u+-@Mk湬jS[iNqoeaeo5A(?o >%!$.bTе{mJONKIg4'xW~>'|67g^U–~(#Ś[].__Gԯyh澼(t)i&𿉴7>'+-k@YCh懬i\ڶi6naqqeeq լ,r0Q}q*>gi7>A όլ%)ZjNLJ$J:( ( ( ( ( ( ( (_̻xIĹ?LWO'sxZ\P~"+<)wcG_lojx+d(8cP^֠)_i?~'(|P|k#LOH:C( ( ( \?j~!?V#gw)q8ƺ<=?(1a8`? ( ( ( ( ( ( (~ܿl}:EtObcs⏍Ph^ Xᯆ|__x~C- /!Og 'H|>׿SB'_ kz{'>7hU^7> h/j\ܦ CG5EtO.?_?'C0xOXxO_4}|D׼a~Hľ"S|=_ռ/uxcPĀ/~ͿO69ُ_ :)q1ƞ%kG֩(u_[~ v[/<CĖ>}?ෞ$xo_+_''-h Sɟ G~ i"x#d c|*+xZ8֯a];g]wY4&:mu\k4F#4QwVLYxa= u{'M,c׿wa63Gu{j7fD,hx :N/~W+i]7uws#?/ JB}Yj6:^pK(/S?]nG{:ǯ>k?Ϋ'ǟ35iĻ__z_,16mGH{PbV__3%|^?ҟg}hii<`~" / ` }ã @Ÿ+ G&Ÿ_🄿io+OSxO?>߲v^<-@|'O>)J4 _Sմm?X:<5~w Oо _k>xQ1k 3k-}#WRjZ,Z\/:z_~}|V?wj xf o~| |e+V~:v__<'N?|9oе_QKOŦ`K &3x5 ( ( ( ( ( ( ( (_̻xIĹ?LWO'sxZ\P~"+<)wcG_lojx+d(8cP^֠)_i?~'(|P|k#LOH:C( ( ( \?j~!?V#gw)q8ƺ<=?(1a8`? ( ( ( ( ( ( (~ܿl}:Et_P%x)g?W,xZ־Ygo4 A6Ѵ k&K[ ac >,57[f?Oc_؟|;+ g=kZV.?tM6'[>k_FFSgG<]er7.üwy꿳;3Gn|__cfO2:ߏW ~ Sτ,-,/7w/og? >Ykæ>8|L #?,_ C\al}?q@T~IP~+x%x_aY /fX,߰W>_Om<?_7; N?? { 7&_|k?cfaW~P ?%O'ß)O_Eg->/់h*ڳ? 
>|>x;zƚT~5ߎx:ǟx{BZ<\ڽ:$79j GeSd8oX?h o_߀־|Tcictk'$+M4 Iao^iP[Zj\]C'"p]c֍bž|^\cկŖ> ?|6>?w'fz+U?|e0~ fj:vxgº|,.|K<%xĞ +Xu/]3U<=۟io ſWk~>2/\?/>QrDu+~#GB4|9h m[[o#f"X:Gc-x/~ |9Y_AtwGW[~¿',|13~˾ iM!>kk5 o,>'~O&%Z|A3LMͮx+=l*7Ə|!ր?@( ( ( ( ( ( ( (>.;oo2ړTwO's\3_<?jrs Bw|f;tIo_O' ٳɿ3tOᐠ??CytZ|__GOզFuwwNo?CA?Lh}1?#|EGXN?h` ( ( (s>~ן@> W$~otZ`韡o_w ?d?HƇR7_f ( ( ( ( ( ( (:;PQrݳaϴC |{>^9O ᯊ>Եx]>Ykwo=5o _Z?Bh<~Z%m*~͗4oq'3vOx?EMl;iʼnx35?wg8j?k4٧%pdi߳O,KӿfX?P ]ѧ~?/'Nb^ @6OwF4ļl;iʼnx35?wg8j?k4٧%pdi߳O,KӿfX?P ]ѧ~?/'Nb^ @6OwF4ļ:~Һ%?h-_gxkѾ1 >,|oh^湫d7-ιxf c^ޙxO>W?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,hW?,h>?dIgZYS0u_ kxImKH,m@ף&7!pTʔ¾ B4bhNEh'+)V|oҿc%1O+r-,v&i9fueU*ե,0*8ӥSJC _ocXٟ _ocX _ocXƾ gO|[ڇ(h״gW//nnwsysk;k_ɵiN FS0' F)Nٞ %v~7'؊ԨQVHRnhGciJ1Wji-ZA|'?gZ '5+FKd^^[}:gxE*;P3%IJ5f~G*grURiOf(˖Q]OT _ocXa _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocX _ocXD~4^#b;ό nss`_[i~$A> |Mώ7ggf슒W,ud+aƤ4 68_鸧qj_j?I/ uex eҩ -t1ywX e\>OQ#Z:CK79 | %|j)0Au·"٦qàGexK"Ş#M|"xgTIb1+ԅ9XQm>vVY㿈9W~+q_d<%4Qw(ɪJ>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>J>~>,?#^%D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xwH<;ςo$O@H/7_w߁Sco=;Z3X{ _8G`a^U־{RuksssOK⸗kXrYgԿ9=;.noe콄m''@'G௷? t[I#P:|-ÿ(>D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(o /Cn{#7*lm?'U Cfz]_Kc\?~#xhL 0_P+ʿ֯~jTy>nnnwI~TWUK,''c}e콗>D| "~?p> ?O?8xwH<;ςo$O@To Ka_ǿh: [~6Ӵɳ'4qi{1uçWw콶FIjqU4%FgrD| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xw~- Uhmߴ~†MJh]c+ilˇopoTi S%yWZJ/'խI?O/S_cYʩeRXo̹?|H<;ς\o$O@@'G t[I#P:|-ÿ(?`f)G{A-D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D|ߴ|>||\~||xPeo ]=gk|>//|I< nx k7'gn-&kactq0*J 5n~gNMC/+*wG8ox+<._<-)cq}D| "~?p> ?O?8xwH<;ςo$O@@'G t[I#P:|-ÿ(>D| "~?p> ?O?8xw`~۟C ?WFh|}oÍ;"l? `?iO`4>_ھf( ( ( ( ( ( .ПWo~1?Iޙ~_?Z3B ( ( ( ( .ПWo~1?Iޙ~_?Z3B (?>?>h%e.l>ROx~ WQʶφKizBO-o-hiV5¬aEN*J5hajգQ&N#RZQM;~YOUf'Z#WR`3<8:(ᱸM|.& *ԧ8ɣ Bw|f;tIoѿO' ٳɿ3tOᐠ ( ( .ПWo~1?Iޙ~_?Z3B (?8 .X7ú߄u9? 
^5 VFRѯ4ǚhEB fUQ:w Tf8:u9Ss)S$Q2sL&eټ8mc10Y<q7 O1ҩ aqh¬hiSMF#%ƺ#p}1?#|EGXN?h` ( (>.;oo2ړTwO's\3_<?jrs Bw|f;tIo_O' ٳɿ3tOᐠ ( ( (?AGv>P@P@P@P@P@P@9 Mߗ.i5</֧*??/Gwo=gJ 9q5@?p<Z3?J4 ( ( ( ݦCt=y|E4( \?j~!?V#gw)q8ƺ<=?(1a8`? ( ( E(˿jOQ:?ϿK sxKg7ʏ5 QG'Ҿ3Ÿ'q:\M~<5f&֧ҿM?B ( ( ( /7@P@P@P@P@P@P@_hO+7~^jLLh?-~@P@P@P@P@_hO+7~^jLLh?-~@PR/Gwf]RxI$}b\Pkx+'_9NT~._z??G~Ε;#Prk6y7~ן@> W$~otZ`韡o_w ?d?HƇR7_f ( ( ().?Gx^$w>.i5</֧*??/Gwo=gJ 9q5@?p<Z3?J4 ( ( ( (~ܿl~P@P@P@P@P@P@]?zc32g h9fP@P@P@P@]?zc32g h9fP@H߷wI;U'sOA~O 9Q;3D:WxS$7XAˏ':WP@P@P@_hO+7~^jLLh?-~@P1(|?P| H?CH >~_>5G?i'oh }P@P@PR/Gwf]RxI$}b\Pkx+'_9NT~._z??G~Ε;#Prk6y7.;oo2ړTwO's\3_<?jrs Bw|f;tIo_O' ٳɿ3tOᐠ ( ( .ПWo~1?Iޙ~_?Z3B (8cP^֠)_i?~'(|P|k#LOH:C( ( (_̻xIĹ?LWO'sxZ\P~"+<)wcG_lojx+d( ( ( (:;PQrݳaϴ@P@P@P@P@P@PuBO͞AP@P@P@P@PuBO͞AP@|]"w~eߵ'WğN%?gy<%G(gTw_Ob?.?hNgSg_!@P@P@]?zc32g h9fP@pAR//#L#|:XN'7P!~֠GGPզ4>"ݣ' u0P@P@P@H߷wI;U'sOA~O 9Q;3D:WxS$7XAˏ':WP@P@P@P@?twfh ( ( ( ( ( ( ݦCt=y|E4( ( ( ( ( ݦCt=y|E4( E(˿jOQ:?ϿK sxKg7ʏ5 QG'Ҿ3Ÿ'q:\M~<5f&֧ҿM?B ( ( (B!_iDŽ?P'zgeEA ks (??CytZ|__GOզFuwwNo?CA?Lh}1?#|EGXN?h` ( (>.;oo2ړTwO's\3_<?jrs Bw|f;tIo_O' ٳɿ3tOᐠ ( ( (?AGv>P@P@P@P@P@P@9 Mߗgb-.i5|/֧*?_jW~>k.eo߳3ne#A0^2I'<)wcG_lojx3/Nkd?3/NhĿ;E?K$?LKA/Z?3/NhĿ;E?K$?LKA/Z?3/NhĿ;E?K$??o4ZԴo럳kͼh/|R۝6p1DŽ?P'zgeEA 3/NksLKA/Z?3/NhĿ;E?K$??^&5$_gy{[M +֑̏i_H1"+|3AL3;a8?&4$gg{C ˏ֒m?i_r-s#|q <=?(1a8`Ŀ;E??K$?LKA/Z?3/NhĿ;E?K$?LKA/Z?3/NhĿ;E?K$?LKA/Z \?%چn?cfSK)~ x$]lܬg*A?ϿK sxKg7ʏǯ5_r4(_x__Ï~ǾO9x>~0@v5m;6MjCѦ4]4o:u| uEK3Ag@N" w ,+&|T1NYO LN+{O/sXT_X75~|MdOm}Of ۟?Xױc>??i uEK3A?&I1[+-3s\IpeH&epɸG,9 HB/cVӴ9~|IҴMM@ f5?_-mT}hiZ:捤ki@P@P@P@ xGWh ig ^g?_xAw{]sHyI59| YtZO+]M}WU֮uMHG:P uEK3A̿'GmWI; wϋ G>*hw'ZN'~WxO]'ս}orr{*t/K mmo?H>&WX\'U쟬}g_{eW4G:WG9/Ch,?u  |n`῍+;߳-N7Լ/x2O^Ou qwu֍隝 >%Հ9A|U%8'{OÚG<3_x7i:-Xa<'~Cd֭=m#NKC[w_T`3??*/_|A!t y[to_i<+OR4{jVVZ6&]G -.? ~)B7 T-bhΌd͸&Z q'B'9O8L6+}[s6eB#N_aV{*{9Kqe~,?oO'eAȞ6|O7YwV}k¾ Ɵl|?sF͊60mdU/&8/ `jpקW<\|u(Т⽜!Uj黹kk%4xŞx8+)eauXiᰙkBOqU}թ˖Ҵ/QN~fh/~U[|$ w?K.$82K8ed#Kyh |On1XiO~ſ $Z&߇~ xt 3Z/̖ڶsiz>4BsF5o`_ (>@a/~τ~&'ƺn_jr^ W6??i uEK3A? 
s_3> ~Y|@Z3s~4_s8ԝ+g/+ç-hG#:j"Z_pW4~:% |7⛍1żIǮ%(uA_V9Gwkh*£R___٫c/ SO"?ں o_ x]x &>;?LOu/Y}{a7?4I6WV/[K*%GIVUԯwv_Q+[_2Uj{JrÖ1kg^%?_RYP35ïT"v|M?Ǡ8eՏF;ܦkk(|ma^Iv%XKgquY^i:Ɨqs]RzŅv77wFI( xWU𞥨iF]ՐԬilMݭͨ/A4[n\~3RRel.au^Lqy\9^+ׯN*xOQE{8B<4"wrKh Z0AtBoۑV++)YO>AG PP@P@P@|}A-nbۢVʈ-ِY̟ޕtP@P~տk/_E@ƛ?jXx:O Z,o|1|?CvAj?߈}gߴψg |/{_gBk;{ wXx}o7D/Zfm;Ekm}։iq5Ŏx@-"{ɬ4k9ooc ~<9@hP@P@P@P*??goVROǯLm7w>:_|  ~о#W.>._M&ԬEjxB/M@PC k O`B>;Ҽ.wg |<2G߇;ᮯik>mSڴŭwwwJm'#UτdٷVu☿eW|IďjZ?KLJ^غ> d|9impf>W~z|}rGࣞ_O~Ͽ o|1>.cEG?thTď _x1v%>/7~/\z~}?Ó÷Z%ίieE&6oamـ|J{7eޚoC a>+O5k jdǬ躇u;k6Z5 ;/ɻ'f?R/?V\[u~3|S/Ckw/bKVѼ%^+,.t$~:WETPmF oX^O*o's~xa_u6[%-V;,b#k f3x|8nϟocIw7㖵/=to>#K? h mvO}~3']54 ¶_KjZU~YP)kZK@?m??KN( (Tɀ~-T?h_o|u*!gg  !/;|G\|] \MXE3]i,_Κ?M:?# _.|w7x]co?^yexw[?]_? |4ڧh5k[Enf +O) Gkxǟ >n1~77گ>2+߉4ԴW>a9?u{|`r|2X?o?WE>$G< ڟT~k//:c|]Dž9_<-y&t5]bxg7Ifz2~ֿuS|K}h_Co1o h_5><)e,''nK_J%ʋ\MNm.OM[ a'~?e&_B|o |,~.ωI?Z7|1]C}gEm?{yjF) M Ŀ|{sI΋[ke;oO |ֵA4K?#]2WֺLJ5JkhVR?kطX?c ʿ.u?bOثO|g|QOk1 l#g.翷_q_}o_ [~~uo+xR47SK-i6/jiEPMu}B}<O%4x'D_6>x>+)AMcwMW#|^GEt~x{QX5MSX}q^ &WW@թ (< \|bw*4xRǞ?|Wj~c Yuv׺mص++kjwv7>_wm<$N,?_jC0f_/:$׃%_{/쭣xKGмWX\3Iu&Vڍ(4UO[,''>Q ¿ */m<oK{:[9EW:wXF@-g8;qY?nēxo-k_zߌ|F~ѾRx5ڔM  Jf(=#k%(ǯ Q@վ+>|G?3_m{DsI} Ŗ>Y+]GK_/S|e'>1|*5i,3[h]QKKn,t]KĚiMa]Y{x5̠ cB ( ( (9[Pk oZ[ fL0 {*FB躀 ( ( (>`bXcĒ}I9' (<߅5| o^ Эh5x?X4HmIo (b ^Yyf@~:O~:g/7coĺw(|?~xQ[ ({cZ_JiޑxU:Vkt?π!Z-<-Y8fkMo_{j7_\~'k=RJ5-O k 1]~1NCq oĘ!w _G#}?ob _٤~a(eJ<a wG?tF 6O0lF %$i4"J;{JsnxJ7ndkk_Oc/;ßqA~߲ſث_4ggo?Y⏏7 x[ŚjVzM;S^iookf?pdoN/ q*|"f|>xbmO;y}u]{{{xX]՗O}WS[+ko&0SFn-?hwoߵ59OD><~߳GǏ~Rss/?į'4V7u53R:~j?;n_)z ޡ{Hԯ?!g/'{0?[¯Yn^ ?f|R]GNeGjQAsx G/2+;e@oL~ſw1o)o쟠jW)R_>Qf>(2{y z4&sk&GgJd A,|,a?Fi M%x'wm? 
K5F5Hʺ9f5MO$*ROfO.tS8^焣{ZJ?;t?G;j?'OڋQڟh[^&|iZi|P1⯄>%zV *Q—wVVWf\X]2O'g#N~+xƟ>+|V⏎4_|V_Z<{{GVj chv2&i]{Ωxj)njjG|~oxS^'x3_Þ$ޕx]:4+?$k:G$뿱> o kgc5G}E ~zֳ_ks_qK*K#{TpQs kS/Q~ D V_Hx" 5tOO/E'P>"Di94mI]I%mf)?_g ?>hcx5ucF`6E$٧9tݪBPm])U{4?9>$~Ÿ S?'?B&Z෌C?~ }^/.|QjsI[I-Y'/>7ߌvCSG x+ww .O쿅?|MЮoo4ɵ>tP*[k#q:][inl|SkV^f!?"hA|Q᫭'ZơoH@> (>D><~߳GǏ~Rss/?į'4V7u53R:~j?;n_)z ޡ{Hԯ?!g/'{0?[¯Yn^ ?f|R]GNeGjQAsx G/2+;e@oL~ſw1o)o쟠jW)R_>Qf>(2{y z4&sk&GgJd A,|,a?Fi M%x'wm? K5F5Hʺ9f5MO$*ROfO.tS8^焣{ZJ?;t?G;j?'OڋQڟh[^&|iZi|P1⯄>%zV *Q—wVVWf\X]2O'g#N~+xƟ>+|V⏎4_|V_Z<{{GVj chv2&i]{Ωx }WtR_ ^/ ~2x^ N(+z#^vX>OD5i:4϶B[0~5nog _ǀo? *_ ?ú;~>.|?i1|Sj=+QT@n/%Ѩ jCy |ٟR/;Zs?^k`f;FsX۠j馻PJ2gFKx8]]];5t<1_؇JQO _RKK6~.|Z eLj|M/u]^vr_mN>ķOݏ߶죦x#n3'_h~7o,e}BONE?+뚕Aun?\f?>~<)+P5m#]ծtZKլmSxJV/QѵUuhޙZQj( o~|y/G;]x #xSBhm|EcH {!&6h1ydDV`G$뿱> o kgc5G}E ~zֳ_ks_qK*K#{TpQs kS/Q~ D V_Hx" 5tOO/E'P>"Di94mI]I%mf)?_g ?>hcx5ucF`6E$٧9tݪBPm])U{4?<|5>98qgߴ+|[uOxvSj(p'Yi<פӵ1-曨lFg߲/iW_s׆&h!KX5_>'5Yt;[7u;m?dmN~Z|w[Ě핗|YOw6jdmw'x`V]5K0jTque )QVe(ʟubƼD˂9Sq1Yq&|&Xzx*WPEIb«Wg> F_Otd?~aG7\D9OƬ!:B ( o> ^x5Ս'7M帓5k;ga xG߄I# _Zd"ǶmkLAgFҵ=B?0VEGg9u,W/}KKoi8ԭFݥVq?kJOjVmU+94pR]tڝ>#^z兝װY]4# #UĺqG20#n+)rx_M ?7-WG%xw@?<߉0g&ow-<}_<7~Qwdžcc|7V  oa5,σ?՟e|W=/SWZ>_7[/']WĺqG20#&낿)爿&o!>N(C7\D9O`qy62Nib8m<>zap،F&0thЫRq/?~~7MG~#r/M+\LJ>kvռV:T~(m %^Io f!73Kp^$b/ #gSZݿv6sIxj́%m/7e|BԼ6)j:Ňxw@ԏOSþ 4(Im;=GM_=ebQa!a_7FpuӥfjԧM9Nԟ_ 8KW/d0gS 牎]0jBY*Q ZZT ( |C犅ψ|sOj6RG4o\XxB\_kzi]ɌAvyWJZ)ѣNuUiҥJRIB 9I1NRm$2Z\F"*(SZT*4iSJjq:pr%7&[ ko>d& |55-K5 2QêqyO\a(c1X)^<Q,]?NI-]/i)VE Џ^{)9~ pym,pePZ_(ӫg:.' }oG7?u^K!b< k'Gs jKlf>&qO z_f?]_ UnEp/꯳3grxoo7֪M4# #U.QĊ?}&/> F_O%ӊ?y1M5?Sm?زeeeo  m-mXmjCE 1"Gh%ӊ?y1M5?S?෿N ZS?cߊ:q-x7ud]{fyfGG%$<~&낿)O[/']Q<~&낿)o; = G/~?|#M7DS⏂|o};?_ j:D?c#3.?Lq?ы`8(bVUkUiSaG.RN(C~Qu_MqOĹoM~ǿJ+COo+;d]{{dU)$Y0N(C7\D9O |^xC⮥;OC~#5-V/Ge~mڵ'K}CRxR cq7a3֥1yd13`^,&PxK ^SjHJ^֝XSׂ8 ƹv0YV? 
y;)g50t,+ FmG.|n꾿%ӊ?y1u_MqOώV?&_A*|Rd|Ak4ou K jĶ6k}54'kl{T^ڗ0/Q/kHTKn} }5'xgk8%Ϟ[yE:3,6`5,ilj܍F7}3 ᯢ VM{\]LW=_Ws̵\]oo/z~g <=^[W14+.ef^ESj4XyCS|ܰ[/']WĺqG20# k# aӍHc/Uє2@ "N(C7\D9O<189Q0F*^Ymld0J5k (`ԔԚ䟳J4> [YFSap53yn.c2,*f80%){YBUЭnzlպ3$Jʹ**Wg:( Qگ ( ( (( (]Gi^X:6gqkZyo6g^j:w$VVv<*$p$B#0TשNsZN*PJ*M9MF1ri$ٕjpj1iPB֯Z)QJ\TV)ӄS9()4 'Ʒa-W3ѼcMg WI6n~ Fy)eG{PsrJuԮڸ^oE/7obJpěpcb^Jy %V.OޖmCmGQ5BVկ5MWTu=OQPu 幻K$yY]vc;N:4J!J(F:tSNQ!BJ1RbI$UVZթZjZYʥZjIΥZ&T69ɹJMMJ( ( (%CGEUkzOطık|cE%Ҿ -FSzuGHN nt˛=73^_[ |:CCo<3? *{ο"G kV2xAͼ[aNa|sXN3lʶQ3NS2T;{:JRJ*թ\yr'gw ptS9*ؚMRi1iaPK ( >(|Nc+ko4L׵>vRG<7$|C)#t[ۇI V2(r\5ŽYc߻JtJUkTvB.Je pV~q.GyV#:eeׯ+JF =(ޮ'VRЅJ|i6_(p/vٝAaKpLbfaq$qi1?2uCo_~e\ y͸%8qeߗ/R)ʴ^Z*I{ xd*w~?,FK{|[9ac>\9{sfIyVYBrYi:_!P@P@PO? <-m~7 ԇ]eoxt+ΗiYxKBN^~UƷci<9{{x313dVqa+)s*?UsWV4a9Bk)b?8g#ʼ.^"q3q83xTx;p?y᪪2U+}?JaLmml,`(IpUaq =]a7)JMΥJrVܧV9Royq&mybc<ʿ 1xNsh0TaJ(F4ѧңJN1"_ͧ P@>8_]{"̼lrwjSoxz-vKxnY6w֞ps9<$TgiT(a^bt(ì+Se r-|uę,Jck . 3jJ*U|`/o<8~~v7x~iw7񐵛}SXqտ>-V8VK rn Oy?jCXG(TrSW3_YҟsxZ_' )U=BW#9Mݧ 7,3PJ8S^U? ( ( (kJ>.S'o3?=*h?gWs3č?|B\^|$pIZfomausL4\uRj<;}˪YJa,ּ\*hNQG&~"Гx[/c|ڕiF.2 5źN[|!ЯUq*ןR//R sS!z^Ge]7,w#i&V.umBok+O 2L*(ѧ8:|n*Q{lf.Y]B)R)S#s:3g6]<]ղ céK7)ԩ*Nz*K[C("/tDּUsF<5aqxž%mtoJugi1ʀpXl6#Pb*F = sZIF:pR'Slcpyv,xaiέjet.Qo.m棪x[s}K \7<{kPG'RKxOj@,!vk_!C/ҥƮZlKIFyOДIQ{+V'<;[z8& ~>:TMF_nʑ>ױ 1͌Kq,,<4L$K#Ydr$;wrYؖbI&QJ1J1I$I%dZ$-?%)JR)7)JMJMMzoVGL ( (bTWx"G|QA|wV84xZ.tcKkw+|mݵ2\3C yxl$ܫq/OkV1`ptR4Ekq7bI5%%6[(keaa:U!I~Zgsxqk6_$P.? kEܟ桨oT핻ϨھBMugq~w¹|I{<h?utOoڞ.҆(VVCd>X#7GRdx˿?~5??Ϳ?j6_ fZ_Lw4ƿGG#22OYFA_ I.Q(6@I?h?3k)2 O~#NYSÊV9C[:s|G\\]J >߫jޟp>1/5LgHc`*^7\W ]G=Ex_hɺx,"+>i}g4rD6#:ׯCJ/uYx^4ɴO*>'Wop|o#7GRdx˿?~5??Ϳ?j6_ fZ_Lw4ƿGG#22OYFA_ I.Q(6@I?h?3k)2 e\jN5GRyU_'_d|g뀆N,AoH_;>7m WO.Ee7a2|,9aT9>jԲuk֛u*i^NQc6s?sS9^z-*4hpԮ.*|)Ԕ/ k]//ChHlWHGG`j]݉TE '|1s~ ?aM11|eso~s{-◈,-ͮl0iWw:Gxqx׫tr3,L_C.9Ek^WˆԽIUp?Gì,0d8T,?5ޮuğ8Lx,&_0xirZIխ^S/ +wW:`3 3?3/+@qBg`3 0̿_J? +wW:>/Q32~7 +&x _ 0QG$?Й'uC/ EL?3/+@qBg`3 0̿_J? +wW:>/Q32~7 +&x _ 0QG$?Й'uC/ EL?3/+@qBg`3 0̿_J? 
+wW:>/Q32~7 +k@| x/^?m5ʿ $׃niIτүUg[ekX k8SCx.TqOO-| 岲c{u'M9'χ_8-xX2 j:xL\lÌs^TrLg(5ӊ:qNi{<^+uZ|F*7Oæx{CO0iV @_b1oc }Oiea)¹ e/:f8 u񘙷'9|ԜiBIwqmǙs(!ɲU<%ʨ]ar8+uU2RMRK_ fw'V$VWeN U9'N>udNe?:4?<Ǐ_[=ZF>45Xb@ѼZ>iQ[jiZpJ\e4I8']t&T [,V0gPUR`ixWʭmziŸ◊\on.Tk U*T[~UTK euJSR ~83N_{A_(ɟff_/%V;^f (aпX#|]->en4J}L!@? NxK19Y/ N7 JXP[UΤ[TRYM51V'<׫b<4 Ezէ**Q֯VrmέYΥI=e9JNW? ?6EwK:Ohm}ݣ9#PQt| ?R:%G_*̿+?=E7rw>? [GU$GB?Ve͡-qC?R:%?uD /m~.kx /_Qï&0Uh{w\o|şI|2"~1¬C#ߋZ'pॿu^,K1#/f_]9?;-g_ !_La2!)oW?eE:c Y6GOKY/b(G_*̿?=E7rw>? [GU$GB?Ve͡-qC?R:%?uD /m~.kx /_Qï&0Uh{w\o|şI|2"~1¬C#ߋZ'qU2i|@CŞ-wxU[[Uugy;Mҭt y䳱bGD|/:aipN"a'N0\*Jueʤ9rkv}YkYqcGVWS2tVTaN'8R:tx)(^O fw'V$VWeN U9GI뎿@( +g/ߴ|_?𽏇| }io|sm[ʺ<-jR]u=A{7%ExuydYma+O1OafjWM:kt,wmx/wdy\<.[R~N2,*18_=IYͨ*1_)oW?eE}B?Veͧ]9?;-g_ !_La2!)oW?eE:c Y6GOKY/b(G_*̿?=E7rw>? [GU$GB?Ve͡-qC?R:%?uD /m~.kx /_Qï&0Uh{w\o|şI|2"~1¬C#ߋZ'pॿu^,K1#/f_]9?;-g_ !_La2!)oW?eE:c Y6GOKY/b(G_*̿?=E7rw]xjO$oO|z|B4WO- F0hWz֗,>t[{IuKKKW;nm 8SgnpwNnjiFx r/vROv}+x LOgXҶo Fu)jyєpt0 V*Zg-[[Fr?O?`>ouտ~8+?TϷN?[pp!?)\q@P@?r/U@uP@P@P@|@P@c'KOw+4;q_"r^!^~ ,ᦍcKU~7S|=i>*!'¯vWEċei4=o]O4=X$ _7xĞ)|s{ip΃#w>uq?<k_<ÃqasJy#4kQ6QF'81Y2k #pOx -|vI[!Sd$MNF8ƩkJNRx;<qZ\28JPZ-8BykyFXEG*~΄8SV/5Z+#✻q-Hѧ[WCΥ<#% _k)ԭB\f+~ؿ( Q<B~ǞkKU yxok|o i=o^-ӵ?M 74Ͳ:%BZr/-Ĝx6*֌Vuo>W<q5ԣM*06iqH(v'2:W_ 11B?<3~_5gGO7OPx{-0h7 RVSUԯlo? ?dc 0 ydO2TTω,E ӭx|4gK JZξ&?qS߁.Wf|9q٭c3Pl+9x:Ѝ7R\>~b_Z 2`&_;GI$wwWE\׈?aޭ^W.c~k? t-wǟ a WŅ; 뺫k:ׇ4Xrx *ysQc u%i8aZ,w[WRhm|a쟌|,PƮeg^SeӝjPb(KF,ti 9ZܰJ**~f?K/zO9,AyE+xo]kz>6:,,OdANJuG|<Lӵay:.>U8gu2 R0U14byp8L<,V6~Ҽ#*s|AxOye?9yJ:QT{,Fgp(bqYm?g8UN8,:/ 7?j&UK˧g֣a^+mX׵?3Z_Cs©#>9tX74_x03X "U1L]:.a1u)Ucu)taF,V te pgYS>K,~f?W;B .~u:t1z,bIRaKMj؇dkO~%~տ4R?o~>!vw|)K~/ _w^E2.9ϥ¾)'^(nyF\4hС=BxoᏈ_ƹ: ?u/j )|Um/:6K?>*9}>|+~ |oO e7pIps>:˱ˡF?O (JrGQPPF0bؿdN s3!N9_<ڮO,MoBi%#XX)Uu!1Jf۪hb{)?׀c|:DO]פDtQ_~ IUGT'< &bxnϫFy𸶣 x(5EFa[c%VQu)b*?/8>N fXcɳ)NܪG/(BrpJg5iiBHGR6OffukX,7c05|E*Ur*3U15h ]XQt+V6ں?F̿sqxl-z\wqTpt1iSQ.ZqPwJE;IjO(54? 
h=_C!<Ӵ&xŏ7|?h735KR.5Oĝ4>A𮵡~8K"ນunk< 1ptuT%<#VROm'^T"B\KjY#-upƶYfa=\~/BmNWpMէx<7c~Þ:z,bBW<).?tO^YO/~%i/_5MV)_GY x:!:vXp]wc9> /S50PTሯJ#,UYx| Y_Oe]G!̤Yld_K*U{)01CF<\"oxسg| FxKdƗE~jzxOCź[k;_|Cx~/>V7g_ Y^k=c?7^*c͇XʔE,=zTgZtkrl"J_*QUx*a|6"\!jx\>; _ }:/)'}CC7#߱֘֟дh6cM_U2aRˤ8״KueMβ YH}KkzӊyT*'o8siO2xTeY2Y2MQ*J2:T3δuUzJ&??j/[j*2O]|iѩB< h ,\7F>5Hqxėou+7=W'mœxRĹRR҇+c0:tRxʕ-\W `i)OWa5*)cS,k]qϚJVCGY\-zx }~`s(WNJܷPuG_hP@G?eH.< oMEq#¾ЬB[]/ӴW縖+kxs4PG$c\1qX-*`Tm1vS[sey CS=|N&iQڌy$+rSk#.~˞<g ܞ,wMog>xFG~̺X_&9il|3}\j.sx~k֫%, c,E%g,Dq.0hҨㅧOZlD?x&k |MT#z8>YakI`rFx0؜6{ ]^)7?bSž$ώ_ÝWG2|c(>,xCT4zω#Hmt w\⇋ C2.:q&MF3ʱ8Jj?ӄ(J$!JeźiSRt(0oJY>'1"N{xw ^baKԩ[8NL+֩ m\e~|o'~zoĿx,Wkw9c&O:,GGB{s6xu=CI4 /#n 3 J 6kDRexl$JTS89JkbVss?_|#ps¹>;lV3"ͺq3f>1P^fK aVt0P ql~ԷO_ m@84ٓoK ։tvznh~t_沈杨àYմ1^_aJaa/H̥Ӕ%.ycgMT*5u_O ɖYM[ y.XVXx;Te|+޹o>/|2>.7dxa2|F"10|t[:W',4qJ5)׭Nq[VtRҩSK}sQJ0u_!Ǽqb֮Ep}LSBvź[?Eo  5MO}}\ g`y:od/".cM':4)j:x, '05j,"~,{>_JJ&JJlJ W_7@ > K}rWߋ<ť05 k-g 5NGn]h4xs^-7 03Nyvup%:WJ\j8.T0V*^/ar*qiVmyV!G ƧuAVk SSFJ\³傤[ᦟS??k> _{B(w/u [(!<1nu+Zu_x[s:>2NYn;3zlDq933Yt:l0&tRKRx|D__b>$g9e)a!V8\v$g ,M|7],}ya,Vt*%OZ?ox7Wgߎ>?3Xs65v^񟄮/a<7_Z{j$.BHwn:aFXY.m W/4kQRnUC%[Rxyx3(ʥ\^O,nCԊR'C8ʆ)SONT?kJ4{V&^_Mw4? եCQ??7+I&=kiײhw]#Pfl]ogkMT6ڭ\ֲȳArۤcrIp~QAԂSpirJQhV_P:#RI5 UӜoQHZ^ gؘҨ;Bu :4b3 S$jV:PR9i@p(_gNSE3O?wxauI~|%cW; [|=//%Ξ"-5yOp32n+9n+zX'kQQJ^ί>bQЩajY L&'7|ŪfU*=.~հX*V&3^ i_|_|1b|Aw^?u?>8|^#>^Ro mľ>':շfqwxqr8i Qq,pH9F ֧'Zx5jW/+eKxW̰Y upx|%^1KV+Bqc0aK%|%Fp_oxB[x=݆Im~"x_~!4iy[Lӯ_|T'Ym/iR0t0rQX"jpJXFHѡNIT/٨}GO8㼫.⿲7/1X"˳+a(U$5#BҞ#Z.TS uK'(G_05_Ɯu{^OY*? mNTu Uo [.|o9#QFA/w;b,5OWZ?^ΖgTⰎ2IШ^gN,>.3^{S 086\G+k.kRqќgF*iZ֭_:^27!$@e[w/oڻ]*[\QA7>~6~ͩd[ρOi/E44[=G׾!S:nZ-wG%apqf/BshœѡӍ,4珧*ЧNN" 39x{X:4ȳ T!Ju_NWVKJJ<=\>?V_/8пc~i\T4KS▷oxUr? 
1W&{J q^o,G."*:~Ve*0r ѧ,= .x96;|C,5 έ%,Ƅ2'& ,;*R'C EXʑo;Ge~f'ïJ'_"sYDPG;αΦeF.3Z2K XJeg{|]:M/D!~?K+-[>.-mcvzv'ş׺qxdž|oMKCW,G:=i<ѕ|1F0R)n5Q(Wq~K ʗp^"8\/sֿRQԬcN}gGN8=*_Q|"<~vJN -Ʌ-cQ꟔I5ۿ5Rڃ^5k5izį $V#i2ltۗY*<Rk;RJuk؊)&Z)ҧRqSyu ؏ U+NfO5/޿ڴ/C|hͨˢkV'VKf`?& :}jibx4E rנd55ZZN\qL>ب|U|l1pK|6*8~#b!WPtSS 8Z|w?g,a{~-wtyK^%ͦ\Eh?|Cxwƴ{-7Ŵ?zƟYo |A̲^$[HҮ~e*+ʬ|bxØ(N_NU+A\4)':|:[g{Cٞ>#Ƭ fٓQ_o|Koțm =[;I&AegOo N;6xlFcЭKSq#^ҍㅄ1|<le`c0O1q8z Srx\ [N}iф<6..?W׍n{Ļֵ|3eKY\ip8b0<ʮ-ўa*:8_ԛpج)-qQ&|W?bWh3/*πߴǎ~~ʿ |;H_߬x oiV<cIܴ>4jK.o;O"᜿ <lj(+c\p|FS5<#IJn1ž-ӷ5:m5TW3<mpv# C-Ԇ hʞ:5k8AQ˕JVq9ԥV0?q~hz*-S} x?u|NqkfMmgŞI] T_8 <|KU7kNXV-ω^uqWs%d_JO q͡.WB+q iue) J1OXXPJVTE|q_ї|QJ8%~֟|5Q[ݴVZ{h x~X5 bZ|S | :ϋ"8w(j'Xr*1xG' i\>2Z|ЩWڸjT K:/aV1 -[qQT1xل 5T*Qڮ"t#<1?? j}.F|3 Z!oTE?I ~ GFU͖}L32̷/>+&&i{[S(ӕ,ʣI]J柤' q&M.qgr"X3?q<aN5%K:J\N2N_'M}ٓUh~5O x u㷉|-|[Zo+ƩsV1b}gU[~x,3χՇϳ,eR1jU]4q'yVq®__nYpՀřpQ(䩀u\ƬSp1԰pMG .x꟱>|wms]}z4[Ƶ4|96m<1gu#;_kīmqׂ2apXSfѯRT0:q[NTq#N3b*cis3V}xQc,*Xj5+¬r(Ũ0֕8mT1 Y|_K"_ϋP?YLeOĈ|;/k=~Y!{~{g- źZ3-qu~*_^W V "UQӧL5jqT)^{W,W ^/IN\G;{z8uQ9eZWھo3/gB&xw7c<ڎGug |!A]}|@jCֿ<+Xfy|!^^*1ův 8׆ uVpǎe~5pA9A'9C/XGifkVE)G S1PSw[#Zo '6c:9>f ;鵿k ^=^<_D)K%Ąws-A{:uq'\+s8j8&lV"IX\5=WQa:% TN?g|q` KFpU|v6hpsMӡF51iR#? av#jw-ɓPTyLkO vyrn5 JWu~5L׍yy*4ԩe rn uN +T'(רN(ѥqGAO%"b+8WͳZQfWZNJr JwZ1GP!š@ ⶗\x#Rij~$nj~+K> \ֱxRO,βÈp]ZSNД#j'QWRxaӺnpe<#,;]Wx-2YT>Ҝh5nHׯ<\SNU??OiJu>!7E#7~++^卢ƞeƹX>*iXjvחڇSm|\gX"X*QVGxa[*1Wn |%O 8`5J"ocN7 Al K'wZnSZ >:.mvϠAO|`, 3Rҵm<|O\Pߊ0RgC&S?1)cMІ(N9R^ SI=SׂsN*q1F6tV.O _I:lm/2|e42f^e.siciY4({\:wi/"82sl7\V~I~X t|BJ)Jpi|.u{ {#)|vwx: |B^[hau;WXԼ%t >omd8Y|6L-|l'<oy}4&XJ|i$˸ rc5e1XJ_4|AʝzU#Z3p1f4)G/_(|eXGyh/)bOq'/4|K57*ij5}&3G >%) ZUj\^5a^8JO2N7^q6qOǃhe߱Nx>`pOV l>.6xZ2?Bs>5 V|x^/]k_y4o67|m umcm>{qg`83m(b#5H}bg*~Ւ)BsNWpL[kxcQ熼em?Z~۞8K qxV-!fa36HMPo-~O5U87i7 ZqK qT!AUK7^<ĮY/05,ʆg:9v],e,L/ JXIbjb] :҇?E e|e |TҼ' XkcS|t.~$xCSCP'/~C M_ Yf W 5tgSa? 
QpX:xN*Ӕh›Jhk)gy@/uk+Qq&/ ۷LXo+d"8佸E Z PI."-)\Ŭjwi޾%x\n_)6^?{Ғ|rUbҔTkUf>'g<#¹uX5u&fU=:5a% Jxj~oßxO_ Xx/ǀ{}Ŧ[dY^[FfSԯgu 뛛˛3,YeckJ'Uz%䒌#)ӄcN8Ɲ8WunA`rl KexaxJ)R9ʥZ%:uju')== :˦]Py MѼMJ._O]+W_l~ݓ{:^K)!֒Z\E3l1\Ex08UJ1`%¥hޭKFW*jXJ?3UpY,s L#9sH>zp򟹇`^*qiJ1RvԌ1?`/?cO gWmmFAƿ^)׵=W_t6֩ 'kо!>!59#U/8p:4s.uhÖ^zSZJuC5w{Z/MI&+1xb1?Ǝ".|!0jX\VYRO Nq_TRXJ?G37|q_A9g@~%hτ%jZ~|{xQ#Ϧ h!GI0+LЇ.xyVcJjR Z1F^έ1Z\gfobj{(`)gJi֣:jNx.Q,ol<)i> k>[xtRFkMCzjk+*]s?ڝv\FO?X?hh`*K1F8*^aZ Rƌ'#(!dҼ!ax[2|4qw83߳Z9&(_O+oTҡ/F'nEGBъmmc'IT֣UIlMzX灘\ ORTjiT 6E:)5R5p1RrVB ߈>#+~xÿ4O7ƾݦx@EX)EW~d3epeIn!hS_3*2N T4FSiQQ}bN[% SS;⪵abZxS8b0|c?G'k_M|BbNki3~V?4[/V:K˫|_+}FFxum{Mih3?B[" "{%RXK݅:i+ZZ6噆x<^*TOLj x7>4Zϊ"8L],TӼmNY~;)WGdyou,wO'ͱ)+iVi޶"ֻaVә~c,*2G FfS͗NmFӕiVƱ1{{|F3|h-A棫qxL0ɟ~.d&#?|GCjt9lx7ϏrRb34KCVSJ2^8L7^mQ.igIWS>N`iq8 1ŷ(KL†I1ut_߰>7x ~a_ԊK&Ǡǫk 9(Ddn]߇,sƘ ¹^ i3˱q0M*:40iS;Eฏ3C}O60udᾭUVbq-?¬n*'5 tTڷWJ`>ſe{ĺO[TajMHk~.xº\:桧7~+`S*9ng_X|ENWXy4꺘_Ood\0hs|O6fjqⰘ_bf'AQjITT>ɮ+X~?SxeM@F/dN'VdOF9KK|UHK<k!h id_xW6O2+c\%ean`YkΦLhBOYO*c*u$y?d| ƟwφRx[,wV|o᷀4|&mf).!|zxi:x9եaOBt)CZjxj+IƆR_e~|?MGNѣּ?5OࣀF>&iQ)S 4ޖ#*Ӓ'Ne,4CtS8fhTkjLhBw`/io25#o?xMkx86io2j uO x{6M}.l>!i)&nLWeR\Ɲx:#%<.%b׃*Z8>TsFwOʥG(2RXlNT;q8JM|$t/\dg+i>.>'~ӟ]tH@oxy_4m:[([(c2,.>׈0|U\[Kҧ弹1Uj0NSN?OMPLn+C-b3WjʦxOSV0nhO_[}2-j<=xčKoMk55WO&}2n&4ϱ G[2I&XKb*bV2 ԩJj|O*~,4]}(5ʸ\gdIK NXRvgR/+5F|6/(YKJmև#ljdO??xÞҿh?٫_(<7]B׭O^ ֬n toN ZŦoxu|C$ Io[q&_4\U NV_ 58eХRI0MJ[^|x?589v-ƌhѡ oI{F*ф%R U{Hb(`x G:K]FQh%a,&. ]cbQsZ[|_?5o3M<)q|_~~$|5 =喝*G;>i5kxL󬯞zc8p]N}^椫*aUѫJ"  )*#e\gyOcfYj-ľJ*MjQSiWZUcYTOq'~<)>&gYд+nfAַ~L/5-P.Kx5M6n*gd:^eP4塊RhiI7$O5oԧ`0gy6-u^)X_'<7NUA+/?_~1FO ~((s775WMt_|e]mh>2~]]ǪXYX^+K򇉼EC31_GCJ+apT>VqVXJ2xܗ&+:#2+X\n>.AO ~&pux>hF&eqdF6}` F VHz7POC\VK᜛J2UX4B5Nthƾp<_7ke8>'YRIx8[m#KM/ZxRi?N|Oei`w%Σ(SZ>qQXa r*9d*wŵx/l8 ˋx'5Wxl)u /+HsFu+VP_~c+N -^;ۆ?;:<_gO;{Z֫sXZ qus,q(eEfxlקahԯV*4iEΥIܮ?`17 j~;K aVb+MSJ#9%ӫi&; Ɩ/?j/s_ڭ< 5e`*;F⩨jqh~O$I_P@~(ȿW[]IP@P@P@P@#qg/3>8W_HʜorC[{ćOm#_؞:YKO+:! 
0?<5èxc_{V5M{M>xLJ7`sL=?e0^RT141XxRb:SFe'cR a<WL{:j (B0ѫʝ)S~4(OGմe2|ey^C|Pon{lMzu&aφ}OH.4_xs ̳,v#?aj*j5|*SW 'NIJ1 ΄K=Ubr|-pJT16.?5ƭRӭ9iEږ&lo||)o "W\-/>~ )L>`>#.]lB5Yn~ga?O;ŵž/ `cKf4a HAVQ uVIДJ/ܷ)dwe'VV*V:zP㉅eJ'~+a[Gu xlx#χt/H߄^Y!3W X^fs-֟'>$ֵ? q9nkX6{1j5*U)BI:J5+4YF)_xwY\-r ܢE”UVBkΎ9:srm໿LAx,-?5ox\-xzRm%_^H,AaoiN];x<}l/1x.KVN # U*S*nrʰ.kax8 ]li<=L|ic!FOQ&VPVlUڔn<>{hx/ /_|OnA#O|?ÏHִ/OH/43W\+QѬ~3Q(Սh.qt+*7s2K*{*|k-ᾙk ~⦁`v{gaX0\k[^jwZv$CJ%l|9fsʰu& |l?#㯈-?ǟxNvNR6't)# [xf]4_h -| [i:7_8.eS8xS(8֡e[q49,d8ӝ.INœ0SIJIO57>qoK|UpxL¬kFYtUY֥_IZ.Fi)7]pW>SU*}=47h >&/>&#*# \/໏'uaNkb#G(VBS:Sķֽ#¢V>KHSRqy <q9hT֤i!^1/~55'T&94mJihfbl?2Gi^[ekeY]h8Jj J.b)T9^u*Ju*JSslxöw/_q Ʒo \z߉O>,񎽫k,t.m^i:6x[ 90iṞR=< qPBZWہ~&xŞ&Sc2*5Uxdsር *َ&LeJ7~ 40Е,7{_9}7P/3I>57Vo.5WO*Ju׬4K{(%\1fjg/1X_9( R4'GMƼ5xR(VVgVR 2<b)prQN)Y|XhaT*phќ(4/|{K׎GĽw ψzϏhG'u":mMEXhVt_K_ dy-nt?1pqԱ ǺT ־0is+duVkyyk:ZOm1h5V?|sن)< L5mHEt1BQAףRI_Y_lp 9g |̨qu*JѫCKIUXlNыQ᷉~7ekx?WԆmoDx,Ze4o 9-#A݌ oi\+d8 +`qscT157R:NXm4N)Si9~ž%qw(3:3([*<6LTU,:eTyի5ZNH(o_))-ޙ+G_XG>~ ~Ԟ><]apx^ѯ~:~ sJTCb#Pj(R 8ѝ*8(⽬UjET> 4kV3:ag>+?Fdb)ץJJRRkU3,Ꮙ0eUEL.5uTa>=_o/hFE? j?ul<74_V{jZ^V=:dT'. C(R)Zӕ,rTi/,_™.67 3c1XHͦ ”(Jkf8)/F[o6|J|Z7~̿ +7o7 wjַ=GH; xW`mtzw捣j:epggqˢc"N8z0tc/yPh9{ҧ&}o`0po'S6xg*Y5%IcXxx´b>]OU:5'쑨_5ZKw_ Dk,F֚8ƻk>K~UwpbjB-\]5}uMIӼ/Jp7B'R9qS [_xz8Zp\$Z1he:ziR:o;q0<%)q26.Uqxu sPЧ燗0z0GRdקJ=o(nkls/|L,I,|/O 1COxKtM:=r[MVv>†UjpwBrN6z18m׌a/<bw&DžRe|1ggi5mthLz^ 7FUZC b׼Ox^%woÆrpL2(f'¶:,4,40^ U[ )Ux|S9wżKx,Q^2U26YJԕ 5.X<7=П NK%pzur4M:sJ1FiTm9Sx)SӝZug+s E˃qkPiF6ӄaGS:ӭIEc ԠJZ0ſcNO5xY׵)Q8j-[\n;S5wĺ޸Qu Yc{96x_8p.' $ 8EFx؊JU*U(ь)ӥAR<ƾ=ޯ;2SXt{m7OjN(g,E,Lg0jy֝6|s*2t~Q8VBYzqVΜQS*NNJqSTS5b>R"^G| g?_.h>۬$⏊~=N]~>$~| DJЭu-9P,aCKWф`)ӭ^NaO18PJ,=JTDT<1_W;(|Ix%Rl> TNU 14kb+bjAIA:MZ[imII1( (@BٰzL= .<> J = 0TQJ *TScE$?xV?cqXezV+RUk15JjMTV9MRmݟ?fϋ?ÿ$GO>:E)y./ hƯjkˤsEx佻1MqYeTgq/QBcaqPb-wBtUGNB37?g? 
|4x7P5H>*n.u-3J<)ZXkS ZhPxNӵ^+:G4_;<x5\CUbiTkK ,\eN7dJ:T{x7L/2)dr<u0(:seL8rT^jU9[ ZUzMYUׄ>(|9V 5k;6ծ5kiega,piֶ}aͳ6K+j?+P,W4U9a*\gI{:jpMsYCÒ"r8D԰^vB n:XLdc[*҅\=Zy^8Zzo_?%Bo ~ hK'{;FGloxP5O|s^Amk&mi>ož9V/3|jf%Ɉ ;u#IWIT^U JlpW:0YCF ERiamFia,J*Te+G W._~ݟ .-5;7 ; U&x* q'ۼok5鑙9׵]>nL fQ|o__Yob%1k8NtWt.UUaiNujJHS*sJ)SOZ8# />V^b#ա.jZJ8j3QKNjJeշK?ggg g|=i ,?a=#ՕiJ|AG#2Wẕq.q1pyo-\ ZènT*G (cB F/+?o幆oC3⾳TqQ9Ba^^ʌ0:Xh8mkGf4E?h:4O e ]\#A7 Mqpl/oWڧu?kڦ{5 p1JLQC83Y*uBQT(ʯjӪ֭^jTC8Ǝ4'K9#`rF>Ά'NoNP^O \Ӽ;mEH'H%HuZxᅭ SHƫY^ Z\ag&e} qGj,FLqͧ<5JI{<ƕjJ1<]b2" βGe SN.4eda%*\ß Cs~|u-?V3_ jr>:S K}{jZOywkWf^pXC9gͱ18Y_CUamJ5VJ# jKP/F:s 5' ~E`13rf8ZE•WF,=)եB=&U #x^Ywkյz^+mBsW(>8\^$.<'x/8oe.ZrJ#VukPraBBtN0,>ѥGapXJrTjJu*Nz**ի?ן:>/$ULֵwt}Fҭ'5m[S/3M[ᵳIn%cyT>yZnW 4(pVZ TRԩRrj0!)MF1M?J^q F#ф֯^)ӥF8':jԔaNNSbi/ ĞJusc-oX|gILSSh[K;kYφt+jv*y6KZ\sSq6&]tڋar_ZjXCFci>H{ Z <Kr?Mళt"7ݙ3,$$IJld<GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxw!-C0h/OO͇><GF>AΠtoxWk/wܟOI |}uxZcYx,~ "?\7~2%滠kc|Wq&u8g߯cU xe[\/(ʤ`aYU>wg&K~1feVf^cn%oWVT3T){Zg?gF:PA(8W;z_?pĺx7DwlJq8sԜNoh+#< ఙv p|=J = nYNOgF!Vs%rZ: ( QI5 H`3XBhHi 9KXM?x @~Qҏ鿳 o,!+6{u ICLGЯ6YCf:KI'Qˊ?b8|-Y>%'Ïq/?%Ѥm?(1kB,aYCaඟ 5_ mϰI\m,Oi?[O?qGLGЯ6ĤqC6ÖEçd4-~&#WE?R|8"l?_O?b\Qƿ+o"Ϳ)>덿6/' iG.(_f?urȿtFOأq [o}JO:o9d_ :KI'Qˊ?b8|-Y>%'Ïq/?%Ѥm?(1kB,aYCaඟ 5_ mϰI\m,{o$=?I7~nZiϩO?;`]q;aIƿ+o"Ϳ)>덿6~S+ޛ+P)=/g5b|?<)<={1zT5ծ5/QѬ>%ΡJ9> È,~etO1O[ u&tJU(ЫZ:q4T*=?~~ʸK'|5kSrex|v5afU0tTU40h*Yϒ*T7 CB (ۛh_gX~ޟ~7xN^㯇Xм!gxGS!<mex:xY[cKңЦ[2<A`2<}\n0ֱmOk:\LZ05.i'9~ ƟG~ &̸78fSC9~.irLexPҝN|EKՔH8?çd4-~&#WEo?urȿtFOأq [o}JO:o9d_ :KI'Qˊ?b8|-Y>%'Ïq/?%Ѥm?(1kB,aYCaඟ 5_ mϰI\m,Oi?[O?qGLGЯ6ĤqC6ÖEçd4-~&#WE?R|8"l?_O?b\Qƿ+o"Ϳ)>덿6/' iG.(_f?urȿtFOأq [o}JO:o9d_ :KI'Qˊ?b8|-Y>%'Ïq/?%Ѥm?(1kB,aYCgsao| O+ƿIeSþ|)X5|.q|2 gW֛n>)ВpYi=c{Ƹ6/2T``9rc0Սj ʢJ0j+ }? 
xq`reKN|_aOFr )J4NpX7 NI4,I#3#gwcfcXO$Ÿ  ( Qگ ( ( (( 'u +5 I?EfI-<x>#1?ٞ慡ڢ .V1@P@P@P@|`Ais&~YW*Z ω++"u-O^wi&h9/$w#,嘒MzP@x&c( &~c'_5|i| <]_3Tu kpizʅtM4/+LlmҀ ( ( (?ॺWB{(.?OᠵO7~ h0i?GmqCrih'౾ǿk |1x[ OZG<7'o~/>"/|!Ҽ0_|M_ˣ:k֎4n 꿴;=\6Ǹ>47ྏusJ?gGѡ]Iu]WMӑZ@4)L~_|[χ? ~;?ᖝ.߆ Ox0/9kzI^c{ipG$ɝBFSIZVI~>PusOwjL'_K{4-q/ j:ީޒG2}gh2}y]~Gfu 4_/` EllB7߳_g߆~4Q?JE{Xu{g{x.ai.#b%>$7CƟGϏ^k{ ?*G?k?~,ϟ |-}O7HUkGNѵ0^>x|{O*g"wsh_qxHbC|chvu%Ζ72*I^/g/>{4ߋzP_jV+^zνeo+=+ #Ԯ|;& ˬ]g ?_xz=?W 1w!Z|!}~W~Ο g?V ]CI;LFW;1o—'u5Դ𿆠Mi f?d|'LzWij7 u?xVs[Ok2hXqY\|9vWn-g__xwE>"Ӽ!|K׋4o^>xw>=Oj׭wl!}D鿃\/sBƁCx{Yh ޡazz>!UXhΙ/D#ӓ{Tmo+|L@o߱?|"5_|W|*&4Kϊ'5škZ֡o.,~=k X<6[e [gu)'|^~$K@1 .Q ծm.G > C ƶ)MeDGO?wďL{]>|]Q^o xHz  5sυ5}GAQ#%o/ x_߆c8]O|?ahZ|QaizƁmV4cWdԀ?A E } ( w ?&oSv9*(7 @g >|W;7*xc~+P_5?UoZƑc5ޱښG{vPgjv_tO'm߳_4_>#|;[Kŏ))|9':&᥸$kyBCyq~_J~|J #LxWG hzڮ|Gi F; K7O4I%6ַ3{Fy~>3~η{_?|EٟDh|o X.~iBRmoN4dl/)nmO Ξ?QĿ+>KaCWQ;q=ީӦ}_~ /mnI_?c/٫OIq].>s 39vixbEԬ{»X оZ}3f?^|aߵWSO3Ĩu>*Nwi~h| OţEլ@Vilu(n5=:[Y}|l2~ iO G>m/|Kc7_{7?7izT<'+MV "wYkEO ֯o60w3q_ ~)6dω|A75~˿4_W<: ieӯO'^)-[Iu}>uKZVca'G4o:kfwö(o_?Ǟ(6smj ~c:bҮ}+V#Ҭ KK:\ W?쿴ׅ5o\Ҽ1|]> xֵzz6]gIE!ePjV7u Ӏg|)78|G78KDO|h:H'څf|)i࿊:^:ooxSFnmFK`b89<]{x?Qώ<#ߍ?f_:OOY²gXZܰ . 
\IUk`>^|7{_ kkƏ~//~|b"wx>8LJ45sO)ςa%&Yd&a?ę?"|PM">|++᷈|Gi,چ Cx ˟v+m6R{?<{|-:_٢ogQ藞'?xk+4o D+4]j% /~ SxY z-66}H|!?xUOǺ47*W|oGG|'nZ\i:RxSCxRn`osi|>?̿`ĚM Ɩ_uMG U~ݟy_:|c.>v/ aF'j̪yWCثxnYA&0]KO~~߶ׂ PMPG}_\sɞ?O;Cτ~>>:/,?i_Wេ~Ԟ*?f7O?+ӭ??x Su^5~~ßs[)S;#_~K1oxH?k|F6| ӴZ _ٷ෉?e~_P#gO7$+ _>L|ogL$K{猴E1|_ >4Z?WŏK ;{$/캳kX$`#tK|F8O7_ i1Z|J վ1z-x]eƟ=NĿe?e?6\f5վHk/W?MǍ-~xFmcQ bXп<+ jdzb}];^8]i5_~?S 죫߱/tM>$| ?kľ >YkKhfb5?Sᧉ~ 9^:Wě~4}3>7}⟅>+Լu6o/_uHt D̟mhP@_o'd PԟdP j߶/1N2~Ҿ8?ewaq|Ct{PݾG xVPԟN[]{NXxAtF pJ|~>3)3L@~ |fxO-KڞsxsAM:.,*Q@_Ij,' 1C@k>:@|G/{~/ ZSjtk὾w=" 3_mZN,v[<vH8ac?"㟌~ U~xb_ 7qZ꺎xRW"tD_@GΉ?|cA|*OKgh/t]r΍ NZƛ&?.d/'o>Sgᯌg|;Έ|?|;vzn|&4O`k|s_hWzOv.~?N]G ه9 7/>o>뿴umtN|Hwi7rx^;~^|l>STco h_~&xFEͿ!?c^8P|lDžh__4ߍTύZ5|j#4/^qaCPnxL4B;]M]k E } ( w ?&oSv9*(?AAL9P< ~?KaOڛ $DeV2h ?h?z;R4/l4f6cRn~:ɯ_R-?koo1OS7{o~,n_>%-6?|S>3:.=_ >.CV׵}6v:pN𿅿_Og┟o?-~_:oGD7>9k|J, -4kqGE~x|1G/1;׼xƚ*|Qyil,[ht'|WU]7Kzx>wTP|+w c:m ~-64k vCk][HԒ'Zkx?iRYo$C`/?2.ǎ4?>g嗄{cI֮K=JľJm'-e|9kjQ>*`ߍ? ~YWsŏ &𥯊~!x?@.xSjd3?jvo~֞lIеk_l-2Q_~80FqZx[X>\'7PJ ??W:R (. qw-[G4{KS>Q?o1SO~7' 3D:"?]t?Xx / '~'Y{O驮_Aϯh~X@<}3 ߷z?v/5e[˨oLV퇆4{OivZWe\?](l)wO|a_ OzO {It=Śo<)iU6/.5k3~+?kInE =M :",q4M[OV(]ͼ[|8j<<)CCx_Oϯ|#%l5ǿ]SF>ѬAszޕi_ikp@߲i7Yهᾫ<+Y+1{MD_/UBxt]~$kA`Gi^ψ_č?~g/ ;xY|=Ūeox.ZTex7Wz~Fb"?-oWojѼm?>.>=<~ssŸ7&-4o?jƹm{O!fЬUk7>yf^j< GO~֙eQ|j /i֟ 4 k]*XҼM5펅wހ4Am Vooq FC 1"QET8U4E ( &|*= ( (9PQݪa : ( ( (>_ (mȱ\~u~P@P@P@{&Pٻkkjdm' @p (bmigzbA_!8wgv P_o:*/?Ttu8ʰ v (>?`%'~ߴj/?5 k'GAd5x+ ?z~ >  ( 5iswǟf7 }>U:<ٛ#ܻ`sҀ= (>#|SHv>aKχ$Tp' ( ( ( (?3kvN#IT7]#q 4P@Jx`/$GGQԣ OVz ( ( ( '#G̟ OÖ8U($p'@@P@P((@P@P@P@/@P~Ɲ!xBk}[ލx/s{@Ьso^@u[hִ y5owW~ u6eĐ$2|C)7/RbH .m Ksx>O xL4 <?tM+L94k4B㵶,@m@P{K[[k{+y/,6ex'xTxYXړnvoO~џ]s_7 >"sZ]oqwt^M?þ2ӮsPЬ}7Z+}_~!i6x^;xDaxO|mxXLj9u],C-G>?/hP@P@~(ȿW[]IP@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P((@P@P@P@/@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@~(ȿW[]IP@P@P@+#20!=C)J( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (OFB? 
7쮀?l/c"<@?( ( ( si[Hy ݹIC$OpH?@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@~9:sXnb[.{,<)P@P@P@P3u #ڗ۞C|C$RE` bG"ta؃Ђ$h*( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (; xfMb.Pl&<<.Q?@`?l?: ,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~ο,%&G{Yd~οX(5j):dΉ:~",`E`b @< Aʼn=O'wOY"8|Oe3?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@< I~i(5#OM?ٯe&$ 5̠W??4fy'&2y_W?P+jG_<?M_HOk@E(iH_ Md|_QD?|B}K)&ট ٢ghHf^@3+/PA (OأBq߳"(*"F @q'c~d|Q ï;'W7Z|']_GW]hfyVVW3././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/images/rpc/rabt.png0000664000175000017500000012764400000000000020630 0ustar00zuulzuul00000000000000PNG  IHDR6sRGBgAMA a cHRMz&u0`:pQ< pHYs&? IDATx^ U H dWV5"AЈ#,FAAq (O"@%&D@v1 (*:3sm{9TuuuuZ6 `m`g͡޳f. @M2:qU_7M/*}șa]Fv lvs?sne +|ml*@@@J+tJ+'9_z{/l6Y\ykV]uo## `66[OwG +jk|ȏ~wXh?N76 `؀o;=5*GC`~МGl@6p#n׽2;,WÄQ~{'ˆȂ( $6y WslhВpZ׼4PP[e}͑ BAl`l`:!yo5OPPS/?w?@qs<l/M\eئ(((0 ?<8=qN:6 `aZO9~4IIW@ ,W^`yl`(m '>\uH`7؆^+@E٨a;606e4*@`t5h T89`6P9xWl;(0 GZԏ e, v՛Y7)}q5> n7M*l_a`9+ RRmVU~UWn3aC$ 0?0O<֒ t z[uUc_ٳg)SDh~\/n޸fx׻nA|tzӒoP! ,kcVHלl>9s8CKb4G2~} 6%S`zRU?5|0\{/ǕW-T O=;s_pTiӦnɝ}nݾF/iMӪu],[w-CT|) n7{xv;(; pp쬊).BS%̹7y: .M?;0o׷.ziU^:`X:,.Km{)[Y?rV4akֵr<i5cm[Qv7OSCE3 B-*8>ә?{׻5zݑ3q.~* ;X-VLLJR7*?"8#*UV{xcοR7[ەG)#_ByL dԵ@c6Y-qڧj{PX;70V;_,ɏ`Î3r{{gUW]z2&igD=p;i;g'j l-;d.++jXEXP!8'UbEfW}6.xZ몬w=׭mJMmqX~ץunVNu< U`ql1G&aǹnnpկJ}knҤI 5qq\=vP, ؂CvXA--bdw. 
jnVjbVGPUL+cpmuYR  leŃ_[) z[1MSY\~awGeYf˲;Cޥ`bleNED=AM9a *3ܦE|cV.Jk( ufYjuQH~`Ǎ3+IbN쏱&YsGRyGŋg}Ҵ`{RW2>V3k;v ]X-v?99} :Tɚq'z6݌.1]3zlݡc|u`%eVdj>ZĐi'RY%*Q-x 5&Y1vӧOww{纚.b֫5q^_ ZO`leRE'w= `ET~bAm]Mgkd(`֜<`yVwvpr98MdfsOf 1xc~ǝݍCҵ`ble~F+x Oj݃f*N돉MB![bOei~ ;WQFP[C蘴zÉXs[Nϱ^|U vhKll1G'G{UVi⪫9O^xQw'O 7ڸ'n<(s ,[wt L)vTW/6ߴ#slde`#`;6=ݡYg/Q+~g'>1Lid͘\r:N5`X}F: !kT,)LƤ1On`9:"_W>G?rozVF-ɌP/Z& e2/`K`XJ|r`9:1;|ww;mܣ>~5*]nʔ)L>̠;*5`X= V*li`ӟ/<3Ug?ӀleR"{]w uk;[oT:j?QY3+W Uչjl ->}f=[c,UB[ovmJˬ}E`+{p*-{?^׻|;_lTz'̙3nj41٘Zgόb6ݏz [o}ckSN9e еh|:\u^p}l? `;(,6.Ċw}wsql;C/FA.sm(Ns%8}m7|IgqK~򪼢sᇻ78MEME9zklm` usW~3~]F]V謺ݎ}@~ۧϣJYӶ=1[]S7=[kܩ?6.|n„ [vw-Xj&foOq;6J`167oϯrvS&oxct$7xjScN9M`5eMmFrUm`V&4@4UF7c) Zv`}+;kX؇~)R;cƌVD+tѨ㏻~v4ԩ`;U}6YSoQ"lYXEDZXu/#\?낫nFIYvvz޼QB;& neq'pB*zǜy{VF%iƌeٝj'm5S^me tjrqymVPv 63MS v޼y$bk^K^0%7{r"OOg}nTַ56̹`;m6ӭ.D` -F`($S`L9*F(h8c[ ._#p#}=[^ v~OQooh7 `jlYc`yc`:1]*a w˝Rȫ"XŋSO=4AN~򓟸Y/??}7v,l3K75eVsF5,U`f_u 'cRuX,XHCusjewi lW} `;wҺns>&c`?k]};<[{'kؗ^z}ݭ|`Ts}3'Ы h`ZuI/m՛^$ t!:8C"K-ԍU>[n'\VDžj)l٭2>8`v6mydE`\~y[fg!I~+sϹ:˭ڭ߀_| .׿oG?Tlg /^[1X>`+C&&%.[R6 i5تmTl6S) vnAlW_=<7+դN'reN}O?>O 7]veNYu2pUwaD>H/[bek#li"ۧ)߱ۨ60luDž^p!leRRP] lش:3ßYs=+nYmwfƬCu;KYt, b-ۿ23hM9m $}| Wy Nm'l8V)z8[A_Vmm!@Tum:^AS`k ^VIIA( 6W  `/H7[ɎH*hS#Xnn9Gs.pmove;Fg!3g"[EaՕ(o¤YMyM6q?Zլ$pGg5 Kt ׫WNr] f ~0) KD[ ԮyX}/n;5 *?Z-k6 vNuk`\lTdAIli5y+J*jWYj~&2"Vss4_IօVKwK^\1=, 뮻m?׿R=eVr9kO^ nMpsM CPմhQ"El# `6ܞ5a楃PLJ @in=]B<+1|%>9 ЅgϷ@ēN-Cy{=C_\0f/M_B~z{+b6wۚku5VWa?-x<O=b+B[v]V.9"DW44Sd3**p?cUWܢq^Bg2W LJ @(T$ai$ɭkU*7H7Vsϔ-}Qm"mtubV"i5ցtM~ j=guV4tŭɔ` YMn+GWCUa.[> <~>2T^O(E҅;9&sǿS=et> >㏻6޸>*ɚQ=s[?Y;[oeb9?dkl}݌ګnl{`+*P {>QS'cq!/86:U"/@Uweଲ p'X+S[0Zx["Ν*sΘ5ӓg$*{T!>?w?.ܘ|žs9j+XMKZGmx衇ZhsAo7~뮻57 =<&a 'b<|P  , |P,,*"yȫpb16u-6Ej<Ϫ`{{;\UwъYf̤MXE^ ^H@v{p5BEa-i 4!Ԏ]R `m`fU 2:>E*iKt 6V6`:l ꏱXK~6]ƖVuה+XE/^ ~|Aett(+{r0Uz=UDu7q x7:Fp߶ŋ&s;^O` W `Umnx̚؟ *` :k]  jl۹Õ̜VL"\7ovfEO=XEc7JgΞ:ooTR?v!2HZW20[AU(PMqi4)d6{+ykWegc ږQY~TyOOQa;Ͻa5Qǻt 'L/'|m2ՄNZSVsm~C8+ jYO:ia*1 qv<9c7W RYwfCZ\NdPk@),<~],MiPo]`# 
;p!{mtXdGbTr4a2ɢieذMlS9Wdh_ZbǯVKc`N,xlNkMN_j݀#r_K"}XT0qj߾{>EO~Ҋ>mSS_"gҤRբD`;a![i ; U@e<2f=vz%`;wB=]* 8P$bkzH2v~Z3x≎ֆ6^șr7j?i}\{ CC"CzD;mǍ`;wBk -ksHY=_%u '-s„Qx=)9vV[fVM_cWۍk}rK `y jmYszʶQ0KWXθá![?Q l)`{yU8i8QXomn…|u;(^c{[K쬝@ dw*7/5`r#Ofswedҙvv tUE/ [21)iNeQ Ԡ 5hUWAsg7 `%I$v5pW_}ܔd Uex6r<@k_kMXMn7܊j4쬱D`c leźII++PXF s/kN>:k t >͓I`8yV[}ݣ_ynqۮK Ә.7[`ͮ|u׵h$T}{νpPЫsϪltӮ)5`+,ʤ`Xz,XX-GկvR,UUo׶0%["Io],Y tMf)I8K/=] leN[T,$̺`;wԲVݭ(m9`7yw(DOLn=k_Kf͚4@W+}{oT`;^Aۿ׮W63y1leRRP Mg.M4+1X%>9 ;V,+ Or8'p,ĚTdM%pdg@7cn7Xil.x?=] le[T fN*lw[E;j7zwƐjǍ1ZvK-x*r$w)<_jml`;^۟׭BB\V,(uwՌlG7g;;nܹcV /LG`yo}[nɜ4+7X+Cm c۹mn`;v"-U4~DԤdj>7'$pZk3eK`;wvNW%vm-X{'ft\ph"Z6Y+V3+ ;)鎬oiYPl+(-> 188a t!.A,q\7v檫Nzi"i5XկKerAQ㫪=Xhتr9Ze7qd'$3tI-UwauZؑGcً5Q {cʘ&nUWmՕV_ߊ |$: 56'6rCQY>%$O4VV) vhL(VPigItؽ op}\uX'Fd78]tEe`L"~qhj:85D`ÆMS2W =cߏ؟#Hߒ8K*|yd_jd:xmS޵^yJl}vkOYkÎt;sT~vMʫ2/s7ol]tWˮ,vjUF: XoD`r۩ 9}3>}o+2Zb@v$.X͓w `e]뷕29} g$%u+-Z6NƻǶh~[;A26^1lu~xH>Ul`{%0mFAvsG'h)ik5Ubu5r4nUXM"U6";`]GcWMͯr!SLmRkB*1s`#d+ 2Iw @`#{(9 +2 믿; Af!V qvZoѕMlm%dX(Nթ (^u 8`#mmh|cd2ҵ;쮺*wwm7h F dW9 B;I6 f!nSluG: 6YBHߧJ+`TjQ`zR Ol1GȏqZf5 J@tƌ[.\ `uxd<컦MsZՎ1]'.s{+)|9W,[`*lYD'زr PStͽz8 hiIG}Úk;oC{x(ܦ=ߴ_-}0w9 V|P l1G?EW};`ۯTޱo 9s渝vi8#g4h[ [`XؽV#U"nK[g98mjyMԏ>y{ح/(UHybN??hp\k`lÐ XUY LqX1`|O `͚(Pll1GD ~H!?;T>3Nr;Zp'V~Mn}l FWl1Gq) ~H!?[ZAͿ1 - {`X} F: :IC,;s6>c`#1[Ы6l)6a)b`8ϝ*`|k st 4 ,[7tU!vybNiגetۀϭV]uh~\ٳg)SDh~\9s8(C~ևoa}79_6@ϰ>,=y;tz7+|~l>/n5,|-d_XIipkߋj8/)}dP5U鸓m`#//;/֛aIyMkO'Ҵc]r \V _&~|[IFj&OѣjoWn>(ȫ>6mJpAyrͿ,]#dU`bQ|97_Vo 4A6*z0[AeͺU]W)~]X6a lPOMj'iHu,蔵Ɂs`b O'GMpFtm5P68Zt<+82X*x`˚`b? 4P.u8ݾVq۷"íϴOIHU:VyE4G#G-[y/^1]vn{Y)+OWS;Vm QI+-lWYUv>vLzk;ZK?_Otva'kb?Eg.Yc[D`쟴dbКZV=#[Ѥg ς߯}%P:QCX(=0Pҏ`P "zog0npp]ʷvXUiPC4hdD*G49(lN_ڦ:^ Ͽ F*OUP2iH~meZyt W=>N@ nZIwQ9a8.w\Q[!y sK%Ex6V`? 
u4bWI zBPAy  6i~\( X3PkAV,2@+S[C- :mR@hD0صs#jsxja~0:c|K!эwr:`l^ygKϠg2np2`*3fu[[:ҶOoվ:m}HPY hVd~-|aTYHgvQ, nkiE#,4xjږuJ{=tGdW럧{ܐ l|Kڌ=#%ې%늬m>K[+6*|Y4eX%ް~àzL`# @AjXb(`+ FCS՛_!kcpp986F:VLxu`XĤY_ y/ד={e{>c~$4`{fÉV+ga(?ígz 0+$+Ut Ml^vSV6`BBdaN7-*:"k!6@ KkgҔ;`XϺI+yN3%|akz{ļ\ {v aOzf+IWlwš +$ 4UD`j61gq YD2|V6`}nNcl`Nb $ۛ_ [jpx]C 6ζ]u|XvyQN`XM"ÅbVeY6[k޷AoޱlWM GRIkRߺv:և:+;i?En>?ipipo]Ah8Em5+Xp[ڟ!l= cu'/3u]®u{ZP`X>Ll+Rƴb5P=*ǯ#X/+wl;H#YƖ j\3k̨a6ɢWGtL(ZSO`-ZW4sÞ#ip8 ~xU]۽W]gY$y؏5)0+GOCN'0vy"uONQyQVy`XnQ_=,gtZ.U /3VZ>Z^VmɵɝT oT,NwyJlr$o3VYgJɸWSJ{,k9iCГp6 l@S|_T蘴zGacTn/fs9< XPll3.аkw\ `H\X~{+*rQ(Pll~yP=6P ,&cH]/As tG6RW-(tXte`8j`#'N85 Al, w8 Z stXte`XܽQ(pk `x,[xE"w5-.ot: `H\6`;D`kQ7K˔!I=Ui~e΁ Yvʔ)={+r̜9s1'Og l`8zFeNeR tU"ِC\/&E8~D^ iuN0QRtZjWEUE=9ԑÎ,66H!?$ 嘥; } 2l`mT7XmJM}5}8H(/6F: ITWn>(ȫf;r9:n oYAc8Ӻ> .{Mw׎ #H;aY-뗫\EdlЛl,[f$v!օneq(FH &k M ®Ƃ:ZY֝V 4Vx[F}ǛZ9Y#eO[d,mЪUVZ0kqEA63`ill,[ زq\Ԭ'G6`& J+8 ZC8A^15߰I|NsڧrԾ"MC.ayc\^6 46U`ز:59м)~hl#~$3tvFj{Yc] os6\i݊<~1p'`@7mmsJX vU^ G(;YKdM ljuMV~?qX"B^x/**u;ܺF@/+B"3 4C6~ l)i w'B6cʬx[+.6n291`7u $[%gBDS' t? *M.OzjNdT5VU?n&vjkVTl4,VT5d`\Jgb@N ݛuou卣󩢽l@+ţë`IlI4P5NΕE! .ѱHԍ_`.ѓ6ɱ>i G٘p4) ngif(.~@r-6 B `+"Pnl h׭7ʁLX|E@TvΤOmiW_iݪcl@VዯN)L^%;nۋ.BH_ U-ǕvM `mWjZ>) ڂo+Seile[Tӓ2f):TeF!YN wQ>h͈mk3iy=2Һ[ `pDǪ fZL$Iu=ء<Ir'`y톶Ql'LG@`mo%HԒ"@ ]4ov@=M~WkVJ4x`# kgҶ-$:C[l^.>a]AZltӏfu !`MXn:.l]~>(*9+,i@)F^N^Aol{6`$U&R5ӘkTy!Ԫ,"! 
ӺEt!ni_u#lxwMOl5`L8Al:HvԆp^[)-kc`K4c+еq9Z+h7p__$bΌNӮv$NC~66_#rԧ[Tllus4u#a4j]ӖQ9f!ƂFVݟXkcy?߀7mc.vmȯ_ߦ4}flÐ ׈)֧55U)$ 4͙=dY熓9 e%`Fk>#`}W6/ǏZ7mU]H[(m[ɖ*J[c6OC~66_#rԧ[Tl,PH}\m \fat>`#`aw^9l)#lt>`#`{4Nh 49n*+o{lÐ ׈)֧55U)$<z؝I:mtMLܜ(Pl-2SI j8u:ԅa@ltȆ@ۀ 4A6*8Ms&i6 `u0 K(rkȫ ((R `M6a  l]/Z(0}$!F,dӜIڃMb@6*@*059CΌlBG*((R `M MB`kjPJXɦ9:mҫ,/^Z 8u:ԅa@l!BUkϙ,dӜIڃMb@6CQCp,b"uaofs'ݰ?/[11)iNeQ Ԡ[TU+`Fly Zf &:SϽ96 VY(jPAd@`DN{l`IX7}}ǀ{6`mV[͝u@lC ̳`+RKiA ` :uƀf}ln„ yf[w ,[SV&%ե[$W4v?~(n /tO?t/2te|llU`+*P`zRr<_ʔD7zz瘣}tך̶ s9gMrcjw/JleRRP HȝT @iL6RJ"q t4p]{ٳݯ~ctx0g`5"G} iMM)F  pݰmēNu"&Mr'p[x{g;J*GYٛm<\_g6akDح\Q_ *F}y[{:r8G|IsU}QwGeYf=zPlt>P +kv)D[lm`΅8EB-**vÍ6v_z% [DOlÐ ׈)֧55U)$6I^iWw?яo~FuQc֛-,d;Y6akD`Ӛ*R&o01c[c5ܧ?8/B#OS}LÎ`|l F: |Q[ƍIC7>ll; `կo}l9soƤ+M2śyc|l9[`#llFO6 :h\*i6RL4606ϟ﮽Z]vzw]c7M6g}C~IĝP';MWtwqi5uZ/*_>O;-d#C%t` , `M* @CmG! wl-[Z(vrk @;sOk\_zw'Ig1cymlQϡ LLX%ih4zӒ +6ު%`[ DJJ|@+p/bcfOfmƌ=s_pO=" l 硆C47ޘX s*4-(pK;=(CtqxEi2wb?4&{cNf|l*t|}?7pёUIJͳn"lX.lD6ޱ |( ~o[y駟v~_;nحl뮽aؑh,L^~}Q ގQW2qg>N;GmW_wm]|W{&Nt$3s}ZN1{'>~aO}gqƤܽ3>v>2>6خ P, F,M9ƓY};i k*_9FtrT4Sٳg 蠌8;F|E]Zw]7UjM>hv[m?5&m;(.3~| y|,$䕶l lwZu~ŋ]GpYS4.r&YuUT4 .z?>V"wt˻ɽk =SꫯvSNuz׻C=P:~GvHV;`+v,**`+2,[ǝVqlc /gK r!~ss믴;}֨b'p/g_}~u _Q`+s,&&%U6  Vfm `X;:8`تfʋ)7n\O>>^{mw/=M_ֵ`*~cq&'`تl]9, qU\Dl E}qf:o~MI `z{oK'?qYkz饗vvR;,[kXY1RSf몎a%`d(/ޫ[1n‹^L~2\Bg.>>3WkI[p+vש]{uB\{H= CJq2`L9]ؙO8'`)ꋻ7) `V\ѽA;+[nqN,>swzp I'?8u:)*pڴwq馛Xo*V*pH)NO,{vuebcјK AS槁ne_J V[̚,[5(Q^o5Oet4 MWt+~'I&KT}#i[`߸n̙nM6q]vJ">h ^mk{le .IJs'Jjzu!@j`y;nSUleRsX6|w/5],UwMWY=72 60)b>n-,/~-Zk/{x#8WM$@O.֋vr`Rl8XsVyE1.rL/=8J9,t}qrNY+뷢ߨ-;lS>{`Ѕ.nY"28?':֌PoTȪ.ĭY#_A-srX6|w/5]<n<}~޿+#Y(q va7=LJjVݙX-C2 8zKS 6) %t ,H5ձ0x˜]v؆i. 
3,'4-(`m:xѾ{9O ,hu!u`F (UW]8W?]q-{'ƺn FtNl.i`32+Oízr*rh CULe)جR@f7 <`MQ3p0o|l-(ƍ`^KM)`5)f Vz$عs3L;n緿=u}nu`0g#pHؕ7 2pjZoK`ZM0B2I]F'߬ 1z7< {%jXl9=,t}qrN?ywH2KևDcN_=䓭O?{gZR9'4͙3=i6s܍78fX\`_6aklͣ#Zq0 ɴYp8X xװkf&y`Eohl&q"t3l?^jN{v_ߚXi˱&waMdۓ1&-# #-gle^llЧ3l8!eD``t7P{BϢ~x?*+.kڊXX6&Z&6`ئ틻t`~a{>k-raӦ^5^֒'MrIO؞wyg4V-[\IIVI Z>Pz?nr0Yc`>zl2#,"D`0~H,$<5}#ہ7k lթm9З-sxX6|w/5]NVW\]vYO}j pq ~I#YEf6A`[_ذ1qDZMlaهz9 ha%~[U7a.'` +Dz8`^Ծ9^┺qΝ$ԇպ'Nt>hk 췓hwXDb'IVC}_ó҂SK[gtvfz+aoLlHX-B6Ӱe8iw.V 4Mn)) l7`2P85Tfeƻr$H%Q`9(>iw-VYŽAIÇzgZZU@)͟?5& [ܦHl~o̬>Ԇ&cubz\j~ڞIּN.~ «~bn:6`*2< `~~ǝ݉'n[4VՒnXmlfkيZ?(YVK~cJ VӒ`k֩UCMtw'nrZs{y_waG?] `oJS>4ϺϥkZ%C~Uy~O6Ρ`تfʋtXf7pH]{üB0U :]1%0{^2+{&\"m7&q,=+UVC^U/m[>Rʯn iG6ʰXԍ&]1yP:nܳ#X`GIZ7rt5X5q/Χr|WEфjtWEJ+qI6&FbRAoL7<[}o[}׊pg?s>,r8ց-]K9]R˛&qzܸ/<<4OM q6 v\w(Hs/~1;mD@6&5`w˾uXa7« VuGS Rۥ:&qzvSwuRHzSz `E{#0U:.oʸq9o4Oʃ?qn) `ӢGZFG>æk]آ%:ޫ {z_k[dzRkXuSJZzdv+ӂb #9C/U γJ}مXF"y!#1Cq_(wm֛˧aWdڮr|hqͯ'{cfEfkKu]:Nos¶uƻUVHlSmQi[ITEXLmwv%pjzTi=dօ1jIk *-<@n`>\XEmk3so6ƸdOݍ̸}c.ȂLIm3hQ(4pRmKoz_A)Oگm*˺Lox87Oћ9ZO6`~tisn'u;ݷѱ[f7!I X&~reG^4idccNf~馛ZyiL qN@Gs{nPV{`+3 `ȦE}ƈɓkC l@LFi@6?<FoVWo Exa4эrS(-r<[كTAl ygE`oI"I8}pK u$/#cmR3fMWܵ5b3]hIlyv2zD=1}?To}^.?2~'tNaO_6oGE`J,9]Sgz;nO{olZ" aY7rCݏpYC,r[=SMCSVcYSuq63l`/`/V֟+t;o-Y";3ۂʴفoF7yd⣒ΛI8{.3&`zK7ÝA|ߐȾ!K}!~ HYP^i=?TY~Pcɩ"~}ʬ_ 1VUFaƣrt!. 
v34B\:9)\ q6/F;;v…(jwOtw,gq[1 RAM6iɞ6Hf2`XPش^El[;3Ap+36;qT'>ֱlYmvolҥP0z7#3ó7 ~;Xzԃp҅8?K6Ζf'ƍ7؝}٭%o4V([C}sђ7'tR+L_WUw}wt`|Xc~#1p' {m+&`m VVNhe%w%.N/oT\9l ^ܽ2,:unWٵ^{kllt9,_j jD5~XM&U&tMNK=sk']> %N4u5K<l`rVXK/= o~[{e7%@֢GS[nT`^S~HVl}pK.QO,ꖨW|s7wHq6NIcx^d|Ig}{ĉ< bOO"h9\sM `uI, vhS+,\ y8l>qNsm7d>ڱEMd<,7pC?N&)BmKuX& ui9, x+igV^yd79s渻;*mF; b2:j ؗmر8ȔlwZuqNmny]ҫ_|VRIز[VkMF_5/6=f-royt!Nho4xtșǸWo}[_ >jyE_Їw1`$]|NcvJ+ ;۔΃q{f9 ==06]DU^Q,cqI6=Fci~N:f|2:Z'- `$@˻7߼5[+lE`e}ahE~HV,[e%K;:ph?v6pWu_(0&SOM½.?|%uh={_.-{Mwͪ9Cm3[)ܯ!jiy|_GUuնSկo"r&ʗ6VTgǨ qEf:>4r( ӜYZۧ6% ScjOUKlFtWtsUW~~5]yDQFmC~Jv$.d&V(GnyEbɌSsp4_?lhہeFhU":΢VZykt8!Z?1l-Fw gs[׿͚5=tzʶNCatUU2XC&)F^ cQİk ч\ߏpg]8eV ~ 5^U6i9 `CX`Z `k񱂬^z4"?aD\K.QF: |Ql}ZSSE B_H~wa`$Er8)F+!Zqi6-fk1g4]3>vpIpt>`#`9$N6Iuni3a*hL^EA5f.ayy2-Ӵ(.c`p^Y6^b' 0g`5"G} iMM)F s~et6yVPiqx3Ӵ樖Zڤ[Mڤr-8!ZdSN x`#llFO>"H!x'G **@󣗊k"Ӣayiδe(!XDmipE:~[M(6d?B/lz`#llFO>"H!ꝡ( ƔY6=Yup\o ȷ6akD`Ӛ*Rwn Hn7-ӺZg-Yx67v Ԛ9ydoәÓ<@#`qhDH&v.TF˱`ͱzmtȆ@۫Z6:J8 `@ltȆ@N@]o-yf9R8\l6a  l]/Z(0}$!Fl)zc6,`qP` աygF6O!@`Hr=li6ڤ21]^[l5)$4ՠ@ 8E=c3؀&tK9K+ڀfVמ;`*( KE8LNlh (\S(Џ l7tY9'=1"g>x4lEw6P-4tn ou߰m5StQzZ}V-irl[VVvֹ 6&OpSOZ(PlygcJ~kM}䈙NTJߏjm<_tOu18c i_nx=_<[ePV}nCqY'( a~+bM:ԁ((C]`A_l>0pɄ!|k#AS&v "'-qgSg l6``UvL+ׇTw+׏>}PC}7}LCrG@@:`scEqMr6 i|} (PH\dX[N!b[uۀ͟WoxQ}#0ni.lA[FGyնp=oEd:16˲AͻN?WϟY՞Prom<0iܔll#e7!0jD2bҩIQ)P#COt},šVX+W7}6?ڦK琶LhI;ElZj\f"A6le.Š$[=-O3`ºt~! `Nʀm)h <i TujeZ;غ%(`uܰll@{3ݚ.ƊZd6 kҘ6ʟPj@ d=rC~\jw{ .}PaXArVt5m x/%o ɹa@6 Uw_n̶̎`ֶ M` (U/ T'-H ,, bqY&C CtY|my@,aFb}F5! B-[~ V0ya+606tU_?mFufaP*V+|zMoZln*P`}6Enu!#~u, :v `6 T )`2U %g4_>-7}60‚߳X34lYiՅXMi݊n.[AǨm&oC}=mk5jYA'̆[c]~O4-xdG Hj{-F-c Hֈ&>doUB1og-].e`qb؆KfZ& 3jGJG:^ـ` ix)]3T[liUyCUg@RSX`닗aD7,ۢ*dQ``Aæd뭿{hImkjڼJkJײ68v枚! 
+*P2P &>p#41=6;H-E B˦iMݨKtWYe~#4 PPP`KU{CF\OlVҏTۭ g Vٴq~7Y*{Ë"Cueu,] ڡV鐥v.0M8a@@@>P`Vx,SĹ&//6`jpi[ x!*Ϗ1MꯒU>BiWhuvn~bMOU;i, :oCDZ;\x[inG6MD@@Xy-%gaۀ`,:u5aTpçAOI58w֧V( / 4`UO[emh9B܏]4?Sr-5<@@@>P`+_{9Ѕ.60P6`p.^| !ASDg9 D6@LJ0kX#G[>4PPP Q`OD?5 >XF- `|lu7v5iZ]g+oo(FobT<@SS^5Ŷև0{_/r606ۅXiMռl^43沠 f}ȓne0>9g@T@k( TfWGpE }Q08XǟuHvi,3I kc` f/mcHjXwf^Ӵ6뱵V2ӗN;í &r"{s)r*e?~g;/ 9^6`6^˰?q 8#uXeڌ!̽;IFƝZYUi+˖Q9~j[Vȟ9 7Mp~n̳c4 BZoV)U ]rSԎ)zmǥuoTKqӶ17/ߩ+U:!h"jKɜ}Lij1Im֡2~mWڕ!VYvNa{*29US-p'N? r+(lh.SdVIfiхcWoD72}>_# SeH!nAl(jYfc3 O\=NtY3w^=w+ 0<:Y-8HC0{ .aRӇGƬɐ"O+pou -h_OhYT+ڧn[(,˩`؀u.xRETc-zVw; rZ?% $m8RowX/P9w+Rc.pS7lzE4¥tK5R}p9VN.3yJx> 7[I1[5z5vb],Xf]3M>h_v3}ԆAGui^3\_Lߠڱu3ѥEvNV[TUF*ol׾ct(6 `ՋqϺ[I}P 4m(`?g*_umJ ;CȠiӼ|LشYOSQi|v-xFiŰLJݼ12Lu|,ݝZꪓO^8ԉS `6- Zmⳗ6u0teUӱ]sgi-YŏԆ+X7c7s줭R:6 X4+CLZЀޒ0(-bc-'rqPlE^ߺ~k~yٜшaSY=CùflioO+#wYuL6l$*҂tlo˴'v2DXq&`ONg"f~7F۲?Pzn[qi{t3.gzp̱lh ,\H<҆]aj³6tMHC?SVC woMM+cV:Z]3ea\GY^3lUŶ~ւd8v /4&aպ+s- m &Zflk3u:`6o[uҤgVC O&ZD:N?3p8C擫>}:u!qVIM6Z/BP A?7ʪ1MOVT9G|rmɋ"8QN6 `W<ۭj/N8=qӜaжav0d+)Y-Xծ/oijpgeZy~WN_ZS ֵڇh) @]G<vf!- ^"MhԺ b,}8VaSW^y{Xc?~șq, `6PO8//_p4yp{g跇˾KP>oj@kvk3 P~Lruc#i5?3\/GF̎umq *c4՝#\1vۑꍌ9)Iq 9 ows?snvl6`=sv:Ə_)<Mf};_qr-{9@@@@@X>)GEn_Tt2kФE?lB:h=BW/5츲8+K[t~e{ɋ(((((0 &YIQBXK?. ԥAky;gvv_yŜ{<l r< @KXA4m 1S'vEt v1GR"WTo)Xдv|1W<(((((Sذq^l0TENm't,>Eu!NXt&1݆4PPPPP Fvu~Eo"vݍCTF'-ke+Ϗ8eGcifg2&im^<4λiۂE۵ʻ1L@@@@@V `Ɍ|P ZwP6*Auv]~V7+ kQSՍZ2mjV9k;k_uUNkw]fZ~Ktژ`ٕ_Kyɩ X+ XZɴҢMб>M^P~g &X #ifAFuOx]Z4S~0rx6TyO(l 1~lY^]GficZ鬺 ,× iv⃯-¬sL%;@@@@@h)``֭VpQfЬm*fz> U )+j],=L7^K:㳺 !녅I>67k4]eQPPPP\?2h]j53P6oW+ˠ&EL>٘6mتtg>6"t6i^0OLd9\Hk ۖG@@@@T @;5 R$UaFqCP[+*X#UldNֵ8'eq-pM!=9]@@@@@_]OТ(((((}mσr]G6 _ce{U10N+W/j† @* pM8v%#x ۮV_nj. 
YPcF ڭYݙ^aLC;6jjf9_BMUF Ĵ!.Tڷ3ߞAu>mZ:ԝ(((((mH*smR%e߾ U *Q!ژU}Y3qaQW>JjYGtvܬˡsY9w V~pib6L.Ŵ!.o߀6\Z&,˺lG@@@@P@PNdvx3yT`Ӡ %BX lڸ߬(i AݱX@^0&FK6(iFѭ6X=ۆłoi܇UE#n 4Am18оp,mI!LBuv3e bISUWeHkC<]p[xiggڤ`^;cڐVFڶMɿo:<6Mա (((((PR0;ƕX Ni|Ӝ Ӗ`U+tlܱ̃S]> 9 PPPPU!OF% %`Mjp)pI2 ` " D?cam 1]v.YjB _b!ͮm>Q~Ft=qj?XV3 qv@@@@l_|Jn3kG- -QP^nYT"t?Vg8mW.;,2=m[x}ٙv/\{}YHp;_}^b#u#,_ٔl(C|V&(ە& duLZT-pF[ՙ,_EwϬxڢ㲎-]^Ӯm;߼6䝃owl[ Ŷ|((((( P mңe3}hNA4PPPPPPP4T~7>wFꖜ PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP KEmWIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/images/rpc/rabt.svg0000664000175000017500000010200700000000000020625 0ustar00zuulzuul00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.27 key: topic key: topic Sheet.28 key: topic.host key: topic.host Sheet.26 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.58 Direct Consumer DirectConsumer Sheet.59 Invoker (e.g. api) Invoker(e.g. 
api) Rectangle.55 Topic Publisher Topic Publisher Sheet.56 Sheet.60 Sheet.62 RabbitMQ Node (single virtual host context) RabbitMQ Node(single virtual host context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/images/rpc/state.png0000664000175000017500000011321700000000000021007 0ustar00zuulzuul00000000000000PNG  IHDR `JmsRGBgAMA a cHRMz&u0`:pQ< pHYsodIDATx^E]`vDfB[ IAN$$\qAeq]Yv{[շns={zoΩsРAbۦѶ 9snWڟ%S Г!K6nLcB_|} /ұ/9ߏ/5)հ9?>T>?D~ۧYSO @>i}mӇGA!m ۶lm+mnlMClfĶ16ц~F n}]ښ6,Žn&vAEF>li&>ͬ[K[ne }?|L}(sk39?t)Fxy?5l94zXeƴMF[ShtmoM?]3&QF(׍Y=w75#oEWgzq׮kk3<bX~wĘ>V+v5v#)cߎ]~յvوkvW{vW+lg]z%]xrve.(οȱ.Dٹ);*;sΣNy|tg*;ؓNcN(4dh{CC [ـ!{*?x07h(ͱ>PgAzۍH Pֵҫ?uWY}u֛g$X.=]؍`%*kݮZDtTֲMjQҞSyyλAW[ԝu҃5ӓӫ 6Di?knko=iX{>VB/tǾRàvrv0C>tңÏ>^ݢyvv '+;S{ҩܾc'5p{⩧ٞIɧ)[3)ʞy9f-*5Xfs9~.Ɩ?ٚu ΠV)+[3aZ;ͱ l]ʷ>6!l&1ߔ3Glme]ک)isOEソn.Zkt+\ά^N /q@R4T-`{ X*0^wF]u3l,0z,8`nY6 q46F>&00GmHk +lVA9W1.`R%W_ye^@ K'=L:3踓j"=/=8@ `MW ^w 뀕/ll~>₋ o" E m۩gLCV,̈́l@pIL Ӱ?1^d b`A[3vi f۵9×MW5/ tx1x+*@JZxF`]{3p={z1 \Bl{Сt^ ߃x yv G27ߢv>SlO+s [I_q% QyhGծϷk' 淚lٶNr#]o8~]}̏iʶmqh=Z5V'}ッ 8CswPC{-\7$V32] `U@s <6}k9~R~'Z~ʣ $6;ȴ18Fz;[|twn]÷f nM&F0dzǀM6@:Y_v`}5]^3ep籷{.Cs3q˰==ۣr'/eؚb'BV{/6'뇬b kzړ5AkBV{ jS ,ɺ^dK2,@M{a- 7j.gUQു5aukl5p;<^Xt=]Cw@.]7 .`k][ׁ [m^oe֬/TVgD^|޼Neﲭf{;i Z I6muwMϜI}wmx.Z˶lﲽŶ%t^C<W^}fd`;c `OkM@ [GkMk\V+yw;﹏~1{ࡌ[v}RÏ w?^Ox{)nRvݞƃ @X(dNcn875B8o_zT<~C{ O'qs<[!cj"dgÓY+T Ț&dŚ.=(ȶC-ȓ(u@ 0nlüX&tnkCtyvY{ 5ˀ^!f xݟY0*/k E}hcK|豥[lڒe1mK,[qr λtڋiOdٲ'd?DvM<8Ц =ԣiջlouzm5mw mw4}Ǯ > <M8J~eςl6u}]}Xz-c -c ʂm:*4T?<$֭bxa7r^*3w}>L?=8~=$?q2=2qJ0l{x$Jb=2#pax~<`zp/lӃsf0hO-| 9ƜE,.sp=l5lp37cY.dMXnXXd="dֆDzs6l[sf1?f,ȳ5iGk ޸ڰyۤsIA fw3$e ݘ]g^+^t t1\3&)rZtB .p۶;Л|?%pl&MAN${M9 \Ņ)i^-|` Țs6dMldh1?Bքo۳pM64at7jqUetܤsnfN3UxܨTAfhYCrETвb.7h>!e հ}i*ze&ΊQdע|MmҔ|-eiNVDvO|Ӧ@^;Sg /;ߏ_zmk+V]w1ue6;l,/ul]D]N:=xYg:흑[lo6zmյKȷTLҾ=`P2vT mпW^yƎufx&W/&3*xQFbu-,{$ٜf=1r1jY1m6C6>9 h),>{<~,kx;gϟ ~pD^U>۝X<2V-c2V>q̉l0Ze06zu1FdBldl`moVV\iz [ӫ5C札 %yUYBDVM CZlyA lo7B*żnkjOW /eXl*ؾlqO=۱qCCDm"ClyI' # 
sAϰ7x5<2=‹t!{{ӳ9V\y>|pe1b8Ui5W;޵u|{-]Y{չ=k>V߾uyk#Σl/t"lޚ1Օ=5WkZju\رd¶nC' Є,B],XX"ڳEwa ߣm>BB[ѱ|gT]_@} R{mle(tc"T Of" yn;45wER7|:ldZ17 U Y6+u؅ 0ȚaQa m H6(t5ʞ. [Y <ݠ&t5p|oXhٞ5P[^eضiA-TN}̓imX s/3oE0`9SO?CaVݻ}klT0_~Q1]{LCO]tgؖ=Yg Ul]r֥g;lﱽ ZkN^jeg*{v+I/B۳lO1:{apl5hYh1g. ڛwyG>O/]2{ D&sinKQ,2uoKQkLan wscs͞ac7q7xhg7<^\| »=a{1ǫQTs&kX2b9 jV-fM/Vd]bKhhL}miA6 -PBO ,g؞g{%n~sc9_ag[nm %GyQEv4_{6mr+ijMͳY e-kz;W=.V-Re?+ m{M3+H?\AvMsyknʏ{V92]~:Y"sv@lx71¦uCM%3h/e::m E{c:#ɼp *e=2:osEH^/>`coO66ŚmȚՐfMzgyۈ Zu Z:6XosLf[{mV 9)lu)ŭ`V!*l/mB̼p~]&3W.csXr ]rIYD6CIkS핗~*5`Dx2{P%j>Aאg|[LGvtỐAK$UA7?lŋ@iVz[+zmBV4mی-gK׳-ٜjNK{7ؖ=>7ۘTiXG/rg؞b[ƶw-UD{9Zhj4m"ۄ驫/Ailuv$, Ѕ>ز bjN,¹$'_2q;v=4/St΃ Ychٞ Ai)/Y%ʳ~f؞})[ʞ2Y{ ~HׄZF_23QiTr1 öm;ݓ#M{ll3O+: 4dE+6ۅQ r+]H 駟 s0Y!vMh{zh|bX1ì,l3z^nAKe}[Slٞg{0'wN8/Yؖ-\ػ%݊.iZ6g c =} [^yAk5zBvj0$gu@810C{Y:YfٓF:6|Yc1 ߇i8Cw Zt!{8=]{ҹsw=`˞xȁ-cH/a(/yA+D=Ʃigxι3i԰?mϡ{?^{vֽ|tjB&^Oןv'Mvfo>s)O\,zLa%=AŶ-zK!=Jy{{{:i*S5]0}wЄh4m ی:Ah.|ElKg{rpzvp{znH{zqH;p\~9&vt xq/0=-g{fHzmh "w9lfHSwHS&={Õ4h[ Z@@V-?(*HM7:l7\s<9dIcp:!}q'!cǟOLȿOƚf϶8'mv&ٛՇ`VWo϶p^h gAϲ=ۋ>q>}t/c{mkOϟ϶m1w}hLlSٖaGh[:CyZg1ӣ^-Gye 3y3psryr^χ-{R~_x ~:ۇ}x\e4 c˿܇<"Ǘ7<,* xj~x<}Jә|L2lG!| j!` 4 Nɏ Ex#';٣ݽ`f'|6mJVے*gwkpgóN^KNqQ|GP@ ~\@Ffb52rќlUݍS-P@X!kjj!i.~A Z X_ ڪm]mu@[Bo^5W-AUAs|ўϵ3Q \ˀ-l'M Loئw#Eiǜ|wNŽ)X#]N<:vi񚛉4g=iji|z ε~a]ď;v;2fm=2FvhF&?HJ3f6*v0ZؑCi)lO3=fp± V۴}[ ӏÖ;DSO5-f[t0Z6=t ;jz[iX]*[s~lǦk" lu4v<<@v*%`oOx鰱.5z02sJ:QI k/|2sa2NYg^4@׆nzAۀ] r `7*հl"${q.raruC̺PVV*-U:|5QU tJt<^x | @rb3xwi S6õs.<h_,m/Hed: /Wz c#"~ܵ0]cQ&MM#3'ܓylOҢSؖ=~ҳlϟy0p9˰sW\{om8UJE,gJٞa{Ciibz8 ?kJ9Amc<=]XX3q2onCW#c ]yNY}O I='t&@ a:@f8 Uyz 醂g!j{Y億ϺO/rW'z1ەHuX;' Ggw`F*3`O*:U{xVm2:T;=\G5؝Fw@v4exrhЈ-?ziU*$~BIBXUkyqY1B!9!#UEC8 #Z\$P\X.ٚ>6Ɓ ɅBZBY■lH6aq;[Tls]'5ݠ4f!{P|nV!"y-tY]3@ι+3`<7ܵ||+a$%*:c +y?uTvYv1KE]:vU7rͬX@7TfҢNEL"-DzɋN}'֓腋O='˗H.;^eo\~i[|?ˮ5~+NWV^Οɉv-w?uI vi e.ͼU]T70Eh5l*csձ.ȃ\w57I[*k %߮` bEnb١i:«d=w3M9gq$؞z#o9x;xOA9gU^'oQɾe@ {5[zyWx SPrVϻLwLOf*oN96cKC=ƞ;FO.؀\j[J_˫٫ŖdR9Z\uQ  -W{ _a$VQb{@kʚ{eò=%ʢ̼l[%к^NkBH%lmjO׮kg V&c\8->vC<ݨUne U +JM?^]bG<4~);/[ 
{-Zߖ>\ڞ⿟i0ǖ,{1\AjǞx֞ۓ|ߵǟ\ΎQ=ŏSמ[n߽mz%zszл3{hl6lf׶mmdzl.?`[l]w@kg6w-6ƍQ J [f^Nla(hN6h5i6'JjrJw;LGHnu(^V>M%}So5 Y~R 'HgoN--mQZQ[Z1Z1].%t\Wnmm]@'3 xz ǟq\tlvtL׶t4QlGv*#:-ճ=bZ \4nh5tV6(rl1.'y[7[0`=\qaS6R.: Hȣy0ާK 0zD7j@esK8ͼɘC^W/?2ICv\HQwVqޮ^,m=6r伮 [7 t3j tc"tѷZ-%0kF T\^Qz_0$9Syiʰ`0LJ! #r٪l' {mCm2sh[amiH1k*%1$ iAVLz&hMFC5#-Wy7ߢLmw xd`¾Pdebery%0ib4Y&G<0(ETu=N]ORaH24501Z΅afZUVGqym*|ԯEEZ^uPhXo9 ^SNixAV^T@/2Kus Tfa ݪеAcaЭpߪN mcj~*ǮzJ{7 7UV8s;@[rP ȣ.cp!Ze gc̹b`hn|]ʋP> .<Q^.<\n[f l/q[U{ ]DG3\</N(垜Ш< 7 ;3V+m=m|2im|ln1GYtA T Rs0 H`&V;t ]zjlW2+Չ(k='U^թǝgO- &P{8՝! Cjo֞)Ak9$#hyq T 2AFӞ͚ݥ)Cq!CuHݠ𲙍L6$U@٧]aa^nQ^nU<\xu!l h&1l ,O.x^ĐhΜ.6;'x.o|3aR W'0CAsI\3l{GI3ܸyܠ2`[a$!e&Wsڦa#jm_$Y ۠[ ] 8=^/8 _ /x|ks=@<BաT iouz$8a?9[2eS d/4X{mDE l"8]^'+ag ;E-YP@q O@!v-DRp^ ,vbpbTрEc4d{m=.dmІA^K\N8+qxYI0W)NV>lޓ[Eky|vٞ^Dce2knJ*,&,l`{$^VNg?hy-y:4Q8|e[f+oðO[J{%ezA {K={a^xQrop~aO/~ֱYɧUpqlRT-.| !C*r s#S^WdrmU*ϲQ^m2s-E (s0&ajy)js2LciV׈q9ӲWs2ma{&}qƋGY+}꾻@߆dn#6_Myw Az='`/6Gm=DC?DT6Er?!}i.c])_}e||y~^Sk+{E&4 o_}1 V0+kvXآy=5nZ$&m  D @$@V˔ DuS/o~~g/>nM_~~vm\y¶w [MHg& *4ak/ J`[% DuSin6lXq,.M4  TNN[lV ܼmi9X±2OFX2(/܎e+|VV4 CUb{M^-(VTPYw0+4lU(@'}}V-ڣ&jq{|=^[ ZaU?jD9ɮs{ݪklZ6#AgTl*ʩ4a|Jzg5ϋ{5- "$Wt\ IuՎI?ykjC9poCZ 괏addX mY2l?kPo68CW^6nM﫴\_K}|f<)Yﱎ Q[އ7.7?xG' #wzbგxU"{#|~J7H.+Ѿ)/uh#_!G;TaOBJ(,0e<Bm_5\Wk7})}cT?%w(bk@۫W,[Lr}~/|:agOH|_B=Os^5y [/|lymf;tlOUL;:v̅|ls>3{@c \*9ʌ8 NfTn?o@Nq|q AB?AOh1^8m=7': Ʀ^جVu<[;2' n3χ۞x"7קW}vo|<տ?:?$j]zMl5hKJJhvN;8>~[N{5g o]FD[{҄U0a-B_}I:ΔpöP؆7ƴ _噃Vվ8v{7A `w{y|Dog^gk3}sοm%NW[ vѡJFqtwxis׮>Ta+m\6+^w`Q AfJ {>tK3W*h4vvX5o>OKҾU=O wqПl b0JpNlu990rgƄBMYO| mپЕ"*sЪ{q4'5՟H~%/PRo^ՃWl#o2C3YaʠOھ~y1 A}yaH&d'i k$_ק , >'*`/@LOr|\s1GؚFvl͂l?grt S>Ysil)tA9R,픬jEjxr?I`۸2-JW+Eڿ^2KIǬ^ǴH{V=Ju~ = lJG㬿Yν{@~kfa{ϫwʺي[r~ 6(M QتNKV3ad҉EрhH Ͽ}E >\V% lEUO# % ۳ϻnjd*MYRgcDU@ZE)sO>ϽYleTc.(, s/8"FC0_٪8TG/יJrщᚬ1[S.Pr>|RBlQK. .L‚l>TF#E 7JG! m sϿek`뀶*5;UN=ͨ<ðzIU -R3RN\25! 
҄0F7 ^ti`GvaiǫZO3 իճtrJθv O_!j -ؾel_1_=[3|'W=S4c0rlCAիi%):e%y^4"; [=_ fKռ-lI-9۠y XO3-آIh!ΐ0)LuP&u!j Wul0l8nْ, l{LROJ k׫P*hי"W"'9yMyH}@Z]tEy]Y`BkxpIA&W'0iJN4rӰ=ZԲulEh@4PH /q)Ub$`f<[ #K96bG$ ԮRUn@VR϶vOt>iрh@4P3H ZS _ХW\E.p2l*lE5#tigigрh65lᤂ`9[W%WPad7Yy ez*-рh@4H pR5KS_ p򫮡F\MFY~ZrB y|9 ԾR-;'X- W\u+IWXI+TTP9?y3*{*25/^5+r9T#}AIs  5:lqe'/l׌$W*;p+5VՇUpQu f*'$hUeVY>LC}|_1^[^umSoUK/ jSiܼFun7Mf^h@4  d4l_c؎!Q0˯ wF4F8ڳs[6I/ D` n^op0l΄qcil7Qoq)emRW' ҄ eʄ-4cxo-?RG4  ҂q,W_lk=6Iۘ[ͷJ'+NVh#t=u*`[=L` F+]ٛo lEh@4P4lMP7g5~'~]lųV4  ҂[M| #ڝ|{'jtdum.+^h } [8KQa-~nsw'l2Ih@4PH f7-{莻UVγ!kCwD`<*2WLyC,f=ۄb3ǹ^xqpdvY>鈅y R{)4S5a;ݯlkmֳuNtg믗R6r/;g/+dtr~ Siv5;s8wg;=qX Ԏ҂-[VF?-% ҂~D2?Pŝ˞%l-}[8} VtZqϛ$_&Z$=?;9/@Zx2(qgǟ|iZƷKx>'[9.EI:C> K75E`1;K1C=>E?3_i؈qSV= R4Tjrb/\K'L2ߋxqg[ՁB*F<9Fۭѝ4 a~)+3k#H]4 ȥ҂Iu4=ɷO2S|瞧gÞSO?ͻz^nd hly+¨z^^c# P`-z>̲UjP0KVn b0 IzmV.pgEiJ<=ru ˯°}i/({|mԳB[ -`׻u:Md]c­}>^,l|B/B9b5lqCa/;/V}wla OпeМ߳N6 VGB $8\w|Dii -I}4Kg+啯K/rl*/m(l0a[o֚ #d!vj{ ,Vϫsq~g J4 (P [8+^ZI+^v \7|g+*{y+>\9/1dѕlŝW_{^yuzUkŗ~ئP5[6u9\_'9K b~Kߙ0o^-z7ߤW_ꫜzuM >RW.uf@Z˯ `wxm|^gꫯs4G>C:h@4  ` '+ 3-;[oޡ7پm.HCr  EiN*c;wWl|#-!h@4 @je'2(x|}=~يg+[D@k -~N0d]\d {l5CoJ'+N&}D`7:Nk`` ;tB鄢рh5l9"p\c[vZn5l}}^zm1\g,;c9Ǣ@H rDxuc`Z{^[l2I- * ND}V>Գ*SʴLKRisNsZ0Cu(_R dhĵ#銫M2 V/ nD=[ʱS 麻zwcɪ}u dP" ҂UҥW^MF\x z?#ci^{lW6ʳg3NGW5^@?lSɅOΑh@4`k -[ ٓ6t/f龴3ŝ{07h(7<|gۘD+AVyۼ ɅT.@e5l[@쮬nCOlAP3p͝hճ m|=[tv@yhF4P?4lS~Q ]nK l|-` Ж;͂mB ~[K[JD0 ] [npٺzXb5VU*7Գ˅O4  Ҁm]vܝڏZsBYU!d v]r |i`9рh@4&l[wmOj٦{2׶f¦r&\y"nh@4  ҀmQ4m֣ԪloԢa #7RnQMG^!V: le9~/ϗ(!P Ҁm}=+[ l[p|m+j[DSg+;_94llܜK͋{PV]Nnֺ#vc&ņW3hÄ؊g'޽h@4 HMДiYK,{Zu u}ݤ`ۺ}Z3eζLF2 ҀmiԙԤE{jܢ ߶|nL=[,MְEY{/}@4P4 lIS+ڼXYlآ,B JHzрh@4PO4ls.\GYV VٰEYa4#TЩvP&+]b&UB/6S#2縞|UןQk9ע@*mZD&Md-3adlUe-` !86#UƮ窀k&⏬7['B:h@4 (\ ۿ6nAm҂vjڂaۂ֮ ltZ [O6MiY_Y' NTYf%% D9@:mΞk"mQluA:ޱU^.lXF;Rs+V4 Ha=f{jq9[F6K0$Uq jr{>jEрh l5p aˏ[{6Q=لa`Y!z9hणyE@j MejlEd+29rnE`ɔjV#!_.Hрh056lwܵYgS5 l SPr* 5&lwܥ)sFF(V<[E@}@ڰa @j6/n>5VЈD4a *u RjGdrAs- w n(lZtK  ?n֬ l5hGdrAs- w Ŝx{Ǔ/}@4P4+[H%iDрh5 A4  rmXBE+T$[ηh@4Z$1I4  ' lųh@4  X5 [I(#4*z ҂ ZB֖لʅH4   [[ұKǒ)Z L 
8l%P.Bрhi m鯍 RV:[}l{EYG`[E'9@}@` 64]쳕V:^Ѽh~k@`Uw/_4 jVD'рh@4P5 V2LjD@5 q'u/+VPyzN"  lSmYE9V3\\W S-sUt@JJ4 ȵ ><=꾂Y=@7HZ ߀mzs?'ʳU-/g~t\w|јh@4+ lix>ju=X.Ϋ5a!ޫ -?X|[-@i ->21SGl F'򼰪^0d4)l|)G_g+]s[ZZ4_`4úګ䷋w# G zUrg[аߚ4 , A lmبZӂ=ېz^Vyen0*,>/^˨>z7E`;y*I 5gmMF4ڇ;B-`~_TY^mvY`+¾+@öYvFl`A+-рh@4PHzE2[g~YmaG.rE@2 (ljK0fjdleQh@4  Ԓ҄NM[Nw|-!}׸-2(= ,*ߞff@qf)y٪\Zč@!K>6+V/*q6{|}L`w1Xd}I_WۧVE/-}N҂- uLe6˳ؖUwwV:p pAh fw瓴_mq%/}T;&9$A^ps?G5 [7qfÝȱgNueF~8՛'6<皩S*f<s-3f`|Lcz {+t%"XGI:ޣeǞs~;R'H.+ѾQt|mm êx6TJQP6ώ,vCD?cOlTRoVivWL˺u2"y2LC׆|_wȸzq;pV [s!1ޯk{ʥTjt:|ywhqg{ύo6KVjq0%X|l"?AOh2C|zqwG~t~qBMc/9z5!1خa…D^U6,tWo6yrI`u#quVl>?-B?{P'gΚpP,➯6l"B7_p<ϟ7A I6qP{PX-$x|Wzӑ7Qk@l77yQs!ZwV:|\yƟ[{ǿlsaϪG[:iE=[4COx u8Z`ƭk?Ō&`#MW/IE1:7a]s]h5p l:\ X8pƇKמ~kzrrl4(ݹ[g[G`&Z#rSگr%JͳMviFg+}Q9s  ҆mSTs,M=>21`gk / nK I#_(]hS\OF{ij @mֺ5y[x,V`+MSY'рhhFaFމ| 0tQ8E@u4P mV$J D@ ֟]أm<&Z8 lUJ&`1יPr{et- ҂3yq{j޺=b۰QFlQ'h#R|*'O8*|fwR#9 Dta-v%cbu&%4atjZ:1lNa9NSO3sb/1me;߭SuJϯ/C·hf5lhYTTܑێ Z@#W`um{.N&Nɰ-I [-DUQ |%lju~yf;h4*lK:S[bvanmmǰm.;`M2hlU _.IQ 7Po3zO^t֢@:mljѦ+[7jٶ;lד6lL 6oo{ҮE=غ.EԼeHfb=ʹ`E_CA-z!9 [^PFBM[&r[Bdz:\ӌ$&z=[uf]]ƽ_."5}͉5l4l0hQqAԦjuO퇴v [Ynp[IK*m/AD`;m\h*mm?ڸ#v;*6/DM[x#guaUVuFz@:mEfsڡԮ>ԡǁi ۝L³mHjYĦsbE@h -Ngض;GlFB͊ERWK[t>L{]΅ рh4lٖtİa佨C9 ϖjd,^m/j=AI-j!{D@4l[ry.t6Jދa.rnyʣUI-8Ԅ35J' D@m̫jj" @v8mc΍<-Am5|tBрh@4 ȕҁ-'Pl{QkNhQ[J: R# k+RUԹqqDрh 4 l jlũ 83UZ;viA;pm[_=ۄ%j?q r DU@:-t8/r׺*߸a˅Sv]n?܌OL)6I!\1(OtU/"Wh } [kQ҉:ߝ?(v{;7ul9Ϊ'rzDWA7{N *+3N֋S)Յ|UT"Sc,/+ UNKBl=$kGJ/7+m*m*;H]϶\i[T6l:eoզ<ۦVt@d߬^tޠ4P=q (4plt7諯 (׮JW_ Cݹ0ȹs%HWցl nxg[N0&\ k!06km^"N-`M.>ou]:l_u_:oWSS4Pw4 ph9h¤u Js`Qa+ ,׷Zpa(؆_zk0+:4Mra;9WrDjV`W$ r}r´|~_e~6ݎ*))7MlǗ-[/ riVLNNрh } l%Md%/ҦҦkFa}YjzV{AIKDl lųV4  rmXFx2 DVFрh@4c lQzf!>*=RVFiI>K$Y lkk.SH=ۚr1 @6jPF3pL=V/iQƫk|?]?~Lic԰NNh@g+:N-#Z zIņzqUkܼYyJ>:7~)ەz׉&D4 @vzK"{ml#$]܁_U5(^v\nEBν{@5 q֐"zsҶI[D5mO|\lD@i@`+рh@4c ls2rh@4 leD+ D9ր6 ,9.D@i@`+h@4  X5۵MZxem͍d*m- jWJfj^t0@U4w5vVz"rT\KhTA*2 3znd}^ŘzTOUNG:h@4 o l2w3@hCͺS"fپ$b\A~Ks-\P%'r9?U@uma|ڲAxx(计mS`~,{Lrr=E@~k֪&lەBΏрh QԴ"9۳ W딣Ӟgdل5=Ux<^Uap8ir1 DuKy[ǮW ,RRϛc] 
rgbyb!z2'sрhi `њԋ[#\jA>[  ԤRYԲm'jѦ#tP~Fڦa#jmZąså^t\K>[% DiV`LR RE. R:h@4 _܉ vDрh QܬJ϶~d/[4 l1o ˚JgϝM~_4P5P+B)DMC7rq79rI[ Eрh QҼ5Prڞ]#'{Ue c@Ҍ|$~Ȩ66   lq² T{S!}i} u'ؤoz竾\wf*QUVS7k=_^ml[~46o`; Fǫ줳4scKR7+Cg(  6X ջڟgkכEج FNROC-(4rvY^Y8kP <6hP]zBT"9rn ][&NferƇt#ayy`eҬ$zJR63цZ,zp=+5Oq6E[4 G l-a$nȉV{!9n^/$m~벅v99Couׅ%(~рh HyLYl|X}7ݨnj>fH7ެ@h/mwko~|ml&i5յq IޯkCU\"& U lj#qK^8D H^1NjeF!W!Dmx,Rmr j x5r%ފdl#/ j@`+T DkV`IS[dt$DmG3Ngs%J4 ȕjMZmlӯZXls% \8Dx  lqӯ9HdZV:D|66 rԳue#&g qgEG4  Գ5r Wtt:h@4jo 4^,l?|'^>O.&рh4g~ԳoZ\4  jM[%"9ȳzNY<ۚm^Z4  lS7clcDǐDij `ϒ" Dm䋘8& 5P+ż-lrjdU4   O[leh@4  X5 fۑ^,md4.T4  AY`+Q:h@4 6ǡ(*r1 D~ l2W# D9@fPEsET]F2 D e[.+dGK"fD@vFj^ܞ")۳[ˍt8QPt^Rϓ.-w"㫧zzHG(HΑh57@ed׷Fz-`lV <L}E|U\!1<ެYDί_@2 l} 8!5FsCU `DSA2 DB@Z6cjיZD-tT#4lD MCZF֞m(lM0F̃FbK o7E:pj oDU@)jdaۀiy 5=P1< e/n2<_][J̪t4y\E[y[sAQsտ)`,R-ަ ̭?+'_οh~j o`+.]4 Hm9[m7n #ׇ(рh@4P4*lR[Yрh@4 04*lų59 DH7RH4  ? ]6'V<' h9 Dl{=Zd M2%˦Níi/k?@` tQTAHM>7+?"xIIDQ-@W__짟~#}7g}NoK*6֞mxJ'K0VBрh@4Pl'Mo~W_/f~A3ӆ7ަMЄ ##g#nNs-kIdn a\K/8,а՞͠W׿~'?>~v+o*6"gΖ=[6}4ܖ)tA%SqnʦϜM͊Z> cwE^4 M _@?GB[|H6l5k6oG[![Ocnk{kl6ڎ6nL]P;eiԼUqJX^ ۰zծ+,m-m- Ҁm-hTܙN]=۰3H5ږ4v{ڸy uu5YsS5$ ;rv9mvksd%-#{рh@4P;H-Z9ԦGn73p[6mz Fn((jӾf`Tn%FV%JXoW^;Bvv jSiiV4c\jׇzQlϴyg>{נ=}t1TMXo6O|\lD@h 6/)Y SA ummܺm=`mV8 ݏWټ.lu)U}*gFgF*'Nvv ԼҀmQ4{Bא96}O`?`;`Ra+h P)rj2𸐼JjzL$#Gŭ/na V&⋯BmW#Ԟ*,EtvӀ=b;PK`+'ȱE@5`hС4s#={,ضґ<*ݗm?ۗ6!m~gjVJ'6 D ^pT1{6=䓞˴z@ض։>q9o{ۿ0lysnCg +JS4   ZildGnuH `gHjdrA YF?s$>k 0lXeBʞg֟C>ngbjݶ@@ėDuXi[v@s=F=އzh;ho^ !m#/jԢ=bJjѲ (V@k 0rhc >2P!Fkܴy+u3(BƢV%"Xb lH? #7oۆf=:vߍ6m}N!-ԱG?U>4 BINWU#mey\Q=*5Ҟ@logʕ\gCjf4sY嚶]#;V`+ Ҁmcz9qkQVQ4Tw]ضlۉ`Sx"!Kb\ٸ{^lWwp\Jy&wOshlsc~u6 \r- m]^h76eΜEw8FضnEAEʦNgm. o]wߗ^s&5Nm>? zq߬z=o<<3 $2Qg?O`^' hk 6WaDӊ[[lU|~0Xc>Qya'L %'^,x N\?U#¶mت02^TV_zN #jzea'K@ KӳE d ?IV}+K`+؊ XlC`Il0rg Ƅa´zN1yY= " -I[5՟ߘɎ߿HInXs!Z[>؊h64`5lW?~{U u6{ﭥ7zVzo '~`^ad-`!&߂98ilD+x5$ hhU4믿enQ`}۫ߢW D-W l!dE2K)鷩\MEyv…W_ѯJ~ʻ??>oV.޳{4|س,r`@ki#͛ZƂ-R1 '5~mIǮI-8,Zl B#[GވC.r Dh &#k*M1;;V`+ D4`FN0m:#&yJ;D@gy- r~D@Yت{]TA4.UB"t+)) E (lv \ԵdԳmҬfœ$,рh@4P5P3:{7m]{S=3f-e!xрh@4P-? 
5u46lmڼ:@Rn}hԬEn`Ѩ?9@-@۷o_ԩ]{A\p7NA8fy="hiV0u;ٍ:@̧V%[ъD@Ak ʳ=謳΢QFԌVR lCa-[?ރ^RGEqۂn4FD2 Du[as:|ҢE@LR>϶S'iG^р=CUbjզVFрh@4Pа|)“]dAEPni9{0 @h X ,Ѻ='O4 HC?ʁ 4K5 BFO;>oGѰ!*.XJxm?D>C:h@4  4l0@gmrWku ܣi G{p|m9.鸢рh@44l,X X6g(4xRR(MDgA4  4lÀ1 K[#qБh톑ų^D׀mLO[@jѠjUN"w-F΍hf4a hVbHm}|HOxO!?'MƯ׷v> e%tP[qǂSݢ55-jM6iBxnܴ +XӢV 1w?|F+zj31iрh@4 5p ']h?q:k=a*EC .L=y h4k|YmͪLg[~^?Sm}ߋp ~Y*a1cONϚl cQV1f*2M>L6&O irMl ?cL#M16aTMB {N&NGblWe֦Sy]v|Go. ԃ+u]5o[BmV{-JÆV9~Mq~--=num:Ryq{j֎ִU[&-ېi[PZTLҼ5EZm;5mIq&-ȴ4.8qTYafg޹)USmgg~tyyњҷJoA`]‚|<ߒ~P?N}/%7A}%f?I'*zX5mav? OP_ҿP4[tljܤ)APYou  ;T R`m:u15^t>6@M43fBV54.J mٶ k Y-r ZZynħ6=۫\CՃ-h-{ϏΧÎ:+" U@ k@"̫}!3 TM]tJN`0h0]\,\b*Tر$&" @ąɹmXXW6N0:3imTYآ/ա\Ml`_, or by directly using the `REST API `_. Tools for using Manila ~~~~~~~~~~~~~~~~~~~~~~ Contents: .. toctree:: :maxdepth: 1 user/index Using the Manila API ~~~~~~~~~~~~~~~~~~~~ All features of Manila are exposed via a REST API that can be used to build more complicated logic or automation with Manila. This can be consumed directly or via various SDKs. The following resources can help you get started consuming the API directly: * `Manila API `_ * :doc:`Manila microversion history ` For operators ------------- This section has details for deploying and maintaining Manila services. Installing Manila ~~~~~~~~~~~~~~~~~ Manila can be configured standalone using the configuration setting ``auth_strategy = noauth``, but in most cases you will want to at least have the `Keystone `_ Identity service and other `OpenStack services `_ installed. .. toctree:: :maxdepth: 1 install/index Administrating Manila ~~~~~~~~~~~~~~~~~~~~~ Contents: .. toctree:: :maxdepth: 1 admin/index Reference ~~~~~~~~~ Contents: .. toctree:: :maxdepth: 1 configuration/index cli/index Additional resources ~~~~~~~~~~~~~~~~~~~~ * `Manila release notes `_ For contributors ---------------- If you are a ``new contributor`` :doc:`start here `. .. 
toctree:: :maxdepth: 1 contributor/index API Microversions Additional reference ~~~~~~~~~~~~~~~~~~~~ Contents: .. toctree:: :maxdepth: 1 reference/index .. only:: html Additional reference ~~~~~~~~~~~~~~~~~~~~ Contents: * :ref:`genindex` ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.809673 manila-21.0.0/doc/source/install/0000775000175000017500000000000000000000000016571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.813673 manila-21.0.0/doc/source/install/common/0000775000175000017500000000000000000000000020061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/controller-node-common-configuration.rst0000664000175000017500000000444300000000000030061 0ustar00zuulzuul000000000000003. Complete the rest of the configuration in ``manila.conf``: * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[DEFAULT]`` section, set the following config values: .. code-block:: ini [DEFAULT] ... default_share_type = default_share_type share_name_template = share-%s rootwrap_config = /etc/manila/rootwrap.conf api_paste_config = /etc/manila/api-paste.ini .. important:: The ``default_share_type`` option specifies the default share type to be used when shares are created without specifying the share type in the request. The default share type that is specified in the configuration file has to be created with the necessary required extra-specs (such as ``driver_handles_share_servers``) set appropriately with reference to the driver mode used. 
This is further explained in the section discussing the setup and configuration of the share node. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. code-block:: ini [DEFAULT] ... auth_strategy = keystone [keystone_authtoken] ... memcached_servers = controller:11211 www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = manila password = MANILA_PASS Replace ``MANILA_PASS`` with the password you chose for the ``manila`` user in the Identity service. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the management interface IP address of the controller node: .. code-block:: ini [DEFAULT] ... my_ip = 10.0.0.11 * In the ``[oslo_concurrency]`` section, configure the lock path: .. code-block:: ini [oslo_concurrency] ... lock_path = /var/lock/manila ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/controller-node-prerequisites.rst0000664000175000017500000002313400000000000026626 0ustar00zuulzuul00000000000000Prerequisites ------------- Before you install and configure the Shared File Systems service, you must create a database, service credentials, and `API endpoints`. #. To create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p * Create the `manila` database: .. code-block:: console CREATE DATABASE manila; * Grant proper access to the ``manila`` database: .. code-block:: console GRANT ALL PRIVILEGES ON manila.* TO 'manila'@'localhost' \ IDENTIFIED BY 'MANILA_DBPASS'; GRANT ALL PRIVILEGES ON manila.* TO 'manila'@'%' \ IDENTIFIED BY 'MANILA_DBPASS'; Replace ``MANILA_DBPASS`` with a suitable password. * Exit the database access client. #. 
Source the ``admin`` credentials to gain access to admin CLI commands: .. code-block:: console $ . admin-openrc.sh #. To create the service credentials, complete these steps: * Create a ``manila`` user: .. code-block:: console $ openstack user create --domain default --password-prompt manila User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | e0353a670a9e496da891347c589539e9 | | enabled | True | | id | 83a3990fc2144100ba0e2e23886d8acc | | name | manila | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ * Add the ``admin`` role to the ``manila`` user: .. code-block:: console $ openstack role add --project service --user manila admin .. note:: This command provides no output. * Create the ``manila`` and ``manilav2`` service entities: .. code-block:: console $ openstack service create --name manila \ --description "OpenStack Shared File Systems" share +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Shared File Systems | | enabled | True | | id | 82378b5a16b340aa9cc790cdd46a03ba | | name | manila | | type | share | +-------------+----------------------------------+ .. code-block:: console $ openstack service create --name manilav2 \ --description "OpenStack Shared File Systems V2" sharev2 +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Shared File Systems V2 | | enabled | True | | id | 30d92a97a81a4e5d8fd97a32bafd7b88 | | name | manilav2 | | type | sharev2 | +-------------+----------------------------------+ .. note:: The Shared File Systems services require two service entities. #. Create the Shared File Systems service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ share public http://controller:8786/v1/%\(tenant_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 0bd2bbf8d28b433aaea56a254c69f69d | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 82378b5a16b340aa9cc790cdd46a03ba | | service_name | manila | | service_type | share | | url | http://controller:8786/v1/%(project_id)s | +--------------+------------------------------------------+ $ openstack endpoint create --region RegionOne \ share internal http://controller:8786/v1/%\(tenant_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | a2859b5732cc48b5b083dd36dafb6fd9 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 82378b5a16b340aa9cc790cdd46a03ba | | service_name | manila | | service_type | share | | url | http://controller:8786/v1/%(project_id)s | +--------------+------------------------------------------+ $ openstack endpoint create --region RegionOne \ share admin http://controller:8786/v1/%\(tenant_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | f7f46df93a374cc49c0121bef41da03c | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 82378b5a16b340aa9cc790cdd46a03ba | | service_name | manila | | service_type | share | | url | http://controller:8786/v1/%(project_id)s | +--------------+------------------------------------------+ .. 
code-block:: console $ openstack endpoint create --region RegionOne \ sharev2 public http://controller:8786/v2 +--------------+-----------------------------------------+ | Field | Value | +--------------+-----------------------------------------+ | enabled | True | | id | d63cc0d358da4ea680178657291eddc1 | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 30d92a97a81a4e5d8fd97a32bafd7b88 | | service_name | manilav2 | | service_type | sharev2 | | url | http://controller:8786/v2 | +--------------+-----------------------------------------+ $ openstack endpoint create --region RegionOne \ sharev2 internal http://controller:8786/v2 +--------------+-----------------------------------------+ | Field | Value | +--------------+-----------------------------------------+ | enabled | True | | id | afc86e5f50804008add349dba605da54 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 30d92a97a81a4e5d8fd97a32bafd7b88 | | service_name | manilav2 | | service_type | sharev2 | | url | http://controller:8786/v2 | +--------------+-----------------------------------------+ $ openstack endpoint create --region RegionOne \ sharev2 admin http://controller:8786/v2 +--------------+-----------------------------------------+ | Field | Value | +--------------+-----------------------------------------+ | enabled | True | | id | e814a0cec40546e98cf0c25a82498483 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 30d92a97a81a4e5d8fd97a32bafd7b88 | | service_name | manilav2 | | service_type | sharev2 | | url | http://controller:8786/v2 | +--------------+-----------------------------------------+ .. note:: The Shared File Systems services require endpoints for each service entity. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/dhss-false-mode-configuration.rst0000664000175000017500000000633100000000000026436 0ustar00zuulzuul000000000000002. Create the LVM physical volume ``/dev/sdc``: .. code-block:: console # pvcreate /dev/sdc Physical volume "/dev/sdc" successfully created #. Create the LVM volume group ``manila-volumes``: .. code-block:: console # vgcreate manila-volumes /dev/sdc Volume group "manila-volumes" successfully created The Shared File Systems service creates logical volumes in this volume group. #. Only instances can access Shared File Systems service volumes. However, the underlying operating system manages the devices associated with the volumes. By default, the LVM volume scanning tool scans the ``/dev`` directory for block storage devices that contain volumes. If projects use LVM on their volumes, the scanning tool detects these volumes and attempts to cache them which can cause a variety of problems with both the underlying operating system and project volumes. You must reconfigure LVM to scan only the devices that contain the ``cinder-volume`` and ``manila-volumes`` volume groups. Edit the ``/etc/lvm/lvm.conf`` file and complete the following actions: * In the ``devices`` section, add a filter that accepts the ``/dev/sdb`` and ``/dev/sdc`` devices and rejects all other devices: .. code-block:: ini devices { ... filter = [ "a/sdb/", "a/sdc", "r/.*/"] .. warning:: If your storage nodes use LVM on the operating system disk, you must also add the associated device to the filter. For example, if the ``/dev/sda`` device contains the operating system: .. code-block:: ini filter = [ "a/sda/", "a/sdb/", "a/sdc", "r/.*/"] Similarly, if your compute nodes use LVM on the operating system disk, you must also modify the filter in the ``/etc/lvm/lvm.conf`` file on those nodes to include only the operating system disk. 
For example, if the ``/dev/sda`` device contains the operating system: .. code-block:: ini filter = [ "a/sda/", "r/.*/"] Configure components -------------------- #. Edit the ``/etc/manila/manila.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, enable the LVM driver and the NFS protocol: .. code-block:: ini [DEFAULT] ... enabled_share_backends = lvm enabled_share_protocols = NFS .. note:: Back end names are arbitrary. As an example, this guide uses the name of the driver. * In the ``[lvm]`` section, configure the LVM driver: .. code-block:: ini [lvm] share_backend_name = LVM share_driver = manila.share.drivers.lvm.LVMShareDriver driver_handles_share_servers = False lvm_share_volume_group = manila-volumes lvm_share_export_ips = MANAGEMENT_INTERFACE_IP_ADDRESS Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network interface on your storage node. The value of this option can be a comma separated string of one or more IP addresses. In the example architecture shown below, the address would be 10.0.0.41: .. figure:: figures/hwreqs.png :alt: Hardware requirements **Hardware requirements**. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/dhss-false-mode-intro.rst0000664000175000017500000000076100000000000024723 0ustar00zuulzuul00000000000000Shared File Systems Option 1: No driver support for share servers management ---------------------------------------------------------------------------- For simplicity, this configuration references the same storage node configuration for the Block Storage service. However, the LVM driver requires a separate empty local block storage device to avoid conflict with the Block Storage service. The instructions use ``/dev/sdc``, but you can substitute a different value for your particular node. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/dhss-false-mode-using-shared-file-systems.rst0000664000175000017500000002456500000000000030613 0ustar00zuulzuul00000000000000Creating shares with Shared File Systems Option 1 (DHSS = False) ---------------------------------------------------------------- Create a share type ------------------- Disable DHSS (``driver_handles_share_servers``) before creating a share using the LVM driver. #. Source the admin credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. Create a default share type with DHSS disabled. A default share type will allow you to create shares with this driver, without having to specify the share type explicitly during share creation. .. code-block:: console $ manila type-create default_share_type False +----------------------+--------------------------------------+ | Property | Value | +----------------------+--------------------------------------+ | required_extra_specs | driver_handles_share_servers : False | | Name | default_share_type | | Visibility | public | | is_default | - | | ID | 3df065c8-6ca4-4b80-a5cb-e633c0439097 | | optional_extra_specs | snapshot_support : True | +----------------------+--------------------------------------+ Set this default share type in ``manila.conf`` under the ``[DEFAULT]`` section and restart the ``manila-api`` service before proceeding. Unless you do so, the default share type will not be effective. .. note:: Creating and configuring a default share type is optional. If you wish to use the shared file system service with a variety of share types, where each share creation request could specify a type, please refer to the Share types usage documentation `here `_. Create a share -------------- #. Source the ``demo`` credentials to perform the following steps as a non-administrative project: .. code-block:: console $ . 
demo-openrc #. Create an NFS share. Since a default share type has been created and configured, it need not be specified in the request. .. code-block:: console $ manila create NFS 1 --name share1 +-----------------------------+--------------------------------------+ | Property | Value | +-----------------------------+--------------------------------------+ | status | creating | | share_type_name | default_share_type | | description | None | | availability_zone | None | | share_network_id | None | | share_group_id | None | | host | | | access_rules_status | active | | snapshot_id | None | | is_public | False | | task_state | None | | snapshot_support | True | | id | 55c401b3-3112-4294-aa9f-3cc355a4e361 | | size | 1 | | name | share1 | | share_type | 3df065c8-6ca4-4b80-a5cb-e633c0439097 | | has_replicas | False | | replication_type | None | | created_at | 2016-03-30T19:10:33.000000 | | share_proto | NFS | | project_id | 3a46a53a377642a284e1d12efabb3b5a | | metadata | {} | +-----------------------------+--------------------------------------+ #. After some time, the share status should change from ``creating`` to ``available``: .. code-block:: console $ manila list +--------------------------------------+--------+------+-------------+-----------+-----------+--------------------+-----------------------------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+--------+------+-------------+-----------+-----------+--------------------+-----------------------------+-------------------+ | 55c401b3-3112-4294-aa9f-3cc355a4e361 | share1 | 1 | NFS | available | False | default_share_type | storage@lvm#lvm-single-pool | nova | +--------------------------------------+--------+------+-------------+-----------+-----------+--------------------+-----------------------------+-------------------+ #. Determine export IP address of the share: .. 
code-block:: console $ manila show share1 +-----------------------------+------------------------------------------------------------------------------------+ | Property | Value | +-----------------------------+------------------------------------------------------------------------------------+ | status | available | | share_type_name | default_share_type | | description | None | | availability_zone | nova | | share_network_id | None | | share_group_id | None | | export_locations | | | | path = 10.0.0.41:/var/lib/manila/mnt/share-8e13a98f-c310-41df-ac90-fc8bce4910b8 | | | id = 3c8d0ada-cadf-48dd-85b8-d4e8c3b1e204 | | | preferred = False | | host | storage@lvm#lvm-single-pool | | access_rules_status | active | | snapshot_id | None | | is_public | False | | task_state | None | | snapshot_support | True | | id | 55c401b3-3112-4294-aa9f-3cc355a4e361 | | size | 1 | | name | share1 | | share_type | c6dfcfc6-9920-420e-8b0a-283d578efef5 | | has_replicas | False | | replication_type | None | | created_at | 2016-03-30T19:10:33.000000 | | share_proto | NFS | | project_id | 3a46a53a377642a284e1d12efabb3b5a | | metadata | {} | +-----------------------------+------------------------------------------------------------------------------------+ Allow access to the share ------------------------- #. Configure access to the new share before attempting to mount it via the network. The compute instance (whose IP address is referenced by the INSTANCE_IP below) must have network connectivity to the network specified in the share network. .. 
code-block:: console $ manila access-allow share1 ip INSTANCE_IP +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | share_id | 55c401b3-3112-4294-aa9f-3cc355a4e361 | | access_type | ip | | access_to | 10.0.0.46 | | access_level | rw | | state | new | | id | f88eab01-7197-44bf-ad0f-d6ca6f99fc96 | +--------------+--------------------------------------+ Mount the share on a compute instance ------------------------------------- #. Log into your compute instance and create a folder where the mount will be placed: .. code-block:: console $ mkdir ~/test_folder #. Mount the NFS share in the compute instance using the export location of the share: .. code-block:: console # mount -vt nfs 10.0.0.41:/var/lib/manila/mnt/share-8e13a98f-c310-41df-ac90-fc8bce4910b8 ~/test_folder ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/dhss-true-mode-configuration.rst0000664000175000017500000000644200000000000026326 0ustar00zuulzuul00000000000000Configure components -------------------- #. Edit the ``/etc/manila/manila.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, enable the generic driver and the NFS protocol: .. code-block:: ini [DEFAULT] ... enabled_share_backends = generic enabled_share_protocols = NFS .. note:: Back end names are arbitrary. As an example, this guide uses the name of the driver. * In the ``[neutron]``, ``[nova]``, ``[cinder]`` and ``[glance]`` sections, enable authentication for those services: .. code-block:: ini [neutron] ... url = http://controller:9696 www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default region_name = RegionOne project_name = service username = neutron password = NEUTRON_PASS [nova] ... 
www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default region_name = RegionOne project_name = service username = nova password = NOVA_PASS [cinder] ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default region_name = RegionOne project_name = service username = cinder password = CINDER_PASS [glance] ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = Default user_domain_name = Default region_name = RegionOne project_name = service username = glance password = GLANCE_PASS * In the ``[generic]`` section, configure the generic driver: .. code-block:: ini [generic] share_backend_name = GENERIC share_driver = manila.share.drivers.generic.GenericShareDriver driver_handles_share_servers = True service_instance_flavor_id = 100 service_image_name = manila-service-image service_instance_user = manila service_instance_password = manila interface_driver = manila.network.linux.interface.BridgeInterfaceDriver .. note:: You can also use SSH keys instead of password authentication for service instance credentials. .. important:: The ``service_image_name``, ``service_instance_flavor_id``, ``service_instance_user`` and ``service_instance_password`` are with reference to the service image that is used by the driver to create share servers. A sample service image for use with the ``generic`` driver is available in the ``manila-image-elements`` project. Its creation is explained in the post installation steps (See: :ref:`post-install`). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/dhss-true-mode-intro.rst0000664000175000017500000000157600000000000024615 0ustar00zuulzuul00000000000000Shared File Systems Option 2: Driver support for share servers management ------------------------------------------------------------------------- For simplicity, this configuration references the same storage node as the one used for the Block Storage service. .. note:: This guide describes how to configure the Shared File Systems service to use the ``generic`` driver with the driver handles share server mode (DHSS) enabled. This driver requires Compute service (nova), Image service (glance) and Networking service (neutron) for creating and managing share servers; and Block storage service (cinder) for creating shares. The information used for creating share servers is configured as share networks. Generic driver with DHSS enabled also requires the tenant's private network (where the compute instances are running) to be attached to a public router. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/dhss-true-mode-using-shared-file-systems.rst0000664000175000017500000004155400000000000030475 0ustar00zuulzuul00000000000000Creating shares with Shared File Systems Option 2 (DHSS = True) --------------------------------------------------------------- Before being able to create a share, manila with the generic driver and the DHSS (``driver_handles_share_servers``) mode enabled requires the definition of at least an image, a network and a share-network for being used to create a share server. For that `back end` configuration, the share server is an instance where NFS shares are served. .. note:: This configuration automatically creates a cinder volume for every share. 
The cinder volumes are attached to share servers according to the definition of a share network. #. Source the admin credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc.sh #. Create a default share type with DHSS enabled. A default share type will allow you to create shares with this driver, without having to specify the share type explicitly during share creation. .. code-block:: console $ manila type-create default_share_type True +----------------------+--------------------------------------+ | Property | Value | +----------------------+--------------------------------------+ | required_extra_specs | driver_handles_share_servers : True | | Name | default_share_type | | Visibility | public | | is_default | - | | ID | 8a35da28-0f74-490d-afff-23664ecd4f01 | | optional_extra_specs | snapshot_support : True | +----------------------+--------------------------------------+ Set this default share type in ``manila.conf`` under the ``[DEFAULT]`` section and restart the ``manila-api`` service before proceeding. Unless you do so, the default share type will not be effective. .. note:: Creating and configuring a default share type is optional. If you wish to use the shared file system service with a variety of share types, where each share creation request could specify a type, please refer to the Share types usage documentation `here `_. #. Create a manila share server image in the Image service. You may skip this step and use any existing image. However, for mounting a share, the service image must contain the NFS packages as appropriate for the operating system. Whatever image you choose to be the service image, be sure to set the configuration values ``service_image_name``, ``service_instance_flavor_id``, ``service_instance_user`` and ``service_instance_password`` in ``manila.conf``. .. 
note:: Any changes made to ``manila.conf`` while the ``manila-share`` service is running will require a restart of the service to be effective. .. note:: As an alternative to specifying a plain-text ``service_instance_password`` in your configuration, a key-pair may be specified with options ``path_to_public_key`` and ``path_to_private_key`` to configure and allow password-less SSH access between the `share node` and the share server/s created. .. code-block:: console $ curl -L \ https://tarballs.opendev.org/openstack/manila-image-elements/images/manila-service-image-master.qcow2 | \ glance image-create \ --name "manila-service-image" \ --disk-format qcow2 \ --container-format bare \ --visibility public --progress % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 3008k 100 3008k 0 0 1042k 0 0:00:02 0:00:02 --:--:-- 1041k +------------------+----------------------------------------------------------------------------------+ | Property | Value | +------------------+----------------------------------------------------------------------------------+ | checksum | 48a08e746cf0986e2bc32040a9183445 | | container_format | bare | | created_at | 2016-01-26T19:52:24Z | | direct_url | rbd://3c3a4cbc-7331-4fc1-8cbb-79213b9cebff/images/ff97deff-b184-47f8-827c- | | | 16c349c82720/snap | | disk_format | qcow2 | | id | 1fc7f29e-8fe6-44ef-9c3c-15217e83997c | | locations | [{"url": "rbd://3c3a4cbc-7331-4fc1-8cbb-79213b9cebff/images/ff97deff-b184-47f8 | | | -827c-16c349c82720/snap", "metadata": {}}] | | min_disk | 0 | | min_ram | 0 | | name | manila-service-image | | owner | e2c965830ecc4162a002bf16ddc91ab7 | | protected | False | | size | 306577408 | | status | active | | tags | [] | | updated_at | 2016-01-26T19:52:28Z | | virtual_size | None | | visibility | public | +------------------+----------------------------------------------------------------------------------+ #. 
List available networks in order to get id and subnets of the private network: .. code-block:: console $ neutron net-list +--------------------------------------+---------+----------------------------------------------------+ | id | name | subnets | +--------------------------------------+---------+----------------------------------------------------+ | 0e62efcd-8cee-46c7-b163-d8df05c3c5ad | public | 5cc70da8-4ee7-4565-be53-b9c011fca011 10.3.31.0/24 | | 7c6f9b37-76b4-463e-98d8-27e5686ed083 | private | 3482f524-8bff-4871-80d4-5774c2730728 172.16.1.0/24 | +--------------------------------------+---------+----------------------------------------------------+ #. Source the ``demo`` credentials to perform the following steps as a non-administrative project: .. code-block:: console $ . demo-openrc.sh .. code-block:: console $ manila share-network-create --name demo-share-network1 \ --neutron-net-id PRIVATE_NETWORK_ID \ --neutron-subnet-id PRIVATE_NETWORK_SUBNET_ID +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | name | demo-share-network1 | | segmentation_id | None | | created_at | 2016-01-26T20:03:41.877838 | | neutron_subnet_id | 3482f524-8bff-4871-80d4-5774c2730728 | | updated_at | None | | network_type | None | | neutron_net_id | 7c6f9b37-76b4-463e-98d8-27e5686ed083 | | ip_version | None | | cidr | None | | project_id | e2c965830ecc4162a002bf16ddc91ab7 | | id | 58b2f0e6-5509-4830-af9c-97f525a31b14 | | description | None | +-------------------+--------------------------------------+ Create a share -------------- #. Create an NFS share using the share network. Since a default share type has been created and configured, it need not be specified in the request. .. 
code-block:: console $ manila create NFS 1 --name demo-share1 --share-network demo-share-network1 +-----------------------------+--------------------------------------+ | Property | Value | +-----------------------------+--------------------------------------+ | status | None | | share_type_name | default_share_type | | description | None | | availability_zone | None | | share_network_id | 58b2f0e6-5509-4830-af9c-97f525a31b14 | | share_group_id | None | | host | None | | snapshot_id | None | | is_public | False | | task_state | None | | snapshot_support | True | | id | 016ca18f-bdd5-48e1-88c0-782e4c1aa28c | | size | 1 | | name | demo-share1 | | share_type | 8a35da28-0f74-490d-afff-23664ecd4f01 | | created_at | 2016-01-26T20:08:50.502877 | | export_location | None | | share_proto | NFS | | project_id | 48e8c35b2ac6495d86d4be61658975e7 | | metadata | {} | +-----------------------------+--------------------------------------+ #. After some time, the share status should change from ``creating`` to ``available``: .. code-block:: console $ manila list +--------------------------------------+-------------+------+-------------+-----------+-----------+------------------------+-----------------------------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+-------------+------+-------------+-----------+-----------+------------------------+-----------------------------+-------------------+ | 5f8a0574-a95e-40ff-b898-09fd8d6a1fac | demo-share1 | 1 | NFS | available | False | default_share_type | storagenode@generic#GENERIC | nova | +--------------------------------------+-------------+------+-------------+-----------+-----------+------------------------+-----------------------------+-------------------+ #. Determine export IP address of the share: .. 
code-block:: console $ manila show demo-share1 +-----------------------------+------------------------------------------------------------------------------------+ | Property | Value | +-----------------------------+------------------------------------------------------------------------------------+ | status | available | | share_type_name | default_share_type | | description | None | | availability_zone | nova | | share_network_id | 58b2f0e6-5509-4830-af9c-97f525a31b14 | | share_group_id | None | | export_locations | | | | path = 10.254.0.6:/shares/share-0bfd69a1-27f0-4ef5-af17-7cd50bce6550 | | | id = e525cbca-b3cc-4adf-a1cb-b1bf48fa2422 | | | preferred = False | | host | storagenode@generic#GENERIC | | access_rules_status | active | | snapshot_id | None | | is_public | False | | task_state | None | | snapshot_support | True | | id | 5f8a0574-a95e-40ff-b898-09fd8d6a1fac | | size | 1 | | name | demo-share1 | | share_type | 8a35da28-0f74-490d-afff-23664ecd4f01 | | has_replicas | False | | replication_type | None | | created_at | 2016-03-30T19:10:33.000000 | | share_proto | NFS | | project_id | 48e8c35b2ac6495d86d4be61658975e7 | | metadata | {} | +-----------------------------+------------------------------------------------------------------------------------+ Allow access to the share ------------------------- #. Configure access to the new share before attempting to mount it via the network. The compute instance (whose IP address is referenced by the INSTANCE_IP below) must have network connectivity to the network specified in the share network. .. 
code-block:: console $ manila access-allow demo-share1 ip INSTANCE_IP +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | share_id | 5f8a0574-a95e-40ff-b898-09fd8d6a1fac | | access_type | ip | | access_to | 10.0.0.46 | | access_level | rw | | state | new | | id | aefeab01-7197-44bf-ad0f-d6ca6f99fc96 | +--------------+--------------------------------------+ Mount the share on a compute instance ------------------------------------- #. Log into your compute instance and create a folder where the mount will be placed: .. code-block:: console $ mkdir ~/test_folder #. Mount the NFS share in the compute instance using the export location of the share: .. code-block:: console $ mount -vt nfs 10.254.0.6:/shares/share-0bfd69a1-27f0-4ef5-af17-7cd50bce6550 ~/test_folder ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/share-node-common-configuration.rst0000664000175000017500000000462500000000000027002 0ustar00zuulzuul000000000000004. Complete the rest of the configuration in ``manila.conf``. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. code-block:: ini [DEFAULT] ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[DEFAULT]`` section, set the following config values: .. code-block:: ini [DEFAULT] ... default_share_type = default_share_type rootwrap_config = /etc/manila/rootwrap.conf .. important:: The ``default_share_type`` option specifies the default share type to be used when shares are created without specifying the share type in the request. 
The default share type that is specified in the configuration file has to be created with the necessary required extra-specs (such as ``driver_handles_share_servers``) set appropriately with reference to the driver mode used. This is explained in further steps. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. code-block:: ini [DEFAULT] ... auth_strategy = keystone [keystone_authtoken] ... memcached_servers = controller:11211 www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 auth_type = password project_domain_name = Default user_domain_name = Default project_name = service username = manila password = MANILA_PASS Replace ``MANILA_PASS`` with the password you chose for the ``manila`` user in the Identity service. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: .. code-block:: ini [DEFAULT] ... my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network interface on your share node, typically 10.0.0.41 for the first node in the example architecture shown below: .. figure:: figures/hwreqs.png :alt: Hardware requirements **Hardware requirements** * In the ``[oslo_concurrency]`` section, configure the lock path: .. code-block:: ini [oslo_concurrency] ... lock_path = /var/lib/manila/tmp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/common/share-node-share-server-modes.rst0000664000175000017500000000367000000000000026357 0ustar00zuulzuul00000000000000The share node can support two modes, with and without the handling of share servers. The mode depends on driver support. Option 1 -------- Deploying the service without driver support for share server management. In this mode, the service does not do anything related to networking. 
The operator must ensure network connectivity between instances and the NAS protocol based server. This tutorial demonstrates setting up the LVM driver which creates LVM volumes on the share node and exports them with the help of an NFS server that is installed locally on the share node. It therefore requires LVM and NFS packages as well as an additional disk for the ``manila-share`` LVM volume group. This driver mode may be referred to as ``driver_handles_share_servers = False`` mode, or simply ``DHSS=False`` mode. Option 2 -------- Deploying the service with driver support for share server management. In this mode, the service runs with a back end driver that creates and manages share servers. This tutorial demonstrates setting up the ``Generic`` driver. This driver requires Compute service (nova), Image service (glance) and Networking service (neutron) for creating and managing share servers; and Block storage service (cinder) for creating shares. The information used for creating share servers is configured with the help of share networks. This driver mode may be referred to as ``driver_handles_share_servers = True`` mode, or simply ``DHSS=True`` mode. .. warning:: When running the generic driver in ``DHSS=True`` driver mode, the share service should be run on the same node as the networking service. However, such a service may not be able to run the LVM driver that runs in ``DHSS=False`` driver mode effectively, due to a bug in some distributions of Linux. For more information, see LVM Driver section in the `Configuration Reference Guide `_. 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.813673 manila-21.0.0/doc/source/install/figures/0000775000175000017500000000000000000000000020235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/figures/hwreqs.graffle0000664000175000017500000000767200000000000023112 0ustar00zuulzuul00000000000000[W۸ǟ˧G%Y0m 30gX,(SNw?@RHI -֯_z~JG/ ~yN 7l{A~T}ݳN}čw;Gvfslqrt:k9/ټ6p^b8hDa_Gl.3:{!;msW϶>Wn]#FGA[j&A/uWGVsqN |?:4EN _Ax(c:r:_˭$gMmPgBڊLS0A6K,cl/y __4!k! I[e,%9'e;2k7,'9Sã{se+|v<1}@ a qu&L2-Rt~Z4?۾ *y<֭)vW&TIW囔nRҠKW%wI¨Ǎ?8Nyܴ\ǯ\*nKc|<_+he۴Rn*%9̃DZjyvTEҲ4)Dq aDJl_rNڜMM8©m3ܠɘbT۪ Zd$/k2 0脳ԥ .r%Mtg$ Q}qS q_-'ltϻv4Zh]lSXYrgqkZ͖g9eu<߿ᕟnKJr"Ir}Ӕlγ&eTR̥䅔G_K^V;:jee.=`AQvt5Stڲf@GN R6Sj>gK<&fm<;q];wC7tC KyT!;x^:@ǤQ<=HyLnu|}O2H}wyEHM䍁'o$ovjyj"Hj7ISiIr^/ x9g7": <&^wT6k49h_`ɒ6ЉH* 6<Rx{[e.p.;fcPͼ{X4Ra y7gqP c 60`XND"gMCm^e );UeLŵOY6 pCdy3hr !#9B!+Rx+RTF7-|*%1X$)8"\ ֔oĬB*JmX!M֧HL)bAJů"~惬q*djBVn!3RWfU*RRRIvϓUMRVéjFIs!" 
2miTTWDԏ-߫Vȫ^ī,I%8J%z5Jr5Vڝ10uviPnK"MpQWQWq-iQru(psXqow~G?^^8;c-> 6}w_ߞ#݉S4n0;%pD3$.!p̋JWΠRNx#mۍy4q 8L.>u{ ~{mޑ@o{J"-ɪ^;48DNTPih4 |V)4u 18f\1sO˯pR74\bG1Ou \YfLLGq 2Jg[ʚB_jRNiJQzMyV{闅h[TU>4WHga././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/figures/hwreqs.png0000664000175000017500000026132200000000000022262 0ustar00zuulzuul00000000000000PNG  IHDRvFNsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx]E֮sTL gTs8쬎,άb8|FTQ HJ%g6LgvfvR{^U#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0#0@( #A*ܕCv˖W ތ#0@D(f&mX&,O8H2.q60ZDP@D;0@XbF`@@Q Ey E,[oMSܕhV2Gb@07s\M?^WꝹT2e# e_FH2sVq]_[oXv?wc) i,f5Bh 5;hczF><\a=fδSٳg[oxxwoG􋗮HtYnR.v:ggE t>}w#m3`nNe4M E;+HwKJ;R~0!g2#kpFYr(D>Q^M6ʥ.G=ky=#ӅìekǾ)OZLGb!,ߕ '^S4mGނf#n[E(8^878oU:x ~[}c)jU Ct\BbRZ\p ]C9@K<\p*B;^S'N,өY%sauy驺.ߺe̺dY?U|/ҟS |("z 5&ؑ"s<н eڢئ(=fb2GTz-&q7^'M=Vռ"gn J. 
d?'-6' 9O~\ALcdfOs"%۳MӄrIOԣ_a 0'7yBރnESRf:'*i0^c]Mj P 7#AKkW޷سf8o"(>+caa%|>'=aSzk`ԕ<Eb+--' ˎ`Ggǘs`LG/Ҕ\Mt>ݣy *Dn{U{.|XQpRЫ{bT:}{ 2&(Y$,-џ2PIiFEcm|oVH,s ˫(Pt'*wpT(ɓq_2.uNJ +촪(Cul"VKgф.oAcBU-x|Iv}BNq1y[=|á'7PS<,65^A;J^M K YHzYizP0h16×J j1^S Tߠ>#zuV@9+ j/zy>nl?IRiVIS5$LS[5]v-wŎ'qy7gnZmϷ 0rۃU364@e0)BgS4gC3R}1,3s'œ2*t^Q-,t!t4~g9XIq@J[+Ii8FZiv.pN ùP>@qfOzlaeFu>DlV|dabPWƧׂBE|C`I9Od ݫF%B ]3i,^( fC /vI +1ԢX.$'96J%D&)@]ӴkL~OMXB=T?q.WEyڨ$w޹N"5Z”d"Y_/#"Шct"2: :CcwKu b7@i QPzF(gwe:,C|6^-=k^$SIIgXsAVEEK4U5@Q61'R>}GMwNØ.w޵jq ~YQvJ :"ԨB h%mGOOVB9EX$O"_z<yBvk00ѠDR=|+]̸Y۬=G}+vL8|}X0#`lza2#-+gE/R?km:Xoh,r2wY.b>h}Yޓ PoTA2}rlRxɓbD"ʹc@ǂHI Hv0P8Mt-G"ܼ1jO:e#OVp`5:xI>պH·>t^{-h~N+<@?x +C'=1GCC ,`,9*> i$^܍g^dmH'E:'z`~T1ԋ[`1+7Oyd$R\~0 9r@J{zu|deJ6h5>]7RJY^vU`E|2uC54ȞfP+aWś,ocֈjq&ٶ(9m #wH~ˣ?>O2[y &&GS,7rG(i F$|L_+hc$Igewf+ŢlBՏY!$`%94*#5wcmݓ RL7U4o{ĥߒ*|xE*J 7&3?N`"#rd|-#lyJL7QhjLaJt=Bs:b{as2%Ok)]U.(g}ffJV}kBn[oFm=m-͆c1҇ 揽6WL4 vH?<߃GJ")^J7vf0L| Rٱ֣:0`E92>hTvN&V}c0+/gU@a1Pi>p* ưX)JٶGglFl٬_cy-ӬkC`3 v,)_Cv3-}Pğž&?]!`n Β0;3hc#c0p PLh@Hk,c!rʠ#`Ք*+{tL#ś;_@5ә:Jiyp)Mke}fU9E0︍f2d3G$Y(mj+fڴ2_ynچM>'rC`C93lY'jLvcYEu>Bmг̽p ӓsoW;/wCIWSyp=88uMp$ðOYQt䯡޳#(+o˕CT.(r hZ%ā#ʬ{$Bm6GxŰ9>}bZi7L^̊*3i(8OC/:2dTʐ>š]<$ww$=O`@ mcon뉵8i )&נGRn(,c[}޷q91p!8xݳo} .np[|^g?> Jsxw﬊Sx3$,+0h ;#G7IFj#/lE/pF]섣Z@θ??0K gTw<1'``T)lʣ F irҠ#@\(HB[i@;Jϝ|q䭶(^YK}_;|yÄ@;X֋Xڻ SQ~d,; |f=;SJ%w~-6=pĤ;_`H j;ݢFxw$ܛZA.a8y-_doӱfg*΃}KYS ?-L "lO,ϼaۻ߂w`wr]1oxpo(HK'dM\zű :Իq{ 2 FAjSN.uLn"eF i`0ˎ`trf8oߒ2>Ǟ{fuƓϒO.嫛_T~fӺ)K?لP,¶!}7i{#7һMa<߁P/ vW)=ZJu3s&aڐ?ɓ]I e=]ŧz E%%GJ ܑ))3cA,Hd/lv!~/[,M4ig|dr,Fi pʹimJ M.TtxPK]E,Fk d*(0M܂~u]O}sD`5RGЗeEѮWOE E5U<(|d1MB^rb;MUXNS4ӘH6fܘvF  (cvy򽺴B2;o[cXINNSDCw<_D!IE9YNS2Hv)e`D`6- _ZZG'g\MSާN"[b "8F"FJs!6ir+vR)T؂KNVYTcm`Fܳ)g'Y$'eFYdn!/g%9Yt@nP_U'OI/:i! 
Q:{ͫ=2w#DyMd*ɔmReB_RgAe #`@xɑ[]nYN$Hޅ,LɬU͑\fF ^(pq61B#'S`95VN `\RF sHXQυ&Or)cAtJ",Ij%AMmv $ڵM*aENܣiD@sG$ˤr4(9fY2UO/KF !p % +H7̛' ˑIƃ4ɀrreTMsc y˥f6ډK>{7W&R3G2EDr4r4(9!FsCɗQ+…~F Al#dDŠ2 b_p|F!"y"Jc9M#s / +#$=Hu&('J@Op+yt-XFp8H:JMX^*|Jʯi~)A)kť„m>f"pV.[o23/N;L(tZYە3'}95;a1deCtd9={l[NEM6^47orc NnR"K*HmQH=ﰇkMpEc*LMݮ3q-ó'`fqaixAzbxxE`BIIWJA vTsmO8YQl\j)2?7Rjq:/Mfn踖`aX7ns 3gڵu[jl,-q$լ7?eq@~ d_g8xg_|a\~o i +mJ!Em2+գΘGm1C[v|&劫ZE;MceFrkyYbpMt,8@*/h,QsA#ұ]<ݷǒ^7GT+NAyQ& 2M.rǬ)(B¹ky :Sv `n^P@֪uvnfڴ}nhn},W|=+"F&(Vt4*ɄQ}cīAqA>u?0U+֎Eƣa>p# ~.PE(4nW!=Eî6p*}ZOA hxtafs.-Mr4t܇`Wзgf;fHζ,"m,Y*c'.GÅrk`˧EQн\4e5 PeiPNPEv'&߱՘t ~ ~RJfɰ姚罈7no{-ߪy|K3І4:a}B :~&Sr,|g~觿'1|No}AUh/<1DQtM]mf. \ ehB^(-·#~F9U=_ai,5 D[VcO+>ɓP<4rV0i'יNJ_gDx< Б 2J]_ rX;AJ #5/PEk-ؼ3þG wgsL~čW8vT( ^=]W=-tmi{q՛4=Zl)rwt^u Vxtzy0;R]꙾ ňN[~|iԇaP(C=XC)Dky0)c脵Pt7Rh#e?\,D>q7ws~Nݺ&t7wcG6aߢR{z .LF2Cn&(^9Ȇnis4Rq ޥ. cg=w:2 3]E 8Wk,W ogɑ.I8q2mT>vm8|㢉"Fp>TN4@ou٬m>©Xxi dBvA}Cc)g[ZBɚݹU_ծiLO^Vf"ΦNQ~n(YB,6҆ay躩Sӡ:Qb&`ml.F?QL /^>ڮ)[?lgڹzz1A~NENrmh'|z$ڟSgh1+.ZG]e+uzEts ],bF?%rhmk1DRm10`&,z2{l64@,uBPB`Eyy.P^C6yMVQHuHX ER8(`/Ke2@:zC~Ip:hA!|L?z,/#=#gKG3*HyTLWtO7jVs.m]論CvaJU8]Gs9O5X ̞Nle NO6aP^O i@=*RIf&cIզB=]+%$<4{GA~XYt:UR"m*qEQXU5#]})(ӗ'}Oܦ[}s#A7O *bt4kJ>+I&(l;b'd5lͭ}A+k~7\WꜼ̸ŝ#=&v Ki?MѱۙdO,~LtFBz/bG%BĢ~ QE=WVe1Œ[M7 BжN+as[Y^N_lDz#[m_X(f"j.X,:, *$ N សv>{+wyt g)w_a#3E, гPV4$m *Zwh H rLAb.hyv(jOWzo,?5F?I-8IW$|W2RO.#四kR;靆1ϝ1a›(`Ka/jLW;׀I.1? 
^.yH]YDZ fhKz*S|+٘Iܙ.dzo<]a} ODspqǢ#Ć<~hk+.,ФY(<y3@<,<Kd{ *YfO,lSb*lsL؞lo<6ϡWӺ}q1@p)h#,lſ gAWHߡQɘHнm'jo[ъT uZ%O^iQ] /]y=13RlQ4ݎ{J04$4taC;)\"KzJGޣ Z =0(|-m7az K1M_C4> }#9.Ƅ>$wg'e˷\'VeVLt>Sh+pN29yl!ØD1;ЦBmcPw JCD ]O[iVz/BbXRO~\1ϫ/(q6/Qݭvj)wx=;E(wBIV%fOz5ٔeϾ7`[?p$>"*eQYUC6Ca:p+iܸg3hzy>ثmw[h ̰'ꍰÂu pIt*A8%2k3(Wfz< adѴAf@,^wOD&@^TH1_+0Y:H?= L9 *=lWåv7 'v֙^q]6p׻ۭa3 NٯH Ie'o;(VдCq (`5q70g(0DsPקca⧜i`#(Iޗz Pzq -_Z,DK.~ݧ,tgZS;܄Ix6"'.6f/8Z$ѩmt`=O'=دdOv tz}m=D,䬷PިRƸ@:KƑxdS_0خFXk ɑ5lV!-ӡ1$& eI5v(ŶXRNXU7AvDg K& "L}Xă-l`G>:_j l#dd`<2 K''YjaZ(彜Q޽s->nrL E$p ˼ݤ4eB)sxmUlo]C?Pi(x~"מ_GKa$ߢC {mvY˛t(jGt pkA6zr y7[O^m"x.Au{OF&rbB>LC:gNPQ}@`*k  Ou-&Ͻj;tv[;Wfgi1kRŭ~po`(B gcep>S}B<+ !B]ݱQ"ܽ;/Py6ziE<|%FKSfOE!4mf2](g 5̫c@?$sD QRQOqN">v e!ڱ7$tl'iVYd|m D?6u6[ >)c.},uWaw,.T=*ꄋ6xӫHnYPo9F{z'ty}9b;$ ,]`ʵͦpO;oKJм4wx &&δDYG4^2h_11DUޕn?uMvF`] Oȓ>g$(=xď:oe%y 0nFgwa{9c̓ف hX-ykC A5E:<(`ǻp??ṭ%-Wc)'0,/ h\e: ߥ~&,38޴۬Cy|9i؇U!>a,}   36pɼBV=WcP @|mN ud4]xk]=,_@o{gb$lw)ǢSy䠬K+J[ d]^B[[( lهv(*Kp>li:v pkbaB*Tb9Z_k%BAnEkt}ݮ< ; `Wø4d\tۑYM7dH}l,_fSSP֎ )~a}>?! 
[ܙ?s0g01ll6j}YS40^^ˍ[$Z6lqN7;h5&'YqVlY=!+ab)'}o{F"&^3dʌ4?VwӲ5OM=R|:ڛUefoH{[EphGu릒UVkuW4~{d]ƓaX\]`m [Q}N|ﶆV8^ŖVWUVpEg: ]1KȔri"%+nu:zlnߦd%$6I蟡]C}l, tVEN7T{/Eh|_ 6ӮsMɞA 1)Z<c6pN(f(+&3if*~eT{ Ɉ6j̒'t캂]W5H͐xLG3ߊ~hi]f|} 9UOR>: v*1>˓p2dMBedF!0sĢi'i["1#uƓVǞ-9+˲֬uQƁBȌ#Д[(I_Ҕ ea P5Ů)!f2Z5ڝ(XQN=0#0#qn1Uɮpc!D㸌#0@X8%F#hIqH3g`FHG@cc BI̗A83)#0#0 VØ\F`F` rjp\F`F`l2#4M&:gf)T@Д)Se9y=58Gwtu^}a#yKb^7>^3 TMq c2ta%g%qߦSQJAS.yp[}bE(-.U#)bPPz=[gҬۊlc!⽉o&Z6R&[>⛦<^|tڒ:2y]V#5A5Q9$)iPo|(Prl5G 6'7zUպЩX-r6=P\vTj{+-2ȵ>Z[ɗ^z AHJ\zmY[^o[ofj;oUO,x s&/üB(kn_x&U˕4-#YNG#2ENS(+d۪MۧUM:_wmA!tWr}su࠲FL|^>movz5kR;˫ݘojoF}]]ʓ!=A⟟(nAT^sgM(eJ*irP΄64w^>JxY,~ݷ4 \StQ?-:ȍ|N1sb*PXЪ f^sd#ԾJRbƂV]X#&3ȻXTu&ӉjP9ι4$eK,{Q#/jw٭NERl(_[l7TB*:M5 =y5Ő~i8d7:m2ii(wQ˩tf+ʔ>ݫ׀xGeg.Fjyc\o12֭cA#0*0Ĩ)5 r$ih_W*~^`9ݏEw,qCS3;jJf=SsQ^=J#v#@]-v~ķ?z?s&fOf͕y JQI!u$n_K4ARܾFCRp`D|f9 95Yc5O۷ք]~cvN H5kv^|_2kɅYGse^GRaRkn_G,$RоB r*HU64 NB.UrjLC&wYG@-ȟ]uF )9ٸ)[ QUf^'kgȐf} @>rj a9m ~=t@IDATJ95KQtI/c0쓜& 7Ή  ϼ-Of_7aogQSKg|#y4Lal.cxM*oYpMԅMU@ypOVXU]*D䝉kX:&:O_%!+V*I^&]NCAC%oC֝m[.jrA}5e[VV!_(ϼ5_ypqؑ!ٰyϜ,C).=e7 dN0D;d=}XzS>8W,#zvn/=T%~:ŦtԮsAaźY, pE:c=|< _(Xv^ug'(XlzXyYW`_zoXrp{T1wqQEOmٱWbMe~2쫯'Rb=aI_4HggHw涯a)S5QӠ@!A4 Ivi„wȄT4Lkʷ8,A|w{_~Emeg] =䴴Akvfh}$r$۴'9_,>XclܶKNrZoܷصRQMtj* BA %|tI 3>9v; {]KƉ[D//74JۭXnS8|IxZ A':(tҪ氟V^]iK3= `2|x_ OMd/גXmݦ˟~zLͼ% myR޾yY=4 ܺE.Fm6/<ǃ1`Ц66(}lK) -AVkSMn_=#H8B!c4T-_gbbnٚUecЎm[ Z47vԟL m)r7g2BI^qzyj4r^Saq3Wj@%ɚQ/M|k+\s0Z=ط,4bpw8CIs:<3.$萨8Kd\4OFz1q,^J7<{*u}E~[+^H.ӟ9_.U&ͻ&f/`b`VѮUYI^:+dV#u^oqpf;2\ 浬3tMi- O wMy># COc`>qx"-_S!'^D$2֒. *ѩ]˔_n2NNc= hC8MFN]sW ]mR]6m\gf}!~\OjqfRIN':]#^6x7FLb7CQ%NWb-{֎܅cROdsgڪsFA\Ӌ&Hi8|: 35Ӟ{WL܀-!*VVo n& 𙔭͘}#^x_=OGsI絑gu sSp0}_}p 8HM0?5{.Bp)tIԁoڶ[_U}):X(9\$ڈw:L!jb)GrtnCM~ᚖ1P$D}7v*}NI iegwh?|q&Y^2TEgdc(SDtHc-AiPɑ rB|ǥ3Xg1b,H3jh&%{؊l.WU$7SdGG+QEœw]]irߧ1cB 8damLG~Yv-y0ܥSFAvrOiHl>咢egqRw|aZ!ʶ gkHvqTӱ?8h>EnB(#i3HYJtdf2_~/îB;sƛJSTzArI2TOOfXZcEKD׿Ox0CʱTh[q!:ZH&+Ad \:N?4{t)vK9%s aSo bYHL4)PDr}حE4dB|zx-)$oیt];b_>@;.;GG ΐ3xmF߳Xb}Pr `lHp9Z,6CEPMȶrۮ}M svwDȴ~OF) 2#ͨϧkPϾ}ũnw=E/O(Zaf˄يFu? 
VW~8Qxy*Gc$Kbq/;IUxQWreۘ3@rh1k<[8RcE98 #0#l&:O4g>Mvڜ#Д(3+@`F`D@G]('%GgJK@di0#0"hY@cF1sigF`F0 f(Ӗ'W7p)$tiᥱ$,F4=mHrʮ>bw~}: [wS{GG:ئ%N>Ŀ,hбGGNizq۬M\>ؾk/NX8+OG ؎kŌ>qŧgN#Ʊ Y?Vz5p{\{՟FZ|~? sq"JJ KCPk8, ']'Ł:0DbE|j(MG(Ĵ}Wp4F1o7?{goҊf<89q3==i'nQux˼3{}OߴΙ S XN/=f!p$w{.&I_~? Wg␙1HpYɊ#'N]aFm(l3U.2$lپ[-:%s{+sok6n[`qۄf̴iT4@,qs:r:yj%tXZL\sɘUbOٓxTj~n!o%sR(UUmUw #)}e $Óѩ(FDu[SUm_gZ.QQ ,OFҶxJ]D ȃQZ-(y dIt*7S'2Cn0iM8ȃšmZGrit)u:W»ʩF{r-Rчk.r: Id6 Zヺ.ޝpR `R-r}_H?:_jݘxZ}:[X &zlEY0kkv]E}|} ڥtY' M<FsVFSy=ff[7*W/,:~@`w!Yo`E3#/>54lT,?ÈX.DGL 񭶦fW]#b.ݼ&[f>1&[势9J>m?`ޱO.i2Jio_ MUNn_82hsӡPlIY~G_0}u/z'xW?VyPWH;f 3GM&cq`]* *Y6ԀK%@غFI9vر? ~Xo Fl7n%h1Gv;(>WQe#MtSNV~z~f,#9[{19^auNB_W} Acq[p>W)|#y@~0ܧt=uw#{Wj1E*E?_'h쏿Szum0LQھkjrjbj-~$x2Mf9 ^)2BL޺sԩW8^t6tp}g0 ,OmlD9Ք~ :3e@Bu+Q7o=D%=u1 {6pѸ3%x% u-K# - FA?=J?|!NO  fl@ѧ{W%H@.6ҝ{5qkǶyb򓯉ŠԷ/d^pމn Χ?/cm)g"|Oa*D$ʼnNv ?qW6K8K̓LK |KFҙͿXxe t:y33t➌%Ll_!3&Fde4"\L(S,څ y*d;.A TcO0| ?(ʏHQ'9@xڂEo8q5 d|ϼ6}xȥzk|w)h_sn/0mgȝ4q?IޥRNPi$Ear{|.;]n߲}p-A4K84|q:7$HdH._,T2cA+)u@Q% w&ѐ&&䵉mr@!il C.9thڈ ipTIhfΨTWϿo۩۸xCh9JIgRВFPT9ߡRzs >x M3L?ʯUv#+ol.}?b$f/z"}u/X6żOP~V|g]טiD贈KS& 6=Vp!$k TP=F&::(Pu Y 35JA~y{<#я!tϹM^u|mr\|3f9mPK4IȟnW [+whsׁo>+75tJ2p}& < 0A,A4LS'f+DQ"xJqVR*M`|/OؑB-T'yF{rg2>J+K=ڇ]ZyoHc):p2fJQ̂+ THʲB*MQlGbr,Mctu}LOy&9Txe*ϙgglsHaRbLWyߜdr`ӳĊ3Ii, ::4>kYYheƊ44.웑>6kYY}caNj>&4MtFzVmVo)t:e#_S2d^'Ԧ]vŢ :B9Z2t"y>]< $JSφ"""!R}fnnڽ0;~gΝsϜ{mAgc#=wߴ;v G{ރײ{nEFɦGYH*>*/iNk9z%vUj|H{#+.h]dɧ^ɦEz]iBiACfזC|rxbye6]~~/Uj O+q {Wk&v8x*]| 8/f)#S'<9·[ITB|_:œ_V> x{^b,S QFڳ0 ^h$?زI>2/iL`nK> -/4>$N2tfžP*s6)%>}ov&UHy*I/ w>ebWqKRHZ`U_TD%|*|Z1FRкdɧ(UFCPv?>VkD_XVht5NIqFq\->'7nA@|,E7!I:2Aݭ\\ʣ܍ɧ%X|%4d*=c0|:G䋚eS",>1)itTښ:ď8Zc,)jc=nKϩӍ|xV9Rӫ,iJ5Ir| .$i{s$xxORNɒOCL:ŧ+ςԚ 87 H ֲj8¿rz;D SI7cbCJ%D~4Z:HZ5:$Jk(M| j~H>NM>A\&5hnJ?ɕvmwr9 Nđx ChaK˯L_A#䕒V k_0TZ'`A]`LFH>?>Vu@|ݪ7<г@鈀>FOъD?7Ġ8&)>Vf"(~;wiZ;/[7,Vl^tm+BE֩1˹bWc7\W :W,?%Dl}PpOJa_Z'eҮG-:v(2ggB׎TM` ŷ lvCthT\:o8.G>F7A6QPnyM?p(7/<1Gq0~X* k~8[ 4+4D4owO_љ³] UO[I>վFE1O HHrDnJ-(?ZjLZoxX+4R+{=.6v;Mt6CuմBRKgA[%h1ng5,30?\߉sgC޾mï'$:(emnH@!$_ \MeCF2'x^I`QZg>.}hXiIش렠a6 /;G{z 
{vd?˝7aB[_v^/+Qu أn1>]5 ,wDB4C">w|ՕO#4zBz)"yx7#1&§ޭz%FO1O HPRT rVϲyXU*^n&GNdD-p䒗4w ΜɧQrv)WNiBpXXy8t<[,YyW A "Ƞѯ|#ˍhYwمgNjB#Fϳ#F{t "_n:I1cbgR}jjɳ gwi})`L嗥%kwi+yHF34(kH Sȍ>HUO[v|QZ?Y3Y%HAp7}\< 67cBhZ\ ~!{?%s8b_YDʽW ~eGDvNl.DQXq1UvA/˄ю",d)بW2Ss#Lz#Nk8Q%!&E&$׽ϏRsX&6=qRٴ5W>Eӹ?-p[/;WL>u ܂RQ?"3ҺGxLO[9ɧKhV 4DOKV/(z [~@%S@[GP@Rҗ I Ɖ\]<^Cu,`x[EDe47UdFge9ZYnDs@u]KuHKu>Nk%M'x{|pͼ!M35չ\'YQtyg?/.U/MBϹ6ɚ428Mk`1n|5X蘾w+*(hZrF!iTX^z0(Fļ_761/rQ4Հ41/~۸(ia˶'!_]^ $C'o9@WNRĻRꦀp8I`0.M8L&IEt$ϒ_oߧE|k1{)=;Bf|*X{Yr`0|`ldJi?^ne%jYS#(剷UGBPf#6<$v?C7.@Mdnovb-[ϰ=ϲ Wʹ"+;,$uvN⅕hn(07mP\DxbmT(Өˡ]%k҆gy~i5_WFN.mkg鞍4{:,m-wOL*cޫSkѺY2p.ͧ8imr&ru糖huFug9zFвjF ѩBO{m|?^n["jA:=j;f,5n#ez+j^癚{)#/.]KB3_&7]:@-f'6\{L3-5P\c)oꭂ_JtZG6DZC.O&z>UPQњv_7Xsmʎa (Ցp+ H|DFz(#j*(ˠW))wkW : }Z;.lcVzZd./s\i;Ge ѯX)]S.}YsGS g:לq2/<4uXkC# 4ywkExޗ|Fո%KUUN+Jҵjh6I-C#P|Q(QVHl{*f\QG֑,`dq_l_%L-|Ѫ^#H$D@" HAjEJ" H$D DF?_٢azL^H$D@" H".&*Bvh8uIA9d^D@" R㊒%Ij9jkH!M n.dxCpm7oe\d|\kJOHteSieSj_ v{!HhѸ򂳴gmn[U :#9:kďģ&B|=g$ü[C5_g`{kOC@qXV$Vu n?>=|l&} T 8&&?65}X_8>uC]T[AL,^]6r7JѸA]՝I^TKeu7f#Pj ʻ5u$ѽ}Kк-?qt[_3;|8x/w P܆鹅h a,մM1H׻ܷpذ#S'ͭS/'3"1h?bv ..nk ho˱us|e. 
!H>.ޕ]ʦ@hәWevj,ZM:\ѳSߧ7gqtB˽ =w嗿}:q0SYf_ϻo!nǻ<:2|Gݥx/>za;G{_iX/QtlD$cڻ@\ܯp\fc0DVMĿ!sŌy+ĕu]ۉ\F|2_j5 F\v u&EKEZ;=Dێ_i'Ol ݊i/Y$FߪV(2:u|n_|ګSk]l)P}OO}|}œ fNFO6 28 [ 'O nAٝh|9Z x tWb~f# AYu8v;yбuS>u L.1kկ Y%'%`J.%m{oOP|U狄X_P|wm`Hnq$ѫckmpش|C3fŜujt!v30GguI \|֠S _hW].tT]9F_}9.qÐUnwp]Ck3F_C}jFz|KURyxǸh)rGP@}Z'!.)dܰ3S{_o`'v~ e"Сu_)`ӵ- GVbݾ 96ѵh_iU}O3SG(ӋY's(*q=?:C?/Z'$Ɖa( +Z7/,3z}šdro.~s (3luPc~΂@|,f"Fa5̲e9 G"Azh30LSY7H‚ʪWumh:R㫯g§_⨨~WՕOgwkZL3L!hXsȗi\kEP6BƤ(P ʧ@kMYK\q>FZPִf9GN(4]hdp{4o!-_: !‡^6#>'5O#=mubMZߖmpwzܺNb˞C"挏6L_ @밧'P06)ȷ ^_8Ui4psׅuϖMkaf\G.gdzsĺӚFc;=CP{:u96jƸ([8;'f >g-^[*wg ӎz؊|8oD45hOlс8{v-DٞXUV&7n6o_,YCeg,j4=&onT먶Һii_}kFp| JS.B{'R{m\?7B3Dúu:$L Scp6h OiGsT*|>FT>@IDATBLfcݟzI;G{C>ŧ\ͬa`NXI[ 6z rj!4ezCݹ Y.6&dqE}:v-?%޲W]=^?aOQz8snv1f[3?X~9E0ۦe[xؼ⮺lh,p-|,ѼcfOv>Dr:K xG~/k+FnP$W/JQՙO#8#2i^|zQ^D[s^NAO{vl%?7|Qm9)AFvA ~}+횙WbR>T=kT|* k iU}VħmiN:me~;XHʬRxF[767lsP[D.e}&F{jZim> *h:Y-).<oF'!=JA'jO,ޣ947).,4*F>$dJݥ (W]ާ.&*V - HAf_dl~]IlIQ J詢LRdpO!YI*'@h+A3mi&n.+ ė8ZtzYX3n`R/OrF/<7?^I|ZO;k0Si Fx|& #9&S3I!.$-n׼۵wvuK%-BRZZ0C$e6I{ 1#Y3es{jghU}ѭ\q#~/y-+h#4+~j̃ktZvQ_}6=#m<$pӼ&M]mE_ˍVNn%E:ɧaK^M!|8qpU 筫߸F|%;@6jY7h~pqg,5s1&ė8o&7uYG5xmۺ3/)Mhii]f3US``#z. k(M.G2&OIHyvmޘi/,ȬӰYv:pC=зM{YCv dX'((,gŠC9ذnH߸;'CUt.KSΙ)VY3h 閃ŐOYu(E L^fr|uYڅ2FG˷\L91F":1T(G3d|ei"ͪCC FHa,t̤I;DqC84=-W4]pիv;䐄wjR|\/QZժ7]q:vmg;}`PAq4)D0](:\ǃB> uAY(#*"A/pX ԴU6uUI`d*Chxu>SJ hI:a.[y[ԫy3ָ{]ݶvǏ:{@2.yx]S ]HMVLM2{'Ao ǥwuaͧ,TҺbh uUBҬb YyT ;\#k9&i·uB]^ AyZ] 94%J[)WUf@0DPnimVc}͑3l(1ߞ Ze]Xk=oM>+Om2qX?V]?\H ְIpُ {ZXZXç,H:08æu`J, __tZY:>[,u]5}{㧂^m:vngۖZ|\#YjUY~"(l1ե {/e˺hVA1t 6ÃמBrmSBAA4˨I?PL?jɥPoRLR_]1w*SV"iiHCH1,EGQe #p-76lR?繃R9uß>+2:,؎EUimlN)i,N_ ( K/aˊpgs=aLtM(=j(R{v&bS!!>B]湲oCfs {fdv>eIk=DkE+iΐiV inpWhL#PgH탸a3GjMJTɆLkS ̾5]$I?JQT4d$އGu.M OK?=i$*YUD~в?1Z:f iɔVJ\bڌ!-o?y ˴h E_k6J= @N9b~;c\YYk{囶GU!T+AِB$D@" LUubT/ R'^R]_Biiye|SRW0JPM D@" T2S' Q0z0o ]S}eifLU珱Mj8f Yxpm?t,VRG@U6cYY*r2D@" H D8) 1J)fͲ1?VpVk,>i;UeC4N(i$nkйPz! 
DM SXs#nο?3< ~KA22^" H$QDOQlߪ M6Y~Smz0:uU0Ø,#ڴI%ھ}1.HZ̮争jr.{2 a5t++;G;&I:Oe9 8W;Yodq H$D RB]v ^)i.j3<^Z\.]; ۫ܶ Pm`vbeAxͶwAb}*_JQXY0O[ɬD@" @@Q7w`LAgbK]f?}hE?u:(DLlL %rϵD3Z)$xC7Jky߳{2^պF*_RP;S" H$%{A*=|$c49Ӈ(}-&6N־x枫D&dtw_i޾lLqC+Z),+E" ,P$D@" j} lEh ۥofIi(5El k'b{=4@Z)g"uOSPQ%D@"5"m)3Xps_P x }λ @R޿ 2K!qrr&mEķ{jÕgZ:E" H$F #m<"]O˟ק:gf5kSNݤ;yis ;y+v[;,XS?hZƜ5xpQڙL ʵD@" <G%x\󒂛M}Ѡ!Έw #ŽG>rGau_eyֽq1+D; eg)(CMfH$DbC[PVM q dR,]!ZĸRĨ:Jh 8k $ v_D@" H${vX5{Ӱ,~'=uo1"TQ}rbi` 8Tn)nVwĶ}EDt\&f_#N[/("{tQW?IoD\;z.]ڭĬĭ6KsVQ,y-kwQe۳CIB8♛Eg;glR\kI/;.H$U ѩJm -7|33e^*S BMGd{V7v8s-įK׋0 PjZݡSgwo">!ܢ {Ѡn8k[wMeW^wlTظ DEr:ycޝݵ_zkѾEcm; x\NniMA9l\VsFz(#)5F(ːH$@( j{lU暇x#)еY6zתl+CM3*YFh)4OzXz8uJ9&+ (3 OF:x Bov<_@ uD|l A9@.yEת5!kdg%D@"Pxǖ 266u fiK7M`>oipª-{MفaۤA]q,봻'K ևF'o+9MKۿ{{9v-q^#vҫ f>'3 E%Bv#?đxf? ٥"$D@"  $)UfZ5B c]8aOvvmBfvwpWѻskq_n!]޶.^@cfw.[G)a >խ7bҍ ם4o|&/eBry5k+e%D@"%,f wUfNd o.\)8{7־T.@م^>]ˉ\PʫDN5//},ycA^:.j\_x) z (FkQD^.Xp.}Q@CUo O+z޼Gئ%?hngvf;7k_'qgB~>3xCC RP9O" H$X6w ZX,<1_uae϶-6iU[-Gg+qVѠ^"L(0]my}:7"' N($ϝnrAB0!lt0ɴD@" ^`*E&5>wHYj鳍Ze&5K޹e_vpUudy3-36\k%fH*ဟdUEas5oӮclBB:,鐥v,,,8]{྽/[:?7 ( \v'8٘ʢ$D@"6S' a!S *\{#ﰉPv#{fddPÑ# G=uqAegš Ҕ_j C eBy ʺɅm25ʵ*'`z1*Kr8ɼD@" 0Uyr#hۛz` HN7ɠ 9 n \KAhq16vin3O.P]Bu$Y" H$ SWx"odj[l0&M>6- Qej=ڠY&. 
< #FaZe&vE " `%D@"  DK9a |;]_ }iv!M/JeO DX?!5L#dN py%D@"j|eA5>N¡(־S'<?YA1*xO2 _Ѕe ƺp ȵ8FpHeD@" T )Ƃ;a| 4Uudl@M5C-p\53Ob_ ]LTGG  ӧO7vY׽R`m{iј.~8j[Bk6=*7.DnVyKU#OZ\F <1O^qi$֊4<q&|Uq8u0~m4G>t z>":*k-c<܋pݨ@<52_L%>~}+lbf4ۃtSiarK!4&GϕI|>/.Z5(c(~xHt+isɽ /+㻑dLK4'<[xCН&X3_D5N c(H:v;>#ǧ!-/d<,(`yMnU/1|.pj,ʰ=Ǡvb$_y%>xέF@`(vU1VF{վBo>jcݱcOD2@C.x!.W7lMjt8`j!8 {yrnsfl*Q[vlKT!sNS'*JlQc##(Oظ W`rpħDV]v!K+EQ6vy2H$U% ]Ѥ*ҴٌQ)iTN=nѨz_[6_#ld>!f3h^-S>DgPIk@/+2*ӓ''eO2hFOVf&PqgwMʿ,11{V+1q8:DWs ;Ti/بY²Q~QDv:!ii<췧g?ֶ:$*,^|S-,_ !9]UL:ak%֫ ɨߺaY؜f$K!9xpRAZ4J&e:lQ }AU6kp;n4fsϧnd~D 2lY2 @ms @lӗojIJR9aT2SiGy$[>\Иp_IMrtLs}`bx^ 0eD (oyt3i4"2* @t.x+ʻcaңZn"%5nxԮrǽv֏OH a b^:~tď8ͦtSr.ʨUH#uÐ^4R|3M^$hZy.[#mBWHJ#@ \[:V  n6:u/]en]_F糌vngI\ H<-fEhqr F~&Yo9Y$, b4) »t.cJ"!)sK>H(^p;I I޴+Ss]ӹޝDՊ!M#`m1}RrnMMĐ~Űs{h3Z6.rJtIgXth[UaZ9k6 Bt Msnf\??-\#Nm6nX$jeg^~A+AA&iVݧ l22hϪ5ˎJ"f] UreaZY> H7˔Ax\nbaZOjz{uj%cYPB8ѬS'&Vlkw_=H\=lկ#'5ax!X=,ߴKB>Ec-jLQh:фOZ$5o(mn#K{EfPx"4o̧\ .mje8X!2D@"P*@ˁͰso02H$a" G-aV(Spӊ#n/8NM;|U8!Ndhc]5XDtYm&T7m$x4kTO?gՂ,5+6” (@yc^:e_.ĉ_9n\4gnp @tMAHLTrYČ{716X6{wam\o X%X׷ldz9rTp!4<<ӳnGwL,+,QS|3lԳ5Gi$Mr5OQ;s"P nd~컧2htaYW-}m~ _{}[/uEWK֋@^~OOpv6b [m .ڋEuᏸ9 { ̷tvѧ[;]6]#ba]Q]1)b_ж>Xxe!Bxx"pİ/"4WZ<_32J6WBH^ dtvpp| bàuSu, kLPz$xٓRd0~iR'ON܄t8m?kpM;܋ l}_{]%jRY5LA&dԎBzn[4~|?)7­m|9WB` c1G~9A[TWNDͦ~rr왻wY~JPÊE8g犕{ڔơDf kNx??$"W˞%]sƆۻpb3o|.~3shbpnߖqo} |q<~N>LsXMWDW^9.:X,fSMii2=l-Kk6zcg0{fٷN /cDV/Z^OI_7^Y;o={2y.#6 ,x Y׫J-c: cnD3+(׉R uh$ĊwU*59/ڤvsW7xK:|.Ύma\!Ku9s(gP^ :k 0v1ZC8",x6hF'}34qvco %1aӞytī$Iubyjm.xaz4Qf<`4QqZs^UJh益WE8jO,Ѕd/p!inؗ C| -,gf4O~"h$t sYG(LʍKוup[DWW|&I/#E% @nQEuFT'0I7 [=o )icoӳeҺsߞߊ~|c*9"`>Ԑj_TLch|DU,sU{ɈFE| 0i yvA1)>CQxNCSq;v&Θ_o[Z}?mṺͳny{t|)[irH M:՞OƇ aZFb^Z0 LhМhTU,ܞ}X9|`1_UG^?X EXxʣpKA5dy?MK($Dz ^8w/Z_ Ļ&<}!K`n_ɴxP\$!${f2 A|kizwaPK==j04=*z^OO{fTvxL.c90Y@J@Mo#{ Up}.kڎL8zg54pr7'{u8e/ Z?Y&=iψ#:ݧ+rGpw`xJ,SVv{Uu=_x$jQx4=]Ո#GqE> w%FӆmQLS$[hF\w?LTcŃ2['`L᠟.K,l>|ϊ ɛ-Mu}S-\C`4/_Jx44>/=:~D[Y̆0 z^GE C?WW>u`r:a>ƇG  u4h980LSQ@ UuQzQ2ڻd*i-Xd˖|a>u߻1@y&pmkWg,Tp%CE A lAI;UG[}%Zh;A᭬jƭ" B{&Ѓj9~ -{Fx2@y@FZԮb:vEu G 6Qi 
BQfZ-'E|6{y|q+pG3RR 't.4ߌ66d3./WbK7cecaWŋE"o_4, dv10M`CKk[b"|_5>3`LruR?5-'=Դq3G̓10uB{ϧ"E!6n~ͳRo~nA{= Ӑj冠%$dViO.A`Ygkǡ׿9η}*9BKf:h-Ӿ`4:S20/d6˻rB|̆ŌB$ױm XxYvni k"}isn|*IƘ匫[ 5AZ0|6LgarAZRQ #O^NMSxFj(ffFﯯQ[-4ۋ&\SKvkƉ\>r{ e7ܺiï{ہK2''%bXkvfdUo?4cF㔑uxWK A"UA:L Y0tk쳝 I }g`[*ilfN-m=K]+ju_b* '<~oZ[t+.m+vqn! ު8}`1^gE0-7;9|"e=z2je-Ѧ[nFi^hV[8JRM=\zK/M}鬂 Xl? lYT !>\jr~2rט  g[-3l= a1څDP6/uPPuz$OG-ncw:Irci(ƣ~uӌQ /> b *-rxD]Nآ(␟1P\(?fMI2!ezEGj2kQLc"s}Gz}5gQIpG!قC{r%ڧomڵKH IY1#] Lc=t~nɃ{wo۰|z0%-;\N\-(~*76s{ݜ(pp)ѶR1{)@~h!lHq|Ӟ{By >D>O.(=P)jh{*\=;>UCUu[R_&Kg)`h_< KhCX|  [0W|rQbQv:Ch~Br I/NmF8a9 ltL0e҂>r=.suS:U#^g&4ҏ3P@@)Qke_=~/.??pՂykpC<]e cyItA9 T e˴fDĠ5Zг흿VewNh>ƿ Xi6R7ָ<꾧k|tǕ` J62?5WJ1 ؃zy : xt 90\bjѪRa"ўOIU ťGK:1ӊM¥M8$HԸL-p3@$o?uؖm~G=y⟠[:_%<e'RPtSKmU|#})[gestuϧ.;ե4b O$$EEjwM^T>MYZMZc!ċaB0 KaBs_7x6+|fRՕO,1dL i?1!5<*8L6U^5(T^=)1TzXq4iqYk!PÍB2tAB)ē_Y+_5WL&(#:)UNvph8X]G^+й( =ϳSuݣ怇eu_ZVew;eM4TGyOfѶ4i Hl**a0l rTRF@8ۤ@AZ Uf`7A@IDAT|m00"˴~C #)~֡֯u2?ׂ>Bii!P^}=wIt65ЦdCSHWmdph T )1\< ݶᅌ&/ƓFb揟bZ]m.~~?sO`\,,~9p 7yM?^'X\N>ue§{^^t|[g\UQԐk4 TJQABM2^sxާVy0_5/ <󠖘B05{Q*5u!Yϋ^{|x5^Sc#4AYbz4>vKd)$){lq vGAnxQ.X,UAfc~j?aV ʏ6 E!c³kba%bmk*6))JhOnh!cPt_f?MP \[чB2sřUKxk4 гBaoÒh}\ibDLnƐ AtT_JR*=nX|W~3v&(]H@N</ieiuס> J[xT ՕO#l/ xN_v+Jzt%"pL*`pħ?^5c<=}6*7TvT?aMISL1xε!9wM?8H\qQVL\kEoCv(!a\@J96VqC<9?؅ޗJ3W;+MMLΣ1cS^: l3ZMIɄսo61yrfUp9qβ1ƣƧoUS̤unl~T\GBP.Şܵ;Az6?j+/+445aP Oɋ=GZn$]h>WOL G׹?޴=E`jz+̈_FLDQGZ[co(8 3F]$>fz*q1j/OxQĺu%א{T$Ⱥ&]*PvNpu2''[-|b172|jTFc;X(^]||hd0+3}Ahfh<몃^s5[&4| ~RL^ڗ0"a4Ư ش=nj'm|EvIQ>1_@5Jau :̳.0 -*=2Gpp85zB@A\Z eqfh`ЌYpvt,ɋ#I!9P+;)wЬAqDNPk1nq8Kx H13YLW,2W ((zf:ӥ~@&묉oZ(ELtdžuwл~Wʘ<ה|w>,&.6WyI1֠O@OtvsҲv#1M-nhkܽQ&J"\GO>'^<<Oha.x?[H>-I1qvxq\t.>.LbgwlcwZWu(^hoӝCr@8.5!AjMyhNzN,Rx Ն͛-4횜LkOآ++pL "a@ZUVH zZMm㶄%m遣=LN,Ȋ9OçKU-646II=cO.D@"PQ>+' 9 >A.z+j XՊ~zJ= (("MIvw3w3Iv7{Nvٙwgy}hwujwx_`|x&>q %Z eo3~EY?rK (* Ͷ?/݂RNiZMfD|iKa$?i oj;Ep jζZjodk_F4͜ '>$c%v`) 8%MW6kUL*.% To4٪<]E/ck]1rw1>&+a; orG7nڵ_V&P`k[k_p&Nh.F@'(Ϛ_$,&c&&!I=;]ЫS$z)":Y+`ӰyoHd4Z˧¼oRaYWſnw8Z1oǺ E՗cN.v\O~*umjSzW˝NW[F T0jT?Mkq˂Kxw 
MdHMz+{Zg'nܳkpńRk]F.پv[jEYW3l¼vl~Bb kC$r;Iuw }Sl*_ JyrJFNhmg2@#D  A<>y7<ɦ-!lk2{'/O>{:P$?g|\]tqâw(` Z+ʵ> --遛Gb,A 5p"7{0,ѻ\l?e,uO?雲[7ޢh2Y'oUr@0@h(SO%5U[H~ DpH["9TǼI";֪y OrBk\ D42.1<T0oa!ƍړ&'jz1 nSb v}K ]cdyso;jϊE`hşosbAKr-sM  O8/#o^,Ud5&jNlv?͒@>aG#cR_2T8_L$Z'e\yB5>O6]WyF ({h; š7 D}M!?YnqGU62T>üAN ͜W.D.s텳@u`ȜdM50 =c|`YH` < &$iZev4dꙢVu> τ xdp4oT**)1pKPuˤ⬸TR?-zvA'ɩ\q3<7rZn8>Y}­`eLly 7ĴN0.4}9/VÐᎉҟ{|Op4ƘW6pu!Ƣdnv'eVk[@ r1NƶTRWno7g~}3d%Ջ*;,[: %eܺE s>_-I>iM`+ʾ]'2o((x>^|h_9$ì+ +uRocS^T :q**m]Э ՗ m’敿!3׺F#sì}2F xXhՠbi4=lX4S/P9.)+.m[~+TaW|Gz dKşhȤz| EHQOCú7Am!!^$g[cSjG0Oa~#kDjĵ3D+l79~~c+h޴P=>W<5 u>y[eOgjPp>a+|6mY4p|լ\0#h_5JaRbWq aOg.\jHok[O 4L;i[5#%":Ac~Qnw;!=4Lq{v8~`kQ 8㜫a6dTv0nHAney$y2I2649,\7HS 9~B>6$sYdVT4wo $Anma0DVfwov/OyXmWM΅3&KUmk)!+ʒ$Wv mf3;j[/Wfѡul?ĕ8QӴl.Yt8$8M6-uӱ3]d*P1qq>~|=t  zQxܩӇZqP"EN!Փ ēvDrJ. -/\~ڛcv]cڌ귓KdM EVD7Կ՟|TX:HIJ!cxٟS+Y]~CyU9{2zf?3%BYKɾbt _W% JDhQfWk`oz>0zFK4|X++fskB>.3:_ 9v)#bQ_\3IN O|??NLҵ6.QG3rD>NAQi9>@'Aώ]'im|Y$>wfbF@'h( +B:t}3e|C`go}\tuB %ފo]^ !ӻK$L?ɻ\M$onX XM~/6I7>6F$B.iv{4?&j L}p;ͤ5iBՅ*r!4RHv_#KI<}(TrD2Z*fJ3{&ddo>/Gl,eZt:}2kE†,)4t5u#eљ @פ  Uv^'4MSڒu2-L}NK' mZDX/FΒu8-̕mNŖv6~2uqȩ/-֢L$*;]<D' ݥ,CuS9 9XP"+Ѥ0Hs?Fs8dBr)# 5eO*Ou6/Kߡaʴ`ʿ4wHd9DV)e7~;ejl?4Op }+0Lw)bNkZ6k 8ϿTv;-!\sKZoh/z:~x$dAfT K [\.֠i337t|0(Z E$tSA # B)܄ Ԅ䰡UIZre^FNiY ϝ\n9k??pbF@(FI}>+:1!@ouC:qvF "`E9"iN5f&fc~ /\;#BXQ!\#.3W \/#)h0)p?HC hE-UvIp"3#)F`F@mVR6%\#0#0N%T*8 lF@ǝ:{ .dF`0 FSe%29E6]52zaI}/S irsmrO r|v!8uέku-c R1B.s y c6CWֲuuz*t0oݟj>ZjNx7N"O_ t/ÝGK,/"TN€33@!/V*fVUр^,",5ulFz[V^~/^AkepH%Z.9Jk o{ QQ&jO6EnmaђuhYAUߡ+Q؂_NG9\xlSB~!4E_tKˬ<Lٷµ?T\s|%l#UN_u]r#5Z[vTZ+.EQNNJJD=>jY<*@?: PPT*mZ;U㨲_o^-.,.}Ӛ& ,JdopOGtd/LvC82VYQQ@Trýb3Tpslʬe" ;Z\*)>qla_ɑ*Zg0@8JQ\(4Pfb=)=%Qf7WOgCvzSO*r%.&Ì;j׏w(ZaANS*\r+:̇u|Fz_0ۥF@mQ&@&wְHꞨ 8$O Fl^,2胰 aٺ]pE٩/kp$<\(V>WyT~xSvdܠ$>g"޴7-a4u-c1Ͼcg_+*yj=)|[F˲kZ6"h*h\u]J®ç]sp#΋z\Ho&Ê{@~b0%;yZАkgJ #MN0Of} L*aRtI(ټn/ܼ7G oN[{>Oa>ΛLO׿8mY)ՒV`fs2|'\[ Bذ(<(d-Dfe%؏^wnRy(ۄ#VN hmAS\Ӈyu5d6O^P84O[و笄]Ѣ] GҘkuUIr* 5#4v6sJ'>]/IUN .] 
ugo{k䩾p_XRYyģ'Q4лmE@ޙӯ~JUƃH-nGz8ܼau?vLӣ6nq6k*:ߎuu%רӾE5iDgC֘Zat5 ci8jt.`4B h2TVP,uK/wlAކ+Q dj+ȍ[~nNw}e o5? 7ojv.e '95 8<&rFo©$uעH7ki/7`|C xE~=dդaRidU!ab+n߁[^|{GWjaZ?cot`Id'p˟ax>\ oڷRk<2a6N#4ZzA2@^Ku?nOo@pZ"q l8}[k-^ '\ _Y-%KNMȗ_%oGўhmN?l\PXTRsÚ^RĆ>" l{qjjwdϮɔߴY{r:筸h70k1.4_2UV|Kn 8uur_3u˖,)--`Tp6t`ɓ}74}7|tUyy))dE&Bɟї5s JGG\ .qC+y7# QpXp%T1K^}۵ҭ[|bRZlB|d)*6ke$R9s!,Jk$)[JkS4_>dw3>IzL|\SUTN|ġq K?=$ܰ͟(MsjsC޲4k5jaQocPâi@VO_(b/~ٷ}U7]v& Q"\h6Y#IQ&K2:FR~-s*;q9遈n̟SIem MV 㫓TŠ=Ζ/#e(-_9"Hd=B$p!HHR")‹0:Nyō75I̟oʟqEf%0uvu˜aַr.FPejr0Ll5Y$VI(5DxPR",^, IpF6WEirKaIQu5`+ƒĵIM0A hEGKu`NVIRZtJr)uorLkRłaI̟g 01{5s&+g[{F@w(a )P(ƴѤ$ \_`EzIM_mFT&36׮"kƵ˜OϜo1e] x7h"ERr,ָ+aBI e : \kubUÜՇRbu0[`TE #4AL̟<;<.s&ס)9Sg>̪C.`zQE{x0 )6q@$#HFVC9Wf?ZFF`Fq#̖} pF`F` ̖*7.F`F`F@ЊrG; 7F=#0Q+QJ޴% $ѻ oqVYݿߊ&Έ;_x\ Vr5XZT5W?9e`,c%v`) 8%x` ù֪*GAQT\f5LgϞwVe[W-zDJWbd!ެWnk.E„N\בF5u8=aVt׌#2du*M5wK1cSR吤̞ZK`) bʬiؼ7Gs$t|/Bl2>>길G+6uy3 g .?/-ǜ6\4ȟ2UcR-Ɵ1@T0jl,?R T$/j81IW6H9n/!ܳkpńRk]F.پvYC!s~o6[Ha\uy;q6?w\1hu׆!WP(So:ĉp*_ Jyrf`E hEd?丟?}1#-9>DMޝyl~ľWDeǟ^-Ao?M& ̛o*yPP:{?ĝGxf}V`UP`J2~bΟ52g9;#@Њr 5xI)_!e끛GLsCn. ],9h薝`nʖ+wf\c{y?%oyE壳;u}`XP7`a}k5+WcP Sy| @:]ɝ++*E05U*)bfjr=׏nYR|BoK57?Rd'Z5OI9\""h()7̵:Z\g" Wph0J_rkwLbhmV:+pr}շjšC1o_ ۝ 5Or%B$ZE$Ys\E$!Z I<͟UYe[x`@7i/*,Ħi, [%6:/G0o_(7,\h_F{\F+!נXr.^FA+,U4 krk~6f4h "%IjqG(162T>üs8q44sʃ( `|̫12g~x|_`|)`t@Њ2TQ< &B$*[+=VǓ 戶G3!)i֤{?*?Լi\xK<`KȢLQ? ;u|z E +d1 ʵ?P hKm 4_#3#Ы_-Yq-.TMW OWaaq#J.\bA%&1bQ a^AQ7ߕ72zvRfݢjdt=(]K&_!=;@ZN8#4j/Y]jZFˌCHVrM{i2|y3C.M1(Wy-\HNp| A FP 2XR ]5~Z"$H tގzCrx0[c!Ku/zhN_~ZR9QvK.zQk%jvyj)ʨNR7Yv7 qrJ:~s B;DPկչ=uS#|*w`!@0tP..\.iɉc;\qvMަfSmc }C_=+KPYO>XCuwjDkТL)(Cl;!:P9Maْ,lJyQ+$:2M(5keAn ~ZY(;&Pb! 
fHF hrt6i&Zr,%I1 (h/[ڴLwiȆ 0u_I eK}LѡƅC/m-IZ\#lAqEФ2̛}١:X9mOJUNmNŖV\Ye٤H:݋L,'F VC "SMc-SiV鷌h$͟yu_N(Zдs05`[#0C x+Yh"F 8(<W 0#0"E9#P9<.T>F@ìn^;cm``EY;ldF 0(<.'F-0[ja YuH@F@ˆr#@D(pb!@YfQ}n"h,!QUrڨ2QrX,Uv`%eЪy*\7~fcgh[Qdwt??Rn${k"to >_?.$Pvn?_iоUsj{iAku`9UP ic`0".K'a㞣mci8|܃{ |j++^ :ъ|j]Go6Q} 4֊J8v&dS/˩zX6XN SNFe_*"qTzchj"Q:{_Jʬ0OYpפa@!?CjR@pՔB]@q`1dkddާ]~~9#1:*8wt=d5Lu-?#0W9#>=}Vc@IDAT.2qI`E,SQ%1~0w6pbZ4:ɀ +}FU>p~ZgxIT)Za7'Z7d &ү[;xIЧk6|b3Y+_:b܆z+yԛGE_8 \ Y7 2O`;VXRE%нux""h(2i(]Q#0Ar~,iNmZgoLIJTT-!Y 9u.2}A@V{&_ q_ϋdR|-cQN<5 zwj#++yQC-cr|iד;&r᩿ASES6ʐjɵe}=D@YNF;A^+ՂyWznZh@7x~okw·@88gHԦ=Cn KCVùlN8C>ޣC&e?΢``bJw) DL*j:fe%GTzxfxo{ c16RYy<:@l@L-V9 Z^"Z1@hZQVX)ʲZqTuE"&4(.0>dtY󗊡'/^ Ӳr/btxYeeߟC'T{řR\L ZJv-( 6U ZhsAhݎ-oIOIg}]#*+*  GXkQNCk3T/@93# ]/RUZEP$kYi^qyA[?v.žj{ ,E1;\c43r:_@ytc(Gܞ7d:ٯ\ùCV4IhiXjډp$<**`JժJ.W`hFo?~ړ_L3y,~xv!MqN4}\8Ŷ< ;uA!j\Gu+e^5vN9p|u#(R'ܙվ!^^)K8C7v}En zwIW:g8E|^G|2͆ЧKIњ8d-] FZ5s6Yv:%.rL3^?twD3_\ͪ p>$x( U6~ݲ+hqYM} ncyy6-a@ve.E! [+UN}ZpG{wfF6k*BSCkLdŅpiKƯzmc|ubyTz QٗX`ř/50P &.kDy$=yuִ TTVkBu<"ΰ6W6^wç.T%\hnZ=3k{un6o6ҟAOSy]O9S\ύL[U &r]w k:6rIN_͜?\# P֑ŃRlTC5>9) 8{}pL4SE]õRI8e@+<8]ڦ6G8^<{<%)(Kт7OkyhQe H,\yjEЬE3d\{HSrbb P7Z@e2@h(Ss,uK/wl-A5Ƅ5;WcrZp#pCʲջzΛ[<^7 ^)(u4D\8"f|wp.k{U1@8B Oo] E1dƥi5Z⪪*rRFyqUjz`cI4ܐ^*`lN 6D δq`湇vo߃xbŅ.y\YƄ;ojo9Wq*Q<\e qqԽx Pަ)buܺ8#n| O2XF0#L r%ZCIAܩ)͚IVClw ,εrR @ķ?f8}[їxjELJVT?tزMfϼv}al${/7awJ= kz?P3wUrb~>0"y4kAʲ;/7%宻Z5O1Gq#@ >Y K]0'Y#IH*_ _Z޽3'#+K*z3oxIѽ\)BI߀sf5+4PZ k`*g|eE9F"L\(TeL]69wEaHk45['uہ4m¢}/"6BiTB -ٕc2r6qOD V\ywnuxX( G J2E8jl-8V/s-a>T-K-0#Ъ>V;0<30Z @Vczi82S4..zI||bҰӮ[Q&9gc8E˶a(\4E£ 4On+:$_dIiiEle\h:1HRjN<򗘘lɓt2yC'q7?oo*//%"BVWp:[׵\#=r=:T.k_F+A+d-7[(Y8Mp )kgbҫo]uC9-6!>l2d2m+jD];iHsrkayiI>s`CX!!+p d&kvʩA%sמMc"ogT%䭢uK'8phqCZ?ޙ<̣,Ӻqpm0ġX[ K1NH8[A.&5K:Aqk:_}m:c0"C2̪n;Efb 'Y>R.o-%}mBIh4 p߰ SGSU֊?xY~/'…R|fOؔBHR݂֤1,X+ >'N\qz wXzÕ̟?^uuZFˌnΦmCq I嵃u?M4JzA]Kz̗6sFA+8Pju)œ+wRpeK(fKDAD,11Ũ0p3[ ͘ظlxw@E~F-V k$)d E &Fß&PU SxFԊ(3o}rsK~(*'gN?;u~Tkk}BvOO`G xEẕVfRe"J EY)1qqyfeKN.UV}$%V߾{ϟ=uRVJ%@NJҢLB P(XgGl?gsC 
*x|KbOr[t1cwL6LpBOTftgazUr"F_B(SNkGI$eLV)XsNUpWvƼ3F]w/~?șlLѢT) ϥ5 <7_#и:.11&V[wb;ws1J*I]to?~mXWku{fYfOdL;Ճ@RB(ɡWO-4w?N*3nͷ_WQԅ/.>ko_9#\1H9U7fn m˺KQ9fԵπ>XG賯Yʵ9\Yxf5ewRQ aRbLkr%&3MiQբ?fN27m__zћ鿊`NV$ڤ; [8oKcbcr F@O4dю˞u]۟?SoYMW3ZguəP9 ;| # 5e,U~E2-t XqWhSlLQ0afs[\SCfKp[[Vm~|ofxվ &Ġ_+11gnD9m)Ɂ1 ȏ>ԩkEpoUoMoNzZW ~靳ŧ17DUzi<|m2-/a6 1 r:Ŧ&~4gf;b@Wbm]2X ;p:xn._ &I>n4ߨ8/u9ox:֘8G&9/#h@Њm y1R G+-3k|A-٩J3oJ+o0j -"`G:*a3>WRO6ӧo1#0 "ZC?͜E%rlWJOô#Y6ǜRJW{T購h7>wOfM4W@%9BQBNfyX|>TotBqΞ?(<7Z%IQm 5r7t˩Gq3>|`Qpբ,\29h\w'W/%%Ó^gOg0C8!^&q K#KT_rx\}~_^o0)eG_zz9}U6V,u0[D*Q#y3OEYsۻˇ;r|9'..!ils"dez8wCztb5zM !r]*}1Ƣ,zG\\r}cl|ԅ,5Xl D:ѫx`0#p8Rl+d+3wï߂AMfdJ~lnSbB3|l-x*6y?0l +m<]^8ȥ7;MC=S7`%~r<@D q>5b)ʄvKb?A{w:?/_"h+:l J?e}%],"3@cB@ E$,UDA\2TVJV*CNe4&}圆 u  >}᲼G8"]n|N8UY-#Khخ^G#XN\`?ZQDKUl,kZ#=\+݀K\fI)Y9K -#4t_*.?n2+N3-۹*++'))):K8Lpd4 vzPNNc˩:x0@(^~o[!$?gա*fh ZSmړgARtL<E%0{7h01HY&wrx\ :n÷Έ|Ơ*'Vr*x`? '7G}ZOv8?lu.F&9#i*VO;M,j5ɽm3AFj_X]Nχda |qtl!g]ql{ faHI~v ͽ/ zqu]]G`]Poczth?4 >C! t@aL+oHN#SBADF *^PUlfd$E/$Ud*߱3PYU}d_~^87⍳Kۖqw_,zYѻƏRo6@d`+*"muڵj&+v?{v/߰ )=4yKd5& wŹ֫!`@ç\ǂ '_™ѠtL4Q4U(QEॲJ 9Ɖm\BN kaDmce sp4szC_崡1b9%M0F@ VBR%Uj4H/eX)k.%; Knm#MXdeZPPXJ]Љ,L}:gCl R?z1}SGuTkMi㐒NEw9ւjr壛ټY튯}ޥ]K;C"X~u'e;]+?G8U'6._Fk_|O8gxa̘9$Dڡu\/;lg9r闌B얰xv鏜z#&, 0\#wj24 {O*. %Vl|ŵ*\lwlB]?x &r}_B+N3[ὥ`hʼnvơ;1\3)Ey6)4+F2"As gpfGN^y*ڹpUcd0]%tƏ y_Qa;4G8=+ |G3*~8<_4/7s_xgtnY}V9ݹOQHoڬg_$oo@>>ԕR!\M䴡14Pݟ2@`r r}bÅ \5ú_C;{Sny`d*ÓK3pxR<2{:/­şsWZkaUMN"4X 7|Ou%D(%I#XNkF`C@ E" 9n\2B_]8sª>¢C^9A!/pÊeOK/L2'F+4>m3IKf9oYN}És1@d";j}S/w?w^ze98ӆBkX7`;( ބwM|Oq_ rL 3-Iо8\h* zK'_%R#1Ҕמ9nzg;>|`]/REׇ`|zGV |7?x%R(э@r %nԤ( pr4)m96BI&aˆ>ʤ G|"hFAZ0 g}#Ϡ7gimݺ+J,NC1@T# KIX97#Wj8 Ng͛ħ{: "HJ G )BQ}PwEl/%q&|LNL!dS/H%𛂦t7=[X"˩{XYN{F-%*iUӟ_ɠIp,z}kRr0YAAQl)ĴN6)Ф$S^qKn7Iϝ9z|=c3罈\* iO=ts3)v,N6,N<`(gf-:k'xa\0gG}̹=(/˅"Zeq&|hOQOrx\"`@RJEE^Ui`tE݂:$,&F˩ K~ a"ApCP&Mv"et#&%cZSVENЍX(LkNr)p㏗N{J/9~{3~x~1rW!,qYD<|C3g>{ޓ4- ̙E]Ib;bi0 ! 
I2 Ad.ޑ,oI;lR@?LBS# s<YN=r(Ǖ06#  ލ7hYUW||֕0/wmvqKA,f#%9QJi`Ļ`Ս4TT2GAQfCZfG4@`OhPLI2?^P\9 -x5*_~.c9uO˩{\x/#D'j(QgKŀ_ϙʾQ%%wfCA=]ڶ4Q1;WCRD &;>=:q6:i?%?p㳦<|M.+l]nI>OSgλJE9$;~Aio-,`nDW3A;Fs&%6VjQTbN_ o}~bm`o> lG+Zs s 㰓-Fuj*-imXFkHF͜K7?҃x =:d:V}ĕ&/h2S W\!N2!g,oeG/,i,qu`1CuYvg{An=lj]WCl-SMF%۰)~}FǶoٱJkV.EM(QcoY15mef=ҹ{-s?XNk0r0|W[Nkj-F lQ n1$ ~`}Ѭ$G8%3#j| ϺQMEͺ#-9x18 StzvtZOcF@ЊYhiD}\,D&l$9W7Orp~bu1c4>#88kM4(fuCi+!,AC0@#B hE9 -U kr|ѣ⼣q4E(ҾӰz[xWCQ7 {qfE9VV9~>8r"f>s`"eJ}*(ǥyq 8ծywtk' rySqj|O81/u2ǝ:{ ƒS(eM6mW&`Fog׹o43S@y7Ɂ )QqNBQG ' O0[?)0{à\jrc%0RD8YLN .&ūft} x xp$lI~aXvt瓆zG7_Q9_7Сu0?ԭc\Xq^mZ+{ʧY K!E:L*HM xMxC:@8 TM$qx`\N?п[[d=~zYf,~ a9GD#RB8݄I KUUeIQiyѿw TVUti7텣q#*䖰qw2xgZѯ3N'33+š9 .ꨞ?t u"~vt:}C*Y/ GoIE4? @hqrT雬-?šgԿےAۿ=Ϛ>4߼36Z"--ʉ+srɟ37U?33_{ߗJ̃Ohz/(29F޵y5aq9[nۋO=d:,2˜^,k8ш 22O:(}luJD:@.Z;Y7o Y魵uk/˜n[5CN:v.'?2BZw+8GgC꘭u̗s8v!=ǟ{= #|ʞe'f<7jdQece(? o5OhқTKN?Z}ɯ}ΡyP;\3t˶LӳFD5sZnsz̾N6r p7ztq.ǫV|tC-]<0F#CGЛyPsh;=?.\~Mܜ7?&CN4GD?,s|A(B3$18W2%}w c,2-co bxHpwϑzi n =4ܵOy`Sڧ}x >,9s{,bZܼ˓էgvΰ^}v\Njeok>1߾vvT$oI׋]鞹봝~JZP!giTC@ tX2Y; W8>sGْf<ٗisLNҵKL0z{2+[QkA8ݞ5>C>?9ŗ2o9 ()O5Ua$L:kA!LӆT؆_EUYSzs}}͝y5ss`vr,k[L_˒2prc<ء}+Jte77H*S,[,{g:Z2k ϡJgjsiG#tC:kaQ:H!lZ0ʝ F;[Q9X֛"+qÞ}?]Fۥ0&])IkmAh9T["}/l"'=ǟR4H֡;\/v PLCמwGK@r:m! 
#@hqV, =vjU|yPi$Lwr7wJCd٣wg]t׎ܠddi 2-62:$eqe8E]zy~|k.E헬sآwY"sX򣞽t'zR~WAAawiY!5#}unW.Yſl۲E6틬0@\^ ogԫrҋruQI-<|q>\|sNơAr?+h<%{@38PxZ7J@ (M8 Kxj8 u{׭[k_7F 5]VVfҠ% \MGYoӁ5Pn˅z\p|BO,Yz\IV~q(I@:@e߷ =UvF~i_( ӡE^0(րO' g d㴋:(jj?2{˺ꩭy$'Δּ3JљI~sw\MeN+W^$n#7N IZ(KWgN>?Vd|̗@9l!p=r@YB@Ygd(C˲~Hb Jg]/0HM*8ٜ= 5θ*ȷ^\~ktFoqöKEμBN$@%U'mSNdӔS 5[} X:}eٜ7S{.>MjrR3[ё=;7Ot6{N5KcWl:Zu*tb|hlOVA#D=|4À%lY0@ 9嶺IgGTm+a.S|Nnsæ8?y|،P |3/d]{/a p_k7:+Ǐ?lq46O" >e_\ᵨTyH>4/U'*+4iRgIEfX YϹլ;~~oG^WO>iMg:N͕ }Unm\>y+gV JqXJ?{ƾwۘ1̤D*Y3=.%zӑ?-$!5v팪x|GWu:V~uX7n]?23~~r$D#^&s̷/C^k}gqńʓk+y$j"~S/Q3J}^tRsƞPХDŽ@> Ak2/&HΧsE@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@mˢGUL_2q{e,g&j=lQ{u@@]eߟeUrUƺk3GYk(7^J(#3Cg@l?b[ @@!ЪrIBa!@k2?yVVc^-GM;|9]/+DŜ+@@*jrIEZ;uoI UgZ|KWZ8'0f`j*w 7HkTh{>bb}.~ikY\ܼ姝_` Һ|׷ ZIDATi瞷aѳ^,y  Q -9dhuB25E@d[ FSH l=ov0#.'H>0݌QqeBj@@4P-27ItKNjg)13ZH? :>ɍ{-{{O3C,+F@h@e}qCͬ e Hzf\  69 {0'm޹KQG$}  r(˹Oa"y3qM  pHhRy$uID< }̷L  m,@YM!uʼnz+  @ EW@@h=ֳ%g@@,@܁<  zʭgK  X@yT@@r6EU"p[ UwI`92ƚ2RՔq6%  EyE]rÒgG:jW)A*u%Syr& Jv]YߟUj˒J`nQvzllioNHS[ C?kϖSs ,N;{$3Y<3mFe0qq<3Gs'6mITzӤI=mJeXuĬucEkix˰,) uxub¯T!ʾZcnn7_^\p ucէ /swT}3<7L|`]uMg#Ψ,[itjjv&=N:FZ>;?6pg9c!51/Y1bynH߿m̘MzdEՌtfڈ|m=yraX_]1*)YςdˉM-_ZcnL++[p6l&_5.i:WTM&u$lML  >hQnCk!җ Wcw.i4ƛZo=cvmAnVSwkJ#oun ctyet硒g_Ϲ}ݠ6XZwwobԹ@[nwLvM$i%^)/<pPZѝM$2.CՔ&%0V=~G&oVAwǍ7.=uS][[>$ejf W4fڻT~G-$1SjT~guW'}x2ZFv gGswn &)vs-{)LIn췮N"﬇[u)/جn٤Pk/H#S]:Fre #Պ4Y fcR_5tY_ZA4A] -u~- m.zf}~ܿ5R e/}_bW߳_O_D;ۛ.z]-J"s޹;N/W˾EnT/# p0~ʞZ9vwKr^H7g|NJ>7#Q'-waU\h`4N ʀh|dիɔdQ7vMA&<|VCI7%0X2/;OR Ni1KL4IWǫʺGVךm}SI_Z7]C6"~S|e-eKdd+5䓣ytl64  pP|Ps[PwW {W֥}cI}:z]2 K5҂yYv>!7UiZ LTZ7HsV[W$ܨ붂>~"9 3W]M0W{WnkS+M*-]H4zQc۔IbKV$emdx?vPkƎ }b&%KnzC؆ X@Z5ѹBzsLήTUɍ{tkdX?qѭ)Gj%UUtC:Dоl1.`s].琫ԥsǨUC؆ \8^̌QE2;BԮ@Y>oJR9@0ܪ3?󯖹Y¸YGr  @r(K-dG0wjxf\1i<=_7)1@@lԋ RU@oFzp?{?('Gf')w̨ :3! 
-YʲE2؇rw[\+2Q<3x  @L͝GdsL:Q꧎`{:iq*  @i< 6^vbQ\&7  ОZ5P]V%W]%#8sn=aE]13O08:n7'-@@&rv5FUL_mH_ ?%Pޤs]ia|x9G@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@iGUL_|q{4gC3W|7{[ޡ;XF.Ɨ'w͌ĸߵzR@@ ڤEb◜K]y:=ieeZ]NZC\cGfT2M8@@@@Gwm @@+7Wo <*LlJf#N-(@@ -??nJ:[A6;y՚# (yKK%oVt{ieen4u[&.G{6;muU׬wy?Ԇ'3BZz-oh:?6qbim>D#57VWuNzmrMNMH cl)HkSF'Ϩ,[2Ɉ+igG~AZɟJՉz~?Y=UJYEpz/F\ /H1򣣿 /XZ돖7^ʱo9Y+77\]Yig㤬c$ϻ峵,Ouw.bWXQک}g]zsytRC}}_t @ve׋W$kr% 4>'I-6tEvTɺ?N{rGF K88W5ɷ-$wEXE&nԥ6>.)ֳWO,sz]F'ARVF7%PNk4h ^b圷 =!ܿeir2Åsz=n?X %Fm,K-Od9ܳb/fϑWڶPy("a.H=G@~Ę Yn|s;P1.,ay$? Α-/.8vN|7欝?jv @ZkzSj.bqA[J{o#>$QY~mUwpl> KtuNo[]QNs1K7v쿦8^yV}2(#vl] к-3WTM&_kK4|-@~ zJ^Jp:NZgTkN˷/$M5xDU;sUrI3pl{BKKswaF:p3m^>|M#֓zɓ \}u]8g1;FZe3%?0+Yo?X[;p6l&_|>+-–D"pO"/_>Yk60iu{ *eDnIm֬YҨմ%컬"=Z/dg{=_Vw˸L70|H$[;_ȊLZ5v$eLoW$~*&)];\ՙ^a}-n:J_}lVfn@N%-ԚK:~dm(owoJ݁W̿{G>V@@hQ>){Wד)w}Ci\T}Zu]-deǟ3-頑9r/+U?1\g{LUfBwf,:1eƎWS^Fsdo%z*R b 5{)z]xK{Ub[˟ʞk> Rnr؏ @GjQkiHaDnΣGAa+o|S"LeҟL:ZbQA`~$I/ܫӺ⛫>:=i RJGg>'eܾcS>ܿ7$J*&Fץ6Pn <݋~_NJݜݖNe*W\1auwk;K7%0X.;OR Ni1KL4xUYhZO}*Kpp~wKKxl򃡲%2ĕҚFcdQyUʵV7vMA&<|~+&{lJu<Εkp3K@[ g-2@\罋`KsK 7-DҒeyҊDI59#fcCgT_س`FQ:X\9O9~ZNnbGϗKƻpe].9.)#A<_&_o_xG?˿3k&]-zL6VaY[OzԂZpi2TJ0"}]H4z'_/:s2xU21X)JK&J%~IڹQcc7ȍ$G}檯gg@ZA@n.zFV:ol{ڵ@ h ݊+ݕ~/ ~Co+8\כ;MǏ?qz%UU4ֿG}٦IX.TUX>*ā^6V^coZ5} @EsP52L9c L{E m.b_Z965/'@9킛f$*2cT# KVf'iֱ԰5Bq;ي  К9 5}h׶5+/yowt|wDs^{LR۝!}wW9y[@~=ru9D@89{ȾNGl}<ʯ 8LyGM7zQot<8eN@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@  [.eTIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/figures/hwreqs.svg0000664000175000017500000012130300000000000022267 0ustar00zuulzuul00000000000000 Produced by OmniGraffle 6.5.2 2016-04-26 14:57:28 +0000Canvas 1Layer 1Controller NodeCompute Node 11-2CPUBlock Storage Node 1Object Storage Node 1Object Storage Node 2Hardware RequirementsCore componentOptional component8 GBRAM100 GBStorage2-4+CPU8+ GBRAM100+ GBStorage1-2CPU4 GBRAM2NIC2NIC1NIC1NIC4+ GBRAM1-2CPU1NIC100+ GBStorage100+ 
GBStorage/dev/sdb/dev/sdb/dev/sdc/dev/sdb/dev/sdc1-2CPU4+ GBRAM100+ GBStorage/dev/sdc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/get-started-with-shared-file-systems.rst0000664000175000017500000000325100000000000026406 0ustar00zuulzuul00000000000000================ Service Overview ================ The OpenStack Shared File Systems service (manila) provides file storage to a virtual machine. The Shared File Systems service provides an abstraction for managing and provisioning of file shares. The service also enables management of share types as well as share snapshots if a driver supports them. The Shared File Systems service consists of the following components: manila-api A WSGI app that authenticates and routes requests to the Shared File Systems service. manila-data A standalone service whose purpose is to process data operations such as copying, share migration or backup. manila-scheduler Schedules and routes requests to the appropriate share service. The scheduler uses configurable filters and weighers to route requests. The Filter Scheduler is the default and enables filters on various attributes of back ends, such as, Capacity, Availability Zone and other capabilities. manila-share Manages back-end devices that provide shared file systems. A manila-share service talks to back-end devices by using share back-end drivers as interfaces. A share driver may operate in one of two modes, with or without handling of share servers. Share servers export file shares via share networks. When share servers are not managed by a driver within the shared file systems service, networking requirements should be handled out of band of the shared file systems service. Messaging queue Routes information between the Shared File Systems processes. For more information, see `Configuration Reference Guide `_. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/index.rst0000664000175000017500000000532400000000000020436 0ustar00zuulzuul00000000000000===================== Installation Tutorial ===================== .. toctree:: :maxdepth: 2 get-started-with-shared-file-systems.rst install-controller-node.rst install-share-node.rst verify.rst post-install.rst next-steps.rst The OpenStack Shared File Systems service (manila) provides coordinated access to shared or distributed file systems. The method in which the share is provisioned and consumed is determined by the Shared File Systems driver, or drivers in the case of a multi-backend configuration. There are a variety of drivers that support NFS, CIFS, HDFS, GlusterFS, CEPHFS, MAPRFS and other protocols as well. The Shared File Systems API and scheduler services typically run on the controller nodes. Depending upon the drivers used, the share service can run on controllers, compute nodes, or storage nodes. .. important:: For simplicity, this guide describes configuring the Shared File Systems service to use one of either: * the ``generic`` back end with the ``driver_handles_share_servers`` mode (DHSS) enabled that uses the `Compute service` (`nova`), `Image service` (`glance`), `Networking service` (`neutron`) and `Block storage service` (`cinder`); or, * the ``LVM`` back end with ``driver_handles_share_servers`` mode (DHSS) disabled. The storage protocol used and referenced in this guide is ``NFS``. As stated above, the Shared File System service supports different storage protocols depending on the back end chosen. For the ``generic`` back end, networking service configuration requires the capability of networks being attached to a public router in order to create share networks. If using this back end, ensure that Compute, Networking and Block storage services are properly working before you proceed. 
For networking service, ensure that option 2 (deploying the networking service with support for self-service networks) is properly configured. This installation tutorial also assumes that installation and configuration of OpenStack packages, Network Time Protocol, database engine and message queue has been completed as per the instructions in the `OpenStack Installation Guide. `_. The `Identity Service` (`keystone`) has to be pre-configured with suggested client environment scripts. For more information on various Shared File Systems storage back ends, see the `Shared File Systems Configuration Reference. `_. To learn more about installation dependencies noted above, see the `OpenStack Installation Guide. `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/install-controller-debian.rst0000664000175000017500000000270500000000000024376 0ustar00zuulzuul00000000000000.. _manila-controller-debian: Install and configure controller node on Debian ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Shared File Systems service, code-named manila, on the controller node that runs a Debian distribution. This service requires at least one additional share node that manages file storage back ends. .. include:: common/controller-node-prerequisites.rst Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # apt-get install manila-api manila-scheduler python3-manilaclient #. Edit the ``/etc/manila/manila.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. code-block:: ini [database] ... connection = mysql+pymysql://manila:MANILA_DBPASS@controller/manila Replace ``MANILA_DBPASS`` with the password you chose for the Shared File Systems database. .. include:: common/controller-node-common-configuration.rst #. 
Populate the Shared File Systems database: .. code-block:: console # su -s /bin/sh -c "manila-manage db sync" manila .. note:: Ignore any deprecation messages in this output. Finalize installation --------------------- #. Restart the Shared File Systems services: .. code-block:: console # service manila-scheduler restart # service manila-api restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/install-controller-node.rst0000664000175000017500000000124200000000000024074 0ustar00zuulzuul00000000000000.. _manila-controller: Install and configure controller node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Shared File Systems service, code-named manila, on the controller node. This service requires at least one additional share node that manages file storage back ends. This section assumes that you already have a working OpenStack environment with at least the following components installed: Compute, Image Service, Identity. Note that installation and configuration vary by distribution. .. toctree:: :maxdepth: 1 install-controller-rdo.rst install-controller-ubuntu.rst install-controller-debian.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/install-controller-rdo.rst0000664000175000017500000000322200000000000023733 0ustar00zuulzuul00000000000000.. _manila-controller-rdo: Install and configure controller node on Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Shared File Systems service, code-named manila, on the controller node that runs Red Hat Enterprise Linux or CentOS. This service requires at least one additional share node that manages file storage back ends. .. 
include:: common/controller-node-prerequisites.rst Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # dnf install openstack-manila python3-manilaclient #. Edit the ``/etc/manila/manila.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. code-block:: ini [database] ... connection = mysql+pymysql://manila:MANILA_DBPASS@controller/manila Replace ``MANILA_DBPASS`` with the password you chose for the Shared File Systems database. .. include:: common/controller-node-common-configuration.rst #. Populate the Shared File Systems database: .. code-block:: console # su -s /bin/sh -c "manila-manage db sync" manila .. note:: Ignore any deprecation messages in this output. Finalize installation --------------------- #. Start the Shared File Systems services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-manila-api.service openstack-manila-scheduler.service # systemctl start openstack-manila-api.service openstack-manila-scheduler.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/install-controller-ubuntu.rst0000664000175000017500000000324500000000000024476 0ustar00zuulzuul00000000000000.. _manila-controller-ubuntu: Install and configure controller node on Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Shared File Systems service, code-named manila, on the controller node that runs Ubuntu. This service requires at least one additional share node that manages file storage back ends. .. include:: common/controller-node-prerequisites.rst Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # apt-get install manila-api manila-scheduler python3-manilaclient #. 
Edit the ``/etc/manila/manila.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. code-block:: ini [database] ... connection = mysql+pymysql://manila:MANILA_DBPASS@controller/manila Replace ``MANILA_DBPASS`` with the password you chose for the Shared File Systems database. .. include:: common/controller-node-common-configuration.rst #. Populate the Shared File Systems database: .. code-block:: console # su -s /bin/sh -c "manila-manage db sync" manila .. note:: Ignore any deprecation messages in this output. Finalize installation --------------------- #. Restart the Shared File Systems services: .. code-block:: console # service manila-scheduler restart # service manila-api restart #. By default, the Ubuntu packages create an SQLite database. Because this configuration uses an SQL database server, you can remove the SQLite database file: .. code-block:: console # rm -f /var/lib/manila/manila.sqlite ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/install-share-debian.rst0000664000175000017500000000464600000000000023323 0ustar00zuulzuul00000000000000.. _share-node-install-debian: Install and configure a share node running Debian ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure a share node for the Shared File Systems service. For simplicity, this configuration references one storage node with the generic driver managing the share servers. The generic backend manages share servers using compute, networking and block services for provisioning shares. Note that installation and configuration vary by distribution. This section describes the instructions for a share node running a Debian distribution. Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # apt-get install manila-share python3-pymysql #. 
Edit the ``/etc/manila/manila.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. code-block:: ini [database] ... connection = mysql+pymysql://manila:MANILA_DBPASS@controller/manila Replace ``MANILA_DBPASS`` with the password you chose for the Shared File Systems database. .. include:: common/share-node-common-configuration.rst Two driver modes ---------------- .. include:: common/share-node-share-server-modes.rst Choose one of the following options to configure the share driver: .. include:: common/dhss-false-mode-intro.rst Prerequisites ------------- .. note:: Perform these steps on the storage node. #. Install the supporting utility packages: * Install LVM and NFS server packages: .. code-block:: console # apt-get install lvm2 nfs-kernel-server .. include:: common/dhss-false-mode-configuration.rst .. include:: common/dhss-true-mode-intro.rst Prerequisites ------------- Before you proceed, verify operation of the Compute, Networking, and Block Storage services. This options requires implementation of Networking option 2 and requires installation of some Networking service components on the storage node. * Install the Networking service components: .. code-block:: console # apt-get install neutron-plugin-linuxbridge-agent .. include:: common/dhss-true-mode-configuration.rst Finalize installation --------------------- #. Prepare manila-share as start/stop service. Start the Shared File Systems service including its dependencies: .. code-block:: console # service manila-share restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/install-share-node.rst0000664000175000017500000000155300000000000023020 0ustar00zuulzuul00000000000000.. 
_share-node-install: Install and configure a share node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure a share node for the Shared File Systems service. .. note:: The manila-share process can run in two modes, with and without handling of share servers. Some drivers may support either modes; while some may only support one of the two modes. See the `Configuration Reference `_ to determine if the driver you choose supports the driver mode desired. This tutorial describes setting up each driver mode using an example driver for the mode. Note that installation and configuration vary by distribution. .. toctree:: :maxdepth: 1 install-share-rdo.rst install-share-ubuntu.rst install-share-debian.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/install-share-rdo.rst0000664000175000017500000000562000000000000022656 0ustar00zuulzuul00000000000000.. _share-node-install-rdo: Install and configure a share node running Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure a share node for the Shared File Systems service. For simplicity, this configuration references one storage node with the generic driver managing the share servers. The generic backend manages share servers using compute, networking and block services for provisioning shares. Note that installation and configuration vary by distribution. This section describes the instructions for a share node running Red Hat Enterprise Linux or CentOS. Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # dnf install openstack-manila-share python3-PyMySQL #. Edit the ``/etc/manila/manila.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. 
code-block:: ini [database] ... connection = mysql+pymysql://manila:MANILA_DBPASS@controller/manila Replace ``MANILA_DBPASS`` with the password you chose for the Shared File Systems database. .. include:: common/share-node-common-configuration.rst Two driver modes ---------------- .. include:: common/share-node-share-server-modes.rst Choose one of the following options to configure the share driver: .. include:: common/dhss-false-mode-intro.rst Prerequisites ------------- .. note:: Perform these steps on the storage node. #. Install the supporting utility packages: * Install LVM and NFS server packages: .. code-block:: console # dnf install lvm2 nfs-utils nfs4-acl-tools portmap targetcli * Start the LVM metadata service and configure it to start when the system boots: .. code-block:: console # systemctl enable lvm2-lvmetad.service target.service # systemctl start lvm2-lvmetad.service target.service .. include:: common/dhss-false-mode-configuration.rst .. include:: common/dhss-true-mode-intro.rst Prerequisites ------------- Before you proceed, verify operation of the Compute, Networking, and Block Storage services. This options requires implementation of Networking option 2 and requires installation of some Networking service components on the storage node. * Install the Networking service components: .. code-block:: console # dnf install openstack-neutron openstack-neutron-linuxbridge ebtables .. include:: common/dhss-true-mode-configuration.rst Finalize installation --------------------- #. Prepare manila-share as start/stop service. Start the Shared File Systems service including its dependencies and configure them to start when the system boots: .. 
code-block:: console # systemctl enable openstack-manila-share.service # systemctl start openstack-manila-share.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/install-share-ubuntu.rst0000664000175000017500000000517600000000000023422 0ustar00zuulzuul00000000000000.. _share-node-install-ubuntu: Install and configure a share node running Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure a share node for the Shared File Systems service. For simplicity, this configuration references one storage node with the generic driver managing the share servers. The generic backend manages share servers using compute, networking and block services for provisioning shares. Note that installation and configuration vary by distribution. This section describes the instructions for a share node running Ubuntu. Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # apt-get install manila-share python3-pymysql #. Edit the ``/etc/manila/manila.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. code-block:: ini [database] ... connection = mysql+pymysql://manila:MANILA_DBPASS@controller/manila Replace ``MANILA_DBPASS`` with the password you chose for the Shared File Systems database. .. include:: common/share-node-common-configuration.rst Two driver modes ---------------- .. include:: common/share-node-share-server-modes.rst Choose one of the following options to configure the share driver: .. include:: common/dhss-false-mode-intro.rst Prerequisites ------------- .. note:: Perform these steps on the storage node. #. Install the supporting utility packages: * Install LVM and NFS server packages: .. code-block:: console # apt-get install lvm2 nfs-kernel-server .. include:: common/dhss-false-mode-configuration.rst .. 
include:: common/dhss-true-mode-intro.rst Prerequisites ------------- Before you proceed, verify operation of the Compute, Networking, and Block Storage services. This options requires implementation of Networking option 2 and requires installation of some Networking service components on the storage node. * Install the Networking service components: .. code-block:: console # apt-get install neutron-plugin-linuxbridge-agent .. include:: common/dhss-true-mode-configuration.rst Finalize installation --------------------- #. Prepare manila-share as start/stop service. Start the Shared File Systems service including its dependencies: .. code-block:: console # service manila-share restart #. By default, the Ubuntu packages create an SQLite database. Because this configuration uses an SQL database server, remove the SQLite database file: .. code-block:: console # rm -f /var/lib/manila/manila.sqlite ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/next-steps.rst0000664000175000017500000000067700000000000021447 0ustar00zuulzuul00000000000000.. _next-steps: ========== Next steps ========== Your OpenStack environment now includes the Shared File Systems service. To add more services, see the `additional documentation on installing OpenStack services `_ Continue to evaluate the Shared File Systems service by creating the service image and running the service with the correct driver mode that you chose while configuring the share node. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/post-install.rst0000664000175000017500000000201400000000000021751 0ustar00zuulzuul00000000000000.. 
_post-install: Creating and using shared file systems ====================================== Depending on the option chosen while installing the share node (Option with share server management and one without); the steps to create and use your shared file systems will vary. When the Shared File Systems service handles the creation and management of share servers, you would need to specify the ``share network`` with the request to create a share. Either modes will vary in their respective share type definition. When using the driver mode with automatic handling of share servers, a service image is needed as specified in your configuration. The instructions below enumerate the steps for both driver modes. Follow what is appropriate for your installation. .. include:: common/dhss-false-mode-using-shared-file-systems.rst .. include:: common/dhss-true-mode-using-shared-file-systems.rst For more information about how to manage shares, see the `OpenStack End User Guide `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/install/verify.rst0000664000175000017500000000214700000000000020633 0ustar00zuulzuul00000000000000.. _verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Shared File Systems service. .. note:: Perform these commands on the controller node. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc.sh #. List service components to verify successful launch of each process: .. 
code-block:: console $ manila service-list +------------------+----------------+------+---------+-------+----------------------------+-----------------+ | Binary | Host | Zone | Status | State | Updated_at | Disabled Reason | +------------------+----------------+------+---------+-------+----------------------------+-----------------+ | manila-scheduler | controller | nova | enabled | up | 2014-10-18T01:30:54.000000 | None | | manila-share | share1@generic | nova | enabled | up | 2014-10-18T01:30:57.000000 | None | +------------------+----------------+------+---------+-------+----------------------------+-----------------+ ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.813673 manila-21.0.0/doc/source/reference/0000775000175000017500000000000000000000000017061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/reference/glossary.rst0000664000175000017500000000717100000000000021464 0ustar00zuulzuul00000000000000======== Glossary ======== .. glossary:: manila OpenStack project to provide "Shared Filesystems as a service". manila-api Service that provides a stable RESTful API. The service authenticates and routes requests throughout the Shared Filesystem service. There is :term:`python-manilaclient` to interact with the API. python-manilaclient Command line interface to interact with :term:`manila` via :term:`manila-api` and also a Python module to interact programmatically with :term:`manila`. manila-scheduler Responsible for scheduling/routing requests to the appropriate :term:`manila-share` service. It does that by picking one back-end while filtering all except one back-end. manila-share Responsible for managing Shared File Service devices, specifically the back-end devices. DHSS Acronym for 'driver handles share servers'. 
It defines two different share driver modes when they either do handle share servers or not. Each driver is allowed to work only in one mode at once. Requirement is to support, at least, one mode. replication_type Type of replication supported by a share driver. If the share driver supports replication it will report a valid value to the :term:`manila-scheduler`. The value of this capability can be one of :term:`readable`, :term:`writable` or :term:`dr`. readable A type of replication supported by :term:`manila` in which there is one :term:`active` replica (also referred to as `primary` share) and one or more non-active replicas (also referred to as `secondary` shares). All share replicas have at least one export location and are mountable. However, the non-active replicas cannot be written to until after promotion. writable A type of replication supported by :term:`manila` in which all share replicas are writable. There is no requirement of a promotion since replication is synchronous. All share replicas have one or more export locations each and are mountable. dr Acronym for `Disaster Recovery`. It is a type of replication supported by :term:`manila` in which there is one :term:`active` replica (also referred to as `primary` share) and one or more non-active replicas (also referred to as `secondary` shares). Only the `active` replica has one or more export locations and can be mounted. The non-active replicas are inaccessible until after promotion. active In :term:`manila`, an `active` replica refers to a share that can be written to. In `readable` and `dr` styles of replication, there is only one `active` replica at any given point in time. Thus, it may also be referred to as the `primary` share. In `writable` style of replication, all replicas are writable and there may be no distinction of a `primary` share. replica_state An attribute of the Share Instance (Share Replica) model in :term:`manila`. 
If the value is :term:`active`, it refers to the type of the replica. If the value is one of `in_sync` or `out_of_sync`, it refers to the state of consistency of data between the :term:`active` replica and the share replica. If the value is `error`, a potentially irrecoverable error may have occurred during the update of data between the :term:`active` replica and the share replica. replication_change State of a non-active replica when it is being promoted to become the :term:`active` replica. recovery point objective Abbreviated as ``RPO``, recovery point objective is a target window of time between which a storage backend may guarantee that data is consistent between a primary and a secondary replica. This window is **not** managed by :term:`manila`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/reference/index.rst0000664000175000017500000000121000000000000020714 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Reference --------- .. 
toctree:: :maxdepth: 1 glossary ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.817673 manila-21.0.0/doc/source/user/0000775000175000017500000000000000000000000016101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/user/create-and-manage-shares.rst0000664000175000017500000034500000000000000023351 0ustar00zuulzuul00000000000000.. _share: ======================== Create and manage shares ======================== .. contents:: :local: General Concepts ---------------- A ``share`` is filesystem storage that you can create with manila. You can pick a network protocol for the underlying storage, manage access and perform lifecycle operations on the share via the ``manila`` command line tool. Before we review the operations possible, lets take a look at certain important terms: - ``share network``: This is a network that your shares can be exported to. Exporting shares to your own self-service isolated networks allows manila to provide ``hard network path`` data isolation guarantees in a multi-tenant cloud. To do so, under the hood, manila creates isolated ``share servers``, and plugs them into your network. These share servers manage exports of your shares, and can connect to authentication domains that you determine. Manila performs all the lifecycle operations necessary on share servers, and you needn't worry about them. The important thing to note is that your cloud administrator must have made a share type with extra-spec ``driver_handles_share_servers=True`` for you to be able to use share networks and create shares on them. See :doc:`share-network-operations` and :doc:`share-network-subnet-operations` for more details. - ``share type``: A share type is a template made available by your administrator. 
You must always specify a share type when creating a share, unless you would like to use the default share type. It's possible that your cloud administrator has not made a default share type accessible to you. Share types specify some capabilities for your use: +------------------------------------+-------------------------+---------------------------------------------------------+ | Capability | Possible values | Consequence | +====================================+=========================+=========================================================+ | driver_handles_share_servers | true or false | you can or cannot use share networks to create shares | +------------------------------------+-------------------------+---------------------------------------------------------+ | snapshot_support | true or false | you can or cannot create snapshots of shares | +------------------------------------+-------------------------+---------------------------------------------------------+ | create_share_from_snapshot_support | true or false | you can or cannot create clones of share snapshots | +------------------------------------+-------------------------+---------------------------------------------------------+ | revert_to_snapshot_support | true or false | you can or cannot revert your shares in-place to the | | | | most recent snapshot | +------------------------------------+-------------------------+---------------------------------------------------------+ | mount_snapshot_support | true or false | you can or cannot export your snapshots and mount them | +------------------------------------+-------------------------+---------------------------------------------------------+ | replication_type | dr | you can create replicas for disaster recovery, only one | | | | active export allowed at a time | | +-------------------------+---------------------------------------------------------+ | | readable | you can create read-only replicas, only one writable | | | | active export 
allowed at a time | | +-------------------------+---------------------------------------------------------+ | | writable | you can create read/write replicas, any number | | | | of active exports per share | +------------------------------------+-------------------------+---------------------------------------------------------+ | availability_zones | a list of one or | shares are limited to these availability zones | | | more availability zones | | +------------------------------------+-------------------------+---------------------------------------------------------+ | mount_point_name_support | true or false | share can or cannot have customized export location | +------------------------------------+-------------------------+---------------------------------------------------------+ | encryption_support | share | share is encrypted with share encryption key | | +-------------------------+---------------------------------------------------------+ | | share_server | share is encrypted with share server encryption key | +------------------------------------+-------------------------+---------------------------------------------------------+ | provisioning:mount_point_prefix | string | prefix used for custom export location | +------------------------------------+-------------------------+---------------------------------------------------------+ .. note:: - When ``replication_type`` extra specification is not present in the share type, you cannot create share replicas - When the ``availability_zones`` extra specification is not present in the share type, the share type can be used in all availability zones of the cloud. - When ``mount_point_name_support`` extra specification is not present in the share type, or is set to False, you cannot customize the export location. - ``status`` of resources: Resources that you create or modify with manila may not be "available" immediately. 
The API service is designed to respond immediately and the resource being created or modified is worked upon by the rest of the service stack. To indicate the readiness of resources, there are several attributes on the resources themselves and the user can watch these fields to know the state of the resource. For example, the ``status`` attribute in shares can convey some busy states such as "creating", "extending", "shrinking", "migrating". These "-ing" states end in a "available" state if everything goes well. They may end up in an "error" state in case there is an issue. See :doc:`troubleshooting-asynchronous-failures` to determine if you can rectify these errors by yourself. If you cannot, consulting a more privileged user, usually a cloud administrator, might be useful. - ``snapshot``: This is a point-in-time copy of a share. In manila, snapshots are meant to be crash consistent, however, you may need to quiesce any applications using the share to ensure that the snapshots are application consistent. Cloud administrators can enable or disable snapshots via share type extra specifications. - ``security service``: This is an authentication domain that you define and associate with your share networks. It could be an Active Directory server, a Lightweight Directory Access Protocol server, or Kerberos. When used, access to shares can be controlled via these authentication domains. You may even combine multiple authentication domains. Usage and Limits ---------------- * List the resource limits and usages that apply to your project .. 
code-block:: console $ manila absolute-limits +----------------------------+-------+ | Name | Value | +----------------------------+-------+ | maxTotalReplicaGigabytes | 1000 | | maxTotalShareGigabytes | 1000 | | maxTotalShareNetworks | 10 | | maxTotalShareReplicas | 100 | | maxTotalShareSnapshots | 50 | | maxTotalShares | 50 | | maxTotalSnapshotGigabytes | 1000 | | totalReplicaGigabytesUsed | 0 | | totalShareGigabytesUsed | 4 | | totalShareNetworksUsed | 1 | | totalShareReplicasUsed | 0 | | totalShareSnapshotsUsed | 1 | | totalSharesUsed | 4 | | totalSnapshotGigabytesUsed | 1 | +----------------------------+-------+ Share types ----------- * List share types .. code-block:: console $ openstack share type list +--------------------------------------+-----------------------------------+------------+------------+--------------------------------------+--------------------------------------------+---------------------------------------------------------+ | ID | Name | visibility | is_default | required_extra_specs | optional_extra_specs | Description | +--------------------------------------+-----------------------------------+------------+------------+--------------------------------------+--------------------------------------------+---------------------------------------------------------+ | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | dhss_true | public | - | driver_handles_share_servers : True | snapshot_support : True | None | | | | | | | create_share_from_snapshot_support : True | | | | | | | | revert_to_snapshot_support : True | | | | | | | | mount_snapshot_support : True | | | c39d3565-cee0-4a64-9e60-af06991ea4f7 | default | public | YES | driver_handles_share_servers : False | snapshot_support : True | None | | | | | | | create_share_from_snapshot_support : True | | | | | | | | revert_to_snapshot_support : True | | | | | | | | mount_snapshot_support : True | | | e88213ca-66e6-4ae1-ba1b-d9d2c65bae12 | dhss_false | public | - | driver_handles_share_servers : False | 
snapshot_support : True | None | | | | | | | create_share_from_snapshot_support : True | | | | | | | | revert_to_snapshot_support : True | | | | | | | | mount_snapshot_support : True | | +--------------------------------------+-----------------------------------+------------+------------+--------------------------------------+--------------------------------------------+---------------------------------------------------------+ Share networks -------------- * Create a share network. .. code-block:: console $ manila share-network-create \ --name mysharenetwork \ --description "My Manila network" \ --neutron-net-id 23da40b4-0d5e-468c-8ac9-3766e9ceaacd \ --neutron-subnet-id 4568bc9b-42fe-45ac-a49b-469e8276223c +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | name | mysharenetwork | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | created_at | 2020-08-07T04:47:53.000000 | | updated_at | None | | description | My Manila network | | share_network_subnets | [{'id': '187dcd27-8478-45c1-bd5e-5423cafd15ae', 'availability_zone': None, 'created_at': '2020-08-07T04:47:53.000000', 'updated_at': None, 'segmentation_id': None, 'neutron_net_id': 
'23da40b4-0d5e-468c-8ac9-3766e9ceaacd', 'neutron_subnet_id': '4568bc9b-42fe-45ac-a49b-469e8276223c', 'ip_version': None, 'cidr': None, 'network_type': None, 'mtu': None, 'gateway': None}] | +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. note:: This Manila API does not validate the subnet information you supply right away. The validation is performed when creating a share with the share network. This is why, you do not see some subnet information populated on the share network resource until at least one share is created with it. * List share networks. .. code-block:: console $ manila share-network-list +--------------------------------------+----------------+ | id | name | +--------------------------------------+----------------+ | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | mysharenetwork | +--------------------------------------+----------------+ Create a share -------------- * Create a share .. note:: If you use a share type that has the extra specification ``driver_handles_share_servers=False``, you cannot use a share network to create your shares. .. 
code-block:: console $ manila create NFS 1 \ --name myshare \ --description "My Manila share" \ --share-network mysharenetwork \ --share-type dhss_true +---------------------------------------+--------------------------------------+ | Property | Value | +---------------------------------------+--------------------------------------+ | id | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | | size | 1 | | availability_zone | None | | created_at | 2020-08-07T05:24:14.000000 | | status | creating | | name | myshare | | description | My Manila share | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | None | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | None | +---------------------------------------+--------------------------------------+ * Show a share. .. 
code-block:: console $ manila show myshare +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | id | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | | size | 1 | | availability_zone | nova | | created_at | 2020-08-07T05:24:14.000000 | | status | available | | name | myshare | | description | My Manila share | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | None | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | 100% | | export_locations | | | | id = 908e5a28-c5ea-4627-b17c-1cfeb894ccd1 | | | path = 10.0.0.11:/sharevolumes_10034/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = True | | | id = 395244a1-8aa9-44af-9fda-f7d6036ce2b9 | | | path = 10.0.0.10:/sharevolumes_10034/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = False | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ * List shares. .. 
code-block:: console $ manila list +--------------------------------------+--------------------+------+-------------+-----------+-----------+-----------------+------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+--------------------+------+-------------+-----------+-----------+-----------------+------+-------------------+ | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | myshare | 1 | NFS | available | False | dhss_true | | nova | +--------------------------------------+--------------------+------+-------------+-----------+-----------+-----------------+------+-------------------+ * List share export locations. .. code-block:: console $ manila share-export-location-list myshare +--------------------------------------+---------------------------------------------------------------------------------------------------------------+-----------+ | ID | Path | Preferred | +--------------------------------------+---------------------------------------------------------------------------------------------------------------+-----------+ | 395244a1-8aa9-44af-9fda-f7d6036ce2b9 | 10.0.0.10:/sharevolumes_10034/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | False | | 908e5a28-c5ea-4627-b17c-1cfeb894ccd1 | 10.0.0.11:/sharevolumes_10034/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | True | +--------------------------------------+---------------------------------------------------------------------------------------------------------------+-----------+ * Create a share using scheduler hints to specify the host. With scheduler hints, you can optionally specify the affinity and anti-affinity rules in relation to other shares. The scheduler will enforce these rules when determining where to create the share. Possible keys are ``same_host`` and ``different_host``, and the value must be the share name or id. .. 
code-block:: console $ manila create NFS 1 \ --name myshare2 \ --description "My Manila share - Different Host" \ --share-network mysharenetwork \ --share-type dhss_true \ --scheduler-hints different_host=myshare +---------------------------------------+-----------------------------------------------------------------------+ | Property | Value | +---------------------------------------+-----------------------------------------------------------------------+ | id | 40de4f4c-4588-4d9c-844b-f74d8951053a | | size | 1 | | availability_zone | None | | created_at | 2020-08-07T05:24:14.000000 | | status | creating | | name | myshare2 | | description | My Manila share - Different Host | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | None | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {'__affinity_different_host': '83b0772b-00ad-4e45-8fad-106b9d4f1719'} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | None | +---------------------------------------+-----------------------------------------------------------------------+ Share is created in a different host. .. 
code-block:: console $ manila list +--------------------------------------+-----------+------+-------------+-----------+-----------+-----------------+-----------------------------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+-----------+------+-------------+-----------+-----------+-----------------+-----------------------------+-------------------+ | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | myshare | 1 | NFS | available | False | default | nosb-devstack@london#LONDON | nova | | 40de4f4c-4588-4d9c-844b-f74d8951053a | myshare2 | 1 | NFS | available | False | default | nosb-devstack@lisboa#LISBOA | nova | +--------------------------------------+-----------+------+-------------+-----------+-----------+-----------------+-----------------------------+-------------------+ * Create a share using `mount_point_name`. When `mount_point_name_support` is enabled by your administrator, you can specify a custom mount point name during share creation. This name will be used in conjunction with the prefix set by the administrator to form the share's export location. The general workflow for using `mount_point_name`: - ``Creating a new share``: Specify a custom `mount_point_name` using the `--mount-point-name` flag. The `mount_point_name` should not exceed 255 characters in length. .. 
code-block:: bash openstack share create NFS 1 --share-type gold_provisioning_prefix \ --name MyShare --mount-point-name mount_abc1 \ --share-network 19d78275-55cb-4684-81f2-ec9c07701563 +---------------------------------------+--------------------------------------+ | Field | Value | +=======================================+======================================+ | access_rules_status | active | +---------------------------------------+--------------------------------------+ | availability_zone | None | +---------------------------------------+--------------------------------------+ | create_share_from_snapshot_support | False | +---------------------------------------+--------------------------------------+ | created_at | 2024-03-20T20:32:50.819345 | +---------------------------------------+--------------------------------------+ | description | None | +---------------------------------------+--------------------------------------+ | has_replicas | False | +---------------------------------------+--------------------------------------+ | host | | +---------------------------------------+--------------------------------------+ | id | 138a6884-7a9b-4d9a-9ac1-f565701a4b83 | +---------------------------------------+--------------------------------------+ | is_public | False | +---------------------------------------+--------------------------------------+ | is_soft_deleted | False | +---------------------------------------+--------------------------------------+ | metadata | {} | +---------------------------------------+--------------------------------------+ | mount_snapshot_support | False | +---------------------------------------+--------------------------------------+ | name | MyShare | +---------------------------------------+--------------------------------------+ | progress | None | +---------------------------------------+--------------------------------------+ | project_id | 44754d5c4aea4c8c8d619bb6b4ebeb17 | 
+---------------------------------------+--------------------------------------+ | replication_type | None | +---------------------------------------+--------------------------------------+ | revert_to_snapshot_support | False | +---------------------------------------+--------------------------------------+ | scheduled_to_be_deleted_at | None | +---------------------------------------+--------------------------------------+ | share_group_id | None | +---------------------------------------+--------------------------------------+ | share_network_id | 19d78275-55cb-4684-81f2-ec9c07701563 | +---------------------------------------+--------------------------------------+ | share_proto | NFS | +---------------------------------------+--------------------------------------+ | share_server_id | None | +---------------------------------------+--------------------------------------+ | share_type | ee1995d8-6827-4711-a58d-38ee00f24a75 | +---------------------------------------+--------------------------------------+ | share_type_name | gold_provisioning_prefix | +---------------------------------------+--------------------------------------+ | size | 1 | +---------------------------------------+--------------------------------------+ | snapshot_id | None | +---------------------------------------+--------------------------------------+ | snapshot_support | False | +---------------------------------------+--------------------------------------+ | source_backup_id | None | +---------------------------------------+--------------------------------------+ | source_share_group_snapshot_member_id | None | +---------------------------------------+--------------------------------------+ | status | creating | +---------------------------------------+--------------------------------------+ | task_state | None | +---------------------------------------+--------------------------------------+ | user_id | fbdba3d017b2484f9773033e3fc0c6ae | 
+---------------------------------------+--------------------------------------+ | volume_type | gold_provisioning_prefix | +---------------------------------------+--------------------------------------+ * To view the details of a share created with custom mount_point_name. .. code-block:: console $ openstack share show 138a6884-7a9b-4d9a-9ac1-f565701a4b83 +---------------------------------------+-------------------------------------------------------------------------+ | Field | Value | +---------------------------------------+-------------------------------------------------------------------------+ | access_rules_status | active | | availability_zone | nova | | create_share_from_snapshot_support | False | | created_at | 2024-03-20T20:32:50.819345 | | description | None | | export_locations | | | | id = 1f5d8a51-965e-4062-a1e1-03ca146ad277 | | | path = :/gold_mount_abc1 | | | preferred = True | | | share_instance_id = 62a4d622-a3c8-4915-adca-54a7fe5789bf | | | is_admin_only = False | | | id = ea7c936a-d94b-47bd-8a35-4b2f1f7b5e5a | | | path = :/gold_mount_abc1 | | | preferred = False | | | share_instance_id = 62a4d622-a3c8-4915-adca-54a7fe5789bf | | | is_admin_only = False | | has_replicas | False | | host | host@share_server_dhss_true#AstraInfra | | id | 138a6884-7a9b-4d9a-9ac1-f565701a4b83 | | is_public | False | | is_soft_deleted | False | | mount_snapshot_support | False | | name | MyShare | | progress | 100% | | project_id | 44754d5c4aea4c8c8d619bb6b4ebeb17 | | properties | | | replication_type | None | | revert_to_snapshot_support | False | | scheduled_to_be_deleted_at | None | | share_group_id | None | | share_network_id | 19d78275-55cb-4684-81f2-ec9c07701563 | | share_proto | NFS | +---------------------------------------+-------------------------------------------------------------------------+ * Create a share using encryption key reference. User can create share using their own encryption key. 
The key must be stored in the key manager (OpenStack Barbican) service. First, create a share type and specify the extra-spec ``encryption_support``. It can have the value ``share`` or ``share_server``, based on the support provided by the backend storage driver. Then use the ``--encryption-key-ref`` option in the share create command. Users can use the encryption key reference or the UUID of the key reference here. .. code-block:: console $ openstack share create NFS 1 \ --name myshare3 \ --description "My Manila share - Encrypted" \ --share-network mysharenetwork \ --share-type encrypted_share_type \ --encryption-key-ref 86babe9b-7277-4c3a-a081-6eb3eac9231d +---------------------------------------+-----------------------------------------------------------------------+ | Property | Value | +---------------------------------------+-----------------------------------------------------------------------+ | id | 40de4f4c-4588-4d9c-844b-f74d8951053a | | size | 1 | | availability_zone | None | | created_at | 2020-08-07T05:24:14.000000 | | status | creating | | name | myshare3 | | description | My Manila share - Encrypted | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | None | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | encrypted_share_type | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | encryption_key_ref | 86babe9b-7277-4c3a-a081-6eb3eac9231d | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | None | +---------------------------------------+-----------------------------------------------------------------------+ Grant and revoke share access 
----------------------------- .. tip:: Starting from the 2023.2 (Bobcat) release, in case you want to restrict the visibility of the sensitive fields (``access_to`` and ``access_key``), or avoid the access rule being deleted by other users, you can specify ``--lock-visibility`` and ``--lock-deletion`` in the Manila OpenStack command for creating access rules. A reason (``--lock-reason``) can also be provided. Only the user that placed the lock, system administrators and services will be able to manipulate such access rules. In case the deletion of the access rule was locked, Manila will also place an additional lock on the share, to ensure it will not be deleted and cause disconnections. Allow read-write access ~~~~~~~~~~~~~~~~~~~~~~~ * Allow access. .. code-block:: console $ manila access-allow myshare ip 10.0.0.0/24 --metadata key1=value1 +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | id | e30bde96-9217-4f90-afdc-27c092af1c77 | | share_id | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | | access_level | rw | | access_to | 10.0.0.0/24 | | access_type | ip | | state | queued_to_apply | | access_key | None | | created_at | 2020-08-07T05:27:27.000000 | | updated_at | None | | metadata | {'key1': 'value1'} | +--------------+--------------------------------------+ .. note:: Since API version 2.38, access rules of type IP supports IPv6 addresses and subnets in CIDR notation. .. note:: Since API version 2.45, metadata can be added, removed and updated for share access rules in a form of key=value pairs. Metadata can help you identify and filter access rules. * List access. .. 
code-block:: console $ manila access-list myshare +--------------------------------------+-------------+-------------+--------------+--------+------------+----------------------------+------------+ | id | access_type | access_to | access_level | state | access_key | created_at | updated_at | +--------------------------------------+-------------+-------------+--------------+--------+------------+----------------------------+------------+ | e30bde96-9217-4f90-afdc-27c092af1c77 | ip | 10.0.0.0/24 | rw | active | None | 2020-08-07T05:27:27.000000 | None | +--------------------------------------+-------------+-------------+--------------+--------+------------+----------------------------+------------+ An access rule is created. Allow read-only access ~~~~~~~~~~~~~~~~~~~~~~ * Allow access. .. code-block:: console $ manila access-allow myshare ip fd31:7ee0:3de4:a41b::/64 --access-level ro +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | id | 45b0a030-306a-4305-9e2a-36aeffb2d5b7 | | share_id | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | | access_level | ro | | access_to | fd31:7ee0:3de4:a41b::/64 | | access_type | ip | | state | queued_to_apply | | access_key | None | | created_at | 2020-08-07T05:28:35.000000 | | updated_at | None | | metadata | {} | +--------------+--------------------------------------+ * List access. .. 
code-block:: console $ manila access-list myshare +--------------------------------------+-------------+----------------------------+--------------+--------+------------+----------------------------+------------+ | id | access_type | access_to | access_level | state | access_key | created_at | updated_at | +--------------------------------------+-------------+----------------------------+--------------+--------+------------+----------------------------+------------+ | 45b0a030-306a-4305-9e2a-36aeffb2d5b7 | ip | fd31:7ee0:3de4:a41b::/64 | ro | active | None | 2020-08-07T05:28:35.000000 | None | | e30bde96-9217-4f90-afdc-27c092af1c77 | ip | 10.0.0.0/24 | rw | active | None | 2020-08-07T05:27:27.000000 | None | +--------------------------------------+-------------+----------------------------+--------------+--------+------------+----------------------------+------------+ Another access rule is created. .. note:: In case one or more access rules had its visibility locked, you might not be able to see the content of the fields containing sensitive information (``access_to`` and ``access_key``). Update access rules metadata ---------------------------- #. Add a new metadata. .. code-block:: console $ manila access-metadata 0c8470ca-0d77-490c-9e71-29e1f453bf97 set key2=value2 $ manila access-show 0c8470ca-0d77-490c-9e71-29e1f453bf97 +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | id | 0c8470ca-0d77-490c-9e71-29e1f453bf97 | | share_id | 8d8b854b-ec32-43f1-acc0-1b2efa7c3400 | | access_level | rw | | access_to | 10.0.0.0/24 | | access_type | ip | | state | active | | access_key | None | | created_at | 2016-03-24T14:51:36.000000 | | updated_at | None | | metadata | {'key1': 'value1', 'key2': 'value2'} | +--------------+--------------------------------------+ #. Remove a metadata key value. .. 
code-block:: console $ manila access-metadata 0c8470ca-0d77-490c-9e71-29e1f453bf97 unset key1 $ manila access-show 0c8470ca-0d77-490c-9e71-29e1f453bf97 +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | id | 0c8470ca-0d77-490c-9e71-29e1f453bf97 | | share_id | 8d8b854b-ec32-43f1-acc0-1b2efa7c3400 | | access_level | rw | | access_to | 10.0.0.0/24 | | access_type | ip | | state | active | | access_key | None | | created_at | 2016-03-24T14:51:36.000000 | | updated_at | None | | metadata | {'key2': 'value2'} | +--------------+--------------------------------------+ Deny access ----------- * Deny access. .. code-block:: console $ manila access-deny myshare 45b0a030-306a-4305-9e2a-36aeffb2d5b7 $ manila access-deny myshare e30bde96-9217-4f90-afdc-27c092af1c77 .. note:: Starting from the 2023.2 (Bobcat) release, it is possible to prevent the deletion of an access rule. In case you have placed a deletion lock during the access rule creation, the ``--unrestrict`` argument from Manila's OpenStack Client must be used in the request to revoke the access. * List access. .. code-block:: console $ manila access-list myshare +----+-------------+-----------+--------------+-------+------------+------------+------------+ | id | access_type | access_to | access_level | state | access_key | created_at | updated_at | +----+-------------+-----------+--------------+-------+------------+------------+------------+ +----+-------------+-----------+--------------+-------+------------+------------+------------+ The access rules are removed. Create snapshot --------------- * Create a snapshot. .. note:: To create a snapshot, the share type of the share must contain the capability extra-spec ``snapshot_support=True``. .. 
code-block:: console $ openstack share snapshot create --name mysnap --description "My Manila snapshot" myshare +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | id | 286edbe1-a69e-40e7-ad50-61287570df55 | | share_id | bf7ffbb7-73a5-44fe-a93e-73cbd5a9197d | | share_size | 1 | | created_at | 2025-03-08T00:06:32.123637 | | status | creating | | name | mysnap | | description | My Manila snapshot | | size | 1 | | share_proto | NFS | | provider_location | None | | user_id | 64e1409650ee4e94a8e78df24da86091 | | project_id | dd43995fee324b24b79adab2542d74e9 | | metadata | {} | +-------------------+--------------------------------------+ * List snapshots. .. code-block:: console $ openstack share snapshot list +--------------------------------------+--------+ | ID | Name | +--------------------------------------+--------+ | 7861eed0-8634-41e0-a57e-a1d87ad48a1b | mysnap | +--------------------------------------+--------+ Mount a snapshot ---------------- * Allow access to the snapshot. .. note:: To mount a snapshot, the share type of the parent share must contain the capability extra-spec ``mount_snapshot_support=True``. .. code-block:: console $ openstack share snapshot access create mysnap ip 192.168.1.0/24 +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ | id | 89e36a97-19d8-430c-b920-6d930ea27464 | | access_type | ip | | access_to | 192.168.1.0/24 | | state | queued_to_apply | +-------------+--------------------------------------+ * List snapshot access. .. 
code-block:: console $ openstack share snapshot access list mysnap +--------------------------------------+-------------+----------------+--------+ | ID | Access Type | Access To | State | +--------------------------------------+-------------+----------------+--------+ | 89e36a97-19d8-430c-b920-6d930ea27464 | ip | 192.168.1.0/24 | active | +--------------------------------------+-------------+----------------+--------+ Then proceed to mounting the snapshot on the clients whose access was created. * Delete snapshot access rule. .. code-block:: console $ openstack share snapshot access delete mysnap 89e36a97-19d8-430c-b920-6d930ea27464 Create share from snapshot -------------------------- * Create a share from a snapshot. .. note:: To create a share from a snapshot, the share type of the parent share must contain the capability extra-spec ``create_share_from_snapshot_support=True``. .. code-block:: console $ manila create NFS 1 \ --snapshot-id 8a18aa77-7500-4e56-be8f-6081146f47f1 \ --share-network mysharenetwork \ --name mysharefromsnap +---------------------------------------+--------------------------------------+ | Property | Value | +---------------------------------------+--------------------------------------+ | id | 2a9336ea-3afc-4443-80bb-398f4bdb3a93 | | size | 1 | | availability_zone | nova | | created_at | 2020-08-07T05:34:12.000000 | | status | creating | | name | mysharefromsnap | | description | None | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | 8a18aa77-7500-4e56-be8f-6081146f47f1 | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | 
revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | None | +---------------------------------------+--------------------------------------+ * List shares. .. code-block:: console $ openstack share list +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+-------------------------------------------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+-------------------------------------------+-------------------+ | bf7ffbb7-73a5-44fe-a93e-73cbd5a9197d | myshare | 1 | NFS | available | False | default | oid-na-scale-1@bogota#fake_pool_for_DELTA | manila-zone-3 | +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+-------------------------------------------+-------------------+ * Show the share created from snapshot. .. 
code-block:: console $ manila show mysharefromsnap +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | id | 2a9336ea-3afc-4443-80bb-398f4bdb3a93 | | size | 1 | | availability_zone | nova | | created_at | 2020-08-07T05:34:12.000000 | | status | available | | name | mysharefromsnap | | description | None | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | 8a18aa77-7500-4e56-be8f-6081146f47f1 | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | 100% | | export_locations | | | | id = 7928b361-cada-4505-a62e-4cefb1cf6fc5 | | | path = 10.0.0.11:/path/to/fake/share/share_2a9336ea_3afc_4443_80bb_398f4bdb3a93_97de2abe_d114_49a9_9d01_ce5e71337e48 | | | preferred = True | | | id = e48d19ba-dee5-4492-b156-5181530955be | | | path = 10.0.0.10:/path/to/fake/share/share_2a9336ea_3afc_4443_80bb_398f4bdb3a93_97de2abe_d114_49a9_9d01_ce5e71337e48 | | | preferred = False | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ Delete share ------------ * Delete a share. .. code-block:: console $ manila delete mysharefromsnap * List shares. .. 
code-block:: console $ manila list +--------------------------------------+-----------------+------+-------------+-----------+-----------+-----------------+-----------------------------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+-----------------+------+-------------+-----------+-----------+-----------------+-----------------------------+-------------------+ | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | myshare | 1 | NFS | available | False | default | nosb-devstack@london#LONDON | nova | | 2a9336ea-3afc-4443-80bb-398f4bdb3a93 | mysharefromsnap | 1 | NFS | deleting | False | default | nosb-devstack@london#LONDON | nova | +--------------------------------------+-----------------+------+-------------+-----------+-----------+-----------------+-----------------------------+-------------------+ The share is being deleted. Delete snapshot --------------- * Delete a snapshot. .. code-block:: console $ manila snapshot-delete mysnap * List snapshots after deleting. .. code-block:: console $ manila snapshot-list +----+----------+--------+------+------------+ | ID | Share ID | Status | Name | Share Size | +----+----------+--------+------+------------+ +----+----------+--------+------+------------+ The snapshot is deleted. Extend share ------------ * Extend share. .. code-block:: console $ manila extend myshare 2 * Show the share while it is being extended. .. 
code-block:: console $ manila show myshare +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | id | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | | size | 1 | | availability_zone | nova | | created_at | 2020-08-07T05:24:14.000000 | | status | extending | | name | myshare | | description | My Manila share | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | None | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | 100% | | export_locations | | | | id = 908e5a28-c5ea-4627-b17c-1cfeb894ccd1 | | | path = 10.0.0.11:/path/to/fake/share/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = True | | | id = 395244a1-8aa9-44af-9fda-f7d6036ce2b9 | | | path = 10.0.0.10:/path/to/fake/share/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = False | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ * Show the share after it is extended. .. 
code-block:: console $ manila show myshare +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | id | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | | size | 2 | | availability_zone | nova | | created_at | 2020-08-07T05:24:14.000000 | | status | available | | name | myshare | | description | My Manila share | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | None | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | 100% | | export_locations | | | | id = 908e5a28-c5ea-4627-b17c-1cfeb894ccd1 | | | path = 10.0.0.11:/path/to/fake/share/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = True | | | id = 395244a1-8aa9-44af-9fda-f7d6036ce2b9 | | | path = 10.0.0.10:/path/to/fake/share/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = False | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ Shrink share ------------ * Shrink a share. .. code-block:: console $ manila shrink myshare 1 * Show the share while it is being shrunk. .. 
code-block:: console $ manila show myshare +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | id | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | | size | 2 | | availability_zone | nova | | created_at | 2020-08-07T05:24:14.000000 | | status | shrinking | | name | myshare | | description | My Manila share | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | None | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | 100% | | export_locations | | | | id = 908e5a28-c5ea-4627-b17c-1cfeb894ccd1 | | | path = 10.0.0.11:/path/to/fake/share/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = True | | | id = 395244a1-8aa9-44af-9fda-f7d6036ce2b9 | | | path = 10.0.0.10:/path/to/fake/share/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = False | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ * Show the share after it is being shrunk. .. 
code-block:: console $ manila show myshare +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | id | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | | size | 1 | | availability_zone | nova | | created_at | 2020-08-07T05:24:14.000000 | | status | available | | name | myshare | | description | My Manila share | | project_id | d9932a60d9ee4087b6cff9ce6e9b4e3b | | snapshot_id | None | | share_network_id | c4bfdd5e-7502-4a65-8876-0ce8b9914a64 | | share_proto | NFS | | metadata | {} | | share_type | af7b64ec-cdb3-4a5f-93c9-51672d72e172 | | is_public | False | | snapshot_support | True | | task_state | None | | share_type_name | dhss_true | | access_rules_status | active | | replication_type | None | | has_replicas | False | | user_id | 2cebd96a794f431caa06ce5215e0da21 | | create_share_from_snapshot_support | True | | revert_to_snapshot_support | True | | share_group_id | None | | source_share_group_snapshot_member_id | None | | mount_snapshot_support | True | | progress | 100% | | export_locations | | | | id = 908e5a28-c5ea-4627-b17c-1cfeb894ccd1 | | | path = 10.0.0.11:/path/to/fake/share/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = True | | | id = 395244a1-8aa9-44af-9fda-f7d6036ce2b9 | | | path = 10.0.0.10:/path/to/fake/share/share_83b0772b_00ad_4e45_8fad_106b9d4f1719_da404d59_4280_4b32_847f_6cfa4f730bbd | | | preferred = False | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ Share metadata -------------- * Set metadata items on your share .. 
code-block:: console $ manila metadata myshare set purpose='storing financial data for analysis' year_started=2020 * Show share metadata .. code-block:: console $ manila metadata-show myshare +--------------+-------------------------------------+ | Property | Value | +--------------+-------------------------------------+ | purpose | storing financial data for analysis | | year_started | 2020 | +--------------+-------------------------------------+ * Query share list with metadata .. code-block:: console $ manila list --metadata year_started=2020 +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+------+-------------------+ | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | myshare | 1 | NFS | available | False | dhss_true | | nova | +--------------------------------------+---------+------+-------------+-----------+-----------+-----------------+------+-------------------+ * Unset share metadata .. code-block:: console $ manila metadata myshare unset year_started Share revert to snapshot ------------------------ * Share revert to snapshot .. note:: - To revert a share to its snapshot, the share type of the share must contain the capability extra-spec ``revert_to_snapshot_support=True``. - The revert operation can only be performed to the most recent available snapshot of the share known to manila. If revert to an earlier snapshot is desired, later snapshots must explicitly be deleted. .. code-block:: console $ manila revert-to-snapshot mysnapshot Share Transfer -------------- * Transfer a share to a different project .. note:: - Share transfer is available for ``driver_handles_share_servers=False``, only supports transferring shares that are not created with a share network. 
- Shares that are in transitional states, or possessing replicas, or within share groups cannot be transferred. .. code-block:: console $ manila share-transfer-create myshare --name mytransfer +------------------------+--------------------------------------+ | Property | Value | +------------------------+--------------------------------------+ | id | 1c56314e-7e97-455a-bbde-83828db038d4 | | created_at | 2023-05-25T14:37:11.178869 | | name | mytransfer | | resource_type | share | | resource_id | 5573c214-ef79-4fb7-83f8-8c01fbe847f7 | | source_project_id | 88b1f2cf8f554edaa8dd92892d1eabf7 | | destination_project_id | None | | accepted | False | | expires_at | 2023-05-25T14:42:11.176049 | | auth_key | af429e22e0abc31d | +------------------------+--------------------------------------+ * Accept share transfer .. note:: - Accept share transfer is performed by a user in a different project. .. code-block:: console $ manila share-transfer-accept 1c56314e-7e97-455a-bbde-83828db038d4 af429e22e0abc31d * Delete a transfer .. code-block:: console $ manila share-transfer-delete 1c56314e-7e97-455a-bbde-83828db038d4 * List transfers .. code-block:: console $ manila share-transfer-list +--------------------------------------+------------+---------------+--------------------------------------+ | ID | Name | Resource Type | Resource Id | +--------------------------------------+------------+---------------+--------------------------------------+ | 1c56314e-7e97-455a-bbde-83828db038d4 | mytransfer | share | 5573c214-ef79-4fb7-83f8-8c01fbe847f7 | +--------------------------------------+------------+---------------+--------------------------------------+ * Show a share transfer .. 
code-block:: console $ manila share-transfer-show 1c56314e-7e97-455a-bbde-83828db038d4 +------------------------+--------------------------------------+ | Property | Value | +------------------------+--------------------------------------+ | id | 1c56314e-7e97-455a-bbde-83828db038d4 | | created_at | 2023-05-25T14:37:11.178869 | | name | mytransfer | | resource_type | share | | resource_id | 5573c214-ef79-4fb7-83f8-8c01fbe847f7 | | source_project_id | 88b1f2cf8f554edaa8dd92892d1eabf7 | | destination_project_id | None | | accepted | False | | expires_at | 2023-05-25T14:42:11.176049 | +------------------------+--------------------------------------+ Snapshot metadata ----------------- * Set metadata items on your share snapshot during creation .. code-block:: console $ openstack share snapshot create myshare --name mysnapshot \ --property key1=value1 --property key2=value2 +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | created_at | 2024-03-25T15:39:52.555692 | | description | None | | id | 00a82c82-cb49-414b-a334-c1a1e9b360d5 | | metadata | {'key1': 'value1', 'key2': 'value2'} | | name | mysnapshot | | project_id | df63c20d921f48d8802083fdb858fd3e | | provider_location | None | | share_id | 6c4d785b-9034-400b-95de-3d4f06280b31 | | share_proto | NFS | | share_size | 1 | | size | 1 | | status | creating | | user_id | b3369f53dadd40499d797a9a4ee9326b | +-------------------+--------------------------------------+ * Set metadata items on your share snapshot .. code-block:: console $ openstack share snapshot set mysnapshot --property key1=value * Query snapshot list with metadata .. 
code-block:: console $ openstack share snapshot list --property key1=value1 +--------------------------------------+------------+ | ID | Name | +--------------------------------------+------------+ | 83b0772b-00ad-4e45-8fad-106b9d4f1719 | mysnapshot | +--------------------------------------+------------+ * Unset snapshot metadata .. code-block:: console $ openstack share snapshot unset mysnapshot --property key1 Resource locks -------------- * Prevent a share from being deleted by creating a ``resource lock``: .. code-block:: console $ openstack share lock create myshare share +-----------------+--------------------------------------+ | Field | Value | +-----------------+--------------------------------------+ | created_at | 2023-07-18T05:11:56.626667 | | id | dc7ec691-a505-47d0-b2ec-8eb7fb9270e4 | | lock_context | user | | lock_reason | None | | project_id | db2e72fef7864bbbbf210f22da7f1158 | | resource_action | delete | | resource_id | 4c0b4d35-4ea8-4811-a1e2-a065c64225a8 | | resource_type | share | | updated_at | None | | user_id | 89de351d3b5744b9853ec4829aa0e714 | +-----------------+--------------------------------------+ .. note:: A ``delete`` (deletion) lock on a share would prevent deletion and other actions on a share that are similar to deletion. Similar actions include moving a share to the recycle bin for deferred deletion (``soft deletion``) or removing a share from the Shared File Systems service (``unmanage``). * Get details of a resource lock: .. 
code-block:: console $ openstack share lock list --resource myshare --resource-type share +--------------------------------------+--------------------------------------+---------------+-----------------+ | ID | Resource Id | Resource Type | Resource Action | +--------------------------------------+--------------------------------------+---------------+-----------------+ | dc7ec691-a505-47d0-b2ec-8eb7fb9270e4 | 4c0b4d35-4ea8-4811-a1e2-a065c64225a8 | share | delete | +--------------------------------------+--------------------------------------+---------------+-----------------+ $ openstack share lock show dc7ec691-a505-47d0-b2ec-8eb7fb9270e4 +-----------------+--------------------------------------+ | Field | Value | +-----------------+--------------------------------------+ | ID | dc7ec691-a505-47d0-b2ec-8eb7fb9270e4 | | Resource Id | 4c0b4d35-4ea8-4811-a1e2-a065c64225a8 | | Resource Type | share | | Resource Action | delete | | Lock Context | user | | User Id | 89de351d3b5744b9853ec4829aa0e714 | | Project Id | db2e72fef7864bbbbf210f22da7f1158 | | Created At | 2023-07-18T05:11:56.626667 | | Updated At | None | | Lock Reason | None | +-----------------+--------------------------------------+ * Resource lock in action: .. code-block:: console $ openstack share delete myshare Failed to delete share with name or ID 'myshare': Resource lock/s [dc7ec691-a505-47d0-b2ec-8eb7fb9270e4] prevent delete action. (HTTP 403) (Request-ID: req-331a8e31-e02a-40b2-accf-0f6dae1b6178) 1 of 1 shares failed to delete. * Delete a resource lock: .. code-block:: console $ openstack share lock delete dc7ec691-a505-47d0-b2ec-8eb7fb9270e4 Share backups ------------- * Create backup .. 
code-block:: console $ openstack share backup create --name test5 --backup-options backup_type=eng_data_backup source_share +-------------------+--------------------------------------+ | Field | Value | +-------------------+--------------------------------------+ | availability_zone | manila-zone-0 | | backup_type | backup_type1 | | created_at | 2024-03-11T18:15:32.183982 | | description | None | | host | vm.openstack.opendev.com@nas_storage | | id | 4b468327-d03f-4df7-97ef-c5230b5beafc | | name | test5 | | progress | 0 | | restore_progress | 0 | | share_id | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | | size | 1 | | status | creating | | topic | None | | updated_at | None | +-------------------+--------------------------------------+ * List backups .. code-block:: console $ openstack share backup list +--------------------------------------+-------+--------------------------------------+-----------+ | ID | Name | Share ID | Status | +--------------------------------------+-------+--------------------------------------+-----------+ | 4b468327-d03f-4df7-97ef-c5230b5beafc | test5 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | creating | | 8a9b3ce0-23bb-4923-b8ce-d0dd1f56b2b8 | test4 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | available | +--------------------------------------+-------+--------------------------------------+-----------+ $ openstack share backup show test5 +-------------------+------------------------------------------------+ | Field | Value | +-------------------+------------------------------------------------+ | availability_zone | manila-zone-0 | | backup_type | backup_type1 | | created_at | 2024-03-11T18:15:32.000000 | | description | None | | host | scs000215254-1.nb.openenglab.netapp.com@ontap1 | | id | 4b468327-d03f-4df7-97ef-c5230b5beafc | | name | test5 | | progress | 0 | | restore_progress | 0 | | share_id | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | | size | 1 | | status | creating | | topic | manila-share | | updated_at | 2024-03-11T18:15:32.000000 | 
+-------------------+------------------------------------------------+ $ openstack share backup list +--------------------------------------+-------+--------------------------------------+-----------+ | ID | Name | Share ID | Status | +--------------------------------------+-------+--------------------------------------+-----------+ | 4b468327-d03f-4df7-97ef-c5230b5beafc | test5 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | available | | 8a9b3ce0-23bb-4923-b8ce-d0dd1f56b2b8 | test4 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | available | +--------------------------------------+-------+--------------------------------------+-----------+ * Restore backup .. code-block:: console $ openstack share backup restore test4 $ openstack share backup list +--------------------------------------+-------+--------------------------------------+-----------+ | ID | Name | Share ID | Status | +--------------------------------------+-------+--------------------------------------+-----------+ | 4b468327-d03f-4df7-97ef-c5230b5beafc | test5 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | available | | 8a9b3ce0-23bb-4923-b8ce-d0dd1f56b2b8 | test4 | 983c6dd5-ef93-4c73-9359-ef02fe3bbce7 | restoring | +--------------------------------------+-------+--------------------------------------+-----------+ * Delete backup .. code-block:: console $ openstack share backup delete test5 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/user/index.rst0000664000175000017500000000151600000000000017745 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. User ---- .. toctree:: :maxdepth: 1 Manila API v2 Reference create-and-manage-shares share-network-operations share-network-subnet-operations troubleshooting-asynchronous-failures ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/user/share-network-operations.rst0000664000175000017500000011515700000000000023617 0ustar00zuulzuul00000000000000.. _share_network: ================================ Create and manage share networks ================================ .. contents:: :local: A share network stores network information to create and manage shares. A share network provides a way to designate a network to export shares upon. In the most common use case, you can create a share network with a private OpenStack (neutron) network that you own. If the share network is an isolated network, manila can provide hard guarantees of network and data isolation for your shared file systems in a multi-tenant cloud. In some clouds, however, shares cannot be exported directly upon private project networks; and the cloud may have provider networks that are designated for use with share networks. In either case, as long as the underlying network is connected to the clients (virtual machines, containers or bare metals), there will exist a direct path to communicate with shares exported on the share networks. .. important:: In order to use share networks, the share type you choose must have the extra specification ``driver_handles_share_servers`` set to True. Create share networks ~~~~~~~~~~~~~~~~~~~~~ #. 
Create a share network. .. code-block:: console $ manila share-network-create \ --name sharenetwork1 \ --description "Share Network created for demo purposes" \ --neutron-net-id c297b020-025a-4f3e-8120-57ea90404afb \ --neutron-subnet-id 29ecfbd5-a9be-467e-8b4a-3415d1f82888 +-------------------+-----------------------------------------+ | Property | Value | +-------------------+-----------------------------------------+ | name | sharenetwork1 | | segmentation_id | None | | created_at | 2019-07-02T11:14:06.228816 | | neutron_subnet_id | 29ecfbd5-a9be-467e-8b4a-3415d1f82888 | | updated_at | None | | network_type | None | | neutron_net_id | c297b020-025a-4f3e-8120-57ea90404afb | | ip_version | None | | cidr | None | | project_id | 907004508ef4447397ce6741a8f037c1 | | id | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | | description | Share Network created for demo purposes | +-------------------+-----------------------------------------+ #. Show the created share network. .. code-block:: console $ manila share-network-show sharenetwork1 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | id | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | | name | sharenetwork1 | | project_id | 5b23075b4b504261a5987b18588f86cf | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | None | | neutron_net_id | c297b020-025a-4f3e-8120-57ea90404afb | | neutron_subnet_id | 29ecfbd5-a9be-467e-8b4a-3415d1f82888 | | network_type | None | | segmentation_id | None | | cidr | None | | ip_version | None | | description | None | | gateway | None | | mtu | None | +-------------------+--------------------------------------+ .. note:: Since API version 2.51, a share network is able to span multiple subnets in different availability zones and the network information will be stored on each subnet. 
To accommodate adding multiple subnets, the share network create command was updated to accept an availability zone as parameter. This parameter will be used in the share network creation process which also creates a new subnet. If you do not specify an availability zone, the created subnet will be considered default by the Shared File Systems service. A default subnet is expected to be available in all availability zones of the cloud. So when you are creating a share network, the output will be similar to: .. code-block:: console $ manila share-network-create \ --name sharenetwork1 \ --description "Share Network created for demo purposes" \ --availability-zone manila-zone-0 +-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | id | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | | name | sharenetwork1 | | project_id | 8c2962a4832743469a336f7c179f7d34 | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | None | | description | Share Network created for demo purposes | | share_network_subnets | [{'id': '900d9ddc-7062-404e-8ef5-f63b84782d89', 'availability_zone': 'manila-zone-0', 'created_at': '2019-10-09T04:19:31.000000', 'updated_at': None, 'segmentation_id': None, 'neutron_subnet_id': None, 'neutron_net_id': None, 'ip_version': None, 'cidr': None, 'network_type': None, 'mtu': None, 'gateway': None}] | 
+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ List share networks ~~~~~~~~~~~~~~~~~~~ #. List share networks. .. code-block:: console $ manila share-network-list +--------------------------------------+---------------+ | id | name | +--------------------------------------+---------------+ | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | sharenetwork1 | +--------------------------------------+---------------+ Update share networks ~~~~~~~~~~~~~~~~~~~~~ #. Update the share network data. .. code-block:: console $ manila share-network-update sharenetwork1 \ --neutron-net-id a27160ca-5595-4c62-bf54-a04fb7b14316 \ --neutron-subnet-id f043f4b0-c05e-493f-bbe9-99689e2187d2 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | id | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | | name | sharenetwork1 | | project_id | 5b23075b4b504261a5987b18588f86cf | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | 2019-10-10T17:14:08.970945 | | neutron_net_id | a27160ca-5595-4c62-bf54-a04fb7b14316 | | neutron_subnet_id | f043f4b0-c05e-493f-bbe9-99689e2187d2 | | network_type | None | | segmentation_id | None | | cidr | None | | ip_version | None | | description | None | | gateway | None | | mtu | None | +-------------------+--------------------------------------+ #. Show details of the updated share network. .. 
code-block:: console $ manila share-network-show sharenetwork1 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | id | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | | name | sharenetwork1 | | project_id | 5b23075b4b504261a5987b18588f86cf | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | 2019-10-10T17:14:09.000000 | | neutron_net_id | a27160ca-5595-4c62-bf54-a04fb7b14316 | | neutron_subnet_id | f043f4b0-c05e-493f-bbe9-99689e2187d2 | | network_type | None | | segmentation_id | None | | cidr | None | | ip_version | None | | description | None | | gateway | None | | mtu | None | +-------------------+--------------------------------------+ .. note:: You cannot update the ``neutron_net_id`` and ``neutron_subnet_id`` of a share network that has shares exported onto it. .. note:: From API version 2.51, updating the ``neutron_net_id`` and ``neutron_subnet_id`` is possible only for a default subnet. Non default subnets cannot be updated after they are created. You may delete the subnet in question, and re-create it. The output will look as shown below: .. 
code-block:: console $ manila share-network-update sharenetwork1 \ --neutron-net-id a27160ca-5595-4c62-bf54-a04fb7b14316 \ --neutron-subnet-id f043f4b0-c05e-493f-bbe9-99689e2187d2 +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | id | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | | name | sharenetwork1 | | project_id | 8c2962a4832743469a336f7c179f7d34 | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | 2019-10-10T17:14:09.000000 | | description | Share Network created for demo purposes | | share_network_subnets | [{'id': '900d9ddc-7062-404e-8ef5-f63b84782d89', 'availability_zone': None, 'created_at': '2019-10-09T04:19:31.000000', 'updated_at': '2019-10-09T07:39:59.000000', 'segmentation_id': None, 'neutron_net_id': 'a27160ca-5595-4c62-bf54-a04fb7b14316', 'neutron_subnet_id': 'f043f4b0-c05e-493f-bbe9-99689e2187d2', 'ip_version': None, 'cidr': None, 'network_type': None, 'mtu': None, 'gateway': None}] | 
+-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ Share network show ~~~~~~~~~~~~~~~~~~ #. Show details of a share network. .. code-block:: console $ manila share-network-show sharenetwork1 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | id | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | | name | sharenetwork1 | | project_id | 5b23075b4b504261a5987b18588f86cf | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | 2019-10-10T17:14:09.000000 | | neutron_net_id | fake_updated_net_id | | neutron_subnet_id | fake_updated_subnet_id | | network_type | None | | segmentation_id | None | | cidr | None | | ip_version | None | | description | None | | gateway | None | | mtu | None | +-------------------+--------------------------------------+ .. note:: Since API version 2.51, the ``share-network-show`` command also shows a list of subnets contained in the share network as show below. .. 
code-block:: console +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | id | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | | name | sharenetwork1 | | project_id | 8c2962a4832743469a336f7c179f7d34 | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | None | | description | Share Network created for demo purposes | | share_network_subnets | [{'id': '900d9ddc-7062-404e-8ef5-f63b84782d89', 'availability_zone': None, 'created_at': '2019-10-09T04:19:31.000000', 'updated_at': '2019-10-09T07:39:59.000000', 'segmentation_id': None, 'neutron_net_id': 'fake_updated_net_id', 'neutron_subnet_id': 'fake_updated_subnet_id', 'ip_version': None, 'cidr': None, 'network_type': None, 'mtu': None, 'gateway': None}] | +-----------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. 
note:: Since API version 2.63, the ``share-network-show`` command also shows the ``status`` and ``security_service_update_support`` fields. .. code-block:: console +---------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | id | feed6a6c-f9e0-45ba-9a2b-0db76bde63e1 | | name | sharenetwork1 | | project_id | 8c2962a4832743469a336f7c179f7d34 | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | None | | description | Share Network created for demo purposes | | status | active | | security_service_update_support | True | | share_network_subnets | [{'id': '900d9ddc-7062-404e-8ef5-f63b84782d89', 'availability_zone': None, 'created_at': '2019-10-09T04:19:31.000000', 'updated_at': '2019-10-09T07:39:59.000000', 'segmentation_id': None, 'neutron_net_id': 'fake_updated_net_id', 'neutron_subnet_id': 'fake_updated_subnet_id', 'ip_version': None, 'cidr': None, 'network_type': None, 'mtu': None, 'gateway': None}] | 
+---------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ Add security service/s ~~~~~~~~~~~~~~~~~~~~~~ #. Add a pre-existing security service to a given share network. .. code-block:: console $ manila share-network-security-service-add \ sharenetwork1 \ my_sec_service $ manila share-network-security-service-list sharenetwork1 +--------------------------------------+----------------+--------+------+ | id | name | status | type | +--------------------------------------+----------------+--------+------+ | 50303c35-2c53-4d37-a0d9-61dfe3789569 | my_sec_service | new | ldap | +--------------------------------------+----------------+--------+------+ .. note:: Since API version 2.63, manila supports adding security services to share networks that are already in use, depending on the share network's support. The share network entity now contains a field called ``security_service_update_support`` which holds information on whether all resources built within it support such an operation. Before starting the operation to actually add the security service to a share network that is being used, a check operation must be triggered. See :ref:`subsection <share_network_security_service_add_check>`. List share network security services ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. List all the security services that exist in a share network. .. 
code-block:: console $ manila share-network-security-service-list sharenetwork1 +--------------------------------------+----------------+--------+------+ | id | name | status | type | +--------------------------------------+----------------+--------+------+ | 50303c35-2c53-4d37-a0d9-61dfe3789569 | my_sec_service | new | ldap | +--------------------------------------+----------------+--------+------+ Remove a security service from a share network ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Remove a security service from a given share network. .. code-block:: console $ manila share-network-security-service-remove \ sharenetwork1 \ my_sec_service $ manila share-network-security-service-list sharenetwork1 +----+------+--------+------+ | id | name | status | type | +----+------+--------+------+ +----+------+--------+------+ Delete share networks ~~~~~~~~~~~~~~~~~~~~~ #. Delete a share network. .. code-block:: console $ manila share-network-delete sharenetwork1 #. List all share networks .. code-block:: console $ manila share-network-list +--------------------------------------+---------------+ | id | name | +--------------------------------------+---------------+ +--------------------------------------+---------------+ .. _share_network_security_service_update_check: Update share network security service check (Since API version 2.63) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Check if the update for security services of the same type can be performed: .. 
code-block:: console $ manila share-network-security-service-update-check \ sharenetwork1 \ my_sec_service \ my_sec_service_updated +---------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | compatible | None | | requested_operation | {'operation': 'update_security_service', 'current_security_service': 50303c35-2c53-4d37-a0d9-61dfe3789569, 'new_security_service': '8971c5f6-52ec-4c53-bf6a-3fae38a9221e'} | +---------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ #. Check the result of the operation: .. 
code-block:: console $ manila share-network-security-service-update-check \ sharenetwork1 \ my_sec_service \ my_sec_service_updated +---------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | compatible | True | | requested_operation | {'operation': 'update_security_service', 'current_security_service': 50303c35-2c53-4d37-a0d9-61dfe3789569, 'new_security_service': '8971c5f6-52ec-4c53-bf6a-3fae38a9221e'} | +---------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ Now, the request to update a share network security service should be accepted. Update share network security services (Since API version 2.63) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Replaces one security service for another of the same type. .. code-block:: console $ manila share-network-security-service-update \ sharenetwork1 \ my_sec_service \ my_sec_service_updated $ manila share-network-security-service-list sharenetwork1 +--------------------------------------+------------------------+--------+------+ | id | name | status | type | +--------------------------------------+------------------------+--------+------+ | 8971c5f6-52ec-4c53-bf6a-3fae38a9221e | my_sec_service_updated | new | ldap | +--------------------------------------+------------------------+--------+------+ .. note:: The share network entity now contains a field called ``security_service_update_support`` which holds information whether all resources built within it can hold such operation. 
In order to update security services in share networks that currently contain shares, an operation to check if the operation can be completed must be performed. See :ref:`subsection `. .. _share_network_security_service_add_check: Add share network security service check (Since API version 2.63) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Check if it is possible to add a security service to a share network: .. code-block:: console $ manila share-network-security-service-add-check \ sharenetwork1 \ my_sec_service +---------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ | compatible | None | | requested_operation | {'operation': 'add_security_service', 'current_security_service': None, 'new_security_service': '50303c35-2c53-4d37-a0d9-61dfe3789569'} | +---------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ #. Check if the result of the operation: .. 
code-block:: console $ manila share-network-security-service-add-check \ sharenetwork1 \ my_sec_service +---------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +---------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ | compatible | True | | requested_operation | {'operation': 'add_security_service', 'current_security_service': None, 'new_security_service': '50303c35-2c53-4d37-a0d9-61dfe3789569'} | +---------------------+-----------------------------------------------------------------------------------------------------------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/user/share-network-subnet-operations.rst0000664000175000017500000002743200000000000025113 0ustar00zuulzuul00000000000000.. _share_network_subnet: ======================================= Create and manage share network subnets ======================================= .. contents:: :local: A share network subnet stores network information to create and manage shares. To create and manage your share network subnets, you can use ``manila`` client commands. You can create multiple subnets in a share network, and if you do not specify an availability zone, the subnet you are creating will be considered default by the Shared File Systems service. The default subnet spans all availability zones. You cannot have more than one default subnet per share network. During share server migration, metadata belonging to the old share network subnet is ignored when moving to a new share network. Since metadata updates are passed to backend driver, with migration of share network these metadata updates will no longer be available to new share network. .. 
important:: In order to use share networks, the share type you choose must have the extra specification ``driver_handles_share_servers`` set to True. Create a subnet in an existing share network ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Create a subnet related to the given share network .. code-block:: console $ manila share-network-subnet-create \ sharenetwork1 \ --availability-zone manila-zone-0 \ --neutron-net-id a27160ca-5595-4c62-bf54-a04fb7b14316 \ --neutron-subnet-id f043f4b0-c05e-493f-bbe9-99689e2187d2 +--------------------+--------------------------------------+ | Property | Value | +--------------------+--------------------------------------+ | id | be3ae5ad-a22c-494f-840e-5e3526e34e0f | | availability_zone | manila-zone-0 | | share_network_id | 35f44d3c-8888-429e-b8c7-8a29dead6e5b | | share_network_name | sharenetwork1 | | created_at | 2019-10-09T04:54:48.000000 | | segmentation_id | None | | neutron_subnet_id | f043f4b0-c05e-493f-bbe9-99689e2187d2 | | updated_at | None | | neutron_net_id | a27160ca-5595-4c62-bf54-a04fb7b14316 | | ip_version | None | | cidr | None | | network_type | None | | mtu | None | | gateway | None | +--------------------+--------------------------------------+ #. Show the share network to verify if the created subnet is attached .. 
code-block:: console $ manila share-network-show sharenetwork1 +-----------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Property | Value | +-----------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | id | 35f44d3c-8888-429e-b8c7-8a29dead6e5b | | name | sharenetwork1 | | project_id | 8c2962a4832743469a336f7c179f7d34 | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | None | | description | Share Network created for demo purposes | | share_network_subnets | [{'id': 'be3ae5ad-a22c-494f-840e-5e3526e34e0f', 'availability_zone': 'manila-zone-0', 'created_at': '2019-10-09T04:54:48.000000', 'updated_at': None, 'segmentation_id': None, 'neutron_net_id': 'a27160ca-5595-4c62-bf54-a04fb7b14316', 'neutron_subnet_id': 'f043f4b0-c05e-493f-bbe9-99689e2187d2', 'ip_version': None, 'cidr': None, 'network_type': None, 'mtu': None, 'gateway': None}] | +-----------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ Show a share network subnet 
~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Show an existent subnet in a given share network .. code-block:: console $ manila share-network-subnet-show \ sharenetwork1 \ be3ae5ad-a22c-494f-840e-5e3526e34e0f +--------------------+--------------------------------------+ | Property | Value | +--------------------+--------------------------------------+ | id | be3ae5ad-a22c-494f-840e-5e3526e34e0f | | availability_zone | manila-zone-0 | | share_network_id | 35f44d3c-8888-429e-b8c7-8a29dead6e5b | | share_network_name | sharenetwork1 | | created_at | 2019-10-09T04:54:48.000000 | | segmentation_id | None | | neutron_subnet_id | f043f4b0-c05e-493f-bbe9-99689e2187d2 | | updated_at | None | | neutron_net_id | a27160ca-5595-4c62-bf54-a04fb7b14316 | | ip_version | None | | cidr | None | | network_type | None | | mtu | None | | gateway | None | +--------------------+--------------------------------------+ Delete a share network subnet ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Delete a specific share network subnet .. code-block:: console $ manila share-network-subnet-delete \ sharenetwork1 \ be3ae5ad-a22c-494f-840e-5e3526e34e0f #. Verify that it has been deleted .. code-block:: console $ manila share-network-show sharenetwork1 +-----------------------+-----------------------------------------+ | Property | Value | +-----------------------+-----------------------------------------+ | id | 35f44d3c-8888-429e-b8c7-8a29dead6e5b | | name | sharenetwork1 | | project_id | 8c2962a4832743469a336f7c179f7d34 | | created_at | 2019-10-09T04:19:31.000000 | | updated_at | None | | description | Share Network created for demo purposes | | share_network_subnets | [] | +-----------------------+-----------------------------------------+ Share network subnet metadata ----------------------------- * Set metadata items on your share network subnet during creation .. 
code-block:: console $ openstack share network subnet create sharenetwork1 \ --property key1=value1 --property key2=value2 +--------------------+--------------------------------------+ | Field | Value | +--------------------+--------------------------------------+ | availability_zone | None | | cidr | None | | created_at | 2024-03-28T15:22:53.291721 | | gateway | None | | id | 9ab933ef-f0cd-409e-8b6b-c3d34073ac44 | | ip_version | None | | metadata | {'key1': 'value1', 'key2': 'value2'} | | mtu | None | | network_type | None | | neutron_net_id | None | | neutron_subnet_id | None | | segmentation_id | None | | share_network_id | 35f44d3c-8888-429e-b8c7-8a29dead6e5b | | share_network_name | sharenetwork1 | | updated_at | None | +--------------------+--------------------------------------+ * Set metadata items on your share network subnet .. code-block:: console $ openstack share network subnet set sharenetwork1 \ be3ae5ad-a22c-494f-840e-5e3526e34e0f --property key1=value1 \ --property key2=value2 * Unset share network subnet metadata .. code-block:: console $ openstack share network subnet unset sharenetwork1 \ be3ae5ad-a22c-494f-840e-5e3526e34e0f --property key1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/doc/source/user/troubleshooting-asynchronous-failures.rst0000664000175000017500000011254300000000000026431 0ustar00zuulzuul00000000000000===================================== Troubleshooting asynchronous failures ===================================== The Shared File Systems service performs many user actions asynchronously. For example, when a new share is created, the request is immediately acknowledged with a response containing the metadata of the share. Users can then query the resource and check the ``status`` attribute of the share. Usually an ``...ing`` status indicates that actions are performed asynchronously. 
For example, a new share's ``status`` attribute is set to ``creating`` by the service. If these asynchronous operations fail, the resource's status will be set to ``error``. More information about the error can be obtained with the help of the CLI client. Scenario ~~~~~~~~ In this example, the user wants to create a share to host software libraries on several virtual machines. The example deliberately introduces two share creation failures to illustrate how to use the command line to retrieve user support messages. #. In order to create a share, you need to specify the share type that meets your requirements. Cloud administrators create share types; see these available share types: .. code-block:: console clouduser1@client:~$ openstack share type list +--------------------------------------+------------+------------+------------+--------------------------------------+-------------------------------------------+-------------+ | ID | Name | Visibility | Is Default | Required Extra Specs | Optional Extra Specs | Description | +--------------------------------------+------------+------------+------------+--------------------------------------+-------------------------------------------+-------------+ | 61c7e7d2-ce74-4b50-9a3d-a89f7c51b9e9 | default | public | True | driver_handles_share_servers : False | snapshot_support : True | None | | | | | | | create_share_from_snapshot_support : True | | | | | | | | revert_to_snapshot_support : True | | | | | | | | mount_snapshot_support : True | | | 8867fc92-3193-4c6d-8248-a6ba10aa974b | dhss_false | public | False | driver_handles_share_servers : False | snapshot_support : True | None | | | | | | | create_share_from_snapshot_support : True | | | | | | | | revert_to_snapshot_support : True | | | | | | | | mount_snapshot_support : True | | | 4d754228-5b5d-4632-8f96-0c27dcb7968f | dhss_true | public | False | driver_handles_share_servers : True | snapshot_support : True | None | | | | | | | create_share_from_snapshot_support : 
True | | | | | | | | revert_to_snapshot_support : True | | | | | | | | mount_snapshot_support : True | | +--------------------------------------+------------+------------+------------+--------------------------------------+-------------------------------------------+-------------+ In this example, three share types are available. #. To use a share type that specifies driver_handles_share_servers=True capability, you must create a "share network" on which to export the share. .. code-block:: console clouduser1@client:~$ openstack subnet list +--------------------------------------+---------------------+--------------------------------------+---------------------+ | ID | Name | Network | Subnet | +--------------------------------------+---------------------+--------------------------------------+---------------------+ | 01efb9d0-4c5f-424a-8402-b3bf19d0e4a2 | shared-subnet | b8b3fedf-f788-4ba4-bf55-24521a20e671 | 192.168.233.0/24 | | 54a3188e-8bf2-461a-8b70-0d63f05810a6 | private-subnet | 0bea5e39-81ce-4d6f-845d-ce5e87dad7d3 | 10.0.0.0/26 | | 6d1b41b2-8b39-482d-8e46-10bec65cdc99 | ipv6-public-subnet | 9d25eb3b-d76c-4429-b788-a3dab0f2c24d | 2001:db8::/64 | | 8805a23b-b35e-42fe-8502-4f4bc58d23f7 | public-subnet | 9d25eb3b-d76c-4429-b788-a3dab0f2c24d | 172.24.4.0/24 | | 9f8ae84a-5375-42f7-aa1b-eb3b697e8e3a | ipv6-private-subnet | 0bea5e39-81ce-4d6f-845d-ce5e87dad7d3 | fda4:5834:1c78::/64 | +--------------------------------------+---------------------+--------------------------------------+---------------------+ #. Create a "share network" from a private tenant network: .. 
code-block:: console clouduser1@client:~$ openstack share network create --name mynet \ --neutron-net-id 0bea5e39-81ce-4d6f-845d-ce5e87dad7d3 \ --neutron-subnet-id 54a3188e-8bf2-461a-8b70-0d63f05810a6 +-----------------------------------+----------------------------------------------------------+ | Field | Value | +-----------------------------------+----------------------------------------------------------+ | created_at | 2025-04-16T18:39:17.582629 | | description | None | | id | b6cc0aa0-c6bf-4c28-9566-a4bff93382d9 | | name | mynet | | network_allocation_update_support | True | | project_id | 138d700333eb46cfb36b5a9659704759 | | security_service_update_support | True | | share_network_subnets | | | | id = 4114b63b-4932-4082-b5c9-e50dc839d3c9 | | | availability_zone = None | | | created_at = 2025-04-16T18:39:17.607997 | | | updated_at = None | | | segmentation_id = None | | | neutron_net_id = 0bea5e39-81ce-4d6f-845d-ce5e87dad7d3 | | | neutron_subnet_id = 54a3188e-8bf2-461a-8b70-0d63f05810a6 | | | ip_version = None | | | cidr = None | | | network_type = None | | | mtu = None | | | gateway = None | | | metadata = {} | | status | active | | updated_at | None | +-----------------------------------+----------------------------------------------------------+ clouduser1@client:~$ openstack share network list +--------------------------------------+-------+ | ID | Name | +--------------------------------------+-------+ | b6cc0aa0-c6bf-4c28-9566-a4bff93382d9 | mynet | +--------------------------------------+-------+ #. Create the share: .. 
code-block:: console clouduser1@client:~$ openstack share create nfs 1 --name software_share \ --share-network mynet --share-type dhss_true +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | Field | Value | +---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ | access_rules_status | active | | availability_zone | manila-zone-2 | | create_share_from_snapshot_support | True | | created_at | 2025-04-22T16:00:19.973764 | | description | None | | export_locations | | | | id = 208c9cb5-853d-41c2-82ae-42c10c11d226 | | | path = 10.0.0.10:/path/to/fake/share/share_18b84ece_fb8e_438c_b89b_bb2e7c69a5a0_013ca955_c1ca_4817_b053_d153e6bb5253 | | | preferred = True | | | metadata = {} | | | id = 5f2f0201-4d68-48c9-a650-be59692a495f | | | path = 10.0.0.11:/path/to/fake/share/share_18b84ece_fb8e_438c_b89b_bb2e7c69a5a0_013ca955_c1ca_4817_b053_d153e6bb5253 | | | preferred = False | | | metadata = {} | | has_replicas | False | | id | 18b84ece-fb8e-438c-b89b-bb2e7c69a5a0 | | is_public | False | | is_soft_deleted | False | | mount_snapshot_support | True | | name | software_share | | progress | 100% | | project_id | 138d700333eb46cfb36b5a9659704759 | | properties | | | replication_type | None | | revert_to_snapshot_support | True | | scheduled_to_be_deleted_at | None | | share_group_id | None | | share_network_id | b6cc0aa0-c6bf-4c28-9566-a4bff93382d9 | | share_proto | NFS | | share_type | 4d754228-5b5d-4632-8f96-0c27dcb7968f | | share_type_name | dhss_true | | size | 1 | | snapshot_id | None | | snapshot_support | True | | source_backup_id | None | | source_share_group_snapshot_member_id | None | | status | available | | task_state | None | | user_id | c01b2bd0b56949508d27aebdf04c6d69 | | volume_type | dhss_true | 
+---------------------------------------+----------------------------------------------------------------------------------------------------------------------+ #. View the status of the share: .. code-block:: console clouduser1@client:~$ openstack share list +--------------------------------------+------------------+------+-------------+--------+-----------+-----------------+------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+------------------+------+-------------+--------+-----------+-----------------+------+-------------------+ | 18b84ece-fb8e-438c-b89b-bb2e7c69a5a0 | software_share | 1 | NFS | error | False | dhss_true | | None | +--------------------------------------+------------------+------+-------------+--------+-----------+-----------------+------+-------------------+ In this example, an error occurred during the share creation. #. To view the generated user message, use the ``message-list`` command. Use ``--resource-id`` to filter messages for a specific share resource. .. code-block:: console clouduser1@client:~$ openstack share message list +--------------------------------------+---------------+--------------------------------------+-----------+-----------------------------------------------------+-----------+----------------------------+ | ID | Resource Type | Resource ID | Action ID | User Message | Detail ID | Created At | +--------------------------------------+---------------+--------------------------------------+-----------+-----------------------------------------------------+-----------+----------------------------+ | 8fe74a26-f57d-4961-8435-5ea8ccf05946 | SHARE | 18b84ece-fb8e-438c-b89b-bb2e7c69a5a0 | 001 | allocate host: No storage could be allocated for | 008 | 2025-04-22T20:16:50.207084 | | | | | | this share request, Capabilities filter didn't | | | | | | | | succeed. 
| | | +--------------------------------------+---------------+--------------------------------------+-----------+-----------------------------------------------------+-----------+----------------------------+ In User Message column, you can see that the Shared File System service failed to create the share because of a capabilities mismatch. #. To view more information, use the ``message-show`` command, followed by the ID of the message from the message-list command: .. code-block:: console clouduser1@client:~$ openstack share message-show 8fe74a26-f57d-4961-8435-5ea8ccf05946 +---------------+----------------------------------------------------------------------------------------------------------+ | Field | Value | +---------------+----------------------------------------------------------------------------------------------------------+ | id | 8fe74a26-f57d-4961-8435-5ea8ccf05946 | | resource_type | SHARE | | resource_id | 18b84ece-fb8e-438c-b89b-bb2e7c69a5a0 | | action_id | 001 | | user_message | allocate host: No storage could be allocated for this share request, Capabilities filter didn't succeed. | | message_level | ERROR | | detail_id | 008 | | created_at | 2025-04-22T20:16:50.207084 | | expires_at | 2025-05-22T20:16:50.000000 | | request_id | req-1621b77d-0abb-4c90-9e61-8809214f58a6 | +---------------+----------------------------------------------------------------------------------------------------------+ As the cloud user, you know the related specs your share type has, so you can review the share types available. The difference between the two share types is the value of driver_handles_share_servers: .. 
code-block:: console clouduser1@client:~$ openstack share type list +--------------------------------------+------------+------------+------------+--------------------------------------+-------------------------------------------+-------------+ | ID | Name | Visibility | Is Default | Required Extra Specs | Optional Extra Specs | Description | +--------------------------------------+------------+------------+------------+--------------------------------------+-------------------------------------------+-------------+ | 61c7e7d2-ce74-4b50-9a3d-a89f7c51b9e9 | default | public | True | driver_handles_share_servers : False | snapshot_support : True | None | | | | | | | create_share_from_snapshot_support : True | | | | | | | | revert_to_snapshot_support : True | | | | | | | | mount_snapshot_support : True | | | 8867fc92-3193-4c6d-8248-a6ba10aa974b | dhss_false | public | False | driver_handles_share_servers : False | snapshot_support : True | None | | | | | | | create_share_from_snapshot_support : True | | | | | | | | revert_to_snapshot_support : True | | | | | | | | mount_snapshot_support : True | | | 4d754228-5b5d-4632-8f96-0c27dcb7968f | dhss_true | public | False | driver_handles_share_servers : True | snapshot_support : True | None | | | | | | | create_share_from_snapshot_support : True | | | | | | | | revert_to_snapshot_support : True | | | | | | | | mount_snapshot_support : True | | +--------------------------------------+------------+------------+------------+--------------------------------------+-------------------------------------------+-------------+ #. Create a share with the other available share type: .. 
code-block:: console clouduser1@client:~$ openstack share create nfs 1 --name software_share \ --share-network mynet --share-type dhss_false +---------------------------------------+--------------------------------------+ | Field | Value | +---------------------------------------+--------------------------------------+ | access_rules_status | active | | availability_zone | None | | create_share_from_snapshot_support | True | | created_at | 2025-04-22T20:34:04.627679 | | description | None | | has_replicas | False | | id | 010e4c5b-d40a-4691-a7cb-68c3b3950523 | | is_public | False | | is_soft_deleted | False | | metadata | {} | | mount_snapshot_support | True | | name | software_share | | progress | None | | project_id | 138d700333eb46cfb36b5a9659704759 | | replication_type | None | | revert_to_snapshot_support | True | | scheduled_to_be_deleted_at | None | | share_group_id | None | | share_network_id | b6cc0aa0-c6bf-4c28-9566-a4bff93382d9 | | share_proto | NFS | | share_type | 8867fc92-3193-4c6d-8248-a6ba10aa974b | | share_type_name | dhss_false | | size | 1 | | snapshot_id | None | | snapshot_support | True | | source_backup_id | None | | source_share_group_snapshot_member_id | None | | status | creating | | task_state | None | | user_id | c01b2bd0b56949508d27aebdf04c6d69 | | volume_type | dhss_false | +---------------------------------------+--------------------------------------+ In this example, the second share creation attempt fails. #. View the user support message: .. 
code-block:: console clouduser1@client:~$ openstack share list +--------------------------------------+------------------+------+-------------+--------+-----------+-----------------+------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+------------------+------+-------------+--------+-----------+-----------------+------+-------------------+ | 18b84ece-fb8e-438c-b89b-bb2e7c69a5a0 | software_share | 1 | NFS | error | False | dhss_true | | None | | 010e4c5b-d40a-4691-a7cb-68c3b3950523 | software_share | 1 | NFS | error | False | dhss_false | | manila-zone-1 | +--------------------------------------+------------------+------+-------------+--------+-----------+-----------------+------+-------------------+ clouduser1@client:~$ openstack share message list +--------------------------------------+---------------+--------------------------------------+-----------+-----------------------------------------------------+-----------+----------------------------+ | ID | Resource Type | Resource ID | Action ID | User Message | Detail ID | Created At | +--------------------------------------+---------------+--------------------------------------+-----------+-----------------------------------------------------+-----------+----------------------------+ | 50a401e8-c30a-4369-8a35-68a019d19c76 | SHARE | 010e4c5b-d40a-4691-a7cb-68c3b3950523 | 002 | create: Driver does not expect share-network to be | 003 | 2025-04-22T20:34:04.810870 | | | | | | provided with current configuration. | | | | 8fe74a26-f57d-4961-8435-5ea8ccf05946 | SHARE | 18b84ece-fb8e-438c-b89b-bb2e7c69a5a0 | 001 | allocate host: No storage could be allocated for | 008 | 2025-04-22T20:16:50.207084 | | | | | | this share request, Capabilities filter didn't | | | | | | | | succeed. 
| | | +--------------------------------------+---------------+--------------------------------------+-----------+-----------------------------------------------------+-----------+----------------------------+ You can see that the service does not expect a share network for the share type used. Without consulting the administrator, you can discover that the administrator has not made available a storage back end that supports exporting shares directly on to your private neutron network. #. Create the share without the ``--share-network`` parameter: .. code-block:: console clouduser1@client:~$ openstack share create nfs 1 --name software_share \ --share-type dhss_false +---------------------------------------+--------------------------------------+ | Field | Value | +---------------------------------------+--------------------------------------+ | access_rules_status | active | | availability_zone | None | | create_share_from_snapshot_support | True | | created_at | 2025-04-22T21:48:37.025207 | | description | None | | has_replicas | False | | id | feec61e2-4166-4ca3-8d59-a8d13f78535e | | is_public | False | | is_soft_deleted | False | | metadata | {} | | mount_snapshot_support | True | | name | software_share | | progress | None | | project_id | 138d700333eb46cfb36b5a9659704759 | | replication_type | None | | revert_to_snapshot_support | True | | scheduled_to_be_deleted_at | None | | share_group_id | None | | share_network_id | None | | share_proto | NFS | | share_type | 8867fc92-3193-4c6d-8248-a6ba10aa974b | | share_type_name | dhss_false | | size | 1 | | snapshot_id | None | | snapshot_support | True | | source_backup_id | None | | source_share_group_snapshot_member_id | None | | status | creating | | task_state | None | | user_id | c01b2bd0b56949508d27aebdf04c6d69 | | volume_type | dhss_false | +---------------------------------------+--------------------------------------+ #. To ensure that the share was created successfully, use the `share list` command: .. 
code-block:: console clouduser1@client:~$ openstack share list +--------------------------------------+------------------+------+-------------+-----------+-----------+-----------------+------+-------------------+ | ID | Name | Size | Share Proto | Status | Is Public | Share Type Name | Host | Availability Zone | +--------------------------------------+------------------+------+-------------+-----------+-----------+-----------------+------+-------------------+ | 18b84ece-fb8e-438c-b89b-bb2e7c69a5a0 | software_share | 1 | NFS | error | False | dhss_true | | None | | feec61e2-4166-4ca3-8d59-a8d13f78535e | software_share | 1 | NFS | available | False | dhss_false | | manila-zone-1 | | 010e4c5b-d40a-4691-a7cb-68c3b3950523 | software_share | 1 | NFS | error | False | dhss_false | | manila-zone-1 | +--------------------------------------+------------------+------+-------------+-----------+-----------+-----------------+------+-------------------+ #. Delete shares that failed to be created and corresponding support messages: .. code-block:: console clouduser1@client:~$ openstack share delete \ 18b84ece-fb8e-438c-b89b-bb2e7c69a5a0 \ 010e4c5b-d40a-4691-a7cb-68c3b3950523 clouduser1@client:~$ openstack share message list +--------------------------------------+---------------+--------------------------------------+-----------+-----------------------------------------------------+-----------+----------------------------+ | ID | Resource Type | Resource ID | Action ID | User Message | Detail ID | Created At | +--------------------------------------+---------------+--------------------------------------+-----------+-----------------------------------------------------+-----------+----------------------------+ | 50a401e8-c30a-4369-8a35-68a019d19c76 | SHARE | 010e4c5b-d40a-4691-a7cb-68c3b3950523 | 002 | create: Driver does not expect share-network to be | 003 | 2025-04-22T20:34:04.810870 | | | | | | provided with current configuration. 
| | | | 8fe74a26-f57d-4961-8435-5ea8ccf05946 | SHARE | 18b84ece-fb8e-438c-b89b-bb2e7c69a5a0 | 001 | allocate host: No storage could be allocated for | 008 | 2025-04-22T20:16:50.207084 | | | | | | this share request, Capabilities filter didn't | | | | | | | | succeed. | | | +--------------------------------------+---------------+--------------------------------------+-----------+-----------------------------------------------------+-----------+----------------------------+ clouduser1@client:~$ openstack share message delete \ 50a401e8-c30a-4369-8a35-68a019d19c76 \ 8fe74a26-f57d-4961-8435-5ea8ccf05946 clouduser1@client:~$ openstack share message list +----+---------------+-------------+-----------+--------------+-----------+------------+ | ID | Resource Type | Resource ID | Action ID | User Message | Detail ID | Created At | +----+---------------+-------------+-----------+--------------+-----------+------------+ +----+---------------+-------------+-----------+--------------+-----------+------------+ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6376753 manila-21.0.0/etc/0000775000175000017500000000000000000000000013631 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.817673 manila-21.0.0/etc/manila/0000775000175000017500000000000000000000000015072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/etc/manila/README.manila.conf0000664000175000017500000000020000000000000020126 0ustar00zuulzuul00000000000000To generate the sample manila.conf file, run the following command from the top level of the manila directory: tox -egenconfig ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/etc/manila/api-paste.ini0000664000175000017500000000502200000000000017455 
0ustar00zuulzuul00000000000000############# # OpenStack # ############# [composite:osapi_share] use = call:manila.api:root_app_factory /: apiversions /healthcheck: healthcheck /v1: openstack_share_api /v2: openstack_share_api_v2 [composite:openstack_share_api] use = call:manila.api.middleware.auth:pipeline_factory noauth = cors request_id faultwrap http_proxy_to_wsgi sizelimit osprofiler noauth api keystone = cors request_id faultwrap http_proxy_to_wsgi sizelimit osprofiler authtoken keystonecontext api keystone_nolimit = cors request_id faultwrap http_proxy_to_wsgi sizelimit osprofiler authtoken keystonecontext api [composite:openstack_share_api_v2] use = call:manila.api.middleware.auth:pipeline_factory noauth = cors request_id faultwrap http_proxy_to_wsgi sizelimit osprofiler noauth apiv2 noauthv2 = cors request_id faultwrap http_proxy_to_wsgi sizelimit osprofiler noauthv2 apiv2 keystone = cors request_id faultwrap http_proxy_to_wsgi sizelimit osprofiler authtoken keystonecontext apiv2 keystone_nolimit = cors request_id faultwrap http_proxy_to_wsgi sizelimit osprofiler authtoken keystonecontext apiv2 [filter:faultwrap] paste.filter_factory = manila.api.middleware.fault:FaultWrapper.factory [filter:noauth] paste.filter_factory = manila.api.middleware.auth:NoAuthMiddleware.factory [filter:noauthv2] paste.filter_factory = manila.api.middleware.auth:NoAuthMiddlewarev2_60.factory [filter:sizelimit] paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory [app:api] paste.app_factory = manila.api.v1.router:APIRouter.factory [app:apiv2] paste.app_factory = manila.api.v2.router:APIRouter.factory [pipeline:apiversions] pipeline = cors request_id faultwrap http_proxy_to_wsgi osshareversionapp [app:osshareversionapp] paste.app_factory = 
manila.api.versions:VersionsRouter.factory ########## # Shared # ########## [filter:keystonecontext] paste.filter_factory = manila.api.middleware.auth:ManilaKeystoneContext.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = manila [filter:request_id] paste.filter_factory = oslo_middleware.request_id:RequestId.factory [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /etc/manila/healthcheck_disable ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/etc/manila/logging_sample.conf0000664000175000017500000000236700000000000020740 0ustar00zuulzuul00000000000000[loggers] keys = root, manila [handlers] keys = stderr, stdout, watchedfile, syslog, null [formatters] keys = default [logger_root] level = WARNING handlers = null [logger_manila] level = INFO handlers = stderr qualname = manila [logger_amqplib] level = WARNING handlers = stderr qualname = amqplib [logger_sqlalchemy] level = WARNING handlers = stderr qualname = sqlalchemy # "level = INFO" logs SQL queries. # "level = DEBUG" logs SQL queries and results. # "level = WARNING" logs neither. (Recommended for production systems.) 
[logger_boto] level = WARNING handlers = stderr qualname = boto [logger_suds] level = INFO handlers = stderr qualname = suds [logger_eventletwsgi] level = WARNING handlers = stderr qualname = eventlet.wsgi.server [handler_stderr] class = StreamHandler args = (sys.stderr,) formatter = default [handler_stdout] class = StreamHandler args = (sys.stdout,) formatter = default [handler_watchedfile] class = handlers.WatchedFileHandler args = ('manila.log',) formatter = default [handler_syslog] class = handlers.SysLogHandler args = ('/dev/log', handlers.SysLogHandler.LOG_USER) formatter = default [handler_null] class = manila.common.openstack.NullHandler formatter = default args = () [formatter_default] format = %(message)s ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/etc/manila/manila-policy-generator.conf0000664000175000017500000000011100000000000022454 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/manila/policy.yaml.sample namespace = manila ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/etc/manila/rootwrap.conf0000664000175000017500000000173400000000000017623 0ustar00zuulzuul00000000000000# Configuration for manila-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/manila/rootwrap.d,/usr/share/manila/rootwrap # List of directories to search executables in, in case filters do not # explicitly specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! 
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin,/usr/lpp/mmfs/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.817673 manila-21.0.0/etc/manila/rootwrap.d/0000775000175000017500000000000000000000000017171 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/etc/manila/rootwrap.d/share.filters0000664000175000017500000001766000000000000021677 0ustar00zuulzuul00000000000000# manila-rootwrap command filters for share nodes # This file should be owned by (and only-writeable by) the root user [Filters] # manila/utils.py : 'chown', '%s', '%s' chown: CommandFilter, chown, root # manila/utils.py : 'cat', '%s' cat: CommandFilter, cat, root # manila/share/drivers/lvm.py: 'mkfs.ext4', '/dev/mapper/%s' mkfs.ext4: CommandFilter, mkfs.ext4, root # manila/share/drivers/lvm.py: 'mkfs.ext3', '/dev/mapper/%s' mkfs.ext3: CommandFilter, mkfs.ext3, root # manila/share/drivers/lvm.py: 'smbd', '-s', '%s', '-D' smbd: CommandFilter, smbd, root smb: CommandFilter, smb, root # manila/share/drivers/lvm.py: 'rmdir', '%s' rmdir: CommandFilter, rmdir, root # manila/share/drivers/lvm.py: 'dd' 'count=0', 'if=%s' % srcstr, 'of=%s' dd: CommandFilter, dd, root # manila/share/drivers/lvm.py: 'fsck', '-pf', %s fsck: CommandFilter, fsck, root # manila/share/drivers/lvm.py: 'resize2fs', %s resize2fs: CommandFilter, resize2fs, root # manila/share/drivers/helpers.py: 'smbcontrol', 'all', 'close-share', '%s' smbcontrol: CommandFilter, smbcontrol, root # manila/share/drivers/helpers.py: 'net', 
'conf', 'addshare', '%s', '%s', 'writeable=y', 'guest_ok=y # manila/share/drivers/helpers.py: 'net', 'conf', 'delshare', '%s' # manila/share/drivers/helpers.py: 'net', 'conf', 'setparm', '%s', '%s', '%s' # manila/share/drivers/helpers.py: 'net', 'conf', 'getparm', '%s', 'hosts allow' net: CommandFilter, net, root # manila/share/drivers/helpers.py: 'cp', '%s', '%s' cp: CommandFilter, cp, root # manila/share/drivers/helpers.py: 'service', '%s', '%s' service: CommandFilter, service, root # manila/share/drivers/lvm.py: 'lvremove', '-f', "%s/%s lvremove: CommandFilter, lvremove, root # manila/share/drivers/lvm.py: 'lvextend', '-L', '%sG''-n', %s lvextend: CommandFilter, lvextend, root # manila/share/drivers/lvm.py: 'lvcreate', '-L', %s, '-n', %s lvcreate: CommandFilter, lvcreate, root # manila/share/drivers/lvm.py: 'vgs', '--noheadings', '-o', 'name' # manila/share/drivers/lvm.py: 'vgs', %s, '--rows', '--units', 'g' vgs: CommandFilter, vgs, root # manila/share/drivers/lvm.py: 'tune2fs', '-U', 'random', '%volume-snapshot%' tune2fs: CommandFilter, tune2fs, root # manila/share/drivers/generic.py: 'sed', '-i', '\'/%s/d\'', '%s' sed: CommandFilter, sed, root # manila/share/drivers/glusterfs.py: 'mkdir', '%s' # manila/share/drivers/ganesha/manager.py: 'mkdir', '-p', '%s' mkdir: CommandFilter, mkdir, root # manila/share/drivers/glusterfs.py: 'rm', '-rf', '%s' rm: CommandFilter, rm, root # manila/share/drivers/glusterfs.py: 'mount', '-t', 'glusterfs', '%s', '%s' # manila/share/drivers/glusterfs/glusterfs_native.py: 'mount', '-t', 'glusterfs', '%s', '%s' mount: CommandFilter, mount, root # manila/share/drivers/glusterfs.py: 'gluster', '--xml', 'volume', 'info', '%s' # manila/share/drivers/glusterfs.py: 'gluster', 'volume', 'set', '%s', 'nfs.export-dir', '%s' gluster: CommandFilter, gluster, root # manila/network/linux/ip_lib.py: 'ip', 'netns', 'exec', '%s', '%s' ip: CommandFilter, ip, root # manila/network/linux/interface.py: 'ovs-vsctl', 'add-port', '%s', '%s' ovs-vsctl: 
CommandFilter, ovs-vsctl, root # manila/share/drivers/glusterfs/glusterfs_native.py: 'find', '%s', '-mindepth', '1', '!', '-path', '%s', '!', '-path', '%s', '-delete' # manila/share/drivers/glusterfs/glusterfs_native.py: 'find', '%s', '-mindepth', '1', '-delete' find: CommandFilter, find, root # manila/share/drivers/glusterfs/glusterfs_native.py: 'umount', '%s' umount: CommandFilter, umount, root # GPFS commands # manila/share/drivers/ibm/gpfs.py: 'mmgetstate', '-Y' mmgetstate: CommandFilter, mmgetstate, root # manila/share/drivers/ibm/gpfs.py: 'mmlsattr', '%s' mmlsattr: CommandFilter, mmlsattr, root # manila/share/drivers/ibm/gpfs.py: 'mmcrfileset', '%s', '%s', '--inode-space', 'new' mmcrfileset: CommandFilter, mmcrfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmlinkfileset', '%s', '%s', '-J', '%s' mmlinkfileset: CommandFilter, mmlinkfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmsetquota', '-j', '%s', '-h', '%s', '%s' mmsetquota: CommandFilter, mmsetquota, root # manila/share/drivers/ibm/gpfs.py: 'mmunlinkfileset', '%s', '%s', '-f' mmunlinkfileset: CommandFilter, mmunlinkfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmdelfileset', '%s', '%s', '-f' mmdelfileset: CommandFilter, mmdelfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmcrsnapshot', '%s', '%s', '-j', '%s' mmcrsnapshot: CommandFilter, mmcrsnapshot, root # manila/share/drivers/ibm/gpfs.py: 'mmdelsnapshot', '%s', '%s', '-j', '%s' mmdelsnapshot: CommandFilter, mmdelsnapshot, root # manila/share/drivers/ibm/gpfs.py: 'rsync', '-rp', '%s', '%s' rsync: CommandFilter, rsync, root # manila/share/drivers/ibm/gpfs.py: 'exportfs' exportfs: CommandFilter, exportfs, root # manila/share/drivers/ibm/gpfs.py: 'stat', '--format=%F', '%s' stat: CommandFilter, stat, root # manila/share/drivers/ibm/gpfs.py: 'df', '-P', '-B', '1', '%s' df: CommandFilter, df, root # manila/share/drivers/ibm/gpfs.py: 'chmod', '777', '%s' chmod: CommandFilter, chmod, root # manila/share/drivers/ibm/gpfs.py: 'mmnfs', 'export', 
'%s', '%s' mmnfs: CommandFilter, mmnfs, root # manila/share/drivers/ibm/gpfs.py: 'mmlsfileset', '%s', '-J', '%s', '-L' mmlsfileset: CommandFilter, mmlsfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmchfileset', '%s', '-J', '%s', '-j', '%s' mmchfileset: CommandFilter, mmchfileset, root # manila/share/drivers/ibm/gpfs.py: 'mmlsquota', '-j', '-J', '%s', '%s' mmlsquota: CommandFilter, mmlsquota, root # manila/share/drivers/ganesha/manager.py: 'mv', '%s', '%s' mv: CommandFilter, mv, root # manila/share/drivers/ganesha/manager.py: 'mktemp', '-p', '%s', '-t', '%s' mktemp: CommandFilter, mktemp, root # manila/share/drivers/ganesha/manager.py: shcat: RegExpFilter, sh, root, sh, -c, echo '((.|\n)*)' > /.* # manila/share/drivers/ganesha/manager.py: dbus-addexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, --system, --dest=org\.ganesha\.nfsd, /org/ganesha/nfsd/ExportMgr, org\.ganesha\.nfsd\.exportmgr\.(Add|Remove)Export, .*, .* # manila/share/drivers/ganesha/manager.py: dbus-removeexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, --system, --dest=org\.ganesha\.nfsd, /org/ganesha/nfsd/ExportMgr, org\.ganesha\.nfsd\.exportmgr\.(Add|Remove)Export, .* # manila/share/drivers/ganesha/manager.py: dbus-updateexport: RegExpFilter, dbus-send, root, dbus-send, --print-reply, --system, --dest=org\.ganesha\.nfsd, /org/ganesha/nfsd/ExportMgr, org\.ganesha\.nfsd\.exportmgr\.UpdateExport, .*, .* # manila/share/drivers/ganesha/manager.py: rmconf: RegExpFilter, sh, root, sh, -c, rm -f /.*/\*\.conf$ # ZFS commands # manila/share/drivers/zfsonlinux/driver.py # manila/share/drivers/zfsonlinux/utils.py zpool: CommandFilter, zpool, root # manila/share/drivers/zfsonlinux/driver.py # manila/share/drivers/zfsonlinux/utils.py zfs: CommandFilter, zfs, root # manila/share/drivers/zfsonlinux/driver.py kill: CommandFilter, kill, root # manila/data/utils.py: 'ls', '-pA1', '--group-directories-first', '%s' ls: CommandFilter, ls, root # manila/data/utils.py: 'touch', 
'--reference=%s', '%s' touch: CommandFilter, touch, root # manila/share/drivers/container/container.py: docker docker: CommandFilter, docker, root # manila/share/drivers/container/container.py: brctl brctl: CommandFilter, brctl, root # manila/share/drivers/container/storage_helper.py: e2fsck # manila/share/drivers/generic.py: e2fsck # manila/share/drivers/lvm.py: e2fsck e2fsck: CommandFilter, e2fsck, root # manila/share/drivers/lvm.py: lvconvert --merge %s lvconvert: CommandFilter, lvconvert, root # manila/data/utils.py: 'sha256sum', '%s' sha256sum: CommandFilter, sha256sum, root # manila/utils.py: 'tee', '%s' tee: CommandFilter, tee, root # manila/share/drivers/container/storage_helper.py: lvs -o lv_size --noheadings --nosuffix --units g lvs: CommandFilter, lvs, root # manila/share/drivers/container/storage_helper.py: lvrename --autobackup n lvrename: CommandFilter, lvrename, root ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.817673 manila-21.0.0/etc/oslo-config-generator/0000775000175000017500000000000000000000000020034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/etc/oslo-config-generator/manila.conf0000664000175000017500000000070600000000000022147 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/manila/manila.conf.sample namespace = manila namespace = castellan.config namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware namespace = oslo.policy namespace = oslo.reports namespace = oslo.service.periodic_task namespace = oslo.service.service namespace = oslo.service.sslutils namespace = oslo.service.wsgi namespace = keystonemiddleware.auth_token ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.817673 
manila-21.0.0/httpd/0000775000175000017500000000000000000000000014201 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/httpd/manila-uwsgi.ini0000664000175000017500000000060400000000000017277 0ustar00zuulzuul00000000000000[uwsgi] socket-timeout = 10 http-auto-chunked = true http-chunked-input = true http-raw-body = true chmod-socket = 666 lazy-apps = true add-header = Connection: close buffer-size = 65535 thunder-lock = true plugins = python enable-threads = true exit-on-reload = true die-on-term = true master = true processes = 4 http-socket = 127.0.0.1:61999 wsgi-file = /usr/local/bin/manila-wsgi-api ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/httpd/mod_wsgi-manila.conf0000664000175000017500000000107100000000000020116 0ustar00zuulzuul00000000000000Listen 8786 LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" manila_combined WSGIDaemonProcess osapi_share processes=2 threads=1 user=manila display-name=%{GROUP} WSGIProcessGroup osapi_share WSGIScriptAlias / /var/www/cgi-bin/manila/osapi_share WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On = 2.4> ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/apache2/manila_error.log CustomLog /var/log/apache2/manila.log manila_combined ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/httpd/uwsgi-manila.conf0000664000175000017500000000013500000000000017444 0ustar00zuulzuul00000000000000KeepAlive Off SetEnv proxy-sendchunked 1 ProxyPass "/share" "http://127.0.0.1:51999" retry=0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.821673 manila-21.0.0/manila/0000775000175000017500000000000000000000000014317 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/__init__.py0000664000175000017500000000000000000000000016416 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8256729 manila-21.0.0/manila/api/0000775000175000017500000000000000000000000015070 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/__init__.py0000664000175000017500000000156500000000000017210 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import paste.urlmap def root_app_factory(loader, global_conf, **local_conf): return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/common.py0000664000175000017500000005701600000000000016743 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ipaddress import os import re import string from urllib import parse from operator import xor from oslo_config import cfg from oslo_log import log from oslo_utils import strutils import webob from webob import exc from manila.api.openstack import api_version_request as api_version from manila.api.openstack import versioned_method from manila.common import constants from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import policy api_common_opts = [ cfg.IntOpt( 'osapi_max_limit', default=1000, help='The maximum number of items returned in a single response from ' 'a collection resource.'), cfg.StrOpt( 'osapi_share_base_URL', help='Base URL to be presented to users in links to the Share API'), ] CONF = cfg.CONF CONF.register_opts(api_common_opts) LOG = log.getLogger(__name__) # Regex that matches alphanumeric characters, periods, hypens, # colons and underscores: # ^ assert position at start of the string # [\w\.\-\:\_] match expression # $ assert position at end of the string VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE) def validate_key_names(key_names_list): """Validate each item of the list to match key name regex.""" for key_name in key_names_list: if not VALID_KEY_NAME_REGEX.match(key_name): return False return True def get_pagination_params(request): """Return marker, limit, offset tuple from request. :param request: `wsgi.Request` possibly containing 'marker' and 'limit' GET variables. 
'marker' is the id of the last element the client has seen, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either marker or limit will cause exc.HTTPBadRequest() exceptions to be raised. """ params = {} if 'limit' in request.GET: params['limit'] = _get_limit_param(request) if 'marker' in request.GET: params['marker'] = _get_marker_param(request) if 'offset' in request.GET: params['offset'] = _get_offset_param(request) return params def _get_limit_param(request): """Extract integer limit from request or fail. Defaults to max_limit if not present and returns max_limit if present 'limit' is greater than max_limit. """ max_limit = CONF.osapi_max_limit try: limit = int(request.GET['limit']) except ValueError: msg = _('limit param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _('limit param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) limit = min(limit, max_limit) return limit def _get_marker_param(request): """Extract marker ID from request or fail.""" return request.GET['marker'] def _get_offset_param(request): """Extract offset id from request's dictionary (defaults to 0) or fail.""" offset = request.GET['offset'] return _validate_integer(offset, 'offset', 0, constants.DB_MAX_INT) def _validate_integer(value, name, min_value=None, max_value=None): """Make sure that value is a valid integer, potentially within range. 
:param value: the value of the integer :param name: the name of the integer :param min_value: the min_length of the integer :param max_value: the max_length of the integer :return: integer """ try: value = strutils.validate_integer(value, name, min_value, max_value) return value except ValueError as e: raise webob.exc.HTTPBadRequest(explanation=str(e)) def _validate_pagination_query(request, max_limit=CONF.osapi_max_limit): """Validate the given request query and return limit and offset.""" try: offset = int(request.GET.get('offset', 0)) except ValueError: msg = _('offset param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) try: limit = int(request.GET.get('limit', max_limit)) except ValueError: msg = _('limit param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _('limit param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) if offset < 0: msg = _('offset param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) return limit, offset def limited(items, request, max_limit=CONF.osapi_max_limit): """Return a slice of items according to requested offset and limit. :param items: A sliceable entity :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' GET variables. 'offset' is where to start in the list, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. Negative values for either offset or limit will cause exc.HTTPBadRequest() exceptions to be raised. :kwarg max_limit: The maximum number of items to return from 'items' """ limit, offset = _validate_pagination_query(request, max_limit) limit = min(max_limit, limit or max_limit) range_end = offset + limit return items[offset:range_end] def get_sort_params(params, default_key='created_at', default_dir='desc'): """Retrieves sort key/direction parameters. Processes the parameters to get the 'sort_key' and 'sort_dir' parameter values. 
:param params: webob.multidict of request parameters (from manila.api.openstack.wsgi.Request.params) :param default_key: default sort key value, will return if no sort key are supplied :param default_dir: default sort dir value, will return if no sort dir are supplied :returns: value of sort key, value of sort dir """ sort_key = params.pop('sort_key', default_key) sort_dir = params.pop('sort_dir', default_dir) return sort_key, sort_dir def remove_version_from_href(href): """Removes the first api version from the href. Given: 'http://manila.example.com/v1.1/123' Returns: 'http://manila.example.com/123' Given: 'http://www.manila.com/v1.1' Returns: 'http://www.manila.com' Given: 'http://manila.example.com/share/v1.1/123' Returns: 'http://manila.example.com/share/123' """ parsed_url = parse.urlsplit(href) url_parts = parsed_url.path.split('/') # NOTE: this should match vX.X or vX expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') for x in range(len(url_parts)): if expression.match(url_parts[x]): del url_parts[x] break new_path = '/'.join(url_parts) if new_path == parsed_url.path: msg = 'href %s does not contain version' % href LOG.debug(msg) raise ValueError(msg) parsed_url = list(parsed_url) parsed_url[2] = new_path return parse.urlunsplit(parsed_url) def dict_to_query_str(params): # TODO(throughnothing): we should just use urllib.urlencode instead of this # But currently we don't work with urlencoded url's param_str = "" for key, val in params.items(): param_str = param_str + '='.join([str(key), str(val)]) + '&' return param_str.rstrip('&') def check_net_id_and_subnet_id(body): if xor('neutron_net_id' in body, 'neutron_subnet_id' in body): msg = _("When creating a new share network subnet you need to " "specify both neutron_net_id and neutron_subnet_id or " "none of them.") raise webob.exc.HTTPBadRequest(explanation=msg) def check_share_network_is_active(share_network): network_status = share_network.get('status') if network_status != 
constants.STATUS_NETWORK_ACTIVE: msg = _("The share network %(id)s used isn't in an 'active' state. " "Current status is %(status)s. The action may be retried " "after the share network has changed its state.") % { 'id': share_network['id'], 'status': share_network.get('status'), } raise webob.exc.HTTPBadRequest(explanation=msg) def check_display_field_length(field, field_name): if field: length = len(field) if length > constants.DB_DISPLAY_FIELDS_MAX_LENGTH: raise exception.InvalidInput( reason=("%(field_name)s can only be %(len)d characters long." % {'field_name': field_name, 'len': constants.DB_DISPLAY_FIELDS_MAX_LENGTH})) def parse_is_public(is_public): """Parse is_public into something usable. :returns: - True: API should list public share group types only - False: API should list private share group types only - None: API should list both public and private share group types """ if is_public is None: # preserve default value of showing only public types return True elif str(is_public).lower() == "all": return None else: try: return strutils.bool_from_string(is_public, strict=True) except ValueError: msg = _('Invalid is_public filter [%s]') % is_public raise webob.exc.HTTPBadRequest(explanation=msg) class ViewBuilder(object): """Model API responses as dictionaries.""" _collection_name = None _collection_route_name = None _detail_version_modifiers = [] def _get_project_id(self, request): project_id = request.environ["manila.context"].project_id if '/v1/' in request.url: # project_ids are mandatory in v1 URLs return project_id elif project_id and ("/v2/%s" % project_id in request.url): # project_ids are not mandatory within v2 URLs, but links need # to include them if the request does. 
return project_id return '' def _get_links(self, request, identifier): return [{"rel": "self", "href": self._get_href_link(request, identifier), }, {"rel": "bookmark", "href": self._get_bookmark_link(request, identifier), }] def _get_next_link(self, request, identifier): """Return href string with proper limit and marker params.""" params = request.params.copy() params["marker"] = identifier url = "" collection_route_name = ( self._collection_route_name or self._collection_name ) prefix = self._update_link_prefix(request.application_url, CONF.osapi_share_base_URL) url = os.path.join(prefix, self._get_project_id(request), collection_route_name) return "%s?%s" % (url, dict_to_query_str(params)) def _get_href_link(self, request, identifier): """Return an href string pointing to this object.""" collection_route_name = ( self._collection_route_name or self._collection_name ) prefix = self._update_link_prefix(request.application_url, CONF.osapi_share_base_URL) return os.path.join(prefix, self._get_project_id(request), collection_route_name, str(identifier)) def _get_bookmark_link(self, request, identifier): """Create a URL that refers to a specific resource.""" base_url = remove_version_from_href(request.application_url) base_url = self._update_link_prefix(base_url, CONF.osapi_share_base_URL) collection_route_name = ( self._collection_route_name or self._collection_name ) return os.path.join(base_url, self._get_project_id(request), collection_route_name, str(identifier)) def _get_collection_links(self, request, items, id_key="uuid"): """Retrieve 'next' link, if applicable.""" links = [] limit = int(request.params.get("limit", 0)) if limit and limit == len(items): last_item = items[-1] if id_key in last_item: last_item_id = last_item[id_key] else: last_item_id = last_item["id"] links.append({ "rel": "next", "href": self._get_next_link(request, last_item_id), }) return links def _update_link_prefix(self, orig_url, prefix): if not prefix: return orig_url url_parts = 
list(parse.urlsplit(orig_url)) prefix_parts = list(parse.urlsplit(prefix)) url_parts[0:2] = prefix_parts[0:2] return parse.urlunsplit(url_parts) def update_versioned_resource_dict(self, request, resource_dict, resource): """Updates the given resource dict for the given request version. This method calls every method, that is applicable to the request version, in _detail_version_modifiers. """ for method_name in self._detail_version_modifiers: method = getattr(self, method_name) if request.api_version_request.matches_versioned_method(method): request_context = request.environ['manila.context'] method.func(self, request_context, resource_dict, resource) @classmethod def versioned_method(cls, min_ver, max_ver=None, experimental=False): """Decorator for versioning API methods. :param min_ver: string representing minimum version :param max_ver: optional string representing maximum version :param experimental: flag indicating an API is experimental and is subject to change or removal at any time """ def decorator(f): obj_min_ver = api_version.APIVersionRequest(min_ver) if max_ver: obj_max_ver = api_version.APIVersionRequest(max_ver) else: obj_max_ver = api_version.APIVersionRequest() # Add to list of versioned methods registered func_name = f.__name__ new_func = versioned_method.VersionedMethod( func_name, obj_min_ver, obj_max_ver, experimental, f) return new_func return decorator def remove_invalid_options(context, search_options, allowed_search_options): """Remove search options that are not valid for non-admin API/context.""" if context.is_admin: # Allow all options return # Otherwise, strip out all unknown options unknown_options = [opt for opt in search_options if opt not in allowed_search_options] bad_options = ", ".join(unknown_options) LOG.debug("Removing options '%(bad_options)s' from query", {"bad_options": bad_options}) for opt in unknown_options: del search_options[opt] def validate_common_name(access): """Validate common name passed by user. 
'access' is used as the certificate's CN (common name) to which access is allowed or denied by the backend. The standard allows for just about any string in the common name. The meaning of a string depends on its interpretation and is limited to 64 characters. """ if not (0 < len(access) < 65): exc_str = _('Invalid CN (common name). Must be 1-64 chars long.') raise webob.exc.HTTPBadRequest(explanation=exc_str) ''' for the reference specification for AD usernames, reference below links: 1:https://msdn.microsoft.com/en-us/library/bb726984.aspx 2:https://technet.microsoft.com/en-us/library/cc733146.aspx ''' def validate_username(access): sole_periods_spaces_re = r'[\s|\.]+$' valid_username_re = r'.[^\"\/\\\[\]\:\;\|\=\,\+\*\?\<\>]{3,254}$' username = access if re.match(sole_periods_spaces_re, username): exc_str = ('Invalid user or group name,cannot consist solely ' 'of periods or spaces.') raise webob.exc.HTTPBadRequest(explanation=exc_str) if not re.match(valid_username_re, username): exc_str = ('Invalid user or group name. Must be 4-255 characters ' 'and consist of alphanumeric characters and ' 'exclude special characters "/\\[]:;|=,+*?<>') raise webob.exc.HTTPBadRequest(explanation=exc_str) def validate_cephx_id(cephx_id): if not cephx_id: raise webob.exc.HTTPBadRequest(explanation=_( 'Ceph IDs may not be empty.')) # This restriction may be lifted in Ceph in the future: # http://tracker.ceph.com/issues/14626 if not set(cephx_id) <= set(string.printable): raise webob.exc.HTTPBadRequest(explanation=_( 'Ceph IDs must consist of ASCII printable characters.')) # Periods are technically permitted, but we restrict them here # to avoid confusion where users are unsure whether they should # include the "client." prefix: otherwise they could accidentally # create "client.client.foobar". if '.' 
in cephx_id: raise webob.exc.HTTPBadRequest(explanation=_( 'Ceph IDs may not contain periods.')) def validate_ip(access_to, enable_ipv6): try: if enable_ipv6: validator = ipaddress.ip_network else: validator = ipaddress.IPv4Network validator(str(access_to)) except ValueError as e: raise webob.exc.HTTPBadRequest(explanation=str(e)) def validate_access(*args, **kwargs): access_type = kwargs.get('access_type') access_to = kwargs.get('access_to') enable_ceph = kwargs.get('enable_ceph') enable_ipv6 = kwargs.get('enable_ipv6') if access_type == 'ip': validate_ip(access_to, enable_ipv6) elif access_type == 'user': validate_username(access_to) elif access_type == 'cert': validate_common_name(access_to.strip()) elif access_type == "cephx" and enable_ceph: validate_cephx_id(access_to) else: if enable_ceph: exc_str = _("Only 'ip', 'user', 'cert' or 'cephx' access " "types are supported.") else: exc_str = _("Only 'ip', 'user' or 'cert' access types " "are supported.") raise webob.exc.HTTPBadRequest(explanation=exc_str) def validate_integer(value, name, min_value=None, max_value=None): """Make sure that value is a valid integer, potentially within range. :param value: the value of the integer :param name: the name of the integer :param min_value: the lowest integer permitted in the range :param max_value: the highest integer permitted in the range :returns: integer """ try: value = strutils.validate_integer(value, name, min_value, max_value) return value except ValueError as e: raise webob.exc.HTTPBadRequest(explanation=str(e)) def validate_public_share_policy(context, api_params, api='create'): """Validates if policy allows is_public parameter to be set to True. 
:arg api_params - A dictionary of values that may contain 'is_public' :returns api_params with 'is_public' item sanitized if present :raises exception.InvalidParameterValue if is_public is set but is Invalid exception.NotAuthorized if is_public is True but policy prevents it """ if 'is_public' not in api_params: return api_params policies = { 'create': 'create_public_share', 'update': 'set_public_share', } policy_to_check = policies[api] try: api_params['is_public'] = strutils.bool_from_string( api_params['is_public'], strict=True) except ValueError as e: raise exception.InvalidParameterValue(str(e)) public_shares_allowed = policy.check_policy( context, 'share', policy_to_check, do_raise=False) if api_params['is_public'] and not public_shares_allowed: message = _("User is not authorized to set 'is_public' to True in the " "request.") raise exception.NotAuthorized(message=message) return api_params def _get_existing_subnets(context, share_network_id, az): """Return any existing subnets in the requested AZ. If az is None, the method will search for an existent default subnet. 
""" if az is None: return db_api.share_network_subnet_get_default_subnets( context, share_network_id) return ( db_api.share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, az, fallback_to_default=False) ) def validate_subnet_create(context, share_network_id, data, multiple_subnet_support): check_net_id_and_subnet_id(data) try: share_network = db_api.share_network_get( context, share_network_id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) availability_zone = data.pop('availability_zone', None) subnet_az = {} if availability_zone: try: subnet_az = db_api.availability_zone_get(context, availability_zone) except exception.AvailabilityZoneNotFound: msg = _("The provided availability zone %s does not " "exist.") % availability_zone raise exc.HTTPBadRequest(explanation=msg) data['availability_zone_id'] = subnet_az.get('id') existing_subnets = _get_existing_subnets( context, share_network_id, data['availability_zone_id']) if existing_subnets and not multiple_subnet_support: msg = ("Another share network subnet was found in the " "specified availability zone. Only one share network " "subnet is allowed per availability zone for share " "network %s." 
% share_network_id) raise exc.HTTPConflict(explanation=msg) return share_network, existing_subnets def check_metadata_properties(metadata=None): if not metadata: metadata = {} for k, v in metadata.items(): if not k: msg = _("Metadata property key is blank.") LOG.warning(msg) raise exception.InvalidMetadata(message=msg) if len(k) > 255: msg = _("Metadata property key is " "greater than 255 characters.") LOG.warning(msg) raise exception.InvalidMetadataSize(message=msg) if not v: msg = _("Metadata property value is blank.") LOG.warning(msg) raise exception.InvalidMetadata(message=msg) if len(v) > 1023: msg = _("Metadata property value is " "greater than 1023 characters.") LOG.warning(msg) raise exception.InvalidMetadataSize(message=msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8256729 manila-21.0.0/manila/api/contrib/0000775000175000017500000000000000000000000016530 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/contrib/__init__.py0000664000175000017500000000226400000000000020645 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contrib contains extensions that are shipped with manila. It can't be called 'extensions' because that causes namespacing problems. 
""" from oslo_config import cfg from oslo_log import log from manila.api import extensions CONF = cfg.CONF LOG = log.getLogger(__name__) def standard_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) def select_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, CONF.osapi_share_ext_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/extensions.py0000664000175000017500000002654500000000000017655 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from oslo_log import log from oslo_utils import importutils import webob.dec import webob.exc from manila.api.openstack import wsgi from manila import exception from manila import policy CONF = cfg.CONF LOG = log.getLogger(__name__) class ExtensionDescriptor(object): """Base class that defines the contract for extensions. Note that you don't have to derive from this class to have a valid extension; it is purely a convenience. 
""" # The name of the extension, e.g., 'Fox In Socks' name = None # The alias for the extension, e.g., 'FOXNSOX' alias = None # Description comes from the docstring for the class # The timestamp when the extension was last updated, e.g., # '2011-01-22T13:25:27-06:00' updated = None def __init__(self, ext_mgr): """Register extension with the extension manager.""" ext_mgr.register(self) self.ext_mgr = ext_mgr def get_resources(self): """List of extensions.ResourceExtension extension objects. Resources define new nouns, and are accessible through URLs. """ resources = [] return resources def get_controller_extensions(self): """List of extensions.ControllerExtension extension objects. Controller extensions are used to extend existing controllers. """ controller_exts = [] return controller_exts class ExtensionsResource(wsgi.Resource): def __init__(self, extension_manager): self.extension_manager = extension_manager super(ExtensionsResource, self).__init__(None) def _translate(self, ext): ext_data = {} ext_data['name'] = ext.name ext_data['alias'] = ext.alias ext_data['description'] = ext.__doc__ ext_data['updated'] = ext.updated ext_data['links'] = [] # TODO(dprince): implement extension links return ext_data def index(self, req): extensions = [] for _alias, ext in self.extension_manager.extensions.items(): extensions.append(self._translate(ext)) return dict(extensions=extensions) def show(self, req, id): try: # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions[id] except KeyError: raise webob.exc.HTTPNotFound() return dict(extension=self._translate(ext)) def delete(self, req, id): raise webob.exc.HTTPNotFound() def create(self, req): raise webob.exc.HTTPNotFound() class ExtensionManager(object): """Load extensions from the configured extension path. See manila/tests/api/extensions/foxinsocks/extension.py for an example extension implementation. 
""" def __init__(self): LOG.info('Initializing extension manager.') self.cls_list = CONF.osapi_share_extension self.extensions = {} self._load_extensions() def register(self, ext): # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.alias LOG.info('Loaded extension: %s', alias) if alias in self.extensions: raise exception.Error("Found duplicate extension: %s" % alias) self.extensions[alias] = ext def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] resources.append(ResourceExtension('extensions', ExtensionsResource(self))) for ext in self.extensions.values(): try: resources.extend(ext.get_resources()) except AttributeError: # NOTE(dprince): Extension aren't required to have resource # extensions pass return resources def get_controller_extensions(self): """Returns a list of ControllerExtension objects.""" controller_exts = [] for ext in self.extensions.values(): try: get_ext_method = ext.get_controller_extensions except AttributeError: # NOTE(Vek): Extensions aren't required to have # controller extensions continue controller_exts.extend(get_ext_method()) return controller_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name: %s', extension.name) LOG.debug('Ext alias: %s', extension.alias) LOG.debug('Ext description: %s', ' '.join(extension.__doc__.strip().split())) LOG.debug('Ext updated: %s', extension.updated) except AttributeError: LOG.exception("Exception loading extension.") return False return True def load_extension(self, ext_factory): """Execute an extension factory. Loads an extension. The 'ext_factory' is the name of a callable that will be imported and called with one argument--the extension manager. The factory callable is expected to call the register() method at least once. 
""" LOG.debug("Loading extension %s", ext_factory) # Load the factory factory = importutils.import_class(ext_factory) # Call it LOG.debug("Calling extension factory %s", ext_factory) factory(self) def _load_extensions(self): """Load extensions specified on the command line.""" extensions = list(self.cls_list) # NOTE(thingee): Backwards compat for the old extension loader path. # We can drop this post-grizzly in the H release. old_contrib_path = ('manila.api.openstack.share.contrib.' 'standard_extensions') new_contrib_path = 'manila.api.contrib.standard_extensions' if old_contrib_path in extensions: LOG.warning('osapi_share_extension is set to deprecated path: ' '%s.', old_contrib_path) LOG.warning('Please set your flag or manila.conf settings for ' 'osapi_share_extension to: %s.', new_contrib_path) extensions = [e.replace(old_contrib_path, new_contrib_path) for e in extensions] for ext_factory in extensions: try: self.load_extension(ext_factory) except Exception as exc: LOG.warning('Failed to load extension %(ext_factory)s: ' '%(exc)s.', {"ext_factory": ext_factory, "exc": exc}) class ControllerExtension(object): """Extend core controllers of manila OpenStack API. Provide a way to extend existing manila OpenStack API core controllers. 
""" def __init__(self, extension, collection, controller): self.extension = extension self.collection = collection self.controller = controller class ResourceExtension(object): """Add top level resources to the OpenStack API in manila.""" def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, custom_routes_fn=None): if not collection_actions: collection_actions = {} if not member_actions: member_actions = {} self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions self.custom_routes_fn = custom_routes_fn def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): """Registers all standard API extensions.""" # Walk through all the modules in our directory... our_dir = path[0] for dirpath, dirnames, filenames in os.walk(our_dir): # Compute the relative package name from the dirpath relpath = os.path.relpath(dirpath, our_dir) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) # Now, consider each file in turn, only considering .py and .pyc files for fname in filenames: root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py and .pyc if (ext not in ('.py', '.pyc')) or root == '__init__': continue # If .pyc and .py both exist, skip .pyc if ext == '.pyc' and ((root + '.py') in filenames): continue # Try loading it classname = "%s%s" % (root[0].upper(), root[1:]) classpath = ("%s%s.%s.%s" % (package, relpkg, root, classname)) if ext_list is not None and classname not in ext_list: logger.debug("Skipping extension: %s" % classpath) continue try: ext_mgr.load_extension(classpath) except Exception as exc: logger.warning('Failed to load extension %(classpath)s: ' '%(exc)s.', {"classpath": classpath, "exc": exc}) # Now, let's consider any subdirectories we may have... 
subdirs = [] for dname in dirnames: # Skip it if it does not have __init__.py if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')): continue # If it has extension(), delegate... ext_name = ("%s%s.%s.extension" % (package, relpkg, dname)) try: ext = importutils.import_class(ext_name) except ImportError: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) else: try: ext(ext_mgr) except Exception as exc: logger.warning('Failed to load extension ' '%(ext_name)s: %(exc)s.', {"ext_name": ext_name, "exc": exc}) # Update the list of directories we'll explore... dirnames[:] = subdirs def extension_authorizer(api_name, extension_name): def authorize(context, target=None, action=None): target = target or policy.default_target(context) if action is None: act = '%s_extension:%s' % (api_name, extension_name) else: act = '%s_extension:%s:%s' % (api_name, extension_name, action) policy.enforce(context, act, target) return authorize ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8256729 manila-21.0.0/manila/api/middleware/0000775000175000017500000000000000000000000017205 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/middleware/__init__.py0000664000175000017500000000000000000000000021304 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/middleware/auth.py0000664000175000017500000001404200000000000020521 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common Auth Middleware. """ import os from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils import webob.dec import webob.exc from manila.api.openstack import wsgi from manila import context from manila.i18n import _ from manila.wsgi import common as base_wsgi use_forwarded_for_opt = cfg.BoolOpt( 'use_forwarded_for', default=False, deprecated_for_removal=True, deprecated_reason='This feature is duplicate of the HTTPProxyToWSGI ' 'middleware of oslo.middleware.', deprecated_since='Zed', help='Treat X-Forwarded-For as the canonical remote address. 
' 'Only enable this if you have a sanitizing proxy.') CONF = cfg.CONF CONF.register_opt(use_forwarded_for_opt) LOG = log.getLogger(__name__) def pipeline_factory(loader, global_conf, **local_conf): """A paste pipeline replica that keys off of auth_strategy.""" pipeline = local_conf[CONF.auth_strategy] if not CONF.api_rate_limit: limit_name = CONF.auth_strategy + '_nolimit' pipeline = local_conf.get(limit_name, pipeline) pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app class InjectContext(base_wsgi.Middleware): """Add a 'manila.context' to WSGI environ.""" def __init__(self, context, *args, **kwargs): self.context = context super(InjectContext, self).__init__(*args, **kwargs) @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): req.environ['manila.context'] = self.context return self.application class ManilaKeystoneContext(base_wsgi.Middleware): """Make a request context from keystone headers.""" @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): # Build a context, including the auth_token... 
remote_address = req.remote_addr if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) service_catalog = None if req.headers.get('X_SERVICE_CATALOG') is not None: try: catalog_header = req.headers.get('X_SERVICE_CATALOG') service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( _('Invalid service catalog json.')) ctx = context.RequestContext.from_environ( req.environ, remote_address=remote_address, service_catalog=service_catalog) if ctx.user_id is None: LOG.debug("Neither X_USER_ID nor X_USER found in request") return webob.exc.HTTPUnauthorized() if req.environ.get('X_PROJECT_DOMAIN_ID'): ctx.project_domain_id = req.environ['X_PROJECT_DOMAIN_ID'] if req.environ.get('X_PROJECT_DOMAIN_NAME'): ctx.project_domain_name = req.environ['X_PROJECT_DOMAIN_NAME'] if req.environ.get('X_USER_DOMAIN_ID'): ctx.user_domain_id = req.environ['X_USER_DOMAIN_ID'] if req.environ.get('X_USER_DOMAIN_NAME'): ctx.user_domain_name = req.environ['X_USER_DOMAIN_NAME'] req.environ['manila.context'] = ctx return self.application class NoAuthMiddlewareBase(base_wsgi.Middleware): """Return a fake token if one isn't specified.""" def base_call(self, req, project_id_in_path=False): if 'X-Auth-Token' not in req.headers: user_id = req.headers.get('X-Auth-User', 'admin') project_id = req.headers.get('X-Auth-Project-Id', 'admin') if project_id_in_path: os_url = os.path.join(req.url.rstrip('/'), project_id) else: os_url = req.url.rstrip('/') res = webob.Response() # NOTE(vish): This is expecting and returning Auth(1.1), whereas # keystone uses 2.0 auth. We should probably allow # 2.0 auth here as well. 
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) res.headers['X-Server-Management-Url'] = os_url res.content_type = 'text/plain' res.status = '204' return res token = req.headers['X-Auth-Token'] user_id, _sep, project_id = token.partition(':') project_id = project_id or user_id remote_address = getattr(req, 'remote_addr', '127.0.0.1') if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) ctx = context.RequestContext(user_id, project_id, is_admin=True, remote_address=remote_address) req.environ['manila.context'] = ctx return self.application class NoAuthMiddleware(NoAuthMiddlewareBase): """Return a fake token if one isn't specified. Sets project_id in URLs. """ @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): return self.base_call(req, project_id_in_path=True) class NoAuthMiddlewarev2_60(NoAuthMiddlewareBase): """Return a fake token if one isn't specified. Does not set project_id in URLs. """ @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): return self.base_call(req) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/middleware/fault.py0000664000175000017500000000576700000000000020711 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import webob.dec import webob.exc from manila.api.openstack import wsgi from manila.i18n import _ from manila import utils from manila.wsgi import common as base_wsgi LOG = log.getLogger(__name__) class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" _status_to_type = {} @staticmethod def status_to_type(status): if not FaultWrapper._status_to_type: for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): FaultWrapper._status_to_type[clazz.code] = clazz return FaultWrapper._status_to_type.get( status, webob.exc.HTTPInternalServerError)() def _error(self, inner, req): if isinstance(inner, UnicodeDecodeError): msg = _("Error decoding your request. Either the URL or the " "request body contained characters that could not be " "decoded by Manila.") return wsgi.Fault(webob.exc.HTTPBadRequest(explanation=msg)) LOG.exception("Caught error: %s", inner) safe = getattr(inner, 'safe', False) headers = getattr(inner, 'headers', None) status = getattr(inner, 'code', 500) if status is None: status = 500 msg_dict = dict(url=req.url, status=status) LOG.info("%(url)s returned with HTTP %(status)d", msg_dict) outer = self.status_to_type(status) if headers: outer.headers = headers # NOTE(johannes): We leave the explanation empty here on # purpose. It could possibly have sensitive information # that should not be returned back to the user. 
See # bugs 868360 and 874472 # NOTE(eglynn): However, it would be over-conservative and # inconsistent with the EC2 API to hide every exception, # including those that are safe to expose, see bug 1021373 if safe: outer.explanation = '%s: %s' % (inner.__class__.__name__, inner) return wsgi.Fault(outer) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: return req.get_response(self.application) except Exception as ex: return self._error(ex, req) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8296728 manila-21.0.0/manila/api/openstack/0000775000175000017500000000000000000000000017057 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/openstack/__init__.py0000664000175000017500000001725000000000000021175 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack API controllers. """ from oslo_config import cfg from oslo_log import log from oslo_service import wsgi as base_wsgi import routes from manila.api.openstack import wsgi from manila.i18n import _ openstack_api_opts = [ cfg.StrOpt( 'project_id_regex', default=r'[0-9a-f\-]+', help=( r'The validation regex for project_ids used in URLs. 
' r'This defaults to [0-9a-f\\-]+ if not set, ' r'which matches normal uuids created by keystone.' ), ), ] validation_opts = [ cfg.StrOpt( 'response_validation', choices=( ( 'error', 'Raise a HTTP 500 (Server Error) for responses that fail ' 'schema validation', ), ( 'warn', 'Log a warning for responses that fail schema validation', ), ( 'ignore', 'Ignore schema validation failures', ), ), default='warn', help="""\ Configure validation of API responses. ``warn`` is the current recommendation for production environments. If you find it necessary to enable the ``ignore`` option, please report the issues you are seeing to the Manila team so we can improve our schemas. ``error`` should not be used in a production environment. This is because schema validation happens *after* the response body has been generated, meaning any side effects will still happen and the call may be non-idempotent despite the user receiving a HTTP 500 error. """, ), ] CONF = cfg.CONF CONF.register_opts(openstack_api_opts) CONF.register_opts(validation_opts, group='api') LOG = log.getLogger(__name__) class APIMapper(routes.Mapper): def routematch(self, url=None, environ=None): if url == "": result = self._match("", environ) return result[0], result[1] return routes.Mapper.routematch(self, url, environ) def connect(self, *args, **kwargs): # NOTE(inhye): Default the format part of a route to only accept json # and xml so it doesn't eat all characters after a '.' # in the url. kwargs.setdefault('requirements', {}) if not kwargs['requirements'].get('format'): kwargs['requirements']['format'] = 'json|xml' return routes.Mapper.connect(self, *args, **kwargs) class ProjectMapper(APIMapper): def resource(self, member_name, collection_name, **kwargs): """Base resource path handler This method is compatible with resource paths that include a project_id and those that don't. 
Including project_id in the URLs was a legacy API requirement; and making API requests against such endpoints won't work for users that don't belong to a particular project. """ # NOTE(gouthamr): project_id parameter is only valid if its hex # or hex + dashes (note, integers are a subset of this). This # is required to handle our overlapping routes issues. project_id_regex = CONF.project_id_regex project_id_token = '{project_id:%s}' % project_id_regex if 'parent_resource' not in kwargs: kwargs['path_prefix'] = '%s/' % project_id_token else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '%s/%s/:%s_id' % (project_id_token, p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) # NOTE(gouthamr): while we are in transition mode to not needing # project_ids in URLs, we'll need additional routes without project_id. if 'parent_resource' not in kwargs: del kwargs['path_prefix'] else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '%s/:%s_id' % (p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) class APIRouter(base_wsgi.Router): """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = None # override in subclasses @classmethod def factory(cls, global_config, **local_config): """Simple paste factory, :class:`manila.wsgi.Router` doesn't have.""" return cls() def __init__(self, ext_mgr=None): if ext_mgr is None: if self.ExtensionManager: # pylint: disable=not-callable ext_mgr = self.ExtensionManager() else: raise Exception(_("Must specify an ExtensionManager class")) mapper = ProjectMapper() self.resources = {} self._setup_routes(mapper) self._setup_ext_routes(mapper, ext_mgr) self._setup_extensions(ext_mgr) super(APIRouter, 
self).__init__(mapper) def _setup_ext_routes(self, mapper, ext_mgr): for resource in ext_mgr.get_resources(): LOG.debug('Extended resource: %s', resource.collection) wsgi_resource = wsgi.Resource(resource.controller) self.resources[resource.collection] = wsgi_resource kargs = dict( controller=wsgi_resource, collection=resource.collection_actions, member=resource.member_actions) if resource.parent: kargs['parent_resource'] = resource.parent mapper.resource(resource.collection, resource.collection, **kargs) if resource.custom_routes_fn: resource.custom_routes_fn(mapper, wsgi_resource) def _setup_extensions(self, ext_mgr): for extension in ext_mgr.get_controller_extensions(): ext_name = extension.extension.name collection = extension.collection controller = extension.controller if collection not in self.resources: LOG.warning('Extension %(ext_name)s: Cannot extend ' 'resource %(collection)s: No such resource', {'ext_name': ext_name, 'collection': collection}) continue LOG.debug('Extension %(ext_name)s extending resource: ' '%(collection)s', {'ext_name': ext_name, 'collection': collection}) resource = self.resources[collection] resource.register_actions(controller) resource.register_extensions(controller) def _setup_routes(self, mapper): raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/openstack/api_version_request.py0000664000175000017500000004045400000000000023526 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from manila.api.openstack import versioned_method from manila import exception from manila.i18n import _ from manila import utils # Define the minimum and maximum version of the API across all of the # REST API. The format of the version is: # X.Y where: # # - X will only be changed if a significant backwards incompatible API # change is made which affects the API as whole. That is, something # that is only very very rarely incremented. # # - Y when you make any change to the API. Note that this includes # semantic changes which may not affect the input or output formats or # even originate in the API code layer. We are not distinguishing # between backwards compatible and backwards incompatible changes in # the versioning system. It must be made clear in the documentation as # to what is a backwards compatible change and what is a backwards # incompatible one. # # You must update the API version history string below with a one or # two line description as well as update rest_api_version_history.rst REST_API_VERSION_HISTORY = """ REST API Version History: * 1.0 - Initial version. Includes all V1 APIs and extensions in Kilo. * 2.0 - Versions API updated to reflect beginning of microversions epoch. * 2.1 - Share create() doesn't ignore availability_zone field of share. * 2.2 - Snapshots become optional feature. 
* 2.3 - Share instances admin API * 2.4 - Consistency Group support * 2.5 - Share Migration admin API * 2.6 - Return share_type UUID instead of name in Share API * 2.7 - Rename old extension-like API URLs to core-API-like * 2.8 - Attr "is_public" can be set for share using API "manage" * 2.9 - Add export locations API * 2.10 - Field 'access_rules_status' was added to shares and share instances. * 2.11 - Share Replication support * 2.12 - Manage/unmanage snapshot API. * 2.13 - Add "cephx" auth type to allow_access * 2.14 - 'Preferred' attribute in export location metadata * 2.15 - Added Share migration 'migration_cancel', 'migration_get_progress', 'migration_complete' APIs, renamed 'migrate_share' to 'migration_start' and added notify parameter to 'migration_start'. * 2.16 - Add user_id in share show/create/manage API. * 2.17 - Added project_id and user_id fields to the JSON response of snapshot show/create/manage API. * 2.18 - Add gateway to the JSON response of share network show API. * 2.19 - Share snapshot instances admin APIs (list/show/detail/reset-status). * 2.20 - Add MTU to the JSON response of share network show API. * 2.21 - Add access_key to the response of access_list API. * 2.22 - Updated migration_start API with 'preserve-metadata', 'writable', 'nondisruptive' and 'new_share_network_id' parameters, renamed 'force_host_copy' to 'force_host_assisted_migration', removed 'notify' parameter and removed previous migrate_share API support. Updated reset_task_state API to accept 'None' value. * 2.23 - Added share_type to filter results of scheduler-stats/pools API. * 2.24 - Added optional create_share_from_snapshot_support extra spec, which was previously inferred from the 'snapshot_support' extra spec. Also made the 'snapshot_support' extra spec optional. * 2.25 - Added quota-show detail API. * 2.26 - Removed 'nova_net_id' parameter from share_network API. * 2.27 - Added share revert to snapshot API. 
* 2.28 - Added transitional states to access rules and replaced all transitional access_rules_status values of shares (share_instances) with 'syncing'. Share action API 'access_allow' now accepts rules even when a share or any of its instances may have an access_rules_status set to 'error'. * 2.29 - Updated migration_start API adding mandatory parameter 'preserve_snapshots' and changed 'preserve_metadata', 'writable', 'nondisruptive' to be mandatory as well. All previous migration_start APIs prior to this microversion are now unsupported. * 2.30 - Added cast_rules_to_readonly field to share_instances. * 2.31 - Convert consistency groups to share groups. * 2.32 - Added mountable snapshots APIs. * 2.33 - Added 'created_at' and 'updated_at' to the response of access_list API. * 2.34 - Added 'availability_zone_id' and 'consistent_snapshot_support' fields to 'share_group' object. * 2.35 - Added support to retrieve shares filtered by export_location_id and export_location_path. * 2.36 - Added like filter support in ``shares``, ``snapshots``, ``share-networks``, ``share-groups`` list APIs. * 2.37 - Added /messages APIs. * 2.38 - Support IPv6 validation in allow_access API to enable IPv6 in manila. * 2.39 - Added share-type quotas. * 2.40 - Added share group and share group snapshot quotas. * 2.41 - Added 'description' in share type create/list APIs. * 2.42 - Added ``with_count`` in share list API to get total count info. * 2.43 - Added filter search by extra spec for share type list. * 2.44 - Added 'ou' field to 'security_service' object. * 2.45 - Added access metadata for share access and also introduced the GET /share-access-rules API. The prior API to retrieve access rules will not work with API version >=2.45. * 2.46 - Added 'is_default' field to 'share_type' and 'share_group_type' objects. 
* 2.47 - Export locations for non-active share replicas are no longer retrievable through the export locations APIs: GET /v2/{tenant_id}/shares/{share_id}/export_locations and GET /v2/{tenant_id}/shares/{share_id}/export_locations/{ export_location_id}. A new API is introduced at this version: GET /v2/{tenant_id}/share-replicas/{ replica_id}/export-locations to allow retrieving individual replica export locations if available. * 2.48 - Added support for extra-spec "availability_zones" within Share types along with validation in the API. * 2.49 - Added Manage/Unmanage Share Server APIs. Updated Manage/Unmanage Shares and Snapshots APIs to work in ``driver_handles_shares_servers`` enabled mode. * 2.50 - Added update share type API to Share Type APIs. Through this API we can update the ``name``, ``description`` and/or ``share_type_access:is_public`` fields of the share type. * 2.51 - Added Share Network with multiple Subnets. Updated Share Networks to handle with one or more subnets in different availability zones. * 2.52 - Added 'created_before' and 'created_since' field to list messages filters, support querying user messages within the specified time period. * 2.53 - Added quota control to share replicas. * 2.54 - Share and share instance objects include a new field called "progress" which indicates the completion of a share creation operation as a percentage. * 2.55 - Share groups feature is no longer considered experimental. * 2.56 - Share replication feature is no longer considered experimental. * 2.57 - Added Share server migration operations: 'share_server_migration_check' 'share_server_migration_cancel' 'share_server_migration_complete' 'share_server_migration_start' 'share_server_migration_get_progress' 'share_server_reset_task_state' * 2.58 - Added 'share_groups' and 'share_group_snapshots' to the limits view. * 2.59 - Add driver ``details`` field to migration get progress. * 2.60 - API URLs no longer need to include a project_id parameter. 
* 2.61 - Added optional provisioning:max_share_size and provisioning:min_share_size extra specs, which can add minimum and maximum share size restrictions on a per share-type granularity. * 2.62 - Added quota control to per share size. * 2.63 - Changed the existing behavior of 'add_security_service' action on the share network's endpoint to allow the addition of security services, even when the share network is in use. Also, added new actions on the share network's endpoint: 'update_security_service', 'update_security_service_check' and 'add_security_service_check'. * 2.64 - Added 'force' field to extend share api, which can extend share directly without validation through share scheduler. * 2.65 - Added ability to set affinity scheduler hints via the share create API. * 2.66 - Added filter search by group spec for share group type list. * 2.67 - Added ability to set 'only_host' scheduler hint for the share create and share replica create API. * 2.68 - Added admin only capabilities to share metadata API * 2.69 - Added new share action to soft delete share to recycle bin or restore share from recycle bin. Also, a new parameter called `is_soft_deleted` was added so users can filter out shares in the recycle bin while listing shares. * 2.70 - Added support for multiple share network subnets in the same availability zone. Also, users can add subnets for an in-use share network. * 2.71 - Added 'updated_at' field in share instance show API output. * 2.72 - Added new option ``share-network`` to share replica creare API. * 2.73 - Added Share Snapshot Metadata to Metadata API * 2.74 - Allow/deny share access rule even if share replicas are in 'error' state. * 2.75 - Added option to specify quiesce wait time in share replica promote API. * 2.76 - Added 'default_ad_site' field in security service object. * 2.77 - Added support for share transfer between different projects. * 2.78 - Added Share Network Subnet Metadata to Metadata API. 
* 2.79 - Added ``with_count`` in share snapshot list API to get total count info. * 2.80 - Added share backup APIs. * 2.81 - Added API methods, endpoint /resource-locks. * 2.82 - Added lock and restriction to share access rules. * 2.83 - Added 'disabled_reason' field to services. * 2.84 - Added mount_point_name to shares. * 2.85 - Added backup_type field to share backups. * 2.86 - Add ensure share API. * 2.87 - Added Share export location metadata API * 2.88 - Added support for update Share access rule. * 2.89 - Added support for passing Share network subnet metadata updates to driver. * 2.90 - Added encryption key reference option to share create API. * 2.91 - Added support for targeted restores via the share backup API """ # The minimum and maximum versions of the API supported # The default api version request is defined to be the # minimum version of the API supported. _MIN_API_VERSION = "2.0" _MAX_API_VERSION = "2.91" DEFAULT_API_VERSION = _MIN_API_VERSION # NOTE(cyeoh): min and max versions declared as functions so we can # mock them for unittests. Do not use the constants directly anywhere # else. def min_api_version(): return APIVersionRequest(_MIN_API_VERSION) def max_api_version(): return APIVersionRequest(_MAX_API_VERSION) class APIVersionRequest(utils.ComparableMixin): """This class represents an API Version Request. This class includes convenience methods for manipulation and comparison of version numbers as needed to implement API microversions. 
""" def __init__(self, version_string=None, experimental=False): """Create an API version request object.""" self._ver_major = None self._ver_minor = None self._experimental = experimental if version_string is not None: match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", version_string) if match: self._ver_major = int(match.group(1)) self._ver_minor = int(match.group(2)) else: raise exception.InvalidAPIVersionString(version=version_string) def __str__(self): """Debug/Logging representation of object.""" params = { 'major': self._ver_major, 'minor': self._ver_minor, 'experimental': self._experimental, } return ("API Version Request Major: %(major)s, Minor: %(minor)s, " "Experimental: %(experimental)s" % params) def is_null(self): return self._ver_major is None and self._ver_minor is None def _cmpkey(self): """Return the value used by ComparableMixin for rich comparisons.""" return self._ver_major, self._ver_minor @property def experimental(self): return self._experimental @experimental.setter def experimental(self, value): if not isinstance(value, bool): msg = _('The experimental property must be a bool value.') raise exception.InvalidParameterValue(err=msg) self._experimental = value def matches_versioned_method(self, method): """Compares this version to that of a versioned method.""" if not isinstance(method, versioned_method.VersionedMethod): msg = _('An API version request must be compared ' 'to a VersionedMethod object.') raise exception.InvalidParameterValue(err=msg) return self.matches(method.start_version, method.end_version, method.experimental) def matches(self, min_version, max_version, experimental=False): """Compares this version to the specified min/max range. Returns whether the version object represents a version greater than or equal to the minimum version and less than or equal to the maximum version. If min_version is null then there is no minimum limit. If max_version is null then there is no maximum limit. If self is null then raise ValueError. 
:param min_version: Minimum acceptable version. :param max_version: Maximum acceptable version. :param experimental: Whether to match experimental APIs. :returns: boolean """ if self.is_null(): raise ValueError # NOTE(cknight): An experimental request should still match a # non-experimental API, so the experimental check isn't just # looking for equality. if not self.experimental and experimental: return False if isinstance(min_version, str): min_version = APIVersionRequest(version_string=min_version) if isinstance(max_version, str): max_version = APIVersionRequest(version_string=max_version) if not (min_version or max_version): return True elif (min_version and max_version and max_version.is_null() and min_version.is_null()): return True elif not max_version or max_version.is_null(): return min_version <= self elif not min_version or min_version.is_null(): return self <= max_version else: return min_version <= self <= max_version def get_string(self): """Returns a string representation of this object. If this method is used to create an APIVersionRequest, the resulting object will be an equivalent request. """ if self.is_null(): raise ValueError return ("%(major)s.%(minor)s" % {'major': self._ver_major, 'minor': self._ver_minor}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/openstack/rest_api_version_history.rst0000664000175000017500000003262600000000000024756 0ustar00zuulzuul00000000000000REST API Version History ======================== This documents the changes made to the REST API with every microversion change. The description for each version should be a verbose one which has enough information to be suitable for use in user documentation. 1.0 (Maximum in Kilo) --------------------- The 1.0 Manila API includes all v1 core APIs existing prior to the introduction of microversions. 
The /v1 URL is used to call 1.0 APIs, and microversions headers sent to this endpoint are ignored. 2.0 --- This is the initial version of the Manila API which supports microversions. The /v2 URL is used to call 2.x APIs. A user can specify a header in the API request:: X-OpenStack-Manila-API-Version: where ```` is any valid api version for this API. If no version is specified then the API will behave as if version 2.0 was requested. The only API change in version 2.0 is versions, i.e. GET http://localhost:8786/, which now returns information about both 1.0 and 2.x versions and their respective /v1 and /v2 endpoints. All other 2.0 APIs are functionally identical to version 1.0. 2.1 --- Share create() method doesn't ignore availability_zone field of provided share. 2.2 --- Snapshots become optional and share payload now has boolean attr 'snapshot_support'. 2.3 --- Share instances admin API and update of Admin Actions extension. 2.4 --- Consistency groups support. /consistency-groups and /cgsnapshots are implemented. AdminActions 'os-force_delete and' 'os-reset_status' have been updated for both new resources. 2.5 --- Share Migration admin API. 2.6 (Maximum in Liberty) ------------------------ Return share_type UUID instead of name in Share API and add share_type_name field. 2.7 --- Rename old extension-like API URLs to core-API-like. 2.8 --- Allow to set share visibility explicitly using "manage" API. 2.9 --- Add export locations API. Remove export locations from "shares" and "share instances" APIs. 2.10 ---- Field 'access_rules_status' was added to shares and share instances. 2.11 ---- Share Replication support added. All Share replication APIs are tagged 'Experimental'. Share APIs return two new attributes: 'has_replicas' and 'replication_type'. Share instance APIs return a new attribute, 'replica_state'. 2.12 ---- Share snapshot manage and unmanage API. 2.13 ---- Add 'cephx' authentication type for the CephFS Native driver. 
2.14 ---- Added attribute 'preferred' to export locations. Drivers may use this field to identify which export locations are most efficient and should be used preferentially by clients. Also, change 'uuid' field to 'id', move timestamps to detail view, and return all non-admin fields to users. 2.15 (Maximum in Mitaka) ------------------------ Added Share migration 'migration_cancel', 'migration_get_progress', 'migration_complete' APIs, renamed 'migrate_share' to 'migration_start' and added notify parameter to 'migration_start'. 2.16 ---- Add user_id in share show/create/manage API. 2.17 ---- Added user_id and project_id in snapshot show/create/manage APIs. 2.18 ---- Add gateway in share network show API. 2.19 ---- Add admin APIs(list/show/detail/reset-status) of snapshot instances. 2.20 ---- Add MTU in share network show API. 2.21 ---- Add access_key in access_list API. 2.22 (Maximum in Newton) ------------------------ Updated migration_start API with 'preserve_metadata', 'writable', 'nondisruptive' and 'new_share_network_id' parameters, renamed 'force_host_copy' to 'force_host_assisted_migration', removed 'notify' parameter and removed previous migrate_share API support. Updated reset_task_state API to accept 'None' value. 2.23 ---- Added share_type to filter results of scheduler-stats/pools API. 2.24 ---- Added optional create_share_from_snapshot_support extra spec. Made snapshot_support extra spec optional. 2.25 ---- Added quota-show detail API. 2.26 ---- Removed nova-net plugin support and removed 'nova_net_id' parameter from share_network API. 2.27 ---- Added share revert to snapshot. This API reverts a share to the specified snapshot. The share is reverted in place, and the snapshot must be the most recent one known to manila. The feature is controlled by a new standard optional extra spec, revert_to_snapshot_support. 2.28 ---- Added transitional states ('queued_to_apply' - was previously 'new', 'queued_to_deny', 'applying' and 'denying') to access rules. 
'updating', 'updating_multiple' and 'out_of_sync' are no longer valid values for the 'access_rules_status' field of shares, they have been collapsed into the transitional state 'syncing'. Access rule changes can be made independent of a share's 'access_rules_status'. 2.29 ---- Updated migration_start API adding mandatory parameter 'preserve_snapshots' and changed 'preserve_metadata', 'writable', 'nondisruptive' to be mandatory as well. All previous migration_start APIs prior to this microversion are now unsupported. 2.30 ---- Added cast_rules_to_readonly field to share_instances. 2.31 ---- Convert consistency groups to share groups. 2.32 (Maximum in Ocata) ----------------------- Added mountable snapshots APIs. 2.33 ---- Added created_at and updated_at in access_list API. 2.34 ---- Added 'availability_zone_id' and 'consistent_snapshot_support' fields to 'share_group' object. 2.35 ---- Added support to retrieve shares filtered by export_location_id and export_location_path. 2.36 ---- Added like filter support in ``shares``, ``snapshots``, ``share-networks``, ``share-groups`` list APIs. 2.37 ---- Added /messages APIs. 2.38 ---- Support IPv6 format validation in allow_access API to enable IPv6. 2.39 ---- Added share-type quotas. 2.40 (Maximum in Pike) ---------------------- Added share group and share group snapshot quotas. 2.41 ---- Added 'description' in share type create/list APIs. 2.42 (Maximum in Queens) ------------------------ Added ``with_count`` in share list API to get total count info. 2.43 ---- Added filter search by extra spec for share type list. 2.44 ---- Added 'ou' field to 'security_service' object. 2.45 ---- Added access metadata for share access and also introduced the GET /share-access-rules API. The prior API to retrieve access rules will not work with API version >=2.45. 2.46 (Maximum in Rocky) ----------------------- Added 'is_default' field to 'share_type' and 'share_group_type' objects. 
2.47 ---- Export locations for non-active share replicas are no longer retrievable through the export locations APIs: ``GET /v2/{tenant_id}/shares/{share_id}/export_locations`` and ``GET /v2/{tenant_id}/shares/{share_id}/export_locations/{export_location_id}``. A new API is introduced at this version: ``GET /v2/{tenant_id}/share-replicas/{replica_id}/export-locations`` to allow retrieving export locations of share replicas if available. 2.48 ---- Administrators can now use the common, user-visible extra-spec 'availability_zones' within share types to allow provisioning of shares only within specific availability zones. The extra-spec allows using comma separated names of one or more availability zones. 2.49 (Maximum in Stein) ----------------------- Added Manage/Unmanage Share Server APIs. Updated Manage/Unmanage Shares and Snapshots APIs to work in ``driver_handles_shares_servers`` enabled mode. 2.50 ---- Added update share type API to Share Type APIs. We can update the ``name``, ``description`` and/or ``share_type_access:is_public`` fields of the share type by the update share type API. 2.51 (Maximum in Train) ----------------------- Added to the service the possibility to have multiple subnets per share network, each of them associated to a different AZ. It is also possible to configure a default subnet that spans all availability zones. 2.52 ---- Added 'created_before' and 'created_since' field to list messages api, support querying user messages within the specified time period. 2.53 ---- Added quota control for share replicas and replica gigabytes. 2.54 ---- Share and share instance objects include a new field called "progress" which indicates the completion of a share creation operation as a percentage. 2.55 (Maximum in Ussuri) ------------------------ Share groups feature is no longer considered experimental. 2.56 ---- Share replication feature is no longer considered experimental. 
2.57 (Maximum in Victoria) -------------------------- Added share server migration feature. A two-phase approach that migrates a share server and all its resources to a new host. 2.58 ---- Added 'share_groups' and 'share_group_snapshots' to the limits view. 2.59 ---- Added 'details' field to migration get progress api, which optionally may hold additional driver data related to the progress of share migration. 2.60 ---- API URLs no longer need a "project_id" argument in them. For example, the API route: https://$(controller)s/share/v2/$(project_id)s/shares is equivalent to https://$(controller)s/share/v2/shares. When interacting with the manila service as system or domain scoped users, project_id should not be specified in the API path. 2.61 ---- Ability to add minimum and maximum share size restrictions which can be set on a per share-type granularity. Added new extra specs 'provisioning:max_share_size' and 'provisioning:min_share_size'. 2.62 ---- Added quota control to per share size. 2.63 (Maximum in Wallaby) ------------------------- Added the possibility to attach security services to share networks in use. Also, an attached security service can be replaced for another one of the same 'type'. In order to support those operations a 'status' field was added in the share networks as well as, a new property called 'security_service_update_support' was included in the share networks and share servers. Also new action APIs have been added to the share-networks endpoint: 'update_security_service', 'update_security_service_check' and 'add_security_service_check'. 2.64 ---- Added 'force' field to extend share api, which can extend share directly without go through share scheduler. 2.65 (Maximum in Xena) ---------------------- Added ability to specify "scheduler_hints" in the request body of the POST /shares request. These hints will invoke Affinity/Anti-Affinity scheduler filters during share creation and share migration. 
2.66 ---- Added filter search by group spec for share group type list. 2.67 ---- Added support for 'only_host' key in "scheduler_hints" in the request body of the POST/shares and POST/share-replicas request. This hint will invoke 'OnlyHost' scheduler filter during share and share-replica creation. 2.68 ---- Added admin only capabilities to share metadata API. 2.69 ---- Manila support Recycle Bin. Soft delete share to Recycle Bin: ``POST /v2/shares/{share_id}/action {"soft_delete": null}``. List shares in Recycle Bin: `` GET /v2/shares?is_soft_deleted=true``. Restore share from Recycle Bin: `` POST /v2/shares/{share_id}/action {'restore': null}``. 2.70 (Maximum in Yoga) ---------------------- Added support to configure multiple subnets for a given share network in the same availability zone (or the default one). Users can also add new subnets for an in-use share network. To distinguish this update support a new property called 'network_allocation_update_support' was added in the share network and share server. 2.71 ---- Added 'updated_at' field in share instance show API output. 2.72 ---- Added 'share_network' option to share replica create API. 2.73 (Maximum in Zed) --------------------- Added Metadata API methods (GET, PUT, POST, DELETE) to Share Snapshots 2.74 ---- Allow/deny share access rule even if share replicas are in 'error' state. 2.75 ---- Added option to specify quiesce wait time in share replica promote API. 2.76 ---- Added 'default_ad_site' field in security service object. 2.77 ---- Added support for share transfer between different projects. 2.78 (Maximum in 2023.1/Antelope) --------------------------------- Added Metadata API methods (GET, PUT, POST, DELETE) to Share Network Subnets. 2.79 ---- Added ``with_count`` in share snapshot list API to get total count info. 2.80 ---- Added share backup APIs. 2.81 ---- Introduce resource locks as a way users can restrict certain actions on resources. Only share deletion can be prevented at this version. 
2.82 (Maximum in 2023.2/Bobcat) ------------------------------- Introduce the ability to lock access rules and restrict the visibility of sensitive fields. 2.83 ---- The ``disabled_reason`` field was added to the service to mark the reason why the user disabled the service. ``disabled`` field will be replaced by ``status`` field. 2.84 ---- Added optional ``mount_point_name`` field to share. 2.85 (Maximum in 2024.1/Caracal) -------------------------------- Added ``backup_type`` field to share backup object. 2.86 ---- Added ensure shares API. 2.87 (Maximum in 2024.2/Dalmatian) ---------------------------------- Added Metadata API methods (GET, PUT, POST, DELETE) to Share Export Locations. 2.88 ---- Allows updating the access rule's access type. 2.89 (Maximum in 2025.1/Epoxy) ------------------------------ Added support for passing share network subnet metadata updates to share backend driver. 2.90 ---- Added ``encryption_key_ref`` option to share create API. 2.91 ---- Added targeted restores to the share backup API ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/openstack/urlmap.py0000664000175000017500000000176200000000000020737 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack, LLC. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from manila.api import urlmap LOG = log.getLogger(__name__) def urlmap_factory(loader, global_conf, **local_conf): LOG.warning('manila.api.openstack.urlmap:urlmap_factory ' 'is deprecated. ' 'Please use manila.api.urlmap:urlmap_factory instead.') urlmap.urlmap_factory(loader, global_conf, **local_conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/openstack/versioned_method.py0000664000175000017500000000326500000000000022775 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila import utils class VersionedMethod(utils.ComparableMixin): def __init__(self, name, start_version, end_version, experimental, func): """Versioning information for a single method. Minimum and maximums are inclusive. 
:param name: Name of the method :param start_version: Minimum acceptable version :param end_version: Maximum acceptable_version :param experimental: True if method is experimental :param func: Method to call """ self.name = name self.start_version = start_version self.end_version = end_version self.experimental = experimental self.func = func def __str__(self): args = { 'name': self.name, 'start': self.start_version, 'end': self.end_version } return ("Version Method %(name)s: min: %(start)s, max: %(end)s" % args) def _cmpkey(self): """Return the value used by ComparableMixin for rich comparisons.""" return self.start_version ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/openstack/wsgi.py0000664000175000017500000014470400000000000020414 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools from http import client as http_client import inspect import math import time from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import strutils import webob import webob.exc from manila.api.openstack import api_version_request as api_version from manila.api.openstack import versioned_method from manila.common import constants from manila import exception from manila.i18n import _ from manila import policy from manila import utils from manila.wsgi import common as wsgi LOG = log.getLogger(__name__) SUPPORTED_CONTENT_TYPES = ( 'application/json', ) _MEDIA_TYPE_MAP = { 'application/json': 'json', } # name of attribute to keep version method information VER_METHOD_ATTR = 'versioned_methods' # Name of header used by clients to request a specific version # of the REST API API_VERSION_REQUEST_HEADER = 'X-OpenStack-Manila-API-Version' EXPERIMENTAL_API_REQUEST_HEADER = 'X-OpenStack-Manila-API-Experimental' V1_SCRIPT_NAME = '/v1' V2_SCRIPT_NAME = '/v2' class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def __init__(self, *args, **kwargs): super(Request, self).__init__(*args, **kwargs) self._resource_cache = {} if not hasattr(self, 'api_version_request'): self.api_version_request = api_version.APIVersionRequest() def cache_resource(self, resource_to_cache, id_attribute='id', name=None): """Cache the given resource. Allow API methods to cache objects, such as results from a DB query, to be used by API extensions within the same API request. The resource_to_cache can be a list or an individual resource, but ultimately resources are cached individually using the given id_attribute. Different resources types might need to be cached during the same request, they can be cached using the name parameter. 
For example: Controller 1: request.cache_resource(db_volumes, 'volumes') request.cache_resource(db_volume_types, 'types') Controller 2: db_volumes = request.cached_resource('volumes') db_type_1 = request.cached_resource_by_id('1', 'types') If no name is given, a default name will be used for the resource. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ if not isinstance(resource_to_cache, list): resource_to_cache = [resource_to_cache] if not name: name = self.path cached_resources = self._resource_cache.setdefault(name, {}) for resource in resource_to_cache: cached_resources[resource[id_attribute]] = resource def cached_resource(self, name=None): """Get the cached resources cached under the given resource name. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. :returns: a dict of id_attribute to the resource from the cached resources, an empty map if an empty collection was cached, or None if nothing has been cached yet under this name """ if not name: name = self.path if name not in self._resource_cache: # Nothing has been cached for this key yet return None return self._resource_cache[name] def cached_resource_by_id(self, resource_id, name=None): """Get a resource by ID cached under the given resource name. Allow an API extension to get a previously stored object within the same API request. This is basically a convenience method to lookup by ID on the dictionary of all cached resources. Note that the object data will be slightly stale. :returns: the cached resource or None if the item is not in the cache """ resources = self.cached_resource(name) if not resources: # Nothing has been cached yet for this key yet return None return resources.get(resource_id) def cache_db_items(self, key, items, item_key='id'): """Cache db items. 
Allow API methods to store objects from a DB query to be used by API extensions within the same API request. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ self.cache_resource(items, item_key, key) def get_db_items(self, key): """Get db item by key. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. """ return self.cached_resource(key) def get_db_item(self, key, item_key): """Get db item by key and item key. Allow an API extension to get a previously stored object within the same API request. Note that the object data will be slightly stale. """ return self.get_db_items(key).get(item_key) def cache_db_share_types(self, share_types): self.cache_db_items('share_types', share_types, 'id') def cache_db_share_type(self, share_type): self.cache_db_items('share_types', [share_type], 'id') def get_db_share_types(self): return self.get_db_items('share_types') def get_db_share_type(self, share_type_id): return self.get_db_item('share_types', share_type_id) def best_match_content_type(self): """Determine the requested response content-type.""" if 'manila.best_content_type' not in self.environ: # Calculate the best MIME type content_type = None # Check URL path suffix parts = self.path.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in SUPPORTED_CONTENT_TYPES: content_type = possible_type if not content_type: content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) self.environ['manila.best_content_type'] = (content_type or 'application/json') return self.environ['manila.best_content_type'] def get_content_type(self): """Determine content type of the request body. Does not do any body introspection, only checks header. 
""" if "Content-Type" not in self.headers: return None allowed_types = SUPPORTED_CONTENT_TYPES content_type = self.content_type if content_type not in allowed_types: raise exception.InvalidContentType(content_type=content_type) return content_type def set_api_version_request(self): """Set API version request based on the request header information. Microversions starts with /v2, so if a client sends a /v1 URL, then ignore the headers and request 1.0 APIs. """ if not self.script_name or not (V1_SCRIPT_NAME in self.script_name or V2_SCRIPT_NAME in self.script_name): # The request is on the base URL without a major version specified self.api_version_request = api_version.APIVersionRequest() elif V1_SCRIPT_NAME in self.script_name: self.api_version_request = api_version.APIVersionRequest('1.0') else: if API_VERSION_REQUEST_HEADER in self.headers: hdr_string = self.headers[API_VERSION_REQUEST_HEADER] self.api_version_request = api_version.APIVersionRequest( hdr_string) # Check that the version requested is within the global # minimum/maximum of supported API versions if not self.api_version_request.matches( api_version.min_api_version(), api_version.max_api_version()): raise exception.InvalidGlobalAPIVersion( req_ver=self.api_version_request.get_string(), min_ver=api_version.min_api_version().get_string(), max_ver=api_version.max_api_version().get_string()) else: self.api_version_request = api_version.APIVersionRequest( api_version.DEFAULT_API_VERSION) # Check if experimental API was requested if EXPERIMENTAL_API_REQUEST_HEADER in self.headers: self.api_version_request.experimental = strutils.bool_from_string( self.headers[EXPERIMENTAL_API_REQUEST_HEADER]) class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, 
data): raise NotImplementedError() class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): return jsonutils.dump_as_bytes(data) def serializers(**serializers): """Attaches serializers to a method. This decorator associates a dictionary of serializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_serializers'): func.wsgi_serializers = {} func.wsgi_serializers.update(serializers) return func return decorator def deserializers(**deserializers): """Attaches deserializers to a method. This decorator associates a dictionary of deserializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_deserializers'): func.wsgi_deserializers = {} func.wsgi_deserializers.update(deserializers) return func return decorator def response(code): """Attaches response code to a method. This decorator associates a response code with a method. Note that the function attributes are directly manipulated; the method is not wrapped. 
""" def decorator(func): func.wsgi_code = code return func return decorator class ResponseObject(object): """Bundles a response object with appropriate serializers. Object that app methods may return in order to bind alternate serializers with a response object to be serialized. Its use is optional. """ def __init__(self, obj, code=None, headers=None, **serializers): """Binds serializers with an object. Takes keyword arguments akin to the @serializer() decorator for specifying serializers. Serializers specified will be given preference over default serializers or method-specific serializers on return. """ self.obj = obj self.serializers = serializers self._default_code = 200 self._code = code self._headers = headers or {} self.serializer = None self.media_type = None def __getitem__(self, key): """Retrieves a header with the given name.""" return self._headers[key.lower()] def __setitem__(self, key, value): """Sets a header with the given name to the given value.""" self._headers[key.lower()] = value def __delitem__(self, key): """Deletes the header with the given name.""" del self._headers[key.lower()] def _bind_method_serializers(self, meth_serializers): """Binds method serializers with the response object. Binds the method serializers with the response object. Serializers specified to the constructor will take precedence over serializers specified to this method. :param meth_serializers: A dictionary with keys mapping to response types and values containing serializer objects. """ # We can't use update because that would be the wrong # precedence for mtype, serializer in meth_serializers.items(): self.serializers.setdefault(mtype, serializer) def get_serializer(self, content_type, default_serializers=None): """Returns the serializer for the wrapped object. Returns the serializer for the wrapped object subject to the indicated content type. 
If no serializer matching the content type is attached, an appropriate serializer drawn from the default serializers will be used. If no appropriate serializer is available, raises InvalidContentType. """ default_serializers = default_serializers or {} try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in self.serializers: return mtype, self.serializers[mtype] else: return mtype, default_serializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def preserialize(self, content_type, default_serializers=None): """Prepares the serializer that will be used to serialize. Determines the serializer that will be used and prepares an instance of it for later call. This allows the serializer to be accessed by extensions for, e.g., template extension. """ mtype, serializer = self.get_serializer(content_type, default_serializers) self.media_type = mtype self.serializer = serializer() def attach(self, **kwargs): """Attach slave templates to serializers.""" if self.media_type in kwargs: self.serializer.attach(kwargs[self.media_type]) def serialize(self, request, content_type, default_serializers=None): """Serializes the wrapped object. Utility method for serializing the wrapped object. Returns a webob.Response object. 
""" if self.serializer: serializer = self.serializer else: _mtype, _serializer = self.get_serializer(content_type, default_serializers) serializer = _serializer() response = webob.Response() response.status_int = self.code for hdr, value in self._headers.items(): response.headers[hdr] = str(value) response.headers['Content-Type'] = str(content_type) if self.obj is not None: response.body = serializer.serialize(self.obj) return response @property def code(self): """Retrieve the response status.""" return self._code or self._default_code @property def headers(self): """Retrieve the headers.""" return self._headers.copy() def action_peek_json(body): """Determine action to invoke.""" try: decoded = jsonutils.loads(body) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) # Make sure there's exactly one key... if len(decoded) != 1: msg = _("too many body keys") raise exception.MalformedRequestBody(reason=msg) # Return the action and the decoded body... return list(decoded.keys())[0] class ResourceExceptionHandler(object): """Context manager to handle Resource exceptions. Used when processing exceptions generated by API implementation methods (or their extensions). Converts most exceptions to Fault exceptions, with the appropriate logging. 
""" def __enter__(self): return None def __exit__(self, ex_type, ex_value, ex_traceback): if not ex_value: return True msg = str(ex_value) if isinstance(ex_value, exception.NotAuthorized): raise Fault(webob.exc.HTTPForbidden(explanation=msg)) elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod): raise elif isinstance(ex_value, exception.Invalid): raise Fault(exception.ConvertedException( code=ex_value.code, explanation=msg)) elif isinstance(ex_value, TypeError): exc_info = (ex_type, ex_value, ex_traceback) LOG.error('Exception handling resource: %s', ex_value, exc_info=exc_info) raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): LOG.info("Fault thrown: %s", ex_value) raise ex_value elif isinstance(ex_value, webob.exc.HTTPException): LOG.info("HTTP exception thrown: %s", ex_value) raise Fault(ex_value) # We didn't handle the exception return False class Resource(wsgi.Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. Exceptions derived from webob.exc.HTTPException will be automatically wrapped in Fault() to provide API friendly error responses. """ support_api_request_version = True def __init__(self, controller, action_peek=None, **deserializers): """init method of Resource. 
:param controller: object that implement methods created by routes lib :param action_peek: dictionary of routines for peeking into an action request body to determine the desired action """ self.controller = controller default_deserializers = dict(json=JSONDeserializer) default_deserializers.update(deserializers) self.default_deserializers = default_deserializers self.default_serializers = dict(json=JSONDictSerializer) self.action_peek = dict(json=action_peek_json) self.action_peek.update(action_peek or {}) # Copy over the actions dictionary self.wsgi_actions = {} if controller: self.register_actions(controller) # Save a mapping of extensions self.wsgi_extensions = {} self.wsgi_action_extensions = {} def register_actions(self, controller): """Registers controller actions with this resource.""" actions = getattr(controller, 'wsgi_actions', {}) for key, method_name in actions.items(): self.wsgi_actions[key] = getattr(controller, method_name) def register_extensions(self, controller): """Registers controller extensions with this resource.""" extensions = getattr(controller, 'wsgi_extensions', []) for method_name, action_name in extensions: # Look up the extending method extension = getattr(controller, method_name) if action_name: # Extending an action... 
if action_name not in self.wsgi_action_extensions: self.wsgi_action_extensions[action_name] = [] self.wsgi_action_extensions[action_name].append(extension) else: # Extending a regular method if method_name not in self.wsgi_extensions: self.wsgi_extensions[method_name] = [] self.wsgi_extensions[method_name].append(extension) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" # NOTE(Vek): Check for get_action_args() override in the # controller if hasattr(self.controller, 'get_action_args'): return self.controller.get_action_args(request_environment) try: args = request_environment['wsgiorg.routing_args'][1].copy() except (KeyError, IndexError, AttributeError): return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args def get_body(self, request): try: content_type = request.get_content_type() except exception.InvalidContentType: LOG.debug("Unrecognized Content-Type provided in request") return None, '' if not content_type: LOG.debug("No Content-Type provided in request") return None, '' if len(request.body) <= 0: LOG.debug("Empty body provided in request") return None, '' return content_type, request.body def deserialize(self, meth, content_type, body): meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in meth_deserializers: deserializer = meth_deserializers[mtype] else: deserializer = self.default_deserializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) return deserializer().deserialize(body) def pre_process_extensions(self, extensions, request, action_args): # List of callables for post-processing extensions post = [] for ext in extensions: if inspect.isgeneratorfunction(ext): response = None # If it's a generator function, the part before the # yield is the preprocessing stage try: with ResourceExceptionHandler(): gen = 
ext(req=request, **action_args) response = next(gen) except Fault as ex: response = ex # We had a response... if response: return response, [] # No response, queue up generator for post-processing post.append(gen) else: # Regular functions only perform post-processing post.append(ext) # Run post-processing in the reverse order return None, reversed(post) def post_process_extensions(self, extensions, resp_obj, request, action_args): for ext in extensions: response = None if inspect.isgenerator(ext): # If it's a generator, run the second half of # processing try: with ResourceExceptionHandler(): response = ext.send(resp_obj) except StopIteration: # Normal exit of generator continue except Fault as ex: response = ex else: # Regular functions get post-processing... try: with ResourceExceptionHandler(): response = ext(req=request, resp_obj=resp_obj, **action_args) except exception.VersionNotFoundForAPIMethod: # If an attached extension (@wsgi.extends) for the # method has no version match its not an error. We # just don't run the extends code continue except Fault as ex: response = ex # We had a response... 
if response: return response return None @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" LOG.info("%(method)s %(url)s", {"method": request.method, "url": request.url}) if self.support_api_request_version: # Set the version of the API requested based on the header try: request.set_api_version_request() except exception.InvalidAPIVersionString as e: return Fault(webob.exc.HTTPBadRequest( explanation=e.msg)) except exception.InvalidGlobalAPIVersion as e: return Fault(webob.exc.HTTPNotAcceptable( explanation=e.msg)) # Identify the action, its arguments, and the requested # content type action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) content_type, body = self.get_body(request) accept = request.best_match_content_type() # NOTE(Vek): Splitting the function up this way allows for # auditing by external tools that wrap the existing # function. If we try to audit __call__(), we can # run into troubles due to the @webob.dec.wsgify() # decorator. 
return self._process_stack(request, action, action_args, content_type, body, accept) def _process_stack(self, request, action, action_args, content_type, body, accept): """Implement the processing stack.""" # Get the implementing method try: meth, extensions = self.get_method(request, action, content_type, body) except (AttributeError, TypeError): return Fault(webob.exc.HTTPNotFound()) except KeyError as ex: msg = _("There is no such action: %s") % ex.args[0] return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) try: method_name = meth.__qualname__ except AttributeError: method_name = 'Controller: %s Method: %s' % ( str(self.controller), meth.__name__) if body: decoded_body = encodeutils.safe_decode(body, errors='ignore') msg = ("Action: '%(action)s', calling method: %(meth)s, body: " "%(body)s") % {'action': action, 'body': decoded_body, 'meth': method_name} LOG.debug(strutils.mask_password(msg)) else: LOG.debug("Calling method '%(meth)s'", {'meth': method_name}) # Now, deserialize the request body... 
try: if content_type: contents = self.deserialize(meth, content_type, body) else: contents = {} except exception.InvalidContentType: msg = _("Unsupported Content-Type") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Update the action args action_args.update(contents) project_id = action_args.pop("project_id", None) context = request.environ.get('manila.context') if (context and project_id and (project_id != context.project_id)): msg = _("Malformed request url") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Run pre-processing extensions response, post = self.pre_process_extensions(extensions, request, action_args) if not response: try: with ResourceExceptionHandler(): action_result = self.dispatch(meth, request, action_args) except Fault as ex: response = ex if not response: # No exceptions; convert action_result into a # ResponseObject resp_obj = None if type(action_result) is dict or action_result is None: resp_obj = ResponseObject(action_result) elif isinstance(action_result, ResponseObject): resp_obj = action_result else: response = action_result # Run post-processing extensions if resp_obj: _set_request_id_header(request, resp_obj) # Do a preserialize to set up the response object serializers = getattr(meth, 'wsgi_serializers', {}) resp_obj._bind_method_serializers(serializers) if hasattr(meth, 'wsgi_code'): resp_obj._default_code = meth.wsgi_code resp_obj.preserialize(accept, self.default_serializers) # Process post-processing extensions response = self.post_process_extensions(post, resp_obj, request, action_args) if resp_obj and not response: response = resp_obj.serialize(request, accept, self.default_serializers) try: msg_dict = dict(url=request.url, status=response.status_int) msg = _("%(url)s returned with HTTP %(status)s") % msg_dict except AttributeError as e: msg_dict = dict(url=request.url, e=e) msg = 
_("%(url)s returned a fault: %(e)s") % msg_dict LOG.info(msg) if hasattr(response, 'headers'): for hdr, val in response.headers.items(): val = utils.convert_str(val) response.headers[hdr] = val _set_request_id_header(request, response.headers) if not request.api_version_request.is_null(): response.headers[API_VERSION_REQUEST_HEADER] = ( request.api_version_request.get_string()) if request.api_version_request.experimental: # NOTE(vponomaryov): Translate our boolean header # to string explicitly to avoid 'TypeError' failure # running manila API under Apache + mod-wsgi. # It is safe to do so, because all headers are returned as # strings anyway. response.headers[EXPERIMENTAL_API_REQUEST_HEADER] = ( '%s' % request.api_version_request.experimental) response.headers['Vary'] = API_VERSION_REQUEST_HEADER return response def get_method(self, request, action, content_type, body): """Look up the action-specific method and its extensions.""" # Look up the method try: if not self.controller: meth = getattr(self, action) else: meth = getattr(self.controller, action) except AttributeError: if (not self.wsgi_actions or action not in ['action', 'create', 'delete']): # Propagate the error raise else: return meth, self.wsgi_extensions.get(action, []) if action == 'action': # OK, it's an action; figure out which action... mtype = _MEDIA_TYPE_MAP.get(content_type) action_name = self.action_peek[mtype](body) LOG.debug("Action body: %s", body) else: action_name = action # Look up the action method return (self.wsgi_actions[action_name], self.wsgi_action_extensions.get(action_name, [])) def dispatch(self, method, request, action_args): """Dispatch a call to the action-specific method.""" try: return method(req=request, **action_args) except exception.VersionNotFoundForAPIMethod: # We deliberately don't return any message information # about the exception to the user so it looks as if # the method is simply not implemented. 
return Fault(webob.exc.HTTPNotFound()) def action(name): """Mark a function as an action. The given name will be taken as the action key in the body. This is also overloaded to allow extensions to provide non-extending definitions of create and delete operations. """ def decorator(func): func.wsgi_action = name return func return decorator def extends(*args, **kwargs): """Indicate a function extends an operation. Can be used as either:: @extends def index(...): pass or as:: @extends(action='resize') def _action_resize(...): pass """ def decorator(func): # Store enough information to find what we're extending func.wsgi_extends = (func.__name__, kwargs.get('action')) return func # If we have positional arguments, call the decorator if args: return decorator(*args) # OK, return the decorator instead return decorator class ControllerMetaclass(type): """Controller metaclass. This metaclass automates the task of assembling a dictionary mapping action keys to method names. """ def __new__(mcs, name, bases, cls_dict): """Adds the wsgi_actions dictionary to the class.""" # Find all actions actions = {} extensions = [] versioned_methods = None # start with wsgi actions from base classes for base in bases: actions.update(getattr(base, 'wsgi_actions', {})) if base.__name__ == "Controller": # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute # between API controller class creations. This allows us # to use a class decorator on the API methods that doesn't # require naming explicitly what method is being versioned as # it can be implicit based on the method decorated. It is a bit # ugly. 
if VER_METHOD_ATTR in base.__dict__: versioned_methods = getattr(base, VER_METHOD_ATTR) delattr(base, VER_METHOD_ATTR) for key, value in cls_dict.items(): if not callable(value): continue if getattr(value, 'wsgi_action', None): actions[value.wsgi_action] = key elif getattr(value, 'wsgi_extends', None): extensions.append(value.wsgi_extends) # Add the actions and extensions to the class dict cls_dict['wsgi_actions'] = actions cls_dict['wsgi_extensions'] = extensions if versioned_methods: cls_dict[VER_METHOD_ATTR] = versioned_methods return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, cls_dict) class Controller(metaclass=ControllerMetaclass): """Default controller.""" _view_builder_class = None def __init__(self, view_builder=None): """Initialize controller with a view builder instance.""" if view_builder: self._view_builder = view_builder elif self._view_builder_class: # pylint: disable=not-callable self._view_builder = self._view_builder_class() else: self._view_builder = None def __getattribute__(self, key): def version_select(*args, **kwargs): """Select and call the matching version of the specified method. Look for the method which matches the name supplied and version constraints and calls it with the supplied arguments. :returns: Returns the result of the method called :raises: VersionNotFoundForAPIMethod if there is no method which matches the name and version constraints """ # The first arg to all versioned methods is always the request # object. The version for the request is attached to the # request object if len(args) == 0: version_request = kwargs['req'].api_version_request else: version_request = args[0].api_version_request func_list = self.versioned_methods[key] for func in func_list: if version_request.matches_versioned_method(func): # Update the version_select wrapper function so # other decorator attributes like wsgi.response # are still respected. 
functools.update_wrapper(version_select, func.func) return func.func(self, *args, **kwargs) # No version match raise exception.VersionNotFoundForAPIMethod( version=version_request) try: version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR) except AttributeError: # No versioning on this class return object.__getattribute__(self, key) if (version_meth_dict and key in object.__getattribute__(self, VER_METHOD_ATTR)): return version_select return object.__getattribute__(self, key) # NOTE(cyeoh): This decorator MUST appear first (the outermost # decorator) on an API method for it to work correctly @classmethod def api_version(cls, min_ver, max_ver=None, experimental=False): """Decorator for versioning API methods. Add the decorator to any method which takes a request object as the first parameter and belongs to a class which inherits from wsgi.Controller. :param min_ver: string representing minimum version :param max_ver: optional string representing maximum version :param experimental: flag indicating an API is experimental and is subject to change or removal at any time """ def decorator(f): obj_min_ver = api_version.APIVersionRequest(min_ver) if max_ver: obj_max_ver = api_version.APIVersionRequest(max_ver) else: obj_max_ver = api_version.APIVersionRequest() # Add to list of versioned methods registered func_name = f.__name__ new_func = versioned_method.VersionedMethod( func_name, obj_min_ver, obj_max_ver, experimental, f) func_dict = getattr(cls, VER_METHOD_ATTR, {}) if not func_dict: setattr(cls, VER_METHOD_ATTR, func_dict) func_list = func_dict.get(func_name, []) if not func_list: func_dict[func_name] = func_list func_list.append(new_func) # Ensure the list is sorted by minimum version (reversed) # so later when we work through the list in order we find # the method which has the latest version which supports # the version requested. 
# TODO(cyeoh): Add check to ensure that there are no overlapping # ranges of valid versions as that is ambiguous func_list.sort(reverse=True) return f return decorator @staticmethod def authorize(arg): """Decorator for checking the policy on API methods. Add this decorator to any API method which takes a request object as the first parameter and belongs to a class which inherits from wsgi.Controller. The class must also have a class member called 'resource_name' which specifies the resource for the policy check. Can be used in any of the following forms @authorize @authorize('my_action_name') :param arg: Can either be the function being decorated or a str containing the 'action' for the policy check. If no action name is provided, the function name is assumed to be the action name. """ action_name = None def decorator(f): @functools.wraps(f) def wrapper(self, req, *args, **kwargs): action = action_name or f.__name__ context = req.environ['manila.context'] try: policy.check_policy(context, self.resource_name, action) except exception.PolicyNotAuthorized: raise webob.exc.HTTPForbidden() return f(self, req, *args, **kwargs) return wrapper if callable(arg): return decorator(arg) else: action_name = arg return decorator @staticmethod def is_valid_body(body, entity_name): if not (body and entity_name in body): return False def is_dict(d): try: d.get(None) return True except AttributeError: return False if not is_dict(body[entity_name]): return False return True class AdminActionsMixin(object): """Mixin class for API controllers with admin actions.""" body_attributes = { 'status': 'reset_status', 'replica_state': 'reset_replica_state', 'task_state': 'reset_task_state', } valid_statuses = { 'status': set([ constants.STATUS_CREATING, constants.STATUS_AVAILABLE, constants.STATUS_DELETING, constants.STATUS_ERROR, constants.STATUS_ERROR_DELETING, constants.STATUS_MIGRATING, constants.STATUS_MIGRATING_TO, constants.STATUS_SERVER_MIGRATING, ]), 'replica_state': set([ 
constants.REPLICA_STATE_ACTIVE, constants.REPLICA_STATE_IN_SYNC, constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR, ]), 'task_state': set(constants.TASK_STATE_STATUSES), } def _update(self, *args, **kwargs): raise NotImplementedError() def _get(self, *args, **kwargs): raise NotImplementedError() def _delete(self, *args, **kwargs): raise NotImplementedError() def validate_update(self, body, status_attr='status'): update = {} try: update[status_attr] = body[status_attr] except (TypeError, KeyError): msg = _("Must specify '%s'") % status_attr raise webob.exc.HTTPBadRequest(explanation=msg) if update[status_attr] not in self.valid_statuses[status_attr]: expl = (_("Invalid state. Valid states: %s.") % ", ".join(str(i) for i in self.valid_statuses[status_attr])) raise webob.exc.HTTPBadRequest(explanation=expl) return update @Controller.authorize('reset_status') def _reset_status(self, req, id, body, status_attr='status', resource=None): """Reset the status_attr specified on the resource. 
:param req: API request object :param id: ID of the resource :param body: API request body :param status_attr: Attribute on the resource denoting the status to be reset :param resource: Resource model or dict if we need to avoid fetching it """ context = req.environ['manila.context'] body_attr = self.body_attributes[status_attr] update = self.validate_update( body.get(body_attr, body.get('-'.join(('os', body_attr)))), status_attr=status_attr) msg = "Updating %(resource)s '%(id)s' with '%(update)r'" LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) try: resource = resource or self._get(context, id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(e.message) if (status_attr == 'replica_state' and resource.get('replica_state') == constants.REPLICA_STATE_ACTIVE): msg = _("Cannot reset replica_state of an active replica") raise webob.exc.HTTPBadRequest(explanation=msg) try: policy.check_policy(context, self.resource_name, "reset_status", target_obj=resource) except exception.NotAuthorized as e: raise webob.exc.HTTPForbidden(e.message) self._update(context, id, update) return webob.Response(status_int=http_client.ACCEPTED) @Controller.authorize('force_delete') def _force_delete(self, req, id, body): """Delete a resource, bypassing the check for status.""" context = req.environ['manila.context'] try: resource = self._get(context, id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(e.message) policy.check_policy(context, self.resource_name, "force_delete", target_obj=resource) self._delete(context, resource, force=True) return webob.Response(status_int=http_client.ACCEPTED) class Fault(webob.exc.HTTPException): """Wrap webob.exc.HTTPException to provide API friendly response.""" _fault_names = {400: "badRequest", 401: "unauthorized", 403: "forbidden", 404: "itemNotFound", 405: "badMethod", 409: "conflictingRequest", 413: "overLimit", 415: "badMediaType", 501: "notImplemented", 503: "serviceUnavailable"} def __init__(self, 
exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception self.status_int = exception.status_int @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. code = self.wrapped_exc.status_int fault_name = self._fault_names.get(code, "computeFault") fault_data = { fault_name: { 'code': code, 'message': self.wrapped_exc.explanation}} if code == 413: retry = self.wrapped_exc.headers['Retry-After'] fault_data[fault_name]['retryAfter'] = '%s' % retry if not req.api_version_request.is_null(): self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = ( req.api_version_request.get_string()) if req.api_version_request.experimental: # NOTE(vponomaryov): Translate our boolean header # to string explicitly to avoid 'TypeError' failure # running manila API under Apache + mod-wsgi. # It is safe to do so, because all headers are returned as # strings anyway. 
self.wrapped_exc.headers[EXPERIMENTAL_API_REQUEST_HEADER] = ( '%s' % req.api_version_request.experimental) self.wrapped_exc.headers['Vary'] = API_VERSION_REQUEST_HEADER content_type = req.best_match_content_type() serializer = { 'application/json': JSONDictSerializer(), }[content_type] self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type _set_request_id_header(req, self.wrapped_exc.headers) return self.wrapped_exc def __str__(self): return self.wrapped_exc.__str__() def _set_request_id_header(req, headers): context = req.environ.get('manila.context') if context: headers['x-compute-request-id'] = context.request_id class OverLimitFault(webob.exc.HTTPException): """Rate-limited request response.""" def __init__(self, message, details, retry_time): """Initialize new `OverLimitFault` with relevant information.""" hdrs = OverLimitFault._retry_after(retry_time) self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) self.content = { "overLimitFault": { "code": self.wrapped_exc.status_int, "message": message, "details": details, }, } @staticmethod def _retry_after(retry_time): delay = int(math.ceil(retry_time - time.time())) retry_after = delay if delay > 0 else 0 headers = {'Retry-After': '%s' % retry_after} return headers @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """Wrap the exception. Wrap the exception with a serialized body conforming to our error format. 
""" content_type = request.best_match_content_type() serializer = { 'application/json': JSONDictSerializer(), }[content_type] content = serializer.serialize(self.content) self.wrapped_exc.body = content return self.wrapped_exc ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8296728 manila-21.0.0/manila/api/schemas/0000775000175000017500000000000000000000000016513 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/schemas/__init__.py0000664000175000017500000000000000000000000020612 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/schemas/availability_zones.py0000664000175000017500000000337600000000000022766 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
index_request_query = { 'type': 'object', 'properties': {}, # TODO(stephenfin): Exclude additional query string parameters in a future # microversion 'additionalProperties': True, } index_response_body = { 'type': 'object', 'properties': { 'availability_zones': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': { 'type': 'string', 'format': 'uuid', }, 'name': { 'type': 'string', 'minLength': 1, 'maxLength': 255, }, 'created_at': { 'type': 'string', 'format': 'date-time', }, 'updated_at': { 'type': ['string', 'null'], 'format': 'date-time', }, }, 'required': [], 'additionalProperties': False, }, }, }, 'required': ['availability_zones'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/schemas/messages.py0000664000175000017500000001215400000000000020677 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from oslo_config import cfg from manila.api.validation import parameter_types from manila.api.validation import response_types CONF = cfg.CONF show_request_query = { 'type': 'object', 'properties': {}, 'required': [], # TODO(jonathan): Exclude additional query string parameters in a future # microversion 'additionalProperties': True, } index_request_query = { 'type': 'object', 'properties': { 'limit': parameter_types.single_param( parameter_types.non_negative_integer ), # NOTE(stephenfin): This is parsed by 'common.get_pagination_params' # but we ignore it. We may wish to uncomment this when that is no # longer the case # 'marker': parameter_types.multi_params({ # 'type': ['string'], # }), 'offset': parameter_types.single_param( parameter_types.non_negative_integer ), 'sort_key': parameter_types.single_param({ 'type': 'string', 'default': 'created_at', # TODO(stephenfin): These are the allowed (a.k.a. legal) filter # keys, but we currently ignore invalid keys. We should add this in # a future microversion. # 'enum': [ # 'id', # 'project_id', # 'request_id', # 'resource_type', # 'action_id', # 'detail_id', # 'resource_id', # 'message_level', # 'expires_at', # 'created_at', # ], }), 'sort_dir': parameter_types.single_param({ 'type': 'string', 'default': 'desc', # TODO(stephenfin): This should be an enum, but we currently treat # anything != 'desc' as 'asc'. We should make this stricter in a # future microversion. 
# 'enum': ['asc', 'desc'], }), 'action_id': parameter_types.single_param({ 'type': 'string', }), 'detail_id': parameter_types.single_param({ 'type': 'string', }), # TODO(jonathan) add enum when more message level the 'ERROR' 'message_level': parameter_types.single_param({ 'type': 'string', }), 'request_id': parameter_types.single_param({ 'type': 'string', }), 'resource_id': parameter_types.single_param({ 'type': 'string', }), 'resource_type': parameter_types.multi_params({ 'type': 'string', }), }, 'required': [], # TODO(jonathan): Exclude additional query string parameters in a future # microversion 'additionalProperties': True, } index_request_query_v252 = copy.deepcopy(index_request_query) index_request_query_v252['properties'].update({ 'created_since': parameter_types.single_param({ 'type': 'string', 'format': 'date-time', }), 'created_before': parameter_types.single_param({ 'type': 'string', 'format': 'date-time', }), }) _messages_response = { 'type': 'object', 'properties': { 'action_id': {'type': 'string'}, 'created_at': {'type': 'string', 'format': 'date-time'}, 'detail_id': {'type': 'string'}, 'expires_at': {'type': 'string', 'format': 'date-time'}, 'id': {'type': 'string', 'format': 'uuid'}, 'links': response_types.links, 'message_level': { 'type': 'string', 'enum': ['ERROR'], }, 'project_id': {'type': 'string'}, 'request_id': {'type': 'string'}, 'resource_id': {'type': 'string', 'format': 'uuid'}, 'resource_type': {'type': 'string'}, 'user_message': {'type': 'string'}, }, 'required': [ 'action_id', 'created_at', 'detail_id', 'expires_at', 'id', 'links', 'message_level', 'project_id', 'request_id', 'resource_id', 'resource_type', 'user_message', ], 'additionalProperties': False, } index_response_body = { 'type': 'object', 'properties': { 'messages': { 'type': 'array', 'items': _messages_response, }, 'messages_links': response_types.collection_links, }, 'required': ['messages'], 'additionalProperties': False, } show_response_body = { 'type': 'object', 
'properties': { 'message': _messages_response, }, 'required': ['message'], 'additionalProperties': False, } delete_response_body = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/schemas/resource_locks.py0000664000175000017500000002556100000000000022120 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from manila.api.validation import helpers from manila.api.validation import parameter_types from manila.api.validation import response_types from manila.common import constants CONF = cfg.CONF # TODO(stephenfin): Reject additional properties in a future microversion create_request_body = { 'type': 'object', 'properties': { 'resource_lock': { 'type': 'object', 'properties': { 'resource_id': { 'type': 'string', 'format': 'uuid', 'description': helpers.description( 'resource_lock_resource_id' ), }, 'lock_reason': { 'type': ['string', 'null'], 'maxLength': 1023, 'description': helpers.description( 'resource_lock_lock_reason_optional' ), }, 'resource_type': { 'type': ['string', 'null'], 'enum': list(constants.RESOURCE_LOCK_RESOURCE_TYPES) + [None], 'default': constants.SHARE_RESOURCE_TYPE, 'description': helpers.description( 'resource_lock_resource_type' ), }, 'resource_action': { 'type': ['string', 'null'], 'enum': list(constants.RESOURCE_LOCK_RESOURCE_ACTIONS) + [None], 'default': constants.RESOURCE_ACTION_DELETE, 'description': 
helpers.description( 'resource_lock_resource_action_create_optional' ), }, }, 'required': ['resource_id'], 'additionalProperties': False, 'description': helpers.description('resource_lock_object'), }, }, 'required': ['resource_lock'], 'additionalProperties': True, } update_request_body = { 'type': 'object', 'properties': { 'resource_lock': { 'type': 'object', 'properties': { 'resource_action': { 'type': ['string', 'null'], 'enum': list(constants.RESOURCE_LOCK_RESOURCE_ACTIONS) + [None], 'description': helpers.description( 'resource_lock_resource_action_optional' ), }, 'lock_reason': { 'type': ['string', 'null'], 'maxLength': 1023, 'description': helpers.description( 'resource_lock_lock_reason_optional' ), }, }, 'additionalProperties': False, 'description': helpers.description('resource_lock_object'), }, }, 'required': ['resource_lock'], 'additionalProperties': True, } index_request_query = { 'type': 'object', 'properties': { 'limit': parameter_types.multi_params({ **parameter_types.non_negative_integer, 'description': helpers.description('limit'), }), # NOTE(stephenfin): This is parsed by 'common.get_pagination_params' # but we ignore it. We may wish to uncomment this when that is no # longer the case # 'marker': parameter_types.multi_params({ # 'type': ['string'], # }), 'offset': parameter_types.multi_params({ **parameter_types.non_negative_integer, 'description': helpers.description('offset'), }), 'sort_key': parameter_types.multi_params({ 'type': 'string', 'default': 'created_at', 'description': helpers.description('sort_key_resource_locks'), }), 'sort_dir': parameter_types.multi_params({ 'type': 'string', 'default': 'desc', # TODO(stephenfin): This should be an enum, but we currently treat # anything != 'desc' as 'asc'. We should make this stricter in a # future microversion. 
# 'enum': ['asc', 'desc'], 'description': helpers.description('sort_dir'), }), 'with_count': parameter_types.multi_params(parameter_types.boolean), 'created_since': parameter_types.multi_params({ 'type': 'string', 'format': 'date-time', 'description': helpers.description('created_since_query'), }), 'created_before': parameter_types.multi_params({ 'type': 'string', 'format': 'date-time', 'description': helpers.description('created_before_query'), }), 'project_id': parameter_types.multi_params({ 'type': ['string', 'null'], 'format': 'uuid', 'description': helpers.description( 'resource_lock_project_id_query' ), }), 'user_id': parameter_types.multi_params({ 'type': ['string', 'null'], 'format': 'uuid', 'description': helpers.description('resource_lock_user_id_query'), }), 'resource_id': parameter_types.multi_params({ 'type': ['string', 'null'], 'format': 'uuid', 'description': helpers.description( 'resource_lock_resource_id_query' ), }), 'resource_action': parameter_types.multi_params({ 'type': ['string', 'null'], 'enum': list(constants.RESOURCE_LOCK_RESOURCE_ACTIONS) + [None], 'description': helpers.description( 'resource_lock_resource_action_query' ), }), 'resource_type': parameter_types.multi_params({ 'type': ['string', 'null'], 'enum': list(constants.RESOURCE_LOCK_RESOURCE_TYPES) + [None], 'description': helpers.description( 'resource_lock_resource_type_query' ), }), 'all_projects': parameter_types.multi_params({ **parameter_types.boolean, 'description': helpers.description( 'resource_lock_all_projects_query' ), }), 'lock_context': parameter_types.multi_params({ 'type': ['string', 'null'], 'maxLength': 10, 'description': helpers.description( 'resource_lock_lock_context_query' ), }), 'lock_reason': parameter_types.multi_params({ 'type': ['string', 'null'], 'maxLength': 1023, 'description': helpers.description( 'resource_lock_lock_reason_query' ), }), }, # TODO(stephenfin): Exclude additional query string parameters in a future # microversion 'additionalProperties': 
True, } show_request_query = { 'type': 'object', 'properties': {}, # TODO(stephenfin): Exclude additional query string parameters in a future # microversion 'additionalProperties': True, } _resource_lock_response = { 'type': 'object', 'properties': { 'id': { 'type': 'string', 'format': 'uuid', 'description': helpers.description('resource_lock_id'), }, 'user_id': { 'type': 'string', 'format': 'uuid', 'description': helpers.description('resource_lock_user_id'), }, 'project_id': { 'type': 'string', 'format': 'uuid', 'description': helpers.description('resource_lock_project_id'), }, 'lock_context': { 'type': 'string', 'description': helpers.description('resource_lock_lock_context'), }, 'resource_type': { 'type': 'string', 'enum': list(constants.RESOURCE_LOCK_RESOURCE_TYPES), 'description': helpers.description('resource_lock_resource_type'), }, 'resource_id': { 'type': 'string', 'format': 'uuid', 'description': helpers.description('resource_lock_resource_id'), }, 'resource_action': { 'type': 'string', 'enum': list(constants.RESOURCE_LOCK_RESOURCE_ACTIONS), 'description': helpers.description( 'resource_lock_resource_action' ), }, 'lock_reason': { 'type': ['string', 'null'], 'description': helpers.description('resource_lock_lock_reason'), }, 'created_at': { 'type': 'string', 'format': 'date-time', 'description': helpers.description('created_at'), }, 'updated_at': { 'type': ['string', 'null'], 'format': 'date-time', 'description': helpers.description('updated_at'), }, 'links': response_types.links, }, 'description': helpers.description('resource_lock_object'), 'required': [ 'id', 'user_id', 'project_id', 'lock_context', 'resource_type', 'resource_id', 'resource_action', 'lock_reason', 'created_at', 'updated_at', 'links', ], 'additionalProperties': False, } create_response_body = { 'type': 'object', 'properties': { 'resource_lock': _resource_lock_response, }, 'required': ['resource_lock'], 'additionalProperties': False, } index_response_body = { 'type': 'object', 
'properties': { 'resource_locks': { 'type': 'array', 'items': _resource_lock_response, }, 'count': { 'type': 'integer', 'description': helpers.description('count_without_min_version'), }, 'resource_locks_links': response_types.collection_links, }, 'required': ['resource_locks'], 'additionalProperties': False, } show_response_body = { 'type': 'object', 'properties': { 'resource_lock': _resource_lock_response, }, 'required': ['resource_lock'], 'additionalProperties': False, } update_response_body = { 'type': 'object', 'properties': { 'resource_lock': _resource_lock_response, }, 'required': ['resource_lock'], 'additionalProperties': False, } delete_response_body = { 'type': 'null', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/urlmap.py0000664000175000017500000002362100000000000016746 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from urllib.request import parse_http_list import paste.urlmap from manila.api.openstack import wsgi _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' _option_header_piece_re = re.compile( r';\s*([^\s;=]+|%s)\s*' r'(?:=\s*([^;]+|%s))?\s*' % (_quoted_string_re, _quoted_string_re)) def unquote_header_value(value): """Unquotes a header value. This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. 
""" if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] return value def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] :param value: a string with a list header. :return: :class:`list` """ result = [] for item in parse_http_list(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_options_header(value): """Parse header into content type and options. Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('Content-Type: text/html; mimetype=text/html') ('Content-Type:', {'mimetype': 'text/html'}) :param value: the header to parse. :return: (str, options) """ def _tokenize(string): for match in _option_header_piece_re.finditer(string): key, value = match.groups() key = unquote_header_value(key) if value is not None: value = unquote_header_value(value) yield key, value if not value: return '', {} parts = _tokenize(';' + value) name = next(parts)[0] extra = dict(parts) return name, extra class Accept(object): def __init__(self, value): self._content_types = [parse_options_header(v) for v in parse_list_header(value)] def best_match(self, supported_content_types): # FIXME: Should we have a more sophisticated matching algorithm that # takes into account the version as well? 
best_quality = -1 best_content_type = None best_params = {} best_match = '*/*' for content_type in supported_content_types: for content_mask, params in self._content_types: try: quality = float(params.get('q', 1)) except ValueError: continue if quality < best_quality: continue elif best_quality == quality: if best_match.count('*') <= content_mask.count('*'): continue if self._match_mask(content_mask, content_type): best_quality = quality best_content_type = content_type best_params = params best_match = content_mask return best_content_type, best_params def content_type_params(self, best_content_type): """Find parameters in Accept header for given content type.""" for content_type, params in self._content_types: if best_content_type == content_type: return params return {} def _match_mask(self, mask, content_type): if '*' not in mask: return content_type == mask if mask == '*/*': return True mask_major = mask[:-2] content_type_major = content_type.split('/', 1)[0] return content_type_major == mask_major def urlmap_factory(loader, global_conf, **local_conf): if 'not_found_app' in local_conf: not_found_app = local_conf.pop('not_found_app') else: not_found_app = global_conf.get('not_found_app') if not_found_app: not_found_app = loader.get_app(not_found_app, global_conf=global_conf) urlmap = URLMap(not_found_app=not_found_app) for path, app_name in local_conf.items(): path = paste.urlmap.parse_path_expression(path) app = loader.get_app(app_name, global_conf=global_conf) urlmap[path] = app return urlmap class URLMap(paste.urlmap.URLMap): def _match(self, host, port, path_info): """Find longest match for a given URL path.""" for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue if (path_info == app_url or path_info.startswith(app_url + '/')): return app, app_url return None, None def _set_script_name(self, app, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url return 
app(environ, start_response) return wrap def _munge_path(self, app, path_info, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url environ['PATH_INFO'] = path_info[len(app_url):] return app(environ, start_response) return wrap def _path_strategy(self, host, port, path_info): """Check path suffix for MIME type and path prefix for API version.""" mime_type = app = app_url = None parts = path_info.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: mime_type = possible_type parts = path_info.split('/') if len(parts) > 1: possible_app, possible_app_url = self._match(host, port, path_info) # Don't use prefix if it ends up matching default if possible_app and possible_app_url: app_url = possible_app_url app = self._munge_path(possible_app, path_info, app_url) return mime_type, app, app_url def _content_type_strategy(self, host, port, environ): """Check Content-Type header for API version.""" app = None params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return app def _accept_strategy(self, host, port, environ, supported_content_types): """Check Accept header for best matching MIME type and API version.""" accept = Accept(environ.get('HTTP_ACCEPT', '')) app = None # Find the best match in the Accept header mime_type, params = accept.best_match(supported_content_types) if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return mime_type, app def __call__(self, environ, start_response): host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() if ':' in host: host, port = host.split(':', 1) else: if environ['wsgi.url_scheme'] == 'http': port = '80' else: port = '443' path_info = environ['PATH_INFO'] path_info = 
self.normalize_url(path_info, False)[1] # The API version is determined in one of three ways: # 1) URL path prefix (eg /v1.1/tenant/servers/detail) # 2) Content-Type header (eg application/json;version=1.1) # 3) Accept header (eg application/json;q=0.8;version=1.1) # Manila supports only application/json as MIME type for the responses. supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) mime_type, app, app_url = self._path_strategy(host, port, path_info) if not app: app = self._content_type_strategy(host, port, environ) if not mime_type or not app: possible_mime_type, possible_app = self._accept_strategy( host, port, environ, supported_content_types) if possible_mime_type and not mime_type: mime_type = possible_mime_type if possible_app and not app: app = possible_app if not mime_type: mime_type = 'application/json' if not app: # Didn't match a particular version, probably matches default app, app_url = self._match(host, port, path_info) if app: app = self._munge_path(app, path_info, app_url) if app: environ['manila.best_content_type'] = mime_type return app(environ, start_response) environ['paste.urlmap_object'] = self return self.not_found_application(environ, start_response) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8296728 manila-21.0.0/manila/api/v1/0000775000175000017500000000000000000000000015416 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/__init__.py0000664000175000017500000000000000000000000017515 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/limits.py0000664000175000017500000003362400000000000017301 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. """ import collections import copy from http import client as http_client import math import re import time from oslo_serialization import jsonutils from oslo_utils import importutils import webob.dec import webob.exc from manila.api.openstack import wsgi from manila.api.views import limits as limits_views from manila.i18n import _ from manila import quota from manila.wsgi import common as base_wsgi QUOTAS = quota.QUOTAS # Convenience constants for the limits dictionary passed to Limiter(). 
PER_SECOND = 1 PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 class LimitsController(wsgi.Controller): """Controller for accessing limits in the OpenStack API.""" def index(self, req): """Return all global and rate limit information.""" context = req.environ['manila.context'] quotas = QUOTAS.get_project_quotas(context, context.project_id, usages=True) abs_limits = {'in_use': {}, 'limit': {}} for k, v in quotas.items(): abs_limits['limit'][k] = v['limit'] abs_limits['in_use'][k] = v['in_use'] rate_limits = req.environ.get("manila.limits", []) builder = self._get_view_builder(req) return builder.build(req, rate_limits, abs_limits) def _get_view_builder(self, req): return limits_views.ViewBuilder() def create_resource(): return wsgi.Resource(LimitsController()) class Limit(object): """Stores information about a limit for HTTP requests.""" UNITS = { 1: "SECOND", 60: "MINUTE", 60 * 60: "HOUR", 60 * 60 * 24: "DAY", } UNIT_MAP = {v: k for k, v in UNITS.items()} def __init__(self, verb, uri, regex, value, unit): """Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) @param uri: Human-readable URI @param regex: Regular expression format for this limit @param value: Integer number of requests which can be made @param unit: Unit of measure for the value parameter """ self.verb = verb self.uri = uri self.regex = regex self.value = int(value) self.unit = unit self.unit_string = self.display_unit().lower() self.remaining = int(value) if value <= 0: raise ValueError("Limit value must be > 0") self.last_request = None self.next_request = None self.water_level = 0 self.capacity = self.unit self.request_value = float(self.capacity) / float(self.value) msg = (_("Only %(value)s %(verb)s request(s) can be " "made to %(uri)s every %(unit_string)s.") % {'value': self.value, 'verb': self.verb, 'uri': self.uri, 'unit_string': self.unit_string}) self.error_message = msg def __call__(self, verb, url): """Represents a call to this limit from a relevant request. 
@param verb: string http verb (POST, GET, etc.) @param url: string URL """ if self.verb != verb or not re.match(self.regex, url): return now = self._get_time() if self.last_request is None: self.last_request = now leak_value = now - self.last_request self.water_level -= leak_value self.water_level = max(self.water_level, 0) self.water_level += self.request_value difference = self.water_level - self.capacity self.last_request = now if difference > 0: self.water_level -= self.request_value self.next_request = now + difference return difference cap = self.capacity water = self.water_level val = self.value self.remaining = math.floor(((cap - water) / cap) * val) self.next_request = now def _get_time(self): """Retrieve the current time. Broken out for testability.""" return time.time() def display_unit(self): """Display the string name of the unit.""" return self.UNITS.get(self.unit, "UNKNOWN") def display(self): """Return a useful representation of this class.""" return { "verb": self.verb, "URI": self.uri, "regex": self.regex, "value": self.value, "remaining": int(self.remaining), "unit": self.display_unit(), "resetTime": int(self.next_request or self._get_time()), } # "Limit" format is a dictionary with the HTTP verb, human-readable URI, # a regular-expression to match, value and unit of measure (PER_DAY, etc.) DEFAULT_LIMITS = [ Limit("POST", "*", ".*", 10, PER_MINUTE), Limit("POST", "*/servers", "^/servers", 50, PER_DAY), Limit("PUT", "*", ".*", 10, PER_MINUTE), Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), Limit("DELETE", "*", ".*", 100, PER_MINUTE), ] class RateLimitingMiddleware(base_wsgi.Middleware): """Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): """Initialize new `RateLimitingMiddleware`. `RateLimitingMiddleware` wraps the given WSGI application and sets up the given limits. 
@param application: WSGI application to wrap @param limits: String describing limits @param limiter: String identifying class for representing limits Other parameters are passed to the constructor for the limiter. """ base_wsgi.Middleware.__init__(self, application) # Select the limiter class if limiter is None: limiter = Limiter else: limiter = importutils.import_class(limiter) # Parse the limits, if any are provided if limits is not None: limits = limiter.parse_limits(limits) self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Represents a single call through this middleware. We should record the request if we have a limit relevant to it. If no limit is relevant to the request, ignore it. If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. """ verb = req.method url = req.url context = req.environ.get("manila.context") if context: username = context.user_id else: username = None delay, error = self._limiter.check_for_delay(verb, url, username) if delay: msg = _("This request was rate-limited.") retry = time.time() + delay return wsgi.OverLimitFault(msg, error, retry) req.environ["manila.limits"] = self._limiter.get_limits(username) return self.application class Limiter(object): """Rate-limit checking class which handles limits in memory.""" def __init__(self, limits, **kwargs): """Initialize the new `Limiter`. 
@param limits: List of `Limit` objects """ self.limits = copy.deepcopy(limits) self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) # Pick up any per-user limit information for key, value in kwargs.items(): if key.startswith('user:'): username = key[5:] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): """Return the limits for a given user.""" return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): """Check the given verb/user/user triplet for limit. @return: Tuple of delay (in seconds) and error message (or None, None) """ delays = [] for limit in self.levels[username]: delay = limit(verb, url) if delay: delays.append((delay, limit.error_message)) if delays: delays.sort() return delays[0] return None, None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. We # put this in the class so that subclasses can override the # default limit parsing. @staticmethod def parse_limits(limits): """Convert a string into a list of Limit instances. This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of requests which can be made, and a unit of measure. Valid values for the latter are "SECOND", "MINUTE", "HOUR", and "DAY". @return: List of Limit instances. 
""" # Handle empty limit strings limits = limits.strip() if not limits: return [] # Split up the limits by semicolon result = [] for group in limits.split(';'): group = group.strip() if group[:1] != '(' or group[-1:] != ')': raise ValueError("Limit rules must be surrounded by " "parentheses") group = group[1:-1] # Extract the Limit arguments args = [a.strip() for a in group.split(',')] if len(args) != 5: raise ValueError("Limit rules must contain the following " "arguments: verb, uri, regex, value, unit") # Pull out the arguments verb, uri, regex, value, unit = args # Upper-case the verb verb = verb.upper() # Convert value--raises ValueError if it's not integer value = int(value) # Convert unit unit = unit.upper() if unit not in Limit.UNIT_MAP: raise ValueError("Invalid units specified") unit = Limit.UNIT_MAP[unit] # Build a limit result.append(Limit(verb, uri, regex, value, unit)) return result class WsgiLimiter(object): """Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. To use, POST ``/`` with JSON data such as:: { "verb" : GET, "path" : "/servers" } and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds header containing the number of seconds to wait before the action would succeed. """ def __init__(self, limits=None): """Initialize the new `WsgiLimiter`. @param limits: List of `Limit` objects """ self._limiter = Limiter(limits or DEFAULT_LIMITS) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, request): """Handles a call to this application. Returns 204 if the request is acceptable to the limiter, else a 403 is returned with a relevant header indicating when the request *will* succeed. 
""" if request.method != "POST": raise webob.exc.HTTPMethodNotAllowed() try: info = dict(jsonutils.loads(request.body)) except ValueError: raise webob.exc.HTTPBadRequest() username = request.path_info_pop() verb = info.get("verb") path = info.get("path") delay, error = self._limiter.check_for_delay(verb, path, username) if delay: headers = {"X-Wait-Seconds": "%.2f" % delay} return webob.exc.HTTPForbidden(headers=headers, explanation=error) else: return webob.exc.HTTPNoContent() class WsgiLimiterProxy(object): """Rate-limit requests based on answers from a remote source.""" def __init__(self, limiter_address): """Initialize the new `WsgiLimiterProxy`. @param limiter_address: IP/port combination of where to request limit """ self.limiter_address = limiter_address def check_for_delay(self, verb, path, username=None): body = jsonutils.dumps({"verb": verb, "path": path}) headers = {"Content-Type": "application/json"} conn = http_client.HTTPConnection(self.limiter_address) if username: conn.request("POST", "/%s" % (username), body, headers) else: conn.request("POST", "/", body, headers) resp = conn.getresponse() if resp.status >= 200 and resp.status < 300: # there's nothing to rate-limit return None, None return resp.getheader("X-Wait-Seconds"), resp.read() or None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. # This implementation returns an empty list, since all limit # decisions are made by a remote server. @staticmethod def parse_limits(limits): """Ignore a limits string. This simply doesn't apply for the limit proxy. @return: Empty list. """ return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/router.py0000664000175000017500000001630700000000000017317 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. 
# Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack Share API v1. """ from manila.api import extensions import manila.api.openstack from manila.api.v1 import limits from manila.api.v1 import scheduler_stats from manila.api.v1 import security_service from manila.api.v1 import share_manage from manila.api.v1 import share_metadata from manila.api.v1 import share_servers from manila.api.v1 import share_snapshots from manila.api.v1 import share_types_extra_specs from manila.api.v1 import share_unmanage from manila.api.v1 import shares from manila.api.v2 import availability_zones from manila.api.v2 import quota_class_sets from manila.api.v2 import quota_sets from manila.api.v2 import services from manila.api.v2 import share_networks from manila.api.v2 import share_types from manila.api import versions class APIRouter(manila.api.openstack.APIRouter): """Route API requests. Routes requests on the OpenStack API to the appropriate controller and method. 
""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper): self.resources['versions'] = versions.create_resource() mapper.connect("versions", "/", controller=self.resources['versions'], action='index') mapper.redirect("", "/") self.resources["availability_zones"] = ( availability_zones.create_resource_legacy()) mapper.resource("availability-zone", "os-availability-zone", controller=self.resources["availability_zones"]) self.resources["services"] = services.create_resource_legacy() mapper.resource("service", "os-services", controller=self.resources["services"]) self.resources["quota_sets"] = quota_sets.create_resource_legacy() mapper.resource("quota-set", "os-quota-sets", controller=self.resources["quota_sets"], member={'defaults': 'GET'}) self.resources["quota_class_sets"] = ( quota_class_sets.create_resource_legacy()) mapper.resource("quota-class-set", "os-quota-class-sets", controller=self.resources["quota_class_sets"]) self.resources["share_manage"] = share_manage.create_resource() mapper.resource("share_manage", "os-share-manage", controller=self.resources["share_manage"]) self.resources["share_unmanage"] = share_unmanage.create_resource() mapper.resource("share_unmanage", "os-share-unmanage", controller=self.resources["share_unmanage"], member={'unmanage': 'POST'}) self.resources['shares'] = shares.create_resource() mapper.resource("share", "shares", controller=self.resources['shares'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['snapshots'] = share_snapshots.create_resource() mapper.resource("snapshot", "snapshots", controller=self.resources['snapshots'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['share_metadata'] = share_metadata.create_resource() share_metadata_controller = self.resources['share_metadata'] mapper.resource("share_metadata", "metadata", controller=share_metadata_controller, parent_resource=dict(member_name='share', collection_name='shares')) 
mapper.connect("metadata", "/{project_id}/shares/{share_id}/metadata", controller=share_metadata_controller, action='update_all', conditions={"method": ['PUT']}) self.resources['limits'] = limits.create_resource() mapper.resource("limit", "limits", controller=self.resources['limits']) self.resources["security_services"] = ( security_service.create_resource()) mapper.resource("security-service", "security-services", controller=self.resources['security_services'], collection={'detail': 'GET'}) self.resources['share_networks'] = share_networks.create_resource() mapper.resource(share_networks.RESOURCE_NAME, 'share-networks', controller=self.resources['share_networks'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['share_servers'] = share_servers.create_resource() mapper.resource('share_server', 'share-servers', controller=self.resources['share_servers']) mapper.connect('details', '/{project_id}/share-servers/{id}/details', controller=self.resources['share_servers'], action='details', conditions={"method": ['GET']}) self.resources['types'] = share_types.create_resource() mapper.resource("type", "types", controller=self.resources['types'], collection={'detail': 'GET', 'default': 'GET'}, member={'action': 'POST', 'os-share-type-access': 'GET'}) self.resources['extra_specs'] = ( share_types_extra_specs.create_resource()) mapper.resource('extra_spec', 'extra_specs', controller=self.resources['extra_specs'], parent_resource=dict(member_name='type', collection_name='types')) self.resources['scheduler_stats'] = scheduler_stats.create_resource() mapper.connect('pools', '/{project_id}/scheduler-stats/pools', controller=self.resources['scheduler_stats'], action='pools_index', conditions={'method': ['GET']}) mapper.connect('pools', '/{project_id}/scheduler-stats/pools/detail', controller=self.resources['scheduler_stats'], action='pools_detail', conditions={'method': ['GET']}) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/scheduler_stats.py0000664000175000017500000000664200000000000021174 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from manila.api.openstack import wsgi from manila.api.views import scheduler_stats as scheduler_stats_views from manila import exception from manila.i18n import _ from manila.scheduler import rpcapi from manila.share import share_types class SchedulerStatsController(wsgi.Controller): """The Scheduler Stats API controller for the OpenStack API.""" resource_name = 'scheduler_stats:pools' def __init__(self): self.scheduler_api = rpcapi.SchedulerAPI() self._view_builder_class = scheduler_stats_views.ViewBuilder super(SchedulerStatsController, self).__init__() @wsgi.Controller.api_version('1.0', '2.22') @wsgi.Controller.authorize('index') def pools_index(self, req): """Returns a list of storage pools known to the scheduler.""" return self._pools(req, action='index') @wsgi.Controller.api_version('2.23') # noqa @wsgi.Controller.authorize('index') def pools_index(self, req): # pylint: disable=function-redefined # noqa F811 return self._pools(req, action='index', enable_share_type=True) @wsgi.Controller.api_version('1.0', '2.22') @wsgi.Controller.authorize('detail') def pools_detail(self, req): """Returns a detailed list of storage pools known to the scheduler.""" return self._pools(req, 
action='detail') @wsgi.Controller.api_version('2.23') # noqa @wsgi.Controller.authorize('detail') def pools_detail(self, req): # pylint: disable=function-redefined # noqa F811 return self._pools(req, action='detail', enable_share_type=True) def _pools(self, req, action='index', enable_share_type=False): context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) if enable_share_type: req_share_type = search_opts.pop('share_type', None) if req_share_type: try: share_type = share_types.get_share_type_by_name_or_id( context, req_share_type) search_opts['capabilities'] = share_type.get('extra_specs', {}) except exception.ShareTypeNotFound: msg = _("Share type %s not found.") % req_share_type raise exc.HTTPBadRequest(explanation=msg) try: pools = self.scheduler_api.get_pools(context, filters=search_opts, cached=True) except exception.NotAuthorized: raise exc.HTTPForbidden() detail = (action == 'detail') return self._view_builder.pools(pools, detail=detail) def create_resource(): return wsgi.Resource(SchedulerStatsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/security_service.py0000664000175000017500000002436100000000000021365 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The security service api.""" from http import client as http_client from oslo_log import log import webob from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.views import security_service as security_service_views from manila.common import constants from manila import db from manila import exception from manila.i18n import _ from manila import policy from manila import utils RESOURCE_NAME = 'security_service' LOG = log.getLogger(__name__) class SecurityServiceController(wsgi.Controller): """The Shares API controller for the OpenStack API.""" _view_builder_class = security_service_views.ViewBuilder def show(self, req, id): """Return data about the given security service.""" context = req.environ['manila.context'] try: security_service = db.security_service_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'show', security_service) except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, security_service) def delete(self, req, id): """Delete a security service.""" context = req.environ['manila.context'] LOG.info("Delete security service with id: %s", id, context=context) try: security_service = db.security_service_get(context, id) except exception.NotFound: raise exc.HTTPNotFound() share_nets = db.share_network_get_all_by_security_service( context, id) if share_nets: msg = _("Cannot delete security service. 
It is " "assigned to share network(s)") raise exc.HTTPForbidden(explanation=msg) policy.check_policy(context, RESOURCE_NAME, 'delete', security_service) db.security_service_delete(context, id) return webob.Response(status_int=http_client.ACCEPTED) def index(self, req): """Returns a summary list of security services.""" policy.check_policy(req.environ['manila.context'], RESOURCE_NAME, 'index') return self._get_security_services(req, is_detail=False) def detail(self, req): """Returns a detailed list of security services.""" policy.check_policy(req.environ['manila.context'], RESOURCE_NAME, 'detail') return self._get_security_services(req, is_detail=True) def _get_security_services(self, req, is_detail): """Returns a transformed list of security services. The list gets transformed through view builder. """ context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # NOTE(vponomaryov): remove 'status' from search opts # since it was removed from security service model. search_opts.pop('status', None) if 'share_network_id' in search_opts: share_nw = db.share_network_get(context, search_opts['share_network_id']) security_services = share_nw['security_services'] del search_opts['share_network_id'] else: # ignore all_tenants if not authorized to use it. 
security_services = None if utils.is_all_tenants(search_opts): allowed_to_list_all_tenants = policy.check_policy( context, RESOURCE_NAME, 'get_all_security_services', do_raise=False) if allowed_to_list_all_tenants: security_services = db.security_service_get_all(context) if security_services is None: security_services = db.security_service_get_all_by_project( context, context.project_id) search_opts.pop('all_tenants', None) common.remove_invalid_options( context, search_opts, self._get_security_services_search_options()) if search_opts: results = [] not_found = object() for ss in security_services: if all(ss.get(opt, not_found) == value for opt, value in search_opts.items()): results.append(ss) security_services = results limited_list = common.limited(security_services, req) if is_detail: security_services = self._view_builder.detail_list( req, limited_list) for ss in security_services['security_services']: share_networks = db.share_network_get_all_by_security_service( context, ss['id']) ss['share_networks'] = [sn['id'] for sn in share_networks] else: security_services = self._view_builder.summary_list( req, limited_list) return security_services def _get_security_services_search_options(self): return ('name', 'id', 'type', 'user', 'server', 'dns_ip', 'domain', ) def _share_servers_dependent_on_sn_exist(self, context, security_service_id): share_networks = db.share_network_get_all_by_security_service( context, security_service_id) for sn in share_networks: for sns in sn['share_network_subnets']: if 'share_servers' in sns and sns['share_servers']: return True return False def update(self, req, id, body): """Update a security service.""" context = req.environ['manila.context'] if not body or 'security_service' not in body: raise exc.HTTPUnprocessableEntity() security_service_data = body['security_service'] valid_update_keys = ( 'description', 'name' ) try: security_service = db.security_service_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'update', 
security_service) except exception.NotFound: raise exc.HTTPNotFound() if self._share_servers_dependent_on_sn_exist(context, id): for item in security_service_data: if item not in valid_update_keys: msg = _("Cannot update security service %s. It is " "attached to share network with share server " "associated. Only 'name' and 'description' " "fields are available for update.") % id raise exc.HTTPForbidden(explanation=msg) server = security_service_data.get('server') default_ad_site = security_service_data.get('default_ad_site') if default_ad_site: if req.api_version_request < api_version.APIVersionRequest("2.76"): msg = _('"default_ad_site" is only supported from API ' 'version 2.76.') raise webob.exc.HTTPBadRequest(explanation=msg) if (security_service['type'] == 'active_directory' and server and default_ad_site): raise exception.InvalidInput( reason=(_("Cannot create security service because both " "server and 'default_ad_site' were provided. " "Specify either server or 'default_ad_site'."))) policy.check_policy(context, RESOURCE_NAME, 'update', security_service) security_service = db.security_service_update( context, id, security_service_data) return self._view_builder.detail(req, security_service) def create(self, req, body): """Creates a new security service.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'create') if not self.is_valid_body(body, 'security_service'): raise exc.HTTPUnprocessableEntity() security_service_args = body['security_service'] security_srv_type = security_service_args.get('type') allowed_types = constants.SECURITY_SERVICES_ALLOWED_TYPES if security_srv_type not in allowed_types: raise exception.InvalidInput( reason=(_("Invalid type %(type)s specified for security " "service. 
Valid types are %(types)s") % {'type': security_srv_type, 'types': ','.join(allowed_types)})) server = security_service_args.get('server') default_ad_site = security_service_args.get('default_ad_site') if default_ad_site: if req.api_version_request < api_version.APIVersionRequest("2.76"): msg = _('"default_ad_site" is only supported from API ' 'version 2.76.') raise webob.exc.HTTPBadRequest(explanation=msg) if (security_srv_type == 'active_directory' and server and default_ad_site): raise exception.InvalidInput( reason=(_("Cannot create security service because both " "server and 'default_ad_site' were provided, " "Specify either server or 'default_ad_site'."))) security_service_args['project_id'] = context.project_id security_service = db.security_service_create( context, security_service_args) return self._view_builder.detail(req, security_service) def create_resource(): return wsgi.Resource(SecurityServiceController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/share_manage.py0000664000175000017500000001341700000000000020410 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import shares as share_views from manila import exception from manila.i18n import _ from manila import share from manila.share import share_types from manila.share import utils as share_utils from manila import utils class ShareManageMixin(object): @wsgi.Controller.authorize('manage') def _manage(self, req, body, allow_dhss_true=False): context = req.environ['manila.context'] share_data = self._validate_manage_parameters(context, body) share_data = common.validate_public_share_policy(context, share_data) # NOTE(vponomaryov): compatibility actions are required between API and # DB layers for 'name' and 'description' API params that are # represented in DB as 'display_name' and 'display_description' # appropriately. name = share_data.get('display_name', share_data.get('name')) description = share_data.get( 'display_description', share_data.get('description')) share = { 'host': share_data['service_host'], 'export_location_path': share_data['export_path'], 'share_proto': share_data['protocol'].upper(), 'share_type_id': share_data['share_type_id'], 'display_name': name, 'display_description': description, } if share_data.get('is_public') is not None: share['is_public'] = share_data['is_public'] driver_options = share_data.get('driver_options', {}) if allow_dhss_true: share['share_server_id'] = share_data.get('share_server_id') try: share_ref = self.share_api.manage(context, share, driver_options) except exception.PolicyNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.msg) except (exception.InvalidShare, exception.InvalidShareServer) as e: raise exc.HTTPConflict(explanation=e.msg) except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.msg) return self._view_builder.detail(req, share_ref) def _validate_manage_parameters(self, context, body): if not (body and self.is_valid_body(body, 'share')): msg = _("Share entity not found in 
request body") raise exc.HTTPUnprocessableEntity(explanation=msg) required_parameters = ('export_path', 'service_host', 'protocol') data = body['share'] for parameter in required_parameters: if parameter not in data: msg = _("Required parameter %s not found") % parameter raise exc.HTTPUnprocessableEntity(explanation=msg) if not data.get(parameter): msg = _("Required parameter %s is empty") % parameter raise exc.HTTPUnprocessableEntity(explanation=msg) if isinstance(data['export_path'], dict): # the path may be inside this dictionary try: data['export_path'] = data['export_path']['path'] except KeyError: msg = ("Export path must be a string, or a dictionary " "with a 'path' item") raise exc.HTTPUnprocessableEntity(explanation=msg) if not share_utils.extract_host(data['service_host'], 'pool'): msg = _("service_host parameter should contain pool.") raise exc.HTTPBadRequest(explanation=msg) try: utils.validate_service_host( context, share_utils.extract_host(data['service_host'])) except exception.ServiceNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) except exception.PolicyNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.msg) except exception.AdminRequired as e: raise exc.HTTPForbidden(explanation=e.msg) except exception.ServiceIsDown as e: raise exc.HTTPBadRequest(explanation=e.msg) data['share_type_id'] = self._get_share_type_id( context, data.get('share_type')) return data @staticmethod def _get_share_type_id(context, share_type): try: stype = share_types.get_share_type_by_name_or_id(context, share_type) return stype['id'] except exception.ShareTypeNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) class ShareManageController(ShareManageMixin, wsgi.Controller): """Allows existing share to be 'managed' by Manila.""" resource_name = "share" _view_builder_class = share_views.ViewBuilder def __init__(self, *args, **kwargs): super(ShareManageController, self).__init__(*args, **kwargs) self.share_api = share.API() 
@wsgi.Controller.api_version('1.0', '2.6') def create(self, req, body): """Legacy method for 'manage share' operation. Should be removed when minimum API version becomes equal to or greater than v2.7 """ body.get('share', {}).pop('is_public', None) return self._manage(req, body) def create_resource(): return wsgi.Resource(ShareManageController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/share_metadata.py0000664000175000017500000001604600000000000020741 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client from oslo_log import log import webob from webob import exc from oslo_config import cfg from manila.api import common as api_common from manila.api.openstack import wsgi from manila import db from manila import exception from manila.i18n import _ from manila import policy from manila import share LOG = log.getLogger(__name__) CONF = cfg.CONF class ShareMetadataController(object): """The share metadata API controller for the OpenStack API.""" def __init__(self): self.share_api = share.API() super(ShareMetadataController, self).__init__() def _get_metadata(self, context, share_id): try: share = self.share_api.get(context, share_id) rv = db.share_metadata_get(context, share['id']) meta = dict(rv.items()) except exception.NotFound: msg = _('share does not exist') raise exc.HTTPNotFound(explanation=msg) return meta def index(self, req, share_id): """Returns the list of metadata for a given share.""" context = req.environ['manila.context'] return {'metadata': self._get_metadata(context, share_id)} def create(self, req, share_id, body): try: metadata = body['metadata'] except (KeyError, TypeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) context = req.environ['manila.context'] new_metadata = self._update_share_metadata(context, share_id, metadata, delete=False) return {'metadata': new_metadata} def update(self, req, share_id, id, body): try: meta_item = body['meta'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['manila.context'] self._update_share_metadata(context, share_id, meta_item, delete=False) return {'meta': meta_item} def update_all(self, req, share_id, body): try: metadata = 
body['metadata'] except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['manila.context'] new_metadata = self._update_share_metadata( context, share_id, metadata, delete=True) return {'metadata': new_metadata} def _update_share_metadata(self, context, share_id, metadata, delete=False): ignore_keys = getattr(CONF, 'admin_only_metadata', []) try: share = self.share_api.get(context, share_id) if set(metadata).intersection(set(ignore_keys)): try: policy.check_policy( context, 'share', 'update_admin_only_metadata') except exception.PolicyNotAuthorized: msg = _("Cannot set or update admin only metadata.") LOG.exception(msg) raise exc.HTTPForbidden(explanation=msg) ignore_keys = [] rv = db.share_metadata_get(context, share['id']) orig_meta = dict(rv.items()) if delete: _metadata = metadata for key in ignore_keys: if key in orig_meta: _metadata[key] = orig_meta[key] else: metadata_copy = metadata.copy() for key in ignore_keys: metadata_copy.pop(key, None) _metadata = orig_meta.copy() _metadata.update(metadata_copy) api_common.check_metadata_properties(_metadata) db.share_metadata_update(context, share['id'], _metadata, delete) return _metadata except exception.NotFound: msg = _('share does not exist') raise exc.HTTPNotFound(explanation=msg) except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) except exception.InvalidMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidMetadataSize as error: raise exc.HTTPBadRequest(explanation=error.msg) def show(self, req, share_id, id): """Return a single metadata item.""" context = req.environ['manila.context'] data = self._get_metadata(context, share_id) try: return {'meta': {id: data[id]}} except KeyError: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) def delete(self, req, share_id, id): """Deletes an existing metadata.""" context = 
req.environ['manila.context'] metadata = self._get_metadata(context, share_id) if id not in metadata: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) try: share = self.share_api.get(context, share_id) admin_only_metadata_keys = ( getattr(CONF, 'admin_only_metadata', set()) ) if id in admin_only_metadata_keys: policy.check_policy(context, 'share', 'update_admin_only_metadata') db.share_metadata_delete(context, share['id'], id) except exception.NotFound: msg = _('share does not exist') raise exc.HTTPNotFound(explanation=msg) except exception.PolicyNotAuthorized: msg = _("Cannot delete admin only metadata.") LOG.exception(msg) raise exc.HTTPForbidden(explanation=msg) return webob.Response(status_int=http_client.OK) def create_resource(): return wsgi.Resource(ShareMetadataController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/share_servers.py0000664000175000017500000001342600000000000020651 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client

from oslo_log import log
import webob
from webob import exc

from manila.api.openstack import wsgi
from manila.api.views import share_servers as share_servers_views
from manila.common import constants
from manila.db import api as db_api
from manila import exception
from manila.i18n import _
from manila import share

LOG = log.getLogger(__name__)


class ShareServerController(wsgi.Controller):
    """The Share Server API controller for the OpenStack API."""

    _view_builder_class = share_servers_views.ViewBuilder
    resource_name = 'share_server'

    def __init__(self):
        self.share_api = share.API()
        super(ShareServerController, self).__init__()

    @wsgi.Controller.authorize
    def index(self, req):
        """Returns a list of share servers."""
        context = req.environ['manila.context']

        # All query-string parameters are treated as filters and applied
        # client-side below, after the servers are loaded.
        search_opts = {}
        search_opts.update(req.GET)

        share_servers = db_api.share_server_get_all(context)
        for s in share_servers:
            # Annotate each server with its network's project id and a
            # display name (network name, falling back to the network id).
            try:
                share_network = db_api.share_network_get(
                    context, s.share_network_id)
                s.project_id = share_network['project_id']
                if share_network['name']:
                    s.share_network_name = share_network['name']
                else:
                    s.share_network_name = share_network['id']
            except exception.ShareNetworkNotFound:
                # NOTE(dviroel): The share-network may already be deleted while
                # the share-server is in 'deleting' state. In this scenario,
                # we will return some empty values.
                LOG.debug("Unable to retrieve share network details for share "
                          "server %(server)s, the network %(network)s was "
                          "not found.",
                          {'server': s.id, 'network': s.share_network_id})
                s.project_id = ''
                s.share_network_name = ''
        if search_opts:
            # Each filter keeps a server when: the attribute equals the
            # value, or the special 'share_network' key matches either the
            # network name or id, or 'share_network_subnet_id' appears in
            # the server's subnet list. Relies on `and` binding tighter
            # than `or`.
            for k, v in search_opts.items():
                share_servers = [s for s in share_servers if
                                 (hasattr(s, k) and
                                  s[k] == v or
                                  k == 'share_network' and
                                  v in [s.share_network_name,
                                        s.share_network_id] or
                                  k == 'share_network_subnet_id' and
                                  v in s.share_network_subnet_ids)]
        return self._view_builder.build_share_servers(req, share_servers)

    @wsgi.Controller.authorize
    def show(self, req, id):
        """Return data about the requested share server."""
        context = req.environ['manila.context']
        try:
            server = db_api.share_server_get(context, id)
            # Same annotation as index(): project id plus a display name.
            share_network = db_api.share_network_get(
                context, server['share_network_id'])
            server.project_id = share_network['project_id']
            if share_network['name']:
                server.share_network_name = share_network['name']
            else:
                server.share_network_name = share_network['id']
        except exception.ShareServerNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        except exception.ShareNetworkNotFound:
            # ShareNetworkNotFound can only be raised after ``server`` was
            # fetched successfully, so referencing it here is safe.
            msg = _("Share server could not be found. Its associated share "
                    "network %s does not exist.") % server['share_network_id']
            raise exc.HTTPNotFound(explanation=msg)
        return self._view_builder.build_share_server(req, server)

    @wsgi.Controller.authorize
    def details(self, req, id):
        """Return details for requested share server."""
        context = req.environ['manila.context']
        try:
            share_server = db_api.share_server_get(context, id)
        except exception.ShareServerNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        return self._view_builder.build_share_server_details(
            share_server['backend_details'])

    @wsgi.Controller.authorize
    def delete(self, req, id):
        """Delete specified share server."""
        context = req.environ['manila.context']
        try:
            share_server = db_api.share_server_get(context, id)
        except exception.ShareServerNotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        # Deletion is only allowed from a terminal state.
        allowed_statuses = [constants.STATUS_ERROR, constants.STATUS_ACTIVE]
        if share_server['status'] not in allowed_statuses:
            data = {
                'status': share_server['status'],
                'allowed_statuses': allowed_statuses,
            }
            msg = _("Share server's actual status is %(status)s, allowed "
                    "statuses for deletion are %(allowed_statuses)s.") % (data)
            raise exc.HTTPForbidden(explanation=msg)
        LOG.debug("Deleting share server with id: %s.", id)
        try:
            self.share_api.delete_share_server(context, share_server)
        except exception.ShareServerInUse as e:
            raise exc.HTTPConflict(explanation=e.msg)
        # 202: deletion proceeds asynchronously in the share service.
        return webob.Response(status_int=http_client.ACCEPTED)


def create_resource():
    return wsgi.Resource(ShareServerController())
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/api/v1/share_snapshots.py0000664000175000017500000002475100000000000021205 0ustar00zuulzuul00000000000000
# Copyright 2013 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The share snapshots api.""" import ast from http import client as http_client from oslo_log import log import webob from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.views import share_snapshots as snapshot_views from manila import db from manila import exception from manila.i18n import _ from manila import policy from manila import share from manila import utils LOG = log.getLogger(__name__) class ShareSnapshotMixin(object): """Mixin class for Share Snapshot Controllers.""" def _update(self, *args, **kwargs): db.share_snapshot_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.share_api.get_snapshot(*args, **kwargs) def _delete(self, *args, **kwargs): return self.share_api.delete_snapshot(*args, **kwargs) def show(self, req, id): """Return data about the given snapshot.""" context = req.environ['manila.context'] try: snapshot = self.share_api.get_snapshot(context, id) # Snapshot with no instances is filtered out. 
if snapshot.get('status') is None: raise exc.HTTPNotFound() except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, snapshot) def delete(self, req, id): """Delete a snapshot.""" context = req.environ['manila.context'] LOG.info("Delete snapshot with id: %s", id, context=context) policy.check_policy(context, 'share', 'delete_snapshot') try: snapshot = self.share_api.get_snapshot(context, id) self.share_api.delete_snapshot(context, snapshot) except exception.NotFound: raise exc.HTTPNotFound() return webob.Response(status_int=http_client.ACCEPTED) def index(self, req): """Returns a summary list of snapshots.""" req.GET.pop('name~', None) req.GET.pop('description~', None) req.GET.pop('description', None) return self._get_snapshots(req, is_detail=False) def detail(self, req): """Returns a detailed list of snapshots.""" req.GET.pop('name~', None) req.GET.pop('description~', None) req.GET.pop('description', None) return self._get_snapshots(req, is_detail=True) def _get_snapshots(self, req, is_detail): """Returns a list of snapshots.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) params = common.get_pagination_params(req) limit, offset = [params.get('limit'), params.get('offset')] # Remove keys that are not related to share attrs search_opts.pop('limit', None) search_opts.pop('offset', None) show_count = False if 'with_count' in search_opts: show_count = utils.get_bool_from_api_params( 'with_count', search_opts) search_opts.pop('with_count') sort_key, sort_dir = common.get_sort_params(search_opts) key_dict = {"name": "display_name", "description": "display_description"} for key in key_dict: if sort_key == key: sort_key = key_dict[key] # NOTE(vponomaryov): Manila stores in DB key 'display_name', but # allows to use both keys 'name' and 'display_name'. It is leftover # from Cinder v1 and v2 APIs. 
if 'name' in search_opts: search_opts['display_name'] = search_opts.pop('name') if 'description' in search_opts: search_opts['display_description'] = search_opts.pop( 'description') # Deserialize dicts if req.api_version_request >= api_version.APIVersionRequest("2.73"): if 'metadata' in search_opts: try: search_opts['metadata'] = ast.literal_eval( search_opts['metadata']) except ValueError: msg = _('Invalid value for metadata filter.') raise webob.exc.HTTPBadRequest(explanation=msg) else: search_opts.pop('metadata', None) # like filter for key, db_key in (('name~', 'display_name~'), ('description~', 'display_description~')): if key in search_opts: search_opts[db_key] = search_opts.pop(key) common.remove_invalid_options(context, search_opts, self._get_snapshots_search_options()) total_count = None if show_count: count, snapshots = self.share_api.get_all_snapshots_with_count( context, search_opts=search_opts, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) total_count = count else: snapshots = self.share_api.get_all_snapshots( context, search_opts=search_opts, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) if is_detail: snapshots = self._view_builder.detail_list( req, snapshots, total_count) else: snapshots = self._view_builder.summary_list( req, snapshots, total_count) return snapshots def _get_snapshots_search_options(self): """Return share snapshot search options allowed by non-admin.""" return ('display_name', 'status', 'share_id', 'size', 'display_name~', 'display_description~', 'display_description', 'metadata') def update(self, req, id, body): """Update a snapshot.""" context = req.environ['manila.context'] policy.check_policy(context, 'share', 'snapshot_update') if not body or 'snapshot' not in body: raise exc.HTTPUnprocessableEntity() snapshot_data = body['snapshot'] valid_update_keys = ( 'display_name', 'display_description', ) update_dict = {key: snapshot_data[key] for key in valid_update_keys if key in snapshot_data} 
common.check_display_field_length( update_dict.get('display_name'), 'display_name') common.check_display_field_length( update_dict.get('display_description'), 'display_description') try: snapshot = self.share_api.get_snapshot(context, id) except exception.NotFound: raise exc.HTTPNotFound() snapshot = self.share_api.snapshot_update(context, snapshot, update_dict) snapshot.update(update_dict) return self._view_builder.detail(req, snapshot) @wsgi.response(202) def create(self, req, body): """Creates a new snapshot.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'snapshot'): raise exc.HTTPUnprocessableEntity() snapshot = body['snapshot'] share_id = snapshot['share_id'] share = self.share_api.get(context, share_id) # Verify that share can be snapshotted if not share['snapshot_support']: msg = _("Snapshots cannot be created for share '%s' " "since it does not have that capability.") % share_id LOG.error(msg) raise exc.HTTPUnprocessableEntity(explanation=msg) # we do not allow soft delete share with snapshot, and also # do not allow create snapshot for shares in recycle bin, # since it will lead to auto delete share failed. 
if share['is_soft_deleted']: msg = _("Snapshots cannot be created for share '%s' " "since it has been soft deleted.") % share_id raise exc.HTTPForbidden(explanation=msg) LOG.info("Create snapshot from share %s", share_id, context=context) # NOTE(rushiagr): v2 API allows name instead of display_name if 'name' in snapshot: snapshot['display_name'] = snapshot.get('name') common.check_display_field_length( snapshot['display_name'], 'name') del snapshot['name'] # NOTE(rushiagr): v2 API allows description instead of # display_description if 'description' in snapshot: snapshot['display_description'] = snapshot.get('description') common.check_display_field_length( snapshot['display_description'], 'description') del snapshot['description'] kwargs = {} if req.api_version_request >= api_version.APIVersionRequest("2.73"): if snapshot.get('metadata'): metadata = snapshot.get('metadata') kwargs.update({ 'metadata': metadata, }) new_snapshot = self.share_api.create_snapshot( context, share, snapshot.get('display_name'), snapshot.get('display_description'), **kwargs) return self._view_builder.detail( req, dict(new_snapshot.items())) class ShareSnapshotsController(ShareSnapshotMixin, wsgi.Controller, wsgi.AdminActionsMixin): """The Share Snapshots API controller for the OpenStack API.""" resource_name = 'share_snapshot' _view_builder_class = snapshot_views.ViewBuilder def __init__(self): super(ShareSnapshotsController, self).__init__() self.share_api = share.API() @wsgi.action('os-reset_status') def snapshot_reset_status_legacy(self, req, id, body): return self._reset_status(req, id, body) @wsgi.action('os-force_delete') def snapshot_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) def create_resource(): return wsgi.Resource(ShareSnapshotsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/api/v1/share_types_extra_specs.py0000664000175000017500000001664700000000000022734 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client import webob from manila.api import common from manila.api.openstack import wsgi from manila.common import constants from manila import db from manila import exception from manila.i18n import _ from manila import rpc from manila.share import share_types class ShareTypeExtraSpecsController(wsgi.Controller): """The share type extra specs API controller for the OpenStack API.""" resource_name = 'share_types_extra_spec' def _get_extra_specs(self, context, type_id): extra_specs = db.share_type_extra_specs_get(context, type_id) specs_dict = {} for key, value in extra_specs.items(): specs_dict[key] = value return dict(extra_specs=specs_dict) def _check_type(self, context, type_id): try: share_types.get_share_type(context, type_id) except exception.NotFound as ex: raise webob.exc.HTTPNotFound(explanation=ex.msg) def _verify_extra_specs(self, extra_specs, verify_all_required=True): if verify_all_required: try: share_types.get_valid_required_extra_specs(extra_specs) except exception.InvalidExtraSpec as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) def is_valid_string(v): return isinstance(v, str) and len(v) in range(1, 256) def is_valid_extra_spec(k, v): valid_extra_spec_key = 
is_valid_string(k) valid_type = is_valid_string(v) or isinstance(v, bool) valid_required_extra_spec = ( share_types.is_valid_required_extra_spec(k, v) in (None, True)) valid_optional_extra_spec = ( share_types.is_valid_optional_extra_spec(k, v) in (None, True)) return (valid_extra_spec_key and valid_type and valid_required_extra_spec and valid_optional_extra_spec) for k, v in extra_specs.items(): if is_valid_string(k) and isinstance(v, dict): self._verify_extra_specs( v, verify_all_required=verify_all_required) elif not is_valid_extra_spec(k, v): expl = _('Invalid extra_spec: %(key)s: %(value)s') % { 'key': k, 'value': v } raise webob.exc.HTTPBadRequest(explanation=expl) @wsgi.Controller.authorize def index(self, req, type_id): """Returns the list of extra specs for a given share type.""" context = req.environ['manila.context'] self._check_type(context, type_id) return self._get_extra_specs(context, type_id) @wsgi.Controller.authorize def create(self, req, type_id, body=None): context = req.environ['manila.context'] if not self.is_valid_body(body, 'extra_specs'): raise webob.exc.HTTPBadRequest() self._check_type(context, type_id) specs = body['extra_specs'] try: self._verify_extra_specs(specs, False) except exception.InvalidExtraSpec as e: raise webob.exc.HTTPBadRequest(e.msg) self._check_key_names(specs.keys()) specs = share_types.sanitize_extra_specs(specs) db.share_type_extra_specs_update_or_create(context, type_id, specs) notifier_info = dict(type_id=type_id, specs=specs) notifier = rpc.get_notifier('shareTypeExtraSpecs') notifier.info(context, 'share_type_extra_specs.create', notifier_info) return body @wsgi.Controller.authorize def update(self, req, type_id, id, body=None): context = req.environ['manila.context'] if not body: expl = _('Request body empty') raise webob.exc.HTTPBadRequest(explanation=expl) self._check_type(context, type_id) if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) if len(body) 
> 1: expl = _('Request body contains too many items') raise webob.exc.HTTPBadRequest(explanation=expl) self._verify_extra_specs(body, False) specs = share_types.sanitize_extra_specs(body) db.share_type_extra_specs_update_or_create(context, type_id, specs) notifier_info = dict(type_id=type_id, id=id) notifier = rpc.get_notifier('shareTypeExtraSpecs') notifier.info(context, 'share_type_extra_specs.update', notifier_info) return specs @wsgi.Controller.authorize def show(self, req, type_id, id): """Return a single extra spec item.""" context = req.environ['manila.context'] self._check_type(context, type_id) specs = self._get_extra_specs(context, type_id) if id in specs['extra_specs']: return {id: specs['extra_specs'][id]} else: raise webob.exc.HTTPNotFound() @wsgi.Controller.api_version('1.0', '2.23') @wsgi.Controller.authorize def delete(self, req, type_id, id): """Deletes an existing extra spec.""" context = req.environ['manila.context'] self._check_type(context, type_id) if id == constants.ExtraSpecs.SNAPSHOT_SUPPORT: msg = _("Extra spec '%s' can't be deleted.") % id raise webob.exc.HTTPForbidden(explanation=msg) return self._delete(req, type_id, id) @wsgi.Controller.api_version('2.24') # noqa @wsgi.Controller.authorize def delete(self, req, type_id, id): # pylint: disable=function-redefined # noqa F811 """Deletes an existing extra spec.""" context = req.environ['manila.context'] self._check_type(context, type_id) return self._delete(req, type_id, id) def _delete(self, req, type_id, id): """Deletes an existing extra spec.""" context = req.environ['manila.context'] if id in share_types.get_required_extra_specs(): msg = _("Extra spec '%s' can't be deleted.") % id raise webob.exc.HTTPForbidden(explanation=msg) try: db.share_type_extra_specs_delete(context, type_id, id) except exception.ShareTypeExtraSpecsNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) notifier_info = dict(type_id=type_id, id=id) notifier = rpc.get_notifier('shareTypeExtraSpecs') 
notifier.info(context, 'share_type_extra_specs.delete', notifier_info) return webob.Response(status_int=http_client.ACCEPTED) def _check_key_names(self, keys): if not common.validate_key_names(keys): expl = _('Key names can only contain alphanumeric characters, ' 'underscores, periods, colons and hyphens.') raise webob.exc.HTTPBadRequest(explanation=expl) def create_resource(): return wsgi.Resource(ShareTypeExtraSpecsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/share_unmanage.py0000664000175000017500000001000000000000000020734 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client from oslo_log import log import webob from webob import exc from manila.api.openstack import wsgi from manila.common import constants from manila import exception from manila.i18n import _ from manila import share LOG = log.getLogger(__name__) class ShareUnmanageMixin(object): @wsgi.Controller.authorize("unmanage") def _unmanage(self, req, id, body=None, allow_dhss_true=False): """Unmanage a share.""" context = req.environ['manila.context'] LOG.info("Unmanage share with id: %s", id, context=context) try: share = self.share_api.get(context, id) if share.get('is_soft_deleted'): msg = _("Share '%s cannot be unmanaged, " "since it has been soft deleted.") % share['id'] raise exc.HTTPForbidden(explanation=msg) if share.get('has_replicas'): msg = _("Share %s has replicas. It cannot be unmanaged " "until all replicas are removed.") % share['id'] raise exc.HTTPConflict(explanation=msg) if (not allow_dhss_true and share['instance'].get('share_server_id')): msg = _("Operation 'unmanage' is not supported for shares " "that are created on top of share servers " "(created with share-networks).") raise exc.HTTPForbidden(explanation=msg) elif share['status'] in constants.TRANSITIONAL_STATUSES: msg = _("Share with transitional state can not be unmanaged. 
" "Share '%(s_id)s' is in '%(state)s' state.") % dict( state=share['status'], s_id=share['id']) raise exc.HTTPForbidden(explanation=msg) snapshots = self.share_api.db.share_snapshot_get_all_for_share( context, id) if snapshots: msg = _("Share '%(s_id)s' can not be unmanaged because it has " "'%(amount)s' dependent snapshot(s).") % { 's_id': id, 'amount': len(snapshots)} raise exc.HTTPForbidden(explanation=msg) filters = {'share_id': id} backups = self.share_api.db.share_backups_get_all(context, filters) if backups: msg = _("Share '%(s_id)s' can not be unmanaged because it has " "'%(amount)s' dependent backup(s).") % { 's_id': id, 'amount': len(backups)} raise exc.HTTPForbidden(explanation=msg) self.share_api.unmanage(context, share) except exception.NotFound as e: raise exc.HTTPNotFound(explanation=e.msg) except (exception.InvalidShare, exception.PolicyNotAuthorized) as e: raise exc.HTTPForbidden(explanation=e.msg) return webob.Response(status_int=http_client.ACCEPTED) class ShareUnmanageController(ShareUnmanageMixin, wsgi.Controller): """The Unmanage API controller for the OpenStack API.""" resource_name = "share" def __init__(self, *args, **kwargs): super(ShareUnmanageController, self).__init__(*args, **kwargs) self.share_api = share.API() @wsgi.Controller.api_version('1.0', '2.6') def unmanage(self, req, id): return self._unmanage(req, id) def create_resource(): return wsgi.Resource(ShareUnmanageController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v1/shares.py0000664000175000017500000010426100000000000017261 0ustar00zuulzuul00000000000000# Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The shares api."""

import ast
from http import client as http_client

from oslo_log import log
from oslo_utils import strutils
from oslo_utils import uuidutils
import webob
from webob import exc

from manila.api import common
from manila.api.openstack import api_version_request as api_version
from manila.api.openstack import wsgi
from manila.api.views import share_accesses as share_access_views
from manila.api.views import shares as share_views
from manila.common import constants
from manila import db
from manila import exception
from manila.i18n import _
from manila.lock import api as resource_locks
from manila import share
from manila.share import share_types
from manila import utils

LOG = log.getLogger(__name__)


class ShareMixin(object):
    """Mixin class for Share API Controllers."""

    def _update(self, *args, **kwargs):
        db.share_update(*args, **kwargs)

    def _get(self, *args, **kwargs):
        return self.share_api.get(*args, **kwargs)

    def _delete(self, *args, **kwargs):
        return self.share_api.delete(*args, **kwargs)

    @wsgi.Controller.authorize('get')
    def show(self, req, id):
        """Return data about the given share."""
        context = req.environ['manila.context']

        try:
            share = self.share_api.get(context, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        return self._view_builder.detail(req, share)

    @wsgi.Controller.authorize
    def delete(self, req, id):
        """Delete a share."""
        context = req.environ['manila.context']

        LOG.info("Delete share with id: %s", id, context=context)

        try:
            share = self.share_api.get(context, id)

            # NOTE(ameade): If the share is in a share group, we require its
            # id be specified as a param.
            sg_id_key = 'share_group_id'
            if share.get(sg_id_key):
                share_group_id = req.params.get(sg_id_key)
                if not share_group_id:
                    msg = _("Must provide '%s' as a request "
                            "parameter when deleting a share in a share "
                            "group.") % sg_id_key
                    raise exc.HTTPBadRequest(explanation=msg)
                elif share_group_id != share.get(sg_id_key):
                    msg = _("The specified '%s' does not match "
                            "the share group id of the share.") % sg_id_key
                    raise exc.HTTPBadRequest(explanation=msg)

            self.share_api.delete(context, share)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.InvalidShare as e:
            raise exc.HTTPForbidden(explanation=e.msg)
        except exception.Conflict as e:
            raise exc.HTTPConflict(explanation=e.msg)

        return webob.Response(status_int=http_client.ACCEPTED)

    @wsgi.Controller.authorize("get_all")
    def index(self, req):
        """Returns a summary list of shares."""
        # These filters are only meaningful for the detail view.
        req.GET.pop('export_location_id', None)
        req.GET.pop('export_location_path', None)
        req.GET.pop('name~', None)
        req.GET.pop('description~', None)
        req.GET.pop('description', None)
        req.GET.pop('with_count', None)
        return self._get_shares(req, is_detail=False)

    @wsgi.Controller.authorize("get_all")
    def detail(self, req):
        """Returns a detailed list of shares."""
        req.GET.pop('export_location_id', None)
        req.GET.pop('export_location_path', None)
        req.GET.pop('name~', None)
        req.GET.pop('description~', None)
        req.GET.pop('description', None)
        req.GET.pop('with_count', None)
        return self._get_shares(req, is_detail=True)

    def _get_shares(self, req, is_detail):
        """Returns a list of shares, transformed through view builder."""
        context = req.environ['manila.context']
        common._validate_pagination_query(req)

        search_opts = {}
        search_opts.update(req.GET)

        # Remove keys that are not related to share attrs
        sort_key = search_opts.pop('sort_key', 'created_at')
        sort_dir = search_opts.pop('sort_dir', 'desc')

        show_count = False
        if 'with_count' in search_opts:
            show_count = utils.get_bool_from_api_params(
                'with_count', search_opts)
            search_opts.pop('with_count')

        if 'is_soft_deleted' in search_opts:
            is_soft_deleted = utils.get_bool_from_api_params(
                'is_soft_deleted', search_opts)
            search_opts['is_soft_deleted'] = is_soft_deleted

        # Deserialize dicts
        # BUG FIX: the unguarded ast.literal_eval calls turned a malformed
        # filter value into an HTTP 500; wrap them and return 400 instead,
        # consistent with _get_snapshots() in the snapshots controller.
        if 'metadata' in search_opts:
            try:
                search_opts['metadata'] = ast.literal_eval(
                    search_opts['metadata'])
            except ValueError:
                msg = _('Invalid value for metadata filter.')
                raise exc.HTTPBadRequest(explanation=msg)
        if 'extra_specs' in search_opts:
            try:
                search_opts['extra_specs'] = ast.literal_eval(
                    search_opts['extra_specs'])
            except ValueError:
                msg = _('Invalid value for extra_specs filter.')
                raise exc.HTTPBadRequest(explanation=msg)

        # NOTE(vponomaryov): Manila stores in DB key 'display_name', but
        # allows to use both keys 'name' and 'display_name'. It is leftover
        # from Cinder v1 and v2 APIs.
        if 'name' in search_opts:
            search_opts['display_name'] = search_opts.pop('name')
        if 'description' in search_opts:
            search_opts['display_description'] = search_opts.pop(
                'description')

        # like filter
        for key, db_key in (('name~', 'display_name~'),
                            ('description~', 'display_description~')):
            if key in search_opts:
                search_opts[db_key] = search_opts.pop(key)

        if sort_key == 'name':
            sort_key = 'display_name'

        common.remove_invalid_options(
            context, search_opts, self._get_share_search_options())

        total_count = None
        if show_count:
            count, shares = self.share_api.get_all_with_count(
                context, search_opts=search_opts, sort_key=sort_key,
                sort_dir=sort_dir)
            total_count = count
        else:
            shares = self.share_api.get_all(
                context, search_opts=search_opts, sort_key=sort_key,
                sort_dir=sort_dir)

        if is_detail:
            shares = self._view_builder.detail_list(req, shares, total_count)
        else:
            shares = self._view_builder.summary_list(req, shares, total_count)
        return shares

    def _get_share_search_options(self):
        """Return share search options allowed by non-admin."""
        # NOTE(vponomaryov): share_server_id depends on policy, allow search
        #                    by it for non-admins in case policy changed.
        #                    Also allow search by extra_specs in case policy
        #                    for it allows non-admin access.
return ( 'display_name', 'status', 'share_server_id', 'volume_type_id', 'share_type_id', 'snapshot_id', 'host', 'share_network_id', 'is_public', 'metadata', 'extra_specs', 'sort_key', 'sort_dir', 'share_group_id', 'share_group_snapshot_id', 'export_location_id', 'export_location_path', 'display_name~', 'display_description~', 'display_description', 'limit', 'offset', 'is_soft_deleted', 'mount_point_name') @wsgi.Controller.authorize def update(self, req, id, body): """Update a share.""" context = req.environ['manila.context'] if not body or 'share' not in body: raise exc.HTTPUnprocessableEntity() share_data = body['share'] valid_update_keys = ( 'display_name', 'display_description', 'is_public', ) update_dict = {key: share_data[key] for key in valid_update_keys if key in share_data} common.check_display_field_length( update_dict.get('display_name'), 'display_name') common.check_display_field_length( update_dict.get('display_description'), 'display_description') try: share = self.share_api.get(context, id) except exception.NotFound: raise exc.HTTPNotFound() if share.get('is_soft_deleted'): msg = _("Share '%s cannot be updated, " "since it has been soft deleted.") % share['id'] raise exc.HTTPForbidden(explanation=msg) update_dict = common.validate_public_share_policy( context, update_dict, api='update') share = self.share_api.update(context, share, update_dict) share.update(update_dict) return self._view_builder.detail(req, share) def create(self, req, body): # Remove share group attributes body.get('share', {}).pop('share_group_id', None) share = self._create(req, body) return share @wsgi.Controller.authorize('create') def _create(self, req, body, check_create_share_from_snapshot_support=False, check_availability_zones_extra_spec=False, scheduler_hints=None, encryption_key_ref=None): """Creates a new share.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share'): raise exc.HTTPUnprocessableEntity() share = body['share'] share = 
common.validate_public_share_policy(context, share) # NOTE(rushiagr): Manila API allows 'name' instead of 'display_name'. if share.get('name'): share['display_name'] = share.get('name') common.check_display_field_length(share['display_name'], 'name') del share['name'] # NOTE(rushiagr): Manila API allows 'description' instead of # 'display_description'. if share.get('description'): share['display_description'] = share.get('description') common.check_display_field_length( share['display_description'], 'description') del share['description'] size = share['size'] share_proto = share['share_proto'].upper() msg = ("Create %(share_proto)s share of %(size)s GB" % {'share_proto': share_proto, 'size': size}) LOG.info(msg, context=context) availability_zone_id = None availability_zone = share.get('availability_zone') if availability_zone: try: availability_zone_db = db.availability_zone_get( context, availability_zone) availability_zone_id = availability_zone_db.id availability_zone = availability_zone_db.name except exception.AvailabilityZoneNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) share_group_id = share.get('share_group_id') if share_group_id: try: share_group = db.share_group_get(context, share_group_id) except exception.ShareGroupNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) sg_az_id = share_group['availability_zone_id'] if availability_zone and availability_zone_id != sg_az_id: msg = _("Share cannot have AZ ('%(s_az)s') different than " "share group's one (%(sg_az)s).") % { 's_az': availability_zone_id, 'sg_az': sg_az_id} raise exception.InvalidInput(msg) availability_zone = db.availability_zone_get( context, sg_az_id).name kwargs = { 'availability_zone': availability_zone, 'metadata': share.get('metadata'), 'is_public': share.get('is_public', False), 'share_group_id': share_group_id, } snapshot_id = share.get('snapshot_id') if snapshot_id: snapshot = self.share_api.get_snapshot(context, snapshot_id) else: snapshot = None kwargs['snapshot_id'] 
= snapshot_id share_network_id = share.get('share_network_id') parent_share_type = {} if snapshot: # Need to check that share_network_id from snapshot's # parents share equals to share_network_id from args. # If share_network_id is empty then update it with # share_network_id of parent share. parent_share = self.share_api.get(context, snapshot['share_id']) parent_share_net_id = parent_share.instance['share_network_id'] parent_share_type = share_types.get_share_type( context, parent_share.instance['share_type_id']) if share_network_id: if share_network_id != parent_share_net_id: msg = ("Share network ID should be the same as snapshot's" " parent share's or empty") raise exc.HTTPBadRequest(explanation=msg) elif parent_share_net_id: share_network_id = parent_share_net_id # Verify that share can be created from a snapshot if (check_create_share_from_snapshot_support and not parent_share['create_share_from_snapshot_support']): msg = (_("A new share may not be created from snapshot '%s', " "because the snapshot's parent share does not have " "that capability.") % snapshot_id) LOG.error(msg) raise exc.HTTPBadRequest(explanation=msg) if share_network_id: try: share_network = self.share_api.get_share_network( context, share_network_id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) common.check_share_network_is_active(share_network) if availability_zone_id: subnets = ( db.share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, availability_zone_id=availability_zone_id)) if not subnets: msg = _("A share network subnet was not found for the " "requested availability zone.") raise exc.HTTPBadRequest(explanation=msg) kwargs['az_request_multiple_subnet_support_map'] = { availability_zone_id: len(subnets) > 1, } display_name = share.get('display_name') display_description = share.get('display_description') if 'share_type' in share and 'volume_type' in share: msg = 'Cannot specify both share_type and volume_type' 
raise exc.HTTPBadRequest(explanation=msg) req_share_type = share.get('share_type', share.get('volume_type')) share_type = None if req_share_type: try: if not uuidutils.is_uuid_like(req_share_type): share_type = share_types.get_share_type_by_name( context, req_share_type) else: share_type = share_types.get_share_type( context, req_share_type) except (exception.ShareTypeNotFound, exception.ShareTypeNotFoundByName): msg = _("Share type not found.") raise exc.HTTPNotFound(explanation=msg) except exception.InvalidShareType as e: raise exc.HTTPBadRequest(explanation=e.message) elif not snapshot: def_share_type = share_types.get_default_share_type() if def_share_type: share_type = def_share_type # Only use in create share feature. Create share from snapshot # and create share with share group features not # need this check. if share_type and share_type.get('extra_specs'): dhss = (strutils.bool_from_string( share_type.get('extra_specs').get( 'driver_handles_share_servers'))) else: dhss = False if (not share_network_id and not snapshot and not share_group_id and dhss): msg = _('Share network must be set when the ' 'driver_handles_share_servers is true.') raise exc.HTTPBadRequest(explanation=msg) type_chosen = share_type or parent_share_type if type_chosen and check_availability_zones_extra_spec: type_azs = type_chosen.get( 'extra_specs', {}).get('availability_zones', '') type_azs = type_azs.split(',') if type_azs else [] kwargs['availability_zones'] = type_azs if (availability_zone and type_azs and availability_zone not in type_azs): msg = _("Share type %(type)s is not supported within the " "availability zone chosen %(az)s.") type_chosen = ( req_share_type or "%s (from source snapshot)" % ( parent_share_type.get('name') or parent_share_type.get('id')) ) payload = {'type': type_chosen, 'az': availability_zone} raise exc.HTTPBadRequest(explanation=msg % payload) if share_type and encryption_key_ref: type_enc = share_type.get( 'extra_specs', {}).get('encryption_support') if 
type_enc not in constants.SUPPORTED_ENCRYPTION_TYPES: msg = _("Share type %(type)s extra-specs 'encryption_support' " "is missing valid value e.g. share, share_server.") payload = {'type': share_type} raise exc.HTTPBadRequest(explanation=msg % payload) if not dhss: msg = _("Share type %(type)s must set dhss=True for share " "encryption.") payload = {'type': share_type} raise exc.HTTPBadRequest(explanation=msg % payload) if share_type: kwargs['share_type'] = share_type if share_network_id: kwargs['share_network_id'] = share_network_id kwargs['scheduler_hints'] = scheduler_hints kwargs['encryption_key_ref'] = encryption_key_ref if req.api_version_request >= api_version.APIVersionRequest("2.84"): kwargs['mount_point_name'] = share.pop('mount_point_name', None) new_share = self.share_api.create(context, share_proto, size, display_name, display_description, **kwargs) return self._view_builder.detail(req, new_share) @staticmethod def _any_instance_has_errored_rules(share): for instance in share['instances']: access_rules_status = instance['access_rules_status'] if access_rules_status == constants.SHARE_INSTANCE_RULES_ERROR: return True return False def _create_access_locks( self, context, access, lock_deletion=False, lock_visibility=False, lock_reason=None): """Creates locks for access rules and rollback if it fails.""" # We must populate project_id and user_id in the access object, as this # is not in this entity access['project_id'] = context.project_id access['user_id'] = context.user_id def raise_lock_failed(resource_id, lock_action, resource_type='access rule'): word_mapping = { constants.RESOURCE_ACTION_SHOW: 'visibility', constants.RESOURCE_ACTION_DELETE: 'deletion' } msg = _("Failed to lock the %(action)s of the %(resource_type)s " "%(resource_id)s.") % { 'action': word_mapping[lock_action], 'resource_id': resource_id, 'resource_type': resource_type } raise webob.exc.HTTPBadRequest(explanation=msg) access_deletion_lock = {} share_deletion_lock = {} if 
lock_deletion: try: access_deletion_lock = self.resource_locks_api.create( context, resource_id=access['id'], resource_type='access_rule', resource_action=constants.RESOURCE_ACTION_DELETE, resource=access, lock_reason=lock_reason) except Exception: raise_lock_failed( access['id'], constants.RESOURCE_ACTION_DELETE ) try: share_lock_reason = ( constants.SHARE_LOCKED_BY_ACCESS_LOCK_REASON % { 'lock_id': access_deletion_lock['id'] } ) share_deletion_lock = self.resource_locks_api.create( context, resource_id=access['share_id'], resource_type='share', resource_action=constants.RESOURCE_ACTION_DELETE, lock_reason=share_lock_reason) except Exception: self.resource_locks_api.delete( context, access_deletion_lock['id']) raise_lock_failed( access['share_id'], constants.RESOURCE_ACTION_DELETE, resource_type='share' ) if lock_visibility: try: self.resource_locks_api.create( context, resource_id=access['id'], resource_type='access_rule', resource_action=constants.RESOURCE_ACTION_SHOW, resource=access, lock_reason=lock_reason) except Exception: # If a deletion lock was placed and the visibility wasn't, # we should rollback the deletion lock. 
if access_deletion_lock: self.resource_locks_api.delete( context, access_deletion_lock['id']) if share_deletion_lock: self.resource_locks_api.delete( context, share_deletion_lock['id']) raise_lock_failed(access['id'], constants.RESOURCE_ACTION_SHOW) @wsgi.Controller.authorize('allow_access') def _allow_access(self, req, id, body, enable_ceph=False, allow_on_error_status=False, enable_ipv6=False, enable_metadata=False, allow_on_error_state=False, lock_visibility=False, lock_deletion=False, lock_reason=None): """Add share access rule.""" context = req.environ['manila.context'] access_data = body.get('allow_access', body.get('os-allow_access')) if not enable_metadata: access_data.pop('metadata', None) share = self.share_api.get(context, id) if share.get('is_soft_deleted'): msg = _("Cannot allow access for share '%s' " "since it has been soft deleted.") % id raise exc.HTTPForbidden(explanation=msg) share_network_id = share.get('share_network_id') if share_network_id: share_network = db.share_network_get(context, share_network_id) common.check_share_network_is_active(share_network) if (not allow_on_error_status and self._any_instance_has_errored_rules(share)): msg = _("Access rules cannot be added while the share or any of " "its replicas or migration copies has its " "access_rules_status set to %(instance_rules_status)s. 
" "Deny any rules in %(rule_state)s state and try " "again.") % { 'instance_rules_status': constants.SHARE_INSTANCE_RULES_ERROR, 'rule_state': constants.ACCESS_STATE_ERROR, } raise webob.exc.HTTPBadRequest(explanation=msg) if not (lock_visibility or lock_deletion) and lock_reason: msg = _("Lock reason can only be specified when locking the " "visibility or the deletion of an access rule.") raise webob.exc.HTTPBadRequest(explanation=msg) access_type = access_data['access_type'] access_to = access_data['access_to'] common.validate_access(access_type=access_type, access_to=access_to, enable_ceph=enable_ceph, enable_ipv6=enable_ipv6) try: access = self.share_api.allow_access( context, share, access_type, access_to, access_data.get('access_level'), access_data.get('metadata'), allow_on_error_state) except exception.ShareAccessExists as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.InvalidMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidMetadataSize as error: raise exc.HTTPBadRequest(explanation=error.msg) if lock_deletion or lock_visibility: self._create_access_locks( context, access, lock_deletion=lock_deletion, lock_visibility=lock_visibility, lock_reason=lock_reason) return self._access_view_builder.view(req, access) def _check_for_access_rule_locks(self, context, access_data, access_id, share_id): """Fetches locks for access rules and attempts deleting them.""" # ensure the requester is asking to remove the restrictions of the rule unrestrict = access_data.get('unrestrict', False) search_opts = { 'resource_id': access_id, 'resource_action': constants.RESOURCE_ACTION_DELETE, 'all_projects': True, } locks, locks_count = ( self.resource_locks_api.get_all( context.elevated(), search_opts=search_opts, show_count=True) or [] ) # no locks placed, nothing to do if not locks: return def raise_rule_is_locked(share_id, unrestrict=False): msg = _( "Cannot deny access for share '%s' since it has been " "locked. 
Please remove the locks and retry the " "operation") % share_id if unrestrict: msg = _( "Unable to drop access rule restrictions that are not " "placed by you.") raise exc.HTTPForbidden(explanation=msg) if locks_count and not unrestrict: raise_rule_is_locked(share_id) non_deletable_locks = [] for lock in locks: try: self.resource_locks_api.ensure_context_can_delete_lock( context, lock['id']) except (exception.NotAuthorized, exception.ResourceLockNotFound): # If it is not found, then it means that the context doesn't # have access to this resource and should be denied. non_deletable_locks.append(lock) if non_deletable_locks: raise_rule_is_locked(share_id, unrestrict=unrestrict) @wsgi.Controller.authorize('deny_access') def _deny_access(self, req, id, body, allow_on_error_state=False): """Remove share access rule.""" context = req.environ['manila.context'] access_data = body.get('deny_access', body.get('os-deny_access')) access_id = access_data['access_id'] self._check_for_access_rule_locks(context, access_data, access_id, id) share = self.share_api.get(context, id) if share.get('is_soft_deleted'): msg = _("Cannot deny access for share '%s' " "since it has been soft deleted.") % id raise exc.HTTPForbidden(explanation=msg) share_network_id = share.get('share_network_id', None) if share_network_id: share_network = db.share_network_get(context, share_network_id) common.check_share_network_is_active(share_network) try: access = self.share_api.access_get(context, access_id) if access.share_id != id: raise exception.NotFound() share = self.share_api.get(context, id) except exception.NotFound as error: raise webob.exc.HTTPNotFound(explanation=error.message) self.share_api.deny_access(context, share, access, allow_on_error_state) return webob.Response(status_int=http_client.ACCEPTED) def _access_list(self, req, id, body): """List share access rules.""" context = req.environ['manila.context'] share = self.share_api.get(context, id) access_rules = 
self.share_api.access_get_all(context, share) return self._access_view_builder.list_view(req, access_rules) @wsgi.Controller.authorize("extend") def _extend(self, req, id, body): """Extend size of a share.""" context = req.environ['manila.context'] share, size, force = self._get_valid_extend_parameters( context, id, body, 'os-extend') if share.get('is_soft_deleted'): msg = _("Cannot extend share '%s' " "since it has been soft deleted.") % id raise exc.HTTPForbidden(explanation=msg) try: self.share_api.extend(context, share, size, force=force) except (exception.InvalidInput, exception.InvalidShare) as e: raise webob.exc.HTTPBadRequest(explanation=str(e)) except exception.ShareSizeExceedsAvailableQuota as e: raise webob.exc.HTTPForbidden(explanation=e.message) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.authorize("shrink") def _shrink(self, req, id, body): """Shrink size of a share.""" context = req.environ['manila.context'] share, size = self._get_valid_shrink_parameters( context, id, body, 'os-shrink') if share.get('is_soft_deleted'): msg = _("Cannot shrink share '%s' " "since it has been soft deleted.") % id raise exc.HTTPForbidden(explanation=msg) try: self.share_api.shrink(context, share, size) except (exception.InvalidInput, exception.InvalidShare) as e: raise webob.exc.HTTPBadRequest(explanation=str(e)) return webob.Response(status_int=http_client.ACCEPTED) def _get_valid_extend_parameters(self, context, id, body, action): try: share = self.share_api.get(context, id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.message) try: size = int(body.get(action, body.get('extend'))['new_size']) except (KeyError, ValueError, TypeError): msg = _("New share size must be specified as an integer.") raise webob.exc.HTTPBadRequest(explanation=msg) # force is True means share extend will extend directly, is False # means will go through scheduler. 
Default value is False, try: force = strutils.bool_from_string(body.get( action, body.get('extend'))['force'], strict=True) except KeyError: force = False except (ValueError, TypeError): msg = (_('Invalid boolean force : %(value)s') % {'value': body.get('extend')['force']}) raise webob.exc.HTTPBadRequest(explanation=msg) return share, size, force def _get_valid_shrink_parameters(self, context, id, body, action): try: share = self.share_api.get(context, id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.message) try: size = int(body.get(action, body.get('shrink'))['new_size']) except (KeyError, ValueError, TypeError): msg = _("New share size must be specified as an integer.") raise webob.exc.HTTPBadRequest(explanation=msg) return share, size class ShareController(wsgi.Controller, ShareMixin, wsgi.AdminActionsMixin): """The Shares API v1 controller for the OpenStack API.""" resource_name = 'share' _view_builder_class = share_views.ViewBuilder def __init__(self): super(ShareController, self).__init__() self.share_api = share.API() self.resource_locks_api = resource_locks.API() self._access_view_builder = share_access_views.ViewBuilder() @wsgi.action('os-reset_status') def share_reset_status(self, req, id, body): """Reset status of a share.""" return self._reset_status(req, id, body) @wsgi.action('os-force_delete') def share_force_delete(self, req, id, body): """Delete a share, bypassing the check for status.""" return self._force_delete(req, id, body) @wsgi.action('os-allow_access') def allow_access(self, req, id, body): """Add share access rule.""" return self._allow_access(req, id, body) @wsgi.action('os-deny_access') def deny_access(self, req, id, body): """Remove share access rule.""" return self._deny_access(req, id, body) @wsgi.action('os-access_list') def access_list(self, req, id, body): """List share access rules.""" return self._access_list(req, id, body) @wsgi.action('os-extend') def extend(self, req, id, body): """Extend size of 
a share.""" return self._extend(req, id, body) @wsgi.action('os-shrink') def shrink(self, req, id, body): """Shrink size of a share.""" return self._shrink(req, id, body) def create_resource(): return wsgi.Resource(ShareController()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8376727 manila-21.0.0/manila/api/v2/0000775000175000017500000000000000000000000015417 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/__init__.py0000664000175000017500000000000000000000000017516 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/availability_zones.py0000664000175000017500000000515400000000000021666 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # Copyright (c) 2015 Mirantis inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api.openstack import wsgi from manila.api.schemas import availability_zones as schema from manila.api import validation from manila.api.views import availability_zones as availability_zones_views from manila import db class AvailabilityZoneMixin(object): """The Availability Zone API controller common logic. Mixin class that should be inherited by Availability Zone API controllers, which are used for different API URLs and microversions. 
""" resource_name = "availability_zone" _view_builder_class = availability_zones_views.ViewBuilder @wsgi.Controller.authorize("index") def _index(self, req): """Describe all known availability zones.""" views = db.availability_zone_get_all(req.environ['manila.context']) return self._view_builder.detail_list(views) @validation.validated class AvailabilityZoneControllerLegacy(AvailabilityZoneMixin, wsgi.Controller): """Deprecated Availability Zone API controller. Used by legacy API v1 and v2 microversions from 2.0 to 2.6. Registered under deprecated API URL 'os-availability-zone'. """ @wsgi.Controller.api_version('1.0', '2.6') @validation.request_query_schema(schema.index_request_query) @validation.response_body_schema(schema.index_response_body) def index(self, req): return self._index(req) @validation.validated class AvailabilityZoneController(AvailabilityZoneMixin, wsgi.Controller): """Availability Zone API controller. Used only by API v2 starting from microversion 2.7. Registered under API URL 'availability-zones'. """ @wsgi.Controller.api_version('2.7') @validation.request_query_schema(schema.index_request_query) @validation.response_body_schema(schema.index_response_body) def index(self, req): return self._index(req) def create_resource_legacy(): return wsgi.Resource(AvailabilityZoneControllerLegacy()) def create_resource(): return wsgi.Resource(AvailabilityZoneController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/messages.py0000664000175000017500000001257200000000000017607 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The messages API controller module. This module handles the following requests: GET /messages GET /messages/ DELETE /messages/ """ from http import client as http_client from oslo_utils import timeutils import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.schemas import messages as schema from manila.api import validation from manila.api.views import messages as messages_view from manila import exception from manila.i18n import _ from manila.message import api as message_api MESSAGES_BASE_MICRO_VERSION = '2.37' MESSAGES_QUERY_BY_TIMESTAMP = '2.52' @validation.validated class MessagesController(wsgi.Controller): """The User Messages API controller for the OpenStack API.""" _view_builder_class = messages_view.ViewBuilder resource_name = 'message' def __init__(self): self.message_api = message_api.API() super(MessagesController, self).__init__() @wsgi.Controller.api_version(MESSAGES_BASE_MICRO_VERSION) @wsgi.Controller.authorize('get') @validation.request_query_schema(schema.show_request_query) @validation.response_body_schema(schema.show_response_body) def show(self, req, id): """Return the given message.""" context = req.environ['manila.context'] try: message = self.message_api.get(context, id) except exception.MessageNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.detail(req, message) @wsgi.Controller.api_version(MESSAGES_BASE_MICRO_VERSION) @wsgi.Controller.authorize @wsgi.action("delete") @validation.response_body_schema(schema.delete_response_body) def 
delete(self, req, id): """Delete a message.""" context = req.environ['manila.context'] try: message = self.message_api.get(context, id) self.message_api.delete(context, message) except exception.MessageNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return webob.Response(status_int=http_client.NO_CONTENT) @wsgi.Controller.api_version(MESSAGES_BASE_MICRO_VERSION, '2.51') @wsgi.Controller.authorize('get_all') @validation.request_query_schema(schema.index_request_query) @validation.response_body_schema(schema.index_response_body) def index(self, req): """Returns a list of messages, transformed through view builder.""" context = req.environ['manila.context'] filters = req.params.copy() params = common.get_pagination_params(req) limit, offset = [params.get('limit'), params.get('offset')] sort_key, sort_dir = common.get_sort_params(filters) filters.pop('created_since', None) filters.pop('created_before', None) messages = self.message_api.get_all(context, search_opts=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) return self._view_builder.index(req, messages) @wsgi.Controller.api_version(MESSAGES_QUERY_BY_TIMESTAMP) # noqa: F811 @wsgi.Controller.authorize('get_all') @validation.request_query_schema(schema.index_request_query_v252) @validation.response_body_schema(schema.index_response_body) def index(self, req): # pylint: disable=function-redefined # noqa F811 """Returns a list of messages, transformed through view builder.""" context = req.environ['manila.context'] filters = req.params.copy() params = common.get_pagination_params(req) limit, offset = [params.get('limit'), params.get('offset')] sort_key, sort_dir = common.get_sort_params(filters) for time_comparison_filter in ['created_since', 'created_before']: if time_comparison_filter in filters: time_str = filters.get(time_comparison_filter) try: parsed_time = timeutils.parse_isotime(time_str) except ValueError: msg = _('Invalid value specified for the query ' 'key: %s') % 
time_comparison_filter raise exc.HTTPBadRequest(explanation=msg) filters[time_comparison_filter] = parsed_time messages = self.message_api.get_all(context, search_opts=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) return self._view_builder.index(req, messages) def create_resource(): return wsgi.Resource(MessagesController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/metadata.py0000664000175000017500000002457600000000000017567 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila import db from manila import exception from manila.i18n import _ from manila import policy class MetadataController(object): """An abstract metadata controller resource.""" # From db, ensure it exists resource_get = { "share": "share_get", "share_snapshot": "share_snapshot_get", "share_network_subnet": "share_network_subnet_get", "share_export_location": "export_location_get_by_uuid", } resource_metadata_get = { "share": "share_metadata_get", "share_snapshot": "share_snapshot_metadata_get", "share_network_subnet": "share_network_subnet_metadata_get", "share_export_location": "export_location_metadata_get", } resource_metadata_get_item = { "share": "share_metadata_get_item", "share_snapshot": "share_snapshot_metadata_get_item", "share_network_subnet": "share_network_subnet_metadata_get_item", "share_export_location": "export_location_metadata_get_item", } resource_metadata_update = { "share": "share_metadata_update", "share_snapshot": "share_snapshot_metadata_update", "share_network_subnet": "share_network_subnet_metadata_update", "share_export_location": "export_location_metadata_update", } resource_metadata_update_item = { "share": "share_metadata_update_item", "share_snapshot": "share_snapshot_metadata_update_item", "share_network_subnet": "share_network_subnet_metadata_update_item", "share_export_location": "export_location_metadata_update_item", } resource_metadata_delete = { "share": "share_metadata_delete", "share_snapshot": "share_snapshot_metadata_delete", "share_network_subnet": "share_network_subnet_metadata_delete", "share_export_location": "export_location_metadata_delete", } resource_policy_get = { 'share': 'get', 'share_snapshot': 'get_snapshot', 'share_network_subnet': 'show', } def __init__(self): super(MetadataController, self).__init__() self.resource_name = None def _get_resource(self, context, resource_id, for_modification=False, 
parent_id=None): if self.resource_name in ['share', 'share_network_subnet', 'share_export_location']: # some resources don't have a "project_id" field (like # share_export_location or share_network_subnet), # and sometimes we want to retrieve "public" resources # (like shares), so avoid hard coding project_only=True in the # lookup where necessary kwargs = {} else: kwargs = {'project_only': True} try: get_res_method = getattr( db, self.resource_get[self.resource_name]) if parent_id is not None: kwargs["parent_id"] = parent_id res = get_res_method(context, resource_id, **kwargs) if self.resource_name not in ["share_export_location"]: get_policy = self.resource_policy_get[self.resource_name] # skip policy check for export locations if res.get('is_public') is False: authorized = policy.check_policy(context, self.resource_name, get_policy, res, do_raise=False) if not authorized: # Raising NotFound to prevent existence detection raise exception.NotFound() elif for_modification: # a public resource's metadata can be viewed, but not # modified by non owners policy.check_policy(context, self.resource_name, get_policy, res) except exception.NotFound: msg = _('%s not found.' 
% self.resource_name.capitalize()) raise exc.HTTPNotFound(explanation=msg) return res def _get_metadata(self, context, resource_id, parent_id=None): self._get_resource(context, resource_id, parent_id=parent_id) get_metadata_method = getattr( db, self.resource_metadata_get[self.resource_name]) result = get_metadata_method(context, resource_id) return result @wsgi.response(200) def _index_metadata(self, req, resource_id, parent_id=None): """Lists existing metadata.""" context = req.environ['manila.context'] metadata = self._get_metadata(context, resource_id, parent_id=parent_id) return {'metadata': metadata} @wsgi.response(200) def _create_metadata(self, req, resource_id, body, parent_id=None): """Returns the new metadata item created.""" context = req.environ['manila.context'] try: metadata = body['metadata'] common.check_metadata_properties(metadata) except (KeyError, TypeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) except exception.InvalidMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidMetadataSize as error: raise exc.HTTPBadRequest(explanation=error.msg) self._get_resource(context, resource_id, for_modification=True, parent_id=parent_id) create_metadata_method = getattr( db, self.resource_metadata_update[self.resource_name]) result = create_metadata_method(context, resource_id, metadata, delete='False') return {'metadata': result} def _update_metadata_item(self, req, resource_id, body, key, parent_id=None): """Updates the specified metadata item.""" context = req.environ['manila.context'] try: meta_item = body['metadata'] common.check_metadata_properties(meta_item) except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) except exception.InvalidMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidMetadataSize as error: raise exc.HTTPBadRequest(explanation=error.msg) if key not in meta_item: expl 
= _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) self._get_resource(context, resource_id, for_modification=True, parent_id=parent_id) update_metadata_item_method = getattr( db, self.resource_metadata_update_item[self.resource_name]) result = update_metadata_item_method(context, resource_id, meta_item) return {'metadata': result} @wsgi.response(200) def _update_all_metadata(self, req, resource_id, body, parent_id=None): """Deletes existing metadata, and returns the updated metadata.""" context = req.environ['manila.context'] try: metadata = body['metadata'] common.check_metadata_properties(metadata) except (TypeError, KeyError): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) except exception.InvalidMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidMetadataSize as error: raise exc.HTTPBadRequest(explanation=error.msg) self._get_resource(context, resource_id, for_modification=True, parent_id=parent_id) meta_ref = self._get_metadata(context, resource_id, parent_id=parent_id) for key in meta_ref: delete_metadata_method = getattr( db, self.resource_metadata_delete[self.resource_name]) delete_metadata_method(context, resource_id, key) update_metadata_method = getattr( db, self.resource_metadata_update[self.resource_name]) new_metadata = update_metadata_method(context, resource_id, metadata, delete='False') return {'metadata': new_metadata} @wsgi.response(200) def _show_metadata(self, req, resource_id, key, parent_id=None): """Return metadata item.""" context = req.environ['manila.context'] self._get_resource(context, resource_id, for_modification=False, parent_id=parent_id) get_metadata_item_method = getattr( db, self.resource_metadata_get_item[self.resource_name]) item = get_metadata_item_method(context, resource_id, key) return {'meta': {key: item[key]}} 
@wsgi.response(200) def _delete_metadata(self, req, resource_id, key, parent_id=None): """Deletes existing metadata item.""" context = req.environ['manila.context'] self._get_resource(context, resource_id, for_modification=True, parent_id=parent_id) get_metadata_item_method = getattr( db, self.resource_metadata_get_item[self.resource_name]) get_metadata_item_method(context, resource_id, key) delete_metadata_method = getattr( db, self.resource_metadata_delete[self.resource_name]) delete_metadata_method(context, resource_id, key) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/quota_class_sets.py0000664000175000017500000000662300000000000021354 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack LLC. # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from manila.api.openstack import wsgi from manila.api.views import quota_class_sets as quota_class_sets_views from manila import db from manila import exception from manila import quota QUOTAS = quota.QUOTAS class QuotaClassSetsMixin(object): """The Quota Class Sets API controller common logic. Mixin class that should be inherited by Quota Class Sets API controllers, which are used for different API URLs and microversions. 
""" resource_name = "quota_class_set" _view_builder_class = quota_class_sets_views.ViewBuilder @wsgi.Controller.authorize("show") def _show(self, req, id): context = req.environ['manila.context'] try: db.authorize_quota_class_context(context, id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() return self._view_builder.detail_list( req, QUOTAS.get_class_quotas(context, id), id) @wsgi.Controller.authorize("update") def _update(self, req, id, body): context = req.environ['manila.context'] quota_class = id for key in body.get(self.resource_name, {}).keys(): if key in QUOTAS: value = int(body[self.resource_name][key]) try: db.quota_class_update(context, quota_class, key, value) except exception.QuotaClassNotFound: db.quota_class_create(context, quota_class, key, value) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return self._view_builder.detail_list( req, QUOTAS.get_class_quotas(context, quota_class)) class QuotaClassSetsControllerLegacy(QuotaClassSetsMixin, wsgi.Controller): """Deprecated Quota Class Sets API controller. Used by legacy API v1 and v2 microversions from 2.0 to 2.6. Registered under deprecated API URL 'os-quota-class-sets'. """ @wsgi.Controller.api_version('1.0', '2.6') def show(self, req, id): return self._show(req, id) @wsgi.Controller.api_version('1.0', '2.6') def update(self, req, id, body): return self._update(req, id, body) class QuotaClassSetsController(QuotaClassSetsMixin, wsgi.Controller): """Quota Class Sets API controller. Used only by API v2 starting from microversion 2.7. Registered under API URL 'quota-class-sets'. 
""" @wsgi.Controller.api_version('2.7') def show(self, req, id): return self._show(req, id) @wsgi.Controller.api_version('2.7') def update(self, req, id, body): return self._update(req, id, body) def create_resource_legacy(): return wsgi.Resource(QuotaClassSetsControllerLegacy()) def create_resource(): return wsgi.Resource(QuotaClassSetsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/quota_sets.py0000664000175000017500000003742300000000000020171 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from urllib import parse from oslo_log import log from oslo_utils import strutils import webob from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.views import quota_sets as quota_sets_views from manila import db from manila import exception from manila.i18n import _ from manila import quota QUOTAS = quota.QUOTAS LOG = log.getLogger(__name__) NON_QUOTA_KEYS = ('tenant_id', 'id', 'force', 'share_type') class QuotaSetsMixin(object): """The Quota Sets API controller common logic. Mixin class that should be inherited by Quota Sets API controllers, which are used for different API URLs and microversions. 
""" resource_name = "quota_set" _view_builder_class = quota_sets_views.ViewBuilder @staticmethod def _validate_quota_limit(limit, minimum, maximum, force_update): # NOTE: -1 is a flag value for unlimited if limit < -1: msg = _("Quota limit must be -1 or greater.") raise webob.exc.HTTPBadRequest(explanation=msg) if ((limit < minimum and not force_update) and (maximum != -1 or (maximum == -1 and limit != -1))): msg = _("Quota limit must be greater than %s.") % minimum raise webob.exc.HTTPBadRequest(explanation=msg) if maximum != -1 and limit > maximum and not force_update: msg = _("Quota limit must be less than %s.") % maximum raise webob.exc.HTTPBadRequest(explanation=msg) @staticmethod def _validate_user_id_and_share_type_args(user_id, share_type): if user_id and share_type: msg = _("'user_id' and 'share_type' values are mutually exclusive") raise webob.exc.HTTPBadRequest(explanation=msg) @staticmethod def _get_share_type_id(context, share_type_name_or_id): if share_type_name_or_id: share_type = db.share_type_get_by_name_or_id( context, share_type_name_or_id) if share_type: return share_type['id'] msg = _("Share type with name or id '%s' not found.") % ( share_type_name_or_id) raise webob.exc.HTTPNotFound(explanation=msg) @staticmethod def _ensure_share_type_arg_is_absent(req): params = parse.parse_qs(req.environ.get('QUERY_STRING', '')) share_type = params.get('share_type', [None])[0] if share_type: msg = _("'share_type' key is not supported by this microversion. " "Use 2.39 or greater microversion to be able " "to use 'share_type' quotas.") raise webob.exc.HTTPBadRequest(explanation=msg) @staticmethod def _ensure_specific_microversion_args_are_absent(body, keys, microversion): body = body.get('quota_set', body) for key in keys: if body.get(key): msg = (_("'%(key)s' key is not supported by this " "microversion. 
Use %(microversion)s or greater " "microversion to be able to use '%(key)s' quotas.") % {"key": key, "microversion": microversion}) raise webob.exc.HTTPBadRequest(explanation=msg) def _get_quotas(self, context, project_id, user_id=None, share_type_id=None, usages=False): self._validate_user_id_and_share_type_args(user_id, share_type_id) if user_id: values = QUOTAS.get_user_quotas( context, project_id, user_id, usages=usages) elif share_type_id: values = QUOTAS.get_share_type_quotas( context, project_id, share_type_id, usages=usages) else: values = QUOTAS.get_project_quotas( context, project_id, usages=usages) if usages: return values return {k: v['limit'] for k, v in values.items()} @wsgi.Controller.authorize("show") def _show(self, req, id, detail=False): context = req.environ['manila.context'] params = parse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] share_type = params.get('share_type', [None])[0] try: db.authorize_project_context(context, id) # _get_quotas use 'usages' to indicate whether retrieve additional # attributes, so pass detail to the argument. 
share_type_id = self._get_share_type_id(context, share_type) quotas = self._get_quotas( context, id, user_id, share_type_id, usages=detail) return self._view_builder.detail_list( req, quotas, id, share_type_id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() @wsgi.Controller.authorize('show') def _defaults(self, req, id): context = req.environ['manila.context'] return self._view_builder.detail_list( req, QUOTAS.get_defaults(context), id) @wsgi.Controller.authorize("update") def _update(self, req, id, body): body = body.get('quota_set', {}) if (body.get('gigabytes') is None and body.get('snapshots') is None and body.get('snapshot_gigabytes') is None and body.get('shares') is None and body.get('share_networks') is None and body.get('share_groups') is None and body.get('share_group_snapshots') is None and body.get('share_replicas') is None and body.get('replica_gigabytes') is None and body.get('per_share_gigabytes') is None and body.get('backups') is None and body.get('backup_gigabytes') is None and body.get('encryption_keys') is None): msg = _("Must supply at least one quota field to update.") raise webob.exc.HTTPBadRequest(explanation=msg) context = req.environ['manila.context'] project_id = id bad_keys = [] force_update = False params = parse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] share_type = params.get('share_type', [None])[0] self._validate_user_id_and_share_type_args(user_id, share_type) share_type_id = self._get_share_type_id(context, share_type) if share_type and body.get('share_groups', body.get('share_group_snapshots')): msg = _("Share type quotas cannot constrain share groups and " "share group snapshots.") raise webob.exc.HTTPBadRequest(explanation=msg) try: settable_quotas = QUOTAS.get_settable_quotas( context, project_id, user_id=user_id, share_type_id=share_type_id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() for key, value in body.items(): if key == 'share_networks' 
and share_type_id: msg = _("'share_networks' quota cannot be set for share type. " "It can be set only for project or user.") raise webob.exc.HTTPBadRequest(explanation=msg) elif (key not in QUOTAS and key not in NON_QUOTA_KEYS): bad_keys.append(key) elif key == 'force': force_update = strutils.bool_from_string(value) elif key not in NON_QUOTA_KEYS and value: try: value = int(value) except (ValueError, TypeError): msg = _("Quota '%(value)s' for %(key)s should be " "integer.") % {'value': value, 'key': key} LOG.warning(msg) raise webob.exc.HTTPBadRequest(explanation=msg) LOG.debug("Force update quotas: %s.", force_update) if len(bad_keys) > 0: msg = _("Bad key(s) %s in quota_set.") % ",".join(bad_keys) raise webob.exc.HTTPBadRequest(explanation=msg) try: quotas = self._get_quotas( context, id, user_id=user_id, share_type_id=share_type_id, usages=True) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() for key, value in body.items(): if key in NON_QUOTA_KEYS or (not value and value != 0): continue # validate whether already used and reserved exceeds the new # quota, this check will be ignored if admin want to force # update try: value = int(value) except (ValueError, TypeError): msg = _("Quota '%(value)s' for %(key)s should be " "integer.") % {'value': value, 'key': key} LOG.warning(msg) raise webob.exc.HTTPBadRequest(explanation=msg) if force_update is False and value >= 0: quota_value = quotas.get(key) if quota_value and quota_value['limit'] >= 0: quota_used = (quota_value['in_use'] + quota_value['reserved']) LOG.debug("Quota %(key)s used: %(quota_used)s, " "value: %(value)s.", {'key': key, 'quota_used': quota_used, 'value': value}) if quota_used > value: msg = (_("Quota value %(value)s for %(key)s is " "smaller than already used and reserved " "%(quota_used)s.") % {'value': value, 'key': key, 'quota_used': quota_used}) raise webob.exc.HTTPBadRequest(explanation=msg) minimum = settable_quotas[key]['minimum'] maximum = settable_quotas[key]['maximum'] 
self._validate_quota_limit(value, minimum, maximum, force_update) try: db.quota_create( context, project_id, key, value, user_id=user_id, share_type_id=share_type_id) except exception.QuotaExists: db.quota_update( context, project_id, key, value, user_id=user_id, share_type_id=share_type_id) except exception.AdminRequired: raise webob.exc.HTTPForbidden() return self._view_builder.detail_list( req, self._get_quotas( context, id, user_id=user_id, share_type_id=share_type_id), share_type=share_type_id, ) @wsgi.Controller.authorize("delete") def _delete(self, req, id): context = req.environ['manila.context'] params = parse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] share_type = params.get('share_type', [None])[0] self._validate_user_id_and_share_type_args(user_id, share_type) try: db.authorize_project_context(context, id) if user_id: QUOTAS.destroy_all_by_project_and_user(context, id, user_id) elif share_type: share_type_id = self._get_share_type_id(context, share_type) QUOTAS.destroy_all_by_project_and_share_type( context, id, share_type_id) else: QUOTAS.destroy_all_by_project(context, id) return webob.Response(status_int=http_client.ACCEPTED) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() class QuotaSetsControllerLegacy(QuotaSetsMixin, wsgi.Controller): """Deprecated Quota Sets API controller. Used by legacy API v1 and v2 microversions from 2.0 to 2.6. Registered under deprecated API URL 'os-quota-sets'. 
""" @wsgi.Controller.api_version('1.0', '2.6') def show(self, req, id): self._ensure_share_type_arg_is_absent(req) return self._show(req, id) @wsgi.Controller.api_version('1.0', '2.6') def defaults(self, req, id): return self._defaults(req, id) @wsgi.Controller.api_version('1.0', '2.6') def update(self, req, id, body): self._ensure_share_type_arg_is_absent(req) self._ensure_specific_microversion_args_are_absent( body, ['share_groups', 'share_group_snapshots'], "2.40") self._ensure_specific_microversion_args_are_absent( body, ['share_replicas', 'replica_gigabytes'], "2.53") return self._update(req, id, body) @wsgi.Controller.api_version('1.0', '2.6') def delete(self, req, id): self._ensure_share_type_arg_is_absent(req) return self._delete(req, id) class QuotaSetsController(QuotaSetsMixin, wsgi.Controller): """Quota Sets API controller. Used only by API v2 starting from microversion 2.7. Registered under API URL 'quota-sets'. """ @wsgi.Controller.api_version('2.7') def show(self, req, id): if req.api_version_request < api_version.APIVersionRequest("2.39"): self._ensure_share_type_arg_is_absent(req) return self._show(req, id) @wsgi.Controller.api_version('2.25') def detail(self, req, id): if req.api_version_request < api_version.APIVersionRequest("2.39"): self._ensure_share_type_arg_is_absent(req) return self._show(req, id, True) @wsgi.Controller.api_version('2.7') def defaults(self, req, id): return self._defaults(req, id) @wsgi.Controller.api_version('2.7') def update(self, req, id, body): if req.api_version_request < api_version.APIVersionRequest("2.39"): self._ensure_share_type_arg_is_absent(req) elif req.api_version_request < api_version.APIVersionRequest("2.40"): self._ensure_specific_microversion_args_are_absent( body, ['share_groups', 'share_group_snapshots'], "2.40") elif req.api_version_request < api_version.APIVersionRequest("2.53"): self._ensure_specific_microversion_args_are_absent( body, ['share_replicas', 'replica_gigabytes'], "2.53") elif 
req.api_version_request < api_version.APIVersionRequest("2.62"): self._ensure_specific_microversion_args_are_absent( body, ['per_share_gigabytes'], "2.62") elif req.api_version_request < api_version.APIVersionRequest("2.80"): self._ensure_specific_microversion_args_are_absent( body, ['backups', 'backup_gigabytes'], "2.80") elif req.api_version_request < api_version.APIVersionRequest("2.90"): self._ensure_specific_microversion_args_are_absent( body, ['encryption_keys'], "2.90") return self._update(req, id, body) @wsgi.Controller.api_version('2.7') def delete(self, req, id): if req.api_version_request < api_version.APIVersionRequest("2.39"): self._ensure_share_type_arg_is_absent(req) return self._delete(req, id) def create_resource_legacy(): return wsgi.Resource(QuotaSetsControllerLegacy()) def create_resource(): return wsgi.Resource(QuotaSetsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/resource_locks.py0000664000175000017500000002213400000000000021015 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The resource_locks API controller module. 
This module handles the following requests: GET /resource-locks GET /resource-locks/{lock_id} POST /resource-locks PUT /resource-locks/{lock_id} DELETE /resource-locks/{lock_id} """ from http import client as http_client from oslo_utils import timeutils from oslo_utils import uuidutils import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.schemas import resource_locks as schema from manila.api import validation from manila.api.views import resource_locks as resource_locks_view from manila.common import constants from manila import exception from manila.i18n import _ from manila.lock import api as resource_locks from manila import utils RESOURCE_LOCKS_MIN_API_VERSION = '2.81' @validation.validated class ResourceLocksController(wsgi.Controller): """The Resource Locks API controller for the OpenStack API.""" _view_builder_class = resource_locks_view.ViewBuilder resource_name = 'resource_lock' def _check_body(self, body, lock_to_update=None): if 'resource_lock' not in body: raise exc.HTTPBadRequest( explanation="Malformed request body.") lock_data = body['resource_lock'] resource_type = ( lock_to_update['resource_type'] if lock_to_update else lock_data.get('resource_type', constants.SHARE_RESOURCE_TYPE) ) resource_id = lock_data.get('resource_id') or '' resource_action = (lock_data.get('resource_action') or constants.RESOURCE_ACTION_DELETE) lock_reason = lock_data.get('lock_reason') or '' if len(lock_reason) > 1023: msg = _("'lock_reason' can contain a maximum of 1023 characters.") raise exc.HTTPBadRequest(explanation=msg) if resource_type not in constants.RESOURCE_LOCK_RESOURCE_TYPES: msg = _("'resource_type' is required and must be one " "of %(resource_types)s") % { 'resource_types': constants.RESOURCE_LOCK_RESOURCE_TYPES } raise exc.HTTPBadRequest(explanation=msg) resource_type_lock_actions = ( constants.RESOURCE_LOCK_ACTIONS_MAPPING[resource_type]) if resource_action not in resource_type_lock_actions: 
msg = _("'resource_action' can only be one of %(actions)s" % {'actions': resource_type_lock_actions}) raise exc.HTTPBadRequest(explanation=msg) if lock_to_update: if set(lock_data.keys()) - {'resource_action', 'lock_reason'}: msg = _("Only 'resource_action' and 'lock_reason' " "can be updated.") raise exc.HTTPBadRequest(explanation=msg) else: if not uuidutils.is_uuid_like(resource_id): msg = _("Resource ID is required and must be in uuid format.") raise exc.HTTPBadRequest(explanation=msg) def __init__(self): self.resource_locks_api = resource_locks.API() super(ResourceLocksController, self).__init__() @wsgi.Controller.api_version(RESOURCE_LOCKS_MIN_API_VERSION) @wsgi.Controller.authorize('get_all') @validation.request_query_schema(schema.index_request_query) @validation.response_body_schema(schema.index_response_body) def index(self, req): """Returns a list of locks, transformed through view builder.""" context = req.environ['manila.context'] filters = req.params.copy() params = common.get_pagination_params(req) limit, offset = [params.pop('limit', None), params.pop('offset', None)] sort_key, sort_dir = common.get_sort_params(filters) for key in ('limit', 'offset'): filters.pop(key, None) show_count = utils.get_bool_from_api_params( 'with_count', {'with_count': filters.pop('with_count', False)}) for time_comparison_filter in ['created_since', 'created_before']: if time_comparison_filter in filters: time_str = filters.get(time_comparison_filter) try: parsed_time = timeutils.parse_isotime(time_str) filters[time_comparison_filter] = parsed_time except ValueError: msg = _('Invalid value specified for the query ' 'key: %s') % time_comparison_filter raise exc.HTTPBadRequest(explanation=msg) locks, count = self.resource_locks_api.get_all(context, search_opts=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir, show_count=show_count) return self._view_builder.index(req, locks, count=count) @wsgi.Controller.api_version(RESOURCE_LOCKS_MIN_API_VERSION) 
@wsgi.Controller.authorize('get') @validation.request_query_schema(schema.show_request_query) @validation.response_body_schema(schema.show_response_body) def show(self, req, id): """Return an existing resource lock by ID.""" context = req.environ['manila.context'] try: resource_lock = self.resource_locks_api.get(context, id) except exception.ResourceLockNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.detail(req, resource_lock) @wsgi.Controller.api_version(RESOURCE_LOCKS_MIN_API_VERSION) @wsgi.Controller.authorize @wsgi.action("delete") @validation.response_body_schema(schema.delete_response_body) def delete(self, req, id): """Delete an existing resource lock.""" context = req.environ['manila.context'] try: self.resource_locks_api.delete(context, id) except exception.ResourceLockNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return webob.Response(status_int=http_client.NO_CONTENT) @wsgi.Controller.api_version(RESOURCE_LOCKS_MIN_API_VERSION) @wsgi.Controller.authorize @validation.request_body_schema(schema.create_request_body) @validation.response_body_schema(schema.create_response_body) def create(self, req, body): """Create a resource lock.""" context = req.environ['manila.context'] self._check_body(body) lock_data = body['resource_lock'] try: resource_lock = self.resource_locks_api.create( context, resource_id=lock_data['resource_id'], resource_type=lock_data['resource_type'], resource_action=(lock_data.get('resource_action') or constants.RESOURCE_ACTION_DELETE), lock_reason=lock_data.get('lock_reason') ) except exception.NotFound: raise exc.HTTPBadRequest( explanation="No such resource found.") except exception.InvalidInput as error: raise exc.HTTPConflict(explanation=error.msg) except exception.ResourceVisibilityLockExists: raise exc.HTTPConflict( "Resource's visibility is already locked by other user.") return self._view_builder.detail(req, resource_lock) 
    @wsgi.Controller.api_version(RESOURCE_LOCKS_MIN_API_VERSION)
    @wsgi.Controller.authorize
    @validation.request_body_schema(schema.update_request_body)
    @validation.response_body_schema(schema.update_response_body)
    def update(self, req, id, body):
        """Update an existing resource lock.

        :param req: the API request object.
        :param id: ID of the resource lock being updated.
        :param body: request body containing a 'resource_lock' object;
            validated by ``_check_body`` against the existing lock (only
            'resource_action' and 'lock_reason' may be changed).
        :raises webob.exc.HTTPNotFound: if no lock with this ID exists.
        :raises webob.exc.HTTPBadRequest: if the update payload is invalid.
        """
        context = req.environ['manila.context']
        try:
            resource_lock = self.resource_locks_api.get(context, id)
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=e.msg)
        # Validate the payload in the context of the lock being updated.
        self._check_body(body, lock_to_update=resource_lock)
        lock_data = body['resource_lock']
        try:
            resource_lock = self.resource_locks_api.update(
                context,
                resource_lock,
                lock_data,
            )
        except exception.InvalidInput as e:
            raise exc.HTTPBadRequest(explanation=e.msg)

        return self._view_builder.detail(req, resource_lock)


def create_resource():
    # Factory used by the API router to build the WSGI resource.
    return wsgi.Resource(ResourceLocksController())
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/router.py0000664000175000017500000010401400000000000017311 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC.
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
WSGI middleware for OpenStack Share API v2.
""" from manila.api import extensions import manila.api.openstack from manila.api.v1 import limits from manila.api.v1 import scheduler_stats from manila.api.v1 import security_service from manila.api.v1 import share_manage from manila.api.v1 import share_types_extra_specs from manila.api.v1 import share_unmanage from manila.api.v2 import availability_zones from manila.api.v2 import messages from manila.api.v2 import quota_class_sets from manila.api.v2 import quota_sets from manila.api.v2 import resource_locks from manila.api.v2 import services from manila.api.v2 import share_access_metadata from manila.api.v2 import share_accesses from manila.api.v2 import share_backups from manila.api.v2 import share_export_locations from manila.api.v2 import share_group_snapshots from manila.api.v2 import share_group_type_specs from manila.api.v2 import share_group_types from manila.api.v2 import share_groups from manila.api.v2 import share_instance_export_locations from manila.api.v2 import share_instances from manila.api.v2 import share_network_subnets from manila.api.v2 import share_networks from manila.api.v2 import share_replica_export_locations from manila.api.v2 import share_replicas from manila.api.v2 import share_servers from manila.api.v2 import share_snapshot_export_locations from manila.api.v2 import share_snapshot_instance_export_locations from manila.api.v2 import share_snapshot_instances from manila.api.v2 import share_snapshots from manila.api.v2 import share_transfer from manila.api.v2 import share_types from manila.api.v2 import shares from manila.api import versions class APIRouter(manila.api.openstack.APIRouter): """Route API requests. Routes requests on the OpenStack API to the appropriate controller and method. 
    """

    # Extension manager hook consumed by the base APIRouter class.
    ExtensionManager = extensions.ExtensionManager

    def _setup_routes(self, mapper):
        # Registers every v2 REST resource with the Routes mapper.  Most
        # non-standard routes are connected twice — once under the legacy
        # '/{project_id}/...' prefix and once without it — via the repeated
        # `for path_prefix in ['/{project_id}', '']` loops.  Registration
        # order matters to Routes; do not reorder casually.

        # --- API version discovery at the root URL ---
        self.resources["versions"] = versions.create_resource()
        mapper.connect("versions",
                       "/",
                       controller=self.resources["versions"],
                       action="index")

        mapper.redirect("", "/")

        # --- Availability zones (legacy + current URLs) ---
        self.resources["availability_zones_legacy"] = (
            availability_zones.create_resource_legacy())
        # TODO(vponomaryov): "os-availability-zone" is deprecated
        # since v2.7. Remove it when minimum API version becomes equal to
        # or greater than v2.7.
        mapper.resource("availability-zone",
                        "os-availability-zone",
                        controller=self.resources["availability_zones_legacy"])

        self.resources["availability_zones"] = (
            availability_zones.create_resource())
        mapper.resource("availability-zone",
                        "availability-zones",
                        controller=self.resources["availability_zones"])

        # --- Services (legacy + current URLs, plus ensure-shares action) ---
        self.resources["services_legacy"] = services.create_resource_legacy()
        # TODO(vponomaryov): "os-services" is deprecated
        # since v2.7. Remove it when minimum API version becomes equal to
        # or greater than v2.7.
        mapper.resource("service",
                        "os-services",
                        controller=self.resources["services_legacy"])

        self.resources["services"] = services.create_resource()
        mapper.resource("service",
                        "services",
                        controller=self.resources["services"])
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("services",
                           "%s/services/ensure-shares" % path_prefix,
                           controller=self.resources["services"],
                           action="ensure_shares",
                           conditions={"method": ["POST"]})

        # --- Quota sets and quota class sets (legacy + current URLs) ---
        self.resources["quota_sets_legacy"] = (
            quota_sets.create_resource_legacy())
        # TODO(vponomaryov): "os-quota-sets" is deprecated
        # since v2.7. Remove it when minimum API version becomes equal to
        # or greater than v2.7.
        mapper.resource("quota-set",
                        "os-quota-sets",
                        controller=self.resources["quota_sets_legacy"],
                        member={"defaults": "GET"})

        self.resources["quota_sets"] = quota_sets.create_resource()
        mapper.resource("quota-set",
                        "quota-sets",
                        controller=self.resources["quota_sets"],
                        member={"defaults": "GET", "detail": "GET"})

        self.resources["quota_class_sets_legacy"] = (
            quota_class_sets.create_resource_legacy())
        # TODO(vponomaryov): "os-quota-class-sets" is deprecated
        # since v2.7. Remove it when minimum API version becomes equal to
        # or greater than v2.7.
        mapper.resource("quota-class-set",
                        "os-quota-class-sets",
                        controller=self.resources["quota_class_sets_legacy"])

        self.resources["quota_class_sets"] = quota_class_sets.create_resource()
        mapper.resource("quota-class-set",
                        "quota-class-sets",
                        controller=self.resources["quota_class_sets"])

        # --- Share manage / unmanage (legacy URLs only) ---
        self.resources["share_manage"] = share_manage.create_resource()
        # TODO(vponomaryov): "os-share-manage" is deprecated
        # since v2.7. Remove it when minimum API version becomes equal to
        # or greater than v2.7.
        mapper.resource("share_manage",
                        "os-share-manage",
                        controller=self.resources["share_manage"])

        self.resources["share_unmanage"] = share_unmanage.create_resource()
        # TODO(vponomaryov): "os-share-unmanage" is deprecated
        # since v2.7. Remove it when minimum API version becomes equal to
        # or greater than v2.7.
        mapper.resource("share_unmanage",
                        "os-share-unmanage",
                        controller=self.resources["share_unmanage"],
                        member={"unmanage": "POST"})

        # --- Shares (CRUD, manage action, metadata sub-resource) ---
        self.resources["shares"] = shares.create_resource()
        mapper.resource("share", "shares",
                        controller=self.resources["shares"],
                        collection={"detail": "GET"},
                        member={"action": "POST"})

        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("shares",
                           "%s/shares/manage" % path_prefix,
                           controller=self.resources["shares"],
                           action="manage",
                           conditions={"method": ["POST"]})

        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("share_metadata",
                           "%s/shares/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["shares"],
                           action="create_metadata",
                           conditions={"method": ["POST"]})
            mapper.connect("share_metadata",
                           "%s/shares/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["shares"],
                           action="update_all_metadata",
                           conditions={"method": ["PUT"]})
            mapper.connect("share_metadata",
                           "%s/shares/{resource_id}/metadata/{key}"
                           % path_prefix,
                           controller=self.resources["shares"],
                           action="update_metadata_item",
                           conditions={"method": ["POST"]})
            mapper.connect("share_metadata",
                           "%s/shares/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["shares"],
                           action="index_metadata",
                           conditions={"method": ["GET"]})
            mapper.connect("share_metadata",
                           "%s/shares/{resource_id}/metadata/{key}"
                           % path_prefix,
                           controller=self.resources["shares"],
                           action="show_metadata",
                           conditions={"method": ["GET"]})
            mapper.connect("share_metadata",
                           "%s/shares/{resource_id}/metadata/{key}"
                           % path_prefix,
                           controller=self.resources["shares"],
                           action="delete_metadata",
                           conditions={"method": ["DELETE"]})

        # --- Share instances and their export locations ---
        self.resources["share_instances"] = share_instances.create_resource()
        mapper.resource("share_instance", "share_instances",
                        controller=self.resources["share_instances"],
                        collection={"detail": "GET"},
                        member={"action": "POST"})

        self.resources["share_instance_export_locations"] = (
            share_instance_export_locations.create_resource())
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("share_instances",
                           ("%s/share_instances/{share_instance_id}"
                            "/export_locations" % path_prefix),
                           controller=self.resources[
                               "share_instance_export_locations"],
                           action="index",
                           conditions={"method": ["GET"]})
            mapper.connect("share_instances",
                           ("%s/share_instances/{share_instance_id}"
                            "/export_locations"
                            "/{export_location_uuid}" % path_prefix),
                           controller=self.resources[
                               "share_instance_export_locations"],
                           action="show",
                           conditions={"method": ["GET"]})
            mapper.connect("share_instance",
                           "%s/shares/{share_id}/instances" % path_prefix,
                           controller=self.resources["share_instances"],
                           action="get_share_instances",
                           conditions={"method": ["GET"]})

            # --- Share export locations and their metadata ---
            self.resources["share_export_locations"] = (
                share_export_locations.create_resource())
            mapper.connect("shares",
                           "%s/shares/{share_id}"
                           "/export_locations" % path_prefix,
                           controller=self.resources["share_export_locations"],
                           action="index",
                           conditions={"method": ["GET"]})
            mapper.connect("shares",
                           ("%s/shares/{share_id}/export_locations"
                            "/{export_location_uuid}" % path_prefix),
                           controller=self.resources["share_export_locations"],
                           action="show",
                           conditions={"method": ["GET"]})
            mapper.connect("export_locations_metadata",
                           "%s/shares/{share_id}/export_locations"
                           "/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["share_export_locations"],
                           action="create_metadata",
                           conditions={"method": ["POST"]})
            mapper.connect("export_locations_metadata",
                           "%s/shares/{share_id}/export_locations"
                           "/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["share_export_locations"],
                           action="update_all_metadata",
                           conditions={"method": ["PUT"]})
            mapper.connect("export_locations_metadata",
                           "%s/shares/{share_id}/export_locations/"
                           "{resource_id}/metadata/{key}" % path_prefix,
                           controller=self.resources["share_export_locations"],
                           action="update_metadata_item",
                           conditions={"method": ["POST"]})
            mapper.connect("export_locations_metadata",
                           "%s/shares/{share_id}/export_locations/"
                           "{resource_id}/metadata" % path_prefix,
                           controller=self.resources["share_export_locations"],
                           action="index_metadata",
                           conditions={"method": ["GET"]})
            mapper.connect("export_locations_metadata",
                           "%s/shares/{share_id}/export_locations/"
                           "{resource_id}/metadata/{key}" % path_prefix,
                           controller=self.resources["share_export_locations"],
                           action="show_metadata",
                           conditions={"method": ["GET"]})
            mapper.connect("export_locations_metadata",
                           "%s/shares/{share_id}/export_locations/"
                           "{resource_id}/metadata/{key}" % path_prefix,
                           controller=self.resources["share_export_locations"],
                           action="delete_metadata",
                           conditions={"method": ["DELETE"]})

        # --- Share snapshots (CRUD, metadata, manage/access-list,
        #     export locations) ---
        self.resources["snapshots"] = share_snapshots.create_resource()
        mapper.resource("snapshot", "snapshots",
                        controller=self.resources["snapshots"],
                        collection={"detail": "GET"},
                        member={"action": "POST"})

        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("snapshots_metadata",
                           "%s/snapshots/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["snapshots"],
                           action="create_metadata",
                           conditions={"method": ["POST"]})
            mapper.connect("snapshots_metadata",
                           "%s/snapshots/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["snapshots"],
                           action="update_all_metadata",
                           conditions={"method": ["PUT"]})
            mapper.connect("snapshots_metadata",
                           "%s/snapshots/{resource_id}/metadata/{key}"
                           % path_prefix,
                           controller=self.resources["snapshots"],
                           action="update_metadata_item",
                           conditions={"method": ["POST"]})
            mapper.connect("snapshots_metadata",
                           "%s/snapshots/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["snapshots"],
                           action="index_metadata",
                           conditions={"method": ["GET"]})
            mapper.connect("snapshots_metadata",
                           "%s/snapshots/{resource_id}/metadata/{key}"
                           % path_prefix,
                           controller=self.resources["snapshots"],
                           action="show_metadata",
                           conditions={"method": ["GET"]})
            mapper.connect("snapshots_metadata",
                           "%s/snapshots/{resource_id}/metadata/{key}"
                           % path_prefix,
                           controller=self.resources["snapshots"],
                           action="delete_metadata",
                           conditions={"method": ["DELETE"]})

        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("snapshots",
                           "%s/snapshots/manage" % path_prefix,
                           controller=self.resources["snapshots"],
                           action="manage",
                           conditions={"method": ["POST"]})
            mapper.connect("snapshots",
                           "%s/snapshots/{snapshot_id}"
                           "/access-list" % path_prefix,
                           controller=self.resources["snapshots"],
                           action="access_list",
                           conditions={"method": ["GET"]})

            self.resources["share_snapshot_export_locations"] = (
                share_snapshot_export_locations.create_resource())
            mapper.connect("snapshots",
                           "%s/snapshots/{snapshot_id}"
                           "/export-locations" % path_prefix,
                           controller=self.resources[
                               "share_snapshot_export_locations"],
                           action="index",
                           conditions={"method": ["GET"]})
            mapper.connect("snapshots",
                           "%s/snapshots/{snapshot_id}/export-locations"
                           "/{export_location_id}" % path_prefix,
                           controller=self.resources[
                               "share_snapshot_export_locations"],
                           action="show",
                           conditions={"method": ["GET"]})

        # --- Snapshot instances and their export locations ---
        self.resources['snapshot_instances'] = (
            share_snapshot_instances.create_resource())
        mapper.resource("snapshot-instance", "snapshot-instances",
                        controller=self.resources['snapshot_instances'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})

        self.resources["share_snapshot_instance_export_locations"] = (
            share_snapshot_instance_export_locations.create_resource())
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("snapshot-instance",
                           "%s/snapshot-instances/{snapshot_instance_id}"
                           "/export-locations" % path_prefix,
                           controller=self.resources[
                               "share_snapshot_instance_export_locations"],
                           action="index",
                           conditions={"method": ["GET"]})
            mapper.connect("snapshot-instance",
                           "%s/snapshot-instances/{snapshot_instance_id}"
                           "/export-locations"
                           "/{export_location_id}" % path_prefix,
                           controller=self.resources[
                               "share_snapshot_instance_export_locations"],
                           action="show",
                           conditions={"method": ["GET"]})

        # --- Limits ---
        self.resources["limits"] = limits.create_resource()
        mapper.resource("limit", "limits",
                        controller=self.resources["limits"])

        # --- Security services ---
        self.resources["security_services"] = (
            security_service.create_resource())
        mapper.resource("security-service", "security-services",
                        controller=self.resources["security_services"],
                        collection={"detail": "GET"})

        # --- Share networks, their subnets and subnet metadata ---
        self.resources["share_networks"] = share_networks.create_resource()
        mapper.resource(share_networks.RESOURCE_NAME, "share-networks",
                        controller=self.resources["share_networks"],
                        collection={"detail": "GET"},
                        member={"action": "POST"})

        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            self.resources["share_network_subnets"] = (
                share_network_subnets.create_resource())
            mapper.connect("share-networks",
                           "%s/share-networks/{share_network_id}"
                           "/subnets" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="create",
                           conditions={"method": ["POST"]})
            mapper.connect("share-networks",
                           "%s/share-networks/{share_network_id}"
                           "/subnets/{share_network_subnet_id}" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="delete",
                           conditions={"method": ["DELETE"]})
            mapper.connect("share-networks",
                           "%s/share-networks/{share_network_id}"
                           "/subnets/{share_network_subnet_id}" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="show",
                           conditions={"method": ["GET"]})
            mapper.connect("share-networks",
                           "%s/share-networks/{share_network_id}"
                           "/subnets" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="index",
                           conditions={"method": ["GET"]})

        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("subnets_metadata",
                           "%s/share-networks/{share_network_id}"
                           "/subnets/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="create_metadata",
                           conditions={"method": ["POST"]})
            mapper.connect("subnets_metadata",
                           "%s/share-networks/{share_network_id}"
                           "/subnets/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="update_all_metadata",
                           conditions={"method": ["PUT"]})
            mapper.connect("subnets_metadata",
                           "%s/share-networks/{share_network_id}"
                           "/subnets/{resource_id}"
                           "/metadata/{key}" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="update_metadata_item",
                           conditions={"method": ["POST"]})
            mapper.connect("subnets_metadata",
                           "%s/share-networks/{share_network_id}"
                           "/subnets/{resource_id}/metadata" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="index_metadata",
                           conditions={"method": ["GET"]})
            mapper.connect("subnets_metadata",
                           "%s/share-networks/{share_network_id}"
                           "/subnets/{resource_id}"
                           "/metadata/{key}" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="show_metadata",
                           conditions={"method": ["GET"]})
            mapper.connect("subnets_metadata",
                           "%s/share-networks/{share_network_id}"
                           "/subnets/{resource_id}"
                           "/metadata/{key}" % path_prefix,
                           controller=self.resources["share_network_subnets"],
                           action="delete_metadata",
                           conditions={"method": ["DELETE"]})

        # --- Share servers (CRUD, details, manage) ---
        self.resources["share_servers"] = share_servers.create_resource()
        mapper.resource("share_server",
                        "share-servers",
                        controller=self.resources["share_servers"],
                        member={"action": "POST"})
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("details",
                           "%s/share-servers/{id}/details" % path_prefix,
                           controller=self.resources["share_servers"],
                           action="details",
                           conditions={"method": ["GET"]})
            mapper.connect("share_servers",
                           "%s/share-servers/manage" % path_prefix,
                           controller=self.resources["share_servers"],
                           action="manage",
                           conditions={"method": ["POST"]})

        # --- Share types and their extra specs ---
        self.resources["types"] = share_types.create_resource()
        mapper.resource("type", "types",
                        controller=self.resources["types"],
                        collection={"detail": "GET", "default": "GET"},
                        member={"action": "POST",
                                "os-share-type-access": "GET",
                                "share_type_access": "GET"})

        self.resources["extra_specs"] = (
            share_types_extra_specs.create_resource())
        mapper.resource("extra_spec", "extra_specs",
                        controller=self.resources["extra_specs"],
                        parent_resource=dict(member_name="type",
                                             collection_name="types"))

        # --- Scheduler pool statistics ---
        self.resources["scheduler_stats"] = scheduler_stats.create_resource()
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("pools",
                           "%s/scheduler-stats/pools" % path_prefix,
                           controller=self.resources["scheduler_stats"],
                           action="pools_index",
                           conditions={"method": ["GET"]})
            mapper.connect("pools",
                           "%s/scheduler-stats/pools/detail" % path_prefix,
                           controller=self.resources["scheduler_stats"],
                           action="pools_detail",
                           conditions={"method": ["GET"]})

        # --- Share groups, group types and group specs ---
        self.resources["share-groups"] = share_groups.create_resource()
        mapper.resource(
            "share-group", "share-groups",
            controller=self.resources["share-groups"],
            collection={"detail": "GET"})
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect(
                "share-groups",
                "%s/share-groups/{id}/action" % path_prefix,
                controller=self.resources["share-groups"],
                action="action",
                conditions={"method": ["POST"]})

        self.resources["share-group-types"] = (
            share_group_types.create_resource())
        mapper.resource(
            "share-group-type", "share-group-types",
            controller=self.resources["share-group-types"],
            collection={"detail": "GET", "default": "GET"},
            member={"action": "POST"})
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect(
                "share-group-types",
                "%s/share-group-types/{id}/access" % path_prefix,
                controller=self.resources["share-group-types"],
                action="share_group_type_access",
                conditions={"method": ["GET"]})

        # NOTE(ameade): These routes can be simplified when the following
        # issue is fixed: https://github.com/bbangert/routes/issues/68
        self.resources["group-specs"] = (
            share_group_type_specs.create_resource())
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect(
                "share-group-types",
                "%s/share-group-types/{id}/group-specs" % path_prefix,
                controller=self.resources["group-specs"],
                action="index",
                conditions={"method": ["GET"]})
            mapper.connect(
                "share-group-types",
                "%s/share-group-types/{id}/group-specs" % path_prefix,
                controller=self.resources["group-specs"],
                action="create",
                conditions={"method": ["POST"]})
            mapper.connect(
                "share-group-types",
                "%s/share-group-types/{id}/group-specs/{key}" % path_prefix,
                controller=self.resources["group-specs"],
                action="show",
                conditions={"method": ["GET"]})
            mapper.connect(
                "share-group-types",
                "%s/share-group-types/{id}/group-specs/{key}" % path_prefix,
                controller=self.resources["group-specs"],
                action="delete",
                conditions={"method": ["DELETE"]})
            mapper.connect(
                "share-group-types",
                "%s/share-group-types/{id}/group-specs/{key}" % path_prefix,
                controller=self.resources["group-specs"],
                action="update",
                conditions={"method": ["PUT"]})

        # --- Share group snapshots ---
        self.resources["share-group-snapshots"] = (
            share_group_snapshots.create_resource())
        mapper.resource(
            "share-group-snapshot", "share-group-snapshots",
            controller=self.resources["share-group-snapshots"],
            collection={"detail": "GET"},
            member={"members": "GET", "action": "POST"})
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect(
                "share-group-snapshots",
                "%s/share-group-snapshots/{id}/action" % path_prefix,
                controller=self.resources["share-group-snapshots"],
                action="action",
                conditions={"method": ["POST"]})

        # --- Share replicas, transfers, replica export locations ---
        self.resources['share-replicas'] = share_replicas.create_resource()
        mapper.resource("share-replica", "share-replicas",
                        controller=self.resources['share-replicas'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})

        self.resources['share_transfers'] = (
            share_transfer.create_resource())
        mapper.resource("share-transfer", "share-transfers",
                        controller=self.resources['share_transfers'],
                        collection={'detail': 'GET'},
                        member={'accept': 'POST'})

        self.resources["share-replica-export-locations"] = (
            share_replica_export_locations.create_resource())
        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            mapper.connect("share-replicas",
                           ("%s/share-replicas/{share_replica_id}"
                            "/export-locations" % path_prefix),
                           controller=self.resources[
                               "share-replica-export-locations"],
                           action="index",
                           conditions={"method": ["GET"]})
            mapper.connect("share-replicas",
                           ("%s/share-replicas/{share_replica_id}"
                            "/export-locations"
                            "/{export_location_uuid}" % path_prefix),
                           controller=self.resources[
                               "share-replica-export-locations"],
                           action="show",
                           conditions={"method": ["GET"]})

        # --- User messages ---
        self.resources['messages'] = messages.create_resource()
        mapper.resource("message", "messages",
                        controller=self.resources['messages'])

        # --- Share access rules and access-rule metadata ---
        self.resources["share-access-rules"] = share_accesses.create_resource()
        mapper.resource(
            "share-access-rule", "share-access-rules",
            controller=self.resources["share-access-rules"],
            collection={"detail": "GET"})

        for path_prefix in ['/{project_id}', '']:
            # project_id is optional
            self.resources["access-metadata"] = (
                share_access_metadata.create_resource())
            access_metadata_controller = self.resources["access-metadata"]
            mapper.connect("share-access-rules",
                           "%s/share-access-rules"
                           "/{access_id}/metadata" % path_prefix,
                           controller=access_metadata_controller,
                           action="update",
                           conditions={"method": ["PUT"]})
            mapper.connect("share-access-rules",
                           "%s/share-access-rules"
                           "/{access_id}/metadata/{key}" % path_prefix,
                           controller=access_metadata_controller,
                           action="delete",
                           conditions={"method": ["DELETE"]})

        # --- Share backups ---
        self.resources['share-backups'] = share_backups.create_resource()
        mapper.resource("share-backup", "share-backups",
                        controller=self.resources['share-backups'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})

        # --- Resource locks ---
        self.resources["resource_locks"] = resource_locks.create_resource()
        mapper.resource("resource-lock", "resource-locks",
                        controller=self.resources["resource_locks"])
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/api/v2/services.py0000664000175000017500000001610100000000000017613 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client as http_client from oslo_utils import strutils import webob.exc from manila.api.openstack import wsgi from manila.api.views import services as services_views from manila import db from manila import exception from manila.i18n import _ from manila.services import api as service_api class ServiceMixin(object): """The Services API controller common logic. Mixin class that should be inherited by Services API controllers, which are used for different API URLs and microversions. 
    """

    resource_name = "service"
    _view_builder_class = services_views.ViewBuilder

    @wsgi.Controller.authorize("index")
    def _index(self, req, support_ensure_shares=False):
        """Return a list of all running services.

        :param req: the API request; query parameters 'host', 'binary',
            'zone', 'state' and 'status' act as exact-match filters.
        :param support_ensure_shares: when True (API >= 2.86) the per-service
            'ensuring' flag is included in each entry.
        """
        context = req.environ['manila.context']
        all_services = db.service_get_all(context)

        services = []
        for service in all_services:
            service_data = {
                'id': service['id'],
                'binary': service['binary'],
                'host': service['host'],
                'zone': service['availability_zone']['name'],
                'status': 'disabled' if service['disabled'] else 'enabled',
                'disabled_reason': service.get('disabled_reason'),
                'state': service['state'],
                'updated_at': service['updated_at'],
            }
            if support_ensure_shares:
                service_data['ensuring'] = service['ensuring']
            services.append(service_data)

        search_opts = [
            'host',
            'binary',
            'zone',
            'state',
            'status',
        ]
        # Apply each supported filter in turn; bail out early once the
        # candidate list is empty.
        for search_opt in search_opts:
            if search_opt in req.GET:
                value = req.GET[search_opt]
                services = [s for s in services if s[search_opt] == value]
                if len(services) == 0:
                    break

        return self._view_builder.detail_list(req, services)

    @wsgi.Controller.authorize("update")
    def _update(self, req, id, body, support_disabled_reason=True):
        """Enable/Disable scheduling for a service.

        :param id: the action name from the URL — "enable" or "disable";
            any other value yields 404.
        :param body: must contain 'host' and 'binary'; may contain
            'disabled_reason' when disabling (API >= 2.83).
        :param support_disabled_reason: False for microversions < 2.83,
            making 'disabled_reason' in the body an error.
        """
        context = req.environ['manila.context']
        update_dict = {}
        if id == "enable":
            data = {'disabled': False}
            if support_disabled_reason:
                # Clear any reason left over from a previous disable.
                update_dict['disabled_reason'] = None
        elif id == "disable":
            data = {'disabled': True}
            disabled_reason = body.get('disabled_reason')
            if disabled_reason and not support_disabled_reason:
                msg = _("'disabled_reason' option is not supported by this "
                        "microversion. Use 2.83 or greater microversion to "
                        "be able to set 'disabled_reason'.")
                raise webob.exc.HTTPBadRequest(explanation=msg)
            if disabled_reason:
                try:
                    strutils.check_string_length(disabled_reason.strip(),
                                                 name='disabled_reason',
                                                 min_length=1,
                                                 max_length=255)
                except (ValueError, TypeError):
                    msg = _('Disabled reason contains invalid characters '
                            'or is too long')
                    raise webob.exc.HTTPBadRequest(explanation=msg)

                # 'update_dict' feeds the DB update, 'data' feeds the
                # response view; both carry the stripped reason.
                update_dict['disabled_reason'] = disabled_reason.strip()
                data['disabled_reason'] = disabled_reason.strip()
        else:
            raise webob.exc.HTTPNotFound("Unknown action '%s'" % id)

        try:
            data['host'] = body['host']
            data['binary'] = body['binary']
        except (TypeError, KeyError):
            raise webob.exc.HTTPBadRequest()

        svc = db.service_get_by_args(context, data['host'], data['binary'])
        update_dict['disabled'] = data['disabled']
        db.service_update(context, svc['id'], update_dict)
        data['status'] = 'disabled' if id == "disable" else 'enabled'
        return self._view_builder.summary(req, data)


class ServiceControllerLegacy(ServiceMixin, wsgi.Controller):
    """Deprecated Services API controller.

    Used by legacy API v1 and v2 microversions from 2.0 to 2.6.
    Registered under deprecated API URL 'os-services'.
    """

    @wsgi.Controller.api_version('1.0', '2.6')
    def index(self, req):
        return self._index(req)

    @wsgi.Controller.api_version('1.0', '2.6')
    def update(self, req, id, body):
        # Legacy microversions predate the 'disabled_reason' field.
        return self._update(req, id, body, support_disabled_reason=False)


class ServiceController(ServiceMixin, wsgi.Controller):
    """Services API controller.

    Used only by API v2 starting from microversion 2.7.
    Registered under API URL 'services'.
""" def __init__(self): super().__init__() self.service_api = service_api.API() @wsgi.Controller.api_version('2.7', '2.85') def index(self, req): return self._index(req) @wsgi.Controller.api_version('2.86') # noqa def index(self, req): # pylint: disable=function-redefined # noqa F811 return self._index(req, support_ensure_shares=True) @wsgi.Controller.api_version('2.7', '2.82') def update(self, req, id, body): return self._update(req, id, body, support_disabled_reason=False) @wsgi.Controller.api_version('2.83') # noqa def update(self, req, id, body): # pylint: disable=function-redefined # noqa F811 return self._update(req, id, body) @wsgi.Controller.api_version('2.86') @wsgi.Controller.authorize def ensure_shares(self, req, body): """Starts ensure shares for a given manila-share binary.""" context = req.environ['manila.context'] host = body.get('host', None) if not host: raise webob.exc.HTTPBadRequest('Missing host parameter.') try: # The only binary supported is Manila share. service = db.service_get_by_args(context, host, 'manila-share') except exception.NotFound: raise webob.exc.HTTPNotFound( "manila-share binary for '%s' host not found" % id ) try: self.service_api.ensure_shares(context, service, host) except webob.exc.HTTPConflict: raise return webob.Response(status_int=http_client.ACCEPTED) def create_resource_legacy(): return wsgi.Resource(ServiceControllerLegacy()) def create_resource(): return wsgi.Resource(ServiceController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_access_metadata.py0000664000175000017500000000610600000000000022257 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The share access rule metadata api.""" import webob from manila.api.openstack import wsgi from manila.api.views import share_accesses as share_access_views from manila import db from manila import exception from manila.i18n import _ from manila import share class ShareAccessMetadataController(wsgi.Controller): """The Share access rule metadata API V2 controller.""" resource_name = 'share_access_metadata' _view_builder_class = share_access_views.ViewBuilder def __init__(self): super(ShareAccessMetadataController, self).__init__() self.share_api = share.API() @wsgi.Controller.api_version('2.45') @wsgi.Controller.authorize def update(self, req, access_id, body=None): context = req.environ['manila.context'] if not self.is_valid_body(body, 'metadata'): raise webob.exc.HTTPBadRequest() metadata = body['metadata'] md = self._update_share_access_metadata(context, access_id, metadata) return self._view_builder.view_metadata(req, md) @wsgi.Controller.api_version('2.45') @wsgi.Controller.authorize @wsgi.response(200) def delete(self, req, access_id, key): """Deletes an existing access metadata.""" context = req.environ['manila.context'] self._assert_access_exists(context, access_id) try: db.share_access_metadata_delete(context, access_id, key) except exception.ShareAccessMetadataNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) def _update_share_access_metadata(self, context, access_id, metadata): self._assert_access_exists(context, access_id) try: return self.share_api.update_share_access_metadata( context, access_id, metadata) except (ValueError, 
AttributeError): msg = _("Malformed request body") raise webob.exc.HTTPBadRequest(explanation=msg) except exception.InvalidMetadata as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidMetadataSize as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) def _assert_access_exists(self, context, access_id): try: self.share_api.access_get(context, access_id) except exception.NotFound as ex: raise webob.exc.HTTPNotFound(explanation=ex.msg) def create_resource(): return wsgi.Resource(ShareAccessMetadataController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_accesses.py0000664000175000017500000001611600000000000020751 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The share accesses api.""" import ast import webob from manila.api import common from manila.api.openstack import wsgi from manila.api.views import share_accesses as share_access_views from manila.common import constants from manila import exception from manila.i18n import _ from manila.lock import api as resource_locks from manila import share class ShareAccessesController(wsgi.Controller, wsgi.AdminActionsMixin): """The Share accesses API V2 controller for the OpenStack API.""" resource_name = 'share_access_rule' _view_builder_class = share_access_views.ViewBuilder def __init__(self): super(ShareAccessesController, self).__init__() self.share_api = share.API() self.resource_locks_api = resource_locks.API() @wsgi.Controller.api_version('2.45') @wsgi.Controller.authorize('get') def show(self, req, id): """Return data about the given share access rule.""" context = req.environ['manila.context'] share_access = self._get_share_access(context, id) restricted = self._is_rule_restricted(context, id) if restricted: share_access['restricted'] = True return self._view_builder.view(req, share_access) def _is_rule_restricted(self, context, id): search_opts = { 'resource_id': id, 'resource_action': constants.RESOURCE_ACTION_SHOW, 'resource_type': 'access_rule', 'all_projects': True, } locks, count = self.resource_locks_api.get_all( context.elevated(), search_opts, show_count=True) if count: return self.resource_locks_api.access_is_restricted(context, locks[0]) return False def _get_share_access(self, context, share_access_id): try: return self.share_api.access_get(context, share_access_id) except exception.NotFound: msg = _("Share access rule %s not found.") % share_access_id raise webob.exc.HTTPNotFound(explanation=msg) def _validate_search_opts(self, req, search_opts): """Check if search opts parameters are valid.""" access_type = search_opts.get('access_type', None) access_to = search_opts.get('access_to', None) if access_type and access_type not in ['ip', 'user', 
'cert', 'cephx']: raise exception.InvalidShareAccessType(type=access_type) # If access_to is present but access type is not, it gets tricky to # validate its content if access_to and not access_type: msg = _("'access_type' parameter must be provided when specifying " "'access_to'.") raise exception.InvalidInput(reason=msg) if access_type and access_to: common.validate_access(access_type=access_type, access_to=access_to, enable_ceph=True, enable_ipv6=True) access_level = search_opts.get('access_level', None) if access_level and access_level not in constants.ACCESS_LEVELS: raise exception.InvalidShareAccessLevel(level=access_level) @wsgi.Controller.authorize('index') def _index(self, req, support_for_access_filters=False): """Returns the list of access rules for a given share.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) if 'share_id' not in search_opts: msg = _("The field 'share_id' has to be specified.") raise webob.exc.HTTPBadRequest(explanation=msg) share_id = search_opts.pop('share_id', None) if 'metadata' in search_opts: search_opts['metadata'] = ast.literal_eval( search_opts['metadata']) if support_for_access_filters: try: self._validate_search_opts(req, search_opts) except (exception.InvalidShareAccessLevel, exception.InvalidShareAccessType) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) try: share = self.share_api.get(context, share_id) except exception.NotFound: msg = _("Share %s not found.") % share_id raise webob.exc.HTTPBadRequest(explanation=msg) access_rules = self.share_api.access_get_all( context, share, search_opts) rule_list = [] for rule in access_rules: restricted = self._is_rule_restricted(context, rule['id']) rule['restricted'] = restricted if (('access_to' in search_opts or 'access_key' in search_opts) and restricted): continue rule_list.append(rule) return self._view_builder.list_view(req, rule_list) @wsgi.Controller.api_version('2.45', '2.81') def index(self, req): return self._index(req) 
@wsgi.Controller.api_version('2.82') def index(self, req): # pylint: disable=function-redefined # noqa F811 return self._index(req, support_for_access_filters=True) @wsgi.Controller.api_version('2.88') @wsgi.Controller.authorize('update') def update(self, req, id, body): """Update access_level about the given share access rule.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'update_access'): raise webob.exc.HTTPBadRequest() access_data = body['update_access'] access_level = access_data.get('access_level', None) if not access_level: msg = _("Invalid input. Missing 'access_level' in " "update request.") raise webob.exc.HTTPBadRequest(explanation=msg) if access_level not in constants.ACCESS_LEVELS: msg = _("Invalid or unsupported share access " "level: %s.") % access_level raise webob.exc.HTTPBadRequest(explanation=msg) share_access = self._get_share_access(context, id) if access_level == share_access.access_level: return self._view_builder.view(req, share_access) share = self.share_api.get(context, share_access.share_id) values = { 'access_level': access_level, } access = self.share_api.update_access( context, share, share_access, values) return self._view_builder.view(req, access) def create_resource(): return wsgi.Resource(ShareAccessesController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_backups.py0000664000175000017500000002442700000000000020614 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """The Share Backups API.""" import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import share_backups as backup_view from manila import db from manila import exception from manila.i18n import _ from manila import policy from manila import share MIN_SUPPORTED_API_VERSION = '2.80' class ShareBackupController(wsgi.Controller, wsgi.AdminActionsMixin): """The Share Backup API controller for the OpenStack API.""" resource_name = 'share_backup' _view_builder_class = backup_view.BackupViewBuilder def __init__(self): super(ShareBackupController, self).__init__() self.share_api = share.API() def _update(self, *args, **kwargs): db.share_backup_update(*args, **kwargs) def _get(self, *args, **kwargs): return db.share_backup_get(*args, **kwargs) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) def index(self, req): """Return a summary list of backups.""" return self._get_backups(req) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) def detail(self, req): """Returns a detailed list of backups.""" return self._get_backups(req, is_detail=True) @wsgi.Controller.authorize('get_all') def _get_backups(self, req, is_detail=False): """Returns list of backups.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) params = common.get_pagination_params(req) limit, offset = [params.get('limit'), params.get('offset')] search_opts.pop('limit', None) search_opts.pop('offset', None) sort_key, sort_dir = common.get_sort_params(search_opts) key_dict = {"name": "display_name", "description": "display_description"} for key in key_dict: if sort_key == key: sort_key = key_dict[key] if 'name' in search_opts: search_opts['display_name'] = search_opts.pop('name') if 'description' in search_opts: search_opts['display_description'] = 
search_opts.pop( 'description') # like filter for key, db_key in (('name~', 'display_name~'), ('description~', 'display_description~')): if key in search_opts: search_opts[db_key] = search_opts.pop(key) common.remove_invalid_options(context, search_opts, self._get_backups_search_options()) # Read and remove key 'all_tenants' if was provided search_opts['project_id'] = context.project_id all_tenants = search_opts.pop('all_tenants', search_opts.pop('all_projects', None)) if all_tenants: allowed_to_list_all_tenants = policy.check_policy( context, 'share_backup', 'get_all_project', do_raise=False) if allowed_to_list_all_tenants: search_opts.pop('project_id') share_id = req.params.get('share_id') if share_id: try: self.share_api.get(context, share_id) search_opts.update({'share_id': share_id}) except exception.NotFound: msg = _("No share exists with ID %s.") raise exc.HTTPBadRequest(explanation=msg % share_id) backups = db.share_backups_get_all(context, filters=search_opts, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) if is_detail: backups = self._view_builder.detail_list(req, backups) else: backups = self._view_builder.summary_list(req, backups) return backups def _get_backups_search_options(self): """Return share backup search options allowed by non-admin.""" return ('display_name', 'status', 'share_id', 'topic', 'display_name~', 'display_description~', 'display_description') @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.Controller.authorize('get') def show(self, req, id): """Return data about the given backup.""" context = req.environ['manila.context'] try: backup = db.share_backup_get(context, id) except exception.ShareBackupNotFound: msg = _("No backup exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) return self._view_builder.detail(req, backup) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.Controller.authorize @wsgi.response(202) def create(self, req, body): 
"""Add a backup to an existing share.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share_backup'): msg = _("Body does not contain 'share_backup' information.") raise exc.HTTPUnprocessableEntity(explanation=msg) backup = body.get('share_backup') share_id = backup.get('share_id') if not share_id: msg = _("'share_id' is missing from the request body.") raise exc.HTTPBadRequest(explanation=msg) try: share = self.share_api.get(context, share_id) except exception.NotFound: msg = _("No share exists with ID %s.") raise exc.HTTPBadRequest(explanation=msg % share_id) if share.get('is_soft_deleted'): msg = _("Backup can not be created for share '%s' " "since it has been soft deleted.") % share_id raise exc.HTTPForbidden(explanation=msg) try: backup = self.share_api.create_share_backup(context, share, backup) except (exception.InvalidBackup, exception.InvalidShare) as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.ShareBusyException as e: raise exc.HTTPConflict(explanation=e.msg) return self._view_builder.detail(req, backup) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.Controller.authorize def delete(self, req, id): """Delete a backup.""" context = req.environ['manila.context'] try: backup = db.share_backup_get(context, id) except exception.ShareBackupNotFound: msg = _("No backup exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) try: self.share_api.delete_share_backup(context, backup) except exception.InvalidBackup as e: raise exc.HTTPBadRequest(explanation=e.msg) return webob.Response(status_int=202) def _restore(self, req, id, body): """common logic for share backup restore microversion methods""" context = req.environ['manila.context'] try: backup = db.share_backup_get(context, id) except exception.ShareBackupNotFound: msg = _("No backup exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) target_share_id = None if body and 'restore' in body: target_share_id = 
body.get('restore') try: restored = self.share_api.restore_share_backup( context, backup, target_share_id) except (exception.InvalidShare, exception.InvalidBackup) as e: raise exc.HTTPBadRequest(explanation=e.msg) retval = self._view_builder.restore_summary(req, restored) return retval @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, '2.90', experimental=True) @wsgi.action('restore') @wsgi.Controller.authorize @wsgi.response(202) def restore(self, req, id, body): """Restore an existing backup to a source share.""" return self._restore(req, id, None) @wsgi.Controller.api_version('2.91', experimental=True) @wsgi.action('restore') @wsgi.Controller.authorize @wsgi.response(202) def restore(self, req, id, body): # noqa F811 """Restore an existing backup to a source or target share.""" return self._restore(req, id, body) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.Controller.authorize @wsgi.response(200) def update(self, req, id, body): """Update a backup.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share_backup'): msg = _("Body does not contain 'share_backup' information.") raise exc.HTTPUnprocessableEntity(explanation=msg) try: backup = db.share_backup_get(context, id) except exception.ShareBackupNotFound: msg = _("No backup exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) backup_update = body.get('share_backup') update_dict = {} if 'name' in backup_update: update_dict['display_name'] = backup_update.pop('name') if 'description' in backup_update: update_dict['display_description'] = ( backup_update.pop('description')) backup = self.share_api.update_share_backup(context, backup, update_dict) return self._view_builder.detail(req, backup) @wsgi.Controller.api_version(MIN_SUPPORTED_API_VERSION, experimental=True) @wsgi.action('reset_status') def backup_reset_status(self, req, id, body): return self._reset_status(req, id, body) def create_resource(): return 
wsgi.Resource(ShareBackupController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_export_locations.py0000664000175000017500000002075500000000000022560 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from webob import exc from manila.api.openstack import wsgi from manila.api.v2 import metadata from manila.api.views import export_locations as export_locations_views from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import policy LOG = log.getLogger(__name__) CONF = cfg.CONF class ShareExportLocationController(wsgi.Controller, metadata.MetadataController): """The Share Export Locations API controller.""" def __init__(self): self._view_builder_class = export_locations_views.ViewBuilder self.resource_name = 'share_export_location' super(ShareExportLocationController, self).__init__() self._conf_admin_only_metadata_keys = getattr( CONF, 'admin_only_el_metadata', [] ) def _verify_share(self, context, share_id): try: share = db_api.share_get(context, share_id) if not share['is_public']: policy.check_policy(context, 'share', 'get', share) except exception.NotFound: msg = _("Share '%s' not found.") % share_id raise exc.HTTPNotFound(explanation=msg) @wsgi.Controller.authorize('index') def _index(self, req, 
share_id, ignore_secondary_replicas=False): context = req.environ['manila.context'] self._verify_share(context, share_id) kwargs = { 'include_admin_only': context.is_admin, 'ignore_migration_destination': True, 'ignore_secondary_replicas': ignore_secondary_replicas, } export_locations = db_api.export_location_get_all_by_share_id( context, share_id, **kwargs) return self._view_builder.summary_list(req, export_locations) @wsgi.Controller.authorize('show') def _show(self, req, share_id, export_location_uuid, ignore_secondary_replicas=False): context = req.environ['manila.context'] self._verify_share(context, share_id) try: export_location = db_api.export_location_get_by_uuid( context, export_location_uuid, ignore_secondary_replicas=ignore_secondary_replicas) except exception.ExportLocationNotFound: msg = _("Export location '%s' not found.") % export_location_uuid raise exc.HTTPNotFound(explanation=msg) if export_location.is_admin_only and not context.is_admin: raise exc.HTTPForbidden() return self._view_builder.detail(req, export_location) @wsgi.Controller.api_version('2.9', '2.46') def index(self, req, share_id): """Return a list of export locations for share.""" return self._index(req, share_id) @wsgi.Controller.api_version('2.47') # noqa: F811 def index(self, req, share_id): # pylint: disable=function-redefined # noqa F811 """Return a list of export locations for share.""" return self._index(req, share_id, ignore_secondary_replicas=True) @wsgi.Controller.api_version('2.9', '2.46') def show(self, req, share_id, export_location_uuid): """Return data about the requested export location.""" return self._show(req, share_id, export_location_uuid) @wsgi.Controller.api_version('2.47') # noqa: F811 def show(self, req, share_id, # pylint: disable=function-redefined # noqa F811 export_location_uuid): """Return data about the requested export location.""" return self._show(req, share_id, export_location_uuid, ignore_secondary_replicas=True) def 
_validate_metadata_for_update(self, req, share_export_location, metadata, delete=True): persistent_keys = set(self._conf_admin_only_metadata_keys) context = req.environ['manila.context'] if set(metadata).intersection(persistent_keys): try: policy.check_policy( context, 'share_export_location', 'update_admin_only_metadata') except exception.PolicyNotAuthorized: msg = _("Cannot set or update admin only metadata.") LOG.exception(msg) raise exc.HTTPForbidden(explanation=msg) persistent_keys = [] current_export_metadata = db_api.export_location_metadata_get( context, share_export_location) if delete: _metadata = metadata for key in persistent_keys: if key in current_export_metadata: _metadata[key] = current_export_metadata[key] else: metadata_copy = metadata.copy() for key in persistent_keys: metadata_copy.pop(key, None) _metadata = current_export_metadata.copy() _metadata.update(metadata_copy) return _metadata @wsgi.Controller.api_version("2.87") @wsgi.Controller.authorize("get_metadata") def index_metadata(self, req, share_id, resource_id): """Returns the list of metadata for a given share export location.""" context = req.environ['manila.context'] self._verify_share(context, share_id) return self._index_metadata(req, resource_id) @wsgi.Controller.api_version("2.87") @wsgi.Controller.authorize("update_metadata") def create_metadata(self, req, share_id, resource_id, body): """Create metadata for a given share export location.""" _metadata = self._validate_metadata_for_update(req, resource_id, body['metadata'], delete=False) body['metadata'] = _metadata context = req.environ['manila.context'] self._verify_share(context, share_id) return self._create_metadata(req, resource_id, body) @wsgi.Controller.api_version("2.87") @wsgi.Controller.authorize("update_metadata") def update_all_metadata(self, req, share_id, resource_id, body): """Update entire metadata for a given share export location.""" _metadata = self._validate_metadata_for_update(req, resource_id, 
body['metadata']) body['metadata'] = _metadata context = req.environ['manila.context'] self._verify_share(context, share_id) return self._update_all_metadata(req, resource_id, body) @wsgi.Controller.api_version("2.87") @wsgi.Controller.authorize("update_metadata") def update_metadata_item(self, req, share_id, resource_id, body, key): """Update metadata item for a given share export location.""" _metadata = self._validate_metadata_for_update(req, resource_id, body['metadata'], delete=False) body['metadata'] = _metadata context = req.environ['manila.context'] self._verify_share(context, share_id) return self._update_metadata_item(req, resource_id, body, key) @wsgi.Controller.api_version("2.87") @wsgi.Controller.authorize("get_metadata") def show_metadata(self, req, share_id, resource_id, key): """Show metadata for a given share export location.""" context = req.environ['manila.context'] self._verify_share(context, share_id) return self._show_metadata(req, resource_id, key) @wsgi.Controller.api_version("2.87") @wsgi.Controller.authorize("delete_metadata") def delete_metadata(self, req, share_id, resource_id, key): """Delete metadata for a given share export location.""" context = req.environ['manila.context'] self._verify_share(context, share_id) return self._delete_metadata(req, resource_id, key) def create_resource(): return wsgi.Resource(ShareExportLocationController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_group_snapshots.py0000664000175000017500000002521400000000000022415 0ustar00zuulzuul00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from oslo_log import log from oslo_utils import uuidutils import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi import manila.api.views.share_group_snapshots as share_group_snapshots_views from manila import db from manila import exception from manila.i18n import _ import manila.share_group.api as share_group_api LOG = log.getLogger(__name__) SG_GRADUATION_VERSION = '2.55' class ShareGroupSnapshotController(wsgi.Controller, wsgi.AdminActionsMixin): """The share group snapshots API controller for the OpenStack API.""" resource_name = 'share_group_snapshot' _view_builder_class = ( share_group_snapshots_views.ShareGroupSnapshotViewBuilder) def __init__(self): super(ShareGroupSnapshotController, self).__init__() self.share_group_api = share_group_api.API() def _get_share_group_snapshot(self, context, sg_snapshot_id): try: return self.share_group_api.get_share_group_snapshot( context, sg_snapshot_id) except exception.NotFound: msg = _("Share group snapshot %s not found.") % sg_snapshot_id raise exc.HTTPNotFound(explanation=msg) @wsgi.Controller.authorize('get') def _show(self, req, id): """Return data about the given share group snapshot.""" context = req.environ['manila.context'] sg_snapshot = self._get_share_group_snapshot(context, id) return self._view_builder.detail(req, sg_snapshot) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def show(self, req, id): return self._show(req, id) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def show(self, req, id): 
# pylint: disable=function-redefined # noqa F811 return self._show(req, id) @wsgi.Controller.authorize('delete') def _delete_group_snapshot(self, req, id): """Delete a share group snapshot.""" context = req.environ['manila.context'] LOG.info("Delete share group snapshot with id: %s", id, context=context) sg_snapshot = self._get_share_group_snapshot(context, id) try: self.share_group_api.delete_share_group_snapshot( context, sg_snapshot) except exception.InvalidShareGroupSnapshot as e: raise exc.HTTPConflict(explanation=e.msg) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def delete(self, req, id): return self._delete_group_snapshot(req, id) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def delete(self, req, id): # pylint: disable=function-redefined # noqa F811 return self._delete_group_snapshot(req, id) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def index(self, req): """Returns a summary list of share group snapshots.""" return self._get_share_group_snaps(req, is_detail=False) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def index(self, req): # pylint: disable=function-redefined # noqa F811 """Returns a summary list of share group snapshots.""" return self._get_share_group_snaps(req, is_detail=False) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def detail(self, req): """Returns a detailed list of share group snapshots.""" return self._get_share_group_snaps(req, is_detail=True) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def detail(self, req): # pylint: disable=function-redefined # noqa F811 """Returns a detailed list of share group snapshots.""" return self._get_share_group_snaps(req, is_detail=True) @wsgi.Controller.authorize('get_all') def _get_share_group_snaps(self, req, is_detail): """Returns a list of share group snapshots.""" context = req.environ['manila.context'] search_opts = {} 
search_opts.update(req.GET) # Remove keys that are not related to group attrs search_opts.pop('limit', None) search_opts.pop('offset', None) sort_key = search_opts.pop('sort_key', 'created_at') sort_dir = search_opts.pop('sort_dir', 'desc') snaps = self.share_group_api.get_all_share_group_snapshots( context, detailed=is_detail, search_opts=search_opts, sort_dir=sort_dir, sort_key=sort_key) limited_list = common.limited(snaps, req) if is_detail: snaps = self._view_builder.detail_list(req, limited_list) else: snaps = self._view_builder.summary_list(req, limited_list) return snaps @wsgi.Controller.authorize('update') def _update_group_snapshot(self, req, id, body): """Update a share group snapshot.""" context = req.environ['manila.context'] key = 'share_group_snapshot' if not self.is_valid_body(body, key): msg = _("'%s' is missing from the request body.") % key raise exc.HTTPBadRequest(explanation=msg) sg_snapshot_data = body[key] valid_update_keys = { 'name', 'description', } invalid_fields = set(sg_snapshot_data.keys()) - valid_update_keys if invalid_fields: msg = _("The fields %s are invalid or not allowed to be updated.") raise exc.HTTPBadRequest(explanation=msg % invalid_fields) sg_snapshot = self._get_share_group_snapshot(context, id) sg_snapshot = self.share_group_api.update_share_group_snapshot( context, sg_snapshot, sg_snapshot_data) return self._view_builder.detail(req, sg_snapshot) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def update(self, req, id, body): return self._update_group_snapshot(req, id, body) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def update(self, req, id, body): # pylint: disable=function-redefined # noqa F811 return self._update_group_snapshot(req, id, body) @wsgi.Controller.authorize('create') def _create(self, req, body): """Creates a new share group snapshot.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share_group_snapshot'): msg = _("'share_group_snapshot' is missing 
from the request body.") raise exc.HTTPBadRequest(explanation=msg) share_group_snapshot = body.get('share_group_snapshot', {}) share_group_id = share_group_snapshot.get('share_group_id') if not share_group_id: msg = _("Must supply 'share_group_id' attribute.") raise exc.HTTPBadRequest(explanation=msg) if not uuidutils.is_uuid_like(share_group_id): msg = _("The 'share_group_id' attribute must be a uuid.") raise exc.HTTPBadRequest(explanation=msg) kwargs = {"share_group_id": share_group_id} if 'name' in share_group_snapshot: kwargs['name'] = share_group_snapshot.get('name') if 'description' in share_group_snapshot: kwargs['description'] = share_group_snapshot.get('description') try: new_snapshot = self.share_group_api.create_share_group_snapshot( context, **kwargs) except exception.ShareGroupNotFound as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.InvalidShareGroup as e: raise exc.HTTPConflict(explanation=e.msg) return self._view_builder.detail(req, dict(new_snapshot.items())) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.response(202) def create(self, req, body): return self._create(req, body) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.response(202) def create(self, req, body): # pylint: disable=function-redefined # noqa F811 return self._create(req, body) @wsgi.Controller.authorize('get') def _members(self, req, id): """Returns a list of share group snapshot members.""" context = req.environ['manila.context'] snaps = self.share_group_api.get_all_share_group_snapshot_members( context, id) limited_list = common.limited(snaps, req) snaps = self._view_builder.member_list(req, limited_list) return snaps @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def members(self, req, id): return self._members(req, id) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def members(self, req, id): # pylint: disable=function-redefined # noqa F811 return self._members(req, id) def _update(self, 
*args, **kwargs): db.share_group_snapshot_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.share_group_api.get_share_group_snapshot(*args, **kwargs) def _delete(self, context, resource, force=True): db.share_group_snapshot_destroy(context.elevated(), resource['id']) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.action('reset_status') def share_group_snapshot_reset_status(self, req, id, body): return self._reset_status(req, id, body) # pylint: disable=function-redefined @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.action('reset_status') def share_group_snapshot_reset_status(self, req, id, body): # noqa F811 return self._reset_status(req, id, body) # pylint: enable=function-redefined @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.action('force_delete') def share_group_snapshot_force_delete(self, req, id, body): return self._force_delete(req, id, body) # pylint: disable=function-redefined @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.action('force_delete') def share_group_snapshot_force_delete(self, req, id, body): # noqa F811 return self._force_delete(req, id, body) def create_resource(): return wsgi.Resource(ShareGroupSnapshotController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_group_type_specs.py0000664000175000017500000001521000000000000022544 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client import copy import webob from manila.api import common from manila.api.openstack import wsgi from manila import db from manila import exception from manila.i18n import _ from manila.share_group import share_group_types SG_GRADUATION_VERSION = '2.55' class ShareGroupTypeSpecsController(wsgi.Controller): """The share group type specs API controller for the OpenStack API.""" resource_name = 'share_group_types_spec' def _get_group_specs(self, context, type_id): specs = db.share_group_type_specs_get(context, type_id) return {"group_specs": copy.deepcopy(specs)} def _assert_share_group_type_exists(self, context, type_id): try: share_group_types.get(context, type_id) except exception.NotFound as ex: raise webob.exc.HTTPNotFound(explanation=ex.msg) def _verify_group_specs(self, group_specs): def is_valid_string(v): return isinstance(v, str) and len(v) in range(1, 256) def is_valid_spec(k, v): valid_spec_key = is_valid_string(k) valid_type = is_valid_string(v) or isinstance(v, bool) return valid_spec_key and valid_type for k, v in group_specs.items(): if is_valid_string(k) and isinstance(v, dict): self._verify_group_specs(v) elif not is_valid_spec(k, v): expl = _('Invalid extra_spec: %(key)s: %(value)s') % { 'key': k, 'value': v } raise webob.exc.HTTPBadRequest(explanation=expl) @wsgi.Controller.authorize('index') def _index(self, req, id): """Returns the list of group specs for a given share group type.""" context = req.environ['manila.context'] self._assert_share_group_type_exists(context, id) return self._get_group_specs(context, id) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def index(self, req, id): return self._index(req, id) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def index(self, req, id): # pylint: disable=function-redefined # noqa F811 return self._index(req, id) 
@wsgi.Controller.authorize('create') def _create(self, req, id, body=None): context = req.environ['manila.context'] if not self.is_valid_body(body, 'group_specs'): raise webob.exc.HTTPBadRequest() self._assert_share_group_type_exists(context, id) specs = body['group_specs'] self._verify_group_specs(specs) self._check_key_names(specs.keys()) db.share_group_type_specs_update_or_create(context, id, specs) return body @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def create(self, req, id, body=None): return self._create(req, id, body) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def create(self, req, id, body=None): # pylint: disable=function-redefined # noqa F811 return self._create(req, id, body) @wsgi.Controller.authorize('update') def _update(self, req, id, key, body=None): context = req.environ['manila.context'] if not body: expl = _('Request body empty.') raise webob.exc.HTTPBadRequest(explanation=expl) self._assert_share_group_type_exists(context, id) if key not in body: expl = _('Request body and URI mismatch.') raise webob.exc.HTTPBadRequest(explanation=expl) if len(body) > 1: expl = _('Request body contains too many items.') raise webob.exc.HTTPBadRequest(explanation=expl) self._verify_group_specs(body) db.share_group_type_specs_update_or_create(context, id, body) return body @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def update(self, req, id, key, body=None): return self._update(req, id, key, body) # pylint: disable=function-redefined @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def update(self, req, id, key, body=None): # noqa F811 return self._update(req, id, key, body) @wsgi.Controller.authorize('show') def _show(self, req, id, key): """Return a single group spec item.""" context = req.environ['manila.context'] self._assert_share_group_type_exists(context, id) specs = self._get_group_specs(context, id) if key in specs['group_specs']: return {key: specs['group_specs'][key]} else: raise 
webob.exc.HTTPNotFound() # pylint: enable=function-redefined @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def show(self, req, id, key): return self._show(req, id, key) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def show(self, req, id, key): # pylint: disable=function-redefined # noqa F811 return self._show(req, id, key) @wsgi.Controller.authorize('delete') def _delete(self, req, id, key): """Deletes an existing group spec.""" context = req.environ['manila.context'] self._assert_share_group_type_exists(context, id) try: db.share_group_type_specs_delete(context, id, key) except exception.ShareGroupTypeSpecsNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) return webob.Response(status_int=http_client.NO_CONTENT) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def delete(self, req, id, key): return self._delete(req, id, key) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def delete(self, req, id, key): # pylint: disable=function-redefined # noqa F811 return self._delete(req, id, key) def _check_key_names(self, keys): if not common.validate_key_names(keys): expl = _('Key names can only contain alphanumeric characters, ' 'underscores, periods, colons and hyphens.') raise webob.exc.HTTPBadRequest(explanation=expl) def create_resource(): return wsgi.Resource(ShareGroupTypeSpecsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_group_types.py0000664000175000017500000003051000000000000021532 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The group type API controller module.""" import ast from http import client as http_client from oslo_utils import uuidutils import webob from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.views import share_group_types as views from manila import exception from manila.i18n import _ from manila.share_group import share_group_types SG_GRADUATION_VERSION = '2.55' class ShareGroupTypesController(wsgi.Controller): """The share group types API controller for the OpenStack API.""" resource_name = 'share_group_type' _view_builder_class = views.ShareGroupTypeViewBuilder def _check_body(self, body, action_name): if not self.is_valid_body(body, action_name): raise webob.exc.HTTPBadRequest() access = body[action_name] project = access.get('project') if not uuidutils.is_uuid_like(project): msg = _("Project value (%s) must be in uuid format.") % project raise webob.exc.HTTPBadRequest(explanation=msg) @wsgi.Controller.authorize('index') def _index(self, req): """Returns the list of share group types.""" limited_types = self._get_share_group_types(req) return self._view_builder.index(req, limited_types) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def index(self, req): return self._index(req) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def index(self, req): # pylint: disable=function-redefined # noqa F811 return self._index(req) @wsgi.Controller.authorize('show') def _show(self, req, id): """Return a single share group type 
item.""" context = req.environ['manila.context'] try: share_group_type = share_group_types.get(context, id) except exception.NotFound: msg = _("Share group type with id %s not found.") raise exc.HTTPNotFound(explanation=msg % id) share_group_type['id'] = str(share_group_type['id']) return self._view_builder.show(req, share_group_type) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def show(self, req, id): return self._show(req, id) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def show(self, req, id): # pylint: disable=function-redefined # noqa F811 return self._show(req, id) @wsgi.Controller.authorize('default') def _default(self, req): """Return default share group type.""" context = req.environ['manila.context'] share_group_type = share_group_types.get_default(context) if not share_group_type: msg = _("Default share group type not found.") raise exc.HTTPNotFound(explanation=msg) share_group_type['id'] = str(share_group_type['id']) return self._view_builder.show(req, share_group_type) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def default(self, req): return self._default(req) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def default(self, req): # pylint: disable=function-redefined # noqa F811 return self._default(req) def _get_share_group_types(self, req): """Helper function that returns a list of share group type dicts.""" filters = {} context = req.environ['manila.context'] if context.is_admin: # Only admin has query access to all group types filters['is_public'] = common.parse_is_public( req.params.get('is_public')) else: filters['is_public'] = True group_specs = req.params.get('group_specs', {}) group_specs_disallowed = (req.api_version_request < api_version.APIVersionRequest("2.66")) if group_specs and group_specs_disallowed: msg = _("Filter by 'group_specs' is not supported by this " "microversion. 
Use 2.66 or greater microversion to " "be able to use filter search by 'group_specs.") raise webob.exc.HTTPBadRequest(explanation=msg) elif group_specs: filters['group_specs'] = ast.literal_eval(group_specs) limited_types = share_group_types.get_all( context, search_opts=filters).values() return list(limited_types) @wsgi.Controller.authorize('create') def _create(self, req, body): """Creates a new share group type.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share_group_type'): raise webob.exc.HTTPBadRequest() share_group_type = body['share_group_type'] name = share_group_type.get('name') specs = share_group_type.get('group_specs', {}) is_public = share_group_type.get('is_public', True) if not share_group_type.get('share_types'): msg = _("Supported share types must be provided.") raise webob.exc.HTTPBadRequest(explanation=msg) share_types = share_group_type.get('share_types') if name is None or name == "" or len(name) > 255: msg = _("Share group type name is not valid.") raise webob.exc.HTTPBadRequest(explanation=msg) if not (specs is None or isinstance(specs, dict)): msg = _("Group specs can be either of 'None' or 'dict' types.") raise webob.exc.HTTPBadRequest(explanation=msg) if specs: for element in list(specs.keys()) + list(specs.values()): if not isinstance(element, str): msg = _("Group specs keys and values should be strings.") raise webob.exc.HTTPBadRequest(explanation=msg) try: share_group_types.create( context, name, share_types, specs, is_public) share_group_type = share_group_types.get_by_name( context, name) except exception.ShareGroupTypeExists as err: raise webob.exc.HTTPConflict(explanation=err.message) except exception.ShareTypeDoesNotExist as err: raise webob.exc.HTTPNotFound(explanation=err.message) except exception.NotFound: raise webob.exc.HTTPNotFound() return self._view_builder.show(req, share_group_type) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.action("create") def create(self, req, 
body): return self._create(req, body) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.action("create") def create(self, req, body): # pylint: disable=function-redefined # noqa F811 return self._create(req, body) @wsgi.Controller.authorize('delete') def _delete(self, req, id): """Deletes an existing group type.""" context = req.environ['manila.context'] try: share_group_type = share_group_types.get(context, id) share_group_types.destroy(context, share_group_type['id']) except exception.ShareGroupTypeInUse: msg = _('Target share group type with id %s is still in use.') raise webob.exc.HTTPBadRequest(explanation=msg % id) except exception.NotFound: raise webob.exc.HTTPNotFound() return webob.Response(status_int=http_client.NO_CONTENT) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.action("delete") def delete(self, req, id): return self._delete(req, id) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.action("delete") def delete(self, req, id): # pylint: disable=function-redefined # noqa F811 return self._delete(req, id) @wsgi.Controller.authorize('list_project_access') def _share_group_type_access(self, req, id): context = req.environ['manila.context'] try: share_group_type = share_group_types.get( context, id, expected_fields=['projects']) except exception.ShareGroupTypeNotFound: explanation = _("Share group type %s not found.") % id raise webob.exc.HTTPNotFound(explanation=explanation) if share_group_type['is_public']: expl = _("Access list not available for public share group types.") raise webob.exc.HTTPNotFound(explanation=expl) projects = [] for project_id in share_group_type['projects']: projects.append( {'share_group_type_id': share_group_type['id'], 'project_id': project_id} ) return {'share_group_type_access': projects} @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def share_group_type_access(self, req, id): return self._share_group_type_access(req, id) # pylint: 
disable=function-redefined @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def share_group_type_access(self, req, id): # noqa F811 return self._share_group_type_access(req, id) @wsgi.Controller.authorize('add_project_access') def _add_project_access(self, req, id, body): context = req.environ['manila.context'] self._check_body(body, 'addProjectAccess') project = body['addProjectAccess']['project'] self._assert_non_public_share_group_type(context, id) try: share_group_types.add_share_group_type_access( context, id, project) except exception.ShareGroupTypeAccessExists as err: raise webob.exc.HTTPConflict(explanation=err.message) return webob.Response(status_int=http_client.ACCEPTED) # pylint: enable=function-redefined @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.action('addProjectAccess') def add_project_access(self, req, id, body): return self._add_project_access(req, id, body) # pylint: disable=function-redefined @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.action('addProjectAccess') def add_project_access(self, req, id, body): # noqa F811 return self._add_project_access(req, id, body) @wsgi.Controller.authorize('remove_project_access') def _remove_project_access(self, req, id, body): context = req.environ['manila.context'] self._check_body(body, 'removeProjectAccess') project = body['removeProjectAccess']['project'] self._assert_non_public_share_group_type(context, id) try: share_group_types.remove_share_group_type_access( context, id, project) except exception.ShareGroupTypeAccessNotFound as err: raise webob.exc.HTTPNotFound(explanation=err.message) return webob.Response(status_int=http_client.ACCEPTED) # pylint: enable=function-redefined @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.action('removeProjectAccess') def remove_project_access(self, req, id, body): return self._remove_project_access(req, id, body) # pylint: disable=function-redefined 
@wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.action('removeProjectAccess') def remove_project_access(self, req, id, body): # noqa F811 return self._remove_project_access(req, id, body) def _assert_non_public_share_group_type(self, context, type_id): try: share_group_type = share_group_types.get( context, type_id) if share_group_type['is_public']: msg = _("Type access modification is not applicable to " "public share group type.") raise webob.exc.HTTPConflict(explanation=msg) except exception.ShareGroupTypeNotFound as err: raise webob.exc.HTTPNotFound(explanation=err.message) def create_resource(): return wsgi.Resource(ShareGroupTypesController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_groups.py0000664000175000017500000003371500000000000020503 0ustar00zuulzuul00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client from oslo_log import log from oslo_utils import uuidutils import webob from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.views import share_groups as share_group_views from manila import db from manila import exception from manila.i18n import _ from manila.share import share_types from manila.share_group import api as share_group_api from manila.share_group import share_group_types LOG = log.getLogger(__name__) SG_GRADUATION_VERSION = '2.55' class ShareGroupController(wsgi.Controller, wsgi.AdminActionsMixin): """The Share Groups API controller for the OpenStack API.""" resource_name = 'share_group' _view_builder_class = share_group_views.ShareGroupViewBuilder def __init__(self): super(ShareGroupController, self).__init__() self.share_group_api = share_group_api.API() def _get_share_group(self, context, share_group_id): try: return self.share_group_api.get(context, share_group_id) except exception.NotFound: msg = _("Share group %s not found.") % share_group_id raise exc.HTTPNotFound(explanation=msg) @wsgi.Controller.authorize('get') def _show(self, req, id): """Return data about the given share group.""" context = req.environ['manila.context'] share_group = self._get_share_group(context, id) return self._view_builder.detail(req, share_group) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def show(self, req, id): return self._show(req, id) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def show(self, req, id): # pylint: disable=function-redefined # noqa F811 return self._show(req, id) @wsgi.Controller.authorize('delete') def _delete_share_group(self, req, id): """Delete a share group.""" context = req.environ['manila.context'] LOG.info("Delete share group with id: %s", id, context=context) share_group = self._get_share_group(context, id) try: self.share_group_api.delete(context, 
share_group) except exception.InvalidShareGroup as e: raise exc.HTTPConflict(explanation=e.msg) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def delete(self, req, id): return self._delete_share_group(req, id) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def delete(self, req, id): # pylint: disable=function-redefined # noqa F811 return self._delete_share_group(req, id) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def index(self, req): return self._get_share_groups(req, is_detail=False) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def index(self, req): # pylint: disable=function-redefined # noqa F811 return self._get_share_groups(req, is_detail=False) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def detail(self, req): return self._get_share_groups(req, is_detail=True) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def detail(self, req): # pylint: disable=function-redefined # noqa F811 return self._get_share_groups(req, is_detail=True) @wsgi.Controller.authorize('get_all') def _get_share_groups(self, req, is_detail): """Returns a summary or detail list of share groups.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) # Remove keys that are not related to share group attrs search_opts.pop('limit', None) search_opts.pop('offset', None) sort_key = search_opts.pop('sort_key', 'created_at') sort_dir = search_opts.pop('sort_dir', 'desc') if req.api_version_request < api_version.APIVersionRequest("2.36"): search_opts.pop('name~', None) search_opts.pop('description~', None) if 'group_type_id' in search_opts: search_opts['share_group_type_id'] = search_opts.pop( 'group_type_id') share_groups = self.share_group_api.get_all( context, detailed=is_detail, search_opts=search_opts, sort_dir=sort_dir, sort_key=sort_key, ) limited_list = common.limited(share_groups, req) if is_detail: share_groups 
= self._view_builder.detail_list(req, limited_list) else: share_groups = self._view_builder.summary_list(req, limited_list) return share_groups @wsgi.Controller.authorize('update') def _update_share_group(self, req, id, body): """Update a share group.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share_group'): msg = _("'share_group' is missing from the request body.") raise exc.HTTPBadRequest(explanation=msg) share_group_data = body['share_group'] valid_update_keys = {'name', 'description'} invalid_fields = set(share_group_data.keys()) - valid_update_keys if invalid_fields: msg = _("The fields %s are invalid or not allowed to be updated.") raise exc.HTTPBadRequest(explanation=msg % invalid_fields) share_group = self._get_share_group(context, id) share_group = self.share_group_api.update( context, share_group, share_group_data) return self._view_builder.detail(req, share_group) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) def update(self, req, id, body): return self._update_share_group(req, id, body) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa def update(self, req, id, body): # pylint: disable=function-redefined # noqa F811 return self._update_share_group(req, id, body) @wsgi.Controller.authorize('create') def _create(self, req, body): """Creates a new share group.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share_group'): msg = _("'share_group' is missing from the request body.") raise exc.HTTPBadRequest(explanation=msg) share_group = body['share_group'] valid_fields = { 'name', 'description', 'share_types', 'share_group_type_id', 'source_share_group_snapshot_id', 'share_network_id', 'availability_zone', } invalid_fields = set(share_group.keys()) - valid_fields if invalid_fields: msg = _("The fields %s are invalid.") % invalid_fields raise exc.HTTPBadRequest(explanation=msg) if ('share_types' in share_group and 'source_share_group_snapshot_id' in share_group): msg = 
_("Cannot supply both 'share_types' and " "'source_share_group_snapshot_id' attributes.") raise exc.HTTPBadRequest(explanation=msg) if not (share_group.get('share_types') or 'source_share_group_snapshot_id' in share_group): default_share_type = share_types.get_default_share_type() if default_share_type: share_group['share_types'] = [default_share_type['id']] else: msg = _("Must specify at least one share type as a default " "share type has not been configured.") raise exc.HTTPBadRequest(explanation=msg) kwargs = {} if 'name' in share_group: kwargs['name'] = share_group.get('name') if 'description' in share_group: kwargs['description'] = share_group.get('description') _share_types = share_group.get('share_types') if _share_types: if not all([uuidutils.is_uuid_like(st) for st in _share_types]): msg = _("The 'share_types' attribute must be a list of uuids") raise exc.HTTPBadRequest(explanation=msg) kwargs['share_type_ids'] = _share_types if ('share_network_id' in share_group and 'source_share_group_snapshot_id' in share_group): msg = _("Cannot supply both 'share_network_id' and " "'source_share_group_snapshot_id' attributes as the share " "network is inherited from the source.") raise exc.HTTPBadRequest(explanation=msg) availability_zone = share_group.get('availability_zone') if availability_zone: if 'source_share_group_snapshot_id' in share_group: msg = _( "Cannot supply both 'availability_zone' and " "'source_share_group_snapshot_id' attributes as the " "availability zone is inherited from the source.") raise exc.HTTPBadRequest(explanation=msg) try: az = db.availability_zone_get(context, availability_zone) kwargs['availability_zone_id'] = az.id kwargs['availability_zone'] = az.name except exception.AvailabilityZoneNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) if 'source_share_group_snapshot_id' in share_group: source_share_group_snapshot_id = share_group.get( 'source_share_group_snapshot_id') if not uuidutils.is_uuid_like(source_share_group_snapshot_id): 
msg = _("The 'source_share_group_snapshot_id' attribute " "must be a uuid.") raise exc.HTTPBadRequest(explanation=msg) kwargs['source_share_group_snapshot_id'] = ( source_share_group_snapshot_id) elif 'share_network_id' in share_group: share_network_id = share_group.get('share_network_id') if not uuidutils.is_uuid_like(share_network_id): msg = _("The 'share_network_id' attribute must be a uuid.") raise exc.HTTPBadRequest(explanation=msg) kwargs['share_network_id'] = share_network_id if 'share_group_type_id' in share_group: share_group_type_id = share_group.get('share_group_type_id') if not uuidutils.is_uuid_like(share_group_type_id): msg = _("The 'share_group_type_id' attribute must be a uuid.") raise exc.HTTPBadRequest(explanation=msg) kwargs['share_group_type_id'] = share_group_type_id else: # get default def_share_group_type = share_group_types.get_default() if def_share_group_type: kwargs['share_group_type_id'] = def_share_group_type['id'] else: msg = _("Must specify a share group type as a default " "share group type has not been configured.") raise exc.HTTPBadRequest(explanation=msg) try: new_share_group = self.share_group_api.create(context, **kwargs) except exception.InvalidShareGroupSnapshot as e: raise exc.HTTPConflict(explanation=e.msg) except (exception.ShareGroupSnapshotNotFound, exception.InvalidInput) as e: raise exc.HTTPBadRequest(explanation=str(e)) return self._view_builder.detail( req, {k: v for k, v in new_share_group.items()}) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.response(202) def create(self, req, body): return self._create(req, body) @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.response(202) def create(self, req, body): # pylint: disable=function-redefined # noqa F811 return self._create(req, body) def _update(self, *args, **kwargs): db.share_group_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.share_group_api.get(*args, **kwargs) def _delete(self, context, resource, 
force=True): # Delete all share group snapshots for snap in resource['snapshots']: db.share_group_snapshot_destroy(context, snap['id']) # Delete all shares in share group for share in db.get_all_shares_by_share_group(context, resource['id']): db.share_delete(context, share['id']) db.share_group_destroy(context.elevated(), resource['id']) @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.action('reset_status') def share_group_reset_status(self, req, id, body): return self._reset_status(req, id, body) # pylint: disable=function-redefined @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.action('reset_status') def share_group_reset_status(self, req, id, body): # noqa F811 return self._reset_status(req, id, body) # pylint: enable=function-redefined @wsgi.Controller.api_version('2.31', '2.54', experimental=True) @wsgi.action('force_delete') def share_group_force_delete(self, req, id, body): return self._force_delete(req, id, body) # pylint: disable=function-redefined @wsgi.Controller.api_version(SG_GRADUATION_VERSION) # noqa @wsgi.action('force_delete') def share_group_force_delete(self, req, id, body): # noqa F811 return self._force_delete(req, id, body) def create_resource(): return wsgi.Resource(ShareGroupController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_instance_export_locations.py0000664000175000017500000000575700000000000024451 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from manila.api.openstack import wsgi from manila.api.views import export_locations as export_locations_views from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import policy class ShareInstanceExportLocationController(wsgi.Controller): """The Share Instance Export Locations API controller.""" def __init__(self): self._view_builder_class = export_locations_views.ViewBuilder self.resource_name = 'share_instance_export_location' super(ShareInstanceExportLocationController, self).__init__() def _verify_share_instance(self, context, share_instance_id): try: share_instance = db_api.share_instance_get(context, share_instance_id, with_share_data=True) if not share_instance['is_public']: policy.check_policy(context, 'share_instance', 'show', share_instance) except exception.NotFound: msg = _("Share instance '%s' not found.") % share_instance_id raise exc.HTTPNotFound(explanation=msg) @wsgi.Controller.api_version('2.9') @wsgi.Controller.authorize def index(self, req, share_instance_id): """Return a list of export locations for the share instance.""" context = req.environ['manila.context'] self._verify_share_instance(context, share_instance_id) export_locations = db_api.export_location_get_all_by_share_instance_id( context, share_instance_id) return self._view_builder.summary_list(req, export_locations) @wsgi.Controller.api_version('2.9') @wsgi.Controller.authorize def show(self, req, share_instance_id, export_location_uuid): """Return data about the requested export location.""" context = 
req.environ['manila.context'] self._verify_share_instance(context, share_instance_id) try: export_location = db_api.export_location_get_by_uuid( context, export_location_uuid) return self._view_builder.detail(req, export_location) except exception.ExportLocationNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) def create_resource(): return wsgi.Resource(ShareInstanceExportLocationController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_instances.py0000664000175000017500000001126400000000000021146 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import share_instance as instance_view from manila import db from manila import exception from manila import share from manila import utils class ShareInstancesController(wsgi.Controller, wsgi.AdminActionsMixin): """The share instances API controller for the OpenStack API.""" resource_name = 'share_instance' _view_builder_class = instance_view.ViewBuilder def __init__(self): self.share_api = share.API() super(ShareInstancesController, self).__init__() def _get(self, *args, **kwargs): return db.share_instance_get(*args, **kwargs) def _update(self, *args, **kwargs): db.share_instance_update(*args, **kwargs) def _delete(self, *args, **kwargs): return self.share_api.delete_instance(*args, **kwargs) @wsgi.Controller.api_version('2.3', '2.6') @wsgi.action('os-reset_status') def instance_reset_status_legacy(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('reset_status') def instance_reset_status(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.3', '2.6') @wsgi.action('os-force_delete') def instance_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('force_delete') def instance_force_delete(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version("2.3", "2.34") # noqa @wsgi.Controller.authorize def index(self, req): # pylint: disable=function-redefined context = req.environ['manila.context'] req.GET.pop('export_location_id', None) req.GET.pop('export_location_path', None) instances = db.share_instance_get_all(context) return self._view_builder.detail_list(req, instances) @wsgi.Controller.api_version("2.35", "2.68") # noqa @wsgi.Controller.authorize def index(self, req): # pylint: disable=function-redefined # noqa F811 context = 
req.environ['manila.context'] filters = {} filters.update(req.GET) common.remove_invalid_options( context, filters, ('export_location_id', 'export_location_path')) instances = db.share_instance_get_all(context, filters) return self._view_builder.detail_list(req, instances) @wsgi.Controller.api_version("2.69") # noqa @wsgi.Controller.authorize def index(self, req): # pylint: disable=function-redefined # noqa F811 context = req.environ['manila.context'] filters = {} filters.update(req.GET) common.remove_invalid_options( context, filters, ('export_location_id', 'export_location_path', 'is_soft_deleted')) if 'is_soft_deleted' in filters: is_soft_deleted = utils.get_bool_from_api_params( 'is_soft_deleted', filters) filters['is_soft_deleted'] = is_soft_deleted instances = db.share_instance_get_all(context, filters) return self._view_builder.detail_list(req, instances) @wsgi.Controller.api_version("2.3") @wsgi.Controller.authorize def show(self, req, id): context = req.environ['manila.context'] try: instance = db.share_instance_get(context, id) except exception.NotFound: raise exc.HTTPNotFound() return self._view_builder.detail(req, instance) @wsgi.Controller.api_version("2.3") @wsgi.Controller.authorize('index') def get_share_instances(self, req, share_id): context = req.environ['manila.context'] try: share = self.share_api.get(context, share_id) except exception.NotFound: raise exc.HTTPNotFound() view = instance_view.ViewBuilder() return view.detail_list(req, share.instances) def create_resource(): return wsgi.Resource(ShareInstancesController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_network_subnets.py0000664000175000017500000003227600000000000022421 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from oslo_config import cfg from oslo_db import exception as db_exception from oslo_log import log import webob from webob import exc from manila.api import common as api_common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.v2 import metadata as metadata_controller from manila.api.views import share_network_subnets as subnet_views from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila.message import api as message_api from manila.message import message_field from manila import share from manila.share import rpcapi as share_rpcapi LOG = log.getLogger(__name__) CONF = cfg.CONF class ShareNetworkSubnetController(wsgi.Controller, metadata_controller.MetadataController): """The Share Network Subnet API controller for the OpenStack API.""" resource_name = 'share_network_subnet' _view_builder_class = subnet_views.ViewBuilder def __init__(self): super(ShareNetworkSubnetController, self).__init__() self.share_rpcapi = share_rpcapi.ShareAPI() self.share_api = share.API() self.message_api = message_api.API() @wsgi.Controller.api_version("2.51") @wsgi.Controller.authorize def index(self, req, share_network_id): """Returns a list of share network subnets.""" context = req.environ['manila.context'] try: share_network = db_api.share_network_get(context, share_network_id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) return self._view_builder.build_share_network_subnets( req, 
share_network.get('share_network_subnets')) def _all_share_servers_are_auto_deletable(self, share_network_subnet): return all([ss['is_auto_deletable'] for ss in share_network_subnet['share_servers']]) @wsgi.Controller.api_version('2.51') @wsgi.Controller.authorize def delete(self, req, share_network_id, share_network_subnet_id): """Delete specified share network subnet.""" context = req.environ['manila.context'] try: db_api.share_network_get(context, share_network_id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) try: share_network_subnet = db_api.share_network_subnet_get( context, share_network_subnet_id) except exception.ShareNetworkSubnetNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) for share_server in share_network_subnet['share_servers'] or []: shares = db_api.share_instance_get_all_by_share_server( context, share_server['id']) if shares: msg = _("Cannot delete share network subnet %(id)s, it has " "one or more shares.") % { 'id': share_network_subnet_id} LOG.error(msg) raise exc.HTTPConflict(explanation=msg) share_groups = db_api.share_group_get_all_by_share_server( context, share_server['id']) if share_groups: msg = _("Cannot delete share network subnet %(id)s, it has " "one or more share groups.") % { 'id': share_network_subnet_id} LOG.error(msg) raise exc.HTTPConflict(explanation=msg) # NOTE(silvacarlose): Do not allow the deletion of any share server # if any of them has the flag is_auto_deletable = False if not self._all_share_servers_are_auto_deletable( share_network_subnet): msg = _("The service cannot determine if there are any " "non-managed shares on the share network subnet %(id)s," "so it cannot be deleted. 
Please contact the cloud " "administrator to rectify.") % { 'id': share_network_subnet_id} LOG.error(msg) raise exc.HTTPConflict(explanation=msg) for share_server in share_network_subnet['share_servers']: self.share_rpcapi.delete_share_server(context, share_server) db_api.share_network_subnet_delete(context, share_network_subnet_id) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version("2.51") @wsgi.Controller.authorize def create(self, req, share_network_id, body): """Add a new share network subnet into the share network.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share-network-subnet'): msg = _("Share Network Subnet is missing from the request body.") raise exc.HTTPBadRequest(explanation=msg) data = body['share-network-subnet'] if req.api_version_request >= api_version.APIVersionRequest("2.78"): api_common.check_metadata_properties(data.get('metadata')) else: data.pop('metadata', None) data['share_network_id'] = share_network_id multiple_subnet_support = (req.api_version_request >= api_version.APIVersionRequest("2.70")) share_network, existing_subnets = api_common.validate_subnet_create( context, share_network_id, data, multiple_subnet_support) # create subnet operation on subnets with share servers means that an # allocation update is requested. if existing_subnets and existing_subnets[0]['share_servers']: # NOTE(felipe_rodrigues): all subnets have the same set of share # servers, so we can just get the servers from one of them. Not # necessarily all share servers from the specified AZ will be # updated, only the ones created with subnets in the AZ. Others # created with default AZ will only have its allocations updated # when default subnet set is updated. 
data['share_servers'] = existing_subnets[0]['share_servers'] try: share_network_subnet = ( self.share_api.update_share_server_network_allocations( context, share_network, data)) except exception.ServiceIsDown as e: msg = _('Could not add the share network subnet.') LOG.error(e) raise exc.HTTPInternalServerError(explanation=msg) except exception.InvalidShareNetwork as e: raise exc.HTTPBadRequest(explanation=e.msg) except db_exception.DBError as e: msg = _('Could not add the share network subnet.') LOG.error(e) raise exc.HTTPInternalServerError(explanation=msg) else: try: share_network_subnet = db_api.share_network_subnet_create( context, data) metadata_support = (req.api_version_request >= api_version.APIVersionRequest("2.89")) if metadata_support and data.get('metadata'): context = req.environ['manila.context'] self.share_api.update_share_network_subnet_from_metadata( context, share_network_id, share_network_subnet['id'], data.get('metadata')) except db_exception.DBError as e: msg = _('Could not create the share network subnet.') LOG.error(e) raise exc.HTTPInternalServerError(explanation=msg) share_network_subnet = db_api.share_network_subnet_get( context, share_network_subnet['id']) return self._view_builder.build_share_network_subnet( req, share_network_subnet) @wsgi.Controller.api_version('2.51') @wsgi.Controller.authorize def show(self, req, share_network_id, share_network_subnet_id): """Show share network subnet.""" context = req.environ['manila.context'] try: db_api.share_network_get(context, share_network_id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) try: share_network_subnet = db_api.share_network_subnet_get( context, share_network_subnet_id) except exception.ShareNetworkSubnetNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) return self._view_builder.build_share_network_subnet( req, share_network_subnet) @wsgi.Controller.api_version("2.78") @wsgi.Controller.authorize("get_metadata") def 
index_metadata(self, req, share_network_id, resource_id): """Returns the list of metadata for a given share network subnet.""" return self._index_metadata(req, resource_id, parent_id=share_network_id) @wsgi.Controller.api_version("2.78") @wsgi.Controller.authorize("update_metadata") def create_metadata(self, req, share_network_id, resource_id, body): """Create metadata for a given share network subnet.""" metadata = self._create_metadata(req, resource_id, body, parent_id=share_network_id) if req.api_version_request >= api_version.APIVersionRequest("2.89"): context = req.environ['manila.context'] self.share_api.update_share_network_subnet_from_metadata( context, share_network_id, resource_id, metadata.get('metadata')) return metadata @wsgi.Controller.api_version("2.78") @wsgi.Controller.authorize("update_metadata") def update_all_metadata(self, req, share_network_id, resource_id, body): """Update entire metadata for a given share network subnet.""" metadata = self._update_all_metadata(req, resource_id, body, parent_id=share_network_id) if req.api_version_request >= api_version.APIVersionRequest("2.89"): context = req.environ['manila.context'] self.share_api.update_share_network_subnet_from_metadata( context, share_network_id, resource_id, metadata.get('metadata')) return metadata @wsgi.Controller.api_version("2.78") @wsgi.Controller.authorize("update_metadata") def update_metadata_item(self, req, share_network_id, resource_id, body, key): """Update metadata item for a given share network subnet.""" metadata = self._update_metadata_item(req, resource_id, body, key, parent_id=share_network_id) if req.api_version_request >= api_version.APIVersionRequest("2.89"): context = req.environ['manila.context'] self.share_api.update_share_network_subnet_from_metadata( context, share_network_id, resource_id, metadata.get('metadata')) return metadata @wsgi.Controller.api_version("2.78") @wsgi.Controller.authorize("get_metadata") def show_metadata(self, req, share_network_id, 
resource_id, key): """Show metadata for a given share network subnet.""" return self._show_metadata(req, resource_id, key, parent_id=share_network_id) @wsgi.Controller.api_version("2.78") @wsgi.Controller.authorize("delete_metadata") def delete_metadata(self, req, share_network_id, resource_id, key): """Delete metadata for a given share network subnet.""" if req.api_version_request >= api_version.APIVersionRequest("2.89"): driver_keys = getattr( CONF, 'driver_updatable_subnet_metadata', []) if key in driver_keys: context = req.environ['manila.context'] share_network = db_api.share_network_get( context, share_network_id) self.message_api.create( context, message_field.Action.UPDATE_METADATA, share_network['project_id'], resource_type=message_field.Resource.SHARE_NETWORK_SUBNET, resource_id=resource_id, detail=message_field.Detail.UPDATE_METADATA_NOT_DELETED) return self._delete_metadata(req, resource_id, key, parent_id=share_network_id) def create_resource(): return wsgi.Resource(ShareNetworkSubnetController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_networks.py0000664000175000017500000007472000000000000021041 0ustar00zuulzuul00000000000000# Copyright 2014 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The shares api.""" import copy from http import client as http_client from oslo_db import exception as db_exception from oslo_log import log from oslo_utils import timeutils import webob from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.views import share_networks as share_networks_views from manila.common import constants from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import policy from manila import quota from manila import share from manila.share import rpcapi as share_rpcapi from manila import utils RESOURCE_NAME = 'share_network' RESOURCES_NAME = 'share_networks' LOG = log.getLogger(__name__) QUOTAS = quota.QUOTAS class ShareNetworkController(wsgi.Controller, wsgi.AdminActionsMixin): """The Share Network API controller for the OpenStack API.""" resource_name = 'share_network' _view_builder_class = share_networks_views.ViewBuilder def __init__(self): super(ShareNetworkController, self).__init__() self.share_rpcapi = share_rpcapi.ShareAPI() self.share_api = share.API() valid_statuses = { 'status': set(constants.SHARE_NETWORK_STATUSES) } def _get(self, *args, **kwargs): return db_api.share_network_get(*args, **kwargs) def show(self, req, id): """Return data about the requested network info.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'show') try: share_network = db_api.share_network_get(context, id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) return self._view_builder.build_share_network(req, share_network) def _all_share_servers_are_auto_deletable(self, share_network): return all([ss['is_auto_deletable'] for ss in share_network['share_servers']]) def _share_network_contains_subnets(self, share_network): return len(share_network['share_network_subnets']) > 1 def _update(self, *args, **kwargs): 
db_api.share_network_update(*args, **kwargs) def delete(self, req, id): """Delete specified share network.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'delete') try: share_network = db_api.share_network_get(context, id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) share_instances = db_api.share_instance_get_all_by_share_network( context, id) if share_instances: msg = _("Can not delete share network %(id)s, it has " "%(len)s share(s).") % {'id': id, 'len': len(share_instances)} LOG.error(msg) raise exc.HTTPConflict(explanation=msg) # NOTE(ameade): Do not allow deletion of share network used by share # group sg_count = db_api.count_share_groups_in_share_network(context, id) if sg_count: msg = _("Can not delete share network %(id)s, it has %(len)s " "share group(s).") % {'id': id, 'len': sg_count} LOG.error(msg) raise exc.HTTPConflict(explanation=msg) # NOTE(silvacarlose): Do not allow the deletion of share networks # if it still contains two or more subnets if self._share_network_contains_subnets(share_network): msg = _("The share network %(id)s has more than one subnet " "attached. Please remove the subnets untill you have one " "or no subnets remaining.") % {'id': id} LOG.error(msg) raise exc.HTTPConflict(explanation=msg) for subnet in share_network['share_network_subnets']: if not self._all_share_servers_are_auto_deletable(subnet): msg = _("The service cannot determine if there are any " "non-managed shares on the share network subnet " "%(id)s, so it cannot be deleted. 
Please contact the " "cloud administrator to rectify.") % { 'id': subnet['id']} LOG.error(msg) raise exc.HTTPConflict(explanation=msg) for subnet in share_network['share_network_subnets']: for share_server in subnet['share_servers']: self.share_rpcapi.delete_share_server(context, share_server) for security_service in share_network['security_services']: try: db_api.share_network_remove_security_service( context, id, security_service['id']) except Exception: msg = ("Failed to delete security association of network " "{net_id} and security service " "{sec_id}".format(net_id=id, sec_id=security_service['id'])) LOG.exception(msg) db_api.share_network_delete(context, id) try: reservations = QUOTAS.reserve( context, project_id=share_network['project_id'], share_networks=-1, user_id=share_network['user_id']) except Exception: LOG.exception("Failed to update usages deleting " "share-network.") else: QUOTAS.commit(context, reservations, project_id=share_network['project_id'], user_id=share_network['user_id']) return webob.Response(status_int=http_client.ACCEPTED) def _subnet_has_search_opt(self, key, value, network, exact_value=False): for subnet in network.get('share_network_subnets') or []: if subnet.get(key) == value or ( not exact_value and value in subnet.get(key.rstrip('~')) if key.endswith('~') and subnet.get(key.rstrip('~')) else ()): return True return False def _get_share_networks(self, req, is_detail=True): """Returns a list of share networks.""" context = req.environ['manila.context'] search_opts = {} search_opts.update(req.GET) filters = {} # if not context.is_admin, will ignore project_id and all_tenants here, # in database will auto add context.project_id to search_opts. if context.is_admin: if 'project_id' in search_opts: # if specified project_id, will not use all_tenants filters['project_id'] = search_opts['project_id'] elif not utils.is_all_tenants(search_opts): # if not specified project_id and all_tenants, will get # share networks in admin project. 
filters['project_id'] = context.project_id date_parsing_error_msg = '''%s is not in yyyy-mm-dd format.''' for time_comparison_filter in ['created_since', 'created_before']: if time_comparison_filter in search_opts: time_str = search_opts.get(time_comparison_filter) try: parsed_time = timeutils.parse_strtime(time_str, fmt="%Y-%m-%d") except ValueError: msg = date_parsing_error_msg % time_str raise exc.HTTPBadRequest(explanation=msg) filters[time_comparison_filter] = parsed_time if 'security_service_id' in search_opts: filters['security_service_id'] = search_opts.get( 'security_service_id') networks = db_api.share_network_get_all_by_filter(context, filters=filters) opts_to_remove = [ 'all_tenants', 'created_since', 'created_before', 'limit', 'offset', 'security_service_id', 'project_id' ] for opt in opts_to_remove: search_opts.pop(opt, None) if search_opts: for key, value in search_opts.items(): if key in ['ip_version', 'segmentation_id']: value = int(value) if (req.api_version_request >= api_version.APIVersionRequest("2.36")): networks = [ network for network in networks if network.get(key) == value or self._subnet_has_search_opt(key, value, network) or (value in network.get(key.rstrip('~')) if key.endswith('~') and network.get(key.rstrip('~')) else ())] else: networks = [ network for network in networks if network.get(key) == value or self._subnet_has_search_opt(key, value, network, exact_value=True)] limited_list = common.limited(networks, req) return self._view_builder.build_share_networks( req, limited_list, is_detail) def _share_network_subnets_contain_share_servers(self, share_network): for subnet in share_network['share_network_subnets']: if subnet['share_servers'] and len(subnet['share_servers']) > 0: return True return False def index(self, req): """Returns a summary list of share networks.""" policy.check_policy(req.environ['manila.context'], RESOURCE_NAME, 'index') return self._get_share_networks(req, is_detail=False) def detail(self, req): """Returns a 
detailed list of share networks.""" policy.check_policy(req.environ['manila.context'], RESOURCE_NAME, 'detail') return self._get_share_networks(req) def update(self, req, id, body): """Update specified share network.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'update') if not body or RESOURCE_NAME not in body: raise exc.HTTPUnprocessableEntity() try: share_network = db_api.share_network_get(context, id) except exception.ShareNetworkNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) update_values = body[RESOURCE_NAME] if 'nova_net_id' in update_values: msg = _("nova networking is not supported starting in Ocata.") raise exc.HTTPBadRequest(explanation=msg) if self._share_network_subnets_contain_share_servers(share_network): for value in update_values: if value not in ['name', 'description']: msg = (_("Cannot update share network %s. It is used by " "share servers. Only 'name' and 'description' " "fields are available for update") % share_network['id']) raise exc.HTTPForbidden(explanation=msg) try: if ('neutron_net_id' in update_values or 'neutron_subnet_id' in update_values): subnets = db_api.share_network_subnet_get_default_subnets( context, id) if not subnets: msg = _("The share network %(id)s does not have a " "'default' subnet that serves all availability " "zones, so subnet details " "('neutron_net_id', 'neutron_subnet_id') cannot " "be updated.") % {'id': id} raise exc.HTTPBadRequest(explanation=msg) if len(subnets) > 1: msg = _("The share network %(id)s does not have an unique " "'default' subnet that serves all availability " "zones, so subnet details " "('neutron_net_id', 'neutron_subnet_id') cannot " "be updated.") % {'id': id} raise exc.HTTPBadRequest(explanation=msg) subnet = subnets[0] # NOTE(silvacarlose): If the default share network subnet have # the fields neutron_net_id and neutron_subnet_id set as None, # we need to make sure that in the update request the user is # passing both parameter since a 
share network subnet must # have both fields filled or empty. subnet_neutron_net_and_subnet_id_are_empty = ( subnet['neutron_net_id'] is None and subnet['neutron_subnet_id'] is None) update_values_without_neutron_net_or_subnet = ( update_values.get('neutron_net_id') is None or update_values.get('neutron_subnet_id') is None) if (subnet_neutron_net_and_subnet_id_are_empty and update_values_without_neutron_net_or_subnet): msg = _( "To update the share network %(id)s you need to " "specify both 'neutron_net_id' and " "'neutron_subnet_id'.") % {'id': id} raise webob.exc.HTTPBadRequest(explanation=msg) db_api.share_network_subnet_update(context, subnet['id'], update_values) share_network = db_api.share_network_update(context, id, update_values) except db_exception.DBError: msg = "Could not save supplied data due to database error" raise exc.HTTPBadRequest(explanation=msg) return self._view_builder.build_share_network(req, share_network) def create(self, req, body): """Creates a new share network.""" context = req.environ['manila.context'] policy.check_policy(context, RESOURCE_NAME, 'create') if not body or RESOURCE_NAME not in body: raise exc.HTTPUnprocessableEntity() share_network_values = body[RESOURCE_NAME] share_network_subnet_values = copy.deepcopy(share_network_values) share_network_values['project_id'] = context.project_id share_network_values['user_id'] = context.user_id if 'nova_net_id' in share_network_values: msg = _("nova networking is not supported starting in Ocata.") raise exc.HTTPBadRequest(explanation=msg) share_network_values.pop('availability_zone', None) share_network_values.pop('neutron_net_id', None) share_network_values.pop('neutron_subnet_id', None) if req.api_version_request >= api_version.APIVersionRequest("2.51"): if 'availability_zone' in share_network_subnet_values: try: az = db_api.availability_zone_get( context, share_network_subnet_values['availability_zone']) share_network_subnet_values['availability_zone_id'] = ( az['id']) 
share_network_subnet_values.pop('availability_zone') except exception.AvailabilityZoneNotFound: msg = (_("The provided availability zone %s does not " "exist.") % share_network_subnet_values['availability_zone']) raise exc.HTTPBadRequest(explanation=msg) common.check_net_id_and_subnet_id(share_network_subnet_values) try: reservations = QUOTAS.reserve(context, share_networks=1) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'share_networks' in overs: LOG.warning("Quota exceeded for %(s_pid)s, " "tried to create " "share-network (%(d_consumed)d of %(d_quota)d " "already consumed).", { 's_pid': context.project_id, 'd_consumed': _consumed('share_networks'), 'd_quota': quotas['share_networks']}) raise exception.ShareNetworksLimitExceeded( allowed=quotas['share_networks']) else: # Tries to create the new share network try: share_network = db_api.share_network_create( context, share_network_values) except db_exception.DBError as e: QUOTAS.rollback(context, reservations) LOG.exception(e) msg = "Could not create share network." raise exc.HTTPInternalServerError(explanation=msg) share_network_subnet_values['share_network_id'] = ( share_network['id']) share_network_subnet_values.pop('id', None) # Try to create the share network subnet. If it fails, the service # must rollback the share network creation. 
try: db_api.share_network_subnet_create( context, share_network_subnet_values) except db_exception.DBError: db_api.share_network_delete(context, share_network['id']) QUOTAS.rollback(context, reservations) msg = _('Could not create share network subnet.') raise exc.HTTPInternalServerError(explanation=msg) QUOTAS.commit(context, reservations) share_network = db_api.share_network_get(context, share_network['id']) return self._view_builder.build_share_network(req, share_network) @wsgi.action("add_security_service") def add_security_service(self, req, id, body): """Associate share network with a given security service.""" context = req.environ['manila.context'] share_network = db_api.share_network_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'add_security_service', target_obj=share_network) try: data = body['add_security_service'] security_service = db_api.security_service_get( context, data['security_service_id']) except KeyError: msg = "Malformed request body" raise exc.HTTPBadRequest(explanation=msg) contain_share_servers = ( self._share_network_subnets_contain_share_servers(share_network)) support_adding_to_in_use_networks = ( req.api_version_request >= api_version.APIVersionRequest("2.63")) if contain_share_servers: if not support_adding_to_in_use_networks: msg = _("Cannot add security services. 
Share network is used.") raise exc.HTTPForbidden(explanation=msg) try: self.share_api.update_share_network_security_service( context, share_network, security_service) except exception.ServiceIsDown as e: raise exc.HTTPConflict(explanation=e.msg) except exception.InvalidShareNetwork as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.InvalidSecurityService as e: raise exc.HTTPConflict(explanation=e.msg) try: share_network = db_api.share_network_add_security_service( context, id, data['security_service_id']) except exception.NotFound as e: raise exc.HTTPNotFound(explanation=e.msg) except exception.ShareNetworkSecurityServiceAssociationError as e: raise exc.HTTPBadRequest(explanation=e.msg) return self._view_builder.build_share_network(req, share_network) @wsgi.action('remove_security_service') def remove_security_service(self, req, id, body): """Dissociate share network from a given security service.""" context = req.environ['manila.context'] share_network = db_api.share_network_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'remove_security_service', target_obj=share_network) data = body['remove_security_service'] if self._share_network_subnets_contain_share_servers(share_network): msg = _("Cannot remove security services. 
Share network is used.") raise exc.HTTPForbidden(explanation=msg) try: share_network = db_api.share_network_remove_security_service( context, id, data['security_service_id']) except KeyError: msg = "Malformed request body" raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound as e: raise exc.HTTPNotFound(explanation=e.msg) except exception.ShareNetworkSecurityServiceDissociationError as e: raise exc.HTTPBadRequest(explanation=e.msg) return self._view_builder.build_share_network(req, share_network) @wsgi.Controller.api_version('2.63') @wsgi.action('update_security_service') @wsgi.response(202) def update_security_service(self, req, id, body): """Update security service parameters from a given share network.""" context = req.environ['manila.context'] share_network = db_api.share_network_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'update_security_service', target_obj=share_network) try: data = body['update_security_service'] current_security_service = db_api.security_service_get( context, data['current_service_id'] ) new_security_service = db_api.security_service_get( context, data['new_service_id'] ) except KeyError: msg = "Malformed request body." 
raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound: msg = ("The current security service or the new security service " "doesn't exist.") raise exc.HTTPBadRequest(explanation=msg) try: self.share_api.update_share_network_security_service( context, share_network, new_security_service, current_security_service=current_security_service) except exception.ServiceIsDown as e: raise exc.HTTPConflict(explanation=e.msg) except exception.InvalidShareNetwork as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.InvalidSecurityService as e: raise exc.HTTPConflict(explanation=e.msg) try: share_network = db_api.share_network_update_security_service( context, id, data['current_service_id'], data['new_service_id']) except exception.NotFound as e: raise exc.HTTPNotFound(explanation=e.msg) except (exception.ShareNetworkSecurityServiceDissociationError, exception.ShareNetworkSecurityServiceAssociationError) as e: raise exc.HTTPBadRequest(explanation=e.msg) return self._view_builder.build_share_network(req, share_network) @wsgi.Controller.api_version('2.63') @wsgi.action('update_security_service_check') @wsgi.response(202) def check_update_security_service(self, req, id, body): """Check the feasibility of updating a security service.""" context = req.environ['manila.context'] share_network = db_api.share_network_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'update_security_service_check', target_obj=share_network) try: data = body['update_security_service_check'] current_security_service = db_api.security_service_get( context, data['current_service_id'] ) new_security_service = db_api.security_service_get( context, data['new_service_id'] ) except KeyError: msg = "Malformed request body." 
raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound: msg = ("The current security service or the new security service " "doesn't exist.") raise exc.HTTPBadRequest(explanation=msg) reset_check = utils.get_bool_from_api_params('reset_operation', data) try: result = ( self.share_api.check_share_network_security_service_update( context, share_network, new_security_service, current_security_service=current_security_service, reset_operation=reset_check)) except exception.ServiceIsDown as e: raise exc.HTTPConflict(explanation=e.msg) except exception.InvalidShareNetwork as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.InvalidSecurityService as e: raise exc.HTTPConflict(explanation=e.msg) return self._view_builder.build_security_service_update_check( req, data, result) @wsgi.Controller.api_version('2.63') @wsgi.action("add_security_service_check") @wsgi.response(202) def check_add_security_service(self, req, id, body): """Check the feasibility of associate a new security service.""" context = req.environ['manila.context'] share_network = db_api.share_network_get(context, id) policy.check_policy(context, RESOURCE_NAME, 'add_security_service_check', target_obj=share_network) data = body['add_security_service_check'] try: security_service = db_api.security_service_get( context, data['security_service_id'], project_only=True) except KeyError: msg = "Malformed request body." raise exc.HTTPBadRequest(explanation=msg) except exception.NotFound: msg = ("Security service %s doesn't exist." 
) % data['security_service_id'] raise exc.HTTPBadRequest(explanation=msg) reset_check = utils.get_bool_from_api_params('reset_operation', data) try: result = ( self.share_api.check_share_network_security_service_update( context, share_network, security_service, reset_operation=reset_check)) except exception.ServiceIsDown as e: raise exc.HTTPConflict(explanation=e.msg) except exception.InvalidShareNetwork as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.InvalidSecurityService as e: raise exc.HTTPConflict(explanation=e.msg) return self._view_builder.build_security_service_update_check( req, data, result) @wsgi.Controller.api_version('2.70') @wsgi.action('share_network_subnet_create_check') @wsgi.response(202) def share_network_subnet_create_check(self, req, id, body): """Check the feasibility of creating a share network subnet.""" context = req.environ['manila.context'] if not self.is_valid_body(body, 'share_network_subnet_create_check'): msg = _("Share Network Subnet Create Check is missing from " "the request body.") raise exc.HTTPBadRequest(explanation=msg) data = body['share_network_subnet_create_check'] share_network, existing_subnets = common.validate_subnet_create( context, id, data, True) reset_check = utils.get_bool_from_api_params('reset_operation', data) # create subnet operation alongside subnets with share servers means # that an allocation update is requested. if existing_subnets and existing_subnets[0]['share_servers']: # NOTE(felipe_rodrigues): all subnets within the same az have the # same set of share servers, so we can just get the servers from # one of them. Not necessarily all share servers from the specified # AZ will be updated, only the ones created with subnets in the AZ. # Others created with default AZ will only have its allocations # updated when default subnet set is updated. data['share_servers'] = existing_subnets[0]['share_servers'] try: check_result = ( self.share_api. 
check_update_share_server_network_allocations( context, share_network, data, reset_check)) except exception.ServiceIsDown as e: msg = _("A share network subnet update check cannot be " "performed at this time.") LOG.error(e) raise exc.HTTPInternalServerError(explanation=msg) except exception.InvalidShareNetwork as e: raise exc.HTTPBadRequest(explanation=e.msg) else: check_result = { 'compatible': True, 'hosts_check_result': {} } return self._view_builder.build_share_network_subnet_create_check( req, check_result) @wsgi.Controller.api_version('2.63') @wsgi.action('reset_status') def reset_status(self, req, id, body): return self._reset_status(req, id, body) def create_resource(): return wsgi.Resource(ShareNetworkController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_replica_export_locations.py0000664000175000017500000000731600000000000024255 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webob import exc from manila.api.openstack import wsgi from manila.api.views import export_locations as export_locations_views from manila.db import api as db_api from manila import exception from manila.i18n import _ PRE_GRADUATION_VERSION = '2.55' GRADUATION_VERSION = '2.56' class ShareReplicaExportLocationController(wsgi.Controller): """The Share Instance Export Locations API controller.""" def __init__(self): self._view_builder_class = export_locations_views.ViewBuilder self.resource_name = 'share_replica_export_location' super(ShareReplicaExportLocationController, self).__init__() def _verify_share_replica(self, context, share_replica_id): try: db_api.share_replica_get(context, share_replica_id) except exception.NotFound: msg = _("Share replica '%s' not found.") % share_replica_id raise exc.HTTPNotFound(explanation=msg) @wsgi.Controller.api_version( '2.47', PRE_GRADUATION_VERSION, experimental=True) def index(self, req, share_replica_id): return self._index(req, share_replica_id) # pylint: disable=function-redefined @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa def index(self, req, share_replica_id): # noqa F811 return self._index(req, share_replica_id) # pylint: enable=function-redefined @wsgi.Controller.authorize('index') def _index(self, req, share_replica_id): """Return a list of export locations for the share instance.""" context = req.environ['manila.context'] self._verify_share_replica(context, share_replica_id) export_locations = db_api.export_location_get_all_by_share_instance_id( context, share_replica_id, include_admin_only=context.is_admin) return self._view_builder.summary_list(req, export_locations, replica=True) @wsgi.Controller.api_version( '2.47', PRE_GRADUATION_VERSION, experimental=True) def show(self, req, share_replica_id, export_location_uuid): return self._show(req, share_replica_id, export_location_uuid) # pylint: disable=function-redefined @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa def show(self, req, 
share_replica_id, export_location_uuid): # noqa F811 return self._show(req, share_replica_id, export_location_uuid) # pylint: enable=function-redefined @wsgi.Controller.authorize('show') def _show(self, req, share_replica_id, export_location_uuid): """Return data about the requested export location.""" context = req.environ['manila.context'] self._verify_share_replica(context, share_replica_id) try: export_location = db_api.export_location_get_by_uuid( context, export_location_uuid) return self._view_builder.detail(req, export_location, replica=True) except exception.ExportLocationNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) def create_resource(): return wsgi.Resource(ShareReplicaExportLocationController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_replicas.py0000664000175000017500000003664400000000000020772 0ustar00zuulzuul00000000000000# Copyright 2015 Goutham Pacha Ravi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The Share Replication API.""" from http import client as http_client from oslo_utils import strutils import webob from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.views import share_replicas as replication_view from manila.common import constants from manila import db from manila import exception from manila.i18n import _ from manila import share MIN_SUPPORTED_API_VERSION = '2.11' PRE_GRADUATION_VERSION = '2.55' GRADUATION_VERSION = '2.56' class ShareReplicationController(wsgi.Controller, wsgi.AdminActionsMixin): """The Share Replication API controller for the OpenStack API.""" resource_name = 'share_replica' _view_builder_class = replication_view.ReplicationViewBuilder def __init__(self): super(ShareReplicationController, self).__init__() self.share_api = share.API() def _update(self, *args, **kwargs): db.share_replica_update(*args, **kwargs) def _get(self, *args, **kwargs): return db.share_replica_get(*args, **kwargs) def _delete(self, context, resource, force=True): try: self.share_api.delete_share_replica(context, resource, force=True) except exception.ReplicationException as e: raise exc.HTTPBadRequest(explanation=e.msg) @wsgi.Controller.api_version( MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) def index(self, req): """Return a summary list of replicas.""" return self._get_replicas(req) @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa def index(self, req): # pylint: disable=function-redefined # noqa F811 """Return a summary list of replicas.""" return self._get_replicas(req) @wsgi.Controller.api_version( MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) def detail(self, req): """Returns a detailed list of replicas.""" return self._get_replicas(req, is_detail=True) @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa def detail(self, req): # pylint: disable=function-redefined # 
noqa F811 """Returns a detailed list of replicas.""" return self._get_replicas(req, is_detail=True) @wsgi.Controller.authorize('get_all') def _get_replicas(self, req, is_detail=False): """Returns list of replicas.""" context = req.environ['manila.context'] share_id = req.params.get('share_id') if share_id: try: replicas = db.share_replicas_get_all_by_share( context, share_id) except exception.NotFound: msg = _("Share with share ID %s not found.") % share_id raise exc.HTTPNotFound(explanation=msg) else: replicas = db.share_replicas_get_all(context) limited_list = common.limited(replicas, req) if is_detail: replicas = self._view_builder.detail_list(req, limited_list) else: replicas = self._view_builder.summary_list(req, limited_list) return replicas @wsgi.Controller.api_version( MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) def show(self, req, id): """Return data about the given replica.""" return self._show(req, id) @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa def show(self, req, id): # pylint: disable=function-redefined # noqa F811 """Return data about the given replica.""" return self._show(req, id) @wsgi.Controller.authorize('show') def _show(self, req, id): """Return data about the given replica.""" context = req.environ['manila.context'] try: replica = db.share_replica_get(context, id) except exception.ShareReplicaNotFound: msg = _("Replica %s not found.") % id raise exc.HTTPNotFound(explanation=msg) return self._view_builder.detail(req, replica) def _validate_body(self, body): if not self.is_valid_body(body, 'share_replica'): msg = _("Body does not contain 'share_replica' information.") raise exc.HTTPUnprocessableEntity(explanation=msg) @wsgi.Controller.api_version( MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) @wsgi.response(202) def create(self, req, body): return self._create(req, body) @wsgi.Controller.api_version(GRADUATION_VERSION, "2.66") # noqa @wsgi.response(202) def create(self, req, body): # 
pylint: disable=function-redefined # noqa F811 return self._create(req, body) @wsgi.Controller.api_version("2.67") # noqa @wsgi.response(202) def create(self, req, body): # pylint: disable=function-redefined # noqa F811 return self._create(req, body, allow_scheduler_hints=True) @wsgi.Controller.authorize('create') def _create(self, req, body, allow_scheduler_hints=False): """Add a replica to an existing share.""" context = req.environ['manila.context'] self._validate_body(body) share_id = body.get('share_replica').get('share_id') availability_zone = body.get('share_replica').get('availability_zone') scheduler_hints = None if allow_scheduler_hints: scheduler_hints = body.get('share_replica').get('scheduler_hints') if not share_id: msg = _("Must provide Share ID to add replica.") raise exc.HTTPBadRequest(explanation=msg) try: share_ref = db.share_get(context, share_id) except exception.NotFound: msg = _("No share exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % share_id) if share_ref.get('is_soft_deleted'): msg = _("Replica cannot be created for share '%s' " "since it has been soft deleted.") % share_id raise exc.HTTPForbidden(explanation=msg) if share_ref.get('encryption_key_ref'): msg = _("Replica cannot be created for share '%s' " "since it is encrypted.") % share_id raise exc.HTTPForbidden(explanation=msg) share_network_id = body.get('share_replica').get('share_network_id') if share_network_id: if req.api_version_request < api_version.APIVersionRequest("2.72"): msg = _("'share_network_id' option is not supported by this " "microversion. 
Use 2.72 or greater microversion to " "be able to use 'share_network_id'.") raise exc.HTTPBadRequest(explanation=msg) else: share_network_id = share_ref.get('share_network_id', None) try: if share_network_id: share_network = db.share_network_get(context, share_network_id) common.check_share_network_is_active(share_network) except exception.ShareNetworkNotFound: msg = _("No share network exists with ID %s.") raise exc.HTTPBadRequest(explanation=msg % share_network_id) try: new_replica = self.share_api.create_share_replica( context, share_ref, availability_zone=availability_zone, share_network_id=share_network_id, scheduler_hints=scheduler_hints) except exception.AvailabilityZoneNotFound as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.ReplicationException as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.ShareBusyException as e: raise exc.HTTPBadRequest(explanation=e.msg) return self._view_builder.detail(req, new_replica) @wsgi.Controller.api_version( MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) def delete(self, req, id): return self._delete_share_replica(req, id) @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa def delete(self, req, id): # pylint: disable=function-redefined # noqa F811 return self._delete_share_replica(req, id) @wsgi.Controller.authorize('delete') def _delete_share_replica(self, req, id): """Delete a replica.""" context = req.environ['manila.context'] try: replica = db.share_replica_get(context, id) except exception.ShareReplicaNotFound: msg = _("No replica exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) try: self.share_api.delete_share_replica(context, replica) except exception.ReplicationException as e: raise exc.HTTPBadRequest(explanation=e.msg) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version( MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) @wsgi.response(202) @wsgi.action('promote') def promote(self, 
req, id, body): return self._promote(req, id, body) @wsgi.Controller.api_version(GRADUATION_VERSION, "2.74") # noqa @wsgi.response(202) @wsgi.action('promote') def promote(self, req, id, body): # pylint: disable=function-redefined # noqa F811 return self._promote(req, id, body) @wsgi.Controller.api_version("2.75") # noqa @wsgi.response(202) @wsgi.action('promote') def promote(self, req, id, body): # pylint: disable=function-redefined # noqa F811 return self._promote(req, id, body, allow_quiesce_wait_time=True) @wsgi.Controller.authorize('promote') def _promote(self, req, id, body, allow_quiesce_wait_time=False): """Promote a replica to active state.""" context = req.environ['manila.context'] try: replica = db.share_replica_get(context, id) except exception.ShareReplicaNotFound: msg = _("No replica exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) share_network_id = replica.get('share_network_id') if share_network_id: share_network = db.share_network_get(context, share_network_id) common.check_share_network_is_active(share_network) replica_state = replica.get('replica_state') if replica_state == constants.REPLICA_STATE_ACTIVE: return webob.Response(status_int=http_client.OK) quiesce_wait_time = None if allow_quiesce_wait_time: # NOTE(carloss): there is a chance that we receive # {'promote': null}, so we need to prevent that promote_data = body.get('promote', {}) promote_data = {} if promote_data is None else promote_data wait_time = promote_data.get('quiesce_wait_time') if wait_time: if not strutils.is_int_like(wait_time) or int(wait_time) <= 0: msg = _("quiesce_wait_time must be an integer and " "greater than 0.") raise exc.HTTPBadRequest(explanation=msg) else: quiesce_wait_time = int(wait_time) try: replica = self.share_api.promote_share_replica( context, replica, quiesce_wait_time=quiesce_wait_time) except exception.ReplicationException as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.AdminRequired as e: raise 
exc.HTTPForbidden(explanation=e.message) return self._view_builder.detail(req, replica) @wsgi.Controller.api_version( MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) @wsgi.action('reset_status') def reset_status(self, req, id, body): """Reset the 'status' attribute in the database.""" return self._reset_status(req, id, body) # pylint: disable=function-redefined @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa @wsgi.action('reset_status') def reset_status(self, req, id, body): # noqa F811 """Reset the 'status' attribute in the database.""" return self._reset_status(req, id, body) # pylint: enable=function-redefined @wsgi.Controller.api_version( MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) @wsgi.action('force_delete') def force_delete(self, req, id, body): """Force deletion on the database, attempt on the backend.""" return self._force_delete(req, id, body) # pylint: disable=function-redefined @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa @wsgi.action('force_delete') def force_delete(self, req, id, body): # noqa F811 """Force deletion on the database, attempt on the backend.""" return self._force_delete(req, id, body) # pylint: enable=function-redefined @wsgi.Controller.api_version( MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) @wsgi.action('reset_replica_state') @wsgi.Controller.authorize def reset_replica_state(self, req, id, body): """Reset the 'replica_state' attribute in the database.""" return self._reset_status(req, id, body, status_attr='replica_state') # pylint: disable=function-redefined @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa @wsgi.action('reset_replica_state') @wsgi.Controller.authorize def reset_replica_state(self, req, id, body): # noqa F811 """Reset the 'replica_state' attribute in the database.""" return self._reset_status(req, id, body, status_attr='replica_state') # pylint: enable=function-redefined @wsgi.Controller.api_version( 
MIN_SUPPORTED_API_VERSION, PRE_GRADUATION_VERSION, experimental=True) @wsgi.response(202) @wsgi.action('resync') def resync(self, req, id, body): return self._resync(req, id, body) @wsgi.Controller.api_version(GRADUATION_VERSION) # noqa @wsgi.response(202) @wsgi.action('resync') def resync(self, req, id, body): # pylint: disable=function-redefined # noqa F811 return self._resync(req, id, body) @wsgi.Controller.authorize('resync') def _resync(self, req, id, body): """Attempt to update/sync the replica with its source.""" context = req.environ['manila.context'] try: replica = db.share_replica_get(context, id) except exception.ShareReplicaNotFound: msg = _("No replica exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) replica_state = replica.get('replica_state') if replica_state == constants.REPLICA_STATE_ACTIVE: return webob.Response(status_int=http_client.OK) try: self.share_api.update_share_replica(context, replica) except exception.InvalidHost as e: raise exc.HTTPBadRequest(explanation=e.msg) def create_resource(): return wsgi.Resource(ShareReplicationController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_servers.py0000664000175000017500000004342400000000000020653 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client from oslo_log import log import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.v1 import share_servers from manila.api.views import share_server_migration as server_migration_views from manila.common import constants from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila.share import utils as share_utils from manila import utils LOG = log.getLogger(__name__) class ShareServerController(share_servers.ShareServerController, wsgi.Controller, wsgi.AdminActionsMixin): """The Share Server API V2 controller for the OpenStack API.""" def __init__(self): super(ShareServerController, self).__init__() self._migration_view_builder = server_migration_views.ViewBuilder() valid_statuses = { 'status': set(constants.SHARE_SERVER_STATUSES), 'task_state': set(constants.SERVER_TASK_STATE_STATUSES), } def _get(self, *args, **kwargs): return db_api.share_server_get(*args, **kwargs) def _update(self, context, id, update): db_api.share_server_update(context, id, update) @wsgi.Controller.api_version('2.49') @wsgi.action('reset_status') def share_server_reset_status(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.authorize('manage_share_server') def _manage(self, req, body): """Manage a share server.""" LOG.debug("Manage Share Server with id: %s", id) context = req.environ['manila.context'] identifier, host, share_network, driver_opts, network_subnet = ( self._validate_manage_share_server_parameters(context, body)) try: result = self.share_api.manage_share_server( context, identifier, host, network_subnet, driver_opts) except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.PolicyNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.msg) result.project_id = share_network["project_id"] if share_network['name']: result.share_network_name = share_network['name'] else: 
result.share_network_name = share_network['id'] return self._view_builder.build_share_server(req, result) @wsgi.Controller.api_version('2.51') @wsgi.response(202) def manage(self, req, body): return self._manage(req, body) @wsgi.Controller.api_version('2.49') # noqa @wsgi.response(202) def manage(self, req, body): # pylint: disable=function-redefined # noqa F811 body.get('share_server', {}).pop('share_network_subnet_id', None) return self._manage(req, body) @wsgi.Controller.authorize('unmanage_share_server') def _unmanage(self, req, id, body=None): context = req.environ['manila.context'] LOG.debug("Unmanage Share Server with id: %s", id) # force's default value is False # force will be True if body is {'unmanage': {'force': True}} force = (body.get('unmanage') or {}).get('force', False) or False try: share_server = db_api.share_server_get( context, id) except exception.ShareServerNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) if len(share_server['share_network_subnets']) > 1: msg = _("Cannot unmanage the share server containing multiple " "subnets.") raise exc.HTTPBadRequest(explanation=msg) if share_server.get('encryption_key_ref'): msg = _("Cannot unmanage the share server containing encryption " "key reference") raise exc.HTTPBadRequest(explanation=msg) share_network_id = share_server['share_network_id'] share_network = db_api.share_network_get(context, share_network_id) common.check_share_network_is_active(share_network) allowed_statuses = [constants.STATUS_ERROR, constants.STATUS_ACTIVE, constants.STATUS_MANAGE_ERROR, constants.STATUS_UNMANAGE_ERROR] if share_server['status'] not in allowed_statuses: data = { 'status': share_server['status'], 'allowed_statuses': ', '.join(allowed_statuses), } msg = _("Share server's actual status is %(status)s, allowed " "statuses for unmanaging are " "%(allowed_statuses)s.") % data raise exc.HTTPBadRequest(explanation=msg) try: self.share_api.unmanage_share_server( context, share_server, force=force) except 
(exception.ShareServerInUse, exception.PolicyNotAuthorized) as e: raise exc.HTTPBadRequest(explanation=e.msg) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version("2.49") @wsgi.action('unmanage') def unmanage(self, req, id, body=None): """Unmanage a share server.""" return self._unmanage(req, id, body) def _validate_manage_share_server_parameters(self, context, body): if not (body and self.is_valid_body(body, 'share_server')): msg = _("Share Server entity not found in request body") raise exc.HTTPUnprocessableEntity(explanation=msg) required_parameters = ('host', 'share_network_id', 'identifier') data = body['share_server'] for parameter in required_parameters: if parameter not in data: msg = _("Required parameter %s not found") % parameter raise exc.HTTPBadRequest(explanation=msg) if not data.get(parameter): msg = _("Required parameter %s is empty") % parameter raise exc.HTTPBadRequest(explanation=msg) identifier = data['identifier'] host, share_network_id = data['host'], data['share_network_id'] network_subnet_id = data.get('share_network_subnet_id') if network_subnet_id: try: network_subnets = ( db_api.share_network_subnet_get_all_with_same_az( context, network_subnet_id)) except exception.ShareNetworkSubnetNotFound: msg = _("The share network subnet %s does not " "exist.") % network_subnet_id raise exc.HTTPBadRequest(explanation=msg) else: network_subnets = db_api.share_network_subnet_get_default_subnets( context, share_network_id) if not network_subnets: msg = _("The share network %s does have a default subnet. 
Create " "one or use a specific subnet to manage this share server " "with API version >= 2.51.") % share_network_id raise exc.HTTPBadRequest(explanation=msg) if len(network_subnets) > 1: msg = _("Cannot manage the share server, since the share network " "subnet %s has more subnets in its availability " "zone and share network.") % network_subnet_id raise exc.HTTPBadRequest(explanation=msg) network_subnet = network_subnets[0] common.check_share_network_is_active(network_subnet['share_network']) if share_utils.extract_host(host, 'pool'): msg = _("Host parameter should not contain pool.") raise exc.HTTPBadRequest(explanation=msg) try: utils.validate_service_host( context, share_utils.extract_host(host)) except exception.ServiceNotFound as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.PolicyNotAuthorized as e: raise exc.HTTPForbidden(explanation=e.msg) except exception.AdminRequired as e: raise exc.HTTPForbidden(explanation=e.msg) except exception.ServiceIsDown as e: raise exc.HTTPBadRequest(explanation=e.msg) try: share_network = db_api.share_network_get( context, share_network_id) except exception.ShareNetworkNotFound as e: raise exc.HTTPBadRequest(explanation=e.msg) driver_opts = data.get('driver_options') if driver_opts is not None and not isinstance(driver_opts, dict): msg = _("Driver options must be in dictionary format.") raise exc.HTTPBadRequest(explanation=msg) return identifier, host, share_network, driver_opts, network_subnet @wsgi.Controller.api_version('2.57', experimental=True) @wsgi.action("migration_start") @wsgi.Controller.authorize @wsgi.response(http_client.ACCEPTED) def share_server_migration_start(self, req, id, body): """Migrate a share server to the specified host.""" context = req.environ['manila.context'] try: share_server = db_api.share_server_get( context, id) except exception.ShareServerNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) params = body.get('migration_start') if not params: raise 
exc.HTTPBadRequest(explanation=_("Request is missing body.")) if share_server['encryption_key_ref']: msg = _("Cannot migrate the share server containing encryption " "key reference") raise exc.HTTPBadRequest(explanation=msg) bool_params = ['writable', 'nondisruptive', 'preserve_snapshots'] mandatory_params = bool_params + ['host'] utils.check_params_exist(mandatory_params, params) bool_param_values = utils.check_params_are_boolean(bool_params, params) pool_was_specified = len(params['host'].split('#')) > 1 if pool_was_specified: msg = _('The destination host can not contain pool information.') raise exc.HTTPBadRequest(explanation=msg) new_share_network = None new_share_network_id = params.get('new_share_network_id', None) if new_share_network_id: try: new_share_network = db_api.share_network_get( context, new_share_network_id) except exception.NotFound: msg = _("Share network %s not " "found.") % new_share_network_id raise exc.HTTPBadRequest(explanation=msg) common.check_share_network_is_active(new_share_network) else: share_network_id = ( share_server['share_network_id']) current_share_network = db_api.share_network_get( context, share_network_id) common.check_share_network_is_active(current_share_network) try: self.share_api.share_server_migration_start( context, share_server, params['host'], bool_param_values['writable'], bool_param_values['nondisruptive'], bool_param_values['preserve_snapshots'], new_share_network=new_share_network) except exception.ServiceIsDown as e: # NOTE(dviroel): user should check if the host is healthy raise exc.HTTPBadRequest(explanation=e.msg) except exception.InvalidShareServer as e: # NOTE(dviroel): invalid share server meaning that some internal # resource have a invalid state. 
raise exc.HTTPConflict(explanation=e.msg) except exception.InvalidInput as e: # User provided controversial parameters in the request raise exc.HTTPBadRequest(explanation=e.msg) @wsgi.Controller.api_version('2.57', experimental=True) @wsgi.action("migration_complete") @wsgi.Controller.authorize def share_server_migration_complete(self, req, id, body): """Invokes 2nd phase of share server migration.""" context = req.environ['manila.context'] try: share_server = db_api.share_server_get( context, id) except exception.ShareServerNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) try: result = self.share_api.share_server_migration_complete( context, share_server) except (exception.InvalidShareServer, exception.ServiceIsDown) as e: raise exc.HTTPBadRequest(explanation=e.msg) return self._migration_view_builder.migration_complete(req, result) @wsgi.Controller.api_version('2.57', experimental=True) @wsgi.action("migration_cancel") @wsgi.Controller.authorize @wsgi.response(http_client.ACCEPTED) def share_server_migration_cancel(self, req, id, body): """Attempts to cancel share migration.""" context = req.environ['manila.context'] try: share_server = db_api.share_server_get( context, id) except exception.ShareServerNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) try: self.share_api.share_server_migration_cancel(context, share_server) except (exception.InvalidShareServer, exception.ServiceIsDown) as e: raise exc.HTTPBadRequest(explanation=e.msg) @wsgi.Controller.api_version('2.57', experimental=True) @wsgi.action("migration_get_progress") @wsgi.Controller.authorize def share_server_migration_get_progress(self, req, id, body): """Retrieve share server migration progress for a given share.""" context = req.environ['manila.context'] try: result = self.share_api.share_server_migration_get_progress( context, id) except exception.ServiceIsDown as e: raise exc.HTTPConflict(explanation=e.msg) except exception.InvalidShareServer as e: raise 
exc.HTTPBadRequest(explanation=e.msg) return self._migration_view_builder.get_progress(req, result) @wsgi.Controller.api_version('2.57', experimental=True) @wsgi.action("reset_task_state") @wsgi.Controller.authorize def share_server_reset_task_state(self, req, id, body): return self._reset_status(req, id, body, status_attr='task_state') @wsgi.Controller.api_version('2.57', experimental=True) @wsgi.action("migration_check") @wsgi.Controller.authorize def share_server_migration_check(self, req, id, body): """Check if can migrate a share server to the specified host.""" context = req.environ['manila.context'] try: share_server = db_api.share_server_get( context, id) except exception.ShareServerNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) params = body.get('migration_check') if not params: raise exc.HTTPBadRequest(explanation=_("Request is missing body.")) if share_server.get('encryption_key_ref'): msg = _("Cannot migrate the share server containing encryption " "key reference") raise exc.HTTPBadRequest(explanation=msg) bool_params = ['writable', 'nondisruptive', 'preserve_snapshots'] mandatory_params = bool_params + ['host'] utils.check_params_exist(mandatory_params, params) bool_param_values = utils.check_params_are_boolean(bool_params, params) pool_was_specified = len(params['host'].split('#')) > 1 if pool_was_specified: msg = _('The destination host can not contain pool information.') raise exc.HTTPBadRequest(explanation=msg) new_share_network = None new_share_network_id = params.get('new_share_network_id', None) if new_share_network_id: try: new_share_network = db_api.share_network_get( context, new_share_network_id) except exception.NotFound: msg = _("Share network %s not " "found.") % new_share_network_id raise exc.HTTPBadRequest(explanation=msg) common.check_share_network_is_active(new_share_network) else: share_network_id = ( share_server['share_network_id']) current_share_network = db_api.share_network_get( context, share_network_id) 
common.check_share_network_is_active(current_share_network) try: result = self.share_api.share_server_migration_check( context, share_server, params['host'], bool_param_values['writable'], bool_param_values['nondisruptive'], bool_param_values['preserve_snapshots'], new_share_network=new_share_network) except exception.ServiceIsDown as e: # NOTE(dviroel): user should check if the host is healthy raise exc.HTTPBadRequest(explanation=e.msg) except exception.InvalidShareServer as e: # NOTE(dviroel): invalid share server meaning that some internal # resource have a invalid state. raise exc.HTTPConflict(explanation=e.msg) return self._migration_view_builder.build_check_migration( req, params, result) def create_resource(): return wsgi.Resource(ShareServerController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_snapshot_export_locations.py0000664000175000017500000000474100000000000024474 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webob import exc from manila.api.openstack import wsgi from manila.api.views import share_snapshot_export_locations from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import policy class ShareSnapshotExportLocationController(wsgi.Controller): def __init__(self): self._view_builder_class = ( share_snapshot_export_locations.ViewBuilder) self.resource_name = 'share_snapshot_export_location' super(ShareSnapshotExportLocationController, self).__init__() @wsgi.Controller.api_version('2.32') @wsgi.Controller.authorize def index(self, req, snapshot_id): context = req.environ['manila.context'] snapshot = self._verify_snapshot(context, snapshot_id) return self._view_builder.list_export_locations( req, snapshot['export_locations']) @wsgi.Controller.api_version('2.32') @wsgi.Controller.authorize def show(self, req, snapshot_id, export_location_id): context = req.environ['manila.context'] self._verify_snapshot(context, snapshot_id) export_location = db_api.share_snapshot_instance_export_location_get( context, export_location_id) return self._view_builder.detail_export_location(req, export_location) def _verify_snapshot(self, context, snapshot_id): try: snapshot = db_api.share_snapshot_get(context, snapshot_id) share = db_api.share_get(context, snapshot['share_id']) if not share['is_public']: policy.check_policy(context, 'share', 'get', share) except exception.NotFound: msg = _("Snapshot '%s' not found.") % snapshot_id raise exc.HTTPNotFound(explanation=msg) return snapshot def create_resource(): return wsgi.Resource(ShareSnapshotExportLocationController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_snapshot_instance_export_locations.py0000664000175000017500000000547200000000000026362 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from manila.api.openstack import wsgi from manila.api.views import share_snapshot_export_locations from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import policy class ShareSnapshotInstanceExportLocationController(wsgi.Controller): def __init__(self): self._view_builder_class = ( share_snapshot_export_locations.ViewBuilder) self.resource_name = 'share_snapshot_instance_export_location' super(ShareSnapshotInstanceExportLocationController, self).__init__() @wsgi.Controller.api_version('2.32') @wsgi.Controller.authorize def index(self, req, snapshot_instance_id): context = req.environ['manila.context'] instance = self._verify_snapshot_instance( context, snapshot_instance_id) export_locations = ( db_api.share_snapshot_instance_export_locations_get_all( context, instance['id'])) return self._view_builder.list_export_locations(req, export_locations) @wsgi.Controller.api_version('2.32') @wsgi.Controller.authorize def show(self, req, snapshot_instance_id, export_location_id): context = req.environ['manila.context'] self._verify_snapshot_instance(context, snapshot_instance_id) export_location = db_api.share_snapshot_instance_export_location_get( context, export_location_id) return self._view_builder.detail_export_location(req, export_location) def _verify_snapshot_instance(self, context, snapshot_instance_id): try: snapshot_instance = 
db_api.share_snapshot_instance_get( context, snapshot_instance_id) share = db_api.share_get( context, snapshot_instance.share_instance['share_id']) if not share['is_public']: policy.check_policy(context, 'share', 'get', share) except exception.NotFound: msg = _("Snapshot instance '%s' not found.") % snapshot_instance_id raise exc.HTTPNotFound(explanation=msg) return snapshot_instance def create_resource(): return wsgi.Resource(ShareSnapshotInstanceExportLocationController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_snapshot_instances.py0000664000175000017500000000623500000000000023067 0ustar00zuulzuul00000000000000# Copyright 2016 Huawei Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from webob import exc from manila.api.openstack import wsgi from manila.api.views import share_snapshot_instances as instance_view from manila import db from manila import exception from manila.i18n import _ from manila import share class ShareSnapshotInstancesController(wsgi.Controller, wsgi.AdminActionsMixin): """The share snapshot instances API controller for the OpenStack API.""" resource_name = 'share_snapshot_instance' _view_builder_class = instance_view.ViewBuilder def __init__(self): self.share_api = share.API() super(ShareSnapshotInstancesController, self).__init__() @wsgi.Controller.api_version('2.19') @wsgi.Controller.authorize def show(self, req, id): context = req.environ['manila.context'] try: snapshot_instance = db.share_snapshot_instance_get( context, id) except exception.ShareSnapshotInstanceNotFound: msg = (_("Snapshot instance %s not found.") % id) raise exc.HTTPNotFound(explanation=msg) return self._view_builder.detail(req, snapshot_instance) @wsgi.Controller.api_version('2.19') @wsgi.Controller.authorize def index(self, req): """Return a summary list of snapshot instances.""" return self._get_instances(req) @wsgi.Controller.api_version('2.19') @wsgi.Controller.authorize def detail(self, req): """Returns a detailed list of snapshot instances.""" return self._get_instances(req, is_detail=True) def _get_instances(self, req, is_detail=False): """Returns list of snapshot instances.""" context = req.environ['manila.context'] snapshot_id = req.params.get('snapshot_id') instances = db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': snapshot_id}) if is_detail: instances = self._view_builder.detail_list(req, instances) else: instances = self._view_builder.summary_list(req, instances) return instances @wsgi.Controller.api_version('2.19') @wsgi.action('reset_status') def reset_status(self, req, id, body): """Reset the 'status' attribute in the database.""" return self._reset_status(req, id, body) def _update(self, *args, **kwargs): 
db.share_snapshot_instance_update(*args, **kwargs) def _get(self, *args, **kwargs): db.share_snapshot_instance_get(*args, **kwargs) def create_resource(): return wsgi.Resource(ShareSnapshotInstancesController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_snapshots.py0000664000175000017500000003630100000000000021200 0ustar00zuulzuul00000000000000# Copyright 2013 NetApp # Copyright 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The share snapshots api.""" from http import client as http_client from oslo_log import log import webob from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.v1 import share_snapshots from manila.api.v2 import metadata from manila.api.views import share_snapshots as snapshot_views from manila.common import constants from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import share LOG = log.getLogger(__name__) class ShareSnapshotsController(share_snapshots.ShareSnapshotMixin, wsgi.Controller, metadata.MetadataController, wsgi.AdminActionsMixin): """The Share Snapshots API V2 controller for the OpenStack API.""" resource_name = 'share_snapshot' _view_builder_class = snapshot_views.ViewBuilder def __init__(self): super(ShareSnapshotsController, self).__init__() self.share_api = share.API() @wsgi.Controller.authorize('unmanage_snapshot') def _unmanage(self, req, id, body=None, allow_dhss_true=False): """Unmanage a share snapshot.""" context = req.environ['manila.context'] LOG.info("Unmanage share snapshot with id: %s.", id) try: snapshot = self.share_api.get_snapshot(context, id) share = self.share_api.get(context, snapshot['share_id']) if not allow_dhss_true and share.get('share_server_id'): msg = _("Operation 'unmanage_snapshot' is not supported for " "snapshots of shares that are created with share" " servers (created with share-networks).") raise exc.HTTPForbidden(explanation=msg) elif share.get('has_replicas'): msg = _("Share %s has replicas. Snapshots of this share " "cannot currently be unmanaged until all replicas " "are removed.") % share['id'] raise exc.HTTPConflict(explanation=msg) elif snapshot['status'] in constants.TRANSITIONAL_STATUSES: msg = _("Snapshot with transitional state cannot be " "unmanaged. 
Snapshot '%(s_id)s' is in '%(state)s' " "state.") % {'state': snapshot['status'], 's_id': snapshot['id']} raise exc.HTTPForbidden(explanation=msg) self.share_api.unmanage_snapshot(context, snapshot, share['host']) except (exception.ShareSnapshotNotFound, exception.ShareNotFound) as e: raise exc.HTTPNotFound(explanation=e.msg) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.authorize('manage_snapshot') def _manage(self, req, body): """Instruct Manila to manage an existing snapshot. Required HTTP Body: .. code-block:: json { "snapshot": { "share_id": , "provider_location": } } Optional elements in 'snapshot' are: name A name for the new snapshot. description A description for the new snapshot. driver_options Driver specific dicts for the existing snapshot. """ context = req.environ['manila.context'] snapshot_data = self._validate_manage_parameters(context, body) # NOTE(vponomaryov): compatibility actions are required between API and # DB layers for 'name' and 'description' API params that are # represented in DB as 'display_name' and 'display_description' # appropriately. 
name = snapshot_data.get('display_name', snapshot_data.get('name')) description = snapshot_data.get( 'display_description', snapshot_data.get('description')) share_id = snapshot_data['share_id'] snapshot = { 'share_id': share_id, 'provider_location': snapshot_data['provider_location'], 'display_name': name, 'display_description': description, } if req.api_version_request >= api_version.APIVersionRequest("2.73"): if snapshot_data.get('metadata'): metadata = snapshot_data.get('metadata') snapshot.update({ 'metadata': metadata, }) try: share_ref = self.share_api.get(context, share_id) except exception.NotFound: raise exception.ShareNotFound(share_id=share_id) if share_ref.get('is_soft_deleted'): msg = _("Can not manage snapshot for share '%s' " "since it has been soft deleted.") % share_id raise exc.HTTPForbidden(explanation=msg) driver_options = snapshot_data.get('driver_options', {}) try: snapshot_ref = self.share_api.manage_snapshot(context, snapshot, driver_options, share=share_ref) except (exception.ShareNotFound, exception.ShareSnapshotNotFound) as e: raise exc.HTTPNotFound(explanation=e.msg) except (exception.InvalidShare, exception.ManageInvalidShareSnapshot) as e: raise exc.HTTPConflict(explanation=e.msg) return self._view_builder.detail(req, snapshot_ref) def _validate_manage_parameters(self, context, body): if not (body and self.is_valid_body(body, 'snapshot')): msg = _("Snapshot entity not found in request body.") raise exc.HTTPUnprocessableEntity(explanation=msg) data = body['snapshot'] required_parameters = ('share_id', 'provider_location') self._validate_parameters(data, required_parameters) return data def _validate_parameters(self, data, required_parameters, fix_response=False): if fix_response: exc_response = exc.HTTPBadRequest else: exc_response = exc.HTTPUnprocessableEntity for parameter in required_parameters: if parameter not in data: msg = _("Required parameter %s not found.") % parameter raise exc_response(explanation=msg) if not 
data.get(parameter): msg = _("Required parameter %s is empty.") % parameter raise exc_response(explanation=msg) if not isinstance(data[parameter], str): msg = _("Parameter %s must be a string.") % parameter raise exc_response(explanation=msg) def _check_if_share_share_network_is_active(self, context, snapshot): share_network_id = snapshot['share'].get('share_network_id') if share_network_id: share_network = db_api.share_network_get( context, share_network_id) common.check_share_network_is_active(share_network) def _allow(self, req, id, body, enable_ipv6=False): context = req.environ['manila.context'] if not (body and self.is_valid_body(body, 'allow_access')): msg = _("Access data not found in request body.") raise exc.HTTPBadRequest(explanation=msg) access_data = body.get('allow_access') required_parameters = ('access_type', 'access_to') self._validate_parameters(access_data, required_parameters, fix_response=True) access_type = access_data['access_type'] access_to = access_data['access_to'] common.validate_access(access_type=access_type, access_to=access_to, enable_ipv6=enable_ipv6) snapshot = self.share_api.get_snapshot(context, id) self._check_if_share_share_network_is_active(context, snapshot) self._check_mount_snapshot_support(context, snapshot) try: access = self.share_api.snapshot_allow_access( context, snapshot, access_type, access_to) except exception.ShareSnapshotAccessExists as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) return self._view_builder.detail_access(req, access) def _deny(self, req, id, body): context = req.environ['manila.context'] if not (body and self.is_valid_body(body, 'deny_access')): msg = _("Access data not found in request body.") raise exc.HTTPBadRequest(explanation=msg) access_data = body.get('deny_access') self._validate_parameters( access_data, ('access_id',), fix_response=True) access_id = access_data['access_id'] snapshot = self.share_api.get_snapshot(context, id) self._check_mount_snapshot_support(context, snapshot) 
self._check_if_share_share_network_is_active(context, snapshot) access = self.share_api.snapshot_access_get(context, access_id) if access['share_snapshot_id'] != snapshot['id']: msg = _("Access rule provided is not associated with given" " snapshot.") raise webob.exc.HTTPBadRequest(explanation=msg) self.share_api.snapshot_deny_access(context, snapshot, access) return webob.Response(status_int=http_client.ACCEPTED) def _check_mount_snapshot_support(self, context, snapshot): share = self.share_api.get(context, snapshot['share_id']) if not share['mount_snapshot_support']: msg = _("Cannot control access to the snapshot %(snap)s since the " "parent share %(share)s does not support mounting its " "snapshots.") % {'snap': snapshot['id'], 'share': share['id']} raise exc.HTTPBadRequest(explanation=msg) def _access_list(self, req, snapshot_id): context = req.environ['manila.context'] snapshot = self.share_api.get_snapshot(context, snapshot_id) self._check_mount_snapshot_support(context, snapshot) access_list = self.share_api.snapshot_access_get_all(context, snapshot) return self._view_builder.detail_list_access(req, access_list) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-reset_status') def snapshot_reset_status_legacy(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('reset_status') def snapshot_reset_status(self, req, id, body): return self._reset_status(req, id, body) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-force_delete') def snapshot_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('force_delete') def snapshot_force_delete(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.12') @wsgi.response(202) def manage(self, req, body): return self._manage(req, body) @wsgi.Controller.api_version('2.12', '2.48') @wsgi.action('unmanage') def unmanage(self, req, 
id, body=None): return self._unmanage(req, id, body) @wsgi.Controller.api_version('2.49') # noqa @wsgi.action('unmanage') def unmanage(self, req, id, # pylint: disable=function-redefined # noqa F811 body=None): return self._unmanage(req, id, body, allow_dhss_true=True) @wsgi.Controller.api_version('2.32') @wsgi.action('allow_access') @wsgi.response(202) @wsgi.Controller.authorize def allow_access(self, req, id, body=None): enable_ipv6 = False if req.api_version_request >= api_version.APIVersionRequest("2.38"): enable_ipv6 = True return self._allow(req, id, body, enable_ipv6) @wsgi.Controller.api_version('2.32') @wsgi.action('deny_access') @wsgi.Controller.authorize def deny_access(self, req, id, body=None): return self._deny(req, id, body) @wsgi.Controller.api_version('2.32') @wsgi.Controller.authorize def access_list(self, req, snapshot_id): return self._access_list(req, snapshot_id) @wsgi.Controller.api_version("2.0") def index(self, req): """Returns a summary list of shares.""" if req.api_version_request < api_version.APIVersionRequest("2.36"): req.GET.pop('name~', None) req.GET.pop('description~', None) req.GET.pop('description', None) if req.api_version_request < api_version.APIVersionRequest("2.79"): req.GET.pop('with_count', None) return self._get_snapshots(req, is_detail=False) @wsgi.Controller.api_version("2.0") def detail(self, req): """Returns a detailed list of shares.""" if req.api_version_request < api_version.APIVersionRequest("2.36"): req.GET.pop('name~', None) req.GET.pop('description~', None) req.GET.pop('description', None) return self._get_snapshots(req, is_detail=True) @wsgi.Controller.api_version("2.73") @wsgi.Controller.authorize("get_metadata") def index_metadata(self, req, resource_id): """Returns the list of metadata for a given share snapshot.""" return self._index_metadata(req, resource_id) @wsgi.Controller.api_version("2.73") @wsgi.Controller.authorize("update_metadata") def create_metadata(self, req, resource_id, body): return 
self._create_metadata(req, resource_id, body) @wsgi.Controller.api_version("2.73") @wsgi.Controller.authorize("update_metadata") def update_all_metadata(self, req, resource_id, body): return self._update_all_metadata(req, resource_id, body) @wsgi.Controller.api_version("2.73") @wsgi.Controller.authorize("update_metadata") def update_metadata_item(self, req, resource_id, body, key): return self._update_metadata_item(req, resource_id, body, key) @wsgi.Controller.api_version("2.73") @wsgi.Controller.authorize("get_metadata") def show_metadata(self, req, resource_id, key): return self._show_metadata(req, resource_id, key) @wsgi.Controller.api_version("2.73") @wsgi.Controller.authorize("delete_metadata") def delete_metadata(self, req, resource_id, key): return self._delete_metadata(req, resource_id, key) def create_resource(): return wsgi.Resource(ShareSnapshotsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_transfer.py0000664000175000017500000001724500000000000021010 0ustar00zuulzuul00000000000000# Copyright (c) 2022 China Telecom Digital Intelligence. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The share transfer api.""" from http import client as http_client from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import uuidutils import webob from webob import exc from manila.api import common from manila.api.openstack import wsgi from manila.api.views import transfers as transfer_view from manila import exception from manila.i18n import _ from manila.transfer import api as transfer_api LOG = logging.getLogger(__name__) SHARE_TRANSFER_VERSION = "2.77" class ShareTransferController(wsgi.Controller): """The Share Transfer API controller for the OpenStack API.""" resource_name = 'share_transfer' _view_builder_class = transfer_view.ViewBuilder def __init__(self): self.transfer_api = transfer_api.API() super(ShareTransferController, self).__init__() @wsgi.Controller.authorize('get') @wsgi.Controller.api_version(SHARE_TRANSFER_VERSION) def show(self, req, id): """Return data about active transfers.""" context = req.environ['manila.context'] # Not found exception will be handled at the wsgi level transfer = self.transfer_api.get(context, transfer_id=id) return self._view_builder.detail(req, transfer) @wsgi.Controller.api_version(SHARE_TRANSFER_VERSION) def index(self, req): """Returns a summary list of transfers.""" return self._get_transfers(req, is_detail=False) @wsgi.Controller.api_version(SHARE_TRANSFER_VERSION) def detail(self, req): """Returns a detailed list of transfers.""" return self._get_transfers(req, is_detail=True) @wsgi.Controller.authorize('get_all') def _get_transfers(self, req, is_detail): """Returns a list of transfers, transformed through view builder.""" context = req.environ['manila.context'] params = req.params.copy() pagination_params = common.get_pagination_params(req) limit, offset = [pagination_params.pop('limit', None), pagination_params.pop('offset', None)] sort_key, sort_dir = common.get_sort_params(params) filters = params key_map = {'name': 'display_name', 'name~': 'display_name~'} for k in key_map: 
if k in filters: filters[key_map[k]] = filters.pop(k) LOG.debug('Listing share transfers.') transfers = self.transfer_api.get_all(context, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters, offset=offset) if is_detail: transfers = self._view_builder.detail_list(req, transfers) else: transfers = self._view_builder.summary_list(req, transfers) return transfers @wsgi.response(http_client.ACCEPTED) @wsgi.Controller.api_version(SHARE_TRANSFER_VERSION) @wsgi.Controller.authorize('create') def create(self, req, body): """Create a new share transfer.""" LOG.debug('Creating new share transfer %s', body) context = req.environ['manila.context'] if not self.is_valid_body(body, 'transfer'): msg = _("'transfer' is missing from the request body.") raise exc.HTTPBadRequest(explanation=msg) transfer = body.get('transfer', {}) share_id = transfer.get('share_id') if not share_id: msg = _("Must supply 'share_id' attribute.") raise exc.HTTPBadRequest(explanation=msg) if not uuidutils.is_uuid_like(share_id): msg = _("The 'share_id' attribute must be a uuid.") raise exc.HTTPBadRequest(explanation=msg) transfer_name = transfer.get('name') if transfer_name is not None: transfer_name = transfer_name.strip() LOG.debug("Creating transfer of share %s", share_id) try: new_transfer = self.transfer_api.create(context, share_id, transfer_name) except exception.Invalid as error: raise exc.HTTPBadRequest(explanation=error.msg) transfer = self._view_builder.create(req, dict(new_transfer)) return transfer @wsgi.response(http_client.ACCEPTED) @wsgi.Controller.api_version(SHARE_TRANSFER_VERSION) @wsgi.Controller.authorize('accept') def accept(self, req, id, body): """Accept a new share transfer.""" transfer_id = id LOG.debug('Accepting share transfer %s', transfer_id) context = req.environ['manila.context'] if not self.is_valid_body(body, 'accept'): msg = _("'accept' is missing from the request body.") raise exc.HTTPBadRequest(explanation=msg) accept = body.get('accept', {}) auth_key = 
accept.get('auth_key') if not auth_key: msg = _("Must supply 'auth_key' while accepting a " "share transfer.") raise exc.HTTPBadRequest(explanation=msg) clear_rules = accept.get('clear_access_rules', False) if clear_rules: try: clear_rules = strutils.bool_from_string(clear_rules, strict=True) except (ValueError, TypeError): msg = (_('Invalid boolean clear_access_rules : %(value)s') % {'value': accept['clear_access_rules']}) raise exc.HTTPBadRequest(explanation=msg) LOG.debug("Accepting transfer %s", transfer_id) try: self.transfer_api.accept( context, transfer_id, auth_key, clear_rules=clear_rules) except (exception.ShareSizeExceedsLimit, exception.ShareLimitExceeded, exception.ShareSizeExceedsAvailableQuota, exception.ShareReplicasLimitExceeded, exception.ShareReplicaSizeExceedsAvailableQuota, exception.SnapshotSizeExceedsAvailableQuota, exception.SnapshotLimitExceeded) as e: raise exc.HTTPRequestEntityTooLarge(explanation=e.msg, headers={'Retry-After': '0'}) except (exception.InvalidShare, exception.InvalidSnapshot, exception.InvalidAuthKey, exception.TransferNotFound) as error: raise exc.HTTPBadRequest(explanation=error.msg) @wsgi.Controller.api_version(SHARE_TRANSFER_VERSION) @wsgi.Controller.authorize('delete') def delete(self, req, id): """Delete a transfer.""" context = req.environ['manila.context'] LOG.debug("Delete transfer with id: %s", id) # Not found exception will be handled at the wsgi level self.transfer_api.delete(context, transfer_id=id) return webob.Response(status_int=http_client.OK) def create_resource(): return wsgi.Resource(ShareTransferController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/share_types.py0000664000175000017500000003747600000000000020340 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # Copyright (c) 2014 NetApp, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The share type API controller module..""" import ast from http import client as http_client from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils import webob from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.views import types as views_types from manila.common import constants from manila import exception from manila.i18n import _ from manila import rpc from manila.share import share_types LOG = log.getLogger(__name__) class ShareTypesController(wsgi.Controller): """The share types API controller for the OpenStack API.""" resource_name = 'share_type' _view_builder_class = views_types.ViewBuilder def __getattr__(self, key): if key == 'os-share-type-access': return self.share_type_access return super(ShareTypesController, self).__getattribute__(key) def _notify_share_type_error(self, context, method, payload): rpc.get_notifier('shareType').error(context, method, payload) def _notify_share_type_info(self, context, method, share_type): payload = dict(share_types=share_type) rpc.get_notifier('shareType').info(context, method, payload) def _check_body(self, body, action_name): if not self.is_valid_body(body, action_name): raise webob.exc.HTTPBadRequest() access = body[action_name] project = access.get('project') if not uuidutils.is_uuid_like(project): msg = _("Bad project 
format: " "project is not in proper format (%s)") % project raise webob.exc.HTTPBadRequest(explanation=msg) @wsgi.Controller.authorize def index(self, req): """Returns the list of share types.""" limited_types = self._get_share_types(req) req.cache_db_share_types(limited_types) return self._view_builder.index(req, limited_types) @wsgi.Controller.authorize def show(self, req, id): """Return a single share type item.""" context = req.environ['manila.context'] try: share_type = self._show_share_type_details(context, id) except exception.NotFound: msg = _("Share type not found.") raise exc.HTTPNotFound(explanation=msg) req.cache_db_share_type(share_type) return self._view_builder.show(req, share_type) def _show_share_type_details(self, context, id): share_type = share_types.get_share_type(context, id) required_extra_specs = {} try: required_extra_specs = share_types.get_valid_required_extra_specs( share_type['extra_specs']) except exception.InvalidExtraSpec: LOG.exception('Share type %(share_type_id)s has invalid required' ' extra specs.', {'share_type_id': id}) share_type['required_extra_specs'] = required_extra_specs return share_type @wsgi.Controller.authorize def default(self, req): """Return default volume type.""" context = req.environ['manila.context'] try: share_type = share_types.get_default_share_type(context) except exception.NotFound: msg = _("Share type not found") raise exc.HTTPNotFound(explanation=msg) if not share_type: msg = _("Default share type not found") raise exc.HTTPNotFound(explanation=msg) return self._view_builder.show(req, share_type) def _get_share_types(self, req): """Helper function that returns a list of type dicts.""" filters = {} context = req.environ['manila.context'] if context.is_admin: # Only admin has query access to all share types filters['is_public'] = common.parse_is_public( req.params.get('is_public')) else: filters['is_public'] = True extra_specs = req.params.get('extra_specs', {}) extra_specs_disallowed = 
(req.api_version_request < api_version.APIVersionRequest("2.43")) if extra_specs and extra_specs_disallowed: msg = _("Filter by 'extra_specs' is not supported by this " "microversion. Use 2.43 or greater microversion to " "be able to use filter search by 'extra_specs.") raise webob.exc.HTTPBadRequest(explanation=msg) elif extra_specs: extra_specs = ast.literal_eval(extra_specs) filters['extra_specs'] = share_types.sanitize_extra_specs( extra_specs) limited_types = share_types.get_all_types( context, search_opts=filters).values() return list(limited_types) @wsgi.Controller.api_version("1.0", "2.23") @wsgi.action("create") def create(self, req, body): return self._create(req, body, set_defaults=True) @wsgi.Controller.api_version("2.24") # noqa @wsgi.action("create") def create(self, req, body): # pylint: disable=function-redefined # noqa F811 return self._create(req, body, set_defaults=False) @wsgi.Controller.authorize('create') def _create(self, req, body, set_defaults=False): """Creates a new share type.""" context = req.environ['manila.context'] if (not self.is_valid_body(body, 'share_type') and not self.is_valid_body(body, 'volume_type')): raise webob.exc.HTTPBadRequest() elif self.is_valid_body(body, 'share_type'): share_type = body['share_type'] else: share_type = body['volume_type'] name = share_type.get('name') specs = share_type.get('extra_specs', {}) description = share_type.get('description') if (description and req.api_version_request < api_version.APIVersionRequest("2.41")): msg = _("'description' key is not supported by this " "microversion. 
Use 2.41 or greater microversion " "to be able to use 'description' in share type.") raise webob.exc.HTTPBadRequest(explanation=msg) is_public = share_type.get( 'os-share-type-access:is_public', share_type.get('share_type_access:is_public', True), ) if (name is None or name == "" or len(name) > 255 or (description and len(description) > 255)): msg = _("Type name or description is not valid.") raise webob.exc.HTTPBadRequest(explanation=msg) # Note(cknight): Set the default extra spec value for snapshot_support # for API versions before it was required. if set_defaults: if constants.ExtraSpecs.SNAPSHOT_SUPPORT not in specs: specs[constants.ExtraSpecs.SNAPSHOT_SUPPORT] = True try: required_extra_specs = ( share_types.get_valid_required_extra_specs(specs) ) share_types.create(context, name, specs, is_public, description=description) share_type = share_types.get_share_type_by_name(context, name) share_type['required_extra_specs'] = required_extra_specs req.cache_db_share_type(share_type) self._notify_share_type_info( context, 'share_type.create', share_type) except exception.InvalidExtraSpec as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.ShareTypeExists as err: notifier_err = dict(share_types=share_type, error_message=err.message) self._notify_share_type_error(context, 'share_type.create', notifier_err) raise webob.exc.HTTPConflict(explanation=err.msg) except exception.NotFound as err: notifier_err = dict(share_types=share_type, error_message=err.message) self._notify_share_type_error(context, 'share_type.create', notifier_err) raise webob.exc.HTTPNotFound() return self._view_builder.show(req, share_type) @wsgi.action("delete") @wsgi.Controller.authorize('delete') def _delete(self, req, id): """Deletes an existing share type.""" context = req.environ['manila.context'] try: share_type = share_types.get_share_type(context, id) share_types.destroy(context, share_type['id']) self._notify_share_type_info( context, 'share_type.delete', share_type) 
except exception.ShareTypeInUse as err: notifier_err = dict(id=id, error_message=err.message) self._notify_share_type_error(context, 'share_type.delete', notifier_err) msg = 'Target share type is still in use.' raise webob.exc.HTTPBadRequest(explanation=msg) except exception.NotFound as err: notifier_err = dict(id=id, error_message=err.message) self._notify_share_type_error(context, 'share_type.delete', notifier_err) raise webob.exc.HTTPNotFound() return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version("2.50") @wsgi.action("update") @wsgi.Controller.authorize def update(self, req, id, body): """Update name description is_public for a given share type.""" context = req.environ['manila.context'] if (not self.is_valid_body(body, 'share_type') and not self.is_valid_body(body, 'volume_type')): raise webob.exc.HTTPBadRequest() elif self.is_valid_body(body, 'share_type'): sha_type = body['share_type'] else: sha_type = body['volume_type'] name = sha_type.get('name') description = sha_type.get('description') is_public = sha_type.get('share_type_access:is_public', None) if is_public is not None: try: is_public = strutils.bool_from_string(is_public, strict=True) except ValueError: msg = _("share_type_access:is_public has a non-boolean" " value.") raise webob.exc.HTTPBadRequest(explanation=msg) # If name specified, name can not be empty or greater than 255. if name is not None: if len(name.strip()) == 0: msg = _("Share type name cannot be empty.") raise webob.exc.HTTPBadRequest(explanation=msg) if len(name) > 255: msg = _("Share type name cannot be greater than 255 " "characters in length.") raise webob.exc.HTTPBadRequest(explanation=msg) # If description specified, length can not greater than 255. if description and len(description) > 255: msg = _("Share type description cannot be greater than 255 " "characters in length.") raise webob.exc.HTTPBadRequest(explanation=msg) # Name, description and is_public can not be None. 
# Specify one of them, or a combination thereof. if name is None and description is None and is_public is None: msg = _("Specify share type name, description, " "share_type_access:is_public or a combination thereof.") raise webob.exc.HTTPBadRequest(explanation=msg) try: share_types.update(context, id, name, description, is_public=is_public) # Get the updated sha_type = self._show_share_type_details(context, id) req.cache_resource(sha_type, name='types') self._notify_share_type_info( context, 'share_type.update', sha_type) except exception.ShareTypeNotFound as err: notifier_err = {"id": id, "error_message": err} self._notify_share_type_error( context, 'share_type.update', notifier_err) # Not found exception will be handled at the wsgi level raise except exception.ShareTypeExists as err: notifier_err = {"share_type": sha_type, "error_message": err} self._notify_share_type_error( context, 'share_type.update', notifier_err) raise webob.exc.HTTPConflict(explanation=err.msg) except exception.ShareTypeUpdateFailed as err: notifier_err = {"share_type": sha_type, "error_message": err} self._notify_share_type_error( context, 'share_type.update', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=err.msg) return self._view_builder.show(req, sha_type) @wsgi.Controller.authorize('list_project_access') def share_type_access(self, req, id): context = req.environ['manila.context'] try: share_type = share_types.get_share_type( context, id, expected_fields=['projects']) except exception.ShareTypeNotFound: explanation = _("Share type %s not found.") % id raise webob.exc.HTTPNotFound(explanation=explanation) if share_type['is_public']: expl = _("Access list not available for public share types.") raise webob.exc.HTTPNotFound(explanation=expl) return self._view_builder.share_type_access(req, share_type) @wsgi.action('addProjectAccess') @wsgi.Controller.authorize('add_project_access') def _add_project_access(self, req, id, body): context = req.environ['manila.context'] 
self._check_body(body, 'addProjectAccess') project = body['addProjectAccess']['project'] self._verify_if_non_public_share_type(context, id) try: share_types.add_share_type_access(context, id, project) except exception.ShareTypeAccessExists as err: raise webob.exc.HTTPConflict(explanation=err.message) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.action('removeProjectAccess') @wsgi.Controller.authorize('remove_project_access') def _remove_project_access(self, req, id, body): context = req.environ['manila.context'] self._check_body(body, 'removeProjectAccess') project = body['removeProjectAccess']['project'] self._verify_if_non_public_share_type(context, id) try: share_types.remove_share_type_access(context, id, project) except exception.ShareTypeAccessNotFound as err: raise webob.exc.HTTPNotFound(explanation=err.message) return webob.Response(status_int=http_client.ACCEPTED) def _verify_if_non_public_share_type(self, context, share_type_id): try: share_type = share_types.get_share_type(context, share_type_id) if share_type['is_public']: msg = _("Type access modification is not applicable to " "public share type.") raise webob.exc.HTTPConflict(explanation=msg) except exception.ShareTypeNotFound as err: raise webob.exc.HTTPNotFound(explanation=err.message) def create_resource(): return wsgi.Resource(ShareTypesController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/v2/shares.py0000664000175000017500000007662300000000000017274 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from oslo_config import cfg from oslo_log import log import webob from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.v1 import share_manage from manila.api.v1 import share_unmanage from manila.api.v1 import shares from manila.api.v2 import metadata from manila.api.views import share_accesses as share_access_views from manila.api.views import share_migration as share_migration_views from manila.api.views import shares as share_views from manila.common import constants from manila import db from manila import exception from manila.i18n import _ from manila.lock import api as resource_locks from manila import policy from manila import share from manila import utils LOG = log.getLogger(__name__) CONF = cfg.CONF class ShareController(wsgi.Controller, shares.ShareMixin, share_manage.ShareManageMixin, share_unmanage.ShareUnmanageMixin, metadata.MetadataController, wsgi.AdminActionsMixin): """The Shares API v2 controller for the OpenStack API.""" resource_name = 'share' _view_builder_class = share_views.ViewBuilder def __init__(self): super(ShareController, self).__init__() self.share_api = share.API() self.resource_locks_api = resource_locks.API() self._access_view_builder = share_access_views.ViewBuilder() self._migration_view_builder = share_migration_views.ViewBuilder() self._conf_admin_only_metadata_keys = getattr( CONF, 'admin_only_metadata', [] ) @wsgi.Controller.authorize('revert_to_snapshot') def 
_revert(self, req, id, body=None): """Revert a share to a snapshot.""" context = req.environ['manila.context'] revert_data = self._validate_revert_parameters(context, body) try: share_id = id snapshot_id = revert_data['snapshot_id'] share = self.share_api.get(context, share_id) snapshot = self.share_api.get_snapshot(context, snapshot_id) if share.get('is_soft_deleted'): msg = _("Share '%s cannot revert to snapshot, " "since it has been soft deleted.") % share_id raise exc.HTTPForbidden(explanation=msg) # Ensure share supports reverting to a snapshot if not share['revert_to_snapshot_support']: msg_args = {'share_id': share_id, 'snap_id': snapshot_id} msg = _('Share %(share_id)s may not be reverted to snapshot ' '%(snap_id)s, because the share does not have that ' 'capability.') raise exc.HTTPBadRequest(explanation=msg % msg_args) # Ensure requested share & snapshot match. if share['id'] != snapshot['share_id']: msg_args = {'share_id': share_id, 'snap_id': snapshot_id} msg = _('Snapshot %(snap_id)s is not associated with share ' '%(share_id)s.') raise exc.HTTPBadRequest(explanation=msg % msg_args) # Ensure share status is 'available'. if share['status'] != constants.STATUS_AVAILABLE: msg_args = { 'share_id': share_id, 'state': share['status'], 'available': constants.STATUS_AVAILABLE, } msg = _("Share %(share_id)s is in '%(state)s' state, but it " "must be in '%(available)s' state to be reverted to a " "snapshot.") raise exc.HTTPConflict(explanation=msg % msg_args) # Ensure snapshot status is 'available'. 
if snapshot['status'] != constants.STATUS_AVAILABLE: msg_args = { 'snap_id': snapshot_id, 'state': snapshot['status'], 'available': constants.STATUS_AVAILABLE, } msg = _("Snapshot %(snap_id)s is in '%(state)s' state, but it " "must be in '%(available)s' state to be restored.") raise exc.HTTPConflict(explanation=msg % msg_args) # Ensure a long-running task isn't active on the share if share.is_busy: msg_args = {'share_id': share_id} msg = _("Share %(share_id)s may not be reverted while it has " "an active task.") raise exc.HTTPConflict(explanation=msg % msg_args) # Ensure the snapshot is the most recent one. latest_snapshot = self.share_api.get_latest_snapshot_for_share( context, share_id) if not latest_snapshot: msg_args = {'share_id': share_id} msg = _("Could not determine the latest snapshot for share " "%(share_id)s.") raise exc.HTTPBadRequest(explanation=msg % msg_args) if latest_snapshot['id'] != snapshot_id: msg_args = { 'share_id': share_id, 'snap_id': snapshot_id, 'latest_snap_id': latest_snapshot['id'], } msg = _("Snapshot %(snap_id)s may not be restored because " "it is not the most recent snapshot of share " "%(share_id)s. Currently the latest snapshot is " "%(latest_snap_id)s.") raise exc.HTTPConflict(explanation=msg % msg_args) # Ensure the access rules are not in the process of updating for instance in share['instances']: access_rules_status = instance['access_rules_status'] if access_rules_status != constants.ACCESS_STATE_ACTIVE: msg_args = { 'share_id': share_id, 'snap_id': snapshot_id, 'state': constants.ACCESS_STATE_ACTIVE } msg = _("Snapshot %(snap_id)s belongs to a share " "%(share_id)s which has access rules that are " "not %(state)s.") raise exc.HTTPConflict(explanation=msg % msg_args) msg_args = {'share_id': share_id, 'snap_id': snapshot_id} msg = 'Reverting share %(share_id)s to snapshot %(snap_id)s.' 
LOG.info(msg, msg_args) self.share_api.revert_to_snapshot(context, share, snapshot) except exception.ShareNotFound as e: raise exc.HTTPNotFound(explanation=e.msg) except exception.ShareSnapshotNotFound as e: raise exc.HTTPBadRequest(explanation=e.msg) except exception.ShareSizeExceedsAvailableQuota as e: raise exc.HTTPForbidden(explanation=e.msg) except exception.ReplicationException as e: raise exc.HTTPBadRequest(explanation=e.msg) return webob.Response(status_int=http_client.ACCEPTED) def _validate_revert_parameters(self, context, body): if not (body and self.is_valid_body(body, 'revert')): msg = _("Revert entity not found in request body.") raise exc.HTTPBadRequest(explanation=msg) required_parameters = ('snapshot_id',) data = body['revert'] for parameter in required_parameters: if parameter not in data: msg = _("Required parameter %s not found.") % parameter raise exc.HTTPBadRequest(explanation=msg) if not data.get(parameter): msg = _("Required parameter %s is empty.") % parameter raise exc.HTTPBadRequest(explanation=msg) return data @wsgi.Controller.api_version("2.90") def create(self, req, body): if not self.is_valid_body(body, 'share'): raise exc.HTTPUnprocessableEntity() share = body['share'] scheduler_hints = share.pop('scheduler_hints', None) encryption_key_ref = share.pop('encryption_key_ref', None) return self._create( req, body, check_create_share_from_snapshot_support=True, check_availability_zones_extra_spec=True, scheduler_hints=scheduler_hints, encryption_key_ref=encryption_key_ref) @wsgi.Controller.api_version("2.65", "2.89") def create(self, req, body): # pylint: disable=function-redefined # noqa F811 if not self.is_valid_body(body, 'share'): raise exc.HTTPUnprocessableEntity() share = body['share'] scheduler_hints = share.pop('scheduler_hints', None) if req.api_version_request < api_version.APIVersionRequest("2.67"): if scheduler_hints: scheduler_hints.pop('only_host', None) return self._create( req, body, 
check_create_share_from_snapshot_support=True, check_availability_zones_extra_spec=True, scheduler_hints=scheduler_hints) @wsgi.Controller.api_version("2.48", "2.64") # noqa def create(self, req, body): # pylint: disable=function-redefined # noqa F811 return self._create(req, body, check_create_share_from_snapshot_support=True, check_availability_zones_extra_spec=True) @wsgi.Controller.api_version("2.31", "2.47") # noqa def create(self, req, body): # pylint: disable=function-redefined # noqa F811 return self._create( req, body, check_create_share_from_snapshot_support=True) @wsgi.Controller.api_version("2.24", "2.30") # noqa def create(self, req, body): # pylint: disable=function-redefined # noqa F811 body.get('share', {}).pop('share_group_id', None) return self._create(req, body, check_create_share_from_snapshot_support=True) @wsgi.Controller.api_version("2.0", "2.23") # noqa def create(self, req, body): # pylint: disable=function-redefined # noqa F811 body.get('share', {}).pop('share_group_id', None) return self._create(req, body) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-reset_status') def share_reset_status_legacy(self, req, id, body): context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: raise exc.HTTPNotFound("Share %s not found" % id) if share.get('is_soft_deleted'): msg = _("status cannot be reset for share '%s' " "since it has been soft deleted.") % id raise exc.HTTPForbidden(explanation=msg) return self._reset_status(req, id, body, resource=share) @wsgi.Controller.api_version('2.7') @wsgi.action('reset_status') @wsgi.Controller.authorize('reset_status') def share_reset_status(self, req, id, body): context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: raise exc.HTTPNotFound("Share %s not found" % id) if share.get('is_soft_deleted'): msg = _("status cannot be reset for share '%s' " "since it has been soft deleted.") % id 
raise exc.HTTPForbidden(explanation=msg) return self._reset_status(req, id, body, resource=share) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-force_delete') def share_force_delete_legacy(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('force_delete') def share_force_delete(self, req, id, body): return self._force_delete(req, id, body) @wsgi.Controller.api_version('2.69') @wsgi.action('soft_delete') @wsgi.Controller.authorize('soft_delete') def share_soft_delete(self, req, id, body): """Soft delete a share.""" context = req.environ['manila.context'] LOG.debug("Soft delete share with id: %s", id, context=context) try: share = self.share_api.get(context, id) self.share_api.soft_delete(context, share) except exception.NotFound: raise exc.HTTPNotFound() except exception.InvalidShare as e: raise exc.HTTPForbidden(explanation=e.msg) except exception.ShareBusyException as e: raise exc.HTTPForbidden(explanation=e.msg) except exception.Conflict as e: raise exc.HTTPConflict(explanation=e.msg) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version('2.69') @wsgi.action('restore') @wsgi.Controller.authorize("restore") def share_restore(self, req, id, body): """Restore a share from recycle bin.""" context = req.environ['manila.context'] LOG.debug("Restore share with id: %s", id, context=context) try: share = self.share_api.get(context, id) except exception.NotFound: msg = _("No share exists with ID %s.") raise exc.HTTPNotFound(explanation=msg % id) # If the share not exist in Recycle Bin, the API will return # success directly. is_soft_deleted = share.get('is_soft_deleted') if not is_soft_deleted: return webob.Response(status_int=http_client.OK) # If the share has reached the expired time, and is been deleting, # it too late to restore the share. 
if share['status'] in [constants.STATUS_DELETING, constants.STATUS_ERROR_DELETING]: msg = _("Share %s is being deleted or has suffered an error " "during deletion, cannot be restored.") raise exc.HTTPForbidden(explanation=msg % id) self.share_api.restore(context, share) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version('2.29', experimental=True) @wsgi.action("migration_start") @wsgi.Controller.authorize def migration_start(self, req, id, body): """Migrate a share to the specified host.""" context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: msg = _("Share %s not found.") % id raise exc.HTTPNotFound(explanation=msg) if share.get('is_soft_deleted'): msg = _("Migration cannot start for share '%s' " "since it has been soft deleted.") % id raise exception.InvalidShare(reason=msg) params = body.get('migration_start') if not params: raise exc.HTTPBadRequest(explanation=_("Request is missing body.")) driver_assisted_params = ['preserve_metadata', 'writable', 'nondisruptive', 'preserve_snapshots'] bool_params = (driver_assisted_params + ['force_host_assisted_migration']) mandatory_params = driver_assisted_params + ['host'] utils.check_params_exist(mandatory_params, params) bool_param_values = utils.check_params_are_boolean(bool_params, params) new_share_network = None new_share_type = None new_share_network_id = params.get('new_share_network_id', None) if new_share_network_id: try: new_share_network = db.share_network_get( context, new_share_network_id) except exception.NotFound: msg = _("Share network %s not " "found.") % new_share_network_id raise exc.HTTPBadRequest(explanation=msg) common.check_share_network_is_active(new_share_network) else: share_network_id = share.get('share_network_id', None) if share_network_id: current_share_network = db.share_network_get( context, share_network_id) common.check_share_network_is_active(current_share_network) new_share_type_id = 
params.get('new_share_type_id', None) if new_share_type_id: try: new_share_type = db.share_type_get( context, new_share_type_id) except exception.NotFound: msg = _("Share type %s not found.") % new_share_type_id raise exc.HTTPBadRequest(explanation=msg) try: return_code = self.share_api.migration_start( context, share, params['host'], bool_param_values['force_host_assisted_migration'], bool_param_values['preserve_metadata'], bool_param_values['writable'], bool_param_values['nondisruptive'], bool_param_values['preserve_snapshots'], new_share_network=new_share_network, new_share_type=new_share_type) except exception.Conflict as e: raise exc.HTTPConflict(explanation=e.msg) return webob.Response(status_int=return_code) @wsgi.Controller.api_version('2.22', experimental=True) @wsgi.action("migration_complete") @wsgi.Controller.authorize def migration_complete(self, req, id, body): """Invokes 2nd phase of share migration.""" context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: msg = _("Share %s not found.") % id raise exc.HTTPNotFound(explanation=msg) self.share_api.migration_complete(context, share) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version('2.22', experimental=True) @wsgi.action("migration_cancel") @wsgi.Controller.authorize def migration_cancel(self, req, id, body): """Attempts to cancel share migration.""" context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: msg = _("Share %s not found.") % id raise exc.HTTPNotFound(explanation=msg) self.share_api.migration_cancel(context, share) return webob.Response(status_int=http_client.ACCEPTED) @wsgi.Controller.api_version('2.22', experimental=True) @wsgi.action("migration_get_progress") @wsgi.Controller.authorize def migration_get_progress(self, req, id, body): """Retrieve share migration progress for a given share.""" context = req.environ['manila.context'] try: share = 
self.share_api.get(context, id) except exception.NotFound: msg = _("Share %s not found.") % id raise exc.HTTPNotFound(explanation=msg) result = self.share_api.migration_get_progress(context, share) # refresh share model share = self.share_api.get(context, id) return self._migration_view_builder.get_progress(req, share, result) @wsgi.Controller.api_version('2.22', experimental=True) @wsgi.action("reset_task_state") @wsgi.Controller.authorize def reset_task_state(self, req, id, body): context = req.environ['manila.context'] try: share = self.share_api.get(context, id) except exception.NotFound: raise exception.ShareNotFound(share_id=id) if share.get('is_soft_deleted'): msg = _("task state cannot be reset for share '%s' " "since it has been soft deleted.") % id raise exc.HTTPForbidden(explanation=msg) return self._reset_status(req, id, body, status_attr='task_state', resource=share) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-allow_access') def allow_access_legacy(self, req, id, body): """Add share access rule.""" return self._allow_access(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('allow_access') def allow_access(self, req, id, body): """Add share access rule.""" args = (req, id, body) kwargs = {} if req.api_version_request >= api_version.APIVersionRequest("2.13"): kwargs['enable_ceph'] = True if req.api_version_request >= api_version.APIVersionRequest("2.28"): kwargs['allow_on_error_status'] = True if req.api_version_request >= api_version.APIVersionRequest("2.38"): kwargs['enable_ipv6'] = True if req.api_version_request >= api_version.APIVersionRequest("2.45"): kwargs['enable_metadata'] = True if req.api_version_request >= api_version.APIVersionRequest("2.74"): kwargs['allow_on_error_state'] = True if req.api_version_request >= api_version.APIVersionRequest("2.82"): access_data = body.get('allow_access') kwargs['lock_visibility'] = access_data.get( 'lock_visibility', False) kwargs['lock_deletion'] = 
access_data.get('lock_deletion', False) kwargs['lock_reason'] = access_data.get('lock_reason') return self._allow_access(*args, **kwargs) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-deny_access') def deny_access_legacy(self, req, id, body): """Remove share access rule.""" return self._deny_access(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('deny_access') def deny_access(self, req, id, body): """Remove share access rule.""" args = (req, id, body) kwargs = {} if req.api_version_request >= api_version.APIVersionRequest("2.74"): kwargs['allow_on_error_state'] = True return self._deny_access(*args, **kwargs) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-access_list') def access_list_legacy(self, req, id, body): """List share access rules.""" return self._access_list(req, id, body) @wsgi.Controller.api_version('2.7', '2.44') @wsgi.action('access_list') def access_list(self, req, id, body): """List share access rules.""" return self._access_list(req, id, body) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-extend') def extend_legacy(self, req, id, body): """Extend size of a share.""" body.get('os-extend', {}).pop('force', None) return self._extend(req, id, body) @wsgi.Controller.api_version('2.7', '2.63') @wsgi.action('extend') def extend(self, req, id, body): """Extend size of a share.""" body.get('extend', {}).pop('force', None) return self._extend(req, id, body) @wsgi.Controller.api_version('2.64') # noqa @wsgi.action('extend') def extend(self, req, id, body): # pylint: disable=function-redefined # noqa F811 """Extend size of a share.""" return self._extend(req, id, body) @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-shrink') def shrink_legacy(self, req, id, body): """Shrink size of a share.""" return self._shrink(req, id, body) @wsgi.Controller.api_version('2.7') @wsgi.action('shrink') def shrink(self, req, id, body): """Shrink size of a share.""" return self._shrink(req, id, body) 
@wsgi.Controller.api_version('2.7', '2.7') def manage(self, req, body): body.get('share', {}).pop('is_public', None) detail = self._manage(req, body, allow_dhss_true=False) return detail @wsgi.Controller.api_version("2.8", "2.48") # noqa def manage(self, req, body): # pylint: disable=function-redefined # noqa F811 detail = self._manage(req, body, allow_dhss_true=False) return detail @wsgi.Controller.api_version("2.49") # noqa def manage(self, req, body): # pylint: disable=function-redefined # noqa F811 detail = self._manage(req, body, allow_dhss_true=True) return detail @wsgi.Controller.api_version('2.7', '2.48') @wsgi.action('unmanage') def unmanage(self, req, id, body=None): return self._unmanage(req, id, body, allow_dhss_true=False) @wsgi.Controller.api_version('2.49') # noqa @wsgi.action('unmanage') def unmanage(self, req, id, # pylint: disable=function-redefined # noqa F811 body=None): return self._unmanage(req, id, body, allow_dhss_true=True) @wsgi.Controller.api_version('2.27') @wsgi.action('revert') def revert(self, req, id, body=None): return self._revert(req, id, body) @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("get_all") def index(self, req): """Returns a summary list of shares.""" if req.api_version_request < api_version.APIVersionRequest("2.35"): req.GET.pop('export_location_id', None) req.GET.pop('export_location_path', None) if req.api_version_request < api_version.APIVersionRequest("2.36"): req.GET.pop('name~', None) req.GET.pop('description~', None) req.GET.pop('description', None) if req.api_version_request < api_version.APIVersionRequest("2.42"): req.GET.pop('with_count', None) if req.api_version_request < api_version.APIVersionRequest("2.69"): req.GET.pop('is_soft_deleted', None) if req.api_version_request < api_version.APIVersionRequest("2.90"): req.GET.pop('encryption_key_ref', None) return self._get_shares(req, is_detail=False) @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("get_all") def detail(self, req): 
"""Returns a detailed list of shares.""" if req.api_version_request < api_version.APIVersionRequest("2.35"): req.GET.pop('export_location_id', None) req.GET.pop('export_location_path', None) if req.api_version_request < api_version.APIVersionRequest("2.36"): req.GET.pop('name~', None) req.GET.pop('description~', None) req.GET.pop('description', None) if req.api_version_request < api_version.APIVersionRequest("2.69"): req.GET.pop('is_soft_deleted', None) if req.api_version_request < api_version.APIVersionRequest("2.90"): req.GET.pop('encryption_key_ref', None) return self._get_shares(req, is_detail=True) def _validate_metadata_for_update(self, req, share_id, metadata, delete=True): persistent_keys = set(self._conf_admin_only_metadata_keys) context = req.environ['manila.context'] if set(metadata).intersection(persistent_keys): try: policy.check_policy( context, 'share', 'update_admin_only_metadata') except exception.PolicyNotAuthorized: msg = _("Cannot set or update admin only metadata.") LOG.exception(msg) raise exc.HTTPForbidden(explanation=msg) persistent_keys = [] current_share_metadata = db.share_metadata_get(context, share_id) if delete: _metadata = metadata for key in persistent_keys: if key in current_share_metadata: _metadata[key] = current_share_metadata[key] else: metadata_copy = metadata.copy() for key in persistent_keys: metadata_copy.pop(key, None) _metadata = current_share_metadata.copy() _metadata.update(metadata_copy) return _metadata # NOTE: (ashrod98) original metadata method and policy overrides @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("get_share_metadata") def index_metadata(self, req, resource_id): """Returns the list of metadata for a given share.""" return self._index_metadata(req, resource_id) @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("update_share_metadata") def create_metadata(self, req, resource_id, body): if not self.is_valid_body(body, 'metadata'): expl = _('Malformed request body') raise 
exc.HTTPBadRequest(explanation=expl) _metadata = self._validate_metadata_for_update(req, resource_id, body['metadata'], delete=False) body['metadata'] = _metadata metadata = self._create_metadata(req, resource_id, body) context = req.environ['manila.context'] self.share_api.update_share_from_metadata(context, resource_id, metadata.get('metadata')) return metadata @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("update_share_metadata") def update_all_metadata(self, req, resource_id, body): if not self.is_valid_body(body, 'metadata'): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) _metadata = self._validate_metadata_for_update(req, resource_id, body['metadata']) body['metadata'] = _metadata metadata = self._update_all_metadata(req, resource_id, body) context = req.environ['manila.context'] self.share_api.update_share_from_metadata(context, resource_id, metadata.get('metadata')) return metadata @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("update_share_metadata") def update_metadata_item(self, req, resource_id, body, key): if not self.is_valid_body(body, 'meta'): expl = _('Malformed request body') raise exc.HTTPBadRequest(explanation=expl) _metadata = self._validate_metadata_for_update(req, resource_id, body['metadata'], delete=False) body['metadata'] = _metadata metadata = self._update_metadata_item(req, resource_id, body, key) context = req.environ['manila.context'] self.share_api.update_share_from_metadata(context, resource_id, metadata.get('metadata')) return metadata @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("get_share_metadata") def show_metadata(self, req, resource_id, key): return self._show_metadata(req, resource_id, key) @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("delete_share_metadata") def delete_metadata(self, req, resource_id, key): context = req.environ['manila.context'] if key in self._conf_admin_only_metadata_keys: policy.check_policy(context, 
'share', 'update_admin_only_metadata') return self._delete_metadata(req, resource_id, key) def create_resource(): return wsgi.Resource(ShareController()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8416727 manila-21.0.0/manila/api/validation/0000775000175000017500000000000000000000000017222 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/validation/__init__.py0000664000175000017500000001711500000000000021340 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """API request/response validating middleware.""" import functools import typing as ty from oslo_serialization import jsonutils import webob from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila.api.validation import validators from manila import exception from manila.i18n import _ def validated(cls): cls._validated = True return cls def _schema_validator( schema: ty.Dict[str, ty.Any], target: ty.Dict[str, ty.Any], min_version: ty.Optional[str], max_version: ty.Optional[str], args: ty.Any, kwargs: ty.Any, is_body: bool = True, ): """A helper method to execute JSON Schema Validation. This method checks the request version whether matches the specified ``max_version`` and ``min_version``. 
If the version range matches the request, we validate ``schema`` against ``target``. A failure will result in ``ValidationError`` being raised. :param schema: The JSON Schema schema used to validate the target. :param target: The target to be validated by the schema. :param min_version: A string indicating the minimum API version ``schema`` applies against. :param max_version: A string indicating the maximum API version ``schema`` applies against. :param args: Positional arguments which passed into original method. :param kwargs: Keyword arguments which passed into original method. :param is_body: Whether ``target`` is a HTTP request body or not. :returns: None. :raises: ``ValidationError`` if validation fails. """ min_ver = api_version.APIVersionRequest(min_version) max_ver = api_version.APIVersionRequest(max_version) # NOTE: The request object is always the second argument. However, numerous # unittests pass in the request object via kwargs instead so we handle that # as well. # TODO(stephenfin): Fix unit tests so we don't have to to do this if 'req' in kwargs: ver = kwargs['req'].api_version_request else: ver = args[1].api_version_request if ver.matches(min_ver, max_ver): # Only validate against the schema if it lies within # the version range specified. Note that if both min # and max are not specified the validator will always # be run. schema_validator = validators._SchemaValidator(schema, is_body=is_body) schema_validator.validate(target) def request_body_schema( schema: ty.Dict[str, ty.Any], min_version: ty.Optional[str] = None, max_version: ty.Optional[str] = None, ): """Register a schema to validate request body. ``schema`` will be used for validating the request body just before the API method is executed. :param schema: The JSON Schema schema used to validate the target. :param min_version: A string indicating the minimum API version ``schema`` applies against. :param max_version: A string indicating the maximum API version ``schema`` applies against. 
""" def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): _schema_validator( schema, kwargs['body'], min_version, max_version, args, kwargs, is_body=True, ) return func(*args, **kwargs) wrapper._request_body_schema = schema return wrapper return add_validator def request_query_schema( schema: ty.Dict[str, ty.Any], min_version: ty.Optional[str] = None, max_version: ty.Optional[str] = None, ): """Register a schema to validate request query string parameters. ``schema`` will be used for validating request query strings just before the API method is executed. :param schema: The JSON Schema schema used to validate the target. :param min_version: A string indicating the minimum API version ``schema`` applies against. :param max_version: A string indicating the maximum API version ``schema`` applies against. """ def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): # NOTE: The request object is always the second argument. However, # numerous unittests pass in the request object via kwargs instead # so we handle that as well. # TODO(stephenfin): Fix unit tests so we don't have to to do this if 'req' in kwargs: req = kwargs['req'] else: req = args[1] # NOTE: The webob package throws UnicodeError when param cannot be # decoded. Catch this and raise HTTP 400. try: query = req.GET.dict_of_lists() except UnicodeDecodeError: msg = _('Query string is not UTF-8 encoded') raise exception.ValidationError(msg) _schema_validator( schema, query, min_version, max_version, args, kwargs, is_body=True, ) return func(*args, **kwargs) wrapper._request_query_schema = schema return wrapper return add_validator def response_body_schema( schema: ty.Dict[str, ty.Any], min_version: ty.Optional[str] = None, max_version: ty.Optional[str] = None, ): """Register a schema to validate response body. ``schema`` will be used for validating the response body just after the API method is executed. 
:param schema: The JSON Schema schema used to validate the target. :param min_version: A string indicating the minimum API version ``schema`` applies against. :param max_version: A string indicating the maximum API version ``schema`` applies against. """ def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): response = func(*args, **kwargs) # NOTE(stephenfin): If our response is an object, we need to # serializer and deserialize to convert e.g. date-time to strings if isinstance(response, wsgi.ResponseObject): serializer = wsgi.JSONDictSerializer() _body = serializer.serialize(response.obj) # TODO(stephenfin): We should replace all instances of this with # wsgi.ResponseObject elif isinstance(response, webob.Response): _body = response.body else: serializer = wsgi.JSONDictSerializer() _body = serializer.serialize(response) if _body == b'': body = None else: body = jsonutils.loads(_body) _schema_validator( schema, body, min_version, max_version, args, kwargs, is_body=True, ) return response wrapper._response_body_schema = schema return wrapper return add_validator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/validation/helpers.py0000664000175000017500000000167100000000000021243 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pathlib import yaml _PARAMETERS_YAML = None def _load_parameters(): global _PARAMETERS_YAML p = pathlib.Path(__file__).with_name('parameters.yaml') with p.open(mode='r', encoding="utf-8") as f: _PARAMETERS_YAML = yaml.safe_load(f) def description(parameter): if _PARAMETERS_YAML is None: _load_parameters() return _PARAMETERS_YAML[parameter]['description'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/validation/parameter_types.py0000664000175000017500000000331600000000000023003 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Common parameter types for validating API requests.""" from manila.common import constants def single_param(schema): """Macro function to support query params that allow only one value.""" ret = multi_params(schema) ret['maxItems'] = 1 return ret def multi_params(schema): """Macro function to support query params that allow multiple values.""" return {'type': 'array', 'items': schema} boolean = { 'type': ['boolean', 'string'], 'enum': [ True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on', 'YES', 'Yes', 'yes', 'y', 't', False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off', 'NO', 'No', 'no', 'n', 'f', ], } positive_integer = { 'type': ['integer', 'string'], 'pattern': '^[0-9]*$', 'minimum': 1, 'maximum': constants.DB_MAX_INT, 'minLength': 1, } non_negative_integer = { 'type': ['integer', 'string'], 'pattern': '^[0-9]*$', 'minimum': 0, 'maximum': constants.DB_MAX_INT, 'minLength': 1, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/validation/parameters.yaml0000664000175000017500000035470700000000000022271 0ustar00zuulzuul00000000000000# variables in header #{} service_token_locks: description: | An auth-token specified via the header ``X-Service-Token``. With the OpenStack Identity (Keystone) context, this token can be obtained by a user that has the ``service`` role. The presence of this header is used by resource lock API methods to set or match the lock user's context. A resource lock created by a service user cannot be manipulated by non-service users. in: header required: false type: string # variables in path access_id_path: description: | The UUID of the access rule to which access is granted. in: path required: true type: string api_version: in: path required: true type: string description: > The API version as returned in the links from the ``GET /`` call. backup_id_request_path: description: | The UUID of the share backup. 
in: path required: true type: string export_location_id_path: description: | The UUID of the export location. in: path required: true type: string extra_spec_key_path: description: | The extra specification key in: path required: true type: string group_snapshot_id_path: description: | The group snapshot ID. in: path required: true type: string message_id: description: | The UUID of the message. in: path required: false type: string metadata_key_path: description: | The key of a metadata item. For example, if the metadata on an existing share or access rule is as follows: ``"project": "my_test", "aim": "testing"``, the keys are "project" and "aim". in: path required: false type: string project_id_path: description: | The project ID of the user or service making the API request. This parameter is optional if the service supports API version 2.60. If the service doesn't yet support API version 2.60, ensure that the service catalog endpoint obtained for the service has the user's project_id after the "/v2/" component, for example, the API to retrieve shares is *GET /v2/{project_id}/shares*. If the service doesn't yet support API version 2.60, and the project_id is ommitted from the API URL, a Malformed Request error is returned (HTTP 400). in: path required: false type: string project_id_quota_request_path: description: | The ID of the project whose quotas must be acted upon by the API. This is optional, and if it is not specified, the project ID is derived from the caller's API token. System/Domain scoped users interacting with this API *must* specify the project ID for the project whose quotas they need to query or manipulate. Note that this ID can be different from the project ID that precedes the resource name "quota-sets". For example, in a multi-tenant cloud, the first ID in the URL is typically the project ID of a privileged user (such as a cloud administrator) that can create, query or delete quotas of other projects in the cloud. 
If a server supports API version 2.60, URLs no longer need the privileged user's project ID prior to the resource name. in: path required: false type: string quota_class_name: description: The name of the quota class for which to set quotas. in: path required: true type: string resource_lock_id_path: description: | The UUID of the resource lock. in: path required: true type: string security_service_id_path: description: | The UUID of the security service. in: path required: true type: string share_group_id_path: description: | The UUID of the share group. in: path required: true type: string share_group_type_id_path: description: | The UUID of the share group type. in: path required: true type: string share_id: description: | The UUID of the share. in: path required: true type: string share_instance_id: description: | The UUID of the share instance. in: path required: true type: string share_network_id_path: description: | The UUID of the share network. in: path required: true type: string share_network_subnet_id_path: description: | The UUID of the share network subnet. in: path required: true type: string share_replica_id_path: description: | The UUID of the share replica. in: path required: true type: string share_type_for_quota: description: | The name or UUID of the share type. If you specify this parameter in the URI, you show, update, or delete quotas for this share type. This parameter is mutually exclusive with the "user_id" query parameter. in: path required: false type: string min_version: 2.39 share_type_id: description: | The UUID of the share type. in: path required: true type: string snapshot_id_path: description: | The UUID of the snapshot. in: path required: true type: string snapshot_instance_id_path: description: | The UUID of the share snapshot instance. in: path required: true type: string transfer_id: description: | The unique identifier for a transfer. 
in: path required: true type: string # variables in query action_id: in: query required: false type: string description: > The ID of the action during which the message was created. all_tenants_query: description: | (Admin only). Defines whether to list the requested resources for all projects. Set to ``1`` to list resources for all projects. Set to ``0`` to list resources only for the current project. Examples of resources include shares, snapshots, share networks, security services and share groups. in: query required: false type: boolean backend_capabilities_query: description: | The capabilities for the storage back end. in: query required: false type: string backend_host_query: description: | The host name for the back end. in: query required: false type: string backend_pool_query: description: | The pool name for the back end. in: query required: false type: string backend_query: description: | The name of the back end. in: query required: false type: string backup_host_query: description: | The host name of the backup to query with. Querying by hostname is a privileged operation. If restricted by API policy, this query parameter may be silently ignored by the server. in: query required: false type: string backup_share_id_query: description: | The UUID of the share that the backup pertains to. in: query required: false type: string backup_status_query: description: | Filters by a backup status. A valid filter value can be one of 'creating', 'error', 'available', 'restoring'. in: query required: false type: string backup_topic_query: description: | Filters by a backup topic. A valid filter value can be one of 'manila-data', 'manila-share'. in: query required: false type: string cidr_query: description: | The CIDR to filter share networks. in: query required: false type: string created_before: description: | The date and time stamp when the query operation, only return user messages before it. 
The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2021-11-10T09:49:58+08:00``. in: query required: false type: string min_version: 2.52 created_before_query: description: | Search for the list of resources that were created prior to the specified date. The date is in 'yyyy-mm-dd' format. in: query required: false type: string created_since: description: | The date and time stamp when the query operation, only return user messages since it. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2021-11-10T09:49:58+08:00``. in: query required: false type: string min_version: 2.52 created_since_query: description: | Search for the list of resources that were created after the specified date. The date is in 'yyyy-mm-dd' format. in: query required: false type: string description_inexact_query: description: | The description pattern that can be used to filter shares, share snapshots, share networks or share groups. in: query required: false type: string min_version: 2.36 description_inexact_query_versionless: description: | The description pattern that can be used to filter share backups. in: query required: false type: string description_query: description: | The user defined description text that can be used to filter resources. in: query required: false type: string detail_id: in: query required: false type: string description: > The ID of the message detail. encryption_key_ref_query: description: | The encryption key ref that can be used to filter shares or share instances. in: query required: false type: string min_version: 2.90 export_location_id_query: description: | The export location UUID that can be used to filter shares or share instances. 
in: query required: false type: string min_version: 2.35 export_location_path_query: description: | The export location path that can be used to filter shares or share instances. in: query required: false type: string min_version: 2.35 extra_specs_query: description: | The extra specifications as a set of one or more key-value pairs. In each pair, the key is the name of the extra specification and the value is the share type that was used to filter search share type list. The query must be a “percent-encoded” string, for example, the following query parameters: {'extra-specs': {'snapshot_support': 'true', 'availability_zones': 'az1'}} is encoded as 'extra_specs=%7B%27snapshot_support%27%3A+%27true%27%2C+%27availability_zones%27%3A+%27az1%27%7D' in: query required: false type: string min_version: 2.43 group_snapshot_status_query: description: | Filters by a share group snapshot status. A valid value is ``creating``, ``error``, ``available``, ``deleting``, ``error_deleting``. in: query required: false type: string group_specs_query: description: | The group specifications as a set of one or more key-value pairs. In each pair, the key is the name of the group specification and the value is the share group type that was used to filter search share group type list. The query must be a “percent-encoded” string, for example, the following query parameters: {'group-specs': {'consistent_snapshot_support': 'true'}} is encoded as 'group_specs=%7B%27consistent_snapshot_support%27%3A+%27True%27%7D' in: query required: false type: string min_version: 2.66 host_query: description: | The host name of the resource to query with. Querying by hostname is a privileged operation. If restricted by API policy, this query parameter may be silently ignored by the server. in: query required: false type: string ip_version_query: description: | The IP version to filter share networks. 
in: query required: false type: string is_public_query: description: | A boolean query parameter that, when set to true, allows retrieving public resources that belong to all projects. in: query required: false type: boolean is_soft_deleted_query: description: | A boolean query parameter that, when set to True, will return all shares in recycle bin. Default is False, will return all shares not in recycle bin. in: query required: false type: boolean min_version: 2.69 limit: description: | The maximum number of resource records to return. in: query required: false type: integer limit_query: description: | The maximum number of share groups members to return. in: query required: false type: integer message_level: in: query required: false type: string description: > The message level. metadata_query: in: query required: false type: object description: | One or more metadata key and value pairs as a url encoded dictionary of strings. name_inexact_query: description: | The name pattern that can be used to filter shares, share snapshots, share networks, transfers or share groups. in: query required: false type: string min_version: 2.36 name_inexact_query_versionless: description: | The name pattern that can be used to filter share backups. in: query required: false type: string name_query: description: | The user defined name of the resource to filter resources by. in: query required: false type: string network_type_query: description: | The network type to filter out share networks. in: query required: false type: string neutron_net_id_query: description: | The neutron network id to filter out share networks. in: query required: false type: string neutron_subnet_id_query: description: | The neutron network subnet id to filter out share networks. in: query required: false type: string nova_net_id_query: description: | The ID of a Nova network to filter out share networks. 
in: query required: false type: string max_version: 2.26 offset: description: | The offset to define start point of resource listing. in: query required: false type: integer project_id_messages: description: | The ID of the project for which the message was created. in: query required: false type: string project_id_query: description: | The ID of the project that owns the resource. This query parameter is useful in conjunction with the ``all_tenants`` parameter. in: query required: false type: string request_id: description: | The ID of the request during which the message was created. in: query required: false type: string resource_id: description: | The UUID of the resource for which the message was created. in: query required: false type: string resource_lock_all_projects_query: description: | Set this parameter to True to get resource locks across all project namespaces. in: query required: false type: string resource_lock_id_query: description: | The ID of the resource lock to filter resource locks by. in: query required: false type: string resource_lock_lock_context_query: description: | The lock creator's context to filter locks by. in: query required: false type: string resource_lock_lock_reason_inexact_query: description: | The lock reason pattern that can be used to filter resource locks. in: query required: false type: string resource_lock_lock_reason_query: description: | The lock reason that can be used to filter resource locks. in: query required: false type: string resource_lock_project_id_query: description: | The ID of a project to filter resource locks by. in: query required: false type: string resource_lock_resource_action_query: description: | The ``action`` prevented by the filtered resource locks. in: query required: false type: string resource_lock_resource_id_query: description: | The ID of the resource that the locks pertain to to filter resource locks by. 
in: query required: false type: string resource_lock_resource_type_query: description: | The type of the resource that the locks pertain to to filter resource locks by. in: query required: false type: string resource_lock_user_id_query: description: | The ID of a user to filter resource locks by. in: query required: false type: string resource_type: description: | The type of the resource for which the message was created. in: query required: false type: string resource_type_query: description: | The type of the resource for which the transfer was created. in: query required: false type: string security_service_query: description: | The security service ID to filter out share networks. in: query required: false type: string segmentation_id_query: description: | The segmentation id to filter out share networks. in: query required: false type: string service_binary_query: description: | The service binary name. Default is the base name of the executable. in: query required: false type: string service_host_query: description: | The service host name. in: query required: false type: string service_state_query: description: | The current state of the service. A valid value is ``up`` or ``down``. in: query required: false type: string service_status_query: description: | The service status, which is ``enabled`` or ``disabled``. in: query required: false type: string service_zone_query: description: | The availability zone. in: query required: false type: string share_group_id_query: description: | The UUID of a share group to filter resource. in: query required: false type: string min_version: 2.31 share_group_status_query: description: | Filters by a share group status. A valid value is ``creating``, ``error``, ``available``, ``deleting``, ``error_deleting``. in: query required: false type: string share_group_type_id_query: description: | The share group type ID to filter share groups. 
in: query required: false type: string share_id_access_rules_query: description: | The share ID to filter share access rules with. in: query required: true type: string share_id_replicas_query: description: | The share ID to filter share replicas with. in: query required: false type: string share_network_id_query: description: | The UUID of the share network to filter resources by. in: query required: false type: string share_server_id_query: description: | The UUID of the share server. in: query required: false type: string share_type_id_query: description: | The UUID of a share type to query resources by. in: query required: false type: string share_type_query: description: | The share type name or UUID. Allows filtering back end pools based on the extra-specs in the share type. in: query required: false type: string min_version: 2.23 share_types_query: description: | A list of one or more share type IDs. Allows filtering share groups. in: query required: false type: array snapshot_id_query: description: | The UUID of the share's base snapshot to filter the request based on. in: query required: false type: string sort_dir: description: | The direction to sort a list of resources. A valid value is ``asc``, or ``desc``. in: query required: false type: string sort_key: description: | The key to sort a list of shares. A valid value is ``id``, ``status``, ``size``, ``host``, ``share_proto``, ``export_location``, ``availability_zone``, ``user_id``, ``project_id``, ``created_at``, ``updated_at``, ``display_name``, ``name``, ``share_type_id``, ``share_type``, ``share_network_id``, ``share_network``, ``snapshot_id``, or ``snapshot``. in: query required: false type: string sort_key_backup: description: | The key to sort a list of share backups. 
A valid value is ``id``, ``status``, ``size``, ``host``, ``share_id``, ``availability_zone``, ``created_at``, ``updated_at``, ``display_name``, ``topic``, ``progress`` and ``restore_progress`` in: query required: false type: string sort_key_messages: description: | The key to sort a list of messages. A valid value is ``id``, ``project_id``, ``request_id``, ``resource_type``, ``action_id``, ``detail_id``, ``resource_id``, ``message_level``, ``expires_at``, ``created_at``. in: query required: false type: string sort_key_resource_locks: description: | The key to sort a list of resource locks. A valid value is ``id``, ``resource_id``, ``resource_type``, ``resource_action``, ``user_id``, ``project_id``, ``created_at``, ``updated_at``, ``lock_context``. in: query required: false type: string sort_key_transfer: description: | The key to sort a list of transfers. A valid value is ``id``, ``name``, ``resource_type``, ``resource_id``, ``source_project_id``, ``destination_project_id``, ``created_at``, ``expires_at``. in: query required: false type: string source_backup_id_query: description: | The UUID of the share's backup to filter the request based on. in: query required: false type: string source_share_group_snapshot_id_query: description: | The source share group snapshot ID to list the share group. in: query required: false type: string min_version: 2.31 status_query: description: | Filters by a share status. For valid statuses, see the `section above <#shares>`_. in: query required: false type: string user_id_query: description: | The ID of the user. If you specify this query parameter, you retrieve or update the quotas for this user in the project. If you omit this parameter, you query or update the quotas for the whole project. This parameter is mutually exclusive with the "share_type" parameter. in: query required: false type: string with_count_query: description: | Whether to show ``count`` in share list API response or not, default is ``False``.
This query parameter is useful with pagination. in: query required: false type: boolean min_version: 2.42 with_count_query_without_min_version: description: | Whether to show ``count`` in API response or not, default is ``False``. This query parameter is useful with pagination. in: query required: false type: boolean with_count_snapshot_query: description: | Whether to show ``count`` in share snapshot list API response or not, default is ``False``. This query parameter is useful with pagination. in: query required: false type: boolean min_version: 2.79 # variables in body accepted: description: | Whether the transfer has been accepted. in: body required: true type: boolean access: description: | The ``access`` object. in: body required: true type: object access_id: description: | The UUID of the access rule to which access is granted. in: body required: true type: string access_key: description: | The access credential of the entity granted share access. in: body required: true type: string min_version: 2.21 access_key_share_access_rules: description: | The access credential of the entity granted share access. in: body required: true type: string access_level: description: | The access level to the share. To grant or deny access to a share, you specify one of the following share access levels: - ``rw``. Read and write (RW) access. - ``ro``. Read-only (RO) access. in: body required: true type: string access_list: description: | The object of the access rule. To list access rules, set this value to ``null``. in: body required: true type: string access_metadata: description: | One or more access rule metadata key and value pairs as a dictionary of strings. in: body required: true type: object access_metadata_grant_access: description: | One or more access rule metadata key and value pairs as a dictionary of strings. in: body required: true type: object min_version: 2.45 access_rule_id: description: | The access rule ID. 
in: body required: true type: string access_rules_status: description: | The share instance access rules status. A valid value is ``active``, ``error``, or ``syncing``. In versions prior to 2.28, ``syncing`` was represented with status ``out_of_sync``. in: body required: true type: string min_version: 2.10 access_share_id: description: | The UUID of the share to which you are granted or denied access. in: body required: true type: string access_to: description: | The value that defines the access. The back end grants or denies the access to it. A valid value is one of these values: - ``ip``: Authenticates a client through its IP address, that can be IPv4 or IPv6. You may specify a single client IP address or a range of IP addresses in CIDR notation. For example ``0.0.0.0/0`` for IPv4 or ``::/0`` for IPv6. - ``cert``: Authenticates an instance through a TLS certificate. Specify the TLS identity as the IDENTKEY. A valid value is any string up to 64 characters long in the common name (CN) of the certificate. The meaning of a string depends on its interpretation. - ``user``: Authenticates by a user or group name. A valid value is an alphanumeric string that can contain some special characters and is from 4 to 32 characters long. in: body required: true type: string access_type: in: body required: true type: string description: | The access rule type. A valid value for the share access rule type is one of the following values: - ``ip``: Authenticates a client through its IP address, that can be IPv4 or IPv6. You may specify a single client IP address or a range of IP addresses in CIDR notation. For example ``0.0.0.0/0`` for IPv4 or ``::/0`` for IPv6. - ``cert``: Authenticates a client through a TLS certificate. Specify the TLS identity as the IDENTKEY. A valid value is any string up to 64 characters long in the common name (CN) of the certificate. The meaning of a string depends on its interpretation. - ``user``: Authenticates by a user or group name. 
A valid value is an alphanumeric string that can contain some special characters and is from 4 to 32 characters long. action_id_body: in: body required: true type: string description: > The ID of the action during which the message was created. add_project_access: description: | An object representing the project resource that access should be granted to. in: body required: true type: object allow_access: description: | The object of grant access. in: body required: true type: object auth_key: description: | The authentication key for the transfer. in: body required: true type: string availability_zone: description: | The name of the availability zone the share exists within. in: body required: true type: string availability_zone_id: description: | The availability zone ID the resource exists within. in: body required: true type: string availability_zone_id_share_group: description: | The availability zone ID that the share group exists within. in: body required: true type: string min_version: 2.34 availability_zone_name: description: | The name of the availability zone. in: body required: true type: string availability_zone_request: description: | The UUID or name of an availability zone for resource to be created within. in: body required: false type: string availability_zones: description: | Top level response body element. in: body required: true type: string backend: description: | The name of the back end. in: body required: true type: string backend_details: description: | The back-end details for a server. Each back end can store any key-value information that it requires. For example, the generic back-end driver might store the router ID. in: body required: true type: object backend_host: description: | The host name for the back end. in: body required: true type: string backend_name: description: | The name of the back end in this format: ``host@backend#POOL``: - ``host``. The host name for the back end. - ``backend``. The name of the back end. 
- ``POOL``. The pool name for the back end. in: body required: true type: string backup_az: description: | The availability zone. in: body required: true type: string backup_id_response: description: | The UUID of the share backup. in: body required: true type: string backup_options_request: description: | One or more backup options key and value pairs as a url encoded dictionary of strings. in: body required: false type: object backup_progress: description: | The progress of the backup creation in percentage. in: body required: true type: string backup_restore_progress: description: | The progress of the backup restoration in percentage. in: body required: true type: string backup_share_id: description: | The UUID of the share that the backup pertains to. in: body required: true type: string backup_size: description: | The share backup size, in GiBs. in: body required: true type: integer backup_status: description: | The status of backup which can be one of ``creating``, ``error``, ``available``, ``restoring``. in: body required: true type: string backup_status_request: description: | The backup status, which can be ``available``, ``error``, ``creating``, ``deleting``, ``restoring``. in: body required: false type: string capabilities: description: | The back end capabilities which include ``qos``, ``total_capacity_gb``, etc. in: body required: true type: object capability_driver_handles_share_servers: description: | Share server is usually a storage virtual machine or a lightweight container that is used to export shared file systems. Storage backends may be able to work with configured share servers or allow the share driver to create and manage the lifecycle of share servers. This capability specifies whether the pool's associated share driver is responsible to create and manage the lifecycle of share servers. If ``false``, the administrator of the shared file systems service has configured the share server as necessary for the given back end.
in: body required: true type: boolean capability_driver_version: description: | The driver version of the back end. in: body required: true type: string capability_free_capacity_gb: description: | The amount of free capacity for the back end, in GiBs. A valid value is a string, such as ``unknown``, or an integer. in: body required: true type: string capability_qos: description: | The quality of service (QoS) support. in: body required: true type: boolean capability_replication_domain: description: | The back end replication domain. in: body required: true type: string capability_reserved_percentage: description: | The percentage of the total capacity that is reserved for the internal use by the back end. in: body required: true type: integer capability_server_pools_mapping: description: | The mapping between servers and pools. in: body required: true type: object capability_share_backend_name: description: | The name of the share back end. in: body required: true type: string capability_snapshot_support: description: | The specification that filters back ends by whether they do or do not support share snapshots. in: body required: true type: boolean capability_storage_protocol: description: | The storage protocol for the back end. For example, ``NFS_CIFS``, ``glusterfs``, ``HDFS``, etc. in: body required: true type: string capability_total_capacity_gb: description: | The total capacity for the back end, in GiBs. A valid value is a string, such as ``unknown``, or an integer. in: body required: true type: string capability_vendor_name: description: | The name of the vendor for the back end. in: body required: true type: string cidr: description: | The IP block from which to allocate the network, in CIDR notation. For example, ``172.16.0.0/24`` or ``2001:DB8::/64``. This parameter is automatically set to a value determined by the network provider. 
in: body required: true type: string max_version: 2.50 clear_access_rules: description: | Whether clear all access rules when accept share. in: body required: false type: boolean compatible: description: | Whether the destination backend can or can't handle the share server migration considering the received entries for ``share_network_id``, ``host``, ``nondisruptive``, ``writable`` and ``preserve_snapshots`` matched with the driver supported capabilities. in: body required: true type: boolean consistent_snapshot_support: description: | The consistency snapshot support. in: body required: true type: string min_version: 2.34 count: description: | The total count of requested resource before pagination is applied. This parameter is only present in the API response if "with_count=True" is supplied in the query. in: body required: false type: integer min_version: 2.42 count_without_min_version: description: | The total count of requested resource before pagination is applied. This parameter is only present in the API response if "with_count=True" is supplied in the query. in: body required: false type: integer create_share_from_snapshot_support: description: | Boolean extra spec used for filtering of back ends by their capability to create shares from snapshots. in: body required: false type: boolean min_version: 2.24 create_share_from_snapshot_support_body: description: | Boolean extra spec used for filtering of back ends by their capability to create shares from snapshots. in: body required: false type: boolean create_share_from_snapshot_support_share_capability: description: | Whether or not this share supports snapshots that can be cloned into new shares. Inconsequential if the share doesn't support snapshots. (see capability "snapshot_support") in: body required: true type: boolean min_version: 2.24 created_at: description: | The date and time stamp when the resource was created within the service's database. 
The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2019-03-27T09:49:58-05:00``. in: body required: true type: string current_share_network_security_service_id: description: | The ID of a security service that is currently attached to a share network. in: body required: true type: string deny_access: description: | The ``deny_access`` object. in: body required: true type: object description: description: | The user defined description of the resource. in: body required: true type: string description_request: description: | The user defined description of the resource. The value of this field is limited to 255 characters. in: body required: false type: string destination_project_id: description: | UUID of the destination project to accept transfer resource. in: body required: true type: string destination_share_server_id: description: | UUID of the share server that was created in the destination backend during a share server migration operation. in: body required: true type: string detail_id_body: in: body required: true type: string description: > The ID of the message detail. display_description_request: description: | The user defined description of the resource. This field sets the ``description`` parameter. in: body required: false type: string display_name_request: description: | The user defined name of the resource. This field sets the ``name`` parameter. in: body required: false type: string driver_handles_share_servers: description: | An extra specification that defines the driver mode for share server, or storage, life cycle management. The Shared File Systems service creates a share server for the export of shares. This value is ``true`` when the share driver manages, or handles, the share server life cycle. This value is ``false`` when an administrator rather than a share driver manages the storage life cycle. 
in: body required: true type: boolean driver_options: description: | A set of one or more key and value pairs, as a dictionary of strings, that describe driver options. Details for driver options should be taken from `appropriate share driver documentation `_. in: body required: false type: object encryption_key_ref_request: description: | The encryption key ref is valid barbican secret UUID that will be used to get encryption key by the storage drivers. in: body required: false type: object min_version: 2.90 encryption_key_ref_response: description: | The encryption key ref is valid barbican secret UUID that will be used to get encryption key by the storage drivers. in: body required: true type: object export_location: description: | The export location. For newer API versions it is available in separate APIs. See sections `Share export locations <#share-share-export-locations>`_ and `Share instance export locations <#share-share-instance-export-locations>`_. in: body required: true type: string max_version: 2.8 export_location_availability_zone: description: | The name of the availability zone that the export location belongs to. in: body required: true type: string export_location_id: description: | The share export location UUID. in: body required: true type: string export_location_is_admin_only: description: | Defines purpose of an export location. If set to ``true``, then it is expected to be used for service needs and by administrators only. If it is set to ``false``, then this export location can be used by end users. This parameter is only available to users with an "administrator" role, and cannot be controlled via policy .json. in: body required: true type: boolean export_location_path: description: | The export location path that should be used for mount operation. 
in: body required: true type: string export_location_preferred: description: | Drivers may use this field to identify which export locations are most efficient and should be used preferentially by clients. By default it is set to ``false`` value. in: body required: true type: boolean min_version: 2.14 export_location_preferred_replicas: description: | Drivers may use this field to identify which export locations are most efficient and should be used preferentially by clients. By default it is set to ``false`` value. in: body required: true type: boolean export_location_share_instance_id: description: | The UUID of the share instance that this export location belongs to. This parameter is only available to users with an "administrator" role, and cannot be controlled via policy.yaml. in: body required: true type: string export_locations: description: | A list of export locations. For example, when a share server has more than one network interface, it can have multiple export locations. For newer API versions it is available in separate APIs. See sections `Share export locations <#share-share-export-locations>`_ and `Share instance export locations <#share-share-instance-export-locations>`_. in: body required: true type: array max_version: 2.8 export_path: description: | The share export path in the format appropriate for the protocol: - NFS protocol. ``10.0.0.1:/foo_path``. For example, ``10.254.0.5:/shares/share-42033c24-0261-424f-abda-4fef2f6dbfd5``. - CIFS protocol. For example, ``\\10.0.0.1\foo_name_of_cifs_share``. in: body required: true type: string extend: description: | The ``extend`` object. in: body required: true type: object extension_alias: description: | The alias for the extension. For example, "FOXNSOX", "os-availability-zone", "os-extended-quotas", "os-share-unmanage", or "os-used-limits". in: body required: true type: string extension_description: description: | The description of the extension API.
in: body required: true type: string extension_links: description: | The extension links. in: body required: true type: array extension_name: description: | The name of the extension. For example, "Fox In Socks." in: body required: true type: string extra_spec_key: description: | The extra specification key in: body required: true type: string extra_specs: description: | Extra specifications of the share type. These are key=value pairs of capabilities that the shares of this type are expected to possess. For more information, see `Share Types `_. Some examples include: ``driver_handles_share_servers``, ``replication_type``, ``snapshot_support``, ``mount_snapshot_support``, ``revert_to_snapshot_support``, ``create_share_from_snapshot_support`` in: body required: true type: object extra_specs_request_body: description: | Extra specifications of the share type. These are key=value pairs of capabilities that the shares of this type are expected to possess. For more information, see `Share Types `_. When creating a new share type, required extra-specifications **must** be provided. ``driver_handles_share_servers`` is a required extra-specification, and ``snapshot_support`` was considered a required extra-specification until API version 2.24. When updating extra-specs of a share type, there's no need to provide required extra specifications unless they need to be updated. Some examples of extra-specifications include: ``replication_type``, ``snapshot_support``, ``mount_snapshot_support``, ``revert_to_snapshot_support``, ``create_share_from_snapshot_support`` in: body required: true type: object force: description: | Indicates whether to permit or deny the force- update of a quota that is already used and the requested value exceeds the configured quota. Set to ``True`` to permit the force-update of the quota. Set to ``False`` to deny the force- update of the quota. 
in: body required: false type: boolean force_delete_2: description: | To force-delete a share instance, set this value to ``null``. The force-delete action, unlike the delete action, ignores the share instance status. in: body required: true type: string force_host_assisted_migration: description: | Forces the host-assisted mechanism to be used, thus using the Data Service to copy data across back ends. This parameter value defaults to ``False``. When set to ``True``, it skips the driver-assisted approach which would otherwise be attempted first. If this option is set to ``True``, all driver-assisted options must be set to ``False``. in: body required: false type: boolean force_snapshot_request: description: | Indicates whether snapshot creation must be attempted when a share's status is not ``available``. Set to ``true`` to force snapshot creation when the share is busy performing other operations. Default is ``false``. in: body required: false type: boolean group_snapshot_id: description: | The share group snapshot ID. in: body required: true type: object group_snapshot_links: description: | The share group snapshot links. in: body required: true type: string group_snapshot_members: description: | The share group snapshot members. in: body required: true type: string group_snapshot_status_required: description: | Filters by a share group snapshot status. A valid value is ``creating``, ``error``, ``available``, ``deleting``, ``error_deleting``. in: body required: true type: string group_spec_key: description: | The extra specification key for the share group type. in: body required: true type: string group_specs: description: | The extra specifications for the share group type. in: body required: false type: object group_specs_required: description: | The extra specifications for the share group type. in: body required: true type: object has_replicas: description: | Indicates whether a share has replicas or not. 
in: body required: true type: boolean min_version: 2.11 host: description: | The target pool to which the share should be migrated to, in format ``host@backend#pool``. E.g. ``ubuntu@generic1#GENERIC1``. in: body required: true type: string host_resource_response: description: | The host name of the service back end that the resource is contained within. This parameter is always present in the response schema, but the value may be represented as "null" to non-admin users. in: body required: true type: string host_share_server_body: description: | The share server host name or IP address. in: body required: true type: string host_share_server_migration: description: | The target backend to which the share server should be migrated to, in format ``host@backend``. E.g. ``ubuntu@generic1``. in: body required: true type: string hosts_check_result: description: | (Admin only). Result received from each host in a security service update check operation. in: body required: true type: object id_13: description: | The share instance ID. in: body required: true type: string identifier: description: | The identifier of the share server in the back-end storage system. in: body required: true type: string ip_version: description: | The IP version of the network. A valid value is ``4`` or ``6``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: integer max_version: 2.50 is_auto_deletable: description: | Defines if a share server can be deleted automatically by the service. Share server deletion can be automated with configuration. However, Share servers that have ever had a share removed from service management cannot be automatically deleted by the service. in: body required: true type: boolean is_default_type: description: | Defines the share type created is default or not. If the returning value is true, then it is the default share type, otherwise, it is not default. 
in: body required: true type: boolean min_version: 2.46 is_default_type_body: description: | Defines the share type created is default or not. If the returning value is true, then it is the default share type, otherwise, it is not default. in: body required: true type: boolean is_group_type_default: description: | Defines the share group type created is default or not. If the returning value is true, then it is the default share group type, otherwise, it is not default. in: body required: true type: boolean min_version: 2.46 is_public_manage_request: description: | The level of visibility for the share. Set to ``true`` to make share visible to all projects in the cloud. Set to ``false`` to make it private to your project. Default value is ``false``. in: body required: false type: boolean min_version: 2.8 is_public_request: description: | The level of visibility for the share. Set to ``true`` to make share visible to all projects in the cloud. Set to ``false`` to make it private to your project. Default value is ``false``. in: body required: false type: boolean is_public_shares_response: description: | Whether the share is visible publicly (by all projects in the cloud) or not. in: body required: true type: boolean is_soft_deleted_response: description: | Whether the share has been soft deleted to recycle bin or not. in: body required: false type: boolean min_version: 2.69 links: description: | Pagination and bookmark links for the resource. in: body required: true type: array lock_deletion: description: | Whether the resource should have its deletion locked or not. in: body required: false type: string min_version: 2.82 lock_visibility: description: | Whether the resource should have its sensitive fields restricted or not. When enabled, other users will see the "access_to" and "access_key" fields set to ****** in: body required: false type: string min_version: 2.82 manage_host: description: | The host of the destination back end, in this format: ``host@backend``. 
- ``host``: The host name for the destination back end. - ``backend``: The name of the destination back end. in: body required: true type: string manage_share_server_id: description: | The UUID of the share server. in: body required: true type: string min_version: 2.49 managed_share_user_id: description: | ID of the user who brought the share under manila management. in: body required: true type: string min_version: 2.16 maxTotalBackupGigabytes: description: | The total maximum number of backup gigabytes that are allowed in a project. in: body required: true type: integer min_version: 2.80 maxTotalBackupGigabytesOptional: description: | The total maximum number of backup gigabytes that are allowed in a project. in: body required: false type: integer min_version: 2.80 maxTotalEncryptionKeys: description: | The number of encryption keys allowed in a project. in: body required: true type: integer min_version: 2.90 maxTotalEncryptionKeysOptional: description: | The number of encryption keys allowed in a project. in: body required: false type: integer min_version: 2.90 maxTotalReplicaGigabytes: description: | The maximum number of replica gigabytes that are allowed in a project. You cannot create a share, share replica, manage a share or extend a share if it is going to exceed the allowed replica gigabytes quota. in: body required: true type: integer min_version: 2.53 maxTotalReplicaGigabytesOptional: description: | The maximum number of replica gigabytes that are allowed in a project. You cannot create a share, share replica, manage a share or extend a share if it is going to exceed the allowed replica gigabytes quota. in: body required: false type: integer min_version: 2.53 maxTotalShareBackups: description: | The total maximum number of share backups that are allowed in a project. in: body required: true type: integer min_version: 2.80 maxTotalShareBackupsOptional: description: | The total maximum number of share backups that are allowed in a project. 
in: body required: false type: integer min_version: 2.80 maxTotalShareGigabytes: description: | The total maximum number of share gigabytes that are allowed in a project. You cannot request a share that exceeds the allowed gigabytes quota. in: body required: true type: integer maxTotalShareGigabytesOptional: description: | The total maximum number of share gigabytes that are allowed in a project. You cannot request a share that exceeds the allowed gigabytes quota. in: body required: false type: integer maxTotalShareGroups: description: | The maximum number of share groups. in: body required: true type: integer min_version: 2.40 maxTotalShareGroupSnapshots: description: | The maximum number of share group snapshots. in: body required: true type: integer min_version: 2.40 maxTotalShareNetworks: description: | The total maximum number of share-networks that are allowed in a project. in: body required: true type: integer maxTotalShareNetworksOptional: description: | The total maximum number of share-networks that are allowed in a project. in: body required: false type: integer maxTotalShareReplicas: description: | The maximum number of share replicas that is allowed. in: body required: true type: integer min_version: 2.53 maxTotalShareReplicasOptional: description: | The maximum number of share replicas that is allowed. in: body required: false type: integer min_version: 2.53 maxTotalShares: description: | The total maximum number of shares that are allowed in a project. in: body required: true type: integer maxTotalShareSnapshots: description: | The total maximum number of share snapshots that are allowed in a project. in: body required: true type: integer maxTotalShareSnapshotsOptional: description: | The total maximum number of share snapshots that are allowed in a project. in: body required: false type: integer maxTotalSharesOptional: description: | The total maximum number of shares that are allowed in a project. 
in: body required: false type: integer maxTotalSnapshotGigabytes: description: | The total maximum number of snapshot gigabytes that are allowed in a project. in: body required: true type: integer maxTotalSnapshotGigabytesOptional: description: | The total maximum number of snapshot gigabytes that are allowed in a project. in: body required: false type: integer message_expires_at_body: description: | The date and time stamp when the resource message will expire within the service's database. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2016-12-31T13:14:15-05:00``. in: body required: true type: string message_id_body: description: | The UUID of the message. in: body required: true type: string message_level_body: in: body required: true type: string description: > The message level. message_links: description: | The message links. in: body required: true type: array message_members_links: description: | The message member links. in: body required: true type: array metadata: description: | One or more metadata key and value pairs as a dictionary of strings. in: body required: true type: object metadata_item: description: | A single metadata key and value pair. in: body required: true type: object metadata_key_request: description: | The key of a metadata item. For example, if the metadata on an existing resource is as follows: ``"project": "my_test", "aim": "testing"``, the keys are "project" and "aim". in: body required: true type: object metadata_request: description: | One or more metadata key-value pairs, as a dictionary of strings. For example, ``"project": "my_test", "aim": "testing"``. The share server does not respect case-sensitive key names. For example, ``"key": "v1"`` and ``"KEY": "V1"`` are equivalent. If you specify both key-value pairs, the server sets and returns only the ``"KEY": "V1"`` key-value pair. 
in: body required: true type: object migrate-start: description: | The ``migrate-start`` object. in: body required: true type: object migrate_share: description: | The ``migrate_share`` object. in: body required: true type: object migration_complete: description: | The ``migration_complete`` object. in: body required: true type: object migration_progress_details: description: | Additional driver specific details of the migration progress. in: body required: true type: object min_version: 2.59 mount_snapshot_support: description: | Boolean extra spec used for filtering of back ends by their capability to mount share snapshots. in: body required: false type: boolean min_version: 2.32 mount_snapshot_support_body: description: | Boolean extra spec used for filtering of back ends by their capability to mount share snapshots. in: body required: false type: boolean mount_snapshot_support_share_capability: description: | Whether or not this share supports snapshots that can be mounted and access controlled independently of the share. Inconsequential if the share doesn't support snapshots (see capability "snapshot_support"). in: body required: true type: boolean min_version: 2.32 name: description: | The user defined name of the resource. in: body required: true type: string name_request: description: | The user defined name of the resource. The value of this field is limited to 255 characters. in: body required: false type: string network_type: description: | The network type. A valid value is ``VLAN``, ``VXLAN``, ``GRE``, or ``flat``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: string max_version: 2.50 neutron_net_id: description: | The neutron network ID. in: body required: true type: string max_version: 2.50 neutron_net_id_request: description: | The UUID of a neutron network when setting up or updating a share network subnet with neutron. 
Specify both a neutron network and a neutron subnet that belongs to that neutron network. in: body required: false type: string neutron_subnet_id: description: | The neutron subnet ID. in: body required: true type: string max_version: 2.50 neutron_subnet_id_request: description: | The UUID of the neutron subnet when setting up or updating a share network subnet with neutron. Specify both a neutron network and a neutron subnet that belongs to that neutron network. in: body required: false type: string new_share_network_id: description: | If willing to change the share’s share-network so it can be allocated in the desired destination pool, the invoker may supply a new share network to be used. This is often suited when the share is to be migrated to a pool which operates in a different availability zone or managed by a driver that handles share servers. in: body required: false type: string new_share_network_id_server_migration: description: | If willing to change the share server’s share-network so it can be allocated in the desired destination backend, the invoker may supply a new share network to be used. in: body required: false type: string new_share_network_security_service_id: description: | The ID of a security service that must be attached to a share network after a share network security service update operation. in: body required: true type: string new_share_type_id: description: | If willing to retype the share so it can be allocated in the desired destination pool, the invoker may supply a new share type to be used. This is often suited when the share is to be migrated to a pool which operates in the opposite driver mode. in: body required: false type: string next-available: description: | The date and time stamp when next issues are available. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. 
in: body required: false type: string nondisruptive: description: | Specifies whether migration should only be performed without disrupting clients during migration. For such, it is also expected that the export location does not change. When set to ``True`` and drivers are not capable of allowing the share to remain accessible through the two phases of the migration, migration will result in an error status. As of Ocata release, host-assisted migration cannot provide this capability. in: body required: true type: boolean nondisruptive_server_migration: description: | Specifies whether share server migration should only be performed without disrupting clients during migration. For such, it is also expected that the export location does not change. When set to ``True`` and drivers are not capable of allowing the share server to remain accessible through the two phases of the migration, migration will result in an error status. in: body required: true type: boolean operation_is_compatible: description: | Indicates the result of a check operation. If ``True`` indicates that the addition/update of security service is possible. in: body required: true type: boolean os-share-type-access:is_public: description: | Indicates whether a share type is publicly accessible. Default is ``true``, or publicly accessible. in: body required: false type: boolean max_version: 2.6 perShareGigabytes: description: | The number of gigabytes per share allowed in a project. in: body required: true type: integer min_version: 2.62 perShareGigabytesOptional: description: | The number of gigabytes per share allowed in a project. in: body required: false type: integer min_version: 2.62 pool: description: | The pool name for the back end. in: body required: true type: string pools: description: | The pools for the back end. This value is either ``null`` or a string value that indicates the capabilities for each pool. For example, ``pool_name``, ``total_capacity_gb``, ``qos``, and so on. 
in: body required: true type: string preserve_metadata: description: | Specifies whether migration should enforce the preservation of all file system metadata. When set to ``True`` and drivers are not capable of ensuring preservation of file system metadata, migration will result in an error status. As of Ocata release, host-assisted migration cannot provide any guarantees of preserving file system metadata. in: body required: true type: boolean preserve_snapshots: description: | Specifies whether migration should enforce the preservation of all existing snapshots at the destination. When set to ``True`` and drivers are not capable of migrating the snapshots, migration will result in an error status. As of Ocata release, host-assisted migration cannot provide this capability. in: body required: true type: boolean preserve_snapshots_server_migration: description: | Specifies whether migration should enforce the preservation of all existing snapshots at the destination. When set to ``True`` and drivers are not capable of migrating the snapshots, migration will result in an error status. in: body required: true type: boolean progress: description: | The progress of the snapshot creation. in: body required: true type: string progress_share_instance: description: | The progress of the share creation. in: body min_version: 2.54 required: true type: string project: description: | The UUID of the project to which access to the share type is granted. in: body required: true type: string project_id: description: | The ID of the project that owns the resource. in: body required: true type: string project_id_messages_body: description: | The ID of the project for which the message was created. in: body required: true type: string project_id_type_access: description: | The ID of the project that has been granted access to the type resource. 
in: body required: true type: string project_id_type_access_grant_request: description: | The ID of the project that needs to have access to the type resource. in: body required: true type: string project_id_type_access_revoke_request: description: | The ID of the project whose access to the type resource must be revoked. in: body required: true type: string protocol: description: | The Shared File Systems protocol of the share to manage. A valid value is ``NFS``, ``CIFS``, ``GlusterFS``, ``CEPHFS``, ``HDFS`` or ``MAPRFS``. in: body required: true type: string quota_backup_gigabytes: description: | The number of gigabytes for the backups allowed for each project. in: body min_version: 2.80 required: true type: integer quota_backup_gigabytes_detail: description: | The limit, in_use, reserved number of gigabytes for the backups allowed for each project. in: body min_version: 2.80 required: true type: object quota_backup_gigabytes_request: description: | The number of gigabytes for the backups for the project. in: body min_version: 2.80 required: false type: integer quota_backups: description: | The number of backups allowed for each project. in: body min_version: 2.80 required: true type: integer quota_backups_detail: description: | The limit, in_use, reserved number of backups allowed for each project. in: body min_version: 2.80 required: true type: object quota_backups_request: description: | The number of backups for the project. in: body min_version: 2.80 required: false type: integer quota_class_id: description: | A ``quota_class_set`` id. in: body required: true type: string quota_class_set: description: | A ``quota_class_set`` object. in: body required: true type: object quota_encryption_keys: description: | The number of encryption keys allowed for each project. in: body required: true min_version: 2.90 type: integer quota_encryption_keys_detail: description: | The limit, in_use, reserved number of encryption keys for each project. 
in: body min_version: 2.90 required: true type: object quota_encryption_keys_request: description: | The number of encryption keys allowed for each project. in: body min_version: 2.90 required: false type: integer quota_gigabytes: description: | The number of gigabytes allowed for each project. in: body required: true type: integer quota_gigabytes_detail: description: | The limit, in_use, reserved number of gigabytes allowed for each project. in: body min_version: 2.25 required: true type: object quota_gigabytes_request: description: | The number of gigabytes for the project. in: body required: false type: integer quota_per_share_gigabytes: description: | The number of gigabytes per share allowed for each project. in: body required: true min_version: 2.62 type: integer quota_per_share_gigabytes_detail: description: | The limit, in_use, reserved number of per share gigabytes for each project. in: body min_version: 2.62 required: true type: object quota_per_share_gigabytes_request: description: | The number of gigabytes per share allowed for each project. in: body min_version: 2.62 required: false type: integer quota_project_id: description: | The ID of the project the quota pertains to. in: body required: true type: string quota_replica_gigabytes: description: | The number of gigabytes for the share replicas allowed for each project. in: body required: true min_version: 2.53 type: integer quota_replica_gigabytes_detail: description: | The limit, in_use, reserved number of replica gigabytes for each project. in: body min_version: 2.53 required: true type: object quota_replica_gigabytes_request: description: | The number of gigabytes for share replicas for the project. in: body min_version: 2.53 required: false type: integer quota_set: description: | The ``quota_set`` object. in: body required: true type: object quota_share_group_snapshots: description: | The number of share group snapshots allowed for each project or user. 
in: body min_version: 2.40 required: true type: integer quota_share_group_snapshots_detail: description: | The limit, in_use, reserved number of share group snapshots for each project or user. in: body min_version: 2.40 required: true type: object quota_share_group_snapshots_request: description: | The number of share group snapshots allowed for each project or user. in: body min_version: 2.40 required: false type: integer quota_share_groups: description: | The number of share groups allowed for each project or user. in: body min_version: 2.40 required: true type: integer quota_share_groups_detail: description: | The limit, in_use, reserved number of share groups for each project or user. in: body min_version: 2.40 required: true type: object quota_share_groups_request: description: | The number of share groups allowed for each project or user. in: body min_version: 2.40 required: false type: integer quota_share_networks: description: | The number of share networks allowed for user and project, but not share type. in: body required: false type: integer quota_share_networks_default: description: | The number of share networks allowed for each project. in: body required: true type: integer quota_share_networks_detail: description: | The limit, in_use, reserved number of share networks allowed for user and project, but not share type. in: body min_version: 2.25 required: false type: object quota_share_networks_request: description: | The number of share networks for the project. in: body required: false type: integer quota_share_replicas: description: | The number of share replicas allowed for each project. in: body required: true min_version: 2.53 type: integer quota_share_replicas_detail: description: | The limit, in_use, reserved number of share replicas for each project. in: body min_version: 2.53 required: true type: object quota_share_replicas_request: description: | The number of share replicas allowed for each project or user. 
in: body min_version: 2.53 required: false type: integer quota_shares: description: | The number of shares allowed for each project. in: body required: true type: integer quota_shares_detail: description: | The limit, in_use, reserved number of shares allowed for each project. in: body min_version: 2.25 required: true type: object quota_shares_request: description: | The number of shares for the project. in: body required: false type: integer quota_snapshot_gigabytes: description: | The number of gigabytes for the snapshots allowed for each project. in: body required: true type: integer quota_snapshot_gigabytes_detail: description: | The limit, in_use, reserved number of gigabytes for the snapshots allowed for each project. in: body min_version: 2.25 required: true type: object quota_snapshot_gigabytes_request: description: | The number of gigabytes for the snapshots for the project. in: body required: false type: integer quota_snapshots: description: | The number of snapshots allowed for each project. in: body required: true type: integer quota_snapshots_detail: description: | The limit, in_use, reserved number of snapshots allowed for each project. in: body min_version: 2.25 required: true type: object quota_snapshots_request: description: | The number of snapshots for the project. in: body required: false type: integer regex: description: | An API regular expression. For example, ``^/shares`` for the ``/shares`` API URI or ``.*`` for any URI. in: body required: false type: string remaining: description: | The remaining number of allowed requests. in: body required: false type: integer remove_project_access: description: | An object representing the project resource that access should be revoked from. in: body required: true type: object replica_state: description: | The share replica state. Has set value only when replication is used. List of possible values: ``active``, ``in_sync``, ``out_of_sync``, and ``error``. 
in: body required: true type: string min_version: 2.11 replication_type: description: | Type of replication supported for shares of this type. Valid values are: - ``null``: replication is not supported - ``readable``: users can create replicas of the share that are read-only until they are promoted, - ``writable``: users can create read/write replicas - ``dr``: users can create replicas that are not exported until they are promoted. in: body required: false type: string replication_type_share_capability: description: | The share replication type. The value can be: - ``null``, if the share cannot be replicated. - ``readable``, if one or more read-only replicas of the share can be created - ``writable``, if one or more active replicas of the share can be created - ``dr``, if one or more replicas of the share can be created that will remain inaccessible until they are promoted. in: body required: true type: string min_version: 2.11 request_id_body: description: | The UUID of the request during which the message was created. in: body required: true type: string requested_capabilities: description: | The parameters that were sent to the server in order to check if the destination host can handle the share server migration. This object contains the following attributes: ``writable``, ``nondisruptive``, ``preserve_snapshots``, ``share_network_id`` and ``host``. in: body required: true type: object requested_check_operation: description: | Information about the requested operation. in: body required: true type: object requested_operation: description: | Data about the operation that was requested. in: body required: true type: object requested_operation_name: description: | The name of the check operation that was triggered, being ``add_security_service`` or ``update_security_service``. in: body required: true type: string required_extra_specs: description: | The required extra specifications for the share type: ``driver_handles_share_servers``. 
``snapshot_support`` was treated as a required extra-specification until api version 2.24. in: body required: true type: object reset_operation: description: | Whether a share network security service check update or add operation for a given share network should ignore previous check results, and check the operation's compatibility again. in: body required: true type: boolean reset_status: description: | The ``reset_status`` object. in: body required: true type: object resource_id_body: description: | The UUID of the resource for which the message was created. in: body required: true type: string resource_lock_id: description: | The UUID identifying the specific resource lock. in: body required: true type: string resource_lock_lock_context: description: | The lock creator's context. Resource locks can be created by users with different roles. If a user with ``admin`` role creates the lock, the value of this field is ``admin``. If a user with ``service`` role creates the lock, the value of this field is ``service``. For all other contexts, the value of this field is ``user``. This field also determines the user's role that is required to unlock or manipulate a lock by virtue of the service's default RBAC. in: body required: true type: string resource_lock_lock_reason: description: | A blob of text representing the reason for the specific resource lock. in: body required: true type: string resource_lock_lock_reason_optional: description: | A blob of text representing the reason for the specific resource lock. in: body required: false type: string resource_lock_object: description: | A resource lock object when making resource lock requests. All other parameters are included in this object. in: body required: true type: object resource_lock_project_id: description: | The ID of the project that the resource lock was created for. 
in: body required: true type: string resource_lock_resource_action: description: | The action pertaining to a resource that the resource lock prevents. For example, if a resource lock prevents deletion of a share, the value of ``resource_action`` is ``delete``. Resource locks are not supported for all API actions. in: body required: true type: string resource_lock_resource_action_create_optional: description: | The action pertaining to a resource that the resource lock prevents. For example, if a resource lock prevents deletion of a share, the value of ``resource_action`` is ``delete``. Resource locks are not supported for all API actions. Currently support only exists for ``delete``, and for specific resources. If not provided, the value of this parameter defaults to ``delete``. in: body required: false type: string resource_lock_resource_action_optional: description: | The action pertaining to a resource that the resource lock prevents. For example, if a resource lock prevents deletion of a share, the value of ``resource_action`` is ``delete``. Resource locks are not supported for all API actions. in: body required: false type: string resource_lock_resource_id: description: | The UUID of the resource that the lock pertains to. For example, this can be the ID of the share that is locked from deletion. in: body required: true type: string resource_lock_resource_type: description: | The type of resource that the ID in ``resource_id`` denotes. For example, ``share`` is the resource type that is specified when the resource lock pertains to a share being locked from deletion. Resource locks are not supported for all resources. Currently support only exists for ``share``. in: body required: true type: string resource_lock_user_id: description: | The ID of the user the resource lock was created for. in: body required: true type: string resource_locks_object: description: | A resource locks object containing a collection or list of resource locks. 
in: body required: true type: object resource_type_body: description: | The type of the resource for which the message was created. in: body required: true type: string revert_to_snapshot_support: description: | Boolean extra spec used for filtering of back ends by their capability to revert shares to snapshots. in: body required: false type: boolean min_version: 2.27 revert_to_snapshot_support_body: description: | Boolean extra spec used for filtering of back ends by their capability to revert shares to snapshots. in: body required: false type: boolean revert_to_snapshot_support_share_capability: description: | Whether or not this share supports being reverted to its latest snapshot. Inconsequential if the share doesn't support snapshots (see capability "snapshot_support"). in: body required: true type: boolean min_version: 2.27 scheduled_to_be_deleted_at_response: description: | Estimated time at which the share in the recycle bin will be deleted automatically. in: body required: false type: string min_version: 2.69 scheduler_hints: description: | One or more scheduler_hints key and value pairs as a dictionary of strings. Accepted hints are: - ``same_host`` or ``different_host``: values must be a comma separated list of Share IDs - ``only_host``: value must be a manage-share service host in ``host@backend#POOL`` format (admin only). Only available in and beyond API version 2.67 in: body required: false type: object min_version: 2.65 security_service_default_ad_site: description: | The security service default AD site. in: body required: true type: string min_version: 2.76 security_service_default_ad_site_request: description: | The security service default AD site. in: body required: false type: string min_version: 2.76 security_service_dns_ip: description: | The DNS IP address that is used inside the project network. in: body required: true type: string security_service_dns_ip_request: description: | The DNS IP address that is used inside the project network. 
in: body required: false type: string security_service_domain: description: | The security service domain. in: body required: true type: string security_service_domain_request: description: | The security service domain. in: body required: false type: string security_service_id: description: | The security service ID. in: body required: true type: string security_service_ou: description: | The security service ou. in: body required: true type: string min_version: 2.44 security_service_ou_request: description: | The security service ou. An organizational unit can be added to specify where the share ends up. in: body required: false type: string min_version: 2.44 security_service_password: description: | The user password, if you specify a ``user``. in: body required: true type: string security_service_password_request: description: | The user password, if you specify a ``user``. in: body required: false type: string security_service_server: description: | The security service host name or IP address. in: body required: true type: string security_service_server_request: description: | The security service host name or IP address. in: body required: false type: string security_service_status: description: | The security service status. in: body required: true type: string security_service_type: description: | The security service type. A valid value is ``ldap``, ``kerberos``, or ``active_directory``. in: body required: true type: string security_service_type_request: description: | The security service type. A valid value is ``ldap``, ``kerberos``, or ``active_directory``. in: body required: false type: string security_service_update_support: description: | Whether a share network or server supports security service updates or not. in: body required: true type: boolean min_version: 2.63 security_service_user: description: | The security service user or group name that is used by the project. 
in: body required: true type: string security_service_user_request: description: | The security service user or group name that is used by the project. in: body required: false type: string segmentation_id: description: | The segmentation ID. This parameter is automatically set to a value determined by the network provider. For VLAN, this value is an integer from 1 to 4094. For VXLAN, this value is an integer from 1 to 16777215. For GRE, this value is an integer from 1 to 4294967295. in: body required: true type: integer max_version: 2.50 service_binary_response: description: | The service binary name. Default is the base name of the executable. in: body required: true type: string service_disable_binary_request: description: | The name of the service binary that you want to disable. Typically, this name is the base name of the executable. in: body required: true type: string service_disable_binary_response: description: | The name of the disabled service binary. Typically, this name is the base name of the executable. in: body required: true type: string service_disable_host_request: description: | The host name of the service that you want to disable. in: body required: true type: string service_disable_host_response: description: | The host name of the disabled service. in: body required: true type: string service_disable_reason_request: description: | A reason for the service being disabled. in: body required: false type: string min_version: 2.83 service_disable_reason_response: description: | A reason for the service being disabled. in: body required: true type: string min_version: 2.83 service_disabled_response: description: | Indicates whether the service is disabled. in: body required: true type: boolean max_version: 2.82 service_enable_binary_request: description: | The name of the service binary that you want to enable. Typically, this name is the base name of the executable. 
in: body required: true type: string service_enable_host_request: description: | The host name of the service that you want to enable. in: body required: true type: string service_enable_host_response: description: | The host name of the enabled service. in: body required: true type: string service_ensure_shares_host_request: description: | The host name of the manila-share binary that you want to start the ensure shares procedure in the ``host@backend`` format. in: body required: true type: string service_ensuring_response: description: | Whether the service is currently running ensure shares or not. in: body required: true type: string min_version: 2.86 service_host: description: | The manage-share service host in this format: ``host@backend#POOL``: - ``host``. The host name for the back end. - ``backend``. The name of the back end. - ``POOL``. The pool name for the back end. in: body required: true type: string service_host_response: description: | The service host name. in: body required: true type: string service_id_response: description: | The service ID. in: body required: true type: integer service_state_response: description: | The current state of the service. A valid value is ``up`` or ``down``. in: body required: true type: string service_status_new_response: description: | The service status, which is ``enabled`` or ``disabled``. in: body required: true type: string min_version: 2.83 service_status_response: description: | The service status, which is ``enabled`` or ``disabled``. in: body required: true type: string service_zone_response: description: | The service availability zone. in: body required: true type: string services: description: | Top element in the response body. in: body required: true type: string share: description: | A ``share`` object. in: body required: true type: object share_force_delete: description: | To force-delete a share or share group, set this value to ``null``. 
The force-delete action, unlike the delete action, ignores the share or share group status. in: body required: true type: string share_force_extend: description: | (Admin only). Defines whether to go through scheduler, Set to `True` will extend share directly. Set to `False` will go through scheduler, default is `False`. in: body required: false type: boolean min_version: 2.64 share_group_host: description: | The share group host name. in: body required: false type: string share_group_id: description: | The UUID of the share group. in: body required: true type: string min_version: 2.31 share_group_id_request: description: | The UUID of the share group. in: body required: false type: string min_version: 2.31 share_group_links: description: | The share group links. in: body required: true type: string share_group_status: description: | The share group status, which is ``available``, ``error``, ``creating``, or ``deleting``. in: body required: true type: string share_group_type_id: description: | The share group type ID to create a share group. in: body required: false type: string share_group_type_id_required: description: | The share group type ID. in: body required: true type: string share_group_type_is_public: description: | The level of visibility for the share group type. Set to ``true`` to make share group type public. Set to ``false`` to make it private. Default value is ``true``. in: body required: true type: boolean share_group_type_is_public_request: description: | The level of visibility for the share group type. Set to ``true`` to make share group type public. Set to ``false`` to make it private. Default value is ``false``. in: body required: false type: boolean share_group_type_name: description: | The share group type name. in: body required: true type: string share_group_type_name_request: description: | The name of the share group type resource. The value of this field is limited to 255 characters. 
in: body required: false type: string share_id_request: description: | The UUID of the share. in: body required: true type: string share_id_response: description: | The UUID of the share. in: body required: true type: string share_id_share_instances_response: description: | The UUID of the share to which the share instance belongs to. in: body required: true type: string share_instance_cast_rules_to_readonly: description: | If the share instance has its ``cast_rules_to_readonly`` attribute set to True, all existing access rules will be cast to read/only. in: body required: true type: boolean min_version: 2.30 share_instance_id_1: description: | The UUID of the share instance. in: body required: true type: string share_network_availability_zone_request: description: | The UUID or name of an availability zone for the share network subnet. in: body required: false type: string min_version: 2.51 share_network_gateway: description: | The gateway of a share network. in: body required: true type: string min_version: 2.18 max_version: 2.50 share_network_id: description: | The share network ID where the resource is exported to. in: body required: true type: string share_network_id_manage_server_request: description: | The UUID of the share network that the share server will pertain to. in: body required: true type: string share_network_id_request: description: | The ID of a share network that the resource must be exported to. Note that when using a share type with the ``driver_handles_share_servers`` extra spec as ``False``, you should not provide a ``share_network_id``. in: body required: false type: string share_network_id_share_networks_response: description: | The UUID of a share network resource. in: body required: true type: string share_network_id_share_servers_response: description: | The UUID of a share network that is associated with the share server. in: body required: true type: string share_network_mtu: description: The MTU value of a share network. 
in: body required: true type: integer min_version: 2.20 max_version: 2.50 share_network_name: description: | The name of a share network that is associated with the share server. in: body required: true type: string share_network_security_service_id: description: | The UUID of the security service to remove from the share network. For details, see the security service section. in: body required: true type: string share_network_security_service_update_support: description: | Whether the share network supports its security services being updated when it is already being used. in: body required: true type: boolean min_version: 2.63 share_network_share_network_subnets: description: | A list of share network subnets that pertain to the related share network. in: body required: true type: array min_version: 2.51 share_network_status: description: | The status of a share network. Possible values are: ``active``, ``error`` or ``network_change``. in: body required: true type: string min_version: 2.63 share_network_subnet_availability_zone: description: | The name of the availability zone that the share network subnet belongs to. in: body required: true type: string share_network_subnet_cidr: description: | The IP block from which to allocate the network, in CIDR notation. For example, ``172.16.0.0/24`` or ``2001:DB8::/64``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: string share_network_subnet_gateway: description: | The gateway of a share network subnet. in: body required: true type: string share_network_subnet_id: description: | The UUID of the share network subnet. in: body required: true type: string share_network_subnet_id_manage_server_body: description: | The UUID of the share network subnet that the share server pertain to. 
in: body required: true type: string min_version: 2.51 share_network_subnet_id_manage_server_request: description: | The UUID of the share network subnet that the share server will pertain to. If not specified, the share network's default subnet UUID will be used. in: body required: false type: string min_version: 2.51 share_network_subnet_id_share_server_body: description: | The UUID of the share network subnet that the share server pertains to. in: body required: true type: string min_version: 2.51 share_network_subnet_ip_version: description: | The IP version of the network. A valid value is ``4`` or ``6``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: integer share_network_subnet_mtu: description: | The MTU of a share network subnet. in: body required: true type: string share_network_subnet_network_type: description: | The network type. A valid value is ``VLAN``, ``VXLAN``, ``GRE``, or ``flat``. This parameter is automatically set to a value determined by the network provider. in: body required: true type: string share_network_subnet_neutron_net_id: description: | The neutron network ID. in: body required: true type: string share_network_subnet_neutron_subnet_id: description: | The neutron subnet ID. in: body required: true type: string share_network_subnet_segmentation_id: description: | The segmentation ID. This parameter is automatically set to a value determined by the network provider. For VLAN, this value is an integer from 1 to 4094. For VXLAN, this value is an integer from 1 to 16777215. For GRE, this value is an integer from 1 to 4294967295. in: body required: true type: integer share_network_subnet_share_network_id: description: | The UUID of the share network that the share network subnet belongs to. in: body required: true type: string share_network_subnet_share_network_name: description: | The name of the share network that the share network subnet belongs to. 
in: body required: true type: string share_new_size: description: | New size of the share, in GiBs. in: body required: true type: integer share_proto: description: | The Shared File Systems protocol. A valid value is ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, ``CephFS``, ``MAPRFS``, ``CephFS`` supported is starting with API v2.13. in: body required: true type: string share_replica_az: description: | The availability zone. in: body required: false type: string share_replica_cast_rules_to_readonly: description: | If the share replica has its ``cast_rules_to_readonly`` attribute set to True, all existing access rules will be cast to read/only. in: body required: true type: boolean min_version: 2.30 share_replica_force_delete: description: | To force-delete a share replica, set this value to ``null``. The force-delete action, unlike the delete action, ignores the share replica status. in: body required: true type: string share_replica_host: description: | The host name of the share replica. in: body required: true type: string share_replica_id: description: | The UUID of the share replica. in: body required: true type: string share_replica_quiesce_wait_time: description: | The quiesce wait time in seconds used during replica promote. in: body required: false type: integer min_version: 2.75 share_replica_replica_state: description: | The replica state of a share replica. List of possible values: ``active``, ``in_sync``, ``out_of_sync``, and ``error``. in: body required: true type: string share_replica_reset_replica_state: description: | The ``reset_replica_state`` object. in: body required: true type: object share_replica_scheduler_hints: description: | One or more scheduler_hints key and value pairs as a dictionary of strings. 
Accepted hints are: - ``only_host``: value must be a share manager service host in ``host@backend#POOL`` format (admin only) in: body required: false type: object min_version: 2.67 share_replica_share_id: description: | The UUID of the share from which to create a share replica. in: body required: true type: string share_replica_share_network_id: description: | The UUID of the share network. in: body required: false type: string share_replica_status: description: | The status of a share replica. List of possible values: ``available``, ``error``, ``creating``, ``deleting``, or ``error_deleting``. in: body required: true type: string share_server_id: description: | The UUID of the share server. in: body required: true type: string share_server_security_service_update_support: description: | Whether the share server supports its security services being updated after its creation. in: body required: true type: boolean min_version: 2.63 share_server_show_identifier: description: | The identifier of the share server in the back-end storage system. in: body required: true type: string min_version: 2.49 share_server_show_is_auto_deletable: description: | Defines if a share server can be deleted automatically by the service. Share server deletion can be automated with configuration. However, Share servers that have ever had a share removed from service management cannot be automatically deleted by the service. in: body required: true type: boolean min_version: 2.49 share_server_status: description: | The share server status, which can be ``active``, ``error``, ``creating``, ``deleting``, ``manage_starting``, ``manage_error``, ``unmanage_starting``, ``unmanage_error`` or ``error_deleting``. in: body required: true type: string share_server_unmanage: description: | To unmanage a share server, either set this value to ``null`` or {}. Optionally, the ``force`` attribute can be included in this object. 
in: body required: true type: object share_share_type_name: description: | Name of the share type. in: body required: true type: string min_version: 2.6 share_status_request: description: | The share or share instance status to be set. Possible values are listed in the `section above <#shares>`_. in: body required: true type: string share_status_response: description: | The share or share instance status. Possible values are listed in the `section above <#shares>`_. in: body required: true type: string share_type_access:is_public: description: | Indicates whether a share type is publicly accessible. Default is ``true``, or publicly accessible. in: body required: false type: boolean min_version: 2.7 share_type_access:is_public_body: description: | Indicates whether a share type is accessible by all projects (tenants) in the cloud. in: body required: true type: boolean share_type_access:is_public_update_request: description: | Indicates whether the share type should be accessible by all projects (tenants) in the cloud. If not specified, the visibility of the share type is not altered. in: body required: false type: boolean share_type_description: description: | The description of the share type. in: body required: true type: string min_version: 2.41 share_type_description_body: description: | The description of the share type. in: body required: true type: string share_type_description_request: description: | The description of the share type. The value of this field is limited to 255 characters. in: body required: false type: string min_version: 2.41 share_type_description_update_request: description: | New description for the share type. in: body required: false type: string share_type_id_body: description: | The UUID of the share type. in: body required: true type: string share_type_name: description: | Name of the share type. in: body required: true type: string share_type_name_request: description: | Name of the share type. 
The value of this field is limited to 255 characters. in: body required: false type: string share_type_request: description: | The name or ID of the share type to be used to create the resource. If you omit this parameter, the default share type is used. To view the default share type set by the administrator, issue a list default share types request. in: body required: false type: string share_type_shares_response: description: | The UUID of the share type that the share belongs to. Prior to API version 2.6, this parameter resolved to the name of the share type. In API version 2.6 and beyond, this parameter holds the share type ID instead of the name. in: body required: true type: string share_types: description: | A list of one or more share type IDs. in: body required: false type: array share_types_1: description: | A list of share type IDs. in: body required: true type: array share_unmanage: description: | To unmanage a share, set this value to ``null``. in: body required: true type: string share_user_id: description: | ID of the user that the share was created by. in: body required: true type: string min_version: 2.16 shrink: description: | The ``shrink`` object. in: body required: true type: object size_request: description: | The share size, in GiBs. The requested share size cannot be greater than the allowed GiB quota. To view the allowed quota, issue a get limits request. in: body required: true type: integer size_response: description: | The share size, in GiBs. in: body required: true type: integer snapshot_force_delete: description: | To force-delete a snapshot, include this param and set its value to ``null``. The force-delete action, unlike the delete action, ignores the snapshot status. in: body required: true type: string snapshot_id: description: | The UUID of the snapshot. in: body required: true type: string snapshot_id_request: description: | The UUID of the share's base snapshot. 
in: body required: false type: string snapshot_id_shares_response: description: | The UUID of the snapshot that was used to create the share. in: body required: true type: string snapshot_instance_id: description: | The UUID of the share snapshot instance. in: body required: false type: string snapshot_instance_id_response: description: | The UUID of the share snapshot instance. in: body required: true type: string snapshot_instance_status: description: | The snapshot instance status. A valid value is ``available``, ``error``, ``creating``, ``deleting``, and ``error_deleting``, ``restoring``, ``unmanage_starting``, ``unmanage_error``, ``manage_starting``, ``manage_error``. in: body required: true type: string snapshot_manage_share_id: description: | The UUID of the share that has snapshot which should be managed. in: body required: true type: string snapshot_manage_status: description: | The snapshot status, which could be ``manage_starting``, ``manage_error``, ``unmanage_starting``, or ``unmanage_error``. in: body required: true type: string snapshot_project_id: description: | ID of the project that the snapshot belongs to. in: body required: true type: string min_version: 2.17 snapshot_provider_location: description: | Provider location of the snapshot on the backend. in: body required: true type: string snapshot_provider_location_optional: description: | Provider location of the snapshot on the backend. This parameter is only available to users with an "administrator" role by virtue of default RBAC. This behavior can be modified by overriding the ``context_is_admin`` policy via a custom ``policy.yaml``. in: body required: false type: string min_version: 2.12 snapshot_provider_location_request: description: | Provider location of the snapshot on the backend. in: body required: true type: string snapshot_share_id: description: | The UUID of the source share that was used to create the snapshot. 
in: body required: true type: string snapshot_share_id_request: description: | The UUID of the share from which to create a snapshot. in: body required: true type: string snapshot_share_protocol: description: | The file system protocol of a share snapshot. A valid value is ``NFS``, ``CIFS``, ``GlusterFS``, ``HDFS``, ``CephFS`` or ``MAPRFS``. ``CephFS`` is supported starting with API v2.13. in: body required: true type: string snapshot_share_size: description: | The share snapshot size, in GiBs. in: body required: true type: integer snapshot_size: description: | The snapshot size, in GiBs. in: body required: true type: integer snapshot_status: description: | The snapshot status, which can be ``available``, ``error``, ``creating``, ``deleting``, ``manage_starting``, ``manage_error``, ``unmanage_starting``, ``unmanage_error`` or ``error_deleting``. in: body required: true type: string snapshot_status_request: description: | The snapshot status, which can be ``available``, ``error``, ``creating``, ``deleting``, ``manage_starting``, ``manage_error``, ``unmanage_starting``, ``unmanage_error`` or ``error_deleting``. in: body required: false type: string snapshot_support: description: | An extra specification that filters back ends by whether they do or do not support share snapshots. in: body required: true type: boolean min_version: 2.2 snapshot_support_share_capability: description: | Whether or not this share supports snapshots. Snapshots are point in time backups of the share. in: body required: true type: boolean min_version: 2.2 snapshot_unmanage: description: | To unmanage a share snapshot, include this parameter and set its value to ``null``. in: body required: true type: string snapshot_user_id: description: | ID of the user that the snapshot was created by. in: body required: true type: string min_version: 2.17 source_backup_id_shares_response: description: | The UUID of the backup that was restored in the share. 
in: body required: true type: string min_version: 2.80 source_share_group_snapshot_id: description: | The source share group snapshot ID to create the share group. in: body required: false type: string source_share_group_snapshot_id_response: description: | The source share group snapshot ID to create the share group. in: body required: true type: string source_share_group_snapshot_member_id: description: | ID of the group snapshot instance that was the source of this share. in: body required: true type: string min_version: 2.31 state: description: | Prior to versions 2.28, the state of all access rules of a given share is the same at all times. This could be ``new``, ``active`` or ``error``. Since 2.28, the state of each access rule of a share is independent of the others and can be ``queued_to_apply``, ``applying``, ``active``, ``error``, ``queued_to_deny`` or ``denying``. A new rule starts out in ``queued_to_apply`` state and is successfully applied if it transitions to ``active`` state. in: body required: true type: string status_share_server_body: description: | The share server status, which is ``active``, ``error``, ``creating``, or ``deleting``. in: body required: true type: string supported_capabilities: description: | The driver's supported attributes for a share server migration. It will contain the following items: ``writable``, ``nondisruptive``, ``preserve_snapshots`` and ``share_network_id``. Drivers will also report if they can perform ``migration_cancel`` and ``migration_get_progress`` operations. All of the mentioned parameters will be present in this object. All parameters but the ``share_network_id`` are boolean values. in: body required: true type: object task_state: description: | For the share migration, the migration task state. A valid value is ``null``, ``migration_starting``, ``migration_error``, ``migration_success``, ``migration_completing``, or ``migrating``. 
The ``task_state`` is ``null`` unless the share is migrated from one back-end to another. in: body required: true type: string min_version: 2.5 task_state_server_migration: description: | For the share server migration, the migration task state. A valid value is ``null``, ``migration_in_progress``, ``migration_cancel_in_progress``, ``migration_cancelled``, ``migration_driver_starting``, ``migration_driver_in_progress``, or ``migration_phase_1_done``. in: body required: true type: string timestamp: description: | The date and time stamp when the API request was issued. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string total_progress: description: | Defines a total progress of share migration. in: body required: true type: integer total_progress_server_migration: description: | Defines a total progress of share server migration. in: body required: true type: integer totalBackupGigabytesUsed: description: | The total number of gigabytes used in a project by backups. in: body required: true type: integer totalReplicaGigabytesUsed: description: | The total number of replica gigabytes used in a project by share replicas. in: body required: true type: integer totalShareBackupsUsed: description: | The total number of created share backups in a project. in: body required: true type: integer totalShareGigabytesUsed: description: | The total number of gigabytes used in a project by shares. in: body required: true type: integer totalShareNetworksUsed: description: | The total number of created share-networks in a project. in: body required: true type: integer totalShareReplicasUsed: description: | The total number of created share replicas in a project. in: body required: true type: integer totalShareSnapshotsUsed: description: | The total number of created share snapshots in a project. 
in: body required: true type: integer totalSharesUsed: description: | The total number of created shares in a project. in: body required: true type: integer totalSnapshotGigabytesUsed: description: | The total number of gigabytes used in a project by snapshots. in: body required: true type: integer transfer: description: | The transfer object. in: body required: true type: object transfer_expires_at_body: description: | The date and time stamp when the resource transfer will expire. After transfer expired, will be automatically deleted. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2016-12-31T13:14:15-05:00``. in: body required: true type: string transfer_id_in_body: description: | The transfer UUID. in: body required: true type: string transfer_name: description: | The transfer display name. in: body required: false type: string transfer_resource_id: description: | The UUID of the resource for the transfer. in: body required: true type: string transfer_resource_type: description: | The type of the resource for the transfer. in: body required: true type: string transfers: description: | List of transfers. in: body required: true type: array unit: description: | The time interval during which a number of API requests are allowed. A valid value is ``SECOND``, ``MINUTE``, ``HOUR``, or ``DAY``. Used in conjunction with the ``value`` parameter, expressed as ``value`` per ``unit``. For example, 120 requests are allowed per minute. in: body required: false type: string unrestrict_access: description: | Whether the service should attempt to remove deletion restrictions during the access rule deletion or not. in: body required: false type: string min_version: 2.82 updated_at: description: | The date and time stamp when the resource was last updated within the service's database. 
If a resource was never updated after it was created, the value of this parameter is set to ``null``. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2016-12-31T13:14:15-05:00``. in: body required: true type: string updated_at_extensions: description: | The date and time stamp when the extension API was last updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string uri: description: | A human-readable URI of a rate limit. format: uri in: body required: false type: string user_id: description: | ID of the user that is part of a given project. in: body required: false type: string value: description: | The number of API requests that are allowed during a time interval. Used in conjunction with the ``unit`` parameter, expressed as ``value`` per ``unit``. For example, 120 requests are allowed per minute. in: body required: false type: integer verb: description: | The HTTP method for the API request. For example, ``GET``, ``POST``, ``DELETE``, and so on. in: body required: false type: string version: description: | The version. in: body required: true type: string version_id: type: string in: body required: true description: > A common name for the version in question. Informative only, it has no real semantic meaning. version_max: type: string in: body required: true description: > If this version of the API supports microversions, the maximum microversion that is supported. This will be the empty string if microversions are not supported. version_media_types: description: | Media types supported by the API. 
in: body required: true type: object version_min: type: string in: body required: true description: > If this version of the API supports microversions, the minimum microversion that is supported. This will be the empty string if microversions are not supported. version_status: type: string in: body required: true description: | The status of this API version. This can be one of: - ``CURRENT``: this is the preferred version of the API to use - ``SUPPORTED``: this is an older, but still supported version of the API - ``DEPRECATED``: a deprecated version of the API that is slated for removal version_updated: description: | A date and time stamp for API versions. This field presents no meaningful information. in: body required: true type: string versions: type: array in: body required: true description: > A list of version objects that describe the API versions available. volume_type: description: | The volume type. The use of the ``volume_type`` object is deprecated but supported. It is recommended that you use the ``share_type`` object when you create a share type. When you issue a create a share type request, you can submit a request body with either a ``share_type`` or ``volume_type`` object. No matter which object type you include in the request, the API creates both a ``volume_type`` object and a ``share_type`` object. Both objects have the same ID. When you issue a list share types request, the response shows both ``share_types`` and ``volume_types`` objects. in: body required: false type: string volume_type_shares_response: description: | The share type ID. This is a legacy parameter that contains the same value as the ``share_type`` parameter. Do not rely on this parameter as it may be removed in a future API revision. in: body required: true type: string writable: description: | Specifies whether migration should only be performed if the share can remain writable. 
When this behavior is set to ``True`` and drivers are not capable of allowing the share to remain writable, migration will result in an error status. If drivers are not capable of performing a nondisruptive migration, manila will ensure that the share will remain writable through the data copy phase of migration. However, during the switchover phase the share will be re-exported at the destination, causing the share to be rendered inaccessible for the duration of this phase. As of Ocata release, host-assisted migration cannot provide this capability. in: body required: true type: boolean writable_server_migration: description: | Specifies whether migration should only be performed if the shares can remain writable. When this behavior is set to ``True`` and drivers are not capable of allowing the shares to remain writable, migration will result in an error status. If drivers are not capable of performing a nondisruptive migration, manila will ensure that the shares will remain writable through the data copy phase of migration. However, during the switchover phase all shares will be re-exported at the destination, causing the shares to be rendered inaccessible for the duration of this phase. in: body required: true type: boolean ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/validation/response_types.py0000664000175000017500000000270700000000000022664 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Common field types for validating API responses.""" links = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'type': 'string', 'enum': ['self', 'bookmark'], }, 'href': { 'type': 'string', 'format': 'uri', }, }, 'required': ['rel', 'href'], 'additionalProperties': False, }, } collection_links = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'rel': { 'const': 'next', }, 'href': { 'type': 'string', 'format': 'uri', }, }, 'required': ['rel', 'href'], 'additionalProperties': False, }, # there should be one and only one link object 'minItems': 1, 'maxItems': 1, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/validation/validators.py0000664000175000017500000002305100000000000021745 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Internal implementation of request/response validating middleware.""" import re import jsonschema from jsonschema import exceptions as jsonschema_exc from oslo_utils import timeutils from oslo_utils import uuidutils import webob.exc from manila import exception from manila.i18n import _ from manila import utils def _soft_validate_additional_properties( validator, additional_properties_value, param_value, schema, ): """Validator function. 
If there are not any properties on the param_value that are not specified in the schema, this will return without any effect. If there are any such extra properties, they will be handled as follows: - if the validator passed to the method is not of type "object", this method will return without any effect. - if the 'additional_properties_value' parameter is True, this method will return without any effect. - if the schema has an additionalProperties value of True, the extra properties on the param_value will not be touched. - if the schema has an additionalProperties value of False and there aren't patternProperties specified, the extra properties will be stripped from the param_value. - if the schema has an additionalProperties value of False and there are patternProperties specified, the extra properties will not be touched and raise validation error if pattern doesn't match. """ if not ( validator.is_type(param_value, "object") or additional_properties_value ): return properties = schema.get('properties', {}) patterns = '|'.join(schema.get('patternProperties', {})) extra_properties = set() for prop in param_value: if prop not in properties: if patterns: if not re.search(patterns, prop): extra_properties.add(prop) else: extra_properties.add(prop) if not extra_properties: return if patterns: error = 'Additional properties are not allowed (%s %s unexpected)' if len(extra_properties) == 1: verb = 'was' else: verb = 'were' yield jsonschema_exc.ValidationError( error % (', '.join(repr(extra) for extra in extra_properties), verb) ) else: for prop in extra_properties: del param_value[prop] def _validate_string_length( value, entity_name, mandatory=False, min_length=0, max_length=None, remove_whitespaces=False, ): """Check the length of specified string. 
:param value: the value of the string :param entity_name: the name of the string :mandatory: string is mandatory or not :param min_length: the min_length of the string :param max_length: the max_length of the string :param remove_whitespaces: True if trimming whitespaces is needed else False """ if not mandatory and not value: return True if mandatory and not value: msg = _("The '%s' can not be None.") % entity_name raise webob.exc.HTTPBadRequest(explanation=msg) if remove_whitespaces: value = value.strip() utils.check_string_length( value, entity_name, min_length=min_length, max_length=max_length ) class FormatChecker(jsonschema.FormatChecker): """A FormatChecker can output the message from cause exception We need understandable validation errors messages for users. When a custom checker has an exception, the FormatChecker will output a readable message provided by the checker. """ def check(self, param_value, format): """Check whether the param_value conforms to the given format. :param param_value: the param_value to check :type: any primitive type (str, number, bool) :param str format: the format that param_value should conform to :raises: :exc:`FormatError` if param_value does not conform to format """ if format not in self.checkers: return # For safety reasons custom checkers can be registered with # allowed exception types. Anything else will fall into the # default formatter. 
func, raises = self.checkers[format] result, cause = None, None try: result = func(param_value) except raises as e: cause = e if not result: msg = '%r is not a %r' % (param_value, format) raise jsonschema_exc.FormatError(msg, cause=cause) _FORMAT_CHECKER = FormatChecker() @_FORMAT_CHECKER.checks('date-time') def _validate_datetime_format(instance: object) -> bool: # format checks constrain to the relevant primitive type # https://github.com/OAI/OpenAPI-Specification/issues/3148 if not isinstance(instance, str): return True try: timeutils.parse_isotime(instance) except ValueError: return False else: return True @_FORMAT_CHECKER.checks('uuid') def _validate_uuid_format(instance: object) -> bool: # format checks constrain to the relevant primitive type # https://github.com/OAI/OpenAPI-Specification/issues/3148 if not isinstance(instance, str): return True return uuidutils.is_uuid_like(instance) class _SchemaValidator(object): """A validator class This class is changed from Draft202012Validator to validate minimum/maximum value of a string number(e.g. '10'). In addition, FormatCheckers are added for checking data formats which are common in the Manila API. """ validator = None validator_org = jsonschema.Draft202012Validator def __init__( self, schema, relax_additional_properties=False, is_body=True ): self.is_body = is_body validators = { 'minimum': self._validate_minimum, 'maximum': self._validate_maximum, } if relax_additional_properties: validators['additionalProperties'] = ( _soft_validate_additional_properties ) validator_cls = jsonschema.validators.extend( self.validator_org, validators ) self.validator = validator_cls(schema, format_checker=_FORMAT_CHECKER) def validate(self, *args, **kwargs): try: self.validator.validate(*args, **kwargs) except jsonschema.ValidationError as ex: if len(ex.path) > 0: if self.is_body: # NOTE: For consistency across OpenStack services, this # error message has been written in a similar format as # WSME errors. 
detail = _( 'Invalid input for field/attribute %(path)s. ' 'Value: %(value)s. %(message)s' ) % { 'path': ex.path.pop(), 'value': ex.instance, 'message': ex.message, } else: # NOTE: We use 'ex.path.popleft()' instead of # 'ex.path.pop()'. This is due to the structure of query # parameters which is a dict with key as name and value is # list. As such, the first item in the 'ex.path' is the key # and second item is the index of list in the value. We # need the key as the parameter name in the error message # so we pop the first value out of 'ex.path'. detail = _( 'Invalid input for query parameters %(path)s. ' 'Value: %(value)s. %(message)s' ) % { 'path': ex.path.popleft(), 'value': ex.instance, 'message': ex.message, } else: detail = ex.message raise exception.ValidationError(detail=detail) except TypeError as ex: # NOTE: If passing non string value to patternProperties parameter, # TypeError happens. Here is for catching the TypeError. detail = str(ex) raise exception.ValidationError(detail=detail) def _number_from_str(self, param_value): try: value = int(param_value) except (ValueError, TypeError): try: value = float(param_value) except (ValueError, TypeError): return None return value def _validate_minimum(self, validator, minimum, param_value, schema): param_value = self._number_from_str(param_value) if param_value is None: return return self.validator_org.VALIDATORS['minimum']( validator, minimum, param_value, schema ) def _validate_maximum(self, validator, maximum, param_value, schema): param_value = self._number_from_str(param_value) if param_value is None: return return self.validator_org.VALIDATORS['maximum']( validator, maximum, param_value, schema ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/versions.py0000664000175000017500000000665500000000000017326 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack LLC. # Copyright 2015 Clinton Knight # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_config import cfg from manila.api import extensions from manila.api import openstack from manila.api.openstack import api_version_request from manila.api.openstack import wsgi from manila.api.views import versions as views_versions CONF = cfg.CONF _LINKS = [{ 'rel': 'describedby', 'type': 'text/html', 'href': 'http://docs.openstack.org/', }] _MEDIA_TYPES = [{ 'base': 'application/json', 'type': 'application/vnd.openstack.share+json;version=1', }] _KNOWN_VERSIONS = { 'v1.0': { 'id': 'v1.0', 'status': 'DEPRECATED', 'version': '', 'min_version': '', 'updated': '2015-08-27T11:33:21Z', 'links': _LINKS, 'media-types': _MEDIA_TYPES, }, 'v2.0': { 'id': 'v2.0', 'status': 'CURRENT', 'version': api_version_request._MAX_API_VERSION, 'min_version': api_version_request._MIN_API_VERSION, 'updated': '2015-08-27T11:33:21Z', 'links': _LINKS, 'media-types': _MEDIA_TYPES, }, } class VersionsRouter(openstack.APIRouter): """Route versions requests.""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper): self.resources['versions'] = create_resource() mapper.connect('versions', '/', controller=self.resources['versions'], action='all') mapper.redirect('', '/') class VersionsController(wsgi.Controller): def __init__(self): super(VersionsController, self).__init__(None) @wsgi.Controller.api_version('1.0', '1.0') def index(self, req): """Return versions supported prior to the microversions epoch.""" builder = 
views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) known_versions.pop('v2.0') return builder.build_versions(known_versions) @wsgi.Controller.api_version('2.0') # noqa def index(self, req): # pylint: disable=function-redefined # noqa F811 """Return versions supported after the start of microversions.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) known_versions.pop('v1.0') return builder.build_versions(known_versions) # NOTE (cknight): Calling the versions API without # /v1 or /v2 in the URL will lead to this unversioned # method, which should always return info about all # available versions. @wsgi.response(300) def all(self, req): """Return all known versions.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) return builder.build_versions(known_versions) def create_resource(): return wsgi.Resource(VersionsController()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8456726 manila-21.0.0/manila/api/views/0000775000175000017500000000000000000000000016225 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/__init__.py0000664000175000017500000000000000000000000020324 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/availability_zones.py0000664000175000017500000000224500000000000022472 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): _collection_name = "availability_zones" def _detail(self, availability_zone): """Detailed view of a single availability zone.""" keys = ('id', 'name', 'created_at', 'updated_at') return {key: availability_zone.get(key) for key in keys} def detail_list(self, availability_zones): """Detailed view of a list of availability zones.""" azs = [self._detail(az) for az in availability_zones] return {self._collection_name: azs} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/export_locations.py0000664000175000017500000000760400000000000022202 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from oslo_utils import strutils from manila.api import common class ViewBuilder(common.ViewBuilder): """Model export-locations API responses as a python dictionary.""" _collection_name = "export_locations" _detail_version_modifiers = [ 'add_preferred_path_attribute', 'add_metadata_attribute', ] def _get_export_location_view(self, request, export_location, detail=False, replica=False): context = request.environ['manila.context'] view = { 'id': export_location['uuid'], 'path': export_location['path'], } self.update_versioned_resource_dict(request, view, export_location) if context.is_admin: view['share_instance_id'] = export_location['share_instance_id'] view['is_admin_only'] = export_location['is_admin_only'] if detail: view['created_at'] = export_location['created_at'] view['updated_at'] = export_location['updated_at'] if replica: share_instance = export_location['share_instance'] view['replica_state'] = share_instance['replica_state'] view['availability_zone'] = share_instance['availability_zone'] return {'export_location': view} def summary(self, request, export_location, replica=False): """Summary view of a single export location.""" return self._get_export_location_view( request, export_location, detail=False, replica=replica) def detail(self, request, export_location, replica=False): """Detailed view of a single export location.""" return self._get_export_location_view( request, export_location, detail=True, replica=replica) def _list_export_locations(self, req, export_locations, detail=False, replica=False): """View of export locations list.""" view_method = self.detail if detail else self.summary return { self._collection_name: [ view_method(req, elocation, replica=replica)['export_location'] for elocation in export_locations ]} def detail_list(self, request, export_locations): """Detailed View of export locations list.""" return self._list_export_locations(request, export_locations, detail=True) def summary_list(self, request, export_locations, 
replica=False): """Summary View of export locations list.""" return self._list_export_locations(request, export_locations, detail=False, replica=replica) @common.ViewBuilder.versioned_method('2.14') def add_preferred_path_attribute(self, context, view_dict, export_location): view_dict['preferred'] = strutils.bool_from_string( export_location['el_metadata'].get('preferred')) @common.ViewBuilder.versioned_method('2.87') def add_metadata_attribute(self, context, view_dict, export_location): metadata = export_location.get('el_metadata') meta_copy = copy.copy(metadata) meta_copy.pop('preferred', None) view_dict['metadata'] = meta_copy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/limits.py0000664000175000017500000001267000000000000020106 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from manila.api import common from manila import utils class ViewBuilder(common.ViewBuilder): """OpenStack API base limits view builder.""" _collection_name = "limits" _detail_version_modifiers = [ "add_share_replica_quotas", "add_share_group_quotas", "add_share_backup_quotas", ] def build(self, request, rate_limits, absolute_limits): rate_limits = self._build_rate_limits(rate_limits) absolute_limits = self._build_absolute_limits(request, absolute_limits) output = { "limits": { "rate": rate_limits, "absolute": absolute_limits, }, } return output def _build_absolute_limits(self, request, absolute_limits): """Builder for absolute limits. absolute_limits should be given as a dict of limits. For example: {"limit": {"shares": 10, "gigabytes": 1024}, "in_use": {"shares": 8, "gigabytes": 256}}. """ limit_names = { "limit": { "gigabytes": ["maxTotalShareGigabytes"], "snapshot_gigabytes": ["maxTotalSnapshotGigabytes"], "shares": ["maxTotalShares"], "snapshots": ["maxTotalShareSnapshots"], "share_networks": ["maxTotalShareNetworks"], }, "in_use": { "shares": ["totalSharesUsed"], "snapshots": ["totalShareSnapshotsUsed"], "share_networks": ["totalShareNetworksUsed"], "gigabytes": ["totalShareGigabytesUsed"], "snapshot_gigabytes": ["totalSnapshotGigabytesUsed"], }, } limits = {} self.update_versioned_resource_dict(request, limit_names, absolute_limits) for mapping_key in limit_names.keys(): for k, v in absolute_limits.get(mapping_key, {}).items(): if k in limit_names.get(mapping_key, []) and v is not None: for name in limit_names[mapping_key][k]: limits[name] = v return limits def _build_rate_limits(self, rate_limits): limits = [] for rate_limit in rate_limits: _rate_limit_key = None _rate_limit = self._build_rate_limit(rate_limit) # check for existing key for limit in limits: if (limit["uri"] == rate_limit["URI"] and limit["regex"] == rate_limit["regex"]): _rate_limit_key = limit break # ensure we have a key if we didn't find one if not _rate_limit_key: 
_rate_limit_key = { "uri": rate_limit["URI"], "regex": rate_limit["regex"], "limit": [], } limits.append(_rate_limit_key) _rate_limit_key["limit"].append(_rate_limit) return limits def _build_rate_limit(self, rate_limit): next_avail = datetime.datetime.fromtimestamp( rate_limit["resetTime"], tz=datetime.timezone.utc).replace(tzinfo=None) return { "verb": rate_limit["verb"], "value": rate_limit["value"], "remaining": int(rate_limit["remaining"]), "unit": rate_limit["unit"], "next-available": utils.isotime(at=next_avail), } @common.ViewBuilder.versioned_method("2.58") def add_share_group_quotas(self, request, limit_names, absolute_limits): limit_names["limit"]["share_groups"] = ["maxTotalShareGroups"] limit_names["limit"]["share_group_snapshots"] = ( ["maxTotalShareGroupSnapshots"]) limit_names["in_use"]["share_groups"] = ["totalShareGroupsUsed"] limit_names["in_use"]["share_group_snapshots"] = ( ["totalShareGroupSnapshotsUsed"]) @common.ViewBuilder.versioned_method("2.53") def add_share_replica_quotas(self, request, limit_names, absolute_limits): limit_names["limit"]["share_replicas"] = ["maxTotalShareReplicas"] limit_names["limit"]["replica_gigabytes"] = ( ["maxTotalReplicaGigabytes"]) limit_names["in_use"]["share_replicas"] = ["totalShareReplicasUsed"] limit_names["in_use"]["replica_gigabytes"] = ( ["totalReplicaGigabytesUsed"]) @common.ViewBuilder.versioned_method("2.80") def add_share_backup_quotas(self, request, limit_names, absolute_limits): limit_names["limit"]["backups"] = ["maxTotalShareBackups"] limit_names["limit"]["backup_gigabytes"] = ( ["maxTotalBackupGigabytes"]) limit_names["in_use"]["backups"] = ["totalShareBackupsUsed"] limit_names["in_use"]["backup_gigabytes"] = ( ["totalBackupGigabytesUsed"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/messages.py0000664000175000017500000000540500000000000020412 0ustar00zuulzuul00000000000000# Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common from manila.message import message_field class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = "messages" def index(self, request, messages): """Show a list of messages.""" return self._list_view(self.detail, request, messages) def detail(self, request, message): """Detailed view of a single message.""" message_ref = { 'id': message.get('id'), 'project_id': message.get('project_id'), 'action_id': message.get('action_id'), 'detail_id': message.get('detail_id'), 'message_level': message.get('message_level'), 'created_at': message.get('created_at'), 'expires_at': message.get('expires_at'), 'request_id': message.get('request_id'), 'links': self._get_links(request, message['id']), 'resource_type': message.get('resource_type'), 'resource_id': message.get('resource_id'), 'user_message': "%s: %s" % ( message_field.translate_action(message.get('action_id')), message_field.translate_detail(message.get('detail_id'))), } return {'message': message_ref} def _list_view(self, func, request, messages, coll_name=_collection_name): """Provide a view for a list of messages. 
:param func: Function used to format the message data :param request: API request :param messages: List of messages in dictionary format :param coll_name: Name of collection, used to generate the next link for a pagination query :returns: message data in dictionary format """ messages_list = [func(request, message)['message'] for message in messages] messages_links = self._get_collection_links(request, messages, coll_name) messages_dict = dict({"messages": messages_list}) if messages_links: messages_dict['messages_links'] = messages_links return messages_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/quota_class_sets.py0000664000175000017500000000551600000000000022162 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api import common class ViewBuilder(common.ViewBuilder): _collection_name = "quota_class_set" _detail_version_modifiers = [ "add_share_group_quotas", "add_share_replica_quotas", "add_per_share_gigabytes_quotas", "add_share_backup_quotas", "add_encryption_keys_quotas", ] def detail_list(self, request, quota_class_set, quota_class=None): """Detailed view of quota class set.""" keys = ( 'shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes', 'share_networks', ) view = {key: quota_class_set.get(key) for key in keys} if quota_class: view['id'] = quota_class self.update_versioned_resource_dict(request, view, quota_class_set) return {self._collection_name: view} @common.ViewBuilder.versioned_method("2.40") def add_share_group_quotas(self, context, view, quota_class_set): share_groups = quota_class_set.get('share_groups') share_group_snapshots = quota_class_set.get('share_group_snapshots') if share_groups is not None: view['share_groups'] = share_groups if share_group_snapshots is not None: view['share_group_snapshots'] = share_group_snapshots @common.ViewBuilder.versioned_method("2.53") def add_share_replica_quotas(self, context, view, quota_class_set): view['share_replicas'] = quota_class_set.get('share_replicas') view['replica_gigabytes'] = quota_class_set.get('replica_gigabytes') @common.ViewBuilder.versioned_method("2.62") def add_per_share_gigabytes_quotas(self, context, view, quota_class_set): view['per_share_gigabytes'] = quota_class_set.get( 'per_share_gigabytes') @common.ViewBuilder.versioned_method("2.80") def add_share_backup_quotas(self, context, view, quota_class_set): view['backups'] = quota_class_set.get('backups') view['backup_gigabytes'] = quota_class_set.get('backup_gigabytes') @common.ViewBuilder.versioned_method("2.90") def add_encryption_keys_quotas(self, context, view, quota_class_set): view['encryption_keys'] = quota_class_set.get('encryption_keys') ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/quota_sets.py0000664000175000017500000000606400000000000020774 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): _collection_name = "quota_set" _detail_version_modifiers = [ "add_share_group_quotas", "add_share_replica_quotas", "add_per_share_gigabytes_quotas", "add_share_backup_quotas", "add_encryption_keys_quotas", ] def detail_list(self, request, quota_set, project_id=None, share_type=None): """Detailed view of quota set.""" keys = ( 'shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes', ) view = {key: quota_set.get(key) for key in keys} if project_id: view['id'] = project_id if share_type: # NOTE(vponomaryov): remove share groups related data for quotas # that are share-type based. 
quota_set.pop('share_groups', None) quota_set.pop('share_group_snapshots', None) else: view['share_networks'] = quota_set.get('share_networks') self.update_versioned_resource_dict(request, view, quota_set) return {self._collection_name: view} @common.ViewBuilder.versioned_method("2.40") def add_share_group_quotas(self, context, view, quota_set): share_groups = quota_set.get('share_groups') share_group_snapshots = quota_set.get('share_group_snapshots') if share_groups is not None: view['share_groups'] = share_groups if share_group_snapshots is not None: view['share_group_snapshots'] = share_group_snapshots @common.ViewBuilder.versioned_method("2.53") def add_share_replica_quotas(self, context, view, quota_class_set): view['share_replicas'] = quota_class_set.get('share_replicas') view['replica_gigabytes'] = quota_class_set.get('replica_gigabytes') @common.ViewBuilder.versioned_method("2.62") def add_per_share_gigabytes_quotas(self, context, view, quota_set): view['per_share_gigabytes'] = quota_set.get('per_share_gigabytes') @common.ViewBuilder.versioned_method("2.80") def add_share_backup_quotas(self, context, view, quota_set): view['backups'] = quota_set.get('backups') view['backup_gigabytes'] = quota_set.get('backup_gigabytes') @common.ViewBuilder.versioned_method("2.90") def add_encryption_keys_quotas(self, context, view, quota_set): view['encryption_keys'] = quota_set.get('encryption_keys') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/resource_locks.py0000664000175000017500000000555600000000000021634 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): """Model a resource lock API response as a python dictionary.""" _collection_name = "resource_locks" def index(self, request, resource_locks, count=None): """Show a list of resource locks.""" return self._list_view(self.detail, request, resource_locks, count=count) def detail(self, request, resource_lock): """Detailed view of a single resource lock.""" lock_ref = { 'id': resource_lock.get('id'), 'user_id': resource_lock.get('user_id'), 'project_id': resource_lock.get('project_id'), 'lock_context': resource_lock.get('lock_context'), 'resource_type': resource_lock.get('resource_type'), 'resource_id': resource_lock.get('resource_id'), 'resource_action': resource_lock.get('resource_action'), 'lock_reason': resource_lock.get('lock_reason'), 'created_at': resource_lock.get('created_at'), 'updated_at': resource_lock.get('updated_at'), 'links': self._get_links(request, resource_lock['id']), } return {'resource_lock': lock_ref} def _list_view(self, func, request, resource_locks, coll_name=_collection_name, count=None): """Provide a view for a list of resource_locks. 
:param func: Function used to format the lock data :param request: API request :param resource_locks: List of locks in dictionary format :param coll_name: Name of collection, used to generate the next link for a pagination query :returns: lock data in dictionary format """ locks_list = [ func(request, lock)['resource_lock'] for lock in resource_locks ] locks_links = self._get_collection_links(request, resource_locks, coll_name) locks_dict = dict({"resource_locks": locks_list}) if count: locks_dict['count'] = count if locks_links: locks_dict['resource_locks_links'] = locks_links return locks_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/scheduler_stats.py0000664000175000017500000000343200000000000021775 0ustar00zuulzuul00000000000000# Copyright (c) 2014 eBay Inc. # Copyright (c) 2015 Rushil Chugh # Copyright (c) 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api import common class ViewBuilder(common.ViewBuilder): """Model scheduler-stats API responses as a python dictionary.""" _collection_name = "scheduler-stats" def pool_summary(self, pool): """Summary view of a single pool.""" return { 'pool': { 'name': pool.get('name'), 'host': pool.get('host'), 'backend': pool.get('backend'), 'pool': pool.get('pool'), } } def pool_detail(self, pool): """Detailed view of a single pool.""" return { 'pool': { 'name': pool.get('name'), 'host': pool.get('host'), 'backend': pool.get('backend'), 'pool': pool.get('pool'), 'capabilities': pool.get('capabilities'), } } def pools(self, pools, detail=False): """View of a list of pools seen by scheduler.""" view_method = self.pool_detail if detail else self.pool_summary return {"pools": [view_method(pool)['pool'] for pool in pools]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/security_service.py0000664000175000017500000000632600000000000022175 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api import common from manila.common import constants class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'security_services' _detail_version_modifiers = [ 'add_ou_to_security_service', 'add_default_ad_site_to_security_service', ] def summary_list(self, request, security_services): """Show a list of security services without many details.""" return self._list_view(self.summary, request, security_services) def detail_list(self, request, security_services): """Detailed view of a list of security services.""" return self._list_view(self.detail, request, security_services) def summary(self, request, security_service): """Generic, non-detailed view of a security service.""" return { 'security_service': { 'id': security_service.get('id'), 'name': security_service.get('name'), 'type': security_service.get('type'), # NOTE(vponomaryov): attr "status" was removed from model and # is left in view for compatibility purposes since it affects # user-facing API. This should be removed right after no one # uses it anymore. 
'status': constants.STATUS_NEW, } } def detail(self, request, security_service): """Detailed view of a single security service.""" view = self.summary(request, security_service) keys = ( 'created_at', 'updated_at', 'description', 'dns_ip', 'server', 'domain', 'user', 'password', 'project_id') for key in keys: view['security_service'][key] = security_service.get(key) self.update_versioned_resource_dict( request, view['security_service'], security_service) return view @common.ViewBuilder.versioned_method("2.44") def add_ou_to_security_service(self, context, ss_dict, ss): ss_dict['ou'] = ss.get('ou') @common.ViewBuilder.versioned_method("2.76") def add_default_ad_site_to_security_service(self, context, ss_dict, ss): ss_dict['default_ad_site'] = ss.get('default_ad_site') def _list_view(self, func, request, security_services): """Provide a view for a list of security services.""" security_services_list = [func(request, service)['security_service'] for service in security_services] security_services_dict = dict(security_services=security_services_list) return security_services_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/services.py0000664000175000017500000000425100000000000020424 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api import common class ViewBuilder(common.ViewBuilder): _collection_name = "services" _detail_version_modifiers = [ "add_disabled_reason_field", "add_ensuring_field", ] def summary(self, request, service): """Summary view of a single service.""" keys = 'host', 'binary', 'status', service_dict = {key: service.get(key) for key in keys} self.update_versioned_resource_dict(request, service_dict, service) return service_dict def detail(self, request, service): """Detailed view of a single service.""" keys = ('id', 'binary', 'host', 'zone', 'status', 'state', 'updated_at') service_dict = {key: service.get(key) for key in keys} self.update_versioned_resource_dict(request, service_dict, service) return service_dict def detail_list(self, request, services): """Detailed view of a list of services.""" services_list = [self.detail(request, s) for s in services] services_dict = dict(services=services_list) return services_dict @common.ViewBuilder.versioned_method("2.83") def add_disabled_reason_field(self, context, service_dict, service): service_dict.pop('disabled', None) service_dict['status'] = service.get('status') service_dict['disabled_reason'] = service.get('disabled_reason') @common.ViewBuilder.versioned_method("2.86") def add_ensuring_field(self, context, service_dict, service): service_dict['ensuring'] = service.get('ensuring') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_accesses.py0000664000175000017500000001010200000000000021544 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common from manila.common import constants from manila.share import api as share_api class ViewBuilder(common.ViewBuilder): """Model a share access API response as a python dictionary.""" _collection_name = 'share_accesses' _detail_version_modifiers = [ "add_access_key", "translate_transitional_statuses", "add_created_at_and_updated_at", "add_access_rule_metadata_field", ] def list_view(self, request, accesses): """View of a list of share accesses.""" return {'access_list': [self.summary_view(request, access)['access'] for access in accesses]} def _redact_restricted_fields(self, access, access_dict): if access.get('restricted', False): fields_to_redact = ['access_key', 'access_to'] for field in fields_to_redact: access_dict[field] = '******' return access_dict def summary_view(self, request, access): """Summarized view of a single share access.""" access_dict = { 'id': access.get('id'), 'access_level': access.get('access_level'), 'access_to': access.get('access_to'), 'access_type': access.get('access_type'), 'state': access.get('state'), } self.update_versioned_resource_dict( request, access_dict, access) access_dict = self._redact_restricted_fields(access, access_dict) return {'access': access_dict} def view(self, request, access): """Generic view of a single share access.""" access_dict = { 'id': access.get('id'), 'share_id': access.get('share_id'), 'access_level': access.get('access_level'), 'access_to': access.get('access_to'), 'access_type': access.get('access_type'), 'state': access.get('state'), } self.update_versioned_resource_dict( 
request, access_dict, access) access_dict = self._redact_restricted_fields(access, access_dict) return {'access': access_dict} def view_metadata(self, request, metadata): """View of a share access rule metadata.""" return {'metadata': metadata} @common.ViewBuilder.versioned_method("2.21") def add_access_key(self, context, access_dict, access): access_dict['access_key'] = access.get('access_key') @common.ViewBuilder.versioned_method("2.33") def add_created_at_and_updated_at(self, context, access_dict, access): access_dict['created_at'] = access.get('created_at') access_dict['updated_at'] = access.get('updated_at') @common.ViewBuilder.versioned_method("2.45") def add_access_rule_metadata_field(self, context, access_dict, access): metadata = access.get('share_access_rules_metadata') or {} metadata = {item['key']: item['value'] for item in metadata} access_dict['metadata'] = metadata @common.ViewBuilder.versioned_method("1.0", "2.27") def translate_transitional_statuses(self, context, access_dict, access): """In 2.28, the per access rule status was (re)introduced.""" api = share_api.API() share = api.get(context, access['share_id']) if (share['access_rules_status'] == constants.SHARE_INSTANCE_RULES_SYNCING): access_dict['state'] = constants.STATUS_NEW else: access_dict['state'] = share['access_rules_status'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_backups.py0000664000175000017500000000701300000000000021412 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudification GmbH. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common from manila import policy class BackupViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'share_backups' _collection_links = 'share_backup_links' _detail_version_modifiers = [ "add_backup_type_field", ] def summary_list(self, request, backups): """Summary view of a list of backups.""" return self._list_view(self.summary, request, backups) def detail_list(self, request, backups): """Detailed view of a list of backups.""" return self._list_view(self.detail, request, backups) def summary(self, request, backup): """Generic, non-detailed view of a share backup.""" backup_dict = { 'id': backup.get('id'), 'name': backup.get('display_name'), 'share_id': backup.get('share_id'), 'status': backup.get('status'), } return {'share_backup': backup_dict} def restore_summary(self, request, restore): """Generic, non-detailed view of a restore.""" return { 'restore': { 'backup_id': restore['backup_id'], 'share_id': restore['share_id'], }, } def detail(self, request, backup): """Detailed view of a single backup.""" context = request.environ['manila.context'] backup_dict = { 'id': backup.get('id'), 'name': backup.get('display_name'), 'share_id': backup.get('share_id'), 'status': backup.get('status'), 'description': backup.get('display_description'), 'size': backup.get('size'), 'created_at': backup.get('created_at'), 'updated_at': backup.get('updated_at'), 'availability_zone': backup.get('availability_zone'), 'progress': backup.get('progress'), 'restore_progress': backup.get('restore_progress'), } 
self.update_versioned_resource_dict(request, backup_dict, backup) if policy.check_is_host_admin(context): backup_dict['host'] = backup.get('host') backup_dict['topic'] = backup.get('topic') return {'share_backup': backup_dict} def _list_view(self, func, request, backups): """Provide a view for a list of backups.""" backups_list = [func(request, backup)['share_backup'] for backup in backups] backup_links = self._get_collection_links( request, backups, self._collection_name) backups_dict = {self._collection_name: backups_list} if backup_links: backups_dict[self._collection_links] = backup_links return backups_dict @common.ViewBuilder.versioned_method("2.85") def add_backup_type_field(self, context, backup_dict, backup): backup_dict['backup_type'] = backup.get('backup_type') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_group_snapshots.py0000664000175000017500000001047400000000000023225 0ustar00zuulzuul00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api import common class ShareGroupSnapshotViewBuilder(common.ViewBuilder): """Model a share group snapshot API response as a python dictionary.""" _collection_name = "share_group_snapshot" def summary_list(self, request, group_snaps): """Show a list of share_group_snapshots without many details.""" return self._list_view(self.summary, request, group_snaps) def detail_list(self, request, group_snaps): """Detailed view of a list of share_group_snapshots.""" return self._list_view(self.detail, request, group_snaps) def member_list(self, request, members): members_list = [] for member in members: member_dict = { 'id': member.get('id'), 'created_at': member.get('created_at'), 'size': member.get('size'), 'share_protocol': member.get('share_proto'), 'project_id': member.get('project_id'), 'share_group_snapshot_id': member.get( 'share_group_snapshot_id'), 'share_id': member.get('share_instance', {}).get('share_id'), # TODO(vponomaryov): add 'provider_location' key in Pike. } members_list.append(member_dict) members_links = self._get_collection_links( request, members, "share_group_snapshot_id") members_dict = {"share_group_snapshot_members": members_list} if members_links: members_dict["share_group_snapshot_members_links"] = members_links return members_dict def summary(self, request, share_group_snap): """Generic, non-detailed view of a share group snapshot.""" return { 'share_group_snapshot': { 'id': share_group_snap.get('id'), 'name': share_group_snap.get('name'), 'links': self._get_links(request, share_group_snap['id']), } } def detail(self, request, share_group_snap): """Detailed view of a single share group snapshot.""" members = self._format_member_list( share_group_snap.get('share_group_snapshot_members', [])) share_group_snap_dict = { 'id': share_group_snap.get('id'), 'name': share_group_snap.get('name'), 'created_at': share_group_snap.get('created_at'), 'status': share_group_snap.get('status'), 'description': share_group_snap.get('description'), 
'project_id': share_group_snap.get('project_id'), 'share_group_id': share_group_snap.get('share_group_id'), 'members': members, 'links': self._get_links(request, share_group_snap['id']), } return {'share_group_snapshot': share_group_snap_dict} def _format_member_list(self, members): members_list = [] for member in members: member_dict = { 'id': member.get('id'), 'size': member.get('size'), 'share_id': member.get('share_instance', {}).get('share_id'), } members_list.append(member_dict) return members_list def _list_view(self, func, request, snaps): """Provide a view for a list of share group snapshots.""" snap_list = [func(request, snap)["share_group_snapshot"] for snap in snaps] snaps_links = self._get_collection_links(request, snaps, self._collection_name) snaps_dict = {"share_group_snapshots": snap_list} if snaps_links: snaps_dict["share_group_snapshot_links"] = snaps_links return snaps_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_group_types.py0000664000175000017500000000427600000000000022352 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from manila.api import common CONF = cfg.CONF class ShareGroupTypeViewBuilder(common.ViewBuilder): _collection_name = 'share_group_types' _detail_version_modifiers = [ "add_is_default_attr", ] def show(self, request, share_group_type, brief=False): """Trim away extraneous share group type attributes.""" group_specs = share_group_type.get('group_specs', {}) trimmed = { 'id': share_group_type.get('id'), 'name': share_group_type.get('name'), 'is_public': share_group_type.get('is_public'), 'group_specs': group_specs, 'share_types': [ st['share_type_id'] for st in share_group_type['share_types']], } self.update_versioned_resource_dict(request, trimmed, share_group_type) return trimmed if brief else {"share_group_type": trimmed} def index(self, request, share_group_types): """Index over trimmed share group types.""" share_group_types_list = [ self.show(request, share_group_type, True) for share_group_type in share_group_types ] return {"share_group_types": share_group_types_list} @common.ViewBuilder.versioned_method("2.46") def add_is_default_attr(self, context, share_group_type_dict, share_group_type): is_default = False type_name = share_group_type.get('name') default_name = CONF.default_share_group_type if default_name is not None: is_default = default_name == type_name share_group_type_dict['is_default'] = is_default ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_groups.py0000664000175000017500000000721400000000000021304 0ustar00zuulzuul00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ShareGroupViewBuilder(common.ViewBuilder): """Model a share group API response as a python dictionary.""" _collection_name = 'share_groups' _detail_version_modifiers = [ "add_consistent_snapshot_support_and_az_id_fields_to_sg", ] def summary_list(self, request, share_groups): """Show a list of share groups without many details.""" return self._list_view(self.summary, request, share_groups) def detail_list(self, request, share_groups): """Detailed view of a list of share groups.""" return self._list_view(self.detail, request, share_groups) def summary(self, request, share_group): """Generic, non-detailed view of a share group.""" return { 'share_group': { 'id': share_group.get('id'), 'name': share_group.get('name'), 'links': self._get_links(request, share_group['id']) } } def detail(self, request, share_group): """Detailed view of a single share group.""" context = request.environ['manila.context'] share_group_dict = { 'id': share_group.get('id'), 'name': share_group.get('name'), 'created_at': share_group.get('created_at'), 'status': share_group.get('status'), 'description': share_group.get('description'), 'project_id': share_group.get('project_id'), 'host': share_group.get('host'), 'share_group_type_id': share_group.get('share_group_type_id'), 'source_share_group_snapshot_id': share_group.get( 'source_share_group_snapshot_id'), 'share_network_id': share_group.get('share_network_id'), 'share_types': [st['share_type_id'] for st in share_group.get( 'share_types')], 'links': self._get_links(request, share_group['id']), } 
self.update_versioned_resource_dict( request, share_group_dict, share_group) if context.is_admin: share_group_dict['share_server_id'] = share_group.get( 'share_server_id') return {'share_group': share_group_dict} @common.ViewBuilder.versioned_method("2.34") def add_consistent_snapshot_support_and_az_id_fields_to_sg( self, context, sg_dict, sg): sg_dict['availability_zone'] = sg.get('availability_zone') sg_dict['consistent_snapshot_support'] = sg.get( 'consistent_snapshot_support') def _list_view(self, func, request, shares): """Provide a view for a list of share groups.""" share_group_list = [ func(request, share)['share_group'] for share in shares ] share_groups_links = self._get_collection_links( request, shares, self._collection_name) share_groups_dict = {"share_groups": share_group_list} if share_groups_links: share_groups_dict['share_groups_links'] = share_groups_links return share_groups_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_instance.py0000664000175000017500000001152300000000000021567 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api import common from manila.common import constants class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'share_instances' _collection_links = 'share_instances_links' _detail_version_modifiers = [ "remove_export_locations", "add_access_rules_status_field", "add_replication_fields", "add_share_type_field", "add_cast_rules_to_readonly_field", "add_progress_field", "translate_creating_from_snapshot_status", "add_updated_at_field", ] def detail_list(self, request, instances): """Detailed view of a list of share instances.""" return self._list_view(self.detail, request, instances) def detail(self, request, share_instance): """Detailed view of a single share instance.""" export_locations = [e['path'] for e in share_instance.export_locations] instance_dict = { 'id': share_instance.get('id'), 'share_id': share_instance.get('share_id'), 'availability_zone': share_instance.get('availability_zone'), 'created_at': share_instance.get('created_at'), 'host': share_instance.get('host'), 'status': share_instance.get('status'), 'share_network_id': share_instance.get('share_network_id'), 'share_server_id': share_instance.get('share_server_id'), 'export_location': share_instance.get('export_location'), 'export_locations': export_locations, } self.update_versioned_resource_dict( request, instance_dict, share_instance) return {'share_instance': instance_dict} def _list_view(self, func, request, instances): """Provide a view for a list of share instances.""" instances_list = [func(request, instance)['share_instance'] for instance in instances] instances_links = self._get_collection_links(request, instances, self._collection_name) instances_dict = {self._collection_name: instances_list} if instances_links: instances_dict[self._collection_links] = instances_links return instances_dict @common.ViewBuilder.versioned_method("2.9") def remove_export_locations(self, context, share_instance_dict, share_instance): 
share_instance_dict.pop('export_location') share_instance_dict.pop('export_locations') @common.ViewBuilder.versioned_method("2.10") def add_access_rules_status_field(self, context, instance_dict, share_instance): instance_dict['access_rules_status'] = ( share_instance.get('access_rules_status') ) @common.ViewBuilder.versioned_method("2.11") def add_replication_fields(self, context, instance_dict, share_instance): instance_dict['replica_state'] = share_instance.get('replica_state') @common.ViewBuilder.versioned_method("2.22") def add_share_type_field(self, context, instance_dict, share_instance): instance_dict['share_type_id'] = share_instance.get('share_type_id') @common.ViewBuilder.versioned_method("2.30") def add_cast_rules_to_readonly_field(self, context, instance_dict, share_instance): instance_dict['cast_rules_to_readonly'] = share_instance.get( 'cast_rules_to_readonly', False) @common.ViewBuilder.versioned_method("1.0", "2.53") def translate_creating_from_snapshot_status(self, context, instance_dict, share_instance): if (share_instance.get('status') == constants.STATUS_CREATING_FROM_SNAPSHOT): instance_dict['status'] = constants.STATUS_CREATING @common.ViewBuilder.versioned_method("2.54") def add_progress_field(self, context, instance_dict, share_instance): instance_dict['progress'] = share_instance.get('progress') @common.ViewBuilder.versioned_method("2.71") def add_updated_at_field(self, context, instance_dict, share_instance): instance_dict['updated_at'] = share_instance.get('updated_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_migration.py0000664000175000017500000000257300000000000021761 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api import common class ViewBuilder(common.ViewBuilder): """Model share migration view data response as a python dictionary.""" _collection_name = 'share_migration' _detail_version_modifiers = [ 'add_progress_details', ] def get_progress(self, request, share, progress): """View of share migration job progress.""" result = { 'total_progress': progress.pop('total_progress'), 'task_state': share['task_state'], } self.update_versioned_resource_dict(request, result, progress) return result @common.ViewBuilder.versioned_method('2.59') def add_progress_details(self, context, progress_dict, progress): progress_dict['details'] = progress ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_network_subnets.py0000664000175000017500000000524700000000000023225 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.api import common class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = 'share_network_subnets' _detail_version_modifiers = [ "add_metadata" ] def build_share_network_subnet(self, request, share_network_subnet): return { 'share_network_subnet': self._build_share_network_subnet_view( request, share_network_subnet)} def build_share_network_subnets(self, request, share_network_subnets): return {'share_network_subnets': [self._build_share_network_subnet_view( request, share_network_subnet) for share_network_subnet in share_network_subnets]} def _build_share_network_subnet_view(self, request, share_network_subnet): sns = { 'id': share_network_subnet.get('id'), 'availability_zone': share_network_subnet.get('availability_zone'), 'share_network_id': share_network_subnet.get('share_network_id'), 'share_network_name': share_network_subnet['share_network_name'], 'created_at': share_network_subnet.get('created_at'), 'segmentation_id': share_network_subnet.get('segmentation_id'), 'neutron_subnet_id': share_network_subnet.get('neutron_subnet_id'), 'updated_at': share_network_subnet.get('updated_at'), 'neutron_net_id': share_network_subnet.get('neutron_net_id'), 'ip_version': share_network_subnet.get('ip_version'), 'cidr': share_network_subnet.get('cidr'), 'network_type': share_network_subnet.get('network_type'), 'mtu': share_network_subnet.get('mtu'), 'gateway': share_network_subnet.get('gateway') } self.update_versioned_resource_dict(request, sns, share_network_subnet) return sns @common.ViewBuilder.versioned_method("2.78") def add_metadata(self, context, share_network_subnet_dict, sns): share_network_subnet_dict['metadata'] = sns.get('subnet_metadata') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_networks.py0000664000175000017500000001750000000000000021640 0ustar00zuulzuul00000000000000# Copyright 2014 
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from manila.api import common


class ViewBuilder(common.ViewBuilder):
    """Model a share network API response as a python dictionary."""

    _collection_name = 'share_networks'
    _detail_version_modifiers = [
        "add_gateway",
        "add_mtu",
        "add_nova_net_id",
        "add_subnets",
        "add_status_and_sec_service_update_fields",
        "add_network_allocation_update_support_field",
        "add_subnet_with_metadata",
    ]

    # Keys that move from the top-level share network view into the
    # per-subnet views once subnets are exposed (microversion >= 2.51).
    _SUBNET_MIGRATED_ATTRS = (
        'neutron_net_id', 'neutron_subnet_id', 'network_type',
        'segmentation_id', 'cidr', 'ip_version', 'gateway', 'mtu')

    def build_share_network(self, request, share_network):
        """View of a single share network."""
        return {'share_network': self._build_share_network_view(
            request, share_network)}

    def build_share_networks(self, request, share_networks, is_detail=True):
        """View of a list of share networks."""
        return {'share_networks': [
            self._build_share_network_view(request, share_network, is_detail)
            for share_network in share_networks]}

    def build_security_service_update_check(self, request, params, result):
        """View of a security service add or update compatibility check.

        :param params: request parameters (may carry 'current_service_id',
            'new_service_id' or 'security_service_id').
        :param result: check result with 'compatible' and, for admins,
            'hosts_check_result'.
        """
        context = request.environ['manila.context']
        requested_operation = {
            'operation': ('update_security_service'
                          if params.get('current_service_id')
                          else 'add_security_service'),
            'current_security_service': params.get('current_service_id'),
            'new_security_service': (params.get('new_service_id')
                                     or params.get('security_service_id')),
        }
        view = {
            'compatible': result['compatible'],
            'requested_operation': requested_operation,
        }
        # Per-host check details are admin-only information.
        if context.is_admin:
            view['hosts_check_result'] = result['hosts_check_result']
        return view

    def build_share_network_subnet_create_check(self, request, result):
        """View of a share network subnet create compatibility check."""
        context = request.environ['manila.context']
        view = {
            'compatible': result['compatible'],
        }
        if context.is_admin:
            view['hosts_check_result'] = result['hosts_check_result']
        return view

    def _update_share_network_info(self, request, share_network):
        """Copy the default subnet's network data onto the share network.

        Legacy (pre-subnet) top-level network attributes are sourced from
        the subnet flagged as default, if any.
        """
        for sns in share_network.get('share_network_subnets') or []:
            # was: sns.get('is_default') and sns.get('is_default') is True
            # — the first clause is redundant.
            if sns.get('is_default') is True:
                share_network.update({
                    'neutron_net_id': sns.get('neutron_net_id'),
                    'neutron_subnet_id': sns.get('neutron_subnet_id'),
                    'network_type': sns.get('network_type'),
                    'segmentation_id': sns.get('segmentation_id'),
                    'cidr': sns.get('cidr'),
                    'ip_version': sns.get('ip_version'),
                    'gateway': sns.get('gateway'),
                    'mtu': sns.get('mtu'),
                })

    def _build_share_network_view(self, request, share_network,
                                  is_detail=True):
        """Build the (summary or detail) view of one share network."""
        sn = {
            'id': share_network.get('id'),
            'name': share_network.get('name'),
        }
        if is_detail:
            self._update_share_network_info(request, share_network)
            sn.update({
                'project_id': share_network.get('project_id'),
                'created_at': share_network.get('created_at'),
                'updated_at': share_network.get('updated_at'),
                'neutron_net_id': share_network.get('neutron_net_id'),
                'neutron_subnet_id': share_network.get('neutron_subnet_id'),
                'network_type': share_network.get('network_type'),
                'segmentation_id': share_network.get('segmentation_id'),
                'cidr': share_network.get('cidr'),
                'ip_version': share_network.get('ip_version'),
                'description': share_network.get('description'),
            })
            self.update_versioned_resource_dict(request, sn, share_network)
        return sn

    @staticmethod
    def _build_subnet_view(sns, include_metadata=False):
        """Map one share network subnet model to its API representation."""
        view = {
            'id': sns.get('id'),
            'availability_zone': sns.get('availability_zone'),
            'created_at': sns.get('created_at'),
            'updated_at': sns.get('updated_at'),
            'segmentation_id': sns.get('segmentation_id'),
            'neutron_net_id': sns.get('neutron_net_id'),
            'neutron_subnet_id': sns.get('neutron_subnet_id'),
            'ip_version': sns.get('ip_version'),
            'cidr': sns.get('cidr'),
            'network_type': sns.get('network_type'),
            'mtu': sns.get('mtu'),
            'gateway': sns.get('gateway'),
        }
        if include_metadata:
            view['metadata'] = sns.get('subnet_metadata')
        return view

    def _add_subnet_views(self, network_dict, network, include_metadata):
        """Attach subnet views and drop the migrated top-level keys."""
        # Guard against a missing/None subnet list, consistent with
        # _update_share_network_info (the original iterated the raw
        # .get() result and would raise TypeError on None).
        network_dict['share_network_subnets'] = [
            self._build_subnet_view(sns, include_metadata)
            for sns in network.get('share_network_subnets') or []]
        for attr in self._SUBNET_MIGRATED_ATTRS:
            network_dict.pop(attr)

    @common.ViewBuilder.versioned_method("2.51", "2.77")
    def add_subnets(self, context, network_dict, network):
        self._add_subnet_views(network_dict, network, include_metadata=False)

    @common.ViewBuilder.versioned_method("2.18")
    def add_gateway(self, context, network_dict, network):
        network_dict['gateway'] = network.get('gateway')

    @common.ViewBuilder.versioned_method("2.20")
    def add_mtu(self, context, network_dict, network):
        network_dict['mtu'] = network.get('mtu')

    @common.ViewBuilder.versioned_method("1.0", "2.25")
    def add_nova_net_id(self, context, network_dict, network):
        # nova-network support is gone; the field is always null.
        network_dict['nova_net_id'] = None

    @common.ViewBuilder.versioned_method("2.63")
    def add_status_and_sec_service_update_fields(
            self, context, network_dict, network):
        network_dict['status'] = network.get('status')
        network_dict['security_service_update_support'] = network.get(
            'security_service_update_support')

    @common.ViewBuilder.versioned_method("2.70")
    def add_network_allocation_update_support_field(
            self, context, network_dict, network):
        network_dict['network_allocation_update_support'] = network.get(
            'network_allocation_update_support')

    @common.ViewBuilder.versioned_method("2.78")
    def add_subnet_with_metadata(self, context, network_dict, network):
        self._add_subnet_views(network_dict, network, include_metadata=True)
from manila.api import common


class ReplicationViewBuilder(common.ViewBuilder):
    """Model share replica API responses as python dictionaries."""

    _collection_name = 'share_replicas'
    _collection_route_name = "share-replicas"
    _collection_links = 'share_replica_links'
    _detail_version_modifiers = [
        "add_cast_rules_to_readonly_field",
    ]

    def summary_list(self, request, replicas):
        """Summary view of a list of replicas."""
        return self._list_view(self.summary, request, replicas)

    def detail_list(self, request, replicas):
        """Detailed view of a list of replicas."""
        return self._list_view(self.detail, request, replicas)

    def summary(self, request, replica):
        """Generic, non-detailed view of a share replica."""
        return {
            'share_replica': {
                'id': replica.get('id'),
                'share_id': replica.get('share_id'),
                'status': replica.get('status'),
                'replica_state': replica.get('replica_state'),
            },
        }

    def detail(self, request, replica):
        """Detailed view of a single replica."""
        context = request.environ['manila.context']
        view = {
            'id': replica.get('id'),
            'share_id': replica.get('share_id'),
            'availability_zone': replica.get('availability_zone'),
            'created_at': replica.get('created_at'),
            'status': replica.get('status'),
            'share_network_id': replica.get('share_network_id'),
            'replica_state': replica.get('replica_state'),
            'updated_at': replica.get('updated_at'),
        }
        # Share server and host details are admin-only information.
        if context.is_admin:
            view['share_server_id'] = replica.get('share_server_id')
            view['host'] = replica.get('host')
        self.update_versioned_resource_dict(request, view, replica)
        return {'share_replica': view}

    def _list_view(self, func, request, replicas):
        """Provide a view for a list of replicas, with pagination links."""
        items = [func(request, replica)['share_replica']
                 for replica in replicas]
        links = self._get_collection_links(
            request, replicas, self._collection_name)
        result = {self._collection_name: items}
        if links:
            result[self._collection_links] = links
        return result

    @common.ViewBuilder.versioned_method("2.30")
    def add_cast_rules_to_readonly_field(self, context, replica_dict,
                                         replica):
        # Only administrators see the cast_rules_to_readonly flag.
        if context.is_admin:
            replica_dict['cast_rules_to_readonly'] = replica.get(
                'cast_rules_to_readonly', False)
""" _collection_name = 'share_server_migration' _detail_version_modifiers = [] def get_progress(self, request, params): """View of share server migration job progress.""" result = { 'total_progress': params['total_progress'], 'task_state': params['task_state'], 'destination_share_server_id': params['destination_share_server_id'], } self.update_versioned_resource_dict(request, result, params) return result def build_check_migration(self, request, params, result): """View of share server migration check.""" requested_capabilities = { 'writable': params['writable'], 'nondisruptive': params['nondisruptive'], 'preserve_snapshots': params['preserve_snapshots'], 'share_network_id': params['new_share_network_id'], 'host': params['host'], } supported_capabilities = { 'writable': result['writable'], 'nondisruptive': result['nondisruptive'], 'preserve_snapshots': result['preserve_snapshots'], 'share_network_id': result['share_network_id'], 'migration_cancel': result['migration_cancel'], 'migration_get_progress': result['migration_get_progress'] } view = { 'compatible': result['compatible'], 'requested_capabilities': requested_capabilities, 'supported_capabilities': supported_capabilities, } capabilities = { 'requested': copy.copy(params), 'supported': copy.copy(result) } self.update_versioned_resource_dict(request, view, capabilities) return view def migration_complete(self, request, params): """View of share server migration complete command.""" result = { 'destination_share_server_id': params['destination_share_server_id'], } self.update_versioned_resource_dict(request, result, params) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/share_servers.py0000664000175000017500000001071700000000000021460 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from manila.api import common


class ViewBuilder(common.ViewBuilder):
    """Model share server API responses as python dictionaries."""

    _collection_name = 'share_servers'
    _detail_version_modifiers = [
        "add_is_auto_deletable_and_identifier_fields",
        "add_share_network_subnet_id_field",
        "add_task_state_and_source_server_fields",
        "add_sec_service_update_fields",
        "add_share_network_subnet_ids_and_network_allocation_update_support",
        "add_encryption_key_ref_field",
    ]

    def build_share_server(self, request, share_server):
        """Detailed view of a single share server."""
        return {
            'share_server': self._build_share_server_view(
                request, share_server, detailed=True)
        }

    def build_share_servers(self, request, share_servers):
        """Non-detailed view of a list of share servers."""
        return {
            'share_servers': [
                self._build_share_server_view(request, server)
                for server in share_servers
            ]
        }

    def build_share_server_details(self, details):
        """Wrap raw backend details for the API response."""
        return {'details': details}

    def _build_share_server_view(self, request, share_server, detailed=False):
        """Build the (summary or detailed) view of one share server."""
        view = {
            'id': share_server.id,
            'project_id': share_server.project_id,
            'updated_at': share_server.updated_at,
            'status': share_server.status,
            'host': share_server.host,
            'share_network_name': share_server.share_network_name,
            'share_network_id': share_server.share_network_id,
        }
        if detailed:
            view['created_at'] = share_server.created_at
            view['backend_details'] = share_server.backend_details
        self.update_versioned_resource_dict(request, view, share_server)
        return view

    @common.ViewBuilder.versioned_method("2.51", "2.69")
    def add_share_network_subnet_id_field(
            self, context, share_server_dict, share_server):
        """In 2.70, share_network_subnet_id is dropped; it becomes a list."""
        subnet_ids = share_server['share_network_subnet_ids']
        share_server_dict['share_network_subnet_id'] = (
            subnet_ids[0] if subnet_ids else None)

    @common.ViewBuilder.versioned_method("2.49")
    def add_is_auto_deletable_and_identifier_fields(
            self, context, share_server_dict, share_server):
        share_server_dict['is_auto_deletable'] = (
            share_server['is_auto_deletable'])
        share_server_dict['identifier'] = share_server['identifier']

    @common.ViewBuilder.versioned_method("2.57")
    def add_task_state_and_source_server_fields(
            self, context, share_server_dict, share_server):
        share_server_dict['task_state'] = share_server['task_state']
        share_server_dict['source_share_server_id'] = (
            share_server['source_share_server_id'])

    @common.ViewBuilder.versioned_method("2.63")
    def add_sec_service_update_fields(
            self, context, share_server_dict, share_server):
        share_server_dict['security_service_update_support'] = share_server[
            'security_service_update_support']

    @common.ViewBuilder.versioned_method("2.70")
    def add_share_network_subnet_ids_and_network_allocation_update_support(
            self, context, share_server_dict, share_server):
        # Subnet ids are sorted for a stable API response.
        share_server_dict['share_network_subnet_ids'] = sorted(
            share_server['share_network_subnet_ids'])
        share_server_dict['network_allocation_update_support'] = (
            share_server['network_allocation_update_support'])

    @common.ViewBuilder.versioned_method("2.90")
    def add_encryption_key_ref_field(
            self, context, share_server_dict, share_server):
        share_server_dict['encryption_key_ref'] = share_server[
            'encryption_key_ref']
# --- archived module: manila/api/views/share_snapshot_export_locations.py ---
# Copyright (c) 2016 Hitachi Data Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from manila.api import common


class ViewBuilder(common.ViewBuilder):
    """Model snapshot export location API responses."""

    _collection_name = "share_snapshot_export_locations"

    def _get_view(self, request, export_location, detail=False):
        """Build one export location view; admins see extra fields."""
        context = request.environ['manila.context']
        el_view = {
            'id': export_location['id'],
            'path': export_location['path'],
            'links': self._get_links(request, export_location['id']),
        }
        # Instance linkage and the admin-only flag are hidden from
        # regular users.
        if context.is_admin:
            el_view['share_snapshot_instance_id'] = (
                export_location['share_snapshot_instance_id'])
            el_view['is_admin_only'] = export_location['is_admin_only']
        if detail:
            el_view['created_at'] = export_location['created_at']
            el_view['updated_at'] = export_location['updated_at']
        return {'share_snapshot_export_location': el_view}

    def list_export_locations(self, request, export_locations):
        """List views, hiding admin-only locations from non-admins."""
        context = request.environ['manila.context']
        visible = [
            self._get_view(request, el)['share_snapshot_export_location']
            for el in export_locations
            if context.is_admin or not el['is_admin_only']
        ]
        return {self._collection_name: visible}

    def detail_export_location(self, request, export_location):
        """Detailed view of a single export location."""
        return self._get_view(request, export_location, detail=True)


# --- archived module: manila/api/views/share_snapshot_instances.py ---
# Copyright 2016 Huawei Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (see above).

class ViewBuilder(common.ViewBuilder):  # noqa: F811 - separate archived module
    """Model share snapshot instance API responses."""

    _collection_name = 'snapshot_instances'

    def summary_list(self, request, instances):
        """Summary view of a list of share snapshot instances."""
        return self._list_view(self.summary, request, instances)

    def detail_list(self, request, instances):
        """Detailed view of a list of share snapshot instances."""
        return self._list_view(self.detail, request, instances)

    def summary(self, request, instance):
        """Generic, non-detailed view of a share snapshot instance."""
        return {
            'snapshot_instance': {
                'id': instance.get('id'),
                'snapshot_id': instance.get('snapshot_id'),
                'status': instance.get('status'),
            },
        }

    def detail(self, request, instance):
        """Detailed view of a single share snapshot instance."""
        view = {
            'id': instance.get('id'),
            'snapshot_id': instance.get('snapshot_id'),
            'created_at': instance.get('created_at'),
            'updated_at': instance.get('updated_at'),
            'status': instance.get('status'),
            # share_id lives on the related share instance model.
            'share_id': instance.get('share_instance').get('share_id'),
            'share_instance_id': instance.get('share_instance_id'),
            'progress': instance.get('progress'),
            'provider_location': instance.get('provider_location'),
        }
        return {'snapshot_instance': view}

    def _list_view(self, func, request, instances):
        """Provide a view for a list of share snapshot instances."""
        items = [func(request, instance)['snapshot_instance']
                 for instance in instances]
        return {self._collection_name: items}
from manila.api import common


class ViewBuilder(common.ViewBuilder):
    """Model share snapshot API responses as python dictionaries."""

    _collection_name = 'snapshots'
    _detail_version_modifiers = [
        "add_provider_location_field",
        "add_project_and_user_ids",
        "add_metadata",
    ]

    def summary_list(self, request, snapshots, count=None):
        """Show a list of share snapshots without many details."""
        return self._list_view(self.summary, request, snapshots, count)

    def detail_list(self, request, snapshots, count=None):
        """Detailed view of a list of share snapshots."""
        return self._list_view(self.detail, request, snapshots, count)

    def summary(self, request, snapshot):
        """Generic, non-detailed view of a share snapshot."""
        return {
            'snapshot': {
                'id': snapshot.get('id'),
                'name': snapshot.get('display_name'),
                'links': self._get_links(request, snapshot['id']),
            },
        }

    def detail(self, request, snapshot):
        """Detailed view of a single share snapshot."""
        view = {
            'id': snapshot.get('id'),
            'share_id': snapshot.get('share_id'),
            'share_size': snapshot.get('share_size'),
            'created_at': snapshot.get('created_at'),
            # status reported to users is the aggregate over instances.
            'status': snapshot.get('aggregate_status'),
            'name': snapshot.get('display_name'),
            'description': snapshot.get('display_description'),
            'size': snapshot.get('size'),
            'share_proto': snapshot.get('share_proto'),
            'links': self._get_links(request, snapshot['id']),
        }
        self.update_versioned_resource_dict(request, view, snapshot)
        return {'snapshot': view}

    @common.ViewBuilder.versioned_method("2.12")
    def add_provider_location_field(self, context, snapshot_dict, snapshot):
        # NOTE(xyang): Only retrieve provider_location for admin.
        if context.is_admin:
            snapshot_dict['provider_location'] = snapshot.get(
                'provider_location')

    @common.ViewBuilder.versioned_method("2.17")
    def add_project_and_user_ids(self, context, snapshot_dict, snapshot):
        snapshot_dict['user_id'] = snapshot.get('user_id')
        snapshot_dict['project_id'] = snapshot.get('project_id')

    @common.ViewBuilder.versioned_method("2.73")
    def add_metadata(self, context, snapshot_dict, snapshot):
        # Flatten the key/value rows into a plain mapping; empty when the
        # snapshot carries no metadata.
        rows = snapshot.get('share_snapshot_metadata')
        snapshot_dict['metadata'] = (
            {row['key']: row['value'] for row in rows} if rows else {})

    def _list_view(self, func, request, snapshots, count=None):
        """Provide a view for a list of share snapshots."""
        items = [func(request, snapshot)['snapshot']
                 for snapshot in snapshots]
        links = self._get_collection_links(
            request, snapshots, self._collection_name)
        result = {self._collection_name: items}
        if count is not None:
            result['count'] = count
        if links:
            result['share_snapshots_links'] = links
        return result

    def detail_access(self, request, access):
        """View of a single snapshot access rule."""
        rule_view = {
            'id': access['id'],
            'access_type': access['access_type'],
            'access_to': access['access_to'],
            'state': access['state'],
        }
        return {'snapshot_access': rule_view}

    def detail_list_access(self, request, access_list):
        """View of a list of snapshot access rules."""
        return {
            'snapshot_access_list': [
                self.detail_access(request, access)['snapshot_access']
                for access in access_list
            ],
        }
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from manila.api import common
from manila.common import constants
from manila import policy


class ViewBuilder(common.ViewBuilder):
    """Model share API responses as python dictionaries."""

    _collection_name = 'shares'
    _detail_version_modifiers = [
        "add_snapshot_support_field",
        "add_task_state_field",
        "modify_share_type_field",
        "remove_export_locations",
        "add_access_rules_status_field",
        "add_replication_fields",
        "add_user_id",
        "add_create_share_from_snapshot_support_field",
        "add_revert_to_snapshot_support_field",
        "translate_access_rules_status",
        "add_share_group_fields",
        "add_mount_snapshot_support_field",
        "add_progress_field",
        "translate_creating_from_snapshot_status",
        "add_share_recycle_bin_field",
        "add_source_backup_id_field",
        "add_encryption_key_ref_field",
    ]

    def summary_list(self, request, shares, count=None):
        """Show a list of shares without many details."""
        return self._list_view(self.summary, request, shares, count)

    def detail_list(self, request, shares, count=None):
        """Detailed view of a list of shares."""
        return self._list_view(self.detail, request, shares, count)

    def summary(self, request, share):
        """Generic, non-detailed view of a share."""
        return {
            'share': {
                'id': share.get('id'),
                'name': share.get('display_name'),
                'links': self._get_links(request, share['id']),
            },
        }

    def detail(self, request, share):
        """Detailed view of a single share."""
        context = request.environ['manila.context']

        # Flatten metadata rows into a plain mapping.
        meta_rows = share.get('share_metadata')
        metadata = ({row['key']: row['value'] for row in meta_rows}
                    if meta_rows else {})

        export_locations = share.get('export_locations', [])
        instance = share.get('instance') or {}

        # Prefer the human-readable type name; fall back to its id.
        if instance.get('share_type'):
            share_type = instance.get('share_type').get('name')
        else:
            share_type = instance.get('share_type_id')

        view = {
            'id': share.get('id'),
            'size': share.get('size'),
            'availability_zone': instance.get('availability_zone'),
            'created_at': share.get('created_at'),
            'status': share.get('status'),
            'name': share.get('display_name'),
            'description': share.get('display_description'),
            'project_id': share.get('project_id'),
            'snapshot_id': share.get('snapshot_id'),
            'share_network_id': instance.get('share_network_id'),
            'share_proto': share.get('share_proto'),
            'export_location': share.get('export_location'),
            'metadata': metadata,
            'share_type': share_type,
            'volume_type': share_type,
            'links': self._get_links(request, share['id']),
            'is_public': share.get('is_public'),
            'export_locations': export_locations,
        }
        self.update_versioned_resource_dict(request, view, share)

        # Host/server placement details are restricted to host admins.
        if policy.check_is_host_admin(context):
            view['share_server_id'] = instance.get('share_server_id')
            view['host'] = instance.get('host')
        return {'share': view}

    @common.ViewBuilder.versioned_method("2.2")
    def add_snapshot_support_field(self, context, share_dict, share):
        share_dict['snapshot_support'] = share.get('snapshot_support')

    @common.ViewBuilder.versioned_method("2.5")
    def add_task_state_field(self, context, share_dict, share):
        share_dict['task_state'] = share.get('task_state')

    @common.ViewBuilder.versioned_method("2.6")
    def modify_share_type_field(self, context, share_dict, share):
        # From 2.6 on, 'share_type' carries the id and the name moves to
        # 'share_type_name'.
        instance = share.get('instance') or {}
        type_name = None
        if instance.get('share_type'):
            type_name = instance.get('share_type').get('name')
        share_dict.update({
            'share_type_name': type_name,
            'share_type': instance.get('share_type_id'),
        })

    @common.ViewBuilder.versioned_method("2.9")
    def remove_export_locations(self, context, share_dict, share):
        # Export locations got their own endpoint in 2.9.
        share_dict.pop('export_location')
        share_dict.pop('export_locations')

    @common.ViewBuilder.versioned_method("2.10")
    def add_access_rules_status_field(self, context, share_dict, share):
        share_dict['access_rules_status'] = share.get('access_rules_status')

    @common.ViewBuilder.versioned_method('2.11')
    def add_replication_fields(self, context, share_dict, share):
        share_dict['replication_type'] = share.get('replication_type')
        share_dict['has_replicas'] = share['has_replicas']

    @common.ViewBuilder.versioned_method("2.16")
    def add_user_id(self, context, share_dict, share):
        share_dict['user_id'] = share.get('user_id')

    @common.ViewBuilder.versioned_method("2.24")
    def add_create_share_from_snapshot_support_field(
            self, context, share_dict, share):
        share_dict['create_share_from_snapshot_support'] = share.get(
            'create_share_from_snapshot_support')

    @common.ViewBuilder.versioned_method("2.27")
    def add_revert_to_snapshot_support_field(
            self, context, share_dict, share):
        share_dict['revert_to_snapshot_support'] = share.get(
            'revert_to_snapshot_support')

    @common.ViewBuilder.versioned_method("2.10", "2.27")
    def translate_access_rules_status(self, context, share_dict, share):
        # Pre-2.28 clients expect the legacy 'out_of_sync' status name.
        if (share['access_rules_status'] ==
                constants.SHARE_INSTANCE_RULES_SYNCING):
            share_dict['access_rules_status'] = constants.STATUS_OUT_OF_SYNC

    @common.ViewBuilder.versioned_method("2.31")
    def add_share_group_fields(self, context, share_dict, share):
        share_dict['share_group_id'] = share.get('share_group_id')
        share_dict['source_share_group_snapshot_member_id'] = share.get(
            'source_share_group_snapshot_member_id')

    @common.ViewBuilder.versioned_method("2.32")
    def add_mount_snapshot_support_field(self, context, share_dict, share):
        share_dict['mount_snapshot_support'] = share.get(
            'mount_snapshot_support')

    def _list_view(self, func, request, shares, count=None):
        """Provide a view for a list of shares."""
        items = [func(request, share)['share'] for share in shares]
        links = self._get_collection_links(
            request, shares, self._collection_name)
        result = {'shares': items}
        if count is not None:
            result['count'] = count
        if links:
            result['shares_links'] = links
        return result

    @common.ViewBuilder.versioned_method("1.0", "2.53")
    def translate_creating_from_snapshot_status(
            self, context, share_dict, share):
        # Pre-2.54 clients only know the plain 'creating' status.
        if share.get('status') == constants.STATUS_CREATING_FROM_SNAPSHOT:
            share_dict['status'] = constants.STATUS_CREATING

    @common.ViewBuilder.versioned_method("2.54")
    def add_progress_field(self, context, share_dict, share):
        share_dict['progress'] = share.get('progress')

    @common.ViewBuilder.versioned_method("2.69")
    def add_share_recycle_bin_field(self, context, share_dict, share):
        share_dict['is_soft_deleted'] = share.get('is_soft_deleted')
        share_dict['scheduled_to_be_deleted_at'] = share.get(
            'scheduled_to_be_deleted_at')

    @common.ViewBuilder.versioned_method("2.80")
    def add_source_backup_id_field(self, context, share_dict, share):
        share_dict['source_backup_id'] = share.get('source_backup_id')

    @common.ViewBuilder.versioned_method("2.90")
    def add_encryption_key_ref_field(self, context, share_dict, share):
        share_dict['encryption_key_ref'] = share.get('instance', {}).get(
            'encryption_key_ref')
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from manila.api import common


class ViewBuilder(common.ViewBuilder):
    """Model transfer API responses as a python dictionary."""

    _collection_name = "share-transfer"

    # NOTE: the no-op __init__ that only called super().__init__() was
    # removed; Python inherits the base constructor automatically.

    def summary_list(self, request, transfers):
        """Show a list of transfers without many details."""
        return self._list_view(self.summary, request, transfers)

    def detail_list(self, request, transfers):
        """Detailed view of a list of transfers."""
        return self._list_view(self.detail, request, transfers)

    def summary(self, request, transfer):
        """Generic, non-detailed view of a transfer."""
        return {
            'transfer': {
                'id': transfer['id'],
                'name': transfer['display_name'],
                'resource_type': transfer['resource_type'],
                'resource_id': transfer['resource_id'],
                'links': self._get_links(request, transfer['id']),
            },
        }

    def detail(self, request, transfer):
        """Detailed view of a single transfer."""
        return {
            'transfer': {
                'id': transfer.get('id'),
                'created_at': transfer.get('created_at'),
                'name': transfer.get('display_name'),
                'resource_type': transfer['resource_type'],
                'resource_id': transfer['resource_id'],
                'source_project_id': transfer['source_project_id'],
                'destination_project_id': transfer.get(
                    'destination_project_id'),
                'accepted': transfer['accepted'],
                'expires_at': transfer.get('expires_at'),
                'links': self._get_links(request, transfer['id']),
            },
        }

    def create(self, request, transfer):
        """Detailed view of a single transfer when created.

        Includes the one-time auth key, which is only exposed at creation.
        """
        create_body = self.detail(request, transfer)
        create_body['transfer']['auth_key'] = transfer.get('auth_key')
        return create_body

    def _list_view(self, func, request, transfers):
        """Provide a view for a list of transfers."""
        transfers_list = [func(request, transfer)['transfer']
                          for transfer in transfers]
        transfers_links = self._get_collection_links(
            request, transfers, self._collection_name)
        transfers_dict = {'transfers': transfers_list}
        if transfers_links:
            transfers_dict['transfers_links'] = transfers_links
        return transfers_dict
from oslo_config import cfg from manila.api import common from manila.common import constants from manila.share import share_types CONF = cfg.CONF class ViewBuilder(common.ViewBuilder): _collection_name = 'types' _detail_version_modifiers = [ "add_is_public_attr_core_api_like", "add_is_public_attr_extension_like", "add_inferred_optional_extra_specs", "add_description_attr", "add_is_default_attr" ] def show(self, request, share_type, brief=False): """Trim away extraneous share type attributes.""" extra_specs = share_type.get('extra_specs', {}) required_extra_specs = share_type.get('required_extra_specs', {}) # Remove non-tenant-visible extra specs in a non-admin context if not request.environ['manila.context'].is_admin: extra_spec_names = share_types.get_tenant_visible_extra_specs() extra_specs = self._filter_extra_specs(extra_specs, extra_spec_names) required_extra_specs = self._filter_extra_specs( required_extra_specs, extra_spec_names) trimmed = { 'id': share_type.get('id'), 'name': share_type.get('name'), 'extra_specs': extra_specs, 'required_extra_specs': required_extra_specs, } self.update_versioned_resource_dict(request, trimmed, share_type) if brief: return trimmed else: return dict(volume_type=trimmed, share_type=trimmed) @common.ViewBuilder.versioned_method("2.7") def add_is_public_attr_core_api_like(self, context, share_type_dict, share_type): share_type_dict['share_type_access:is_public'] = share_type.get( 'is_public', True) @common.ViewBuilder.versioned_method("1.0", "2.6") def add_is_public_attr_extension_like(self, context, share_type_dict, share_type): share_type_dict['os-share-type-access:is_public'] = share_type.get( 'is_public', True) @common.ViewBuilder.versioned_method("2.24") def add_inferred_optional_extra_specs(self, context, share_type_dict, share_type): # NOTE(cknight): The admin sees exactly which extra specs have been set # on the type, but in order to know how shares of a type will behave, # the user must also see the default values of 
any public extra specs # that aren't explicitly set on the type. if not context.is_admin: for extra_spec in constants.ExtraSpecs.INFERRED_OPTIONAL_MAP: if extra_spec not in share_type_dict['extra_specs']: share_type_dict['extra_specs'][extra_spec] = ( constants.ExtraSpecs.INFERRED_OPTIONAL_MAP[extra_spec]) def index(self, request, share_types): """Index over trimmed share types.""" share_types_list = [self.show(request, share_type, True) for share_type in share_types] return dict(volume_types=share_types_list, share_types=share_types_list) def share_type_access(self, request, share_type): """Return a dictionary view of the projects with access to type.""" projects = [ {'share_type_id': share_type['id'], 'project_id': project_id} for project_id in share_type['projects'] ] return {'share_type_access': projects} def _filter_extra_specs(self, extra_specs, valid_keys): return {key: value for key, value in extra_specs.items() if key in valid_keys} @common.ViewBuilder.versioned_method("2.41") def add_description_attr(self, context, share_type_dict, share_type): share_type_dict['description'] = share_type.get('description') @common.ViewBuilder.versioned_method("2.46") def add_is_default_attr(self, context, share_type_dict, share_type): is_default = False type_name = share_type.get('name') default_name = CONF.default_share_type if default_name is not None: is_default = default_name == type_name share_type_dict['is_default'] = is_default ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/api/views/versions.py0000664000175000017500000000424200000000000020451 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack LLC. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import re import urllib def get_view_builder(req): return ViewBuilder(req.application_url) _URL_SUFFIX = {'v1.0': 'v1', 'v2.0': 'v2'} class ViewBuilder(object): def __init__(self, base_url): """Initialize ViewBuilder. :param base_url: url of the root wsgi application """ self.base_url = base_url def build_versions(self, versions): views = [self._build_version(versions[key]) for key in sorted(list(versions.keys()))] return dict(versions=views) def _build_version(self, version): view = copy.deepcopy(version) view['links'] = self._build_links(version) return view def _build_links(self, version_data): """Generate a container of links that refer to the provided version.""" links = copy.deepcopy(version_data.get('links', {})) version = _URL_SUFFIX.get(version_data['id']) links.append({'rel': 'self', 'href': self._generate_href(version=version)}) return links def _generate_href(self, version='v1', path=None): """Create a URL that refers to a specific version_number.""" base_url = self._get_base_url_without_version() href = urllib.parse.urljoin(base_url, version).rstrip('/') + '/' if path: href += path.lstrip('/') return href def _get_base_url_without_version(self): """Get the base URL with out the /v1 suffix.""" return re.sub('v[1-9]+/?$', '', self.base_url) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8496726 manila-21.0.0/manila/cmd/0000775000175000017500000000000000000000000015062 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315554.0 manila-21.0.0/manila/cmd/__init__.py0000664000175000017500000000000000000000000017161 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/cmd/api.py0000664000175000017500000000330000000000000016201 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Starter script for manila OS API.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from manila.common import config # Need to register global_opts # noqa from manila import service from manila import utils from manila import version CONF = cfg.CONF def main(): log.register_options(CONF) gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) config.verify_share_protocols() config.set_lib_defaults() log.setup(CONF, "manila") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) launcher = service.process_launcher() server = service.WSGIService('osapi_share') launcher.launch_service(server, workers=server.workers or 1) launcher.wait() if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/cmd/data.py0000664000175000017500000000271400000000000016351 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for manila data copy service.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from manila.common import config # Need to register global_opts # noqa from manila import service from manila import utils from manila import version CONF = cfg.CONF def main(): log.register_options(CONF) gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) log.setup(CONF, "manila") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) server = service.Service.create(binary='manila-data') service.serve(server) service.wait() if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/cmd/manage.py0000664000175000017500000005302300000000000016667 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Interactive shell based on Django: # # Copyright (c) 2005, the Lawrence Journal-World # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Django nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ CLI interface for manila management. 
""" import os import sys import yaml from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from manila.common import config # Need to register global_opts # noqa from manila import context from manila import db from manila.db import migration from manila.i18n import _ from manila import utils from manila import version CONF = cfg.CONF ALLOWED_OUTPUT_FORMATS = ['table', 'json', 'yaml'] HOST_UPDATE_HELP_MSG = ("A fully qualified host string is of the format " "'HostA@BackendB#PoolC'. Provide only the host name " "(ex: 'HostA') to update the hostname part of " "the host string. Provide only the " "host name and backend name (ex: 'HostA@BackendB') to " "update the host and backend names.") HOST_UPDATE_CURRENT_HOST_HELP = ("Current share host name. %s" % HOST_UPDATE_HELP_MSG) HOST_UPDATE_NEW_HOST_HELP = "New share host name. %s" % HOST_UPDATE_HELP_MSG LIST_OUTPUT_FORMAT_HELP = ("Format to be used to print the output (table, " "json, yaml). Defaults to 'table'") SHARE_SERVERS_UPDATE_HELP = ("List of share servers to be updated, separated " "by commas.") SHARE_SERVERS_UPDATE_CAPABILITIES_HELP = ( "List of share server capabilities to be updated, separated by commas.") SHARE_DELETE_HELP = ("Share ID to be deleted.") # Decorators for actions def args(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator class ListCommand(object): def list_json(self, resource_name, resource_list): resource_list = {resource_name: resource_list} object_list = jsonutils.dumps(resource_list, indent=4) print(object_list) def list_yaml(self, resource_name, resource_list): resource_list = {resource_name: resource_list} data_yaml = yaml.dump(resource_list) print(data_yaml) def list_table(self, resource_name, resource_list): print_format = "{0:<16} {1:<36} {2:<16} {3:<10} {4:<5} {5:<10}" print(print_format.format( *[k.capitalize().replace( '_', ' ') for k in 
resource_list[0].keys()])) for resource in resource_list: # Print is not transforming into a string, so let's ensure it # happens resource['updated_at'] = str(resource['updated_at']) print(print_format.format(*resource.values())) def _check_format_output(self, format_output): if format_output not in ALLOWED_OUTPUT_FORMATS: print('Invalid output format specified. Defaulting to table.') return 'table' else: return format_output class ShellCommands(object): def bpython(self): """Runs a bpython shell. Falls back to Ipython/python shell if unavailable """ self.run('bpython') def ipython(self): """Runs an Ipython shell. Falls back to Python shell if unavailable """ self.run('ipython') def python(self): """Runs a python shell. Falls back to Python shell if unavailable """ self.run('python') @args('--shell', dest="shell", metavar='', help='Python shell') def run(self, shell=None): """Runs a Python interactive interpreter.""" if not shell: shell = 'bpython' if shell == 'bpython': try: import bpython bpython.embed() except ImportError: shell = 'ipython' if shell == 'ipython': try: from IPython import embed embed() except ImportError: # Ipython < 0.11 try: import IPython # Explicitly pass an empty list as arguments, because # otherwise IPython would use sys.argv from this script. shell = IPython.Shell.IPShell(argv=[]) shell.mainloop() except ImportError: # no IPython module shell = 'python' if shell == 'python': import code try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', # because we already know 'readline' was imported successfully. import rlcompleter # noqa readline.parse_and_bind("tab:complete") code.interact() @args('--path', required=True, help='Script path') def script(self, path): """Runs the script from the specified path with flags set properly. 
arguments: path """ exec(compile(open(path).read(), path, 'exec'), locals(), globals()) class HostCommands(object): """List hosts.""" @args('zone', nargs='?', default=None, help='Availability Zone (default: %(default)s)') def list(self, zone=None): """Show a list of all physical hosts. Filter by zone. args: [zone] """ print("%-25s\t%-15s" % (_('host'), _('zone'))) ctxt = context.get_admin_context() services = db.service_get_all(ctxt) if zone: services = [ s for s in services if s['availability_zone']['name'] == zone] hosts = [] for srv in services: if not [h for h in hosts if h['host'] == srv['host']]: hosts.append(srv) for h in hosts: print("%-25s\t%-15s" % (h['host'], h['availability_zone']['name'])) class DbCommands(object): """Class for managing the database.""" def __init__(self): pass @args('version', nargs='?', default=None, help='Database version') def sync(self, version=None): """Sync the database up to the most recent version.""" return migration.upgrade(version) def version(self): """Print the current database version.""" print(migration.version()) # NOTE(imalinovskiy): # Manila init migration hardcoded here, # because alembic has strange behaviour: # downgrade base = downgrade from head(162a3e673105) -> base(162a3e673105) # = downgrade from 162a3e673105 -> (empty) [ERROR] # downgrade 162a3e673105 = downgrade from head(162a3e673105)->162a3e673105 # = do nothing [OK] @args('version', nargs='?', default='162a3e673105', help='Version to downgrade') def downgrade(self, version=None): """Downgrade database to the given version.""" return migration.downgrade(version) @args('--message', help='Revision message') @args('--autogenerate', help='Autogenerate migration from schema') def revision(self, message, autogenerate): """Generate new migration.""" return migration.revision(message, autogenerate) @args('version', nargs='?', default=None, help='Version to stamp version table with') def stamp(self, version=None): """Stamp the version table with the given 
version.""" return migration.stamp(version) @args('age_in_days', type=int, default=0, nargs='?', help='A non-negative integer, denoting the age of soft-deleted ' 'records in number of days. 0 can be specified to purge all ' 'soft-deleted rows, default is %(default)d.') def purge(self, age_in_days): """Purge soft-deleted records older than a given age.""" age_in_days = int(age_in_days) if age_in_days < 0: print(_("Must supply a non-negative value for age.")) exit(1) ctxt = context.get_admin_context() db.purge_deleted_records(ctxt, age_in_days) class VersionCommands(object): """Class for exposing the codebase version.""" def list(self): print(version.version_string()) def __call__(self): self.list() class ConfigCommands(object): """Class for exposing the flags defined by flag_file(s).""" def list(self): for key, value in CONF.items(): if value is not None: print('%s = %s' % (key, value)) class GetLogCommands(object): """Get logging information.""" def errors(self): """Get all of the errors from the log files.""" error_found = 0 if CONF.log_dir: logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')] for file in logs: log_file = os.path.join(CONF.log_dir, file) lines = [line.strip() for line in open(log_file, "r")] lines.reverse() print_name = 0 for index, line in enumerate(lines): if line.find(" ERROR ") > 0: error_found += 1 if print_name == 0: print(log_file + ":-") print_name = 1 print("Line %d : %s" % (len(lines) - index, line)) if error_found == 0: print("No errors in logfiles!") @args('num_entries', nargs='?', type=int, default=10, help='Number of entries to list (default: %(default)d)') def syslog(self, num_entries=10): """Get of the manila syslog events.""" entries = int(num_entries) count = 0 log_file = '' if os.path.exists('/var/log/syslog'): log_file = '/var/log/syslog' elif os.path.exists('/var/log/messages'): log_file = '/var/log/messages' else: print("Unable to find system log file!") sys.exit(1) lines = [line.strip() for line in 
open(log_file, "r")] lines.reverse() print("Last %s manila syslog entries:-" % (entries)) for line in lines: if line.find("manila") > 0: count += 1 print("%s" % (line)) if count == entries: break if count == 0: print("No manila entries in syslog!") class ServiceCommands(ListCommand): """Methods for managing services.""" @args('--format_output', required=False, default='table', help=LIST_OUTPUT_FORMAT_HELP) def list(self, format_output): """Show a list of all manila services.""" ctxt = context.get_admin_context() services = db.service_get_all(ctxt) format_output = self._check_format_output(format_output) services_list = [] for service in services: alive = utils.service_is_up(service) state = ":-)" if alive else "XXX" status = 'enabled' if service['disabled']: status = 'disabled' services_list.append({ 'binary': service['binary'], 'host': service['host'].partition('.')[0], 'zone': service['availability_zone']['name'], 'status': status, 'state': state, 'updated_at': str(service['updated_at']), }) method_list_name = f'list_{format_output}' getattr(self, method_list_name)('services', services_list) def cleanup(self): """Remove manila services reporting as 'down'.""" ctxt = context.get_admin_context() services = db.service_get_all(ctxt) for svc in services: if utils.service_is_up(svc): continue db.service_destroy(ctxt, svc['id']) print("Cleaned up service %s" % svc['host']) class ShareCommands(object): @staticmethod def _validate_hosts(current_host, new_host): err = None if '@' in current_host: if '#' in current_host and '#' not in new_host: err = "%(chost)s specifies a pool but %(nhost)s does not." elif '@' not in new_host: err = "%(chost)s specifies a backend but %(nhost)s does not." 
if err: print(err % {'chost': current_host, 'nhost': new_host}) sys.exit(1) @args('--currenthost', required=True, help=HOST_UPDATE_CURRENT_HOST_HELP) @args('--newhost', required=True, help=HOST_UPDATE_NEW_HOST_HELP) @args('--force', required=False, type=bool, default=False, help="Ignore validations.") def update_host(self, current_host, new_host, force=False): """Modify the host name associated with resources. Particularly to recover from cases where one has moved their Manila Share node, or modified their 'host' opt or their backend section name in the manila configuration file. Affects shares, share servers and share groups """ if not force: self._validate_hosts(current_host, new_host) ctxt = context.get_admin_context() updated = db.share_resources_host_update(ctxt, current_host, new_host) msg = ("Updated host of %(si_count)d share instances, " "%(sg_count)d share groups and %(ss_count)d share servers on " "%(chost)s to %(nhost)s.") msg_args = { 'si_count': updated['instances'], 'sg_count': updated['groups'], 'ss_count': updated['servers'], 'chost': current_host, 'nhost': new_host, } print(msg % msg_args) @args('--share_id', required=True, help=SHARE_DELETE_HELP) def delete(self, share_id): """Delete manila share from the database. This command is useful after a share's manager service has been decommissioned. 
""" ctxt = context.get_admin_context() share = db.share_get(ctxt, share_id) active_replicas = [] # We delete "active" replicas at the end for share_instance in share['instances']: if share_instance['replica_state'] == "active": active_replicas.append(share_instance) else: db.share_instance_delete(ctxt, share_instance['id']) for share_instance in active_replicas: db.share_instance_delete(ctxt, share_instance['id']) print("Deleted share instance %s" % share_instance['id']) # finally, clean up the share print("Deleted share %s" % share_id) class ShareServerCommands(object): @args('--share_servers', required=True, help=SHARE_SERVERS_UPDATE_HELP) @args('--capabilities', required=True, help=SHARE_SERVERS_UPDATE_CAPABILITIES_HELP) @args('--value', required=False, type=bool, default=False, help="If those capabilities will be enabled (True) or disabled " "(False)") def update_share_server_capabilities(self, share_servers, capabilities, value=False): """Update the share server capabilities. This method receives a list of share servers and capabilities in order to have it updated with the value specified. If the value was not specified the default is False. """ share_servers = [server.strip() for server in share_servers.split(",")] capabilities = [cap.strip() for cap in capabilities.split(",")] supported_capabilities = ['security_service_update_support', 'network_allocation_update_support'] values = dict() for capability in capabilities: if capability not in supported_capabilities: print("One or more capabilities are invalid for this " "operation. The supported capability(ies) is(are) %s." % supported_capabilities) sys.exit(1) values[capability] = value ctxt = context.get_admin_context() db.share_servers_update(ctxt, share_servers, values) print("The capability(ies) %s of the following share server(s)" " %s was(were) updated to %s." 
% (capabilities, share_servers, value)) CATEGORIES = { 'config': ConfigCommands, 'db': DbCommands, 'host': HostCommands, 'logs': GetLogCommands, 'service': ServiceCommands, 'share': ShareCommands, 'share_server': ShareServerCommands, 'shell': ShellCommands, 'version': VersionCommands } def methods_of(obj): """Get all callable methods of an object that don't start with underscore. Returns a list of tuples of the form (method_name, method). """ result = [] for i in dir(obj): if callable(getattr(obj, i)) and not i.startswith('_'): result.append((i, getattr(obj, i))) return result def add_command_parsers(subparsers): for category in CATEGORIES: command_object = CATEGORIES[category]() parser = subparsers.add_parser(category) parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser(action) action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) category_opt = cfg.SubCommandOpt('category', title='Command categories', handler=add_command_parsers) def get_arg_string(args): arg = None if args[0] == '-': # (Note)zhiteng: args starts with CONF.oparser.prefix_chars # is optional args. Notice that cfg module takes care of # actual ArgParser so prefix_chars is always '-'. 
if args[1] == '-': # This is long optional arg arg = args[2:] else: arg = args[1:] else: arg = args return arg def fetch_func_args(func): fn_args = [] for args, kwargs in getattr(func, 'args', []): arg = get_arg_string(args[0]) fn_args.append(getattr(CONF.category, arg)) return fn_args def main(): """Parse options and call the appropriate class/method.""" CONF.register_cli_opt(category_opt) script_name = sys.argv[0] if len(sys.argv) < 2: print(_("\nOpenStack manila version: %(version)s\n") % {'version': version.version_string()}) print(script_name + " category action []") print(_("Available categories:")) for category in CATEGORIES: print("\t%s" % category) sys.exit(2) try: log.register_options(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) log.setup(CONF, "manila") except cfg.ConfigFilesNotFoundError as e: cfg_files = e.config_files print(_("Failed to read configuration file(s): %s") % cfg_files) sys.exit(2) fn = CONF.category.action_fn fn_args = fetch_func_args(fn) fn(*fn_args) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/cmd/scheduler.py0000664000175000017500000000316500000000000017417 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Starter script for manila Scheduler.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from manila.common import config # Need to register global_opts # noqa from manila import service from manila import utils from manila import version CONF = cfg.CONF def main(): log.register_options(CONF) gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) log.setup(CONF, "manila") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) server = service.Service.create(binary='manila-scheduler', coordination=True) service.serve(server) service.wait() if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/cmd/share.py0000664000175000017500000000370000000000000016536 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # Copyright 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for manila Share.""" import eventlet eventlet.monkey_patch() import sys from oslo_config import cfg from oslo_log import log from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from manila.common import config # Need to register global_opts # noqa from manila import service from manila import utils from manila import version CONF = cfg.CONF def main(): log.register_options(CONF) gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='manila', version=version.version_string()) log.setup(CONF, "manila") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) launcher = service.process_launcher() if CONF.enabled_share_backends: for backend in CONF.enabled_share_backends: host = "%s@%s" % (CONF.host, backend) server = service.Service.create(host=host, service_name=backend, binary='manila-share', coordination=True) launcher.launch_service(server) else: server = service.Service.create(binary='manila-share') launcher.launch_service(server) launcher.wait() if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/cmd/status.py0000664000175000017500000000313300000000000016757 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys from oslo_config import cfg from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck from manila.i18n import _ class Checks(upgradecheck.UpgradeCommands): """Various upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ # The format of the check functions is to return an # oslo_upgradecheck.upgradecheck.Result # object with the appropriate # oslo_upgradecheck.upgradecheck.Code and details set. # If the check hits warnings or failures then those should be stored # in the returned Result's "details" attribute. The # summary will be rolled up at the end of the check() method. _upgrade_checks = ( (_('Policy File JSON to YAML Migration'), (common_checks.check_policy_json, {'conf': cfg.CONF})), ) def main(): return upgradecheck.main( cfg.CONF, project='manila', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8496726 manila-21.0.0/manila/common/0000775000175000017500000000000000000000000015607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/common/__init__.py0000664000175000017500000000000000000000000017706 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/common/client_auth.py0000664000175000017500000000641100000000000020462 0ustar00zuulzuul00000000000000# Copyright 2016 SAP SE # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from keystoneauth1 import loading as ks_loading from oslo_config import cfg from manila import exception from manila.i18n import _ CONF = cfg.CONF """Helper class to support keystone v2 and v3 for clients Builds auth and session context before instantiation of the actual client. In order to build this context a dedicated config group is needed to load all needed parameters dynamically. """ class AuthClientLoader(object): def __init__(self, client_class, cfg_group): self.client_class = client_class self.group = cfg_group self.admin_auth = None self.conf = CONF self.session = None self.auth_plugin = None @staticmethod def list_opts(group): """Generates a list of config option for a given group :param group: group name :return: list of auth default configuration """ opts = copy.deepcopy(ks_loading.get_session_conf_options()) opts.insert(0, ks_loading.get_auth_common_conf_options()[0]) for plugin_option in ks_loading.get_auth_plugin_conf_options( 'password'): found = False for option in opts: if option.name == plugin_option.name: found = True break if not found: opts.append(plugin_option) opts.sort(key=lambda x: x.name) return [(group, opts)] def _load_auth_plugin(self): if self.admin_auth: return self.admin_auth self.auth_plugin = ks_loading.load_auth_from_conf_options( CONF, self.group) if self.auth_plugin: return self.auth_plugin msg = _('Cannot load auth plugin for %s') % self.group raise exception.BadConfigurationException(reason=msg) def get_client(self, context, admin=False, **kwargs): """Get's the client with the correct auth/session context """ 
auth_plugin = None if not self.session: self.session = ks_loading.load_session_from_conf_options( self.conf, self.group) if admin or (context.is_admin and not context.auth_token): if not self.admin_auth: self.admin_auth = self._load_auth_plugin() auth_plugin = self.admin_auth else: # NOTE(mkoderer): Manila basically needs admin clients for # it's actions. If needed this must be enhanced later raise exception.ManilaException( _("Client (%s) is not flagged as admin") % self.group) return self.client_class(session=self.session, auth=auth_plugin, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/common/config.py0000664000175000017500000002206700000000000017435 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command-line flag library. Emulates gflags by wrapping cfg.ConfigOpts. The idea is to move fully to cfg eventually, and this wrapper is a stepping stone. 
""" import socket from oslo_config import cfg from oslo_log import log from oslo_middleware import cors from oslo_utils import netutils from manila.common import constants from manila import exception CONF = cfg.CONF log.register_options(CONF) core_opts = [ cfg.StrOpt('state_path', default='/var/lib/manila', help="Top-level directory for maintaining manila's state."), ] debug_opts = [ ] CONF.register_cli_opts(core_opts) CONF.register_cli_opts(debug_opts) global_opts = [ cfg.HostAddressOpt('my_ip', default=netutils.get_my_ipv4(), sample_default='', help='IP address of this host.'), cfg.StrOpt('scheduler_topic', default='manila-scheduler', help='The topic scheduler nodes listen on.'), cfg.StrOpt('share_topic', default='manila-share', help='The topic share nodes listen on.'), cfg.StrOpt('data_topic', default='manila-data', help='The topic data nodes listen on.'), cfg.BoolOpt('api_rate_limit', default=True, help='Whether to rate limit the API.'), cfg.ListOpt('osapi_share_ext_list', default=[], help='Specify list of extensions to load when using osapi_' 'share_extension option with manila.api.contrib.' 'select_extensions.'), cfg.ListOpt('osapi_share_extension', default=['manila.api.contrib.standard_extensions'], help='The osapi share extensions to load.'), cfg.StrOpt('scheduler_manager', default='manila.scheduler.manager.SchedulerManager', help='Full class name for the scheduler manager.'), cfg.StrOpt('share_manager', default='manila.share.manager.ShareManager', help='Full class name for the share manager.'), cfg.StrOpt('data_manager', default='manila.data.manager.DataManager', help='Full class name for the data manager.'), cfg.HostAddressOpt('host', default=socket.gethostname(), sample_default='', help='Name of this node. This can be an opaque ' 'identifier. 
It is not necessarily a hostname, ' 'FQDN, or IP address.'), # NOTE(vish): default to nova for compatibility with nova installs cfg.StrOpt('storage_availability_zone', default='nova', help='Availability zone of this node.'), cfg.StrOpt('default_share_type', help='Default share type to use.'), cfg.StrOpt('default_share_group_type', help='Default share group type to use.'), cfg.StrOpt('rootwrap_config', help='Path to the rootwrap configuration file to use for ' 'running commands as root.'), cfg.BoolOpt('monkey_patch', default=False, help='Whether to log monkey patching.'), cfg.ListOpt('monkey_patch_modules', default=[], help='List of modules or decorators to monkey patch.'), cfg.IntOpt('service_down_time', default=60, help='Maximum time since last check-in for up service.'), cfg.StrOpt('share_api_class', default='manila.share.api.API', help='The full class name of the share API class to use.'), cfg.StrOpt('auth_strategy', default='keystone', help='The strategy to use for auth. Supports noauth, keystone, ' 'and noauthv2.'), cfg.ListOpt('enabled_share_backends', help='A list of share backend names to use. These backend ' 'names should be backed by a unique [CONFIG] group ' 'with its options.'), cfg.ListOpt('enabled_share_protocols', default=['NFS', 'CIFS'], help="Specify list of protocols to be allowed for share " "creation. 
Available values are '%s'" % list(constants.SUPPORTED_SHARE_PROTOCOLS)), cfg.IntOpt('soft_deleted_share_retention_time', default=604800, help='Maximum time (in seconds) to keep a share in the recycle ' 'bin, it will be deleted automatically after this amount ' 'of time has elapsed.'), cfg.IntOpt('transfer_retention_time', default=300, help='Maximum time (in seconds) to keep a share in ' 'awaiting_transfer state, after timeout, the share will ' 'automatically be rolled back to the available state'), cfg.ListOpt('admin_only_metadata', default=constants.AdminOnlyMetadata.SCHEDULER_FILTERS, help='Metadata keys that should only be manipulated by ' 'administrators.'), cfg.ListOpt('driver_updatable_metadata', default=[], help='Metadata keys that will decide which share metadata ' '(element of the list is , ' 'i.e max_files) can be passed to share drivers as part ' 'of metadata create/update operations.'), cfg.ListOpt('driver_updatable_subnet_metadata', default=[], help='Metadata keys that will decide which share network ' 'subnet metadata (element of the list is ' ', e.g. 
pnfs) can be passed to ' 'share drivers as part of metadata create/update ' 'operations.'), cfg.BoolOpt('update_shares_status_on_ensure', default=True, help='Whether Manila should update the status of all shares ' 'within a backend during ongoing ensure_shares ' 'run.'), cfg.ListOpt('admin_only_el_metadata', default=constants.AdminOnlyMetadata.EXPORT_LOCATION_KEYS, help='Metadata keys for export locations that should only be ' 'manipulated by administrators.'), ] CONF.register_opts(global_opts) def verify_share_protocols(): """Perform verification of 'enabled_share_protocols'.""" msg = None supported_protocols = constants.SUPPORTED_SHARE_PROTOCOLS data = dict(supported=', '.join(supported_protocols)) if CONF.enabled_share_protocols: for share_proto in CONF.enabled_share_protocols: if share_proto.upper() not in supported_protocols: data.update({'share_proto': share_proto}) msg = ("Unsupported share protocol '%(share_proto)s' " "is set as enabled. Available values are " "%(supported)s. ") break else: msg = ("No share protocols were specified as enabled. " "Available values are %(supported)s. 
") if msg: msg += ("Please specify one or more protocols using " "configuration option 'enabled_share_protocols'.") raise exception.ManilaException(message=msg % data) def set_lib_defaults(): """Update default configuration options for external lib namespace""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-OpenStack-Request-ID', 'X-Openstack-Manila-Api-Version', 'X-OpenStack-Manila-API-Experimental', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id'], expose_headers=['X-Auth-Token', 'X-OpenStack-Request-ID', 'X-Openstack-Manila-Api-Version', 'X-OpenStack-Manila-API-Experimental', 'X-Subject-Token', 'X-Service-Token'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/common/constants.py0000664000175000017500000002710100000000000020176 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# The maximum value a signed INT type may have DB_MAX_INT = 0x7FFFFFFF # The maximum length a display field may have DB_DISPLAY_FIELDS_MAX_LENGTH = 255 # SHARE AND GENERAL STATUSES STATUS_CREATING = 'creating' STATUS_CREATING_FROM_SNAPSHOT = 'creating_from_snapshot' STATUS_DELETING = 'deleting' STATUS_DEFERRED_DELETING = 'deferred_deleting' STATUS_DELETED = 'deleted' STATUS_ERROR = 'error' STATUS_ERROR_DELETING = 'error_deleting' STATUS_ERROR_DEFERRED_DELETING = 'error_deferred_deleting' STATUS_AVAILABLE = 'available' STATUS_INACTIVE = 'inactive' STATUS_MANAGING = 'manage_starting' STATUS_MANAGE_ERROR = 'manage_error' STATUS_UNMANAGING = 'unmanage_starting' STATUS_MANAGE_ERROR_UNMANAGING = 'manage_error_unmanage_starting' STATUS_UNMANAGE_ERROR = 'unmanage_error' STATUS_UNMANAGED = 'unmanaged' STATUS_EXTENDING = 'extending' STATUS_EXTENDING_ERROR = 'extending_error' STATUS_SHRINKING = 'shrinking' STATUS_SHRINKING_ERROR = 'shrinking_error' STATUS_MIGRATING = 'migrating' STATUS_MIGRATING_TO = 'migrating_to' STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR = ( 'shrinking_possible_data_loss_error' ) STATUS_REPLICATION_CHANGE = 'replication_change' STATUS_RESTORING = 'restoring' STATUS_REVERTING = 'reverting' STATUS_REVERTING_ERROR = 'reverting_error' STATUS_AWAITING_TRANSFER = 'awaiting_transfer' STATUS_BACKUP_CREATING = 'backup_creating' STATUS_BACKUP_RESTORING = 'backup_restoring' STATUS_BACKUP_RESTORING_ERROR = 'backup_restoring_error' STATUS_ENSURING = 'ensuring' # Transfer resource type SHARE_RESOURCE_TYPE = 'share' SHARE_ACCESS_RESOURCE_TYPE = 'access_rule' # Access rule states ACCESS_STATE_QUEUED_TO_APPLY = 'queued_to_apply' ACCESS_STATE_QUEUED_TO_DENY = 'queued_to_deny' ACCESS_STATE_QUEUED_TO_UPDATE = 'queued_to_update' ACCESS_STATE_APPLYING = 'applying' ACCESS_STATE_DENYING = 'denying' ACCESS_STATE_UPDATING = 'updating' ACCESS_STATE_ACTIVE = 'active' ACCESS_STATE_ERROR = 'error' ACCESS_STATE_DELETED = 'deleted' # Share instance "access_rules_status" field values 
SHARE_INSTANCE_RULES_SYNCING = 'syncing' SHARE_INSTANCE_RULES_ERROR = 'error' # States/statuses for multiple resources STATUS_NEW = 'new' STATUS_OUT_OF_SYNC = 'out_of_sync' STATUS_ACTIVE = 'active' # Share server migration statuses STATUS_SERVER_MIGRATING = 'server_migrating' STATUS_SERVER_MIGRATING_TO = 'server_migrating_to' # Share server update statuses STATUS_SERVER_NETWORK_CHANGE = 'network_change' # Share network statuses STATUS_NETWORK_ACTIVE = 'active' STATUS_NETWORK_ERROR = 'error' STATUS_NETWORK_CHANGE = 'network_change' ACCESS_RULES_STATES = ( ACCESS_STATE_QUEUED_TO_APPLY, ACCESS_STATE_QUEUED_TO_DENY, ACCESS_STATE_QUEUED_TO_UPDATE, ACCESS_STATE_APPLYING, ACCESS_STATE_DENYING, ACCESS_STATE_UPDATING, ACCESS_STATE_ACTIVE, ACCESS_STATE_ERROR, ACCESS_STATE_DELETED, ) # Share and share server migration task states TASK_STATE_MIGRATION_STARTING = 'migration_starting' TASK_STATE_MIGRATION_IN_PROGRESS = 'migration_in_progress' TASK_STATE_MIGRATION_COMPLETING = 'migration_completing' TASK_STATE_MIGRATION_SUCCESS = 'migration_success' TASK_STATE_MIGRATION_ERROR = 'migration_error' TASK_STATE_MIGRATION_CANCELLED = 'migration_cancelled' TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS = 'migration_cancel_in_progress' TASK_STATE_MIGRATION_DRIVER_STARTING = 'migration_driver_starting' TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS = 'migration_driver_in_progress' TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE = 'migration_driver_phase1_done' # Share statuses used by data service and host assisted migration TASK_STATE_DATA_COPYING_STARTING = 'data_copying_starting' TASK_STATE_DATA_COPYING_IN_PROGRESS = 'data_copying_in_progress' TASK_STATE_DATA_COPYING_COMPLETING = 'data_copying_completing' TASK_STATE_DATA_COPYING_COMPLETED = 'data_copying_completed' TASK_STATE_DATA_COPYING_CANCELLED = 'data_copying_cancelled' TASK_STATE_DATA_COPYING_ERROR = 'data_copying_error' BACKUP_TYPE = "backup_type" BUSY_TASK_STATES = ( TASK_STATE_MIGRATION_STARTING, TASK_STATE_MIGRATION_IN_PROGRESS, 
TASK_STATE_MIGRATION_COMPLETING, TASK_STATE_MIGRATION_DRIVER_STARTING, TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, TASK_STATE_DATA_COPYING_STARTING, TASK_STATE_DATA_COPYING_IN_PROGRESS, TASK_STATE_DATA_COPYING_COMPLETING, TASK_STATE_DATA_COPYING_COMPLETED, ) BUSY_COPYING_STATES = ( TASK_STATE_DATA_COPYING_STARTING, TASK_STATE_DATA_COPYING_IN_PROGRESS, TASK_STATE_DATA_COPYING_COMPLETING, ) TRANSITIONAL_STATUSES = ( STATUS_CREATING, STATUS_DELETING, STATUS_MANAGING, STATUS_UNMANAGING, STATUS_EXTENDING, STATUS_SHRINKING, STATUS_MIGRATING, STATUS_MIGRATING_TO, STATUS_RESTORING, STATUS_REVERTING, STATUS_SERVER_MIGRATING, STATUS_SERVER_MIGRATING_TO, STATUS_BACKUP_RESTORING, STATUS_BACKUP_CREATING, STATUS_ENSURING, ) INVALID_SHARE_INSTANCE_STATUSES_FOR_ACCESS_RULE_UPDATES = ( TRANSITIONAL_STATUSES ) SUPPORTED_ENCRYPTION_TYPES = ['share', 'share_server'] SUPPORTED_SHARE_PROTOCOLS = ( 'NFS', 'CIFS', 'GLUSTERFS', 'HDFS', 'CEPHFS', 'MAPRFS') SECURITY_SERVICES_ALLOWED_TYPES = ['active_directory', 'ldap', 'kerberos'] LIKE_FILTER = ['name~', 'description~'] NFS_EXPORTS_FILE = '/etc/exports' NFS_EXPORTS_FILE_TEMP = '/var/lib/nfs/etab' MOUNT_FILE = '/etc/fstab' MOUNT_FILE_TEMP = '/etc/mtab' # Below represented ports are ranges (from, to) CIFS_PORTS = ( ("tcp", (445, 445)), ("tcp", (137, 139)), ("udp", (137, 139)), ("udp", (445, 445)), ) NFS_PORTS = ( ("tcp", (2049, 2049)), ("udp", (2049, 2049)), ) SSH_PORTS = ( ("tcp", (22, 22)), ) PING_PORTS = ( ("icmp", (-1, -1)), ) WINRM_PORTS = ( ("tcp", (5985, 5986)), ) SERVICE_INSTANCE_SECGROUP_DATA = ( CIFS_PORTS + NFS_PORTS + PING_PORTS + WINRM_PORTS) ACCESS_LEVEL_RW = 'rw' ACCESS_LEVEL_RO = 'ro' ACCESS_LEVELS = ( ACCESS_LEVEL_RW, ACCESS_LEVEL_RO, ) TASK_STATE_STATUSES = ( TASK_STATE_MIGRATION_STARTING, TASK_STATE_MIGRATION_IN_PROGRESS, TASK_STATE_MIGRATION_COMPLETING, TASK_STATE_MIGRATION_SUCCESS, TASK_STATE_MIGRATION_ERROR, TASK_STATE_MIGRATION_CANCELLED, TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, 
TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, TASK_STATE_DATA_COPYING_STARTING, TASK_STATE_DATA_COPYING_IN_PROGRESS, TASK_STATE_DATA_COPYING_COMPLETING, TASK_STATE_DATA_COPYING_COMPLETED, TASK_STATE_DATA_COPYING_CANCELLED, TASK_STATE_DATA_COPYING_ERROR, None, ) SERVER_TASK_STATE_STATUSES = ( TASK_STATE_MIGRATION_STARTING, TASK_STATE_MIGRATION_IN_PROGRESS, TASK_STATE_MIGRATION_COMPLETING, TASK_STATE_MIGRATION_SUCCESS, TASK_STATE_MIGRATION_ERROR, TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS, TASK_STATE_MIGRATION_CANCELLED, TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, None, ) SHARE_SERVER_STATUSES = ( STATUS_ACTIVE, STATUS_ERROR, STATUS_DELETING, STATUS_CREATING, STATUS_MANAGING, STATUS_UNMANAGING, STATUS_UNMANAGE_ERROR, STATUS_MANAGE_ERROR, STATUS_INACTIVE, STATUS_SERVER_MIGRATING, STATUS_SERVER_MIGRATING_TO, STATUS_SERVER_NETWORK_CHANGE, ) SHARE_NETWORK_STATUSES = ( STATUS_NETWORK_ACTIVE, STATUS_NETWORK_ERROR, STATUS_NETWORK_CHANGE, ) REPLICA_STATE_ACTIVE = 'active' REPLICA_STATE_IN_SYNC = 'in_sync' REPLICA_STATE_OUT_OF_SYNC = 'out_of_sync' REPLICATION_TYPE_READABLE = 'readable' REPLICATION_TYPE_WRITABLE = 'writable' REPLICATION_TYPE_DR = 'dr' POLICY_EXTEND_BEYOND_MAX_SHARE_SIZE = 'extend_beyond_max_share_size_spec' RESOURCE_ACTION_DELETE = 'delete' # delete, soft-delete, unmanage RESOURCE_ACTION_SHOW = 'show' RESOURCE_LOCK_RESOURCE_TYPES = ( SHARE_RESOURCE_TYPE, SHARE_ACCESS_RESOURCE_TYPE, ) RESOURCE_LOCK_RESOURCE_ACTIONS = ( RESOURCE_ACTION_DELETE, RESOURCE_ACTION_SHOW, ) RESOURCE_LOCK_ACTIONS_MAPPING = { SHARE_RESOURCE_TYPE: [RESOURCE_ACTION_DELETE], SHARE_ACCESS_RESOURCE_TYPE: [RESOURCE_ACTION_DELETE, RESOURCE_ACTION_SHOW], } DISALLOWED_STATUS_WHEN_LOCKING_SHARES = ( STATUS_DELETING, STATUS_ERROR_DELETING, STATUS_UNMANAGING, STATUS_MANAGE_ERROR_UNMANAGING, STATUS_UNMANAGE_ERROR, STATUS_UNMANAGED, # not possible, future proofing STATUS_DELETED, # not possible, future proofing ) DISALLOWED_STATUS_WHEN_LOCKING_ACCESS_RULES = ( 
ACCESS_STATE_QUEUED_TO_DENY, ACCESS_STATE_DENYING, ACCESS_STATE_ERROR, ACCESS_STATE_DELETED, ) SHARE_LOCKED_BY_ACCESS_LOCK_REASON = 'Locked by access lock: %(lock_id)s' class ExtraSpecs(object): # Extra specs key names DRIVER_HANDLES_SHARE_SERVERS = "driver_handles_share_servers" SNAPSHOT_SUPPORT = "snapshot_support" REPLICATION_TYPE_SPEC = "replication_type" CREATE_SHARE_FROM_SNAPSHOT_SUPPORT = "create_share_from_snapshot_support" REVERT_TO_SNAPSHOT_SUPPORT = "revert_to_snapshot_support" MOUNT_SNAPSHOT_SUPPORT = "mount_snapshot_support" MOUNT_POINT_NAME_SUPPORT = "mount_point_name_support" AVAILABILITY_ZONES = "availability_zones" ENCRYPTION_SUPPORT = "encryption_support" PROVISIONING_MAX_SHARE_SIZE = "provisioning:max_share_size" PROVISIONING_MIN_SHARE_SIZE = "provisioning:min_share_size" PROVISIONING_MAX_SHARE_EXTEND_SIZE = "provisioning:max_share_extend_size" PROVISIONING_MOUNT_POINT_PREFIX = "provisioning:mount_point_prefix" # Extra specs containers REQUIRED = ( DRIVER_HANDLES_SHARE_SERVERS, ) OPTIONAL = ( SNAPSHOT_SUPPORT, CREATE_SHARE_FROM_SNAPSHOT_SUPPORT, REVERT_TO_SNAPSHOT_SUPPORT, REPLICATION_TYPE_SPEC, MOUNT_SNAPSHOT_SUPPORT, MOUNT_POINT_NAME_SUPPORT, AVAILABILITY_ZONES, ENCRYPTION_SUPPORT, PROVISIONING_MAX_SHARE_SIZE, PROVISIONING_MIN_SHARE_SIZE, PROVISIONING_MAX_SHARE_EXTEND_SIZE, PROVISIONING_MOUNT_POINT_PREFIX, ) # NOTE(cknight): Some extra specs are necessary parts of the Manila API and # should be visible to non-admin users. REQUIRED specs are user-visible, as # are a handful of community-agreed standardized OPTIONAL ones. TENANT_VISIBLE = REQUIRED + OPTIONAL BOOLEAN = ( DRIVER_HANDLES_SHARE_SERVERS, SNAPSHOT_SUPPORT, CREATE_SHARE_FROM_SNAPSHOT_SUPPORT, REVERT_TO_SNAPSHOT_SUPPORT, MOUNT_SNAPSHOT_SUPPORT, ) # NOTE(cknight): Some extra specs are optional, but a nominal (typically # False, but may be non-boolean) default value for each is still needed # when creating shares. 
INFERRED_OPTIONAL_MAP = { SNAPSHOT_SUPPORT: False, CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: False, REVERT_TO_SNAPSHOT_SUPPORT: False, MOUNT_SNAPSHOT_SUPPORT: False, } REPLICATION_TYPES = ('writable', 'readable', 'dr') ENCRYPTION_TYPES = ('share', 'share_server') class AdminOnlyMetadata(object): AFFINITY_KEY = "__affinity_same_host" ANTI_AFFINITY_KEY = "__affinity_different_host" PREFERRED_KEY = "preferred" SCHEDULER_FILTERS = [ AFFINITY_KEY, ANTI_AFFINITY_KEY, ] EXPORT_LOCATION_KEYS = [ PREFERRED_KEY, ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8496726 manila-21.0.0/manila/compute/0000775000175000017500000000000000000000000015773 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/compute/__init__.py0000664000175000017500000000217600000000000020112 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import oslo_config.cfg import oslo_utils.importutils _compute_opts = [ oslo_config.cfg.StrOpt('compute_api_class', default='manila.compute.nova.API', help='The full class name of the ' 'Compute API class to use.'), ] oslo_config.cfg.CONF.register_opts(_compute_opts) def API(): importutils = oslo_utils.importutils compute_api_class = oslo_config.cfg.CONF.compute_api_class cls = importutils.import_class(compute_api_class) return cls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/compute/nova.py0000664000175000017500000001715700000000000017323 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests to Nova. 
""" import functools from keystoneauth1 import loading as ks_loading from novaclient import client as nova_client from novaclient import exceptions as nova_exception from novaclient import utils from oslo_config import cfg from manila.common import client_auth from manila.common.config import core_opts from manila.db import base from manila import exception from manila.i18n import _ NOVA_GROUP = 'nova' AUTH_OBJ = None nova_opts = [ cfg.StrOpt('api_microversion', default='2.10', help='Version of Nova API to be used.'), cfg.StrOpt('endpoint_type', default='publicURL', choices=['publicURL', 'internalURL', 'adminURL', 'public', 'internal', 'admin'], help='Endpoint type to be used with nova client calls.'), cfg.StrOpt('region_name', help='Region name for connecting to nova.'), ] CONF = cfg.CONF CONF.register_opts(core_opts) CONF.register_opts(nova_opts, NOVA_GROUP) ks_loading.register_session_conf_options(CONF, NOVA_GROUP) ks_loading.register_auth_conf_options(CONF, NOVA_GROUP) def list_opts(): return client_auth.AuthClientLoader.list_opts(NOVA_GROUP) def novaclient(context): global AUTH_OBJ if not AUTH_OBJ: AUTH_OBJ = client_auth.AuthClientLoader( client_class=nova_client.Client, cfg_group=NOVA_GROUP) return AUTH_OBJ.get_client(context, version=CONF[NOVA_GROUP].api_microversion, endpoint_type=CONF[NOVA_GROUP].endpoint_type, region_name=CONF[NOVA_GROUP].region_name) def _untranslate_server_summary_view(server): """Maps keys for servers summary view.""" d = {} d['id'] = server.id d['status'] = server.status d['flavor'] = server.flavor.get('id') or server.flavor['original_name'] d['name'] = server.name d['image'] = server.image['id'] d['created'] = server.created d['addresses'] = server.addresses d['networks'] = server.networks d['tenant_id'] = server.tenant_id d['user_id'] = server.user_id d['security_groups'] = getattr(server, 'security_groups', []) return d def _to_dict(obj): if isinstance(obj, dict): return obj elif hasattr(obj, 'to_dict'): return obj.to_dict() else: 
return obj.__dict__ def translate_server_exception(method): """Transforms the exception for the instance. Note: keeps its traceback intact. """ @functools.wraps(method) def wrapper(self, ctx, instance_id, *args, **kwargs): try: res = method(self, ctx, instance_id, *args, **kwargs) return res except nova_exception.ClientException as e: if isinstance(e, nova_exception.NotFound): raise exception.InstanceNotFound(instance_id=instance_id) elif isinstance(e, nova_exception.BadRequest): raise exception.InvalidInput(reason=str(e)) else: raise exception.ManilaException(e) return wrapper class API(base.Base): """API for interacting with novaclient.""" def server_create(self, context, name, image, flavor, key_name=None, user_data=None, security_groups=None, block_device_mapping=None, block_device_mapping_v2=None, nics=None, availability_zone=None, instance_count=1, admin_pass=None, meta=None): return _untranslate_server_summary_view( novaclient(context).servers.create( name, image, flavor, userdata=user_data, security_groups=security_groups, key_name=key_name, block_device_mapping=block_device_mapping, block_device_mapping_v2=block_device_mapping_v2, nics=nics, availability_zone=availability_zone, min_count=instance_count, admin_pass=admin_pass, meta=meta) ) def server_delete(self, context, instance): novaclient(context).servers.delete(instance) @translate_server_exception def server_get(self, context, instance_id): return _untranslate_server_summary_view( novaclient(context).servers.get(instance_id) ) def server_get_by_name_or_id(self, context, instance_name_or_id): try: server = utils.find_resource( novaclient(context).servers, instance_name_or_id) except nova_exception.CommandError: # we did not find the server in the current tenant, # and proceed searching in all tenants try: server = utils.find_resource( novaclient(context).servers, instance_name_or_id, all_tenants=True) except nova_exception.CommandError as e: msg = _("Failed to get Nova VM. 
%s") % e raise exception.ManilaException(msg) return _untranslate_server_summary_view(server) @translate_server_exception def server_reboot(self, context, instance_id, soft_reboot=False): hardness = 'SOFT' if soft_reboot else 'HARD' novaclient(context).servers.reboot(instance_id, hardness) @translate_server_exception def instance_volume_attach(self, context, instance_id, volume_id, device=None): if device == 'auto': device = None return novaclient(context).volumes.create_server_volume(instance_id, volume_id, device) @translate_server_exception def instance_volume_detach(self, context, instance_id, att_id): return novaclient(context).volumes.delete_server_volume(instance_id, att_id) @translate_server_exception def instance_volumes_list(self, context, instance_id): volumes = novaclient(context).volumes.get_server_volumes(instance_id) # NOTE(pas-ha): Nova API 2.89 dropped 'id' field of the volume object, # so we use 'volumeId' field that is present in all API versions. return [volume.volumeId for volume in volumes] @translate_server_exception def server_update(self, context, instance_id, name): return _untranslate_server_summary_view( novaclient(context).servers.update(instance_id, name=name) ) def keypair_import(self, context, name, public_key): return novaclient(context).keypairs.create(name, public_key) def keypair_delete(self, context, keypair_id): novaclient(context).keypairs.delete(keypair_id) def keypair_list(self, context): return novaclient(context).keypairs.list() def add_security_group_to_server(self, context, server, security_group): return novaclient(context).servers.add_security_group(server, security_group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/context.py0000664000175000017500000001307200000000000016360 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. 
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """RequestContext: context for requests that persist through all of manila.""" import copy from oslo_context import context from oslo_db.sqlalchemy import enginefacade from oslo_utils import timeutils from manila.i18n import _ from manila import policy @enginefacade.transaction_context_provider class RequestContext(context.RequestContext): """Security context and request information. Represents the user taking a given action within the system. """ def __init__(self, user_id=None, project_id=None, is_admin=None, read_deleted="no", project_name=None, remote_address=None, timestamp=None, quota_class=None, service_catalog=None, **kwargs): """Initialize RequestContext. :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param kwargs: Extra arguments passed transparently to oslo_context.RequestContext. 
""" kwargs.setdefault('user_id', user_id) kwargs.setdefault('project_id', project_id) super().__init__(is_admin=is_admin, **kwargs) self.project_name = project_name if self.is_admin is None: self.is_admin = policy.check_is_admin(self) elif self.is_admin and 'admin' not in self.roles: self.roles.append('admin') # a "service" user's token will contain "service_roles" self.is_service = kwargs.get('service_roles') or False self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() elif isinstance(timestamp, str): timestamp = timeutils.parse_isotime(timestamp) self.timestamp = timestamp self.quota_class = quota_class if service_catalog: self.service_catalog = [s for s in service_catalog if s.get('type') in ('compute', 'volume')] else: self.service_catalog = [] def _get_read_deleted(self): return self._read_deleted def _set_read_deleted(self, read_deleted): if read_deleted not in ('no', 'yes', 'only'): raise ValueError(_("read_deleted can only be one of 'no', " "'yes' or 'only', not %r") % read_deleted) self._read_deleted = read_deleted def _del_read_deleted(self): del self._read_deleted read_deleted = property(_get_read_deleted, _set_read_deleted, _del_read_deleted) def to_dict(self): values = super().to_dict() values['user_id'] = self.user_id values['project_id'] = self.project_id values['project_name'] = self.project_name values['domain_id'] = self.domain_id values['read_deleted'] = self.read_deleted values['remote_address'] = self.remote_address values['timestamp'] = self.timestamp.isoformat() values['quota_class'] = self.quota_class values['service_catalog'] = self.service_catalog values['request_id'] = self.request_id return values @classmethod def from_dict(cls, values): return super().from_dict( values, user_id=values.get('user_id'), project_id=values.get('project_id'), project_name=values.get('project_name'), domain_id=values.get('domain_id'), read_deleted=values.get('read_deleted', 'no'), 
remote_address=values.get('remote_address'), timestamp=values.get('timestamp'), quota_class=values.get('quota_class'), service_catalog=values.get('service_catalog'), request_id=values.get('request_id'), is_admin=values.get('is_admin'), roles=values.get('roles'), auth_token=values.get('auth_token'), user_domain_id=values.get('user_domain_id'), project_domain_id=values.get('project_domain_id') ) def elevated(self, read_deleted=None, overwrite=False): """Return a version of this context with admin flag set.""" ctx = copy.deepcopy(self) ctx.is_admin = True if 'admin' not in ctx.roles: ctx.roles.append('admin') if read_deleted is not None: ctx.read_deleted = read_deleted return ctx def to_policy_values(self): policy = super(RequestContext, self).to_policy_values() policy['is_admin'] = self.is_admin return policy def get_admin_context(read_deleted="no"): return RequestContext(user_id=None, project_id=None, is_admin=True, read_deleted=read_deleted, overwrite=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/coordination.py0000664000175000017500000001473100000000000017367 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tooz Coordination and locking utilities.""" import inspect import decorator from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils from tooz import coordination from tooz import locking from manila import exception from manila.i18n import _ LOG = log.getLogger(__name__) coordination_opts = [ cfg.StrOpt('backend_url', secret=True, default='file://$state_path', help='The back end URL to use for distributed coordination.') ] CONF = cfg.CONF CONF.register_opts(coordination_opts, group='coordination') class Coordinator(object): """Tooz coordination wrapper. Coordination member id is created from concatenated `prefix` and `agent_id` parameters. :param str agent_id: Agent identifier :param str prefix: Used to provide member identifier with a meaningful prefix. """ def __init__(self, agent_id=None, prefix=''): self.coordinator = None self.agent_id = agent_id or uuidutils.generate_uuid() self.started = False self.prefix = prefix def start(self): """Connect to coordination back end.""" if self.started: return # NOTE(gouthamr): Tooz expects member_id as a byte string. member_id = (self.prefix + self.agent_id).encode('ascii') self.coordinator = coordination.get_coordinator( cfg.CONF.coordination.backend_url, member_id) self.coordinator.start(start_heart=True) self.started = True def stop(self): """Disconnect from coordination back end.""" msg = 'Stopped Coordinator (Agent ID: %(agent)s, prefix: %(prefix)s)' msg_args = {'agent': self.agent_id, 'prefix': self.prefix} if self.started: self.coordinator.stop() self.coordinator = None self.started = False LOG.info(msg, msg_args) def get_lock(self, name): """Return a Tooz back end lock. :param str name: The lock name that is used to identify it across all nodes. 
""" # NOTE(gouthamr): Tooz expects lock name as a byte string lock_name = (self.prefix + name).encode('ascii') if self.started: return self.coordinator.get_lock(lock_name) else: raise exception.LockCreationFailed(_('Coordinator uninitialized.')) LOCK_COORDINATOR = Coordinator(prefix='manila-') class Lock(locking.Lock): """Lock with dynamic name. :param str lock_name: Lock name. :param dict lock_data: Data for lock name formatting. :param coordinator: Coordinator object to use when creating lock. Defaults to the global coordinator. Using it like so:: with Lock('mylock'): ... ensures that only one process at a time will execute code in context. Lock name can be formatted using Python format string syntax:: Lock('foo-{share.id}, {'share': ...,}') Available field names are keys of lock_data. """ def __init__(self, lock_name, lock_data=None, coordinator=None): super(Lock, self).__init__(str(id(self))) lock_data = lock_data or {} self.coordinator = coordinator or LOCK_COORDINATOR self.blocking = True self.lock = self._prepare_lock(lock_name, lock_data) def _prepare_lock(self, lock_name, lock_data): if not isinstance(lock_name, str): raise ValueError(_('Not a valid string: %s') % lock_name) return self.coordinator.get_lock(lock_name.format(**lock_data)) def acquire(self, blocking=None): """Attempts to acquire lock. :param blocking: If True, blocks until the lock is acquired. If False, returns right away. Otherwise, the value is used as a timeout value and the call returns maximum after this number of seconds. :return: returns true if acquired (false if not) :rtype: bool """ blocking = self.blocking if blocking is None else blocking return self.lock.acquire(blocking=blocking) def release(self): """Attempts to release lock. The behavior of releasing a lock which was not acquired in the first place is undefined. """ self.lock.release() def synchronized(lock_name, blocking=True, coordinator=None): """Synchronization decorator. :param str lock_name: Lock name. 
:param blocking: If True, blocks until the lock is acquired. If False, raises exception when not acquired. Otherwise, the value is used as a timeout value and if lock is not acquired after this number of seconds exception is raised. :param coordinator: Coordinator object to use when creating lock. Defaults to the global coordinator. :raises tooz.coordination.LockAcquireFailed: if lock is not acquired Decorating a method like so:: @synchronized('mylock') def foo(self, *args): ... ensures that only one process will execute the foo method at a time. Different methods can share the same lock:: @synchronized('mylock') def foo(self, *args): ... @synchronized('mylock') def bar(self, *args): ... This way only one of either foo or bar can be executing at a time. Lock name can be formatted using Python format string syntax:: @synchronized('{f_name}-{shr.id}-{snap[name]}') def foo(self, shr, snap): ... Available field names are: decorated function parameters and `f_name` as a decorated function name. 
""" @decorator.decorator def _synchronized(f, *a, **k): call_args = inspect.getcallargs(f, *a, **k) call_args['f_name'] = f.__name__ lock = Lock(lock_name, call_args, coordinator) with lock(blocking): LOG.debug('Lock "%(name)s" acquired by "%(function)s".', {'name': lock_name, 'function': f.__name__}) return f(*a, **k) return _synchronized ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8496726 manila-21.0.0/manila/data/0000775000175000017500000000000000000000000015230 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/data/__init__.py0000664000175000017500000000000000000000000017327 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/data/backup_driver.py0000664000175000017500000000326400000000000020427 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudification GmbH. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base class for all backup drivers.""" class BackupDriver(object): def __init__(self): super(BackupDriver, self).__init__() # This flag indicates if backup driver implement backup, restore and # delete operation by its own or uses data manager. 
self.use_data_manager = True # This flag indicates if the backup driver supports out of place # restores to a share other then the source of a given backup. self.restore_to_target_support = False def backup(self, context, backup, share): """Start a backup of a specified share.""" return def restore(self, context, backup, share): """Restore a saved backup.""" return def delete(self, context, backup): """Delete a saved backup.""" return def get_backup_progress(self, context, backup, share): """Fetch the progress of a in progress backup""" return def get_restore_progress(self, context, backup, share): """Fetch the progress of a in progress restore""" return ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8536725 manila-21.0.0/manila/data/drivers/0000775000175000017500000000000000000000000016706 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/data/drivers/__init__.py0000664000175000017500000000000000000000000021005 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/data/drivers/nfs.py0000664000175000017500000000533700000000000020056 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudification GmbH. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Implementation of a backup service that uses NFS storage as the backend.""" from oslo_config import cfg from manila.data import backup_driver nfsbackup_service_opts = [ cfg.StrOpt('backup_mount_template', default='mount -vt %(proto)s %(options)s %(export)s %(path)s', help='The template for mounting NFS shares.'), cfg.StrOpt('backup_unmount_template', default='umount -v %(path)s', help='The template for unmounting NFS shares.'), cfg.StrOpt('backup_mount_export', help='NFS backup export location in hostname:path, ' 'ipv4addr:path, or "[ipv6addr]:path" format.'), cfg.StrOpt('backup_mount_proto', default='nfs', help='Mount Protocol for mounting NFS shares'), cfg.StrOpt('backup_mount_options', default='', help='Mount options passed to the NFS client. See NFS ' 'man page for details.'), ] CONF = cfg.CONF CONF.register_opts(nfsbackup_service_opts) class NFSBackupDriver(backup_driver.BackupDriver): """Provides backup, restore and delete using NFS supplied repository.""" def __init__(self): self.backup_mount_export = CONF.backup_mount_export self.backup_mount_template = CONF.backup_mount_template self.backup_unmount_template = CONF.backup_unmount_template self.backup_mount_options = CONF.backup_mount_options self.backup_mount_proto = CONF.backup_mount_proto super(NFSBackupDriver, self).__init__() def get_backup_info(self, backup): """Get backup info of a specified backup.""" mount_template = ( self.backup_mount_template % { 'proto': self.backup_mount_proto, 'options': self.backup_mount_options, 'export': self.backup_mount_export, 'path': '%(path)s', } ) unmount_template = self.backup_unmount_template backup_info = { 'mount': mount_template, 'unmount': unmount_template, } return backup_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/data/helper.py0000664000175000017500000003147400000000000017072 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Helper class for Data Service operations.""" import os from oslo_config import cfg from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import access as access_manager from manila.share import rpcapi as share_rpc from manila import utils LOG = log.getLogger(__name__) data_helper_opts = [ cfg.IntOpt( 'data_access_wait_access_rules_timeout', default=180, help="Time to wait for access rules to be allowed/denied on backends " "when migrating a share (seconds)."), cfg.ListOpt('data_node_access_ips', default=[], help="A list of the IPs of the node interface connected to " "the admin network. Used for allowing access to the " "mounting shares. Default is []."), cfg.StrOpt( 'data_node_access_cert', help="The certificate installed in the data node in order to " "allow access to certificate authentication-based shares."), cfg.StrOpt( 'data_node_access_admin_user', help="The admin user name registered in the security service in order " "to allow access to user authentication-based shares."), cfg.DictOpt( 'data_node_mount_options', default={}, help="Mount options to be included in the mount command for share " "protocols. 
Use dictionary format, example: " "{'nfs': '-o nfsvers=3', 'cifs': '-o user=foo,pass=bar'}"), ] CONF = cfg.CONF CONF.register_opts(data_helper_opts) class DataServiceHelper(object): def __init__(self, context, db, share): self.db = db self.share = share self.context = context self.share_rpc = share_rpc.ShareAPI() self.access_helper = access_manager.ShareInstanceAccess(self.db, None) self.wait_access_rules_timeout = ( CONF.data_access_wait_access_rules_timeout) def deny_access_to_data_service(self, access_ref_list, share_instance): self._change_data_access_to_instance( share_instance, access_ref_list, deny=True) # NOTE(ganso): Cleanup methods do not throw exceptions, since the # exceptions that should be thrown are the ones that call the cleanup def cleanup_data_access(self, access_ref_list, share_instance): try: self.deny_access_to_data_service( access_ref_list, share_instance) except Exception: LOG.warning("Could not cleanup access rule of share %s.", self.share['id']) def cleanup_temp_folder(self, mount_path, instance_id): try: path = os.path.join(mount_path, instance_id) if os.path.exists(path): os.rmdir(path) self._check_dir_not_exists(path) except Exception: LOG.warning("Could not cleanup instance %(instance_id)s " "temporary folders for data copy of " "share %(share_id)s.", { 'instance_id': instance_id, 'share_id': self.share['id']}) def cleanup_unmount_temp_folder(self, unmount_info, mount_path): share_instance_id = unmount_info.get('share_instance_id') try: self.unmount_share_instance_or_backup(unmount_info, mount_path) except Exception: LOG.warning("Could not unmount folder of instance" " %(instance_id)s for data copy of " "share %(share_id)s.", { 'instance_id': share_instance_id, 'share_id': self.share['id']}) def _change_data_access_to_instance( self, instance, accesses=None, deny=False): self.access_helper.get_and_update_share_instance_access_rules_status( self.context, status=constants.SHARE_INSTANCE_RULES_SYNCING, share_instance_id=instance['id']) if 
deny: if accesses is None: accesses = [] else: if not isinstance(accesses, list): accesses = [accesses] access_filters = {'access_id': [a['id'] for a in accesses]} updates = {'state': constants.ACCESS_STATE_QUEUED_TO_DENY} self.access_helper.get_and_update_share_instance_access_rules( self.context, filters=access_filters, updates=updates, share_instance_id=instance['id']) self.share_rpc.update_access(self.context, instance) utils.wait_for_access_update( self.context, self.db, instance, self.wait_access_rules_timeout) def allow_access_to_data_service( self, share_instance, connection_info_src, dest_share_instance=None, connection_info_dest=None): allow_access_to_destination_instance = (dest_share_instance and connection_info_dest) # NOTE(ganso): intersect the access type compatible with both instances if allow_access_to_destination_instance: access_mapping = {} for a_type, protocols in ( connection_info_src['access_mapping'].items()): for proto in protocols: if (a_type in connection_info_dest['access_mapping'] and proto in connection_info_dest['access_mapping'][a_type]): access_mapping[a_type] = access_mapping.get(a_type, []) access_mapping[a_type].append(proto) else: access_mapping = connection_info_src['access_mapping'] access_list = self._get_access_entries_according_to_mapping( access_mapping) access_ref_list = [] for access in access_list: values = { 'share_id': self.share['id'], 'access_type': access['access_type'], 'access_level': access['access_level'], 'access_to': access['access_to'], } # Check if the rule being added already exists. 
If so, we will # remove it to prevent conflicts old_access_list = self.db.share_access_get_all_by_type_and_access( self.context, self.share['id'], access['access_type'], access['access_to']) if old_access_list: self._change_data_access_to_instance( share_instance, old_access_list, deny=True) access_ref = self.db.share_instance_access_create( self.context, values, share_instance['id']) self._change_data_access_to_instance(share_instance) if allow_access_to_destination_instance: access_ref = self.db.share_instance_access_create( self.context, values, dest_share_instance['id']) self._change_data_access_to_instance(dest_share_instance) # The access rule ref used here is a regular Share Access Map, # instead of a Share Instance Access Map. access_ref_list.append(access_ref) return access_ref_list def _get_access_entries_according_to_mapping(self, access_mapping): access_list = [] # NOTE(ganso): protocol is not relevant here because we previously # used it to filter the access types we are interested in for access_type, protocols in access_mapping.items(): access_to_list = [] if access_type.lower() == 'cert' and CONF.data_node_access_cert: access_to_list.append(CONF.data_node_access_cert) elif access_type.lower() == 'ip': ips = CONF.data_node_access_ips if ips: if not isinstance(ips, list): ips = [ips] access_to_list.extend(ips) elif (access_type.lower() == 'user' and CONF.data_node_access_admin_user): access_to_list.append(CONF.data_node_access_admin_user) else: msg = _("Unsupported access type provided: %s.") % access_type raise exception.ShareDataCopyFailed(reason=msg) if not access_to_list: msg = _("Configuration for Data node mounting access type %s " "has not been set.") % access_type raise exception.ShareDataCopyFailed(reason=msg) for access_to in access_to_list: access = { 'access_type': access_type, 'access_level': constants.ACCESS_LEVEL_RW, 'access_to': access_to, } access_list.append(access) return access_list @utils.retry(retry_param=exception.NotFound, 
interval=1, retries=10, backoff_rate=1) def _check_dir_exists(self, path): if not os.path.exists(path): raise exception.NotFound("Folder %s could not be found." % path) @utils.retry(retry_param=exception.Found, interval=1, retries=10, backoff_rate=1) def _check_dir_not_exists(self, path): if os.path.exists(path): raise exception.Found("Folder %s was found." % path) def mount_share_instance_or_backup(self, mount_info, mount_path): mount_point = mount_info.get('mount_point') mount_template = mount_info.get('mount') share_instance_id = mount_info.get('share_instance_id') backup = mount_info.get('backup') restore = mount_info.get('restore') backup_id = mount_info.get('backup_id') if share_instance_id: path = os.path.join(mount_path, share_instance_id) else: path = '' # overwrite path in case different mount point is explicitly provided if mount_point and mount_point != path: path = mount_point if share_instance_id: share_instance = self.db.share_instance_get( self.context, share_instance_id, with_share_data=True) options = CONF.data_node_mount_options options = {k.lower(): v for k, v in options.items()} proto_options = options.get( share_instance['share_proto'].lower(), '') else: # For backup proto_options are included in mount_template proto_options = '' if not os.path.exists(path): os.makedirs(path) self._check_dir_exists(path) mount_command = mount_template % {'path': path, 'options': proto_options} utils.execute(*(mount_command.split()), run_as_root=True) if backup: # we create new folder, which named with backup_id. 
To distinguish # different backup data at mount points backup_folder = os.path.join(path, backup_id) if not os.path.exists(backup_folder): os.makedirs(backup_folder) self._check_dir_exists(backup_folder) if restore: # backup_folder should exist after mount, else backup is # already deleted backup_folder = os.path.join(path, backup_id) if not os.path.exists(backup_folder): raise exception.ShareBackupNotFound(backup_id=backup_id) def unmount_share_instance_or_backup(self, unmount_info, mount_path): mount_point = unmount_info.get('mount_point') unmount_template = unmount_info.get('unmount') share_instance_id = unmount_info.get('share_instance_id') if share_instance_id: path = os.path.join(mount_path, share_instance_id) else: path = '' # overwrite path in case different mount point is explicitly provided if mount_point and mount_point != path: path = mount_point unmount_command = unmount_template % {'path': path} utils.execute(*(unmount_command.split()), run_as_root=True) try: if os.path.exists(path): os.rmdir(path) self._check_dir_not_exists(path) except Exception: LOG.warning("Folder %s could not be removed.", path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/data/manager.py0000664000175000017500000010216300000000000017217 0ustar00zuulzuul00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Data Service """ import os import shutil from oslo_config import cfg from oslo_config import types from oslo_log import log from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from manila.common import constants from manila import context from manila.data import helper from manila.data import utils as data_utils from manila import exception from manila import manager from manila import quota from manila.share import rpcapi as share_rpc from manila import utils QUOTAS = quota.QUOTAS from manila.i18n import _ LOG = log.getLogger(__name__) backup_opts = [ cfg.StrOpt( 'backup_driver', default='manila.data.drivers.nfs.NFSBackupDriver', help='Driver to use for backups.'), cfg.StrOpt( 'backup_share_mount_template', default='mount -vt %(proto)s %(options)s %(export)s %(path)s', help="The template for mounting shares during backup. Must specify " "the executable with all necessary parameters for the protocol " "supported. 'proto' template element may not be required if " "included in the command. 'export' and 'path' template elements " "are required. It is advisable to separate different commands " "per backend."), cfg.StrOpt( 'backup_share_unmount_template', default='umount -v %(path)s', help="The template for unmounting shares during backup. Must " "specify the executable with all necessary parameters for the " "protocol supported. 'path' template element is required. It is " "advisable to separate different commands per backend."), cfg.ListOpt( 'backup_ignore_files', default=['lost+found'], help="List of files and folders to be ignored when backing up " "shares. Items should be names (not including any path)."), cfg.Opt( 'backup_protocol_access_mapping', type=types.Dict(types.List(types.String(), bounds=True)), default={'ip': ['nfs']}, help="Protocol access mapping for backup. 
Should be a " "dictionary comprised of " "{'access_type1': ['share_proto1', 'share_proto2']," " 'access_type2': ['share_proto2', 'share_proto3']}."), ] data_opts = [ cfg.StrOpt( 'mount_tmp_location', default='/tmp/', help="Temporary path to create and mount shares during migration."), cfg.StrOpt( 'backup_mount_tmp_location', default='/tmp/', help="Temporary path to create and mount backup during share backup."), cfg.BoolOpt( 'check_hash', default=False, help="Chooses whether hash of each file should be checked on data " "copying."), cfg.IntOpt( 'backup_continue_update_interval', default=10, help='This value, specified in seconds, determines how often ' 'the data manager will poll to perform the next steps of ' 'backup such as fetch the progress of backup.'), cfg.IntOpt( 'restore_continue_update_interval', default=10, help='This value, specified in seconds, determines how often ' 'the data manager will poll to perform the next steps of ' 'restore such as fetch the progress of restore.') ] CONF = cfg.CONF CONF.register_opts(data_opts) CONF.register_opts(backup_opts) class DataManager(manager.Manager): """Receives requests to handle data and sends responses.""" RPC_API_VERSION = '1.1' def __init__(self, service_name=None, *args, **kwargs): super(DataManager, self).__init__(*args, **kwargs) self.backup_driver = importutils.import_object(CONF.backup_driver) self.busy_tasks_shares = {} self.service_id = None def init_host(self, service_id=None): self.service_id = service_id ctxt = context.get_admin_context() shares = self.db.share_get_all(ctxt) for share in shares: if share['task_state'] in constants.BUSY_COPYING_STATES: self.db.share_update( ctxt, share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) def migration_start(self, context, ignore_list, share_id, share_instance_id, dest_share_instance_id, connection_info_src, connection_info_dest): LOG.debug( "Received request to migrate share content from share instance " "%(instance_id)s to instance 
%(dest_instance_id)s.", {'instance_id': share_instance_id, 'dest_instance_id': dest_share_instance_id}) share_ref = self.db.share_get(context, share_id) share_instance_ref = self.db.share_instance_get( context, share_instance_id, with_share_data=True) share_rpcapi = share_rpc.ShareAPI() mount_path = CONF.mount_tmp_location try: copy = data_utils.Copy( os.path.join(mount_path, share_instance_id), os.path.join(mount_path, dest_share_instance_id), ignore_list, CONF.check_hash) info_src = { 'share_id': share_ref['id'], 'share_instance_id': share_instance_id, 'mount': connection_info_src['mount'], 'unmount': connection_info_src['unmount'], 'access_mapping': connection_info_src.get( 'access_mapping', {}), 'mount_point': os.path.join(mount_path, share_instance_id), } info_dest = { 'share_id': None, 'share_instance_id': dest_share_instance_id, 'mount': connection_info_dest['mount'], 'unmount': connection_info_dest['unmount'], 'access_mapping': connection_info_dest.get( 'access_mapping', {}), 'mount_point': os.path.join(mount_path, dest_share_instance_id), } self._copy_share_data(context, copy, info_src, info_dest) except exception.ShareDataCopyCancelled: share_rpcapi.migration_complete( context, share_instance_ref, dest_share_instance_id) return except Exception: self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) msg = _("Failed to copy contents from instance %(src)s to " "instance %(dest)s.") % {'src': share_instance_id, 'dest': dest_share_instance_id} LOG.exception(msg) share_rpcapi.migration_complete( context, share_instance_ref, dest_share_instance_id) raise exception.ShareDataCopyFailed(reason=msg) finally: self.busy_tasks_shares.pop(share_id, None) LOG.info( "Completed copy operation of migrating share content from share " "instance %(instance_id)s to instance %(dest_instance_id)s.", {'instance_id': share_instance_id, 'dest_instance_id': dest_share_instance_id}) def data_copy_cancel(self, context, share_id): 
LOG.debug("Received request to cancel data copy " "of share %s.", share_id) copy = self.busy_tasks_shares.get(share_id) if copy: copy.cancel() else: msg = _("Data copy for migration of share %s cannot be cancelled" " at this moment.") % share_id LOG.error(msg) raise exception.InvalidShare(reason=msg) def data_copy_get_progress(self, context, share_id): LOG.debug("Received request to get data copy information " "of share %s.", share_id) copy = self.busy_tasks_shares.get(share_id) if copy: result = copy.get_progress() LOG.info("Obtained following data copy information " "of share %(share)s: %(info)s.", {'share': share_id, 'info': result}) return result else: msg = _("Migration of share %s data copy progress cannot be " "obtained at this moment.") % share_id LOG.error(msg) raise exception.InvalidShare(reason=msg) def _copy_share_data(self, context, copy, info_src, info_dest): """Copy share data between source and destination. e.g. During migration source and destination both are shares and during backup create, destination is backup location while during backup restore, source is backup location. 1. Mount source and destination. Create access rules. 2. Perform copy 3. Unmount source and destination. Cleanup access rules. 
""" mount_path = CONF.mount_tmp_location if info_src.get('share_id'): share_id = info_src['share_id'] elif info_dest.get('share_id'): share_id = info_dest['share_id'] else: msg = _("Share data copy failed because of undefined share.") LOG.exception(msg) raise exception.ShareDataCopyFailed(reason=msg) share_instance_src = None share_instance_dest = None if info_src['share_instance_id']: share_instance_src = self.db.share_instance_get( context, info_src['share_instance_id'], with_share_data=True) if info_dest['share_instance_id']: share_instance_dest = self.db.share_instance_get( context, info_dest['share_instance_id'], with_share_data=True) share = self.db.share_get(context, share_id) self.db.share_update( context, share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) helper_src = helper.DataServiceHelper(context, self.db, share) helper_dest = helper_src if share_instance_src: access_ref_src = helper_src.allow_access_to_data_service( share_instance_src, info_src, share_instance_dest, info_dest) access_ref_dest = access_ref_src elif share_instance_dest: access_ref_src = helper_src.allow_access_to_data_service( share_instance_dest, info_dest, share_instance_src, info_src) access_ref_dest = access_ref_src def _call_cleanups(items): for item in items: if 'unmount_src' == item: helper_src.cleanup_unmount_temp_folder( info_src, mount_path) elif 'temp_folder_src' == item: helper_src.cleanup_temp_folder( mount_path, info_src['share_instance_id']) elif 'temp_folder_dest' == item: helper_dest.cleanup_temp_folder( mount_path, info_dest['share_instance_id']) elif 'access_src' == item and share_instance_src: helper_src.cleanup_data_access( access_ref_src, share_instance_src) elif 'access_dest' == item and share_instance_dest: helper_dest.cleanup_data_access( access_ref_dest, share_instance_dest) try: helper_src.mount_share_instance_or_backup(info_src, mount_path) except Exception: msg = _("Share data copy failed attempting to mount source " "at %s.") % 
info_src['mount_point'] LOG.exception(msg) _call_cleanups(['temp_folder_src', 'access_dest', 'access_src']) raise exception.ShareDataCopyFailed(reason=msg) try: helper_dest.mount_share_instance_or_backup(info_dest, mount_path) except Exception: msg = _("Share data copy failed attempting to mount destination " "at %s.") % info_dest['mount_point'] LOG.exception(msg) _call_cleanups(['temp_folder_dest', 'unmount_src', 'temp_folder_src', 'access_dest', 'access_src']) raise exception.ShareDataCopyFailed(reason=msg) self.busy_tasks_shares[share['id']] = copy self.db.share_update( context, share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_IN_PROGRESS}) copied = False try: copy.run() self.db.share_update( context, share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETING}) if copy.get_progress()['total_progress'] == 100: copied = True except Exception: LOG.exception("Failed to copy data from source to destination " "%(src)s to %(dest)s.", {'src': info_src['mount_point'], 'dest': info_dest['mount_point']}) try: helper_src.unmount_share_instance_or_backup(info_src, mount_path) except Exception: LOG.exception("Could not unmount src %s after its data copy.", info_src['mount_point']) try: helper_dest.unmount_share_instance_or_backup(info_dest, mount_path) except Exception: LOG.exception("Could not unmount dest %s after its data copy.", info_dest['mount_point']) try: if info_src['share_instance_id']: helper_src.deny_access_to_data_service(access_ref_src, share_instance_src) except Exception: LOG.exception("Could not deny access to src instance %s after " "its data copy.", info_src['share_instance_id']) try: if info_dest['share_instance_id']: helper_dest.deny_access_to_data_service(access_ref_dest, share_instance_dest) except Exception: LOG.exception("Could not deny access to dest instance %s after " "its data copy.", info_dest['share_instance_id']) if copy and copy.cancelled: self.db.share_update( context, share['id'], {'task_state': 
constants.TASK_STATE_DATA_COPYING_CANCELLED}) LOG.warning("Copy of data from source " "%(src)s to destination %(dest)s was cancelled.", {'src': info_src['mount_point'], 'dest': info_dest['mount_point']}) raise exception.ShareDataCopyCancelled() elif not copied: msg = _("Copying data from source %(src)s " "to destination %(dest)s did not succeed.") % ( {'src': info_src['mount_point'], 'dest': info_dest['mount_point']}) raise exception.ShareDataCopyFailed(reason=msg) self.db.share_update( context, share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED}) LOG.debug("Copy of data from source %(src)s to destination " "%(dest)s was successful.", { 'src': info_src['mount_point'], 'dest': info_dest['mount_point']}) def create_backup(self, context, backup): share_id = backup['share_id'] backup_id = backup['id'] share = self.db.share_get(context, share_id) backup = self.db.share_backup_get(context, backup_id) self.db.share_backup_update(context, backup_id, {'host': self.host}) LOG.info('Create backup started, backup: %(backup_id)s ' 'share: %(share_id)s.', {'backup_id': backup_id, 'share_id': share_id}) try: if self.backup_driver.use_data_manager is False: self.backup_driver.backup(context, backup, share) else: self._run_backup(context, backup, share) except Exception as err: with excutils.save_and_reraise_exception(): LOG.error("Failed to create share backup %s by data driver.", backup['id']) self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_ERROR, 'fail_reason': err}) self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_AVAILABLE, 'progress': '100'}) LOG.info("Created share backup %s successfully.", backup_id) @periodic_task.periodic_task( spacing=CONF.backup_continue_update_interval) def create_backup_continue(self, context): filters = { 'status': 
constants.STATUS_CREATING, 'host': self.host, 'topic': CONF.data_topic } backups = self.db.share_backups_get_all(context, filters) for backup in backups: backup_id = backup['id'] share_id = backup['share_id'] share = self.db.share_get(context, share_id) result = {} try: if self.backup_driver.use_data_manager is False: progress = self.backup_driver.get_backup_progress( context, backup, share) else: result = self.data_copy_get_progress(context, share_id) progress = result.get('total_progress', '0') backup_values = {'progress': progress} if progress == '100': self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) backup_values.update( {'status': constants.STATUS_AVAILABLE}) LOG.info("Created share backup %s successfully.", backup_id) self.db.share_backup_update( context, backup_id, backup_values) except Exception: LOG.warning("Failed to get progress of share %(share)s " "backing up in share_backup %(backup).", {'share': share_id, 'backup': backup_id}) self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_ERROR, 'progress': '0'}) def _get_share_mount_info(self, share_instance): mount_template = CONF.backup_share_mount_template path = next((x['path'] for x in share_instance['export_locations'] if x['is_admin_only']), None) if not path: path = share_instance['export_locations'][0]['path'] format_args = { 'proto': share_instance['share_proto'].lower(), 'export': path, 'path': '%(path)s', 'options': '%(options)s', } unmount_template = CONF.backup_share_unmount_template mount_info = { 'mount': mount_template % format_args, 'unmount': unmount_template, } return mount_info def _get_backup_access_mapping(self, share): mapping = CONF.backup_protocol_access_mapping result = {} share_proto = share['share_proto'].lower() for access_type, protocols in mapping.items(): if share_proto in [y.lower() for y in protocols]: result[access_type] = 
result.get(access_type, []) result[access_type].append(share_proto) return result def _run_backup(self, context, backup, share): share_instance_id = share.instance.get('id') share_instance = self.db.share_instance_get( context, share_instance_id, with_share_data=True) access_mapping = self._get_backup_access_mapping(share) ignore_list = CONF.backup_ignore_files mount_path = CONF.mount_tmp_location backup_mount_path = CONF.backup_mount_tmp_location mount_info = self._get_share_mount_info(share_instance) dest_backup_info = self.backup_driver.get_backup_info(backup) dest_backup_mount_point = os.path.join(backup_mount_path, backup['id']) backup_folder = os.path.join(dest_backup_mount_point, backup['id']) try: copy = data_utils.Copy( os.path.join(mount_path, share_instance_id), backup_folder, ignore_list) info_src = { 'share_id': share['id'], 'share_instance_id': share_instance_id, 'mount': mount_info['mount'], 'unmount': mount_info['unmount'], 'mount_point': os.path.join(mount_path, share_instance_id), 'access_mapping': access_mapping } info_dest = { 'share_id': None, 'share_instance_id': None, 'backup': True, 'backup_id': backup['id'], 'mount': dest_backup_info['mount'], 'unmount': dest_backup_info['unmount'], 'mount_point': dest_backup_mount_point, 'access_mapping': access_mapping } self._copy_share_data(context, copy, info_src, info_dest) self.db.share_update(context, share['id'], {'task_state': None}) except Exception: self.db.share_update( context, share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) msg = _("Failed to copy contents from share %(src)s to " "backup %(dest)s.") % ( {'src': share_instance_id, 'dest': backup['id']}) LOG.exception(msg) raise exception.ShareDataCopyFailed(reason=msg) finally: self.busy_tasks_shares.pop(share['id'], None) def delete_backup(self, context, backup): backup_id = backup['id'] LOG.info('Delete backup started, backup: %s.', backup_id) backup = self.db.share_backup_get(context, backup_id) try: if 
self.backup_driver.use_data_manager is False: self.backup_driver.delete(context, backup) else: dest_backup_info = self.backup_driver.get_backup_info(backup) backup_mount_path = CONF.backup_mount_tmp_location mount_point = os.path.join(backup_mount_path, backup['id']) backup_folder = os.path.join(mount_point, backup['id']) if not os.path.exists(backup_folder): os.makedirs(backup_folder) if not os.path.exists(backup_folder): raise exception.NotFound("Path %s could not be " "found." % backup_folder) mount_template = dest_backup_info['mount'] unmount_template = dest_backup_info['unmount'] mount_command = mount_template % {'path': mount_point} unmount_command = unmount_template % {'path': mount_point} utils.execute(*(mount_command.split()), run_as_root=True) # backup_folder should exist after mount, else backup is # already deleted if os.path.exists(backup_folder): for filename in os.listdir(backup_folder): if filename in CONF.backup_ignore_files: continue file_path = os.path.join(backup_folder, filename) try: if (os.path.isfile(file_path) or os.path.islink(file_path)): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: LOG.debug("Failed to delete %(file_path)s. 
Reason:" " %(err)s", {'file_path': file_path, 'err': e}) shutil.rmtree(backup_folder) utils.execute(*(unmount_command.split()), run_as_root=True) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to delete share backup %s.", backup['id']) self.db.share_backup_update( context, backup['id'], {'status': constants.STATUS_ERROR_DELETING}) try: reserve_opts = { 'backups': -1, 'backup_gigabytes': -backup['size'], } reservations = QUOTAS.reserve( context, project_id=backup['project_id'], **reserve_opts) except Exception as e: reservations = None LOG.warning("Failed to update backup quota for %(pid)s: %(err)s.", {'pid': backup['project_id'], 'err': e}) raise if reservations: QUOTAS.commit(context, reservations, project_id=backup['project_id']) self.db.share_backup_delete(context, backup_id) LOG.info("Share backup %s deleted successfully.", backup_id) def restore_backup(self, context, backup, share_id): backup_id = backup['id'] LOG.info('Restore backup started, backup: %(backup_id)s ' 'share: %(share_id)s.', {'backup_id': backup['id'], 'share_id': share_id}) share = self.db.share_get(context, share_id) backup = self.db.share_backup_get(context, backup_id) try: if (self.backup_driver.restore_to_target_support is False and share['id'] != backup['share_id']): msg = _("Cannot restore backup %(backup)s to target share " "%(share)s as backup driver does not provide support " " for targeted restores") % ( {'backup': backup['id'], 'share': share['id']} ) LOG.exception(msg) raise exception.BackupException(reason=msg) if self.backup_driver.use_data_manager is False: self.backup_driver.restore(context, backup, share) else: self._run_restore(context, backup, share) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to restore backup %(backup)s to share " "%(share)s by data driver.", {'backup': backup['id'], 'share': share_id}) self.db.share_update( context, share_id, {'status': constants.STATUS_BACKUP_RESTORING_ERROR}) 
self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_AVAILABLE, 'restore_progress': '100'}) LOG.info("Share backup %s restored successfully.", backup_id) @periodic_task.periodic_task( spacing=CONF.restore_continue_update_interval) def restore_backup_continue(self, context): filters = { 'status': constants.STATUS_RESTORING, 'host': self.host, 'topic': CONF.data_topic } backups = self.db.share_backups_get_all(context, filters) for backup in backups: backup_id = backup['id'] try: filters = { 'source_backup_id': backup_id, } shares = self.db.share_get_all(context, filters) except Exception: LOG.warning('Failed to get shares for backup %s', backup_id) continue for share in shares: if share['status'] != constants.STATUS_BACKUP_RESTORING: continue share_id = share['id'] result = {} try: if self.backup_driver.use_data_manager is False: progress = self.backup_driver.get_restore_progress( context, backup, share) else: result = self.data_copy_get_progress(context, share_id) progress = result.get('total_progress', '0') backup_values = {'restore_progress': progress} if progress == '100': self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) backup_values.update( {'status': constants.STATUS_AVAILABLE}) LOG.info("Share backup %s restored successfully.", backup_id) self.db.share_backup_update(context, backup_id, backup_values) except Exception: LOG.exception("Failed to get progress of share_backup " "%(backup)s restoring in share %(share).", {'share': share_id, 'backup': backup_id}) self.db.share_update( context, share_id, {'status': constants.STATUS_BACKUP_RESTORING_ERROR}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_AVAILABLE, 'restore_progress': '0'}) def _run_restore(self, context, backup, share): share_instance_id = 
share.instance.get('id') share_instance = self.db.share_instance_get( context, share_instance_id, with_share_data=True) access_mapping = self._get_backup_access_mapping(share) mount_path = CONF.mount_tmp_location backup_mount_path = CONF.backup_mount_tmp_location ignore_list = CONF.backup_ignore_files mount_info = self._get_share_mount_info(share_instance) src_backup_info = self.backup_driver.get_backup_info(backup) src_backup_mount_point = os.path.join(backup_mount_path, backup['id']) backup_folder = os.path.join(src_backup_mount_point, backup['id']) try: copy = data_utils.Copy( backup_folder, os.path.join(mount_path, share_instance_id), ignore_list) info_src = { 'share_id': None, 'share_instance_id': None, 'restore': True, 'backup_id': backup['id'], 'mount': src_backup_info['mount'], 'unmount': src_backup_info['unmount'], 'mount_point': src_backup_mount_point, 'access_mapping': access_mapping } info_dest = { 'share_id': share['id'], 'share_instance_id': share_instance_id, 'mount': mount_info['mount'], 'unmount': mount_info['unmount'], 'mount_point': os.path.join(mount_path, share_instance_id), 'access_mapping': access_mapping } self._copy_share_data(context, copy, info_src, info_dest) self.db.share_update(context, share['id'], {'task_state': None}) except Exception: self.db.share_update( context, share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) msg = _("Failed to copy/restore contents from backup %(src)s " "to share %(dest)s.") % ( {'src': backup['id'], 'dest': share_instance_id}) LOG.exception(msg) raise exception.ShareDataCopyFailed(reason=msg) finally: self.busy_tasks_shares.pop(share['id'], None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/data/rpcapi.py0000664000175000017500000000567500000000000017075 0ustar00zuulzuul00000000000000# Copyright 2015, Hitachi Data Systems. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the data manager RPC API. """ from oslo_config import cfg import oslo_messaging as messaging from manila import rpc CONF = cfg.CONF class DataAPI(object): """Client side of the data RPC API. API version history: 1.0 - Initial version, Add migration_start(), data_copy_cancel(), data_copy_get_progress() 1.1 - create_backup(), delete_backup(), restore_backup() """ BASE_RPC_API_VERSION = '1.0' def __init__(self): super(DataAPI, self).__init__() target = messaging.Target(topic=CONF.data_topic, version=self.BASE_RPC_API_VERSION) self.client = rpc.get_client(target, version_cap='1.1') def migration_start(self, context, share_id, ignore_list, share_instance_id, dest_share_instance_id, connection_info_src, connection_info_dest): call_context = self.client.prepare(version='1.0') call_context.cast( context, 'migration_start', share_id=share_id, ignore_list=ignore_list, share_instance_id=share_instance_id, dest_share_instance_id=dest_share_instance_id, connection_info_src=connection_info_src, connection_info_dest=connection_info_dest) def data_copy_cancel(self, context, share_id): call_context = self.client.prepare(version='1.0') call_context.call(context, 'data_copy_cancel', share_id=share_id) def data_copy_get_progress(self, context, share_id): call_context = self.client.prepare(version='1.0') return call_context.call(context, 'data_copy_get_progress', share_id=share_id) def create_backup(self, context, backup): call_context 
= self.client.prepare(version='1.1') call_context.cast(context, 'create_backup', backup=backup) def delete_backup(self, context, backup): call_context = self.client.prepare(version='1.1') call_context.cast(context, 'delete_backup', backup=backup) def restore_backup(self, context, backup, share_id): call_context = self.client.prepare(version='1.1') call_context.cast(context, 'restore_backup', backup=backup, share_id=share_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/data/utils.py0000664000175000017500000001460400000000000016747 0ustar00zuulzuul00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_log import log from manila import exception from manila.i18n import _ from manila import utils LOG = log.getLogger(__name__) class Copy(object): def __init__(self, src, dest, ignore_list, check_hash=False): self.src = src self.dest = dest self.total_size = 0 self.current_size = 0 self.files = [] self.dirs = [] self.current_copy = None self.ignore_list = ignore_list self.cancelled = False self.initialized = False self.completed = False self.check_hash = check_hash def get_progress(self): # Empty share or empty contents if self.completed and self.total_size == 0: return {'total_progress': 100} if not self.initialized or self.current_copy is None: return {'total_progress': 0} try: size, err = utils.execute("stat", "-c", "%s", self.current_copy['file_path'], run_as_root=True) size = int(size) except utils.processutils.ProcessExecutionError: size = 0 current_file_progress = 0 if self.current_copy['size'] > 0: current_file_progress = size * 100 / self.current_copy['size'] current_file_path = self.current_copy['file_path'] total_progress = 0 if self.total_size > 0: if current_file_progress == 100: size = 0 total_progress = int((self.current_size + size) * 100 / self.total_size) progress = { 'total_progress': total_progress, 'current_file_path': current_file_path, 'current_file_progress': current_file_progress } return progress def cancel(self): self.cancelled = True def run(self): self.get_total_size(self.src) self.initialized = True self.copy_data(self.src) self.copy_stats(self.src) self.completed = True LOG.info(self.get_progress()) def get_total_size(self, path): if self.cancelled: return out, err = utils.execute( "ls", "-pA1", "--group-directories-first", path, run_as_root=True) for line in out.split('\n'): if self.cancelled: return if len(line) == 0: continue src_item = os.path.join(path, line) if line[-1] == '/': if line[0:-1] in self.ignore_list: continue self.get_total_size(src_item) else: if line in self.ignore_list: continue size, err = 
utils.execute("stat", "-c", "%s", src_item, run_as_root=True) self.total_size += int(size) def copy_data(self, path): if self.cancelled: return out, err = utils.execute( "ls", "-pA1", "--group-directories-first", path, run_as_root=True) for line in out.split('\n'): if self.cancelled: return if len(line) == 0: continue src_item = os.path.join(path, line) dest_item = src_item.replace(self.src, self.dest) if line[-1] == '/': if line[0:-1] in self.ignore_list: continue utils.execute("mkdir", "-p", dest_item, run_as_root=True) self.copy_data(src_item) else: if line in self.ignore_list: continue size, err = utils.execute("stat", "-c", "%s", src_item, run_as_root=True) self.current_copy = {'file_path': dest_item, 'size': int(size)} self._copy_and_validate(src_item, dest_item) self.current_size += int(size) LOG.info(self.get_progress()) @utils.retry(retry_param=exception.ShareDataCopyFailed, retries=2) def _copy_and_validate(self, src_item, dest_item): utils.execute("cp", "-P", "--preserve=all", src_item, dest_item, run_as_root=True) if self.check_hash: _validate_item(src_item, dest_item) def copy_stats(self, path): if self.cancelled: return out, err = utils.execute( "ls", "-pA1", "--group-directories-first", path, run_as_root=True) for line in out.split('\n'): if self.cancelled: return if len(line) == 0: continue src_item = os.path.join(path, line) dest_item = src_item.replace(self.src, self.dest) # NOTE(ganso): Should re-apply attributes for folders. 
if line[-1] == '/': if line[0:-1] in self.ignore_list: continue self.copy_stats(src_item) utils.execute("chmod", "--reference=%s" % src_item, dest_item, run_as_root=True) utils.execute("touch", "--reference=%s" % src_item, dest_item, run_as_root=True) utils.execute("chown", "--reference=%s" % src_item, dest_item, run_as_root=True) def _validate_item(src_item, dest_item): src_sum, err = utils.execute( "sha256sum", "%s" % src_item, run_as_root=True) dest_sum, err = utils.execute( "sha256sum", "%s" % dest_item, run_as_root=True) if src_sum.split()[0] != dest_sum.split()[0]: msg = _("Data corrupted while copying. Aborting data copy.") raise exception.ShareDataCopyFailed(reason=msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8536725 manila-21.0.0/manila/db/0000775000175000017500000000000000000000000014704 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/__init__.py0000664000175000017500000000144200000000000017016 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" DB abstraction for Manila """ from manila.db.api import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/api.py0000664000175000017500000021132300000000000016031 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. Functions in this module are imported into the manila.db namespace. Call these functions from manila.db namespace, not the manila.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/manila/manila.sqlite`. 
:enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from oslo_config import cfg from oslo_db import api as db_api db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for database.'), cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create.'), cfg.StrOpt('share_name_template', default='share-%s', help='Template string to be used to generate share names.'), cfg.StrOpt('share_snapshot_name_template', default='share-snapshot-%s', help='Template string to be used to generate share snapshot ' 'names.'), cfg.StrOpt('share_backup_name_template', default='share-backup-%s', help='Template string to be used to generate backup names.'), ] CONF = cfg.CONF CONF.register_opts(db_opts) _BACKEND_MAPPING = {'sqlalchemy': 'manila.db.sqlalchemy.api'} IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True) def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" return IMPL.authorize_project_context(context, project_id) def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" return IMPL.authorize_quota_class_context(context, class_name) ################### def service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, service_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=None): """Get all services.""" return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic, 
consider_disabled=False): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic, consider_disabled=consider_disabled) def service_get_all_share_sorted(context): """Get all share services sorted by share count. :returns: a list of (Service, share_count) tuples. """ return IMPL.service_get_all_share_sorted(context) def service_get_by_args(context, host, binary): """Get the state of an service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values): """Set the given properties on an service and update it. Raises NotFound if service does not exist. """ return IMPL.service_update(context, service_id, values) #################### def quota_create(context, project_id, resource, limit, user_id=None, share_type_id=None): """Create a quota for the given project and resource.""" return IMPL.quota_create(context, project_id, resource, limit, user_id=user_id, share_type_id=share_type_id) def quota_get_all_by_project_and_user(context, project_id, user_id): """Retrieve all quotas associated with a given project and user.""" return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id) def quota_get_all_by_project_and_share_type(context, project_id, share_type_id): """Retrieve all quotas associated with a given project and user.""" return IMPL.quota_get_all_by_project_and_share_type( context, project_id, share_type_id) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_get_all(context, project_id): """Retrieve all user quotas associated with a given project.""" return IMPL.quota_get_all(context, project_id) def quota_update(context, project_id, resource, limit, user_id=None, share_type_id=None): 
"""Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit, user_id=user_id, share_type_id=share_type_id) ################### def quota_class_create(context, class_name, resource, limit): """Create a quota class for the given name and resource.""" return IMPL.quota_class_create(context, class_name, resource, limit) def quota_class_get(context, class_name, resource): """Retrieve a quota class or raise if it does not exist.""" return IMPL.quota_class_get(context, class_name, resource) def quota_class_get_default(context): """Retrieve all default quotas.""" return IMPL.quota_class_get_default(context) def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" return IMPL.quota_class_get_all_by_name(context, class_name) def quota_class_update(context, class_name, resource, limit): """Update a quota class or raise if it does not exist.""" return IMPL.quota_class_update(context, class_name, resource, limit) ################### def quota_usage_get(context, project_id, resource, user_id=None, share_type_id=None): """Retrieve a quota usage or raise if it does not exist.""" return IMPL.quota_usage_get( context, project_id, resource, user_id=user_id, share_type_id=share_type_id) def quota_usage_get_all_by_project_and_user(context, project_id, user_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project_and_user(context, project_id, user_id) def quota_usage_get_all_by_project_and_share_type(context, project_id, share_type_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project_and_share_type( context, project_id, share_type_id) def quota_usage_get_all_by_project(context, project_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project(context, project_id) def quota_usage_create(context, project_id, user_id, resource, 
in_use, reserved=0, until_refresh=None, share_type_id=None): """Create a quota usage.""" return IMPL.quota_usage_create( context, project_id, user_id, resource, in_use, reserved, until_refresh, share_type_id=share_type_id) def quota_usage_update(context, project_id, user_id, resource, share_type_id=None, **kwargs): """Update a quota usage or raise if it does not exist.""" return IMPL.quota_usage_update( context, project_id, user_id, resource, share_type_id=share_type_id, **kwargs) ################### def quota_reserve(context, resources, quotas, user_quotas, share_type_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None, share_type_id=None, overquota_allowed=False): """Check quotas and create appropriate reservations.""" return IMPL.quota_reserve( context, resources, quotas, user_quotas, share_type_quotas, deltas, expire, until_refresh, max_age, project_id=project_id, user_id=user_id, share_type_id=share_type_id, overquota_allowed=overquota_allowed) def reservation_commit(context, reservations, project_id=None, user_id=None, share_type_id=None): """Commit quota reservations.""" return IMPL.reservation_commit( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_type_id) def reservation_rollback(context, reservations, project_id=None, user_id=None, share_type_id=None): """Roll back quota reservations.""" return IMPL.reservation_rollback( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_type_id) def quota_destroy_all_by_project_and_user(context, project_id, user_id): """Destroy all quotas associated with a given project and user.""" return IMPL.quota_destroy_all_by_project_and_user(context, project_id, user_id) def quota_destroy_all_by_share_type(context, share_type_id, project_id=None): """Destroy all quotas associated with a given share type and project.""" return IMPL.quota_destroy_all_by_share_type( context, share_type_id, project_id=project_id) def 
quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_destroy_all_by_project(context, project_id) def reservation_expire(context): """Roll back any expired reservations.""" return IMPL.reservation_expire(context) ################### def share_instance_get(context, instance_id, with_share_data=False): """Get share instance by id.""" return IMPL.share_instance_get(context, instance_id, with_share_data=with_share_data) def share_instance_create(context, share_id, values): """Create new share instance.""" return IMPL.share_instance_create(context, share_id, values) def share_instance_delete(context, instance_id, need_to_update_usages=False): """Delete share instance.""" return IMPL.share_instance_delete( context, instance_id, need_to_update_usages=need_to_update_usages) def update_share_instance_quota_usages(context, instance_id): """Update share instance quota usages""" return IMPL.update_share_instance_quota_usages(context, instance_id) def share_instance_update(context, instance_id, values, with_share_data=False): """Update share instance fields.""" return IMPL.share_instance_update(context, instance_id, values, with_share_data=with_share_data) def share_and_snapshot_instances_status_update( context, values, share_instance_ids=None, snapshot_instance_ids=None, current_expected_status=None): return IMPL.share_and_snapshot_instances_status_update( context, values, share_instance_ids=share_instance_ids, snapshot_instance_ids=snapshot_instance_ids, current_expected_status=current_expected_status) def share_instance_status_update(context, share_instance_ids, values): """Updates the status of a bunch of share instances at once.""" return IMPL.share_instance_status_update( context, share_instance_ids, values) def share_instance_get_all(context, filters=None): """Returns all share instances.""" return IMPL.share_instance_get_all(context, filters=filters) def share_instance_get_all_by_share_server( 
context, share_server_id, with_share_data=False, ): """Returns all share instances with given share_server_id.""" return IMPL.share_instance_get_all_by_share_server( context, share_server_id, with_share_data=with_share_data) def share_instance_get_all_by_host( context, host, with_share_data=False, status=None, ): """Returns all share instances with given host.""" return IMPL.share_instance_get_all_by_host( context, host, with_share_data=with_share_data, status=status) def share_instance_get_all_by_share_network(context, share_network_id): """Returns list of shares that belong to given share network.""" return IMPL.share_instance_get_all_by_share_network( context, share_network_id) def share_instance_get_all_by_share(context, share_id): """Returns list of shares that belong to given share.""" return IMPL.share_instance_get_all_by_share(context, share_id) def share_instance_get_all_by_share_group_id(context, share_group_id): """Returns list of share instances that belong to given share group.""" return IMPL.share_instance_get_all_by_share_group_id( context, share_group_id) def share_instance_sizes_sum_by_host(context, host): """Returns sum of sizes of all share instances on given host.""" return IMPL.share_instance_sizes_sum_by_host(context, host) ################### def share_create(context, share_values, create_share_instance=True): """Create new share.""" return IMPL.share_create(context, share_values, create_share_instance=create_share_instance) def share_update(context, share_id, values): """Update share fields.""" return IMPL.share_update(context, share_id, values) def share_get(context, share_id, **kwargs): """Get share by id.""" return IMPL.share_get(context, share_id, **kwargs) def share_get_all(context, filters=None, sort_key=None, sort_dir=None): """Get all shares.""" return IMPL.share_get_all( context, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) def share_get_all_with_count(context, filters=None, sort_key=None, sort_dir=None): """Get all 
shares.""" return IMPL.share_get_all_with_count( context, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_get_all_by_project(context, project_id, filters=None, is_public=False, sort_key=None, sort_dir=None): """Returns all shares with given project ID.""" return IMPL.share_get_all_by_project( context, project_id, filters=filters, is_public=is_public, sort_key=sort_key, sort_dir=sort_dir) def share_get_all_by_project_with_count( context, project_id, filters=None, is_public=False, sort_key=None, sort_dir=None,): """Returns all shares with given project ID.""" return IMPL.share_get_all_by_project_with_count( context, project_id, filters=filters, is_public=is_public, sort_key=sort_key, sort_dir=sort_dir) def share_get_all_by_share_group_id(context, share_group_id, filters=None, sort_key=None, sort_dir=None): """Returns all shares with given project ID and share group id.""" return IMPL.share_get_all_by_share_group_id( context, share_group_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_get_all_by_share_group_id_with_count(context, share_group_id, filters=None, sort_key=None, sort_dir=None): """Returns all shares with given project ID and share group id.""" return IMPL.share_get_all_by_share_group_id_with_count( context, share_group_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_get_all_by_share_server(context, share_server_id, filters=None, sort_key=None, sort_dir=None): """Returns all shares with given share server ID.""" return IMPL.share_get_all_by_share_server( context, share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_get_all_soft_deleted( context, share_server_id, filters=None, sort_key=None, sort_dir=None): """Returns all shares in recycle bin with given share server ID.""" return IMPL.share_get_all_soft_deleted( context, share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_get_all_by_share_server_with_count( context, share_server_id, 
filters=None, sort_key=None, sort_dir=None): """Returns all shares with given share server ID.""" return IMPL.share_get_all_by_share_server_with_count( context, share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_get_all_soft_deleted_by_network( context, share_network_id, filters=None, sort_key=None, sort_dir=None): """Returns all shares in recycle bin with given share network ID.""" return IMPL.share_get_all_soft_deleted_by_network( context, share_network_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_delete(context, share_id): """Delete share.""" return IMPL.share_delete(context, share_id) def share_soft_delete(context, share_id): """Soft delete share.""" return IMPL.share_soft_delete(context, share_id) def share_restore(context, share_id): """Restore share.""" return IMPL.share_restore(context, share_id) ################### def transfer_get(context, transfer_id): """Get a share transfer record or raise if it does not exist.""" return IMPL.transfer_get(context, transfer_id) def transfer_get_all(context, limit=None, sort_key=None, sort_dir=None, filters=None, offset=None): """Get all share transfer records.""" return IMPL.transfer_get_all(context, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters, offset=offset) def transfer_get_all_by_project(context, project_id, limit=None, sort_key=None, sort_dir=None, filters=None, offset=None): """Get all share transfer records for specified project.""" return IMPL.transfer_get_all_by_project(context, project_id, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters, offset=offset) def transfer_get_all_expired(context): """Get all expired transfers DB records.""" return IMPL.transfer_get_all_expired(context) def transfer_create(context, values): """Create an entry in the transfers table.""" return IMPL.transfer_create(context, values) def transfer_destroy(context, transfer_id, update_share_status=True): """Destroy a record in the share transfer 
table.""" return IMPL.transfer_destroy(context, transfer_id, update_share_status=update_share_status) def transfer_accept(context, transfer_id, user_id, project_id, accept_snapshots=False): """Accept a share transfer.""" return IMPL.transfer_accept(context, transfer_id, user_id, project_id, accept_snapshots=accept_snapshots) def transfer_accept_rollback(context, transfer_id, user_id, project_id, rollback_snap=False): """Rollback a share transfer.""" return IMPL.transfer_accept_rollback(context, transfer_id, user_id, project_id, rollback_snap=rollback_snap) ################### def share_access_create(context, values): """Allow access to share.""" return IMPL.share_access_create(context, values) def share_access_update(context, access_id, values): """Update access to share.""" return IMPL.share_access_update(context, access_id, values) def share_access_get(context, access_id): """Get share access rule.""" return IMPL.share_access_get(context, access_id) def share_access_get_with_context(context, access_id): """Get share access rule.""" return IMPL.share_access_get_with_context(context, access_id) def share_access_get_all_for_share(context, share_id, filters=None): """Get all access rules for given share.""" return IMPL.share_access_get_all_for_share(context, share_id, filters=filters) def share_access_get_all_for_instance(context, instance_id, filters=None, with_share_access_data=True): """Get all access rules related to a certain share instance.""" return IMPL.share_access_get_all_for_instance( context, instance_id, filters=filters, with_share_access_data=with_share_access_data) def share_access_get_all_by_type_and_access(context, share_id, access_type, access): """Returns share access by given type and access.""" return IMPL.share_access_get_all_by_type_and_access( context, share_id, access_type, access) def share_access_check_for_existing_access(context, share_id, access_type, access_to): """Returns True if rule corresponding to the type and client exists.""" 
return IMPL.share_access_check_for_existing_access( context, share_id, access_type, access_to) def share_instance_access_create(context, values, share_instance_id): """Allow access to share instance.""" return IMPL.share_instance_access_create( context, values, share_instance_id) def share_instance_access_copy(context, share_id, instance_id): """Maps the existing access rules for the share to the instance in the DB. Adds the instance mapping to the share's access rules and returns the share's access rules. """ return IMPL.share_instance_access_copy(context, share_id, instance_id) def share_instance_access_get(context, access_id, instance_id, with_share_access_data=True): """Get access rule mapping for share instance.""" return IMPL.share_instance_access_get( context, access_id, instance_id, with_share_access_data=with_share_access_data) def share_instance_access_update(context, access_id, instance_id, updates): """Update the access mapping row for a given share instance and access.""" return IMPL.share_instance_access_update( context, access_id, instance_id, updates) def share_instance_access_delete(context, mapping_id): """Deny access to share instance.""" return IMPL.share_instance_access_delete(context, mapping_id) def share_access_metadata_update(context, access_id, metadata): """Update metadata of share access rule.""" return IMPL.share_access_metadata_update(context, access_id, metadata) def share_access_metadata_delete(context, access_id, key): """Delete metadata of share access rule.""" return IMPL.share_access_metadata_delete(context, access_id, key) #################### def share_snapshot_instance_update(context, instance_id, values): """Set the given properties on a share snapshot instance and update it. Raises NotFound if snapshot instance does not exist. 
""" return IMPL.share_snapshot_instance_update(context, instance_id, values) def share_snapshot_instances_status_update( context, snapshot_instance_ids, values): """Updates the status of a bunch of share snapshot instances at once.""" return IMPL.share_snapshot_instances_status_update( context, snapshot_instance_ids, values) def share_snapshot_instance_create(context, snapshot_id, values): """Create a share snapshot instance for an existing snapshot.""" return IMPL.share_snapshot_instance_create( context, snapshot_id, values) def share_snapshot_instance_get(context, instance_id, with_share_data=False): """Get a snapshot instance or raise a NotFound exception.""" return IMPL.share_snapshot_instance_get( context, instance_id, with_share_data=with_share_data) def share_snapshot_instance_get_all_with_filters(context, filters, with_share_data=False): """Get all snapshot instances satisfying provided filters.""" return IMPL.share_snapshot_instance_get_all_with_filters( context, filters, with_share_data=with_share_data) def share_snapshot_instance_delete(context, snapshot_instance_id): """Delete a share snapshot instance.""" return IMPL.share_snapshot_instance_delete(context, snapshot_instance_id) #################### def share_snapshot_create(context, values, create_snapshot_instance=True): """Create a snapshot from the values dictionary.""" return IMPL.share_snapshot_create( context, values, create_snapshot_instance=create_snapshot_instance) def share_snapshot_get(context, snapshot_id, project_only=True): """Get a snapshot or raise if it does not exist.""" return IMPL.share_snapshot_get(context, snapshot_id, project_only=project_only) def share_snapshot_get_all(context, filters=None, limit=None, offset=None, sort_key=None, sort_dir=None): """Get all snapshots.""" return IMPL.share_snapshot_get_all( context, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) def share_snapshot_get_all_with_count(context, filters=None, limit=None, 
offset=None, sort_key=None, sort_dir=None): """Get all snapshots.""" return IMPL.share_snapshot_get_all_with_count( context, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) def share_snapshot_get_all_by_project(context, project_id, filters=None, limit=None, offset=None, sort_key=None, sort_dir=None): """Get all snapshots belonging to a project.""" return IMPL.share_snapshot_get_all_by_project( context, project_id, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) def share_snapshot_get_all_by_project_with_count(context, project_id, filters=None, limit=None, offset=None, sort_key=None, sort_dir=None): """Get all snapshots belonging to a project.""" return IMPL.share_snapshot_get_all_by_project_with_count( context, project_id, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) def share_snapshot_get_all_for_share(context, share_id, filters=None, sort_key=None, sort_dir=None): """Get all snapshots for a share.""" return IMPL.share_snapshot_get_all_for_share( context, share_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) def share_snapshot_get_latest_for_share(context, share_id): """Get the most recent snapshot for a share.""" return IMPL.share_snapshot_get_latest_for_share(context, share_id) def share_snapshot_update(context, snapshot_id, values): """Set the given properties on an snapshot and update it. Raises NotFound if snapshot does not exist. 
""" return IMPL.share_snapshot_update(context, snapshot_id, values) ################### def share_snapshot_access_create(context, values): """Create a share snapshot access from the values dictionary.""" return IMPL.share_snapshot_access_create(context, values) def share_snapshot_access_get(context, access_id): """Get share snapshot access rule from given access_id.""" return IMPL.share_snapshot_access_get(context, access_id) def share_snapshot_access_get_all_for_snapshot_instance( context, snapshot_instance_id, ): """Get all access rules related to a certain snapshot instance.""" return IMPL.share_snapshot_access_get_all_for_snapshot_instance( context, snapshot_instance_id) def share_snapshot_access_get_all_for_share_snapshot(context, share_snapshot_id, filters): """Get all access rules for a given share snapshot according to filters.""" return IMPL.share_snapshot_access_get_all_for_share_snapshot( context, share_snapshot_id, filters) def share_snapshot_check_for_existing_access(context, share_snapshot_id, access_type, access_to): """Returns True if rule corresponding to the type and client exists.""" return IMPL.share_snapshot_check_for_existing_access(context, share_snapshot_id, access_type, access_to) def share_snapshot_export_locations_get(context, snapshot_id): """Get all export locations for a given share snapshot.""" return IMPL.share_snapshot_export_locations_get(context, snapshot_id) def share_snapshot_instance_access_update( context, access_id, instance_id, updates): """Update the state of the share snapshot instance access.""" return IMPL.share_snapshot_instance_access_update( context, access_id, instance_id, updates) def share_snapshot_instance_access_get(context, share_snapshot_instance_id, access_id): """Get the share snapshot instance access related to given ids.""" return IMPL.share_snapshot_instance_access_get( context, share_snapshot_instance_id, access_id) def share_snapshot_instance_access_delete(context, access_id, snapshot_instance_id): 
"""Delete share snapshot instance access given its id.""" return IMPL.share_snapshot_instance_access_delete( context, access_id, snapshot_instance_id) def share_snapshot_instance_export_location_create(context, values): """Create a share snapshot instance export location.""" return IMPL.share_snapshot_instance_export_location_create(context, values) def share_snapshot_instance_export_locations_update( context, share_snapshot_instance_id, export_locations, delete=True): """Update export locations of a share instance.""" return IMPL.share_snapshot_instance_export_locations_update( context, share_snapshot_instance_id, export_locations, delete=delete) def share_snapshot_instance_export_locations_get_all( context, share_snapshot_instance_id): """Get the share snapshot instance export locations for given id.""" return IMPL.share_snapshot_instance_export_locations_get_all( context, share_snapshot_instance_id) def share_snapshot_instance_export_location_get(context, el_id): """Get the share snapshot instance export location for given id.""" return IMPL.share_snapshot_instance_export_location_get( context, el_id) def share_snapshot_instance_export_location_delete(context, el_id): """Delete share snapshot instance export location given its id.""" return IMPL.share_snapshot_instance_export_location_delete(context, el_id) #################### def share_snapshot_metadata_get(context, share_snapshot_id, **kwargs): """Get all metadata for a share snapshot.""" return IMPL.share_snapshot_metadata_get(context, share_snapshot_id, **kwargs) def share_snapshot_metadata_get_item(context, share_snapshot_id, key): """Get metadata item for a share snapshot.""" return IMPL.share_snapshot_metadata_get_item(context, share_snapshot_id, key) def share_snapshot_metadata_delete(context, share_snapshot_id, key): """Delete the given metadata item.""" IMPL.share_snapshot_metadata_delete(context, share_snapshot_id, key) def share_snapshot_metadata_update(context, share_snapshot_id, metadata, delete): 
"""Update metadata if it exists, otherwise create it.""" return IMPL.share_snapshot_metadata_update(context, share_snapshot_id, metadata, delete) def share_snapshot_metadata_update_item(context, share_snapshot_id, metadata): """Update metadata item if it exists, otherwise create it.""" return IMPL.share_snapshot_metadata_update_item(context, share_snapshot_id, metadata) ################### def security_service_create(context, values): """Create security service DB record.""" return IMPL.security_service_create(context, values) def security_service_delete(context, id): """Delete security service DB record.""" return IMPL.security_service_delete(context, id) def security_service_update(context, id, values): """Update security service DB record.""" return IMPL.security_service_update(context, id, values) def security_service_get(context, id, **kwargs): """Get security service DB record.""" return IMPL.security_service_get(context, id, **kwargs) def security_service_get_all(context): """Get all security service DB records.""" return IMPL.security_service_get_all(context) def security_service_get_all_by_project(context, project_id): """Get all security service DB records for the given project.""" return IMPL.security_service_get_all_by_project(context, project_id) def security_service_get_all_by_share_network(context, share_network_id): """Get all security service DB records for the given share network.""" return IMPL.security_service_get_all_by_share_network(context, share_network_id) #################### def share_metadata_get(context, share_id): """Get all metadata for a share.""" return IMPL.share_metadata_get(context, share_id) def share_metadata_get_item(context, share_id, key): """Get metadata item for given key and for a given share..""" return IMPL.share_metadata_get_item(context, share_id, key) def share_metadata_delete(context, share_id, key): """Delete the given metadata item.""" return IMPL.share_metadata_delete(context, share_id, key) def 
share_metadata_update(context, share, metadata, delete): """Update metadata if it exists, otherwise create it.""" return IMPL.share_metadata_update(context, share, metadata, delete) def share_metadata_update_item(context, share_id, item): """update meta item containing key and value for given share.""" return IMPL.share_metadata_update_item(context, share_id, item) ################### def export_location_get_by_uuid( context, export_location_uuid, ignore_secondary_replicas=False, ): """Get specific export location of a share.""" return IMPL.export_location_get_by_uuid( context, export_location_uuid, ignore_secondary_replicas=ignore_secondary_replicas) def export_location_get_all(context, share_id): """Get all export locations of a share.""" return IMPL.export_location_get_all(context, share_id) def export_location_get_all_by_share_id( context, share_id, include_admin_only=True, ignore_migration_destination=False, ignore_secondary_replicas=False, ): """Get all export locations of a share by its ID.""" return IMPL.export_location_get_all_by_share_id( context, share_id, include_admin_only=include_admin_only, ignore_migration_destination=ignore_migration_destination, ignore_secondary_replicas=ignore_secondary_replicas) def export_location_get_all_by_share_instance_id( context, share_instance_id, include_admin_only=True, ): """Get all export locations of a share instance by its ID.""" return IMPL.export_location_get_all_by_share_instance_id( context, share_instance_id, include_admin_only=include_admin_only) def export_locations_update( context, share_instance_id, export_locations, delete=True, ): """Update export locations of a share instance.""" return IMPL.export_locations_update( context, share_instance_id, export_locations, delete) #################### def export_location_metadata_get(context, export_location_uuid): """Get all metadata of an export location.""" return IMPL.export_location_metadata_get(context, export_location_uuid) def 
export_location_metadata_get_item(context, export_location_uuid, key): """Get metadata item for a share export location.""" return IMPL.export_location_metadata_get_item(context, export_location_uuid, key) def export_location_metadata_delete(context, export_location_uuid, keys): """Delete metadata of an export location.""" return IMPL.export_location_metadata_delete( context, export_location_uuid, keys) def export_location_metadata_update(context, export_location_uuid, metadata, delete): """Update metadata of an export location.""" return IMPL.export_location_metadata_update( context, export_location_uuid, metadata, delete) def export_location_metadata_update_item(context, export_location_uuid, metadata): """Update metadata item if it exists, otherwise create it.""" return IMPL.export_location_metadata_update_item(context, export_location_uuid, metadata) #################### def share_network_create(context, values): """Create a share network DB record.""" return IMPL.share_network_create(context, values) def share_network_delete(context, id): """Delete a share network DB record.""" return IMPL.share_network_delete(context, id) def share_network_update(context, id, values): """Update a share network DB record.""" return IMPL.share_network_update(context, id, values) def share_network_get(context, id): """Get requested share network DB record.""" return IMPL.share_network_get(context, id) def share_network_get_all_by_filter(context, filters=None): """Get all share network DB records for the given filter.""" return IMPL.share_network_get_all_by_filter(context, filters=filters) def share_network_get_all(context): """Get all share network DB records.""" return IMPL.share_network_get_all(context) def share_network_get_all_by_project(context, project_id): """Get all share network DB records for the given project.""" return IMPL.share_network_get_all_by_project(context, project_id) def share_network_get_all_by_security_service(context, security_service_id): """Get all 
share network DB records for the given project.""" return IMPL.share_network_get_all_by_security_service( context, security_service_id) def share_network_add_security_service(context, id, security_service_id): """Associate a security service with a share network.""" return IMPL.share_network_add_security_service(context, id, security_service_id) def share_network_remove_security_service(context, id, security_service_id): """Dissociate a security service from a share network.""" return IMPL.share_network_remove_security_service(context, id, security_service_id) def share_network_security_service_association_get( context, share_network_id, security_service_id): """Get given share network and security service association.""" return IMPL.share_network_security_service_association_get( context, share_network_id, security_service_id) def share_network_update_security_service(context, id, current_security_service_id, new_security_service_id): """Update a security service association with a share network.""" return IMPL.share_network_update_security_service( context, id, current_security_service_id, new_security_service_id) ################## def share_network_subnet_create(context, values): """Create a share network subnet DB record.""" return IMPL.share_network_subnet_create(context, values) def share_network_subnet_delete(context, network_subnet_id): """Delete a share network subnet DB record.""" return IMPL.share_network_subnet_delete(context, network_subnet_id) def share_network_subnet_update(context, network_subnet_id, values): """Update a share network subnet DB record.""" return IMPL.share_network_subnet_update(context, network_subnet_id, values) def share_network_subnet_get(context, network_subnet_id, parent_id=None): """Get requested share network subnet DB record.""" return IMPL.share_network_subnet_get( context, network_subnet_id, parent_id=parent_id, ) def share_network_subnet_get_all_with_same_az(context, network_subnet_id): """Get requested az share network 
subnets DB record.""" return IMPL.share_network_subnet_get_all_with_same_az( context, network_subnet_id, ) def share_network_subnet_get_all(context): """Get all share network subnet DB record.""" return IMPL.share_network_subnet_get_all(context) def share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, availability_zone_id, fallback_to_default=True, ): """Get the share network subnets DB record in a given AZ. This method returns list of subnets DB record for a given share network id and an availability zone. If the 'availability_zone_id' is 'None', a record may be returned and it will represent the default share network subnets. If there is no subnet for a specific availability zone id and "fallback_to_default" is True, this method will return the default share network subnets, if it exists. """ return IMPL.share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, availability_zone_id, fallback_to_default=fallback_to_default, ) def share_network_subnet_get_default_subnets(context, share_network_id): """Get the default share network subnets DB records.""" return IMPL.share_network_subnet_get_default_subnets( context, share_network_id, ) def share_network_subnet_get_all_by_share_server_id(context, share_server_id): """Get the subnets that are being used by the share server.""" return IMPL.share_network_subnet_get_all_by_share_server_id( context, share_server_id, ) #################### def share_network_subnet_metadata_get(context, share_network_subnet_id, **kwargs): """Get all metadata for a share network subnet.""" return IMPL.share_network_subnet_metadata_get(context, share_network_subnet_id, **kwargs) def share_network_subnet_metadata_get_item(context, share_network_subnet_id, key): """Get metadata item for a share network subnet.""" return IMPL.share_network_subnet_metadata_get_item(context, share_network_subnet_id, key) def share_network_subnet_metadata_delete(context, share_network_subnet_id, key): """Delete 
the given metadata item.""" IMPL.share_network_subnet_metadata_delete(context, share_network_subnet_id, key) def share_network_subnet_metadata_update(context, share_network_subnet_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" return IMPL.share_network_subnet_metadata_update(context, share_network_subnet_id, metadata, delete) def share_network_subnet_metadata_update_item(context, share_network_subnet_id, metadata): """Update metadata item if it exists, otherwise create it.""" return IMPL.share_network_subnet_metadata_update_item( context, share_network_subnet_id, metadata) ################### def network_allocation_create(context, values): """Create a network allocation DB record.""" return IMPL.network_allocation_create(context, values) def network_allocation_delete(context, id): """Delete a network allocation DB record.""" return IMPL.network_allocation_delete(context, id) def network_allocation_update(context, id, values, read_deleted=None): """Update a network allocation DB record.""" return IMPL.network_allocation_update(context, id, values, read_deleted=read_deleted) def network_allocation_get(context, id, read_deleted=None): """Get a network allocation DB record.""" return IMPL.network_allocation_get(context, id, read_deleted=read_deleted) def network_allocations_get_for_share_server( context, share_server_id, label=None, subnet_id=None, ): """Get network allocations for share server.""" return IMPL.network_allocations_get_for_share_server( context, share_server_id, label=label, subnet_id=subnet_id, ) def network_allocations_get_by_ip_address(context, ip_address): """Get network allocations by IP address.""" return IMPL.network_allocations_get_by_ip_address(context, ip_address) ################## def share_server_create(context, values): """Create share server DB record.""" return IMPL.share_server_create(context, values) def share_server_delete(context, id): """Delete share server DB record.""" return 
IMPL.share_server_delete(context, id) def share_server_update(context, id, values): """Update share server DB record.""" return IMPL.share_server_update(context, id, values) def share_server_get(context, id): """Get share server DB record by ID.""" return IMPL.share_server_get(context, id) def share_server_search_by_identifier(context, identifier): """Search for share servers based on given identifier.""" return IMPL.share_server_search_by_identifier( context, identifier, ) def share_server_get_all_by_host_and_share_subnet_valid( context, host, share_subnet_id, ): """Get share server DB records by host and share net not error.""" return IMPL.share_server_get_all_by_host_and_share_subnet_valid( context, host, share_subnet_id, ) def share_server_get_all_by_host_and_or_share_subnet( context, host=None, share_subnet_id=None, ): """Get share server DB records by host and/or share net.""" return IMPL.share_server_get_all_by_host_and_or_share_subnet( context, host=host, share_subnet_id=share_subnet_id, ) def share_server_get_all(context): """Get all share server DB records.""" return IMPL.share_server_get_all(context) def share_server_get_all_with_filters(context, filters): """Get all share servers that match with the specified filters.""" return IMPL.share_server_get_all_with_filters(context, filters) def share_server_get_all_by_host(context, host, filters=None): """Get all share servers related to particular host.""" return IMPL.share_server_get_all_by_host(context, host, filters=filters) def share_server_get_all_unused_deletable(context, host, updated_before): """Get all free share servers DB records.""" return IMPL.share_server_get_all_unused_deletable(context, host, updated_before) def share_get_all_expired(context): """Get all expired share DB records.""" return IMPL.share_get_all_expired(context) def share_server_backend_details_set(context, share_server_id, server_details): """Create DB record with backend details.""" return 
IMPL.share_server_backend_details_set(context, share_server_id, server_details) def share_server_backend_details_get_item(context, share_server_id, meta_key): """Get backend details.""" return IMPL.share_server_backend_details_get_item(context, share_server_id, meta_key) def share_server_backend_details_delete(context, share_server_id): """Delete backend details DB records for a share server.""" return IMPL.share_server_backend_details_delete(context, share_server_id) def share_servers_update(context, share_server_ids, values): """Updates values of a bunch of share servers at once.""" return IMPL.share_servers_update( context, share_server_ids, values) ################## def share_type_create(context, values, projects=None): """Create a new share type.""" return IMPL.share_type_create(context, values, projects) def share_type_update(context, share_type_id, values): """Update an exist share type.""" return IMPL.share_type_update(context, share_type_id, values) def share_type_get_all(context, inactive=False, filters=None): """Get all share types. :param context: context to query under :param inactive: Include inactive share types to the result set :param filters: Filters for the query in the form of key/value. :is_public: Filter share types based on visibility: * **True**: List public share types only * **False**: List private share types only * **None**: List both public and private share types :returns: list of matching share types """ return IMPL.share_type_get_all(context, inactive, filters) def share_type_get(context, type_id, inactive=False, expected_fields=None): """Get share type by id. :param context: context to query under :param type_id: share type id to get. :param inactive: Consider inactive share types when searching :param expected_fields: Return those additional fields. Supported fields are: projects. 
:returns: share type """ return IMPL.share_type_get(context, type_id, inactive, expected_fields) def share_type_get_by_name(context, name): """Get share type by name.""" return IMPL.share_type_get_by_name(context, name) def share_type_get_by_name_or_id(context, name_or_id): """Get share type by name or ID and return None if not found.""" return IMPL.share_type_get_by_name_or_id(context, name_or_id) def share_type_access_get_all(context, type_id): """Get all share type access of a share type.""" return IMPL.share_type_access_get_all(context, type_id) def share_type_access_add(context, type_id, project_id): """Add share type access for project.""" return IMPL.share_type_access_add(context, type_id, project_id) def share_type_access_remove(context, type_id, project_id): """Remove share type access for project.""" return IMPL.share_type_access_remove(context, type_id, project_id) def share_type_destroy(context, id): """Delete a share type.""" return IMPL.share_type_destroy(context, id) #################### def share_type_extra_specs_get(context, share_type_id): """Get all extra specs for a share type.""" return IMPL.share_type_extra_specs_get(context, share_type_id) def share_type_extra_specs_delete(context, share_type_id, key): """Delete the given extra specs item.""" return IMPL.share_type_extra_specs_delete(context, share_type_id, key) def share_type_extra_specs_update_or_create(context, share_type_id, extra_specs): """Create or update share type extra specs. This adds or modifies the key/value pairs specified in the extra specs dict argument. 
""" return IMPL.share_type_extra_specs_update_or_create(context, share_type_id, extra_specs) def driver_private_data_get(context, entity_id, key=None, default=None): """Get one, list or all key-value pairs for given entity_id.""" return IMPL.driver_private_data_get(context, entity_id, key, default) def driver_private_data_update(context, entity_id, details, delete_existing=False): """Update key-value pairs for given entity_id.""" return IMPL.driver_private_data_update(context, entity_id, details, delete_existing) def driver_private_data_delete(context, entity_id, key=None): """Remove one, list or all key-value pairs for given entity_id.""" return IMPL.driver_private_data_delete(context, entity_id, key) #################### def availability_zone_get(context, id_or_name): """Get availability zone by name or id.""" return IMPL.availability_zone_get(context, id_or_name) def availability_zone_get_all(context): """Get all active availability zones.""" return IMPL.availability_zone_get_all(context) #################### def share_group_get(context, share_group_id): """Get a share group or raise if it does not exist.""" return IMPL.share_group_get(context, share_group_id) def share_group_get_all(context, detailed=True, filters=None, sort_key=None, sort_dir=None): """Get all share groups.""" return IMPL.share_group_get_all( context, detailed=detailed, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_group_get_all_by_host(context, host, detailed=True, filters=None, sort_key=None, sort_dir=None): """Get all share groups belonging to a host.""" return IMPL.share_group_get_all_by_host( context, host, detailed=detailed, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_group_create(context, values): """Create a share group from the values dictionary.""" return IMPL.share_group_create(context, values) def share_group_get_all_by_share_server(context, share_server_id, filters=None, sort_key=None, sort_dir=None): """Get all share groups associated with 
a share server.""" return IMPL.share_group_get_all_by_share_server( context, share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_group_get_all_by_project(context, project_id, detailed=True, filters=None, sort_key=None, sort_dir=None): """Get all share groups belonging to a project.""" return IMPL.share_group_get_all_by_project( context, project_id, detailed=detailed, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_group_update(context, share_group_id, values): """Set the given properties on a share group and update it. Raises NotFound if share group does not exist. """ return IMPL.share_group_update(context, share_group_id, values) def share_group_destroy(context, share_group_id): """Destroy the share group or raise if it does not exist.""" return IMPL.share_group_destroy(context, share_group_id) def count_shares_in_share_group(context, share_group_id): """Returns the number of undeleted shares with the specified group.""" return IMPL.count_shares_in_share_group(context, share_group_id) def get_all_shares_by_share_group(context, share_group_id): return IMPL.get_all_shares_by_share_group(context, share_group_id) def count_share_group_snapshots_in_share_group(context, share_group_id): """Returns the number of sg snapshots with the specified share group.""" return IMPL.count_share_group_snapshots_in_share_group( context, share_group_id) def count_share_groups_in_share_network(context, share_network_id): """Return the number of groups with the specified share network.""" return IMPL.count_share_groups_in_share_network(context, share_network_id) def count_share_group_snapshot_members_in_share( context, share_id, include_deferred_deleting=True ): """Returns the number of group snapshot members linked to the share.""" return IMPL.count_share_group_snapshot_members_in_share( context, share_id, include_deferred_deleting=include_deferred_deleting) def share_group_snapshot_get(context, share_group_snapshot_id): """Get a share 
group snapshot.""" return IMPL.share_group_snapshot_get(context, share_group_snapshot_id) def share_group_snapshot_get_all(context, detailed=True, filters=None, sort_key=None, sort_dir=None): """Get all share group snapshots.""" return IMPL.share_group_snapshot_get_all( context, detailed=detailed, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_group_snapshot_get_all_by_project(context, project_id, detailed=True, filters=None, sort_key=None, sort_dir=None): """Get all share group snapshots belonging to a project.""" return IMPL.share_group_snapshot_get_all_by_project( context, project_id, detailed=detailed, filters=filters, sort_key=sort_key, sort_dir=sort_dir) def share_group_snapshot_create(context, values): """Create a share group snapshot from the values dictionary.""" return IMPL.share_group_snapshot_create(context, values) def share_group_snapshot_update(context, share_group_snapshot_id, values): """Set the given properties on a share group snapshot and update it. Raises NotFound if share group snapshot does not exist. """ return IMPL.share_group_snapshot_update( context, share_group_snapshot_id, values) def share_group_snapshot_destroy(context, share_group_snapshot_id): """Destroy the share_group_snapshot or raise if it does not exist.""" return IMPL.share_group_snapshot_destroy(context, share_group_snapshot_id) def share_group_snapshot_members_get_all(context, share_group_snapshot_id): """Return the members of a share group snapshot.""" return IMPL.share_group_snapshot_members_get_all( context, share_group_snapshot_id) def share_group_snapshot_member_create(context, values): """Create a share group snapshot member from the values dictionary.""" return IMPL.share_group_snapshot_member_create(context, values) def share_group_snapshot_member_update(context, member_id, values): """Set the given properties on a share group snapshot member and update it. Raises NotFound if share_group_snapshot member does not exist. 
""" return IMPL.share_group_snapshot_member_update(context, member_id, values) def share_resources_host_update(context, current_host, new_host): """Update the host attr of all share resources that are on current_host.""" return IMPL.share_resources_host_update(context, current_host, new_host) #################### def share_replicas_get_all(context, with_share_server=False, with_share_data=False): """Returns all share replicas regardless of share.""" return IMPL.share_replicas_get_all( context, with_share_server=with_share_server, with_share_data=with_share_data) def share_replicas_get_all_by_share(context, share_id, with_share_server=False, with_share_data=False): """Returns all share replicas for a given share.""" return IMPL.share_replicas_get_all_by_share( context, share_id, with_share_server=with_share_server, with_share_data=with_share_data) def share_replicas_get_available_active_replica(context, share_id, with_share_server=False, with_share_data=False): """Returns an active replica for a given share.""" return IMPL.share_replicas_get_available_active_replica( context, share_id, with_share_server=with_share_server, with_share_data=with_share_data) def share_replica_get(context, replica_id, with_share_server=False, with_share_data=False): """Get share replica by id.""" return IMPL.share_replica_get( context, replica_id, with_share_server=with_share_server, with_share_data=with_share_data) def share_replica_update(context, share_replica_id, values, with_share_data=False): """Updates a share replica with given values.""" return IMPL.share_replica_update(context, share_replica_id, values, with_share_data=with_share_data) def share_replica_delete(context, replica_id, need_to_update_usages=True): """Deletes a share replica.""" return IMPL.share_replica_delete( context, replica_id, need_to_update_usages=need_to_update_usages) def purge_deleted_records(context, age_in_days): """Purge deleted rows older than given age from all tables :raises: InvalidParameterValue if 
age_in_days is incorrect. """ return IMPL.purge_deleted_records(context, age_in_days=age_in_days) #################### def share_group_type_create(context, values, projects=None): """Create a new share group type.""" return IMPL.share_group_type_create(context, values, projects) def share_group_type_get_all(context, inactive=False, filters=None): """Get all share group types. :param context: context to query under :param inactive: Include inactive share group types to the result set :param filters: Filters for the query in the form of key/value. :is_public: Filter share group types based on visibility: * **True**: List public group types only * **False**: List private group types only * **None**: List both public and private group types :returns: list of matching share group types """ return IMPL.share_group_type_get_all(context, inactive, filters) def share_group_type_get(context, type_id, inactive=False, expected_fields=None): """Get share_group type by id. :param context: context to query under :param type_id: group type id to get. :param inactive: Consider inactive group types when searching :param expected_fields: Return those additional fields. Supported fields are: projects. 
:returns: share group type """ return IMPL.share_group_type_get( context, type_id, inactive, expected_fields) def share_group_type_get_by_name(context, name): """Get share group type by name.""" return IMPL.share_group_type_get_by_name(context, name) def share_group_type_access_get_all(context, type_id): """Get all share group type access of a share group type.""" return IMPL.share_group_type_access_get_all(context, type_id) def share_group_type_access_add(context, type_id, project_id): """Add share group type access for project.""" return IMPL.share_group_type_access_add(context, type_id, project_id) def share_group_type_access_remove(context, type_id, project_id): """Remove share group type access for project.""" return IMPL.share_group_type_access_remove(context, type_id, project_id) def share_group_type_destroy(context, type_id): """Delete a share group type.""" return IMPL.share_group_type_destroy(context, type_id) def share_group_type_specs_get(context, type_id): """Get all group specs for a share group type.""" return IMPL.share_group_type_specs_get(context, type_id) def share_group_type_specs_delete(context, type_id, key): """Delete the given group specs item.""" return IMPL.share_group_type_specs_delete(context, type_id, key) def share_group_type_specs_update_or_create(context, type_id, group_specs): """Create or update share group type specs. This adds or modifies the key/value pairs specified in the group specs dict argument. 
""" return IMPL.share_group_type_specs_update_or_create( context, type_id, group_specs) #################### def message_get(context, message_id): """Return a message with the specified ID.""" return IMPL.message_get(context, message_id) def message_get_all(context, filters=None, limit=None, offset=None, sort_key=None, sort_dir=None): """Returns all messages with the project of the specified context.""" return IMPL.message_get_all(context, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) def message_create(context, values): """Creates a new message with the specified values.""" return IMPL.message_create(context, values) def message_destroy(context, message_id): """Deletes message with the specified ID.""" return IMPL.message_destroy(context, message_id) def cleanup_expired_messages(context): """Soft delete expired messages""" return IMPL.cleanup_expired_messages(context) def backend_info_get(context, host): """Get hash info for given host.""" return IMPL.backend_info_get(context, host) def backend_info_update(context, host, value=None, delete_existing=False): """Update hash info for host.""" return IMPL.backend_info_update(context, host=host, value=value, delete_existing=delete_existing) #################### def async_operation_data_get(context, entity_id, key=None, default=None): """Get one, list or all key-value pairs for given entity_id.""" return IMPL.async_operation_data_get(context, entity_id, key, default) def async_operation_data_update(context, entity_id, details, delete_existing=False): """Update key-value pairs for given entity_id.""" return IMPL.async_operation_data_update(context, entity_id, details, delete_existing) def async_operation_data_delete(context, entity_id, key=None): """Remove one, list or all key-value pairs for given entity_id.""" return IMPL.async_operation_data_delete(context, entity_id, key) #################### def share_backup_create(context, share_id, values): """Create new share backup with 
specified values.""" return IMPL.share_backup_create(context, share_id, values) def share_backup_update(context, backup_id, values): """Updates a share backup with given values.""" return IMPL.share_backup_update(context, backup_id, values) def share_backup_get(context, backup_id): """Get share backup by id.""" return IMPL.share_backup_get(context, backup_id) def share_backups_get_all(context, filters=None, limit=None, offset=None, sort_key=None, sort_dir=None): """Get all backups.""" return IMPL.share_backups_get_all( context, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) def share_backup_delete(context, backup_id): """Deletes backup with the specified ID.""" return IMPL.share_backup_delete(context, backup_id) ##################### def resource_lock_create(context, values): """Create a resource lock.""" return IMPL.resource_lock_create(context, values) def resource_lock_update(context, lock_id, values): """Update a resource lock.""" return IMPL.resource_lock_update(context, lock_id, values) def resource_lock_delete(context, lock_id): """Delete a resource lock.""" return IMPL.resource_lock_delete(context, lock_id) def resource_lock_get(context, lock_id): """Retrieve a resource lock.""" return IMPL.resource_lock_get(context, lock_id) def resource_lock_get_all(context, **kwargs): """Retrieve all resource locks.""" return IMPL.resource_lock_get_all(context, **kwargs) ################## def encryption_keys_get_count(context, filters=None): """Get count of encryption keys.""" return IMPL.encryption_keys_get_count(context, filters=filters) def encryption_keys_get_all(context, filters=None): """Get all encryption keys.""" return IMPL.encryption_keys_get_all(context, filters=filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/base.py0000664000175000017500000000252600000000000016175 0ustar00zuulzuul00000000000000# Copyright 2010 United States 
Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base class for classes that need modular database access.""" from oslo_config import cfg from oslo_utils import importutils db_driver_opt = cfg.StrOpt('db_driver', default='manila.db', help='Driver to use for database access.') CONF = cfg.CONF CONF.register_opt(db_driver_opt) class Base(object): """DB driver is injected in the init method.""" def __init__(self, db_driver=None): super(Base, self).__init__() if not db_driver: db_driver = CONF.db_driver self.db = importutils.import_module(db_driver) # pylint: disable=C0103 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migration.py0000664000175000017500000000272500000000000017255 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database setup and migration commands.""" from manila import utils IMPL = utils.LazyPluggable( 'db_backend', sqlalchemy='manila.db.migrations.alembic.migration') def upgrade(version): """Upgrade database to 'version' or the most recent version.""" return IMPL.upgrade(version) def downgrade(version): """Downgrade database to 'version' or to initial state.""" return IMPL.downgrade(version) def version(): """Display the current database version.""" return IMPL.version() def stamp(version): """Stamp database with 'version' or the most recent version.""" return IMPL.stamp(version) def revision(message, autogenerate): """Generate new migration script.""" return IMPL.revision(message, autogenerate) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8536725 manila-21.0.0/manila/db/migrations/0000775000175000017500000000000000000000000017060 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/__init__.py0000664000175000017500000000000000000000000021157 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8536725 manila-21.0.0/manila/db/migrations/alembic/0000775000175000017500000000000000000000000020454 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/db/migrations/alembic/__init__.py0000664000175000017500000000000000000000000022553 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/env.py0000664000175000017500000000240600000000000021620 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic import context from manila.db.sqlalchemy import api as db_api from manila.db.sqlalchemy import models as db_models def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = db_api.get_engine() connection = engine.connect() target_metadata = db_models.ManilaBase.metadata # pylint: disable=no-member context.configure(connection=connection, target_metadata=target_metadata) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/migration.py0000664000175000017500000000451600000000000023025 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import alembic from alembic import config as alembic_config import alembic.migration as alembic_migration # pylint: disable=import-error from oslo_config import cfg from manila.db.sqlalchemy import api as db_api CONF = cfg.CONF def _alembic_config(): path = os.path.join(os.path.dirname(__file__), os.pardir, 'alembic.ini') config = alembic_config.Config(path) return config def version(): """Current database version. :returns: Database version :rtype: string """ engine = db_api.get_engine() with engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) return context.get_current_revision() def upgrade(revision): """Upgrade database. :param version: Desired database version :type version: string """ return alembic.command.upgrade(_alembic_config(), revision or 'head') def downgrade(revision): """Downgrade database. :param version: Desired database version :type version: string """ return alembic.command.downgrade(_alembic_config(), revision or 'base') def stamp(revision): """Stamp database with provided revision. Don't run any migrations. :param revision: Should match one from repository or head - to stamp database with most recent revision :type revision: string """ return alembic.command.stamp(_alembic_config(), revision or 'head') def revision(message=None, autogenerate=False): """Create template for migration. 
:param message: Text that will be used for migration title :type message: string :param autogenerate: If True - generates diff based on current database state :type autogenerate: bool """ return alembic.command.revision(_alembic_config(), message, autogenerate) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/script.py.mako0000664000175000017500000000167100000000000023265 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. 
revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8696723 manila-21.0.0/manila/db/migrations/alembic/versions/0000775000175000017500000000000000000000000022324 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/0274d20c560f_add_ou_to_security_service.py0000664000175000017500000000204000000000000032024 0ustar00zuulzuul00000000000000# Copyright 2018 SAP SE # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add ou to security service Revision ID: 0274d20c560f Revises: 4a482571410f Create Date: 2017-05-19 17:27:30.274440 """ # revision identifiers, used by Alembic. 
revision = '0274d20c560f' down_revision = '4a482571410f' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'security_services', sa.Column('ou', sa.String(255), nullable=True)) def downgrade(): op.drop_column('security_services', 'ou') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/03da71c0e321_convert_cgs_to_share_groups.py0000664000175000017500000002227100000000000032276 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Convert consistency groups to share groups Revision ID: 03da71c0e321 Revises: e9f79621d83f Create Date: 2016-05-19 10:25:17.899008 """ # revision identifiers, used by Alembic. 
revision = "03da71c0e321" down_revision = "e9f79621d83f" from alembic import op from oslo_log import log import sqlalchemy as sa from sqlalchemy import Column, String from manila.db.migrations import utils LOG = log.getLogger(__name__) def upgrade(): LOG.info("Renaming consistency group tables") # Rename tables op.rename_table("consistency_groups", "share_groups") op.rename_table("cgsnapshots", "share_group_snapshots") op.rename_table("cgsnapshot_members", "share_group_snapshot_members") op.rename_table( "consistency_group_share_type_mappings", "share_group_share_type_mappings") # Update columns and foreign keys op.drop_constraint( "fk_shares_consistency_group_id", "shares", type_="foreignkey") op.alter_column( "shares", "consistency_group_id", existing_type=String(36), existing_nullable=True, new_column_name="share_group_id") op.alter_column( "shares", "source_cgsnapshot_member_id", existing_type=String(36), existing_nullable=True, new_column_name="source_share_group_snapshot_member_id") op.create_foreign_key( "fk_shares_share_group_id", "shares", "share_groups", ["share_group_id"], ["id"]) op.drop_constraint( "fk_cg_share_network_id", "share_groups", type_="foreignkey") op.drop_constraint( "fk_cg_share_server_id", "share_groups", type_="foreignkey") op.alter_column( "share_groups", "source_cgsnapshot_id", existing_type=String(36), new_column_name="source_share_group_snapshot_id") op.create_foreign_key( "fk_share_group_share_network_id", "share_groups", "share_networks", ["share_network_id"], ["id"]) op.create_foreign_key( "fk_share_group_share_server_id", "share_groups", "share_servers", ["share_server_id"], ["id"]) op.drop_constraint( "fk_cgsnapshots_consistency_group_id", "share_group_snapshots", type_="foreignkey") op.alter_column( "share_group_snapshots", "consistency_group_id", existing_type=String(36), new_column_name="share_group_id") op.create_foreign_key( "fk_share_group_snapshots_share_group_id", "share_group_snapshots", "share_groups", 
["share_group_id"], ["id"]) op.drop_constraint( "fk_cgstm_cg_id", "share_group_share_type_mappings", type_="foreignkey") op.drop_constraint( "fk_cgstm_share_type_id", "share_group_share_type_mappings", type_="foreignkey") op.alter_column( "share_group_share_type_mappings", "consistency_group_id", existing_type=String(36), new_column_name="share_group_id") op.create_foreign_key( "fk_sgstm_share_group_id", "share_group_share_type_mappings", "share_groups", ["share_group_id"], ["id"]) op.create_foreign_key( "fk_sgstm_share_type_id", "share_group_share_type_mappings", "share_types", ["share_type_id"], ["id"]) op.drop_constraint( "fk_cgsnapshot_members_cgsnapshot_id", "share_group_snapshot_members", type_="foreignkey") op.drop_constraint( "fk_cgsnapshot_members_share_instance_id", "share_group_snapshot_members", type_="foreignkey") op.drop_constraint( "fk_cgsnapshot_members_share_id", "share_group_snapshot_members", type_="foreignkey") op.drop_constraint( "fk_cgsnapshot_members_share_type_id", "share_group_snapshot_members", type_="foreignkey") op.alter_column( "share_group_snapshot_members", "cgsnapshot_id", existing_type=String(36), new_column_name="share_group_snapshot_id") op.create_foreign_key( "fk_gsm_group_snapshot_id", "share_group_snapshot_members", "share_group_snapshots", ["share_group_snapshot_id"], ["id"]) op.create_foreign_key( "fk_gsm_share_instance_id", "share_group_snapshot_members", "share_instances", ["share_instance_id"], ["id"]) op.create_foreign_key( "fk_gsm_share_id", "share_group_snapshot_members", "shares", ["share_id"], ["id"]) op.drop_column("share_group_snapshot_members", "share_type_id") def downgrade(): meta = sa.MetaData() meta.bind = op.get_bind() # Rename tables op.rename_table("share_groups", "consistency_groups") op.rename_table("share_group_snapshots", "cgsnapshots") op.rename_table("share_group_snapshot_members", "cgsnapshot_members") op.rename_table( "share_group_share_type_mappings", "consistency_group_share_type_mappings") # 
Update columns and foreign keys op.drop_constraint( "fk_shares_share_group_id", "shares", type_="foreignkey") op.alter_column( "shares", "share_group_id", existing_type=String(36), new_column_name="consistency_group_id") op.alter_column( "shares", "source_share_group_snapshot_member_id", existing_type=String(36), existing_nullable=True, new_column_name="source_cgsnapshot_member_id") op.create_foreign_key( "fk_shares_consistency_group_id", "shares", "consistency_groups", ["consistency_group_id"], ["id"]) op.drop_constraint( "fk_share_group_share_network_id", "consistency_groups", type_="foreignkey") op.drop_constraint( "fk_share_group_share_server_id", "consistency_groups", type_="foreignkey") op.alter_column( "consistency_groups", "source_share_group_snapshot_id", existing_type=String(36), new_column_name="source_cgsnapshot_id") op.create_foreign_key( "fk_cg_share_network_id", "consistency_groups", "share_networks", ["share_network_id"], ["id"]) op.create_foreign_key( "fk_cg_share_server_id", "consistency_groups", "share_servers", ["share_server_id"], ["id"]) op.drop_constraint( "fk_share_group_snapshots_share_group_id", "cgsnapshots", type_="foreignkey") op.alter_column( "cgsnapshots", "share_group_id", existing_type=String(36), new_column_name="consistency_group_id") op.create_foreign_key( "fk_cgsnapshots_consistency_group_id", "cgsnapshots", "consistency_groups", ["consistency_group_id"], ["id"]) op.drop_constraint( "fk_sgstm_share_group_id", "consistency_group_share_type_mappings", type_="foreignkey") op.drop_constraint( "fk_sgstm_share_type_id", "consistency_group_share_type_mappings", type_="foreignkey") op.alter_column( "consistency_group_share_type_mappings", "share_group_id", existing_type=String(36), new_column_name="consistency_group_id") op.create_foreign_key( "fk_cgstm_cg_id", "consistency_group_share_type_mappings", "consistency_groups", ["consistency_group_id"], ["id"]) op.create_foreign_key( "fk_cgstm_share_type_id", 
"consistency_group_share_type_mappings", "share_types", ["share_type_id"], ["id"]) op.drop_constraint( "fk_gsm_group_snapshot_id", "cgsnapshot_members", type_="foreignkey") op.drop_constraint( "fk_gsm_share_instance_id", "cgsnapshot_members", type_="foreignkey") op.drop_constraint( "fk_gsm_share_id", "cgsnapshot_members", type_="foreignkey") op.alter_column( "cgsnapshot_members", "share_group_snapshot_id", existing_type=String(36), new_column_name="cgsnapshot_id") op.create_foreign_key( "fk_cgsnapshot_members_cgsnapshot_id", "cgsnapshot_members", "cgsnapshots", ["cgsnapshot_id"], ["id"]) op.create_foreign_key( "fk_cgsnapshot_members_share_instance_id", "cgsnapshot_members", "share_instances", ["share_instance_id"], ["id"]) op.create_foreign_key( "fk_cgsnapshot_members_share_id", "cgsnapshot_members", "shares", ["share_id"], ["id"]) op.add_column( "cgsnapshot_members", Column('share_type_id', String(36), nullable=True)) connection = op.get_bind() si_table = utils.load_table('share_instances', connection) member_table = utils.load_table('cgsnapshot_members', connection) for si_record in connection.execute(si_table.select()): # pylint: disable=no-value-for-parameter connection.execute( member_table.update().where( member_table.c.share_instance_id == si_record.id, ).values({"share_type_id": si_record.share_type_id})) op.alter_column( "cgsnapshot_members", Column('share_type_id', String(36), nullable=False)) op.create_foreign_key( "fk_cgsnapshot_members_share_type_id", "cgsnapshot_members", "share_types", ["share_type_id"], ["id"]) ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/manila/db/migrations/alembic/versions/097fad24d2fc_add_share_instances_share_id_index.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/097fad24d2fc_add_share_instances_share_id_index.0000664000175000017500000000231700000000000033315 0ustar00zuulzuul00000000000000# Copyright 2018 SAP SE # # Licensed under the 
Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_share_instances_share_id_index Revision ID: 097fad24d2fc Revises: 0274d20c560f Create Date: 2018-06-12 10:06:50.642418 """ # revision identifiers, used by Alembic. revision = '097fad24d2fc' down_revision = '0274d20c560f' from alembic import op INDEX_NAME = 'share_instances_share_id_idx' TABLE_NAME = 'share_instances' def upgrade(): op.create_index(INDEX_NAME, TABLE_NAME, ['share_id']) def downgrade(): op.drop_constraint('si_share_fk', TABLE_NAME, type_='foreignkey') op.drop_index(INDEX_NAME, TABLE_NAME) op.create_foreign_key( 'si_share_fk', TABLE_NAME, 'shares', ['share_id'], ['id']) ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/manila/db/migrations/alembic/versions/0c23aec99b74_add_per_share_gigabytes_quota_class.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/0c23aec99b74_add_per_share_gigabytes_quota_class0000664000175000017500000000332200000000000033362 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """add_per_share_gigabytes_quota_class Revision ID: 0c23aec99b74 Revises: 5aa813ae673d Create Date: 2021-01-03 10:01:57.276225 """ # revision identifiers, used by Alembic. revision = '0c23aec99b74' down_revision = '5aa813ae673d' from alembic import op from manila.db.migrations import utils from oslo_log import log from oslo_utils import timeutils LOG = log.getLogger(__name__) def upgrade(): connection = op.get_bind() quota_classes_table = utils.load_table('quota_classes', connection) try: op.bulk_insert (quota_classes_table, [{'created_at': timeutils.utcnow(), 'class_name': 'default', 'resource': 'per_share_gigabytes', 'hard_limit': -1, 'deleted': False, }]) except Exception: LOG.error("Default per_share_gigabytes row not inserted " "into the quota_classes.") raise def downgrade(): """Don't delete the 'default' entries at downgrade time. We don't know if the user had default entries when we started. If they did, we wouldn't want to remove them. So, the safest thing to do is just leave the 'default' entries at downgrade time. """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/0d8c8f6d54a4_modify_share_servers_table.py0000664000175000017500000000316600000000000032201 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""modify_share_servers_table Revision ID: 0d8c8f6d54a4 Revises: cdefa6287df8 Create Date: 2024-11-15 09:25:25.957286 """ # revision identifiers, used by Alembic. revision = '0d8c8f6d54a4' down_revision = 'cdefa6287df8' from alembic import op from oslo_log import log import sqlalchemy as sa SHARE_SERVERS_TABLE = 'share_servers' LOG = log.getLogger(__name__) def upgrade(): # add a new column to share_servers. try: op.add_column( SHARE_SERVERS_TABLE, sa.Column('share_replicas_migration_support', sa.Boolean, nullable=False, server_default=sa.sql.false())) except Exception: LOG.error("Table %s could not add column " "'share_replicas_migration_support'.", SHARE_SERVERS_TABLE) raise def downgrade(): try: op.drop_column(SHARE_SERVERS_TABLE, 'share_replicas_migration_support') except Exception: LOG.error("Table %s failed to drop the column " "'share_replicas_migration_support'.", SHARE_SERVERS_TABLE) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/11ee96se625f3_add_metadata_for_access.py0000664000175000017500000000404600000000000031471 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add metadata for access rule Revision ID: 11ee96se625f3 Revises: 097fad24d2fc Create Date: 2018-06-16 03:07:15.548947 """ # revision identifiers, used by Alembic. 
revision = '11ee96se625f3' down_revision = '097fad24d2fc' from alembic import op from oslo_log import log import sqlalchemy as sql LOG = log.getLogger(__name__) access_metadata_table_name = 'share_access_rules_metadata' def upgrade(): try: op.create_table( access_metadata_table_name, sql.Column('created_at', sql.DateTime(timezone=False)), sql.Column('updated_at', sql.DateTime(timezone=False)), sql.Column('deleted_at', sql.DateTime(timezone=False)), sql.Column('deleted', sql.String(36), default='False'), sql.Column('access_id', sql.String(36), sql.ForeignKey('share_access_map.id'), nullable=False), sql.Column('key', sql.String(255), nullable=False), sql.Column('value', sql.String(1023), nullable=False), sql.Column('id', sql.Integer, primary_key=True, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) except Exception: LOG.error("Table |%s| not created!", access_metadata_table_name) raise def downgrade(): try: op.drop_table(access_metadata_table_name) except Exception: LOG.error("%s table not dropped", access_metadata_table_name) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/162a3e673105_manila_init.py0000664000175000017500000003600400000000000026634 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""manila_init Revision ID: 162a3e673105 Revises: None Create Date: 2014-07-23 17:51:57.077203 """ # revision identifiers, used by Alembic. revision = '162a3e673105' down_revision = None from alembic import op from oslo_log import log from sqlalchemy import Boolean, Column, DateTime, ForeignKeyConstraint from sqlalchemy import Integer, MetaData, String, Table, UniqueConstraint LOG = log.getLogger(__name__) def upgrade(): migrate_engine = op.get_bind().engine meta = MetaData() services = Table( 'services', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('host', String(length=255)), Column('binary', String(length=255)), Column('topic', String(length=255)), Column('report_count', Integer, nullable=False), Column('disabled', Boolean), Column('availability_zone', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) quotas = Table( 'quotas', meta, Column('id', Integer, primary_key=True, nullable=False), Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('project_id', String(length=255)), Column('resource', String(length=255), nullable=False), Column('hard_limit', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) quota_classes = Table( 'quota_classes', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Integer, default=0), Column('id', Integer(), primary_key=True), Column('class_name', String(length=255), index=True), Column('resource', String(length=255)), Column('hard_limit', Integer(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) quota_usages = Table( 'quota_usages', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), 
Column('deleted_at', DateTime(timezone=False)), Column('deleted', Integer, default=0), Column('id', Integer(), primary_key=True), Column('user_id', String(length=255)), Column('project_id', String(length=255), index=True), Column('resource', String(length=255)), Column('in_use', Integer(), nullable=False), Column('reserved', Integer(), nullable=False), Column('until_refresh', Integer(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) reservations = Table( 'reservations', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', Integer, default=0), Column('id', Integer(), primary_key=True), Column('user_id', String(length=255)), Column('uuid', String(length=36), nullable=False), Column('usage_id', Integer(), nullable=False), Column('project_id', String(length=255), index=True), Column('resource', String(length=255)), Column('delta', Integer(), nullable=False), Column('expire', DateTime(timezone=False)), ForeignKeyConstraint(['usage_id'], ['quota_usages.id']), mysql_engine='InnoDB', mysql_charset='utf8', ) project_user_quotas = Table( 'project_user_quotas', meta, Column('id', Integer, primary_key=True, nullable=False), Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('user_id', String(length=255), nullable=False), Column('project_id', String(length=255), nullable=False), Column('resource', String(length=25), nullable=False), Column('hard_limit', Integer, nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) shares = Table( 'shares', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), 
Column('host', String(length=255)), Column('size', Integer), Column('availability_zone', String(length=255)), Column('status', String(length=255)), Column('scheduled_at', DateTime), Column('launched_at', DateTime), Column('terminated_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('snapshot_id', String(length=36)), Column('share_network_id', String(length=36), nullable=True), Column('share_server_id', String(length=36), nullable=True), Column('share_proto', String(255)), Column('export_location', String(255)), Column('volume_type_id', String(length=36)), ForeignKeyConstraint(['share_network_id'], ['share_networks.id']), ForeignKeyConstraint(['share_server_id'], ['share_servers.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) access_map = Table( 'share_access_map', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('share_id', String(36), nullable=False), Column('access_type', String(255)), Column('access_to', String(255)), Column('state', String(255)), ForeignKeyConstraint(['share_id'], ['shares.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) share_snapshots = Table( 'share_snapshots', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('share_id', String(36), nullable=False), Column('size', Integer), Column('status', String(length=255)), Column('progress', String(length=255)), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('share_size', Integer), Column('share_proto', String(length=255)), 
Column('export_location', String(255)), ForeignKeyConstraint(['share_id'], ['shares.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) share_metadata = Table( 'share_metadata', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('share_id', String(length=36), nullable=False), Column('key', String(length=255), nullable=False), Column('value', String(length=1023), nullable=False), ForeignKeyConstraint(['share_id'], ['shares.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) security_services = Table( 'security_services', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('project_id', String(length=36), nullable=False), Column('type', String(length=32), nullable=False), Column('dns_ip', String(length=64), nullable=True), Column('server', String(length=255), nullable=True), Column('domain', String(length=255), nullable=True), Column('user', String(length=255), nullable=True), Column('password', String(length=255), nullable=True), Column('name', String(length=255), nullable=True), Column('description', String(length=255), nullable=True), Column('status', String(length=16)), mysql_engine='InnoDB', mysql_charset='utf8', ) share_networks = Table( 'share_networks', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('project_id', String(length=36), nullable=False), Column('user_id', String(length=36)), Column('neutron_net_id', String(length=36), nullable=True), Column('neutron_subnet_id', String(length=36), nullable=True), Column('network_type', String(length=32), 
nullable=True), Column('segmentation_id', Integer, nullable=True), Column('cidr', String(length=64), nullable=True), Column('ip_version', Integer, nullable=True), Column('name', String(length=255), nullable=True), Column('description', String(length=255), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) share_servers = Table( 'share_servers', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('share_network_id', String(length=36), nullable=True), Column('host', String(length=255), nullable=True), Column('status', String(length=32)), ForeignKeyConstraint(['share_network_id'], ['share_networks.id']), mysql_engine='InnoDB', mysql_charset='utf8', ) share_server_backend_details = Table( 'share_server_backend_details', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default=0), Column('id', Integer, primary_key=True, nullable=False), Column('share_server_id', String(length=36), nullable=False), Column('key', String(length=255), nullable=False), Column('value', String(length=1023), nullable=False), ForeignKeyConstraint(['share_server_id'], ['share_servers.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) network_allocations = Table( 'network_allocations', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('ip_address', String(length=64), nullable=True), Column('mac_address', String(length=32), nullable=True), Column('share_server_id', String(length=36), nullable=False), Column('status', String(length=32)), ForeignKeyConstraint(['share_server_id'], ['share_servers.id']), mysql_engine='InnoDB', 
mysql_charset='utf8', ) ss_nw_association = Table( 'share_network_security_service_association', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('share_network_id', String(length=36), nullable=False), Column('security_service_id', String(length=36), nullable=False), ForeignKeyConstraint(['share_network_id'], ['share_networks.id']), ForeignKeyConstraint(['security_service_id'], ['security_services.id']), mysql_engine='InnoDB', mysql_charset='utf8', ) volume_types = Table( 'volume_types', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('name', String(length=255)), UniqueConstraint('name', 'deleted', name='vt_name_uc'), mysql_engine='InnoDB', mysql_charset='utf8' ) volume_type_extra_specs = Table( 'volume_type_extra_specs', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('volume_type_id', String(length=36), nullable=False), Column('key', String(length=255)), Column('value', String(length=255)), ForeignKeyConstraint(['volume_type_id'], ['volume_types.id']), mysql_engine='InnoDB', mysql_charset='utf8' ) # create all tables # Take care on create order for those with FK dependencies tables = [quotas, services, quota_classes, quota_usages, reservations, project_user_quotas, security_services, share_networks, ss_nw_association, share_servers, network_allocations, shares, access_map, share_snapshots, share_server_backend_details, share_metadata, volume_types, volume_type_extra_specs] with migrate_engine.begin() as conn: for table in tables: try: table.create(conn, checkfirst=True) except Exception: 
LOG.info(repr(table)) LOG.exception('Exception while creating table.') raise def downgrade(): raise NotImplementedError('Downgrade from initial Manila install is not' ' supported.') ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/manila/db/migrations/alembic/versions/17115072e1c3_add_nova_net_id_column_to_share_networks.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/17115072e1c3_add_nova_net_id_column_to_share_net0000664000175000017500000000206700000000000033123 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_nova_net_id_column_to_share_networks Revision ID: 17115072e1c3 Revises: 38e632621e5a Create Date: 2015-02-05 18:07:19.062995 """ # revision identifiers, used by Alembic. 
revision = '17115072e1c3' down_revision = '38e632621e5a' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'share_networks', sa.Column('nova_net_id', sa.String(36), nullable=True)) def downgrade(): op.drop_column('share_networks', 'nova_net_id') ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=manila-21.0.0/manila/db/migrations/alembic/versions/1946cb97bb8d_add_is_soft_deleted_and_scheduled_to_be_deleted_at_to_shares_table.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/1946cb97bb8d_add_is_soft_deleted_and_scheduled_t0000664000175000017500000000344100000000000033312 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add is_soft_deleted and scheduled_to_be_deleted_at to shares table Revision ID: 1946cb97bb8d Revises: fbdfabcba377 Create Date: 2021-07-14 14:41:58.615439 """ # revision identifiers, used by Alembic. 
revision = '1946cb97bb8d' down_revision = 'fbdfabcba377' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): try: op.add_column('shares', sa.Column( 'is_soft_deleted', sa.Boolean, nullable=False, server_default=sa.sql.false())) op.add_column('shares', sa.Column( 'scheduled_to_be_deleted_at', sa.DateTime)) except Exception: LOG.error("Columns shares.is_soft_deleted " "and/or shares.scheduled_to_be_deleted_at not created!") raise def downgrade(): try: op.drop_column('shares', 'is_soft_deleted') op.drop_column('shares', 'scheduled_to_be_deleted_at') LOG.warning("All shares in recycle bin will automatically be " "restored, need to be manually identified and deleted " "again.") except Exception: LOG.error("Column shares.is_soft_deleted and/or " "shares.scheduled_to_be_deleted_at not dropped!") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/1e2d600bf972_add_transfers.py0000664000175000017500000000440300000000000027331 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_transfers Revision ID: 1e2d600bf972 Revises: c476aeb186ec Create Date: 2022-05-30 16:37:18.325464 """ # revision identifiers, used by Alembic. 
revision = '1e2d600bf972' down_revision = 'c476aeb186ec' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): context = op.get_context() mysql_dl = context.bind.dialect.name == 'mysql' datetime_type = (sa.dialects.mysql.DATETIME(fsp=6) if mysql_dl else sa.DateTime) try: op.create_table( 'transfers', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('created_at', datetime_type), sa.Column('updated_at', datetime_type), sa.Column('deleted_at', datetime_type), sa.Column('deleted', sa.String(36), default='False'), sa.Column('resource_id', sa.String(36), nullable=False), sa.Column('resource_type', sa.String(255), nullable=False), sa.Column('display_name', sa.String(255)), sa.Column('salt', sa.String(255)), sa.Column('crypt_hash', sa.String(255)), sa.Column('expires_at', datetime_type), sa.Column('source_project_id', sa.String(255), nullable=True), sa.Column('destination_project_id', sa.String(255), nullable=True), sa.Column('accepted', sa.Boolean, default=False), mysql_engine='InnoDB', mysql_charset='utf8', ) except Exception: LOG.error("Table |%s| not created!", 'transfers') raise def downgrade(): try: op.drop_table('transfers') except Exception: LOG.error("transfers table not dropped") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/1f0bd302c1a6_add_availability_zones_table.py0000664000175000017500000001136200000000000032424 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_availability_zones_table Revision ID: 1f0bd302c1a6 Revises: 579c267fbb4d Create Date: 2015-07-24 12:09:36.008570 """ # revision identifiers, used by Alembic. revision = '1f0bd302c1a6' down_revision = '579c267fbb4d' from alembic import op from oslo_utils import timeutils from oslo_utils import uuidutils from sqlalchemy import Column, DateTime, ForeignKey, String, UniqueConstraint from manila.db.migrations import utils def collect_existing_az_from_services_table(connection, services_table, az_table): az_name_to_id_mapping = dict() existing_az = [] for service in connection.execute(services_table.select()): if service.availability_zone in az_name_to_id_mapping: continue az_id = uuidutils.generate_uuid() az_name_to_id_mapping[service.availability_zone] = az_id existing_az.append({ 'created_at': timeutils.utcnow(), 'id': az_id, 'name': service.availability_zone }) op.bulk_insert(az_table, existing_az) return az_name_to_id_mapping def upgrade(): connection = op.get_bind() # Create new AZ table and columns availability_zones_table = op.create_table( 'availability_zones', Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('name', String(length=255)), UniqueConstraint('name', 'deleted', name='az_name_uc'), mysql_engine='InnoDB', mysql_charset='utf8') for table_name, fk_name in (('services', 'service_az_id_fk'), ('share_instances', 'si_az_id_fk')): op.add_column( table_name, 
Column('availability_zone_id', String(36), ForeignKey('availability_zones.id', name=fk_name)) ) # Collect existing AZs from services table services_table = utils.load_table('services', connection) az_name_to_id_mapping = collect_existing_az_from_services_table( connection, services_table, availability_zones_table) # Map string AZ names to ID's in target tables # pylint: disable=no-value-for-parameter set_az_id_in_table = lambda table, id, name: ( # noqa: E731 op.execute( table.update().where(table.c.availability_zone == name).values( {'availability_zone_id': id}) ) ) share_instances_table = utils.load_table('share_instances', connection) for name, id in az_name_to_id_mapping.items(): for table_name in [services_table, share_instances_table]: set_az_id_in_table(table_name, id, name) # Remove old AZ columns from tables op.drop_column('services', 'availability_zone') op.drop_column('share_instances', 'availability_zone') def downgrade(): connection = op.get_bind() # Create old AZ fields op.add_column('services', Column('availability_zone', String(length=255))) op.add_column('share_instances', Column('availability_zone', String(length=255))) # Migrate data az_table = utils.load_table('availability_zones', connection) share_instances_table = utils.load_table('share_instances', connection) services_table = utils.load_table('services', connection) for az in connection.execute(az_table.select()): # pylint: disable=no-value-for-parameter op.execute( share_instances_table.update().where( share_instances_table.c.availability_zone_id == az.id ).values({'availability_zone': az.name}) ) op.execute( services_table.update().where( services_table.c.availability_zone_id == az.id ).values({'availability_zone': az.name}) ) # Remove AZ_id columns and AZ table op.drop_constraint('service_az_id_fk', 'services', type_='foreignkey') op.drop_column('services', 'availability_zone_id') op.drop_constraint('si_az_id_fk', 'share_instances', type_='foreignkey') op.drop_column('share_instances', 
'availability_zone_id') op.drop_table('availability_zones') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/211836bf835c_add_access_level.py0000664000175000017500000000217200000000000027677 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add access level Revision ID: 211836bf835c Revises: 162a3e673105 Create Date: 2014-12-19 05:34:06.790159 """ # revision identifiers, used by Alembic. revision = '211836bf835c' down_revision = '162a3e673105' from alembic import op import sqlalchemy as sa from manila.common import constants def upgrade(): op.add_column('share_access_map', sa.Column('access_level', sa.String(2), default=constants.ACCESS_LEVEL_RW)) def downgrade(): op.drop_column('share_access_map', 'access_level') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/221a83cfd85b_change_user_project_id_length.py0000664000175000017500000000357700000000000032634 0ustar00zuulzuul00000000000000# Copyright 2016 SAP SE # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """change_user_id_length Revision ID: 221a83cfd85b Revises: eb6d5544cbbd Create Date: 2016-06-21 14:22:48.314501 """ # revision identifiers, used by Alembic. revision = '221a83cfd85b' down_revision = 'eb6d5544cbbd' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): LOG.info("Changing user_id length for share_networks") op.alter_column("share_networks", "user_id", type_=sa.String(length=255)) LOG.info("Changing project_id length for share_networks") op.alter_column("share_networks", "project_id", type_=sa.String(length=255)) LOG.info("Changing project_id length for security_services") op.alter_column("security_services", "project_id", type_=sa.String(length=255)) def downgrade(): LOG.info("Changing back user_id length for share_networks") op.alter_column("share_networks", "user_id", type_=sa.String(length=36)) LOG.info("Changing back project_id length for share_networks") op.alter_column("share_networks", "project_id", type_=sa.String(length=36)) LOG.info("Changing back project_id length for security_services") op.alter_column("security_services", "project_id", type_=sa.String(length=36)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/238720805ce1_add_messages_table.py0000664000175000017500000000373200000000000030144 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add messages table Revision ID: 238720805ce1 Revises: 31252d671ae5 Create Date: 2017-02-02 08:38:55.134095 """ # revision identifiers, used by Alembic. revision = '238720805ce1' down_revision = '31252d671ae5' from alembic import op from oslo_log import log from sqlalchemy import Column, DateTime from sqlalchemy import MetaData, String, Table LOG = log.getLogger(__name__) def upgrade(): meta = MetaData() # New table messages = Table( 'messages', meta, Column('id', String(36), primary_key=True, nullable=False), Column('project_id', String(255), nullable=False), Column('request_id', String(255), nullable=True), Column('resource_type', String(255)), Column('resource_id', String(36), nullable=True), Column('action_id', String(10), nullable=False), Column('detail_id', String(10), nullable=True), Column('message_level', String(255), nullable=False), Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('deleted', String(36)), Column('expires_at', DateTime(timezone=False)), mysql_engine='InnoDB', mysql_charset='utf8' ) messages.create(op.get_bind()) def downgrade(): try: op.drop_table('messages') except Exception: LOG.error("messages table not dropped") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/27cb96d991fa_add_description_for_share_type.py0000664000175000017500000000254000000000000033035 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei 
Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add description for share type Revision ID: 27cb96d991fa Revises: 829a09b0ddd4 Create Date: 2017-09-16 03:07:15.548947 """ # revision identifiers, used by Alembic. revision = '27cb96d991fa' down_revision = '829a09b0ddd4' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): try: op.add_column( 'share_types', sa.Column('description', sa.String(255), nullable=True)) except Exception: LOG.error("Column share_types.description not created!") raise def downgrade(): try: op.drop_column('share_types', 'description') except Exception: LOG.error("Column share_types.description not dropped!") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/293fac1130ca_add_replication_attrs.py0000664000175000017500000000252000000000000031113 0ustar00zuulzuul00000000000000# Copyright 2015 Goutham Pacha Ravi. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add replication attributes to Share and ShareInstance models. Revision ID: 293fac1130ca Revises: 344c1ac4747f Create Date: 2015-09-10 15:45:07.273043 """ # revision identifiers, used by Alembic. revision = '293fac1130ca' down_revision = '344c1ac4747f' from alembic import op import sqlalchemy as sa def upgrade(): """Add replication attributes to Shares and ShareInstances.""" op.add_column('shares', sa.Column('replication_type', sa.String(255))) op.add_column('share_instances', sa.Column('replica_state', sa.String(255))) def downgrade(): """Remove replication attributes from Shares and ShareInstances.""" op.drop_column('shares', 'replication_type') op.drop_column('share_instances', 'replica_state') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/2d708a9a3ba9_backup_change_az_to_az_id.py0000664000175000017500000000600000000000000031704 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""backup_change_availability_zone_to_availability_zone_id Revision ID: 2d708a9a3ba9 Revises: cb20f743ca7b Create Date: 2023-08-24 11:01:41.134456 """ # revision identifiers, used by Alembic. revision = '2d708a9a3ba9' down_revision = 'cb20f743ca7b' from alembic import op from sqlalchemy import Column, ForeignKey, String from manila.db.migrations import utils def collect_existing_az(az_table, connection): az_name_to_id_mapping = dict() for az in connection.execute(az_table.select()): if az.name in az_name_to_id_mapping: continue az_name_to_id_mapping[az.name] = az.id return az_name_to_id_mapping def upgrade(): connection = op.get_bind() op.add_column( 'share_backups', Column('availability_zone_id', String(36), ForeignKey('availability_zones.id', name='sb_az_id_fk')) ) # Collect existing AZs from availability_zones table availability_zones_table = utils.load_table( 'availability_zones', connection) az_name_to_id_mapping = collect_existing_az( availability_zones_table, connection,) # Map string AZ names to ID's in target table # pylint: disable=no-value-for-parameter set_az_id_in_table = lambda table, id, name: ( # noqa: E731 op.execute( table.update().where(table.c.availability_zone == name).values( {'availability_zone_id': id}) ) ) share_backups_table = utils.load_table('share_backups', connection) for name, id in az_name_to_id_mapping.items(): set_az_id_in_table(share_backups_table, id, name) # Remove old AZ columns from table op.drop_column('share_backups', 'availability_zone') def downgrade(): connection = op.get_bind() # Create old AZ fields op.add_column('share_backups', Column('availability_zone', String(length=255))) # Migrate data az_table = utils.load_table('availability_zones', connection) share_backups_table = utils.load_table('share_backups', connection) for az in connection.execute(az_table.select()): # pylint: disable=no-value-for-parameter op.execute( share_backups_table.update().where( share_backups_table.c.availability_zone_id == az.id 
).values({'availability_zone': az.name}) ) # Remove AZ_id columns and AZ table op.drop_constraint('sb_az_id_fk', 'share_backups', type_='foreignkey') op.drop_column('share_backups', 'availability_zone_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/2f27d904214c_add_backup_type_column.py0000664000175000017500000000251400000000000031126 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_backup_type_column Revision ID: 2f27d904214c Revises: 6e32091979e0 Create Date: 2024-03-10 23:16:18.130654 """ # revision identifiers, used by Alembic. 
revision = '2f27d904214c' down_revision = '6e32091979e0' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) share_backups_table_name = 'share_backups' column_name = "backup_type" def upgrade(): try: op.add_column(share_backups_table_name, sa.Column(column_name, sa.String(32), nullable=True)) except Exception: LOG.error("Column 'backup_type' not created!") raise def downgrade(): try: op.drop_column(share_backups_table_name, column_name) except Exception: LOG.error("Column backup_type not dropped!") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/30cb96d995fa_add_is_public_column_for_share.py0000664000175000017500000000247000000000000032777 0ustar00zuulzuul00000000000000# Copyright 2015 mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add public column for share Revision ID: 30cb96d995fa Revises: ef0c02b4366 Create Date: 2015-01-16 03:07:15.548947 """ # revision identifiers, used by Alembic. 
revision = '30cb96d995fa' down_revision = 'ef0c02b4366' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): try: op.add_column('shares', sa.Column('is_public', sa.Boolean, default=False)) except Exception: LOG.error("Column shares.is_public not created!") raise def downgrade(): try: op.drop_column('shares', 'is_public') except Exception: LOG.error("Column shares.is_public not dropped!") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/323840a08dc4_add_shares_task_state.py0000664000175000017500000000176600000000000030756 0ustar00zuulzuul00000000000000# Copyright 2015 Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add shares.task_state Revision ID: 323840a08dc4 Revises: 3651e16d7c43 Create Date: 2015-04-30 07:58:45.175790 """ # revision identifiers, used by Alembic. 
revision = '323840a08dc4' down_revision = '3651e16d7c43' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('shares', sa.Column('task_state', sa.String(255))) def downgrade(): op.drop_column('shares', 'task_state') ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/db/migrations/alembic/versions/344c1ac4747f_add_share_instance_access_rules_status.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/344c1ac4747f_add_share_instance_access_rules_sta0000664000175000017500000001044300000000000033267 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove access rules status and add access_rule_status to share_instance model Revision ID: 344c1ac4747f Revises: dda6de06349 Create Date: 2015-11-18 14:58:55.806396 """ # revision identifiers, used by Alembic. revision = '344c1ac4747f' down_revision = 'dda6de06349' from alembic import op from sqlalchemy import Column, String from manila.common import constants from manila.db.migrations import utils priorities = { 'active': 0, 'new': 1, 'error': 2 } upgrade_data_mapping = { 'active': 'active', 'new': 'out_of_sync', 'error': 'error', } def upgrade(): """Transform individual access rules states to 'access_rules_status'. WARNING: This method performs lossy converting of existing data in DB. 
""" op.add_column( 'share_instances', Column('access_rules_status', String(length=255)) ) connection = op.get_bind() share_instances_table = utils.load_table('share_instances', connection) instance_access_table = utils.load_table('share_instance_access_map', connection) # NOTE(u_glide): Data migrations shouldn't be performed on live clouds # because it will lead to unpredictable behaviour of running operations # like migration. instances_query = ( share_instances_table.select() .where(share_instances_table.c.status == constants.STATUS_AVAILABLE) .where(share_instances_table.c.deleted == 'False') ) for instance in connection.execute(instances_query): access_mappings_query = instance_access_table.select().where( instance_access_table.c.share_instance_id == instance._mapping['id'] ).where(instance_access_table.c.deleted == 'False') status = constants.STATUS_ACTIVE for access_rule in connection.execute(access_mappings_query): if (access_rule._mapping['state'] == constants.STATUS_DELETING or access_rule._mapping['state'] not in priorities): continue if priorities[access_rule._mapping['state']] > priorities[status]: status = access_rule._mapping['state'] # pylint: disable=no-value-for-parameter op.execute( share_instances_table.update().where( share_instances_table.c.id == instance._mapping['id'] ).values({'access_rules_status': upgrade_data_mapping[status]}) ) op.drop_column('share_instance_access_map', 'state') def downgrade(): op.add_column( 'share_instance_access_map', Column('state', String(length=255)) ) connection = op.get_bind() share_instances_table = utils.load_table('share_instances', connection) instance_access_table = utils.load_table('share_instance_access_map', connection) instances_query = ( share_instances_table.select() .where(share_instances_table.c.status == constants.STATUS_AVAILABLE) .where(share_instances_table.c.deleted == 'False') ) for instance in connection.execute(instances_query): # NOTE(u_glide): We cannot determine if a rule is applied or 
not in # Manila, so administrator should manually handle such access rules. if instance._mapping['access_rules_status'] == 'active': state = 'active' else: state = 'error' # pylint: disable=no-value-for-parameter op.execute( instance_access_table.update().where( instance_access_table.c.share_instance_id == instance._mapping['id'] ).where(instance_access_table.c.deleted == 'False').values( {'state': state} ) ) op.drop_column('share_instances', 'access_rules_status') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/3651e16d7c43_add_consistency_groups.py0000664000175000017500000001727000000000000031214 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Alex Meade # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Create Consistency Groups Tables and Columns Revision ID: 3651e16d7c43 Revises: 55761e5f59c5 Create Date: 2015-07-29 13:17:15.940454 """ # revision identifiers, used by Alembic. 
revision = '3651e16d7c43' down_revision = '55761e5f59c5' SHARE_NETWORK_FK_CONSTRAINT_NAME = "fk_cg_share_network_id" SHARE_SERVER_FK_CONSTRAINT_NAME = "fk_cg_share_server_id" SHARES_CG_FK_CONSTRAINT_NAME = "fk_shares_consistency_group_id" CG_MAP_FK_CONSTRAINT_NAME = "fk_cgstm_cg_id" SHARE_TYPE_FK_CONSTRAINT_NAME = "fk_cgstm_share_type_id" CGSNAP_CG_ID_FK_CONSTRAINT_NAME = "fk_cgsnapshots_consistency_group_id" CGSNAP_MEM_SHARETYPE_FK_CONSTRAINT_NAME = "fk_cgsnapshot_members_share_type_id" CGSNAP_MEM_SNAP_ID_FK_CONSTRAINT_NAME = "fk_cgsnapshot_members_cgsnapshot_id" CGSNAP_MEM_SHARE_FK_CONSTRAINT_NAME = "fk_cgsnapshot_members_share_id" CGSNAP_MEM_INST_FK_CONSTRAINT_NAME = "fk_cgsnapshot_members_share_instance_id" from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): # New table - consistency_groups op.create_table( 'consistency_groups', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('user_id', sa.String(length=255), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('host', sa.String(length=255)), sa.Column('name', sa.String(length=255)), sa.Column('description', sa.String(length=255)), sa.Column('status', sa.String(length=255)), sa.Column('source_cgsnapshot_id', sa.String(length=36)), sa.Column('share_network_id', sa.String(length=36), sa.ForeignKey('share_networks.id', name=SHARE_NETWORK_FK_CONSTRAINT_NAME), nullable=True), sa.Column('share_server_id', sa.String(length=36), sa.ForeignKey('share_servers.id', name=SHARE_SERVER_FK_CONSTRAINT_NAME), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8') op.add_column( 'shares', sa.Column('consistency_group_id', sa.String(36), sa.ForeignKey('consistency_groups.id', name=SHARES_CG_FK_CONSTRAINT_NAME))) 
op.add_column('shares', sa.Column('source_cgsnapshot_member_id', sa.String(36))) op.create_table( 'consistency_group_share_type_mappings', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('consistency_group_id', sa.String(length=36), sa.ForeignKey('consistency_groups.id', name=CG_MAP_FK_CONSTRAINT_NAME), nullable=False), sa.Column('share_type_id', sa.String(length=36), sa.ForeignKey('share_types.id', name=SHARE_TYPE_FK_CONSTRAINT_NAME), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') op.create_table( 'cgsnapshots', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('user_id', sa.String(length=255), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('consistency_group_id', sa.String(length=36), sa.ForeignKey('consistency_groups.id', name=CGSNAP_CG_ID_FK_CONSTRAINT_NAME), nullable=False), sa.Column('name', sa.String(length=255)), sa.Column('description', sa.String(length=255)), sa.Column('status', sa.String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8') op.create_table( 'cgsnapshot_members', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('user_id', sa.String(length=255), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('cgsnapshot_id', sa.String(length=36), sa.ForeignKey('cgsnapshots.id', name=CGSNAP_MEM_SNAP_ID_FK_CONSTRAINT_NAME), nullable=False), sa.Column('share_instance_id', 
sa.String(length=36), sa.ForeignKey('share_instances.id', name=CGSNAP_MEM_INST_FK_CONSTRAINT_NAME), nullable=False), sa.Column('share_id', sa.String(length=36), sa.ForeignKey('shares.id', name=CGSNAP_MEM_SHARE_FK_CONSTRAINT_NAME), nullable=False), sa.Column('share_type_id', sa.String(length=36), sa.ForeignKey('share_types.id', name=CGSNAP_MEM_SHARETYPE_FK_CONSTRAINT_NAME), nullable=False), sa.Column('size', sa.Integer), sa.Column('status', sa.String(length=255)), sa.Column('share_proto', sa.String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8') def downgrade(): try: op.drop_table('cgsnapshot_members') except Exception: LOG.exception("Error Dropping 'cgsnapshot_members' table.") try: op.drop_table('cgsnapshots') except Exception: LOG.exception("Error Dropping 'cgsnapshots' table.") try: op.drop_table('consistency_group_share_type_mappings') except Exception: LOG.exception("Error Dropping " "'consistency_group_share_type_mappings' table.") try: op.drop_column('shares', 'source_cgsnapshot_member_id') except Exception: LOG.exception("Error Dropping 'source_cgsnapshot_member_id' " "column from 'shares' table.") try: op.drop_constraint(SHARES_CG_FK_CONSTRAINT_NAME, 'shares', type_='foreignkey') except Exception: LOG.exception("Error Dropping '%s' constraint.", SHARES_CG_FK_CONSTRAINT_NAME) try: op.drop_column('shares', 'consistency_group_id') except Exception: LOG.exception("Error Dropping 'consistency_group_id' column " "from 'shares' table.") try: op.drop_table('consistency_groups') except Exception: LOG.exception("Error Dropping 'consistency_groups' table.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/38e632621e5a_change_volume_type_to_share_type.py0000664000175000017500000001267000000000000033244 0ustar00zuulzuul00000000000000# Copyright 2015 Bob Callaway. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """change volume_type to share_type Revision ID: 38e632621e5a Revises: 162a3e673105 Create Date: 2014-10-02 09:14:03.172324 """ # revision identifiers, used by Alembic. revision = '38e632621e5a' down_revision = '211836bf835c' from alembic import op from oslo_log import log from oslo_utils import strutils import sqlalchemy as sa from sqlalchemy.sql import table LOG = log.getLogger(__name__) def upgrade(): LOG.info("Renaming column name shares.volume_type_id to " "shares.share_type.id") op.alter_column("shares", "volume_type_id", new_column_name="share_type_id", type_=sa.String(length=36)) LOG.info("Renaming volume_types table to share_types") op.rename_table("volume_types", "share_types") op.drop_constraint('vt_name_uc', 'share_types', type_='unique') op.create_unique_constraint('st_name_uc', 'share_types', ['name', 'deleted']) LOG.info("Creating share_type_extra_specs table") st_es = op.create_table( 'share_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('share_type_id', sa.String(length=36), sa.ForeignKey('share_types.id', name="st_id_fk"), nullable=False), sa.Column('spec_key', sa.String(length=255)), sa.Column('spec_value', sa.String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8') LOG.info("Migrating volume_type_extra_specs to " 
"share_type_extra_specs") _copy_records(destination_table=st_es, up_migration=True) LOG.info("Dropping volume_type_extra_specs table") op.drop_table("volume_type_extra_specs") def downgrade(): LOG.info("Creating volume_type_extra_specs table") vt_es = op.create_table( 'volume_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('volume_type_id', sa.String(length=36), nullable=False), sa.Column('key', sa.String(length=255)), sa.Column('value', sa.String(length=255)), mysql_charset='utf8', mysql_engine='InnoDB') LOG.info("Migrating share_type_extra_specs to " "volume_type_extra_specs") _copy_records(destination_table=vt_es, up_migration=False) LOG.info("Dropping share_type_extra_specs table") op.drop_table("share_type_extra_specs") LOG.info("Renaming share_types table to volume_types") op.drop_constraint('st_name_uc', 'share_types', type_='unique') op.create_unique_constraint('vt_name_uc', 'share_types', ['name', 'deleted']) op.rename_table("share_types", "volume_types") op.create_foreign_key( "volume_type_extra_specs_ibfk_1", "volume_type_extra_specs", "volume_types", ["volume_type_id"], ["id"] ) LOG.info("Renaming column name shares.share_type_id to " "shares.volume_type.id") op.alter_column("shares", "share_type_id", new_column_name="volume_type_id", type_=sa.String(length=36)) def _copy_records(destination_table, up_migration=True): old = ('volume', '') new = ('share', 'spec_') data_from, data_to = (old, new) if up_migration else (new, old) from_table = table( data_from[0] + '_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean if up_migration else sa.Integer), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column(data_from[0] + '_type_id', 
sa.String(length=36)), sa.Column(data_from[1] + 'key', sa.String(length=255)), sa.Column(data_from[1] + 'value', sa.String(length=255))) extra_specs = [] for es in op.get_bind().execute(from_table.select()): if up_migration: deleted = strutils.int_from_bool_as_string(es.deleted) else: deleted = strutils.bool_from_string(es.deleted, default=True) extra_specs.append({ 'created_at': es.created_at, 'updated_at': es.updated_at, 'deleted_at': es.deleted_at, 'deleted': deleted, data_to[0] + '_type_id': getattr(es, data_from[0] + '_type_id'), data_to[1] + 'key': getattr(es, data_from[1] + 'key'), data_to[1] + 'value': getattr(es, data_from[1] + 'value'), }) op.bulk_insert(destination_table, extra_specs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/3a482171410f_add_drivers_private_data_table.py0000664000175000017500000000411400000000000032523 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_driver_private_data_table Revision ID: 3a482171410f Revises: 56cdbe267881 Create Date: 2015-04-21 14:47:38.201658 """ # revision identifiers, used by Alembic. 
revision = '3a482171410f' down_revision = '56cdbe267881' from alembic import op from oslo_log import log import sqlalchemy as sql LOG = log.getLogger(__name__) drivers_private_data_table_name = 'drivers_private_data' def upgrade(): try: op.create_table( drivers_private_data_table_name, sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('deleted', sql.Integer, default=0), sql.Column('host', sql.String(255), nullable=False, primary_key=True), sql.Column('entity_uuid', sql.String(36), nullable=False, primary_key=True), sql.Column('key', sql.String(255), nullable=False, primary_key=True), sql.Column('value', sql.String(1023), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) except Exception: LOG.error("Table |%s| not created!", drivers_private_data_table_name) raise def downgrade(): try: op.drop_table(drivers_private_data_table_name) except Exception: LOG.error("%s table not dropped", drivers_private_data_table_name) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/3db9992c30f3_transform_statuses_to_lowercase.py0000664000175000017500000000335300000000000033242 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Transform statuses to lowercase Revision ID: 3db9992c30f3 Revises: 533646c7af38 Create Date: 2015-05-28 19:30:35.645773 """ # revision identifiers, used by Alembic. revision = '3db9992c30f3' down_revision = '533646c7af38' from alembic import op import sqlalchemy as sa from manila.db.migrations import utils def upgrade(): # NOTE(vponomaryov): shares has some statuses as uppercase, so # transform them in addition to statuses of share servers. for table in ('shares', 'share_servers'): _transform_case(table, make_upper=False) def downgrade(): # NOTE(vponomaryov): transform share server statuses to uppercase and # leave share statuses as is. _transform_case('share_servers', make_upper=True) def _transform_case(table_name, make_upper): connection = op.get_bind() table = utils.load_table(table_name, connection) case = sa.func.upper if make_upper else sa.func.lower for row in connection.execute(table.select()): op.execute( table.update().where( # pylint: disable=no-value-for-parameter table.c.id == row.id ).values({'status': case(row.status)}) ) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/db/migrations/alembic/versions/3e7d62517afa_add_create_share_from_snapshot_support.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/3e7d62517afa_add_create_share_from_snapshot_supp0000664000175000017500000001366700000000000033431 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Add 'create_share_from_snapshot_support' extra spec to share types Revision ID: 3e7d62517afa Revises: 48a7beae3117 Create Date: 2016-08-16 10:48:11.497499 """ # revision identifiers, used by Alembic. revision = '3e7d62517afa' down_revision = '48a7beae3117' from alembic import op from oslo_utils import timeutils import sqlalchemy as sa from sqlalchemy.sql import table from manila.common import constants def upgrade(): """Performs DB upgrade to add create_share_from_snapshot_support. Prior to this migration, the 'snapshot_support' extra spec meant two different things: a snapshot may be created, and a new share may be created from a snapshot. With the planned addition of new snapshot semantics (revert to snapshot, mountable snapshots), it is likely a driver may be able to support one or both of the new semantics but *not* be able to create a share from a snapshot. So this migration separates the existing snapshot_support extra spec and share attribute into two values to enable logical separability of the features. Add 'create_share_from_snapshot_support' extra spec to all share types and attribute 'create_share_from_snapshot_support' to Share model. """ with sa.orm.Session(bind=op.get_bind()) as session: extra_specs_table = table( 'share_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('share_type_id', sa.String(length=36)), sa.Column('spec_key', sa.String(length=255)), sa.Column('spec_value', sa.String(length=255))) share_type_table = table( 'share_types', sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer)) # Get list of share type IDs that don't already have the new required # create_share_from_snapshot_support extra spec defined. 
existing_extra_specs = session.query( extra_specs_table).filter( extra_specs_table.c.spec_key == constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT).filter( extra_specs_table.c.deleted == 0).all() excluded_st_ids = [es.share_type_id for es in existing_extra_specs] # Get share types for the IDs we got in the previous query share_types = session.query(share_type_table).filter( share_type_table.c.deleted.in_(('0', 'False', ))).filter( share_type_table.c.id.notin_(excluded_st_ids)).all() extra_specs = [] now = timeutils.utcnow() for share_type in share_types: # Get the value of snapshot_support for each extant share type snapshot_support_extra_spec = session.query( extra_specs_table).filter( extra_specs_table.c.spec_key == constants.ExtraSpecs.SNAPSHOT_SUPPORT).filter( extra_specs_table.c.share_type_id == share_type.id).first() spec_value = (snapshot_support_extra_spec.spec_value if snapshot_support_extra_spec else 'False') # Copy the snapshot_support value to # create_share_from_snapshot_support extra_specs.append({ 'spec_key': constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT, 'spec_value': spec_value, 'deleted': 0, 'created_at': now, 'share_type_id': share_type.id, }) if extra_specs: op.bulk_insert(extra_specs_table, extra_specs) # Add create_share_from_snapshot_support attribute to shares table op.add_column( 'shares', sa.Column( 'create_share_from_snapshot_support', sa.Boolean, default=True ) ) # Copy snapshot_support to create_share_from_snapshot_support on each # share shares_table = sa.Table( 'shares', sa.MetaData(), sa.Column('id', sa.String(length=36)), sa.Column('deleted', sa.String(length=36)), sa.Column('snapshot_support', sa.Boolean), sa.Column('create_share_from_snapshot_support', sa.Boolean), ) # pylint: disable=no-value-for-parameter update = shares_table.update().where( shares_table.c.deleted == 'False').values( create_share_from_snapshot_support=shares_table.c.snapshot_support) session.execute(update) session.commit() def downgrade(): 
"""Performs DB downgrade removing create_share_from_snapshot_support. Remove 'create_share_from_snapshot_support' extra spec from all share types and attribute 'create_share_from_snapshot_support' from Share model. """ connection = op.get_bind() deleted_at = timeutils.utcnow() extra_specs = sa.Table( 'share_type_extra_specs', sa.MetaData(), autoload_with=connection) # pylint: disable=no-value-for-parameter update = extra_specs.update().where( extra_specs.c.spec_key == constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT).where( extra_specs.c.deleted == 0).values(deleted=extra_specs.c.id, deleted_at=deleted_at) connection.execute(update) op.drop_column('shares', 'create_share_from_snapshot_support') ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/manila/db/migrations/alembic/versions/40d1f2374e89_add_mount_point_name_to_share_instances.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/40d1f2374e89_add_mount_point_name_to_share_insta0000664000175000017500000000272200000000000033263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add mount_point_name to share_instances Revision ID: 6e32091979e0 Revises: 99d328f0a3d2 Create Date: 2024-01-26 22:08:22.412974 """ # revision identifiers, used by Alembic. 
revision = '6e32091979e0' down_revision = '99d328f0a3d2' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) share_instances_table_name = 'share_instances' column_name = "mount_point_name" def upgrade(): try: op.add_column(share_instances_table_name, sa.Column(column_name, sa.String(255), nullable=True)) except Exception: LOG.error("Column mount_point_name not created!") raise def downgrade(): try: op.drop_column(share_instances_table_name, column_name) except Exception: LOG.error("Column mount_point_name not dropped!") raise ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=manila-21.0.0/manila/db/migrations/alembic/versions/478c445d8d3e_add_security_service_update_control_fields.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/478c445d8d3e_add_security_service_update_control0000664000175000017500000000646300000000000033413 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_security_service_update_control_fields Revision ID: 478c445d8d3e Revises: 0c23aec99b74 Create Date: 2020-12-07 12:33:41.444202 """ # revision identifiers, used by Alembic. 
revision = '478c445d8d3e' down_revision = '0c23aec99b74' from alembic import op from manila.common import constants from oslo_log import log import sqlalchemy as sa SHARE_SERVERS_TABLE = 'share_servers' SHARE_NETWORKS_TABLE = 'share_networks' ASYNC_OPERATION_DATA_TABLE = 'async_operation_data' LOG = log.getLogger(__name__) def upgrade(): context = op.get_context() mysql_dl = context.bind.dialect.name == 'mysql' datetime_type = (sa.dialects.mysql.DATETIME(fsp=6) if mysql_dl else sa.DateTime) try: op.create_table( ASYNC_OPERATION_DATA_TABLE, sa.Column('created_at', datetime_type), sa.Column('updated_at', datetime_type), sa.Column('deleted_at', datetime_type), sa.Column('deleted', sa.Integer, default=0), sa.Column('entity_uuid', sa.String(36), nullable=False, primary_key=True), sa.Column('key', sa.String(255), nullable=False, primary_key=True), sa.Column('value', sa.String(1023), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) op.add_column( SHARE_SERVERS_TABLE, sa.Column('security_service_update_support', sa.Boolean, nullable=False, server_default=sa.sql.false()) ) op.add_column( SHARE_NETWORKS_TABLE, sa.Column('status', sa.String(36), nullable=False, server_default=constants.STATUS_NETWORK_ACTIVE)) except Exception: msg_args = { 'async_op_table': ASYNC_OPERATION_DATA_TABLE, 'sec_serv_column': 'share_servers.security_service_update_support', 'shr_net_column': 'share_networks.status', } LOG.error('Table %(async_op_table)s and table columns ' '%(sec_serv_column)s and %(shr_net_column)s were not' ' created!', msg_args) raise def downgrade(): try: op.drop_table(ASYNC_OPERATION_DATA_TABLE) op.drop_column(SHARE_SERVERS_TABLE, 'security_service_update_support') op.drop_column(SHARE_NETWORKS_TABLE, 'status') except Exception: msg_args = { 'async_op_table': ASYNC_OPERATION_DATA_TABLE, 'sec_serv_column': 'share_servers.security_service_update_support', 'shr_net_column': 'share_networks.status', } LOG.error('Table %(async_op_table)s and table columns ' 
'%(sec_serv_column)s and %(shr_net_column)s were not ' 'dropped!', msg_args) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/48a7beae3117_move_share_type_id_to_instances.py0000664000175000017500000000630100000000000033210 0ustar00zuulzuul00000000000000# Copyright 2016, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """move_share_type_id_to_instances Revision ID: 48a7beae3117 Revises: 63809d875e32 Create Date: 2016-07-19 13:04:50.035139 """ # revision identifiers, used by Alembic. revision = '48a7beae3117' down_revision = '63809d875e32' from alembic import op import sqlalchemy as sa from manila.db.migrations import utils def upgrade(): """Move share_type_id from Shares to Share Instances table.""" # NOTE(ganso): Adding share_type_id as a foreign key to share_instances # table. Please note that share_type_id is NOT a foreign key in shares # table prior to this migration. 
op.add_column( 'share_instances', sa.Column('share_type_id', sa.String(36), sa.ForeignKey('share_types.id', name='si_st_id_fk'), nullable=True)) connection = op.get_bind() shares_table = utils.load_table('shares', connection) share_instances_table = utils.load_table('share_instances', connection) for instance in connection.execute(share_instances_table.select()): share = connection.execute(shares_table.select().where( instance._mapping['share_id'] == shares_table.c.id)).first() # pylint: disable=no-value-for-parameter op.execute(share_instances_table.update().where( share_instances_table.c.id == instance._mapping['id']).values( {'share_type_id': share._mapping['share_type_id']})) op.drop_column('shares', 'share_type_id') def downgrade(): """Move share_type_id from Share Instances to Shares table. This method can lead to data loss because only the share_type_id from the first share instance is moved to the shares table. """ # NOTE(ganso): Adding back share_type_id to the shares table NOT as a # foreign key, as it was before. 
op.add_column( 'shares', sa.Column('share_type_id', sa.String(36), nullable=True)) connection = op.get_bind() shares_table = utils.load_table('shares', connection) share_instances_table = utils.load_table('share_instances', connection) for share in connection.execute(shares_table.select()): instance = connection.execute(share_instances_table.select().where( share._mapping['id'] == share_instances_table.c.share_id)).first() # pylint: disable=no-value-for-parameter op.execute(shares_table.update().where( shares_table.c.id == instance._mapping['share_id']).values( {'share_type_id': instance._mapping['share_type_id']})) op.drop_constraint('si_st_id_fk', 'share_instances', type_='foreignkey') op.drop_column('share_instances', 'share_type_id') ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=manila-21.0.0/manila/db/migrations/alembic/versions/493eaffd79e1_add_mtu_network_allocations_share_networks.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/493eaffd79e1_add_mtu_network_allocations_share_n0000664000175000017500000000216700000000000033525 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_mtu_network_allocations Revision ID: 493eaffd79e1 Revises: e8ea58723178 Create Date: 2016-08-01 14:18:31.899606 """ # revision identifiers, used by Alembic. 
revision = '493eaffd79e1' down_revision = 'e8ea58723178' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'network_allocations', sa.Column('mtu', sa.Integer, nullable=True)) op.add_column( 'share_networks', sa.Column('mtu', sa.Integer, nullable=True)) def downgrade(): op.drop_column('network_allocations', 'mtu') op.drop_column('share_networks', 'mtu') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/4a482571410f_add_backends_info_table.py0000664000175000017500000000352400000000000031120 0ustar00zuulzuul00000000000000# Copyright 2017 Huawei inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_backend_info_table Revision ID: 4a482571410f Revises: 27cb96d991fa Create Date: 2017-05-18 14:47:38.201658 """ # revision identifiers, used by Alembic. 
revision = '4a482571410f' down_revision = '27cb96d991fa' from alembic import op from oslo_log import log import sqlalchemy as sql LOG = log.getLogger(__name__) backend_info_table_name = 'backend_info' def upgrade(): try: op.create_table( backend_info_table_name, sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('deleted', sql.Integer, default=0), sql.Column('host', sql.String(255), nullable=False, primary_key=True), sql.Column('info_hash', sql.String(255), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) except Exception: LOG.error("Table |%s| not created!", backend_info_table_name) raise def downgrade(): try: op.drop_table(backend_info_table_name) except Exception: LOG.error("%s table not dropped", backend_info_table_name) raise ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/db/migrations/alembic/versions/4ee2cf4be19a_remove_share_snapshots_export_location.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/4ee2cf4be19a_remove_share_snapshots_export_locat0000664000175000017500000000206500000000000033654 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Remove share_snapshots.export_location Revision ID: 4ee2cf4be19a Revises: 17115072e1c3 Create Date: 2015-02-26 11:11:55.734663 """ # revision identifiers, used by Alembic. revision = '4ee2cf4be19a' down_revision = '17115072e1c3' from alembic import op import sqlalchemy as sql def upgrade(): op.drop_column('share_snapshots', 'export_location') def downgrade(): op.add_column('share_snapshots', sql.Column('export_location', sql.String(255))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/5077ffcc5f1c_add_share_instances.py0000664000175000017500000002713700000000000030654 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_share_instances Revision ID: 5077ffcc5f1c Revises: 3db9992c30f3 Create Date: 2015-06-26 12:54:55.630152 """ # revision identifiers, used by Alembic. 
revision = '5077ffcc5f1c' down_revision = '3db9992c30f3' from alembic import op from sqlalchemy import Column, DateTime, ForeignKey, String from manila.db.migrations import utils def create_share_instances_table(connection): # Create 'share_instances' table share_instances_table = op.create_table( 'share_instances', Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('share_id', String(length=36), ForeignKey('shares.id', name="si_share_fk")), Column('host', String(length=255)), Column('status', String(length=255)), Column('scheduled_at', DateTime), Column('launched_at', DateTime), Column('terminated_at', DateTime), Column('share_network_id', String(length=36), ForeignKey('share_networks.id', name="si_share_network_fk"), nullable=True), Column('share_server_id', String(length=36), ForeignKey('share_servers.id', name="si_share_server_fk"), nullable=True), Column('availability_zone', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8') # Migrate data from 'shares' to 'share_instances' share_instances = [] shares_table = utils.load_table('shares', connection) for share in connection.execute(shares_table.select()): share_instances.append({ 'created_at': share.created_at, 'updated_at': share.updated_at, 'deleted_at': share.deleted_at, 'deleted': share.deleted, 'id': share.id, 'share_id': share.id, 'host': share.host, 'status': share.status, 'scheduled_at': share.scheduled_at, 'launched_at': share.launched_at, 'terminated_at': share.terminated_at, 'share_network_id': share.share_network_id, 'share_server_id': share.share_server_id, 'availability_zone': share.availability_zone, }) op.bulk_insert(share_instances_table, share_instances) # Remove columns moved to 'share_instances' table with op.batch_alter_table("shares") as batch_op: for fk in shares_table.foreign_keys: 
batch_op.drop_constraint(fk.name, type_='foreignkey') batch_op.drop_column('host') batch_op.drop_column('status') batch_op.drop_column('scheduled_at') batch_op.drop_column('launched_at') batch_op.drop_column('terminated_at') batch_op.drop_column('share_network_id') batch_op.drop_column('share_server_id') batch_op.drop_column('availability_zone') def remove_share_instances_table(connection): with op.batch_alter_table("shares") as batch_op: batch_op.add_column(Column('host', String(length=255))) batch_op.add_column(Column('status', String(length=255))) batch_op.add_column(Column('scheduled_at', DateTime)) batch_op.add_column(Column('launched_at', DateTime)) batch_op.add_column(Column('terminated_at', DateTime)) batch_op.add_column(Column('share_network_id', String(length=36), ForeignKey('share_networks.id'), nullable=True)) batch_op.add_column(Column('share_server_id', String(length=36), ForeignKey('share_servers.id'), nullable=True)) batch_op.add_column(Column('availability_zone', String(length=255))) shares_table = utils.load_table('shares', connection) share_inst_table = utils.load_table('share_instances', connection) for share in connection.execute(shares_table.select()): instance = connection.execute( share_inst_table.select().where( share_inst_table.c.share_id == share.id) ).first() # pylint: disable=no-value-for-parameter op.execute( shares_table.update().where( shares_table.c.id == share.id ).values( { 'host': instance._mapping['host'], 'status': instance._mapping['status'], 'scheduled_at': instance._mapping['scheduled_at'], 'launched_at': instance._mapping['launched_at'], 'terminated_at': instance._mapping['terminated_at'], 'share_network_id': instance._mapping['share_network_id'], 'share_server_id': instance._mapping['share_server_id'], 'availability_zone': instance._mapping['availability_zone'], } ) ) op.drop_table('share_instances') def create_snapshot_instances_table(connection): # Create 'share_snapshot_instances' table snapshot_instances_table = 
op.create_table( 'share_snapshot_instances', Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('snapshot_id', String(length=36), ForeignKey('share_snapshots.id', name="ssi_snapshot_fk")), Column('share_instance_id', String(length=36), ForeignKey('share_instances.id', name="ssi_share_instance_fk")), Column('status', String(length=255)), Column('progress', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) # Migrate data from share_snapshots to share_snapshot_instances snapshot_instances = [] snapshot_table = utils.load_table('share_snapshots', connection) share_instances_table = utils.load_table('share_instances', connection) for snapshot in connection.execute(snapshot_table.select()): share_instances_rows = connection.execute( share_instances_table.select().where( share_instances_table.c.share_id == snapshot.share_id ) ) snapshot_instances.append({ 'created_at': snapshot.created_at, 'updated_at': snapshot.updated_at, 'deleted_at': snapshot.deleted_at, 'deleted': snapshot.deleted, 'id': snapshot.id, 'snapshot_id': snapshot.id, 'status': snapshot.status, 'progress': snapshot.progress, 'share_instance_id': share_instances_rows.first().id, }) op.bulk_insert(snapshot_instances_table, snapshot_instances) # Remove columns moved to 'share_snapshot_instances' table with op.batch_alter_table("share_snapshots") as batch_op: batch_op.drop_column('status') batch_op.drop_column('progress') def remove_snapshot_instances_table(connection): with op.batch_alter_table("share_snapshots") as batch_op: batch_op.add_column(Column('status', String(length=255))) batch_op.add_column(Column('progress', String(length=255))) snapshots_table = utils.load_table('share_snapshots', connection) snapshots_inst_table = utils.load_table('share_snapshot_instances', connection) for snapshot_instance in 
connection.execute(snapshots_inst_table.select()): snapshot = connection.execute( snapshots_table.select().where( snapshots_table.c.id == snapshot_instance.snapshot_id) ).first() # pylint: disable=no-value-for-parameter op.execute( snapshots_table.update().where( snapshots_table.c.id == snapshot.id ).values( { 'status': snapshot_instance._mapping['status'], 'progress': snapshot_instance._mapping['progress'], } ) ) op.drop_table('share_snapshot_instances') def upgrade_export_locations_table(connection): # Update 'share_export_locations' table op.add_column( 'share_export_locations', Column('share_instance_id', String(36), ForeignKey('share_instances.id', name="sel_instance_id_fk")) ) # Convert share_id to share_instance_id share_el_table = utils.load_table('share_export_locations', connection) share_instances_table = utils.load_table('share_instances', connection) for export in connection.execute(share_el_table.select()): share_instance = connection.execute( share_instances_table.select().where( share_instances_table.c.share_id == export.share_id) ).first() # pylint: disable=no-value-for-parameter op.execute( share_el_table.update().where( share_el_table.c.id == export.id ).values({'share_instance_id': str(share_instance.id)}) ) with op.batch_alter_table("share_export_locations") as batch_op: batch_op.drop_constraint('sel_id_fk', type_='foreignkey') batch_op.drop_column('share_id') op.rename_table( 'share_export_locations', 'share_instance_export_locations') def downgrade_export_locations_table(connection): op.rename_table('share_instance_export_locations', 'share_export_locations') op.add_column( 'share_export_locations', Column('share_id', String(36), ForeignKey('shares.id', name="sel_id_fk")) ) # Convert share_instance_id to share_id share_el_table = utils.load_table('share_export_locations', connection) share_instances_table = utils.load_table('share_instances', connection) for export in connection.execute(share_el_table.select()): share_instance = 
connection.execute( share_instances_table.select().where( share_instances_table.c.id == export.share_instance_id) ).first() # pylint: disable=no-value-for-parameter op.execute( share_el_table.update().where( share_el_table.c.id == export.id ).values({'share_id': str(share_instance.share_id)}) ) with op.batch_alter_table("share_export_locations") as batch_op: batch_op.drop_constraint('sel_instance_id_fk', type_='foreignkey') batch_op.drop_column('share_instance_id') def upgrade(): connection = op.get_bind() create_share_instances_table(connection) create_snapshot_instances_table(connection) upgrade_export_locations_table(connection) def downgrade(): """Remove share_instances and share_snapshot_instance tables. This method can lead to data loss because only first share/snapshot instance is saved in shares/snapshot table. """ connection = op.get_bind() downgrade_export_locations_table(connection) remove_snapshot_instances_table(connection) remove_share_instances_table(connection) ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=manila-21.0.0/manila/db/migrations/alembic/versions/5155c7077f99_add_more_network_info_attributes_to_network_allocations_table.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/5155c7077f99_add_more_network_info_attributes_to0000664000175000017500000000345700000000000033265 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Add more network info attributes to 'network_allocations' table. Revision ID: 5155c7077f99 Revises: 293fac1130ca Create Date: 2015-12-22 12:05:24.297049 """ # revision identifiers, used by Alembic. revision = '5155c7077f99' down_revision = '293fac1130ca' from alembic import op import sqlalchemy as sa def upgrade(): default_label_value = 'user' op.add_column( 'network_allocations', sa.Column('label', sa.String(255), default=default_label_value, server_default=default_label_value, nullable=True), ) op.add_column( 'network_allocations', sa.Column('network_type', sa.String(32), nullable=True)) op.add_column( 'network_allocations', sa.Column('segmentation_id', sa.Integer, nullable=True)) op.add_column( 'network_allocations', sa.Column('ip_version', sa.Integer, nullable=True)) op.add_column( 'network_allocations', sa.Column('cidr', sa.String(64), nullable=True)) def downgrade(): for col_name in ('label', 'network_type', 'segmentation_id', 'ip_version', 'cidr'): op.drop_column('network_allocations', col_name) ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/manila/db/migrations/alembic/versions/5237b6625330_add_availability_zone_id_field_to_share_groups.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/5237b6625330_add_availability_zone_id_field_to_s0000664000175000017500000000222500000000000033023 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Add 'availability_zone_id' field to 'share_groups' table. Revision ID: 5237b6625330 Revises: 7d142971c4ef Create Date: 2017-03-17 18:49:53.742325 """ # revision identifiers, used by Alembic. revision = '5237b6625330' down_revision = '7d142971c4ef' from alembic import op import sqlalchemy as sa SG_TABLE_NAME = 'share_groups' ATTR_NAME = 'availability_zone_id' def upgrade(): op.add_column( SG_TABLE_NAME, sa.Column( ATTR_NAME, sa.String(36), default=None, nullable=True, ), ) def downgrade(): op.drop_column(SG_TABLE_NAME, ATTR_NAME) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/533646c7af38_remove_unused_attr_status.py0000664000175000017500000000412500000000000031763 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove unused attr status Revision ID: 533646c7af38 Revises: 3a482171410f Create Date: 2015-05-28 13:13:47.651353 """ # revision identifiers, used by Alembic. 
revision = '533646c7af38' down_revision = '3a482171410f' from alembic import op from oslo_log import log import sqlalchemy as sql from manila.common import constants LOG = log.getLogger(__name__) COLUMN_NAME = 'status' TABLE_NAMES = ('network_allocations', 'security_services') def upgrade(): for t_name in TABLE_NAMES: try: op.drop_column(t_name, COLUMN_NAME) except Exception: LOG.error("Column '%s' could not be dropped", COLUMN_NAME) raise def downgrade(): for t_name in TABLE_NAMES: try: op.add_column( t_name, sql.Column( COLUMN_NAME, # NOTE(vponomaryov): original type of attr was enum. But # alembic is buggy with enums [1], so use string type # instead. Anyway we have no reason to keep enum/constraint # on specific set of possible statuses because they have # not been used. # [1] - https://bitbucket.org/zzzeek/alembic/ # issue/89/opadd_column-and-opdrop_column-should sql.String(255), default=constants.STATUS_NEW, ), ) except Exception: LOG.error("Column '%s' could not be added", COLUMN_NAME) raise ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/manila/db/migrations/alembic/versions/54667b9cade7_restore_share_instance_access_map_state.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/54667b9cade7_restore_share_instance_access_map_s0000664000175000017500000000720600000000000033417 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add_share_instance_access_map_state Revision ID: 54667b9cade7 Revises: 87ce15c59bbe Create Date: 2016-09-02 10:18:07.290461 """ # revision identifiers, used by Alembic. revision = '54667b9cade7' down_revision = '87ce15c59bbe' from alembic import op from sqlalchemy import Column, String from manila.common import constants from manila.db.migrations import utils # Mapping for new value to be assigned as ShareInstanceAccessMapping's state access_rules_status_to_state_mapping = { constants.STATUS_ACTIVE: constants.ACCESS_STATE_ACTIVE, constants.STATUS_OUT_OF_SYNC: constants.ACCESS_STATE_QUEUED_TO_APPLY, 'updating': constants.ACCESS_STATE_QUEUED_TO_APPLY, 'updating_multiple': constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.STATUS_ERROR: constants.ACCESS_STATE_ERROR, } # Mapping for changes to Share Instance's access_rules_status access_rules_status_upgrade_mapping = { constants.STATUS_ACTIVE: constants.STATUS_ACTIVE, constants.STATUS_OUT_OF_SYNC: constants.SHARE_INSTANCE_RULES_SYNCING, 'updating': constants.SHARE_INSTANCE_RULES_SYNCING, 'updating_multiple': constants.SHARE_INSTANCE_RULES_SYNCING, constants.STATUS_ERROR: constants.STATUS_ERROR, } def upgrade(): op.add_column('share_instance_access_map', Column('state', String(length=255), default=constants.ACCESS_STATE_QUEUED_TO_APPLY)) connection = op.get_bind() share_instances_table = utils.load_table('share_instances', connection) instance_access_map_table = utils.load_table('share_instance_access_map', connection) instances_query = ( share_instances_table.select().where( share_instances_table.c.status == constants.STATUS_AVAILABLE).where( share_instances_table.c.deleted == 'False') ) for instance in connection.execute(instances_query): access_rule_status = instance._mapping['access_rules_status'] # pylint: disable=no-value-for-parameter op.execute( instance_access_map_table.update().where( instance_access_map_table.c.share_instance_id == instance._mapping['id'] ).values({ 'state': 
access_rules_status_to_state_mapping[ access_rule_status], }) ) op.execute( share_instances_table.update().where( share_instances_table.c.id == instance._mapping['id'] ).values({ 'access_rules_status': access_rules_status_upgrade_mapping[ access_rule_status], }) ) def downgrade(): op.drop_column('share_instance_access_map', 'state') connection = op.get_bind() share_instances_table = utils.load_table('share_instances', connection) # pylint: disable=no-value-for-parameter op.execute( share_instances_table.update().where( share_instances_table.c.access_rules_status == constants.SHARE_INSTANCE_RULES_SYNCING).values({ 'access_rules_status': constants.STATUS_OUT_OF_SYNC}) ) ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/manila/db/migrations/alembic/versions/55761e5f59c5_add_snapshot_support_extra_spec_to_share_types.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/55761e5f59c5_add_snapshot_support_extra_spec_to_0000664000175000017500000001016200000000000033345 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add 'snapshot_support' extra spec to share types Revision ID: 55761e5f59c5 Revises: 1f0bd302c1a6 Create Date: 2015-08-13 14:02:54.656864 """ # revision identifiers, used by Alembic. 
revision = '55761e5f59c5' down_revision = '1f0bd302c1a6' from alembic import op from oslo_utils import timeutils import sqlalchemy as sa from sqlalchemy.sql import table from manila.common import constants def upgrade(): """Performs DB upgrade to support feature of making snapshots optional. Add 'snapshot_support' extra spec to all share types and attr 'snapshot_support' to Share model. """ with sa.orm.Session(bind=op.get_bind()) as session: es_table = table( 'share_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('share_type_id', sa.String(length=36)), sa.Column('spec_key', sa.String(length=255)), sa.Column('spec_value', sa.String(length=255))) st_table = table( 'share_types', sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer)) # NOTE(vponomaryov): field 'deleted' is integer here. existing_extra_specs = (session.query(es_table). filter(es_table.c.spec_key == constants.ExtraSpecs.SNAPSHOT_SUPPORT). filter(es_table.c.deleted == 0). all()) exclude_st_ids = [es.share_type_id for es in existing_extra_specs] # NOTE(vponomaryov): field 'deleted' is string here. share_types = (session.query(st_table). filter(st_table.c.deleted.in_(('0', 'False', ))). filter(st_table.c.id.notin_(exclude_st_ids)). all()) extra_specs = [] now = timeutils.utcnow() for st in share_types: extra_specs.append({ 'spec_key': constants.ExtraSpecs.SNAPSHOT_SUPPORT, 'spec_value': 'True', 'deleted': 0, 'created_at': now, 'share_type_id': st.id, }) if extra_specs: op.bulk_insert(es_table, extra_specs) # NOTE(vponomaryov): shares that were created before applying this # migration can have incorrect value because they were created without # consideration of driver capability to create snapshots. 
op.add_column('shares', sa.Column('snapshot_support', sa.Boolean, default=True)) connection = op.get_bind() shares = sa.Table( 'shares', sa.MetaData(), autoload_with=connection) # pylint: disable=no-value-for-parameter update = shares.update().where( shares.c.deleted == 'False' ).values(snapshot_support=True) connection.execute(update) def downgrade(): """Performs DB downgrade removing support of 'optional snapshots' feature. Remove 'snapshot_support' extra spec from all share types and attr 'snapshot_support' from Share model. """ connection = op.get_bind() extra_specs = sa.Table( 'share_type_extra_specs', sa.MetaData(), autoload_with=connection) # pylint: disable=no-value-for-parameter update = extra_specs.update().where( extra_specs.c.spec_key == constants.ExtraSpecs.SNAPSHOT_SUPPORT).where( extra_specs.c.deleted == 0).values( deleted=extra_specs.c.id, deleted_at=timeutils.utcnow(), ) connection.execute(update) op.drop_column('shares', 'snapshot_support') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/56cdbe267881_add_share_export_locations_table.py0000664000175000017500000000752600000000000033275 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Add share_export_locations table Revision ID: 56cdbe267881 Revises: 17115072e1c3 Create Date: 2015-02-27 14:06:30.464315 """ # revision identifiers, used by Alembic. revision = '56cdbe267881' down_revision = '30cb96d995fa' from alembic import op import sqlalchemy as sa from sqlalchemy import func from sqlalchemy.sql import table def upgrade(): export_locations_table = op.create_table( 'share_export_locations', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer, default=0), sa.Column('path', sa.String(2000)), sa.Column('share_id', sa.String(36), sa.ForeignKey('shares.id', name="sel_id_fk")), mysql_engine='InnoDB', mysql_charset='utf8') shares_table = table( 'shares', sa.Column('created_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('export_location', sa.String(length=255)), sa.Column('id', sa.String(length=36)), sa.Column('updated_at', sa.DateTime)) export_locations = [] with sa.orm.Session(bind=op.get_bind()) as session: for share in session.query(shares_table).all(): deleted = share.deleted if isinstance(share.deleted, int) else 0 export_locations.append({ 'created_at': share.created_at, 'updated_at': share.updated_at, 'deleted_at': share.deleted_at, 'deleted': deleted, 'share_id': share.id, 'path': share.export_location, }) op.bulk_insert(export_locations_table, export_locations) op.drop_column('shares', 'export_location') def downgrade(): """Remove share_export_locations table. This method can lead to data loss because only first export_location is saved in shares table. 
""" op.add_column('shares', sa.Column('export_location', sa.String(255))) export_locations_table = table( 'share_export_locations', sa.Column('share_id', sa.String(length=36)), sa.Column('path', sa.String(length=255)), sa.Column('updated_at', sa.DateTime), sa.Column('deleted', sa.Integer)) connection = op.get_bind() with sa.orm.Session(bind=connection) as session: export_locations = session.query( func.min(export_locations_table.c.updated_at), export_locations_table.c.share_id, export_locations_table.c.path).filter( export_locations_table.c.deleted == 0).group_by( export_locations_table.c.share_id, export_locations_table.c.path).all() shares = sa.Table( 'shares', sa.MetaData(), autoload_with=connection) for location in export_locations: # pylint: disable=no-value-for-parameter update = ( shares.update().where(shares.c.id == location.share_id). values(export_location=location.path)) connection.execute(update) op.drop_table('share_export_locations') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/579c267fbb4d_add_share_instances_access_map.py0000664000175000017500000001013400000000000032744 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_share_instances_access_map Revision ID: 579c267fbb4d Revises: 5077ffcc5f1c Create Date: 2015-08-19 07:51:52.928542 """ # revision identifiers, used by Alembic. 
revision = '579c267fbb4d' down_revision = '5077ffcc5f1c' from alembic import op from sqlalchemy import Column, DateTime, ForeignKey, String from oslo_utils import uuidutils from manila.db.migrations import utils def upgrade(): """Create 'share_instance_access_map' table and move 'state' column.""" instance_access_table = op.create_table( 'share_instance_access_map', Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', String(length=36), default='False'), Column('id', String(length=36), primary_key=True, nullable=False), Column('share_instance_id', String(length=36), ForeignKey('share_instances.id', name="siam_instance_fk")), Column('access_id', String(length=36), ForeignKey('share_access_map.id', name="siam_access_fk")), Column('state', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8') # NOTE(u_glide): Move all states from 'share_access_map' # to 'share_instance_access_map' instance_access_mappings = [] connection = op.get_bind() access_table = utils.load_table('share_access_map', connection) instances_table = utils.load_table('share_instances', connection) for access_rule in connection.execute(access_table.select()): # pylint: disable=assignment-from-no-return instances_query = instances_table.select().where( instances_table.c.share_id == access_rule.share_id ) for instance in connection.execute(instances_query): instance_access_mappings.append({ 'created_at': access_rule.created_at, 'updated_at': access_rule.updated_at, 'deleted_at': access_rule.deleted_at, 'deleted': access_rule.deleted, 'id': uuidutils.generate_uuid(), 'share_instance_id': instance.id, 'access_id': access_rule.id, 'state': access_rule.state, }) op.bulk_insert(instance_access_table, instance_access_mappings) op.drop_column('share_access_map', 'state') def downgrade(): """Remove 'share_instance_access_map' table and add 'state' column back. 
This method can lead to data loss because only first state is saved in share_access_map table. """ op.add_column('share_access_map', Column('state', String(length=255))) # NOTE(u_glide): Move all states from 'share_instance_access_map' # to 'share_access_map' connection = op.get_bind() access_table = utils.load_table('share_access_map', connection) instance_access_table = utils.load_table('share_instance_access_map', connection) share_access_rules = connection.execute( access_table.select().where(access_table.c.deleted == "False")) for access_rule in share_access_rules: access_mapping = connection.execute( instance_access_table.select().where( instance_access_table.c.access_id == access_rule._mapping['id']) ).first() # pylint: disable=no-value-for-parameter op.execute( access_table.update().where( access_table.c.id == access_rule._mapping['id'] ).values({'state': access_mapping._mapping['state']}) ) op.drop_table('share_instance_access_map') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/59eb64046740_add_required_extra_spec.py0000664000175000017500000000514500000000000031233 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add required extra spec Revision ID: 59eb64046740 Revises: 162a3e673105 Create Date: 2015-01-29 15:33:25.348140 """ # revision identifiers, used by Alembic. 
revision = '59eb64046740' down_revision = '4ee2cf4be19a' from alembic import op from oslo_utils import timeutils import sqlalchemy as sa from sqlalchemy.sql import table def upgrade(): with sa.orm.Session(bind=op.get_bind()) as session: es_table = table( 'share_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('share_type_id', sa.String(length=36)), sa.Column('spec_key', sa.String(length=255)), sa.Column('spec_value', sa.String(length=255))) st_table = table( 'share_types', sa.Column('deleted', sa.Integer), sa.Column('id', sa.Integer)) # NOTE(vponomaryov): field 'deleted' is integer here. existing_required_extra_specs = ( session.query(es_table).filter( es_table.c.spec_key == 'driver_handles_share_servers' ).filter(es_table.c.deleted == 0).all() ) exclude_st_ids = [ es.share_type_id for es in existing_required_extra_specs] # NOTE(vponomaryov): field 'deleted' is string here. share_types = ( session.query(st_table). filter(st_table.c.deleted.in_(('0', 'False', ))). filter(st_table.c.id.notin_(exclude_st_ids)). all() ) extra_specs = [] for st in share_types: extra_specs.append({ 'spec_key': 'driver_handles_share_servers', 'spec_value': 'True', 'deleted': 0, 'created_at': timeutils.utcnow(), 'share_type_id': st.id, }) op.bulk_insert(es_table, extra_specs) def downgrade(): """Downgrade method. We can't determine, which extra specs should be removed after insertion, that's why do nothing here. 
""" ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/db/migrations/alembic/versions/5aa813ae673d_add_task_state_field_for_share_servers.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/5aa813ae673d_add_task_state_field_for_share_serv0000664000175000017500000000365700000000000033362 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add task_state field for share servers table Revision ID: 5aa813ae673d Revises: e6d88547b381 Create Date: 2020-06-23 12:04:47.821793 """ # revision identifiers, used by Alembic. 
revision = '5aa813ae673d' down_revision = 'e6d88547b381' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) share_servers_fk_name = ( "fk_share_servers_source_share_server_id") def upgrade(): try: op.add_column('share_servers', sa.Column( 'task_state', sa.String(length=255), default=None)) op.add_column( 'share_servers', sa.Column( 'source_share_server_id', sa.String(length=36), sa.ForeignKey('share_servers.id', name=share_servers_fk_name), default=None, nullable=True)) except Exception: LOG.error("Column share_servers.task_state and/or " "share_server.source_share_server_id not created!") raise def downgrade(): try: op.drop_column('share_servers', 'task_state') op.drop_constraint(share_servers_fk_name, 'share_servers', type_='foreignkey') op.drop_column('share_servers', 'source_share_server_id') except Exception: LOG.error("Column share_servers.task_state and/or " "share_servers.source_share_server_id not dropped!") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/63809d875e32_add_access_key.py0000664000175000017500000000175000000000000027315 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_access_key Revision ID: 63809d875e32 Revises: 493eaffd79e1 Create Date: 2016-07-16 20:53:05.958896 """ # revision identifiers, used by Alembic. 
revision = '63809d875e32' down_revision = '493eaffd79e1' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'share_access_map', sa.Column('access_key', sa.String(255), nullable=True)) def downgrade(): op.drop_column('share_access_map', 'access_key') ././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=manila-21.0.0/manila/db/migrations/alembic/versions/6a3fd2984bc31_add_is_auto_deletable_and_identifier_fields_for_share_servers.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/6a3fd2984bc31_add_is_auto_deletable_and_identifi0000664000175000017500000000453100000000000033206 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add is_auto_deletable and identifier fields for share servers Revision ID: 6a3fd2984bc31 Revises: 11ee96se625f3 Create Date: 2018-10-29 11:27:44.194732 """ # revision identifiers, used by Alembic. 
revision = '6a3fd2984bc31' down_revision = '11ee96se625f3' from alembic import op from oslo_log import log import sqlalchemy as sa from manila.db.migrations import utils LOG = log.getLogger(__name__) def upgrade(): try: op.add_column('share_servers', sa.Column( 'is_auto_deletable', sa.Boolean, default=True)) op.add_column('share_servers', sa.Column( 'identifier', sa.String(length=255), default=None)) except Exception: LOG.error("Columns share_servers.is_auto_deletable " "and/or share_servers.identifier not created!") raise try: connection = op.get_bind() share_servers_table = utils.load_table('share_servers', connection) for server in connection.execute(share_servers_table.select()): # pylint: disable=no-value-for-parameter connection.execute( share_servers_table.update().where( share_servers_table.c.id == server.id, ).values({"identifier": server.id, "is_auto_deletable": True})) except Exception: LOG.error( "Could not initialize share_servers.is_auto_deletable to True" " and share_servers.identifier with the share server ID!") raise def downgrade(): try: op.drop_column('share_servers', 'is_auto_deletable') op.drop_column('share_servers', 'identifier') except Exception: LOG.error("Columns share_servers.is_auto_deletable and/or " "share_servers.identifier not dropped!") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/7d142971c4ef_add_reservation_expire_index.py0000664000175000017500000000176400000000000032446 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_reservation_expire_index Revision ID: 7d142971c4ef Revises: d5db24264f5c Create Date: 2017-03-02 09:19:27.114719 """ # revision identifiers, used by Alembic. revision = '7d142971c4ef' down_revision = 'd5db24264f5c' from alembic import op INDEX_NAME = 'reservations_deleted_expire_idx' TABLE_NAME = 'reservations' def upgrade(): op.create_index(INDEX_NAME, TABLE_NAME, ['deleted', 'expire']) def downgrade(): op.drop_index(INDEX_NAME, TABLE_NAME) ././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=manila-21.0.0/manila/db/migrations/alembic/versions/805685098bd2_add_share_network_subnets_table_and_modify_share_servers_table.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/805685098bd2_add_share_network_subnets_table_and0000664000175000017500000002315600000000000033165 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add_share_network_subnets_table_and_modify_share_networks_and_servers Revision ID: 805685098bd2 Revises: 6a3fd2984bc31 Create Date: 2019-05-09 16:28:41.919714 """ # revision identifiers, used by Alembic. revision = '805685098bd2' down_revision = '6a3fd2984bc31' from alembic import op from manila.db.migrations import utils from oslo_log import log from oslo_utils import uuidutils import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): # New table try: share_networks_fk_name = ( "fk_share_network_subnets_share_network_id_share_networks") availability_zones_fk_name = ( "fk_share_network_subnets_availaility_zone_id_availability_zones") share_network_subnets_table = op.create_table( 'share_network_subnets', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('neutron_net_id', sa.String(36), nullable=True), sa.Column('neutron_subnet_id', sa.String(36), nullable=True), sa.Column('network_type', sa.String(32), nullable=True), sa.Column('cidr', sa.String(64), nullable=True), sa.Column('segmentation_id', sa.Integer, nullable=True), sa.Column('gateway', sa.String(64), nullable=True), sa.Column('mtu', sa.Integer, nullable=True), sa.Column('share_network_id', sa.String(36), sa.ForeignKey( 'share_networks.id', name=share_networks_fk_name)), sa.Column('ip_version', sa.Integer, nullable=True), sa.Column('availability_zone_id', sa.String(36), sa.ForeignKey('availability_zones.id', name=availability_zones_fk_name)), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), mysql_engine='InnoDB', mysql_charset='utf8' ) except Exception: LOG.error("Table |%s| not created!", 'share_network_subnets') raise share_serves_fk_name = ( "fk_share_servers_share_network_subnet_id_share_network_subnets") op.add_column( 'share_servers', sa.Column( 'share_network_subnet_id', sa.String(36), sa.ForeignKey('share_network_subnets.id', 
name=share_serves_fk_name), ) ) connection = op.get_bind() share_networks_table = utils.load_table('share_networks', connection) share_servers_table = utils.load_table('share_servers', connection) share_network_subnets = [] # Get all share_networks and move all their data to share network subnet for share_network in connection.execute(share_networks_table.select()): share_network_subnet = { 'id': uuidutils.generate_uuid(), 'neutron_net_id': share_network.neutron_net_id, 'neutron_subnet_id': share_network.neutron_subnet_id, 'network_type': share_network.network_type, 'cidr': share_network.cidr, 'segmentation_id': share_network.segmentation_id, 'gateway': share_network.gateway, 'mtu': share_network.mtu, 'share_network_id': share_network.id, 'ip_version': share_network.ip_version, 'created_at': share_network.created_at, 'updated_at': share_network.updated_at, 'deleted_at': share_network.deleted_at, 'deleted': share_network.deleted, } share_network_subnets.append(share_network_subnet) # Insertions for the new share network subnets op.bulk_insert(share_network_subnets_table, share_network_subnets) # Updates the field share server table with the share network subnet id for sns in share_network_subnets: share_servers = connection.execute(share_servers_table.select().where( share_servers_table.c.share_network_id == sns['share_network_id'] )) updated_data = {'share_network_subnet_id': sns['id']} _update_share_servers(share_servers, updated_data, share_servers_table) if connection.engine.name == 'mysql': # Drops necessary constraint from share servers table. Only mysql # needs constraint handling. 
Postgresql/sqlite don't op.drop_constraint("share_servers_ibfk_1", "share_servers", type_="foreignkey") op.drop_column('share_servers', 'share_network_id') op.drop_column('share_networks', 'neutron_net_id') op.drop_column('share_networks', 'neutron_subnet_id') op.drop_column('share_networks', 'network_type') op.drop_column('share_networks', 'segmentation_id') op.drop_column('share_networks', 'gateway') op.drop_column('share_networks', 'mtu') op.drop_column('share_networks', 'cidr') op.drop_column('share_networks', 'ip_version') def _update_share_servers(share_servers, updated_data, share_servers_table): for share_server in share_servers: # pylint: disable=no-value-for-parameter op.execute( share_servers_table.update().where( share_servers_table.c.id == share_server.id, ).values(updated_data) ) def retrieve_default_subnet(subnets): # NOTE (silvacarlose): A default subnet is that one which doesn't contain # an availability zone. If all the share networks contain an az, we can # retrieve whichever share network, then we pick up the first. 
for subnet in subnets: if subnet.availability_zone_id is None: return subnet return subnets[0] if subnets is not None else None def downgrade(): connection = op.get_bind() # Include again the removed fields in the share network table op.add_column('share_networks', sa.Column('neutron_net_id', sa.String(36), nullable=True)) op.add_column('share_networks', sa.Column('neutron_subnet_id', sa.String(36), nullable=True)) op.add_column('share_networks', sa.Column('network_type', sa.String(32), nullable=True)) op.add_column('share_networks', sa.Column('cidr', sa.String(64), nullable=True)) op.add_column('share_networks', sa.Column('gateway', sa.String(64), nullable=True)) op.add_column('share_networks', sa.Column('mtu', sa.Integer, nullable=True)) op.add_column('share_networks', sa.Column('segmentation_id', sa.Integer, nullable=True)) op.add_column('share_networks', sa.Column('ip_version', sa.Integer, nullable=True)) # Include again the removed field in the share server table op.add_column('share_servers', sa.Column('share_network_id', sa.String(36), sa.ForeignKey('share_networks.id', name="share_servers_ibfk_1"))) share_networks_table = utils.load_table('share_networks', connection) share_servers_table = utils.load_table('share_servers', connection) subnets_table = utils.load_table('share_network_subnets', connection) for share_network in connection.execute(share_networks_table.select()): network_subnets = connection.execute(subnets_table.select().where( subnets_table.c.share_network_id == share_network.id)) default_subnet = retrieve_default_subnet(network_subnets) if default_subnet is not None: op.execute( # pylint: disable=no-value-for-parameter share_networks_table.update().where( share_networks_table.c.id == share_network.id, ).values({ 'neutron_net_id': default_subnet.neutron_net_id, 'neutron_subnet_id': default_subnet.neutron_subnet_id, 'network_type': default_subnet.network_type, 'cidr': default_subnet.cidr, 'gateway': default_subnet.gateway, 'mtu': 
default_subnet.mtu, 'segmentation_id': default_subnet.segmentation_id, 'ip_version': default_subnet.ip_version, }) ) for network_subnet in network_subnets: share_servers = connection.execute( share_servers_table.select().where( share_servers_table.c.share_network_subnet_id == network_subnet.id)) updated_data = {'share_network_id': share_network.id} _update_share_servers(share_servers, updated_data, share_servers_table) share_serves_fk_name = ( "fk_share_servers_share_network_subnet_id_share_network_subnets") if connection.engine.name == 'mysql': op.drop_constraint(share_serves_fk_name, "share_servers", type_="foreignkey") op.drop_column('share_servers', 'share_network_subnet_id') try: op.drop_table('share_network_subnets') except Exception: LOG.error("Failed to drop 'share_network_subnets' table!") raise ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=manila-21.0.0/manila/db/migrations/alembic/versions/829a09b0ddd4_fix_project_share_type_quotas_unique_constraint.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/829a09b0ddd4_fix_project_share_type_quotas_uniqu0000664000175000017500000000274700000000000033533 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fix 'project_share_type_quotas' unique constraint Revision ID: 829a09b0ddd4 Revises: b516de97bfee Create Date: 2017-10-12 20:15:51.267488 """ # revision identifiers, used by Alembic. 
revision = '829a09b0ddd4' down_revision = 'b516de97bfee' from alembic import op TABLE_NAME = 'project_share_type_quotas' UNIQUE_CONSTRAINT_NAME = 'uc_quotas_per_share_types' ST_FK_NAME = 'share_type_id_fk' def upgrade(): op.drop_constraint(ST_FK_NAME, TABLE_NAME, type_='foreignkey') op.drop_constraint(UNIQUE_CONSTRAINT_NAME, TABLE_NAME, type_='unique') op.create_foreign_key( ST_FK_NAME, TABLE_NAME, 'share_types', ['share_type_id'], ['id']) op.create_unique_constraint( UNIQUE_CONSTRAINT_NAME, TABLE_NAME, ['share_type_id', 'resource', 'deleted', 'project_id']) def downgrade(): # NOTE(vponomaryov): no need to implement old behaviour as it was bug, and, # moreover, not compatible with data from upgraded version. pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/87ce15c59bbe_add_revert_to_snapshot_support.py0000664000175000017500000000404400000000000033222 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_revert_to_snapshot_support Revision ID: 87ce15c59bbe Revises: 95e3cf760840 Create Date: 2016-08-18 00:12:34.587018 """ # revision identifiers, used by Alembic. revision = '87ce15c59bbe' down_revision = '95e3cf760840' from alembic import op import sqlalchemy as sa def upgrade(): """Performs DB upgrade to add revert_to_snapshot_support. 
Add attribute 'revert_to_snapshot_support' to Share model. """ with sa.orm.Session(bind=op.get_bind()) as session: # Add create_share_from_snapshot_support attribute to shares table op.add_column( 'shares', sa.Column('revert_to_snapshot_support', sa.Boolean, default=False)) # Set revert_to_snapshot_support on each share shares_table = sa.Table( 'shares', sa.MetaData(), sa.Column('id', sa.String(length=36)), sa.Column('deleted', sa.String(length=36)), sa.Column('revert_to_snapshot_support', sa.Boolean), ) # pylint: disable=no-value-for-parameter update = shares_table.update().where( shares_table.c.deleted == 'False').values( revert_to_snapshot_support=False) session.execute(update) session.commit() def downgrade(): """Performs DB downgrade removing revert_to_snapshot_support. Remove attribute 'revert_to_snapshot_support' from Share model. """ op.drop_column('shares', 'revert_to_snapshot_support') ././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=manila-21.0.0/manila/db/migrations/alembic/versions/927920b37453_add_provider_location_for_share_group_snapshot_members_model.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/927920b37453_add_provider_location_for_share_gro0000664000175000017500000000222300000000000033103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add 'provider_location' attr to 'share_group_snapshot_members' model. 
Revision ID: 927920b37453 Revises: a77e2ad5012d Create Date: 2017-01-31 20:10:44.937763 """ # revision identifiers, used by Alembic. revision = '927920b37453' down_revision = 'a77e2ad5012d' from alembic import op import sqlalchemy as sa SGSM_TABLE_NAME = 'share_group_snapshot_members' PROVIDER_LOCATION_NAME = 'provider_location' def upgrade(): op.add_column( SGSM_TABLE_NAME, sa.Column(PROVIDER_LOCATION_NAME, sa.String(255), nullable=True), ) def downgrade(): op.drop_column(SGSM_TABLE_NAME, PROVIDER_LOCATION_NAME) ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/manila/db/migrations/alembic/versions/95e3cf760840_remove_nova_net_id_column_from_share_.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/95e3cf760840_remove_nova_net_id_column_from_shar0000664000175000017500000000200400000000000033271 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """remove_nova_net_id_column_from_share_networks Revision ID: 95e3cf760840 Revises: 3e7d62517afa Create Date: 2016-12-13 16:11:05.191717 """ # revision identifiers, used by Alembic. 
revision = '95e3cf760840' down_revision = '3e7d62517afa' from alembic import op import sqlalchemy as sa def upgrade(): op.drop_column('share_networks', 'nova_net_id') def downgrade(): op.add_column( 'share_networks', sa.Column('nova_net_id', sa.String(36), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/99d328f0a3d2_add_disable_reason_to_service.py0000664000175000017500000000246400000000000032532 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_disable_reason_to_service Revision ID: 99d328f0a3d2 Revises: 2d708a9a3ba9 Create Date: 2023-10-13 10:50:29.311032 """ # revision identifiers, used by Alembic. 
revision = '99d328f0a3d2' down_revision = '2d708a9a3ba9' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): try: op.add_column( 'services', sa.Column( 'disabled_reason', sa.String(length=255), nullable=True)) except Exception: LOG.error("Column services.disabled_reason not created!") raise def downgrade(): try: op.drop_column('services', 'disabled_reason') except Exception: LOG.error("Column services.disabled_reason not dropped!") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/9afbe2df4945_add_backup.py0000664000175000017500000000566300000000000026755 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add backup Revision ID: 9afbe2df4945 Revises: aebe2a413e13 Create Date: 2022-04-21 23:06:59.144695 """ # revision identifiers, used by Alembic. 
revision = '9afbe2df4945' down_revision = 'aebe2a413e13' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) share_backups_table_name = 'share_backups' def upgrade(): """Add backup attributes.""" try: op.create_table( share_backups_table_name, sa.Column('id', sa.String(length=36), primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(length=36), default='False'), sa.Column('user_id', sa.String(255)), sa.Column('project_id', sa.String(255)), sa.Column('availability_zone', sa.String(255)), sa.Column('fail_reason', sa.String(255)), sa.Column('display_name', sa.String(255)), sa.Column('display_description', sa.String(255)), sa.Column('host', sa.String(255)), sa.Column('topic', sa.String(255)), sa.Column('status', sa.String(255)), sa.Column('progress', sa.String(32)), sa.Column('restore_progress', sa.String(32)), sa.Column('size', sa.Integer), sa.Column('share_id', sa.String(36), sa.ForeignKey('shares.id', name="fk_backups_share_id_shares")), mysql_engine='InnoDB', mysql_charset='utf8' ) except Exception: LOG.error("Table |%s| not created!", share_backups_table_name) raise try: op.add_column( 'shares', sa.Column('source_backup_id', sa.String(36), nullable=True)) except Exception: LOG.error("Column can not be added for 'shares' table!") raise def downgrade(): """Remove share backup attributes and table share_backups.""" try: op.drop_table(share_backups_table_name) except Exception: LOG.error("%s table not dropped.", share_backups_table_name) raise try: op.drop_column('shares', 'source_backup_id') except Exception: LOG.error("Column can not be dropped for 'shares' table!") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/db/migrations/alembic/versions/a77e2ad5012d_add_share_snapshot_access.py0000664000175000017500000000737000000000000031745 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_share_snapshot_access Revision ID: a77e2ad5012d Revises: e1949a93157a Create Date: 2016-07-15 13:32:19.417771 """ # revision identifiers, used by Alembic. revision = 'a77e2ad5012d' down_revision = 'e1949a93157a' from manila.common import constants from manila.db.migrations import utils from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'share_snapshot_access_map', sa.Column('id', sa.String(36), primary_key=True), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('share_snapshot_id', sa.String(36), sa.ForeignKey('share_snapshots.id', name='ssam_snapshot_fk')), sa.Column('access_type', sa.String(255)), sa.Column('access_to', sa.String(255)), mysql_charset='utf8' ) op.create_table( 'share_snapshot_instance_access_map', sa.Column('id', sa.String(36), primary_key=True), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('share_snapshot_instance_id', sa.String(36), sa.ForeignKey('share_snapshot_instances.id', 
name='ssiam_snapshot_instance_fk')), sa.Column('access_id', sa.String(36), sa.ForeignKey('share_snapshot_access_map.id', name='ssam_access_fk')), sa.Column('state', sa.String(255), default=constants.ACCESS_STATE_QUEUED_TO_APPLY), mysql_charset='utf8' ) op.create_table( 'share_snapshot_instance_export_locations', sa.Column('id', sa.String(36), primary_key=True), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('share_snapshot_instance_id', sa.String(36), sa.ForeignKey('share_snapshot_instances.id', name='ssiel_snapshot_instance_fk')), sa.Column('path', sa.String(2000)), sa.Column('is_admin_only', sa.Boolean, default=False, nullable=False), mysql_charset='utf8' ) op.add_column('shares', sa.Column('mount_snapshot_support', sa.Boolean, default=False)) connection = op.get_bind() shares_table = utils.load_table('shares', connection) # pylint: disable=no-value-for-parameter op.execute( shares_table.update().where( shares_table.c.deleted == 'False').values({ 'mount_snapshot_support': False, }) ) def downgrade(): op.drop_table('share_snapshot_instance_export_locations') op.drop_table('share_snapshot_instance_access_map') op.drop_table('share_snapshot_access_map') op.drop_column('shares', 'mount_snapshot_support') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/a87e0fb17dee_multiple_share_server_subnets.py0000664000175000017500000002236400000000000033115 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """multiple share server subnets Revision ID: a87e0fb17dee Revises: 1946cb97bb8d Create Date: 2022-01-14 06:12:27.596130 """ # revision identifiers, used by Alembic. revision = 'a87e0fb17dee' down_revision = '1946cb97bb8d' from alembic import op from oslo_log import log import sqlalchemy as sa from manila.db.migrations import utils SHARE_SERVERS_TABLE = 'share_servers' SHARE_SERVER_SUBNET_MAP_TABLE = 'share_server_share_network_subnet_mappings' NETWORK_ALLOCATIONS_TABLE = 'network_allocations' LOG = log.getLogger(__name__) def upgrade(): # Create mappings table. context = op.get_context() mysql_dl = context.bind.dialect.name == 'mysql' datetime_type = (sa.dialects.mysql.DATETIME(fsp=6) if mysql_dl else sa.DateTime) try: share_server_fk_name = "fk_ss_sns_m_share_server_id_share_servers" share_network_subnet_fk_name = ( "fk_ss_sns_m_share_network_subnet_id_share_network_subnets") server_subnet_mappings_table = op.create_table( SHARE_SERVER_SUBNET_MAP_TABLE, sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', datetime_type), sa.Column('updated_at', datetime_type), sa.Column('deleted_at', datetime_type), sa.Column('deleted', sa.Integer, default=0), sa.Column( 'share_server_id', sa.String(length=36), sa.ForeignKey('share_servers.id', name=share_server_fk_name), nullable=False), sa.Column( 'share_network_subnet_id', sa.String(length=36), sa.ForeignKey('share_network_subnets.id', name=share_network_subnet_fk_name), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') except Exception: LOG.error('Table %s could not be created.', 
SHARE_SERVER_SUBNET_MAP_TABLE) raise # Populate the mappings table from the share servers table. try: connection = op.get_bind() share_servers_table = utils.load_table(SHARE_SERVERS_TABLE, connection) server_subnet_mappings = [] for server in connection.execute(share_servers_table.select()): if server.share_network_subnet_id: server_subnet_mappings.append({ 'created_at': server.created_at, 'updated_at': server.updated_at, 'deleted_at': server.deleted_at, 'deleted': 0 if server.deleted == 'False' else 1, 'share_server_id': server.id, 'share_network_subnet_id': server.share_network_subnet_id, }) op.bulk_insert(server_subnet_mappings_table, server_subnet_mappings) except Exception: LOG.error('Table %s could not be populated from the %s table.', SHARE_SERVER_SUBNET_MAP_TABLE, SHARE_SERVERS_TABLE) raise # add subnet id column to the allocations table. try: network_allocation_fk_name = ( "fk_network_allocation_subnet_id_share_network_subnets") op.add_column( NETWORK_ALLOCATIONS_TABLE, sa.Column('share_network_subnet_id', sa.String(length=36), sa.ForeignKey('share_network_subnets.id', name=network_allocation_fk_name)) ) except Exception: LOG.error("Could not add ForeignKey column 'share_network_subnet_id'" "to table %s.", NETWORK_ALLOCATIONS_TABLE) raise # populate the allocation with its subnet id using the share server. network_allocation_table = utils.load_table(NETWORK_ALLOCATIONS_TABLE, connection) for alloc in connection.execute(network_allocation_table.select()): # admin allocations should not contain subnet id. if alloc._mapping['label'] == 'admin': continue server = connection.execute( share_servers_table.select().where( alloc._mapping['share_server_id'] == ( share_servers_table.c.id))).first() # pylint: disable=no-value-for-parameter op.execute(network_allocation_table.update().where( alloc._mapping['id'] == network_allocation_table.c.id).values( {'share_network_subnet_id': server._mapping['share_network_subnet_id']})) # add a new column to share_servers. 
try: op.add_column( SHARE_SERVERS_TABLE, sa.Column('network_allocation_update_support', sa.Boolean, nullable=False, server_default=sa.sql.false())) except Exception: LOG.error("Table %s could not add column " "'network_allocation_update_support'.", SHARE_SERVERS_TABLE) raise # drop subnet id foreign key from share servers. try: share_serves_fk_name = ( "fk_share_servers_share_network_subnet_id_share_network_subnets") if connection.engine.name == 'mysql': op.drop_constraint(share_serves_fk_name, SHARE_SERVERS_TABLE, type_="foreignkey") op.drop_column(SHARE_SERVERS_TABLE, 'share_network_subnet_id') except Exception: LOG.error("Table %s could not drop column 'share_network_subnet_id'.", SHARE_SERVERS_TABLE) raise def downgrade(): """Remove share_server_share_network_subnet_mapping table and new columns. This method can lead to data loss because the share server can have more than one subnet. """ try: share_serves_fk_name = ( "fk_share_servers_share_network_subnet_id_share_network_subnets") op.add_column( SHARE_SERVERS_TABLE, sa.Column( 'share_network_subnet_id', sa.String(36), sa.ForeignKey('share_network_subnets.id', name=share_serves_fk_name), ) ) connection = op.get_bind() server_subnet_mappings_table = utils.load_table( SHARE_SERVER_SUBNET_MAP_TABLE, connection) share_servers_table = utils.load_table(SHARE_SERVERS_TABLE, connection) with sa.orm.Session(bind=op.get_bind()) as session: for server in connection.execute(share_servers_table.select()): subnets = session.query( server_subnet_mappings_table).filter( server._mapping['id'] == ( server_subnet_mappings_table.c.share_server_id) ).all() if server._mapping['deleted'] != 'False' and len(subnets) > 1: LOG.warning('Share server %s is not deleted and it ' 'has more than one subnet (%s subnets), ' 'the downgrade may cause an inconsistent ' 'environment.', server._mapping['id'], len(subnets)) subnet_id = ( subnets[0].share_network_subnet_id if subnets else None ) # pylint: disable=no-value-for-parameter 
op.execute(share_servers_table.update().where( server._mapping['id'] == share_servers_table.c.id).values( {'share_network_subnet_id': subnet_id})) except Exception: LOG.error("'share_network_subnet_id' field in the %s table could not " "be created and populated from %s table.", SHARE_SERVERS_TABLE, SHARE_SERVER_SUBNET_MAP_TABLE) raise try: op.drop_table(SHARE_SERVER_SUBNET_MAP_TABLE) except Exception: LOG.error("Failed to drop table %s.", SHARE_SERVER_SUBNET_MAP_TABLE) raise try: op.drop_column(SHARE_SERVERS_TABLE, 'network_allocation_update_support') except Exception: LOG.error("Table %s failed to drop the column " "'network_allocation_update_support'.", SHARE_SERVERS_TABLE) raise try: network_allocation_fk_name = ( "fk_network_allocation_subnet_id_share_network_subnets") if connection.engine.name == 'mysql': op.drop_constraint(network_allocation_fk_name, NETWORK_ALLOCATIONS_TABLE, type_="foreignkey") op.drop_column(NETWORK_ALLOCATIONS_TABLE, 'share_network_subnet_id') except Exception: LOG.error("Column 'network_allocations.share_network_subnet_id' from " "table %s failed to drop.", NETWORK_ALLOCATIONS_TABLE) raise ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/manila/db/migrations/alembic/versions/ac0620cbe74d_add_share_network_subnet_metadata.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/ac0620cbe74d_add_share_network_subnet_metadata.p0000664000175000017500000000440400000000000033356 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Add share network subnet metadata Revision ID: ac0620cbe74d Revises: 1e2d600bf972 Create Date: 2023-01-07 14:13:25.525968 """ # revision identifiers, used by Alembic. revision = 'ac0620cbe74d' down_revision = '1e2d600bf972' from alembic import op from oslo_log import log import sqlalchemy as sql LOG = log.getLogger(__name__) share_network_subnet_metadata_table_name = 'share_network_subnet_metadata' def upgrade(): context = op.get_context() mysql_dl = context.bind.dialect.name == 'mysql' datetime_type = (sql.dialects.mysql.DATETIME(fsp=6) if mysql_dl else sql.DateTime) try: op.create_table( share_network_subnet_metadata_table_name, sql.Column('deleted', sql.String(36), default='False'), sql.Column('created_at', datetime_type), sql.Column('updated_at', datetime_type), sql.Column('deleted_at', datetime_type), sql.Column('share_network_subnet_id', sql.String(36), sql.ForeignKey('share_network_subnets.id'), nullable=False), sql.Column('key', sql.String(255), nullable=False), sql.Column('value', sql.String(1023), nullable=False), sql.Column('id', sql.Integer, primary_key=True, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) except Exception: LOG.error("Table |%s| not created!", share_network_subnet_metadata_table_name) raise def downgrade(): try: op.drop_table(share_network_subnet_metadata_table_name) except Exception: LOG.error("Table |%s| not dropped!", share_network_subnet_metadata_table_name) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/aebe2a413e13_add_state_column_for_service.py0000664000175000017500000000236500000000000032537 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add state column for service Revision ID: aebe2a413e13 Revises: ac0620cbe74d Create Date: 2023-01-10 11:43:24.741726 """ # revision identifiers, used by Alembic. revision = 'aebe2a413e13' down_revision = 'ac0620cbe74d' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): try: op.add_column( 'services', sa.Column('state', sa.String(36), nullable=True)) except Exception: LOG.error("services table column state not added") raise def downgrade(): try: op.drop_column('services', 'state') except Exception: LOG.error("services table column state not dropped") raise ././@PaxHeader0000000000000000000000000000025200000000000011454 xustar0000000000000000148 path=manila-21.0.0/manila/db/migrations/alembic/versions/b10fb432c042_squash_share_group_snapshot_members_and_share_snapshot_instance_models.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/b10fb432c042_squash_share_group_snapshot_members0000664000175000017500000001472000000000000033402 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Squash 'share_group_snapshot_members' and 'share_snapshot_instances' models. Revision ID: 31252d671ae5 Revises: 5237b6625330 Create Date: 2017-02-28 15:35:27.500063 """ # revision identifiers, used by Alembic. revision = '31252d671ae5' down_revision = '5237b6625330' from alembic import op import sqlalchemy as sa from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL from manila.db.migrations import utils SSI_TABLE_NAME = 'share_snapshot_instances' SGSM_TABLE_NAME = 'share_group_snapshot_members' def upgrade(): # Update 'share_snapshot_instance' table with new fields op.add_column(SSI_TABLE_NAME, sa.Column('user_id', sa.String(255))) op.add_column(SSI_TABLE_NAME, sa.Column('project_id', sa.String(255))) op.add_column(SSI_TABLE_NAME, sa.Column('size', sa.Integer)) op.add_column(SSI_TABLE_NAME, sa.Column('share_proto', sa.String(255))) op.add_column( SSI_TABLE_NAME, sa.Column('share_group_snapshot_id', sa.String(36))) # Drop FK for 'snapshot_id' because it will be null in case of SGS member op.drop_constraint('ssi_snapshot_fk', SSI_TABLE_NAME, type_='foreignkey') # Move existing SG snapshot members to share snapshot instance table connection = op.get_bind() ssi_table = utils.load_table(SSI_TABLE_NAME, connection) ssgm_table = utils.load_table(SGSM_TABLE_NAME, connection) ported_data = [] for ssgm_record in connection.execute(ssgm_table.select()): ported_data.append({ "id": ssgm_record.id, "share_group_snapshot_id": ssgm_record.share_group_snapshot_id, "share_instance_id": ssgm_record.share_instance_id, "size": ssgm_record.size, "status": ssgm_record.status, "share_proto": ssgm_record.share_proto, "user_id": ssgm_record.user_id, "project_id": ssgm_record.project_id, "provider_location": ssgm_record.provider_location, "created_at": ssgm_record.created_at, "updated_at": ssgm_record.updated_at, "deleted_at": ssgm_record.deleted_at, "deleted": ssgm_record.deleted, 
}) op.bulk_insert(ssi_table, ported_data) # Delete 'share_group_snapshot_members' table op.drop_table(SGSM_TABLE_NAME) def downgrade(): # Create 'share_group_snapshot_members' table op.create_table( SGSM_TABLE_NAME, sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.String(36), default='False'), sa.Column('user_id', sa.String(length=255), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column( 'share_group_snapshot_id', sa.String(length=36), sa.ForeignKey( 'share_group_snapshots.id', name='fk_gsm_group_snapshot_id'), nullable=False), sa.Column( 'share_instance_id', sa.String(length=36), sa.ForeignKey( 'share_instances.id', name='fk_gsm_share_instance_id'), nullable=False), sa.Column( 'share_id', sa.String(length=36), sa.ForeignKey('shares.id', name='fk_gsm_share_id'), nullable=False), sa.Column('size', sa.Integer), sa.Column('status', sa.String(length=255)), sa.Column('share_proto', sa.String(length=255)), sa.Column('provider_location', sa.String(255), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8') # Select all share snapshot instances that # have not null 'share_snapshot_group_id' to new table connection = op.get_bind() ssi_table = utils.load_table(SSI_TABLE_NAME, connection) share_instances_table = utils.load_table("share_instances", connection) ssgm_table = utils.load_table(SGSM_TABLE_NAME, connection) ported_data = [] for row in connection.execute( ssi_table.join( share_instances_table, share_instances_table.c.id == ssi_table.c.share_instance_id ).select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).where( ssi_table.c.share_group_snapshot_id.isnot(None), )): ported_data.append({ "id": row.share_snapshot_instances_id, "share_group_snapshot_id": ( row.share_snapshot_instances_share_group_snapshot_id), "share_id": row.share_instances_share_id, 
"share_instance_id": row.share_instances_id, "size": row.share_snapshot_instances_size, "status": row.share_snapshot_instances_status, "share_proto": row.share_snapshot_instances_share_proto, "user_id": row.share_snapshot_instances_user_id, "project_id": row.share_snapshot_instances_project_id, "provider_location": ( row.share_snapshot_instances_provider_location), "created_at": row.share_snapshot_instances_created_at, "updated_at": row.share_snapshot_instances_updated_at, "deleted_at": row.share_snapshot_instances_deleted_at, "deleted": row.share_snapshot_instances_deleted or "False", }) # Copy share group snapshot members to new table op.bulk_insert(ssgm_table, ported_data) # Remove copied records from source table connection.execute( ssi_table.delete().where( # pylint: disable=no-value-for-parameter ssi_table.c.share_group_snapshot_id.isnot(None))) # Remove redundant fields from 'share_snapshot_instance' table for column_name in ('user_id', 'project_id', 'size', 'share_proto', 'share_group_snapshot_id'): op.drop_column(SSI_TABLE_NAME, column_name) # Add back FK for 'snapshot_id' field op.create_foreign_key( 'ssi_snapshot_fk', SSI_TABLE_NAME, 'share_snapshots', ['snapshot_id'], ['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/b516de97bfee_add_quota_per_share_type_model.py0000664000175000017500000000412000000000000033155 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Add ProjectShareTypeQuota model Revision ID: b516de97bfee Revises: 238720805ce1 Create Date: 2017-03-27 15:11:11.449617 """ # revision identifiers, used by Alembic. revision = 'b516de97bfee' down_revision = '238720805ce1' from alembic import op import sqlalchemy as sql NEW_TABLE_NAME = 'project_share_type_quotas' def upgrade(): op.create_table( NEW_TABLE_NAME, sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column('project_id', sql.String(length=255)), sql.Column('resource', sql.String(length=255), nullable=False), sql.Column('hard_limit', sql.Integer, nullable=True), sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('deleted', sql.Integer, default=0), sql.Column( 'share_type_id', sql.String(36), sql.ForeignKey( 'share_types.id', name='share_type_id_fk', ), nullable=False), sql.UniqueConstraint( 'share_type_id', 'resource', 'deleted', name="uc_quotas_per_share_types"), mysql_engine='InnoDB', mysql_charset='utf8', ) for table_name in ('quota_usages', 'reservations'): op.add_column( table_name, sql.Column('share_type_id', sql.String(36), nullable=True), ) def downgrade(): op.drop_table(NEW_TABLE_NAME) for table_name in ('quota_usages', 'reservations'): op.drop_column(table_name, 'share_type_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/bb5938d74b73_add_snapshot_metadata_table.py0000664000175000017500000000426700000000000032214 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_snapshot_metadata_table Revision ID: bb5938d74b73 Revises: a87e0fb17dee Create Date: 2022-01-14 14:36:59.408638 """ # revision identifiers, used by Alembic. revision = 'bb5938d74b73' down_revision = 'a87e0fb17dee' from alembic import op from oslo_log import log import sqlalchemy as sql LOG = log.getLogger(__name__) share_snapshot_metadata_table_name = 'share_snapshot_metadata' def upgrade(): context = op.get_context() mysql_dl = context.bind.dialect.name == 'mysql' datetime_type = (sql.dialects.mysql.DATETIME(fsp=6) if mysql_dl else sql.DateTime) try: op.create_table( share_snapshot_metadata_table_name, sql.Column('deleted', sql.String(36), default='False'), sql.Column('created_at', datetime_type), sql.Column('updated_at', datetime_type), sql.Column('deleted_at', datetime_type), sql.Column('share_snapshot_id', sql.String(36), sql.ForeignKey('share_snapshots.id'), nullable=False), sql.Column('key', sql.String(255), nullable=False), sql.Column('value', sql.String(1023), nullable=False), sql.Column('id', sql.Integer, primary_key=True, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8' ) except Exception: LOG.error("Table |%s| not created!", share_snapshot_metadata_table_name) raise def downgrade(): try: op.drop_table(share_snapshot_metadata_table_name) except Exception: LOG.error("Table |%s| not dropped!", share_snapshot_metadata_table_name) raise ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/manila/db/migrations/alembic/versions/c476aeb186ec_add_default_ad_site_to_security_service.py 22 
mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/c476aeb186ec_add_default_ad_site_to_security_ser0000664000175000017500000000254600000000000033466 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add default_ad_site to security service Revision ID: c476aeb186ec Revises: bb5938d74b73 Create Date: 2022-11-30 10:59:34.866946 """ # revision identifiers, used by Alembic. revision = 'c476aeb186ec' down_revision = 'bb5938d74b73' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) ss_table_name = 'security_services' def upgrade(): try: op.add_column( ss_table_name, sa.Column('default_ad_site', sa.String(255), nullable=True)) except Exception: LOG.error("%s table column default_ad_site not added", ss_table_name) raise def downgrade(): try: op.drop_column(ss_table_name, 'default_ad_site') except Exception: LOG.error("%s table column default_ad_site not dropped", ss_table_name) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/cb20f743ca7b_add_resource_locks.py0000664000175000017500000000431200000000000030477 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_resource_locks Revision ID: cb20f743ca7b Revises: 9afbe2df4945 Create Date: 2023-06-23 16:34:36.277477 """ # revision identifiers, used by Alembic. revision = 'cb20f743ca7b' down_revision = '9afbe2df4945' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): context = op.get_context() mysql_dl = context.bind.dialect.name == 'mysql' datetime_type = (sa.dialects.mysql.DATETIME(fsp=6) if mysql_dl else sa.DateTime) try: op.create_table( 'resource_locks', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('user_id', sa.String(255), nullable=False), sa.Column('project_id', sa.String(255), nullable=False), sa.Column('resource_action', sa.String(255), default='delete'), sa.Column('resource_type', sa.String(255), nullable=False), sa.Column('resource_id', sa.String(36), nullable=False), sa.Column('lock_context', sa.String(16), nullable=False), sa.Column('lock_reason', sa.String(1023), nullable=True), sa.Column('created_at', datetime_type), sa.Column('updated_at', datetime_type), sa.Column('deleted_at', datetime_type), sa.Column('deleted', sa.String(36), default='False'), mysql_engine='InnoDB', mysql_charset='utf8', ) except Exception: LOG.error("Table resource_locks not created!") raise def downgrade(): try: op.drop_table('resource_locks') except Exception: LOG.error("resource_locks table not dropped") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/db/migrations/alembic/versions/cdefa6287df8_add_ensuring_field_to_services.py0000664000175000017500000000242700000000000033170 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-ensuring-field-to-services Revision ID: cdefa6287df8 Revises: 2f27d904214c Create Date: 2024-07-15 14:29:16.733696 """ # revision identifiers, used by Alembic. revision = 'cdefa6287df8' down_revision = '2f27d904214c' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): try: op.add_column('services', sa.Column( 'ensuring', sa.Boolean, nullable=False, server_default=sa.sql.false())) except Exception: LOG.error("Column services.ensuring not created!") raise def downgrade(): try: op.drop_column('services', 'ensuring') except Exception: LOG.error("Column shares.ensuring not dropped!") raise ././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=manila-21.0.0/manila/db/migrations/alembic/versions/d5db24264f5c_add_consistent_snapshot_support_attr_to_share_group_model.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/d5db24264f5c_add_consistent_snapshot_support_att0000664000175000017500000000324500000000000033535 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add enum 'consistent_snapshot_support' attr to 'share_groups' model. Revision ID: d5db24264f5c Revises: 927920b37453 Create Date: 2017-02-03 15:59:31.134166 """ # revision identifiers, used by Alembic. revision = 'd5db24264f5c' down_revision = '927920b37453' from alembic import op import sqlalchemy as sa SG_TABLE_NAME = 'share_groups' ATTR_NAME = 'consistent_snapshot_support' ENUM_POOL_VALUE = 'pool' ENUM_HOST_VALUE = 'host' def upgrade(): # Workaround for following alembic bug: # https://bitbucket.org/zzzeek/alembic/issue/89 context = op.get_context() if context.bind.dialect.name == 'postgresql': op.execute( "CREATE TYPE %s AS ENUM ('%s', '%s')" % ( ATTR_NAME, ENUM_POOL_VALUE, ENUM_HOST_VALUE)) op.add_column( SG_TABLE_NAME, sa.Column( ATTR_NAME, sa.Enum(ENUM_POOL_VALUE, ENUM_HOST_VALUE, name=ATTR_NAME), nullable=True, ), ) def downgrade(): op.drop_column(SG_TABLE_NAME, ATTR_NAME) context = op.get_context() if context.bind.dialect.name == 'postgresql': op.execute('DROP TYPE %s' % ATTR_NAME) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/dda6de06349_add_export_locations_metadata.py0000664000175000017500000000762700000000000032573 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add DB support for share instance export locations metadata. Revision ID: dda6de06349 Revises: 323840a08dc4 Create Date: 2015-11-30 13:50:15.914232 """ # revision identifiers, used by Alembic. revision = 'dda6de06349' down_revision = '323840a08dc4' from alembic import op from oslo_log import log from oslo_utils import uuidutils import sqlalchemy as sa SI_TABLE_NAME = 'share_instances' EL_TABLE_NAME = 'share_instance_export_locations' ELM_TABLE_NAME = 'share_instance_export_locations_metadata' LOG = log.getLogger(__name__) def upgrade(): try: meta = sa.MetaData() # Add new 'is_admin_only' column in export locations table that will be # used for hiding admin export locations from common users in API. op.add_column( EL_TABLE_NAME, sa.Column('is_admin_only', sa.Boolean, default=False)) # Create new 'uuid' column as String(36) in export locations table # that will be used for API. op.add_column( EL_TABLE_NAME, sa.Column('uuid', sa.String(36), unique=True), ) # Generate UUID for each existing export location. el_table = sa.Table( EL_TABLE_NAME, meta, sa.Column('id', sa.Integer), sa.Column('uuid', sa.String(36)), sa.Column('is_admin_only', sa.Boolean), ) for record in op.get_bind().execute(el_table.select()): # pylint: disable=no-value-for-parameter op.get_bind().execute(el_table.update().values( is_admin_only=False, uuid=uuidutils.generate_uuid(), ).where( el_table.c.id == record.id, )) # Make new 'uuid' column in export locations table not nullable. 
op.alter_column( EL_TABLE_NAME, 'uuid', existing_type=sa.String(length=36), nullable=False, ) except Exception: LOG.error("Failed to update '%s' table!", EL_TABLE_NAME) raise try: op.create_table( ELM_TABLE_NAME, sa.Column('id', sa.Integer, primary_key=True), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Integer), sa.Column('export_location_id', sa.Integer, sa.ForeignKey('%s.id' % EL_TABLE_NAME, name="elm_id_fk"), nullable=False), sa.Column('key', sa.String(length=255), nullable=False), sa.Column('value', sa.String(length=1023), nullable=False), sa.UniqueConstraint('export_location_id', 'key', 'deleted', name="elm_el_id_uc"), mysql_engine='InnoDB', mysql_charset='utf8', ) except Exception: LOG.error("Failed to create '%s' table!", ELM_TABLE_NAME) raise def downgrade(): try: op.drop_table(ELM_TABLE_NAME) except Exception: LOG.error("Failed to drop '%s' table!", ELM_TABLE_NAME) raise try: op.drop_column(EL_TABLE_NAME, 'is_admin_only') op.drop_column(EL_TABLE_NAME, 'uuid') except Exception: LOG.error("Failed to update '%s' table!", EL_TABLE_NAME) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/e1949a93157a_add_share_group_types_table.py0000664000175000017500000001223400000000000032167 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Add share group types table Revision ID: e1949a93157a Revises: 03da71c0e321 Create Date: 2016-06-01 10:41:06.410945 """ # revision identifiers, used by Alembic. revision = 'e1949a93157a' down_revision = '03da71c0e321' from alembic import op from oslo_log import log import sqlalchemy as sql LOG = log.getLogger(__name__) def upgrade(): meta = sql.MetaData() meta.bind = op.get_bind() # Add share group types try: op.create_table( 'share_group_types', sql.Column( 'id', sql.String(length=36), primary_key=True, nullable=False), sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('is_public', sql.Boolean()), sql.Column('name', sql.String(length=255)), sql.Column('deleted', sql.String(length=36)), sql.UniqueConstraint( 'name', 'deleted', name="uniq_share_group_type_name"), mysql_engine='InnoDB', mysql_charset='utf8', ) except Exception: LOG.error("Table |%s| not created!", 'share_group_types') raise # Add share group specs try: op.create_table( 'share_group_type_specs', sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('spec_key', sql.String(length=255)), sql.Column('spec_value', sql.String(length=255)), sql.Column('deleted', sql.Integer), sql.Column( 'share_group_type_id', sql.String(36), sql.ForeignKey( 'share_group_types.id', name="sgtp_id_extra_specs_fk")), mysql_engine='InnoDB', mysql_charset='utf8', ) except Exception: LOG.error("Table |%s| not created!", 'share_group_type_specs') raise # Add share group project types try: op.create_table( 'share_group_type_projects', sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column( 'share_group_type_id', sql.String(36), sql.ForeignKey('share_group_types.id', 
name="sgtp_id_fk")), sql.Column('project_id', sql.String(length=255)), sql.Column('deleted', sql.Integer), sql.UniqueConstraint( 'share_group_type_id', 'project_id', 'deleted', name="sgtp_project_id_uc"), mysql_engine='InnoDB', mysql_charset='utf8', ) except Exception: LOG.error("Table |%s| not created!", 'share_group_type_projects') raise # Add mapping between group types and share types op.create_table( 'share_group_type_share_type_mappings', sql.Column('id', sql.String(36), primary_key=True, nullable=False), sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('deleted', sql.String(36), default='False'), sql.Column( 'share_group_type_id', sql.String(length=36), sql.ForeignKey('share_group_types.id', name="sgtp_id_sgt_id_uc"), nullable=False), sql.Column( 'share_type_id', sql.String(length=36), sql.ForeignKey('share_types.id', name="sgtp_id_st_id_uc"), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8') # Add share group type for share groups op.add_column( 'share_groups', sql.Column( 'share_group_type_id', sql.String(36), sql.ForeignKey('share_group_types.id', name="sgt_id_sg_id_uc"), ) ) # TODO(ameade): Create type for existing consistency groups def downgrade(): # Remove share group type for share groups op.drop_constraint("sgt_id_sg_id_uc", "share_groups", type_="foreignkey") op.drop_column('share_groups', 'share_group_type_id') # Drop mappings for table_name in ('share_group_type_share_type_mappings', 'share_group_type_projects', 'share_group_type_specs', 'share_group_types'): try: op.drop_table(table_name) except Exception: LOG.error("%s table not dropped", table_name) raise ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/manila/db/migrations/alembic/versions/e6d88547b381_add_progress_field_to_share_instance.py 22 mtime=1759315554.0 
manila-21.0.0/manila/db/migrations/alembic/versions/e6d88547b381_add_progress_field_to_share_instanc0000664000175000017500000000345000000000000033247 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add-progress-field-to-share-instance Revision ID: e6d88547b381 Revises: 805685098bd2 Create Date: 2020-01-31 14:06:15.952747 """ # revision identifiers, used by Alembic. revision = 'e6d88547b381' down_revision = '805685098bd2' from alembic import op from manila.common import constants from manila.db.migrations import utils from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) def upgrade(): try: connection = op.get_bind() op.add_column('share_instances', sa.Column('progress', sa.String(32), nullable=True, default=None)) share_instances_table = utils.load_table('share_instances', connection) updated_data = {'progress': '100%'} # pylint: disable=no-value-for-parameter op.execute( share_instances_table.update().where( share_instances_table.c.status == constants.STATUS_AVAILABLE, ).values(updated_data) ) except Exception: LOG.error("Column share_instances.progress not created.") raise def downgrade(): try: op.drop_column('share_instances', 'progress') except Exception: LOG.error("Column share_instances.progress not dropped.") raise ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/manila/db/migrations/alembic/versions/e8ea58723178_remove_host_from_driver_private_data.py 22 
mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/e8ea58723178_remove_host_from_driver_private_dat0000664000175000017500000000733700000000000033346 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove host from driver private data Revision ID: e8ea58723178 Revises: fdfb668d19e1 Create Date: 2016-07-11 12:59:34.579291 """ # revision identifiers, used by Alembic. revision = 'e8ea58723178' down_revision = 'fdfb668d19e1' from alembic import op from oslo_log import log from oslo_utils import uuidutils import sqlalchemy as sql from manila.db.migrations import utils LOG = log.getLogger(__name__) TABLE_NAME = 'drivers_private_data' COLUMN_HOST = 'host' DEFAULT_HOST = 'unknown' COLUMN_ENTITY = 'entity_uuid' COLUMN_KEY = 'key' MYSQL_ENGINE = 'mysql' def upgrade(): bind = op.get_bind() engine = bind.engine try: if (engine.name == MYSQL_ENGINE): op.drop_constraint('PRIMARY', TABLE_NAME, type_='primary') op.create_primary_key('DRIVERS_PRIVATE_PK', TABLE_NAME, ['entity_uuid', 'key']) op.drop_column(TABLE_NAME, COLUMN_HOST) except Exception: LOG.error("Column '%s' could not be dropped", COLUMN_HOST) raise def downgrade(): connection = op.get_bind() from_table = utils.load_table(TABLE_NAME, connection) migration_table_name = "_migrating_%(table)s_%(session)s" % { 'table': TABLE_NAME, 'session': uuidutils.generate_uuid()[:8] } LOG.info("Creating the migration table %(table)s", { 
'table': migration_table_name }) migration_table = op.create_table( migration_table_name, sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('deleted', sql.Integer, default=0), sql.Column('host', sql.String(255), nullable=False, primary_key=True), sql.Column('entity_uuid', sql.String(36), nullable=False, primary_key=True), sql.Column('key', sql.String(255), nullable=False, primary_key=True), sql.Column('value', sql.String(1023), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) LOG.info("Copying data from %(from_table)s to the migration " "table %(migration_table)s", { 'from_table': TABLE_NAME, 'migration_table': migration_table_name }) rows = [] for row in op.get_bind().execute(from_table.select()): rows.append({ 'created_at': row.created_at, 'updated_at': row.updated_at, 'deleted_at': row.deleted_at, 'deleted': row.deleted, 'host': DEFAULT_HOST, 'entity_uuid': row.entity_uuid, 'key': row.key, 'value': row.value }) op.bulk_insert(migration_table, rows) LOG.info("Dropping table %(from_table)s", { 'from_table': TABLE_NAME }) op.drop_table(TABLE_NAME) LOG.info("Rename the migration table %(migration_table)s to " "the original table %(from_table)s", { 'migration_table': migration_table_name, 'from_table': TABLE_NAME }) op.rename_table(migration_table_name, TABLE_NAME) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/e975ea83b712_add_share_server_encryption.py0000664000175000017500000000713600000000000032305 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add share server encryption Revision ID: e975ea83b712 Revises: 0d8c8f6d54a4 Create Date: 2025-01-20 14:25:29.141460 """ # revision identifiers, used by Alembic. revision = 'e975ea83b712' down_revision = '0d8c8f6d54a4' from alembic import op from oslo_log import log import sqlalchemy as sa LOG = log.getLogger(__name__) encryption_refs_table = 'encryption_refs' share_servers_table = 'share_servers' share_instances_table = 'share_instances' def upgrade(): try: op.add_column( share_servers_table, sa.Column('encryption_key_ref', sa.String(36), nullable=True)) except Exception: LOG.error("Column 'encryption_key_ref' can not be added to the " "'share_servers' table!") raise try: op.add_column( share_servers_table, sa.Column('application_credential_id', sa.String(36), nullable=True)) except Exception: LOG.error("Column 'application_credential_id' can not be added to the " "'share_servers' table!") raise try: op.add_column( share_instances_table, sa.Column('encryption_key_ref', sa.String(36), nullable=True)) except Exception: LOG.error("Column can not be added to the 'share_instances' table!") raise context = op.get_context() mysql_dl = context.bind.dialect.name == 'mysql' datetime_type = (sa.dialects.mysql.DATETIME(fsp=6) if mysql_dl else sa.DateTime) try: op.create_table( encryption_refs_table, sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('share_server_id', sa.String(length=36), sa.ForeignKey('share_servers.id'), unique=True), sa.Column('share_instance_id', sa.String(length=36), sa.ForeignKey('share_instances.id'), unique=True), 
sa.Column('encryption_key_ref', sa.String(36), nullable=True), sa.Column('project_id', sa.String(length=255), nullable=False), sa.Column('deleted', sa.String(36), default='False'), sa.Column('created_at', datetime_type), sa.Column('updated_at', datetime_type), sa.Column('deleted_at', datetime_type), mysql_engine='InnoDB', mysql_charset='utf8' ) except Exception: LOG.error("Table |%s| not created!", encryption_refs_table) raise def downgrade(): try: op.drop_table(encryption_refs_table) except Exception: LOG.error("%s table not dropped", encryption_refs_table) raise try: op.drop_column(share_servers_table, 'encryption_key_ref') op.drop_column(share_servers_table, 'application_credential_id') except Exception: LOG.error("Column can not be dropped for 'share_servers' table!") raise try: op.drop_column(share_instances_table, 'encryption_key_ref') except Exception: LOG.error("Column can not be dropped for 'share_instances' table!") raise ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/manila/db/migrations/alembic/versions/e9f79621d83f_add_cast_rules_to_readonly_to_share_instances.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/e9f79621d83f_add_cast_rules_to_readonly_to_share0000664000175000017500000000710700000000000033354 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add_cast_rules_to_readonly_to_share_instances Revision ID: e9f79621d83f Revises: 54667b9cade7 Create Date: 2016-12-01 04:06:33.115054 """ # revision identifiers, used by Alembic. revision = 'e9f79621d83f' down_revision = '54667b9cade7' from alembic import op from oslo_log import log import sqlalchemy as sa from manila.common import constants from manila.db.migrations import utils LOG = log.getLogger(__name__) def upgrade(): LOG.info("Adding cast_rules_to_readonly column to share instances.") op.add_column('share_instances', sa.Column('cast_rules_to_readonly', sa.Boolean, default=False)) connection = op.get_bind() shares_table = utils.load_table('shares', connection) share_instances_table = utils.load_table('share_instances', connection) # First, set the value of ``cast_rules_to_readonly`` in every existing # share instance to False # pylint: disable=no-value-for-parameter op.execute( share_instances_table.update().values({ 'cast_rules_to_readonly': False, }) ) # Set the value of ``cast_rules_to_readonly`` to True for secondary # replicas in 'readable' replication relationships replicated_shares_query = ( shares_table.select() .where(shares_table.c.deleted == 'False') .where(shares_table.c.replication_type == constants.REPLICATION_TYPE_READABLE) ) for replicated_share in connection.execute(replicated_shares_query): # NOTE (gouthamr): Only secondary replicas that are not undergoing a # 'replication_change' (promotion to active) are considered. When the # replication change is complete, the share manager will take care # of ensuring the correct values for the replicas that were involved # in the transaction. 
secondary_replicas_query = ( share_instances_table.select().where( share_instances_table.c.deleted == 'False').where( share_instances_table.c.replica_state != constants.REPLICA_STATE_ACTIVE).where( share_instances_table.c.status != constants.STATUS_REPLICATION_CHANGE).where( replicated_share['id'] == share_instances_table.c.share_id ) ) for replica in connection.execute(secondary_replicas_query): # pylint: disable=no-value-for-parameter op.execute( share_instances_table.update().where( share_instances_table.c.id == replica.id ).values({ 'cast_rules_to_readonly': True, }) ) op.alter_column('share_instances', 'cast_rules_to_readonly', existing_type=sa.Boolean, existing_server_default=False, nullable=False) def downgrade(): LOG.info("Removing cast_rules_to_readonly column from share " "instances.") op.drop_column('share_instances', 'cast_rules_to_readonly') ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=manila-21.0.0/manila/db/migrations/alembic/versions/eb6d5544cbbd_add_provider_location_to_share_snapshot_instances.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/eb6d5544cbbd_add_provider_location_to_share_snap0000664000175000017500000000205100000000000033541 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add provider_location to share_snapshot_instances Revision ID: eb6d5544cbbd Revises: 5155c7077f99 Create Date: 2016-02-12 22:25:39.594545 """ # revision identifiers, used by Alembic. revision = 'eb6d5544cbbd' down_revision = '5155c7077f99' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'share_snapshot_instances', sa.Column('provider_location', sa.String(255), nullable=True)) def downgrade(): op.drop_column('share_snapshot_instances', 'provider_location') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/ef0c02b4366_add_share_type_projects.py0000664000175000017500000000477200000000000031320 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add_share_type_projects Revision ID: ef0c02b4366 Revises: 17115072e1c3 Create Date: 2015-02-20 10:49:40.744974 """ # revision identifiers, used by Alembic. 
revision = 'ef0c02b4366' down_revision = '59eb64046740' from alembic import op from oslo_log import log import sqlalchemy as sql LOG = log.getLogger(__name__) def upgrade(): meta = sql.MetaData() meta.bind = op.get_bind() is_public = sql.Column('is_public', sql.Boolean) try: op.add_column('share_types', is_public) share_types = sql.Table('share_types', meta, is_public.copy()) # pylint: disable=no-value-for-parameter op.execute(share_types.update().values(is_public=True)) except Exception: LOG.error("Column |%s| not created!", repr(is_public)) raise try: op.create_table( 'share_type_projects', sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column('created_at', sql.DateTime), sql.Column('updated_at', sql.DateTime), sql.Column('deleted_at', sql.DateTime), sql.Column('share_type_id', sql.String(36), sql.ForeignKey('share_types.id', name="stp_id_fk")), sql.Column('project_id', sql.String(length=255)), sql.Column('deleted', sql.Integer), sql.UniqueConstraint('share_type_id', 'project_id', 'deleted', name="stp_project_id_uc"), mysql_engine='InnoDB', mysql_charset='utf8' ) except Exception: LOG.error("Table |%s| not created!", 'share_type_projects') raise def downgrade(): try: op.drop_column('share_types', 'is_public') except Exception: LOG.error("share_types.is_public column not dropped") raise try: op.drop_table('share_type_projects') except Exception: LOG.error("share_type_projects table not dropped") raise ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/manila/db/migrations/alembic/versions/fbdfabcba377_change_the_mysql_datetime_precision.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/fbdfabcba377_change_the_mysql_datetime_precision0000664000175000017500000000652100000000000033707 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Change the datetime precision for all objects (MySQL and derivatives) Revision ID: fbdfabcba377 Revises: 478c445d8d3e Create Date: 2020-04-20 15:32:59.365323 """ # revision identifiers, used by Alembic. revision = 'fbdfabcba377' down_revision = '478c445d8d3e' from alembic import op from sqlalchemy import DateTime, dialects # DB Tables that can be affected by low precision timestamps in MySQL: TABLES = ('services', 'quotas', 'project_user_quotas', 'backend_info', 'project_share_type_quotas', 'quota_classes', 'quota_usages', 'reservations', 'shares', 'share_instance_export_locations', 'share_instances', 'share_instance_export_locations_metadata', 'share_types', 'share_type_projects', 'share_type_extra_specs', 'share_metadata', 'share_access_map', 'share_access_rules_metadata', 'share_instance_access_map', 'share_snapshot_instances', 'share_snapshots', 'share_snapshot_access_map', 'share_networks', 'share_snapshot_instance_access_map', 'share_network_subnets', 'share_snapshot_instance_export_locations', 'security_services', 'share_servers', 'share_server_backend_details', 'share_groups', 'network_allocations', 'share_network_security_service_association', 'drivers_private_data', 'availability_zones', 'share_group_types', 'share_group_type_projects', 'share_group_type_specs', 'messages', 'share_group_snapshots', 'share_group_type_share_type_mappings', 'share_group_share_type_mappings') def upgrade(): context = op.get_context() if context.bind.dialect.name == 'mysql': # override the default precision of DateTime: for table in TABLES: op.alter_column(table, 
'created_at', existing_type=DateTime, type_=dialects.mysql.DATETIME(fsp=6)) op.alter_column(table, 'updated_at', existing_type=DateTime, type_=dialects.mysql.DATETIME(fsp=6)) op.alter_column(table, 'deleted_at', existing_type=DateTime, type_=dialects.mysql.DATETIME(fsp=6)) def downgrade(): context = op.get_context() if context.bind.dialect.name == 'mysql': for table in TABLES: op.alter_column(table, 'created_at', existing_type=dialects.mysql.DATETIME(fsp=6), type_=DateTime) op.alter_column(table, 'updated_at', existing_type=dialects.mysql.DATETIME(fsp=6), type_=DateTime) op.alter_column(table, 'deleted_at', existing_type=dialects.mysql.DATETIME(fsp=6), type_=DateTime) ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/manila/db/migrations/alembic/versions/fdfb668d19e1_add_gateway_to_network_allocations_table.py 22 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic/versions/fdfb668d19e1_add_gateway_to_network_allocations_0000664000175000017500000000223200000000000033512 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add_gateway_to_network_allocations_table Revision ID: fdfb668d19e1 Revises: 221a83cfd85b Create Date: 2016-04-19 10:07:16.224806 """ # revision identifiers, used by Alembic. 
revision = 'fdfb668d19e1' down_revision = '221a83cfd85b' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column( 'network_allocations', sa.Column('gateway', sa.String(64), nullable=True)) op.add_column( 'share_networks', sa.Column('gateway', sa.String(64), nullable=True)) def downgrade(): op.drop_column('network_allocations', 'gateway') op.drop_column('share_networks', 'gateway') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/alembic.ini0000664000175000017500000000214700000000000021161 0ustar00zuulzuul00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s/alembic # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false #sqlalchemy.url = driver://user:pass@localhost/dbname # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/migrations/utils.py0000664000175000017500000000132400000000000020572 0ustar00zuulzuul00000000000000# Copyright 2015 
Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa def load_table(name, connection): return sa.Table(name, sa.MetaData(), autoload_with=connection) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8736722 manila-21.0.0/manila/db/sqlalchemy/0000775000175000017500000000000000000000000017046 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/sqlalchemy/__init__.py0000664000175000017500000000000000000000000021145 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/sqlalchemy/api.py0000664000175000017500000075423400000000000020210 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright (c) 2014 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import copy import datetime from functools import wraps import ipaddress import sys import warnings # NOTE(uglide): Required to override default oslo_db Query class import manila.db.sqlalchemy.query # noqa from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db import exception as db_exception from oslo_db import options as db_options from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils as db_utils from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import and_ from sqlalchemy import MetaData from sqlalchemy import or_ from sqlalchemy import orm from sqlalchemy.sql.expression import false from sqlalchemy.sql.expression import literal from sqlalchemy.sql.expression import true from sqlalchemy.sql import func from manila.common import constants from manila.db.sqlalchemy import models from manila.db.sqlalchemy import utils from manila import exception from manila.i18n import _ from manila import quota osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') CONF = cfg.CONF CONF.import_group("profiler", "manila.service") LOG = log.getLogger(__name__) QUOTAS = quota.QUOTAS _DEFAULT_QUOTA_NAME = 'default' PER_PROJECT_QUOTAS = [] _DEFAULT_SQL_CONNECTION = 'sqlite://' db_options.set_defaults(cfg.CONF, connection=_DEFAULT_SQL_CONNECTION) 
context_manager = enginefacade.transaction_context() context_manager.configure() if ( osprofiler_sqlalchemy and CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy ): context_manager.append_on_engine_create( lambda engine: osprofiler_sqlalchemy.add_tracing(sa, engine, "db")) def get_engine(): return context_manager.writer.get_engine() def get_backend(): """The backend is this module itself.""" return sys.modules[__name__] def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: raise exception.NotAuthorized() elif context.quota_class != class_name: raise exception.NotAuthorized() def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. 
""" @wraps(f) def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. This does no authorization for user or project access matching, see :py:func:`authorize_project_context` and :py:func:`authorize_user_context`. The first argument to the wrapped function must be the context. """ @wraps(f) def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper def require_share_exists(f): """Decorator to require the specified share to exist. Requires the wrapped function to use context and share_id as their first two arguments. """ @wraps(f) def wrapper(context, share_id, *args, **kwargs): share_get(context, share_id) return f(context, share_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_share_snapshot_exists(f): """Decorator to require the specified share snapshot to exist. Requires the wrapped function to use context and share_snapshot_id as their first two arguments. """ @wraps(f) def wrapper(context, share_snapshot_id, *args, **kwargs): share_snapshot_get(context, share_snapshot_id) return f(context, share_snapshot_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_share_network_subnet_exists(f): """Decorator to require the specified share network subnet to exist. Requires the wrapped function to use context and share_network_subnet_id as their first two arguments. """ @wraps(f) def wrapper(context, share_network_subnet_id, *args, **kwargs): share_network_subnet_get(context, share_network_subnet_id) return f(context, share_network_subnet_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_share_instance_exists(f): """Decorator to require the specified share instance to exist. 
Requires the wrapped function to use context and share_instance_id as their first two arguments. """ @wraps(f) def wrapper(context, share_instance_id, *args, **kwargs): share_instance_get(context, share_instance_id) return f(context, share_instance_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_availability_zone_exists(*, strict: bool): """Decorator to require the specified availability zone to exist. Requires the wrapped function to use context as their first argument and values as either their second (for create) or third (for update) argument. .. note:: This has a side-effect of updating the provided values dict, replacing ``availability_zone`` with ``availability_zone_id`` :param strict: If true, ``values`` must contain an ``availability_zone`` key """ def inner(f): @wraps(f) def wrapper(context, *args, **kwargs): values = args[0] if not isinstance(args[0], dict): values = args[1] ensure_availability_zone_exists(context, values, strict=strict) return f(context, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper return inner def apply_sorting(model, query, sort_key, sort_dir): if sort_dir.lower() not in ('desc', 'asc'): msg = _("Wrong sorting data provided: sort key is '%(sort_key)s' " "and sort direction is '%(sort_dir)s'.") % { "sort_key": sort_key, "sort_dir": sort_dir} raise exception.InvalidInput(reason=msg) # NOTE(maaoyu): We add the additional sort by ID in this case to # get deterministic results. Without the ordering by ID this could # lead to flapping return lists. 
sort_keys = [sort_key] if sort_key != 'id': sort_keys.append('id') for sort_key in sort_keys: sort_attr = getattr(model, sort_key) sort_method = getattr(sort_attr, sort_dir.lower()) query = query.order_by(sort_method()) return query def handle_db_data_error(f): def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except db_exc.DBDataError: msg = _('Error writing field to database.') LOG.exception(msg) raise exception.Invalid(msg) return wrapper def model_query(context, model, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under :param model: model to query. Must be a subclass of ModelBase. :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict query to match the context's project_id. """ assert 'session' not in kwargs assert hasattr(context, 'session') and context.session read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') kwargs = dict() if project_only and not context.is_admin: kwargs['project_id'] = context.project_id if read_deleted in ('no', 'n', False): kwargs['deleted'] = False elif read_deleted == 'only': kwargs['deleted'] = True elif read_deleted in ('yes', 'y', True): pass return db_utils.model_query( model=model, session=context.session, args=args, **kwargs, ) def _process_model_like_filter(model, query, filters): """Applies regex expression filtering to a query. :param model: model to apply filters to :param query: query to apply filters to :param filters: dictionary of filters with regex values :returns: the updated query. 
""" if query is None: return query if filters: for key in sorted(filters): column_attr = getattr(model, key) if 'property' == type(column_attr).__name__: continue value = filters[key] if not (isinstance(value, (str, int))): continue query = query.filter( column_attr.op('LIKE')(u'%%%s%%' % value)) return query def apply_like_filters(process_exact_filters): def _decorator(query, model, filters, legal_keys): exact_filters = filters.copy() regex_filters = {} for key, value in filters.items(): if key not in legal_keys: # Skip ones we're not filtering on continue # NOTE(haixin): For inexact match, the filter keys # are in the format of 'key~=value' if key.endswith('~'): exact_filters.pop(key) regex_filters[key.rstrip('~')] = value query = process_exact_filters(query, model, exact_filters, legal_keys) return _process_model_like_filter(model, query, regex_filters) return _decorator @apply_like_filters def exact_filter(query, model, filters, legal_keys, created_at_key='created_at'): """Applies exact match filtering to a query. Returns the updated query. Modifies filters argument to remove filters consumed. :param query: query to apply filters to :param model: model object the query applies to, for IN-style filtering :param filters: dictionary of filters; values that are lists, tuples, sets, or frozensets cause an 'IN' test to be performed, while exact matching ('==' operator) is used for other values :param legal_keys: list of keys to apply exact filtering to """ filter_dict = {} created_at_attr = getattr(model, created_at_key, None) # Walk through all the keys for key in legal_keys: # Skip ones we're not filtering on if key not in filters: continue # OK, filtering on this key; what value do we search for? 
value = filters.pop(key) if key == 'created_since' and created_at_attr: # This is a reserved query parameter to indicate resources created # after a particular datetime value = timeutils.normalize_time(value) query = query.filter(created_at_attr.op('>=')(value)) elif key == 'created_before' and created_at_attr: # This is a reserved query parameter to indicate resources created # before a particular datetime value = timeutils.normalize_time(value) query = query.filter(created_at_attr.op('<=')(value)) elif isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(model, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter_by(**filter_dict) return query def ensure_model_dict_has_id(model_dict): if not model_dict.get('id'): model_dict['id'] = uuidutils.generate_uuid() return model_dict def _sync_shares(context, project_id, user_id, share_type_id=None): shares, _ = _share_data_get_for_project( context, project_id, user_id, share_type_id=share_type_id, ) return {'shares': shares} def _sync_snapshots(context, project_id, user_id, share_type_id=None): snapshots, _ = _snapshot_data_get_for_project( context, project_id, user_id, share_type_id=share_type_id, ) return {'snapshots': snapshots} def _sync_gigabytes(context, project_id, user_id, share_type_id=None): _, share_gigs = _share_data_get_for_project( context, project_id, user_id, share_type_id=share_type_id, ) return {'gigabytes': share_gigs} def _sync_snapshot_gigabytes(context, project_id, user_id, share_type_id=None): _, snapshot_gigs = _snapshot_data_get_for_project( context, project_id, user_id, share_type_id=share_type_id, ) return {'snapshot_gigabytes': snapshot_gigs} def _sync_share_networks(context, project_id, user_id, share_type_id=None): share_networks_count = _count_share_networks( context, project_id, 
user_id, share_type_id=share_type_id, ) return {'share_networks': share_networks_count} def _sync_share_groups(context, project_id, user_id, share_type_id=None): share_groups_count = _count_share_groups( context, project_id, user_id, share_type_id=share_type_id, ) return {'share_groups': share_groups_count} def _sync_backups(context, project_id, user_id, share_type_id=None): backups, _ = _backup_data_get_for_project(context, project_id, user_id) return {'backups': backups} def _sync_backup_gigabytes(context, project_id, user_id, share_type_id=None): _, backup_gigs = _backup_data_get_for_project(context, project_id, user_id) return {'backup_gigabytes': backup_gigs} def _sync_share_group_snapshots( context, project_id, user_id, share_type_id=None, ): share_group_snapshots_count = _count_share_group_snapshots( context, project_id, user_id, share_type_id=share_type_id, ) return {'share_group_snapshots': share_group_snapshots_count} def _sync_share_replicas(context, project_id, user_id, share_type_id=None): share_replicas_count, _ = _share_replica_data_get_for_project( context, project_id, user_id, share_type_id=share_type_id, ) return {'share_replicas': share_replicas_count} def _sync_replica_gigabytes(context, project_id, user_id, share_type_id=None): _, replica_gigs = _share_replica_data_get_for_project( context, project_id, user_id, share_type_id=share_type_id, ) return {'replica_gigabytes': replica_gigs} def _sync_encryption_keys(context, project_id, user_id, share_type_id=None): encryption_keys = _count_encryption_keys_for_project( context, project_id, user_id ) return {'encryption_keys': encryption_keys} QUOTA_SYNC_FUNCTIONS = { '_sync_shares': _sync_shares, '_sync_snapshots': _sync_snapshots, '_sync_gigabytes': _sync_gigabytes, '_sync_snapshot_gigabytes': _sync_snapshot_gigabytes, '_sync_share_networks': _sync_share_networks, '_sync_share_groups': _sync_share_groups, '_sync_share_group_snapshots': _sync_share_group_snapshots, '_sync_share_replicas': 
_sync_share_replicas, '_sync_replica_gigabytes': _sync_replica_gigabytes, '_sync_backups': _sync_backups, '_sync_backup_gigabytes': _sync_backup_gigabytes, '_sync_encryption_keys': _sync_encryption_keys, } ################### @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_resources_host_update(context, current_host, new_host): """Updates the 'host' attribute of resources""" resources = { 'instances': models.ShareInstance, 'servers': models.ShareServer, 'groups': models.ShareGroup, } result = {} for res_name, res_model in resources.items(): host_field = res_model.host query = model_query( context, res_model, read_deleted="no", ).filter(host_field.like('{}%'.format(current_host))) count = query.update( {host_field: func.replace(host_field, current_host, new_host)}, synchronize_session=False, ) result.update({res_name: count}) return result ################### @require_admin_context @context_manager.writer def service_destroy(context, service_id): service_ref = _service_get(context, service_id) service_ref.soft_delete(context.session) @require_admin_context def _service_get(context, service_id): result = ( model_query( context, models.Service, ).filter_by( id=service_id, ).first() ) if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context @context_manager.reader def service_get(context, service_id): return _service_get(context, service_id) @require_admin_context @context_manager.reader def service_get_all(context, disabled=None): query = model_query(context, models.Service) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context @context_manager.reader def service_get_all_by_topic(context, topic, consider_disabled=False): query = model_query( context, models.Service, read_deleted="no") if not consider_disabled: query = query.filter_by(disabled=False) return query.filter_by(topic=topic).all() 
@require_admin_context
@context_manager.reader
def service_get_by_host_and_topic(context, host, topic):
    """Return the enabled, non-deleted service for (host, topic)."""
    result = (model_query(
        context, models.Service, read_deleted="no").
        filter_by(disabled=False).
        filter_by(host=host).
        filter_by(topic=topic).
        first())
    if not result:
        raise exception.ServiceNotFound(service_id=host)
    return result


@require_admin_context
def _service_get_all_topic_subquery(context, topic, subq, label):
    """Return enabled services for a topic ordered by a usage subquery.

    'subq' aggregates usage per host; services are outer-joined against it
    so hosts with no usage sort first (coalesced to 0).
    """
    sort_value = getattr(subq.c, label)
    return (
        model_query(
            context, models.Service,
            func.coalesce(sort_value, 0),
            read_deleted="no",
        ).filter_by(
            topic=topic,
        ).filter_by(
            disabled=False,
        ).outerjoin(
            (subq, models.Service.host == subq.c.host)
        ).order_by(
            sort_value
        ).all()
    )


@require_admin_context
@context_manager.reader
def service_get_all_share_sorted(context):
    """Return share services sorted by total provisioned gigabytes."""
    topic = CONF.share_topic
    label = 'share_gigabytes'
    # Aggregate total share size per host of the shares' instances.
    subq = (
        model_query(
            context, models.Share,
            func.sum(models.Share.size).label(label),
            read_deleted="no",
        ).join(
            models.ShareInstance,
            models.ShareInstance.share_id == models.Share.id,
        ).group_by(
            models.ShareInstance.host
        ).subquery()
    )
    return _service_get_all_topic_subquery(
        context, topic, subq, label,
    )


@require_admin_context
@context_manager.reader
def service_get_by_args(context, host, binary):
    """Return the service for (host, binary) or raise HostBinaryNotFound."""
    result = (model_query(context, models.Service).
              filter_by(host=host).
              filter_by(binary=binary).
              first())
    if not result:
        raise exception.HostBinaryNotFound(host=host, binary=binary)
    return result


@require_admin_context
@require_availability_zone_exists(strict=True)
@context_manager.writer
def service_create(context, values):
    """Create a service record; new services may start disabled by config."""
    service_ref = models.Service()
    service_ref.update(values)
    if not CONF.enable_new_services:
        service_ref.disabled = True
    service_ref.save(context.session)
    return service_ref


@require_admin_context
@require_availability_zone_exists(strict=False)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def service_update(context, service_id, values):
    """Apply 'values' to an existing service record."""
    service_ref = _service_get(context, service_id)
    service_ref.update(values)
    service_ref.save(context.session)


###################


@require_context
@context_manager.reader.independent
def quota_get_all_by_project_and_user(context, project_id, user_id):
    """Return per-user quota limits as {resource: hard_limit}."""
    authorize_project_context(context, project_id)
    user_quotas = model_query(
        context, models.ProjectUserQuota,
        models.ProjectUserQuota.resource,
        models.ProjectUserQuota.hard_limit,
    ).filter_by(
        project_id=project_id,
    ).filter_by(
        user_id=user_id,
    ).all()

    result = {'project_id': project_id, 'user_id': user_id}
    for u_quota in user_quotas:
        result[u_quota.resource] = u_quota.hard_limit

    return result


@require_context
@context_manager.reader.independent
def quota_get_all_by_project_and_share_type(context, project_id,
                                            share_type_id):
    """Return per-share-type quota limits as {resource: hard_limit}."""
    authorize_project_context(context, project_id)
    share_type_quotas = model_query(
        context, models.ProjectShareTypeQuota,
        models.ProjectShareTypeQuota.resource,
        models.ProjectShareTypeQuota.hard_limit,
    ).filter_by(
        project_id=project_id,
    ).filter_by(
        share_type_id=share_type_id,
    ).all()

    result = {
        'project_id': project_id,
        'share_type_id': share_type_id,
    }
    for st_quota in share_type_quotas:
        result[st_quota.resource] = st_quota.hard_limit

    return result


@require_context
@context_manager.reader.independent
def quota_get_all_by_project(context, project_id):
    """Return project quota limits as {resource: hard_limit}."""
    authorize_project_context(context, project_id)

    project_quotas = model_query(
        context, models.Quota, read_deleted="no",
    ).filter_by(
        project_id=project_id,
    ).all()

    result = {'project_id': project_id}
    for p_quota in project_quotas:
        result[p_quota.resource] = p_quota.hard_limit

    return result


@require_context
@context_manager.reader.independent
def quota_get_all(context, project_id):
    """Return all per-user quota rows for a project."""
    authorize_project_context(context, project_id)

    result = (model_query(context, models.ProjectUserQuota).
              filter_by(project_id=project_id).
              all())

    return result


@require_admin_context
@context_manager.writer.independent
def quota_create(context, project_id, resource, limit, user_id=None,
                 share_type_id=None):
    """Create a quota limit at project, per-user or per-share-type scope.

    Raises QuotaExists if a matching row already exists and InvalidInput
    when the backend rejects the limit as out of column range.
    """
    # Per-user rows are only created for resources that are not tracked
    # exclusively at project scope.
    per_user = user_id and resource not in PER_PROJECT_QUOTAS

    if per_user:
        check = model_query(context, models.ProjectUserQuota).filter(
            models.ProjectUserQuota.project_id == project_id,
            models.ProjectUserQuota.user_id == user_id,
            models.ProjectUserQuota.resource == resource,
        ).all()
        quota_ref = models.ProjectUserQuota()
        quota_ref.user_id = user_id
    elif share_type_id:
        check = model_query(context, models.ProjectShareTypeQuota).filter(
            models.ProjectShareTypeQuota.project_id == project_id,
            models.ProjectShareTypeQuota.share_type_id == share_type_id,
            models.ProjectShareTypeQuota.resource == resource,
        ).all()
        quota_ref = models.ProjectShareTypeQuota()
        quota_ref.share_type_id = share_type_id
    else:
        check = model_query(context, models.Quota).filter(
            models.Quota.project_id == project_id,
            models.Quota.resource == resource,
        ).all()
        quota_ref = models.Quota()
    if check:
        raise exception.QuotaExists(project_id=project_id, resource=resource)

    quota_ref.project_id = project_id
    quota_ref.resource = resource
    quota_ref.hard_limit = limit
    try:
        quota_ref.save(context.session)
    except Exception as e:
        # Translate DB "out of range" errors (INT column max) into a
        # user-facing InvalidInput.
        if "out of range" in str(e).lower():
            msg = _("Quota limit should not exceed 2147483647")
            raise exception.InvalidInput(reason=msg)
        raise
    return quota_ref


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer.independent
def quota_update(context, project_id, resource, limit, user_id=None,
                 share_type_id=None):
    """Update a quota limit at project, per-user or per-share-type scope.

    Raises the scope-appropriate *QuotaNotFound exception when no row
    was updated.
    """
    per_user = user_id and resource not in PER_PROJECT_QUOTAS
    if per_user:
        query = model_query(context, models.ProjectUserQuota).filter(
            models.ProjectUserQuota.project_id == project_id,
            models.ProjectUserQuota.user_id == user_id,
            models.ProjectUserQuota.resource == resource,
        )
    elif share_type_id:
        query = model_query(context, models.ProjectShareTypeQuota).filter(
            models.ProjectShareTypeQuota.project_id == project_id,
            models.ProjectShareTypeQuota.share_type_id == share_type_id,
            models.ProjectShareTypeQuota.resource == resource,
        )
    else:
        query = model_query(context, models.Quota).filter(
            models.Quota.project_id == project_id,
            models.Quota.resource == resource,
        )

    # UPDATE returns the number of matched rows; zero means the quota
    # does not exist at the requested scope.
    result = query.update({'hard_limit': limit})
    if not result:
        if per_user:
            raise exception.ProjectUserQuotaNotFound(
                project_id=project_id, user_id=user_id)
        elif share_type_id:
            raise exception.ProjectShareTypeQuotaNotFound(
                project_id=project_id, share_type=share_type_id)
        raise exception.ProjectQuotaNotFound(project_id=project_id)


###################


@require_context
@context_manager.reader
def quota_class_get(context, class_name, resource):
    """Return the quota-class row for (class_name, resource)."""
    result = (
        model_query(
            context, models.QuotaClass, read_deleted="no",
        ).filter_by(
            class_name=class_name
        ).filter_by(
            resource=resource
        ).first()
    )
    if not result:
        raise exception.QuotaClassNotFound(class_name=class_name)
    return result


@require_context
@context_manager.reader
def quota_class_get_default(context):
    """Return the default quota class as {resource: hard_limit}."""
    rows = (model_query(context, models.QuotaClass, read_deleted="no").
            filter_by(class_name=_DEFAULT_QUOTA_NAME).
            all())
    result = {'class_name': _DEFAULT_QUOTA_NAME}
    for row in rows:
        result[row.resource] = row.hard_limit
    return result


@require_context
@context_manager.reader
def quota_class_get_all_by_name(context, class_name):
    """Return all limits of a quota class as {resource: hard_limit}."""
    authorize_quota_class_context(context, class_name)
    rows = (model_query(context, models.QuotaClass, read_deleted="no").
            filter_by(class_name=class_name).
            all())
    result = {'class_name': class_name}
    for row in rows:
        result[row.resource] = row.hard_limit
    return result


@require_admin_context
@context_manager.writer
def quota_class_create(context, class_name, resource, limit):
    """Create a quota-class limit row."""
    quota_class_ref = models.QuotaClass()
    quota_class_ref.class_name = class_name
    quota_class_ref.resource = resource
    quota_class_ref.hard_limit = limit
    quota_class_ref.save(context.session)
    return quota_class_ref


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def quota_class_update(context, class_name, resource, limit):
    """Update a quota-class limit; raise QuotaClassNotFound if absent."""
    result = (model_query(context, models.QuotaClass, read_deleted="no").
              filter_by(class_name=class_name).
              filter_by(resource=resource).
              update({'hard_limit': limit}))
    if not result:
        raise exception.QuotaClassNotFound(class_name=class_name)


###################


@require_context
@context_manager.reader
def quota_usage_get(context, project_id, resource, user_id=None,
                    share_type_id=None):
    """Return the quota-usage row for one resource at the given scope.

    Raises QuotaUsageNotFound when no matching row exists.
    """
    query = (model_query(context, models.QuotaUsage, read_deleted="no").
             filter_by(project_id=project_id).
             filter_by(resource=resource))
    if user_id:
        if resource not in PER_PROJECT_QUOTAS:
            result = query.filter_by(user_id=user_id).first()
        else:
            result = query.filter_by(user_id=None).first()
    elif share_type_id:
        # FIX: this previously passed the bogus keyword
        # 'queryshare_type_id', which is not a QuotaUsage column and
        # made every per-share-type usage lookup fail.
        result = query.filter_by(share_type_id=share_type_id).first()
    else:
        result = query.first()
    if not result:
        raise exception.QuotaUsageNotFound(project_id=project_id)
    return result


def _quota_usage_get_all(context, project_id, user_id=None,
                         share_type_id=None):
    """Aggregate quota usages as {resource: {'in_use': x, 'reserved': y}}."""
    authorize_project_context(context, project_id)
    query = (model_query(context, models.QuotaUsage, read_deleted="no").
             filter_by(project_id=project_id))
    result = {'project_id': project_id}
    if user_id:
        # Include both the user's rows and project-scoped rows
        # (user_id IS NULL).
        # FIX: 'models.QuotaUsage.user_id is None' was a Python identity
        # test that always evaluated to False, so NULL-user rows were
        # silently excluded; .is_(None) generates the intended
        # 'user_id IS NULL' SQL.
        query = query.filter(or_(models.QuotaUsage.user_id == user_id,
                                 models.QuotaUsage.user_id.is_(None)))
        result['user_id'] = user_id
    elif share_type_id:
        query = query.filter_by(share_type_id=share_type_id)
        result['share_type_id'] = share_type_id
    else:
        query = query.filter_by(share_type_id=None)
    rows = query.all()
    for row in rows:
        if row.resource in result:
            result[row.resource]['in_use'] += row.in_use
            result[row.resource]['reserved'] += row.reserved
        else:
            result[row.resource] = dict(in_use=row.in_use,
                                        reserved=row.reserved)
    return result


@require_context
@context_manager.reader
def quota_usage_get_all_by_project(context, project_id):
    return _quota_usage_get_all(context, project_id)


@require_context
@context_manager.reader
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
    return _quota_usage_get_all(context, project_id, user_id=user_id)


@require_context
@context_manager.reader
def quota_usage_get_all_by_project_and_share_type(context, project_id,
                                                  share_type_id):
    return _quota_usage_get_all(
        context, project_id, share_type_id=share_type_id)


def _quota_usage_create(context, project_id, user_id, resource, in_use,
                        reserved, until_refresh, share_type_id=None):
    """Create a quota-usage row for either user or share-type scope."""
    quota_usage_ref = models.QuotaUsage()
    # A usage row belongs to exactly one scope.
    if share_type_id:
        quota_usage_ref.share_type_id = share_type_id
    else:
        quota_usage_ref.user_id = user_id
    quota_usage_ref.project_id = project_id
    quota_usage_ref.resource = resource
    quota_usage_ref.in_use = in_use
    quota_usage_ref.reserved = reserved
    quota_usage_ref.until_refresh = until_refresh
    # updated_at is needed for judgement of max_age
    quota_usage_ref.updated_at = timeutils.utcnow()

    quota_usage_ref.save(session=context.session)

    return quota_usage_ref


@require_admin_context
@context_manager.writer
def quota_usage_create(context, project_id, user_id, resource, in_use,
                       reserved, until_refresh, share_type_id=None):
    return _quota_usage_create(
        context,
        project_id,
        user_id,
        resource,
        in_use,
        reserved,
        until_refresh,
        share_type_id=share_type_id,
    )


@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer.independent
def quota_usage_update(context, project_id, user_id, resource,
                       share_type_id=None, **kwargs):
    """Update selected fields of a quota-usage row.

    Only 'in_use', 'reserved' and 'until_refresh' keywords are honored.
    Raises QuotaUsageNotFound when no row matched.
    """
    updates = {}
    for key in ('in_use', 'reserved', 'until_refresh'):
        if key in kwargs:
            updates[key] = kwargs[key]

    query = model_query(
        context, models.QuotaUsage, read_deleted="no",
    ).filter_by(project_id=project_id).filter_by(resource=resource)
    if share_type_id:
        query = query.filter_by(share_type_id=share_type_id)
    else:
        # FIX: same 'is None' identity-test bug as in
        # _quota_usage_get_all; use IS NULL semantics.
        query = query.filter(or_(models.QuotaUsage.user_id == user_id,
                                 models.QuotaUsage.user_id.is_(None)))
    result = query.update(updates)
    if not result:
        raise exception.QuotaUsageNotFound(project_id=project_id)


###################


def _reservation_create(context, uuid, usage, project_id, user_id, resource,
                        delta, expire, share_type_id=None):
    """Create a reservation row tied to an existing usage record."""
    reservation_ref = models.Reservation()
    reservation_ref.uuid = uuid
    reservation_ref.usage_id = usage['id']
    reservation_ref.project_id = project_id
    # A reservation belongs to exactly one scope.
    if share_type_id:
        reservation_ref.share_type_id = share_type_id
    else:
        reservation_ref.user_id = user_id
    reservation_ref.resource = resource
    reservation_ref.delta = delta
    reservation_ref.expire = expire
    reservation_ref.save(session=context.session)
    return reservation_ref


###################


# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_share_type_quota_usages(context, project_id, share_type_id): rows = model_query( context, models.QuotaUsage, read_deleted="no", ).filter( models.QuotaUsage.project_id == project_id, models.QuotaUsage.share_type_id == share_type_id, ).with_for_update().all() return {row.resource: row for row in rows} def _get_user_quota_usages(context, project_id, user_id): # Broken out for testability rows = model_query( context, models.QuotaUsage, read_deleted="no", ).filter_by( project_id=project_id, ).filter( or_( models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id is None, ) ).with_for_update().all() return {row.resource: row for row in rows} def _get_project_quota_usages(context, project_id): rows = model_query( context, models.QuotaUsage, read_deleted="no", ).filter_by( project_id=project_id, ).filter( models.QuotaUsage.share_type_id is None, ).with_for_update().all() result = dict() # Get the total count of in_use,reserved for row in rows: if row.resource in result: result[row.resource]['in_use'] += row.in_use result[row.resource]['reserved'] += row.reserved result[row.resource]['total'] += (row.in_use + row.reserved) else: result[row.resource] = dict( in_use=row.in_use, reserved=row.reserved, total=row.in_use + row.reserved, ) return result # NOTE(stephenfin): We intentionally don't wrap the outer function here since # we call the innter function multiple times and want each call to be in a # separate transaction @require_context def quota_reserve(context, resources, project_quotas, user_quotas, share_type_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None, share_type_id=None, overquota_allowed=False): user_reservations = _quota_reserve( context, resources, project_quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id, user_id=user_id, overquota_allowed=overquota_allowed) if share_type_id: try: st_reservations = _quota_reserve( context, resources, project_quotas, share_type_quotas, deltas, expire, 
until_refresh, max_age, project_id, share_type_id=share_type_id, overquota_allowed=overquota_allowed) except exception.OverQuota: # rollback previous reservations with excutils.save_and_reraise_exception(): # We call a public method since we haven't wrapped this, the # caller, and we want to run in a different transaction reservation_rollback( context, user_reservations, project_id=project_id, user_id=user_id) return user_reservations + st_reservations return user_reservations # NOTE(stephenfin): Per above, we wrap the inner method here @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer.independent def _quota_reserve(context, resources, project_quotas, user_or_st_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None, share_type_id=None, overquota_allowed=False): elevated = context.elevated() if project_id is None: project_id = context.project_id if share_type_id: user_or_st_usages = _get_share_type_quota_usages( context, project_id, share_type_id, ) else: user_id = user_id if user_id else context.user_id user_or_st_usages = _get_user_quota_usages( context, project_id, user_id, ) # Get the current usages project_usages = _get_project_quota_usages(context, project_id) # Handle usage refresh work = set(deltas.keys()) while work: resource = work.pop() # Do we need to refresh the usage? refresh = False if ((resource not in PER_PROJECT_QUOTAS) and (resource not in user_or_st_usages)): user_or_st_usages[resource] = _quota_usage_create( elevated, project_id, user_id, resource, 0, 0, until_refresh or None, share_type_id=share_type_id, ) refresh = True elif ((resource in PER_PROJECT_QUOTAS) and (resource not in user_or_st_usages)): user_or_st_usages[resource] = _quota_usage_create( elevated, project_id, None, resource, 0, 0, until_refresh or None, share_type_id=share_type_id, ) refresh = True elif user_or_st_usages[resource].in_use < 0: # Negative in_use count indicates a desync, so try to # heal from that... 
refresh = True elif user_or_st_usages[resource].until_refresh is not None: user_or_st_usages[resource].until_refresh -= 1 if user_or_st_usages[resource].until_refresh <= 0: refresh = True elif max_age and (user_or_st_usages[resource].updated_at - timeutils.utcnow()).seconds >= max_age: refresh = True # OK, refresh the usage if refresh: # Grab the sync routine sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] updates = sync( elevated, project_id, user_id, share_type_id=share_type_id, ) for res, in_use in updates.items(): # Make sure we have a destination for the usage! if ((res not in PER_PROJECT_QUOTAS) and (res not in user_or_st_usages)): user_or_st_usages[res] = _quota_usage_create( elevated, project_id, user_id, res, 0, 0, until_refresh or None, share_type_id=share_type_id, ) if ((res in PER_PROJECT_QUOTAS) and (res not in user_or_st_usages)): user_or_st_usages[res] = _quota_usage_create( elevated, project_id, None, res, 0, 0, until_refresh or None, share_type_id=share_type_id, ) if user_or_st_usages[res].in_use != in_use: LOG.debug( 'quota_usages out of sync, updating. ' 'project_id: %(project_id)s, ' 'user_id: %(user_id)s, ' 'share_type_id: %(share_type_id)s, ' 'resource: %(res)s, ' 'tracked usage: %(tracked_use)s, ' 'actual usage: %(in_use)s', {'project_id': project_id, 'user_id': user_id, 'share_type_id': share_type_id, 'res': res, 'tracked_use': user_or_st_usages[res].in_use, 'in_use': in_use}) # Update the usage user_or_st_usages[res].in_use = in_use user_or_st_usages[res].until_refresh = ( until_refresh or None) # Because more than one resource may be refreshed # by the call to the sync routine, and we don't # want to double-sync, we make sure all refreshed # resources are dropped from the work set. work.discard(res) # NOTE(Vek): We make the assumption that the sync # routine actually refreshes the # resources that it is the sync routine # for. We don't check, because this is # a best-effort mechanism. 
# Check for deltas that would go negative unders = [res for res, delta in deltas.items() if delta < 0 and delta + user_or_st_usages[res].in_use < 0] # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. for key, value in user_or_st_usages.items(): if key not in project_usages: project_usages[key] = value overs = [res for res, delta in deltas.items() if user_or_st_quotas[res] >= 0 and delta >= 0 and (0 <= project_quotas[res] < delta + project_usages[res]['total'] or user_or_st_quotas[res] < delta + user_or_st_usages[res].total)] # NOTE(carloss): If OverQuota is allowed, there is no problem to exceed # the quotas, so we reset the overs list and LOG it. if overs and overquota_allowed: msg = _("The service has identified one or more exceeded " "quotas. Please check the quotas for project " "%(project_id)s, user %(user_id)s and share type " "%(share_type_id)s, and adjust them if " "necessary.") % { "project_id": project_id, "user_id": user_id, "share_type_id": share_type_id } LOG.warning(msg) overs = [] # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because # we're over quota, so the OverQuota raise is # outside the transaction. If we did the raise # here, our usage updates would be discarded, but # they're not invalidated by being over-quota. # Create the reservations if not overs: reservations = [] for res, delta in deltas.items(): reservation = _reservation_create( elevated, uuidutils.generate_uuid(), user_or_st_usages[res], project_id, user_id, res, delta, expire, share_type_id=share_type_id, ) reservations.append(reservation.uuid) # Also update the reserved quantity # NOTE(Vek): Again, we are only concerned here about # positive increments. Here, though, we're # worried about the following scenario: # # 1) User initiates resize down. # 2) User allocates a new instance. 
# 3) Resize down fails or is reverted. # 4) User is now over quota. # # To prevent this, we only update the # reserved value if the delta is positive. if delta > 0: user_or_st_usages[res].reserved += delta # Apply updates to the usages table for usage_ref in user_or_st_usages.values(): context.session.add(usage_ref) # NOTE(stephenfin): commit changes before we raise any exceptions context.session.commit() context.session.begin() if unders: LOG.warning("Change will make usage less than 0 for the following " "resources: %s", unders) if overs: if project_quotas == user_or_st_quotas: usages = project_usages else: usages = user_or_st_usages usages = {k: dict(in_use=v['in_use'], reserved=v['reserved']) for k, v in usages.items()} raise exception.OverQuota( overs=sorted(overs), quotas=user_or_st_quotas, usages=usages) return reservations def _quota_reservations_query(context, reservations): """Return the relevant reservations.""" return model_query( context, models.Reservation, read_deleted="no", ).filter( models.Reservation.uuid.in_(reservations), ).with_for_update() @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer.independent def reservation_commit(context, reservations, project_id=None, user_id=None, share_type_id=None): if share_type_id: st_usages = _get_share_type_quota_usages( context, project_id, share_type_id, ) else: st_usages = {} user_usages = _get_user_quota_usages(context, project_id, user_id) reservation_query = _quota_reservations_query(context, reservations) for reservation in reservation_query.all(): if reservation['share_type_id']: usages = st_usages else: usages = user_usages usage = usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta usage.in_use += reservation.delta reservation_query.soft_delete(synchronize_session=False) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer.independent def 
reservation_rollback(context, reservations, project_id=None, user_id=None, share_type_id=None): if share_type_id: st_usages = _get_share_type_quota_usages( context, project_id, share_type_id, ) else: st_usages = {} user_usages = _get_user_quota_usages(context, project_id, user_id) reservation_query = _quota_reservations_query(context, reservations) for reservation in reservation_query.all(): if reservation['share_type_id']: usages = st_usages else: usages = user_usages usage = usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta reservation_query.soft_delete(synchronize_session=False) @require_admin_context @context_manager.writer.independent def quota_destroy_all_by_project_and_user(context, project_id, user_id): model_query( context, models.ProjectUserQuota, read_deleted="no", ).filter_by( project_id=project_id, ).filter_by(user_id=user_id).soft_delete(synchronize_session=False) model_query( context, models.QuotaUsage, read_deleted="no", ).filter_by( project_id=project_id, ).filter_by(user_id=user_id).soft_delete(synchronize_session=False) model_query( context, models.Reservation, read_deleted="no", ).filter_by( project_id=project_id, ).filter_by(user_id=user_id).soft_delete(synchronize_session=False) @require_admin_context @context_manager.writer.independent def quota_destroy_all_by_share_type(context, share_type_id, project_id=None): return _quota_destroy_all_by_share_type( context, share_type_id, project_id=project_id, ) @require_admin_context def _quota_destroy_all_by_share_type(context, share_type_id, project_id=None): """Soft deletes all quotas, usages and reservations. :param context: request context for queries, updates and logging :param share_type_id: ID of the share type to filter the quotas, usages and reservations under. :param project_id: ID of the project to filter the quotas, usages and reservations under. If not provided, share type quotas for all projects will be acted upon. 
""" share_type_quotas = model_query( context, models.ProjectShareTypeQuota, read_deleted="no", ).filter_by(share_type_id=share_type_id) share_type_quota_usages = model_query( context, models.QuotaUsage, read_deleted="no", ).filter_by(share_type_id=share_type_id) share_type_quota_reservations = model_query( context, models.Reservation, read_deleted="no", ).filter_by(share_type_id=share_type_id) if project_id is not None: share_type_quotas = share_type_quotas.filter_by( project_id=project_id, ) share_type_quota_usages = share_type_quota_usages.filter_by( project_id=project_id, ) share_type_quota_reservations = ( share_type_quota_reservations.filter_by(project_id=project_id) ) share_type_quotas.soft_delete(synchronize_session=False) share_type_quota_usages.soft_delete(synchronize_session=False) share_type_quota_reservations.soft_delete(synchronize_session=False) @require_admin_context @context_manager.writer.independent def quota_destroy_all_by_project(context, project_id): model_query( context, models.Quota, read_deleted="no", ).filter_by( project_id=project_id, ).soft_delete(synchronize_session=False) model_query( context, models.ProjectUserQuota, read_deleted="no", ).filter_by( project_id=project_id, ).soft_delete(synchronize_session=False) model_query( context, models.QuotaUsage, read_deleted="no", ).filter_by( project_id=project_id, ).soft_delete(synchronize_session=False) model_query( context, models.Reservation, read_deleted="no", ).filter_by( project_id=project_id, ).soft_delete(synchronize_session=False) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def reservation_expire(context): current_time = timeutils.utcnow() reservation_query = model_query( context, models.Reservation, read_deleted="no" ).filter(models.Reservation.expire < current_time) for reservation in reservation_query.all(): if reservation.delta >= 0: quota_usage = model_query( context, models.QuotaUsage, read_deleted="no", 
).filter( models.QuotaUsage.id == reservation.usage_id, ).first() quota_usage.reserved -= reservation.delta context.session.add(quota_usage) reservation_query.soft_delete(synchronize_session=False) ################ def _extract_subdict_by_fields(source_dict, fields): dict_to_extract_from = copy.deepcopy(source_dict) sub_dict = {} for field in fields: field_value = dict_to_extract_from.pop(field, None) if field_value: sub_dict.update({field: field_value}) return sub_dict, dict_to_extract_from def _extract_share_instance_values(values): share_instance_model_fields = [ 'status', 'host', 'scheduled_at', 'launched_at', 'terminated_at', 'share_server_id', 'share_network_id', 'availability_zone_id', 'replica_state', 'share_type_id', 'share_type', 'access_rules_status', 'mount_point_name', 'encryption_key_ref', ] share_instance_values, share_values = ( _extract_subdict_by_fields(values, share_instance_model_fields) ) return share_instance_values, share_values def _change_size_to_instance_size(snap_instance_values): if 'size' in snap_instance_values: snap_instance_values['instance_size'] = snap_instance_values['size'] snap_instance_values.pop('size') def _extract_snapshot_instance_values(values): fields = ['status', 'progress', 'provider_location'] snapshot_instance_values, snapshot_values = ( _extract_subdict_by_fields(values, fields) ) return snapshot_instance_values, snapshot_values ################ @require_context @context_manager.writer def share_instance_create(context, share_id, values): return _share_instance_create(context, share_id, values) def _share_instance_create(context, share_id, values): if not values.get('id'): values['id'] = uuidutils.generate_uuid() values.update({'share_id': share_id}) share_instance_ref = models.ShareInstance() share_instance_ref.update(values) share_instance_ref.save(session=context.session) return _share_instance_get(context, share_instance_ref['id']) @require_context @require_availability_zone_exists(strict=False) 
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_instance_update(context, share_instance_id, values, with_share_data=False): instance_ref = _share_instance_update( context, share_instance_id, values, ) if with_share_data: parent_share = _share_get(context, instance_ref['share_id']) instance_ref.set_share_data(parent_share) return instance_ref def _share_instance_update( context, share_instance_id, values, with_share_data=False ): share_instance_ref = _share_instance_get( context, share_instance_id, with_share_data=with_share_data) share_instance_ref.update(values) share_instance_ref.save(session=context.session) return share_instance_ref @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_and_snapshot_instances_status_update( context, values, share_instance_ids=None, snapshot_instance_ids=None, current_expected_status=None, ): updated_share_instances = None updated_snapshot_instances = None if current_expected_status and share_instance_ids: filters = {'instance_ids': share_instance_ids} share_instances = _share_instance_get_all(context, filters=filters) all_instances_are_compliant = all( instance['status'] == current_expected_status for instance in share_instances) if not all_instances_are_compliant: msg = _('At least one of the shares is not in the %(status)s ' 'status.') % { 'status': current_expected_status } raise exception.InvalidShareInstance(reason=msg) if current_expected_status and snapshot_instance_ids: filters = {'instance_ids': snapshot_instance_ids} snapshot_instances = _share_snapshot_instance_get_all_with_filters( context, filters, ) all_snap_instances_are_compliant = all( snap_instance['status'] == current_expected_status for snap_instance in snapshot_instances) if not all_snap_instances_are_compliant: msg = _('At least one of the snapshots is not in the ' '%(status)s status.') % { 'status': current_expected_status } raise 
exception.InvalidShareSnapshotInstance(reason=msg) if share_instance_ids: updated_share_instances = _share_instance_status_update( context, share_instance_ids, values, ) if snapshot_instance_ids: updated_snapshot_instances = _share_snapshot_instances_status_update( context, snapshot_instance_ids, values, ) return updated_share_instances, updated_snapshot_instances @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_instance_status_update(context, share_instance_ids, values): return _share_instance_status_update(context, share_instance_ids, values) def _share_instance_status_update(context, share_instance_ids, values): result = model_query( context, models.ShareInstance, read_deleted="no", ).filter( models.ShareInstance.id.in_(share_instance_ids) ).update( values, synchronize_session=False, ) return result @require_context @context_manager.reader def share_instance_get(context, share_instance_id, with_share_data=False): return _share_instance_get( context, share_instance_id, with_share_data=with_share_data, ) def _share_instance_get(context, share_instance_id, with_share_data=False): result = model_query( context, models.ShareInstance, ).filter_by( id=share_instance_id, ).options( orm.joinedload( models.ShareInstance.export_locations ).joinedload(models.ShareInstanceExportLocations._el_metadata_bare), orm.joinedload(models.ShareInstance.share_type), ).first() if result is None: raise exception.NotFound() if with_share_data: parent_share = _share_get(context, result['share_id']) result.set_share_data(parent_share) return result @require_admin_context @context_manager.reader def share_instance_get_all(context, filters=None): return _share_instance_get_all(context, filters=filters) @require_admin_context def _share_instance_get_all(context, filters=None): query = model_query( context, models.ShareInstance, read_deleted="no", ).options( orm.joinedload(models.ShareInstance.export_locations), ) filters = 
filters or {} export_location_id = filters.get('export_location_id') export_location_path = filters.get('export_location_path') if export_location_id or export_location_path: query = query.join( models.ShareInstanceExportLocations, models.ShareInstanceExportLocations.share_instance_id == models.ShareInstance.id) if export_location_path: query = query.filter( models.ShareInstanceExportLocations.path == export_location_path) if export_location_id: query = query.filter( models.ShareInstanceExportLocations.uuid == export_location_id) query = query.join( models.Share, models.Share.id == models.ShareInstance.share_id, ) is_soft_deleted = filters.get('is_soft_deleted') if is_soft_deleted: query = query.filter(models.Share.is_soft_deleted == true()) else: query = query.filter(models.Share.is_soft_deleted == false()) instance_ids = filters.get('instance_ids') if instance_ids: query = query.filter(models.ShareInstance.id.in_(instance_ids)) # TODO(gouthamr): This DB API method needs to be generalized for all # share instance fields. host = filters.get('host') if host: query = query.filter( or_(models.ShareInstance.host == host, models.ShareInstance.host.like("{0}#%".format(host))) ) share_server_id = filters.get('share_server_id') if share_server_id: query = query.filter( models.ShareInstance.share_server_id == share_server_id) status = filters.get('status') if status: query = query.filter(models.ShareInstance.status == status) encryption_key_ref = filters.get('encryption_key_ref') if encryption_key_ref: query = query.filter( models.ShareInstance.encryption_key_ref == encryption_key_ref) # Returns list of share instances that satisfy filters. 
query = query.all() return query @require_context def _update_share_instance_usages(context, share, instance_ref, is_replica=False, deferred_delete=False): deltas = {} # if share is expected to be deferred_deleted, we drop its quotas # whether or not it has additional share instances no_instances_remain = deferred_delete or len(share.instances) == 0 share_usages_to_release = {"shares": -1, "gigabytes": -share['size']} replica_usages_to_release = {"share_replicas": -1, "replica_gigabytes": -share['size']} if is_replica and no_instances_remain: # A share that had a replication_type is being deleted, so there's # need to update the share replica quotas and the share quotas deltas.update(replica_usages_to_release) deltas.update(share_usages_to_release) elif is_replica: # The user is deleting a share replica deltas.update(replica_usages_to_release) else: # A share with no replication_type is being deleted deltas.update(share_usages_to_release) reservations = None try: # we give the user_id of the share, to update # the quota usage for the user, who created the share reservations = QUOTAS.reserve( context, project_id=share['project_id'], user_id=share['user_id'], share_type_id=instance_ref['share_type_id'], **deltas) QUOTAS.commit( context, reservations, project_id=share['project_id'], user_id=share['user_id'], share_type_id=instance_ref['share_type_id']) except Exception: resource_name = ( 'share replica' if is_replica else 'share') resource_id = instance_ref['id'] if is_replica else share['id'] msg = (_("Failed to update usages deleting %(resource_name)s " "'%(id)s'.") % {'id': resource_id, "resource_name": resource_name}) LOG.exception(msg) if reservations: QUOTAS.rollback( context, reservations, share_type_id=instance_ref['share_type_id']) @require_context @context_manager.writer def share_instance_delete(context, instance_id, need_to_update_usages=False): _share_instance_delete( context, instance_id, need_to_update_usages=need_to_update_usages, ) def 
_share_instance_delete(context, instance_id, need_to_update_usages=False): export_locations_update(context, instance_id, [], delete=True) instance_ref = _share_instance_get(context, instance_id) is_replica = instance_ref['replica_state'] is not None instance_ref.soft_delete(session=context.session, update_status=True) share = _share_get(context, instance_ref['share_id']) if len(share.instances) == 0: # NOTE(zzzeek) currently this potentially is required for current # tempest runs to pass with context_manager.writer.independent.using(context) as oob_session: oob_session.query(models.ShareAccessMapping).filter_by( share_id=share['id'] ).soft_delete() context.session.query(models.ShareMetadata).filter_by( share_id=share['id'], ).soft_delete() share.soft_delete(session=context.session) if need_to_update_usages: _update_share_instance_usages(context, share, instance_ref, is_replica=is_replica, deferred_delete=False) @require_context @context_manager.writer def update_share_instance_quota_usages(context, instance_id): # This method is specifically written for deferred deletion share # instance usage. 
instance_ref = _share_instance_get(context, instance_id) is_replica = instance_ref['replica_state'] is not None share = _share_get(context, instance_ref['share_id']) _update_share_instance_usages(context, share, instance_ref, is_replica=is_replica, deferred_delete=True) def _set_instances_share_data(instances): instances = instances.options( orm.joinedload(models.ShareInstance.share)).all() instances = [s for s in instances if s.share] for s in instances: s.set_share_data(s.share) return instances @require_admin_context @context_manager.reader def share_instance_get_all_by_host(context, host, with_share_data=False, status=None): """Retrieves all share instances hosted on a host.""" instances = ( model_query(context, models.ShareInstance).filter( or_( models.ShareInstance.host == host, models.ShareInstance.host.like("{0}#%".format(host)) ) ) ) if status is not None: instances = instances.filter(models.ShareInstance.status == status) if with_share_data: instances = _set_instances_share_data(instances) else: # Returns list of all instances that satisfy filters. 
instances = instances.all() return instances @require_context @context_manager.reader def share_instance_sizes_sum_by_host(context, host): result = model_query( context, models.Share, func.sum(models.Share.size), ).join( models.ShareInstance.share, ).filter(or_( models.ShareInstance.host == host, models.ShareInstance.host.like("{0}#%".format(host)), )).first() return int(result[0] or 0) @require_context @context_manager.reader def share_instance_get_all_by_share_network(context, share_network_id): """Returns list of share instances that belong to given share network.""" result = ( model_query(context, models.ShareInstance).filter( models.ShareInstance.share_network_id == share_network_id, ).all() ) return result @require_context @context_manager.reader def share_instance_get_all_by_share_server(context, share_server_id, with_share_data=False): """Returns list of share instance with given share server.""" result = ( model_query(context, models.ShareInstance).filter( models.ShareInstance.share_server_id == share_server_id, ) ) if with_share_data: result = _set_instances_share_data(result) else: result = result.all() return result @require_context @context_manager.reader def share_instance_get_all_by_share(context, share_id): """Returns list of share instances that belong to given share.""" result = ( model_query(context, models.ShareInstance).filter( models.ShareInstance.share_id == share_id, ).all() ) return result @require_context @context_manager.reader def share_instance_get_all_by_share_group_id(context, share_group_id): """Returns list of share instances that belong to given share group.""" result = ( model_query(context, models.Share).filter( models.Share.share_group_id == share_group_id, ).all() ) instances = [] for share in result: instance = share.instance instance.set_share_data(share) instances.append(instance) return instances ################ def _share_replica_get_with_filters(context, share_id=None, replica_id=None, replica_state=None, status=None, 
with_share_server=True): query = model_query(context, models.ShareInstance, read_deleted="no") if not context.is_admin: query = query.join( models.Share, models.ShareInstance.share_id == models.Share.id ).filter( models.Share.project_id == context.project_id, ) if share_id is not None: query = query.filter(models.ShareInstance.share_id == share_id) if replica_id is not None: query = query.filter(models.ShareInstance.id == replica_id) if replica_state is not None: query = query.filter( models.ShareInstance.replica_state == replica_state) else: query = query.filter(models.ShareInstance.replica_state.isnot(None)) if status is not None: query = query.filter(models.ShareInstance.status == status) if with_share_server: query = query.options( orm.joinedload(models.ShareInstance.share_server), ) return query @require_context @context_manager.reader def share_replicas_get_all(context, with_share_data=False, with_share_server=True): """Returns replica instances for all available replicated shares.""" result = _share_replica_get_with_filters( context, with_share_server=with_share_server, ) if with_share_data: result = _set_instances_share_data(result) else: result = result.all() return result @require_context @context_manager.reader def share_replicas_get_all_by_share(context, share_id, with_share_data=False, with_share_server=False,): """Returns replica instances for a given share.""" result = _share_replica_get_with_filters( context, with_share_server=with_share_server, share_id=share_id, ) if with_share_data: result = _set_instances_share_data(result) else: result = result.all() return result @require_context @context_manager.reader def share_replicas_get_available_active_replica(context, share_id, with_share_data=False, with_share_server=False): """Returns an 'active' replica instance that is 'available'.""" result = _share_replica_get_with_filters( context, with_share_server=with_share_server, share_id=share_id, replica_state=constants.REPLICA_STATE_ACTIVE, 
status=constants.STATUS_AVAILABLE, ) if result.first() and with_share_data: result = _set_instances_share_data(result)[0] else: result = result.first() return result @require_context @context_manager.reader def share_replica_get(context, replica_id, with_share_data=False, with_share_server=False): """Returns summary of requested replica if available.""" result = _share_replica_get_with_filters( context, with_share_server=with_share_server, replica_id=replica_id, ) if result.first() and with_share_data: result = _set_instances_share_data(result)[0] else: result = result.first() if result is None: raise exception.ShareReplicaNotFound(replica_id=replica_id) return result @require_context @require_availability_zone_exists(strict=False) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_replica_update(context, share_replica_id, values, with_share_data=False): """Updates a share replica with specified values.""" updated_share_replica = _share_instance_update( context, share_replica_id, values, with_share_data=with_share_data ) return updated_share_replica @require_context @context_manager.writer def share_replica_delete( context, replica_id, need_to_update_usages=True, ): """Deletes a share replica.""" _share_instance_delete( context, replica_id, need_to_update_usages=need_to_update_usages, ) ################ def _process_share_filters(query, filters, project_id=None, is_public=False): if filters is None: filters = {} share_filter_keys = ['share_group_id', 'snapshot_id', 'is_soft_deleted', 'source_backup_id'] instance_filter_keys = ['share_server_id', 'status', 'share_type_id', 'host', 'share_network_id', 'mount_point_name', 'encryption_key_ref'] share_filters = {} instance_filters = {} for k, v in filters.items(): share_filters.update({k: v}) if k in share_filter_keys else None instance_filters.update({k: v}) if k in instance_filter_keys else None no_key = 'key_is_absent' def _filter_data(query, model, desired_filters): 
for key, value in desired_filters.items(): filter_attr = getattr(model, key, no_key) if filter_attr == no_key: pass query = query.filter(filter_attr == value) return query if share_filters: query = _filter_data(query, models.Share, share_filters) if instance_filters: query = _filter_data(query, models.ShareInstance, instance_filters) if project_id: if is_public: query = query.filter(or_(models.Share.project_id == project_id, models.Share.is_public)) else: query = query.filter(models.Share.project_id == project_id) safe_regex_filter, db_regexp_op = _get_regexp_ops(CONF.database.connection) display_name = filters.get('display_name') if display_name: query = query.filter( models.Share.display_name == display_name) else: display_name = filters.get('display_name~') if display_name: query = query.filter( models.Share.display_name.op(db_regexp_op)( _get_filter_value_by_op( db_regexp_op, display_name, safe_regex_filter))) display_description = filters.get('display_description') if display_description: query = query.filter( models.Share.display_description == display_description) else: display_description = filters.get('display_description~') if display_description: query = query.filter( models.Share.display_description.op(db_regexp_op)( _get_filter_value_by_op( db_regexp_op, display_description, safe_regex_filter))) export_location_id = filters.pop('export_location_id', None) export_location_path = filters.pop('export_location_path', None) if export_location_id or export_location_path: query = query.join( models.ShareInstanceExportLocations, models.ShareInstanceExportLocations.share_instance_id == models.ShareInstance.id, ) if export_location_path: query = query.filter( models.ShareInstanceExportLocations.path == export_location_path) if export_location_id: query = query.filter( models.ShareInstanceExportLocations.uuid == export_location_id) if 'metadata' in filters: for k, v in filters['metadata'].items(): # pylint: disable=no-member query = query.filter( 
                or_(models.Share.share_metadata.any(
                    key=k, value=v)))

    if 'extra_specs' in filters:
        query = query.join(
            models.ShareTypeExtraSpecs,
            models.ShareTypeExtraSpecs.share_type_id ==
            models.ShareInstance.share_type_id,
        )
        # Each requested extra-spec key/value pair must match exactly.
        for k, v in filters['extra_specs'].items():
            query = query.filter(and_(models.ShareTypeExtraSpecs.key == k,
                                      models.ShareTypeExtraSpecs.value == v))

    # Hide instances in (or failed from) deferred deletion unless the
    # caller explicitly asked to list them.
    if not filters.get('list_deferred_delete'):
        query = query.filter(and_(
            models.ShareInstance.status != (
                constants.STATUS_DEFERRED_DELETING),
            models.ShareInstance.status != (
                constants.STATUS_ERROR_DEFERRED_DELETING)))

    return query


def _get_filter_value_by_op(op, filter_value, safe_regex_filter):
    """Prepare a fuzzy-match filter value for the given operator.

    For LIKE backends we emulate a "contains" match; for real regex
    operators we first sanitize characters the backend would
    misinterpret.
    """
    if op == 'LIKE':
        return u'%' + filter_value + u'%'
    else:
        return safe_regex_filter(filter_value)


def _safe_regex_mysql(raw_string):
    """Make regex safe to mysql.

    Certain items like '|' are interpreted raw by mysql REGEX. If you
    search for a single | then you trigger an error because it's expecting
    content on either side.

    For consistency sake we escape all '|'. This does mean we wouldn't
    support something like foo|bar to match completely different things,
    however, one can argue putting such complicated regex into name
    search probably means you are doing this wrong.
    """
    return raw_string.replace('|', '\\|')


def _get_regexp_ops(connection):
    """Return safety filter and db opts for regex."""
    regexp_op_map = {
        'postgresql': '~',
        'mysql': 'REGEXP',
        'sqlite': 'REGEXP'
    }
    regex_safe_filters = {
        'mysql': _safe_regex_mysql
    }
    db_type = _db_connection_type(connection)

    # Unknown backends fall back to a no-op sanitizer and LIKE matching.
    return (regex_safe_filters.get(db_type, lambda x: x),
            regexp_op_map.get(db_type, 'LIKE'))


def _db_connection_type(db_connection):
    """Returns a lowercase symbol for the db type.

    This is useful when we need to change what we are doing per DB
    (like handling regexes). In a CellsV2 world it probably needs to do
    something better than use the database configuration string.
""" db_string = db_connection.split(':')[0].split('+')[0] return db_string.lower() def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.items(): value = str(v) if isinstance(v, bool) else v metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = value metadata_refs.append(metadata_ref) return metadata_refs @require_context @require_availability_zone_exists(strict=False) @context_manager.writer def share_create(context, share_values, create_share_instance=True): values = copy.deepcopy(share_values) values = ensure_model_dict_has_id(values) values['share_metadata'] = _metadata_refs(values.get('metadata'), models.ShareMetadata) share_ref = models.Share() share_instance_values, share_values = _extract_share_instance_values( values) share_ref.update(share_values) share_ref.save(session=context.session) if create_share_instance: _share_instance_create(context, share_ref['id'], share_instance_values) # NOTE(u_glide): Do so to prevent errors with relationships return _share_get(context, share_ref['id']) @require_admin_context def _share_data_get_for_project( context, project_id, user_id, share_type_id=None, ): query = model_query( context, models.Share, func.count(models.Share.id), func.sum(models.Share.size), read_deleted="no", ).filter_by(project_id=project_id) if share_type_id: query = query.join( models.Share.instances, ).filter_by(share_type_id=share_type_id) elif user_id: query = query.filter_by(user_id=user_id) result = query.first() return (result[0] or 0, result[1] or 0) @require_context @require_availability_zone_exists(strict=False) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_update(context, share_id, update_values): return _share_update(context, share_id, update_values) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _share_update(context, share_id, update_values): values = copy.deepcopy(update_values) 
share_instance_values, share_values = _extract_share_instance_values( values) share_ref = _share_get(context, share_id) _share_instance_update( context, share_ref.instance['id'], share_instance_values, ) share_ref.update(share_values) share_ref.save(session=context.session) return share_ref @require_context @context_manager.reader def share_get(context, share_id, **kwargs): return _share_get(context, share_id, **kwargs) def _share_get(context, share_id, **kwargs): result = model_query( context, models.Share, **kwargs, ).options( orm.joinedload(models.Share.share_metadata), ).filter_by(id=share_id).first() if result is None: raise exception.NotFound() return result def _share_get_all_with_filters(context, project_id=None, share_server_id=None, share_group_id=None, filters=None, is_public=False, sort_key=None, sort_dir=None, show_count=False): """Returns sorted list of shares that satisfies filters. :param context: context to query under :param project_id: project id that owns shares :param share_server_id: share server that hosts shares :param filters: dict of filters to specify share selection :param is_public: public shares from other projects will be added to result if True :param sort_key: key of models.Share to be used for sorting :param sort_dir: desired direction of sorting, can be 'asc' and 'desc' :returns: list -- models.Share :raises: exception.InvalidInput """ if filters is None: filters = {} if not sort_key: sort_key = 'created_at' if not sort_dir: sort_dir = 'desc' query = model_query( context, models.Share, ).options( orm.joinedload(models.Share.share_metadata), ).join( models.ShareInstance, models.ShareInstance.share_id == models.Share.id ) if share_group_id: filters['share_group_id'] = share_group_id if share_server_id: filters['share_server_id'] = share_server_id # if not specified is_soft_deleted filter, default is False, to get # shares not in recycle bin. 
if 'is_soft_deleted' not in filters: filters['is_soft_deleted'] = False query = _process_share_filters( query, filters, project_id, is_public=is_public) try: query = apply_sorting(models.Share, query, sort_key, sort_dir) except AttributeError: try: query = apply_sorting( models.ShareInstance, query, sort_key, sort_dir) except AttributeError: msg = _("Wrong sorting key provided - '%s'.") % sort_key raise exception.InvalidInput(reason=msg) count = None # NOTE(carloss): Count must be calculated before limit and offset are # applied into the query. if show_count: count = query.order_by(models.Share.id).distinct().count() if 'limit' in filters: offset = filters.get('offset', 0) query = query.limit(filters['limit']).offset(offset) # Returns list of shares that satisfy filters. query = query.all() if show_count: return count, query return query @require_admin_context @context_manager.reader def share_get_all_expired(context): query = model_query( context, models.Share, ).options( orm.joinedload(models.Share.share_metadata), ).join( models.ShareInstance, models.ShareInstance.share_id == models.Share.id, ) filters = {"is_soft_deleted": True} query = _process_share_filters(query, filters=filters) scheduled_deleted_attr = getattr(models.Share, 'scheduled_to_be_deleted_at', None) now_time = timeutils.utcnow() query = query.filter(scheduled_deleted_attr.op('<=')(now_time)) result = query.all() return result @require_admin_context @context_manager.reader def share_get_all(context, filters=None, sort_key=None, sort_dir=None): project_id = filters.pop('project_id', None) if filters else None query = _share_get_all_with_filters( context, project_id=project_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) return query @require_admin_context @context_manager.reader def share_get_all_with_count(context, filters=None, sort_key=None, sort_dir=None): count, query = _share_get_all_with_filters( context, filters=filters, sort_key=sort_key, sort_dir=sort_dir, show_count=True) 
return count, query @require_context @context_manager.reader def share_get_all_by_project(context, project_id, filters=None, is_public=False, sort_key=None, sort_dir=None): """Returns list of shares with given project ID.""" query = _share_get_all_with_filters( context, project_id=project_id, filters=filters, is_public=is_public, sort_key=sort_key, sort_dir=sort_dir) return query @require_context @context_manager.reader def share_get_all_by_project_with_count( context, project_id, filters=None, is_public=False, sort_key=None, sort_dir=None): """Returns list of shares with given project ID.""" count, query = _share_get_all_with_filters( context, project_id=project_id, filters=filters, is_public=is_public, sort_key=sort_key, sort_dir=sort_dir, show_count=True) return count, query @require_context @context_manager.reader def share_get_all_by_share_group_id(context, share_group_id, filters=None, sort_key=None, sort_dir=None): """Returns list of shares with given group ID.""" query = _share_get_all_with_filters( context, share_group_id=share_group_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) return query @require_context @context_manager.reader def share_get_all_by_share_group_id_with_count(context, share_group_id, filters=None, sort_key=None, sort_dir=None): """Returns list of shares with given share group ID.""" count, query = _share_get_all_with_filters( context, share_group_id=share_group_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, show_count=True) return count, query @require_context @context_manager.reader def share_get_all_by_share_server(context, share_server_id, filters=None, sort_key=None, sort_dir=None): """Returns list of shares with given share server.""" query = _share_get_all_with_filters( context, share_server_id=share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) return query @require_context @context_manager.reader def share_get_all_soft_deleted( context, share_server_id, filters=None, sort_key=None, 
sort_dir=None): """Returns list of shares in recycle bin with given share server.""" if filters is None: filters = {} filters["is_soft_deleted"] = True query = _share_get_all_with_filters( context, share_server_id=share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) return query @require_context @context_manager.reader def share_get_all_by_share_server_with_count( context, share_server_id, filters=None, sort_key=None, sort_dir=None): """Returns list of shares with given share server.""" count, query = _share_get_all_with_filters( context, share_server_id=share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, show_count=True) return count, query @require_context @context_manager.reader def share_get_all_soft_deleted_by_network( context, share_network_id, filters=None, sort_key=None, sort_dir=None): """Returns list of shares in recycle bin with given share network.""" if filters is None: filters = {} filters["share_network_id"] = share_network_id filters["is_soft_deleted"] = True query = _share_get_all_with_filters(context, filters=filters, sort_key=sort_key, sort_dir=sort_dir) return query @require_context @context_manager.writer def share_delete(context, share_id): share_ref = _share_get(context, share_id) if len(share_ref.instances) > 0: msg = _("Share %(id)s has %(count)s share instances.") % { 'id': share_id, 'count': len(share_ref.instances)} raise exception.InvalidShare(msg) share_ref.soft_delete(session=context.session) context.session.query(models.ShareMetadata).filter_by( share_id=share_id, ).soft_delete() @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_soft_delete(context, share_id): now_time = timeutils.utcnow() time_delta = datetime.timedelta( seconds=CONF.soft_deleted_share_retention_time) scheduled_to_be_deleted_at = now_time + time_delta update_values = { 'is_soft_deleted': True, 'scheduled_to_be_deleted_at': scheduled_to_be_deleted_at } share_ref 
= _share_get(context, share_id) share_ref.update(update_values) share_ref.save(session=context.session) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_restore(context, share_id): update_values = { 'is_soft_deleted': False, 'scheduled_to_be_deleted_at': None } share_ref = _share_get(context, share_id) share_ref.update(update_values) share_ref.save(session=context.session) ################### def _transfer_get( context, transfer_id, resource_type='share', read_deleted=False, ): """resource_type can be share or network(TODO network transfer)""" query = model_query( context, models.Transfer, read_deleted=read_deleted, ).filter_by(id=transfer_id) if not is_admin_context(context): if resource_type == 'share': share = models.Share query = query.filter( models.Transfer.resource_id == share.id, share.project_id == context.project_id, ) result = query.first() if not result: raise exception.TransferNotFound(transfer_id=transfer_id) return result @context_manager.reader def transfer_get(context, transfer_id, read_deleted=False): return _transfer_get(context, transfer_id, read_deleted=read_deleted) def _transfer_get_all(context, limit=None, sort_key=None, sort_dir=None, filters=None, offset=None): sort_key = sort_key or 'created_at' sort_dir = sort_dir or 'desc' query = model_query(context, models.Transfer) if filters: legal_filter_keys = ( 'display_name', 'display_name~', 'id', 'resource_type', 'resource_id', 'source_project_id', 'destination_project_id', ) query = exact_filter( query, models.Transfer, filters, legal_filter_keys, ) query = utils.paginate_query( query, models.Transfer, limit, sort_key=sort_key, sort_dir=sort_dir, offset=offset, ) return query.all() @require_admin_context @context_manager.reader def transfer_get_all(context, limit=None, sort_key=None, sort_dir=None, filters=None, offset=None): return _transfer_get_all(context, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters, 
offset=offset) @require_context @context_manager.reader def transfer_get_all_by_project(context, project_id, limit=None, sort_key=None, sort_dir=None, filters=None, offset=None): filters = filters.copy() if filters else {} filters['source_project_id'] = project_id return _transfer_get_all(context, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters, offset=offset) @require_admin_context @context_manager.reader def transfer_get_all_expired(context): query = model_query(context, models.Transfer) expires_at_attr = getattr(models.Transfer, 'expires_at', None) now_time = timeutils.utcnow() query = query.filter(expires_at_attr.op('<=')(now_time)) result = query.all() return result @require_context @handle_db_data_error @context_manager.writer def transfer_create(context, values): if not values.get('id'): values['id'] = uuidutils.generate_uuid() resource_id = values['resource_id'] now_time = timeutils.utcnow() time_delta = datetime.timedelta(seconds=CONF.transfer_retention_time) transfer_timeout = now_time + time_delta values['expires_at'] = transfer_timeout transfer = models.Transfer() transfer.update(values) transfer.save(session=context.session) update = {'status': constants.STATUS_AWAITING_TRANSFER} if values['resource_type'] == 'share': _share_update(context, resource_id, update) return transfer @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def transfer_destroy(context, transfer_id, update_share_status=True): update = {'status': constants.STATUS_AVAILABLE} transfer = _transfer_get(context, transfer_id) if transfer['resource_type'] == 'share': if update_share_status: _share_update(context, transfer['resource_id'], update) transfer_query = model_query( context, models.Transfer, ).filter_by( id=transfer_id, ) transfer_query.soft_delete() @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def transfer_accept( context, transfer_id, user_id, 
project_id, accept_snapshots=False, ): share_id = _transfer_get(context, transfer_id)['resource_id'] update = { 'status': constants.STATUS_AVAILABLE, 'user_id': user_id, 'project_id': project_id, 'updated_at': timeutils.utcnow(), } _share_update(context, share_id, update) # Update snapshots for transfer snapshots with share. if accept_snapshots: snapshots = _share_snapshot_get_all_for_share(context, share_id) for snapshot in snapshots: LOG.debug('Begin to transfer snapshot: %s', snapshot['id']) update = { 'user_id': user_id, 'project_id': project_id, 'updated_at': timeutils.utcnow(), } _share_snapshot_update(context, snapshot['id'], update) query = context.session.query(models.Transfer).filter_by(id=transfer_id) query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': timeutils.utcnow(), 'destination_project_id': project_id, 'accepted': True, } ) @require_context @context_manager.writer def transfer_accept_rollback( context, transfer_id, user_id, project_id, rollback_snap=False, ): share_id = _transfer_get( context, transfer_id, read_deleted=True, )['resource_id'] update = { 'status': constants.STATUS_AWAITING_TRANSFER, 'user_id': user_id, 'project_id': project_id, 'updated_at': timeutils.utcnow(), } _share_update(context, share_id, update) # rollback snapshots for transfer snapshots with share. 
if rollback_snap: snapshots = _share_snapshot_get_all_for_share(context, share_id) for snapshot in snapshots: LOG.debug('Begin to rollback snapshot: %s', snapshot['id']) update = { 'user_id': user_id, 'project_id': project_id, 'updated_at': timeutils.utcnow(), } _share_snapshot_update(context, snapshot['id'], update) query = context.session.query(models.Transfer).filter_by(id=transfer_id) query.update( { 'deleted': 'False', 'deleted_at': None, 'updated_at': timeutils.utcnow(), 'destination_project_id': None, 'accepted': 0, } ) ################### def _share_access_get_query(context, values, read_deleted='no'): """Get access record.""" query = model_query( context, models.ShareAccessMapping, read_deleted=read_deleted ).options( orm.joinedload(models.ShareAccessMapping.share_access_rules_metadata), ) return query.filter_by(**values) def _share_instance_access_query(context, access_id=None, instance_id=None): filters = {'deleted': 'False'} if access_id is not None: filters.update({'access_id': access_id}) if instance_id is not None: filters.update({'share_instance_id': instance_id}) return model_query( context, models.ShareInstanceAccessMapping, ).filter_by(**filters) def _share_access_metadata_get_item(context, access_id, key): result = _share_access_metadata_get_query( context, access_id, ).filter_by(key=key).first() if not result: raise exception.ShareAccessMetadataNotFound( metadata_key=key, access_id=access_id) return result def _share_access_metadata_get_query(context, access_id): return model_query( context, models.ShareAccessRulesMetadata, read_deleted="no", ).filter_by( access_id=access_id, ).options(orm.joinedload(models.ShareAccessRulesMetadata.access)) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_access_metadata_update(context, access_id, metadata): # Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the 
value whether it exists or not item = {"value": meta_value} try: meta_ref = _share_access_metadata_get_item( context, access_id, meta_key, ) except exception.ShareAccessMetadataNotFound: meta_ref = models.ShareAccessRulesMetadata() item.update({"key": meta_key, "access_id": access_id}) meta_ref.update(item) meta_ref.save(session=context.session) return metadata @require_context @context_manager.writer def share_access_metadata_delete(context, access_id, key): metadata = _share_access_metadata_get_item( context, access_id, key, ) metadata.soft_delete(session=context.session) @require_context @context_manager.writer def share_access_create(context, values): values = ensure_model_dict_has_id(values) values['share_access_rules_metadata'] = _metadata_refs( values.get('metadata'), models.ShareAccessRulesMetadata ) access_ref = models.ShareAccessMapping() access_ref.update(values) access_ref.save(session=context.session) parent_share = _share_get(context, values['share_id']) for instance in parent_share.instances: values = { 'share_instance_id': instance['id'], 'access_id': access_ref['id'], } instance_access_ref = models.ShareInstanceAccessMapping() instance_access_ref.update(ensure_model_dict_has_id(values)) instance_access_ref.save(session=context.session) return _share_access_get(context, access_ref['id']) @require_context @context_manager.writer def share_access_update(context, access_id, values): access_ref = _share_access_get(context, access_id) access_ref.update(values) access_ref.save(session=context.session) return access_ref @require_context @context_manager.writer def share_instance_access_create(context, values, share_instance_id): values = ensure_model_dict_has_id(values) access_list = _share_access_get_query( context, { 'share_id': values['share_id'], 'access_type': values['access_type'], 'access_to': values['access_to'], } ).all() if len(access_list) > 0: access_ref = access_list[0] else: access_ref = models.ShareAccessMapping() access_ref.update(values) 
access_ref.save(session=context.session) values = { 'share_instance_id': share_instance_id, 'access_id': access_ref['id'], } instance_access_ref = models.ShareInstanceAccessMapping() instance_access_ref.update(ensure_model_dict_has_id(values)) instance_access_ref.save(session=context.session) return _share_access_get(context, access_ref['id']) @require_context @context_manager.writer def share_instance_access_copy(context, share_id, instance_id): """Copy access rules from share to share instance.""" share_access_rules = _share_access_get_query( context, {'share_id': share_id} ).all() for access_rule in share_access_rules: values = { 'share_instance_id': instance_id, 'access_id': access_rule['id'], } instance_access_ref = models.ShareInstanceAccessMapping() instance_access_ref.update(ensure_model_dict_has_id(values)) instance_access_ref.save(session=context.session) return share_access_rules @require_context @context_manager.reader def share_access_get(context, access_id): return _share_access_get(context, access_id) def _share_access_get(context, access_id): """Get access record.""" access = _share_access_get_query(context, {'id': access_id}).first() if access: return access else: raise exception.NotFound() @require_context @context_manager.reader def share_access_get_with_context(context, access_id): """Get access record.""" access = _share_access_get_query( context, {'id': access_id} ).options(orm.joinedload(models.ShareAccessMapping.share)).first() if access: access['project_id'] = access['share']['project_id'] return access else: raise exception.NotFound() @require_context @context_manager.reader def share_instance_access_get(context, access_id, instance_id, with_share_access_data=True): """Get access record.""" access = _share_instance_access_query( context, access_id, instance_id ).first() if access is None: raise exception.NotFound() if with_share_access_data: access = _set_instances_share_access_data(context, access)[0] return access @require_context 
@context_manager.reader
def share_access_get_all_for_share(context, share_id, filters=None):
    """Return access rules for a share, with optional filtering.

    Only rules that still have at least one instance mapping are
    returned. Supported exact-match filters: id, access_type,
    access_key, access_to, access_level. A 'metadata' filter matches
    key/value pairs on the rule's metadata rows.
    """
    filters = filters or {}
    share_access_mapping = models.ShareAccessMapping
    query = _share_access_get_query(
        context, {'share_id': share_id}
    ).filter(
        # Rules with no remaining instance mappings are treated as
        # removed and excluded from the result.
        models.ShareAccessMapping.instance_mappings.any()
    )
    legal_filter_keys = ('id', 'access_type', 'access_key', 'access_to',
                         'access_level')
    if 'metadata' in filters:
        for k, v in filters['metadata'].items():
            # NOTE(review): or_() with a single clause is a no-op
            # wrapper; kept as-is.
            query = query.filter(
                or_(
                    models.ShareAccessMapping.
                    share_access_rules_metadata.any(key=k, value=v)
                )
            )
    query = exact_filter(
        query, share_access_mapping, filters, legal_filter_keys)

    return query.all()


@require_context
@context_manager.reader
def share_access_get_all_for_instance(context, instance_id, filters=None,
                                      with_share_access_data=True):
    """Get all access rules related to a certain share instance."""
    filters = copy.deepcopy(filters) if filters else {}
    filters.update({'share_instance_id': instance_id})
    legal_filter_keys = ('id', 'share_instance_id', 'access_id', 'state')
    query = _share_instance_access_query(context)
    query = exact_filter(
        query, models.ShareInstanceAccessMapping, filters, legal_filter_keys)
    instance_accesses = query.all()
    if with_share_access_data:
        # Attach the parent ShareAccessMapping's data to each mapping row.
        instance_accesses = _set_instances_share_access_data(
            context, instance_accesses
        )
    return instance_accesses


def _set_instances_share_access_data(context, instance_accesses):
    # Accepts a single mapping or a list of mappings; always returns a
    # list with each element's share-access data populated.
    if instance_accesses and not isinstance(instance_accesses, list):
        instance_accesses = [instance_accesses]

    for instance_access in instance_accesses:
        share_access = _share_access_get(
            context, instance_access['access_id']
        )
        instance_access.set_share_access_data(share_access)

    return instance_accesses


def _set_instances_snapshot_access_data(context, instance_accesses):
    # Accepts a single mapping or a list of mappings; always returns a
    # list with each element's snapshot-access data populated.
    if instance_accesses and not isinstance(instance_accesses, list):
        instance_accesses = [instance_accesses]

    for instance_access in instance_accesses:
        snapshot_access = _share_snapshot_access_get(
            context,
instance_access['access_id'] ) instance_access.set_snapshot_access_data(snapshot_access) return instance_accesses @require_context @context_manager.reader def share_access_get_all_by_type_and_access(context, share_id, access_type, access): return _share_access_get_query(context, {'share_id': share_id, 'access_type': access_type, 'access_to': access}).all() @require_context @context_manager.reader def share_access_check_for_existing_access(context, share_id, access_type, access_to): return _check_for_existing_access( context, 'share', share_id, access_type, access_to) def _check_for_existing_access(context, resource, resource_id, access_type, access_to): if resource == 'share': query_method = _share_access_get_query access_to_field = models.ShareAccessMapping.access_to else: query_method = _share_snapshot_access_get_query access_to_field = models.ShareSnapshotAccessMapping.access_to if access_type == 'ip': rules = query_method( context, {'%s_id' % resource: resource_id, 'access_type': access_type} ).filter(access_to_field.startswith(access_to.split('/')[0])).all() matching_rules = [ rule for rule in rules if ipaddress.ip_network(str(access_to)) == ipaddress.ip_network(str(rule['access_to'])) ] return len(matching_rules) > 0 return query_method( context, { '%s_id' % resource: resource_id, 'access_type': access_type, 'access_to': access_to } ).count() > 0 @require_context @context_manager.writer def share_instance_access_delete(context, mapping_id): mapping = context.session.query( models.ShareInstanceAccessMapping ).filter_by(id=mapping_id).first() if not mapping: exception.NotFound() filters = { 'resource_id': mapping['access_id'], 'all_projects': True } locks, _ = resource_lock_get_all( context.elevated(), filters=filters ) if locks: for lock in locks: if lock['resource_action'] == constants.RESOURCE_ACTION_DELETE: lock_reason = ( constants.SHARE_LOCKED_BY_ACCESS_LOCK_REASON % { 'lock_id': lock['id'] } ) share_filters = { 'all_projects': True, 'lock_reason': 
lock_reason } share_locks, _ = resource_lock_get_all( context.elevated(), filters=share_filters ) or [] for share_lock in share_locks: resource_lock_delete( context.elevated(), share_lock['id'] ) resource_lock_delete( context.elevated(), lock['id'] ) mapping.soft_delete( session=context.session, update_status=True, status_field_name='state' ) other_mappings = _share_instance_access_query( context, mapping['access_id'] ).all() # NOTE(u_glide): Remove access rule if all mappings were removed. if len(other_mappings) == 0: context.session.query(models.ShareAccessRulesMetadata).filter_by( access_id=mapping['access_id'] ).soft_delete() context.session.query(models.ShareAccessMapping).filter_by( id=mapping['access_id'] ).soft_delete() @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_instance_access_update(context, access_id, instance_id, updates): share_access_fields = ('access_type', 'access_to', 'access_key', 'access_level') share_access_map_updates, share_instance_access_map_updates = ( _extract_subdict_by_fields(updates, share_access_fields) ) updated_at = timeutils.utcnow() share_access_map_updates['updated_at'] = updated_at share_instance_access_map_updates['updated_at'] = updated_at access_ref = _share_access_get_query(context, {'id': access_id}).first() access_ref.update(share_access_map_updates) access_ref.save(session=context.session) instance_access_ref = _share_instance_access_query( context, access_id, instance_id).first() instance_access_ref.update(share_instance_access_map_updates) instance_access_ref.save(session=context.session) return instance_access_ref ################### @require_context @context_manager.writer def share_snapshot_instance_create(context, snapshot_id, values): return _share_snapshot_instance_create(context, snapshot_id, values) def _share_snapshot_instance_create(context, snapshot_id, values): values = copy.deepcopy(values) values['share_snapshot_metadata'] = 
_metadata_refs( values.get('metadata'), models.ShareSnapshotMetadata) _change_size_to_instance_size(values) if not values.get('id'): values['id'] = uuidutils.generate_uuid() values.update({'snapshot_id': snapshot_id}) instance_ref = models.ShareSnapshotInstance() instance_ref.update(values) instance_ref.save(session=context.session) return _share_snapshot_instance_get(context, instance_ref['id']) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_snapshot_instance_update(context, instance_id, values): instance_ref = _share_snapshot_instance_get(context, instance_id) _change_size_to_instance_size(values) # NOTE(u_glide): Ignore updates to custom properties for extra_key in models.ShareSnapshotInstance._extra_keys: if extra_key in values: values.pop(extra_key) instance_ref.update(values) instance_ref.save(session=context.session) return instance_ref @require_context @context_manager.writer def share_snapshot_instance_delete(context, snapshot_instance_id): snapshot_instance_ref = _share_snapshot_instance_get( context, snapshot_instance_id ) access_rules = _share_snapshot_access_get_all_for_snapshot_instance( context, snapshot_instance_id ) for rule in access_rules: _share_snapshot_instance_access_delete( context, rule['access_id'], snapshot_instance_id, ) for el in snapshot_instance_ref.export_locations: _share_snapshot_instance_export_location_delete(context, el['id']) snapshot_instance_ref.soft_delete( session=context.session, update_status=True) snapshot = _share_snapshot_get( context, snapshot_instance_ref['snapshot_id']) if len(snapshot.instances) == 0: context.session.query(models.ShareSnapshotMetadata).filter_by( share_snapshot_id=snapshot['id'], ).soft_delete() snapshot.soft_delete(session=context.session) @require_context @context_manager.reader def share_snapshot_instance_get(context, snapshot_instance_id, with_share_data=False): return _share_snapshot_instance_get( context, 
snapshot_instance_id, with_share_data=with_share_data, ) def _share_snapshot_instance_get( context, snapshot_instance_id, with_share_data=False, ): result = _share_snapshot_instance_get_with_filters( context, instance_ids=[snapshot_instance_id], ).first() if result is None: raise exception.ShareSnapshotInstanceNotFound( instance_id=snapshot_instance_id) if with_share_data: result = _set_share_snapshot_instance_data(context, result)[0] return result @require_context @context_manager.reader def share_snapshot_instance_get_all_with_filters( context, search_filters, with_share_data=False, ): """Get snapshot instances filtered by known attrs, ignore unknown attrs. All filters accept list/tuples to filter on, along with simple values. """ return _share_snapshot_instance_get_all_with_filters( context, search_filters, with_share_data=with_share_data, ) def _share_snapshot_instance_get_all_with_filters( context, search_filters, with_share_data=False, ): def listify(values): if values: if not isinstance(values, (list, tuple, set)): return values, else: return values _known_filters = ('instance_ids', 'snapshot_ids', 'share_instance_ids', 'statuses') filters = {k: listify(search_filters.get(k)) for k in _known_filters} result = _share_snapshot_instance_get_with_filters( context, **filters, ).all() if with_share_data: result = _set_share_snapshot_instance_data(context, result) return result def _share_snapshot_instance_get_with_filters(context, instance_ids=None, snapshot_ids=None, statuses=None, share_instance_ids=None): query = model_query(context, models.ShareSnapshotInstance, read_deleted="no") if instance_ids is not None: query = query.filter( models.ShareSnapshotInstance.id.in_(instance_ids)) if snapshot_ids is not None: query = query.filter( models.ShareSnapshotInstance.snapshot_id.in_(snapshot_ids)) if share_instance_ids is not None: query = query.filter(models.ShareSnapshotInstance.share_instance_id .in_(share_instance_ids)) if statuses is not None: query = 
query.filter(models.ShareSnapshotInstance.status.in_(statuses)) query = query.options( orm.joinedload(models.ShareSnapshotInstance.share_group_snapshot), ) return query def _set_share_snapshot_instance_data(context, snapshot_instances): if snapshot_instances and not isinstance(snapshot_instances, list): snapshot_instances = [snapshot_instances] for snapshot_instance in snapshot_instances: share_instance = _share_instance_get( context, snapshot_instance['share_instance_id'], with_share_data=True) snapshot_instance['share'] = share_instance return snapshot_instances ################### @require_context @context_manager.writer def share_snapshot_create(context, create_values, create_snapshot_instance=True): values = copy.deepcopy(create_values) values = ensure_model_dict_has_id(values) values['share_snapshot_metadata'] = _metadata_refs( values.pop('metadata', {}), models.ShareSnapshotMetadata) snapshot_ref = models.ShareSnapshot() snapshot_instance_values, snapshot_values = ( _extract_snapshot_instance_values(values) ) snapshot_ref.update(snapshot_values) share_ref = _share_get( context, snapshot_values.get('share_id'), ) snapshot_instance_values.update( {'share_instance_id': share_ref.instance.id} ) snapshot_ref.save(session=context.session) if create_snapshot_instance: _share_snapshot_instance_create( context, snapshot_ref['id'], snapshot_instance_values, ) return _share_snapshot_get(context, snapshot_values['id']) @require_admin_context def _snapshot_data_get_for_project( context, project_id, user_id, share_type_id=None, ): query = model_query( context, models.ShareSnapshot, func.count(models.ShareSnapshot.id), func.sum(models.ShareSnapshot.size), read_deleted="no", ).filter_by(project_id=project_id) if share_type_id: query = query.join( models.ShareInstance, models.ShareInstance.share_id == models.ShareSnapshot.share_id, ).filter_by(share_type_id=share_type_id) elif user_id: query = query.filter_by(user_id=user_id) result = query.first() return result[0] or 0, 
result[1] or 0 @require_context @context_manager.reader def share_snapshot_get(context, snapshot_id, project_only=True): return _share_snapshot_get(context, snapshot_id, project_only=project_only) def _share_snapshot_get(context, snapshot_id, project_only=True): result = model_query( context, models.ShareSnapshot, project_only=project_only, ).filter_by( id=snapshot_id, ).options( orm.joinedload(models.ShareSnapshot.share), orm.joinedload(models.ShareSnapshot.instances), orm.joinedload(models.ShareSnapshot.share_snapshot_metadata), ).first() if not result: raise exception.ShareSnapshotNotFound(snapshot_id=snapshot_id) return result def _share_snapshot_get_all_with_filters(context, project_id=None, share_id=None, filters=None, limit=None, offset=None, sort_key=None, sort_dir=None, show_count=False): """Retrieves all snapshots. If no sorting parameters are specified then returned snapshots are sorted by the 'created_at' key and desc order. :param context: context to query under :param filters: dictionary of filters :param limit: maximum number of items to return :param sort_key: attribute by which results should be sorted,default is created_at :param sort_dir: direction in which results should be sorted :returns: list of matching snapshots """ # Init data sort_key = sort_key or 'created_at' sort_dir = sort_dir or 'desc' filters = copy.deepcopy(filters) if filters else {} query = model_query(context, models.ShareSnapshot) if project_id: query = query.filter_by(project_id=project_id) if share_id: query = query.filter_by(share_id=share_id) query = query.options( orm.joinedload(models.ShareSnapshot.share), orm.joinedload(models.ShareSnapshot.instances), orm.joinedload(models.ShareSnapshot.share_snapshot_metadata), ) # Snapshots with no instances are filtered out. 
query = query.filter( models.ShareSnapshot.id == models.ShareSnapshotInstance.snapshot_id) # Apply filters if 'usage' in filters: usage_filter_keys = ['any', 'used', 'unused'] if filters['usage'] == 'any': pass elif filters['usage'] == 'used': query = query.filter(models.Share.snapshot_id == ( models.ShareSnapshot.id)) elif filters['usage'] == 'unused': query = query.filter(models.Share.snapshot_id != ( models.ShareSnapshot.id)) else: msg = _("Wrong 'usage' key provided - '%(key)s'. " "Expected keys are '%(ek)s'.") % { 'key': filters['usage'], 'ek': usage_filter_keys} raise exception.InvalidInput(reason=msg) filters.pop('usage') if 'status' in filters: query = query.filter(models.ShareSnapshotInstance.status == ( filters['status'])) filters.pop('status') if 'metadata' in filters: for k, v in filters['metadata'].items(): # pylint: disable=no-member query = query.filter( or_(models.ShareSnapshot.share_snapshot_metadata.any( key=k, value=v))) filters.pop('metadata') legal_filter_keys = ('display_name', 'display_name~', 'display_description', 'display_description~', 'id', 'user_id', 'project_id', 'share_id', 'share_proto', 'size', 'share_size') query = exact_filter(query, models.ShareSnapshot, filters, legal_filter_keys) if not filters.get('list_deferred_delete'): query = query.filter(and_( models.ShareSnapshotInstance.status != ( constants.STATUS_DEFERRED_DELETING), models.ShareSnapshotInstance.status != ( constants.STATUS_ERROR_DEFERRED_DELETING))) query = apply_sorting(models.ShareSnapshot, query, sort_key, sort_dir) count = None if show_count: count = query.order_by(models.ShareSnapshot.id).distinct().count() if limit is not None: query = query.limit(limit) if offset: query = query.offset(offset) # Returns list of share snapshots that satisfy filters query = query.all() if show_count: return count, query return query @require_admin_context @context_manager.reader def share_snapshot_get_all(context, filters=None, limit=None, offset=None, sort_key=None, 
sort_dir=None): return _share_snapshot_get_all_with_filters( context, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) @require_admin_context @context_manager.reader def share_snapshot_get_all_with_count(context, filters=None, limit=None, offset=None, sort_key=None, sort_dir=None): count, query = _share_snapshot_get_all_with_filters( context, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir, show_count=True) return count, query @require_context @context_manager.reader def share_snapshot_get_all_by_project(context, project_id, filters=None, limit=None, offset=None, sort_key=None, sort_dir=None): authorize_project_context(context, project_id) return _share_snapshot_get_all_with_filters( context, project_id=project_id, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) @require_context @context_manager.reader def share_snapshot_get_all_by_project_with_count(context, project_id, filters=None, limit=None, offset=None, sort_key=None, sort_dir=None): authorize_project_context(context, project_id) count, query = _share_snapshot_get_all_with_filters( context, project_id=project_id, filters=filters, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir, show_count=True) return count, query @require_context @context_manager.reader def share_snapshot_get_all_for_share( context, share_id, filters=None, sort_key=None, sort_dir=None, ): return _share_snapshot_get_all_for_share( context, share_id, filters=None, sort_key=None, sort_dir=None, ) def _share_snapshot_get_all_for_share( context, share_id, filters=None, sort_key=None, sort_dir=None, ): return _share_snapshot_get_all_with_filters( context, share_id=share_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir, ) @require_context @context_manager.reader def share_snapshot_get_latest_for_share(context, share_id): snapshots = _share_snapshot_get_all_with_filters( context, share_id=share_id, sort_key='created_at', 
sort_dir='desc') return snapshots[0] if snapshots else None @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_snapshot_update(context, snapshot_id, values): return _share_snapshot_update(context, snapshot_id, values) def _share_snapshot_update(context, snapshot_id, values): snapshot_ref = _share_snapshot_get(context, snapshot_id) instance_values, snapshot_values = ( _extract_snapshot_instance_values(values) ) if snapshot_values: snapshot_ref.update(snapshot_values) snapshot_ref.save(session=context.session) if instance_values: snapshot_ref.instance.update(instance_values) snapshot_ref.instance.save(session=context.session) return snapshot_ref @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_snapshot_instances_status_update( context, snapshot_instance_ids, values, ): return _share_snapshot_instances_status_update( context, snapshot_instance_ids, values, ) def _share_snapshot_instances_status_update( context, snapshot_instance_ids, values, ): result = model_query( context, models.ShareSnapshotInstance, read_deleted="no", ).filter( models.ShareSnapshotInstance.id.in_(snapshot_instance_ids) ).update(values, synchronize_session=False) return result ################################### # Share Snapshot Metadata functions ################################### @require_context @require_share_snapshot_exists @context_manager.writer def share_snapshot_metadata_get(context, share_snapshot_id): return _share_snapshot_metadata_get(context, share_snapshot_id) @require_context @require_share_snapshot_exists @context_manager.writer def share_snapshot_metadata_delete(context, share_snapshot_id, key): meta_ref = _share_snapshot_metadata_get_item( context, share_snapshot_id, key) meta_ref.soft_delete(session=context.session) @require_context @require_share_snapshot_exists @context_manager.writer def share_snapshot_metadata_update(context, 
share_snapshot_id, metadata, delete): return _share_snapshot_metadata_update(context, share_snapshot_id, metadata, delete) @context_manager.writer def share_snapshot_metadata_update_item(context, share_snapshot_id, item): return _share_snapshot_metadata_update(context, share_snapshot_id, item, delete=False) @context_manager.reader def share_snapshot_metadata_get_item(context, share_snapshot_id, key): row = _share_snapshot_metadata_get_item(context, share_snapshot_id, key) result = {} result[row['key']] = row['value'] return result def _share_snapshot_metadata_get_query(context, share_snapshot_id): return model_query( context, models.ShareSnapshotMetadata, read_deleted="no", ).filter_by( share_snapshot_id=share_snapshot_id, ).options(orm.joinedload(models.ShareSnapshotMetadata.share_snapshot)) def _share_snapshot_metadata_get(context, share_snapshot_id): rows = _share_snapshot_metadata_get_query( context, share_snapshot_id, ).all() result = {} for row in rows: result[row['key']] = row['value'] return result def _share_snapshot_metadata_get_item(context, share_snapshot_id, key): result = _share_snapshot_metadata_get_query( context, share_snapshot_id).filter_by( key=key).first() if not result: raise exception.MetadataItemNotFound return result @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _share_snapshot_metadata_update(context, share_snapshot_id, metadata, delete): delete = strutils.bool_from_string(delete) if delete: original_metadata = _share_snapshot_metadata_get( context, share_snapshot_id) for meta_key, meta_value in original_metadata.items(): if meta_key not in metadata: meta_ref = _share_snapshot_metadata_get_item( context, share_snapshot_id, meta_key, ) meta_ref.soft_delete(session=context.session) # Now update all existing items with new values, or create new meta # objects meta_ref = None for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} meta_ref = 
_share_snapshot_metadata_get_query( context, share_snapshot_id, ).filter_by(key=meta_key).first() if not meta_ref: meta_ref = models.ShareSnapshotMetadata() item.update({"key": meta_key, "share_snapshot_id": share_snapshot_id}) meta_ref.update(item) meta_ref.save(session=context.session) return metadata ################################# @require_context @context_manager.writer def share_snapshot_access_create(context, values): values = ensure_model_dict_has_id(values) access_ref = models.ShareSnapshotAccessMapping() access_ref.update(values) access_ref.save(session=context.session) snapshot = _share_snapshot_get(context, values['share_snapshot_id']) for instance in snapshot.instances: values = { 'share_snapshot_instance_id': instance['id'], 'access_id': access_ref['id'], } instance_access_ref = models.ShareSnapshotInstanceAccessMapping() instance_access_ref.update(ensure_model_dict_has_id(values)) instance_access_ref.save(session=context.session) return _share_snapshot_access_get(context, access_ref['id']) def _share_snapshot_access_get_query(context, filters, read_deleted='no'): query = model_query( context, models.ShareSnapshotAccessMapping, read_deleted=read_deleted ) return query.filter_by(**filters) def _share_snapshot_instance_access_get_query( context, access_id=None, share_snapshot_instance_id=None, ): filters = {'deleted': 'False'} if access_id is not None: filters.update({'access_id': access_id}) if share_snapshot_instance_id is not None: filters.update( {'share_snapshot_instance_id': share_snapshot_instance_id} ) return model_query( context, models.ShareSnapshotInstanceAccessMapping ).filter_by(**filters) @require_context @context_manager.reader def share_snapshot_instance_access_get_all(context, access_id): return _share_snapshot_instance_access_get_all(context, access_id) def _share_snapshot_instance_access_get_all(context, access_id): rules = _share_snapshot_instance_access_get_query( context, access_id=access_id).all() return rules @require_context 
@context_manager.reader def share_snapshot_access_get(context, access_id): return _share_snapshot_access_get(context, access_id) def _share_snapshot_access_get(context, access_id): access = _share_snapshot_access_get_query( context, {'id': access_id}, ).first() if access: return access else: raise exception.NotFound() @require_context @context_manager.reader def share_snapshot_access_get_all_for_share_snapshot(context, share_snapshot_id, filters): filters['share_snapshot_id'] = share_snapshot_id access_list = _share_snapshot_access_get_query( context, filters ).all() return access_list @require_context @context_manager.reader def share_snapshot_check_for_existing_access(context, share_snapshot_id, access_type, access_to): return _check_for_existing_access( context, 'share_snapshot', share_snapshot_id, access_type, access_to) @require_context @context_manager.reader def share_snapshot_access_get_all_for_snapshot_instance( context, snapshot_instance_id, filters=None, with_snapshot_access_data=True, ): return _share_snapshot_access_get_all_for_snapshot_instance( context, snapshot_instance_id, filters=filters, with_snapshot_access_data=with_snapshot_access_data, ) def _share_snapshot_access_get_all_for_snapshot_instance( context, snapshot_instance_id, filters=None, with_snapshot_access_data=True, ): """Get all access rules related to a certain snapshot instance.""" filters = copy.deepcopy(filters) if filters else {} filters.update({'share_snapshot_instance_id': snapshot_instance_id}) query = _share_snapshot_instance_access_get_query(context) legal_filter_keys = ( 'id', 'share_snapshot_instance_id', 'access_id', 'state') query = exact_filter( query, models.ShareSnapshotInstanceAccessMapping, filters, legal_filter_keys) instance_accesses = query.all() if with_snapshot_access_data: instance_accesses = _set_instances_snapshot_access_data( context, instance_accesses ) return instance_accesses @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) 
@context_manager.writer def share_snapshot_instance_access_update( context, access_id, instance_id, updates ): snapshot_access_fields = ('access_type', 'access_to') snapshot_access_map_updates, share_instance_access_map_updates = ( _extract_subdict_by_fields(updates, snapshot_access_fields) ) updated_at = timeutils.utcnow() snapshot_access_map_updates['updated_at'] = updated_at share_instance_access_map_updates['updated_at'] = updated_at snapshot_access = _share_snapshot_access_get_query( context, {'id': access_id}).first() if not snapshot_access: raise exception.NotFound() snapshot_access.update(snapshot_access_map_updates) snapshot_access.save(session=context.session) access = _share_snapshot_instance_access_get_query( context, access_id=access_id, share_snapshot_instance_id=instance_id).first() if not access: raise exception.NotFound() access.update(share_instance_access_map_updates) access.save(session=context.session) return access @require_context @context_manager.writer def share_snapshot_instance_access_get( context, access_id, share_snapshot_instance_id, with_snapshot_access_data=True ): access = _share_snapshot_instance_access_get_query( context, access_id=access_id, share_snapshot_instance_id=share_snapshot_instance_id ).first() if access is None: raise exception.NotFound() if with_snapshot_access_data: return _set_instances_snapshot_access_data(context, access)[0] else: return access @require_context @context_manager.writer def share_snapshot_instance_access_delete( context, access_id, snapshot_instance_id ): return _share_snapshot_instance_access_delete( context, access_id, snapshot_instance_id ) def _share_snapshot_instance_access_delete( context, access_id, snapshot_instance_id ): rule = _share_snapshot_instance_access_get_query( context, access_id=access_id, share_snapshot_instance_id=snapshot_instance_id).first() if not rule: exception.NotFound() rule.soft_delete( session=context.session, update_status=True, status_field_name='state') 
other_mappings = _share_snapshot_instance_access_get_all( context, rule['access_id']) if len(other_mappings) == 0: context.session.query( models.ShareSnapshotAccessMapping ).filter_by( id=rule['access_id'] ).soft_delete( update_status=True, status_field_name='state' ) @require_context @context_manager.writer def share_snapshot_instance_export_location_create(context, values): values = ensure_model_dict_has_id(values) ssiel = models.ShareSnapshotInstanceExportLocation() ssiel.update(values) ssiel.save(session=context.session) return ssiel def _share_snapshot_instance_export_locations_get_query(context, values): query = model_query(context, models.ShareSnapshotInstanceExportLocation) return query.filter_by(**values) @require_context @context_manager.reader def share_snapshot_export_locations_get(context, snapshot_id): snapshot = _share_snapshot_get(context, snapshot_id) ins_ids = [ins['id'] for ins in snapshot.instances] export_locations = _share_snapshot_instance_export_locations_get_query( context, {}).filter( models.ShareSnapshotInstanceExportLocation. 
share_snapshot_instance_id.in_(ins_ids)).all() return export_locations @require_context @context_manager.reader def share_snapshot_instance_export_locations_get_all( context, share_snapshot_instance_id, ): return _share_snapshot_instance_export_locations_get_all( context, share_snapshot_instance_id, ) def _share_snapshot_instance_export_locations_get_all( context, share_snapshot_instance_id, ): export_locations = _share_snapshot_instance_export_locations_get_query( context, {'share_snapshot_instance_id': share_snapshot_instance_id}, ).all() return export_locations @require_context @context_manager.reader def share_snapshot_instance_export_location_get(context, el_id): export_location = _share_snapshot_instance_export_locations_get_query( context, {'id': el_id}, ).first() if export_location: return export_location else: raise exception.NotFound() @require_context @context_manager.writer def share_snapshot_instance_export_location_delete(context, el_id): return _share_snapshot_instance_export_location_delete(context, el_id) def _share_snapshot_instance_export_location_delete(context, el_id): el = _share_snapshot_instance_export_locations_get_query( context, {'id': el_id}).first() if not el: exception.NotFound() el.soft_delete(session=context.session) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_snapshot_instance_export_locations_update( context, share_snapshot_instance_id, export_locations, delete, ): # NOTE(dviroel): Lets keep this backward compatibility for driver that # may still return export_locations as string if not isinstance(export_locations, (list, tuple, set)): export_locations = (export_locations, ) export_locations_as_dicts = [] for el in export_locations: export_location = el if isinstance(el, str): export_location = { "path": el, "is_admin_only": False, } elif not isinstance(export_location, dict): raise exception.ManilaException( _("Wrong export location type '%s'.") % 
type(export_location)) export_locations_as_dicts.append(export_location) export_locations = export_locations_as_dicts export_locations_paths = [el['path'] for el in export_locations] current_el_rows = _share_snapshot_instance_export_locations_get_all( context, share_snapshot_instance_id, ) def get_path_list_from_rows(rows): return set([row['path'] for row in rows]) current_el_paths = get_path_list_from_rows(current_el_rows) def create_indexed_time_dict(key_list): base = timeutils.utcnow() return { # NOTE(u_glide): Incrementing timestamp by microseconds to make # timestamp order match index order. key: base + datetime.timedelta(microseconds=index) for index, key in enumerate(key_list) } indexed_update_time = create_indexed_time_dict(export_locations_paths) for el in current_el_rows: if delete and el['path'] not in export_locations_paths: el.soft_delete(session=context.session) else: updated_at = indexed_update_time[el['path']] el.update({ 'updated_at': updated_at, }) el.save(session=context.session) # Now add new export locations for el in export_locations: if el['path'] in current_el_paths: # Already updated continue location_ref = models.ShareSnapshotInstanceExportLocation() location_ref.update({ 'id': uuidutils.generate_uuid(), 'path': el['path'], 'share_snapshot_instance_id': share_snapshot_instance_id, 'updated_at': indexed_update_time[el['path']], 'is_admin_only': el.get('is_admin_only', False), }) location_ref.save(session=context.session) return get_path_list_from_rows( _share_snapshot_instance_export_locations_get_all( context, share_snapshot_instance_id, ) ) ################################# def _share_metadata_get_query(context, share_id): return model_query( context, models.ShareMetadata, read_deleted="no", ).filter_by( share_id=share_id, ).options(orm.joinedload(models.ShareMetadata.share)) @require_context @require_share_exists @context_manager.reader def share_metadata_get(context, share_id): return _share_metadata_get(context, share_id) def 
_share_metadata_get(context, share_id): rows = _share_metadata_get_query(context, share_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_share_exists @context_manager.reader def share_metadata_get_item(context, share_id, key): try: row = _share_metadata_get_item(context, share_id, key) except exception.MetadataItemNotFound: raise exception.MetadataItemNotFound() result = {} result[row['key']] = row['value'] return result @require_context @require_share_exists @context_manager.writer def share_metadata_delete(context, share_id, key): _share_metadata_get_query( context, share_id, ).filter_by(key=key).soft_delete() @require_context @require_share_exists @context_manager.writer def share_metadata_update(context, share_id, metadata, delete): return _share_metadata_update(context, share_id, metadata, delete) @require_context @require_share_exists @context_manager.writer def share_metadata_update_item(context, share_id, item): return _share_metadata_update(context, share_id, item, delete=False) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _share_metadata_update(context, share_id, metadata, delete): # Set existing metadata to deleted if delete argument is True delete = strutils.bool_from_string(delete) if delete: original_metadata = _share_metadata_get(context, share_id) for meta_key, meta_value in original_metadata.items(): if meta_key not in metadata: meta_ref = _share_metadata_get_item( context, share_id, meta_key, ) meta_ref.soft_delete(session=context.session) meta_ref = None # Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _share_metadata_get_item( context, share_id, meta_key, ) except exception.MetadataItemNotFound: meta_ref = models.ShareMetadata() item.update({"key": meta_key, "share_id": share_id}) 
meta_ref.update(item) meta_ref.save(session=context.session) return metadata def _share_metadata_get_item(context, share_id, key): result = _share_metadata_get_query( context, share_id, ).filter_by(key=key).first() if not result: raise exception.MetadataItemNotFound() return result ############################ # Export locations functions ############################ def _export_location_get_all( context, share_instance_ids, include_admin_only=True, ignore_secondary_replicas=False, ): if not isinstance(share_instance_ids, (set, list, tuple)): share_instance_ids = (share_instance_ids, ) query = model_query( context, models.ShareInstanceExportLocations, read_deleted="no", ).filter( models.ShareInstanceExportLocations.share_instance_id.in_( share_instance_ids), ).order_by( "updated_at", ).options( orm.joinedload(models.ShareInstanceExportLocations._el_metadata_bare), ) if not include_admin_only: query = query.filter_by(is_admin_only=False) if ignore_secondary_replicas: replica_state_attr = models.ShareInstance.replica_state query = query.join( models.ShareInstanceExportLocations.share_instance, ).filter( or_(replica_state_attr == None, # noqa replica_state_attr == constants.REPLICA_STATE_ACTIVE)) return query.all() @require_context @require_share_exists @context_manager.reader def export_location_get_all_by_share_id( context, share_id, include_admin_only=True, ignore_migration_destination=False, ignore_secondary_replicas=False, ): share = _share_get(context, share_id) if ignore_migration_destination: ids = [instance.id for instance in share.instances if instance['status'] != constants.STATUS_MIGRATING_TO] else: ids = [instance.id for instance in share.instances] rows = _export_location_get_all( context, ids, include_admin_only=include_admin_only, ignore_secondary_replicas=ignore_secondary_replicas) return rows @require_context @require_share_instance_exists @context_manager.reader def export_location_get_all_by_share_instance_id( context, share_instance_id, 
include_admin_only=True, ): rows = _export_location_get_all( context, [share_instance_id], include_admin_only=include_admin_only) return rows @require_context @require_share_exists @context_manager.reader def export_location_get_all(context, share_id): # NOTE(vponomaryov): this method is kept for compatibility with # old approach. New one uses 'export_location_get_all_by_share_id'. # Which returns list of dicts instead of list of strings, as this one does. share = _share_get(context, share_id) rows = _export_location_get_all( context, share.instance.id, context.is_admin) return [location['path'] for location in rows] @require_context @context_manager.reader def export_location_get_by_uuid( context, export_location_uuid, ignore_secondary_replicas=False, ): return _export_location_get_by_uuid( context, export_location_uuid, ignore_secondary_replicas=ignore_secondary_replicas, ) def _export_location_get_by_uuid( context, export_location_uuid, ignore_secondary_replicas=False, ): query = model_query( context, models.ShareInstanceExportLocations, read_deleted="no", ).filter_by( uuid=export_location_uuid, ).options( orm.joinedload(models.ShareInstanceExportLocations._el_metadata_bare), ) if ignore_secondary_replicas: replica_state_attr = models.ShareInstance.replica_state query = query.join( models.ShareInstanceExportLocations.share_instance, ).filter( or_( replica_state_attr == None, # noqa replica_state_attr == constants.REPLICA_STATE_ACTIVE, ) ) result = query.first() if not result: raise exception.ExportLocationNotFound(uuid=export_location_uuid) return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def export_locations_update( context, share_instance_id, export_locations, delete, ): return _export_locations_update( context, share_instance_id, export_locations, delete, ) def _export_locations_update( context, share_instance_id, export_locations, delete, ): # NOTE(u_glide): # Backward compatibility code 
for drivers, # which return single export_location as string if not isinstance(export_locations, (list, tuple, set)): export_locations = (export_locations, ) export_locations_as_dicts = [] for el in export_locations: # NOTE(vponomaryov): transform old export locations view to new one export_location = el if isinstance(el, str): export_location = { "path": el, "is_admin_only": False, "metadata": {}, } elif isinstance(export_location, dict): if 'metadata' not in export_location: export_location['metadata'] = {} else: raise exception.ManilaException( _("Wrong export location type '%s'.") % type(export_location)) export_locations_as_dicts.append(export_location) export_locations = export_locations_as_dicts export_locations_paths = [el['path'] for el in export_locations] current_el_rows = _export_location_get_all(context, share_instance_id) def get_path_list_from_rows(rows): return set([row['path'] for row in rows]) current_el_paths = get_path_list_from_rows(current_el_rows) def create_indexed_time_dict(key_list): base = timeutils.utcnow() return { # NOTE(u_glide): Incrementing timestamp by microseconds to make # timestamp order match index order. 
key: base + datetime.timedelta(microseconds=index) for index, key in enumerate(key_list) } indexed_update_time = create_indexed_time_dict(export_locations_paths) for el in current_el_rows: if delete and el['path'] not in export_locations_paths: _export_location_metadata_delete(context, el['uuid']) el.soft_delete(session=context.session) else: updated_at = indexed_update_time[el['path']] el.update({ 'updated_at': updated_at, 'deleted': 0, }) el.save(session=context.session) new_export_metadata = next( exl.get('metadata', {}) for exl in export_locations if exl['path'] == el['path'] ) new_export_metadata = new_export_metadata or el['el_metadata'] if new_export_metadata: _export_location_metadata_update( context, el['uuid'], new_export_metadata, ) # Now add new export locations for el in export_locations: if el['path'] in current_el_paths: # Already updated continue location_ref = models.ShareInstanceExportLocations() location_ref.update({ 'uuid': uuidutils.generate_uuid(), 'path': el['path'], 'share_instance_id': share_instance_id, 'updated_at': indexed_update_time[el['path']], 'deleted': 0, 'is_admin_only': el.get('is_admin_only', False), }) location_ref.save(session=context.session) if not el.get('metadata'): continue _export_location_metadata_update( context, location_ref['uuid'], el.get('metadata'), ) return get_path_list_from_rows( _export_location_get_all(context, share_instance_id) ) ##################################### # Export locations metadata functions ##################################### def _export_location_metadata_get_query(context, export_location_uuid): export_location_id = _export_location_get_by_uuid( context, export_location_uuid).id return model_query( context, models.ShareInstanceExportLocationsMetadata, read_deleted="no", ).filter_by( export_location_id=export_location_id, ) @require_context @context_manager.reader def export_location_metadata_get(context, export_location_uuid): return _export_location_metadata_get(context, 
export_location_uuid) def _export_location_metadata_get(context, export_location_uuid): rows = _export_location_metadata_get_query( context, export_location_uuid, ).all() result = {} for row in rows: result[row["key"]] = row["value"] return result @require_context @context_manager.writer def export_location_metadata_delete(context, export_location_uuid, keys=None): return _export_location_metadata_delete( context, export_location_uuid, keys=keys, ) def _export_location_metadata_delete(context, export_location_uuid, keys=None): metadata = _export_location_metadata_get_query( context, export_location_uuid, ) # NOTE(vponomaryov): if keys is None then we delete all metadata. if keys is not None: keys = keys if isinstance(keys, (list, set, tuple)) else (keys, ) metadata = metadata.filter( models.ShareInstanceExportLocationsMetadata.key.in_(keys), ) metadata = metadata.all() for meta_ref in metadata: meta_ref.soft_delete(session=context.session) @require_context @context_manager.writer def export_location_metadata_update( context, export_location_uuid, metadata, delete=False, ): return _export_location_metadata_update( context, export_location_uuid, metadata, delete=delete, ) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _export_location_metadata_update( context, export_location_uuid, metadata, delete=False, ): if delete: original_metadata = _export_location_metadata_get( context, export_location_uuid, ) keys_for_deletion = set(original_metadata).difference(metadata) if keys_for_deletion: _export_location_metadata_delete( context, export_location_uuid, keys=keys_for_deletion, ) el = _export_location_get_by_uuid(context, export_location_uuid) for meta_key, meta_value in metadata.items(): # NOTE(vponomaryov): we should use separate session # for each meta_ref because of autoincrement of integer primary key # that will not take effect using one session and we will rewrite, # in that case, single record - first one added with this call. 
context.session.commit() context.session.begin() if meta_value is None: LOG.warning("%s should be properly defined in the driver.", meta_key) item = {"value": meta_value, "updated_at": timeutils.utcnow()} meta_ref = _export_location_metadata_get_query( context, export_location_uuid, ).filter_by( key=meta_key, ).first() if not meta_ref: meta_ref = models.ShareInstanceExportLocationsMetadata() item.update({ "key": meta_key, "export_location_id": el.id, }) meta_ref.update(item) meta_ref.save(session=context.session) return metadata @require_context @context_manager.reader def export_location_metadata_get_item(context, export_location_uuid, key): row = _export_location_metadata_get_item( context, export_location_uuid, key) result = {row['key']: row['value']} return result @require_context @context_manager.writer def export_location_metadata_update_item(context, export_location_uuid, item): return _export_location_metadata_update(context, export_location_uuid, item, delete=False) def _export_location_metadata_get_item(context, export_location_uuid, key): result = _export_location_metadata_get_query( context, export_location_uuid, ).filter_by(key=key).first() if not result: raise exception.MetadataItemNotFound() return result ################################### def _security_service_get_query(context, project_only=False): return model_query( context, models.SecurityService, project_only=project_only, ) @require_context @context_manager.writer def security_service_create(context, values): values = ensure_model_dict_has_id(values) security_service_ref = models.SecurityService() security_service_ref.update(values) security_service_ref.save(session=context.session) return security_service_ref @require_context @context_manager.writer def security_service_delete(context, id): security_service_ref = _security_service_get(context, id) security_service_ref.soft_delete(session=context.session) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) 
@context_manager.writer def security_service_update(context, id, values): security_service_ref = _security_service_get(context, id) security_service_ref.update(values) security_service_ref.save(session=context.session) return security_service_ref @require_context @context_manager.reader def security_service_get(context, id, **kwargs): return _security_service_get(context, id, **kwargs) @require_context def _security_service_get(context, id, **kwargs): result = _security_service_get_query( context, **kwargs, ).filter_by(id=id).first() if result is None: raise exception.SecurityServiceNotFound(security_service_id=id) return result @require_context @context_manager.reader def security_service_get_all(context): return _security_service_get_query(context).all() @require_context @context_manager.reader def security_service_get_all_by_project(context, project_id): return _security_service_get_query(context).filter_by( project_id=project_id, ).all() @require_context @context_manager.reader def security_service_get_all_by_share_network(context, share_network_id): return model_query( context, models.SecurityService, ).join( models.ShareNetworkSecurityServiceAssociation, models.SecurityService.id == models.ShareNetworkSecurityServiceAssociation.security_service_id, ).filter_by( share_network_id=share_network_id, deleted=0, ).all() ################### def _share_network_get_query(context): return model_query( context, models.ShareNetwork, project_only=True, ).options( orm.joinedload(models.ShareNetwork.share_instances), orm.joinedload(models.ShareNetwork.security_services), orm.subqueryload(models.ShareNetwork.share_network_subnets), ) @require_context @context_manager.writer def share_network_create(context, values): values = ensure_model_dict_has_id(values) network_ref = models.ShareNetwork() network_ref.update(values) network_ref.save(session=context.session) return _share_network_get(context, values['id']) @require_context @context_manager.writer def 
share_network_delete(context, id): network_ref = _share_network_get(context, id) for subnet in network_ref['share_network_subnets']: share_network_subnet_delete(context, subnet['id']) network_ref.soft_delete(session=context.session) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_network_update(context, id, values): network_ref = _share_network_get(context, id) network_ref.update(values) network_ref.save(session=context.session) return network_ref @require_context @context_manager.reader def share_network_get(context, id): return _share_network_get(context, id) @require_context def _share_network_get(context, id): result = _share_network_get_query(context).filter_by(id=id).first() if result is None: raise exception.ShareNetworkNotFound(share_network_id=id) return result @require_context @context_manager.reader def share_network_get_all_by_filter(context, filters=None): query = _share_network_get_query(context) legal_filter_keys = ('project_id', 'created_since', 'created_before') if not filters: filters = {} query = exact_filter( query, models.ShareNetwork, filters, legal_filter_keys, ) if 'security_service_id' in filters: security_service_id = filters.get('security_service_id') query = query.join( models.ShareNetworkSecurityServiceAssociation, models.ShareNetwork.id == models.ShareNetworkSecurityServiceAssociation.share_network_id, # noqa: E501 ).filter_by( security_service_id=security_service_id, deleted=0, ) return query.all() @require_context @context_manager.reader def share_network_get_all(context): return _share_network_get_query(context).all() @require_context @context_manager.reader def share_network_get_all_by_project(context, project_id): return _share_network_get_query( context, ).filter_by(project_id=project_id).all() @require_context @context_manager.reader def share_network_get_all_by_security_service(context, security_service_id): return model_query( context, models.ShareNetwork, 
).join( models.ShareNetworkSecurityServiceAssociation, models.ShareNetwork.id == models.ShareNetworkSecurityServiceAssociation.share_network_id, ).filter_by(security_service_id=security_service_id, deleted=0).all() @require_context @context_manager.writer def share_network_add_security_service(context, id, security_service_id): assoc_ref = model_query( context, models.ShareNetworkSecurityServiceAssociation, ).filter_by( share_network_id=id, ).filter_by(security_service_id=security_service_id).first() if assoc_ref: msg = "Already associated" raise exception.ShareNetworkSecurityServiceAssociationError( share_network_id=id, security_service_id=security_service_id, reason=msg, ) share_nw_ref = _share_network_get(context, id) security_service_ref = _security_service_get(context, security_service_id) share_nw_ref.security_services += [security_service_ref] share_nw_ref.save(session=context.session) return share_nw_ref @require_context @context_manager.reader def share_network_security_service_association_get( context, share_network_id, security_service_id, ): association = model_query( context, models.ShareNetworkSecurityServiceAssociation, ).filter_by( share_network_id=share_network_id, ).filter_by( security_service_id=security_service_id, ).first() return association @require_context @context_manager.writer def share_network_remove_security_service(context, id, security_service_id): share_nw_ref = _share_network_get(context, id) _security_service_get(context, security_service_id) assoc_ref = model_query( context, models.ShareNetworkSecurityServiceAssociation, ).filter_by( share_network_id=id, ).filter_by(security_service_id=security_service_id).first() if assoc_ref: assoc_ref.soft_delete(session=context.session) else: msg = "No association defined" raise exception.ShareNetworkSecurityServiceDissociationError( share_network_id=id, security_service_id=security_service_id, reason=msg, ) return share_nw_ref @require_context @oslo_db_api.wrap_db_retry(max_retries=5, 
retry_on_deadlock=True) @context_manager.writer def share_network_update_security_service( context, id, current_security_service_id, new_security_service_id, ): share_nw_ref = _share_network_get(context, id) # Check if the old security service exists _security_service_get(context, current_security_service_id) new_security_service_ref = _security_service_get( context, new_security_service_id, ) assoc_ref = model_query( context, models.ShareNetworkSecurityServiceAssociation, ).filter_by( share_network_id=id, ).filter_by( security_service_id=current_security_service_id, ).first() if assoc_ref: assoc_ref.soft_delete(session=context.session) else: msg = "No association defined" raise exception.ShareNetworkSecurityServiceDissociationError( share_network_id=id, security_service_id=current_security_service_id, reason=msg) # Add new association share_nw_ref.security_services += [new_security_service_ref] share_nw_ref.save(session=context.session) return share_nw_ref @require_context def _count_share_networks( context, project_id, user_id=None, share_type_id=None, ): query = model_query( context, models.ShareNetwork, func.count(models.ShareNetwork.id), read_deleted="no", ).filter_by(project_id=project_id) if share_type_id: query = query.join( models.ShareNetwork.share_instances, ).filter_by(share_type_id=share_type_id) elif user_id is not None: query = query.filter_by(user_id=user_id) return query.first()[0] ################### @require_context def _share_network_subnet_get_query(context): return model_query( context, models.ShareNetworkSubnet, ).options( orm.joinedload(models.ShareNetworkSubnet.share_servers), orm.joinedload(models.ShareNetworkSubnet.share_network), orm.joinedload( models.ShareNetworkSubnet.share_network_subnet_metadata ), ) @require_context @context_manager.writer def share_network_subnet_create(context, values): values = ensure_model_dict_has_id(values) values['share_network_subnet_metadata'] = _metadata_refs( values.pop('metadata', {}), 
models.ShareNetworkSubnetMetadata) network_subnet_ref = models.ShareNetworkSubnet() network_subnet_ref.update(values) network_subnet_ref.save(session=context.session) return _share_network_subnet_get( context, network_subnet_ref['id'], ) @require_context @context_manager.writer def share_network_subnet_delete(context, network_subnet_id): network_subnet_ref = _share_network_subnet_get(context, network_subnet_id) context.session.query(models.ShareNetworkSubnetMetadata).filter_by( share_network_subnet_id=network_subnet_id, ).soft_delete() network_subnet_ref.soft_delete(session=context.session, update_status=True) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_network_subnet_update(context, network_subnet_id, values): network_subnet_ref = _share_network_subnet_get(context, network_subnet_id) network_subnet_ref.update(values) network_subnet_ref.save(session=context.session) return network_subnet_ref @require_context @context_manager.reader def share_network_subnet_get(context, network_subnet_id, parent_id=None): return _share_network_subnet_get( context, network_subnet_id, parent_id=parent_id, ) @require_context def _share_network_subnet_get(context, network_subnet_id, parent_id=None): kwargs = {'id': network_subnet_id} if parent_id: kwargs['share_network_id'] = parent_id result = _share_network_subnet_get_query( context, ).filter_by(**kwargs).first() if result is None: raise exception.ShareNetworkSubnetNotFound( share_network_subnet_id=network_subnet_id, ) return result @require_context @context_manager.reader def share_network_subnet_get_all_with_same_az(context, network_subnet_id): subnet = _share_network_subnet_get_query( context, ).filter_by(id=network_subnet_id).subquery() result = _share_network_subnet_get_query( context, ).join( subnet, subnet.c.share_network_id == models.ShareNetworkSubnet.share_network_id, ).filter( func.coalesce(subnet.c.availability_zone_id, '0') == 
func.coalesce(models.ShareNetworkSubnet.availability_zone_id, '0') ).all() if not result: raise exception.ShareNetworkSubnetNotFound( share_network_subnet_id=network_subnet_id, ) return result @require_context @context_manager.reader def share_network_subnet_get_all(context): return _share_network_subnet_get_query(context).all() @require_context @context_manager.reader def share_network_subnet_get_all_by_share_network(context, network_id): return _share_network_subnet_get_query(context).filter_by( share_network_id=network_id, ).all() @require_context @context_manager.reader def share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, availability_zone_id, fallback_to_default=True, ): """Get the share network subnets DB records in a given AZ. This method returns list of subnets DB record for a given share network id and an availability zone. If the 'availability_zone_id' is 'None', a record may be returned and it will represent the default share network subnets. If there is no subnet for a specific availability zone id and "fallback_to_default" is True, this method will return the default share network subnets, if it exists. :param context: operation context. :param share_network_id: the share network id to be the subnets. :param availability_zone_id: the availability zone id to be the subnets. :param fallback_to_default: determines in case no subnets found in the given AZ, it will return the "default" subnets. :return: the list of share network subnets in the AZ and share network. 
""" return _share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, availability_zone_id, fallback_to_default=fallback_to_default, ) @require_context def _share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, availability_zone_id, fallback_to_default=True, ): result = _share_network_subnet_get_query(context).filter_by( share_network_id=share_network_id, availability_zone_id=availability_zone_id, ).all() # If a specific subnet wasn't found, try get the default one if availability_zone_id and not result and fallback_to_default: return _share_network_subnet_get_query(context).filter_by( share_network_id=share_network_id, availability_zone_id=None, ).all() return result @require_context @context_manager.reader def share_network_subnet_get_default_subnets(context, share_network_id): return _share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, availability_zone_id=None, ) @require_context @context_manager.reader def share_network_subnet_get_all_by_share_server_id(context, share_server_id): result = _share_network_subnet_get_query(context).filter( models.ShareNetworkSubnet.share_servers.any( id=share_server_id, ) ).all() if not result: raise exception.ShareNetworkSubnetNotFoundByShareServer( share_server_id=share_server_id, ) return result ################### def _share_network_subnet_metadata_get_query(context, share_network_subnet_id): return model_query( context, models.ShareNetworkSubnetMetadata, read_deleted="no", ).filter_by( share_network_subnet_id=share_network_subnet_id, ).options( orm.joinedload(models.ShareNetworkSubnetMetadata.share_network_subnet), ) @require_context @require_share_network_subnet_exists @context_manager.reader def share_network_subnet_metadata_get(context, share_network_subnet_id): return _share_network_subnet_metadata_get(context, share_network_subnet_id) @require_context def _share_network_subnet_metadata_get(context, share_network_subnet_id): rows = 
_share_network_subnet_metadata_get_query( context, share_network_subnet_id, ).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_share_network_subnet_exists @context_manager.writer def share_network_subnet_metadata_delete( context, share_network_subnet_id, key, ): meta_ref = _share_network_subnet_metadata_get_item( context, share_network_subnet_id, key, ) meta_ref.soft_delete(session=context.session) @require_context @require_share_network_subnet_exists @context_manager.writer def share_network_subnet_metadata_update( context, share_network_subnet_id, metadata, delete, ): return _share_network_subnet_metadata_update( context, share_network_subnet_id, metadata, delete, ) @require_context @context_manager.writer def share_network_subnet_metadata_update_item( context, share_network_subnet_id, item, ): return _share_network_subnet_metadata_update( context, share_network_subnet_id, item, delete=False, ) @require_context @context_manager.reader def share_network_subnet_metadata_get_item( context, share_network_subnet_id, key, ): row = _share_network_subnet_metadata_get_item( context, share_network_subnet_id, key, ) result = {row['key']: row['value']} return result def _share_network_subnet_metadata_get_item( context, share_network_subnet_id, key, ): result = _share_network_subnet_metadata_get_query( context, share_network_subnet_id, ).filter_by(key=key).first() if not result: raise exception.MetadataItemNotFound return result @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _share_network_subnet_metadata_update( context, share_network_subnet_id, metadata, delete, ): delete = strutils.bool_from_string(delete) if delete: original_metadata = _share_network_subnet_metadata_get( context, share_network_subnet_id, ) for meta_key, meta_value in original_metadata.items(): if meta_key not in metadata: meta_ref = _share_network_subnet_metadata_get_item( context, share_network_subnet_id, meta_key, ) 
meta_ref.soft_delete(session=context.session) meta_ref = None # Now update all existing items with new values, or create new meta # objects. for meta_key, meta_value in metadata.items(): # update the value whether it exists or not. item = {"value": meta_value} meta_ref = _share_network_subnet_metadata_get_query( context, share_network_subnet_id, ).filter_by(key=meta_key).first() if not meta_ref: meta_ref = models.ShareNetworkSubnetMetadata() item.update( { "key": meta_key, "share_network_subnet_id": share_network_subnet_id, } ) meta_ref.update(item) meta_ref.save(session=context.session) return metadata ################################# def _share_server_get_query(context): return model_query( context, models.ShareServer, ).options( orm.joinedload(models.ShareServer.share_instances), orm.joinedload(models.ShareServer.network_allocations), orm.joinedload(models.ShareServer.share_network_subnets), ) @require_context @context_manager.writer def share_server_create(context, values): values = ensure_model_dict_has_id(values) server_ref = models.ShareServer() # updated_at is needed for judgement of automatic cleanup server_ref.updated_at = timeutils.utcnow() server_ref.update(values) # If encryption_key_ref is present, create associated record encryption_key_ref = values.get('encryption_key_ref') if encryption_key_ref: encryption_ref = models.EncryptionRef( id=uuidutils.generate_uuid(), share_server_id=server_ref['id'], encryption_key_ref=encryption_key_ref, project_id=context.project_id, ) server_ref.server_encryption_ref_entry = encryption_ref server_ref.save(session=context.session) # NOTE(u_glide): Do so to prevent errors with relationships return _share_server_get(context, server_ref['id']) @require_context @context_manager.writer def share_server_delete(context, id): server_ref = _share_server_get(context, id) model_query( context, models.ShareServerShareNetworkSubnetMapping, ).filter_by( share_server_id=id, ).soft_delete() 
_share_server_backend_details_delete(context, id) server_ref.soft_delete(session=context.session, update_status=True) # If encryption_key_ref is present, delete associated encryption ref entry if server_ref.server_encryption_ref_entry: server_ref.server_encryption_ref_entry.soft_delete( session=context.session) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_server_update(context, id, values): server_ref = _share_server_get(context, id) server_ref.update(values) server_ref.save(session=context.session) return server_ref @require_context @context_manager.reader def share_server_get(context, server_id): return _share_server_get(context, server_id) @require_context def _share_server_get(context, server_id): result = _share_server_get_query(context).filter_by(id=server_id).first() if result is None: raise exception.ShareServerNotFound(share_server_id=server_id) return result @require_context @context_manager.reader def share_server_search_by_identifier(context, identifier): identifier_field = models.ShareServer.identifier # try if given identifier is a suffix of existing entry's identifier result = (_share_server_get_query(context).filter( identifier_field.like('%{}'.format(identifier))).all()) if not result: # repeat it with underscores instead of hyphens result = (_share_server_get_query(context).filter( identifier_field.like('%{}'.format( identifier.replace("-", "_")))).all()) if not result: # repeat it with hypens instead of underscores result = (_share_server_get_query(context).filter( identifier_field.like('%{}'.format( identifier.replace("_", "-")))).all()) if not result: # try if an existing identifier is a substring of given identifier result = (_share_server_get_query(context).filter( literal(identifier).contains(identifier_field)).all()) if not result: # repeat it with underscores instead of hyphens result = (_share_server_get_query(context).filter( literal(identifier.replace("-", 
"_")).contains( identifier_field)).all()) if not result: # repeat it with hypens instead of underscores result = (_share_server_get_query(context).filter( literal(identifier.replace("_", "-")).contains( identifier_field)).all()) if not result: raise exception.ShareServerNotFound(share_server_id=identifier) return result @require_context @context_manager.reader def share_server_get_all_by_host_and_share_subnet_valid( context, host, share_subnet_id, ): result = _share_server_get_query( context, ).filter_by( host=host, ).filter( models.ShareServer.share_network_subnets.any(id=share_subnet_id) ).filter( models.ShareServer.status.in_( (constants.STATUS_CREATING, constants.STATUS_ACTIVE), ) ).all() if not result: filters_description = ('share_network_subnet_id is ' '"%(share_subnet_id)s", host is "%(host)s" and ' 'status in "%(status_cr)s" or ' '"%(status_act)s"') % { 'share_subnet_id': share_subnet_id, 'host': host, 'status_cr': constants.STATUS_CREATING, 'status_act': constants.STATUS_ACTIVE, } raise exception.ShareServerNotFoundByFilters( filters_description=filters_description, ) return result @require_context @context_manager.reader def share_server_get_all_by_host_and_or_share_subnet( context, host=None, share_subnet_id=None, ): result = _share_server_get_query(context) if host: result = result.filter_by(host=host) result = result.filter( models.ShareServer.share_network_subnets.any(id=share_subnet_id) ).all() if not result: filters_description = ( 'share_network_subnet_id is "%(share_subnet_id)s" and host is ' '"%(host)s".' 
) % { 'share_subnet_id': share_subnet_id, 'host': host, } raise exception.ShareServerNotFoundByFilters( filters_description=filters_description, ) return result @require_context @context_manager.reader def share_server_get_all(context): return _share_server_get_query(context).all() @require_context @context_manager.reader def share_server_get_all_with_filters(context, filters): return _share_server_get_all_with_filters(context, filters) @require_context def _share_server_get_all_with_filters(context, filters): query = _share_server_get_query(context) if filters.get('host'): query = query.filter_by(host=filters.get('host')) if filters.get('status'): query = query.filter_by(status=filters.get('status')) if filters.get('source_share_server_id'): query = query.filter_by( source_share_server_id=filters.get('source_share_server_id')) if filters.get('encryption_key_ref'): query = query.filter_by( encryption_key_ref=filters.get('encryption_key_ref')) if filters.get('share_network_id'): query = query.join( models.ShareServerShareNetworkSubnetMapping, models.ShareServerShareNetworkSubnetMapping.share_server_id == models.ShareServer.id ).join( models.ShareNetworkSubnet, models.ShareNetworkSubnet.id == models.ShareServerShareNetworkSubnetMapping.share_network_subnet_id ).filter( models.ShareNetworkSubnet.share_network_id == filters.get('share_network_id')) return query.all() @require_context @context_manager.reader def share_server_get_all_by_host(context, host, filters=None): if filters: filters.update({'host': host}) else: filters = {'host': host} return _share_server_get_all_with_filters(context, filters=filters) @require_context @context_manager.reader def share_server_get_all_unused_deletable(context, host, updated_before): valid_server_status = ( constants.STATUS_INACTIVE, constants.STATUS_ACTIVE, constants.STATUS_ERROR, ) result = (_share_server_get_query(context) .filter_by(is_auto_deletable=True) .filter_by(host=host) .filter(~models.ShareServer.share_groups.any()) 
.filter(~models.ShareServer.share_instances.any()) .filter(models.ShareServer.status.in_(valid_server_status)) .filter(models.ShareServer.updated_at < updated_before).all()) return result def _share_server_backend_details_get_item(context, share_server_id, key): result = _share_server_backend_details_get_query( context, share_server_id, ).filter_by(key=key).first() if not result: raise exception.ShareServerBackendDetailsNotFound() return result def _share_server_backend_details_get_query(context, share_server_id): return (model_query( context, models.ShareServerBackendDetails, read_deleted="no"). filter_by(share_server_id=share_server_id)) @require_context @context_manager.writer def share_server_backend_details_set(context, share_server_id, server_details): _share_server_get(context, share_server_id) for meta_key, meta_value in server_details.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _share_server_backend_details_get_item( context, share_server_id, meta_key) except exception.ShareServerBackendDetailsNotFound: meta_ref = models.ShareServerBackendDetails() item.update({"key": meta_key, "share_server_id": share_server_id}) meta_ref.update(item) meta_ref.save(session=context.session) return server_details @require_context @context_manager.reader def share_server_backend_details_get_item(context, share_server_id, meta_key): try: meta_ref = _share_server_backend_details_get_item( context, share_server_id, meta_key) except exception.ShareServerBackendDetailsNotFound: return None return meta_ref.get('value') @require_context @context_manager.writer def share_server_backend_details_delete(context, share_server_id): return _share_server_backend_details_delete(context, share_server_id) @require_context def _share_server_backend_details_delete(context, share_server_id): share_server_details = model_query( context, models.ShareServerBackendDetails, ).filter_by(share_server_id=share_server_id).all() for item in 
share_server_details: item.soft_delete(session=context.session) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_servers_update(context, share_server_ids, values): result = model_query( context, models.ShareServer, read_deleted="no", ).filter( models.ShareServer.id.in_(share_server_ids), ).update(values, synchronize_session=False) return result ################### def _driver_private_data_query( context, entity_id, key=None, read_deleted=False, ): query = model_query( context, models.DriverPrivateData, read_deleted=read_deleted, ).filter_by( entity_uuid=entity_id, ) if isinstance(key, list): return query.filter(models.DriverPrivateData.key.in_(key)) elif key is not None: return query.filter_by(key=key) return query @require_context @context_manager.reader def driver_private_data_get(context, entity_id, key=None, default=None): query = _driver_private_data_query(context, entity_id, key) if key is None or isinstance(key, list): return {item.key: item.value for item in query.all()} else: result = query.first() return result["value"] if result is not None else default @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def driver_private_data_update( context, entity_id, details, delete_existing=False, ): # NOTE(u_glide): following code modifies details dict, that's why we should # copy it new_details = copy.deepcopy(details) # Process existing data original_data = context.session.query(models.DriverPrivateData).filter_by( entity_uuid=entity_id, ).all() for data_ref in original_data: in_new_details = data_ref['key'] in new_details if in_new_details: new_value = str(new_details.pop(data_ref['key'])) data_ref.update({ "value": new_value, "deleted": 0, "deleted_at": None }) data_ref.save(session=context.session) elif delete_existing and data_ref['deleted'] != 1: data_ref.update({ "deleted": 1, "deleted_at": timeutils.utcnow() }) 
data_ref.save(session=context.session) # Add new data for key, value in new_details.items(): data_ref = models.DriverPrivateData() data_ref.update({ "entity_uuid": entity_id, "key": key, "value": str(value) }) data_ref.save(session=context.session) return details @require_context @context_manager.writer def driver_private_data_delete(context, entity_id, key=None): query = _driver_private_data_query(context, entity_id, key) query.update({"deleted": 1, "deleted_at": timeutils.utcnow()}) ################### @require_context @context_manager.writer def network_allocation_create(context, values): values = ensure_model_dict_has_id(values) alloc_ref = models.NetworkAllocation() alloc_ref.update(values) alloc_ref.save(session=context.session) return alloc_ref @require_context @context_manager.writer def network_allocation_delete(context, id): alloc_ref = _network_allocation_get(context, id) alloc_ref.soft_delete(session=context.session) @require_context @context_manager.reader def network_allocation_get(context, id, read_deleted="no"): return _network_allocation_get(context, id, read_deleted=read_deleted) @require_context def _network_allocation_get(context, id, read_deleted="no"): result = model_query( context, models.NetworkAllocation, read_deleted=read_deleted, ).filter_by(id=id).first() if result is None: raise exception.NotFound() return result @require_context @context_manager.reader def network_allocations_get_by_ip_address(context, ip_address): result = model_query( context, models.NetworkAllocation, ).filter_by(ip_address=ip_address).all() return result or [] @require_context @context_manager.reader def network_allocations_get_for_share_server( context, share_server_id, label=None, subnet_id=None, ): query = model_query( context, models.NetworkAllocation, ).filter_by( share_server_id=share_server_id, ) if label: if label == 'user': query = query.filter(or_( # NOTE(vponomaryov): we treat None as alias for 'user'. 
models.NetworkAllocation.label == None, # noqa models.NetworkAllocation.label == label, )) else: query = query.filter(models.NetworkAllocation.label == label) if subnet_id: query = query.filter( models.NetworkAllocation.share_network_subnet_id == subnet_id) result = query.all() return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def network_allocation_update(context, id, values, read_deleted=None): alloc_ref = _network_allocation_get(context, id, read_deleted=read_deleted) alloc_ref.update(values) alloc_ref.save(session=context.session) return alloc_ref ################### def _dict_with_specs(inst_type_query, specs_key='extra_specs'): """Convert type query result to dict with extra_spec and rate_limit. Takes a share [group] type query returned by sqlalchemy and returns it as a dictionary, converting the extra/group specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 'group_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: 'extra_specs' : {'k1': 'v1'} 'group_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) specs = {x['key']: x['value'] for x in inst_type_query[specs_key]} inst_type_dict[specs_key] = specs return inst_type_dict @require_admin_context @context_manager.writer def share_type_create(context, values, projects=None): """Create a new share type. 
In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ values = ensure_model_dict_has_id(values) projects = projects or [] try: values['extra_specs'] = _metadata_refs( values.get('extra_specs'), models.ShareTypeExtraSpecs, ) share_type_ref = models.ShareTypes() share_type_ref.update(values) share_type_ref.save(session=context.session) except db_exception.DBDuplicateEntry: raise exception.ShareTypeExists(id=values['name']) except Exception as e: raise db_exception.DBError(e) for project in set(projects): access_ref = models.ShareTypeProjects() access_ref.update( {"share_type_id": share_type_ref.id, "project_id": project}, ) access_ref.save(session=context.session) return share_type_ref def _share_type_get_query(context, read_deleted=None, expected_fields=None): expected_fields = expected_fields or [] query = model_query( context, models.ShareTypes, read_deleted=read_deleted, ).options(orm.joinedload(models.ShareTypes.extra_specs)) if 'projects' in expected_fields: query = query.options(orm.joinedload(models.ShareTypes.projects)) if not context.is_admin: the_filter = [models.ShareTypes.is_public == true()] projects_attr = getattr(models.ShareTypes, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id) ]) query = query.filter(or_(*the_filter)) return query @handle_db_data_error @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _share_type_update(context, type_id, values, is_group): if values.get('name') is None: values.pop('name', None) if is_group: model = models.ShareGroupTypes exists_exc = exception.ShareGroupTypeExists exists_args = {'type_id': values.get('name')} else: model = models.ShareTypes exists_exc = exception.ShareTypeExists exists_args = {'id': values.get('name')} query = model_query(context, model) try: result = query.filter_by(id=type_id).update(values) except db_exception.DBDuplicateEntry: # This exception only 
occurs if there's a non-deleted # share/group type which has the same name as the name being # updated. raise exists_exc(**exists_args) if not result: if is_group: raise exception.ShareGroupTypeNotFound(type_id=type_id) else: raise exception.ShareTypeNotFound(share_type_id=type_id) @context_manager.writer def share_type_update(context, share_type_id, values): _share_type_update(context, share_type_id, values, is_group=False) @require_context @context_manager.reader def share_type_get_all(context, inactive=False, filters=None): """Returns a dict describing all share_types with name as key.""" filters = filters or {} read_deleted = "yes" if inactive else "no" query = _share_type_get_query(context, read_deleted=read_deleted) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models. ShareTypes.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models. ShareTypes, 'projects') the_filter.extend([ projects_attr.any( project_id=context.project_id, deleted=0) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) rows = query.order_by("name").all() result = {} for row in rows: result[row['name']] = _dict_with_specs(row) return result def _share_type_get_id_from_share_type(context, id): result = model_query( context, models.ShareTypes, read_deleted="no", ).filter_by(id=id).first() if not result: raise exception.ShareTypeNotFound(share_type_id=id) return result['id'] def _share_type_get(context, id, inactive=False, expected_fields=None): expected_fields = expected_fields or [] read_deleted = "yes" if inactive else "no" result = _share_type_get_query( context, read_deleted, expected_fields, ).filter_by(id=id).first() if not result: # The only way that id could be None is if the default share type is # not configured and no other share type was specified. 
if id is None: raise exception.DefaultShareTypeNotConfigured() raise exception.ShareTypeNotFound(share_type_id=id) share_type = _dict_with_specs(result) if 'projects' in expected_fields: share_type['projects'] = [p['project_id'] for p in result['projects']] return share_type @require_context @context_manager.reader def share_type_get(context, id, inactive=False, expected_fields=None): """Return a dict describing specific share_type.""" return _share_type_get(context, id, inactive=inactive, expected_fields=expected_fields) def _share_type_get_by_name(context, name): result = _share_type_get_query(context).filter_by(name=name).first() if not result: raise exception.ShareTypeNotFoundByName(share_type_name=name) return _dict_with_specs(result) @require_context @context_manager.reader def share_type_get_by_name(context, name): """Return a dict describing specific share_type.""" return _share_type_get_by_name(context, name) @require_context @context_manager.reader def share_type_get_by_name_or_id(context, name_or_id): """Return a dict describing specific share_type using its name or ID. 
:returns: ShareType object or None if not found """ try: return _share_type_get(context, name_or_id) except exception.ShareTypeNotFound: try: return _share_type_get_by_name(context, name_or_id) except exception.ShareTypeNotFoundByName: return None @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_type_destroy(context, id): _share_type_get(context, id) shares_count = model_query( context, models.ShareInstance, read_deleted="no", ).filter_by(share_type_id=id).count() share_group_types_count = model_query( context, models.ShareGroupTypeShareTypeMapping, read_deleted="no", ).filter_by(share_type_id=id).count() if shares_count or share_group_types_count: msg = ("Deletion of share type %(stype)s failed; it in use by " "%(shares)d shares and %(gtypes)d share group types") msg_args = {'stype': id, 'shares': shares_count, 'gtypes': share_group_types_count} LOG.error(msg, msg_args) raise exception.ShareTypeInUse(share_type_id=id) model_query( context, models.ShareTypeExtraSpecs, ).filter_by( share_type_id=id ).soft_delete() model_query( context, models.ShareTypeProjects, ).filter_by( share_type_id=id, ).soft_delete() model_query( context, models.ShareTypes, ).filter_by( id=id ).soft_delete() # NOTE(stephenfin): commit changes before we do anything with quotas context.session.commit() context.session.begin() # Destroy any quotas, usages and reservations for the share type: _quota_destroy_all_by_share_type(context, id) def _share_type_access_query(context): return model_query(context, models.ShareTypeProjects, read_deleted="no") @require_admin_context @context_manager.reader def share_type_access_get_all(context, type_id): share_type_id = _share_type_get_id_from_share_type(context, type_id) return _share_type_access_query( context, ).filter_by(share_type_id=share_type_id).all() @require_admin_context @context_manager.writer def share_type_access_add(context, type_id, project_id): """Add given tenant to 
the share type access list.""" share_type_id = _share_type_get_id_from_share_type(context, type_id) access_ref = models.ShareTypeProjects() access_ref.update( {"share_type_id": share_type_id, "project_id": project_id}, ) try: access_ref.save(session=context.session) except db_exception.DBDuplicateEntry: raise exception.ShareTypeAccessExists( share_type_id=type_id, project_id=project_id, ) return access_ref @require_admin_context @context_manager.writer def share_type_access_remove(context, type_id, project_id): """Remove given tenant from the share type access list.""" share_type_id = _share_type_get_id_from_share_type(context, type_id) count = _share_type_access_query( context, ).filter_by( share_type_id=share_type_id, ).filter_by( project_id=project_id, ).soft_delete(synchronize_session=False) if count == 0: raise exception.ShareTypeAccessNotFound( share_type_id=type_id, project_id=project_id, ) #################### def _share_type_extra_specs_query(context, share_type_id): return model_query( context, models.ShareTypeExtraSpecs, read_deleted="no", ).filter_by( share_type_id=share_type_id, ).options(orm.joinedload(models.ShareTypeExtraSpecs.share_type)) @require_context @context_manager.reader def share_type_extra_specs_get(context, share_type_id): rows = _share_type_extra_specs_query(context, share_type_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @context_manager.writer def share_type_extra_specs_delete(context, share_type_id, key): _share_type_extra_specs_get_item(context, share_type_id, key) _share_type_extra_specs_query( context, share_type_id, ).filter_by(key=key).soft_delete() def _share_type_extra_specs_get_item(context, share_type_id, key): result = _share_type_extra_specs_query( context, share_type_id, ).filter_by( key=key, ).options( orm.joinedload(models.ShareTypeExtraSpecs.share_type), ).first() if not result: raise exception.ShareTypeExtraSpecsNotFound( extra_specs_key=key, 
share_type_id=share_type_id, ) return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_type_extra_specs_update_or_create(context, share_type_id, specs): spec_ref = None for key, value in specs.items(): try: spec_ref = _share_type_extra_specs_get_item( context, share_type_id, key, ) except exception.ShareTypeExtraSpecsNotFound: spec_ref = models.ShareTypeExtraSpecs() spec_ref.update( { "key": key, "value": value, "share_type_id": share_type_id, "deleted": 0, } ) spec_ref.save(session=context.session) return specs #################### @context_manager.writer def ensure_availability_zone_exists(context, values, *, strict=True): az_name = values.pop('availability_zone', None) if not az_name: if strict: msg = _("Values dict should have 'availability_zone' field.") raise ValueError(msg) return if uuidutils.is_uuid_like(az_name): az_ref = _availability_zone_get(context, az_name) else: az_ref = _availability_zone_create_if_not_exist(context, az_name) values.update({'availability_zone_id': az_ref['id']}) @require_context @context_manager.reader def availability_zone_get(context, id_or_name): return _availability_zone_get(context, id_or_name) @require_context def _availability_zone_get(context, id_or_name): query = model_query(context, models.AvailabilityZone) if uuidutils.is_uuid_like(id_or_name): query = query.filter_by(id=id_or_name) else: query = query.filter_by(name=id_or_name) result = query.first() if not result: raise exception.AvailabilityZoneNotFound(id=id_or_name) return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _availability_zone_create_if_not_exist(context, name): try: return _availability_zone_get(context, name) except exception.AvailabilityZoneNotFound: az = models.AvailabilityZone() az.update({'id': uuidutils.generate_uuid(), 'name': name}) az.save(context.session) return az @require_context @context_manager.reader def 
availability_zone_get_all(context): enabled_services = model_query( context, models.Service, models.Service.availability_zone_id, read_deleted="no" ).filter_by(disabled=False).distinct() return model_query( context, models.AvailabilityZone, read_deleted="no", ).filter( models.AvailabilityZone.id.in_(enabled_services) ).all() #################### @require_admin_context @context_manager.writer def purge_deleted_records(context, age_in_days): """Purge soft-deleted records older than(and equal) age from tables.""" if age_in_days < 0: msg = _('Must supply a non-negative value for "age_in_days".') LOG.error(msg) raise exception.InvalidParameterValue(msg) metadata = MetaData() metadata.reflect(get_engine()) deleted_age = timeutils.utcnow() - datetime.timedelta(days=age_in_days) # Deleting rows in share_network_security_service_association # related to deleted network or security service sec_assoc_to_delete = context.session.query( models.ShareNetworkSecurityServiceAssociation).join( models.ShareNetwork).join(models.SecurityService).filter( or_(models.ShareNetwork.deleted_at <= deleted_age, models.SecurityService.deleted_at <= deleted_age)).all() for assoc in sec_assoc_to_delete: with context.session.begin_nested(): context.session.delete(assoc) for table in reversed(metadata.sorted_tables): if 'deleted' not in table.columns.keys(): continue try: mds = [m for m in models.__dict__.values() if (hasattr(m, '__tablename__') and m.__tablename__ == str(table))] if len(mds) > 0: # collect all soft-deleted records with context.session.begin_nested(): model = mds[0] s_deleted_records = context.session.query( model, ).filter(model.deleted_at <= deleted_age) deleted_count = 0 # delete records one by one, # skip the records which has FK constraints for record in s_deleted_records: try: with context.session.begin_nested(): context.session.delete(record) deleted_count += 1 except db_exc.DBError: LOG.warning( ("Deleting soft-deleted resource %s " "failed, skipping."), record) if 
deleted_count != 0: LOG.info("Deleted %(count)s records in " "table %(table)s.", {'count': deleted_count, 'table': table}) except db_exc.DBError: LOG.warning("Querying table %s's soft-deleted records " "failed, skipping.", table) #################### def _share_group_get(context, share_group_id): result = model_query( context, models.ShareGroup, project_only=True, read_deleted='no', ).filter_by( id=share_group_id, ).options(orm.joinedload(models.ShareGroup.share_types)).first() if not result: raise exception.ShareGroupNotFound(share_group_id=share_group_id) return result @require_context @context_manager.reader def share_group_get(context, share_group_id): return _share_group_get(context, share_group_id) def _share_group_get_all(context, project_id=None, share_server_id=None, host=None, detailed=True, filters=None, sort_key=None, sort_dir=None): sort_key = sort_key or 'created_at' sort_dir = sort_dir or 'desc' query = model_query( context, models.ShareGroup, read_deleted='no') # Apply filters if not filters: filters = {} no_key = 'key_is_absent' for k, v in filters.items(): temp_k = k.rstrip('~') if k in constants.LIKE_FILTER else k filter_attr = getattr(models.ShareGroup, temp_k, no_key) if filter_attr == no_key: msg = _("Share groups cannot be filtered using '%s' key.") raise exception.InvalidInput(reason=msg % k) if k in constants.LIKE_FILTER: query = query.filter(filter_attr.op('LIKE')(u'%' + v + u'%')) else: query = query.filter(filter_attr == v) if project_id: query = query.filter( models.ShareGroup.project_id == project_id) if host: query = query.filter( models.ShareGroup.host == host) if share_server_id: query = query.filter( models.ShareGroup.share_server_id == share_server_id) try: query = apply_sorting(models.ShareGroup, query, sort_key, sort_dir) except AttributeError: msg = _("Wrong sorting key provided - '%s'.") % sort_key raise exception.InvalidInput(reason=msg) if detailed: return query.options( orm.joinedload(models.ShareGroup.share_types), ).all() 
query = query.with_entities( models.ShareGroup.id, models.ShareGroup.name) values = [] for sg_id, sg_name in query.all(): values.append({"id": sg_id, "name": sg_name}) return values @require_admin_context @context_manager.reader def share_group_get_all(context, detailed=True, filters=None, sort_key=None, sort_dir=None): return _share_group_get_all( context, detailed=detailed, filters=filters, sort_key=sort_key, sort_dir=sort_dir) @require_admin_context @context_manager.reader def share_group_get_all_by_host(context, host, detailed=True): return _share_group_get_all(context, host=host, detailed=detailed) @require_context @context_manager.reader def share_group_get_all_by_project(context, project_id, detailed=True, filters=None, sort_key=None, sort_dir=None): authorize_project_context(context, project_id) return _share_group_get_all( context, project_id=project_id, detailed=detailed, filters=filters, sort_key=sort_key, sort_dir=sort_dir) @require_context @context_manager.reader def share_group_get_all_by_share_server(context, share_server_id, filters=None, sort_key=None, sort_dir=None): return _share_group_get_all( context, share_server_id=share_server_id, filters=filters, sort_key=sort_key, sort_dir=sort_dir) @require_context @context_manager.writer def share_group_create(context, values): share_group = models.ShareGroup() if not values.get('id'): values['id'] = uuidutils.generate_uuid() mappings = [] for item in values.get('share_types') or []: mapping = models.ShareGroupShareTypeMapping() mapping['id'] = uuidutils.generate_uuid() mapping['share_type_id'] = item mapping['share_group_id'] = values['id'] mappings.append(mapping) values['share_types'] = mappings share_group.update(values) context.session.add(share_group) return _share_group_get(context, values['id']) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_group_update(context, share_group_id, values): share_group_ref = _share_group_get( 
context, share_group_id) share_group_ref.update(values) share_group_ref.save(session=context.session) return share_group_ref @require_admin_context @context_manager.writer def share_group_destroy(context, share_group_id): share_group_ref = _share_group_get(context, share_group_id) share_group_ref.soft_delete(context.session) context.session.query(models.ShareGroupShareTypeMapping).filter_by( share_group_id=share_group_ref['id']).soft_delete() @require_context @context_manager.reader def count_shares_in_share_group(context, share_group_id): return (model_query(context, models.Share, project_only=True, read_deleted="no"). filter_by(share_group_id=share_group_id). count()) @require_context @context_manager.reader def get_all_shares_by_share_group(context, share_group_id): return (model_query( context, models.Share, project_only=True, read_deleted="no"). filter_by(share_group_id=share_group_id). all()) @require_context def _count_share_groups(context, project_id, user_id=None, share_type_id=None): query = model_query( context, models.ShareGroup, func.count(models.ShareGroup.id), read_deleted="no", ).filter_by(project_id=project_id) if share_type_id: query = query.join( # models.ShareGroupShareTypeMapping, models.ShareGroup.share_types, ).filter_by(share_type_id=share_type_id) elif user_id is not None: query = query.filter_by(user_id=user_id) return query.first()[0] @require_context def _count_share_group_snapshots( context, project_id, user_id=None, share_type_id=None, ): query = model_query( context, models.ShareGroupSnapshot, func.count(models.ShareGroupSnapshot.id), read_deleted="no", ).filter_by(project_id=project_id) if share_type_id: query = query.join( models.ShareGroupSnapshot.share_group, ).join( # models.ShareGroupShareTypeMapping, models.ShareGroup.share_types, ).filter_by(share_type_id=share_type_id) elif user_id is not None: query = query.filter_by(user_id=user_id) return query.first()[0] @require_context def _share_replica_data_get_for_project( context, 
project_id, user_id=None, share_type_id=None, ): query = model_query( context, models.ShareInstance, func.count(models.ShareInstance.id), func.sum(models.Share.size), read_deleted="no", ).join( models.Share, models.ShareInstance.share_id == models.Share.id ).filter( models.Share.project_id == project_id ).filter( models.ShareInstance.replica_state.isnot(None) ) if share_type_id: query = query.filter( models.ShareInstance.share_type_id == share_type_id) elif user_id: query = query.filter(models.Share.user_id == user_id) result = query.first() return result[0] or 0, result[1] or 0 @require_context def _count_encryption_keys_for_project( context, project_id, user_id=None, ): return encryption_keys_get_count( context, filters={'project_id': project_id} ) @require_context @context_manager.reader def count_share_group_snapshots_in_share_group(context, share_group_id): return model_query( context, models.ShareGroupSnapshot, project_only=True, read_deleted="no", ).filter_by( share_group_id=share_group_id, ).count() @require_context @context_manager.reader def count_share_groups_in_share_network(context, share_network_id): return (model_query( context, models.ShareGroup, project_only=True, read_deleted="no"). filter_by(share_network_id=share_network_id). 
count()) @require_context @context_manager.reader def count_share_group_snapshot_members_in_share( context, share_id, include_deferred_deleting=True ): query = model_query( context, models.ShareSnapshotInstance, project_only=True, read_deleted="no", ).join( models.ShareInstance, models.ShareInstance.id == ( models.ShareSnapshotInstance.share_instance_id), ) if include_deferred_deleting: # consider deferred deleting states in query return query.filter( models.ShareInstance.share_id == share_id, ).count() deferred_delete_states = [ constants.STATUS_DEFERRED_DELETING, constants.STATUS_ERROR_DEFERRED_DELETING, ] return query.filter( models.ShareInstance.share_id == share_id, and_(models.ShareSnapshotInstance.status.not_in( deferred_delete_states)) ).count() #################### @require_context def _share_group_snapshot_get(context, share_group_snapshot_id): result = model_query( context, models.ShareGroupSnapshot, project_only=True, read_deleted='no', ).options( orm.joinedload(models.ShareGroupSnapshot.share_group), orm.joinedload(models.ShareGroupSnapshot.share_group_snapshot_members), ).filter_by( id=share_group_snapshot_id, ).first() if not result: raise exception.ShareGroupSnapshotNotFound( share_group_snapshot_id=share_group_snapshot_id) return result def _share_group_snapshot_get_all( context, project_id=None, detailed=True, filters=None, sort_key=None, sort_dir=None, ): if not sort_key: sort_key = 'created_at' if not sort_dir: sort_dir = 'desc' query = model_query(context, models.ShareGroupSnapshot, read_deleted='no') # Apply filters if not filters: filters = {} no_key = 'key_is_absent' for k, v in filters.items(): filter_attr = getattr(models.ShareGroupSnapshot, k, no_key) if filter_attr == no_key: msg = _("Share group snapshots cannot be filtered using '%s' key.") raise exception.InvalidInput(reason=msg % k) query = query.filter(filter_attr == v) if project_id: query = query.filter( models.ShareGroupSnapshot.project_id == project_id) try: query = 
apply_sorting( models.ShareGroupSnapshot, query, sort_key, sort_dir) except AttributeError: msg = _("Wrong sorting key provided - '%s'.") % sort_key raise exception.InvalidInput(reason=msg) if detailed: return query.options( orm.joinedload(models.ShareGroupSnapshot.share_group), orm.joinedload( models.ShareGroupSnapshot.share_group_snapshot_members ), ).all() query = query.with_entities(models.ShareGroupSnapshot.id, models.ShareGroupSnapshot.name) values = [] for sgs_id, sgs_name in query.all(): values.append({"id": sgs_id, "name": sgs_name}) return values @require_context @context_manager.reader def share_group_snapshot_get(context, share_group_snapshot_id): return _share_group_snapshot_get(context, share_group_snapshot_id) @require_admin_context @context_manager.reader def share_group_snapshot_get_all( context, detailed=True, filters=None, sort_key=None, sort_dir=None): return _share_group_snapshot_get_all( context, filters=filters, detailed=detailed, sort_key=sort_key, sort_dir=sort_dir) @require_context @context_manager.reader def share_group_snapshot_get_all_by_project( context, project_id, detailed=True, filters=None, sort_key=None, sort_dir=None): authorize_project_context(context, project_id) return _share_group_snapshot_get_all( context, project_id=project_id, filters=filters, detailed=detailed, sort_key=sort_key, sort_dir=sort_dir, ) @require_context @context_manager.writer def share_group_snapshot_create(context, values): share_group_snapshot = models.ShareGroupSnapshot() if not values.get('id'): values['id'] = uuidutils.generate_uuid() share_group_snapshot.update(values) context.session.add(share_group_snapshot) return _share_group_snapshot_get(context, values['id']) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_group_snapshot_update(context, share_group_snapshot_id, values): share_group_ref = _share_group_snapshot_get( context, share_group_snapshot_id, ) 
share_group_ref.update(values) share_group_ref.save(session=context.session) return share_group_ref @require_admin_context @context_manager.writer def share_group_snapshot_destroy(context, share_group_snapshot_id): share_group_snap_ref = _share_group_snapshot_get( context, share_group_snapshot_id, ) share_group_snap_ref.soft_delete(context.session) context.session.query( models.ShareSnapshotInstance ).filter_by( share_group_snapshot_id=share_group_snapshot_id ).soft_delete() #################### @require_context @context_manager.reader def share_group_snapshot_members_get_all(context, share_group_snapshot_id): query = model_query( context, models.ShareSnapshotInstance, read_deleted='no', ).filter_by(share_group_snapshot_id=share_group_snapshot_id) return query.all() @require_context @context_manager.reader def share_group_snapshot_member_get(context, member_id): return _share_group_snapshot_member_get(context, member_id) def _share_group_snapshot_member_get(context, member_id): result = model_query( context, models.ShareSnapshotInstance, project_only=True, read_deleted='no', ).filter_by(id=member_id).first() if not result: raise exception.ShareGroupSnapshotMemberNotFound(member_id=member_id) return result @require_context @context_manager.writer def share_group_snapshot_member_create(context, values): if not values.get('id'): values['id'] = uuidutils.generate_uuid() _change_size_to_instance_size(values) member = models.ShareSnapshotInstance() member.update(values) context.session.add(member) return _share_group_snapshot_member_get(context, values['id']) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_group_snapshot_member_update(context, member_id, values): _change_size_to_instance_size(values) member = _share_group_snapshot_member_get(context, member_id) member.update(values) context.session.add(member) return _share_group_snapshot_member_get(context, member_id) #################### 
@require_admin_context @context_manager.writer def share_group_type_create(context, values, projects=None): """Create a new share group type. In order to pass in group specs, the values dict should contain a 'group_specs' key/value pair: {'group_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ values = ensure_model_dict_has_id(values) projects = projects or [] try: values['group_specs'] = _metadata_refs( values.get('group_specs'), models.ShareGroupTypeSpecs) mappings = [] for item in values.get('share_types', []): share_type = share_type_get_by_name_or_id(context, item) if not share_type: raise exception.ShareTypeDoesNotExist(share_type=item) mapping = models.ShareGroupTypeShareTypeMapping() mapping['id'] = uuidutils.generate_uuid() mapping['share_type_id'] = share_type['id'] mapping['share_group_type_id'] = values['id'] mappings.append(mapping) values['share_types'] = mappings share_group_type_ref = models.ShareGroupTypes() share_group_type_ref.update(values) share_group_type_ref.save(session=context.session) except db_exception.DBDuplicateEntry: raise exception.ShareGroupTypeExists(type_id=values['name']) except exception.ShareTypeDoesNotExist: raise except Exception as e: raise db_exception.DBError(e) for project in set(projects): access_ref = models.ShareGroupTypeProjects() access_ref.update({"share_group_type_id": share_group_type_ref.id, "project_id": project}) access_ref.save(session=context.session) return share_group_type_ref def _share_group_type_get_query( context, read_deleted=None, expected_fields=None, ): expected_fields = expected_fields or [] query = model_query( context, models.ShareGroupTypes, read_deleted=read_deleted ).options( orm.joinedload(models.ShareGroupTypes.group_specs), orm.joinedload(models.ShareGroupTypes.share_types), ) if 'projects' in expected_fields: query = query.options(orm.joinedload(models.ShareGroupTypes.projects)) if not context.is_admin: the_filter = [models.ShareGroupTypes.is_public == true()] projects_attr = 
getattr(models.ShareGroupTypes, 'projects') the_filter.extend([ projects_attr.any(project_id=context.project_id) ]) query = query.filter(or_(*the_filter)) return query @require_context @context_manager.reader def share_group_type_get_all(context, inactive=False, filters=None): """Returns a dict describing all share group types with name as key.""" filters = filters or {} read_deleted = "yes" if inactive else "no" query = _share_group_type_get_query(context, read_deleted=read_deleted) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models.ShareGroupTypes.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models. ShareGroupTypes, 'projects') the_filter.extend([ projects_attr.any( project_id=context.project_id, deleted=0) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) rows = query.order_by("name").all() result = {} for row in rows: result[row['name']] = _dict_with_specs(row, 'group_specs') return result def _share_group_type_get_id_from_share_group_type_query(context, type_id): return model_query( context, models.ShareGroupTypes, read_deleted="no", ).filter_by(id=type_id) def _share_group_type_get_id_from_share_group_type(context, type_id): result = _share_group_type_get_id_from_share_group_type_query( context, type_id, ).first() if not result: raise exception.ShareGroupTypeNotFound(type_id=type_id) return result['id'] @require_context def _share_group_type_get( context, type_id, inactive=False, expected_fields=None, ): expected_fields = expected_fields or [] read_deleted = "yes" if inactive else "no" result = _share_group_type_get_query( context, read_deleted, expected_fields, ).filter_by(id=type_id).first() if not result: raise exception.ShareGroupTypeNotFound(type_id=type_id) share_group_type = _dict_with_specs(result, 'group_specs') if 'projects' in expected_fields: share_group_type['projects'] = [ 
p['project_id'] for p in result['projects']] return share_group_type @require_context @context_manager.reader def share_group_type_get(context, type_id, inactive=False, expected_fields=None): """Return a dict describing specific share group type.""" return _share_group_type_get( context, type_id, inactive=inactive, expected_fields=expected_fields, ) @require_context def _share_group_type_get_by_name(context, name): result = model_query( context, models.ShareGroupTypes, ).options( orm.joinedload(models.ShareGroupTypes.group_specs), orm.joinedload(models.ShareGroupTypes.share_types), ).filter_by( name=name, ).first() if not result: raise exception.ShareGroupTypeNotFoundByName(type_name=name) return _dict_with_specs(result, 'group_specs') @require_context @context_manager.reader def share_group_type_get_by_name(context, name): """Return a dict describing specific share group type.""" return _share_group_type_get_by_name(context, name) @require_admin_context @context_manager.writer def share_group_type_destroy(context, type_id): _share_group_type_get(context, type_id) results = model_query( context, models.ShareGroup, read_deleted="no", ).filter_by( share_group_type_id=type_id, ).count() if results: LOG.error('Share group type %s deletion failed, it in use.', type_id) raise exception.ShareGroupTypeInUse(type_id=type_id) model_query( context, models.ShareGroupTypeSpecs, ).filter_by( share_group_type_id=type_id, ).soft_delete() model_query( context, models.ShareGroupTypeShareTypeMapping, ).filter_by( share_group_type_id=type_id, ).soft_delete() model_query( context, models.ShareGroupTypeProjects, ).filter_by( share_group_type_id=type_id, ).soft_delete() model_query( context, models.ShareGroupTypes, ).filter_by( id=type_id, ).soft_delete() ############################### def _share_group_type_access_query(context): return model_query( context, models.ShareGroupTypeProjects, read_deleted="no", ) @require_admin_context @context_manager.reader def 
share_group_type_access_get_all(context, type_id): share_group_type_id = _share_group_type_get_id_from_share_group_type( context, type_id) return _share_group_type_access_query(context).filter_by( share_group_type_id=share_group_type_id, ).all() @require_admin_context @context_manager.writer def share_group_type_access_add(context, type_id, project_id): """Add given tenant to the share group type access list.""" share_group_type_id = _share_group_type_get_id_from_share_group_type( context, type_id) access_ref = models.ShareGroupTypeProjects() access_ref.update({"share_group_type_id": share_group_type_id, "project_id": project_id}) try: access_ref.save(session=context.session) except db_exception.DBDuplicateEntry: raise exception.ShareGroupTypeAccessExists( type_id=share_group_type_id, project_id=project_id) return access_ref @require_admin_context @context_manager.writer def share_group_type_access_remove(context, type_id, project_id): """Remove given tenant from the share group type access list.""" share_group_type_id = _share_group_type_get_id_from_share_group_type( context, type_id) count = _share_group_type_access_query(context).filter_by( share_group_type_id=share_group_type_id, ).filter_by( project_id=project_id, ).soft_delete( synchronize_session=False, ) if count == 0: raise exception.ShareGroupTypeAccessNotFound( type_id=share_group_type_id, project_id=project_id) ############################### def _share_group_type_specs_query(context, type_id): return model_query( context, models.ShareGroupTypeSpecs, read_deleted="no" ).filter_by( share_group_type_id=type_id, ).options( orm.joinedload(models.ShareGroupTypeSpecs.share_group_type), ) @require_context @context_manager.reader def share_group_type_specs_get(context, type_id): rows = _share_group_type_specs_query(context, type_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @context_manager.writer def share_group_type_specs_delete(context, type_id, key): 
_share_group_type_specs_get_item(context, type_id, key) _share_group_type_specs_query( context, type_id, ).filter_by( key=key, ).soft_delete() @require_context def _share_group_type_specs_get_item(context, type_id, key): result = _share_group_type_specs_query( context, type_id, ).filter_by( key=key, ).options( orm.joinedload(models.ShareGroupTypeSpecs.share_group_type), ).first() if not result: raise exception.ShareGroupTypeSpecsNotFound( specs_key=key, type_id=type_id) return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def share_group_type_specs_update_or_create(context, type_id, specs): spec_ref = None for key, value in specs.items(): try: spec_ref = _share_group_type_specs_get_item( context, type_id, key, ) except exception.ShareGroupTypeSpecsNotFound: spec_ref = models.ShareGroupTypeSpecs() spec_ref.update({"key": key, "value": value, "share_group_type_id": type_id, "deleted": 0}) spec_ref.save(session=context.session) return specs ############################### @require_context def _message_get(context, message_id): query = model_query(context, models.Message, read_deleted="no", project_only="yes") result = query.filter_by(id=message_id).first() if not result: raise exception.MessageNotFound(message_id=message_id) return result @require_context @context_manager.reader def message_get(context, message_id): return _message_get(context, message_id) @require_context @context_manager.reader def message_get_all(context, filters=None, limit=None, offset=None, sort_key='created_at', sort_dir='desc'): """Retrieves all messages. If no sort parameters are specified then the returned messages are sorted by the 'created_at' key in descending order. :param context: context to query under :param limit: maximum number of items to return :param offset: the number of items to skip from the marker or from the first element. :param sort_key: attributes by which results should be sorted. 
:param sort_dir: directions in which results should be sorted. :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see exact_filter function for more information :returns: list of matching messages """ messages = models.Message query = model_query(context, messages, read_deleted="no", project_only="yes") legal_filter_keys = ('request_id', 'resource_type', 'resource_id', 'action_id', 'detail_id', 'message_level', 'created_since', 'created_before') if not filters: filters = {} query = exact_filter(query, messages, filters, legal_filter_keys) query = utils.paginate_query(query, messages, limit, sort_key=sort_key, sort_dir=sort_dir, offset=offset) return query.all() @require_context @context_manager.writer def message_create(context, message_values): values = copy.deepcopy(message_values) message_ref = models.Message() if not values.get('id'): values['id'] = uuidutils.generate_uuid() message_ref.update(values) context.session.add(message_ref) return _message_get(context, message_ref['id']) @require_context @context_manager.writer def message_destroy(context, message): model_query( context, models.Message, ).filter_by(id=message.get('id')).soft_delete() @require_admin_context @context_manager.writer def cleanup_expired_messages(context): now = timeutils.utcnow() return context.session.query( models.Message ).filter( models.Message.expires_at < now ).delete() ############################### @require_context @context_manager.reader def backend_info_get(context, host): """Get hash info for given host.""" result = _backend_info_query(context, host) return result @require_context @context_manager.writer def backend_info_create(context, host, value): info_ref = models.BackendInfo() info_ref.update({"host": host, "info_hash": value}) info_ref.save(context.session) return info_ref @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) 
@context_manager.writer
def backend_info_update(context, host, value=None, delete_existing=False):
    """Create, refresh or soft-delete the stored info hash for a host.

    :param context: request context (provides the DB session)
    :param host: backend host name the hash belongs to
    :param value: new hash value; when falsy, the stored value is not changed
    :param delete_existing: when True and no new value is given, mark the
        existing record as deleted instead of updating it
    :returns: the saved BackendInfo model
    """
    info_ref = _backend_info_query(context, host)
    if info_ref:
        if value:
            info_ref.update({"info_hash": value})
        elif delete_existing and info_ref['deleted'] != 1:
            info_ref.update({"deleted": 1,
                             "deleted_at": timeutils.utcnow()})
    else:
        # No row for this host yet (soft-deleted rows excluded): create one.
        info_ref = models.BackendInfo()
        info_ref.update({"host": host, "info_hash": value})
    info_ref.save(context.session)
    return info_ref


def _backend_info_query(context, host, read_deleted=False):
    # Return the single BackendInfo row for ``host``, or None.
    result = model_query(
        context, models.BackendInfo, read_deleted=read_deleted,
    ).filter_by(
        host=host,
    ).first()
    return result


###################


def _async_operation_data_query(context, entity_id, key=None,
                                read_deleted=False):
    # Base query for the async-operation key/value rows of one entity.
    # ``key`` may be a single key, a list of keys, or None (all keys).
    query = model_query(
        context, models.AsynchronousOperationData,
        read_deleted=read_deleted,
    ).filter_by(
        entity_uuid=entity_id,
    )
    if isinstance(key, list):
        return query.filter(models.AsynchronousOperationData.key.in_(key))
    elif key is not None:
        return query.filter_by(key=key)
    return query


@require_context
@context_manager.reader
def async_operation_data_get(context, entity_id, key=None, default=None):
    """Get async-operation data for an entity.

    :param key: None or a list returns a dict of key/value pairs; a single
        key returns the matching value, or ``default`` when absent.
    """
    query = _async_operation_data_query(context, entity_id, key)
    if key is None or isinstance(key, list):
        return {item.key: item.value for item in query.all()}
    else:
        result = query.first()
        return result["value"] if result is not None else default


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@context_manager.writer
def async_operation_data_update(context, entity_id, details,
                                delete_existing=False):
    """Create or update async-operation key/value data for an entity.

    Existing keys are overwritten (and resurrected when soft-deleted);
    stored keys absent from ``details`` are soft-deleted when
    ``delete_existing`` is True. The caller's ``details`` dict is not
    modified (values are stringified on a deep copy).

    :returns: the ``details`` argument, unchanged
    """
    new_details = copy.deepcopy(details)

    # Reconcile rows already stored for this entity.
    original_data = context.session.query(
        models.AsynchronousOperationData).filter_by(
        entity_uuid=entity_id,
    ).all()
    for data_ref in original_data:
        in_new_details = data_ref['key'] in new_details
        if in_new_details:
            new_value = str(new_details.pop(data_ref['key']))
            data_ref.update({
                "value": new_value,
                "deleted": 0,
                "deleted_at": None
            })
            data_ref.save(session=context.session)
        elif delete_existing and data_ref['deleted'] != 1:
            data_ref.update({
                "deleted": 1,
                "deleted_at": timeutils.utcnow()
            })
            data_ref.save(session=context.session)

    # Whatever remains in new_details has no existing row: insert it.
    for key, value in new_details.items():
        data_ref = models.AsynchronousOperationData()
        data_ref.update({
            "entity_uuid": entity_id,
            "key": key,
            "value": str(value)
        })
        data_ref.save(session=context.session)

    return details


@require_context
@context_manager.writer
def async_operation_data_delete(context, entity_id, key=None):
    """Soft-delete async-operation rows of an entity (all, or by key(s))."""
    query = _async_operation_data_query(context, entity_id, key)
    query.update({"deleted": 1, "deleted_at": timeutils.utcnow()})


@require_context
@require_availability_zone_exists(strict=True)
@context_manager.writer
def share_backup_create(context, share_id, values):
    """Create a share backup record bound to the given share."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()
    values.update({'share_id': share_id})

    share_backup_ref = models.ShareBackup()
    share_backup_ref.update(values)
    share_backup_ref.save(session=context.session)
    return share_backup_get(context, share_backup_ref['id'])


@require_context
@context_manager.reader
def share_backup_get(context, share_backup_id):
    """Return a share backup by id or raise ShareBackupNotFound."""
    result = model_query(
        context, models.ShareBackup, project_only=True, read_deleted="no"
    ).filter_by(
        id=share_backup_id,
    ).first()

    if result is None:
        raise exception.ShareBackupNotFound(backup_id=share_backup_id)
    return result


@require_context
@context_manager.reader
def share_backups_get_all(context, filters=None, limit=None, offset=None,
                          sort_key=None, sort_dir=None):
    """Return all share backups matching the given filters.

    ``filters`` may carry a 'project_id' key used to scope the query.
    The caller's dict is copied before that key is removed, so the
    argument is never mutated.
    """
    # Bug fix: previously 'project_id' was popped directly off the
    # caller-supplied dict, mutating it as a side effect.
    filters = dict(filters) if filters else {}
    project_id = filters.pop('project_id', None)
    return _share_backups_get_with_filters(
        context, project_id=project_id,
        filters=filters, limit=limit, offset=offset,
        sort_key=sort_key, sort_dir=sort_dir)


def _share_backups_get_with_filters(context, project_id=None, filters=None,
                                    limit=None, offset=None,
                                    sort_key=None, sort_dir=None):
    """Retrieves all backups.

    If no sorting parameters are specified then returned backups are
    sorted by the 'created_at' key and desc order.

    :param context: context to query under
    :param filters: dictionary of filters
    :param limit: maximum number of items to return
    :param offset: the number of items to skip from the start
    :param sort_key: attribute by which results should be sorted,
        default is created_at
    :param sort_dir: direction in which results should be sorted
    :returns: list of matching backups
    """
    # Init data
    sort_key = sort_key or 'created_at'
    sort_dir = sort_dir or 'desc'
    filters = copy.deepcopy(filters) if filters else {}
    query = model_query(context, models.ShareBackup)

    if project_id:
        query = query.filter_by(project_id=project_id)

    legal_filter_keys = ('display_name', 'display_name~',
                         'display_description', 'display_description~',
                         'id', 'share_id', 'host', 'topic', 'status')
    query = exact_filter(query, models.ShareBackup,
                         filters, legal_filter_keys)
    query = apply_sorting(models.ShareBackup, query, sort_key, sort_dir)

    if limit is not None:
        query = query.limit(limit)
    if offset:
        query = query.offset(offset)
    return query.all()


@require_admin_context
@context_manager.reader
def _backup_data_get_for_project(context, project_id, user_id):
    """Return (backup count, total size) for a project, optionally per user."""
    # Parenthesized call chain instead of backslash continuation.
    query = model_query(
        context, models.ShareBackup,
        func.count(models.ShareBackup.id),
        func.sum(models.ShareBackup.size),
        read_deleted="no",
    ).filter_by(project_id=project_id)
    if user_id:
        result = query.filter_by(user_id=user_id).first()
    else:
        result = query.first()
    return (result[0] or 0, result[1] or 0)


@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@require_availability_zone_exists(strict=False)
@context_manager.writer
def share_backup_update(context, backup_id, values):
    """Apply ``values`` to an existing share backup and return it."""
    backup_ref = share_backup_get(context, backup_id)
    backup_ref.update(values)
    backup_ref.save(session=context.session)
    return backup_ref


@require_context
@context_manager.writer
def share_backup_delete(context, backup_id):
    """Soft-delete a share backup (its status is updated on delete)."""
    backup_ref = share_backup_get(context, backup_id)
backup_ref.soft_delete(session=context.session, update_status=True) ############################### @require_context def _resource_lock_get(context, lock_id): query = model_query(context, models.ResourceLock, read_deleted="no", project_only="yes") result = query.filter_by(id=lock_id).first() if not result: raise exception.ResourceLockNotFound(lock_id=lock_id) return result @require_context @context_manager.writer def resource_lock_create(context, kwargs): """Create a resource lock.""" values = copy.deepcopy(kwargs) lock_ref = models.ResourceLock() if not values.get('id'): values['id'] = uuidutils.generate_uuid() lock_ref.update(values) context.session.add(lock_ref) return _resource_lock_get(context, lock_ref['id']) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @context_manager.writer def resource_lock_update(context, lock_id, kwargs): """Update a resource lock.""" lock_ref = _resource_lock_get(context, lock_id) lock_ref.update(kwargs) lock_ref.save(session=context.session) return lock_ref @require_context @context_manager.writer def resource_lock_delete(context, lock_id): """Delete a resource lock.""" lock_ref = _resource_lock_get(context, lock_id) lock_ref.soft_delete(session=context.session) @require_context @context_manager.reader def resource_lock_get(context, lock_id): """Retrieve a resource lock.""" return _resource_lock_get(context, lock_id) @require_context @context_manager.reader def resource_lock_get_all(context, filters=None, limit=None, offset=None, sort_key='created_at', sort_dir='desc', show_count=False): """Retrieve all resource locks. If no sort parameters are specified then the returned locks are sorted by the 'created_at' key in descending order. :param context: context to query under :param limit: maximum number of items to return :param offset: the number of items to skip from the marker or from the first element. :param sort_key: attributes by which results should be sorted. 
:param sort_dir: directions in which results should be sorted. :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see exact_filter function for more information :returns: list of matching resource locks """ locks = models.ResourceLock # add policy check to allow: all_projects, project_id filters filters = filters or {} query = model_query(context, locks, read_deleted="no") project_id = filters.get('project_id') all_projects = filters.get('all_projects') or filters.get('all_tenants') if project_id is None and not all_projects: filters['project_id'] = context.project_id legal_filter_keys = ('id', 'user_id', 'resource_id', 'resource_type', 'lock_context', 'resource_action', 'created_since', 'created_before', 'lock_reason', 'lock_reason~', 'project_id') query = exact_filter(query, locks, filters, legal_filter_keys) count = query.count() if show_count else None query = utils.paginate_query(query, locks, limit, sort_key=sort_key, sort_dir=sort_dir, offset=offset) return query.all(), count ############################### @require_context @context_manager.reader def encryption_keys_get_count(context, filters=None): if filters: project_id = filters.get('project_id') else: project_id = context.project_id query = model_query( context, models.EncryptionRef, read_deleted="no" ).filter_by( project_id=project_id, ) return query.count() @require_context @context_manager.reader def encryption_keys_get_all(context, filters=None): if filters: project_id = filters.get('project_id') else: project_id = context.project_id query = model_query( context, models.EncryptionRef, read_deleted="no" ).filter_by( project_id=project_id, ) encryption_key_ref = filters.get('encryption_key_ref') if encryption_key_ref: query = query.filter_by(encryption_key_ref=encryption_key_ref) return query.all() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315554.0 manila-21.0.0/manila/db/sqlalchemy/models.py0000664000175000017500000017222700000000000020716 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for Manila data. """ from oslo_config import cfg from oslo_db.sqlalchemy import models from sqlalchemy import Column, Integer, String, schema from sqlalchemy import orm from sqlalchemy import ForeignKey, DateTime, Boolean, Enum from sqlalchemy_utils import generic_repr from manila.common import constants CONF = cfg.CONF BASE = orm.declarative_base() @generic_repr class ManilaBase(models.ModelBase, models.TimestampMixin, models.SoftDeleteMixin): """Base class for Manila Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} metadata = None def to_dict(self): model_dict = {} for k, v in self.items(): if not issubclass(type(v), ManilaBase): model_dict[k] = v return model_dict def soft_delete(self, session, update_status=False, status_field_name='status'): """Mark this object as deleted.""" if update_status: setattr(self, status_field_name, constants.STATUS_DELETED) return super(ManilaBase, self).soft_delete(session) class Service(BASE, ManilaBase): """Represents a running service on a host.""" __tablename__ = 
'services' id = Column(Integer, primary_key=True) host = Column(String(255)) # , ForeignKey('hosts.id')) binary = Column(String(255)) topic = Column(String(255)) state = Column(String(36)) report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) disabled_reason = Column(String(255)) availability_zone_id = Column(String(36), ForeignKey('availability_zones.id'), nullable=True) ensuring = Column(Boolean, default=False) availability_zone = orm.relationship( "AvailabilityZone", lazy='immediate', primaryjoin=( 'and_(' 'Service.availability_zone_id == ' 'AvailabilityZone.id, ' 'AvailabilityZone.deleted == \'False\')' ) ) class Quota(BASE, ManilaBase): """Represents a single quota override for a project. If there is no row for a given project id and resource, then the default for the quota class is used. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. 
""" __tablename__ = 'quotas' id = Column(Integer, primary_key=True) project_id = Column(String(255), index=True) resource = Column(String(255)) hard_limit = Column(Integer, nullable=True) class ProjectUserQuota(BASE, ManilaBase): """Represents a single quota override for a user with in a project.""" __tablename__ = 'project_user_quotas' id = Column(Integer, primary_key=True, nullable=False) project_id = Column(String(255), nullable=False) user_id = Column(String(255), nullable=False) resource = Column(String(255), nullable=False) hard_limit = Column(Integer) class ProjectShareTypeQuota(BASE, ManilaBase): """Represents a single quota override for a share type within a project.""" __tablename__ = 'project_share_type_quotas' id = Column(Integer, primary_key=True, nullable=False) project_id = Column(String(255), nullable=False) share_type_id = Column( String(36), ForeignKey('share_types.id'), nullable=False) resource = Column(String(255), nullable=False) hard_limit = Column(Integer) class QuotaClass(BASE, ManilaBase): """Represents a single quota override for a quota class. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. 
""" __tablename__ = 'quota_classes' id = Column(Integer, primary_key=True) class_name = Column(String(255), index=True) resource = Column(String(255)) hard_limit = Column(Integer, nullable=True) class QuotaUsage(BASE, ManilaBase): """Represents the current usage for a given resource.""" __tablename__ = 'quota_usages' id = Column(Integer, primary_key=True) project_id = Column(String(255), index=True) user_id = Column(String(255)) share_type_id = Column(String(36)) resource = Column(String(255)) in_use = Column(Integer) reserved = Column(Integer) @property def total(self): return self.in_use + self.reserved until_refresh = Column(Integer, nullable=True) class Reservation(BASE, ManilaBase): """Represents a resource reservation for quotas.""" __tablename__ = 'reservations' id = Column(Integer, primary_key=True) uuid = Column(String(36), nullable=False) usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) project_id = Column(String(255), index=True) user_id = Column(String(255)) share_type_id = Column(String(36)) resource = Column(String(255)) delta = Column(Integer) expire = Column(DateTime, nullable=False) class Share(BASE, ManilaBase): """Represents an NFS and CIFS shares.""" __tablename__ = 'shares' _extra_keys = ['name', 'export_location', 'export_locations', 'status', 'host', 'share_server_id', 'share_network_id', 'availability_zone', 'access_rules_status', 'share_type_id'] @property def name(self): return CONF.share_name_template % self.id @property def export_location(self): if len(self.instances) > 0: return self.instance.export_location @property def encryption_key_ref(self): if len(self.instances) > 0: return self.instance.encryption_key_ref @property def is_busy(self): # Make sure share is not busy, i.e., not part of a migration if self.task_state in constants.BUSY_TASK_STATES: return True return False @property def export_locations(self): # TODO(gouthamr): Return AZ specific export locations for replicated # shares. 
# NOTE(gouthamr): For a replicated share, export locations of the # 'active' instances are chosen, if 'available'. all_export_locations = [] select_instances = list(filter( lambda x: x['replica_state'] == constants.REPLICA_STATE_ACTIVE, self.instances)) or self.instances for instance in select_instances: if instance['status'] == constants.STATUS_AVAILABLE: for export_location in instance.export_locations: all_export_locations.append(export_location['path']) return all_export_locations def __getattr__(self, item): proxified_properties = ('status', 'host', 'share_server_id', 'share_network_id', 'availability_zone', 'share_type_id', 'share_type') if item in proxified_properties: return getattr(self.instance, item, None) raise AttributeError(item) @property def share_server_id(self): return self.__getattr__('share_server_id') @property def has_replicas(self): if len(self.instances) > 1: # NOTE(gouthamr): The 'primary' instance of a replicated share # has a 'replica_state' set to 'active'. Only the secondary replica # instances need to be regarded as true 'replicas' by users. replicas = (list(filter(lambda x: x['replica_state'] is not None, self.instances))) return len(replicas) > 1 return False @property def progress(self): if len(self.instances) > 0: return self.instance.progress @property def instance(self): # NOTE(gouthamr): The order of preference: status 'replication_change', # followed by 'available' and 'error'. If replicated share and # not undergoing a 'replication_change', only 'active' instances are # preferred. 
result = None if len(self.instances) > 0: order = (constants.STATUS_REVERTING, constants.STATUS_REPLICATION_CHANGE, constants.STATUS_MIGRATING, constants.STATUS_SERVER_MIGRATING, constants.STATUS_AVAILABLE, constants.STATUS_ERROR, ) other_statuses = ( [x['status'] for x in self.instances if x['status'] not in order and x['status'] not in constants.TRANSITIONAL_STATUSES] ) order = (order + tuple(other_statuses) + constants.TRANSITIONAL_STATUSES) sorted_instances = sorted( self.instances, key=lambda x: order.index(x['status'])) select_instances = sorted_instances if (select_instances[0]['status'] != constants.STATUS_REPLICATION_CHANGE): select_instances = ( list(filter(lambda x: x['replica_state'] == constants.REPLICA_STATE_ACTIVE, sorted_instances)) or sorted_instances ) result = select_instances[0] return result @property def access_rules_status(self): return get_access_rules_status(self.instances) id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') user_id = Column(String(255)) project_id = Column(String(255)) size = Column(Integer) display_name = Column(String(255)) display_description = Column(String(255)) snapshot_id = Column(String(36)) source_backup_id = Column(String(36)) snapshot_support = Column(Boolean, default=True) create_share_from_snapshot_support = Column(Boolean, default=True) revert_to_snapshot_support = Column(Boolean, default=False) replication_type = Column(String(255), nullable=True) mount_snapshot_support = Column(Boolean, default=False) share_proto = Column(String(255)) is_public = Column(Boolean, default=False) share_group_id = Column(String(36), ForeignKey('share_groups.id'), nullable=True) source_share_group_snapshot_member_id = Column(String(36), nullable=True) task_state = Column(String(255)) is_soft_deleted = Column(Boolean, default=False) scheduled_to_be_deleted_at = Column(DateTime) instances = orm.relationship( "ShareInstance", lazy='subquery', primaryjoin=( 'and_(' 'Share.id == 
ShareInstance.share_id, ' 'ShareInstance.deleted == "False")' ), viewonly=True, join_depth=2, ) class ShareInstance(BASE, ManilaBase): __tablename__ = 'share_instances' _extra_keys = ['name', 'export_location', 'availability_zone', 'replica_state'] _proxified_properties = ('user_id', 'project_id', 'size', 'display_name', 'display_description', 'snapshot_id', 'share_proto', 'is_public', 'share_group_id', 'replication_type', 'source_share_group_snapshot_member_id', 'mount_snapshot_support') def set_share_data(self, share): for share_property in self._proxified_properties: setattr(self, share_property, share[share_property]) @property def name(self): return CONF.share_name_template % self.id @property def export_location(self): if len(self.export_locations) > 0: return self.export_locations[0]['path'] @property def availability_zone(self): if self._availability_zone: return self._availability_zone['name'] id = Column(String(36), primary_key=True) share_id = Column(String(36), ForeignKey('shares.id')) deleted = Column(String(36), default='False') host = Column(String(255)) status = Column(String(255)) progress = Column(String(32)) mount_point_name = Column(String(255)) ACCESS_STATUS_PRIORITIES = { constants.STATUS_ACTIVE: 0, constants.SHARE_INSTANCE_RULES_SYNCING: 1, constants.SHARE_INSTANCE_RULES_ERROR: 2, } access_rules_status = Column(Enum(constants.STATUS_ACTIVE, constants.SHARE_INSTANCE_RULES_SYNCING, constants.SHARE_INSTANCE_RULES_ERROR), default=constants.STATUS_ACTIVE) scheduled_at = Column(DateTime) launched_at = Column(DateTime) terminated_at = Column(DateTime) encryption_key_ref = Column(String(36), nullable=True) replica_state = Column(String(255), nullable=True) cast_rules_to_readonly = Column(Boolean, default=False, nullable=False) share_type_id = Column(String(36), ForeignKey('share_types.id'), nullable=True) availability_zone_id = Column(String(36), ForeignKey('availability_zones.id'), nullable=True) _availability_zone = orm.relationship( 
"AvailabilityZone", lazy='subquery', foreign_keys=availability_zone_id, primaryjoin=( 'and_(' 'ShareInstance.availability_zone_id == ' 'AvailabilityZone.id, ' 'AvailabilityZone.deleted == \'False\')' ) ) export_locations = orm.relationship( "ShareInstanceExportLocations", lazy='joined', backref=orm.backref('share_instance', lazy='joined'), primaryjoin=( 'and_(' 'ShareInstance.id == ' 'ShareInstanceExportLocations.share_instance_id, ' 'ShareInstanceExportLocations.deleted == 0)' ) ) share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=True) share_server_id = Column(String(36), ForeignKey('share_servers.id'), nullable=True) share_type = orm.relationship( "ShareTypes", lazy='subquery', foreign_keys=share_type_id, primaryjoin='and_(' 'ShareInstance.share_type_id == ShareTypes.id, ' 'ShareTypes.deleted == "False")') share = orm.relationship( 'Share', foreign_keys=share_id, primaryjoin='ShareInstance.share_id == Share.id' ) class ShareInstanceExportLocations(BASE, ManilaBase): """Represents export locations of share instances.""" __tablename__ = 'share_instance_export_locations' _extra_keys = ['el_metadata', ] @property def el_metadata(self): el_metadata = {} for meta in self._el_metadata_bare: # pylint: disable=no-member el_metadata[meta['key']] = meta['value'] return el_metadata @property def replica_state(self): return self.share_instance['replica_state'] id = Column(Integer, primary_key=True) uuid = Column(String(36), nullable=False, unique=True) share_instance_id = Column( String(36), ForeignKey('share_instances.id'), nullable=False) path = Column(String(2000)) is_admin_only = Column(Boolean, default=False, nullable=False) class ShareInstanceExportLocationsMetadata(BASE, ManilaBase): """Represents export location metadata of share instances.""" __tablename__ = "share_instance_export_locations_metadata" _extra_keys = ['export_location_uuid', ] id = Column(Integer, primary_key=True) export_location_id = Column( Integer, 
ForeignKey("share_instance_export_locations.id"), nullable=False) key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) export_location = orm.relationship( ShareInstanceExportLocations, backref="_el_metadata_bare", foreign_keys=export_location_id, lazy='immediate', primaryjoin="and_(" "%(cls_name)s.export_location_id == " "ShareInstanceExportLocations.id," "%(cls_name)s.deleted == 0)" % { "cls_name": "ShareInstanceExportLocationsMetadata"}) @property def export_location_uuid(self): return self.export_location.uuid # pylint: disable=no-member class ShareTypes(BASE, ManilaBase): """Represent possible share_types of volumes offered.""" __tablename__ = "share_types" id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') name = Column(String(255)) description = Column(String(255)) is_public = Column(Boolean, default=True) class ShareTypeProjects(BASE, ManilaBase): """Represent projects associated share_types.""" __tablename__ = "share_type_projects" __table_args__ = (schema.UniqueConstraint( "share_type_id", "project_id", "deleted", name="uniq_share_type_projects0share_type_id0project_id0deleted"), ) id = Column(Integer, primary_key=True) share_type_id = Column(Integer, ForeignKey('share_types.id'), nullable=False) project_id = Column(String(255)) share_type = orm.relationship( ShareTypes, backref="projects", foreign_keys=share_type_id, primaryjoin='and_(' 'ShareTypeProjects.share_type_id == ShareTypes.id,' 'ShareTypeProjects.deleted == 0)') class ShareTypeExtraSpecs(BASE, ManilaBase): """Represents additional specs as key/value pairs for a share_type.""" __tablename__ = 'share_type_extra_specs' id = Column(Integer, primary_key=True) key = Column("spec_key", String(255)) value = Column("spec_value", String(255)) share_type_id = Column(String(36), ForeignKey('share_types.id'), nullable=False) share_type = orm.relationship( ShareTypes, backref="extra_specs", foreign_keys=share_type_id, primaryjoin='and_(' 
'ShareTypeExtraSpecs.share_type_id == ShareTypes.id,' 'ShareTypeExtraSpecs.deleted == 0)' ) class ShareMetadata(BASE, ManilaBase): """Represents a metadata key/value pair for a share.""" __tablename__ = 'share_metadata' id = Column(Integer, primary_key=True) key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) share_id = Column(String(36), ForeignKey('shares.id'), nullable=False) share = orm.relationship(Share, backref="share_metadata", foreign_keys=share_id, primaryjoin='and_(' 'ShareMetadata.share_id == Share.id,' 'ShareMetadata.deleted == 0)') class ShareAccessMapping(BASE, ManilaBase): """Represents access to share.""" __tablename__ = 'share_access_map' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') share_id = Column(String(36), ForeignKey('shares.id')) access_type = Column(String(255)) access_to = Column(String(255)) access_key = Column(String(255), nullable=True) access_level = Column(Enum(*constants.ACCESS_LEVELS), default=constants.ACCESS_LEVEL_RW) @property def state(self): """Get the aggregated 'state' from all the instance mapping states. An access rule is supposed to be truly 'active' when it has been applied across all of the share instances of the parent share object. 
""" return get_aggregated_access_rules_state(self.instance_mappings) instance_mappings = orm.relationship( "ShareInstanceAccessMapping", lazy='immediate', primaryjoin=( 'and_(' 'ShareAccessMapping.id == ' 'ShareInstanceAccessMapping.access_id, ' 'ShareInstanceAccessMapping.deleted == "False")' ) ) share = orm.relationship( "Share", primaryjoin=( 'and_(' 'ShareAccessMapping.share_id == ' 'Share.id, ' 'Share.deleted == "False")' ) ) class ShareAccessRulesMetadata(BASE, ManilaBase): """Represents a metadata key/value pair for a share access rule.""" __tablename__ = 'share_access_rules_metadata' id = Column(Integer, primary_key=True) deleted = Column(String(36), default='False') key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) access_id = Column(String(36), ForeignKey('share_access_map.id'), nullable=False) access = orm.relationship( ShareAccessMapping, backref="share_access_rules_metadata", foreign_keys=access_id, lazy='immediate', primaryjoin='and_(' 'ShareAccessRulesMetadata.access_id == ShareAccessMapping.id,' 'ShareAccessRulesMetadata.deleted == "False")') class ShareInstanceAccessMapping(BASE, ManilaBase): """Represents access to individual share instances.""" __tablename__ = 'share_instance_access_map' _proxified_properties = ('share_id', 'access_type', 'access_key', 'access_to', 'access_level') def set_share_access_data(self, share_access): for share_access_attr in self._proxified_properties: setattr(self, share_access_attr, share_access[share_access_attr]) id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') share_instance_id = Column(String(36), ForeignKey('share_instances.id')) access_id = Column(String(36), ForeignKey('share_access_map.id')) state = Column(String(255), default=constants.ACCESS_STATE_QUEUED_TO_APPLY) instance = orm.relationship( "ShareInstance", lazy='immediate', primaryjoin=( 'and_(' 'ShareInstanceAccessMapping.share_instance_id == ' 'ShareInstance.id, ' 
'ShareInstanceAccessMapping.deleted == "False")' ) ) class ShareSnapshot(BASE, ManilaBase): """Represents a snapshot of a share.""" __tablename__ = 'share_snapshots' _extra_keys = ['name', 'share_name', 'status', 'progress', 'provider_location', 'aggregate_status'] def __getattr__(self, item): proxified_properties = ('status', 'progress', 'provider_location') if item in proxified_properties: return getattr(self.instance, item, None) raise AttributeError(item) @property def export_locations(self): # TODO(gouthamr): Return AZ specific export locations for replicated # snapshots. # NOTE(gouthamr): For a replicated snapshot, export locations of the # 'active' instances are chosen, if 'available'. all_export_locations = [] select_instances = list(filter( lambda x: (x['share_instance']['replica_state'] == constants.REPLICA_STATE_ACTIVE), self.instances)) or self.instances for instance in select_instances: if instance['status'] == constants.STATUS_AVAILABLE: for export_location in instance.export_locations: all_export_locations.append(export_location) return all_export_locations @property def name(self): return CONF.share_snapshot_name_template % self.id @property def share_name(self): return CONF.share_name_template % self.share_id @property def instance(self): result = None if len(self.instances) > 0: def qualified_replica(x): if x is None: return False else: preferred_statuses = (constants.REPLICA_STATE_ACTIVE,) return x['replica_state'] in preferred_statuses replica_snapshots = list(filter( lambda x: qualified_replica(x.share_instance), self.instances)) migrating_snapshots = list(filter( lambda x: x.share_instance['status'] in ( constants.STATUS_MIGRATING, constants.STATUS_SERVER_MIGRATING), self.instances)) snapshot_instances = (replica_snapshots or migrating_snapshots or self.instances) result = snapshot_instances[0] return result @property def aggregate_status(self): """Get the aggregated 'status' of all instances. 
A snapshot is supposed to be truly 'available' when it is available across all of the share instances of the parent share object. In case of replication, we only consider replicas (share instances) that are in 'in_sync' replica_state. """ def qualified_replica(x): if x is None: return False else: preferred_statuses = (constants.REPLICA_STATE_ACTIVE, constants.REPLICA_STATE_IN_SYNC) return x['replica_state'] in preferred_statuses replica_snapshots = list(filter( lambda x: qualified_replica(x['share_instance']), self.instances)) if not replica_snapshots: return self.status order = (constants.STATUS_DELETING, constants.STATUS_CREATING, constants.STATUS_ERROR, constants.STATUS_MIGRATING, constants.STATUS_SERVER_MIGRATING, constants.STATUS_AVAILABLE) other_statuses = [x['status'] for x in self.instances if x['status'] not in order] order = (order + tuple(other_statuses)) sorted_instances = sorted( replica_snapshots, key=lambda x: order.index(x['status'])) return sorted_instances[0].status id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') user_id = Column(String(255)) project_id = Column(String(255)) share_id = Column(String(36)) size = Column(Integer) display_name = Column(String(255)) display_description = Column(String(255)) share_size = Column(Integer) share_proto = Column(String(255)) share = orm.relationship(Share, backref="snapshots", foreign_keys=share_id, primaryjoin='and_(' 'ShareSnapshot.share_id == Share.id,' 'ShareSnapshot.deleted == "False")') class ShareSnapshotMetadata(BASE, ManilaBase): """Represents a metadata key/value pair for a snapshot.""" __tablename__ = 'share_snapshot_metadata' id = Column(Integer, primary_key=True) key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) deleted = Column(String(36), default='False') share_snapshot_id = Column(String(36), ForeignKey( 'share_snapshots.id'), nullable=False) share_snapshot = orm.relationship( ShareSnapshot, 
backref="share_snapshot_metadata", foreign_keys=share_snapshot_id, primaryjoin='and_(' 'ShareSnapshotMetadata.share_snapshot_id == ShareSnapshot.id,' 'ShareSnapshotMetadata.deleted == "False")') class ShareSnapshotInstance(BASE, ManilaBase): """Represents a snapshot of a share.""" __tablename__ = 'share_snapshot_instances' _extra_keys = ['name', 'share_id', 'share_name'] @property def name(self): return CONF.share_snapshot_name_template % self.id @property def share_name(self): return CONF.share_name_template % self.share_instance_id @property def share_id(self): # NOTE(u_glide): This property required for compatibility # with share drivers return self.share_instance_id @property def size(self): # NOTE(silvacarlose) for backwards compatibility if self.instance_size is None: return self.snapshot.size else: return self.instance_size id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') snapshot_id = Column(String(36), nullable=True) share_instance_id = Column( String(36), ForeignKey('share_instances.id'), nullable=False) status = Column(String(255)) progress = Column(String(255)) provider_location = Column(String(255)) share_proto = Column(String(255)) instance_size = Column('size', Integer) share_group_snapshot_id = Column(String(36), nullable=True) user_id = Column(String(255)) project_id = Column(String(255)) export_locations = orm.relationship( "ShareSnapshotInstanceExportLocation", lazy='immediate', primaryjoin=( 'and_(' 'ShareSnapshotInstance.id == ' 'ShareSnapshotInstanceExportLocation.share_snapshot_instance_id, ' 'ShareSnapshotInstanceExportLocation.deleted == "False")' ) ) share_instance = orm.relationship( ShareInstance, backref="snapshot_instances", lazy='immediate', primaryjoin=( 'and_(' 'ShareSnapshotInstance.share_instance_id == ShareInstance.id,' 'ShareSnapshotInstance.deleted == "False")') ) snapshot = orm.relationship( "ShareSnapshot", lazy="immediate", foreign_keys=snapshot_id, backref="instances", primaryjoin=( 
'and_(' 'ShareSnapshot.id == ShareSnapshotInstance.snapshot_id, ' 'ShareSnapshotInstance.deleted == "False")' ), viewonly=True, join_depth=2, sync_backref=False, ) share_group_snapshot = orm.relationship( "ShareGroupSnapshot", lazy="immediate", foreign_keys=share_group_snapshot_id, backref="share_group_snapshot_members", primaryjoin=('ShareGroupSnapshot.id == ' 'ShareSnapshotInstance.share_group_snapshot_id'), viewonly=True, join_depth=2, sync_backref=False, ) class ShareSnapshotAccessMapping(BASE, ManilaBase): """Represents access to share snapshot.""" __tablename__ = 'share_snapshot_access_map' @property def state(self): """Get the aggregated 'state' from all the instance mapping states. An access rule is supposed to be truly 'active' when it has been applied across all of the share snapshot instances of the parent share snapshot object. """ return get_aggregated_access_rules_state(self.instance_mappings) id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') share_snapshot_id = Column(String(36), ForeignKey('share_snapshots.id')) access_type = Column(String(255)) access_to = Column(String(255)) instance_mappings = orm.relationship( "ShareSnapshotInstanceAccessMapping", lazy='immediate', primaryjoin=( 'and_(' 'ShareSnapshotAccessMapping.id == ' 'ShareSnapshotInstanceAccessMapping.access_id, ' 'ShareSnapshotInstanceAccessMapping.deleted == "False")' ) ) class ShareSnapshotInstanceAccessMapping(BASE, ManilaBase): """Represents access to individual share snapshot instances.""" __tablename__ = 'share_snapshot_instance_access_map' _proxified_properties = ('share_snapshot_id', 'access_type', 'access_to') def set_snapshot_access_data(self, snapshot_access): for snapshot_access_attr in self._proxified_properties: setattr(self, snapshot_access_attr, snapshot_access[snapshot_access_attr]) id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') share_snapshot_instance_id = Column(String(36), ForeignKey( 
'share_snapshot_instances.id')) access_id = Column(String(36), ForeignKey('share_snapshot_access_map.id')) state = Column(Enum(*constants.ACCESS_RULES_STATES), default=constants.ACCESS_STATE_QUEUED_TO_APPLY) instance = orm.relationship( "ShareSnapshotInstance", lazy='immediate', primaryjoin=( 'and_(' 'ShareSnapshotInstanceAccessMapping.share_snapshot_instance_id == ' 'ShareSnapshotInstance.id, ' 'ShareSnapshotInstanceAccessMapping.deleted == "False")' ) ) class ShareSnapshotInstanceExportLocation(BASE, ManilaBase): """Represents export locations of share snapshot instances.""" __tablename__ = 'share_snapshot_instance_export_locations' id = Column(String(36), primary_key=True) share_snapshot_instance_id = Column( String(36), ForeignKey('share_snapshot_instances.id'), nullable=False) path = Column(String(2000)) is_admin_only = Column(Boolean, default=False, nullable=False) deleted = Column(String(36), default='False') class SecurityService(BASE, ManilaBase): """Security service information for manila shares.""" __tablename__ = 'security_services' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') project_id = Column(String(255), nullable=False) type = Column(String(32), nullable=False) dns_ip = Column(String(64), nullable=True) server = Column(String(255), nullable=True) domain = Column(String(255), nullable=True) user = Column(String(255), nullable=True) password = Column(String(255), nullable=True) name = Column(String(255), nullable=True) description = Column(String(255), nullable=True) ou = Column(String(255), nullable=True) default_ad_site = Column(String(255), nullable=True) class ShareNetwork(BASE, ManilaBase): """Represents network data used by share.""" __tablename__ = 'share_networks' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') project_id = Column(String(255), nullable=False) user_id = Column(String(255), nullable=False) name = Column(String(255), nullable=True) 
description = Column(String(255), nullable=True) status = Column(Enum( constants.STATUS_NETWORK_ACTIVE, constants.STATUS_NETWORK_ERROR, constants.STATUS_NETWORK_CHANGE), default=constants.STATUS_NETWORK_ACTIVE) security_services = orm.relationship( "SecurityService", secondary="share_network_security_service_association", backref="share_networks", primaryjoin='and_(' 'ShareNetwork.id == ' 'ShareNetworkSecurityServiceAssociation.share_network_id,' 'ShareNetworkSecurityServiceAssociation.deleted == 0,' 'ShareNetwork.deleted == "False")', secondaryjoin='and_(' 'SecurityService.id == ' 'ShareNetworkSecurityServiceAssociation.security_service_id,' 'SecurityService.deleted == "False")') share_instances = orm.relationship( "ShareInstance", backref=orm.backref('share_network'), primaryjoin='and_(' 'ShareNetwork.id == ShareInstance.share_network_id,' 'ShareInstance.deleted == "False")') share_network_subnets = orm.relationship( "ShareNetworkSubnet", lazy='joined', backref=orm.backref('share_network', lazy='joined'), primaryjoin='and_' '(ShareNetwork.id == ShareNetworkSubnet.share_network_id,' 'ShareNetworkSubnet.deleted == "False")') @property def security_service_update_support(self): share_servers_support_updating = [] for network_subnet in self.share_network_subnets: for server in network_subnet['share_servers']: share_servers_support_updating.append( server['security_service_update_support']) # NOTE(carloss): all share servers within this share network must # support updating security services in order to have this property # set to True. 
return all(share_servers_support_updating) @property def network_allocation_update_support(self): share_servers_support_updating = [] for network_subnet in self.share_network_subnets: for server in network_subnet['share_servers']: share_servers_support_updating.append( server['network_allocation_update_support']) # NOTE(felipe_rodrigues): all share servers within this share network # must support updating in order to have this property set to True. return all(share_servers_support_updating) class ShareNetworkSubnet(BASE, ManilaBase): """Represents a share network subnet used by some resources.""" _extra_keys = ['availability_zone', 'subnet_metadata'] __tablename__ = 'share_network_subnets' id = Column(String(36), primary_key=True, nullable=False) neutron_net_id = Column(String(36), nullable=True) neutron_subnet_id = Column(String(36), nullable=True) network_type = Column(String(32), nullable=True) cidr = Column(String(64), nullable=True) segmentation_id = Column(Integer, nullable=True) gateway = Column(String(64), nullable=True) mtu = Column(Integer, nullable=True) deleted = Column(String(36), default='False') share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=False) ip_version = Column(Integer, nullable=True) availability_zone_id = Column( String(36), ForeignKey('availability_zones.id'), nullable=True) share_servers = orm.relationship( "ShareServer", secondary="share_server_share_network_subnet_mappings", backref="share_network_subnets", lazy='immediate', primaryjoin="and_(ShareNetworkSubnet.id == " "%(cls_name)s.share_network_subnet_id, " "%(cls_name)s.deleted == 0)" % { "cls_name": "ShareServerShareNetworkSubnetMapping"}, secondaryjoin='and_(' 'ShareServer.id == ' 'ShareServerShareNetworkSubnetMapping.share_server_id,' 'ShareServerShareNetworkSubnetMapping.deleted == 0)' ) _availability_zone = orm.relationship( "AvailabilityZone", lazy='immediate', foreign_keys=availability_zone_id, primaryjoin=( "and_(" 
"ShareNetworkSubnet.availability_zone_id == AvailabilityZone.id, " "AvailabilityZone.deleted == 'False')")) @property def availability_zone(self): if self._availability_zone: return self._availability_zone['name'] @property def is_default(self): return self.availability_zone_id is None @property def share_network_name(self): return self.share_network['name'] @property def subnet_metadata(self): metadata_dict = {} metadata_list = ( self.share_network_subnet_metadata) # pylint: disable=no-member for meta in metadata_list: metadata_dict[meta['key']] = meta['value'] return metadata_dict @property def project_id(self): return self.share_network['project_id'] class ShareNetworkSubnetMetadata(BASE, ManilaBase): """Represents a metadata key/value pair for a subnet.""" __tablename__ = 'share_network_subnet_metadata' id = Column(Integer, primary_key=True) key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) deleted = Column(String(36), default='False') share_network_subnet_id = Column(String(36), ForeignKey( 'share_network_subnets.id'), nullable=False) share_network_subnet = orm.relationship( ShareNetworkSubnet, backref=orm.backref('share_network_subnet_metadata', lazy='immediate'), foreign_keys=share_network_subnet_id, primaryjoin='and_(' 'ShareNetworkSubnetMetadata.share_network_subnet_id == ' 'ShareNetworkSubnet.id,' 'ShareNetworkSubnetMetadata.deleted == "False")') class ShareServer(BASE, ManilaBase): """Represents share server used by share.""" __tablename__ = 'share_servers' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') host = Column(String(255), nullable=False) is_auto_deletable = Column(Boolean, default=True) identifier = Column(String(255), nullable=True) task_state = Column(String(255), nullable=True) source_share_server_id = Column(String(36), ForeignKey('share_servers.id'), nullable=True) security_service_update_support = Column( Boolean, nullable=False, default=False) 
network_allocation_update_support = Column( Boolean, nullable=False, default=False) share_replicas_migration_support = Column( Boolean, nullable=False, default=False) encryption_key_ref = Column(String(36), nullable=True) application_credential_id = Column(String(36), nullable=True) status = Column(Enum( constants.STATUS_INACTIVE, constants.STATUS_ACTIVE, constants.STATUS_ERROR, constants.STATUS_DELETING, constants.STATUS_CREATING, constants.STATUS_DELETED, constants.STATUS_MANAGING, constants.STATUS_UNMANAGING, constants.STATUS_UNMANAGE_ERROR, constants.STATUS_MANAGE_ERROR, constants.STATUS_SERVER_MIGRATING, constants.STATUS_SERVER_MIGRATING_TO, constants.STATUS_SERVER_NETWORK_CHANGE), default=constants.STATUS_INACTIVE) network_allocations = orm.relationship( "NetworkAllocation", primaryjoin='and_(' 'ShareServer.id == NetworkAllocation.share_server_id,' 'NetworkAllocation.deleted == "False")') share_instances = orm.relationship( "ShareInstance", backref='share_server', primaryjoin='and_(' 'ShareServer.id == ShareInstance.share_server_id,' 'ShareInstance.deleted == "False")') share_groups = orm.relationship( "ShareGroup", backref='share_server', primaryjoin='and_(' 'ShareServer.id == ShareGroup.share_server_id,' 'ShareGroup.deleted == "False")') _backend_details = orm.relationship( "ShareServerBackendDetails", lazy='immediate', viewonly=True, primaryjoin='and_(' 'ShareServer.id == ' 'ShareServerBackendDetails.share_server_id, ' 'ShareServerBackendDetails.deleted == "False")') _share_network_subnet_ids = orm.relationship( "ShareServerShareNetworkSubnetMapping", lazy='immediate', viewonly=True, primaryjoin='and_(' 'ShareServer.id == ' 'ShareServerShareNetworkSubnetMapping.share_server_id,' 'ShareServerShareNetworkSubnetMapping.deleted == 0)') @property def backend_details(self): return {model['key']: model['value'] for model in self._backend_details} @property def share_network_subnet_ids(self): return [model['share_network_subnet_id'] for model in 
self._share_network_subnet_ids] @property def share_network_id(self): return (self.share_network_subnets[0]['share_network_id'] if self.share_network_subnets else None) _extra_keys = ['backend_details', 'share_network_subnet_ids'] class EncryptionRef(BASE, ManilaBase): """Represents a share server with encryption keys.""" __tablename__ = 'encryption_refs' id = Column(String(36), primary_key=True, nullable=False) share_server_id = Column( String(36), ForeignKey('share_servers.id'), unique=True) share_instance_id = Column( String(36), ForeignKey('share_instances.id'), unique=True) encryption_key_ref = Column(String(36), nullable=True) project_id = Column(String(255), nullable=True) deleted = Column(String(36), default='False') share_server = orm.relationship( ShareServer, backref=orm.backref( 'server_encryption_ref_entry', lazy='joined', uselist=False), foreign_keys=share_server_id, primaryjoin='and_(' 'EncryptionRef.share_server_id == ShareServer.id,' 'EncryptionRef.deleted == "False")' ) share_instance = orm.relationship( ShareInstance, backref=orm.backref( 'instance_encryption_ref_entry', lazy='joined', uselist=False), foreign_keys=share_instance_id, primaryjoin='and_(' 'EncryptionRef.share_instance_id == ShareInstance.id,' 'EncryptionRef.deleted == "False")' ) class ShareServerBackendDetails(BASE, ManilaBase): """Represents a metadata key/value pair for a share server.""" __tablename__ = 'share_server_backend_details' deleted = Column(String(36), default='False') id = Column(Integer, primary_key=True) key = Column(String(255), nullable=False) value = Column(String(1023), nullable=False) share_server_id = Column(String(36), ForeignKey('share_servers.id'), nullable=False) class ShareServerShareNetworkSubnetMapping(BASE, ManilaBase): """Represents the Share Server and Share Network Subnet mapping.""" __tablename__ = 'share_server_share_network_subnet_mappings' id = Column(Integer, primary_key=True) share_server_id = Column( String(36), 
ForeignKey('share_servers.id'), nullable=False) share_network_subnet_id = Column( String(36), ForeignKey('share_network_subnets.id'), nullable=False) class ShareNetworkSecurityServiceAssociation(BASE, ManilaBase): """Association table between compute_zones and compute_nodes tables.""" __tablename__ = 'share_network_security_service_association' id = Column(Integer, primary_key=True) share_network_id = Column(String(36), ForeignKey('share_networks.id'), nullable=False) security_service_id = Column(String(36), ForeignKey('security_services.id'), nullable=False) class Transfer(BASE, ManilaBase): """Represents a share transfer request.""" __tablename__ = 'transfers' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') # resource type can be "share" or "share_network" resource_type = Column(String(36), nullable=False) # The uuid of the related resource. resource_id = Column(String(36), nullable=False) display_name = Column(String(255)) salt = Column(String(255)) crypt_hash = Column(String(255)) expires_at = Column(DateTime) source_project_id = Column(String(255), nullable=True) destination_project_id = Column(String(255), nullable=True) accepted = Column(Boolean, default=False) class NetworkAllocation(BASE, ManilaBase): """Represents network allocation data.""" __tablename__ = 'network_allocations' id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') label = Column(String(255), nullable=True) ip_address = Column(String(64), nullable=True) ip_version = Column(Integer, nullable=True) cidr = Column(String(64), nullable=True) gateway = Column(String(64), nullable=True) mtu = Column(Integer, nullable=True) network_type = Column(String(32), nullable=True) segmentation_id = Column(Integer, nullable=True) mac_address = Column(String(32), nullable=True) share_server_id = Column(String(36), ForeignKey('share_servers.id'), nullable=False) # NOTE(felipe_rodrigues): admin allocation 
does not have subnet. share_network_subnet_id = Column( String(36), ForeignKey('share_network_subnets.id'), nullable=True) class DriverPrivateData(BASE, ManilaBase): """Represents a private data as key-value pairs for a driver.""" __tablename__ = 'drivers_private_data' entity_uuid = Column(String(36), nullable=False, primary_key=True) key = Column(String(255), nullable=False, primary_key=True) value = Column(String(1023), nullable=False) class AvailabilityZone(BASE, ManilaBase): """Represents a private data as key-value pairs for a driver.""" __tablename__ = 'availability_zones' __table_args__ = ( schema.UniqueConstraint('name', 'deleted', name='az_name_uc'), ) id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default='False') name = Column(String(255), nullable=False) class ShareGroupTypes(BASE, ManilaBase): """Represent possible share group types of shares offered.""" __tablename__ = "share_group_types" __table_args__ = ( schema.UniqueConstraint( "name", "deleted", name="uniq_share_group_type_name"), ) id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') name = Column(String(255)) is_public = Column(Boolean, default=True) class ShareGroup(BASE, ManilaBase): """Represents a share group.""" __tablename__ = 'share_groups' _extra_keys = [ 'availability_zone', ] id = Column(String(36), primary_key=True) user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) deleted = Column(String(36), default='False') host = Column(String(255)) name = Column(String(255)) description = Column(String(255)) status = Column(String(255)) source_share_group_snapshot_id = Column(String(36)) share_network_id = Column( String(36), ForeignKey('share_networks.id'), nullable=True) share_server_id = Column( String(36), ForeignKey('share_servers.id'), nullable=True) share_group_type_id = Column( String(36), ForeignKey('share_group_types.id'), nullable=True) availability_zone_id = Column( 
String(36), ForeignKey('availability_zones.id'), nullable=True) consistent_snapshot_support = Column(Enum('pool', 'host'), default=None) share_group_type = orm.relationship( ShareGroupTypes, backref="share_groups", foreign_keys=share_group_type_id, primaryjoin="and_(" "ShareGroup.share_group_type_id ==" "ShareGroupTypes.id," "ShareGroup.deleted == 'False')") _availability_zone = orm.relationship( "AvailabilityZone", lazy='immediate', foreign_keys=availability_zone_id, primaryjoin=( "and_(" "ShareGroup.availability_zone_id == AvailabilityZone.id, " "AvailabilityZone.deleted == 'False')")) @property def availability_zone(self): if self._availability_zone: return self._availability_zone['name'] class ShareGroupTypeProjects(BASE, ManilaBase): """Represent projects associated share group types.""" __tablename__ = "share_group_type_projects" __table_args__ = (schema.UniqueConstraint( "share_group_type_id", "project_id", "deleted", name=("uniq_share_group_type_projects0share_group_type_id" "0project_id0deleted")), ) id = Column(Integer, primary_key=True) share_group_type_id = Column( String, ForeignKey('share_group_types.id'), nullable=False) project_id = Column(String(255)) share_group_type = orm.relationship( ShareGroupTypes, backref="projects", foreign_keys=share_group_type_id, primaryjoin='and_(' 'ShareGroupTypeProjects.share_group_type_id == ' 'ShareGroupTypes.id,' 'ShareGroupTypeProjects.deleted == 0)') class ShareGroupTypeSpecs(BASE, ManilaBase): """Represents additional specs for a share group type.""" __tablename__ = 'share_group_type_specs' id = Column(Integer, primary_key=True) key = Column("spec_key", String(255)) value = Column("spec_value", String(255)) share_group_type_id = Column( String(36), ForeignKey('share_group_types.id'), nullable=False) share_group_type = orm.relationship( ShareGroupTypes, backref="group_specs", foreign_keys=share_group_type_id, primaryjoin='and_(' 'ShareGroupTypeSpecs.share_group_type_id == ShareGroupTypes.id,' 
'ShareGroupTypeSpecs.deleted == 0)' ) class ShareGroupSnapshot(BASE, ManilaBase): """Represents a share group snapshot.""" __tablename__ = 'share_group_snapshots' id = Column(String(36), primary_key=True) share_group_id = Column(String(36), ForeignKey('share_groups.id')) user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) deleted = Column(String(36), default='False') name = Column(String(255)) description = Column(String(255)) status = Column(String(255)) share_group = orm.relationship( ShareGroup, backref=orm.backref("snapshots", lazy='joined'), foreign_keys=share_group_id, primaryjoin=('and_(' 'ShareGroupSnapshot.share_group_id == ShareGroup.id,' 'ShareGroupSnapshot.deleted == "False")') ) class ShareGroupTypeShareTypeMapping(BASE, ManilaBase): """Represents the share types supported by a share group type.""" __tablename__ = 'share_group_type_share_type_mappings' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') share_group_type_id = Column( String(36), ForeignKey('share_group_types.id'), nullable=False) share_type_id = Column( String(36), ForeignKey('share_types.id'), nullable=False) share_group_type = orm.relationship( ShareGroupTypes, backref="share_types", foreign_keys=share_group_type_id, primaryjoin=('and_(' 'ShareGroupTypeShareTypeMapping.share_group_type_id ' '== ShareGroupTypes.id,' 'ShareGroupTypeShareTypeMapping.deleted == "False")') ) class ShareGroupShareTypeMapping(BASE, ManilaBase): """Represents the share types in a share group.""" __tablename__ = 'share_group_share_type_mappings' id = Column(String(36), primary_key=True) deleted = Column(String(36), default='False') share_group_id = Column( String(36), ForeignKey('share_groups.id'), nullable=False) share_type_id = Column( String(36), ForeignKey('share_types.id'), nullable=False) share_group = orm.relationship( ShareGroup, backref="share_types", foreign_keys=share_group_id, primaryjoin=('and_(' 
'ShareGroupShareTypeMapping.share_group_id ' '== ShareGroup.id,' 'ShareGroupShareTypeMapping.deleted == "False")') ) class Message(BASE, ManilaBase): """Represents a user message. User messages show information about API operations to the API end-user. """ __tablename__ = 'messages' id = Column(String(36), primary_key=True, nullable=False) project_id = Column(String(255), nullable=False) # Info/Error/Warning. message_level = Column(String(255), nullable=False) request_id = Column(String(255), nullable=True) resource_type = Column(String(255)) # The uuid of the related resource. resource_id = Column(String(36), nullable=True) # Operation specific action ID, this ID is mapped # to a message in manila/message/message_field.py action_id = Column(String(10), nullable=False) # After this time the message may no longer exist. expires_at = Column(DateTime, nullable=True) # Message detail ID, this ID is mapped # to a message in manila/message/message_field.py detail_id = Column(String(10), nullable=True) deleted = Column(String(36), default='False') class ResourceLock(BASE, ManilaBase): """Represents a resource lock. Resource locks are held by users (or on behalf of users) and prevent actions to be performed on resources while the lock is present. """ __tablename__ = 'resource_locks' id = Column(String(36), primary_key=True, nullable=False) user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) # If the lock is held on behalf of the user, but created by 'service' or # 'admin' users, as opposed to the user themselves ('project') lock_context = Column(String(10), nullable=False) # The uuid of the resource being locked. 
resource_id = Column(String(36), nullable=False) # The resource type, a constant dict will hold possible values resource_type = Column(Enum(*constants.RESOURCE_LOCK_RESOURCE_TYPES), default=constants.SHARE_RESOURCE_TYPE) # Action that lock prevents, a constant dict will hold possible values resource_action = Column(Enum(*constants.RESOURCE_LOCK_RESOURCE_ACTIONS), default=constants.RESOURCE_ACTION_DELETE) lock_reason = Column(String(1023), nullable=True) deleted = Column(String(36), default='False') class BackendInfo(BASE, ManilaBase): """Represent Backend Info.""" __tablename__ = "backend_info" host = Column(String(255), primary_key=True) info_hash = Column(String(255)) class AsynchronousOperationData(BASE, ManilaBase): """Represents data as key-value pairs for asynchronous operations.""" __tablename__ = 'async_operation_data' entity_uuid = Column(String(36), nullable=False, primary_key=True) key = Column(String(255), nullable=False, primary_key=True) value = Column(String(1023), nullable=False) class ShareBackup(BASE, ManilaBase): """Represents a backup of a share.""" __tablename__ = 'share_backups' id = Column(String(36), primary_key=True) @property def name(self): return CONF.share_backup_name_template % self.id @property def availability_zone(self): if self._availability_zone: return self._availability_zone['name'] deleted = Column(String(36), default='False') user_id = Column(String(255), nullable=False) project_id = Column(String(255), nullable=False) share_id = Column(String(36), ForeignKey('shares.id')) size = Column(Integer) host = Column(String(255)) topic = Column(String(255)) display_name = Column(String(255)) display_description = Column(String(255)) progress = Column(String(32)) restore_progress = Column(String(32)) status = Column(String(255)) fail_reason = Column(String(1023)) backup_type = Column(String(32)) availability_zone_id = Column(String(36), ForeignKey('availability_zones.id'), nullable=True) _availability_zone = orm.relationship( 
"AvailabilityZone", lazy='immediate', primaryjoin=( 'and_(' 'ShareBackup.availability_zone_id == ' 'AvailabilityZone.id, ' 'AvailabilityZone.deleted == \'False\')' ) ) def register_models(): """Register Models and create metadata. Called from manila.db.sqlalchemy.__init__ as part of loading the driver, it will never need to be called explicitly elsewhere unless the connection is lost and needs to be reestablished. """ from sqlalchemy import create_engine models = (Service, Share, ShareAccessMapping, ShareSnapshot ) engine = create_engine(CONF.database.connection, echo=False) for model in models: model.metadata.create_all(engine) def get_access_rules_status(instances): share_access_status = constants.STATUS_ACTIVE if len(instances) == 0: return share_access_status priorities = ShareInstance.ACCESS_STATUS_PRIORITIES for instance in instances: if instance['status'] != constants.STATUS_AVAILABLE: continue instance_access_status = instance['access_rules_status'] if priorities.get(instance_access_status) > priorities.get( share_access_status): share_access_status = instance_access_status if share_access_status == constants.SHARE_INSTANCE_RULES_ERROR: break return share_access_status def get_aggregated_access_rules_state(instance_mappings): state = None if len(instance_mappings) > 0: order = (constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_QUEUED_TO_DENY, constants.ACCESS_STATE_QUEUED_TO_UPDATE, constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_UPDATING, constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_ACTIVE) sorted_instance_mappings = sorted( instance_mappings, key=lambda x: order.index(x['state'])) state = sorted_instance_mappings[0].state return state ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/sqlalchemy/query.py0000664000175000017500000000267500000000000020577 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db.sqlalchemy import orm import sqlalchemy from manila.common import constants class Query(orm.Query): def soft_delete(self, synchronize_session='evaluate', update_status=False, status_field_name='status'): if update_status: setattr(self, status_field_name, constants.STATUS_DELETED) return super(Query, self).soft_delete(synchronize_session) def get_maker(engine, expire_on_commit=False): """Return a SQLAlchemy sessionmaker using the given engine.""" return sqlalchemy.orm.sessionmaker(bind=engine, class_=orm.Session, expire_on_commit=expire_on_commit, query_cls=Query) # NOTE(uglide): Monkey patch oslo_db get_maker() function to use custom Query orm.get_maker = get_maker ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/db/sqlalchemy/utils.py0000664000175000017500000000351000000000000020557 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Implementation of paginate query.""" from manila import exception import sqlalchemy def paginate_query(query, model, limit, sort_key='created_at', sort_dir='desc', offset=None): """Returns a query with sorting / pagination criteria added. :param query: the query object to which we should add paging/sorting :param model: the ORM model class :param limit: maximum number of items to return :param sort_key: attributes by which results should be sorted, default is created_at :param sort_dir: direction in which results should be sorted (asc, desc) :param offset: the number of items to skip from the marker or from the first element. :rtype: sqlalchemy.orm.query.Query :return: The query with sorting/pagination added. """ try: sort_key_attr = getattr(model, sort_key) except AttributeError: raise exception.InvalidInput(reason='Invalid sort key %s' % sort_key) if sort_dir == 'desc': query = query.order_by(sqlalchemy.desc(sort_key_attr)) else: query = query.order_by(sqlalchemy.asc(sort_key_attr)) if limit is not None: query = query.limit(limit) if offset: query = query.offset(offset) return query ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/exception.py0000664000175000017500000010233500000000000016673 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Manila base exception handling. Includes decorator for re-raising Manila-type exceptions. SHOULD include dedicated exception logging. """ import re from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log import webob.exc from manila.i18n import _ LOG = log.getLogger(__name__) exc_log_opts = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help='Whether to make exception message format errors fatal.'), ] CONF = cfg.CONF CONF.register_opts(exc_log_opts) ProcessExecutionError = processutils.ProcessExecutionError class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=400, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() class Error(Exception): pass class ManilaException(Exception): """Base Manila Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
""" message = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, detail_data={}, **kwargs): self.kwargs = kwargs self.detail_data = detail_data if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass for k, v in self.kwargs.items(): if isinstance(v, Exception): self.kwargs[k] = str(v) if not message: try: message = self.message % kwargs except Exception: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception('Exception in string format operation.') for name, value in kwargs.items(): LOG.error("%(name)s: %(value)s", { 'name': name, 'value': value}) if CONF.fatal_exception_format_errors: raise else: # at least get the core message out if something happened message = self.message elif isinstance(message, Exception): message = str(message) if re.match(r'.*[^\.]\.\.$', message): message = message[:-1] self.msg = message super(ManilaException, self).__init__(message) class NetworkException(ManilaException): message = _("Exception due to network failure.") class NetworkBindException(ManilaException): message = _("Exception due to failed port status in binding.") class NetworkBadConfigurationException(NetworkException): message = _("Bad network configuration: %(reason)s.") class BadConfigurationException(ManilaException): message = _("Bad configuration: %(reason)s.") class NotAuthorized(ManilaException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges.") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s to be performed.") class Conflict(ManilaException): message = _("%(err)s") code = 409 class Invalid(ManilaException): message = _("Unacceptable parameters.") code = 400 class InvalidRequest(Invalid): message = _("The request is invalid.") class InvalidResults(Invalid): message = _("The results are invalid.") class 
InvalidInput(Invalid): message = _("Invalid input received: %(reason)s.") class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidHost(Invalid): message = _("Invalid host: %(reason)s") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = _("%(err)s") class InvalidUUID(Invalid): message = _("%(uuid)s is not a valid uuid.") class InvalidDriverMode(Invalid): message = _("Invalid driver mode: %(driver_mode)s.") class InvalidAPIVersionString(Invalid): message = _("API Version String %(version)s is of invalid format. Must " "be of format MajorNum.MinorNum.") class VersionNotFoundForAPIMethod(Invalid): message = _("API version %(version)s is not supported on this method.") class InvalidGlobalAPIVersion(Invalid): message = _("Version %(req_ver)s is not supported by the API. Minimum " "is %(min_ver)s and maximum is %(max_ver)s.") class InvalidCapacity(Invalid): message = _("Invalid capacity: %(name)s = %(value)s.") class ValidationError(Invalid): message = "%(detail)s" class NotFound(ManilaException): message = _("Resource could not be found.") code = 404 safe = True class MessageNotFound(NotFound): message = _("Message %(message_id)s could not be found.") class ResourceLockNotFound(NotFound): message = _("Resource lock %(lock_id)s could not be found.") class ResourceVisibilityLockExists(ManilaException): message = _("Resource %(resource_id)s is already locked.") class Found(ManilaException): message = _("Resource was found.") code = 302 safe = True class InUse(ManilaException): message = _("Resource is in use.") class AvailabilityZoneNotFound(NotFound): message = _("Availability zone %(id)s could not be found.") class ShareNetworkNotFound(NotFound): message = _("Share network %(share_network_id)s could not be found.") class ShareNetworkSubnetNotFound(NotFound): message = _("Share network subnet %(share_network_subnet_id)s could not be" " 
found.") class ShareNetworkSubnetNotFoundByShareServer(NotFound): message = _("Share network subnet could not be found by " "%(share_server_id)s.") class ShareServerNotFound(NotFound): message = _("Share server %(share_server_id)s could not be found.") class ShareServerNotFoundByFilters(ShareServerNotFound): message = _("Share server could not be found by " "filters: %(filters_description)s.") class AllocationsNotFoundForShareServer(NotFound): message = _("No allocations found for the share server " "%(share_server_id)s on the subnet.") class InvalidShareNetwork(Invalid): message = _("Invalid share network: %(reason)s") class ShareServerInUse(InUse): message = _("Share server %(share_server_id)s is in use.") class ShareServerMigrationError(ManilaException): message = _("Error in share server migration: %(reason)s") class ShareServerMigrationFailed(ManilaException): message = _("Share server migration failed: %(reason)s") class InvalidShareServer(Invalid): message = _("Invalid share server: %(reason)s") class ShareMigrationError(ManilaException): message = _("Error in share migration: %(reason)s") class ShareMigrationFailed(ManilaException): message = _("Share migration failed: %(reason)s") class ShareDataCopyFailed(ManilaException): message = _("Share Data copy failed: %(reason)s") class ShareDataCopyCancelled(ManilaException): message = _("Copy of contents from source to destination was cancelled.") class ServiceIPNotFound(ManilaException): message = _("Service IP for instance not found: %(reason)s") class AdminIPNotFound(ManilaException): message = _("Admin port IP for service instance not found: %(reason)s") class ShareServerNotCreated(ManilaException): message = _("Share server %(share_server_id)s failed on creation.") class ShareServerNotReady(ManilaException): message = _("Share server %(share_server_id)s failed to reach '%(state)s' " "within %(time)s seconds.") class ShareServerBackendDetailsNotFound(NotFound): message = _("Share server backend details does 
not exist.") class ServiceNotFound(NotFound): message = _("Service %(service_id)s could not be found.") class ServiceIsDown(Invalid): message = _("Service %(service)s is down.") class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") class SchedulerHostFilterNotFound(NotFound): message = _("Scheduler host filter %(filter_name)s could not be found.") class SchedulerHostWeigherNotFound(NotFound): message = _("Scheduler host weigher %(weigher_name)s could not be found.") class HostBinaryNotFound(NotFound): message = _("Could not find binary %(binary)s on host %(host)s.") class TransferNotFound(NotFound): message = _("Transfer %(transfer_id)s could not be found.") class InvalidReservationExpiration(Invalid): message = _("Invalid reservation expiration %(expire)s.") class InvalidQuotaValue(Invalid): message = _("Change would make usage less than 0 for the following " "resources: %(unders)s.") class QuotaNotFound(NotFound): message = _("Quota could not be found.") class QuotaExists(ManilaException): message = _("Quota exists for project %(project_id)s, " "resource %(resource)s.") class QuotaResourceUnknown(QuotaNotFound): message = _("Unknown quota resources %(unknown)s.") class ProjectUserQuotaNotFound(QuotaNotFound): message = _("Quota for user %(user_id)s in project %(project_id)s " "could not be found.") class ProjectShareTypeQuotaNotFound(QuotaNotFound): message = _("Quota for share_type %(share_type)s in " "project %(project_id)s could not be found.") class ProjectQuotaNotFound(QuotaNotFound): message = _("Quota for project %(project_id)s could not be found.") class QuotaClassNotFound(QuotaNotFound): message = _("Quota class %(class_name)s could not be found.") class QuotaUsageNotFound(QuotaNotFound): message = _("Quota usage for project %(project_id)s could not be found.") class ReservationNotFound(QuotaNotFound): message = _("Quota reservation %(uuid)s could not be found.") class OverQuota(ManilaException): message = _("Quota exceeded for 
resources: %(overs)s.") class MigrationNotFound(NotFound): message = _("Migration %(migration_id)s could not be found.") class MigrationNotFoundByStatus(MigrationNotFound): message = _("Migration not found for instance %(instance_id)s " "with status %(status)s.") class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") class MigrationError(ManilaException): message = _("Migration error: %(reason)s.") class MalformedRequestBody(ManilaException): message = _("Malformed message body: %(reason)s.") class ConfigNotFound(NotFound): message = _("Could not find config at %(path)s.") class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s.") class NoValidHost(ManilaException): message = _("No valid host was found. %(reason)s.") class WillNotSchedule(ManilaException): message = _("Host %(host)s is not up or doesn't exist.") class QuotaError(ManilaException): message = _("Quota exceeded: code=%(code)s.") code = 413 headers = {'Retry-After': '0'} safe = True class ShareSizeExceedsAvailableQuota(QuotaError): message = _( "Requested share exceeds allowed project/user or share type " "gigabytes quota.") class SnapshotSizeExceedsAvailableQuota(QuotaError): message = _( "Requested snapshot exceeds allowed project/user or share type " "gigabytes quota.") class ShareSizeExceedsLimit(QuotaError): message = _( "Requested share size %(size)d is larger than " "maximum allowed limit %(limit)d.") class ShareLimitExceeded(QuotaError): message = _( "Maximum number of shares allowed (%(allowed)d) either per " "project/user or share type quota is exceeded.") class SnapshotLimitExceeded(QuotaError): message = _( "Maximum number of snapshots allowed (%(allowed)d) either per " "project/user or share type quota is exceeded.") class ShareNetworksLimitExceeded(QuotaError): message = _("Maximum number of share-networks " "allowed (%(allowed)d) exceeded.") class ShareGroupsLimitExceeded(QuotaError): message = _( "Maximum number of 
allowed share-groups is exceeded.") class ShareGroupSnapshotsLimitExceeded(QuotaError): message = _( "Maximum number of allowed share-group-snapshots is exceeded.") class ShareReplicasLimitExceeded(QuotaError): message = _( "Maximum number of allowed share-replicas is exceeded.") class ShareReplicaSizeExceedsAvailableQuota(QuotaError): message = _( "Requested share replica exceeds allowed project/user or share type " "gigabytes quota.") class EncryptionKeysLimitExceeded(QuotaError): message = _( "Maximum number of allowed encryption keys is exceeded.") class GlusterfsException(ManilaException): message = _("Unknown Gluster exception.") class InvalidShare(Invalid): message = _("Invalid share: %(reason)s.") class InvalidAuthKey(Invalid): message = _("Invalid auth key: %(reason)s") class ShareBusyException(Invalid): message = _("Share is busy with an active task: %(reason)s.") class InvalidShareInstance(Invalid): message = _("Invalid share instance: %(reason)s.") class ManageInvalidShare(InvalidShare): message = _("Manage existing share failed due to " "invalid share: %(reason)s") class ManageShareServerError(ManilaException): message = _("Manage existing share server failed due to: %(reason)s") class UnmanageInvalidShare(InvalidShare): message = _("Unmanage existing share failed due to " "invalid share: %(reason)s") class PortLimitExceeded(QuotaError): message = _("Maximum number of ports exceeded.") class IpAddressGenerationFailureClient(ManilaException): message = _("No free IP addresses available in neutron subnet.") class ShareAccessExists(ManilaException): message = _("Share access %(access_type)s:%(access)s exists.") class ShareAccessMetadataNotFound(NotFound): message = _("Share access rule metadata does not exist.") class ShareSnapshotAccessExists(InvalidInput): message = _("Share snapshot access %(access_type)s:%(access)s exists.") class InvalidSnapshot(Invalid): message = _("Invalid snapshot: %(reason)s") class InvalidSnapshotAccess(Invalid): message = 
_("Invalid access rule: %(reason)s") class InvalidShareAccess(Invalid): message = _("Invalid access rule: %(reason)s") class InvalidShareAccessLevel(Invalid): message = _("Invalid or unsupported share access level: %(level)s.") class InvalidShareAccessType(Invalid): message = _("Invalid or unsupported share access type: %(type)s.") class DriverCannotTransferShareWithRules(ManilaException): message = _("Driver failed to transfer share with rules.") class ShareBackendException(ManilaException): message = _("Share backend error: %(msg)s.") class OperationNotSupportedByDriverMode(ManilaException): message = _("The share driver mode does not support this operation.") class RequirementMissing(ManilaException): message = _("Requirement %(req)s is not installed.") class ExportLocationNotFound(NotFound): message = _("Export location %(uuid)s could not be found.") class ShareNotFound(NotFound): message = _("Share %(share_id)s could not be found.") class ShareInstanceNotFound(NotFound): message = _("Share instance %(share_instance_id)s could not be found.") class ShareSnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class ShareSnapshotInstanceNotFound(NotFound): message = _("Snapshot instance %(instance_id)s could not be found.") class ShareSnapshotNotSupported(ManilaException): message = _("Share %(share_name)s does not support snapshots.") class ShareGroupSnapshotNotSupported(ManilaException): message = _("Share group %(share_group)s does not support snapshots.") class ShareSnapshotIsBusy(ManilaException): message = _("Deleting snapshot %(snapshot_name)s that has " "dependent shares.") class InvalidShareSnapshot(Invalid): message = _("Invalid share snapshot: %(reason)s.") class InvalidShareSnapshotInstance(Invalid): message = _("Invalid share snapshot instance: %(reason)s.") class ManageInvalidShareSnapshot(InvalidShareSnapshot): message = _("Manage existing share snapshot failed due to " "invalid share snapshot: %(reason)s.") class 
UnmanageInvalidShareSnapshot(InvalidShareSnapshot): message = _("Unmanage existing share snapshot failed due to " "invalid share snapshot: %(reason)s.") class MetadataItemNotFound(NotFound): message = _("Metadata item is not found.") class InvalidMetadata(Invalid): message = _("Invalid metadata.") class InvalidMetadataSize(Invalid): message = _("Invalid metadata size.") class SecurityServiceNotFound(NotFound): message = _("Security service %(security_service_id)s could not be found.") class InvalidSecurityService(Invalid): message = _("Invalid security service: %(reason)s") class ShareNetworkSecurityServiceAssociationError(ManilaException): message = _("Failed to associate share network %(share_network_id)s" " and security service %(security_service_id)s: %(reason)s.") class ShareNetworkSecurityServiceDissociationError(ManilaException): message = _("Failed to dissociate share network %(share_network_id)s" " and security service %(security_service_id)s: %(reason)s.") class SecurityServiceFailedAuth(ManilaException): message = _("Failed to authenticate user against security service.") class InvalidVolume(Invalid): message = _("Invalid volume.") class InvalidShareType(Invalid): message = _("Invalid share type: %(reason)s.") class InvalidShareGroupType(Invalid): message = _("Invalid share group type: %(reason)s.") class InvalidExtraSpec(Invalid): message = _("Invalid extra_spec: %(reason)s.") class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class VolumeSnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class ShareTypeNotFound(NotFound): message = _("Share type %(share_type_id)s could not be found.") class ShareGroupTypeNotFound(NotFound): message = _("Share group type %(type_id)s could not be found.") class ShareTypeAccessNotFound(NotFound): message = _("Share type access not found for %(share_type_id)s / " "%(project_id)s combination.") class ShareGroupTypeAccessNotFound(NotFound): message = 
_("Share group type access not found for %(type_id)s / " "%(project_id)s combination.") class ShareTypeNotFoundByName(ShareTypeNotFound): message = _("Share type with name %(share_type_name)s " "could not be found.") class ShareGroupTypeNotFoundByName(ShareTypeNotFound): message = _("Share group type with name %(type_name)s " "could not be found.") class ShareTypeExtraSpecsNotFound(NotFound): message = _("Share Type %(share_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class ShareGroupTypeSpecsNotFound(NotFound): message = _("Share group type %(type_id)s has no group specs with " "key %(specs_key)s.") class ShareTypeInUse(ManilaException): message = _("Share Type %(share_type_id)s deletion is not allowed while " "shares or share group types are associated with the type.") class IPAddressInUse(InUse): message = _("IP address %(ip)s is already used.") class ShareGroupTypeInUse(ManilaException): message = _("Share group Type %(type_id)s deletion is not allowed " "with groups present with the type.") class ShareTypeExists(ManilaException): message = _("Share Type %(id)s already exists.") class ShareTypeDoesNotExist(NotFound): message = _("Share Type %(share_type)s does not exist.") class DefaultShareTypeNotConfigured(NotFound): message = _("No default share type is configured. 
Either configure a " "default share type or explicitly specify a share type.") class ShareGroupTypeExists(ManilaException): message = _("Share group type %(type_id)s already exists.") class ShareTypeAccessExists(ManilaException): message = _("Share type access for %(share_type_id)s / " "%(project_id)s combination already exists.") class ShareGroupTypeAccessExists(ManilaException): message = _("Share group type access for %(type_id)s / " "%(project_id)s combination already exists.") class ShareTypeCreateFailed(ManilaException): message = _("Cannot create share_type with " "name %(name)s and specs %(extra_specs)s.") class ShareTypeUpdateFailed(ManilaException): message = _("Cannot update share_type %(id)s.") class ManilaBarbicanACLError(ManilaException): message = _("Failed while communicating to Barbican. " "Please check the provided credentials .") class ManilaBarbicanAppCredsError(ManilaException): message = _("Error occured while dealing with barbican for App Creds.") class ShareGroupTypeCreateFailed(ManilaException): message = _("Cannot create share group type with " "name %(name)s and specs %(group_specs)s.") class ManageExistingShareTypeMismatch(ManilaException): message = _("Manage existing share failed due to share type mismatch: " "%(reason)s") class ShareExtendingError(ManilaException): message = _("Share %(share_id)s could not be extended due to error " "in the driver: %(reason)s") class ShareShrinkingError(ManilaException): message = _("Share %(share_id)s could not be shrunk due to error " "in the driver: %(reason)s") class ShareShrinkingPossibleDataLoss(ManilaException): message = _("Share %(share_id)s could not be shrunk due to " "possible data loss") class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") class BridgeDoesNotExist(ManilaException): message = _("Bridge %(bridge)s does not exist.") class ServiceInstanceException(ManilaException): message = _("Exception in service instance manager occurred.") class 
ServiceInstanceUnavailable(ServiceInstanceException): message = _("Service instance is not available.") class StorageResourceException(ManilaException): message = _("Storage resource exception.") class StorageResourceNotFound(StorageResourceException): message = _("Storage resource %(name)s not found.") code = 404 class SnapshotResourceNotFound(StorageResourceNotFound): message = _("Snapshot %(name)s not found.") class SnapshotUnavailable(StorageResourceException): message = _("Snapshot %(name)s info not available.") class NetAppException(ManilaException): message = _("Exception due to NetApp failure.") class NetAppBusyAggregateForFlexGroupException(ManilaException): message = _("Exception due to an aggregate being busy while trying to " "provision the FlexGroup.") class VserverNotFound(NetAppException): message = _("Vserver %(vserver)s not found.") class VserverNotSpecified(NetAppException): message = _("Vserver not specified.") class VserverNotReady(NetAppException): message = _("Vserver %(vserver)s is not ready yet.") class EMCPowerMaxXMLAPIError(Invalid): message = _("%(err)s") class EMCPowerMaxLockRequiredException(ManilaException): message = _("Unable to acquire lock(s).") class EMCPowerMaxInvalidMoverID(ManilaException): message = _("Invalid mover or vdm %(id)s.") class EMCVnxXMLAPIError(Invalid): message = _("%(err)s") class EMCVnxLockRequiredException(ManilaException): message = _("Unable to acquire lock(s).") class EMCVnxInvalidMoverID(ManilaException): message = _("Invalid mover or vdm %(id)s.") class EMCUnityError(ShareBackendException): message = _("%(err)s") class HPE3ParInvalidClient(Invalid): message = _("%(err)s") class HPE3ParInvalid(Invalid): message = _("%(err)s") class HPE3ParUnexpectedError(ManilaException): message = _("%(err)s") class GPFSException(ManilaException): message = _("GPFS exception occurred.") class GPFSGaneshaException(ManilaException): message = _("GPFS Ganesha exception occurred.") class 
GaneshaCommandFailure(ProcessExecutionError): _description = _("Ganesha management command failed.") def __init__(self, **kw): if 'description' not in kw: kw['description'] = self._description super(GaneshaCommandFailure, self).__init__(**kw) class InvalidSqliteDB(Invalid): message = _("Invalid Sqlite database.") class SSHException(ManilaException): message = _("Exception in SSH protocol negotiation or logic.") class HDFSException(ManilaException): message = _("HDFS exception occurred!") class MapRFSException(ManilaException): message = _("MapRFS exception occurred: %(msg)s") class ZFSonLinuxException(ManilaException): message = _("ZFSonLinux exception occurred: %(msg)s") class QBException(ManilaException): message = _("Quobyte exception occurred: %(msg)s") class QBRpcException(ManilaException): """Quobyte backend specific exception.""" message = _("Quobyte JsonRpc call to backend raised " "an exception: %(result)s, Quobyte error" " code %(qbcode)s") class SSHInjectionThreat(ManilaException): message = _("SSH command injection detected: %(command)s") class HNASBackendException(ManilaException): message = _("HNAS Backend Exception: %(msg)s") class HNASConnException(ManilaException): message = _("HNAS Connection Exception: %(msg)s") class HNASSSCIsBusy(ManilaException): message = _("HNAS SSC is busy and cannot execute the command: %(msg)s") class HNASSSCContextChange(ManilaException): message = _("HNAS SSC Context has been changed unexpectedly: %(msg)s") class HNASDirectoryNotEmpty(ManilaException): message = _("HNAS Directory is not empty: %(msg)s") class HNASItemNotFoundException(StorageResourceNotFound): message = _("HNAS Item Not Found Exception: %(msg)s") class HNASNothingToCloneException(ManilaException): message = _("HNAS Nothing To Clone Exception: %(msg)s") # ShareGroup class ShareGroupNotFound(NotFound): message = _("Share group %(share_group_id)s could not be found.") class ShareGroupSnapshotNotFound(NotFound): message = _( "Share group snapshot 
%(share_group_snapshot_id)s could not be found.") class ShareGroupSnapshotMemberNotFound(NotFound): message = _("Share group snapshot member %(member_id)s could not be " "found.") class InvalidShareGroup(Invalid): message = _("Invalid share group: %(reason)s") class InvalidShareGroupSnapshot(Invalid): message = _("Invalid share group snapshot: %(reason)s") class DriverNotInitialized(ManilaException): message = _("Share driver '%(driver)s' not initialized.") class ShareResourceNotFound(StorageResourceNotFound): message = _("Share id %(share_id)s could not be found " "in storage backend.") class ShareUmountException(ManilaException): message = _("Failed to unmount share: %(reason)s") class ShareMountException(ManilaException): message = _("Failed to mount share: %(reason)s") class ShareCopyDataException(ManilaException): message = _("Failed to copy data: %(reason)s") # Replication class ReplicationException(ManilaException): message = _("Unable to perform a replication action: %(reason)s.") class ShareReplicaNotFound(NotFound): message = _("Share Replica %(replica_id)s could not be found.") # Tegile Storage drivers class TegileAPIException(ShareBackendException): message = _("Unexpected response from Tegile IntelliFlash API: " "%(response)s") class StorageCommunicationException(ShareBackendException): message = _("Could not communicate with storage array.") class EvaluatorParseException(ManilaException): message = _("Error during evaluator parsing: %(reason)s") # Hitachi Scaleout Platform driver class HSPBackendException(ShareBackendException): message = _("HSP Backend Exception: %(msg)s") class HSPTimeoutException(ShareBackendException): message = _("HSP Timeout Exception: %(msg)s") class HSPItemNotFoundException(ShareBackendException): message = _("HSP Item Not Found Exception: %(msg)s") class NexentaException(ShareBackendException): message = _("Exception due to Nexenta failure. 
%(reason)s") # Tooz locking class LockCreationFailed(ManilaException): message = _('Unable to create lock. Coordination backend not started.') class LockingFailed(ManilaException): message = _('Lock acquisition failed.') # Ganesha library class GaneshaException(ManilaException): message = _("Unknown NFS-Ganesha library exception.") # Infortrend Storage driver class InfortrendCLIException(ShareBackendException): message = _("Infortrend CLI exception: %(err)s " "Return Code: %(rc)s, Output: %(out)s") class InfortrendNASException(ShareBackendException): message = _("Infortrend NAS exception: %(err)s") # Zadara storage driver class ZadaraUnknownCmd(ShareBackendException): message = _("Unknown or unsupported command %(cmd)s") class ZadaraSessionRequestException(ShareBackendException): message = _("%(msg)s") class ZadaraBadHTTPResponseStatus(ShareBackendException): message = _("Bad HTTP response status %(status)s") class ZadaraFailedCmdWithDump(ShareBackendException): message = _("Operation failed with status=%(status)s. Full dump: %(data)s") class ZadaraVPSANoActiveController(ShareBackendException): message = _("Unable to find any active VPSA controller") class ZadaraServerCreateFailure(ShareBackendException): message = _("Unable to create server object for initiator %(name)s") class ZadaraAttachmentsNotFound(ShareBackendException): message = _("Failed to retrieve attachments for volume %(name)s") class ZadaraManilaInvalidAccessKey(ShareBackendException): message = _("Invalid VPSA access key") class ZadaraVPSAVolumeShareFailed(ShareBackendException): message = _("Failed to create VPSA backend share. Error: %(error)s") class ZadaraInvalidShareAccessType(ShareBackendException): message = _("Only ip access type allowed for the Zadara manila share.") class ZadaraShareNotFound(ShareBackendException): message = _("Share %(name)s could not be found.") class ZadaraExtendShareFailed(ShareBackendException): message = _("Failed to extend VPSA backend share. 
Error: %(error)s") class ZadaraInvalidProtocol(ShareBackendException): message = _("The type of protocol %(protocol_type)s for Zadara " "manila driver is not supported. Only NFS or CIFS " "protocol is supported.") class ZadaraShareNotValid(ShareBackendException): message = _("Share %(name)s is not valid.") class ZadaraVPSASnapshotCreateFailed(ShareBackendException): message = _("Failed to create VPSA share %(name)s snapshot. " "Error: %(error)s") class ZadaraVPSASnapshotManageFailed(ShareBackendException): message = _("Failed to manage VPSA share snapshot with id %(snap_id)s. " "Error: %(error)s") class ZadaraServerNotFound(NotFound): message = _("Unable to find server object for initiator %(name)s") # Macrosan Storage driver class MacrosanBackendExeption(ShareBackendException): message = _("Macrosan backend exception: %(reason)s") # Backup class BackupException(ManilaException): message = _("Unable to perform a backup action: %(reason)s.") class InvalidBackup(Invalid): message = _("Invalid backup: %(reason)s.") class BackupLimitExceeded(QuotaError): message = _("Maximum number of backups allowed (%(allowed)d) exceeded.") class ShareBackupNotFound(NotFound): message = _("Backup %(backup_id)s could not be found.") class ShareBackupSizeExceedsAvailableQuota(QuotaError): message = _("Requested backup exceeds allowed Backup gigabytes " "quota. 
Requested %(requested)sG, quota is %(quota)sG and " "%(consumed)sG has been consumed.") class NetappActiveIQWeigherRequiredParameter(ManilaException): message = _("%(config)s configuration of the NetAppActiveIQ weigher " "must be set.") # Vastdata Storage driver class VastApiException(ManilaException): message = _("Rest api error: %(reason)s.") class VastApiRetry(ManilaException): message = _("Rest api retry: %(reason)s.") class VastShareNotFound(ShareBackendException): message = _("Share %(name)s could not be found.") class VastDriverException(ShareBackendException): message = _("Vast driver error: %(reason)s.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/i18n.py0000664000175000017500000000202100000000000015443 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/usage.html . 
""" import oslo_i18n DOMAIN = 'manila' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8736722 manila-21.0.0/manila/image/0000775000175000017500000000000000000000000015401 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/image/__init__.py0000664000175000017500000000212600000000000017513 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import oslo_config.cfg import oslo_utils.importutils _glance_opts = [ oslo_config.cfg.StrOpt('image_api_class', default='manila.image.glance.API', help='The full class name of the ' 'Glance API class to use.'), ] oslo_config.cfg.CONF.register_opts(_glance_opts) def API(): importutils = oslo_utils.importutils glance_api_class = oslo_config.cfg.CONF.image_api_class cls = importutils.import_class(glance_api_class) return cls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/image/glance.py0000664000175000017500000000460400000000000017210 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests to Glance. 
""" from glanceclient import client as glance_client from keystoneauth1 import loading as ks_loading from oslo_config import cfg from manila.common import client_auth from manila.common.config import core_opts from manila.db import base GLANCE_GROUP = 'glance' AUTH_OBJ = None glance_opts = [ cfg.StrOpt('api_microversion', default='2', help='Version of Glance API to be used.'), cfg.StrOpt('region_name', default='RegionOne', help='Region name for connecting to glance.'), cfg.StrOpt('endpoint_type', default='publicURL', choices=['publicURL', 'internalURL', 'adminURL', 'public', 'internal', 'admin'], help='Endpoint type to be used with glance client calls.'), ] CONF = cfg.CONF CONF.register_opts(core_opts) CONF.register_opts(glance_opts, GLANCE_GROUP) ks_loading.register_session_conf_options(CONF, GLANCE_GROUP) ks_loading.register_auth_conf_options(CONF, GLANCE_GROUP) def list_opts(): return client_auth.AuthClientLoader.list_opts(GLANCE_GROUP) def glanceclient(context): global AUTH_OBJ if not AUTH_OBJ: AUTH_OBJ = client_auth.AuthClientLoader( client_class=glance_client.Client, cfg_group=GLANCE_GROUP) return AUTH_OBJ.get_client(context, version=CONF[GLANCE_GROUP].api_microversion, interface=CONF[GLANCE_GROUP].endpoint_type, region_name=CONF[GLANCE_GROUP].region_name) class API(base.Base): """API for interacting with glanceclient.""" def image_list(self, context): client = glanceclient(context) if hasattr(client, 'images'): return client.images.list() return client.glance.list() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8736722 manila-21.0.0/manila/keymgr/0000775000175000017500000000000000000000000015615 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/keymgr/__init__.py0000664000175000017500000000000000000000000017714 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/keymgr/barbican.py0000664000175000017500000002252000000000000017731 0ustar00zuulzuul00000000000000# Copyright 2025 Cloudification GmbH. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from castellan.key_manager import barbican_key_manager from castellan import options as castellan_options from keystoneauth1 import loading as ks_loading from keystoneauth1 import session as ks_session from keystoneclient.v3 import client as ks_client from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils from manila.common import client_auth from manila import exception BARBICAN_GROUP = 'barbican' CONF = cfg.CONF LOG = logging.getLogger(__name__) castellan_options.set_defaults(CONF) ks_loading.register_auth_conf_options(CONF, BARBICAN_GROUP) BARBICAN_OPTS = [ cfg.StrOpt('endpoint_type', default='publicURL', choices=['publicURL', 'internalURL', 'adminURL', 'public', 'internal', 'admin'], help='Endpoint type to be used with keystone client calls.'), cfg.StrOpt('region_name', help='Region name for connecting to keystone for ' 'application credential management.'), ] CONF.register_opts(BARBICAN_OPTS, BARBICAN_GROUP) def list_opts(): # NOTE(tkajinam): This likely breaks when castellan fixes missing auth # plugin options return itertools.chain( [(BARBICAN_GROUP, 
BARBICAN_OPTS)], client_auth.AuthClientLoader.list_opts(BARBICAN_GROUP) ) def _require_barbican_key_manager_backend(conf): backend = conf.key_manager.backend if backend is None: LOG.warning("The BarbicanKeyManager backend should be explicitly " "used for share encryption.") raise exception.ManilaBarbicanACLError() backend = backend.split('.')[-1] if backend not in ('barbican', 'BarbicanKeyManager'): LOG.warning("The '%s' key_manager backend is not supported. Please" " use barbican as key_manager.", backend) raise exception.ManilaBarbicanACLError() class BarbicanSecretACL(barbican_key_manager.BarbicanKeyManager): def get_client_and_href(self, context, secret_ref): """Get user barbican client and a secret href""" _require_barbican_key_manager_backend(self.conf) if not secret_ref: LOG.error("Missing secret_ref provided in current user context.") raise exception.ManilaBarbicanACLError() # Establish a Barbican client session of current user and keystone # session of barbican user to get its user_id. Grant ACL to barbican # user that it will be used for the key_ref handover process. try: user_barbican_client, base_url = self._get_barbican_client(context) secret_ref = self._create_secret_ref(base_url, secret_ref) except Exception as e: LOG.error("Failed to create barbican client. 
Error: %s", e) raise exception.ManilaBarbicanACLError() return user_barbican_client, secret_ref def _get_barbican_user_id(self): barbican_auth = ks_loading.load_auth_from_conf_options( self.conf, BARBICAN_GROUP) barbican_sess = ks_session.Session(auth=barbican_auth) barbican_ks_client = ks_client.Client( session=barbican_sess, interface=self.conf.barbican.endpoint_type, region_name=self.conf.barbican.region_name) return barbican_ks_client.session.get_user_id() def create_secret_access(self, context, secret_ref): try: user_barbican_client, secret_href = self.get_client_and_href( context, secret_ref) barbican_user_id = self._get_barbican_user_id() # Create a Barbican ACL so the barbican user can access it. acl = user_barbican_client.acls.create(entity_ref=secret_href, users=[barbican_user_id], project_access=False) acl.submit() except Exception as e: LOG.error("Failed to create secret ACL. Error: %s", e) raise exception.ManilaBarbicanACLError() def delete_secret_access(self, context, secret_ref): try: user_barbican_client, secret_href = self.get_client_and_href( context, secret_ref) barbican_user_id = self._get_barbican_user_id() # Remove a Barbican ACL for the barbican user. acl_entity = user_barbican_client.acls.get(entity_ref=secret_href) existing_users = acl_entity.read.users remove_users = [barbican_user_id] updated_users = set(existing_users).difference(remove_users) acl_entity.read.users = list(updated_users) acl_entity.submit() except Exception as e: LOG.error("Failed to delete secret ACL. Error: %s", e) def get_secret_href(self, context, secret_ref): try: user_barbican_client, secret_href = self.get_client_and_href( context, secret_ref) return secret_href except Exception as e: LOG.error("Failed to get barbican secret href. 
Error: %s", e) raise exception.ManilaBarbicanACLError() class BarbicanUserAppCreds(object): def __init__(self, conf): self.conf = conf @property def client(self): return self.get_client() def get_client(self): _require_barbican_key_manager_backend(self.conf) auth = ks_loading.load_auth_from_conf_options(self.conf, BARBICAN_GROUP) sess = ks_session.Session(auth=auth) return ks_client.Client( session=sess, interface=self.conf.barbican.endpoint_type, region_name=self.conf.barbican.region_name) def get_application_credentials(self, context, application_credential_id): if not application_credential_id: LOG.warning("Missing application credentials ID") raise exception.ManilaBarbicanAppCredsError() try: return self.client.application_credentials.get( application_credential=application_credential_id) except Exception as e: LOG.error("Aborting App Creds request due to error: %s", e) raise exception.ManilaBarbicanAppCredsError() def create_application_credentials(self, context, secret): try: secrets_path = "/key-manager/v1/secrets" return self.client.application_credentials.create( name='manila_barbican_' + uuidutils.generate_uuid(), user=self.client.session.get_user_id(), roles=[{'name': 'service'}], secret=str(secret), access_rules=[ { "path": secrets_path + "/%s" % secret, "method": "GET", "service": "key-manager", }, { "path": secrets_path + "/%s/payload" % secret, "method": "GET", "service": "key-manager", } ] ) except Exception as e: LOG.error("Aborting App Creds create due to error: %s", e) raise exception.ManilaBarbicanAppCredsError() def delete_application_credentials(self, context, application_credential_id): if not application_credential_id: LOG.warning("Missing application credentials ID") raise exception.ManilaBarbicanAppCredsError() try: return self.client.application_credentials.delete( application_credential=application_credential_id) except Exception as e: LOG.error("Aborting App Creds request due to error: %s", e) raise 
exception.ManilaBarbicanAppCredsError() def create_secret_access(context, secret_ref, conf=CONF): BarbicanSecretACL(conf).create_secret_access(context, secret_ref) def delete_secret_access(context, secret_ref, conf=CONF): BarbicanSecretACL(conf).delete_secret_access(context, secret_ref) def get_secret_href(context, secret_ref, conf=CONF): return BarbicanSecretACL(conf).get_secret_href(context, secret_ref) def create_application_credentials(context, secret, conf=CONF): return BarbicanUserAppCreds(conf).create_application_credentials( context, secret) def get_application_credentials(context, application_credential_id, conf=CONF): return BarbicanUserAppCreds(conf).get_application_credentials( context, application_credential_id) def delete_application_credentials(context, application_credential_id, conf=CONF): BarbicanUserAppCreds(conf).delete_application_credentials( context, application_credential_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8736722 manila-21.0.0/manila/lock/0000775000175000017500000000000000000000000015247 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/lock/__init__.py0000664000175000017500000000000000000000000017346 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/lock/api.py0000664000175000017500000002355200000000000016401 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests related to resource locks. """ from oslo_log import log as logging from manila.common import constants from manila.db import base from manila import exception from manila import policy LOG = logging.getLogger(__name__) class API(base.Base): """API for handling resource locks.""" resource_get = { "share": "share_get", "access_rule": "share_access_get_with_context" } resource_lock_disallowed_statuses = { "share": constants.DISALLOWED_STATUS_WHEN_LOCKING_SHARES, "access_rule": constants.DISALLOWED_STATUS_WHEN_LOCKING_ACCESS_RULES } def _get_lock_context(self, context): if context.is_service: lock_context = 'service' elif context.is_admin: lock_context = 'admin' else: lock_context = 'user' return { 'lock_context': lock_context, 'user_id': context.user_id, 'project_id': context.project_id, } def _check_allow_lock_manipulation(self, context, resource_lock): """Lock owners may not manipulate a lock if lock_context disallows The logic enforced by this method is that user created locks can be manipulated by all roles, service created locks can be manipulated by service and admin roles, while admin created locks can only be manipulated by admin role: +------------+------------+--------------+---------+ | Requester | Lock Owner | Lock Context | Allowed | +------------+------------+--------------+---------+ | user | user | user | yes | | user | user | service | no | | user | admin | admin | no | | admin | user | user | yes | | admin | user | service | yes | | admin | admin | admin | yes | | service | user | user | yes | | service | user | service | yes 
| | service | admin | admin | no | +------------+------------+--------------+---------+ """ locked_by = resource_lock['lock_context'] update_requested_by = self._get_lock_context(context)['lock_context'] if ((locked_by == 'admin' and update_requested_by != 'admin') or (locked_by == 'service' and update_requested_by == 'user')): raise exception.NotAuthorized("Resource lock cannot be " "manipulated by user. Please " "contact the administrator.") def access_is_restricted(self, context, resource_lock): """Ensure the requester doesn't have visibility restrictions Call the check allow lock manipulation method as a first validation. In case it fails, the requester should not have the access rules fields entirely visible. In case it passes and the access visibility is restricted, the users will have visibility of all fields only if they have originally created the lock. """ try: self._check_allow_lock_manipulation(context, resource_lock) except exception.NotAuthorized: return True try: policy.check_policy( context, 'resource_lock', 'bypass_locked_show_action', resource_lock) except exception.NotAuthorized: return True return False def get(self, context, lock_id): """Return resource lock with the specified id.""" return self.db.resource_lock_get(context, lock_id) def get_all(self, context, search_opts=None, limit=None, offset=None, sort_key="created_at", sort_dir="desc", show_count=False): """Return resource locks for the given context.""" LOG.debug("Searching for locks by: %s", search_opts) search_opts = search_opts or {} if 'all_projects' in search_opts: allow_all_projects = policy.check_policy( context, 'resource_lock', 'get_all_projects', do_raise=False ) if not allow_all_projects: LOG.warning("User %s not allowed to query locks " "across all projects.", context.user_id) search_opts.pop('all_projects') search_opts.pop('project_id', None) locks, count = self.db.resource_lock_get_all( context, filters=search_opts, limit=limit, offset=offset, sort_key=sort_key, 
sort_dir=sort_dir, show_count=show_count, ) return locks, count def create(self, context, resource_id=None, resource_type=None, resource_action=None, lock_reason=None, resource=None): """Create a resource lock with the specified information.""" get_res_method = getattr(self.db, self.resource_get[resource_type]) if resource_action == constants.RESOURCE_ACTION_SHOW: # We can't allow visibility locks to be placed more than once, # otherwise the resource might become visible to someone else. visibility_locks, __ = self.db.resource_lock_get_all( context.elevated(), filters={'resource_id': resource_id, 'resource_action': resource_action, 'all_projects': True}) if visibility_locks: raise exception.ResourceVisibilityLockExists( resource_id=resource_id) if resource is None: resource = get_res_method(context, resource_id) policy.check_policy(context, 'resource_lock', 'create', resource) self._check_resource_state_for_locking( resource_action, resource, resource_type=resource_type) lock_context_data = self._get_lock_context(context) resource_lock = lock_context_data.copy() resource_lock.update({ 'resource_id': resource_id, 'resource_action': resource_action, 'lock_reason': lock_reason, 'resource_type': resource_type }) return self.db.resource_lock_create(context, resource_lock) def _check_resource_state_for_locking(self, resource_action, resource, resource_type='share'): """Check if resource is in a "disallowed" state for locking. For example, deletion lock on a "deleting" resource would be futile. 
""" resource_state = resource.get('status', resource.get('state', '')) disallowed_statuses = () if resource_action == 'delete': disallowed_statuses = ( self.resource_lock_disallowed_statuses[resource_type]) if resource_state in disallowed_statuses: msg = "Resource status not suitable for locking" raise exception.InvalidInput(reason=msg) if resource_type == constants.SHARE_RESOURCE_TYPE: resource_is_soft_deleted = resource.get('is_soft_deleted', False) if resource_is_soft_deleted: msg = ( "Resource cannot be locked since it has been soft deleted." ) raise exception.InvalidInput(reason=msg) def update(self, context, resource_lock, updates): """Update a resource lock with the specified information.""" lock_id = resource_lock['id'] policy.check_policy(context, 'resource_lock', 'update', resource_lock) self._check_allow_lock_manipulation(context, resource_lock) if 'resource_action' in updates: # A resource can have only one visibility lock if (updates['resource_action'] == constants.RESOURCE_ACTION_SHOW and resource_lock['resource_action'] != constants.RESOURCE_ACTION_SHOW): filters = { "resource_id": resource_lock['resource_id'], "resource_action": constants.RESOURCE_ACTION_SHOW } visibility_locks = self.get_all( context.elevated(), search_opts=filters) if visibility_locks: msg = "The resource already has a visibility lock." 
raise exception.InvalidInput(reason=msg) get_res_method = getattr( self.db, self.resource_get[resource_lock['resource_type']], ) resource = get_res_method(context, resource_lock['resource_id']) self._check_resource_state_for_locking( updates['resource_action'], resource) return self.db.resource_lock_update(context, lock_id, updates) def ensure_context_can_delete_lock(self, context, lock_id): """Ensure the requester is able to delete locks.""" resource_lock = self.db.resource_lock_get(context, lock_id) policy.check_policy(context, 'resource_lock', 'delete', resource_lock) self._check_allow_lock_manipulation(context, resource_lock) def delete(self, context, lock_id): """Delete resource lock with the specified id.""" self.ensure_context_can_delete_lock(context, lock_id) self.db.resource_lock_delete(context, lock_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/manager.py0000664000175000017500000001460700000000000016313 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base Manager class. Managers are responsible for a certain aspect of the system. It is a logical grouping of code relating to a portion of the system. 
In general other components should be using the manager to make changes to the components that it is responsible for. For example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. This allows us to keep all of the code relating to volumes in the same place. We have adopted a basic strategy of Smart managers and dumb data, which means rather than attaching methods to data objects, components should call manager methods that act on the data. Methods on managers that can be executed locally should be called directly. If a particular method must execute on a remote host, this should be done via rpc to the service that wraps the manager Managers should be responsible for most of the db access, and non-implementation specific data. Anything implementation specific that can't be generalized should be done by the Driver. In general, we prefer to have one manager with multiple drivers for different implementations, but sometimes it makes sense to have multiple managers. You can think of it this way: Abstract different overall strategies at the manager level(FlatNetwork vs VlanNetwork), and different implementations at the driver level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. 
""" from eventlet import greenpool from oslo_config import cfg from oslo_log import log from oslo_service import periodic_task from manila.db import base from manila.scheduler import rpcapi as scheduler_rpcapi from manila import version CONF = cfg.CONF LOG = log.getLogger(__name__) class PeriodicTasks(periodic_task.PeriodicTasks): def __init__(self): super(PeriodicTasks, self).__init__(CONF) class Manager(base.Base, PeriodicTasks): @property def RPC_API_VERSION(self): """Redefine this in child classes.""" raise NotImplementedError @property def target(self): """This property is used by oslo_messaging. https://wiki.openstack.org/wiki/Oslo/Messaging#API_Version_Negotiation """ if not hasattr(self, '_target'): import oslo_messaging as messaging self._target = messaging.Target(version=self.RPC_API_VERSION) return self._target def __init__(self, host=None, db_driver=None): if not host: host = CONF.host self.host = host self.additional_endpoints = [] self.availability_zone = CONF.storage_availability_zone super(Manager, self).__init__(db_driver) def periodic_tasks(self, context, raise_on_error=False): """Tasks to be run at a periodic interval.""" return self.run_periodic_tasks(context, raise_on_error=raise_on_error) def init_host(self, service_id=None): """Handle initialization if this is a standalone service. A hook point for services to execute tasks before the services are made available (i.e. showing up on RPC and starting to accept RPC calls) to other components. Child classes should override this method. :param service_id: ID of the service where the manager is running. """ pass def init_host_with_rpc(self, service_id=None): """A hook for service to do jobs after RPC is ready. Like init_host(), this method is a hook where services get a chance to execute tasks that *need* RPC. Child classes should override this method. :param service_id: ID of the service where the manager is running. 
""" pass def service_version(self, context): return version.version_string() def service_config(self, context): config = {} for key in CONF: config[key] = CONF.get(key, None) return config def is_service_ready(self): """Method indicating if service is ready. This method should be overridden by subclasses which will return False when the back end is not ready yet. """ return True class SchedulerDependentManager(Manager): """Periodically send capability updates to the Scheduler services. Services that need to update the Scheduler of their capabilities should derive from this class. Otherwise they can derive from manager.Manager directly. Updates are only sent after update_service_capabilities is called with non-None values. """ def __init__(self, host=None, db_driver=None, service_name='undefined'): self.last_capabilities = None self.service_name = service_name self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self._tp = greenpool.GreenPool() super(SchedulerDependentManager, self).__init__(host, db_driver) def _add_to_threadpool(self, func, *args, **kwargs): self._tp.spawn_n(func, *args, **kwargs) def update_service_capabilities(self, capabilities): """Remember these capabilities to send on next periodic update.""" self.last_capabilities = capabilities @periodic_task.periodic_task def _publish_service_capabilities(self, context): """Pass data back to the scheduler at a periodic interval.""" if self.last_capabilities: LOG.debug('Notifying Schedulers of capabilities ...') self.scheduler_rpcapi.update_service_capabilities( context, self.service_name, self.host, self.last_capabilities) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8736722 manila-21.0.0/manila/message/0000775000175000017500000000000000000000000015743 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/message/__init__.py0000664000175000017500000000000000000000000020042 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/message/api.py0000664000175000017500000000673200000000000017076 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests related to user facing messages. """ import datetime from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from manila.db import base from manila.message import message_field from manila.message import message_levels messages_opts = [ cfg.IntOpt('message_ttl', default=2592000, help='Message minimum life in seconds.'), cfg.IntOpt('message_reap_interval', default=86400, help='Interval between periodic task runs to clean expired ' 'messages in seconds.'), ] CONF = cfg.CONF CONF.register_opts(messages_opts) LOG = logging.getLogger(__name__) class API(base.Base): """API for handling user messages.""" def create(self, context, action, project_id, resource_type=None, resource_id=None, exception=None, detail=None, level=message_levels.ERROR): """Create a message with the specified information.""" LOG.info("Creating message record for request_id = %s", context.request_id) # Updates expiry time for message as per message_ttl config. 
expires_at = (timeutils.utcnow() + datetime.timedelta( seconds=CONF.message_ttl)) detail_id = message_field.translate_detail_id(exception, detail) message_record = { 'project_id': project_id, 'request_id': context.request_id, 'resource_type': resource_type, 'resource_id': resource_id, 'action_id': action[0], 'detail_id': detail_id, 'message_level': level, 'expires_at': expires_at, } try: self.db.message_create(context, message_record) except Exception: LOG.exception(("Failed to create message record " "for request_id %s"), context.request_id) def get(self, context, id): """Return message with the specified message id.""" return self.db.message_get(context, id) def get_all(self, context, search_opts=None, limit=None, offset=None, sort_key=None, sort_dir=None): """Return messages for the given context.""" LOG.debug("Searching for messages by: %s", search_opts) search_opts = search_opts or {} messages = self.db.message_get_all(context, filters=search_opts, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) return messages def delete(self, context, id): """Delete message with the specified message id.""" return self.db.message_destroy(context, id) def cleanup_expired_messages(self, context): ctx = context.elevated() count = self.db.cleanup_expired_messages(ctx) LOG.info("Deleted %s expired messages.", count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/message/message_field.py0000664000175000017500000002553200000000000021113 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from manila import exception
from manila.i18n import _


class Resource(object):
    # Identifiers naming the type of resource a user message refers to.

    SHARE = 'SHARE'
    SHARE_GROUP = 'SHARE_GROUP'
    SHARE_REPLICA = 'SHARE_REPLICA'
    SHARE_SNAPSHOT = 'SHARE_SNAPSHOT'
    SECURITY_SERVICE = 'SECURITY_SERVICE'
    SHARE_NETWORK_SUBNET = 'SHARE_NETWORK_SUBNET'


class Action(object):
    # Each action is an (id, translated description) tuple; only the id is
    # persisted, translate_action() maps it back to the description.

    ALLOCATE_HOST = ('001', _('allocate host'))
    CREATE = ('002', _('create'))
    DELETE_ACCESS_RULES = ('003', _('delete access rules'))
    PROMOTE = ('004', _('promote'))
    UPDATE = ('005', _('update'))
    REVERT_TO_SNAPSHOT = ('006', _('revert to snapshot'))
    DELETE = ('007', _('delete'))
    EXTEND = ('008', _('extend'))
    SHRINK = ('009', _('shrink'))
    UPDATE_ACCESS_RULES = ('010', _('update access rules'))
    ADD_UPDATE_SECURITY_SERVICE = ('011', _('add or update security service'))
    TRANSFER_ACCEPT = ('026', _('transfer accept'))
    UPDATE_METADATA = ('027', _('update_metadata'))
    RESTORE_BACKUP = ('028', _('restore share backup'))
    ALL = (ALLOCATE_HOST,
           CREATE,
           DELETE_ACCESS_RULES,
           PROMOTE,
           UPDATE,
           REVERT_TO_SNAPSHOT,
           DELETE,
           EXTEND,
           SHRINK,
           UPDATE_ACCESS_RULES,
           ADD_UPDATE_SECURITY_SERVICE,
           TRANSFER_ACCEPT,
           UPDATE_METADATA,
           RESTORE_BACKUP)


class Detail(object):
    # Each detail is an (id, translated user-facing text) tuple; only the id
    # is persisted, translate_detail() maps it back to the text.

    UNKNOWN_ERROR = ('001', _('An unknown error occurred.'))
    NO_VALID_HOST = (
        '002', _("No storage could be allocated for this share request. "
                 "Trying again with a different size or share type may "
                 "succeed."))
    UNEXPECTED_NETWORK = (
        '003', _("Driver does not expect share-network to be provided with "
                 "current configuration."))
    NO_SHARE_SERVER = (
        '004', _("Could not find an existing share server or allocate one on "
                 "the share network provided. You may use a different share "
                 "network, or verify the network details in the share network "
                 "and retry your request. If this doesn't work, contact your "
                 "administrator to troubleshoot issues with your network."))
    NO_ACTIVE_AVAILABLE_REPLICA = (
        '005', _("An 'active' replica must exist in 'available' state to "
                 "create a new replica for share."))
    NO_ACTIVE_REPLICA = (
        '006', _("Share has no replica with 'replica_state' set to 'active'."))

    # Template used by the per-scheduler-filter details below.
    FILTER_MSG = _("No storage could be allocated for this share request, "
                   "%s filter didn't succeed.")
    FILTER_AVAILABILITY = ('007', FILTER_MSG % 'AvailabilityZone')
    FILTER_CAPABILITIES = ('008', FILTER_MSG % 'Capabilities')
    FILTER_CAPACITY = ('009', FILTER_MSG % 'Capacity')
    FILTER_DRIVER = ('010', FILTER_MSG % 'Driver')
    FILTER_IGNORE = ('011', FILTER_MSG % 'IgnoreAttemptedHosts')
    FILTER_JSON = ('012', FILTER_MSG % 'Json')
    FILTER_RETRY = ('013', FILTER_MSG % 'Retry')
    FILTER_REPLICATION = ('014', FILTER_MSG % 'ShareReplication')
    DRIVER_FAILED_EXTEND = (
        '015',
        _("Share Driver failed to extend share, The share status has been "
          "set to extending_error. This action cannot be re-attempted until "
          "the status has been rectified. Contact your administrator to "
          "determine the cause of this failure."))
    FILTER_CREATE_FROM_SNAPSHOT = ('016', FILTER_MSG % 'CreateFromSnapshot')
    DRIVER_FAILED_CREATING_FROM_SNAP = (
        '017',
        _("Share Driver has failed to create the share from snapshot. This "
          "operation can be re-attempted by creating a new share. Contact "
          "your administrator to determine the cause of this failure."))
    DRIVER_REFUSED_SHRINK = (
        '018',
        _("Share Driver refused to shrink the share. The size to be shrunk is"
          " smaller than the current used space. The share status has been"
          " set to available. Please select a size greater than the current"
          " used space."))
    DRIVER_FAILED_SHRINK = (
        '019',
        _("Share Driver does not support shrinking shares."
          " Shrinking share operation failed."))
    FORBIDDEN_CLIENT_ACCESS = (
        '020',
        _("Failed to grant access to client. The client ID used may be "
          "forbidden. You may try again with a different client identifier."))
    UNSUPPORTED_CLIENT_ACCESS = (
        '021',
        _("Failed to grant access to client. The access level or type may "
          "be unsupported. You may try again with a different access level "
          "or access type."))
    # NOTE(review): "UDPATE" is a typo, but the attribute name is public API
    # for this module; renaming it would break external references.
    UNSUPPORTED_ADD_UDPATE_SECURITY_SERVICE = (
        '022',
        _("Share driver has failed to setup one or more security services "
          "that are associated with the used share network. The security "
          "service may be unsupported or the provided parameters are invalid. "
          "You may try again with a different set of configurations."))
    SECURITY_SERVICE_FAILED_AUTH = (
        '023',
        _("Share Driver failed to create share due to a security service "
          "authentication issue. The security service user has either "
          "insufficient privileges or wrong credentials. Please check your "
          "user, password, ou and domain."))
    NO_DEFAULT_SHARE_TYPE = (
        '024',
        _("No default share type has been made available. "
          "You must specify a share type for creating shares."))
    MISSING_SECURITY_SERVICE = (
        '025',
        _("Share Driver failed to create share because a security service "
          "has not been added to the share network used. Please add a "
          "security service to the share network."))
    DRIVER_FAILED_TRANSFER_ACCEPT = (
        '026',
        _("Share transfer cannot be accepted without clearing access rules."))
    SHARE_NETWORK_PORT_QUOTA_LIMIT_EXCEEDED = (
        '027',
        _("Failed to create a new network port on the share network subnet. "
          "The limit of the number of ports has been exceeded. You may "
          "increase the network port quotas or free up some ports and retry. "
          "If this doesn't work, contact your administrator to troubleshoot "
          "issues with your network."))
    SHARE_BACKEND_NOT_READY_YET = (
        '028',
        _("No storage could be allocated for this share "
          "request. Share back end services are not "
          "ready yet. Contact your administrator in case "
          "retrying does not help."))
    UPDATE_METADATA_SUCCESS = (
        '029',
        _("Metadata passed to share driver successfully performed required "
          "operation."))
    UPDATE_METADATA_FAILURE = (
        '030',
        _("Metadata passed to share driver failed to perform required "
          "operation."))
    UPDATE_METADATA_NOT_DELETED = (
        '031',
        _("Metadata delete operation includes driver updatable metadata, and "
          "it is not passed to share driver to perform required operation."))
    TARGETED_RESTORE_UNSUPPORTED = (
        '032',
        _("Cannot restore a given backup to a target share, not supported by "
          "share driver."))
    NEUTRON_SUBNET_FULL = (
        '033',
        _("Share Driver failed to create share server on share network "
          "due no more free IP addresses in the neutron subnet."
          "You may free some IP addresses in the subnet "
          "or create a new subnet/share network. If this doesn't work, "
          "contact your administrator to troubleshoot "
          "issues with your network."))

    ALL = (UNKNOWN_ERROR,
           NO_VALID_HOST,
           UNEXPECTED_NETWORK,
           NO_SHARE_SERVER,
           NO_ACTIVE_AVAILABLE_REPLICA,
           NO_ACTIVE_REPLICA,
           FILTER_AVAILABILITY,
           FILTER_CAPABILITIES,
           FILTER_CAPACITY,
           FILTER_DRIVER,
           FILTER_IGNORE,
           FILTER_JSON,
           FILTER_RETRY,
           FILTER_REPLICATION,
           DRIVER_FAILED_EXTEND,
           FILTER_CREATE_FROM_SNAPSHOT,
           DRIVER_FAILED_CREATING_FROM_SNAP,
           DRIVER_REFUSED_SHRINK,
           DRIVER_FAILED_SHRINK,
           FORBIDDEN_CLIENT_ACCESS,
           UNSUPPORTED_CLIENT_ACCESS,
           UNSUPPORTED_ADD_UDPATE_SECURITY_SERVICE,
           SECURITY_SERVICE_FAILED_AUTH,
           NO_DEFAULT_SHARE_TYPE,
           MISSING_SECURITY_SERVICE,
           DRIVER_FAILED_TRANSFER_ACCEPT,
           SHARE_NETWORK_PORT_QUOTA_LIMIT_EXCEEDED,
           SHARE_BACKEND_NOT_READY_YET,
           UPDATE_METADATA_SUCCESS,
           UPDATE_METADATA_FAILURE,
           UPDATE_METADATA_NOT_DELETED,
           TARGETED_RESTORE_UNSUPPORTED,
           NEUTRON_SUBNET_FULL)

    # Exception and detail mappings
    EXCEPTION_DETAIL_MAPPINGS = {
        NO_VALID_HOST: ['NoValidHost'],
    }

    # Use special code for each filter rather then categorize all as
    # NO_VALID_HOST
    FILTER_DETAIL_MAPPINGS = {
        'AvailabilityZoneFilter': FILTER_AVAILABILITY,
        'CapabilitiesFilter': FILTER_CAPABILITIES,
        'CapacityFilter': FILTER_CAPACITY,
        'DriverFilter': FILTER_DRIVER,
        'IgnoreAttemptedHostsFilter': FILTER_IGNORE,
        'JsonFilter': FILTER_JSON,
        'RetryFilter': FILTER_RETRY,
        'ShareReplicationFilter': FILTER_REPLICATION,
        'CreateFromSnapshotFilter': FILTER_CREATE_FROM_SNAPSHOT,
    }


def translate_action(action_id):
    """Map a stored action id to its human readable description."""
    action_message = next((action[1] for action in Action.ALL
                           if action[0] == action_id), None)
    return action_message or 'unknown action'


def translate_detail(detail_id):
    """Map a stored detail id to its user-facing message text."""
    detail_message = next((action[1] for action in Detail.ALL
                           if action[0] == detail_id), None)
    return detail_message or Detail.UNKNOWN_ERROR[1]


def translate_detail_id(excep, detail):
    """Choose the detail id to persist for a message.

    The exception, when given, takes precedence over an explicit detail;
    anything unrecognized collapses to UNKNOWN_ERROR.
    """
    if excep is not None:
        detail = _translate_exception_to_detail(excep)
    if detail in Detail.ALL:
        return detail[0]
    return Detail.UNKNOWN_ERROR[0]


def _translate_exception_to_detail(ex):
    """Map an exception instance to a Detail tuple.

    Returns None (implicitly) when no mapping matches; callers treat that
    as UNKNOWN_ERROR.
    """
    if isinstance(ex, exception.NoValidHost):
        # if NoValidHost was raised because a filter failed (a filter
        # didn't return any hosts), use a filter-specific detail
        details = getattr(ex, 'detail_data', {})
        last_filter = details.get('last_filter')
        return Detail.FILTER_DETAIL_MAPPINGS.get(
            last_filter, Detail.NO_VALID_HOST)
    else:
        for key, value in Detail.EXCEPTION_DETAIL_MAPPINGS.items():
            if ex.__class__.__name__ in value:
                return key
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/message/message_levels.py0000664000175000017500000000115500000000000021315 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Message level constants."""

ERROR = 'ERROR'
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8736722 manila-21.0.0/manila/network/0000775000175000017500000000000000000000000016010 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/network/__init__.py0000664000175000017500000001256300000000000020130 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_utils import importutils from manila.db import base as db_base from manila import exception from manila.i18n import _ network_opts = [ cfg.StrOpt( 'network_api_class', default='manila.network.neutron.' 'neutron_network_plugin.NeutronNetworkPlugin', help='The full class name of the Networking API class to use.'), ] network_base_opts = [ cfg.BoolOpt( 'network_plugin_ipv4_enabled', default=True, help="Whether to support IPv4 network resource, Default=True."), cfg.BoolOpt( 'network_plugin_ipv6_enabled', default=False, help="Whether to support IPv6 network resource, Default=False. " "If this option is True, the value of " "'network_plugin_ipv4_enabled' will be ignored."), ] CONF = cfg.CONF def API(config_group_name=None, label='user'): """Selects class and config group of network plugin. :param config_group_name: name of config group to be used for registration of networking opts. 
:returns: instance of networking plugin class """ CONF.register_opts(network_opts, group=config_group_name) if config_group_name: network_api_class = getattr(CONF, config_group_name).network_api_class else: network_api_class = CONF.network_api_class cls = importutils.import_class(network_api_class) return cls(config_group_name=config_group_name, label=label) class NetworkBaseAPI(db_base.Base, metaclass=abc.ABCMeta): """User network plugin for setting up main net interfaces.""" def __init__(self, config_group_name=None, db_driver=None): if config_group_name: CONF.register_opts(network_base_opts, group=config_group_name) else: CONF.register_opts(network_base_opts) self.configuration = getattr(CONF, str(config_group_name), CONF) super(NetworkBaseAPI, self).__init__(db_driver=db_driver) def _verify_share_network(self, share_server_id, share_network): if share_network is None: msg = _("'Share network' is not provided for setting up " "network interfaces for 'Share server' " "'%s'.") % share_server_id raise exception.NetworkBadConfigurationException(reason=msg) def _verify_share_network_subnet(self, share_server_id, share_network_subnet): if share_network_subnet is None: msg = _("'Share network subnet' is not provided for setting up " "network interfaces for 'Share server' " "'%s'.") % share_server_id raise exception.NetworkBadConfigurationException(reason=msg) def update_network_allocation(self, context, share_server): """Update network allocation. Optional method to be called by the manager after share server creation which can be overloaded in case the port state has to be updated. 
:param context: RequestContext object :param share_server: share server object :return: list of updated ports or None if nothing was updated """ @abc.abstractmethod def allocate_network(self, context, share_server, share_network=None, share_network_subnet=None, **kwargs): pass @abc.abstractmethod def deallocate_network(self, context, share_server_id, share_network=None, share_network_subnet=None): pass @abc.abstractmethod def manage_network_allocations( self, context, allocations, share_server, share_network=None, share_network_subnet=None): pass @abc.abstractmethod def unmanage_network_allocations(self, context, share_server_id): pass @property def enabled_ip_versions(self): if not hasattr(self, '_enabled_ip_versions'): self._enabled_ip_versions = set() if self.configuration.network_plugin_ipv6_enabled: self._enabled_ip_versions.add(6) if self.configuration.network_plugin_ipv4_enabled: self._enabled_ip_versions.add(4) if not self._enabled_ip_versions: msg = _("Either 'network_plugin_ipv4_enabled' or " "'network_plugin_ipv6_enabled' " "should be configured to 'True'.") raise exception.NetworkBadConfigurationException(reason=msg) return self._enabled_ip_versions @abc.abstractmethod def include_network_info(self, share_network_subnet): """Includes share-network-subnet with plugin specific data.""" pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.8776722 manila-21.0.0/manila/network/linux/0000775000175000017500000000000000000000000017147 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/network/linux/__init__.py0000664000175000017500000000000000000000000021246 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/network/linux/interface.py0000664000175000017500000002207200000000000021464 
def device_name_synchronized(f):
    """Decorator serializing device operations via an interprocess lock.

    The lock name is derived from the first positional argument of the
    wrapped method (the device name), so concurrent calls touching the same
    device are mutually exclusive across processes.
    """
    def wrapper(self, *args, **kwargs):
        lock_name = "linux_interface_device_name_%s" % args[0]

        @utils.synchronized(lock_name, external=True)
        def locked_call(self, *args, **kwargs):
            return f(self, *args, **kwargs)

        return locked_call(self, *args, **kwargs)
    return wrapper
ip_cidrs: list of 'X.X.X.X/YY' strings """ device = ip_lib.IPDevice(device_name, namespace=namespace) for cidr in clear_cidrs: device.route.clear_outdated_routes(cidr) previous = {} for address in device.addr.list(scope='global', filters=['permanent']): previous[address['cidr']] = address['ip_version'] # add new addresses for ip_cidr in ip_cidrs: net = netaddr.IPNetwork(ip_cidr) if ip_cidr in previous: del previous[ip_cidr] continue device.addr.add(net.version, ip_cidr, str(net.broadcast)) # clean up any old addresses for ip_cidr, ip_version in previous.items(): device.addr.delete(ip_version, ip_cidr) # ensure that interface is first in the list device.route.pullup_route(device_name) # here we are checking for garbage devices from removed service port self._remove_outdated_interfaces(device) def _remove_outdated_interfaces(self, device): """Finds and removes unused network device.""" device_cidr_set = self._get_set_of_device_cidrs(device) for dev in ip_lib.IPWrapper().get_devices(): if dev.name != device.name and dev.name[:3] == device.name[:3]: cidr_set = self._get_set_of_device_cidrs(dev) if device_cidr_set & cidr_set: self.unplug(dev.name) def _get_set_of_device_cidrs(self, device): cidrs = set() addr_list = [] try: # NOTE(ganso): I could call ip_lib.device_exists here, but since # this is a concurrency problem, it would not fix the problem. 
class NoopInterfaceDriver(LinuxInterfaceDriver):
    """No-op driver for hosts already wired to the admin network.

    Every operation is intentionally a no-op: no L3 configuration is
    applied and nothing is plugged or unplugged.
    """

    def init_l3(self, device_name, ip_cidrs, namespace=None, clear_cidrs=[]):
        """Intentionally does nothing."""

    def plug(self, device_name, port_id, mac_address, bridge=None,
             namespace=None, prefix=None):
        """Intentionally does nothing."""

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Intentionally does nothing."""
not bridge: bridge = self.conf.ovs_integration_bridge self.check_bridge_exists(bridge) ip = ip_lib.IPWrapper() ns_dev = ip.device(device_name) if not ip_lib.device_exists(device_name, namespace=namespace): LOG.info("Device %s does not exist - creating ....", device_name) tap_name = self._get_tap_name(device_name) self._ovs_add_port(bridge, tap_name, port_id, mac_address) ns_dev.link.set_address(mac_address) # Add an interface created by ovs to the namespace. if namespace: namespace_obj = ip.ensure_namespace(namespace) namespace_obj.add_device_to_namespace(ns_dev) else: LOG.info("Device %s already exists.", device_name) if ns_dev.link.address != mac_address: LOG.warning("Reset mac address to %s", mac_address) ns_dev.link.set_address(mac_address) ns_dev.link.set_up() @device_name_synchronized def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" if not bridge: bridge = self.conf.ovs_integration_bridge tap_name = self._get_tap_name(device_name) self.check_bridge_exists(bridge) ovs = ovs_lib.OVSBridge(bridge) try: ovs.delete_port(tap_name) except RuntimeError: LOG.error("Failed unplugging interface '%s'", device_name) class BridgeInterfaceDriver(LinuxInterfaceDriver): """Driver for creating bridge interfaces.""" DEV_NAME_PREFIX = 'ns-' @device_name_synchronized def plug(self, device_name, port_id, mac_address, bridge=None, namespace=None, prefix=None): """Plugin the interface.""" ip = ip_lib.IPWrapper() if prefix: tap_name = device_name.replace(prefix, 'tap') else: tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap') if not ip_lib.device_exists(device_name, namespace=namespace): # Create ns_veth in a namespace if one is configured. 
root_veth, ns_veth = ip.add_veth(tap_name, device_name, namespace2=namespace) ns_veth.link.set_address(mac_address) else: ns_veth = ip.device(device_name) root_veth = ip.device(tap_name) LOG.warning("Device %s already exists.", device_name) root_veth.link.set_up() ns_veth.link.set_up() @device_name_synchronized def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" device = ip_lib.IPDevice(device_name, namespace) try: device.link.delete() LOG.debug("Unplugged interface '%s'", device_name) except RuntimeError: LOG.error("Failed unplugging interface '%s'", device_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/network/linux/ip_lib.py0000664000175000017500000003571200000000000020767 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class SubProcessBase(object):
    """Builds and runs ``ip`` commands, optionally inside a network
    namespace and/or as root.
    """

    def __init__(self, namespace=None):
        # Name of the network namespace to operate in; None = root netns.
        self.namespace = namespace

    def _run(self, options, command, args):
        """Run an ``ip`` command; escalate to root when a netns is set."""
        if self.namespace:
            return self._as_root(options, command, args)
        return self._execute(options, command, args)

    def _as_root(self, options, command, args, use_root_namespace=False):
        """Run an ``ip`` command as root, honoring the instance netns."""
        target_ns = None if use_root_namespace else self.namespace
        return self._execute(options, command, args, target_ns, as_root=True)

    @classmethod
    def _execute(cls, options, command, args, namespace=None, as_root=False):
        """Build the final ``ip`` argv and execute it, returning stdout."""
        prefix = (['ip', 'netns', 'exec', namespace, 'ip']
                  if namespace else ['ip'])
        flags = ['-%s' % opt for opt in options]
        argv = prefix + flags + [command] + list(args)
        return utils.execute(*argv, run_as_root=as_root)[0]
class IPDevice(SubProcessBase):
    """Represents one network device, addressable by name and namespace.

    Exposes ``link``, ``addr`` and ``route`` sub-command helpers bound to
    this device.
    """

    def __init__(self, name, namespace=None):
        super(IPDevice, self).__init__(namespace=namespace)
        self.name = name
        self.link = IpLinkCommand(self)
        self.addr = IpAddrCommand(self)
        self.route = IpRouteCommand(self)

    def __eq__(self, other):
        # NOTE(review): assumes 'other' exposes .name/.namespace; a
        # non-IPDevice operand raises AttributeError rather than returning
        # NotImplemented (preserved behavior).
        return (other is not None and self.name == other.name
                and self.namespace == other.namespace)

    def __hash__(self):
        # Bug fix: defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 (set/dict membership fails). Hash on the
        # same identity used for equality.
        return hash((self.name, self.namespace))

    def __str__(self):
        return self.name
set_name(self, name): self._as_root('set', self.name, 'name', name) self._parent.name = name def set_alias(self, alias_name): self._as_root('set', self.name, 'alias', alias_name) def delete(self): self._as_root('delete', self.name) @property def address(self): return self.attributes.get('link/ether') @property def state(self): return self.attributes.get('state') @property def mtu(self): return self.attributes.get('mtu') @property def qdisc(self): return self.attributes.get('qdisc') @property def qlen(self): return self.attributes.get('qlen') @property def alias(self): return self.attributes.get('alias') @property def attributes(self): return self._parse_line(self._run('show', self.name, options='o')) def _parse_line(self, value): if not value: return {} device_name, settings = value.replace("\\", '').split('>', 1) tokens = settings.split() keys = tokens[::2] values = [int(v) if v.isdigit() else v for v in tokens[1::2]] retval = dict(zip(keys, values)) return retval class IpAddrCommand(IpDeviceCommandBase): COMMAND = 'addr' def add(self, ip_version, cidr, broadcast, scope='global'): self._as_root('add', cidr, 'brd', broadcast, 'scope', scope, 'dev', self.name, options=[ip_version]) def delete(self, ip_version, cidr): self._as_root('del', cidr, 'dev', self.name, options=[ip_version]) def flush(self): self._as_root('flush', self.name) def list(self, scope=None, to=None, filters=None): if filters is None: filters = [] retval = [] if scope: filters += ['scope', scope] if to: filters += ['to', to] for line in self._run('show', self.name, *filters).split('\n'): line = line.strip() if not line.startswith('inet'): continue parts = line.split() if parts[0] == 'inet6': version = 6 scope = parts[3] broadcast = '::' else: version = 4 if parts[2] == 'brd': broadcast = parts[3] scope = parts[5] else: # sometimes output of 'ip a' might look like: # inet 192.168.100.100/24 scope global eth0 # and broadcast needs to be calculated from CIDR broadcast = 
str(netaddr.IPNetwork(parts[1]).broadcast) scope = parts[3] retval.append(dict(cidr=parts[1], broadcast=broadcast, scope=scope, ip_version=version, dynamic=('dynamic' == parts[-1]))) return retval class IpRouteCommand(IpDeviceCommandBase): COMMAND = 'route' def add_gateway(self, gateway, metric=None): args = ['replace', 'default', 'via', gateway] if metric: args += ['metric', metric] args += ['dev', self.name] self._as_root(*args) def delete_gateway(self, gateway): self._as_root('del', 'default', 'via', gateway, 'dev', self.name) def get_gateway(self, scope=None, filters=None): if filters is None: filters = [] retval = None if scope: filters += ['scope', scope] route_list_lines = self._run('list', 'dev', self.name, *filters).split('\n') default_route_line = next((x.strip() for x in route_list_lines if x.strip().startswith('default')), None) if default_route_line: gateway_index = 2 parts = default_route_line.split() retval = dict(gateway=parts[gateway_index]) metric_index = 4 parts_has_metric = (len(parts) > metric_index) if parts_has_metric: retval.update(metric=int(parts[metric_index])) return retval def pullup_route(self, interface_name): """Pullup route entry. Ensures that the route entry for the interface is before all others on the same subnet. 
""" device_list = [] device_route_list_lines = self._run('list', 'proto', 'kernel', 'dev', interface_name).split('\n') for device_route_line in device_route_list_lines: try: subnet = device_route_line.split()[0] except Exception: continue subnet_route_list_lines = self._run( 'list', 'proto', 'kernel', 'exact', subnet).split('\n') for subnet_route_line in subnet_route_list_lines: i = iter(subnet_route_line.split()) while next(i) != 'dev': pass device = next(i) try: while next(i) != 'src': pass src = next(i) except Exception: src = '' if device != interface_name: device_list.append((device, src)) else: break for (device, src) in device_list: self._as_root('del', subnet, 'dev', device) if (src != ''): self._as_root('append', subnet, 'proto', 'kernel', 'src', src, 'dev', device) else: self._as_root('append', subnet, 'proto', 'kernel', 'dev', device) def clear_outdated_routes(self, cidr): """Removes duplicated routes for a certain network CIDR. Removes all routes related to supplied CIDR except for the one related to this interface device. :param cidr: The network CIDR to be cleared. """ routes = self.list() items = [x for x in routes if x['Destination'] == cidr and x.get('Device') and x['Device'] != self.name] for item in items: self.delete_net_route(item['Destination'], item['Device']) def list(self): """List all routes :return: A dictionary with field 'Destination' and 'Device' for each route entry. 'Gateway' field is included if route has a gateway. """ routes = [] output = self._as_root('list') lines = output.split('\n') for line in lines: items = line.split() if len(items) > 0: item = {'Destination': items[0]} if len(items) > 1: if items[1] == 'via': item['Gateway'] = items[2] if len(items) > 3 and items[3] == 'dev': item['Device'] = items[4] if items[1] == 'dev': item['Device'] = items[2] routes.append(item) return routes def delete_net_route(self, cidr, device): """Deletes a route according to supplied CIDR and interface device. 
def device_exists(device_name, namespace=None):
    """Return True when the named device exists (optionally in a netns).

    An iproute2 'does not exist' error is treated as absence; any other
    error is re-raised.
    """
    try:
        address = IPDevice(device_name, namespace).link.address
    except Exception as e:
        if 'does not exist' in str(e):
            return False
        raise
    return bool(address)


def iproute_arg_supported(command, arg):
    """Check whether ``arg`` appears in ``command``'s help output.

    :param command: iproute2 command as an argv list, e.g. ['ip', 'link'].
    :param arg: argument name to look for.
    :returns: True when the help text on stderr mentions ``arg``.
    """
    # Bug fixes: build a new list instead of mutating the caller's argv
    # (the old ``command += ['help']`` appended into the caller's list),
    # and unpack the argv when executing — consistent with every other
    # utils.execute() call in this module, which passes argv elements as
    # separate positional arguments.
    help_cmd = command + ['help']
    stdout, stderr = utils.execute(*help_cmd, check_exit_code=False,
                                   return_stderr=True)
    return any(arg in line for line in stderr.split('\n'))
class OVSBridge(object):
    """Thin wrapper around ``ovs-vsctl`` for a single bridge."""

    def __init__(self, br_name):
        self.br_name = br_name
        self.re_id = self.re_compile_id()

    def re_compile_id(self):
        """Compile the regex matching ``ovs-vsctl`` port listings.

        Bug fix: the named groups had lost their ``<name>`` parts
        (``(?P(...`` is invalid regex syntax, so re.compile raised at
        construction time). Restored groups: ``vif_mac`` (attached-mac),
        ``vif_id`` (iface-id), ``port_name`` and ``ofport``.
        """
        external = r'external_ids\s*'
        mac = (r'attached-mac="'
               r'(?P<vif_mac>([a-fA-F\d]{2}:){5}([a-fA-F\d]{2}))"')
        iface = r'iface-id="(?P<vif_id>[^"]+)"'
        name = r'name\s*:\s"(?P<port_name>[^"]*)"'
        port = r'ofport\s*:\s(?P<ofport>-?\d+)'
        _re = (r'%(external)s:\s{ ( %(mac)s,? | %(iface)s,? | . )* }'
               r' \s+ %(name)s \s+ %(port)s'
               % {'external': external, 'mac': mac, 'iface': iface,
                  'name': name, 'port': port})
        return re.compile(_re, re.M | re.X)

    def run_vsctl(self, args):
        """Run ``ovs-vsctl`` with a 2s timeout; errors are logged, not
        raised (best-effort semantics preserved — returns None on failure).
        """
        full_args = ["ovs-vsctl", "--timeout=2"] + args
        try:
            return utils.execute(*full_args, run_as_root=True)
        except Exception:
            LOG.exception("Unable to execute %(cmd)s.", {'cmd': full_args})

    def reset_bridge(self):
        """Delete (if present) and re-create this bridge."""
        self.run_vsctl(["--", "--if-exists", "del-br", self.br_name])
        self.run_vsctl(["add-br", self.br_name])

    def delete_port(self, port_name):
        """Delete a port from this bridge, tolerating its absence."""
        self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
                        port_name])
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/network/neutron/api.py0000664000175000017500000004545000000000000020635 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2014 Mirantis Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import exceptions as ks_exec from keystoneauth1 import loading as ks_loading from neutronclient.common import exceptions as neutron_client_exc from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg from oslo_log import log from manila.common import client_auth from manila import context from manila import exception from manila.network.neutron import constants as neutron_constants from manila import utils NEUTRON_GROUP = 'neutron' neutron_opts = [ cfg.StrOpt( 'url', help='URL for connecting to neutron.'), cfg.IntOpt( 'url_timeout', deprecated_for_removal=True, deprecated_reason='This parameter has had no effect since 2.0.0. ' 'The timeout parameter should be used instead.', deprecated_since='Yoga', default=30, help='Timeout value for connecting to neutron in seconds.'), cfg.StrOpt( 'auth_strategy', deprecated_for_removal=True, deprecated_reason='This parameter has had no effect since 2.0.0. 
' 'Use the auth_type parameter to select ' 'authentication type', deprecated_since='Yoga', default='keystone', help='Auth strategy for connecting to neutron in admin context.'), cfg.StrOpt( 'endpoint_type', default='publicURL', choices=['publicURL', 'internalURL', 'adminURL', 'public', 'internal', 'admin'], help='Endpoint type to be used with neutron client calls.'), cfg.StrOpt( 'region_name', help='Region name for connecting to neutron in admin context.'), ] CONF = cfg.CONF LOG = log.getLogger(__name__) class PortBindingAlreadyExistsClient(neutron_client_exc.Conflict): pass # We need to monkey-patch neutronclient.common.exceptions module, to make # neutron client to raise error specific exceptions. E.g. exception # PortBindingAlreadyExistsClient is raised for Neutron API error # PortBindingAlreadyExists. If not defined, a general exception of type # Conflict will be raised. neutron_client_exc.PortBindingAlreadyExistsClient = \ PortBindingAlreadyExistsClient def list_opts(): return client_auth.AuthClientLoader.list_opts(NEUTRON_GROUP) class API(object): """API for interacting with the neutron 2.x API. :param configuration: instance of config or config group. 
""" def __init__(self, config_group_name=None): self.config_group_name = config_group_name or 'DEFAULT' ks_loading.register_session_conf_options( CONF, NEUTRON_GROUP) ks_loading.register_auth_conf_options(CONF, NEUTRON_GROUP) CONF.register_opts(neutron_opts, NEUTRON_GROUP) self.configuration = getattr(CONF, self.config_group_name, CONF) self.last_neutron_extension_sync = None self.extensions = {} self.auth_obj = None @property def client(self): return self.get_client(context.get_admin_context()) def get_client(self, context): if not self.auth_obj: self.auth_obj = client_auth.AuthClientLoader( client_class=clientv20.Client, cfg_group=NEUTRON_GROUP) return self.auth_obj.get_client( self, context, endpoint_type=CONF[NEUTRON_GROUP].endpoint_type, region_name=CONF[NEUTRON_GROUP].region_name, endpoint_override=CONF[NEUTRON_GROUP].url, ) @property def admin_project_id(self): if self.client.httpclient.auth_token is None: try: self.client.httpclient.authenticate() except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) return self.client.httpclient.get_project_id() def get_all_admin_project_networks(self): search_opts = {'tenant_id': self.admin_project_id, 'shared': False} nets = self.client.list_networks(**search_opts).get('networks', []) return nets def create_port(self, tenant_id, network_id, host_id=None, subnet_id=None, fixed_ip=None, device_owner=None, device_id=None, mac_address=None, port_security_enabled=True, security_group_ids=None, dhcp_opts=None, **kwargs): return self._create_port(tenant_id, network_id, host_id=host_id, subnet_id=subnet_id, fixed_ip=fixed_ip, device_owner=device_owner, device_id=device_id, mac_address=mac_address, port_security_enabled=port_security_enabled, security_group_ids=security_group_ids, dhcp_opts=dhcp_opts, **kwargs) @utils.retry(retry_param=ks_exec.ConnectFailure, retries=5) def _create_port(self, tenant_id, network_id, host_id=None, subnet_id=None, 
fixed_ip=None, device_owner=None, device_id=None, mac_address=None, port_security_enabled=True, security_group_ids=None, dhcp_opts=None, name=None, **kwargs): try: port_req_body = {'port': {}} port_req_body['port']['network_id'] = network_id port_req_body['port']['admin_state_up'] = True port_req_body['port']['tenant_id'] = tenant_id if not port_security_enabled: port_req_body['port']['port_security_enabled'] = ( port_security_enabled) elif security_group_ids: port_req_body['port']['security_groups'] = security_group_ids if mac_address: port_req_body['port']['mac_address'] = mac_address if host_id: if not self._has_port_binding_extension(): msg = ("host_id (%(host_id)s) specified but neutron " "doesn't support port binding. Please activate the " "extension accordingly." % {"host_id": host_id}) raise exception.NetworkException(message=msg) port_req_body['port']['binding:host_id'] = host_id if dhcp_opts is not None: port_req_body['port']['extra_dhcp_opts'] = dhcp_opts if subnet_id: fixed_ip_dict = {'subnet_id': subnet_id} if fixed_ip: fixed_ip_dict.update({'ip_address': fixed_ip}) port_req_body['port']['fixed_ips'] = [fixed_ip_dict] if device_owner: port_req_body['port']['device_owner'] = device_owner if device_id: port_req_body['port']['device_id'] = device_id if name: port_req_body['port']['name'] = name if kwargs: port_req_body['port'].update(kwargs) port = self.client.create_port(port_req_body).get('port', {}) return port except neutron_client_exc.NeutronClientException as e: LOG.exception('Neutron error creating port on network %s', network_id) if e.status_code == 409: raise exception.PortLimitExceeded() raise exception.NetworkException(code=e.status_code, message=e.message) except neutron_client_exc.IpAddressGenerationFailureClient: LOG.warning('No free IP addresses in neutron subnet %s', subnet_id) raise exception.IpAddressGenerationFailureClient() except ks_exec.ConnectFailure: LOG.warning('Create Port: Neutron connection failure') # check if port is created 
in neutron else re-raise connectFailure search_opts = { 'device_id': device_id, 'network_id': network_id, 'name': name } try: ports = self.list_ports(**search_opts) return ports[0] except ks_exec.ConnectFailure as kse: raise kse @utils.retry(retry_param=ks_exec.ConnectFailure, retries=5) def delete_port(self, port_id): try: self.client.delete_port(port_id) except neutron_client_exc.PortNotFoundClient: LOG.warning('Neutron port not found: %s', port_id) pass except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) except ks_exec.ConnectFailure as e: raise e def delete_subnet(self, subnet_id): try: self.client.delete_subnet(subnet_id) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def list_ports(self, **search_opts): """List ports for the client based on search options.""" return self.client.list_ports(**search_opts).get('ports') @utils.retry(retry_param=ks_exec.ConnectFailure, retries=5) def show_port(self, port_id): """Return the port for the client given the port id.""" try: return self.client.show_port(port_id).get('port') except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) except ks_exec.ConnectFailure as e: raise e def get_all_networks(self): """Get all networks for client.""" return self.client.list_networks().get('networks') @utils.retry(retry_param=ks_exec.ConnectFailure, retries=5) def get_network(self, network_uuid): """Get specific network for client.""" try: network = self.client.show_network(network_uuid).get('network', {}) return network except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) except ks_exec.ConnectFailure as e: raise e def get_subnet(self, subnet_uuid): """Get specific subnet for client.""" try: return 
self.client.show_subnet(subnet_uuid).get('subnet', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def list_extensions(self): extensions_list = self.client.list_extensions().get('extensions') return {ext['name']: ext for ext in extensions_list} def _has_port_binding_extension(self): if not self.extensions: self.extensions = self.list_extensions() return neutron_constants.PORTBINDING_EXT in self.extensions def router_create(self, tenant_id, name): router_req_body = {'router': {}} router_req_body['router']['tenant_id'] = tenant_id router_req_body['router']['name'] = name try: return self.client.create_router(router_req_body).get('router', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def network_create(self, tenant_id, name): network_req_body = {'network': {}} network_req_body['network']['tenant_id'] = tenant_id network_req_body['network']['name'] = name try: return self.client.create_network( network_req_body).get('network', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def subnet_create(self, tenant_id, net_id, name, cidr, no_gateway=False): subnet_req_body = {'subnet': {}} subnet_req_body['subnet']['tenant_id'] = tenant_id subnet_req_body['subnet']['name'] = name subnet_req_body['subnet']['network_id'] = net_id subnet_req_body['subnet']['cidr'] = cidr subnet_req_body['subnet']['ip_version'] = 4 if no_gateway: subnet_req_body['subnet']['gateway_ip'] = None try: return self.client.create_subnet( subnet_req_body).get('subnet', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def router_add_interface(self, router_id, subnet_id, port_id=None): body = {} if subnet_id: body['subnet_id'] = subnet_id if port_id: body['port_id'] = port_id try: 
self.client.add_interface_router(router_id, body) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def router_remove_interface(self, router_id, subnet_id, port_id=None): body = {} if subnet_id: body['subnet_id'] = subnet_id if port_id: body['port_id'] = port_id try: self.client.remove_interface_router(router_id, body) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def router_list(self): try: return self.client.list_routers().get('routers', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def update_port_fixed_ips(self, port_id, fixed_ips): try: port_req_body = {'port': fixed_ips} port = self.client.update_port( port_id, port_req_body).get('port', {}) return port except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def bind_port_to_host(self, port_id, host, vnic_type): """Add an inactive binding to existing port.""" try: data = {"binding": {"host": host, "vnic_type": vnic_type}} return self.client.create_port_binding(port_id, data)['binding'] except neutron_client_exc.PortBindingAlreadyExistsClient as e: LOG.warning('Port binding already exists: %s', e) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException( code=e.status_code, message=e.message) def delete_port_binding(self, port_id, host): try: return self.client.delete_port_binding(port_id, host) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException( code=e.status_code, message=e.message) def activate_port_binding(self, port_id, host): try: return self.client.activate_port_binding(port_id, host) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException( code=e.status_code, message=e.message) def 
show_router(self, router_id): try: return self.client.show_router(router_id).get('router', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def router_update_routes(self, router_id, routes): try: router_req_body = {'router': routes} port = self.client.update_router( router_id, router_req_body).get('router', {}) return port except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def update_subnet(self, subnet_uuid, name): """Update specific subnet for client.""" subnet_req_body = {'subnet': {'name': name}} try: return self.client.update_subnet( subnet_uuid, subnet_req_body).get('subnet', {}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException(code=e.status_code, message=e.message) def security_group_list(self, search_opts=None): try: return self.client.list_security_groups(**search_opts) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException( code=e.status_code, message=e.message) def security_group_create(self, name, description=""): try: return self.client.create_security_group( {'security_group': {"name": name, "description": description}}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException( code=e.status_code, message=e.message) def security_group_rule_create(self, parent_group_id, ip_protocol=None, from_port=None, to_port=None, cidr=None, group_id=None, direction="ingress"): request = {"security_group_id": parent_group_id, "protocol": ip_protocol, "remote_ip_prefix": cidr, "remote_group_id": group_id, "direction": direction} if ip_protocol != "icmp": request["port_range_min"] = from_port request["port_range_max"] = to_port try: return self.client.create_security_group_rule( {"security_group_rule": request}) except neutron_client_exc.NeutronClientException as e: raise exception.NetworkException( 
code=e.status_code, message=e.message) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/network/neutron/constants.py0000664000175000017500000000145000000000000022070 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. PROVIDER_NW_EXT = 'Provider Network' PORTBINDING_EXT = 'Port Binding' PORT_STATUS_ERROR = 'ERROR' PORT_STATUS_ACTIVE = 'ACTIVE' VIF_TYPE_BINDING_FAILED = 'binding_failed' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/network/neutron/neutron_network_plugin.py0000664000175000017500000010506500000000000024704 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ipaddress import socket from oslo_config import cfg from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila import network from manila.network.neutron import api as neutron_api from manila.network.neutron import constants as neutron_constants from manila.share import utils as share_utils from manila import utils LOG = log.getLogger(__name__) neutron_network_plugin_opts = [ cfg.StrOpt( 'neutron_physical_net_name', help="The name of the physical network to determine which net segment " "is used. This opt is optional and will only be used for " "networks configured with multiple segments."), ] neutron_single_network_plugin_opts = [ cfg.StrOpt( 'neutron_net_id', help="Default Neutron network that will be used for share server " "creation. This opt is used only with " "class 'NeutronSingleNetworkPlugin'."), cfg.StrOpt( 'neutron_subnet_id', help="Default Neutron subnet that will be used for share server " "creation. Should be assigned to network defined in opt " "'neutron_net_id'. This opt is used only with " "class 'NeutronSingleNetworkPlugin'."), ] neutron_bind_network_plugin_opts = [ cfg.StrOpt( 'neutron_vnic_type', help="vNIC type used for binding.", choices=['baremetal', 'normal', 'direct', 'direct-physical', 'macvtap'], default='baremetal'), cfg.StrOpt( "neutron_host_id", help="Host ID to be used when creating neutron port. If not set " "host is set to manila-share host by default.", default=socket.gethostname()), ] neutron_binding_profile = [ cfg.ListOpt( "neutron_binding_profiles", help="A list of binding profiles to be used during port binding. This " "option can be used with the NeutronBindNetworkPlugin. The value for " "this option has to be a comma separated list of names that " "correspond to each binding profile. 
Each binding profile needs to be " "specified as an individual configuration section using the binding " "profile name as the section name."), ] neutron_binding_profile_opts = [ cfg.StrOpt( 'neutron_switch_id', help="Switch ID for binding profile."), cfg.StrOpt( 'neutron_port_id', help="Port ID on the given switch.",), cfg.DictOpt( 'neutron_switch_info', help="Switch label. For example: 'switch_ip: 10.4.30.5'. Multiple " "key-value pairs separated by commas are accepted.",), ] CONF = cfg.CONF class NeutronNetworkPlugin(network.NetworkBaseAPI): def __init__(self, *args, **kwargs): db_driver = kwargs.pop('db_driver', None) config_group_name = kwargs.get('config_group_name', 'DEFAULT') super(NeutronNetworkPlugin, self).__init__(config_group_name=config_group_name, db_driver=db_driver) self._neutron_api = None self._neutron_api_args = args self._neutron_api_kwargs = kwargs self._label = kwargs.pop('label', 'user') CONF.register_opts( neutron_network_plugin_opts, group=self.neutron_api.config_group_name) @property def label(self): return self._label @property @utils.synchronized("instantiate_neutron_api") def neutron_api(self): if not self._neutron_api: self._neutron_api = neutron_api.API(*self._neutron_api_args, **self._neutron_api_kwargs) return self._neutron_api def include_network_info(self, share_network_subnet): """Includes share-network-subnet with plugin specific data.""" self._store_and_get_neutron_net_info(None, share_network_subnet, save_db=False) def _store_and_get_neutron_net_info(self, context, share_network_subnet, save_db=True): is_external_network = self._save_neutron_network_data( context, share_network_subnet, save_db=save_db) self._save_neutron_subnet_data(context, share_network_subnet, save_db=save_db) return is_external_network def allocate_network(self, context, share_server, share_network=None, share_network_subnet=None, **kwargs): """Allocate network resources using given network information. 
Create neutron ports for a given neutron network and subnet, create manila db records for allocated neutron ports. :param context: RequestContext object :param share_server: share server data :param share_network: share network data :param share_network_subnet: share network subnet data :param kwargs: allocations parameters given by the back-end driver. Supported params: 'count' - how many allocations should be created 'device_owner' - set owner for network allocations :rtype: list of :class: 'dict' """ if not self._has_provider_network_extension(): msg = "%s extension required" % neutron_constants.PROVIDER_NW_EXT raise exception.NetworkBadConfigurationException(reason=msg) self._verify_share_network(share_server['id'], share_network) self._verify_share_network_subnet(share_server['id'], share_network_subnet) is_external_network = self._store_and_get_neutron_net_info( context, share_network_subnet) allocation_count = kwargs.get('count', 1) device_owner = kwargs.get('device_owner', 'share') ports = [] for current_count in range(0, allocation_count): ports.append( self._create_port(context, share_server, share_network, share_network_subnet, device_owner, current_count, is_external_network=is_external_network), ) return ports def manage_network_allocations( self, context, allocations, share_server, share_network=None, share_network_subnet=None): self._verify_share_network_subnet(share_server['id'], share_network_subnet) self._store_and_get_neutron_net_info(context, share_network_subnet) # We begin matching the allocations to known neutron ports and # finally return the non-consumed allocations remaining_allocations = list(allocations) fixed_ip_filter = ('subnet_id=' + share_network_subnet['neutron_subnet_id']) port_list = self.neutron_api.list_ports( network_id=share_network_subnet['neutron_net_id'], device_owner='manila:share', fixed_ips=fixed_ip_filter) selected_ports = self._get_ports_respective_to_ips( remaining_allocations, port_list) LOG.debug("Found matching 
allocations in Neutron:" " %s", selected_ports) for selected_port in selected_ports: port_dict = { 'id': selected_port['port']['id'], 'share_server_id': share_server['id'], 'ip_address': selected_port['allocation'], 'gateway': share_network_subnet['gateway'], 'mac_address': selected_port['port']['mac_address'], 'status': constants.STATUS_ACTIVE, 'label': self.label, 'network_type': share_network_subnet.get('network_type'), 'segmentation_id': share_network_subnet.get('segmentation_id'), 'ip_version': share_network_subnet['ip_version'], 'cidr': share_network_subnet['cidr'], 'mtu': share_network_subnet['mtu'], } # NOTE(felipe_rodrigues): admin plugin does not have any Manila # share net subnet, its data is from manila configuration file. if self.label != 'admin': port_dict['share_network_subnet_id'] = ( share_network_subnet['id']) # There should not be existing allocations with the same port_id. try: existing_port = self.db.network_allocation_get( context, selected_port['port']['id'], read_deleted=False) except exception.NotFound: pass else: msg = _("There were existing conflicting manila network " "allocations found while trying to manage share " "server %(new_ss)s. 
The conflicting port belongs to " "share server %(old_ss)s.") % { 'new_ss': share_server['id'], 'old_ss': existing_port['share_server_id'], } raise exception.ManageShareServerError(reason=msg) # If there are previously deleted allocations, we undelete them try: self.db.network_allocation_get( context, selected_port['port']['id'], read_deleted=True) except exception.NotFound: self.db.network_allocation_create(context, port_dict) else: port_dict.pop('id') port_dict.update({ 'deleted_at': None, 'deleted': 'False', }) self.db.network_allocation_update( context, selected_port['port']['id'], port_dict, read_deleted=True) remaining_allocations.remove(selected_port['allocation']) return remaining_allocations def unmanage_network_allocations(self, context, share_server_id): ports = self.db.network_allocations_get_for_share_server( context, share_server_id) for port in ports: self.db.network_allocation_delete(context, port['id']) def _get_ports_respective_to_ips(self, allocations, port_list): selected_ports = [] for port in port_list: for ip in port['fixed_ips']: if ip['ip_address'] in allocations: if not any(port['id'] == p['port']['id'] for p in selected_ports): selected_ports.append( {'port': port, 'allocation': ip['ip_address']}) else: LOG.warning("Port %s has more than one IP that " "matches allocations, please use ports " "respective to only one allocation IP.", port['id']) return selected_ports def _get_matched_ip_address(self, fixed_ips, ip_version): """Get first ip address which matches the specified ip_version.""" for ip in fixed_ips: try: address = ipaddress.ip_address(str(ip['ip_address'])) if address.version == ip_version: return ip['ip_address'] except ValueError: LOG.error("%(address)s isn't a valid ip " "address, omitted.", {'address': ip['ip_address']}) msg = _("Can not find any IP address with configured IP " "version %(version)s in share-network.") % {'version': ip_version} raise exception.NetworkBadConfigurationException(reason=msg) def 
deallocate_network(self, context, share_server_id, share_network=None, share_network_subnet=None): """Deallocate neutron network resources for the given share server. Delete previously allocated neutron ports, delete manila db records for deleted ports. :param context: RequestContext object :param share_server_id: id of share server :param share_network: share network data :param share_network_subnet: share network subnet data :rtype: None """ ports = self.db.network_allocations_get_for_share_server( context, share_server_id) for port in ports: self._delete_port(context, port) # It may be possible that there are ports existing without a # corresponding manila network allocation entry in the manila db, # because port create request may have been successfully sent to # neutron, but the response, the created port could not be stored # in manila due to unreachable db if share_network_subnet: ports = [] try: ports = self.neutron_api.list_ports( network_id=share_network_subnet['neutron_net_id'], device_owner='manila:share', device_id=share_server_id) except exception.NetworkException: LOG.warning("Failed to list ports using neutron API during " "deallocate_network.") for port in ports: LOG.debug(f"Deleting orphaned port {port['id']} belonging to " f"share server {share_server_id} in neutron " f"network {share_network_subnet['neutron_net_id']}") self._delete_port(context, port, ignore_db=True) def _get_port_create_args(self, share_server, share_network_subnet, device_owner, count=0, is_external_network=False): return { "network_id": share_network_subnet['neutron_net_id'], "subnet_id": share_network_subnet['neutron_subnet_id'], "device_owner": 'manila:' + device_owner, "device_id": share_server.get('id'), "name": share_server.get('id') + '_' + str(count), # NOTE (gouthamr): we create disabled ports with external networks # since the actual ports are not managed by neutron. The ports # neutron creates merely assist in IPAM. 
"admin_state_up": not is_external_network, } def _create_port(self, context, share_server, share_network, share_network_subnet, device_owner, count=0, is_external_network=False): create_args = self._get_port_create_args( share_server, share_network_subnet, device_owner, count, is_external_network=is_external_network) port = self.neutron_api.create_port( share_network['project_id'], **create_args) if is_external_network: msg = ( f"Port '{port['id']}' is disabled to prevent improper " f"routing on an external network." ) LOG.info(msg) ip_address = self._get_matched_ip_address( port['fixed_ips'], share_network_subnet['ip_version']) port_dict = { 'id': port['id'], 'share_server_id': share_server['id'], 'ip_address': ip_address, 'gateway': share_network_subnet['gateway'], 'mac_address': port['mac_address'], 'status': constants.STATUS_ACTIVE, 'label': self.label, 'network_type': share_network_subnet.get('network_type'), 'segmentation_id': share_network_subnet.get('segmentation_id'), 'ip_version': share_network_subnet['ip_version'], 'cidr': share_network_subnet['cidr'], 'mtu': share_network_subnet['mtu'], } # NOTE(felipe_rodrigues): admin plugin does not have any Manila # share net subnet, its data is from manila configuration file. 
if self.label != 'admin': port_dict['share_network_subnet_id'] = ( share_network_subnet['id']) return self.db.network_allocation_create(context, port_dict) def _delete_port(self, context, port, ignore_db=False): try: self.neutron_api.delete_port(port['id']) except exception.NetworkException: if not ignore_db: self.db.network_allocation_update( context, port['id'], {'status': constants.STATUS_ERROR}) raise else: if not ignore_db: self.db.network_allocation_delete(context, port['id']) def _has_provider_network_extension(self): extensions = self.neutron_api.list_extensions() return neutron_constants.PROVIDER_NW_EXT in extensions def _is_neutron_multi_segment(self, share_network_subnet, net_info=None): if net_info is None: net_info = self.neutron_api.get_network( share_network_subnet['neutron_net_id']) return 'segments' in net_info def _save_neutron_network_data(self, context, share_network_subnet, save_db=True): net_info = self.neutron_api.get_network( share_network_subnet['neutron_net_id']) segmentation_id = None network_type = None is_external_network = net_info.get('router:external', False) if self._is_neutron_multi_segment(share_network_subnet, net_info): # we have a multi segment network and need to identify the # lowest segment used for binding phy_nets = [] phy = self.neutron_api.configuration.neutron_physical_net_name if not phy: msg = "Cannot identify segment used for binding. Please add " "neutron_physical_net_name in configuration." raise exception.NetworkBadConfigurationException(reason=msg) for segment in net_info['segments']: phy_nets.append(segment['provider:physical_network']) if segment['provider:physical_network'] == phy: segmentation_id = segment['provider:segmentation_id'] network_type = segment['provider:network_type'] if not (segmentation_id and network_type): msg = ("No matching neutron_physical_net_name found for %s " "(found: %s)." 
% (phy, phy_nets)) raise exception.NetworkBadConfigurationException(reason=msg) else: network_type = net_info.get('provider:network_type') segmentation_id = net_info.get('provider:segmentation_id') provider_nw_dict = { 'network_type': network_type, 'segmentation_id': segmentation_id, 'mtu': net_info.get('mtu'), } share_network_subnet.update(provider_nw_dict) if self.label != 'admin' and save_db: self.db.share_network_subnet_update( context, share_network_subnet['id'], provider_nw_dict) return is_external_network def _save_neutron_subnet_data(self, context, share_network_subnet, save_db=True): subnet_info = self.neutron_api.get_subnet( share_network_subnet['neutron_subnet_id']) subnet_values = { 'cidr': subnet_info['cidr'], 'gateway': subnet_info['gateway_ip'], 'ip_version': subnet_info['ip_version'] } share_network_subnet.update(subnet_values) if self.label != 'admin' and save_db: self.db.share_network_subnet_update( context, share_network_subnet['id'], subnet_values) class NeutronSingleNetworkPlugin(NeutronNetworkPlugin): def __init__(self, *args, **kwargs): super(NeutronSingleNetworkPlugin, self).__init__(*args, **kwargs) CONF.register_opts( neutron_single_network_plugin_opts, group=self.neutron_api.config_group_name) self.net = self.neutron_api.configuration.neutron_net_id self.subnet = self.neutron_api.configuration.neutron_subnet_id self._verify_net_and_subnet() def _select_proper_share_network_subnet(self, context, share_network_subnet): if self.label != 'admin': share_network_subnet = self._update_share_network_net_data( context, share_network_subnet) else: share_network_subnet = { 'project_id': self.neutron_api.admin_project_id, 'neutron_net_id': self.net, 'neutron_subnet_id': self.subnet, } return share_network_subnet def allocate_network(self, context, share_server, share_network=None, share_network_subnet=None, **kwargs): share_network_subnet = self._select_proper_share_network_subnet( context, share_network_subnet) # Update share network project_id info 
if needed if share_network_subnet.get('project_id', None) is not None: share_network['project_id'] = share_network_subnet.pop( 'project_id') return super(NeutronSingleNetworkPlugin, self).allocate_network( context, share_server, share_network, share_network_subnet, **kwargs) def manage_network_allocations( self, context, allocations, share_server, share_network=None, share_network_subnet=None): share_network_subnet = self._select_proper_share_network_subnet( context, share_network_subnet) # Update share network project_id info if needed if share_network and share_network_subnet.get('project_id', None): share_network['project_id'] = ( share_network_subnet.pop('project_id')) return super(NeutronSingleNetworkPlugin, self).manage_network_allocations( context, allocations, share_server, share_network, share_network_subnet) def _verify_net_and_subnet(self): data = dict(net=self.net, subnet=self.subnet) if self.net and self.subnet: net = self.neutron_api.get_network(self.net) if not (net.get('subnets') and data['subnet'] in net['subnets']): raise exception.NetworkBadConfigurationException( "Subnet '%(subnet)s' does not belong to " "network '%(net)s'." % data) else: raise exception.NetworkBadConfigurationException( "Neutron net and subnet are expected to be both set. " "Got: net=%(net)s and subnet=%(subnet)s." % data) def _update_share_network_net_data(self, context, share_network_subnet): upd = dict() if not share_network_subnet.get('neutron_net_id') == self.net: if share_network_subnet.get('neutron_net_id') is not None: raise exception.NetworkBadConfigurationException( "Using neutron net id different from None or value " "specified in the config is forbidden for " "NeutronSingleNetworkPlugin. 
Allowed values: (%(net)s, " "None), received value: %(err)s" % { "net": self.net, "err": share_network_subnet.get('neutron_net_id')}) upd['neutron_net_id'] = self.net if not share_network_subnet.get('neutron_subnet_id') == self.subnet: if share_network_subnet.get('neutron_subnet_id') is not None: raise exception.NetworkBadConfigurationException( "Using neutron subnet id different from None or value " "specified in the config is forbidden for " "NeutronSingleNetworkPlugin. Allowed values: (%(snet)s, " "None), received value: %(err)s" % { "snet": self.subnet, "err": share_network_subnet.get('neutron_subnet_id')}) upd['neutron_subnet_id'] = self.subnet if upd: share_network_subnet = self.db.share_network_subnet_update( context, share_network_subnet['id'], upd) return share_network_subnet class NeutronBindNetworkPlugin(NeutronNetworkPlugin): def __init__(self, *args, **kwargs): super(NeutronBindNetworkPlugin, self).__init__(*args, **kwargs) self.binding_profiles = [] CONF.register_opts( neutron_binding_profile, group=self.neutron_api.config_group_name) conf = CONF[self.neutron_api.config_group_name] if conf.neutron_binding_profiles: for profile in conf.neutron_binding_profiles: CONF.register_opts(neutron_binding_profile_opts, group=profile) self.binding_profiles.append(profile) CONF.register_opts( neutron_bind_network_plugin_opts, group=self.neutron_api.config_group_name) self.config = self.neutron_api.configuration def update_network_allocation(self, context, share_server): if self.config.neutron_vnic_type == 'normal': ports = self.db.network_allocations_get_for_share_server( context, share_server['id']) self._wait_for_ports_bind(ports, share_server) return ports @utils.retry(retry_param=exception.NetworkBindException, retries=20) def _wait_for_ports_bind(self, ports, share_server): inactive_ports = [] for port in ports: port = self._neutron_api.show_port(port['id']) if (port['status'] == neutron_constants.PORT_STATUS_ERROR or ('binding:vif_type' in port and 
port['binding:vif_type'] == neutron_constants.VIF_TYPE_BINDING_FAILED)): msg = _("Port binding %s failed.") % port['id'] raise exception.NetworkException(msg) elif port['status'] != neutron_constants.PORT_STATUS_ACTIVE: LOG.debug("The port %(id)s is in state %(state)s. " "Wait for active state.", { "id": port['id'], "state": port['status']}) inactive_ports.append(port['id']) if len(inactive_ports) == 0: return msg = _("Ports are not fully bound for share server " "'%(s_id)s' (inactive ports: %(ports)s)") % { "s_id": share_server['id'], "ports": inactive_ports} raise exception.NetworkBindException(msg) def _get_port_create_args(self, share_server, share_network_subnet, device_owner, count=0, is_external_network=False): arguments = super( NeutronBindNetworkPlugin, self)._get_port_create_args( share_server, share_network_subnet, device_owner, count, is_external_network=is_external_network ) arguments['host_id'] = self.config.neutron_host_id arguments['binding:vnic_type'] = self.config.neutron_vnic_type if self.binding_profiles: local_links = [] for profile in self.binding_profiles: local_links.append({ 'switch_id': CONF[profile]['neutron_switch_id'], 'port_id': CONF[profile]['neutron_port_id'], 'switch_info': CONF[profile]['neutron_switch_info'], }) arguments['binding:profile'] = { "local_link_information": local_links} return arguments def _save_neutron_network_data(self, context, share_network_subnet, save_db=True): """Store the Neutron network info. In case of dynamic multi segments the segment is determined while binding the port. Therefore this method will return for multi segments network without storing network information (apart from mtu). Instead, multi segments network will wait until ports are bound and then store network information (see allocate_network()). 
""" if self._is_neutron_multi_segment(share_network_subnet): # In case of dynamic multi segment the segment is determined while # binding the port, only mtu is known and already needed self._save_neutron_network_mtu(context, share_network_subnet, save_db=save_db) return super(NeutronBindNetworkPlugin, self)._save_neutron_network_data( context, share_network_subnet, save_db=save_db) def _save_neutron_network_mtu(self, context, share_network_subnet, save_db=True): """Store the Neutron network mtu. In case of dynamic multi segments only the mtu needs storing before binding the port. """ net_info = self.neutron_api.get_network( share_network_subnet['neutron_net_id']) mtu_dict = { 'mtu': net_info['mtu'], } share_network_subnet.update(mtu_dict) if self.label != 'admin' and save_db: self.db.share_network_subnet_update( context, share_network_subnet['id'], mtu_dict) def allocate_network(self, context, share_server, share_network=None, share_network_subnet=None, **kwargs): ports = super(NeutronBindNetworkPlugin, self).allocate_network( context, share_server, share_network, share_network_subnet, **kwargs) # If vnic type is 'normal' we expect a neutron agent to bind the # ports. This action requires a vnic to be spawned by the driver. # Therefore we do not wait for the port binding here, but # return the unbound ports and expect the share manager to call # update_network_allocation after the share server was created, in # order to update the ports with the correct binding. 
if self.config.neutron_vnic_type != 'normal': self._wait_for_ports_bind(ports, share_server) if self._is_neutron_multi_segment(share_network_subnet): # update segment information after port bind super(NeutronBindNetworkPlugin, self)._save_neutron_network_data(context, share_network_subnet) for num, port in enumerate(ports): port_info = { 'network_type': share_network_subnet['network_type'], 'segmentation_id': share_network_subnet['segmentation_id'], 'cidr': share_network_subnet['cidr'], 'ip_version': share_network_subnet['ip_version'], } ports[num] = self.db.network_allocation_update( context, port['id'], port_info) return ports @utils.retry(retry_param=exception.NetworkException, retries=20) def _wait_for_network_segment(self, share_server, host): network_id = share_server['share_network_subnet']['neutron_net_id'] network = self.neutron_api.get_network(network_id) for segment in network['segments']: if segment['provider:physical_network'] == ( self.config.neutron_physical_net_name): return segment['provider:segmentation_id'] msg = _('Network segment not found on host %s') % host raise exception.NetworkException(msg) def extend_network_allocations(self, context, share_server): """Extend network to target host. This will create port bindings on target host without activating them. If network segment does not exist on target host, it will be created. :return: list of port bindings with new segmentation id on target host """ vnic_type = self.config.neutron_vnic_type host_id = self.config.neutron_host_id active_port_bindings = ( self.db.network_allocations_get_for_share_server( context, share_server['id'], label='user')) if len(active_port_bindings) == 0: raise exception.NetworkException( 'Can not extend network with no active bindings') # Create port binding on destination backend. It's safe to call neutron # api bind_port_to_host if the port is already bound to destination # host. 
for port in active_port_bindings: self.neutron_api.bind_port_to_host(port['id'], host_id, vnic_type) # Wait for network segment to be created on destination host. vlan = self._wait_for_network_segment(share_server, host_id) for port in active_port_bindings: port['segmentation_id'] = vlan return active_port_bindings def delete_extended_allocations(self, context, share_server): host_id = self.config.neutron_host_id ports = self.db.network_allocations_get_for_share_server( context, share_server['id'], label='user') for port in ports: try: self.neutron_api.delete_port_binding(port['id'], host_id) except exception.NetworkException as e: msg = 'Failed to delete port binding on port %{port}s: %{err}s' LOG.warning(msg, {'port': port['id'], 'err': e}) def cutover_network_allocations(self, context, src_share_server): src_host = share_utils.extract_host(src_share_server['host'], 'host') dest_host = self.config.neutron_host_id ports = self.db.network_allocations_get_for_share_server( context, src_share_server['id'], label='user') for port in ports: self.neutron_api.activate_port_binding(port['id'], dest_host) self.neutron_api.delete_port_binding(port['id'], src_host) return ports class NeutronBindSingleNetworkPlugin(NeutronSingleNetworkPlugin, NeutronBindNetworkPlugin): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/network/standalone_network_plugin.py0000664000175000017500000004040100000000000023640 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_config import cfg from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila import network from manila import utils standalone_network_plugin_opts = [ cfg.StrOpt( 'standalone_network_plugin_gateway', help="Gateway address that should be used. Required."), cfg.StrOpt( 'standalone_network_plugin_mask', help="Network mask that will be used. Can be either decimal " "like '24' or binary like '255.255.255.0'. Required."), cfg.StrOpt( 'standalone_network_plugin_network_type', help="Network type, such as 'flat', 'vlan', 'vxlan' or 'gre'. " "Empty value is alias for 'flat'. " "It will be assigned to share-network and share drivers will be " "able to use this for network interfaces within provisioned " "share servers. Optional.", choices=['flat', 'vlan', 'vxlan', 'gre']), cfg.IntOpt( 'standalone_network_plugin_segmentation_id', help="Set it if network has segmentation (VLAN, VXLAN, etc...). " "It will be assigned to share-network and share drivers will be " "able to use this for network interfaces within provisioned " "share servers. Optional. Example: 1001"), cfg.ListOpt( 'standalone_network_plugin_allowed_ip_ranges', help="Can be IP address, range of IP addresses or list of addresses " "or ranges. Contains addresses from IP network that are allowed " "to be used. If empty, then will be assumed that all host " "addresses from network can be used. Optional. 
" "Examples: 10.0.0.10 or 10.0.0.10-10.0.0.20 or " "10.0.0.10-10.0.0.20,10.0.0.30-10.0.0.40,10.0.0.50"), cfg.IntOpt( 'standalone_network_plugin_mtu', default=1500, help="Maximum Transmission Unit (MTU) value of the network. Default " "value is 1500."), ] CONF = cfg.CONF LOG = log.getLogger(__name__) class StandaloneNetworkPlugin(network.NetworkBaseAPI): """Standalone network plugin for share drivers. This network plugin can be used with any network platform. It can serve flat networks as well as segmented. It does not require some specific network services in OpenStack like the Neutron plugin. The only thing that plugin does is reservation and release of IP addresses from some network. """ def __init__(self, config_group_name=None, db_driver=None, label='user'): self.config_group_name = config_group_name or 'DEFAULT' super(StandaloneNetworkPlugin, self).__init__(config_group_name=self.config_group_name, db_driver=db_driver) CONF.register_opts( standalone_network_plugin_opts, group=self.config_group_name) self.configuration = getattr(CONF, self.config_group_name, CONF) self._set_persistent_network_data() self._label = label LOG.debug( "\nStandalone network plugin data for config group " "'%(config_group)s': \n" "IP version - %(ip_version)s\n" "Used network - %(net)s\n" "Used gateway - %(gateway)s\n" "Used network type - %(network_type)s\n" "Used segmentation ID - %(segmentation_id)s\n" "Allowed CIDRs - %(cidrs)s\n" "Original allowed IP ranges - %(ip_ranges)s\n" "Reserved IP addresses - %(reserved)s\n", dict( config_group=self.config_group_name, ip_version=self.ip_version, net=str(self.net), gateway=self.gateway, network_type=self.network_type, segmentation_id=self.segmentation_id, cidrs=self.allowed_cidrs, ip_ranges=self.allowed_ip_ranges, reserved=self.reserved_addresses)) @property def label(self): return self._label def _set_persistent_network_data(self): """Sets persistent data for whole plugin.""" # NOTE(tommylikehu): Standalone plugin could only support # 
either IPv4 or IPv6, so if both network_plugin_ipv4_enabled # and network_plugin_ipv6_enabled are configured True # we would only support IPv6. ipv4_enabled = getattr(self.configuration, 'network_plugin_ipv4_enabled', None) ipv6_enabled = getattr(self.configuration, 'network_plugin_ipv6_enabled', None) if ipv4_enabled: ip_version = 4 if ipv6_enabled: ip_version = 6 if ipv4_enabled and ipv6_enabled: LOG.warning("Only IPv6 is enabled, although both " "'network_plugin_ipv4_enabled' and " "'network_plugin_ipv6_enabled' are " "configured True.") self.network_type = ( self.configuration.standalone_network_plugin_network_type) self.segmentation_id = ( self.configuration.standalone_network_plugin_segmentation_id) self.gateway = self.configuration.standalone_network_plugin_gateway self.mask = self.configuration.standalone_network_plugin_mask self.allowed_ip_ranges = ( self.configuration.standalone_network_plugin_allowed_ip_ranges) self.ip_version = ip_version self.net = self._get_network() self.allowed_cidrs = self._get_list_of_allowed_addresses() self.reserved_addresses = ( str(self.net.network), self.gateway, str(self.net.broadcast)) self.mtu = self.configuration.standalone_network_plugin_mtu def _get_network(self): """Returns IPNetwork object calculated from gateway and netmask.""" if not isinstance(self.gateway, str): raise exception.NetworkBadConfigurationException( _("Configuration option 'standalone_network_plugin_gateway' " "is required and has improper value '%s'.") % self.gateway) if not isinstance(self.mask, str): raise exception.NetworkBadConfigurationException( _("Configuration option 'standalone_network_plugin_mask' is " "required and has improper value '%s'.") % self.mask) try: return netaddr.IPNetwork(self.gateway + '/' + self.mask) except netaddr.AddrFormatError as e: raise exception.NetworkBadConfigurationException( reason=e) def _get_list_of_allowed_addresses(self): """Returns list of CIDRs that can be used for getting IP addresses. 
Reads information provided via configuration, such as gateway, netmask, segmentation ID and allowed IP ranges, then performs validation of provided data. :returns: list of CIDRs as text types. :raises: exception.NetworkBadConfigurationException """ cidrs = [] if self.allowed_ip_ranges: for ip_range in self.allowed_ip_ranges: ip_range_start = ip_range_end = None if utils.is_valid_ip_address(ip_range, self.ip_version): ip_range_start = ip_range_end = ip_range elif '-' in ip_range: ip_range_list = ip_range.split('-') if len(ip_range_list) == 2: ip_range_start = ip_range_list[0] ip_range_end = ip_range_list[1] for ip in ip_range_list: utils.is_valid_ip_address(ip, self.ip_version) else: msg = _("Wrong value for IP range " "'%s' was provided.") % ip_range raise exception.NetworkBadConfigurationException( reason=msg) else: msg = _("Config option " "'standalone_network_plugin_allowed_ip_ranges' " "has incorrect value " "'%s'.") % self.allowed_ip_ranges raise exception.NetworkBadConfigurationException( reason=msg) range_instance = netaddr.IPRange(ip_range_start, ip_range_end) if range_instance not in self.net: data = dict( range=str(range_instance), net=str(self.net), gateway=self.gateway, netmask=self.net.netmask) msg = _("One of provided allowed IP ranges ('%(range)s') " "does not fit network '%(net)s' combined from " "gateway '%(gateway)s' and netmask " "'%(netmask)s'.") % data raise exception.NetworkBadConfigurationException( reason=msg) cidrs.extend( str(cidr) for cidr in range_instance.cidrs()) else: if self.net.version != self.ip_version: msg = _("Configured invalid IP version '%(conf_v)s', network " "has version ""'%(net_v)s'") % dict( conf_v=self.ip_version, net_v=self.net.version) raise exception.NetworkBadConfigurationException(reason=msg) cidrs.append(str(self.net)) return cidrs def _get_available_ips(self, context, amount): """Returns IP addresses from allowed IP range if there are unused IPs. 
:returns: IP addresses as list of text types :raises: exception.NetworkBadConfigurationException """ ips = [] if amount < 1: return ips iterator = netaddr.iter_unique_ips(*self.allowed_cidrs) for ip in iterator: ip = str(ip) if (ip in self.reserved_addresses or self.db.network_allocations_get_by_ip_address(context, ip)): continue else: ips.append(ip) if len(ips) == amount: return ips msg = _("No available IP addresses left in CIDRs %(cidrs)s. " "Requested amount of IPs to be provided '%(amount)s', " "available only '%(available)s'.") % { 'cidrs': self.allowed_cidrs, 'amount': amount, 'available': len(ips)} raise exception.NetworkBadConfigurationException(reason=msg) def include_network_info(self, share_network_subnet): """Includes share-network-subnet with plugin specific data.""" self._save_network_info(None, share_network_subnet, save_db=False) def _save_network_info(self, context, share_network_subnet, save_db=True): """Update share-network-subnet with plugin specific data.""" data = { 'network_type': self.network_type, 'segmentation_id': self.segmentation_id, 'cidr': str(self.net.cidr), 'gateway': str(self.gateway), 'ip_version': self.ip_version, 'mtu': self.mtu, } share_network_subnet.update(data) if self.label != 'admin' and save_db: self.db.share_network_subnet_update( context, share_network_subnet['id'], data) @utils.synchronized( "allocate_network_for_standalone_network_plugin", external=True) def allocate_network(self, context, share_server, share_network=None, share_network_subnet=None, **kwargs): """Allocate network resources using one dedicated network. This one has interprocess lock to avoid concurrency in creation of share servers with same IP addresses using different share-networks. 
""" allocation_count = kwargs.get('count', 1) if self.label != 'admin': self._verify_share_network(share_server['id'], share_network_subnet) else: share_network_subnet = share_network_subnet or {} self._save_network_info(context, share_network_subnet) allocations = [] ip_addresses = self._get_available_ips(context, allocation_count) for ip_address in ip_addresses: data = { 'share_server_id': share_server['id'], 'ip_address': ip_address, 'status': constants.STATUS_ACTIVE, 'label': self.label, 'network_type': share_network_subnet['network_type'], 'segmentation_id': share_network_subnet['segmentation_id'], 'cidr': share_network_subnet['cidr'], 'gateway': share_network_subnet['gateway'], 'ip_version': share_network_subnet['ip_version'], 'mtu': share_network_subnet['mtu'], } if self.label != 'admin': data['share_network_subnet_id'] = ( share_network_subnet['id']) allocations.append( self.db.network_allocation_create(context, data)) return allocations def deallocate_network(self, context, share_server_id, share_network=None, share_network_subnet=None): """Deallocate network resources for share server.""" allocations = self.db.network_allocations_get_for_share_server( context, share_server_id) for allocation in allocations: self.db.network_allocation_delete(context, allocation['id']) def unmanage_network_allocations(self, context, share_server_id): self.deallocate_network(context, share_server_id) def manage_network_allocations(self, context, allocations, share_server, share_network=None, share_network_subnet=None): if self.label != 'admin': self._verify_share_network_subnet(share_server['id'], share_network_subnet) else: share_network_subnet = share_network_subnet or {} self._save_network_info(context, share_network_subnet) # We begin matching the allocations to known neutron ports and # finally return the non-consumed allocations remaining_allocations = list(allocations) ips = [netaddr.IPAddress(allocation) for allocation in remaining_allocations] cidrs = 
[netaddr.IPNetwork(cidr) for cidr in self.allowed_cidrs] selected_allocations = [] for ip in ips: if any(ip in cidr for cidr in cidrs): allocation = str(ip) selected_allocations.append(allocation) for allocation in selected_allocations: data = { 'share_server_id': share_server['id'], 'ip_address': allocation, 'status': constants.STATUS_ACTIVE, 'label': self.label, 'network_type': share_network_subnet['network_type'], 'segmentation_id': share_network_subnet['segmentation_id'], 'cidr': share_network_subnet['cidr'], 'gateway': share_network_subnet['gateway'], 'ip_version': share_network_subnet['ip_version'], 'mtu': share_network_subnet['mtu'], } if self.label != 'admin': data['share_network_subnet_id'] = ( share_network_subnet['id']) self.db.network_allocation_create(context, data) remaining_allocations.remove(allocation) return remaining_allocations ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/opts.py0000664000175000017500000002336000000000000015662 0ustar00zuulzuul00000000000000# Copyright (c) 2014 SUSE Linux Products GmbH. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
__all__ = [ 'list_opts' ] import copy import itertools import manila.api.common import manila.api.middleware.auth import manila.common.config import manila.compute import manila.compute.nova import manila.coordination import manila.data.drivers.nfs import manila.data.helper import manila.data.manager import manila.db.api import manila.db.base import manila.exception import manila.image import manila.image.glance import manila.keymgr.barbican import manila.message.api import manila.network import manila.network.linux.interface import manila.network.neutron.api import manila.network.neutron.neutron_network_plugin import manila.network.standalone_network_plugin import manila.quota import manila.scheduler.drivers.base import manila.scheduler.drivers.simple import manila.scheduler.host_manager import manila.scheduler.manager import manila.scheduler.scheduler_options import manila.scheduler.weighers.capacity import manila.scheduler.weighers.pool import manila.service import manila.share.api import manila.share.driver import manila.share.drivers.cephfs.driver import manila.share.drivers.container.driver import manila.share.drivers.container.storage_helper import manila.share.drivers.dell_emc.driver import manila.share.drivers.dell_emc.plugins.powermax.connection import manila.share.drivers.dell_emc.plugins.powerscale.powerscale import manila.share.drivers.generic import manila.share.drivers.glusterfs import manila.share.drivers.glusterfs.common import manila.share.drivers.glusterfs.layout import manila.share.drivers.glusterfs.layout_directory import manila.share.drivers.glusterfs.layout_volume import manila.share.drivers.hdfs.hdfs_native import manila.share.drivers.hitachi.hnas.driver import manila.share.drivers.hitachi.hsp.driver import manila.share.drivers.hpe.hpe_3par_driver import manila.share.drivers.huawei.huawei_nas import manila.share.drivers.ibm.gpfs import manila.share.drivers.infinidat.infinibox import manila.share.drivers.infortrend.driver import 
manila.share.drivers.inspur.as13000.as13000_nas import manila.share.drivers.inspur.instorage.instorage import manila.share.drivers.lvm import manila.share.drivers.macrosan.macrosan_nas import manila.share.drivers.maprfs.maprfs_native import manila.share.drivers.netapp.options import manila.share.drivers.nexenta.options import manila.share.drivers.purestorage.flashblade import manila.share.drivers.qnap.qnap import manila.share.drivers.quobyte.quobyte import manila.share.drivers.service_instance import manila.share.drivers.tegile.tegile import manila.share.drivers.vastdata.driver import manila.share.drivers.windows.service_instance import manila.share.drivers.windows.winrm_helper import manila.share.drivers.zfsonlinux.driver import manila.share.drivers.zfssa.zfssashare import manila.share.drivers_private_data import manila.share.hook import manila.share.manager import manila.volume import manila.volume.cinder import manila.wsgi.eventlet_server # List of *all* options in [DEFAULT] namespace of manila. # Any new option list or option needs to be registered here. _global_opt_lists = [ # Keep list alphabetically sorted manila.api.common.api_common_opts, [manila.api.middleware.auth.use_forwarded_for_opt], manila.common.config.core_opts, manila.common.config.debug_opts, manila.common.config.global_opts, manila.compute._compute_opts, manila.coordination.coordination_opts, manila.data.drivers.nfs.nfsbackup_service_opts, manila.data.helper.data_helper_opts, manila.data.manager.backup_opts, manila.data.manager.data_opts, manila.db.api.db_opts, [manila.db.base.db_driver_opt], manila.exception.exc_log_opts, manila.image._glance_opts, manila.message.api.messages_opts, manila.network.linux.interface.OPTS, manila.network.network_opts, manila.network.network_base_opts, manila.network.neutron.neutron_network_plugin. neutron_network_plugin_opts, manila.network.neutron.neutron_network_plugin. neutron_single_network_plugin_opts, manila.network.neutron.neutron_network_plugin. 
neutron_bind_network_plugin_opts, manila.network.neutron.neutron_network_plugin. neutron_binding_profile, manila.network.neutron.neutron_network_plugin. neutron_binding_profile_opts, manila.network.standalone_network_plugin.standalone_network_plugin_opts, manila.scheduler.drivers.base.scheduler_driver_opts, manila.scheduler.host_manager.host_manager_opts, [manila.scheduler.manager.scheduler_driver_opt], [manila.scheduler.scheduler_options.scheduler_json_config_location_opt], manila.scheduler.drivers.simple.simple_scheduler_opts, manila.scheduler.weighers.capacity.capacity_weight_opts, manila.scheduler.weighers.pool.pool_weight_opts, manila.service.service_opts, manila.share.api.share_api_opts, manila.share.driver.ganesha_opts, manila.share.driver.share_opts, manila.share.driver.ssh_opts, manila.share.drivers_private_data.private_data_opts, manila.share.drivers.cephfs.driver.cephfs_opts, manila.share.drivers.container.driver.container_opts, manila.share.drivers.container.storage_helper.lv_opts, manila.share.drivers.dell_emc.driver.EMC_NAS_OPTS, manila.share.drivers.dell_emc.plugins.powermax.connection.POWERMAX_OPTS, manila.share.drivers.generic.share_opts, manila.share.drivers.glusterfs.common.glusterfs_common_opts, manila.share.drivers.glusterfs.GlusterfsManilaShare_opts, manila.share.drivers.glusterfs.layout.glusterfs_share_layout_opts, manila.share.drivers.glusterfs.layout_directory. 
glusterfs_directory_mapped_opts, manila.share.drivers.glusterfs.layout_volume.glusterfs_volume_mapped_opts, manila.share.drivers.hdfs.hdfs_native.hdfs_native_share_opts, manila.share.drivers.hitachi.hnas.driver.hitachi_hnas_opts, manila.share.drivers.hitachi.hsp.driver.hitachi_hsp_opts, manila.share.drivers.hpe.hpe_3par_driver.HPE3PAR_OPTS, manila.share.drivers.huawei.huawei_nas.huawei_opts, manila.share.drivers.ibm.gpfs.gpfs_share_opts, manila.share.drivers.infinidat.infinibox.infinidat_auth_opts, manila.share.drivers.infinidat.infinibox.infinidat_connection_opts, manila.share.drivers.infinidat.infinibox.infinidat_general_opts, manila.share.drivers.infortrend.driver.infortrend_nas_opts, manila.share.drivers.inspur.as13000.as13000_nas.inspur_as13000_opts, manila.share.drivers.inspur.instorage.instorage.instorage_opts, manila.share.drivers.macrosan.macrosan_nas.macrosan_opts, manila.share.drivers.maprfs.maprfs_native.maprfs_native_share_opts, manila.share.drivers.lvm.share_opts, manila.share.drivers.netapp.options.netapp_proxy_opts, manila.share.drivers.netapp.options.netapp_connection_opts, manila.share.drivers.netapp.options.netapp_transport_opts, manila.share.drivers.netapp.options.netapp_basicauth_opts, manila.share.drivers.netapp.options.netapp_certificateauth_opts, manila.share.drivers.netapp.options.netapp_provisioning_opts, manila.share.drivers.netapp.options.netapp_data_motion_opts, manila.share.drivers.netapp.options.netapp_backup_opts, manila.share.drivers.nexenta.options.nexenta_connection_opts, manila.share.drivers.nexenta.options.nexenta_dataset_opts, manila.share.drivers.nexenta.options.nexenta_nfs_opts, manila.share.drivers.purestorage.flashblade.flashblade_auth_opts, manila.share.drivers.purestorage.flashblade.flashblade_extra_opts, manila.share.drivers.purestorage.flashblade.flashblade_connection_opts, manila.share.drivers.qnap.qnap.qnap_manila_opts, manila.share.drivers.quobyte.quobyte.quobyte_manila_share_opts, 
manila.share.drivers.service_instance.common_opts, manila.share.drivers.service_instance.no_share_servers_handling_mode_opts, manila.share.drivers.service_instance.share_servers_handling_mode_opts, manila.share.drivers.tegile.tegile.tegile_opts, manila.share.drivers.windows.service_instance.windows_share_server_opts, manila.share.drivers.windows.winrm_helper.winrm_opts, manila.share.drivers.zfsonlinux.driver.zfsonlinux_opts, manila.share.drivers.zfssa.zfssashare.ZFSSA_OPTS, manila.share.hook.hook_options, manila.share.manager.share_manager_opts, manila.volume._volume_opts, manila.wsgi.eventlet_server.socket_opts, manila.share.drivers.vastdata.driver.OPTS, ] _opts = [ (None, list(itertools.chain(*_global_opt_lists))), (manila.volume.cinder.CINDER_GROUP, list(itertools.chain(manila.volume.cinder.cinder_opts))), (manila.compute.nova.NOVA_GROUP, list(itertools.chain(manila.compute.nova.nova_opts))), (manila.network.neutron.api.NEUTRON_GROUP, list(itertools.chain(manila.network.neutron.api.neutron_opts))), (manila.image.glance.GLANCE_GROUP, list(itertools.chain(manila.image.glance.glance_opts))), (manila.quota.QUOTA_GROUP, list(itertools.chain(manila.quota.quota_opts))), ] _opts.extend(manila.network.neutron.api.list_opts()) _opts.extend(manila.compute.nova.list_opts()) _opts.extend(manila.image.glance.list_opts()) _opts.extend(manila.keymgr.barbican.list_opts()) _opts.extend(manila.volume.cinder.list_opts()) def list_opts(): """Return a list of oslo.config options available in Manila.""" return [(m, copy.deepcopy(o)) for m, o in _opts] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.885672 manila-21.0.0/manila/policies/0000775000175000017500000000000000000000000016126 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/__init__.py0000664000175000017500000000656000000000000020246 
0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from manila.policies import availability_zone from manila.policies import base from manila.policies import message from manila.policies import quota_class_set from manila.policies import quota_set from manila.policies import resource_lock from manila.policies import scheduler_stats from manila.policies import security_service from manila.policies import service from manila.policies import share_access from manila.policies import share_access_metadata from manila.policies import share_backup from manila.policies import share_export_location from manila.policies import share_group from manila.policies import share_group_snapshot from manila.policies import share_group_type from manila.policies import share_group_types_spec from manila.policies import share_instance from manila.policies import share_instance_export_location from manila.policies import share_network from manila.policies import share_network_subnet from manila.policies import share_replica from manila.policies import share_replica_export_location from manila.policies import share_server from manila.policies import share_snapshot from manila.policies import share_snapshot_export_location from manila.policies import share_snapshot_instance from manila.policies import share_snapshot_instance_export_location from manila.policies import share_transfer from 
manila.policies import share_type from manila.policies import share_types_extra_spec from manila.policies import shares def list_rules(): return itertools.chain( base.list_rules(), availability_zone.list_rules(), scheduler_stats.list_rules(), shares.list_rules(), share_instance_export_location.list_rules(), share_type.list_rules(), share_types_extra_spec.list_rules(), share_snapshot.list_rules(), share_snapshot_export_location.list_rules(), share_snapshot_instance.list_rules(), share_snapshot_instance_export_location.list_rules(), share_server.list_rules(), service.list_rules(), quota_set.list_rules(), quota_class_set.list_rules(), resource_lock.list_rules(), share_group_types_spec.list_rules(), share_group_type.list_rules(), share_group_snapshot.list_rules(), share_group.list_rules(), share_replica.list_rules(), share_replica_export_location.list_rules(), share_network.list_rules(), share_network_subnet.list_rules(), security_service.list_rules(), share_export_location.list_rules(), share_instance.list_rules(), message.list_rules(), share_access.list_rules(), share_access_metadata.list_rules(), share_transfer.list_rules(), share_backup.list_rules(), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/availability_zone.py0000664000175000017500000000317100000000000022207 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'availability_zone:%s' DEPRECATED_REASON = """ The availability zone API now supports scope and default roles. """ deprecated_get_availability_zone = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) availability_zone_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all storage availability zones.", operations=[ { 'method': 'GET', 'path': '/os-availability-zone', }, { 'method': 'GET', 'path': '/availability-zone', }, ], deprecated_rule=deprecated_get_availability_zone ), ] def list_rules(): return availability_zone_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/base.py0000664000175000017500000001372000000000000017415 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy # This check string is reserved for actions that require the highest level of # authorization across projects to operate the deployment. 
They're allowed to # create, read, update, or delete any system-wide resource such as # share types, share group types, storage pools, etc. They can also operate on # project-specific resources where applicable (e.g., cleaning up shares or # snapshots). ADMIN = 'rule:context_is_admin' # This check string is reserved for actions performed by a "service" or the # "admin" super user. Service users act on behalf of other users and can # perform privileged service-specific actions. ADMIN_OR_SERVICE = 'rule:admin_or_service_api' # This check string is the primary use case for typical end-users, who are # working with resources that belong within a project (e.g., managing shares or # share replicas). These users don't require all the authorization that # administrators typically have. PROJECT_MEMBER = 'rule:project-member' # This check string should only be used to protect read-only project-specific # resources. It should not be used to protect APIs that make writable changes # (e.g., updating a share or snapshot). This persona is useful for someone who # needs access for auditing or even support. PROJECT_READER = 'rule:project-reader' # This check string should used to protect user specific resources such as # resource locks, or access rule restrictions. Users are expendable # resources, so ensure that other resources can also perform actions to # avoid orphan resources when users are decommissioned. OWNER_USER = 'rule:owner-user' ADMIN_OR_PROJECT_MEMBER = f'({ADMIN}) or ({PROJECT_MEMBER})' ADMIN_OR_PROJECT_READER = f'({ADMIN}) or ({PROJECT_READER})' ADMIN_OR_SERVICE_OR_PROJECT_READER = (f'({ADMIN_OR_SERVICE}) or ' f'({PROJECT_READER})') ADMIN_OR_SERVICE_OR_PROJECT_MEMBER = (f'({ADMIN_OR_SERVICE}) or ' f'({PROJECT_MEMBER})') ADMIN_OR_SERVICE_OR_OWNER_USER = f'({OWNER_USER} or {ADMIN_OR_SERVICE})' # Old, "unscoped", deprecated check strings to be removed. Do not use these # in default RBAC any longer. 
These can be removed after "enforce_scope" # defaults to True in oslo.policy RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner' RULE_ADMIN_OR_OWNER_USER = 'rule:admin_or_owner_user' RULE_ADMIN_API = 'rule:admin_api' RULE_DEFAULT = 'rule:default' deprecation_msg = ("The `context_is_admin` check is superseded by more " "specific check strings that consume project " "scope attributes from keystone tokens.") DEPRECATED_CONTEXT_IS_ADMIN = policy.DeprecatedRule( name='context_is_admin', check_str='role:admin', deprecated_reason=deprecation_msg, deprecated_since=versionutils.deprecated.WALLABY ) rules = [ # ***Default OpenStack scoped personas*** # policy.RuleDefault( name='project-admin', check_str='role:admin and ' 'project_id:%(project_id)s', description='Project scoped Administrator', scope_types=['project']), policy.RuleDefault( name='project-member', check_str='role:member and ' 'project_id:%(project_id)s', description='Project scoped Member', scope_types=['project']), policy.RuleDefault( name='project-reader', check_str='role:reader and ' 'project_id:%(project_id)s', description='Project scoped Reader', scope_types=['project']), policy.RuleDefault( name='owner-user', check_str='user_id:%(user_id)s and ' 'project_id:%(project_id)s', description='Project scoped user that owns a user specific resource', scope_types=['project']), policy.RuleDefault( "admin_or_service_api", "role:admin or role:service", description="A service user or an administrator user.", scope_types=['project'], ), # ***Special personas for Manila*** # policy.RuleDefault( name='context_is_admin', check_str='role:admin', description='Privileged users checked via "context.is_admin"', deprecated_rule=DEPRECATED_CONTEXT_IS_ADMIN, scope_types=['project']), policy.RuleDefault( name='context_is_host_admin', check_str='role:admin and ' 'project_id:%(project_id)s', description='Privileged user who can select host during scheduling', scope_types=['project']), # ***Legacy/deprecated unscoped rules*** # # can be 
removed after "enforce_scope" defaults to True in oslo.policy policy.RuleDefault( name='admin_or_owner', check_str='is_admin:True or project_id:%(project_id)s', description='Administrator or Member of the project'), policy.RuleDefault( name='admin_or_owner_user', check_str='is_admin:True or ' 'project_id:%(project_id)s and user_id:%(user_id)s', description='Administrator or owner user of a resource'), policy.RuleDefault( name='default', check_str=RULE_ADMIN_OR_OWNER, description='Default rule for most non-Admin APIs'), policy.RuleDefault( name='admin_api', check_str='is_admin:True', description='Default rule for most Admin APIs.'), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/message.py0000664000175000017500000000537300000000000020134 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'message:%s' DEPRECATED_REASON = """ The messages API now supports scope and default roles. 
""" deprecated_message_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_message_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_message_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) message_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of a given message.", operations=[ { 'method': 'GET', 'path': '/messages/{message_id}' } ], deprecated_rule=deprecated_message_get ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all messages.", operations=[ { 'method': 'GET', 'path': '/messages' }, { 'method': 'GET', 'path': '/messages?{query}' } ], deprecated_rule=deprecated_message_get_all ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete a message.", operations=[ { 'method': 'DELETE', 'path': '/messages/{message_id}' } ], deprecated_rule=deprecated_message_delete ), ] def list_rules(): return message_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/quota_class_set.py0000664000175000017500000000450600000000000021676 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'quota_class_set:%s' DEPRECATED_REASON = """ The quota class API now supports scope and default roles. """ deprecated_quota_class_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_quota_class_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) quota_class_set_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN, scope_types=['project'], description="Update quota class.", operations=[ { 'method': 'PUT', 'path': '/quota-class-sets/{class_name}' }, { 'method': 'PUT', 'path': '/os-quota-class-sets/{class_name}' } ], deprecated_rule=deprecated_quota_class_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get quota class.", operations=[ { 'method': 'GET', 'path': '/quota-class-sets/{class_name}' }, { 'method': 'GET', 'path': '/os-quota-class-sets/{class_name}' } ], deprecated_rule=deprecated_quota_class_show ), ] def list_rules(): return quota_class_set_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/policies/quota_set.py0000664000175000017500000000776000000000000020516 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'quota_set:%s' DEPRECATED_REASON = """ The quota API now supports scope and default roles. """ deprecated_quota_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_quota_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_quota_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) quota_set_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN, scope_types=['project'], description=("Update the quotas for a project/user and/or share " "type."), operations=[ { 'method': 'PUT', 'path': '/quota-sets/{project_id}' }, { 'method': 'PUT', 'path': '/quota-sets/{project_id}?user_id={user_id}' }, { 'method': 'PUT', 'path': '/quota-sets/{project_id}?share_type={share_type_id}' }, { 'method': 'PUT', 'path': '/os-quota-sets/{project_id}' }, { 'method': 'PUT', 'path': 
'/os-quota-sets/{project_id}?user_id={user_id}' }, ], deprecated_rule=deprecated_quota_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="List the quotas for a project/user.", operations=[ { 'method': 'GET', 'path': '/quota-sets/{project_id}/defaults' }, { 'method': 'GET', 'path': '/os-quota-sets/{project_id}/defaults' } ], deprecated_rule=deprecated_quota_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN, scope_types=['project'], description=("Delete quota for a project/user or " "project/share-type. The quota will revert back to " "default (Admin only)."), operations=[ { 'method': 'DELETE', 'path': '/quota-sets/{project_id}' }, { 'method': 'DELETE', 'path': '/quota-sets/{project_id}?user_id={user_id}' }, { 'method': 'DELETE', 'path': '/quota-sets/{project_id}?share_type={share_type_id}' }, { 'method': 'DELETE', 'path': '/os-quota-sets/{project_id}' }, { 'method': 'DELETE', 'path': '/os-quota-sets/{project_id}?user_id={user_id}' }, ], deprecated_rule=deprecated_quota_delete ), ] def list_rules(): return quota_set_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/resource_lock.py0000664000175000017500000001350600000000000021344 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'resource_lock:%s' DEPRECATED_REASON = """ The resource lock API now supports scope and default roles. """ deprecated_lock_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_lock_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_lock_get_all_projects = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all_projects', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_lock_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat' ) deprecated_lock_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_ADMIN_OR_OWNER_USER, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_lock_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_ADMIN_OR_OWNER_USER, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_bypass_locked_show_action = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'bypass_locked_show_action', check_str=base.RULE_ADMIN_OR_OWNER_USER, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) # We anticipate bypassing is desirable only for resource visibility locks. # Without a bypass, the lock would have to be set aside each time the lock # owner wants to view the resource. 
lock_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get', check_str=base.ADMIN_OR_SERVICE_OR_PROJECT_READER, scope_types=['project'], description="Get details of a given resource lock.", operations=[ { 'method': 'GET', 'path': '/resource-locks/{lock_id}' } ], deprecated_rule=deprecated_lock_get, ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all', check_str=base.ADMIN_OR_SERVICE_OR_PROJECT_READER, scope_types=['project'], description="Get all resource locks.", operations=[ { 'method': 'GET', 'path': '/resource-locks' }, { 'method': 'GET', 'path': '/resource-locks?{query}' } ], deprecated_rule=deprecated_lock_get_all, ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all_projects', check_str=base.ADMIN_OR_SERVICE, scope_types=['project'], description="Get resource locks from all project namespaces.", operations=[ { 'method': 'GET', 'path': '/resource-locks?all_projects=1' }, { 'method': 'GET', 'path': '/resource-locks?all_projects=1&' 'project_id={project_id}' } ], deprecated_rule=deprecated_lock_get_all_projects, ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN_OR_SERVICE_OR_PROJECT_MEMBER, scope_types=['project'], description="Create a resource lock.", operations=[ { 'method': 'POST', 'path': '/resource-locks' } ], deprecated_rule=deprecated_lock_create, ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN_OR_SERVICE_OR_OWNER_USER, scope_types=['project'], description="Update a resource lock.", operations=[ { 'method': 'PUT', 'path': '/resource-locks/{lock_id}' } ], deprecated_rule=deprecated_lock_update, ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_SERVICE_OR_OWNER_USER, scope_types=['project'], description="Delete a resource lock.", operations=[ { 'method': 'DELETE', 'path': '/resource-locks/{lock_id}' } ], deprecated_rule=deprecated_lock_delete, ), policy.DocumentedRuleDefault( 
name=BASE_POLICY_NAME % 'bypass_locked_show_action', check_str=base.ADMIN_OR_SERVICE_OR_OWNER_USER, scope_types=['project'], description="Bypass a visibility lock placed in a resource.", operations=[ { 'method': 'GET', 'path': '/share-access-rules/{share_access_id}' }, { 'method': 'GET', 'path': ('/share-access-rules?share_id={share_id}' '&key1=value1&key2=value2') }, ], deprecated_rule=deprecated_bypass_locked_show_action, ), ] def list_rules(): return lock_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/scheduler_stats.py0000664000175000017500000000437200000000000021702 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'scheduler_stats:pools:%s' DEPRECATED_REASON = """ The storage pool statistics API now support system scope and default roles. 
""" deprecated_pool_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_pool_detail = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'detail', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) scheduler_stats_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN, scope_types=['project'], description="Get information regarding backends " "(and storage pools) known to the scheduler.", operations=[ { 'method': 'GET', 'path': '/scheduler-stats/pools?{query}' } ], deprecated_rule=deprecated_pool_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'detail', check_str=base.ADMIN, scope_types=['project'], description="Get detailed information regarding backends " "(and storage pools) known to the scheduler.", operations=[ { 'method': 'GET', 'path': '/scheduler-stats/pools/detail?{query}' }, ], deprecated_rule=deprecated_pool_detail ), ] def list_rules(): return scheduler_stats_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/security_service.py0000664000175000017500000001303300000000000022067 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'security_service:%s' DEPRECATED_REASON = """ The security service API now supports scope and default roles. """ deprecated_security_service_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_security_service_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_security_service_detail = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'detail', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_security_service_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_security_service_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_security_service_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_security_service_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all_security_services', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) security_service_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Create security service.", operations=[ { 'method': 'POST', 'path': '/security-services' } ], 
deprecated_rule=deprecated_security_service_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of a security service.", operations=[ { 'method': 'GET', 'path': '/security-services/{security_service_id}' } ], deprecated_rule=deprecated_security_service_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'detail', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of all security services.", operations=[ { 'method': 'GET', 'path': '/security-services/detail?{query}' }, ], deprecated_rule=deprecated_security_service_detail ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all security services under a project.", operations=[ { 'method': 'GET', 'path': '/security-services?{query}' } ], deprecated_rule=deprecated_security_service_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update a security service.", operations=[ { 'method': 'PUT', 'path': '/security-services/{security_service_id}', } ], deprecated_rule=deprecated_security_service_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete a security service.", operations=[ { 'method': 'DELETE', 'path': '/security-services/{security_service_id}' } ], deprecated_rule=deprecated_security_service_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all_security_services', check_str=base.ADMIN, scope_types=['project'], description="Get security services of all projects.", operations=[ { 'method': 'GET', 'path': '/security-services?all_tenants=1' }, { 'method': 'GET', 'path': '/security-services/detail?all_tenants=1' } ], 
deprecated_rule=deprecated_security_service_get_all ), ] def list_rules(): return security_service_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/service.py0000664000175000017500000000612000000000000020137 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'service:%s' DEPRECATED_REASON = """ The service API now supports scope and default roles. 
""" deprecated_service_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_service_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_service_ensure = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'ensure_shares', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='2024.2/Dalmatian' ) service_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN, scope_types=['project'], description="Return a list of all running services.", operations=[ { 'method': 'GET', 'path': '/os-services?{query}', }, { 'method': 'GET', 'path': '/services?{query}', } ], deprecated_rule=deprecated_service_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN, scope_types=['project'], description="Enable/Disable scheduling for a service.", operations=[ { 'method': 'PUT', 'path': '/os-services/disable', }, { 'method': 'PUT', 'path': '/os-services/enable', }, { 'method': 'PUT', 'path': '/services/disable', }, { 'method': 'PUT', 'path': '/services/enable', }, ], deprecated_rule=deprecated_service_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'ensure_shares', check_str=base.ADMIN, scope_types=['project'], description="Run ensure shares for a manila-share binary.", operations=[ { 'method': 'POST', 'path': '/services/ensure', } ], deprecated_rule=deprecated_service_ensure ), ] def list_rules(): return service_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_access.py0000664000175000017500000000561600000000000021133 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei Corporation. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_access_rule:%s' DEPRECATED_REASON = """ The share access rule API now supports scope and default roles. """ deprecated_access_rule_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_access_rule_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_access_rule_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2025.1/Epoxy' ) share_access_rule_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of a share access rule.", operations=[ { 'method': 'GET', 'path': '/share-access-rules/{share_access_id}' } ], deprecated_rule=deprecated_access_rule_get ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="List access rules of a given share.", operations=[ { 'method': 'GET', 'path': ('/share-access-rules?share_id={share_id}' 
'&key1=value1&key2=value2') } ], deprecated_rule=deprecated_access_rule_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update access rules of a given share.", operations=[ { 'method': 'PUT', 'path': '/share-access-rules/{share_access_id}' } ], deprecated_rule=deprecated_access_rule_update ), ] def list_rules(): return share_access_rule_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_access_metadata.py0000664000175000017500000000446700000000000022776 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_access_metadata:%s' DEPRECATED_REASON = """ The share access metadata API now support system scope and default roles. 
""" deprecated_access_metadata_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_access_metadata_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_access_rule_metadata_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Set metadata for a share access rule.", operations=[ { 'method': 'PUT', 'path': '/share-access-rules/{share_access_id}/metadata' } ], deprecated_rule=deprecated_access_metadata_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete metadata for a share access rule.", operations=[ { 'method': 'DELETE', 'path': '/share-access-rules/{share_access_id}/metadata/{key}' } ], deprecated_rule=deprecated_access_metadata_delete ), ] def list_rules(): return share_access_rule_metadata_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_backup.py0000664000175000017500000001374700000000000021143 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_backup:%s' DEPRECATED_REASON = """ The share backup API now supports system scope and default roles. """ deprecated_backup_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat' ) deprecated_backup_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_backup_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_get_all_project = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all_project', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_backup_restore = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'restore', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_backup_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_backup_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) deprecated_backup_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='2023.2/Bobcat', ) share_backup_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Create share backup.", operations=[ { 'method': 'POST', 'path': '/share-backups' } ], 
deprecated_rule=deprecated_backup_create, ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get share backup.", operations=[ { 'method': 'GET', 'path': '/share-backups/{backup_id}' } ], deprecated_rule=deprecated_backup_get, ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all share backups.", operations=[ { 'method': 'GET', 'path': '/share-backups' }, { 'method': 'GET', 'path': '/share-backups/detail' }, { 'method': 'GET', 'path': '/share-backups/detail?share_id=(share_id}', }, ], deprecated_rule=deprecated_backup_get_all, ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all_project', check_str=base.ADMIN, scope_types=['project'], description="Get share backups of all projects.", operations=[ { 'method': 'GET', 'path': '/share-backups?all_tenants=1' }, { 'method': 'GET', 'path': '/share-backups/detail?all_tenants=1' } ], deprecated_rule=deprecated_get_all_project ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'restore', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Restore a share backup.", operations=[ { 'method': 'POST', 'path': '/share-backups/{backup_id}/action' } ], deprecated_rule=deprecated_backup_restore, ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset status.", operations=[ { 'method': 'POST', 'path': '/share-backups/{backup_id}/action', } ], deprecated_rule=deprecated_backup_reset_status ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update a share backup.", operations=[ { 'method': 'PUT', 'path': '/share-backups/{backup_id}', } ], deprecated_rule=deprecated_backup_update, ), policy.DocumentedRuleDefault( 
name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Force Delete a share backup.", operations=[ { 'method': 'DELETE', 'path': '/share-backups/{backup_id}' } ], deprecated_rule=deprecated_backup_delete, ), ] def list_rules(): return share_backup_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_export_location.py0000664000175000017500000001344300000000000023100 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_export_location:%s' DEPRECATED_REASON = """ The share export location API now support system scope and default roles. 
""" deprecated_export_location_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_export_location_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_update_export_location_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2024.2/Dalmatian' ) deprecated_delete_export_location_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2024.2/Dalmatian' ) deprecated_get_export_location_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='2024.2/Dalmatian' ) deprecated_update_admin_only_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update_admin_only_metadata', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since="2024.2/Dalmatian" ) share_export_location_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all export locations of a given share.", operations=[ { 'method': 'GET', 'path': '/shares/{share_id}/export_locations', } ], deprecated_rule=deprecated_export_location_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details about the requested export location.", operations=[ { 'method': 'GET', 'path': ('/shares/{share_id}/export_locations/' '{export_location_id}'), } ], deprecated_rule=deprecated_export_location_show ), 
policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update_metadata', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update share export location metadata.", operations=[ { 'method': 'PUT', 'path': ('/shares/{share_id}/export_locations/' '{export_location_id}/metadata'), }, { 'method': 'POST', 'path': ('/shares/{share_id}/export_locations/' '{export_location_id}/metadata/{key}') }, { 'method': 'POST', 'path': ('/shares/{share_id}/export_locations/' '{export_location_id}/metadata'), }, ], deprecated_rule=deprecated_update_export_location_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete_metadata', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete share export location metadata", operations=[ { 'method': 'DELETE', 'path': ('/shares/{share_id}/export_locations/' '{export_location_id}/metadata/{key}') }, ], deprecated_rule=deprecated_delete_export_location_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_metadata', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description='Get share export location metadata', operations=[ { 'method': "GET", 'path': ('/shares/{share_id}/export_locations/' '{export_location_id}/metadata') }, { 'method': 'GET', 'path': ('/shares/{share_id}/export_locations/' '{export_location_id}/metadata/{key}') }, ], deprecated_rule=deprecated_get_export_location_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update_admin_only_metadata', check_str=base.ADMIN, scope_types=['project'], description=( "Update metadata items that are considered \"admin only\" " "by the service."), operations=[ { 'method': 'PUT', 'path': '/shares/{share_id}/export_locations/' '{export_location_id}/metadata', } ], deprecated_rule=deprecated_update_admin_only_metadata ), ] def list_rules(): return share_export_location_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315554.0 manila-21.0.0/manila/policies/share_group.py0000664000175000017500000001252100000000000021017 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_group:%s' DEPRECATED_REASON = """ The share group API now supports scope and default roles. """ deprecated_share_group_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_force_delete = policy.DeprecatedRule( 
name=BASE_POLICY_NAME % 'force_delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_group_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Create share group.", operations=[ { 'method': 'POST', 'path': '/share-groups' } ], deprecated_rule=deprecated_share_group_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of a share group.", operations=[ { 'method': 'GET', 'path': '/share-groups/{share_group_id}' } ], deprecated_rule=deprecated_share_group_get ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all share groups.", operations=[ { 'method': 'GET', 'path': '/share-groups?{query}' }, { 'method': 'GET', 'path': '/share-groups/detail?{query}' } ], deprecated_rule=deprecated_share_group_get_all ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update share group.", operations=[ { 'method': 'PUT', 'path': '/share-groups/{share_group_id}' } ], deprecated_rule=deprecated_share_group_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete share group.", operations=[ { 'method': 'DELETE', 'path': '/share-groups/{share_group_id}' } ], deprecated_rule=deprecated_share_group_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'force_delete', check_str=base.ADMIN, 
scope_types=['project'], description="Force delete a share group.", operations=[ { 'method': 'POST', 'path': '/share-groups/{share_group_id}/action' } ], deprecated_rule=deprecated_share_group_force_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset share group's status.", operations=[ { 'method': 'POST', 'path': '/share-groups/{share_group_id}/action' } ], deprecated_rule=deprecated_share_group_reset_status ), ] def list_rules(): return share_group_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_group_snapshot.py0000664000175000017500000001322600000000000022741 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_group_snapshot:%s' DEPRECATED_REASON = """ The share group snapshots API now supports scope and default roles. 
""" deprecated_group_snapshot_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_snapshot_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_snapshot_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_snapshot_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_snapshot_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_snapshot_force_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'force_delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_snapshot_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_group_snapshot_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Create a new share group snapshot.", operations=[ { 'method': 'POST', 'path': '/share-group-snapshots' } ], deprecated_rule=deprecated_group_snapshot_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of a share group 
snapshot.", operations=[ { 'method': 'GET', 'path': '/share-group-snapshots/{share_group_snapshot_id}' } ], deprecated_rule=deprecated_group_snapshot_get ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all share group snapshots.", operations=[ { 'method': 'GET', 'path': '/share-group-snapshots?{query}' }, { 'method': 'GET', 'path': '/share-group-snapshots/detail?{query}' } ], deprecated_rule=deprecated_group_snapshot_get_all ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update a share group snapshot.", operations=[ { 'method': 'PUT', 'path': '/share-group-snapshots/{share_group_snapshot_id}' } ], deprecated_rule=deprecated_group_snapshot_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete a share group snapshot.", operations=[ { 'method': 'DELETE', 'path': '/share-group-snapshots/{share_group_snapshot_id}' } ], deprecated_rule=deprecated_group_snapshot_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'force_delete', check_str=base.ADMIN, scope_types=['project'], description="Force delete a share group snapshot.", operations=[ { 'method': 'POST', 'path': '/share-group-snapshots/{share_group_snapshot_id}/' 'action' } ], deprecated_rule=deprecated_group_snapshot_force_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset a share group snapshot's status.", operations=[ { 'method': 'POST', 'path': '/share-group-snapshots/{share_group_snapshot_id}/' 'action' } ], deprecated_rule=deprecated_group_snapshot_reset_status ), ] def list_rules(): return share_group_snapshot_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315554.0 manila-21.0.0/manila/policies/share_group_type.py0000664000175000017500000001425000000000000022061 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_group_type:%s' DEPRECATED_REASON = """ The share group type API now supports scope and default roles. """ deprecated_share_group_type_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_type_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_type_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_type_get_default = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'default', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_type_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) 
deprecated_share_group_type_project_access = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'list_project_access', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_type_add_project = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'add_project_access', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_group_type_remove_project = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'remove_project_access', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_group_type_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN, scope_types=['project'], description="Create a new share group type.", operations=[ { 'method': 'POST', 'path': '/share-group-types', } ], deprecated_rule=deprecated_share_group_type_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get the list of share group types.", operations=[ { 'method': 'GET', 'path': '/share-group-types?is_public=all', } ], deprecated_rule=deprecated_share_group_type_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details regarding the specified share group type.", operations=[ { 'method': 'GET', 'path': '/share-group-types/{share_group_type_id}', } ], deprecated_rule=deprecated_share_group_type_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'default', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get the default share group type.", operations=[ { 'method': 'GET', 'path': '/share-group-types/default', } ], deprecated_rule=deprecated_share_group_type_get_default ), 
policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN, scope_types=['project'], description="Delete an existing group type.", operations=[ { 'method': 'DELETE', 'path': '/share-group-types/{share_group_type_id}' } ], deprecated_rule=deprecated_share_group_type_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list_project_access', check_str=base.ADMIN, scope_types=['project'], description="Get project access by share group type.", operations=[ { 'method': 'GET', 'path': '/share-group-types/{share_group_type_id}/access', } ], deprecated_rule=deprecated_share_group_type_project_access ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'add_project_access', check_str=base.ADMIN, scope_types=['project'], description="Allow project to use the share group type.", operations=[ { 'method': 'POST', 'path': '/share-group-types/{share_group_type_id}/action', } ], deprecated_rule=deprecated_share_group_type_add_project ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'remove_project_access', check_str=base.ADMIN, scope_types=['project'], description="Deny project access to use the share group type.", operations=[ { 'method': 'POST', 'path': '/share-group-types/{share_group_type_id}/action', } ], deprecated_rule=deprecated_share_group_type_remove_project ), ] def list_rules(): return share_group_type_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_group_types_spec.py0000664000175000017500000001035300000000000023256 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_group_types_spec:%s' DEPRECATED_REASON = """ The share group type specs API now support system scope and default roles. """ deprecated_group_type_spec_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_type_spec_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_type_spec_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_type_spec_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_group_type_spec_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_group_types_spec_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN, scope_types=['project'], description="Create share group type specs.", operations=[ { 'method': 'POST', 'path': '/share-group-types/{share_group_type_id}/group-specs' } 
], deprecated_rule=deprecated_group_type_spec_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN, scope_types=['project'], description="Get share group type specs.", operations=[ { 'method': 'GET', 'path': '/share-group-types/{share_group_type_id}/group-specs', } ], deprecated_rule=deprecated_group_type_spec_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, scope_types=['project'], description="Get details of a share group type spec.", operations=[ { 'method': 'GET', 'path': ('/share-group-types/{share_group_type_id}/' 'group-specs/{key}'), } ], deprecated_rule=deprecated_group_type_spec_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN, scope_types=['project'], description="Update a share group type spec.", operations=[ { 'method': 'PUT', 'path': ('/share-group-types/{share_group_type_id}' '/group-specs/{key}'), } ], deprecated_rule=deprecated_group_type_spec_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN, scope_types=['project'], description="Delete a share group type spec.", operations=[ { 'method': 'DELETE', 'path': ('/share-group-types/{share_group_type_id}/' 'group-specs/{key}'), } ], deprecated_rule=deprecated_group_type_spec_delete ), ] def list_rules(): return share_group_types_spec_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_instance.py0000664000175000017500000000700000000000000021463 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_instance:%s' DEPRECATED_REASON = """ The share instances API now supports scope and default roles. """ deprecated_share_instances_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_instance_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_instance_force_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'force_delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_instance_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) shares_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN, scope_types=['project'], description="Get all share instances.", operations=[ { 'method': 'GET', 'path': '/share_instances', }, { 'method': 'GET', 'path': '/share_instances?{query}', } ], deprecated_rule=deprecated_share_instances_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, scope_types=['project'], description="Get details of a share instance.", 
operations=[ { 'method': 'GET', 'path': '/share_instances/{share_instance_id}' }, ], deprecated_rule=deprecated_share_instance_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'force_delete', check_str=base.ADMIN, scope_types=['project'], description="Force delete a share instance.", operations=[ { 'method': 'POST', 'path': '/share_instances/{share_instance_id}/action', } ], deprecated_rule=deprecated_share_instance_force_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset share instance's status.", operations=[ { 'method': 'POST', 'path': '/share_instances/{share_instance_id}/action', } ], deprecated_rule=deprecated_share_instance_reset_status ), ] def list_rules(): return shares_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_instance_export_location.py0000664000175000017500000000465300000000000024767 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_instance_export_location:%s' DEPRECATED_REASON = """ The share instance export location API now supports scope and default roles. 
""" deprecated_instance_export_location_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_instance_export_location_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_export_location_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN, scope_types=['project'], description='Return data about the requested export location.', operations=[ { 'method': 'POST', 'path': ('/share_instances/{share_instance_id}/' 'export_locations'), } ], deprecated_rule=deprecated_instance_export_location_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, scope_types=['project'], description='Return data about the requested export location.', operations=[ { 'method': 'GET', 'path': ('/share_instances/{share_instance_id}/' 'export_locations/{export_location_id}'), } ], deprecated_rule=deprecated_instance_export_location_show ), ] def list_rules(): return share_export_location_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_network.py0000664000175000017500000002507600000000000021365 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_network:%s' DEPRECATED_REASON = """ The share network API now support system scope and default roles. """ deprecated_share_network_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_detail = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'detail', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_add_security_service = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'add_security_service', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_remove_security_service = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'remove_security_service', check_str=base.RULE_DEFAULT, 
deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all_share_networks', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_add_security_service_check = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'add_security_service_check', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_update_security_service = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update_security_service', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_update_security_service_check = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update_security_service_check', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_network_subnet_create_check = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'subnet_create_check', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since="Yoga" ) share_network_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Create share network.", operations=[ { 'method': 'POST', 'path': '/share-networks' } ], deprecated_rule=deprecated_share_network_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of a share network.", operations=[ { 'method': 
'GET', 'path': '/share-networks/{share_network_id}' } ], deprecated_rule=deprecated_share_network_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all share networks under a project.", operations=[ { 'method': 'GET', 'path': '/share-networks?{query}' } ], deprecated_rule=deprecated_share_network_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'detail', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of share networks under a project.", operations=[ { 'method': 'GET', 'path': '/share-networks/detail?{query}' }, ], deprecated_rule=deprecated_share_network_detail ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update a share network.", operations=[ { 'method': 'PUT', 'path': '/share-networks/{share_network_id}' } ], deprecated_rule=deprecated_share_network_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete a share network.", operations=[ { 'method': 'DELETE', 'path': '/share-networks/{share_network_id}' } ], deprecated_rule=deprecated_share_network_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'add_security_service', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Add security service to share network.", operations=[ { 'method': 'POST', 'path': '/share-networks/{share_network_id}/action' } ], deprecated_rule=deprecated_share_network_add_security_service ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'add_security_service_check', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Check the feasibility of add security service to a share " "network.", operations=[ { 'method': 'POST', 'path': 
'/share-networks/{share_network_id}/action' } ], deprecated_rule=deprecated_share_network_add_security_service_check ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'remove_security_service', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Remove security service from share network.", operations=[ { 'method': 'POST', 'path': '/share-networks/{share_network_id}/action' } ], deprecated_rule=deprecated_share_network_remove_security_service ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update_security_service', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update security service from share network.", operations=[ { 'method': 'POST', 'path': '/share-networks/{share_network_id}/action' } ], deprecated_rule=deprecated_share_network_update_security_service ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update_security_service_check', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Check the feasibility of update a security service from " "share network.", operations=[ { 'method': 'POST', 'path': '/share-networks/{share_network_id}/action' } ], deprecated_rule=deprecated_share_network_update_security_service_check ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset share network`s status.", operations=[ { 'method': 'POST', 'path': '/share-networks/{share_network_id}/action' } ], deprecated_rule=deprecated_share_network_reset_status ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all_share_networks', check_str=base.ADMIN, scope_types=['project'], description="Get share networks belonging to all projects.", operations=[ { 'method': 'GET', 'path': '/share-networks?all_tenants=1' }, { 'method': 'GET', 'path': '/share-networks/detail?all_tenants=1' } ], deprecated_rule=deprecated_share_network_get_all ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 
'subnet_create_check', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Check the feasibility of create a new share network " "subnet for share network.", operations=[ { 'method': 'POST', 'path': '/share-networks/{share_network_id}/action' } ], deprecated_rule=deprecated_share_network_subnet_create_check ), ] def list_rules(): return share_network_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_network_subnet.py0000664000175000017500000001450400000000000022737 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_network_subnet:%s' DEPRECATED_REASON = """ The share network subnet API now supports scope and default roles. 
""" deprecated_subnet_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_subnet_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_subnet_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_subnet_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_update_subnet_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='ANTELOPE' ) deprecated_delete_subnet_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='ANTELOPE' ) deprecated_get_subnet_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='ANTELOPE' ) share_network_subnet_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Create a new share network subnet.", operations=[ { 'method': 'POST', 'path': '/share-networks/{share_network_id}/subnets' } ], deprecated_rule=deprecated_subnet_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete a share network subnet.", operations=[ { 'method': 'DELETE', 'path': 
'/share-networks/{share_network_id}/subnets/' '{share_network_subnet_id}' } ], deprecated_rule=deprecated_subnet_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Shows a share network subnet.", operations=[ { 'method': 'GET', 'path': '/share-networks/{share_network_id}/subnets/' '{share_network_subnet_id}' } ], deprecated_rule=deprecated_subnet_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all share network subnets.", operations=[ { 'method': 'GET', 'path': '/share-networks/{share_network_id}/subnets' } ], deprecated_rule=deprecated_subnet_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update_metadata', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['system', 'project'], description="Update share network subnet metadata.", operations=[ { 'method': 'PUT', 'path': '/share-networks/{share_network_id}/subnets/' '{share_network_subnet_id}/metadata', }, { 'method': 'POST', 'path': '/share-networks/{share_network_id}/subnets/' '{share_network_subnet_id}/metadata/{key}', }, { 'method': 'POST', 'path': '/share-networks/{share_network_id}/subnets/' '{share_network_subnet_id}/metadata', }, ], deprecated_rule=deprecated_update_subnet_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete_metadata', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['system', 'project'], description="Delete share network subnet metadata.", operations=[ { 'method': 'DELETE', 'path': '/share-networks/{share_network_id}/subnets/' '{share_network_subnet_id}/metadata/{key}', } ], deprecated_rule=deprecated_delete_subnet_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_metadata', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['system', 'project'], description="Get share network subnet metadata.", operations=[ { 'method': 
'GET', 'path': '/share-networks/{share_network_id}/subnets/' '{share_network_subnet_id}/metadata', }, { 'method': 'GET', 'path': '/share-networks/{share_network_id}/subnets/' '{share_network_subnet_id}/metadata/{key}', } ], deprecated_rule=deprecated_get_subnet_metadata ), ] def list_rules(): return share_network_subnet_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_replica.py0000664000175000017500000001544200000000000021307 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_replica:%s' DEPRECATED_REASON = """ The share replica API now supports scope and default roles. 
""" deprecated_replica_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_replica_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_replica_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_replica_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_replica_force_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'force_delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_replica_promote = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'promote', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_replica_resync = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'resync', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_replica_reset_state = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_replica_state', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_replica_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_replica_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', 
check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Create share replica.", operations=[ { 'method': 'POST', 'path': '/share-replicas', } ], deprecated_rule=deprecated_replica_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all share replicas.", operations=[ { 'method': 'GET', 'path': '/share-replicas', }, { 'method': 'GET', 'path': '/share-replicas/detail', }, { 'method': 'GET', 'path': '/share-replicas/detail?share_id={share_id}', } ], deprecated_rule=deprecated_replica_get_all ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of a share replica.", operations=[ { 'method': 'GET', 'path': '/share-replicas/{share_replica_id}', } ], deprecated_rule=deprecated_replica_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete a share replica.", operations=[ { 'method': 'DELETE', 'path': '/share-replicas/{share_replica_id}', } ], deprecated_rule=deprecated_replica_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'force_delete', check_str=base.ADMIN, scope_types=['project'], description="Force delete a share replica.", operations=[ { 'method': 'POST', 'path': '/share-replicas/{share_replica_id}/action', } ], deprecated_rule=deprecated_replica_force_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'promote', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Promote a non-active share replica to active.", operations=[ { 'method': 'POST', 'path': '/share-replicas/{share_replica_id}/action', } ], deprecated_rule=deprecated_replica_promote ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'resync', check_str=base.ADMIN, scope_types=['project'], description="Resync a share 
replica that is out of sync.", operations=[ { 'method': 'POST', 'path': '/share-replicas/{share_replica_id}/action', } ], deprecated_rule=deprecated_replica_resync ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_replica_state', check_str=base.ADMIN, scope_types=['project'], description="Reset share replica's replica_state attribute.", operations=[ { 'method': 'POST', 'path': '/share-replicas/{share_replica_id}/action', } ], deprecated_rule=deprecated_replica_reset_state ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset share replica's status.", operations=[ { 'method': 'POST', 'path': '/share-replicas/{share_replica_id}/action', } ], deprecated_rule=deprecated_replica_reset_status ), ] def list_rules(): return share_replica_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_replica_export_location.py0000664000175000017500000000456100000000000024600 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_replica_export_location:%s' DEPRECATED_REASON = """ The share replica export location API now supports scope and default roles. 
""" deprecated_replica_location_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_replica_location_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_replica_export_location_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all export locations of a given share replica.", operations=[ { 'method': 'GET', 'path': '/share-replicas/{share_replica_id}/export-locations', } ], deprecated_rule=deprecated_replica_location_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details about the requested share replica export " "location.", operations=[ { 'method': 'GET', 'path': ('/share-replicas/{share_replica_id}/export-locations/' '{export_location_id}'), } ], deprecated_rule=deprecated_replica_location_show ), ] def list_rules(): return share_replica_export_location_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_server.py0000664000175000017500000002256300000000000021200 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_server:%s' DEPRECATED_REASON = """ The share server API now supports scope and default roles. """ deprecated_server_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_details = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'details', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_manage_server = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'manage_share_server', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_unmanage_server = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'unmanage_share_server', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_migration_start = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'share_server_migration_start', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, 
deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_migration_check = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'share_server_migration_check', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_migration_complete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'share_server_migration_complete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_migration_cancel = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'share_server_migration_cancel', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_migration_get_progress = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'share_server_migration_get_progress', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_server_reset_task_state = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'share_server_reset_task_state', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_server_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN, scope_types=['project'], description="Get share servers.", operations=[ { 'method': 'GET', 'path': '/share-servers?{query}', } ], deprecated_rule=deprecated_server_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, scope_types=['project'], description="Show share server.", operations=[ { 'method': 'GET', 'path': '/share-servers/{server_id}', } ], deprecated_rule=deprecated_server_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'details', check_str=base.ADMIN, scope_types=['project'], description="Get share server details.", operations=[ { 'method': 'GET', 'path': 
'/share-servers/{server_id}/details', } ], deprecated_rule=deprecated_server_details ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN, scope_types=['project'], description="Delete share server.", operations=[ { 'method': 'DELETE', 'path': '/share-servers/{server_id}', } ], deprecated_rule=deprecated_server_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'manage_share_server', check_str=base.ADMIN, scope_types=['project'], description="Manage share server.", operations=[ { 'method': 'POST', 'path': '/share-servers/manage' } ], deprecated_rule=deprecated_manage_server ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'unmanage_share_server', check_str=base.ADMIN, scope_types=['project'], description="Unmanage share server.", operations=[ { 'method': 'POST', 'path': '/share-servers/{share_server_id}/action' } ], deprecated_rule=deprecated_unmanage_server ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset the status of a share server.", operations=[ { 'method': 'POST', 'path': '/share-servers/{share_server_id}/action' } ], deprecated_rule=deprecated_server_reset_status ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'share_server_migration_start', check_str=base.ADMIN, scope_types=['project'], description="Migrates a share server to the specified host.", operations=[ { 'method': 'POST', 'path': '/share-servers/{share_server_id}/action', } ], deprecated_rule=deprecated_server_migration_start ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'share_server_migration_check', check_str=base.ADMIN, scope_types=['project'], description="Check if can migrates a share server to the specified " "host.", operations=[ { 'method': 'POST', 'path': '/share-servers/{share_server_id}/action', } ], deprecated_rule=deprecated_server_migration_check ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 
'share_server_migration_complete', check_str=base.ADMIN, scope_types=['project'], description="Invokes the 2nd phase of share server migration.", operations=[ { 'method': 'POST', 'path': '/share-servers/{share_server_id}/action', } ], deprecated_rule=deprecated_server_migration_complete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'share_server_migration_cancel', check_str=base.ADMIN, scope_types=['project'], description="Attempts to cancel share server migration.", operations=[ { 'method': 'POST', 'path': '/share-servers/{share_server_id}/action', } ], deprecated_rule=deprecated_server_migration_cancel ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'share_server_migration_get_progress', check_str=base.ADMIN, scope_types=['project'], description=("Retrieves the share server migration progress for a " "given share server."), operations=[ { 'method': 'POST', 'path': '/share-servers/{share_server_id}/action', } ], deprecated_rule=deprecated_server_migration_get_progress ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'share_server_reset_task_state', check_str=base.ADMIN, scope_types=['project'], description="Resets task state.", operations=[ { 'method': 'POST', 'path': '/share-servers/{share_server_id}/action', } ], deprecated_rule=deprecated_server_reset_task_state ), ] def list_rules(): return share_server_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_snapshot.py0000664000175000017500000002477000000000000021533 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_snapshot:%s' DEPRECATED_REASON = """ The share snapshot API now supports scope and default roles. """ deprecated_snapshot_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_snapshot', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all_snapshots', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_force_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'force_delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_manage = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'manage_snapshot', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_unmanage = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'unmanage_snapshot', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_access_list = policy.DeprecatedRule( name=BASE_POLICY_NAME 
% 'access_list', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_allow_access = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'allow_access', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_deny_access = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'deny_access', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_update_snapshot_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='ZED' ) deprecated_delete_snapshot_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='ZED' ) deprecated_get_snapshot_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since='ZED' ) deprecated_list_snapshots_in_deferred_deletion_states = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'list_snapshots_in_deferred_deletion_states', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='2024.1/Caracal' ) deprecated_list_all_projects = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'list_all_projects', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='2025.1/Epoxy' ) share_snapshot_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_snapshot', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get share snapshot.", operations=[ { 'method': 'GET', 'path': '/snapshots/{snapshot_id}' } ], deprecated_rule=deprecated_snapshot_get ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all_snapshots', 
check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get all share snapshots.", operations=[ { 'method': 'GET', 'path': '/snapshots?{query}' }, { 'method': 'GET', 'path': '/snapshots/detail?{query}' } ], deprecated_rule=deprecated_snapshot_get_all ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list_all_projects', check_str=base.ADMIN, scope_types=['project'], description="List share snapshots by all projects.", operations=[ { 'method': 'GET', 'path': '/snapshots?all_tenants=1', }, { 'method': 'GET', 'path': '/snapshots/detail?all_tenants=1', } ], deprecated_rule=deprecated_list_all_projects ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'force_delete', check_str=base.ADMIN, scope_types=['project'], description="Force Delete a share snapshot.", operations=[ { 'method': 'DELETE', 'path': '/snapshots/{snapshot_id}' } ], deprecated_rule=deprecated_snapshot_force_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'manage_snapshot', check_str=base.ADMIN, scope_types=['project'], description="Manage share snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots/manage' } ], deprecated_rule=deprecated_snapshot_manage ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'unmanage_snapshot', check_str=base.ADMIN, scope_types=['project'], description="Unmanage share snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/action' } ], deprecated_rule=deprecated_snapshot_unmanage ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset status.", operations=[ { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/action', } ], deprecated_rule=deprecated_snapshot_reset_status ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'access_list', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="List access rules of a share snapshot.", operations=[ { 'method': 'GET', 'path': 
'/snapshots/{snapshot_id}/access-list' } ], deprecated_rule=deprecated_snapshot_access_list ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'allow_access', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Allow access to a share snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/action' } ], deprecated_rule=deprecated_snapshot_allow_access ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'deny_access', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Deny access to a share snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/action' } ], deprecated_rule=deprecated_snapshot_deny_access ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update_metadata', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update snapshot metadata.", operations=[ { 'method': 'PUT', 'path': '/snapshots/{snapshot_id}/metadata', }, { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/metadata/{key}', }, { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/metadata', }, ], deprecated_rule=deprecated_update_snapshot_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete_metadata', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete snapshot metadata.", operations=[ { 'method': 'DELETE', 'path': '/snapshots/{snapshot_id}/metadata/{key}', } ], deprecated_rule=deprecated_delete_snapshot_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_metadata', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get snapshot metadata.", operations=[ { 'method': 'GET', 'path': '/snapshots/{snapshot_id}/metadata', }, { 'method': 'GET', 'path': '/snapshots/{snapshot_id}/metadata/{key}', } ], deprecated_rule=deprecated_get_snapshot_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list_snapshots_in_deferred_deletion_states', 
check_str=base.ADMIN, scope_types=['project'], description="List (or get) snapshots whose deletion has been deferred", operations=[ { 'method': 'GET', 'path': '/v2/snapshots', }, { 'method': 'GET', 'path': '/snapshots/{snapshot_id}' } ], deprecated_rule=deprecated_list_snapshots_in_deferred_deletion_states ), ] def list_rules(): return share_snapshot_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_snapshot_export_location.py0000664000175000017500000000453100000000000025015 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_snapshot_export_location:%s' DEPRECATED_REASON = """ The share snapshot location API now supports scope and default roles. 
""" deprecated_snapshot_location_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_location_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_snapshot_export_location_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="List export locations of a share snapshot.", operations=[ { 'method': 'GET', 'path': '/snapshots/{snapshot_id}/export-locations/', } ], deprecated_rule=deprecated_snapshot_location_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get details of a specified export location of a " "share snapshot.", operations=[ { 'method': 'GET', 'path': ('/snapshots/{snapshot_id}/' 'export-locations/{export_location_id}'), } ], deprecated_rule=deprecated_snapshot_location_show ), ] def list_rules(): return share_snapshot_export_location_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_snapshot_instance.py0000664000175000017500000000674600000000000023422 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_snapshot_instance:%s' DEPRECATED_REASON = """ The share snapshot instance API now supports scope and default roles. """ deprecated_snapshot_instance_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_instance_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_instance_detail = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'detail', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_instance_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_snapshot_instance_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, scope_types=['project'], description="Get share snapshot instance.", operations=[ { 'method': 'GET', 'path': '/snapshot-instances/{snapshot_instance_id}', } ], deprecated_rule=deprecated_snapshot_instance_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN, scope_types=['project'], description="Get all share snapshot instances.", operations=[ { 'method': 'GET', 'path': '/snapshot-instances?{query}', }, ], deprecated_rule=deprecated_snapshot_instance_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'detail', check_str=base.ADMIN, scope_types=['project'], description="Get details of share snapshot instances.", 
operations=[ { 'method': 'GET', 'path': '/snapshot-instances/detail?{query}', }, ], deprecated_rule=deprecated_snapshot_instance_detail ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset share snapshot instance's status.", operations=[ { 'method': 'POST', 'path': '/snapshot-instances/{snapshot_instance_id}/action', } ], deprecated_rule=deprecated_snapshot_instance_reset_status ), ] def list_rules(): return share_snapshot_instance_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_snapshot_instance_export_location.py0000664000175000017500000000467100000000000026706 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_snapshot_instance_export_location:%s' DEPRECATED_REASON = """ The share snapshot instance export location API now supports scope and default roles. 
""" deprecated_snapshot_instance_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_snapshot_instance_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_snapshot_instance_export_location_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN, scope_types=['project'], description="List export locations of a share snapshot instance.", operations=[ { 'method': 'GET', 'path': ('/snapshot-instances/{snapshot_instance_id}/' 'export-locations'), } ], deprecated_rule=deprecated_snapshot_instance_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, scope_types=['project'], description="Show details of a specified export location of a share " "snapshot instance.", operations=[ { 'method': 'GET', 'path': ('/snapshot-instances/{snapshot_instance_id}/' 'export-locations/{export_location_id}'), } ], deprecated_rule=deprecated_snapshot_instance_show ), ] def list_rules(): return share_snapshot_instance_export_location_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_transfer.py0000664000175000017500000001117600000000000021514 0ustar00zuulzuul00000000000000# Copyright (c) 2022 China Telecom Digital Intelligence. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_transfer:%s' DEPRECATED_REASON = """ The transfer API now supports system scope and default roles. """ deprecated_share_transfer_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since="Antelope" ) deprecated_share_transfer_get_all_tenant = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all_tenant', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since="Antelope" ) deprecated_share_transfer_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since="Antelope" ) deprecated_share_transfer_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since="Antelope" ) deprecated_share_transfer_accept = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'accept', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since="Antelope" ) deprecated_share_transfer_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since="Antelope" ) share_transfer_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all', check_str=base.ADMIN_OR_PROJECT_READER, description="List share transfers.", operations=[ { 'method': 'GET', 'path': '/share-transfers' }, { 'method': 'GET', 
'path': '/share-transfers/detail' } ], deprecated_rule=deprecated_share_transfer_get_all ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all_tenant', check_str=base.ADMIN, scope_types=['project'], description="List share transfers with all tenants.", operations=[ { 'method': 'GET', 'path': '/share-transfers' }, { 'method': 'GET', 'path': '/share-transfers/detail' } ], deprecated_rule=deprecated_share_transfer_get_all_tenant ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN_OR_PROJECT_MEMBER, description="Create a share transfer.", operations=[ { 'method': 'POST', 'path': '/share-transfers' } ], deprecated_rule=deprecated_share_transfer_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get', check_str=base.ADMIN_OR_PROJECT_READER, description="Show one specified share transfer.", operations=[ { 'method': 'GET', 'path': '/share-transfers/{transfer_id}' } ], deprecated_rule=deprecated_share_transfer_get ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'accept', check_str=base.ADMIN_OR_PROJECT_MEMBER, description="Accept a share transfer.", operations=[ { 'method': 'POST', 'path': '/share-transfers/{transfer_id}/accept' } ], deprecated_rule=deprecated_share_transfer_accept ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, description="Delete share transfer.", operations=[ { 'method': 'DELETE', 'path': '/share-transfers/{transfer_id}' } ], deprecated_rule=deprecated_share_transfer_delete ), ] def list_rules(): return share_transfer_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_type.py0000664000175000017500000001477300000000000020657 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_type:%s' DEPRECATED_REASON = """ The share type API now supports scope and default roles. """ deprecated_share_type_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_type_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_type_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_type_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_type_get_default = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'default', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_type_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) 
deprecated_share_type_list_project_access = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'list_project_access', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_type_add_project_access = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'add_project_access', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_type_remove_project_access = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'remove_project_access', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_type_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN, scope_types=['project'], description='Create share type.', operations=[ { 'method': 'POST', 'path': '/types', } ], deprecated_rule=deprecated_share_type_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN, scope_types=['project'], description='Update share type.', operations=[ { 'method': 'PUT', 'path': '/types/{share_type_id}', } ], deprecated_rule=deprecated_share_type_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description='Get share type.', operations=[ { 'method': 'GET', 'path': '/types/{share_type_id}', } ], deprecated_rule=deprecated_share_type_show ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description='List share types.', operations=[ { 'method': 'GET', 'path': '/types?is_public=all', } ], deprecated_rule=deprecated_share_type_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'default', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description='Get default share type.', operations=[ { 'method': 'GET', 'path': 
'/types/default', } ], deprecated_rule=deprecated_share_type_get_default ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN, scope_types=['project'], description='Delete share type.', operations=[ { 'method': 'DELETE', 'path': '/types/{share_type_id}', } ], deprecated_rule=deprecated_share_type_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list_project_access', check_str=base.ADMIN, scope_types=['project'], description='List share type project access.', operations=[ { 'method': 'GET', 'path': '/types/{share_type_id}', } ], deprecated_rule=deprecated_share_type_list_project_access ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'add_project_access', check_str=base.ADMIN, scope_types=['project'], description='Add share type to project.', operations=[ { 'method': 'POST', 'path': '/types/{share_type_id}/action', } ], deprecated_rule=deprecated_share_type_add_project_access ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'remove_project_access', check_str=base.ADMIN, scope_types=['project'], description='Remove share type from project.', operations=[ { 'method': 'POST', 'path': '/types/{share_type_id}/action', } ], deprecated_rule=deprecated_share_type_remove_project_access ), ] def list_rules(): return share_type_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/share_types_extra_spec.py0000664000175000017500000001002700000000000023243 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.policies import base BASE_POLICY_NAME = 'share_types_extra_spec:%s' DEPRECATED_REASON = """ The share types extra specs API now supports scope and default roles. """ deprecated_extra_spec_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_extra_spec_show = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'show', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_extra_spec_index = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'index', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_extra_spec_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_extra_spec_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) share_types_extra_spec_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN, scope_types=['project'], description="Create share type extra spec.", operations=[ { 'method': 'POST', 'path': '/types/{share_type_id}/extra_specs', } ], deprecated_rule=deprecated_extra_spec_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'show', check_str=base.ADMIN, scope_types=['project'], description="Get share type extra specs of a given share type.", operations=[ { 'method': 'GET', 'path': '/types/{share_type_id}/extra_specs', } ], deprecated_rule=deprecated_extra_spec_show 
), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'index', check_str=base.ADMIN, scope_types=['project'], description="Get details of a share type extra spec.", operations=[ { 'method': 'GET', 'path': '/types/{share_type_id}/extra_specs/{extra_spec_id}', }, ], deprecated_rule=deprecated_extra_spec_index ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN, scope_types=['project'], description="Update share type extra spec.", operations=[ { 'method': 'PUT', 'path': '/types/{share_type_id}/extra_specs', } ], deprecated_rule=deprecated_extra_spec_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN, scope_types=['project'], description="Delete share type extra spec.", operations=[ { 'method': 'DELETE', 'path': '/types/{share_type_id}/extra_specs/{key}', } ], deprecated_rule=deprecated_extra_spec_delete ), ] def list_rules(): return share_types_extra_spec_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policies/shares.py0000664000175000017500000006205600000000000017776 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from manila.common import constants from manila.policies import base BASE_POLICY_NAME = 'share:%s' DEPRECATED_REASON = """ The share API now supports scope and default roles. 
""" # Deprecated share policies deprecated_share_create = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create', check_str="", deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_create_public = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create_public_share', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_all', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_set_public = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'set_public_share', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_force_delete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'force_delete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_manage = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'manage', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_unmanage = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'unmanage', check_str=base.RULE_ADMIN_API, 
deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_list_by_host = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'list_by_host', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_list_by_server_id = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'list_by_share_server_id', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_access_get = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'access_get', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_access_get_all = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'access_get_all', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_extend = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'extend', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_shrink = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'shrink', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_migration_start = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'migration_start', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_migration_complete = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'migration_complete', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_migration_cancel = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'migration_cancel', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, 
deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_migration_get_progress = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'migration_get_progress', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_reset_task_state = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_task_state', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_reset_status = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_revert_to_snapshot = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'revert_to_snapshot', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_allow_access = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'allow_access', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_deny_access = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'deny_access', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_update_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update_share_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_delete_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete_share_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_get_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'get_share_metadata', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, 
deprecated_since=versionutils.deprecated.WALLABY ) # deprecated legacy snapshot policies with "share" as base resource deprecated_share_create_snapshot = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'create_snapshot', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_delete_snapshot = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'delete_snapshot', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_share_snapshot_update = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'snapshot_update', check_str=base.RULE_DEFAULT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) deprecated_update_admin_only_metadata = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'update_admin_only_metadata', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since="YOGA" ) deprecated_list_shares_in_deferred_deletion_states = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'list_shares_in_deferred_deletion_states', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='2024.1/Caracal' ) deprecated_list_all_projects = policy.DeprecatedRule( name=BASE_POLICY_NAME % 'list_all_projects', check_str=base.RULE_ADMIN_API, deprecated_reason=DEPRECATED_REASON, deprecated_since='2025.1/Epoxy' ) shares_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Create share.", operations=[ { 'method': 'POST', 'path': '/shares', } ], deprecated_rule=deprecated_share_create ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create_public_share', check_str=base.ADMIN, scope_types=['project'], description="Create shares visible across all projects in the cloud.", operations=[ { 'method': 'POST', 'path': '/shares', } ], 
deprecated_rule=deprecated_share_create_public ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get share.", operations=[ { 'method': 'GET', 'path': '/shares/{share_id}', } ], deprecated_rule=deprecated_share_get ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_all', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="List shares.", operations=[ { 'method': 'GET', 'path': '/shares?{query}', }, { 'method': 'GET', 'path': '/shares/detail?{query}', } ], deprecated_rule=deprecated_share_get_all ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update a share.", operations=[ { 'method': 'PUT', 'path': '/shares/{share_id}', } ], deprecated_rule=deprecated_share_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'set_public_share', check_str=base.ADMIN, scope_types=['project'], description="Update a share to be visible across all projects in the " "cloud.", operations=[ { 'method': 'PUT', 'path': '/shares/{share_id}', } ], deprecated_rule=deprecated_share_set_public ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete share.", operations=[ { 'method': 'DELETE', 'path': '/shares/{share_id}', } ], deprecated_rule=deprecated_share_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'soft_delete', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Soft Delete a share.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'restore', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Restore a share.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], ), 
policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'force_delete', check_str=base.ADMIN, scope_types=['project'], description="Force Delete a share.", operations=[ { 'method': 'DELETE', 'path': '/shares/{share_id}', } ], deprecated_rule=deprecated_share_force_delete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'manage', check_str=base.ADMIN, scope_types=['project'], description="Manage share.", operations=[ { 'method': 'POST', 'path': '/shares/manage', } ], deprecated_rule=deprecated_share_manage ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'unmanage', check_str=base.ADMIN, scope_types=['project'], description="Unmanage share.", operations=[ { 'method': 'POST', 'path': '/shares/unmanage', } ], deprecated_rule=deprecated_share_unmanage ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list_by_host', check_str=base.ADMIN, scope_types=['project'], description="List share by host.", operations=[ { 'method': 'GET', 'path': '/shares?host={host}', }, { 'method': 'GET', 'path': '/shares/detail?host={host}', } ], deprecated_rule=deprecated_share_list_by_host ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list_by_share_server_id', check_str=base.ADMIN, scope_types=['project'], description="List share by server id.", operations=[ { 'method': 'GET', 'path': '/shares?share_server_id={share_server_id}' }, { 'method': 'GET', 'path': '/shares/detail?share_server_id={share_server_id}', } ], deprecated_rule=deprecated_share_list_by_server_id ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'access_get', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get share access rule (deprecated in API version 2.45).", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_access_get ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'access_get_all', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description=("List share access rules 
(deprecated in API " "version 2.45)."), operations=[ { 'method': 'GET', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_access_get_all ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'extend', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Extend share.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_extend ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'force_extend', check_str=base.ADMIN, scope_types=['project'], description="Force extend share.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ]), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % constants.POLICY_EXTEND_BEYOND_MAX_SHARE_SIZE, check_str=base.ADMIN, scope_types=['project'], description="Extend share beyond max share size.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ]), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'shrink', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Shrink share.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_shrink ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'migration_start', check_str=base.ADMIN, scope_types=['project'], description="Migrate a share to the specified host.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_migration_start ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'migration_complete', check_str=base.ADMIN, scope_types=['project'], description="Invoke 2nd phase of share migration.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_migration_complete ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'migration_cancel', check_str=base.ADMIN, scope_types=['project'], description="Attempt to cancel share migration.", 
operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_migration_cancel ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'migration_get_progress', check_str=base.ADMIN, scope_types=['project'], description=("Retrieve share migration progress for a given " "share."), operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_migration_get_progress ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_task_state', check_str=base.ADMIN, scope_types=['project'], description="Reset task state.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_reset_task_state ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.ADMIN, scope_types=['project'], description="Reset status.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_reset_status ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'revert_to_snapshot', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Revert a share to a snapshot.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_revert_to_snapshot ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'allow_access', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Add share access rule.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_allow_access ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'deny_access', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Remove share access rule.", operations=[ { 'method': 'POST', 'path': '/shares/{share_id}/action', } ], deprecated_rule=deprecated_share_deny_access ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 
'update_share_metadata', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update share metadata.", operations=[ { 'method': 'PUT', 'path': '/shares/{share_id}/metadata', }, { 'method': 'POST', 'path': '/shares/{share_id}/metadata/{key}', }, { 'method': 'POST', 'path': '/shares/{share_id}/metadata', }, ], deprecated_rule=deprecated_share_update_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete_share_metadata', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete share metadata.", operations=[ { 'method': 'DELETE', 'path': '/shares/{share_id}/metadata/{key}', } ], deprecated_rule=deprecated_share_delete_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'get_share_metadata', check_str=base.ADMIN_OR_PROJECT_READER, scope_types=['project'], description="Get share metadata.", operations=[ { 'method': 'GET', 'path': '/shares/{share_id}/metadata', }, { 'method': 'GET', 'path': '/shares/{share_id}/metadata/{key}', } ], deprecated_rule=deprecated_share_get_metadata ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list_shares_in_deferred_deletion_states', check_str=base.ADMIN, scope_types=['project'], description="List (or get) shares whose deletion has been deferred", operations=[ { 'method': 'GET', 'path': '/v2/shares', }, { 'method': 'GET', 'path': '/shares/{share_id}', } ], deprecated_rule=deprecated_list_shares_in_deferred_deletion_states ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'list_all_projects', check_str=base.ADMIN, scope_types=['project'], description="List share by all projects.", operations=[ { 'method': 'GET', 'path': '/shares?all_tenants=1', }, { 'method': 'GET', 'path': '/shares/detail?all_tenants=1', } ], deprecated_rule=deprecated_list_all_projects ), ] # NOTE(gouthamr) For historic reasons, some snapshot policies used # "share" as the resource. 
We could deprecate these and move them to using # "share_snapshot" as the base resource in the future. base_snapshot_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'create_snapshot', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Create share snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots', } ], deprecated_rule=deprecated_share_create_snapshot ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'delete_snapshot', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Delete share snapshot.", operations=[ { 'method': 'DELETE', 'path': '/snapshots/{snapshot_id}', } ], deprecated_rule=deprecated_share_delete_snapshot ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'snapshot_update', check_str=base.ADMIN_OR_PROJECT_MEMBER, scope_types=['project'], description="Update share snapshot.", operations=[ { 'method': 'PUT', 'path': '/snapshots/{snapshot_id}/action', } ], deprecated_rule=deprecated_share_snapshot_update ), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'update_admin_only_metadata', check_str=base.ADMIN, scope_types=['project'], description=( "Update metadata items that are considered \"admin only\" " "by the service."), operations=[ { 'method': 'PUT', 'path': '/shares/{share_id}/metadata', } ], deprecated_rule=deprecated_update_admin_only_metadata ), ] def list_rules(): return shares_policies + base_snapshot_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/policy.py0000664000175000017500000002141300000000000016171 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Policy Engine For Manila"""

import functools
import sys

from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils

from manila import exception
from manila import policies

CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Module-level singleton enforcer; lazily created by init() and torn down by
# reset(). All entry points below call init() first, so import order does not
# matter to callers.
_ENFORCER = None


def reset():
    """Discard the singleton Enforcer so the next init() rebuilds it."""
    global _ENFORCER
    if _ENFORCER:
        _ENFORCER.clear()
        _ENFORCER = None


def init(rules=None, use_conf=True, suppress_deprecation_warnings=False):
    """Init an Enforcer class.

    Idempotent: only the first call after a reset() actually constructs the
    Enforcer and registers the default rules; later calls are no-ops.

    :param rules: Default dictionary / Rules to use. It will be
        considered just in the first instantiation.
    :param use_conf: Whether to load rules from config file.
    :param suppress_deprecation_warnings: Whether to suppress policy
        deprecation warnings.
    """
    global _ENFORCER
    if not _ENFORCER:
        _ENFORCER = policy.Enforcer(CONF, rules=rules, use_conf=use_conf)
        # NOTE(gouthamr): Explicitly disable the warnings for policies
        # changing their default check_str. During
        # secure-rbac / policy-defaults-refresh work, all the policy
        # defaults have been changed and warning for each policy started
        # filling the log limits for various tools. Once we move to new
        # defaults only world then we can enable these warning again.
        _ENFORCER.suppress_default_change_warnings = True
        # Suppressing deprecation warnings is fine for tests. However we
        # won't do it by default
        _ENFORCER.suppress_deprecation_warnings = (
            suppress_deprecation_warnings)
        register_rules(_ENFORCER)


def enforce(context, action, target, do_raise=True):
    """Verifies that the action is valid on the target in this context.

    **IMPORTANT** ONLY for use in API extensions. This method ignores
    unregistered rules and applies a default rule on them; there should be
    no unregistered rules in first party manila APIs.

    :param context: manila context
    :param action: string representing the action to be checked, this
        should be colon separated for clarity. i.e. ``share:create``,
    :param target: dictionary representing the object of the action for
        object creation, this should be a dictionary representing the
        location of the object e.g. ``{'project_id': context.project_id}``
    :param do_raise: Whether to raise an exception if check fails.
    :returns: When ``do_raise`` is ``False``, returns a value that
        evaluates as ``True`` or ``False`` depending on whether the policy
        allows action on the target.
    :raises: manila.exception.PolicyNotAuthorized if verification fails and
        ``do_raise`` is ``True``.
    """
    init()
    try:
        return _ENFORCER.enforce(action,
                                 target,
                                 context,
                                 do_raise=do_raise,
                                 exc=exception.PolicyNotAuthorized,
                                 action=action)
    except policy.InvalidScope:
        # Translate oslo.policy's scope failure into manila's own
        # authorization error so callers only need to catch one type.
        raise exception.PolicyNotAuthorized(action=action)


def set_rules(rules, overwrite=True, use_conf=False):
    """Set rules based on the provided dict of rules.

    :param rules: New rules to use. It should be an instance of dict.
    :param overwrite: Whether to overwrite current rules or update them
        with the new rules.
    :param use_conf: Whether to reload rules from config file.
    """
    init(use_conf=False)
    _ENFORCER.set_rules(rules, overwrite, use_conf)


def get_rules():
    # Returns None when init() has not been called yet.
    if _ENFORCER:
        return _ENFORCER.rules


def register_rules(enforcer):
    # Register every in-tree default rule with the enforcer.
    enforcer.register_defaults(policies.list_rules())


def get_enforcer():
    # This method is for use by oslopolicy CLI scripts. Those scripts need the
    # 'output-file' and 'namespace' options, but having those in sys.argv means
    # loading the Manila config options will fail as those are not expected to
    # be present. So we pass in an arg list with those stripped out.
    conf_args = []
    # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:]
    i = 1
    while i < len(sys.argv):
        if sys.argv[i].strip('-') in ['namespace', 'output-file']:
            # Skip the option and its value.
            i += 2
            continue
        conf_args.append(sys.argv[i])
        i += 1
    cfg.CONF(conf_args, project='manila')
    init()
    return _ENFORCER


def authorize(context, action, target, do_raise=True, exc=None):
    """Verifies that the action is valid on the target in this context.

    Unlike enforce(), this raises PolicyNotRegistered (re-raised after
    logging) for rules that were never registered.

    :param context: manila context
    :param action: string representing the action to be checked this
        should be colon separated for clarity. i.e. ``share:create``,
    :param target: dictionary representing the object of the action for
        object creation this should be a dictionary representing the
        location of the object e.g. ``{'project_id': context.project_id}``
    :param do_raise: if True (the default), raises PolicyNotAuthorized; if
        False, returns False
    :param exc: Class of the exception to raise if the check fails. Any
        remaining arguments passed to :meth:`authorize` (both positional
        and keyword arguments) will be passed to the exception class. If
        not specified, :class:`PolicyNotAuthorized` will be used.
    :raises manila.exception.PolicyNotAuthorized: if verification fails and
        do_raise is True. Or if 'exc' is specified it will raise an
        exception of that type.
    :return: returns a non-False value (not necessarily "True") if
        authorized, and the exact value False if not authorized and
        do_raise is False.
    """
    init()
    if not exc:
        exc = exception.PolicyNotAuthorized
    target = target or default_target(context)
    try:
        result = _ENFORCER.authorize(action, target, context,
                                     do_raise=do_raise, exc=exc,
                                     action=action)
    except policy.PolicyNotRegistered:
        with excutils.save_and_reraise_exception():
            LOG.exception('Policy not registered')
    except policy.InvalidScope:
        # Scope check failed: behave per do_raise like a normal denial.
        if do_raise:
            raise exception.PolicyNotAuthorized(action=action)
        else:
            return False
    except Exception:
        with excutils.save_and_reraise_exception():
            msg_args = {
                'action': action,
                'credentials': context.to_policy_values(),
            }
            LOG.debug('Policy check for %(action)s failed with credentials '
                      '%(credentials)s', msg_args)
    return result


def default_target(context):
    # The default policy target is the caller's own project/user.
    return {'project_id': context.project_id, 'user_id': context.user_id}


def check_is_admin(context):
    """Whether or not user is admin according to policy setting.

    """
    # the target is user-self
    target = default_target(context)
    return authorize(context, 'context_is_admin', target, do_raise=False)


def check_is_host_admin(context):
    """Whether or not user is host admin according to policy setting.

    """
    # the target is user-self
    target = default_target(context)
    return authorize(context, 'context_is_host_admin', target,
                     do_raise=False)


def wrap_check_policy(resource):
    """Check policy corresponding to the wrapped methods prior to execution."""
    # NOTE(review): decorated methods must accept (self, context, target_obj,
    # ...) — the policy action name is derived from the wrapped function name.
    def check_policy_wraper(func):
        @functools.wraps(func)
        def wrapped(self, context, target_obj, *args, **kwargs):
            check_policy(context, resource, func.__name__, target_obj)
            return func(self, context, target_obj, *args, **kwargs)

        return wrapped

    return check_policy_wraper


def check_policy(context, resource, action, target_obj=None, do_raise=True):
    # Compose the registered rule name as "<resource>:<action>".
    target = target_obj or default_target(context)
    _action = '%s:%s' % (resource, action)
    return authorize(context, _action, target, do_raise=do_raise)
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.885672 manila-21.0.0/manila/privsep/0000775000175000017500000000000000000000000016007 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/privsep/__init__.py0000664000175000017500000000215500000000000020123 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Setup privsep decorator.""" from oslo_privsep import capabilities from oslo_privsep import priv_context sys_admin_pctxt = priv_context.PrivContext( 'manila', cfg_section='manila_sys_admin', pypath=__name__ + '.sys_admin_pctxt', capabilities=[capabilities.CAP_CHOWN, capabilities.CAP_DAC_OVERRIDE, capabilities.CAP_DAC_READ_SEARCH, capabilities.CAP_FOWNER, capabilities.CAP_NET_ADMIN, capabilities.CAP_SYS_ADMIN], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/privsep/common.py0000664000175000017500000000240600000000000017653 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila import exception from manila import utils as manila_utils from oslo_log import log LOG = log.getLogger(__name__) def execute_with_retries(action, action_args, max_retries): @manila_utils.retry( retry_param=exception.ProcessExecutionError, backoff_rate=2, retries=max_retries) def execute(): try: action(*action_args) return True except exception.ProcessExecutionError: LOG.exception("Recovering from a failed execute.") raise try: execute() except exception.ProcessExecutionError: LOG.exception("Failed to run command. 
Tries exhausted.") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/privsep/filesystem.py0000664000175000017500000000217700000000000020554 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for filesystem commands """ from oslo_concurrency import processutils import manila.privsep @manila.privsep.sys_admin_pctxt.entrypoint def e2fsck(device_path): return processutils.execute('e2fsck', '-y', '-f', device_path) @manila.privsep.sys_admin_pctxt.entrypoint def tune2fs(device_path): return processutils.execute('tune2fs', '-U', 'random', device_path) @manila.privsep.sys_admin_pctxt.entrypoint def make_filesystem(ext_version, device_name): return processutils.execute(f'mkfs.{ext_version}', device_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/privsep/lvm.py0000664000175000017500000000445000000000000017162 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for lvm related routines """ from oslo_concurrency import processutils import manila.privsep @manila.privsep.sys_admin_pctxt.entrypoint def lvremove(vg_name, lv_name): processutils.execute('lvremove', '-f', f'{vg_name}/{lv_name}') @manila.privsep.sys_admin_pctxt.entrypoint def lvcreate(lv_size, lv_name, vg_name, mirrors=0, region_size=0): extra_params = [] if mirrors: extra_params += ['-m', mirrors, '--nosync'] if region_size: extra_params += ['-R', region_size] processutils.execute( 'lvcreate', '-Wy', '--yes', '-L', f'{lv_size}G', '-n', lv_name, vg_name, *extra_params) @manila.privsep.sys_admin_pctxt.entrypoint def lv_snapshot_create(snapshot_size, snap_name, orig_lv_name): size_str = '%sG' % snapshot_size processutils.execute( 'lvcreate', '-L', size_str, '--name', snap_name, '--snapshot', orig_lv_name) @manila.privsep.sys_admin_pctxt.entrypoint def get_vgs(vg_name): out, err = processutils.execute( 'vgs', vg_name, '--rows', '--units', 'g',) return out, err @manila.privsep.sys_admin_pctxt.entrypoint def list_vgs_get_name(): out, err = processutils.execute('vgs', '--noheadings', '-o', 'name') return out, err @manila.privsep.sys_admin_pctxt.entrypoint def lvconvert(vg_name, snapshot_name): processutils.execute( 'lvconvert', '--merge', f'{vg_name}/{snapshot_name}') @manila.privsep.sys_admin_pctxt.entrypoint def lvrename(vg_name, lv_name, new_name): processutils.execute( 'lvrename', vg_name, lv_name, new_name) @manila.privsep.sys_admin_pctxt.entrypoint def lvextend(lv_name, new_size): processutils.execute('lvextend', '-L', '%sG' % new_size, '-r', lv_name) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/privsep/os.py0000664000175000017500000000552700000000000017013 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for os basic commands """ from oslo_concurrency import processutils from manila import exception import manila.privsep @manila.privsep.sys_admin_pctxt.entrypoint def rmdir(dir_path): processutils.execute('rmdir', dir_path) @manila.privsep.sys_admin_pctxt.entrypoint def mkdir(dir_path): processutils.execute('mkdir', dir_path) @manila.privsep.sys_admin_pctxt.entrypoint def recursive_forced_rm(dir_path): processutils.execute('rm', '-rf', dir_path) @manila.privsep.sys_admin_pctxt.entrypoint def is_data_definition_direct_io_supported(src_str, dest_str): try: processutils.execute( 'dd', 'count=0', f'if={src_str}', f'of={dest_str}', 'iflag=direct', 'oflag=direct') is_direct_io_supported = True except exception.ProcessExecutionError: is_direct_io_supported = False return is_direct_io_supported @manila.privsep.sys_admin_pctxt.entrypoint def data_definition(src_str, dest_str, size_in_g, use_direct_io=False): extra_flags = [] if use_direct_io: extra_flags += ['iflag=direct', 'oflag=direct'] processutils.execute( 'dd', 'if=%s' % src_str, 'of=%s' % dest_str, 'count=%d' % size_in_g, 'bs=1M', *extra_flags) @manila.privsep.sys_admin_pctxt.entrypoint def umount(mount_path): 
processutils.execute('umount', '-f', mount_path) @manila.privsep.sys_admin_pctxt.entrypoint def mount(device_name, mount_path, mount_type=None): extra_args = ['-t', mount_type] if mount_type else [] processutils.execute('mount', device_name, mount_path, *extra_args) @manila.privsep.sys_admin_pctxt.entrypoint def list_mounts(): out, err = processutils.execute('mount', '-l') return out, err @manila.privsep.sys_admin_pctxt.entrypoint def chmod(permission_level_str, mount_path): processutils.execute('chmod', permission_level_str, mount_path) @manila.privsep.sys_admin_pctxt.entrypoint def find(directory_to_find, min_depth='1', dirs_to_ignore=[], delete=False): ignored_dirs = [] extra_args = [] for dir in dirs_to_ignore: ignored_dirs += '!', '-path', dir if delete: extra_args.append('-delete') processutils.execute( 'find', directory_to_find, '-mindepth', min_depth, *ignored_dirs, *extra_args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/quota.py0000664000175000017500000014576400000000000016043 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Quotas for shares.""" import datetime from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import timeutils from manila import db from manila import exception LOG = log.getLogger(__name__) QUOTA_GROUP = 'quota' quota_opts = [ cfg.IntOpt('shares', default=50, help='Number of shares allowed per project.', deprecated_group='DEFAULT', deprecated_name='quota_shares'), cfg.IntOpt('snapshots', default=50, help='Number of share snapshots allowed per project.', deprecated_group='DEFAULT', deprecated_name='quota_snapshots'), cfg.IntOpt('gigabytes', default=1000, help='Number of share gigabytes allowed per project.', deprecated_group='DEFAULT', deprecated_name='quota_gigabytes'), cfg.IntOpt('per_share_gigabytes', default=-1, help='Max size allowed per share, in gigabytes.', deprecated_group='DEFAULT', deprecated_name='quota_per_share_gigabytes'), cfg.IntOpt('snapshot_gigabytes', default=1000, help='Number of snapshot gigabytes allowed per project.', deprecated_group='DEFAULT', deprecated_name='quota_snapshot_gigabytes'), cfg.IntOpt('share_networks', default=10, help='Number of share-networks allowed per project.', deprecated_group='DEFAULT', deprecated_name='quota_share_networks'), cfg.IntOpt('share_replicas', default=100, help='Number of share-replicas allowed per project.', deprecated_group='DEFAULT', deprecated_name='quota_share_replicas'), cfg.IntOpt('replica_gigabytes', default=1000, help='Number of replica gigabytes allowed per project.', deprecated_group='DEFAULT', deprecated_name='quota_replica_gigabytes'), cfg.IntOpt('share_groups', default=50, help='Number of share groups allowed.', deprecated_group='DEFAULT', deprecated_name='quota_share_groups'), cfg.IntOpt('share_group_snapshots', default=50, help='Number of share group snapshots allowed.', deprecated_group='DEFAULT', deprecated_name='quota_share_group_snapshots'), cfg.IntOpt('reservation_expire', default=86400, help='Number of seconds until a reservation 
expires.', deprecated_group='DEFAULT'), cfg.IntOpt('until_refresh', default=0, help='Count of reservations until usage is refreshed.', deprecated_group='DEFAULT'), cfg.IntOpt('max_age', default=0, help='Number of seconds between subsequent usage refreshes.', deprecated_group='DEFAULT'), cfg.StrOpt('driver', default='manila.quota.DbQuotaDriver', help='Default driver to use for quota checks.', deprecated_group='DEFAULT', deprecated_name='quota_driver'), cfg.IntOpt('backups', default=10, help='Number of share backups allowed per project.', deprecated_group='DEFAULT', deprecated_name='quota_backups'), cfg.IntOpt('backup_gigabytes', default=1000, help='Total amount of storage, in gigabytes, allowed ' 'for backups per project.', deprecated_group='DEFAULT', deprecated_name='quota_backup_gigabytes'), cfg.IntOpt('encryption_keys', default=100, help='Number of encryption keys allowed per project.'), ] CONF = cfg.CONF CONF.register_opts(quota_opts, QUOTA_GROUP) class DbQuotaDriver(object): """Database Quota driver. Driver to perform necessary checks to enforce quotas and obtain quota information. The default driver utilizes the local database. """ def get_by_class(self, context, quota_class, resource): """Get a specific quota by quota class.""" return db.quota_class_get(context, quota_class, resource) def get_defaults(self, context, resources): """Given a list of resources, retrieve the default quotas. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. """ quotas = {} default_quotas = db.quota_class_get_default(context) for resource in resources.values(): quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas def get_class_quotas(self, context, resources, quota_class, defaults=True): """Retrieve quotas for a quota class. Given a list of resources, retrieve the quotas for the given quota class. :param context: The request context, for access checks. 
:param resources: A dictionary of the registered resources. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ quotas = {} class_quotas = db.quota_class_get_all_by_name(context, quota_class) for resource in resources.values(): if defaults or resource.name in class_quotas: quotas[resource.name] = class_quotas.get(resource.name, resource.default) return quotas def _process_quotas(self, context, resources, project_id, quotas, quota_class=None, defaults=True, usages=None, remains=False): modified_quotas = {} # Get the quotas for the appropriate class. If the project ID # matches the one in the context, we use the quota_class from # the context, otherwise, we use the provided quota_class (if # any) if project_id == context.project_id: quota_class = context.quota_class if quota_class: class_quotas = db.quota_class_get_all_by_name(context, quota_class) else: class_quotas = {} default_quotas = self.get_defaults(context, resources) for resource in resources.values(): # Omit default/quota class values if not defaults and resource.name not in quotas: continue limit = quotas.get( resource.name, class_quotas.get(resource.name, default_quotas[resource.name])) modified_quotas[resource.name] = dict(limit=limit) # Include usages if desired. This is optional because one # internal consumer of this interface wants to access the # usages directly from inside a transaction. if usages: usage = usages.get(resource.name, {}) modified_quotas[resource.name].update( in_use=usage.get('in_use', 0), reserved=usage.get('reserved', 0), ) # Initialize remains quotas. 
if remains: modified_quotas[resource.name].update(remains=limit) if remains: all_quotas = db.quota_get_all(context, project_id) for quota in all_quotas: if quota.resource in modified_quotas: modified_quotas[quota.resource]['remains'] -= ( quota.hard_limit) return modified_quotas def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True, remains=False): """Retrieve quotas for project. Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param remains: If True, the current remains of the project will will be returned. """ project_quotas = db.quota_get_all_by_project(context, project_id) project_usages = None if usages: project_usages = db.quota_usage_get_all_by_project(context, project_id) return self._process_quotas(context, resources, project_id, project_quotas, quota_class, defaults=defaults, usages=project_usages, remains=remains) def get_user_quotas(self, context, resources, project_id, user_id, quota_class=None, defaults=True, usages=True): """Retrieve quotas for user and project. Given a list of resources, retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. 
:param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ user_quotas = db.quota_get_all_by_project_and_user( context, project_id, user_id) # Use the project quota for default user quota. proj_quotas = db.quota_get_all_by_project(context, project_id) for key, value in proj_quotas.items(): if key not in user_quotas.keys(): user_quotas[key] = value user_usages = None if usages: user_usages = db.quota_usage_get_all_by_project_and_user( context, project_id, user_id) return self._process_quotas(context, resources, project_id, user_quotas, quota_class, defaults=defaults, usages=user_usages) def get_share_type_quotas(self, context, resources, project_id, share_type_id, quota_class=None, defaults=True, usages=True): """Retrieve quotas for share_type and project. Given a list of resources, retrieve the quotas for the given share_type and project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The UUID of the project to return quotas for. :param share_type: UUID/name of a share type to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. 
:param usages: If True, the current in_use and reserved counts will also be returned. """ st_quotas = db.quota_get_all_by_project_and_share_type( context, project_id, share_type_id) # Use the project quota for default share_type quota. project_quotas = db.quota_get_all_by_project(context, project_id) for key, value in project_quotas.items(): if key not in st_quotas.keys(): st_quotas[key] = value st_usages = None if usages: st_usages = db.quota_usage_get_all_by_project_and_share_type( context, project_id, share_type_id) return self._process_quotas( context, resources, project_id, st_quotas, quota_class, defaults=defaults, usages=st_usages) def get_settable_quotas(self, context, resources, project_id, user_id=None, share_type_id=None): """Retrieve range of settable quotas. Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param share_type_id: The UUID of the share_type to return quotas for. 
""" settable_quotas = {} project_quotas = self.get_project_quotas( context, resources, project_id, remains=True) if user_id or share_type_id: if user_id: subquotas = self.get_user_quotas( context, resources, project_id, user_id) else: subquotas = self.get_share_type_quotas( context, resources, project_id, share_type_id) for key, value in subquotas.items(): settable_quotas[key] = { "minimum": value['in_use'] + value['reserved'], "maximum": project_quotas[key]["limit"], } else: for key, value in project_quotas.items(): minimum = max( int(value['limit'] - value['remains']), int(value['in_use'] + value['reserved']) ) settable_quotas[key] = {"minimum": minimum, "maximum": -1} return settable_quotas def _get_quotas(self, context, resources, keys, has_sync, project_id=None, user_id=None, share_type_id=None): """Retrieve quotas for a resource. A helper method which retrieves the quotas for the specific resources identified by keys, and which apply to the current context. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param keys: A list of the desired quotas to retrieve. :param has_sync: If True, indicates that the resource must have a sync attribute; if False, indicates that the resource must NOT have a sync attribute. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. (Special case: user operates on resource, owned/created by different user) """ # Filter resources if has_sync: sync_filt = lambda x: hasattr(x, 'sync') # noqa: E731 else: sync_filt = lambda x: not hasattr(x, 'sync') # noqa: E731 desired = set(keys) sub_resources = {k: v for k, v in resources.items() if k in desired and sync_filt(v)} # Make sure we accounted for all of them... 
if len(keys) != len(sub_resources): unknown = desired - set(sub_resources.keys()) raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) if user_id: # Grab and return the quotas (without usages) quotas = self.get_user_quotas(context, sub_resources, project_id, user_id, context.quota_class, usages=False) elif share_type_id: # Grab and return the quotas (without usages) quotas = self.get_share_type_quotas( context, sub_resources, project_id, share_type_id, context.quota_class, usages=False) else: # Grab and return the quotas (without usages) quotas = self.get_project_quotas(context, sub_resources, project_id, context.quota_class, usages=False) return {k: v['limit'] for k, v in quotas.items()} def limit_check(self, context, resources, values, project_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. 
""" # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exception.InvalidQuotaValue(unders=sorted(unders)) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id quotas = self._get_quotas(context, resources, values.keys(), has_sync=False, project_id=project_id) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if quotas[key] >= 0 and quotas[key] < val] if overs: raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages={}) def reserve(self, context, resources, deltas, expire=None, project_id=None, user_id=None, share_type_id=None, overquota_allowed=False): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. :param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. 
If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. (Special case: user operates on resource, owned/created by different user) """ # Set up the reservation expiration if expire is None: expire = CONF.quota.reservation_expire if isinstance(expire, int): expire = datetime.timedelta(seconds=expire) if isinstance(expire, datetime.timedelta): expire = timeutils.utcnow() + expire if not isinstance(expire, datetime.datetime): raise exception.InvalidReservationExpiration(expire=expire) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id # Get the applicable quotas. # NOTE(Vek): We're not worried about races at this point. # Yes, the admin may be in the process of reducing # quotas, but that's a pretty rare thing. quotas = self._get_quotas( context, resources, deltas, has_sync=True, project_id=project_id) user_quotas = self._get_quotas( context, resources, deltas, has_sync=True, project_id=project_id, user_id=user_id) if share_type_id: share_type_quotas = self._get_quotas( context, resources, deltas, has_sync=True, project_id=project_id, share_type_id=share_type_id) else: share_type_quotas = {} # NOTE(Vek): Most of the work here has to be done in the DB # API, because we have to do it in a transaction, # which means access to the session. Since the # session isn't available outside the DBAPI, we # have to do the work there. 
return db.quota_reserve( context, resources, quotas, user_quotas, share_type_quotas, deltas, expire, CONF.quota.until_refresh, CONF.quota.max_age, project_id=project_id, user_id=user_id, share_type_id=share_type_id, overquota_allowed=overquota_allowed) def commit(self, context, reservations, project_id=None, user_id=None, share_type_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. (Special case: user operates on resource, owned/created by different user) """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id db.reservation_commit( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_type_id) def rollback(self, context, reservations, project_id=None, user_id=None, share_type_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. :param user_id: Specify the user_id if current context is admin and admin wants to impact on common user. 
(Special case: user operates on resource, owned/created by different user) """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # If user_id is None, then we use the user_id in context if user_id is None: user_id = context.user_id db.reservation_rollback( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_type_id) def usage_reset(self, context, resources): """Reset usage records. Reset the usage records for a particular user on a list of resources. This will force that user's usage records to be refreshed the next time a reservation is made. Note: this does not affect the currently outstanding reservations the user has; those reservations must be committed or rolled back (or expired). :param context: The request context, for access checks. :param resources: A list of the resource names for which the usage must be reset. """ # We need an elevated context for the calls to # quota_usage_update() elevated = context.elevated() for resource in resources: try: # Reset the usage to -1, which will force it to be # refreshed db.quota_usage_update(elevated, context.project_id, context.user_id, resource, in_use=-1) except exception.QuotaUsageNotFound: # That means it'll be refreshed anyway pass def destroy_all_by_project(self, context, project_id): """Destroy metadata associated with a project. Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ db.quota_destroy_all_by_project(context, project_id) def destroy_all_by_project_and_user(self, context, project_id, user_id): """Destroy metadata associated with a project and user. Destroy all quotas, usages, and reservations associated with a project and user. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. 
:param user_id: The ID of the user being deleted. """ db.quota_destroy_all_by_project_and_user(context, project_id, user_id) def destroy_all_by_project_and_share_type(self, context, project_id, share_type_id): """Destroy metadata associated with a project and share_type. Destroy all quotas, usages, and reservations associated with a project and share_type. :param context: The request context, for access checks. :param project_id: The ID of the project. :param share_type_id: The UUID of the share type. """ db.quota_destroy_all_by_share_type(context, share_type_id, project_id=project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. """ db.reservation_expire(context) class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag=None): """Initializes a Resource. :param name: The name of the resource, i.e., "shares". :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ self.name = name self.flag = flag @property def default(self): """Return the default value of the quota.""" return CONF.quota[self.flag] if self.flag else -1 class ReservableResource(BaseResource): """Describe a reservable resource.""" def __init__(self, name, sync, flag=None): """Initializes a ReservableResource. Reservable resources are those resources which directly correspond to objects in the database, i.e., shares, gigabytes, etc. A ReservableResource must be constructed with a usage synchronization function, which will be called to determine the current counts of one or more resources. The usage synchronization function will be passed three arguments: an admin context, the project ID, and an opaque session object, which should in turn be passed to the underlying database function. 
Synchronization functions should return a dictionary mapping resource names to the current in_use count for those resources; more than one resource and resource count may be returned. Note that synchronization functions may be associated with more than one ReservableResource. :param name: The name of the resource, i.e., "shares". :param sync: A callable which returns a dictionary to resynchronize the in_use count for one or more resources, as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(ReservableResource, self).__init__(name, flag=flag) if sync: self.sync = sync class AbsoluteResource(BaseResource): """Describe a non-reservable resource.""" pass class CountableResource(AbsoluteResource): """Describe a countable resource. Describe a resource where the counts aren't based solely on the project ID. """ def __init__(self, name, count, flag=None): """Initializes a CountableResource. Countable resources are those resources which directly correspond to objects in the database, i.e., shares, gigabytes, etc., but for which a count by project ID is inappropriate. A CountableResource must be constructed with a counting function, which will be called to determine the current counts of the resource. The counting function will be passed the context, along with the extra positional and keyword arguments that are passed to Quota.count(). It should return an integer specifying the count. Note that this counting is not performed in a transaction-safe manner. This resource class is a temporary measure to provide required functionality, until a better approach to solving this problem can be evolved. :param name: The name of the resource, i.e., "shares". :param count: A callable which returns the count of the resource. The arguments passed are as described above. 
:param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(CountableResource, self).__init__(name, flag=flag) self.count = count class QuotaEngine(object): """Represent the set of recognized quotas.""" def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._resources = {} self._driver_cls = quota_driver_class self.__driver = None @property def _driver(self): if self.__driver: return self.__driver if not self._driver_cls: self._driver_cls = CONF.quota.driver if isinstance(self._driver_cls, str): self._driver_cls = importutils.import_object(self._driver_cls) self.__driver = self._driver_cls return self.__driver def __contains__(self, resource): return resource in self._resources def register_resource(self, resource): """Register a resource.""" self._resources[resource.name] = resource def register_resources(self, resources): """Register a list of resources.""" for resource in resources: self.register_resource(resource) def get_by_class(self, context, quota_class, resource): """Get a specific quota by quota class.""" return self._driver.get_by_class(context, quota_class, resource) def get_defaults(self, context): """Retrieve the default quotas. :param context: The request context, for access checks. """ return self._driver.get_defaults(context, self._resources) def get_class_quotas(self, context, quota_class, defaults=True): """Retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ return self._driver.get_class_quotas(context, self._resources, quota_class, defaults=defaults) def get_user_quotas(self, context, project_id, user_id, quota_class=None, defaults=True, usages=True): """Retrieve the quotas for the given user and project. 
:param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ return self._driver.get_user_quotas(context, self._resources, project_id, user_id, quota_class=quota_class, defaults=defaults, usages=usages) def get_share_type_quotas(self, context, project_id, share_type_id, quota_class=None, defaults=True, usages=True): """Retrieve the quotas for the given user and project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param share_type_id: The UUID of the user to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ return self._driver.get_share_type_quotas( context, self._resources, project_id, share_type_id, quota_class=quota_class, defaults=defaults, usages=usages) def get_project_quotas(self, context, project_id, quota_class=None, defaults=True, usages=True, remains=False): """Retrieve the quotas for the given project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. 
:param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. :param remains: If True, the current remains of the project will will be returned. """ return self._driver.get_project_quotas(context, self._resources, project_id, quota_class=quota_class, defaults=defaults, usages=usages, remains=remains) def get_settable_quotas(self, context, project_id, user_id=None, share_type_id=None): """Get settable quotas. Given a list of resources, retrieve the range of settable quotas for the given user or project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param user_id: The ID of the user to return quotas for. :param share_type_id: The UUID of the share_type to return quotas for. """ return self._driver.get_settable_quotas( context, self._resources, project_id, user_id=user_id, share_type_id=share_type_id) def count(self, context, resource, *args, **kwargs): """Count a resource. For countable resources, invokes the count() function and returns its result. Arguments following the context and resource are passed directly to the count function declared by the resource. :param context: The request context, for access checks. :param resource: The name of the resource, as a string. """ # Get the resource res = self._resources.get(resource) if not res or not hasattr(res, 'count'): raise exception.QuotaResourceUnknown(unknown=[resource]) return res.count(context, *args, **kwargs) def limit_check(self, context, project_id=None, **values): """Check simple quota limits. 
For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. The values to check are given as keyword arguments, where the key identifies the specific quota limit to check, and the value is the proposed value. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ return self._driver.limit_check(context, self._resources, values, project_id=project_id) def reserve(self, context, expire=None, project_id=None, user_id=None, share_type_id=None, overquota_allowed=False, **deltas): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. The deltas are given as keyword arguments, and current usage and other reservations are factored into the quota check. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param expire: An optional parameter specifying an expiration time for the reservations. 
If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ reservations = self._driver.reserve( context, self._resources, deltas, expire=expire, project_id=project_id, user_id=user_id, share_type_id=share_type_id, overquota_allowed=overquota_allowed ) LOG.debug("Created reservations %s", reservations) return reservations def commit(self, context, reservations, project_id=None, user_id=None, share_type_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.commit( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_type_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception("Failed to commit reservations %s", reservations) return LOG.debug("Committed reservations %s", reservations) def rollback(self, context, reservations, project_id=None, user_id=None, share_type_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. 
:param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.rollback( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_type_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception("Failed to roll back reservations %s", reservations) return LOG.debug("Rolled back reservations %s", reservations) def usage_reset(self, context, resources): """Reset usage records. Reset the usage records for a particular user on a list of resources. This will force that user's usage records to be refreshed the next time a reservation is made. Note: this does not affect the currently outstanding reservations the user has; those reservations must be committed or rolled back (or expired). :param context: The request context, for access checks. :param resources: A list of the resource names for which the usage must be reset. """ self._driver.usage_reset(context, resources) def destroy_all_by_project_and_user(self, context, project_id, user_id): """Destroy metadata associated with a project and user. Destroy all quotas, usages, and reservations associated with a project and user. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param user_id: The ID of the user being deleted. """ self._driver.destroy_all_by_project_and_user(context, project_id, user_id) def destroy_all_by_project_and_share_type(self, context, project_id, share_type_id): """Destroy metadata associated with a project and share_type. Destroy all quotas, usages, and reservations associated with a project and share_type. :param context: The request context, for access checks. :param project_id: The ID of the project. 
:param share_type_id: The UUID of the share_type. """ self._driver.destroy_all_by_project_and_share_type( context, project_id, share_type_id) def destroy_all_by_project(self, context, project_id): """Destroy metadata associated with a project. Destroy all quotas, usages, and reservations associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ self._driver.destroy_all_by_project(context, project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. """ self._driver.expire(context) @property def resources(self): return sorted(self._resources.keys()) QUOTAS = QuotaEngine() resources = [ ReservableResource('shares', '_sync_shares', 'shares'), ReservableResource('snapshots', '_sync_snapshots', 'snapshots'), ReservableResource('gigabytes', '_sync_gigabytes', 'gigabytes'), ReservableResource('per_share_gigabytes', None, 'per_share_gigabytes'), ReservableResource('snapshot_gigabytes', '_sync_snapshot_gigabytes', 'snapshot_gigabytes'), ReservableResource('share_networks', '_sync_share_networks', 'share_networks'), ReservableResource('share_groups', '_sync_share_groups', 'share_groups'), ReservableResource('share_group_snapshots', '_sync_share_group_snapshots', 'share_group_snapshots'), ReservableResource('share_replicas', '_sync_share_replicas', 'share_replicas'), ReservableResource('replica_gigabytes', '_sync_replica_gigabytes', 'replica_gigabytes'), ReservableResource('backups', '_sync_backups', 'backups'), ReservableResource('backup_gigabytes', '_sync_backup_gigabytes', 'backup_gigabytes'), ReservableResource('encryption_keys', '_sync_encryption_keys', 'encryption_keys'), ] QUOTAS.register_resources(resources) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/rpc.py0000664000175000017500000001167500000000000015467 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', ] from oslo_config import cfg import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_serialization import jsonutils from oslo_utils import importutils profiler = importutils.try_import('osprofiler.profiler') import manila.context import manila.exception from manila import utils CONF = cfg.CONF TRANSPORT = None NOTIFICATION_TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ manila.exception.__name__, ] EXTRA_EXMODS = [] def init(conf): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = messaging.get_rpc_transport(conf, allowed_remote_exmods=exmods) NOTIFICATION_TRANSPORT = messaging.get_notification_transport( conf, allowed_remote_exmods=exmods) if utils.notifications_enabled(conf): json_serializer = messaging.JsonPayloadSerializer() serializer = RequestContextSerializer(json_serializer) NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) else: NOTIFIER = utils.DO_NOTHING def initialized(): return None not in [TRANSPORT, NOTIFIER] def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER assert TRANSPORT is not 
None assert NOTIFICATION_TRANSPORT is not None assert NOTIFIER is not None TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() TRANSPORT = NOTIFIER = NOTIFICATION_TRANSPORT = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class JsonPayloadSerializer(messaging.NoOpSerializer): @staticmethod def serialize_entity(context, entity): return jsonutils.to_primitive(entity, convert_instances=True) class RequestContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): _context = context.to_dict() if profiler is not None: prof = profiler.get() if prof: trace_info = { "hmac_key": prof.hmac_key, "base_id": prof.get_base_id(), "parent_id": prof.get_id() } _context.update({"trace_info": trace_info}) return _context def deserialize_context(self, context): trace_info = context.pop("trace_info", None) if trace_info: if profiler is not None: profiler.init(**trace_info) return manila.context.RequestContext.from_dict(context) def get_transport_url(url_str=None): return messaging.TransportURL.parse(CONF, url_str) def get_client(target, version_cap=None, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.get_rpc_client( TRANSPORT, target, version_cap=version_cap, serializer=serializer) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None access_policy = dispatcher.DefaultRPCAccessPolicy serializer = RequestContextSerializer(serializer) return 
messaging.get_rpc_server(TRANSPORT, target, endpoints, serializer=serializer, access_policy=access_policy) @utils.if_notifications_enabled def get_notifier(service=None, host=None, publisher_id=None): assert NOTIFIER is not None if not publisher_id: publisher_id = "%s.%s" % (service, host or CONF.host) return NOTIFIER.prepare(publisher_id=publisher_id) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.885672 manila-21.0.0/manila/scheduler/0000775000175000017500000000000000000000000016275 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/__init__.py0000664000175000017500000000000000000000000020374 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/base_handler.py0000664000175000017500000000342100000000000021256 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A common base for handling extension classes. 
Used by BaseFilterHandler and BaseWeightHandler """ import inspect from stevedore import extension class BaseHandler(object): """Base class to handle loading filter and weight classes.""" def __init__(self, modifier_class_type, modifier_namespace): self.namespace = modifier_namespace self.modifier_class_type = modifier_class_type self.extension_manager = extension.ExtensionManager(modifier_namespace) def _is_correct_class(self, cls): """Check if an object is the correct type. Return whether an object is a class of the correct type and is not prefixed with an underscore. """ return (inspect.isclass(cls) and not cls.__name__.startswith('_') and issubclass(cls, self.modifier_class_type)) def get_all_classes(self): # We use a set, as some classes may have an entrypoint of their own, # and also be returned by a function such as 'all_filters' for example return [ext.plugin for ext in self.extension_manager if self._is_correct_class(ext.plugin)] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.889672 manila-21.0.0/manila/scheduler/drivers/0000775000175000017500000000000000000000000017753 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/drivers/__init__.py0000664000175000017500000000000000000000000022052 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/drivers/base.py0000664000175000017500000001221400000000000021237 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler base class that all Schedulers should inherit from """ from oslo_config import cfg from oslo_utils import importutils from oslo_utils import timeutils from manila import db from manila.i18n import _ from manila.share import rpcapi as share_rpcapi from manila import utils scheduler_driver_opts = [ cfg.StrOpt('scheduler_host_manager', default='manila.scheduler.host_manager.HostManager', help='The scheduler host manager class to use.'), cfg.IntOpt('scheduler_max_attempts', default=3, help='Maximum number of attempts to schedule a share.'), ] CONF = cfg.CONF CONF.register_opts(scheduler_driver_opts) def share_update_db(context, share_id, host): '''Set the host and set the scheduled_at field of a share. :returns: A Share with the updated fields set properly. ''' now = timeutils.utcnow() values = {'host': host, 'scheduled_at': now} return db.share_update(context, share_id, values) def share_replica_update_db(context, share_replica_id, host): """Set the host and the scheduled_at field of a share replica. :returns: A Share Replica with the updated fields set. """ now = timeutils.utcnow() values = {'host': host, 'scheduled_at': now} return db.share_replica_update(context, share_replica_id, values) def share_group_update_db(context, share_group_id, host): '''Set the host and set the updated_at field of a share group. :returns: A share group with the updated fields set properly. 
''' now = timeutils.utcnow() values = {'host': host, 'updated_at': now} return db.share_group_update(context, share_group_id, values) class Scheduler(object): """The base class that all Scheduler classes should inherit from.""" def __init__(self): self.host_manager = importutils.import_object( CONF.scheduler_host_manager) self.share_rpcapi = share_rpcapi.ShareAPI() def get_host_list(self): """Get a list of hosts from the HostManager.""" return self.host_manager.get_host_list() def get_service_capabilities(self): """Get the normalized set of capabilities for the services.""" return self.host_manager.get_service_capabilities() def update_service_capabilities(self, service_name, host, capabilities, timestamp): """Process a capability update from a service node.""" self.host_manager.update_service_capabilities(service_name, host, capabilities, timestamp) def hosts_up(self, context, topic): """Return the list of hosts that have a running service for topic.""" services = db.service_get_all_by_topic(context, topic) return [service['host'] for service in services if utils.service_is_up(service)] def schedule(self, context, topic, method, *_args, **_kwargs): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) def schedule_create_share(self, context, request_spec, filter_properties): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement schedule_create_share")) def schedule_create_share_group(self, context, share_group_id, request_spec, filter_properties): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_( "Must implement schedule_create_share_group")) def get_pools(self, context, filters): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement get_pools")) def host_passes_filters(self, context, host, request_spec, filter_properties): """Must override schedule method for 
migration to work.""" raise NotImplementedError(_("Must implement host_passes_filters")) def schedule_create_replica(self, context, request_spec, filter_properties): """Must override schedule method for create replica to work.""" raise NotImplementedError(_("Must implement schedule_create_replica")) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/drivers/chance.py0000664000175000017500000000515500000000000021554 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Chance (Random) Scheduler implementation """ import random from oslo_config import cfg from manila import exception from manila.i18n import _ from manila.scheduler.drivers import base CONF = cfg.CONF class ChanceScheduler(base.Scheduler): """Implements Scheduler as a random node selector.""" def _filter_hosts(self, request_spec, hosts, **kwargs): """Filter a list of hosts based on request_spec.""" filter_properties = kwargs.get('filter_properties', {}) ignore_hosts = filter_properties.get('ignore_hosts', []) hosts = [host for host in hosts if host not in ignore_hosts] return hosts def _schedule(self, context, topic, request_spec, **kwargs): """Picks a host that is up at random.""" elevated = context.elevated() hosts = self.hosts_up(elevated, topic) if not hosts: msg = _("Is the appropriate service running?") raise exception.NoValidHost(reason=msg) hosts = self._filter_hosts(request_spec, hosts, **kwargs) if not hosts: msg = _("Could not find another host") raise exception.NoValidHost(reason=msg) return hosts[int(random.random() * len(hosts))] def schedule_create_share(self, context, request_spec, filter_properties): """Picks a host that is up at random.""" topic = CONF.share_topic host = self._schedule(context, topic, request_spec, filter_properties=filter_properties) share_id = request_spec['share_id'] snapshot_id = request_spec['snapshot_id'] updated_share = base.share_update_db(context, share_id, host) self.share_rpcapi.create_share_instance( context, updated_share.instance, host, request_spec, filter_properties, snapshot_id ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/drivers/filter.py0000664000175000017500000005465500000000000021631 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Intel Corporation # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The FilterScheduler is for scheduling of share and share group creation. You can customize this scheduler by specifying your own share/share group filters and weighing functions. """ from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.message import api as message_api from manila.message import message_field from manila import policy from manila.scheduler.drivers import base from manila.scheduler import scheduler_options from manila.share import share_types CONF = cfg.CONF LOG = log.getLogger(__name__) AFFINITY_HINT = 'same_host' ANTI_AFFINITY_HINT = 'different_host' AFFINITY_KEY = "__affinity_same_host" ANTI_AFFINITY_KEY = "__affinity_different_host" class FilterScheduler(base.Scheduler): """Scheduler that can be used for filtering and weighing.""" def __init__(self, *args, **kwargs): super(FilterScheduler, self).__init__(*args, **kwargs) self.cost_function_cache = None self.options = scheduler_options.SchedulerOptions() self.max_attempts = self._max_attempts() self.message_api = message_api.API() def _get_configuration_options(self): """Fetch options dictionary. 
Broken out for testing.""" return self.options.get_configuration() def get_pools(self, context, filters, cached): return self.host_manager.get_pools(context, filters, cached) def _post_select_populate_filter_properties(self, filter_properties, host_state): """Add additional information to filter properties. Add additional information to the filter properties after a host has been selected by the scheduling process. """ # Add a retry entry for the selected volume backend: self._add_retry_host(filter_properties, host_state.host) def _add_retry_host(self, filter_properties, host): """Add retry entry for the selected volume backend. In the event that the request gets re-scheduled, this entry will signal that the given backend has already been tried. """ retry = filter_properties.get('retry') if not retry: return hosts = retry['hosts'] hosts.append(host) def _max_attempts(self): max_attempts = CONF.scheduler_max_attempts if max_attempts < 1: msg = _("Invalid value for 'scheduler_max_attempts', " "must be >=1") raise exception.InvalidParameterValue(err=msg) return max_attempts def schedule_create_share(self, context, request_spec, filter_properties): weighed_host = self._schedule_share(context, request_spec, filter_properties) host = weighed_host.obj.host share_id = request_spec['share_id'] snapshot_id = request_spec['snapshot_id'] updated_share = base.share_update_db(context, share_id, host) self._post_select_populate_filter_properties(filter_properties, weighed_host.obj) # context is not serializable filter_properties.pop('context', None) self.share_rpcapi.create_share_instance( context, updated_share.instance, host, request_spec=request_spec, filter_properties=filter_properties, snapshot_id=snapshot_id ) def schedule_create_replica(self, context, request_spec, filter_properties): share_replica_id = request_spec['share_instance_properties'].get('id') weighed_host = self._schedule_share( context, request_spec, filter_properties) host = weighed_host.obj.host 
updated_share_replica = base.share_replica_update_db( context, share_replica_id, host) self._post_select_populate_filter_properties(filter_properties, weighed_host.obj) # context is not serializable filter_properties.pop('context', None) self.share_rpcapi.create_share_replica( context, updated_share_replica, host, request_spec=request_spec, filter_properties=filter_properties) def _format_filter_properties(self, context, filter_properties, request_spec): elevated = context.elevated() share_properties = request_spec['share_properties'] share_instance_properties = (request_spec.get( 'share_instance_properties', {})) share_proto = request_spec.get('share_proto', share_properties.get('share_proto')) resource_properties = share_properties.copy() resource_properties.update(share_instance_properties.copy()) share_type = request_spec.get("share_type", {}) if not share_type: msg = _("You must create a share type in advance," " and specify in request body or" " set default_share_type in manila.conf.") LOG.error(msg) self.message_api.create( context, message_field.Action.CREATE, context.project_id, resource_type=message_field.Resource.SHARE, resource_id=request_spec.get('share_id', None), detail=message_field.Detail.NO_DEFAULT_SHARE_TYPE) raise exception.InvalidParameterValue(err=msg) share_type['extra_specs'] = share_type.get('extra_specs') or {} if share_type['extra_specs']: for extra_spec_name in share_types.get_boolean_extra_specs(): extra_spec = share_type['extra_specs'].get(extra_spec_name) if extra_spec is not None: if not extra_spec.startswith(""): extra_spec = " %s" % extra_spec share_type['extra_specs'][extra_spec_name] = extra_spec storage_protocol_spec = ( share_type['extra_specs'].get('storage_protocol') ) if storage_protocol_spec is None and share_proto is not None: # a host can report multiple protocols as "storage_protocol" spec_value = " %s" % share_proto share_type['extra_specs']['storage_protocol'] = spec_value resource_type = share_type 
request_spec.update({'resource_properties': resource_properties}) config_options = self._get_configuration_options() share_group = request_spec.get('share_group') # NOTE(gouthamr): If 'active_replica_host' or 'snapshot_host' is # present in the request spec, pass that host's 'replication_domain' to # the ShareReplication and CreateFromSnapshot filters. active_replica_host = request_spec.get('active_replica_host') snapshot_host = request_spec.get('snapshot_host') allowed_hosts = [] if active_replica_host: allowed_hosts.append(active_replica_host) if snapshot_host: allowed_hosts.append(snapshot_host) replication_domain = None if active_replica_host or snapshot_host: temp_hosts = self.host_manager.get_all_host_states_share(elevated) matching_host = next((host for host in temp_hosts if host.host in allowed_hosts), None) if matching_host: replication_domain = matching_host.replication_domain # NOTE(zengyingzhe): remove the 'share_backend_name' extra spec, # let scheduler choose the available host for this replica or # snapshot clone creation request. share_type.get('extra_specs', {}).pop('share_backend_name', None) if filter_properties is None: filter_properties = {} self._populate_retry_share(filter_properties, resource_properties) filter_properties.update({'context': context, 'request_spec': request_spec, 'config_options': config_options, 'share_type': share_type, 'resource_type': resource_type, 'share_group': share_group, 'replication_domain': replication_domain, }) self.populate_filter_properties_share(context, request_spec, filter_properties) return filter_properties, share_properties def _schedule_share(self, context, request_spec, filter_properties=None): """Returns a list of hosts that meet the required specs. The list is ordered by their fitness. 
""" elevated = context.elevated() filter_properties, share_properties = self._format_filter_properties( context, filter_properties, request_spec) # Find our local list of acceptable hosts by filtering and # weighing our options. we virtually consume resources on # it so subsequent selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. consider_disabled = False if policy.check_is_host_admin(context) and filter_properties.get( 'scheduler_hints', {}).get('only_host'): # Admin user can schedule share on disabled host consider_disabled = True hosts = self.host_manager.get_all_host_states_share( elevated, consider_disabled=consider_disabled ) if not hosts: msg = _("There are no hosts to fulfill this " "provisioning request. Are share " "backend services down?") self.message_api.create( context, message_field.Action.CREATE, context.project_id, resource_type=message_field.Resource.SHARE, resource_id=request_spec.get('share_id', None), detail=message_field.Detail.SHARE_BACKEND_NOT_READY_YET) raise exception.WillNotSchedule(msg) # Filter local hosts based on requirements ... hosts, last_filter = self.host_manager.get_filtered_hosts( hosts, filter_properties) if not hosts: msg = _('Failed to find a weighted host, the last executed filter' ' was %s.') raise exception.NoValidHost( reason=msg % last_filter, detail_data={'last_filter': last_filter}) LOG.debug("Filtered share %(hosts)s", {"hosts": hosts}) # weighted_host = WeightedHost() ... the best # host for the job. weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) best_host = weighed_hosts[0] LOG.debug("Choosing for share: %(best_host)s", {"best_host": best_host}) # NOTE(rushiagr): updating the available space parameters at same place best_host.obj.consume_from_share(share_properties) return best_host def _populate_retry_share(self, filter_properties, properties): """Populate filter properties with retry history. 
Populate filter properties with history of retries for this request. If maximum retries is exceeded, raise NoValidHost. """ max_attempts = self.max_attempts retry = filter_properties.pop('retry', {}) if max_attempts == 1: # re-scheduling is disabled. return # retry is enabled, update attempt count: if retry: retry['num_attempts'] += 1 else: retry = { 'num_attempts': 1, 'hosts': [] # list of share service hosts tried } filter_properties['retry'] = retry share_id = properties.get('share_id') self._log_share_error(share_id, retry) if retry['num_attempts'] > max_attempts: msg = _("Exceeded max scheduling attempts %(max_attempts)d for " "share %(share_id)s") % { "max_attempts": max_attempts, "share_id": share_id } raise exception.NoValidHost(reason=msg) def _log_share_error(self, share_id, retry): """Log any exceptions from a previous share create operation. If the request contained an exception from a previous share create operation, log it to aid debugging. """ exc = retry.pop('exc', None) # string-ified exception from share if not exc: return # no exception info from a previous attempt, skip hosts = retry.get('hosts') if not hosts: return # no previously attempted hosts, skip last_host = hosts[-1] LOG.error("Error scheduling %(share_id)s from last share-service: " "%(last_host)s : %(exc)s", { "share_id": share_id, "last_host": last_host, "exc": "exc" }) def _populate_scheduler_hint(self, request_spec, hints, key, hint): share_properties = request_spec.get('share_properties', {}) value = share_properties.get('metadata', {}).get(key, None) if value: hints.update({hint: value}) def populate_filter_properties_scheduler_hints(self, context, request_spec, filter_properties): share_id = request_spec.get('share_id', None) if not share_id: filter_properties['scheduler_hints'] = {} return else: if filter_properties.get('scheduler_hints', None): return hints = {} self._populate_scheduler_hint(request_spec, hints, AFFINITY_KEY, AFFINITY_HINT) 
self._populate_scheduler_hint(request_spec, hints, ANTI_AFFINITY_KEY, ANTI_AFFINITY_HINT) filter_properties['scheduler_hints'] = hints def populate_filter_properties_share(self, context, request_spec, filter_properties): """Stuff things into filter_properties. Can be overridden in a subclass to add more data. """ shr = request_spec['share_properties'] inst = request_spec['share_instance_properties'] filter_properties['size'] = shr['size'] filter_properties['availability_zone_id'] = ( inst.get('availability_zone_id') ) filter_properties['user_id'] = shr.get('user_id') filter_properties['metadata'] = shr.get('metadata') filter_properties['snapshot_id'] = shr.get('snapshot_id') filter_properties['is_share_extend'] = ( request_spec.get('is_share_extend') ) self.populate_filter_properties_scheduler_hints(context, request_spec, filter_properties) def schedule_create_share_group(self, context, share_group_id, request_spec, filter_properties): LOG.info("Scheduling share group %s.", share_group_id) host = self._get_best_host_for_share_group(context, request_spec) if not host: msg = _("No hosts available for share group %s.") % share_group_id raise exception.NoValidHost(reason=msg) msg = "Chose host %(host)s for create_share_group %(group)s." LOG.info(msg, {'host': host, 'group': share_group_id}) updated_share_group = base.share_group_update_db( context, share_group_id, host) self.share_rpcapi.create_share_group( context, updated_share_group, host) def _get_weighted_hosts_for_share_type(self, context, request_spec, share_type): config_options = self._get_configuration_options() # NOTE(ameade): Find our local list of acceptable hosts by # filtering and weighing our options. We virtually consume # resources on it so subsequent selections can adjust accordingly. # NOTE(ameade): Remember, we are using an iterator here. So only # traverse this list once. 
all_hosts = self.host_manager.get_all_host_states_share(context) if not all_hosts: return [] share_type['extra_specs'] = share_type.get('extra_specs', {}) if share_type['extra_specs']: for spec_name in share_types.get_required_extra_specs(): extra_spec = share_type['extra_specs'].get(spec_name) if extra_spec is not None: share_type['extra_specs'][spec_name] = ( " %s" % extra_spec) filter_properties = { 'context': context, 'request_spec': request_spec, 'config_options': config_options, 'share_type': share_type, 'resource_type': share_type, 'size': 0, } # Filter local hosts based on requirements ... hosts, last_filter = self.host_manager.get_filtered_hosts( all_hosts, filter_properties) if not hosts: return [] LOG.debug("Filtered %s", hosts) # weighted_host = WeightedHost() ... the best host for the job. weighed_hosts = self.host_manager.get_weighed_hosts( hosts, filter_properties) if not weighed_hosts: return [] return weighed_hosts def _get_weighted_hosts_for_share_group_type(self, context, request_spec, share_group_type): config_options = self._get_configuration_options() all_hosts = self.host_manager.get_all_host_states_share(context) if not all_hosts: return [] filter_properties = { 'context': context, 'request_spec': request_spec, 'config_options': config_options, 'share_group_type': share_group_type, 'resource_type': share_group_type, } hosts, last_filter = self.host_manager.get_filtered_hosts( all_hosts, filter_properties, CONF.scheduler_default_share_group_filters) if not hosts: return [] LOG.debug("Filtered %s", hosts) weighed_hosts = self.host_manager.get_weighed_hosts( hosts, filter_properties) if not weighed_hosts: return [] return weighed_hosts def _get_weighted_candidates_share_group(self, context, request_spec): """Finds hosts that support the share group. Returns a list of hosts that meet the required specs, ordered by their fitness. 
""" elevated = context.elevated() shr_types = request_spec.get("share_types") weighed_hosts = [] for iteration_count, share_type in enumerate(shr_types): temp_weighed_hosts = self._get_weighted_hosts_for_share_type( elevated, request_spec, share_type) # NOTE(ameade): Take the intersection of hosts so we have one that # can support all share types of the share group if iteration_count == 0: weighed_hosts = temp_weighed_hosts else: new_weighed_hosts = [] for host1 in weighed_hosts: for host2 in temp_weighed_hosts: if host1.obj.host == host2.obj.host: new_weighed_hosts.append(host1) weighed_hosts = new_weighed_hosts if not weighed_hosts: return [] # NOTE(ameade): Ensure the hosts support the share group type share_group_type = request_spec.get("resource_type", {}) temp_weighed_group_hosts = ( self._get_weighted_hosts_for_share_group_type( elevated, request_spec, share_group_type)) new_weighed_hosts = [] for host1 in weighed_hosts: for host2 in temp_weighed_group_hosts: if host1.obj.host == host2.obj.host: new_weighed_hosts.append(host1) weighed_hosts = new_weighed_hosts return weighed_hosts def _get_best_host_for_share_group(self, context, request_spec): weighed_hosts = self._get_weighted_candidates_share_group( context, request_spec) if not weighed_hosts: return None return weighed_hosts[0].obj.host def host_passes_filters(self, context, host, request_spec, filter_properties): elevated = context.elevated() filter_properties, share_properties = self._format_filter_properties( context, filter_properties, request_spec) hosts = self.host_manager.get_all_host_states_share(elevated) filter_class_names = None if request_spec.get('is_share_extend', None): filter_class_names = CONF.scheduler_default_extend_filters hosts, last_filter = self.host_manager.get_filtered_hosts( hosts, filter_properties, filter_class_names=filter_class_names) hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) for tgt_host in hosts: if tgt_host.obj.host == host: return tgt_host.obj 
msg = (_('Cannot place share %(id)s on %(host)s, the last executed' ' filter was %(last_filter)s.') % {'id': request_spec['share_id'], 'host': host, 'last_filter': last_filter}) raise exception.NoValidHost(reason=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/drivers/simple.py0000664000175000017500000000621500000000000021622 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack, LLC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Simple Scheduler """ from oslo_config import cfg from manila import db from manila import exception from manila.i18n import _ from manila.scheduler.drivers import base from manila.scheduler.drivers import chance from manila import utils simple_scheduler_opts = [ cfg.IntOpt("max_gigabytes", default=10000, help="Maximum number of volume gigabytes to allow per host."), ] CONF = cfg.CONF CONF.register_opts(simple_scheduler_opts) class SimpleScheduler(chance.ChanceScheduler): """Implements Naive Scheduler that tries to find least loaded host.""" def schedule_create_share(self, context, request_spec, filter_properties): """Picks a host that is up and has the fewest shares.""" # TODO(rushiagr) - pick only hosts that run shares elevated = context.elevated() share_id = request_spec.get('share_id') snapshot_id = request_spec.get('snapshot_id') share_properties = request_spec.get('share_properties') share_size = share_properties.get('size') instance_properties = request_spec.get('share_instance_properties', {}) availability_zone_id = instance_properties.get('availability_zone_id') results = db.service_get_all_share_sorted(elevated) if availability_zone_id: results = [(service_g, gigs) for (service_g, gigs) in results if (service_g['availability_zone_id'] == availability_zone_id)] for result in results: (service, share_gigabytes) = result if share_gigabytes + share_size > CONF.max_gigabytes: msg = _("Not enough allocatable share gigabytes remaining") raise exception.NoValidHost(reason=msg) if utils.service_is_up(service) and not service['disabled']: updated_share = base.share_update_db(context, share_id, service['host']) self.share_rpcapi.create_share_instance( context, updated_share.instance, service['host'], request_spec, None, snapshot_id=snapshot_id) return None msg = _("Is the appropriate service running?") raise exception.NoValidHost(reason=msg) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.889672 
manila-21.0.0/manila/scheduler/evaluator/0000775000175000017500000000000000000000000020277 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/evaluator/__init__.py0000664000175000017500000000000000000000000022376 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/evaluator/evaluator.py0000664000175000017500000001743300000000000022663 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import operator import re import pyparsing from manila import exception from manila.i18n import _ def _operatorOperands(tokenList): it = iter(tokenList) while 1: try: op1 = next(it) op2 = next(it) yield (op1, op2) except StopIteration: break class EvalConstant(object): def __init__(self, toks): self.value = toks[0] def eval(self): result = self.value if (isinstance(result, str) and re.match(r"^[a-zA-Z_]+\.[a-zA-Z_]+$", result)): (which_dict, entry) = result.split('.') try: result = _vars[which_dict][entry] except KeyError as e: msg = _("KeyError: %s") % e raise exception.EvaluatorParseException(reason=msg) except TypeError as e: msg = _("TypeError: %s") % e raise exception.EvaluatorParseException(reason=msg) try: result = int(result) except ValueError: try: result = float(result) except ValueError: if isinstance(result, str): result = result.replace('"', '').replace('\'', '') return result class EvalSignOp(object): operations = { '+': 1, '-': -1, } def __init__(self, toks): self.sign, self.value = toks[0] def eval(self): return self.operations[self.sign] * self.value.eval() class EvalAddOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): sum = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): if op == '+': sum += val.eval() elif op == '-': sum -= val.eval() return sum class EvalMultOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): prod = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): try: if op == '*': prod *= val.eval() elif op == '/': prod /= float(val.eval()) except ZeroDivisionError as e: msg = _("ZeroDivisionError: %s") % e raise exception.EvaluatorParseException(reason=msg) return prod class EvalPowerOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): prod = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): prod = pow(prod, val.eval()) return prod class EvalNegateOp(object): def __init__(self, toks): self.negation, 
self.value = toks[0] def eval(self): return not self.value.eval() class EvalComparisonOp(object): operations = { "<": operator.lt, "<=": operator.le, ">": operator.gt, ">=": operator.ge, "!=": operator.ne, "==": operator.eq, "<>": operator.ne, } def __init__(self, toks): self.value = toks[0] def eval(self): val1 = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): fn = self.operations[op] val2 = val.eval() if not fn(val1, val2): break val1 = val2 else: return True return False class EvalTernaryOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): condition = self.value[0].eval() if condition: return self.value[2].eval() else: return self.value[4].eval() class EvalFunction(object): functions = { "abs": abs, "max": max, "min": min, } def __init__(self, toks): self.func, self.value = toks[0] def eval(self): args = self.value.eval() if type(args) is list: return self.functions[self.func](*args) else: return self.functions[self.func](args) class EvalCommaSeperator(object): def __init__(self, toks): self.value = toks[0] def eval(self): val1 = self.value[0].eval() val2 = self.value[2].eval() if type(val2) is list: val_list = [] val_list.append(val1) for val in val2: val_list.append(val) return val_list return [val1, val2] class EvalBoolAndOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): left = self.value[0].eval() right = self.value[2].eval() return left and right class EvalBoolOrOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): left = self.value[0].eval() right = self.value[2].eval() return left or right _parser = None _vars = {} def _def_parser(): # Enabling packrat parsing greatly speeds up the parsing. 
pyparsing.ParserElement.enablePackrat() # pylint: disable = no-value-for-parameter # noqa:E501 alphas = pyparsing.alphas Combine = pyparsing.Combine Forward = pyparsing.Forward nums = pyparsing.nums quoted_string = pyparsing.quotedString oneOf = pyparsing.oneOf opAssoc = pyparsing.opAssoc infixNotation = pyparsing.infixNotation Word = pyparsing.Word integer = Word(nums) real = Combine(Word(nums) + '.' + Word(nums)) variable = Word(alphas + '_' + '.') number = real | integer expr = Forward() fn = Word(alphas + '_' + '.') operand = number | variable | fn | quoted_string signop = oneOf('+ -') addop = oneOf('+ -') multop = oneOf('* /') comparisonop = oneOf(' '.join(EvalComparisonOp.operations.keys())) ternaryop = ('?', ':') boolandop = oneOf('AND and &&') boolorop = oneOf('OR or ||') negateop = oneOf('NOT not !') operand.setParseAction(EvalConstant) expr = infixNotation(operand, [ (fn, 1, opAssoc.RIGHT, EvalFunction), ("^", 2, opAssoc.RIGHT, EvalPowerOp), (signop, 1, opAssoc.RIGHT, EvalSignOp), (multop, 2, opAssoc.LEFT, EvalMultOp), (addop, 2, opAssoc.LEFT, EvalAddOp), (negateop, 1, opAssoc.RIGHT, EvalNegateOp), (comparisonop, 2, opAssoc.LEFT, EvalComparisonOp), (ternaryop, 3, opAssoc.LEFT, EvalTernaryOp), (boolandop, 2, opAssoc.LEFT, EvalBoolAndOp), (boolorop, 2, opAssoc.LEFT, EvalBoolOrOp), (',', 2, opAssoc.RIGHT, EvalCommaSeperator), ]) return expr def evaluate(expression, **kwargs): """Evaluates an expression. Provides the facility to evaluate mathematical expressions, and to substitute variables from dictionaries into those expressions. Supports both integer and floating point values, and automatic promotion where necessary. 
""" global _parser if _parser is None: _parser = _def_parser() global _vars _vars = kwargs try: result = _parser.parseString(expression, parseAll=True)[0] except pyparsing.ParseException as e: msg = _("ParseException: %s") % e raise exception.EvaluatorParseException(reason=msg) return result.eval() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.893672 manila-21.0.0/manila/scheduler/filters/0000775000175000017500000000000000000000000017745 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/__init__.py0000664000175000017500000000000000000000000022044 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/affinity.py0000664000175000017500000001020200000000000022123 0ustar00zuulzuul00000000000000# Copyright (c) 2021 SAP. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from manila import exception from manila.scheduler.filters import base_host from manila.share import api LOG = log.getLogger(__name__) AFFINITY_FILTER = 'same_host' ANTI_AFFINITY_FILTER = 'different_host' class AffinityBaseFilter(base_host.BaseHostFilter): """Base class of affinity filters""" _filter_type = None def __init__(self): self.share_api = api.API() def filter_all(self, filter_obj_list, filter_properties): # _filter_type should be defined in subclass if self._filter_type is None: raise AffinityFilterTypeNotSetError try: filter_properties = self._validate(filter_properties) except SchedulerHintsNotSet: # AffinityFilter/AntiAffinityFilter is skipped if corresponding # hint is not set. If the "scheduler_hints" is not set, both # filters are skipped. return filter_obj_list except (exception.InvalidUUID, exception.ShareNotFound, exception.ShareInstanceNotFound) as e: # Stop scheduling share when above errors are caught LOG.error('%(filter_name)s: %(error)s', { 'filter_name': self.__class__.__name__, 'error': e}) return None else: # Return list of hosts which pass the function host_passes() # overriden in AffinityFilter and AntiAffinityFilter. 
return [obj for obj in filter_obj_list if self._filter_one(obj, filter_properties)] def _validate(self, filter_properties): context = filter_properties['context'] hints = filter_properties.get('scheduler_hints') if hints is None: raise SchedulerHintsNotSet else: share_uuids = hints.get(self._filter_type) if share_uuids is None: raise SchedulerHintsNotSet share_uuids = share_uuids.split(",") filter_properties['scheduler_hints'][self._filter_type] = [] filtered_hosts = [] for uuid in share_uuids: try: share = self.share_api.get(context, uuid) except exception.NotFound: raise exception.ShareNotFound(uuid) instances = share.get('instances') if len(instances) == 0: raise exception.ShareInstanceNotFound(share_instance_id=uuid) filtered_hosts.append( [instance.get('host') for instance in instances]) if self._filter_type == AFFINITY_FILTER: filter_properties['scheduler_hints'][self._filter_type] = list( set.intersection(*map(set, filtered_hosts))) else: filter_properties['scheduler_hints'][self._filter_type] = list( set.union(*map(set, filtered_hosts))) return filter_properties class AffinityFilter(AffinityBaseFilter): _filter_type = AFFINITY_FILTER def host_passes(self, host_state, filter_properties): allowed_hosts = \ filter_properties['scheduler_hints'][self._filter_type] return host_state.host in allowed_hosts class AntiAffinityFilter(AffinityBaseFilter): _filter_type = ANTI_AFFINITY_FILTER def host_passes(self, host_state, filter_properties): forbidden_hosts = \ filter_properties['scheduler_hints'][self._filter_type] return host_state.host not in forbidden_hosts class SchedulerHintsNotSet(Exception): pass class AffinityFilterTypeNotSetError(Exception): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/availability_zone.py0000664000175000017500000000450400000000000024027 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.scheduler.filters import base_host class AvailabilityZoneFilter(base_host.BaseHostFilter): """Filters Hosts by availability zone.""" # Availability zones do not change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): spec = filter_properties.get('request_spec', {}) props = spec.get('resource_properties', {}) request_az_id = props.get('availability_zone_id', spec.get('availability_zone_id')) az_request_multiple_subnet_support_map = spec.get( 'az_request_multiple_subnet_support_map', {}) request_azs = spec.get('availability_zones') host_az_id = host_state.service['availability_zone_id'] host_az = host_state.service['availability_zone']['name'] host_single_subnet_only = ( not host_state.share_server_multiple_subnet_support) host_satisfied = True if request_az_id is not None: host_satisfied = request_az_id == host_az_id if request_azs: host_satisfied = host_satisfied and host_az in request_azs # Only validates the multiple subnet support in case it can deny the # host: # 1. host is satisfying the AZ # 2. There is a map to be checked # 3. 
The host does not support a multiple subnet if (host_satisfied and az_request_multiple_subnet_support_map and host_single_subnet_only): host_satisfied = ( not az_request_multiple_subnet_support_map.get(host_az_id, False)) return host_satisfied ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/base.py0000664000175000017500000000721200000000000021233 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Filter support """ from oslo_log import log from manila.scheduler import base_handler LOG = log.getLogger(__name__) class BaseFilter(object): """Base class for all filter classes.""" def _filter_one(self, obj, filter_properties): """Check if an object passes a filter. Return True if it passes the filter, False otherwise. Override this in a subclass. """ return True def filter_all(self, filter_obj_list, filter_properties): """Yield objects that pass the filter. Can be overridden in a subclass, if you need to base filtering decisions on all objects. Otherwise, one can just override _filter_one() to filter a single object. 
""" for obj in filter_obj_list: if self._filter_one(obj, filter_properties): yield obj # Set to true in a subclass if a filter only needs to be run once # for each request rather than for each instance run_filter_once_per_request = False def run_filter_for_index(self, index): """Check if filter needs to be run for the "index-th" instance. Return True if the filter needs to be run for the "index-th" instance in a request. Only need to override this if a filter needs anything other than "first only" or "all" behaviour. """ return not (self.run_filter_once_per_request and index > 0) class BaseFilterHandler(base_handler.BaseHandler): """Base class to handle loading filter classes. This class should be subclassed where one needs to use filters. """ def get_filtered_objects(self, filter_classes, objs, filter_properties, index=0): """Get objects after filter :param filter_classes: filters that will be used to filter the objects :param objs: objects that will be filtered :param filter_properties: client filter properties :param index: This value needs to be increased in the caller function of get_filtered_objects when handling each resource. 
""" list_objs = list(objs) LOG.debug("Starting with %d host(s)", len(list_objs)) for filter_cls in filter_classes: cls_name = filter_cls.__name__ filter_class = filter_cls() if filter_class.run_filter_for_index(index): objs = filter_class.filter_all(list_objs, filter_properties) if objs is None: LOG.debug("Filter %(cls_name)s says to stop filtering", {'cls_name': cls_name}) return (None, cls_name) list_objs = list(objs) msg = ("Filter %(cls_name)s returned %(obj_len)d host(s)" % {'cls_name': cls_name, 'obj_len': len(list_objs)}) if not list_objs: LOG.info(msg) break LOG.debug(msg) return (list_objs, cls_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/base_host.py0000664000175000017500000000252000000000000022265 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host filters """ from manila.scheduler.filters import base class BaseHostFilter(base.BaseFilter): """Base class for host filters.""" def _filter_one(self, obj, filter_properties): """Return True if the object passes the filter, otherwise False.""" return self.host_passes(obj, filter_properties) def host_passes(self, host_state, filter_properties): """Return True if the HostState passes the filter, otherwise False. Override this in a subclass. 
""" raise NotImplementedError() class HostFilterHandler(base.BaseFilterHandler): def __init__(self, namespace): super(HostFilterHandler, self).__init__(BaseHostFilter, namespace) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/capabilities.py0000664000175000017500000000350100000000000022747 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.scheduler.filters import base_host from manila.scheduler import utils LOG = log.getLogger(__name__) class CapabilitiesFilter(base_host.BaseHostFilter): """HostFilter to work with resource (instance & volume) type records.""" def _satisfies_extra_specs(self, capabilities, resource_type): """Compare capabilities against extra specs. Check that the capabilities provided by the services satisfy the extra specs associated with the resource type. 
""" extra_specs = resource_type.get('extra_specs', []) if not extra_specs: return True return utils.capabilities_satisfied(capabilities, extra_specs) def host_passes(self, host_state, filter_properties): """Return a list of hosts that can create resource_type.""" resource_type = filter_properties.get('resource_type') if not self._satisfies_extra_specs(host_state.capabilities, resource_type): LOG.debug("%(host_state)s fails resource_type extra_specs " "requirements", {'host_state': host_state}) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/capacity.py0000664000175000017500000001360300000000000022117 0ustar00zuulzuul00000000000000# Copyright (c) 2012 Intel # Copyright (c) 2012 OpenStack, LLC. # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import math from oslo_log import log from manila.scheduler.filters import base_host from manila.scheduler import utils LOG = log.getLogger(__name__) class CapacityFilter(base_host.BaseHostFilter): """CapacityFilter filters based on share host's capacity utilization.""" def host_passes(self, host_state, filter_properties): """Return True if host has sufficient capacity.""" size_increase = filter_properties.get('size_increase') share_size = size_increase if size_increase else filter_properties.get( 'size', 0) if host_state.free_capacity_gb is None: # Fail Safe LOG.error("Free capacity not set: " "share node info collection broken.") return False free_space = host_state.free_capacity_gb total_space = host_state.total_capacity_gb if filter_properties.get('snapshot_id'): reserved = float(host_state.reserved_snapshot_percentage) / 100 elif filter_properties.get('is_share_extend'): reserved = float(host_state.reserved_share_extend_percentage) / 100 else: reserved = float(host_state.reserved_percentage) / 100 if free_space == 'unknown': # NOTE(zhiteng) for those back-ends cannot report actual # available capacity, we assume it is able to serve the # request. Even if it was not, the retry mechanism is # able to handle the failure by rescheduling return True elif total_space == 'unknown': # NOTE(xyang): If total_space is 'unknown' and # reserved is 0, we assume the back-ends can serve the request. # If total_space is 'unknown' and reserved # is not 0, we cannot calculate the reserved space. # float(total_space) will throw an exception. total*reserved # also won't work. So the back-ends cannot serve the request. return reserved == 0 and share_size <= free_space total = float(total_space) if total <= 0: LOG.warning("Insufficient free space for share creation. " "Total capacity is %(total).2f on host %(host)s.", {"total": total, "host": host_state.host}) return False # NOTE(xyang): Calculate how much free space is left after taking # into account the reserved space. 
free = math.floor(free_space - total * reserved) msg_args = {"host": host_state.host, "requested": share_size, "available": free} LOG.debug("Space information for share creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s", msg_args) share_type = filter_properties.get('share_type', {}) use_thin_logic = utils.use_thin_logic(share_type) thin_provisioning = utils.thin_provisioning( host_state.thin_provisioning) # NOTE(xyang): Only evaluate using max_over_subscription_ratio # if use_thin_logic and thin_provisioning are True. Check if the # ratio of provisioned capacity over total capacity would exceed # subscription ratio. # If max_over_subscription_ratio = 1, the provisioned_ratio # should still be limited by the max_over_subscription_ratio; # otherwise, it could result in infinite provisioning. if (use_thin_logic and thin_provisioning and host_state.max_over_subscription_ratio >= 1): provisioned_ratio = ((host_state.provisioned_capacity_gb + share_size) / total) if provisioned_ratio > host_state.max_over_subscription_ratio: LOG.warning( "Insufficient free space for thin provisioning. " "The ratio of provisioned capacity over total capacity " "%(provisioned_ratio).2f would exceed the maximum over " "subscription ratio %(oversub_ratio).2f on host " "%(host)s.", {"provisioned_ratio": provisioned_ratio, "oversub_ratio": host_state.max_over_subscription_ratio, "host": host_state.host}) return False else: # NOTE(xyang): Adjust free_virtual calculation based on # free and max_over_subscription_ratio. adjusted_free_virtual = ( free * host_state.max_over_subscription_ratio) return adjusted_free_virtual >= share_size elif (use_thin_logic and thin_provisioning and host_state.max_over_subscription_ratio < 1): LOG.error("Invalid max_over_subscription_ratio: %(ratio)s. 
" "Valid value should be >= 1.", {"ratio": host_state.max_over_subscription_ratio}) return False if free < share_size: LOG.warning("Insufficient free space for share creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s", msg_args) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/create_from_snapshot.py0000664000175000017500000000532600000000000024532 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.scheduler.filters import base_host from manila.share import utils as share_utils LOG = log.getLogger(__name__) class CreateFromSnapshotFilter(base_host.BaseHostFilter): """CreateFromSnapshotFilter filters hosts based on replication_domain.""" def host_passes(self, host_state, filter_properties): """Return True if new share's host is compatible with snapshot's host. Design of this filter: - Creating shares from snapshots in another pool or backend needs to match with one of the below conditions: - The backend of the new share must be the same as its parent snapshot. 
- Both new share and snapshot are in the same replication_domain """ snapshot_id = filter_properties.get('request_spec', {}).get( 'snapshot_id') snapshot_host = filter_properties.get( 'request_spec', {}).get('snapshot_host') if None in [snapshot_id, snapshot_host]: # NOTE(silvacarlose): if the request does not contain a snapshot_id # or a snapshot_host, the user is not creating a share from a # snapshot and we don't need to filter out the host. return True snapshot_backend = share_utils.extract_host(snapshot_host, 'backend') snapshot_rep_domain = filter_properties.get('replication_domain') host_backend = share_utils.extract_host(host_state.host, 'backend') host_rep_domain = host_state.replication_domain # Same backend if host_backend == snapshot_backend: return True # Same replication domain if snapshot_rep_domain and snapshot_rep_domain == host_rep_domain: return True msg = ("The parent's snapshot %(snapshot_id)s back end and " "replication domain don't match with the back end and " "replication domain of the Host %(host)s.") kwargs = { "snapshot_id": snapshot_id, "host": host_state.host } LOG.debug(msg, kwargs) return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/driver.py0000664000175000017500000000723000000000000021614 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from manila.scheduler.evaluator import evaluator from manila.scheduler.filters import base_host from manila.scheduler import utils LOG = logging.getLogger(__name__) class DriverFilter(base_host.BaseHostFilter): """DriverFilter filters hosts based on a 'filter function' and metrics. DriverFilter filters based on share host's provided 'filter function' and metrics. """ def host_passes(self, host_state, filter_properties): """Determines whether a host has a passing filter_function or not.""" stats = self._generate_stats(host_state, filter_properties) LOG.debug("Driver Filter: Checking host '%s'", stats['host_stats']['host']) result = self._check_filter_function(stats) LOG.debug("Result: %s", result) LOG.debug("Done checking host '%s'", stats['host_stats']['host']) return result def _check_filter_function(self, stats): """Checks if a share passes a host's filter function. Returns a tuple in the format (filter_passing, filter_invalid). Both values are booleans. """ if stats['filter_function'] is None: LOG.debug("Filter function not set :: passing host.") return True try: filter_result = self._run_evaluator(stats['filter_function'], stats) except Exception as ex: # Warn the admin for now that there is an error in the # filter function. LOG.warning("Error in filtering function " "'%(function)s' : '%(error)s' :: failing host.", {'function': stats['filter_function'], 'error': ex, }) return False msg = "Filter function result for host %(host)s: %(result)s." 
args = {'host': stats['host_stats']['host'], 'result': str(filter_result)} LOG.info(msg, args) return filter_result def _run_evaluator(self, func, stats): """Evaluates a given function using the provided available stats.""" host_stats = stats['host_stats'] host_caps = stats['host_caps'] extra_specs = stats['extra_specs'] share_stats = stats['share_stats'] result = evaluator.evaluate( func, extra=extra_specs, stats=host_stats, capabilities=host_caps, share=share_stats) return result def _generate_stats(self, host_state, filter_properties): """Generates statistics from host and share data.""" filter_function = None if ('filter_function' in host_state.capabilities and host_state.capabilities['filter_function'] is not None): filter_function = str( host_state.capabilities['filter_function']) stats = utils.generate_stats(host_state, filter_properties) stats['filter_function'] = filter_function return stats ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/extra_specs_ops.py0000664000175000017500000000503500000000000023523 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_utils import strutils # 1. The following operations are supported: # =, s==, s!=, s>=, s>, s<=, s<, , , , ==, !=, >=, <= # 2. Note that is handled in a different way below. # 3. 
If the first word in the extra_specs is not one of the operators, # it is ignored. _op_methods = {'=': lambda x, y: float(x) >= float(y), '': lambda x, y: y in x, '': lambda x, y: (strutils.bool_from_string(x) is strutils.bool_from_string(y)), '==': lambda x, y: float(x) == float(y), '!=': lambda x, y: float(x) != float(y), '>=': lambda x, y: float(x) >= float(y), '<=': lambda x, y: float(x) <= float(y), 's==': operator.eq, 's!=': operator.ne, 's<': operator.lt, 's<=': operator.le, 's>': operator.gt, 's>=': operator.ge} def match(value, req): # Make case-insensitive if (isinstance(value, str)): value = value.lower() req = req.lower() words = req.split() op = method = None if words: op = words.pop(0) method = _op_methods.get(op) if op != '' and not method: if type(value) is bool: return value == strutils.bool_from_string( req, strict=False, default=req) else: return value == req if value is None: return False if op == '': # Ex: v1 v2 v3 while True: if words.pop(0) == value: return True if not words: break op = words.pop(0) # remove a keyword if not words: break return False try: if words and method(value, words[0]): return True except ValueError: pass return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/host.py0000664000175000017500000000244700000000000021303 0ustar00zuulzuul00000000000000# Copyright 2021 Cloudification GmbH. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from manila import policy from manila.scheduler.filters import base_host class OnlyHostFilter(base_host.BaseHostFilter): """Filters Hosts by 'only_host' scheduler_hint.""" def host_passes(self, host_state, filter_properties): context = filter_properties['context'] if not policy.check_is_host_admin(context): return True hints = filter_properties.get('scheduler_hints') if hints is None: return True requested_host = hints.get('only_host', None) if requested_host is None: return True # e.g. "only_host=hostname@generic2#GENERIC2" return host_state.host == requested_host ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/ignore_attempted_hosts.py0000664000175000017500000000363500000000000025100 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.scheduler.filters import base_host LOG = log.getLogger(__name__) class IgnoreAttemptedHostsFilter(base_host.BaseHostFilter): """Filter out previously attempted hosts A host passes this filter if it has not already been attempted for scheduling. The scheduler needs to add previously attempted hosts to the 'retry' key of filter_properties in order for this to work correctly. 
For example:: { 'retry': { 'hosts': ['host1', 'host2'], 'num_attempts': 3, } } """ def host_passes(self, host_state, filter_properties): """Skip nodes that have already been attempted.""" attempted = filter_properties.get('retry') if not attempted: # Re-scheduling is disabled LOG.debug("Re-scheduling is disabled.") return True hosts = attempted.get('hosts', []) host = host_state.host passes = host not in hosts pass_msg = "passes" if passes else "fails" LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: " "%(hosts)s", {'host': host, 'pass_msg': pass_msg, 'hosts': hosts}) return passes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/json.py0000664000175000017500000001201400000000000021266 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_serialization import jsonutils from manila.scheduler.filters import base_host class JsonFilter(base_host.BaseHostFilter): """Host Filter to allow simple JSON-based grammar for selecting hosts.""" def _op_compare(self, args, op): """Check if operator can compare the first arg with the others. Returns True if the specified operator can successfully compare the first item in the args with all the rest. Will return False if only one item is in the list. 
""" if len(args) < 2: return False if op is operator.contains: bad = args[0] not in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] return not bool(bad) def _equals(self, args): """First term is == all the other terms.""" return self._op_compare(args, operator.eq) def _less_than(self, args): """First term is < all the other terms.""" return self._op_compare(args, operator.lt) def _greater_than(self, args): """First term is > all the other terms.""" return self._op_compare(args, operator.gt) def _in(self, args): """First term is in set of remaining terms.""" return self._op_compare(args, operator.contains) def _less_than_equal(self, args): """First term is <= all the other terms.""" return self._op_compare(args, operator.le) def _greater_than_equal(self, args): """First term is >= all the other terms.""" return self._op_compare(args, operator.ge) def _not(self, args): """Flip each of the arguments.""" return [not arg for arg in args] def _or(self, args): """True if any arg is True.""" return any(args) def _and(self, args): """True if all args are True.""" return all(args) commands = { '=': _equals, '<': _less_than, '>': _greater_than, 'in': _in, '<=': _less_than_equal, '>=': _greater_than_equal, 'not': _not, 'or': _or, 'and': _and, } def _parse_string(self, string, host_state): """Parse string. Strings prefixed with $ are capability lookups in the form '$variable' where 'variable' is an attribute in the HostState class. 
If $variable is a dictionary, you may use: $variable.dictkey """ if not string: return None if not string.startswith("$"): return string path = string[1:].split(".") obj = getattr(host_state, path[0], None) if obj is None: return None for item in path[1:]: obj = obj.get(item) if obj is None: return None return obj def _process_filter(self, query, host_state): """Recursively parse the query structure.""" if not query: return True cmd = query[0] method = self.commands[cmd] cooked_args = [] for arg in query[1:]: if isinstance(arg, list): arg = self._process_filter(arg, host_state) elif isinstance(arg, str): arg = self._parse_string(arg, host_state) if arg is not None: cooked_args.append(arg) result = method(self, cooked_args) return result def host_passes(self, host_state, filter_properties): """Filters hosts. Return a list of hosts that can fulfill the requirements specified in the query. """ # TODO(zhiteng) Add description for filter_properties structure # and scheduler_hints. try: query = filter_properties['scheduler_hints']['query'] # If filter_properties['scheduler_hints'] is None, and TypeError # will occur, add TypeError exception here. except (KeyError, TypeError): query = None if not query: return True # NOTE(comstud): Not checking capabilities or service for # enabled/disabled so that a provided json filter can decide result = self._process_filter(jsonutils.loads(query), host_state) if isinstance(result, list): # If any succeeded, include the host result = any(result) if result: # Filter it out. return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/retry.py0000664000175000017500000000306600000000000021471 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.scheduler.filters import base_host LOG = log.getLogger(__name__) class RetryFilter(base_host.BaseHostFilter): """Filter out already tried nodes for scheduling purposes.""" def host_passes(self, host_state, filter_properties): """Skip nodes that have already been attempted.""" retry = filter_properties.get('retry') if not retry: # Re-scheduling is disabled LOG.debug("Re-scheduling is disabled") return True hosts = retry.get('hosts', []) host = host_state.host passes = host not in hosts pass_msg = "passes" if passes else "fails" LOG.debug("Host %(host)s %(pass_msg)s. 
Previously tried hosts: " "%(hosts)s", {"host": host, "pass_msg": pass_msg, "hosts": hosts}) # Host passes if it's not in the list of previously attempted hosts: return passes ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.893672 manila-21.0.0/manila/scheduler/filters/share_group_filters/0000775000175000017500000000000000000000000024013 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/share_group_filters/__init__.py0000664000175000017500000000000000000000000026112 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/share_group_filters/consistent_snapshot.py0000664000175000017500000000252700000000000030503 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.scheduler.filters import base_host class ConsistentSnapshotFilter(base_host.BaseHostFilter): """Filters hosts based on possibility to create consistent SG snapshots.""" def host_passes(self, host_state, filter_properties): """Return True if host will work with desired share group.""" cs_group_spec = filter_properties['share_group_type'].get( 'group_specs', {}).get('consistent_snapshot_support') # NOTE(vpoomaryov): if 'consistent_snapshot_support' group spec # is not set, then we assume that share group owner do not care about # it, which means any host should pass this filter. if cs_group_spec is None: return True return cs_group_spec == host_state.sg_consistent_snapshot_support ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/filters/share_replication.py0000664000175000017500000000707100000000000024017 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Goutham Pacha Ravi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.scheduler.filters import base_host LOG = log.getLogger(__name__) class ShareReplicationFilter(base_host.BaseHostFilter): """ShareReplicationFilter filters hosts based on replication support.""" def host_passes(self, host_state, filter_properties): """Return True if 'active' replica's host can replicate with host. Design of this filter: - Share replication is symmetric. 
All backends that can replicate between each other must share the same 'replication_domain'. - For scheduling a share that can be replicated in the future, this filter checks for 'replication_domain' capability. - For scheduling a replica, it checks for the 'replication_domain' compatibility. """ active_replica_host = filter_properties.get('request_spec', {}).get( 'active_replica_host') existing_replica_hosts = filter_properties.get('request_spec', {}).get( 'all_replica_hosts', '').split(',') replication_type = filter_properties.get('resource_type', {}).get( 'extra_specs', {}).get('replication_type') active_replica_replication_domain = filter_properties.get( 'replication_domain') host_replication_domain = host_state.replication_domain if replication_type is None: # NOTE(gouthamr): You're probably not creating a replicated # share or a replica, then this host obviously passes. return True elif host_replication_domain is None: msg = "Replication is not enabled on host %s." LOG.debug(msg, host_state.host) return False elif active_replica_host is None: # 'replication_type' filtering will be handled by the # capabilities filter, since it is a share-type extra-spec. return True # Scheduler filtering by replication_domain for a replica if active_replica_replication_domain != host_replication_domain: msg = ("The replication domain of Host %(host)s is " "'%(host_domain)s' and it does not match the replication " "domain of the 'active' replica's host: " "%(active_replica_host)s, which is '%(arh_domain)s'. 
") kwargs = { "host": host_state.host, "host_domain": host_replication_domain, "active_replica_host": active_replica_host, "arh_domain": active_replica_replication_domain, } LOG.debug(msg, kwargs) return False # Check host string for already created replicas if host_state.host in existing_replica_hosts: msg = ("Skipping host %s since it already hosts a replica for " "this share.") LOG.debug(msg, host_state.host) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/host_manager.py0000664000175000017500000007577400000000000021342 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack, LLC. # Copyright (c) 2015 Rushil Chugh # Copyright (c) 2015 Clinton Knight # Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Manage hosts in the current zone. 
""" import collections import re from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from manila import db from manila import exception from manila.scheduler.filters import base_host as base_host_filter from manila.scheduler import utils as scheduler_utils from manila.scheduler.weighers import base_host as base_host_weigher from manila.share import utils as share_utils from manila import utils host_manager_opts = [ cfg.ListOpt('scheduler_default_filters', default=[ 'OnlyHostFilter', 'AvailabilityZoneFilter', 'CapacityFilter', 'CapabilitiesFilter', 'DriverFilter', 'ShareReplicationFilter', 'CreateFromSnapshotFilter', 'AffinityFilter', 'AntiAffinityFilter', ], help='Which filter class names to use for filtering hosts ' 'when not specified in the request.'), cfg.ListOpt('scheduler_default_weighers', default=[ 'CapacityWeigher', 'GoodnessWeigher', 'HostAffinityWeigher', ], help='Which weigher class names to use for weighing hosts.'), cfg.ListOpt( 'scheduler_default_share_group_filters', default=[ 'AvailabilityZoneFilter', 'ConsistentSnapshotFilter', ], help='Which filter class names to use for filtering hosts ' 'creating share group when not specified in the request.'), cfg.ListOpt( 'scheduler_default_extend_filters', default=[ 'CapacityFilter', 'DriverFilter', ], help='Which filter class names to use for filtering hosts ' 'extending share when not specified in the request.'), ] CONF = cfg.CONF CONF.register_opts(host_manager_opts) CONF.import_opt('max_over_subscription_ratio', 'manila.share.driver') LOG = log.getLogger(__name__) class ReadOnlyDict(collections.UserDict): """A read-only dict.""" def __init__(self, source=None): self.data = {} self.update(source) def __setitem__(self, key, item): raise TypeError def __delitem__(self, key): raise TypeError def clear(self): raise TypeError def pop(self, key, *args): raise TypeError def popitem(self): raise TypeError def update(self, source=None): if source is None: return elif 
isinstance(source, collections.UserDict): self.data = source.data elif isinstance(source, type({})): self.data = source else: raise TypeError class HostState(object): """Mutable and immutable information tracked for a host.""" def __init__(self, host, capabilities=None, service=None): self.capabilities = None self.service = None self.host = host self.update_capabilities(capabilities, service) self.share_backend_name = None self.vendor_name = None self.driver_version = 0 self.storage_protocol = None self.qos = False # Mutable available resources. # These will change as resources are virtually "consumed". self.total_capacity_gb = 0 self.free_capacity_gb = None self.reserved_percentage = 0 self.reserved_snapshot_percentage = 0 self.reserved_share_extend_percentage = 0 self.allocated_capacity_gb = 0 # NOTE(xyang): The apparent allocated space indicating how much # capacity has been provisioned. This could be the sum of sizes # of all shares on a backend, which could be greater than or # equal to the allocated_capacity_gb. 
self.provisioned_capacity_gb = 0 self.max_over_subscription_ratio = 1.0 self.thin_provisioning = False self.driver_handles_share_servers = False self.snapshot_support = True self.create_share_from_snapshot_support = True self.revert_to_snapshot_support = False self.mount_snapshot_support = False self.dedupe = False self.compression = False self.replication_type = None self.replication_domain = None self.ipv4_support = None self.ipv6_support = None self.security_service_update_support = False self.network_allocation_update_support = False self.share_server_multiple_subnet_support = False self.mount_point_name_support = False self.share_replicas_migration_support = False self.encryption_support = None # PoolState for all pools self.pools = {} self.updated = None # Share Group capabilities self.sg_consistent_snapshot_support = None def update_capabilities(self, capabilities=None, service=None): # Read-only capability dicts if capabilities is None: capabilities = {} self.capabilities = ReadOnlyDict(capabilities) if service is None: service = {} self.service = ReadOnlyDict(service) def update_from_share_capability( self, capability, service=None, context=None): """Update information about a host from its share_node info. 
'capability' is the status info reported by share backend, a typical capability looks like this:: capability = { 'share_backend_name': 'Local NFS', #\ 'vendor_name': 'OpenStack', # backend level 'driver_version': '1.0', # mandatory/fixed 'storage_protocol': 'NFS', #/ stats&capabilities 'active_shares': 10, #\ 'IOPS_provisioned': 30000, # optional custom 'fancy_capability_1': 'eat', # stats & capabilities 'fancy_capability_2': 'drink', #/ 'pools':[ { 'pool_name': '1st pool', #\ 'total_capacity_gb': 500, # mandatory stats 'free_capacity_gb': 230, # for pools 'allocated_capacity_gb': 270, # | 'qos': 'False', # | 'reserved_percentage': 0, # | 'reserved_snapshot_percentage': 0, # | 'reserved_share_extend_percentage': 0, #/ 'dying_disks': 100, #\ 'super_hero_1': 'spider-man', # optional custom 'super_hero_2': 'flash', # stats & 'super_hero_3': 'neoncat', # capabilities 'super_hero_4': 'green lantern', #/ }, { 'pool_name': '2nd pool', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'qos': 'False', 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'dying_disks': 200, 'super_hero_1': 'superman', 'super_hero_2': 'Hulk', }] } """ self.update_capabilities(capability, service) if capability: if self.updated and self.updated > capability['timestamp']: return # Update backend level info self.update_backend(capability) # Update pool level info self.update_pools(capability, service, context=context) def update_pools(self, capability, service, context=None): """Update storage pools information from backend reported info.""" if not capability: return pools = capability.get('pools', None) active_pools = set() if pools and isinstance(pools, list): # Update all pools stats according to information from list # of pools in share capacity for pool_cap in pools: pool_name = pool_cap['pool_name'] self._append_backend_info(pool_cap) cur_pool = self.pools.get(pool_name, None) if not cur_pool: # Add new pool 
cur_pool = PoolState(self.host, pool_cap, pool_name) self.pools[pool_name] = cur_pool cur_pool.update_from_share_capability( pool_cap, service, context=context) active_pools.add(pool_name) elif pools is None: # To handle legacy driver that doesn't report pool # information in the capability, we have to prepare # a pool from backend level info, or to update the one # we created in self.pools. pool_name = self.share_backend_name if pool_name is None: # To get DEFAULT_POOL_NAME pool_name = share_utils.extract_host(self.host, 'pool', True) if len(self.pools) == 0: # No pool was there single_pool = PoolState(self.host, capability, pool_name) self._append_backend_info(capability) self.pools[pool_name] = single_pool else: # This is a update from legacy driver try: single_pool = self.pools[pool_name] except KeyError: single_pool = PoolState(self.host, capability, pool_name) self._append_backend_info(capability) self.pools[pool_name] = single_pool single_pool.update_from_share_capability( capability, service, context=context) active_pools.add(pool_name) # Remove non-active pools from self.pools nonactive_pools = set(self.pools.keys()) - active_pools for pool in nonactive_pools: LOG.debug("Removing non-active pool %(pool)s @ %(host)s " "from scheduler cache.", {'pool': pool, 'host': self.host}) del self.pools[pool] def _append_backend_info(self, pool_cap): # Fill backend level info to pool if needed. 
if not pool_cap.get('share_backend_name'): pool_cap['share_backend_name'] = self.share_backend_name if not pool_cap.get('storage_protocol'): pool_cap['storage_protocol'] = self.storage_protocol if not pool_cap.get('vendor_name'): pool_cap['vendor_name'] = self.vendor_name if not pool_cap.get('driver_version'): pool_cap['driver_version'] = self.driver_version if not pool_cap.get('timestamp'): pool_cap['timestamp'] = self.updated if not pool_cap.get('storage_protocol'): pool_cap['storage_protocol'] = self.storage_protocol if 'driver_handles_share_servers' not in pool_cap: pool_cap['driver_handles_share_servers'] = ( self.driver_handles_share_servers) if 'snapshot_support' not in pool_cap: pool_cap['snapshot_support'] = self.snapshot_support if 'create_share_from_snapshot_support' not in pool_cap: pool_cap['create_share_from_snapshot_support'] = ( self.create_share_from_snapshot_support) if 'revert_to_snapshot_support' not in pool_cap: pool_cap['revert_to_snapshot_support'] = ( self.revert_to_snapshot_support) if 'mount_snapshot_support' not in pool_cap: pool_cap['mount_snapshot_support'] = self.mount_snapshot_support if 'dedupe' not in pool_cap: pool_cap['dedupe'] = self.dedupe if 'compression' not in pool_cap: pool_cap['compression'] = self.compression if not pool_cap.get('replication_type'): pool_cap['replication_type'] = self.replication_type if not pool_cap.get('replication_domain'): pool_cap['replication_domain'] = self.replication_domain if 'sg_consistent_snapshot_support' not in pool_cap: pool_cap['sg_consistent_snapshot_support'] = ( self.sg_consistent_snapshot_support) if 'security_service_update_support' not in pool_cap: pool_cap['security_service_update_support'] = ( self.security_service_update_support) if 'network_allocation_update_support' not in pool_cap: pool_cap['network_allocation_update_support'] = ( self.network_allocation_update_support) if 'share_server_multiple_subnet_support' not in pool_cap: pool_cap['share_server_multiple_subnet_support'] = 
            (
                self.share_server_multiple_subnet_support)

        if 'share_replicas_migration_support' not in pool_cap:
            pool_cap['share_replicas_migration_support'] = (
                self.share_replicas_migration_support)

        if 'encryption_support' not in pool_cap:
            pool_cap['encryption_support'] = (
                self.encryption_support)

        # IP version support is only propagated when the backend stated it.
        if self.ipv4_support is not None:
            pool_cap['ipv4_support'] = self.ipv4_support

        if self.ipv6_support is not None:
            pool_cap['ipv6_support'] = self.ipv6_support

    def update_backend(self, capability):
        """Refresh backend-level attributes from a capability report."""
        self.share_backend_name = capability.get('share_backend_name')
        self.vendor_name = capability.get('vendor_name')
        self.driver_version = capability.get('driver_version')
        self.storage_protocol = capability.get('storage_protocol')
        self.driver_handles_share_servers = capability.get(
            'driver_handles_share_servers')
        self.snapshot_support = capability.get('snapshot_support')
        self.create_share_from_snapshot_support = capability.get(
            'create_share_from_snapshot_support')
        self.revert_to_snapshot_support = capability.get(
            'revert_to_snapshot_support', False)
        self.mount_snapshot_support = capability.get(
            'mount_snapshot_support', False)
        self.updated = capability['timestamp']
        self.replication_type = capability.get('replication_type')
        self.replication_domain = capability.get('replication_domain')
        self.sg_consistent_snapshot_support = capability.get(
            'share_group_stats', {}).get('consistent_snapshot_support')
        if capability.get('ipv4_support') is not None:
            self.ipv4_support = capability['ipv4_support']
        if capability.get('ipv6_support') is not None:
            self.ipv6_support = capability['ipv6_support']
        self.security_service_update_support = capability.get(
            'security_service_update_support', False)
        self.network_allocation_update_support = capability.get(
            'network_allocation_update_support', False)
        self.share_server_multiple_subnet_support = capability.get(
            'share_server_multiple_subnet_support', False)
        self.share_replicas_migration_support = capability.get(
            'share_replicas_migration_support', False)
        self.encryption_support = capability.get('encryption_support', None)

    def consume_from_share(self, share):
        """Incrementally update host state from an share."""
        if self.provisioned_capacity_gb is not None:
            self.provisioned_capacity_gb += share['size']

        self.allocated_capacity_gb += share['size']

        # free_capacity_gb may legitimately be the string 'unknown';
        # any other string is an invalid capacity report.
        if (isinstance(self.free_capacity_gb, str)
                and self.free_capacity_gb != 'unknown'):
            raise exception.InvalidCapacity(
                name='free_capacity_gb',
                value=self.free_capacity_gb
            )

        if self.free_capacity_gb != 'unknown':
            self.free_capacity_gb -= share['size']
        self.updated = timeutils.utcnow()

    def __repr__(self):
        # Human-readable summary used in scheduler debug logs.
        return ("host: '%(host)s', free_capacity_gb: %(free)s, "
                "pools: %(pools)s" % {'host': self.host,
                                      'free': self.free_capacity_gb,
                                      'pools': self.pools}
                )


class PoolState(HostState):
    """HostState specialization representing a single storage pool."""

    def __init__(self, host, capabilities, pool_name):
        # Host is qualified as 'host@backend#pool'.
        new_host = share_utils.append_host(host, pool_name)
        super(PoolState, self).__init__(new_host, capabilities)
        self.pool_name = pool_name
        # No pools in pool
        self.pools = None

    def _estimate_provisioned_capacity(self, host_name, context=None):
        """Estimate provisioned capacity from share sizes on backend."""
        return db.share_instance_sizes_sum_by_host(context, host_name)

    @utils.synchronized("update_from_share_capability")
    def update_from_share_capability(
            self, capability, service=None, context=None):
        """Update information about a pool from its share_node info."""
        self.update_capabilities(capability, service)
        if capability:
            # Drop stale reports that arrive out of order.
            if self.updated and self.updated > capability['timestamp']:
                return
            self.update_backend(capability)

            self.total_capacity_gb = capability['total_capacity_gb']
            self.free_capacity_gb = capability['free_capacity_gb']
            self.allocated_capacity_gb = capability.get(
                'allocated_capacity_gb', 0)
            self.qos = capability.get('qos', False)
            self.reserved_percentage = capability['reserved_percentage']
            self.reserved_snapshot_percentage = (
                capability['reserved_snapshot_percentage'])
            self.reserved_share_extend_percentage = (
                capability['reserved_share_extend_percentage'])
            self.thin_provisioning = scheduler_utils.thin_provisioning(
                capability.get('thin_provisioning', False))
            # NOTE(xyang): provisioned_capacity_gb is the apparent total
            # capacity of all the shares created on a backend, which is
            # greater than or equal to allocated_capacity_gb, which is the
            # apparent total capacity of all the shares created on a backend
            # in Manila.
            # NOTE(nidhimittalhada): If 'provisioned_capacity_gb' is not set,
            # then calculating 'provisioned_capacity_gb' from share sizes
            # on host, as per information available in manila database.
            # NOTE(jose-castro-leon): Only calculate provisioned_capacity_gb
            # on thin provisioned pools
            self.provisioned_capacity_gb = capability.get(
                'provisioned_capacity_gb')
            if self.thin_provisioning and self.provisioned_capacity_gb is None:
                self.provisioned_capacity_gb = (
                    self._estimate_provisioned_capacity(self.host,
                                                        context=context))

            self.max_over_subscription_ratio = capability.get(
                'max_over_subscription_ratio',
                CONF.max_over_subscription_ratio)
            self.dedupe = capability.get(
                'dedupe', False)
            self.compression = capability.get(
                'compression', False)
            self.replication_type = capability.get(
                'replication_type', self.replication_type)
            self.replication_domain = capability.get(
                'replication_domain')
            self.sg_consistent_snapshot_support = capability.get(
                'sg_consistent_snapshot_support')
            self.security_service_update_support = capability.get(
                'security_service_update_support', False)
            self.network_allocation_update_support = capability.get(
                'network_allocation_update_support', False)
            self.share_server_multiple_subnet_support = capability.get(
                'share_server_multiple_subnet_support', False)
            self.share_replicas_migration_support = capability.get(
                'share_replicas_migration_support', False)
            self.encryption_support = capability.get('encryption_support')

    def update_pools(self, capability):
        # Do nothing, since we don't have pools within pool, yet
        pass


class HostManager(object):
    """Base
HostManager class."""

    host_state_cls = HostState

    def __init__(self):
        self.service_states = {}  # { <host> : { <service> : {cap k : v}}}
        self.host_state_map = {}
        # Load all registered filter and weigher plugin classes.
        self.filter_handler = base_host_filter.HostFilterHandler(
            'manila.scheduler.filters')
        self.filter_classes = self.filter_handler.get_all_classes()
        self.weight_handler = base_host_weigher.HostWeightHandler(
            'manila.scheduler.weighers')
        self.weight_classes = self.weight_handler.get_all_classes()

    def _choose_host_filters(self, filter_cls_names):
        """Choose acceptable filters.

        Since the caller may specify which filters to use we need to
        have an authoritative list of what is permissible. This
        function checks the filter names against a predefined set of
        acceptable filters.

        :raises: exception.SchedulerHostFilterNotFound if any requested
                 filter name is not a loaded filter class.
        """
        if filter_cls_names is None:
            filter_cls_names = CONF.scheduler_default_filters
        if not isinstance(filter_cls_names, (list, tuple)):
            filter_cls_names = [filter_cls_names]
        good_filters = []
        bad_filters = []
        for filter_name in filter_cls_names:
            found_class = False
            for cls in self.filter_classes:
                if cls.__name__ == filter_name:
                    found_class = True
                    good_filters.append(cls)
                    break
            if not found_class:
                bad_filters.append(filter_name)
        if bad_filters:
            msg = ", ".join(bad_filters)
            raise exception.SchedulerHostFilterNotFound(filter_name=msg)
        return good_filters

    def _choose_host_weighers(self, weight_cls_names):
        """Choose acceptable weighers.

        Since the caller may specify which weighers to use, we need
        to have an authoritative list of what is permissible. This
        function checks the weigher names against a predefined set of
        acceptable weighers.

        :raises: exception.SchedulerHostWeigherNotFound if any requested
                 weigher name is not a loaded weigher class.
        """
        if weight_cls_names is None:
            weight_cls_names = CONF.scheduler_default_weighers
        if not isinstance(weight_cls_names, (list, tuple)):
            weight_cls_names = [weight_cls_names]

        good_weighers = []
        bad_weighers = []
        for weigher_name in weight_cls_names:
            found_class = False
            for cls in self.weight_classes:
                if cls.__name__ == weigher_name:
                    good_weighers.append(cls)
                    found_class = True
                    break
            if not found_class:
                bad_weighers.append(weigher_name)
        if bad_weighers:
            msg = ", ".join(bad_weighers)
            raise exception.SchedulerHostWeigherNotFound(weigher_name=msg)
        return good_weighers

    def get_filtered_hosts(self, hosts, filter_properties,
                           filter_class_names=None):
        """Filter hosts and return only ones passing all filters."""
        filter_classes = self._choose_host_filters(filter_class_names)
        return self.filter_handler.get_filtered_objects(filter_classes,
                                                        hosts,
                                                        filter_properties)

    def get_weighed_hosts(self, hosts, weight_properties,
                          weigher_class_names=None):
        """Weigh the hosts."""
        weigher_classes = self._choose_host_weighers(weigher_class_names)
        # Merge every backend's reported server->pools mapping so weighers
        # can see the whole deployment.
        weight_properties['server_pools_mapping'] = {}
        for backend, info in self.service_states.items():
            weight_properties['server_pools_mapping'].update(
                info.get('server_pools_mapping', {}))
        return self.weight_handler.get_weighed_objects(weigher_classes,
                                                       hosts,
                                                       weight_properties)

    def update_service_capabilities(self, service_name, host, capabilities,
                                    timestamp):
        """Update the per-service capabilities based on this notification."""
        if service_name not in ('share',):
            LOG.debug('Ignoring %(service_name)s service update '
                      'from %(host)s',
                      {'service_name': service_name, 'host': host})
            return

        # Copy the capabilities, so we don't modify the original dict
        capability_copy = dict(capabilities)
        timestamp = timestamp or timeutils.utcnow()
        capability_copy["timestamp"] = timestamp  # Reported time

        capab_old = self.service_states.get(host, {"timestamp": 0})

        # Ignore older updates
        if capab_old['timestamp'] and timestamp < capab_old['timestamp']:
            LOG.info('Ignoring old capability report from %s.', host)
            return

        self.service_states[host] = capability_copy

        LOG.debug("Received %(service_name)s service update from "
                  "%(host)s: %(cap)s",
                  {'service_name': service_name, 'host': host,
                   'cap': capabilities})

    def _update_host_state_map(self, context, consider_disabled=False):
        """Rebuild host_state_map from DB services plus cached capabilities."""

        # Get resource usage across the available share nodes:
        topic = CONF.share_topic
        share_services = db.service_get_all_by_topic(
            context,
            topic,
            consider_disabled=consider_disabled,
        )

        active_hosts = set()
        for service in share_services:
            host = service['host']

            # Warn about down services and remove them from host_state_map
            is_down = not utils.service_is_up(service)
            is_disabled = (not consider_disabled and service['disabled'])
            if is_down or is_disabled:
                LOG.warning("Share service is down. (host: %s).", host)
                continue

            # Create and register host_state if not in host_state_map
            capabilities = self.service_states.get(host, None)
            host_state = self.host_state_map.get(host)
            if not host_state:
                host_state = self.host_state_cls(
                    host,
                    capabilities=capabilities,
                    service=dict(service.items()))
                self.host_state_map[host] = host_state

            # Update capabilities and attributes in host_state
            host_state.update_from_share_capability(
                capabilities, service=dict(service.items()), context=context)
            active_hosts.add(host)

        # remove non-active hosts from host_state_map
        nonactive_hosts = set(self.host_state_map.keys()) - active_hosts
        for host in nonactive_hosts:
            LOG.info("Removing non-active host: %(host)s from "
                     "scheduler cache.", {'host': host})
            self.host_state_map.pop(host, None)

    def get_all_host_states_share(self, context, consider_disabled=False):
        """Returns a dict of all the hosts the HostManager knows about.

        Each of the consumable resources in HostState are
        populated with capabilities scheduler received from RPC.

        For example: {'192.168.1.100': HostState(), ...}
        """
        self._update_host_state_map(
            context,
            consider_disabled=consider_disabled,
        )

        # Build a pool_state map and return that map instead of
        # host_state_map
        all_pools = {}
        for host, state in self.host_state_map.items():
            for key in state.pools:
                pool = state.pools[key]
                # Use host.pool_name to make sure key is unique
                pool_key = '.'.join([host, pool.pool_name])
                all_pools[pool_key] = pool

        return all_pools.values()

    def get_pools(self, context, filters=None, cached=False):
        """Returns a list of dicts describing all pools on all known hosts.

        :param filters: optional dict of regex filters (see
                        _passes_filters); the special key 'capabilities'
                        is matched via scheduler_utils instead of regex.
        :param cached: when True, reuse the existing host_state_map if
                       it is non-empty instead of re-querying the DB.
        """

        if not cached or not self.host_state_map:
            self._update_host_state_map(context)

        all_pools = []

        for host, host_state in self.host_state_map.items():
            for pool in host_state.pools.values():

                fully_qualified_pool_name = share_utils.append_host(
                    host, pool.pool_name)
                host_name = share_utils.extract_host(
                    fully_qualified_pool_name, level='host')
                # Backend name only exists for 'host@backend' style hosts.
                backend_name = (share_utils.extract_host(
                    fully_qualified_pool_name, level='backend').split('@')[1]
                    if '@' in fully_qualified_pool_name else None)
                pool_name = share_utils.extract_host(
                    fully_qualified_pool_name, level='pool')

                new_pool = {
                    'name': fully_qualified_pool_name,
                    'host': host_name,
                    'backend': backend_name,
                    'pool': pool_name,
                    'capabilities': pool.capabilities,
                }
                if self._passes_filters(new_pool, filters):
                    all_pools.append(new_pool)
        return all_pools

    def _passes_filters(self, dict_to_check, filter_dict):
        """Applies a set of regex filters to a dictionary.

        If no filter keys are supplied, the data passes unfiltered and
        the method returns True.  Otherwise, each key in the filter
        (filter_dict) must be present in the data (dict_to_check)
        and the filter values are applied as regex expressions to
        the data values.  If any of the filter values fail to match
        their corresponding data values, the method returns False.
        But if all filters match, the method returns True.
        """
        if not filter_dict:
            return True

        for filter_key, filter_value in filter_dict.items():
            if filter_key not in dict_to_check:
                return False
            if filter_key == 'capabilities':
                # Capability filters use extra-spec style matching, not regex.
                if not scheduler_utils.capabilities_satisfied(
                        dict_to_check.get(filter_key), filter_value):
                    return False
            elif not re.match(filter_value, dict_to_check.get(filter_key)):
                return False
        return True
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/scheduler/manager.py0000664000175000017500000003643500000000000020274 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Scheduler Service """ from datetime import datetime from oslo_config import cfg from oslo_log import log from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import timeutils from manila.common import constants from manila import context from manila import coordination from manila import db from manila import exception from manila import manager from manila.message import api as message_api from manila.message import message_field from manila import quota from manila import rpc from manila.share import rpcapi as share_rpcapi from manila.share import share_types from manila import utils LOG = log.getLogger(__name__) QUOTAS = quota.QUOTAS scheduler_driver_opt = cfg.StrOpt('scheduler_driver', default='manila.scheduler.drivers.' 'filter.FilterScheduler', help='Default scheduler driver to use.') CONF = cfg.CONF CONF.register_opt(scheduler_driver_opt) # Drivers that need to change module paths or class names can add their # old/new path here to maintain backward compatibility. 
MAPPING = { 'manila.scheduler.chance.ChanceScheduler': 'manila.scheduler.drivers.chance.ChanceScheduler', 'manila.scheduler.filter_scheduler.FilterScheduler': 'manila.scheduler.drivers.filter.FilterScheduler', 'manila.scheduler.simple.SimpleScheduler': 'manila.scheduler.drivers.simple.SimpleScheduler', } class SchedulerManager(manager.Manager): """Chooses a host to create shares.""" RPC_API_VERSION = '1.11' def __init__(self, scheduler_driver=None, service_name=None, *args, **kwargs): if not scheduler_driver: scheduler_driver = CONF.scheduler_driver if scheduler_driver in MAPPING: msg_args = { 'old': scheduler_driver, 'new': MAPPING[scheduler_driver], } LOG.warning("Scheduler driver path %(old)s is deprecated, " "update your configuration to the new path " "%(new)s", msg_args) scheduler_driver = MAPPING[scheduler_driver] self.driver = importutils.import_object(scheduler_driver) self.message_api = message_api.API() super(SchedulerManager, self).__init__(*args, **kwargs) self.service_id = None def init_host_with_rpc(self, service_id=None): self.service_id = service_id ctxt = context.get_admin_context() self.request_service_capabilities(ctxt) def get_host_list(self, context): """Get a list of hosts from the HostManager.""" return self.driver.get_host_list() def get_service_capabilities(self, context): """Get the normalized set of capabilities for this zone.""" return self.driver.get_service_capabilities() def update_service_capabilities(self, context, service_name=None, host=None, capabilities=None, timestamp=None, **kwargs): """Process a capability update from a service node.""" if capabilities is None: capabilities = {} elif timestamp: timestamp = datetime.strptime(timestamp, timeutils.PERFECT_TIME_FORMAT) self.driver.update_service_capabilities(service_name, host, capabilities, timestamp) def create_share_instance(self, context, request_spec=None, filter_properties=None): try: self.driver.schedule_create_share(context, request_spec, filter_properties) except 
exception.NoValidHost as ex:
            # No backend fits: mark the share errored and record a user
            # message; do not re-raise.
            self._set_share_state_and_notify(
                'create_share',
                {'status': constants.STATUS_ERROR},
                context, ex, request_spec,
                message_field.Action.ALLOCATE_HOST)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                self._set_share_state_and_notify(
                    'create_share',
                    {'status': constants.STATUS_ERROR},
                    context, ex, request_spec)

    def get_pools(self, context, filters=None, cached=False):
        """Get active pools from the scheduler's cache."""
        return self.driver.get_pools(context, filters, cached)

    def manage_share(self, context, share_id, driver_options, request_spec,
                     filter_properties=None):
        """Ensure that the host exists and can accept the share."""

        def _manage_share_set_error(self, context, ex, request_spec):
            # NOTE(haixin) if failed to scheduler backend for manage share,
            # and we do not commit quota usages here, so we should set size 0
            # because we don't know the real size of the size, and we will
            # skip quota cuts when unmanage share with manage_error status.
            self._set_share_state_and_notify(
                'manage_share',
                {'status': constants.STATUS_MANAGE_ERROR, 'size': 0},
                context, ex, request_spec)

        share_ref = db.share_get(context, share_id)

        try:
            self.driver.host_passes_filters(
                context,
                share_ref['host'],
                request_spec,
                filter_properties)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _manage_share_set_error(self, context, ex, request_spec)
        else:
            share_rpcapi.ShareAPI().manage_share(context, share_ref,
                                                 driver_options)

    def migrate_share_to_host(
            self, context, share_id, host, force_host_assisted_migration,
            preserve_metadata, writable, nondisruptive, preserve_snapshots,
            new_share_network_id, new_share_type_id, request_spec,
            filter_properties=None):
        """Ensure that the host exists and can accept the share."""

        share_ref = db.share_get(context, share_id)

        def _migrate_share_set_error(self, context, ex, request_spec):
            # Roll the migrating instance back to 'available' (if any),
            # flag the migration errored and return the quota that was
            # allocated for the new share type.
            instance = next((x for x in share_ref.instances
                             if x['status'] == constants.STATUS_MIGRATING),
                            None)
            if instance:
                db.share_instance_update(
                    context, instance['id'],
                    {'status': constants.STATUS_AVAILABLE})
            self._set_share_state_and_notify(
                'migrate_share_to_host',
                {'task_state': constants.TASK_STATE_MIGRATION_ERROR},
                context, ex, request_spec)
            share_types.revert_allocated_share_type_quotas_during_migration(
                context, share_ref, new_share_type_id)

        try:
            tgt_host = self.driver.host_passes_filters(
                context, host, request_spec, filter_properties)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _migrate_share_set_error(self, context, ex, request_spec)
        else:
            try:
                share_rpcapi.ShareAPI().migration_start(
                    context, share_ref, tgt_host.host,
                    force_host_assisted_migration, preserve_metadata,
                    writable, nondisruptive, preserve_snapshots,
                    new_share_network_id, new_share_type_id)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    _migrate_share_set_error(self, context, ex, request_spec)

    def _set_share_state_and_notify(self, method, state, context, ex,
                                    request_spec, action=None):
        """Persist an error state for a share and emit log/message/notify."""

        LOG.error("Failed to schedule %(method)s: %(ex)s",
                  {"method": method, "ex": ex})

        properties = request_spec.get('share_properties', {})

        share_id = request_spec.get('share_id', None)

        if share_id:
            db.share_update(context, share_id, state)

        if action:
            self.message_api.create(
                context, action, context.project_id,
                resource_type=message_field.Resource.SHARE,
                resource_id=share_id, exception=ex)

        payload = dict(request_spec=request_spec,
                       share_properties=properties,
                       share_id=share_id,
                       state=state,
                       method=method,
                       reason=ex)

        rpc.get_notifier("scheduler").error(
            context, 'scheduler.' + method, payload)

    def request_service_capabilities(self, context):
        share_rpcapi.ShareAPI().publish_service_capabilities(context)

    def _set_share_group_error_state(self, method, context, ex,
                                     request_spec, action=None):
        LOG.warning("Failed to schedule_%(method)s: %(ex)s",
                    {"method": method, "ex": ex})

        share_group_state = {'status': constants.STATUS_ERROR}

        share_group_id = request_spec.get('share_group_id')

        if share_group_id:
            db.share_group_update(context, share_group_id, share_group_state)

        if action:
            self.message_api.create(
                context, action, context.project_id,
                resource_type=message_field.Resource.SHARE_GROUP,
                resource_id=share_group_id, exception=ex)

    @periodic_task.periodic_task(spacing=600, run_immediately=True)
    def _expire_reservations(self, context):
        # Periodically release expired quota reservations.
        quota.QUOTAS.expire(context)

    def create_share_group(self, context, share_group_id, request_spec=None,
                           filter_properties=None):
        try:
            self.driver.schedule_create_share_group(
                context, share_group_id, request_spec, filter_properties)
        except exception.NoValidHost as ex:
            self._set_share_group_error_state(
                'create_share_group', context, ex, request_spec,
                message_field.Action.ALLOCATE_HOST)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                self._set_share_group_error_state(
                    'create_share_group', context, ex, request_spec)

    def _set_share_replica_error_state(self, context, method, exc,
                                       request_spec, action=None):
        """Mark a failed replica (and its snapshot instances) as errored."""

        LOG.warning("Failed to schedule_%(method)s: %(exc)s",
                    {'method': method, 'exc': exc})
        status_updates = {
            'status': constants.STATUS_ERROR,
            'replica_state': constants.STATUS_ERROR,
        }
        share_replica_id = request_spec.get(
            'share_instance_properties').get('id')

        # Set any snapshot instances to 'error'.
        replica_snapshots = db.share_snapshot_instance_get_all_with_filters(
            context, {'share_instance_ids': share_replica_id})
        for snapshot_instance in replica_snapshots:
            db.share_snapshot_instance_update(
                context, snapshot_instance['id'],
                {'status': constants.STATUS_ERROR})

        db.share_replica_update(context, share_replica_id, status_updates)

        if action:
            self.message_api.create(
                context, action, context.project_id,
                resource_type=message_field.Resource.SHARE_REPLICA,
                resource_id=share_replica_id, exception=exc)

    def create_share_replica(self, context, request_spec=None,
                             filter_properties=None):
        try:
            self.driver.schedule_create_replica(context, request_spec,
                                                filter_properties)

        except exception.NoValidHost as exc:
            self._set_share_replica_error_state(
                context, 'create_share_replica', exc, request_spec,
                message_field.Action.ALLOCATE_HOST)

        except Exception as exc:
            with excutils.save_and_reraise_exception():
                self._set_share_replica_error_state(
                    context, 'create_share_replica', exc, request_spec)

    @periodic_task.periodic_task(spacing=CONF.message_reap_interval,
                                 run_immediately=True)
    @coordination.synchronized('locked-clean-expired-messages')
    def _clean_expired_messages(self, context):
        # Only one scheduler at a time performs message cleanup.
        self.message_api.cleanup_expired_messages(context)

    @periodic_task.periodic_task(spacing=CONF.service_down_time,
                                 run_immediately=True)
    @coordination.synchronized('locked-mark-services-as-down')
    def _mark_services_as_down(self, context):
        # Flag services that stopped heartbeating as 'down' in the DB.
        for svc in db.service_get_all(context):
            if not utils.service_is_up(svc):
                if svc["state"] not in ("down", "stopped"):
                    db.service_update(context, svc['id'], {"state": "down"})

    def extend_share(self, context, share_id, new_size, reservations,
                     request_spec=None, filter_properties=None):
        def _extend_share_set_error(self, context, ex, request_spec):
            # Extension failed at scheduling time: the share itself is
            # still intact, so it returns to 'available'.
            share_state = {'status': constants.STATUS_AVAILABLE}
            self._set_share_state_and_notify('extend_share', share_state,
                                             context, ex, request_spec)

        share = db.share_get(context, share_id)
        try:
            size_increase = int(new_size) - share['size']
            if filter_properties:
                filter_properties['size_increase'] = size_increase
            else:
                filter_properties = {'size_increase': size_increase}

            # Re-check that the share's current host can hold the extra
            # capacity before asking the share service to extend.
            target_host = self.driver.host_passes_filters(
                context,
                share['host'],
                request_spec,
                filter_properties)
            target_host.consume_from_share({'size': size_increase})
            share_rpcapi.ShareAPI().extend_share(context, share, new_size,
                                                 reservations)
        except exception.NoValidHost as ex:
            # Give back the reserved quota and surface a user message.
            quota.QUOTAS.rollback(context, reservations,
                                  project_id=share['project_id'],
                                  user_id=share['user_id'],
                                  share_type_id=share['share_type_id'])
            _extend_share_set_error(self, context, ex, request_spec)
            self.message_api.create(
                context,
                message_field.Action.EXTEND,
                share['project_id'],
                resource_type=message_field.Resource.SHARE,
                resource_id=share['id'],
                exception=ex)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/scheduler/rpcapi.py0000664000175000017500000001475400000000000020130 0ustar00zuulzuul00000000000000# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Client side of the scheduler manager RPC API.
"""

from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import timeutils

from manila import rpc

CONF = cfg.CONF


class SchedulerAPI(object):
    """Client side of the scheduler rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Add get_pools method
        1.2 - Introduce Share Instances. Replace ``create_share()`` with
              ``create_share_instance()``
        1.3 - Add create_consistency_group method (renamed in 1.7)
        1.4 - Add migrate_share_to_host method
        1.5 - Add create_share_replica
        1.6 - Add manage_share
        1.7 - Updated migrate_share_to_host method with new parameters
        1.8 - Rename create_consistency_group -> create_share_group method
        1.9 - Add cached parameter to get_pools method
        1.10 - Add timestamp to update_service_capabilities
        1.11 - Add extend_share
    """

    RPC_API_VERSION = '1.11'

    def __init__(self):
        super(SchedulerAPI, self).__init__()
        target = messaging.Target(topic=CONF.scheduler_topic,
                                  version=self.RPC_API_VERSION)
        self.client = rpc.get_client(target, version_cap=self.RPC_API_VERSION)

    def create_share_instance(self, context, request_spec=None,
                              filter_properties=None):
        # Async cast; request_spec is made JSON-serializable first.
        request_spec_p = jsonutils.to_primitive(request_spec)
        call_context = self.client.prepare(version='1.2')
        return call_context.cast(context,
                                 'create_share_instance',
                                 request_spec=request_spec_p,
                                 filter_properties=filter_properties)

    def update_service_capabilities(self, context,
                                    service_name, host,
                                    capabilities):
        # Fanout: every scheduler instance receives the capability update.
        call_context = self.client.prepare(fanout=True, version='1.10')
        timestamp = jsonutils.to_primitive(timeutils.utcnow())
        call_context.cast(context,
                          'update_service_capabilities',
                          service_name=service_name,
                          host=host,
                          capabilities=capabilities,
                          timestamp=timestamp)

    def get_pools(self, context, filters=None, cached=False):
        # Synchronous call — the caller needs the pool list back.
        call_context = self.client.prepare(version='1.9')
        return call_context.call(context,
                                 'get_pools',
                                 filters=filters,
                                 cached=cached)

    def create_share_group(self, context, share_group_id,
                           request_spec=None, filter_properties=None):
        """Casts an rpc to the scheduler to create a share group.

        Example of 'request_spec' argument value::

            {
                'share_group_type_id': 'fake_share_group_type_id',
                'share_group_id': 'some_fake_uuid',
                'availability_zone_id': 'some_fake_az_uuid',
                'share_types': [models.ShareType],
                'resource_type': models.ShareGroup,
            }
        """
        request_spec_p = jsonutils.to_primitive(request_spec)
        call_context = self.client.prepare(version='1.8')
        return call_context.cast(context,
                                 'create_share_group',
                                 share_group_id=share_group_id,
                                 request_spec=request_spec_p,
                                 filter_properties=filter_properties)

    def migrate_share_to_host(
            self, context, share_id, host, force_host_assisted_migration,
            preserve_metadata, writable, nondisruptive, preserve_snapshots,
            new_share_network_id, new_share_type_id, request_spec=None,
            filter_properties=None):
        call_context = self.client.prepare(version='1.7')
        request_spec_p = jsonutils.to_primitive(request_spec)
        return call_context.cast(
            context,
            'migrate_share_to_host',
            share_id=share_id,
            host=host,
            force_host_assisted_migration=force_host_assisted_migration,
            preserve_metadata=preserve_metadata,
            writable=writable,
            nondisruptive=nondisruptive,
            preserve_snapshots=preserve_snapshots,
            new_share_network_id=new_share_network_id,
            new_share_type_id=new_share_type_id,
            request_spec=request_spec_p,
            filter_properties=filter_properties)

    def create_share_replica(self, context, request_spec=None,
                             filter_properties=None):
        request_spec_p = jsonutils.to_primitive(request_spec)
        call_context = self.client.prepare(version='1.5')
        return call_context.cast(context,
                                 'create_share_replica',
                                 request_spec=request_spec_p,
                                 filter_properties=filter_properties)

    def manage_share(self, context, share_id, driver_options,
                     request_spec=None, filter_properties=None):
        # NOTE(review): unlike the sibling methods, request_spec is not
        # passed through jsonutils.to_primitive() here — presumably the
        # caller already provides primitives; confirm before changing.
        call_context = self.client.prepare(version='1.6')
        return call_context.cast(context,
                                 'manage_share',
                                 share_id=share_id,
                                 driver_options=driver_options,
                                 request_spec=request_spec,
                                 filter_properties=filter_properties)

    def extend_share(self, context, share_id, new_size, reservations,
                     request_spec,
filter_properties=None): call_context = self.client.prepare(version='1.11') msg_args = { 'share_id': share_id, 'new_size': new_size, 'reservations': reservations, 'request_spec': request_spec, 'filter_properties': filter_properties, } return call_context.cast(context, 'extend_share', **msg_args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/scheduler_options.py0000664000175000017500000000664400000000000022412 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ import datetime import os from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import timeutils scheduler_json_config_location_opt = cfg.StrOpt( 'scheduler_json_config_location', default='', help='Absolute path to scheduler configuration JSON file.') CONF = cfg.CONF CONF.register_opt(scheduler_json_config_location_opt) LOG = log.getLogger(__name__) class SchedulerOptions(object): """Monitor and load local .json file for filtering and weighing. SchedulerOptions monitors a local .json file for changes and loads it if needed. 
This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ def __init__(self): super(SchedulerOptions, self).__init__() self.data = {} self.last_modified = None self.last_checked = None def _get_file_handle(self, filename): """Get file handle. Broken out for testing.""" return open(filename) def _get_file_timestamp(self, filename): """Get the last modified datetime. Broken out for testing.""" try: return os.path.getmtime(filename) except os.error: LOG.exception("Could not stat scheduler options file " "%(filename)s.", {"filename": filename}) raise def _load_file(self, handle): """Decode the JSON file. Broken out for testing.""" try: return jsonutils.load(handle) except ValueError: LOG.exception("Could not decode scheduler options.") return {} def _get_time_now(self): """Get current UTC. Broken out for testing.""" return timeutils.utcnow() def get_configuration(self, filename=None): """Check the json file for changes and load it if needed.""" if not filename: filename = CONF.scheduler_json_config_location if not filename: return self.data if self.last_checked: now = self._get_time_now() if now - self.last_checked < datetime.timedelta(minutes=5): return self.data last_modified = self._get_file_timestamp(filename) if (not last_modified or not self.last_modified or last_modified > self.last_modified): self.data = self._load_file(self._get_file_handle(filename)) self.last_modified = last_modified if not self.data: self.data = {} return self.data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/utils.py0000664000175000017500000001734400000000000020020 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # Copyright (c) 2016 EMC Corporation # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import strutils from manila.scheduler.filters import extra_specs_ops LOG = log.getLogger(__name__) def generate_stats(host_state, properties): """Generates statistics from host and share data.""" host_stats = { 'host': host_state.host, 'share_backend_name': host_state.share_backend_name, 'vendor_name': host_state.vendor_name, 'driver_version': host_state.driver_version, 'storage_protocol': host_state.storage_protocol, 'qos': host_state.qos, 'total_capacity_gb': host_state.total_capacity_gb, 'allocated_capacity_gb': host_state.allocated_capacity_gb, 'free_capacity_gb': host_state.free_capacity_gb, 'reserved_percentage': host_state.reserved_percentage, 'reserved_snapshot_percentage': host_state.reserved_snapshot_percentage, 'reserved_share_extend_percentage': host_state.reserved_share_extend_percentage, 'driver_handles_share_servers': host_state.driver_handles_share_servers, 'thin_provisioning': host_state.thin_provisioning, 'updated': host_state.updated, 'dedupe': host_state.dedupe, 'compression': host_state.compression, 'snapshot_support': host_state.snapshot_support, 'create_share_from_snapshot_support': host_state.create_share_from_snapshot_support, 'revert_to_snapshot_support': host_state.revert_to_snapshot_support, 'mount_snapshot_support': host_state.mount_snapshot_support, 'replication_domain': host_state.replication_domain, 'replication_type': host_state.replication_type, 
'provisioned_capacity_gb': host_state.provisioned_capacity_gb, 'pools': host_state.pools, 'max_over_subscription_ratio': host_state.max_over_subscription_ratio, 'sg_consistent_snapshot_support': ( host_state.sg_consistent_snapshot_support), 'ipv4_support': host_state.ipv4_support, 'ipv6_support': host_state.ipv6_support, 'security_service_update_support': ( host_state.security_service_update_support), 'network_allocation_update_support': ( host_state.network_allocation_update_support), 'share_server_multiple_subnet_support': ( host_state.share_server_multiple_subnet_support), 'mount_point_name_support': ( host_state.mount_point_name_support), 'share_replicas_migration_support': ( host_state.share_replicas_migration_support), 'encryption_support': host_state.encryption_support, } host_caps = host_state.capabilities share_type = properties.get('share_type', {}) extra_specs = share_type.get('extra_specs', {}) share_group_type = properties.get('share_group_type', {}) group_specs = share_group_type.get('group_specs', {}) request_spec = properties.get('request_spec', {}) share_stats = request_spec.get('resource_properties', {}) stats = { 'host_stats': host_stats, 'host_caps': host_caps, 'share_type': share_type, 'extra_specs': extra_specs, 'share_stats': share_stats, 'share_group_type': share_group_type, 'group_specs': group_specs, } return stats def use_thin_logic(share_type): # NOTE(xyang): To preserve the existing behavior, we use thin logic # to evaluate in two cases: # 1) 'thin_provisioning' is not set in extra specs (This is for # backward compatibility. If not set, the scheduler behaves # the same as before this bug fix). # 2) 'thin_provisioning' is set in extra specs and it is # ' True' or 'True'. # Otherwise we use the thick logic to evaluate. 
use_thin_logic = True thin_spec = None try: thin_spec = share_type.get('extra_specs', {}).get( 'thin_provisioning') if thin_spec is None: thin_spec = share_type.get('extra_specs', {}).get( 'capabilities:thin_provisioning') # NOTE(xyang) 'use_thin_logic' and 'thin_provisioning' are NOT # the same thing. The first purpose of "use_thin_logic" is to # preserve the existing scheduler behavior if 'thin_provisioning' # is NOT in extra_specs (if thin_spec is None, use_thin_logic # should be True). The second purpose of 'use_thin_logic' # is to honor 'thin_provisioning' if it is in extra specs (if # thin_spec is set to True, use_thin_logic should be True; if # thin_spec is set to False, use_thin_logic should be False). use_thin_logic = strutils.bool_from_string( thin_spec, strict=True) if thin_spec is not None else True except ValueError: # Check if the value of thin_spec is ' True'. if thin_spec is not None and not extra_specs_ops.match( True, thin_spec): use_thin_logic = False return use_thin_logic def thin_provisioning(host_state_thin_provisioning): # NOTE(xyang): host_state_thin_provisioning is reported by driver. # It can be either bool (True or False) or # list ([True, False], [True], [False]). 
thin_capability = [host_state_thin_provisioning] if not isinstance( host_state_thin_provisioning, list) else host_state_thin_provisioning return True in thin_capability def capabilities_satisfied(capabilities, extra_specs): # These extra-specs are not capabilities for matching hosts ignored_extra_specs = ( 'availability_zones', 'capabilities:availability_zones', ) for key, req in extra_specs.items(): # Ignore some extra_specs if told to if key in ignored_extra_specs: continue # Either not scoped format, or in capabilities scope scope = key.split(':') # Ignore scoped (such as vendor-specific) capabilities if len(scope) > 1 and scope[0] != "capabilities": continue # Strip off prefix if spec started with 'capabilities:' elif scope[0] == "capabilities": del scope[0] cap = capabilities for index in range(len(scope)): try: cap = cap.get(scope[index]) except AttributeError: cap = None if cap is None: LOG.debug("Host doesn't provide capability '%(cap)s' " "listed in the extra specs", {'cap': scope[index]}) return False # Make all capability values a list so we can handle lists cap_list = [cap] if not isinstance(cap, list) else cap # Loop through capability values looking for any match for cap_value in cap_list: if extra_specs_ops.match(cap_value, req): break else: # Nothing matched, so bail out LOG.debug('Share type extra spec requirement ' '"%(key)s=%(req)s" does not match reported ' 'capability "%(cap)s"', {'key': key, 'req': req, 'cap': cap}) return False return True ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.893672 manila-21.0.0/manila/scheduler/weighers/0000775000175000017500000000000000000000000020112 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/weighers/__init__.py0000664000175000017500000000000000000000000022211 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/weighers/base.py0000664000175000017500000001070400000000000021400 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Pluggable Weighing support """ import abc from manila.scheduler import base_handler def normalize(weight_list, minval=None, maxval=None): """Normalize the values in a list between 0 and 1.0. The normalization is made regarding the lower and upper values present in weight_list. If the minval and/or maxval parameters are set, these values will be used instead of the minimum and maximum from the list. If all the values are equal, they are normalized to 0. """ if not weight_list: return () if maxval is None: maxval = max(weight_list) if minval is None: minval = min(weight_list) maxval = float(maxval) minval = float(minval) if minval == maxval: return [0] * len(weight_list) range_ = maxval - minval return ((i - minval) / range_ for i in weight_list) class WeighedObject(object): """Object with weight information.""" def __init__(self, obj, weight): self.obj = obj self.weight = weight def __repr__(self): return "" % (self.obj, self.weight) class BaseWeigher(metaclass=abc.ABCMeta): """Base class for pluggable weighers. 
The attributes maxval and minval can be specified to set up the maximum and minimum values for the weighed objects. These values will then be taken into account in the normalization step, instead of taking the values from the calculated weighers. """ minval = None maxval = None def weight_multiplier(self): """How weighted this weigher should be. Override this method in a subclass, so that the returned value is read from a configuration option to permit operators specify a multiplier for the weigher. """ return 1.0 @abc.abstractmethod def _weigh_object(self, obj, weight_properties): """Override in a subclass to specify a weight for a specific object.""" def weigh_objects(self, weighed_obj_list, weight_properties): """Weigh multiple objects. Override in a subclass if you need access to all objects in order to calculate weighers. Do not modify the weight of an object here, just return a list of weighers. """ # Calculate the weighers weights = [] for obj in weighed_obj_list: weight = self._weigh_object(obj.obj, weight_properties) # Record the min and max values if they are None. 
If they anything # but none we assume that the weigher has set them if self.minval is None: self.minval = weight if self.maxval is None: self.maxval = weight if weight < self.minval: self.minval = weight elif weight > self.maxval: self.maxval = weight weights.append(weight) return weights class BaseWeightHandler(base_handler.BaseHandler): object_class = WeighedObject def get_weighed_objects(self, weigher_classes, obj_list, weighing_properties): """Return a sorted (descending), normalized list of WeighedObjects.""" if not obj_list: return [] weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list] for weigher_cls in weigher_classes: weigher = weigher_cls() weights = weigher.weigh_objects(weighed_objs, weighing_properties) # Normalize the weighers weights = normalize(weights, minval=weigher.minval, maxval=weigher.maxval) for i, weight in enumerate(weights): obj = weighed_objs[i] obj.weight += weigher.weight_multiplier() * weight return sorted(weighed_objs, key=lambda x: x.weight, reverse=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/weighers/base_host.py0000664000175000017500000000241200000000000022432 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Scheduler host weighers """ from manila.scheduler.weighers import base class WeighedHost(base.WeighedObject): def to_dict(self): return { 'weight': self.weight, 'host': self.obj.host, } def __repr__(self): return ("WeighedHost [host: %s, weight: %s]" % (self.obj.host, self.weight)) class BaseHostWeigher(base.BaseWeigher): """Base class for host weighers.""" pass class HostWeightHandler(base.BaseWeightHandler): object_class = WeighedHost def __init__(self, namespace): super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/weighers/capacity.py0000664000175000017500000001103000000000000022254 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack, LLC. # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Capacity Weigher. Weigh hosts by their virtual or actual free capacity. For thin provisioning, weigh hosts by their virtual free capacity calculated by the total capacity multiplied by the max over subscription ratio and subtracting the provisioned capacity; Otherwise, weigh hosts by their actual free capacity, taking into account the reserved space. The default is to spread shares across all hosts evenly. If you prefer stacking, you can set the 'capacity_weight_multiplier' option to a negative number and the weighing has the opposite effect of the default. 
""" import math from oslo_config import cfg from manila.scheduler import utils from manila.scheduler.weighers import base_host capacity_weight_opts = [ cfg.FloatOpt('capacity_weight_multiplier', default=1.0, help='Multiplier used for weighing share capacity. ' 'Negative numbers mean to stack vs spread.'), ] CONF = cfg.CONF CONF.register_opts(capacity_weight_opts) class CapacityWeigher(base_host.BaseHostWeigher): def weight_multiplier(self): """Override the weight multiplier.""" return CONF.capacity_weight_multiplier def _weigh_object(self, host_state, weight_properties): """Higher weighers win. We want spreading to be the default.""" if weight_properties.get('snapshot_id'): reserved = float(host_state.reserved_snapshot_percentage) / 100 elif weight_properties.get('is_share_extend'): reserved = float(host_state.reserved_share_extend_percentage) / 100 else: reserved = float(host_state.reserved_percentage) / 100 free_space = host_state.free_capacity_gb total_space = host_state.total_capacity_gb if 'unknown' in (total_space, free_space): # NOTE(u_glide): "unknown" capacity always sorts to the bottom if CONF.capacity_weight_multiplier > 0: free = float('-inf') else: free = float('inf') else: total = float(total_space) share_type = weight_properties.get('share_type', {}) use_thin_logic = utils.use_thin_logic(share_type) thin_provisioning = utils.thin_provisioning( host_state.thin_provisioning) if use_thin_logic and thin_provisioning: # NOTE(xyang): Calculate virtual free capacity for thin # provisioning. free = math.floor( total * host_state.max_over_subscription_ratio - host_state.provisioned_capacity_gb - total * reserved) else: # NOTE(xyang): Calculate how much free space is left after # taking into account the reserved space. 
free = math.floor(free_space - total * reserved) return free def weigh_objects(self, weighed_obj_list, weight_properties): weights = super(CapacityWeigher, self).weigh_objects(weighed_obj_list, weight_properties) # NOTE(u_glide): Replace -inf with (minimum - 1) and # inf with (maximum + 1) to avoid errors in # manila.scheduler.weighers.base.normalize() method if self.minval == float('-inf'): self.minval = self.maxval for val in weights: if float('-inf') < val < self.minval: self.minval = val self.minval -= 1 return [self.minval if w == float('-inf') else w for w in weights] elif self.maxval == float('inf'): self.maxval = self.minval for val in weights: if self.maxval < val < float('inf'): self.maxval = val self.maxval += 1 return [self.maxval if w == float('inf') else w for w in weights] else: return weights ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/weighers/goodness.py0000664000175000017500000001040200000000000022302 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from manila.scheduler.evaluator import evaluator from manila.scheduler import utils from manila.scheduler.weighers import base_host LOG = logging.getLogger(__name__) class GoodnessWeigher(base_host.BaseHostWeigher): """Goodness Weigher. 
Assign weights based on a host's goodness function. Goodness rating is the following: .. code-block:: none 0 -- host is a poor choice . . 50 -- host is a good choice . . 100 -- host is a perfect choice """ def _weigh_object(self, host_state, weight_properties): """Determine host's goodness rating based on a goodness_function.""" stats = self._generate_stats(host_state, weight_properties) LOG.debug("Checking host '%s'", stats['host_stats']['host']) result = self._check_goodness_function(stats) LOG.debug("Goodness: %s", result) LOG.debug("Done checking host '%s'", stats['host_stats']['host']) return result def _check_goodness_function(self, stats): """Gets a host's goodness rating based on its goodness function.""" goodness_rating = 0 if stats['goodness_function'] is None: LOG.warning("Goodness function not set :: defaulting to " "minimal goodness rating of 0.") else: try: goodness_result = self._run_evaluator( stats['goodness_function'], stats) except Exception as ex: LOG.warning("Error in goodness_function function " "'%(function)s' : '%(error)s' :: Defaulting " "to a goodness of 0.", {'function': stats['goodness_function'], 'error': ex, }) return goodness_rating if type(goodness_result) is bool: if goodness_result: goodness_rating = 100 elif goodness_result < 0 or goodness_result > 100: LOG.warning("Invalid goodness result. Result must be " "between 0 and 100. Result generated: '%s' " ":: Defaulting to a goodness of 0.", goodness_result) else: goodness_rating = goodness_result msg = "Goodness function result for host %(host)s: %(result)s." 
args = {'host': stats['host_stats']['host'], 'result': str(goodness_rating)} LOG.info(msg, args) return goodness_rating def _run_evaluator(self, func, stats): """Evaluates a given function using the provided available stats.""" host_stats = stats['host_stats'] host_caps = stats['host_caps'] extra_specs = stats['extra_specs'] share_stats = stats['share_stats'] result = evaluator.evaluate( func, extra=extra_specs, stats=host_stats, capabilities=host_caps, share=share_stats) return result def _generate_stats(self, host_state, weight_properties): """Generates statistics from host and share data.""" goodness_function = None if ('goodness_function' in host_state.capabilities and host_state.capabilities['goodness_function'] is not None): goodness_function = str( host_state.capabilities['goodness_function']) stats = utils.generate_stats(host_state, weight_properties) stats['goodness_function'] = goodness_function return stats ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/weighers/host_affinity.py0000664000175000017500000000560600000000000023341 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila import context from manila.db import api as db_api from manila.scheduler.weighers import base_host from manila.share import utils as share_utils class HostAffinityWeigher(base_host.BaseHostWeigher): def _weigh_object(self, obj, weight_properties): """Weigh hosts based on their proximity to the source's share pool. If no snapshot_id was provided will return 0, otherwise, if source and destination hosts are located on: 1. same back ends and pools: host is a perfect choice (100) 2. same back ends and different pools: host is a very good choice (75) 3. different back ends with the same AZ: host is a good choice (50) 4. different back ends and AZs: host isn't so good choice (25) """ ctx = context.get_admin_context() request_spec = weight_properties.get('request_spec') snapshot_id = request_spec.get('snapshot_id') snapshot_host = request_spec.get('snapshot_host') if None in [snapshot_id, snapshot_host]: # NOTE(silvacarlose): if the request does not contain a snapshot_id # or a snapshot_host, the user is not creating a share from a # snapshot and we don't need to weigh the host. return 0 snapshot_ref = db_api.share_snapshot_get(ctx, snapshot_id) # Source host info: pool, backend and availability zone src_pool = share_utils.extract_host(snapshot_host, 'pool') src_backend = share_utils.extract_host( request_spec.get('snapshot_host'), 'backend') src_az = snapshot_ref['share']['availability_zone'] # Destination host info: pool, backend and availability zone dst_pool = share_utils.extract_host(obj.host, 'pool') dst_backend = share_utils.extract_host(obj.host, 'backend') # NOTE(dviroel): All hosts were already filtered by the availability # zone parameter. 
dst_az = None if weight_properties['availability_zone_id']: dst_az = db_api.availability_zone_get( ctx, weight_properties['availability_zone_id']).name if src_backend == dst_backend: return 100 if (src_pool and src_pool == dst_pool) else 75 else: return 50 if (src_az and src_az == dst_az) else 25 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/weighers/netapp_aiq.py0000664000175000017500000003422600000000000022614 0ustar00zuulzuul00000000000000# Copyright 2023 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import requests from requests.adapters import HTTPAdapter from requests import auth from urllib3.util import retry from manila import exception from manila.scheduler.weighers import base_host ACTIVE_IQ_WEIGHER_GROUP = 'netapp_active_iq' active_iq_weight_opts = [ cfg.HostAddressOpt('aiq_hostname', help='The hostname (or IP address) for the Active IQ.'), cfg.PortOpt('aiq_port', help=('The TCP port to use for communication with the Active ' 'IQ. If not specified, the weigher driver will use 80 ' 'for HTTP and 443 for HTTPS.')), cfg.StrOpt('aiq_transport_type', default='https', choices=['http', 'https'], help=('The transport protocol used when communicating with ' 'the Active IQ. 
Valid values are ' 'http or https.')), cfg.BoolOpt('aiq_ssl_verify', default=False, help='Verifying the SSL certificate. Default is False.'), cfg.StrOpt('aiq_ssl_cert_path', help=("The path to a CA_BUNDLE file or directory with " "certificates of trusted CA. If set to a directory, it " "must have been processed using the c_rehash utility " "supplied with OpenSSL. If not informed, it will use the " "Mozilla's carefully curated collection of Root " "Certificates for validating the trustworthiness of SSL " "certificates.")), cfg.StrOpt('aiq_username', help=('Administrative user account name used to access the ' 'Active IQ.')), cfg.StrOpt('aiq_password', help=('Password for the administrative user account ' 'specified in the aiq_username option.'), secret=True), cfg.IntOpt('aiq_eval_method', default=0, help='Integer indicator of which evaluation method, defaults ' 'to 0 (0 - by index, 1 - normalized value, 2 - by ' 'literal value).'), cfg.ListOpt('aiq_priority_order', default=[ 'ops', 'latency', 'volume_count', 'size' ], help='Permutation of the list ["volume_count", "size", ' '"latency", “ops”]. Note that for volume_count and ' 'latency, the higher the values, the less optimal the ' 'resources. For capacity and ops, the higher the value ' 'the more desirable the resources. If metrics are to be ' 'considered with equal weights, concatenate the strings, ' 'separated by ":".' 'An example is ["volume_count", "size", “latency:ops”] ' 'if latency and ops want to have equal but minimum ' 'weights, or ["volume_count:size", "latency", “ops”] ' 'if volume_count and size have equal maximum weights. ' 'If not provided, the default order is ' '["volume_count", "size", "latency", “ops”].'), ] CONF = cfg.CONF CONF.register_opts(active_iq_weight_opts, ACTIVE_IQ_WEIGHER_GROUP) LOG = logging.getLogger(__name__) class NetAppAIQWeigher(base_host.BaseHostWeigher): """AIQ Weigher. 
Assign weights based on NetApp Active IQ tool.""" def __init__(self, *args, **kwargs): super(NetAppAIQWeigher, self).__init__(*args, **kwargs) self.configuration = CONF[ACTIVE_IQ_WEIGHER_GROUP] self.host = self.configuration.aiq_hostname if not self.host: raise exception.NetappActiveIQWeigherRequiredParameter( config="aiq_hostname") self.username = self.configuration.aiq_username if not self.username: raise exception.NetappActiveIQWeigherRequiredParameter( config="aiq_username") self.password = self.configuration.aiq_password if not self.password: raise exception.NetappActiveIQWeigherRequiredParameter( config="aiq_password") self.protocol = self.configuration.aiq_transport_type self.port = self.configuration.aiq_port if not self.port: self.port = "80" if self.protocol == "http" else "443" self.ssl_verify = self.configuration.aiq_ssl_verify if self.ssl_verify and self.configuration.aiq_ssl_cert_path: self.ssl_verify = self.configuration.aiq_ssl_cert_path self.eval_method = self.configuration.aiq_eval_method self.priority_order = self.configuration.aiq_priority_order def _weigh_object(self, host_state, weight_properties): """Weight for a specific object from parent abstract class""" # NOTE(felipe_rodrigues): this abstract class method is not called for # the AIQ weigher, since it does not weigh one single object. raise NotImplementedError() def _weigh_active_iq(self, netapp_aggregates_location, weight_properties): """Determine host's rating based on a Active IQ.""" size = weight_properties.get('size') share_type = weight_properties.get('share_type', {}) performance_level_name = share_type.get('extra_specs', {}).get( 'netapp:performance_service_level_name') # retrieves the performance service level key if a PSL name is given. performance_level_id = None if performance_level_name: performance_level_id = self._get_performance_level_id( performance_level_name) if not performance_level_id: return [] # retrieves the equivalent active IQ keys of the pools. 
resource_keys = self._get_resource_keys(netapp_aggregates_location) if len(resource_keys) == 0: return [] result = self._balance_aggregates(resource_keys, size, performance_level_id) return result def _get_url(self): """Get the base URL for REST requests.""" host = self.host if ':' in host: host = '[%s]' % host return f'{self.protocol}://{host}:{self.port}/api/' def _get_request_method(self, method, session): """Returns the request method to be used in the REST call.""" request_methods = { 'post': session.post, 'get': session.get, 'put': session.put, 'delete': session.delete, 'patch': session.patch, } return request_methods[method] def _get_session_method(self, method): """Get the REST method from the session.""" # NOTE(felipe_rodrigues): request resilient of temporary network # failures (like name resolution failure), retrying until 5 times. _session = requests.Session() max_retries = retry.Retry(total=5, connect=5, read=2, backoff_factor=1) adapter = HTTPAdapter(max_retries=max_retries) _session.mount('%s://' % self.protocol, adapter) _session.auth = auth.HTTPBasicAuth(self.username, self.password) _session.verify = self.ssl_verify _session.headers = {} return self._get_request_method(method, _session) def _call_active_iq(self, action_path, method, body=None): """Call the Active IQ REST API.""" rest_method = self._get_session_method(method) url = self._get_url() + action_path msg_args = { "method": method.upper(), "url": url, "body": body, } LOG.debug("REQUEST: %(method)s %(url)s Body=%(body)s", msg_args) response = rest_method(url, json=body) code = response.status_code response_body = response.content msg_args = { "code": code, "body": response_body, } LOG.debug("RESPONSE: %(code)s Body=%(body)s", msg_args) return code, response_body def _get_performance_level_id(self, performance_level_name): """Gets the ID of a performance level name.""" psl_endpoint = (f'storage-provider/performance-service-levels?' 
f'name={performance_level_name}') try: code, res = self._call_active_iq(psl_endpoint, "get") except Exception as e: LOG.error("Could not retrieve the key of the performance service " "level named as '%(psl)s'. Skipping the weigher. " "Error: %(error)s", {'psl': performance_level_name, 'error': e}) LOG.error(e) return None if code != 200: LOG.error("Could not retrieve the key of the performance service " "level named as '%(psl)s'. Skipping the weigher.", {'psl': performance_level_name}) return None res = jsonutils.loads(res) if res else {} psl_list = res.get('records', []) if len(psl_list) == 0: LOG.error("Could not found any performance service level named " "as '%s'. Skipping the weigher.", performance_level_name) return None return psl_list[0].get("key", None) def _get_aggregate_identifier(self, aggr_name, cluster_name): """Returns the string identifier of an aggregate on a cluster.""" return f'{aggr_name}:{cluster_name}' def _get_resource_keys(self, netapp_aggregates_location): """Map the aggregates names to the AIQ resource keys.""" aggregate_endpoint = 'datacenter/storage/aggregates' try: code, res = self._call_active_iq(aggregate_endpoint, "get") except Exception as e: LOG.error("Could not retrieve the aggregates resource keys. " "Skipping the weigher. Error: %s", e) LOG.error(e) return [] if code != 200: LOG.error("Could not retrieve the aggregates resource keys. " "Skipping the weigher.") return [] res = jsonutils.loads(res) if res else {} aggr_map = {} for aggr in res.get('records', []): identifier = self._get_aggregate_identifier( aggr["name"], aggr["cluster"]["name"]) aggr_map[identifier] = aggr["key"] # we must keep the lists with the same order. resource_keys = [] found_pool_keys = [] for identifier in netapp_aggregates_location: if identifier in aggr_map: found_pool_keys.append(identifier) # If a pool could not be found, it is marked as resource key 0. 
resource_keys.append(aggr_map.get(identifier, 0)) LOG.debug("The following pools will be evaluated by Active IQ: %s", found_pool_keys) return resource_keys def _balance_aggregates(self, resource_keys, size, performance_level_uuid): """Call AIQ to generate the weights of each aggregate.""" balance_endpoint = 'storage-provider/data-placement/balance' body = { "capacity": f'{size}GB', "eval_method": self.eval_method, # NOTE(felipe_rodrigues): from Active IQ documentation, the # opt_method only works as 0. "opt_method": 0, "priority_order": self.priority_order, "separate_flag": False, # NOTE(felipe_rodrigues): remove the keys marked with 0, since they # are not found the pool keys. "resource_keys": [key for key in resource_keys if key != 0], } if performance_level_uuid: body["ssl_key"] = performance_level_uuid try: code, res = self._call_active_iq( balance_endpoint, "post", body=body) except Exception as e: LOG.error("Could not balance the aggregates. Skipping the " "weigher. Error: %s", e) LOG.error(e) return [] if code != 200: LOG.error("Could not balance the aggregates. Skipping the " "weigher.") return [] res = jsonutils.loads(res) if res else [] weight_map = {} for aggr in res: weight_map[aggr["key"]] = aggr["scores"]["total_weighted_score"] # it must keep the lists with the same order. weights = [] for key in resource_keys: weights.append(weight_map.get(key, 0.0)) return weights def weigh_objects(self, weighed_obj_list, weight_properties): """Weigh multiple objects using Active IQ.""" netapp_aggregates_location = [] for obj in weighed_obj_list: # if at least one host is not from NetApp, the entire weigher is # skipped. 
if obj.obj.vendor_name != "NetApp": LOG.debug( "Skipping Active IQ weigher given that some backends " "are not from NetApp.") return [] else: cluster_name = obj.obj.capabilities.get("netapp_cluster_name") aggr_name = obj.obj.pool_name netapp_aggregates_location.append( self._get_aggregate_identifier(aggr_name, cluster_name)) result = self._weigh_active_iq( netapp_aggregates_location, weight_properties) LOG.debug("Active IQ weight result: %s", result) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/scheduler/weighers/pool.py0000664000175000017500000000364300000000000021443 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from manila import context from manila.db import api as db_api from manila.scheduler.weighers import base_host from manila.share import utils pool_weight_opts = [ cfg.FloatOpt('pool_weight_multiplier', default=1.0, help='Multiplier used for weighing pools which have ' 'existing share servers. 
Negative numbers mean to spread' ' vs stack.'), ] CONF = cfg.CONF CONF.register_opts(pool_weight_opts) class PoolWeigher(base_host.BaseHostWeigher): def weight_multiplier(self): """Override the weight multiplier.""" return CONF.pool_weight_multiplier def _weigh_object(self, host_state, weight_properties): """Pools with existing share server win.""" pool_mapping = weight_properties.get('server_pools_mapping', {}) if not pool_mapping: return 0 ctx = context.get_admin_context() host = utils.extract_host(host_state.host, 'backend') servers = db_api.share_server_get_all_by_host(ctx, host) pool = utils.extract_host(host_state.host, 'pool') for server in servers: if any(pool == p['pool_name'] for p in pool_mapping.get( server['id'], [])): return 1 return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/service.py0000664000175000017500000004042500000000000016336 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Generic Node base class for all workers that run on hosts.""" import inspect import os import random from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging from oslo_service import service from oslo_service import wsgi from oslo_utils import importutils from manila import context from manila import coordination from manila import db from manila import exception from manila import rpc from manila import utils from manila import version osprofiler_initializer = importutils.try_import('osprofiler.initializer') profiler = importutils.try_import('osprofiler.profiler') profiler_opts = importutils.try_import('osprofiler.opts') LOG = log.getLogger(__name__) service_opts = [ cfg.IntOpt('report_interval', default=10, help='Seconds between nodes reporting state to datastore.'), cfg.IntOpt('cleanup_interval', min=300, default=1800, help='Seconds between cleaning up the stopped nodes.'), cfg.IntOpt('periodic_interval', default=60, help='Seconds between running periodic tasks.'), cfg.IntOpt('periodic_fuzzy_delay', default=60, help='Range of seconds to randomly delay when starting the ' 'periodic task scheduler to reduce stampeding. ' '(Disable by setting to 0)'), cfg.HostAddressOpt('osapi_share_listen', default="::", help='IP address for OpenStack Share API to listen ' 'on.'), cfg.PortOpt('osapi_share_listen_port', default=8786, help='Port for OpenStack Share API to listen on.'), cfg.IntOpt('osapi_share_workers', default=1, help='Number of workers for OpenStack Share API service.'), cfg.BoolOpt('osapi_share_use_ssl', default=False, help='Wraps the socket in a SSL context if True is set. 
' 'A certificate file and key file must be specified.'), ] CONF = cfg.CONF CONF.register_opts(service_opts) if profiler_opts: profiler_opts.set_defaults(CONF) def setup_profiler(binary, host): if (osprofiler_initializer is None or profiler is None or profiler_opts is None): LOG.debug('osprofiler is not present') return if CONF.profiler.enabled: osprofiler_initializer.init_from_conf( conf=CONF, context=context.get_admin_context().to_dict(), project="manila", service=binary, host=host ) LOG.warning("OSProfiler is enabled.") class Service(service.Service): """Service object for binaries running on hosts. A service takes a manager and enables rpc by listening to queues based on topic. It also periodically runs tasks on the manager and reports it state to the database services table. """ def __init__(self, host, binary, topic, manager, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, coordination=False, *args, **kwargs): super(Service, self).__init__() if not rpc.initialized(): rpc.init(CONF) self.host = host self.binary = binary self.topic = topic self.manager_class_name = manager manager_class = importutils.import_class(self.manager_class_name) if CONF.profiler.enabled and profiler is not None: manager_class = profiler.trace_cls("rpc")(manager_class) self.service = None self.manager = manager_class(host=self.host, service_name=service_name, *args, **kwargs) self.availability_zone = self.manager.availability_zone self.report_interval = report_interval self.cleanup_interval = CONF.cleanup_interval self.periodic_interval = periodic_interval self.periodic_fuzzy_delay = periodic_fuzzy_delay self.saved_args, self.saved_kwargs = args, kwargs self.coordinator = coordination self.rpcserver = None def start(self): version_string = version.version_string() LOG.info('Starting %(topic)s node (version %(version_string)s)', {'topic': self.topic, 'version_string': version_string}) self.model_disconnected = False ctxt = 
context.get_admin_context() setup_profiler(self.binary, self.host) try: service_ref = db.service_get_by_args(ctxt, self.host, self.binary) self.service_id = service_ref['id'] db.service_update(ctxt, self.service_id, {'state': 'down'}) except exception.NotFound: self._create_service_ref(ctxt) self.manager.init_host(service_id=self.service_id) if self.coordinator: coordination.LOCK_COORDINATOR.start() LOG.debug("Creating RPC server for service %s.", self.topic) target = messaging.Target(topic=self.topic, server=self.host) endpoints = [self.manager] endpoints.extend(self.manager.additional_endpoints) self.rpcserver = rpc.get_server(target, endpoints) self.rpcserver.start() self.manager.init_host_with_rpc() if self.report_interval: self.tg.add_timer(self.report_interval, self.report_state, initial_delay=self.report_interval) self.tg.add_timer(self.cleanup_interval, self.cleanup_services, initial_delay=self.cleanup_interval) if self.periodic_interval: if self.periodic_fuzzy_delay: initial_delay = random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None self.tg.add_timer(self.periodic_interval, self.periodic_tasks, initial_delay=initial_delay) def _create_service_ref(self, context): service_args = { 'host': self.host, 'binary': self.binary, 'topic': self.topic, 'state': 'up', 'report_count': 0, 'availability_zone': self.availability_zone } service_ref = db.service_create(context, service_args) self.service_id = service_ref['id'] def __getattr__(self, key): manager = self.__dict__.get('manager', None) return getattr(manager, key) @classmethod def create(cls, host=None, binary=None, topic=None, manager=None, report_interval=None, periodic_interval=None, periodic_fuzzy_delay=None, service_name=None, coordination=False): """Instantiates class and passes back application object. 
:param host: defaults to CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'manila-' part :param manager: defaults to CONF._manager :param report_interval: defaults to CONF.report_interval :param periodic_interval: defaults to CONF.periodic_interval :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay """ if not host: host = CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary if not manager: subtopic = topic.rpartition('manila-')[2] manager = CONF.get('%s_manager' % subtopic, None) if report_interval is None: report_interval = CONF.report_interval if periodic_interval is None: periodic_interval = CONF.periodic_interval if periodic_fuzzy_delay is None: periodic_fuzzy_delay = CONF.periodic_fuzzy_delay service_obj = cls(host, binary, topic, manager, report_interval=report_interval, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name, coordination=coordination) return service_obj def kill(self): """Destroy the service object in the datastore.""" self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: LOG.warning('Service killed that has no database entry.') def stop(self): # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. 
as we're shutting down anyway try: self.rpcserver.stop() except Exception: pass try: db.service_update(context.get_admin_context(), self.service_id, {'state': 'stopped'}) except exception.NotFound: LOG.warning('Service stopped that has no database entry.') if self.coordinator: try: coordination.LOCK_COORDINATOR.stop() except Exception: LOG.exception("Unable to stop the Tooz Locking " "Coordinator.") super(Service, self).stop(graceful=True) def wait(self): if self.rpcserver: self.rpcserver.wait() super(Service, self).wait() def periodic_tasks(self, raise_on_error=False): """Tasks to be run at a periodic interval.""" ctxt = context.get_admin_context() self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) def report_state(self): """Update the state of this service in the datastore.""" if not self.manager.is_service_ready(): # NOTE(haixin): If the service is still initializing or failed to # intialize. LOG.error('Manager for service %s is not ready yet, skipping state' ' update routine. Service will appear "down".', self.binary) return ctxt = context.get_admin_context() state_catalog = {} try: try: service_ref = db.service_get(ctxt, self.service_id) except exception.NotFound: LOG.debug('The service database object disappeared, ' 'Recreating it.') self._create_service_ref(ctxt) service_ref = db.service_get(ctxt, self.service_id) state_catalog['report_count'] = service_ref['report_count'] + 1 if (self.availability_zone != service_ref['availability_zone']['name']): state_catalog['availability_zone'] = self.availability_zone if utils.service_is_up(service_ref): state_catalog['state'] = 'up' else: if service_ref['state'] != 'stopped': state_catalog['state'] = 'down' db.service_update(ctxt, self.service_id, state_catalog) # TODO(termie): make this pattern be more elegant. 
if getattr(self, 'model_disconnected', False): self.model_disconnected = False LOG.error('Recovered model server connection!') # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable=W0702 if not getattr(self, 'model_disconnected', False): self.model_disconnected = True LOG.exception('model server went away') def cleanup_services(self): """Remove the stopped services of same topic from the datastore.""" ctxt = context.get_admin_context() try: services = db.service_get_all_by_topic(ctxt, self.topic) except exception.NotFound: LOG.debug('The service database object disappeared,' 'Exiting from cleanup.') return for svc in services: if (svc['state'] == 'stopped' and not utils.service_is_up(svc)): db.service_destroy(ctxt, svc['id']) class WSGIService(service.ServiceBase): """Provides ability to launch API from a 'paste' configuration.""" def __init__(self, name, loader=None): """Initialize, but do not start the WSGI server. :param name: The name of the WSGI server given to the loader. :param loader: Loads the WSGI application using the given name. :returns: None """ self.name = name self.manager = self._get_manager() self.loader = loader or wsgi.Loader(CONF) if not rpc.initialized(): rpc.init(CONF) self.app = self.loader.load_app(name) self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") # nosec B104 self.port = getattr(CONF, '%s_listen_port' % name, 0) self.workers = getattr(CONF, '%s_workers' % name, None) self.use_ssl = getattr(CONF, '%s_use_ssl' % name, False) if self.workers is not None and self.workers < 1: LOG.warning( "Value of config option %(name)s_workers must be integer " "greater than 1. Input value ignored.", {'name': name}) # Reset workers to default self.workers = None self.server = wsgi.Server( CONF, name, self.app, host=self.host, port=self.port, use_ssl=self.use_ssl ) def _get_manager(self): """Initialize a Manager object appropriate for this service. 
Use the service name to look up a Manager subclass from the configuration and initialize an instance. If no class name is configured, just return None. :returns: a Manager instance, or None. """ fl = '%s_manager' % self.name if fl not in CONF: return None manager_class_name = CONF.get(fl, None) if not manager_class_name: return None manager_class = importutils.import_class(manager_class_name) return manager_class() def start(self): """Start serving this service using loaded configuration. Also, retrieve updated port number in case '0' was passed in, which indicates a random port should be used. :returns: None """ setup_profiler(self.name, self.host) if self.manager: self.manager.init_host() self.server.start() self.port = self.server.port def stop(self): """Stop serving this API. :returns: None """ self.server.stop() def wait(self): """Wait for the service to stop serving this API. :returns: None """ self.server.wait() def reset(self): """Reset server greenpool size to default. :returns: None """ self.server.reset() def process_launcher(): return service.ProcessLauncher(CONF, restart_method='mutate') # NOTE(vish): the global launcher is to maintain the existing # functionality of calling service.serve + # service.wait _launcher = None def serve(server, workers=None): global _launcher if _launcher: raise RuntimeError('serve() can only be called once') _launcher = service.launch(CONF, server, workers=workers, restart_method='mutate') def wait(): CONF.log_opt_values(LOG, log.DEBUG) try: _launcher.wait() except KeyboardInterrupt: _launcher.stop() rpc.cleanup() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.893672 manila-21.0.0/manila/services/0000775000175000017500000000000000000000000016142 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/services/__init__.py0000664000175000017500000000000000000000000020241 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/services/api.py0000664000175000017500000000235500000000000017272 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from webob import exc from manila.db import base from manila.share import rpcapi as share_rpcapi CONF = cfg.CONF class API(base.Base): """API for handling service actions.""" def __init__(self): super(API, self).__init__() self.share_rpcapi = share_rpcapi.ShareAPI() def ensure_shares(self, context, service, host): """Start the ensure shares in a given host.""" if service['state'] != "up": raise exc.HTTPConflict( "The service must have its state set to 'up' prior to running " "ensure shares.") self.share_rpcapi.ensure_driver_resources(context, host) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.897672 manila-21.0.0/manila/share/0000775000175000017500000000000000000000000015421 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/__init__.py0000664000175000017500000000200300000000000017525 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of 
the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from manila.share import ' elsewhere. import oslo_utils.importutils as import_utils from manila.common import config CONF = config.CONF API = import_utils.import_class(CONF.share_api_class) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/access.py0000664000175000017500000006540200000000000017243 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import ipaddress from oslo_log import log from manila.common import constants from manila.i18n import _ from manila import utils LOG = log.getLogger(__name__) def locked_access_rules_operation(operation): """Lock decorator for access rules operations. 
Takes a named lock prior to executing the operation. The lock is named with the ID of the share instance to which the access rule belongs. Intended use: If an database operation to retrieve or update access rules uses this decorator, it will block actions on all access rules of the share instance until the named lock is free. This is used to avoid race conditions while performing access rules updates on a given share instance. """ def wrapped(*args, **kwargs): instance_id = kwargs.get('share_instance_id') @utils.synchronized( "locked_access_rules_operation_by_share_instance_%s" % instance_id, external=True) def locked_operation(*_args, **_kwargs): return operation(*_args, **_kwargs) return locked_operation(*args, **kwargs) return wrapped class ShareInstanceAccessDatabaseMixin(object): @locked_access_rules_operation def get_and_update_share_instance_access_rules_status( self, context, status=None, conditionally_change=None, share_instance_id=None): """Get and update the access_rules_status of a share instance. :param status: Set this parameter only if you want to omit the conditionally_change parameter; i.e, if you want to force a state change on the share instance regardless of the prior state. :param conditionally_change: Set this parameter to a dictionary of rule state transitions to be made. The key is the expected access_rules_status and the value is the state to transition the access_rules_status to. If the state is not as expected, no transition is performed. Default is {}, which means no state transitions will be made. :returns share_instance: if an update was made. 
""" if status is not None: updates = {'access_rules_status': status} elif conditionally_change: share_instance = self.db.share_instance_get( context, share_instance_id) access_rules_status = share_instance['access_rules_status'] try: updates = { 'access_rules_status': conditionally_change[access_rules_status], } except KeyError: updates = {} else: updates = {} if updates: share_instance = self.db.share_instance_update( context, share_instance_id, updates, with_share_data=True) return share_instance def update_share_instances_access_rules_status( self, context, status, share_instance_ids): """Update the access_rules_status of all share instances. .. note:: Before making this call, make sure that all share instances have their status set to a value that will block new operations to happen during this update. :param status: Force a state change on all share instances regardless of the prior state. :param share_instance_ids: List of share instance ids to have their access rules status updated. """ updates = {'access_rules_status': status} self.db.share_instance_status_update( context, share_instance_ids, updates) @locked_access_rules_operation def get_and_update_share_instance_access_rules(self, context, filters=None, updates=None, conditionally_change=None, share_instance_id=None): """Get and conditionally update all access rules of a share instance. :param updates: Set this parameter to a dictionary of key:value pairs corresponding to the keys in the ShareInstanceAccessMapping model. Include 'state' in this dictionary only if you want to omit the conditionally_change parameter; i.e, if you want to force a state change on all filtered rules regardless of the prior state. This parameter is always honored, regardless of whether conditionally_change allows for a state transition as desired. 
Example:: { 'access_key': 'bob007680048318f4239dfc1c192d5', 'access_level': 'ro', } :param conditionally_change: Set this parameter to a dictionary of rule state transitions to be made. The key is the expected state of the access rule the value is the state to transition the access rule to. If the state is not as expected, no transition is performed. Default is {}, which means no state transitions will be made. Example:: { 'queued_to_apply': 'applying', 'queued_to_deny': 'denying', } """ instance_rules = self.db.share_access_get_all_for_instance( context, share_instance_id, filters=filters) if instance_rules and (updates or conditionally_change): if not updates: updates = {} if not conditionally_change: conditionally_change = {} for rule in instance_rules: mapping_state = rule['state'] rule_updates = copy.deepcopy(updates) try: rule_updates['state'] = conditionally_change[mapping_state] except KeyError: pass if rule_updates: self.db.share_instance_access_update( context, rule['access_id'], share_instance_id, rule_updates) # Refresh the rules after the updates rules_to_get = { 'access_id': tuple([i['access_id'] for i in instance_rules]), } instance_rules = self.db.share_access_get_all_for_instance( context, share_instance_id, filters=rules_to_get) return instance_rules def get_share_instance_access_rules(self, context, filters=None, share_instance_id=None): return self.get_and_update_share_instance_access_rules( context, filters, None, None, share_instance_id) @locked_access_rules_operation def get_and_update_share_instance_access_rule(self, context, rule_id, updates=None, share_instance_id=None, conditionally_change=None): """Get and conditionally update a given share instance access rule. :param updates: Set this parameter to a dictionary of key:value pairs corresponding to the keys in the ShareInstanceAccessMapping model. 
Include 'state' in this dictionary only if you want to omit the conditionally_change parameter; i.e, if you want to force a state change regardless of the prior state. :param conditionally_change: Set this parameter to a dictionary of rule state transitions to be made. The key is the expected state of the access rule the value is the state to transition the access rule to. If the state is not as expected, no transition is performed. Default is {}, which means no state transitions will be made. Example:: { 'queued_to_apply': 'applying', 'queued_to_deny': 'denying', } """ instance_rule_mapping = self.db.share_instance_access_get( context, rule_id, share_instance_id) if not updates: updates = {} if conditionally_change: mapping_state = instance_rule_mapping['state'] try: updated_state = conditionally_change[mapping_state] updates.update({'state': updated_state}) except KeyError: msg = ("The state of the access rule %(rule_id)s (allowing " "access to share instance %(si)s) was not updated " "because its state was modified by another operation.") msg_payload = { 'si': share_instance_id, 'rule_id': rule_id, } LOG.debug(msg, msg_payload) if updates: self.db.share_instance_access_update( context, rule_id, share_instance_id, updates) # Refresh the rule after update instance_rule_mapping = self.db.share_instance_access_get( context, rule_id, share_instance_id) return instance_rule_mapping @locked_access_rules_operation def delete_share_instance_access_rules(self, context, access_rules, share_instance_id=None): for rule in access_rules: self.db.share_instance_access_delete(context, rule['id']) class ShareInstanceAccess(ShareInstanceAccessDatabaseMixin): def __init__(self, db, driver): self.db = db self.driver = driver def update_access_rules(self, context, share_instance_id, delete_all_rules=False, share_server=None): """Update access rules for a given share instance. 
:param context: request context :param share_instance_id: ID of the share instance :param delete_all_rules: set this parameter to True if all existing access rules must be denied for a given share instance :param share_server: Share server model or None """ share_instance = self.db.share_instance_get( context, share_instance_id, with_share_data=True) msg_payload = { 'si': share_instance_id, 'shr': share_instance['share_id'], } if delete_all_rules: updates = { 'state': constants.ACCESS_STATE_QUEUED_TO_DENY, } self.get_and_update_share_instance_access_rules( context, updates=updates, share_instance_id=share_instance_id) # Is there a sync in progress? If yes, ignore the incoming request. rule_filter = { 'state': (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_UPDATING), } syncing_rules = self.get_and_update_share_instance_access_rules( context, filters=rule_filter, share_instance_id=share_instance_id) if syncing_rules: msg = ("Access rules are being synced for share instance " "%(si)s belonging to share %(shr)s, any rule changes will " "be applied shortly.") LOG.debug(msg, msg_payload) else: rules_to_apply_or_update_or_deny = ( self._update_and_get_unsynced_access_rules_from_db( context, share_instance_id) ) if rules_to_apply_or_update_or_deny: msg = ("Updating access rules for share instance %(si)s " "belonging to share %(shr)s.") LOG.debug(msg, msg_payload) self._update_access_rules(context, share_instance_id, share_server=share_server) else: msg = ("All access rules have been synced for share instance " "%(si)s belonging to share %(shr)s.") LOG.debug(msg, msg_payload) def _update_access_rules(self, context, share_instance_id, share_server=None): # Refresh the share instance model share_instance = self.db.share_instance_get( context, share_instance_id, with_share_data=True) conditionally_change = { constants.STATUS_ACTIVE: constants.SHARE_INSTANCE_RULES_SYNCING, } share_instance = ( 
self.get_and_update_share_instance_access_rules_status( context, conditionally_change=conditionally_change, share_instance_id=share_instance_id) or share_instance ) rules_to_be_removed_from_db = [] # Populate rules to send to the driver (access_rules_on_share, add_rules, delete_rules, update_rules) = ( self._get_rules_to_send_to_driver(context, share_instance) ) if share_instance['cast_rules_to_readonly']: # Ensure read/only semantics for a migrating instances access_rules_on_share = self._set_rules_to_readonly( access_rules_on_share, share_instance) add_rules = [] rules_to_be_removed_from_db = delete_rules delete_rules = [] update_rules = [] try: driver_rule_updates = self._update_rules_through_share_driver( context, share_instance, access_rules_on_share, add_rules, delete_rules, update_rules, rules_to_be_removed_from_db, share_server) self.process_driver_rule_updates( context, driver_rule_updates, share_instance_id) # Update access rules that are still in 'applying/updating' state conditionally_change = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ACTIVE, constants.ACCESS_STATE_UPDATING: constants.ACCESS_STATE_ACTIVE, } self.get_and_update_share_instance_access_rules( context, share_instance_id=share_instance_id, conditionally_change=conditionally_change) except Exception: conditionally_change_rule_state = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_DENYING: constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_UPDATING: constants.ACCESS_STATE_ERROR, } self.get_and_update_share_instance_access_rules( context, share_instance_id=share_instance_id, conditionally_change=conditionally_change_rule_state) conditionally_change_access_rules_status = { constants.ACCESS_STATE_ACTIVE: constants.STATUS_ERROR, constants.SHARE_INSTANCE_RULES_SYNCING: constants.STATUS_ERROR, } self.get_and_update_share_instance_access_rules_status( context, share_instance_id=share_instance_id, 
conditionally_change=conditionally_change_access_rules_status) raise if rules_to_be_removed_from_db: delete_rules = rules_to_be_removed_from_db self.delete_share_instance_access_rules( context, delete_rules, share_instance_id=share_instance['id']) self._loop_for_refresh_else_update_access_rules_status( context, share_instance_id, share_server) msg = _("Access rules were successfully modified for share instance " "%(si)s belonging to share %(shr)s.") msg_payload = { 'si': share_instance['id'], 'shr': share_instance['share_id'], } LOG.info(msg, msg_payload) def _update_rules_through_share_driver(self, context, share_instance, access_rules_to_be_on_share, add_rules, delete_rules, update_rules, rules_to_be_removed_from_db, share_server): driver_rule_updates = {} share_protocol = share_instance['share_proto'].lower() if (not self.driver.ipv6_implemented and share_protocol == 'nfs'): add_rules = self._filter_ipv6_rules(add_rules) delete_rules = self._filter_ipv6_rules(delete_rules) update_rules = self._filter_ipv6_rules(update_rules) access_rules_to_be_on_share = self._filter_ipv6_rules( access_rules_to_be_on_share) try: driver_rule_updates = self.driver.update_access( context, share_instance, access_rules_to_be_on_share, add_rules=add_rules, delete_rules=delete_rules, update_rules=update_rules, share_server=share_server ) or {} except NotImplementedError: # NOTE(u_glide): Fallback to legacy allow_access/deny_access # for drivers without update_access() method support # It is also possible that updating the access_level is not # permitted. self._update_access_fallback(context, add_rules, delete_rules, rules_to_be_removed_from_db, share_instance, share_server) return driver_rule_updates def _loop_for_refresh_else_update_access_rules_status(self, context, share_instance_id, share_server): # Do we need to re-sync or apply any new changes? 
if self._check_needs_refresh(context, share_instance_id): self._update_access_rules(context, share_instance_id, share_server=share_server) else: # Switch the share instance's access_rules_status to 'active' # if there are no more rules in 'error' state, else, ensure # 'error' state. rule_filter = {'state': constants.STATUS_ERROR} rules_in_error_state = ( self.get_and_update_share_instance_access_rules( context, filters=rule_filter, share_instance_id=share_instance_id) ) if not rules_in_error_state: conditionally_change = { constants.SHARE_INSTANCE_RULES_SYNCING: constants.STATUS_ACTIVE, constants.SHARE_INSTANCE_RULES_ERROR: constants.STATUS_ACTIVE, } self.get_and_update_share_instance_access_rules_status( context, conditionally_change=conditionally_change, share_instance_id=share_instance_id) else: conditionally_change = { constants.SHARE_INSTANCE_RULES_SYNCING: constants.SHARE_INSTANCE_RULES_ERROR, } self.get_and_update_share_instance_access_rules_status( context, conditionally_change=conditionally_change, share_instance_id=share_instance_id) def process_driver_rule_updates(self, context, driver_rule_updates, share_instance_id): for rule_id, rule_updates in driver_rule_updates.items(): if 'state' in rule_updates: # We allow updates *only* if the state is unchanged from # the time this update was initiated. It is possible # that the access rule was denied at the API prior to # the driver reporting that the access rule was added # successfully. 
state = rule_updates.pop('state') conditional_state_updates = { constants.ACCESS_STATE_APPLYING: state, constants.ACCESS_STATE_DENYING: state, constants.ACCESS_STATE_UPDATING: state, constants.ACCESS_STATE_ACTIVE: state, } else: conditional_state_updates = {} self.get_and_update_share_instance_access_rule( context, rule_id, updates=rule_updates, share_instance_id=share_instance_id, conditionally_change=conditional_state_updates) @staticmethod def _set_rules_to_readonly(access_rules_to_be_on_share, share_instance): LOG.debug("All access rules of share instance %s are being " "cast to read-only for a migration or because the " "instance is a readable replica.", share_instance['id']) for rule in access_rules_to_be_on_share: rule['access_level'] = constants.ACCESS_LEVEL_RO return access_rules_to_be_on_share @staticmethod def _filter_ipv6_rules(rules): filtered = [] for rule in rules: if rule['access_type'] == 'ip': ip_version = ipaddress.ip_network( str(rule['access_to'])).version if 6 == ip_version: continue filtered.append(rule) return filtered def _get_rules_to_send_to_driver(self, context, share_instance): add_rules = [] delete_rules = [] update_rules = [] access_filters = { 'state': (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_ACTIVE, constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_UPDATING), } existing_rules_in_db = self.get_and_update_share_instance_access_rules( context, filters=access_filters, share_instance_id=share_instance['id']) # Update queued rules to transitional states for rule in existing_rules_in_db: if rule['state'] == constants.ACCESS_STATE_APPLYING: add_rules.append(rule) elif rule['state'] == constants.ACCESS_STATE_DENYING: delete_rules.append(rule) elif rule['state'] == constants.ACCESS_STATE_UPDATING: update_rules.append(rule) delete_rule_ids = [r['id'] for r in delete_rules] access_rules_to_be_on_share = [ r for r in existing_rules_in_db if r['id'] not in delete_rule_ids ] return (access_rules_to_be_on_share, add_rules, 
delete_rules, update_rules) def _check_needs_refresh(self, context, share_instance_id): rules_to_apply_or_deny = ( self._update_and_get_unsynced_access_rules_from_db( context, share_instance_id) ) return any(rules_to_apply_or_deny) def _update_access_fallback(self, context, add_rules, delete_rules, remove_rules, share_instance, share_server): for rule in add_rules: LOG.info( "Applying access rule '%(rule)s' for share " "instance '%(instance)s'", {'rule': rule['id'], 'instance': share_instance['id']} ) self.driver.allow_access( context, share_instance, rule, share_server=share_server ) # NOTE(ganso): Fallback mode temporary compatibility workaround if remove_rules: delete_rules.extend(remove_rules) for rule in delete_rules: LOG.info( "Denying access rule '%(rule)s' from share " "instance '%(instance)s'", {'rule': rule['id'], 'instance': share_instance['id']} ) self.driver.deny_access( context, share_instance, rule, share_server=share_server ) def _update_and_get_unsynced_access_rules_from_db(self, context, share_instance_id): rule_filter = { 'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_QUEUED_TO_DENY, constants.ACCESS_STATE_QUEUED_TO_UPDATE), } conditionally_change = { constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_QUEUED_TO_DENY: constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_QUEUED_TO_UPDATE: constants.ACCESS_STATE_UPDATING, } rules_to_apply_or_deny = ( self.get_and_update_share_instance_access_rules( context, filters=rule_filter, share_instance_id=share_instance_id, conditionally_change=conditionally_change) ) return rules_to_apply_or_deny def reset_rules_to_queueing_states(self, context, share_instance_id, reset_active=False): """Reset applying and denying rules to queued states. This helper is useful when re-applying rule changes. 
:param context: the RequestContext object :param share_instance_id: ID of the share instance :param reset_active: If True, set "active" rules to "queued_to_apply" """ conditional_updates = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_DENYING: constants.ACCESS_STATE_QUEUED_TO_DENY, } if reset_active: conditional_updates.update({ constants.STATUS_ACTIVE: constants.ACCESS_STATE_QUEUED_TO_APPLY, }) self.get_and_update_share_instance_access_rules_status( context, share_instance_id=share_instance_id, conditionally_change={ constants.SHARE_INSTANCE_RULES_ERROR: constants.SHARE_INSTANCE_RULES_SYNCING, constants.ACCESS_STATE_ACTIVE: constants.SHARE_INSTANCE_RULES_SYNCING, }, ) self.get_and_update_share_instance_access_rules( context, share_instance_id=share_instance_id, conditionally_change=conditional_updates) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/api.py0000664000175000017500000057615700000000000016571 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to shares. 
""" import functools import json import re from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils from webob import exc from manila.api import common as api_common from manila.common import constants from manila import context as manila_context from manila import coordination from manila.data import rpcapi as data_rpcapi from manila.db import base from manila import exception from manila.i18n import _ from manila import policy from manila import quota from manila.scheduler import rpcapi as scheduler_rpcapi from manila.share import access from manila.share import rpcapi as share_rpcapi from manila.share import share_types from manila.share import utils as share_utils from manila import utils share_api_opts = [ cfg.BoolOpt('use_scheduler_creating_share_from_snapshot', default=False, help='If set to False, then share creation from snapshot will ' 'be performed on the same host. ' 'If set to True, then scheduler will be used.' 'When enabling this option make sure that filter ' 'CreateFromSnapshotFilter is enabled and to have hosts ' 'reporting replication_domain option.' ), cfg.StrOpt('default_mount_point_prefix', default='{project_id}_', help='Default prefix that will be used if none is provided' 'through share_type extra specs. Prefix will only be' 'used if share_type support mount_point_name.'), cfg.BoolOpt('is_deferred_deletion_enabled', default=False, help='Whether to delete shares and share snapshots in a ' 'deferred manner. Setting this option to True will cause ' 'quotas to be released immediately if a deletion request ' 'is accepted. 
Deletions may eventually fail, and ' 'rectifying them will require manual intervention.'), ] CONF = cfg.CONF CONF.register_opts(share_api_opts) LOG = log.getLogger(__name__) GB = 1048576 * 1024 QUOTAS = quota.QUOTAS AFFINITY_HINT = 'same_host' ANTI_AFFINITY_HINT = 'different_host' AFFINITY_KEY = "__affinity_same_host" ANTI_AFFINITY_KEY = "__affinity_different_host" def locked_security_service_update_operation(operation): """Lock decorator for security service operation. Takes a named lock prior to executing the operation. The lock is named with the ids of the security services. """ def wrapped(*args, **kwargs): new_id = kwargs.get('new_security_service_id', '') current_id = kwargs.get('current_security_service_id', '') @coordination.synchronized( 'locked-security-service-update-operation-%(new)s-%(curr)s' % { 'new': new_id, 'curr': current_id, }) def locked_security_service_operation(*_args, **_kwargs): return operation(*_args, **_kwargs) return locked_security_service_operation(*args, **kwargs) return wrapped def locked_share_server_update_allocations_operation(operation): """Lock decorator for share server update allocations operation. Takes a named lock prior to executing the operation. The lock is named with the ids of the share network and the region to be updated. 
""" def wrapped(*args, **kwargs): az_id = kwargs.get('availability_zone_id') share_net_id = kwargs.get('share_network_id') @coordination.synchronized( 'locked-share-server-update-allocations-operation-%(net)s-%(az)s' % { 'net': share_net_id, 'az': az_id, }) def locked_share_server_allocations_operation(*_args, **_kwargs): return operation(*_args, **_kwargs) return locked_share_server_allocations_operation(*args, **kwargs) return wrapped class API(base.Base): """API for interacting with the share manager.""" def __init__(self, db_driver=None): super(API, self).__init__(db_driver) self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.share_rpcapi = share_rpcapi.ShareAPI() self.access_helper = access.ShareInstanceAccess(self.db, None) coordination.LOCK_COORDINATOR.start() # pylint: disable = no-self-argument def prevent_locked_action_on_share(arg): """Decorator for preventing a locked method from executing on a share. Add this decorator to any API method which takes a RequestContext object as a first parameter and a share object as the second parameter. Can be used in any of the following forms @prevent_locked_action_on_share @prevent_locked_action_on_share('my_action_name') :param arg: Can either be the function being decorated or a str containing the 'action' that we need to check resource locks for. If no action name is provided, the function name is assumed to be the action name. 
""" action_name = None def check_for_locks(f): @functools.wraps(f) def wrapper(self, context, share, *args, **kwargs): action = action_name or f.__name__ resource_locks, __ = ( self.db.resource_lock_get_all( context.elevated(), filters={'resource_id': share['id'], 'resource_action': action, 'all_projects': True}, ) ) if resource_locks: msg_payload = { 'locks': ', '.join( [lock['id'] for lock in resource_locks] ), 'action': action, } msg = (f"Resource lock/s [{msg_payload['locks']}] " f"prevent {action} action.") raise exception.InvalidShare(msg) return f(self, context, share, *args, **kwargs) return wrapper if callable(arg): return check_for_locks(arg) else: action_name = arg return check_for_locks def _get_all_availability_zones_with_subnets(self, context, share_network_id): compatible_azs_name = [] compatible_azs_multiple = {} for az in self.db.availability_zone_get_all(context): subnets = ( self.db.share_network_subnets_get_all_by_availability_zone_id( context, share_network_id=share_network_id, availability_zone_id=az['id'])) if subnets: compatible_azs_multiple[az['id']] = len(subnets) > 1 compatible_azs_name.append(az['name']) return compatible_azs_name, compatible_azs_multiple @staticmethod def check_if_encryption_keys_quotas_exceeded(context, quota_exception): overs = quota_exception.kwargs['overs'] usages = quota_exception.kwargs['usages'] quotas = quota_exception.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'encryption_keys' in overs: LOG.warning("Encryption keys quota exceeded for %(s_pid)s " "(%(d_consumed)d of %(d_quota)d keys already " "consumed).", { 's_pid': context.project_id, 'd_consumed': _consumed('encryption_keys'), 'd_quota': quotas['encryption_keys']}) raise exception.EncryptionKeysLimitExceeded() @staticmethod def check_if_share_quotas_exceeded(context, quota_exception, share_size, operation='create'): overs = quota_exception.kwargs['overs'] usages = quota_exception.kwargs['usages'] 
quotas = quota_exception.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'gigabytes' in overs: LOG.warning("Quota exceeded for %(s_pid)s, " "tried to %(operation)s " "%(s_size)sG share (%(d_consumed)dG of " "%(d_quota)dG already consumed).", { 's_pid': context.project_id, 's_size': share_size, 'd_consumed': _consumed('gigabytes'), 'd_quota': quotas['gigabytes'], 'operation': operation}) raise exception.ShareSizeExceedsAvailableQuota() elif 'shares' in overs: LOG.warning("Quota exceeded for %(s_pid)s, " "tried to %(operation)s " "share (%(d_consumed)d shares " "already consumed).", { 's_pid': context.project_id, 'd_consumed': _consumed('shares'), 'operation': operation}) raise exception.ShareLimitExceeded(allowed=quotas['shares']) @staticmethod def check_if_replica_quotas_exceeded(context, quota_exception, replica_size, resource_type='share_replica'): overs = quota_exception.kwargs['overs'] usages = quota_exception.kwargs['usages'] quotas = quota_exception.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'share_replicas' in overs: LOG.warning("Quota exceeded for %(s_pid)s, " "unable to create share-replica (%(d_consumed)d " "of %(d_quota)d already consumed).", { 's_pid': context.project_id, 'd_consumed': _consumed('share_replicas'), 'd_quota': quotas['share_replicas']}) exception_kwargs = {} if resource_type != 'share_replica': msg = _("Failed while creating a share with replication " "support. 
Maximum number of allowed share-replicas " "is exceeded.") exception_kwargs['message'] = msg raise exception.ShareReplicasLimitExceeded(**exception_kwargs) elif 'replica_gigabytes' in overs: LOG.warning("Quota exceeded for %(s_pid)s, " "unable to create a share replica size of " "%(s_size)sG (%(d_consumed)dG of " "%(d_quota)dG already consumed).", { 's_pid': context.project_id, 's_size': replica_size, 'd_consumed': _consumed('replica_gigabytes'), 'd_quota': quotas['replica_gigabytes']}) exception_kwargs = {} if resource_type != 'share_replica': msg = _("Failed while creating a share with replication " "support. Requested share replica exceeds allowed " "project/user or share type gigabytes quota.") exception_kwargs['message'] = msg raise exception.ShareReplicaSizeExceedsAvailableQuota( **exception_kwargs) def create(self, context, share_proto, size, name, description, snapshot_id=None, availability_zone=None, metadata=None, share_network_id=None, share_type=None, is_public=False, share_group_id=None, share_group_snapshot_member=None, availability_zones=None, scheduler_hints=None, az_request_multiple_subnet_support_map=None, mount_point_name=None, encryption_key_ref=None): """Create new share.""" api_common.check_metadata_properties(metadata) if snapshot_id is not None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['aggregate_status'] != constants.STATUS_AVAILABLE: msg = _("status must be '%s'") % constants.STATUS_AVAILABLE raise exception.InvalidShareSnapshot(reason=msg) if not size: size = snapshot['size'] else: snapshot = None if not strutils.is_int_like(size) or int(size) <= 0: msg = (_("Share size '%s' must be an integer and greater than 0") % size) raise exception.InvalidInput(reason=msg) # make sure size has been convert to int. 
size = int(size) if snapshot and size < snapshot['size']: msg = (_("Share size '%s' must be equal or greater " "than snapshot size") % size) raise exception.InvalidInput(reason=msg) # ensure we pass the share_type provisioning filter on size share_types.provision_filter_on_size(context, share_type, size) if snapshot is None: share_type_id = share_type['id'] if share_type else None else: source_share = self.db.share_get(context, snapshot['share_id']) source_share_az = source_share['instance']['availability_zone'] if availability_zone is None: availability_zone = source_share_az elif (availability_zone != source_share_az and not CONF.use_scheduler_creating_share_from_snapshot): LOG.error("The specified availability zone must be the same " "as parent share when you have the configuration " "option 'use_scheduler_creating_share_from_snapshot'" " set to False.") msg = _("The specified availability zone must be the same " "as the parent share when creating from snapshot.") raise exception.InvalidInput(reason=msg) if share_type is None: # Grab the source share's share_type if no new share type # has been provided. share_type_id = source_share['instance']['share_type_id'] share_type = share_types.get_share_type(context, share_type_id) else: share_type_id = share_type['id'] if share_type_id != source_share['instance']['share_type_id']: msg = _("Invalid share type specified: the requested " "share type must match the type of the source " "share. If a share type is not specified when " "requesting a new share from a snapshot, the " "share type of the source share will be applied " "to the new share.") raise exception.InvalidInput(reason=msg) supported_share_protocols = ( proto.upper() for proto in CONF.enabled_share_protocols) if not (share_proto and share_proto.upper() in supported_share_protocols): msg = (_("Invalid share protocol provided: %(provided)s. " "It is either disabled or unsupported. 
Available " "protocols: %(supported)s") % dict( provided=share_proto, supported=CONF.enabled_share_protocols)) raise exception.InvalidInput(reason=msg) self.check_is_share_size_within_per_share_quota_limit(context, size) deltas = {'shares': 1, 'gigabytes': size} share_type_attributes = self.get_share_attributes_from_share_type( share_type) mount_point_name_support = share_type_attributes.get( constants.ExtraSpecs.MOUNT_POINT_NAME_SUPPORT, None) if mount_point_name is not None: if not mount_point_name_support: msg = _("Setting a mount point name is not supported" " by the share type used: %s." % share_type_id) raise exception.InvalidInput(reason=msg) mount_point_name = self._prefix_mount_point_name( share_type, context, mount_point_name ) share_type_supports_replication = share_type_attributes.get( 'replication_type', None) if share_type_supports_replication: deltas.update( {'share_replicas': 1, 'replica_gigabytes': size}) if encryption_key_ref: # Make sure encryption_key_ref is valid UUID if not uuidutils.is_uuid_like(encryption_key_ref): msg = _('Encryption key ref is not valid UUID') raise exception.InvalidInput(reason=msg) # Check if its new encryption_key_ref filters = { 'encryption_key_ref': encryption_key_ref, 'project_id': context.project_id, } is_existing_key = self.db.encryption_keys_get_all( context, filters=filters) if not is_existing_key: deltas.update({'encryption_keys': 1}) try: reservations = QUOTAS.reserve( context, share_type_id=share_type_id, **deltas) except exception.OverQuota as e: self.check_if_share_quotas_exceeded(context, e, size) self.check_if_encryption_keys_quotas_exceeded(context, e) if share_type_supports_replication: self.check_if_replica_quotas_exceeded(context, e, size, resource_type='share') share_group = None if share_group_id: try: share_group = self.db.share_group_get(context, share_group_id) except exception.NotFound as e: raise exception.InvalidParameterValue(e.message) if (not share_group_snapshot_member and not 
(share_group['status'] == constants.STATUS_AVAILABLE)): params = { 'avail': constants.STATUS_AVAILABLE, 'status': share_group['status'], } msg = _("Share group status must be %(avail)s, got " "%(status)s.") % params raise exception.InvalidShareGroup(message=msg) if share_type_id: share_group_st_ids = [ st['share_type_id'] for st in share_group.get('share_types', [])] if share_type_id not in share_group_st_ids: params = { 'type': share_type_id, 'group': share_group_id, } msg = _("The specified share type (%(type)s) is not " "supported by the specified share group " "(%(group)s).") % params raise exception.InvalidParameterValue(msg) if not share_group.get('share_network_id') == share_network_id: params = { 'net': share_network_id, 'group': share_group_id } msg = _("The specified share network (%(net)s) is not " "supported by the specified share group " "(%(group)s).") % params raise exception.InvalidParameterValue(msg) if share_type: metadata = self.update_metadata_from_share_type_extra_specs( context, share_type, metadata) options = { 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, 'snapshot_id': snapshot_id, 'metadata': metadata, 'display_name': name, 'display_description': description, 'share_proto': share_proto, 'is_public': is_public, 'share_group_id': share_group_id, } options.update(share_type_attributes) if share_group_snapshot_member: options['source_share_group_snapshot_member_id'] = ( share_group_snapshot_member['id']) # NOTE(dviroel): If a target availability zone was not provided, the # scheduler will receive a list with all availability zones that # contains a subnet within the selected share network. 
if share_network_id and not availability_zone: compatible_azs_name, compatible_azs_multiple = ( self._get_all_availability_zones_with_subnets( context, share_network_id)) if not availability_zones: availability_zones = compatible_azs_name else: availability_zones = ( [az for az in availability_zones if az in compatible_azs_name]) if not availability_zones: msg = _( "The share network is not supported within any requested " "availability zone. Check the share type's " "'availability_zones' extra-spec and the availability " "zones of the share network subnets") raise exception.InvalidInput(message=msg) if az_request_multiple_subnet_support_map: az_request_multiple_subnet_support_map.update( compatible_azs_multiple) else: az_request_multiple_subnet_support_map = ( compatible_azs_multiple) share = None try: share = self.db.share_create(context, options, create_share_instance=False) QUOTAS.commit(context, reservations, share_type_id=share_type_id) except Exception: with excutils.save_and_reraise_exception(): try: if share: self.db.share_delete(context, share['id']) finally: QUOTAS.rollback( context, reservations, share_type_id=share_type_id) self.save_scheduler_hints(context, share, scheduler_hints) host = None snapshot_host = None if snapshot: snapshot_host = snapshot['share']['instance']['host'] if not CONF.use_scheduler_creating_share_from_snapshot: # Shares from snapshots with restriction - source host only. # It is common situation for different types of backends. 
host = snapshot['share']['instance']['host'] if share_group and host is None: host = share_group['host'] self.create_instance( context, share, share_network_id=share_network_id, host=host, availability_zone=availability_zone, share_group=share_group, share_group_snapshot_member=share_group_snapshot_member, share_type_id=share_type_id, availability_zones=availability_zones, snapshot_host=snapshot_host, scheduler_hints=scheduler_hints, az_request_multiple_subnet_support_map=( az_request_multiple_subnet_support_map), mount_point_name=mount_point_name, encryption_key_ref=encryption_key_ref, ) # Retrieve the share with instance details share = self.db.share_get(context, share['id']) return share def update_metadata_from_share_type_extra_specs(self, context, share_type, user_metadata): extra_specs = share_type.get('extra_specs', {}) if not extra_specs: return user_metadata driver_keys = getattr(CONF, 'driver_updatable_metadata', []) if not driver_keys: return user_metadata metadata_from_share_type = {} for k, v in extra_specs.items(): try: prefix, metadata_key = k.split(':') except Exception: continue # consider prefix only with valid storage driver if prefix.lower() == 'provisioning': continue if metadata_key in driver_keys: metadata_from_share_type.update({metadata_key: v}) metadata_from_share_type.update(user_metadata) return metadata_from_share_type def update_share_from_metadata(self, context, share_id, metadata): driver_keys = getattr(CONF, 'driver_updatable_metadata', []) if not driver_keys: return driver_metadata = {} for k, v in metadata.items(): if k in driver_keys: driver_metadata.update({k: v}) if driver_metadata: share = self.get(context, share_id) self.share_rpcapi.update_share_from_metadata(context, share, driver_metadata) def update_share_network_subnet_from_metadata(self, context, share_network_id, share_network_subnet_id, metadata): driver_keys = getattr(CONF, 'driver_updatable_subnet_metadata', []) if not driver_keys: return driver_metadata = {} for k, 
v in metadata.items(): if k in driver_keys: driver_metadata.update({k: v}) if driver_metadata: share_servers = ( self.db.share_server_get_all_by_host_and_or_share_subnet( context, host=None, share_subnet_id=share_network_subnet_id)) for share_server in share_servers: self.share_rpcapi.update_share_network_subnet_from_metadata( context, share_network_id, share_network_subnet_id, share_server, driver_metadata ) def get_share_attributes_from_share_type(self, share_type): """Determine share attributes from the share type. The share type can change any time after shares of that type are created, so we copy some share type attributes to the share to consistently govern the behavior of that share over its lifespan. """ inferred_map = constants.ExtraSpecs.INFERRED_OPTIONAL_MAP snapshot_support_key = constants.ExtraSpecs.SNAPSHOT_SUPPORT create_share_from_snapshot_key = ( constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT) revert_to_snapshot_key = ( constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT) mount_snapshot_support_key = ( constants.ExtraSpecs.MOUNT_SNAPSHOT_SUPPORT) mount_point_name_support_key = ( constants.ExtraSpecs.MOUNT_POINT_NAME_SUPPORT ) snapshot_support_default = inferred_map.get(snapshot_support_key) create_share_from_snapshot_support_default = inferred_map.get( create_share_from_snapshot_key) revert_to_snapshot_support_default = inferred_map.get( revert_to_snapshot_key) mount_snapshot_support_default = inferred_map.get( constants.ExtraSpecs.MOUNT_SNAPSHOT_SUPPORT) mount_point_name_support_default = False if share_type: snapshot_support = share_types.parse_boolean_extra_spec( snapshot_support_key, share_type.get('extra_specs', {}).get( snapshot_support_key, snapshot_support_default)) create_share_from_snapshot_support = ( share_types.parse_boolean_extra_spec( create_share_from_snapshot_key, share_type.get('extra_specs', {}).get( create_share_from_snapshot_key, create_share_from_snapshot_support_default))) revert_to_snapshot_support = ( 
share_types.parse_boolean_extra_spec( revert_to_snapshot_key, share_type.get('extra_specs', {}).get( revert_to_snapshot_key, revert_to_snapshot_support_default))) mount_snapshot_support = share_types.parse_boolean_extra_spec( mount_snapshot_support_key, share_type.get( 'extra_specs', {}).get( mount_snapshot_support_key, mount_snapshot_support_default)) mount_point_name_support = share_types.parse_boolean_extra_spec( mount_point_name_support_key, share_type.get( 'extra_specs', {}).get( mount_point_name_support_key, mount_point_name_support_default)) replication_type = share_type.get('extra_specs', {}).get( 'replication_type') else: snapshot_support = snapshot_support_default create_share_from_snapshot_support = ( create_share_from_snapshot_support_default) revert_to_snapshot_support = revert_to_snapshot_support_default mount_snapshot_support = mount_snapshot_support_default mount_point_name_support = mount_point_name_support_default replication_type = None return { 'snapshot_support': snapshot_support, 'create_share_from_snapshot_support': create_share_from_snapshot_support, 'revert_to_snapshot_support': revert_to_snapshot_support, 'replication_type': replication_type, 'mount_snapshot_support': mount_snapshot_support, 'mount_point_name_support': mount_point_name_support, } def create_instance(self, context, share, share_network_id=None, host=None, availability_zone=None, share_group=None, share_group_snapshot_member=None, share_type_id=None, availability_zones=None, snapshot_host=None, scheduler_hints=None, az_request_multiple_subnet_support_map=None, mount_point_name=None, encryption_key_ref=None): request_spec, share_instance = ( self.create_share_instance_and_get_request_spec( context, share, availability_zone=availability_zone, share_group=share_group, host=host, share_network_id=share_network_id, share_type_id=share_type_id, availability_zones=availability_zones, snapshot_host=snapshot_host, az_request_multiple_subnet_support_map=( 
az_request_multiple_subnet_support_map), mount_point_name=mount_point_name, encryption_key_ref=encryption_key_ref)) if share_group_snapshot_member: # Inherit properties from the share_group_snapshot_member member_share_instance = share_group_snapshot_member[ 'share_instance'] updates = { 'host': member_share_instance['host'], 'share_network_id': member_share_instance['share_network_id'], 'share_server_id': member_share_instance['share_server_id'], } share = self.db.share_instance_update(context, share_instance['id'], updates) # NOTE(ameade): Do not cast to driver if creating from share group # snapshot return if host: self.share_rpcapi.create_share_instance( context, share_instance, host, request_spec=request_spec, filter_properties={'scheduler_hints': scheduler_hints}, snapshot_id=share['snapshot_id'], ) else: # Create share instance from scratch or from snapshot could happen # on hosts other than the source host. self.scheduler_rpcapi.create_share_instance( context, request_spec=request_spec, filter_properties={'scheduler_hints': scheduler_hints}, ) return share_instance def create_share_instance_and_get_request_spec( self, context, share, availability_zone=None, share_group=None, host=None, share_network_id=None, share_type_id=None, cast_rules_to_readonly=False, availability_zones=None, snapshot_host=None, az_request_multiple_subnet_support_map=None, mount_point_name=None, encryption_key_ref=None): availability_zone_id = None if availability_zone: availability_zone_id = self.db.availability_zone_get( context, availability_zone).id # TODO(u_glide): Add here validation that provided share network # doesn't conflict with provided availability_zone when Neutron # will have AZ support. 
share_instance = self.db.share_instance_create( context, share['id'], { 'share_network_id': share_network_id, 'status': constants.STATUS_CREATING, 'scheduled_at': timeutils.utcnow(), 'host': host if host else '', 'availability_zone_id': availability_zone_id, 'share_type_id': share_type_id, 'cast_rules_to_readonly': cast_rules_to_readonly, 'mount_point_name': mount_point_name, 'encryption_key_ref': encryption_key_ref, } ) share_properties = { 'id': share['id'], 'size': share['size'], 'user_id': share['user_id'], 'project_id': share['project_id'], 'metadata': self.db.share_metadata_get(context, share['id']), 'share_server_id': share_instance['share_server_id'], 'snapshot_support': share['snapshot_support'], 'create_share_from_snapshot_support': share['create_share_from_snapshot_support'], 'revert_to_snapshot_support': share['revert_to_snapshot_support'], 'mount_snapshot_support': share['mount_snapshot_support'], 'share_proto': share['share_proto'], 'share_type_id': share_type_id, 'is_public': share['is_public'], 'share_group_id': share['share_group_id'], 'source_share_group_snapshot_member_id': share[ 'source_share_group_snapshot_member_id'], 'snapshot_id': share['snapshot_id'], 'replication_type': share['replication_type'], } share_instance_properties = { 'id': share_instance['id'], 'availability_zone_id': share_instance['availability_zone_id'], 'share_network_id': share_instance['share_network_id'], 'share_server_id': share_instance['share_server_id'], 'share_id': share_instance['share_id'], 'host': share_instance['host'], 'status': share_instance['status'], 'replica_state': share_instance['replica_state'], 'share_type_id': share_instance['share_type_id'], 'encryption_key_ref': share_instance['encryption_key_ref'], } share_type = None if share_instance['share_type_id']: share_type = self.db.share_type_get( context, share_instance['share_type_id']) request_spec = { 'share_properties': share_properties, 'share_instance_properties': share_instance_properties, 
'share_proto': share['share_proto'], 'share_id': share['id'], 'snapshot_id': share['snapshot_id'], 'snapshot_host': snapshot_host, 'share_type': share_type, 'share_group': share_group, 'availability_zone_id': availability_zone_id, 'availability_zones': availability_zones, 'az_request_multiple_subnet_support_map': ( az_request_multiple_subnet_support_map) } return request_spec, share_instance def create_share_replica(self, context, share, availability_zone=None, share_network_id=None, scheduler_hints=None): parent_share_network_id = share.get('share_network_id') if (parent_share_network_id and share_network_id and parent_share_network_id != share_network_id): parent_security_services = ( self.db.security_service_get_all_by_share_network( context, parent_share_network_id)) security_services = ( self.db.security_service_get_all_by_share_network( context, share_network_id)) parent_ss = set([s['id'] for s in parent_security_services]) ss = set([s['id'] for s in security_services]) if ss != parent_ss: msg = _("Share and its replica can't be in " "different authentication domains.") raise exception.InvalidInput(reason=msg) if not share.get('replication_type'): msg = _("Replication not supported for share %s.") raise exception.InvalidShare(message=msg % share['id']) if share.get('share_group_id'): msg = _("Replication not supported for shares in a group.") raise exception.InvalidShare(message=msg) if scheduler_hints: if ('only_host' not in scheduler_hints.keys() or len( scheduler_hints) > 1): msg = _("Arg 'scheduler_hints' supports only 'only_host' key.") raise exception.InvalidInput(reason=msg) self._check_is_share_busy(share) active_replica = self.db.share_replicas_get_available_active_replica( context, share['id']) if not active_replica: msg = _("Share %s does not have any active replica in available " "state.") raise exception.ReplicationException(reason=msg % share['id']) share_type = share_types.get_share_type( context, share.instance['share_type_id']) type_azs = 
share_type['extra_specs'].get('availability_zones', '') type_azs = [t for t in type_azs.split(',') if type_azs] if (availability_zone and type_azs and availability_zone not in type_azs): msg = _("Share replica cannot be created since the share type " "%(type)s is not supported within the availability zone " "chosen %(az)s.") type_name = '%s' % (share_type['name'] or '') type_id = '(ID: %s)' % share_type['id'] payload = {'type': '%s%s' % (type_name, type_id), 'az': availability_zone} raise exception.InvalidShare(message=msg % payload) try: reservations = QUOTAS.reserve( context, share_replicas=1, replica_gigabytes=share['size'], share_type_id=share_type['id'] ) except exception.OverQuota as e: self.check_if_replica_quotas_exceeded(context, e, share['size']) az_request_multiple_subnet_support_map = {} if share_network_id: if availability_zone: try: az = self.db.availability_zone_get(context, availability_zone) except exception.AvailabilityZoneNotFound: msg = _("Share replica cannot be created because the " "specified availability zone does not exist.") raise exception.InvalidInput(message=msg) az_id = az.get('id') subnets = ( self.db. share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, az_id)) if not subnets: msg = _("Share replica cannot be created because the " "share network is not available within the " "specified availability zone.") raise exception.InvalidShare(message=msg) az_request_multiple_subnet_support_map[az_id] = ( len(subnets) > 1) else: # NOTE(dviroel): If a target availability zone was not # provided, the scheduler will receive a list with all # availability zones that contains subnets within the # selected share network. 
compatible_azs_name, compatible_azs_multiple = ( self._get_all_availability_zones_with_subnets( context, share_network_id)) if not type_azs: type_azs = compatible_azs_name else: type_azs = ( [az for az in type_azs if az in compatible_azs_name]) if not type_azs: msg = _( "The share network is not supported within any " "requested availability zone. Check the share type's " "'availability_zones' extra-spec and the availability " "zones of the share network subnets") raise exception.InvalidInput(message=msg) az_request_multiple_subnet_support_map.update( compatible_azs_multiple) if share['replication_type'] == constants.REPLICATION_TYPE_READABLE: cast_rules_to_readonly = True else: cast_rules_to_readonly = False share_replica = None try: request_spec, share_replica = ( self.create_share_instance_and_get_request_spec( context, share, availability_zone=availability_zone, share_network_id=share_network_id, share_type_id=share['instance']['share_type_id'], cast_rules_to_readonly=cast_rules_to_readonly, availability_zones=type_azs, az_request_multiple_subnet_support_map=( az_request_multiple_subnet_support_map)) ) QUOTAS.commit( context, reservations, project_id=share['project_id'], share_type_id=share_type['id'], ) except Exception: with excutils.save_and_reraise_exception(): try: if share_replica: self.db.share_replica_delete( context, share_replica['id'], need_to_update_usages=False) finally: QUOTAS.rollback( context, reservations, share_type_id=share_type['id']) all_replicas = self.db.share_replicas_get_all_by_share( context, share['id']) all_hosts = [r['host'] for r in all_replicas] request_spec['active_replica_host'] = active_replica['host'] request_spec['all_replica_hosts'] = ','.join(all_hosts) self.db.share_replica_update( context, share_replica['id'], {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC}) existing_snapshots = ( self.db.share_snapshot_get_all_for_share( context, share_replica['share_id']) ) snapshot_instance = { 'status': 
constants.STATUS_CREATING, 'progress': '0%', 'share_instance_id': share_replica['id'], } for snapshot in existing_snapshots: self.db.share_snapshot_instance_create( context, snapshot['id'], snapshot_instance) self.scheduler_rpcapi.create_share_replica( context, request_spec=request_spec, filter_properties={'scheduler_hints': scheduler_hints}) return share_replica def delete_share_replica(self, context, share_replica, force=False): # Disallow deletion of ONLY active replica, *even* when this # operation is forced. replicas = self.db.share_replicas_get_all_by_share( context, share_replica['share_id']) active_replicas = list(filter( lambda x: x['replica_state'] == constants.REPLICA_STATE_ACTIVE, replicas)) if (share_replica.get('replica_state') == constants.REPLICA_STATE_ACTIVE and len(active_replicas) == 1): msg = _("Cannot delete last active replica.") raise exception.ReplicationException(reason=msg) LOG.info("Deleting replica %s.", share_replica['id']) self.db.share_replica_update( context, share_replica['id'], { 'status': constants.STATUS_DELETING, 'terminated_at': timeutils.utcnow(), } ) if not share_replica['host']: # Delete any snapshot instances created on the database replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_replica['id']}) ) for snapshot in replica_snapshots: self.db.share_snapshot_instance_delete(context, snapshot['id']) # Delete the replica from the database self.db.share_replica_delete(context, share_replica['id']) else: self.share_rpcapi.delete_share_replica(context, share_replica, force=force) def promote_share_replica(self, context, share_replica, quiesce_wait_time=None): if share_replica.get('status') != constants.STATUS_AVAILABLE: msg = _("Replica %(replica_id)s must be in %(status)s state to be " "promoted.") raise exception.ReplicationException( reason=msg % {'replica_id': share_replica['id'], 'status': constants.STATUS_AVAILABLE}) replica_state = 
share_replica['replica_state'] if (replica_state in (constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR) and not context.is_admin): msg = _("Promoting a replica with 'replica_state': %s requires " "administrator privileges.") raise exception.AdminRequired( message=msg % replica_state) self.db.share_replica_update( context, share_replica['id'], {'status': constants.STATUS_REPLICATION_CHANGE}) self.share_rpcapi.promote_share_replica( context, share_replica, quiesce_wait_time=quiesce_wait_time) return self.db.share_replica_get(context, share_replica['id']) def update_share_replica(self, context, share_replica): if not share_replica['host']: msg = _("Share replica does not have a valid host.") raise exception.InvalidHost(reason=msg) self.share_rpcapi.update_share_replica(context, share_replica) def manage(self, context, share_data, driver_options): # Check whether there's a share already with the provided options: filters = { 'export_location_path': share_data['export_location_path'], 'host': share_data['host'], } share_server_id = share_data.get('share_server_id') if share_server_id: filters['share_server_id'] = share_data['share_server_id'] already_managed = self.db.share_instance_get_all( context, filters=filters) if already_managed: LOG.error("Found an existing share with export location %s!", share_data['export_location_path']) msg = _("A share already exists with the export path specified.") raise exception.InvalidShare(reason=msg) share_type_id = share_data['share_type_id'] share_type = share_types.get_share_type(context, share_type_id) dhss = share_types.parse_boolean_extra_spec( 'driver_handles_share_servers', share_type['extra_specs']['driver_handles_share_servers']) if dhss and not share_server_id: msg = _("Share Server ID parameter is required when managing a " "share using a share type with " "driver_handles_share_servers extra-spec set to True.") raise exception.InvalidInput(reason=msg) if not dhss and share_server_id: msg = _("Share Server ID 
parameter is not expected when managing a" " share using a share type with " "driver_handles_share_servers extra-spec set to False.") raise exception.InvalidInput(reason=msg) if share_server_id: try: share_server = self.db.share_server_get( context, share_data['share_server_id']) except exception.ShareServerNotFound: msg = _("Share Server specified was not found.") raise exception.InvalidInput(reason=msg) if share_server['status'] != constants.STATUS_ACTIVE: msg = _("The provided share server is not active.") raise exception.InvalidShareServer(reason=msg) share_data['share_network_id'] = ( share_server['share_network_id']) try: share_network = self.db.share_network_get( context, share_data['share_network_id']) except exception.ShareNetworkNotFound: msg = _("Share network %s was not found." ) % share_data['share_network_id'] raise exception.InvalidInput(reason=msg) # Check if share network is active, otherwise raise a BadRequest api_common.check_share_network_is_active(share_network) share_data.update({ 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_MANAGING, 'scheduled_at': timeutils.utcnow(), }) share_data.update( self.get_share_attributes_from_share_type(share_type)) share = self.db.share_create(context, share_data) export_location_path = share_data.pop('export_location_path') self.db.export_locations_update( context, share.instance['id'], export_location_path) request_spec = self._get_request_spec_dict( context, share, share_type, size=0, share_proto=share_data['share_proto'], host=share_data['host']) # NOTE(ganso): Scheduler is called to validate if share type # provided can fit in host provided. It will invoke manage upon # successful validation. 
self.scheduler_rpcapi.manage_share(context, share['id'], driver_options, request_spec) return self.db.share_get(context, share['id']) def _get_request_spec_dict(self, context, share, share_type, **kwargs): if share is None: share = {'instance': {}} # NOTE(dviroel): The share object can be a share instance object with # share data. share_instance = share.get('instance', share) share_properties = { 'size': kwargs.get('size', share.get('size')), 'user_id': kwargs.get('user_id', share.get('user_id')), 'project_id': kwargs.get('project_id', share.get('project_id')), 'metadata': self.db.share_metadata_get( context, share_instance.get('share_id')), 'snapshot_support': kwargs.get( 'snapshot_support', share_type.get('extra_specs', {}).get('snapshot_support') ), 'create_share_from_snapshot_support': kwargs.get( 'create_share_from_snapshot_support', share_type.get('extra_specs', {}).get( 'create_share_from_snapshot_support') ), 'revert_to_snapshot_support': kwargs.get( 'revert_to_snapshot_support', share_type.get('extra_specs', {}).get( 'revert_to_snapshot_support') ), 'mount_snapshot_support': kwargs.get( 'mount_snapshot_support', share_type.get('extra_specs', {}).get( 'mount_snapshot_support') ), 'mount_point_name_support': kwargs.get( 'mount_point_name_support', share_type.get('extra_specs', {}).get( 'mount_point_name_support') ), 'share_proto': kwargs.get('share_proto', share.get('share_proto')), 'share_type_id': share_type['id'], 'is_public': kwargs.get('is_public', share.get('is_public')), 'share_group_id': kwargs.get( 'share_group_id', share.get('share_group_id')), 'source_share_group_snapshot_member_id': kwargs.get( 'source_share_group_snapshot_member_id', share.get('source_share_group_snapshot_member_id')), 'snapshot_id': kwargs.get('snapshot_id', share.get('snapshot_id')), } share_instance_properties = { 'availability_zone_id': kwargs.get( 'availability_zone_id', share_instance.get('availability_zone_id')), 'share_network_id': kwargs.get( 'share_network_id', 
share_instance.get('share_network_id')), 'share_server_id': kwargs.get( 'share_server_id', share_instance.get('share_server_id')), 'share_id': kwargs.get('share_id', share_instance.get('share_id')), 'host': kwargs.get('host', share_instance.get('host')), 'status': kwargs.get('status', share_instance.get('status')), } request_spec = { 'share_properties': share_properties, 'share_instance_properties': share_instance_properties, 'share_type': share_type, 'share_id': share.get('id'), } return request_spec def _prefix_mount_point_name(self, share_type, context, mount_point_name=None): prefix = share_type.get('extra_specs').get( constants.ExtraSpecs.PROVISIONING_MOUNT_POINT_PREFIX) if prefix is None: prefix = CONF.default_mount_point_prefix mount_point_name_template = f"{prefix}{mount_point_name}" mount_point_name = mount_point_name_template.format( **context.to_dict()) if mount_point_name and ( not re.match( r'^[a-zA-Z0-9_-]*$', mount_point_name) or len(mount_point_name) > 255 ): msg = _("Invalid mount_point_name: %s") LOG.error(msg, mount_point_name) raise exception.InvalidInput(msg % mount_point_name) return mount_point_name @prevent_locked_action_on_share('delete') def unmanage(self, context, share): policy.check_policy(context, 'share', 'unmanage') self._check_is_share_busy(share) if share['status'] == constants.STATUS_MANAGE_ERROR: update_status = constants.STATUS_MANAGE_ERROR_UNMANAGING else: update_status = constants.STATUS_UNMANAGING update_data = {'status': update_status, 'terminated_at': timeutils.utcnow()} share_ref = self.db.share_update(context, share['id'], update_data) self.delete_scheduler_hints(context, share) self.share_rpcapi.unmanage_share(context, share_ref) # NOTE(u_glide): We should update 'updated_at' timestamp of # share server here, when manage/unmanage operations will be supported # for driver_handles_share_servers=True mode def manage_snapshot(self, context, snapshot_data, driver_options, share=None): if not share: try: share = 
self.db.share_get(context, snapshot_data['share_id']) except exception.NotFound: raise exception.ShareNotFound( share_id=snapshot_data['share_id']) if share['has_replicas']: msg = (_("Share %s has replicas. Snapshots of this share cannot " "currently be managed until all replicas are removed.") % share['id']) raise exception.InvalidShare(reason=msg) existing_snapshots = self.db.share_snapshot_get_all_for_share( context, snapshot_data['share_id']) for existing_snap in existing_snapshots: for inst in existing_snap.get('instances'): if (snapshot_data['provider_location'] == inst['provider_location']): msg = _("A share snapshot %(share_snapshot_id)s is " "already managed for provider location " "%(provider_location)s.") % { 'share_snapshot_id': existing_snap['id'], 'provider_location': snapshot_data['provider_location'], } raise exception.ManageInvalidShareSnapshot( reason=msg) snapshot_data.update({ 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_MANAGING, 'share_size': share['size'], 'progress': '0%', 'share_proto': share['share_proto'] }) snapshot = self.db.share_snapshot_create(context, snapshot_data) self.share_rpcapi.manage_snapshot(context, snapshot, share['host'], driver_options) return snapshot def unmanage_snapshot(self, context, snapshot, host): update_data = {'status': constants.STATUS_UNMANAGING, 'terminated_at': timeutils.utcnow()} snapshot_ref = self.db.share_snapshot_update(context, snapshot['id'], update_data) self.share_rpcapi.unmanage_snapshot(context, snapshot_ref, host) def revert_to_snapshot(self, context, share, snapshot): """Revert a share to a snapshot.""" reservations = self._handle_revert_to_snapshot_quotas( context, share, snapshot) try: if share.get('has_replicas'): self._revert_to_replicated_snapshot( context, share, snapshot, reservations) else: self._revert_to_snapshot( context, share, snapshot, reservations) except Exception: with excutils.save_and_reraise_exception(): if reservations: 
QUOTAS.rollback( context, reservations, share_type_id=share['instance']['share_type_id']) def _handle_revert_to_snapshot_quotas(self, context, share, snapshot): """Reserve extra quota if a revert will result in a larger share.""" # Note(cknight): This value may be positive or negative. size_increase = snapshot['size'] - share['size'] if not size_increase: return None try: return QUOTAS.reserve( context, project_id=share['project_id'], gigabytes=size_increase, user_id=share['user_id'], share_type_id=share['instance']['share_type_id']) except exception.OverQuota as exc: usages = exc.kwargs['usages'] quotas = exc.kwargs['quotas'] consumed_gb = (usages['gigabytes']['reserved'] + usages['gigabytes']['in_use']) msg = _("Quota exceeded for %(s_pid)s. Reverting share " "%(s_sid)s to snapshot %(s_ssid)s will increase the " "share's size by %(s_size)sG, " "(%(d_consumed)dG of %(d_quota)dG already consumed).") msg_args = { 's_pid': context.project_id, 's_sid': share['id'], 's_ssid': snapshot['id'], 's_size': size_increase, 'd_consumed': consumed_gb, 'd_quota': quotas['gigabytes'], } message = msg % msg_args LOG.error(message) raise exception.ShareSizeExceedsAvailableQuota(message=message) def _revert_to_snapshot(self, context, share, snapshot, reservations): """Revert a non-replicated share to a snapshot.""" # Set status of share to 'reverting' self.db.share_update( context, snapshot['share_id'], {'status': constants.STATUS_REVERTING}) # Set status of snapshot to 'restoring' self.db.share_snapshot_update( context, snapshot['id'], {'status': constants.STATUS_RESTORING}) # Send revert API to share host self.share_rpcapi.revert_to_snapshot( context, share, snapshot, share['instance']['host'], reservations) def _revert_to_replicated_snapshot(self, context, share, snapshot, reservations): """Revert a replicated share to a snapshot.""" # Get active replica active_replica = self.db.share_replicas_get_available_active_replica( context, share['id']) if not active_replica: msg = 
_('Share %s has no active replica in available state.') raise exception.ReplicationException(reason=msg % share['id']) # Get snapshot instance on active replica snapshot_instance_filters = { 'share_instance_ids': active_replica['id'], 'snapshot_ids': snapshot['id'], } snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, snapshot_instance_filters)) active_snapshot_instance = ( snapshot_instances[0] if snapshot_instances else None) if not active_snapshot_instance: msg = _('Share %(share)s has no snapshot %(snap)s associated with ' 'its active replica.') msg_args = {'share': share['id'], 'snap': snapshot['id']} raise exception.ReplicationException(reason=msg % msg_args) # Set active replica to 'reverting' self.db.share_replica_update( context, active_replica['id'], {'status': constants.STATUS_REVERTING}) # Set snapshot instance on active replica to 'restoring' self.db.share_snapshot_instance_update( context, active_snapshot_instance['id'], {'status': constants.STATUS_RESTORING}) # Send revert API to active replica host self.share_rpcapi.revert_to_snapshot( context, share, snapshot, active_replica['host'], reservations) @prevent_locked_action_on_share('delete') def soft_delete(self, context, share): """Soft delete share.""" share_id = share['id'] if share['is_soft_deleted']: msg = _("The share has been soft deleted already") raise exception.InvalidShare(reason=msg) statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR, constants.STATUS_INACTIVE) if share['status'] not in statuses: msg = _("Share status must be one of %(statuses)s") % { "statuses": statuses} raise exception.InvalidShare(reason=msg) # If the share has more than one replica, # it can't be soft deleted until the additional replicas are removed. if share.has_replicas: msg = _("Share %s has replicas. 
Remove the replicas before " "soft deleting the share.") % share_id raise exception.Conflict(err=msg) snapshots = self.db.share_snapshot_get_all_for_share(context, share_id) if len(snapshots): msg = _("Share still has %d dependent snapshots.") % len(snapshots) raise exception.InvalidShare(reason=msg) filters = dict(share_id=share_id) backups = self.db.share_backups_get_all(context, filters=filters) if len(backups): msg = _("Share still has %d dependent backups.") % len(backups) raise exception.InvalidShare(reason=msg) share_group_snapshot_members_count = ( self.db.count_share_group_snapshot_members_in_share( context, share_id, include_deferred_deleting=False)) if share_group_snapshot_members_count: msg = ( _("Share still has %d dependent share group snapshot " "members.") % share_group_snapshot_members_count) raise exception.InvalidShare(reason=msg) self._check_is_share_busy(share) self.db.share_soft_delete(context, share_id) def restore(self, context, share): """Restore share.""" share_id = share['id'] self.db.share_restore(context, share_id) @policy.wrap_check_policy('share') @prevent_locked_action_on_share def delete(self, context, share, force=False): """Delete share.""" share = self.db.share_get(context, share['id']) share_id = share['id'] statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR, constants.STATUS_INACTIVE) if not (force or share['status'] in statuses): msg = _("Share status must be one of %(statuses)s") % { "statuses": statuses} raise exception.InvalidShare(reason=msg) # NOTE(gouthamr): If the share has more than one replica, # it can't be deleted until the additional replicas are removed. if share.has_replicas: msg = _("Share %s has replicas. 
Remove the replicas before " "deleting the share.") % share_id raise exception.Conflict(err=msg) snapshots = self.db.share_snapshot_get_all_for_share(context, share_id) if len(snapshots): msg = _("Share still has %d dependent snapshots.") % len(snapshots) raise exception.InvalidShare(reason=msg) filters = dict(share_id=share_id) backups = self.db.share_backups_get_all(context, filters=filters) if len(backups): msg = _("Share still has %d dependent backups.") % len(backups) raise exception.InvalidShare(reason=msg) share_group_snapshot_members_count = ( self.db.count_share_group_snapshot_members_in_share( context, share_id)) if share_group_snapshot_members_count: msg = ( _("Share still has %d dependent share group snapshot " "members.") % share_group_snapshot_members_count) raise exception.InvalidShare(reason=msg) self._check_is_share_busy(share) self.delete_scheduler_hints(context, share) for share_instance in share.instances: if share_instance['host']: self.delete_instance(context, share_instance, force=force) else: self.db.share_instance_delete( context, share_instance['id'], need_to_update_usages=True) def delete_instance(self, context, share_instance, force=False): policy.check_policy(context, 'share', 'delete') statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR, constants.STATUS_INACTIVE) if not (force or share_instance['status'] in statuses): msg = _("Share instance status must be one of %(statuses)s") % { "statuses": statuses} raise exception.InvalidShareInstance(reason=msg) deferred_delete = CONF.is_deferred_deletion_enabled if force and deferred_delete: deferred_delete = False current_status = share_instance['status'] if current_status not in (constants.STATUS_DEFERRED_DELETING, constants.STATUS_ERROR_DEFERRED_DELETING): new_status = constants.STATUS_DELETING if deferred_delete: new_status = constants.STATUS_DEFERRED_DELETING share_instance = self.db.share_instance_update( context, share_instance['id'], {'status': new_status, 'terminated_at': 
timeutils.utcnow()} ) self.share_rpcapi.delete_share_instance( context, share_instance, force=force, deferred_delete=deferred_delete) # NOTE(u_glide): 'updated_at' timestamp is used to track last usage of # share server. This is required for automatic share servers cleanup # because we should track somehow period of time when share server # doesn't have shares (unused). We do this update only on share # deletion because share server with shares cannot be deleted, so no # need to do this update on share creation or any other share operation if share_instance['share_server_id']: self.db.share_server_update( context, share_instance['share_server_id'], {'updated_at': timeutils.utcnow()}) def delete_share_server(self, context, server): """Delete share server.""" policy.check_policy(context, 'share_server', 'delete', server) shares = self.db.share_instance_get_all_by_share_server( context, server['id']) if shares: raise exception.ShareServerInUse(share_server_id=server['id']) share_groups = self.db.share_group_get_all_by_share_server( context, server['id']) if share_groups: LOG.error("share server '%(ssid)s' in use by share groups.", {'ssid': server['id']}) raise exception.ShareServerInUse(share_server_id=server['id']) # NOTE(vponomaryov): There is no share_server status update here, # it is intentional. # Status will be changed in manila.share.manager after verification # for race condition between share creation on server # and server deletion. 
self.share_rpcapi.delete_share_server(context, server) def manage_share_server( self, context, identifier, host, share_net_subnet, driver_opts): """Manage a share server.""" try: matched_servers = self.db.share_server_search_by_identifier( context, identifier) except exception.ShareServerNotFound: pass else: msg = _("Identifier %(identifier)s specified matches existing " "share servers: %(servers)s.") % { 'identifier': identifier, 'servers': ', '.join(s['identifier'] for s in matched_servers) } raise exception.InvalidInput(reason=msg) values = { 'host': host, 'share_network_subnets': [share_net_subnet], 'status': constants.STATUS_MANAGING, 'is_auto_deletable': False, 'identifier': identifier, } server = self.db.share_server_create(context, values) self.share_rpcapi.manage_share_server( context, server, identifier, driver_opts) return self.db.share_server_get(context, server['id']) def unmanage_share_server(self, context, share_server, force=False): """Unmanage a share server.""" shares = self.db.share_instance_get_all_by_share_server( context, share_server['id']) if shares: raise exception.ShareServerInUse( share_server_id=share_server['id']) share_groups = self.db.share_group_get_all_by_share_server( context, share_server['id']) if share_groups: LOG.error("share server '%(ssid)s' in use by share groups.", {'ssid': share_server['id']}) raise exception.ShareServerInUse( share_server_id=share_server['id']) update_data = {'status': constants.STATUS_UNMANAGING, 'terminated_at': timeutils.utcnow()} share_server = self.db.share_server_update( context, share_server['id'], update_data) self.share_rpcapi.unmanage_share_server( context, share_server, force=force) def transfer_accept(self, context, share, new_user, new_project, clear_rules=False): self.share_rpcapi.transfer_accept(context, share, new_user, new_project, clear_rules=clear_rules) def create_snapshot(self, context, share, name, description, force=False, metadata=None): policy.check_policy(context, 'share', 
'create_snapshot', share) if metadata: api_common.check_metadata_properties(metadata) if ((not force) and (share['status'] != constants.STATUS_AVAILABLE)): msg = _("Source share status must be " "%s") % constants.STATUS_AVAILABLE raise exception.InvalidShare(reason=msg) size = share['size'] self._check_is_share_busy(share) try: reservations = QUOTAS.reserve( context, snapshots=1, snapshot_gigabytes=size, share_type_id=share['instance']['share_type_id']) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'snapshot_gigabytes' in overs: msg = ("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG snapshot (%(d_consumed)dG of " "%(d_quota)dG already consumed).") LOG.warning(msg, { 's_pid': context.project_id, 's_size': size, 'd_consumed': _consumed('snapshot_gigabytes'), 'd_quota': quotas['snapshot_gigabytes']}) raise exception.SnapshotSizeExceedsAvailableQuota() elif 'snapshots' in overs: msg = ("Quota exceeded for %(s_pid)s, tried to create " "snapshot (%(d_consumed)d snapshots " "already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed('snapshots')}) raise exception.SnapshotLimitExceeded( allowed=quotas['snapshots']) options = {'share_id': share['id'], 'size': share['size'], 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_size': share['size'], 'display_name': name, 'display_description': description, 'share_proto': share['share_proto']} if metadata: options.update({"metadata": metadata}) snapshot = None try: snapshot = self.db.share_snapshot_create(context, options) QUOTAS.commit( context, reservations, share_type_id=share['instance']['share_type_id']) except Exception: with excutils.save_and_reraise_exception(): try: if snapshot and snapshot['instance']: self.db.share_snapshot_instance_delete( context, 
snapshot['instance']['id']) finally: QUOTAS.rollback( context, reservations, share_type_id=share['instance']['share_type_id']) # If replicated share, create snapshot instances for each replica if share.get('has_replicas'): snapshot = self.db.share_snapshot_get(context, snapshot['id']) share_instance_id = snapshot['instance']['share_instance_id'] replicas = self.db.share_replicas_get_all_by_share( context, share['id']) replicas = [r for r in replicas if r['id'] != share_instance_id] snapshot_instance = { 'status': constants.STATUS_CREATING, 'progress': '0%', } for replica in replicas: snapshot_instance.update({'share_instance_id': replica['id']}) self.db.share_snapshot_instance_create( context, snapshot['id'], snapshot_instance) self.share_rpcapi.create_replicated_snapshot( context, share, snapshot) else: self.share_rpcapi.create_snapshot(context, share, snapshot) return snapshot def _modify_quotas_for_share_migration(self, context, share, new_share_type): """Consume quotas for share migration. If a share migration was requested and a new share type was provided, quotas must be consumed from this share type. If no quotas are available for shares, gigabytes, share replicas or replica gigabytes, an error will be thrown. """ new_share_type_id = new_share_type['id'] if new_share_type_id == share['share_type_id']: return new_type_extra_specs = self.get_share_attributes_from_share_type( new_share_type) new_type_replication_type = new_type_extra_specs.get( 'replication_type', None) deltas = {} # NOTE(carloss): If a new share type with a replication type was # specified, there is need to allocate quotas in the new share type. # We won't remove the current consumed quotas, since both share # instances will co-exist until the migration gets completed, # cancelled or it fails. 
if new_type_replication_type: deltas['share_replicas'] = 1 deltas['replica_gigabytes'] = share['size'] deltas.update({ 'share_type_id': new_share_type_id, 'shares': 1, 'gigabytes': share['size'] }) try: reservations = QUOTAS.reserve( context, project_id=share['project_id'], user_id=share['user_id'], **deltas) QUOTAS.commit( context, reservations, project_id=share['project_id'], user_id=share['user_id'], share_type_id=new_share_type_id) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'replica_gigabytes' in overs: LOG.warning("Replica gigabytes quota exceeded " "for %(s_pid)s, tried to migrate " "%(s_size)sG share (%(d_consumed)dG of " "%(d_quota)dG already consumed).", { 's_pid': context.project_id, 's_size': share['size'], 'd_consumed': _consumed( 'replica_gigabytes'), 'd_quota': quotas['replica_gigabytes']}) msg = _("Failed while migrating a share with replication " "support. Maximum number of allowed " "replica gigabytes is exceeded.") raise exception.ShareReplicaSizeExceedsAvailableQuota( message=msg) if 'share_replicas' in overs: LOG.warning("Quota exceeded for %(s_pid)s, " "unable to migrate share-replica (%(d_consumed)d " "of %(d_quota)d already consumed).", { 's_pid': context.project_id, 'd_consumed': _consumed('share_replicas'), 'd_quota': quotas['share_replicas']}) msg = _( "Failed while migrating a share with replication " "support. 
Maximum number of allowed share-replicas " "is exceeded.") raise exception.ShareReplicasLimitExceeded(msg) if 'gigabytes' in overs: LOG.warning("Quota exceeded for %(s_pid)s, " "tried to migrate " "%(s_size)sG share (%(d_consumed)dG of " "%(d_quota)dG already consumed).", { 's_pid': context.project_id, 's_size': share['size'], 'd_consumed': _consumed('gigabytes'), 'd_quota': quotas['gigabytes']}) raise exception.ShareSizeExceedsAvailableQuota() if 'shares' in overs: LOG.warning("Quota exceeded for %(s_pid)s, " "tried to migrate " "share (%(d_consumed)d shares " "already consumed).", { 's_pid': context.project_id, 'd_consumed': _consumed('shares')}) raise exception.ShareLimitExceeded(allowed=quotas['shares']) def migration_start( self, context, share, dest_host, force_host_assisted_migration, preserve_metadata, writable, nondisruptive, preserve_snapshots, new_share_network=None, new_share_type=None): """Migrates share to a new host.""" if force_host_assisted_migration and ( preserve_metadata or writable or nondisruptive or preserve_snapshots): msg = _('Invalid parameter combination. Cannot set parameters ' '"nondisruptive", "writable", "preserve_snapshots" or ' '"preserve_metadata" to True when enabling the ' '"force_host_assisted_migration" option.') LOG.error(msg) raise exception.InvalidInput(reason=msg) share_instance = share.instance # NOTE(gouthamr): Ensure share does not have replicas. # Currently share migrations are disallowed for replicated shares. if share.has_replicas: msg = _('Share %s has replicas. Remove the replicas before ' 'attempting to migrate the share.') % share['id'] LOG.error(msg) raise exception.Conflict(err=msg) # TODO(ganso): We do not support migrating shares in or out of groups # for now. if share.get('share_group_id'): msg = _('Share %s is a member of a group. 
This operation is not ' 'currently supported for shares that are members of ' 'groups.') % share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) # We only handle "available" share for now if share_instance['status'] != constants.STATUS_AVAILABLE: msg = _('Share instance %(instance_id)s status must be available, ' 'but current status is: %(instance_status)s.') % { 'instance_id': share_instance['id'], 'instance_status': share_instance['status']} raise exception.InvalidShare(reason=msg) # Access rules status must not be error if share_instance['access_rules_status'] == constants.STATUS_ERROR: msg = _('Share instance %(instance_id)s access rules status must ' 'not be in %(error)s when attempting to start a ' 'migration.') % { 'instance_id': share_instance['id'], 'error': constants.STATUS_ERROR} raise exception.InvalidShare(reason=msg) self._check_is_share_busy(share) if force_host_assisted_migration: # We only handle shares without snapshots for # host-assisted migration snaps = self.db.share_snapshot_get_all_for_share(context, share['id']) if snaps: msg = _("Share %s must not have snapshots when using " "host-assisted migration.") % share['id'] raise exception.Conflict(err=msg) dest_host_host = share_utils.extract_host(dest_host) # Make sure the host is in the list of available hosts utils.validate_service_host(context, dest_host_host) if new_share_type: share_type = new_share_type # ensure pass the size limitations in the share type size = share['size'] share_types.provision_filter_on_size(context, share_type, size) new_share_type_id = new_share_type['id'] dhss = share_type['extra_specs']['driver_handles_share_servers'] dhss = strutils.bool_from_string(dhss, strict=True) if (dhss and not new_share_network and not share_instance['share_network_id']): msg = _( "New share network must be provided when share type of" " given share %s has extra_spec " "'driver_handles_share_servers' as True.") % share['id'] raise exception.InvalidInput(reason=msg) 
self._modify_quotas_for_share_migration(context, share, new_share_type) else: share_type = {} share_type_id = share_instance['share_type_id'] if share_type_id: share_type = share_types.get_share_type(context, share_type_id) new_share_type_id = share_instance['share_type_id'] dhss = share_type['extra_specs']['driver_handles_share_servers'] dhss = strutils.bool_from_string(dhss, strict=True) if dhss: if new_share_network: new_share_network_id = new_share_network['id'] else: new_share_network_id = share_instance['share_network_id'] else: if new_share_network: msg = _( "New share network must not be provided when share type of" " given share %s has extra_spec " "'driver_handles_share_servers' as False.") % share['id'] raise exception.InvalidInput(reason=msg) new_share_network_id = None # Make sure the destination is different than the source if (new_share_network_id == share_instance['share_network_id'] and new_share_type_id == share_instance['share_type_id'] and dest_host == share_instance['host']): msg = ("Destination host (%(dest_host)s), share network " "(%(dest_sn)s) or share type (%(dest_st)s) are the same " "as the current host's '%(src_host)s', '%(src_sn)s' and " "'%(src_st)s' respectively. 
Nothing to be done.") % { 'dest_host': dest_host, 'dest_sn': new_share_network_id, 'dest_st': new_share_type_id, 'src_host': share_instance['host'], 'src_sn': share_instance['share_network_id'], 'src_st': share_instance['share_type_id'], } LOG.info(msg) self.db.share_update( context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) return 200 service = self.db.service_get_by_args( context, dest_host_host, 'manila-share') type_azs = share_type['extra_specs'].get('availability_zones', '') type_azs = [t for t in type_azs.split(',') if type_azs] if type_azs and service['availability_zone']['name'] not in type_azs: msg = _("Share %(shr)s cannot be migrated to host %(dest)s " "because share type %(type)s is not supported within the " "availability zone (%(az)s) that the host is in.") type_name = '%s' % (share_type['name'] or '') type_id = '(ID: %s)' % share_type['id'] payload = {'type': '%s%s' % (type_name, type_id), 'az': service['availability_zone']['name'], 'shr': share['id'], 'dest': dest_host} raise exception.InvalidShare(reason=msg % payload) request_spec = self._get_request_spec_dict( context, share, share_type, availability_zone_id=service['availability_zone_id'], share_network_id=new_share_network_id) self.db.share_update( context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_STARTING}) self.db.share_instance_update(context, share_instance['id'], {'status': constants.STATUS_MIGRATING}) self.scheduler_rpcapi.migrate_share_to_host( context, share['id'], dest_host, force_host_assisted_migration, preserve_metadata, writable, nondisruptive, preserve_snapshots, new_share_network_id, new_share_type_id, request_spec) return 202 def migration_complete(self, context, share): if share['task_state'] not in ( constants.TASK_STATE_DATA_COPYING_COMPLETED, constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): msg = self._migration_validate_error_message(share) if msg is None: msg = _("First migration phase of share %s not completed" " yet.") % 
share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) share_instance_id, new_share_instance_id = ( self.get_migrating_instances(share)) share_instance_ref = self.db.share_instance_get( context, share_instance_id, with_share_data=True) self.share_rpcapi.migration_complete(context, share_instance_ref, new_share_instance_id) def get_migrating_instances(self, share): share_instance_id = None new_share_instance_id = None for instance in share.instances: if instance['status'] == constants.STATUS_MIGRATING: share_instance_id = instance['id'] if instance['status'] == constants.STATUS_MIGRATING_TO: new_share_instance_id = instance['id'] if None in (share_instance_id, new_share_instance_id): msg = _("Share instances %(instance_id)s and " "%(new_instance_id)s in inconsistent states, cannot" " continue share migration for share %(share_id)s" ".") % {'instance_id': share_instance_id, 'new_instance_id': new_share_instance_id, 'share_id': share['id']} raise exception.ShareMigrationFailed(reason=msg) return share_instance_id, new_share_instance_id def migration_get_progress(self, context, share): if share['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): share_instance_id, migrating_instance_id = ( self.get_migrating_instances(share)) share_instance_ref = self.db.share_instance_get( context, share_instance_id, with_share_data=True) service_host = share_utils.extract_host(share_instance_ref['host']) service = self.db.service_get_by_args( context, service_host, 'manila-share') if utils.service_is_up(service): try: result = self.share_rpcapi.migration_get_progress( context, share_instance_ref, migrating_instance_id) except exception.InvalidShare: # reload to get the latest task_state share = self.db.share_get(context, share['id']) result = self._migration_get_progress_state(share) except Exception: msg = _("Failed to obtain migration progress of share " "%s.") % share['id'] LOG.exception(msg) raise exception.ShareMigrationError(reason=msg) else: 
result = None elif share['task_state'] == ( constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): data_rpc = data_rpcapi.DataAPI() LOG.info("Sending request to get share migration information" " of share %s.", share['id']) services = self.db.service_get_all_by_topic(context, 'manila-data') if len(services) > 0 and utils.service_is_up(services[0]): try: result = data_rpc.data_copy_get_progress( context, share['id']) except Exception: msg = _("Failed to obtain migration progress of share " "%s.") % share['id'] LOG.exception(msg) raise exception.ShareMigrationError(reason=msg) else: result = None else: result = self._migration_get_progress_state(share) if not (result and result.get('total_progress') is not None): msg = self._migration_validate_error_message(share) if msg is None: msg = _("Migration progress of share %s cannot be obtained at " "this moment.") % share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) return result def _migration_get_progress_state(self, share): task_state = share['task_state'] if task_state in (constants.TASK_STATE_MIGRATION_SUCCESS, constants.TASK_STATE_DATA_COPYING_ERROR, constants.TASK_STATE_MIGRATION_CANCELLED, constants.TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS, constants.TASK_STATE_MIGRATION_COMPLETING, constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_DATA_COPYING_COMPLETED, constants.TASK_STATE_DATA_COPYING_COMPLETING, constants.TASK_STATE_DATA_COPYING_CANCELLED, constants.TASK_STATE_MIGRATION_ERROR): return {'total_progress': 100} elif task_state in (constants.TASK_STATE_MIGRATION_STARTING, constants.TASK_STATE_MIGRATION_DRIVER_STARTING, constants.TASK_STATE_DATA_COPYING_STARTING, constants.TASK_STATE_MIGRATION_IN_PROGRESS): return {'total_progress': 0} else: return None def _migration_validate_error_message(self, resource, resource_type='share'): task_state = resource['task_state'] if task_state == constants.TASK_STATE_MIGRATION_SUCCESS: msg = _("Migration of %(resource_type)s %(resource_id)s has " 
"already completed.") % { 'resource_id': resource['id'], 'resource_type': resource_type} elif task_state in (None, constants.TASK_STATE_MIGRATION_ERROR): msg = _("There is no migration being performed for " "%(resource_type)s %(resource_id)s at this moment.") % { 'resource_id': resource['id'], 'resource_type': resource_type} elif task_state == constants.TASK_STATE_MIGRATION_CANCELLED: msg = _("Migration of %(resource_type)s %(resource_id)s was " "already cancelled.") % { 'resource_id': resource['id'], 'resource_type': resource_type} elif task_state in (constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_DATA_COPYING_COMPLETED): msg = _("Migration of %(resource_type)s %(resource_id)s has " "already completed first phase.") % { 'resource_id': resource['id'], 'resource_type': resource_type} else: return None return msg def migration_cancel(self, context, share): migrating = True if share['task_state'] in ( constants.TASK_STATE_DATA_COPYING_COMPLETED, constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): share_instance_id, migrating_instance_id = ( self.get_migrating_instances(share)) share_instance_ref = self.db.share_instance_get( context, share_instance_id, with_share_data=True) service_host = share_utils.extract_host(share_instance_ref['host']) service = self.db.service_get_by_args( context, service_host, 'manila-share') if utils.service_is_up(service): self.share_rpcapi.migration_cancel( context, share_instance_ref, migrating_instance_id) else: migrating = False elif share['task_state'] == ( constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): data_rpc = data_rpcapi.DataAPI() LOG.info("Sending request to cancel migration of " "share %s.", share['id']) services = self.db.service_get_all_by_topic(context, 'manila-data') if len(services) > 0 and utils.service_is_up(services[0]): try: data_rpc.data_copy_cancel(context, share['id']) except Exception: msg = _("Failed to cancel migration of share " "%s.") % 
share['id'] LOG.exception(msg) raise exception.ShareMigrationError(reason=msg) else: migrating = False else: migrating = False if not migrating: msg = self._migration_validate_error_message(share) if msg is None: msg = _("Migration of share %s cannot be cancelled at this " "moment.") % share['id'] LOG.error(msg) raise exception.InvalidShare(reason=msg) @policy.wrap_check_policy('share') def delete_snapshot(self, context, snapshot, force=False): statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR) if not (force or snapshot['aggregate_status'] in statuses): msg = _("Share Snapshot status must be one of %(statuses)s.") % { "statuses": statuses} raise exception.InvalidShareSnapshot(reason=msg) share = self.db.share_get(context, snapshot['share_id']) snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': snapshot['id']}) ) deferred_delete = CONF.is_deferred_deletion_enabled if force and deferred_delete: deferred_delete = False current_status = snapshot['aggregate_status'] if current_status not in (constants.STATUS_DEFERRED_DELETING, constants.STATUS_ERROR_DEFERRED_DELETING): new_status = constants.STATUS_DELETING if deferred_delete: new_status = constants.STATUS_DEFERRED_DELETING for snapshot_instance in snapshot_instances: self.db.share_snapshot_instance_update( context, snapshot_instance['id'], {'status': new_status}) if share['has_replicas']: self.share_rpcapi.delete_replicated_snapshot( context, snapshot, share['instance']['host'], share_id=share['id'], force=force) else: self.share_rpcapi.delete_snapshot( context, snapshot, share['instance']['host'], force=force, deferred_delete=deferred_delete) @policy.wrap_check_policy('share') def update(self, context, share, fields): return self.db.share_update(context, share['id'], fields) @policy.wrap_check_policy('share') def snapshot_update(self, context, snapshot, fields): return self.db.share_snapshot_update(context, snapshot['id'], fields) def get(self, context, 
share_id): share = self.db.share_get(context, share_id) if not share['is_public']: authorized = policy.check_policy( context, 'share', 'get', share, do_raise=False) if not authorized: raise exception.NotFound() if share['status'] in ( constants.STATUS_DEFERRED_DELETING, constants.STATUS_ERROR_DEFERRED_DELETING): policy_str = "list_shares_in_deferred_deletion_states" authorized = policy.check_policy( context, 'share', policy_str, share, do_raise=False) if not authorized: raise exception.NotFound() return share def get_all(self, context, search_opts=None, sort_key='created_at', sort_dir='desc'): return self._get_all(context, search_opts=search_opts, sort_key=sort_key, sort_dir=sort_dir) def get_all_with_count(self, context, search_opts=None, sort_key='created_at', sort_dir='desc'): return self._get_all(context, search_opts=search_opts, sort_key=sort_key, sort_dir=sort_dir, show_count=True) def _get_all(self, context, search_opts=None, sort_key='created_at', sort_dir='desc', show_count=False): if search_opts is None: search_opts = {} LOG.debug("Searching for shares by: %s", search_opts) # Prepare filters filters = {} filter_keys = [ 'display_name', 'share_group_id', 'display_name~', 'display_description', 'display_description~', 'snapshot_id', 'status', 'share_type_id', 'project_id', 'export_location_id', 'export_location_path', 'limit', 'offset', 'host', 'share_network_id', 'is_soft_deleted', 'mount_point_name', 'encryption_key_ref'] for key in filter_keys: if key in search_opts: filters[key] = search_opts.pop(key) if 'metadata' in search_opts: filters['metadata'] = search_opts.pop('metadata') if not isinstance(filters['metadata'], dict): msg = _("Wrong metadata filter provided: " "%s.") % filters['metadata'] raise exception.InvalidInput(reason=msg) if 'extra_specs' in search_opts: # Verify policy for extra-specs access policy.check_policy(context, 'share_types_extra_spec', 'index') filters['extra_specs'] = search_opts.pop('extra_specs') if not 
isinstance(filters['extra_specs'], dict): msg = _("Wrong extra specs filter provided: " "%s.") % filters['extra_specs'] raise exception.InvalidInput(reason=msg) if not (isinstance(sort_key, str) and sort_key): msg = _("Wrong sort_key filter provided: " "'%s'.") % sort_key raise exception.InvalidInput(reason=msg) if not (isinstance(sort_dir, str) and sort_dir): msg = _("Wrong sort_dir filter provided: " "'%s'.") % sort_dir raise exception.InvalidInput(reason=msg) is_public = search_opts.pop('is_public', False) is_public = strutils.bool_from_string(is_public, strict=True) get_methods = { 'get_by_share_server': ( self.db.share_get_all_by_share_server_with_count if show_count else self.db.share_get_all_by_share_server), 'get_all': ( self.db.share_get_all_with_count if show_count else self.db.share_get_all), 'get_all_by_project': ( self.db.share_get_all_by_project_with_count if show_count else self.db.share_get_all_by_project)} # check if user is querying with deferred states and forbid # users that aren't authorized to query shares in these states policy_str = "list_shares_in_deferred_deletion_states" do_raise = ('status' in filters and 'deferred' in filters['status']) show_deferred_deleted = policy.check_policy( context, 'share', policy_str, do_raise=do_raise) if show_deferred_deleted: filters['list_deferred_delete'] = True list_all_projects = False all_tenants = utils.is_all_tenants(search_opts) if all_tenants: list_all_projects = policy.check_policy( context, 'share', 'list_all_projects', do_raise=False) # Get filtered list of shares if 'host' in filters: policy.check_policy(context, 'share', 'list_by_host') if 'share_server_id' in search_opts: # NOTE(vponomaryov): this is project_id independent policy.check_policy(context, 'share', 'list_by_share_server_id') result = get_methods['get_by_share_server']( context, search_opts.pop('share_server_id'), filters=filters, sort_key=sort_key, sort_dir=sort_dir) elif list_all_projects: result = get_methods['get_all']( context, 
filters=filters, sort_key=sort_key, sort_dir=sort_dir) else: result = get_methods['get_all_by_project']( context, project_id=context.project_id, filters=filters, is_public=is_public, sort_key=sort_key, sort_dir=sort_dir) if show_count: count = result[0] shares = result[1] else: shares = result result = (count, shares) if show_count else shares return result def get_snapshot(self, context, snapshot_id): snapshot = self.db.share_snapshot_get(context, snapshot_id) authorized = policy.check_policy(context, 'share_snapshot', 'get_snapshot', snapshot, do_raise=False) if not authorized: raise exception.NotFound() if snapshot.get('status') in ( constants.STATUS_DEFERRED_DELETING, constants.STATUS_ERROR_DEFERRED_DELETING): policy_str = "list_snapshots_in_deferred_deletion_states" authorized = policy.check_policy( context, 'share_snapshot', policy_str, snapshot, do_raise=False) if not authorized: raise exception.NotFound() return snapshot def get_all_snapshots(self, context, search_opts=None, limit=None, offset=None, sort_key='share_id', sort_dir='desc'): return self._get_all_snapshots(context, search_opts=search_opts, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) def get_all_snapshots_with_count(self, context, search_opts=None, limit=None, offset=None, sort_key='share_id', sort_dir='desc'): return self._get_all_snapshots(context, search_opts=search_opts, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir, show_count=True) def _get_all_snapshots(self, context, search_opts=None, limit=None, offset=None, sort_key='share_id', sort_dir='desc', show_count=False): policy.check_policy(context, 'share_snapshot', 'get_all_snapshots') search_opts = search_opts or {} LOG.debug("Searching for snapshots by: %s", search_opts) # Read and remove key 'all_tenants' if was provided list_all_projects = False all_tenants = utils.is_all_tenants(search_opts) if all_tenants: search_opts.pop('all_tenants', None) list_all_projects = policy.check_policy( context, 
'share_snapshot', 'list_all_projects', do_raise=False) string_args = {'sort_key': sort_key, 'sort_dir': sort_dir} string_args.update(search_opts) for k, v in string_args.items(): if not (isinstance(v, str) and v) and k != 'metadata': msg = _("Wrong '%(k)s' filter provided: " "'%(v)s'.") % {'k': k, 'v': string_args[k]} raise exception.InvalidInput(reason=msg) # check if user is querying with deferred states and forbid # users that aren't authorized to query shares in these states policy_str = "list_snapshots_in_deferred_deletion_states" do_raise = ('status' in search_opts and 'deferred' in search_opts['status']) show_deferred_deleted = policy.check_policy( context, 'share_snapshot', policy_str, do_raise=do_raise) if show_deferred_deleted: search_opts['list_deferred_delete'] = True get_methods = { 'get_all': ( self.db.share_snapshot_get_all_with_count if show_count else self.db.share_snapshot_get_all), 'get_all_by_project': ( self.db.share_snapshot_get_all_by_project_with_count if show_count else self.db.share_snapshot_get_all_by_project)} if list_all_projects: result = get_methods['get_all']( context, filters=search_opts, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) else: result = get_methods['get_all_by_project']( context, context.project_id, filters=search_opts, limit=limit, offset=offset, sort_key=sort_key, sort_dir=sort_dir) if show_count: count = result[0] snapshots = result[1] else: snapshots = result result = (count, snapshots) if show_count else snapshots return result def get_latest_snapshot_for_share(self, context, share_id): """Get the newest snapshot of a share.""" return self.db.share_snapshot_get_latest_for_share(context, share_id) @staticmethod def _any_invalid_share_instance(share, allow_on_error_state=False): invalid_states = ( constants.INVALID_SHARE_INSTANCE_STATUSES_FOR_ACCESS_RULE_UPDATES) if not allow_on_error_state: invalid_states += (constants.STATUS_ERROR,) for instance in share.instances: if (not instance['host'] or 
instance['status'] in invalid_states): return True return False def allow_access(self, ctx, share, access_type, access_to, access_level=None, metadata=None, allow_on_error_state=False): """Allow access to share.""" # Access rule validation: if access_level not in constants.ACCESS_LEVELS + (None, ): msg = _("Invalid share access level: %s.") % access_level raise exception.InvalidShareAccess(reason=msg) api_common.check_metadata_properties(metadata) access_exists = self.db.share_access_check_for_existing_access( ctx, share['id'], access_type, access_to) if access_exists: raise exception.ShareAccessExists(access_type=access_type, access=access_to) if self._any_invalid_share_instance(share, allow_on_error_state): msg = _("New access rules cannot be applied while the share or " "any of its replicas or migration copies lacks a valid " "host or is in an invalid state.") raise exception.InvalidShare(message=msg) values = { 'share_id': share['id'], 'access_type': access_type, 'access_to': access_to, 'access_level': access_level, 'metadata': metadata, } access = self.db.share_access_create(ctx, values) for share_instance in share.instances: self.allow_access_to_instance(ctx, share_instance) return access def allow_access_to_instance(self, context, share_instance): self._conditionally_transition_share_instance_access_rules_status( context, share_instance) self.share_rpcapi.update_access(context, share_instance) def _conditionally_transition_share_instance_access_rules_status( self, context, share_instance): conditionally_change = { constants.STATUS_ACTIVE: constants.SHARE_INSTANCE_RULES_SYNCING, } self.access_helper.get_and_update_share_instance_access_rules_status( context, conditionally_change=conditionally_change, share_instance_id=share_instance['id']) def update_access(self, ctx, share, access, values): if self._any_invalid_share_instance(share, allow_on_error_state=True): msg = _("Access rules cannot be updated while the share, " "any of its replicas or migration copies 
lacks a valid " "host or is in an invalid state.") raise exception.InvalidShare(message=msg) access = self.db.share_access_update(ctx, access['id'], values) for share_instance in share.instances: self.update_access_to_instance(ctx, share_instance, access) return access def update_access_to_instance(self, context, share_instance, access): self._conditionally_transition_share_instance_access_rules_status( context, share_instance) updates = {'state': constants.ACCESS_STATE_QUEUED_TO_UPDATE} self.access_helper.get_and_update_share_instance_access_rule( context, access['id'], updates=updates, share_instance_id=share_instance['id']) self.share_rpcapi.update_access(context, share_instance) def deny_access(self, ctx, share, access, allow_on_error_state=False): """Deny access to share.""" if self._any_invalid_share_instance(share, allow_on_error_state): msg = _("Access rules cannot be denied while the share, " "any of its replicas or migration copies lacks a valid " "host or is in an invalid state.") raise exception.InvalidShare(message=msg) for share_instance in share.instances: self.deny_access_to_instance(ctx, share_instance, access) def deny_access_to_instance(self, context, share_instance, access): self._conditionally_transition_share_instance_access_rules_status( context, share_instance) updates = {'state': constants.ACCESS_STATE_QUEUED_TO_DENY} self.access_helper.get_and_update_share_instance_access_rule( context, access['id'], updates=updates, share_instance_id=share_instance['id']) self.share_rpcapi.update_access(context, share_instance) def access_get_all(self, context, share, filters=None): """Returns all access rules for share.""" policy.check_policy(context, 'share', 'access_get_all') rules = self.db.share_access_get_all_for_share( context, share['id'], filters=filters) return rules def access_get(self, context, access_id): """Returns access rule with the id.""" policy.check_policy(context, 'share', 'access_get') rule = self.db.share_access_get(context, 
access_id) # NOTE(gouthamr): Check if the caller has access to the share that # the rule belongs to: self.get(context, rule['share_id']) return rule def _validate_scheduler_hints(self, context, share, share_uuids): for uuid in share_uuids: if not uuidutils.is_uuid_like(uuid): raise exception.InvalidUUID(uuid=uuid) try: self.get(context, uuid) except (exception.NotFound, exception.PolicyNotAuthorized): raise exception.ShareNotFound(share_id=uuid) def _save_scheduler_hints(self, context, share, share_uuids, key): share_uuids = share_uuids.split(",") self._validate_scheduler_hints(context, share, share_uuids) val_uuids = None for uuid in share_uuids: try: result = self.db.share_metadata_get_item(context, uuid, key) except exception.MetadataItemNotFound: item = {key: share['id']} else: existing_uuids = result.get(key, "") item = {key: ','.join(existing_uuids.split(',') + [share['id']])} self.db.share_metadata_update_item(context, uuid, item) if not val_uuids: val_uuids = uuid else: val_uuids = val_uuids + "," + uuid if val_uuids: item = {key: val_uuids} self.db.share_metadata_update_item(context, share['id'], item) def save_scheduler_hints(self, context, share, scheduler_hints=None): if scheduler_hints is None: return same_host_uuids = scheduler_hints.get(AFFINITY_HINT, None) different_host_uuids = scheduler_hints.get(ANTI_AFFINITY_HINT, None) if same_host_uuids: self._save_scheduler_hints(context, share, same_host_uuids, AFFINITY_KEY) if different_host_uuids: self._save_scheduler_hints(context, share, different_host_uuids, ANTI_AFFINITY_KEY) def _delete_scheduler_hints(self, context, share, key): try: result = self.db.share_metadata_get_item(context, share['id'], key) except exception.MetadataItemNotFound: return share_uuids = result.get(key, "").split(",") for uuid in share_uuids: try: result = self.db.share_metadata_get_item(context, uuid, key) except exception.MetadataItemNotFound: continue new_val_uuids = [val_uuid for val_uuid in result.get(key, "").split(",") if 
val_uuid != share['id']] if not new_val_uuids: self.db.share_metadata_delete(context, uuid, key) else: item = {key: ','.join(new_val_uuids)} self.db.share_metadata_update_item(context, uuid, item) self.db.share_metadata_delete(context, share['id'], key) def delete_scheduler_hints(self, context, share): self._delete_scheduler_hints(context, share, AFFINITY_KEY) self._delete_scheduler_hints(context, share, ANTI_AFFINITY_KEY) def _check_is_share_busy(self, share): """Raises an exception if share is busy with an active task.""" if share.is_busy: msg = _("Share %(share_id)s is busy as part of an active " "task: %(task)s.") % { 'share_id': share['id'], 'task': share['task_state'] } raise exception.ShareBusyException(reason=msg) @staticmethod def check_is_share_size_within_per_share_quota_limit(context, size): """Raises an exception if share size above per share quota limit.""" try: values = {'per_share_gigabytes': size} QUOTAS.limit_check(context, project_id=context.project_id, **values) except exception.OverQuota as e: quotas = e.kwargs['quotas'] raise exception.ShareSizeExceedsLimit( size=size, limit=quotas['per_share_gigabytes']) def update_share_access_metadata(self, context, access_id, metadata): """Updates share access metadata.""" try: api_common.check_metadata_properties(metadata) except exception.InvalidMetadata: raise exception.InvalidMetadata() except exception.InvalidMetadataSize: raise exception.InvalidMetadataSize() return self.db.share_access_metadata_update( context, access_id, metadata) def get_share_network(self, context, share_net_id): return self.db.share_network_get(context, share_net_id) def extend(self, context, share, new_size, force=False): if force: policy.check_policy(context, 'share', 'force_extend', share) else: policy.check_policy(context, 'share', 'extend', share) if share['status'] != constants.STATUS_AVAILABLE: msg_params = { 'valid_status': constants.STATUS_AVAILABLE, 'share_id': share['id'], 'status': share['status'], } msg = _("Share 
%(share_id)s status must be '%(valid_status)s' " "to extend, but current status is: " "%(status)s.") % msg_params raise exception.InvalidShare(reason=msg) self._check_is_share_busy(share) size_increase = int(new_size) - share['size'] if size_increase <= 0: msg = (_("New size for extend must be greater " "than current size. (current: %(size)s, " "extended: %(new_size)s).") % {'new_size': new_size, 'size': share['size']}) raise exception.InvalidInput(reason=msg) self.check_is_share_size_within_per_share_quota_limit(context, new_size) # ensure we pass the share_type provisioning filter on size try: share_type = share_types.get_share_type( context, share['instance']['share_type_id']) except (exception.InvalidShareType, exception.ShareTypeNotFound): share_type = None allowed_to_extend_past_max_share_size = policy.check_policy( context, 'share', constants.POLICY_EXTEND_BEYOND_MAX_SHARE_SIZE, target_obj=share, do_raise=False) if allowed_to_extend_past_max_share_size: share_types.provision_filter_on_size(context, share_type, new_size, operation='admin-extend') else: share_types.provision_filter_on_size(context, share_type, new_size, operation='extend') replicas = self.db.share_replicas_get_all_by_share( context, share['id']) supports_replication = len(replicas) > 0 deltas = { 'project_id': share['project_id'], 'gigabytes': size_increase, 'user_id': share['user_id'], 'share_type_id': share['instance']['share_type_id'] } # NOTE(carloss): If the share type supports replication, we must get # all the replicas that pertain to the share and calculate the final # size (size to increase * amount of replicas), since all the replicas # are going to be extended when the driver sync them. 
if supports_replication: replica_gigs_to_increase = len(replicas) * size_increase deltas.update({'replica_gigabytes': replica_gigs_to_increase}) try: # we give the user_id of the share, to update the quota usage # for the user, who created the share, because on share delete # only this quota will be decreased reservations = QUOTAS.reserve(context, **deltas) except exception.OverQuota as exc: # Check if the exceeded quota was 'gigabytes' self.check_if_share_quotas_exceeded(context, exc, share['size'], operation='extend') # NOTE(carloss): Check if the exceeded quota is # 'replica_gigabytes'. If so the failure could be caused due to # lack of quotas to extend the share's replicas, then the # 'check_if_replica_quotas_exceeded' method can't be reused here # since the error message must be different from the default one. if supports_replication: overs = exc.kwargs['overs'] usages = exc.kwargs['usages'] quotas = exc.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'replica_gigabytes' in overs: LOG.warning("Replica gigabytes quota exceeded " "for %(s_pid)s, tried to extend " "%(s_size)sG share (%(d_consumed)dG of " "%(d_quota)dG already consumed).", { 's_pid': context.project_id, 's_size': share['size'], 'd_consumed': _consumed( 'replica_gigabytes'), 'd_quota': quotas['replica_gigabytes']}) msg = _("Failed while extending a share with replication " "support. There is no available quota to extend " "the share and its %(count)d replicas. 
Maximum " "number of allowed replica_gigabytes is " "exceeded.") % {'count': len(replicas)} raise exception.ShareReplicaSizeExceedsAvailableQuota( message=msg) self.update(context, share, {'status': constants.STATUS_EXTENDING}) if force: self.share_rpcapi.extend_share(context, share, new_size, reservations) else: share_type = share_types.get_share_type( context, share['instance']['share_type_id']) request_spec = self._get_request_spec_dict(context, share, share_type) request_spec.update({'is_share_extend': True}) self.scheduler_rpcapi.extend_share(context, share['id'], new_size, reservations, request_spec) LOG.info("Extend share request issued successfully.", resource=share) def shrink(self, context, share, new_size): status = str(share['status']).lower() valid_statuses = (constants.STATUS_AVAILABLE, constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR) if status not in valid_statuses: msg_params = { 'valid_status': ", ".join(valid_statuses), 'share_id': share['id'], 'status': status, } msg = _("Share %(share_id)s status must in (%(valid_status)s) " "to shrink, but current status is: " "%(status)s.") % msg_params raise exception.InvalidShare(reason=msg) self._check_is_share_busy(share) size_decrease = int(share['size']) - int(new_size) if size_decrease <= 0 or new_size <= 0: msg = (_("New size for shrink must be less " "than current size and greater than 0 (current: %(size)s," " new: %(new_size)s)") % {'new_size': new_size, 'size': share['size']}) raise exception.InvalidInput(reason=msg) # ensure we pass the share_type provisioning filter on size try: share_type = share_types.get_share_type( context, share['instance']['share_type_id']) except (exception.InvalidShareType, exception.ShareTypeNotFound): share_type = None share_types.provision_filter_on_size(context, share_type, new_size, operation='shrink') self.update(context, share, {'status': constants.STATUS_SHRINKING}) self.share_rpcapi.shrink_share(context, share, new_size) LOG.info("Shrink share (id=%(id)s) 
request issued successfully." " New size: %(size)s", {'id': share['id'], 'size': new_size}) def snapshot_allow_access(self, context, snapshot, access_type, access_to): """Allow access to a share snapshot.""" access_exists = self.db.share_snapshot_check_for_existing_access( context, snapshot['id'], access_type, access_to) if access_exists: raise exception.ShareSnapshotAccessExists(access_type=access_type, access=access_to) values = { 'share_snapshot_id': snapshot['id'], 'access_type': access_type, 'access_to': access_to, } if any((instance['status'] != constants.STATUS_AVAILABLE) or (instance['share_instance']['host'] is None) for instance in snapshot.instances): msg = _("New access rules cannot be applied while the snapshot or " "any of its replicas or migration copies lacks a valid " "host or is not in %s state.") % constants.STATUS_AVAILABLE raise exception.InvalidShareSnapshotInstance(reason=msg) access = self.db.share_snapshot_access_create(context, values) for snapshot_instance in snapshot.instances: self.share_rpcapi.snapshot_update_access( context, snapshot_instance) return access def snapshot_deny_access(self, context, snapshot, access): """Deny access to a share snapshot.""" if any((instance['status'] != constants.STATUS_AVAILABLE) or (instance['share_instance']['host'] is None) for instance in snapshot.instances): msg = _("Access rules cannot be denied while the snapshot or " "any of its replicas or migration copies lacks a valid " "host or is not in %s state.") % constants.STATUS_AVAILABLE raise exception.InvalidShareSnapshotInstance(reason=msg) for snapshot_instance in snapshot.instances: rule = self.db.share_snapshot_instance_access_get( context, access['id'], snapshot_instance['id']) self.db.share_snapshot_instance_access_update( context, rule['access_id'], snapshot_instance['id'], {'state': constants.ACCESS_STATE_QUEUED_TO_DENY}) self.share_rpcapi.snapshot_update_access( context, snapshot_instance) def snapshot_access_get_all(self, context, 
snapshot): """Returns all access rules for share snapshot.""" rules = self.db.share_snapshot_access_get_all_for_share_snapshot( context, snapshot['id'], {}) return rules def snapshot_access_get(self, context, access_id): """Returns snapshot access rule with the id.""" rule = self.db.share_snapshot_access_get(context, access_id) return rule def snapshot_export_locations_get(self, context, snapshot): return self.db.share_snapshot_export_locations_get(context, snapshot) def snapshot_export_location_get(self, context, el_id): return self.db.share_snapshot_instance_export_location_get(context, el_id) def share_server_migration_get_destination(self, context, source_server_id, status=None): """Returns destination share server for a share server migration.""" filters = {'source_share_server_id': source_server_id} if status: filters.update({'status': status}) dest_share_servers = self.db.share_server_get_all_with_filters( context, filters=filters) if not dest_share_servers: msg = _("A destination share server wasn't found for source " "share server %s.") % source_server_id raise exception.InvalidShareServer(reason=msg) if len(dest_share_servers) > 1: msg = _("More than one destination share server was found for " "source share server %s. 
Aborting...") % source_server_id raise exception.InvalidShareServer(reason=msg) return dest_share_servers[0] def get_share_server_migration_request_spec_dict( self, context, share_instances, snapshot_instances, **kwargs): """Returns request specs related to share server and all its shares.""" shares_total_size = sum([instance.get('size', 0) for instance in share_instances]) snapshots_total_size = sum([instance.get('size', 0) for instance in snapshot_instances]) shares_req_spec = [] for share_instance in share_instances: share_type_id = share_instance['share_type_id'] share_type = share_types.get_share_type(context, share_type_id) req_spec = self._get_request_spec_dict(context, share_instance, share_type, **kwargs) shares_req_spec.append(req_spec) server_request_spec = { 'shares_size': shares_total_size, 'snapshots_size': snapshots_total_size, 'shares_req_spec': shares_req_spec, } return server_request_spec def _migration_initial_checks(self, context, share_server, dest_host, new_share_network): shares = self.db.share_get_all_by_share_server( context, share_server['id']) shares_in_recycle_bin = self.db.share_get_all_soft_deleted( context, share_server['id']) if len(shares) == 0: msg = _("Share server %s does not have shares." % share_server['id']) raise exception.InvalidShareServer(reason=msg) if shares_in_recycle_bin: msg = _("Share server %s has at least one share that has " "been soft deleted." 
% share_server['id']) raise exception.InvalidShareServer(reason=msg) # We only handle "active" share servers for now if share_server['status'] != constants.STATUS_ACTIVE: msg = _('Share server %(server_id)s status must be active, ' 'but current status is: %(server_status)s.') % { 'server_id': share_server['id'], 'server_status': share_server['status']} raise exception.InvalidShareServer(reason=msg) share_groups_related_to_share_server = ( self.db.share_group_get_all_by_share_server( context, share_server['id'])) if share_groups_related_to_share_server: msg = _("The share server %s can not be migrated because it is " "related to a share group.") % share_server['id'] raise exception.InvalidShareServer(reason=msg) # Same backend and same network, nothing changes src_backend = share_utils.extract_host(share_server['host'], level='backend_name') dest_backend = share_utils.extract_host(dest_host, level='backend_name') current_share_network_id = shares[0]['instance']['share_network_id'] if (src_backend == dest_backend and (new_share_network is None or new_share_network['id'] == current_share_network_id)): msg = _('There is no difference between source and destination ' 'backends and between source and destination share ' 'networks. Share server migration will not proceed.') raise exception.InvalidShareServer(reason=msg) filters = {'source_share_server_id': share_server['id'], 'status': constants.STATUS_SERVER_MIGRATING_TO} dest_share_servers = self.db.share_server_get_all_with_filters( context, filters=filters) if len(dest_share_servers): msg = _("There is at least one destination share server pointing " "to this source share server. 
Clean up your environment " "before starting a new migration.") raise exception.InvalidShareServer(reason=msg) dest_service_host = share_utils.extract_host(dest_host) # Make sure the host is in the list of available hosts utils.validate_service_host(context, dest_service_host) service = self.db.service_get_by_args( context, dest_service_host, 'manila-share') # Get all share types type_ids = set([share['instance']['share_type_id'] for share in shares]) types = [share_types.get_share_type(context, type_id) for type_id in type_ids] # Check if share type azs are supported by the destination host for share_type in types: azs = share_type['extra_specs'].get('availability_zones', '') if azs and service['availability_zone']['name'] not in azs: msg = _("Share server %(server)s cannot be migrated to host " "%(dest)s because the share type %(type)s is used by " "one of the shares, and this share type is not " "supported within the availability zone (%(az)s) that " "the host is in.") type_name = '%s' % (share_type['name'] or '') type_id = '(ID: %s)' % share_type['id'] payload = {'type': '%s%s' % (type_name, type_id), 'az': service['availability_zone']['name'], 'server': share_server['id'], 'dest': dest_host} raise exception.InvalidShareServer(reason=msg % payload) if new_share_network: new_share_network_id = new_share_network['id'] else: new_share_network_id = shares[0]['instance']['share_network_id'] # NOTE(carloss): check if the new or old share network has a subnet # that spans the availability zone of the destination host, otherwise # we should deny this operation. 
dest_az = self.db.availability_zone_get( context, service['availability_zone']['name']) compatible_subnets = ( self.db.share_network_subnets_get_all_by_availability_zone_id( context, new_share_network_id, dest_az['id'])) if not compatible_subnets: msg = _("The share network %(network)s does not have a subnet " "that spans the destination host availability zone.") payload = {'network': new_share_network_id} raise exception.InvalidShareServer(reason=msg % payload) net_changes_identified = False if new_share_network: net_changes_identified = not share_utils.is_az_subnets_compatible( share_server['share_network_subnets'], compatible_subnets) # NOTE(carloss): Refreshing the list of shares since something could've # changed from the initial list. shares = self.db.share_get_all_by_share_server( context, share_server['id']) for share in shares: if share['status'] != constants.STATUS_AVAILABLE: msg = _('Share %(share_id)s status must be available, ' 'but current status is: %(share_status)s.') % { 'share_id': share['id'], 'share_status': share['status']} raise exception.InvalidShareServer(reason=msg) if (not share_server.get( 'share_replicas_migration_support', False) and share.has_replicas): msg = _('Share %s has replicas. Remove the replicas of all ' 'shares in the share server before attempting to ' 'migrate it.') % share['id'] LOG.error(msg) raise exception.InvalidShareServer(reason=msg) # NOTE(carloss): Not validating the flag preserve_snapshots at this # point, considering that even if the admin set the value to False, # the driver can still support preserving snapshots and the # snapshots would be copied anyway. So the share/manager will be # responsible for checking if the driver does not support snapshot # preservation, and if there are snapshots in the share server. 
share_snapshots = self.db.share_snapshot_get_all_for_share( context, share['id']) all_snapshots_are_available = all( [snapshot['status'] == constants.STATUS_AVAILABLE for snapshot in share_snapshots]) if not all_snapshots_are_available: msg = _( "All snapshots must have '%(status)s' status to be " "migrated by the driver along with share " "%(resource_id)s.") % { 'resource_id': share['id'], 'status': constants.STATUS_AVAILABLE, } LOG.error(msg) raise exception.InvalidShareServer(reason=msg) if share.get('share_group_id'): msg = _('Share %s is a member of a group. This operation is ' 'not currently supported for share servers that ' 'contain shares members of groups.') % share['id'] LOG.error(msg) raise exception.InvalidShareServer(reason=msg) share_instance = share['instance'] # Access rules status must not be error if share_instance['access_rules_status'] == constants.STATUS_ERROR: msg = _( 'Share instance %(instance_id)s access rules status must ' 'not be in %(error)s when attempting to start a share ' 'server migration.') % { 'instance_id': share_instance['id'], 'error': constants.STATUS_ERROR} raise exception.InvalidShareServer(reason=msg) try: self._check_is_share_busy(share) except exception.ShareBusyException as e: raise exception.InvalidShareServer(reason=e.msg) return ( shares, types, service, new_share_network_id, net_changes_identified) def share_server_migration_check(self, context, share_server, dest_host, writable, nondisruptive, preserve_snapshots, new_share_network=None): """Migrates share server to a new host.""" shares, types, service, new_share_network_id, net_params_changed = ( self._migration_initial_checks(context, share_server, dest_host, new_share_network)) # If a nondisruptive migration was requested and different neutron net # id and neutron subnet ids were identified if net_params_changed and nondisruptive: result = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_snapshots': False, 'migration_cancel': False, 
'migration_get_progress': False, 'share_network_id': new_share_network_id } return result # NOTE(dviroel): Service is up according to validations made on initial # checks result = self.share_rpcapi.share_server_migration_check( context, share_server['id'], dest_host, writable, nondisruptive, preserve_snapshots, new_share_network_id) # NOTE(carloss): In case users haven't requested a nondisruptive # migration and a network change was identified, we must get the # driver's check result and if there is need to, manipulate it. # The result is provided by the driver and based on the back end # possibility to perform a nondisruptive migration or not. If # a network change was provided, we know that the migration will be # disruptive, so in order to do not confuse the user, we must present # the share server migration as disruptive if result.get('nondisruptive') and net_params_changed: result['nondisruptive'] = False return result def share_server_migration_start( self, context, share_server, dest_host, writable, nondisruptive, preserve_snapshots, new_share_network=None): """Migrates share server to a new host.""" shares, types, service, new_share_network_id, net_params_changed = ( self._migration_initial_checks(context, share_server, dest_host, new_share_network)) if nondisruptive and net_params_changed: msg = _("Nondisruptive migration would only be feasible when the " "current and new share networks carry the same " "'neutron_net_id' and 'neutron_subnet_id', or when no " "network changes are occurring.") raise exception.InvalidInput(reason=msg) # Updates the share server status to migration starting self.db.share_server_update( context, share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_STARTING, 'status': constants.STATUS_SERVER_MIGRATING}) share_instances = self.db.share_instance_get_all_by_share_server( context, share_server['id']) share_instance_ids = [ share_instance['id'] for share_instance in share_instances] snap_instances = 
self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_instance_ids}) snapshot_instance_ids = [ snap_instance['id'] for snap_instance in snap_instances] # Updates all shares and snapshot instances self.db.share_and_snapshot_instances_status_update( context, {'status': constants.STATUS_SERVER_MIGRATING}, share_instance_ids=share_instance_ids, snapshot_instance_ids=snapshot_instance_ids, current_expected_status=constants.STATUS_AVAILABLE ) # NOTE(dviroel): Service is up according to validations made on initial # checks self.share_rpcapi.share_server_migration_start( context, share_server, dest_host, writable, nondisruptive, preserve_snapshots, new_share_network_id) def share_server_migration_complete(self, context, share_server): """Invokes 2nd phase of share server migration.""" if share_server['status'] != constants.STATUS_SERVER_MIGRATING: msg = _("Share server %s is not migrating") % share_server['id'] LOG.error(msg) raise exception.InvalidShareServer(reason=msg) if (share_server['task_state'] != constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): msg = _("The first phase of migration has to finish to " "request the completion of server %s's " "migration.") % share_server['id'] LOG.error(msg) raise exception.InvalidShareServer(reason=msg) dest_share_server = self.share_server_migration_get_destination( context, share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO ) dest_host = share_utils.extract_host(dest_share_server['host']) utils.validate_service_host(context, dest_host) self.share_rpcapi.share_server_migration_complete( context, dest_share_server['host'], share_server, dest_share_server) return { 'destination_share_server_id': dest_share_server['id'] } def share_server_migration_cancel(self, context, share_server): """Attempts to cancel share server migration.""" if share_server['status'] != constants.STATUS_SERVER_MIGRATING: msg = _("Migration of share server %s cannot be cancelled because " "the provided 
share server is not being migrated." % (share_server['id'])) LOG.error(msg) raise exception.InvalidShareServer(reason=msg) if share_server['task_state'] in ( constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): dest_share_server = self.share_server_migration_get_destination( context, share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO ) dest_host = share_utils.extract_host(dest_share_server['host']) utils.validate_service_host(context, dest_host) self.share_rpcapi.share_server_migration_cancel( context, dest_share_server['host'], share_server, dest_share_server) else: msg = self._migration_validate_error_message( share_server, resource_type='share_server') if msg is None: msg = _("Migration of share server %s can be cancelled only " "after the driver already started the migration, or " "when the first phase of the migration gets " "completed.") % share_server['id'] LOG.error(msg) raise exception.InvalidShareServer(reason=msg) def share_server_migration_get_progress(self, context, src_share_server_id): """Retrieve migration progress for a given share server.""" try: share_server = self.db.share_server_get(context, src_share_server_id) except exception.ShareServerNotFound: msg = _('Share server %s was not found. We will search for a ' 'successful migration') % src_share_server_id LOG.debug(msg) # Search for a successful migration, raise an error if not found dest_share_server = self.share_server_migration_get_destination( context, src_share_server_id, status=constants.STATUS_ACTIVE ) return { 'total_progress': 100, 'destination_share_server_id': dest_share_server['id'], 'task_state': dest_share_server['task_state'], } # Source server still exists so it must be in 'server_migrating' status if share_server['status'] != constants.STATUS_SERVER_MIGRATING: msg = _("Migration progress of share server %s cannot be " "obtained. 
The provided share server is not being " "migrated.") % share_server['id'] LOG.error(msg) raise exception.InvalidShareServer(reason=msg) try: dest_share_server = self.share_server_migration_get_destination( context, share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO ) except Exception: msg = ("Migration progress of share server %s cannot be " "determined yet. Please retry the migration get " "progress operation.") % share_server['id'] LOG.info(msg) result = { 'destination_share_server_id': '', 'task_state': '' } result.update(self._migration_get_progress_state(share_server)) return result if (share_server['task_state'] == constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): dest_host = share_utils.extract_host(dest_share_server['host']) utils.validate_service_host(context, dest_host) try: result = ( self.share_rpcapi.share_server_migration_get_progress( context, dest_share_server['host'], share_server, dest_share_server)) except Exception: msg = _("Failed to obtain migration progress of share " "server %s.") % share_server['id'] LOG.exception(msg) raise exception.ShareServerMigrationError(reason=msg) else: result = self._migration_get_progress_state(share_server) if not (result and result.get('total_progress') is not None): msg = self._migration_validate_error_message( share_server, resource_type='share_server') if msg is None: msg = _("Migration progress of share server %s cannot be " "obtained at this moment.") % share_server['id'] LOG.error(msg) raise exception.InvalidShareServer(reason=msg) result.update({ 'destination_share_server_id': dest_share_server['id'], 'task_state': dest_share_server['task_state'] }) return result def _share_network_update_initial_checks(self, context, share_network, new_security_service, current_security_service=None): api_common.check_share_network_is_active(share_network) if not current_security_service: # Since we are adding a new security service, we can't have one # of the same type already associated with this share 
network for attached_service in share_network['security_services']: if attached_service['type'] == new_security_service['type']: msg = _("Cannot add security service to share network. " "Security service with '%(ss_type)s' type already " "added to '%(sn_id)s' share network") % { 'ss_type': new_security_service['type'], 'sn_id': share_network['id'] } raise exception.InvalidSecurityService(reason=msg) else: # Validations needed only for update operation current_service_is_associated = ( self.db.share_network_security_service_association_get( context, share_network['id'], current_security_service['id'])) if not current_service_is_associated: msg = _("The specified current security service %(service)s " "is not associated to the share network %(network)s." ) % { 'service': current_security_service['id'], 'network': share_network['id'] } raise exception.InvalidSecurityService(reason=msg) if (current_security_service['type'] != new_security_service['type']): msg = _("A security service can only be replaced by one of " "the same type. The current security service type is " "'%(ss_type)s' and the new security service type is " "'%(new_ss_type)s'") % { 'ss_type': current_security_service['type'], 'new_ss_type': new_security_service['type'], 'sn_id': share_network['id'] } raise exception.InvalidSecurityService(reason=msg) share_servers = set() for subnet in share_network['share_network_subnets']: if subnet['share_servers']: share_servers.update(subnet['share_servers']) backend_hosts = set() if share_servers: if not share_network['security_service_update_support']: msg = _("Updating security services is not supported on this " "share network (%(sn_id)s) while it has shares. 
" "See the capability " "'security_service_update_support'.") % { "sn_id": share_network["id"] } raise exception.InvalidShareNetwork(reason=msg) # We can only handle "active" share servers for now for share_server in share_servers: if share_server['status'] != constants.STATUS_ACTIVE: msg = _('Some resources exported on share network ' '%(shar_net_id)s are not currently available.') % { 'shar_net_id': share_network['id'] } raise exception.InvalidShareNetwork(reason=msg) # Create a set of backend hosts backend_hosts.add(share_server['host']) for backend_host in backend_hosts: # We need an admin context to validate these hosts admin_ctx = manila_context.get_admin_context() # Make sure the host is in the list of available hosts utils.validate_service_host(admin_ctx, backend_host) shares_in_recycle_bin = ( self.db.share_get_all_soft_deleted_by_network( context, share_network['id'])) if shares_in_recycle_bin: msg = _("Some shares with share network %(sn_id)s have " "been soft deleted.") % {'sn_id': share_network['id']} raise exception.InvalidShareNetwork(reason=msg) shares = self.get_all( context, search_opts={'share_network_id': share_network['id']}) shares_not_available = [ share['id'] for share in shares if share['status'] != constants.STATUS_AVAILABLE] if shares_not_available: msg = _("Some shares exported on share network %(sn_id)s are " "not available: %(share_ids)s.") % { 'sn_id': share_network['id'], 'share_ids': shares_not_available, } raise exception.InvalidShareNetwork(reason=msg) shares_rules_not_available = [ share['id'] for share in shares if share['instance'][ 'access_rules_status'] != constants.STATUS_ACTIVE] if shares_rules_not_available: msg = _( "Either these shares or one of their replicas or " "migration copies exported on share network %(sn_id)s " "are not available: %(share_ids)s.") % { 'sn_id': share_network['id'], 'share_ids': shares_rules_not_available, } raise exception.InvalidShareNetwork(reason=msg) busy_shares = [] for share in shares: try: 
self._check_is_share_busy(share) except exception.ShareBusyException: busy_shares.append(share['id']) if busy_shares: msg = _("Some shares exported on share network %(sn_id)s " "are busy: %(share_ids)s.") % { 'sn_id': share_network['id'], 'share_ids': busy_shares, } raise exception.InvalidShareNetwork(reason=msg) return list(share_servers), list(backend_hosts) def get_security_service_update_key( self, operation, new_security_service_id, current_security_service_id=None): if current_security_service_id: return ('share_network_sec_service_update_' + current_security_service_id + '_' + new_security_service_id + '_' + operation) else: return ('share_network_sec_service_add_' + new_security_service_id + '_' + operation) @locked_security_service_update_operation def _security_service_update_validate_hosts( self, context, share_network, backend_hosts, share_servers, new_security_service_id=None, current_security_service_id=None): # create a key based on users request update_key = self.get_security_service_update_key( 'hosts_check', new_security_service_id, current_security_service_id=current_security_service_id) return self._do_update_validate_hosts( context, share_network['id'], backend_hosts, update_key, new_security_service_id=new_security_service_id, current_security_service_id=current_security_service_id) def _do_update_validate_hosts( self, context, share_network_id, backend_hosts, update_key, new_share_network_subnet=None, new_security_service_id=None, current_security_service_id=None): # check if there is an entry being processed. update_value = self.db.async_operation_data_get( context, share_network_id, update_key) if not update_value: # Create a new entry, send all asynchronous rpcs and return. hosts_to_validate = {} for host in backend_hosts: hosts_to_validate[host] = None self.db.async_operation_data_update( context, share_network_id, {update_key: json.dumps(hosts_to_validate)}) for host in backend_hosts: if new_share_network_subnet: (self.share_rpcapi. 
check_update_share_server_network_allocations( context, host, share_network_id, new_share_network_subnet)) else: (self.share_rpcapi. check_update_share_network_security_service( context, host, share_network_id, new_security_service_id, current_security_service_id=( current_security_service_id))) return None, hosts_to_validate else: # process current existing hosts and update them if needed. current_hosts = json.loads(update_value) hosts_to_include = ( set(backend_hosts).difference(set(current_hosts.keys()))) hosts_to_validate = {} for host in backend_hosts: hosts_to_validate[host] = current_hosts.get(host, None) # Check if there is any unsupported host. if any(hosts_to_validate[host] is False for host in backend_hosts): return False, hosts_to_validate # Update the list of hosts to be validated. if hosts_to_include: self.db.async_operation_data_update( context, share_network_id, {update_key: json.dumps(hosts_to_validate)}) for host in hosts_to_include: # send asynchronous check only for new backend hosts. if new_share_network_subnet: (self.share_rpcapi. check_update_share_server_network_allocations( context, host, share_network_id, new_share_network_subnet)) else: (self.share_rpcapi. check_update_share_network_security_service( context, host, share_network_id, new_security_service_id, current_security_service_id=( current_security_service_id))) return None, hosts_to_validate if all(hosts_to_validate[host] for host in backend_hosts): return True, hosts_to_validate return None, current_hosts def check_share_network_security_service_update( self, context, share_network, new_security_service, current_security_service=None, reset_operation=False): share_servers, backend_hosts = ( self._share_network_update_initial_checks( context, share_network, new_security_service, current_security_service=current_security_service)) if not backend_hosts: # There is no backend host to validate. Operation is supported. 
return { 'compatible': True, 'hosts_check_result': {}, } curr_sec_serv_id = ( current_security_service['id'] if current_security_service else None) key = self.get_security_service_update_key( 'hosts_check', new_security_service['id'], current_security_service_id=curr_sec_serv_id) if reset_operation: self.db.async_operation_data_delete(context, share_network['id'], key) try: compatible, hosts_info = ( self._security_service_update_validate_hosts( context, share_network, backend_hosts, share_servers, new_security_service_id=new_security_service['id'], current_security_service_id=curr_sec_serv_id)) except Exception as e: LOG.error(e) # Due to an internal error, we will delete the entry self.db.async_operation_data_delete( context, share_network['id'], key) msg = _( 'The share network %(share_net_id)s cannot be updated ' 'since at least one of its backend hosts do not support ' 'this operation.') % { 'share_net_id': share_network['id']} raise exception.InvalidShareNetwork(reason=msg) return { 'compatible': compatible, 'hosts_check_result': hosts_info } def update_share_network_security_service(self, context, share_network, new_security_service, current_security_service=None): share_servers, backend_hosts = ( self._share_network_update_initial_checks( context, share_network, new_security_service, current_security_service=current_security_service)) if not backend_hosts: # There is no backend host to validate or update. return curr_sec_serv_id = ( current_security_service['id'] if current_security_service else None) update_key = self.get_security_service_update_key( 'hosts_check', new_security_service['id'], current_security_service_id=curr_sec_serv_id) # check if there is an entry being processed at this moment update_value = self.db.async_operation_data_get( context, share_network['id'], update_key) if not update_value: msg = _( 'The share network %(share_net_id)s cannot start the update ' 'process since no check operation was found. 
Before starting ' 'the update operation, a "check" operation must be triggered ' 'to validate if all backend hosts support the provided ' 'configuration paramaters.') % { 'share_net_id': share_network['id'] } raise exception.InvalidShareNetwork(reason=msg) try: result, __ = self._security_service_update_validate_hosts( context, share_network, backend_hosts, share_servers, new_security_service_id=new_security_service['id'], current_security_service_id=curr_sec_serv_id) except Exception: # Due to an internal error, we will delete the entry self.db.async_operation_data_delete( context, share_network['id'], update_key) msg = _( 'The share network %(share_net_id)s cannot be updated ' 'since at least one of its backend hosts do not support ' 'this operation.') % { 'share_net_id': share_network['id']} raise exception.InvalidShareNetwork(reason=msg) if result is False: msg = _( 'The share network %(share_net_id)s cannot be updated ' 'since at least one of its backend hosts do not support ' 'this operation.') % { 'share_net_id': share_network['id']} raise exception.InvalidShareNetwork(reason=msg) elif result is None: msg = _( 'Not all of the validation has been completed yet. A ' 'validation check is in progress. This operation can be ' 'retried.') raise exception.InvalidShareNetwork(reason=msg) self.db.share_network_update( context, share_network['id'], {'status': constants.STATUS_NETWORK_CHANGE}) # NOTE(dviroel): We want to change the status for all share servers to # identify when all modifications are made, and update share network # status to 'active' again. 
share_servers_ids = [ss.id for ss in share_servers] self.db.share_servers_update( context, share_servers_ids, {'status': constants.STATUS_SERVER_NETWORK_CHANGE}) for backend_host in backend_hosts: self.share_rpcapi.update_share_network_security_service( context, backend_host, share_network['id'], new_security_service['id'], current_security_service_id=curr_sec_serv_id) # Erase db entry, since we won't need it anymore self.db.async_operation_data_delete( context, share_network['id'], update_key) LOG.info('Security service update has been started for share network ' '%(share_net_id)s.', {'share_net_id': share_network['id']}) @locked_share_server_update_allocations_operation def _share_server_update_allocations_validate_hosts( self, context, backend_hosts, update_key, share_network_id=None, neutron_net_id=None, neutron_subnet_id=None, availability_zone_id=None): new_share_network_subnet = { 'neutron_net_id': neutron_net_id, 'neutron_subnet_id': neutron_subnet_id, 'availability_zone_id': availability_zone_id, } return self._do_update_validate_hosts( context, share_network_id, backend_hosts, update_key, new_share_network_subnet=new_share_network_subnet) def get_share_server_update_allocations_key( self, share_network_id, availability_zone_id): return ('share_server_update_allocations_' + share_network_id + '_' + str(availability_zone_id) + '_' + 'hosts_check') def _share_server_update_allocations_initial_checks( self, context, share_network, share_servers): api_common.check_share_network_is_active(share_network) if not share_network['network_allocation_update_support']: msg = _("Updating network allocations is not supported on this " "share network (%(sn_id)s) while it has shares. " "See the capability 'network_allocation_update_support'." 
) % {"sn_id": share_network["id"]} raise exception.InvalidShareNetwork(reason=msg) backend_hosts = set() for share_server in share_servers: share_server_id = share_server['id'] if share_server['status'] != constants.STATUS_ACTIVE: msg = _('The share server %(server)s in the specified ' 'availability zone subnet is not currently ' 'available.') % {'server': share_server_id} raise exception.InvalidShareNetwork(reason=msg) # We need an admin context to validate these hosts. admin_ctx = manila_context.get_admin_context() # Make sure the host is in the list of available hosts. utils.validate_service_host(admin_ctx, share_server['host']) # Create a set of backend hosts. backend_hosts.add(share_server['host']) shares = self.db.share_get_all_by_share_server( context, share_server_id) shares_not_available = [ share['id'] for share in shares if share['status'] != constants.STATUS_AVAILABLE] if shares_not_available: msg = _("The share server (%(server_id)s) in the specified " "availability zone subnet has some shares that are " "not available: " "%(share_ids)s.") % { 'server_id': share_server_id, 'share_ids': shares_not_available, } raise exception.InvalidShareNetwork(reason=msg) shares_rules_not_available = [ share['id'] for share in shares if share['instance'][ 'access_rules_status'] != constants.STATUS_ACTIVE] if shares_rules_not_available: msg = _("The share server (%(server_id)s) in the specified " "availability zone subnet has either these shares or " "one of their replicas or migration copies that are " "not available: %(share_ids)s.") % { 'server_id': share_server_id, 'share_ids': shares_rules_not_available, } raise exception.InvalidShareNetwork(reason=msg) busy_shares = [] for share in shares: try: self._check_is_share_busy(share) except exception.ShareBusyException: busy_shares.append(share['id']) if busy_shares: msg = _("The share server (%(server_id)s) in the specified " "availability zone subnet has some shares that are " "busy as part of an active task: " 
"%(share_ids)s.") % { 'server_id': share_server_id, 'share_ids': busy_shares, } raise exception.InvalidShareNetwork(reason=msg) return backend_hosts def check_update_share_server_network_allocations( self, context, share_network, new_share_network_subnet, reset_operation): backend_hosts = self._share_server_update_allocations_initial_checks( context, share_network, new_share_network_subnet['share_servers']) update_key = self.get_share_server_update_allocations_key( share_network['id'], new_share_network_subnet['availability_zone_id']) if reset_operation: self.db.async_operation_data_delete(context, share_network['id'], update_key) try: compatible, hosts_info = ( self._share_server_update_allocations_validate_hosts( context, backend_hosts, update_key, share_network_id=share_network['id'], neutron_net_id=( new_share_network_subnet.get('neutron_net_id')), neutron_subnet_id=( new_share_network_subnet.get('neutron_subnet_id')), availability_zone_id=new_share_network_subnet.get( "availability_zone_id"))) except Exception as e: LOG.exception(e) # Due to an internal error, we will delete the entry. 
self.db.async_operation_data_delete( context, share_network['id'], update_key) msg = _( "The server's allocations cannot be updated on availability " "zone %(zone_id)s of the share network %(share_net_id)s, " "since at least one of its backend hosts do not support this " "operation.") % { 'share_net_id': share_network['id'], 'zone_id': new_share_network_subnet['availability_zone_id']} raise exception.InvalidShareNetwork(reason=msg) return { 'compatible': compatible, 'hosts_check_result': hosts_info } def update_share_server_network_allocations( self, context, share_network, new_share_network_subnet): backend_hosts = self._share_server_update_allocations_initial_checks( context, share_network, new_share_network_subnet['share_servers']) update_key = self.get_share_server_update_allocations_key( share_network['id'], new_share_network_subnet['availability_zone_id']) # check if there is an entry being processed at this moment. update_value = self.db.async_operation_data_get( context, share_network['id'], update_key) if not update_value: msg = _( 'The share network %(share_net_id)s cannot start the update ' 'process since no check operation was found. 
Before starting ' 'the update operation, a "check" operation must be triggered ' 'to validate if all backend hosts support the provided ' 'configuration paramaters.') % { 'share_net_id': share_network['id'] } raise exception.InvalidShareNetwork(reason=msg) subnet_info = { 'availability_zone_id': new_share_network_subnet.get("availability_zone_id"), 'neutron_net_id': new_share_network_subnet.get('neutron_net_id'), 'neutron_subnet_id': new_share_network_subnet.get('neutron_subnet_id'), } try: result, __ = self._share_server_update_allocations_validate_hosts( context, backend_hosts, update_key, share_network_id=share_network['id'], neutron_net_id=( new_share_network_subnet.get('neutron_net_id')), neutron_subnet_id=( new_share_network_subnet.get('neutron_subnet_id')), availability_zone_id=new_share_network_subnet.get( "availability_zone_id")) except Exception: # Due to an internal error, we will delete the entry. self.db.async_operation_data_delete( context, share_network['id'], update_key) msg = _( "The server's allocations cannot be updated on availability " "zone %(zone_id)s of the share network %(share_net_id)s, " "since an internal error occurred." "operation.") % { 'share_net_id': share_network['id'], 'zone_id': subnet_info['availability_zone_id'] } raise exception.InvalidShareNetwork(reason=msg) if result is False: msg = _( "The server's allocations cannot be updated on availability " "zone %(zone_id)s of the share network %(share_net_id)s, " "since at least one of its backend hosts do not support this " "operation.") % { 'share_net_id': share_network['id'], 'zone_id': subnet_info['availability_zone_id'] } raise exception.InvalidShareNetwork(reason=msg) elif result is None: msg = _( 'Not all of the validation has been completed yet. A ' 'validation check is in progress. This operation can be ' 'retried.') raise exception.InvalidShareNetwork(reason=msg) # change db to start the update. 
self.db.share_network_update( context, share_network['id'], {'status': constants.STATUS_NETWORK_CHANGE}) share_servers_ids = [ss['id'] for ss in new_share_network_subnet['share_servers']] self.db.share_servers_update( context, share_servers_ids, {'status': constants.STATUS_SERVER_NETWORK_CHANGE}) # create the new subnet. new_share_network_subnet_db = self.db.share_network_subnet_create( context, new_share_network_subnet) # triggering the actual update. for backend_host in backend_hosts: self.share_rpcapi.update_share_server_network_allocations( context, backend_host, share_network['id'], new_share_network_subnet_db['id']) # Erase db entry, since we won't need it anymore. self.db.async_operation_data_delete( context, share_network['id'], update_key) LOG.info('Share servers allocations update have been started for ' 'share network %(share_net_id)s on its availability zone ' '%(az_id)s with new subnet %(subnet_id)s.', { 'share_net_id': share_network['id'], 'az_id': new_share_network_subnet['availability_zone_id'], 'subnet_id': new_share_network_subnet_db['id'], }) return new_share_network_subnet_db def create_share_backup(self, context, share, backup): share_id = share['id'] self._check_is_share_busy(share) if share['status'] != constants.STATUS_AVAILABLE: msg_args = {'share_id': share_id, 'state': share['status']} msg = (_("Share %(share_id)s is in '%(state)s' state, but it must " "be in 'available' state to create a backup.") % msg_args) raise exception.InvalidShare(message=msg) snapshots = self.db.share_snapshot_get_all_for_share(context, share_id) if snapshots: msg = _("Cannot backup share %s while it has snapshots.") raise exception.InvalidShare(message=msg % share_id) if share.has_replicas: msg = _("Cannot backup share %s while it has replicas.") raise exception.InvalidShare(message=msg % share_id) # Reserve a quota before setting share status and backup status try: reservations = QUOTAS.reserve( context, backups=1, backup_gigabytes=share['size']) except 
exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(resource_name): return (usages[resource_name]['reserved'] + usages[resource_name]['in_use']) for over in overs: if 'backup_gigabytes' in over: msg = ("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)sG backup, but (%(d_consumed)dG of " "%(d_quota)dG already consumed.)") LOG.warning(msg, {'s_pid': context.project_id, 's_size': share['size'], 'd_consumed': _consumed(over), 'd_quota': quotas[over]}) raise exception.ShareBackupSizeExceedsAvailableQuota( requested=share['size'], consumed=_consumed('backup_gigabytes'), quota=quotas['backup_gigabytes']) elif 'backups' in over: msg = ("Quota exceeded for %(s_pid)s, tried to create " "backup, but (%(d_consumed)d of %(d_quota)d " "backups already consumed.)") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed(over), 'd_quota': quotas[over]}) raise exception.BackupLimitExceeded( allowed=quotas[over]) # Validate right backup type is provided backup_type = backup.get('backup_options') and backup.get( 'backup_options').get(constants.BACKUP_TYPE) filters = { 'status': constants.STATUS_AVAILABLE, 'share_id': share_id, 'topic': CONF.share_topic, } backups = self.db.share_backups_get_all(context, filters) if backup_type and len(backups) > 0: previous_backup_type = backups[0][constants.BACKUP_TYPE] backup_options = backup.get('backup_options') current_backup_type = backup_options.get(constants.BACKUP_TYPE) if previous_backup_type != current_backup_type: err_msg = _("Share '%(share)s' has existing backups with" " backup_type: '%(correct_backup_type)s'. 
You must" " delete these backups to schedule a backup with" " a different backup_type, or re-use the same" " backup_type.") msg_args = { 'share': share.get('display_name'), 'correct_backup_type': previous_backup_type, } raise exc.HTTPBadRequest(explanation=err_msg % msg_args) backup_ref = {} try: backup_ref = self.db.share_backup_create( context, share['id'], { 'user_id': context.user_id, 'project_id': context.project_id, 'progress': '0', 'restore_progress': '0', 'status': constants.STATUS_CREATING, 'display_description': backup.get('description'), 'display_name': backup.get('name'), 'size': share['size'], 'availability_zone': share['instance'] ['availability_zone'], 'backup_type': backup_type, } ) QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) self.db.share_update( context, share_id, {'status': constants.STATUS_BACKUP_CREATING}) backup_ref['backup_options'] = backup.get('backup_options', {}) backup_values = {} if backup_ref['backup_options']: topic = CONF.share_topic backup_ref['host'] = share_utils.extract_host(share['host']) backup_values.update({'host': backup_ref['host']}) else: topic = CONF.data_topic backup_values.update({'topic': topic}) self.db.share_backup_update(context, backup_ref['id'], backup_values) if topic == CONF.share_topic: self.share_rpcapi.create_backup(context, backup_ref) elif topic == CONF.data_topic: data_rpc = data_rpcapi.DataAPI() data_rpc.create_backup(context, backup_ref) return backup_ref def delete_share_backup(self, context, backup): """Make the RPC call to delete a share backup. :param context: request context :param backup: the model of backup that is retrieved from DB. 
:raises: InvalidBackup :raises: BackupDriverException :raises: ServiceNotFound """ if backup.status not in [constants.STATUS_AVAILABLE, constants.STATUS_ERROR]: msg = (_('Backup %s status must be available or error.') % backup['id']) raise exception.InvalidBackup(reason=msg) self.db.share_backup_update( context, backup['id'], {'status': constants.STATUS_DELETING}) if backup['topic'] == CONF.share_topic: self.share_rpcapi.delete_backup(context, backup) elif backup['topic'] == CONF.data_topic: data_rpc = data_rpcapi.DataAPI() data_rpc.delete_backup(context, backup) def restore_share_backup(self, context, backup, target_share_id=None): """Make the RPC call to restore a backup.""" backup_id = backup['id'] if backup['status'] != constants.STATUS_AVAILABLE: msg = (_('Backup %s status must be available.') % backup['id']) raise exception.InvalidBackup(reason=msg) if target_share_id: share = self.get(context, target_share_id) else: share = self.get(context, backup['share_id']) share_id = share['id'] if share['status'] != constants.STATUS_AVAILABLE: msg = _('Share to be restored to must be available.') raise exception.InvalidShare(reason=msg) backup_size = backup['size'] LOG.debug('Checking backup size %(backup_size)s against share size ' '%(share_size)s.', {'backup_size': backup_size, 'share_size': share['size']}) if backup_size > share['size']: msg = (_('Share size %(share_size)d is too small to restore ' 'backup of size %(size)d.') % {'share_size': share['size'], 'size': backup_size}) raise exception.InvalidShare(reason=msg) LOG.info("Overwriting share %(share_id)s with restore of " "backup %(backup_id)s.", {'share_id': share_id, 'backup_id': backup_id}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_RESTORING}) self.db.share_update( context, share_id, {'status': constants.STATUS_BACKUP_RESTORING, 'source_backup_id': backup_id}) if backup['topic'] == CONF.share_topic: self.share_rpcapi.restore_backup(context, backup, share_id) elif 
backup['topic'] == CONF.data_topic: data_rpc = data_rpcapi.DataAPI() data_rpc.restore_backup(context, backup, share_id) restore_info = {'backup_id': backup_id, 'share_id': share_id} return restore_info def update_share_backup(self, context, backup, fields): return self.db.share_backup_update(context, backup['id'], fields) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/configuration.py0000664000175000017500000000472700000000000020654 0ustar00zuulzuul00000000000000# # Copyright (c) 2012 Rackspace Hosting # Copyright (c) 2013 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Configuration support for all drivers. This module allows support for setting configurations either from default or from a particular CONF group, to be able to set multiple configurations for a given set of values. For instance, two generic configurations can be set by naming them in groups as [generic1] share_backend_name=generic-backend-1 ... [generic2] share_backend_name=generic-backend-2 ... And the configuration group name will be passed in so that all calls to configuration.volume_group within that instance will be mapped to the proper named group. This class also ensures the implementation's configuration is grafted into the option group. This is due to the way cfg works. All cfg options must be defined and registered in the group in which they are used. 
""" from oslo_config import cfg CONF = cfg.CONF class Configuration(object): def __init__(self, share_opts, config_group=None): """Graft config values into config group. This takes care of grafting the implementation's config values into the config group. """ self.config_group = config_group # set the local conf so that __call__'s know what to use if self.config_group: self._ensure_config_values(share_opts) self.local_conf = CONF._get(self.config_group) else: self.local_conf = CONF def _ensure_config_values(self, share_opts): CONF.register_opts(share_opts, group=self.config_group) def append_config_values(self, share_opts): self._ensure_config_values(share_opts) def safe_get(self, value): try: return self.__getattr__(value) except cfg.NoSuchOptError: return None def __getattr__(self, value): return getattr(self.local_conf, value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/driver.py0000664000175000017500000047342700000000000017307 0ustar00zuulzuul00000000000000# Copyright 2012 NetApp # Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Drivers for shares. 
""" import time from oslo_config import cfg from oslo_config import types from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila import network from manila import utils LOG = log.getLogger(__name__) share_opts = [ # NOTE(rushiagr): Reasonable to define this option at only one place. cfg.IntOpt( 'num_shell_tries', default=3, help='Number of times to attempt to run flakey shell commands.'), cfg.IntOpt( 'reserved_share_percentage', default=0, help="The percentage of backend capacity reserved. Used for shares " "which are not created from the snapshot."), cfg.IntOpt( 'reserved_share_from_snapshot_percentage', default=0, help="The percentage of backend capacity reserved. Used for shares " "created from the snapshot. On some platforms, shares can only " "be created from the snapshot on the host where snapshot was " "taken, so we can set a lower value in this option compared to " "reserved_share_percentage, and allow to create shares from the " "snapshot on the same host up to a higher threshold."), cfg.IntOpt( 'reserved_share_extend_percentage', default=0, help="The percentage of backend capacity reserved for share extend " "operation. When existing limit of 'reserved_share_percentage' " "is hit, we do not want user to create a new share but existing " "shares can be extended based on value of this parameter."), cfg.StrOpt( 'share_backend_name', help='The backend name for a given driver implementation.'), cfg.StrOpt( 'network_config_group', help="Name of the configuration group in the Manila conf file " "to look for network config options." "If not set, the share backend's config group will be used." "If an option is not found within provided group, then " "'DEFAULT' group will be used for search of option."), cfg.BoolOpt( 'driver_handles_share_servers', help="There are two possible approaches for share drivers in Manila. 
" "First is when share driver is able to handle share-servers and " "second when not. Drivers can support either both or only one " "of these approaches. So, set this opt to True if share driver " "is able to handle share servers and it is desired mode else set " "False. It is set to None by default to make this choice " "intentional."), cfg.FloatOpt( 'max_over_subscription_ratio', default=20.0, min=1.0, help='Float representation of the over subscription ratio ' 'when thin provisioning is involved. Default ratio is ' '20.0, meaning provisioned capacity can be 20 times ' 'the total physical capacity. If the ratio is 10.5, it ' 'means provisioned capacity can be 10.5 times the ' 'total physical capacity. A ratio of 1.0 means ' 'provisioned capacity cannot exceed the total physical ' 'capacity. A ratio lower than 1.0 is invalid.'), cfg.ListOpt( 'migration_ignore_files', default=['lost+found'], help="List of files and folders to be ignored when migrating shares. " "Items should be names (not including any path)."), cfg.StrOpt( 'share_mount_template', default='mount -vt %(proto)s %(options)s %(export)s %(path)s', help="The template for mounting shares for this backend. Must specify " "the executable with all necessary parameters for the protocol " "supported. 'proto' template element may not be required if " "included in the command. 'export' and 'path' template elements " "are required. It is advisable to separate different commands " "per backend."), cfg.StrOpt( 'share_unmount_template', default='umount -v %(path)s', help="The template for unmounting shares for this backend. Must " "specify the executable with all necessary parameters for the " "protocol supported. 'path' template element is required. It is " "advisable to separate different commands per backend."), cfg.Opt( 'protocol_access_mapping', type=types.Dict(types.List(types.String(), bounds=True)), default={ 'ip': ['nfs'], 'user': ['cifs'], }, help="Protocol access mapping for this backend. 
Should be a " "dictionary comprised of " "{'access_type1': ['share_proto1', 'share_proto2']," " 'access_type2': ['share_proto2', 'share_proto3']}."), cfg.StrOpt( "admin_network_config_group", help="If share driver requires to setup admin network for share, then " "define network plugin config options in some separate config " "group and set its name here. Used only with another " "option 'driver_handles_share_servers' set to 'True'."), # Replication option/s cfg.StrOpt( "replication_domain", help="A string specifying the replication domain that the backend " "belongs to. This option needs to be specified the same in the " "configuration sections of all backends that support " "replication between each other. If this option is not " "specified in the group, it means that replication is not " "enabled on the backend."), cfg.StrOpt('backend_availability_zone', default=None, help='Availability zone for this share backend. If not set, ' 'the ``storage_availability_zone`` option from the ' '``[DEFAULT]`` section is used.'), cfg.StrOpt('filter_function', help='String representation for an equation that will be ' 'used to filter hosts.'), cfg.StrOpt('goodness_function', help='String representation for an equation that will be ' 'used to determine the goodness of a host.'), cfg.IntOpt('max_shares_per_share_server', default=-1, help="Maximum number of share instances created in a share " "server."), cfg.IntOpt('max_share_server_size', default=-1, help="Maximum sum of gigabytes a share server can have " "considering all its share instances and snapshots.") ] ssh_opts = [ cfg.IntOpt( 'ssh_conn_timeout', default=60, help='Backend server SSH connection timeout.'), cfg.IntOpt( 'ssh_min_pool_conn', default=1, help='Minimum number of connections in the SSH pool.'), cfg.IntOpt( 'ssh_max_pool_conn', default=10, help='Maximum number of connections in the SSH pool.'), ] ganesha_opts = [ cfg.StrOpt('ganesha_config_dir', default='/etc/ganesha', help='Directory where Ganesha config files 
are stored.'), cfg.StrOpt('ganesha_config_path', default='$ganesha_config_dir/ganesha.conf', help='Path to main Ganesha config file.'), cfg.StrOpt('ganesha_service_name', default='ganesha.nfsd', help='Name of the ganesha nfs service.'), cfg.StrOpt('ganesha_db_path', default='$state_path/manila-ganesha.db', help='Location of Ganesha database file. ' '(Ganesha module only.)'), cfg.StrOpt('ganesha_export_dir', default='$ganesha_config_dir/export.d', help='Path to directory containing Ganesha export ' 'configuration. (Ganesha module only.)'), cfg.StrOpt('ganesha_export_template_dir', default='/etc/manila/ganesha-export-templ.d', help='Path to directory containing Ganesha export ' 'block templates. (Ganesha module only.)'), cfg.BoolOpt('ganesha_rados_store_enable', default=False, help='Persist Ganesha exports and export counter ' 'in Ceph RADOS objects, highly available storage.'), cfg.StrOpt('ganesha_rados_store_pool_name', help='Name of the Ceph RADOS pool to store Ganesha exports ' 'and export counter.'), cfg.StrOpt('ganesha_rados_export_counter', default='ganesha-export-counter', help='Name of the Ceph RADOS object used as the Ganesha ' 'export counter.'), cfg.StrOpt('ganesha_rados_export_index', default='ganesha-export-index', help='Name of the Ceph RADOS object used to store a list ' 'of the export RADOS object URLS.'), ] CONF = cfg.CONF CONF.register_opts(share_opts) CONF.register_opts(ssh_opts) CONF.register_opts(ganesha_opts) class ExecuteMixin(object): """Provides an executable functionality to a driver class.""" def init_execute_mixin(self, *args, **kwargs): if self.configuration: self.configuration.append_config_values(ssh_opts) self.set_execute(kwargs.pop('execute', utils.execute)) def set_execute(self, execute): self._execute = execute def _try_execute(self, *command, **kwargs): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. 
tries = 0 while True: try: self._execute(*command, **kwargs) return True except exception.ProcessExecutionError: tries += 1 if tries >= self.configuration.num_shell_tries: raise LOG.exception("Recovering from a failed execute. " "Try number %s", tries) time.sleep(tries ** 2) class GaneshaMixin(object): """Augment derived classes with Ganesha configuration.""" def init_ganesha_mixin(self, *args, **kwargs): if self.configuration: self.configuration.append_config_values(ganesha_opts) class ShareDriver(object): """Class defines interface of NAS driver.""" def __init__(self, driver_handles_share_servers, *args, **kwargs): """Implements base functionality for share drivers. :param driver_handles_share_servers: expected boolean value or tuple/list/set of boolean values. There are two possible approaches for share drivers in Manila. First is when share driver is able to handle share-servers and second when not. Drivers can support either both (indicated by a tuple/set/list with (True, False)) or only one of these approaches. So, it is allowed to be 'True' when share driver does support handling of share servers and allowed to be 'False' when it does support usage of unhandled share-servers that are not tracked by Manila. Share drivers are allowed to work only in one of two possible driver modes, that is why only one should be chosen. :param config_opts: tuple, list or set of config option lists that should be registered in driver's configuration right after this attribute is created. Useful for usage with mixin classes. """ super(ShareDriver, self).__init__() self.configuration = kwargs.get('configuration', None) self.initialized = False self._stats = {} self.ip_versions = None self.ipv6_implemented = False # Indicates whether a driver supports update of security services for # in-use share networks. This property will be saved in every new share # server. 
self.security_service_update_support = False # Indicates whether a driver supports adding subnet with its # allocations to an in-use share network availability zone. This # property will be saved in every new share server. self.restore_to_target_support = False # Indicates whether a driver supports out of place restores # to a share other then the source of a given backup. self.network_allocation_update_support = False self.dhss_mandatory_security_service_association = {} self.share_replicas_migration_support = False self.encryption_support = None self.pools = [] if self.configuration: self.configuration.append_config_values(share_opts) network_config_group = (self.configuration.network_config_group or self.configuration.config_group) admin_network_config_group = ( self.configuration.admin_network_config_group) else: network_config_group = None admin_network_config_group = ( CONF.admin_network_config_group) self._verify_share_server_handling(driver_handles_share_servers) if self.driver_handles_share_servers: # Enable common network self.network_api = network.API( config_group_name=network_config_group) # Enable admin network if admin_network_config_group: self._admin_network_api = network.API( config_group_name=admin_network_config_group, label='admin') for config_opt_set in kwargs.get('config_opts', []): self.configuration.append_config_values(config_opt_set) if hasattr(self, 'init_execute_mixin'): # Instance with 'ExecuteMixin' # pylint: disable=no-member self.init_execute_mixin(*args, **kwargs) if hasattr(self, 'init_ganesha_mixin'): # Instance with 'GaneshaMixin' # pylint: disable=no-member self.init_ganesha_mixin(*args, **kwargs) @property def admin_network_api(self): if hasattr(self, '_admin_network_api'): return self._admin_network_api @property def driver_handles_share_servers(self): if self.configuration: return self.configuration.safe_get('driver_handles_share_servers') return CONF.driver_handles_share_servers @property def replication_domain(self): if 
self.configuration: return self.configuration.safe_get('replication_domain') return CONF.replication_domain @property def max_shares_per_share_server(self): if self.configuration: return self.configuration.safe_get( 'max_shares_per_share_server') or -1 return CONF.max_shares_per_share_server @property def max_share_server_size(self): if self.configuration: return self.configuration.safe_get('max_share_server_size') or -1 return CONF.max_share_server_size def _verify_share_server_handling(self, driver_handles_share_servers): """Verifies driver_handles_share_servers and given configuration.""" if not isinstance(self.driver_handles_share_servers, bool): raise exception.ManilaException( "Config opt 'driver_handles_share_servers' has improper " "value - '%s'. Please define it as boolean." % self.driver_handles_share_servers) elif isinstance(driver_handles_share_servers, bool): driver_handles_share_servers = [driver_handles_share_servers] elif not isinstance(driver_handles_share_servers, (tuple, list, set)): raise exception.ManilaException( "Improper data provided for 'driver_handles_share_servers' - " "%s" % driver_handles_share_servers) if any(not isinstance(v, bool) for v in driver_handles_share_servers): raise exception.ManilaException( "Provided wrong data: %s" % driver_handles_share_servers) if (self.driver_handles_share_servers not in driver_handles_share_servers): raise exception.ManilaException( "Driver does not support mode 'driver_handles_share_servers=" "%(actual)s'. It can be used only with value '%(allowed)s'." % {'actual': self.driver_handles_share_servers, 'allowed': driver_handles_share_servers}) def migration_check_compatibility( self, context, source_share, destination_share, share_server=None, destination_share_server=None): """Checks destination compatibility for migration of a given share. .. note:: Is called to test compatibility with destination backend. 
Driver should check if it is compatible with destination backend so driver-assisted migration can proceed. :param context: The 'context.RequestContext' object for the request. :param source_share: Reference to the share to be migrated. :param destination_share: Reference to the share model to be used by migrated share. :param share_server: Share server model or None. :param destination_share_server: Destination Share server model or None. :return: A dictionary containing values indicating if destination backend is compatible, if share can remain writable during migration, if it can preserve all file metadata and if it can perform migration of given share non-disruptively. Example:: { 'compatible': True, 'writable': True, 'preserve_metadata': True, 'nondisruptive': True, 'preserve_snapshots': True, } """ return { 'compatible': False, 'writable': False, 'preserve_metadata': False, 'nondisruptive': False, 'preserve_snapshots': False, } def migration_start( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Starts migration of a given share to another host. .. note:: Is called in source share's backend to start migration. Driver should implement this method if willing to perform migration in a driver-assisted way, useful for when source share's backend driver is compatible with destination backend driver. This method should start the migration procedure in the backend and end. Following steps should be done in 'migration_continue'. :param context: The 'context.RequestContext' object for the request. :param source_share: Reference to the original share model. :param destination_share: Reference to the share model to be used by migrated share. :param source_snapshots: List of snapshots owned by the source share. :param snapshot_mappings: Mapping of source snapshot IDs to destination snapshot models. :param share_server: Share server model or None. 
:param destination_share_server: Destination Share server model or None. """ raise NotImplementedError() def migration_continue( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Continues migration of a given share to another host. .. note:: Is called in source share's backend to continue migration. Driver should implement this method to continue monitor the migration progress in storage and perform following steps until 1st phase is completed. :param context: The 'context.RequestContext' object for the request. :param source_share: Reference to the original share model. :param destination_share: Reference to the share model to be used by migrated share. :param source_snapshots: List of snapshots owned by the source share. :param snapshot_mappings: Mapping of source snapshot IDs to destination snapshot models. :param share_server: Share server model or None. :param destination_share_server: Destination Share server model or None. :return: Boolean value to indicate if 1st phase is finished. """ raise NotImplementedError() def migration_complete( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Completes migration of a given share to another host. .. note:: Is called in source share's backend to complete migration. If driver is implementing 2-phase migration, this method should perform the disruptive tasks related to the 2nd phase of migration, thus completing it. Driver should also delete all original share data from source backend. :param context: The 'context.RequestContext' object for the request. :param source_share: Reference to the original share model. :param destination_share: Reference to the share model to be used by migrated share. :param source_snapshots: List of snapshots owned by the source share. 
:param snapshot_mappings: Mapping of source snapshot IDs to destination snapshot models. :param share_server: Share server model or None. :param destination_share_server: Destination Share server model or None. :return: If the migration changes the share export locations, snapshot provider locations or snapshot export locations, this method should return a dictionary with the relevant info. In such case, a dictionary containing a list of export locations and a list of model updates for each snapshot indexed by their IDs. Example:: { 'export_locations': [ { 'path': '1.2.3.4:/foo', 'metadata': {}, 'is_admin_only': False }, { 'path': '5.6.7.8:/foo', 'metadata': {}, 'is_admin_only': True }, ], 'snapshot_updates': { 'bc4e3b28-0832-4168-b688-67fdc3e9d408': { 'provider_location': '/snapshots/foo/bar_1', 'export_locations': [ { 'path': '1.2.3.4:/snapshots/foo/bar_1', 'is_admin_only': False, }, { 'path': '5.6.7.8:/snapshots/foo/bar_1', 'is_admin_only': True, }, ], }, '2e62b7ea-4e30-445f-bc05-fd523ca62941': { 'provider_location': '/snapshots/foo/bar_2', 'export_locations': [ { 'path': '1.2.3.4:/snapshots/foo/bar_2', 'is_admin_only': False, }, { 'path': '5.6.7.8:/snapshots/foo/bar_2', 'is_admin_only': True, }, ], }, }, } """ raise NotImplementedError() def migration_cancel( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Cancels migration of a given share to another host. .. note:: Is called in source share's backend to cancel migration. If possible, driver can implement a way to cancel an in-progress migration. :param context: The 'context.RequestContext' object for the request. :param source_share: Reference to the original share model. :param destination_share: Reference to the share model to be used by migrated share. :param source_snapshots: List of snapshots owned by the source share. :param snapshot_mappings: Mapping of source snapshot IDs to destination snapshot models. 
:param share_server: Share server model or None. :param destination_share_server: Destination Share server model or None. """ raise NotImplementedError() def transfer_accept(self, context, share, new_user, new_project, access_rules=None, share_server=None): """Backend update project and user info if stored on the backend. :param context: The 'context.RequestContext' object for the request. :param share: Share instance model. :param access_rules: A list of access rules for given share. :param new_user: the share will be updated with the new user id . :param new_project: the share will be updated with the new project id. :param share_server: share server for given share. """ pass def migration_get_progress( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Obtains progress of migration of a given share to another host. .. note:: Is called in source share's backend to obtain migration progress. If possible, driver can implement a way to return migration progress information. :param context: The 'context.RequestContext' object for the request. :param source_share: Reference to the original share model. :param destination_share: Reference to the share model to be used by migrated share. :param source_snapshots: List of snapshots owned by the source share. :param snapshot_mappings: Mapping of source snapshot IDs to destination snapshot models. :param share_server: Share server model or None. :param destination_share_server: Destination Share server model or None. :return: A dictionary with at least 'total_progress' field containing the percentage value. """ raise NotImplementedError() def connection_get_info(self, context, share, share_server=None): """Is called to provide necessary generic migration logic. :param context: The 'context.RequestContext' object for the request. :param share: Reference to the share being migrated. :param share_server: Share server model or None. 
:return: A dictionary with migration information. """ mount_template = self._get_mount_command(context, share, share_server) unmount_template = self._get_unmount_command(context, share, share_server) access_mapping = self._get_access_mapping(context, share, share_server) info = { 'mount': mount_template, 'unmount': unmount_template, 'access_mapping': access_mapping, } LOG.debug("Migration info obtained for share %(share_id)s: %(info)s.", {'share_id': share['id'], 'info': str(info)}) return info def _get_access_mapping(self, context, share, share_server): mapping = self.configuration.safe_get('protocol_access_mapping') or {} result = {} share_proto = share['share_proto'].lower() for access_type, protocols in mapping.items(): if share_proto in [y.lower() for y in protocols]: result[access_type] = result.get(access_type, []) result[access_type].append(share_proto) return result def _get_mount_command(self, context, share_instance, share_server=None): """Is called to delegate mounting share logic.""" mount_template = self.configuration.safe_get('share_mount_template') mount_export = self._get_mount_export(share_instance, share_server) format_template = { 'proto': share_instance['share_proto'].lower(), 'export': mount_export, 'path': '%(path)s', 'options': '%(options)s', } return mount_template % format_template def _get_mount_export(self, share_instance, share_server=None): # NOTE(ganso): If drivers want to override the export_location IP, # they can do so using this configuration. This method can also be # overridden if necessary. 
path = next((x['path'] for x in share_instance['export_locations'] if x['is_admin_only']), None) if not path: path = share_instance['export_locations'][0]['path'] return path def _get_unmount_command(self, context, share_instance, share_server=None): return self.configuration.safe_get('share_unmount_template') def create_share(self, context, share, share_server=None): """Is called to create share.""" raise NotImplementedError() def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create share from snapshot. Creating a share from snapshot can take longer than a simple clone operation if data copy is required from one host to another. For this reason driver will be able complete this creation asynchronously, by providing a 'creating_from_snapshot' status in the model update. When answering asynchronously, drivers must implement the call 'get_share_status' in order to provide updates for shares with 'creating_from_snapshot' status. It is expected that the driver returns a model update to the share manager that contains: share status and a list of export_locations. A list of 'export_locations' is mandatory only for share in 'available' status. The current supported status are 'available' and 'creating_from_snapshot'. :param context: Current context :param share: Share instance model with share data. :param snapshot: Snapshot instance model . :param share_server: Share server model or None. :param parent_share: Share model from parent snapshot with share data and share server model. :returns: a dictionary of updates containing current share status and its export_location (if available). Example:: { 'status': 'available', 'export_locations': [{...}, {...}], } :raises: ShareBackendException. A ShareBackendException in this method will set the instance to 'error' and the operation will end. 
""" raise NotImplementedError() def create_snapshot(self, context, snapshot, share_server=None): """Is called to create snapshot. :param context: Current context :param snapshot: Snapshot model. Share model could be retrieved through snapshot['share']. :param share_server: Share server model or None. :return: None or a dictionary with key 'export_locations' containing a list of export locations, if snapshots can be mounted. """ raise NotImplementedError() def delete_share(self, context, share, share_server=None): """Is called to remove share.""" raise NotImplementedError() def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove snapshot. :param context: Current context :param snapshot: Snapshot model. Share model could be retrieved through snapshot['share']. :param share_server: Share server model or None. """ raise NotImplementedError() def get_pool(self, share): """Return pool name where the share resides on. :param share: The share hosted by the driver. """ def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported. Driver can use this method to update the list of export locations of the share if it changes. To do that, you should return list with export locations. It is preferred if the driver implements "get_backend_info" and "ensure_shares" instead of this routine. :return: None or list with export locations """ raise NotImplementedError() def allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" raise NotImplementedError() def deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" raise NotImplementedError() def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share. ``access_rules`` contains all access_rules that need to be on the share. 
If the driver can make bulk access rule updates, it can safely ignore the ``add_rules`` and ``delete_rules`` parameters. If the driver cannot make bulk access rule changes, it can rely on new rules to be present in ``add_rules`` and rules that need to be removed to be present in ``delete_rules``. When a rule in ``delete_rules`` was never applied, drivers must not raise an exception, or attempt to set the rule to ``error`` state. ``add_rules`` and ``delete_rules`` can be empty lists, in this situation, drivers should ensure that the rules present in ``access_rules`` are the same as those on the back end. One scenario where this situation is forced is when the access_level is changed for all existing rules (share migration and for readable replicas). Drivers must be mindful of this call for share replicas. When 'update_access' is called on one of the replicas, the call is likely propagated to all replicas belonging to the share, especially when individual rules are added or removed. If a particular access rule does not make sense to the driver in the context of a given replica, the driver should be careful to report a correct behavior, and take meaningful action. For example, if R/W access is requested on a replica that is part of a "readable" type replication; R/O access may be added by the driver instead of R/W. Note that raising an exception *will* result in the access_rules_status on the replica, and the share itself being "out_of_sync". Drivers can sync on the valid access rules that are provided on the ``create_replica`` and ``promote_replica`` calls. :param context: Current context :param share: Share model with share data. :param access_rules: A list of access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. 
:param update_rules: Empty List or List of access rules which should be updated. access_rules already contains these rules. :param share_server: None or Share server model :returns: None, or a dictionary of updates in the format:: { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'access_key': 'alice31493e5441b8171d2310d80e37e', 'state': 'error', }, '28f6eabb-4342-486a-a7f4-45688f0c0295': { 'access_key': 'bob0078aa042d5a7325480fd13228b', 'state': 'active', }, } The top level keys are 'access_id' fields of the access rules that need to be updated. ``access_key``s are credentials (str) of the entities granted access. Any rule in the ``access_rules`` parameter can be updated. .. important:: Raising an exception in this method will force *all* rules in 'applying' and 'denying' states to 'error'. An access rule can be set to 'error' state, either explicitly via this return parameter or because of an exception raised in this method. Such an access rule will no longer be sent to the driver on subsequent access rule updates. When users deny that rule however, the driver will be asked to deny access to the client/s represented by the rule. We expect that a rule that was error-ed at the driver should never exist on the back end. So, do not fail the deletion request. Also, it is possible that the driver may receive a request to add a rule that is already present on the back end. This can happen if the share manager service goes down while the driver is committing access rule changes. Since we cannot determine if the rule was applied successfully by the driver before the disruption, we will treat all 'applying' transitional rules as new rules and repeat the request. """ raise NotImplementedError() def check_for_setup_error(self): """Check for setup error.""" pass def do_setup(self, context): """Any initialization the share driver does while starting.""" def get_share_stats(self, refresh=False): """Get share status. If 'refresh' is True, run update the stats first. 
""" if refresh: self._update_share_stats() return self._stats def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs. Drivers that use Nova for share servers should return zero (0) here same as Generic driver does. Because Nova will handle network resources allocation. Drivers that handle networking itself should calculate it according to their own requirements. It can have 1+ network interfaces. """ raise NotImplementedError() def get_admin_network_allocations_number(self): return 0 def update_network_allocation(self, context, share_server): """Update network allocation after share server creation.""" self.network_api.update_network_allocation(context, share_server) def update_admin_network_allocation(self, context, share_server): """Update admin network allocation after share server creation.""" if (self.get_admin_network_allocations_number() and self.admin_network_api): self.admin_network_api.update_network_allocation(context, share_server) def allocate_network(self, context, share_server, share_network, share_network_subnet, count=None, **kwargs): """Allocate network resources using given network information.""" if count is None: count = self.get_network_allocations_number() if count: kwargs.update(count=count) self.network_api.allocate_network( context, share_server, share_network=share_network, share_network_subnet=share_network_subnet, **kwargs) def allocate_admin_network(self, context, share_server, count=None, **kwargs): """Allocate admin network resources using given network information.""" if count is None: count = self.get_admin_network_allocations_number() if count and not self.admin_network_api: msg = _("Admin network plugin is not set up.") raise exception.NetworkBadConfigurationException(reason=msg) elif count: kwargs.update(count=count) self.admin_network_api.allocate_network( context, share_server, **kwargs) def deallocate_network(self, context, share_server_id, share_network=None, 
share_network_subnet=None): """Deallocate network resources for the given share server.""" if self.get_network_allocations_number(): self.network_api.deallocate_network( context, share_server_id, share_network=share_network, share_network_subnet=share_network_subnet) def choose_share_server_compatible_with_share(self, context, share_servers, share, snapshot=None, share_group=None, encryption_key_ref=None): """Method that allows driver to choose share server for provided share. If compatible share-server is not found, method should return None. :param context: Current context :param share_servers: list with share-server models :param share: share model :param snapshot: snapshot model :param share_group: ShareGroup model with shares :param encryption_key_ref: Encryption key reference :returns: share-server or None """ # If creating in a share group, use its share server if share_group: for share_server in share_servers: if (share_group.get('share_server_id') == share_server['id']): return share_server return None return share_servers[0] if share_servers else None def choose_share_server_compatible_with_share_group( self, context, share_servers, share_group_ref, share_group_snapshot=None): return share_servers[0] if share_servers else None def setup_server(self, *args, **kwargs): if self.driver_handles_share_servers: return self._setup_server(*args, **kwargs) else: LOG.debug( "Skipping step 'setup share server', because driver is " "enabled with mode when Manila does not handle share servers.") def _setup_server(self, network_info, metadata=None): """Sets up and configures share server with given network parameters. Redefine it within share driver when it is going to handle share servers. :param metadata: a dictionary, for now containing a key 'request_host' """ raise NotImplementedError() def manage_existing(self, share, driver_options): """Brings an existing share under Manila management. 
        If the provided share is not valid, then raise a ManageInvalidShare
        exception, specifying a reason for the failure.

        If the provided share is not in a state that can be managed, such as
        being replicated on the backend, the driver *MUST* raise
        ManageInvalidShare exception with an appropriate message.

        The share has a share_type, and the driver can inspect that and
        compare against the properties of the referenced backend share.
        If they are incompatible, raise a ManageExistingShareTypeMismatch,
        specifying a reason for the failure.

        This method is invoked when the share is being managed with
        a share type that has ``driver_handles_share_servers``
        extra-spec set to False.

        :param share: Share model
        :param driver_options: Driver-specific options provided by admin.
        :return: share_update dictionary with required key 'size',
                 which should contain size of the share.
        """
        raise NotImplementedError()

    def manage_existing_with_server(
            self, share, driver_options, share_server=None):
        """Brings an existing share under Manila management.

        If the provided share is not valid, then raise a ManageInvalidShare
        exception, specifying a reason for the failure.

        If the provided share is not in a state that can be managed, such as
        being replicated on the backend, the driver *MUST* raise
        ManageInvalidShare exception with an appropriate message.

        The share has a share_type, and the driver can inspect that and
        compare against the properties of the referenced backend share.
        If they are incompatible, raise a ManageExistingShareTypeMismatch,
        specifying a reason for the failure.

        This method is invoked when the share is being managed with
        a share type that has ``driver_handles_share_servers``
        extra-spec set to True.

        :param share: Share model
        :param driver_options: Driver-specific options provided by admin.
        :param share_server: Share server model or None.
        :return: share_update dictionary with required key 'size',
                 which should contain size of the share.
        """
        raise NotImplementedError()

    def unmanage(self, share):
        """Removes the specified share from Manila management.

        Does not delete the underlying backend share.

        For most drivers, this will not need to do anything. However, some
        drivers might use this call as an opportunity to clean up any
        Manila-specific configuration that they have associated with the
        backend share.

        If provided share cannot be unmanaged, then raise an
        UnmanageInvalidShare exception, specifying a reason for the failure.

        This method is invoked when the share is being unmanaged with
        a share type that has ``driver_handles_share_servers``
        extra-spec set to False.

        :param share: Share model
        """

    def unmanage_with_server(self, share, share_server=None):
        """Removes the specified share from Manila management.

        Does not delete the underlying backend share.

        For most drivers, this will not need to do anything. However, some
        drivers might use this call as an opportunity to clean up any
        Manila-specific configuration that they have associated with the
        backend share.

        If provided share cannot be unmanaged, then raise an
        UnmanageInvalidShare exception, specifying a reason for the failure.

        This method is invoked when the share is being unmanaged with
        a share type that has ``driver_handles_share_servers``
        extra-spec set to True.

        :param share: Share model
        :param share_server: Share server model or None.
        """

    def manage_existing_snapshot(self, snapshot, driver_options):
        """Brings an existing snapshot under Manila management.

        If provided snapshot is not valid, then raise a
        ManageInvalidShareSnapshot exception, specifying a reason for
        the failure.

        This method is invoked when the snapshot that is being managed
        belongs to a share that has its share type with
        ``driver_handles_share_servers`` extra-spec set to False.

        :param snapshot: ShareSnapshotInstance model with ShareSnapshot data.

        Example::

            {
            'id': <instance id>,
            'snapshot_id': <snapshot id>,
            'provider_location': <location>,
            ...
            }

        :param driver_options: Optional driver-specific options provided
            by admin.

        Example::

            {
            'key': 'value',
            ...
            }

        :return: model_update dictionary with required key 'size',
            which should contain size of the share snapshot, and key
            'export_locations' containing a list of export locations, if
            snapshots can be mounted.
        """
        raise NotImplementedError()

    def manage_existing_snapshot_with_server(self, snapshot, driver_options,
                                             share_server=None):
        """Brings an existing snapshot under Manila management.

        If provided snapshot is not valid, then raise a
        ManageInvalidShareSnapshot exception, specifying a reason for
        the failure.

        This method is invoked when the snapshot that is being managed
        belongs to a share that has its share type with
        ``driver_handles_share_servers`` extra-spec set to True.

        :param snapshot: ShareSnapshotInstance model with ShareSnapshot data.

        Example::

            {
            'id': <instance id>,
            'snapshot_id': <snapshot id>,
            'provider_location': <location>,
            ...
            }

        :param driver_options: Optional driver-specific options provided
            by admin.

        Example::

            {
            'key': 'value',
            ...
            }

        :param share_server: Share server model or None.
        :return: model_update dictionary with required key 'size',
            which should contain size of the share snapshot, and key
            'export_locations' containing a list of export locations, if
            snapshots can be mounted.
        """
        raise NotImplementedError()

    def unmanage_snapshot(self, snapshot):
        """Removes the specified snapshot from Manila management.

        Does not delete the underlying backend share snapshot.

        For most drivers, this will not need to do anything. However, some
        drivers might use this call as an opportunity to clean up any
        Manila-specific configuration that they have associated with the
        backend share snapshot.

        If provided share snapshot cannot be unmanaged, then raise an
        UnmanageInvalidShareSnapshot exception, specifying a reason for
        the failure.

        This method is invoked when the snapshot that is being unmanaged
        belongs to a share that has its share type with
        ``driver_handles_share_servers`` extra-spec set to False.

        :param snapshot: ShareSnapshotInstance model with ShareSnapshot data.
        """

    def unmanage_snapshot_with_server(self, snapshot, share_server=None):
        """Removes the specified snapshot from Manila management.

        Does not delete the underlying backend share snapshot.

        For most drivers, this will not need to do anything. However, some
        drivers might use this call as an opportunity to clean up any
        Manila-specific configuration that they have associated with the
        backend share snapshot.

        If provided share snapshot cannot be unmanaged, then raise an
        UnmanageInvalidShareSnapshot exception, specifying a reason for
        the failure.

        This method is invoked when the snapshot that is being unmanaged
        belongs to a share that has its share type with
        ``driver_handles_share_servers`` extra-spec set to True.

        :param snapshot: ShareSnapshotInstance model with ShareSnapshot data.
        :param share_server: Share server model or None.
        """

    def revert_to_snapshot(self, context, snapshot, share_access_rules,
                           snapshot_access_rules, share_server=None):
        """Reverts a share (in place) to the specified snapshot.

        Does not delete the share snapshot. The share and snapshot must both
        be 'available' for the restore to be attempted. The snapshot must be
        the most recent one taken by Manila; the API layer performs this
        check so the driver doesn't have to.

        The share must be reverted in place to the contents of the snapshot.
        Application admins should quiesce or otherwise prepare the
        application for the shared file system contents to change suddenly.

        :param context: Current context
        :param snapshot: The snapshot to be restored
        :param share_access_rules: List of all access rules for the affected
            share
        :param snapshot_access_rules: List of all access rules for the
            affected snapshot
        :param share_server: Optional -- Share server model or None
        """
        raise NotImplementedError()

    def extend_share(self, share, new_size, share_server=None):
        """Extends size of existing share.
        :param share: Share model
        :param new_size: New size of share (new_size > share['size'])
        :param share_server: Optional -- Share server model
        """
        raise NotImplementedError()

    def shrink_share(self, share, new_size, share_server=None):
        """Shrinks size of existing share.

        If consumed space on share larger than new_size driver should raise
        ShareShrinkingPossibleDataLoss exception:
        raise ShareShrinkingPossibleDataLoss(share_id=share['id'])

        :param share: Share model
        :param new_size: New size of share (new_size < share['size'])
        :param share_server: Optional -- Share server model

        :raises ShareShrinkingPossibleDataLoss, NotImplementedError
        """
        raise NotImplementedError()

    def teardown_server(self, *args, **kwargs):
        # Public entry point for tearing down a share server. It only
        # delegates to _teardown_server when this driver runs in
        # driver_handles_share_servers=True mode; otherwise there is no
        # share server to tear down and the call is a logged no-op.
        if self.driver_handles_share_servers:
            return self._teardown_server(*args, **kwargs)
        else:
            LOG.debug(
                "Skipping step 'teardown share server', because driver is "
                "enabled with mode when Manila does not handle share servers.")

    def _teardown_server(self, server_details, security_services=None):
        """Tears down share server.

        Redefine it within share driver when it is going to handle share
        servers.

        :param server_details: dict with backend details of the share server
            being torn down.
        :param security_services: list of security services configured for
            the share server, or None.
        """
        raise NotImplementedError()

    def _has_redefined_driver_methods(self, methods):
        """Returns boolean as a result of methods presence and redefinition.

        :param methods: a single method name or an iterable (set/list/tuple)
            of method names.
        :return: True only if every named method exists on this driver's
            class and has been overridden, i.e. it is not the same function
            object as the one defined on the ShareDriver base class.
        """
        if not isinstance(methods, (set, list, tuple)):
            methods = (methods, )
        for method_name in methods:
            # Look the method up on the concrete driver class; comparing it
            # with the ShareDriver attribute detects whether a subclass
            # actually redefined it.
            method = getattr(type(self), method_name, None)
            if (not method or
                    method == getattr(ShareDriver, method_name)):
                return False
        return True

    @property
    def snapshots_are_supported(self):
        # Lazily computed once and cached in '_snapshots_are_supported';
        # the hasattr() check is the cache sentinel.
        if not hasattr(self, '_snapshots_are_supported'):
            methods = ('create_snapshot', 'delete_snapshot')
            # NOTE(vponomaryov): calculate default value for
            # stat 'snapshot_support' based on implementation of
            # appropriate methods of this base driver class.
            self._snapshots_are_supported = self._has_redefined_driver_methods(
                methods)
        return self._snapshots_are_supported

    @property
    def creating_shares_from_snapshots_is_supported(self):
        """Calculate default value for create_share_from_snapshot_support."""

        if not hasattr(self, '_creating_shares_from_snapshots_is_supported'):
            methods = ('create_share_from_snapshot', )
            self._creating_shares_from_snapshots_is_supported = (
                self._has_redefined_driver_methods(methods))

        # Creating from a snapshot additionally requires snapshot support
        # itself, so both capabilities must hold.
        return (
            self._creating_shares_from_snapshots_is_supported and
            self.snapshots_are_supported
        )

    def _update_share_stats(self, data=None):
        """Retrieve stats info from share group.

        :param data: dict -- dict with key-value pairs to redefine common
            ones.
        """

        LOG.debug("Updating share stats.")
        backend_name = (self.configuration.safe_get('share_backend_name') or
                        CONF.share_backend_name)

        # Note(zhiteng): These information are driver/backend specific,
        # each driver may define these values in its own config options
        # or fetch from driver specific configuration file.
        common = dict(
            share_backend_name=backend_name or 'Generic_NFS',
            driver_handles_share_servers=self.driver_handles_share_servers,
            vendor_name='Open Source',
            driver_version='1.0',
            storage_protocol=None,
            total_capacity_gb='unknown',
            free_capacity_gb='unknown',
            reserved_percentage=0,
            reserved_snapshot_percentage=0,
            reserved_share_extend_percentage=0,
            qos=False,
            pools=self.pools or None,
            snapshot_support=self.snapshots_are_supported,
            create_share_from_snapshot_support=(
                self.creating_shares_from_snapshots_is_supported),
            revert_to_snapshot_support=False,
            mount_snapshot_support=False,
            replication_domain=self.replication_domain,
            filter_function=self.get_filter_function(),
            goodness_function=self.get_goodness_function(),
            security_service_update_support=(
                self.security_service_update_support),
            network_allocation_update_support=(
                self.network_allocation_update_support),
            share_server_multiple_subnet_support=False,
            mount_point_name_support=False,
            share_replicas_migration_support=(
                self.share_replicas_migration_support),
            encryption_support=self.encryption_support,
        )
        # Driver-provided values take precedence over the defaults above.
        if isinstance(data, dict):
            common.update(data)

        if self.driver_handles_share_servers:
            common.update({
                'max_shares_per_share_server':
                    self.max_shares_per_share_server,
                'max_share_server_size': self.max_share_server_size
            })

        sg_stats = data.get('share_group_stats', {}) if data else {}
        common['share_group_stats'] = {
            'consistent_snapshot_support': sg_stats.get(
                'consistent_snapshot_support'),
        }

        self.add_ip_version_capability(common)
        self._stats = common

    def get_share_server_pools(self, share_server):
        """Return list of pools related to a particular share server.

        :param share_server: ShareServer class instance.
        """
        return []

    def create_share_group(self, context, share_group_dict,
                           share_server=None):
        """Create a share group.
        :param context: The request context
        :param share_group_dict: The share group details
            EXAMPLE:
            {
            'status': 'creating',
            'project_id': '13c0be6290934bd98596cfa004650049',
            'user_id': 'a0314a441ca842019b0952224aa39192',
            'description': None,
            'deleted': 'False',
            'created_at': datetime.datetime(2015, 8, 10, 15, 14, 6),
            'updated_at': None,
            'source_share_group_snapshot_id': 'some_fake_uuid',
            'share_group_type_id': 'some_fake_uuid',
            'host': 'hostname@backend_name',
            'share_network_id': None,
            'share_server_id': None,
            'deleted_at': None,
            'share_types': [],
            'id': 'some_fake_uuid',
            'name': None
            }
        :returns: (share_group_model_update, share_update_list)
            share_group_model_update - a dict containing any values to be
            updated for the SG in the database. This value may be None.
        """
        LOG.debug('Created a Share Group with ID: %s.',
                  share_group_dict['id'])

    def create_share_group_from_share_group_snapshot(
            self, context, share_group_dict, share_group_snapshot_dict,
            share_server=None):
        """Create a share group from a share group snapshot.

        When creating a share from snapshot operation takes longer than a
        simple clone operation, drivers will be able to complete this
        creation asynchronously, by providing a 'creating_from_snapshot'
        status in the returned model update. The current supported status
        are 'available' and 'creating_from_snapshot'.

        In order to provide updates for shares with 'creating_from_snapshot'
        status, drivers must implement the call 'get_share_status'.

        :param context: The request context
        :param share_group_dict: The share group details
            EXAMPLE:
            .. code::

                {
                'status': 'creating',
                'project_id': '13c0be6290934bd98596cfa004650049',
                'user_id': 'a0314a441ca842019b0952224aa39192',
                'description': None,
                'deleted': 'False',
                'created_at': datetime.datetime(2015, 8, 10, 15, 14, 6),
                'updated_at': None,
                'source_share_group_snapshot_id':
                    'f6aa3b59-57eb-421e-965c-4e182538e36a',
                'host': 'hostname@backend_name',
                'deleted_at': None,
                'shares': [],  # The new shares being created
                'share_types': [],
                'id': 'some_fake_uuid',
                'name': None
                }
        :param share_group_snapshot_dict: The share group snapshot details
            EXAMPLE:
            .. code::

                {
                'status': 'available',
                'project_id': '13c0be6290934bd98596cfa004650049',
                'user_id': 'a0314a441ca842019b0952224aa39192',
                'description': None,
                'deleted': '0',
                'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'share_group_id': 'some_fake_uuid',
                'share_group_snapshot_members': [
                    {
                    'status': 'available',
                    'user_id': 'a0314a441ca842019b0952224aa39192',
                    'deleted': 'False',
                    'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                    'share': <models.Share>,
                    'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                    'share_proto': 'NFS',
                    'project_id': '13c0be6290934bd98596cfa004650049',
                    'share_group_snapshot_id': 'some_fake_uuid',
                    'deleted_at': None,
                    'id': 'some_fake_uuid',
                    'size': 1
                    }
                ],
                'deleted_at': None,
                'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
                'name': None
                }
        :return: (share_group_model_update, share_update_list)
            share_group_model_update - a dict containing any values to be
            updated for the share group in the database. This value may be
            None

            share_update_list - a list of dictionaries containing dicts for
            every share created in the share group. Any share dicts should
            at a minimum contain the 'id' key and, for synchronous creation,
            the 'export_locations'. For asynchronous share creation this
            dict must also contain the key 'status' with the value set to
            'creating_from_snapshot'. The current supported status are
            'available' and 'creating_from_snapshot'.
            Export locations should be in the same format as returned by
            a share_create. This list may be empty or None.
            EXAMPLE:
            .. code::

                [
                    {
                    'id': 'uuid',
                    'export_locations': [{...}, {...}],
                    },
                    {
                    'id': 'uuid',
                    'export_locations': [],
                    'status': 'creating_from_snapshot',
                    },
                ]
        """
        # Ensure that the share group snapshot has members
        if not share_group_snapshot_dict['share_group_snapshot_members']:
            return None, None

        # Pair each new share with the snapshot member it is cloned from.
        clone_list = self._collate_share_group_snapshot_info(
            share_group_dict, share_group_snapshot_dict)

        share_update_list = []

        LOG.debug('Creating share group from group snapshot %s.',
                  share_group_snapshot_dict['id'])
        for clone in clone_list:
            kwargs = {}
            share_update_info = {}
            if self.driver_handles_share_servers:
                kwargs['share_server'] = share_server
            model_update = (
                self.create_share_from_snapshot(
                    context, clone['share'], clone['snapshot'], **kwargs))
            if isinstance(model_update, dict):
                status = model_update.get('status')
                # NOTE(dviroel): share status is mandatory when answering
                # a model update. If not provided, won't be possible to
                # determine if was successfully created.
                if status is None:
                    msg = _("Driver didn't provide a share status.")
                    raise exception.InvalidShareInstance(reason=msg)
                if status not in [constants.STATUS_AVAILABLE,
                                  constants.STATUS_CREATING_FROM_SNAPSHOT]:
                    msg = _('Driver returned an invalid status: %s') % status
                    raise exception.InvalidShareInstance(reason=msg)
                share_update_info.update({'status': status})
                export_locations = model_update.get('export_locations', [])
            else:
                # NOTE(dviroel): the driver that doesn't implement the new
                # model_update will return only the export locations
                export_locations = model_update

            share_update_info.update({
                'id': clone['share']['id'],
                'export_locations': export_locations,
            })
            share_update_list.append(share_update_info)
        return None, share_update_list

    def delete_share_group(self, context, share_group_dict,
                           share_server=None):
        """Delete a share group

        :param context: The request context
        :param share_group_dict: The share group details
            EXAMPLE:
            .. code::

                {
                'status': 'creating',
                'project_id': '13c0be6290934bd98596cfa004650049',
                'user_id': 'a0314a441ca842019b0952224aa39192',
                'description': None,
                'deleted': 'False',
                'created_at': datetime.datetime(2015, 8, 10, 15, 14, 6),
                'updated_at': None,
                'source_share_group_snapshot_id': 'some_fake_uuid',
                'share_group_type_id': 'some_fake_uuid',
                'host': 'hostname@backend_name',
                'deleted_at': None,
                'shares': [],  # The new shares being created
                'share_types': [],
                'id': 'some_fake_uuid',
                'name': None
                }
        :return: share_group_model_update
            share_group_model_update - a dict containing any values to be
            updated for the group in the database. This value may be None.
        """

    def _cleanup_group_share_snapshot(self, context, share_snapshot,
                                      share_server):
        """Deletes the snapshot of a share belonging to a group."""

        try:
            self.delete_snapshot(
                context, share_snapshot, share_server=share_server)
        except exception.ManilaException:
            msg = ('Could not delete share group snapshot member %(snap)s '
                   'for share %(share)s.')
            LOG.error(msg, {
                'snap': share_snapshot['id'],
                'share': share_snapshot['share_id'],
            })
            raise

    def create_share_group_snapshot(self, context, snap_dict,
                                    share_server=None):
        """Create a share group snapshot.

        :param context: The request context
        :param snap_dict: The share group snapshot details
            EXAMPLE:
            .. code::

                {
                'status': 'available',
                'project_id': '13c0be6290934bd98596cfa004650049',
                'user_id': 'a0314a441ca842019b0952224aa39192',
                'description': None,
                'deleted': '0',
                'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'share_group_id': 'some_fake_uuid',
                'share_group_snapshot_members': [
                    {
                    'status': 'available',
                    'share_type_id': 'some_fake_uuid',
                    'user_id': 'a0314a441ca842019b0952224aa39192',
                    'deleted': 'False',
                    'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                    'share': <models.Share>,
                    'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                    'share_proto': 'NFS',
                    'share_name': 'share_some_fake_uuid',
                    'name': 'share-snapshot-some_fake_uuid',
                    'project_id': '13c0be6290934bd98596cfa004650049',
                    'share_group_snapshot_id': 'some_fake_uuid',
                    'deleted_at': None,
                    'share_id': 'some_fake_uuid',
                    'id': 'some_fake_uuid',
                    'size': 1,
                    'provider_location': None,
                    }
                ],
                'deleted_at': None,
                'id': 'some_fake_uuid',
                'name': None
                }
        :return: (share_group_snapshot_update, member_update_list)
            share_group_snapshot_update - a dict containing any values to be
            updated for the ShareGroupSnapshot in the database. This value
            may be None.

            member_update_list - a list of dictionaries for every member of
            the share group snapshot. Each dict should contain values to be
            updated for the ShareGroupSnapshotMember in the database.
            This list may be empty or None.
        """
        LOG.debug('Attempting to create a share group snapshot %s.',
                  snap_dict['id'])

        snapshot_members = snap_dict.get('share_group_snapshot_members', [])
        if not self._stats.get('snapshot_support'):
            raise exception.ShareGroupSnapshotNotSupported(
                share_group=snap_dict['share_group_id'])
        elif not snapshot_members:
            LOG.warning('No shares in share group to create snapshot.')
            return None, None
        else:
            share_snapshots = []
            snapshot_members_updates = []
            for member in snapshot_members:
                # Build the per-share snapshot model expected by
                # create_snapshot from the group snapshot member.
                share_snapshot = {
                    'snapshot_id': member['share_group_snapshot_id'],
                    'share_id': member['share_id'],
                    'share_instance_id': member['share']['id'],
                    'id': member['id'],
                    'share': member['share'],
                    'share_name': member['share_name'],
                    'name': member['name'],
                    'size': member['share']['size'],
                    'share_size': member['share']['size'],
                    'share_proto': member['share']['share_proto'],
                    'provider_location': None,
                }
                try:
                    member_update = self.create_snapshot(
                        context, share_snapshot, share_server=share_server)
                    if member_update:
                        member_update['id'] = member['id']
                        snapshot_members_updates.append(member_update)
                    share_snapshots.append(share_snapshot)
                except exception.ManilaException as e:
                    msg = ('Could not create share group snapshot. Failed '
                           'to create share snapshot %(snap)s for '
                           'share %(share)s.')
                    LOG.exception(msg, {
                        'snap': share_snapshot['id'],
                        'share': share_snapshot['share_id']
                    })

                    # clean up any share snapshots previously created
                    LOG.debug(
                        'Attempting to clean up snapshots due to failure.')
                    for share_snapshot in share_snapshots:
                        self._cleanup_group_share_snapshot(
                            context, share_snapshot, share_server)
                    raise e

        LOG.debug('Successfully created share group snapshot %s.',
                  snap_dict['id'])
        return None, snapshot_members_updates

    def delete_share_group_snapshot(self, context, snap_dict,
                                    share_server=None):
        """Delete a share group snapshot

        :param context: The request context
        :param snap_dict: The share group snapshot details
            EXAMPLE:
            .. code::

                {
                'status': 'available',
                'project_id': '13c0be6290934bd98596cfa004650049',
                'user_id': 'a0314a441ca842019b0952224aa39192',
                'description': None,
                'deleted': '0',
                'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'share_group_id': 'some_fake_uuid',
                'share_group_snapshot_members': [
                    {
                    'status': 'available',
                    'share_type_id': 'some_fake_uuid',
                    'share_id': 'some_fake_uuid',
                    'user_id': 'a0314a441ca842019b0952224aa39192',
                    'deleted': 'False',
                    'created_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                    'share': <models.Share>,
                    'updated_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                    'share_proto': 'NFS',
                    'share_name': 'share_some_fake_uuid',
                    'name': 'share-snapshot-some_fake_uuid',
                    'project_id': '13c0be6290934bd98596cfa004650049',
                    'share_group_snapshot_id': 'some_fake_uuid',
                    'deleted_at': None,
                    'id': 'some_fake_uuid',
                    'size': 1,
                    'provider_location': 'fake_provider_location_value',
                    }
                ],
                'deleted_at': None,
                'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
                'name': None
                }
        :return: (share_group_snapshot_update, member_update_list)
            share_group_snapshot_update - a dict containing any values to be
            updated for the ShareGroupSnapshot in the database. This value
            may be None.
        """
        snapshot_members = snap_dict.get('share_group_snapshot_members', [])
        LOG.debug('Deleting share group snapshot %s.', snap_dict['id'])
        for member in snapshot_members:
            # Build the per-share snapshot model expected by delete_snapshot
            # from the group snapshot member.
            share_snapshot = {
                'snapshot_id': member['share_group_snapshot_id'],
                'share_id': member['share_id'],
                'share_instance_id': member['share']['id'],
                'id': member['id'],
                'share': member['share'],
                'size': member['share']['size'],
                'share_name': member['share_name'],
                'name': member['name'],
                'share_size': member['share']['size'],
                'share_proto': member['share']['share_proto'],
                'provider_location': member['provider_location'],
            }
            self.delete_snapshot(
                context, share_snapshot, share_server=share_server)
        LOG.debug('Deleted share group snapshot %s.', snap_dict['id'])
        return None, None

    def _collate_share_group_snapshot_info(self, share_group_dict,
                                           share_group_snapshot_dict):
        """Collate the data for a clone of the SG snapshot.

        Given two data structures, a share group snapshot (
        share_group_snapshot_dict) and a new share to be cloned from
        the snapshot (share_group_dict), match up both structures into a
        list of dicts (share & snapshot) suitable for use by existing
        method that clones individual share snapshots.
        """
        clone_list = []
        for share in share_group_dict['shares']:
            clone_info = {'share': share}
            for share_group_snapshot_member in share_group_snapshot_dict[
                    'share_group_snapshot_members']:
                if (share['source_share_group_snapshot_member_id'] ==
                        share_group_snapshot_member['id']):
                    clone_info['snapshot'] = share_group_snapshot_member
                    break

            # clone_info must hold both 'share' and 'snapshot' keys; a
            # missing snapshot means the supplied structures don't match.
            if len(clone_info) != 2:
                msg = _(
                    "Invalid data supplied for creating share group from "
                    "share group snapshot "
                    "%s.") % share_group_snapshot_dict['id']
                raise exception.InvalidShareGroup(reason=msg)

            clone_list.append(clone_info)

        return clone_list

    def get_periodic_hook_data(self, context, share_instances):
        """Dedicated for update/extend of data for existing share instances.
        Redefine this method in share driver to be able to update/change/
        extend share instances data that will be used by periodic hook
        action. One of possible updates is add-on of "automount" CLI
        commands for each share instance for case of notification is
        enabled using 'hook' approach.

        :param context: Current context
        :param share_instances: share instances list provided by share
            manager
        :return: list of share instances.
        """
        return share_instances

    def create_replica(self, context, replica_list, new_replica,
                       access_rules, replica_snapshots, share_server=None):
        """Replicate the active replica to a new replica on this backend.

        .. note::
            This call is made on the host that the new replica is being
            created upon.

        :param context: Current context
        :param replica_list: List of all replicas for a particular share.
            This list also contains the replica to be created. The 'active'
            replica will have its 'replica_state' attr set to 'active'.

            Example::

                [
                    {
                    'id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
                    'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'replica_state': 'in_sync',
                    ...
                    'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05',
                    'share_server': <models.ShareServer> or None,
                    },
                    {
                    'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af',
                    'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'replica_state': 'active',
                    ...
                    'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094',
                    'share_server': <models.ShareServer> or None,
                    },
                    {
                    'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f',
                    'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'replica_state': 'in_sync',
                    ...
                    'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87',
                    'share_server': <models.ShareServer> or None,
                    },
                    ...
                ]

        :param new_replica: The share replica dictionary.

            Example::

                {
                'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f',
                'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                'deleted': False,
                'host': 'openstack2@cmodeSSVMNFS2',
                'status': 'creating',
                'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'terminated_at': None,
                'replica_state': 'out_of_sync',
                'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f',
                'export_locations': [
                    models.ShareInstanceExportLocations,
                ],
                'access_rules_status': 'out_of_sync',
                'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f',
                'share_server_id': 'e6155221-ea00-49ef-abf9-9f89b7dd900a',
                'share_server': <models.ShareServer> or None,
                }

        :param access_rules: A list of access rules.
            These are rules that other instances of the share already obey.
            Drivers are expected to apply access rules to the new replica or
            disregard access rules that don't apply.

            Example::

                [
                    {
                    'id': 'f0875f6f-766b-4865-8b41-cccb4cdf1676',
                    'deleted' = False,
                    'share_id' = 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'access_type' = 'ip',
                    'access_to' = '172.16.20.1',
                    'access_level' = 'rw',
                    }
                ]

        :param replica_snapshots: List of dictionaries of snapshot instances.
            This includes snapshot instances of every snapshot of the share
            whose 'aggregate_status' property was reported to be 'available'
            when the share manager initiated this request. Each list member
            will have two sub dictionaries: 'active_replica_snapshot' and
            'share_replica_snapshot'. The 'active' replica snapshot
            corresponds to the instance of the snapshot on any of the
            'active' replicas of the share while share_replica_snapshot
            corresponds to the snapshot instance for the specific replica
            that will need to exist on the new share replica that is being
            created. The driver needs to ensure that this snapshot instance
            is truly available before transitioning the replica from
            'out_of_sync' to 'in_sync'. Snapshots instances for snapshots
            that have an 'aggregate_status' of 'creating' or 'deleting'
            will be polled for in the ``update_replicated_snapshot`` method.

            Example::

                [
                    {
                    'active_replica_snapshot': {
                        'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e',
                        'share_instance_id':
                            '10e49c3e-aca9-483b-8c2d-1c337b38d6af',
                        'status': 'available',
                        'provider_location':
                            '/newton/share-snapshot-10e49c3e-aca9',
                        ...
                        },
                    'share_replica_snapshot': {
                        'id': '<new snapshot instance id>',
                        'share_instance_id':
                            'e82ff8b6-65f0-11e5-9d70-feff819cdc9f',
                        'status': 'available',
                        'provider_location': None,
                        ...
                        },
                    }
                ]

        :param share_server: <models.ShareServer> or None
            Share server of the replica being created.
        :return: None or a dictionary.
            The dictionary can contain export_locations replica_state and
            access_rules_status. export_locations is a list of paths and
            replica_state is one of 'active', 'in_sync', 'out_of_sync' or
            'error'.

            .. important::

                A backend supporting 'writable' type replication should
                return 'active' as the replica_state.

            Export locations should be in the same format as returned during
            the ``create_share`` call.

            Example::

                {
                    'export_locations': [
                        {
                            'path': '172.16.20.22/sample/export/path',
                            'is_admin_only': False,
                            'metadata': {'some_key': 'some_value'},
                        },
                    ],
                    'replica_state': 'in_sync',
                    'access_rules_status': 'in_sync',
                }
        """
        raise NotImplementedError()

    def delete_replica(self, context, replica_list, replica_snapshots,
                       replica, share_server=None):
        """Delete a replica.

        .. note::
            This call is made on the host that hosts the replica being
            deleted.

        :param context: Current context
        :param replica_list: List of all replicas for a particular share
            This list also contains the replica to be deleted. The 'active'
            replica will have its 'replica_state' attr set to 'active'.

            Example::

                [
                    {
                    'id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
                    'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'replica_state': 'in_sync',
                    ...
                    'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05',
                    'share_server': <models.ShareServer> or None,
                    },
                    {
                    'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af',
                    'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'replica_state': 'active',
                    ...
                    'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094',
                    'share_server': <models.ShareServer> or None,
                    },
                    {
                    'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f',
                    'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'replica_state': 'in_sync',
                    ...
                    'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87',
                    'share_server': <models.ShareServer> or None,
                    },
                    ...
                ]

        :param replica: Dictionary of the share replica being deleted.

            Example::

                {
                'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f',
                'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                'deleted': False,
                'host': 'openstack2@cmodeSSVMNFS2',
                'status': 'available',
                'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'terminated_at': None,
                'replica_state': 'in_sync',
                'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f',
                'export_locations': [
                    models.ShareInstanceExportLocations
                ],
                'access_rules_status': 'out_of_sync',
                'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f',
                'share_server_id': '53099868-65f1-11e5-9d70-feff819cdc9f',
                'share_server': <models.ShareServer> or None,
                }

        :param replica_snapshots: List of dictionaries of snapshot instances.
            The dict contains snapshot instances that are associated with
            the share replica being deleted.
            No model updates to snapshot instances are possible in this
            method. The driver should return when the cleanup is completed
            on the backend for both, the snapshots and the replica itself.
            Drivers must handle situations where the snapshot may not yet
            have finished 'creating' on this replica.

            Example::

                [
                    {
                    'id': '89dafd00-0999-4d23-8614-13eaa6b02a3b',
                    'snapshot_id': '3ce1caf7-0945-45fd-a320-714973e949d3',
                    'status': 'available',
                    'share_instance_id':
                        'e82ff8b6-65f0-11e5-9d70-feff819cdc9f'
                    ...
                    },
                    {
                    'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e',
                    'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40',
                    'status': 'creating',
                    'share_instance_id':
                        'e82ff8b6-65f0-11e5-9d70-feff819cdc9f'
                    ...
                    },
                    ...
                ]

        :param share_server: <models.ShareServer> or None
            Share server of the replica to be deleted.
        :return: None.
        :raises: Exception.
            Any exception raised will set the share replica's 'status' and
            'replica_state' attributes to 'error_deleting'. It will not
            affect snapshots belonging to this replica.
        """
        raise NotImplementedError()

    def promote_replica(self, context, replica_list, replica, access_rules,
                        share_server=None, quiesce_wait_time=None):
        """Promote a replica to 'active' replica state.

        .. note::
            This call is made on the host that hosts the replica being
            promoted.

        :param context: Current context
        :param replica_list: List of all replicas for a particular share
            This list also contains the replica to be promoted. The 'active'
            replica will have its 'replica_state' attr set to 'active'.

            Example::

                [
                    {
                    'id': 'd487b88d-e428-4230-a465-a800c2cce5f8',
                    'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'replica_state': 'in_sync',
                    ...
                    'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05',
                    'share_server': <models.ShareServer> or None,
                    },
                    {
                    'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af',
                    'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'replica_state': 'active',
                    ...
                    'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094',
                    'share_server': <models.ShareServer> or None,
                    },
                    {
                    'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f',
                    'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'replica_state': 'in_sync',
                    ...
                    'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87',
                    'share_server': <models.ShareServer> or None,
                    },
                    ...
                ]

        :param replica: Dictionary of the replica to be promoted.

            Example::

                {
                'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f',
                'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                'deleted': False,
                'host': 'openstack2@cmodeSSVMNFS2',
                'status': 'available',
                'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58),
                'terminated_at': None,
                'replica_state': 'in_sync',
                'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f',
                'export_locations': [
                    models.ShareInstanceExportLocations
                ],
                'access_rules_status': 'in_sync',
                'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f',
                'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87',
                'share_server': <models.ShareServer> or None,
                }

        :param access_rules: A list of access rules
            These access rules are obeyed by other instances of the share

            Example::

                [
                    {
                    'id': 'f0875f6f-766b-4865-8b41-cccb4cdf1676',
                    'deleted' = False,
                    'share_id' = 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f',
                    'access_type' = 'ip',
                    'access_to' = '172.16.20.1',
                    'access_level' = 'rw',
                    }
                ]

        :param share_server: <models.ShareServer> or None
            Share server of the replica to be promoted.
        :param quiesce_wait_time: time in seconds or None
            Share replica promote quiesce wait time.
        :return: updated_replica_list or None.
            The driver can return the updated list as in the request
            parameter. Changes that will be updated to the Database are:
            'export_locations', 'access_rules_status' and 'replica_state'.
        :raises: Exception.
            This can be any exception derived from BaseException. This is
            re-raised by the manager after some necessary cleanup.
            If the driver raises an exception during promotion, it is
            assumed that all of the replicas of the share are in an
            inconsistent state. Recovery is only possible through the
            periodic update call and/or administrator intervention to
            correct the 'status' of the affected replicas if they become
            healthy again.
        """
        raise NotImplementedError()

    def update_replica_state(self, context, replica_list, replica,
                             access_rules, replica_snapshots,
                             share_server=None):
        """Update the replica_state of a replica.

        ..
note:: This call is made on the host which hosts the replica being updated. Drivers should fix replication relationships that were broken if possible inside this method. This method is called periodically by the share manager; and whenever requested by the administrator through the 'resync' API. :param context: Current context :param replica_list: List of all replicas for a particular share This list also contains the replica to be updated. The 'active' replica will have its 'replica_state' attr set to 'active'. Example:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, { 'id': 'e82ff8b6-65f0-11e5-9d70-feff819cdc9f', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '07574742-67ea-4dfd-9844-9fbd8ada3d87', 'share_server': or None, }, ... ] :param replica: Dictionary of the replica being updated Replica state will always be 'in_sync', 'out_of_sync', or 'error'. Replicas in 'active' state will not be passed via this parameter. 
Example:: { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'deleted': False, 'host': 'openstack2@cmodeSSVMNFS1', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'replica_state': 'in_sync', 'availability_zone_id': 'e2c2db5c-cb2f-4697-9966-c06fb200cb80', 'export_locations': [ models.ShareInstanceExportLocations, ], 'access_rules_status': 'in_sync', 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', } :param access_rules: A list of access rules These access rules are obeyed by other instances of the share. The driver could attempt to sync on any un-applied access_rules. Example:: [ { 'id': 'f0875f6f-766b-4865-8b41-cccb4cdf1676', 'deleted' = False, 'share_id' = 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'access_type' = 'ip', 'access_to' = '172.16.20.1', 'access_level' = 'rw', } ] :param replica_snapshots: List of dictionaries of snapshot instances. This includes snapshot instances of every snapshot of the share whose 'aggregate_status' property was reported to be 'available' when the share manager initiated this request. Each list member will have two sub dictionaries: 'active_replica_snapshot' and 'share_replica_snapshot'. The 'active' replica snapshot corresponds to the instance of the snapshot on any of the 'active' replicas of the share while share_replica_snapshot corresponds to the snapshot instance for the specific replica being updated. The driver needs to ensure that this snapshot instance is truly available before transitioning from 'out_of_sync' to 'in_sync'. Snapshots instances for snapshots that have an 'aggregate_status' of 'creating' or 'deleting' will be polled for in the update_replicated_snapshot method. 
Example:: [ { 'active_replica_snapshot': { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'share_instance_id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'status': 'available', 'provider_location': '/newton/share-snapshot-10e49c3e-aca9', ... }, 'share_replica_snapshot': { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_instance_id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'status': 'creating', 'provider_location': None, ... }, } ] :param share_server: or None :return: replica_state: a str value denoting the replica_state. Valid values are 'in_sync' and 'out_of_sync' or None (to leave the current replica_state unchanged). """ raise NotImplementedError() def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Create a snapshot on active instance and update across the replicas. .. note:: This call is made on the 'active' replica's host. Drivers are expected to transfer the snapshot created to the respective replicas. The driver is expected to return model updates to the share manager. If it was able to confirm the creation of any number of the snapshot instances passed in this interface, it can set their status to 'available' as a cue for the share manager to set the progress attr to '100%'. :param context: Current context :param replica_list: List of all replicas for a particular share The 'active' replica will have its 'replica_state' attr set to 'active'. Example:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, ... ] :param replica_snapshots: List of dictionaries of snapshot instances. 
These snapshot instances track the snapshot across the replicas. All the instances will have their status attribute set to 'creating'. Example:: [ { 'id': 'd3931a93-3984-421e-a9e7-d9f71895450a', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'status: 'creating', 'progress': '0%', ... }, { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'status: 'creating', 'progress': '0%', ... }, ... ] :param share_server: or None :return: List of dictionaries of snapshot instances. The dictionaries can contain values that need to be updated on the database for the snapshot instances being created. :raises: Exception. Any exception in this method will set all instances to 'error'. """ raise NotImplementedError() def revert_to_replicated_snapshot(self, context, active_replica, replica_list, active_replica_snapshot, replica_snapshots, share_access_rules, snapshot_access_rules, share_server=None): """Reverts a replicated share (in place) to the specified snapshot. .. note:: This call is made on the 'active' replica's host, since drivers may not be able to revert snapshots on individual replicas. Does not delete the share snapshot. The share and snapshot must both be 'available' for the restore to be attempted. The snapshot must be the most recent one taken by Manila; the API layer performs this check so the driver doesn't have to. The share must be reverted in place to the contents of the snapshot. Application admins should quiesce or otherwise prepare the application for the shared file system contents to change suddenly. :param context: Current context :param active_replica: The current active replica :param replica_list: List of all replicas for a particular share The 'active' replica will have its 'replica_state' attr set to 'active' and its 'status' set to 'reverting'. :param active_replica_snapshot: snapshot to be restored :param replica_snapshots: List of dictionaries of snapshot instances. 
These snapshot instances track the snapshot across the replicas. The snapshot of the active replica to be restored with have its status attribute set to 'restoring'. :param share_access_rules: List of access rules for the affected share. :param snapshot_access_rules: List of access rules for the affected snapshot. :param share_server: Optional -- Share server model """ raise NotImplementedError() def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Delete a snapshot by deleting its instances across the replicas. .. note:: This call is made on the 'active' replica's host, since drivers may not be able to delete the snapshot from an individual replica. The driver is expected to return model updates to the share manager. If it was able to confirm the removal of any number of the snapshot instances passed in this interface, it can set their status to 'deleted' as a cue for the share manager to clean up that instance from the database. :param context: Current context :param replica_list: List of all replicas for a particular share The 'active' replica will have its 'replica_state' attr set to 'active'. Example:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, ... ] :param replica_snapshots: List of dictionaries of snapshot instances. These snapshot instances track the snapshot across the replicas. All the instances will have their status attribute set to 'deleting'. Example:: [ { 'id': 'd3931a93-3984-421e-a9e7-d9f71895450a', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'status': 'deleting', 'progress': '100%', ... 
}, { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'status: 'deleting', 'progress': '100%', ... }, ... ] :param share_server: or None :return: List of dictionaries of snapshot instances. The dictionaries can contain values that need to be updated on the database for the snapshot instances being deleted. To confirm the deletion of the snapshot instance, set the 'status' attribute of the instance to 'deleted' (constants.STATUS_DELETED) :raises: Exception. Any exception in this method will set the status attribute of all snapshot instances to 'error_deleting'. """ raise NotImplementedError() def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): """Update the status of a snapshot instance that lives on a replica. .. note:: For DR and Readable styles of replication, this call is made on the replica's host and not the 'active' replica's host. This method is called periodically by the share manager. It will query for snapshot instances that track the parent snapshot across non-'active' replicas. Drivers can expect the status of the instance to be 'creating' or 'deleting'. If the driver sees that a snapshot instance has been removed from the replica's backend and the instance status was set to 'deleting', it is expected to raise a SnapshotResourceNotFound exception. All other exceptions will set the snapshot instance status to 'error'. If the instance was not in 'deleting' state, raising a SnapshotResourceNotFound will set the instance status to 'error'. :param context: Current context :param replica_list: List of all replicas for a particular share The 'active' replica will have its 'replica_state' attr set to 'active'. Example:: [ { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'in_sync', ... 
'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', 'share_server': or None, }, { 'id': '10e49c3e-aca9-483b-8c2d-1c337b38d6af', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'replica_state': 'active', ... 'share_server_id': 'f63629b3-e126-4448-bec2-03f788f76094', 'share_server': or None, }, ... ] :param share_replica: Share replica dictionary. This replica is associated with the snapshot instance whose status is being updated. Replicas in 'active' replica_state will not be passed via this parameter. Example:: { 'id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'deleted': False, 'host': 'openstack2@cmodeSSVMNFS1', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'replica_state': 'in_sync', 'availability_zone_id': 'e2c2db5c-cb2f-4697-9966-c06fb200cb80', 'export_locations': [ models.ShareInstanceExportLocations, ], 'access_rules_status': 'in_sync', 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '4ce78e7b-0ef6-4730-ac2a-fd2defefbd05', } :param replica_snapshots: List of dictionaries of snapshot instances. These snapshot instances track the snapshot across the replicas. This will include the snapshot instance being updated as well. Example:: [ { 'id': 'd3931a93-3984-421e-a9e7-d9f71895450a', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', ... }, { 'id': '8bda791c-7bb6-4e7b-9b64-fefff85ff13e', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', ... }, ... ] :param replica_snapshot: Dictionary of the snapshot instance. This is the instance to be updated. It will be in 'creating' or 'deleting' state when sent via this parameter. 
Example:: { 'name': 'share-snapshot-18825630-574f-4912-93bb-af4611ef35a2', 'share_id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'share_name': 'share-d487b88d-e428-4230-a465-a800c2cce5f8', 'status': 'creating', 'id': '18825630-574f-4912-93bb-af4611ef35a2', 'deleted': False, 'created_at': datetime.datetime(2016, 8, 3, 0, 5, 58), 'share': , 'updated_at': datetime.datetime(2016, 8, 3, 0, 5, 58), 'share_instance_id': 'd487b88d-e428-4230-a465-a800c2cce5f8', 'snapshot_id': '13ee5cb5-fc53-4539-9431-d983b56c5c40', 'progress': '0%', 'deleted_at': None, 'provider_location': None, } :param share_server: or None :return: replica_snapshot_model_update: a dictionary. The dictionary must contain values that need to be updated on the database for the snapshot instance that represents the snapshot on the replica. :raises: exception.SnapshotResourceNotFound Raise this exception for snapshots that are not found on the backend and their status was 'deleting'. """ raise NotImplementedError() def get_filter_function(self, pool=None): """Get filter_function string. Returns either the string from the driver instance or global section in manila.conf. If nothing is specified in manila.conf, then try to find the default filter_function. When None is returned the scheduler will always pass the driver instance. :param pool: pool name to get the filter or None :return: a filter_function string or None """ ret_function = self.configuration.filter_function if not ret_function: ret_function = CONF.filter_function if not ret_function: kwargs = {'pool': pool} if pool else {} # pylint: disable=assignment-from-none ret_function = self.get_default_filter_function(**kwargs) # pylint: enable=assignment-from-none return ret_function def get_goodness_function(self): """Get good_function string. Returns either the string from the driver instance or global section in manila.conf. If nothing is specified in manila.conf, then try to find the default goodness_function. 
When None is returned the scheduler will give the lowest score to the driver instance. :return: a goodness_function string or None """ ret_function = self.configuration.goodness_function if not ret_function: ret_function = CONF.goodness_function if not ret_function: # pylint: disable=assignment-from-none ret_function = self.get_default_goodness_function() # pylint: enable=assignment-from-none return ret_function def get_default_filter_function(self, pool=None): """Get the default filter_function string. Each driver could overwrite the method to return a well-known default string if it is available. :param pool: pool name to get the filter or None :return: None """ return None def get_default_goodness_function(self): """Get the default goodness_function string. Each driver could overwrite the method to return a well-known default string if it is available. :return: None """ return None def snapshot_update_access(self, context, snapshot, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given snapshot. ``access_rules`` contains all access_rules that need to be on the share. If the driver can make bulk access rule updates, it can safely ignore the ``add_rules`` and ``delete_rules`` parameters. If the driver cannot make bulk access rule changes, it can rely on new rules to be present in ``add_rules`` and rules that need to be removed to be present in ``delete_rules``. When a rule in ``add_rules`` already exists in the back end, drivers must not raise an exception. When a rule in ``delete_rules`` was never applied, drivers must not raise an exception, or attempt to set the rule to ``error`` state. ``add_rules`` and ``delete_rules`` can be empty lists, in this situation, drivers should ensure that the rules present in ``access_rules`` are the same as those on the back end. :param context: Current context :param snapshot: Snapshot model with snapshot data. 
:param access_rules: All access rules for given snapshot :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param share_server: None or Share server model """ raise NotImplementedError() def update_share_usage_size(self, context, shares): """Invoked to get the usage size of given shares. Driver can use this method to update the share usage size of the shares. To do that, a dictionary of shares should be returned. :param shares: None or a list of all shares for updates. :returns: An empty list or a list of dictionary of updates in the following format. The value of "used_size" can be specified in GiB units, as a floating point number:: [ { 'id': '09960614-8574-4e03-89cf-7cf267b0bd08', 'used_size': '200', 'gathered_at': datetime.datetime(2017, 8, 10, 15, 14, 6), }, ] """ LOG.debug("This backend does not support gathering 'used_size' of " "shares created on it.") return [] def get_configured_ip_versions(self): """"Get allowed IP versions. The supported versions are returned with list, possible values are: [4], [6], or [4, 6] Drivers that assert ipv6_implemented = True must override this method. If the returned list includes 4, then shares created by this driver must have an IPv4 export location. If the list includes 6, then shares created by the driver must have an IPv6 export location. Drivers should check that their storage controller actually has IPv4/IPv6 enabled and configured properly. """ # For drivers that haven't implemented IPv6, assume legacy behavior if not self.ipv6_implemented: return [4] raise NotImplementedError() def add_ip_version_capability(self, data): """Add IP version support capabilities. When DHSS is true, the capabilities are determined by driver and configured network plugin. When DHSS is false, the capabilities are determined by driver only. 
:param data: the capability dictionary :returns: capability data """ self.ip_versions = self.get_configured_ip_versions() if isinstance(self.ip_versions, list): self.ip_versions = set(self.ip_versions) else: self.ip_versions = set(list(self.ip_versions)) if not self.ip_versions: LOG.error("Backend %s supports neither IPv4 nor IPv6.", data['share_backend_name']) if self.driver_handles_share_servers: network_versions = self.network_api.enabled_ip_versions self.ip_versions = self.ip_versions & network_versions if not self.ip_versions: LOG.error("The enabled IP version of the network plugin is " "not compatible with the version supported by " "backend %s.", data['share_backend_name']) data['ipv4_support'] = (4 in self.ip_versions) data['ipv6_support'] = (6 in self.ip_versions) return data def get_backend_info(self, context): """Get driver and array configuration parameters. Driver can use this method to get the special configuration info and return for assessment. The share manager service uses this assessment to invoke "ensure_shares" during service startup. :returns: A dictionary containing driver-specific info. Example:: { 'version': '2.23' 'port': '80', 'logicalportip': '1.1.1.1', ... } """ raise NotImplementedError() def get_optional_share_creation_data(self, share, share_server=None): """Get info to set in shares after their creation. Driver can use this method to get the special info and return for assessment. The share manager service uses this assessment to set this info to shares after the creation. :returns: A dictionary containing driver-specific info. Example:: { 'metadata': {'__mount_options': 'fake_key=fake_val'} ... } """ return {} def ensure_shares(self, context, shares): """Invoked to ensure that shares are exported. Driver can use this method to update the "status" and/or list of export locations of the shares if they change. To do that, a dictionary of shares should be returned. 
In addition, the driver can seek to "reapply_access_rules" (boolean) on a per-share basis. When this property exists and is set to True, the share manager service will invoke "update_access" with all the access rules from the service database. :shares: A list of all shares for updates. :returns: None or a dictionary of updates in the format. Example:: { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'export_locations': [{...}, {...}], 'status': 'error', 'reapply_access_rules': False, }, '28f6eabb-4342-486a-a7f4-45688f0c0295': { 'export_locations': [{...}, {...}], 'status': 'available', 'reapply_access_rules': True, }, } """ raise NotImplementedError() def get_share_server_network_info( self, context, share_server, identifier, driver_options): """Obtain network allocations used by share server. :param context: Current context. :param share_server: Share server model. :param identifier: A driver-specific share server identifier :param driver_options: Dictionary of driver options to assist managing the share server :return: A list containing IP addresses allocated in the backend. Example:: ['10.10.10.10', 'fd11::2000', '192.168.10.10'] """ raise NotImplementedError() def manage_server(self, context, share_server, identifier, driver_options): """Manage the share server and return compiled back end details. :param context: Current context. :param share_server: Share server model. :param identifier: A driver-specific share server identifier :param driver_options: Dictionary of driver options to assist managing the share server :return: Identifier and dictionary with back end details to be saved in the database. Example:: 'my_new_server_identifier',{'server_name': 'my_old_server'} """ raise NotImplementedError() def unmanage_server(self, server_details, security_services=None): """Unmanages the share server. If a driver supports unmanaging of share servers, the driver must override this method and return successfully. :param server_details: share server backend details. 
:param security_services: list of security services configured with this share server. """ raise NotImplementedError() def get_share_status(self, share, share_server=None): """Invoked periodically to get the current status of a given share. Driver can use this method to update the status of a share that is still pending from other operations. This method is expected to be called in a periodic interval set by the 'periodic_interval' configuration in seconds. :param share: share to get updated status from. :param share_server: share server model or None. :returns: a dictionary of updates with the current share status, that must be 'available', 'creating_from_snapshot' or 'error', a list of export locations, if available, and a progress field which indicates the completion of the share creation operation. EXAMPLE:: { 'status': 'available', 'export_locations': [{...}, {...}], 'progress': '50%' } :raises: ShareBackendException. A ShareBackendException in this method will set the instance status to 'error'. """ raise NotImplementedError() def share_server_migration_start(self, context, src_share_server, dest_share_server, shares, snapshots): """Starts migration of a given share server to another host. .. note:: Is called in destination share server's backend to start migration. Driver should implement this method if willing to perform a server migration in driver-assisted way, useful when source share server's backend driver is compatible with destination backend driver. This method should start the migration procedure in the backend and return immediately. Following steps should be done in 'share_server_migration_continue'. :param context: The 'context.RequestContext' object for the request. :param src_share_server: Reference to the original share server. :param dest_share_server: Reference to the share server to be used by as destination. :param shares: All shares in the source share server that should be migrated. 
:param snapshots: All snapshots in the source share server that should be migrated. :return: Dict with migration information to be set in the destination share server. Example:: { 'backend_details': { 'migration_info_key': 'migration_info_value', } } """ raise NotImplementedError() def share_server_migration_continue(self, context, src_share_server, dest_share_server, shares, snapshots): """Continues migration of a given share server to another host. .. note:: Is called in destination share server's backend to continue migration. Driver should implement this method to continue monitor the migration progress in storage and perform following steps until 1st phase is completed. :param context: The 'context.RequestContext' object for the request. :param src_share_server: Reference to the original share server. :param dest_share_server: Reference to the share server to be used as destination. :param shares: All shares in the source share server that should be migrated. :param snapshots: All snapshots in the source share server that should be migrated. :return: Boolean value to indicate if 1st phase is finished. """ raise NotImplementedError() def share_server_migration_get_progress(self, context, src_share_server, dest_share_server, shares, snapshots): """Obtains progress of migration of a share server to another host. .. note:: Is called in destination share's backend to obtain migration progress. If possible, driver can implement a way to return migration progress information. :param context: The 'context.RequestContext' object for the request. :param src_share_server: Reference to the original share server. :param dest_share_server: Reference to the share server to be used as destination. :param shares: All shares in the source share server that should be migrated. :param snapshots: All snapshots in the source share server that should be migrated. :return: A dictionary with at least 'total_progress' field containing the percentage value. 
""" raise NotImplementedError() def share_server_migration_cancel(self, context, src_share_server, dest_share_server, shares, snapshots): """Cancels migration of a given share server to another host. .. note:: Is called in destination share server's backend to continue migration. If possible, driver can implement a way to cancel an in-progress migration. :param context: The 'context.RequestContext' object for the request. :param src_share_server: Reference to the original share server. :param dest_share_server: Reference to the share server to be used as destination. :param shares: All shares in the source share server that should be migrated. :param snapshots: All snapshots in the source share server that should be migrated. """ raise NotImplementedError() def share_server_migration_check_compatibility( self, context, share_server, dest_host, old_share_network, new_share_network, shares_request_spec): """Checks destination compatibility for migration of a share server. .. note:: Is called in destination share server's backend to continue migration. Can be called by an admin to check if a given host is compatible or by the share manager to test compatibility with destination backend. Driver should check if it is compatible with destination backend so driver-assisted migration can proceed. :param context: The 'context.RequestContext' object for the request. :param share_server: Share server model. :param dest_host: Reference to the hos to be used by the migrated share server. :param old_share_network: Share network model where the source share server is placed. :param new_share_network: Share network model where the share server is going to be migrated to. :param shares_request_spec: Dict. Contains information about all shares and share types that belong to the source share server. The drivers can use this information to check if the capabilities match with the destination backend and if there is available space to hold the new share server and all its resource. 
Example:: { 'shares_size': 100, 'snapshots_size': 100, 'shares_req_spec': [ { 'share_properties': { 'size': 10 'user_id': '2f5c1df4-5203-444e-b68e-1e60f3f26fc3' 'project_id': '0b82b278-51d6-4357-b273-0d7263982c31' 'snapshot_support': True 'create_share_from_snapshot_support': True 'revert_to_snapshot_support': False 'mount_snapshot_support': False 'share_proto': NFS 'share_type_id': '360e01c1-a4f7-4782-9676-dc013f1a2f21' 'is_public': False 'share_group_id': None 'source_share_group_snapshot_member_id': None 'snapshot_id': None }, 'share_instance_properties': { 'availability_zone_id': '02377ad7-381c-4b25-a04c-6fd218f22a91', 'share_network_id': '691544aa-da83-4669-8522-22719f236e16', 'share_server_id': 'cd658413-d02c-4d1b-ac8a-b6b972e76bac', 'share_id': 'e42fec45-781e-4dcc-a4d2-44354ad5ae91', 'host': 'hostA@backend1#pool0', 'status': 'available', }, 'share_type': { 'id': '360e01c1-a4f7-4782-9676-dc013f1a2f21', 'name': 'dhss_false', 'is_public': False, 'extra_specs': { 'driver_handles_share_servers': False, } }, 'share_id': e42fec45-781e-4dcc-a4d2-44354ad5ae91, }, ], } :return: A dictionary containing values indicating if destination backend is compatible, if share can remain writable during migration, if it can preserve all file metadata and if it can perform migration of given share non-disruptively. Example:: { 'compatible': True, 'writable': True, 'nondisruptive': True, 'preserve_snapshots': True, 'migration_cancel': True, 'migration_get_progress': False, } """ return { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_snapshots': False, 'migration_cancel': False, 'migration_get_progress': False, } def share_server_migration_complete(self, context, src_share_server, dest_share_server, shares, snapshots, new_network_info): """Completes migration of a given share server to another host. .. note:: Is called in destination share server's backend to complete migration. 
If driver is implementing 2-phase migration, this method should perform the disruptive tasks related to the 2nd phase of migration, thus completing it. Driver should also delete all original data from source backend. It expected that all shares and snapshots will be available at the destination share server in the end of the migration complete and all updates provided in the returned model update. :param context: The 'context.RequestContext' object for the request. :param src_share_server: Reference to the original share server. :param dest_share_server: Reference to the share server to be used as destination. :param shares: All shares in the source share server that should be migrated. :param snapshots: All snapshots in the source share server that should be migrated. :param new_network_info: Network allocation associated to the destination share server. :return: If the migration changes the shares export locations, snapshots provider locations or snapshots export locations, this method should return a dictionary containing a list of share instances and snapshot instances indexed by their id's, where each instance should provide a dict with the relevant information that need to be updated. 
Example:: { 'share_updates': { '4363eb92-23ca-4888-9e24-502387816e2a': { 'export_locations': [ { 'path': '1.2.3.4:/foo', 'metadata': {}, 'is_admin_only': False }, { 'path': '5.6.7.8:/foo', 'metadata': {}, 'is_admin_only': True }, ], 'pool_name': 'poolA', }, }, 'snapshot_updates': { 'bc4e3b28-0832-4168-b688-67fdc3e9d408': { 'provider_location': '/snapshots/foo/bar_1', 'export_locations': [ { 'path': '1.2.3.4:/snapshots/foo/bar_1', 'is_admin_only': False, }, { 'path': '5.6.7.8:/snapshots/foo/bar_1', 'is_admin_only': True, }, ], }, '2e62b7ea-4e30-445f-bc05-fd523ca62941': { 'provider_location': '/snapshots/foo/bar_2', 'export_locations': [ { 'path': '1.2.3.4:/snapshots/foo/bar_2', 'is_admin_only': False, }, { 'path': '5.6.7.8:/snapshots/foo/bar_2', 'is_admin_only': True, }, ], }, } 'backend_details': { 'new_share_server_info_key': 'new_share_server_info_value', }, } """ raise NotImplementedError() def update_share_server_security_service( self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): """Updates share server security service configuration. If the driver supports different security services, the user can request the addition of a new security service, with a different type. If the user wants to update the current security service configuration, the driver will receive both current and new security services, which will always be of the same type. :param context: The 'context.RequestContext' object for the request. :param share_server: Reference to the share server object that will be updated. :param network_info: All network allocation associated with the share server that will be updated. :param share_instances: A list of share instances that belong to the share server that is being updated. :param share_instance_rules: A list of access rules, grouped by share instance, in the following format. 
Example:: [ { 'share_instance_id': '3bc10d67-2598-4122-bb62-0bdeaa8c6db3', 'access_rules': [ { 'access_id':'906d0094-3e34-4d6c-a184-d08a908033e3', 'access_type':'ip', 'access_key':None, 'access_to':'10.0.0.1', 'access_level':'rw' ... }, ], }, ] :param new_security_service: New security service object to be configured in the share server. :param current_security_service: When provided, represents the current security service that will be replaced by the 'new_security_service'. :raises: ShareBackendException. A ShareBackendException should only be raised if the share server failed to update the security service, compromising all its access rules. By raising an exception, the share server and all its share instances will be set to 'error'. :return: None, or a dictionary of updates in the following format. Example:: { '3bc10d67-2598-4122-bb62-0bdeaa8c6db3': { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'access_key': 'alice31493e5441b8171d2310d80e37e', 'state': 'error', }, '28f6eabb-4342-486a-a7f4-45688f0c0295': { 'access_key': 'bob0078aa042d5a7325480fd13228b', 'state': 'active', }, }, } The top level keys are share_instance_id's which should provide another dictionary of access rules to be updated, indexed by their 'access_id'. The inner access rules dictionary should only contain the access rules that need to be updated. """ raise NotImplementedError() def check_update_share_server_security_service( self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): """Check if the current share server security service is supported. If the driver supports different security services, the user can request the addition of a new security service, with a different type. If the user wants to update the current security service configuration, the driver will receive both current and new security services, which will always be of the same type. 
:param context: The 'context.RequestContext' object for the request. :param share_server: Reference to the share server object that will be updated. :param network_info: All network allocation associated with the share server that will be updated. :param share_instances: A list of share instances that belong to the share server that is affected by the update. :param share_instance_rules: A list of access rules, grouped by share instance, in the following format. Example:: [ { 'share_instance_id': '3bc10d67-2598-4122-bb62-0bdeaa8c6db3', 'access_rules': [ { 'access_id':'906d0094-3e34-4d6c-a184-d08a908033e3', 'access_type':'ip', 'access_key':None, 'access_to':'10.0.0.1', 'access_level':'rw' ... }, ], }, ] :param new_security_service: New security service object to be configured in the share server. :param current_security_service: When provided, represents the current security service that will be replaced by the 'new_security_service'. :return: 'True' if the driver support the requested update, 'False' otherwise. """ raise NotImplementedError() def check_update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_share_network_subnet, security_services, share_instances, share_instances_rules): """"Check if the share server network allocation update is supported. :param context: The 'context.RequestContext' object for the request. :param share_server: Reference to the share server object that will be updated. :param current_network_allocations: All network allocations associated with the share server that will be updated: Example:: { 'admin_network_allocations': [ { 'ip_address': '10.193.154.11', 'ip_version': 4, 'cidr': '10.193.154.0/28', 'gateway': '10.193.154.1', 'mtu': 1500, 'network_type': 'vlan', 'segmentation_id': 3000, 'mac_address': ' AA:AA:AA:AA:AA:AA', ... 
}, ], 'subnets': [ { 'share_network_subnet_id': '0bdeaa8c6db3-3bc10d67', 'neutron_net_id': '2598-4122-bb62-0bdeaa8c6db3', 'neutron_subnet_id': '3bc10d67-2598-4122-bb62', 'network_allocations': [ { 'ip_address': '10.193.154.10', 'ip_version': 4, 'cidr': '10.193.154.0/28', 'gateway': '10.193.154.1', 'mtu': 1500, 'network_type': 'vlan', 'segmentation_id': 3000, 'mac_address': ' AA:AA:AA:AA:AA:AA', ... }, ], }, ], } :param new_share_network_subnet: dict containing the subnet data that has to be checked if it can be added to the share server: Example:: { 'availability_zone_id': '0bdeaa8c6db3-3bc10d67', 'neutron_net_id': '2598-4122-bb62-0bdeaa8c6db3', 'neutron_subnet_id': '3bc10d67-2598-4122-bb62', 'ip_version': 4, 'cidr': '10.193.154.0/28', 'gateway': '10.193.154.1', 'mtu': 1500, 'network_type': 'vlan', 'segmentation_id': 3000, } :param security_services: list of security services configured with this share server. :param share_instances: A list of share instances that belong to the share server that is affected by the update. :param share_instances_rules: A list of access rules, grouped by share instance, in the following format. Example:: [ { 'share_instance_id': '3bc10d67-2598-4122-bb62-0bdeaa8c6db3', 'access_rules': [ { 'access_id':'906d0094-3e34-4d6c-a184-d08a908033e3', 'access_type':'ip', 'access_key':None, 'access_to':'10.0.0.1', 'access_level':'rw' ... }, ], }, ] :return Boolean indicating whether the update is possible or not. It is the driver responsibility to log the reason why not accepting the update. """ raise NotImplementedError() def update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_network_allocations, security_services, shares, snapshots): """Updates a share server's network allocations. :param context: The 'context.RequestContext' object for the request. :param share_server: reference to the share server that have to update network allocations. 
:param current_network_allocations: all network allocations associated with the share server that will be updated Example:: { 'admin_network_allocations': [ { 'ip_address': '10.193.154.11', 'ip_version': 4, 'cidr': '10.193.154.0/28', 'gateway': '10.193.154.1', 'mtu': 1500, 'network_type': 'vlan', 'segmentation_id': 3000, 'mac_address': ' AA:AA:AA:AA:AA:AA', }, ... ], 'subnets': [ { 'share_network_subnet_id': '0bdeaa8c6db3-3bc10d67', 'neutron_net_id': '2598-4122-bb62-0bdeaa8c6db3', 'neutron_subnet_id': '3bc10d67-2598-4122-bb62', 'network_allocations': [ { 'ip_address': '10.193.154.10', 'ip_version': 4, 'cidr': '10.193.154.0/28', 'gateway': '10.193.154.1', 'mtu': 1500, 'network_type': 'vlan', 'segmentation_id': 3000, 'mac_address': ' AA:AA:AA:AA:AA:AA', }, ... ], }, ], } :param new_network_allocations: allocations that must be configured in the share server. Example:: { 'share_network_subnet_id': '0bdeaa8c6db3-3bc10d67', 'neutron_net_id': '2598-4122-bb62-0bdeaa8c6db3', 'neutron_subnet_id': '3bc10d67-2598-4122-bb62', 'network_allocations': [ { 'ip_address': '10.193.154.10', 'ip_version': 4, 'cidr': '10.193.154.0/28', 'gateway': '10.193.154.1', 'mtu': 1500, 'network_type': 'vlan', 'segmentation_id': 3000, 'mac_address': 'AA:AA:AA:AA:AA:AA', ... }, ], }, :param security_services: list of security services configured with this share server. :param shares: All shares in the share server. :param snapshots: All snapshots in the share server. :raises: Exception. By raising an exception, the share server and all its shares and snapshots instances will be set to 'error'. The error can contain the field 'details_data' as a dict with the key 'server_details' containing the backend details dict that will be saved to share server. 
:return If the update changes the shares export locations or snapshots export locations, this method should return a dictionary containing a list of share instances and snapshot instances indexed by their id's, where each instance should provide a dict with the relevant information that need to be updated. Also, the returned dict can contain the updated back end details to be saved in the database. Example:: { 'share_updates': { '4363eb92-23ca-4888-9e24-502387816e2a': [ { 'path': '1.2.3.4:/foo', 'metadata': {}, 'is_admin_only': False }, { 'path': '5.6.7.8:/foo', 'metadata': {}, 'is_admin_only': True }, ], ... }, 'snapshot_updates': { 'bc4e3b28-0832-4168-b688-67fdc3e9d408': { 'provider_location': '/snapshots/foo/bar_1', 'export_locations': [ { 'path': '1.2.3.4:/snapshots/foo/bar_1', 'is_admin_only': False, }, { 'path': '5.6.7.8:/snapshots/foo/bar_1', 'is_admin_only': True, }, ], }, '2e62b7ea-4e30-445f-bc05-fd523ca62941': { 'provider_location': '/snapshots/foo/bar_2', 'export_locations': [ { 'path': '1.2.3.4:/snapshots/foo/bar_2', 'is_admin_only': False, }, { 'path': '5.6.7.8:/snapshots/foo/bar_2', 'is_admin_only': True, }, ], }, } 'server_details': { 'new_share_server_info_key': 'new_share_server_info_value', }, } """ raise NotImplementedError() def create_backup(self, context, share_instance, backup, share_server=None): """Starts backup of a given share_instance into backup. Driver should implement this method if willing to perform backup of share_instance. This method should start the backup procedure in the backend and end. Following steps should be done in 'create_backup_continue'. :param context: The 'context.RequestContext' object for the request. :param share_instance: Reference to the original share instance. :param backup: Share backup model. 
:param share_server: share server in case of dhss_true """ raise NotImplementedError() def create_backup_continue(self, context, share_instance, backup, share_server=None): """Continue backup of a given share_instance into backup. Driver must implement this method if it supports 'create_backup' method. This method should continue the remaining backup procedure in the backend and report the progress of backup. :param context: The 'context.RequestContext' object for the request. :param share_instance: Reference to the original share instance. :param backup: Share backup model. :param share_server: share server in case of dhss_true """ raise NotImplementedError() def delete_backup(self, context, backup, share_instance, share_server=None): """Is called to remove backup.""" raise NotImplementedError() def restore_backup(self, context, backup, share_instance, share_server=None): """Starts restoring backup into a given share_instance. Driver should implement this method if willing to perform restore of backup into a share_instance. This method should start the backup restore procedure in the backend and end. Following steps should be done in 'restore_backup_continue'. :param context: The 'context.RequestContext' object for the request. :param share_instance: Reference to the original share instance. :param backup: Share backup model. :param share_server: share server in case of dhss_true """ raise NotImplementedError() def restore_backup_continue(self, context, backup, share_instance, share_server=None): """Continue restore of a given backup into share_instance. Driver must implement this method if it supports 'restore_backup' method. This method should continue the remaining restore procedure in the backend and report the progress of backup restore. :param context: The 'context.RequestContext' object for the request. :param share_instance: Reference to the original share instance. :param backup: Share backup model. 
:param share_server: share server in case of dhss_true """ raise NotImplementedError() def update_share_from_metadata(self, context, share, metadata, share_server=None): """Update the share from metadata. Driver must implement this method if needs to perform some action on given resource (i.e. share) based on provided metadata. :param context: The 'context.RequestContext' object for the request. :param share: Share instance model with share data. :param metadata: Dict contains key-value pair where driver will perform necessary action based on key. :param share_server: Reference to the share server. """ raise NotImplementedError() def update_share_network_subnet_from_metadata(self, context, share_network, share_network_subnet, share_server, metadata): """Update the share network subnet from metadata. Driver must implement this method if it can perform some action on given resource (i.e. share network subnet) based on provided metadata. :param context: The 'context.RequestContext' object for the request. :param share_network: share network model :param share_network_subnet: share network subnet model :param share_server: share-server model. :param metadata: Dict contains key-value pair where driver will perform necessary action based on key. 
""" raise NotImplementedError() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.901672 manila-21.0.0/manila/share/drivers/0000775000175000017500000000000000000000000017077 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/__init__.py0000664000175000017500000000000000000000000021176 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.901672 manila-21.0.0/manila/share/drivers/cephfs/0000775000175000017500000000000000000000000020347 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/cephfs/__init__.py0000664000175000017500000000000000000000000022446 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.901672 manila-21.0.0/manila/share/drivers/cephfs/conf/0000775000175000017500000000000000000000000021274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/cephfs/conf/cephfs-export-template.conf0000664000175000017500000000006300000000000026542 0ustar00zuulzuul00000000000000EXPORT { FSAL { Name = "CEPH"; } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/cephfs/driver.py0000664000175000017500000020504000000000000022215 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ipaddress import json import math import re import socket import sys from oslo_config import cfg from oslo_config import types from oslo_log import log from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import units from manila.common import constants from manila import exception from manila.i18n import _ from manila.message import api as message_api from manila.message import message_field from manila.share import driver from manila.share.drivers import ganesha from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers import helpers as driver_helpers rados = None json_command = None ceph_default_target = None def setup_rados(): global rados if not rados: try: rados = importutils.import_module('rados') except ImportError: raise exception.ShareBackendException( _("rados python module is not installed")) def setup_json_command(): global json_command if not json_command: try: json_command = importutils.import_class( 'ceph_argparse.json_command') except ImportError: raise exception.ShareBackendException( _("ceph_argparse python module is not installed")) CEPHX_ACCESS_TYPE = "cephx" # The default Ceph administrative identity CEPH_DEFAULT_AUTH_ID = "admin" DEFAULT_VOLUME_MODE = '755' RADOS_TIMEOUT = 10 LOG = log.getLogger(__name__) # Clone statuses CLONE_CREATING = 'creating' CLONE_FAILED = 'failed' CLONE_CANCELED = 'canceled' CLONE_PENDING = 'pending' CLONE_INPROGRESS = 'in-progress' CLONE_COMPLETE = 'complete' cephfs_opts = [ cfg.StrOpt('cephfs_conf_path', default="", help="Fully qualified path to 
the ceph.conf file."), cfg.StrOpt('cephfs_cluster_name', help="The name of the cluster in use, if it is not " "the default ('ceph')." ), cfg.StrOpt('cephfs_auth_id', default="manila", help="The name of the ceph auth identity to use." ), cfg.StrOpt('cephfs_volume_path_prefix', deprecated_for_removal=True, deprecated_since='Wallaby', deprecated_reason='This option is not used starting with ' 'the Nautilus release of Ceph.', default="/volumes", help="The prefix of the cephfs volume path." ), cfg.StrOpt('cephfs_protocol_helper_type', default="CEPHFS", choices=['CEPHFS', 'NFS'], ignore_case=True, help="The type of protocol helper to use. Default is " "CEPHFS." ), cfg.BoolOpt('cephfs_ganesha_server_is_remote', default=False, help="Whether the NFS-Ganesha server is remote to the driver.", deprecated_for_removal=True, deprecated_since='2025.1', deprecated_reason="This option is used by the deprecated " "NFSProtocolHelper"), cfg.HostAddressOpt('cephfs_ganesha_server_ip', help="The IP address of the NFS-Ganesha server."), cfg.StrOpt('cephfs_ganesha_server_username', default='root', help="The username to authenticate as in the remote " "NFS-Ganesha server host.", deprecated_for_removal=True, deprecated_since='2025.1', deprecated_reason="This option is used by the deprecated " "NFSProtocolHelper"), cfg.StrOpt('cephfs_ganesha_path_to_private_key', help="The path of the driver host's private SSH key file.", deprecated_for_removal=True, deprecated_since='2025.1', deprecated_reason="This option is used by the deprecated " "NFSProtocolHelper"), cfg.StrOpt('cephfs_ganesha_server_password', secret=True, help="The password to authenticate as the user in the remote " "Ganesha server host. 
This is not required if " "'cephfs_ganesha_path_to_private_key' is configured.", deprecated_for_removal=True, deprecated_since='2025.1', deprecated_reason="This option is used by the deprecated " "NFSProtocolHelper"), cfg.ListOpt('cephfs_ganesha_export_ips', default=[], help="List of IPs to export shares. If not supplied, " "then the value of 'cephfs_ganesha_server_ip' " "will be used to construct share export locations."), cfg.StrOpt('cephfs_volume_mode', default=DEFAULT_VOLUME_MODE, help="The read/write/execute permissions mode for CephFS " "volumes, snapshots, and snapshot groups expressed in " "Octal as with linux 'chmod' or 'umask' commands."), cfg.StrOpt('cephfs_filesystem_name', help="The name of the filesystem to use, if there are " "multiple filesystems in the cluster."), cfg.StrOpt('cephfs_ensure_all_shares_salt', default="manila_cephfs_reef_caracal", help="Provide a unique string value to make the driver " "ensure all of the shares it has created during " "startup. Ensuring would re-export shares and this " "action isn't always required, unless something has " "been administratively modified on CephFS."), cfg.IntOpt('cephfs_cached_allocated_capacity_update_interval', min=0, default=60, help="The maximum time in seconds that the cached pool " "data will be considered updated. If it is expired when " "trying to read the pool data, it must be refreshed.") ] cephfsnfs_opts = [ cfg.StrOpt('cephfs_nfs_cluster_id', help="The ID of the NFS cluster to use."), ] CONF = cfg.CONF CONF.register_opts(cephfs_opts) CONF.register_opts(cephfsnfs_opts) class RadosError(Exception): """Something went wrong talking to Ceph with librados""" pass class AllocationCapacityCache(object): """AllocationCapacityCache for CephFS filesystems. The cache validity is measured by a stop watch that is not thread-safe. 
""" def __init__(self, duration): self._stop_watch = timeutils.StopWatch(duration) self._cached_allocated_capacity = None def is_expired(self): return not self._stop_watch.has_started() or self._stop_watch.expired() def get_data(self): return self._cached_allocated_capacity def update_data(self, cached_allocated_capacity): if not self._stop_watch.has_started(): self._stop_watch.start() else: self._stop_watch.restart() self._cached_allocated_capacity = cached_allocated_capacity def rados_command(rados_client, prefix=None, args=None, json_obj=False, target=None, inbuf=None): """Safer wrapper for ceph_argparse.json_command Raises error exception instead of relying on caller to check return codes. Error exception can result from: * Timeout * Actual legitimate errors * Malformed JSON output return: If json_obj is True, return the decoded JSON object from ceph, or None if empty string returned. If json is False, return a decoded string (the data returned by ceph command) """ target = target or ceph_default_target if args is None: args = {} argdict = args.copy() argdict['format'] = 'json' if inbuf is None: inbuf = b'' LOG.debug("Invoking ceph_argparse.json_command - rados_client=%(cl)s, " "target=%(tg)s, prefix='%(pf)s', argdict=%(ad)s, inbuf=%(ib)s, " "timeout=%(to)s.", {"cl": rados_client, "tg": target, "pf": prefix, "ad": argdict, "ib": inbuf, "to": RADOS_TIMEOUT}) try: ret, outbuf, outs = json_command(rados_client, target=target, prefix=prefix, argdict=argdict, inbuf=inbuf, timeout=RADOS_TIMEOUT) if ret != 0: raise rados.Error(outs, ret) if not json_obj: result = outbuf.decode().strip() else: if outbuf: result = json.loads(outbuf.decode().strip()) else: result = None except Exception as e: msg = _("json_command failed - prefix=%(pfx)s, argdict=%(ad)s - " "exception message: %(ex)s." 
% {"pfx": prefix, "ad": argdict, "ex": e}) raise exception.ShareBackendException(msg) return result class CephFSDriver(driver.ExecuteMixin, driver.GaneshaMixin, driver.ShareDriver): """Driver for the Ceph Filesystem.""" def __init__(self, *args, **kwargs): super(CephFSDriver, self).__init__(False, *args, **kwargs) self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'CephFS' setup_rados() setup_json_command() self._rados_client = None # name of the filesystem/volume used by the driver self._volname = None self._ceph_mon_version = None self.configuration.append_config_values(cephfs_opts) self.configuration.append_config_values(cephfsnfs_opts) self._cached_allocated_capacity_gb = None self.private_storage = kwargs.get('private_storage') try: int(self.configuration.cephfs_volume_mode, 8) except ValueError: msg = _("Invalid CephFS volume mode %s") raise exception.BadConfigurationException( msg % self.configuration.cephfs_volume_mode) self._cephfs_volume_mode = self.configuration.cephfs_volume_mode self.ipv6_implemented = True def do_setup(self, context): if self.configuration.cephfs_protocol_helper_type.upper() == "CEPHFS": protocol_helper_class = getattr( sys.modules[__name__], 'NativeProtocolHelper') else: # FIXME(vkmc) we intent to replace NFSProtocolHelper # with NFSClusterProtocolHelper helper in BB/CC release if self.configuration.cephfs_nfs_cluster_id is None: protocol_helper_class = getattr( sys.modules[__name__], 'NFSProtocolHelper') else: protocol_helper_class = getattr( sys.modules[__name__], 'NFSClusterProtocolHelper') self.setup_default_ceph_cmd_target() self.protocol_helper = protocol_helper_class( self._execute, self.configuration, rados_client=self.rados_client, volname=self.volname) self.protocol_helper.init_helper() allocation_capacity_gb = self._get_cephfs_filesystem_allocation() self._cached_allocated_capacity_gb = AllocationCapacityCache( self.configuration.cephfs_cached_allocated_capacity_update_interval ) 
self._cached_allocated_capacity_gb.update_data(allocation_capacity_gb) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" self.protocol_helper.check_for_setup_error() def _get_cephfs_filesystem_allocation(self): allocated_capacity_gb = 0 argdict = {"vol_name": self.volname} subvolumes = rados_command( self.rados_client, "fs subvolume ls", argdict, json_obj=True) for sub_vol in subvolumes: argdict = {"vol_name": self.volname, "sub_name": sub_vol["name"]} sub_info = rados_command( self.rados_client, "fs subvolume info", argdict, json_obj=True) size = sub_info.get('bytes_quota', 0) if size == "infinite": # If we have a share that has infinite quota, we should not # add that to the allocated capacity as that would make the # scheduler think this backend is full. continue allocated_capacity_gb += round(int(size) / units.Gi, 2) return allocated_capacity_gb def _update_share_stats(self): stats = self.rados_client.get_cluster_stats() total_capacity_gb = round(stats['kb'] / units.Mi, 2) free_capacity_gb = round(stats['kb_avail'] / units.Mi, 2) if self._cached_allocated_capacity_gb.is_expired(): allocated_capacity_gb = self._get_cephfs_filesystem_allocation() self._cached_allocated_capacity_gb.update_data( allocated_capacity_gb ) else: allocated_capacity_gb = ( self._cached_allocated_capacity_gb.get_data() ) data = { 'vendor_name': 'Ceph', 'driver_version': '1.0', 'share_backend_name': self.backend_name, 'storage_protocol': self.configuration.safe_get( 'cephfs_protocol_helper_type'), 'pools': [ { 'pool_name': 'cephfs', 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'allocated_capacity_gb': allocated_capacity_gb, 'qos': 'False', 'reserved_percentage': self.configuration.safe_get( 'reserved_share_percentage'), 'reserved_snapshot_percentage': self.configuration.safe_get( 'reserved_share_from_snapshot_percentage') or self.configuration.safe_get( 'reserved_share_percentage'), 'reserved_share_extend_percentage': 
self.configuration.safe_get( 'reserved_share_extend_percentage') or self.configuration.safe_get( 'reserved_share_percentage'), 'dedupe': [False], 'compression': [False], 'thin_provisioning': [True] } ], 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'allocated_capacity_gb': allocated_capacity_gb, 'snapshot_support': True, 'create_share_from_snapshot_support': True, } super( # pylint: disable=no-member CephFSDriver, self)._update_share_stats(data) def _to_bytes(self, gigs): """Convert a Manila size into bytes. Manila uses gibibytes everywhere. :param gigs: integer number of gibibytes. :return: integer number of bytes. """ return gigs * units.Gi def _get_subvolume_name(self, share_id): try: subvolume_name = self.private_storage.get( share_id, "subvolume_name") except Exception: return share_id # Subvolume name could be None, so in case it is, return share_id return subvolume_name or share_id def _get_subvolume_snapshot_name(self, snapshot_id): try: subvolume_snapshot_name = self.private_storage.get( snapshot_id, "subvolume_snapshot_name" ) except Exception: return snapshot_id return subvolume_snapshot_name or snapshot_id def _get_export_locations(self, share, subvolume_name=None): """Get the export location for a share. :param share: a manila share. :return: the export location for a share. """ subvolume_name = subvolume_name or share["id"] # get path of FS subvolume/share argdict = { "vol_name": self.volname, "sub_name": subvolume_name } if share['share_group_id'] is not None: argdict.update({"group_name": share["share_group_id"]}) subvolume_path = rados_command( self.rados_client, "fs subvolume getpath", argdict) return self.protocol_helper.get_export_locations(share, subvolume_path) def get_optional_share_creation_data(self, share, share_server=None): """Get the additional properties to be set in a share. :return: the metadata to be set in share. 
""" return self.protocol_helper.get_optional_share_creation_data(share) def setup_default_ceph_cmd_target(self): global ceph_default_target if not ceph_default_target: ceph_default_target = ('mon-mgr', ) try: ceph_major_version = self.ceph_mon_version['major'] except Exception: msg = _("Error reading ceph version to set the default " "target. Please check your Ceph backend is reachable.") raise exception.ShareBackendException(msg=msg) if ceph_major_version == '14': ceph_default_target = ('mgr', ) elif ceph_major_version < '14': msg = _("CephFSDriver does not support Ceph " "cluster version less than 14.x (Nautilus)") raise exception.ShareBackendException(msg=msg) @property def ceph_mon_version(self): if self._ceph_mon_version: return self._ceph_mon_version self._ceph_mon_version = {} output = rados_command(self.rados_client, "version", target=('mon', )) version_str = json.loads(output)["version"] p = re.compile(r"ceph version (\d+)\.(\d+)\.(\d+)") major, minor, extra = p.match(version_str).groups() self._ceph_mon_version['major'] = major self._ceph_mon_version['minor'] = minor self._ceph_mon_version['extra'] = extra return self._ceph_mon_version @property def rados_client(self): if self._rados_client: return self._rados_client conf_path = self.configuration.safe_get('cephfs_conf_path') cluster_name = self.configuration.safe_get('cephfs_cluster_name') auth_id = self.configuration.safe_get('cephfs_auth_id') self._rados_client = rados.Rados( name="client.{0}".format(auth_id), clustername=cluster_name, conffile=conf_path, conf={} ) LOG.info("[%(be)s] Ceph client found, connecting...", {"be": self.backend_name}) try: if self._rados_client.state != "connected": self._rados_client.connect() except Exception: self._rados_client = None raise exception.ShareBackendException( "[%(be)s] Ceph client failed to connect.", {"be": self.backend_name}) else: LOG.info("[%(be)s] Ceph client connection complete.", {"be": self.backend_name}) return self._rados_client @property def 
volname(self): # Name of the CephFS volume/filesystem where the driver creates # manila entities such as shares, sharegroups, snapshots, etc. if self._volname: return self._volname self._volname = self.configuration.safe_get('cephfs_filesystem_name') if not self._volname: out = rados_command( self.rados_client, "fs volume ls", json_obj=True) if len(out) == 1: self._volname = out[0]['name'] else: if len(out) > 1: msg = _("Specify Ceph filesystem name using " "'cephfs_filesystem_name' driver option.") else: msg = _("No Ceph filesystem found.") raise exception.ShareBackendException(msg=msg) return self._volname def create_share(self, context, share, share_server=None): """Create a CephFS volume. :param context: A RequestContext. :param share: A Share. :param share_server: Always None for CephFS native. :return: The export locations dictionary. """ requested_proto = share['share_proto'].upper() supported_proto = ( self.configuration.cephfs_protocol_helper_type.upper()) if (requested_proto != supported_proto): msg = _("Share protocol %s is not supported.") % requested_proto raise exception.ShareBackendException(msg=msg) size = self._to_bytes(share['size']) LOG.debug("[%(be)s]: create_share: id=%(id)s, size=%(sz)s, " "group=%(gr)s.", {"be": self.backend_name, "id": share['id'], "sz": share['size'], "gr": share['share_group_id']}) # create FS subvolume/share argdict = { "vol_name": self.volname, "sub_name": share["id"], "size": size, "namespace_isolated": True, "mode": self._cephfs_volume_mode } if share['share_group_id'] is not None: argdict.update({"group_name": share["share_group_id"]}) rados_command(self.rados_client, "fs subvolume create", argdict) return self._get_export_locations(share) def _get_subvolume_size_in_gb(self, subvolume_size): """Returns the size of the subvolume in GB.""" # There is a chance that we would end up with 2.5gb for example, so # we round it up return int(math.ceil(int(subvolume_size) / units.Gi)) def manage_existing(self, share, 
driver_options): # bring FS subvolume/share under manila management LOG.debug("[%(be)s]: manage_existing: id=%(id)s.", {"be": self.backend_name, "id": share['id']}) # Subvolume name must be provided. subvolume_name = share['export_locations'][0]['path'] if not subvolume_name: raise exception.ShareBackendException( "The subvolume name must be provided as a 'export_path' while " "managing shares.") argdict = { "vol_name": self.volname, "sub_name": subvolume_name, } subvolume_info = {} # Try to get the subvolume info in the ceph backend try: subvolume_info = rados_command( self.rados_client, "fs subvolume info", argdict, json_obj=True) except exception.ShareBackendException as e: # Couldn't find a subvolume with the name provided. if 'does not exist' in str(e).lower(): msg = ("Subvolume %(subvol)s cannot be found on the " "backend." % {'subvol': subvolume_name}) raise exception.ShareBackendException(msg=msg) # Check if share mode matches if subvolume_info.get('mode') != self._cephfs_volume_mode: LOG.info("Subvolume %(subvol)s mode is different from what is " "configured in Manila.") subvolume_size = subvolume_info.get('bytes_quota') # We need to resize infinite subvolumes, as Manila doesn't support it if isinstance(subvolume_size, str) and subvolume_size == "infinite": try: # Default resize gb must be configured new_size = driver_options.get('size') if not new_size or new_size <= 0: msg = ("subvolume %s has infinite size and a valid " "integer value was not added to the driver_options " "arg. Please provide a 'size' in the driver " "options and try again." % subvolume_name) raise exception.ShareBackendException(msg=msg) # Attempt resizing the subvolume self._resize_share(share, new_size, no_shrink=True) subvolume_size = new_size except exception.ShareShrinkingPossibleDataLoss: msg = ("Could not resize the subvolume using the provided " "size, as data could be lost. 
Please update it and " "try again.") LOG.exception(msg) raise except exception.ShareBackendException: raise else: if int(subvolume_size) % units.Gi == 0: # subvolume_size is an integer GB, no need to resize subvolume subvolume_size = self._get_subvolume_size_in_gb(subvolume_size) else: # subvolume size is not an integer GB. need to resize subvolume new_size_gb = self._get_subvolume_size_in_gb(subvolume_size) LOG.info( "Subvolume %(subvol)s is being resized to %(new_size)s " "GB.", { 'subvol': subvolume_name, 'new_size': new_size_gb } ) self._resize_share(share, new_size_gb, no_shrink=True) subvolume_size = new_size_gb share_metadata = {"subvolume_name": subvolume_name} self.private_storage.update(share['id'], share_metadata) export_locations = self._get_export_locations( share, subvolume_name=subvolume_name ) managed_share = { "size": subvolume_size, "export_locations": export_locations } return managed_share def manage_existing_snapshot(self, snapshot, driver_options): # bring FS subvolume/share under manila management LOG.debug("[%(be)s]: manage_existing_snapshot: id=%(id)s.", {"be": self.backend_name, "id": snapshot['id']}) # Subvolume name must be provided. sub_snapshot_name = snapshot.get('provider_location', None) if not sub_snapshot_name: raise exception.ShareBackendException( "The subvolume snapshot name must be provided as the " "'provider_location' while managing snapshots.") sub_name = self._get_subvolume_name(snapshot['share_instance_id']) argdict = { "vol_name": self.volname, "sub_name": sub_name, } # Try to get the subvolume info in the ceph backend, this is useful for # us to get the size for the snapshot. try: rados_command( self.rados_client, "fs subvolume info", argdict, json_obj=True) except exception.ShareBackendException as e: # Couldn't find a subvolume with the name provided. if 'does not exist' in str(e).lower(): msg = ("Subvolume %(subvol)s cannot be found on the " "backend." 
% {'subvol': sub_name}) raise exception.ShareBackendException(msg=msg) sub_snap_info_argdict = { "vol_name": self.volname, "sub_name": sub_name, "snap_name": sub_snapshot_name } # Shares/subvolumes already managed by manila will never have # infinite as their bytes_quota, so no need for extra precaution. try: managed_subvolume_snapshot = rados_command( self.rados_client, "fs subvolume snapshot info", sub_snap_info_argdict, json_obj=True ) except exception.ShareBackendException as e: # Couldn't find a subvolume snapshot with the name provided. if 'does not exist' in str(e).lower(): msg = ("Subvolume snapshot %(snap)s cannot be found on the " "backend." % {'snap': sub_snapshot_name}) raise exception.ShareBackendException(msg=msg) snapshot_metadata = {"subvolume_snapshot_name": sub_snapshot_name} self.private_storage.update( snapshot['snapshot_id'], snapshot_metadata ) # NOTE(carloss): fs subvolume snapshot info command does not return # the snapshot size, so we reuse the share size until this is not # available for us. managed_snapshot = {'provider_location': sub_snapshot_name} if managed_subvolume_snapshot.get('bytes_quota') is not None: managed_snapshot['size'] = self._get_subvolume_size_in_gb( managed_subvolume_snapshot['bytes_quota']) return managed_snapshot def _need_to_cancel_clone(self, share, clone_name): # Is there an ongoing clone operation that needs to be canceled # so we can delete the share? need_to_cancel_clone = False argdict = { "vol_name": self.volname, "clone_name": clone_name, } if share['share_group_id'] is not None: argdict.update({"group_name": share["share_group_id"]}) try: status = rados_command( self.rados_client, "fs clone status", argdict) if status in (CLONE_PENDING, CLONE_INPROGRESS): need_to_cancel_clone = True except exception.ShareBackendException as e: # Trying to get clone status on a regular subvolume is expected # to fail. 
if 'not allowed on subvolume' not in str(e).lower(): raise exception.ShareBackendException( "Failed to remove share.") return need_to_cancel_clone def delete_share(self, context, share, share_server=None): # remove FS subvolume/share LOG.debug("[%(be)s]: delete_share: id=%(id)s, group=%(gr)s.", {"be": self.backend_name, "id": share['id'], "gr": share['share_group_id']}) clone_name = self._get_subvolume_name(share['id']) if self._need_to_cancel_clone(share, clone_name): try: argdict = { "vol_name": self.volname, "clone_name": clone_name, "force": True, } if share['share_group_id'] is not None: argdict.update({"group_name": share["share_group_id"]}) rados_command(self.rados_client, "fs clone cancel", argdict) except rados.Error: raise exception.ShareBackendException( "Failed to cancel clone operation.") argdict = { "vol_name": self.volname, "sub_name": self._get_subvolume_name(share["id"]), # We want to clean up the share even if the subvolume is # not in a good state. "force": True, } if share['share_group_id'] is not None: argdict.update({"group_name": share["share_group_id"]}) rados_command(self.rados_client, "fs subvolume rm", argdict) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): sub_name = self._get_subvolume_name(share['id']) return self.protocol_helper.update_access( context, share, access_rules, add_rules, delete_rules, update_rules, share_server=share_server, sub_name=sub_name) def get_backend_info(self, context): return self.protocol_helper.get_backend_info(context) def ensure_shares(self, context, shares): share_updates = {} for share in shares: share_updates[share['id']] = { 'reapply_access_rules': self.protocol_helper.reapply_rules_while_ensuring_shares, } try: share_metadata = ( self.get_optional_share_creation_data(share).get( "metadata", {}) ) share_updates[share['id']].update({ 'export_locations': self._get_export_locations(share), "metadata": share_metadata }) except 
exception.ShareBackendException as e: if 'does not exist' in str(e).lower(): msg = ("Share instance %(si)s belonging to share " "%(share)s cannot be found on the backend.") msg_payload = {'si': share['id'], 'share': share['share_id']} LOG.exception(msg, msg_payload) share_updates[share['id']] = { 'status': constants.STATUS_ERROR, } return share_updates def _resize_share(self, share, new_size, no_shrink=False): argdict = { "vol_name": self.volname, "sub_name": self._get_subvolume_name(share["id"]), "new_size": self._to_bytes(new_size), } if share["share_group_id"] is not None: argdict.update({"group_name": share["share_group_id"]}) if no_shrink: argdict.update({"no_shrink": True}) try: rados_command(self.rados_client, "fs subvolume resize", argdict) except exception.ShareBackendException as e: if 'would be lesser than' in str(e).lower(): raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) raise def extend_share(self, share, new_size, share_server=None): # resize FS subvolume/share LOG.debug("[%(be)s]: extend_share: share=%(id)s, size=%(sz)s.", {"be": self.backend_name, "id": share['id'], "sz": new_size}) self._resize_share(share, new_size) def shrink_share(self, share, new_size, share_server=None): # resize FS subvolume/share LOG.debug("[%(be)s]: shrink_share: share=%(id)s, size=%(sz)s.", {"be": self.backend_name, "id": share['id'], "sz": new_size}) self._resize_share(share, new_size, no_shrink=True) def create_snapshot(self, context, snapshot, share_server=None): # create a FS snapshot LOG.debug("[%(be)s]: create_snapshot: original share=%(id)s, " "snapshot=%(sn)s.", {"be": self.backend_name, "id": snapshot['share_id'], "sn": snapshot['id']}) argdict = { "vol_name": self.volname, "sub_name": self._get_subvolume_name(snapshot["share_id"]), "snap_name": snapshot["snapshot_id"], } rados_command( self.rados_client, "fs subvolume snapshot create", argdict) return {"provider_location": snapshot["snapshot_id"]} def delete_snapshot(self, context, 
snapshot, share_server=None): # delete a FS snapshot LOG.debug("[%(be)s]: delete_snapshot: snapshot=%(id)s.", {"be": self.backend_name, "id": snapshot['id']}) snapshot_name = self._get_subvolume_snapshot_name( snapshot['snapshot_id'] ) # FIXME(vkmc) remove this in CC (next tick) release. legacy_snap_name = "_".join([snapshot["snapshot_id"], snapshot["id"]]) argdict_legacy = { "vol_name": self.volname, "sub_name": self._get_subvolume_name(snapshot["share_id"]), "snap_name": legacy_snap_name, "force": True, } # try removing snapshot using legacy naming rados_command( self.rados_client, "fs subvolume snapshot rm", argdict_legacy) # in case it's a snapshot with new naming, retry remove with new name argdict = argdict_legacy.copy() argdict.update({"snap_name": snapshot_name}) rados_command(self.rados_client, "fs subvolume snapshot rm", argdict) def create_share_group(self, context, sg_dict, share_server=None): # delete a FS group LOG.debug("[%(be)s]: create_share_group: share_group=%(id)s.", {"be": self.backend_name, "id": sg_dict['id']}) argdict = { "vol_name": self.volname, "group_name": sg_dict['id'], "mode": self._cephfs_volume_mode, } rados_command(self.rados_client, "fs subvolumegroup create", argdict) def delete_share_group(self, context, sg_dict, share_server=None): # delete a FS group LOG.debug("[%(be)s]: delete_share_group: share_group=%(id)s.", {"be": self.backend_name, "id": sg_dict['id']}) argdict = { "vol_name": self.volname, "group_name": sg_dict['id'], "force": True, } rados_command(self.rados_client, "fs subvolumegroup rm", argdict) def delete_share_group_snapshot(self, context, snap_dict, share_server=None): # delete a FS group snapshot LOG.debug("[%(be)s]: delete_share_group_snapshot: " "share_group=%(sg_id)s, snapshot=%(sn)s.", {"be": self.backend_name, "sg_id": snap_dict['id'], "sn": snap_dict["share_group_id"]}) argdict = { "vol_name": self.volname, "group_name": snap_dict["share_group_id"], "snap_name": snap_dict["id"], "force": True, } 
rados_command( self.rados_client, "fs subvolumegroup snapshot rm", argdict) return None, [] def create_share_group_snapshot(self, context, snap_dict, share_server=None): # create a FS group snapshot LOG.debug("[%(be)s]: create_share_group_snapshot: share_group=%(id)s, " "snapshot=%(sn)s.", {"be": self.backend_name, "id": snap_dict['share_group_id'], "sn": snap_dict["id"]}) msg = _("Share group snapshot feature is no longer supported in " "mainline CephFS (existing group snapshots can still be " "listed and deleted).") raise exception.ShareBackendException(msg=msg) def _get_clone_status(self, share): """Check the status of a newly cloned share.""" clone_name = self._get_subvolume_name(share["id"]) argdict = { "vol_name": self.volname, "clone_name": clone_name } if share['share_group_id'] is not None: argdict.update({"group_name": share["share_group_id"]}) out = rados_command(self.rados_client, "fs clone status", argdict, True) return out['status']['state'] def _update_create_from_snapshot_status(self, share): updates = { 'status': constants.STATUS_ERROR, 'progress': None, 'export_locations': [] } status = self._get_clone_status(share) if status == CLONE_COMPLETE: updates['status'] = constants.STATUS_AVAILABLE updates['progress'] = '100%' updates['export_locations'] = self._get_export_locations(share) elif status in (CLONE_PENDING, CLONE_INPROGRESS): updates['status'] = constants.STATUS_CREATING_FROM_SNAPSHOT else: # error if clone operation is not progressing or completed raise exception.ShareBackendException( "rados client clone of snapshot [%(sn)s}] to new " "share [%(shr)s}] did not complete successfully." % {"sn": share["snapshot_id"], "shr": share["id"]}) return updates def get_share_status(self, share, share_server=None): """Returns the current status for a share. :param share: a manila share. :param share_server: a manila share server (not currently supported). :returns: manila share status. 
""" if share['status'] != constants.STATUS_CREATING_FROM_SNAPSHOT: LOG.warning("Caught an unexpected share status '%s' during share " "status update routine. Skipping.", share['status']) return return self._update_create_from_snapshot_status(share) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Create a CephFS subvolume from a snapshot""" LOG.debug("[%(be)s]: create_share_from_snapshot: id=%(id)s, " "snapshot=%(sn)s, size=%(sz)s, group=%(gr)s.", {"be": self.backend_name, "id": share['id'], "sn": snapshot['id'], "sz": share['size'], "gr": share['share_group_id']}) argdict = { "vol_name": self.volname, "sub_name": self._get_subvolume_name(parent_share["id"]), "snap_name": self._get_subvolume_snapshot_name( snapshot["snapshot_id"]), "target_sub_name": self._get_subvolume_name(share["id"]) } if share['share_group_id'] is not None: argdict.update({"group_name": share["share_group_id"]}) rados_command( self.rados_client, "fs subvolume snapshot clone", argdict) return self._update_create_from_snapshot_status(share) def __del__(self): if self._rados_client: LOG.info("[%(be)s] Ceph client disconnecting...", {"be": self.backend_name}) self._rados_client.shutdown() self._rados_client = None LOG.info("[%(be)s] Ceph client disconnected", {"be": self.backend_name}) def get_configured_ip_versions(self): return self.protocol_helper.get_configured_ip_versions() def transfer_accept(self, context, share, new_user, new_project, access_rules=None, share_server=None): # CephFS driver cannot transfer shares by preserving access rules same_project = share["project_id"] == new_project if access_rules and not same_project: raise exception.DriverCannotTransferShareWithRules() class NativeProtocolHelper(ganesha.NASHelperBase): """Helper class for native CephFS protocol""" supported_access_types = (CEPHX_ACCESS_TYPE, ) supported_access_levels = (constants.ACCESS_LEVEL_RW, constants.ACCESS_LEVEL_RO) reapply_rules_while_ensuring_shares = 
False def __init__(self, execute, config, **kwargs): self.rados_client = kwargs.pop('rados_client') self.volname = kwargs.pop('volname') self.message_api = message_api.API() super(NativeProtocolHelper, self).__init__(execute, config, **kwargs) def _init_helper(self): pass def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" return def get_mon_addrs(self): result = [] mon_map = rados_command(self.rados_client, "mon dump", json_obj=True, target=('mon', )) for mon in mon_map['mons']: ip_port = mon['addr'].split("/")[0] result.append(ip_port) return result def get_backend_info(self, context): return { "cephfs_ensure_all_shares_salt": self.configuration.cephfs_ensure_all_shares_salt, "cephfs_filesystem_name": self.volname, } def get_export_locations(self, share, subvolume_path): # To mount this you need to know the mon IPs and the path to the volume mon_addrs = self.get_mon_addrs() export_location = "{addrs}:{path}".format( addrs=",".join(mon_addrs), path=subvolume_path) LOG.info("Calculated export location for share %(id)s: %(loc)s", {"id": share['id'], "loc": export_location}) return { 'path': export_location, 'is_admin_only': False, 'metadata': {}, } def get_optional_share_creation_data(self, share, share_server=None): return {"metadata": {"__mount_options": f"fs={self.volname}"}} def _allow_access(self, context, share, access, share_server=None, sub_name=None): if access['access_type'] != CEPHX_ACCESS_TYPE: raise exception.InvalidShareAccessType(type=access['access_type']) ceph_auth_id = access['access_to'] # We need to check here rather than the API or Manila Client to see # if the ceph_auth_id is the same as the one specified for Manila's # usage. This is due to the fact that the API and the Manila client # cannot read the contents of the Manila configuration file. If it # is the same, we need to error out. 
if ceph_auth_id == CONF.cephfs_auth_id: error_message = (_('Ceph authentication ID %s must be different ' 'than the one the Manila service uses.') % ceph_auth_id) raise exception.InvalidShareAccess(reason=error_message) argdict = { "vol_name": self.volname, "sub_name": sub_name, "auth_id": ceph_auth_id, "tenant_id": share["project_id"], } if share["share_group_id"] is not None: argdict.update({"group_name": share["share_group_id"]}) readonly = access['access_level'] == constants.ACCESS_LEVEL_RO if readonly: argdict.update({"access_level": "r"}) else: argdict.update({"access_level": "rw"}) try: auth_result = rados_command( self.rados_client, "fs subvolume authorize", argdict) except exception.ShareBackendException as e: if 'not allowed' in str(e).lower(): msg = ("Access to client %(client)s is not allowed. " "Reason: %(reason)s") msg_payload = {'client': ceph_auth_id, 'reason': e} raise exception.InvalidShareAccess( reason=msg % msg_payload) raise return auth_result def _deny_access(self, context, share, access, share_server=None, sub_name=None): if access['access_type'] != CEPHX_ACCESS_TYPE: LOG.warning("Invalid access type '%(type)s', " "ignoring in deny.", {"type": access['access_type']}) return argdict = { "vol_name": self.volname, "sub_name": sub_name, "auth_id": access['access_to'] } if share["share_group_id"] is not None: argdict.update({"group_name": share["share_group_id"]}) try: rados_command(self.rados_client, "fs subvolume deauthorize", argdict) except exception.ShareBackendException as e: if "doesn't exist" in e.msg.lower(): LOG.warning(f"%{access['access_to']} did not have access to " f"share {share['id']}.") return raise e rados_command(self.rados_client, "fs subvolume evict", argdict) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None, sub_name=None): access_updates = {} argdict = { "vol_name": self.volname, "sub_name": sub_name, } if share["share_group_id"] is not None: 
argdict.update({"group_name": share["share_group_id"]}) if not (add_rules or delete_rules): # recovery/maintenance mode add_rules = access_rules existing_auths = None existing_auths = rados_command( self.rados_client, "fs subvolume authorized_list", argdict, json_obj=True) if existing_auths: existing_auth_ids = set() for rule in range(len(existing_auths)): for cephx_id in existing_auths[rule]: existing_auth_ids.add(cephx_id) want_auth_ids = set( [rule['access_to'] for rule in add_rules]) delete_auth_ids = existing_auth_ids.difference( want_auth_ids) delete_auth_ids_list = delete_auth_ids for delete_auth_id in delete_auth_ids_list: delete_rules.append( { 'access_to': delete_auth_id, 'access_type': CEPHX_ACCESS_TYPE, }) # During recovery mode, re-authorize share access for auth IDs that # were already granted access by the backend. Do this to fetch their # access keys and ensure that after recovery, manila and the Ceph # backend are in sync. for rule in add_rules: try: access_key = self._allow_access( context, share, rule, sub_name=sub_name ) except (exception.InvalidShareAccessLevel, exception.InvalidShareAccessType): self.message_api.create( context, message_field.Action.UPDATE_ACCESS_RULES, share['project_id'], resource_type=message_field.Resource.SHARE, resource_id=share['share_id'], detail=message_field.Detail.UNSUPPORTED_CLIENT_ACCESS) log_args = {'id': rule['access_id'], 'access_level': rule['access_level'], 'access_to': rule['access_to']} LOG.exception("Failed to provide %(access_level)s access to " "%(access_to)s (Rule ID: %(id)s). 
Setting rule " "to 'error' state.", log_args) access_updates.update({rule['access_id']: {'state': 'error'}}) except exception.InvalidShareAccess: self.message_api.create( context, message_field.Action.UPDATE_ACCESS_RULES, share['project_id'], resource_type=message_field.Resource.SHARE, resource_id=share['share_id'], detail=message_field.Detail.FORBIDDEN_CLIENT_ACCESS) log_args = {'id': rule['access_id'], 'access_level': rule['access_level'], 'access_to': rule['access_to']} LOG.exception("Failed to provide %(access_level)s access to " "%(access_to)s (Rule ID: %(id)s). Setting rule " "to 'error' state.", log_args) access_updates.update({rule['access_id']: {'state': 'error'}}) else: access_updates.update({ rule['access_id']: {'access_key': access_key}, }) for rule in delete_rules: self._deny_access(context, share, rule, sub_name=sub_name) return access_updates def get_configured_ip_versions(self): return [4] class NFSProtocolHelperMixin(): def get_export_locations(self, share, subvolume_path): export_locations = [] if not self.export_ips: self.export_ips = self._get_export_ips() for export_ip in self.export_ips: # Try to escape the export ip. 
If it fails, means that the # `cephfs_ganesha_server_ip` wasn't possibly set and the used # address is the hostname try: server_address = driver_helpers.escaped_address( export_ip['ip']) except ValueError: server_address = export_ip['ip'] export_path = "{server_address}:{mount_path}".format( server_address=server_address, mount_path=subvolume_path) LOG.info("Calculated export path for share %(id)s: %(epath)s", {"id": share['id'], "epath": export_path}) export_location = { 'path': export_path, 'is_admin_only': False, 'metadata': { 'preferred': export_ip['preferred'], }, } export_locations.append(export_location) return export_locations def get_optional_share_creation_data(self, share, share_server=None): return {} def _get_export_path(self, share, sub_name=None): """Callback to provide export path.""" argdict = { "vol_name": self.volname, "sub_name": sub_name or share["id"] } if share["share_group_id"] is not None: argdict.update({"group_name": share["share_group_id"]}) path = rados_command( self.rados_client, "fs subvolume getpath", argdict) return path def _get_export_pseudo_path(self, share, sub_name=None): """Callback to provide pseudo path.""" return self._get_export_path(share, sub_name=sub_name) def get_configured_ip_versions(self): if not self.configured_ip_versions: try: if not self.export_ips: self.export_ips = self._get_export_ips() for export_ip in self.export_ips: self.configured_ip_versions.add( ipaddress.ip_address(str(export_ip['ip'])).version) except Exception: # export_ips contained a hostname, safest thing is to # claim support for IPv4 and IPv6 address families LOG.warning("Setting configured IP versions to [4, 6] since " "a hostname (rather than IP address) was supplied " "in 'cephfs_ganesha_server_ip' or " "in 'cephfs_ganesha_export_ips'.") self.configured_ip_versions = {4, 6} return list(self.configured_ip_versions) class NFSProtocolHelper(NFSProtocolHelperMixin, ganesha.GaneshaNASHelper2): shared_data = {} supported_protocols = ('NFS',) 
reapply_rules_while_ensuring_shares = True def __init__(self, execute, config_object, **kwargs): if config_object.cephfs_ganesha_server_is_remote: execute = ganesha_utils.SSHExecutor( config_object.cephfs_ganesha_server_ip, 22, None, config_object.cephfs_ganesha_server_username, password=config_object.cephfs_ganesha_server_password, privatekey=config_object.cephfs_ganesha_path_to_private_key) else: execute = ganesha_utils.RootExecutor(execute) self.ganesha_host = config_object.cephfs_ganesha_server_ip if not self.ganesha_host: self.ganesha_host = socket.gethostname() LOG.info("NFS-Ganesha server's location defaulted to driver's " "hostname: %s", self.ganesha_host) super(NFSProtocolHelper, self).__init__(execute, config_object, **kwargs) LOG.warning('The NFSProtocolHelper has been deprecated. Starting ' 'from the 2025.1 release, we will no longer support ' 'exporting NFS shares through a NFS Ganesha instance ' 'that not managed by the Ceph orchestrator.') if not hasattr(self, 'rados_client'): self.rados_client = kwargs.pop('rados_client') if not hasattr(self, 'volname'): self.volname = kwargs.pop('volname') self.export_ips = None self.configured_ip_versions = set() self.config = config_object def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" host_address_obj = types.HostAddress() for export_ip in self.config.cephfs_ganesha_export_ips: try: host_address_obj(export_ip) except ValueError: msg = (_("Invalid list member of 'cephfs_ganesha_export_ips' " "option supplied %s -- not a valid IP address or " "hostname.") % export_ip) raise exception.InvalidParameterValue(err=msg) def _default_config_hook(self): """Callback to provide default export block.""" dconf = super(NFSProtocolHelper, self)._default_config_hook() conf_dir = ganesha_utils.path_from(__file__, "conf") ganesha_utils.patch(dconf, self._load_conf_dir(conf_dir)) return dconf def _fsal_hook(self, base, share, access, sub_name=None): """Callback to create FSAL subblock.""" 
ceph_auth_id = ''.join(['ganesha-', share['id']]) argdict = { "vol_name": self.volname, "sub_name": sub_name, "auth_id": ceph_auth_id, "access_level": "rw", "tenant_id": share["project_id"], } if share["share_group_id"] is not None: argdict.update({"group_name": share["share_group_id"]}) auth_result = rados_command( self.rados_client, "fs subvolume authorize", argdict) # Restrict Ganesha server's access to only the CephFS subtree or path, # corresponding to the manila share, that is to be exported by making # Ganesha use Ceph auth IDs with path restricted capabilities to # communicate with CephFS. return { 'Name': 'Ceph', 'User_Id': ceph_auth_id, 'Secret_Access_Key': auth_result, 'Filesystem': self.volname } def _cleanup_fsal_hook(self, base, share, access, sub_name=None): """Callback for FSAL specific cleanup after removing an export.""" ceph_auth_id = ''.join(['ganesha-', share['id']]) argdict = { "vol_name": self.volname, "sub_name": sub_name, "auth_id": ceph_auth_id, } if share["share_group_id"] is not None: argdict.update({"group_name": share["share_group_id"]}) rados_command(self.rados_client, "fs subvolume deauthorize", argdict) def _get_export_ips(self): ganesha_export_ips = self.config.cephfs_ganesha_export_ips if not ganesha_export_ips: ganesha_export_ips = [self.ganesha_host] export_ips = [] for ip in set(ganesha_export_ips): export_ips.append({'ip': ip, 'preferred': False}) return export_ips def get_backend_info(self, context): backend_info = { "cephfs_ganesha_export_ips": self.config.cephfs_ganesha_export_ips, "cephfs_ganesha_server_ip": self.config.cephfs_ganesha_server_ip, "cephfs_ensure_all_shares_salt": self.configuration.cephfs_ensure_all_shares_salt, } return backend_info class NFSClusterProtocolHelper(NFSProtocolHelperMixin, ganesha.NASHelperBase): supported_access_types = ('ip', ) supported_access_levels = (constants.ACCESS_LEVEL_RW, constants.ACCESS_LEVEL_RO) reapply_rules_while_ensuring_shares = True def __init__(self, execute, config_object, 
**kwargs): self.rados_client = kwargs.pop('rados_client') self.volname = kwargs.pop('volname') self.configured_ip_versions = set() self.configuration = config_object self._nfs_clusterid = None self.export_ips = None super(NFSClusterProtocolHelper, self).__init__(execute, config_object, **kwargs) @property def nfs_clusterid(self): # ID of the NFS cluster where the driver exports shares if self._nfs_clusterid: return self._nfs_clusterid self._nfs_clusterid = ( self.configuration.safe_get('cephfs_nfs_cluster_id')) if not self._nfs_clusterid: msg = _("The NFS Cluster ID has not been configured" "Please check cephfs_nfs_cluster_id option " "has been correctly set in the backend configuration.") raise exception.ShareBackendException(msg=msg) return self._nfs_clusterid def _get_configured_export_ips(self): ganesha_server_ips = ( self.configuration.safe_get('cephfs_ganesha_export_ips') or []) if not ganesha_server_ips: ganesha_server_ips = ( self.configuration.safe_get('cephfs_ganesha_server_ip')) ganesha_server_ips = ( [ganesha_server_ips] if ganesha_server_ips else []) return set(ganesha_server_ips) def _get_export_ips(self): """Get NFS cluster export ips.""" nfs_clusterid = self.nfs_clusterid ceph_nfs_export_ips = [] ganesha_export_ips = self._get_configured_export_ips() argdict = { "cluster_id": nfs_clusterid, } output = rados_command(self.rados_client, "nfs cluster info", argdict) nfs_cluster_info = json.loads(output) # NFS has been deployed with an ingress # we use the VIP for the export ips vip = nfs_cluster_info[nfs_clusterid]["virtual_ip"] # there is no VIP, we fallback to NFS cluster ips if not vip: hosts = nfs_cluster_info[nfs_clusterid]["backend"] for host in hosts: ceph_nfs_export_ips.append(host["ip"]) else: ceph_nfs_export_ips.append(vip) # there are no export IPs, there are no NFS servers we can use if not ceph_nfs_export_ips: msg = _("There are no NFS servers available to use. 
" "Please check the health of your Ceph cluster " "and restart the manila share service.") raise exception.ShareBackendException(msg=msg) export_ips = [] for ip in set(ceph_nfs_export_ips): export_ips.append({'ip': ip, 'preferred': True}) # It's possible for deployers to state additional # NFS interfaces directly via manila.conf. If they do, # these are represented as non-preferred export paths. # This is mostly to allow NFS-Ganesha server migrations. ganesha_export_ips = (eip for eip in ganesha_export_ips if eip not in ceph_nfs_export_ips) for ip in ganesha_export_ips: export_ips.append({'ip': ip, 'preferred': False}) return export_ips def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" return def _get_export_config(self, share, access, sub_name=None): """Returns export configuration in JSON-encoded bytes.""" pseudo_path = self._get_export_pseudo_path(share, sub_name=sub_name) argdict = { "cluster_id": self.nfs_clusterid, "pseudo_path": pseudo_path } export = rados_command( self.rados_client, "nfs export info", argdict, json_obj=True) if export: export["clients"] = access else: export = { "path": self._get_export_path(share, sub_name=sub_name), "cluster_id": self.nfs_clusterid, "pseudo": pseudo_path, "squash": "none", "security_label": True, "fsal": { "name": "CEPH", "fs_name": self.volname, }, "clients": access } return json.dumps(export).encode('utf-8') def _allow_access(self, share, access, sub_name=None): """Allow access to the share.""" argdict = { "cluster_id": self.nfs_clusterid, } inbuf = self._get_export_config(share, access, sub_name) rados_command(self.rados_client, "nfs export apply", argdict, inbuf=inbuf) def _deny_access(self, share, sub_name=None): """Deny access to the share.""" argdict = { "cluster_id": self.nfs_clusterid, "pseudo_path": self._get_export_pseudo_path( share, sub_name=sub_name) } rados_command(self.rados_client, "nfs export rm", argdict) def update_access(self, context, share, access_rules, 
add_rules, delete_rules, update_rules, share_server=None, sub_name=None): """Update access rules of share. Creates an export per share. Modifies access rules of shares by dynamically updating exports via ceph nfs. """ rule_state_map = {} wanted_rw_clients, wanted_ro_clients = [], [] for rule in access_rules: try: ganesha_utils.validate_access_rule( self.supported_access_types, self.supported_access_levels, rule, True) except (exception.InvalidShareAccess, exception.InvalidShareAccessLevel): rule_state_map[rule['id']] = {'state': 'error'} continue rule = ganesha_utils.fixup_access_rule(rule) if rule['access_level'] == 'rw': wanted_rw_clients.append(rule['access_to']) elif rule['access_level'] == 'ro': wanted_ro_clients.append(rule['access_to']) if access_rules: # add or update export clients = [] if wanted_ro_clients: clients.append({ 'access_type': 'ro', 'addresses': wanted_ro_clients, 'squash': 'none' }) if wanted_rw_clients: clients.append({ 'access_type': 'rw', 'addresses': wanted_rw_clients, 'squash': 'none' }) if clients: # empty list if no rules passed validation self._allow_access(share, clients, sub_name=sub_name) else: # no clients have access to the share. 
remove export self._deny_access(share, sub_name=sub_name) return rule_state_map def get_backend_info(self, context): backend_info = { "cephfs_ganesha_export_ips": self.configuration.cephfs_ganesha_export_ips, "cephfs_ganesha_server_ip": self.configuration.cephfs_ganesha_server_ip, "cephfs_nfs_cluster_id": self.nfs_clusterid, "cephfs_ensure_all_shares_salt": self.configuration.cephfs_ensure_all_shares_salt, } return backend_info ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.901672 manila-21.0.0/manila/share/drivers/container/0000775000175000017500000000000000000000000021061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/container/__init__.py0000664000175000017500000000000000000000000023160 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/container/container_helper.py0000664000175000017500000002771400000000000024767 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import uuid from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils from manila import exception from manila.i18n import _ from manila.share import driver LOG = log.getLogger(__name__) class DockerExecHelper(driver.ExecuteMixin): def __init__(self, *args, **kwargs): self.configuration = kwargs.pop("configuration", None) super(DockerExecHelper, self).__init__(*args, **kwargs) self.init_execute_mixin() def create_container(self, name=None): name = name or "".join(["manila_cifs_docker_container", str(uuid.uuid1()).replace("-", "_")]) image_name = self.configuration.container_image_name LOG.debug("Starting container from image %s.", image_name) # (aovchinnikov): --privileged is required for both samba and # nfs-ganesha to actually allow access to shared folders. # # (aovchinnikov): To actually make docker container mount a # logical volume created after container start-up to some location # inside it, we must share entire /dev with it. While seemingly # dangerous it is not and moreover this is apparently the only sane # way to do it. The reason is when a logical volume gets created # several new things appear in /dev: a new /dev/dm-X and a symlink # in /dev/volume_group_name pointing to /dev/dm-X. But to be able # to interact with /dev/dm-X, it must be already present inside # the container's /dev i.e. it must have been -v shared during # container start-up. So we should either precreate an unknown # number of /dev/dm-Xs (one per LV), share them all and hope # for the best or share the entire /dev and hope for the best. # # The risk of allowing a container having access to entire host's # /dev is not as big as it seems: as long as actual share providers # are invulnerable this does not pose any extra risks. If, however, # share providers contain vulnerabilities then the driver does not # provide any more possibilities for an exploitation than other # first-party drivers. 
path = "{0}:/shares".format( self.configuration.container_volume_mount_path) cmd = ["docker", "container", "create", "--name=%s" % name, "--privileged", "-v", "/dev:/dev", "-v", path, image_name] try: result = self._inner_execute(cmd) except (exception.ProcessExecutionError, OSError): raise exception.ShareBackendException( msg="Container %s failed to be created." % name) self.disconnect_network("bridge", name) LOG.info("A container has been successfully created! Its id is %s.", result[0].rstrip("\n")) def start_container(self, name): cmd = ["docker", "container", "start", name] try: self._inner_execute(cmd) except (exception.ProcessExecutionError, OSError): raise exception.ShareBackendException( msg="Container %s has failed to start." % name) LOG.info("Container %s successfully started!", name) def stop_container(self, name): LOG.debug("Stopping container %s.", name) try: self._inner_execute(["docker", "stop", name]) except (exception.ProcessExecutionError, OSError): raise exception.ShareBackendException( msg="Container %s has failed to stop properly." 
% name) LOG.info("Container %s is successfully stopped.", name) def execute(self, name=None, cmd=None, ignore_errors=False): if name is None: raise exception.ManilaException(_("Container name not specified.")) if cmd is None or (type(cmd) is not list): raise exception.ManilaException(_("Missing or malformed command.")) LOG.debug("Executing inside a container %s.", name) cmd = ["docker", "exec", "-i", name] + cmd result = self._inner_execute(cmd, ignore_errors=ignore_errors) return result def _inner_execute(self, cmd, ignore_errors=False): LOG.debug("Executing command: %s.", " ".join(cmd)) try: result = self._execute(*cmd, run_as_root=True) except (exception.ProcessExecutionError, OSError) as e: with excutils.save_and_reraise_exception( reraise=not ignore_errors): LOG.warning("Failed to run command %(cmd)s due to " "%(reason)s.", {'cmd': cmd, 'reason': e}) else: LOG.debug("Execution result: %s.", result) return result def fetch_container_addresses(self, name, address_family="inet6"): addresses = [] interfaces = self.fetch_container_interfaces(name) for interface in interfaces: result = self.execute( name, ["ip", "-oneline", "-family", address_family, "address", "show", "scope", "global", "dev", interface], ) address_w_prefix = result[0].split()[3] addresses.append(address_w_prefix.split("/")[0]) return addresses def fetch_container_interfaces(self, name): interfaces = [] links = self.execute(name, ["ip", "-o", "link", "show"]) links = links[0].rstrip().split("\n") links = [link for link in links if link.split()[1].startswith("eth")] for link in links: interface = re.search(" (.+?)@", link).group(1) interfaces.append(interface) return interfaces def rename_container(self, name, new_name): veth_names = self.get_container_veths(name) if not veth_names: raise exception.ManilaException( _("Could not find OVS information related to " "container %s.") % name) try: self._inner_execute(["docker", "rename", name, new_name]) except (exception.ProcessExecutionError, OSError): 
raise exception.ShareBackendException( msg="Could not rename container %s." % name) for veth_name in veth_names: cmd = ["ovs-vsctl", "set", "interface", veth_name, "external-ids:manila-container=%s" % new_name] try: self._inner_execute(cmd) except (exception.ProcessExecutionError, OSError): try: self._inner_execute(["docker", "rename", new_name, name]) except (exception.ProcessExecutionError, OSError): msg = _("Could not rename back container %s.") % name LOG.exception(msg) raise exception.ShareBackendException( msg="Could not update OVS information %s." % name) LOG.info("Container %s has been successfully renamed.", name) def container_exists(self, name): result = self._execute("docker", "ps", "--no-trunc", "--format='{{.Names}}'", run_as_root=True)[0] for line in result.split('\n'): if name == line.strip("'"): return True return False def create_network(self, network_name): cmd = ["docker", "network", "create", network_name] LOG.debug("Creating the %s Docker network.", network_name) try: result = self._inner_execute(cmd) except (exception.ProcessExecutionError, OSError): raise exception.ShareBackendException( msg="Docker network %s could not be created." % network_name) LOG.info("The Docker network has been successfully created! Its id is " "%s.", result[0].rstrip("\n")) def remove_network(self, network_name): cmd = ["docker", "network", "remove", network_name] LOG.debug("Removing the %s Docker network.", network_name) try: result = self._inner_execute(cmd) except (exception.ProcessExecutionError, OSError): raise exception.ShareBackendException( msg="Docker network %s could not be removed. One or more " "containers are probably still using it." 
% network_name) LOG.info("The %s Docker network has been successfully removed!", result[0].rstrip("\n")) def connect_network(self, network_name, container_name): cmd = ["docker", "network", "connect", network_name, container_name] try: self._inner_execute(cmd) except (exception.ProcessExecutionError, OSError): raise exception.ShareBackendException( msg="Could not connect the Docker network %s to container %s." % (network_name, container_name)) LOG.info("Docker network %s has been successfully connected to " "container %s!", network_name, container_name) def disconnect_network(self, network_name, container_name): cmd = ["docker", "network", "disconnect", network_name, container_name] try: self._inner_execute(cmd) except (exception.ProcessExecutionError, OSError): raise exception.ShareBackendException( msg="Could not disconnect the Docker network %s from " "container %s." % (network_name, container_name)) LOG.debug("Docker network %s has been successfully disconnected from " "container %s!", network_name, container_name) def get_container_networks(self, container_name): cmd = ["docker", "container", "inspect", "-f", "'{{json .NetworkSettings.Networks}}'", container_name] try: result = self._inner_execute(cmd) except (exception.ProcessExecutionError, OSError): raise exception.ShareBackendException( msg="Could not find any networks associated with the %s " "container." % container_name) # NOTE(ecsantos): The stdout from _inner_execute comes with extra # single quotes. 
networks = list(jsonutils.loads(result[0].strip("\n'"))) return networks def get_container_veths(self, container_name): veths = [] cmd = ["bash", "-c", "cat /sys/class/net/eth*/iflink"] eths_iflinks = self.execute(container_name, cmd) for eth_iflink in eths_iflinks[0].rstrip().split("\n"): veth = self._execute("bash", "-c", "grep -l %s " "/sys/class/net/veth*/ifindex" % eth_iflink) veth = re.search("t/(.+?)/i", veth[0]).group(1) veths.append(veth) return veths def get_network_bridge(self, network_name): cmd = ["docker", "network", "inspect", "-f", "{{.Id}}", network_name] try: network_id = self._inner_execute(cmd) except (exception.ProcessExecutionError, OSError): raise exception.ShareBackendException( msg="Could not find the ID of the %s Docker network." % network_name) # The name of the bridge associated with a given Docker network is # always "br-" followed by the first 12 digits of that network's ID. return "br-" + network_id[0][0:12] def get_veth_from_bridge(self, bridge): veth = self._execute("ip", "link", "show", "master", bridge) veth = re.search(" (.+?)@", veth[0]).group(1) return veth ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/container/driver.py0000664000175000017500000010214300000000000022727 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Container Driver for shares. 
This driver uses a container as a share server. Current implementation suggests that a container when started by Docker will be plugged into a Linux bridge. Also it is suggested that all interfaces willing to talk to each other reside in an OVS bridge.""" import math from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import uuidutils from manila import exception from manila.i18n import _ from manila.share import driver from manila import utils CONF = cfg.CONF LOG = log.getLogger(__name__) container_opts = [ cfg.StrOpt("container_linux_bridge_name", default="docker0", help="Linux bridge used by container hypervisor to plug " "host-side veth to. It will be unplugged from here " "by the driver."), cfg.StrOpt("container_ovs_bridge_name", default="br-int", help="OVS bridge to use to plug a container to."), cfg.BoolOpt("container_cifs_guest_ok", default=True, help="Determines whether to allow guest access to CIFS share " "or not."), cfg.StrOpt("container_image_name", default="manila-docker-container", help="Image to be used for a container-based share server."), cfg.StrOpt("container_helper", default="manila.share.drivers.container.container_helper." "DockerExecHelper", help="Container helper which provides container-related " "operations to the driver."), cfg.StrOpt("container_protocol_helper", default="manila.share.drivers.container.protocol_helper." "DockerCIFSHelper", help="Helper which facilitates interaction with share server."), cfg.StrOpt("container_security_service_helper", default="manila.share.drivers.container.security_service_helper" ".SecurityServiceHelper", help="Helper which facilitates interaction with security " "services."), cfg.StrOpt("container_storage_helper", default="manila.share.drivers.container.storage_helper." "LVMHelper", help="Helper which facilitates interaction with storage " "solution used to actually store data. 
By default LVM " "is used to provide storage for a share."), cfg.StrOpt("container_volume_mount_path", default="/tmp/shares", help="Folder name in host to which logical volume will be " "mounted prior to providing access to it from a " "container."), ] class ContainerShareDriver(driver.ShareDriver, driver.ExecuteMixin): def __init__(self, *args, **kwargs): super(ContainerShareDriver, self).__init__([True], *args, **kwargs) self.configuration.append_config_values(container_opts) self.backend_name = self.configuration.safe_get( "share_backend_name") or "Docker" self.container = importutils.import_class( self.configuration.container_helper)( configuration=self.configuration) self.security_service_helper = importutils.import_class( self.configuration.container_security_service_helper)( configuration=self.configuration) self.storage = importutils.import_class( self.configuration.container_storage_helper)( configuration=self.configuration) self._helpers = {} self.network_allocation_update_support = True def _get_helper(self, share): if share["share_proto"].upper() == "CIFS": helper = self._helpers.get("CIFS") if helper is not None: return helper(self.container, share=share, config=self.configuration) self._helpers["CIFS"] = importutils.import_class( self.configuration.container_protocol_helper) return self._helpers["CIFS"](self.container, share=share, config=self.configuration) else: raise exception.InvalidShare( reason=_("Wrong, unsupported or disabled protocol.")) def _update_share_stats(self): data = { 'share_backend_name': self.backend_name, 'storage_protocol': 'CIFS', 'reserved_percentage': self.configuration.reserved_share_percentage, 'reserved_snapshot_percentage': self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage, 'reserved_share_extend_percentage': self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage, 'consistency_group_support': None, 'snapshot_support': 
False, 'create_share_from_snapshot_support': False, 'driver_name': 'ContainerShareDriver', 'pools': self.storage.get_share_server_pools(), 'security_service_update_support': True, 'share_server_multiple_subnet_support': True, 'mount_point_name_support': False, } super(ContainerShareDriver, self)._update_share_stats(data) def create_share(self, context, share, share_server=None): LOG.debug("Create share on server '%s'.", share_server["id"]) server_id = self._get_container_name(share_server["id"]) share_name = share.share_id self.storage.provide_storage(share_name, share['size']) location = self._create_export_and_mount_storage( share, server_id, share_name) return location @utils.synchronized('container_driver_delete_share_lock', external=True) def delete_share(self, context, share, share_server=None): LOG.debug("Deleting share %(share)s on server '%(server)s'.", {"server": share_server["id"], "share": self._get_share_name(share)}) server_id = self._get_container_name(share_server["id"]) share_name = self._get_share_name(share) self._delete_export_and_umount_storage(share, server_id, share_name, ignore_errors=True) self.storage.remove_storage(share_name) LOG.debug("Deleted share %s successfully.", share_name) def _get_share_name(self, share): if share.get('export_location'): return share['export_location'].split('/')[-1] else: return share.share_id def extend_share(self, share, new_size, share_server=None): server_id = self._get_container_name(share_server["id"]) share_name = self._get_share_name(share) self.container.execute( server_id, ["umount", "/shares/%s" % share_name] ) self.storage.extend_share(share_name, new_size, share_server) lv_device = self.storage._get_lv_device(share_name) self.container.execute( server_id, ["mount", lv_device, "/shares/%s" % share_name] ) def ensure_share(self, context, share, share_server=None): pass def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): server_id = 
self._get_container_name(share_server["id"]) share_name = self._get_share_name(share) LOG.debug("Updating access to share %(share)s at " "share server %(share_server)s.", {"share_server": share_server["id"], "share": share_name}) self._get_helper(share).update_access(server_id, share_name, access_rules, add_rules, delete_rules) def get_network_allocations_number(self): return 1 def _get_container_name(self, server_id): return "manila_%s" % server_id.replace("-", "_") def do_setup(self, *args, **kwargs): pass def check_for_setup_error(self, *args, **kwargs): host_id = self.configuration.safe_get("neutron_host_id") neutron_class = importutils.import_class( 'manila.network.neutron.neutron_network_plugin.' 'NeutronNetworkPlugin' ) actual_class = importutils.import_class( self.configuration.safe_get("network_api_class")) if host_id is None and issubclass(actual_class, neutron_class): msg = _("%s requires neutron_host_id to be " "specified.") % neutron_class raise exception.ManilaException(msg) elif host_id is None: LOG.warning("neutron_host_id is not specified. 
This driver " "might not work as expected without it.") def _connect_to_network(self, server_id, network_info, host_veth, host_bridge, iface): LOG.debug("Attempting to connect container to neutron network.") network_allocation = network_info["network_allocations"][0] port_address = network_allocation.ip_address port_mac = network_allocation.mac_address port_id = network_allocation.id self.container.execute( server_id, ["ifconfig", iface, port_address, "up"] ) self.container.execute( server_id, ["ip", "link", "set", "dev", iface, "address", port_mac] ) msg_helper = { 'id': server_id, 'veth': host_veth, 'lb': host_bridge, 'ovsb': self.configuration.container_ovs_bridge_name, 'ip': port_address, 'network': network_info['neutron_net_id'], 'subnet': network_info['neutron_subnet_id'], } LOG.debug("Container %(id)s veth is %(veth)s.", msg_helper) LOG.debug("Removing %(veth)s from %(lb)s.", msg_helper) self._execute("ip", "link", "set", "dev", host_veth, "nomaster", run_as_root=True) LOG.debug("Plugging %(veth)s into %(ovsb)s.", msg_helper) set_if = ['--', 'set', 'interface', host_veth] e_mac = set_if + ['external-ids:attached-mac="%s"' % port_mac] e_id = set_if + ['external-ids:iface-id="%s"' % port_id] e_status = set_if + ['external-ids:iface-status=active'] e_mcid = set_if + ['external-ids:manila-container=%s' % server_id] self._execute("ovs-vsctl", "--", "add-port", self.configuration.container_ovs_bridge_name, host_veth, *(e_mac + e_id + e_status + e_mcid), run_as_root=True) LOG.debug("Now container %(id)s should be accessible from network " "%(network)s and subnet %(subnet)s by address %(ip)s.", msg_helper) @utils.synchronized("container_driver_teardown_lock", external=True) def _teardown_server(self, *args, **kwargs): server_id = self._get_container_name(kwargs["server_details"]["id"]) veths = self.container.get_container_veths(server_id) networks = self.container.get_container_networks(server_id) for veth, network in zip(veths, networks): LOG.debug("Deleting veth 
%s.", veth) try: self._execute("ovs-vsctl", "--", "del-port", self.configuration.container_ovs_bridge_name, veth, run_as_root=True) except exception.ProcessExecutionError as e: LOG.warning("Failed to delete port %s: port vanished.", veth) LOG.error(e) self.container.disconnect_network(network, server_id) if network != "bridge": self.container.remove_network(network) self.container.stop_container(server_id) def _setup_server_network(self, server_id, network_info): existing_interfaces = self.container.fetch_container_interfaces( server_id) new_interfaces = [] # If the share server network allocations are being updated, create # interfaces starting with ethX + 1. if existing_interfaces: ifnum_offset = len(existing_interfaces) for ifnum, subnet in enumerate(network_info): # TODO(ecsantos): Newer Ubuntu images (systemd >= 197) use # predictable network interface names (e.g., enp3s0) instead of # the classical kernel naming scheme (e.g., eth0). The # Container driver currently uses an Ubuntu Xenial Docker # image, so if it's updated in the future, these "eth" strings # should also be updated. new_interfaces.append("eth" + str(ifnum + ifnum_offset)) # Otherwise (the share server was just created), create interfaces # starting with eth0. else: for ifnum, subnet in enumerate(network_info): new_interfaces.append("eth" + str(ifnum)) for new_interface, subnet in zip(new_interfaces, network_info): network_name = "manila-docker-network-" + uuidutils.generate_uuid() self.container.create_network(network_name) self.container.connect_network(network_name, server_id) bridge = self.container.get_network_bridge(network_name) veth = self.container.get_veth_from_bridge(bridge) self._connect_to_network(server_id, subnet, veth, bridge, new_interface) @utils.synchronized("veth-lock", external=True) def _setup_server(self, network_info, metadata=None): msg = "Creating share server '%s'." 
common_net_info = network_info[0] server_id = self._get_container_name(common_net_info["server_id"]) LOG.debug(msg, server_id) try: self.container.create_container(server_id) self.container.start_container(server_id) except Exception as e: raise exception.ManilaException(_("Cannot create container: %s") % e) self._setup_server_network(server_id, network_info) security_services = common_net_info.get('security_services') if security_services: self.setup_security_services(server_id, security_services) LOG.info("Container %s was created.", server_id) return {"id": common_net_info["server_id"]} def _delete_export_and_umount_storage( self, share, server_id, share_name, ignore_errors=False): self._umount_storage( share, server_id, share_name, ignore_errors=ignore_errors) # (aovchinnikov): bug 1621784 manifests itself here as well as in # storage helper. There is a chance that we won't be able to remove # this directory, despite the fact that it is not shared anymore and # already contains nothing. In such case the driver should not fail # share deletion, but issue a warning. 
self.container.execute( server_id, ["rm", "-fR", "/shares/%s" % share_name], ignore_errors=True ) def _umount_storage( self, share, server_id, share_name, ignore_errors=False): self._get_helper(share).delete_share(server_id, share_name, ignore_errors=ignore_errors) self.container.execute( server_id, ["umount", "/shares/%s" % share_name], ignore_errors=ignore_errors ) def _create_export_and_mount_storage(self, share, server_id, share_name): self.container.execute( server_id, ["mkdir", "-m", "750", "/shares/%s" % share_name] ) return self._mount_storage(share, server_id, share_name) def _mount_storage(self, share, server_id, share_name): lv_device = self.storage._get_lv_device(share_name) self.container.execute( server_id, ["mount", lv_device, "/shares/%s" % share_name] ) location = self._get_helper(share).create_share(server_id) return location def manage_existing_with_server( self, share, driver_options, share_server=None): if not share_server and self.driver_handles_share_servers: raise exception.ShareBackendException( "A share server object is needed to manage a share in this " "driver mode of operation.") server_id = self._get_container_name(share_server["id"]) share_name = self._get_share_name(share) size = int(math.ceil(float(self.storage.get_size(share_name)))) self._delete_export_and_umount_storage(share, server_id, share_name) new_share_name = share.share_id self.storage.rename_storage(share_name, new_share_name) location = self._create_export_and_mount_storage( share, server_id, new_share_name) result = {'size': size, 'export_locations': location} LOG.info("Successfully managed share %(share)s, returning %(data)s", {'share': share.id, 'data': result}) return result def unmanage_with_server(self, share, share_server=None): pass def get_share_server_network_info( self, context, share_server, identifier, driver_options): name = self._get_correct_container_old_name(identifier) return self.container.fetch_container_addresses(name, "inet") def 
manage_server(self, context, share_server, identifier, driver_options): new_name = self._get_container_name(share_server['id']) old_name = self._get_correct_container_old_name(identifier) self.container.rename_container(old_name, new_name) return new_name, {'id': share_server['id']} def unmanage_server(self, server_details, security_services=None): pass def _get_correct_container_old_name(self, name): # Check if the container with the given name exists, else return # the name based on the driver template if not self.container.container_exists(name): return self._get_container_name(name) return name def migration_check_compatibility(self, context, source_share, destination_share, share_server=None, destination_share_server=None): return self.storage.migration_check_compatibility( context, source_share, destination_share, share_server=share_server, destination_share_server=destination_share_server) def migration_start(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): self.storage.migration_start( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_continue(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.storage.migration_continue( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_get_progress(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.storage.migration_get_progress( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_cancel(self, 
context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): self.storage.migration_cancel( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_complete(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): # Removes the source share reference from the source container source_server_id = self._get_container_name(share_server["id"]) self._umount_storage( source_share, source_server_id, source_share.share_id) # storage removes source share self.storage.migration_complete( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) # Enables the access on the destination container destination_server_id = self._get_container_name( destination_share_server["id"]) new_export_locations = self._mount_storage( destination_share, destination_server_id, destination_share.share_id) msg = ("Volume move operation for share %(shr)s was completed " "successfully. 
Share has been moved from %(src)s to " "%(dest)s.") msg_args = { 'shr': source_share['id'], 'src': source_share['host'], 'dest': destination_share['host'], } LOG.info(msg, msg_args) return { 'export_locations': new_export_locations, } def share_server_migration_check_compatibility( self, context, share_server, dest_host, old_share_network, new_share_network, shares_request_spec): """Is called to check migration compatibility for a share server.""" return self.storage.share_server_migration_check_compatibility( context, share_server, dest_host, old_share_network, new_share_network, shares_request_spec) def share_server_migration_start(self, context, src_share_server, dest_share_server, shares, snapshots): """Is called to perform 1st phase of migration of a share server.""" LOG.debug( "Migration of share server with ID '%s' has been started.", src_share_server["id"]) self.storage.share_server_migration_start( context, src_share_server, dest_share_server, shares, snapshots) def share_server_migration_continue(self, context, src_share_server, dest_share_server, shares, snapshots): return self.storage.share_server_migration_continue( context, src_share_server, dest_share_server, shares, snapshots) def share_server_migration_cancel(self, context, src_share_server, dest_share_server, shares, snapshots): """Is called to cancel a share server migration.""" self.storage.share_server_migration_cancel( context, src_share_server, dest_share_server, shares, snapshots) LOG.debug( "Migration of share server with ID '%s' has been canceled.", src_share_server["id"]) return def share_server_migration_get_progress(self, context, src_share_server, dest_share_server, shares, snapshots): """Is called to get share server migration progress.""" return self.storage.share_server_migration_get_progress( context, src_share_server, dest_share_server, shares, snapshots) def share_server_migration_complete(self, context, source_share_server, dest_share_server, shares, snapshots, 
new_network_allocations): # Removes the source shares reference from the source container source_server_id = self._get_container_name(source_share_server["id"]) for source_share in shares: self._umount_storage( source_share, source_server_id, source_share.share_id) # storage removes source share self.storage.share_server_migration_complete( context, source_share_server, dest_share_server, shares, snapshots, new_network_allocations) destination_server_id = self._get_container_name( dest_share_server["id"]) shares_updates = {} for destination_share in shares: share_id = destination_share.share_id new_export_locations = self._mount_storage( destination_share, destination_server_id, share_id) shares_updates[destination_share['id']] = { 'export_locations': new_export_locations, 'pool_name': self.storage.get_share_pool_name(share_id), } msg = ("Volumes move operation from server %(server)s were completed " "successfully. Share server has been moved from %(src)s to " "%(dest)s.") msg_args = { 'serv': source_share_server['id'], 'src': source_share_server['host'], 'dest': dest_share_server['host'], } LOG.info(msg, msg_args) return { 'share_updates': shares_updates, } def setup_security_services(self, share_server_id, security_services): """Is called to setup a security service in the share server.""" for security_service in security_services: if security_service['type'].lower() != 'ldap': raise exception.ShareBackendException(_( "The container driver does not support security services " "other than LDAP.")) self.security_service_helper.setup_security_service( share_server_id, security_service) def _get_different_security_service_keys( self, current_security_service, new_security_service): valid_keys = ['dns_ip', 'server', 'domain', 'user', 'password', 'ou'] different_keys = [] for key, value in current_security_service.items(): if (current_security_service[key] != new_security_service[key] and key in valid_keys): different_keys.append(key) return different_keys def 
_check_if_all_fields_are_updatable(self, current_security_service, new_security_service): # NOTE(carloss): We only support updating user and password at # the moment updatable_fields = ['user', 'password'] different_keys = self._get_different_security_service_keys( current_security_service, new_security_service) for key in different_keys: if key not in updatable_fields: return False return True def update_share_server_security_service(self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): """Is called to update or add a sec service to a share server.""" if not self.check_update_share_server_security_service( context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=current_security_service): raise exception.ManilaException(_( "The requested security service update is not supported by " "the container driver.")) server_id = self._get_container_name(share_server['id']) if not current_security_service: self.setup_security_services(server_id, [new_security_service]) else: self.security_service_helper.update_security_service( server_id, current_security_service, new_security_service) msg = ( "The security service was successfully added to the share " "server %(server_id)s.") msg_args = { 'server_id': share_server['id'], } LOG.info(msg, msg_args) def check_update_share_server_security_service( self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): current_type = ( current_security_service['type'].lower() if current_security_service else '') new_type = new_security_service['type'].lower() if new_type != 'ldap' or (current_type and current_type != 'ldap'): LOG.error('Currently only LDAP security services are supported ' 'by the container driver.') return False if not current_type: return True all_fields_are_updatable = 
self._check_if_all_fields_are_updatable( current_security_service, new_security_service) if not all_fields_are_updatable: LOG.info( "The Container driver does not support updating " "security service parameters other than 'user' and " "'password'.") return False return True def _form_share_server_update_return(self, share_server, current_network_allocations, new_network_allocations, share_instances): server_id = self._get_container_name(share_server["id"]) addresses = self.container.fetch_container_addresses(server_id, "inet") share_updates = {} subnet_allocations = {} for share_instance in share_instances: export_locations = [] for address in addresses: # TODO(ecsantos): The Container driver currently only # supports CIFS. If NFS support is implemented in the # future, the path should be adjusted accordingly. export_location = { "is_admin_only": False, "path": "//%(ip_address)s/%(share_id)s" % { "ip_address": address, "share_id": share_instance["share_id"] }, "preferred": False } export_locations.append(export_location) share_updates[share_instance["id"]] = export_locations for subnet in current_network_allocations["subnets"]: for network_allocation in subnet["network_allocations"]: subnet_allocations[network_allocation["id"]] = ( network_allocation["ip_address"]) for network_allocation in ( new_network_allocations["network_allocations"]): subnet_allocations[network_allocation["id"]] = ( network_allocation["ip_address"]) server_details = { "subnet_allocations": jsonutils.dumps(subnet_allocations) } return { "share_updates": share_updates, "server_details": server_details } def check_update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_share_network_subnet, security_services, share_instances, share_instances_rules): LOG.debug("Share server %(server)s can be updated with allocations " "from new subnet.", {"server": share_server["id"]}) return True def update_share_server_network_allocations( self, context, 
share_server, current_network_allocations, new_network_allocations, security_services, share_instances, snapshots): server_id = self._get_container_name(share_server["id"]) self._setup_server_network(server_id, [new_network_allocations]) return self._form_share_server_update_return( share_server, current_network_allocations, new_network_allocations, share_instances) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/container/protocol_helper.py0000664000175000017500000001362500000000000024642 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log

from manila.common import constants as const
from manila import exception
from manila.i18n import _

LOG = log.getLogger(__name__)


class DockerCIFSHelper(object):
    """Manages CIFS (Samba) shares inside a Docker container.

    All operations are performed by executing ``net conf`` registry
    commands inside the container identified by ``server_id``.
    """

    def __init__(self, container_helper, *args, **kwargs):
        super(DockerCIFSHelper, self).__init__()
        self.share = kwargs.get("share")
        self.conf = kwargs.get("config")
        self.container = container_helper

    def create_share(self, server_id):
        """Define the CIFS share in Samba and return its export locations."""
        export_locations = []
        share_name = self.share.share_id
        cmd = ["net", "conf", "addshare", share_name,
               "/shares/%s" % share_name, "writeable=y"]
        if self.conf.container_cifs_guest_ok:
            cmd.append("guest_ok=y")
        else:
            cmd.append("guest_ok=n")
        self.container.execute(server_id, cmd)
        parameters = {
            "browseable": "yes",
            "create mask": "0755",
            "read only": "no",
        }
        for param, value in parameters.items():
            self.container.execute(
                server_id,
                ["net", "conf", "setparm", share_name, param, value]
            )

        # TODO(tbarron): pass configured address family when we support IPv6
        addresses = self.container.fetch_container_addresses(
            server_id, address_family="inet")

        # One export location is published per container address.
        for address in addresses:
            export_location = {
                "is_admin_only": False,
                "path": "//%(ip_address)s/%(share_name)s" % {
                    "ip_address": address,
                    "share_name": share_name
                },
                "preferred": False
            }
            export_locations.append(export_location)

        return export_locations

    def delete_share(self, server_id, share_name, ignore_errors=False):
        """Remove the share definition from the Samba registry config."""
        self.container.execute(
            server_id,
            ["net", "conf", "delshare", share_name],
            ignore_errors=ignore_errors
        )

    def _get_access_group(self, access_level):
        """Map a manila access level to the Samba user-list parameter name."""
        if access_level == const.ACCESS_LEVEL_RO:
            access = "read list"
        elif access_level == const.ACCESS_LEVEL_RW:
            access = "valid users"
        else:
            raise exception.InvalidShareAccessLevel(level=access_level)
        return access

    def _get_existing_users(self, server_id, share_name, access):
        """Return the current user list for the given access parameter.

        Returns an empty string when the parameter is unset (errors from
        ``net conf getparm`` are ignored).
        """
        result = self.container.execute(
            server_id,
            ["net", "conf", "getparm", share_name, access],
            ignore_errors=True
        )
        if result:
            return result[0].rstrip('\n')
        else:
            return ""

    def _set_users(self, server_id, share_name, access, users_to_set):
        """Overwrite the user list for the given access parameter."""
        self.container.execute(
            server_id,
            ["net", "conf", "setparm", share_name, access, users_to_set]
        )

    def _allow_access(self, share_name, server_id, user_to_allow,
                      access_level):
        """Append a user to the share's access list for the given level."""
        access = self._get_access_group(access_level)
        # NOTE(review): the TypeError path presumably covers smbd being
        # unreachable / getparm returning None — confirm against
        # container.execute's error behavior.
        try:
            existing_users = self._get_existing_users(server_id, share_name,
                                                      access)
        except TypeError:
            users_to_allow = user_to_allow
        else:
            users_to_allow = " ".join([existing_users, user_to_allow])
        self._set_users(server_id, share_name, access, users_to_allow)

    def _deny_access(self, share_name, server_id, user_to_deny,
                     access_level):
        """Remove a user from the share's access list for the given level."""
        access = self._get_access_group(access_level)
        try:
            existing_users = self._get_existing_users(server_id, share_name,
                                                      access)
        except TypeError:
            LOG.warning("Can't access smbd at share %s.", share_name)
            return
        else:
            allowed_users = " ".join(sorted(set(existing_users.split()) -
                                            set([user_to_deny])))
            # Only rewrite the parameter when the list actually changed.
            if allowed_users != existing_users:
                self._set_users(server_id, share_name, access, allowed_users)

    def update_access(self, server_id, share_name, access_rules,
                      add_rules=None, delete_rules=None):
        """Apply access rule changes; full resync when no deltas are given."""

        def _rule_updater(rules, action, override_type_check=False):
            for rule in rules:
                access_level = rule['access_level']
                access_type = rule['access_type']
                # (aovchinnikov): override_type_check is used to ensure
                # broken rules deletion.
                if access_type == 'user' or override_type_check:
                    action(share_name, server_id, rule['access_to'],
                           access_level)
                else:
                    msg = _("Access type '%s' is not supported by the "
                            "driver.") % access_type
                    raise exception.InvalidShareAccess(reason=msg)

        if not (add_rules or delete_rules):
            # clean all users first.
            self.container.execute(
                server_id,
                ["net", "conf", "setparm", share_name, "valid users", ""]
            )
            _rule_updater(access_rules or [], self._allow_access)
            return
        _rule_updater(add_rules or [], self._allow_access)
        _rule_updater(delete_rules or [], self._deny_access,
                      override_type_check=True)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/share/drivers/container/security_service_helper.py0000664000175000017500000001034600000000000026365 0ustar00zuulzuul00000000000000# Copyright (c) 2021 NetApp, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
from oslo_log import log as logging

from manila import exception
from manila.i18n import _
from manila.share import driver
from manila import utils as manila_utils

# LDAP error codes
LDAP_INVALID_CREDENTIALS = 49

LOG = logging.getLogger(__name__)


class SecurityServiceHelper(driver.ExecuteMixin):
    """Validates LDAP security services by binding from inside a container."""

    def __init__(self, *args, **kwargs):
        self.configuration = kwargs.pop("configuration", None)
        super(SecurityServiceHelper, self).__init__(*args, **kwargs)
        self.init_execute_mixin()

    def setup_security_service(self, share_server_id, security_service):
        """Verify a new security service by performing an LDAP bind."""
        msg = ("Setting up the security service %(service)s for share server "
               "%(server_id)s")
        msg_args = {
            'service': security_service['id'],
            'server_id': share_server_id
        }
        LOG.debug(msg, msg_args)
        self.ldap_bind(share_server_id, security_service)

    def update_security_service(self, server_id, current_security_service,
                                new_security_service):
        """Verify updated credentials by binding with the new service."""
        msg = ("Updating the security service %(service)s for share server "
               "%(server_id)s")
        msg_args = {
            'service': new_security_service['id'],
            'server_id': server_id
        }
        LOG.debug(msg, msg_args)
        self.ldap_bind(server_id, new_security_service)

    def ldap_bind(self, share_server_id, security_service):
        """Run ldapwhoami inside the container to test the bind credentials.

        # NOTE(review): the bind DN hard-codes dc=example,dc=com — confirm
        # this is only intended for the reference/CI LDAP deployment.
        """
        ss_info = self.ldap_get_info(security_service)
        cmd = ["docker", "exec", "%s" % share_server_id, "ldapwhoami", "-x",
               "-H", "ldap://localhost:389",
               "-D", "cn=%s,dc=example,dc=com" % ss_info["ss_user"],
               "-w", "%s" % ss_info["ss_password"]]
        self.ldap_retry_operation(cmd, run_as_root=True)

    def ldap_get_info(self, security_service):
        """Extract user/password from the service; raise when either is
        missing."""
        if all(info in security_service for info in ("user", "password")):
            ss_user = security_service["user"]
            ss_password = security_service["password"]
        else:
            raise exception.ShareBackendException(
                _("LDAP requires user and password to be set for the bind "
                  "operation."))
        ss_info = {
            "ss_user": ss_user,
            "ss_password": ss_password,
        }
        return ss_info

    def ldap_retry_operation(self, cmd, run_as_root=True, timeout=30):
        """Run an LDAP command, retrying transient failures.

        Retries every ``interval`` seconds until ``timeout`` is exhausted.
        Invalid credentials abort immediately (no retry); exhausted retries
        are converted into ShareBackendException.
        """
        interval = 5
        # At least one attempt even for timeouts shorter than the interval.
        retries = int(timeout / interval) or 1

        @manila_utils.retry(retry_param=exception.ProcessExecutionError,
                            interval=interval,
                            retries=retries, backoff_rate=1)
        def try_ldap_operation():
            try:
                self._execute(*cmd, run_as_root=run_as_root)
            except exception.ProcessExecutionError as e:
                if e.exit_code == LDAP_INVALID_CREDENTIALS:
                    # Bad credentials will never succeed on retry: raise a
                    # non-retried exception type to break out immediately.
                    msg = _('LDAP credentials are invalid. '
                            'Aborting operation.')
                    LOG.warning(msg)
                    raise exception.ShareBackendException(msg=msg)
                else:
                    # Re-raise the retried type so the decorator tries again.
                    msg = _('Command has returned execution error.'
                            ' Will retry the operation.'
                            ' Error details: %s') % e.stderr
                    LOG.warning(msg)
                    raise exception.ProcessExecutionError()

        try:
            try_ldap_operation()
        except exception.ProcessExecutionError as e:
            msg = _("Unable to execute LDAP operation with success. "
                    "Retries exhausted. Error details: %s") % e.stderr
            LOG.exception(msg)
            raise exception.ShareBackendException(msg=msg)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/share/drivers/container/storage_helper.py0000664000175000017500000003245000000000000024442 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
import os
import re

from oslo_config import cfg
from oslo_log import log

from manila import exception
from manila.i18n import _
from manila.share import driver
from manila.share import utils as share_utils

CONF = cfg.CONF

lv_opts = [
    cfg.StrOpt("container_volume_group",
               default="manila_docker_volumes",
               help="LVM volume group to use for volumes. This volume group "
                    "must be created by the cloud administrator independently "
                    "from manila operations."),
]

CONF.register_opts(lv_opts)
LOG = log.getLogger(__name__)


class LVMHelper(driver.ExecuteMixin):
    """Backs container shares with logical volumes in a configured VG."""

    def __init__(self, *args, **kwargs):
        self.configuration = kwargs.pop("configuration", None)
        if self.configuration is None:
            raise exception.ManilaException(_("LVMHelper called without "
                                              "supplying configuration."))
        self.configuration.append_config_values(lv_opts)
        super(LVMHelper, self).__init__(*args, **kwargs)
        self.init_execute_mixin()

    def get_share_server_pools(self, share_server=None):
        """Report the volume group's total/free capacity as a single pool."""
        out, err = self._execute('vgs',
                                 self.configuration.container_volume_group,
                                 '--options', 'vg_size,vg_free',
                                 '--noheadings',
                                 '--units', 'g',
                                 run_as_root=True)
        if err:
            msg = _("Unable to gather size of the volume group %(vg)s to be "
                    "used by the driver. Error: %(err)s")
            raise exception.ShareBackendException(
                msg % {'vg': self.configuration.container_volume_group,
                       'err': err})

        # Ascending sort yields (free, total) since vg_free <= vg_size.
        (free_size, total_size) = sorted(re.findall(r"\d+\.\d+|\d+", out),
                                         reverse=False)
        return [{
            'pool_name': self.configuration.container_volume_group,
            'total_capacity_gb': float(total_size),
            'free_capacity_gb': float(free_size),
            'reserved_percentage': 0,
            'reserved_snapshot_percentage': 0,
            'reserved_share_extend_percentage': 0,
        }, ]

    def _get_lv_device(self, share_name):
        # Device path of the share's logical volume, e.g. /dev/<vg>/<share>.
        return os.path.join("/dev", self.configuration.container_volume_group,
                            share_name)

    def _get_lv_folder(self, share_name):
        # Host-side mount point directory for the share's volume.
        return os.path.join(self.configuration.container_volume_mount_path,
                            share_name)

    def provide_storage(self, share_name, size):
        """Create an ext4-formatted LV of ``size`` GiB for the share."""
        self._execute("lvcreate", "-p", "rw", "-L",
                      str(size) + "G", "-n", share_name,
                      self.configuration.container_volume_group,
                      run_as_root=True)
        self._execute("mkfs.ext4", self._get_lv_device(share_name),
                      run_as_root=True)

    def _try_to_unmount_device(self, device):
        # NOTE(ganso): We invoke this method to be sure volume was unmounted,
        # and we swallow the exception in case it fails to.
        try:
            self._execute("umount", device, run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.warning("Failed to umount helper directory %(device)s due to "
                        "%(reason)s.", {'device': device, 'reason': e})

    def remove_storage(self, share_name):
        """Unmount and delete the share's LV (best effort on removal)."""
        device = self._get_lv_device(share_name)
        self._try_to_unmount_device(device)

        # (aovchinnikov): bug 1621784 manifests itself in jamming logical
        # volumes, so try removing once and issue warning until it is fixed.
        try:
            self._execute("lvremove", "-f", "--autobackup", "n",
                          device, run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.warning("Failed to remove logical volume %(device)s due to "
                        "%(reason)s.", {'device': device, 'reason': e})

    def rename_storage(self, share_name, new_share_name):
        """Rename the share's LV; unlike removal, failures propagate."""
        old_device = self._get_lv_device(share_name)
        new_device = self._get_lv_device(new_share_name)

        self._try_to_unmount_device(old_device)

        try:
            self._execute("lvrename", "--autobackup", "n", old_device,
                          new_device, run_as_root=True)
        except exception.ProcessExecutionError as e:
            msg = ("Failed to rename logical volume %(device)s due to "
                   "%(reason)s." % {'device': old_device, 'reason': e})
            LOG.exception(msg)
            raise

    def extend_share(self, share_name, new_size, share_server=None):
        """Grow the LV to ``new_size`` GiB and resize its ext4 filesystem."""
        lv_device = self._get_lv_device(share_name)
        cmd = ('lvextend', '-L', '%sG' % new_size, '-n', lv_device)
        self._execute(*cmd, run_as_root=True)
        # Filesystem must be checked before resize2fs will grow it.
        self._execute("e2fsck", "-f", "-y", lv_device, run_as_root=True)
        self._execute('resize2fs', lv_device, run_as_root=True)

    def get_size(self, share_name):
        """Return the LV size in GiB as reported by ``lvs`` (raw string)."""
        device = self._get_lv_device(share_name)
        size = self._execute(
            "lvs", "-o", "lv_size", "--noheadings", "--nosuffix",
            "--units", "g", device, run_as_root=True)
        LOG.debug("Found size %(size)s for LVM device "
                  "%(lvm)s.", {'size': size[0], 'lvm': share_name})
        return size[0]

    def migration_check_compatibility(self, context, source_share,
                                      destination_share, share_server=None,
                                      destination_share_server=None):
        """Checks compatibility between self.host and destination host."""
        # They must be in same vg and host
        compatible = False
        destination_host = destination_share['host']
        source_host = source_share['host']
        destination_vg = share_utils.extract_host(
            destination_host, level='pool')
        source_vg = share_utils.extract_host(
            source_host, level='pool')

        if destination_vg != source_vg:
            msg = ("Cannot migrate share %(shr)s between "
                   "%(src)s and %(dest)s, they must be in the same volume "
                   "group.")
            msg_args = {
                'shr': source_share['id'],
                'src': source_share['host'],
                'dest': destination_host,
            }
            LOG.exception(msg, msg_args)
        else:
            compatible = True

        compatibility = {
            'compatible': compatible,
            'writable': True,
            'nondisruptive': False,
            'preserve_metadata': True,
            'preserve_snapshots': False,
        }
        return compatibility

    def migration_start(self, context, source_share, destination_share,
                        source_snapshots, snapshot_mappings,
                        share_server=None, destination_share_server=None):
        """Starts the migration of the share from one host to another."""
        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to copy the data between the volumes.
        return

    def migration_continue(self, context, source_share, destination_share,
                           source_snapshots, snapshot_mappings,
                           share_server=None, destination_share_server=None):
        """Check the progress of the migration."""
        return True

    def migration_get_progress(self, context, source_share,
                               destination_share, source_snapshots,
                               snapshot_mappings, share_server=None,
                               destination_share_server=None):
        """Return detailed progress of the migration in progress."""
        return {
            'total_progress': 100,
        }

    def migration_cancel(self, context, source_share, destination_share,
                         source_snapshots, snapshot_mappings,
                         share_server=None, destination_share_server=None):
        """Abort an ongoing migration."""
        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to cancel the copy of the data.
        return

    def migration_complete(self, context, source_share, destination_share,
                           source_snapshots, snapshot_mappings,
                           share_server=None, destination_share_server=None):
        """Completes by removing the source local volume."""
        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to remove source lv.
        return

    def share_server_migration_check_compatibility(
            self, context, share_server, dest_host, old_share_network,
            new_share_network, shares_request_spec):
        """Is called to check migration compatibility for a share server."""
        not_compatible = {
            'compatible': False,
            'writable': None,
            'nondisruptive': None,
            'preserve_snapshots': None,
            'migration_cancel': None,
            'migration_get_progress': None,
        }

        dest_backend_name = share_utils.extract_host(dest_host,
                                                     level='backend_name')
        source_backend_name = share_utils.extract_host(share_server['host'],
                                                       level='backend_name')
        if dest_backend_name == source_backend_name:
            msg = _("Cannot perform server migration %(server)s within the "
                    "same backend. Please choose a destination host different "
                    "from the source.")
            msg_args = {
                'server': share_server['id'],
            }
            LOG.error(msg, msg_args)
            return not_compatible

        # The container backend has only one pool, gets its pool name from the
        # first instance.
        first_share = shares_request_spec['shares_req_spec'][0]
        source_host = first_share['share_instance_properties']['host']
        source_vg = share_utils.extract_host(
            source_host, level='pool')
        dest_vg = share_utils.extract_host(
            dest_host, level='pool')
        if dest_vg and dest_vg != source_vg:
            msg = ("Cannot migrate share server %(server)s between %(src)s "
                   "and %(dest)s. They must be in the same volume group.")
            msg_args = {
                'server': share_server['id'],
                'src': source_host,
                'dest': dest_host,
            }
            LOG.error(msg, msg_args)
            return not_compatible

        # NOTE(felipe_rodrigues): it is not required to check the capacity,
        # because it is migrating in the same volume group.
        return {
            'compatible': True,
            'writable': True,
            'nondisruptive': False,
            'preserve_snapshots': False,
            'migration_cancel': True,
            'migration_get_progress': True,
        }

    def share_server_migration_start(self, context, src_share_server,
                                     dest_share_server, shares, snapshots):
        """Is called to perform 1st phase of migration of a share server."""
        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to copy the data between the volumes.
        return

    def share_server_migration_continue(self, context, src_share_server,
                                        dest_share_server, shares,
                                        snapshots):
        """Check the progress of the migration."""
        return True

    def share_server_migration_complete(self, context, source_share_server,
                                        dest_share_server, shares, snapshots,
                                        new_network_allocations):
        """Completes by removing the source local volume."""
        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to remove source lv.
        return

    def share_server_migration_cancel(self, context, src_share_server,
                                      dest_share_server, shares, snapshots):
        """Abort an ongoing migration."""
        # NOTE(felipe_rodrigues): Since they are in the same volume group,
        # there is no need to cancel the copy of the data.
        return

    def share_server_migration_get_progress(self, context, src_share_server,
                                            dest_share_server, shares,
                                            snapshots):
        """Return detailed progress of the server migration in progress."""
        return {
            'total_progress': 100,
        }

    def get_share_pool_name(self, share_id):
        """Return the pool name where the share is allocated"""
        return self.configuration.container_volume_group
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.901672
manila-21.0.0/manila/share/drivers/dell_emc/0000775000175000017500000000000000000000000020643 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/share/drivers/dell_emc/__init__.py0000664000175000017500000000000000000000000022742 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9056718
manila-21.0.0/manila/share/drivers/dell_emc/common/0000775000175000017500000000000000000000000022133 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/share/drivers/dell_emc/common/__init__.py0000664000175000017500000000000000000000000024232 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9056718
manila-21.0.0/manila/share/drivers/dell_emc/common/enas/0000775000175000017500000000000000000000000023061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/share/drivers/dell_emc/common/enas/__init__.py0000664000175000017500000000000000000000000025160 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/share/drivers/dell_emc/common/enas/connector.py0000664000175000017500000001453100000000000025431 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from http import cookiejar as http_cookiejar
import shlex
from urllib import error as url_error
from urllib import request as url_request

from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import excutils

from manila import exception
from manila.i18n import _
from manila.share.drivers.dell_emc.common.enas import constants
from manila.share.drivers.dell_emc.common.enas import utils as enas_utils
from manila import ssh_utils

LOG = log.getLogger(__name__)


class XMLAPIConnector(object):
    """HTTPS connector for the Celerra/VNX XML management API.

    Authenticates via the /Login form and keeps the session cookie in a
    CookieJar shared by all subsequent requests.
    """

    def __init__(self, configuration, debug=True):
        super(XMLAPIConnector, self).__init__()
        self.storage_ip = enas_utils.convert_ipv6_format_if_needed(
            configuration.emc_nas_server)
        self.username = configuration.emc_nas_login
        self.password = configuration.emc_nas_password
        self.debug = debug
        self.auth_url = 'https://' + self.storage_ip + '/Login'
        self._url = 'https://{}/servlets/CelerraManagementServices'.format(
            self.storage_ip)
        context = enas_utils.create_ssl_context(configuration)
        if context:
            https_handler = url_request.HTTPSHandler(context=context)
        else:
            https_handler = url_request.HTTPSHandler()
        cookie_handler = url_request.HTTPCookieProcessor(
            http_cookiejar.CookieJar())
        self.url_opener = url_request.build_opener(https_handler,
                                                   cookie_handler)
        self._do_setup()

    def _do_setup(self):
        """Log in to the array; the session cookie is stored by url_opener."""
        credential = ('user=' + self.username
                      + '&password=' + self.password
                      + '&Login=Login')
        req = url_request.Request(self.auth_url, credential.encode(),
                                  constants.CONTENT_TYPE_URLENCODE)
        resp = self.url_opener.open(req)
        resp_body = resp.read()
        self._http_log_resp(resp, resp_body)

    def _http_log_req(self, req):
        """Log the request as an equivalent curl command line (debug only)."""
        if not self.debug:
            return

        string_parts = ['curl -i']
        string_parts.append(' -X %s' % req.get_method())

        for k in req.headers:
            header = ' -H "%s: %s"' % (k, req.headers[k])
            string_parts.append(header)

        if req.data:
            string_parts.append(" -d '%s'" % req.data)
        string_parts.append(' ' + req.get_full_url())
        LOG.debug("\nREQ: %s.\n", "".join(string_parts))

    def _http_log_resp(self, resp, body):
        """Log response status/headers/body (debug only)."""
        if not self.debug:
            return

        headers = str(resp.headers).replace('\n', '\\n')

        LOG.debug(
            'RESP: [%(code)s] %(resp_hdrs)s\n'
            'RESP BODY: %(resp_b)s.\n',
            {
                'code': resp.getcode(),
                'resp_hdrs': headers,
                'resp_b': body,
            }
        )

    def _request(self, req_body=None, method=None,
                 header=constants.CONTENT_TYPE_URLENCODE):
        """Send one request; 403 maps to NotAuthorized, others to
        ManilaException."""
        req = url_request.Request(self._url, req_body.encode(), header)
        if method not in (None, 'GET', 'POST'):
            req.get_method = lambda: method
        self._http_log_req(req)
        try:
            resp = self.url_opener.open(req)
            resp_body = resp.read()
            self._http_log_resp(resp, resp_body)
        except url_error.HTTPError as http_err:
            if '403' == str(http_err.code):
                raise exception.NotAuthorized()
            else:
                err = {'errorCode': -1,
                       'httpStatusCode': http_err.code,
                       'messages': str(http_err),
                       'request': req_body}
                msg = (_("The request is invalid. Reason: %(reason)s")
                       % {'reason': err})
                raise exception.ManilaException(message=msg)

        return resp_body

    def request(self, req_body=None, method=None,
                header=constants.CONTENT_TYPE_URLENCODE):
        """Send a request, re-authenticating once on NotAuthorized."""
        try:
            resp_body = self._request(req_body, method, header)
        except exception.NotAuthorized:
            LOG.debug("Login again because client certification "
                      "may be expired.")
            self._do_setup()
            resp_body = self._request(req_body, method, header)

        return resp_body


class SSHConnector(object):
    """Pooled SSH connector used for CLI commands against the array."""

    def __init__(self, configuration, debug=True):
        super(SSHConnector, self).__init__()
        self.storage_ip = configuration.emc_nas_server
        self.username = configuration.emc_nas_login
        self.password = configuration.emc_nas_password
        self.debug = debug

        self.sshpool = ssh_utils.SSHPool(ip=self.storage_ip,
                                         port=22,
                                         conn_timeout=None,
                                         login=self.username,
                                         password=self.password)

    def run_ssh(self, cmd_list, check_exit_code=False):
        """Quote and join cmd_list, run it over SSH, return (out, err)."""
        command = ' '.join(shlex.quote(cmd_arg) for cmd_arg in cmd_list)

        with self.sshpool.item() as ssh:
            try:
                out, err = processutils.ssh_execute(
                    ssh, command, check_exit_code=check_exit_code)
                self.log_request(command, out, err)

                return out, err
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error('Error running SSH command: %(cmd)s. '
                              'Error: %(excmsg)s.',
                              {'cmd': command, 'excmsg': e})

    def log_request(self, cmd, out, err):
        """Log command and its output (debug only)."""
        if not self.debug:
            return

        LOG.debug("\nSSH command: %s.\n", cmd)
        LOG.debug("SSH command output: out=%(out)s, err=%(err)s.\n",
                  {'out': out, 'err': err})
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/share/drivers/dell_emc/common/enas/constants.py0000664000175000017500000000333100000000000025447 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

STATUS_OK = 'ok'
STATUS_INFO = 'info'
STATUS_DEBUG = 'debug'
STATUS_WARNING = 'warning'
STATUS_ERROR = 'error'
STATUS_NOT_FOUND = 'not_found'

MSG_GENERAL_ERROR = '13690601492'
MSG_INVALID_VDM_ID = '14227341325'
MSG_INVALID_MOVER_ID = '14227341323'

MSG_FILESYSTEM_NOT_FOUND = "18522112101"
MSG_FILESYSTEM_EXIST = '13691191325'

MSG_VDM_EXIST = '13421840550'

MSG_SNAP_EXIST = '13690535947'

MSG_INTERFACE_NAME_EXIST = '13421840550'
MSG_INTERFACE_EXIST = '13691781136'
MSG_INTERFACE_INVALID_VLAN_ID = '13421850371'
MSG_INTERFACE_NON_EXISTENT = '13691781134'

MSG_JOIN_DOMAIN = '13157007726'
MSG_UNJOIN_DOMAIN = '13157007723'

# Necessary to retry when ENAS database is locked for provisioning operation
MSG_CODE_RETRY = '13421840537'

IP_ALLOCATIONS = 2

CONTENT_TYPE_URLENCODE = {'Content-Type': 'application/x-www-form-urlencoded'}

# NOTE(review): empty XML header looks suspicious — upstream manila defines
# this as the full '<?xml version="1.0" ...?>' declaration; possibly lost in
# extraction. Confirm against the XML API request builder before relying on
# this value.
XML_HEADER = ''
XML_NAMESPACE = 'http://www.emc.com/schemas/celerra/xml_api'

CIFS_ACL_FULLCONTROL = 'fullcontrol'
CIFS_ACL_READ = 'read'

SSH_DEFAULT_RETRY_PATTERN = r'Error 2201:.*: unable to acquire lock\(s\)'
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/share/drivers/dell_emc/common/enas/utils.py0000664000175000017500000001325600000000000024602 0ustar00zuulzuul00000000000000# Copyright (c) 2014 EMC Corporation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import fnmatch
import types

from oslo_config import cfg
from oslo_log import log
from oslo_utils import netutils
from oslo_utils import timeutils
from oslo_utils import units
import ssl

CONF = cfg.CONF

LOG = log.getLogger(__name__)


def decorate_all_methods(decorator, debug_only=False):
    """Class decorator factory: wrap all public methods with ``decorator``.

    When ``debug_only`` is set and debug logging is off, the class is
    returned unmodified.
    """
    if debug_only and not CONF.debug:
        return lambda cls: cls

    def _decorate_all_methods(cls):
        # Only plain functions with public names are wrapped.
        for attr_name, attr_val in cls.__dict__.items():
            if (isinstance(attr_val, types.FunctionType) and
                    not attr_name.startswith("_")):
                setattr(cls, attr_name, decorator(attr_val))
        return cls

    return _decorate_all_methods


def log_enter_exit(func):
    """Decorator logging entry, exit, duration and return value (debug
    only)."""
    if not CONF.debug:
        return func

    def inner(self, *args, **kwargs):
        LOG.debug("Entering %(cls)s.%(method)s.",
                  {'cls': self.__class__.__name__,
                   'method': func.__name__})
        start = timeutils.utcnow()
        ret = func(self, *args, **kwargs)
        end = timeutils.utcnow()
        LOG.debug("Exiting %(cls)s.%(method)s. "
                  "Spent %(duration)s sec. "
                  "Return %(return)s.",
                  {'cls': self.__class__.__name__,
                   'duration': timeutils.delta_seconds(start, end),
                   'method': func.__name__,
                   'return': ret})
        return ret
    return inner


def do_match_any(full, matcher_list):
    """Finds items that match any of the matchers.

    :param full: Full item list
    :param matcher_list: The list of matchers. Each matcher supports
        Unix shell-style wildcards
    :return: The matched items set and the unmatched items set
    """
    matched = set()
    not_matched = set()
    full = set([item.strip() for item in full])
    matcher_list = set([item.strip() for item in matcher_list])
    for matcher in matcher_list:
        for item in full:
            if fnmatch.fnmatchcase(item, matcher):
                matched.add(item)
    not_matched = full - matched
    return matched, not_matched


def create_ssl_context(configuration):
    """Create context for ssl verification.

    .. note:: starting from python 2.7.9 ssl adds create_default_context.
       We need to keep compatibility with previous python as well.
    """
    try:
        if configuration.emc_ssl_cert_verify:
            context = ssl.create_default_context(
                capath=configuration.emc_ssl_cert_path)
        else:
            # Verification explicitly disabled by configuration.
            context = ssl.create_default_context()
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
    except AttributeError:
        LOG.warning('Creating ssl context is not supported on this '
                    'version of Python, ssl verification is disabled.')
        context = None
    return context


def parse_ipaddr(text):
    """Parse the output of VNX server_export command, get IPv4/IPv6 addresses.

    Example:
    input: 192.168.100.102:[fdf8:f53b:82e4::57]:[fdf8:f53b:82e4::54]
    output: ['192.168.100.102', '[fdf8:f53b:82e4::57]',
             '[fdf8:f53b:82e4::54]']

    :param text: The output of VNX server_export command.
    :return: The list of IPv4/IPv6 addresses. The IPv6 address enclosed by [].
    """
    rst = []
    stk = []
    ipaddr = ''
    it = iter(text)
    try:
        while True:
            i = next(it)
            # ':' outside brackets separates addresses.
            if i == ':' and not stk and ipaddr:
                rst.append(ipaddr)
                ipaddr = ''
            elif i == ':' and not ipaddr:
                continue
            elif i == '[':
                stk.append(i)
            elif i == ']':
                rst.append('[%s]' % ipaddr)
                stk.pop()
                ipaddr = ''
            else:
                ipaddr += i
    except StopIteration:
        # Flush any trailing address once the input is exhausted.
        if ipaddr:
            rst.append(ipaddr)
    return rst


def convert_ipv6_format_if_needed(ip_addr):
    """Convert IPv6 address format if needed. The IPv6 address enclosed by [].

    For the invalid IPv6 cidr, its format will not be changed.

    :param ip_addr: IPv6 address.
:return: Converted IPv6 address. """ if netutils.is_valid_ipv6_cidr(ip_addr): ip_addr = '[%s]' % ip_addr return ip_addr def export_unc_path(ip_addr): """Convert IPv6 address to valid UNC path. In Microsoft Windows OS, UNC (Uniform Naming Convention) specifies a common syntax to describe the location of a network resource. The colon which used by IPv6 is an illegal character in a UNC path name. So the IPv6 address need to be converted to valid UNC path. References: - https://en.wikipedia.org/wiki/IPv6_address #Literal_IPv6_addresses_in_UNC_path_names - https://en.wikipedia.org/wiki/Path_(computing)#Uniform_Naming_Convention :param ip_addr: IPv6 address. :return: UNC path. """ unc_suffix = '.ipv6-literal.net' if netutils.is_valid_ipv6(ip_addr): ip_addr = ip_addr.replace(':', '-') + unc_suffix return ip_addr def bytes_to_gb(size): return round(float(size) / units.Gi, 2) def mb_to_gb(size): return bytes_to_gb(size * units.Mi) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/common/enas/xml_api_parser.py0000664000175000017500000002342500000000000026446 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import re from lxml import etree class XMLAPIParser(object): def __init__(self): # The following Boolean acts as the flag for the common sub-element. # For instance: # #

  • server_1
  • # # #
  • interface_1
  • #
    self.is_QueryStatus = False self.is_CifsServers = False self.is_Aliases = False self.is_MoverStatus = False self.is_TaskResponse = False self.is_Vdm = False self.is_Interfaces = False self.elt = {} def _remove_ns(self, tag): i = tag.find('}') if i >= 0: tag = tag[i + 1:] return tag def parse(self, xml): result = { 'type': None, 'taskId': None, 'maxSeverity': None, 'objects': [], 'problems': [], } events = ("start", "end") context = etree.iterparse(io.BytesIO(xml), events=events) for action, elem in context: self.tag = self._remove_ns(elem.tag) func = self._get_func(action, self.tag) if func in vars(XMLAPIParser): if action == 'start': eval('self.' + func)(elem, result) # nosec B307 elif action == 'end': eval('self.' + func)() # nosec B307 return result def _get_func(self, action, tag): if tag == 'W2KServerData': return action + '_' + 'w2k_server_data' temp_list = re.sub(r"([A-Z])", r" \1", tag).split() if temp_list: func_name = action + '_' + '_'.join(temp_list) else: func_name = action + '_' + tag return func_name.lower() def _copy_property(self, source, target, property, list_property=None): for key in property: if key in source: target[key] = source[key] if list_property: for key in list_property: if key in source: target[key] = source[key].split() def _append_elm_property(self, elm, result, property, identifier): for obj in result['objects']: if (identifier in obj and identifier in elm.attrib and elm.attrib[identifier] == obj[identifier]): for key, value in elm.attrib.items(): if key in property: obj[key] = value def _append_element(self, elm, result, property, list_property, identifier): sub_elm = {} self._copy_property(elm.attrib, sub_elm, property, list_property) for obj in result['objects']: if (identifier in obj and identifier in elm.attrib and elm.attrib[identifier] == obj[identifier]): if self.tag in obj: obj[self.tag].append(sub_elm) else: obj[self.tag] = [sub_elm] def start_task_response(self, elm, result): self.is_TaskResponse = True 
result['type'] = 'TaskResponse' self._copy_property(elm.attrib, result, ['taskId']) def end_task_response(self): self.is_TaskResponse = False def start_fault(self, elm, result): result['type'] = 'Fault' def start_status(self, elm, result): if self.is_TaskResponse: result['maxSeverity'] = elm.attrib['maxSeverity'] elif self.is_MoverStatus or self.is_Vdm: self.elt['maxSeverity'] = elm.attrib['maxSeverity'] def start_query_status(self, elm, result): self.is_QueryStatus = True result['type'] = 'QueryStatus' self._copy_property(elm.attrib, result, ['maxSeverity']) def end_query_status(self): self.is_QueryStatus = False def start_problem(self, elm, result): self.elt = {} properties = ('message', 'messageCode') self._copy_property(elm.attrib, self.elt, properties) result['problems'].append(self.elt) def start_description(self, elm, result): self.elt['Description'] = elm.text def start_action(self, elm, result): self.elt['Action'] = elm.text def start_diagnostics(self, elm, result): self.elt['Diagnostics'] = elm.text def start_file_system(self, elm, result): self.elt = {} property = ( 'fileSystem', 'name', 'type', 'storages', 'volume', 'dataServicePolicies', 'internalUse', ) list_property = ('storagePools',) self._copy_property(elm.attrib, self.elt, property, list_property) result['objects'].append(self.elt) def start_file_system_capacity_info(self, elm, result): property = ('volumeSize',) identifier = 'fileSystem' self._append_elm_property(elm, result, property, identifier) def start_storage_pool(self, elm, result): self.elt = {} property = ('name', 'autoSize', 'usedSize', 'diskType', 'pool', 'dataServicePolicies', 'virtualProvisioning') list_property = ('movers',) self._copy_property(elm.attrib, self.elt, property, list_property) result['objects'].append(self.elt) def start_system_storage_pool_data(self, elm, result): property = ('greedy', 'isBackendPool') self._copy_property(elm.attrib, self.elt, property) def start_mover(self, elm, result): self.elt = {} property = 
('name', 'host', 'mover', 'role') list_property = ('ntpServers', 'standbyFors', 'standbys') self._copy_property(elm.attrib, self.elt, property, list_property) result['objects'].append(self.elt) def start_mover_status(self, elm, result): self.is_MoverStatus = True property = ('version', 'csTime', 'clock', 'timezone', 'uptime') identifier = 'mover' self._append_elm_property(elm, result, property, identifier) def end_mover_status(self): self.is_MoverStatus = False def start_mover_dns_domain(self, elm, result): property = ('name', 'protocol') list_property = ('servers',) identifier = 'mover' self._append_element(elm, result, property, list_property, identifier) def start_mover_interface(self, elm, result): property = ( 'name', 'device', 'up', 'ipVersion', 'netMask', 'ipAddress', 'vlanid', ) identifier = 'mover' self._append_element(elm, result, property, None, identifier) def start_logical_network_device(self, elm, result): property = ('name', 'type', 'speed') list_property = ('interfaces',) identifier = 'mover' self._append_element(elm, result, property, list_property, identifier) def start_vdm(self, elm, result): self.is_Vdm = True self.elt = {} property = ('name', 'state', 'mover', 'vdm') self._copy_property(elm.attrib, self.elt, property) result['objects'].append(self.elt) def end_vdm(self): self.is_Vdm = False def start_interfaces(self, elm, result): self.is_Interfaces = True self.elt['Interfaces'] = [] def end_interfaces(self): self.is_Interfaces = False def start_li(self, elm, result): if self.is_CifsServers: self.elt['CifsServers'].append(elm.text) elif self.is_Aliases: self.elt['Aliases'].append(elm.text) elif self.is_Interfaces: self.elt['Interfaces'].append(elm.text) def start_cifs_server(self, elm, result): self.elt = {} property = ('type', 'localUsers', 'name', 'mover', 'moverIdIsVdm') list_property = ('interfaces',) self._copy_property(elm.attrib, self.elt, property, list_property) result['objects'].append(self.elt) def start_aliases(self, elm, result): 
self.is_Aliases = True self.elt['Aliases'] = [] def end_aliases(self): self.is_Aliases = False def start_w2k_server_data(self, elm, result): property = ('domain', 'compName', 'domainJoined') self._copy_property(elm.attrib, self.elt, property) def start_cifs_share(self, elm, result): self.elt = {} property = ('path', 'fileSystem', 'name', 'mover', 'moverIdIsVdm') self._copy_property(elm.attrib, self.elt, property) result['objects'].append(self.elt) def start_cifs_servers(self, elm, result): self.is_CifsServers = True self.elt['CifsServers'] = [] def end_cifs_servers(self): self.is_CifsServers = False def start_checkpoint(self, elm, result): self.elt = {} property = ('checkpointOf', 'name', 'checkpoint', 'state') self._copy_property(elm.attrib, self.elt, property) result['objects'].append(self.elt) def start_mount(self, elm, result): self.elt = {} property = ('fileSystem', 'path', 'mover', 'moverIdIsVdm') self._copy_property(elm.attrib, self.elt, property) result['objects'].append(self.elt) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/driver.py0000664000175000017500000003236700000000000022523 0ustar00zuulzuul00000000000000# Copyright (c) 2019 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ EMC specific NAS storage driver. This driver is a pluggable driver that allows specific EMC NAS devices to be plugged-in as the underlying backend. 
Use the Manila configuration variable "share_backend_name" to specify, which backend plugins to use. """ from oslo_config import cfg from oslo_log import log from manila.share import driver from manila.share.drivers.dell_emc import plugin_manager as manager EMC_NAS_OPTS = [ cfg.StrOpt('emc_nas_login', help='User name for the EMC server.'), cfg.StrOpt('emc_nas_password', secret=True, help='Password for the EMC server.'), cfg.HostAddressOpt('emc_nas_server', help='EMC server hostname or IP address.'), cfg.PortOpt('emc_nas_server_port', default=8080, help='Port number for the EMC server.'), cfg.BoolOpt('emc_nas_server_secure', default=True, help='Use secure connection to server.'), cfg.StrOpt('emc_share_backend', ignore_case=True, choices=['powerscale', 'isilon', 'vnx', 'unity', 'powermax', 'powerstore', 'powerflex'], help='Share backend.'), cfg.StrOpt('emc_nas_root_dir', help='The root directory where shares will be located.'), cfg.BoolOpt('emc_ssl_cert_verify', default=True, help='If set to False the https client will not validate the ' 'SSL certificate of the backend endpoint.'), cfg.StrOpt('emc_ssl_cert_path', help='Can be used to specify a non default path to a ' 'CA_BUNDLE file or directory with certificates of trusted ' 'CAs, which will be used to validate the backend.') ] LOG = log.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(EMC_NAS_OPTS) class EMCShareDriver(driver.ShareDriver): """EMC specific NAS driver. 
Allows for NFS and CIFS NAS storage usage.""" def __init__(self, *args, **kwargs): self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(EMC_NAS_OPTS) self.backend_name = self.configuration.safe_get( 'emc_share_backend') else: self.backend_name = CONF.emc_share_backend self.backend_name = self.backend_name or 'EMC_NAS_Storage' self.plugin_manager = manager.EMCPluginManager( namespace='manila.share.drivers.dell_emc.plugins') LOG.info("BACKEND IS: %s", self.backend_name) if self.backend_name == 'vnx': LOG.warning('Dell EMC VNX share driver has been deprecated and is ' 'expected to be removed in a future release.') if self.backend_name == 'isilon': self.backend_name = 'powerscale' LOG.warning('Dell EMC isilon share driver has been deprecated and ' 'is renamed to powerscale. It is expected ' 'to be removed in a future release.') self.plugin = self.plugin_manager.load_plugin( self.backend_name, configuration=self.configuration) LOG.info(f"PLUGIN HAS: {self.plugin.__dict__}") super(EMCShareDriver, self).__init__( self.plugin.driver_handles_share_servers, *args, **kwargs) self.dhss_mandatory_security_service_association = ( self.plugin.dhss_mandatory_security_service_association) self.ipv6_implemented = getattr(self.plugin, 'ipv6_implemented', False) self.revert_to_snap_support = getattr( self.plugin, 'revert_to_snap_support', False) self.shrink_share_support = getattr( self.plugin, 'shrink_share_support', False) self.manage_existing_support = getattr( self.plugin, 'manage_existing_support', False) self.manage_existing_with_server_support = getattr( self.plugin, 'manage_existing_with_server_support', False) self.manage_existing_snapshot_support = getattr( self.plugin, 'manage_existing_snapshot_support', False) self.manage_snapshot_with_server_support = getattr( self.plugin, 'manage_snapshot_with_server_support', False) self.manage_server_support = getattr( self.plugin, 'manage_server_support', False) 
self.get_share_server_network_info_support = getattr( self.plugin, 'get_share_server_network_info_support', False) def manage_existing(self, share, driver_options): """manage an existing share""" if self.manage_existing_support: return self.plugin.manage_existing(share, driver_options) else: return NotImplementedError() def manage_existing_with_server(self, share, driver_options, share_server=None): """manage an existing share""" if self.manage_existing_with_server_support: return self.plugin.manage_existing_with_server( share, driver_options, share_server) else: return NotImplementedError() def manage_existing_snapshot(self, snapshot, driver_options): """manage an existing share snapshot""" if self.manage_existing_snapshot_support: return self.plugin.manage_existing_snapshot(snapshot, driver_options) else: return NotImplementedError() def manage_existing_snapshot_with_server(self, snapshot, driver_options, share_server=None): """manage an existing share snapshot""" if self.manage_snapshot_with_server_support: return self.plugin.manage_existing_snapshot_with_server( snapshot, driver_options, share_server=None) else: return NotImplementedError() def manage_server(self, context, share_server, identifier, driver_options): if self.manage_server_support: return self.plugin.manage_server(context, share_server, identifier, driver_options) else: return NotImplementedError() def get_share_server_network_info( self, context, share_server, identifier, driver_options): if self.get_share_server_network_info_support: return self.plugin.get_share_server_network_info( context, share_server, identifier, driver_options) else: return NotImplementedError() def unmanage_server(self, server_details, security_services=None): LOG.info('Dell EMC driver will unmanage share server: %s out of ' 'OpenStack.', server_details.get('server_id')) def unmanage(self, share): LOG.info('Dell EMC driver will unmanage share: %s out of ' 'OpenStack.', share.get('id')) def unmanage_with_server(self, share, 
share_server=None): LOG.info('Dell EMC driver will unmanage share: %s out of ' 'OpenStack.', share.get('id')) def unmanage_snapshot(self, snapshot): LOG.info('Dell EMC driver will unmanage snapshot: %s out of ' 'OpenStack.', snapshot.get('id')) def unmanage_snapshot_with_server(self, snapshot, share_server=None): LOG.info('Dell EMC driver will unmanage snapshot: %s out of ' 'OpenStack.', snapshot.get('id')) def create_share(self, context, share, share_server=None): """Is called to create share.""" location = self.plugin.create_share(context, share, share_server) return location def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create share from snapshot.""" location = self.plugin.create_share_from_snapshot( context, share, snapshot, share_server) return location def extend_share(self, share, new_size, share_server=None): """Is called to extend share.""" self.plugin.extend_share(share, new_size, share_server) def shrink_share(self, share, new_size, share_server=None): """Is called to shrink share.""" if self.shrink_share_support: self.plugin.shrink_share(share, new_size, share_server) else: raise NotImplementedError() def create_snapshot(self, context, snapshot, share_server=None): """Is called to create snapshot.""" return self.plugin.create_snapshot(context, snapshot, share_server) def delete_share(self, context, share, share_server=None): """Is called to remove share.""" self.plugin.delete_share(context, share, share_server) def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove snapshot.""" self.plugin.delete_snapshot(context, snapshot, share_server) def ensure_share(self, context, share, share_server=None): """Invoked to sure that share is exported.""" self.plugin.ensure_share(context, share, share_server) def allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" self.plugin.allow_access(context, share, access, 
share_server) def deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" self.plugin.deny_access(context, share, access, share_server) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access to the share.""" return self.plugin.update_access(context, share, access_rules, add_rules, delete_rules, share_server) def check_for_setup_error(self): """Check for setup error.""" self.plugin.check_for_setup_error() def do_setup(self, context): """Any initialization the share driver does while starting.""" self.plugin.connect(self, context) def _update_share_stats(self): """Retrieve stats info from share.""" backend_name = self.configuration.safe_get( 'share_backend_name') or "EMC_NAS_Storage" data = dict( share_backend_name=backend_name, vendor_name='Dell EMC', storage_protocol='NFS_CIFS', snapshot_support=True, create_share_from_snapshot_support=True, revert_to_snapshot_support=self.revert_to_snap_support) self.plugin.update_share_stats(data) super(EMCShareDriver, self)._update_share_stats(data) LOG.info(f"Updated share stats: {self._stats}") def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return self.plugin.get_network_allocations_number() def _setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" # NOTE(felipe_rodrigues): keep legacy network_info support as a dict. 
network_info = network_info[0] return self.plugin.setup_server(network_info, metadata) def _teardown_server(self, server_details, security_services=None): """Teardown share server.""" return self.plugin.teardown_server(server_details, security_services) def get_configured_ip_versions(self): if self.ipv6_implemented: return [4, 6] else: return [4] def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): if self.revert_to_snap_support: return self.plugin.revert_to_snapshot(context, snapshot, share_access_rules, snapshot_access_rules, share_server) else: raise NotImplementedError() def get_default_filter_function(self): if hasattr(self.plugin, 'get_default_filter_function'): return self.plugin.get_default_filter_function() return None def get_backend_info(self, context): """Get driver and array configuration parameters.""" if hasattr(self.plugin, 'get_backend_info'): return self.plugin.get_backend_info(context) raise NotImplementedError() def ensure_shares(self, context, shares): """Invoked to ensure that shares are exported.""" if hasattr(self.plugin, 'ensure_shares'): return self.plugin.ensure_shares(context, shares) raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugin_manager.py0000664000175000017500000000216200000000000024206 0ustar00zuulzuul00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """EMC Share Driver Plugin Framework.""" from stevedore import extension class EMCPluginManager(object): def __init__(self, namespace): self.namespace = namespace self.extension_manager = extension.ExtensionManager(namespace) def load_plugin(self, name, *args, **kwargs): for ext in self.extension_manager.extensions: if ext.name == name: storage_conn = ext.plugin(*args, **kwargs) return storage_conn return None ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9056718 manila-21.0.0/manila/share/drivers/dell_emc/plugins/0000775000175000017500000000000000000000000022324 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/__init__.py0000664000175000017500000000000000000000000024423 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/base.py0000664000175000017500000000601600000000000023613 0ustar00zuulzuul00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""EMC Share Driver Base Plugin API """ import abc class StorageConnection(metaclass=abc.ABCMeta): """Subclasses should implement storage backend specific functionality.""" def __init__(self, *args, **kwargs): # NOTE(vponomaryov): redefine 'driver_handles_share_servers' within # plugin. self.driver_handles_share_servers = None self.dhss_mandatory_security_service_association = {} @abc.abstractmethod def create_share(self, context, share, share_server): """Is called to create share.""" @abc.abstractmethod def create_snapshot(self, context, snapshot, share_server): """Is called to create snapshot.""" @abc.abstractmethod def delete_share(self, context, share, share_server): """Is called to remove share.""" @abc.abstractmethod def delete_snapshot(self, context, snapshot, share_server): """Is called to remove snapshot.""" @abc.abstractmethod def ensure_share(self, context, share, share_server): """Invoked to ensure that share is exported.""" @abc.abstractmethod def extend_share(self, share, new_size, share_server): """Invoked to extend share.""" @abc.abstractmethod def allow_access(self, context, share, access, share_server): """Allow access to the share.""" @abc.abstractmethod def deny_access(self, context, share, access, share_server): """Deny access to the share.""" def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given share.""" raise NotImplementedError() def raise_connect_error(self): """Check for setup error.""" pass def connect(self, emc_share_driver, context): """Any initialization the share driver does while starting.""" pass def update_share_stats(self, stats_dict): """Add key/values to stats_dict.""" pass def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return 0 @abc.abstractmethod def setup_server(self, network_info, metadata=None): """Set up and configure share server with given network parameters.""" @abc.abstractmethod def 
teardown_server(self, server_details, security_services=None): """Teardown share server.""" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9056718 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerflex/0000775000175000017500000000000000000000000024337 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerflex/__init__.py0000664000175000017500000000000000000000000026436 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerflex/connection.py0000664000175000017500000004061400000000000027055 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ PowerFlex specific NAS backend plugin. 
""" from oslo_config import cfg from oslo_log import log from oslo_utils import units from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.plugins import base as driver from manila.share.drivers.dell_emc.plugins.powerflex import ( object_manager as manager) """Version history: 1.0 - Initial version """ VERSION = "1.0" CONF = cfg.CONF LOG = log.getLogger(__name__) POWERFLEX_OPTS = [ cfg.StrOpt('powerflex_storage_pool', help='Storage pool used to provision NAS.'), cfg.StrOpt('powerflex_protection_domain', help='Protection domain to use.'), cfg.StrOpt('dell_nas_backend_host', help='Dell NAS backend hostname or IP address.'), cfg.IntOpt('dell_nas_backend_port', default=443, help='Port number to use with the Dell NAS backend.'), cfg.StrOpt('dell_nas_server', help='Root directory or NAS server which owns the shares.'), cfg.StrOpt('dell_nas_login', help='User name for the Dell NAS backend.'), cfg.StrOpt('dell_nas_password', secret=True, help='Password for the Dell NAS backend.') ] class PowerFlexStorageConnection(driver.StorageConnection): """Implements PowerFlex specific functionality for Dell Manila driver.""" def __init__(self, *args, **kwargs): """Do initialization""" LOG.debug('Invoking base constructor for Manila \ Dell PowerFlex SDNAS Driver.') super(PowerFlexStorageConnection, self).__init__(*args, **kwargs) LOG.debug('Setting up attributes for Manila \ Dell PowerFlex SDNAS Driver.') if 'configuration' in kwargs: kwargs['configuration'].append_config_values(POWERFLEX_OPTS) self.manager = None self.server = None self._username = None self._password = None self._server_url = None self._root_dir = None self._verify_ssl_cert = None self._shares = {} self.verify_certificate = None self.certificate_path = None self.export_path = None self.driver_handles_share_servers = False self.reserved_percentage = None self.reserved_snapshot_percentage = None self.reserved_share_extend_percentage = 
None
        self.max_over_subscription_ratio = None

    def connect(self, dell_share_driver, context):
        """Connects to Dell PowerFlex SDNAS server.

        Reads the dell_* / powerflex_* options from the parent driver's
        configuration, validates them, and builds the REST object
        manager used for all subsequent backend calls.

        :param dell_share_driver: parent driver whose configuration
            supplies the backend options read here
        :param context: request context (unused here)
        :raises BadConfigurationException: if the REST credentials or
            the SSL certificate path are missing
        """
        LOG.debug('Reading configuration parameters for Manila \
                  Dell PowerFlex SDNAS Driver.')
        config = dell_share_driver.configuration
        get_config_value = config.safe_get
        self.verify_certificate = get_config_value("dell_ssl_cert_verify")
        self.rest_ip = get_config_value("dell_nas_backend_host")
        self.rest_port = get_config_value("dell_nas_backend_port")
        self.nas_server = get_config_value("dell_nas_server")
        self.storage_pool = get_config_value("powerflex_storage_pool")
        self.protection_domain = get_config_value(
            "powerflex_protection_domain")
        self.rest_username = get_config_value("dell_nas_login")
        self.rest_password = get_config_value("dell_nas_password")
        # NOTE(review): certificate_path is only assigned when
        # verification is enabled; presumably it is initialized to None
        # elsewhere before being passed to the manager below -- confirm.
        if self.verify_certificate:
            self.certificate_path = get_config_value(
                "dell_ssl_certificate_path")
        if not all([self.rest_ip, self.rest_username, self.rest_password]):
            message = _("REST server IP, username and password"
                        " must be specified.")
            raise exception.BadConfigurationException(reason=message)
        # validate certificate settings
        if self.verify_certificate and not self.certificate_path:
            message = _("Path to REST server's certificate must be specified.")
            raise exception.BadConfigurationException(reason=message)
        LOG.debug('Initializing Dell PowerFlex SDNAS Layer.')
        self.host_url = ("https://%(server_ip)s:%(server_port)s" %
                         {"server_ip": self.rest_ip,
                          "server_port": self.rest_port})
        LOG.info("REST server IP: %(ip)s, port: %(port)s, "
                 "username: %(user)s. Verify server's certificate: "
                 "%(verify_cert)s.",
                 {
                     "ip": self.rest_ip,
                     "port": self.rest_port,
                     "user": self.rest_username,
                     "verify_cert": self.verify_certificate,
                 })
        self.manager = manager.StorageObjectManager(self.host_url,
                                                    self.rest_username,
                                                    self.rest_password,
                                                    self.export_path,
                                                    self.certificate_path,
                                                    self.verify_certificate)
        # configuration for share status update
        self.reserved_percentage = config.safe_get(
            'reserved_share_percentage')
        if self.reserved_percentage is None:
            self.reserved_percentage = 0
        self.reserved_snapshot_percentage = config.safe_get(
            'reserved_share_from_snapshot_percentage')
        if self.reserved_snapshot_percentage is None:
            self.reserved_snapshot_percentage = self.reserved_percentage
        self.reserved_share_extend_percentage = config.safe_get(
            'reserved_share_extend_percentage')
        if self.reserved_share_extend_percentage is None:
            self.reserved_share_extend_percentage = self.reserved_percentage
        self.max_over_subscription_ratio = config.safe_get(
            'max_over_subscription_ratio')

    def create_share(self, context, share, share_server):
        """Is called to create a share.

        :param share: share model to create
        :return: export locations of the new share
        """
        LOG.debug(f'Creating {share["share_proto"]} share.')
        location = self._create_nfs_share(share)
        return location

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        """Is called to create a share from an existing snapshot."""
        raise NotImplementedError()

    def allow_access(self, context, share, access, share_server):
        """Is called to allow access to a share."""
        raise NotImplementedError()

    def check_for_setup_error(self):
        """Is called to check for setup error."""

    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, share_server=None):
        """Is called to update share access.

        :param access_rules: full list of rules to apply
        :return: dict mapping access_id to its resulting state
        """
        LOG.debug(f'Updating access to {share["share_proto"]} share.')
        return self._update_nfs_access(share, access_rules)

    def create_snapshot(self, context, snapshot, share_server):
        """Is called to create snapshot.

        :param snapshot: snapshot model to create
        """
        export_name = snapshot['share_name']
        LOG.debug(f'Retrieving filesystem ID for share {export_name}')
        filesystem_id = self.manager.get_fsid_from_export_name(export_name)
        LOG.debug(f'Retrieving snapshot ID for filesystem {filesystem_id}')
        # NOTE(review): despite the variable name, the manager appears to
        # return a boolean success flag here, not a snapshot ID -- confirm.
        snapshot_id = self.manager.create_snapshot(snapshot['name'],
                                                   filesystem_id)
        if snapshot_id:
            LOG.info("Snapshot %(id)s successfully created.",
                     {'id': snapshot['id']})

    def delete_snapshot(self, context, snapshot, share_server):
        """Is called to delete snapshot.

        :param snapshot: snapshot model to delete
        :raises ShareBackendException: if the backend deletion fails
        """
        snapshot_name = snapshot['name']
        filesystem_id = self.manager.get_fsid_from_snapshot_name(snapshot_name)
        LOG.debug(f'Retrieving filesystem ID for snapshot {snapshot_name}')
        snapshot_deleted = self.manager.delete_filesystem(filesystem_id)
        if not snapshot_deleted:
            message = (
                _('Failed to delete snapshot "%(snapshot)s".') %
                {'snapshot': snapshot['name']})
            LOG.error(message)
            raise exception.ShareBackendException(msg=message)
        else:
            LOG.info("Snapshot %(id)s successfully deleted.",
                     {'id': snapshot['id']})

    def delete_share(self, context, share, share_server):
        """Is called to delete a share."""
        LOG.debug(f'Deleting {share["share_proto"]} share.')
        self._delete_nfs_share(share)

    def deny_access(self, context, share, access, share_server):
        """Is called to deny access to a share."""
        raise NotImplementedError()

    def ensure_share(self, context, share, share_server):
        """Is called to ensure a share is exported."""

    def extend_share(self, share, new_size, share_server=None):
        """Is called to extend a share.

        :param new_size: new share size in GiB
        """
        # Converts the size from GiB to Bytes
        new_size_in_bytes = new_size * units.Gi
        LOG.debug(f"Extending {share['name']} to {new_size}GiB")
        filesystem_id = self.manager.get_filesystem_id(share['name'])
        self.manager.extend_export(filesystem_id, new_size_in_bytes)

    def setup_server(self, network_info, metadata=None):
        """Is called to set up a share server.

        Requires driver_handles_share_servers to be True.
""" raise NotImplementedError() def teardown_server(self, server_details, security_services=None): """Is called to teardown a share server. Requires driver_handles_share_servers to be True. """ raise NotImplementedError() def _create_nfs_share(self, share): """Creates an NFS share. In PowerFlex, an export (share) belongs to a filesystem. This function creates a filesystem and an export. """ LOG.debug(f'Retrieving Storage Pool ID for {self.storage_pool}') storage_pool_id = self.manager.get_storage_pool_id( self.protection_domain, self.storage_pool) nas_server_id = self.manager.get_nas_server_id(self.nas_server) LOG.debug(f"Creating filesystem {share['name']}") size_in_bytes = share['size'] * units.Gi filesystem_id = self.manager.create_filesystem(storage_pool_id, self.nas_server, share['name'], size_in_bytes) if not filesystem_id: message = { _('The requested NFS export "%(export)s"' ' was not created.') % {'export': share['name']}} LOG.error(message) raise exception.ShareBackendException(msg=message) LOG.debug(f"Creating export {share['name']}") export_id = self.manager.create_nfs_export(filesystem_id, share['name']) if not export_id: message = ( _('The requested NFS export "%(export)s"' ' was not created.') % {'export': share['name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) file_interfaces = self.manager.get_nas_server_interfaces( nas_server_id) export_path = self.manager.get_nfs_export_name(export_id) locations = self._get_nfs_location(file_interfaces, export_path) return locations def _delete_nfs_share(self, share): """Deletes a filesystem and its associated export.""" filesystem_id = self.manager.get_filesystem_id(share['name']) LOG.debug(f"Retrieving filesystem ID for filesystem {share['name']}") if filesystem_id is None: message = ('Attempted to delete NFS export "%s",' ' but the export does not appear to exist.') LOG.warning(message, share['name']) else: LOG.debug(f"Deleting filesystem ID {filesystem_id}") share_deleted = 
self.manager.delete_filesystem(filesystem_id) if not share_deleted: message = ( _('Failed to delete NFS export "%(export)s".') % {'export': share['name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) def _update_nfs_access(self, share, access_rules): """Updates access rules for NFS share type.""" nfs_rw_ips = set() nfs_ro_ips = set() access_updates = {} for rule in access_rules: if rule['access_type'].lower() != 'ip': message = (_("Only IP access type currently supported for " "NFS. Share provided %(share)s with rule type " "%(type)s") % {'share': share['display_name'], 'type': rule['access_type']}) LOG.error(message) access_updates.update({rule['access_id']: {'state': 'error'}}) else: if rule['access_level'] == const.ACCESS_LEVEL_RW: nfs_rw_ips.add(rule['access_to']) elif rule['access_level'] == const.ACCESS_LEVEL_RO: nfs_ro_ips.add(rule['access_to']) access_updates.update({rule['access_id']: {'state': 'active'}}) share_id = self.manager.get_nfs_export_id(share['name']) share_updated = self.manager.set_export_access(share_id, nfs_rw_ips, nfs_ro_ips) if not share_updated: message = ( _('Failed to update NFS access rules for "%(export)s".') % {'export': share['display_name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) return access_updates def update_share_stats(self, stats_dict): """Retrieve stats info from share.""" stats_dict['driver_version'] = VERSION stats_dict['storage_protocol'] = 'NFS' stats_dict['create_share_from_snapshot_support'] = False stats_dict['pools'] = [] storage_pool_id = self.manager.get_storage_pool_id( self.protection_domain, self.storage_pool ) total = free = used = provisioned = 0 statistic = self.manager.get_storage_pool_statistic(storage_pool_id) if statistic: total = statistic.get('maxCapacityInKb') // units.Mi free = statistic.get('netUnusedCapacityInKb') // units.Mi used = statistic.get('capacityInUseInKb') // units.Mi provisioned = statistic.get('primaryVacInKb') // units.Mi 
pool_stat = { 'pool_name': self.storage_pool, 'thin_provisioning': True, 'total_capacity_gb': total, 'free_capacity_gb': free, 'allocated_capacity_gb': used, 'provisioned_capacity_gb': provisioned, 'qos': False, 'reserved_percentage': self.reserved_percentage, 'reserved_snapshot_percentage': self.reserved_snapshot_percentage, 'reserved_share_extend_percentage': self.reserved_share_extend_percentage, 'max_over_subscription_ratio': self.max_over_subscription_ratio } stats_dict['pools'].append(pool_stat) def _get_nfs_location(self, file_interfaces, export_path): export_locations = [] for interface in file_interfaces: export_locations.append( {'path': f"{interface}:/{export_path}"}) return export_locations def get_default_filter_function(self): return 'share.size >= 3' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerflex/object_manager.py0000664000175000017500000003606100000000000027657 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client
import json

from oslo_log import log as logging
import requests

from manila import exception

LOG = logging.getLogger(__name__)


class StorageObjectManager(object):
    """Thin REST client for the Dell PowerFlex SDNAS API."""

    def __init__(self, host_url, username, password, export_path,
                 certificate_path=None, verify_ssl_cert=False):
        # host_url serves the legacy /api endpoints; base_url serves the
        # /rest (v1 file) endpoints.
        self.host_url = host_url
        self.base_url = host_url + '/rest'
        self.rest_username = username
        self.rest_password = password
        # Bearer token is obtained lazily on the first 401/403 response.
        self.rest_token = None
        self.got_token = False
        self.export_path = export_path
        self.verify_certificate = verify_ssl_cert
        self.certificate_path = certificate_path

    def _get_headers(self):
        # Includes the bearer token only once a login has succeeded.
        if self.got_token:
            return {"Content-type": "application/json",
                    "Accept": "application/json",
                    "Authorization": "Bearer " + self.rest_token}
        else:
            return {"Content-type": "application/json",
                    "Accept": "application/json"}

    def execute_powerflex_get_request(self, url, **url_params):
        # `url` may contain %-style placeholders filled from url_params.
        request = url % url_params
        res = requests.get(request,
                           headers=self._get_headers(),
                           verify=self._get_verify_cert())
        res = self._check_response(res, request, "GET")
        response = res.json()
        return res, response

    def execute_powerflex_post_request(self, url, params=None, **url_params):
        if not params:
            params = {}
        request = url % url_params
        res = requests.post(request,
                            data=json.dumps(params),
                            headers=self._get_headers(),
                            verify=self._get_verify_cert())
        res = self._check_response(res, request, "POST", params)
        response = None
        try:
            response = res.json()
        except ValueError:
            # Particular case for get_storage_pool_id which is not
            # a json object but a string
            response = res
        return res, response

    def execute_powerflex_delete_request(self, url, **url_params):
        request = url % url_params
        res = requests.delete(request,
                              headers=self._get_headers(),
                              verify=self._get_verify_cert())
        res = self._check_response(res, request, "DELETE")
        return res

    def execute_powerflex_patch_request(self, url, params=None, **url_params):
        if not params:
            params = {}
        request = url % url_params
        res = requests.patch(request,
                             data=json.dumps(params),
                             headers=self._get_headers(),
                             verify=self._get_verify_cert())
        res = self._check_response(res, request, "PATCH")
        return res

    def _check_response(self, response, request, request_type, params=None):
        # On 401/403: re-login, cache the new bearer token and replay the
        # original request once.  All outcomes are logged (ERROR on
        # non-200) and the (possibly replayed) response is returned.
        login_url = "/auth/login"
        if (response.status_code == http_client.UNAUTHORIZED or
                response.status_code == http_client.FORBIDDEN):
            LOG.info("Dell PowerFlex token is invalid, going to re-login "
                     "and get a new one.")
            login_request = self.base_url + login_url
            verify_cert = self._get_verify_cert()
            # Drop the stale token so the login request itself is sent
            # without an Authorization header.
            self.got_token = False
            payload = json.dumps({"username": self.rest_username,
                                  "password": self.rest_password})
            res = requests.post(login_request,
                                headers=self._get_headers(),
                                data=payload,
                                verify=verify_cert)
            if (res.status_code == http_client.UNAUTHORIZED or
                    res.status_code == http_client.FORBIDDEN):
                message = ("PowerFlex REST API access is still forbidden or "
                           "unauthorized, there might be an issue with your "
                           "credentials.")
                LOG.error(message)
                raise exception.NotAuthorized()
            else:
                token = res.json()["access_token"]
                self.rest_token = token
                self.got_token = True
                LOG.info("Going to perform request again %s with valid token.",
                         request)
                if (request_type == "GET"):
                    response = requests.get(request,
                                            headers=self._get_headers(),
                                            verify=verify_cert)
                elif (request_type == "POST"):
                    response = requests.post(request,
                                             headers=self._get_headers(),
                                             data=json.dumps(params),
                                             verify=verify_cert)
                elif (request_type == "DELETE"):
                    response = requests.delete(request,
                                               headers=self._get_headers(),
                                               verify=verify_cert)
                elif (request_type == "PATCH"):
                    response = requests.patch(request,
                                              headers=self._get_headers(),
                                              data=json.dumps(params),
                                              verify=verify_cert)
        level = logging.DEBUG
        if response.status_code != http_client.OK:
            level = logging.ERROR
        LOG.log(level, "REST REQUEST: %s with params %s",
                request, json.dumps(params))
        LOG.log(level, "REST RESPONSE: %s with params %s",
                response.status_code, response.text)
        return response

    def _get_verify_cert(self):
        # Value for requests' `verify` kwarg: False to skip verification,
        # or the path of the certificate to verify against.
        verify_cert = False
        if self.verify_certificate:
            verify_cert = self.certificate_path
        return verify_cert

    def create_filesystem(self, storage_pool_id, nas_server, name, size):
        """Creates a filesystem.

        :param storage_pool_id: ID of the storage pool backing the
            filesystem
        :param nas_server: name of the nas_server
        :param name: name of the filesystem
        :param size: size in bytes (callers convert from GiB)
        :return: ID of the filesystem if created successfully
        """
        nas_server_id = self.get_nas_server_id(nas_server)
        params = {
            "name": name,
            "size_total": size,
            "storage_pool_id": storage_pool_id,
            "nas_server_id": nas_server_id
        }
        url = f'{self.base_url}/v1/file-systems'
        res, response = self.execute_powerflex_post_request(url, params)
        if res.status_code == 201:
            return response["id"]

    def create_nfs_export(self, filesystem_id, name):
        """Creates an NFS export.

        :param filesystem_id: ID of the filesystem on which the export
                              will be created
        :param name: name of the NFS export
        :return: ID of the export if created successfully
        """
        params = {
            "file_system_id": filesystem_id,
            "path": "/" + str(name),
            "name": name
        }
        url = f'{self.base_url}/v1/nfs-exports'
        res, response = self.execute_powerflex_post_request(url, params)
        if res.status_code == 201:
            return response["id"]

    def delete_filesystem(self, filesystem_id):
        """Deletes a filesystem and all associated export.

        :param filesystem_id: ID of the filesystem to delete
        :return: True if deleted successfully
        """
        url = f'{self.base_url}/v1/file-systems/{filesystem_id}'
        res = self.execute_powerflex_delete_request(url)
        return res.status_code == 204

    def create_snapshot(self, name, filesystem_id):
        """Creates a snapshot of a filesystem.

        :param name: name of the snapshot
        :param filesystem_id: ID of the filesystem
        :return: True if created successfully
        """
        params = {
            "name": name
        }
        url = f'{self.base_url}/v1/file-systems/{filesystem_id}/snapshot'
        res, response = self.execute_powerflex_post_request(url, params)
        return res.status_code == 201

    def get_nas_server_id(self, nas_server):
        """Retrieves the NAS server ID.

        :param nas_server: NAS server name
        :return: ID of the NAS server if success
        """
        url = f'{self.base_url}/v1/nas-servers?select=id&name=eq.{nas_server}'
        res, response = self.execute_powerflex_get_request(url)
        if res.status_code == 200:
            return response[0]['id']

    def get_nfs_export_name(self, export_id):
        """Retrieves NFS Export name.

        :param export_id: ID of the NFS export
        :return: name of the NFS export if success
        """
        url = f'{self.base_url}/v1/nfs-exports/{export_id}?select=*'
        res, response = self.execute_powerflex_get_request(url)
        if res.status_code == 200:
            return response["name"]

    def get_filesystem_id(self, name):
        """Retrieves an ID for a filesystem.

        :param name: name of the filesystem
        :return: ID of the filesystem if success
        """
        url = f'{self.base_url}/v1/file-systems?select=id&name=eq.{name}'
        res, response = self.execute_powerflex_get_request(url)
        if res.status_code == 200:
            return response[0]['id']

    def get_nfs_export_id(self, name):
        """Retrieves NFS Export ID.

        :param name: name of the NFS export
        :return: id of the NFS export if success
        """
        url = f'{self.base_url}/v1/nfs-exports?select=id&name=eq.{name}'
        res, response = self.execute_powerflex_get_request(url)
        if res.status_code == 200:
            return response[0]['id']

    def get_storage_pool_id(self, protection_domain, storage_pool):
        """Retrieves the Storage Pool ID.

        :param protection_domain: protection domain name
        :param storage_pool: storage pool name
        :return: ID of the storage pool if success
        """
        params = {
            "protectionDomainName": protection_domain,
            "name": storage_pool
        }
        # Legacy /api endpoint; returns a bare string, not a JSON object
        # (see execute_powerflex_post_request).
        url = (f'{self.host_url}/api/types/StoragePool/instances/'
               'action/queryIdByKey')
        res, response = self.execute_powerflex_post_request(url, params)
        if res.status_code == 200:
            return response

    def set_export_access(self, export_id, rw_hosts, ro_hosts):
        """Sets the authorization access on the export.

        :param export_id: NFS export ID
        :param rw_hosts: a set of RW hosts
        :param ro_hosts: a set of RO hosts
        :return: True if operation succeeded
        """
        params = {
            "read_only_hosts": list(ro_hosts),
            "read_write_root_hosts": list(rw_hosts)
        }
        url = f'{self.base_url}/v1/nfs-exports/{export_id}'
        res = self.execute_powerflex_patch_request(url, params)
        return res.status_code == 204

    def extend_export(self, export_id, new_size):
        """Extends the size of a share to a new size.

        :param export_id: ID of the NFS export
        :param new_size: new size to allocate in bytes
        :return: True if extended successfully
        """
        params = {
            "size_total": new_size
        }
        url = f'{self.base_url}/v1/file-systems/{export_id}'
        res = self.execute_powerflex_patch_request(url, params)
        return res.status_code == 204

    def get_fsid_from_export_name(self, name):
        """Retrieves the Filesystem ID used by an export.

        :param name: name of the export
        :return: ID of the Filesystem which owns the export
        """
        url = (f'{self.base_url}/v1/nfs-exports'
               f'?select=file_system_id&name=eq.{name}')
        res, response = self.execute_powerflex_get_request(url)
        if res.status_code == 200:
            return response[0]['file_system_id']

    def get_fsid_from_snapshot_name(self, snapshot_name):
        """Retrieves the Filesystem ID used by a snapshot.

        :param snapshot_name: Name of the snapshot
        :return: ID of the parent filesystem of the snapshot
        """
        url = (f'{self.base_url}/v1/file-systems'
               f'?select=id&name=eq.{snapshot_name}')
        res, response = self.execute_powerflex_get_request(url)
        if res.status_code == 200:
            return response[0]['id']

    def get_storage_pool_spare_percentage(self, storage_pool_id):
        """Retrieves the spare capacity percentage of the storage pool.

        :param storage_pool_id: ID of the storage pool
        :return: Spare capacity percentage of the storage pool
        """
        url = f'{self.host_url}/api/instances/StoragePool::{storage_pool_id}'
        res, response = self.execute_powerflex_get_request(url)
        if res.status_code == 200:
            return response['sparePercentage']

    def get_storage_pool_statistic(self, storage_pool_id):
        """Retrieves the capacity statistics of the storage pool.

        :param storage_pool_id: ID of the storage pool
        :return: Statistics of the storage pool (capacities in KiB)
        """
        url = (f'{self.host_url}/api/instances/StoragePool::{storage_pool_id}'
               '/relationships/Statistics')
        res, response = self.execute_powerflex_get_request(url)
        if res.status_code == 200:
            statistics = {
                "maxCapacityInKb": response['maxCapacityInKb'],
                "capacityInUseInKb": response['capacityInUseInKb'],
                "netUnusedCapacityInKb": response['netUnusedCapacityInKb'],
                "primaryVacInKb": response['primaryVacInKb'],
            }
            return statistics

    def get_nas_server_interfaces(self, nas_server_id):
        """Retrieves the file interfaces for a given nas_server.
:param nas_server_id: ID of the NAS server :return: file interfaces of the NAS server """ url = (f'{self.base_url}/v1/file-interfaces' f'?select=ip_address&nas_server_id=eq.{nas_server_id}') res, response = self.execute_powerflex_get_request(url) if res.status_code == 200: return [i['ip_address'] for i in response] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9056718 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powermax/0000775000175000017500000000000000000000000024166 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powermax/__init__.py0000664000175000017500000000000000000000000026265 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powermax/connection.py0000664000175000017500000011062100000000000026700 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""PowerMax backend for the Dell EMC Manila driver.""" import copy import random from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import units from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.common.enas import constants from manila.share.drivers.dell_emc.common.enas import utils as enas_utils from manila.share.drivers.dell_emc.plugins import base as driver from manila.share.drivers.dell_emc.plugins.powermax import ( object_manager as manager) from manila.share import utils as share_utils from manila import utils """Version history: 1.0.0 - Initial version 2.0.0 - Implement IPv6 support 3.0.0 - Rebranding to PowerMax 3.1.0 - Access Host details prevents a read-only share mounts (bug #1845147) 3.2.0 - Wrong format of export locations (bug #1871999) 3.3.0 - Victoria release 3.4.0 - Wallaby release 3.5.0 - Xena release """ VERSION = "3.5.0" LOG = log.getLogger(__name__) POWERMAX_OPTS = [ cfg.StrOpt('powermax_server_container', help='Data mover to host the NAS server.'), cfg.ListOpt('powermax_share_data_pools', help='Comma separated list of pools that can be used to ' 'persist share data.'), cfg.ListOpt('powermax_ethernet_ports', help='Comma separated list of ports that can be used for ' 'share server interfaces. Members of the list ' 'can be Unix-style glob expressions.') ] CONF = cfg.CONF CONF.register_opts(POWERMAX_OPTS) @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class PowerMaxStorageConnection(driver.StorageConnection): """Implements powermax specific functionality for Dell EMC Manila driver. 
""" @enas_utils.log_enter_exit def __init__(self, *args, **kwargs): super(PowerMaxStorageConnection, self).__init__(*args, **kwargs) if 'configuration' in kwargs: kwargs['configuration'].append_config_values(POWERMAX_OPTS) self.mover_name = None self.pools = None self.manager = None self.pool_conf = None self.reserved_percentage = None self.reserved_snapshot_percentage = None self.reserved_share_extend_percentage = None self.driver_handles_share_servers = True self.port_conf = None self.ipv6_implemented = True self.dhss_mandatory_security_service_association = { 'nfs': None, 'cifs': ['active_directory', ] } def create_share(self, context, share, share_server=None): """Create a share and export it based on protocol used.""" share_name = share['id'] size = share['size'] * units.Ki share_proto = share['share_proto'].upper() # Validate the share protocol if share_proto not in ('NFS', 'CIFS'): raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) # Get the pool name from share host field pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share['host']) raise exception.InvalidHost(reason=message) # Validate share server self._share_server_validation(share_server) if share_proto == 'CIFS': vdm_name = self._get_share_server_name(share_server) server_name = vdm_name # Check if CIFS server exists. 
status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %s not found.") % server_name) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) self._allocate_container(share_name, size, share_server, pool_name) if share_proto == 'NFS': location = self._create_nfs_share(share_name, share_server) elif share_proto == 'CIFS': location = self._create_cifs_share(share_name, share_server) return [ {'path': location} ] def _share_server_validation(self, share_server): """Validate the share server.""" if not share_server: msg = _('Share server not provided') raise exception.InvalidInput(reason=msg) backend_details = share_server.get('backend_details') vdm = backend_details.get( 'share_server_name') if backend_details else None if vdm is None: message = _("No share server found.") LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def _allocate_container(self, share_name, size, share_server, pool_name): """Allocate file system for share.""" vdm_name = self._get_share_server_name(share_server) self._get_context('FileSystem').create( share_name, size, pool_name, vdm_name) def _allocate_container_from_snapshot(self, share, snapshot, share_server, pool_name): """Allocate file system from snapshot.""" vdm_name = self._get_share_server_name(share_server) interconn_id = self._get_context('Mover').get_interconnect_id( self.mover_name, self.mover_name) self._get_context('FileSystem').create_from_snapshot( share['id'], snapshot['id'], snapshot['share_id'], pool_name, vdm_name, interconn_id) nwe_size = share['size'] * units.Ki self._get_context('FileSystem').extend(share['id'], pool_name, nwe_size) @enas_utils.log_enter_exit def _create_cifs_share(self, share_name, share_server): """Create CIFS share.""" vdm_name = self._get_share_server_name(share_server) server_name = vdm_name # Get available CIFS Server and interface (one CIFS server per VDM) status, server = 
self._get_context('CIFSServer').get(server_name, vdm_name) if 'interfaces' not in server or len(server['interfaces']) == 0: message = (_("CIFS server %s doesn't have interface, " "so the share is inaccessible.") % server['compName']) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) interface = enas_utils.export_unc_path(server['interfaces'][0]) self._get_context('CIFSShare').create(share_name, server['name'], vdm_name) self._get_context('CIFSShare').disable_share_access(share_name, vdm_name) location = (r'\\%(interface)s\%(name)s' % {'interface': interface, 'name': share_name}) return location @enas_utils.log_enter_exit def _create_nfs_share(self, share_name, share_server): """Create NFS share.""" vdm_name = self._get_share_server_name(share_server) self._get_context('NFSShare').create(share_name, vdm_name) nfs_if = enas_utils.convert_ipv6_format_if_needed( share_server['backend_details']['nfs_if']) return ('%(nfs_if)s:/%(share_name)s' % {'nfs_if': nfs_if, 'share_name': share_name}) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Create a share from a snapshot - clone a snapshot.""" share_name = share['id'] share_proto = share['share_proto'].upper() # Validate the share protocol if share_proto not in ('NFS', 'CIFS'): raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) # Get the pool name from share host field pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share['host']) raise exception.InvalidHost(reason=message) self._share_server_validation(share_server) self._allocate_container_from_snapshot( share, snapshot, share_server, pool_name) nfs_if = enas_utils.convert_ipv6_format_if_needed( share_server['backend_details']['nfs_if']) if share_proto == 'NFS': self._create_nfs_share(share_name, share_server) location = ('%(nfs_if)s:/%(share_name)s' % 
{'nfs_if': nfs_if, 'share_name': share_name}) elif share_proto == 'CIFS': location = self._create_cifs_share(share_name, share_server) return [ {'path': location} ] def create_snapshot(self, context, snapshot, share_server=None): """Create snapshot from share.""" share_name = snapshot['share_id'] status, filesystem = self._get_context('FileSystem').get(share_name) if status != constants.STATUS_OK: message = (_("File System %s not found.") % share_name) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) pool_id = filesystem['pools_id'][0] self._get_context('Snapshot').create(snapshot['id'], snapshot['share_id'], pool_id) def delete_share(self, context, share, share_server=None): """Delete a share.""" if share_server is None: LOG.warning("Share network should be specified for " "share deletion.") return share_proto = share['share_proto'].upper() if share_proto == 'NFS': self._delete_nfs_share(share, share_server) elif share_proto == 'CIFS': self._delete_cifs_share(share, share_server) else: raise exception.InvalidShare( reason=_('Unsupported share protocol')) @enas_utils.log_enter_exit def _delete_cifs_share(self, share, share_server): """Delete CIFS share.""" vdm_name = self._get_share_server_name(share_server) name = share['id'] self._get_context('CIFSShare').delete(name, vdm_name) self._deallocate_container(name, vdm_name) @enas_utils.log_enter_exit def _delete_nfs_share(self, share, share_server): """Delete NFS share.""" vdm_name = self._get_share_server_name(share_server) name = share['id'] self._get_context('NFSShare').delete(name, vdm_name) self._deallocate_container(name, vdm_name) @enas_utils.log_enter_exit def _deallocate_container(self, share_name, vdm_name): """Delete underneath objects of the share.""" path = '/' + share_name try: # Delete mount point self._get_context('MountPoint').delete(path, vdm_name) except exception.EMCPowerMaxXMLAPIError as e: LOG.exception("CIFS server %(name)s on mover %(mover_name)s " "not found due to error 
%(err)s. Skip the " "deletion.", {'name': path, 'mover_name': vdm_name, 'err': e.message}) try: # Delete file system self._get_context('FileSystem').delete(share_name) except exception.EMCPowerMaxXMLAPIError as e: LOG.exception("File system %(share_name)s not found due to " "error %(err)s. Skip the deletion.", {'share_name': share_name, 'err': e.message}) def delete_snapshot(self, context, snapshot, share_server=None): """Delete a snapshot.""" self._get_context('Snapshot').delete(snapshot['id']) def ensure_share(self, context, share, share_server=None): """Ensure that the share is exported.""" def extend_share(self, share, new_size, share_server=None): # Get the pool name from share host field pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share['host']) raise exception.InvalidHost(reason=message) share_name = share['id'] self._get_context('FileSystem').extend( share_name, pool_name, new_size * units.Ki) def allow_access(self, context, share, access, share_server=None): """Allow access to a share.""" access_level = access['access_level'] if access_level not in const.ACCESS_LEVELS: raise exception.InvalidShareAccessLevel(level=access_level) share_proto = share['share_proto'] if share_proto == 'NFS': self._nfs_allow_access(context, share, access, share_server) elif share_proto == 'CIFS': self._cifs_allow_access(context, share, access, share_server) else: raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) @enas_utils.log_enter_exit def _cifs_allow_access(self, context, share, access, share_server): """Allow access to CIFS share.""" vdm_name = self._get_share_server_name(share_server) share_name = share['id'] if access['access_type'] != 'user': reason = _('Only user access type allowed for CIFS share') raise exception.InvalidShareAccess(reason=reason) user_name = access['access_to'] access_level = access['access_level'] if 
access_level == const.ACCESS_LEVEL_RW: cifs_access = constants.CIFS_ACL_FULLCONTROL else: cifs_access = constants.CIFS_ACL_READ # Check if CIFS server exists. server_name = vdm_name status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %s not found.") % server_name) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) self._get_context('CIFSShare').allow_share_access( vdm_name, share_name, user_name, server['domain'], access=cifs_access) @enas_utils.log_enter_exit def _nfs_allow_access(self, context, share, access, share_server): """Allow access to NFS share.""" vdm_name = self._get_share_server_name(share_server) access_type = access['access_type'] if access_type != 'ip': reason = _('Only ip access type allowed.') raise exception.InvalidShareAccess(reason=reason) host_ip = access['access_to'] access_level = access['access_level'] self._get_context('NFSShare').allow_share_access( share['id'], host_ip, vdm_name, access_level) def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): # deleting rules for rule in delete_rules: self.deny_access(context, share, rule, share_server) # adding rules for rule in add_rules: self.allow_access(context, share, rule, share_server) # recovery mode if not (add_rules or delete_rules): white_list = [] for rule in access_rules: self.allow_access(context, share, rule, share_server) white_list.append( enas_utils.convert_ipv6_format_if_needed( rule['access_to'])) self.clear_access(share, share_server, white_list) def clear_access(self, share, share_server, white_list): share_proto = share['share_proto'].upper() share_name = share['id'] if share_proto == 'CIFS': self._cifs_clear_access(share_name, share_server, white_list) elif share_proto == 'NFS': self._nfs_clear_access(share_name, share_server, white_list) @enas_utils.log_enter_exit def _cifs_clear_access(self, share_name, share_server, 
white_list): """Clear access for CIFS share except hosts in the white list.""" vdm_name = self._get_share_server_name(share_server) # Check if CIFS server exists. server_name = vdm_name status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %(server_name)s has issue. " "Detail: %(status)s") % {'server_name': server_name, 'status': status}) raise exception.EMCPowerMaxXMLAPIError(err=message) self._get_context('CIFSShare').clear_share_access( share_name=share_name, mover_name=vdm_name, domain=server['domain'], white_list_users=white_list) @enas_utils.log_enter_exit def _nfs_clear_access(self, share_name, share_server, white_list): """Clear access for NFS share except hosts in the white list.""" self._get_context('NFSShare').clear_share_access( share_name=share_name, mover_name=self._get_share_server_name(share_server), white_list_hosts=white_list) def deny_access(self, context, share, access, share_server=None): """Deny access to a share.""" share_proto = share['share_proto'] if share_proto == 'NFS': self._nfs_deny_access(share, access, share_server) elif share_proto == 'CIFS': self._cifs_deny_access(share, access, share_server) else: raise exception.InvalidShare( reason=_('Unsupported share protocol')) @enas_utils.log_enter_exit def _cifs_deny_access(self, share, access, share_server): """Deny access to CIFS share.""" vdm_name = self._get_share_server_name(share_server) share_name = share['id'] if access['access_type'] != 'user': LOG.warning("Only user access type allowed for CIFS share.") return user_name = access['access_to'] access_level = access['access_level'] if access_level == const.ACCESS_LEVEL_RW: cifs_access = constants.CIFS_ACL_FULLCONTROL else: cifs_access = constants.CIFS_ACL_READ # Check if CIFS server exists. 
server_name = vdm_name status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %s not found.") % server_name) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) self._get_context('CIFSShare').deny_share_access( vdm_name, share_name, user_name, server['domain'], access=cifs_access) @enas_utils.log_enter_exit def _nfs_deny_access(self, share, access, share_server): """Deny access to NFS share.""" vdm_name = self._get_share_server_name(share_server) access_type = access['access_type'] if access_type != 'ip': LOG.warning("Only ip access type allowed.") return host_ip = enas_utils.convert_ipv6_format_if_needed(access['access_to']) self._get_context('NFSShare').deny_share_access(share['id'], host_ip, vdm_name) def check_for_setup_error(self): """Check for setup error.""" # To verify the input from Manila configuration status, out = self._get_context('Mover').get_ref(self.mover_name, True) if constants.STATUS_ERROR == status: message = (_("Could not find Data Mover by name: %s.") % self.mover_name) LOG.error(message) raise exception.InvalidParameterValue(err=message) self.pools = self._get_managed_storage_pools(self.pool_conf) def _get_managed_storage_pools(self, pools): matched_pools = set() if pools: # Get the real pools from the backend storage status, backend_pools = self._get_context('StoragePool').get_all() if status != constants.STATUS_OK: message = (_("Failed to get storage pool information. " "Reason: %s") % backend_pools) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) real_pools = set([item for item in backend_pools]) conf_pools = set([item.strip() for item in pools]) matched_pools, unmatched_pools = enas_utils.do_match_any( real_pools, conf_pools) if not matched_pools: msg = (_("None of the specified storage pools to be managed " "exist. Please check your configuration " "emc_nas_pool_names in manila.conf. 
" "The available pools in the backend are %s.") % ",".join(real_pools)) raise exception.InvalidParameterValue(err=msg) LOG.info("Storage pools: %s will be managed.", ",".join(matched_pools)) else: LOG.debug("No storage pool is specified, so all pools " "in storage system will be managed.") return matched_pools def connect(self, emc_share_driver, context): """Connect to PowerMax NAS server.""" config = emc_share_driver.configuration config.append_config_values(POWERMAX_OPTS) self.mover_name = config.safe_get('powermax_server_container') self.pool_conf = config.safe_get('powermax_share_data_pools') self.reserved_percentage = config.safe_get('reserved_share_percentage') if self.reserved_percentage is None: self.reserved_percentage = 0 self.reserved_snapshot_percentage = config.safe_get( 'reserved_share_from_snapshot_percentage') if self.reserved_snapshot_percentage is None: self.reserved_snapshot_percentage = self.reserved_percentage self.reserved_share_extend_percentage = config.safe_get( 'reserved_share_extend_percentage') if self.reserved_share_extend_percentage is None: self.reserved_share_extend_percentage = self.reserved_percentage self.manager = manager.StorageObjectManager(config) self.port_conf = config.safe_get('powermax_ethernet_ports') def get_managed_ports(self): # Get the real ports(devices) list from the backend storage real_ports = self._get_physical_devices(self.mover_name) if not self.port_conf: LOG.debug("No ports are specified, so any of the ports on the " "Data Mover can be used.") return real_ports matched_ports, unmanaged_ports = enas_utils.do_match_any( real_ports, self.port_conf) if not matched_ports: msg = (_("None of the specified network ports exist. " "Please check your configuration powermax_ethernet_ports " "in manila.conf. 
The available ports on the Data Mover " "are %s.") % ",".join(real_ports)) raise exception.BadConfigurationException(reason=msg) LOG.debug("Ports: %s can be used.", ",".join(matched_ports)) return list(matched_ports) def update_share_stats(self, stats_dict): """Communicate with EMCNASClient to get the stats.""" stats_dict['driver_version'] = VERSION self._get_context('Mover').get_ref(self.mover_name, True) stats_dict['pools'] = [] status, pools = self._get_context('StoragePool').get_all() for name, pool in pools.items(): if not self.pools or pool['name'] in self.pools: total_size = float(pool['total_size']) used_size = float(pool['used_size']) pool_stat = { 'pool_name': pool['name'], 'total_capacity_gb': enas_utils.mb_to_gb(total_size), 'free_capacity_gb': enas_utils.mb_to_gb(total_size - used_size), 'qos': False, 'reserved_percentage': self.reserved_percentage, 'reserved_snapshot_percentage': self.reserved_snapshot_percentage, 'reserved_share_extend_percentage': self.reserved_share_extend_percentage, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'ipv6_support': True } stats_dict['pools'].append(pool_stat) if not stats_dict['pools']: message = _("Failed to update storage pool.") LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def get_pool(self, share): """Get the pool name of the share.""" share_name = share['id'] status, filesystem = self._get_context('FileSystem').get(share_name) if status != constants.STATUS_OK: message = (_("File System %(name)s not found. " "Reason: %(err)s") % {'name': share_name, 'err': filesystem}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) pool_id = filesystem['pools_id'][0] # Get the real pools from the backend storage status, backend_pools = self._get_context('StoragePool').get_all() if status != constants.STATUS_OK: message = (_("Failed to get storage pool information. 
" "Reason: %s") % backend_pools) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) for name, pool_info in backend_pools.items(): if pool_info['id'] == pool_id: return name available_pools = [item for item in backend_pools] message = (_("No matched pool name for share: %(share)s. " "Available pools: %(pools)s") % {'share': share_name, 'pools': available_pools}) raise exception.EMCPowerMaxXMLAPIError(err=message) def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return constants.IP_ALLOCATIONS def setup_server(self, network_info, metadata=None): """Set up and configure share server. Sets up and configures share server with given network parameters. """ # Only support single security service with type 'active_directory' vdm_name = network_info['server_id'] vlan_id = network_info['segmentation_id'] active_directory = None allocated_interfaces = [] if network_info.get('security_services'): is_valid, active_directory = self._get_valid_security_service( network_info['security_services']) if not is_valid: raise exception.EMCPowerMaxXMLAPIError(err=active_directory) try: if not self._vdm_exist(vdm_name): LOG.debug('Share server %s not found, creating ' 'share server...', vdm_name) self._get_context('VDM').create(vdm_name, self.mover_name) devices = self.get_managed_ports() for net_info in network_info['network_allocations']: random.shuffle(devices) ip_version = net_info['ip_version'] interface = { 'name': net_info['id'][-12:], 'device_name': devices[0], 'ip': net_info['ip_address'], 'mover_name': self.mover_name, 'vlan_id': vlan_id if vlan_id else -1, } if ip_version == 6: interface['ip_version'] = ip_version interface['net_mask'] = str( utils.cidr_to_prefixlen( network_info['cidr'])) else: interface['net_mask'] = utils.cidr_to_netmask( network_info['cidr']) self._get_context('MoverInterface').create(interface) allocated_interfaces.append(interface) cifs_interface = allocated_interfaces[0] 
nfs_interface = allocated_interfaces[1] if active_directory: self._configure_active_directory( active_directory, vdm_name, cifs_interface) self._get_context('VDM').attach_nfs_interface( vdm_name, nfs_interface['name']) return { 'share_server_name': vdm_name, 'cifs_if': cifs_interface['ip'], 'nfs_if': nfs_interface['ip'], } except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Could not setup server') server_details = self._construct_backend_details( vdm_name, allocated_interfaces) self.teardown_server( server_details, network_info['security_services']) def _construct_backend_details(self, vdm_name, interfaces): if_number = len(interfaces) cifs_if = interfaces[0]['ip'] if if_number > 0 else None nfs_if = interfaces[1]['ip'] if if_number > 1 else None return { 'share_server_name': vdm_name, 'cifs_if': cifs_if, 'nfs_if': nfs_if, } @enas_utils.log_enter_exit def _vdm_exist(self, name): status, out = self._get_context('VDM').get(name) if constants.STATUS_OK != status: return False return True def _get_physical_devices(self, mover_name): """Get a proper network device to create interface.""" devices = self._get_context('Mover').get_physical_devices(mover_name) if not devices: message = (_("Could not get physical device port on mover %s.") % self.mover_name) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) return devices def _configure_active_directory( self, security_service, vdm_name, interface): domain = security_service['domain'] server = security_service['dns_ip'] self._get_context('DNSDomain').create(self.mover_name, domain, server) cifs_server_args = { 'name': vdm_name, 'interface_ip': interface['ip'], 'domain_name': security_service['domain'], 'user_name': security_service['user'], 'password': security_service['password'], 'mover_name': vdm_name, 'is_vdm': True, } self._get_context('CIFSServer').create(cifs_server_args) def teardown_server(self, server_details, security_services=None): """Teardown share server.""" if 
not server_details: LOG.debug('Server details are empty.') return vdm_name = server_details.get('share_server_name') if not vdm_name: LOG.debug('No share server found in server details.') return cifs_if = server_details.get('cifs_if') nfs_if = server_details.get('nfs_if') status, vdm = self._get_context('VDM').get(vdm_name) if constants.STATUS_OK != status: LOG.debug('Share server %s not found.', vdm_name) return interfaces = self._get_context('VDM').get_interfaces(vdm_name) for if_name in interfaces['nfs']: self._get_context('VDM').detach_nfs_interface(vdm_name, if_name) if security_services: # Only support single security service with type 'active_directory' is_valid, active_directory = self._get_valid_security_service( security_services) if is_valid: status, servers = self._get_context('CIFSServer').get_all( vdm_name) if constants.STATUS_OK != status: LOG.error('Could not find CIFS server by name: %s.', vdm_name) else: cifs_servers = copy.deepcopy(servers) for name, server in cifs_servers.items(): # Unjoin CIFS Server from domain cifs_server_args = { 'name': server['name'], 'join_domain': False, 'user_name': active_directory['user'], 'password': active_directory['password'], 'mover_name': vdm_name, 'is_vdm': True, } try: self._get_context('CIFSServer').modify( cifs_server_args) except exception.EMCPowerMaxXMLAPIError as expt: LOG.debug("Failed to modify CIFS server " "%(server)s. Reason: %(err)s.", {'server': server, 'err': expt}) self._get_context('CIFSServer').delete(name, vdm_name) # Delete interface from Data Mover if cifs_if: self._get_context('MoverInterface').delete(cifs_if, self.mover_name) if nfs_if: self._get_context('MoverInterface').delete(nfs_if, self.mover_name) # Delete Virtual Data Mover self._get_context('VDM').delete(vdm_name) def _get_valid_security_service(self, security_services): """Validate security services and return a supported security service. 
:param security_services: :returns: (, ) -- is true to indicate security_services includes zero or single security service for active directory. Otherwise, it would return false. return error message when is false. Otherwise, it will return zero or single security service for active directory. """ # Only support single security service with type 'active_directory' if (len(security_services) > 1 or (security_services and security_services[0]['type'] != 'active_directory')): return False, _("Unsupported security services. " "Only support single security service and " "only support type 'active_directory'") return True, security_services[0] def _get_share_server_name(self, share_server): try: return share_server['backend_details']['share_server_name'] except Exception: LOG.debug("Didn't get share server name from share_server %s.", share_server) return share_server['id'] def _get_context(self, context_type): return self.manager.getStorageContext(context_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powermax/object_manager.py0000664000175000017500000023203400000000000027504 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import re from lxml import builder from lxml import etree as ET from oslo_concurrency import processutils from oslo_log import log from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.common.enas import connector from manila.share.drivers.dell_emc.common.enas import constants from manila.share.drivers.dell_emc.common.enas import utils as powermax_utils from manila.share.drivers.dell_emc.common.enas import xml_api_parser as parser from manila import utils LOG = log.getLogger(__name__) @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class StorageObjectManager(object): def __init__(self, configuration): self.context = {} self.connectors = {} self.connectors['XML'] = connector.XMLAPIConnector(configuration) self.connectors['SSH'] = connector.SSHConnector(configuration) elt_maker = builder.ElementMaker(nsmap={None: constants.XML_NAMESPACE}) xml_parser = parser.XMLAPIParser() obj_types = StorageObject.__subclasses__() # pylint: disable=no-member for item in obj_types: key = item.__name__ self.context[key] = eval(key)(self.connectors, elt_maker, xml_parser, self) def getStorageContext(self, type): if type in self.context: return self.context[type] else: message = (_("Invalid storage object type %s.") % type) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) class StorageObject(object): def __init__(self, conn, elt_maker, xml_parser, manager): self.conn = conn self.elt_maker = elt_maker self.xml_parser = xml_parser self.manager = manager self.xml_retry = False self.ssh_retry_patterns = [ ( constants.SSH_DEFAULT_RETRY_PATTERN, exception.EMCPowerMaxLockRequiredException() ), ] def _translate_response(self, response): """Translate different status to ok/error status.""" if (constants.STATUS_OK == response['maxSeverity'] or constants.STATUS_ERROR == response['maxSeverity']): return old_Severity = response['maxSeverity'] if 
response['maxSeverity'] in (constants.STATUS_DEBUG, constants.STATUS_INFO): response['maxSeverity'] = constants.STATUS_OK LOG.warning("Translated status from %(old)s to %(new)s. " "Message: %(info)s.", {'old': old_Severity, 'new': response['maxSeverity'], 'info': response}) def _response_validation(self, response, error_code): """Validates whether a response includes a certain error code.""" msg_codes = self._get_problem_message_codes(response['problems']) for code in msg_codes: if code == error_code: return True return False def _get_problem_message_codes(self, problems): message_codes = [] for problem in problems: if 'messageCode' in problem: message_codes.append(problem['messageCode']) return message_codes def _get_problem_messages(self, problems): messages = [] for problem in problems: if 'message' in problem: messages.append(problem['message']) return messages def _get_problem_diags(self, problems): diags = [] for problem in problems: if 'Diagnostics' in problem: diags.append(problem['Diagnostics']) return diags def _build_query_package(self, body): return self.elt_maker.RequestPacket( self.elt_maker.Request( self.elt_maker.Query(body) ) ) def _build_task_package(self, body): return self.elt_maker.RequestPacket( self.elt_maker.Request( self.elt_maker.StartTask(body, timeout='300') ) ) @utils.retry(retry_param=exception.EMCPowerMaxLockRequiredException) def _send_request(self, req): req_xml = constants.XML_HEADER + ET.tostring(req).decode('utf-8') rsp_xml = self.conn['XML'].request(str(req_xml)) response = self.xml_parser.parse(rsp_xml) self._translate_response(response) if (response['maxSeverity'] != constants.STATUS_OK and self._response_validation(response, constants.MSG_CODE_RETRY)): raise exception.EMCPowerMaxLockRequiredException return response @utils.retry(retry_param=exception.EMCPowerMaxLockRequiredException) def _execute_cmd(self, cmd, retry_patterns=None, check_exit_code=False): """Execute NAS command via SSH. 
:param retry_patterns: list of tuples,where each tuple contains a reg expression and an exception. :param check_exit_code: Boolean. Raise processutils.ProcessExecutionError if the command failed to execute and this parameter is set to True. """ if retry_patterns is None: retry_patterns = self.ssh_retry_patterns try: out, err = self.conn['SSH'].run_ssh(cmd, check_exit_code) except processutils.ProcessExecutionError as e: for pattern in retry_patterns: if re.search(pattern[0], e.stdout): raise pattern[1] raise return out, err def _copy_properties(self, source, target, property_map, deep_copy=True): for prop in property_map: if isinstance(prop, tuple): target_key, src_key = prop else: target_key = src_key = prop if src_key in source: if deep_copy and isinstance(source[src_key], list): target[target_key] = copy.deepcopy(source[src_key]) else: target[target_key] = source[src_key] else: target[target_key] = None def _get_mover_id(self, mover_name, is_vdm): if is_vdm: return self.get_context('VDM').get_id(mover_name) else: return self.get_context('Mover').get_id(mover_name, self.xml_retry) def get_context(self, type): return self.manager.getStorageContext(type) @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class FileSystem(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(FileSystem, self).__init__(conn, elt_maker, xml_parser, manager) self.filesystem_map = {} @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def create(self, name, size, pool_name, mover_name, is_vdm=True): pool_id = self.get_context('StoragePool').get_id(pool_name) mover_id = self._get_mover_id(mover_name, is_vdm) if is_vdm: mover = self.elt_maker.Vdm(vdm=mover_id) else: mover = self.elt_maker.Mover(mover=mover_id) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewFileSystem( mover, self.elt_maker.StoragePool( pool=pool_id, size=str(size), mayContainSlices='true' ), name=name ) ) 
response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif self._response_validation( response, constants.MSG_FILESYSTEM_EXIST): LOG.warning("File system %s already exists. " "Skip the creation.", name) return elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create file system %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def get(self, name): if name not in self.filesystem_map: request = self._build_query_package( self.elt_maker.FileSystemQueryParams( self.elt_maker.AspectSelection( fileSystems='true', fileSystemCapacityInfos='true' ), self.elt_maker.Alias(name=name) ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: if self._is_filesystem_nonexistent(response): return constants.STATUS_NOT_FOUND, response['problems'] else: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] src = response['objects'][0] filesystem = {} property_map = ( 'name', ('pools_id', 'storagePools'), ('volume_id', 'volume'), ('size', 'volumeSize'), ('id', 'fileSystem'), 'type', 'dataServicePolicies', ) self._copy_properties(src, filesystem, property_map) self.filesystem_map[name] = filesystem return constants.STATUS_OK, self.filesystem_map[name] def delete(self, name): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning("File system %s not found. Skip the deletion.", name) return elif constants.STATUS_OK != status: message = (_("Failed to get file system by name %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) enas_id = self.filesystem_map[name]['id'] request = self._build_task_package( self.elt_maker.DeleteFileSystem(fileSystem=enas_id) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete file system %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) self.filesystem_map.pop(name) def extend(self, name, pool_name, new_size): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get file system by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) enas_id = out['id'] size = int(out['size']) if new_size < size: message = (_("Failed to extend file system %(name)s because new " "size %(new_size)d is smaller than old size " "%(size)d.") % {'name': name, 'new_size': new_size, 'size': size}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) elif new_size == size: return pool_id = self.get_context('StoragePool').get_id(pool_name) request = self._build_task_package( self.elt_maker.ExtendFileSystem( self.elt_maker.StoragePool( pool=pool_id, size=str(new_size - size) ), fileSystem=enas_id, ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to extend file system %(name)s to new size " "%(new_size)d. Reason: %(err)s.") % {'name': name, 'new_size': new_size, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def get_id(self, name): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get file system by name %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) return self.filesystem_map[name]['id'] def _is_filesystem_nonexistent(self, response): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) diags = self._get_problem_diags(response['problems']) for code, diagnose in zip(msg_codes, diags): if (code == constants.MSG_FILESYSTEM_NOT_FOUND and diagnose.find('File system not found.') != -1): return True return False def create_from_snapshot(self, name, snap_name, source_fs_name, pool_name, mover_name, connect_id): create_fs_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-name', name, '-type', 'uxfs', '-create', 'samesize=' + source_fs_name, 'pool=%s' % pool_name, 'storage=SINGLE', 'worm=off', '-thin', 'no', '-option', 'slice=y', ] self._execute_cmd(create_fs_cmd) ro_mount_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name, '-option', 'ro', name, '/%s' % name, ] self._execute_cmd(ro_mount_cmd) session_name = name + ':' + snap_name copy_ckpt_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_copy', '-name', session_name[0:63], '-source', '-ckpt', snap_name, '-destination', '-fs', name, '-interconnect', 'id=%s' % connect_id, '-overwrite_destination', '-full_copy', ] try: self._execute_cmd(copy_ckpt_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: LOG.error("Failed to copy content from snapshot %(snap)s to " "file system %(filesystem)s. Reason: %(err)s.", {'snap': snap_name, 'filesystem': name, 'err': expt}) # When an error happens during nas_copy, we need to continue # deleting the checkpoint of the target file system if it exists. 
query_fs_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-info', name, ] out, err = self._execute_cmd(query_fs_cmd) re_ckpts = r'ckpts\s*=\s*(.*)\s*' m = re.search(re_ckpts, out) if m is not None: ckpts = m.group(1) for ckpt in re.split(',', ckpts): umount_ckpt_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_umount', mover_name, '-perm', ckpt, ] self._execute_cmd(umount_ckpt_cmd) delete_ckpt_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-delete', ckpt, '-Force', ] self._execute_cmd(delete_ckpt_cmd) rw_mount_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name, '-option', 'rw', name, '/%s' % name, ] self._execute_cmd(rw_mount_cmd) @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class StoragePool(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(StoragePool, self).__init__(conn, elt_maker, xml_parser, manager) self.pool_map = {} def get(self, name, force=False): if name not in self.pool_map or force: status, out = self.get_all() if constants.STATUS_OK != status: return status, out if name not in self.pool_map: return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.pool_map[name] def get_all(self): self.pool_map.clear() request = self._build_query_package( self.elt_maker.StoragePoolQueryParams() ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] for item in response['objects']: pool = {} property_map = ( 'name', ('movers_id', 'movers'), ('total_size', 'autoSize'), ('used_size', 'usedSize'), 'diskType', 'dataServicePolicies', ('id', 'pool'), ) self._copy_properties(item, pool, property_map) self.pool_map[item['name']] = pool return constants.STATUS_OK, self.pool_map def get_id(self, name): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get storage pool 
by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) return out['id'] @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class MountPoint(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(MountPoint, self).__init__(conn, elt_maker, xml_parser, manager) @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def create(self, mount_path, fs_name, mover_name, is_vdm=True): fs_id = self.get_context('FileSystem').get_id(fs_name) mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewMount( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', ), fileSystem=fs_id, path=mount_path ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif self._is_mount_point_already_existent(response): LOG.warning("Mount Point %(mount)s already exists. " "Skip the creation.", {'mount': mount_path}) return elif constants.STATUS_OK != response['maxSeverity']: message = (_('Failed to create Mount Point %(mount)s for ' 'file system %(fs_name)s. 
Reason: %(err)s.') % {'mount': mount_path, 'fs_name': fs_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def get(self, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_query_package( self.elt_maker.MountQueryParams( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false' ) ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['objects'] if not response['objects']: return constants.STATUS_NOT_FOUND, None else: return constants.STATUS_OK, response['objects'] @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def delete(self, mount_path, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.DeleteMount( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', path=mount_path ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif self._is_mount_point_nonexistent(response): LOG.warning('Mount point %(mount)s on mover %(mover_name)s ' 'not found.', {'mount': mount_path, 'mover_name': mover_name}) return elif constants.STATUS_OK != response['maxSeverity']: message = (_('Failed to delete mount point %(mount)s on mover ' '%(mover_name)s. 
Reason: %(err)s.') % {'mount': mount_path, 'mover_name': mover_name, 'err': response}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def _is_mount_point_nonexistent(self, response): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) message = self._get_problem_messages(response['problems']) for code, msg in zip(msg_codes, message): if ((code == constants.MSG_GENERAL_ERROR and msg.find( 'No such path or invalid operation') != -1) or code == constants.MSG_INVALID_VDM_ID or code == constants.MSG_INVALID_MOVER_ID): return True return False def _is_mount_point_already_existent(self, response): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) message = self._get_problem_messages(response['problems']) for code, msg in zip(msg_codes, message): if ((code == constants.MSG_GENERAL_ERROR and msg.find( 'Mount already exists') != -1)): return True return False @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class Mover(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(Mover, self).__init__(conn, elt_maker, xml_parser, manager) self.mover_map = {} self.mover_ref_map = {} def get_ref(self, name, force=False): if name not in self.mover_ref_map or force: self.mover_ref_map.clear() request = self._build_query_package( self.elt_maker.MoverQueryParams( self.elt_maker.AspectSelection(movers='true') ) ) response = self._send_request(request) if constants.STATUS_ERROR == response['maxSeverity']: return response['maxSeverity'], response['problems'] for item in response['objects']: mover = {} property_map = ('name', ('id', 'mover')) self._copy_properties(item, mover, property_map) if mover: self.mover_ref_map[mover['name']] = mover if (name not in self.mover_ref_map or self.mover_ref_map[name]['id'] == ''): return constants.STATUS_NOT_FOUND, None return 
constants.STATUS_OK, self.mover_ref_map[name] def get(self, name, force=False): if name not in self.mover_map or force: if name in self.mover_ref_map and not force: mover_id = self.mover_ref_map[name]['id'] else: mover_id = self.get_id(name, force) if name in self.mover_map: self.mover_map.pop(name) request = self._build_query_package( self.elt_maker.MoverQueryParams( self.elt_maker.AspectSelection( moverDeduplicationSettings='true', moverDnsDomains='true', moverInterfaces='true', moverNetworkDevices='true', moverNisDomains='true', moverRoutes='true', movers='true', moverStatuses='true' ), mover=mover_id ) ) response = self._send_request(request) if constants.STATUS_ERROR == response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] mover = {} src = response['objects'][0] property_map = ( 'name', ('id', 'mover'), ('Status', 'maxSeverity'), 'version', 'uptime', 'role', ('interfaces', 'MoverInterface'), ('devices', 'LogicalNetworkDevice'), ('dns_domain', 'MoverDnsDomain'), ) self._copy_properties(src, mover, property_map) internal_devices = [] if mover['interfaces']: for interface in mover['interfaces']: if self._is_internal_device(interface['device']): internal_devices.append(interface) mover['interfaces'] = [var for var in mover['interfaces'] if var not in internal_devices] self.mover_map[name] = mover return constants.STATUS_OK, self.mover_map[name] def get_id(self, name, force=False): status, mover_ref = self.get_ref(name, force) if constants.STATUS_OK != status: message = (_("Failed to get mover by name %(name)s.") % {'name': name}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) return mover_ref['id'] def _is_internal_device(self, device): for device_type in ('mge', 'fxg', 'tks', 'fsn'): if device.find(device_type) == 0: return True return False def get_interconnect_id(self, source, destination): header = [ 'id', 'name', 'source_server', 
'destination_system', 'destination_server', ] conn_id = None command_nas_cel = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_cel', '-interconnect', '-l', ] out, err = self._execute_cmd(command_nas_cel) lines = out.strip().split('\n') for line in lines: if line.strip().split() == header: LOG.info('Found the header of the command ' '/nas/bin/nas_cel -interconnect -l.') else: interconn = line.strip().split() if interconn[2] == source and interconn[4] == destination: conn_id = interconn[0] return conn_id def get_physical_devices(self, mover_name): physical_network_devices = [] cmd_sysconfig = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_sysconfig', mover_name, '-pci' ] out, err = self._execute_cmd(cmd_sysconfig) re_pattern = (r'0:\s*(?P\S+)\s*IRQ:\s*(?P\d+)\n' r'.*\n' r'\s*Link:\s*(?P[A-Za-z]+)') for device in re.finditer(re_pattern, out): if 'Up' in device.group('link'): physical_network_devices.append(device.group('name')) return physical_network_devices @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class VDM(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(VDM, self).__init__(conn, elt_maker, xml_parser, manager) self.vdm_map = {} @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def create(self, name, mover_name): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewVdm(mover=mover_id, name=name) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif self._response_validation(response, constants.MSG_VDM_EXIST): LOG.warning("VDM %(name)s already exists. Skip the creation.", {'name': name}) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create VDM %(name)s on mover " "%(mover_name)s. 
Reason: %(err)s.") % {'name': name, 'mover_name': mover_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def get(self, name): if name not in self.vdm_map: request = self._build_query_package( self.elt_maker.VdmQueryParams() ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] elif not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] for item in response['objects']: vdm = {} property_map = ( 'name', ('id', 'vdm'), 'state', ('host_mover_id', 'mover'), ('interfaces', 'Interfaces'), ) self._copy_properties(item, vdm, property_map) self.vdm_map[item['name']] = vdm if name not in self.vdm_map: return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.vdm_map[name] def delete(self, name): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning("VDM %s not found. Skip the deletion.", name) return elif constants.STATUS_OK != status: message = (_("Failed to get VDM by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) vdm_id = self.vdm_map[name]['id'] request = self._build_task_package( self.elt_maker.DeleteVdm(vdm=vdm_id) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete VDM %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) self.vdm_map.pop(name) def get_id(self, name): status, vdm = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get VDM by name %(name)s.") % {'name': name}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) return vdm['id'] def attach_nfs_interface(self, vdm_name, if_name): command_attach_nfs_interface = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', vdm_name, '-attach', if_name, ] self._execute_cmd(command_attach_nfs_interface) def detach_nfs_interface(self, vdm_name, if_name): command_detach_nfs_interface = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', vdm_name, '-detach', if_name, ] try: self._execute_cmd(command_detach_nfs_interface, check_exit_code=True) except processutils.ProcessExecutionError: interfaces = self.get_interfaces(vdm_name) if if_name not in interfaces['nfs']: LOG.debug("Failed to detach interface %(interface)s " "from mover %(mover_name)s.", {'interface': if_name, 'mover_name': vdm_name}) else: message = (_("Failed to detach interface %(interface)s " "from mover %(mover_name)s.") % {'interface': if_name, 'mover_name': vdm_name}) LOG.exception(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def get_interfaces(self, vdm_name): interfaces = { 'cifs': [], 'nfs': [], } re_pattern = (r'Interfaces to services mapping:' r'\s*(?P(\s*interface=.*)*)') command_get_interfaces = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-i', '-vdm', vdm_name, ] out, err = self._execute_cmd(command_get_interfaces) m = re.search(re_pattern, out) if m: if_list = m.group('interfaces').split('\n') for i in if_list: m_if = re.search(r'\s*interface=(?P.*)\s*:' r'\s*(?P.*)\s*', i) if m_if: if_name = m_if.group('if').strip() if 'cifs' == m_if.group('type') and if_name != '': interfaces['cifs'].append(if_name) elif (m_if.group('type') in ('vdm', 'nfs') and if_name != 
''): interfaces['nfs'].append(if_name) return interfaces @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class Snapshot(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(Snapshot, self).__init__(conn, elt_maker, xml_parser, manager) self.snap_map = {} def create(self, name, fs_name, pool_id, ckpt_size=None): fs_id = self.get_context('FileSystem').get_id(fs_name) if ckpt_size: elt_pool = self.elt_maker.StoragePool( pool=pool_id, size=str(ckpt_size) ) else: elt_pool = self.elt_maker.StoragePool(pool=pool_id) new_ckpt = self.elt_maker.NewCheckpoint( self.elt_maker.SpaceAllocationMethod( elt_pool ), checkpointOf=fs_id, name=name ) request = self._build_task_package(new_ckpt) response = self._send_request(request) if self._response_validation(response, constants.MSG_SNAP_EXIST): LOG.warning("Snapshot %(name)s already exists. " "Skip the creation.", {'name': name}) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create snapshot %(name)s on " "filesystem %(fs_name)s. Reason: %(err)s.") % {'name': name, 'fs_name': fs_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def get(self, name): if name not in self.snap_map: request = self._build_query_package( self.elt_maker.CheckpointQueryParams( self.elt_maker.Alias(name=name) ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] src = response['objects'][0] snap = {} property_map = ( 'name', ('id', 'checkpoint'), 'checkpointOf', 'state', ) self._copy_properties(src, snap, property_map) self.snap_map[name] = snap return constants.STATUS_OK, self.snap_map[name] def delete(self, name): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning("Snapshot %s not found. 
Skip the deletion.", name) return elif constants.STATUS_OK != status: message = (_("Failed to get snapshot by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) chpt_id = self.snap_map[name]['id'] request = self._build_task_package( self.elt_maker.DeleteCheckpoint(checkpoint=chpt_id) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete snapshot %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) self.snap_map.pop(name) def get_id(self, name): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get snapshot by %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) return self.snap_map[name]['id'] @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class MoverInterface(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(MoverInterface, self).__init__(conn, elt_maker, xml_parser, manager) @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def create(self, interface): # Maximum of 32 characters for mover interface name name = interface['name'] if len(name) > 32: name = name[0:31] device_name = interface['device_name'] ip_addr = interface['ip'] mover_name = interface['mover_name'] net_mask = interface['net_mask'] vlan_id = interface['vlan_id'] if interface['vlan_id'] else -1 mover_id = self._get_mover_id(mover_name, False) params = dict(device=device_name, ipAddress=str(ip_addr), mover=mover_id, name=name, netMask=net_mask, vlanid=str(vlan_id)) if interface.get('ip_version') == 6: params['ipVersion'] = 'IPv6' if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewMoverInterface(**params)) response 
= self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif self._response_validation( response, constants.MSG_INTERFACE_NAME_EXIST): LOG.warning("Mover interface name %s already exists. " "Skip the creation.", name) elif self._response_validation( response, constants.MSG_INTERFACE_EXIST): LOG.warning("Mover interface IP %s already exists. " "Skip the creation.", ip_addr) elif self._response_validation( response, constants.MSG_INTERFACE_INVALID_VLAN_ID): # When fail to create a mover interface with the specified # vlan id, PowerMax will leave an interface with vlan id 0 in the # backend. So we should explicitly remove the interface. try: self.delete(str(ip_addr), mover_name) except exception.EMCPowerMaxXMLAPIError: pass message = (_("Invalid vlan id %s. Other interfaces on this " "subnet are in a different vlan.") % vlan_id) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create mover interface %(interface)s. 
" "Reason: %(err)s.") % {'interface': interface, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def get(self, name, mover_name): # Maximum of 32 characters for mover interface name if len(name) > 32: name = name[0:31] status, mover = self.manager.getStorageContext('Mover').get( mover_name, True) if constants.STATUS_OK == status: for interface in mover['interfaces']: if name == interface['name']: return constants.STATUS_OK, interface return constants.STATUS_NOT_FOUND, None @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def delete(self, ip_addr, mover_name): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.DeleteMoverInterface( ipAddress=str(ip_addr), mover=mover_id ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif self._response_validation( response, constants.MSG_INTERFACE_NON_EXISTENT): LOG.warning("Mover interface %s not found. " "Skip the deletion.", ip_addr) return elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete mover interface %(ip)s on mover " "%(mover)s. 
Reason: %(err)s.") % {'ip': ip_addr, 'mover': mover_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class DNSDomain(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(DNSDomain, self).__init__(conn, elt_maker, xml_parser, manager) @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def create(self, mover_name, name, servers, protocol='udp'): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewMoverDnsDomain( mover=mover_id, name=name, servers=servers, protocol=protocol ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create DNS domain %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def delete(self, mover_name, name): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.DeleteMoverDnsDomain( mover=mover_id, name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: LOG.warning("Failed to delete DNS domain %(name)s. 
" "Reason: %(err)s.", {'name': name, 'err': response['problems']}) @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class CIFSServer(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(CIFSServer, self).__init__(conn, elt_maker, xml_parser, manager) self.cifs_server_map = {} @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def create(self, server_args): compName = server_args['name'] # Maximum of 14 characters for netBIOS name name = server_args['name'][-14:] # Maximum of 12 characters for alias name alias_name = server_args['name'][-12:] interfaces = server_args['interface_ip'] domain_name = server_args['domain_name'] user_name = server_args['user_name'] password = server_args['password'] mover_name = server_args['mover_name'] is_vdm = server_args['is_vdm'] mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False alias_name_list = [self.elt_maker.li(alias_name)] request = self._build_task_package( self.elt_maker.NewW2KCifsServer( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if server_args['is_vdm'] else 'false' ), self.elt_maker.Aliases(*alias_name_list), self.elt_maker.JoinDomain(userName=user_name, password=password), compName=compName, domain=domain_name, interfaces=interfaces, name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) if constants.STATUS_OK != response['maxSeverity']: status, out = self.get(compName, mover_name, is_vdm) if constants.STATUS_OK == status and out['domainJoined'] == 'true': return else: message = (_("Failed to create CIFS server %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def get_all(self, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_query_package( self.elt_maker.CifsServerQueryParams( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false' ) ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['objects'] if mover_name in self.cifs_server_map: self.cifs_server_map.pop(mover_name) self.cifs_server_map[mover_name] = {} for item in response['objects']: self.cifs_server_map[mover_name][item['compName'].lower()] = item return constants.STATUS_OK, self.cifs_server_map[mover_name] def get(self, name, mover_name, is_vdm=True, force=False): # name is compName name = name.lower() if (mover_name in self.cifs_server_map and name in self.cifs_server_map[mover_name]) and not force: return constants.STATUS_OK, self.cifs_server_map[mover_name][name] self.get_all(mover_name, is_vdm) if mover_name in self.cifs_server_map: for compName, server in self.cifs_server_map[mover_name].items(): if name == compName: return constants.STATUS_OK, server return constants.STATUS_NOT_FOUND, None @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def modify(self, server_args): """Make CIFS server join or un-join the domain. 
:param server_args: Dictionary for CIFS server modification name: CIFS server name instead of compName join_domain: True for joining the domain, false for un-joining user_name: User name under which the domain is joined password: Password associated with the user name mover_name: mover or VDM name is_vdm: Boolean to indicate mover or VDM :raises exception.EMCPowerMaxXMLAPIError: if modification fails. """ name = server_args['name'] join_domain = server_args['join_domain'] user_name = server_args['user_name'] password = server_args['password'] mover_name = server_args['mover_name'] if 'is_vdm' in server_args.keys(): is_vdm = server_args['is_vdm'] else: is_vdm = True mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.ModifyW2KCifsServer( self.elt_maker.DomainSetting( joinDomain='true' if join_domain else 'false', password=password, userName=user_name, ), mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif self._ignore_modification_error(response, join_domain): return elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to modify CIFS server %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def _ignore_modification_error(self, response, join_domain): if self._response_validation(response, constants.MSG_JOIN_DOMAIN): return join_domain elif self._response_validation(response, constants.MSG_UNJOIN_DOMAIN): return not join_domain return False def delete(self, computer_name, mover_name, is_vdm=True): try: status, out = self.get( computer_name.lower(), mover_name, is_vdm, self.xml_retry) if constants.STATUS_NOT_FOUND == status: LOG.warning("CIFS server %(name)s on mover %(mover_name)s " "not found. Skip the deletion.", {'name': computer_name, 'mover_name': mover_name}) return except exception.EMCPowerMaxXMLAPIError: LOG.warning("CIFS server %(name)s on mover %(mover_name)s " "not found. Skip the deletion.", {'name': computer_name, 'mover_name': mover_name}) return server_name = out['name'] mover_id = self._get_mover_id(mover_name, is_vdm) request = self._build_task_package( self.elt_maker.DeleteCifsServer( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', name=server_name ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete CIFS server %(name)s. 
" "Reason: %(err)s.") % {'name': computer_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) self.cifs_server_map[mover_name].pop(computer_name) @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class CIFSShare(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(CIFSShare, self).__init__(conn, elt_maker, xml_parser, manager) self.cifs_share_map = {} @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def create(self, name, server_name, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False share_path = '/' + name request = self._build_task_package( self.elt_maker.NewCifsShare( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false' ), self.elt_maker.CifsServers(self.elt_maker.li(server_name)), name=name, path=share_path ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create file share %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def get(self, name): if name not in self.cifs_share_map: request = self._build_query_package( self.elt_maker.CifsShareQueryParams(name=name) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, None self.cifs_share_map[name] = response['objects'][0] return constants.STATUS_OK, self.cifs_share_map[name] @utils.retry(retry_param=exception.EMCPowerMaxInvalidMoverID) def delete(self, name, mover_name, is_vdm=True): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning("CIFS share %s not found. Skip the deletion.", name) return elif constants.STATUS_OK != status: message = (_("Failed to get CIFS share by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False netbios_names = self.cifs_share_map[name]['CifsServers'] request = self._build_task_package( self.elt_maker.DeleteCifsShare( self.elt_maker.CifsServers(*map(lambda a: self.elt_maker.li(a), netbios_names)), mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCPowerMaxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete file system %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) self.cifs_share_map.pop(name) def disable_share_access(self, share_name, mover_name): cmd_str = 'sharesd %s set noaccess' % share_name disable_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % cmd_str, ] try: self._execute_cmd(disable_access, check_exit_code=True) except processutils.ProcessExecutionError: message = (_('Failed to disable the access to CIFS share ' '%(name)s.') % {'name': share_name}) LOG.exception(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def allow_share_access(self, mover_name, share_name, user_name, domain, access=constants.CIFS_ACL_FULLCONTROL): account = user_name + "@" + domain allow_str = ('sharesd %(share_name)s grant %(account)s=%(access)s' % {'share_name': share_name, 'account': account, 'access': access}) allow_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % allow_str, ] try: self._execute_cmd(allow_access, check_exit_code=True) except processutils.ProcessExecutionError as expt: dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' % {'domain': domain, 'user': user_name}, re.I) if re.search(dup_msg, expt.stdout): LOG.warning("Duplicate access control entry, " "skipping allow...") else: message = (_('Failed to allow the access %(access)s to ' 'CIFS share %(name)s. 
Reason: %(err)s.') % {'access': access, 'name': share_name, 'err': expt}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def deny_share_access(self, mover_name, share_name, user_name, domain, access=constants.CIFS_ACL_FULLCONTROL): account = user_name + "@" + domain revoke_str = ('sharesd %(share_name)s revoke %(account)s=%(access)s' % {'share_name': share_name, 'account': account, 'access': access}) allow_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % revoke_str, ] try: self._execute_cmd(allow_access, check_exit_code=True) except processutils.ProcessExecutionError as expt: not_found_msg = re.compile( r'No ACE found for %(domain)s\\%(user)s' % {'domain': domain, 'user': user_name}, re.I) user_err_msg = re.compile( r'Cannot get mapping for %(domain)s\\%(user)s' % {'domain': domain, 'user': user_name}, re.I) if re.search(not_found_msg, expt.stdout): LOG.warning("No access control entry found, " "skipping deny...") elif re.search(user_err_msg, expt.stdout): LOG.warning("User not found on domain, skipping deny...") else: message = (_('Failed to deny the access %(access)s to ' 'CIFS share %(name)s. 
Reason: %(err)s.') % {'access': access, 'name': share_name, 'err': expt}) LOG.exception(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def get_share_access(self, mover_name, share_name): get_str = 'sharesd %s dump' % share_name get_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % get_str, ] try: out, err = self._execute_cmd(get_access, check_exit_code=True) except processutils.ProcessExecutionError: msg = _('Failed to get access list of CIFS share %s.') % share_name LOG.exception(msg) raise exception.EMCPowerMaxXMLAPIError(err=msg) ret = {} name_pattern = re.compile(r"Unix user '(.+?)'") access_pattern = re.compile(r"ALLOWED:(.+?):") name = None for line in out.splitlines(): if name is None: names = name_pattern.findall(line) if names: name = names[0].lower() else: accesses = access_pattern.findall(line) if accesses: ret[name] = accesses[0].lower() name = None return ret def clear_share_access(self, mover_name, share_name, domain, white_list_users): existing_users = self.get_share_access(mover_name, share_name) white_list_users_set = set(user.lower() for user in white_list_users) users_to_remove = set(existing_users.keys()) - white_list_users_set for user in users_to_remove: self.deny_share_access(mover_name, share_name, user, domain, existing_users[user]) return users_to_remove @powermax_utils.decorate_all_methods(powermax_utils.log_enter_exit, debug_only=True) class NFSShare(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(NFSShare, self).__init__(conn, elt_maker, xml_parser, manager) self.nfs_share_map = {} def create(self, name, mover_name): share_path = '/' + name create_nfs_share_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-option', 'access=-0.0.0.0/0.0.0.0', share_path, ] try: self._execute_cmd(create_nfs_share_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to create NFS share %(name)s on mover ' 
'%(mover_name)s. Reason: %(err)s.') % {'name': name, 'mover_name': mover_name, 'err': expt}) LOG.exception(message) raise exception.EMCPowerMaxXMLAPIError(err=message) def delete(self, name, mover_name): path = '/' + name status, out = self.get(name, mover_name) if constants.STATUS_NOT_FOUND == status: LOG.warning("NFS share %s not found. Skip the deletion.", path) return delete_nfs_share_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-unexport', '-perm', path, ] try: self._execute_cmd(delete_nfs_share_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to delete NFS share %(name)s on ' '%(mover_name)s. Reason: %(err)s.') % {'name': name, 'mover_name': mover_name, 'err': expt}) LOG.exception(message) raise exception.EMCPowerMaxXMLAPIError(err=message) self.nfs_share_map.pop(name) def get(self, name, mover_name, force=False, check_exit_code=False): if name in self.nfs_share_map and not force: return constants.STATUS_OK, self.nfs_share_map[name] path = '/' + name nfs_share = { "mover_name": '', "path": '', 'AccessHosts': [], 'RwHosts': [], 'RoHosts': [], 'RootHosts': [], 'readOnly': '', } nfs_query_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-P', 'nfs', '-list', path, ] try: out, err = self._execute_cmd(nfs_query_cmd, check_exit_code=check_exit_code) except processutils.ProcessExecutionError as expt: dup_msg = (r'%(mover_name)s : No such file or directory' % {'mover_name': mover_name}) if re.search(dup_msg, expt.stdout): LOG.warning("NFS share %s not found.", name) return constants.STATUS_NOT_FOUND, None else: message = (_('Failed to list NFS share %(name)s on ' '%(mover_name)s. 
Reason: %(err)s.') % {'name': name, 'mover_name': mover_name, 'err': expt}) LOG.exception(message) raise exception.EMCPowerMaxXMLAPIError(err=message) re_exports = r'%s\s*:\s*\nexport\s*(.*)\n' % mover_name m = re.search(re_exports, out) if m is not None: nfs_share['path'] = path nfs_share['mover_name'] = mover_name export = m.group(1) fields = export.split(" ") for field in fields: field = field.strip() if field.startswith('rw='): nfs_share['RwHosts'] = powermax_utils.parse_ipaddr( field[3:]) elif field.startswith('access='): nfs_share['AccessHosts'] = powermax_utils.parse_ipaddr( field[7:]) elif field.startswith('root='): nfs_share['RootHosts'] = powermax_utils.parse_ipaddr( field[5:]) elif field.startswith('ro='): nfs_share['RoHosts'] = powermax_utils.parse_ipaddr( field[3:]) self.nfs_share_map[name] = nfs_share else: return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.nfs_share_map[name] def allow_share_access(self, share_name, host_ip, mover_name, access_level=const.ACCESS_LEVEL_RW): @utils.synchronized('emc-shareaccess-' + share_name) def do_allow_access(share_name, host_ip, mover_name, access_level): status, share = self.get(share_name, mover_name) if constants.STATUS_NOT_FOUND == status: message = (_('NFS share %s not found.') % share_name) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) changed = False rwhosts = share['RwHosts'] rohosts = share['RoHosts'] host_ip = powermax_utils.convert_ipv6_format_if_needed(host_ip) if access_level == const.ACCESS_LEVEL_RW: if host_ip not in rwhosts: rwhosts.append(host_ip) changed = True if host_ip in rohosts: rohosts.remove(host_ip) changed = True if access_level == const.ACCESS_LEVEL_RO: if host_ip not in rohosts: rohosts.append(host_ip) changed = True if host_ip in rwhosts: rwhosts.remove(host_ip) changed = True roothosts = share['RootHosts'] if host_ip not in roothosts: roothosts.append(host_ip) changed = True accesshosts = share['AccessHosts'] if host_ip not in 
accesshosts: accesshosts.append(host_ip) changed = True if not changed: LOG.debug("%(host)s is already in access list of share " "%(name)s.", {'host': host_ip, 'name': share_name}) else: path = '/' + share_name self._set_share_access(path, mover_name, rwhosts, rohosts, roothosts, accesshosts) # Update self.nfs_share_map self.get(share_name, mover_name, force=True, check_exit_code=True) do_allow_access(share_name, host_ip, mover_name, access_level) def deny_share_access(self, share_name, host_ip, mover_name): @utils.synchronized('emc-shareaccess-' + share_name) def do_deny_access(share_name, host_ip, mover_name): status, share = self.get(share_name, mover_name) if constants.STATUS_OK != status: message = (_('Query nfs share %(path)s failed. ' 'Reason %(err)s.') % {'path': share_name, 'err': share}) LOG.error(message) raise exception.EMCPowerMaxXMLAPIError(err=message) changed = False rwhosts = set(share['RwHosts']) if host_ip in rwhosts: rwhosts.remove(host_ip) changed = True roothosts = set(share['RootHosts']) if host_ip in roothosts: roothosts.remove(host_ip) changed = True accesshosts = set(share['AccessHosts']) if host_ip in accesshosts: accesshosts.remove(host_ip) changed = True rohosts = set(share['RoHosts']) if host_ip in rohosts: rohosts.remove(host_ip) changed = True if not changed: LOG.debug("%(host)s is already in access list of share " "%(name)s.", {'host': host_ip, 'name': share_name}) else: path = '/' + share_name self._set_share_access(path, mover_name, rwhosts, rohosts, roothosts, accesshosts) # Update self.nfs_share_map self.get(share_name, mover_name, force=True, check_exit_code=True) do_deny_access(share_name, host_ip, mover_name) def clear_share_access(self, share_name, mover_name, white_list_hosts): @utils.synchronized('emc-shareaccess-' + share_name) def do_clear_access(share_name, mover_name, white_list_hosts): def hosts_to_remove(orig_list): if white_list_hosts is None: ret = set() else: ret = 
set(white_list_hosts).intersection(set(orig_list)) return ret status, share = self.get(share_name, mover_name) if constants.STATUS_OK != status: message = (_('Query nfs share %(path)s failed. ' 'Reason %(err)s.') % {'path': share_name, 'err': status}) raise exception.EMCPowerMaxXMLAPIError(err=message) self._set_share_access('/' + share_name, mover_name, hosts_to_remove(share['RwHosts']), hosts_to_remove(share['RoHosts']), hosts_to_remove(share['RootHosts']), hosts_to_remove(share['AccessHosts'])) # Update self.nfs_share_map self.get(share_name, mover_name, force=True, check_exit_code=True) do_clear_access(share_name, mover_name, white_list_hosts) def _set_share_access(self, path, mover_name, rw_hosts, ro_hosts, root_hosts, access_hosts): if access_hosts is None: access_hosts = set() try: access_hosts.remove('-0.0.0.0/0.0.0.0') except (ValueError, KeyError): pass access_str = ('access=%(access)s' % {'access': ':'.join( list(access_hosts) + ['-0.0.0.0/0.0.0.0'])}) if root_hosts: access_str += (',root=%(root)s' % {'root': ':'.join(root_hosts)}) if rw_hosts: access_str += ',rw=%(rw)s' % {'rw': ':'.join(rw_hosts)} if ro_hosts: access_str += ',ro=%(ro)s' % {'ro': ':'.join(ro_hosts)} set_nfs_share_access_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-ignore', '-option', access_str, path, ] try: self._execute_cmd(set_nfs_share_access_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to set NFS share %(name)s access on ' '%(mover_name)s. 
Reason: %(err)s.') % {'name': path[1:], 'mover_name': mover_name, 'err': expt}) LOG.exception(message) raise exception.EMCPowerMaxXMLAPIError(err=message) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9056718 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerscale/0000775000175000017500000000000000000000000024470 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerscale/__init__.py0000664000175000017500000000000000000000000026567 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerscale/powerscale.py0000664000175000017500000005675000000000000027223 0ustar00zuulzuul00000000000000# Copyright 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ PowerScale specific NAS backend plugin. 
""" import os from oslo_config import cfg from oslo_log import log from oslo_utils import units from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.plugins import base from manila.share.drivers.dell_emc.plugins.powerscale import powerscale_api """Version history: 0.1.0 - Initial version 1.0.0 - Fix Http auth issue, SSL verification error and etc 1.0.1 - Add support for update share stats 1.0.2 - Add support for ensure shares 1.0.3 - Add support for thin provisioning 1.0.4 - Rename isilon to powerscale """ VERSION = "1.0.4" CONF = cfg.CONF LOG = log.getLogger(__name__) POWERSCALE_OPTS = [ cfg.StrOpt('powerscale_dir_permission', default='0777', help='Predefined ACL value or POSIX mode ' 'for PowerScale directories.'), cfg.IntOpt('powerscale_threshold_limit', default=0, help='Specifies the threshold limit (in percentage) ' 'for triggering SmartQuotas alerts in PowerScale') ] class PowerScaleStorageConnection(base.StorageConnection): """Implements PowerScale specific functionality for EMC Manila driver.""" def __init__(self, *args, **kwargs): super(PowerScaleStorageConnection, self).__init__(*args, **kwargs) LOG.debug('Setting up attributes for Manila ' 'Dell PowerScale Driver.') if 'configuration' in kwargs: kwargs['configuration'].append_config_values(POWERSCALE_OPTS) self._server = None self._port = None self._username = None self._password = None self._server_url = None self._root_dir = None self._verify_ssl_cert = None self._ssl_cert_path = None self._containers = {} self._shares = {} self._snapshots = {} self._powerscale_api = None self.driver_handles_share_servers = False self.ipv6_implemented = True # props for share status update self.reserved_percentage = None self.reserved_snapshot_percentage = None self.reserved_share_extend_percentage = None self.max_over_subscription_ratio = None self._threshold_limit = 0 def _get_container_path(self, share): """Return path to a 
container.""" return os.path.join(self._root_dir, share['name']) def create_share(self, context, share, share_server): """Is called to create share.""" LOG.debug(f'Creating {share["share_proto"]} share.') if share['share_proto'] == 'NFS': location = self._create_nfs_share(share) elif share['share_proto'] == 'CIFS': location = self._create_cifs_share(share) else: message = (_('Unsupported share protocol: %(proto)s.') % {'proto': share['share_proto']}) LOG.error(message) raise exception.InvalidShare(reason=message) # apply directory quota based on share size max_share_size = share['size'] * units.Gi self._powerscale_api.quota_create( self._get_container_path(share), 'directory', max_share_size) return location def create_share_from_snapshot(self, context, share, snapshot, share_server): """Creates a share from the snapshot.""" LOG.debug(f'Creating {share["share_proto"]} share from snapshot.') # Create share at new location location = self.create_share(context, share, share_server) # Clone snapshot to new location fq_target_dir = self._get_container_path(share) self._powerscale_api.clone_snapshot(snapshot['name'], fq_target_dir) return location def _create_nfs_share(self, share): """Is called to create nfs share.""" LOG.debug(f'Creating NFS share {share["name"]}.') # Create directory container_path = self._get_container_path(share) self._create_directory(container_path) # Create nfs share share_created = self._powerscale_api.create_nfs_export(container_path) if not share_created: message = ( _('The requested NFS share "%(share)s" was not created.') % {'share': share['name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) location = self._get_location(self._format_nfs_path(container_path)) return location def _create_cifs_share(self, share): """Is called to create cifs share.""" LOG.debug(f'Creating CIFS share {share["name"]}.') # Create directory container_path = self._get_container_path(share) self._create_directory(container_path) # Create 
smb share share_created = self._powerscale_api.create_smb_share( share['name'], container_path) if not share_created: message = ( _('The requested CIFS share "%(share)s" was not created.') % {'share': share['name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) location = self._get_location(self._format_smb_path(share['name'])) return location def _create_directory(self, path, recursive=False): """Is called to create a directory.""" dir_created = self._powerscale_api.create_directory(path, recursive) if not dir_created: message = ( _('Failed to create directory "%(dir)s".') % {'dir': path}) LOG.error(message) raise exception.ShareBackendException(msg=message) def create_snapshot(self, context, snapshot, share_server): """Is called to create snapshot.""" LOG.debug(f'Creating snapshot {snapshot["name"]}.') snapshot_path = os.path.join(self._root_dir, snapshot['share_name']) snap_created = self._powerscale_api.create_snapshot( snapshot['name'], snapshot_path) if not snap_created: message = ( _('Failed to create snapshot "%(snap)s".') % {'snap': snapshot['name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) def delete_share(self, context, share, share_server): """Is called to remove share.""" LOG.debug(f'Deleting {share["share_proto"]} share.') if share['share_proto'] == 'NFS': self._delete_nfs_share(share) elif share['share_proto'] == 'CIFS': self._delete_cifs_share(share) else: message = (_('Unsupported share type: %(type)s.') % {'type': share['share_proto']}) LOG.warning(message) return dir_path = self._get_container_path(share) # remove quota self._delete_quota(dir_path) # remove directory self._delete_directory(dir_path) def _delete_quota(self, path): """Is called to remove quota.""" quota = self._powerscale_api.quota_get(path, 'directory') if quota: LOG.debug(f'Removing quota {quota["id"]}') deleted = self._powerscale_api.delete_quota(quota['id']) if not deleted: message = ( _('Failed to delete quota 
"%(quota_id)s" for ' 'directory "%(dir)s".') % {'quota_id': quota['id'], 'dir': path}) LOG.error(message) else: LOG.warning(f'Quota not found for {path}') def _delete_directory(self, path): """Is called to remove directory.""" path_exist = self._powerscale_api.is_path_existent(path) if path_exist: LOG.debug(f'Removing directory {path}') deleted = self._powerscale_api.delete_path(path, recursive=True) if not deleted: message = ( _('Failed to delete directory "%(dir)s".') % {'dir': path}) LOG.error(message) else: LOG.warning(f'Directory not found for {path}') def _delete_nfs_share(self, share): """Is called to remove nfs share.""" share_id = self._powerscale_api.lookup_nfs_export( self._get_container_path(share)) if share_id is None: lw = ('Attempted to delete NFS Share "%s", but the share does ' 'not appear to exist.') LOG.warning(lw, share['name']) else: # attempt to delete the share export_deleted = self._powerscale_api.delete_nfs_share(share_id) if not export_deleted: message = _('Error deleting NFS share: %s') % share['name'] LOG.error(message) raise exception.ShareBackendException(msg=message) def _delete_cifs_share(self, share): """Is called to remove CIFS share.""" smb_share = self._powerscale_api.lookup_smb_share(share['name']) if smb_share is None: lw = ('Attempted to delete CIFS Share "%s", but the share does ' 'not appear to exist.') LOG.warning(lw, share['name']) else: share_deleted = self._powerscale_api.delete_smb_share( share['name']) if not share_deleted: message = _('Error deleting CIFS share: %s') % share['name'] LOG.error(message) raise exception.ShareBackendException(msg=message) def delete_snapshot(self, context, snapshot, share_server): """Is called to remove snapshot.""" LOG.debug(f'Deleting snapshot {snapshot["name"]}') deleted = self._powerscale_api.delete_snapshot(snapshot['name']) if not deleted: message = ( _('Failed to delete snapshot "%(snap)s".') % {'snap': snapshot['name']}) LOG.error(message) raise 
exception.ShareBackendException(msg=message) def ensure_share(self, context, share, share_server): """Invoked to ensure that share is exported.""" raise NotImplementedError() def extend_share(self, share, new_size, share_server=None): """Extends a share.""" LOG.debug('Extending share %(name)s to %(size)sG.', { 'name': share['name'], 'size': new_size }) new_quota_size = new_size * units.Gi self._powerscale_api.quota_set( self._get_container_path(share), 'directory', new_quota_size) def allow_access(self, context, share, access, share_server): """Allow access to the share.""" raise NotImplementedError() def deny_access(self, context, share, access, share_server): """Deny access to the share.""" raise NotImplementedError() def check_for_setup_error(self): """Check for setup error.""" def connect(self, emc_share_driver, context): """Connect to an PowerScale cluster.""" LOG.debug('Reading configuration parameters for Manila' ' Dell PowerScale Driver.') config = emc_share_driver.configuration self._server = config.safe_get("emc_nas_server") self._port = config.safe_get("emc_nas_server_port") self._username = config.safe_get("emc_nas_login") self._password = config.safe_get("emc_nas_password") self._root_dir = config.safe_get("emc_nas_root_dir") self._threshold_limit = config.safe_get("powerscale_threshold_limit") # validate IP, username and password if not all([self._server, self._username, self._password]): message = _("REST server IP, username and password" " must be specified.") raise exception.BadConfigurationException(reason=message) self._server_url = f'https://{self._server}:{self._port}' self._verify_ssl_cert = config.safe_get("emc_ssl_cert_verify") if self._verify_ssl_cert: self._ssl_cert_path = config.safe_get("emc_ssl_cert_path") self._dir_permission = config.safe_get("powerscale_dir_permission") self._powerscale_api = powerscale_api.PowerScaleApi( self._server_url, self._username, self._password, self._verify_ssl_cert, self._ssl_cert_path, 
self._dir_permission, self._threshold_limit) if not self._powerscale_api.is_path_existent(self._root_dir): self._create_directory(self._root_dir, recursive=True) # configuration for share status update self.reserved_percentage = config.safe_get( 'reserved_share_percentage') if self.reserved_percentage is None: self.reserved_percentage = 0 self.reserved_snapshot_percentage = config.safe_get( 'reserved_share_from_snapshot_percentage') if self.reserved_snapshot_percentage is None: self.reserved_snapshot_percentage = self.reserved_percentage self.reserved_share_extend_percentage = config.safe_get( 'reserved_share_extend_percentage') if self.reserved_share_extend_percentage is None: self.reserved_share_extend_percentage = self.reserved_percentage self.max_over_subscription_ratio = config.safe_get( 'max_over_subscription_ratio') def update_share_stats(self, stats_dict): """Retrieve stats info from share.""" stats_dict['driver_version'] = VERSION stats_dict['storage_protocol'] = 'NFS_CIFS' # PowerScale does not support pools. # To align with manila scheduler 'pool-aware' strategic, # report with one pool structure. 
pool_stat = { 'pool_name': stats_dict['share_backend_name'], 'qos': False, 'reserved_percentage': self.reserved_percentage, 'reserved_snapshot_percentage': self.reserved_snapshot_percentage, 'reserved_share_extend_percentage': self.reserved_share_extend_percentage, 'max_over_subscription_ratio': self.max_over_subscription_ratio, 'thin_provisioning': True, } spaces = self._powerscale_api.get_space_stats() if spaces: pool_stat['total_capacity_gb'] = spaces['total'] // units.Gi pool_stat['free_capacity_gb'] = spaces['free'] // units.Gi allocated_space = self._powerscale_api.get_allocated_space() pool_stat['allocated_capacity_gb'] = allocated_space stats_dict['pools'] = [pool_stat] def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" # TODO(Shaun Edwards) return 0 def setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" # TODO(Shaun Edwards): Look into supporting share servers def teardown_server(self, server_details, security_services=None): """Teardown share server.""" # TODO(Shaun Edwards): Look into supporting share servers def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Update share access.""" LOG.debug(f'Updaing access for share {share["name"]}.') if share['share_proto'] == 'NFS': state_map = self._update_access_nfs(share, access_rules) if share['share_proto'] == 'CIFS': state_map = self._update_access_cifs(share, access_rules) return state_map def _update_access_nfs(self, share, access_rules): """Updates access on a NFS share.""" nfs_rw_ips = set() nfs_ro_ips = set() rule_state_map = {} for rule in access_rules: rule_state_map[rule['access_id']] = { 'state': 'error' } for rule in access_rules: if rule['access_level'] == const.ACCESS_LEVEL_RW: nfs_rw_ips.add(rule['access_to']) elif rule['access_level'] == const.ACCESS_LEVEL_RO: nfs_ro_ips.add(rule['access_to']) export_id = 
self._powerscale_api.lookup_nfs_export( self._get_container_path(share)) if export_id is None: # share does not exist on backend (set all rules to error state) message = _('Failed to update access for NFS share %s: ' 'share not found.') % share['name'] LOG.error(message) return rule_state_map r = self._powerscale_api.modify_nfs_export_access( export_id, ro_ips=list(nfs_ro_ips), rw_ips=list(nfs_rw_ips)) if not r: return rule_state_map # if we finish the bulk rule update with no error set rules to active for rule in access_rules: rule_state_map[rule['access_id']]['state'] = 'active' return rule_state_map def _update_access_cifs(self, share, access_rules): """Update access on a CIFS share.""" rule_state_map = {} ip_access_rules = [] user_access_rules = [] for rule in access_rules: if rule['access_type'] == 'ip': ip_access_rules.append(rule) elif rule['access_type'] == 'user': user_access_rules.append(rule) else: message = (_("Access type %(type)s is not supported for CIFS." ) % {'type': rule['access_type']}) LOG.error(message) rule_state_map.update({rule['access_id']: {'state': 'error'}}) ips = self._get_cifs_ip_list(ip_access_rules, rule_state_map) user_permissions = self._get_cifs_user_permissions( user_access_rules, rule_state_map) share_updated = self._powerscale_api.modify_smb_share_access( share['name'], host_acl=ips, permissions=user_permissions) if not share_updated: message = ( _( 'Failed to update access rules for CIFS share ' '"%(share)s".' 
) % {'share': share['name']} ) LOG.error(message) for rule in access_rules: rule_state_map[rule['access_id']] = { 'state': 'error' } return rule_state_map def _get_cifs_ip_list(self, access_rules, rule_state_map): """Get CIFS ip list.""" cifs_ips = [] for rule in access_rules: if rule['access_level'] != const.ACCESS_LEVEL_RW: message = ('Only RW access level is supported ' 'for CIFS IP access.') LOG.error(message) rule_state_map.update({rule['access_id']: {'state': 'error'}}) continue cifs_ips.append('allow:' + rule['access_to']) rule_state_map.update({rule['access_id']: {'state': 'active'}}) return cifs_ips def _get_cifs_user_permissions(self, access_rules, rule_state_map): """Get CIFS user permissions.""" cifs_user_permissions = [] for rule in access_rules: if rule['access_level'] == const.ACCESS_LEVEL_RW: smb_permission = powerscale_api.SmbPermission.rw elif rule['access_level'] == const.ACCESS_LEVEL_RO: smb_permission = powerscale_api.SmbPermission.ro else: message = ('Only RW and RO access levels are supported ' 'for CIFS user access.') LOG.error(message) rule_state_map.update({rule['access_id']: {'state': 'error'}}) continue user_sid = self._powerscale_api.get_user_sid(rule['access_to']) if user_sid: cifs_user_permissions.append({ 'permission': smb_permission.value, 'permission_type': 'allow', 'trustee': user_sid }) rule_state_map.update({rule['access_id']: {'state': 'active'}}) else: message = _('Failed to get user sid by %(user)s.' % {'user': rule['access_to']}) LOG.error(message) rule_state_map.update({rule['access_id']: {'state': 'error'}}) return cifs_user_permissions def get_backend_info(self, context): """Get driver and array configuration parameters. :returns: A dictionary containing driver-specific info. 
""" LOG.debug("Retrieving PowerScale backend info.") cluster_version = self._powerscale_api.get_cluster_version() return {'driver_version': VERSION, 'cluster_version': cluster_version, 'rest_server': self._server, 'rest_port': self._port} def ensure_shares(self, context, shares): """Invoked to ensure that shares are exported. :shares: A list of all shares for updates. :returns: None or a dictionary of updates in the format. """ LOG.debug("Ensuring PowerScale shares.") updates = {} for share in shares: if share['share_proto'] == 'NFS': container_path = self._get_container_path(share) share_id = self._powerscale_api.lookup_nfs_export( container_path) if share_id: location = self._format_nfs_path(container_path) updates[share['id']] = { 'export_locations': [location], 'status': 'available', 'reapply_access_rules': True, } else: LOG.warning(f'NFS Share {share["name"]} is not found.') elif share['share_proto'] == 'CIFS': smb_share = self._powerscale_api.lookup_smb_share( share['name']) if smb_share: location = self._format_smb_path(share['name']) updates[share['id']] = { 'export_locations': [location], 'status': 'available', 'reapply_access_rules': True, } else: LOG.warning(f'CIFS Share {share["name"]} is not found.') if share['id'] not in updates: updates[share['id']] = { 'export_locations': [], 'status': 'error', 'reapply_access_rules': False, } return updates def _format_smb_path(self, share_name): return '\\\\{0}\\{1}'.format(self._server, share_name) def _format_nfs_path(self, container_path): return '{0}:{1}'.format(self._server, container_path) def _get_location(self, path): export_locations = [{'path': path, 'is_admin_only': False, 'metadata': {"preferred": True}}] return export_locations ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerscale/powerscale_api.py0000664000175000017500000004462200000000000030047 0ustar00zuulzuul00000000000000# 
Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import enum import functools from oslo_log import log from oslo_serialization import jsonutils import requests from manila import exception from manila.i18n import _ LOG = log.getLogger(__name__) class PowerScaleApi(object): def __init__(self, api_url, username, password, verify_ssl_cert=False, ssl_cert_path=None, dir_permission=None, threshold_limit=0): self.host_url = api_url self.session = requests.session() self.username = username self.password = password self.verify_ssl_cert = verify_ssl_cert self.certificate_path = ssl_cert_path self.dir_permission = dir_permission self.threshold_limit = threshold_limit # Create session self.session_token = None self.csrf_token = None LOG.debug("Login to PowerScale OneFS during initialization.") login = self.create_session(username, password) if not login: message = _("Failed to login to PowerScale OneFS.") raise exception.BadConfigurationException(reason=message) @property def _verify_cert(self): verify_cert = self.verify_ssl_cert if self.verify_ssl_cert and self.certificate_path: verify_cert = self.certificate_path return verify_cert def create_session(self, username, password): """Create a session. 
Update session token and csrf token.""" headers = {"Content-type": "application/json"} url = self.host_url + '/session/1/session' data = { "username": username, "password": password, "services": ["platform", "namespace"] } r = self.session.request( 'POST', url, headers=headers, data=jsonutils.dumps(data), verify=self._verify_cert) if r.status_code == requests.codes.created: self.session_token = r.cookies['isisessid'] self.csrf_token = r.cookies['isicsrf'] return True message = (_('Failed to create session. ' 'Status_code="%(code)s", body="%(body)s".') % {'code': r.status_code, 'body': r.text}) LOG.error(message) return False def create_directory(self, container_path, recursive=False): """Create a directory.""" headers = {"x-isi-ifs-target-type": "container"} if self.dir_permission: headers.update({"x-isi-ifs-access-control": self.dir_permission}) url = (self.host_url + "/namespace" + container_path + '?recursive=' + str(recursive)) r = self.send_put_request(url, headers=headers) return r.status_code == 200 def clone_snapshot(self, snapshot_name, fq_target_dir): self.create_directory(fq_target_dir) snapshot = self.get_snapshot(snapshot_name) snapshot_path = snapshot['path'] # remove /ifs from start of path relative_snapshot_path = snapshot_path[4:] fq_snapshot_path = ('/ifs/.snapshot/' + snapshot_name + relative_snapshot_path) self._clone_directory_contents(fq_snapshot_path, fq_target_dir, snapshot_name, relative_snapshot_path) def _clone_directory_contents(self, fq_source_dir, fq_target_dir, snapshot_name, relative_path): dir_listing = self.get_directory_listing(fq_source_dir) for item in dir_listing['children']: name = item['name'] source_item_path = fq_source_dir + '/' + name new_relative_path = relative_path + '/' + name dest_item_path = fq_target_dir + '/' + name if item['type'] == 'container': # create the container name in the target dir & clone dir self.create_directory(dest_item_path) self._clone_directory_contents(source_item_path, dest_item_path, 
snapshot_name, new_relative_path) elif item['type'] == 'object': self.clone_file_from_snapshot('/ifs' + new_relative_path, dest_item_path, snapshot_name) def clone_file_from_snapshot(self, fq_file_path, fq_dest_path, snapshot_name): headers = {'x-isi-ifs-copy-source': '/namespace' + fq_file_path} snapshot_suffix = '&snapshot=' + snapshot_name url = (self.host_url + '/namespace' + fq_dest_path + '?clone=true' + snapshot_suffix) self.send_put_request(url, headers=headers) def get_directory_listing(self, fq_dir_path): url = self.host_url + '/namespace' + fq_dir_path + '?detail=default' r = self.send_get_request(url) r.raise_for_status() return r.json() def is_path_existent(self, resource_path): url = self.host_url + '/namespace' + resource_path r = self.send_head_request(url) if r.status_code == 200: return True elif r.status_code == 404: return False else: r.raise_for_status() def get_snapshot(self, snapshot_name): r = self.send_get_request( self.host_url + '/platform/1/snapshot/snapshots/' + snapshot_name) snapshot_json = r.json() if r.status_code == 200: return snapshot_json['snapshots'][0] elif r.status_code == 404: return None else: r.raise_for_status() def get_snapshots(self): r = self.send_get_request( self.host_url + '/platform/1/snapshot/snapshots') if r.status_code == 200: return r.json() else: r.raise_for_status() def lookup_nfs_export(self, share_path): '''Retrieve NFS export by directory path.''' r = self.send_get_request( self.host_url + '/platform/12/protocols/nfs/exports', params={'path': share_path}) if r.status_code == 200 and r.json()['total'] > 0: return r.json()['exports'][0]['id'] return None def get_nfs_export(self, export_id): response = self.send_get_request( self.host_url + '/platform/1/protocols/nfs/exports/' + str(export_id)) if response.status_code == 200: return response.json()['exports'][0] else: return None def lookup_smb_share(self, share_name): response = self.send_get_request( self.host_url + '/platform/1/protocols/smb/shares/' + 
share_name) if response.status_code == 200: return response.json()['shares'][0] else: return None def create_nfs_export(self, export_path): """Creates an NFS export using the Platform API. :param export_path: a string specifying the desired export path :return: "True" if created successfully; "False" otherwise """ data = {'paths': [export_path]} url = self.host_url + '/platform/1/protocols/nfs/exports' response = self.send_post_request(url, data=data) return response.status_code == 201 def modify_nfs_export_access(self, share_id, ro_ips=None, rw_ips=None): """Modify access on an existing NFS export. :param share_id: the ID of the NFS export :param ro_ips: a list of IP addresses that should have read-only access :param rw_ips: a list of IP addresses that should have read-write access :return: a boolean indicating whether the modification was successful """ export_params = {} if ro_ips is not None: export_params['read_only_clients'] = ro_ips if rw_ips is not None: export_params['clients'] = rw_ips url = '{0}/platform/1/protocols/nfs/exports/{1}'.format( self.host_url, share_id) resp = self.send_put_request(url, data=export_params) return resp.status_code == 204 def create_smb_share(self, share_name, share_path): """Creates an SMB/CIFS share. 
:param share_name: the name of the CIFS share :param share_path: the path associated with the CIFS share :return: "True" if the share created successfully; returns "False" otherwise """ data = {'permissions': []} data['name'] = share_name data['path'] = share_path url = self.host_url + '/platform/1/protocols/smb/shares' response = self.send_post_request(url, data=data) return response.status_code == 201 def create_snapshot(self, snapshot_name, snapshot_path): """Creates a snapshot.""" data = {'name': snapshot_name, 'path': snapshot_path} r = self.send_post_request( self.host_url + '/platform/1/snapshot/snapshots', data=data) return r.status_code == 201 def delete_path(self, fq_resource_path, recursive=False): """Deletes a file or folder.""" r = self.send_delete_request( self.host_url + '/namespace' + fq_resource_path + '?recursive=' + str(recursive)) return r.status_code == 204 def delete_nfs_share(self, share_number): response = self.send_delete_request( self.host_url + '/platform/1/protocols/nfs/exports' + '/' + str(share_number)) return response.status_code == 204 def delete_smb_share(self, share_name): url = self.host_url + '/platform/1/protocols/smb/shares/' + share_name response = self.send_delete_request(url) return response.status_code == 204 def delete_snapshot(self, snapshot_name): response = self.send_delete_request( '{0}/platform/1/snapshot/snapshots/{1}' .format(self.host_url, snapshot_name)) return response.status_code == 204 def quota_create(self, path, quota_type, size): thresholds = {'hard': size} if self.threshold_limit > 0: advisory_size = round((size * self.threshold_limit) / 100) thresholds['advisory'] = int(advisory_size) data = { 'path': path, 'type': quota_type, 'include_snapshots': False, 'thresholds_include_overhead': False, 'enforced': True, 'thresholds': thresholds, } response = self.send_post_request( '{0}/platform/1/quota/quotas'.format(self.host_url), data=data) response.raise_for_status() def quota_get(self, path, quota_type): 
response = self.send_get_request( '{0}/platform/1/quota/quotas?path={1}'.format(self.host_url, path), ) if response.status_code == 404: return None elif response.status_code != 200: response.raise_for_status() json = response.json() len_returned_quotas = len(json['quotas']) if len_returned_quotas == 0: return None elif len_returned_quotas == 1: return json['quotas'][0] else: message = (_('Greater than one quota returned when querying ' 'quotas associated with share path: %(path)s .') % {'path': path}) raise exception.ShareBackendException(msg=message) def quota_modify_size(self, quota_id, new_size): data = {'thresholds': {'hard': new_size}} if self.threshold_limit > 0: advisory_size = round((new_size * self.threshold_limit) / 100) data.get('thresholds')['advisory'] = int(advisory_size) response = self.send_put_request( '{0}/platform/1/quota/quotas/{1}'.format(self.host_url, quota_id), data=data ) response.raise_for_status() def quota_set(self, path, quota_type, size): """Sets a quota of the given type and size on the given path.""" quota_json = self.quota_get(path, quota_type) if quota_json is None: self.quota_create(path, quota_type, size) else: # quota already exists, modify it's size quota_id = quota_json['id'] self.quota_modify_size(quota_id, size) def delete_quota(self, quota_id): response = self.send_delete_request( '{0}/platform/1/quota/quotas/{1}'.format(self.host_url, quota_id)) return response.status_code == 204 def modify_smb_share_access(self, share_name, host_acl=None, permissions=None): """Modifies SMB share access :param share_name: the name of the SMB share :param host_acl: host access control list :param permissions: SMB permissions :return: "True" if access updated successfully; otherwise "False" """ data = {} if host_acl is not None: data['host_acl'] = host_acl if permissions is not None: data['permissions'] = permissions url = ('{0}/platform/1/protocols/smb/shares/{1}' .format(self.host_url, share_name)) r = self.send_put_request(url, data=data) 
return r.status_code == 204 def get_user_sid(self, user): user_json = self.auth_lookup_user(user) if user_json: auth_mappings = user_json['mapping'] if len(auth_mappings) > 1: message = (_('More than one mapping found for user "%(user)s".' ) % {'user': user}) LOG.error(message) return None user_sid = auth_mappings[0]['user']['sid'] return user_sid def auth_lookup_user(self, user_string): url = '{0}/platform/1/auth/mapping/users/lookup'.format(self.host_url) r = self.send_get_request(url, params={"user": user_string}) if r.status_code == 200: return r.json() LOG.error(f'Failed to lookup user {user_string}.') def get_space_stats(self): url = '{0}/platform/1/statistics/current'.format(self.host_url) params = {'keys': 'ifs.bytes.free,ifs.bytes.total,ifs.bytes.used'} r = self.send_get_request(url, params=params) if r.status_code != 200: raise exception.ShareBackendException( msg=_('Failed to get statistics from PowerScale.') ) stats = r.json()['stats'] spaces = {} for stat in stats: if stat['key'] == 'ifs.bytes.total': spaces['total'] = stat['value'] elif stat['key'] == 'ifs.bytes.free': spaces['free'] = stat['value'] elif stat['key'] == 'ifs.bytes.used': spaces['used'] = stat['value'] return spaces def get_allocated_space(self): url = '{0}/platform/1/quota/quotas'.format(self.host_url) r = self.send_get_request(url) allocated_capacity = 0 if r.status_code != 200: raise exception.ShareBackendException( msg=_('Failed to get share quotas from PowerScale.') ) quotas = r.json()['quotas'] for quota in quotas: if quota['thresholds']['hard'] is not None: allocated_capacity += quota['thresholds']['hard'] if allocated_capacity > 0: return round(allocated_capacity / (1024 ** 3), 2) return allocated_capacity def get_cluster_version(self): url = '{0}/platform/12/cluster/version'.format(self.host_url) r = self.send_get_request(url) if r.status_code != 200: raise exception.ShareBackendException( msg=_('Failed to get cluster version from PowerScale.') ) return 
r.json()['nodes'][0]['release'] def request(self, method, url, headers=None, data=None, params=None): if data is not None: data = jsonutils.dumps(data) cookies = {'isisessid': self.session_token} csrf_headers = {'X-CSRF-Token': self.csrf_token, 'referer': self.host_url} if headers: headers.update(csrf_headers) else: headers = csrf_headers self._log_request(method, url, data, params) r = self.session.request( method, url, cookies=cookies, headers=headers, data=data, verify=self._verify_cert, params=params) self._log_response(r) # Unauthorized, login again if r.status_code == 401: login = self.create_session(self.username, self.password) # Resend the request once login is successful if login: self._log_request(method, url, data, params) r = self.session.request( method, url, cookies=cookies, headers=headers, data=data, verify=self._verify_cert, params=params) self._log_response(r) return r def _log_request(self, method, url, data=None, params=None): req_dict = {} if data: req_dict['data'] = data if params: req_dict['params'] = params if req_dict: LOG.debug(f'Request: {method} {url} {req_dict}') else: LOG.debug(f'Request: {method} {url}') def _log_response(self, r): try: body = r.json() except requests.exceptions.JSONDecodeError: body = r.text LOG.debug(f'Response: status_code={r.status_code} body={body}') send_get_request = functools.partialmethod(request, "GET") send_post_request = functools.partialmethod(request, "POST") send_put_request = functools.partialmethod(request, "PUT") send_delete_request = functools.partialmethod(request, "DELETE") send_head_request = functools.partialmethod(request, "HEAD") class SmbPermission(enum.Enum): full = 'full' rw = 'change' ro = 'read' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9096718 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerstore/0000775000175000017500000000000000000000000024535 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerstore/__init__.py0000664000175000017500000000000000000000000026634 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerstore/client.py0000664000175000017500000003512300000000000026371 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""REST client for Dell EMC PowerStore Manila Driver.""" import functools import json from oslo_log import log as logging from oslo_utils import strutils import requests LOG = logging.getLogger(__name__) class PowerStoreClient(object): def __init__(self, rest_ip, rest_username, rest_password, verify_certificate=False, certificate_path=None): self.rest_ip = rest_ip self.rest_username = rest_username self.rest_password = rest_password self.verify_certificate = verify_certificate self.certificate_path = certificate_path self.base_url = "https://%s/api/rest" % self.rest_ip self.ok_codes = [ requests.codes.ok, requests.codes.created, requests.codes.accepted, requests.codes.no_content, requests.codes.partial_content ] @property def _verify_cert(self): verify_cert = self.verify_certificate if self.verify_certificate and self.certificate_path: verify_cert = self.certificate_path return verify_cert def _send_request(self, method, url, payload=None, params=None, log_response_data=True): if not params: params = {} request_params = { "auth": (self.rest_username, self.rest_password), "verify": self._verify_cert, "params": params } if payload and method != "GET": request_params["data"] = json.dumps(payload) request_url = self.base_url + url r = requests.request(method, request_url, **request_params) log_level = logging.DEBUG if r.status_code not in self.ok_codes: log_level = logging.ERROR LOG.log(log_level, "REST Request: %s %s with body %s", r.request.method, r.request.url, strutils.mask_password(r.request.body)) if log_response_data or log_level == logging.ERROR: msg = "REST Response: %s with data %s" % (r.status_code, r.text) else: msg = "REST Response: %s" % r.status_code LOG.log(log_level, msg) try: response = r.json() except ValueError: response = None return r, response _send_get_request = functools.partialmethod(_send_request, "GET") _send_post_request = functools.partialmethod(_send_request, "POST") _send_patch_request = functools.partialmethod(_send_request, "PATCH") 
_send_delete_request = functools.partialmethod(_send_request, "DELETE") def get_nas_server_id(self, nas_server_name): """Retrieves the NAS server ID. :param nas_server_name: NAS server name :return: ID of the NAS server if success """ url = '/nas_server?name=eq.' + nas_server_name res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: return response[0]['id'] def get_nas_server_interfaces(self, nas_server_id): """Retrieves the NAS server ID. :param nas_server_id: NAS server ID :return: File interfaces of the NAS server if success """ url = '/nas_server/' + nas_server_id + \ '?select=current_preferred_IPv4_interface_id,' \ 'current_preferred_IPv6_interface_id,' \ 'file_interfaces(id,ip_address)' res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: preferred_IP = [response['current_preferred_IPv4_interface_id'], response['current_preferred_IPv6_interface_id']] file_interfaces = [] for i in response['file_interfaces']: file_interfaces.append({ 'ip': i['ip_address'], 'preferred': i['id'] in preferred_IP }) return file_interfaces def create_filesystem(self, nas_server_id, name, size): """Creates a filesystem. :param nas_server_id: ID of the nas_server :param name: name of the filesystem :param size: size in Byte :return: ID of the filesystem if created successfully """ payload = { "name": name, "size_total": size, "nas_server_id": nas_server_id } url = '/file_system' res, response = self._send_post_request(url, payload) if res.status_code == requests.codes.created: return response["id"] def create_nfs_export(self, filesystem_id, name): """Creates an NFS export. 
:param filesystem_id: ID of the filesystem on which the export will be created :param name: name of the NFS export :return: ID of the export if created successfully """ payload = { "file_system_id": filesystem_id, "path": "/" + str(name), "name": name } url = '/nfs_export' res, response = self._send_post_request(url, payload) if res.status_code == requests.codes.created: return response["id"] def delete_filesystem(self, filesystem_id): """Deletes a filesystem and all associated export. :param filesystem_id: ID of the filesystem to delete :return: True if deleted successfully """ url = '/file_system/' + filesystem_id res, _ = self._send_delete_request(url) return res.status_code == requests.codes.no_content def get_nfs_export_name(self, export_id): """Retrieves NFS Export name. :param export_id: ID of the NFS export :return: path of the NFS export if success """ url = '/nfs_export/' + export_id + '?select=name' res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: return response["name"] def get_nfs_export_id(self, name): """Retrieves NFS Export ID. :param name: name of the NFS export :return: id of the NFS export if success """ url = '/nfs_export?select=id&name=eq.' + name res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: return response[0]['id'] def get_filesystem_id(self, name): """Retrieves an ID for a filesystem. :param name: name of the filesystem :return: ID of the filesystem if success """ url = '/file_system?name=eq.' + name res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: return response[0]['id'] def set_export_access(self, export_id, rw_hosts, ro_hosts): """Sets the access hosts on the export. 
:param export_id: NFS export ID :param rw_hosts: a set of RW hosts :param ro_hosts: a set of RO hosts :return: True if operation succeeded """ payload = { "read_only_hosts": list(ro_hosts), "read_write_root_hosts": list(rw_hosts) } url = '/nfs_export/' + export_id res, _ = self._send_patch_request(url, payload) return res.status_code == requests.codes.no_content def resize_filesystem(self, filesystem_id, new_size): """Extends the size of a share to a new size. :param export_id: ID of the NFS export :param new_size: new size to allocate in bytes :return: True if extended successfully """ payload = { "size_total": new_size } url = '/file_system/' + filesystem_id res, response = self._send_patch_request(url, payload) if res.status_code == requests.codes.unprocessable and \ response['messages'][0]['code'] == '0xE08010080449': return False, response['messages'][0]['message_l10n'] return res.status_code == requests.codes.no_content, None def get_fsid_from_export_name(self, name): """Retieves the Filesystem ID used by an export. :param name: name of the export :return: ID of the Filesystem which owns the export """ url = '/nfs_export?select=file_system_id&name=eq.' + name res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: return response[0]['file_system_id'] def create_snapshot(self, filesystem_id, name): """Creates a snapshot of a filesystem. :param filesystem_id: ID of the filesystem :param name: name of the snapshot :return: ID of the snapshot if created successfully """ payload = { "name": name } url = '/file_system/' + filesystem_id + '/snapshot' res, response = self._send_post_request(url, payload) if res.status_code == requests.codes.created: return response["id"] def restore_snapshot(self, snapshot_id): """Restore a snapshot of a filesystem. 
:param snapshot_id: ID of the snapshot :return: True if operation succeeded """ url = '/file_system/' + snapshot_id + '/restore' res, _ = self._send_post_request(url) return res.status_code == requests.codes.no_content def clone_snapshot(self, snapshot_id, name): """Clone a snapshot of a filesystem. :param snapshot_id: ID of the snapshot :param name: name the snapshot :return: ID of the clone if created successfully """ payload = { "name": name } url = '/file_system/' + snapshot_id + '/clone' res, response = self._send_post_request(url, payload) if res.status_code == requests.codes.created: return response["id"] def get_cluster_id(self): """Get cluster id. :return: ID of the cluster """ url = '/cluster' res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: return response[0]["id"] def retreive_cluster_capacity_metrics(self, cluster_id): """Retreive cluster capacity metrics. :param cluster_id: ID of the cluster :return: total and used capacity in Byte """ payload = { "entity": "space_metrics_by_cluster", "entity_id": cluster_id } url = '/metrics/generate?order=timestamp' # disable logging of the response res, response = self._send_post_request(url, payload, log_response_data=False) if res.status_code == requests.codes.ok: # latest cluster capacity metrics latestMetrics = response[len(response) - 1] LOG.debug(f"Latest cluster capacity: {latestMetrics}") return (latestMetrics["physical_total"], latestMetrics["physical_used"]) return None, None def create_smb_share(self, filesystem_id, name): """Creates a SMB share. 
:param filesystem_id: ID of the filesystem on which the export will be created :param name: name of the SMB share :return: ID of the share if created successfully """ payload = { "file_system_id": filesystem_id, "path": "/" + str(name), "name": name } url = '/smb_share' res, response = self._send_post_request(url, payload) if res.status_code == requests.codes.created: return response["id"] def get_fsid_from_share_name(self, name): """Retieves the Filesystem ID used by a SMB share. :param name: name of the SMB share :return: ID of the Filesystem which owns the share """ url = '/smb_share?select=file_system_id&name=eq.' + name res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: return response[0]['file_system_id'] def get_smb_share_id(self, name): """Retrieves SMB share ID. :param name: name of the SMB share :return: id of the SMB share if success """ url = '/smb_share?select=id&name=eq.' + name res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: return response[0]['id'] def get_nas_server_smb_netbios(self, nas_server_name): """Retrieves the domain name or netbios name. :param nas_server_name: NAS server name :return: Netbios name of SMB server if success """ url = '/nas_server?select=smb_servers(is_standalone,netbios_name)' \ '&name=eq.' + nas_server_name res, response = self._send_get_request(url) if res.status_code == requests.codes.ok: smb_server = response[0]['smb_servers'][0] if smb_server["is_standalone"]: return smb_server["netbios_name"] def set_acl(self, smb_share_id, cifs_rw_users, cifs_ro_users): """Set ACL for a SMB share. 
:param smb_share_id: ID of the SMB share :param name: name of the SMB share :return: ID of the share if created successfully """ aces = list() for rw_user in cifs_rw_users: ace = { "trustee_type": "User", "trustee_name": rw_user, "access_level": "Change", "access_type": "Allow" } aces.append(ace) for ro_user in cifs_ro_users: ace = { "trustee_type": "User", "trustee_name": ro_user, "access_level": "Read", "access_type": "Allow" } aces.append(ace) payload = { "aces": aces } url = '/smb_share/' + smb_share_id + '/set_acl' res, _ = self._send_post_request(url, payload) return res.status_code == requests.codes.no_content ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/powerstore/connection.py0000664000175000017500000005377100000000000027263 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ PowerStore specific NAS backend plugin. 
""" from oslo_config import cfg from oslo_log import log from oslo_utils import units from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.plugins import base as driver from manila.share.drivers.dell_emc.plugins.powerstore import client """Version history: 1.0 - Initial version """ VERSION = "1.0" CONF = cfg.CONF LOG = log.getLogger(__name__) POWERSTORE_OPTS = [ cfg.StrOpt('dell_nas_backend_host', help='Dell NAS backend hostname or IP address.'), cfg.StrOpt('dell_nas_server', help='Root directory or NAS server which owns the shares.'), cfg.StrOpt('dell_ad_domain', help='Domain name of the active directory ' 'joined by the NAS server.'), cfg.StrOpt('dell_nas_login', help='User name for the Dell NAS backend.'), cfg.StrOpt('dell_nas_password', secret=True, help='Password for the Dell NAS backend.'), cfg.BoolOpt('dell_ssl_cert_verify', default=False, help='If set to False the https client will not validate the ' 'SSL certificate of the backend endpoint.'), cfg.StrOpt('dell_ssl_cert_path', help='Can be used to specify a non default path to a ' 'CA_BUNDLE file or directory with certificates of trusted ' 'CAs, which will be used to validate the backend.') ] class PowerStoreStorageConnection(driver.StorageConnection): """Implements PowerStore specific functionality for Dell Manila driver.""" def __init__(self, *args, **kwargs): """Do initialization""" LOG.debug('Invoking base constructor for Manila' ' Dell PowerStore Driver.') super(PowerStoreStorageConnection, self).__init__(*args, **kwargs) LOG.debug('Setting up attributes for Manila' ' Dell PowerStore Driver.') if 'configuration' in kwargs: kwargs['configuration'].append_config_values(POWERSTORE_OPTS) self.client = None self.verify_certificate = None self.certificate_path = None self.ipv6_implemented = True self.revert_to_snap_support = True self.shrink_share_support = True # props from super class self.driver_handles_share_servers = False 
# props for share status update self.reserved_percentage = None self.reserved_snapshot_percentage = None self.reserved_share_extend_percentage = None self.max_over_subscription_ratio = None def connect(self, dell_share_driver, context): """Connects to Dell PowerStore""" LOG.debug('Reading configuration parameters for Manila' ' Dell PowerStore Driver.') config = dell_share_driver.configuration get_config_value = config.safe_get self.rest_ip = get_config_value("dell_nas_backend_host") self.rest_username = get_config_value("dell_nas_login") self.rest_password = get_config_value("dell_nas_password") # validate IP, username and password if not all([self.rest_ip, self.rest_username, self.rest_password]): message = _("REST server IP, username and password" " must be specified.") raise exception.BadConfigurationException(reason=message) self.nas_server = get_config_value("dell_nas_server") self.ad_domain = get_config_value("dell_ad_domain") self.verify_certificate = (get_config_value("dell_ssl_cert_verify") or False) if self.verify_certificate: self.certificate_path = get_config_value( "dell_ssl_cert_path") LOG.debug('Initializing Dell PowerStore REST Client.') LOG.info("REST server IP: %(ip)s, username: %(user)s. 
" "Verify server's certificate: %(verify_cert)s.", { "ip": self.rest_ip, "user": self.rest_username, "verify_cert": self.verify_certificate, }) self.client = client.PowerStoreClient(self.rest_ip, self.rest_username, self.rest_password, self.verify_certificate, self.certificate_path) # configuration for share status update self.reserved_percentage = config.safe_get( 'reserved_share_percentage') if self.reserved_percentage is None: self.reserved_percentage = 0 self.reserved_snapshot_percentage = config.safe_get( 'reserved_share_from_snapshot_percentage') if self.reserved_snapshot_percentage is None: self.reserved_snapshot_percentage = self.reserved_percentage self.reserved_share_extend_percentage = config.safe_get( 'reserved_share_extend_percentage') if self.reserved_share_extend_percentage is None: self.reserved_share_extend_percentage = self.reserved_percentage self.max_over_subscription_ratio = config.safe_get( 'max_over_subscription_ratio') def create_share(self, context, share, share_server): """Is called to create a share.""" LOG.debug(f'Creating {share["share_proto"]} share.') locations = self._create_share(share) return locations def _create_share(self, share): """Creates a NFS or SMB share. In PowerStore, an export (share) belongs to a filesystem. This function creates a filesystem and an export. 
""" share_name = share['name'] size_in_bytes = share['size'] * units.Gi # create a filesystem nas_server_id = self.client.get_nas_server_id(self.nas_server) LOG.debug(f"Creating filesystem {share_name}") filesystem_id = self.client.create_filesystem(nas_server_id, share_name, size_in_bytes) if not filesystem_id: message = { _('The filesystem "%(export)s" was not created.') % {'export': share_name}} LOG.error(message) raise exception.ShareBackendException(msg=message) # create a share locations = self._create_share_NFS_CIFS(nas_server_id, filesystem_id, share_name, share['share_proto'].upper()) return locations def _create_share_NFS_CIFS(self, nas_server_id, filesystem_id, share_name, protocol): LOG.debug(f"Get file interfaces of {nas_server_id}") file_interfaces = self.client.get_nas_server_interfaces( nas_server_id) LOG.debug(f"Creating {protocol} export {share_name}") if protocol == 'NFS': export_id = self.client.create_nfs_export(filesystem_id, share_name) if not export_id: message = ( _('The requested NFS export "%(export)s"' ' was not created.') % {'export': share_name}) LOG.error(message) raise exception.ShareBackendException(msg=message) locations = self._get_nfs_location(file_interfaces, share_name) elif protocol == 'CIFS': export_id = self.client.create_smb_share(filesystem_id, share_name) if not export_id: message = ( _('The requested SMB share "%(export)s"' ' was not created.') % {'export': share_name}) LOG.error(message) raise exception.ShareBackendException(msg=message) locations = self._get_cifs_location(file_interfaces, share_name) return locations def _get_nfs_location(self, file_interfaces, share_name): export_locations = [] for interface in file_interfaces: export_locations.append( {'path': f"{interface['ip']}:/{share_name}", 'metadata': { 'preferred': interface['preferred'] } }) return export_locations def _get_cifs_location(self, file_interfaces, share_name): export_locations = [] for interface in file_interfaces: export_locations.append( 
{'path': f"\\\\{interface['ip']}\\{share_name}", 'metadata': { 'preferred': interface['preferred'] } }) return export_locations def delete_share(self, context, share, share_server): """Is called to delete a share.""" LOG.debug(f'Deleting {share["share_proto"]} share.') self._delete_share(share) def _delete_share(self, share): """Deletes a filesystem and its associated export.""" LOG.debug(f"Retrieving filesystem ID for filesystem {share['name']}") filesystem_id = self.client.get_filesystem_id(share['name']) if not filesystem_id: LOG.warning( f'Filesystem with share name {share["name"]} is not found.') else: LOG.debug(f"Deleting filesystem ID {filesystem_id}") share_deleted = self.client.delete_filesystem(filesystem_id) if not share_deleted: message = ( _('Failed to delete share "%(export)s".') % {'export': share['name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) def extend_share(self, share, new_size, share_server): """Is called to extend a share.""" LOG.debug(f"Extending {share['name']} to {new_size}GiB") self._resize_filesystem(share, new_size) def shrink_share(self, share, new_size, share_server): """Is called to shrink a share.""" LOG.debug(f"Shrinking {share['name']} to {new_size}GiB") self._resize_filesystem(share, new_size) def _resize_filesystem(self, share, new_size): """Is called to resize a filesystem""" # Converts the size from GiB to Bytes new_size_in_bytes = new_size * units.Gi filesystem_id = self.client.get_filesystem_id(share['name']) is_success, detail = self.client.resize_filesystem(filesystem_id, new_size_in_bytes) if not is_success: message = (_('Failed to resize share "%(export)s".') % {'export': share['name']}) LOG.error(message) if detail: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) raise exception.ShareBackendException(msg=message) def allow_access(self, context, share, access, share_server): """Allow access to the share.""" raise NotImplementedError() def deny_access(self, context, 
share, access, share_server): """Deny access to the share.""" raise NotImplementedError() def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): """Is called to update share access.""" protocol = share['share_proto'].upper() LOG.debug(f'Updating access to {protocol} share.') if protocol == 'NFS': return self._update_nfs_access(share, access_rules) elif protocol == 'CIFS': return self._update_cifs_access(share, access_rules) def _update_nfs_access(self, share, access_rules): """Updates access rules for NFS share type.""" nfs_rw_ips = set() nfs_ro_ips = set() access_updates = {} for rule in access_rules: if rule['access_type'].lower() != 'ip': message = (_("Only IP access type currently supported for " "NFS. Share provided %(share)s with rule type " "%(type)s") % {'share': share['display_name'], 'type': rule['access_type']}) LOG.error(message) access_updates.update({rule['access_id']: {'state': 'error'}}) else: if rule['access_level'] == const.ACCESS_LEVEL_RW: nfs_rw_ips.add(rule['access_to']) elif rule['access_level'] == const.ACCESS_LEVEL_RO: nfs_ro_ips.add(rule['access_to']) access_updates.update({rule['access_id']: {'state': 'active'}}) share_id = self.client.get_nfs_export_id(share['name']) share_updated = self.client.set_export_access(share_id, nfs_rw_ips, nfs_ro_ips) if not share_updated: message = ( _('Failed to update NFS access rules for "%(export)s".') % {'export': share['display_name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) return access_updates def _update_cifs_access(self, share, access_rules): """Updates access rules for CIFS share type.""" cifs_rw_users = set() cifs_ro_users = set() access_updates = {} for rule in access_rules: if rule['access_type'].lower() != 'user': message = (_("Only user access type currently supported for " "CIFS. 
Share provided %(share)s with rule type " "%(type)s") % {'share': share['display_name'], 'type': rule['access_type']}) LOG.error(message) access_updates.update({rule['access_id']: {'state': 'error'}}) else: prefix = ( self.ad_domain or self.client.get_nas_server_smb_netbios(self.nas_server) ) if not prefix: message = ( _('Failed to get daomain/netbios name of ' '"%(nas_server)s".' ) % {'nas_server': self.nas_server}) LOG.error(message) access_updates.update({rule['access_id']: {'state': 'error'}}) continue prefix = prefix + '\\' if rule['access_level'] == const.ACCESS_LEVEL_RW: cifs_rw_users.add(prefix + rule['access_to']) elif rule['access_level'] == const.ACCESS_LEVEL_RO: cifs_ro_users.add(prefix + rule['access_to']) access_updates.update({rule['access_id']: {'state': 'active'}}) share_id = self.client.get_smb_share_id(share['name']) share_updated = self.client.set_acl(share_id, cifs_rw_users, cifs_ro_users) if not share_updated: message = ( _('Failed to update NFS access rules for "%(export)s".') % {'export': share['display_name']}) LOG.error(message) raise exception.ShareBackendException(msg=message) return access_updates def update_share_stats(self, stats_dict): """Retrieve stats info from share.""" stats_dict['driver_version'] = VERSION stats_dict['storage_protocol'] = 'NFS_CIFS' stats_dict['reserved_percentage'] = self.reserved_percentage stats_dict['reserved_snapshot_percentage'] = ( self.reserved_snapshot_percentage) stats_dict['reserved_share_extend_percentage'] = ( self.reserved_share_extend_percentage) stats_dict['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) cluster_id = self.client.get_cluster_id() total, used = self.client.retreive_cluster_capacity_metrics(cluster_id) if total and used: free = total - used stats_dict['total_capacity_gb'] = total // units.Gi stats_dict['free_capacity_gb'] = free // units.Gi def create_snapshot(self, context, snapshot, share_server): """Is called to create snapshot.""" export_name = 
snapshot['share_name'] LOG.debug(f'Retrieving filesystem ID for share {export_name}') filesystem_id = self.client.get_filesystem_id(export_name) if not filesystem_id: message = ( _('Failed to get filesystem id for export "%(export)s".') % {'export': export_name}) LOG.error(message) raise exception.ShareBackendException(msg=message) snapshot_name = snapshot['name'] LOG.debug( f'Creating snapshot {snapshot_name} for filesystem {filesystem_id}' ) snapshot_id = self.client.create_snapshot(filesystem_id, snapshot_name) if not snapshot_id: message = ( _('Failed to create snapshot "%(snapshot)s".') % {'snapshot': snapshot_name}) LOG.error(message) raise exception.ShareBackendException(msg=message) else: LOG.info("Snapshot %(snapshot)s successfully created.", {'snapshot': snapshot_name}) def delete_snapshot(self, context, snapshot, share_server): """Is called to delete snapshot.""" snapshot_name = snapshot['name'] LOG.debug(f'Retrieving filesystem ID for snapshot {snapshot_name}') filesystem_id = self.client.get_filesystem_id(snapshot_name) LOG.debug(f'Deleting filesystem ID {filesystem_id}') snapshot_deleted = self.client.delete_filesystem(filesystem_id) if not snapshot_deleted: message = ( _('Failed to delete snapshot "%(snapshot)s".') % {'snapshot': snapshot_name}) LOG.error(message) raise exception.ShareBackendException(msg=message) else: LOG.info("Snapshot %(snapshot)s successfully deleted.", {'snapshot': snapshot_name}) def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): """Reverts a share (in place) to the specified snapshot.""" snapshot_name = snapshot['name'] snapshot_id = self.client.get_filesystem_id(snapshot_name) snapshot_restored = self.client.restore_snapshot(snapshot_id) if not snapshot_restored: message = ( _('Failed to restore snapshot "%(snapshot)s".') % {'snapshot': snapshot_name}) LOG.error(message) raise exception.ShareBackendException(msg=message) else: LOG.info("Snapshot %(snapshot)s 
successfully restored.", {'snapshot': snapshot_name}) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Create a share from a snapshot - clone a snapshot.""" LOG.debug(f'Creating {share["share_proto"]} share.') locations = self._create_share_from_snapshot(share, snapshot) if share['size'] != snapshot['size']: LOG.debug(f"Resizing {share['name']} to {share['size']}GiB") self._resize_filesystem(share, share['size']) return locations def _create_share_from_snapshot(self, share, snapshot): LOG.debug(f"Retrieving snapshot id of snapshot {snapshot['name']}") snapshot_id = self.client.get_filesystem_id(snapshot['name']) share_name = share['name'] LOG.debug( f"Cloning filesystem {share_name} from snapshot {snapshot_id}" ) filesystem_id = self.client.clone_snapshot(snapshot_id, share_name) if not filesystem_id: message = { _('The filesystem "%(export)s" was not created.') % {'export': share_name}} LOG.error(message) raise exception.ShareBackendException(msg=message) # create a share nas_server_id = self.client.get_nas_server_id(self.nas_server) locations = self._create_share_NFS_CIFS(nas_server_id, filesystem_id, share_name, share['share_proto'].upper()) return locations def ensure_share(self, context, share, share_server): """Invoked to ensure that share is exported.""" def setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" def teardown_server(self, server_details, security_services=None): """Teardown share server.""" def check_for_setup_error(self): """Is called to check for setup error.""" def get_default_filter_function(self): return 'share.size >= 3' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9096718 manila-21.0.0/manila/share/drivers/dell_emc/plugins/unity/0000775000175000017500000000000000000000000023474 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/unity/__init__.py0000664000175000017500000000000000000000000025573 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/unity/client.py0000664000175000017500000003246300000000000025334 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils storops = importutils.try_import('storops') if storops: # pylint: disable=import-error from storops import exception as storops_ex from storops.unity import enums from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.common.enas import utils as enas_utils from manila.share.drivers.dell_emc.plugins.unity import utils LOG = log.getLogger(__name__) class UnityClient(object): def __init__(self, host, username, password): if storops is None: LOG.error('StorOps is required to run EMC Unity driver.') self.system = storops.UnitySystem(host, username, password) def create_cifs_share(self, resource, share_name): """Create CIFS share from the resource. 
:param resource: either UnityFilesystem or UnitySnap object :param share_name: CIFS share name :return: UnityCifsShare object """ try: return resource.create_cifs_share(share_name) except storops_ex.UnitySmbShareNameExistedError: return self.get_share(share_name, 'CIFS') def create_nfs_share(self, resource, share_name): """Create NFS share from the resource. :param resource: either UnityFilesystem or UnitySnap object :param share_name: NFS share name :return: UnityNfsShare object """ try: return resource.create_nfs_share(share_name) except storops_ex.UnityNfsShareNameExistedError: return self.get_share(share_name, 'NFS') def create_nfs_filesystem_and_share(self, pool, nas_server, share_name, size_gb): """Create filesystem and share from pool/NAS server. :param pool: pool for file system creation :param nas_server: nas server for file system creation :param share_name: file system and share name :param size_gb: file system size """ size = utils.gib_to_byte(size_gb) pool.create_nfs_share( nas_server, share_name, size, user_cap=True) def get_share(self, name, share_proto): # Validate the share protocol proto = share_proto.upper() if proto == 'CIFS': return self.system.get_cifs_share(name=name) elif proto == 'NFS': return self.system.get_nfs_share(name=name) else: raise exception.BadConfigurationException( reason=_('Invalid NAS protocol supplied: %s.') % share_proto) @staticmethod def delete_share(share): share.delete() def create_filesystem(self, pool, nas_server, share_name, size_gb, proto): try: size = utils.gib_to_byte(size_gb) return pool.create_filesystem(nas_server, share_name, size, proto=proto, user_cap=True) except storops_ex.UnityFileSystemNameAlreadyExisted: LOG.debug('Filesystem %s already exists, ' 'ignoring filesystem creation.', share_name) return self.system.get_filesystem(name=share_name) @staticmethod def delete_filesystem(filesystem): try: filesystem.delete() except storops_ex.UnityResourceNotFoundError: LOG.info('Filesystem %s is already removed.', 
filesystem.name) def create_nas_server(self, name, sp, pool, tenant=None): try: return self.system.create_nas_server(name, sp, pool, tenant=tenant) except storops_ex.UnityNasServerNameUsedError: LOG.info('Share server %s already exists, ignoring share ' 'server creation.', name) return self.get_nas_server(name) def get_nas_server(self, name): try: return self.system.get_nas_server(name=name) except storops_ex.UnityResourceNotFoundError: LOG.info('NAS server %s not found.', name) raise def delete_nas_server(self, name, username=None, password=None): tenant = None try: nas_server = self.get_nas_server(name=name) tenant = nas_server.tenant nas_server.delete(username=username, password=password) except storops_ex.UnityResourceNotFoundError: LOG.info('NAS server %s not found.', name) if tenant is not None: self._delete_tenant(tenant) @staticmethod def _delete_tenant(tenant): if tenant.nas_servers: LOG.debug('There are NAS servers belonging to the tenant %s. ' 'Do not delete it.', tenant.get_id()) return try: tenant.delete(delete_hosts=True) except storops_ex.UnityException as ex: LOG.warning('Delete tenant %(tenant)s failed with error: ' '%(ex)s. 
Leave the tenant on the system.', {'tenant': tenant.get_id(), 'ex': ex}) @staticmethod def create_dns_server(nas_server, domain, dns_ip): try: nas_server.create_dns_server(domain, dns_ip) except storops_ex.UnityOneDnsPerNasServerError: LOG.info('DNS server %s already exists, ' 'ignoring DNS server creation.', domain) @staticmethod def create_interface(nas_server, ip_addr, netmask, gateway, port_id, vlan_id=None, prefix_length=None): try: nas_server.create_file_interface(port_id, ip_addr, netmask=netmask, v6_prefix_length=prefix_length, gateway=gateway, vlan_id=vlan_id) except storops_ex.UnityIpAddressUsedError: raise exception.IPAddressInUse(ip=ip_addr) @staticmethod def enable_cifs_service(nas_server, domain, username, password): try: nas_server.enable_cifs_service( nas_server.file_interface, domain=domain, domain_username=username, domain_password=password) except storops_ex.UnitySmbNameInUseError: LOG.info('CIFS service on NAS server %s is ' 'already enabled.', nas_server.name) @staticmethod def enable_nfs_service(nas_server): try: nas_server.enable_nfs_service() except storops_ex.UnityNfsAlreadyEnabledError: LOG.info('NFS service on NAS server %s is ' 'already enabled.', nas_server.name) @staticmethod def create_snapshot(filesystem, name): access_type = enums.FilesystemSnapAccessTypeEnum.CHECKPOINT try: return filesystem.create_snap(name, fs_access_type=access_type) except storops_ex.UnitySnapNameInUseError: LOG.info('Snapshot %(snap)s on Filesystem %(fs)s already ' 'exists.', {'snap': name, 'fs': filesystem.name}) def create_snap_of_snap(self, src_snap, dst_snap_name): if isinstance(src_snap, str): snap = self.get_snapshot(name=src_snap) else: snap = src_snap try: return snap.create_snap(dst_snap_name) except storops_ex.UnitySnapNameInUseError: return self.get_snapshot(dst_snap_name) def get_snapshot(self, name): return self.system.get_snap(name=name) @staticmethod def delete_snapshot(snap): try: snap.delete() except storops_ex.UnityResourceNotFoundError: 
LOG.info('Snapshot %s is already removed.', snap.name) def get_pool(self, name=None): return self.system.get_pool(name=name) def get_storage_processor(self, sp_id=None): sp = self.system.get_sp(sp_id) if sp_id is None: # `sp` is a list of SPA and SPB. return [s for s in sp if s is not None and s.existed] else: return sp if sp.existed else None def cifs_clear_access(self, share_name, white_list=None): share = self.system.get_cifs_share(name=share_name) share.clear_access(white_list) def nfs_clear_access(self, share_name, white_list=None): share = self.system.get_nfs_share(name=share_name) share.clear_access(white_list, force_create_host=True) def cifs_allow_access(self, share_name, user_name, access_level): share = self.system.get_cifs_share(name=share_name) if access_level == const.ACCESS_LEVEL_RW: cifs_access = enums.ACEAccessLevelEnum.WRITE else: cifs_access = enums.ACEAccessLevelEnum.READ share.add_ace(user=user_name, access_level=cifs_access) def nfs_allow_access(self, share_name, host_ip, access_level): share = self.system.get_nfs_share(name=share_name) host_ip = enas_utils.convert_ipv6_format_if_needed(host_ip) if access_level == const.ACCESS_LEVEL_RW: share.allow_read_write_access(host_ip, force_create_host=True) share.allow_root_access(host_ip, force_create_host=True) else: share.allow_read_only_access(host_ip, force_create_host=True) def cifs_deny_access(self, share_name, user_name): share = self.system.get_cifs_share(name=share_name) try: share.delete_ace(user=user_name) except storops_ex.UnityAclUserNotFoundError: LOG.debug('ACL User "%(user)s" does not exist.', {'user': user_name}) def nfs_deny_access(self, share_name, host_ip): share = self.system.get_nfs_share(name=share_name) try: share.delete_access(host_ip) except storops_ex.UnityHostNotFoundException: LOG.info('%(host)s access to %(share)s is already removed.', {'host': host_ip, 'share': share_name}) def get_file_ports(self): ports = self.system.get_file_port() link_up_ports = [] for port in 
ports: if port.is_link_up and self._is_external_port(port.id): link_up_ports.append(port) return link_up_ports def extend_filesystem(self, fs, new_size_gb): size = utils.gib_to_byte(new_size_gb) try: fs.extend(size, user_cap=True) except storops_ex.UnityNothingToModifyError: LOG.debug('The size of the file system %(id)s is %(size)s ' 'bytes.', {'id': fs.get_id(), 'size': size}) return size def shrink_filesystem(self, share_id, fs, new_size_gb): size = utils.gib_to_byte(new_size_gb) try: fs.shrink(size, user_cap=True) except storops_ex.UnityNothingToModifyError: LOG.debug('The size of the file system %(id)s is %(size)s ' 'bytes.', {'id': fs.get_id(), 'size': size}) except storops_ex.UnityShareShrinkSizeTooSmallError: LOG.error('The used size of the file system %(id)s is ' 'bigger than input shrink size,' 'it may cause date loss.', {'id': fs.get_id()}) raise exception.ShareShrinkingPossibleDataLoss(share_id=share_id) return size @staticmethod def _is_external_port(port_id): return 'eth' in port_id or '_la' in port_id def get_tenant(self, name, vlan_id): if not vlan_id: # Do not create vlan for flat network return None tenant = None try: tenant_name = "vlan_%(vlan_id)s_%(name)s" % {'vlan_id': vlan_id, 'name': name} tenant = self.system.create_tenant(tenant_name, vlans=[vlan_id]) except (storops_ex.UnityVLANUsedByOtherTenantError, storops_ex.UnityTenantNameInUseError, storops_ex.UnityVLANAlreadyHasInterfaceError): with excutils.save_and_reraise_exception() as exc: tenant = self.system.get_tenant_use_vlan(vlan_id) if tenant is not None: LOG.debug("The VLAN %s is already added into a tenant. 
" "Use the existing VLAN tenant.", vlan_id) exc.reraise = False except storops_ex.SystemAPINotSupported: LOG.info("This system doesn't support tenant.") return tenant def restore_snapshot(self, snap_name): snap = self.get_snapshot(snap_name) return snap.restore(delete_backup=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/unity/connection.py0000664000175000017500000011452400000000000026214 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unity backend for the EMC Manila driver.""" import random from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import netutils storops = importutils.try_import('storops') if storops: # pylint: disable=import-error from storops import exception as storops_ex from storops.unity import enums from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.common.enas import utils as enas_utils from manila.share.drivers.dell_emc.plugins import base as driver from manila.share.drivers.dell_emc.plugins.unity import client from manila.share.drivers.dell_emc.plugins.unity import utils as unity_utils from manila.share import utils as share_utils from manila import utils """Version history: 7.0.0 - Supports DHSS=False mode 7.0.1 - Fix parsing management IPv6 address 7.0.2 - Bugfix: failed to delete CIFS share if wrong access was set 8.0.0 - Supports manage/unmanage share server/share/snapshot 9.0.0 - Implements default filter function 9.0.1 - Bugfix: remove enable ace process when creating cifs share 9.0.2 - Bugfix: fix the driver startup issue with LACP ports configured """ VERSION = "9.0.2" LOG = log.getLogger(__name__) SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan') UNITY_OPTS = [ cfg.StrOpt('unity_server_meta_pool', required=True, help='Pool to persist the meta-data of NAS server.'), cfg.ListOpt('unity_share_data_pools', help='Comma separated list of pools that can be used to ' 'persist share data.'), cfg.ListOpt('unity_ethernet_ports', help='Comma separated list of ports that can be used for ' 'share server interfaces. Members of the list ' 'can be Unix-style glob expressions.'), cfg.StrOpt('unity_share_server', help='NAS server used for creating share when driver ' 'is in DHSS=False mode. 
It is required when ' 'driver_handles_share_servers=False in manila.conf.'), cfg.StrOpt('report_default_filter_function', default=False, help='Whether or not report default filter function.'), ] CONF = cfg.CONF CONF.register_opts(UNITY_OPTS) @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class UnityStorageConnection(driver.StorageConnection): """Implements Unity specific functionality for EMC Manila driver.""" IP_ALLOCATIONS = 1 @enas_utils.log_enter_exit def __init__(self, *args, **kwargs): super(UnityStorageConnection, self).__init__(*args, **kwargs) if 'configuration' in kwargs: kwargs['configuration'].append_config_values(UNITY_OPTS) self.client = None self.pool_set = None self.nas_server_pool = None self.reserved_percentage = None self.reserved_snapshot_percentage = None self.reserved_share_extend_percentage = None self.max_over_subscription_ratio = None self.port_ids_conf = None self.unity_share_server = None self.ipv6_implemented = True self.revert_to_snap_support = True self.shrink_share_support = True self.manage_existing_support = True self.manage_existing_with_server_support = True self.manage_existing_snapshot_support = True self.manage_snapshot_with_server_support = True self.manage_server_support = True self.get_share_server_network_info_support = True # props from super class. 
self.driver_handles_share_servers = (True, False) self.dhss_mandatory_security_service_association = { 'nfs': None, 'cifs': ['active_directory', ] } def connect(self, emc_share_driver, context): """Connect to Unity storage.""" config = emc_share_driver.configuration storage_ip = enas_utils.convert_ipv6_format_if_needed( config.emc_nas_server) username = config.emc_nas_login password = config.emc_nas_password self.client = client.UnityClient(storage_ip, username, password) pool_conf = config.safe_get('unity_share_data_pools') self.pool_set = self._get_managed_pools(pool_conf) self.reserved_percentage = config.safe_get( 'reserved_share_percentage') if self.reserved_percentage is None: self.reserved_percentage = 0 self.reserved_snapshot_percentage = config.safe_get( 'reserved_share_from_snapshot_percentage') if self.reserved_snapshot_percentage is None: self.reserved_snapshot_percentage = self.reserved_percentage self.reserved_share_extend_percentage = config.safe_get( 'reserved_share_extend_percentage') if self.reserved_share_extend_percentage is None: self.reserved_share_extend_percentage = self.reserved_percentage self.max_over_subscription_ratio = config.safe_get( 'max_over_subscription_ratio') self.port_ids_conf = config.safe_get('unity_ethernet_ports') self.unity_share_server = config.safe_get('unity_share_server') self.driver_handles_share_servers = config.safe_get( 'driver_handles_share_servers') if (not self.driver_handles_share_servers) and ( not self.unity_share_server): msg = ("Make sure there is NAS server name " "configured for share creation when driver " "is in DHSS=False mode.") raise exception.BadConfigurationException(reason=msg) self.validate_port_configuration(self.port_ids_conf) pool_name = config.unity_server_meta_pool self._config_pool(pool_name) self.report_default_filter_function = config.safe_get( 'report_default_filter_function') def get_server_name(self, share_server=None): if not self.driver_handles_share_servers: return 
self.unity_share_server else: return self._get_server_name(share_server) def validate_port_configuration(self, port_ids_conf): """Initializes the SP and ports based on the port option.""" ports = self.client.get_file_ports() sp_ports_map, unmanaged_port_ids = unity_utils.match_ports( ports, port_ids_conf) if not sp_ports_map: msg = (_("All the specified storage ports to be managed " "do not exist. Please check your configuration " "unity_ethernet_ports in manila.conf. " "The available ports in the backend are %s.") % ",".join([port.get_id() for port in ports])) raise exception.BadConfigurationException(reason=msg) if unmanaged_port_ids: LOG.info("The following specified ports are not managed by " "the backend: %(unmanaged)s. This host will only " "manage the storage ports: %(exist)s", {'unmanaged': ",".join(unmanaged_port_ids), 'exist': ",".join(map(",".join, sp_ports_map.values()))}) else: LOG.debug("Ports: %s will be managed.", ",".join(map(",".join, sp_ports_map.values()))) if len(sp_ports_map) == 1: LOG.info("Only ports of %s are configured. Configure ports " "of both SPA and SPB to use both of the SPs.", list(sp_ports_map)[0]) return sp_ports_map def check_for_setup_error(self): """Check for setup error.""" def manage_existing(self, share, driver_options, share_server=None): """Manages a share that exists on backend. :param share: Share that will be managed. :param driver_options: Driver-specific options provided by admin. :param share_server: Share server name provided by admin in DHSS=True. :returns: Returns a dict with share size and export location. """ export_locations = share['export_locations'] if not export_locations: message = ("Failed to manage existing share: %s, missing " "export locations." 
% share['id']) raise exception.ManageInvalidShare(reason=message) try: share_size = int(driver_options.get("size", 0)) except (ValueError, TypeError): msg = _("The driver options' size to manage the share " "%(share_id)s, should be an integer, in format " "driver-options size=. Value specified: " "%(size)s.") % {'share_id': share['id'], 'size': driver_options.get("size")} raise exception.ManageInvalidShare(reason=msg) if not share_size: msg = _("Share %(share_id)s has no specified size. " "Using default value 1, set size in driver options if you " "want.") % {'share_id': share['id']} LOG.warning(msg) share_size = 1 share_id = unity_utils.get_share_backend_id(share) backend_share = self.client.get_share(share_id, share['share_proto']) if not backend_share: message = ("Could not find the share in backend, please make sure " "the export location is right.") raise exception.ManageInvalidShare(reason=message) # Check the share server when in DHSS=true mode if share_server: backend_share_server = self._get_server_name(share_server) if not backend_share_server: message = ("Could not find the backend share server: %s, " "please make sure that share server with the " "specified name exists in the backend.", share_server) raise exception.BadConfigurationException(message) LOG.info("Share %(shr_path)s is being managed with ID " "%(shr_id)s.", {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) # export_locations was not changed, return original value return {"size": share_size, 'export_locations': { 'path': share['export_locations'][0]['path']}} def manage_existing_with_server(self, share, driver_options, share_server): return self.manage_existing(share, driver_options, share_server) def manage_existing_snapshot(self, snapshot, driver_options, share_server=None): """Brings an existing snapshot under Manila management.""" try: snapshot_size = int(driver_options.get("size", 0)) except (ValueError, TypeError): msg = _("The size in driver options to manage 
snapshot " "%(snap_id)s should be an integer, in format " "driver-options size=. Value passed: " "%(size)s.") % {'snap_id': snapshot['id'], 'size': driver_options.get("size")} raise exception.ManageInvalidShareSnapshot(reason=msg) if not snapshot_size: msg = _("Snapshot %(snap_id)s has no specified size. " "Use default value 1, set size in driver options if you " "want.") % {'snap_id': snapshot['id']} LOG.info(msg) snapshot_size = 1 provider_location = snapshot.get('provider_location') snap = self.client.get_snapshot(provider_location) if not snap: message = ("Could not find a snapshot in the backend with " "provider_location: %s, please make sure " "the snapshot exists in the backend." % provider_location) raise exception.ManageInvalidShareSnapshot(reason=message) LOG.info("Snapshot %(provider_location)s in Unity will be managed " "with ID %(snapshot_id)s.", {'provider_location': snapshot.get('provider_location'), 'snapshot_id': snapshot['id']}) return {"size": snapshot_size, "provider_location": provider_location} def manage_existing_snapshot_with_server(self, snapshot, driver_options, share_server): return self.manage_existing_snapshot(snapshot, driver_options, share_server) def manage_server(self, context, share_server, identifier, driver_options): """Manage the share server and return compiled back end details. :param context: Current context. :param share_server: Share server model. :param identifier: A driver-specific share server identifier :param driver_options: Dictionary of driver options to assist managing the share server :return: Identifier and dictionary with back end details to be saved in the database. Example:: 'my_new_server_identifier',{'server_name': 'my_old_server'} """ nas_server = self.client.get_nas_server(identifier) if not nas_server: message = ("Could not find the backend share server by server " "name: %s, please make sure the share server is " "existing in the backend." 
% identifier) raise exception.ManageInvalidShare(reason=message) return identifier, driver_options def get_share_server_network_info( self, context, share_server, identifier, driver_options): """Obtain network allocations used by share server. :param context: Current context. :param share_server: Share server model. :param identifier: A driver-specific share server identifier :param driver_options: Dictionary of driver options to assist managing the share server :return: The containing IP address allocated in the backend, Unity only supports single IP address Example:: ['10.10.10.10'] or ['fd11::2000'] """ containing_ips = [] nas_server = self.client.get_nas_server(identifier) if nas_server: for file_interface in nas_server.file_interface: containing_ips.append(file_interface.ip_address) return containing_ips def create_share(self, context, share, share_server=None): """Create a share and export it based on protocol used.""" share_name = share['id'] size = share['size'] # Check share's protocol. # Throw an exception immediately if it is an invalid protocol. share_proto = share['share_proto'].upper() proto_enum = self._get_proto_enum(share_proto) # Get pool name from share host field pool_name = self._get_pool_name_from_host(share['host']) # Get share server name from share server or manila.conf. 
server_name = self.get_server_name(share_server) pool = self.client.get_pool(pool_name) try: nas_server = self.client.get_nas_server(server_name) except storops_ex.UnityResourceNotFoundError: message = (_("Failed to get NAS server %(server)s when " "creating the share %(share)s.") % {'server': server_name, 'share': share_name}) LOG.exception(message) raise exception.EMCUnityError(err=message) locations = None if share_proto == 'CIFS': filesystem = self.client.create_filesystem( pool, nas_server, share_name, size, proto=proto_enum) self.client.create_cifs_share(filesystem, share_name) locations = self._get_cifs_location( nas_server.file_interface, share_name) elif share_proto == 'NFS': self.client.create_nfs_filesystem_and_share( pool, nas_server, share_name, size) locations = self._get_nfs_location( nas_server.file_interface, share_name) return locations def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Create a share from a snapshot - clone a snapshot.""" share_name = share['id'] # Check share's protocol. # Throw an exception immediately if it is an invalid protocol. 
share_proto = share['share_proto'].upper() self._validate_share_protocol(share_proto) # Get share server name from share server server_name = self.get_server_name(share_server) try: nas_server = self.client.get_nas_server(server_name) except storops_ex.UnityResourceNotFoundError: message = (_("Failed to get NAS server %(server)s when " "creating the share %(share)s.") % {'server': server_name, 'share': share_name}) LOG.exception(message) raise exception.EMCUnityError(err=message) snapshot_id = unity_utils.get_snapshot_id(snapshot) backend_snap = self.client.create_snap_of_snap(snapshot_id, share_name) locations = None if share_proto == 'CIFS': self.client.create_cifs_share(backend_snap, share_name) locations = self._get_cifs_location( nas_server.file_interface, share_name) elif share_proto == 'NFS': self.client.create_nfs_share(backend_snap, share_name) locations = self._get_nfs_location( nas_server.file_interface, share_name) return locations def delete_share(self, context, share, share_server=None): """Delete a share.""" share_name = unity_utils.get_share_backend_id(share) try: backend_share = self.client.get_share(share_name, share['share_proto']) except storops_ex.UnityResourceNotFoundError: LOG.warning("Share %s is not found when deleting the share", share_name) return # Share created by the API create_share_from_snapshot() if self._is_share_from_snapshot(backend_share): filesystem = backend_share.snap.filesystem self.client.delete_snapshot(backend_share.snap) else: filesystem = backend_share.filesystem self.client.delete_share(backend_share) if self._is_isolated_filesystem(filesystem): self.client.delete_filesystem(filesystem) def extend_share(self, share, new_size, share_server=None): share_id = unity_utils.get_share_backend_id(share) backend_share = self.client.get_share(share_id, share['share_proto']) if not self._is_share_from_snapshot(backend_share): self.client.extend_filesystem(backend_share.filesystem, new_size) else: share_id = share['id'] reason = 
("Driver does not support extending a " "snapshot based share.") raise exception.ShareExtendingError(share_id=share_id, reason=reason) def shrink_share(self, share, new_size, share_server=None): """Shrinks a share to new size. :param share: Share that will be shrunk. :param new_size: New size of share. :param share_server: Data structure with share server information. Not used by this driver. """ share_id = unity_utils.get_share_backend_id(share) backend_share = self.client.get_share(share_id, share['share_proto']) if self._is_share_from_snapshot(backend_share): reason = ("Driver does not support shrinking a " "snapshot based share.") raise exception.ShareShrinkingError(share_id=share_id, reason=reason) self.client.shrink_filesystem(share_id, backend_share.filesystem, new_size) LOG.info("Share %(shr_id)s successfully shrunk to " "%(shr_size)sG.", {'shr_id': share_id, 'shr_size': new_size}) def create_snapshot(self, context, snapshot, share_server=None): """Create snapshot from share.""" share = snapshot['share'] share_name = unity_utils.get_share_backend_id( share) if share else snapshot['share_id'] share_proto = snapshot['share']['share_proto'] backend_share = self.client.get_share(share_name, share_proto) snapshot_name = snapshot['id'] if self._is_share_from_snapshot(backend_share): self.client.create_snap_of_snap(backend_share.snap, snapshot_name) else: self.client.create_snapshot(backend_share.filesystem, snapshot_name) return {'provider_location': snapshot_name} def delete_snapshot(self, context, snapshot, share_server=None): """Delete a snapshot.""" snapshot_id = unity_utils.get_snapshot_id(snapshot) snap = self.client.get_snapshot(snapshot_id) self.client.delete_snapshot(snap) def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): # adding rules if add_rules: for rule in add_rules: self.allow_access(context, share, rule, share_server) # deleting rules if delete_rules: for rule in delete_rules: 
self.deny_access(context, share, rule, share_server) # recovery mode if not (add_rules or delete_rules): white_list = [] for rule in access_rules: self.allow_access(context, share, rule, share_server) white_list.append(rule['access_to']) self.clear_access(share, white_list) def clear_access(self, share, white_list=None): share_proto = share['share_proto'].upper() share_name = unity_utils.get_share_backend_id(share) if share_proto == 'CIFS': self.client.cifs_clear_access(share_name, white_list) elif share_proto == 'NFS': self.client.nfs_clear_access(share_name, white_list) def allow_access(self, context, share, access, share_server=None): """Allow access to a share.""" access_level = access['access_level'] if access_level not in const.ACCESS_LEVELS: raise exception.InvalidShareAccessLevel(level=access_level) share_proto = share['share_proto'].upper() self._validate_share_protocol(share_proto) self._validate_share_access_type(share, access) if share_proto == 'CIFS': self._cifs_allow_access(share, access) elif share_proto == 'NFS': self._nfs_allow_access(share, access) def deny_access(self, context, share, access, share_server): """Deny access to a share.""" share_proto = share['share_proto'].upper() self._validate_share_protocol(share_proto) self._validate_share_access_type(share, access) if share_proto == 'CIFS': self._cifs_deny_access(share, access) elif share_proto == 'NFS': self._nfs_deny_access(share, access) def ensure_share(self, context, share, share_server): """Ensure that the share is exported.""" share_name = unity_utils.get_share_backend_id(share) share_proto = share['share_proto'] backend_share = self.client.get_share(share_name, share_proto) if not backend_share.existed: raise exception.ShareNotFound(share_id=share_name) def update_share_stats(self, stats_dict): """Communicate with EMCNASClient to get the stats.""" stats_dict['driver_version'] = VERSION stats_dict['pools'] = [] for pool in self.client.get_pool(): if pool.name in self.pool_set: # the 
unit of following numbers are GB total_size = float(pool.size_total) used_size = float(pool.size_used) pool_stat = { 'pool_name': pool.name, 'thin_provisioning': True, 'total_capacity_gb': enas_utils.bytes_to_gb(total_size), 'free_capacity_gb': enas_utils.bytes_to_gb(total_size - used_size), 'allocated_capacity_gb': enas_utils.bytes_to_gb(used_size), 'provisioned_capacity_gb': enas_utils.bytes_to_gb(pool.size_subscribed), 'qos': False, 'reserved_percentage': self.reserved_percentage, 'reserved_snapshot_percentage': self.reserved_snapshot_percentage, 'reserved_share_extend_percentage': self.reserved_share_extend_percentage, 'max_over_subscription_ratio': self.max_over_subscription_ratio, } stats_dict['pools'].append(pool_stat) if not stats_dict.get('pools'): message = _("Failed to update storage pool.") LOG.error(message) raise exception.EMCUnityError(err=message) def get_pool(self, share): """Get the pool name of the share.""" backend_share = self.client.get_share( share['id'], share['share_proto']) return backend_share.filesystem.pool.name def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return self.IP_ALLOCATIONS def setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" server_name = network_info['server_id'] segmentation_id = network_info['segmentation_id'] network = self.validate_network(network_info) mtu = network['mtu'] tenant = self.client.get_tenant(network_info['server_id'], segmentation_id) sp_ports_map = unity_utils.find_ports_by_mtu( self.client.get_file_ports(), self.port_ids_conf, mtu) sp = self._choose_sp(sp_ports_map) nas_server = self.client.create_nas_server(server_name, sp, self.nas_server_pool, tenant=tenant) sp = nas_server.home_sp port_id = self._choose_port(sp_ports_map, sp) try: self._create_network_interface(nas_server, network, port_id) self._handle_security_services( nas_server, network_info['security_services']) 
return {'share_server_name': server_name} except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Could not setup server.') server_details = {'share_server_name': server_name} self.teardown_server( server_details, network_info['security_services']) def teardown_server(self, server_details, security_services=None): """Teardown share server.""" if not server_details: LOG.debug('Server details are empty.') return server_name = server_details.get('share_server_name') if not server_name: LOG.debug('No share server found for server %s.', server_details.get('instance_id')) return username = None password = None for security_service in security_services: if security_service['type'] == 'active_directory': username = security_service['user'] password = security_service['password'] break self.client.delete_nas_server(server_name, username, password) def _cifs_allow_access(self, share, access): """Allow access to CIFS share.""" self.client.cifs_allow_access( share['id'], access['access_to'], access['access_level']) def _cifs_deny_access(self, share, access): """Deny access to CIFS share.""" self.client.cifs_deny_access(share['id'], access['access_to']) def _config_pool(self, pool_name): try: self.nas_server_pool = self.client.get_pool(pool_name) except storops_ex.UnityResourceNotFoundError: message = (_("The storage pools %s to store NAS server " "configuration do not exist.") % pool_name) LOG.exception(message) raise exception.BadConfigurationException(reason=message) @staticmethod def validate_network(network_info): network = network_info['network_allocations'][0] if network['network_type'] not in SUPPORTED_NETWORK_TYPES: msg = _('The specified network type %s is unsupported by ' 'the EMC Unity driver') raise exception.NetworkBadConfigurationException( reason=msg % network['network_type']) return network def _create_network_interface(self, nas_server, network, port_id): kargs = {'ip_addr': network['ip_address'], 'gateway': network['gateway'], 'vlan_id': 
network['segmentation_id'], 'port_id': port_id} if netutils.is_valid_ipv6_cidr(kargs['ip_addr']): kargs['netmask'] = None kargs['prefix_length'] = str(utils.cidr_to_prefixlen( network['cidr'])) else: kargs['netmask'] = utils.cidr_to_netmask(network['cidr']) # Create the interfaces on NAS server self.client.create_interface(nas_server, **kargs) def _choose_sp(self, sp_ports_map): sp = None if len(sp_ports_map.keys()) == 1: # Only one storage processor has usable ports, # create NAS server on that SP. sp = self.client.get_storage_processor( sp_id=list(sp_ports_map.keys())[0]) LOG.debug('All the usable ports belong to %s. ' 'Creating NAS server on this SP without ' 'load balance.', sp.get_id()) return sp @staticmethod def _choose_port(sp_ports_map, sp): ports = sp_ports_map[sp.get_id()] return random.choice(list(ports)) @staticmethod def _get_cifs_location(file_interfaces, share_name): return [ {'path': r'\\%(interface)s\%(share_name)s' % { 'interface': enas_utils.export_unc_path(interface.ip_address), 'share_name': share_name} } for interface in file_interfaces ] def _get_managed_pools(self, pool_conf): # Get the real pools from the backend storage real_pools = set(pool.name for pool in self.client.get_pool()) if not pool_conf: LOG.debug("No storage pool is specified, so all pools in storage " "system will be managed.") return real_pools matched_pools, unmanaged_pools = unity_utils.do_match(real_pools, pool_conf) if not matched_pools: msg = (_("All the specified storage pools to be managed " "do not exist. Please check your configuration " "emc_nas_pool_names in manila.conf. " "The available pools in the backend are %s") % ",".join(real_pools)) raise exception.BadConfigurationException(reason=msg) if unmanaged_pools: LOG.info("The following specified storage pools " "are not managed by the backend: " "%(un_managed)s. 
This host will only manage " "the storage pools: %(exist)s", {'un_managed': ",".join(unmanaged_pools), 'exist': ",".join(matched_pools)}) else: LOG.debug("Storage pools: %s will be managed.", ",".join(matched_pools)) return matched_pools @staticmethod def _get_nfs_location(file_interfaces, share_name): return [ {'path': '%(interface)s:/%(share_name)s' % { 'interface': enas_utils.convert_ipv6_format_if_needed( interface.ip_address), 'share_name': share_name} } for interface in file_interfaces ] @staticmethod def _get_pool_name_from_host(host): pool_name = share_utils.extract_host(host, level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % host) raise exception.InvalidHost(reason=message) return pool_name @staticmethod def _get_proto_enum(share_proto): share_proto = share_proto.upper() UnityStorageConnection._validate_share_protocol(share_proto) if share_proto == 'CIFS': return enums.FSSupportedProtocolEnum.CIFS elif share_proto == 'NFS': return enums.FSSupportedProtocolEnum.NFS @staticmethod def _get_server_name(share_server): if not share_server: msg = _('Share server not provided.') raise exception.InvalidInput(reason=msg) # Try to get share server name from property 'identifier' first in # case this is managed share server. 
server_name = share_server.get('identifier') or share_server.get( 'backend_details', {}).get('share_server_name') if server_name is None: msg = (_("Name of the share server %s not found.") % share_server['id']) LOG.error(msg) raise exception.InvalidInput(reason=msg) return server_name def _handle_security_services(self, nas_server, security_services): kerberos_enabled = False # Support 'active_directory' and 'kerberos' for security_service in security_services: service_type = security_service['type'] if service_type == 'active_directory': # Create DNS server for NAS server domain = security_service['domain'] dns_ip = security_service['dns_ip'] self.client.create_dns_server(nas_server, domain, dns_ip) # Enable CIFS service username = security_service['user'] password = security_service['password'] self.client.enable_cifs_service(nas_server, domain=domain, username=username, password=password) elif service_type == 'kerberos': # Enable NFS service with kerberos kerberos_enabled = True # TODO(jay.xu): enable nfs service with kerberos LOG.warning('Kerberos is not supported by ' 'EMC Unity manila driver plugin.') elif service_type == 'ldap': LOG.warning('LDAP is not supported by ' 'EMC Unity manila driver plugin.') else: LOG.warning('Unknown security service type: %s.', service_type) if not kerberos_enabled: # Enable NFS service without kerberos self.client.enable_nfs_service(nas_server) def _nfs_allow_access(self, share, access): """Allow access to NFS share.""" self.client.nfs_allow_access( share['id'], access['access_to'], access['access_level']) def _nfs_deny_access(self, share, access): """Deny access to NFS share.""" self.client.nfs_deny_access(share['id'], access['access_to']) @staticmethod def _is_isolated_filesystem(filesystem): filesystem.update() return ( not filesystem.has_snap() and not (filesystem.cifs_share or filesystem.nfs_share) ) @staticmethod def _is_share_from_snapshot(share): return True if share.snap else False @staticmethod def 
_validate_share_access_type(share, access): reason = None share_proto = share['share_proto'].upper() if share_proto == 'CIFS' and access['access_type'] != 'user': reason = _('Only user access type allowed for CIFS share.') elif share_proto == 'NFS' and access['access_type'] != 'ip': reason = _('Only IP access type allowed for NFS share.') if reason: raise exception.InvalidShareAccess(reason=reason) @staticmethod def _validate_share_protocol(share_proto): if share_proto not in ('NFS', 'CIFS'): raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): """Reverts a share (in place) to the specified snapshot.""" snapshot_id = unity_utils.get_snapshot_id(snapshot) return self.client.restore_snapshot(snapshot_id) def get_default_filter_function(self): if self.report_default_filter_function: return "share.size >= 3" return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/unity/utils.py0000664000175000017500000001013000000000000025201 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Utility module for EMC Unity Manila Driver """ import fnmatch from oslo_log import log from oslo_utils import units from manila import exception from manila.i18n import _ LOG = log.getLogger(__name__) def do_match(full, matcher_list): matched = set() full = set([item.strip() for item in full]) if matcher_list is None: # default to all matcher_list = set('*') else: matcher_list = set([item.strip() for item in matcher_list]) for item in full: for matcher in matcher_list: if fnmatch.fnmatchcase(item, matcher): matched.add(item) return matched, full - matched def match_ports(ports_list, port_ids_conf): """Filters the port in `ports_list` with the port id in `port_ids_conf`. A tuple of (`sp_ports_map`, `unmanaged_port_ids`) is returned, in which `sp_ports_map` is a dict whose key is SPA or SPB, value is the matched port id set, `unmanaged_port_ids` is the un-matched port id set. """ patterns = (set('*') if port_ids_conf is None else set(item.strip() for item in port_ids_conf if item.strip())) if not patterns: patterns = set('*') sp_ports_map = {} unmanaged_port_ids = set() for port in ports_list: port_id = port.get_id() for pattern in patterns: if fnmatch.fnmatchcase(port_id, pattern): # parentStorageProcessor property is deprecated in Unity 5.x if port.parent_storage_processor: sp = port.parent_storage_processor else: sp = port.storage_processor sp_id = sp.get_id() ports_set = sp_ports_map.setdefault(sp_id, set()) ports_set.add(port_id) break else: unmanaged_port_ids.add(port_id) return sp_ports_map, unmanaged_port_ids def find_ports_by_mtu(all_ports, port_ids_conf, mtu): valid_ports = list(filter(lambda p: p.mtu == mtu, all_ports)) managed_port_map, unmatched = match_ports(valid_ports, port_ids_conf) if not managed_port_map: msg = (_('None of the configured port %(conf)s matches the mtu ' '%(mtu)s.') % {'conf': port_ids_conf, 'mtu': mtu}) raise exception.ShareBackendException(msg=msg) return managed_port_map def gib_to_byte(size_gib): return size_gib * units.Gi 
def get_share_backend_id(share): """Get backend share id. Try to get backend share id from path in case this is managed share, use share['id'] when path is empty. """ backend_share_id = None try: export_locations = share['export_locations'][0] path = export_locations['path'] if share['share_proto'].lower() == 'nfs': # 10.0.0.1:/example_share_name backend_share_id = path.split(':/')[-1] if share['share_proto'].lower() == 'cifs': # \\10.0.0.1\example_share_name backend_share_id = path.split('\\')[-1] except Exception as e: LOG.warning('Cannot get share name from path, make sure the path ' 'is right. Error details: %s', e) if backend_share_id and (backend_share_id != share['id']): return backend_share_id else: return share['id'] def get_snapshot_id(snapshot): """Get backend snapshot id. Take the id from provider_location in case this is managed snapshot. """ return snapshot['provider_location'] or snapshot['id'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9096718 manila-21.0.0/manila/share/drivers/dell_emc/plugins/vnx/0000775000175000017500000000000000000000000023137 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/vnx/__init__.py0000664000175000017500000000000000000000000025236 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/vnx/connection.py0000664000175000017500000010717300000000000025661 0ustar00zuulzuul00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """VNX backend for the EMC Manila driver.""" import copy import random from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import units from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.common.enas import constants from manila.share.drivers.dell_emc.common.enas import utils as enas_utils from manila.share.drivers.dell_emc.plugins import base as driver from manila.share.drivers.dell_emc.plugins.vnx import object_manager as manager from manila.share import utils as share_utils from manila import utils """Version history: 1.0.0 - Initial version (Liberty) 2.0.0 - Bumped the version for Mitaka 3.0.0 - Bumped the version for Ocata 4.0.0 - Bumped the version for Pike 5.0.0 - Bumped the version for Queens 9.0.0 - Bumped the version for Ussuri 9.0.1 - Fixes bug 1871999: wrong format of export locations """ VERSION = "9.0.1" LOG = log.getLogger(__name__) VNX_OPTS = [ cfg.StrOpt('vnx_server_container', help='Data mover to host the NAS server.'), cfg.ListOpt('vnx_share_data_pools', help='Comma separated list of pools that can be used to ' 'persist share data.'), cfg.ListOpt('vnx_ethernet_ports', help='Comma separated list of ports that can be used for ' 'share server interfaces. 
Members of the list ' 'can be Unix-style glob expressions.') ] CONF = cfg.CONF CONF.register_opts(VNX_OPTS) @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class VNXStorageConnection(driver.StorageConnection): """Implements VNX specific functionality for EMC Manila driver.""" @enas_utils.log_enter_exit def __init__(self, *args, **kwargs): super(VNXStorageConnection, self).__init__(*args, **kwargs) if 'configuration' in kwargs: kwargs['configuration'].append_config_values(VNX_OPTS) self.mover_name = None self.pools = None self.manager = None self.pool_conf = None self.reserved_percentage = None self.reserved_snapshot_percentage = None self.reserved_share_extend_percentage = None self.driver_handles_share_servers = True self.port_conf = None self.ipv6_implemented = True self.dhss_mandatory_security_service_association = { 'nfs': None, 'cifs': ['active_directory', ] } def create_share(self, context, share, share_server=None): """Create a share and export it based on protocol used.""" share_name = share['id'] size = share['size'] * units.Ki share_proto = share['share_proto'] # Validate the share protocol if share_proto.upper() not in ('NFS', 'CIFS'): raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) # Get the pool name from share host field pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share['host']) raise exception.InvalidHost(reason=message) # Validate share server self._share_server_validation(share_server) if share_proto == 'CIFS': vdm_name = self._get_share_server_name(share_server) server_name = vdm_name # Check if CIFS server exists. 
status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %s not found.") % server_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self._allocate_container(share_name, size, share_server, pool_name) if share_proto == 'NFS': location = self._create_nfs_share(share_name, share_server) elif share_proto == 'CIFS': location = self._create_cifs_share(share_name, share_server) return [ {'path': location} ] def _share_server_validation(self, share_server): """Validate the share server.""" if not share_server: msg = _('Share server not provided') raise exception.InvalidInput(reason=msg) backend_details = share_server.get('backend_details') vdm = backend_details.get( 'share_server_name') if backend_details else None if vdm is None: message = _("No share server found.") LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def _allocate_container(self, share_name, size, share_server, pool_name): """Allocate file system for share.""" vdm_name = self._get_share_server_name(share_server) self._get_context('FileSystem').create( share_name, size, pool_name, vdm_name) def _allocate_container_from_snapshot(self, share, snapshot, share_server, pool_name): """Allocate file system from snapshot.""" vdm_name = self._get_share_server_name(share_server) interconn_id = self._get_context('Mover').get_interconnect_id( self.mover_name, self.mover_name) self._get_context('FileSystem').create_from_snapshot( share['id'], snapshot['id'], snapshot['share_id'], pool_name, vdm_name, interconn_id) nwe_size = share['size'] * units.Ki self._get_context('FileSystem').extend(share['id'], pool_name, nwe_size) @enas_utils.log_enter_exit def _create_cifs_share(self, share_name, share_server): """Create CIFS share.""" vdm_name = self._get_share_server_name(share_server) server_name = vdm_name # Get available CIFS Server and interface (one CIFS server per VDM) status, server = 
self._get_context('CIFSServer').get(server_name, vdm_name) if 'interfaces' not in server or len(server['interfaces']) == 0: message = (_("CIFS server %s doesn't have interface, " "so the share is inaccessible.") % server['compName']) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) interface = enas_utils.export_unc_path(server['interfaces'][0]) self._get_context('CIFSShare').create(share_name, server['name'], vdm_name) self._get_context('CIFSShare').disable_share_access(share_name, vdm_name) location = (r'\\%(interface)s\%(name)s' % {'interface': interface, 'name': share_name}) return location @enas_utils.log_enter_exit def _create_nfs_share(self, share_name, share_server): """Create NFS share.""" vdm_name = self._get_share_server_name(share_server) self._get_context('NFSShare').create(share_name, vdm_name) nfs_if = enas_utils.convert_ipv6_format_if_needed( share_server['backend_details']['nfs_if']) return ('%(nfs_if)s:/%(share_name)s' % {'nfs_if': nfs_if, 'share_name': share_name}) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Create a share from a snapshot - clone a snapshot.""" share_name = share['id'] share_proto = share['share_proto'] # Validate the share protocol if share_proto.upper() not in ('NFS', 'CIFS'): raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) # Get the pool name from share host field pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share['host']) raise exception.InvalidHost(reason=message) self._share_server_validation(share_server) self._allocate_container_from_snapshot( share, snapshot, share_server, pool_name) nfs_if = enas_utils.convert_ipv6_format_if_needed( share_server['backend_details']['nfs_if']) if share_proto == 'NFS': self._create_nfs_share(share_name, share_server) location = ('%(nfs_if)s:/%(share_name)s' % {'nfs_if': 
nfs_if, 'share_name': share_name}) elif share_proto == 'CIFS': location = self._create_cifs_share(share_name, share_server) return [ {'path': location} ] def create_snapshot(self, context, snapshot, share_server=None): """Create snapshot from share.""" share_name = snapshot['share_id'] status, filesystem = self._get_context('FileSystem').get(share_name) if status != constants.STATUS_OK: message = (_("File System %s not found.") % share_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) pool_id = filesystem['pools_id'][0] self._get_context('Snapshot').create(snapshot['id'], snapshot['share_id'], pool_id) def delete_share(self, context, share, share_server=None): """Delete a share.""" if share_server is None: LOG.warning("Driver does not support share deletion without " "share network specified. Return directly because " "there is nothing to clean.") return share_proto = share['share_proto'] if share_proto == 'NFS': self._delete_nfs_share(share, share_server) elif share_proto == 'CIFS': self._delete_cifs_share(share, share_server) else: raise exception.InvalidShare( reason='Unsupported share type') @enas_utils.log_enter_exit def _delete_cifs_share(self, share, share_server): """Delete CIFS share.""" vdm_name = self._get_share_server_name(share_server) name = share['id'] self._get_context('CIFSShare').delete(name, vdm_name) self._deallocate_container(name, vdm_name) @enas_utils.log_enter_exit def _delete_nfs_share(self, share, share_server): """Delete NFS share.""" vdm_name = self._get_share_server_name(share_server) name = share['id'] self._get_context('NFSShare').delete(name, vdm_name) self._deallocate_container(name, vdm_name) @enas_utils.log_enter_exit def _deallocate_container(self, share_name, vdm_name): """Delete underneath objects of the share.""" path = '/' + share_name try: # Delete mount point self._get_context('MountPoint').delete(path, vdm_name) except Exception: LOG.debug("Skip the failure of mount point %s deletion.", path) try: # 
Delete file system self._get_context('FileSystem').delete(share_name) except Exception: LOG.debug("Skip the failure of file system %s deletion.", share_name) def delete_snapshot(self, context, snapshot, share_server=None): """Delete a snapshot.""" self._get_context('Snapshot').delete(snapshot['id']) def ensure_share(self, context, share, share_server=None): """Ensure that the share is exported.""" def extend_share(self, share, new_size, share_server=None): # Get the pool name from share host field pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share['host']) raise exception.InvalidHost(reason=message) share_name = share['id'] self._get_context('FileSystem').extend( share_name, pool_name, new_size * units.Ki) def allow_access(self, context, share, access, share_server=None): """Allow access to a share.""" access_level = access['access_level'] if access_level not in const.ACCESS_LEVELS: raise exception.InvalidShareAccessLevel(level=access_level) share_proto = share['share_proto'] if share_proto == 'NFS': self._nfs_allow_access(context, share, access, share_server) elif share_proto == 'CIFS': self._cifs_allow_access(context, share, access, share_server) else: raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) @enas_utils.log_enter_exit def _cifs_allow_access(self, context, share, access, share_server): """Allow access to CIFS share.""" vdm_name = self._get_share_server_name(share_server) share_name = share['id'] if access['access_type'] != 'user': reason = _('Only user access type allowed for CIFS share') raise exception.InvalidShareAccess(reason=reason) user_name = access['access_to'] access_level = access['access_level'] if access_level == const.ACCESS_LEVEL_RW: cifs_access = constants.CIFS_ACL_FULLCONTROL else: cifs_access = constants.CIFS_ACL_READ # Check if CIFS server exists. 
server_name = vdm_name status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %s not found.") % server_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self._get_context('CIFSShare').allow_share_access( vdm_name, share_name, user_name, server['domain'], access=cifs_access) @enas_utils.log_enter_exit def _nfs_allow_access(self, context, share, access, share_server): """Allow access to NFS share.""" vdm_name = self._get_share_server_name(share_server) access_type = access['access_type'] if access_type != 'ip': reason = _('Only ip access type allowed.') raise exception.InvalidShareAccess(reason=reason) host_ip = access['access_to'] access_level = access['access_level'] self._get_context('NFSShare').allow_share_access( share['id'], host_ip, vdm_name, access_level) def update_access(self, context, share, access_rules, add_rules, delete_rules, share_server=None): # deleting rules for rule in delete_rules: self.deny_access(context, share, rule, share_server) # adding rules for rule in add_rules: self.allow_access(context, share, rule, share_server) # recovery mode if not (add_rules or delete_rules): white_list = [] for rule in access_rules: self.allow_access(context, share, rule, share_server) white_list.append( enas_utils.convert_ipv6_format_if_needed( rule['access_to'])) self.clear_access(share, share_server, white_list) def clear_access(self, share, share_server, white_list): share_proto = share['share_proto'].upper() share_name = share['id'] if share_proto == 'CIFS': self._cifs_clear_access(share_name, share_server, white_list) elif share_proto == 'NFS': self._nfs_clear_access(share_name, share_server, white_list) @enas_utils.log_enter_exit def _cifs_clear_access(self, share_name, share_server, white_list): """Clear access for CIFS share except hosts in the white list.""" vdm_name = self._get_share_server_name(share_server) # Check if CIFS server exists. 
server_name = vdm_name status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %(server_name)s has issue. " "Detail: %(status)s") % {'server_name': server_name, 'status': status}) raise exception.EMCVnxXMLAPIError(err=message) self._get_context('CIFSShare').clear_share_access( share_name=share_name, mover_name=vdm_name, domain=server['domain'], white_list_users=white_list) @enas_utils.log_enter_exit def _nfs_clear_access(self, share_name, share_server, white_list): """Clear access for NFS share except hosts in the white list.""" self._get_context('NFSShare').clear_share_access( share_name=share_name, mover_name=self._get_share_server_name(share_server), white_list_hosts=white_list) def deny_access(self, context, share, access, share_server=None): """Deny access to a share.""" share_proto = share['share_proto'] if share_proto == 'NFS': self._nfs_deny_access(share, access, share_server) elif share_proto == 'CIFS': self._cifs_deny_access(share, access, share_server) else: raise exception.InvalidShare( reason=_('Unsupported share type')) @enas_utils.log_enter_exit def _cifs_deny_access(self, share, access, share_server): """Deny access to CIFS share.""" vdm_name = self._get_share_server_name(share_server) share_name = share['id'] if access['access_type'] != 'user': reason = _('Only user access type allowed for CIFS share') raise exception.InvalidShareAccess(reason=reason) user_name = access['access_to'] access_level = access['access_level'] if access_level == const.ACCESS_LEVEL_RW: cifs_access = constants.CIFS_ACL_FULLCONTROL else: cifs_access = constants.CIFS_ACL_READ # Check if CIFS server exists. 
server_name = vdm_name status, server = self._get_context('CIFSServer').get(server_name, vdm_name) if status != constants.STATUS_OK: message = (_("CIFS server %s not found.") % server_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self._get_context('CIFSShare').deny_share_access( vdm_name, share_name, user_name, server['domain'], access=cifs_access) @enas_utils.log_enter_exit def _nfs_deny_access(self, share, access, share_server): """Deny access to NFS share.""" vdm_name = self._get_share_server_name(share_server) access_type = access['access_type'] if access_type != 'ip': reason = _('Only ip access type allowed.') raise exception.InvalidShareAccess(reason=reason) host_ip = enas_utils.convert_ipv6_format_if_needed(access['access_to']) self._get_context('NFSShare').deny_share_access(share['id'], host_ip, vdm_name) def check_for_setup_error(self): """Check for setup error.""" # To verify the input from Manila configuration status, out = self._get_context('Mover').get_ref(self.mover_name, True) if constants.STATUS_ERROR == status: message = (_("Could not find Data Mover by name: %s.") % self.mover_name) LOG.error(message) raise exception.InvalidParameterValue(err=message) self.pools = self._get_managed_storage_pools(self.pool_conf) def _get_managed_storage_pools(self, pools): matched_pools = set() if pools: # Get the real pools from the backend storage status, backend_pools = self._get_context('StoragePool').get_all() if status != constants.STATUS_OK: message = (_("Failed to get storage pool information. " "Reason: %s") % backend_pools) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) real_pools = set([item for item in backend_pools]) conf_pools = set([item.strip() for item in pools]) matched_pools, unmatched_pools = enas_utils.do_match_any( real_pools, conf_pools) if not matched_pools: msg = (_("None of the specified storage pools to be managed " "exist. Please check your configuration " "vnx_share_data_pools in manila.conf. 
" "The available pools in the backend are %s.") % ",".join(real_pools)) raise exception.InvalidParameterValue(err=msg) LOG.info("Storage pools: %s will be managed.", ",".join(matched_pools)) else: LOG.debug("No storage pool is specified, so all pools " "in storage system will be managed.") return matched_pools def connect(self, emc_share_driver, context): """Connect to VNX NAS server.""" config = emc_share_driver.configuration config.append_config_values(VNX_OPTS) self.mover_name = config.vnx_server_container self.pool_conf = config.safe_get('vnx_share_data_pools') self.reserved_percentage = config.safe_get('reserved_share_percentage') if self.reserved_percentage is None: self.reserved_percentage = 0 self.reserved_snapshot_percentage = config.safe_get( 'reserved_share_from_snapshot_percentage') if self.reserved_snapshot_percentage is None: self.reserved_snapshot_percentage = self.reserved_percentage self.reserved_share_extend_percentage = config.safe_get( 'reserved_share_extend_percentage') if self.reserved_share_extend_percentage is None: self.reserved_share_extend_percentage = self.reserved_percentage self.manager = manager.StorageObjectManager(config) self.port_conf = config.safe_get('vnx_ethernet_ports') def get_managed_ports(self): # Get the real ports(devices) list from the backend storage real_ports = self._get_physical_devices(self.mover_name) if not self.port_conf: LOG.debug("No ports are specified, so any of the ports on the " "Data Mover can be used.") return real_ports matched_ports, unmanaged_ports = enas_utils.do_match_any( real_ports, self.port_conf) if not matched_ports: msg = (_("None of the specified network ports exist. " "Please check your configuration vnx_ethernet_ports " "in manila.conf. 
The available ports on the Data Mover " "are %s.") % ",".join(real_ports)) raise exception.BadConfigurationException(reason=msg) LOG.debug("Ports: %s can be used.", ",".join(matched_ports)) return list(matched_ports) def update_share_stats(self, stats_dict): """Communicate with EMCNASClient to get the stats.""" stats_dict['driver_version'] = VERSION self._get_context('Mover').get_ref(self.mover_name, True) stats_dict['pools'] = [] status, pools = self._get_context('StoragePool').get_all() for name, pool in pools.items(): if not self.pools or pool['name'] in self.pools: total_size = float(pool['total_size']) used_size = float(pool['used_size']) pool_stat = dict( pool_name=pool['name'], total_capacity_gb=enas_utils.mb_to_gb(total_size), free_capacity_gb=enas_utils.mb_to_gb( total_size - used_size), qos=False, reserved_percentage=self.reserved_percentage, reserved_snapshot_percentage=( self.reserved_snapshot_percentage), reserved_share_extend_percentage=( self.reserved_share_extend_percentage), ) stats_dict['pools'].append(pool_stat) if not stats_dict['pools']: message = _("Failed to update storage pool.") LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get_pool(self, share): """Get the pool name of the share.""" share_name = share['id'] status, filesystem = self._get_context('FileSystem').get(share_name) if status != constants.STATUS_OK: message = (_("File System %(name)s not found. " "Reason: %(err)s") % {'name': share_name, 'err': filesystem}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) pool_id = filesystem['pools_id'][0] # Get the real pools from the backend storage status, backend_pools = self._get_context('StoragePool').get_all() if status != constants.STATUS_OK: message = (_("Failed to get storage pool information. 
" "Reason: %s") % backend_pools) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) for name, pool_info in backend_pools.items(): if pool_info['id'] == pool_id: return name available_pools = [item for item in backend_pools] message = (_("No matched pool name for share: %(share)s. " "Available pools: %(pools)s") % {'share': share_name, 'pools': available_pools}) raise exception.EMCVnxXMLAPIError(err=message) def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return constants.IP_ALLOCATIONS def setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" # Only support single security service with type 'active_directory' vdm_name = network_info['server_id'] vlan_id = network_info['segmentation_id'] active_directory = None allocated_interfaces = [] if network_info.get('security_services'): is_valid, active_directory = self._get_valid_security_service( network_info['security_services']) if not is_valid: raise exception.EMCVnxXMLAPIError(err=active_directory) try: if not self._vdm_exist(vdm_name): LOG.debug('Share server %s not found, creating ' 'share server...', vdm_name) self._get_context('VDM').create(vdm_name, self.mover_name) devices = self.get_managed_ports() for net_info in network_info['network_allocations']: random.shuffle(devices) ip_version = net_info['ip_version'] interface = { 'name': net_info['id'][-12:], 'device_name': devices[0], 'ip': net_info['ip_address'], 'mover_name': self.mover_name, 'vlan_id': vlan_id if vlan_id else -1, } if ip_version == 6: interface['ip_version'] = ip_version interface['net_mask'] = str( utils.cidr_to_prefixlen(network_info['cidr'])) else: interface['net_mask'] = utils.cidr_to_netmask( network_info['cidr']) self._get_context('MoverInterface').create(interface) allocated_interfaces.append(interface) cifs_interface = allocated_interfaces[0] nfs_interface = allocated_interfaces[1] if active_directory: 
self._configure_active_directory( active_directory, vdm_name, cifs_interface) self._get_context('VDM').attach_nfs_interface( vdm_name, nfs_interface['name']) return { 'share_server_name': vdm_name, 'cifs_if': cifs_interface['ip'], 'nfs_if': nfs_interface['ip'], } except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Could not setup server.') server_details = self._construct_backend_details( vdm_name, allocated_interfaces) self.teardown_server( server_details, network_info['security_services']) def _construct_backend_details(self, vdm_name, interfaces): if_number = len(interfaces) cifs_if = interfaces[0]['ip'] if if_number > 0 else None nfs_if = interfaces[1]['ip'] if if_number > 1 else None return { 'share_server_name': vdm_name, 'cifs_if': cifs_if, 'nfs_if': nfs_if, } @enas_utils.log_enter_exit def _vdm_exist(self, name): status, out = self._get_context('VDM').get(name) if constants.STATUS_OK != status: return False return True def _get_physical_devices(self, mover_name): """Get a proper network device to create interface.""" devices = self._get_context('Mover').get_physical_devices(mover_name) if not devices: message = (_("Could not get physical device port on mover %s.") % self.mover_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return devices def _configure_active_directory( self, security_service, vdm_name, interface): domain = security_service['domain'] server = security_service['dns_ip'] self._get_context('DNSDomain').create(self.mover_name, domain, server) cifs_server_args = { 'name': vdm_name, 'interface_ip': interface['ip'], 'domain_name': security_service['domain'], 'user_name': security_service['user'], 'password': security_service['password'], 'mover_name': vdm_name, 'is_vdm': True, } self._get_context('CIFSServer').create(cifs_server_args) def teardown_server(self, server_details, security_services=None): """Teardown share server.""" if not server_details: LOG.debug('Server details are empty.') return 
vdm_name = server_details.get('share_server_name') if not vdm_name: LOG.debug('No share server found in server details.') return cifs_if = server_details.get('cifs_if') nfs_if = server_details.get('nfs_if') status, vdm = self._get_context('VDM').get(vdm_name) if constants.STATUS_OK != status: LOG.debug('Share server %s not found.', vdm_name) return interfaces = self._get_context('VDM').get_interfaces(vdm_name) for if_name in interfaces['nfs']: self._get_context('VDM').detach_nfs_interface(vdm_name, if_name) if security_services: # Only support single security service with type 'active_directory' is_valid, active_directory = self._get_valid_security_service( security_services) if is_valid: status, servers = self._get_context('CIFSServer').get_all( vdm_name) if constants.STATUS_OK != status: LOG.error('Could not find CIFS server by name: %s.', vdm_name) else: cifs_servers = copy.deepcopy(servers) for name, server in cifs_servers.items(): # Unjoin CIFS Server from domain cifs_server_args = { 'name': server['name'], 'join_domain': False, 'user_name': active_directory['user'], 'password': active_directory['password'], 'mover_name': vdm_name, 'is_vdm': True, } try: self._get_context('CIFSServer').modify( cifs_server_args) except exception.EMCVnxXMLAPIError as expt: LOG.debug("Failed to modify CIFS server " "%(server)s. Reason: %(err)s.", {'server': server, 'err': expt}) self._get_context('CIFSServer').delete(name, vdm_name) # Delete interface from Data Mover if cifs_if: self._get_context('MoverInterface').delete(cifs_if, self.mover_name) if nfs_if: self._get_context('MoverInterface').delete(nfs_if, self.mover_name) # Delete Virtual Data Mover self._get_context('VDM').delete(vdm_name) def _get_valid_security_service(self, security_services): """Validate security services and return a supported security service. :param security_services: :returns: (, ) -- is true to indicate security_services includes zero or single security service for active directory. 
Otherwise, it would return false. return error message when is false. Otherwise, it will return zero or single security service for active directory. """ # Only support single security service with type 'active_directory' service_number = len(security_services) if (service_number > 1 or security_services[0]['type'] != 'active_directory'): return False, _("Unsupported security services. " "Only support single security service and " "only support type 'active_directory'") return True, security_services[0] def _get_share_server_name(self, share_server): try: return share_server['backend_details']['share_server_name'] except Exception: LOG.debug("Didn't get share server name from share_server %s.", share_server) return share_server['id'] def _get_context(self, type): return self.manager.getStorageContext(type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/dell_emc/plugins/vnx/object_manager.py0000664000175000017500000023071100000000000026455 0ustar00zuulzuul00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import re from lxml import builder from lxml import etree as ET from oslo_concurrency import processutils from oslo_log import log from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.common.enas import connector from manila.share.drivers.dell_emc.common.enas import constants from manila.share.drivers.dell_emc.common.enas import utils as enas_utils from manila.share.drivers.dell_emc.common.enas import xml_api_parser as parser from manila import utils LOG = log.getLogger(__name__) @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class StorageObjectManager(object): def __init__(self, configuration): self.context = dict() self.connectors = dict() self.connectors['XML'] = connector.XMLAPIConnector(configuration) self.connectors['SSH'] = connector.SSHConnector(configuration) elt_maker = builder.ElementMaker(nsmap={None: constants.XML_NAMESPACE}) xml_parser = parser.XMLAPIParser() obj_types = StorageObject.__subclasses__() # pylint: disable=no-member for item in obj_types: key = item.__name__ self.context[key] = eval(key)(self.connectors, elt_maker, xml_parser, self) def getStorageContext(self, type): if type in self.context: return self.context[type] else: message = (_("Invalid storage object type %s.") % type) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) class StorageObject(object): def __init__(self, conn, elt_maker, xml_parser, manager): self.conn = conn self.elt_maker = elt_maker self.xml_parser = xml_parser self.manager = manager self.xml_retry = False self.ssh_retry_patterns = [ ( constants.SSH_DEFAULT_RETRY_PATTERN, exception.EMCVnxLockRequiredException() ), ] def _translate_response(self, response): """Translate different status to ok/error status.""" if (constants.STATUS_OK == response['maxSeverity'] or constants.STATUS_ERROR == response['maxSeverity']): return old_Severity = response['maxSeverity'] if 
response['maxSeverity'] in (constants.STATUS_DEBUG, constants.STATUS_INFO): response['maxSeverity'] = constants.STATUS_OK LOG.warning("Translated status from %(old)s to %(new)s. " "Message: %(info)s.", {'old': old_Severity, 'new': response['maxSeverity'], 'info': response}) def _response_validation(self, response, error_code): """Validates whether a response includes a certain error code.""" msg_codes = self._get_problem_message_codes(response['problems']) for code in msg_codes: if code == error_code: return True return False def _get_problem_message_codes(self, problems): message_codes = [] for problem in problems: if 'messageCode' in problem: message_codes.append(problem['messageCode']) return message_codes def _get_problem_messages(self, problems): messages = [] for problem in problems: if 'message' in problem: messages.append(problem['message']) return messages def _get_problem_diags(self, problems): diags = [] for problem in problems: if 'Diagnostics' in problem: diags.append(problem['Diagnostics']) return diags def _build_query_package(self, body): return self.elt_maker.RequestPacket( self.elt_maker.Request( self.elt_maker.Query(body) ) ) def _build_task_package(self, body): return self.elt_maker.RequestPacket( self.elt_maker.Request( self.elt_maker.StartTask(body, timeout='300') ) ) @utils.retry(retry_param=exception.EMCVnxLockRequiredException) def _send_request(self, req): req_xml = constants.XML_HEADER + ET.tostring(req).decode('utf-8') rsp_xml = self.conn['XML'].request(str(req_xml)) response = self.xml_parser.parse(rsp_xml) self._translate_response(response) if (response['maxSeverity'] != constants.STATUS_OK and self._response_validation(response, constants.MSG_CODE_RETRY)): raise exception.EMCVnxLockRequiredException return response @utils.retry(retry_param=exception.EMCVnxLockRequiredException) def _execute_cmd(self, cmd, retry_patterns=None, check_exit_code=False): """Execute NAS command via SSH. 
:param retry_patterns: list of tuples,where each tuple contains a reg expression and an exception. :param check_exit_code: Boolean. Raise processutils.ProcessExecutionError if the command failed to execute and this parameter is set to True. """ if retry_patterns is None: retry_patterns = self.ssh_retry_patterns try: out, err = self.conn['SSH'].run_ssh(cmd, check_exit_code) except processutils.ProcessExecutionError as e: for pattern in retry_patterns: if re.search(pattern[0], e.stdout): raise pattern[1] raise return out, err def _copy_properties(self, source, target, property_map, deep_copy=True): for property in property_map: if isinstance(property, tuple): target_key, src_key = property else: target_key = src_key = property if src_key in source: if deep_copy and isinstance(source[src_key], list): target[target_key] = copy.deepcopy(source[src_key]) else: target[target_key] = source[src_key] else: target[target_key] = None def _get_mover_id(self, mover_name, is_vdm): if is_vdm: return self.get_context('VDM').get_id(mover_name) else: return self.get_context('Mover').get_id(mover_name, self.xml_retry) def get_context(self, type): return self.manager.getStorageContext(type) @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class FileSystem(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(FileSystem, self).__init__(conn, elt_maker, xml_parser, manager) self.filesystem_map = dict() @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def create(self, name, size, pool_name, mover_name, is_vdm=True): pool_id = self.get_context('StoragePool').get_id(pool_name) mover_id = self._get_mover_id(mover_name, is_vdm) if is_vdm: mover = self.elt_maker.Vdm(vdm=mover_id) else: mover = self.elt_maker.Mover(mover=mover_id) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewFileSystem( mover, self.elt_maker.StoragePool( pool=pool_id, size=str(size), mayContainSlices='true' ), 
name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._response_validation( response, constants.MSG_FILESYSTEM_EXIST): LOG.warning("File system %s already exists. " "Skip the creation.", name) return elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create file system %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name): if name not in self.filesystem_map: request = self._build_query_package( self.elt_maker.FileSystemQueryParams( self.elt_maker.AspectSelection( fileSystems='true', fileSystemCapacityInfos='true' ), self.elt_maker.Alias(name=name) ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: if self._is_filesystem_nonexistent(response): return constants.STATUS_NOT_FOUND, response['problems'] else: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] src = response['objects'][0] filesystem = {} property_map = ( 'name', ('pools_id', 'storagePools'), ('volume_id', 'volume'), ('size', 'volumeSize'), ('id', 'fileSystem'), 'type', 'dataServicePolicies', ) self._copy_properties(src, filesystem, property_map) self.filesystem_map[name] = filesystem return constants.STATUS_OK, self.filesystem_map[name] def delete(self, name): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning("File system %s not found. Skip the deletion.", name) return elif constants.STATUS_OK != status: message = (_("Failed to get file system by name %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) id = self.filesystem_map[name]['id'] request = self._build_task_package( self.elt_maker.DeleteFileSystem(fileSystem=id) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete file system %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.filesystem_map.pop(name) def extend(self, name, pool_name, new_size): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get file system by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) id = out['id'] size = int(out['size']) if new_size < size: message = (_("Failed to extend file system %(name)s because new " "size %(new_size)d is smaller than old size " "%(size)d.") % {'name': name, 'new_size': new_size, 'size': size}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) elif new_size == size: return pool_id = self.get_context('StoragePool').get_id(pool_name) request = self._build_task_package( self.elt_maker.ExtendFileSystem( self.elt_maker.StoragePool( pool=pool_id, size=str(new_size - size) ), fileSystem=id, ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to extend file system %(name)s to new size " "%(new_size)d. Reason: %(err)s.") % {'name': name, 'new_size': new_size, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get_id(self, name): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get file system by name %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return self.filesystem_map[name]['id'] def _is_filesystem_nonexistent(self, response): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) diags = self._get_problem_diags(response['problems']) for code, diagnose in zip(msg_codes, diags): if (code == constants.MSG_FILESYSTEM_NOT_FOUND and diagnose.find('File system not found.') != -1): return True return False def create_from_snapshot(self, name, snap_name, source_fs_name, pool_name, mover_name, connect_id): create_fs_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-name', name, '-type', 'uxfs', '-create', 'samesize=' + source_fs_name, 'pool=%s' % pool_name, 'storage=SINGLE', 'worm=off', '-thin', 'no', '-option', 'slice=y', ] self._execute_cmd(create_fs_cmd) ro_mount_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name, '-option', 'ro', name, '/%s' % name, ] self._execute_cmd(ro_mount_cmd) session_name = name + ':' + snap_name copy_ckpt_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_copy', '-name', session_name[0:63], '-source', '-ckpt', snap_name, '-destination', '-fs', name, '-interconnect', 'id=%s' % connect_id, '-overwrite_destination', '-full_copy', ] try: self._execute_cmd(copy_ckpt_cmd, check_exit_code=True) except processutils.ProcessExecutionError: LOG.exception("Failed to copy content from snapshot %(snap)s " "to file system %(filesystem)s.", {'snap': snap_name, 'filesystem': name}) # When an error happens during nas_copy, we need to continue # deleting the checkpoint of the target file system if it exists. 
query_fs_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-info', name, ] out, err = self._execute_cmd(query_fs_cmd) re_ckpts = r'ckpts\s*=\s*(.*)\s*' m = re.search(re_ckpts, out) if m is not None: ckpts = m.group(1) for ckpt in re.split(',', ckpts): umount_ckpt_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_umount', mover_name, '-perm', ckpt, ] self._execute_cmd(umount_ckpt_cmd) delete_ckpt_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-delete', ckpt, '-Force', ] self._execute_cmd(delete_ckpt_cmd) rw_mount_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_mount', mover_name, '-option', 'rw', name, '/%s' % name, ] self._execute_cmd(rw_mount_cmd) @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class StoragePool(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(StoragePool, self).__init__(conn, elt_maker, xml_parser, manager) self.pool_map = dict() def get(self, name, force=False): if name not in self.pool_map or force: status, out = self.get_all() if constants.STATUS_OK != status: return status, out if name not in self.pool_map: return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.pool_map[name] def get_all(self): self.pool_map.clear() request = self._build_query_package( self.elt_maker.StoragePoolQueryParams() ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] for item in response['objects']: pool = {} property_map = ( 'name', ('movers_id', 'movers'), ('total_size', 'autoSize'), ('used_size', 'usedSize'), 'diskType', 'dataServicePolicies', ('id', 'pool'), ) self._copy_properties(item, pool, property_map) self.pool_map[item['name']] = pool return constants.STATUS_OK, self.pool_map def get_id(self, name): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get storage pool by 
name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return out['id'] @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class MountPoint(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(MountPoint, self).__init__(conn, elt_maker, xml_parser, manager) @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def create(self, mount_path, fs_name, mover_name, is_vdm=True): fs_id = self.get_context('FileSystem').get_id(fs_name) mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewMount( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', ), fileSystem=fs_id, path=mount_path ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._is_mount_point_already_existent(response): LOG.warning("Mount Point %(mount)s already exists. " "Skip the creation.", {'mount': mount_path}) return elif constants.STATUS_OK != response['maxSeverity']: message = (_('Failed to create Mount Point %(mount)s for ' 'file system %(fs_name)s. 
Reason: %(err)s.') % {'mount': mount_path, 'fs_name': fs_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def get(self, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_query_package( self.elt_maker.MountQueryParams( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false' ) ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['objects'] if not response['objects']: return constants.STATUS_NOT_FOUND, None else: return constants.STATUS_OK, response['objects'] @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def delete(self, mount_path, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.DeleteMount( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', path=mount_path ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._is_mount_point_nonexistent(response): LOG.warning('Mount point %(mount)s on mover %(mover_name)s ' 'not found.', {'mount': mount_path, 'mover_name': mover_name}) return elif constants.STATUS_OK != response['maxSeverity']: message = (_('Failed to delete mount point %(mount)s on mover ' '%(mover_name)s. 
Reason: %(err)s.') % {'mount': mount_path, 'mover_name': mover_name, 'err': response}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def _is_mount_point_nonexistent(self, response): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) message = self._get_problem_messages(response['problems']) for code, msg in zip(msg_codes, message): if ((code == constants.MSG_GENERAL_ERROR and msg.find( 'No such path or invalid operation') != -1) or code == constants.MSG_INVALID_VDM_ID or code == constants.MSG_INVALID_MOVER_ID): return True return False def _is_mount_point_already_existent(self, response): """Translate different status to ok/error status.""" msg_codes = self._get_problem_message_codes(response['problems']) message = self._get_problem_messages(response['problems']) for code, msg in zip(msg_codes, message): if ((code == constants.MSG_GENERAL_ERROR and msg.find( 'Mount already exists') != -1)): return True return False @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class Mover(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(Mover, self).__init__(conn, elt_maker, xml_parser, manager) self.mover_map = dict() self.mover_ref_map = dict() def get_ref(self, name, force=False): if name not in self.mover_ref_map or force: self.mover_ref_map.clear() request = self._build_query_package( self.elt_maker.MoverQueryParams( self.elt_maker.AspectSelection(movers='true') ) ) response = self._send_request(request) if constants.STATUS_ERROR == response['maxSeverity']: return response['maxSeverity'], response['problems'] for item in response['objects']: mover = {} property_map = ('name', ('id', 'mover')) self._copy_properties(item, mover, property_map) if mover: self.mover_ref_map[mover['name']] = mover if (name not in self.mover_ref_map or self.mover_ref_map[name]['id'] == ''): return constants.STATUS_NOT_FOUND, None return 
constants.STATUS_OK, self.mover_ref_map[name] def get(self, name, force=False): if name not in self.mover_map or force: if name in self.mover_ref_map and not force: mover_id = self.mover_ref_map[name]['id'] else: mover_id = self.get_id(name, force) if name in self.mover_map: self.mover_map.pop(name) request = self._build_query_package( self.elt_maker.MoverQueryParams( self.elt_maker.AspectSelection( moverDeduplicationSettings='true', moverDnsDomains='true', moverInterfaces='true', moverNetworkDevices='true', moverNisDomains='true', moverRoutes='true', movers='true', moverStatuses='true' ), mover=mover_id ) ) response = self._send_request(request) if constants.STATUS_ERROR == response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] mover = {} src = response['objects'][0] property_map = ( 'name', ('id', 'mover'), ('Status', 'maxSeverity'), 'version', 'uptime', 'role', ('interfaces', 'MoverInterface'), ('devices', 'LogicalNetworkDevice'), ('dns_domain', 'MoverDnsDomain'), ) self._copy_properties(src, mover, property_map) internal_devices = [] if mover['interfaces']: for interface in mover['interfaces']: if self._is_internal_device(interface['device']): internal_devices.append(interface) mover['interfaces'] = [var for var in mover['interfaces'] if var not in internal_devices] self.mover_map[name] = mover return constants.STATUS_OK, self.mover_map[name] def get_id(self, name, force=False): status, mover_ref = self.get_ref(name, force) if constants.STATUS_OK != status: message = (_("Failed to get mover by name %(name)s.") % {'name': name}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return mover_ref['id'] def _is_internal_device(self, device): for device_type in ('mge', 'fxg', 'tks', 'fsn'): if device.find(device_type) == 0: return True return False def get_interconnect_id(self, source, destination): header = [ 'id', 'name', 'source_server', 
'destination_system', 'destination_server', ] conn_id = None command_nas_cel = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_cel', '-interconnect', '-l', ] out, err = self._execute_cmd(command_nas_cel) lines = out.strip().split('\n') for line in lines: if line.strip().split() == header: LOG.info('Found the header of the command ' '/nas/bin/nas_cel -interconnect -l.') else: interconn = line.strip().split() if interconn[2] == source and interconn[4] == destination: conn_id = interconn[0] return conn_id def get_physical_devices(self, mover_name): physical_network_devices = [] cmd_sysconfig = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_sysconfig', mover_name, '-pci' ] out, err = self._execute_cmd(cmd_sysconfig) re_pattern = (r'0:\s*(?P\S+)\s*IRQ:\s*(?P\d+)\n' r'.*\n' r'\s*Link:\s*(?P[A-Za-z]+)') for device in re.finditer(re_pattern, out): if 'Up' in device.group('link'): physical_network_devices.append(device.group('name')) return physical_network_devices @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class VDM(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(VDM, self).__init__(conn, elt_maker, xml_parser, manager) self.vdm_map = dict() @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def create(self, name, mover_name): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewVdm(mover=mover_id, name=name) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._response_validation(response, constants.MSG_VDM_EXIST): LOG.warning("VDM %(name)s already exists. Skip the creation.", {'name': name}) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create VDM %(name)s on mover " "%(mover_name)s. 
Reason: %(err)s.") % {'name': name, 'mover_name': mover_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name): if name not in self.vdm_map: request = self._build_query_package( self.elt_maker.VdmQueryParams() ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] elif not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] for item in response['objects']: vdm = {} property_map = ( 'name', ('id', 'vdm'), 'state', ('host_mover_id', 'mover'), ('interfaces', 'Interfaces'), ) self._copy_properties(item, vdm, property_map) self.vdm_map[item['name']] = vdm if name not in self.vdm_map: return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.vdm_map[name] def delete(self, name): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning("VDM %s not found. Skip the deletion.", name) return elif constants.STATUS_OK != status: message = (_("Failed to get VDM by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) vdm_id = self.vdm_map[name]['id'] request = self._build_task_package( self.elt_maker.DeleteVdm(vdm=vdm_id) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete VDM %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.vdm_map.pop(name) def get_id(self, name): status, vdm = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get VDM by name %(name)s.") % {'name': name}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return vdm['id'] def attach_nfs_interface(self, vdm_name, if_name): command_attach_nfs_interface = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', vdm_name, '-attach', if_name, ] self._execute_cmd(command_attach_nfs_interface) def detach_nfs_interface(self, vdm_name, if_name): command_detach_nfs_interface = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', vdm_name, '-detach', if_name, ] try: self._execute_cmd(command_detach_nfs_interface, check_exit_code=True) except processutils.ProcessExecutionError: interfaces = self.get_interfaces(vdm_name) if if_name not in interfaces['nfs']: LOG.debug("Failed to detach interface %(interface)s " "from mover %(mover_name)s.", {'interface': if_name, 'mover_name': vdm_name}) else: message = (_("Failed to detach interface %(interface)s " "from mover %(mover_name)s.") % {'interface': if_name, 'mover_name': vdm_name}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get_interfaces(self, vdm_name): interfaces = { 'cifs': [], 'nfs': [], } re_pattern = (r'Interfaces to services mapping:' r'\s*(?P(\s*interface=.*)*)') command_get_interfaces = [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-i', '-vdm', vdm_name, ] out, err = self._execute_cmd(command_get_interfaces) m = re.search(re_pattern, out) if m: if_list = m.group('interfaces').split('\n') for i in if_list: m_if = re.search(r'\s*interface=(?P.*)\s*:' r'\s*(?P.*)\s*', i) if m_if: if_name = m_if.group('if').strip() if 'cifs' == m_if.group('type') and if_name != '': interfaces['cifs'].append(if_name) elif (m_if.group('type') in ('vdm', 'nfs') and if_name != ''): 
interfaces['nfs'].append(if_name) return interfaces @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class Snapshot(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(Snapshot, self).__init__(conn, elt_maker, xml_parser, manager) self.snap_map = dict() def create(self, name, fs_name, pool_id, ckpt_size=None): fs_id = self.get_context('FileSystem').get_id(fs_name) if ckpt_size: elt_pool = self.elt_maker.StoragePool( pool=pool_id, size=str(ckpt_size) ) else: elt_pool = self.elt_maker.StoragePool(pool=pool_id) new_ckpt = self.elt_maker.NewCheckpoint( self.elt_maker.SpaceAllocationMethod( elt_pool ), checkpointOf=fs_id, name=name ) request = self._build_task_package(new_ckpt) response = self._send_request(request) if self._response_validation(response, constants.MSG_SNAP_EXIST): LOG.warning("Snapshot %(name)s already exists. " "Skip the creation.", {'name': name}) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create snapshot %(name)s on " "filesystem %(fs_name)s. Reason: %(err)s.") % {'name': name, 'fs_name': fs_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name): if name not in self.snap_map: request = self._build_query_package( self.elt_maker.CheckpointQueryParams( self.elt_maker.Alias(name=name) ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, response['problems'] src = response['objects'][0] snap = {} property_map = ( 'name', ('id', 'checkpoint'), 'checkpointOf', 'state', ) self._copy_properties(src, snap, property_map) self.snap_map[name] = snap return constants.STATUS_OK, self.snap_map[name] def delete(self, name): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning("Snapshot %s not found. 
Skip the deletion.", name) return elif constants.STATUS_OK != status: message = (_("Failed to get snapshot by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) chpt_id = self.snap_map[name]['id'] request = self._build_task_package( self.elt_maker.DeleteCheckpoint(checkpoint=chpt_id) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete snapshot %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.snap_map.pop(name) def get_id(self, name): status, out = self.get(name) if constants.STATUS_OK != status: message = (_("Failed to get snapshot by %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) return self.snap_map[name]['id'] @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class MoverInterface(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(MoverInterface, self).__init__(conn, elt_maker, xml_parser, manager) @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def create(self, interface): # Maximum of 32 characters for mover interface name name = interface['name'] if len(name) > 32: name = name[0:31] device_name = interface['device_name'] ip_addr = interface['ip'] mover_name = interface['mover_name'] net_mask = interface['net_mask'] vlan_id = interface['vlan_id'] if interface['vlan_id'] else -1 mover_id = self._get_mover_id(mover_name, False) params = dict(device=device_name, ipAddress=str(ip_addr), mover=mover_id, name=name, netMask=net_mask, vlanid=str(vlan_id)) if interface.get('ip_version') == 6: params['ipVersion'] = 'IPv6' if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewMoverInterface(**params) ) response = 
self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._response_validation( response, constants.MSG_INTERFACE_NAME_EXIST): LOG.warning("Mover interface name %s already exists. " "Skip the creation.", name) return elif self._response_validation( response, constants.MSG_INTERFACE_EXIST): LOG.warning("Mover interface IP %s already exists. " "Skip the creation.", ip_addr) return elif self._response_validation( response, constants.MSG_INTERFACE_INVALID_VLAN_ID): # When fail to create a mover interface with the specified # vlan id, VNX will leave an interface with vlan id 0 in the # backend. So we should explicitly remove the interface. try: self.delete(str(ip_addr), mover_name) except exception.EMCVnxXMLAPIError: pass message = (_("Invalid vlan id %s. Other interfaces on this " "subnet are in a different vlan.") % vlan_id) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create mover interface %(interface)s. 
" "Reason: %(err)s.") % {'interface': interface, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name, mover_name): # Maximum of 32 characters for mover interface name if len(name) > 32: name = name[0:31] status, mover = self.manager.getStorageContext('Mover').get( mover_name, True) if constants.STATUS_OK == status: for interface in mover['interfaces']: if name == interface['name']: return constants.STATUS_OK, interface return constants.STATUS_NOT_FOUND, None @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def delete(self, ip_addr, mover_name): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.DeleteMoverInterface( ipAddress=str(ip_addr), mover=mover_id ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._response_validation( response, constants.MSG_INTERFACE_NON_EXISTENT): LOG.warning("Mover interface %s not found. " "Skip the deletion.", ip_addr) return elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete mover interface %(ip)s on mover " "%(mover)s. 
Reason: %(err)s.") % {'ip': ip_addr, 'mover': mover_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class DNSDomain(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(DNSDomain, self).__init__(conn, elt_maker, xml_parser, manager) @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def create(self, mover_name, name, servers, protocol='udp'): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.NewMoverDnsDomain( mover=mover_id, name=name, servers=servers, protocol=protocol ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create DNS domain %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def delete(self, mover_name, name): mover_id = self._get_mover_id(mover_name, False) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.DeleteMoverDnsDomain( mover=mover_id, name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: LOG.warning("Failed to delete DNS domain %(name)s. 
" "Reason: %(err)s.", {'name': name, 'err': response['problems']}) @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class CIFSServer(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(CIFSServer, self).__init__(conn, elt_maker, xml_parser, manager) self.cifs_server_map = dict() @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def create(self, server_args): compName = server_args['name'] # Maximum of 14 characters for netBIOS name name = server_args['name'][-14:] # Maximum of 12 characters for alias name alias_name = server_args['name'][-12:] interfaces = server_args['interface_ip'] domain_name = server_args['domain_name'] user_name = server_args['user_name'] password = server_args['password'] mover_name = server_args['mover_name'] is_vdm = server_args['is_vdm'] mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False alias_name_list = [self.elt_maker.li(alias_name)] request = self._build_task_package( self.elt_maker.NewW2KCifsServer( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if server_args['is_vdm'] else 'false' ), self.elt_maker.Aliases(*alias_name_list), self.elt_maker.JoinDomain(userName=user_name, password=password), compName=compName, domain=domain_name, interfaces=interfaces, name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) if constants.STATUS_OK != response['maxSeverity']: status, out = self.get(compName, mover_name, is_vdm) if constants.STATUS_OK == status and out['domainJoined'] == 'true': return else: message = (_("Failed to create CIFS server %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def get_all(self, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_query_package( self.elt_maker.CifsServerQueryParams( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false' ) ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['objects'] if mover_name in self.cifs_server_map: self.cifs_server_map.pop(mover_name) self.cifs_server_map[mover_name] = dict() for item in response['objects']: self.cifs_server_map[mover_name][item['compName'].lower()] = item return constants.STATUS_OK, self.cifs_server_map[mover_name] def get(self, name, mover_name, is_vdm=True, force=False): # name is compName name = name.lower() if (mover_name in self.cifs_server_map and name in self.cifs_server_map[mover_name]) and not force: return constants.STATUS_OK, self.cifs_server_map[mover_name][name] self.get_all(mover_name, is_vdm) if mover_name in self.cifs_server_map: for compName, server in self.cifs_server_map[mover_name].items(): if name == compName: return constants.STATUS_OK, server return constants.STATUS_NOT_FOUND, None @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def modify(self, server_args): """Make CIFS server join or un-join the domain. 
:param server_args: Dictionary for CIFS server modification name: CIFS server name instead of compName join_domain: True for joining the domain, false for un-joining user_name: User name under which the domain is joined password: Password associated with the user name mover_name: mover or VDM name is_vdm: Boolean to indicate mover or VDM :raises exception.EMCVnxXMLAPIError: if modification fails. """ name = server_args['name'] join_domain = server_args['join_domain'] user_name = server_args['user_name'] password = server_args['password'] mover_name = server_args['mover_name'] if 'is_vdm' in server_args.keys(): is_vdm = server_args['is_vdm'] else: is_vdm = True mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False request = self._build_task_package( self.elt_maker.ModifyW2KCifsServer( self.elt_maker.DomainSetting( joinDomain='true' if join_domain else 'false', password=password, userName=user_name, ), mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif self._ignore_modification_error(response, join_domain): return elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to modify CIFS server %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def _ignore_modification_error(self, response, join_domain): if self._response_validation(response, constants.MSG_JOIN_DOMAIN): return join_domain elif self._response_validation(response, constants.MSG_UNJOIN_DOMAIN): return not join_domain return False def delete(self, computer_name, mover_name, is_vdm=True): try: status, out = self.get( computer_name.lower(), mover_name, is_vdm, self.xml_retry) if constants.STATUS_NOT_FOUND == status: LOG.warning("CIFS server %(name)s on mover %(mover_name)s " "not found. Skip the deletion.", {'name': computer_name, 'mover_name': mover_name}) return except exception.EMCVnxXMLAPIError: LOG.warning("CIFS server %(name)s on mover %(mover_name)s " "not found. Skip the deletion.", {'name': computer_name, 'mover_name': mover_name}) return server_name = out['name'] mover_id = self._get_mover_id(mover_name, is_vdm) request = self._build_task_package( self.elt_maker.DeleteCifsServer( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', name=server_name ) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete CIFS server %(name)s. 
" "Reason: %(err)s.") % {'name': computer_name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.cifs_server_map[mover_name].pop(computer_name) @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class CIFSShare(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(CIFSShare, self).__init__(conn, elt_maker, xml_parser, manager) self.cifs_share_map = dict() @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def create(self, name, server_name, mover_name, is_vdm=True): mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False share_path = '/' + name request = self._build_task_package( self.elt_maker.NewCifsShare( self.elt_maker.MoverOrVdm( mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false' ), self.elt_maker.CifsServers(self.elt_maker.li(server_name)), name=name, path=share_path ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to create file share %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get(self, name): if name not in self.cifs_share_map: request = self._build_query_package( self.elt_maker.CifsShareQueryParams(name=name) ) response = self._send_request(request) if constants.STATUS_OK != response['maxSeverity']: return response['maxSeverity'], response['problems'] if not response['objects']: return constants.STATUS_NOT_FOUND, None self.cifs_share_map[name] = response['objects'][0] return constants.STATUS_OK, self.cifs_share_map[name] @utils.retry(retry_param=exception.EMCVnxInvalidMoverID) def delete(self, name, mover_name, is_vdm=True): status, out = self.get(name) if constants.STATUS_NOT_FOUND == status: LOG.warning("CIFS share %s not found. Skip the deletion.", name) return elif constants.STATUS_OK != status: message = (_("Failed to get CIFS share by name %(name)s. " "Reason: %(err)s.") % {'name': name, 'err': out}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) mover_id = self._get_mover_id(mover_name, is_vdm) if self.xml_retry: self.xml_retry = False netbios_names = self.cifs_share_map[name]['CifsServers'] request = self._build_task_package( self.elt_maker.DeleteCifsShare( self.elt_maker.CifsServers(*map(lambda a: self.elt_maker.li(a), netbios_names)), mover=mover_id, moverIdIsVdm='true' if is_vdm else 'false', name=name ) ) response = self._send_request(request) if (self._response_validation(response, constants.MSG_INVALID_MOVER_ID) and not self.xml_retry): self.xml_retry = True raise exception.EMCVnxInvalidMoverID(id=mover_id) elif constants.STATUS_OK != response['maxSeverity']: message = (_("Failed to delete file system %(name)s. 
" "Reason: %(err)s.") % {'name': name, 'err': response['problems']}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.cifs_share_map.pop(name) def disable_share_access(self, share_name, mover_name): cmd_str = 'sharesd %s set noaccess' % share_name disable_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % cmd_str, ] try: self._execute_cmd(disable_access, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to disable the access to CIFS share ' '%(name)s. Reason: %(err)s.') % {'name': share_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def allow_share_access(self, mover_name, share_name, user_name, domain, access=constants.CIFS_ACL_FULLCONTROL): account = user_name + "@" + domain allow_str = ('sharesd %(share_name)s grant %(account)s=%(access)s' % {'share_name': share_name, 'account': account, 'access': access}) allow_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % allow_str, ] try: self._execute_cmd(allow_access, check_exit_code=True) except processutils.ProcessExecutionError as expt: dup_msg = re.compile(r'ACE for %(domain)s\\%(user)s unchanged' % {'domain': domain, 'user': user_name}, re.I) if re.search(dup_msg, expt.stdout): LOG.warning("Duplicate access control entry, " "skipping allow...") else: message = (_('Failed to allow the access %(access)s to ' 'CIFS share %(name)s. 
Reason: %(err)s.') % {'access': access, 'name': share_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def deny_share_access(self, mover_name, share_name, user_name, domain, access=constants.CIFS_ACL_FULLCONTROL): account = user_name + "@" + domain revoke_str = ('sharesd %(share_name)s revoke %(account)s=%(access)s' % {'share_name': share_name, 'account': account, 'access': access}) allow_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % revoke_str, ] try: self._execute_cmd(allow_access, check_exit_code=True) except processutils.ProcessExecutionError as expt: not_found_msg = re.compile( r'No ACE found for %(domain)s\\%(user)s' % {'domain': domain, 'user': user_name}, re.I) user_err_msg = re.compile( r'Cannot get mapping for %(domain)s\\%(user)s' % {'domain': domain, 'user': user_name}, re.I) if re.search(not_found_msg, expt.stdout): LOG.warning("No access control entry found, " "skipping deny...") elif re.search(user_err_msg, expt.stdout): LOG.warning("User not found on domain, skipping deny...") else: message = (_('Failed to deny the access %(access)s to ' 'CIFS share %(name)s. 
Reason: %(err)s.') % {'access': access, 'name': share_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def get_share_access(self, mover_name, share_name): get_str = 'sharesd %s dump' % share_name get_access = [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', mover_name, '-v', "%s" % get_str, ] try: out, err = self._execute_cmd(get_access, check_exit_code=True) except processutils.ProcessExecutionError: msg = _('Failed to get access list of CIFS share %s.') % share_name LOG.exception(msg) raise exception.EMCVnxXMLAPIError(err=msg) ret = {} name_pattern = re.compile(r"Unix user '(.+?)'") access_pattern = re.compile(r"ALLOWED:(.+?):") name = None for line in out.splitlines(): if name is None: names = name_pattern.findall(line) if names: name = names[0].lower() else: accesses = access_pattern.findall(line) if accesses: ret[name] = accesses[0].lower() name = None return ret def clear_share_access(self, mover_name, share_name, domain, white_list_users): existing_users = self.get_share_access(mover_name, share_name) white_list_users_set = set(user.lower() for user in white_list_users) users_to_remove = set(existing_users.keys()) - white_list_users_set for user in users_to_remove: self.deny_share_access(mover_name, share_name, user, domain, existing_users[user]) return users_to_remove @enas_utils.decorate_all_methods(enas_utils.log_enter_exit, debug_only=True) class NFSShare(StorageObject): def __init__(self, conn, elt_maker, xml_parser, manager): super(NFSShare, self).__init__(conn, elt_maker, xml_parser, manager) self.nfs_share_map = {} def create(self, name, mover_name): share_path = '/' + name create_nfs_share_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-option', 'access=-0.0.0.0/0.0.0.0', share_path, ] try: self._execute_cmd(create_nfs_share_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to create NFS share %(name)s on mover ' '%(mover_name)s. 
Reason: %(err)s.') % {'name': name, 'mover_name': mover_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) def delete(self, name, mover_name): path = '/' + name status, out = self.get(name, mover_name) if constants.STATUS_NOT_FOUND == status: LOG.warning("NFS share %s not found. Skip the deletion.", path) return delete_nfs_share_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-unexport', '-perm', path, ] try: self._execute_cmd(delete_nfs_share_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to delete NFS share %(name)s on ' '%(mover_name)s. Reason: %(err)s.') % {'name': name, 'mover_name': mover_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) self.nfs_share_map.pop(name) def get(self, name, mover_name, force=False, check_exit_code=False): if name in self.nfs_share_map and not force: return constants.STATUS_OK, self.nfs_share_map[name] path = '/' + name nfs_share = { "mover_name": '', "path": '', 'AccessHosts': [], 'RwHosts': [], 'RoHosts': [], 'RootHosts': [], 'readOnly': '', } nfs_query_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-P', 'nfs', '-list', path, ] try: out, err = self._execute_cmd(nfs_query_cmd, check_exit_code=check_exit_code) except processutils.ProcessExecutionError as expt: dup_msg = (r'%(mover_name)s : No such file or directory' % {'mover_name': mover_name}) if re.search(dup_msg, expt.stdout): LOG.warning("NFS share %s not found.", name) return constants.STATUS_NOT_FOUND, None else: message = (_('Failed to list NFS share %(name)s on ' '%(mover_name)s. 
Reason: %(err)s.') % {'name': name, 'mover_name': mover_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) re_exports = r'%s\s*:\s*\nexport\s*(.*)\n' % mover_name m = re.search(re_exports, out) if m is not None: nfs_share['path'] = path nfs_share['mover_name'] = mover_name export = m.group(1) fields = export.split(" ") for field in fields: field = field.strip() if field.startswith('rw='): nfs_share['RwHosts'] = enas_utils.parse_ipaddr(field[3:]) elif field.startswith('access='): nfs_share['AccessHosts'] = enas_utils.parse_ipaddr( field[7:]) elif field.startswith('root='): nfs_share['RootHosts'] = enas_utils.parse_ipaddr(field[5:]) elif field.startswith('ro='): nfs_share['RoHosts'] = enas_utils.parse_ipaddr(field[3:]) self.nfs_share_map[name] = nfs_share else: return constants.STATUS_NOT_FOUND, None return constants.STATUS_OK, self.nfs_share_map[name] def allow_share_access(self, share_name, host_ip, mover_name, access_level=const.ACCESS_LEVEL_RW): @utils.synchronized('emc-shareaccess-' + share_name) def do_allow_access(share_name, host_ip, mover_name, access_level): status, share = self.get(share_name, mover_name) if constants.STATUS_NOT_FOUND == status: message = (_('NFS share %s not found.') % share_name) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) changed = False rwhosts = share['RwHosts'] rohosts = share['RoHosts'] host_ip = enas_utils.convert_ipv6_format_if_needed(host_ip) if access_level == const.ACCESS_LEVEL_RW: if host_ip not in rwhosts: rwhosts.append(host_ip) changed = True if host_ip in rohosts: rohosts.remove(host_ip) changed = True if access_level == const.ACCESS_LEVEL_RO: if host_ip not in rohosts: rohosts.append(host_ip) changed = True if host_ip in rwhosts: rwhosts.remove(host_ip) changed = True roothosts = share['RootHosts'] if host_ip not in roothosts: roothosts.append(host_ip) changed = True accesshosts = share['AccessHosts'] if host_ip not in accesshosts: accesshosts.append(host_ip) 
changed = True if not changed: LOG.debug("%(host)s is already in access list of share " "%(name)s.", {'host': host_ip, 'name': share_name}) else: path = '/' + share_name self._set_share_access(path, mover_name, rwhosts, rohosts, roothosts, accesshosts) # Update self.nfs_share_map self.get(share_name, mover_name, force=True, check_exit_code=True) do_allow_access(share_name, host_ip, mover_name, access_level) def deny_share_access(self, share_name, host_ip, mover_name): @utils.synchronized('emc-shareaccess-' + share_name) def do_deny_access(share_name, host_ip, mover_name): status, share = self.get(share_name, mover_name) if constants.STATUS_OK != status: message = (_('Query nfs share %(path)s failed. ' 'Reason %(err)s.') % {'path': share_name, 'err': share}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) changed = False rwhosts = set(share['RwHosts']) if host_ip in rwhosts: rwhosts.remove(host_ip) changed = True roothosts = set(share['RootHosts']) if host_ip in roothosts: roothosts.remove(host_ip) changed = True accesshosts = set(share['AccessHosts']) if host_ip in accesshosts: accesshosts.remove(host_ip) changed = True rohosts = set(share['RoHosts']) if host_ip in rohosts: rohosts.remove(host_ip) changed = True if not changed: LOG.debug("%(host)s is already in access list of share " "%(name)s.", {'host': host_ip, 'name': share_name}) else: path = '/' + share_name self._set_share_access(path, mover_name, rwhosts, rohosts, roothosts, accesshosts) # Update self.nfs_share_map self.get(share_name, mover_name, force=True, check_exit_code=True) do_deny_access(share_name, host_ip, mover_name) def clear_share_access(self, share_name, mover_name, white_list_hosts): @utils.synchronized('emc-shareaccess-' + share_name) def do_clear_access(share_name, mover_name, white_list_hosts): def hosts_to_remove(orig_list): if white_list_hosts is None: ret = set() else: ret = set(white_list_hosts).intersection(set(orig_list)) return ret status, share = 
self.get(share_name, mover_name) if constants.STATUS_OK != status: message = (_('Query nfs share %(path)s failed. ' 'Reason %(err)s.') % {'path': share_name, 'err': status}) raise exception.EMCVnxXMLAPIError(err=message) self._set_share_access('/' + share_name, mover_name, hosts_to_remove(share['RwHosts']), hosts_to_remove(share['RoHosts']), hosts_to_remove(share['RootHosts']), hosts_to_remove(share['AccessHosts'])) # Update self.nfs_share_map self.get(share_name, mover_name, force=True, check_exit_code=True) do_clear_access(share_name, mover_name, white_list_hosts) def _set_share_access(self, path, mover_name, rw_hosts, ro_hosts, root_hosts, access_hosts): if access_hosts is None: access_hosts = set() try: access_hosts.remove('-0.0.0.0/0.0.0.0') except (ValueError, KeyError): pass access_str = ('access=%(access)s' % {'access': ':'.join( list(access_hosts) + ['-0.0.0.0/0.0.0.0'])}) if root_hosts: access_str += (',root=%(root)s' % {'root': ':'.join(root_hosts)}) if rw_hosts: access_str += ',rw=%(rw)s' % {'rw': ':'.join(rw_hosts)} if ro_hosts: access_str += ',ro=%(ro)s' % {'ro': ':'.join(ro_hosts)} set_nfs_share_access_cmd = [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', mover_name, '-ignore', '-option', access_str, path, ] try: self._execute_cmd(set_nfs_share_access_cmd, check_exit_code=True) except processutils.ProcessExecutionError as expt: message = (_('Failed to set NFS share %(name)s access on ' '%(mover_name)s. 
Reason: %(err)s.') % {'name': path[1:], 'mover_name': mover_name, 'err': expt}) LOG.error(message) raise exception.EMCVnxXMLAPIError(err=message) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9096718 manila-21.0.0/manila/share/drivers/ganesha/0000775000175000017500000000000000000000000020505 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/ganesha/__init__.py0000664000175000017500000003107100000000000022620 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import errno import os import re from oslo_config import cfg from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila.share.drivers.ganesha import manager as ganesha_manager from manila.share.drivers.ganesha import utils as ganesha_utils CONF = cfg.CONF LOG = log.getLogger(__name__) class NASHelperBase(metaclass=abc.ABCMeta): """Interface to work with share.""" # drivers that use a helper derived from this class # should pass the following attributes to # ganesha_utils.validate_access_rule in their # update_access implementation. 
supported_access_types = () supported_access_levels = () def __init__(self, execute, config, **kwargs): self.configuration = config self._execute = execute def init_helper(self): """Initializes protocol-specific NAS drivers.""" @abc.abstractmethod def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None, sub_name=None): """Update access rules of share.""" def get_backend_info(self, context): raise NotImplementedError def ensure_shares(self, context, shares): raise NotImplementedError class GaneshaNASHelper(NASHelperBase): """Perform share access changes using Ganesha version < 2.4.""" supported_access_types = ('ip', ) supported_access_levels = (constants.ACCESS_LEVEL_RW, constants.ACCESS_LEVEL_RO) def __init__(self, execute, config, tag='', **kwargs): super(GaneshaNASHelper, self).__init__(execute, config, **kwargs) self.tag = tag _confrx = re.compile(r'\.(conf|json)\Z') def _load_conf_dir(self, dirpath, must_exist=True): """Load Ganesha config files in dirpath in alphabetic order.""" try: dirlist = os.listdir(dirpath) except OSError as e: if e.errno != errno.ENOENT or must_exist: raise dirlist = [] LOG.info('Loading Ganesha config from %s.', dirpath) conf_files = list(filter(self._confrx.search, dirlist)) conf_files.sort() export_template = {} for conf_file in conf_files: with open(os.path.join(dirpath, conf_file)) as f: ganesha_utils.patch( export_template, ganesha_manager.parseconf(f.read())) return export_template def init_helper(self): """Initializes protocol-specific NAS drivers.""" self.ganesha = ganesha_manager.GaneshaManager( self._execute, self.tag, ganesha_config_path=self.configuration.ganesha_config_path, ganesha_export_dir=self.configuration.ganesha_export_dir, ganesha_db_path=self.configuration.ganesha_db_path, ganesha_service_name=self.configuration.ganesha_service_name) system_export_template = self._load_conf_dir( self.configuration.ganesha_export_template_dir, must_exist=False) if 
system_export_template: self.export_template = system_export_template else: self.export_template = self._default_config_hook() def _default_config_hook(self): """The default export block. Subclass this to add FSAL specific defaults. Suggested approach: take the return value of superclass' method, patch with dict containing your defaults, and return the result. However, you can also provide your defaults from scratch with no regard to superclass. """ return self._load_conf_dir(ganesha_utils.path_from(__file__, "conf")) def _fsal_hook(self, base_path, share, access, sub_name=None): """Subclass this to create FSAL block.""" return {} def _cleanup_fsal_hook(self, base_path, share, access, sub_name=None): """Callback for FSAL specific cleanup after removing an export.""" pass def _allow_access(self, base_path, share, access, sub_name=None): """Allow access to the share.""" ganesha_utils.validate_access_rule( self.supported_access_types, self.supported_access_levels, access, abort=True) access = ganesha_utils.fixup_access_rule(access) cf = {} accid = access['id'] name = share['name'] export_name = "%s--%s" % (name, accid) ganesha_utils.patch(cf, self.export_template, { 'EXPORT': { 'Export_Id': self.ganesha.get_export_id(), 'Path': os.path.join(base_path, name), 'Pseudo': os.path.join(base_path, export_name), 'Tag': accid, 'CLIENT': { 'Clients': access['access_to'] }, 'FSAL': self._fsal_hook( base_path, share, access, sub_name=sub_name) } }) self.ganesha.add_export(export_name, cf) def _deny_access(self, base_path, share, access): """Deny access to the share.""" self.ganesha.remove_export("%s--%s" % (share['name'], access['id'])) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None, sub_name=None): """Update access rules of share.""" rule_state_map = {} if not (add_rules or delete_rules): add_rules = access_rules self.ganesha.reset_exports() self.ganesha.restart_service() for rule in add_rules: try: 
self._allow_access('/', share, rule) except (exception.InvalidShareAccess, exception.InvalidShareAccessLevel): rule_state_map[rule['id']] = {'state': 'error'} continue for rule in delete_rules: self._deny_access('/', share, rule) return rule_state_map class GaneshaNASHelper2(GaneshaNASHelper): """Perform share access changes using Ganesha version >= 2.4.""" def __init__(self, execute, config, tag='', **kwargs): super(GaneshaNASHelper2, self).__init__(execute, config, **kwargs) if self.configuration.ganesha_rados_store_enable: self.rados_client = kwargs.pop('rados_client') def init_helper(self): """Initializes protocol-specific NAS drivers.""" kwargs = { 'ganesha_config_path': self.configuration.ganesha_config_path, 'ganesha_export_dir': self.configuration.ganesha_export_dir, 'ganesha_service_name': self.configuration.ganesha_service_name } if self.configuration.ganesha_rados_store_enable: kwargs['ganesha_rados_store_enable'] = ( self.configuration.ganesha_rados_store_enable) if not self.configuration.ganesha_rados_store_pool_name: raise exception.GaneshaException( _('"ganesha_rados_store_pool_name" config option is not ' 'set in the driver section.')) kwargs['ganesha_rados_store_pool_name'] = ( self.configuration.ganesha_rados_store_pool_name) kwargs['ganesha_rados_export_index'] = ( self.configuration.ganesha_rados_export_index) kwargs['ganesha_rados_export_counter'] = ( self.configuration.ganesha_rados_export_counter) kwargs['rados_client'] = self.rados_client else: kwargs['ganesha_db_path'] = self.configuration.ganesha_db_path self.ganesha = ganesha_manager.GaneshaManager( self._execute, self.tag, **kwargs) system_export_template = self._load_conf_dir( self.configuration.ganesha_export_template_dir, must_exist=False) if system_export_template: self.export_template = system_export_template else: self.export_template = self._default_config_hook() def _get_export_path(self, share, sub_name=None): """Subclass this to return export path.""" raise 
NotImplementedError() def _get_export_pseudo_path(self, share, sub_name=None): """Subclass this to return export pseudo path.""" raise NotImplementedError() def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None, sub_name=None): """Update access rules of share. Creates an export per share. Modifies access rules of shares by dynamically updating exports via DBUS. """ confdict = {} existing_access_rules = [] rule_state_map = {} # TODO(carloss): check if share['name'] can cause us troubles if self.ganesha.check_export_exists(share['name']): confdict = self.ganesha._read_export(share['name']) existing_access_rules = confdict["EXPORT"]["CLIENT"] if not isinstance(existing_access_rules, list): existing_access_rules = [existing_access_rules] else: if not access_rules: LOG.warning("Trying to remove export file '%s' but it's " "already gone", self.ganesha._getpath(share['name'])) return wanted_rw_clients, wanted_ro_clients = [], [] for rule in access_rules: try: ganesha_utils.validate_access_rule( self.supported_access_types, self.supported_access_levels, rule, True) except (exception.InvalidShareAccess, exception.InvalidShareAccessLevel): rule_state_map[rule['id']] = {'state': 'error'} continue rule = ganesha_utils.fixup_access_rule(rule) if rule['access_level'] == 'rw': wanted_rw_clients.append(rule['access_to']) elif rule['access_level'] == 'ro': wanted_ro_clients.append(rule['access_to']) if access_rules: # Add or Update export. clients = [] if wanted_ro_clients: clients.append({ 'Access_Type': 'ro', 'Clients': ','.join(wanted_ro_clients) }) if wanted_rw_clients: clients.append({ 'Access_Type': 'rw', 'Clients': ','.join(wanted_rw_clients) }) if clients: # Empty list if no rules passed validation if existing_access_rules: # Update existing export. ganesha_utils.patch(confdict, { 'EXPORT': { 'CLIENT': clients } }) self.ganesha.update_export(share['name'], confdict) else: # Add new export. 
ganesha_utils.patch(confdict, self.export_template, { 'EXPORT': { 'Export_Id': self.ganesha.get_export_id(), 'Path': self._get_export_path( share, sub_name=sub_name), 'Pseudo': self._get_export_pseudo_path( share, sub_name=sub_name), 'Tag': share['name'], 'CLIENT': clients, 'FSAL': self._fsal_hook( None, share, None, sub_name=sub_name ) } }) self.ganesha.add_export(share['name'], confdict) else: # No clients have access to the share. Remove export. self.ganesha.remove_export(share['name']) self._cleanup_fsal_hook(None, share, None, sub_name=sub_name) return rule_state_map ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9096718 manila-21.0.0/manila/share/drivers/ganesha/conf/0000775000175000017500000000000000000000000021432 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/ganesha/conf/00-base-export-template.conf0000664000175000017500000000223400000000000026561 0ustar00zuulzuul00000000000000# This is a Ganesha config template. # Syntactically, a valid Ganesha config # file, but some values in it are stubs. # Fields that have stub values are managed # by Manila; the stubs are of two kinds: # - @config: # value will be taken from Manila config # - @runtime: # value will be determined at runtime # User is free to set Ganesha parameters # which are not reserved to Manila by # stubbing. EXPORT { # Each EXPORT must have a unique Export_Id. Export_Id = @runtime; # The directory in the exported file system this export # is rooted on. 
Path = @runtime; # FSAL, Ganesha's module component FSAL { # FSAL name Name = @config; } # Path of export in the NFSv4 pseudo filesystem Pseudo = @runtime; # RPC security flavor, one of none, sys, krb5{,i,p} SecType = sys; # Alternative export identifier for NFSv3 Tag = @runtime; # Client specification CLIENT { # Comma separated list of clients Clients = @runtime; # Access type, one of RW, RO, MDONLY, MDONLY_RO, NONE Access_Type = RW; } # User id squashing, one of None, Root, All Squash = None; } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/ganesha/manager.py0000664000175000017500000006043000000000000022474 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import io import os import re import shlex import sys from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import importutils from manila import exception from manila.i18n import _ from manila.share.drivers.ganesha import utils as ganesha_utils from manila import utils LOG = log.getLogger(__name__) IWIDTH = 4 def _conf2json(conf): """Convert Ganesha config to JSON.""" # tokenize config string token_list = [io.StringIO()] state = { 'in_quote': False, 'in_comment': False, 'escape': False, } cbk = [] for char in conf: if state['in_quote']: if not state['escape']: if char == '"': state['in_quote'] = False cbk.append(lambda: token_list.append(io.StringIO())) elif char == '\\': cbk.append(lambda: state.update({'escape': True})) else: if char == "#": state['in_comment'] = True if state['in_comment']: if char == "\n": state['in_comment'] = False else: if char == '"': token_list.append(io.StringIO()) state['in_quote'] = True state['escape'] = False if not state['in_comment']: token_list[-1].write(char) while cbk: cbk.pop(0)() if state['in_quote']: raise RuntimeError("Unterminated quoted string") # jsonify tokens js_token_list = ["{"] for tok in token_list: tok = tok.getvalue() if tok[0] == '"': js_token_list.append(tok) continue for pat, s in [ # add omitted "=" signs to block openings (r'([^=\s])\s*{', '\\1={'), # delete trailing semicolons in blocks (r';\s*}', '}'), # add omitted semicolons after blocks (r'}\s*([^}\s])', '};\\1'), # separate syntactically significant characters (r'([;{}=])', ' \\1 ')]: tok = re.sub(pat, s, tok) # map tokens to JSON equivalents for word in tok.split(): if word == "=": word = ":" elif word == ";": word = ',' elif (word in ['{', '}'] or re.search(r'\A-?[1-9]\d*(\.\d+)?\Z', word)): pass else: word = jsonutils.dumps(word) js_token_list.append(word) js_token_list.append("}") # group quoted strings token_grp_list = [] for tok in js_token_list: if tok[0] == '"': if not (token_grp_list and 
isinstance(token_grp_list[-1], list)): token_grp_list.append([]) token_grp_list[-1].append(tok) else: token_grp_list.append(tok) # process quoted string groups by joining them js_token_list2 = [] for x in token_grp_list: if isinstance(x, list): x = ''.join(['"'] + [tok[1:-1] for tok in x] + ['"']) js_token_list2.append(x) return ''.join(js_token_list2) def _dump_to_conf(confdict, out=sys.stdout, indent=0): """Output confdict in Ganesha config format.""" if isinstance(confdict, dict): for k, v in confdict.items(): if v is None: continue if isinstance(v, dict): out.write(' ' * (indent * IWIDTH) + k + ' ') out.write("{\n") _dump_to_conf(v, out, indent + 1) out.write(' ' * (indent * IWIDTH) + '}') elif isinstance(v, list): for item in v: out.write(' ' * (indent * IWIDTH) + k + ' ') out.write("{\n") _dump_to_conf(item, out, indent + 1) out.write(' ' * (indent * IWIDTH) + '}\n') # The 'CLIENTS' Ganesha string option is an exception in that it's # string value can't be enclosed within quotes as can be done for # other string options in a valid Ganesha conf file. elif k.upper() == 'CLIENTS': out.write(' ' * (indent * IWIDTH) + k + ' = ' + v + ';') else: out.write(' ' * (indent * IWIDTH) + k + ' ') out.write('= ') _dump_to_conf(v, out, indent) out.write(';') out.write('\n') else: dj = jsonutils.dumps(confdict) out.write(dj) def parseconf(conf): """Parse Ganesha config. Both native format and JSON are supported. Convert config to a (nested) dictionary. """ def list_to_dict(src_list): # Convert a list of key-value pairs stored as tuples to a dict. # For tuples with identical keys, preserve all the values in a # list. e.g., argument [('k', 'v1'), ('k', 'v2')] to function # returns {'k': ['v1', 'v2']}. 
dst_dict = {} for i in src_list: if isinstance(i, tuple): k, v = i if isinstance(v, list): v = list_to_dict(v) if k in dst_dict: dst_dict[k] = [dst_dict[k]] dst_dict[k].append(v) else: dst_dict[k] = v return dst_dict try: # allow config to be specified in JSON -- # for sake of people who might feel Ganesha config foreign. d = jsonutils.loads(conf) except ValueError: # Customize JSON decoder to convert Ganesha config to a list # of key-value pairs stored as tuples. This allows multiple # occurrences of a config block to be later converted to a # dict key-value pair, with block name being the key and a # list of block contents being the value. li = jsonutils.loads(_conf2json(conf), object_pairs_hook=lambda x: x) d = list_to_dict(li) return d def mkconf(confdict): """Create Ganesha config string from confdict.""" s = io.StringIO() _dump_to_conf(confdict, s) return s.getvalue() rados = None def setup_rados(): global rados if not rados: try: rados = importutils.import_module('rados') except ImportError: raise exception.ShareBackendException( _("rados python module is not installed")) class GaneshaManager(object): """Ganesha instrumentation class.""" def __init__(self, execute, tag, **kwargs): self.confrx = re.compile(r'\.conf\Z') self.ganesha_config_path = kwargs['ganesha_config_path'] self.tag = tag def _execute(*args, **kwargs): msg = kwargs.pop('message', args[0]) makelog = kwargs.pop('makelog', True) try: return execute(*args, **kwargs) except exception.ProcessExecutionError as e: if makelog: LOG.error( ("Error while executing management command on " "Ganesha node %(tag)s: %(msg)s."), {'tag': tag, 'msg': msg}) raise exception.GaneshaCommandFailure( stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code, cmd=e.cmd) self.execute = _execute self.ganesha_service = kwargs['ganesha_service_name'] self.ganesha_export_dir = kwargs['ganesha_export_dir'] self.execute('mkdir', '-p', self.ganesha_export_dir) self.ganesha_rados_store_enable = kwargs.get( 
'ganesha_rados_store_enable') if self.ganesha_rados_store_enable: setup_rados() self.ganesha_rados_store_pool_name = ( kwargs['ganesha_rados_store_pool_name']) self.ganesha_rados_export_counter = ( kwargs['ganesha_rados_export_counter']) self.ganesha_rados_export_index = ( kwargs['ganesha_rados_export_index']) self.rados_client = kwargs['rados_client'] try: self._get_rados_object(self.ganesha_rados_export_counter) except rados.ObjectNotFound: self._put_rados_object(self.ganesha_rados_export_counter, str(1000)) else: self.ganesha_db_path = kwargs['ganesha_db_path'] self.execute('mkdir', '-p', os.path.dirname(self.ganesha_db_path)) # Here we are to make sure that an SQLite database of the # required scheme exists at self.ganesha_db_path. # The following command gets us there -- provided the file # does not yet exist (otherwise it just fails). However, # we don't care about this condition, we just execute the # command unconditionally (ignoring failure). Instead we # directly query the db right after, to check its validity. 
self.execute( "sqlite3", self.ganesha_db_path, 'create table ganesha(key varchar(20) primary key, ' 'value int); insert into ganesha values("exportid", ' '100);', run_as_root=False, check_exit_code=False) self.get_export_id(bump=False) def _getpath(self, name): """Get the path of config file for name.""" return os.path.join(self.ganesha_export_dir, name + ".conf") @staticmethod def _get_export_rados_object_name(name): return 'ganesha-export-' + name def _write_tmp_conf_file(self, path, data): """Write data to tmp conf file.""" dirpath, fname = (getattr(os.path, q + "name")(path) for q in ("dir", "base")) tmpf = self.execute('mktemp', '-p', dirpath, "-t", fname + ".XXXXXX")[0][:-1] self.execute( 'sh', '-c', 'echo %s > %s' % (shlex.quote(data), shlex.quote(tmpf)), message='writing ' + tmpf) return tmpf def _write_conf_file(self, name, data): """Write data to config file for name atomically.""" path = self._getpath(name) tmpf = self._write_tmp_conf_file(path, data) try: self.execute('mv', tmpf, path) except exception.ProcessExecutionError as e: LOG.error('mv temp file (%s) to %s failed.', tmpf, path) self.execute('rm', tmpf) raise exception.GaneshaCommandFailure( stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code, cmd=e.cmd) return path def _mkindex(self): """Generate the index file for current exports.""" @utils.synchronized("ganesha-index-" + self.tag, external=True) def _mkindex(): files = filter(lambda f: self.confrx.search(f) and f != "INDEX.conf", self.execute('ls', self.ganesha_export_dir, run_as_root=False)[0].split("\n")) index = "".join(map(lambda f: "%include " + os.path.join( self.ganesha_export_dir, f) + "\n", files)) self._write_conf_file("INDEX", index) _mkindex() def _read_export_rados_object(self, name): return parseconf(self._get_rados_object( self._get_export_rados_object_name(name))) def _read_export_file(self, name): return parseconf(self.execute("cat", self._getpath(name), message='reading export ' + name)[0]) def _read_export(self, name): 
"""Return the dict of the export identified by name.""" if self.ganesha_rados_store_enable: return self._read_export_rados_object(name) else: return self._read_export_file(name) def _check_export_rados_object_exists(self, name): try: self._get_rados_object( self._get_export_rados_object_name(name)) return True except rados.ObjectNotFound: return False def _check_file_exists(self, path): try: self.execute('test', '-f', path, makelog=False, run_as_root=False) return True except exception.GaneshaCommandFailure as e: if e.exit_code == 1: return False else: raise exception.GaneshaCommandFailure( stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code, cmd=e.cmd) def _check_export_file_exists(self, name): return self._check_file_exists(self._getpath(name)) def check_export_exists(self, name): """Check whether export exists.""" if self.ganesha_rados_store_enable: return self._check_export_rados_object_exists(name) else: return self._check_export_file_exists(name) def _write_export_rados_object(self, name, data): """Write confdict to the export RADOS object of name.""" self._put_rados_object(self._get_export_rados_object_name(name), data) # temp export config file required for DBus calls return self._write_tmp_conf_file(self._getpath(name), data) def _write_export(self, name, confdict): """Write confdict to the export file or RADOS object of name.""" for k, v in ganesha_utils.walk(confdict): # values in the export block template that need to be # filled in by Manila are pre-fixed by '@' if isinstance(v, str) and v[0] == '@': msg = _("Incomplete export block: value %(val)s of attribute " "%(key)s is a stub.") % {'key': k, 'val': v} raise exception.InvalidParameterValue(err=msg) if self.ganesha_rados_store_enable: return self._write_export_rados_object(name, mkconf(confdict)) else: return self._write_conf_file(name, mkconf(confdict)) def _rm_file(self, path): self.execute("rm", "-f", path) def _rm_export_file(self, name): """Remove export file of name.""" 
self._rm_file(self._getpath(name)) def _rm_export_rados_object(self, name): """Remove export object of name.""" self._delete_rados_object(self._get_export_rados_object_name(name)) def _dbus_send_ganesha(self, method, *args, **kwargs): """Send a message to Ganesha via dbus.""" service = kwargs.pop("service", "exportmgr") self.execute("dbus-send", "--print-reply", "--system", "--dest=org.ganesha.nfsd", "/org/ganesha/nfsd/ExportMgr", "org.ganesha.nfsd.%s.%s" % (service, method), *args, message='dbus call %s.%s' % (service, method), **kwargs) def _remove_export_dbus(self, xid): """Remove an export from Ganesha runtime with given export id.""" self._dbus_send_ganesha("RemoveExport", "uint16:%d" % xid) def _add_rados_object_url_to_index(self, name): """Add an export RADOS object's URL to the RADOS URL index.""" # TODO(rraja): Ensure that the export index object's update is atomic, # e.g., retry object update until the object version between the 'get' # and 'put' operations remains the same. index_data = self._get_rados_object(self.ganesha_rados_export_index) want_url = "%url rados://{0}/{1}".format( self.ganesha_rados_store_pool_name, self._get_export_rados_object_name(name)) if index_data: self._put_rados_object( self.ganesha_rados_export_index, '\n'.join([index_data, want_url]) ) else: self._put_rados_object(self.ganesha_rados_export_index, want_url) def _remove_rados_object_url_from_index(self, name): """Remove an export RADOS object's URL from the RADOS URL index.""" # TODO(rraja): Ensure that the export index object's update is atomic, # e.g., retry object update until the object version between the 'get' # and 'put' operations remains the same. 
index_data = self._get_rados_object(self.ganesha_rados_export_index) if not index_data: return unwanted_url = "%url rados://{0}/{1}".format( self.ganesha_rados_store_pool_name, self._get_export_rados_object_name(name)) rados_urls = index_data.split('\n') new_rados_urls = [url for url in rados_urls if url != unwanted_url] self._put_rados_object(self.ganesha_rados_export_index, '\n'.join(new_rados_urls)) def add_export(self, name, confdict): """Add an export to Ganesha specified by confdict.""" xid = confdict["EXPORT"]["Export_Id"] undos = [] _mkindex_called = False try: path = self._write_export(name, confdict) if self.ganesha_rados_store_enable: undos.append(lambda: self._rm_export_rados_object(name)) undos.append(lambda: self._rm_file(path)) else: undos.append(lambda: self._rm_export_file(name)) self._dbus_send_ganesha("AddExport", "string:" + path, "string:EXPORT(Export_Id=%d)" % xid) undos.append(lambda: self._remove_export_dbus(xid)) if self.ganesha_rados_store_enable: # Clean up temp export file used for the DBus call self._rm_file(path) self._add_rados_object_url_to_index(name) else: _mkindex_called = True self._mkindex() except exception.ProcessExecutionError as e: for u in undos: u() if not self.ganesha_rados_store_enable and not _mkindex_called: self._mkindex() raise exception.GaneshaCommandFailure( stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code, cmd=e.cmd) def update_export(self, name, confdict): """Update an export to Ganesha specified by confdict.""" xid = confdict["EXPORT"]["Export_Id"] old_confdict = self._read_export(name) path = self._write_export(name, confdict) try: self._dbus_send_ganesha("UpdateExport", "string:" + path, "string:EXPORT(Export_Id=%d)" % xid) except exception.ProcessExecutionError as e: # Revert the export update. 
self._write_export(name, old_confdict) raise exception.GaneshaCommandFailure( stdout=e.stdout, stderr=e.stderr, exit_code=e.exit_code, cmd=e.cmd) finally: if self.ganesha_rados_store_enable: # Clean up temp export file used for the DBus update call self._rm_file(path) def remove_export(self, name): """Remove an export from Ganesha.""" try: confdict = self._read_export(name) self._remove_export_dbus(confdict["EXPORT"]["Export_Id"]) except Exception: LOG.exception("There was a problem removing the export. " "Ignoring errors and continuing operation.") finally: if self.ganesha_rados_store_enable: self._delete_rados_object( self._get_export_rados_object_name(name)) self._remove_rados_object_url_from_index(name) else: self._rm_export_file(name) self._mkindex() def _get_rados_object(self, object_name): """Synchronously read data from Ceph RADOS object as a text string. :param pool_name: name of the pool :type pool_name: str :param object_name: name of the object :type object_name: str :returns: tuple of object data and version """ pool_name = self.ganesha_rados_store_pool_name ioctx = self.rados_client.open_ioctx(pool_name) osd_max_write_size = self.rados_client.conf_get('osd_max_write_size') max_size = int(osd_max_write_size) * 1024 * 1024 try: bytes_read = ioctx.read(object_name, max_size) if ((len(bytes_read) == max_size) and (ioctx.read(object_name, 1, offset=max_size))): LOG.warning("Size of object %s exceeds '%d' bytes " "read", object_name, max_size) finally: ioctx.close() bytes_read_decoded = bytes_read.decode('utf-8') return bytes_read_decoded def _put_rados_object(self, object_name, data): """Synchronously write data as a byte string in a Ceph RADOS object. 
:param pool_name: name of the pool :type pool_name: str :param object_name: name of the object :type object_name: str :param data: data to write :type data: bytes """ pool_name = self.ganesha_rados_store_pool_name encoded_data = data.encode('utf-8') ioctx = self.rados_client.open_ioctx(pool_name) max_size = int( self.rados_client.conf_get('osd_max_write_size')) * 1024 * 1024 if len(encoded_data) > max_size: msg = ("Data to be written to object '{0}' exceeds " "{1} bytes".format(object_name, max_size)) LOG.error(msg) raise exception.ShareBackendException(msg) try: with rados.WriteOpCtx() as wop: wop.write_full(encoded_data) ioctx.operate_write_op(wop, object_name) except rados.OSError as e: LOG.error(e) raise e finally: ioctx.close() def _delete_rados_object(self, object_name): pool_name = self.ganesha_rados_store_pool_name ioctx = self.rados_client.open_ioctx(pool_name) try: ioctx.remove_object(object_name) except rados.ObjectNotFound: LOG.warning("Object '%s' was already removed", object_name) finally: ioctx.close() def get_export_id(self, bump=True): """Get a new export id.""" # XXX overflowing the export id (16 bit unsigned integer) # is not handled if self.ganesha_rados_store_enable: # TODO(rraja): Ensure that the export counter object's update is # atomic, e.g., retry object update until the object version # between the 'get' and 'put' operations remains the same. 
export_id = int( self._get_rados_object(self.ganesha_rados_export_counter)) if not bump: return export_id export_id += 1 self._put_rados_object(self.ganesha_rados_export_counter, str(export_id)) return export_id else: if bump: bumpcode = 'update ganesha set value = value + 1;' else: bumpcode = '' out = self.execute( "sqlite3", self.ganesha_db_path, bumpcode + 'select * from ganesha ' # nosec B608 'where key = "exportid";', run_as_root=False)[0] match = re.search(r'\Aexportid\|(\d+)$', out) if not match: LOG.error("Invalid export database on " "Ganesha node %(tag)s: %(db)s.", {'tag': self.tag, 'db': self.ganesha_db_path}) raise exception.InvalidSqliteDB() return int(match.groups()[0]) def restart_service(self): """Restart the Ganesha service.""" self.execute("service", self.ganesha_service, "restart") def reset_exports(self): """Delete all export files.""" self.execute('sh', '-c', 'rm -f %s/*.conf' % shlex.quote(self.ganesha_export_dir)) self._mkindex() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/ganesha/utils.py0000664000175000017500000001153600000000000022225 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import shlex from oslo_concurrency import processutils from oslo_log import log from manila import exception from manila.i18n import _ from manila import ssh_utils from manila import utils LOG = log.getLogger(__name__) def patch(base, *overlays): """Recursive dictionary patching.""" for ovl in overlays: for k, v in ovl.items(): if isinstance(v, dict) and isinstance(base.get(k), dict): patch(base[k], v) else: base[k] = v return base def walk(dct): """Recursive iteration over dictionary.""" for k, v in dct.items(): if isinstance(v, dict): for w in walk(v): yield w else: yield k, v class RootExecutor(object): """Execute wrapper defaulting to root execution.""" def __init__(self, execute=utils.execute): self.execute = execute def __call__(self, *args, **kwargs): exkwargs = {"run_as_root": True} exkwargs.update(kwargs) return self.execute(*args, **exkwargs) class SSHExecutor(object): """Callable encapsulating exec through ssh.""" def __init__(self, *args, **kwargs): self.pool = ssh_utils.SSHPool(*args, **kwargs) def __call__(self, *args, **kwargs): # argument with identifier 'run_as_root=' is not accepted by # processutils's ssh_execute() method unlike processutils's execute() # method. So implement workaround to enable or disable 'run as root' # behavior. run_as_root = kwargs.pop('run_as_root', False) cmd = ' '.join(shlex.quote(a) for a in args) if run_as_root: cmd = ' '.join(['sudo', cmd]) ssh = self.pool.get() try: ret = processutils.ssh_execute(ssh, cmd, **kwargs) finally: self.pool.put(ssh) return ret def path_from(fpath, *rpath): """Return the join of the dir of fpath and rpath in absolute form.""" return os.path.join(os.path.abspath(os.path.dirname(fpath)), *rpath) def validate_access_rule(supported_access_types, supported_access_levels, access_rule, abort=False): """Validate an access rule. :param access_rule: Access rules to be validated. :param supported_access_types: List of access types that are regarded valid. 
:param supported_access_levels: List of access levels that are regarded valid. :param abort: a boolean value that indicates if an exception should be raised whether the rule is invalid. :return: Boolean. """ errmsg = _("Unsupported access rule of 'type' %(access_type)s, " "'level' %(access_level)s, 'to' %(access_to)s: " "%(field)s should be one of %(supported)s.") if not isinstance(access_rule, dict): access_param = access_rule.to_dict() else: access_param = access_rule def validate(field, supported_tokens, excinfo): if access_rule['access_%s' % field] in supported_tokens: return True access_param['field'] = field access_param['supported'] = ', '.join( "'%s'" % x for x in supported_tokens) if abort: LOG.error(errmsg, access_param) raise excinfo['type']( **{excinfo['about']: excinfo['details'] % access_param}) else: LOG.warning(errmsg, access_param) return False valid = True valid &= validate( 'type', supported_access_types, {'type': exception.InvalidShareAccess, 'about': "reason", 'details': _( "%(access_type)s; only %(supported)s access type is allowed")}) valid &= validate( 'level', supported_access_levels, {'type': exception.InvalidShareAccessLevel, 'about': "level", 'details': "%(access_level)s"}) return valid def fixup_access_rule(access_rule): """Adjust access rule as required for ganesha to handle it properly. :param access_rule: Access rules to be fixed up. :return: access_rule """ if access_rule['access_to'] == '0.0.0.0/0': access_rule['access_to'] = '0.0.0.0' # nosec B104 LOG.debug("Set access_to field to '0.0.0.0' in ganesha back end.") return access_rule ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/generic.py0000664000175000017500000013556300000000000021102 0ustar00zuulzuul00000000000000# Copyright (c) 2014 NetApp, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic Driver for shares.""" import os import time from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import units from manila.common import constants as const from manila import compute from manila import context from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers import service_instance from manila import ssh_utils from manila import utils from manila import volume LOG = log.getLogger(__name__) share_opts = [ cfg.StrOpt('smb_template_config_path', default='$state_path/smb.conf', help="Path to smb config."), cfg.StrOpt('volume_name_template', default='manila-share-%s', help="Volume name template."), cfg.StrOpt('volume_snapshot_name_template', default='manila-snapshot-%s', help="Volume snapshot name template."), cfg.StrOpt('share_mount_path', default='/shares', help="Parent path in service instance where shares " "will be mounted."), cfg.IntOpt('max_time_to_create_volume', default=180, help="Maximum time to wait for creating cinder volume."), cfg.IntOpt('max_time_to_extend_volume', default=180, help="Maximum time to wait for extending cinder volume."), cfg.IntOpt('max_time_to_attach', default=120, help="Maximum time to wait for attaching cinder volume."), cfg.StrOpt('service_instance_smb_config_path', default='$share_mount_path/smb.conf', help="Path to SMB config in service 
instance."), cfg.ListOpt('share_helpers', default=[ 'CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess', 'NFS=manila.share.drivers.helpers.NFSHelper', ], help='Specify list of share export helpers.'), cfg.StrOpt('share_volume_fstype', default='ext4', choices=['ext4', 'ext3'], help='Filesystem type of the share volume.'), cfg.StrOpt('cinder_volume_type', help='Name or id of cinder volume type which will be used ' 'for all volumes created by driver.'), ] CONF = cfg.CONF CONF.register_opts(share_opts) # NOTE(u_glide): These constants refer to the column number in the "df" output BLOCK_DEVICE_SIZE_INDEX = 1 USED_SPACE_INDEX = 2 def ensure_server(f): def wrap(self, context, *args, **kwargs): server = kwargs.get('share_server') if not self.driver_handles_share_servers: if not server: server = self.service_instance_manager.get_common_server() kwargs['share_server'] = server else: raise exception.ManilaException( _("Share server handling is not available. " "But 'share_server' was provided. '%s'. " "Share network should not be used.") % server.get('id')) elif not server: raise exception.ManilaException( _("Share server handling is enabled. But 'share_server' " "is not provided. 
Make sure you used 'share_network'.")) if not server.get('backend_details'): raise exception.ManilaException( _("Share server '%s' does not have backend details.") % server['id']) if not self.service_instance_manager.ensure_service_instance( context, server['backend_details']): raise exception.ServiceInstanceUnavailable() return f(self, context, *args, **kwargs) return wrap class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver): """Executes commands relating to Shares.""" def __init__(self, *args, **kwargs): """Do initialization.""" super(GenericShareDriver, self).__init__( [False, True], *args, **kwargs) self.admin_context = context.get_admin_context() self.configuration.append_config_values(share_opts) self._helpers = {} self.backend_name = self.configuration.safe_get( 'share_backend_name') or "Cinder_Volumes" self.ssh_connections = {} self._setup_service_instance_manager() self.private_storage = kwargs.get('private_storage') def _setup_service_instance_manager(self): self.service_instance_manager = ( service_instance.ServiceInstanceManager( driver_config=self.configuration)) def _ssh_exec(self, server, command, check_exit_code=True): LOG.debug("_ssh_exec - server: %s, command: %s, check_exit_code: %s", server, command, check_exit_code) connection = self.ssh_connections.get(server['instance_id']) ssh_conn_timeout = self.configuration.ssh_conn_timeout if not connection: ssh_pool = ssh_utils.SSHPool(server['ip'], 22, ssh_conn_timeout, server['username'], server.get('password'), server.get('pk_path'), max_size=1) ssh = ssh_pool.create() self.ssh_connections[server['instance_id']] = (ssh_pool, ssh) else: ssh_pool, ssh = connection if not ssh.get_transport().is_active(): ssh_pool.remove(ssh) ssh = ssh_pool.create() self.ssh_connections[server['instance_id']] = (ssh_pool, ssh) # (aovchinnikov): ssh_execute does not behave well when passed # parameters with spaces. 
wrap = lambda token: "\"" + token + "\"" # noqa: E731 command = [wrap(tkn) if tkn.count(' ') else tkn for tkn in command] return processutils.ssh_execute(ssh, ' '.join(command), check_exit_code=check_exit_code) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" def do_setup(self, context): """Any initialization the generic driver does while starting.""" super(GenericShareDriver, self).do_setup(context) self.compute_api = compute.API() self.volume_api = volume.API() self._setup_helpers() common_sv_available = False share_server = None sv_fetch_retry_interval = 5 while not (common_sv_available or self.driver_handles_share_servers): try: # Verify availability of common server share_server = ( self.service_instance_manager.get_common_server()) common_sv_available = self._is_share_server_active( context, share_server) except Exception as ex: LOG.error(ex) if not common_sv_available: time.sleep(sv_fetch_retry_interval) LOG.warning("Waiting for the common service VM to become " "available. " "Driver is currently uninitialized. " "Share server: %(share_server)s " "Retry interval: %(retry_interval)s", dict(share_server=share_server, retry_interval=sv_fetch_retry_interval)) def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" helpers = self.configuration.share_helpers if helpers: for helper_str in helpers: share_proto, __, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper( self._execute, self._ssh_exec, self.configuration) else: raise exception.ManilaException( "No protocol helpers selected for Generic Driver. 
" "Please specify using config option 'share_helpers'.") @ensure_server def create_share(self, context, share, share_server=None): """Creates share.""" return self._create_share( context, share, snapshot=None, share_server=share_server, ) def _create_share(self, context, share, snapshot, share_server=None): helper = self._get_helper(share) server_details = share_server['backend_details'] volume = self._allocate_container( self.admin_context, share, snapshot=snapshot) volume = self._attach_volume( self.admin_context, share, server_details['instance_id'], volume) if not snapshot: self._format_device(server_details, volume) self._mount_device(share, server_details, volume) export_locations = helper.create_exports( server_details, share['name']) return export_locations @utils.retry(retry_param=exception.ProcessExecutionError, backoff_rate=1) def _is_device_file_available(self, server_details, volume): """Checks whether the device file is available""" command = ['sudo', 'test', '-b', volume['mountpoint']] self._ssh_exec(server_details, command) def _format_device(self, server_details, volume): """Formats device attached to the service vm.""" self._is_device_file_available(server_details, volume) command = ['sudo', 'mkfs.%s' % self.configuration.share_volume_fstype, volume['mountpoint']] self._ssh_exec(server_details, command) def _is_device_mounted(self, mount_path, server_details, volume=None): """Checks whether volume already mounted or not.""" log_data = { 'mount_path': mount_path, 'server_id': server_details['instance_id'], } if volume and volume.get('mountpoint', ''): log_data['volume_id'] = volume['id'] log_data['dev_mount_path'] = volume['mountpoint'] msg = ("Checking whether volume '%(volume_id)s' with mountpoint " "'%(dev_mount_path)s' is mounted on mount path '%(mount_p" "ath)s' on server '%(server_id)s' or not." % log_data) else: msg = ("Checking whether mount path '%(mount_path)s' exists on " "server '%(server_id)s' or not." 
% log_data) LOG.debug(msg) mounts_list_cmd = ['sudo', 'mount'] output, __ = self._ssh_exec(server_details, mounts_list_cmd) mounts = output.split('\n') for mount in mounts: mount_elements = mount.split(' ') if (len(mount_elements) > 2 and mount_path == mount_elements[2]): if volume: # Mount goes with device path and mount path if (volume.get('mountpoint', '') == mount_elements[0]): return True else: # Unmount goes only by mount path return True return False def _add_mount_permanently(self, share_id, device_path, server_details): """Add mount permanently for mounted filesystems.""" try: self._ssh_exec( server_details, ['grep', share_id, const.MOUNT_FILE_TEMP, '|', 'sudo', 'tee', '-a', const.MOUNT_FILE], ) output, __ = self._ssh_exec( server_details, ['lsblk', '-o', 'uuid', '-n', device_path]) if output: device_uuid = f"UUID={output.strip()}" self._ssh_exec( server_details, ['sudo', 'sed', '-i', "s@{}@{}@".format(device_path, device_uuid), const.MOUNT_FILE] ) except exception.ProcessExecutionError as e: LOG.error("Failed to add 'Share-%(share_id)s' mount " "permanently on server '%(instance_id)s'.", {"share_id": share_id, "instance_id": server_details['instance_id']}) raise exception.ShareBackendException(msg=str(e)) try: # Remount it to avoid postponed point of failure self._ssh_exec(server_details, ['sudo', 'mount', '-a']) except exception.ProcessExecutionError: LOG.error("Failed to mount all shares on server '%s'.", server_details['instance_id']) def _remove_mount_permanently(self, share_id, server_details): """Remove mount permanently from mounted filesystems.""" try: self._ssh_exec( server_details, ['sudo', 'sed', '-i', '\'/%s/d\'' % share_id, const.MOUNT_FILE], ) except exception.ProcessExecutionError as e: LOG.error("Failed to remove 'Share-%(share_id)s' mount " "permanently on server '%(instance_id)s'.", {"share_id": share_id, "instance_id": server_details['instance_id']}) raise exception.ShareBackendException(msg=str(e)) def _mount_device(self, share, 
server_details, volume): """Mounts block device to the directory on service vm. Mounts attached and formatted block device to the directory if not mounted yet. """ @utils.synchronized('generic_driver_mounts_' '%s' % server_details['instance_id']) def _mount_device_with_lock(): mount_path = self._get_mount_path(share) device_path = volume['mountpoint'] log_data = { 'dev': device_path, 'path': mount_path, 'server': server_details['instance_id'], } try: if not self._is_device_mounted(mount_path, server_details, volume): LOG.debug("Mounting '%(dev)s' to path '%(path)s' on " "server '%(server)s'.", log_data) mount_cmd = ( 'sudo', 'mkdir', '-p', mount_path, '&&', 'sudo', 'mount', device_path, mount_path, '&&', 'sudo', 'chmod', '777', mount_path, '&&', 'sudo', 'umount', mount_path, # NOTE(vponomaryov): 'tune2fs' is required to make # filesystem of share created from snapshot have # unique ID, in case of LVM volumes, by default, # it will have the same UUID as source volume one. # 'tune2fs' command can be executed only when device # is not mounted and also, in current case, it takes # effect only after it was mounted. Closes #1645751 # NOTE(gouthamr): Executing tune2fs -U only works on # a recently checked filesystem. 
See debian bug 857336 '&&', 'sudo', 'e2fsck', '-y', '-f', device_path, '&&', 'sudo', 'tune2fs', '-U', 'random', device_path, '&&', 'sudo', 'mount', device_path, mount_path, ) self._ssh_exec(server_details, mount_cmd) self._add_mount_permanently(share.id, device_path, server_details) else: LOG.warning("Mount point '%(path)s' already exists on " "server '%(server)s'.", log_data) except exception.ProcessExecutionError as e: raise exception.ShareBackendException(msg=str(e)) return _mount_device_with_lock() @utils.retry(retry_param=exception.ProcessExecutionError) def _unmount_device(self, share, server_details): """Unmounts block device from directory on service vm.""" @utils.synchronized('generic_driver_mounts_' '%s' % server_details['instance_id']) def _unmount_device_with_lock(): mount_path = self._get_mount_path(share) log_data = { 'path': mount_path, 'server': server_details['instance_id'], } if self._is_device_mounted(mount_path, server_details): LOG.debug("Unmounting path '%(path)s' on server " "'%(server)s'.", log_data) unmount_cmd = ['sudo', 'umount', mount_path, '&&', 'sudo', 'rmdir', mount_path] self._ssh_exec(server_details, unmount_cmd) self._remove_mount_permanently(share.id, server_details) else: LOG.warning("Mount point '%(path)s' does not exist on " "server '%(server)s'.", log_data) return _unmount_device_with_lock() def _get_mount_path(self, share): """Returns the path to use for mount device in service vm.""" return os.path.join(self.configuration.share_mount_path, share['name']) def _attach_volume(self, context, share, instance_id, volume): """Attaches cinder volume to service vm.""" @utils.synchronized( "generic_driver_attach_detach_%s" % instance_id, external=True) def do_attach(volume): if volume['status'] == 'in-use': attached_volumes = self.compute_api.instance_volumes_list( self.admin_context, instance_id) if volume['id'] in attached_volumes: return volume else: raise exception.ManilaException( _('Volume %s is already attached to another 
instance') % volume['id']) @utils.retry(retries=3, interval=2, backoff_rate=1) def attach_volume(): self.compute_api.instance_volume_attach( self.admin_context, instance_id, volume['id']) attach_volume() t = time.time() while time.time() - t < self.configuration.max_time_to_attach: volume = self.volume_api.get(context, volume['id']) if volume['status'] == 'in-use': return volume elif volume['status'] not in ('attaching', 'reserved'): raise exception.ManilaException( _('Failed to attach volume %s') % volume['id']) time.sleep(1) else: err_msg = { 'volume_id': volume['id'], 'max_time': self.configuration.max_time_to_attach } raise exception.ManilaException( _('Volume %(volume_id)s has not been attached in ' '%(max_time)ss. Giving up.') % err_msg) return do_attach(volume) def _get_volume_name(self, share_id): return self.configuration.volume_name_template % share_id def _get_volume(self, context, share_id): """Finds volume, associated to the specific share.""" volume_id = self.private_storage.get(share_id, 'volume_id') if volume_id is not None: return self.volume_api.get(context, volume_id) else: # Fallback to legacy method return self._get_volume_legacy(context, share_id) def _get_volume_legacy(self, context, share_id): # NOTE(u_glide): this method is deprecated and will be removed in # future versions volume_name = self._get_volume_name(share_id) search_opts = {'name': volume_name} if context.is_admin: search_opts['all_tenants'] = True volumes_list = self.volume_api.get_all(context, search_opts) if len(volumes_list) == 1: return volumes_list[0] elif len(volumes_list) > 1: LOG.error( "Expected only one volume in volume list with name " "'%(name)s', but got more than one in a result - " "'%(result)s'.", { 'name': volume_name, 'result': volumes_list}) raise exception.ManilaException( _("Error. 
Ambiguous volumes for name '%s'") % volume_name) return None def _get_volume_snapshot(self, context, snapshot_id): """Find volume snapshot associated to the specific share snapshot.""" volume_snapshot_id = self.private_storage.get( snapshot_id, 'volume_snapshot_id') if volume_snapshot_id is not None: return self.volume_api.get_snapshot(context, volume_snapshot_id) else: # Fallback to legacy method return self._get_volume_snapshot_legacy(context, snapshot_id) def _get_volume_snapshot_legacy(self, context, snapshot_id): # NOTE(u_glide): this method is deprecated and will be removed in # future versions volume_snapshot_name = ( self.configuration.volume_snapshot_name_template % snapshot_id) volume_snapshot_list = self.volume_api.get_all_snapshots( context, {'name': volume_snapshot_name}) volume_snapshot = None if len(volume_snapshot_list) == 1: volume_snapshot = volume_snapshot_list[0] elif len(volume_snapshot_list) > 1: LOG.error( "Expected only one volume snapshot in list with name " "'%(name)s', but got more than one in a result - " "'%(result)s'.", { 'name': volume_snapshot_name, 'result': volume_snapshot_list}) raise exception.ManilaException( _('Error. Ambiguous volume snaphots')) return volume_snapshot def _detach_volume(self, context, share, server_details): """Detaches cinder volume from service vm.""" instance_id = server_details['instance_id'] @utils.synchronized( "generic_driver_attach_detach_%s" % instance_id, external=True) def do_detach(): attached_volumes = self.compute_api.instance_volumes_list( self.admin_context, instance_id) try: volume = self._get_volume(context, share['id']) except exception.VolumeNotFound: LOG.warning("Volume not found for share %s. 
" "Possibly already deleted.", share['id']) volume = None if volume and volume['id'] in attached_volumes: self.compute_api.instance_volume_detach( self.admin_context, instance_id, volume['id'] ) t = time.time() while time.time() - t < self.configuration.max_time_to_attach: volume = self.volume_api.get(context, volume['id']) if volume['status'] in (const.STATUS_AVAILABLE, const.STATUS_ERROR): break time.sleep(1) else: err_msg = { 'volume_id': volume['id'], 'max_time': self.configuration.max_time_to_attach } raise exception.ManilaException( _('Volume %(volume_id)s has not been detached in ' '%(max_time)ss. Giving up.') % err_msg) do_detach() def _allocate_container(self, context, share, snapshot=None): """Creates cinder volume, associated to share by name.""" volume_snapshot = None if snapshot: volume_snapshot = self._get_volume_snapshot(context, snapshot['id']) volume = self.volume_api.create( context, share['size'], self.configuration.volume_name_template % share['id'], '', snapshot=volume_snapshot, volume_type=self.configuration.cinder_volume_type, availability_zone=share['availability_zone']) self.private_storage.update( share['id'], {'volume_id': volume['id']}) msg_error = _('Failed to create volume') msg_timeout = ( _('Volume has not been created in %ss. Giving up') % self.configuration.max_time_to_create_volume ) return self._wait_for_available_volume( volume, self.configuration.max_time_to_create_volume, msg_error=msg_error, msg_timeout=msg_timeout ) def _wait_for_available_volume(self, volume, timeout, msg_error, msg_timeout, expected_size=None): t = time.time() while time.time() - t < timeout: if volume['status'] == const.STATUS_AVAILABLE: if expected_size and volume['size'] != expected_size: LOG.debug("The volume %(vol_id)s is available but the " "volume size does not match the expected size. " "A volume resize operation may be pending. 
" "Expected size: %(expected_size)s, " "Actual size: %(volume_size)s.", dict(vol_id=volume['id'], expected_size=expected_size, volume_size=volume['size'])) else: break elif 'error' in volume['status'].lower(): raise exception.ManilaException(msg_error) time.sleep(1) volume = self.volume_api.get(self.admin_context, volume['id']) else: raise exception.ManilaException(msg_timeout) return volume def _deallocate_container(self, context, share): """Deletes cinder volume.""" try: volume = self._get_volume(context, share['id']) except exception.VolumeNotFound: LOG.info("Volume not found. Already deleted?") volume = None if volume: if volume['status'] == 'in-use': raise exception.ManilaException( _('Volume is still in use and ' 'cannot be deleted now.')) self.volume_api.delete(context, volume['id']) t = time.time() while (time.time() - t < self.configuration.max_time_to_create_volume): try: volume = self.volume_api.get(context, volume['id']) except exception.VolumeNotFound: LOG.debug('Volume was deleted successfully') break time.sleep(1) else: raise exception.ManilaException( _('Volume have not been ' 'deleted in %ss. 
Giving up') % self.configuration.max_time_to_create_volume) def _update_share_stats(self): """Retrieve stats info from share volume group.""" data = dict( share_backend_name=self.backend_name, storage_protocol='NFS_CIFS', reserved_percentage=self.configuration.reserved_share_percentage, reserved_snapshot_percentage=( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), reserved_share_extend_percentage=( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), ) super(GenericShareDriver, self)._update_share_stats(data) @ensure_server def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create share from snapshot.""" return self._create_share( context, share, snapshot=snapshot, share_server=share_server, ) @ensure_server def extend_share(self, share, new_size, share_server=None): server_details = share_server['backend_details'] helper = self._get_helper(share) helper.disable_access_for_maintenance(server_details, share['name']) self._unmount_device(share, server_details) volume = self._get_volume(self.admin_context, share['id']) if int(new_size) > volume['size']: self._detach_volume(self.admin_context, share, server_details) volume = self._extend_volume(self.admin_context, volume, new_size) volume = self._attach_volume( self.admin_context, share, server_details['instance_id'], volume) self._resize_filesystem(server_details, volume, new_size=new_size) self._mount_device(share, server_details, volume) helper.restore_access_after_maintenance(server_details, share['name']) def _extend_volume(self, context, volume, new_size): self.volume_api.extend(context, volume['id'], new_size) msg_error = _('Failed to extend volume %s') % volume['id'] msg_timeout = ( _('Volume has not been extended in %ss. 
Giving up') % self.configuration.max_time_to_extend_volume ) return self._wait_for_available_volume( volume, self.configuration.max_time_to_extend_volume, msg_error=msg_error, msg_timeout=msg_timeout, expected_size=new_size ) @ensure_server def shrink_share(self, share, new_size, share_server=None): server_details = share_server['backend_details'] helper = self._get_helper(share) export_location = share['export_locations'][0]['path'] mount_path = helper.get_share_path_by_export_location( server_details, export_location) consumed_space = self._get_consumed_space(mount_path, server_details) LOG.debug("Consumed space on share: %s", consumed_space) if consumed_space >= new_size: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) volume = self._get_volume(self.admin_context, share['id']) helper.disable_access_for_maintenance(server_details, share['name']) self._unmount_device(share, server_details) try: self._resize_filesystem(server_details, volume, new_size=new_size) except exception.Invalid: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) except Exception as e: msg = _("Cannot shrink share: %s") % str(e) raise exception.Invalid(msg) finally: self._mount_device(share, server_details, volume) helper.restore_access_after_maintenance(server_details, share['name']) def _resize_filesystem(self, server_details, volume, new_size=None): """Resize filesystem of provided volume.""" check_command = ['sudo', 'fsck', '-pf', volume['mountpoint']] self._ssh_exec(server_details, check_command) command = ['sudo', 'resize2fs', volume['mountpoint']] if new_size: command.append("%sG" % new_size) try: self._ssh_exec(server_details, command) except processutils.ProcessExecutionError as e: if e.stderr.find('New size smaller than minimum') != -1: msg = (_("Invalid 'new_size' provided: %s") % new_size) raise exception.Invalid(msg) else: msg = _("Cannot resize file-system: %s") % e raise exception.ManilaException(msg) def _is_share_server_active(self, 
context, share_server): """Check if the share server is active.""" has_active_share_server = ( share_server and share_server.get('backend_details') and self.service_instance_manager.ensure_service_instance( context, share_server['backend_details'])) return has_active_share_server def delete_share(self, context, share, share_server=None): """Deletes share.""" helper = self._get_helper(share) if not self.driver_handles_share_servers: share_server = self.service_instance_manager.get_common_server() if self._is_share_server_active(context, share_server): helper.remove_exports( share_server['backend_details'], share['name']) self._unmount_device(share, share_server['backend_details']) self._detach_volume(self.admin_context, share, share_server['backend_details']) # Note(jun): It is an intended breakage to deal with the cases # with any reason that caused absence of Nova instances. self._deallocate_container(self.admin_context, share) self.private_storage.delete(share['id']) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" model_update = {} volume = self._get_volume( self.admin_context, snapshot['share_instance_id']) volume_snapshot_name = (self.configuration. volume_snapshot_name_template % snapshot['id']) volume_snapshot = self.volume_api.create_snapshot_force( self.admin_context, volume['id'], volume_snapshot_name, '') t = time.time() while time.time() - t < self.configuration.max_time_to_create_volume: if volume_snapshot['status'] == const.STATUS_AVAILABLE: break if volume_snapshot['status'] == const.STATUS_ERROR: raise exception.ManilaException(_('Failed to create volume ' 'snapshot')) time.sleep(1) volume_snapshot = self.volume_api.get_snapshot( self.admin_context, volume_snapshot['id']) # NOTE(xyang): We should look at whether we still need to save # volume_snapshot_id in private_storage later, now that is saved # in provider_location. 
self.private_storage.update( snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']}) # NOTE(xyang): Need to update provider_location in the db so # that it can be used in manage/unmanage snapshot tempest tests. model_update['provider_location'] = volume_snapshot['id'] else: raise exception.ManilaException( _('Volume snapshot have not been ' 'created in %ss. Giving up') % self.configuration.max_time_to_create_volume) return model_update def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" volume_snapshot = self._get_volume_snapshot(self.admin_context, snapshot['id']) if volume_snapshot is None: return self.volume_api.delete_snapshot(self.admin_context, volume_snapshot['id']) t = time.time() while time.time() - t < self.configuration.max_time_to_create_volume: try: snapshot = self.volume_api.get_snapshot(self.admin_context, volume_snapshot['id']) except exception.VolumeSnapshotNotFound: LOG.debug('Volume snapshot was deleted successfully') self.private_storage.delete(snapshot['id']) break time.sleep(1) else: raise exception.ManilaException( _('Volume snapshot have not been ' 'deleted in %ss. Giving up') % self.configuration.max_time_to_create_volume) @ensure_server def ensure_share(self, context, share, share_server=None): """Ensure that storage are mounted and exported.""" helper = self._get_helper(share) volume = self._get_volume(context, share['id']) # NOTE(vponomaryov): volume can be None for managed shares if volume: volume = self._attach_volume( context, share, share_server['backend_details']['instance_id'], volume) self._mount_device(share, share_server['backend_details'], volume) helper.create_exports( share_server['backend_details'], share['name'], recreate=True) @ensure_server def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share. This driver has two different behaviors according to parameters: 1. 
Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' shall be empty. Previously existing access rules are cleared and then added back according to 'access_rules'. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Rules in 'access_rules' are ignored and only rules from 'add_rules' and 'delete_rules' are applied. :param context: Current context :param share: Share model with share data. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param update_rules: Empty List or List of access rules which should be updated. access_rules already contains these rules. :param share_server: None or Share server model """ self._get_helper(share).update_access(share_server['backend_details'], share['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules) def _get_helper(self, share): helper = self._helpers.get(share['share_proto']) if helper: return helper else: raise exception.InvalidShare( reason="Wrong, unsupported or disabled protocol") def get_network_allocations_number(self): """Get number of network interfaces to be created.""" # NOTE(vponomaryov): Generic driver does not need allocations, because # Nova will handle it. It is valid for all multitenant drivers, that # use service instance provided by Nova. return 0 def _setup_server(self, network_info, metadata=None): # NOTE(felipe_rodrigues): keep legacy network_info support as a dict. network_info = network_info[0] msg = "Creating share server '%s'." 
LOG.debug(msg, network_info['server_id']) server = self.service_instance_manager.set_up_service_instance( self.admin_context, network_info) for helper in self._helpers.values(): helper.init_helper(server) return server def _teardown_server(self, server_details, security_services=None): instance_id = server_details.get("instance_id") LOG.debug("Removing share infrastructure for service instance '%s'.", instance_id) self.service_instance_manager.delete_service_instance( self.admin_context, server_details) def manage_existing(self, share, driver_options): """Manage existing share to manila. Generic driver accepts only one driver_option 'volume_id'. If an administrator provides this option, then appropriate Cinder volume will be managed by Manila as well. :param share: share data :param driver_options: Empty dict or dict with 'volume_id' option. :return: dict with share size, example: {'size': 1} """ helper = self._get_helper(share) share_server = self.service_instance_manager.get_common_server() server_details = share_server['backend_details'] old_export_location = share['export_locations'][0]['path'] mount_path = helper.get_share_path_by_export_location( share_server['backend_details'], old_export_location) LOG.debug("Manage: mount path = %s", mount_path) mounted = self._is_device_mounted(mount_path, server_details) LOG.debug("Manage: is share mounted = %s", mounted) if not mounted: msg = _("Provided share %s is not mounted.") % share['id'] raise exception.ManageInvalidShare(reason=msg) def get_volume(): if 'volume_id' in driver_options: try: return self.volume_api.get( self.admin_context, driver_options['volume_id']) except exception.VolumeNotFound as e: raise exception.ManageInvalidShare(reason=e.message) # NOTE(vponomaryov): Manila can only combine volume name by itself, # nowhere to get volume ID from. Return None since Cinder volume # names are not unique or fixed, hence, they can not be used for # sure. 
return None share_volume = get_volume() if share_volume: attached_volumes = self.compute_api.instance_volumes_list( self.admin_context, server_details['instance_id']) LOG.debug('Manage: attached volumes = %s', attached_volumes) if share_volume['id'] not in attached_volumes: msg = _("Provided volume %s is not attached " "to service instance.") % share_volume['id'] raise exception.ManageInvalidShare(reason=msg) linked_volume_name = self._get_volume_name(share['id']) if share_volume['name'] != linked_volume_name: LOG.debug('Manage: volume_id = %s', share_volume['id']) self.volume_api.update(self.admin_context, share_volume['id'], {'name': linked_volume_name}) self.private_storage.update( share['id'], {'volume_id': share_volume['id']}) share_size = share_volume['size'] else: share_size = self._get_mounted_share_size( mount_path, share_server['backend_details']) export_locations = helper.get_exports_for_share( server_details, old_export_location) return {'size': share_size, 'export_locations': export_locations} def manage_existing_snapshot(self, snapshot, driver_options): """Manage existing share snapshot with manila. :param snapshot: Snapshot data :param driver_options: Not used by the Generic driver currently :return: dict with share snapshot size, example: {'size': 1} """ model_update = {} volume_snapshot = None snapshot_size = snapshot.get('share_size', 0) provider_location = snapshot.get('provider_location') try: volume_snapshot = self.volume_api.get_snapshot( self.admin_context, provider_location) except exception.VolumeSnapshotNotFound as e: raise exception.ManageInvalidShareSnapshot( reason=e.message) if volume_snapshot: snapshot_size = volume_snapshot['size'] # NOTE(xyang): volume_snapshot_id is saved in private_storage # in create_snapshot, so saving it here too for consistency. # We should look at whether we still need to save it in # private_storage later. 
self.private_storage.update( snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']}) # NOTE(xyang): provider_location is used to map a Manila snapshot # to its name on the storage backend and prevent managing of the # same snapshot twice. model_update['provider_location'] = volume_snapshot['id'] model_update['size'] = snapshot_size return model_update def unmanage_snapshot(self, snapshot): """Unmanage share snapshot with manila.""" self.private_storage.delete(snapshot['id']) def _get_mount_stats_by_index(self, mount_path, server_details, index, block_size='G'): """Get mount stats using df shell command. :param mount_path: Share path on share server :param server_details: Share server connection details :param index: Data index in df command output: BLOCK_DEVICE_SIZE_INDEX - Size of block device USED_SPACE_INDEX - Used space :param block_size: size of block (example: G, M, Mib, etc) :returns: value of provided index """ share_size_cmd = ['df', '-PB%s' % block_size, mount_path] output, __ = self._ssh_exec(server_details, share_size_cmd) lines = output.split('\n') return int(lines[1].split()[index][:-1]) def _get_mounted_share_size(self, mount_path, server_details): try: size = self._get_mount_stats_by_index( mount_path, server_details, BLOCK_DEVICE_SIZE_INDEX) except Exception as e: msg = _("Cannot calculate size of share %(path)s : %(error)s") % { 'path': mount_path, 'error': e } raise exception.ManageInvalidShare(reason=msg) return size def _get_consumed_space(self, mount_path, server_details): try: size = self._get_mount_stats_by_index( mount_path, server_details, USED_SPACE_INDEX, block_size='M') size /= float(units.Ki) except Exception as e: msg = _("Cannot calculate consumed space on share " "%(path)s : %(error)s") % { 'path': mount_path, 'error': e } raise exception.InvalidShare(reason=msg) return size ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9136717 
manila-21.0.0/manila/share/drivers/glusterfs/0000775000175000017500000000000000000000000021115 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/glusterfs/__init__.py0000664000175000017500000003150000000000000023225 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Flat network GlusterFS Driver. Manila shares are subdirectories within a GlusterFS volume. The backend, a GlusterFS cluster, uses one of the two NFS servers, Gluster-NFS or NFS-Ganesha, based on a configuration option, to mediate access to the shares. NFS-Ganesha server supports NFSv3 and v4 protocols, while Gluster-NFS server supports only NFSv3 protocol. TODO(rraja): support SMB protocol. 
""" import re import socket import sys from oslo_config import cfg from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers import ganesha from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.glusterfs import layout from manila import utils GlusterfsManilaShare_opts = [ cfg.StrOpt('glusterfs_nfs_server_type', default='Gluster', help='Type of NFS server that mediate access to the Gluster ' 'volumes (Gluster or Ganesha).'), cfg.HostAddressOpt('glusterfs_ganesha_server_ip', help="Remote Ganesha server node's IP address."), cfg.StrOpt('glusterfs_ganesha_server_username', default='root', help="Remote Ganesha server node's username."), cfg.StrOpt('glusterfs_ganesha_server_password', secret=True, help="Remote Ganesha server node's login password. " "This is not required if 'glusterfs_path_to_private_key'" ' is configured.'), ] CONF = cfg.CONF CONF.register_opts(GlusterfsManilaShare_opts) LOG = log.getLogger(__name__) NFS_EXPORT_DIR = 'nfs.export-dir' NFS_EXPORT_VOL = 'nfs.export-volumes' NFS_RPC_AUTH_ALLOW = 'nfs.rpc-auth-allow' NFS_RPC_AUTH_REJECT = 'nfs.rpc-auth-reject' class GlusterfsShareDriver(driver.ExecuteMixin, driver.GaneshaMixin, layout.GlusterfsShareDriverBase): """Execute commands relating to Shares.""" GLUSTERFS_VERSION_MIN = (3, 5) supported_layouts = ('layout_directory.GlusterfsDirectoryMappedLayout', 'layout_volume.GlusterfsVolumeMappedLayout') supported_protocols = ('NFS',) def __init__(self, *args, **kwargs): super(GlusterfsShareDriver, self).__init__(False, *args, **kwargs) LOG.warning('GlusterFS share driver has been deprecated and is ' 'expected to be removed in a future release.') self._helpers = {} self.configuration.append_config_values(GlusterfsManilaShare_opts) self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'GlusterFS' self.nfs_helper = getattr( sys.modules[__name__], 
self.configuration.glusterfs_nfs_server_type + 'NFSHelper') def do_setup(self, context): # in order to do an initial instantialization of the helper self._get_helper() super(GlusterfsShareDriver, self).do_setup(context) def _setup_via_manager(self, share_manager, share_manager_parent=None): gluster_manager = share_manager['manager'] # TODO(csaba): This should be refactored into proper dispatch to helper if self.nfs_helper == GlusterNFSHelper and not gluster_manager.path: # default behavior of NFS_EXPORT_VOL is as if it were 'on' export_vol = gluster_manager.get_vol_option( NFS_EXPORT_VOL, boolean=True) if export_vol is False: raise exception.GlusterfsException( _("Gluster-NFS with volume layout should be used " "with `nfs.export-volumes = on`")) setting = [NFS_RPC_AUTH_REJECT, '*'] else: # gluster-nfs export of the whole volume must be prohibited # to not to defeat access control setting = [NFS_EXPORT_VOL, False] gluster_manager.set_vol_option(*setting) return self.nfs_helper(self._execute, self.configuration, gluster_manager=gluster_manager).get_export( share_manager['share']) def check_for_setup_error(self): pass def _update_share_stats(self): """Retrieve stats info from the GlusterFS volume.""" data = dict( storage_protocol='NFS', vendor_name='Red Hat', share_backend_name=self.backend_name, reserved_percentage=self.configuration.reserved_share_percentage, reserved_snapshot_percentage=( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), reserved_share_extend_percentage=( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage)) super(GlusterfsShareDriver, self)._update_share_stats(data) def get_network_allocations_number(self): return 0 def _get_helper(self, gluster_mgr=None): """Choose a protocol specific helper class.""" helper_class = self.nfs_helper if (self.nfs_helper == GlusterNFSHelper and gluster_mgr and not gluster_mgr.path): helper_class = 
GlusterNFSVolHelper helper = helper_class(self._execute, self.configuration, gluster_manager=gluster_mgr) helper.init_helper() return helper @property def supported_access_types(self): return self.nfs_helper.supported_access_types @property def supported_access_levels(self): return self.nfs_helper.supported_access_levels def _update_access_via_manager(self, gluster_mgr, context, share, add_rules, delete_rules, recovery=False, share_server=None): """Update access to the share.""" self._get_helper(gluster_mgr).update_access( '/', share, add_rules, delete_rules, recovery=recovery) class GlusterNFSHelper(ganesha.NASHelperBase): """Manage shares with Gluster-NFS server.""" supported_access_types = ('ip', ) supported_access_levels = (constants.ACCESS_LEVEL_RW, ) def __init__(self, execute, config_object, **kwargs): self.gluster_manager = kwargs.pop('gluster_manager') super(GlusterNFSHelper, self).__init__(execute, config_object, **kwargs) def get_export(self, share): return self.gluster_manager.export def _get_export_dir_dict(self): """Get the export entries of shares in the GlusterFS volume.""" export_dir = self.gluster_manager.get_vol_option( NFS_EXPORT_DIR) edh = {} if export_dir: # see # https://github.com/gluster/glusterfs # /blob/aa19909/xlators/nfs/server/src/nfs.c#L1582 # regarding the format of nfs.export-dir edl = export_dir.split(',') # parsing export_dir into a dict of {dir: [hostpec,..]..} # format r = re.compile(r'\A/(.*)\((.*)\)\Z') for ed in edl: d, e = r.match(ed).groups() edh[d] = e.split('|') return edh def update_access(self, base_path, share, add_rules, delete_rules, recovery=False): """Update access rules.""" existing_rules_set = set() # The name of the directory, which is exported as the share. export_dir = self.gluster_manager.path[1:] # Fetch the existing export entries as an export dictionary with the # exported directories and the list of client IP addresses authorized # to access them as key-value pairs. 
export_dir_dict = self._get_export_dir_dict() if export_dir in export_dir_dict: existing_rules_set = set(export_dir_dict[export_dir]) add_rules_set = {rule['access_to'] for rule in add_rules} delete_rules_set = {rule['access_to'] for rule in delete_rules} new_rules_set = ( (existing_rules_set | add_rules_set) - delete_rules_set) if new_rules_set: export_dir_dict[export_dir] = new_rules_set elif export_dir not in export_dir_dict: return else: export_dir_dict.pop(export_dir) # Reconstruct the export entries. if export_dir_dict: export_dirs_new = (",".join("/%s(%s)" % (d, "|".join(sorted(v))) for d, v in sorted(export_dir_dict.items()))) else: export_dirs_new = None self.gluster_manager.set_vol_option(NFS_EXPORT_DIR, export_dirs_new) class GlusterNFSVolHelper(GlusterNFSHelper): """Manage shares with Gluster-NFS server, volume mapped variant.""" def _get_vol_exports(self): export_vol = self.gluster_manager.get_vol_option( NFS_RPC_AUTH_ALLOW) return export_vol.split(',') if export_vol else [] def update_access(self, base_path, share, add_rules, delete_rules, recovery=False): """Update access rules.""" existing_rules_set = set(self._get_vol_exports()) add_rules_set = {rule['access_to'] for rule in add_rules} delete_rules_set = {rule['access_to'] for rule in delete_rules} new_rules_set = ( (existing_rules_set | add_rules_set) - delete_rules_set) if new_rules_set: argseq = ((NFS_RPC_AUTH_ALLOW, ','.join(sorted(new_rules_set))), (NFS_RPC_AUTH_REJECT, None)) else: argseq = ((NFS_RPC_AUTH_ALLOW, None), (NFS_RPC_AUTH_REJECT, '*')) for args in argseq: self.gluster_manager.set_vol_option(*args) class GaneshaNFSHelper(ganesha.GaneshaNASHelper): shared_data = {} def __init__(self, execute, config_object, **kwargs): self.gluster_manager = kwargs.pop('gluster_manager') if config_object.glusterfs_ganesha_server_ip: execute = ganesha_utils.SSHExecutor( config_object.glusterfs_ganesha_server_ip, 22, None, config_object.glusterfs_ganesha_server_username, 
password=config_object.glusterfs_ganesha_server_password, privatekey=config_object.glusterfs_path_to_private_key) else: execute = ganesha_utils.RootExecutor(execute) self.ganesha_host = config_object.glusterfs_ganesha_server_ip if not self.ganesha_host: self.ganesha_host = socket.gethostname() kwargs['tag'] = '-'.join(('GLUSTER', 'Ganesha', self.ganesha_host)) super(GaneshaNFSHelper, self).__init__(execute, config_object, **kwargs) def get_export(self, share): return ':/'.join((self.ganesha_host, share['name'] + "--")) def init_helper(self): @utils.synchronized(self.tag) def _init_helper(): if self.tag in self.shared_data: return True super(GaneshaNFSHelper, self).init_helper() self.shared_data[self.tag] = { 'ganesha': self.ganesha, 'export_template': self.export_template} return False if _init_helper(): tagdata = self.shared_data[self.tag] self.ganesha = tagdata['ganesha'] self.export_template = tagdata['export_template'] def _default_config_hook(self): """Callback to provide default export block.""" dconf = super(GaneshaNFSHelper, self)._default_config_hook() conf_dir = ganesha_utils.path_from(__file__, "conf") ganesha_utils.patch(dconf, self._load_conf_dir(conf_dir)) return dconf def _fsal_hook(self, base, share, access): """Callback to create FSAL subblock.""" return {"Hostname": self.gluster_manager.host, "Volume": self.gluster_manager.volume, "Volpath": self.gluster_manager.path} def update_access(self, base_path, share, add_rules, delete_rules, recovery=False): """Update access rules.""" context = None access_rules = [] super(GaneshaNFSHelper, self).update_access( context, share, access_rules, add_rules, delete_rules, share_server=None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/glusterfs/common.py0000664000175000017500000004120200000000000022756 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common GlussterFS routines.""" import re from defusedxml import ElementTree as etree from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.privsep import os as privsep_os from manila.share.drivers.ganesha import utils as ganesha_utils from manila import utils LOG = log.getLogger(__name__) glusterfs_common_opts = [ cfg.StrOpt('glusterfs_server_password', secret=True, help='Remote GlusterFS server node\'s login password. 
' 'This is not required if ' '\'glusterfs_path_to_private_key\' is ' 'configured.'), cfg.StrOpt('glusterfs_path_to_private_key', help='Path of Manila host\'s private SSH key file.'), ] CONF = cfg.CONF CONF.register_opts(glusterfs_common_opts) def _check_volume_presence(f): def wrapper(self, *args, **kwargs): if not self.components.get('volume'): raise exception.GlusterfsException( _("Gluster address does not have a volume component.")) return f(self, *args, **kwargs) return wrapper def volxml_get(xmlout, *paths, **kwargs): """Attempt to extract a value by a set of Xpaths from XML.""" for path in paths: value = xmlout.find(path) if value is not None: break if value is None: if 'default' in kwargs: return kwargs['default'] raise exception.InvalidShare( _("Volume query response XML has no value for any of " "the following Xpaths: %s") % ", ".join(paths)) return value.text class GlusterManager(object): """Interface with a GlusterFS volume.""" scheme = re.compile(r'\A(?:(?P[^:@/]+)@)?' r'(?P[^:@/]+)' r'(?::/(?P[^/]+)(?P/.*)?)?\Z') # See this about GlusterFS' convention for Boolean interpretation # of strings: # https://github.com/gluster/glusterfs/blob/v3.7.8/ # libglusterfs/src/common-utils.c#L1680-L1708 GLUSTERFS_TRUE_VALUES = ('ON', 'YES', 'TRUE', 'ENABLE', '1') GLUSTERFS_FALSE_VALUES = ('OFF', 'NO', 'FALSE', 'DISABLE', '0') @classmethod def parse(cls, address): """Parse address string into component dict.""" m = cls.scheme.search(address) if not m: raise exception.GlusterfsException( _('Invalid gluster address %s.') % address) return m.groupdict() def __getattr__(self, attr): if attr in self.components: return self.components[attr] raise AttributeError("'%(typ)s' object has no attribute '%(attr)s'" % {'typ': type(self).__name__, 'attr': attr}) def __init__(self, address, execf=None, path_to_private_key=None, remote_server_password=None, requires={}): """Initialize a GlusterManager instance. 
:param address: the Gluster URI (either string of [@][:/[/]] format or component dict with "user", "host", "volume", "path" keys). :param execf: executor function for management commands. :param path_to_private_key: path to private ssh key of remote server. :param remote_server_password: ssh password for remote server. :param requires: a dict mapping some of the component names to either True or False; having it specified, respectively, the presence or absence of the given component in the uri will be enforced. """ if isinstance(address, dict): tmp_addr = "" if address.get('user') is not None: tmp_addr = address.get('user') + '@' if address.get('host') is not None: tmp_addr += address.get('host') if address.get('volume') is not None: tmp_addr += ':/' + address.get('volume') if address.get('path') is not None: tmp_addr += address.get('path') self.components = self.parse(tmp_addr) # Verify that the original dictionary matches the parsed # dictionary. This will flag typos such as {'volume': 'vol/err'} # in the original dictionary as errors. Additionally, # extra keys will need to be flagged as an error. 
sanitized_address = {key: None for key in self.scheme.groupindex} sanitized_address.update(address) if sanitized_address != self.components: raise exception.GlusterfsException( _('Invalid gluster address %s.') % address) else: self.components = self.parse(address) for k, v in requires.items(): if v is None: continue if (self.components.get(k) is not None) != v: raise exception.GlusterfsException( _('Invalid gluster address %s.') % address) self.path_to_private_key = path_to_private_key self.remote_server_password = remote_server_password if execf: self.gluster_call = self.make_gluster_call(execf) @property def host_access(self): return '@'.join(filter(None, (self.user, self.host))) def _build_uri(self, base): u = base for sep, comp in ((':/', 'volume'), ('', 'path')): if self.components[comp] is None: break u = sep.join((u, self.components[comp])) return u @property def qualified(self): return self._build_uri(self.host_access) @property def export(self): if self.volume: return self._build_uri(self.host) def make_gluster_call(self, execf): """Execute a Gluster command locally or remotely.""" if self.user: gluster_execf = ganesha_utils.SSHExecutor( self.host, 22, None, self.user, password=self.remote_server_password, privatekey=self.path_to_private_key) else: gluster_execf = ganesha_utils.RootExecutor(execf) def _gluster_call(*args, **kwargs): logmsg = kwargs.pop('log', None) error_policy = kwargs.pop('error_policy', 'coerce') if (error_policy not in ('raw', 'coerce', 'suppress') and not isinstance(error_policy[0], int)): raise TypeError(_("undefined error_policy %s") % repr(error_policy)) try: return gluster_execf(*(('gluster',) + args), **kwargs) except exception.ProcessExecutionError as exc: if error_policy == 'raw': raise elif error_policy == 'coerce': pass elif (error_policy == 'suppress' or exc.exit_code in error_policy): return if logmsg: LOG.error("%s: GlusterFS instrumentation failed.", logmsg) raise exception.GlusterfsException( _("GlusterFS management 
command '%(cmd)s' failed " "with details as follows:\n%(details)s.") % { 'cmd': ' '.join(args), 'details': exc}) return _gluster_call def xml_response_check(self, xmlout, command, countpath=None): """Sanity check for GlusterFS XML response.""" commandstr = ' '.join(command) ret = {} for e in 'opRet', 'opErrno': ret[e] = int(volxml_get(xmlout, e)) if ret == {'opRet': -1, 'opErrno': 0}: raise exception.GlusterfsException(_( 'GlusterFS command %(command)s on volume %(volume)s failed' ) % {'volume': self.volume, 'command': command}) if list(ret.values()) != [0, 0]: errdct = {'volume': self.volume, 'command': commandstr, 'opErrstr': volxml_get(xmlout, 'opErrstr', default=None)} errdct.update(ret) raise exception.InvalidShare(_( 'GlusterFS command %(command)s on volume %(volume)s got ' 'unexpected response: ' 'opRet=%(opRet)s, opErrno=%(opErrno)s, opErrstr=%(opErrstr)s' ) % errdct) if not countpath: return count = volxml_get(xmlout, countpath) if count != '1': raise exception.InvalidShare( _('GlusterFS command %(command)s on volume %(volume)s got ' 'ambiguous response: ' '%(count)s records') % { 'volume': self.volume, 'command': commandstr, 'count': count}) def _get_vol_option_via_info(self, option): """Get the value of an option set on a GlusterFS volume via volinfo.""" args = ('--xml', 'volume', 'info', self.volume) out, err = self.gluster_call(*args, log=("retrieving volume info")) if not out: raise exception.GlusterfsException( 'gluster volume info %s: no data received' % self.volume ) volxml = etree.fromstring(out) self.xml_response_check(volxml, args[1:], './volInfo/volumes/count') for e in volxml.findall(".//option"): o, v = (volxml_get(e, a) for a in ('name', 'value')) if o == option: return v @_check_volume_presence def _get_vol_user_option(self, useropt): """Get the value of an user option set on a GlusterFS volume.""" option = '.'.join(('user', useropt)) return self._get_vol_option_via_info(option) @_check_volume_presence def _get_vol_regular_option(self, 
option): """Get the value of a regular option set on a GlusterFS volume.""" args = ('--xml', 'volume', 'get', self.volume, option) out, err = self.gluster_call(*args, check_exit_code=False) if not out: # all input is valid, but the option has not been set # (nb. some options do come by a null value, but some # don't even have that, see eg. cluster.nufa) return try: optxml = etree.fromstring(out) except Exception: # non-xml output indicates that GlusterFS backend does not support # 'vol get', we fall back to 'vol info' based retrieval (glusterfs # < 3.7). return self._get_vol_option_via_info(option) self.xml_response_check(optxml, args[1:], './volGetopts/count') # the Xpath has changed from first to second as of GlusterFS # 3.7.14 (see http://review.gluster.org/14931). return volxml_get(optxml, './volGetopts/Value', './volGetopts/Opt/Value') def get_vol_option(self, option, boolean=False): """Get the value of an option set on a GlusterFS volume.""" useropt = re.sub(r'\Auser\.', '', option) if option == useropt: value = self._get_vol_regular_option(option) else: value = self._get_vol_user_option(useropt) if not boolean or value is None: return value if value.upper() in self.GLUSTERFS_TRUE_VALUES: return True if value.upper() in self.GLUSTERFS_FALSE_VALUES: return False raise exception.GlusterfsException(_( "GlusterFS volume option on volume %(volume)s: " "%(option)s=%(value)s cannot be interpreted as Boolean") % { 'volume': self.volume, 'option': option, 'value': value}) @_check_volume_presence @utils.retry(retry_param=exception.GlusterfsException) def set_vol_option(self, option, value, ignore_failure=False): value = {True: self.GLUSTERFS_TRUE_VALUES[0], False: self.GLUSTERFS_FALSE_VALUES[0]}.get(value, value) if value is None: args = ('reset', (option,)) else: args = ('set', (option, value)) policy = (1,) if ignore_failure else 'coerce' self.gluster_call( 'volume', args[0], self.volume, *args[1], error_policy=policy) def get_gluster_version(self): """Retrieve 
GlusterFS version. :returns: version (as tuple of strings, example: ('3', '6', '0beta2')) """ out, err = self.gluster_call('--version', log=("GlusterFS version query")) try: owords = out.split() if owords[0] != 'glusterfs': raise RuntimeError vers = owords[1].split('.') # provoke an exception if vers does not start with two numerals int(vers[0]) int(vers[1]) except Exception: raise exception.GlusterfsException( _("Cannot parse version info obtained from server " "%(server)s, version info: %(info)s") % {'server': self.host, 'info': out}) return vers def check_gluster_version(self, minvers): """Retrieve and check GlusterFS version. :param minvers: minimum version to require (given as tuple of integers, example: (3, 6)) """ vers = self.get_gluster_version() if numreduct(vers) < minvers: raise exception.GlusterfsException(_( "Unsupported GlusterFS version %(version)s on server " "%(server)s, minimum requirement: %(minvers)s") % { 'server': self.host, 'version': '.'.join(vers), 'minvers': '.'.join(str(c) for c in minvers)}) def numreduct(vers): """The numeric reduct of a tuple of strings. That is, applying an integer conversion map on the longest initial segment of vers which consists of numerals. """ numvers = [] for c in vers: try: numvers.append(int(c)) except ValueError: break return tuple(numvers) def _mount_gluster_vol(execute, gluster_export, mount_path, ensure=False): """Mount a GlusterFS volume at the specified mount path. 
:param execute: command execution function :param gluster_export: GlusterFS export to mount :param mount_path: path to mount at :param ensure: boolean to allow remounting a volume with a warning """ execute('mkdir', '-p', mount_path) try: privsep_os.mount(gluster_export, mount_path, mount_type='glusterfs') except exception.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.stderr: LOG.warning("%s is already mounted.", gluster_export) else: raise exception.GlusterfsException( 'Unable to mount Gluster volume' ) def _umount_gluster_vol(mount_path): """Unmount a GlusterFS volume at the specified mount path. :param mount_path: path where volume is mounted """ try: privsep_os.umount(mount_path) except exception.ProcessExecutionError as exc: msg = (_("Unable to unmount gluster volume. " "mount_dir: %(mount_path)s, Error: %(error)s") % {'mount_path': mount_path, 'error': exc.stderr}) LOG.error(msg) raise exception.GlusterfsException(msg) def _restart_gluster_vol(gluster_mgr): """Restart a GlusterFS volume through its manager. :param gluster_mgr: GlusterManager instance """ # TODO(csaba): '--mode=script' ensures that the Gluster CLI runs in # script mode. This seems unnecessary as the Gluster CLI is # expected to run in non-interactive mode when the stdin is not # a terminal, as is the case below. But on testing, found the # behaviour of Gluster-CLI to be the contrary. Need to investigate # this odd-behaviour of Gluster-CLI. 
gluster_mgr.gluster_call( 'volume', 'stop', gluster_mgr.volume, '--mode=script', log=("stopping GlusterFS volume %s") % gluster_mgr.volume) gluster_mgr.gluster_call( 'volume', 'start', gluster_mgr.volume, log=("starting GlusterFS volume %s") % gluster_mgr.volume) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9136717 manila-21.0.0/manila/share/drivers/glusterfs/conf/0000775000175000017500000000000000000000000022042 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/glusterfs/conf/10-glusterfs-export-template.conf0000664000175000017500000000020600000000000030273 0ustar00zuulzuul00000000000000EXPORT { FSAL { Name = GLUSTER; Hostname = @config; Volume = @config; Volpath = @runtime; } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/glusterfs/glusterfs_native.py0000664000175000017500000002260700000000000025062 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GlusterFS native protocol (glusterfs) driver for shares. Manila share is a GlusterFS volume. Unlike the generic driver, this does not use service VM approach. Instances directly talk with the GlusterFS backend storage pool. 
ACCESS_TYPE_CERT = 'cert'
AUTH_SSL_ALLOW = 'auth.ssl-allow'
CLIENT_SSL = 'client.ssl'
NFS_EXPORT_VOL = 'nfs.export-volumes'
SERVER_SSL = 'server.ssl'
DYNAMIC_AUTH = 'server.dynamic-auth'


class GlusterfsNativeShareDriver(driver.ExecuteMixin,
                                 layout.GlusterfsShareDriverBase):
    """GlusterFS native protocol (glusterfs) share driver.

    Executes commands relating to Shares.
    Supports working with multiple glusterfs volumes.

    API version history:

        1.0 - Initial version.
        1.1 - Support for working with multiple gluster volumes.
    """

    GLUSTERFS_VERSION_MIN = (3, 6)

    _supported_access_levels = (constants.ACCESS_LEVEL_RW, )
    _supported_access_types = (ACCESS_TYPE_CERT, )

    supported_layouts = ('layout_volume.GlusterfsVolumeMappedLayout',)
    supported_protocols = ('GLUSTERFS',)

    def __init__(self, *args, **kwargs):
        super(GlusterfsNativeShareDriver, self).__init__(
            False, *args, **kwargs)
        LOG.warning('GlusterFS native share driver has been deprecated and is '
                    'expected to be removed in a future release.')
        self._helpers = None
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or 'GlusterFS-Native'

    def _setup_via_manager(self, share_mgr, share_mgr_parent=None):
        """Prepare a GlusterFS volume so it is accessible via SSL only.

        :param share_mgr: {'share': <share>, 'manager': <gluster_mgr>} for
            the share being set up.
        :param share_mgr_parent: same-shaped dict for the snapshotted parent
            share when the volume is a snapshot clone, else None.
        :returns: export location of the prepared volume.
        :raises GlusterfsException: when auth.ssl-allow is unset on the
            volume (see below).
        """
        # Enable gluster volumes for SSL access only.
        gluster_mgr = share_mgr['manager']
        gluster_mgr_parent = (share_mgr_parent or {}).get('manager', None)
        ssl_allow_opt = (gluster_mgr_parent if gluster_mgr_parent else
                         gluster_mgr).get_vol_option(AUTH_SSL_ALLOW)
        if not ssl_allow_opt:
            # Not having AUTH_SSL_ALLOW set is a problematic edge case.
            # - In GlusterFS 3.6, it implies that access is allowed to
            #   none, including intra-service access, which causes
            #   problems internally in GlusterFS
            # - In GlusterFS 3.7, it implies that access control is
            #   disabled, which defeats the purpose of this driver --
            # so to avoid these possibilities, we throw an error in this case.
            msg = (_("Option %(option)s is not defined on gluster volume. "
                     "Volume: %(volname)s") %
                   {'volname': gluster_mgr.volume,
                    'option': AUTH_SSL_ALLOW})
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        gluster_actions = []
        if gluster_mgr_parent:
            # The clone of the snapshot, a new volume, retains the authorized
            # access list of the snapshotted volume/share, which includes TLS
            # identities of the backend servers, Manila hosts and clients.
            # Retain the identities of the GlusterFS servers and Manila host,
            # and exclude those of the clients in the authorized access list
            # of the new volume. The TLS identities of GlusterFS servers are
            # determined as those that are prefixed by 'glusterfs-server'.
            # And the TLS identity of the Manila host is identified as the
            # one that has 'manila-host' as the prefix.
            # Wrt. GlusterFS' parsing of auth.ssl-allow, please see code from
            # https://github.com/gluster/glusterfs/blob/v3.6.2/
            # xlators/protocol/auth/login/src/login.c#L80
            # until end of gf_auth() function
            old_access_list = re.split('[ ,]', ssl_allow_opt)
            glusterfs_server_CN_pattern = r'\Aglusterfs-server'
            manila_host_CN_pattern = r'\Amanila-host'
            regex = re.compile(
                r'%(pattern1)s|%(pattern2)s' % {
                    'pattern1': glusterfs_server_CN_pattern,
                    'pattern2': manila_host_CN_pattern})
            access_to = ','.join(filter(regex.match, old_access_list))

            gluster_actions.append((AUTH_SSL_ALLOW, access_to))

        for option, value in (
            (NFS_EXPORT_VOL, False), (CLIENT_SSL, True), (SERVER_SSL, True)
        ):
            gluster_actions.append((option, value))

        for action in gluster_actions:
            gluster_mgr.set_vol_option(*action)

        gluster_mgr.set_vol_option(DYNAMIC_AUTH, True, ignore_failure=True)

        # SSL enablement requires a fresh volume start to take effect.
        # A volume cloned from a snapshot is not started yet (it will only
        # be started after this function returns), so the restart is only
        # needed for a pre-existing volume.
        if not gluster_mgr_parent:
            common._restart_gluster_vol(gluster_mgr)

        return gluster_mgr.export

    @utils.synchronized("glusterfs_native_access", external=False)
    def _update_access_via_manager(self, gluster_mgr, context, share,
                                   add_rules, delete_rules, recovery=False,
                                   share_server=None):
        """Update access rules, authorize SSL CNs (Common Names)."""
        # Fetch existing authorized CNs, the value of Gluster option
        # 'auth.ssl-allow' that is available as a comma separated string.
        # wrt. GlusterFS' parsing of auth.ssl-allow, please see code from
        # https://github.com/gluster/glusterfs/blob/v3.6.2/
        # xlators/protocol/auth/login/src/login.c#L80
        # until end of gf_auth() function
        ssl_allow_opt = gluster_mgr.get_vol_option(AUTH_SSL_ALLOW)
        existing_rules_set = set(re.split('[ ,]', ssl_allow_opt))

        add_rules_set = {rule['access_to'] for rule in add_rules}
        for rule in add_rules_set:
            if re.search('[ ,]', rule):
                raise exception.GlusterfsException(
                    _("Invalid 'access_to' '%s': common names used for "
                      "GlusterFS authentication should not contain comma "
                      "or whitespace.") % rule)
        delete_rules_set = {rule['access_to'] for rule in delete_rules}
        new_rules_set = (
            (existing_rules_set | add_rules_set) - delete_rules_set)

        # Addition or removal of CNs in the authorized list through the
        # Gluster CLI, used by 'GlusterManager' objects, can only be done by
        # replacing the existing list with the newly modified list.
        ssl_allow_opt = ','.join(sorted(new_rules_set))
        gluster_mgr.set_vol_option(AUTH_SSL_ALLOW, ssl_allow_opt)

        # When the Gluster option, DYNAMIC_AUTH is not enabled for the gluster
        # volume/manila share, the removal of CN of a client does not affect
        # the client's existing connection to the volume until the volume is
        # restarted.
        if delete_rules:
            dynauth = gluster_mgr.get_vol_option(DYNAMIC_AUTH, boolean=True)
            if not dynauth:
                common._restart_gluster_vol(gluster_mgr)

    def _update_share_stats(self):
        """Send stats info for the GlusterFS volume."""
        data = dict(
            share_backend_name=self.backend_name,
            vendor_name='Red Hat',
            driver_version='1.1',
            storage_protocol='glusterfs',
            reserved_percentage=self.configuration.reserved_share_percentage,
            reserved_snapshot_percentage=(
                self.configuration.reserved_share_from_snapshot_percentage
                or self.configuration.reserved_share_percentage),
            reserved_share_extend_percentage=(
                self.configuration.reserved_share_extend_percentage
                or self.configuration.reserved_share_percentage))

        # We don't use a service mount to get stats data.
        # Instead we use glusterfs quota feature and use that to limit
        # the share to its expected share['size'].

        # TODO(deepakcs): Change below once glusterfs supports volume
        # specific stats via the gluster cli.
        data['total_capacity_gb'] = 'unknown'
        data['free_capacity_gb'] = 'unknown'
        super(GlusterfsNativeShareDriver, self)._update_share_stats(data)

    def get_network_allocations_number(self):
        # No share-server networking: clients mount the Gluster volume
        # directly over the backend network.
        return 0
import abc
import errno

from oslo_config import cfg
from oslo_utils import importutils

from manila import exception
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.ganesha import utils as ganesha_utils


glusterfs_share_layout_opts = [
    cfg.StrOpt(
        'glusterfs_share_layout',
        help="Specifies GlusterFS share layout, that is, "
             "the method of associating backing GlusterFS "
             "resources to shares."),
]

CONF = cfg.CONF
CONF.register_opts(glusterfs_share_layout_opts)


class GlusterfsShareDriverBase(driver.ShareDriver):
    """Base for GlusterFS share drivers that delegate to a layout object."""

    LAYOUT_PREFIX = 'manila.share.drivers.glusterfs'

    supported_layouts = ()
    supported_protocols = ()
    _supported_access_types = ()
    _supported_access_levels = ()

    GLUSTERFS_VERSION_MIN = (0, 0)

    def __init__(self, *args, **kwargs):
        super(GlusterfsShareDriverBase, self).__init__(*args, **kwargs)

        self.configuration.append_config_values(
            glusterfs_share_layout_opts)

        layout_name = self.configuration.glusterfs_share_layout
        if not layout_name:
            layout_name = self.supported_layouts[0]
        if layout_name not in self.supported_layouts:
            raise exception.GlusterfsException(
                _('driver %(driver)s does not support %(layout)s layout') %
                {'driver': type(self).__name__, 'layout': layout_name})

        self.layout = importutils.import_object(
            '.'.join((self.LAYOUT_PREFIX, layout_name)),
            self, **kwargs)
        # we determine snapshot support in our own scope, as
        # 1) the calculation based on parent method
        #    redefinition does not work for us, as actual
        #    glusterfs driver classes are subclassed from
        #    *this* class, not from driver.ShareDriver
        #    and they don't need to redefine snapshot
        #    methods for themselves;
        # 2) snapshot support depends on choice of layout.
        self._snapshots_are_supported = getattr(self.layout,
                                                '_snapshots_are_supported',
                                                False)

    def _setup_via_manager(self, share_mgr, share_mgr_parent=None):
        """Callback for layout's `create_share`/`create_share_from_snapshot`

        :param share_mgr: a {'share': <share>, 'manager': <gluster_mgr>}
            dict where <share> is the share created in `create_share` or
            `create_share_from_snapshot` and <gluster_mgr> is a
            GlusterManager instance representing the GlusterFS resource
            allocated for it.
        :param share_mgr_parent: a {'share': <share>, 'manager':
            <gluster_mgr>} dict where <share> is the original share of the
            snapshot used in `create_share_from_snapshot` and <gluster_mgr>
            is a GlusterManager instance representing the GlusterFS resource
            allocated for it.
        :returns: export location for share_mgr['share'].
        """

    @property
    def supported_access_levels(self):
        return self._supported_access_levels

    @property
    def supported_access_types(self):
        return self._supported_access_types

    def _access_rule_validator(self, abort):
        # Build a validator closure that checks a single access rule
        # against this driver's supported access types and levels.
        def validator(rule):
            return ganesha_utils.validate_access_rule(
                self.supported_access_types, self.supported_access_levels,
                rule, abort)

        return validator

    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, update_rules, share_server=None):
        """Update access rules for given share.

        Driver supports 2 different cases in this method:
        1. Recovery after error - 'access_rules' contains all access_rules,
        'add_rules' and 'delete_rules' are []. Driver should clear any
        existent access rules and apply all access rules for given share.
        This recovery is made at driver start up.

        2. Adding/Deleting of several access rules - 'access_rules' contains
        all access_rules, 'add_rules' and 'delete_rules' and 'update_rules'
        contain rules which should be added/deleted/updated. Driver can
        ignore rules in 'access_rules' and apply only rules from 'add_rules',
        'delete_rules' and 'update_rules'.
        """
        gluster_mgr = self.layout._share_manager(share)

        access_rules, add_rules, delete_rules = (
            list(filter(self._access_rule_validator(abort), rules)) for (
                rules, abort) in ((access_rules, True),
                                  (add_rules, True),
                                  (delete_rules, False)))

        # Recovery mode.
        if not (add_rules or delete_rules):
            ruleop, recovery = (access_rules, []), True
        else:
            ruleop, recovery = (add_rules, delete_rules), False

        self._update_access_via_manager(gluster_mgr, context, share,
                                        *ruleop, recovery=recovery)

    def _update_access_via_manager(self, gluster_mgr, context, share,
                                   add_rules, delete_rules, recovery=False,
                                   share_server=None):
        raise NotImplementedError()

    def do_setup(self, *a, **kw):
        return self.layout.do_setup(*a, **kw)

    @classmethod
    def _check_proto(cls, share):
        # Reject shares whose protocol this driver class does not serve.
        proto = share['share_proto'].upper()
        if proto not in cls.supported_protocols:
            msg = _("Share protocol %s is not supported.") % proto
            raise exception.ShareBackendException(msg=msg)

    def create_share(self, context, share, *a, **kw):
        self._check_proto(share)
        return self.layout.create_share(context, share, *a, **kw)

    def create_share_from_snapshot(self, context, share, *a, **kw):
        self._check_proto(share)
        return self.layout.create_share_from_snapshot(context, share,
                                                      *a, **kw)

    def create_snapshot(self, *a, **kw):
        return self.layout.create_snapshot(*a, **kw)

    def delete_share(self, *a, **kw):
        return self.layout.delete_share(*a, **kw)

    def delete_snapshot(self, *a, **kw):
        return self.layout.delete_snapshot(*a, **kw)

    def ensure_share(self, *a, **kw):
        return self.layout.ensure_share(*a, **kw)

    def manage_existing(self, *a, **kw):
        return self.layout.manage_existing(*a, **kw)

    def unmanage(self, *a, **kw):
        return self.layout.unmanage(*a, **kw)

    def extend_share(self, *a, **kw):
        return self.layout.extend_share(*a, **kw)

    def shrink_share(self, *a, **kw):
        return self.layout.shrink_share(*a, **kw)

    def _update_share_stats(self, data=None):
        # NOTE(review): 'data' previously defaulted to a shared mutable
        # dict ({}) which was mutated via data.update() below -- the
        # classic mutable-default-argument pitfall. Default to None and
        # create a fresh dict per call; behavior is unchanged for callers
        # that pass their own dict.
        if data is None:
            data = {}
        try:
            data.update(self.layout._update_share_stats())
        except NotImplementedError:
            pass
        super(GlusterfsShareDriverBase, self)._update_share_stats(data)


class GlusterfsShareLayoutBase(metaclass=abc.ABCMeta):
    """Base class for share layouts."""

    def __init__(self, driver, *args, **kwargs):
        self.driver = driver
        self.configuration = kwargs.get('configuration')

    def _check_mount_glusterfs(self):
        """Checks if mount.glusterfs(8) is available."""
        try:
            self.driver._execute('mount.glusterfs', check_exit_code=False)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise exception.GlusterfsException(
                    _('mount.glusterfs is not installed.'))
            else:
                raise

    @abc.abstractmethod
    def _share_manager(self, share):
        """Return GlusterManager object representing share's backend."""

    @abc.abstractmethod
    def do_setup(self, context):
        """Any initialization the share driver does while starting."""

    @abc.abstractmethod
    def create_share(self, context, share, share_server=None):
        """Is called to create share."""

    @abc.abstractmethod
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        """Is called to create share from snapshot."""

    @abc.abstractmethod
    def create_snapshot(self, context, snapshot, share_server=None):
        """Is called to create snapshot."""

    @abc.abstractmethod
    def delete_share(self, context, share, share_server=None):
        """Is called to remove share."""

    @abc.abstractmethod
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Is called to remove snapshot."""

    @abc.abstractmethod
    def ensure_share(self, context, share, share_server=None):
        """Invoked to ensure that share is exported."""

    @abc.abstractmethod
    def manage_existing(self, share, driver_options):
        """Brings an existing share under Manila management."""

    @abc.abstractmethod
    def unmanage(self, share):
        """Removes the specified share from Manila management."""

    @abc.abstractmethod
    def extend_share(self, share, new_size, share_server=None):
        """Extends size of existing share."""

    @abc.abstractmethod
    def shrink_share(self, share, new_size, share_server=None):
        """Shrinks size of existing share."""

    def _update_share_stats(self):
        raise NotImplementedError()
class GlusterfsDirectoryMappedLayout(layout.GlusterfsShareLayoutBase):
    """Layout mapping each share to a directory of one GlusterFS volume."""

    def __init__(self, driver, *args, **kwargs):
        super(GlusterfsDirectoryMappedLayout, self).__init__(
            driver, *args, **kwargs)
        self.configuration.append_config_values(
            common.glusterfs_common_opts)
        self.configuration.append_config_values(
            glusterfs_directory_mapped_opts)

    def _glustermanager(self, gluster_address):
        """Create GlusterManager object for gluster_address."""
        return common.GlusterManager(
            gluster_address, self.driver._execute,
            self.configuration.glusterfs_path_to_private_key,
            self.configuration.glusterfs_server_password,
            requires={'volume': True})

    def do_setup(self, context):
        """Prepares the backend and appropriate NAS helpers."""
        if not self.configuration.glusterfs_target:
            raise exception.GlusterfsException(
                _('glusterfs_target configuration that specifies the '
                  'GlusterFS volume to be mounted on the Manila host is '
                  'not set.'))
        self.gluster_manager = self._glustermanager(
            self.configuration.glusterfs_target)
        self.gluster_manager.check_gluster_version(
            self.driver.GLUSTERFS_VERSION_MIN)
        self._check_mount_glusterfs()

        # enable quota options of a GlusterFS volume to allow
        # creation of shares of specific size
        args = ('volume', 'quota', self.gluster_manager.volume, 'enable')
        try:
            self.gluster_manager.gluster_call(*args)
        except exception.GlusterfsException:
            # Tolerate the failure when quota is already enabled;
            # re-raise otherwise.
            if (self.gluster_manager.
                    get_vol_option('features.quota')) != 'on':
                LOG.exception("Error in tuning GlusterFS volume to enable "
                              "creation of shares of specific size.")
                raise

        self._ensure_gluster_vol_mounted()

    def _share_manager(self, share):
        # A share's backend resource is the volume restricted to the
        # share's subdirectory path.
        comp_path = self.gluster_manager.components.copy()
        comp_path.update({'path': '/' + share['name']})
        return self._glustermanager(comp_path)

    def _get_mount_point_for_gluster_vol(self):
        """Return mount point for the GlusterFS volume."""
        return os.path.join(self.configuration.glusterfs_mount_point_base,
                            self.gluster_manager.volume)

    def _ensure_gluster_vol_mounted(self):
        """Ensure GlusterFS volume is native-mounted on Manila host."""
        mount_path = self._get_mount_point_for_gluster_vol()
        try:
            common._mount_gluster_vol(self.driver._execute,
                                      self.gluster_manager.export,
                                      mount_path, ensure=True)
        except exception.GlusterfsException:
            LOG.exception('Could not mount the Gluster volume %s',
                          self.gluster_manager.volume)
            raise

    def _get_local_share_path(self, share):
        """Determine mount path of the GlusterFS volume in the Manila host."""
        local_vol_path = self._get_mount_point_for_gluster_vol()
        if not os.access(local_vol_path, os.R_OK):
            raise exception.GlusterfsException('share path %s does not exist'
                                               % local_vol_path)
        return os.path.join(local_vol_path, share['name'])

    def _update_share_stats(self):
        """Retrieve stats info from the GlusterFS volume."""

        # sanity check for gluster ctl mount
        smpb = os.stat(self.configuration.glusterfs_mount_point_base)
        smp = os.stat(self._get_mount_point_for_gluster_vol())
        if smpb.st_dev == smp.st_dev:
            raise exception.GlusterfsException(
                _("GlusterFS control mount is not available")
            )
        smpv = os.statvfs(self._get_mount_point_for_gluster_vol())

        return {'total_capacity_gb': (smpv.f_blocks * smpv.f_frsize) >> 30,
                'free_capacity_gb': (smpv.f_bavail * smpv.f_frsize) >> 30}

    def create_share(self, ctx, share, share_server=None):
        """Create a sub-directory/share in the GlusterFS volume."""
        local_share_path = self._get_local_share_path(share)
        try:
            privsep_os.mkdir(local_share_path)
            # set a directory quota so the share is bounded by share['size']
            self._set_directory_quota(share, share['size'])
        except Exception as exc:
            if isinstance(exc, exception.ProcessExecutionError):
                exc = exception.GlusterfsException(exc)
            if isinstance(exc, exception.GlusterfsException):
                self._cleanup_create_share(local_share_path, share['name'])
                LOG.error('Unable to create share %s', share['name'])
            raise exc

        comp_share = self.gluster_manager.components.copy()
        comp_share['path'] = '/' + share['name']
        export_location = self.driver._setup_via_manager(
            {'share': share,
             'manager': self._glustermanager(comp_share)})

        return export_location

    def _cleanup_create_share(self, share_path, share_name):
        """Cleanup share that errored out during its creation."""
        if os.path.exists(share_path):
            try:
                privsep_os.recursive_forced_rm(share_path)
            except exception.ProcessExecutionError as exc:
                LOG.error('Cannot cleanup share, %s, that errored out '
                          'during its creation, but exists in GlusterFS '
                          'volume.', share_name)
                raise exception.GlusterfsException(exc)

    def delete_share(self, context, share, share_server=None):
        """Remove a sub-directory/share from the GlusterFS volume."""
        local_share_path = self._get_local_share_path(share)
        try:
            privsep_os.recursive_forced_rm(local_share_path)
        except exception.ProcessExecutionError:
            LOG.exception('Unable to delete share %s', share['name'])
            raise

    def ensure_share(self, context, share, share_server=None):
        pass

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        raise NotImplementedError

    def create_snapshot(self, context, snapshot, share_server=None):
        raise NotImplementedError

    def delete_snapshot(self, context, snapshot, share_server=None):
        raise NotImplementedError

    def manage_existing(self, share, driver_options):
        raise NotImplementedError

    def unmanage(self, share):
        raise NotImplementedError

    def extend_share(self, share, new_size, share_server=None):
        """Extend a sub-directory/share in the GlusterFS volume."""
        self._set_directory_quota(share, new_size)

    def shrink_share(self, share, new_size, share_server=None):
        """Shrink a sub-directory/share in the GlusterFS volume."""
        usage = self._get_directory_usage(share)
        consumed_limit = int(math.ceil(usage))

        if consumed_limit > new_size:
            raise exception.ShareShrinkingPossibleDataLoss(
                share_id=share['id'])

        self._set_directory_quota(share, new_size)

    def _set_directory_quota(self, share, new_size):
        """Set the share directory's quota limit to new_size (GB)."""
        sizestr = str(new_size) + 'GB'
        share_dir = '/' + share['name']

        args = ('volume', 'quota', self.gluster_manager.volume,
                'limit-usage', share_dir, sizestr)

        try:
            self.gluster_manager.gluster_call(*args)
        except exception.GlusterfsException:
            LOG.error('Unable to set quota share %s', share['name'])
            raise

    def _get_directory_usage(self, share):
        """Return space consumed under the share's quota, as float GB."""
        share_dir = '/' + share['name']

        args = ('--xml', 'volume', 'quota', self.gluster_manager.volume,
                'list', share_dir)

        try:
            out, err = self.gluster_manager.gluster_call(*args)
        except exception.GlusterfsException:
            LOG.error('Unable to get quota share %s', share['name'])
            raise

        volxml = etree.fromstring(out)
        # NOTE(review): previously a missing 'used_space' node surfaced as
        # a bare AttributeError on None; raise a driver-level error with
        # context instead.
        usage_node = volxml.find('./volQuota/limit/used_space')
        if usage_node is None or usage_node.text is None:
            raise exception.GlusterfsException(
                _('Unable to parse quota usage of share %s from gluster '
                  'CLI output') % share['name'])
        usage = utils.translate_string_size_to_float(usage_node.text)
        return usage
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """GlusterFS volume mapped share layout.""" import os import random import re import shutil import string import tempfile from defusedxml import ElementTree as etree from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.privsep import os as privsep_os from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import layout from manila import utils LOG = log.getLogger(__name__) glusterfs_volume_mapped_opts = [ cfg.ListOpt('glusterfs_servers', default=[], help='List of GlusterFS servers that can be used to create ' 'shares. Each GlusterFS server should be of the form ' '[remoteuser@], and they are assumed to ' 'belong to distinct Gluster clusters.'), cfg.StrOpt('glusterfs_volume_pattern', help='Regular expression template used to filter ' 'GlusterFS volumes for share creation. ' 'The regex template can optionally (ie. with support ' 'of the GlusterFS backend) contain the #{size} ' 'parameter which matches an integer (sequence of ' 'digits) in which case the value shall be interpreted as ' 'size of the volume in GB. Examples: ' r'"manila-share-volume-\d+$", ' r'"manila-share-volume-#{size}G-\d+$"; ' 'with matching volume names, respectively: ' '"manila-share-volume-12", "manila-share-volume-3G-13". 
# The dict specifying named parameters
# that can be used with glusterfs_volume_pattern
# in #{} format.
# For each of them we give regex pattern it matches
# and a transformer function ('trans') for the matched
# string value.
# Currently we handle only #{size}.
#
# NOTE(review): the named group syntax was garbled in this copy to
# '(?P\d+)' (an invalid regex); restored to '(?P<size>\d+)' so the
# pattern compiles and 'size' can be extracted by group name.
PATTERN_DICT = {'size': {'pattern': r'(?P<size>\d+)', 'trans': int}}

USER_MANILA_SHARE = 'user.manila-share'
USER_CLONED_FROM = 'user.manila-cloned-from'
UUID_RE = re.compile(r'\A[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}\Z', re.I)


class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
    """Layout mapping each share to a whole GlusterFS volume."""

    _snapshots_are_supported = True

    def __init__(self, driver, *args, **kwargs):
        super(GlusterfsVolumeMappedLayout, self).__init__(
            driver, *args, **kwargs)
        self.gluster_used_vols = set()
        self.configuration.append_config_values(
            common.glusterfs_common_opts)
        self.configuration.append_config_values(
            glusterfs_volume_mapped_opts)
        self.gluster_nosnap_vols_dict = {}
        self.volume_pattern = self._compile_volume_pattern()
        self.volume_pattern_keys = self.volume_pattern.groupindex.keys()
        for srvaddr in self.configuration.glusterfs_servers:
            # format check for srvaddr
            self._glustermanager(srvaddr, False)
        self.glusterfs_versions = {}
        self.private_storage = kwargs.get('private_storage')

    def _compile_volume_pattern(self):
        """Compile a RegexObject from the config specified regex template.

        (cfg.glusterfs_volume_pattern)
        """
        subdict = {}
        for key, val in PATTERN_DICT.items():
            subdict[key] = val['pattern']

        # Using templates with placeholder syntax #{}
        class CustomTemplate(string.Template):
            delimiter = '#'

        volume_pattern = CustomTemplate(
            self.configuration.glusterfs_volume_pattern).substitute(
            subdict)
        return re.compile(volume_pattern)
msg = (_("Gluster backend does not provide any volume " "matching pattern %s" ) % self.configuration.glusterfs_volume_pattern) LOG.error(msg) raise exception.GlusterfsException(msg) LOG.info("Found %d Gluster volumes allocated for Manila.", len(gluster_volumes_initial)) self._check_mount_glusterfs() def _glustermanager(self, gluster_address, req_volume=True): """Create GlusterManager object for gluster_address.""" return common.GlusterManager( gluster_address, self.driver._execute, self.configuration.glusterfs_path_to_private_key, self.configuration.glusterfs_server_password, requires={'volume': req_volume}) def _share_manager(self, share): """Return GlusterManager object representing share's backend.""" gluster_address = self.private_storage.get(share['id'], 'volume') if gluster_address is None: return return self._glustermanager(gluster_address) def _fetch_gluster_volumes(self, filter_used=True): """Do a 'gluster volume list | grep '. Aggregate the results from all servers. Extract the named groups from the matching volume names using the specs given in PATTERN_DICT. Return a dict with keys of the form :/ and values being dicts that map names of named groups to their extracted value. 
""" volumes_dict = {} for srvaddr in self.configuration.glusterfs_servers: gluster_mgr = self._glustermanager(srvaddr, False) if gluster_mgr.user: logmsg = ("Retrieving volume list " "on host %s") % gluster_mgr.host else: logmsg = ("Retrieving volume list") out, err = gluster_mgr.gluster_call('volume', 'list', log=logmsg) for volname in out.split("\n"): patmatch = self.volume_pattern.match(volname) if not patmatch: continue comp_vol = gluster_mgr.components.copy() comp_vol.update({'volume': volname}) gluster_mgr_vol = self._glustermanager(comp_vol) if filter_used: vshr = gluster_mgr_vol.get_vol_option( USER_MANILA_SHARE) or '' if UUID_RE.search(vshr): continue pattern_dict = {} for key in self.volume_pattern_keys: keymatch = patmatch.group(key) if keymatch is None: pattern_dict[key] = None else: trans = PATTERN_DICT[key].get('trans', lambda x: x) pattern_dict[key] = trans(keymatch) volumes_dict[gluster_mgr_vol.qualified] = pattern_dict return volumes_dict @utils.synchronized("glusterfs_native", external=False) def _pop_gluster_vol(self, size=None): """Pick an unbound volume. Do a _fetch_gluster_volumes() first to get the complete list of usable volumes. Keep only the unbound ones (ones that are not yet used to back a share). If size is given, try to pick one which has a size specification (according to the 'size' named group of the volume pattern), and its size is greater-than-or-equal to the given size. Return the volume chosen (in :/ format). """ voldict = self._fetch_gluster_volumes() # calculate the set of unused volumes unused_vols = set(voldict) - self.gluster_used_vols if not unused_vols: # No volumes available for use as share. Warn user. LOG.warning("No unused gluster volumes available for use as " "share! Create share won't be supported unless " "existing shares are deleted or some gluster " "volumes are created with names matching " "'glusterfs_volume_pattern'.") else: LOG.info("Number of gluster volumes in use: " "%(inuse-numvols)s. 
Number of gluster volumes " "available for use as share: %(unused-numvols)s", {'inuse-numvols': len(self.gluster_used_vols), 'unused-numvols': len(unused_vols)}) # volmap is the data structure used to categorize and sort # the unused volumes. It's a nested dictionary of structure # {: } # where is either an integer or None, # is a dictionary of structure {: } # where is a host name (IP address), is a list # of volumes (gluster addresses). volmap = {None: {}} # if both caller has specified size and 'size' occurs as # a parameter in the volume pattern... if size and 'size' in self.volume_pattern_keys: # then this function is used to extract the # size value for a given volume from the voldict... get_volsize = lambda vol: voldict[vol]['size'] # noqa: E731 else: # else just use a stub. get_volsize = lambda vol: None # noqa: E731 for vol in unused_vols: # For each unused volume, we extract the # and values with which it can be inserted # into the volmap, and conditionally perform # the insertion (with the condition being: once # caller specified size and a size indication was # found in the volume name, we require that the # indicated size adheres to caller's spec). volsize = get_volsize(vol) if not volsize or volsize >= size: hostmap = volmap.get(volsize) if not hostmap: hostmap = {} volmap[volsize] = hostmap host = self._glustermanager(vol).host hostvols = hostmap.get(host) if not hostvols: hostvols = [] hostmap[host] = hostvols hostvols.append(vol) if len(volmap) > 1: # volmap has keys apart from the default None, # ie. volumes with sensible and adherent size # indication have been found. Then pick the smallest # of the size values. 
chosen_size = sorted(n for n in volmap.keys() if n)[0] else: chosen_size = None chosen_hostmap = volmap[chosen_size] if not chosen_hostmap: msg = (_("Couldn't find a free gluster volume to use.")) LOG.error(msg) raise exception.GlusterfsException(msg) # From the hosts we choose randomly to tend towards # even distribution of share backing volumes among # Gluster clusters. chosen_host = random.choice(list(chosen_hostmap.keys())) # Within a host's volumes, choose alphabetically first, # to make it predictable. vol = sorted(chosen_hostmap[chosen_host])[0] self.gluster_used_vols.add(vol) return vol @utils.synchronized("glusterfs_native", external=False) def _push_gluster_vol(self, exp_locn): try: self.gluster_used_vols.remove(exp_locn) except KeyError: msg = (_("Couldn't find the share in used list.")) LOG.error(msg) raise exception.GlusterfsException(msg) def _wipe_gluster_vol(self, gluster_mgr): # Create a temporary mount. gluster_export = gluster_mgr.export tmpdir = tempfile.mkdtemp() try: common._mount_gluster_vol(self.driver._execute, gluster_export, tmpdir) except exception.GlusterfsException: shutil.rmtree(tmpdir, ignore_errors=True) raise # Delete the contents of a GlusterFS volume that is temporarily # mounted. # From GlusterFS version 3.7, two directories, '.trashcan' at the root # of the GlusterFS volume and 'internal_op' within the '.trashcan' # directory, are internally created when a GlusterFS volume is started. # GlusterFS does not allow unlink(2) of the two directories. So do not # delete the paths of the two directories, but delete their contents # along with the rest of the contents of the volume. 
srvaddr = gluster_mgr.host_access ignored_dirs = [] if common.numreduct(self.glusterfs_versions[srvaddr]) > (3, 6): ignored_dirs = map(lambda x: os.path.join(tmpdir, *x), [('.trashcan', ), ('.trashcan', 'internal_op')]) ignored_dirs = list(ignored_dirs) ignored_dirs = [ignored_dirs[0], ignored_dirs[1]] try: privsep_os.find( tmpdir, dirs_to_ignore=ignored_dirs, delete=True) except exception.ProcessExecutionError as exc: msg = (_("Error trying to wipe gluster volume. " "gluster_export: %(export)s, Error: %(error)s") % {'export': gluster_export, 'error': exc.stderr}) LOG.error(msg) raise exception.GlusterfsException(msg) finally: # Unmount. common._umount_gluster_vol(tmpdir) shutil.rmtree(tmpdir, ignore_errors=True) def create_share(self, context, share, share_server=None): """Create a share using GlusterFS volume. 1 Manila share = 1 GlusterFS volume. Pick an unused GlusterFS volume for use as a share. """ try: vol = self._pop_gluster_vol(share['size']) except exception.GlusterfsException: msg = ("Error creating share %(share_id)s", {'share_id': share['id']}) LOG.error(msg) raise gmgr = self._glustermanager(vol) export = self.driver._setup_via_manager( {'share': share, 'manager': gmgr}) gmgr.set_vol_option(USER_MANILA_SHARE, share['id']) self.private_storage.update(share['id'], {'volume': vol}) # TODO(deepakcs): Enable quota and set it to the share size. # For native protocol, the export_location should be of the form: # server:/volname LOG.info("export_location sent back from create_share: %s", export) return export def delete_share(self, context, share, share_server=None): """Delete a share on the GlusterFS volume. 1 Manila share = 1 GlusterFS volume. Put the gluster volume back in the available list. """ gmgr = self._share_manager(share) if not gmgr: # Share does not have a record in private storage. # It means create_share{,_from_snapshot} did not # succeed(*). In that case we should not obstruct # share deletion, so we just return doing nothing. 
# # (*) or we have a database corruption but then # basically does not matter what we do here return clone_of = gmgr.get_vol_option(USER_CLONED_FROM) or '' try: if UUID_RE.search(clone_of): # We take responsibility for the lifecycle # management of those volumes which were # created by us (as snapshot clones) ... gmgr.gluster_call('volume', 'delete', gmgr.volume) else: # ... for volumes that come from the pool, we return # them to the pool (after some purification rituals) self._wipe_gluster_vol(gmgr) gmgr.set_vol_option(USER_MANILA_SHARE, 'NONE') gmgr.set_vol_option('nfs.disable', 'on') # When deleting the share instance, we may need to # update'self.gluster_used_vols' again self.gluster_used_vols.add(gmgr.qualified) self._push_gluster_vol(gmgr.qualified) except exception.GlusterfsException: msg = ("Error during delete_share request for " "share %(share_id)s", {'share_id': share['id']}) LOG.error(msg) raise self.private_storage.delete(share['id']) # TODO(deepakcs): Disable quota. @staticmethod def _find_actual_backend_snapshot_name(gluster_mgr, snapshot): args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script') out, err = gluster_mgr.gluster_call( *args, log=("Retrieving snapshot list")) snapgrep = list(filter(lambda x: snapshot['id'] in x, out.split("\n"))) if len(snapgrep) != 1: msg = (_("Failed to identify backing GlusterFS object " "for snapshot %(snap_id)s of share %(share_id)s: " "a single candidate was expected, %(found)d was found.") % {'snap_id': snapshot['id'], 'share_id': snapshot['share_id'], 'found': len(snapgrep)}) raise exception.GlusterfsException(msg) backend_snapshot_name = snapgrep[0] return backend_snapshot_name def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): old_gmgr = self._share_manager(snapshot['share_instance']) # Snapshot clone feature in GlusterFS server essential to support this # API is available in GlusterFS server versions 3.7 and higher. So do # a version check. 
vers = self.glusterfs_versions[old_gmgr.host_access] minvers = (3, 7) if common.numreduct(vers) < minvers: minvers_str = '.'.join(str(c) for c in minvers) vers_str = '.'.join(vers) msg = (_("GlusterFS version %(version)s on server %(server)s does " "not support creation of shares from snapshot. " "minimum requirement: %(minversion)s") % {'version': vers_str, 'server': old_gmgr.host, 'minversion': minvers_str}) LOG.error(msg) raise exception.GlusterfsException(msg) # Clone the snapshot. The snapshot clone, a new GlusterFS volume # would serve as a share. backend_snapshot_name = self._find_actual_backend_snapshot_name( old_gmgr, snapshot) volume = ''.join(['manila-', share['id']]) # Query the status of the snapshot, if it is Started, the activate # step will be skipped args = ('snapshot', 'info', backend_snapshot_name) out, err = old_gmgr.gluster_call( *args, log=("Query the status of the snapshot")) gfs_snapshot_state = "" for gfs_snapshot_info in out.split('\t'): gfs_snapshot_states = re.search(r'Started', gfs_snapshot_info, re.I) if gfs_snapshot_states: gfs_snapshot_state = "Started" if gfs_snapshot_state == "Started": args_tuple = (('snapshot', 'clone', volume, backend_snapshot_name), ('volume', 'start', volume)) else: args_tuple = (('snapshot', 'activate', backend_snapshot_name, 'force', '--mode=script'), ('snapshot', 'clone', volume, backend_snapshot_name), ('volume', 'start', volume)) for args in args_tuple: out, err = old_gmgr.gluster_call( *args, log=("Creating share from snapshot")) # Get a manager for the new volume/share. 
comp_vol = old_gmgr.components.copy() comp_vol.update({'volume': volume}) gmgr = self._glustermanager(comp_vol) export = self.driver._setup_via_manager( {'share': share, 'manager': gmgr}, {'share': snapshot['share_instance'], 'manager': old_gmgr}) export = [export, ] argseq = (('set', [USER_CLONED_FROM, snapshot['share_id']]), ('set', [USER_MANILA_SHARE, share['id']])) for op, opargs in argseq: args = ['volume', op, gmgr.volume] + opargs gmgr.gluster_call(*args, log=("Creating share from snapshot")) self.gluster_used_vols.add(gmgr.qualified) self.private_storage.update(share['id'], {'volume': gmgr.qualified}) return export def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" gluster_mgr = self._share_manager(snapshot['share']) if gluster_mgr.qualified in self.gluster_nosnap_vols_dict: opret, operrno = -1, 0 operrstr = self.gluster_nosnap_vols_dict[gluster_mgr.qualified] else: args = ('--xml', 'snapshot', 'create', 'manila-' + snapshot['id'], gluster_mgr.volume) out, err = gluster_mgr.gluster_call( *args, log=("Retrieving volume info")) if not out: raise exception.GlusterfsException( 'gluster volume info %s: no data received' % gluster_mgr.volume ) outxml = etree.fromstring(out) opret = int(common.volxml_get(outxml, 'opRet')) operrno = int(common.volxml_get(outxml, 'opErrno')) operrstr = common.volxml_get(outxml, 'opErrstr', default=None) if opret == -1: vers = self.glusterfs_versions[gluster_mgr.host_access] if common.numreduct(vers) > (3, 6): # This logic has not yet been implemented in GlusterFS 3.6 if operrno == 0: self.gluster_nosnap_vols_dict[ gluster_mgr.qualified] = operrstr msg = _("Share %(share_id)s does not support snapshots: " "%(errstr)s.") % {'share_id': snapshot['share_id'], 'errstr': operrstr} LOG.error(msg) raise exception.ShareSnapshotNotSupported(msg) raise exception.GlusterfsException( _("Creating snapshot for share %(share_id)s failed " "with %(errno)d: %(errstr)s") % { 'share_id': snapshot['share_id'], 
'errno': operrno, 'errstr': operrstr}) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" gluster_mgr = self._share_manager(snapshot['share']) backend_snapshot_name = self._find_actual_backend_snapshot_name( gluster_mgr, snapshot) args = ('--xml', 'snapshot', 'delete', backend_snapshot_name, '--mode=script') out, err = gluster_mgr.gluster_call( *args, log=("Error deleting snapshot")) if not out: raise exception.GlusterfsException( _('gluster snapshot delete %s: no data received') % gluster_mgr.volume ) outxml = etree.fromstring(out) gluster_mgr.xml_response_check(outxml, args[1:]) def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported.""" gmgr = self._share_manager(share) self.gluster_used_vols.add(gmgr.qualified) gmgr.set_vol_option(USER_MANILA_SHARE, share['id']) # Debt... def manage_existing(self, share, driver_options): raise NotImplementedError() def unmanage(self, share): raise NotImplementedError() def extend_share(self, share, new_size, share_server=None): raise NotImplementedError() def shrink_share(self, share, new_size, share_server=None): raise NotImplementedError() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9136717 manila-21.0.0/manila/share/drivers/hdfs/0000775000175000017500000000000000000000000020023 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/hdfs/__init__.py0000664000175000017500000000000000000000000022122 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/hdfs/hdfs_native.py0000664000175000017500000004155500000000000022701 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Intel, Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HDFS native protocol (hdfs) driver for manila shares. Manila share is a directory in HDFS. And this share does not use service VM instance (share server). The instance directly talks to the HDFS cluster. The initial version only supports single namenode and flat network. Configuration Requirements: To enable access control, HDFS file system must have ACLs enabled. """ import math import os import shlex import socket from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import units from manila import exception from manila.i18n import _ from manila.share import driver from manila import ssh_utils from manila import utils LOG = log.getLogger(__name__) hdfs_native_share_opts = [ cfg.HostAddressOpt('hdfs_namenode_ip', help='The IP of the HDFS namenode.'), cfg.PortOpt('hdfs_namenode_port', default=9000, help='The port of HDFS namenode service.'), cfg.PortOpt('hdfs_ssh_port', default=22, help='HDFS namenode SSH port.'), cfg.StrOpt('hdfs_ssh_name', help='HDFS namenode ssh login name.'), cfg.StrOpt('hdfs_ssh_pw', secret=True, help='HDFS namenode SSH login password, ' 'This parameter is not necessary, if ' '\'hdfs_ssh_private_key\' is configured.'), cfg.StrOpt('hdfs_ssh_private_key', help='Path to HDFS namenode SSH private ' 'key for login.'), ] CONF = cfg.CONF CONF.register_opts(hdfs_native_share_opts) class HDFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver): """HDFS 
Share Driver. Executes commands relating to shares. API version history: 1.0 - Initial Version """ def __init__(self, *args, **kwargs): super(HDFSNativeShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(hdfs_native_share_opts) self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'HDFS-Native' self.ssh_connections = {} self._hdfs_execute = None self._hdfs_bin = None self._hdfs_base_path = None def do_setup(self, context): """Do initialization while the share driver starts.""" super(HDFSNativeShareDriver, self).do_setup(context) host = self.configuration.hdfs_namenode_ip local_hosts = socket.gethostbyname_ex(socket.gethostname())[2] if host in local_hosts: self._hdfs_execute = self._hdfs_local_execute else: self._hdfs_execute = self._hdfs_remote_execute self._hdfs_bin = 'hdfs' self._hdfs_base_path = ( 'hdfs://' + self.configuration.hdfs_namenode_ip + ':' + str(self.configuration.hdfs_namenode_port)) def _hdfs_local_execute(self, *cmd, **kwargs): if 'run_as_root' not in kwargs: kwargs.update({'run_as_root': False}) return utils.execute(*cmd, **kwargs) def _hdfs_remote_execute(self, *cmd, **kwargs): host = self.configuration.hdfs_namenode_ip check_exit_code = kwargs.pop('check_exit_code', False) return self._run_ssh(host, cmd, check_exit_code) def _run_ssh(self, host, cmd_list, check_exit_code=False): command = ' '.join(shlex.quote(cmd_arg) for cmd_arg in cmd_list) connection = self.ssh_connections.get(host) if not connection: hdfs_ssh_name = self.configuration.hdfs_ssh_name password = self.configuration.hdfs_ssh_pw privatekey = self.configuration.hdfs_ssh_private_key hdfs_ssh_port = self.configuration.hdfs_ssh_port ssh_conn_timeout = self.configuration.ssh_conn_timeout min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn ssh_pool = ssh_utils.SSHPool(host, hdfs_ssh_port, ssh_conn_timeout, hdfs_ssh_name, password=password, privatekey=privatekey, min_size=min_size, 
max_size=max_size) ssh = ssh_pool.create() self.ssh_connections[host] = (ssh_pool, ssh) else: ssh_pool, ssh = connection if not ssh.get_transport().is_active(): ssh_pool.remove(ssh) ssh = ssh_pool.create() self.ssh_connections[host] = (ssh_pool, ssh) try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: msg = (_('Error running SSH command: %(cmd)s. ' 'Error: %(excmsg)s.') % {'cmd': command, 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) def _set_share_size(self, share, size=None): share_dir = '/' + share['name'] if not size: sizestr = str(share['size']) + 'g' else: sizestr = str(size) + 'g' try: self._hdfs_execute(self._hdfs_bin, 'dfsadmin', '-setSpaceQuota', sizestr, share_dir) except exception.ProcessExecutionError as e: msg = (_('Failed to set space quota for the ' 'share %(sharename)s. Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) def _create_share(self, share): """Creates a share.""" if share['share_proto'].lower() != 'hdfs': msg = _('Only HDFS protocol supported!') LOG.error(msg) raise exception.HDFSException(msg) share_dir = '/' + share['name'] try: self._hdfs_execute(self._hdfs_bin, 'dfs', '-mkdir', share_dir) except exception.ProcessExecutionError as e: msg = (_('Failed to create directory in hdfs for the ' 'share %(sharename)s. Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) # set share size self._set_share_size(share) try: self._hdfs_execute(self._hdfs_bin, 'dfsadmin', '-allowSnapshot', share_dir) except exception.ProcessExecutionError as e: msg = (_('Failed to allow snapshot for the ' 'share %(sharename)s. 
Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) def _get_share_path(self, share): """Return share path on storage provider.""" return os.path.join(self._hdfs_base_path, share['name']) def _get_snapshot_path(self, snapshot): """Return snapshot path on storage provider.""" snapshot_dir = '.snapshot' return os.path.join('/', snapshot['share_name'], snapshot_dir, snapshot['name']) def get_network_allocations_number(self): return 0 def create_share(self, context, share, share_server=None): """Create a HDFS directory which acted as a share.""" self._create_share(share) return self._get_share_path(share) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Creates a snapshot.""" self._create_share(share) share_path = '/' + share['name'] snapshot_path = self._get_snapshot_path(snapshot) try: # check if the directory is empty (out, __) = self._hdfs_execute( self._hdfs_bin, 'dfs', '-ls', snapshot_path) # only copy files when the snapshot directory is not empty if out: copy_path = snapshot_path + "/*" cmd = [self._hdfs_bin, 'dfs', '-cp', copy_path, share_path] self._hdfs_execute(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to create share %(sharename)s from ' 'snapshot %(snapshotname)s. Error: %(excmsg)s.') % {'sharename': share['name'], 'snapshotname': snapshot['name'], 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) return self._get_share_path(share) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" share_dir = '/' + snapshot['share_name'] snapshot_name = snapshot['name'] cmd = [self._hdfs_bin, 'dfs', '-createSnapshot', share_dir, snapshot_name] try: self._hdfs_execute(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to create snapshot %(snapshotname)s for ' 'the share %(sharename)s. 
Error: %(excmsg)s.') % {'snapshotname': snapshot_name, 'sharename': snapshot['share_name'], 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) def delete_share(self, context, share, share_server=None): """Deletes share storage.""" share_dir = '/' + share['name'] cmd = [self._hdfs_bin, 'dfs', '-rm', '-r', share_dir] try: self._hdfs_execute(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to delete share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': share['name'], 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" share_dir = '/' + snapshot['share_name'] cmd = [self._hdfs_bin, 'dfs', '-deleteSnapshot', share_dir, snapshot['name']] try: self._hdfs_execute(*cmd) except exception.ProcessExecutionError as e: msg = (_('Failed to delete snapshot %(snapshotname)s. ' 'Error: %(excmsg)s.') % {'snapshotname': snapshot['name'], 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) def ensure_share(self, context, share, share_server=None): """Ensure the storage are exported.""" def allow_access(self, context, share, access, share_server=None): """Allows access to the share for a given user.""" if access['access_type'] != 'user': msg = _("Only 'user' access type allowed!") LOG.error(msg) raise exception.InvalidShareAccess(msg) # Note(jun): For directories in HDFS, the x permission is # required to access a child of the directory. 
if access['access_level'] == 'rw': access_level = 'rwx' elif access['access_level'] == 'ro': access_level = 'r-x' else: msg = (_('The access level %(accesslevel)s was unsupported.') % {'accesslevel': access['access_level']}) LOG.error(msg) raise exception.InvalidShareAccess(msg) share_dir = '/' + share['name'] user_access = ':'.join([access['access_type'], access['access_to'], access_level]) cmd = [self._hdfs_bin, 'dfs', '-setfacl', '-m', '-R', user_access, share_dir] try: (__, out) = self._hdfs_execute(*cmd, check_exit_code=True) except exception.ProcessExecutionError as e: msg = (_('Failed to set ACL of share %(sharename)s for ' 'user: %(username)s' 'Error: %(excmsg)s.') % {'sharename': share['name'], 'username': access['access_to'], 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) def deny_access(self, context, share, access, share_server=None): """Denies the access to the share for a given user.""" share_dir = '/' + share['name'] access_name = ':'.join([access['access_type'], access['access_to']]) cmd = [self._hdfs_bin, 'dfs', '-setfacl', '-x', '-R', access_name, share_dir] try: (__, out) = self._hdfs_execute(*cmd, check_exit_code=True) except exception.ProcessExecutionError as e: msg = (_('Failed to deny ACL of share %(sharename)s for ' 'user: %(username)s' 'Error: %(excmsg)s.') % {'sharename': share['name'], 'username': access['access_to'], 'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) def extend_share(self, share, new_size, share_server=None): """Extend share storage.""" self._set_share_size(share, new_size) def _check_hdfs_state(self): try: (out, __) = self._hdfs_execute(self._hdfs_bin, 'fsck', '/') except exception.ProcessExecutionError as e: msg = (_('Failed to check hdfs state. 
Error: %(excmsg)s.') % {'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) if 'HEALTHY' in out: return True else: return False def check_for_setup_error(self): """Return an error if the prerequisites are met.""" if not self.configuration.hdfs_namenode_ip: msg = _('Not specify the hdfs cluster yet! ' 'Add the ip of hdfs namenode in the ' 'hdfs_namenode_ip configuration parameter.') LOG.error(msg) raise exception.HDFSException(msg) if not self._check_hdfs_state(): msg = _('HDFS is not in healthy state.') LOG.error(msg) raise exception.HDFSException(msg) def _get_available_capacity(self): """Calculate available space on path.""" try: (out, __) = self._hdfs_execute(self._hdfs_bin, 'dfsadmin', '-report') except exception.ProcessExecutionError as e: msg = (_('Failed to check available capacity for hdfs.' 'Error: %(excmsg)s.') % {'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) lines = out.splitlines() try: total = int(lines[1].split()[2]) free = int(lines[2].split()[2]) except (IndexError, ValueError) as e: msg = (_('Failed to get hdfs capacity info. ' 'Error: %(excmsg)s.') % {'excmsg': str(e)}) LOG.error(msg) raise exception.HDFSException(msg) return total, free def _update_share_stats(self): """Retrieves stats info of share directories group.""" data = dict(share_backend_name=self.backend_name, storage_protocol='HDFS', reserved_percentage=self.configuration. reserved_share_percentage, reserved_snapshot_percentage=self.configuration. reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage, reserved_share_extend_percentage=self.configuration. 
reserved_share_extend_percentage or self.configuration.reserved_share_percentage) total, free = self._get_available_capacity() data['total_capacity_gb'] = math.ceil(total / units.Gi) data['free_capacity_gb'] = math.ceil(free / units.Gi) super(HDFSNativeShareDriver, self)._update_share_stats(data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/helpers.py0000664000175000017500000006174200000000000021125 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import ipaddress import os import re from oslo_log import log from manila.common import constants as const from manila import exception from manila.i18n import _ from manila import utils LOG = log.getLogger(__name__) class NASHelperBase(object): """Interface to work with share.""" def __init__(self, execute, ssh_execute, config_object): self.configuration = config_object self._execute = execute self._ssh_exec = ssh_execute def init_helper(self, server): pass def create_exports(self, server, share_name, recreate=False): """Create new exports, delete old ones if exist.""" raise NotImplementedError() def remove_exports(self, server, share_name): """Remove exports.""" raise NotImplementedError() def configure_access(self, server, share_name): """Configure server before allowing access.""" pass def update_access(self, server, share_name, access_rules, add_rules, delete_rules): """Update access rules for given share. This driver has two different behaviors according to parameters: 1. Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' shall be empty. Previously existing access rules are cleared and then added back according to 'access_rules'. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Rules in 'access_rules' are ignored and only rules from 'add_rules' and 'delete_rules' are applied. :param server: None or Share server's backend details :param share_name: Share's path according to id. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. 
""" raise NotImplementedError() @staticmethod def _verify_server_has_public_address(server): if 'public_address' in server: pass elif 'public_addresses' in server: if not isinstance(server['public_addresses'], list): raise exception.ManilaException(_("public_addresses must be " "a list")) else: raise exception.ManilaException( _("Can not get public_address(es) for generation of export.")) def _get_export_location_template(self, export_location_or_path): """Returns template of export location. Example for NFS: %s:/path/to/share Example for CIFS: \\\\%s\\cifs_share_name """ raise NotImplementedError() def get_exports_for_share(self, server, export_location_or_path): """Returns list of exports based on server info.""" self._verify_server_has_public_address(server) export_location_template = self._get_export_location_template( export_location_or_path) export_locations = [] if 'public_addresses' in server: pairs = list(map(lambda addr: (addr, False), server['public_addresses'])) else: pairs = [(server['public_address'], False)] # NOTE(vponomaryov): # Generic driver case: 'admin_ip' exists only in case of DHSS=True # mode and 'ip' exists in case of DHSS=False mode. # Use one of these for creation of export location for service needs. service_address = server.get("admin_ip", server.get("ip")) if service_address: pairs.append((service_address, True)) for ip, is_admin in pairs: export_locations.append({ "path": export_location_template % ip, "is_admin_only": is_admin, "metadata": { # TODO(vponomaryov): remove this fake metadata when # proper appears. 
"export_location_metadata_example": "example", }, }) return export_locations def get_share_path_by_export_location(self, server, export_location): """Returns share path by its export location.""" raise NotImplementedError() def disable_access_for_maintenance(self, server, share_name): """Disables access to share to perform maintenance operations.""" def restore_access_after_maintenance(self, server, share_name): """Enables access to share after maintenance operations were done.""" @staticmethod def validate_access_rules(access_rules, allowed_types, allowed_levels): """Validates access rules according to access_type and access_level. :param access_rules: List of access rules to be validated. :param allowed_types: tuple of allowed type values. :param allowed_levels: tuple of allowed level values. """ for access in (access_rules or []): access_type = access['access_type'] access_level = access['access_level'] if access_type not in allowed_types: reason = _("Only %s access type allowed.") % ( ', '.join(tuple(["'%s'" % x for x in allowed_types]))) raise exception.InvalidShareAccess(reason=reason) if access_level not in allowed_levels: raise exception.InvalidShareAccessLevel(level=access_level) def _get_maintenance_file_path(self, share_name): return os.path.join(self.configuration.share_mount_path, "%s.maintenance" % share_name) def nfs_synchronized(f): def wrapped_func(self, *args, **kwargs): key = "nfs-%s" % args[0].get("lock_name", args[0]["instance_id"]) # NOTE(vponomaryov): 'external' lock is required for DHSS=False # mode of LVM and Generic drivers, that may have lots of # driver instances on single host. 
@utils.synchronized(key, external=True) def source_func(self, *args, **kwargs): return f(self, *args, **kwargs) return source_func(self, *args, **kwargs) return wrapped_func def escaped_address(address): addr = ipaddress.ip_address(str(address)) if addr.version == 4: return str(addr) else: return '[%s]' % addr class NFSHelper(NASHelperBase): """Interface to work with share.""" def create_exports(self, server, share_name, recreate=False): path = os.path.join(self.configuration.share_mount_path, share_name) server_copy = copy.copy(server) public_addresses = [] if 'public_addresses' in server_copy: for address in server_copy['public_addresses']: public_addresses.append( escaped_address(address)) server_copy['public_addresses'] = public_addresses for t in ['public_address', 'admin_ip', 'ip']: address = server_copy.get(t) if address is not None: server_copy[t] = escaped_address(address) return self.get_exports_for_share(server_copy, path) def init_helper(self, server): try: self._ssh_exec(server, ['sudo', 'exportfs']) except exception.ProcessExecutionError as e: if 'command not found' in e.stderr: raise exception.ManilaException( _('NFS server is not installed on %s') % server['instance_id']) LOG.error(e.stderr) def remove_exports(self, server, share_name): """Remove exports.""" @nfs_synchronized def update_access(self, server, share_name, access_rules, add_rules, delete_rules): """Update access rules for given share. Please refer to base class for a more in-depth description. 
""" local_path = os.path.join(self.configuration.share_mount_path, share_name) out, err = self._ssh_exec(server, ['sudo', 'exportfs']) # Recovery mode if not (add_rules or delete_rules): self.validate_access_rules( access_rules, ('ip',), (const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW)) hosts = self.get_host_list(out, local_path) for host in hosts: parsed_host = self._get_parsed_address_or_cidr(host) self._ssh_exec(server, ['sudo', 'exportfs', '-u', ':'.join((parsed_host, local_path))]) self._sync_nfs_temp_and_perm_files(server) for access in access_rules: rules_options = '%s,no_subtree_check,no_root_squash' access_to = self._get_parsed_address_or_cidr( access['access_to']) self._ssh_exec( server, ['sudo', 'exportfs', '-o', rules_options % access['access_level'], ':'.join((access_to, local_path))]) self._sync_nfs_temp_and_perm_files(server) # Adding/Deleting specific rules else: self.validate_access_rules( add_rules, ('ip',), (const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW)) for access in delete_rules: try: self.validate_access_rules( [access], ('ip',), (const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW)) except (exception.InvalidShareAccess, exception.InvalidShareAccessLevel): LOG.warning( "Unsupported access level %(level)s or access type " "%(type)s, skipping removal of access rule to " "%(to)s.", {'level': access['access_level'], 'type': access['access_type'], 'to': access['access_to']}) continue access_to = self._get_parsed_address_or_cidr( access['access_to']) try: self._ssh_exec(server, ['sudo', 'exportfs', '-u', ':'.join((access_to, local_path))]) except exception.ProcessExecutionError as e: if "could not find" in e.stderr.lower(): LOG.debug( "Client/s with IP address/es %(host)s did not " "have access to %(share)s. 
Nothing to deny.", {'host': access_to, 'share': share_name}) else: raise if delete_rules: self._sync_nfs_temp_and_perm_files(server) for access in add_rules: access_to = self._get_parsed_address_or_cidr( access['access_to']) found_item = re.search( re.escape(local_path) + r'[\s\n]*' + re.escape(access_to), out) if found_item is not None: LOG.warning("Access rule %(type)s:%(to)s already " "exists for share %(name)s", { 'to': access['access_to'], 'type': access['access_type'], 'name': share_name }) else: rules_options = '%s,no_subtree_check,no_root_squash' self._ssh_exec( server, ['sudo', 'exportfs', '-o', rules_options % access['access_level'], ':'.join((access_to, local_path))]) if add_rules: self._sync_nfs_temp_and_perm_files(server) @staticmethod def _get_parsed_address_or_cidr(access_to): network = ipaddress.ip_network(str(access_to)) mask_length = network.prefixlen address = str(network.network_address) if mask_length == 0: # Special case because Linux exports don't support /0 netmasks return '*' if network.version == 4: if mask_length == 32: return address return '%s/%s' % (address, mask_length) if mask_length == 128: return "[%s]" % address return "[%s]/%s" % (address, mask_length) @staticmethod def get_host_list(output, local_path): entries = [] output = output.replace('\n\t\t', ' ') lines = output.split('\n') for line in lines: items = line.split(' ') if local_path == items[0]: entries.append(items[1]) # exportfs may print"" instead of "*" for host entries = ["*" if item == "" else item for item in entries] return entries def _sync_nfs_temp_and_perm_files(self, server): """Sync changes of exports with permanent NFS config file. This is required to ensure, that after share server reboot, exports still exist. 
""" sync_cmd = [ 'sudo', 'cp', const.NFS_EXPORTS_FILE_TEMP, const.NFS_EXPORTS_FILE ] self._ssh_exec(server, sync_cmd) self._ssh_exec(server, ['sudo', 'exportfs', '-a']) out, _ = self._ssh_exec( server, ['sudo', 'systemctl', 'is-active', 'nfs-kernel-server'], check_exit_code=False) if "inactive" in out: self._ssh_exec( server, ['sudo', 'systemctl', 'restart', 'nfs-kernel-server']) def _get_export_location_template(self, export_location_or_path): path = export_location_or_path.split(':')[-1] return '%s:' + path def get_share_path_by_export_location(self, server, export_location): return export_location.split(':')[-1] @nfs_synchronized def disable_access_for_maintenance(self, server, share_name): maintenance_file = self._get_maintenance_file_path(share_name) backup_exports = [ 'cat', const.NFS_EXPORTS_FILE, '|', 'grep', share_name, '|', 'sudo', 'tee', maintenance_file ] self._ssh_exec(server, backup_exports) local_path = os.path.join(self.configuration.share_mount_path, share_name) out, err = self._ssh_exec(server, ['sudo', 'exportfs']) hosts = self.get_host_list(out, local_path) for host in hosts: self._ssh_exec(server, ['sudo', 'exportfs', '-u', '"{}"'.format(':'.join((host, local_path)))]) self._sync_nfs_temp_and_perm_files(server) @nfs_synchronized def restore_access_after_maintenance(self, server, share_name): maintenance_file = self._get_maintenance_file_path(share_name) restore_exports = [ 'cat', maintenance_file, '|', 'sudo', 'tee', '-a', const.NFS_EXPORTS_FILE, '&&', 'sudo', 'exportfs', '-r', '&&', 'sudo', 'rm', '-f', maintenance_file ] self._ssh_exec(server, restore_exports) class CIFSHelperBase(NASHelperBase): @staticmethod def _get_share_group_name_from_export_location(export_location): if '/' in export_location and '\\' in export_location: pass elif export_location.startswith('\\\\'): return export_location.split('\\')[-1] elif export_location.startswith('//'): return export_location.split('/')[-1] msg = _("Got incorrect CIFS export location '%s'.") % 
export_location raise exception.InvalidShare(reason=msg) def _get_export_location_template(self, export_location_or_path): group_name = self._get_share_group_name_from_export_location( export_location_or_path) return ('\\\\%s' + ('\\%s' % group_name)) class CIFSHelperIPAccess(CIFSHelperBase): """Manage shares in samba server by net conf tool. Class provides functionality to operate with CIFS shares. Samba server should be configured to use registry as configuration backend to allow dynamically share managements. This class allows to define access to shares by IPs with RW access level. """ def __init__(self, *args): super(CIFSHelperIPAccess, self).__init__(*args) self.parameters = { 'browseable': 'yes', 'create mask': '0755', 'hosts deny': '0.0.0.0/0', # deny all by default 'hosts allow': '127.0.0.1', 'read only': 'no', } def init_helper(self, server): # This is smoke check that we have required dependency self._ssh_exec(server, ['sudo', 'net', 'conf', 'list']) def create_exports(self, server, share_name, recreate=False): """Create share at samba server.""" share_path = os.path.join(self.configuration.share_mount_path, share_name) create_cmd = [ 'sudo', 'net', 'conf', 'addshare', share_name, share_path, 'writeable=y', 'guest_ok=y', ] try: self._ssh_exec( server, ['sudo', 'net', 'conf', 'showshare', share_name, ]) except exception.ProcessExecutionError: # Share does not exist, create it try: self._ssh_exec(server, create_cmd) except Exception: msg = _("Could not create CIFS export %s.") % share_name LOG.exception(msg) raise exception.ManilaException(msg) else: # Share exists if recreate: self._ssh_exec( server, ['sudo', 'net', 'conf', 'delshare', share_name, ]) try: self._ssh_exec(server, create_cmd) except Exception: msg = _("Could not create CIFS export %s.") % share_name LOG.exception(msg) raise exception.ManilaException(msg) else: msg = _('Share section %s already defined.') % share_name raise exception.ShareBackendException(msg=msg) for param, value in 
self.parameters.items(): self._ssh_exec(server, ['sudo', 'net', 'conf', 'setparm', share_name, param, value]) return self.get_exports_for_share(server, '\\\\%s\\' + share_name) def remove_exports(self, server, share_name): """Remove share definition from samba server.""" try: self._ssh_exec( server, ['sudo', 'net', 'conf', 'delshare', share_name]) except exception.ProcessExecutionError as e: LOG.warning("Caught error trying delete share: %(error)s, try" "ing delete it forcibly.", {'error': e.stderr}) self._ssh_exec(server, ['sudo', 'smbcontrol', 'all', 'close-share', share_name]) def update_access(self, server, share_name, access_rules, add_rules, delete_rules): """Update access rules for given share. Please refer to base class for a more in-depth description. For this specific implementation, add_rules and delete_rules parameters are not used. """ hosts = [] self.validate_access_rules( access_rules, ('ip',), (const.ACCESS_LEVEL_RW,)) for access in access_rules: hosts.append(access['access_to']) self._set_allow_hosts(server, hosts, share_name) def _get_allow_hosts(self, server, share_name): (out, _) = self._ssh_exec(server, ['sudo', 'net', 'conf', 'getparm', share_name, 'hosts allow']) return out.split() def _set_allow_hosts(self, server, hosts, share_name): value = ' '.join(hosts) or ' ' self._ssh_exec(server, ['sudo', 'net', 'conf', 'setparm', share_name, 'hosts allow', value]) def get_share_path_by_export_location(self, server, export_location): # Get name of group that contains share data on CIFS server group_name = self._get_share_group_name_from_export_location( export_location) # Get parameter 'path' from group that belongs to current share (out, __) = self._ssh_exec( server, ['sudo', 'net', 'conf', 'getparm', group_name, 'path']) # Remove special symbols from response and return path return out.strip() def disable_access_for_maintenance(self, server, share_name): maintenance_file = self._get_maintenance_file_path(share_name) allowed_hosts = " 
".join(self._get_allow_hosts(server, share_name)) backup_exports = [ 'echo', "'%s'" % allowed_hosts, '|', 'sudo', 'tee', maintenance_file ] self._ssh_exec(server, backup_exports) self._set_allow_hosts(server, [], share_name) self._kick_out_users(server, share_name) def _kick_out_users(self, server, share_name): """Kick out all users of share""" (out, _) = self._ssh_exec(server, ['sudo', 'smbstatus', '-S']) shares = [] header = True regexp = r"^(?P[^ ]+)\s+(?P[0-9]+)\s+(?P[^ ]+).*" for line in out.splitlines(): line = line.strip() if not header and line: match = re.match(regexp, line) if match: shares.append(match.groupdict()) else: raise exception.ShareBackendException( msg="Failed to obtain smbstatus for %s!" % share_name) elif line.startswith('----'): header = False to_kill = [s['pid'] for s in shares if share_name == s['share'] or share_name is None] if to_kill: self._ssh_exec(server, ['sudo', 'kill', '-15'] + to_kill) def restore_access_after_maintenance(self, server, share_name): maintenance_file = self._get_maintenance_file_path(share_name) (exports, __) = self._ssh_exec(server, ['cat', maintenance_file]) self._set_allow_hosts(server, exports.split(), share_name) self._ssh_exec(server, ['sudo', 'rm', '-f', maintenance_file]) class CIFSHelperUserAccess(CIFSHelperIPAccess): """Manage shares in samba server by net conf tool. Class provides functionality to operate with CIFS shares. Samba server should be configured to use registry as configuration backend to allow dynamically share managements. This class allows to define access to shares by usernames with either RW or RO access levels. """ def __init__(self, *args): super(CIFSHelperUserAccess, self).__init__(*args) self.parameters = { 'browseable': 'yes', 'create mask': '0755', 'hosts allow': '0.0.0.0/0', 'read only': 'no', } def update_access(self, server, share_name, access_rules, add_rules, delete_rules): """Update access rules for given share. Please refer to base class for a more in-depth description. 
For this specific implementation, add_rules and delete_rules parameters are not used. """ all_users_rw = [] all_users_ro = [] self.validate_access_rules( access_rules, ('user',), (const.ACCESS_LEVEL_RO, const.ACCESS_LEVEL_RW)) for access in access_rules: if access['access_level'] == const.ACCESS_LEVEL_RW: all_users_rw.append(access['access_to']) else: all_users_ro.append(access['access_to']) self._set_valid_users( server, all_users_rw, share_name, const.ACCESS_LEVEL_RW) self._set_valid_users( server, all_users_ro, share_name, const.ACCESS_LEVEL_RO) def _get_conf_param(self, access_level): if access_level == const.ACCESS_LEVEL_RW: return 'valid users' else: return 'read list' def _set_valid_users(self, server, users, share_name, access_level): value = ' '.join(users) param = self._get_conf_param(access_level) self._ssh_exec(server, ['sudo', 'net', 'conf', 'setparm', share_name, param, value]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9136717 manila-21.0.0/manila/share/drivers/hitachi/0000775000175000017500000000000000000000000020510 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/hitachi/__init__.py0000664000175000017500000000000000000000000022607 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9136717 manila-21.0.0/manila/share/drivers/hitachi/hnas/0000775000175000017500000000000000000000000021441 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/hitachi/hnas/__init__.py0000664000175000017500000000000000000000000023540 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/share/drivers/hitachi/hnas/driver.py0000664000175000017500000015416200000000000023317 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import driver from manila.share import utils LOG = log.getLogger(__name__) hitachi_hnas_opts = [ cfg.HostAddressOpt('hitachi_hnas_ip', help="HNAS management interface IP for communication " "between Manila controller and HNAS."), cfg.StrOpt('hitachi_hnas_user', help="HNAS username Base64 String in order to perform tasks " "such as create file-systems and network interfaces."), cfg.StrOpt('hitachi_hnas_password', secret=True, help="HNAS user password. Required only if private key is not " "provided."), cfg.IntOpt('hitachi_hnas_evs_id', help="Specify which EVS this backend is assigned to."), cfg.HostAddressOpt('hitachi_hnas_evs_ip', help="Specify IP for mounting shares."), cfg.HostAddressOpt('hitachi_hnas_admin_network_ip', help="Specify IP for mounting shares in the Admin " "network."), cfg.StrOpt('hitachi_hnas_file_system_name', help="Specify file-system name for creating shares."), cfg.StrOpt('hitachi_hnas_ssh_private_key', secret=True, help="RSA/DSA private key value used to connect into HNAS. 
" "Required only if password is not provided."), cfg.HostAddressOpt('hitachi_hnas_cluster_admin_ip0', help="The IP of the clusters admin node. Only set in " "HNAS multinode clusters."), cfg.IntOpt('hitachi_hnas_stalled_job_timeout', default=30, help="The time (in seconds) to wait for stalled HNAS jobs " "before aborting."), cfg.StrOpt('hitachi_hnas_driver_helper', default='manila.share.drivers.hitachi.hnas.ssh.HNASSSHBackend', help="Python class to be used for driver helper."), cfg.BoolOpt('hitachi_hnas_allow_cifs_snapshot_while_mounted', default=False, help="By default, CIFS snapshots are not allowed to be taken " "when the share has clients connected because consistent " "point-in-time replica cannot be guaranteed for all " "files. Enabling this might cause inconsistent snapshots " "on CIFS shares."), ] CONF = cfg.CONF CONF.register_opts(hitachi_hnas_opts) class HitachiHNASDriver(driver.ShareDriver): """Manila HNAS Driver implementation. Driver versions:: 1.0.0 - Initial Version. 2.0.0 - Refactoring, bugfixes, implemented Share Shrink and Update Access. 3.0.0 - New driver location, implemented support for CIFS protocol. 3.1.0 - Added admin network export location support. 4.0.0 - Added mountable snapshots, revert-to-snapshot and manage snapshots features support. 
""" def __init__(self, *args, **kwargs): """Do initialization.""" LOG.debug("Invoking base constructor for Manila Hitachi HNAS Driver.") super(HitachiHNASDriver, self).__init__(False, *args, **kwargs) LOG.debug("Setting up attributes for Manila Hitachi HNAS Driver.") self.configuration.append_config_values(hitachi_hnas_opts) LOG.debug("Reading config parameters for Manila Hitachi HNAS Driver.") self.backend_name = self.configuration.safe_get('share_backend_name') hnas_helper = self.configuration.safe_get('hitachi_hnas_driver_helper') hnas_ip = self.configuration.safe_get('hitachi_hnas_ip') hnas_username = self.configuration.safe_get('hitachi_hnas_user') hnas_password = self.configuration.safe_get('hitachi_hnas_password') hnas_evs_id = self.configuration.safe_get('hitachi_hnas_evs_id') self.hnas_evs_ip = self.configuration.safe_get('hitachi_hnas_evs_ip') self.hnas_admin_network_ip = self.configuration.safe_get( 'hitachi_hnas_admin_network_ip') self.fs_name = self.configuration.safe_get( 'hitachi_hnas_file_system_name') self.cifs_snapshot = self.configuration.safe_get( 'hitachi_hnas_allow_cifs_snapshot_while_mounted') ssh_private_key = self.configuration.safe_get( 'hitachi_hnas_ssh_private_key') cluster_admin_ip0 = self.configuration.safe_get( 'hitachi_hnas_cluster_admin_ip0') self.private_storage = kwargs.get('private_storage') job_timeout = self.configuration.safe_get( 'hitachi_hnas_stalled_job_timeout') if hnas_helper is None: msg = _("The config parameter hitachi_hnas_driver_helper is not " "set.") raise exception.InvalidParameterValue(err=msg) if hnas_evs_id is None: msg = _("The config parameter hitachi_hnas_evs_id is not set.") raise exception.InvalidParameterValue(err=msg) if self.hnas_evs_ip is None: msg = _("The config parameter hitachi_hnas_evs_ip is not set.") raise exception.InvalidParameterValue(err=msg) if hnas_ip is None: msg = _("The config parameter hitachi_hnas_ip is not set.") raise exception.InvalidParameterValue(err=msg) if hnas_username is 
None: msg = _("The config parameter hitachi_hnas_user is not set.") raise exception.InvalidParameterValue(err=msg) if hnas_password is None and ssh_private_key is None: msg = _("Credentials configuration parameters missing: " "you need to set hitachi_hnas_password or " "hitachi_hnas_ssh_private_key.") raise exception.InvalidParameterValue(err=msg) LOG.debug("Initializing HNAS Layer.") helper = importutils.import_class(hnas_helper) self.hnas = helper(hnas_ip, hnas_username, hnas_password, ssh_private_key, cluster_admin_ip0, hnas_evs_id, self.hnas_evs_ip, self.fs_name, job_timeout) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share. :param context: The `context.RequestContext` object for the request :param share: Share that will have its access rules updated. :param access_rules: All access rules for given share. :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param update_rules: Empty List or List of access rules which should be updated. access_rules already contains these rules. :param share_server: Data structure with share server information. Not used by this driver. 
""" hnas_share_id = self._get_hnas_share_id(share['id']) try: self._ensure_share(share, hnas_share_id) except exception.HNASItemNotFoundException: raise exception.ShareResourceNotFound(share_id=share['id']) self._check_protocol(share['id'], share['share_proto']) if share['share_proto'].lower() == 'nfs': self._nfs_update_access(share, hnas_share_id, access_rules) else: if not (add_rules or delete_rules): # recovery mode self._clean_cifs_access_list(hnas_share_id) self._cifs_allow_access(share, hnas_share_id, access_rules) else: self._cifs_deny_access(share, hnas_share_id, delete_rules) self._cifs_allow_access(share, hnas_share_id, add_rules) def _nfs_update_access(self, share, hnas_share_id, access_rules): host_list = [] for rule in access_rules: if rule['access_type'].lower() != 'ip': msg = _("Only IP access type currently supported for NFS. " "Share provided %(share)s with rule type " "%(type)s.") % {'share': share['id'], 'type': rule['access_type']} raise exception.InvalidShareAccess(reason=msg) if rule['access_level'] == constants.ACCESS_LEVEL_RW: host_list.append(rule['access_to'] + '(' + rule['access_level'] + ',norootsquash)') else: host_list.append(rule['access_to'] + '(' + rule['access_level'] + ')') self.hnas.update_nfs_access_rule(host_list, share_id=hnas_share_id) if host_list: LOG.debug("Share %(share)s has the rules: %(rules)s", {'share': share['id'], 'rules': ', '.join(host_list)}) else: LOG.debug("Share %(share)s has no rules.", {'share': share['id']}) def _cifs_allow_access(self, share_or_snapshot, hnas_id, add_rules, is_snapshot=False): entity_type = "share" if is_snapshot: entity_type = "snapshot" for rule in add_rules: if rule['access_type'].lower() != 'user': msg = _("Only USER access type currently supported for CIFS. 
" "%(entity_type)s provided %(share)s with " "rule %(r_id)s type %(type)s allowing permission " "to %(to)s.") % { 'entity_type': entity_type.capitalize(), 'share': share_or_snapshot['id'], 'type': rule['access_type'], 'r_id': rule['id'], 'to': rule['access_to'], } raise exception.InvalidShareAccess(reason=msg) if rule['access_level'] == constants.ACCESS_LEVEL_RW: # Adding permission acr = Allow Change&Read permission = 'acr' else: # Adding permission ar = Allow Read permission = 'ar' formatted_user = rule['access_to'].replace('\\', '\\\\') self.hnas.cifs_allow_access(hnas_id, formatted_user, permission, is_snapshot=is_snapshot) LOG.debug("Added %(rule)s rule for user/group %(user)s " "to %(entity_type)s %(share)s.", {'rule': rule['access_level'], 'user': rule['access_to'], 'entity_type': entity_type, 'share': share_or_snapshot['id']}) def _cifs_deny_access(self, share_or_snapshot, hnas_id, delete_rules, is_snapshot=False): if is_snapshot: entity_type = "snapshot" share_proto = share_or_snapshot['share']['share_proto'] else: entity_type = "share" share_proto = share_or_snapshot['share_proto'] for rule in delete_rules: if rule['access_type'].lower() != 'user': LOG.warning('Only USER access type is allowed for ' 'CIFS. 
%(entity_type)s ' 'provided %(share)s with ' 'protocol %(proto)s.', {'entity_type': entity_type.capitalize(), 'share': share_or_snapshot['id'], 'proto': share_proto}) continue formatted_user = rule['access_to'].replace('\\', '\\\\') self.hnas.cifs_deny_access(hnas_id, formatted_user, is_snapshot=is_snapshot) LOG.debug("Access denied for user/group %(user)s " "to %(entity_type)s %(share)s.", {'user': rule['access_to'], 'entity_type': entity_type, 'share': share_or_snapshot['id']}) def _clean_cifs_access_list(self, hnas_id, is_snapshot=False): permission_list = self.hnas.list_cifs_permissions(hnas_id) for permission in permission_list: formatted_user = r'"\{1}{0}\{1}"'.format(permission[0], '"') self.hnas.cifs_deny_access(hnas_id, formatted_user, is_snapshot=is_snapshot) def create_share(self, context, share, share_server=None): r"""Creates share. :param context: The `context.RequestContext` object for the request :param share: Share that will be created. :param share_server: Data structure with share server information. Not used by this driver. :returns: Returns a list of dicts containing the EVS IP concatenated with the path of share in the filesystem. Example for NFS:: [ { 'path': '172.24.44.10:/shares/id', 'metadata': {}, 'is_admin_only': False }, { 'path': '192.168.0.10:/shares/id', 'metadata': {}, 'is_admin_only': True } ] Example for CIFS:: [ { 'path': '\\172.24.44.10\id', 'metadata': {}, 'is_admin_only': False }, { 'path': '\\192.168.0.10\id', 'metadata': {}, 'is_admin_only': True } ] """ LOG.debug("Creating share in HNAS: %(shr)s.", {'shr': share['id']}) self._check_protocol(share['id'], share['share_proto']) export_list = self._create_share(share['id'], share['size'], share['share_proto']) LOG.debug("Share %(share)s created successfully on path(s): " "%(paths)s.", {'paths': ', '.join([x['path'] for x in export_list]), 'share': share['id']}) return export_list def delete_share(self, context, share, share_server=None): """Deletes share. 
:param context: The `context.RequestContext` object for the request :param share: Share that will be deleted. :param share_server: Data structure with share server information. Not used by this driver. """ hnas_share_id = self._get_hnas_share_id(share['id']) LOG.debug("Deleting share in HNAS: %(shr)s.", {'shr': share['id']}) self._delete_share(hnas_share_id, share['share_proto']) LOG.debug("Export and share successfully deleted: %(shr)s.", {'shr': share['id']}) def create_snapshot(self, context, snapshot, share_server=None): """Creates snapshot. :param context: The `context.RequestContext` object for the request :param snapshot: Snapshot that will be created. :param share_server: Data structure with share server information. Not used by this driver. """ hnas_share_id = self._get_hnas_share_id(snapshot['share_id']) LOG.debug("The snapshot of share %(snap_share_id)s will be created " "with id %(snap_id)s.", {'snap_share_id': snapshot['share_id'], 'snap_id': snapshot['id']}) export_locations = self._create_snapshot(hnas_share_id, snapshot) LOG.info("Snapshot %(id)s successfully created.", {'id': snapshot['id']}) output = { 'provider_location': os.path.join( '/snapshots', hnas_share_id, snapshot['id']) } if export_locations: output['export_locations'] = export_locations return output def delete_snapshot(self, context, snapshot, share_server=None): """Deletes snapshot. :param context: The `context.RequestContext` object for the request :param snapshot: Snapshot that will be deleted. :param share_server: Data structure with share server information. Not used by this driver. """ hnas_share_id = self._get_hnas_share_id(snapshot['share_id']) hnas_snapshot_id = self._get_hnas_snapshot_id(snapshot) LOG.debug("The snapshot %(snap_id)s will be deleted. 
The related " "share ID is %(snap_share_id)s.", {'snap_id': snapshot['id'], 'snap_share_id': snapshot['share_id']}) self._delete_snapshot(snapshot['share'], hnas_share_id, hnas_snapshot_id) LOG.info("Snapshot %(id)s successfully deleted.", {'id': snapshot['id']}) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): r"""Creates a new share from snapshot. :param context: The `context.RequestContext` object for the request :param share: Information about the new share. :param snapshot: Information about the snapshot that will be copied to new share. :param share_server: Data structure with share server information. Not used by this driver. :returns: Returns a list of dicts containing the EVS IP concatenated with the path of share in the filesystem. Example for NFS:: [ { 'path': '172.24.44.10:/shares/id', 'metadata': {}, 'is_admin_only': False }, { 'path': '192.168.0.10:/shares/id', 'metadata': {}, 'is_admin_only': True } ] Example for CIFS:: [ { 'path': '\\172.24.44.10\id', 'metadata': {}, 'is_admin_only': False }, { 'path': '\\192.168.0.10\id', 'metadata': {}, 'is_admin_only': True } ] """ LOG.debug("Creating a new share from snapshot: %(ss_id)s.", {'ss_id': snapshot['id']}) hnas_src_share_id = self._get_hnas_share_id(snapshot['share_id']) hnas_src_snap_id = self._get_hnas_snapshot_id(snapshot) export_list = self._create_share_from_snapshot( share, hnas_src_share_id, hnas_src_snap_id) LOG.debug("Share %(share)s created successfully on path(s): " "%(paths)s.", {'paths': ', '.join([x['path'] for x in export_list]), 'share': share['id']}) return export_list def ensure_share(self, context, share, share_server=None): r"""Ensure that share is exported. :param context: The `context.RequestContext` object for the request :param share: Share that will be checked. :param share_server: Data structure with share server information. Not used by this driver. 
:returns: Returns a list of dicts containing the EVS IP concatenated with the path of share in the filesystem. Example for NFS:: [ { 'path': '172.24.44.10:/shares/id', 'metadata': {}, 'is_admin_only': False }, { 'path': '192.168.0.10:/shares/id', 'metadata': {}, 'is_admin_only': True } ] Example for CIFS:: [ { 'path': '\\172.24.44.10\id', 'metadata': {}, 'is_admin_only': False }, { 'path': '\\192.168.0.10\id', 'metadata': {}, 'is_admin_only': True } ] """ LOG.debug("Ensuring share in HNAS: %(shr)s.", {'shr': share['id']}) hnas_share_id = self._get_hnas_share_id(share['id']) export_list = self._ensure_share(share, hnas_share_id) LOG.debug("Share ensured in HNAS: %(shr)s, protocol %(proto)s.", {'shr': share['id'], 'proto': share['share_proto']}) return export_list def extend_share(self, share, new_size, share_server=None): """Extends a share to new size. :param share: Share that will be extended. :param new_size: New size of share. :param share_server: Data structure with share server information. Not used by this driver. """ hnas_share_id = self._get_hnas_share_id(share['id']) LOG.debug("Expanding share in HNAS: %(shr_id)s.", {'shr_id': share['id']}) self._extend_share(hnas_share_id, share, new_size) LOG.info("Share %(shr_id)s successfully extended to " "%(shr_size)s.", {'shr_id': share['id'], 'shr_size': str(new_size)}) # TODO(alyson): Implement in DHSS = true mode def get_network_allocations_number(self): """Track allocations_number in DHSS = true. When using the setting driver_handles_share_server = false does not require to track allocations_number because we do not handle network stuff. 
""" return 0 def _update_share_stats(self, data=None): """Updates the Capability of Backend.""" LOG.debug("Updating Backend Capability Information - Hitachi HNAS.") self._check_fs_mounted() total_space, free_space, dedupe = self.hnas.get_stats() reserved = self.configuration.safe_get('reserved_share_percentage') reserved_snapshot = self.configuration.safe_get( 'reserved_share_from_snapshot_percentage') or reserved reserved_share_extend = self.configuration.safe_get( 'reserved_share_extend_percentage') or reserved data = { 'share_backend_name': self.backend_name, 'driver_handles_share_servers': self.driver_handles_share_servers, 'vendor_name': 'Hitachi', 'driver_version': '4.0.0', 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': total_space, 'free_capacity_gb': free_space, 'reserved_percentage': reserved, 'reserved_snapshot_percentage': reserved_snapshot, 'reserved_share_extend_percentage': reserved_share_extend, 'qos': False, 'thin_provisioning': True, 'dedupe': dedupe, 'revert_to_snapshot_support': True, 'mount_snapshot_support': True, } LOG.info("HNAS Capabilities: %(data)s.", {'data': str(data)}) super(HitachiHNASDriver, self)._update_share_stats(data) def manage_existing(self, share, driver_options): r"""Manages a share that exists on backend. :param share: Share that will be managed. :param driver_options: Empty dict or dict with 'volume_id' option. :returns: Returns a dict with size of the share managed and a list of dicts containing its export locations. 
Example for NFS:: { 'size': 10, 'export_locations': [ { 'path': '172.24.44.10:/shares/id', 'metadata': {}, 'is_admin_only': False }, { 'path': '192.168.0.10:/shares/id', 'metadata': {}, 'is_admin_only': True } ] } Example for CIFS:: { 'size': 10, 'export_locations': [ { 'path': '\\172.24.44.10\id', 'metadata': {}, 'is_admin_only': False }, { 'path': '\\192.168.0.10\id', 'metadata': {}, 'is_admin_only': True } ] } """ hnas_share_id = self._get_hnas_share_id(share['id']) # Make sure returned value is the same as provided, # confirming it does not exist. if hnas_share_id != share['id']: msg = _("Share ID %s already exists, cannot manage.") % share['id'] raise exception.HNASBackendException(msg=msg) self._check_protocol(share['id'], share['share_proto']) if share['share_proto'].lower() == 'nfs': # 10.0.0.1:/shares/example LOG.info("Share %(shr_path)s will be managed with ID " "%(shr_id)s.", {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) old_path_info = share['export_locations'][0]['path'].split( ':/shares/') if len(old_path_info) == 2: evs_ip = old_path_info[0] hnas_share_id = old_path_info[1] else: msg = _("Incorrect path. It should have the following format: " "IP:/shares/share_id.") raise exception.ShareBackendException(msg=msg) else: # then its CIFS # \\10.0.0.1\example old_path = share['export_locations'][0]['path'].split('\\') if len(old_path) == 4: evs_ip = old_path[2] hnas_share_id = old_path[3] else: msg = _("Incorrect path. 
It should have the following format: " "\\\\IP\\share_id.") raise exception.ShareBackendException(msg=msg) if evs_ip != self.hnas_evs_ip: msg = _("The EVS IP %(evs)s is not " "configured.") % {'evs': evs_ip} raise exception.ShareBackendException(msg=msg) if self.backend_name not in share['host']: msg = _("The backend passed in the host parameter (%(shr)s) is " "not configured.") % {'shr': share['host']} raise exception.ShareBackendException(msg=msg) output = self._manage_existing(share, hnas_share_id) self.private_storage.update( share['id'], {'hnas_id': hnas_share_id}) LOG.debug("HNAS ID %(hnas_id)s has been saved to private storage for " "Share ID %(share_id)s", {'hnas_id': hnas_share_id, 'share_id': share['id']}) LOG.info("Share %(shr_path)s was successfully managed with ID " "%(shr_id)s.", {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) return output def unmanage(self, share): """Unmanages a share. :param share: Share that will be unmanaged. """ self.private_storage.delete(share['id']) if len(share['export_locations']) == 0: LOG.info("The share with ID %(shr_id)s is no longer being " "managed.", {'shr_id': share['id']}) else: LOG.info("The share with current path %(shr_path)s and ID " "%(shr_id)s is no longer being managed.", {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) def shrink_share(self, share, new_size, share_server=None): """Shrinks a share to new size. :param share: Share that will be shrunk. :param new_size: New size of share. :param share_server: Data structure with share server information. Not used by this driver. 
""" hnas_share_id = self._get_hnas_share_id(share['id']) LOG.debug("Shrinking share in HNAS: %(shr_id)s.", {'shr_id': share['id']}) self._shrink_share(hnas_share_id, share, new_size) LOG.info("Share %(shr_id)s successfully shrunk to " "%(shr_size)sG.", {'shr_id': share['id'], 'shr_size': str(new_size)}) def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): """Reverts a share to a given snapshot. :param context: The `context.RequestContext` object for the request :param snapshot: The snapshot to which the share is to be reverted to. :param share_access_rules: List of all access rules for the affected share. Not used by this driver. :param snapshot_access_rules: List of all access rules for the affected snapshot. Not used by this driver. :param share_server: Data structure with share server information. Not used by this driver. """ hnas_share_id = self._get_hnas_share_id(snapshot['share_id']) hnas_snapshot_id = self._get_hnas_snapshot_id(snapshot) self._ensure_snapshot(snapshot, hnas_snapshot_id) dest_path = os.path.join('/shares', hnas_share_id) src_path = os.path.join('/snapshots', hnas_share_id, hnas_snapshot_id) self.hnas.tree_delete(dest_path) self.hnas.vvol_create(hnas_share_id) self.hnas.quota_add(hnas_share_id, snapshot['size']) try: self.hnas.tree_clone(src_path, dest_path) except exception.HNASNothingToCloneException: LOG.warning("Source directory is empty, creating an empty " "directory.") LOG.info("Share %(share)s successfully reverted to snapshot " "%(snapshot)s.", {'share': snapshot['share_id'], 'snapshot': snapshot['id']}) def _get_hnas_share_id(self, share_id): hnas_id = self.private_storage.get(share_id, 'hnas_id') if hnas_id is None: hnas_id = share_id LOG.debug("Share ID is %(shr_id)s and respective HNAS ID " "is %(hnas_id)s.", {'shr_id': share_id, 'hnas_id': hnas_id}) return hnas_id def _get_hnas_snapshot_id(self, snapshot): hnas_snapshot_id = snapshot['id'] if snapshot['provider_location']: 
LOG.debug("Snapshot %(snap_id)s with provider_location: " "%(p_loc)s.", {'snap_id': hnas_snapshot_id, 'p_loc': snapshot['provider_location']}) hnas_snapshot_id = snapshot['provider_location'].split('/')[-1] return hnas_snapshot_id def _create_share(self, share_id, share_size, share_proto): """Creates share. Creates a virtual-volume, adds a quota limit and exports it. :param share_id: manila's database ID of share that will be created. :param share_size: Size limit of share. :param share_proto: Protocol of share that will be created (NFS or CIFS) :returns: Returns a list of dicts containing the new share's export locations. """ self._check_fs_mounted() self.hnas.vvol_create(share_id) self.hnas.quota_add(share_id, share_size) LOG.debug("Share created with id %(shr)s, size %(size)sG.", {'shr': share_id, 'size': share_size}) self._create_export(share_id, share_proto) export_list = self._get_export_locations(share_proto, share_id) return export_list def _create_export(self, share_id, share_proto, snapshot_id=None): try: if share_proto.lower() == 'nfs': # Create NFS export self.hnas.nfs_export_add(share_id, snapshot_id=snapshot_id) LOG.debug("NFS Export created to %(shr)s.", {'shr': share_id}) else: # Create CIFS share with vvol path self.hnas.cifs_share_add(share_id, snapshot_id=snapshot_id) LOG.debug("CIFS share created to %(shr)s.", {'shr': share_id}) except exception.HNASBackendException: with excutils.save_and_reraise_exception(): if snapshot_id is None: self.hnas.vvol_delete(share_id) def _check_fs_mounted(self): mounted = self.hnas.check_fs_mounted() if not mounted: msg = _("Filesystem %s is not mounted.") % self.fs_name raise exception.HNASBackendException(msg=msg) def _ensure_share(self, share, hnas_share_id): """Ensure that share is exported. :param share: Share that will be checked. :param hnas_share_id: HNAS ID of share that will be checked. :returns: Returns a list of dicts containing the share's export locations. 
""" self._check_protocol(share['id'], share['share_proto']) self._check_fs_mounted() self.hnas.check_vvol(hnas_share_id) self.hnas.check_quota(hnas_share_id) if share['share_proto'].lower() == 'nfs': self.hnas.check_export(hnas_share_id) else: self.hnas.check_cifs(hnas_share_id) export_list = self._get_export_locations( share['share_proto'], hnas_share_id) return export_list def _shrink_share(self, hnas_share_id, share, new_size): """Shrinks a share to new size. :param hnas_share_id: HNAS ID of share that will be shrunk. :param share: model of share that will be shrunk. :param new_size: New size of share after shrink operation. """ self._ensure_share(share, hnas_share_id) usage = self.hnas.get_share_usage(hnas_share_id) LOG.debug("Usage space in share %(share)s: %(usage)sG", {'share': share['id'], 'usage': usage}) if new_size > usage: self.hnas.modify_quota(hnas_share_id, new_size) else: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) def _extend_share(self, hnas_share_id, share, new_size): """Extends a share to new size. :param hnas_share_id: HNAS ID of share that will be extended. :param share: model of share that will be extended. :param new_size: New size of share after extend operation. """ self._ensure_share(share, hnas_share_id) old_size = share['size'] available_space = self.hnas.get_stats()[1] LOG.debug("Available space in filesystem: %(space)sG.", {'space': available_space}) if (new_size - old_size) < available_space: self.hnas.modify_quota(hnas_share_id, new_size) else: msg = (_("Share %s cannot be extended due to insufficient space.") % share['id']) raise exception.HNASBackendException(msg=msg) def _delete_share(self, hnas_share_id, share_proto): """Deletes share. It uses tree-delete-job-submit to format and delete virtual-volumes. Quota is deleted with virtual-volume. :param hnas_share_id: HNAS ID of share that will be deleted. :param share_proto: Protocol of share that will be deleted. 
""" self._check_fs_mounted() if share_proto.lower() == 'nfs': self.hnas.nfs_export_del(hnas_share_id) elif share_proto.lower() == 'cifs': self.hnas.cifs_share_del(hnas_share_id) self.hnas.vvol_delete(hnas_share_id) def _manage_existing(self, share, hnas_share_id): """Manages a share that exists on backend. :param share: share that will be managed. :param hnas_share_id: HNAS ID of share that will be managed. :returns: Returns a dict with size of the share managed and a list of dicts containing its export locations. """ self._ensure_share(share, hnas_share_id) share_size = self.hnas.get_share_quota(hnas_share_id) if share_size is None: msg = (_("The share %s trying to be managed does not have a " "quota limit, please set it before manage.") % share['id']) raise exception.ManageInvalidShare(reason=msg) export_list = self._get_export_locations( share['share_proto'], hnas_share_id) return {'size': share_size, 'export_locations': export_list} def _create_snapshot(self, hnas_share_id, snapshot): """Creates a snapshot of share. It copies the directory and all files to a new directory inside /snapshots/share_id/. :param hnas_share_id: HNAS ID of share for snapshot. :param snapshot: Snapshot that will be created. """ self._ensure_share(snapshot['share'], hnas_share_id) saved_list = [] share_proto = snapshot['share']['share_proto'] self._check_protocol(snapshot['share_id'], share_proto) if share_proto.lower() == 'nfs': saved_list = self.hnas.get_nfs_host_list(hnas_share_id) new_list = [] for access in saved_list: for rw in ('read_write', 'readwrite', 'rw'): access = access.replace(rw, 'ro') new_list.append(access) self.hnas.update_nfs_access_rule(new_list, share_id=hnas_share_id) else: # CIFS if (self.hnas.is_cifs_in_use(hnas_share_id) and not self.cifs_snapshot): msg = _("CIFS snapshot when share is mounted is disabled. 
" "Set hitachi_hnas_allow_cifs_snapshot_while_mounted to" " True or unmount the share to take a snapshot.") raise exception.ShareBackendException(msg=msg) src_path = os.path.join('/shares', hnas_share_id) dest_path = os.path.join('/snapshots', hnas_share_id, snapshot['id']) try: self.hnas.tree_clone(src_path, dest_path) except exception.HNASNothingToCloneException: LOG.warning("Source directory is empty, creating an empty " "directory.") self.hnas.create_directory(dest_path) finally: if share_proto.lower() == 'nfs': self.hnas.update_nfs_access_rule(saved_list, share_id=hnas_share_id) export_locations = [] if snapshot['share'].get('mount_snapshot_support'): self._create_export(hnas_share_id, share_proto, snapshot_id=snapshot['id']) export_locations = self._get_export_locations( share_proto, snapshot['id'], is_snapshot=True) return export_locations def _delete_snapshot(self, share, hnas_share_id, snapshot_id): """Deletes snapshot. It receives the hnas_share_id only to join the path for snapshot. :param hnas_share_id: HNAS ID of share from which snapshot was taken. :param snapshot_id: ID of snapshot. """ self._check_fs_mounted() share_proto = share['share_proto'] if share.get('mount_snapshot_support'): if share_proto.lower() == 'nfs': self.hnas.nfs_export_del(snapshot_id=snapshot_id) elif share_proto.lower() == 'cifs': self.hnas.cifs_share_del(snapshot_id) path = os.path.join('/snapshots', hnas_share_id, snapshot_id) self.hnas.tree_delete(path) path = os.path.join('/snapshots', hnas_share_id) self.hnas.delete_directory(path) def _create_share_from_snapshot(self, share, src_hnas_share_id, hnas_snapshot_id): """Creates a new share from snapshot. It copies everything from snapshot directory to a new vvol, set a quota limit for it and export. :param share: a dict from new share. :param src_hnas_share_id: HNAS ID of share from which snapshot was taken. :param hnas_snapshot_id: HNAS ID from snapshot that will be copied to new share. 
:returns: Returns a list of dicts containing the new share's export locations. """ dest_path = os.path.join('/shares', share['id']) src_path = os.path.join('/snapshots', src_hnas_share_id, hnas_snapshot_id) # Before copying everything to new vvol, we need to create it, # because we only can transform an empty directory into a vvol. self._check_fs_mounted() self.hnas.vvol_create(share['id']) self.hnas.quota_add(share['id'], share['size']) try: self.hnas.tree_clone(src_path, dest_path) except exception.HNASNothingToCloneException: LOG.warning("Source directory is empty, exporting " "directory.") self._check_protocol(share['id'], share['share_proto']) try: if share['share_proto'].lower() == 'nfs': self.hnas.nfs_export_add(share['id']) else: self.hnas.cifs_share_add(share['id']) except exception.HNASBackendException: with excutils.save_and_reraise_exception(): self.hnas.vvol_delete(share['id']) return self._get_export_locations( share['share_proto'], share['id']) def _check_protocol(self, share_id, protocol): if protocol.lower() not in ('nfs', 'cifs'): msg = _("Only NFS or CIFS protocol are currently supported. " "Share provided %(share)s with protocol " "%(proto)s.") % {'share': share_id, 'proto': protocol} raise exception.ShareBackendException(msg=msg) def _get_export_locations(self, share_proto, hnas_id, is_snapshot=False): export_list = [] for ip in (self.hnas_evs_ip, self.hnas_admin_network_ip): if ip: path = self._get_export_path(ip, share_proto, hnas_id, is_snapshot) export_list.append({ "path": path, "is_admin_only": ip == self.hnas_admin_network_ip, "metadata": {}, }) return export_list def _get_export_path(self, ip, share_proto, hnas_id, is_snapshot): r"""Gets and returns export path. :param ip: IP from HNAS EVS configured. :param share_proto: Share or snapshot protocol (NFS or CIFS). :param hnas_id: Entity ID in HNAS, it can be the ID from a share or a snapshot. :param is_snapshot: Boolean to determine if export is related to a share or a snapshot. 
:return: Complete export path, for example: - In NFS: SHARE: 172.24.44.10:/shares/id SNAPSHOT: 172.24.44.10:/snapshots/id - In CIFS: SHARE and SNAPSHOT: \\172.24.44.10\id """ if share_proto.lower() == 'nfs': if is_snapshot: path = os.path.join('/snapshots', hnas_id) else: path = os.path.join('/shares', hnas_id) export = ':'.join((ip, path)) else: export = r'\\%s\%s' % (ip, hnas_id) return export def _ensure_snapshot(self, snapshot, hnas_snapshot_id): """Ensure that snapshot is exported. :param snapshot: Snapshot that will be checked. :param hnas_snapshot_id: HNAS ID of snapshot that will be checked. :returns: Returns a list of dicts containing the snapshot's export locations or None if mount_snapshot_support is False. """ self._check_protocol(snapshot['share_id'], snapshot['share']['share_proto']) self._check_fs_mounted() self.hnas.check_directory(snapshot['provider_location']) export_list = None if snapshot['share'].get('mount_snapshot_support'): if snapshot['share']['share_proto'].lower() == 'nfs': self.hnas.check_export(hnas_snapshot_id, is_snapshot=True) else: self.hnas.check_cifs(hnas_snapshot_id) export_list = self._get_export_locations( snapshot['share']['share_proto'], hnas_snapshot_id, is_snapshot=True) return export_list def ensure_snapshot(self, context, snapshot, share_server=None): r"""Ensure that snapshot is exported. :param context: The `context.RequestContext` object for the request. :param snapshot: Snapshot that will be checked. :param share_server: Data structure with share server information. Not used by this driver. :returns: Returns a list of dicts containing the EVS IP concatenated with the path of snapshot in the filesystem or None if mount_snapshot_support is False. 
Example for NFS:: [ { 'path': '172.24.44.10:/snapshots/id', 'metadata': {}, 'is_admin_only': False }, { 'path': '192.168.0.10:/snapshots/id', 'metadata': {}, 'is_admin_only': True } ] Example for CIFS:: [ { 'path': '\\172.24.44.10\id', 'metadata': {}, 'is_admin_only': False }, { 'path': '\\192.168.0.10\id', 'metadata': {}, 'is_admin_only': True } ] """ LOG.debug("Ensuring snapshot in HNAS: %(snap)s.", {'snap': snapshot['id']}) hnas_snapshot_id = self._get_hnas_snapshot_id(snapshot) export_list = self._ensure_snapshot(snapshot, hnas_snapshot_id) LOG.debug("Snapshot ensured in HNAS: %(snap)s, protocol %(proto)s.", {'snap': snapshot['id'], 'proto': snapshot['share']['share_proto']}) return export_list def manage_existing_snapshot(self, snapshot, driver_options): """Manages a snapshot that exists only in HNAS. The snapshot to be managed should be in the path /snapshots/SHARE_ID/SNAPSHOT_ID. Also, the size of snapshot should be provided as --driver_options size=. :param snapshot: snapshot that will be managed. :param driver_options: expects only one key 'size'. It must be provided in order to manage a snapshot. :returns: Returns a dict with size of snapshot managed """ try: snapshot_size = int(driver_options.get("size", 0)) except (ValueError, TypeError): msg = _("The size in driver options to manage snapshot " "%(snap_id)s should be an integer, in format " "driver-options size=. Value passed: " "%(size)s.") % {'snap_id': snapshot['id'], 'size': driver_options.get("size")} raise exception.ManageInvalidShareSnapshot(reason=msg) if snapshot_size == 0: msg = _("Snapshot %(snap_id)s has no size specified for manage. 
" "Please, provide the size with parameter driver-options " "size=.") % {'snap_id': snapshot['id']} raise exception.ManageInvalidShareSnapshot(reason=msg) hnas_share_id = self._get_hnas_share_id(snapshot['share_id']) LOG.debug("Path provided to manage snapshot: %(path)s.", {'path': snapshot['provider_location']}) path_info = snapshot['provider_location'].split('/') if len(path_info) == 4 and path_info[1] == 'snapshots': path_share_id = path_info[2] hnas_snapshot_id = path_info[3] else: msg = (_("Incorrect path %(path)s for manage snapshot " "%(snap_id)s. It should have the following format: " "/snapshots/SHARE_ID/SNAPSHOT_ID.") % {'path': snapshot['provider_location'], 'snap_id': snapshot['id']}) raise exception.ManageInvalidShareSnapshot(reason=msg) if hnas_share_id != path_share_id: msg = _("The snapshot %(snap_id)s does not belong to share " "%(share_id)s.") % {'snap_id': snapshot['id'], 'share_id': snapshot['share_id']} raise exception.ManageInvalidShareSnapshot(reason=msg) if not self.hnas.check_directory(snapshot['provider_location']): msg = _("Snapshot %(snap_id)s does not exist in " "HNAS.") % {'snap_id': hnas_snapshot_id} raise exception.ManageInvalidShareSnapshot(reason=msg) try: self._ensure_snapshot(snapshot, hnas_snapshot_id) except exception.HNASItemNotFoundException: LOG.warning("Export does not exist for snapshot %s, " "creating a new one.", snapshot['id']) self._create_export(hnas_share_id, snapshot['share']['share_proto'], snapshot_id=hnas_snapshot_id) output = {'size': snapshot_size} if snapshot['share'].get('mount_snapshot_support'): export_locations = self._get_export_locations( snapshot['share']['share_proto'], hnas_snapshot_id, is_snapshot=True) output['export_locations'] = export_locations LOG.info("Snapshot %(snap_path)s for share %(shr_id)s was " "successfully managed with ID %(snap_id)s.", {'snap_path': snapshot['provider_location'], 'shr_id': snapshot['share_id'], 'snap_id': snapshot['id']}) return output def unmanage_snapshot(self, 
snapshot): """Unmanage a share snapshot :param snapshot: Snapshot that will be unmanaged. """ LOG.info("The snapshot with ID %(snap_id)s from share " "%(share_id)s is no longer being managed by Manila. " "However, it is not deleted and can be found in HNAS.", {'snap_id': snapshot['id'], 'share_id': snapshot['share_id']}) def snapshot_update_access(self, context, snapshot, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given snapshot. Drivers should support 2 different cases in this method: 1. Recovery after error - 'access_rules' contains all access rules, 'add_rules' and 'delete_rules' shall be empty. Driver should clear any existent access rules and apply all access rules for given snapshot. This recovery is made at driver start up. 2. Adding/Deleting of several access rules - 'access_rules' contains all access rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Driver can ignore rules in 'access_rules' and apply only rules from 'add_rules' and 'delete_rules'. All snapshots rules should be read only. :param context: Current context :param snapshot: Snapshot model with snapshot data. :param access_rules: All access rules for given snapshot :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param share_server: None or Share server model """ hnas_snapshot_id = self._get_hnas_snapshot_id(snapshot) self._ensure_snapshot(snapshot, hnas_snapshot_id) access_rules, add_rules, delete_rules = utils.change_rules_to_readonly( access_rules, add_rules, delete_rules) if snapshot['share']['share_proto'].lower() == 'nfs': host_list = [] for rule in access_rules: if rule['access_type'].lower() != 'ip': msg = _("Only IP access type currently supported for NFS. 
" "Snapshot provided %(snapshot)s with rule type " "%(type)s.") % {'snapshot': snapshot['id'], 'type': rule['access_type']} raise exception.InvalidSnapshotAccess(reason=msg) host_list.append(rule['access_to'] + '(ro)') self.hnas.update_nfs_access_rule(host_list, snapshot_id=hnas_snapshot_id) if host_list: LOG.debug("Snapshot %(snapshot)s has the rules: %(rules)s", {'snapshot': snapshot['id'], 'rules': ', '.join(host_list)}) else: LOG.debug("Snapshot %(snapshot)s has no rules.", {'snapshot': snapshot['id']}) else: if not (add_rules or delete_rules): # cifs recovery mode self._clean_cifs_access_list(hnas_snapshot_id, is_snapshot=True) self._cifs_allow_access(snapshot, hnas_snapshot_id, access_rules, is_snapshot=True) else: self._cifs_deny_access(snapshot, hnas_snapshot_id, delete_rules, is_snapshot=True) self._cifs_allow_access(snapshot, hnas_snapshot_id, add_rules, is_snapshot=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/hitachi/hnas/ssh.py0000664000175000017500000010521100000000000022610 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_concurrency import processutils from oslo_log import log from oslo_utils import strutils from oslo_utils import units import paramiko import os import time from manila import exception from manila.i18n import _ from manila import ssh_utils from manila import utils as mutils LOG = log.getLogger(__name__) class HNASSSHBackend(object): def __init__(self, hnas_ip, hnas_username, hnas_password, ssh_private_key, cluster_admin_ip0, evs_id, evs_ip, fs_name, job_timeout): self.ip = hnas_ip self.port = 22 self.user = hnas_username self.password = hnas_password self.priv_key = ssh_private_key self.admin_ip0 = cluster_admin_ip0 self.evs_id = str(evs_id) self.fs_name = fs_name self.evs_ip = evs_ip self.sshpool = None self.job_timeout = job_timeout LOG.debug("Hitachi HNAS Driver using SSH backend.") def get_stats(self): """Get the stats from file-system. :returns: fs_capacity.size = Total size from filesystem. available_space = Free space currently on filesystem. dedupe = True if dedupe is enabled on filesystem. 
""" command = ['df', '-a', '-f', self.fs_name] try: output, err = self._execute(command) except processutils.ProcessExecutionError: msg = _("Could not get HNAS backend stats.") LOG.exception(msg) raise exception.HNASBackendException(msg=msg) line = output.split('\n') fs = Filesystem(line[3]) available_space = fs.size - fs.used return fs.size, available_space, fs.dedupe def nfs_export_add(self, share_id, snapshot_id=None): if snapshot_id is not None: path = os.path.join('/snapshots', share_id, snapshot_id) name = os.path.join('/snapshots', snapshot_id) else: path = name = os.path.join('/shares', share_id) command = ['nfs-export', 'add', '-S', 'disable', '-c', '127.0.0.1', name, self.fs_name, path] try: self._execute(command) except processutils.ProcessExecutionError: msg = _("Could not create NFS export %s.") % name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def nfs_export_del(self, share_id=None, snapshot_id=None): if share_id is not None: name = os.path.join('/shares', share_id) elif snapshot_id is not None: name = os.path.join('/snapshots', snapshot_id) else: msg = _("NFS export not specified to delete.") raise exception.HNASBackendException(msg=msg) command = ['nfs-export', 'del', name] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'does not exist' in e.stderr: LOG.warning("Export %s does not exist on " "backend anymore.", name) else: msg = _("Could not delete NFS export %s.") % name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def cifs_share_add(self, share_id, snapshot_id=None): if snapshot_id is not None: path = r'\\snapshots\\' + share_id + r'\\' + snapshot_id name = snapshot_id else: path = r'\\shares\\' + share_id name = share_id command = ['cifs-share', 'add', '-S', 'disable', '--enable-abe', '--nodefaultsaa', name, self.fs_name, path] try: self._execute(command) except processutils.ProcessExecutionError: msg = _("Could not create CIFS share %s.") % name LOG.exception(msg) raise 
exception.HNASBackendException(msg=msg) def cifs_share_del(self, name): command = ['cifs-share', 'del', '--target-label', self.fs_name, name] try: self._execute(command) except processutils.ProcessExecutionError as e: if e.exit_code == 1: LOG.warning("CIFS share %s does not exist on " "backend anymore.", name) else: msg = _("Could not delete CIFS share %s.") % name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def get_nfs_host_list(self, share_id): export = self._get_export(share_id) return export[0].export_configuration def update_nfs_access_rule(self, host_list, share_id=None, snapshot_id=None): if share_id is not None: name = os.path.join('/shares', share_id) elif snapshot_id is not None: name = os.path.join('/snapshots', snapshot_id) else: msg = _("No share/snapshot provided to update NFS rules.") raise exception.HNASBackendException(msg=msg) command = ['nfs-export', 'mod', '-c'] if len(host_list) == 0: command.append('127.0.0.1') else: string_command = '"' + str(host_list[0]) for i in range(1, len(host_list)): string_command += ',' + (str(host_list[i])) string_command += '"' command.append(string_command) command.append(name) try: self._execute(command) except processutils.ProcessExecutionError: msg = _("Could not update access rules for NFS export %s.") % name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def cifs_allow_access(self, name, user, permission, is_snapshot=False): command = ['cifs-saa', 'add', '--target-label', self.fs_name, name, user, permission] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'already listed as a user' in e.stderr: if is_snapshot: LOG.debug('User %(user)s already allowed to access ' 'snapshot %(snapshot)s.', { 'user': user, 'snapshot': name, }) else: self._update_cifs_rule(name, user, permission) else: entity_type = "share" if is_snapshot: entity_type = "snapshot" msg = _("Could not add access of user %(user)s to " "%(entity_type)s %(name)s.") % { 'user': 
user, 'name': name, 'entity_type': entity_type, } LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def _update_cifs_rule(self, name, user, permission): LOG.debug('User %(user)s already allowed to access ' 'share %(share)s. Updating access level...', { 'user': user, 'share': name, }) command = ['cifs-saa', 'change', '--target-label', self.fs_name, name, user, permission] try: self._execute(command) except processutils.ProcessExecutionError: msg = _("Could not update access of user %(user)s to " "share %(share)s.") % { 'user': user, 'share': name, } LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def cifs_deny_access(self, name, user, is_snapshot=False): command = ['cifs-saa', 'delete', '--target-label', self.fs_name, name, user] entity_type = "share" if is_snapshot: entity_type = "snapshot" try: self._execute(command) except processutils.ProcessExecutionError as e: if ('not listed as a user' in e.stderr or 'Could not delete user/group' in e.stderr): LOG.warning('User %(user)s already not allowed to access ' '%(entity_type)s %(name)s.', { 'entity_type': entity_type, 'user': user, 'name': name }) else: msg = _("Could not delete access of user %(user)s to " "%(entity_type)s %(name)s.") % { 'user': user, 'name': name, 'entity_type': entity_type, } LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def list_cifs_permissions(self, hnas_share_id): command = ['cifs-saa', 'list', '--target-label', self.fs_name, hnas_share_id] try: output, err = self._execute(command) except processutils.ProcessExecutionError as e: if 'No entries for this share' in e.stderr: LOG.debug('Share %(share)s does not have any permission ' 'added.', {'share': hnas_share_id}) return [] else: msg = _("Could not list access of share %s.") % hnas_share_id LOG.exception(msg) raise exception.HNASBackendException(msg=msg) permissions = CIFSPermissions(output) return permissions.permission_list def tree_clone(self, src_path, dest_path): command = 
['tree-clone-job-submit', '-e', '-f', self.fs_name, src_path, dest_path] try: output, err = self._execute(command) except processutils.ProcessExecutionError as e: if ('Cannot find any clonable files in the source directory' in e.stderr): msg = _("Source path %s is empty.") % src_path LOG.debug(msg) raise exception.HNASNothingToCloneException(msg=msg) else: msg = _("Could not submit tree clone job to clone from %(src)s" " to %(dest)s.") % {'src': src_path, 'dest': dest_path} LOG.exception(msg) raise exception.HNASBackendException(msg=msg) job_submit = JobSubmit(output) if job_submit.request_status == 'Request submitted successfully': job_id = job_submit.job_id job_status = None progress = '' job_rechecks = 0 starttime = time.time() deadline = starttime + self.job_timeout while (not job_status or job_status.job_state != "Job was completed"): command = ['tree-clone-job-status', job_id] output, err = self._execute(command) job_status = JobStatus(output) if job_status.job_state == 'Job failed': break old_progress = progress progress = job_status.data_bytes_processed if old_progress == progress: job_rechecks += 1 now = time.time() if now > deadline: command = ['tree-clone-job-abort', job_id] self._execute(command) LOG.error("Timeout in snapshot creation from " "source path %s.", src_path) msg = _("Share snapshot of source path %s " "was not created.") % src_path raise exception.HNASBackendException(msg=msg) else: time.sleep(job_rechecks ** 2) else: job_rechecks = 0 if (job_status.job_state, job_status.job_status, job_status.directories_missing, job_status.files_missing) == ("Job was completed", "Success", '0', '0'): LOG.debug("Snapshot of source path %(src)s to destination " "path %(dest)s created successfully.", {'src': src_path, 'dest': dest_path}) else: LOG.error('Error creating snapshot of source path %s.', src_path) msg = _('Snapshot of source path %s was not ' 'created.') % src_path raise exception.HNASBackendException(msg=msg) def tree_delete(self, path): command 
= ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, path] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'Source path: Cannot access' in e.stderr: LOG.warning("Attempted to delete path %s " "but it does not exist.", path) else: msg = _("Could not submit tree delete job to delete path " "%s.") % path LOG.exception(msg) raise exception.HNASBackendException(msg=msg) @mutils.retry(retry_param=exception.HNASSSCContextChange, wait_random=True, retries=5) def create_directory(self, dest_path): self._locked_selectfs('create', dest_path) if not self.check_directory(dest_path): msg = _("Command to create directory %(path)s was run in another " "filesystem instead of %(fs)s.") % { 'path': dest_path, 'fs': self.fs_name, } LOG.warning(msg) raise exception.HNASSSCContextChange(msg=msg) @mutils.retry(retry_param=exception.HNASSSCContextChange, wait_random=True, retries=5) def delete_directory(self, path): try: self._locked_selectfs('delete', path) except exception.HNASDirectoryNotEmpty: pass else: if self.check_directory(path): msg = _("Command to delete empty directory %(path)s was run in" " another filesystem instead of %(fs)s.") % { 'path': path, 'fs': self.fs_name, } LOG.debug(msg) raise exception.HNASSSCContextChange(msg=msg) @mutils.retry(retry_param=exception.HNASSSCIsBusy, wait_random=True, retries=5) def check_directory(self, path): command = ['path-to-object-number', '-f', self.fs_name, path] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'path-to-object-number is currently running' in e.stdout: msg = (_("SSC command path-to-object-number for path %s " "is currently busy.") % path) raise exception.HNASSSCIsBusy(msg=msg) if 'Unable to locate component:' in e.stdout: LOG.debug("Cannot find %(path)s: %(out)s", {'path': path, 'out': e.stdout}) return False else: msg = _("Could not check if path %s exists.") % path LOG.exception(msg) raise exception.HNASBackendException(msg=msg) return True def 
check_fs_mounted(self): command = ['df', '-a', '-f', self.fs_name] output, err = self._execute(command) if "not found" in output: msg = _("Filesystem %s does not exist or it is not available " "in the current EVS context.") % self.fs_name LOG.error(msg) raise exception.HNASItemNotFoundException(msg=msg) else: line = output.split('\n') fs = Filesystem(line[3]) return fs.mounted def mount(self): command = ['mount', self.fs_name] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'file system is already mounted' not in e.stderr: msg = _("Failed to mount filesystem %s.") % self.fs_name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def vvol_create(self, vvol_name): # create a virtual-volume inside directory path = '/shares/' + vvol_name command = ['virtual-volume', 'add', '--ensure', self.fs_name, vvol_name, path] try: self._execute(command) except processutils.ProcessExecutionError: msg = _("Failed to create vvol %s.") % vvol_name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def vvol_delete(self, vvol_name): path = '/shares/' + vvol_name # Virtual-volume and quota are deleted together command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, path] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'Source path: Cannot access' in e.stderr: LOG.warning("Share %s does not exist.", vvol_name) else: msg = _("Failed to delete vvol %s.") % vvol_name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def quota_add(self, vvol_name, vvol_quota): str_quota = str(vvol_quota) + 'G' command = ['quota', 'add', '--usage-limit', str_quota, '--usage-hard-limit', 'yes', self.fs_name, vvol_name] try: self._execute(command) except processutils.ProcessExecutionError: msg = _("Failed to add %(quota)s quota to vvol " "%(vvol)s.") % {'quota': str_quota, 'vvol': vvol_name} LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def modify_quota(self, vvol_name, 
new_size): str_quota = str(new_size) + 'G' command = ['quota', 'mod', '--usage-limit', str_quota, self.fs_name, vvol_name] try: self._execute(command) except processutils.ProcessExecutionError: msg = _("Failed to update quota of vvol %(vvol)s to " "%(quota)s.") % {'quota': str_quota, 'vvol': vvol_name} LOG.exception(msg) raise exception.HNASBackendException(msg=msg) def check_vvol(self, vvol_name): command = ['virtual-volume', 'list', '--verbose', self.fs_name, vvol_name] try: self._execute(command) except processutils.ProcessExecutionError: msg = _("Virtual volume %s does not exist.") % vvol_name LOG.exception(msg) raise exception.HNASItemNotFoundException(msg=msg) def check_quota(self, vvol_name): command = ['quota', 'list', '--verbose', self.fs_name, vvol_name] try: output, err = self._execute(command) except processutils.ProcessExecutionError: msg = _("Could not check quota of vvol %s.") % vvol_name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) if 'No quotas matching specified filter criteria' in output: msg = _("Virtual volume %s does not have any" " quota.") % vvol_name LOG.error(msg) raise exception.HNASItemNotFoundException(msg=msg) def check_export(self, vvol_name, is_snapshot=False): export = self._get_export(vvol_name, is_snapshot=is_snapshot) if (vvol_name in export[0].export_name and self.fs_name in export[0].file_system_label): return else: msg = _("Export %s does not exist.") % export[0].export_name LOG.error(msg) raise exception.HNASItemNotFoundException(msg=msg) def check_cifs(self, vvol_name): output = self._cifs_list(vvol_name) cifs_share = CIFSShare(output) if self.fs_name != cifs_share.fs: msg = _("CIFS share %(share)s is not located in " "configured filesystem " "%(fs)s.") % {'share': vvol_name, 'fs': self.fs_name} LOG.error(msg) raise exception.HNASItemNotFoundException(msg=msg) def is_cifs_in_use(self, vvol_name): output = self._cifs_list(vvol_name) cifs_share = CIFSShare(output) return cifs_share.is_mounted def 
_cifs_list(self, vvol_name): command = ['cifs-share', 'list', vvol_name] try: output, err = self._execute(command) except processutils.ProcessExecutionError as e: if 'does not exist' in e.stderr: msg = _("CIFS share %(share)s was not found in EVS " "%(evs_id)s") % {'share': vvol_name, 'evs_id': self.evs_id} LOG.exception(msg) raise exception.HNASItemNotFoundException(msg=msg) else: msg = _("Could not list CIFS shares by vvol name " "%s.") % vvol_name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) return output def get_share_quota(self, share_id): command = ['quota', 'list', self.fs_name, share_id] output, err = self._execute(command) quota = Quota(output) if quota.limit is None: return None if quota.limit_unit == 'TB': return quota.limit * units.Ki elif quota.limit_unit == 'GB': return quota.limit else: msg = _("Share %s does not support quota values " "below 1G.") % share_id LOG.error(msg) raise exception.HNASBackendException(msg=msg) def get_share_usage(self, share_id): command = ['quota', 'list', self.fs_name, share_id] output, err = self._execute(command) quota = Quota(output) if quota.usage is None: msg = _("Virtual volume %s does not have any quota.") % share_id LOG.error(msg) raise exception.HNASItemNotFoundException(msg=msg) else: bytes_usage = strutils.string_to_bytes(str(quota.usage) + quota.usage_unit) return bytes_usage / units.Gi def _get_export(self, name, is_snapshot=False): if is_snapshot: name = '/snapshots/' + name else: name = '/shares/' + name command = ['nfs-export', 'list ', name] export_list = [] try: output, err = self._execute(command) except processutils.ProcessExecutionError as e: if 'does not exist' in e.stderr: msg = _("Export %(name)s was not found in EVS " "%(evs_id)s.") % { 'name': name, 'evs_id': self.evs_id, } LOG.exception(msg) raise exception.HNASItemNotFoundException(msg=msg) else: msg = _("Could not list NFS exports by name %s.") % name LOG.exception(msg) raise exception.HNASBackendException(msg=msg) items = 
output.split('Export name') if items[0][0] == '\n': items.pop(0) for i in range(0, len(items)): export_list.append(Export(items[i])) return export_list @mutils.retry(retry_param=exception.HNASConnException, wait_random=True) def _execute(self, commands): command = ['ssc', '127.0.0.1'] if self.admin_ip0 is not None: command = ['ssc', '--smuauth', self.admin_ip0] command += ['console-context', '--evs', self.evs_id] commands = command + commands mutils.check_ssh_injection(commands) commands = ' '.join(commands) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(ip=self.ip, port=self.port, conn_timeout=None, login=self.user, password=self.password, privatekey=self.priv_key) with self.sshpool.item() as ssh: ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: out, err = processutils.ssh_execute(ssh, commands, check_exit_code=True) LOG.debug("Command %(cmd)s result: out = %(out)s - err = " "%(err)s.", { 'cmd': commands, 'out': out, 'err': err, }) return out, err except processutils.ProcessExecutionError as e: if 'Failed to establish SSC connection' in e.stderr: msg = _("Failed to establish SSC connection.") LOG.debug(msg) raise exception.HNASConnException(msg=msg) else: LOG.debug("Error running SSH command. 
" "Command %(cmd)s result: out = %(out)s - err = " "%(err)s - exit = %(exit)s.", { 'cmd': e.cmd, 'out': e.stdout, 'err': e.stderr, 'exit': e.exit_code, }) raise @mutils.synchronized("hitachi_hnas_select_fs", external=True) def _locked_selectfs(self, op, path): if op == 'create': command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', self.evs_id, 'mkdir', '-p', path] try: self._execute(command) except processutils.ProcessExecutionError as e: if "Current file system invalid: VolumeNotFound" in e.stderr: msg = _("Command to create directory %s failed due to " "context change.") % path LOG.debug(msg) raise exception.HNASSSCContextChange(msg=msg) else: msg = _("Failed to create directory %s.") % path LOG.exception(msg) raise exception.HNASBackendException(msg=msg) if op == 'delete': command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', self.evs_id, 'rmdir', path] try: self._execute(command) except processutils.ProcessExecutionError as e: if 'DirectoryNotEmpty' in e.stderr: msg = _("Share %s has more snapshots.") % path LOG.debug(msg) raise exception.HNASDirectoryNotEmpty(msg=msg) elif 'cannot remove' in e.stderr and 'NotFound' in e.stderr: LOG.warning("Attempted to delete path %s but it does " "not exist.", path) elif 'Current file system invalid: VolumeNotFound' in e.stderr: msg = _("Command to delete empty directory %s failed due " "to context change.") % path LOG.debug(msg) raise exception.HNASSSCContextChange(msg=msg) else: msg = _("Failed to delete directory %s.") % path LOG.exception(msg) raise exception.HNASBackendException(msg=msg) class Export(object): def __init__(self, data): if data: split_data = data.split('Export configuration:\n') items = split_data[0].split('\n') self.export_name = items[0].split(':')[1].strip() self.export_path = items[1].split(':')[1].strip() if '*** not available ***' in items[2]: self.file_system_info = items[2].split(':')[1].strip() index = 0 else: 
self.file_system_label = items[2].split(':')[1].strip() self.file_system_size = items[3].split(':')[1].strip() self.file_system_free_space = items[4].split(':')[1].strip() self.file_system_state = items[5].split(':')[1] self.formatted = items[6].split('=')[1].strip() self.mounted = items[7].split('=')[1].strip() self.failed = items[8].split('=')[1].strip() self.thin_provisioned = items[9].split('=')[1].strip() index = 7 self.access_snapshots = items[3 + index].split(':')[1].strip() self.display_snapshots = items[4 + index].split(':')[1].strip() self.read_caching = items[5 + index].split(':')[1].strip() self.disaster_recovery_setting = items[6 + index].split(':')[1] self.recovered = items[7 + index].split('=')[1].strip() self.transfer_setting = items[8 + index].split('=')[1].strip() self.export_configuration = [] export_config = split_data[1].split('\n') for i in range(0, len(export_config)): if any(j.isdigit() or j.isalpha() for j in export_config[i]): self.export_configuration.append(export_config[i]) class JobStatus(object): def __init__(self, data): if data: lines = data.split("\n") self.job_id = lines[0].split()[3] self.physical_node = lines[2].split()[3] self.evs = lines[3].split()[2] self.volume_number = lines[4].split()[3] self.fs_id = lines[5].split()[4] self.fs_name = lines[6].split()[4] self.source_path = lines[7].split()[3] self.creation_time = " ".join(lines[8].split()[3:5]) self.destination_path = lines[9].split()[3] self.ensure_path_exists = lines[10].split()[5] self.job_state = " ".join(lines[12].split()[3:]) self.job_started = " ".join(lines[14].split()[2:4]) self.job_ended = " ".join(lines[15].split()[2:4]) self.job_status = lines[16].split()[2] error_details_line = lines[17].split() if len(error_details_line) > 3: self.error_details = " ".join(error_details_line[3:]) else: self.error_details = None self.directories_processed = lines[18].split()[3] self.files_processed = lines[19].split()[3] self.data_bytes_processed = lines[20].split()[4] 
self.directories_missing = lines[21].split()[4] self.files_missing = lines[22].split()[4] self.files_skipped = lines[23].split()[4] skipping_details_line = lines[24].split() if len(skipping_details_line) > 3: self.skipping_details = " ".join(skipping_details_line[3:]) else: self.skipping_details = None class JobSubmit(object): def __init__(self, data): if data: split_data = data.replace(".", "").split() self.request_status = " ".join(split_data[1:4]) self.job_id = split_data[8] class Filesystem(object): def __init__(self, data): if data: items = data.split() self.id = items[0] self.label = items[1] self.evs = items[2] self.size = float(items[3]) self.size_measure = items[4] if self.size_measure == 'TB': self.size = self.size * units.Ki if items[5:7] == ["Not", "mounted"]: self.mounted = False else: self.mounted = True self.used = float(items[5]) self.used_measure = items[6] if self.used_measure == 'TB': self.used = self.used * units.Ki self.dedupe = 'dedupe enabled' in data class Quota(object): def __init__(self, data): if data: if 'No quotas matching' in data: self.type = None self.target = None self.usage = None self.usage_unit = None self.limit = None self.limit_unit = None else: items = data.split() self.type = items[2] self.target = items[6] self.usage = items[9] self.usage_unit = items[10] if items[13] == 'Unset': self.limit = None else: self.limit = float(items[13]) self.limit_unit = items[14] class CIFSPermissions(object): def __init__(self, data): self.permission_list = [] hnas_cifs_permissions = [('Allow Read', 'ar'), ('Allow Change & Read', 'acr'), ('Allow Full Control', 'af'), ('Deny Read', 'dr'), ('Deny Change & Read', 'dcr'), ('Deny Full Control', 'df')] lines = data.split('\n') for line in lines: filtered = list(filter(lambda x: x[0] in line, hnas_cifs_permissions)) if len(filtered) == 1: token, permission = filtered[0] user = line.split(token)[1:][0].strip() self.permission_list.append((user, permission)) class CIFSShare(object): def __init__(self, 
data): lines = data.split('\n') for line in lines: if 'File system label' in line: self.fs = line.split(': ')[1] elif 'Share users' in line: users = line.split(': ') self.is_mounted = users[1] != '0' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9136717 manila-21.0.0/manila/share/drivers/hitachi/hsp/0000775000175000017500000000000000000000000021302 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/hitachi/hsp/__init__.py0000664000175000017500000000000000000000000023401 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/hitachi/hsp/driver.py0000664000175000017500000003515700000000000023162 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import units from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.hitachi.hsp import rest LOG = log.getLogger(__name__) hitachi_hsp_opts = [ cfg.HostAddressOpt('hitachi_hsp_host', required=True, help="HSP management host for communication between " "Manila controller and HSP."), cfg.StrOpt('hitachi_hsp_username', required=True, help="HSP username to perform tasks such as create filesystems" " and shares."), cfg.StrOpt('hitachi_hsp_password', required=True, secret=True, help="HSP password for the username provided."), ] class HitachiHSPDriver(driver.ShareDriver): """Manila HSP Driver implementation. 1.0.0 - Initial Version. """ def __init__(self, *args, **kwargs): super(HitachiHSPDriver, self).__init__( [False], *args, config_opts=[hitachi_hsp_opts], **kwargs) self.private_storage = kwargs.get('private_storage') self.backend_name = self.configuration.safe_get('share_backend_name') self.hsp_host = self.configuration.safe_get('hitachi_hsp_host') self.hsp = rest.HSPRestBackend( self.hsp_host, self.configuration.safe_get('hitachi_hsp_username'), self.configuration.safe_get('hitachi_hsp_password') ) def _update_share_stats(self, data=None): LOG.debug("Updating Backend Capability Information - Hitachi HSP.") reserved = self.configuration.safe_get('reserved_share_percentage') reserved_snapshot = (self.configuration.safe_get( 'reserved_share_from_snapshot_percentage') or self.configuration.safe_get('reserved_share_percentage')) reserved_share_extend = (self.configuration.safe_get( 'reserved_share_extend_percentage') or self.configuration.safe_get('reserved_share_percentage')) max_over_subscription_ratio = self.configuration.safe_get( 'max_over_subscription_ratio') hsp_cluster = self.hsp.get_cluster() total_space = hsp_cluster['properties']['total-storage-capacity'] free_space = 
hsp_cluster['properties']['total-storage-available'] data = { 'share_backend_name': self.backend_name, 'vendor_name': 'Hitachi', 'driver_version': '1.0.0', 'storage_protocol': 'NFS', 'pools': [{ 'reserved_percentage': reserved, 'reserved_snapshot_percentage': reserved_snapshot, 'reserved_share_extend_percentage': reserved_share_extend, 'pool_name': 'HSP', 'thin_provisioning': True, 'total_capacity_gb': total_space / units.Gi, 'free_capacity_gb': free_space / units.Gi, 'max_over_subscription_ratio': max_over_subscription_ratio, 'qos': False, 'dedupe': False, 'compression': False, }], } LOG.info("Hitachi HSP Capabilities: %(data)s.", {'data': data}) super(HitachiHSPDriver, self)._update_share_stats(data) def create_share(self, context, share, share_server=None): LOG.debug("Creating share in HSP: %(shr)s", {'shr': share['id']}) if share['share_proto'].lower() != 'nfs': msg = _("Only NFS protocol is currently supported.") raise exception.InvalidShare(reason=msg) self.hsp.add_file_system(share['id'], share['size'] * units.Gi) filesystem_id = self.hsp.get_file_system(share['id'])['id'] try: self.hsp.add_share(share['id'], filesystem_id) except exception.HSPBackendException: with excutils.save_and_reraise_exception(): self.hsp.delete_file_system(filesystem_id) msg = ("Could not create share %s on HSP.") LOG.exception(msg, share['id']) uri = self.hsp_host + ':/' + share['id'] LOG.debug("Share created successfully on path: %(uri)s.", {'uri': uri}) return [{ "path": uri, "metadata": {}, "is_admin_only": False, }] def delete_share(self, context, share, share_server=None): LOG.debug("Deleting share in HSP: %(shr)s.", {'shr': share['id']}) filesystem_id = hsp_share_id = None try: filesystem_id = self.hsp.get_file_system(share['id'])['id'] hsp_share_id = self.hsp.get_share(filesystem_id)['id'] except exception.HSPItemNotFoundException: LOG.info("Share %(shr)s already removed from backend.", {'shr': share['id']}) if hsp_share_id: # Clean all rules from share before deleting it 
current_rules = self.hsp.get_access_rules(hsp_share_id) for rule in current_rules: try: self.hsp.delete_access_rule(hsp_share_id, rule['name']) except exception.HSPBackendException as e: if 'No matching access rule found.' in e.msg: LOG.debug("Rule %(rule)s already deleted in " "backend.", {'rule': rule['name']}) else: raise self.hsp.delete_share(hsp_share_id) if filesystem_id: self.hsp.delete_file_system(filesystem_id) LOG.debug("Export and share successfully deleted: %(shr)s.", {'shr': share['id']}) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): LOG.debug("Updating access rules for share: %(shr)s.", {'shr': share['id']}) try: filesystem_id = self.hsp.get_file_system(share['id'])['id'] hsp_share_id = self.hsp.get_share(filesystem_id)['id'] except exception.HSPItemNotFoundException: raise exception.ShareResourceNotFound(share_id=share['id']) if not (add_rules or delete_rules): # Recovery mode current_rules = self.hsp.get_access_rules(hsp_share_id) # Indexing the rules for faster searching hsp_rules_dict = { rule['host-specification']: rule['read-write'] for rule in current_rules } manila_rules_dict = {} for rule in access_rules: if rule['access_type'].lower() != 'ip': msg = _("Only IP access type currently supported.") raise exception.InvalidShareAccess(reason=msg) access_to = rule['access_to'] is_rw = rule['access_level'] == constants.ACCESS_LEVEL_RW manila_rules_dict[access_to] = is_rw # Remove the rules that exist on HSP but not on manila remove_rules = self._get_complement(hsp_rules_dict, manila_rules_dict) # Add the rules that exist on manila but not on HSP add_rules = self._get_complement(manila_rules_dict, hsp_rules_dict) for rule in remove_rules: rule_name = self._get_hsp_rule_name(hsp_share_id, rule[0]) self.hsp.delete_access_rule(hsp_share_id, rule_name) for rule in add_rules: self.hsp.add_access_rule(hsp_share_id, rule[0], rule[1]) else: for rule in delete_rules: if 
rule['access_type'].lower() != 'ip': continue # get the real rule name in HSP rule_name = self._get_hsp_rule_name(hsp_share_id, rule['access_to']) try: self.hsp.delete_access_rule(hsp_share_id, rule_name) except exception.HSPBackendException as e: if 'No matching access rule found.' in e.msg: LOG.debug("Rule %(rule)s already deleted in " "backend.", {'rule': rule['access_to']}) else: raise for rule in add_rules: if rule['access_type'].lower() != 'ip': msg = _("Only IP access type currently supported.") raise exception.InvalidShareAccess(reason=msg) try: self.hsp.add_access_rule( hsp_share_id, rule['access_to'], (rule['access_level'] == constants.ACCESS_LEVEL_RW)) except exception.HSPBackendException as e: if 'Duplicate NFS access rule exists' in e.msg: LOG.debug("Rule %(rule)s already exists in " "backend.", {'rule': rule['access_to']}) else: raise LOG.debug("Successfully updated share %(shr)s rules.", {'shr': share['id']}) def _get_hsp_rule_name(self, share_id, host_to): rule_name = share_id + host_to all_rules = self.hsp.get_access_rules(share_id) for rule in all_rules: # check if this rule has other name in HSP if rule['host-specification'] == host_to: rule_name = rule['name'] break return rule_name def _get_complement(self, rules_a, rules_b): """Returns the rules of list A that are not on list B""" complement = [] for rule, is_rw in rules_a.items(): if rule not in rules_b or rules_b[rule] != is_rw: complement.append((rule, is_rw)) return complement def extend_share(self, share, new_size, share_server=None): LOG.debug("Extending share in HSP: %(shr_id)s.", {'shr_id': share['id']}) old_size = share['size'] hsp_cluster = self.hsp.get_cluster() free_space = hsp_cluster['properties']['total-storage-available'] free_space = free_space / units.Gi if (new_size - old_size) < free_space: filesystem_id = self.hsp.get_file_system(share['id'])['id'] self.hsp.resize_file_system(filesystem_id, new_size * units.Gi) else: msg = (_("Share %s cannot be extended due to 
insufficient space.") % share['id']) raise exception.HSPBackendException(msg=msg) LOG.info("Share %(shr_id)s successfully extended to " "%(shr_size)sG.", {'shr_id': share['id'], 'shr_size': new_size}) def shrink_share(self, share, new_size, share_server=None): LOG.debug("Shrinking share in HSP: %(shr_id)s.", {'shr_id': share['id']}) file_system = self.hsp.get_file_system(share['id']) usage = file_system['properties']['used-capacity'] / units.Gi LOG.debug("Usage for share %(shr_id)s in HSP: %(usage)sG.", {'shr_id': share['id'], 'usage': usage}) if new_size > usage: self.hsp.resize_file_system(file_system['id'], new_size * units.Gi) else: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) LOG.info("Share %(shr_id)s successfully shrunk to " "%(shr_size)sG.", {'shr_id': share['id'], 'shr_size': new_size}) def manage_existing(self, share, driver_options): LOG.debug("Managing share in HSP: %(shr_id)s.", {'shr_id': share['id']}) ip, share_name = share['export_locations'][0]['path'].split(':') try: hsp_share = self.hsp.get_share(name=share_name.strip('/')) except exception.HSPItemNotFoundException: msg = _("The share %s trying to be managed was not found on " "backend.") % share['id'] raise exception.ManageInvalidShare(reason=msg) self.hsp.rename_file_system(hsp_share['properties']['file-system-id'], share['id']) original_name = hsp_share['properties']['file-system-name'] private_storage_content = { 'old_name': original_name, 'new_name': share['id'], } self.private_storage.update(share['id'], private_storage_content) LOG.debug("Filesystem %(original_name)s was renamed to %(name)s.", {'original_name': original_name, 'name': share['id']}) file_system = self.hsp.get_file_system(share['id']) LOG.info("Share %(shr_path)s was successfully managed with ID " "%(shr_id)s.", {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) export_locations = [{ "path": share['export_locations'][0]['path'], "metadata": {}, "is_admin_only": False, }] return 
{'size': file_system['properties']['quota'] / units.Gi, 'export_locations': export_locations} def unmanage(self, share): original_name = self.private_storage.get(share['id'], 'old_name') LOG.debug("Filesystem %(name)s that was originally named " "%(original_name)s will no longer be managed.", {'original_name': original_name, 'name': share['id']}) self.private_storage.delete(share['id']) LOG.info("The share with current path %(shr_path)s and ID " "%(shr_id)s is no longer being managed.", {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) def get_default_filter_function(self): return "share.size >= 128" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/hitachi/hsp/rest.py0000664000175000017500000001660200000000000022636 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json

import requests

from manila import exception
from manila.i18n import _
from manila import utils

# Suppress the Insecure request warnings (requests are made with
# verify=False below).
requests.packages.urllib3.disable_warnings()  # pylint: disable=no-member


class HSPRestBackend(object):
    """Minimal client for the Hitachi HSP REST API.

    Wraps the HTTP verbs with basic-auth requests and translates API
    failures into ``HSPBackendException``.  Asynchronous operations
    (HTTP 202) are polled via ``_wait_job_status`` until they reach the
    ``COMPLETE`` state.
    """

    def __init__(self, hsp_host, hsp_username, hsp_password):
        self.host = hsp_host
        self.username = hsp_username
        self.password = hsp_password

    def _send_post(self, url, payload=None):
        """POST ``payload`` to ``url`` and wait for the async job to finish.

        :raises HSPBackendException: if the API rejects the request.
        """
        resp = requests.post(url, auth=(self.username, self.password),
                             data=payload, verify=False)

        if resp.status_code == 202:
            # HSP accepted the request; the Location header points at the
            # job resource to poll.
            self._wait_job_status(resp.headers['location'], 'COMPLETE')
        else:
            msg = (_("HSP API post failed: %s.") %
                   resp.json()['messages'][0]['message'])
            raise exception.HSPBackendException(msg=msg)

    def _send_get(self, url, payload=None):
        """GET ``url`` and return the decoded JSON body (or None).

        :returns: parsed JSON, or None when the API returns a literal
            ``null`` body.
        :raises HSPBackendException: on any non-200 response.
        """
        resp = requests.get(url, auth=(self.username, self.password),
                            data=payload, verify=False)

        if resp.status_code == 200:
            # FIX: ``resp.content`` is bytes under Python 3, so the old
            # check ``resp.content == 'null'`` could never be true.
            # Compare the decoded text instead.
            if resp.text == 'null':
                return None
            return resp.json()
        else:
            msg = (_("HSP API get failed: %s.") %
                   resp.json()['messages'][0]['message'])
            raise exception.HSPBackendException(msg=msg)

    def _send_delete(self, url, payload=None):
        """DELETE ``url`` and wait for the async job to finish.

        :raises HSPBackendException: if the API rejects the request.
        """
        resp = requests.delete(url, auth=(self.username, self.password),
                               data=payload, verify=False)

        if resp.status_code == 202:
            self._wait_job_status(resp.headers['location'], 'COMPLETE')
        else:
            msg = (_("HSP API delete failed: %s.") %
                   resp.json()['messages'][0]['message'])
            raise exception.HSPBackendException(msg=msg)

    def add_file_system(self, name, quota):
        """Create a filesystem named ``name`` with ``quota`` bytes."""
        url = "https://%s/hspapi/file-systems/" % self.host

        payload = {
            'quota': quota,
            'auto-access': False,
            'enabled': True,
            'description': '',
            'record-access-time': True,
            'tags': '',
            # Usage percentage in which a warning will be shown
            'space-hwm': 90,
            # Usage percentage in which the warning will be cleared
            'space-lwm': 70,
            'name': name,
        }
        self._send_post(url, payload=json.dumps(payload))

    def get_file_system(self, name):
        """Return the filesystem named ``name``.

        :raises HSPItemNotFoundException: if it does not exist.
        """
        url = ("https://%s/hspapi/file-systems/list?name=%s" %
               (self.host, name))

        filesystems = self._send_get(url)
        try:
            return filesystems['list'][0]
        except (TypeError, KeyError, IndexError):
            # TypeError covers a None (empty) API response.
            msg = _("Filesystem does not exist or is not available.")
            raise exception.HSPItemNotFoundException(msg=msg)

    def delete_file_system(self, filesystem_id):
        """Delete the filesystem with id ``filesystem_id``."""
        url = "https://%s/hspapi/file-systems/%s" % (self.host, filesystem_id)
        self._send_delete(url)

    def resize_file_system(self, filesystem_id, new_size):
        """Set the quota of ``filesystem_id`` to ``new_size`` bytes."""
        url = "https://%s/hspapi/file-systems/%s" % (self.host, filesystem_id)
        payload = {'quota': new_size}
        self._send_post(url, payload=json.dumps(payload))

    def rename_file_system(self, filesystem_id, new_name):
        """Rename the filesystem ``filesystem_id`` to ``new_name``."""
        url = "https://%s/hspapi/file-systems/%s" % (self.host, filesystem_id)
        payload = {'name': new_name}
        self._send_post(url, payload=json.dumps(payload))

    def add_share(self, name, filesystem_id):
        """Create an NFS share ``name`` on filesystem ``filesystem_id``."""
        url = "https://%s/hspapi/shares/" % self.host
        payload = {
            'description': '',
            'type': 'NFS',
            'enabled': True,
            'tags': '',
            'name': name,
            'file-system-id': filesystem_id,
        }
        self._send_post(url, payload=json.dumps(payload))

    def get_share(self, fs_id=None, name=None):
        """Look up a share by filesystem id or by name.

        Exactly one of ``fs_id``/``name`` should be provided; ``fs_id``
        takes precedence when both are given.

        :raises HSPItemNotFoundException: if no matching share exists.
        :raises HSPBackendException: if neither argument is provided.
        """
        if fs_id is not None:
            url = ('https://%s/hspapi/shares/list?file-system-id=%s' %
                   (self.host, fs_id))
        elif name is not None:
            url = ('https://%s/hspapi/shares/list?name=%s' %
                   (self.host, name))
        else:
            # FIX: previously fell through with ``url`` unbound and
            # crashed with UnboundLocalError; fail explicitly instead.
            msg = _("Either fs_id or name must be provided.")
            raise exception.HSPBackendException(msg=msg)

        share = self._send_get(url)

        try:
            return share['list'][0]
        except (TypeError, KeyError, IndexError):
            msg = _("Share %s does not exist or is not available.")
            if fs_id is not None:
                args = "for filesystem %s" % fs_id
            else:
                args = name
            raise exception.HSPItemNotFoundException(msg=msg % args)

    def delete_share(self, share_id):
        """Delete the share with id ``share_id``."""
        url = "https://%s/hspapi/shares/%s" % (self.host, share_id)
        self._send_delete(url)

    def add_access_rule(self, share_id, host_to, read_write):
        """Allow ``host_to`` access to ``share_id``.

        :param read_write: True for rw access, False for read-only.
        """
        url = "https://%s/hspapi/shares/%s/" % (self.host, share_id)
        payload = {
            "action": "add-access-rule",
            # Rule name encodes both share and host so it is unique.
            "name": share_id + host_to,
            "host-specification": host_to,
            "read-write": read_write,
        }
        self._send_post(url, payload=json.dumps(payload))

    def delete_access_rule(self, share_id, rule_name):
        """Remove access rule ``rule_name`` from share ``share_id``."""
        url = "https://%s/hspapi/shares/%s/" % (self.host, share_id)
        payload = {
            "action": "delete-access-rule",
            "name": rule_name,
        }
        self._send_post(url, payload=json.dumps(payload))

    def get_access_rules(self, share_id):
        """Return the list of access rules of ``share_id`` ([] if none)."""
        url = ("https://%s/hspapi/shares/%s/access-rules" %
               (self.host, share_id))
        rules = self._send_get(url)
        try:
            rules = rules['list']
        except (TypeError, KeyError, IndexError):
            # No rules (or empty/None response) maps to an empty list.
            rules = []
        return rules

    def get_cluster(self):
        """Return the first (only) HSP cluster.

        :raises HSPBackendException: if no cluster is reported.
        """
        url = "https://%s/hspapi/clusters/list" % self.host
        clusters = self._send_get(url)
        try:
            return clusters['list'][0]
        except (TypeError, KeyError, IndexError):
            msg = _("No cluster was found on HSP.")
            raise exception.HSPBackendException(msg=msg)

    @utils.retry(retry_param=exception.HSPTimeoutException, retries=10,
                 wait_random=True)
    def _wait_job_status(self, job_url, target_status):
        """Poll ``job_url`` until its status equals ``target_status``.

        Retried (with random backoff) up to 10 times on
        ``HSPTimeoutException`` via the ``utils.retry`` decorator.

        :raises HSPBackendException: if the job finished with ERROR.
        :raises HSPTimeoutException: if the job has not reached
            ``target_status`` yet (triggers a retry).
        """
        resp_json = self._send_get(job_url)

        status = resp_json['properties']['completion-status']

        if status == 'ERROR':
            msg = _("HSP job %(id)s failed. %(reason)s")
            job_id = resp_json['id']
            reason = resp_json['properties']['completion-details']
            raise exception.HSPBackendException(msg=msg % {'id': job_id,
                                                           'reason': reason})
        elif status != target_status:
            msg = _("Timeout while waiting for job %s to complete.")
            args = resp_json['id']
            raise exception.HSPTimeoutException(msg=msg % args)
class FPG(types.String, types.IPAddress):
    """oslo.config option type for a multi-pool backend entry.

    Parses a configuration value of the form::

        <FPG name>, <IP address 1>, ..., <IP address 4>

    into ``{fpg_name: [ip, ...]}``, where the name is validated as a
    string and each address as an IP address.  The number of addresses
    may be bounded.

    :param min_ip: minimum number of VFS IP addresses required.
    :param max_ip: maximum number of VFS IP addresses accepted.
    :param type_name: type name shown in the sample config file.
    """

    MAX_SUPPORTED_IP_PER_VFS = 4

    def __init__(self, min_ip=0, max_ip=MAX_SUPPORTED_IP_PER_VFS,
                 type_name='FPG'):
        # Initialize both mixin bases explicitly; each needs type_name.
        types.String.__init__(self, type_name=type_name)
        types.IPAddress.__init__(self, type_name=type_name)

        # Validate the configured bounds before accepting them.
        if max_ip < min_ip:
            msg = _("Pool's max acceptable IP cannot be less than min.")
            raise exception.HPE3ParInvalid(err=msg)

        if min_ip < 0:
            msg = _("Pools must be configured with zero or more IPs.")
            raise exception.HPE3ParInvalid(err=msg)

        if max_ip > FPG.MAX_SUPPORTED_IP_PER_VFS:
            msg = (_("Pool's max acceptable IP cannot be greater than "
                     "supported value=%s.") % FPG.MAX_SUPPORTED_IP_PER_VFS)
            raise exception.HPE3ParInvalid(err=msg)

        self.min_ip = min_ip
        self.max_ip = max_ip

    def __call__(self, value):
        """Convert a raw option string into ``{pool_name: [ip, ...]}``.

        :raises HPE3ParInvalid: on a blank value, a wrong IP count, or
            an unparsable IP address.
        """
        # Guard: the option must be present and non-blank.
        if value is None or value.strip(' ') == '':
            message = _("Invalid configuration. hpe3par_fpg must be set.")
            LOG.error(message)
            raise exception.HPE3ParInvalid(err=message)

        tokens = value.split(",")
        # First token is the pool (FPG) name; the rest are IP addresses.
        pool = tokens.pop(0).strip()

        if len(tokens) < self.min_ip:
            msg = (_("Require at least %s IPs configured per "
                     "pool") % self.min_ip)
            raise exception.HPE3ParInvalid(err=msg)
        if len(tokens) > self.max_ip:
            msg = (_("Cannot configure IPs more than max supported "
                     "%s IPs per pool") % self.max_ip)
            raise exception.HPE3ParInvalid(err=msg)

        addresses = []
        for token in tokens:
            # Validate as string first, then as an IP address.
            candidate = types.String.__call__(self, token.strip())
            try:
                addresses.append(types.IPAddress.__call__(self, candidate))
            except ValueError as verror:
                raise exception.HPE3ParInvalid(err=verror)

        return {pool: addresses}

    def __repr__(self):
        return 'FPG'

    def _formatter(self, value):
        return str(value)
cfg.StrOpt('hpe3par_cifs_admin_access_domain', default='LOCAL_CLUSTER', help="File system domain for the CIFS admin user."), cfg.StrOpt('hpe3par_share_mount_path', default='/mnt/', help="The path where shares will be mounted when deleting " "nested file trees."), ] CONF = cfg.CONF CONF.register_opts(HPE3PAR_OPTS) def to_list(var): """Convert var to list type if not""" if isinstance(var, str): return [var] else: return var class HPE3ParShareDriver(driver.ShareDriver): """HPE 3PAR driver for Manila. Supports NFS and CIFS protocols on arrays with File Persona. Version history:: 1.0.0 - Begin Liberty development (post-Kilo) 1.0.1 - Report thin/dedup/hp_flash_cache capabilities 1.0.2 - Add share server/share network support 2.0.0 - Rebranded HP to HPE 2.0.1 - Add access_level (e.g. read-only support) 2.0.2 - Add extend/shrink 2.0.3 - Remove file tree on delete when using nested shares #1538800 2.0.4 - Reduce the fsquota by share size when a share is deleted #1582931 2.0.5 - Add update_access support 2.0.6 - Multi pool support per backend 2.0.7 - Fix get_vfs() to correctly validate conf IP addresses at boot up #1621016 2.0.8 - Replace ConsistencyGroup with ShareGroup """ VERSION = "2.0.8" def __init__(self, *args, **kwargs): super(HPE3ParShareDriver, self).__init__((True, False), *args, **kwargs) self.configuration = kwargs.get('configuration', None) self.configuration.append_config_values(HPE3PAR_OPTS) self.configuration.append_config_values(driver.ssh_opts) self.configuration.append_config_values(config.global_opts) self.fpgs = {} self._hpe3par = None # mediator between driver and client def do_setup(self, context): """Any initialization the share driver does while starting.""" LOG.info("Starting share driver %(driver_name)s (%(version)s)", {'driver_name': self.__class__.__name__, 'version': self.VERSION}) mediator = hpe_3par_mediator.HPE3ParMediator( hpe3par_username=self.configuration.hpe3par_username, hpe3par_password=self.configuration.hpe3par_password, 
hpe3par_api_url=self.configuration.hpe3par_api_url, hpe3par_debug=self.configuration.hpe3par_debug, hpe3par_san_ip=self.configuration.hpe3par_san_ip, hpe3par_san_login=self.configuration.hpe3par_san_login, hpe3par_san_password=self.configuration.hpe3par_san_password, hpe3par_san_ssh_port=self.configuration.hpe3par_san_ssh_port, hpe3par_fstore_per_share=(self.configuration .hpe3par_fstore_per_share), hpe3par_require_cifs_ip=self.configuration.hpe3par_require_cifs_ip, hpe3par_cifs_admin_access_username=( self.configuration.hpe3par_cifs_admin_access_username), hpe3par_cifs_admin_access_password=( self.configuration.hpe3par_cifs_admin_access_password), hpe3par_cifs_admin_access_domain=( self.configuration.hpe3par_cifs_admin_access_domain), hpe3par_share_mount_path=( self.configuration.hpe3par_share_mount_path), my_ip=self.configuration.my_ip, ssh_conn_timeout=self.configuration.ssh_conn_timeout, ) mediator.do_setup() def _validate_pool_ips(addresses, conf_pool_ips): # Pool configured IP addresses should be subset of IP addresses # retured from vfs if not set(conf_pool_ips) <= set(addresses): msg = _("Incorrect configuration. " "Configuration pool IP address did not match with " "IP addresses at 3par array") raise exception.HPE3ParInvalid(err=msg) def _construct_fpg(): # FPG must be configured and must exist. # self.configuration.safe_get('hpe3par_fpg') will have value in # following format: # [ {'pool_name':['ip_addr', 'ip_addr', ...]}, ... ] for fpg in self.configuration.safe_get('hpe3par_fpg'): pool_name = list(fpg)[0] conf_pool_ips = fpg[pool_name] # Validate the FPG and discover the VFS # This also validates the client, connection, firmware, WSAPI, # FPG... vfs_info = mediator.get_vfs(pool_name) if self.driver_handles_share_servers: # Use discovered IP(s) from array self.fpgs[pool_name] = { vfs_info['vfsname']: vfs_info['vfsip']['address']} elif conf_pool_ips == []: # not DHSS and IPs not configured in manila.conf. 
if not vfs_info['vfsip']['address']: msg = _("Unsupported configuration. " "hpe3par_fpg must have IP address " "or be discoverable at 3PAR") LOG.error(msg) raise exception.HPE3ParInvalid(err=msg) else: # Use discovered pool ips self.fpgs[pool_name] = { vfs_info['vfsname']: vfs_info['vfsip']['address']} else: # not DHSS and IPs configured in manila.conf _validate_pool_ips(vfs_info['vfsip']['address'], conf_pool_ips) self.fpgs[pool_name] = { vfs_info['vfsname']: conf_pool_ips} _construct_fpg() # Don't set _hpe3par until it is ready. Otherwise _update_stats fails. self._hpe3par = mediator def _get_pool_location_from_share_host(self, share_instance_host): # Return pool name, vfs, IPs for a pool from share instance host pool_name = share_utils.extract_host(share_instance_host, level='pool') if not pool_name: message = (_("Pool is not available in the share host %s.") % share_instance_host) raise exception.InvalidHost(reason=message) if pool_name not in self.fpgs: message = (_("Pool location lookup failed. " "Could not find pool %s") % pool_name) raise exception.InvalidHost(reason=message) vfs = list(self.fpgs[pool_name])[0] ips = self.fpgs[pool_name][vfs] return (pool_name, vfs, ips) def _get_pool_location(self, share, share_server=None): # Return pool name, vfs, IPs for a pool from share host field # Use share_server if provided, instead of self.fpgs if share_server is not None: # When DHSS ips = share_server['backend_details'].get('ip') ips = to_list(ips) vfs = share_server['backend_details'].get('vfs') pool_name = share_server['backend_details'].get('fpg') return (pool_name, vfs, ips) else: # When DHSS = false return self._get_pool_location_from_share_host(share['host']) def check_for_setup_error(self): try: # Log the source SHA for support. Only do this with DEBUG. 
if LOG.isEnabledFor(log.DEBUG): LOG.debug('HPE3ParShareDriver SHA1: %s', self.sha1_hash(HPE3ParShareDriver)) LOG.debug('HPE3ParMediator SHA1: %s', self.sha1_hash(hpe_3par_mediator.HPE3ParMediator)) except Exception as e: # Don't let any exceptions during the SHA1 logging interfere # with startup. This is just debug info to identify the source # code. If it doesn't work, just log a debug message. LOG.debug('Source code SHA1 not logged due to: %s', str(e)) @staticmethod def sha1_hash(clazz): """Get the SHA1 hash for the source of a class.""" source_file = inspect.getsourcefile(clazz) file_size = os.path.getsize(source_file) sha1 = hashlib.sha1(usedforsecurity=False) sha1.update(("blob %u\0" % file_size).encode('utf-8')) with open(source_file, 'rb') as f: sha1.update(f.read()) return sha1.hexdigest() def get_network_allocations_number(self): return 1 def choose_share_server_compatible_with_share(self, context, share_servers, share, snapshot=None, share_group=None, encryption_key_ref=None): """Method that allows driver to choose share server for provided share. If compatible share-server is not found, method should return None. :param context: Current context :param share_servers: list with share-server models :param share: share model :param snapshot: snapshot model :param share_group: ShareGroup model with shares :returns: share-server or None """ # If creating in a share group, raise exception if share_group: msg = _("HPE 3PAR driver does not support share group") raise exception.InvalidRequest(message=msg) pool_name = share_utils.extract_host(share['host'], level='pool') for share_server in share_servers: if share_server['backend_details'].get('fpg') == pool_name: return share_server return None @staticmethod def _validate_network_type(network_type): if network_type not in ('flat', 'vlan', None): reason = _('Invalid network type. 
%s is not supported by the ' '3PAR driver.') raise exception.NetworkBadConfigurationException( reason=reason % network_type) def _create_share_server(self, network_info, request_host=None): """Is called to create/setup share server""" # Return pool name, vfs, IPs for a pool pool_name, vfs, ips = self._get_pool_location_from_share_host( request_host) ip = network_info['network_allocations'][0]['ip_address'] if ip not in ips: # Besides DHSS, admin could have setup IP to VFS directly on array if len(ips) > (FPG.MAX_SUPPORTED_IP_PER_VFS - 1): message = (_("Pool %s has exceeded 3PAR's " "max supported VFS IP address") % pool_name) LOG.error(message) raise exception.Invalid(message) subnet = utils.cidr_to_netmask(network_info['cidr']) vlantag = network_info['segmentation_id'] self._hpe3par.create_fsip(ip, subnet, vlantag, pool_name, vfs) # Update in global saved config, self.fpgs[pool_name] ips.append(ip) return {'share_server_name': network_info['server_id'], 'share_server_id': network_info['server_id'], 'ip': ip, 'subnet': subnet, 'vlantag': vlantag if vlantag else 0, 'fpg': pool_name, 'vfs': vfs} def _setup_server(self, network_info, metadata=None): # NOTE(felipe_rodrigues): keep legacy network_info support as a dict. 
network_info = network_info[0] LOG.debug("begin _setup_server with %s", network_info) self._validate_network_type(network_info['network_type']) if metadata is not None and metadata['request_host'] is not None: return self._create_share_server(network_info, metadata['request_host']) def _teardown_server(self, server_details, security_services=None): LOG.debug("begin _teardown_server with %s", server_details) fpg = server_details.get('fpg') vfs = server_details.get('vfs') ip = server_details.get('ip') self._hpe3par.remove_fsip(ip, fpg, vfs) if ip in self.fpgs[fpg][vfs]: self.fpgs[fpg][vfs].remove(ip) @staticmethod def build_share_comment(share): """Create an informational only comment to help admins and testers.""" info = { 'name': share['display_name'], 'host': share['host'], 'now': datetime.datetime.now().strftime('%H%M%S'), } acceptable = re.compile(r'[^a-zA-Z0-9_=:@# \-]+', re.UNICODE) comment = ("OpenStack Manila - host=%(host)s orig_name=%(name)s " "created=%(now)s" % info) return acceptable.sub('_', comment)[:254] # clean and truncate def create_share(self, context, share, share_server=None): """Is called to create share.""" fpg, vfs, ips = self._get_pool_location(share, share_server) protocol = share['share_proto'] extra_specs = share_types.get_extra_specs_from_share(share) path = self._hpe3par.create_share( share['project_id'], share['id'], protocol, extra_specs, fpg, vfs, size=share['size'], comment=self.build_share_comment(share) ) return self._hpe3par.build_export_locations(protocol, ips, path) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create share from snapshot.""" fpg, vfs, ips = self._get_pool_location(share, share_server) protocol = share['share_proto'] extra_specs = share_types.get_extra_specs_from_share(share) path = self._hpe3par.create_share_from_snapshot( share['id'], protocol, extra_specs, share['project_id'], snapshot['share_id'], snapshot['id'], fpg, vfs, ips, 
size=share['size'], comment=self.build_share_comment(share) ) return self._hpe3par.build_export_locations(protocol, ips, path) def delete_share(self, context, share, share_server=None): """Deletes share and its fstore.""" fpg, vfs, ips = self._get_pool_location(share, share_server) self._hpe3par.delete_share(share['project_id'], share['id'], share['size'], share['share_proto'], fpg, vfs, ips[0]) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot of a share.""" fpg, vfs, ips = self._get_pool_location(snapshot['share'], share_server) self._hpe3par.create_snapshot(snapshot['share']['project_id'], snapshot['share']['id'], snapshot['share']['share_proto'], snapshot['id'], fpg, vfs) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot of a share.""" fpg, vfs, ips = self._get_pool_location(snapshot['share'], share_server) self._hpe3par.delete_snapshot(snapshot['share']['project_id'], snapshot['share']['id'], snapshot['share']['share_proto'], snapshot['id'], fpg, vfs) def ensure_share(self, context, share, share_server=None): pass def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access to the share.""" extra_specs = None if 'NFS' == share['share_proto']: # Avoiding DB call otherwise extra_specs = share_types.get_extra_specs_from_share(share) fpg, vfs, ips = self._get_pool_location(share, share_server) self._hpe3par.update_access(share['project_id'], share['id'], share['share_proto'], extra_specs, access_rules, add_rules, delete_rules, fpg, vfs) def extend_share(self, share, new_size, share_server=None): """Extends size of existing share.""" fpg, vfs, ips = self._get_pool_location(share, share_server) self._hpe3par.resize_share(share['project_id'], share['id'], share['share_proto'], new_size, share['size'], fpg, vfs) def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" fpg, vfs, ips = 
self._get_pool_location(share, share_server) self._hpe3par.resize_share(share['project_id'], share['id'], share['share_proto'], new_size, share['size'], fpg, vfs) def _update_share_stats(self): """Retrieve stats info from share group.""" backend_name = self.configuration.safe_get( 'share_backend_name') or "HPE_3PAR" max_over_subscription_ratio = self.configuration.safe_get( 'max_over_subscription_ratio') reserved_share_percentage = self.configuration.safe_get( 'reserved_share_percentage') if reserved_share_percentage is None: reserved_share_percentage = 0 reserved_share_from_snapshot_percentage = self.configuration.safe_get( 'reserved_share_from_snapshot_percentage') if reserved_share_from_snapshot_percentage is None: reserved_share_from_snapshot_percentage = reserved_share_percentage reserved_share_extend_percentage = self.configuration.safe_get( 'reserved_share_extend_percentage') if reserved_share_extend_percentage is None: reserved_share_extend_percentage = reserved_share_percentage stats = { 'share_backend_name': backend_name, 'driver_handles_share_servers': self.driver_handles_share_servers, 'vendor_name': 'HPE', 'driver_version': self.VERSION, 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 0, 'free_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'reserved_percentage': reserved_share_percentage, 'reserved_snapshot_percentage': reserved_share_from_snapshot_percentage, 'reserved_share_extend_percentage': reserved_share_extend_percentage, 'max_over_subscription_ratio': max_over_subscription_ratio, 'qos': False, 'thin_provisioning': True, # 3PAR default is thin } if not self._hpe3par: LOG.info( "Skipping capacity and capabilities update. 
Setup has not " "completed.") else: for fpg in self.fpgs: fpg_status = self._hpe3par.get_fpg_status(fpg) fpg_status['reserved_percentage'] = reserved_share_percentage fpg_status['reserved_snapshot_percentage'] = ( reserved_share_from_snapshot_percentage) fpg_status['reserved_share_extend_percentage'] = ( reserved_share_extend_percentage) LOG.debug("FPG status = %s.", fpg_status) stats.setdefault('pools', []).append(fpg_status) super(HPE3ParShareDriver, self)._update_share_stats(stats) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/hpe/hpe_3par_mediator.py0000664000175000017500000021020300000000000023610 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HPE 3PAR Mediator for OpenStack Manila. This 'mediator' de-couples the 3PAR focused client from the OpenStack focused driver. 
""" from oslo_log import log from oslo_utils import importutils from oslo_utils import units from manila.data import utils as data_utils from manila import exception from manila.i18n import _ from manila import utils hpe3parclient = importutils.try_import("hpe3parclient") if hpe3parclient: from hpe3parclient import file_client # pylint: disable=import-error LOG = log.getLogger(__name__) MIN_CLIENT_VERSION = (4, 0, 0) DENY = '-' ALLOW = '+' OPEN_STACK_MANILA = 'OpenStack Manila' FULL = 1 THIN = 2 DEDUPE = 6 ENABLED = 1 DISABLED = 2 CACHE = 'cache' CONTINUOUS_AVAIL = 'continuous_avail' ACCESS_BASED_ENUM = 'access_based_enum' SMB_EXTRA_SPECS_MAP = { CACHE: CACHE, CONTINUOUS_AVAIL: 'ca', ACCESS_BASED_ENUM: 'abe', } IP_ALREADY_EXISTS = 'IP address %s already exists' USER_ALREADY_EXISTS = '"allow" permission already exists for "%s"' DOES_NOT_EXIST = 'does not exist, cannot' LOCAL_IP = '127.0.0.1' LOCAL_IP_RO = '127.0.0.2' SUPER_SHARE = 'OPENSTACK_SUPER_SHARE' TMP_RO_SNAP_EXPORT = "Temp RO snapshot export as source for creating RW share." class HPE3ParMediator(object): """3PAR client-facing code for the 3PAR driver. Version history: 1.0.0 - Begin Liberty development (post-Kilo) 1.0.1 - Report thin/dedup/hp_flash_cache capabilities 1.0.2 - Add share server/share network support 1.0.3 - Use hp3par prefix for share types and capabilities 2.0.0 - Rebranded HP to HPE 2.0.1 - Add access_level (e.g. 
read-only support) 2.0.2 - Add extend/shrink 2.0.3 - Fix SMB read-only access (added in 2.0.1) 2.0.4 - Remove file tree on delete when using nested shares #1538800 2.0.5 - Reduce the fsquota by share size when a share is deleted #1582931 2.0.6 - Read-write share from snapshot (using driver mount and copy) 2.0.7 - Add update_access support 2.0.8 - Multi pools support per backend 2.0.9 - Fix get_vfs() to correctly validate conf IP addresses at boot up #1621016 """ VERSION = "2.0.9" def __init__(self, **kwargs): self.hpe3par_username = kwargs.get('hpe3par_username') self.hpe3par_password = kwargs.get('hpe3par_password') self.hpe3par_api_url = kwargs.get('hpe3par_api_url') self.hpe3par_debug = kwargs.get('hpe3par_debug') self.hpe3par_san_ip = kwargs.get('hpe3par_san_ip') self.hpe3par_san_login = kwargs.get('hpe3par_san_login') self.hpe3par_san_password = kwargs.get('hpe3par_san_password') self.hpe3par_san_ssh_port = kwargs.get('hpe3par_san_ssh_port') self.hpe3par_san_private_key = kwargs.get('hpe3par_san_private_key') self.hpe3par_fstore_per_share = kwargs.get('hpe3par_fstore_per_share') self.hpe3par_require_cifs_ip = kwargs.get('hpe3par_require_cifs_ip') self.hpe3par_cifs_admin_access_username = ( kwargs.get('hpe3par_cifs_admin_access_username')) self.hpe3par_cifs_admin_access_password = ( kwargs.get('hpe3par_cifs_admin_access_password')) self.hpe3par_cifs_admin_access_domain = ( kwargs.get('hpe3par_cifs_admin_access_domain')) self.hpe3par_share_mount_path = kwargs.get('hpe3par_share_mount_path') self.my_ip = kwargs.get('my_ip') self.ssh_conn_timeout = kwargs.get('ssh_conn_timeout') self._client = None self.client_version = None @staticmethod def no_client(): return hpe3parclient is None def do_setup(self): if self.no_client(): msg = _('You must install hpe3parclient before using the 3PAR ' 'driver. 
Run "pip install --upgrade python-3parclient" ' 'to upgrade the hpe3parclient.') LOG.error(msg) raise exception.HPE3ParInvalidClient(message=msg) self.client_version = hpe3parclient.version_tuple if self.client_version < MIN_CLIENT_VERSION: msg = (_('Invalid hpe3parclient version found (%(found)s). ' 'Version %(minimum)s or greater required. Run "pip' ' install --upgrade python-3parclient" to upgrade' ' the hpe3parclient.') % {'found': '.'.join(map(str, self.client_version)), 'minimum': '.'.join(map(str, MIN_CLIENT_VERSION))}) LOG.error(msg) raise exception.HPE3ParInvalidClient(message=msg) try: self._client = file_client.HPE3ParFilePersonaClient( self.hpe3par_api_url) except Exception as e: msg = (_('Failed to connect to HPE 3PAR File Persona Client: %s') % str(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) try: ssh_kwargs = {} if self.hpe3par_san_ssh_port: ssh_kwargs['port'] = self.hpe3par_san_ssh_port if self.ssh_conn_timeout: ssh_kwargs['conn_timeout'] = self.ssh_conn_timeout if self.hpe3par_san_private_key: ssh_kwargs['privatekey'] = self.hpe3par_san_private_key self._client.setSSHOptions( self.hpe3par_san_ip, self.hpe3par_san_login, self.hpe3par_san_password, **ssh_kwargs ) except Exception as e: msg = (_('Failed to set SSH options for HPE 3PAR File Persona ' 'Client: %s') % str(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) LOG.info("HPE3ParMediator %(version)s, " "hpe3parclient %(client_version)s", {"version": self.VERSION, "client_version": hpe3parclient.get_version_string()}) try: wsapi_version = self._client.getWsApiVersion()['build'] LOG.info("3PAR WSAPI %s", wsapi_version) except Exception as e: msg = (_('Failed to get 3PAR WSAPI version: %s') % str(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) if self.hpe3par_debug: self._client.debug_rest(True) # Includes SSH debug (setSSH above) def _wsapi_login(self): try: self._client.login(self.hpe3par_username, 
self.hpe3par_password) except Exception as e: msg = (_("Failed to Login to 3PAR (%(url)s) as %(user)s " "because: %(err)s") % {'url': self.hpe3par_api_url, 'user': self.hpe3par_username, 'err': str(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def _wsapi_logout(self): try: self._client.http.unauthenticate() except Exception as e: msg = ("Failed to Logout from 3PAR (%(url)s) because %(err)s") LOG.warning(msg, {'url': self.hpe3par_api_url, 'err': str(e)}) # don't raise exception on logout() @staticmethod def build_export_locations(protocol, ips, path): if not ips: message = _('Failed to build export location due to missing IP.') raise exception.InvalidInput(reason=message) if not path: message = _('Failed to build export location due to missing path.') raise exception.InvalidInput(reason=message) share_proto = HPE3ParMediator.ensure_supported_protocol(protocol) if share_proto == 'nfs': return ['%s:%s' % (ip, path) for ip in ips] else: return [r'\\%s\%s' % (ip, path) for ip in ips] def get_provisioned_gb(self, fpg): total_mb = 0 try: result = self._client.getfsquota(fpg=fpg) except Exception as e: result = {'message': str(e)} error_msg = result.get('message') if error_msg: message = (_('Error while getting fsquotas for FPG ' '%(fpg)s: %(msg)s') % {'fpg': fpg, 'msg': error_msg}) LOG.error(message) raise exception.ShareBackendException(msg=message) for fsquota in result['members']: total_mb += float(fsquota['hardBlock']) return total_mb / units.Ki def get_fpg_status(self, fpg): """Get capacity and capabilities for FPG.""" try: result = self._client.getfpg(fpg) except Exception as e: msg = (_('Failed to get capacity for fpg %(fpg)s: %(e)s') % {'fpg': fpg, 'e': str(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) if result['total'] != 1: msg = (_('Failed to get capacity for fpg %s.') % fpg) LOG.error(msg) raise exception.ShareBackendException(msg=msg) member = result['members'][0] total_capacity_gb = float(member['capacityKiB']) / 
def get_fpg_status(self, fpg):
    """Get capacity and capabilities for FPG."""
    try:
        result = self._client.getfpg(fpg)
    except Exception as e:
        msg = (_('Failed to get capacity for fpg %(fpg)s: %(e)s') %
               {'fpg': fpg, 'e': str(e)})
        LOG.error(msg)
        raise exception.ShareBackendException(msg=msg)

    if result['total'] != 1:
        msg = (_('Failed to get capacity for fpg %s.') % fpg)
        LOG.error(msg)
        raise exception.ShareBackendException(msg=msg)

    fpg_info = result['members'][0]
    total_capacity_gb = float(fpg_info['capacityKiB']) / units.Mi
    free_capacity_gb = float(fpg_info['availCapacityKiB']) / units.Mi

    volumes = fpg_info['vvs']
    if isinstance(volumes, list):
        volume = volumes[0]  # Use first name from list
    else:
        volume = volumes  # There is just a name

    # Volume queries go over WSAPI, so wrap them in a session.
    self._wsapi_login()
    try:
        volume_info = self._client.getVolume(volume)
        volume_set = self._client.getVolumeSet(fpg)
    finally:
        self._wsapi_logout()

    provisioning_type = volume_info['provisioningType']
    if provisioning_type not in (THIN, FULL, DEDUPE):
        msg = (_('Unexpected provisioning type for FPG %(fpg)s: '
                 '%(ptype)s.') % {'fpg': fpg, 'ptype': provisioning_type})
        LOG.error(msg)
        raise exception.ShareBackendException(msg=msg)

    dedupe = provisioning_type == DEDUPE
    thin_provisioning = provisioning_type in (THIN, DEDUPE)
    flash_cache_policy = volume_set.get('flashCachePolicy', DISABLED)
    hpe3par_flash_cache = flash_cache_policy == ENABLED

    status = {
        'pool_name': fpg,
        'total_capacity_gb': total_capacity_gb,
        'free_capacity_gb': free_capacity_gb,
        'thin_provisioning': thin_provisioning,
        'dedupe': dedupe,
        'hpe3par_flash_cache': hpe3par_flash_cache,
        # Legacy key kept for backward compatibility.
        'hp3par_flash_cache': hpe3par_flash_cache,
    }

    if thin_provisioning:
        status['provisioned_capacity_gb'] = self.get_provisioned_gb(fpg)

    return status
Got %s.') % protocol) LOG.error(message) raise exception.InvalidShareAccess(reason=message) return protocol @staticmethod def other_protocol(share_proto): """Given 'nfs' or 'smb' (or equivalent) return the other one.""" protocol = HPE3ParMediator.ensure_supported_protocol(share_proto) return 'nfs' if protocol == 'smb' else 'smb' @staticmethod def ensure_prefix(uid, protocol=None, readonly=False): if uid.startswith('osf-'): return uid if protocol: proto = '-%s' % HPE3ParMediator.ensure_supported_protocol(protocol) else: proto = '' if readonly: ro = '-ro' else: ro = '' # Format is osf[-ro]-{nfs|smb}-uid return 'osf%s%s-%s' % (proto, ro, uid) @staticmethod def _get_nfs_options(extra_specs, readonly): """Validate the NFS extra_specs and return the options to use.""" nfs_options = extra_specs.get('hpe3par:nfs_options') if nfs_options is None: nfs_options = extra_specs.get('hp3par:nfs_options') if nfs_options: msg = ("hp3par:nfs_options is deprecated. Use " "hpe3par:nfs_options instead.") LOG.warning(msg) if nfs_options: options = nfs_options.split(',') else: options = [] # rw, ro, and (no)root_squash (in)secure options are not allowed in # extra_specs because they will be forcibly set below. # no_subtree_check and fsid are not allowed per 3PAR support. # Other strings will be allowed to be sent to the 3PAR which will do # further validation. options_not_allowed = ['ro', 'rw', 'no_root_squash', 'root_squash', 'secure', 'insecure', 'no_subtree_check', 'fsid'] invalid_options = [ option for option in options if option in options_not_allowed ] if invalid_options: raise exception.InvalidInput(_('Invalid hp3par:nfs_options or ' 'hpe3par:nfs_options in ' 'extra-specs. 
The following ' 'options are not allowed: %s') % invalid_options) options.append('ro' if readonly else 'rw') options.append('no_root_squash') options.append('insecure') return ','.join(options) def _build_createfshare_kwargs(self, protocol, fpg, fstore, readonly, sharedir, extra_specs, comment, client_ip=None): createfshare_kwargs = dict(fpg=fpg, fstore=fstore, sharedir=sharedir, comment=comment) if 'hp3par_flash_cache' in extra_specs: msg = ("hp3par_flash_cache is deprecated. Use " "hpe3par_flash_cache instead.") LOG.warning(msg) if protocol == 'nfs': if client_ip: createfshare_kwargs['clientip'] = client_ip else: # New NFS shares needs seed IP to prevent "all" access. # Readonly and readwrite NFS shares client IPs cannot overlap. if readonly: createfshare_kwargs['clientip'] = LOCAL_IP_RO else: createfshare_kwargs['clientip'] = LOCAL_IP options = self._get_nfs_options(extra_specs, readonly) createfshare_kwargs['options'] = options else: # To keep the original (Kilo, Liberty) behavior where CIFS IP # access rules were required in addition to user rules enable # this to use a seed IP instead of the default (all allowed). if self.hpe3par_require_cifs_ip: if client_ip: createfshare_kwargs['allowip'] = client_ip else: createfshare_kwargs['allowip'] = LOCAL_IP smb_opts = (ACCESS_BASED_ENUM, CONTINUOUS_AVAIL, CACHE) for smb_opt in smb_opts: opt_value = extra_specs.get('hpe3par:smb_%s' % smb_opt) if opt_value is None: opt_value = extra_specs.get('hp3par:smb_%s' % smb_opt) if opt_value: msg = ("hp3par:smb_* is deprecated. 
def _build_createfshare_kwargs(self, protocol, fpg, fstore, readonly,
                               sharedir, extra_specs, comment,
                               client_ip=None):
    """Assemble the kwargs dict passed to the 3PAR createfshare call."""
    createfshare_kwargs = dict(fpg=fpg,
                               fstore=fstore,
                               sharedir=sharedir,
                               comment=comment)

    if 'hp3par_flash_cache' in extra_specs:
        msg = ("hp3par_flash_cache is deprecated. Use "
               "hpe3par_flash_cache instead.")
        LOG.warning(msg)

    if protocol == 'nfs':
        if client_ip:
            createfshare_kwargs['clientip'] = client_ip
        else:
            # New NFS shares needs seed IP to prevent "all" access.
            # Readonly and readwrite NFS shares client IPs cannot overlap.
            seed_ip = LOCAL_IP_RO if readonly else LOCAL_IP
            createfshare_kwargs['clientip'] = seed_ip
        createfshare_kwargs['options'] = self._get_nfs_options(extra_specs,
                                                               readonly)
    else:
        # To keep the original (Kilo, Liberty) behavior where CIFS IP
        # access rules were required in addition to user rules enable
        # this to use a seed IP instead of the default (all allowed).
        if self.hpe3par_require_cifs_ip:
            createfshare_kwargs['allowip'] = client_ip or LOCAL_IP

        for smb_opt in (ACCESS_BASED_ENUM, CONTINUOUS_AVAIL, CACHE):
            opt_value = extra_specs.get('hpe3par:smb_%s' % smb_opt)
            if opt_value is None:
                opt_value = extra_specs.get('hp3par:smb_%s' % smb_opt)
                if opt_value:
                    msg = ("hp3par:smb_* is deprecated. Use "
                           "hpe3par:smb_* instead.")
                    LOG.warning(msg)
            if opt_value:
                createfshare_kwargs[SMB_EXTRA_SPECS_MAP[smb_opt]] = opt_value

    return createfshare_kwargs

def _update_capacity_quotas(self, fstore, new_size, old_size, fpg, vfs):
    """Adjust fstore quotas for a share size change; raise on failure."""

    @utils.synchronized('hpe3par-update-quota-' + fstore)
    def _sync_update_capacity_quotas(fstore, new_size, old_size, fpg, vfs):
        """Update 3PAR quotas and return setfsquota output."""
        if self.hpe3par_fstore_per_share:
            # Dedicated fstore: quota equals the new share size.
            hcapacity = str(new_size * units.Ki)
            scapacity = hcapacity
        else:
            # Shared fstore: apply the delta on top of current quotas.
            hard_size_mb = (new_size - old_size) * units.Ki
            soft_size_mb = hard_size_mb
            result = self._client.getfsquota(
                fpg=fpg, vfs=vfs, fstore=fstore)
            LOG.debug("getfsquota result=%s", result)
            quotas = result['members']
            if len(quotas) == 1:
                hard_size_mb += int(quotas[0].get('hardBlock', '0'))
                soft_size_mb += int(quotas[0].get('softBlock', '0'))
            hcapacity = str(hard_size_mb)
            scapacity = str(soft_size_mb)
        return self._client.setfsquota(vfs,
                                       fpg=fpg,
                                       fstore=fstore,
                                       scapacity=scapacity,
                                       hcapacity=hcapacity)

    try:
        result = _sync_update_capacity_quotas(
            fstore, new_size, old_size, fpg, vfs)
        LOG.debug("setfsquota result=%s", result)
    except Exception as e:
        msg = (_('Failed to update capacity quota '
                 '%(size)s on %(fstore)s with exception: %(e)s') %
               {'size': new_size - old_size,
                'fstore': fstore,
                'e': str(e)})
        LOG.error(msg)
        raise exception.ShareBackendException(msg=msg)

    # Non-empty result is an error message returned from the 3PAR
    if result:
        msg = (_('Failed to update capacity quota '
                 '%(size)s on %(fstore)s with error: %(error)s') %
               {'size': new_size - old_size,
                'fstore': fstore,
                'error': result})
        LOG.error(msg)
        raise exception.ShareBackendException(msg=msg)
def _create_share(self, project_id, share_id, protocol, extra_specs, fpg,
                  vfs, fstore, sharedir, readonly, size, comment,
                  client_ip=None):
    """Create fstore (if needed) and fshare; return the fshare dict."""
    share_name = self.ensure_prefix(share_id, readonly=readonly)

    if not (sharedir or self.hpe3par_fstore_per_share):
        sharedir = share_name

    use_existing_fstore = bool(fstore)
    if not use_existing_fstore:
        if self.hpe3par_fstore_per_share:
            # Do not use -ro in the fstore name.
            fstore = self.ensure_prefix(share_id, readonly=False)
        else:
            fstore = self.ensure_prefix(project_id, protocol)

    createfshare_kwargs = self._build_createfshare_kwargs(
        protocol, fpg, fstore, readonly, sharedir, extra_specs, comment,
        client_ip=client_ip)

    if not use_existing_fstore:
        try:
            result = self._client.createfstore(
                vfs, fstore, fpg=fpg, comment=comment)
            LOG.debug("createfstore result=%s", result)
        except Exception as e:
            msg = (_('Failed to create fstore %(fstore)s: %(e)s') %
                   {'fstore': fstore, 'e': str(e)})
            LOG.exception(msg)
            raise exception.ShareBackendException(msg=msg)

        if size:
            self._update_capacity_quotas(fstore, size, 0, fpg, vfs)

    try:
        if readonly and protocol == 'nfs':
            # For NFS, RO is a 2nd 3PAR share pointing to same sharedir
            share_name = self.ensure_prefix(share_id, readonly=readonly)
        result = self._client.createfshare(protocol,
                                           vfs,
                                           share_name,
                                           **createfshare_kwargs)
        LOG.debug("createfshare result=%s", result)
    except Exception as e:
        msg = (_('Failed to create share %(share_name)s: %(e)s') %
               {'share_name': share_name, 'e': str(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(msg=msg)

    # Read the share back to return its authoritative attributes.
    try:
        result = self._client.getfshare(
            protocol, share_name, fpg=fpg, vfs=vfs, fstore=fstore)
        LOG.debug("getfshare result=%s", result)
    except Exception as e:
        msg = (_('Failed to get fshare %(share_name)s after creating it: '
                 '%(e)s') % {'share_name': share_name, 'e': str(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(msg=msg)

    if result['total'] != 1:
        msg = (_('Failed to get fshare %(share_name)s after creating it. '
                 'Expected to get 1 fshare. Got %(total)s.') %
               {'share_name': share_name, 'total': result['total']})
        LOG.error(msg)
        raise exception.ShareBackendException(msg=msg)

    return result['members'][0]
def create_share(self, project_id, share_id, share_proto, extra_specs,
                 fpg, vfs,
                 fstore=None, sharedir=None, readonly=False, size=None,
                 comment=OPEN_STACK_MANILA,
                 client_ip=None):
    """Create the share and return its path.

    This method can create a share when called by the driver or when
    called locally from create_share_from_snapshot().  The optional
    parameters allow re-use.

    :param project_id: The tenant ID.
    :param share_id: The share-id with or without osf- prefix.
    :param share_proto: The protocol (to map to smb or nfs)
    :param extra_specs: The share type extra-specs
    :param fpg: The file provisioning group
    :param vfs: The virtual file system
    :param fstore: (optional) The file store.  When provided, an existing
        file store is used.  Otherwise one is created.
    :param sharedir: (optional) Share directory.
    :param readonly: (optional) Create share as read-only.
    :param size: (optional) Size limit for file store if creating one.
    :param comment: (optional) Comment to set on the share.
    :param client_ip: (optional) IP address to give access to.
    :return: share path string
    """
    protocol = self.ensure_supported_protocol(share_proto)
    share = self._create_share(project_id,
                               share_id,
                               protocol,
                               extra_specs,
                               fpg,
                               vfs,
                               fstore,
                               sharedir,
                               readonly,
                               size,
                               comment,
                               client_ip=client_ip)
    # NFS exports are addressed by path, SMB shares by name.
    if protocol == 'nfs':
        return share['sharePath']
    return share['shareName']
def create_share_from_snapshot(self, share_id, share_proto, extra_specs,
                               orig_project_id, orig_share_id,
                               snapshot_id, fpg, vfs, ips,
                               size=None,
                               comment=OPEN_STACK_MANILA):
    """Create a new RW share populated from an existing snapshot."""
    protocol = self.ensure_supported_protocol(share_proto)
    snapshot_tag = self.ensure_prefix(snapshot_id)
    orig_share_name = self.ensure_prefix(orig_share_id)

    snapshot = self._find_fsnap(orig_project_id,
                                orig_share_name,
                                protocol,
                                snapshot_tag,
                                fpg,
                                vfs)
    if not snapshot:
        msg = (_('Failed to create share from snapshot for '
                 'FPG/VFS/tag %(fpg)s/%(vfs)s/%(tag)s. '
                 'Snapshot not found.') %
               {'fpg': fpg, 'vfs': vfs, 'tag': snapshot_tag})
        LOG.error(msg)
        raise exception.ShareBackendException(msg=msg)

    fstore = snapshot['fstoreName']
    if fstore == orig_share_name:
        # No subdir for original share created with fstore_per_share
        sharedir = '.snapshot/%s' % snapshot['snapName']
    else:
        sharedir = '.snapshot/%s/%s' % (snapshot['snapName'],
                                        orig_share_name)

    if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username
                              or
                              not self.hpe3par_cifs_admin_access_password):
        LOG.warning("hpe3par_cifs_admin_access_username and "
                    "hpe3par_cifs_admin_access_password must be "
                    "provided in order for CIFS shares created from "
                    "snapshots to be writable.")
        # Without admin credentials we can only expose the snapshot RO.
        return self.create_share(
            orig_project_id,
            share_id,
            protocol,
            extra_specs,
            fpg,
            vfs,
            fstore=fstore,
            sharedir=sharedir,
            readonly=True,
            comment=comment,
        )

    # Export the snapshot as read-only to copy from.
    temp = ' '.join((comment, TMP_RO_SNAP_EXPORT))
    source_path = self.create_share(
        orig_project_id,
        share_id,
        protocol,
        extra_specs,
        fpg,
        vfs,
        fstore=fstore,
        sharedir=sharedir,
        readonly=True,
        comment=temp,
        client_ip=self.my_ip
    )

    try:
        share_name = self.ensure_prefix(share_id)
        dest_path = self.create_share(
            orig_project_id,
            share_id,
            protocol,
            extra_specs,
            fpg,
            vfs,
            fstore=fstore,
            readonly=False,
            size=size,
            comment=comment,
            client_ip=','.join((self.my_ip, LOCAL_IP))
        )

        try:
            if protocol == 'smb':
                self._grant_admin_smb_access(
                    protocol, fpg, vfs, fstore, comment, share=share_name)
                ro_share_name = self.ensure_prefix(share_id,
                                                   readonly=True)
                self._grant_admin_smb_access(
                    protocol, fpg, vfs, fstore, temp, share=ro_share_name)

            source_locations = self.build_export_locations(
                protocol, ips, source_path)
            dest_locations = self.build_export_locations(
                protocol, ips, dest_path)

            self._copy_share_data(
                share_id, source_locations[0], dest_locations[0], protocol)

            # Revoke the admin access that was needed to copy to the dest.
            if protocol == 'nfs':
                self._change_access(DENY,
                                    orig_project_id,
                                    share_id,
                                    protocol,
                                    'ip',
                                    self.my_ip,
                                    'rw',
                                    fpg,
                                    vfs)
            else:
                self._revoke_admin_smb_access(
                    protocol, fpg, vfs, fstore, comment)

        except Exception as e:
            msg = ('Exception during mount and copy from RO snapshot '
                   'to RW share: %s')
            LOG.error(msg, e)
            # Clean up the partially-populated destination share.
            self._delete_share(share_name, protocol, fpg, vfs, fstore)
            raise

    finally:
        # The temporary RO export is always removed.
        self._delete_ro_share(
            orig_project_id, share_id, protocol, fpg, vfs, fstore)

    return dest_path

def _copy_share_data(self, dest_id, source_location, dest_location,
                     protocol):
    """Mount source and destination exports and copy the data across."""
    mount_location = "%s%s" % (self.hpe3par_share_mount_path, dest_id)
    source_share_dir = '/'.join((mount_location, "source_snap"))
    dest_share_dir = '/'.join((mount_location, "dest_share"))

    dirs_to_remove = []
    dirs_to_unmount = []
    try:
        utils.execute('mkdir', '-p', source_share_dir, run_as_root=True)
        dirs_to_remove.append(source_share_dir)
        self._mount_share(protocol, source_location, source_share_dir)
        dirs_to_unmount.append(source_share_dir)

        utils.execute('mkdir', dest_share_dir, run_as_root=True)
        dirs_to_remove.append(dest_share_dir)
        self._mount_share(protocol, dest_location, dest_share_dir)
        dirs_to_unmount.append(dest_share_dir)

        self._copy_data(source_share_dir, dest_share_dir)
    finally:
        # Unmount/remove only what was successfully set up.
        for d in dirs_to_unmount:
            self._unmount_share(d)
        if dirs_to_remove:
            dirs_to_remove.append(mount_location)
            utils.execute('rmdir', *dirs_to_remove, run_as_root=True)

def _copy_data(self, source_share_dir, dest_share_dir):
    """Copy files between mounted dirs; raise unless progress hits 100%."""
    err_msg = None
    err_data = None
    try:
        copy = data_utils.Copy(source_share_dir, dest_share_dir, '')
        copy.run()
        progress = copy.get_progress()['total_progress']
        if progress != 100:
            err_msg = _("Failed to copy data, reason: "
                        "Total progress %d != 100.")
            err_data = progress
    except Exception as err:
        err_msg = _("Failed to copy data, reason: %s.")
        err_data = str(err)

    if err_msg:
        raise exception.ShareBackendException(msg=err_msg % err_data)
def _delete_share(self, share_name, protocol, fpg, vfs, fstore):
    """Remove a single fshare; raise ShareBackendException on failure."""
    try:
        self._client.removefshare(
            protocol, vfs, share_name, fpg=fpg, fstore=fstore)
    except Exception as e:
        msg = (_('Failed to remove share %(share_name)s: %(e)s') %
               {'share_name': share_name, 'e': str(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(msg=msg)

def _delete_ro_share(self, project_id, share_id, protocol,
                     fpg, vfs, fstore):
    """Delete the read-only twin share, if any; return its fstore."""
    share_name_ro = self.ensure_prefix(share_id, readonly=True)
    if not fstore:
        fstore = self._find_fstore(project_id,
                                   share_name_ro,
                                   protocol,
                                   fpg,
                                   vfs,
                                   allow_cross_protocol=True)
    if fstore:
        self._delete_share(share_name_ro, protocol, fpg, vfs, fstore)
    return fstore

def delete_share(self, project_id, share_id, share_size, share_proto,
                 fpg, vfs, share_ip):
    """Delete the share, its RO twin, and its fstore or file tree."""
    protocol = self.ensure_supported_protocol(share_proto)
    share_name = self.ensure_prefix(share_id)
    fstore = self._find_fstore(project_id,
                               share_name,
                               protocol,
                               fpg,
                               vfs,
                               allow_cross_protocol=True)

    removed_writable = False
    if fstore:
        self._delete_share(share_name, protocol, fpg, vfs, fstore)
        removed_writable = True

    # Try to delete the read-only twin share, too.
    fstore = self._delete_ro_share(
        project_id, share_id, protocol, fpg, vfs, fstore)

    if fstore == share_name:
        # fstore-per-share: the whole fstore belongs to this share.
        try:
            self._client.removefstore(vfs, fstore, fpg=fpg)
        except Exception as e:
            msg = (_('Failed to remove fstore %(fstore)s: %(e)s') %
                   {'fstore': fstore, 'e': str(e)})
            LOG.exception(msg)
            raise exception.ShareBackendException(msg=msg)
    elif removed_writable:
        try:
            # Attempt to remove file tree on delete when using nested
            # shares. If the file tree cannot be removed for whatever
            # reason, we will not treat this as an error_deleting
            # issue. We will allow the delete to continue as requested.
            self._delete_file_tree(
                share_name, protocol, fpg, vfs, fstore, share_ip)
            # reduce the fsquota by share size when a tree is deleted.
            self._update_capacity_quotas(
                fstore, 0, share_size, fpg, vfs)
        except Exception as e:
            msg = ('Exception during cleanup of deleted '
                   'share %(share)s in filestore %(fstore)s: %(e)s')
            data = {
                'fstore': fstore,
                'share': share_name,
                'e': str(e),
            }
            LOG.warning(msg, data)
def _delete_file_tree(self, share_name, protocol, fpg, vfs, fstore,
                      share_ip):
    """Mount a temporary 'super share' and delete the nested share dir."""
    # If the share protocol is CIFS, we need to make sure the admin
    # provided the proper config values. If they have not, we can simply
    # return out and log a warning.
    if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username
                              or
                              not self.hpe3par_cifs_admin_access_password):
        LOG.warning("hpe3par_cifs_admin_access_username and "
                    "hpe3par_cifs_admin_access_password must be "
                    "provided in order for the file tree to be "
                    "properly deleted.")
        return

    mount_location = "%s%s" % (self.hpe3par_share_mount_path, share_name)
    share_dir = mount_location + "/%s" % share_name

    # Create the super share.
    self._create_super_share(protocol, fpg, vfs, fstore)

    # Create the mount directory.
    self._create_mount_directory(mount_location)

    # Mount the super share.
    self._mount_super_share(protocol, mount_location, fpg, vfs, fstore,
                            share_ip)

    # Delete the share from the super share.
    self._delete_share_directory(share_dir)

    # Unmount the super share.
    self._unmount_share(mount_location)

    # Delete the mount directory.
    self._delete_share_directory(mount_location)
def _grant_admin_smb_access(self, protocol, fpg, vfs, fstore, comment,
                            share=SUPER_SHARE):
    """Give the configured CIFS admin user full control on a share.

    :raises ShareBackendException: if setfshare fails.
    """
    user = '+%s:fullcontrol' % self.hpe3par_cifs_admin_access_username
    setfshare_kwargs = {
        'fpg': fpg,
        'fstore': fstore,
        'comment': comment,
        'allowperm': user,
    }
    try:
        self._client.setfshare(
            protocol, vfs, share, **setfshare_kwargs)
    except Exception as err:
        raise exception.ShareBackendException(
            msg=_("There was an error adding permissions: %s") % err)

def _revoke_admin_smb_access(self, protocol, fpg, vfs, fstore, comment,
                             share=SUPER_SHARE):
    """Remove the CIFS admin user's full-control permission on a share.

    :raises ShareBackendException: if setfshare fails.
    """
    user = '-%s:fullcontrol' % self.hpe3par_cifs_admin_access_username
    setfshare_kwargs = {
        'fpg': fpg,
        'fstore': fstore,
        'comment': comment,
        'allowperm': user,
    }
    try:
        self._client.setfshare(
            protocol, vfs, share, **setfshare_kwargs)
    except Exception as err:
        raise exception.ShareBackendException(
            msg=_("There was an error revoking permissions: %s") % err)

def _create_super_share(self, protocol, fpg, vfs, fstore, readonly=False):
    """Create the special share used to delete nested share trees."""
    sharedir = ''
    extra_specs = {}
    comment = 'OpenStack super share used to delete nested shares.'
    createfshare_kwargs = self._build_createfshare_kwargs(protocol,
                                                          fpg,
                                                          fstore,
                                                          readonly,
                                                          sharedir,
                                                          extra_specs,
                                                          comment)

    # If the share is NFS, we need to give the host access to the share in
    # order to properly mount it.
    if protocol == 'nfs':
        createfshare_kwargs['clientip'] = self.my_ip
    else:
        createfshare_kwargs['allowip'] = self.my_ip

    try:
        result = self._client.createfshare(protocol, vfs, SUPER_SHARE,
                                           **createfshare_kwargs)
        LOG.debug("createfshare for %(name)s, result=%(result)s",
                  {'name': SUPER_SHARE, 'result': result})
    except Exception as e:
        # BUG FIX: the message was built as a tuple
        # (_('...'), {...}) instead of %-formatting, so the logged and
        # raised message was never interpolated.
        msg = (_('Failed to create share %(share_name)s: %(e)s') %
               {'share_name': SUPER_SHARE, 'e': str(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(msg=msg)

    # If the share is CIFS, we need to grant access to the specified admin.
    if protocol == 'smb':
        self._grant_admin_smb_access(protocol, fpg, vfs, fstore, comment)

def _create_mount_directory(self, mount_location):
    """Best-effort creation of the local mount point; warn on failure."""
    try:
        utils.execute('mkdir', mount_location, run_as_root=True)
    except Exception as err:
        # BUG FIX: previously a (format, arg) tuple was passed to
        # LOG.warning as a single argument, logging the raw tuple.
        LOG.warning("There was an error creating mount directory: "
                    "%s. The nested file tree will not be deleted.",
                    str(err))

def _mount_share(self, protocol, export_location, mount_dir):
    """Mount an NFS or CIFS export at mount_dir (runs as root)."""
    if protocol == 'nfs':
        cmd = ('mount', '-t', 'nfs', export_location, mount_dir)
        utils.execute(*cmd, run_as_root=True)
    else:
        export_location = export_location.replace('\\', '/')
        cred = ('username=' + self.hpe3par_cifs_admin_access_username +
                ',password=' + self.hpe3par_cifs_admin_access_password +
                ',domain=' + self.hpe3par_cifs_admin_access_domain)
        cmd = ('mount', '-t', 'cifs', export_location, mount_dir,
               '-o', cred)
        utils.execute(*cmd, run_as_root=True)

def _mount_super_share(self, protocol, mount_dir, fpg, vfs, fstore,
                       share_ip):
    """Best-effort mount of the super share; warn on failure."""
    try:
        mount_location = self._generate_mount_path(
            protocol, fpg, vfs, fstore, share_ip)
        self._mount_share(protocol, mount_location, mount_dir)
    except Exception as err:
        # BUG FIX: previously a (format, arg) tuple was passed to
        # LOG.warning as a single argument, logging the raw tuple.
        LOG.warning("There was an error mounting the super share: "
                    "%s. The nested file tree will not be deleted.",
                    str(err))

def _unmount_share(self, mount_location):
    """Best-effort unmount; warn on failure."""
    try:
        utils.execute('umount', mount_location, run_as_root=True)
    except Exception as err:
        message = ("There was an error unmounting the share at "
                   "%(mount_location)s: %(error)s")
        msg_data = {
            'mount_location': mount_location,
            'error': str(err),
        }
        LOG.warning(message, msg_data)
The nested file tree will not be deleted.", str(err)) LOG.warning(message) def _generate_mount_path(self, protocol, fpg, vfs, fstore, share_ip): path = None if protocol == 'nfs': path = (("%(share_ip)s:/%(fpg)s/%(vfs)s/%(fstore)s/") % {'share_ip': share_ip, 'fpg': fpg, 'vfs': vfs, 'fstore': fstore}) else: path = (("//%(share_ip)s/%(share_name)s/") % {'share_ip': share_ip, 'share_name': SUPER_SHARE}) return path def get_vfs(self, fpg, vfs=None): """Get the VFS or raise an exception.""" try: result = self._client.getvfs(fpg=fpg, vfs=vfs) except Exception as e: msg = (_('Exception during getvfs %(vfs)s: %(e)s') % {'vfs': vfs, 'e': str(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) if result['total'] != 1: error_msg = result.get('message') if error_msg: message = (_('Error while validating FPG/VFS ' '(%(fpg)s/%(vfs)s): %(msg)s') % {'fpg': fpg, 'vfs': vfs, 'msg': error_msg}) LOG.error(message) raise exception.ShareBackendException(msg=message) else: message = (_('Error while validating FPG/VFS ' '(%(fpg)s/%(vfs)s): Expected 1, ' 'got %(total)s.') % {'fpg': fpg, 'vfs': vfs, 'total': result['total']}) LOG.error(message) raise exception.ShareBackendException(msg=message) value = result['members'][0] if isinstance(value['vfsip'], dict): # This is for 3parclient returning only one VFS entry LOG.debug("3parclient version up to 4.2.1 is in use. 
Client " "upgrade may be needed if using a VFS with multiple " "IP addresses.") value['vfsip']['address'] = [value['vfsip']['address']] else: # This is for 3parclient returning list of VFS entries # Format get_vfs ret value to combine all IP addresses discovered_vfs_ips = [] for vfs_entry in value['vfsip']: if vfs_entry['address']: discovered_vfs_ips.append(vfs_entry['address']) value['vfsip'] = value['vfsip'][0] value['vfsip']['address'] = discovered_vfs_ips return value @staticmethod def _is_share_from_snapshot(fshare): path = fshare.get('shareDir') if path: return '.snapshot' in path.split('/') path = fshare.get('sharePath') return path and '.snapshot' in path.split('/') def create_snapshot(self, orig_project_id, orig_share_id, orig_share_proto, snapshot_id, fpg, vfs): """Creates a snapshot of a share.""" fshare = self._find_fshare(orig_project_id, orig_share_id, orig_share_proto, fpg, vfs) if not fshare: msg = (_('Failed to create snapshot for FPG/VFS/fshare ' '%(fpg)s/%(vfs)s/%(fshare)s: Failed to find fshare.') % {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) if self._is_share_from_snapshot(fshare): msg = (_('Failed to create snapshot for FPG/VFS/fshare ' '%(fpg)s/%(vfs)s/%(fshare)s: Share is a read-only ' 'share of an existing snapshot.') % {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) fstore = fshare.get('fstoreName') snapshot_tag = self.ensure_prefix(snapshot_id) try: result = self._client.createfsnap( vfs, fstore, snapshot_tag, fpg=fpg) LOG.debug("createfsnap result=%s", result) except Exception as e: msg = (_('Failed to create snapshot for FPG/VFS/fstore ' '%(fpg)s/%(vfs)s/%(fstore)s: %(e)s') % {'fpg': fpg, 'vfs': vfs, 'fstore': fstore, 'e': str(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) def delete_snapshot(self, orig_project_id, orig_share_id, orig_proto, snapshot_id, fpg, vfs): """Deletes a 
def delete_snapshot(self, orig_project_id, orig_share_id, orig_proto,
                    snapshot_id, fpg, vfs):
    """Deletes a snapshot of a share."""
    snapshot_tag = self.ensure_prefix(snapshot_id)

    snapshot = self._find_fsnap(orig_project_id,
                                orig_share_id,
                                orig_proto,
                                snapshot_tag,
                                fpg,
                                vfs)
    if not snapshot:
        # Already gone; nothing to do.
        return

    fstore = snapshot.get('fstoreName')

    # Refuse to delete while any share (either protocol) is exported
    # from this snapshot's .snapshot directory.
    for protocol in ('nfs', 'smb'):
        try:
            shares = self._client.getfshare(protocol,
                                            fpg=fpg,
                                            vfs=vfs,
                                            fstore=fstore)
        except Exception as e:
            msg = (_('Unexpected exception while getting share list. '
                     'Cannot delete snapshot without checking for '
                     'dependent shares first: %s') % str(e))
            LOG.exception(msg)
            raise exception.ShareBackendException(msg=msg)

        for share in shares['members']:
            if protocol == 'nfs':
                path = share['sharePath'][1:].split('/')
                dot_snapshot_index = 3
            else:
                if share['shareDir']:
                    path = share['shareDir'].split('/')
                else:
                    path = None
                dot_snapshot_index = 0

            snapshot_index = dot_snapshot_index + 1
            if path and len(path) > snapshot_index:
                if (path[dot_snapshot_index] == '.snapshot' and
                        path[snapshot_index].endswith(snapshot_tag)):
                    msg = (_('Cannot delete snapshot because it has a '
                             'dependent share.'))
                    raise exception.Invalid(msg)

    snapname = snapshot['snapName']
    try:
        result = self._client.removefsnap(
            vfs, fstore, snapname=snapname, fpg=fpg)
        LOG.debug("removefsnap result=%s", result)
    except Exception as e:
        msg = (_('Failed to delete snapshot for FPG/VFS/fstore/snapshot '
                 '%(fpg)s/%(vfs)s/%(fstore)s/%(snapname)s: %(e)s') %
               {
                   'fpg': fpg,
                   'vfs': vfs,
                   'fstore': fstore,
                   'snapname': snapname,
                   'e': str(e)})
        LOG.exception(msg)
        raise exception.ShareBackendException(msg=msg)

    # Try to reclaim the space
    try:
        self._client.startfsnapclean(fpg, reclaimStrategy='maxspeed')
    except Exception:
        # Remove already happened so only log this.
        LOG.exception('Unexpected exception calling startfsnapclean '
                      'for FPG %(fpg)s.', {'fpg': fpg})
@staticmethod
def _validate_access_type(protocol, access_type):
    """Reject access types the backend cannot honor for this protocol."""
    if access_type not in ('ip', 'user'):
        msg = (_("Invalid access type. Expected 'ip' or 'user'. "
                 "Actual '%s'.") % access_type)
        LOG.error(msg)
        raise exception.InvalidInput(reason=msg)

    if protocol == 'nfs' and access_type != 'ip':
        msg = (_("Invalid NFS access type. HPE 3PAR NFS supports 'ip'. "
                 "Actual '%s'.") % access_type)
        LOG.error(msg)
        raise exception.HPE3ParInvalid(err=msg)

    return protocol

@staticmethod
def _validate_access_level(protocol, access_type, access_level, fshare):
    """Reject access levels that conflict with snapshot-backed shares."""
    readonly = access_level == 'ro'
    snapshot = HPE3ParMediator._is_share_from_snapshot(fshare)

    if snapshot and not readonly:
        reason = _('3PAR shares from snapshots require read-only access')
        LOG.error(reason)
        raise exception.InvalidShareAccess(reason=reason)

    if protocol == 'smb' and access_type == 'ip' and snapshot != readonly:
        msg = (_("Invalid CIFS access rule. HPE 3PAR optionally supports "
                 "IP access rules for CIFS shares, but they must be "
                 "read-only for shares from snapshots and read-write for "
                 "other shares. Use the required CIFS 'user' access rules "
                 "to refine access."))
        LOG.error(msg)
        raise exception.InvalidShareAccess(reason=msg)

@staticmethod
def ignore_benign_access_results(plus_or_minus, access_type, access_to,
                                 result):
    """Return None for benign setfshare results, else the result."""
    # TODO(markstur): Remove the next line when hpe3parclient is fixed.
    result = [x for x in result if x != '\r']
    if result:
        if plus_or_minus == DENY:
            # Denying a rule that is already gone is not an error.
            if DOES_NOT_EXIST in result[0]:
                return None
        else:
            # Allowing a rule that already exists is not an error.
            if access_type == 'user':
                if USER_ALREADY_EXISTS % access_to in result[0]:
                    return None
            elif IP_ALREADY_EXISTS % access_to in result[0]:
                return None
    return result

def _change_access(self, plus_or_minus, project_id, share_id, share_proto,
                   access_type, access_to, access_level,
                   fpg, vfs, extra_specs=None):
    """Allow or deny access to a share.

    Plus_or_minus character indicates add to allow list (+) or remove from
    allow list (-).
    """
    readonly = access_level == 'ro'
    protocol = self.ensure_supported_protocol(share_proto)

    try:
        self._validate_access_type(protocol, access_type)
    except Exception:
        if plus_or_minus == DENY:
            # Catch invalid rules for deny. Allow them to be deleted.
            return
        else:
            raise

    fshare = self._find_fshare(project_id,
                               share_id,
                               protocol,
                               fpg,
                               vfs,
                               readonly=readonly)
    if not fshare:
        # Change access might apply to the share with the name that
        # does not match the access_level prefix.
        other_fshare = self._find_fshare(project_id,
                                         share_id,
                                         protocol,
                                         fpg,
                                         vfs,
                                         readonly=not readonly)
        if other_fshare:
            if plus_or_minus == DENY:
                # Try to deny rule from 'other' share for SMB or legacy.
                fshare = other_fshare
            elif self._is_share_from_snapshot(other_fshare):
                # Found a share-from-snapshot from before
                # "-ro" was added to the name.  Use it.
                fshare = other_fshare
            elif protocol == 'nfs':
                # We don't have the RO|RW share we need, but the
                # opposite one already exists.  It is OK to create
                # the one we need for ALLOW with NFS (not from snapshot).
                fstore = other_fshare.get('fstoreName')
                sharedir = other_fshare.get('shareDir')
                comment = other_fshare.get('comment')

                fshare = self._create_share(project_id,
                                            share_id,
                                            protocol,
                                            extra_specs,
                                            fpg,
                                            vfs,
                                            fstore=fstore,
                                            sharedir=sharedir,
                                            readonly=readonly,
                                            size=None,
                                            comment=comment)
            else:
                # SMB only has one share for RO and RW. Try to use it.
                fshare = other_fshare

    if not fshare:
        msg = _('Failed to change (%(change)s) access '
                'to FPG/share %(fpg)s/%(share)s '
                'for %(type)s %(to)s %(level)s): '
                'Share does not exist on 3PAR.')
        msg_data = {
            'change': plus_or_minus,
            'fpg': fpg,
            'share': share_id,
            'type': access_type,
            'to': access_to,
            'level': access_level,
        }
        if plus_or_minus == DENY:
            LOG.warning(msg, msg_data)
            return
        else:
            raise exception.HPE3ParInvalid(err=msg % msg_data)

    try:
        self._validate_access_level(
            protocol, access_type, access_level, fshare)
    except exception.InvalidShareAccess as e:
        if plus_or_minus == DENY:
            # Allow invalid access rules to be deleted.
            msg = _('Ignoring deny invalid access rule '
                    'for FPG/share %(fpg)s/%(share)s '
                    'for %(type)s %(to)s %(level)s): %(e)s')
            msg_data = {
                'change': plus_or_minus,
                'fpg': fpg,
                'share': share_id,
                'type': access_type,
                'to': access_to,
                'level': access_level,
                'e': str(e),
            }
            LOG.info(msg, msg_data)
            return
        else:
            raise

    share_name = fshare.get('shareName')
    setfshare_kwargs = {
        'fpg': fpg,
        'fstore': fshare.get('fstoreName'),
        'comment': fshare.get('comment'),
    }
    if protocol == 'nfs':
        access_change = '%s%s' % (plus_or_minus, access_to)
        setfshare_kwargs['clientip'] = access_change
    elif protocol == 'smb':
        if access_type == 'ip':
            access_change = '%s%s' % (plus_or_minus, access_to)
            setfshare_kwargs['allowip'] = access_change
        else:
            access_str = 'read' if readonly else 'fullcontrol'
            perm = '%s%s:%s' % (plus_or_minus, access_to, access_str)
            setfshare_kwargs['allowperm'] = perm

    try:
        result = self._client.setfshare(
            protocol, vfs, share_name, **setfshare_kwargs)
        result = self.ignore_benign_access_results(
            plus_or_minus, access_type, access_to, result)
    except Exception as e:
        result = str(e)

    LOG.debug("setfshare result=%s", result)
    if result:
        msg = (_('Failed to change (%(change)s) access to FPG/share '
                 '%(fpg)s/%(share)s for %(type)s %(to)s %(level)s: '
                 '%(error)s') %
               {'change': plus_or_minus,
                'fpg': fpg,
                'share': share_id,
                'type': access_type,
                'to': access_to,
                'level': access_level,
                'error': result})
        raise exception.ShareBackendException(msg=msg)

def _find_fstore(self, project_id, share_id, share_proto, fpg, vfs,
                 allow_cross_protocol=False):
    """Locate the fstore backing a share, or None."""
    share = self._find_fshare(project_id,
                              share_id,
                              share_proto,
                              fpg,
                              vfs,
                              allow_cross_protocol=allow_cross_protocol)
    return share.get('fstoreName') if share else None
self.other_protocol(share_proto) share = self._find_fshare_with_proto(project_id, share_id, other_proto, fpg, vfs, readonly=readonly) return share def _find_fshare_with_proto(self, project_id, share_id, share_proto, fpg, vfs, readonly=False): protocol = self.ensure_supported_protocol(share_proto) share_name = self.ensure_prefix(share_id, readonly=readonly) project_fstore = self.ensure_prefix(project_id, share_proto) search_order = [ {'fpg': fpg, 'vfs': vfs, 'fstore': project_fstore}, {'fpg': fpg, 'vfs': vfs, 'fstore': share_name}, {'fpg': fpg}, {} ] try: for search_params in search_order: result = self._client.getfshare(protocol, share_name, **search_params) shares = result.get('members', []) if len(shares) == 1: return shares[0] except Exception as e: msg = (_('Unexpected exception while getting share list: %s') % str(e)) raise exception.ShareBackendException(msg=msg) def _find_fsnap(self, project_id, share_id, orig_proto, snapshot_tag, fpg, vfs): share_name = self.ensure_prefix(share_id) osf_project_id = self.ensure_prefix(project_id, orig_proto) pattern = '*_%s' % self.ensure_prefix(snapshot_tag) search_order = [ {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': osf_project_id}, {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': share_name}, {'pat': True, 'fpg': fpg}, {'pat': True}, ] try: for search_params in search_order: result = self._client.getfsnap(pattern, **search_params) snapshots = result.get('members', []) if len(snapshots) == 1: return snapshots[0] except Exception as e: msg = (_('Unexpected exception while getting snapshots: %s') % str(e)) raise exception.ShareBackendException(msg=msg) def update_access(self, project_id, share_id, share_proto, extra_specs, access_rules, add_rules, delete_rules, fpg, vfs): """Update access to a share.""" protocol = self.ensure_supported_protocol(share_proto) if not (delete_rules or add_rules): # We need to re add all the rules. Check with 3PAR on it's current # list and only add the deltas. 
share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs) ref_users = [] ro_ref_rules = [] if protocol == 'nfs': ref_rules = share['clients'] # Check for RO rules. ro_share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs, readonly=True) if ro_share: ro_ref_rules = ro_share['clients'] else: ref_rules = [x[0] for x in share['allowPerm']] ref_users = ref_rules[:] # Get IP access as well ips = share['allowIP'] if not isinstance(ips, list): # If there is only one IP, the API returns a string # rather than a list. We need to account for that. ips = [ips] ref_rules += ips # Retrieve base rules. base_rules = [] for rule in access_rules: base_rules.append(rule['access_to']) # Check if we need to remove any rules from 3PAR. for rule in ref_rules: if rule in ref_users: rule_type = 'user' else: rule_type = 'ip' if rule not in base_rules + [LOCAL_IP, LOCAL_IP_RO]: self._change_access(DENY, project_id, share_id, share_proto, rule_type, rule, None, fpg, vfs) # Check to see if there are any RO rules to remove. for rule in ro_ref_rules: if rule not in base_rules + [LOCAL_IP, LOCAL_IP_RO]: self._change_access(DENY, project_id, share_id, share_proto, rule_type, rule, 'ro', fpg, vfs) # Check the rules we need to add. for rule in access_rules: if rule['access_to'] not in ref_rules and ( rule['access_to'] not in ro_ref_rules): # Rule does not exist, we need to add it self._change_access(ALLOW, project_id, share_id, share_proto, rule['access_type'], rule['access_to'], rule['access_level'], fpg, vfs, extra_specs=extra_specs) else: # We have deltas of the rules that need to be added and deleted. 
for rule in delete_rules: self._change_access(DENY, project_id, share_id, share_proto, rule['access_type'], rule['access_to'], rule['access_level'], fpg, vfs) for rule in add_rules: self._change_access(ALLOW, project_id, share_id, share_proto, rule['access_type'], rule['access_to'], rule['access_level'], fpg, vfs, extra_specs=extra_specs) def resize_share(self, project_id, share_id, share_proto, new_size, old_size, fpg, vfs): """Extends or shrinks size of existing share.""" share_name = self.ensure_prefix(share_id) fstore = self._find_fstore(project_id, share_name, share_proto, fpg, vfs, allow_cross_protocol=False) if not fstore: msg = (_('Cannot resize share because it was not found.')) raise exception.InvalidShare(reason=msg) self._update_capacity_quotas(fstore, new_size, old_size, fpg, vfs) def fsip_exists(self, fsip): """Try to get FSIP. Return True if it exists.""" vfs = fsip['vfs'] fpg = fsip['fspool'] try: result = self._client.getfsip(vfs, fpg=fpg) LOG.debug("getfsip result: %s", result) except Exception: msg = (_('Failed to get FSIPs for FPG/VFS %(fspool)s/%(vfs)s.') % fsip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) for member in result['members']: if all(item in member.items() for item in fsip.items()): return True return False def create_fsip(self, ip, subnet, vlantag, fpg, vfs): vlantag_str = str(vlantag) if vlantag else '0' # Try to create it. It's OK if it already exists. try: result = self._client.createfsip(ip, subnet, vfs, fpg=fpg, vlantag=vlantag_str) LOG.debug("createfsip result: %s", result) except Exception: msg = (_('Failed to create FSIP for %s') % ip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Verify that it really exists. 
fsip = { 'fspool': fpg, 'vfs': vfs, 'address': ip, 'prefixLen': subnet, 'vlanTag': vlantag_str, } if not self.fsip_exists(fsip): msg = (_('Failed to get FSIP after creating it for ' 'FPG/VFS/IP/subnet/VLAN ' '%(fspool)s/%(vfs)s/' '%(address)s/%(prefixLen)s/%(vlanTag)s.') % fsip) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def remove_fsip(self, ip, fpg, vfs): if not (vfs and ip): # If there is no VFS and/or IP, then there is no FSIP to remove. return try: result = self._client.removefsip(vfs, ip, fpg=fpg) LOG.debug("removefsip result: %s", result) except Exception: msg = (_('Failed to remove FSIP %s') % ip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Verify that it really no longer exists. fsip = { 'fspool': fpg, 'vfs': vfs, 'address': ip, } if self.fsip_exists(fsip): msg = (_('Failed to remove FSIP for FPG/VFS/IP ' '%(fspool)s/%(vfs)s/%(address)s.') % fsip) LOG.error(msg) raise exception.ShareBackendException(msg=msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9176717 manila-21.0.0/manila/share/drivers/huawei/0000775000175000017500000000000000000000000020361 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/__init__.py0000664000175000017500000000000000000000000022460 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/base.py0000664000175000017500000001043500000000000021650 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Abstract base class to work with share.""" import abc class HuaweiBase(metaclass=abc.ABCMeta): """Interface to work with share.""" def __init__(self, configuration): """Do initialization.""" self.configuration = configuration @abc.abstractmethod def create_share(self, share, share_server): """Is called to create share.""" @abc.abstractmethod def create_snapshot(self, snapshot, share_server): """Is called to create snapshot.""" @abc.abstractmethod def delete_share(self, share, share_server): """Is called to remove share.""" @abc.abstractmethod def delete_snapshot(self, snapshot, share_server): """Is called to remove snapshot.""" @abc.abstractmethod def allow_access(self, share, access, share_server): """Allow access to the share.""" @abc.abstractmethod def deny_access(self, share, access, share_server): """Deny access to the share.""" @abc.abstractmethod def ensure_share(self, share, share_server=None): """Ensure that share is exported.""" @abc.abstractmethod def update_access(self, share, access_rules, add_rules, delete_rules, update_rules, share_server): """Update access rules list.""" @abc.abstractmethod def extend_share(self, share, new_size, share_server): """Extends size of existing share.""" @abc.abstractmethod def create_share_from_snapshot(self, share, snapshot, share_server=None, parent_share=None): """Create share from snapshot.""" @abc.abstractmethod def shrink_share(self, share, new_size, share_server): """Shrinks size of existing share.""" @abc.abstractmethod def manage_existing(self, share, driver_options): """Manage existing share.""" 
@abc.abstractmethod def manage_existing_snapshot(self, snapshot, driver_options): """Manage existing snapshot.""" @abc.abstractmethod def get_network_allocations_number(self): """Get number of network interfaces to be created.""" @abc.abstractmethod def get_pool(self, share): """Return pool name where the share resides on.""" def update_share_stats(self, stats_dict): """Retrieve stats info from share group.""" @abc.abstractmethod def setup_server(self, network_info, metadata=None): """Set up share server with given network parameters.""" @abc.abstractmethod def teardown_server(self, server_details, security_services=None): """Teardown share server.""" @abc.abstractmethod def create_replica(self, context, replica_list, new_replica, access_rules, replica_snapshots, share_server=None): """Replicate the active replica to a new replica on this backend.""" @abc.abstractmethod def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): """Update the replica_state of a replica.""" @abc.abstractmethod def promote_replica(self, context, replica_list, replica, access_rules, share_server=None, quiesce_wait_time=None): """Promote a replica to 'active' replica state.""" @abc.abstractmethod def delete_replica(self, context, replica_list, replica_snapshots, replica, share_server=None): """Delete a replica.""" @abc.abstractmethod def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): """Revert a snapshot.""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/constants.py0000664000175000017500000000763500000000000022762 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. STATUS_ETH_RUNNING = "10" STATUS_FS_HEALTH = "1" STATUS_FS_RUNNING = "27" STATUS_FSSNAPSHOT_HEALTH = '1' STATUS_JOIN_DOMAIN = '1' STATUS_EXIT_DOMAIN = '0' STATUS_SERVICE_RUNNING = "2" QOS_STATUSES = (STATUS_QOS_ACTIVE, STATUS_QOS_INACTIVATED, STATUS_QOS_IDLE) = ('2', '45', '46') DEFAULT_WAIT_INTERVAL = 3 DEFAULT_TIMEOUT = 60 MAX_FS_NUM_IN_QOS = 64 MSG_SNAPSHOT_NOT_FOUND = 1073754118 IP_ALLOCATIONS_DHSS_FALSE = 0 IP_ALLOCATIONS_DHSS_TRUE = 1 SOCKET_TIMEOUT = 52 LOGIN_SOCKET_TIMEOUT = 4 QOS_NAME_PREFIX = 'OpenStack_' SYSTEM_NAME_PREFIX = "Array-" MIN_ARRAY_VERSION_FOR_QOS = 'V300R003C00' TMP_PATH_SRC_PREFIX = "huawei_manila_tmp_path_src_" TMP_PATH_DST_PREFIX = "huawei_manila_tmp_path_dst_" ACCESS_NFS_RW = "1" ACCESS_NFS_RO = "0" ACCESS_CIFS_FULLCONTROL = "1" ACCESS_CIFS_RO = "0" ERROR_CONNECT_TO_SERVER = -403 ERROR_UNAUTHORIZED_TO_SERVER = -401 ERROR_LOGICAL_PORT_EXIST = 1073813505 ERROR_USER_OR_GROUP_NOT_EXIST = 1077939723 ERROR_REPLICATION_PAIR_NOT_EXIST = 1077937923 PORT_TYPE_ETH = '1' PORT_TYPE_BOND = '7' PORT_TYPE_VLAN = '8' SORT_BY_VLAN = 1 SORT_BY_LOGICAL = 2 ALLOC_TYPE_THIN_FLAG = "1" ALLOC_TYPE_THICK_FLAG = "0" ALLOC_TYPE_THIN = "Thin" ALLOC_TYPE_THICK = "Thick" THIN_PROVISIONING = "true" THICK_PROVISIONING = "false" OPTS_QOS_VALUE = { 'maxiops': None, 'miniops': None, 'minbandwidth': None, 'maxbandwidth': None, 'latency': None, 'iotype': None } QOS_LOWER_LIMIT = ['MINIOPS', 'LATENCY', 'MINBANDWIDTH'] QOS_UPPER_LIMIT = ['MAXIOPS', 'MAXBANDWIDTH'] OPTS_CAPABILITIES = { 'dedupe': False, 'compression': False, 'huawei_smartcache': False, 'huawei_smartpartition': 
False, 'thin_provisioning': None, 'qos': False, 'huawei_sectorsize': None, } OPTS_VALUE = { 'cachename': None, 'partitionname': None, 'sectorsize': None, } OPTS_VALUE.update(OPTS_QOS_VALUE) OPTS_ASSOCIATE = { 'huawei_smartcache': 'cachename', 'huawei_smartpartition': 'partitionname', 'huawei_sectorsize': 'sectorsize', 'qos': OPTS_QOS_VALUE, } VALID_SECTOR_SIZES = ('4', '8', '16', '32', '64') LOCAL_RES_TYPES = (FILE_SYSTEM_TYPE,) = ('40',) REPLICA_MODELS = (REPLICA_SYNC_MODEL, REPLICA_ASYNC_MODEL) = ('1', '2') REPLICA_SPEED_MODELS = (REPLICA_SPEED_LOW, REPLICA_SPEED_MEDIUM, REPLICA_SPEED_HIGH, REPLICA_SPEED_HIGHEST) = ('1', '2', '3', '4') REPLICA_HEALTH_STATUSES = (REPLICA_HEALTH_STATUS_NORMAL, REPLICA_HEALTH_STATUS_FAULT, REPLICA_HEALTH_STATUS_INVALID) = ('1', '2', '14') REPLICA_DATA_STATUSES = ( REPLICA_DATA_STATUS_SYNCHRONIZED, REPLICA_DATA_STATUS_COMPLETE, REPLICA_DATA_STATUS_INCOMPLETE) = ('1', '2', '5') REPLICA_DATA_STATUS_IN_SYNC = ( REPLICA_DATA_STATUS_SYNCHRONIZED, REPLICA_DATA_STATUS_COMPLETE) REPLICA_RUNNING_STATUSES = ( REPLICA_RUNNING_STATUS_NORMAL, REPLICA_RUNNING_STATUS_SYNCING, REPLICA_RUNNING_STATUS_SPLITTED, REPLICA_RUNNING_STATUS_TO_RECOVER, REPLICA_RUNNING_STATUS_INTERRUPTED, REPLICA_RUNNING_STATUS_INVALID) = ( '1', '23', '26', '33', '34', '35') REPLICA_SECONDARY_ACCESS_RIGHTS = ( REPLICA_SECONDARY_ACCESS_DENIED, REPLICA_SECONDARY_RO, REPLICA_SECONDARY_RW) = ('1', '2', '3') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/huawei_nas.py0000664000175000017500000002673400000000000023072 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Huawei Nas Driver for Huawei storage arrays.""" from defusedxml import ElementTree as ET from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from manila import exception from manila.i18n import _ from manila.share import driver HUAWEI_UNIFIED_DRIVER_REGISTRY = { 'V3': 'manila.share.drivers.huawei.v3.connection.V3StorageConnection', } huawei_opts = [ cfg.StrOpt('manila_huawei_conf_file', default='/etc/manila/manila_huawei_conf.xml', help='The configuration file for the Manila Huawei driver.')] CONF = cfg.CONF CONF.register_opts(huawei_opts) LOG = log.getLogger(__name__) class HuaweiNasDriver(driver.ShareDriver): """Huawei Share Driver. Executes commands relating to Shares. Driver version history:: 1.0 - Initial version. 1.1 - Add shrink share. Add extend share. Add manage share. Add share level(ro). Add smartx capabilities. Support multi pools in one backend. 1.2 - Add share server support. Add ensure share. Add QoS support. Add create share from snapshot. 1.3 - Add manage snapshot. Support reporting disk type of pool. Add replication support. 
""" def __init__(self, *args, **kwargs): """Do initialization.""" LOG.debug("Enter into init function of Huawei Driver.") super(HuaweiNasDriver, self).__init__((True, False), *args, **kwargs) if not self.configuration: raise exception.InvalidInput(reason=_( "Huawei driver configuration missing.")) self.configuration.append_config_values(huawei_opts) kwargs.pop('configuration') self.plugin = importutils.import_object(self.get_backend_driver(), self.configuration, **kwargs) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" self.plugin.check_conf_file() self.plugin.check_service() def get_backend_driver(self): filename = self.configuration.manila_huawei_conf_file try: tree = ET.parse(filename) root = tree.getroot() except Exception as err: message = (_('Read Huawei config file(%(filename)s)' ' for Manila error: %(err)s') % {'filename': filename, 'err': err}) LOG.error(message) raise exception.InvalidInput(reason=message) product = root.findtext('Storage/Product') backend_driver = HUAWEI_UNIFIED_DRIVER_REGISTRY.get(product) if backend_driver is None: raise exception.InvalidInput( reason=_('Product %s is not supported. 
Product ' 'must be set to V3.') % product) return backend_driver def do_setup(self, context): """Any initialization the huawei nas driver does while starting.""" LOG.debug("Do setup the plugin.") self.plugin.connect() def create_share(self, context, share, share_server=None): """Create a share.""" LOG.debug("Create a share.") location = self.plugin.create_share(share, share_server) return location def extend_share(self, share, new_size, share_server=None): LOG.debug("Extend a share.") self.plugin.extend_share(share, new_size, share_server) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Create a share from snapshot.""" LOG.debug("Create a share from snapshot %s.", snapshot['snapshot_id']) location = self.plugin.create_share_from_snapshot(share, snapshot) return location def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" LOG.debug("Shrink a share.") self.plugin.shrink_share(share, new_size, share_server) def delete_share(self, context, share, share_server=None): """Delete a share.""" LOG.debug("Delete a share.") self.plugin.delete_share(share, share_server) def create_snapshot(self, context, snapshot, share_server=None): """Create a snapshot.""" LOG.debug("Create a snapshot.") snapshot_name = self.plugin.create_snapshot(snapshot, share_server) return {'provider_location': snapshot_name} def delete_snapshot(self, context, snapshot, share_server=None): """Delete a snapshot.""" LOG.debug("Delete a snapshot.") self.plugin.delete_snapshot(snapshot, share_server) def ensure_share(self, context, share, share_server=None): """Ensure that share is exported.""" LOG.debug("Ensure share.") location = self.plugin.ensure_share(share, share_server) return location def allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" LOG.debug("Allow access.") self.plugin.allow_access(share, access, share_server) def deny_access(self, context, share, 
access, share_server=None): """Deny access to the share.""" LOG.debug("Deny access.") self.plugin.deny_access(share, access, share_server) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules list.""" LOG.debug("Update access.") self.plugin.update_access(share, access_rules, add_rules, delete_rules, update_rules, share_server) def get_pool(self, share): """Return pool name where the share resides on.""" LOG.debug("Get pool.") return self.plugin.get_pool(share) def get_network_allocations_number(self): """Get number of network interfaces to be created.""" LOG.debug("Get network allocations number.") return self.plugin.get_network_allocations_number() def manage_existing(self, share, driver_options): """Manage existing share.""" LOG.debug("Manage existing share to manila.") share_size, location = self.plugin.manage_existing(share, driver_options) return {'size': share_size, 'export_locations': location} def manage_existing_snapshot(self, snapshot, driver_options): """Manage existing snapshot.""" LOG.debug("Manage existing snapshot to manila.") snapshot_name = self.plugin.manage_existing_snapshot(snapshot, driver_options) return {'provider_location': snapshot_name} def _update_share_stats(self): """Retrieve status info from share group.""" backend_name = self.configuration.safe_get('share_backend_name') data = dict( share_backend_name=backend_name or 'HUAWEI_NAS_Driver', vendor_name='Huawei', driver_version='1.3', storage_protocol='NFS_CIFS', qos=True, total_capacity_gb=0.0, free_capacity_gb=0.0, snapshot_support=self.plugin.snapshot_support, create_share_from_snapshot_support=self.plugin.snapshot_support, revert_to_snapshot_support=self.plugin.snapshot_support, ) # huawei array doesn't support snapshot replication, so driver can't # create replicated snapshot, this's not fit the requirement of # replication feature. 
# to avoid this problem, we specify huawei driver can't support # snapshot and replication both, as a workaround. if not data['snapshot_support'] and self.plugin.replication_support: data['replication_type'] = 'dr' self.plugin.update_share_stats(data) super(HuaweiNasDriver, self)._update_share_stats(data) def _setup_server(self, network_info, metadata=None): """Set up share server with given network parameters.""" # NOTE(felipe_rodrigues): keep legacy network_info support as a dict. network_info = network_info[0] return self.plugin.setup_server(network_info, metadata) def _teardown_server(self, server_details, security_services=None): """Teardown share server.""" return self.plugin.teardown_server(server_details, security_services) def create_replica(self, context, replica_list, new_replica, access_rules, replica_snapshots, share_server=None): """Replicate the active replica to a new replica on this backend.""" return self.plugin.create_replica(context, replica_list, new_replica, access_rules, replica_snapshots, share_server) def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): """Update the replica_state of a replica.""" return self.plugin.update_replica_state(context, replica_list, replica, access_rules, replica_snapshots, share_server) def promote_replica(self, context, replica_list, replica, access_rules, share_server=None, quiesce_wait_time=None): """Promote a replica to 'active' replica state..""" return self.plugin.promote_replica(context, replica_list, replica, access_rules, share_server) def delete_replica(self, context, replica_list, replica_snapshots, replica, share_server=None): """Delete a replica.""" self.plugin.delete_replica(context, replica_list, replica_snapshots, replica, share_server) def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): self.plugin.revert_to_snapshot(context, snapshot, share_access_rules, 
snapshot_access_rules, share_server) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/huawei_utils.py0000664000175000017500000000463000000000000023440 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import log from manila.share.drivers.huawei import constants from manila.share import share_types LOG = log.getLogger(__name__) def get_share_extra_specs_params(type_id): """Return the parameters for creating the share.""" opts = None if type_id is not None: specs = share_types.get_share_type_extra_specs(type_id) opts = _get_opts_from_specs(specs) LOG.debug('Get share type extra specs: %s', opts) return opts def _get_opts_from_specs(specs): opts = copy.deepcopy(constants.OPTS_CAPABILITIES) opts.update(constants.OPTS_VALUE) for key, value in specs.items(): # Get the scope, if using scope format scope = None key_split = key.split(':') if len(key_split) not in (1, 2): continue if len(key_split) == 1: key = key_split[0] else: scope = key_split[0] key = key_split[1] if scope: scope = scope.lower() if key: key = key.lower() # We want both the scheduler and the driver to act on the value. 
if ((not scope or scope == 'capabilities') and key in constants.OPTS_CAPABILITIES): words = value.split() if not (words and len(words) == 2 and words[0] == ''): LOG.error("Extra specs must be specified as " "capabilities:%s=' True'.", key) else: opts[key] = words[1].lower() if ((scope in constants.OPTS_CAPABILITIES) and (key in constants.OPTS_VALUE)): if ((scope in constants.OPTS_ASSOCIATE) and (key in constants.OPTS_ASSOCIATE[scope])): opts[key] = value return opts ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9176717 manila-21.0.0/manila/share/drivers/huawei/v3/0000775000175000017500000000000000000000000020711 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/v3/__init__.py0000664000175000017500000000000000000000000023010 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/v3/connection.py0000664000175000017500000022763700000000000023443 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import random import string import tempfile import time from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units from manila.common import constants as common_constants from manila.data import utils as data_utils from manila import exception from manila.i18n import _ from manila import rpc from manila.share.drivers.huawei import base as driver from manila.share.drivers.huawei import constants from manila.share.drivers.huawei import huawei_utils from manila.share.drivers.huawei.v3 import helper from manila.share.drivers.huawei.v3 import replication from manila.share.drivers.huawei.v3 import rpcapi as v3_rpcapi from manila.share.drivers.huawei.v3 import smartx from manila.share import share_types from manila.share import utils as share_utils from manila import utils CONF = cfg.CONF LOG = log.getLogger(__name__) class V3StorageConnection(driver.HuaweiBase): """Helper class for Huawei OceanStor V3 storage system.""" def __init__(self, configuration, **kwargs): super(V3StorageConnection, self).__init__(configuration) self.helper = helper.RestHelper(self.configuration) self.replica_mgr = replication.ReplicaPairManager(self.helper) self.rpc_client = v3_rpcapi.HuaweiV3API() self.private_storage = kwargs.get('private_storage') self.qos_support = False self.snapshot_support = False self.replication_support = False def _setup_rpc_server(self, endpoints): host = "%s@%s" % (CONF.host, self.configuration.config_group) target = messaging.Target(topic=self.rpc_client.topic, server=host) self.rpc_server = rpc.get_server(target, endpoints) self.rpc_server.start() def connect(self): """Try to connect to V3 server.""" self.helper.login() self._setup_rpc_server([self.replica_mgr]) self._setup_conf() def _setup_conf(self): root = self.helper._read_xml() snapshot_support = root.findtext('Storage/SnapshotSupport') if 
snapshot_support: self.snapshot_support = strutils.bool_from_string( snapshot_support, strict=True) replication_support = root.findtext('Storage/ReplicationSupport') if replication_support: self.replication_support = strutils.bool_from_string( replication_support, strict=True) def create_share(self, share, share_server=None): """Create a share.""" share_name = share['name'] share_proto = share['share_proto'] pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: msg = _("Pool is not available in the share host field.") raise exception.InvalidHost(reason=msg) result = self.helper._find_all_pool_info() poolinfo = self.helper._find_pool_info(pool_name, result) if not poolinfo: msg = (_("Can not find pool info by pool name: %s.") % pool_name) raise exception.InvalidHost(reason=msg) fs_id = None # We sleep here to ensure the newly created filesystem can be read. wait_interval = self._get_wait_interval() timeout = self._get_timeout() try: fs_id = self.allocate_container(share, poolinfo) fs = self.helper._get_fs_info_by_id(fs_id) end_time = time.time() + timeout while not (self.check_fs_status(fs['HEALTHSTATUS'], fs['RUNNINGSTATUS']) or time.time() > end_time): time.sleep(wait_interval) fs = self.helper._get_fs_info_by_id(fs_id) if not self.check_fs_status(fs['HEALTHSTATUS'], fs['RUNNINGSTATUS']): raise exception.InvalidShare( reason=(_('Invalid status of filesystem: ' 'HEALTHSTATUS=%(health)s ' 'RUNNINGSTATUS=%(running)s.') % {'health': fs['HEALTHSTATUS'], 'running': fs['RUNNINGSTATUS']})) except Exception as err: if fs_id is not None: qos_id = self.helper.get_qosid_by_fsid(fs_id) if qos_id: self.remove_qos_fs(fs_id, qos_id) self.helper._delete_fs(fs_id) message = (_('Failed to create share %(name)s. 
' 'Reason: %(err)s.') % {'name': share_name, 'err': err}) raise exception.InvalidShare(reason=message) try: self.helper.create_share(share_name, fs_id, share_proto) except Exception as err: if fs_id is not None: qos_id = self.helper.get_qosid_by_fsid(fs_id) if qos_id: self.remove_qos_fs(fs_id, qos_id) self.helper._delete_fs(fs_id) raise exception.InvalidShare( reason=(_('Failed to create share %(name)s. Reason: %(err)s.') % {'name': share_name, 'err': err})) ip = self._get_share_ip(share_server) location = self._get_location_path(share_name, share_proto, ip) return location def _get_share_ip(self, share_server): """"Get share logical ip.""" if share_server: ip = share_server['backend_details'].get('ip') else: root = self.helper._read_xml() ip = root.findtext('Storage/LogicalPortIP').strip() return ip def extend_share(self, share, new_size, share_server): share_proto = share['share_proto'] share_name = share['name'] # The unit is in sectors. size = int(new_size) * units.Mi * 2 share_url_type = self.helper._get_share_url_type(share_proto) share = self.helper._get_share_by_name(share_name, share_url_type) if not share: err_msg = (_("Can not get share ID by share %s.") % share_name) LOG.error(err_msg) raise exception.InvalidShareAccess(reason=err_msg) fsid = share['FSID'] fs_info = self.helper._get_fs_info_by_id(fsid) current_size = int(fs_info['CAPACITY']) / units.Mi / 2 if current_size >= new_size: err_msg = (_("New size for extend must be bigger than " "current size on array. (current: %(size)s, " "new: %(new_size)s).") % {'size': current_size, 'new_size': new_size}) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) self.helper._change_share_size(fsid, size) def shrink_share(self, share, new_size, share_server): """Shrinks size of existing share.""" share_proto = share['share_proto'] share_name = share['name'] # The unit is in sectors. 
size = int(new_size) * units.Mi * 2 share_url_type = self.helper._get_share_url_type(share_proto) share = self.helper._get_share_by_name(share_name, share_url_type) if not share: err_msg = (_("Can not get share ID by share %s.") % share_name) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) fsid = share['FSID'] fs_info = self.helper._get_fs_info_by_id(fsid) if not fs_info: err_msg = (_("Can not get filesystem info by filesystem ID: %s.") % fsid) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) current_size = int(fs_info['CAPACITY']) / units.Mi / 2 if current_size <= new_size: err_msg = (_("New size for shrink must be less than current " "size on array. (current: %(size)s, " "new: %(new_size)s).") % {'size': current_size, 'new_size': new_size}) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) if fs_info['ALLOCTYPE'] != constants.ALLOC_TYPE_THIN_FLAG: err_msg = (_("Share (%s) can not be shrunk. only 'Thin' shares " "support shrink.") % share_name) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) self.helper._change_share_size(fsid, size) def check_fs_status(self, health_status, running_status): if (health_status == constants.STATUS_FS_HEALTH and running_status == constants.STATUS_FS_RUNNING): return True else: return False def assert_filesystem(self, fsid): fs = self.helper._get_fs_info_by_id(fsid) if not self.check_fs_status(fs['HEALTHSTATUS'], fs['RUNNINGSTATUS']): err_msg = (_('Invalid status of filesystem: ' 'HEALTHSTATUS=%(health)s ' 'RUNNINGSTATUS=%(running)s.') % {'health': fs['HEALTHSTATUS'], 'running': fs['RUNNINGSTATUS']}) raise exception.StorageResourceException(err_msg) def create_snapshot(self, snapshot, share_server=None): """Create a snapshot.""" snap_name = snapshot['id'] share_proto = snapshot['share']['share_proto'] share_url_type = self.helper._get_share_url_type(share_proto) share = self.helper._get_share_by_name(snapshot['share_name'], share_url_type) if not share: err_msg = _('Can not 
create snapshot,' ' because share id is not provided.') LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) sharefsid = share['FSID'] snapshot_name = "share_snapshot_" + snap_name snap_id = self.helper._create_snapshot(sharefsid, snapshot_name) LOG.info('Creating snapshot id %s.', snap_id) return snapshot_name.replace("-", "_") def delete_snapshot(self, snapshot, share_server=None): """Delete a snapshot.""" LOG.debug("Delete a snapshot.") snap_name = snapshot['id'] sharefsid = self.helper.get_fsid_by_name(snapshot['share_name']) if sharefsid is None: LOG.warning('Delete snapshot share id %s fs has been ' 'deleted.', snap_name) return snapshot_id = self.helper._get_snapshot_id(sharefsid, snap_name) snapshot_info = self.helper._get_snapshot_by_id(snapshot_id) snapshot_flag = self.helper._check_snapshot_id_exist(snapshot_info) if snapshot_flag: self.helper._delete_snapshot(snapshot_id) else: LOG.warning("Can not find snapshot %s on array.", snap_name) def update_share_stats(self, stats_dict): """Retrieve status info from share group.""" root = self.helper._read_xml() all_pool_info = self.helper._find_all_pool_info() stats_dict["pools"] = [] pool_name_list = root.findtext('Filesystem/StoragePool') pool_name_list = pool_name_list.split(";") for pool_name in pool_name_list: pool_name = pool_name.strip().strip('\n') capacity = self._get_capacity(pool_name, all_pool_info) disk_type = self._get_disk_type(pool_name, all_pool_info) if capacity: pool = dict( pool_name=pool_name, total_capacity_gb=capacity['TOTALCAPACITY'], free_capacity_gb=capacity['CAPACITY'], provisioned_capacity_gb=( capacity['PROVISIONEDCAPACITYGB']), max_over_subscription_ratio=( self.configuration.safe_get( 'max_over_subscription_ratio')), allocated_capacity_gb=capacity['CONSUMEDCAPACITY'], qos=self._get_qos_capability(), reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, thin_provisioning=[True, False], dedupe=[True, False], compression=[True, False], 
huawei_smartcache=[True, False], huawei_smartpartition=[True, False], huawei_sectorsize=[True, False], ) if disk_type: pool['huawei_disk_type'] = disk_type stats_dict["pools"].append(pool) if not stats_dict["pools"]: err_msg = _("The StoragePool is None.") LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) def _get_qos_capability(self): version = self.helper.find_array_version() if version.upper() >= constants.MIN_ARRAY_VERSION_FOR_QOS: self.qos_support = True else: self.qos_support = False return self.qos_support def delete_share(self, share, share_server=None): """Delete share.""" share_name = share['name'] share_url_type = self.helper._get_share_url_type(share['share_proto']) share = self.helper._get_share_by_name(share_name, share_url_type) if not share: LOG.warning('The share was not found. Share name:%s', share_name) fsid = self.helper.get_fsid_by_name(share_name) if fsid: self.helper._delete_fs(fsid) return LOG.warning('The filesystem was not found.') return share_id = share['ID'] share_fs_id = share['FSID'] if share_id: self.helper._delete_share_by_id(share_id, share_url_type) if share_fs_id: if self.qos_support: qos_id = self.helper.get_qosid_by_fsid(share_fs_id) if qos_id: self.remove_qos_fs(share_fs_id, qos_id) self.helper._delete_fs(share_fs_id) return share def create_share_from_snapshot(self, share, snapshot, share_server=None, parent_share=None): """Create a share from snapshot.""" share_fs_id = self.helper.get_fsid_by_name(snapshot['share_name']) if not share_fs_id: err_msg = (_("The source filesystem of snapshot %s " "does not exist.") % snapshot['snapshot_id']) LOG.error(err_msg) raise exception.StorageResourceNotFound( name=snapshot['share_name']) snapshot_id = self.helper._get_snapshot_id(share_fs_id, snapshot['id']) snapshot_info = self.helper._get_snapshot_by_id(snapshot_id) snapshot_flag = self.helper._check_snapshot_id_exist(snapshot_info) if not snapshot_flag: err_msg = (_("Cannot find snapshot %s on array.") % 
snapshot['snapshot_id']) LOG.error(err_msg) raise exception.ShareSnapshotNotFound( snapshot_id=snapshot['snapshot_id']) self.assert_filesystem(share_fs_id) old_share_name = self.helper.get_share_name_by_id( snapshot['share_id']) old_share_proto = self._get_share_proto(old_share_name) if not old_share_proto: err_msg = (_("Cannot find source share %(share)s of " "snapshot %(snapshot)s on array.") % {'share': snapshot['share_id'], 'snapshot': snapshot['snapshot_id']}) LOG.error(err_msg) raise exception.ShareResourceNotFound( share_id=snapshot['share_id']) new_share_path = self.create_share(share) new_share = { "share_proto": share['share_proto'], "size": share['size'], "name": share['name'], "mount_path": new_share_path.replace("\\", "/"), "mount_src": tempfile.mkdtemp(prefix=constants.TMP_PATH_DST_PREFIX), "id": snapshot['share_id'], } old_share_path = self._get_location_path(old_share_name, old_share_proto) old_share = { "share_proto": old_share_proto, "name": old_share_name, "mount_path": old_share_path.replace("\\", "/"), "mount_src": tempfile.mkdtemp(prefix=constants.TMP_PATH_SRC_PREFIX), "snapshot_name": ("share_snapshot_" + snapshot['id'].replace("-", "_")), "id": snapshot['share_id'], } try: self.copy_data_from_parent_share(old_share, new_share) except Exception: with excutils.save_and_reraise_exception(): self.delete_share(new_share) finally: for item in (new_share, old_share): try: os.rmdir(item['mount_src']) except Exception as err: LOG.warning('Failed to remove temp file. File path:' '%(file_path)s. Reason: %(err)s.', {'file_path': item['mount_src'], 'err': err}) return new_share_path def copy_data_from_parent_share(self, old_share, new_share): old_access = self.get_access(old_share) old_access_id = self._get_access_id(old_share, old_access) if not old_access_id: try: self.allow_access(old_share, old_access) except exception.ManilaException as err: with excutils.save_and_reraise_exception(): LOG.error('Failed to add access to share %(name)s. 
' 'Reason: %(err)s.', {'name': old_share['name'], 'err': err}) new_access = self.get_access(new_share) try: try: self.mount_share_to_host(old_share, old_access) except exception.ShareMountException as err: with excutils.save_and_reraise_exception(): LOG.error('Failed to mount old share %(name)s. ' 'Reason: %(err)s.', {'name': old_share['name'], 'err': err}) try: self.allow_access(new_share, new_access) self.mount_share_to_host(new_share, new_access) except Exception as err: with excutils.save_and_reraise_exception(): self.umount_share_from_host(old_share) LOG.error('Failed to mount new share %(name)s. ' 'Reason: %(err)s.', {'name': new_share['name'], 'err': err}) copied = self.copy_snapshot_data(old_share, new_share) for item in (new_share, old_share): try: self.umount_share_from_host(item) except exception.ShareUmountException as err: LOG.warning('Failed to unmount share %(name)s. ' 'Reason: %(err)s.', {'name': item['name'], 'err': err}) self.deny_access(new_share, new_access) if copied: LOG.debug("Created share from snapshot successfully, " "new_share: %s, old_share: %s.", new_share, old_share) else: message = (_('Failed to copy data from share %(old_share)s ' 'to share %(new_share)s.') % {'old_share': old_share['name'], 'new_share': new_share['name']}) raise exception.ShareCopyDataException(reason=message) finally: if not old_access_id: self.deny_access(old_share, old_access) def get_access(self, share): share_proto = share['share_proto'] access = {} root = self.helper._read_xml() if share_proto == 'NFS': access['access_to'] = root.findtext('Filesystem/NFSClient/IP') access['access_level'] = common_constants.ACCESS_LEVEL_RW access['access_type'] = 'ip' elif share_proto == 'CIFS': access['access_to'] = root.findtext( 'Filesystem/CIFSClient/UserName') access['access_password'] = root.findtext( 'Filesystem/CIFSClient/UserPassword') access['access_level'] = common_constants.ACCESS_LEVEL_RW access['access_type'] = 'user' LOG.debug("Get access for share: %s, 
access_type: %s, access_to: %s, " "access_level: %s", share['name'], access['access_type'], access['access_to'], access['access_level']) return access def _get_access_id(self, share, access): """Get access id of the share.""" access_id = None share_name = share['name'] share_proto = share['share_proto'] share_url_type = self.helper._get_share_url_type(share_proto) access_to = access['access_to'] share = self.helper._get_share_by_name(share_name, share_url_type) access_id = self.helper._get_access_from_share(share['ID'], access_to, share_proto) if access_id is None: LOG.debug('Cannot get access ID from share. ' 'share_name: %s', share_name) return access_id def copy_snapshot_data(self, old_share, new_share): src_path = '/'.join((old_share['mount_src'], '.snapshot', old_share['snapshot_name'])) dst_path = new_share['mount_src'] copy_finish = False LOG.debug("Copy data from src_path: %s to dst_path: %s.", src_path, dst_path) try: ignore_list = '' copy = data_utils.Copy(src_path, dst_path, ignore_list) copy.run() if copy.get_progress()['total_progress'] == 100: copy_finish = True except Exception as err: LOG.error("Failed to copy data, reason: %s.", err) return copy_finish def umount_share_from_host(self, share): try: utils.execute('umount', share['mount_path'], run_as_root=True) except Exception as err: message = (_("Failed to unmount share %(share)s. " "Reason: %(reason)s.") % {'share': share['name'], 'reason': str(err)}) raise exception.ShareUmountException(reason=message) def mount_share_to_host(self, share, access): LOG.debug("Mounting share: %s to host, mount_src: %s", share['name'], share['mount_src']) try: if share['share_proto'] == 'NFS': utils.execute('mount', '-t', 'nfs', share['mount_path'], share['mount_src'], run_as_root=True) LOG.debug("Execute mount. 
mount_src: %s", share['mount_src']) elif share['share_proto'] == 'CIFS': user = ('username=' + access['access_to'] + ',' + 'password=' + access['access_password']) utils.execute('mount', '-t', 'cifs', share['mount_path'], share['mount_src'], '-o', user, run_as_root=True) except Exception as err: message = (_('Bad response from mount share: %(share)s. ' 'Reason: %(reason)s.') % {'share': share['name'], 'reason': str(err)}) raise exception.ShareMountException(reason=message) def get_network_allocations_number(self): """Get number of network interfaces to be created.""" if self.configuration.driver_handles_share_servers: return constants.IP_ALLOCATIONS_DHSS_TRUE else: return constants.IP_ALLOCATIONS_DHSS_FALSE def _get_capacity(self, pool_name, result): """Get free capacity and total capacity of the pools.""" poolinfo = self.helper._find_pool_info(pool_name, result) if poolinfo: total = float(poolinfo['TOTALCAPACITY']) / units.Mi / 2 free = float(poolinfo['CAPACITY']) / units.Mi / 2 consumed = float(poolinfo['CONSUMEDCAPACITY']) / units.Mi / 2 poolinfo['TOTALCAPACITY'] = total poolinfo['CAPACITY'] = free poolinfo['CONSUMEDCAPACITY'] = consumed poolinfo['PROVISIONEDCAPACITYGB'] = round( float(total) - float(free), 2) return poolinfo def _get_disk_type(self, pool_name, result): """Get disk type of the pool.""" pool_info = self.helper._find_pool_info(pool_name, result) if not pool_info: return None pool_disk = [] for i, x in enumerate(['ssd', 'sas', 'nl_sas']): if pool_info['TIER%dCAPACITY' % i] != '0': pool_disk.append(x) if len(pool_disk) > 1: pool_disk = ['mix'] return pool_disk[0] if pool_disk else None def _init_filesys_para(self, share, poolinfo, extra_specs): """Init basic filesystem parameters.""" name = share['name'] size = int(share['size']) * units.Mi * 2 fileparam = { "NAME": name.replace("-", "_"), "DESCRIPTION": "", "ALLOCTYPE": extra_specs['LUNType'], "CAPACITY": size, "PARENTID": poolinfo['ID'], "INITIALALLOCCAPACITY": units.Ki * 20, "PARENTTYPE": 216, 
"SNAPSHOTRESERVEPER": 20, "INITIALDISTRIBUTEPOLICY": 0, "ISSHOWSNAPDIR": True, "RECYCLESWITCH": 0, "RECYCLEHOLDTIME": 15, "RECYCLETHRESHOLD": 0, "RECYCLEAUTOCLEANSWITCH": 0, "ENABLEDEDUP": extra_specs['dedupe'], "ENABLECOMPRESSION": extra_specs['compression'], } if fileparam['ALLOCTYPE'] == constants.ALLOC_TYPE_THICK_FLAG: if (extra_specs['dedupe'] or extra_specs['compression']): err_msg = _( 'The filesystem type is "Thick",' ' so dedupe or compression cannot be set.') LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) if extra_specs['sectorsize']: fileparam['SECTORSIZE'] = extra_specs['sectorsize'] * units.Ki return fileparam def deny_access(self, share, access, share_server=None): """Deny access to share.""" share_proto = share['share_proto'] share_name = share['name'] share_url_type = self.helper._get_share_url_type(share_proto) access_type = access['access_type'] if share_proto == 'NFS' and access_type not in ('ip', 'user'): LOG.warning('Only IP or USER access types are allowed for ' 'NFS shares.') return elif share_proto == 'CIFS' and access_type != 'user': LOG.warning('Only USER access type is allowed for' ' CIFS shares.') return access_to = access['access_to'] # Huawei array uses * to represent IP addresses of all clients if (share_proto == 'NFS' and access_type == 'ip' and access_to == '0.0.0.0/0'): access_to = '*' share = self.helper._get_share_by_name(share_name, share_url_type) if not share: LOG.warning('Can not get share %s.', share_name) return access_id = self.helper._get_access_from_share(share['ID'], access_to, share_proto) if not access_id: LOG.warning('Can not get access id from share. 
' 'share_name: %s', share_name) return self.helper._remove_access_from_share(access_id, share_proto) def allow_access(self, share, access, share_server=None): """Allow access to the share.""" share_proto = share['share_proto'] share_name = share['name'] share_url_type = self.helper._get_share_url_type(share_proto) access_type = access['access_type'] access_level = access['access_level'] access_to = access['access_to'] if access_level not in common_constants.ACCESS_LEVELS: raise exception.InvalidShareAccess( reason=(_('Unsupported level of access was provided - %s') % access_level)) if share_proto == 'NFS': if access_type == 'user': # Use 'user' as 'netgroup' for NFS. # A group name starts with @. access_to = '@' + access_to elif access_type != 'ip': message = _('Only IP or USER access types ' 'are allowed for NFS shares.') raise exception.InvalidShareAccess(reason=message) if access_level == common_constants.ACCESS_LEVEL_RW: access_level = constants.ACCESS_NFS_RW else: access_level = constants.ACCESS_NFS_RO # Huawei array uses * to represent IP addresses of all clients if access_to == '0.0.0.0/0': access_to = '*' elif share_proto == 'CIFS': if access_type == 'user': if access_level == common_constants.ACCESS_LEVEL_RW: access_level = constants.ACCESS_CIFS_FULLCONTROL else: access_level = constants.ACCESS_CIFS_RO else: message = _('Only USER access type is allowed' ' for CIFS shares.') raise exception.InvalidShareAccess(reason=message) share_stor = self.helper._get_share_by_name(share_name, share_url_type) if not share_stor: err_msg = (_("Share %s does not exist on the backend.") % share_name) LOG.error(err_msg) raise exception.ShareResourceNotFound(share_id=share['id']) share_id = share_stor['ID'] # Check if access already exists access_id = self.helper._get_access_from_share(share_id, access_to, share_proto) if access_id: # Check if the access level equal level_exist = self.helper._get_level_by_access_id(access_id, share_proto) if level_exist != access_level: # 
Change the access level self.helper._change_access_rest(access_id, share_proto, access_level) else: # Add this access to share self.helper._allow_access_rest(share_id, access_to, share_proto, access_level) def clear_access(self, share, share_server=None): """Remove all access rules of the share""" share_proto = share['share_proto'] share_name = share['name'] share_url_type = self.helper._get_share_url_type(share_proto) share_stor = self.helper._get_share_by_name(share_name, share_url_type) if not share_stor: LOG.warning('Cannot get share %s.', share_name) return share_id = share_stor['ID'] all_accesses = self.helper._get_all_access_from_share(share_id, share_proto) for access_id in all_accesses: self.helper._remove_access_from_share(access_id, share_proto) def update_access(self, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules list.""" if not (add_rules or delete_rules): self.clear_access(share, share_server) for access in access_rules: self.allow_access(share, access, share_server) else: for access in delete_rules: self.deny_access(share, access, share_server) for access in add_rules: self.allow_access(share, access, share_server) def get_pool(self, share): pool_name = share_utils.extract_host(share['host'], level='pool') if pool_name: return pool_name share_name = share['name'] share_url_type = self.helper._get_share_url_type(share['share_proto']) share = self.helper._get_share_by_name(share_name, share_url_type) pool_name = None if share: pool = self.helper._get_fs_info_by_id(share['FSID']) pool_name = pool['POOLNAME'] return pool_name def allocate_container(self, share, poolinfo): """Creates filesystem associated to share by name.""" opts = huawei_utils.get_share_extra_specs_params( share['share_type_id']) if opts is None: opts = constants.OPTS_CAPABILITIES smart = smartx.SmartX(self.helper) smartx_opts, qos = smart.get_smartx_extra_specs_opts(opts) fileParam = self._init_filesys_para(share, poolinfo, 
smartx_opts) fsid = self.helper._create_filesystem(fileParam) try: if qos: smart_qos = smartx.SmartQos(self.helper) smart_qos.create_qos(qos, fsid) smartpartition = smartx.SmartPartition(self.helper) smartpartition.add(opts, fsid) smartcache = smartx.SmartCache(self.helper) smartcache.add(opts, fsid) except Exception as err: if fsid is not None: qos_id = self.helper.get_qosid_by_fsid(fsid) if qos_id: self.remove_qos_fs(fsid, qos_id) self.helper._delete_fs(fsid) message = (_('Failed to add smartx. Reason: %(err)s.') % {'err': err}) raise exception.InvalidShare(reason=message) return fsid def manage_existing(self, share, driver_options): """Manage existing share.""" share_proto = share['share_proto'] share_name = share['name'] old_export_location = share['export_locations'][0]['path'] pool_name = share_utils.extract_host(share['host'], level='pool') share_url_type = self.helper._get_share_url_type(share_proto) old_share_name = self.helper._get_share_name_by_export_location( old_export_location, share_proto) share_storage = self.helper._get_share_by_name(old_share_name, share_url_type) if not share_storage: err_msg = (_("Can not get share ID by share %s.") % old_export_location) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) fs_id = share_storage['FSID'] fs = self.helper._get_fs_info_by_id(fs_id) if not self.check_fs_status(fs['HEALTHSTATUS'], fs['RUNNINGSTATUS']): raise exception.InvalidShare( reason=(_('Invalid status of filesystem: ' 'HEALTHSTATUS=%(health)s ' 'RUNNINGSTATUS=%(running)s.') % {'health': fs['HEALTHSTATUS'], 'running': fs['RUNNINGSTATUS']})) if pool_name and pool_name != fs['POOLNAME']: raise exception.InvalidHost( reason=(_('The current pool(%(fs_pool)s) of filesystem ' 'does not match the input pool(%(host_pool)s).') % {'fs_pool': fs['POOLNAME'], 'host_pool': pool_name})) result = self.helper._find_all_pool_info() poolinfo = self.helper._find_pool_info(pool_name, result) opts = huawei_utils.get_share_extra_specs_params( 
share['share_type_id']) specs = share_types.get_share_type_extra_specs(share['share_type_id']) if ('capabilities:thin_provisioning' not in specs.keys() and 'thin_provisioning' not in specs.keys()): if fs['ALLOCTYPE'] == constants.ALLOC_TYPE_THIN_FLAG: opts['thin_provisioning'] = constants.THIN_PROVISIONING else: opts['thin_provisioning'] = constants.THICK_PROVISIONING change_opts = self.check_retype_change_opts(opts, poolinfo, fs) LOG.info('Retyping share (%(share)s), changed options are : ' '(%(change_opts)s).', {'share': old_share_name, 'change_opts': change_opts}) try: self.retype_share(change_opts, fs_id) except Exception as err: message = (_("Retype share error. Share: %(share)s. " "Reason: %(reason)s.") % {'share': old_share_name, 'reason': err}) raise exception.InvalidShare(reason=message) share_size = int(fs['CAPACITY']) / units.Mi / 2 self.helper._change_fs_name(fs_id, share_name) location = self._get_location_path(share_name, share_proto) return (share_size, [location]) def _check_snapshot_valid_for_manage(self, snapshot_info): snapshot_name = snapshot_info['data']['NAME'] # Check whether the snapshot is normal. if (snapshot_info['data']['HEALTHSTATUS'] != constants.STATUS_FSSNAPSHOT_HEALTH): msg = (_("Can't import snapshot %(snapshot)s to Manila. " "Snapshot status is not normal, snapshot status: " "%(status)s.") % {'snapshot': snapshot_name, 'status': snapshot_info['data']['HEALTHSTATUS']}) raise exception.ManageInvalidShareSnapshot( reason=msg) def manage_existing_snapshot(self, snapshot, driver_options): """Manage existing snapshot.""" share_proto = snapshot['share']['share_proto'] share_url_type = self.helper._get_share_url_type(share_proto) share_storage = self.helper._get_share_by_name(snapshot['share_name'], share_url_type) if not share_storage: err_msg = (_("Failed to import snapshot %(snapshot)s to Manila. 
" "Snapshot source share %(share)s doesn't exist " "on array.") % {'snapshot': snapshot['provider_location'], 'share': snapshot['share_name']}) raise exception.InvalidShare(reason=err_msg) sharefsid = share_storage['FSID'] provider_location = snapshot.get('provider_location') snapshot_id = sharefsid + "@" + provider_location snapshot_info = self.helper._get_snapshot_by_id(snapshot_id) snapshot_flag = self.helper._check_snapshot_id_exist(snapshot_info) if not snapshot_flag: err_msg = (_("Cannot find snapshot %s on array.") % snapshot['provider_location']) raise exception.ManageInvalidShareSnapshot(reason=err_msg) else: self._check_snapshot_valid_for_manage(snapshot_info) snapshot_name = ("share_snapshot_" + snapshot['id'].replace("-", "_")) self.helper._rename_share_snapshot(snapshot_id, snapshot_name) return snapshot_name def check_retype_change_opts(self, opts, poolinfo, fs): change_opts = { "partitionid": None, "cacheid": None, "dedupe&compression": None, } # SmartPartition old_partition_id = fs['SMARTPARTITIONID'] old_partition_name = None new_partition_id = None new_partition_name = None if strutils.bool_from_string(opts['huawei_smartpartition']): if not opts['partitionname']: raise exception.InvalidInput( reason=_('Partition name is None, please set ' 'huawei_smartpartition:partitionname in key.')) new_partition_name = opts['partitionname'] new_partition_id = self.helper._get_partition_id_by_name( new_partition_name) if new_partition_id is None: raise exception.InvalidInput( reason=(_("Can't find partition name on the array, " "partition name is: %(name)s.") % {"name": new_partition_name})) if old_partition_id != new_partition_id: if old_partition_id: partition_info = self.helper.get_partition_info_by_id( old_partition_id) old_partition_name = partition_info['NAME'] change_opts["partitionid"] = ([old_partition_id, old_partition_name], [new_partition_id, new_partition_name]) # SmartCache old_cache_id = fs['SMARTCACHEID'] old_cache_name = None new_cache_id = 
None new_cache_name = None if strutils.bool_from_string(opts['huawei_smartcache']): if not opts['cachename']: raise exception.InvalidInput( reason=_('Cache name is None, please set ' 'huawei_smartcache:cachename in key.')) new_cache_name = opts['cachename'] new_cache_id = self.helper._get_cache_id_by_name( new_cache_name) if new_cache_id is None: raise exception.InvalidInput( reason=(_("Can't find cache name on the array, " "cache name is: %(name)s.") % {"name": new_cache_name})) if old_cache_id != new_cache_id: if old_cache_id: cache_info = self.helper.get_cache_info_by_id( old_cache_id) old_cache_name = cache_info['NAME'] change_opts["cacheid"] = ([old_cache_id, old_cache_name], [new_cache_id, new_cache_name]) # SmartDedupe&SmartCompression smartx_opts = constants.OPTS_CAPABILITIES if opts is not None: smart = smartx.SmartX(self.helper) smartx_opts, qos = smart.get_smartx_extra_specs_opts(opts) old_compression = fs['COMPRESSION'] new_compression = smartx_opts['compression'] old_dedupe = fs['DEDUP'] new_dedupe = smartx_opts['dedupe'] if fs['ALLOCTYPE'] == constants.ALLOC_TYPE_THIN_FLAG: fs['ALLOCTYPE'] = constants.ALLOC_TYPE_THIN else: fs['ALLOCTYPE'] = constants.ALLOC_TYPE_THICK if strutils.bool_from_string(opts['thin_provisioning']): opts['thin_provisioning'] = constants.ALLOC_TYPE_THIN else: opts['thin_provisioning'] = constants.ALLOC_TYPE_THICK if fs['ALLOCTYPE'] != opts['thin_provisioning']: msg = (_("Manage existing share " "fs type and new_share_type mismatch. 
" "fs type is: %(fs_type)s, " "new_share_type is: %(new_share_type)s") % {"fs_type": fs['ALLOCTYPE'], "new_share_type": opts['thin_provisioning']}) raise exception.InvalidHost(reason=msg) else: if fs['ALLOCTYPE'] == constants.ALLOC_TYPE_THICK: if new_compression or new_dedupe: raise exception.InvalidInput( reason=_("Dedupe or compression cannot be set for " "thick filesystem.")) else: if (old_dedupe != new_dedupe or old_compression != new_compression): change_opts["dedupe&compression"] = ([old_dedupe, old_compression], [new_dedupe, new_compression]) return change_opts def retype_share(self, change_opts, fs_id): if change_opts.get('partitionid'): old, new = change_opts['partitionid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.helper._remove_fs_from_partition(fs_id, old_id) if new_id: self.helper._add_fs_to_partition(fs_id, new_id) msg = (_("Retype FS(id: %(fs_id)s) smartpartition from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) " "performed successfully.") % {"fs_id": fs_id, "old_id": old_id, "old_name": old_name, "new_id": new_id, "new_name": new_name}) LOG.info(msg) if change_opts.get('cacheid'): old, new = change_opts['cacheid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.helper._remove_fs_from_cache(fs_id, old_id) if new_id: self.helper._add_fs_to_cache(fs_id, new_id) msg = (_("Retype FS(id: %(fs_id)s) smartcache from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) " "performed successfully.") % {"fs_id": fs_id, "old_id": old_id, "old_name": old_name, "new_id": new_id, "new_name": new_name}) LOG.info(msg) if change_opts.get('dedupe&compression'): old, new = change_opts['dedupe&compression'] old_dedupe = old[0] old_compression = old[1] new_dedupe = new[0] new_compression = new[1] if ((old_dedupe != new_dedupe) or (old_compression != new_compression)): new_smartx_opts = {"dedupe": new_dedupe, "compression": 
new_compression} self.helper._change_extra_specs(fs_id, new_smartx_opts) msg = (_("Retype FS(id: %(fs_id)s) dedupe from %(old_dedupe)s " "to %(new_dedupe)s performed successfully, " "compression from " "%(old_compression)s to %(new_compression)s " "performed successfully.") % {"fs_id": fs_id, "old_dedupe": old_dedupe, "new_dedupe": new_dedupe, "old_compression": old_compression, "new_compression": new_compression}) LOG.info(msg) def remove_qos_fs(self, fs_id, qos_id): fs_list = self.helper.get_fs_list_in_qos(qos_id) fs_count = len(fs_list) if fs_count <= 1: qos = smartx.SmartQos(self.helper) qos.delete_qos(qos_id) else: self.helper.remove_fs_from_qos(fs_id, fs_list, qos_id) def _get_location_path(self, share_name, share_proto, ip=None): location = None if ip is None: root = self.helper._read_xml() ip = root.findtext('Storage/LogicalPortIP').strip() if share_proto == 'NFS': location = '%s:/%s' % (ip, share_name.replace("-", "_")) elif share_proto == 'CIFS': location = '\\\\%s\\%s' % (ip, share_name.replace("-", "_")) else: raise exception.InvalidShareAccess( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) return location def _get_share_proto(self, share_name): share_proto = None for proto in ('NFS', 'CIFS'): share_url_type = self.helper._get_share_url_type(proto) share = self.helper._get_share_by_name(share_name, share_url_type) if share: share_proto = proto break return share_proto def _get_wait_interval(self): """Get wait interval from huawei conf file.""" root = self.helper._read_xml() wait_interval = root.findtext('Filesystem/WaitInterval') if wait_interval: return int(wait_interval) else: LOG.info( "Wait interval is not configured in huawei " "conf file. 
Use default: %(default_wait_interval)d.", {"default_wait_interval": constants.DEFAULT_WAIT_INTERVAL}) return constants.DEFAULT_WAIT_INTERVAL def _get_timeout(self): """Get timeout from huawei conf file.""" root = self.helper._read_xml() timeout = root.findtext('Filesystem/Timeout') if timeout: return int(timeout) else: LOG.info( "Timeout is not configured in huawei conf file. " "Use default: %(default_timeout)d.", {"default_timeout": constants.DEFAULT_TIMEOUT}) return constants.DEFAULT_TIMEOUT def check_conf_file(self): """Check the config file, make sure the essential items are set.""" root = self.helper._read_xml() resturl = root.findtext('Storage/RestURL') username = root.findtext('Storage/UserName') pwd = root.findtext('Storage/UserPassword') product = root.findtext('Storage/Product') pool_node = root.findtext('Filesystem/StoragePool') logical_port_ip = root.findtext('Storage/LogicalPortIP') if product != "V3": err_msg = (_( 'check_conf_file: Config file invalid. ' 'Product must be set to V3.')) LOG.error(err_msg) raise exception.InvalidInput(err_msg) if not (resturl and username and pwd): err_msg = (_( 'check_conf_file: Config file invalid. RestURL,' ' UserName and UserPassword must be set.')) LOG.error(err_msg) raise exception.InvalidInput(err_msg) if not pool_node: err_msg = (_( 'check_conf_file: Config file invalid. ' 'StoragePool must be set.')) LOG.error(err_msg) raise exception.InvalidInput(err_msg) if not (self.configuration.driver_handles_share_servers or logical_port_ip): err_msg = (_( 'check_conf_file: Config file invalid. LogicalPortIP ' 'must be set when driver_handles_share_servers is False.')) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) if self.snapshot_support and self.replication_support: err_msg = _('Config file invalid. 
SnapshotSupport and ' 'ReplicationSupport can not both be set to True.') LOG.error(err_msg) raise exception.BadConfigurationException(reason=err_msg) def check_service(self): running_status = self.helper._get_cifs_service_status() if running_status != constants.STATUS_SERVICE_RUNNING: self.helper._start_cifs_service_status() service = self.helper._get_nfs_service_status() if ((service['RUNNINGSTATUS'] != constants.STATUS_SERVICE_RUNNING) or (service['SUPPORTV3'] == 'false') or (service['SUPPORTV4'] == 'false')): self.helper._start_nfs_service_status() def setup_server(self, network_info, metadata=None): """Set up share server with given network parameters.""" self._check_network_type_validate(network_info['network_type']) vlan_tag = network_info['segmentation_id'] or 0 ip = network_info['network_allocations'][0]['ip_address'] subnet = utils.cidr_to_netmask(network_info['cidr']) if not utils.is_valid_ip_address(ip, '4'): err_msg = (_( "IP (%s) is invalid. Only IPv4 addresses are supported.") % ip) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) ad_created = False ldap_created = False try: if network_info.get('security_services'): active_directory, ldap = self._get_valid_security_service( network_info.get('security_services')) # Configure AD or LDAP Domain. if active_directory: self._configure_AD_domain(active_directory) ad_created = True if ldap: self._configure_LDAP_domain(ldap) ldap_created = True # Create vlan and logical_port. 
vlan_id, logical_port_id = ( self._create_vlan_and_logical_port(vlan_tag, ip, subnet)) except exception.ManilaException: if ad_created: dns_ip_list = [] user = active_directory['user'] password = active_directory['password'] self.helper.set_DNS_ip_address(dns_ip_list) self.helper.delete_AD_config(user, password) self._check_AD_expected_status(constants.STATUS_EXIT_DOMAIN) if ldap_created: self.helper.delete_LDAP_config() raise return { 'share_server_name': network_info['server_id'], 'share_server_id': network_info['server_id'], 'vlan_id': vlan_id, 'logical_port_id': logical_port_id, 'ip': ip, 'subnet': subnet, 'vlan_tag': vlan_tag, 'ad_created': ad_created, 'ldap_created': ldap_created, } def _check_network_type_validate(self, network_type): if network_type not in ('flat', 'vlan', None): err_msg = (_( 'Invalid network type. Network type must be flat or vlan.')) raise exception.NetworkBadConfigurationException(reason=err_msg) def _get_valid_security_service(self, security_services): """Validate security services and return AD/LDAP config.""" service_number = len(security_services) err_msg = _("Unsupported security services. " "Only AD and LDAP are supported.") if service_number > 2: LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) active_directory = None ldap = None for ss in security_services: if ss['type'] == 'active_directory': active_directory = ss elif ss['type'] == 'ldap': ldap = ss else: LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) return active_directory, ldap def _configure_AD_domain(self, active_directory): dns_ip = active_directory['dns_ip'] user = active_directory['user'] password = active_directory['password'] domain = active_directory['domain'] if not (dns_ip and user and password and domain): raise exception.InvalidInput( reason=_("dns_ip or user or password or domain " "in security_services is None.")) # Check DNS server exists or not. 
ip_address = self.helper.get_DNS_ip_address() if ip_address and ip_address[0]: err_msg = (_("DNS server (%s) has already been configured.") % ip_address[0]) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) # Check AD config exists or not. ad_exists, AD_domain = self.helper.get_AD_domain_name() if ad_exists: err_msg = (_("AD domain (%s) has already been configured.") % AD_domain) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) # Set DNS server ip. dns_ip_list = dns_ip.split(",") DNS_config = self.helper.set_DNS_ip_address(dns_ip_list) # Set AD config. digits = string.digits random_id = ''.join([random.choice(digits) for i in range(9)]) system_name = constants.SYSTEM_NAME_PREFIX + random_id try: self.helper.add_AD_config(user, password, domain, system_name) self._check_AD_expected_status(constants.STATUS_JOIN_DOMAIN) except exception.ManilaException as err: if DNS_config: dns_ip_list = [] self.helper.set_DNS_ip_address(dns_ip_list) raise exception.InvalidShare( reason=(_('Failed to add AD config. ' 'Reason: %s.') % err)) def _check_AD_expected_status(self, expected_status): wait_interval = self._get_wait_interval() timeout = self._get_timeout() retries = timeout / wait_interval interval = wait_interval backoff_rate = 1 @utils.retry(retry_param=exception.InvalidShare, interval=interval, retries=retries, backoff_rate=backoff_rate) def _check_AD_status(): ad = self.helper.get_AD_config() if ad['DOMAINSTATUS'] != expected_status: raise exception.InvalidShare( reason=(_('AD domain (%s) status is not expected.') % ad['FULLDOMAINNAME'])) _check_AD_status() def _configure_LDAP_domain(self, ldap): server = ldap['server'] domain = ldap['domain'] if not server or not domain: raise exception.InvalidInput(reason=_("Server or domain is None.")) # Check LDAP config exists or not. 
ldap_exists, LDAP_domain = self.helper.get_LDAP_domain_server() if ldap_exists: err_msg = (_("LDAP domain (%s) has already been configured.") % LDAP_domain) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) # Set LDAP config. server_number = len(server.split(',')) if server_number == 1: server = server + ",," elif server_number == 2: server = server + "," elif server_number > 3: raise exception.InvalidInput( reason=_("Cannot support more than three LDAP servers.")) self.helper.add_LDAP_config(server, domain) def _create_vlan_and_logical_port(self, vlan_tag, ip, subnet): optimal_port, port_type = self._get_optimal_port(vlan_tag) port_id = self.helper.get_port_id(optimal_port, port_type) home_port_id = port_id home_port_type = port_type vlan_id = 0 vlan_exists = True if port_type is None or port_id is None: err_msg = _("No appropriate port found to create logical port.") LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) if vlan_tag: vlan_exists, vlan_id = self.helper.get_vlan(port_id, vlan_tag) if not vlan_exists: # Create vlan. vlan_id = self.helper.create_vlan( port_id, port_type, vlan_tag) home_port_id = vlan_id home_port_type = constants.PORT_TYPE_VLAN logical_port_exists, logical_port_id = ( self.helper.get_logical_port(home_port_id, ip, subnet)) if not logical_port_exists: try: # Create logical port. logical_port_id = ( self.helper.create_logical_port( home_port_id, home_port_type, ip, subnet)) except exception.ManilaException as err: if not vlan_exists: self.helper.delete_vlan(vlan_id) raise exception.InvalidShare( reason=(_('Failed to create logical port. 
' 'Reason: %s.') % err)) return vlan_id, logical_port_id def _get_optimal_port(self, vlan_tag): """Get an optimal physical port or bond port.""" root = self.helper._read_xml() port_info = [] port_list = root.findtext('Storage/Port') if port_list: port_list = port_list.split(";") for port in port_list: port = port.strip().strip('\n') if port: port_info.append(port) eth_port, bond_port = self._get_online_port(port_info) if vlan_tag: optimal_port, port_type = ( self._get_least_port(eth_port, bond_port, sort_type=constants.SORT_BY_VLAN)) else: optimal_port, port_type = ( self._get_least_port(eth_port, bond_port, sort_type=constants.SORT_BY_LOGICAL)) if not optimal_port: err_msg = (_("Cannot find optimal port. port_info: %s.") % port_info) LOG.error(err_msg) raise exception.InvalidInput(reason=err_msg) return optimal_port, port_type def _get_online_port(self, all_port_list): eth_port = self.helper.get_all_eth_port() bond_port = self.helper.get_all_bond_port() eth_status = constants.STATUS_ETH_RUNNING online_eth_port = [] for eth in eth_port: if (eth_status == eth['RUNNINGSTATUS'] and not eth['IPV4ADDR'] and not eth['BONDNAME']): online_eth_port.append(eth['LOCATION']) online_bond_port = [] for bond in bond_port: if eth_status == bond['RUNNINGSTATUS']: port_id = jsonutils.loads(bond['PORTIDLIST']) bond_eth_port = self.helper.get_eth_port_by_id(port_id[0]) if bond_eth_port and not bond_eth_port['IPV4ADDR']: online_bond_port.append(bond['NAME']) filtered_eth_port = [] filtered_bond_port = [] if len(all_port_list) == 0: filtered_eth_port = online_eth_port filtered_bond_port = online_bond_port else: all_port_list = list(set(all_port_list)) for port in all_port_list: is_eth_port = False for eth in online_eth_port: if port == eth: filtered_eth_port.append(port) is_eth_port = True break if is_eth_port: continue for bond in online_bond_port: if port == bond: filtered_bond_port.append(port) break return filtered_eth_port, filtered_bond_port def _get_least_port(self, eth_port, 
bond_port, sort_type): sorted_eth = [] sorted_bond = [] if sort_type == constants.SORT_BY_VLAN: _get_sorted_least_port = self._get_sorted_least_port_by_vlan else: _get_sorted_least_port = self._get_sorted_least_port_by_logical if eth_port: sorted_eth = _get_sorted_least_port(eth_port) if bond_port: sorted_bond = _get_sorted_least_port(bond_port) if sorted_eth and sorted_bond: if sorted_eth[1] >= sorted_bond[1]: return sorted_bond[0], constants.PORT_TYPE_BOND else: return sorted_eth[0], constants.PORT_TYPE_ETH elif sorted_eth: return sorted_eth[0], constants.PORT_TYPE_ETH elif sorted_bond: return sorted_bond[0], constants.PORT_TYPE_BOND else: return None, None def _get_sorted_least_port_by_vlan(self, port_list): if not port_list: return None vlan_list = self.helper.get_all_vlan() count = {} for item in port_list: count[item] = 0 for item in port_list: for vlan in vlan_list: pos = vlan['NAME'].rfind('.') if vlan['NAME'][:pos] == item: count[item] += 1 sort_port = sorted(count.items(), key=lambda count: count[1]) return sort_port[0] def _get_sorted_least_port_by_logical(self, port_list): if not port_list: return None logical_list = self.helper.get_all_logical_port() count = {} for item in port_list: count[item] = 0 for logical in logical_list: if logical['HOMEPORTTYPE'] == constants.PORT_TYPE_VLAN: pos = logical['HOMEPORTNAME'].rfind('.') if logical['HOMEPORTNAME'][:pos] == item: count[item] += 1 else: if logical['HOMEPORTNAME'] == item: count[item] += 1 sort_port = sorted(count.items(), key=lambda count: count[1]) return sort_port[0] def teardown_server(self, server_details, security_services=None): if not server_details: LOG.debug('Server details are empty.') return logical_port_id = server_details.get('logical_port_id') vlan_id = server_details.get('vlan_id') ad_created = server_details.get('ad_created') ldap_created = server_details.get('ldap_created') # Delete logical_port. 
if logical_port_id: logical_port_exists = ( self.helper.check_logical_port_exists_by_id(logical_port_id)) if logical_port_exists: self.helper.delete_logical_port(logical_port_id) # Delete vlan. if vlan_id and vlan_id != '0': vlan_exists = self.helper.check_vlan_exists_by_id(vlan_id) if vlan_exists: self.helper.delete_vlan(vlan_id) if security_services: active_directory, ldap = ( self._get_valid_security_service(security_services)) if ad_created and ad_created == '1' and active_directory: dns_ip = active_directory['dns_ip'] user = active_directory['user'] password = active_directory['password'] domain = active_directory['domain'] # Check DNS server exists or not. ip_address = self.helper.get_DNS_ip_address() if ip_address and ip_address[0] == dns_ip: dns_ip_list = [] self.helper.set_DNS_ip_address(dns_ip_list) # Check AD config exists or not. ad_exists, AD_domain = self.helper.get_AD_domain_name() if ad_exists and AD_domain == domain: self.helper.delete_AD_config(user, password) self._check_AD_expected_status( constants.STATUS_EXIT_DOMAIN) if ldap_created and ldap_created == '1' and ldap: server = ldap['server'] domain = ldap['domain'] # Check LDAP config exists or not. 
ldap_exists, LDAP_domain = ( self.helper.get_LDAP_domain_server()) if ldap_exists: LDAP_config = self.helper.get_LDAP_config() if (LDAP_config['LDAPSERVER'] == server and LDAP_config['BASEDN'] == domain): self.helper.delete_LDAP_config() def ensure_share(self, share, share_server=None): """Ensure that share is exported.""" share_proto = share['share_proto'] share_name = share['name'] share_id = share['id'] share_url_type = self.helper._get_share_url_type(share_proto) share_storage = self.helper._get_share_by_name(share_name, share_url_type) if not share_storage: raise exception.ShareResourceNotFound(share_id=share_id) fs_id = share_storage['FSID'] self.assert_filesystem(fs_id) ip = self._get_share_ip(share_server) location = self._get_location_path(share_name, share_proto, ip) return [location] def create_replica(self, context, replica_list, new_replica, access_rules, replica_snapshots, share_server=None): """Create a new share, and create a remote replication pair.""" active_replica = share_utils.get_active_replica(replica_list) if (self.private_storage.get(active_replica['share_id'], 'replica_pair_id')): # for huawei array, only one replication can be created for # each active replica, so if a replica pair id is recorded for # this share, it means active replica already has a replication, # can not create anymore. msg = _('Cannot create more than one replica for share %s.') LOG.error(msg, active_replica['share_id']) raise exception.ReplicationException( reason=msg % active_replica['share_id']) # Create a new share new_share_name = new_replica['name'] location = self.create_share(new_replica, share_server) # create a replication pair. # replication pair only can be created by master node, # so here is a remote call to trigger master node to # start the creating progress. 
try: replica_pair_id = self.rpc_client.create_replica_pair( context, active_replica['host'], local_share_info=active_replica, remote_device_wwn=self.helper.get_array_wwn(), remote_fs_id=self.helper.get_fsid_by_name(new_share_name) ) except Exception: LOG.exception('Failed to create a replication pair ' 'with host %s.', active_replica['host']) raise self.private_storage.update(new_replica['share_id'], {'replica_pair_id': replica_pair_id}) # Get the state of the new created replica replica_state = self.replica_mgr.get_replica_state(replica_pair_id) replica_ref = { 'export_locations': [location], 'replica_state': replica_state, 'access_rules_status': common_constants.STATUS_ACTIVE, } return replica_ref def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): replica_pair_id = self.private_storage.get(replica['share_id'], 'replica_pair_id') if replica_pair_id is None: msg = ("No replication pair ID recorded for share %s.") LOG.error(msg, replica['share_id']) return common_constants.STATUS_ERROR self.replica_mgr.update_replication_pair_state(replica_pair_id) return self.replica_mgr.get_replica_state(replica_pair_id) def promote_replica(self, context, replica_list, replica, access_rules, share_server=None, quiesce_wait_time=None): replica_pair_id = self.private_storage.get(replica['share_id'], 'replica_pair_id') if replica_pair_id is None: msg = _("No replication pair ID recorded for share %s.") LOG.error(msg, replica['share_id']) raise exception.ReplicationException( reason=msg % replica['share_id']) try: self.replica_mgr.switch_over(replica_pair_id) except Exception: LOG.exception('Failed to promote replica %s.', replica['id']) raise updated_new_active_access = True cleared_old_active_access = True try: self.update_access(replica, access_rules, [], [], [], share_server) except Exception: LOG.warning('Failed to set access rules to ' 'new active replica %s.', replica['id']) updated_new_active_access = False 
old_active_replica = share_utils.get_active_replica(replica_list) try: self.clear_access(old_active_replica, share_server) except Exception: LOG.warning("Failed to clear access rules from " "old active replica %s.", old_active_replica['id']) cleared_old_active_access = False new_active_update = { 'id': replica['id'], 'replica_state': common_constants.REPLICA_STATE_ACTIVE, } new_active_update['access_rules_status'] = ( common_constants.STATUS_ACTIVE if updated_new_active_access else common_constants.SHARE_INSTANCE_RULES_SYNCING) # get replica state for new secondary after switch over replica_state = self.replica_mgr.get_replica_state(replica_pair_id) old_active_update = { 'id': old_active_replica['id'], 'replica_state': replica_state, } old_active_update['access_rules_status'] = ( common_constants.SHARE_INSTANCE_RULES_SYNCING if cleared_old_active_access else common_constants.STATUS_ACTIVE) return [new_active_update, old_active_update] def delete_replica(self, context, replica_list, replica_snapshots, replica, share_server=None): replica_pair_id = self.private_storage.get(replica['share_id'], 'replica_pair_id') if replica_pair_id is None: msg = ("No replication pair ID recorded for share %(share)s. 
" "Continue to delete replica %(replica)s.") LOG.warning(msg, {'share': replica['share_id'], 'replica': replica['id']}) else: self.replica_mgr.delete_replication_pair(replica_pair_id) self.private_storage.delete(replica['share_id']) try: self.delete_share(replica, share_server) except Exception: LOG.exception('Failed to delete replica %s.', replica['id']) raise def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server): fs_id = self.helper.get_fsid_by_name(snapshot['share_name']) if not fs_id: msg = _("The source filesystem of snapshot %s " "not exist.") % snapshot['id'] LOG.error(msg) raise exception.ShareResourceNotFound( share_id=snapshot['share_id']) snapshot_id = self.helper._get_snapshot_id(fs_id, snapshot['id']) self.helper.rollback_snapshot(snapshot_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/v3/helper.py0000664000175000017500000014553600000000000022560 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import copy import requests import time from defusedxml import ElementTree as ET from oslo_log import log from oslo_serialization import jsonutils from manila import exception from manila.i18n import _ from manila.share.drivers.huawei import constants from manila import utils LOG = log.getLogger(__name__) class RestHelper(object): """Helper class for Huawei OceanStor V3 storage system.""" def __init__(self, configuration): self.configuration = configuration self.url = None self.session = None # pylint: disable=no-member requests.packages.urllib3.disable_warnings( requests.packages.urllib3.exceptions.InsecureRequestWarning) requests.packages.urllib3.disable_warnings( requests.packages.urllib3.exceptions.InsecurePlatformWarning) # pylint: enable=no-member def init_http_head(self): self.url = None self.session = requests.Session() self.session.headers.update({ "Connection": "keep-alive", "Content-Type": "application/json"}) self.session.verify = False def do_call(self, url, data, method, calltimeout=constants.SOCKET_TIMEOUT): """Send requests to server. Send HTTPS call, get response in JSON. Convert response into Python Object and return it. """ if self.url: url = self.url + url LOG.debug('Request URL: %(url)s\n' 'Call Method: %(method)s\n' 'Request Data: %(data)s\n', {'url': url, 'method': method, 'data': data}) kwargs = {'timeout': calltimeout} if data: kwargs['data'] = data if method in ('POST', 'PUT', 'GET', 'DELETE'): func = getattr(self.session, method.lower()) else: msg = _("Request method %s is invalid.") % method LOG.error(msg) raise exception.ShareBackendException(msg=msg) try: res = func(url, **kwargs) except Exception as err: LOG.error('\nBad response from server: %(url)s.' 
' Error: %(err)s', {'url': url, 'err': err}) return {"error": {"code": constants.ERROR_CONNECT_TO_SERVER, "description": "Connect server error"}} try: res.raise_for_status() except requests.HTTPError as exc: return {"error": {"code": exc.response.status_code, "description": str(exc)}} result = res.json() LOG.debug('Response Data: %s', result) return result def login(self): """Login huawei array.""" login_info = self._get_login_info() urlstr = login_info['RestURL'] url_list = urlstr.split(";") deviceid = None for item_url in url_list: url = item_url.strip('').strip('\n') + "xx/sessions" data = jsonutils.dumps({"username": login_info['UserName'], "password": login_info['UserPassword'], "scope": "0"}) self.init_http_head() result = self.do_call(url, data, 'POST', calltimeout=constants.LOGIN_SOCKET_TIMEOUT) if ((result['error']['code'] != 0) or ("data" not in result) or (result['data']['deviceid'] is None)): LOG.error("Login to %s failed, try another.", item_url) continue LOG.debug('Login success: %(url)s\n', {'url': item_url}) deviceid = result['data']['deviceid'] self.url = item_url + deviceid self.session.headers['iBaseToken'] = result['data']['iBaseToken'] break if deviceid is None: err_msg = _("All url login fail.") LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) return deviceid @utils.synchronized('huawei_manila') def call(self, url, data, method): """Send requests to server. If fail, try another RestURL. 
""" deviceid = None old_url = self.url result = self.do_call(url, data, method) error_code = result['error']['code'] if (error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): LOG.error("Can't open the recent url, re-login.") deviceid = self.login() if deviceid is not None: LOG.debug('Replace URL: \n' 'Old URL: %(old_url)s\n' 'New URL: %(new_url)s\n', {'old_url': old_url, 'new_url': self.url}) result = self.do_call(url, data, method) return result def _create_filesystem(self, fs_param): """Create file system.""" url = "/filesystem" data = jsonutils.dumps(fs_param) result = self.call(url, data, 'POST') msg = 'Create filesystem error.' self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def _assert_rest_result(self, result, err_str): if result['error']['code'] != 0: err_msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str, 'res': result}) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) def _assert_data_in_result(self, result, msg): if "data" not in result: err_msg = (_('%s "data" was not in result.') % msg) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) def _get_login_info(self): """Get login IP, username and password from config file.""" logininfo = {} filename = self.configuration.manila_huawei_conf_file tree = ET.parse(filename) root = tree.getroot() RestURL = root.findtext('Storage/RestURL') logininfo['RestURL'] = RestURL.strip() # Prefix !$$$ means encoded already. 
prefix_name = '!$$$' need_encode = False for key in ['UserName', 'UserPassword']: node = root.find('Storage/%s' % key) if node.text.startswith(prefix_name): logininfo[key] = base64.b64decode( (node.text[4:]).encode("latin-1")).decode() else: logininfo[key] = node.text node.text = prefix_name + base64.b64encode( node.text.encode("latin-1")).decode() need_encode = True if need_encode: self._change_file_mode(filename) try: tree.write(filename, 'UTF-8') except Exception as err: err_msg = (_('File write error %s.') % err) LOG.error(err_msg) raise exception.InvalidShare(reason=err_msg) return logininfo def _change_file_mode(self, filepath): try: utils.execute('chmod', '666', filepath, run_as_root=True) except Exception as err: LOG.error('Bad response from change file: %s.', err) raise def create_share(self, share_name, fs_id, share_proto): """Create a share.""" share_url_type = self._get_share_url_type(share_proto) share_path = self._get_share_path(share_name) filepath = {} if share_proto == 'NFS': filepath = { "DESCRIPTION": "", "FSID": fs_id, "SHAREPATH": share_path, } elif share_proto == 'CIFS': filepath = { "SHAREPATH": share_path, "DESCRIPTION": "", "ABEENABLE": "false", "ENABLENOTIFY": "true", "ENABLEOPLOCK": "true", "NAME": share_name.replace("-", "_"), "FSID": fs_id, "TENANCYID": "0", } else: raise exception.InvalidShare( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) url = "/" + share_url_type data = jsonutils.dumps(filepath) result = self.call(url, data, "POST") msg = 'Create share error.' 
self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def _delete_share_by_id(self, share_id, share_url_type): """Delete share by share id.""" url = "/" + share_url_type + "/" + share_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, 'Delete share error.') def _delete_fs(self, fs_id): """Delete file system.""" # Get available file system url = "/filesystem/" + fs_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, 'Delete file system error.') def _get_cifs_service_status(self): url = "/CIFSSERVICE" result = self.call(url, None, "GET") msg = 'Get CIFS service status error.' self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['RUNNINGSTATUS'] def _get_nfs_service_status(self): url = "/NFSSERVICE" result = self.call(url, None, "GET") msg = 'Get NFS service status error.' self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) service = {} service['RUNNINGSTATUS'] = result['data']['RUNNINGSTATUS'] service['SUPPORTV3'] = result['data']['SUPPORTV3'] service['SUPPORTV4'] = result['data']['SUPPORTV4'] return service def _start_nfs_service_status(self): url = "/NFSSERVICE" nfsserviceinfo = { "NFSV4DOMAIN": "localdomain", "RUNNINGSTATUS": "2", "SUPPORTV3": 'true', "SUPPORTV4": 'true', "TYPE": "16452", } data = jsonutils.dumps(nfsserviceinfo) result = self.call(url, data, "PUT") self._assert_rest_result(result, 'Start NFS service error.') def _start_cifs_service_status(self): url = "/CIFSSERVICE" cifsserviceinfo = { "ENABLENOTIFY": "true", "ENABLEOPLOCK": "true", "ENABLEOPLOCKLEASE": "false", "GUESTENABLE": "false", "OPLOCKTIMEOUT": "35", "RUNNINGSTATUS": "2", "SECURITYMODEL": "3", "SIGNINGENABLE": "false", "SIGNINGREQUIRED": "false", "TYPE": "16453", } data = jsonutils.dumps(cifsserviceinfo) result = self.call(url, data, "PUT") self._assert_rest_result(result, 'Start CIFS service error.') def 
_find_pool_info(self, pool_name, result):
    # NOTE(review): the "def " keyword opening this method sits at the end
    # of the previous fused archive line; this span begins mid-definition.
    """Extract capacity info for the named pool from a pool query result.

    Returns None when pool_name is None, an empty dict when no matching
    pool is found, otherwise a dict of name/ID/capacity fields.
    """
    if pool_name is None:
        return
    poolinfo = {}
    pool_name = pool_name.strip()
    for item in result.get('data', []):
        # USAGETYPE '2' filters pool entries -- presumably file-system
        # pools; TODO confirm against the array REST API docs.
        if pool_name == item['NAME'] and '2' == item['USAGETYPE']:
            poolinfo['name'] = pool_name
            poolinfo['ID'] = item['ID']
            poolinfo['CAPACITY'] = item['USERFREECAPACITY']
            poolinfo['TOTALCAPACITY'] = item['USERTOTALCAPACITY']
            poolinfo['CONSUMEDCAPACITY'] = item['USERCONSUMEDCAPACITY']
            poolinfo['TIER0CAPACITY'] = item['TIER0CAPACITY']
            poolinfo['TIER1CAPACITY'] = item['TIER1CAPACITY']
            poolinfo['TIER2CAPACITY'] = item['TIER2CAPACITY']
            break
    return poolinfo

    def _find_all_pool_info(self):
        """Query all storage pools on the array (GET /storagepool)."""
        url = "/storagepool"
        result = self.call(url, None, "GET")
        msg = "Query resource pool error."
        self._assert_rest_result(result, msg)
        self._assert_data_in_result(result, msg)
        return result

    def _read_xml(self):
        """Open xml file and parse the content."""
        filename = self.configuration.manila_huawei_conf_file
        try:
            tree = ET.parse(filename)
            root = tree.getroot()
        except Exception as err:
            message = (_('Read Huawei config file(%(filename)s)'
                         ' for Manila error: %(err)s')
                       % {'filename': filename, 'err': err})
            LOG.error(message)
            raise exception.InvalidInput(reason=message)
        return root

    def _remove_access_from_share(self, access_id, share_proto):
        """Delete one access rule (by array-side ID) from a share."""
        access_type = self._get_share_client_type(share_proto)
        url = "/" + access_type + "/" + access_id
        result = self.call(url, None, "DELETE")
        self._assert_rest_result(result, 'delete access from share error!')

    def _get_access_count(self, share_id, share_client_type):
        """Return the number of access rules attached to the share."""
        url_subfix = ("/" + share_client_type + "/count?"
                      + "filter=PARENTID::" + share_id)
        url = url_subfix
        result = self.call(url, None, "GET")
        msg = "Get access count by share error!"
        self._assert_rest_result(result, msg)
        self._assert_data_in_result(result, msg)
        return int(result['data']['COUNT'])

    def _get_all_access_from_share(self, share_id, share_proto):
        """Return a list of all the access IDs of the share"""
        share_client_type = self._get_share_client_type(share_proto)
        count = self._get_access_count(share_id, share_client_type)
        access_ids = []
        range_begin = 0
        # Page through the rules 100 at a time (the page size used by
        # _get_access_from_share_range).
        while count > 0:
            access_range = self._get_access_from_share_range(
                share_id, range_begin, share_client_type)
            for item in access_range:
                access_ids.append(item['ID'])
            range_begin += 100
            count -= 100
        return access_ids

    def _get_access_from_share(self, share_id, access_to, share_proto):
        """Segments to find access for a period of 100."""
        share_client_type = self._get_share_client_type(share_proto)
        count = self._get_access_count(share_id, share_client_type)
        access_id = None
        range_begin = 0
        while count > 0:
            if access_id:
                break
            access_range = self._get_access_from_share_range(
                share_id, range_begin, share_client_type)
            for item in access_range:
                # Group rules are stored with a leading '@', so match both
                # the plain name and the '@'-prefixed form.
                if item['NAME'] in (access_to, '@' + access_to):
                    access_id = item['ID']
            range_begin += 100
            count -= 100
        return access_id

    def _get_access_from_share_range(self, share_id, range_begin,
                                     share_client_type):
        """Fetch one page (100 entries) of access rules for a share."""
        range_end = range_begin + 100
        url = ("/" + share_client_type + "?filter=PARENTID::" + share_id
               + "&range=[" + str(range_begin)
               + "-" + str(range_end) + "]")
        result = self.call(url, None, "GET")
        self._assert_rest_result(result, 'Get access id by share error!')
        return result.get('data', [])

    def _get_level_by_access_id(self, access_id, share_proto):
        """Return the access level of a rule.

        NFS rules report it as ACCESSVAL, CIFS rules as PERMISSION;
        ACCESSVAL is tried first and PERMISSION used as fallback.
        """
        share_client_type = self._get_share_client_type(share_proto)
        url = "/" + share_client_type + "/" + access_id
        result = self.call(url, None, "GET")
        self._assert_rest_result(result, 'Get access information error!')
        access_info = result.get('data', [])
        access_level = access_info.get('ACCESSVAL')
        if not access_level:
            access_level = access_info.get('PERMISSION')
        return access_level

    # NOTE(review): the signature below continues on the next fused
    # archive line.
    def _change_access_rest(self, access_id,
share_proto, access_level): """Change access level of the share.""" if share_proto == 'NFS': self._change_nfs_access_rest(access_id, access_level) elif share_proto == 'CIFS': self._change_cifs_access_rest(access_id, access_level) else: raise exception.InvalidInput( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) def _change_nfs_access_rest(self, access_id, access_level): url = "/NFS_SHARE_AUTH_CLIENT/" + access_id access = { "ACCESSVAL": access_level, "SYNC": "0", "ALLSQUASH": "1", "ROOTSQUASH": "0", } data = jsonutils.dumps(access) result = self.call(url, data, "PUT") msg = 'Change access error.' self._assert_rest_result(result, msg) def _change_cifs_access_rest(self, access_id, access_level): url = "/CIFS_SHARE_AUTH_CLIENT/" + access_id access = { "PERMISSION": access_level, } data = jsonutils.dumps(access) result = self.call(url, data, "PUT") msg = 'Change access error.' self._assert_rest_result(result, msg) def _allow_access_rest(self, share_id, access_to, share_proto, access_level): """Allow access to the share.""" if share_proto == 'NFS': self._allow_nfs_access_rest(share_id, access_to, access_level) elif share_proto == 'CIFS': self._allow_cifs_access_rest(share_id, access_to, access_level) else: raise exception.InvalidInput( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) def _allow_nfs_access_rest(self, share_id, access_to, access_level): url = "/NFS_SHARE_AUTH_CLIENT" access = { "TYPE": "16409", "NAME": access_to, "PARENTID": share_id, "ACCESSVAL": access_level, "SYNC": "0", "ALLSQUASH": "1", "ROOTSQUASH": "0", } data = jsonutils.dumps(access) result = self.call(url, data, "POST") msg = 'Allow access error.' self._assert_rest_result(result, msg) def _allow_cifs_access_rest(self, share_id, access_to, access_level): url = "/CIFS_SHARE_AUTH_CLIENT" domain_type = { 'local': '2', 'ad': '0' } error_msg = 'Allow access error.' 
access_info = ('Access info (access_to: %(access_to)s, ' 'access_level: %(access_level)s, share_id: %(id)s)' % {'access_to': access_to, 'access_level': access_level, 'id': share_id}) def send_rest(access_to, domain_type): access = { "NAME": access_to, "PARENTID": share_id, "PERMISSION": access_level, "DOMAINTYPE": domain_type, } data = jsonutils.dumps(access) result = self.call(url, data, "POST") error_code = result['error']['code'] if error_code == 0: return True elif error_code != constants.ERROR_USER_OR_GROUP_NOT_EXIST: self._assert_rest_result(result, error_msg) return False if '\\' not in access_to: # First, try to add user access. LOG.debug('Try to add user access. %s.', access_info) if send_rest(access_to, domain_type['local']): return # Second, if add user access failed, # try to add group access. LOG.debug('Failed with add user access, ' 'try to add group access. %s.', access_info) # Group name starts with @. if send_rest('@' + access_to, domain_type['local']): return else: LOG.debug('Try to add domain user access. %s.', access_info) if send_rest(access_to, domain_type['ad']): return # If add domain user access failed, # try to add domain group access. LOG.debug('Failed with add domain user access, ' 'try to add domain group access. %s.', access_info) # Group name starts with @. 
if send_rest('@' + access_to, domain_type['ad']): return raise exception.InvalidShare(reason=error_msg) def _get_share_client_type(self, share_proto): share_client_type = None if share_proto == 'NFS': share_client_type = "NFS_SHARE_AUTH_CLIENT" elif share_proto == 'CIFS': share_client_type = "CIFS_SHARE_AUTH_CLIENT" else: raise exception.InvalidInput( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) return share_client_type def _check_snapshot_id_exist(self, snapshot_info): """Check the snapshot id exists.""" if snapshot_info['error']['code'] == constants.MSG_SNAPSHOT_NOT_FOUND: return False elif snapshot_info['error']['code'] == 0: return True else: err_str = "Check the snapshot id exists error!" err_msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str, 'res': snapshot_info}) raise exception.InvalidShareSnapshot(reason=err_msg) def _get_snapshot_by_id(self, snap_id): """Get snapshot by id""" url = "/FSSNAPSHOT/" + snap_id result = self.call(url, None, "GET") return result def _delete_snapshot(self, snap_id): """Deletes snapshot.""" url = "/FSSNAPSHOT/%s" % snap_id data = jsonutils.dumps({"TYPE": "48", "ID": snap_id}) result = self.call(url, data, "DELETE") self._assert_rest_result(result, 'Delete snapshot error.') def _create_snapshot(self, sharefsid, snapshot_name): """Create a snapshot.""" filepath = { "PARENTTYPE": "40", "TYPE": "48", "PARENTID": sharefsid, "NAME": snapshot_name.replace("-", "_"), "DESCRIPTION": "", } url = "/FSSNAPSHOT" data = jsonutils.dumps(filepath) result = self.call(url, data, "POST") msg = 'Create a snapshot error.' 
self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def _get_share_by_name(self, share_name, share_url_type): """Segments to find share for a period of 100.""" count = self._get_share_count(share_url_type) share = {} range_begin = 0 while True: if count < 0 or share: break share = self._get_share_by_name_range(share_name, range_begin, share_url_type) range_begin += 100 count -= 100 return share def _get_share_count(self, share_url_type): """Get share count.""" url = "/" + share_url_type + "/count" result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get share count error!') return int(result['data']['COUNT']) def _get_share_by_name_range(self, share_name, range_begin, share_url_type): """Get share by share name.""" range_end = range_begin + 100 url = ("/" + share_url_type + "?range=[" + str(range_begin) + "-" + str(range_end) + "]") result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get share by name error!') share_path = self._get_share_path(share_name) share = {} for item in result.get('data', []): if share_path == item['SHAREPATH']: share['ID'] = item['ID'] share['FSID'] = item['FSID'] break return share def _get_share_url_type(self, share_proto): share_url_type = None if share_proto == 'NFS': share_url_type = "NFSHARE" elif share_proto == 'CIFS': share_url_type = "CIFSHARE" else: raise exception.InvalidInput( reason=(_('Invalid NAS protocol supplied: %s.') % share_proto)) return share_url_type def get_fsid_by_name(self, share_name): share_name = share_name.replace("-", "_") url = "/FILESYSTEM?filter=NAME::%s&range=[0-8191]" % share_name result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get filesystem by name error!') for item in result.get('data', []): if share_name == item['NAME']: return item['ID'] def _get_fs_info_by_id(self, fsid): url = "/filesystem/%s" % fsid result = self.call(url, None, "GET") msg = "Get filesystem info by id error!" 
self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) fs = {} fs['HEALTHSTATUS'] = result['data']['HEALTHSTATUS'] fs['RUNNINGSTATUS'] = result['data']['RUNNINGSTATUS'] fs['CAPACITY'] = result['data']['CAPACITY'] fs['ALLOCTYPE'] = result['data']['ALLOCTYPE'] fs['POOLNAME'] = result['data']['PARENTNAME'] fs['COMPRESSION'] = result['data']['ENABLECOMPRESSION'] fs['DEDUP'] = result['data']['ENABLEDEDUP'] fs['SMARTPARTITIONID'] = result['data']['CACHEPARTITIONID'] fs['SMARTCACHEID'] = result['data']['SMARTCACHEPARTITIONID'] return fs def _get_share_path(self, share_name): share_path = "/" + share_name.replace("-", "_") + "/" return share_path def get_share_name_by_id(self, share_id): share_name = "share_" + share_id return share_name def _get_share_name_by_export_location(self, export_location, share_proto): export_location_split = None share_name = None share_ip = None if export_location: if share_proto == 'NFS': export_location_split = export_location.split(':/') if len(export_location_split) == 2: share_name = export_location_split[1] share_ip = export_location_split[0] elif share_proto == 'CIFS': export_location_split = export_location.split('\\') if (len(export_location_split) == 4 and export_location_split[0] == "" and export_location_split[1] == ""): share_ip = export_location_split[2] share_name = export_location_split[3] if share_name is None: raise exception.InvalidInput( reason=(_('No share with export location %s could be found.') % export_location)) root = self._read_xml() target_ip = root.findtext('Storage/LogicalPortIP') if target_ip: if share_ip != target_ip.strip(): raise exception.InvalidInput( reason=(_('The share IP %s is not configured.') % share_ip)) else: raise exception.InvalidInput( reason=(_('The config parameter LogicalPortIP is not set.'))) return share_name def _get_snapshot_id(self, fs_id, snap_name): snapshot_id = (fs_id + "@" + "share_snapshot_" + snap_name.replace("-", "_")) return snapshot_id def 
_change_share_size(self, fsid, new_size): url = "/filesystem/%s" % fsid capacityinfo = { "CAPACITY": new_size, } data = jsonutils.dumps(capacityinfo) result = self.call(url, data, "PUT") msg = "Change a share size error!" self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def _change_fs_name(self, fsid, name): url = "/filesystem/%s" % fsid fs_param = { "NAME": name.replace("-", "_"), } data = jsonutils.dumps(fs_param) result = self.call(url, data, "PUT") msg = _("Change filesystem name error.") self._assert_rest_result(result, msg) def _change_extra_specs(self, fsid, extra_specs): url = "/filesystem/%s" % fsid fs_param = { "ENABLEDEDUP": extra_specs['dedupe'], "ENABLECOMPRESSION": extra_specs['compression'] } data = jsonutils.dumps(fs_param) result = self.call(url, data, "PUT") msg = _("Change extra_specs error.") self._assert_rest_result(result, msg) def _get_partition_id_by_name(self, name): url = "/cachepartition" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get partition by name error.')) if "data" in result: for item in result['data']: if name == item['NAME']: return item['ID'] return None def get_partition_info_by_id(self, partitionid): url = '/cachepartition/' + partitionid result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get partition by partition id error.')) return result['data'] def _add_fs_to_partition(self, fs_id, partition_id): url = "/filesystem/associate/cachepartition" data = jsonutils.dumps({"ID": partition_id, "ASSOCIATEOBJTYPE": 40, "ASSOCIATEOBJID": fs_id, "TYPE": 268}) result = self.call(url, data, "POST") self._assert_rest_result(result, _('Add filesystem to partition error.')) def _remove_fs_from_partition(self, fs_id, partition_id): url = "/smartPartition/removeFs" data = jsonutils.dumps({"ID": partition_id, "ASSOCIATEOBJTYPE": 40, "ASSOCIATEOBJID": fs_id, "TYPE": 268}) result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove filesystem 
from partition error.')) def _rename_share_snapshot(self, snapshot_id, new_name): url = "/FSSNAPSHOT/" + snapshot_id data = jsonutils.dumps({"NAME": new_name}) result = self.call(url, data, "PUT") msg = _('Rename share snapshot on array error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def _get_cache_id_by_name(self, name): url = "/SMARTCACHEPARTITION" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get cache by name error.')) if "data" in result: for item in result['data']: if name == item['NAME']: return item['ID'] return None def get_cache_info_by_id(self, cacheid): url = "/SMARTCACHEPARTITION/" + cacheid data = jsonutils.dumps({"TYPE": "273", "ID": cacheid}) result = self.call(url, data, "GET") self._assert_rest_result( result, _('Get smartcache by cache id error.')) return result['data'] def _add_fs_to_cache(self, fs_id, cache_id): url = "/SMARTCACHEPARTITION/CREATE_ASSOCIATE" data = jsonutils.dumps({"ID": cache_id, "ASSOCIATEOBJTYPE": 40, "ASSOCIATEOBJID": fs_id, "TYPE": 273}) result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Add filesystem to cache error.')) def get_qos(self): url = "/ioclass" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS information error.')) return result def find_available_qos(self, qos): """"Find available QoS on the array.""" qos_id = None fs_list = [] temp_qos = copy.deepcopy(qos) result = self.get_qos() if 'data' in result: if 'LATENCY' not in temp_qos: temp_qos['LATENCY'] = '0' for item in result['data']: for key in constants.OPTS_QOS_VALUE: if temp_qos.get(key.upper()) != item.get(key.upper()): break else: fs_num = len(item['FSLIST'].split(",")) # We use this QoS only if the filesystems in it is less # than 64, else we cannot add filesystem to this QoS # any more. 
if (item['RUNNINGSTATUS'] == constants.STATUS_QOS_ACTIVE and fs_num < constants.MAX_FS_NUM_IN_QOS and item['NAME'].startswith( constants.QOS_NAME_PREFIX) and item['LUNLIST'] == '[""]'): qos_id = item['ID'] fs_list = item['FSLIST'] break return (qos_id, fs_list) def add_share_to_qos(self, qos_id, fs_id, fs_list): """Add filesystem to QoS.""" url = "/ioclass/" + qos_id new_fs_list = [] fs_list_string = fs_list[1:-1] for fs_string in fs_list_string.split(","): tmp_fs_id = fs_string[1:-1] if '' != tmp_fs_id and tmp_fs_id != fs_id: new_fs_list.append(tmp_fs_id) new_fs_list.append(fs_id) data = jsonutils.dumps({"FSLIST": new_fs_list, "TYPE": 230, "ID": qos_id}) result = self.call(url, data, "PUT") msg = _('Associate filesystem to Qos error.') self._assert_rest_result(result, msg) def create_qos_policy(self, qos, fs_id): # Get local time. localtime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) # Package QoS name. qos_name = constants.QOS_NAME_PREFIX + fs_id + '_' + localtime mergedata = { "TYPE": "230", "NAME": qos_name, "FSLIST": ["%s" % fs_id], "CLASSTYPE": "1", "SCHEDULEPOLICY": "2", "SCHEDULESTARTTIME": "1410969600", "STARTTIME": "08:00", "DURATION": "86400", "CYCLESET": "[1,2,3,4,5,6,0]", } mergedata.update(qos) data = jsonutils.dumps(mergedata) url = "/ioclass" result = self.call(url, data, 'POST') self._assert_rest_result(result, _('Create QoS policy error.')) return result['data']['ID'] def activate_deactivate_qos(self, qos_id, enablestatus): """Activate or deactivate QoS. 
enablestatus: true (activate) enablestatus: false (deactivate) """ url = "/ioclass/active/" + qos_id data = jsonutils.dumps({ "TYPE": 230, "ID": qos_id, "ENABLESTATUS": enablestatus}) result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Activate or deactivate QoS error.')) def change_fs_priority_high(self, fs_id): """Change fs priority to high.""" url = "/filesystem/" + fs_id data = jsonutils.dumps({"IOPRIORITY": "3"}) result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Change filesystem priority error.')) def delete_qos_policy(self, qos_id): """Delete a QoS policy.""" url = "/ioclass/" + qos_id data = jsonutils.dumps({"TYPE": "230", "ID": qos_id}) result = self.call(url, data, 'DELETE') self._assert_rest_result(result, _('Delete QoS policy error.')) def get_qosid_by_fsid(self, fs_id): """Get QoS id by fs id.""" url = "/filesystem/" + fs_id result = self.call(url, None, "GET") self._assert_rest_result( result, _('Get QoS id by filesystem id error.')) return result['data'].get('IOCLASSID') def get_fs_list_in_qos(self, qos_id): """Get the filesystem list in QoS.""" qos_info = self.get_qos_info(qos_id) fs_list = [] fs_string = qos_info['FSLIST'][1:-1] for fs in fs_string.split(","): fs_id = fs[1:-1] fs_list.append(fs_id) return fs_list def get_qos_info(self, qos_id): """Get QoS information.""" url = "/ioclass/" + qos_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS information error.')) return result['data'] def remove_fs_from_qos(self, fs_id, fs_list, qos_id): """Remove filesystem from QoS.""" fs_list = [i for i in fs_list if i != fs_id] url = "/ioclass/" + qos_id data = jsonutils.dumps({"FSLIST": fs_list, "TYPE": 230, "ID": qos_id}) result = self.call(url, data, "PUT") msg = _('Remove filesystem from QoS error.') self._assert_rest_result(result, msg) def _remove_fs_from_cache(self, fs_id, cache_id): url = "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE" data = jsonutils.dumps({"ID": cache_id, 
"ASSOCIATEOBJTYPE": 40, "ASSOCIATEOBJID": fs_id, "TYPE": 273}) result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove filesystem from cache error.')) def get_all_eth_port(self): url = "/ETH_PORT" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get all eth port error.')) all_eth = {} if "data" in result: all_eth = result['data'] return all_eth def get_eth_port_by_id(self, port_id): url = "/ETH_PORT/" + port_id result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get eth port by id error.')) if "data" in result: return result['data'] return None def get_all_bond_port(self): url = "/BOND_PORT" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get all bond port error.')) all_bond = {} if "data" in result: all_bond = result['data'] return all_bond def get_port_id(self, port_name, port_type): if port_type == constants.PORT_TYPE_ETH: all_eth = self.get_all_eth_port() for item in all_eth: if port_name == item['LOCATION']: return item['ID'] elif port_type == constants.PORT_TYPE_BOND: all_bond = self.get_all_bond_port() for item in all_bond: if port_name == item['NAME']: return item['ID'] return None def get_all_vlan(self): url = "/vlan" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get all vlan error.')) all_vlan = {} if "data" in result: all_vlan = result['data'] return all_vlan def get_vlan(self, port_id, vlan_tag): url = "/vlan" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get vlan error.')) vlan_tag = str(vlan_tag) if "data" in result: for item in result['data']: if port_id == item['PORTID'] and vlan_tag == item['TAG']: return True, item['ID'] return False, None def create_vlan(self, port_id, port_type, vlan_tag): url = "/vlan" data = jsonutils.dumps({"PORTID": port_id, "PORTTYPE": port_type, "TAG": str(vlan_tag), "TYPE": "280"}) result = self.call(url, data, "POST") self._assert_rest_result(result, _('Create vlan error.')) 
return result['data']['ID'] def check_vlan_exists_by_id(self, vlan_id): all_vlan = self.get_all_vlan() return any(vlan['ID'] == vlan_id for vlan in all_vlan) def delete_vlan(self, vlan_id): url = "/vlan/" + vlan_id result = self.call(url, None, 'DELETE') if result['error']['code'] == constants.ERROR_LOGICAL_PORT_EXIST: LOG.warning('Cannot delete vlan because there is ' 'a logical port on vlan.') return self._assert_rest_result(result, _('Delete vlan error.')) def get_logical_port(self, home_port_id, ip, subnet): url = "/LIF" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get logical port error.')) if "data" not in result: return False, None for item in result['data']: if (home_port_id == item['HOMEPORTID'] and ip == item['IPV4ADDR'] and subnet == item['IPV4MASK']): if item['OPERATIONALSTATUS'] != 'true': self._activate_logical_port(item['ID']) return True, item['ID'] return False, None def _activate_logical_port(self, logical_port_id): url = "/LIF/" + logical_port_id data = jsonutils.dumps({"OPERATIONALSTATUS": "true"}) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Activate logical port error.')) def create_logical_port(self, home_port_id, home_port_type, ip, subnet): url = "/LIF" info = { "ADDRESSFAMILY": 0, "CANFAILOVER": "true", "HOMEPORTID": home_port_id, "HOMEPORTTYPE": home_port_type, "IPV4ADDR": ip, "IPV4GATEWAY": "", "IPV4MASK": subnet, "NAME": ip, "OPERATIONALSTATUS": "true", "ROLE": 2, "SUPPORTPROTOCOL": 3, "TYPE": "279", } data = jsonutils.dumps(info) result = self.call(url, data, 'POST') self._assert_rest_result(result, _('Create logical port error.')) return result['data']['ID'] def check_logical_port_exists_by_id(self, logical_port_id): all_logical_port = self.get_all_logical_port() return any(port['ID'] == logical_port_id for port in all_logical_port) def get_all_logical_port(self): url = "/LIF" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get all logical port error.')) 
all_logical_port = {} if "data" in result: all_logical_port = result['data'] return all_logical_port def delete_logical_port(self, logical_port_id): url = "/LIF/" + logical_port_id result = self.call(url, None, 'DELETE') self._assert_rest_result(result, _('Delete logical port error.')) def set_DNS_ip_address(self, dns_ip_list): if len(dns_ip_list) > 3: message = _('Most three ips can be set to DNS.') LOG.error(message) raise exception.InvalidInput(reason=message) url = "/DNS_Server" dns_info = { "ADDRESS": jsonutils.dumps(dns_ip_list), "TYPE": "260", } data = jsonutils.dumps(dns_info) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Set DNS ip address error.')) if "data" in result: return result['data'] return None def get_DNS_ip_address(self): url = "/DNS_Server" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get DNS ip address error.')) ip_address = {} if "data" in result: ip_address = jsonutils.loads(result['data']['ADDRESS']) return ip_address def add_AD_config(self, user, password, domain, system_name): url = "/AD_CONFIG" info = { "ADMINNAME": user, "ADMINPWD": password, "DOMAINSTATUS": 1, "FULLDOMAINNAME": domain, "OU": "", "SYSTEMNAME": system_name, "TYPE": "16414", } data = jsonutils.dumps(info) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Add AD config error.')) def delete_AD_config(self, user, password): url = "/AD_CONFIG" info = { "ADMINNAME": user, "ADMINPWD": password, "DOMAINSTATUS": 0, "TYPE": "16414", } data = jsonutils.dumps(info) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Delete AD config error.')) def get_AD_config(self): url = "/AD_CONFIG" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get AD config error.')) if "data" in result: return result['data'] return None def get_AD_domain_name(self): result = self.get_AD_config() if result and result['DOMAINSTATUS'] == '1': return True, result['FULLDOMAINNAME'] return 
False, None def add_LDAP_config(self, server, domain): url = "/LDAP_CONFIG" info = { "BASEDN": domain, "LDAPSERVER": server, "PORTNUM": 389, "TRANSFERTYPE": "1", "TYPE": "16413", "USERNAME": "", } data = jsonutils.dumps(info) result = self.call(url, data, 'PUT') self._assert_rest_result(result, _('Add LDAP config error.')) def delete_LDAP_config(self): url = "/LDAP_CONFIG" result = self.call(url, None, 'DELETE') self._assert_rest_result(result, _('Delete LDAP config error.')) def get_LDAP_config(self): url = "/LDAP_CONFIG" result = self.call(url, None, 'GET') self._assert_rest_result(result, _('Get LDAP config error.')) if "data" in result: return result['data'] return None def get_LDAP_domain_server(self): result = self.get_LDAP_config() if result and result['LDAPSERVER']: return True, result['LDAPSERVER'] return False, None def _get_array_info(self): url = "/system/" result = self.call(url, None, "GET") msg = _('Get array info error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result.get('data') def find_array_version(self): info = self._get_array_info() return info.get('PRODUCTVERSION') def get_array_wwn(self): info = self._get_array_info() return info.get('wwn') def _get_all_remote_devices(self): url = "/remote_device" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get all remote devices error.')) return result.get('data', []) def get_remote_device_by_wwn(self, wwn): devices = self._get_all_remote_devices() for device in devices: if device.get('WWN') == wwn: return device return {} def create_replication_pair(self, pair_params): url = "/REPLICATIONPAIR" data = jsonutils.dumps(pair_params) result = self.call(url, data, "POST") msg = _('Failed to create replication pair for ' '(LOCALRESID: %(lres)s, REMOTEDEVICEID: %(rdev)s, ' 'REMOTERESID: %(rres)s).') % { 'lres': pair_params['LOCALRESID'], 'rdev': pair_params['REMOTEDEVICEID'], 'rres': pair_params['REMOTERESID']} 
self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def split_replication_pair(self, pair_id): url = '/REPLICATIONPAIR/split' data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"}) result = self.call(url, data, "PUT") msg = _('Failed to split replication pair %s.') % pair_id self._assert_rest_result(result, msg) def switch_replication_pair(self, pair_id): url = '/REPLICATIONPAIR/switch' data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"}) result = self.call(url, data, "PUT") msg = _('Failed to switch replication pair %s.') % pair_id self._assert_rest_result(result, msg) def delete_replication_pair(self, pair_id): url = "/REPLICATIONPAIR/" + pair_id data = None result = self.call(url, data, "DELETE") if (result['error']['code'] == constants.ERROR_REPLICATION_PAIR_NOT_EXIST): LOG.warning('Replication pair %s was not found.', pair_id) return msg = _('Failed to delete replication pair %s.') % pair_id self._assert_rest_result(result, msg) def sync_replication_pair(self, pair_id): url = "/REPLICATIONPAIR/sync" data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"}) result = self.call(url, data, "PUT") msg = _('Failed to sync replication pair %s.') % pair_id self._assert_rest_result(result, msg) def cancel_pair_secondary_write_lock(self, pair_id): url = "/REPLICATIONPAIR/CANCEL_SECODARY_WRITE_LOCK" data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"}) result = self.call(url, data, "PUT") msg = _('Failed to cancel replication pair %s ' 'secondary write lock.') % pair_id self._assert_rest_result(result, msg) def set_pair_secondary_write_lock(self, pair_id): url = "/REPLICATIONPAIR/SET_SECODARY_WRITE_LOCK" data = jsonutils.dumps({"ID": pair_id, "TYPE": "263"}) result = self.call(url, data, "PUT") msg = _('Failed to set replication pair %s ' 'secondary write lock.') % pair_id self._assert_rest_result(result, msg) def get_replication_pair_by_id(self, pair_id): url = "/REPLICATIONPAIR/" + pair_id result = self.call(url, None, 
"GET") msg = _('Failed to get replication pair %s.') % pair_id self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result.get('data') def rollback_snapshot(self, snap_id): url = "/FSSNAPSHOT/ROLLBACK_FSSNAPSHOT" data = jsonutils.dumps({"ID": snap_id}) result = self.call(url, data, "PUT") msg = _('Failed to rollback snapshot %s.') % snap_id self._assert_rest_result(result, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/v3/replication.py0000664000175000017500000002344500000000000023604 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from oslo_utils import strutils from manila.common import constants as common_constants from manila import exception from manila.i18n import _ from manila.share.drivers.huawei import constants LOG = log.getLogger(__name__) class ReplicaPairManager(object): def __init__(self, helper): self.helper = helper def create(self, local_share_info, remote_device_wwn, remote_fs_id): local_share_name = local_share_info.get('name') try: local_fs_id = self.helper.get_fsid_by_name(local_share_name) if not local_fs_id: msg = _("Local fs was not found by name %s.") LOG.error(msg, local_share_name) raise exception.ReplicationException( reason=msg % local_share_name) remote_device = self.helper.get_remote_device_by_wwn( remote_device_wwn) pair_params = { "LOCALRESID": local_fs_id, "LOCALRESTYPE": constants.FILE_SYSTEM_TYPE, "REMOTEDEVICEID": remote_device.get('ID'), "REMOTEDEVICENAME": remote_device.get('NAME'), "REMOTERESID": remote_fs_id, "REPLICATIONMODEL": constants.REPLICA_ASYNC_MODEL, "RECOVERYPOLICY": '2', "SYNCHRONIZETYPE": '1', "SPEED": constants.REPLICA_SPEED_MEDIUM, } pair_info = self.helper.create_replication_pair(pair_params) except Exception: msg = ("Failed to create replication pair for share %s.") LOG.exception(msg, local_share_name) raise self._sync_replication_pair(pair_info['ID']) return pair_info['ID'] def _get_replication_pair_info(self, replica_pair_id): try: pair_info = self.helper.get_replication_pair_by_id( replica_pair_id) except Exception: LOG.exception('Failed to get replication pair info for ' '%s.', replica_pair_id) raise return pair_info def _check_replication_health(self, pair_info): if (pair_info['HEALTHSTATUS'] != constants.REPLICA_HEALTH_STATUS_NORMAL): return common_constants.STATUS_ERROR def _check_replication_running_status(self, pair_info): if (pair_info['RUNNINGSTATUS'] in ( constants.REPLICA_RUNNING_STATUS_SPLITTED, constants.REPLICA_RUNNING_STATUS_TO_RECOVER)): return common_constants.REPLICA_STATE_OUT_OF_SYNC if 
(pair_info['RUNNINGSTATUS'] in ( constants.REPLICA_RUNNING_STATUS_INTERRUPTED, constants.REPLICA_RUNNING_STATUS_INVALID)): return common_constants.STATUS_ERROR def _check_replication_secondary_data_status(self, pair_info): if (pair_info['SECRESDATASTATUS'] in constants.REPLICA_DATA_STATUS_IN_SYNC): return common_constants.REPLICA_STATE_IN_SYNC else: return common_constants.REPLICA_STATE_OUT_OF_SYNC def _check_replica_state(self, pair_info): result = self._check_replication_health(pair_info) if result is not None: return result result = self._check_replication_running_status(pair_info) if result is not None: return result return self._check_replication_secondary_data_status(pair_info) def get_replica_state(self, replica_pair_id): try: pair_info = self._get_replication_pair_info(replica_pair_id) except Exception: # if cannot communicate to backend, return error LOG.error('Cannot get replica state, return %s', common_constants.STATUS_ERROR) return common_constants.STATUS_ERROR return self._check_replica_state(pair_info) def _sync_replication_pair(self, pair_id): try: self.helper.sync_replication_pair(pair_id) except Exception as err: LOG.warning('Failed to sync replication pair %(id)s. ' 'Reason: %(err)s', {'id': pair_id, 'err': err}) def update_replication_pair_state(self, replica_pair_id): pair_info = self._get_replication_pair_info(replica_pair_id) health = self._check_replication_health(pair_info) if health is not None: LOG.warning("Cannot update the replication %s " "because it's not in normal status.", replica_pair_id) return if strutils.bool_from_string(pair_info['ISPRIMARY']): # current replica is primary, not consistent with manila. # the reason for this circumstance is the last switch over # didn't succeed completely. continue the switch over progress.. 
try: self.helper.switch_replication_pair(replica_pair_id) except Exception: msg = ('Replication pair %s primary/secondary ' 'relationship is not right, try to switch over ' 'again but still failed.') LOG.exception(msg, replica_pair_id) return # refresh the replication pair info pair_info = self._get_replication_pair_info(replica_pair_id) if pair_info['SECRESACCESS'] == constants.REPLICA_SECONDARY_RW: try: self.helper.set_pair_secondary_write_lock(replica_pair_id) except Exception: msg = ('Replication pair %s secondary access is R/W, ' 'try to set write lock but still failed.') LOG.exception(msg, replica_pair_id) return if pair_info['RUNNINGSTATUS'] in ( constants.REPLICA_RUNNING_STATUS_NORMAL, constants.REPLICA_RUNNING_STATUS_SPLITTED, constants.REPLICA_RUNNING_STATUS_TO_RECOVER): self._sync_replication_pair(replica_pair_id) def switch_over(self, replica_pair_id): pair_info = self._get_replication_pair_info(replica_pair_id) if strutils.bool_from_string(pair_info['ISPRIMARY']): LOG.warning('The replica to promote is already primary, ' 'no need to switch over.') return replica_state = self._check_replica_state(pair_info) if replica_state != common_constants.REPLICA_STATE_IN_SYNC: # replica is not in SYNC state, can't be promoted msg = _('Data of replica %s is not synchronized, ' 'can not promote.') raise exception.ReplicationException( reason=msg % replica_pair_id) try: self.helper.split_replication_pair(replica_pair_id) except Exception: # split failed # means replication pair is in an abnormal status, # ignore this exception, continue to cancel secondary write lock, # let secondary share accessible for disaster recovery. 
LOG.exception('Failed to split replication pair %s while ' 'switching over.', replica_pair_id) try: self.helper.cancel_pair_secondary_write_lock(replica_pair_id) except Exception: LOG.exception('Failed to cancel replication pair %s ' 'secondary write lock.', replica_pair_id) raise try: self.helper.switch_replication_pair(replica_pair_id) self.helper.set_pair_secondary_write_lock(replica_pair_id) self.helper.sync_replication_pair(replica_pair_id) except Exception: LOG.exception('Failed to completely switch over ' 'replication pair %s.', replica_pair_id) # for all the rest steps, # because secondary share is accessible now, # the upper business may access the secondary share, # return success to tell replica is primary. return def delete_replication_pair(self, replica_pair_id): try: self.helper.split_replication_pair(replica_pair_id) except Exception: # Ignore this exception because replication pair may at some # abnormal status that supports deleting. LOG.warning('Failed to split replication pair %s ' 'before deleting it. Ignore this exception, ' 'and try to delete anyway.', replica_pair_id) try: self.helper.delete_replication_pair(replica_pair_id) except Exception: LOG.exception('Failed to delete replication pair %s.', replica_pair_id) raise def create_replica_pair(self, ctx, local_share_info, remote_device_wwn, remote_fs_id): """Create replication pair for RPC call. This is for remote call, because replica pair can only be created by master node. """ return self.create(local_share_info, remote_device_wwn, remote_fs_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/v3/rpcapi.py0000664000175000017500000000312300000000000022540 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_messaging as messaging from manila import rpc from manila.share import utils class HuaweiV3API(object): """Client side of the huawei V3 rpc API. API version history: 1.0 - Initial version. """ BASE_RPC_API_VERSION = '1.0' def __init__(self): self.topic = 'huawei_v3' target = messaging.Target(topic=self.topic, version=self.BASE_RPC_API_VERSION) self.client = rpc.get_client(target, version_cap='1.0') def create_replica_pair(self, context, host, local_share_info, remote_device_wwn, remote_fs_id): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.0') return call_context.call( context, 'create_replica_pair', local_share_info=local_share_info, remote_device_wwn=remote_device_wwn, remote_fs_id=remote_fs_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/huawei/v3/smartx.py0000664000175000017500000001745000000000000022610 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import excutils from oslo_utils import strutils from manila import exception from manila.i18n import _ from manila.share.drivers.huawei import constants class SmartPartition(object): def __init__(self, helper): self.helper = helper def add(self, opts, fsid): if not strutils.bool_from_string(opts['huawei_smartpartition']): return if not opts['partitionname']: raise exception.InvalidInput( reason=_('Partition name is None, please set ' 'huawei_smartpartition:partitionname in key.')) partition_id = self.helper._get_partition_id_by_name( opts['partitionname']) if not partition_id: raise exception.InvalidInput( reason=_('Can not find partition id.')) self.helper._add_fs_to_partition(fsid, partition_id) class SmartCache(object): def __init__(self, helper): self.helper = helper def add(self, opts, fsid): if not strutils.bool_from_string(opts['huawei_smartcache']): return if not opts['cachename']: raise exception.InvalidInput( reason=_('Illegal value specified for cache.')) cache_id = self.helper._get_cache_id_by_name(opts['cachename']) if not cache_id: raise exception.InvalidInput( reason=(_('Can not find cache id by cache name %(name)s.') % {'name': opts['cachename']})) self.helper._add_fs_to_cache(fsid, cache_id) class SmartQos(object): def __init__(self, helper): self.helper = helper def create_qos(self, qos, fs_id): policy_id = None try: # Check QoS priority. if self._check_qos_high_priority(qos): self.helper.change_fs_priority_high(fs_id) # Create QoS policy and activate it. 
(qos_id, fs_list) = self.helper.find_available_qos(qos) if qos_id is not None: self.helper.add_share_to_qos(qos_id, fs_id, fs_list) else: policy_id = self.helper.create_qos_policy(qos, fs_id) self.helper.activate_deactivate_qos(policy_id, True) except exception.InvalidInput: with excutils.save_and_reraise_exception(): if policy_id is not None: self.helper.delete_qos_policy(policy_id) def _check_qos_high_priority(self, qos): """Check QoS priority.""" for key, value in qos.items(): if (key.find('MIN') == 0) or (key.find('LATENCY') == 0): return True return False def delete_qos(self, qos_id): qos_info = self.helper.get_qos_info(qos_id) qos_status = qos_info['RUNNINGSTATUS'] if qos_status != constants.STATUS_QOS_INACTIVATED: self.helper.activate_deactivate_qos(qos_id, False) self.helper.delete_qos_policy(qos_id) class SmartX(object): def __init__(self, helper): self.helper = helper def get_smartx_extra_specs_opts(self, opts): opts = self.get_capabilities_opts(opts, 'dedupe') opts = self.get_capabilities_opts(opts, 'compression') opts = self.get_smartprovisioning_opts(opts) opts = self.get_smartcache_opts(opts) opts = self.get_smartpartition_opts(opts) opts = self.get_sectorsize_opts(opts) qos = self.get_qos_opts(opts) return opts, qos def get_capabilities_opts(self, opts, key): if strutils.bool_from_string(opts[key]): opts[key] = True else: opts[key] = False return opts def get_smartprovisioning_opts(self, opts): thin_provision = opts.get('thin_provisioning') if (thin_provision is None or strutils.bool_from_string(thin_provision)): opts['LUNType'] = constants.ALLOC_TYPE_THIN_FLAG else: opts['LUNType'] = constants.ALLOC_TYPE_THICK_FLAG return opts def get_smartcache_opts(self, opts): if strutils.bool_from_string(opts['huawei_smartcache']): if not opts['cachename']: raise exception.InvalidInput( reason=_('Cache name is None, please set ' 'huawei_smartcache:cachename in key.')) else: opts['cachename'] = None return opts def get_smartpartition_opts(self, opts): if 
strutils.bool_from_string(opts['huawei_smartpartition']): if not opts['partitionname']: raise exception.InvalidInput( reason=_('Partition name is None, please set ' 'huawei_smartpartition:partitionname in key.')) else: opts['partitionname'] = None return opts def get_sectorsize_opts(self, opts): value = None if strutils.bool_from_string(opts.get('huawei_sectorsize')): value = opts.get('sectorsize') if not value: root = self.helper._read_xml() sectorsize = root.findtext('Filesystem/SectorSize') if sectorsize: sectorsize = sectorsize.strip() value = sectorsize if value: if value not in constants.VALID_SECTOR_SIZES: raise exception.InvalidInput( reason=(_('Illegal value(%s) specified for sectorsize: ' 'set to either 4, 8, 16, 32 or 64.') % value)) else: opts['sectorsize'] = int(value) return opts def get_qos_opts(self, opts): qos = {} if not strutils.bool_from_string(opts.get('qos')): return for key, value in opts.items(): if (key in constants.OPTS_QOS_VALUE) and value is not None: if (key.upper() != 'IOTYPE') and (int(value) <= 0): err_msg = (_('QoS config is wrong. %(key)s' ' must be set greater than 0.') % {'key': key}) raise exception.InvalidInput(reason=err_msg) elif ((key.upper() == 'IOTYPE') and (value not in ['0', '1', '2'])): raise exception.InvalidInput( reason=(_('Illegal value specified for IOTYPE: ' 'set to either 0, 1, or 2.'))) else: qos[key.upper()] = value if len(qos) <= 1 or 'IOTYPE' not in qos: msg = (_('QoS config is incomplete. Please set more. ' 'QoS policy: %(qos_policy)s.') % {'qos_policy': qos}) raise exception.InvalidInput(reason=msg) lowerlimit = constants.QOS_LOWER_LIMIT upperlimit = constants.QOS_UPPER_LIMIT if (set(lowerlimit).intersection(set(qos)) and set(upperlimit).intersection(set(qos))): msg = (_('QoS policy conflict, both protection policy and ' 'restriction policy are set. 
' 'QoS policy: %(qos_policy)s ') % {'qos_policy': qos}) raise exception.InvalidInput(reason=msg) return qos ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9176717 manila-21.0.0/manila/share/drivers/ibm/0000775000175000017500000000000000000000000017646 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/ibm/__init__.py0000664000175000017500000000000000000000000021745 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/ibm/gpfs.py0000664000175000017500000014342700000000000021172 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GPFS Driver for shares. Config Requirements: GPFS file system must have quotas enabled (`mmchfs -Q yes`). Notes: GPFS independent fileset is used for each share. TODO(nileshb): add support for share server creation/deletion/handling. Limitation: While using remote GPFS node, with Ganesha NFS, 'gpfs_ssh_private_key' for remote login to the GPFS node must be specified and there must be a passwordless authentication already setup between the Manila share service and the remote GPFS node. 
""" import abc import math import os import re import shlex import socket from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import units from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.helpers import NFSHelper from manila.share import share_types from manila import ssh_utils from manila import utils LOG = log.getLogger(__name__) # matches multiple comma separated avpairs on a line. values with an embedded # comma must be wrapped in quotation marks AVPATTERN = re.compile(r'\s*(?P\w+)\s*=\s*(?P' r'(["][a-zA-Z0-9_, ]+["])|(\w+))\s*[,]?') ERR_FILE_NOT_FOUND = 2 gpfs_share_opts = [ cfg.HostAddressOpt('gpfs_share_export_ip', help='IP to be added to GPFS export string.'), cfg.StrOpt('gpfs_mount_point_base', default='$state_path/mnt', help='Base folder where exported shares are located.'), cfg.StrOpt('gpfs_nfs_server_type', default='CES', help=('NFS Server type. Valid choices are "CES" (Ganesha NFS) ' 'or "KNFS" (Kernel NFS).')), cfg.ListOpt('gpfs_nfs_server_list', help=('A list of the fully qualified NFS server names that ' 'make up the OpenStack Manila configuration.')), cfg.BoolOpt('is_gpfs_node', default=False, help=('True:when Manila services are running on one of the ' 'Spectrum Scale node. ' 'False:when Manila services are not running on any of ' 'the Spectrum Scale node.')), cfg.PortOpt('gpfs_ssh_port', default=22, help='GPFS server SSH port.'), cfg.StrOpt('gpfs_ssh_login', help='GPFS server SSH login name.'), cfg.StrOpt('gpfs_ssh_password', secret=True, help='GPFS server SSH login password. 
' 'The password is not needed, if \'gpfs_ssh_private_key\' ' 'is configured.'), cfg.StrOpt('gpfs_ssh_private_key', help='Path to GPFS server SSH private key for login.'), cfg.ListOpt('gpfs_share_helpers', default=[ 'KNFS=manila.share.drivers.ibm.gpfs.KNFSHelper', 'CES=manila.share.drivers.ibm.gpfs.CESHelper', ], help='Specify list of share export helpers.'), ] CONF = cfg.CONF CONF.register_opts(gpfs_share_opts) class GPFSShareDriver(driver.ExecuteMixin, driver.GaneshaMixin, driver.ShareDriver): """GPFS Share Driver. Executes commands relating to Shares. Supports creation of shares on a GPFS cluster. API version history: 1.0 - Initial version. 1.1 - Added extend_share functionality 2.0 - Added CES support for NFS Ganesha """ def __init__(self, *args, **kwargs): """Do initialization.""" super(GPFSShareDriver, self).__init__(False, *args, **kwargs) self._helpers = {} self.configuration.append_config_values(gpfs_share_opts) self.backend_name = self.configuration.safe_get( 'share_backend_name') or "IBM Storage System" self.sshpool = None self.ssh_connections = {} self._gpfs_execute = None if self.configuration.is_gpfs_node: self.GPFS_PATH = '' else: self.GPFS_PATH = '/usr/lpp/mmfs/bin/' def do_setup(self, context): """Any initialization the share driver does while starting.""" super(GPFSShareDriver, self).do_setup(context) if self.configuration.is_gpfs_node: self._gpfs_execute = self._gpfs_local_execute else: self._gpfs_execute = self._gpfs_remote_execute self._setup_helpers() def _gpfs_local_execute(self, *cmd, **kwargs): if 'run_as_root' not in kwargs: kwargs.update({'run_as_root': True}) if 'ignore_exit_code' in kwargs: check_exit_code = kwargs.pop('ignore_exit_code') check_exit_code.append(0) kwargs.update({'check_exit_code': check_exit_code}) return utils.execute(*cmd, **kwargs) def _gpfs_remote_execute(self, *cmd, **kwargs): host = self.configuration.gpfs_share_export_ip check_exit_code = kwargs.pop('check_exit_code', True) ignore_exit_code = 
kwargs.pop('ignore_exit_code', None) return self._run_ssh(host, cmd, ignore_exit_code, check_exit_code) def _sanitize_command(self, cmd_list): # pylint: disable=too-many-function-args return ' '.join(shlex.quote(cmd_arg) for cmd_arg in cmd_list) def _run_ssh(self, host, cmd_list, ignore_exit_code=None, check_exit_code=True): command = self._sanitize_command(cmd_list) if not self.sshpool: gpfs_ssh_login = self.configuration.gpfs_ssh_login password = self.configuration.gpfs_ssh_password privatekey = self.configuration.gpfs_ssh_private_key gpfs_ssh_port = self.configuration.gpfs_ssh_port ssh_conn_timeout = self.configuration.ssh_conn_timeout min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn self.sshpool = ssh_utils.SSHPool(host, gpfs_ssh_port, ssh_conn_timeout, gpfs_ssh_login, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) try: with self.sshpool.item() as ssh: return self._gpfs_ssh_execute( ssh, command, ignore_exit_code=ignore_exit_code, check_exit_code=check_exit_code) except Exception as e: with excutils.save_and_reraise_exception(): msg = (_('Error running SSH command: %(cmd)s. 
' 'Error: %(excmsg)s.') % {'cmd': command, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _gpfs_ssh_execute(self, ssh, cmd, ignore_exit_code=None, check_exit_code=True): sanitized_cmd = strutils.mask_password(cmd) LOG.debug('Running cmd (SSH): %s', sanitized_cmd) stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) channel = stdout_stream.channel stdout = stdout_stream.read() sanitized_stdout = strutils.mask_password(stdout) stderr = stderr_stream.read() sanitized_stderr = strutils.mask_password(stderr) stdin_stream.close() exit_status = channel.recv_exit_status() # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if ((check_exit_code and exit_status != 0) and (ignore_exit_code is None or exit_status not in ignore_exit_code)): raise exception.ProcessExecutionError(exit_code=exit_status, stdout=sanitized_stdout, stderr=sanitized_stderr, cmd=sanitized_cmd) return (sanitized_stdout, sanitized_stderr) def _check_gpfs_state(self): try: out, __ = self._gpfs_execute(self.GPFS_PATH + 'mmgetstate', '-Y') except exception.ProcessExecutionError as e: msg = (_('Failed to check GPFS state. Error: %(excmsg)s.') % {'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) lines = out.splitlines() try: state_token = lines[0].split(':').index('state') gpfs_state = lines[1].split(':')[state_token] except (IndexError, ValueError) as e: msg = (_('Failed to check GPFS state. Error: %(excmsg)s.') % {'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) if gpfs_state != 'active': return False return True def _is_dir(self, path): try: output, __ = self._gpfs_execute('stat', '--format=%F', path, run_as_root=False) except exception.ProcessExecutionError as e: msg = (_('%(path)s is not a directory. 
Error: %(excmsg)s') % {'path': path, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) return output.strip() == 'directory' def _is_gpfs_path(self, directory): try: self._gpfs_execute(self.GPFS_PATH + 'mmlsattr', directory) except exception.ProcessExecutionError as e: msg = (_('%(dir)s is not on GPFS filesystem. Error: %(excmsg)s.') % {'dir': directory, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) return True def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" self._helpers = {} for helper_str in self.configuration.gpfs_share_helpers: share_proto, _, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper(self._gpfs_execute, self.configuration) def _local_path(self, sharename): """Get local path for a share or share snapshot by name.""" return os.path.join(self.configuration.gpfs_mount_point_base, sharename) def _get_gpfs_device(self): fspath = self.configuration.gpfs_mount_point_base try: (out, __) = self._gpfs_execute('df', fspath) except exception.ProcessExecutionError as e: msg = (_('Failed to get GPFS device for %(fspath)s.' 'Error: %(excmsg)s') % {'fspath': fspath, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) lines = out.splitlines() fs = lines[1].split()[0] return fs def _create_share(self, shareobj): """Create a linked fileset file in GPFS. Note: GPFS file system must have quotas enabled (mmchfs -Q yes). """ sharename = shareobj['name'] sizestr = '%sG' % shareobj['size'] sharepath = self._local_path(sharename) fsdev = self._get_gpfs_device() # create fileset for the share, link it to root path and set max size try: self._gpfs_execute(self.GPFS_PATH + 'mmcrfileset', fsdev, sharename, '--inode-space', 'new') except exception.ProcessExecutionError as e: msg = (_('Failed to create fileset on %(fsdev)s for ' 'the share %(sharename)s. 
Error: %(excmsg)s.') % {'fsdev': fsdev, 'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) try: self._gpfs_execute(self.GPFS_PATH + 'mmlinkfileset', fsdev, sharename, '-J', sharepath) except exception.ProcessExecutionError as e: msg = (_('Failed to link fileset for the share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) try: self._gpfs_execute(self.GPFS_PATH + 'mmsetquota', fsdev + ':' + sharename, '--block', '0:' + sizestr) except exception.ProcessExecutionError as e: msg = (_('Failed to set quota for the share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) try: self._gpfs_execute('chmod', '777', sharepath) except exception.ProcessExecutionError as e: msg = (_('Failed to set permissions for share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _delete_share(self, shareobj): """Remove container by removing GPFS fileset.""" sharename = shareobj['name'] fsdev = self._get_gpfs_device() # ignore error, when the fileset does not exist # it may happen, when the share creation failed, the share is in # 'error' state, and the fileset was never created # we want to ignore that error condition while deleting the fileset, # i.e. 'Fileset name share-xyz not found', with error code '2' # and mark the deletion successful ignore_exit_code = [ERR_FILE_NOT_FOUND] # unlink and delete the share's fileset try: self._gpfs_execute(self.GPFS_PATH + 'mmunlinkfileset', fsdev, sharename, '-f', ignore_exit_code=ignore_exit_code) except exception.ProcessExecutionError as e: msg = (_('Failed unlink fileset for share %(sharename)s. 
' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) try: self._gpfs_execute(self.GPFS_PATH + 'mmdelfileset', fsdev, sharename, '-f', ignore_exit_code=ignore_exit_code) except exception.ProcessExecutionError as e: msg = (_('Failed delete fileset for share %(sharename)s. ' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _get_available_capacity(self, path): """Calculate available space on path.""" try: out, __ = self._gpfs_execute('df', '-P', '-B', '1', path) except exception.ProcessExecutionError as e: msg = (_('Failed to check available capacity for %(path)s.' 'Error: %(excmsg)s.') % {'path': path, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) out = out.splitlines()[1] size = int(out.split()[1]) available = int(out.split()[3]) return available, size def _create_share_snapshot(self, snapshot): """Create a snapshot of the share.""" sharename = snapshot['share_name'] snapshotname = snapshot['name'] fsdev = self._get_gpfs_device() LOG.debug( 'Attempting to create a snapshot %(snap)s from share %(share)s ' 'on device %(dev)s.', {'share': sharename, 'snap': snapshotname, 'dev': fsdev} ) try: self._gpfs_execute(self.GPFS_PATH + 'mmcrsnapshot', fsdev, snapshot['name'], '-j', sharename) except exception.ProcessExecutionError as e: msg = (_('Failed to create snapshot %(snapshot)s. ' 'Error: %(excmsg)s.') % {'snapshot': snapshot['name'], 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _delete_share_snapshot(self, snapshot): """Delete a snapshot of the share.""" sharename = snapshot['share_name'] fsdev = self._get_gpfs_device() try: self._gpfs_execute(self.GPFS_PATH + 'mmdelsnapshot', fsdev, snapshot['name'], '-j', sharename) except exception.ProcessExecutionError as e: msg = (_('Failed to delete snapshot %(snapshot)s. 
' 'Error: %(excmsg)s.') % {'snapshot': snapshot['name'], 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _create_share_from_snapshot(self, share, snapshot, share_path): """Create share from a share snapshot.""" self._create_share(share) snapshot_path = self._get_snapshot_path(snapshot) snapshot_path = snapshot_path + "/" try: self._gpfs_execute('rsync', '-rp', snapshot_path, share_path) except exception.ProcessExecutionError as e: msg = (_('Failed to create share %(share)s from ' 'snapshot %(snapshot)s. Error: %(excmsg)s.') % {'share': share['name'], 'snapshot': snapshot['name'], 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def _extend_share(self, shareobj, new_size): sharename = shareobj['name'] sizestr = '%sG' % new_size fsdev = self._get_gpfs_device() try: self._gpfs_execute(self.GPFS_PATH + 'mmsetquota', fsdev + ':' + sharename, '--block', '0:' + sizestr) except exception.ProcessExecutionError as e: msg = (_('Failed to set quota for the share %(sharename)s. 
' 'Error: %(excmsg)s.') % {'sharename': sharename, 'excmsg': e}) LOG.error(msg) raise exception.GPFSException(msg) def get_network_allocations_number(self): return 0 def create_share(self, ctx, share, share_server=None): """Create GPFS directory that will be represented as share.""" self._create_share(share) share_path = self._get_share_path(share) location = self._get_helper(share).create_export(share_path) return location def create_share_from_snapshot(self, ctx, share, snapshot, share_server=None, parent_share=None): """Is called to create share from a snapshot.""" share_path = self._get_share_path(share) self._create_share_from_snapshot(share, snapshot, share_path) location = self._get_helper(share).create_export(share_path) return location def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" self._create_share_snapshot(snapshot) def delete_share(self, ctx, share, share_server=None): """Remove and cleanup share storage.""" location = self._get_share_path(share) self._get_helper(share).remove_export(location, share) self._delete_share(share) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" self._delete_share_snapshot(snapshot) def extend_share(self, share, new_size, share_server=None): """Extends the quota on the share fileset.""" self._extend_share(share, new_size) def ensure_share(self, ctx, share, share_server=None): """Ensure that storage are mounted and exported.""" def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share.""" helper = self._get_helper(share) location = self._get_share_path(share) for access in delete_rules: helper.deny_access(location, share, access) for access in add_rules: helper.allow_access(location, share, access) if not (add_rules or delete_rules): helper.resync_access(location, share, access_rules) def check_for_setup_error(self): """Returns an error if 
prerequisites aren't met.""" if not self._check_gpfs_state(): msg = (_('GPFS is not active.')) LOG.error(msg) raise exception.GPFSException(msg) if not self.configuration.gpfs_share_export_ip: msg = (_('gpfs_share_export_ip must be specified.')) LOG.error(msg) raise exception.InvalidParameterValue(err=msg) gpfs_base_dir = self.configuration.gpfs_mount_point_base if not gpfs_base_dir.startswith('/'): msg = (_('%s must be an absolute path.') % gpfs_base_dir) LOG.error(msg) raise exception.GPFSException(msg) if not self._is_dir(gpfs_base_dir): msg = (_('%s is not a directory.') % gpfs_base_dir) LOG.error(msg) raise exception.GPFSException(msg) if not self._is_gpfs_path(gpfs_base_dir): msg = (_('%s is not on GPFS. Perhaps GPFS not mounted.') % gpfs_base_dir) LOG.error(msg) raise exception.GPFSException(msg) if self.configuration.gpfs_nfs_server_type not in ("KNFS", "CES"): msg = (_('Invalid gpfs_nfs_server_type value: %s. ' 'Valid values are: "KNFS", "CES".') % self.configuration.gpfs_nfs_server_type) LOG.error(msg) raise exception.InvalidParameterValue(err=msg) if ((not self.configuration.gpfs_nfs_server_list) and (self.configuration.gpfs_nfs_server_type != 'CES')): msg = (_('Missing value for gpfs_nfs_server_list.')) LOG.error(msg) raise exception.InvalidParameterValue(err=msg) def _is_share_valid(self, fsdev, location): try: out, __ = self._gpfs_execute(self.GPFS_PATH + 'mmlsfileset', fsdev, '-J', location, '-L', '-Y') except exception.ProcessExecutionError: msg = (_('Given share path %(share_path)s does not exist at ' 'mount point %(mount_point)s.') % {'share_path': location, 'mount_point': fsdev}) LOG.exception(msg) raise exception.ManageInvalidShare(reason=msg) lines = out.splitlines() try: validation_token = lines[0].split(':').index('allocInodes') alloc_inodes = lines[1].split(':')[validation_token] except (IndexError, ValueError): msg = (_('Failed to check share at %s.') % location) LOG.exception(msg) raise exception.GPFSException(msg) return alloc_inodes != 
'0' def _get_share_name(self, fsdev, location): try: out, __ = self._gpfs_execute(self.GPFS_PATH + 'mmlsfileset', fsdev, '-J', location, '-L', '-Y') except exception.ProcessExecutionError: msg = (_('Given share path %(share_path)s does not exist at ' 'mount point %(mount_point)s.') % {'share_path': location, 'mount_point': fsdev}) LOG.exception(msg) raise exception.ManageInvalidShare(reason=msg) lines = out.splitlines() try: validation_token = lines[0].split(':').index('filesetName') share_name = lines[1].split(':')[validation_token] except (IndexError, ValueError): msg = (_('Failed to check share at %s.') % location) LOG.exception(msg) raise exception.GPFSException(msg) return share_name def _manage_existing(self, fsdev, share, old_share_name): new_share_name = share['name'] new_export_location = self._local_path(new_share_name) try: self._gpfs_execute(self.GPFS_PATH + 'mmunlinkfileset', fsdev, old_share_name, '-f') except exception.ProcessExecutionError: msg = _('Failed to unlink fileset for share %s.') % new_share_name LOG.exception(msg) raise exception.GPFSException(msg) LOG.debug('Unlinked the fileset of share %s.', old_share_name) try: self._gpfs_execute(self.GPFS_PATH + 'mmchfileset', fsdev, old_share_name, '-j', new_share_name) except exception.ProcessExecutionError: msg = _('Failed to rename fileset for share %s.') % new_share_name LOG.exception(msg) raise exception.GPFSException(msg) LOG.debug('Renamed the fileset from %(old_share)s to %(new_share)s.', {'old_share': old_share_name, 'new_share': new_share_name}) try: self._gpfs_execute(self.GPFS_PATH + 'mmlinkfileset', fsdev, new_share_name, '-J', new_export_location) except exception.ProcessExecutionError: msg = _('Failed to link fileset for the share %s.' 
) % new_share_name LOG.exception(msg) raise exception.GPFSException(msg) LOG.debug('Linked the fileset of share %(share_name)s at location ' '%(export_location)s.', {'share_name': new_share_name, 'export_location': new_export_location}) try: self._gpfs_execute('chmod', '777', new_export_location) except exception.ProcessExecutionError: msg = _('Failed to set permissions for share %s.') % new_share_name LOG.exception(msg) raise exception.GPFSException(msg) LOG.debug('Changed the permission of share %s.', new_share_name) try: out, __ = self._gpfs_execute(self.GPFS_PATH + 'mmlsquota', '-j', new_share_name, '-Y', fsdev) except exception.ProcessExecutionError: msg = _('Failed to check size for share %s.') % new_share_name LOG.exception(msg) raise exception.GPFSException(msg) lines = out.splitlines() try: quota_limit = lines[0].split(':').index('blockLimit') quota_status = lines[1].split(':')[quota_limit] except (IndexError, ValueError): msg = _('Failed to check quota for share %s.') % new_share_name LOG.exception(msg) raise exception.GPFSException(msg) share_size = int(quota_status) # Note: since share_size returns integer value in KB, # we are checking whether share is less than 1GiB. 
# (units.Mi * KB = 1GB) if share_size < units.Mi: try: self._gpfs_execute(self.GPFS_PATH + 'mmsetquota', fsdev + ':' + new_share_name, '--block', '0:1G') except exception.ProcessExecutionError: msg = _('Failed to set quota for share %s.') % new_share_name LOG.exception(msg) raise exception.GPFSException(msg) LOG.info('Existing share %(shr)s has size %(size)s KB ' 'which is below 1GiB, so extended it to 1GiB.', {'shr': new_share_name, 'size': share_size}) share_size = 1 else: orig_share_size = share_size share_size = int(math.ceil(float(share_size) / units.Mi)) if orig_share_size != share_size * units.Mi: try: self._gpfs_execute(self.GPFS_PATH + 'mmsetquota', fsdev + ':' + new_share_name, '--block', '0:' + str(share_size) + 'G') except exception.ProcessExecutionError: msg = _('Failed to set quota for share %s.' ) % new_share_name LOG.exception(msg) raise exception.GPFSException(msg) new_export_location = self._get_helper(share).create_export( new_export_location) return share_size, new_export_location def manage_existing(self, share, driver_options): old_export = share['export_location'].split(':') try: ces_ip = old_export[0] old_export_location = old_export[1] except IndexError: msg = _('Incorrect export path. Expected format: ' 'IP:/gpfs_mount_point_base/share_id.') LOG.exception(msg) raise exception.ShareBackendException(msg=msg) if ces_ip not in self.configuration.gpfs_nfs_server_list: msg = _('The CES IP %s is not present in the ' 'configuration option "gpfs_nfs_server_list".') % ces_ip raise exception.ShareBackendException(msg=msg) fsdev = self._get_gpfs_device() if not self._is_share_valid(fsdev, old_export_location): err_msg = _('Given share path %s does not have a valid ' 'share.') % old_export_location raise exception.ManageInvalidShare(reason=err_msg) share_name = self._get_share_name(fsdev, old_export_location) out = self._get_helper(share)._has_client_access(old_export_location) if out: err_msg = _('Clients have access to %s share currently. 
Evict any ' 'clients before trying again.') % share_name raise exception.ManageInvalidShare(reason=err_msg) share_size, new_export_location = self._manage_existing( fsdev, share, share_name) return {"size": share_size, "export_locations": new_export_location} def _update_share_stats(self): """Retrieve stats info from share volume group.""" data = dict( share_backend_name=self.backend_name, vendor_name='IBM', storage_protocol='NFS', reserved_percentage=self.configuration.reserved_share_percentage, reserved_snapshot_percentage=( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), reserved_share_extend_percentage=( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage)) free, capacity = self._get_available_capacity( self.configuration.gpfs_mount_point_base) data['total_capacity_gb'] = math.ceil(capacity / units.Gi) data['free_capacity_gb'] = math.ceil(free / units.Gi) super(GPFSShareDriver, self)._update_share_stats(data) def _get_helper(self, share): if share['share_proto'] == 'NFS': return self._helpers[self.configuration.gpfs_nfs_server_type] else: msg = (_('Share protocol %s not supported by GPFS driver.') % share['share_proto']) LOG.error(msg) raise exception.InvalidShare(reason=msg) def _get_share_path(self, share): """Returns share path on storage provider.""" return os.path.join(self.configuration.gpfs_mount_point_base, share['name']) def _get_snapshot_path(self, snapshot): """Returns share path on storage provider.""" snapshot_dir = ".snapshots" return os.path.join(self.configuration.gpfs_mount_point_base, snapshot["share_name"], snapshot_dir, snapshot["name"]) class NASHelperBase(metaclass=abc.ABCMeta): """Interface to work with share.""" def __init__(self, execute, config_object): self.configuration = config_object self._execute = execute def create_export(self, local_path): """Construct location of new export.""" return 
':'.join([self.configuration.gpfs_share_export_ip, local_path]) def get_export_options(self, share, access, helper): """Get the export options.""" extra_specs = share_types.get_extra_specs_from_share(share) if helper == 'KNFS': export_options = extra_specs.get('knfs:export_options') elif helper == 'CES': export_options = extra_specs.get('ces:export_options') else: export_options = None options = self._get_validated_opt_list(export_options) options.append(self.get_access_option(access)) return ','.join(options) def _validate_export_options(self, options): """Validate the export options.""" options_not_allowed = self._get_options_not_allowed() invalid_options = [ option for option in options if option in options_not_allowed ] if invalid_options: raise exception.InvalidInput(reason='Invalid export_option %s as ' 'it is set by access_type.' % invalid_options) def _get_validated_opt_list(self, export_options): """Validate the export options and return an option list.""" if export_options: options = export_options.lower().split(',') self._validate_export_options(options) else: options = [] return options @abc.abstractmethod def get_access_option(self, access): """Get access option string based on access level.""" @abc.abstractmethod def _get_options_not_allowed(self): """Get access options that are not allowed in extra-specs.""" @abc.abstractmethod def remove_export(self, local_path, share): """Remove export.""" @abc.abstractmethod def allow_access(self, local_path, share, access): """Allow access to the host.""" @abc.abstractmethod def deny_access(self, local_path, share, access): """Deny access to the host.""" @abc.abstractmethod def resync_access(self, local_path, share, access_rules): """Re-sync all access rules for given share.""" class KNFSHelper(NASHelperBase): """Wrapper for Kernel NFS Commands.""" def __init__(self, execute, config_object): super(KNFSHelper, self).__init__(execute, config_object) self._execute = execute try: self._execute('exportfs', 
check_exit_code=True, run_as_root=True) except exception.ProcessExecutionError as e: msg = (_('NFS server not found. Error: %s.') % e) LOG.error(msg) raise exception.GPFSException(msg) def _has_client_access(self, local_path, access_to=None): try: out, __ = self._execute('exportfs', run_as_root=True) except exception.ProcessExecutionError: msg = _('Failed to check exports on the systems.') LOG.exception(msg) raise exception.GPFSException(msg) if access_to: if (re.search(re.escape(local_path) + r'[\s\n]*' + re.escape(access_to), out)): return True else: if re.findall(local_path + '\\b', ''.join(out)): return True return False def _publish_access(self, *cmd, **kwargs): check_exit_code = kwargs.get('check_exit_code', True) outs = [] localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2] for server in self.configuration.gpfs_nfs_server_list: if server in localserver_iplist: run_command = cmd run_local = True else: sshlogin = self.configuration.gpfs_ssh_login remote_login = sshlogin + '@' + server run_command = ['ssh', remote_login] + list(cmd) run_local = False try: out = utils.execute(*run_command, run_as_root=run_local, check_exit_code=check_exit_code) except exception.ProcessExecutionError: raise outs.append(out) return outs def _verify_denied_access(self, local_path, share, ip): try: cmd = ['exportfs'] outs = self._publish_access(*cmd) except exception.ProcessExecutionError: msg = _('Failed to verify denied access for ' 'share %s.') % share['name'] LOG.exception(msg) raise exception.GPFSException(msg) for stdout, stderr in outs: if stderr and stderr.strip(): msg = ('Log/ignore stderr during _validate_denied_access for ' 'share %(sharename)s. Return code OK. ' 'Stderr: %(stderr)s' % {'sharename': share['name'], 'stderr': stderr}) LOG.debug(msg) gpfs_ips = NFSHelper.get_host_list(stdout, local_path) if ip in gpfs_ips: msg = (_('Failed to deny access for share %(sharename)s. 
' 'IP %(ip)s still has access.') % {'sharename': share['name'], 'ip': ip}) LOG.error(msg) raise exception.GPFSException(msg) def remove_export(self, local_path, share): """Remove export.""" def get_access_option(self, access): """Get access option string based on access level.""" return access['access_level'] def _get_options_not_allowed(self): """Get access options that are not allowed in extra-specs.""" return list(constants.ACCESS_LEVELS) def _get_exports(self): """Get exportfs output.""" try: out, __ = self._execute('exportfs', run_as_root=True) except exception.ProcessExecutionError as e: msg = (_('Failed to check exports on the systems. ' ' Error: %s.') % e) LOG.error(msg) raise exception.GPFSException(msg) return out def allow_access(self, local_path, share, access, error_on_exists=True): """Allow access to one or more vm instances.""" if access['access_type'] != 'ip': raise exception.InvalidShareAccess(reason='Only ip access type ' 'supported.') if error_on_exists: # check if present in export out = re.search( re.escape(local_path) + r'[\s\n]*' + re.escape(access['access_to']), self._get_exports()) if out is not None: access_type = access['access_type'] access_to = access['access_to'] raise exception.ShareAccessExists(access_type=access_type, access=access_to) export_opts = self.get_export_options(share, access, 'KNFS') cmd = ['exportfs', '-o', export_opts, ':'.join([access['access_to'], local_path])] try: self._publish_access(*cmd) except exception.ProcessExecutionError: msg = _('Failed to allow access for share %s.') % share['name'] LOG.exception(msg) raise exception.GPFSException(msg) def _deny_ip(self, local_path, share, ip): """Remove access for one or more vm instances.""" cmd = ['exportfs', '-u', ':'.join([ip, local_path])] try: # Can get exit code 0 for success or 1 for already gone (also # potentially get 1 due to exportfs bug). So allow # _publish_access to continue with [0, 1] and then verify after # it is done. 
            self._publish_access(*cmd, check_exit_code=[0, 1])
        except exception.ProcessExecutionError:
            msg = _('Failed to deny access for share %s.') % share['name']
            LOG.exception(msg)
            raise exception.GPFSException(msg)
        # Error code (0 or 1) makes deny IP success indeterminate.
        # So, verify that the IP access was completely removed.
        self._verify_denied_access(local_path, share, ip)

    def deny_access(self, local_path, share, access):
        """Remove access for one or more vm instances."""
        self._deny_ip(local_path, share, access['access_to'])

    def _remove_other_access(self, local_path, share, access_rules):
        """Remove any client access that is not in access_rules."""
        exports = self._get_exports()
        # IPs the NFS server currently exports this path to.
        gpfs_ips = set(NFSHelper.get_host_list(exports, local_path))
        # IPs that manila's access rules say should have access.
        manila_ips = set([x['access_to'] for x in access_rules])
        remove_ips = gpfs_ips - manila_ips
        for ip in remove_ips:
            self._deny_ip(local_path, share, ip)

    def resync_access(self, local_path, share, access_rules):
        """Re-sync all access rules for given share."""
        # Re-apply every rule, tolerating ones already present, then
        # strip any remaining exports manila no longer knows about.
        for access in access_rules:
            self.allow_access(local_path, share, access,
                              error_on_exists=False)
        self._remove_other_access(local_path, share, access_rules)


class CESHelper(NASHelperBase):
    """Wrapper for NFS by Spectrum Scale CES"""

    def __init__(self, execute, config_object):
        super(CESHelper, self).__init__(execute, config_object)
        self._execute = execute
        # On a GPFS node the mm* commands are expected on PATH;
        # otherwise use the standard Spectrum Scale install prefix.
        if self.configuration.is_gpfs_node:
            self.GPFS_PATH = ''
        else:
            self.GPFS_PATH = '/usr/lpp/mmfs/bin/'

    def _execute_mmnfs_command(self, cmd, err_msg):
        """Run 'mmnfs export <cmd>'; wrap failures in GPFSException."""
        try:
            out, __ = self._execute(self.GPFS_PATH + 'mmnfs',
                                    'export', *cmd)
        except exception.ProcessExecutionError as e:
            msg = (_('%(err_msg)s Error: %(e)s.')
                   % {'err_msg': err_msg, 'e': e})
            LOG.error(msg)
            raise exception.GPFSException(msg)
        return out

    @staticmethod
    def _fix_export_data(data, headers):
        """Export data split by ':' may need fixing if client had colons."""
        # If an IPv6 client shows up then ':' delimiters don't work.
        # So use header positions to get data before/after Clients.
# Then what is left in between can be joined back into a client IP. client_index = headers.index('Clients') # reverse_client_index is distance from end. reverse_client_index = len(headers) - (client_index + 1) after_client_index = len(data) - reverse_client_index before_client = data[:client_index] client = data[client_index: after_client_index] after_client = data[after_client_index:] result_data = before_client result_data.append(':'.join(client)) # Fixes colons in client IP result_data.extend(after_client) return result_data def _get_nfs_client_exports(self, local_path): """Get the current NFS client export details from GPFS.""" out = self._execute_mmnfs_command( ('list', '-n', local_path, '-Y'), 'Failed to get exports from the system.') # Remove the header line and use the headers to describe the data lines = out.splitlines() for line in lines: data = line.split(':') if "HEADER" in data: headers = data lines.remove(line) break else: msg = _('Failed to parse exports for path %s. ' 'No HEADER found.') % local_path LOG.error(msg) raise exception.GPFSException(msg) exports = [] for line in lines: data = line.split(':') if len(data) < 3: continue # Skip empty lines (and anything less than minimal). result_data = self._fix_export_data(data, headers) exports.append(dict(zip(headers, result_data))) return exports def _has_client_access(self, local_path, access_to=None): """Check path for any export or for one with a specific IP address.""" gpfs_clients = self._get_nfs_client_exports(local_path) return gpfs_clients and (access_to is None or access_to in [ x['Clients'] for x in gpfs_clients]) def remove_export(self, local_path, share): """Remove export.""" if self._has_client_access(local_path): err_msg = ('Failed to remove export for share %s.' 
% share['name']) self._execute_mmnfs_command(('remove', local_path), err_msg) def _get_options_not_allowed(self): """Get access options that are not allowed in extra-specs.""" return ['access_type=ro', 'access_type=rw'] def get_access_option(self, access): """Get access option string based on access level.""" if access['access_level'] == constants.ACCESS_LEVEL_RO: return 'access_type=ro' else: return 'access_type=rw' def allow_access(self, local_path, share, access): """Allow access to the host.""" if access['access_type'] != 'ip': raise exception.InvalidShareAccess(reason='Only ip access type ' 'supported.') has_exports = self._has_client_access(local_path) export_opts = self.get_export_options(share, access, 'CES') if not has_exports: cmd = ['add', local_path, '-c', access['access_to'] + '(' + export_opts + ')'] else: cmd = ['change', local_path, '--nfsadd', access['access_to'] + '(' + export_opts + ')'] err_msg = ('Failed to allow access for share %s.' % share['name']) self._execute_mmnfs_command(cmd, err_msg) def deny_access(self, local_path, share, access, force=False): """Deny access to the host.""" has_export = self._has_client_access(local_path, access['access_to']) if has_export: err_msg = ('Failed to remove access for share %s.' 
% share['name']) self._execute_mmnfs_command(('change', local_path, '--nfsremove', access['access_to']), err_msg) def _get_client_opts(self, access, opts_list): """Get client options string for access rule and NFS options.""" nfs_opts = ','.join([self.get_access_option(access)] + opts_list) return '%(ip)s(%(nfs_opts)s)' % {'ip': access['access_to'], 'nfs_opts': nfs_opts} def _get_share_opts(self, share): """Get a list of NFS options from the share's share type.""" extra_specs = share_types.get_extra_specs_from_share(share) opts_list = self._get_validated_opt_list( extra_specs.get('ces:export_options')) return opts_list def _nfs_change(self, local_path, share, access_rules, gpfs_clients): """Bulk add/update/remove of access rules for share.""" opts_list = self._get_share_opts(share) # Create a map of existing client access rules from GPFS. # Key from 'Clients' is an IP address or # Value from 'Access_Type' is RW|RO (case varies) gpfs_map = { x['Clients']: x['Access_Type'].lower() for x in gpfs_clients} gpfs_ips = set(gpfs_map.keys()) manila_ips = set([x['access_to'] for x in access_rules]) add_ips = manila_ips - gpfs_ips update_ips = gpfs_ips.intersection(manila_ips) remove_ips = gpfs_ips - manila_ips adds = [] updates = [] if add_ips or update_ips: for access in access_rules: ip = access['access_to'] if ip in add_ips: adds.append(self._get_client_opts(access, opts_list)) elif (ip in update_ips and access['access_level'] != gpfs_map[ip]): updates.append(self._get_client_opts(access, opts_list)) if remove_ips or adds or updates: cmd = ['change', local_path] if remove_ips: cmd.append('--nfsremove') cmd.append(','.join(remove_ips)) if adds: cmd.append('--nfsadd') cmd.append(';'.join(adds)) if updates: cmd.append('--nfschange') cmd.append(';'.join(updates)) err_msg = ('Failed to resync access for share %s.' 
% share['name']) self._execute_mmnfs_command(cmd, err_msg) def _nfs_add(self, access_rules, local_path, share): """Bulk add of access rules to share.""" if not access_rules: return opts_list = self._get_share_opts(share) client_options = [] for access in access_rules: client_options.append(self._get_client_opts(access, opts_list)) cmd = ['add', local_path, '-c', ';'.join(client_options)] err_msg = ('Failed to resync access for share %s.' % share['name']) self._execute_mmnfs_command(cmd, err_msg) def resync_access(self, local_path, share, access_rules): """Re-sync all access rules for given share.""" gpfs_clients = self._get_nfs_client_exports(local_path) if not gpfs_clients: self._nfs_add(access_rules, local_path, share) else: self._nfs_change(local_path, share, access_rules, gpfs_clients) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9176717 manila-21.0.0/manila/share/drivers/infinidat/0000775000175000017500000000000000000000000021044 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/infinidat/__init__.py0000664000175000017500000000000000000000000023143 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/infinidat/infinibox.py0000664000175000017500000006137400000000000023416 0ustar00zuulzuul00000000000000# Copyright 2022 Infinidat Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ INFINIDAT InfiniBox Share Driver """ import functools import ipaddress from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import requests from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import driver from manila.share import utils from manila import version try: import capacity except ImportError: capacity = None try: import infinisdk except ImportError: infinisdk = None LOG = logging.getLogger(__name__) infinidat_connection_opts = [ cfg.HostAddressOpt('infinibox_hostname', help='The name (or IP address) for the INFINIDAT ' 'Infinibox storage system.'), cfg.BoolOpt('infinidat_use_ssl', help='Use SSL to connect to the INFINIDAT Infinibox storage ' 'system.', default=False), cfg.BoolOpt('infinidat_suppress_ssl_warnings', help='Suppress requests library SSL certificate warnings.', default=False), ] infinidat_auth_opts = [ cfg.StrOpt('infinibox_login', help=('Administrative user account name used to access the ' 'INFINIDAT Infinibox storage system.')), cfg.StrOpt('infinibox_password', help=('Password for the administrative user account ' 'specified in the infinibox_login option.'), secret=True), ] infinidat_general_opts = [ cfg.StrOpt('infinidat_pool_name', help='Name of the pool from which volumes are allocated.'), cfg.StrOpt('infinidat_nas_network_space_name', help='Name of the NAS network space on the INFINIDAT ' 'InfiniBox.'), cfg.BoolOpt('infinidat_thin_provision', help='Use thin provisioning.', default=True), cfg.BoolOpt('infinidat_snapdir_accessible', 
help=('Controls access to the .snapshot directory. ' 'By default, each share allows access to its own ' '.snapshot directory, which contains files and ' 'directories of each snapshot taken. To restrict ' 'access to the .snapshot directory, this option ' 'should be set to False.'), default=True), cfg.BoolOpt('infinidat_snapdir_visible', help=('Controls visibility of the .snapshot directory. ' 'By default, each share contains the .snapshot ' 'directory, which is hidden on the client side. ' 'To make the .snapshot directory visible, this ' 'option should be set to True.'), default=False), ] CONF = cfg.CONF CONF.register_opts(infinidat_connection_opts) CONF.register_opts(infinidat_auth_opts) CONF.register_opts(infinidat_general_opts) _MANILA_TO_INFINIDAT_ACCESS_LEVEL = { constants.ACCESS_LEVEL_RW: 'RW', constants.ACCESS_LEVEL_RO: 'RO', } # Max retries for the REST API client in case of a failure: _API_MAX_RETRIES = 5 # Identifier used as the REST API User-Agent string: _INFINIDAT_MANILA_IDENTIFIER = ( "manila/%s" % version.version_info.release_string()) def infinisdk_to_manila_exceptions(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except infinisdk.core.exceptions.InfiniSDKException as ex: # string formatting of 'ex' includes http code and url msg = _('Caught exception from infinisdk: %s') % ex LOG.exception(msg) raise exception.ShareBackendException(msg=msg) return wrapper class InfiniboxShareDriver(driver.ShareDriver): """INFINIDAT InfiniBox Share driver. 
    Version history:
        1.0 - initial release
        1.1 - added support for TLS/SSL communication
        1.2 - fixed host assisted migration
    """

    VERSION = '1.2'  # driver version

    def __init__(self, *args, **kwargs):
        # First positional arg is False — NOTE(review): presumably
        # driver_handles_share_servers=False per ShareDriver's
        # signature; confirm against manila.share.driver.
        super(InfiniboxShareDriver, self).__init__(False, *args, **kwargs)
        self.configuration.append_config_values(infinidat_connection_opts)
        self.configuration.append_config_values(infinidat_auth_opts)
        self.configuration.append_config_values(infinidat_general_opts)

    def _setup_and_get_system_object(self, management_address, auth,
                                     use_ssl):
        """Create, configure and log into an infinisdk InfiniBox handle."""
        system = infinisdk.InfiniBox(management_address, auth=auth,
                                     use_ssl=use_ssl)
        # Retry transient "Interrupted system call" transport failures
        # up to _API_MAX_RETRIES times.
        system.api.add_auto_retry(
            lambda e: isinstance(
                e, infinisdk.core.exceptions.APITransportFailure) and
            "Interrupted system call" in e.error_desc, _API_MAX_RETRIES)
        system.api.set_source_identifier(_INFINIDAT_MANILA_IDENTIFIER)
        system.login()
        return system

    def do_setup(self, context):
        """Driver initialization"""
        # Both libraries are imported at module scope with a
        # try/except fallback to None; fail clearly here if missing.
        if capacity is None:
            msg = _("Missing 'capacity' python module, ensure the library"
                    " is installed and available.")
            raise exception.ManilaException(message=msg)
        if infinisdk is None:
            msg = _("Missing 'infinisdk' python module, ensure the library"
                    " is installed and available.")
            raise exception.ManilaException(message=msg)

        if self.configuration.safe_get('infinidat_suppress_ssl_warnings'):
            LOG.warning('Suppressing requests library SSL Warnings')
            rpu = requests.packages.urllib3  # pylint: disable=no-member
            rpu.disable_warnings(rpu.exceptions.InsecureRequestWarning)
            rpu.disable_warnings(rpu.exceptions.InsecurePlatformWarning)

        use_ssl = self.configuration.safe_get('infinidat_use_ssl')
        infinibox_login = self._safe_get_from_config_or_fail(
            'infinibox_login')
        infinibox_password = (
            self._safe_get_from_config_or_fail('infinibox_password'))
        auth = (infinibox_login, infinibox_password)
        management_address = (
            self._safe_get_from_config_or_fail('infinibox_hostname'))
        self._pool_name = (
            self._safe_get_from_config_or_fail('infinidat_pool_name'))
        self._network_space_name = (
self._safe_get_from_config_or_fail( 'infinidat_nas_network_space_name')) self._system = self._setup_and_get_system_object(management_address, auth, use_ssl) backend_name = self.configuration.safe_get('share_backend_name') self._backend_name = backend_name or self.__class__.__name__ thin_provisioning = self.configuration.infinidat_thin_provision self._provtype = "THIN" if thin_provisioning else "THICK" LOG.debug('setup complete') def _update_share_stats(self): """Retrieve stats info from share group.""" (free_capacity_bytes, physical_capacity_bytes, provisioned_capacity_gb) = self._get_available_capacity() max_over_subscription_ratio = ( self.configuration.max_over_subscription_ratio) data = dict( share_backend_name=self._backend_name, vendor_name='INFINIDAT', driver_version=self.VERSION, storage_protocol='NFS', total_capacity_gb=float(physical_capacity_bytes) / units.Gi, free_capacity_gb=float(free_capacity_bytes) / units.Gi, reserved_percentage=self.configuration.reserved_share_percentage, reserved_snapshot_percentage=( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), reserved_share_extend_percentage=( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), thin_provisioning=self.configuration.infinidat_thin_provision, max_over_subscription_ratio=max_over_subscription_ratio, provisioned_capacity_gb=provisioned_capacity_gb, snapshot_support=True, create_share_from_snapshot_support=True, mount_snapshot_support=True, revert_to_snapshot_support=True) super(InfiniboxShareDriver, self)._update_share_stats(data) def _get_available_capacity(self): # pylint: disable=no-member pool = self._get_infinidat_pool() free_capacity_bytes = (pool.get_free_physical_capacity() / capacity.byte) physical_capacity_bytes = (pool.get_physical_capacity() / capacity.byte) provisioned_capacity_gb = ( (pool.get_virtual_capacity() - pool.get_free_virtual_capacity()) / capacity.GB) # pylint: 
enable=no-member return (free_capacity_bytes, physical_capacity_bytes, provisioned_capacity_gb) def _safe_get_from_config_or_fail(self, config_parameter): config_value = self.configuration.safe_get(config_parameter) if not config_value: # None or empty string reason = (_("%(config_parameter)s configuration parameter " "must be specified") % {'config_parameter': config_parameter}) LOG.error(reason) raise exception.BadConfigurationException(reason=reason) return config_value def _verify_share_protocol(self, share): if share['share_proto'] != 'NFS': reason = (_('Unsupported share protocol: %(proto)s.') % {'proto': share['share_proto']}) LOG.error(reason) raise exception.InvalidShare(reason=reason) def _verify_access_type(self, access): if access['access_type'] != 'ip': reason = _('Only "ip" access type allowed for the NFS protocol.') LOG.error(reason) raise exception.InvalidShareAccess(reason=reason) return True def _make_share_name(self, manila_share): return 'openstack-shr-%s' % manila_share['id'] def _make_snapshot_name(self, manila_snapshot): return 'openstack-snap-%s' % manila_snapshot['id'] def _set_manila_object_metadata(self, infinidat_object, manila_object): data = {"system": "openstack", "openstack_version": version.version_info.release_string(), "manila_id": manila_object['id'], "manila_name": manila_object['name'], "host.created_by": _INFINIDAT_MANILA_IDENTIFIER} infinidat_object.set_metadata_from_dict(data) @infinisdk_to_manila_exceptions def _get_infinidat_pool(self): pool = self._system.pools.safe_get(name=self._pool_name) if pool is None: msg = _('Pool "%s" not found') % self._pool_name LOG.error(msg) raise exception.ShareBackendException(msg=msg) return pool @infinisdk_to_manila_exceptions def _get_infinidat_nas_network_space_ips(self): network_space = self._system.network_spaces.safe_get( name=self._network_space_name) if network_space is None: msg = _('INFINIDAT InfiniBox NAS network space "%s" ' 'not found') % self._network_space_name 
LOG.error(msg) raise exception.ShareBackendException(msg=msg) network_space_ips = network_space.get_ips() if not network_space_ips: msg = _('INFINIDAT InfiniBox NAS network space "%s" has no IP ' 'addresses defined') % self._network_space_name LOG.error(msg) raise exception.ShareBackendException(msg=msg) ip_addresses = ( [ip_munch.ip_address for ip_munch in network_space_ips if ip_munch.enabled]) if not ip_addresses: msg = _('INFINIDAT InfiniBox NAS network space "%s" has no ' 'enabled IP addresses') % self._network_space_name LOG.error(msg) raise exception.ShareBackendException(msg=msg) return ip_addresses def _get_full_nfs_export_paths(self, export_path): network_space_ips = self._get_infinidat_nas_network_space_ips() return ['{network_space_ip}:{export_path}'.format( network_space_ip=network_space_ip, export_path=export_path) for network_space_ip in network_space_ips] @infinisdk_to_manila_exceptions def _get_infinidat_filesystem_by_name(self, name): filesystem = self._system.filesystems.safe_get(name=name) if filesystem is None: msg = (_('Filesystem not found on the Infinibox by its name: %s') % name) LOG.error(msg) raise exception.ShareResourceNotFound(share_id=name) return filesystem def _get_infinidat_filesystem(self, manila_share): filesystem_name = self._make_share_name(manila_share) return self._get_infinidat_filesystem_by_name(filesystem_name) def _get_infinidat_snapshot_by_name(self, name): snapshot = self._system.filesystems.safe_get(name=name) if snapshot is None: msg = (_('Snapshot not found on the Infinibox by its name: %s') % name) LOG.error(msg) raise exception.ShareSnapshotNotFound(snapshot_id=name) return snapshot def _get_infinidat_snapshot(self, manila_snapshot): snapshot_name = self._make_snapshot_name(manila_snapshot) return self._get_infinidat_snapshot_by_name(snapshot_name) def _get_infinidat_dataset(self, manila_object, is_snapshot): return (self._get_infinidat_snapshot(manila_object) if is_snapshot else 
self._get_infinidat_filesystem(manila_object)) @infinisdk_to_manila_exceptions def _get_export(self, infinidat_filesystem): infinidat_exports = infinidat_filesystem.get_exports() if len(infinidat_exports) == 0: msg = _("Could not find share export") raise exception.ShareBackendException(msg=msg) elif len(infinidat_exports) > 1: msg = _("INFINIDAT filesystem has more than one active export; " "possibly not a Manila share") LOG.error(msg) raise exception.ShareBackendException(msg=msg) return infinidat_exports[0] def _get_infinidat_access_level(self, access): """Translates between Manila access levels to INFINIDAT API ones""" access_level = access['access_level'] try: return _MANILA_TO_INFINIDAT_ACCESS_LEVEL[access_level] except KeyError: raise exception.InvalidShareAccessLevel(level=access_level) def _get_ip_address_range(self, ip_address): """Parse single IP address or subnet into a range. If the IP address string is in subnet mask format, returns a - string. If the IP address contains a single IP address, returns only that IP address. 
""" ip_address = str(ip_address) # try treating the ip_address parameter as a range of IP addresses: ip_network = ipaddress.ip_network(ip_address, strict=False) ip_network_hosts = list(ip_network.hosts()) if len(ip_network_hosts) < 2: # /32, single IP address return ip_address.split('/')[0] return "{}-{}".format(ip_network_hosts[0], ip_network_hosts[-1]) @infinisdk_to_manila_exceptions def _create_filesystem_export(self, infinidat_filesystem): snapdir_visible = self.configuration.infinidat_snapdir_visible infinidat_export = infinidat_filesystem.add_export( permissions=[], snapdir_visible=snapdir_visible) return self._make_export_locations(infinidat_export) @infinisdk_to_manila_exceptions def _ensure_filesystem_export(self, infinidat_filesystem): try: infinidat_export = self._get_export(infinidat_filesystem) except exception.ShareBackendException: return self._create_filesystem_export(infinidat_filesystem) actual = infinidat_export.is_snapdir_visible() expected = self.configuration.infinidat_snapdir_visible if actual is not expected: LOG.debug('Update snapdir_visible for %s: %s -> %s', infinidat_filesystem.get_name(), actual, expected) infinidat_export.update_snapdir_visible(expected) return self._make_export_locations(infinidat_export) @infinisdk_to_manila_exceptions def _make_export_locations(self, infinidat_export): export_paths = self._get_full_nfs_export_paths( infinidat_export.get_export_path()) export_locations = [{ 'path': export_path, 'is_admin_only': False, 'metadata': {}, } for export_path in export_paths] return export_locations @infinisdk_to_manila_exceptions def _delete_share(self, share, is_snapshot): if is_snapshot: dataset_name = self._make_snapshot_name(share) else: dataset_name = self._make_share_name(share) try: infinidat_filesystem = ( self._get_infinidat_filesystem_by_name(dataset_name)) except exception.ShareResourceNotFound: message = ("share %(share)s not found on Infinibox, skipping " "delete") LOG.warning(message, {"share": share}) return 
# filesystem not found try: infinidat_export = self._get_export(infinidat_filesystem) infinidat_export.safe_delete() except exception.ShareBackendException: # it is possible that the export has been deleted pass infinidat_filesystem.safe_delete() @infinisdk_to_manila_exceptions def _extend_share(self, infinidat_filesystem, share, new_size): # pylint: disable=no-member new_size_capacity_units = new_size * capacity.GiB # pylint: enable=no-member old_size = infinidat_filesystem.get_size() infinidat_filesystem.resize(new_size_capacity_units - old_size) @infinisdk_to_manila_exceptions def _update_access(self, manila_object, access_rules, is_snapshot): infinidat_filesystem = self._get_infinidat_dataset( manila_object, is_snapshot=is_snapshot) infinidat_export = self._get_export(infinidat_filesystem) permissions = [ {'access': self._get_infinidat_access_level(access_rule), 'client': self._get_ip_address_range(access_rule['access_to']), 'no_root_squash': True} for access_rule in access_rules if self._verify_access_type(access_rule)] infinidat_export.update_permissions(permissions) @infinisdk_to_manila_exceptions def create_share(self, context, share, share_server=None): self._verify_share_protocol(share) pool = self._get_infinidat_pool() size = share['size'] * capacity.GiB # pylint: disable=no-member name = self._make_share_name(share) snapdir_accessible = self.configuration.infinidat_snapdir_accessible infinidat_filesystem = self._system.filesystems.create( pool=pool, name=name, size=size, provtype=self._provtype, snapdir_accessible=snapdir_accessible) self._set_manila_object_metadata(infinidat_filesystem, share) return self._create_filesystem_export(infinidat_filesystem) @infinisdk_to_manila_exceptions def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): name = self._make_share_name(share) infinidat_snapshot = self._get_infinidat_snapshot(snapshot) snapdir_accessible = self.configuration.infinidat_snapdir_accessible 
infinidat_new_share = infinidat_snapshot.create_snapshot( name=name, write_protected=False, snapdir_accessible=snapdir_accessible) self._extend_share(infinidat_new_share, share, share['size']) return self._create_filesystem_export(infinidat_new_share) @infinisdk_to_manila_exceptions def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" share = snapshot['share'] infinidat_filesystem = self._get_infinidat_filesystem(share) name = self._make_snapshot_name(snapshot) snapdir_accessible = self.configuration.infinidat_snapdir_accessible infinidat_snapshot = infinidat_filesystem.create_snapshot( name=name, snapdir_accessible=snapdir_accessible) # snapshot is created in the same size as the original share, so no # extending is needed self._set_manila_object_metadata(infinidat_snapshot, snapshot) return {'export_locations': self._create_filesystem_export(infinidat_snapshot)} def delete_share(self, context, share, share_server=None): try: self._verify_share_protocol(share) except exception.InvalidShare: # cleanup shouldn't fail on wrong protocol or missing share: message = ("failed to delete share %(share)s; unsupported share " "protocol %(share_proto)s, only NFS is supported") LOG.warning(message, {"share": share, "share_proto": share['share_proto']}) return self._delete_share(share, is_snapshot=False) def delete_snapshot(self, context, snapshot, share_server=None): self._delete_share(snapshot, is_snapshot=True) def ensure_share(self, context, share, share_server=None): """Ensure that share is properly configured and exported.""" # will raise ShareResourceNotFound if the share was not found: infinidat_filesystem = self._get_infinidat_filesystem(share) actual = infinidat_filesystem.is_snapdir_accessible() expected = self.configuration.infinidat_snapdir_accessible if actual is not expected: LOG.debug('Update snapdir_accessible for %s: %s -> %s', infinidat_filesystem.get_name(), actual, expected) 
infinidat_filesystem.update_field('snapdir_accessible', expected) return self._ensure_filesystem_export(infinidat_filesystem) def ensure_shares(self, context, shares): """Invoked to ensure that shares are exported.""" updates = {} for share in shares: updates[share['id']] = { 'export_locations': self.ensure_share(context, share)} return updates def get_backend_info(self, context): snapdir_accessible = self.configuration.infinidat_snapdir_accessible snapdir_visible = self.configuration.infinidat_snapdir_visible return { 'snapdir_accessible': snapdir_accessible, 'snapdir_visible': snapdir_visible } def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): # As the Infinibox API can bulk update export access rules, we will try # to use the access_rules list self._verify_share_protocol(share) self._update_access(share, access_rules, is_snapshot=False) def get_network_allocations_number(self): return 0 @infinisdk_to_manila_exceptions def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): infinidat_snapshot = self._get_infinidat_snapshot(snapshot) infinidat_parent_share = self._get_infinidat_filesystem( snapshot['share']) infinidat_parent_share.restore(infinidat_snapshot) def extend_share(self, share, new_size, share_server=None): infinidat_filesystem = self._get_infinidat_filesystem(share) self._extend_share(infinidat_filesystem, share, new_size) def snapshot_update_access(self, context, snapshot, access_rules, add_rules, delete_rules, share_server=None): # snapshots are to be mounted in read-only mode, see: # "Add mountable snapshots" on openstack specs. 
access_rules, _, _ = utils.change_rules_to_readonly( access_rules, [], []) try: self._update_access(snapshot, access_rules, is_snapshot=True) except exception.InvalidShareAccess as e: raise exception.InvalidSnapshotAccess(e) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9216716 manila-21.0.0/manila/share/drivers/infortrend/0000775000175000017500000000000000000000000021251 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/infortrend/__init__.py0000664000175000017500000000000000000000000023350 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/infortrend/driver.py0000664000175000017500000002477200000000000023132 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.infortrend import infortrend_nas LOG = log.getLogger(__name__) infortrend_nas_opts = [ cfg.HostAddressOpt('infortrend_nas_ip', required=True, help='Infortrend NAS IP for management.'), cfg.StrOpt('infortrend_nas_user', default='manila', help='User for the Infortrend NAS server.'), cfg.StrOpt('infortrend_nas_password', secret=True, help='Password for the Infortrend NAS server. ' 'This is not necessary ' 'if infortrend_nas_ssh_key is set.'), cfg.StrOpt('infortrend_nas_ssh_key', help='SSH key for the Infortrend NAS server. ' 'This is not necessary ' 'if infortrend_nas_password is set.'), cfg.ListOpt('infortrend_share_pools', required=True, help='Comma separated list of Infortrend NAS pools.'), cfg.ListOpt('infortrend_share_channels', required=True, help='Comma separated list of Infortrend channels.'), cfg.IntOpt('infortrend_ssh_timeout', default=30, help='SSH timeout in seconds.'), ] CONF = cfg.CONF CONF.register_opts(infortrend_nas_opts) class InfortrendNASDriver(driver.ShareDriver): """Infortrend Share Driver for GS/GSe Family using NASCLI. 
Version history: 1.0.0 - Initial driver """ VERSION = "1.0.0" PROTOCOL = "NFS_CIFS" def __init__(self, *args, **kwargs): super(InfortrendNASDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(infortrend_nas_opts) nas_ip = self.configuration.safe_get('infortrend_nas_ip') username = self.configuration.safe_get('infortrend_nas_user') password = self.configuration.safe_get('infortrend_nas_password') ssh_key = self.configuration.safe_get('infortrend_nas_ssh_key') timeout = self.configuration.safe_get('infortrend_ssh_timeout') self.backend_name = self.configuration.safe_get('share_backend_name') if not (password or ssh_key): msg = _('Either infortrend_nas_password or infortrend_nas_ssh_key ' 'should be set.') raise exception.InvalidParameterValue(err=msg) pool_dict = self._init_pool_dict() channel_dict = self._init_channel_dict() self.ift_nas = infortrend_nas.InfortrendNAS(nas_ip, username, password, ssh_key, timeout, pool_dict, channel_dict) def _init_pool_dict(self): pools_names = self.configuration.safe_get('infortrend_share_pools') return {el: {} for el in pools_names} def _init_channel_dict(self): channels = self.configuration.safe_get('infortrend_share_channels') return {el: '' for el in channels} def do_setup(self, context): """Any initialization the share driver does while starting.""" LOG.debug('Infortrend NAS do_setup start.') self.ift_nas.do_setup() def check_for_setup_error(self): """Check for setup error.""" LOG.debug('Infortrend NAS check_for_setup_error start.') self.ift_nas.check_for_setup_error() def _update_share_stats(self): """Retrieve stats info from share group.""" LOG.debug('Updating Infortrend backend [%s].', self.backend_name) data = dict( share_backend_name=self.backend_name, vendor_name='Infortrend', driver_version=self.VERSION, storage_protocol=self.PROTOCOL, reserved_percentage=self.configuration.reserved_share_percentage, reserved_snapshot_percentage=( 
self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), reserved_share_extend_percentage=( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), pools=self.ift_nas.update_pools_stats()) LOG.debug('Infortrend pools status: %s', data['pools']) super(InfortrendNASDriver, self)._update_share_stats(data) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share. :param context: Current context :param share: Share model with share data. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param update_rules: Empty List or List of access rules which should be updated. access_rules already contains these rules. :param share_server: Not used by this driver. :returns: None, or a dictionary of ``access_id``, ``access_key`` as key: value pairs for the rules added, where, ``access_id`` is the UUID (string) of the access rule, and ``access_key`` is the credential (string) of the entity granted access. During recovery after error, the returned dictionary must contain ``access_id``, ``access_key`` for all the rules that the driver is ordered to resync, i.e. rules in the ``access_rules`` parameter. 
""" return self.ift_nas.update_access(share, access_rules, add_rules, delete_rules, share_server) def create_share(self, context, share, share_server=None): """Create a share.""" LOG.debug('Creating share: %s.', share['id']) return self.ift_nas.create_share(share, share_server) def delete_share(self, context, share, share_server=None): """Remove a share.""" LOG.debug('Deleting share: %s.', share['id']) return self.ift_nas.delete_share(share, share_server) def get_pool(self, share): """Return pool name where the share resides on. :param share: The share hosted by the driver. """ return self.ift_nas.get_pool(share) def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported. Driver can use this method to update the list of export locations of the share if it changes. To do that, you should return list with export locations. :return None or list with export locations """ return self.ift_nas.ensure_share(share, share_server) def manage_existing(self, share, driver_options): """Brings an existing share under Manila management. If the provided share is not valid, then raise a ManageInvalidShare exception, specifying a reason for the failure. If the provided share is not in a state that can be managed, such as being replicated on the backend, the driver *MUST* raise ManageInvalidShare exception with an appropriate message. The share has a share_type, and the driver can inspect that and compare against the properties of the referenced backend share. If they are incompatible, raise a ManageExistingShareTypeMismatch, specifying a reason for the failure. :param share: Share model :param driver_options: Driver-specific options provided by admin. :return: share_update dictionary with required key 'size', which should contain size of the share. 
""" LOG.debug( 'Manage existing for share: %(share)s,', { 'share': share['share_id'], }) return self.ift_nas.manage_existing(share, driver_options) def unmanage(self, share): """Removes the specified share from Manila management. Does not delete the underlying backend share. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Manila-specific configuration that they have associated with the backend share. If provided share cannot be unmanaged, then raise an UnmanageInvalidShare exception, specifying a reason for the failure. This method is invoked when the share is being unmanaged with a share type that has ``driver_handles_share_servers`` extra-spec set to False. """ LOG.debug( 'Unmanage share: %(share)s', { 'share': share['share_id'], }) return self.ift_nas.unmanage(share) def extend_share(self, share, new_size, share_server=None): """Extends size of existing share. :param share: Share model :param new_size: New size of share (new_size > share['size']) :param share_server: Optional -- Share server model """ return self.ift_nas.extend_share(share, new_size, share_server) def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share. If consumed space on share larger than new_size driver should raise ShareShrinkingPossibleDataLoss exception: raise ShareShrinkingPossibleDataLoss(share_id=share['id']) :param share: Share model :param new_size: New size of share (new_size < share['size']) :param share_server: Optional -- Share server model :raises ShareShrinkingPossibleDataLoss, NotImplementedError """ return self.ift_nas.shrink_share(share, new_size, share_server) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/infortrend/infortrend_nas.py0000664000175000017500000006300700000000000024644 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Infortrend Technology, Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import re from oslo_concurrency import processutils from oslo_log import log from oslo_utils import units from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import utils as share_utils from manila import ssh_utils from manila import utils as manila_utils LOG = log.getLogger(__name__) def _bi_to_gi(bi_size): return bi_size / units.Gi class InfortrendNAS(object): _SSH_PORT = 22 def __init__(self, nas_ip, username, password, ssh_key, timeout, pool_dict, channel_dict): self.nas_ip = nas_ip self.port = self._SSH_PORT self.username = username self.password = password self.ssh_key = ssh_key self.ssh_timeout = timeout self.pool_dict = pool_dict self.channel_dict = channel_dict self.command = "" self.ssh = None self.sshpool = None self.location = 'a@0' def _execute(self, command_line): command_line.extend(['-z', self.location]) commands = ' '.join(command_line) manila_utils.check_ssh_injection(commands) LOG.debug('Executing: %(command)s', {'command': commands}) cli_out = self._ssh_execute(commands) return self._parser(cli_out) def _ssh_execute(self, commands): try: out, err = processutils.ssh_execute( self.ssh, commands, timeout=self.ssh_timeout, check_exit_code=True) except processutils.ProcessExecutionError as pe: rc = pe.exit_code out = pe.stdout out = out.replace('\n', '\\n') msg = _('Error on execute ssh command. 
' 'Exit code: %(rc)d, msg: %(out)s') % { 'rc': rc, 'out': out} raise exception.InfortrendNASException(err=msg) return out def _parser(self, content=None): LOG.debug('parsing data:\n%s', content) content = content.replace("\r", "") content = content.strip() json_string = content.replace("'", "\"") cli_data = json_string.splitlines()[2] if cli_data: try: data_dict = json.loads(cli_data) except Exception: msg = _('Failed to parse data: ' '%(cli_data)s to dictionary.') % { 'cli_data': cli_data} LOG.error(msg) raise exception.InfortrendNASException(err=msg) rc = int(data_dict['cliCode'][0]['Return'], 16) if rc == 0: result = data_dict['data'] else: result = data_dict['cliCode'][0]['CLI'] else: msg = _('No data is returned from NAS.') LOG.error(msg) raise exception.InfortrendNASException(err=msg) if rc != 0: msg = _('NASCLI error, returned: %(result)s.') % { 'result': result} LOG.error(msg) raise exception.InfortrendCLIException( err=msg, rc=rc, out=result) return rc, result def do_setup(self): self._init_connect() self._ensure_service_on('nfs') self._ensure_service_on('cifs') def _init_connect(self): if not (self.sshpool and self.ssh): self.sshpool = ssh_utils.SSHPool(ip=self.nas_ip, port=self.port, conn_timeout=None, login=self.username, password=self.password, privatekey=self.ssh_key) self.ssh = self.sshpool.create() if not self.ssh.get_transport().is_active(): self.sshpool = ssh_utils.SSHPool(ip=self.nas_ip, port=self.port, conn_timeout=None, login=self.username, password=self.password, privatekey=self.ssh_key) self.ssh = self.sshpool.create() LOG.debug('NAScmd [%s@%s] start!', self.username, self.nas_ip) def check_for_setup_error(self): self._check_pools_setup() self._check_channels_status() def _ensure_service_on(self, proto, slot='A'): command_line = ['service', 'status', proto] rc, service_status = self._execute(command_line) if not service_status[0][slot][proto.upper()]['enabled']: command_line = ['service', 'restart', proto] self._execute(command_line) def 
_check_channels_status(self): channel_list = list(self.channel_dict.keys()) command_line = ['ifconfig', 'inet', 'show'] rc, channels_status = self._execute(command_line) for channel in channels_status: if 'CH' in channel['datalink']: ch = channel['datalink'].strip('CH') if ch in self.channel_dict.keys(): self.channel_dict[ch] = channel['IP'] channel_list.remove(ch) if channel['status'] == 'DOWN': LOG.warning('Channel [%(ch)s] status ' 'is down, please check.', { 'ch': ch}) if len(channel_list) != 0: msg = _('Channel setting %(channel_list)s is invalid!') % { 'channel_list': channel_list} LOG.error(msg) raise exception.InfortrendNASException(message=msg) def _check_pools_setup(self): pool_list = list(self.pool_dict.keys()) command_line = ['folder', 'status'] rc, pool_data = self._execute(command_line) for pool in pool_data: pool_name = self._extract_pool_name(pool) if pool_name in self.pool_dict.keys(): pool_list.remove(pool_name) self.pool_dict[pool_name]['id'] = pool['volumeId'] self.pool_dict[pool_name]['path'] = pool['directory'] + '/' if len(pool_list) == 0: break if len(pool_list) != 0: msg = _('Please create %(pool_list)s pool/s in advance!') % { 'pool_list': pool_list} LOG.error(msg) raise exception.InfortrendNASException(message=msg) def _extract_pool_name(self, pool_info): return pool_info['directory'].split('/')[1] def _extract_lv_name(self, pool_info): return pool_info['path'].split('/')[2] def update_pools_stats(self): pools = [] command_line = ['folder', 'status'] rc, pools_data = self._execute(command_line) for pool_info in pools_data: pool_name = self._extract_pool_name(pool_info) if pool_name in self.pool_dict.keys(): total_space = float(pool_info['size']) pool_quota_used = self._get_pool_quota_used(pool_name) available_space = total_space - pool_quota_used total_capacity_gb = round(_bi_to_gi(total_space), 2) free_capacity_gb = round(_bi_to_gi(available_space), 2) pool = { 'pool_name': pool_name, 'total_capacity_gb': total_capacity_gb, 
'free_capacity_gb': free_capacity_gb, 'reserved_percentage': 0, 'qos': False, 'dedupe': False, 'compression': False, 'snapshot_support': False, 'thin_provisioning': False, 'thick_provisioning': True, 'replication_type': None, } pools.append(pool) return pools def _get_pool_quota_used(self, pool_name): pool_quota_used = 0.0 pool_data = self._get_share_pool_data(pool_name) folder_name = self._extract_lv_name(pool_data) command_line = ['fquota', 'status', pool_data['id'], folder_name, '-t', 'folder'] rc, quota_status = self._execute(command_line) for share_quota in quota_status: pool_quota_used += int(share_quota['quota']) return pool_quota_used def _get_share_pool_data(self, pool_name): if not pool_name: msg = _("Pool is not available in the share host.") raise exception.InvalidHost(reason=msg) if pool_name in self.pool_dict.keys(): return self.pool_dict[pool_name] else: msg = _('Pool [%(pool_name)s] not set in conf.') % { 'pool_name': pool_name} LOG.error(msg) raise exception.InfortrendNASException(err=msg) def create_share(self, share, share_server=None): pool_name = share_utils.extract_host(share['host'], level='pool') pool_data = self._get_share_pool_data(pool_name) folder_name = self._extract_lv_name(pool_data) share_proto = share['share_proto'].lower() share_name = share['id'].replace('-', '') share_path = pool_data['path'] + share_name command_line = ['folder', 'options', pool_data['id'], folder_name, '-c', share_name] self._execute(command_line) self._set_share_size( pool_data['id'], pool_name, share_name, share['size']) self._ensure_protocol_on(share_path, share_proto, share_name) LOG.info('Create Share [%(share)s] completed.', { 'share': share['id']}) return self._export_location( share_name, share_proto, pool_data['path']) def _export_location(self, share_name, share_proto, pool_path=None): location = [] location_data = { 'pool_path': pool_path, 'share_name': share_name, } self._check_channels_status() for ch in sorted(self.channel_dict.keys()): ip = 
self.channel_dict[ch] if share_proto == 'nfs': location.append( ip + ':%(pool_path)s%(share_name)s' % location_data) elif share_proto == 'cifs': location.append( '\\\\' + ip + '\\%(share_name)s' % location_data) else: msg = _('Unsupported protocol: [%s].') % share_proto raise exception.InvalidInput(msg) return location def _set_share_size(self, pool_id, pool_name, share_name, share_size): pool_data = self._get_share_pool_data(pool_name) folder_name = self._extract_lv_name(pool_data) command_line = ['fquota', 'create', pool_id, folder_name, share_name, str(share_size) + 'G', '-t', 'folder'] self._execute(command_line) LOG.debug('Set Share [%(share_name)s] ' 'Size [%(share_size)s G] completed.', { 'share_name': share_name, 'share_size': share_size}) return def _get_share_size(self, pool_id, pool_name, share_name): share_size = None command_line = ['fquota', 'status', pool_id, share_name, '-t', 'folder'] rc, quota_status = self._execute(command_line) for share_quota in quota_status: if share_quota['name'] == share_name: share_size = round(_bi_to_gi(float(share_quota['quota'])), 2) break return share_size def delete_share(self, share, share_server=None): pool_name = share_utils.extract_host(share['host'], level='pool') pool_data = self._get_share_pool_data(pool_name) folder_name = self._extract_lv_name(pool_data) share_name = share['id'].replace('-', '') if self._check_share_exist(pool_name, share_name): command_line = ['folder', 'options', pool_data['id'], folder_name, '-d', share_name] self._execute(command_line) else: LOG.warning('Share [%(share_name)s] is already deleted.', { 'share_name': share_name}) LOG.info('Delete Share [%(share)s] completed.', { 'share': share['id']}) def _check_share_exist(self, pool_name, share_name): path = self.pool_dict[pool_name]['path'] command_line = ['pagelist', 'folder', path] rc, subfolders = self._execute(command_line) return any(subfolder['name'] == share_name for subfolder in subfolders) def update_access(self, share, 
access_rules, add_rules, delete_rules, share_server=None): self._evict_unauthorized_clients(share, access_rules, share_server) access_dict = {} for access in access_rules: try: self._allow_access(share, access, share_server) except (exception.InfortrendNASException) as e: msg = _('Failed to allow access to client %(access)s, ' 'reason %(e)s.') % { 'access': access['access_to'], 'e': e} LOG.error(msg) access_dict[access['id']] = 'error' return access_dict def _evict_unauthorized_clients(self, share, access_rules, share_server=None): pool_name = share_utils.extract_host(share['host'], level='pool') pool_data = self._get_share_pool_data(pool_name) share_proto = share['share_proto'].lower() share_name = share['id'].replace('-', '') share_path = pool_data['path'] + share_name access_list = [] for access in access_rules: access_list.append(access['access_to']) if share_proto == 'nfs': host_ip_list = [] command_line = ['share', 'status', '-f', share_path] rc, nfs_status = self._execute(command_line) host_list = nfs_status[0]['nfs_detail']['hostList'] for host in host_list: if host['host'] != '*': host_ip_list.append(host['host']) for ip in host_ip_list: if ip not in access_list: command_line = ['share', 'options', share_path, 'nfs', '-c', ip] try: self._execute(command_line) except exception.InfortrendNASException: msg = _("Failed to remove share access rule %s") % (ip) LOG.exception(msg) pass elif share_proto == 'cifs': host_user_list = [] command_line = ['acl', 'get', share_path] rc, cifs_status = self._execute(command_line) for cifs_rule in cifs_status: if cifs_rule['name']: host_user_list.append(cifs_rule['name']) for user in host_user_list: if user not in access_list: command_line = ['acl', 'delete', share_path, '-u', user] try: self._execute(command_line) except exception.InfortrendNASException: msg = _("Failed to remove share access rule %s") % ( user) LOG.exception(msg) pass def _allow_access(self, share, access, share_server=None): pool_name = 
share_utils.extract_host(share['host'], level='pool') pool_data = self._get_share_pool_data(pool_name) share_name = share['id'].replace('-', '') share_path = pool_data['path'] + share_name share_proto = share['share_proto'].lower() access_type = access['access_type'] access_level = access['access_level'] or constants.ACCESS_LEVEL_RW access_to = access['access_to'] ACCESS_LEVEL_MAP = {access_level: access_level} msg = self._check_access_legal(share_proto, access_type) if msg: raise exception.InvalidShareAccess(reason=msg) if share_proto == 'nfs': command_line = ['share', 'options', share_path, 'nfs', '-h', access_to, '-p', access_level] self._execute(command_line) elif share_proto == 'cifs': if not self._check_user_exist(access_to): msg = _('Please create user [%(user)s] in advance.') % { 'user': access_to} LOG.error(msg) raise exception.InfortrendNASException(err=msg) if access_level == constants.ACCESS_LEVEL_RW: cifs_access = 'f' elif access_level == constants.ACCESS_LEVEL_RO: cifs_access = 'r' try: access_level = ACCESS_LEVEL_MAP[access_level] except KeyError: msg = _('Unsupported access_level: [%s].') % access_level raise exception.InvalidInput(msg) command_line = ['acl', 'set', share_path, '-u', access_to, '-a', cifs_access] self._execute(command_line) LOG.info('Share [%(share)s] access to [%(access_to)s] ' 'level [%(level)s] protocol [%(share_proto)s] completed.', { 'share': share['id'], 'access_to': access_to, 'level': access_level, 'share_proto': share_proto}) def _ensure_protocol_on(self, share_path, share_proto, cifs_name): if not self._check_proto_enabled(share_path, share_proto): command_line = ['share', share_path, share_proto, 'on'] if share_proto == 'cifs': command_line.extend(['-n', cifs_name]) self._execute(command_line) def _check_proto_enabled(self, share_path, share_proto): command_line = ['share', 'status', '-f', share_path] rc, share_status = self._execute(command_line) if share_status: check_enabled = share_status[0][share_proto] if 
check_enabled: return True return False def _check_user_exist(self, user_name): command_line = ['useradmin', 'user', 'list'] rc, user_list = self._execute(command_line) for user in user_list: if user['Name'] == user_name: return True return False def _check_access_legal(self, share_proto, access_type): msg = None if share_proto == 'cifs' and access_type != 'user': msg = _('Infortrend CIFS share only supports USER access type.') elif share_proto == 'nfs' and access_type != 'ip': msg = _('Infortrend NFS share only supports IP access type.') elif share_proto not in ('nfs', 'cifs'): msg = _('Unsupported share protocol [%s].') % share_proto return msg def get_pool(self, share): pool_name = share_utils.extract_host(share['host'], level='pool') if not pool_name: share_name = share['id'].replace('-', '') for pool in self.pool_dict.keys(): if self._check_share_exist(pool, share_name): pool_name = pool break return pool_name def ensure_share(self, share, share_server=None): share_proto = share['share_proto'].lower() pool_name = share_utils.extract_host(share['host'], level='pool') pool_data = self._get_share_pool_data(pool_name) share_name = share['id'].replace('-', '') return self._export_location( share_name, share_proto, pool_data['path']) def extend_share(self, share, new_size, share_server=None): pool_name = share_utils.extract_host(share['host'], level='pool') pool_data = self._get_share_pool_data(pool_name) share_name = share['id'].replace('-', '') self._set_share_size(pool_data['id'], pool_name, share_name, new_size) LOG.info('Successfully Extend Share [%(share)s] ' 'to size [%(new_size)s G].', { 'share': share['id'], 'new_size': new_size}) def shrink_share(self, share, new_size, share_server=None): pool_name = share_utils.extract_host(share['host'], level='pool') pool_data = self._get_share_pool_data(pool_name) share_name = share['id'].replace('-', '') folder_name = self._extract_lv_name(pool_data) command_line = ['fquota', 'status', pool_data['id'], folder_name, 
'-t', 'folder'] rc, quota_status = self._execute(command_line) for share_quota in quota_status: if share_quota['name'] == share_name: used_space = round(_bi_to_gi(float(share_quota['used'])), 2) if new_size < used_space: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) self._set_share_size(pool_data['id'], pool_name, share_name, new_size) LOG.info('Successfully Shrink Share [%(share)s] ' 'to size [%(new_size)s G].', { 'share': share['id'], 'new_size': new_size}) def manage_existing(self, share, driver_options): share_proto = share['share_proto'].lower() pool_name = share_utils.extract_host(share['host'], level='pool') pool_data = self._get_share_pool_data(pool_name) volume_name = self._extract_lv_name(pool_data) input_location = share['export_locations'][0]['path'] share_name = share['id'].replace('-', '') ch_ip, folder_name = self._parse_location(input_location, share_proto) if not self._check_channel_ip(ch_ip): msg = _('Export location ip: [%(ch_ip)s] ' 'is incorrect, please use data port ip.') % { 'ch_ip': ch_ip} LOG.error(msg) raise exception.InfortrendNASException(err=msg) if not self._check_share_exist(pool_name, folder_name): msg = _('Can not find folder [%(folder_name)s] ' 'in pool [%(pool_name)s].') % { 'folder_name': folder_name, 'pool_name': pool_name} LOG.error(msg) raise exception.InfortrendNASException(err=msg) share_path = pool_data['path'] + folder_name self._ensure_protocol_on(share_path, share_proto, share_name) share_size = self._get_share_size( pool_data['id'], pool_name, folder_name) if not share_size: msg = _('Folder [%(folder_name)s] has no size limitation, ' 'please set it first for Openstack management.') % { 'folder_name': folder_name} LOG.error(msg) raise exception.InfortrendNASException(err=msg) # rename folder name command_line = ['folder', 'options', pool_data['id'], volume_name, '-k', folder_name, share_name] self._execute(command_line) location = self._export_location( share_name, share_proto, pool_data['path']) 
LOG.info('Successfully Manage Infortrend Share [%(folder_name)s], ' 'Size: [%(size)s G], Protocol: [%(share_proto)s], ' 'new name: [%(share_name)s].', { 'folder_name': folder_name, 'size': share_size, 'share_proto': share_proto, 'share_name': share_name}) return {'size': share_size, 'export_locations': location} def _parse_location(self, input_location, share_proto): ip = None folder_name = None pattern_ip = r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' if share_proto == 'nfs': pattern_folder = r'[^\/]+$' ip = "".join(re.findall(pattern_ip, input_location)) folder_name = "".join(re.findall(pattern_folder, input_location)) elif share_proto == 'cifs': pattern_folder = r'[^\\]+$' ip = "".join(re.findall(pattern_ip, input_location)) folder_name = "".join(re.findall(pattern_folder, input_location)) if not (ip and folder_name): msg = _('Export location error, please check ' 'ip: [%(ip)s], folder_name: [%(folder_name)s].') % { 'ip': ip, 'folder_name': folder_name} LOG.error(msg) raise exception.InfortrendNASException(err=msg) return ip, folder_name def _check_channel_ip(self, channel_ip): return any(ip == channel_ip for ip in self.channel_dict.values()) def unmanage(self, share): pool_name = share_utils.extract_host(share['host'], level='pool') share_name = share['id'].replace('-', '') if not self._check_share_exist(pool_name, share_name): LOG.warning('Share [%(share_name)s] does not exist.', { 'share_name': share_name}) return LOG.info('Successfully Unmanaged Share [%(share)s].', { 'share': share['id']}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9216716 manila-21.0.0/manila/share/drivers/inspur/0000775000175000017500000000000000000000000020417 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/inspur/__init__.py0000664000175000017500000000000000000000000022516 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9216716 manila-21.0.0/manila/share/drivers/inspur/as13000/0000775000175000017500000000000000000000000021406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/inspur/as13000/__init__.py0000664000175000017500000000000000000000000023505 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/inspur/as13000/as13000_nas.py0000664000175000017500000010252300000000000023613 0ustar00zuulzuul00000000000000# Copyright 2018 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Share driver for Inspur AS13000 """ import eventlet import functools import json import re import requests import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from manila import exception from manila.i18n import _ from manila.share import driver from manila.share import utils as share_utils inspur_as13000_opts = [ cfg.HostAddressOpt( 'as13000_nas_ip', required=True, help='IP address for the AS13000 storage.'), cfg.PortOpt( 'as13000_nas_port', default=8088, help='Port number for the AS13000 storage.'), cfg.StrOpt( 'as13000_nas_login', required=True, help='Username for the AS13000 storage'), cfg.StrOpt( 'as13000_nas_password', required=True, secret=True, help='Password for the AS13000 storage'), cfg.ListOpt( 'as13000_share_pools', required=True, help='The Storage Pools Manila should use, a comma separated list'), cfg.IntOpt( 'as13000_token_available_time', default=3600, help='The effective time of token validity in seconds.') ] CONF = cfg.CONF CONF.register_opts(inspur_as13000_opts) LOG = logging.getLogger(__name__) def inspur_driver_debug_trace(f): """Log the method entrance and exit including active backend name. This should only be used on Share_Driver class methods. It depends on having a 'self' argument that is a AS13000_Driver. 
""" @functools.wraps(f) def wrapper(*args, **kwargs): driver = args[0] cls_name = driver.__class__.__name__ method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name, "method": f.__name__} backend_name = driver.configuration.share_backend_name LOG.debug("[%(backend_name)s] Enter %(method_name)s", {"method_name": method_name, "backend_name": backend_name}) result = f(*args, **kwargs) LOG.debug("[%(backend_name)s] Leave %(method_name)s", {"method_name": method_name, "backend_name": backend_name}) return result return wrapper class RestAPIExecutor(object): def __init__(self, hostname, port, username, password): self._hostname = hostname self._port = port self._username = username self._password = password self._token_pool = [] self._token_size = 1 def logins(self): """login the AS13000 and store the token in token_pool""" times = self._token_size while times > 0: token = self.login() self._token_pool.append(token) times = times - 1 LOG.debug('Logged into the AS13000.') def login(self): """login in the AS13000 and return the token""" method = 'security/token' params = {'name': self._username, 'password': self._password} token = self.send_rest_api(method=method, params=params, request_type='post').get('token') return token def logout(self): method = 'security/token' self.send_rest_api(method=method, request_type='delete') def refresh_token(self, force=False): if force is True: for i in range(self._token_size): self._token_pool = [] token = self.login() self._token_pool.append(token) else: for i in range(self._token_size): self.logout() token = self.login() self._token_pool.append(token) LOG.debug('Tokens have been refreshed.') def send_rest_api(self, method, params=None, request_type='post'): attempts = 3 msge = '' while attempts > 0: attempts -= 1 try: return self.send_api(method, params, request_type) except exception.NetworkException as e: msge = str(e) LOG.error(msge) self.refresh_token(force=True) eventlet.sleep(1) except exception.ShareBackendException as e: 
msge = str(e) break msg = (_('Access RestAPI /rest/%(method)s by %(type)s failed,' ' error: %(msge)s') % {'method': method, 'msge': msge, 'type': request_type}) LOG.error(msg) raise exception.ShareBackendException(msg) @staticmethod def do_request(cmd, url, header, data): LOG.debug('CMD: %(cmd)s, URL: %(url)s, DATA: %(data)s', {'cmd': cmd, 'url': url, 'data': data}) if cmd == 'post': req = requests.post(url, data=data, headers=header) elif cmd == 'get': req = requests.get(url, data=data, headers=header) elif cmd == 'put': req = requests.put(url, data=data, headers=header) elif cmd == 'delete': req = requests.delete(url, data=data, headers=header) else: msg = (_('Unsupported cmd: %s') % cmd) raise exception.ShareBackendException(msg) response = req.json() code = req.status_code LOG.debug('CODE: %(code)s, RESPONSE: %(response)s', {'code': code, 'response': response}) if code != 200: msg = (_('Code: %(code)s, URL: %(url)s, Message: %(msg)s') % {'code': req.status_code, 'url': req.url, 'msg': req.text}) LOG.error(msg) raise exception.NetworkException(msg) return response def send_api(self, method, params=None, request_type='post'): if params: params = json.dumps(params) url = ('http://%(hostname)s:%(port)s/%(rest)s/%(method)s' % {'hostname': self._hostname, 'port': self._port, 'rest': 'rest', 'method': method}) # header is not needed when the driver login the backend if method == 'security/token': # token won't be return to the token_pool if request_type == 'delete': header = {'X-Auth-Token': self._token_pool.pop(0)} else: header = None else: if len(self._token_pool) == 0: self.logins() token = self._token_pool.pop(0) header = {'X-Auth-Token': token} self._token_pool.append(token) response = self.do_request(request_type, url, header, params) try: code = response.get('code') if code == 0: if request_type == 'get': data = response.get('data') else: if method == 'security/token': data = response.get('data') else: data = response.get('message') data = str(data).lower() if 
hasattr(data, 'success'): return elif code == 301: msg = _('Token is expired') LOG.error(msg) raise exception.NetworkException(msg) else: message = response.get('message') msg = (_('Unexpected RestAPI response: %(code)d %(msg)s') % { 'code': code, 'msg': message}) LOG.error(msg) raise exception.ShareBackendException(msg) except ValueError: msg = _("Deal with response failed") raise exception.ShareBackendException(msg) return data class AS13000ShareDriver(driver.ShareDriver): """AS13000 Share Driver Version history: V1.0.0: Initial version Driver support: share create/delete, snapshot create/delete, extend size, create_share_from_snapshot, update_access. protocol: NFS/CIFS """ VENDOR = 'INSPUR' VERSION = '1.0.0' PROTOCOL = 'NFS_CIFS' def __init__(self, *args, **kwargs): super(AS13000ShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(inspur_as13000_opts) self.hostname = self.configuration.as13000_nas_ip self.port = self.configuration.as13000_nas_port self.username = self.configuration.as13000_nas_login self.password = self.configuration.as13000_nas_password self.token_available_time = (self.configuration. 
as13000_token_available_time) self.pools = self.configuration.as13000_share_pools # base dir detail contain the information which we will use # when we create subdirectorys self.base_dir_detail = None self._token_time = 0 self.ips = [] self._rest = RestAPIExecutor(self.hostname, self.port, self.username, self.password) @inspur_driver_debug_trace def do_setup(self, context): # get access tokens self._rest.logins() self._token_time = time.time() # Check the pool in conf exist in the backend self._validate_pools_exist() # get the base directory detail self.base_dir_detail = self._get_directory_detail(self.pools[0]) # get all backend node ip self.ips = self._get_nodes_ips() @inspur_driver_debug_trace def check_for_setup_error(self): if self.base_dir_detail is None: msg = _('The pool status is not right') raise exception.ShareBackendException(msg) if len(self.ips) == 0: msg = _('All backend nodes are down') raise exception.ShareBackendException(msg) @inspur_driver_debug_trace def create_share(self, context, share, share_server=None): """Create a share.""" pool, name, size, proto = self._get_share_instance_pnsp(share) # create directory first share_path = self._create_directory(share_name=name, pool_name=pool) # then create nfs or cifs share if proto == 'nfs': self._create_nfs_share(share_path=share_path) else: self._create_cifs_share(share_name=name, share_path=share_path) # finally we set the quota of directory self._set_directory_quota(share_path, size) locations = self._get_location_path(name, share_path, proto) LOG.debug('Create share: name:%(name)s' ' protocol:%(proto)s,location: %(loc)s', {'name': name, 'proto': proto, 'loc': locations}) return locations @inspur_driver_debug_trace def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Create a share from snapshot.""" pool, name, size, proto = self._get_share_instance_pnsp(share) # create directory first share_path = self._create_directory(share_name=name, 
pool_name=pool) # as quota must be set when directory is empty # then we set the quota of directory self._set_directory_quota(share_path, size) # and next clone snapshot to dest_path self._clone_directory_to_dest(snapshot=snapshot, dest_path=share_path) # finally create share if proto == 'nfs': self._create_nfs_share(share_path=share_path) else: self._create_cifs_share(share_name=name, share_path=share_path) locations = self._get_location_path(name, share_path, proto) LOG.debug('Create share from snapshot:' ' name:%(name)s protocol:%(proto)s,location: %(loc)s', {'name': name, 'proto': proto, 'loc': locations}) return locations @inspur_driver_debug_trace def delete_share(self, context, share, share_server=None): """Delete share.""" pool, name, _, proto = self._get_share_instance_pnsp(share) share_path = self._generate_share_path(pool, name) if proto == 'nfs': share_backend = self._get_nfs_share(share_path) if len(share_backend) == 0: return else: self._delete_nfs_share(share_path) else: share_backend = self._get_cifs_share(name) if len(share_backend) == 0: return else: self._delete_cifs_share(name) self._delete_directory(share_path) LOG.debug('Delete share: %s', name) @inspur_driver_debug_trace def extend_share(self, share, new_size, share_server=None): """extend share to new size""" pool, name, size, proto = self._get_share_instance_pnsp(share) share_path = self._generate_share_path(pool, name) self._set_directory_quota(share_path, new_size) LOG.debug('extend share %(name)s to new size %(size)s GB', {'name': name, 'size': new_size}) @inspur_driver_debug_trace def ensure_share(self, context, share, share_server=None): """Ensure that share is exported.""" pool, name, size, proto = self._get_share_instance_pnsp(share) share_path = self._generate_share_path(pool, name) if proto == 'nfs': share_backend = self._get_nfs_share(share_path) elif proto == 'cifs': share_backend = self._get_cifs_share(name) else: msg = (_('Invalid NAS protocol supplied: %s.') % proto) 
LOG.error(msg) raise exception.InvalidInput(msg) if len(share_backend) == 0: raise exception.ShareResourceNotFound(share_id=share['share_id']) return self._get_location_path(name, share_path, proto) @inspur_driver_debug_trace def create_snapshot(self, context, snapshot, share_server=None): """create snapshot of share""" # !!! Attention the share property is a ShareInstance share = snapshot['share'] pool, share_name, _, _ = self._get_share_instance_pnsp(share) share_path = self._generate_share_path(pool, share_name) snap_name = self._generate_snapshot_name(snapshot) method = 'snapshot/directory' request_type = 'post' params = {'path': share_path, 'snapName': snap_name} self._rest.send_rest_api(method=method, params=params, request_type=request_type) LOG.debug('Create snapshot %(snap)s for share %(share)s', {'snap': snap_name, 'share': share_name}) @inspur_driver_debug_trace def delete_snapshot(self, context, snapshot, share_server=None): """delete snapshot of share""" # !!! Attention the share property is a ShareInstance share = snapshot['share'] pool, share_name, _, _ = self._get_share_instance_pnsp(share) share_path = self._generate_share_path(pool, share_name) # if there are no snapshot exist, driver will return directly snaps_backend = self._get_snapshots_from_share(share_path) if len(snaps_backend) == 0: return snap_name = self._generate_snapshot_name(snapshot) method = ('snapshot/directory?path=%s&snapName=%s' % (share_path, snap_name)) request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) LOG.debug('Delete snapshot %(snap)s of share %(share)s', {'snap': snap_name, 'share': share_name}) @staticmethod def transfer_rule_to_client(proto, rule): """transfer manila access rule to backend client""" access_level = rule['access_level'] if proto == 'cifs' and access_level == 'rw': access_level = 'rwx' return dict(name=rule['access_to'], type=(0 if proto == 'nfs' else 1), authority=access_level) @inspur_driver_debug_trace def 
update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """update access of share""" pool, share_name, _, proto = self._get_share_instance_pnsp(share) share_path = self._generate_share_path(pool, share_name) method = 'file/share/%s' % proto request_type = 'put' params = { 'path': share_path, 'addedClientList': [], 'deletedClientList': [], 'editedClientList': [] } if proto == 'nfs': share_backend = self._get_nfs_share(share_path) params['pathAuthority'] = share_backend['pathAuthority'] else: params['name'] = share_name if add_rules or delete_rules: to_add_clients = [self.transfer_rule_to_client(proto, rule) for rule in add_rules] params['addedClientList'] = to_add_clients to_del_clients = [self.transfer_rule_to_client(proto, rule) for rule in delete_rules] params['deletedClientList'] = to_del_clients else: access_clients = [self.transfer_rule_to_client(proto, rule) for rule in access_rules] params['addedClientList'] = access_clients self._clear_access(share) self._rest.send_rest_api(method=method, params=params, request_type=request_type) LOG.debug('complete the update access work for share %s', share_name) @inspur_driver_debug_trace def _update_share_stats(self, data=None): """update the backend stats including driver info and pools info""" # Do a check of the token validity each time we update share stats, # do a refresh if token already expires time_difference = time.time() - self._token_time if time_difference > self.token_available_time: self._rest.refresh_token() self._token_time = time.time() LOG.debug('Token of Driver has been refreshed') data = { 'vendor_name': self.VENDOR, 'driver_version': self.VERSION, 'storage_protocol': self.PROTOCOL, 'share_backend_name': self.configuration.safe_get('share_backend_name'), 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'pools': [self._get_pool_stats(pool) for pool in self.pools] } super(AS13000ShareDriver, self)._update_share_stats(data) 
@inspur_driver_debug_trace def _clear_access(self, share): """clear all access of share""" pool, share_name, size, proto = self._get_share_instance_pnsp(share) share_path = self._generate_share_path(pool, share_name) method = 'file/share/%s' % proto request_type = 'put' params = { 'path': share_path, 'addedClientList': [], 'deletedClientList': [], 'editedClientList': [] } if proto == 'nfs': share_backend = self._get_nfs_share(share_path) params['deletedClientList'] = share_backend['clientList'] params['pathAuthority'] = share_backend['pathAuthority'] else: share_backend = self._get_cifs_share(share_name) params['deletedClientList'] = share_backend['userList'] params['name'] = share_name self._rest.send_rest_api(method=method, params=params, request_type=request_type) LOG.debug('Clear all the access of share %s', share_name) @inspur_driver_debug_trace def _validate_pools_exist(self): """Check the pool in conf exist in the backend""" available_pools = self._get_directory_list('/') for pool in self.pools: if pool not in available_pools: msg = (_('Pool %s is not exist in backend storage.') % pool) LOG.error(msg) raise exception.InvalidInput(reason=msg) @inspur_driver_debug_trace def _get_directory_quota(self, path): """get the quota of directory""" method = 'file/quota/directory?path=/%s' % path request_type = 'get' data = self._rest.send_rest_api(method=method, request_type=request_type) quota = data.get('hardthreshold') if quota is None: # the method of '_update_share_stats' will check quota of pools. 
# To avoid return NONE for pool info, so raise this exception msg = (_(r'Quota of pool: /%s is not set, ' r'please set it in GUI of AS13000') % path) LOG.error(msg) raise exception.ShareBackendException(msg=msg) hardunit = data.get('hardunit') used_capacity = data.get('capacity') used_capacity = (str(used_capacity)).upper() used_capacity = self._unit_convert(used_capacity) if hardunit == 1: quota = quota * 1024 total_capacity = int(quota) used_capacity = int(used_capacity) return total_capacity, used_capacity def _get_pool_stats(self, path): """Get the stats of pools, such as capacity and other information.""" total_capacity, used_capacity = self._get_directory_quota(path) free_capacity = total_capacity - used_capacity pool = { 'pool_name': path, 'reserved_percentage': self.configuration.reserved_share_percentage, 'reserved_snapshot_percentage': self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage, 'reserved_share_extend_percentage': self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage, 'max_over_subscription_ratio': self.configuration.max_over_subscription_ratio, 'dedupe': False, 'compression': False, 'qos': False, 'thin_provisioning': True, 'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'allocated_capacity_gb': used_capacity, 'snapshot_support': True, 'create_share_from_snapshot_support': True } return pool @inspur_driver_debug_trace def _get_directory_list(self, path): """Get all the directory list of target path""" method = 'file/directory?path=%s' % path request_type = 'get' directory_list = self._rest.send_rest_api(method=method, request_type=request_type) dir_list = [] for directory in directory_list: dir_list.append(directory['name']) return dir_list @inspur_driver_debug_trace def _create_directory(self, share_name, pool_name): """create a directory for share""" method = 'file/directory' request_type = 'post' params = {'name': 
share_name, 'parentPath': self.base_dir_detail['path'], 'authorityInfo': self.base_dir_detail['authorityInfo'], 'dataProtection': self.base_dir_detail['dataProtection'], 'poolName': self.base_dir_detail['poolName']} self._rest.send_rest_api(method=method, params=params, request_type=request_type) return self._generate_share_path(pool_name, share_name) @inspur_driver_debug_trace def _delete_directory(self, share_path): """delete the directory when delete share""" method = 'file/directory?path=%s' % share_path request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) @inspur_driver_debug_trace def _set_directory_quota(self, share_path, quota): """set directory quota for share""" method = 'file/quota/directory' request_type = 'put' params = {'path': share_path, 'hardthreshold': quota, 'hardunit': 2} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @inspur_driver_debug_trace def _create_nfs_share(self, share_path): """create a NFS share""" method = 'file/share/nfs' request_type = 'post' params = {'path': share_path, 'pathAuthority': 'rw', 'client': []} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @inspur_driver_debug_trace def _delete_nfs_share(self, share_path): """Delete the NFS share""" method = 'file/share/nfs?path=%s' % share_path request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) @inspur_driver_debug_trace def _get_nfs_share(self, share_path): """Get the nfs share in backend""" method = 'file/share/nfs?path=%s' % share_path request_type = 'get' share_backend = self._rest.send_rest_api(method=method, request_type=request_type) return share_backend @inspur_driver_debug_trace def _create_cifs_share(self, share_name, share_path): """Create a CIFS share.""" method = 'file/share/cifs' request_type = 'post' params = {'path': share_path, 'name': share_name, 'userlist': []} self._rest.send_rest_api(method=method, params=params, 
request_type=request_type) @inspur_driver_debug_trace def _delete_cifs_share(self, share_name): """Delete the CIFS share.""" method = 'file/share/cifs?name=%s' % share_name request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) @inspur_driver_debug_trace def _get_cifs_share(self, share_name): """Get the CIFS share in backend""" method = 'file/share/cifs?name=%s' % share_name request_type = 'get' share_backend = self._rest.send_rest_api(method=method, request_type=request_type) return share_backend @inspur_driver_debug_trace def _clone_directory_to_dest(self, snapshot, dest_path): """Clone the directory to the new directory""" # get the origin share name of the snapshot share_instance = snapshot['share_instance'] pool, name, _, _ = self._get_share_instance_pnsp(share_instance) share_path = self._generate_share_path(pool, name) # get the snapshot instance name snap_name = self._generate_snapshot_name(snapshot) method = 'snapshot/directory/clone' request_type = 'post' params = {'path': share_path, 'snapName': snap_name, 'destPath': dest_path} self._rest.send_rest_api(method=method, params=params, request_type=request_type) LOG.debug('Clone Path: %(path)s Snapshot: %(snap)s to Path %(dest)s', {'path': share_path, 'snap': snap_name, 'dest': dest_path}) @inspur_driver_debug_trace def _get_snapshots_from_share(self, path): """get all the snapshot of share""" method = 'snapshot/directory?path=%s' % path request_type = 'get' snaps = self._rest.send_rest_api(method=method, request_type=request_type) return snaps @inspur_driver_debug_trace def _get_location_path(self, share_name, share_path, share_proto): """return all the location of all nodes""" if share_proto == 'nfs': location = [ {'path': r'%(ip)s:%(share_path)s' % {'ip': ip, 'share_path': share_path}} for ip in self.ips] else: location = [ {'path': r'\\%(ip)s\%(share_name)s' % {'ip': ip, 'share_name': share_name}} for ip in self.ips] return location def _get_nodes_virtual_ips(self): 
"""Get the virtual ip list of the node""" method = 'ctdb/set' request_type = 'get' ctdb_set = self._rest.send_rest_api(method=method, request_type=request_type) virtual_ips = [] for vip in ctdb_set['virtualIpList']: ip = vip['ip'].split('/')[0] virtual_ips.append(ip) return virtual_ips def _get_nodes_physical_ips(self): """Get the physical ip of all the backend nodes""" method = 'cluster/node/cache' request_type = 'get' cached_nodes = self._rest.send_rest_api(method=method, request_type=request_type) node_ips = [] for node in cached_nodes: if node['runningStatus'] == 1 and node['healthStatus'] == 1: node_ips.append(node['nodeIp']) return node_ips def _get_nodes_ips(self): """Return both the physical ip and virtual ip""" virtual_ips = self._get_nodes_virtual_ips() physical_ips = self._get_nodes_physical_ips() return virtual_ips + physical_ips def _get_share_instance_pnsp(self, share_instance): """Get pool, name, size, proto information of a share instance. AS13000 require all the names can only consist of letters,numbers, and undercores,and must begin with a letter. Also the length of name must less than 32 character. The driver will use the ID as the name in backend, add 'share_' to the beginning,and convert '-' to '_' """ pool = share_utils.extract_host(share_instance['host'], level='pool') name = self._generate_share_name(share_instance) # a share instance may not contain size attr. try: size = share_instance['size'] except AttributeError: size = None # a share instance may not contain proto attr. 
try: proto = share_instance['share_proto'].lower() except AttributeError: proto = None LOG.debug("Pool %s, Name: %s, Size: %s, Protocol: %s", pool, name, size, proto) return pool, name, size, proto def _unit_convert(self, capacity): """Convert all units to GB""" capacity = str(capacity) capacity = capacity.upper() try: unit_of_used = re.findall(r'[A-Z]', capacity) unit_of_used = ''.join(unit_of_used) except BaseException: unit_of_used = '' capacity = capacity.replace(unit_of_used, '') capacity = float(capacity.replace(unit_of_used, '')) if unit_of_used in ['B', '']: capacity = capacity / units.Gi elif unit_of_used in ['K', 'KB']: capacity = capacity / units.Mi elif unit_of_used in ['M', 'MB']: capacity = capacity / units.Ki elif unit_of_used in ['G', 'GB']: capacity = capacity elif unit_of_used in ['T', 'TB']: capacity = capacity * units.Ki elif unit_of_used in ['E', 'EB']: capacity = capacity * units.Mi capacity = '%.0f' % capacity return float(capacity) def _format_name(self, name): """format name to meet the backend requirements""" name = name[0:32] name = name.replace('-', '_') return name def _generate_share_name(self, share_instance): share_name = 'share_%s' % share_instance['id'] return self._format_name(share_name) def _generate_snapshot_name(self, snapshot_instance): snap_name = 'snap_%s' % snapshot_instance['id'] return self._format_name(snap_name) @staticmethod def _generate_share_path(pool, share_name): return r'/%s/%s' % (pool, share_name) def _get_directory_detail(self, directory): method = 'file/directory/detail?path=/%s' % directory request_type = 'get' details = self._rest.send_rest_api(method=method, request_type=request_type) return details[0] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9216716 manila-21.0.0/manila/share/drivers/inspur/instorage/0000775000175000017500000000000000000000000022412 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/inspur/instorage/__init__.py0000664000175000017500000000000000000000000024511 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/inspur/instorage/cli_helper.py0000664000175000017500000003755400000000000025110 0ustar00zuulzuul00000000000000# Copyright 2019 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" CLI helpers for Inspur InStorage """ import paramiko import re from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log from oslo_utils import excutils from manila import exception from manila.i18n import _ from manila import ssh_utils from manila import utils as manila_utils LOG = log.getLogger(__name__) class SSHRunner(object): """SSH runner is used to run ssh command on inspur instorage system.""" def __init__(self, host, port, login, password, privatekey=None): self.host = host self.port = port self.login = login self.password = password self.privatekey = privatekey self.ssh_conn_timeout = 60 self.ssh_min_pool_size = 1 self.ssh_max_pool_size = 10 self.sshpool = None def __call__(self, cmd_list, check_exit_code=True, attempts=1): """SSH tool""" manila_utils.check_ssh_injection(cmd_list) command = ' '.join(cmd_list) if not self.sshpool: try: self.sshpool = ssh_utils.SSHPool( self.host, self.port, self.ssh_conn_timeout, self.login, password=self.password, privatekey=self.privatekey, min_size=self.ssh_min_pool_size, max_size=self.ssh_max_pool_size ) except paramiko.SSHException: LOG.error("Unable to create SSHPool") raise try: return self._ssh_execute(self.sshpool, command, check_exit_code, attempts) except Exception: LOG.error("Error running SSH command: %s", command) raise def _ssh_execute(self, sshpool, command, check_exit_code=True, attempts=1): try: with sshpool.item() as ssh: last_exception = None while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.exception('Error has occurred') last_exception = e greenthread.sleep(1) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) 
except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) class CLIParser(object): """Parse MCS CLI output and generate iterable.""" def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True): super(CLIParser, self).__init__() if ssh_cmd: self.ssh_cmd = ' '.join(ssh_cmd) else: self.ssh_cmd = 'None' self.raw = raw self.delim = delim self.with_header = with_header self.result = self._parse() def __getitem__(self, key): try: return self.result[key] except KeyError: msg = (_('Did not find the expected key %(key)s in %(fun)s: ' '%(raw)s.') % {'key': key, 'fun': self.ssh_cmd, 'raw': self.raw}) raise exception.ShareBackendException(msg=msg) def __iter__(self): for a in self.result: yield a def __len__(self): return len(self.result) def _parse(self): def get_reader(content, delim): for line in content.lstrip().splitlines(): line = line.strip() if line: yield line.split(delim) else: yield [] if isinstance(self.raw, str): stdout, stderr = self.raw, '' else: stdout, stderr = self.raw reader = get_reader(stdout, self.delim) result = [] if self.with_header: hds = tuple() for row in reader: hds = row break for row in reader: cur = dict() if len(hds) != len(row): msg = (_('Unexpected CLI response: header/row mismatch. 
' 'header: %(header)s, row: %(row)s.') % {'header': hds, 'row': row}) raise exception.ShareBackendException(msg=msg) for k, v in zip(hds, row): CLIParser.append_dict(cur, k, v) result.append(cur) else: cur = dict() for row in reader: if row: CLIParser.append_dict(cur, row[0], ' '.join(row[1:])) elif cur: # start new section result.append(cur) cur = dict() if cur: result.append(cur) return result @staticmethod def append_dict(dict_, key, value): key, value = key.strip(), value.strip() obj = dict_.get(key, None) if obj is None: dict_[key] = value elif isinstance(obj, list): obj.append(value) dict_[key] = obj else: dict_[key] = [obj, value] return dict_ class InStorageSSH(object): """SSH interface to Inspur InStorage systems.""" def __init__(self, ssh_runner): self._ssh = ssh_runner def _run_ssh(self, ssh_cmd): try: return self._ssh(ssh_cmd) except processutils.ProcessExecutionError as e: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': e.stdout, 'err': e.stderr}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def run_ssh_inq(self, ssh_cmd, delim='!', with_header=False): """Run an SSH command and return parsed output.""" raw = self._run_ssh(ssh_cmd) LOG.debug('Response for cmd %s is %s', ssh_cmd, raw) return CLIParser(raw, ssh_cmd=ssh_cmd, delim=delim, with_header=with_header) def run_ssh_assert_no_output(self, ssh_cmd): """Run an SSH command and assert no output returned.""" out, err = self._run_ssh(ssh_cmd) if len(out.strip()) != 0: msg = (_('Expected no output from CLI command %(cmd)s, ' 'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def run_ssh_check_created(self, ssh_cmd): """Run an SSH command and return the ID of the created object.""" out, err = self._run_ssh(ssh_cmd) try: match_obj = re.search(r'\[([0-9]+)\],? 
successfully created', out) return match_obj.group(1) except (AttributeError, IndexError): msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def lsnode(self, node_id=None): with_header = True ssh_cmd = ['mcsinq', 'lsnode', '-delim', '!'] if node_id: with_header = False ssh_cmd.append(node_id) return self.run_ssh_inq(ssh_cmd, with_header=with_header) def lsnaspool(self, pool_id=None): ssh_cmd = ['mcsinq', 'lsnaspool', '-delim', '!'] if pool_id: ssh_cmd.append(pool_id) return self.run_ssh_inq(ssh_cmd, with_header=True) def lsfs(self, node_name=None, fsname=None): if fsname and not node_name: msg = _('Node name should be set when file system name is set.') LOG.error(msg) raise exception.InvalidParameterValue(msg) ssh_cmd = ['mcsinq', 'lsfs', '-delim', '!'] to_append = [] if node_name: to_append += ['-node', '"%s"' % node_name] if fsname: to_append += ['-name', '"%s"' % fsname] if not to_append: to_append += ['-all'] ssh_cmd += to_append return self.run_ssh_inq(ssh_cmd, with_header=True) def addfs(self, fsname, pool_name, size, node_name): """Create a file system on the storage. :param fsname: file system name :param pool_name: pool in which to create the file system :param size: file system size in GB :param node_name: the primary node name :return: """ ssh_cmd = ['mcsop', 'addfs', '-name', '"%s"' % fsname, '-pool', '"%s"' % pool_name, '-size', '%dg' % size, '-node', '"%s"' % node_name] self.run_ssh_assert_no_output(ssh_cmd) def rmfs(self, fsname): """Remove the specific file system. :param fsname: file system name to be removed :return: """ ssh_cmd = ['mcsop', 'rmfs', '-name', '"%s"' % fsname] self.run_ssh_assert_no_output(ssh_cmd) def expandfs(self, fsname, size): """Expand the space of the specific file system. 
:param fsname: file system name :param size: the size(GB) to be expanded, origin + size = result :return: """ ssh_cmd = ['mcsop', 'expandfs', '-name', '"%s"' % fsname, '-size', '%dg' % size] self.run_ssh_assert_no_output(ssh_cmd) # NAS directory operation def lsnasdir(self, dirpath): """List the child directory under dirpath. :param dirpath: the parent directory to list with :return: """ ssh_cmd = ['mcsinq', 'lsnasdir', '-delim', '!', '"%s"' % dirpath] return self.run_ssh_inq(ssh_cmd, with_header=True) def addnasdir(self, dirpath): """Create a new NAS directory indicated by dirpath.""" ssh_cmd = ['mcsop', 'addnasdir', '"%s"' % dirpath] self.run_ssh_assert_no_output(ssh_cmd) def chnasdir(self, old_path, new_path): """Rename the NAS directory name.""" ssh_cmd = ['mcsop', 'chnasdir', '-oldpath', '"%s"' % old_path, '-newpath', '"%s"' % new_path] self.run_ssh_assert_no_output(ssh_cmd) def rmnasdir(self, dirpath): """Remove the specific dirpath.""" ssh_cmd = ['mcsop', 'rmnasdir', '"%s"' % dirpath] self.run_ssh_assert_no_output(ssh_cmd) # NFS operation def rmnfs(self, share_path): """Remove the NFS indicated by path.""" ssh_cmd = ['mcsop', 'rmnfs', '"%s"' % share_path] self.run_ssh_assert_no_output(ssh_cmd) def lsnfslist(self, prefix=None): """List NFS shares on a system.""" ssh_cmd = ['mcsinq', 'lsnfslist', '-delim', '!'] if prefix: ssh_cmd.append('"%s"' % prefix) return self.run_ssh_inq(ssh_cmd, with_header=True) def lsnfsinfo(self, share_path): """List a specific NFS share's information.""" ssh_cmd = ['mcsinq', 'lsnfsinfo', '-delim', '!', '"%s"' % share_path] return self.run_ssh_inq(ssh_cmd, with_header=True) def addnfsclient(self, share_path, client_spec): """Add a client access rule to NFS share. :param share_path: the NFS share path. :param client_spec: IP/MASK:RIGHTS:ALL_SQUASH:ROOT_SQUASH. 
:return: """ ssh_cmd = ['mcsop', 'addnfsclient', '-path', '"%s"' % share_path, '-client', client_spec] self.run_ssh_assert_no_output(ssh_cmd) def chnfsclient(self, share_path, client_spec): """Change a NFS share's client info.""" ssh_cmd = ['mcsop', 'chnfsclient', '-path', '"%s"' % share_path, '-client', client_spec] self.run_ssh_assert_no_output(ssh_cmd) def rmnfsclient(self, share_path, client_spec): """Remove a client info from the NFS share.""" # client_spec parameter for rmnfsclient is IP/MASK, # so we need remove the right part client_spec = client_spec.split(':')[0] ssh_cmd = ['mcsop', 'rmnfsclient', '-path', '"%s"' % share_path, '-client', client_spec] self.run_ssh_assert_no_output(ssh_cmd) # CIFS operation def lscifslist(self, filter=None): """List CIFS shares on the system.""" ssh_cmd = ['mcsinq', 'lscifslist', '-delim', '!'] if filter: ssh_cmd.append('"%s"' % filter) return self.run_ssh_inq(ssh_cmd, with_header=True) def lscifsinfo(self, share_name): """List a specific CIFS share's information.""" ssh_cmd = ['mcsinq', 'lscifsinfo', '-delim', '!', '"%s"' % share_name] return self.run_ssh_inq(ssh_cmd, with_header=True) def addcifs(self, share_name, dirpath, oplocks='off'): """Create a CIFS share with given path.""" ssh_cmd = ['mcsop', 'addcifs', '-name', share_name, '-path', dirpath, '-oplocks', oplocks] self.run_ssh_assert_no_output(ssh_cmd) def rmcifs(self, share_name): """Remove a CIFS share.""" ssh_cmd = ['mcsop', 'rmcifs', share_name] self.run_ssh_assert_no_output(ssh_cmd) def chcifs(self, share_name, oplocks='off'): """Change a CIFS share's attribute. :param share_name: share's name :param oplocks: 'off' or 'on' :return: """ ssh_cmd = ['mcsop', 'chcifs', '-name', share_name, '-oplocks', oplocks] self.run_ssh_assert_no_output(ssh_cmd) def addcifsuser(self, share_name, rights): """Add a user access rule to CIFS share. 
:param share_name: share's name :param rights: [LU|LG]:xxx:[rw|ro] :return: """ ssh_cmd = ['mcsop', 'addcifsuser', '-name', share_name, '-rights', rights] self.run_ssh_assert_no_output(ssh_cmd) def chcifsuser(self, share_name, rights): """Change a user access rule.""" ssh_cmd = ['mcsop', 'chcifsuser', '-name', share_name, '-rights', rights] self.run_ssh_assert_no_output(ssh_cmd) def rmcifsuser(self, share_name, rights): """Remove CIFS user from a CIFS share.""" # the rights parameter for rmcifsuser is LU:NAME rights = ':'.join(rights.split(':')[0:-1]) ssh_cmd = ['mcsop', 'rmcifsuser', '-name', share_name, '-rights', rights] self.run_ssh_assert_no_output(ssh_cmd) # NAS port ip def lsnasportip(self): """List NAS service port ip address.""" ssh_cmd = ['mcsinq', 'lsnasportip', '-delim', '!'] return self.run_ssh_inq(ssh_cmd, with_header=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/inspur/instorage/instorage.py0000664000175000017500000005301700000000000024765 0ustar00zuulzuul00000000000000# Copyright 2019 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Driver for Inspur InStorage """ import ipaddress import itertools from oslo_config import cfg from oslo_log import log from oslo_utils import units from manila import coordination from manila import exception from manila.i18n import _ from manila.share import driver from manila.share import utils as share_utils from manila.share.drivers.inspur.instorage.cli_helper import InStorageSSH from manila.share.drivers.inspur.instorage.cli_helper import SSHRunner instorage_opts = [ cfg.HostAddressOpt( 'instorage_nas_ip', required=True, help='IP address for the InStorage.' ), cfg.PortOpt( 'instorage_nas_port', default=22, help='Port number for the InStorage.' ), cfg.StrOpt( 'instorage_nas_login', required=True, help='Username for the InStorage.' ), cfg.StrOpt( 'instorage_nas_password', required=True, secret=True, help='Password for the InStorage.' ), cfg.ListOpt( 'instorage_nas_pools', required=True, help='The Storage Pools Manila should use, a comma separated list.' ) ] CONF = cfg.CONF CONF.register_opts(instorage_opts) LOG = log.getLogger(__name__) class InStorageShareDriver(driver.ShareDriver): """Inspur InStorage NAS driver. Allows for NFS and CIFS NAS. .. code::none Version history: 1.0.0 - Initial driver. 
Driver support: share create/delete extend size update_access protocol: NFS/CIFS """ VENDOR = 'INSPUR' VERSION = '1.0.0' PROTOCOL = 'NFS_CIFS' def __init__(self, *args, **kwargs): super(InStorageShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(instorage_opts) self.backend_name = self.configuration.safe_get('share_backend_name') self.backend_pools = self.configuration.instorage_nas_pools self.ssh_runner = SSHRunner(**{ 'host': self.configuration.instorage_nas_ip, 'port': 22, 'login': self.configuration.instorage_nas_login, 'password': self.configuration.instorage_nas_password }) self.assistant = InStorageAssistant(self.ssh_runner) def check_for_setup_error(self): nodes = self.assistant.get_nodes_info() if len(nodes) == 0: msg = _('No valid node, be sure the NAS Port IP is configured') raise exception.ShareBackendException(msg=msg) pools = self.assistant.get_available_pools() not_exist = set(self.backend_pools).difference(set(pools)) if not_exist: msg = _('Pool %s not exist on the storage system') % not_exist raise exception.InvalidParameterValue(msg) def _update_share_stats(self, **kwargs): """Retrieve share stats information.""" try: stats = { 'share_backend_name': self.backend_name, 'vendor_name': self.VENDOR, 'driver_version': self.VERSION, 'storage_protocol': 'NFS_CIFS', 'reserved_percentage': self.configuration.reserved_share_percentage, 'reserved_snapshot_percentage': ( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), 'reserved_share_extend_percentage': ( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), 'max_over_subscription_ratio': self.configuration.max_over_subscription_ratio, 'snapshot_support': False, 'create_share_from_snapshot_support': False, 'revert_to_snapshot_support': False, 'qos': False, 'total_capacity_gb': 0.0, 'free_capacity_gb': 0.0, 'pools': [] } pools = 
self.assistant.get_pools_attr(self.backend_pools) total_capacity_gb = 0 free_capacity_gb = 0 for pool in pools.values(): total_capacity_gb += pool['total_capacity_gb'] free_capacity_gb += pool['free_capacity_gb'] stats['pools'].append(pool) stats['total_capacity_gb'] = total_capacity_gb stats['free_capacity_gb'] = free_capacity_gb LOG.debug('share status %s', stats) super(InStorageShareDriver, self)._update_share_stats(stats) except Exception: msg = _('Unexpected error while trying to get the ' 'usage stats from array.') LOG.exception(msg) raise @staticmethod def generate_share_name(share): # Generate a name with id of the share as base, and do follows: # 1. Remove the '-' in the id string. # 2. Transform all alpha to lower case. # 3. If the first char of the id is a num, # transform it to an Upper case alpha start from 'A', # such as '0' -> 'A', '1' -> 'B'. # e.g. # generate_share_name({ # 'id': '46CF5E85-D618-4023-8727-6A1EA9292954', # ... # }) # returns 'E6cf5e85d618402387276a1ea9292954' name = share['id'].replace('-', '').lower() if name[0] in '0123456789': name = chr(ord('A') + ord(name[0]) - ord('0')) + name[1:] return name def get_network_allocations_number(self): """Get the number of network interfaces to be created.""" return 0 def create_share(self, context, share, share_server=None): """Create a new share instance.""" share_name = self.generate_share_name(share) share_size = share['size'] share_proto = share['share_proto'] pool_name = share_utils.extract_host(share['host'], level='pool') self.assistant.create_share( share_name, pool_name, share_size, share_proto ) return self.assistant.get_export_locations(share_name, share_proto) def delete_share(self, context, share, share_server=None): """Delete the given share instance.""" share_name = self.generate_share_name(share) share_proto = share['share_proto'] self.assistant.delete_share(share_name, share_proto) def extend_share(self, share, new_size, share_server=None): """Extend the share instance's size to 
new size.""" share_name = self.generate_share_name(share) self.assistant.extend_share(share_name, new_size) def ensure_share(self, context, share, share_server=None): """Ensure that the share instance is exported.""" share_name = self.generate_share_name(share) share_proto = share['share_proto'] return self.assistant.get_export_locations(share_name, share_proto) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update the share instance's access rule.""" share_name = self.generate_share_name(share) share_proto = share['share_proto'] @coordination.synchronized('inspur-instorage-access-' + share_name) def _update_access(name, proto, rules, add_rules, delete_rules): self.assistant.update_access( name, proto, rules, add_rules, delete_rules ) _update_access( share_name, share_proto, access_rules, add_rules, delete_rules ) class InStorageAssistant(object): NFS_CLIENT_SPEC_PATTERN = ( '%(ip)s/%(mask)s:%(rights)s:%(all_squash)s:%(root_squash)s' ) CIFS_CLIENT_RIGHT_PATTERN = ( '%(type)s:%(name)s:%(rights)s' ) def __init__(self, ssh_runner): self.ssh = InStorageSSH(ssh_runner) @staticmethod def handle_keyerror(cmd, out): msg = (_('Could not find key in output of command %(cmd)s: %(out)s.') % {'out': out, 'cmd': cmd}) raise exception.ShareBackendException(msg=msg) def size_to_gb(self, size): new_size = 0 if 'P' in size: new_size = int(float(size.rstrip('PB')) * units.Mi) elif 'T' in size: new_size = int(float(size.rstrip('TB')) * units.Ki) elif 'G' in size: new_size = int(float(size.rstrip('GB')) * 1) elif 'M' in size: mb_size = float(size.rstrip('MB')) new_size = int((mb_size + units.Ki - 1) / units.Ki) return new_size def get_available_pools(self): nas_pools = self.ssh.lsnaspool() return [pool['pool_name'] for pool in nas_pools] def get_pools_attr(self, backend_pools): pools = {} fs_attr = self.ssh.lsfs() nas_pools = self.ssh.lsnaspool() for pool_attr in nas_pools: pool_name = pool_attr['pool_name'] if 
pool_name not in backend_pools: continue total_used_capacity = 0 total_allocated_capacity = 0 for fs in fs_attr: if fs['pool_name'] != pool_name: continue allocated = self.size_to_gb(fs['total_capacity']) used = self.size_to_gb(fs['used_capacity']) total_allocated_capacity += allocated total_used_capacity += used available = self.size_to_gb(pool_attr['available_capacity']) pool = { 'pool_name': pool_name, 'total_capacity_gb': total_allocated_capacity + available, 'free_capacity_gb': available, 'allocated_capacity_gb': total_allocated_capacity, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'qos': False, 'dedupe': False, 'compression': False, 'thin_provisioning': False, 'max_over_subscription_ratio': 0 } pools[pool_name] = pool return pools def get_nodes_info(self): """Return a dictionary containing information of system's nodes.""" nodes = {} resp = self.ssh.lsnasportip() for port in resp: try: # Port is invalid if it has no IP configured. 
if port['ip'] == '': continue node_name = port['node_name'] if node_name not in nodes: nodes[node_name] = {} node = nodes[node_name] node[port['id']] = port except KeyError: self.handle_keyerror('lsnasportip', port) return nodes @staticmethod def get_fsname_by_name(name): return ('%(fsname)s' % {'fsname': name})[0:32] @staticmethod def get_dirname_by_name(name): return ('%(dirname)s' % {'dirname': name})[0:32] def get_dirpath_by_name(self, name): fsname = self.get_fsname_by_name(name) dirname = self.get_dirname_by_name(name) return '/fs/%(fsname)s/%(dirname)s' % { 'fsname': fsname, 'dirname': dirname } def create_share(self, name, pool, size, proto): """Create a share with given info.""" # use one available node as the primary node nodes = self.get_nodes_info() if len(nodes) == 0: msg = _('No valid node, be sure the NAS Port IP is configured') raise exception.ShareBackendException(msg=msg) node_name = [key for key in nodes.keys()][0] # first create the file system on which share will be created fsname = self.get_fsname_by_name(name) self.ssh.addfs(fsname, pool, size, node_name) # then create the directory used for the share dirpath = self.get_dirpath_by_name(name) self.ssh.addnasdir(dirpath) # For CIFS, we need to create a CIFS share. # For NAS, the share is automatically added when the first # 'access spec' is added on it. if proto == 'CIFS': self.ssh.addcifs(name, dirpath) def check_share_exist(self, name): """Check whether the specified share exist on backend.""" fsname = self.get_fsname_by_name(name) for fs in self.ssh.lsfs(): if fs['fs_name'] == fsname: return True return False def delete_share(self, name, proto): """Delete the given share.""" if not self.check_share_exist(name): LOG.warning('Share %s does not exist on the backend.', name) return # For CIFS, we have to delete the share first. # For NAS, when the last client access spec is removed from # it, the share is automatically deleted. 
if proto == 'CIFS': self.ssh.rmcifs(name) # then delete the directory dirpath = self.get_dirpath_by_name(name) self.ssh.rmnasdir(dirpath) # at last delete the file system fsname = self.get_fsname_by_name(name) self.ssh.rmfs(fsname) def extend_share(self, name, new_size): """Extend a given share to a new size. :param name: the name of the share. :param new_size: the new size the share should be. :return: """ # first get the original capacity old_size = None fsname = self.get_fsname_by_name(name) for fs in self.ssh.lsfs(): if fs['fs_name'] == fsname: old_size = self.size_to_gb(fs['total_capacity']) break if old_size is None: msg = _('share %s is not available') % name raise exception.ShareBackendException(msg=msg) LOG.debug('Extend fs %s from %dGB to %dGB', fsname, old_size, new_size) self.ssh.expandfs(fsname, new_size - old_size) def get_export_locations(self, name, share_proto): """Get the export locations of a given share. :param name: the name of the share. :param share_proto: the protocol of the share. :return: a list of export locations. 
""" if share_proto == 'NFS': dirpath = self.get_dirpath_by_name(name) pattern = '%(ip)s:' + dirpath elif share_proto == 'CIFS': pattern = '\\\\%(ip)s\\' + name else: msg = _('share protocol %s is not supported') % share_proto raise exception.ShareBackendException(msg=msg) # we need get the node so that we know which port ip we can use node_name = None fsname = self.get_fsname_by_name(name) for node in self.ssh.lsnode(): for fs in self.ssh.lsfs(node['name']): if fs['fs_name'] == fsname: node_name = node['name'] break if node_name: break if node_name is None: msg = _('share %s is not available') % name raise exception.ShareBackendException(msg=msg) locations = [] ports = self.ssh.lsnasportip() for port in ports: if port['node_name'] == node_name and port['ip'] != '': location = pattern % {'ip': port['ip']} locations.append({ 'path': location, 'is_admin_only': False, 'metadata': {} }) return locations def classify_nfs_client_spec(self, client_spec, dirpath): nfslist = self.ssh.lsnfslist(dirpath) if len(nfslist): nfsinfo = self.ssh.lsnfsinfo(dirpath) spec_set = set([ self.NFS_CLIENT_SPEC_PATTERN % i for i in nfsinfo ]) else: spec_set = set() client_spec_set = set(client_spec) del_spec = spec_set.difference(client_spec_set) add_spec = client_spec_set.difference(spec_set) return list(add_spec), list(del_spec) def access_rule_to_client_spec(self, access_rule): if access_rule['access_type'] != 'ip': msg = _('only ip access type is supported when using NFS protocol') raise exception.ShareBackendException(msg=msg) network = ipaddress.ip_network(str(access_rule['access_to'])) if network.version != 4: msg = _('only IPV4 is accepted when using NFS protocol') raise exception.ShareBackendException(msg=msg) client_spec = self.NFS_CLIENT_SPEC_PATTERN % { 'ip': str(network.network_address), 'mask': str(network.netmask), 'rights': access_rule['access_level'], 'all_squash': 'all_squash', 'root_squash': 'root_squash' } return client_spec def update_nfs_access(self, share_name, 
access_rules, add_rules, delete_rules): """Update a NFS share's access rule.""" dirpath = self.get_dirpath_by_name(share_name) if add_rules or delete_rules: add_spec = [ self.access_rule_to_client_spec(r) for r in add_rules ] del_spec = [ self.access_rule_to_client_spec(r) for r in delete_rules ] _, can_del_spec = self.classify_nfs_client_spec( [], dirpath ) to_del_set = set(del_spec) can_del_set = set(can_del_spec) will_del_set = to_del_set.intersection(can_del_set) del_spec = list(will_del_set) else: access_spec = [ self.access_rule_to_client_spec(r) for r in access_rules ] add_spec, del_spec = self.classify_nfs_client_spec( access_spec, dirpath ) for spec in del_spec: self.ssh.rmnfsclient(dirpath, spec) for spec in add_spec: self.ssh.addnfsclient(dirpath, spec) def classify_cifs_rights(self, access_rights, share_name): cifsinfo = self.ssh.lscifsinfo(share_name) rights_set = set([ self.CIFS_CLIENT_RIGHT_PATTERN % i for i in cifsinfo ]) access_rights_set = set(access_rights) del_rights = rights_set.difference(access_rights_set) add_rights = access_rights_set.difference(rights_set) return list(add_rights), list(del_rights) def access_rule_to_rights(self, access_rule): if access_rule['access_type'] != 'user': msg = _('only user access type is supported' ' when using CIFS protocol') raise exception.ShareBackendException(msg=msg) rights = self.CIFS_CLIENT_RIGHT_PATTERN % { 'type': 'LU', 'name': access_rule['access_to'], 'rights': access_rule['access_level'] } return rights def update_cifs_access(self, share_name, access_rules, add_rules, delete_rules): """Update a CIFS share's access rule.""" if add_rules or delete_rules: add_rights = [ self.access_rule_to_rights(r) for r in add_rules ] del_rights = [ self.access_rule_to_rights(r) for r in delete_rules ] else: access_rights = [ self.access_rule_to_rights(r) for r in access_rules ] add_rights, del_rights = self.classify_cifs_rights( access_rights, share_name ) for rights in del_rights: self.ssh.rmcifsuser(share_name, 
rights) for rights in add_rights: self.ssh.addcifsuser(share_name, rights) @staticmethod def check_access_type(access_type, *rules): rule_chain = itertools.chain(*rules) if all([r['access_type'] == access_type for r in rule_chain]): return True else: return False def update_access(self, share_name, share_proto, access_rules, add_rules, delete_rules): if share_proto == 'CIFS': if self.check_access_type('user', access_rules, add_rules, delete_rules): self.update_cifs_access(share_name, access_rules, add_rules, delete_rules) else: msg = _("Only %s access type allowed.") % "user" raise exception.InvalidShareAccess(reason=msg) elif share_proto == 'NFS': if self.check_access_type('ip', access_rules, add_rules, delete_rules): self.update_nfs_access(share_name, access_rules, add_rules, delete_rules) else: msg = _("Only %s access type allowed.") % "ip" raise exception.InvalidShareAccess(reason=msg) else: msg = _('share protocol %s is not supported') % share_proto raise exception.ShareBackendException(msg=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/lvm.py0000664000175000017500000006273200000000000020261 0ustar00zuulzuul00000000000000# Copyright 2012 NetApp # Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ LVM Driver for shares. 
""" import ipaddress import math import os import re from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import timeutils from manila import exception from manila.i18n import _ from manila.privsep import common as privsep_common from manila.privsep import filesystem as privsep_filesystem from manila.privsep import lvm as privsep_lvm from manila.privsep import os as privsep_os from manila.share import driver from manila.share.drivers import generic from manila.share import utils as share_utils from manila import utils LOG = log.getLogger(__name__) share_opts = [ cfg.StrOpt('lvm_share_export_root', default='$state_path/mnt', help='Base folder where exported shares are located.'), cfg.ListOpt('lvm_share_export_ips', help='List of IPs to export shares belonging to the LVM ' 'storage driver.'), cfg.IntOpt('lvm_share_mirrors', default=0, help='If set, create LVMs with multiple mirrors. Note that ' 'this requires lvm_mirrors + 2 PVs with available space.'), cfg.StrOpt('lvm_share_volume_group', default='lvm-shares', help='Name for the VG that will contain exported shares.'), cfg.ListOpt('lvm_share_helpers', default=[ 'CIFS=manila.share.drivers.helpers.CIFSHelperUserAccess', 'NFS=manila.share.drivers.helpers.NFSHelper', ], help='Specify list of share export helpers.'), ] CONF = cfg.CONF CONF.register_opts(share_opts) CONF.register_opts(generic.share_opts) class LVMMixin(driver.ExecuteMixin): def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" try: out, err = privsep_lvm.list_vgs_get_name() except processutils.ProcessExecutionError: msg = _("Failed to get LVM volume group names.") raise exception.ShareBackendException(msg=msg) volume_groups = out.split() if self.configuration.lvm_share_volume_group not in volume_groups: msg = (_("Share volume group %s doesn't exist.") % self.configuration.lvm_share_volume_group) raise 
exception.InvalidParameterValue(err=msg) if not self.configuration.lvm_share_export_ips: msg = _("The option lvm_share_export_ips must be specified.") raise exception.InvalidParameterValue(err=msg) def _allocate_container(self, share): sizestr = '%sG' % share['size'] mirrors = 0 region_size = 0 if self.configuration.lvm_share_mirrors: mirrors = self.configuration.lvm_share_mirrors terras = int(sizestr[:-1]) / 1024.0 if terras >= 1.5: rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) # NOTE(vish): Next power of two for region size. See: # http://red.ht/U2BPOD region_size = str(rsize) action_args = [ share['size'], share['name'], self.configuration.lvm_share_volume_group, mirrors, region_size ] privsep_common.execute_with_retries( privsep_lvm.lvcreate, action_args, self.configuration.num_shell_tries) device_name = self._get_local_path(share) try: privsep_filesystem.make_filesystem( self.configuration.share_volume_fstype, device_name) except processutils.ProcessExecutionError: raise def _get_mount_point_name(self, share): return share.get('mount_point_name') or share.get('name') def _extend_container(self, share, device_name, size): privsep_common.execute_with_retries( privsep_lvm.lvextend, [device_name, size], self.configuration.num_shell_tries) def _deallocate_container(self, share_name): """Deletes a logical volume for share.""" try: action_args = [ self.configuration.lvm_share_volume_group, share_name] privsep_common.execute_with_retries( privsep_lvm.lvremove, action_args, self.configuration.num_shell_tries) except exception.ProcessExecutionError as exc: err_pattern = re.compile(".*failed to find.*|.*not found.*", re.IGNORECASE) if not err_pattern.match(exc.stderr): LOG.exception("Error deleting volume") raise LOG.warning("Volume not found: %s", exc.stderr) def _create_snapshot(self, context, snapshot): """Creates a snapshot.""" orig_lv_name = "%s/%s" % (self.configuration.lvm_share_volume_group, snapshot['share_name']) action_args = [ 
snapshot['share']['size'], snapshot['name'], orig_lv_name] privsep_common.execute_with_retries( privsep_lvm.lv_snapshot_create, action_args, self.configuration.num_shell_tries) self._set_random_uuid_to_device(snapshot) def _set_random_uuid_to_device(self, share_or_snapshot): # NOTE(vponomaryov): 'tune2fs' is required to make # filesystem of share created from snapshot have # unique ID, in case of LVM volumes, by default, # it will have the same UUID as source volume. Closes #1645751 # NOTE(gouthamr): Executing tune2fs -U only works on # a recently checked filesystem. # See: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=857336 device_path = self._get_local_path(share_or_snapshot) try: privsep_filesystem.e2fsck(device_path) privsep_filesystem.tune2fs(device_path) except processutils.ProcessExecutionError: msg = _("Failed to check or modify filesystems.") raise exception.ShareBackendException(msg=msg) def create_snapshot(self, context, snapshot, share_server=None): self._create_snapshot(context, snapshot) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" self._deallocate_container(snapshot['name']) class LVMShareDriver(LVMMixin, driver.ShareDriver): """Executes commands relating to Shares.""" def __init__(self, *args, **kwargs): """Do initialization.""" super(LVMShareDriver, self).__init__([False], *args, **kwargs) self.configuration.append_config_values(share_opts) self.configuration.append_config_values(generic.share_opts) self.configuration.share_mount_path = ( self.configuration.lvm_share_export_root) self._helpers = None self.configured_ip_version = None self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'LVM' # Set of parameters used for compatibility with # Generic driver's helpers. 
self.share_server = { 'instance_id': self.backend_name, 'lock_name': 'manila_lvm', } self.share_server['public_addresses'] = ( self.configuration.lvm_share_export_ips ) self.ipv6_implemented = True def _ssh_exec_as_root(self, server, command, check_exit_code=True): kwargs = {} if 'sudo' in command: kwargs['run_as_root'] = True command.remove('sudo') kwargs['check_exit_code'] = check_exit_code return self._execute(*command, **kwargs) def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(LVMShareDriver, self).do_setup(context) self._setup_helpers() def _setup_helpers(self): """Initializes protocol-specific NAS drivers.""" self._helpers = {} for helper_str in self.configuration.lvm_share_helpers: share_proto, _, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) # TODO(rushiagr): better way to handle configuration # instead of just passing to the helper self._helpers[share_proto.upper()] = helper( self._execute, self._ssh_exec_as_root, self.configuration) def _get_local_path(self, share): # The escape characters are expected by the device mapper. 
escaped_group = ( self.configuration.lvm_share_volume_group.replace('-', '--')) escaped_name = share['name'].replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def _update_share_stats(self): """Retrieve stats info from share volume group.""" data = { 'share_backend_name': self.backend_name, 'storage_protocol': 'NFS_CIFS', 'reserved_percentage': self.configuration.reserved_share_percentage, 'reserved_snapshot_percentage': (self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), 'reserved_share_extend_percentage': (self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'mount_snapshot_support': True, 'driver_name': 'LVMShareDriver', 'pools': self.get_share_server_pools(), } super(LVMShareDriver, self)._update_share_stats(data) def get_share_server_pools(self, share_server=None): try: out, err = privsep_lvm.get_vgs( self.configuration.lvm_share_volume_group) except processutils.ProcessExecutionError: msg = _("Failed to list LVM Volume Groups.") raise exception.ShareBackendException(msg=msg) total_size = re.findall(r"VSize\s[0-9.]+g", out)[0][6:-1] free_size = re.findall(r"VFree\s[0-9.]+g", out)[0][6:-1] return [{ 'pool_name': 'lvm-single-pool', 'total_capacity_gb': float(total_size), 'free_capacity_gb': float(free_size), 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'mount_point_name_support': True, }, ] def create_share(self, context, share, share_server=None): self._allocate_container(share) # create file system device_name = self._get_local_path(share) share_export_location = self._get_mount_point_name(share) location = self._get_helper(share).create_exports( self.share_server, share_export_location) self._mount_device(share, device_name) return location def 
create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create share from snapshot.""" self._allocate_container(share) snapshot_device_name = self._get_local_path(snapshot) share_device_name = self._get_local_path(share) self._set_random_uuid_to_device(share) self._copy_volume( snapshot_device_name, share_device_name, share['size']) share_export_location = self._get_mount_point_name(share) location = self._get_helper(share).create_exports( self.share_server, share_export_location) self._mount_device(share, share_device_name) return location def delete_share(self, context, share, share_server=None): self._unmount_device(share, raise_if_missing=False, retry_busy_device=True) self._delete_share(context, share) self._deallocate_container(share['name']) def _unmount_device(self, share_or_snapshot, raise_if_missing=True, retry_busy_device=False): """Unmount the filesystem of a share or snapshot LV.""" mount_path = self._get_mount_path(share_or_snapshot) if os.path.exists(mount_path): retries = 10 if retry_busy_device else 1 @utils.retry(retry_param=exception.ShareBusyException, retries=retries) def _unmount_device_with_retry(): try: privsep_os.umount(mount_path) except exception.ProcessExecutionError as exc: if 'is busy' in exc.stderr.lower(): raise exception.ShareBusyException( reason=share_or_snapshot['name']) elif 'not mounted' in exc.stderr.lower(): if raise_if_missing: LOG.error('Unable to find device: %s', exc) raise else: LOG.error('Unable to umount: %s', exc) raise _unmount_device_with_retry() # remove dir try: privsep_os.rmdir(mount_path) except exception.ProcessExecutionError: msg = _("Failed to remove the directory.") raise exception.ShareBackendException(msg=msg) def ensure_shares(self, context, shares): updates = {} for share in shares: updates[share['id']] = { 'export_locations': self.ensure_share(context, share)} return updates def ensure_share(self, ctx, share, share_server=None): """Ensure that 
storage are mounted and exported.""" device_name = self._get_local_path(share) self._mount_device(share, device_name) share_export_location = self._get_mount_point_name(share) return self._get_helper(share).create_exports( self.share_server, share_export_location, recreate=True ) def _delete_share(self, ctx, share): share_export_location = self._get_mount_point_name(share) """Delete a share.""" try: self._get_helper(share).remove_exports( self.share_server, share_export_location) except exception.ProcessExecutionError: LOG.warning("Can't remove share %r", share['id']) except exception.InvalidShare as exc: LOG.warning(exc) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share. This driver has two different behaviors according to parameters: 1. Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' shall be empty. Previously existing access rules are cleared and then added back according to 'access_rules'. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Rules in 'access_rules' are ignored and only rules from 'add_rules' and 'delete_rules' are applied. :param context: Current context :param share: Share model with share data. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param update_rules: Empty List or List of access rules which should be updated. access_rules already contains these rules. 
:param share_server: None or Share server model """ share_export_location = self._get_mount_point_name(share) self._get_helper(share).update_access(self.share_server, share_export_location, access_rules, add_rules=add_rules, delete_rules=delete_rules) def _get_helper(self, share): if share['share_proto'].lower().startswith('nfs'): return self._helpers['NFS'] elif share['share_proto'].lower().startswith('cifs'): return self._helpers['CIFS'] else: raise exception.InvalidShare(reason='Wrong share protocol') def _mount_device(self, share_or_snapshot, device_name): """Mount LV for share or snapshot and ignore if already mounted.""" mount_path = self._get_mount_path(share_or_snapshot) self._execute('mkdir', '-p', mount_path) try: privsep_os.mount(device_name, mount_path) privsep_os.chmod('777', mount_path) except exception.ProcessExecutionError: out, err = privsep_os.list_mounts() if device_name in out: LOG.warning("%s is already mounted", device_name) else: raise return mount_path def _get_mount_path(self, share_or_snapshot): """Returns path where share or snapshot is mounted.""" return os.path.join(self.configuration.share_mount_path, share_or_snapshot['name']) def _copy_volume(self, srcstr, deststr, size_in_g): # Use O_DIRECT to avoid thrashing the system buffer cache # Check whether O_DIRECT is supported use_direct_io = ( privsep_os.is_data_definition_direct_io_supported(srcstr, deststr)) # Perform the copy try: privsep_os.data_definition( srcstr, deststr, (size_in_g * 1024), use_direct_io=use_direct_io) except exception.ProcessExecutionError: msg = _("Failed while copying from the snapshot to the share.") raise exception.ShareBackendException(msg=msg) def extend_share(self, share, new_size, share_server=None): device_name = self._get_local_path(share) self._extend_container(share, device_name, new_size) def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): share = snapshot['share'] snapshot_export_location = 
self._get_mount_point_name(snapshot) share_export_location = self._get_mount_point_name(share) # Temporarily remove all access rules self._get_helper(share).update_access(self.share_server, snapshot_export_location, [], [], []) self._get_helper(share).update_access(self.share_server, share_export_location, [], [], []) # Unmount the snapshot filesystem self._unmount_device(snapshot) # Unmount the share filesystem self._unmount_device(share) # Merge the snapshot LV back into the share, reverting it try: privsep_lvm.lvconvert(self.configuration.lvm_share_volume_group, snapshot['name']) except exception.ProcessExecutionError: msg = _('Failed to revert the share to the given snapshot.') raise exception.ShareBackendException(msg=msg) # Now recreate the snapshot that was destroyed by the merge self._create_snapshot(context, snapshot) # At this point we can mount the share again device_name = self._get_local_path(share) self._mount_device(share, device_name) # Also remount the snapshot device_name = self._get_local_path(snapshot) self._mount_device(snapshot, device_name) share_export_location = self._get_mount_point_name(share) snapshot_export_location = self._get_mount_point_name(share) # Lastly we add all the access rules back self._get_helper(share).update_access(self.share_server, share_export_location, share_access_rules, [], []) snapshot_access_rules, __, __ = share_utils.change_rules_to_readonly( snapshot_access_rules, [], []) self._get_helper(share).update_access(self.share_server, snapshot_export_location, snapshot_access_rules, [], []) def create_snapshot(self, context, snapshot, share_server=None): self._create_snapshot(context, snapshot) device_name = self._get_local_path(snapshot) self._mount_device(snapshot, device_name) helper = self._get_helper(snapshot['share']) exports = helper.create_exports(self.share_server, snapshot['name']) return {'export_locations': exports} def delete_snapshot(self, context, snapshot, share_server=None): 
self._unmount_device(snapshot, raise_if_missing=False) super(LVMShareDriver, self).delete_snapshot(context, snapshot, share_server) def get_configured_ip_versions(self): if self.configured_ip_version is None: try: self.configured_ip_version = [] for ip in self.configuration.lvm_share_export_ips: self.configured_ip_version.append( ipaddress.ip_address(str(ip)).version) except Exception: message = (_("Invalid 'lvm_share_export_ips' option supplied " "%s.") % self.configuration.lvm_share_export_ips) raise exception.InvalidInput(reason=message) return self.configured_ip_version def snapshot_update_access(self, context, snapshot, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given snapshot. This driver has two different behaviors according to parameters: 1. Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' shall be empty. Previously existing access rules are cleared and then added back according to 'access_rules'. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Rules in 'access_rules' are ignored and only rules from 'add_rules' and 'delete_rules' are applied. :param context: Current context :param snapshot: Snapshot model with snapshot data. :param access_rules: All access rules for given snapshot :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. 
:param share_server: None or Share server model """ helper = self._get_helper(snapshot['share']) access_rules, add_rules, delete_rules = ( share_utils.change_rules_to_readonly( access_rules, add_rules, delete_rules) ) helper.update_access(self.share_server, snapshot['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules) def update_share_usage_size(self, context, shares): updated_shares = [] out, err = self._execute( 'df', '-l', '--output=target,used', '--block-size=g') gathered_at = timeutils.utcnow() for share in shares: try: mount_path = self._get_mount_path(share) if os.path.exists(mount_path): used_size = (re.findall( mount_path + r"\s*[0-9.]+G", out)[0]. split(' ')[-1][:-1]) updated_shares.append({'id': share['id'], 'used_size': used_size, 'gathered_at': gathered_at}) else: raise exception.NotFound( _("Share mount path %s could not be " "found.") % mount_path) except Exception: LOG.exception("Failed to gather 'used_size' for share %s.", share['id']) return updated_shares def get_backend_info(self, context): return { 'export_ips': ','.join(self.share_server['public_addresses']), 'db_version': share_utils.get_recent_db_migration_id(), } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9216716 manila-21.0.0/manila/share/drivers/macrosan/0000775000175000017500000000000000000000000020702 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/macrosan/__init__.py0000664000175000017500000000000000000000000023001 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/macrosan/macrosan_constants.py0000664000175000017500000000220400000000000025151 0ustar00zuulzuul00000000000000# Copyright (c) 2022 MacroSAN Technologies Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. NFS_NON_CONFIG = 0 NFS_ENABLED = 1 NFS_DISABLED = 2 NFS_EXCEPTION = 3 NFS_NON_SUPPORTED = 0 NFS_SUPPORTED = 1 CIFS_SHARE_MODE = '2' CIFS_ENABLED = '1' CIFS_NON_CONFIG = '-1' CIFS_DISABLED = '-2' CIFS_EXCEPTION = '-3' USER_NOT_EXIST = '0' USER_EXIST = '1' USER_FORMAT_ERROR = '2' GROUP_NOT_EXIST = '0' GROUP_EXIST = '1' GROUP_FORMAT_ERROR = '2' CODE_SUCCESS = 0 CODE_NOT_FOUND = 4 CODE_SOURCE_NOT_EXIST = 403 TOKEN_EXPIRED = 301 TOKEN_VERIFY_FAILED = 302 TOKEN_FORMAT_ERROR = 303 TOKEN_REQUIRED = 304 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/macrosan/macrosan_helper.py0000664000175000017500000005560300000000000024427 0ustar00zuulzuul00000000000000# Copyright (c) 2022 MacroSAN Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re from oslo_config import cfg from oslo_log import log from oslo_utils import units from manila import exception from manila.i18n import _ from manila.share.drivers.macrosan import macrosan_constants as constants from manila.share.drivers.macrosan import rest_helper from manila.share import utils as share_utils CONF = cfg.CONF LOG = log.getLogger(__name__) class MacrosanHelper(object): def __init__(self, configuration): self.configuration = configuration self.rest = rest_helper.RestHelper(self.configuration) self.snapshot_support = False self.replication_support = False self.pools = self.configuration.macrosan_share_pools def check_share_service(self): nfs_service = self.rest._get_nfs_service_status() if nfs_service['serviceStatus'] not in [constants.NFS_NON_CONFIG, constants.NFS_ENABLED, constants.NFS_DISABLED]: raise exception.MacrosanBackendExeption( reason=_("nfs service exception. Please check backend")) elif nfs_service['serviceStatus'] == constants.NFS_NON_CONFIG: self.rest._config_nfs_service() self.rest._start_nfs_service() elif nfs_service['serviceStatus'] == constants.NFS_DISABLED: if (nfs_service['nfs3Status'] == constants.NFS_NON_SUPPORTED and nfs_service['nfs4Status'] == constants.NFS_NON_SUPPORTED): self.rest._config_nfs_service() self.rest._start_nfs_service() else: if (nfs_service['nfs3Status'] == constants.NFS_NON_SUPPORTED and nfs_service['nfs4Status'] == constants.NFS_NON_SUPPORTED): self.rest._config_nfs_service() cifs_status = self.rest._get_cifs_service_status() if cifs_status == constants.CIFS_EXCEPTION: raise exception.MacrosanBackendExeption( reason=_("cifs service exception. 
Please check backend")) elif cifs_status == constants.CIFS_NON_CONFIG: """need config first, then start service""" self.rest._config_cifs_service() self.rest._start_cifs_service() elif cifs_status == constants.CIFS_DISABLED: self.rest._start_cifs_service() status = self.rest._get_cifs_service_status() if status == constants.CIFS_SHARE_MODE: self.rest._config_cifs_service() elif cifs_status == constants.CIFS_SHARE_MODE: self.rest._config_cifs_service() def do_setup(self): """get token""" self.rest.login() def create_share(self, share, share_server=None): """Create a share""" pool_name, share_name, proto = self._get_share_instance_pnp(share) share_size = ''.join((str(share['size']), 'GB')) # first create filesystem self.rest._create_filesystem(fs_name=share_name, pool_name=pool_name, filesystem_quota=share_size) share_path = self._generate_share_path(share_name) # second create filesystem dir self.rest._create_filesystem_dir(share_path) # third create nfs or cifs share if proto == 'NFS': self.rest._create_nfs_share(share_path=share_path) else: user_name = 'manilanobody' user_passwd = 'manilanobody' group_name = 'manilanobody' ret = self._ensure_user(user_name, user_passwd, group_name) if not ret: self.rest._delete_filesystem(share_name) raise exception.MacrosanBackendExeption( reason=(_( 'Failed to create share %(share)s. 
Reason: ' 'username %(user_name)s error.') % {'share': share_name, 'user_name': user_name})) rw_list = [user_name] rw_list_type = ['0'] self.rest._create_cifs_share(share_name=share_name, share_path=share_path, rw_list=rw_list, rw_list_type=rw_list_type) location = self._get_location_path(share_path, share_name, proto) return location def delete_share(self, share, share_server=None): """Delete a share.""" pool, share_name, proto = self._get_share_instance_pnp(share) share_path = self._generate_share_path(share_name) backend_share = self._get_share(share_path, proto) if not backend_share: LOG.error(f'Share {share_name} not found.') filesystem = self.rest._get_filesystem(share_name) if filesystem: self.rest._delete_filesystem(share_name) else: if proto == 'NFS': self.rest._delete_nfs_share(share_path) else: self.rest._delete_cifs_share(share_name, share_path) self.rest._delete_filesystem(share_name) def extend_share(self, share, new_size, share_server=None): """Extend share""" pool, share_name, proto = self._get_share_instance_pnp(share) share_path = self._generate_share_path(share_name) backend_share = self._get_share(share_path, proto) if not backend_share: msg = f"Can't find the share by share name: {share_name}." msg = _(msg) LOG.error(msg) raise exception.ShareResourceNotFound(share_id=share['id']) # storage size logic already in manila/share/api.py extend func # size param need unit new_size = ''.join((str(new_size), 'GB')) self.rest._update_share_size(share_name, new_size) def shrink_share(self, share, new_size, share_server=None): """Shrink share""" pool, share_name, proto = self._get_share_instance_pnp(share) share_path = self._generate_share_path(share_name) backend_share = self._get_share(share_path, proto) if not backend_share: msg = f"Can't find the share by share name: {share_name}." 
msg = _(msg) LOG.error(msg) raise exception.ShareResourceNotFound(share_id=share['id']) filesystem_info = self.rest._get_filesystem(share_name) used_size = self._unit_convert_toGB(filesystem_info['usedCapacity']) if new_size <= used_size: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) # storage size logic already in manila/share/api.py shrink func new_size = ''.join((str(new_size), 'GB')) self.rest._update_share_size(share_name, new_size) def ensure_share(self, share, share_server=None): """Enusre that share is exported""" pool, share_name, proto = self._get_share_instance_pnp(share) share_path = self._generate_share_path(share_name) backend_share = self._get_share(share_path, proto) if not backend_share: raise exception.ShareResourceNotFound(share_id=share['id']) location = self._get_location_path(share_path, share_name, proto) return [location] def _allow_access(self, share, access, share_server=None): """Allow access to the share.""" pool, share_name, proto = self._get_share_instance_pnp(share) share_path = self._generate_share_path(share_name) access_level = access['access_level'] share_id = share['id'] if access_level not in ('rw', 'ro'): raise exception.InvalidShareAccess( reason=(_('Unsupported access level: %s.') % access_level)) if proto == 'NFS': self._allow_nfs_access(share_path, share_name, access, share_id) elif proto == 'CIFS': self._allow_cifs_access(share_path, share_name, access, share_id) def _allow_nfs_access(self, share_path, share_name, access, share_id): """Allow nfs access.""" access_type = access['access_type'] access_to = access['access_to'] access_level = access['access_level'] # Only use 'ip', # input "*" replace all, or ip 172.0.1.11 , # or ip network segment 172.0.1.11/255.255.0.0 172.0.1.11/16 if access_type != 'ip': message = (_('NFS shares only allow IP access types. 
' 'access_type: %(access_type)s') % {'access_type': access_type}) raise exception.InvalidShareAccess(reason=message) backend_share = self.rest._get_nfs_share(share_path) if not backend_share: msg = (_("Can't find the share by share name: %s.") % share_name) LOG.error(msg) raise exception.ShareResourceNotFound(share_id=share_id) if access_to == '0.0.0.0/0': access_to = '*' share_client = self.rest._get_access_from_nfs_share(share_path, access_to) if share_client: if access_level != share_client['accessRight']: self.rest._change_nfs_access_rest(share_path, access_to, access_level) else: self.rest._allow_nfs_access_rest(share_path, access_to, access_level) def _allow_cifs_access(self, share_path, share_name, access, share_id): """Allow cifs access.""" access_type = access['access_type'] access_to = access['access_to'] access_level = access['access_level'] if access_type != 'user': message = _('Only user access type is ' 'allowed for CIFS shares.') raise exception.InvalidShareAccess(reason=message) backend_share = self.rest._get_cifs_share(share_path) if not backend_share: msg = (_("Can't find the share by share name: %s.") % share_name) LOG.error(msg) raise exception.ShareResourceNotFound(share_id=share_id) share_client = self.rest._get_access_from_cifs_share(share_path, access_to) if share_client: if access_level != share_client['accessRight']: self.rest._change_cifs_access_rest(share_path, access_to, access_level, share_client['ugType']) else: self.rest._allow_cifs_access_rest(share_path, access_to, access_level) def _deny_access(self, share, access, share_server=None): """Deny access to the share.""" pool, share_name, proto = self._get_share_instance_pnp(share) share_path = self._generate_share_path(share_name) if proto == 'NFS': self._deny_nfs_access(share_path, share_name, access) else: self._deny_cifs_access(share_path, share_name, access) def _deny_nfs_access(self, share_path, share_name, access): """Deny nfs access.""" access_type = access['access_type'] 
def _deny_cifs_access(self, share_path, share_name, access):
    """Remove a single CIFS (user) access rule from the share."""
    rule_type = access['access_type']
    rule_target = access['access_to']
    if rule_type != 'user':
        LOG.error('Only USER access types are allowed '
                  'for CIFS shares.')
        return
    client = self.rest._get_access_from_cifs_share(share_path,
                                                   rule_target)
    if not client:
        LOG.error(f'Could not list the share access for share '
                  f'{share_name}')
        return
    self.rest._delete_cifs_access_rest(share_path,
                                       client['ugName'],
                                       client['ugType'])

def _clear_access(self, share, share_server=None):
    """Remove all access rules of the share"""
    pool, share_name, proto = self._get_share_instance_pnp(share)
    share_path = self._generate_share_path(share_name)
    rules = self._get_all_access_from_share(share_path, proto)
    if not rules:
        LOG.error(f'Could not list the share access for share '
                  f'{share_name}')
        return
    if proto == 'NFS':
        for rule in rules:
            # Keep the placeholder client created with the share
            # (IPv4 Address Blocks Reserved for Documentation).
            if rule['access_to'] == '192.0.2.0':
                continue
            self.rest._delete_nfs_access_rest(share_path,
                                              rule['access_to'])
    elif proto == 'CIFS':
        for rule in rules:
            # Keep the built-in 'manilanobody' local-user rule.
            if (rule['access_to'] == 'manilanobody'
                    and rule['ugType'] == '0'):
                continue
            self.rest._delete_cifs_access_rest(share_path,
                                               rule['access_to'],
                                               rule['ugType'])

def update_access(self, share, access_rules, add_rules, delete_rules,
                  share_server=None):
    """Update access rules list."""
    access_updates = {}

    def _try_allow(access_rule):
        # Attempt to add one rule; record an 'error' state on failure.
        try:
            self._allow_access(share, access_rule, share_server)
        except exception.InvalidShareAccess as e:
            msg = f'Failed to allow {access_rule["access_level"]} ' \
                  f'access to {access_rule["access_to"]}, reason {e}'
            msg = _(msg)
            LOG.error(msg)
            access_updates.update(
                {access_rule['access_id']: {'state': 'error'}})

    if not (add_rules or delete_rules):
        # Recovery/resync request: drop everything and re-apply the
        # complete rule list.
        self._clear_access(share, share_server)
        for access_rule in access_rules:
            _try_allow(access_rule)
    else:
        for access_rule in delete_rules:
            self._deny_access(share, access_rule, share_server)
        for access_rule in add_rules:
            _try_allow(access_rule)
    return access_updates

def _get_all_access_from_share(self, share_path, share_proto):
    """Return every access rule currently present on the backend share."""
    if share_proto == 'NFS':
        return self.rest._get_all_nfs_access_rest(share_path)
    if share_proto == 'CIFS':
        return self.rest._get_all_cifs_access_rest(share_path)
    return []

def _ensure_user(self, user_name, user_passwd, group_name):
    """Make sure a backend local user (and its group) exists.

    Returns True when the user exists or was created, False when the
    backend reports an unexpected user state.
    """
    user_state = self.rest._query_user(user_name)
    if user_state == constants.USER_EXIST:
        return True
    if user_state != constants.USER_NOT_EXIST:
        # Backend reported an unknown/unsupported user state.
        return False
    group_state = self.rest._query_group(group_name)
    if group_state not in [constants.GROUP_NOT_EXIST,
                           constants.GROUP_EXIST]:
        msg = f'Failed to use group {group_name}'
        msg = _(msg)
        raise exception.InvalidInput(reason=msg)
    if group_state == constants.GROUP_NOT_EXIST:
        self.rest._add_localgroup(group_name)
    self.rest._add_localuser(user_name, user_passwd, group_name)
    return True
pool_capacity['freecapacity'], 'allocated_capacity_gb': pool_capacity['allocatedcapacity'], 'reserved_percentage': self.configuration.reserved_share_percentage, 'reserved_snapshot_percentage': self.configuration .reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage, 'reserved_share_extend_percentage': self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage, 'dedupe': False, 'compression': False, 'qos': False, 'thin_provisioning': False, 'snapshot_support': self.snapshot_support, 'create_share_from_snapshot_support': self.snapshot_support, } dict_data["pools"].append(pool) if not dict_data['pools']: msg = _("StoragePool is None") LOG.error(msg) raise exception.InvalidInput(reason=msg) def _get_pool_capacity(self, pool_name, result): """Get total,allocated,free capacity of the pools""" pool_info = self._find_pool_info(pool_name, result) if pool_info: total_capacity = int(self._unit_convert_toGB( pool_info['totalcapacity'])) free_capacity = int(self._unit_convert_toGB( pool_info['freecapacity'])) allocated_capacity = int(self._unit_convert_toGB( pool_info['allocatedcapacity'])) pool_info['totalcapacity'] = total_capacity pool_info['freecapacity'] = free_capacity pool_info['allocatedcapacity'] = allocated_capacity return pool_info def _unit_convert_toGB(self, capacity): """Convert unit to GB""" capacity = capacity.upper() try: # get unit char array and use join connect to string unit = re.findall(r'[A-Z]', capacity) unit = ''.join(unit) except BaseException: unit = '' # get capacity size,unit is GB capacity = capacity.replace(unit, '') capacity = float(capacity) if unit in ['B', '']: capacity = capacity / units.Gi elif unit in ['K', 'KB']: capacity = capacity / units.Mi elif unit in ['M', 'MB']: capacity = capacity / units.Ki elif unit in ['G', 'GB']: capacity = capacity elif unit in ['T', 'TB']: capacity = capacity * units.Ki elif unit in ['E', 'EB']: capacity = capacity * units.Mi capacity 
= '%.0f' % capacity return float(capacity) def _generate_share_name(self, share): share_name = 'manila_%s' % share['id'] return self._format_name(share_name) def _format_name(self, name): """format name to meet the backend requirements""" name = name[0: 31] name = name.replace('-', '_') return name def _generate_share_path(self, share_name): """add '/' as path""" share_path = r'/%(path)s/%(dirName)s' % { 'path': share_name.replace("-", "_"), 'dirName': share_name.replace("-", "_") } return share_path def _get_location_path(self, share_path, share_name, share_proto, ip=None): location = None if ip is None: ip = self.configuration.macrosan_nas_ip share_proto = share_proto.upper() if share_proto == 'NFS': location = f'{ip}:{share_path}' elif share_proto == 'CIFS': location = f'\\\\{ip}\\{share_name}' return location def _get_share_instance_pnp(self, share_instance): proto = share_instance['share_proto'].upper() share_name = self._generate_share_name(share_instance) pool = share_utils.extract_host(share_instance['host'], level='pool') if not pool: msg = _("Pool doesn't exist in host field.") raise exception.InvalidHost(reason=msg) if proto != 'NFS' and proto != 'CIFS': msg = f'Share protocol {proto} is not supported.' 
msg = _(msg) raise exception.MacrosanBackendExeption(reason=msg) return pool, share_name, proto def _get_share(self, share_path, proto): return (self.rest._get_nfs_share(share_path) if proto == 'NFS' else self.rest._get_cifs_share(share_path)) def _find_pool_info(self, pool_name, result): if pool_name is None: return pool_info = {} pool_name = pool_name.strip() for item in result.get('data', []): if pool_name == item['name']: pool_info['name'] = item['name'] pool_info['totalcapacity'] = item['size'] pool_info['allocatedcapacity'] = item['allocated'] pool_info['freecapacity'] = item['free'] pool_info['health'] = item['health'] pool_info['rw'] = item['rwStatus'] break return pool_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/macrosan/macrosan_nas.py0000664000175000017500000001561400000000000023727 0ustar00zuulzuul00000000000000# Copyright (c) 2022 MacroSAN Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Share driver for Macrosan Storage Array. 
""" import functools from oslo_config import cfg from oslo_log import log from manila.share import driver from manila.share.drivers.macrosan import macrosan_helper macrosan_opts = [ cfg.HostAddressOpt('macrosan_nas_ip', required=True, help='IP address for the Macrosan NAS server.'), cfg.PortOpt('macrosan_nas_port', default=8443, help='Port number for the Macrosan NAS server.'), cfg.StrOpt('macrosan_nas_username', default='manila', help='Username for the Macrosan NAS server.'), cfg.StrOpt('macrosan_nas_password', secret=True, help='Password for the Macrosan NAS server.'), cfg.StrOpt('macrosan_nas_http_protocol', default='https', choices=['http', 'https'], help='Http protocol for the Macrosan NAS server.'), cfg.BoolOpt('macrosan_ssl_cert_verify', default=False, help='Defines whether the driver should check ssl cert.'), cfg.StrOpt('macrosan_nas_prefix', default='nas', help='Url prefix for the Macrosan NAS server.'), cfg.ListOpt('macrosan_share_pools', required=True, help='Comma separated list of Macrosan NAS pools.'), cfg.IntOpt('macrosan_timeout', default=60, help='request timeout in seconds.') ] CONF = cfg.CONF CONF.register_opts(macrosan_opts) LOG = log.getLogger(__name__) def debug_trace(func): """Log the dirver invoke method start and leave information Used in the MacrosanNasDriver class methods. Ensure func have 'self' argument. 
""" @functools.wraps(func) def wrapper(*args, **kwargs): driver = args[0] method_name = "%(class_name)s.%(method)s" \ % {"class_name": driver.__class__.__name__, "method": func.__name__} backend_name = driver.configuration.share_backend_name LOG.debug("[%(backend_name)s]:Start %(method_name)s", {"backend_name": backend_name, "method_name": method_name}) result = func(*args, **kwargs) LOG.debug("[%(backend_name)s]:Leave %(method_name)s", {"backend_name": backend_name, "method_name": method_name}) return result return wrapper class MacrosanNasDriver(driver.ShareDriver): """Macrosan Share Driver Driver version history: V1.0.0: Initial version Driver support: share create/delete, extend size, shrink size, update_access. protocol: NFS/CIFS """ VENDOR = 'Macrosan' VERSION = '1.0.0' PROTOCOL = 'NFS_CIFS' def __init__(self, *args, **kwargs): super(MacrosanNasDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(macrosan_opts) self.helper = macrosan_helper.MacrosanHelper(self.configuration) @debug_trace def do_setup(self, context): """initialization the driver when start""" self.helper.do_setup() @debug_trace def check_for_setup_error(self): """Check prerequisites""" self.helper.check_share_service() @debug_trace def create_share(self, context, share, share_server=None): """Create a share""" return self.helper.create_share(share, share_server) @debug_trace def delete_share(self, context, share, share_server=None): """Delete a share.""" self.helper.delete_share(share, share_server) @debug_trace def extend_share(self, share, new_size, share_server=None): """Extend share capacity""" self.helper.extend_share(share, new_size, share_server) @debug_trace def shrink_share(self, share, new_size, share_server=None): """Shrink share capacity""" self.helper.shrink_share(share, new_size, share_server) @debug_trace def ensure_share(self, context, share, share_server=None): """Enusre that share is exported.""" return self.helper.ensure_share(share, 
share_server) @debug_trace def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules list. :param context: Current context :param share: Share model with share data. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param update_rules: Empty List or List of access rules which should be updated. access_rules already contains these rules. :param share_server: Not used by this driver. :returns: None, or a dictionary of ``access_id``, ``access_key`` as key: value pairs for the rules added, where, ``access_id`` is the UUID (string) of the access rule, and ``access_key`` is the credential (string) of the entity granted access. During recovery after error, the returned dictionary must contain ``access_id``, ``access_key`` for all the rules that the driver is ordered to resync, i.e. rules in the ``access_rules`` parameter. """ return self.helper.update_access(share, access_rules, add_rules, delete_rules, share_server) @debug_trace def _update_share_stats(self): """Update backend status ,include driver and pools""" data = { 'vendor_name': self.VENDOR, 'driver_version': self.VERSION, 'storage_protocol': self.PROTOCOL, 'share_backend_name': self.configuration.safe_get('share_backend_name'), } self.helper.update_share_stats(data) super(MacrosanNasDriver, self)._update_share_stats(data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/macrosan/rest_helper.py0000664000175000017500000005714300000000000023602 0ustar00zuulzuul00000000000000# Copyright (c) 2022 MacroSAN Technologies Co., Ltd. # All Rights Reserved. 
def __init__(self, configuration):
    """Cache connection settings from the driver configuration."""
    self.configuration = configuration
    self._protocol = self.configuration.macrosan_nas_http_protocol
    self._ip = self.configuration.macrosan_nas_ip
    self._port = self.configuration.macrosan_nas_port
    self._prefix = self.configuration.macrosan_nas_prefix
    self._token = None
    self._username = self.configuration.macrosan_nas_username
    self._password = self.configuration.macrosan_nas_password
    self.request_timeout = self.configuration.macrosan_timeout
    self.ssl_verify = self.configuration.macrosan_ssl_cert_verify
    if not self.ssl_verify:
        # Suppress the Insecure request warnings
        requests.packages.urllib3.disable_warnings(
            requests.packages.urllib3.exceptions.InsecureRequestWarning)

@utils.synchronized('macrosan_manila')
def call(self, url, data, method):
    """Send a request to the backend, handling the token lifecycle.

    Logs in first when no token is cached yet, and retries once after a
    re-login when the backend reports the token has expired.

    :raises exception.MacrosanBackendExeption: on token format/verify/
        required errors that a re-login cannot fix.
    """
    if self._token is None:
        self.login()
    # Build the auth header only AFTER the potential login above.
    # Previously the header was built first and therefore carried a
    # None token, so the very first request after start-up always went
    # out unauthenticated.
    header = {'Authorization': self._token}
    result = self.do_request(url, data, method, header)
    if result['code'] == constants.TOKEN_EXPIRED:
        LOG.error("Token is expired, re-login.")
        self.login()
        # token refreshed by login(); re-assign before retrying
        header['Authorization'] = self._token
        result = self.do_request(url, data, method, header)
    elif result['code'] in (constants.TOKEN_FORMAT_ERROR,
                            constants.TOKEN_VERIFY_FAILED,
                            constants.TOKEN_REQUIRED):
        msg = _('Token authentication error.')
        LOG.error(msg)
        raise exception.MacrosanBackendExeption(msg)
    return result

def do_request(self, url, data, method, header=None):
    """Issue one HTTP request and return the decoded JSON body.

    :raises exception.ShareBackendException: for an unknown HTTP method.
    :raises exception.NetworkException: on any non-200 response.
    """
    final_url = (f'{self._protocol}://{self._ip}:{self._port}/'
                 f'{self._prefix}/{url}')
    # NOTE(review): this logs the request payload verbatim, which
    # includes the password during login -- consider masking it.
    LOG.debug(f'Request URL: {final_url}, Method: {method}, Data: {data}')
    requesters = {
        'POST': requests.post,
        'GET': requests.get,
        'PUT': requests.put,
        'DELETE': requests.delete,
    }
    requester = requesters.get(method)
    if requester is None:
        msg = (_("Request method %s invalid.") % method)
        raise exception.ShareBackendException(msg=msg)
    res = requester(final_url, data=data, headers=header,
                    timeout=self.request_timeout, verify=self.ssl_verify)
    code = res.status_code
    if code != 200:
        msg = (_('Code: %(code)s, URL: %(url)s, Message: %(msg)s')
               % {'code': res.status_code, 'url': final_url,
                  'msg': res.text})
        LOG.error(msg)
        raise exception.NetworkException(msg)
    response = res.json()
    LOG.debug('CODE: %(code)s, RESPONSE: %(response)s',
              {'code': code, 'response': response})
    return response
result['code'] != 0: msg = f"Login failed. code: {result['code']}" msg = _(msg) LOG.error(msg) raise exception.ShareBackendException(msg=msg) LOG.debug(f'Login successful. URL {self._ip}\n') self._token = result['data'] def _assert_result_code(self, result, msg): if (result['code'] != constants.CODE_SUCCESS and result['code'] != constants.CODE_NOT_FOUND): error_msg = (_('%(err)s\nresult: %(res)s.') % {'err': msg, 'res': result}) LOG.error(error_msg) raise exception.ShareBackendException(msg=error_msg) def _assert_result_data(self, result, msg): if "data" not in result: error_msg = (_('Error:"data" not in result. %s') % msg) LOG.error(error_msg) raise exception.ShareBackendException(msg=error_msg) def _create_nfs_share(self, share_path): url = 'rest/nfsShare' # IPv4 Address Blocks Reserved for Documentation params = { 'path': share_path, 'authority': 'ro', 'accessClient': '192.0.2.0', } result = self.call(url, params, 'POST') msg = 'Failed to create a nfs share.' self._assert_result_code(result, msg) def _get_nfs_share(self, share_path): # GET method: param need be after url url = f'rest/nfsShare?path={share_path}' result = self.call(url, None, 'GET') msg = 'Failed to get nfs share.' self._assert_result_code(result, msg) return result['data'] def _delete_nfs_share(self, share_path): url = f'rest/nfsShare?path={share_path}' result = self.call(url, None, 'DELETE') msg = 'Failed to delete nfs share.' self._assert_result_code(result, msg) def _create_cifs_share(self, share_name, share_path, rw_list, rw_list_type): url = 'rest/cifsShare' params = { 'path': share_path, 'cifsName': share_name, 'cifsDescription': '', 'RoList': [], 'RoListType': [], 'RwList': rw_list, 'RwListType': rw_list_type, 'allowList': [], 'denyList': [], } result = self.call(url, params, 'POST') msg = 'Failed to create a CIFS share.' 
self._assert_result_code(result, msg) def _get_cifs_share(self, share_path): url = f'rest/cifsShare?path={share_path}' result = self.call(url, None, 'GET') msg = 'Failed to get the cifs share.' self._assert_result_code(result, msg) return result['data'] def _delete_cifs_share(self, share_name, share_path): url = f'rest/cifsShare?path={share_path}&cifsName={share_name}' result = self.call(url, None, 'DELETE') msg = 'Failed to delete the cifs share.' self._assert_result_code(result, msg) def _update_share_size(self, fs_name, new_size): url = f'rest/filesystem/{fs_name}' params = { 'capacity': new_size, } result = self.call(url, params, 'PUT') msg = 'Failed to update the filesystem size.' self._assert_result_code(result, msg) def _create_filesystem(self, fs_name, pool_name, filesystem_quota): url = 'rest/filesystem' fsinfo = { 'fsName': fs_name, 'poolName': pool_name, 'createType': '0', 'fileSystemQuota': filesystem_quota, 'fileSystemReserve': filesystem_quota, 'wormStatus': 0, 'defaultTimeStatus': 0, 'defaultTimeNum': 0, 'defaultTimeUnit': 'year', 'isAutoLock': 0, 'isAutoDelete': 0, 'lockTime': 0 } result = self.call(url, fsinfo, 'POST') msg = 'Failed to create the filesystem.' self._assert_result_code(result, msg) def _delete_filesystem(self, fs_name): """Delete filesystem""" url = f'rest/filesystem/{fs_name}' result = self.call(url, None, 'DELETE') msg = 'Failed to delete the filesystem.' self._assert_result_code(result, msg) def _get_filesystem(self, fs_name): """Get filesystem """ url = f'rest/filesystem/{fs_name}' result = self.call(url, None, 'GET') msg = 'Failed to get the filesystem.' self._assert_result_code(result, msg) return result['data'] def _create_filesystem_dir(self, share_path): url = 'rest/fileDir' slash = share_path.index(r'/', 1) dir_info = { 'path': share_path[0: slash], 'dirName': share_path[slash + 1:], } result = self.call(url, dir_info, 'POST') msg = 'Failed to create the filesystem directory.' 
self._assert_result_code(result, msg) def _delete_filesystem_dir(self, share_path): slash = share_path.index(r'/', 1) url = f'rest/fileDir?path={share_path[0: slash]}' \ f'&dirName={share_path[slash + 1:]}' result = self.call(url, None, 'DELETE') msg = 'Failed to delete the filesystem directory.' self._assert_result_code(result, msg) def _allow_access_rest(self, share_path, access_to, access_level, share_proto): """Allow access to the share.""" if share_proto == 'NFS': self._allow_nfs_access_rest(share_path, access_to, access_level) elif share_proto == 'CIFS': self._allow_cifs_access_rest(share_path, access_to, access_level) else: raise exception.InvalidInput( reason=(_('Invalid Nas protocol: %s.') % share_proto)) def _allow_nfs_access_rest(self, share_path, access_to, access_level): url = 'rest/nfsShareClient' access = { 'path': share_path, 'client': access_to, 'authority': access_level, } result = self.call(url, access, 'POST') msg = 'Failed to allow access to the NFS share.' self._assert_result_code(result, msg) def _allow_cifs_access_rest(self, share_path, access_to, access_level): url = 'rest/cifsShareClient' ug_type = { 'localUser': '0', 'localGroup': '1', 'adUser': '2', 'adGroup': '3', } msg = 'Failed to allow access to the CIFS share.' access_info = (f'Access info (access_to: {access_to},' f'access_level: {access_level},' f'path: {share_path}.)') def send_rest(rest_access_to, rest_ug_type): access = { 'path': share_path, 'right': access_level, 'ugName': rest_access_to, 'ugType': rest_ug_type, } result = self.call(url, access, 'POST') err_code = result['code'] if err_code == constants.CODE_SUCCESS: return True elif err_code != constants.CODE_SOURCE_NOT_EXIST: self._assert_result_code(result, msg) return False if '/' not in access_to: # First, try to add local user access LOG.debug('Attempting to add local user access. 
%s', access_info) if send_rest(access_to, ug_type['localUser']): return # Second,If add local user access failed, # try to add local group access LOG.debug('Failed add local user access,' ' attempting to add local group access. %s', access_info) if send_rest(access_to, ug_type['localGroup']): return else: str = access_to.index('/') access_to = access_to[str + 1:] # First, add domain user access LOG.debug('Attempting to add domain user access. %s', access_info) if send_rest(access_to, ug_type['adUser']): return # Second, if add domain user access failed, # try to add domain group access. LOG.debug('Failed add domain user access, ' 'attempting to add domain group access. %s', access_info) if send_rest(access_to, ug_type['adGroup']): return raise exception.InvalidShare(reason=msg) def _get_access_from_nfs_share(self, path, clientName): url = f'rest/nfsShareClient?path={path}&client={clientName}' result = self.call(url, None, 'GET') msg = 'Failed to get share NFS access.' self._assert_result_code(result, msg) share_client = None if result['data'] is not None: share_client = {} share_client['path'] = result['data']['path'] share_client['clientName'] = result['data']['clientName'] share_client['accessRight'] = result['data']['accessRight'] return share_client def _get_access_from_cifs_share(self, share_path, access_to, ug_input_type=None): ug_type = { 'localUser': '0', 'localGroup': '1', 'adUser': '2', 'adGroup': '3', } msg = 'Failed to get share cifs access.' 
access_info = (f'Access info (access_to: {access_to},' f'path: {share_path}.)') def send_rest(access_to, ug_type): url = f'rest/cifsShareClient?path={share_path}' \ f'&ugName={access_to}&ugType={ug_type}' result = self.call(url, None, 'GET') self._assert_result_code(result, msg) return result share_client = None if ug_input_type is not None: ret = send_rest(access_to, ug_input_type) if ret['data']: share_client = {} share_client['path'] = ret['data']['path'] share_client['ugName'] = ret['data']['ugName'] share_client['ugType'] = ret['data']['ugType'] share_client['accessRight'] = ret['data']['accessRight'] return share_client elif '/' not in access_to: LOG.debug('Attempting to get local user access. %s', access_info) user_ret = send_rest(access_to, ug_type['localUser']) if user_ret['code'] == constants.CODE_NOT_FOUND: return share_client if user_ret['data']: share_client = {} share_client['path'] = user_ret['data']['path'] share_client['ugName'] = user_ret['data']['ugName'] share_client['ugType'] = user_ret['data']['ugType'] share_client['accessRight'] = user_ret['data']['accessRight'] return share_client LOG.debug('Failed get local user access,' ' attempting to get local group access. %s', access_info) group_ret = send_rest(access_to, ug_type['localGroup']) if group_ret['data']: share_client = {} share_client['path'] = group_ret['data']['path'] share_client['ugName'] = group_ret['data']['ugName'] share_client['ugType'] = group_ret['data']['ugType'] share_client['accessRight'] = group_ret['data']['accessRight'] return share_client else: str = access_to.index('/') access_to = access_to[str + 1:] LOG.debug('Attempting to get domain user access. 
%s', access_info) aduser_ret = send_rest(access_to, ug_type['adUser']) if aduser_ret['code'] == constants.CODE_NOT_FOUND: return share_client if aduser_ret['data']: share_client = {} share_client['path'] = aduser_ret['data']['path'] share_client['ugName'] = aduser_ret['data']['ugName'] share_client['ugType'] = aduser_ret['data']['ugType'] share_client['accessRight'] = \ aduser_ret['data']['accessRight'] return share_client LOG.debug('Failed get domain user access,' ' attempting to get domain group access. %s', access_info) adgroup_ret = send_rest(access_to, ug_type['adGroup']) if adgroup_ret['data']: share_client = {} share_client['path'] = adgroup_ret['data']['path'] share_client['ugName'] = adgroup_ret['data']['ugName'] share_client['ugType'] = adgroup_ret['data']['ugType'] share_client['accessRight'] = \ adgroup_ret['data']['accessRight'] return share_client return share_client def _get_all_nfs_access_rest(self, share_path): url = f'rest/allNfsShareClient?path={share_path}' result = self.call(url, None, 'GET') msg = 'Get all nfs access error.' self._assert_result_code(result, msg) access_list = [] if result['data'] is None: pass else: for item in result.get('data', []): access = {} access['share_path'] = item['path'] access['access_to'] = item['clientName'] access['access_level'] = item['accessRight'] access_list.append(access) return access_list def _get_all_cifs_access_rest(self, share_path): url = f'rest/allCifsShareClient?path={share_path}' result = self.call(url, None, 'GET') msg = 'Get all cifs access error.' 
self._assert_result_code(result, msg) access_list = [] for item in result.get('data', []): access = {} access['share_path'] = item['path'] access['access_to'] = item['ugName'] access['ugType'] = item['ugType'] access['access_level'] = item['accessRight'] access_list.append(access) return access_list def _change_nfs_access_rest(self, share_path, access_to, access_level): url = 'rest/nfsShareClient' access_info = { 'path': share_path, 'oldNfsClientName': access_to, 'clientName': '', 'accessRight': access_level, 'allSquash': '', 'rootSquash': '', 'secure': '', 'anonuid': '', 'anongid': '', } result = self.call(url, access_info, 'PUT') msg = 'Update nfs acess error.' self._assert_result_code(result, msg) def _change_cifs_access_rest(self, share_path, access_to, access_level, ug_type): url = 'rest/cifsShareClient' if '/' in access_to: str = access_to.index('/') access_to = access_to[str + 1:] access_info = { 'path': share_path, 'right': access_level, 'ugName': access_to, 'ugType': ug_type, } result = self.call(url, access_info, 'PUT') msg = 'Update cifs access error.' self._assert_result_code(result, msg) def _delete_nfs_access_rest(self, share_path, access_to): url = f'rest/nfsShareClient?path={share_path}&client={access_to}' result = self.call(url, None, 'DELETE') msg = 'Delete nfs access error.' self._assert_result_code(result, msg) def _delete_cifs_access_rest(self, share_path, access_to, ug_type): url = f'rest/cifsShareClient?path={share_path}&ugName={access_to}' \ f'&ugType={ug_type}' result = self.call(url, None, 'DELETE') msg = 'Delete cifs access error.' self._assert_result_code(result, msg) def _get_nfs_service_status(self): url = 'rest/nfsService' result = self.call(url, None, 'GET') msg = 'Get NFS service stauts error.' 
self._assert_result_code(result, msg) nfs_service = {} nfs_service['serviceStatus'] = result['data']['serviceStatus'] nfs_service['nfs3Status'] = result['data']['nfs3Status'] nfs_service['nfs4Status'] = result['data']['nfs4Status'] return nfs_service def _start_nfs_service(self): url = 'rest/nfsService' nfs_service_info = { "openStatus": "1", } result = self.call(url, nfs_service_info, 'PUT') self._assert_result_code(result, 'Start NFS service error.') def _config_nfs_service(self): url = 'rest/nfsConfig' config_nfs = { 'configNfs3': "yes", 'configNfs4': "yes", } result = self.call(url, config_nfs, 'PUT') self._assert_result_code(result, 'Config NFS service error.') def _get_cifs_service_status(self): url = 'rest/cifsService' result = self.call(url, None, 'GET') msg = 'Get CIFS service status error.' self._assert_result_code(result, msg) return result['data'] def _start_cifs_service(self): url = 'rest/cifsService' cifs_service_info = { 'openStatus': '1', } result = self.call(url, cifs_service_info, 'PUT') self._assert_result_code(result, 'Start CIFS service error.') def _config_cifs_service(self): url = 'rest/cifsConfig' """config user mode""" config_cifs = { 'workName': 'manila', 'description': '', 'access_way': 'user', 'isCache': 'no', 'adsName': '', 'adsIP': '', 'adsUSER': '', 'adsPASSWD': '', 'allowList': [], 'denyList': [], } result = self.call(url, config_cifs, 'PUT') self._assert_result_code(result, 'Config CIFS service error.') def _get_all_pool(self): url = 'rest/storagepool' result = self.call(url, None, 'GET') msg = 'Query pool info error.' self._assert_result_code(result, msg) return result def _query_user(self, user_name): url = f'rest/user/{user_name}' result = self.call(url, None, 'GET') msg = 'Query user error.' 
self._assert_result_code(result, msg) return result['data'] def _add_localuser(self, user_name, user_passwd, group_name): url = 'rest/localUser' user_info = { 'userName': user_name, 'mgGroup': group_name, 'userPasswd': user_passwd, 'unusedGroup': [], } result = self.call(url, user_info, 'POST') msg = 'add localuser error.' self._assert_result_code(result, msg) def _query_group(self, group_name): url = f'rest/group/{group_name}' result = self.call(url, None, 'GET') msg = 'Query group error.' self._assert_result_code(result, msg) return result['data'] def _add_localgroup(self, group_name): url = 'rest/localGroup' group_info = { 'groupName': group_name, } result = self.call(url, group_info, 'POST') msg = 'add localgroup error.' self._assert_result_code(result, msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9216716 manila-21.0.0/manila/share/drivers/maprfs/0000775000175000017500000000000000000000000020367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/maprfs/__init__.py0000664000175000017500000000000000000000000022466 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/maprfs/driver_util.py0000664000175000017500000003253700000000000023303 0ustar00zuulzuul00000000000000# Copyright (c) 2016, MapR Technologies # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Utility for processing MapR cluster operations """ import json import shlex import socket from oslo_concurrency import processutils from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila import ssh_utils from manila import utils LOG = log.getLogger(__name__) def get_version_handler(configuration): # here can be choosing DriverUtils depend on cluster version return BaseDriverUtil(configuration) class BaseDriverUtil(object): """Utility class for MapR-FS specific operations.""" NOT_FOUND_MSG = 'No such' ERROR_MSG = 'ERROR' def __init__(self, configuration): self.configuration = configuration self.ssh_connections = {} self.hosts = self.configuration.maprfs_clinode_ip self.local_hosts = socket.gethostbyname_ex(socket.gethostname())[2] self.maprcli_bin = '/usr/bin/maprcli' self.hadoop_bin = '/usr/bin/hadoop' def _execute(self, *cmd, **kwargs): for x in range(0, len(self.hosts)): try: check_exit_code = kwargs.pop('check_exit_code', True) host = self.hosts[x] if host in self.local_hosts: cmd = self._as_user(cmd, self.configuration.maprfs_ssh_name) out, err = utils.execute(*cmd, check_exit_code=check_exit_code) else: out, err = self._run_ssh(host, cmd, check_exit_code) # move available cldb host to the beginning if x > 0: self.hosts[0], self.hosts[x] = self.hosts[x], self.hosts[0] return out, err except exception.ProcessExecutionError as e: if self._check_error(e): raise elif x < len(self.hosts) - 1: msg = ('Error running SSH command. Trying another host') LOG.error(msg) else: raise except Exception as e: if x < len(self.hosts) - 1: msg = ('Error running SSH command. 
Trying another host') LOG.error(msg) else: raise exception.ProcessExecutionError(str(e)) def _run_ssh(self, host, cmd_list, check_exit_code=False): command = ' '.join(shlex.quote(cmd_arg) for cmd_arg in cmd_list) connection = self.ssh_connections.get(host) if connection is None: ssh_name = self.configuration.maprfs_ssh_name password = self.configuration.maprfs_ssh_pw private_key = self.configuration.maprfs_ssh_private_key remote_ssh_port = self.configuration.maprfs_ssh_port ssh_conn_timeout = self.configuration.ssh_conn_timeout min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn ssh_pool = ssh_utils.SSHPool(host, remote_ssh_port, ssh_conn_timeout, ssh_name, password=password, privatekey=private_key, min_size=min_size, max_size=max_size) ssh = ssh_pool.create() self.ssh_connections[host] = (ssh_pool, ssh) else: ssh_pool, ssh = connection if not ssh.get_transport().is_active(): ssh_pool.remove(ssh) ssh = ssh_pool.create() self.ssh_connections[host] = (ssh_pool, ssh) return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) @staticmethod def _check_error(error): # check if error was native return BaseDriverUtil.ERROR_MSG in error.stdout @staticmethod def _as_user(cmd, user): return ['sudo', 'su', '-', user, '-c', ' '.join(shlex.quote(cmd_arg) for cmd_arg in cmd)] @staticmethod def _add_params(cmd, **kwargs): params = [] for x in kwargs.keys(): params.append('-' + x) params.append(kwargs[x]) return cmd + params def create_volume(self, name, path, size, **kwargs): # delete size param as it is set separately if kwargs.get('quota'): del kwargs['quota'] sizestr = str(size) + 'G' cmd = [self.maprcli_bin, 'volume', 'create', '-name', name, '-path', path, '-quota', sizestr, '-readAce', '', '-writeAce', ''] cmd = self._add_params(cmd, **kwargs) self._execute(*cmd) def volume_exists(self, volume_name): cmd = [self.maprcli_bin, 'volume', 'info', '-name', volume_name] out, __ = self._execute(*cmd, 
check_exit_code=False) return self.NOT_FOUND_MSG not in out def delete_volume(self, name): cmd = [self.maprcli_bin, 'volume', 'remove', '-name', name, '-force', 'true'] out, __ = self._execute(*cmd, check_exit_code=False) # if volume does not exist do not raise exception.ProcessExecutionError if self.ERROR_MSG in out and self.NOT_FOUND_MSG not in out: raise exception.ProcessExecutionError(out) def set_volume_size(self, name, size): sizestr = str(size) + 'G' cmd = [self.maprcli_bin, 'volume', 'modify', '-name', name, '-quota', sizestr] self._execute(*cmd) def create_snapshot(self, name, volume_name): cmd = [self.maprcli_bin, 'volume', 'snapshot', 'create', '-snapshotname', name, '-volume', volume_name] self._execute(*cmd) def delete_snapshot(self, name, volume_name): cmd = [self.maprcli_bin, 'volume', 'snapshot', 'remove', '-snapshotname', name, '-volume', volume_name] out, __ = self._execute(*cmd, check_exit_code=False) # if snapshot does not exist do not raise ProcessExecutionError if self.ERROR_MSG in out and self.NOT_FOUND_MSG not in out: raise exception.ProcessExecutionError(out) def get_volume_info(self, volume_name, columns=None): cmd = [self.maprcli_bin, 'volume', 'info', '-name', volume_name, '-json'] if columns: cmd += ['-columns', ','.join(columns)] out, __ = self._execute(*cmd) return json.loads(out)['data'][0] def get_volume_info_by_path(self, volume_path, columns=None, check_if_exists=False): cmd = [self.maprcli_bin, 'volume', 'info', '-path', volume_path, '-json'] if columns: cmd += ['-columns', ','.join(columns)] out, __ = self._execute(*cmd, check_exit_code=not check_if_exists) if check_if_exists and self.NOT_FOUND_MSG in out: return None return json.loads(out)['data'][0] def get_snapshot_list(self, volume_name=None, volume_path=None): params = {} if volume_name: params['volume'] = volume_name if volume_path: params['path'] = volume_name cmd = [self.maprcli_bin, 'volume', 'snapshot', 'list', '-volume', '-columns', 'snapshotname', '-json'] cmd = 
self._add_params(cmd, **params) out, __ = self._execute(*cmd) return [x['snapshotname'] for x in json.loads(out)['data']] def rename_volume(self, name, new_name): cmd = [self.maprcli_bin, 'volume', 'rename', '-name', name, '-newname', new_name] self._execute(*cmd) def fs_capacity(self): cmd = [self.hadoop_bin, 'fs', '-df'] out, err = self._execute(*cmd) lines = out.splitlines() try: fields = lines[1].split() total = int(fields[1]) free = int(fields[3]) except (IndexError, ValueError): msg = _('Failed to get MapR-FS capacity info.') LOG.exception(msg) raise exception.ProcessExecutionError(msg) return total, free def maprfs_ls(self, path): cmd = [self.hadoop_bin, 'fs', '-ls', path] out, __ = self._execute(*cmd) return out def maprfs_cp(self, source, dest): cmd = [self.hadoop_bin, 'fs', '-cp', '-p', source, dest] self._execute(*cmd) def maprfs_chmod(self, dest, mod): cmd = [self.hadoop_bin, 'fs', '-chmod', mod, dest] self._execute(*cmd) def maprfs_du(self, path): cmd = [self.hadoop_bin, 'fs', '-du', '-s', path] out, __ = self._execute(*cmd) return int(out.split(' ')[0]) def check_state(self): cmd = [self.hadoop_bin, 'fs', '-ls', '/'] out, __ = self._execute(*cmd, check_exit_code=False) return 'Found' in out def dir_not_empty(self, path): cmd = [self.hadoop_bin, 'fs', '-ls', path] out, __ = self._execute(*cmd, check_exit_code=False) return 'Found' in out def set_volume_ace(self, volume_name, access_rules): read_accesses = [] write_accesses = [] for access_rule in access_rules: if access_rule['access_level'] == constants.ACCESS_LEVEL_RO: read_accesses.append(access_rule['access_to']) elif access_rule['access_level'] == constants.ACCESS_LEVEL_RW: read_accesses.append(access_rule['access_to']) write_accesses.append(access_rule['access_to']) def rule_type(access_to): if self.group_exists(access_to): return 'g' elif self.user_exists(access_to): return 'u' else: # if nor user nor group exits, it should try add group rule return 'g' read_accesses_string = '|'.join( map(lambda 
x: rule_type(x) + ':' + x, read_accesses)) write_accesses_string = '|'.join( map(lambda x: rule_type(x) + ':' + x, write_accesses)) cmd = [self.maprcli_bin, 'volume', 'modify', '-name', volume_name, '-readAce', read_accesses_string, '-writeAce', write_accesses_string] self._execute(*cmd) def add_volume_ace_rules(self, volume_name, access_rules): if not access_rules: return access_rules_map = self.get_access_rules(volume_name) for access_rule in access_rules: access_rules_map[access_rule['access_to']] = access_rule self.set_volume_ace(volume_name, access_rules_map.values()) def remove_volume_ace_rules(self, volume_name, access_rules): if not access_rules: return access_rules_map = self.get_access_rules(volume_name) for access_rule in access_rules: if access_rules_map.get(access_rule['access_to']): del access_rules_map[access_rule['access_to']] self.set_volume_ace(volume_name, access_rules_map.values()) def get_access_rules(self, volume_name): info = self.get_volume_info(volume_name) aces = info['volumeAces'] read_ace = aces['readAce'] write_ace = aces['writeAce'] access_rules_map = {} self._retrieve_access_rules_from_ace(read_ace, 'r', access_rules_map) self._retrieve_access_rules_from_ace(write_ace, 'w', access_rules_map) return access_rules_map def _retrieve_access_rules_from_ace(self, ace, ace_type, access_rules_map): access = constants.ACCESS_LEVEL_RW if ace_type == 'w' else ( constants.ACCESS_LEVEL_RO) if ace not in ['p', '']: write_rules = [x.strip() for x in ace.split('|')] for user in write_rules: rule_type, username = user.split(':') if rule_type not in ['u', 'g']: continue access_rules_map[username] = { 'access_level': access, 'access_to': username, 'access_type': 'user', } def user_exists(self, user): cmd = ['getent', 'passwd', user] out, __ = self._execute(*cmd, check_exit_code=False) return out != '' def group_exists(self, group): cmd = ['getent', 'group', group] out, __ = self._execute(*cmd, check_exit_code=False) return out != '' def 
get_cluster_name(self): cmd = [self.maprcli_bin, 'dashboard', 'info', '-json'] out, __ = self._execute(*cmd) try: return json.loads(out)['data'][0]['cluster']['name'] except (IndexError, ValueError) as e: msg = (_("Failed to parse cluster name. Error: %s") % e) raise exception.ProcessExecutionError(msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/maprfs/maprfs_native.py0000664000175000017500000004641200000000000023606 0ustar00zuulzuul00000000000000# Copyright (c) 2016, MapR Technologies # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Share driver for MapR-FS distributed file system. 
""" import math import os from oslo_config import cfg from oslo_log import log from oslo_utils import strutils from oslo_utils import units from manila import context from manila import exception from manila.i18n import _ from manila.share import api from manila.share import driver from manila.share.drivers.maprfs import driver_util as mapru LOG = log.getLogger(__name__) maprfs_native_share_opts = [ cfg.ListOpt('maprfs_clinode_ip', help='The list of IPs or hostnames of nodes where mapr-core ' 'is installed.'), cfg.PortOpt('maprfs_ssh_port', default=22, help='CLDB node SSH port.'), cfg.StrOpt('maprfs_ssh_name', default="mapr", help='Cluster admin user ssh login name.'), cfg.StrOpt('maprfs_ssh_pw', secret=True, help='Cluster node SSH login password, ' 'This parameter is not necessary, if ' '\'maprfs_ssh_private_key\' is configured.'), cfg.StrOpt('maprfs_ssh_private_key', help='Path to SSH private ' 'key for login.'), cfg.StrOpt('maprfs_base_volume_dir', default='/', help='Path in MapRFS where share volumes must be created.'), cfg.ListOpt('maprfs_zookeeper_ip', help='The list of IPs or hostnames of ZooKeeper nodes.'), cfg.ListOpt('maprfs_cldb_ip', help='The list of IPs or hostnames of CLDB nodes.'), cfg.BoolOpt('maprfs_rename_managed_volume', default=True, help='Specify whether existing volume should be renamed when' ' start managing.'), ] CONF = cfg.CONF CONF.register_opts(maprfs_native_share_opts) class MapRFSNativeShareDriver(driver.ExecuteMixin, driver.ShareDriver): """MapR-FS Share Driver. Executes commands relating to shares. 
driver_handles_share_servers must be False because this driver does not support creating or managing virtual storage servers (share servers) API version history: 1.0 - Initial Version """ def __init__(self, *args, **kwargs): super(MapRFSNativeShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(maprfs_native_share_opts) self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'MapR-FS-Native' self._base_volume_dir = self.configuration.safe_get( 'maprfs_base_volume_dir') or '/' self._maprfs_util = None self._maprfs_base_path = "maprfs://" self.cldb_ip = self.configuration.maprfs_cldb_ip or [] self.zookeeper_ip = self.configuration.maprfs_zookeeper_ip or [] self.rename_volume = self.configuration.maprfs_rename_managed_volume self.api = api.API() def do_setup(self, context): """Do initialization while the share driver starts.""" super(MapRFSNativeShareDriver, self).do_setup(context) self._maprfs_util = mapru.get_version_handler(self.configuration) def _share_dir(self, share_name): return os.path.join(self._base_volume_dir, share_name) def _volume_name(self, share_name): return share_name def _get_share_path(self, share): return share['export_location'] def _get_snapshot_path(self, snapshot): share_dir = snapshot['share_instance']['export_location'].split( ' ')[0][len(self._maprfs_base_path):] return os.path.join(share_dir, '.snapshot', snapshot['provider_location'] or snapshot['name']) def _get_volume_name(self, context, share): metadata = self.api.get_share_metadata(context, {'id': share['share_id']}) return metadata.get('_name', self._volume_name(share['name'])) def _get_share_export_locations(self, share, path=None): """Return share path on storage provider.""" cluster_name = self._maprfs_util.get_cluster_name() path = '%(path)s -C %(cldb)s -Z %(zookeeper)s -N %(name)s' % { 'path': self._maprfs_base_path + ( path or self._share_dir(share['name'])), 'cldb': ' '.join(self.cldb_ip), 'zookeeper': ' 
    def _create_share(self, share, metadata, context):
        """Create the MapR-FS volume backing a share.

        Share metadata keys starting with '_' are forwarded as maprcli
        volume-create options; '_path' and '_name' override the default
        share directory and volume name.

        :raises exception.MapRFSException: if the protocol is not MapRFS
            or the volume creation fails.
        """
        if share['share_proto'].lower() != 'maprfs':
            msg = _('Only MapRFS protocol supported!')
            LOG.error(msg)
            raise exception.MapRFSException(msg=msg)
        # Strip the leading '_' from metadata keys to build maprcli options.
        options = {k[1:]: v for k, v in metadata.items() if k[0] == '_'}
        share_dir = options.pop('path', self._share_dir(share['name']))
        volume_name = options.pop('name', self._volume_name(share['name']))
        try:
            self._maprfs_util.create_volume(volume_name, share_dir,
                                            share['size'], **options)
            # posix permissions should be 777, ACEs are used as a restriction
            self._maprfs_util.maprfs_chmod(share_dir, '777')
        except exception.ProcessExecutionError:
            # Record '_name' = 'error' so delete_share can later skip the
            # backend call for a share that was never created.
            self.api.update_share_metadata(context,
                                           {'id': share['share_id']},
                                           {'_name': 'error'})
            msg = (_('Failed to create volume in MapR-FS for the '
                     'share %(share_name)s.') % {'share_name': share['name']})
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
    def ensure_share(self, context, share, share_server=None):
        """Update the export location if it has changed.

        Returns the new export location list when the volume's mount
        directory no longer matches the stored location; implicitly
        returns None when nothing changed.

        :raises exception.ShareResourceNotFound: if the backing volume
            does not exist.
        """
        volume_name = self._get_volume_name(context, share)
        if self._maprfs_util.volume_exists(volume_name):
            info = self._maprfs_util.get_volume_info(volume_name)
            path = info['mountdir']
            old_location = share['export_locations'][0]
            new_location = self._get_share_export_locations(
                share, path=path)
            if new_location[0]['path'] != old_location['path']:
                return new_location
        else:
            raise exception.ShareResourceNotFound(share_id=share['share_id'])
Error: Tenant user should not ' 'differ from tenant of the source snapshot.') % {'snapshot_name': snapshot['name'], 'share_name': share['name']}) LOG.error(msg) raise exception.MapRFSException(msg=msg) share_dir = metadata.get('_path', self._share_dir(share['name'])) snapshot_path = self._get_snapshot_path(snapshot) self._create_share(share, metadata, context) try: if self._maprfs_util.dir_not_empty(snapshot_path): self._maprfs_util.maprfs_cp(snapshot_path + '/*', share_dir) except exception.ProcessExecutionError: msg = ( _('Failed to create share from snapshot %(snapshot_name)s ' 'with name %(share_name)s.') % { 'snapshot_name': snapshot['name'], 'share_name': share['name']}) LOG.exception(msg) raise exception.MapRFSException(msg=msg) return self._get_share_export_locations(share, path=metadata.get('_path')) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot.""" volume_name = self._get_volume_name(context, snapshot['share']) snapshot_name = snapshot['name'] try: self._maprfs_util.create_snapshot(snapshot_name, volume_name) return {'provider_location': snapshot_name} except exception.ProcessExecutionError: msg = ( _('Failed to create snapshot %(snapshot_name)s for the share ' '%(share_name)s.') % {'snapshot_name': snapshot_name, 'share_name': snapshot['share_name']}) LOG.exception(msg) raise exception.MapRFSException(msg=msg) def delete_share(self, context, share, share_server=None): """Deletes share storage.""" volume_name = self._get_volume_name(context, share) if volume_name == "error": LOG.info("Skipping deleting share with name %s, as it does not" " exist on the backend", share['name']) return try: self._maprfs_util.delete_volume(volume_name) except exception.ProcessExecutionError: msg = (_('Failed to delete share %(share_name)s.') % {'share_name': share['name']}) LOG.exception(msg) raise exception.MapRFSException(msg=msg) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" 
    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, update_rules, share_server=None):
        """Update MapR-FS ACEs (access rules) for the given share.

        Only the 'user' access type is supported. When add/delete deltas
        are supplied they are applied incrementally; otherwise the full
        rule set is re-applied.

        :raises exception.InvalidShareAccess: for non-'user' access types.
        :raises exception.MapRFSException: if applying the rules fails.
        """
        for access in access_rules:
            if access['access_type'].lower() != 'user':
                msg = _("Only 'user' access type allowed!")
                LOG.error(msg)
                raise exception.InvalidShareAccess(reason=msg)
        volume_name = self._get_volume_name(context, share)
        try:
            # 'update_access' may be called while the share is being
            # removed, so silently return instead of raising when the
            # backing volume no longer exists.
            if not self._maprfs_util.volume_exists(volume_name):
                LOG.warning('Can not get share %s.', share['name'])
                return
            # Incremental update (deltas supplied) vs full reset.
            if add_rules or delete_rules:
                self._maprfs_util.remove_volume_ace_rules(volume_name,
                                                          delete_rules)
                self._maprfs_util.add_volume_ace_rules(volume_name, add_rules)
            else:
                self._maprfs_util.set_volume_ace(volume_name, access_rules)
        except exception.ProcessExecutionError:
            msg = (_('Failed to update access for share %(name)s.')
                   % {'name': share['name']})
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
    def manage_existing(self, share, driver_options):
        """Bring an existing MapR-FS volume under Manila management.

        The share path is taken from the export location (the 'maprfs://'
        prefix and the '-C/-Z/-N' metadata suffix are stripped). Depending
        on the 'maprfs_rename_managed_volume' option, overridable via
        driver_options['rename'], the backing volume is either renamed to
        the share name or its original name is stored in share metadata
        under '_name'.

        :returns: dict with the detected 'size' and 'export_locations'.
        :raises exception.ManageInvalidShare: if no volume exists at the
            derived path.
        :raises exception.MapRFSException: on parse or backend errors.
        """
        try:
            # retrieve share path from export location, maprfs:// prefix and
            # metadata (-C -Z -N) should be casted away
            share_path = share['export_location'].split(
            )[0][len(self._maprfs_base_path):]
            info = self._maprfs_util.get_volume_info_by_path(
                share_path, check_if_exists=True)
            if not info:
                msg = _("Share %s not found") % share[
                    'export_location']
                LOG.error(msg)
                raise exception.ManageInvalidShare(reason=msg)
            # Quota/usage divided by Ki — presumably MB-to-GB conversion;
            # TODO(review): confirm the units maprcli reports.
            size = math.ceil(float(info['quota']) / units.Ki)
            used = math.ceil(float(info['totalused']) / units.Ki)
            volume_name = info['volumename']
            should_rename = self.rename_volume
            rename_option = driver_options.get('rename')
            if rename_option:
                should_rename = strutils.bool_from_string(rename_option)
            if should_rename:
                self._maprfs_util.rename_volume(volume_name, share['name'])
            else:
                self.api.update_share_metadata(context.get_admin_context(),
                                               {'id': share['share_id']},
                                               {'_name': volume_name})
            location = self._get_share_export_locations(share,
                                                        path=share_path)
            if size == 0:
                # No quota set: fall back to current usage as the size.
                size = used
                msg = ('Share %s has no size quota. Total used value will be'
                       ' used as share size')
                LOG.warning(msg, share['name'])
            return {'size': size, 'export_locations': location}
        except (ValueError, KeyError, exception.ProcessExecutionError):
            msg = _('Failed to manage share.')
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
    def manage_existing_snapshot(self, snapshot, driver_options):
        """Bring an existing volume snapshot under Manila management.

        Verifies that snapshot['provider_location'] exists on the backing
        volume and derives the snapshot size from its disk usage.

        :returns: dict with the computed 'size' in GB.
        :raises exception.ManageInvalidShareSnapshot: if the snapshot is
            not present on the volume.
        :raises exception.MapRFSException: on backend command failures.
        """
        volume_name = self._get_volume_name(context.get_admin_context(),
                                            snapshot['share'])
        snapshot_path = self._get_snapshot_path(snapshot)
        try:
            snapshot_list = self._maprfs_util.get_snapshot_list(
                volume_name=volume_name)
            snapshot_name = snapshot['provider_location']
            if snapshot_name not in snapshot_list:
                msg = _("Snapshot %s not found") % snapshot_name
                LOG.error(msg)
                raise exception.ManageInvalidShareSnapshot(reason=msg)
            # Size is rounded up from bytes reported by 'hadoop fs -du'.
            size = math.ceil(float(self._maprfs_util.maprfs_du(
                snapshot_path)) / units.Gi)
            return {'size': size}
        except exception.ProcessExecutionError:
            msg = _("Manage existing share snapshot failed.")
            LOG.exception(msg)
            raise exception.MapRFSException(msg=msg)
manila-21.0.0/manila/share/drivers/netapp/0000775000175000017500000000000000000000000020366 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/__init__.py0000664000175000017500000000000000000000000022465 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/common.py0000664000175000017500000001117500000000000022235 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unified driver for NetApp storage systems. Supports multiple storage systems of different families and driver modes. """ from oslo_log import log from oslo_utils import importutils from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.netapp import options from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) MULTI_SVM = 'multi_svm' SINGLE_SVM = 'single_svm' DATAONTAP_CMODE_PATH = 'manila.share.drivers.netapp.dataontap.cluster_mode' # Add new drivers here, no other code changes required. 
class NetAppDriver(object):
    """NetApp unified share storage driver.

    Acts as a factory to create NetApp storage drivers based on the
    storage family and driver mode configured.
    """

    REQUIRED_FLAGS = ['netapp_storage_family', 'driver_handles_share_servers']

    def __new__(cls, *args, **kwargs):
        # Factory entry point: validates configuration, then returns an
        # instance of the concrete driver class instead of NetAppDriver.
        config = kwargs.get('configuration', None)
        if not config:
            raise exception.InvalidInput(
                reason=_('Required configuration not found.'))

        config.append_config_values(driver.share_opts)
        config.append_config_values(options.netapp_proxy_opts)
        na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)

        app_version = na_utils.OpenStackInfo().info()
        LOG.info('OpenStack OS Version Info: %s', app_version)
        kwargs['app_version'] = app_version

        driver_mode = NetAppDriver._get_driver_mode(
            config.netapp_storage_family, config.driver_handles_share_servers)

        return NetAppDriver._create_driver(config.netapp_storage_family,
                                           driver_mode, *args, **kwargs)

    @staticmethod
    def _get_driver_mode(storage_family, driver_handles_share_servers):
        """Map driver_handles_share_servers to a driver mode.

        None selects the family default from
        NETAPP_UNIFIED_DRIVER_DEFAULT_MODE; True means multi-SVM and
        False means single-SVM.

        :raises exception.InvalidInput: if no default exists for the
            storage family.
        """
        if driver_handles_share_servers is None:
            driver_mode = NETAPP_UNIFIED_DRIVER_DEFAULT_MODE.get(
                storage_family.lower())
            if driver_mode:
                LOG.debug('Default driver mode %s selected.', driver_mode)
            else:
                raise exception.InvalidInput(
                    reason=_('Driver mode was not specified and a default '
                             'value could not be determined from the '
                             'specified storage family.'))
        elif driver_handles_share_servers:
            driver_mode = MULTI_SVM
        else:
            driver_mode = SINGLE_SVM

        return driver_mode

    @staticmethod
    def _create_driver(storage_family, driver_mode, *args, **kwargs):
        """Create the appropriate driver based on family and mode.

        Looks up the driver class path in NETAPP_UNIFIED_DRIVER_REGISTRY
        and instantiates it via importutils.

        :raises exception.InvalidInput: for an unknown family or an
            unsupported family/mode combination.
        """
        storage_family = storage_family.lower()

        fmt = {'storage_family': storage_family,
               'driver_mode': driver_mode}
        LOG.info('Requested unified config: %(storage_family)s and '
                 '%(driver_mode)s.', fmt)

        family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
        if family_meta is None:
            raise exception.InvalidInput(
                reason=_('Storage family %s is not supported.')
                % storage_family)

        driver_loc = family_meta.get(driver_mode)
        if driver_loc is None:
            raise exception.InvalidInput(
                reason=_('Driver mode %(driver_mode)s is not supported '
                         'for storage family %(storage_family)s.') % fmt)

        kwargs['netapp_mode'] = 'proxy'
        driver = importutils.import_object(driver_loc, *args, **kwargs)
        LOG.info('NetApp driver of family %(storage_family)s and mode '
                 '%(driver_mode)s loaded.', fmt)

        driver.ipv6_implemented = True

        return driver
Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp API for Data ONTAP and OnCommand DFM. Contains classes required to issue API calls to Data ONTAP and OnCommand DFM. """ import copy import re from lxml import etree from oslo_log import log from oslo_serialization import jsonutils import requests from requests.adapters import HTTPAdapter from requests import auth from requests.packages.urllib3.util.retry import Retry from manila import exception from manila.i18n import _ from manila.share.drivers.netapp.dataontap.client import rest_endpoints from manila.share.drivers.netapp import utils LOG = log.getLogger(__name__) EONTAPI_EINVAL = '22' EVOLOPNOTSUPP = '160' EAPIERROR = '13001' EAPINOTFOUND = '13005' ESNAPSHOTNOTALLOWED = '13023' EVOLUMEDOESNOTEXIST = '13040' EVOLUMEOFFLINE = '13042' EINTERNALERROR = '13114' EINVALIDINPUTERROR = '13115' EDUPLICATEENTRY = '13130' EVOLNOTCLONE = '13170' EVOLOPNOTUNDERWAY = '13171' EVOLMOVE_CANNOT_MOVE_TO_CFO = '13633' EAGGRDOESNOTEXIST = '14420' EVOL_NOT_MOUNTED = '14716' EVSERVERALREADYSTARTED = '14923' ESIS_CLONE_NOT_LICENSED = '14956' EOBJECTNOTFOUND = '15661' EVSERVERNOTFOUND = '15698' E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN = '18605' ERELATION_EXISTS = '17122' ENOTRANSFER_IN_PROGRESS = '17130' ETRANSFER_IN_PROGRESS = '17137' EANOTHER_OP_ACTIVE = '17131' ERELATION_NOT_QUIESCED = '17127' ESOURCE_IS_DIFFERENT = 
    def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP,
                 style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None,
                 password=None, port=None, trace=False,
                 api_trace_pattern=None, private_key_file=None,
                 certificate_file=None, ca_certificate_file=None,
                 certificate_host_validation=False):
        """Initialize connection parameters for the ONTAP/DFM server.

        When both a private key and a client certificate are supplied,
        the transport is forced to HTTPS and the auth style to
        certificate authentication, overriding the given arguments.

        :param ssl_cert_path: path used to verify the server certificate;
            when None, the default CA bundle is used instead.
        """
        super(BaseClient, self).__init__()
        self._host = host
        if private_key_file and certificate_file:
            # Client certificate material implies certificate auth over TLS.
            transport_type = TRANSPORT_TYPE_HTTPS
            style = STYLE_CERTIFICATE
        self.set_transport_type(transport_type)
        self.set_style(style)
        if port:
            self.set_port(port)
        self._username = username
        self._password = password
        self._trace = trace
        self._api_trace_pattern = api_trace_pattern
        # Forces the HTTP session to be rebuilt on next use.
        self._refresh_conn = True
        if ssl_cert_path is not None:
            self._ssl_verify = ssl_cert_path
        else:
            # Note(felipe_rodrigues): it will verify with the Mozilla CA
            # roots, given by the certifi package.
            self._ssl_verify = True
        self._private_key_file = private_key_file
        self._certificate_file = certificate_file
        self._ca_certificate_file = ca_certificate_file
        self._certificate_host_validation = certificate_host_validation
        LOG.debug('Using NetApp controller: %s', self._host)
""" if style.lower() not in (STYLE_LOGIN_PASSWORD, STYLE_CERTIFICATE): raise ValueError('Unsupported authentication style') self._auth_style = style.lower() def get_transport_type(self): """Get the transport type protocol.""" return self._protocol def set_transport_type(self, transport_type): """Set the transport type protocol for API. Supports http and https transport types. """ if transport_type.lower() not in ( TRANSPORT_TYPE_HTTP, TRANSPORT_TYPE_HTTPS): raise ValueError('Unsupported transport type') self._protocol = transport_type.lower() self._refresh_conn = True def get_server_type(self): """Get the server type.""" return self._server_type def set_server_type(self, server_type): """Set the target server type. Supports filer and dfm server types. """ raise NotImplementedError() def set_api_version(self, major, minor): """Set the API version.""" try: self._api_major_version = int(major) self._api_minor_version = int(minor) self._api_version = (str(major) + "." + str(minor)) except ValueError: raise ValueError('Major and minor versions must be integers') self._refresh_conn = True def set_system_version(self, system_version): """Set the ONTAP system version.""" self._system_version = system_version self._refresh_conn = True def get_api_version(self): """Gets the API version tuple.""" if hasattr(self, '_api_version'): return (self._api_major_version, self._api_minor_version) return None def get_system_version(self): """Gets the ONTAP system version.""" if hasattr(self, '_system_version'): return self._system_version return None def set_port(self, port): """Set the server communication port.""" try: int(port) except ValueError: raise ValueError('Port must be integer') self._port = str(port) self._refresh_conn = True def get_port(self): """Get the server communication port.""" return self._port def set_timeout(self, seconds): """Sets the timeout in seconds.""" try: self._timeout = int(seconds) except ValueError: raise ValueError('timeout in seconds must be integer') 
def get_timeout(self): """Gets the timeout in seconds if set.""" if hasattr(self, '_timeout'): return self._timeout return None def get_vserver(self): """Get the vserver to use in tunneling.""" return self._vserver def set_vserver(self, vserver): """Set the vserver to use if tunneling gets enabled.""" self._vserver = vserver def set_username(self, username): """Set the user name for authentication.""" self._username = username self._refresh_conn = True def set_password(self, password): """Set the password for authentication.""" self._password = password self._refresh_conn = True def invoke_successfully(self, na_element, api_args=None, enable_tunneling=False, use_zapi=True): """Invokes API and checks execution status as success. Need to set enable_tunneling to True explicitly to achieve it. This helps to use same connection instance to enable or disable tunneling. The vserver or vfiler should be set before this call otherwise tunneling remains disabled. """ pass def _build_session(self): """Builds a session in the client.""" self._session = requests.Session() max_retries = Retry(total=5, connect=5, read=2, backoff_factor=1) adapter = HTTPAdapter(max_retries=max_retries) self._session.mount('%s://' % self._protocol, adapter) if self._auth_style == STYLE_CERTIFICATE: self._session.cert, self._session.verify = ( self._create_certificate_auth_handler()) else: self._session.auth = self._create_basic_auth_handler() self._session.verify = self._ssl_verify headers = self._build_headers() self._session.headers = headers def _build_headers(self): """Adds the necessary headers to the session.""" raise NotImplementedError() def _create_basic_auth_handler(self): """Creates and returns a basic HTTP auth handler.""" return auth.HTTPBasicAuth(self._username, self._password) def _create_certificate_auth_handler(self): """Creates and returns a certificate auth handler.""" self._session.verify = self._certificate_host_validation if self._certificate_file and self._private_key_file: 
self._session.cert = (self._certificate_file, self._private_key_file) # Assigning _session.verify to ca cert file to validate the certs # when we have host validation set to true if self._certificate_host_validation and self._ca_certificate_file: self._session.verify = self._ca_certificate_file return self._session.cert, self._session.verify def __str__(self): """Gets a representation of the client.""" return "server: %s" % (self._host) class ZapiClient(BaseClient): SERVER_TYPE_FILER = 'filer' SERVER_TYPE_DFM = 'dfm' URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer' URL_DFM = 'apis/XMLrequest' NETAPP_NS = 'http://www.netapp.com/filer/admin' def __init__(self, host, server_type=SERVER_TYPE_FILER, transport_type=TRANSPORT_TYPE_HTTP, style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None, password=None, port=None, trace=False, api_trace_pattern=utils.API_TRACE_PATTERN, private_key_file=None, certificate_file=None, ca_certificate_file=None, certificate_host_validation=None): super(ZapiClient, self).__init__( host, transport_type=transport_type, style=style, ssl_cert_path=ssl_cert_path, username=username, password=password, port=port, trace=trace, api_trace_pattern=api_trace_pattern, private_key_file=private_key_file, certificate_file=certificate_file, ca_certificate_file=ca_certificate_file, certificate_host_validation=certificate_host_validation) self.set_server_type(server_type) if port is None: # Not yet set in parent, use defaults self._set_port() def _set_port(self): """Defines which port will be used to communicate with ONTAP.""" if self._protocol == TRANSPORT_TYPE_HTTP: if self._server_type == ZapiClient.SERVER_TYPE_FILER: self.set_port(80) else: self.set_port(8088) else: if self._server_type == ZapiClient.SERVER_TYPE_FILER: self.set_port(443) else: self.set_port(8488) def set_server_type(self, server_type): """Set the target server type. Supports filer and dfm server types. 
""" if server_type.lower() not in (ZapiClient.SERVER_TYPE_FILER, ZapiClient.SERVER_TYPE_DFM): raise ValueError('Unsupported server type') self._server_type = server_type.lower() if self._server_type == ZapiClient.SERVER_TYPE_FILER: self._url = ZapiClient.URL_FILER else: self._url = ZapiClient.URL_DFM self._ns = ZapiClient.NETAPP_NS self._refresh_conn = True def get_vfiler(self): """Get the vfiler to use in tunneling.""" return self._vfiler def set_vfiler(self, vfiler): """Set the vfiler to use if tunneling gets enabled.""" self._vfiler = vfiler def invoke_elem(self, na_element, enable_tunneling=False): """Invoke the API on the server.""" if na_element and not isinstance(na_element, NaElement): ValueError('NaElement must be supplied to invoke API') request_element = self._create_request(na_element, enable_tunneling) request_d = request_element.to_string() api_name = na_element.get_name() api_name_matches_regex = (re.match(self._api_trace_pattern, api_name) is not None) if self._trace and api_name_matches_regex: LOG.debug("Request: %s", request_element.to_string(pretty=True)) if (not hasattr(self, '_session') or not self._session or self._refresh_conn): self._build_session() try: if hasattr(self, '_timeout'): if self._timeout is None: self._timeout = 10 response = self._session.post( self._get_url(), data=request_d, timeout=self._timeout) else: response = self._session.post( self._get_url(), data=request_d) except requests.HTTPError as e: raise NaApiError(e.errno, e.strerror) except requests.URLRequired as e: raise exception.StorageCommunicationException(str(e)) except Exception as e: raise NaApiError(message=e) response_xml = response.text response_element = self._get_result( bytes(bytearray(response_xml, encoding='utf-8'))) if self._trace and api_name_matches_regex: LOG.debug("Response: %s", response_element.to_string(pretty=True)) return response_element def invoke_successfully(self, na_element, api_args=None, enable_tunneling=False, use_zapi=True): """Invokes API 
and checks execution status as success. Need to set enable_tunneling to True explicitly to achieve it. This helps to use same connection instance to enable or disable tunneling. The vserver or vfiler should be set before this call otherwise tunneling remains disabled. """ if api_args: na_element.translate_struct(api_args) result = self.invoke_elem( na_element, enable_tunneling=enable_tunneling) if result.has_attr('status') and result.get_attr('status') == 'passed': return result code = (result.get_attr('errno') or result.get_child_content('errorno') or 'ESTATUSFAILED') if code == ESIS_CLONE_NOT_LICENSED: msg = 'Clone operation failed: FlexClone not licensed.' else: msg = (result.get_attr('reason') or result.get_child_content('reason') or 'Execution status is failed due to unknown reason') raise NaApiError(code, msg) def _create_request(self, na_element, enable_tunneling=False): """Creates request in the desired format.""" netapp_elem = NaElement('netapp') netapp_elem.add_attr('xmlns', self._ns) if hasattr(self, '_api_version'): netapp_elem.add_attr('version', self._api_version) if enable_tunneling: self._enable_tunnel_request(netapp_elem) netapp_elem.add_child_elem(na_element) return netapp_elem def _enable_tunnel_request(self, netapp_elem): """Enables vserver or vfiler tunneling.""" if hasattr(self, '_vfiler') and self._vfiler: if (hasattr(self, '_api_major_version') and hasattr(self, '_api_minor_version') and self._api_major_version >= 1 and self._api_minor_version >= 7): netapp_elem.add_attr('vfiler', self._vfiler) else: raise ValueError('ontapi version has to be atleast 1.7' ' to send request to vfiler') if hasattr(self, '_vserver') and self._vserver: if (hasattr(self, '_api_major_version') and hasattr(self, '_api_minor_version') and self._api_major_version >= 1 and self._api_minor_version >= 15): netapp_elem.add_attr('vfiler', self._vserver) else: raise ValueError('ontapi version has to be atleast 1.15' ' to send request to vserver') @staticmethod def 
_parse_response(response): """Get the NaElement for the response.""" if not response: raise NaApiError('No response received') xml = etree.XML(response) return NaElement(xml) def _get_result(self, response): """Gets the call result.""" processed_response = self._parse_response(response) return processed_response.get_child_by_name('results') def _get_url(self): """Get the base url to send the request.""" host = self._host if ':' in host: host = '[%s]' % host return '%s://%s:%s/%s' % (self._protocol, host, self._port, self._url) def _build_headers(self): """Build and return headers.""" return {'Content-Type': 'text/xml'} class RestClient(BaseClient): def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP, style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None, password=None, port=None, trace=False, api_trace_pattern=utils.API_TRACE_PATTERN, private_key_file=None, certificate_file=None, ca_certificate_file=None, certificate_host_validation=False): super(RestClient, self).__init__( host, transport_type=transport_type, style=style, ssl_cert_path=ssl_cert_path, username=username, password=password, port=port, trace=trace, api_trace_pattern=api_trace_pattern, private_key_file=private_key_file, certificate_file=certificate_file, ca_certificate_file=ca_certificate_file, certificate_host_validation=certificate_host_validation) if port is None: # Not yet set in parent, use defaults self._set_port() def _set_port(self): if self._protocol == TRANSPORT_TYPE_HTTP: self.set_port(80) else: self.set_port(443) def _get_request_info(self, api_name, session): """Returns the request method and url to be used in the REST call.""" request_methods = { 'post': session.post, 'get': session.get, 'put': session.put, 'delete': session.delete, 'patch': session.patch, } rest_call = rest_endpoints.endpoints.get(api_name) return request_methods[rest_call['method']], rest_call['url'] def _add_query_params_to_url(self, url, query): """Populates the URL with specified filters.""" filters = 
"" for k, v in query.items(): filters += "%(key)s=%(value)s&" % {"key": k, "value": v} url += "?" + filters return url def invoke_elem(self, na_element, api_args=None): """Invoke the API on the server.""" if na_element and not isinstance(na_element, NaElement): raise ValueError('NaElement must be supplied to invoke API') api_name = na_element.get_name() api_name_matches_regex = (re.match(self._api_trace_pattern, api_name) is not None) data = api_args.get("body") if api_args else {} if (not hasattr(self, '_session') or not self._session or self._refresh_conn): self._build_session() request_method, action_url = self._get_request_info( api_name, self._session) url_params = api_args.get("url_params") if api_args else None if url_params: action_url = action_url % url_params query = api_args.get("query") if api_args else None if query: action_url = self._add_query_params_to_url( action_url, api_args['query']) url = self._get_base_url() + action_url data = jsonutils.dumps(data) if data else data if self._trace and api_name_matches_regex: message = ("Request: %(method)s %(url)s. Request body " "%(body)s") % { "method": request_method, "url": action_url, "body": api_args.get("body") if api_args else {} } LOG.debug(message) try: if hasattr(self, '_timeout'): response = request_method( url, data=data, timeout=self._timeout) else: response = request_method(url, data=data) except requests.HTTPError as e: raise NaApiError(e.errno, e.strerror) except requests.URLRequired as e: raise exception.StorageCommunicationException(str(e)) except Exception as e: raise NaApiError(message=e) response = ( jsonutils.loads(response.content) if response.content else None) if self._trace and api_name_matches_regex: LOG.debug("Response: %s", response) return response def invoke_successfully(self, na_element, api_args=None, enable_tunneling=False, use_zapi=False): """Invokes API and checks execution status as success. Need to set enable_tunneling to True explicitly to achieve it. 
This helps to use same connection instance to enable or disable tunneling. The vserver or vfiler should be set before this call otherwise tunneling remains disabled. """ result = self.invoke_elem(na_element, api_args=api_args) if not result.get('error'): return result result_error = result.get('error') code = (result_error.get('code') or 'ESTATUSFAILED') if code == ESIS_CLONE_NOT_LICENSED: msg = 'Clone operation failed: FlexClone not licensed.' else: msg = (result_error.get('message') or 'Execution status is failed due to unknown reason') raise NaApiError(code, msg) def _get_base_url(self): """Get the base URL for REST requests.""" host = self._host if ':' in host: host = '[%s]' % host return '%s://%s:%s/api/' % (self._protocol, host, self._port) def _build_headers(self): """Build and return headers for a REST request.""" headers = { "Accept": "application/json", "Content-Type": "application/json" } return headers class NaServer(object): """Encapsulates server connection logic.""" def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP, style=STYLE_LOGIN_PASSWORD, ssl_cert_path=None, username=None, password=None, port=None, trace=False, api_trace_pattern=utils.API_TRACE_PATTERN, private_key_file=None, certificate_file=None, ca_certificate_file=None, certificate_host_validation=False): self.zapi_client = ZapiClient( host, transport_type=transport_type, style=style, ssl_cert_path=ssl_cert_path, username=username, password=password, port=port, trace=trace, api_trace_pattern=api_trace_pattern, private_key_file=private_key_file, certificate_file=certificate_file, ca_certificate_file=ca_certificate_file, certificate_host_validation=certificate_host_validation) self.rest_client = RestClient( host, transport_type=transport_type, style=style, ssl_cert_path=ssl_cert_path, username=username, password=password, port=port, trace=trace, api_trace_pattern=api_trace_pattern, private_key_file=private_key_file, certificate_file=certificate_file, 
ca_certificate_file=ca_certificate_file, certificate_host_validation=certificate_host_validation) self._host = host LOG.debug('Using NetApp controller: %s', self._host) def get_transport_type(self, use_zapi_client=True): """Get the transport type protocol.""" return self.get_client(use_zapi=use_zapi_client).get_transport_type() def set_transport_type(self, transport_type): """Set the transport type protocol for API. Supports http and https transport types. """ self.zapi_client.set_transport_type(transport_type) self.rest_client.set_transport_type(transport_type) def get_style(self, use_zapi_client=True): """Get the authorization style for communicating with the server.""" return self.get_client(use_zapi=use_zapi_client).get_style() def set_style(self, style): """Set the authorization style for communicating with the server. Supports basic_auth for now. Certificate_auth mode to be done. """ self.zapi_client.set_style(style) self.rest_client.set_style(style) def get_server_type(self, use_zapi_client=True): """Get the target server type.""" return self.get_client(use_zapi=use_zapi_client).get_server_type() def set_server_type(self, server_type): """Set the target server type. Supports filer and dfm server types. 
""" self.zapi_client.set_server_type(server_type) self.rest_client.set_server_type(server_type) def set_api_version(self, major, minor): """Set the API version.""" self.zapi_client.set_api_version(major, minor) self.rest_client.set_api_version(1, 0) def set_system_version(self, system_version): """Set the ONTAP system version.""" self.zapi_client.set_system_version(system_version) self.rest_client.set_system_version(system_version) def get_api_version(self, use_zapi_client=True): """Gets the API version tuple.""" return self.get_client(use_zapi=use_zapi_client).get_api_version() def get_system_version(self, use_zapi_client=True): """Gets the ONTAP system version.""" return self.get_client(use_zapi=use_zapi_client).get_system_version() def set_port(self, port): """Set the server communication port.""" self.zapi_client.set_port(port) self.rest_client.set_port(port) def get_port(self, use_zapi_client=True): """Get the server communication port.""" return self.get_client(use_zapi=use_zapi_client).get_port() def set_timeout(self, seconds): """Sets the timeout in seconds.""" self.zapi_client.set_timeout(seconds) self.rest_client.set_timeout(seconds) def get_timeout(self, use_zapi_client=True): """Gets the timeout in seconds if set.""" return self.get_client(use_zapi=use_zapi_client).get_timeout() def get_vfiler(self): """Get the vfiler to use in tunneling.""" return self.zapi_client.get_vfiler() def set_vfiler(self, vfiler): """Set the vfiler to use if tunneling gets enabled.""" self.zapi_client.set_vfiler(vfiler) def get_vserver(self, use_zapi_client=True): """Get the vserver to use in tunneling.""" return self.get_client(use_zapi=use_zapi_client).get_vserver() def set_vserver(self, vserver): """Set the vserver to use if tunneling gets enabled.""" self.zapi_client.set_vserver(vserver) self.rest_client.set_vserver(vserver) def set_username(self, username): """Set the user name for authentication.""" self.zapi_client.set_username(username) 
self.rest_client.set_username(username) def set_password(self, password): """Set the password for authentication.""" self.zapi_client.set_password(password) self.rest_client.set_password(password) def get_client(self, use_zapi=True): """Chooses the client to be used in the request.""" if use_zapi: return self.zapi_client return self.rest_client def invoke_successfully(self, na_element, api_args=None, enable_tunneling=False, use_zapi=True): """Invokes API and checks execution status as success. Need to set enable_tunneling to True explicitly to achieve it. This helps to use same connection instance to enable or disable tunneling. The vserver or vfiler should be set before this call otherwise tunneling remains disabled. """ return self.get_client(use_zapi=use_zapi).invoke_successfully( na_element, api_args=api_args, enable_tunneling=enable_tunneling) def __str__(self): return "server: %s" % (self._host) class NaElement(object): """Class wraps basic building block for NetApp API request.""" def __init__(self, name): """Name of the element or etree.Element.""" if isinstance(name, etree._Element): self._element = name else: self._element = etree.Element(name) def get_name(self): """Returns the tag name of the element.""" return self._element.tag def set_content(self, text): """Set the text string for the element.""" self._element.text = text def get_content(self): """Get the text for the element.""" return self._element.text def add_attr(self, name, value): """Add the attribute to the element.""" self._element.set(name, value) def add_attrs(self, **attrs): """Add multiple attributes to the element.""" for attr in attrs.keys(): self._element.set(attr, attrs.get(attr)) def add_child_elem(self, na_element): """Add the child element to the element.""" if isinstance(na_element, NaElement): self._element.append(na_element._element) return raise ValueError(_("Can only add elements of type NaElement.")) def get_child_by_name(self, name): """Get the child element by the tag 
name.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return NaElement(child) return None def get_child_content(self, name): """Get the content of the child.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return child.text return None def get_children(self): """Get the children for the element.""" return [NaElement(el) for el in self._element.iterchildren()] def has_attr(self, name): """Checks whether element has attribute.""" attributes = self._element.attrib or {} return name in attributes.keys() def get_attr(self, name): """Get the attribute with the given name.""" attributes = self._element.attrib or {} return attributes.get(name) def get_attr_names(self): """Returns the list of attribute names.""" attributes = self._element.attrib or {} return attributes.keys() def add_new_child(self, name, content, convert=False): """Add child with tag name and context. Convert replaces entity refs to chars. 
""" child = NaElement(name) if convert: content = NaElement._convert_entity_refs(content) child.set_content(content) self.add_child_elem(child) @staticmethod def _convert_entity_refs(text): """Converts entity refs to chars to handle etree auto conversions.""" text = text.replace("<", "<") text = text.replace(">", ">") return text @staticmethod def create_node_with_children(node, **children): """Creates and returns named node with children.""" parent = NaElement(node) for child in children.keys(): parent.add_new_child(child, children.get(child, None)) return parent def add_node_with_children(self, node, **children): """Creates named node with children.""" parent = NaElement.create_node_with_children(node, **children) self.add_child_elem(parent) def to_string(self, pretty=False, method='xml', encoding='UTF-8'): """Prints the element to string.""" return etree.tostring(self._element, method=method, encoding=encoding, pretty_print=pretty) def __getitem__(self, key): """Dict getter method for NaElement. Returns NaElement list if present, text value in case no NaElement node children or attribute value if present. """ child = self.get_child_by_name(key) if child: if child.get_children(): return child else: return child.get_content() elif self.has_attr(key): return self.get_attr(key) raise KeyError(_('No element by given name %s.') % (key)) def __setitem__(self, key, value): """Dict setter method for NaElement. Accepts dict, list, tuple, str, int, float and long as valid value. 
""" if key: if value: if isinstance(value, NaElement): child = NaElement(key) child.add_child_elem(value) self.add_child_elem(child) elif isinstance( value, (str, ) + (int, ) + (float, )): self.add_new_child(key, str(value)) elif isinstance(value, (list, tuple, dict)): child = NaElement(key) child.translate_struct(value) self.add_child_elem(child) else: raise TypeError(_('Not a valid value for NaElement.')) else: self.add_child_elem(NaElement(key)) else: raise KeyError(_('NaElement name cannot be null.')) def translate_struct(self, data_struct): """Convert list, tuple, dict to NaElement and appends. Example usage: 1. vl1 vl2 vl3 The above can be achieved by doing root = NaElement('root') root.translate_struct({'elem1': 'vl1', 'elem2': 'vl2', 'elem3': 'vl3'}) 2. vl1 vl2 vl3 The above can be achieved by doing root = NaElement('root') root.translate_struct([{'elem1': 'vl1', 'elem2': 'vl2'}, {'elem1': 'vl3'}]) """ if isinstance(data_struct, (list, tuple)): for el in data_struct: if isinstance(el, (list, tuple, dict)): self.translate_struct(el) else: self.add_child_elem(NaElement(el)) elif isinstance(data_struct, dict): for k in data_struct.keys(): child = NaElement(k) if isinstance(data_struct[k], (dict, list, tuple)): child.translate_struct(data_struct[k]) else: if data_struct[k]: child.set_content(str(data_struct[k])) self.add_child_elem(child) else: raise ValueError(_('Type cannot be converted into NaElement.')) class NaApiError(Exception): """Base exception class for NetApp API errors.""" def __init__(self, code='unknown', message='unknown'): self.code = code self.message = message def __str__(self, *args, **kwargs): return 'NetApp API failed. Reason - %s:%s' % (self.code, self.message) def invoke_api(na_server, api_name, api_family='cm', query=None, des_result=None, additional_elems=None, is_iter=False, records=0, tag=None, timeout=0, tunnel=None): """Invokes any given API call to a NetApp server. 
:param na_server: na_server instance :param api_name: API name string :param api_family: cm or 7m :param query: API query as dict :param des_result: desired result as dict :param additional_elems: dict other than query and des_result :param is_iter: is iterator API :param records: limit for records, 0 for infinite :param timeout: timeout seconds :param tunnel: tunnel entity, vserver or vfiler name """ record_step = 50 if not (na_server or isinstance(na_server, NaServer)): msg = _("Requires an NaServer instance.") raise exception.InvalidInput(reason=msg) server = copy.copy(na_server) if api_family == 'cm': server.set_vserver(tunnel) else: server.set_vfiler(tunnel) if timeout > 0: server.set_timeout(timeout) iter_records = 0 cond = True while cond: na_element = create_api_request( api_name, query, des_result, additional_elems, is_iter, record_step, tag) result = server.invoke_successfully(na_element, True) if is_iter: if records > 0: iter_records = iter_records + record_step if iter_records >= records: cond = False tag_el = result.get_child_by_name('next-tag') tag = tag_el.get_content() if tag_el else None if not tag: cond = False else: cond = False yield result def create_api_request(api_name, query=None, des_result=None, additional_elems=None, is_iter=False, record_step=50, tag=None): """Creates a NetApp API request. 
:param api_name: API name string :param query: API query as dict :param des_result: desired result as dict :param additional_elems: dict other than query and des_result :param is_iter: is iterator API :param record_step: records at a time for iter API :param tag: next tag for iter API """ api_el = NaElement(api_name) if query: query_el = NaElement('query') query_el.translate_struct(query) api_el.add_child_elem(query_el) if des_result: res_el = NaElement('desired-attributes') res_el.translate_struct(des_result) api_el.add_child_elem(res_el) if additional_elems: api_el.translate_struct(additional_elems) if is_iter: api_el.add_new_child('max-records', str(record_step)) if tag: api_el.add_new_child('tag', tag, True) return api_el ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/client/client_base.py0000664000175000017500000001121700000000000026443 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from oslo_utils import excutils from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) class NetAppBaseClient(object): def __init__(self, **kwargs): self.connection = netapp_api.NaServer( host=kwargs['hostname'], transport_type=kwargs['transport_type'], ssl_cert_path=kwargs['ssl_cert_path'], port=kwargs['port'], username=kwargs['username'], password=kwargs['password'], trace=kwargs.get('trace', False), api_trace_pattern=kwargs.get('api_trace_pattern', na_utils.API_TRACE_PATTERN), private_key_file=kwargs['private_key_file'], certificate_file=kwargs['certificate_file'], ca_certificate_file=kwargs['ca_certificate_file'], certificate_host_validation=kwargs['certificate_host_validation']) def get_ontapi_version(self, cached=True): """Gets the supported ontapi version.""" if cached: return self.connection.get_api_version() result = self.send_request('system-get-ontapi-version', enable_tunneling=False) major = result.get_child_content('major-version') minor = result.get_child_content('minor-version') return major, minor @na_utils.trace def get_system_version(self, cached=True): """Gets the current Data ONTAP version.""" if cached: return self.connection.get_system_version() result = self.send_request('system-get-version', enable_tunneling=False) version_tuple = result.get_child_by_name( 'version-tuple') or netapp_api.NaElement('none') system_version_tuple = version_tuple.get_child_by_name( 'system-version-tuple') or netapp_api.NaElement('none') version = {} version['version'] = result.get_child_content('version') version['version-tuple'] = ( int(system_version_tuple.get_child_content('generation')), int(system_version_tuple.get_child_content('major')), int(system_version_tuple.get_child_content('minor'))) return version def _init_features(self): """Set up the repository of available Data ONTAP features.""" self.features = Features() def 
_strip_xml_namespace(self, string): if string.startswith('{') and '}' in string: return string.split('}', 1)[1] return string def send_request(self, api_name, api_args=None, enable_tunneling=True, use_zapi=True): """Sends request to Ontapi.""" request = netapp_api.NaElement(api_name) return self.connection.invoke_successfully( request, api_args=api_args, enable_tunneling=enable_tunneling, use_zapi=use_zapi) @na_utils.trace def get_licenses(self): try: result = self.send_request('license-v2-list-info') except netapp_api.NaApiError: with excutils.save_and_reraise_exception(): LOG.exception("Could not get licenses list.") return sorted( [child.get_child_content('package').lower() for child in result.get_child_by_name('licenses').get_children()]) def send_ems_log_message(self, message_dict): """Sends a message to the Data ONTAP EMS log.""" raise NotImplementedError() class Features(object): def __init__(self): self.defined_features = set() def add_feature(self, name, supported=True): if not isinstance(supported, bool): raise TypeError("Feature value must be a bool type.") self.defined_features.add(name) setattr(self, name, supported) def __getattr__(self, name): # NOTE(cknight): Needed to keep pylint happy. raise AttributeError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/client/client_cmode.py0000664000175000017500000102141000000000000026615 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2018 Jose Porrua. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import hashlib import re import time from oslo_log import log from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units from oslo_utils import uuidutils from manila import exception from manila.i18n import _ from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_base from manila.share.drivers.netapp import utils as na_utils from manila import utils as manila_utils LOG = log.getLogger(__name__) DELETED_PREFIX = 'deleted_manila_' DEFAULT_IPSPACE = 'Default' IPSPACE_PREFIX = 'ipspace_' CLUSTER_IPSPACES = ('Cluster', DEFAULT_IPSPACE) DEFAULT_BROADCAST_DOMAIN = 'Default' BROADCAST_DOMAIN_PREFIX = 'domain_' DEFAULT_MAX_PAGE_LENGTH = 50 CUTOVER_ACTION_MAP = { 'defer': 'defer_on_failure', 'abort': 'abort_on_failure', 'force': 'force', 'wait': 'wait', } class NetAppCmodeClient(client_base.NetAppBaseClient): def __init__(self, **kwargs): super(NetAppCmodeClient, self).__init__(**kwargs) self.vserver = kwargs.get('vserver') self.connection.set_vserver(self.vserver) # Default values to run first api. 
self.connection.set_api_version(1, 15) (major, minor) = self.get_ontapi_version(cached=False) self.connection.set_api_version(major, minor) system_version = self.get_system_version(cached=False) self.connection.set_system_version(system_version) self._init_features() def _init_features(self): """Initialize cDOT feature support map.""" super(NetAppCmodeClient, self)._init_features() ontapi_version = self.get_ontapi_version(cached=True) ontapi_1_20 = ontapi_version >= (1, 20) ontapi_1_2x = (1, 20) <= ontapi_version < (1, 30) ontapi_1_30 = ontapi_version >= (1, 30) ontapi_1_100 = ontapi_version >= (1, 100) ontapi_1_110 = ontapi_version >= (1, 110) ontapi_1_120 = ontapi_version >= (1, 120) ontapi_1_140 = ontapi_version >= (1, 140) ontapi_1_150 = ontapi_version >= (1, 150) ontapi_1_180 = ontapi_version >= (1, 180) ontapi_1_191 = ontapi_version >= (1, 191) ontap_9_10 = self.get_system_version()['version-tuple'] >= (9, 10, 0) ontap_9_10_1 = self.get_system_version()['version-tuple'] >= (9, 10, 1) ontap_9_11_1 = self.get_system_version()['version-tuple'] >= (9, 11, 1) ontap_9_12_1 = self.get_system_version()['version-tuple'] >= (9, 12, 1) self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20) self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x) self.features.add_feature('SYSTEM_CONSTITUENT_METRICS', supported=ontapi_1_30) self.features.add_feature('BROADCAST_DOMAINS', supported=ontapi_1_30) self.features.add_feature('IPSPACES', supported=ontapi_1_30) self.features.add_feature('SUBNETS', supported=ontapi_1_30) self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontapi_1_30) self.features.add_feature('ADVANCED_DISK_PARTITIONING', supported=ontapi_1_30) self.features.add_feature('KERBEROS_VSERVER', supported=ontapi_1_30) self.features.add_feature('FLEXVOL_ENCRYPTION', supported=ontapi_1_110) self.features.add_feature('SVM_DR', supported=ontapi_1_140) self.features.add_feature('ADAPTIVE_QOS', supported=ontapi_1_140) 
        self.features.add_feature('TRANSFER_LIMIT_NFS_CONFIG',
                                  supported=ontapi_1_140)
        self.features.add_feature('CIFS_DC_ADD_SKIP_CHECK',
                                  supported=ontapi_1_150)
        self.features.add_feature('LDAP_LDAP_SERVERS',
                                  supported=ontapi_1_120)
        self.features.add_feature('FLEXGROUP', supported=ontapi_1_180)
        self.features.add_feature('FLEXGROUP_FAN_OUT', supported=ontapi_1_191)
        self.features.add_feature('SVM_MIGRATE', supported=ontap_9_10)
        self.features.add_feature('SNAPLOCK', supported=ontapi_1_100)
        self.features.add_feature('UNIFIED_AGGR', supported=ontap_9_10_1)
        self.features.add_feature('DELETE_RETENTION_HOURS',
                                  supported=ontap_9_11_1)
        self.features.add_feature('AES_ENCRYPTION_TYPES',
                                  supported=ontap_9_12_1)

    def _invoke_vserver_api(self, na_element, vserver):
        # Invoke an API call scoped (tunneled) to the given vserver, using a
        # shallow copy of the connection so this client's own vserver scope is
        # not disturbed.
        server = copy.copy(self.connection)
        server.set_vserver(vserver)
        result = server.invoke_successfully(na_element, True)
        return result

    def _has_records(self, api_result_element):
        # True when an iterator-style API result reports one or more records.
        if (not api_result_element.get_child_content('num-records') or
                api_result_element.get_child_content('num-records') == '0'):
            return False
        else:
            return True

    def _get_record_count(self, api_result_element):
        # Return 'num-records' as an int; a missing element yields None and
        # therefore TypeError, which is reported as a NetAppException.
        try:
            return int(api_result_element.get_child_content('num-records'))
        except TypeError:
            msg = _('Missing record count for NetApp iterator API invocation.')
            raise exception.NetAppException(msg)

    def set_vserver(self, vserver):
        # Re-scope this client (and its underlying connection) to a vserver.
        self.vserver = vserver
        self.connection.set_vserver(vserver)

    def send_iter_request(self, api_name, api_args=None,
                          max_page_length=DEFAULT_MAX_PAGE_LENGTH,
                          enable_tunneling=True):
        """Invoke an iterator-style getter API."""

        if not api_args:
            api_args = {}

        api_args['max-records'] = max_page_length

        # Get first page
        result = self.send_request(api_name, api_args,
                                   enable_tunneling=enable_tunneling)

        # Most commonly, we can just return here if there is no more data
        next_tag = result.get_child_content('next-tag')
        if not next_tag:
            return result

        # Ensure pagination data is valid and prepare to store remaining pages
        num_records = \
            self._get_record_count(result)
        attributes_list = result.get_child_by_name('attributes-list')
        if not attributes_list:
            msg = _('Missing attributes list for API %s.') % api_name
            raise exception.NetAppException(msg)

        # Get remaining pages, saving data into first page
        while next_tag is not None:
            next_api_args = copy.deepcopy(api_args)
            next_api_args['tag'] = next_tag
            next_result = self.send_request(api_name, next_api_args,
                                            enable_tunneling=enable_tunneling)

            next_attributes_list = next_result.get_child_by_name(
                'attributes-list') or netapp_api.NaElement('none')

            for record in next_attributes_list.get_children():
                attributes_list.add_child_elem(record)

            num_records += self._get_record_count(next_result)
            next_tag = next_result.get_child_content('next-tag')

        # Fix up the first page so it looks like a single complete result.
        result.get_child_by_name('num-records').set_content(
            str(num_records))
        result.get_child_by_name('next-tag').set_content('')
        return result

    @na_utils.trace
    def create_vserver(self, vserver_name, root_volume_aggregate_name,
                       root_volume_name, aggregate_names, ipspace_name,
                       security_cert_expire_days, delete_retention_hours,
                       logical_space_reporting):
        """Creates new vserver and assigns aggregates."""
        self._create_vserver(
            vserver_name, aggregate_names, ipspace_name,
            delete_retention_hours, root_volume_name=root_volume_name,
            root_volume_aggregate_name=root_volume_aggregate_name,
            root_volume_security_style='unix', name_server_switch='file',
            logical_space_reporting=logical_space_reporting)
        self._modify_security_cert(vserver_name, security_cert_expire_days)

    @na_utils.trace
    def create_vserver_dp_destination(self, vserver_name, aggregate_names,
                                      ipspace_name, delete_retention_hours,
                                      logical_space_reporting):
        """Creates new 'dp_destination' vserver and assigns aggregates."""
        self._create_vserver(
            vserver_name, aggregate_names, ipspace_name,
            delete_retention_hours, subtype='dp_destination',
            logical_space_reporting=logical_space_reporting)

    @na_utils.trace
    def _create_vserver(self, vserver_name, aggregate_names, ipspace_name,
                        delete_retention_hours, root_volume_name=None,
                        root_volume_aggregate_name=None,
                        root_volume_security_style=None,
                        name_server_switch=None, subtype=None,
                        logical_space_reporting=False):
        """Creates new vserver and assigns aggregates."""
        create_args = {
            'vserver-name': vserver_name,
        }
        # Optional attributes are only sent when supplied by the caller.
        if root_volume_name:
            create_args['root-volume'] = root_volume_name
        if root_volume_aggregate_name:
            create_args['root-volume-aggregate'] = root_volume_aggregate_name
        if root_volume_security_style:
            create_args['root-volume-security-style'] = (
                root_volume_security_style)
        if name_server_switch:
            create_args['name-server-switch'] = {
                'nsswitch': name_server_switch}
        if subtype:
            create_args['vserver-subtype'] = subtype
        if ipspace_name:
            if not self.features.IPSPACES:
                msg = 'IPSpaces are not supported on this backend.'
                raise exception.NetAppException(msg)
            else:
                create_args['ipspace'] = ipspace_name
        create_args['is-space-reporting-logical'] = (
            'true' if logical_space_reporting else 'false')
        create_args['is-space-enforcement-logical'] = (
            'true' if logical_space_reporting else 'false')
        LOG.debug('Creating Vserver %(vserver)s with create args '
                  '%(args)s', {'vserver': vserver_name, 'args': create_args})
        self.send_request('vserver-create', create_args)

        # Assign the aggregates (and optionally the volume delete retention
        # period) in a follow-up modify call.
        aggr_list = [{'aggr-name': aggr_name} for aggr_name in aggregate_names]
        modify_args = {
            'aggr-list': aggr_list,
            'vserver-name': vserver_name,
        }
        if (delete_retention_hours != 0 and
                self.features.DELETE_RETENTION_HOURS):
            modify_args.update(
                {'volume-delete-retention-hours': delete_retention_hours})
        self.send_request('vserver-modify', modify_args)

    @na_utils.trace
    def _modify_security_cert(self, vserver_name, security_cert_expire_days):
        """Create new security certificate with given expire days."""

        # Do not modify security certificate if specified expire days are
        # equal to default security certificate expire days i.e. 365.
        if security_cert_expire_days == 365:
            return

        # Look up the existing (default) server certificates so their serial
        # numbers can be distinguished from the new one created below.
        api_args = {
            'query': {
                'certificate-info': {
                    'vserver': vserver_name,
                    'common-name': vserver_name,
                    'certificate-authority': vserver_name,
                    'type': 'server',
                },
            },
            'desired-attributes': {
                'certificate-info': {
                    'serial-number': None,
                },
            },
        }
        result = self.send_iter_request('security-certificate-get-iter',
                                        api_args)
        try:
            # NOTE(review): get_child_by_name likely returns None (rather
            # than raising AttributeError) when the element is absent -
            # confirm this except path is actually reachable.
            old_certificate_info_list = result.get_child_by_name(
                'attributes-list')
        except AttributeError:
            LOG.warning('Could not retrieve certificate-info for vserver '
                        '%(server)s.', {'server': vserver_name})
            return

        old_serial_nums = []
        for certificate_info in old_certificate_info_list.get_children():
            serial_num = certificate_info.get_child_content('serial-number')
            old_serial_nums.append(serial_num)

        # Create a replacement certificate with the requested lifetime.
        try:
            create_args = {
                'vserver': vserver_name,
                'common-name': vserver_name,
                'type': 'server',
                'expire-days': security_cert_expire_days,
            }
            self.send_request('security-certificate-create', create_args)
        except netapp_api.NaApiError as e:
            LOG.warning("Failed to create new security certificate: %s - %s",
                        e.code, e.message)
            return

        # Re-query to find the newly created certificate's serial number.
        api_args = {
            'query': {
                'certificate-info': {
                    'vserver': vserver_name,
                    'common-name': vserver_name,
                    'certificate-authority': vserver_name,
                    'type': 'server',
                },
            },
            'desired-attributes': {
                'certificate-info': {
                    'serial-number': None,
                },
            },
        }
        result = self.send_iter_request('security-certificate-get-iter',
                                        api_args)
        try:
            new_certificate_info_list = result.get_child_by_name(
                'attributes-list')
        except AttributeError:
            LOG.warning('Could not retrieve certificate-info for vserver '
                        '%(server)s.', {'server': vserver_name})
            return

        # Enable server authentication on the new certificate (the one whose
        # serial number was not present before the create call).
        for certificate_info in new_certificate_info_list.get_children():
            serial_num = certificate_info.get_child_content('serial-number')
            if serial_num not in old_serial_nums:
                try:
                    ssl_modify_args = {
                        'certificate-authority': vserver_name,
                        'common-name': vserver_name,
                        'certificate-serial-number': serial_num,
                        'vserver': vserver_name,
                        'client-authentication-enabled': 'false',
                        'server-authentication-enabled': 'true',
                    }
                    self.send_request('security-ssl-modify', ssl_modify_args)
                except netapp_api.NaApiError as e:
                    LOG.debug('Failed to modify SSL for security certificate '
                              'with serial number %s: %s - %s',
                              serial_num, e.code, e.message)

        # Delete all old security certificates
        for certificate_info in old_certificate_info_list.get_children():
            serial_num = certificate_info.get_child_content('serial-number')
            delete_args = {
                'certificate-authority': vserver_name,
                'common-name': vserver_name,
                'serial-number': serial_num,
                'type': 'server',
                'vserver': vserver_name,
            }
            try:
                self.send_request('security-certificate-delete', delete_args)
            except netapp_api.NaApiError as e:
                # Best-effort cleanup; a leftover certificate is logged, not
                # fatal.
                LOG.warning('Failed to delete security certificate with '
                            'serial number %s: %s - %s',
                            serial_num, e.code, e.message)

    @na_utils.trace
    def get_vserver_info(self, vserver_name):
        """Retrieves Vserver info."""
        LOG.debug('Retrieving Vserver %s information.', vserver_name)

        api_args = {
            'query': {
                'vserver-info': {
                    'vserver-name': vserver_name,
                },
            },
            'desired-attributes': {
                'vserver-info': {
                    'vserver-name': None,
                    'vserver-subtype': None,
                    'state': None,
                    'operational-state': None,
                },
            },
        }
        result = self.send_iter_request('vserver-get-iter', api_args)
        # Returns None (implicitly) when the vserver does not exist.
        if not self._has_records(result):
            return
        try:
            vserver_info = result.get_child_by_name(
                'attributes-list').get_child_by_name(
                    'vserver-info')
            vserver_subtype = vserver_info.get_child_content(
                'vserver-subtype')
            vserver_op_state = vserver_info.get_child_content(
                'operational-state')
            vserver_state = vserver_info.get_child_content('state')
        except AttributeError:
            msg = _('Could not retrieve vserver-info for %s.') % vserver_name
            raise exception.NetAppException(msg)

        vserver_info = {
            'name': vserver_name,
            'subtype': vserver_subtype,
            'operational_state': vserver_op_state,
            'state': vserver_state,
        }
        return vserver_info

    @na_utils.trace
    def vserver_exists(self, vserver_name):
        """Checks if Vserver exists."""
        LOG.debug('Checking if Vserver %s exists', vserver_name)
        api_args = {
            'query': {
                'vserver-info': {
                    'vserver-name': vserver_name,
                },
            },
            'desired-attributes': {
                'vserver-info': {
                    'vserver-name': None,
                },
            },
        }
        try:
            # Tunneling is disabled: this query must run against the cluster,
            # not be scoped to any particular vserver.
            result = self.send_iter_request('vserver-get-iter', api_args,
                                            enable_tunneling=False)
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EVSERVERNOTFOUND:
                return False
            else:
                raise
        return self._has_records(result)

    @na_utils.trace
    def get_vserver_root_volume_name(self, vserver_name):
        """Get the root volume name of the vserver."""
        api_args = {
            'query': {
                'vserver-info': {
                    'vserver-name': vserver_name,
                },
            },
            'desired-attributes': {
                'vserver-info': {
                    'root-volume': None,
                },
            },
        }
        vserver_info = self.send_iter_request('vserver-get-iter', api_args)
        try:
            root_volume_name = vserver_info.get_child_by_name(
                'attributes-list').get_child_by_name(
                    'vserver-info').get_child_content('root-volume')
        except AttributeError:
            msg = _('Could not determine root volume name '
                    'for Vserver %s.') % vserver_name
            raise exception.NetAppException(msg)
        return root_volume_name

    @na_utils.trace
    def get_vserver_ipspace(self, vserver_name):
        """Get the IPspace of the vserver, or None if not supported."""
        if not self.features.IPSPACES:
            return None

        api_args = {
            'query': {
                'vserver-info': {
                    'vserver-name': vserver_name,
                },
            },
            'desired-attributes': {
                'vserver-info': {
                    'ipspace': None,
                },
            },
        }
        vserver_info = self.send_iter_request('vserver-get-iter', api_args)
        try:
            ipspace = vserver_info.get_child_by_name(
                'attributes-list').get_child_by_name(
                    'vserver-info').get_child_content('ipspace')
        except AttributeError:
            msg = _('Could not determine IPspace for Vserver %s.')
            raise exception.NetAppException(msg % vserver_name)
        return ipspace

    @na_utils.trace
    def ipspace_has_data_vservers(self, ipspace_name):
        """Check whether an IPspace has any data Vservers assigned to it."""
        if not self.features.IPSPACES:
            return False

        api_args = {
            'query': {
                'vserver-info': {
                    'ipspace': ipspace_name,
                    'vserver-type': 'data'
                },
            },
            'desired-attributes': {
                'vserver-info': {
                    'vserver-name': None,
                },
            },
        }
        result = self.send_iter_request('vserver-get-iter', api_args)
        return self._has_records(result)

    @na_utils.trace
    def list_vservers(self, vserver_type='data'):
        """Get the names of vservers present, optionally filtered by type."""
        # Pass vserver_type=None (falsy) to list vservers of every type.
        query = {
            'vserver-info': {
                'vserver-type': vserver_type,
            }
        } if vserver_type else None

        api_args = {
            'desired-attributes': {
                'vserver-info': {
                    'vserver-name': None,
                },
            },
        }
        if query:
            api_args['query'] = query

        result = self.send_iter_request('vserver-get-iter', api_args)
        vserver_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        return [vserver_info.get_child_content('vserver-name')
                for vserver_info in vserver_info_list.get_children()]

    @na_utils.trace
    def get_vserver_volume_count(self):
        """Get the number of volumes present on a cluster or vserver.

        Call this on a vserver client to see how many volumes exist on that
        vserver.
        """
        api_args = {
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None,
                    },
                },
            },
        }
        volumes_data = self.send_iter_request('volume-get-iter', api_args)
        return self._get_record_count(volumes_data)

    @na_utils.trace
    def delete_vserver(self, vserver_name, vserver_client,
                       security_services=None):
        """Deletes a Vserver.

        Checks if Vserver exists and does not have active shares.
        Offlines and destroys root volumes.  Deletes Vserver.
        """
        vserver_info = self.get_vserver_info(vserver_name)
        if vserver_info is None:
            LOG.error("Vserver %s does not exist.", vserver_name)
            return

        is_dp_destination = vserver_info.get('subtype') == 'dp_destination'
        root_volume_name = self.get_vserver_root_volume_name(vserver_name)
        volumes_count = vserver_client.get_vserver_volume_count()

        # NOTE(dviroel): 'dp_destination' vservers don't allow to delete its
        # root volume. We can just call vserver-destroy directly.
        if volumes_count == 1 and not is_dp_destination:
            # Only the root volume remains; take it offline and delete it
            # before destroying the vserver.
            try:
                vserver_client.offline_volume(root_volume_name)
            except netapp_api.NaApiError as e:
                if e.code == netapp_api.EVOLUMEOFFLINE:
                    LOG.error("Volume %s is already offline.",
                              root_volume_name)
                else:
                    raise
            vserver_client.delete_volume(root_volume_name)
        elif volumes_count > 1:
            # Refuse to delete while non-root (share) volumes still exist.
            msg = _("Cannot delete Vserver. Vserver %s has shares.")
            raise exception.NetAppException(msg % vserver_name)

        if security_services and not is_dp_destination:
            self._terminate_vserver_services(vserver_name, vserver_client,
                                             security_services)

        self.send_request('vserver-destroy', {'vserver-name': vserver_name})

    @na_utils.trace
    def _terminate_vserver_services(self, vserver_name, vserver_client,
                                    security_services):
        # Tear down CIFS/Kerberos configuration before destroying a vserver.
        for service in security_services:
            if service['type'].lower() == 'active_directory':
                api_args = {
                    'admin-password': service['password'],
                    'admin-username': service['user'],
                }
                try:
                    vserver_client.send_request('cifs-server-delete',
                                                api_args)
                except netapp_api.NaApiError as e:
                    if e.code == netapp_api.EOBJECTNOTFOUND:
                        LOG.error('CIFS server does not exist for '
                                  'Vserver %s.', vserver_name)
                    else:
                        LOG.debug('Retrying CIFS server delete with force '
                                  'flag for Vserver %s.', vserver_name)
                        api_args = {
                            'force-account-delete': 'true'
                        }
                        vserver_client.send_request('cifs-server-delete',
                                                    api_args)
            elif service['type'].lower() == 'kerberos':
                vserver_client.disable_kerberos(service)

    @na_utils.trace
    def is_nve_supported(self):
        """Determine whether NVE is supported on this platform and version."""
        nodes = self.list_cluster_nodes()
        system_version = self.get_system_version()
        version = system_version.get('version')
        version_tuple = system_version.get('version-tuple')

        # NVE requires an ONTAP version >= 9.1. Also, not all platforms
        # support this feature. NVE is not supported if the version
        # includes the substring '<1no-DARE>' (no Data At Rest Encryption).
        if version_tuple >= (9, 1, 0) and "<1no-DARE>" not in version:
            if nodes is not None:
                # Platform support is checked against the first node only.
                return self.get_security_key_manager_nve_support(nodes[0])
            else:
                LOG.debug('Cluster credentials are required in order to '
                          'determine whether NetApp Volume Encryption is '
                          'supported or not on this platform.')
                return False
        else:
            LOG.debug('NetApp Volume Encryption is not supported on this '
                      'ONTAP version: %(version)s, %(version_tuple)s. ',
                      {'version': version, 'version_tuple': version_tuple})
            return False

    @na_utils.trace
    def list_cluster_nodes(self):
        """Get all available cluster nodes."""
        api_args = {
            'desired-attributes': {
                'node-details-info': {
                    'node': None,
                },
            },
        }
        result = self.send_iter_request('system-node-get-iter', api_args)
        nodes_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        return [node_info.get_child_content('node')
                for node_info in nodes_info_list.get_children()]

    @na_utils.trace
    def get_security_key_manager_nve_support(self, node):
        """Determine whether the cluster platform supports Volume Encryption"""
        api_args = {'node': node}
        try:
            result = self.send_request(
                'security-key-manager-volume-encryption-supported', api_args)
            vol_encryption_supported = result.get_child_content(
                'vol-encryption-supported') or 'false'
        except netapp_api.NaApiError as e:
            # Any API failure is treated as "not supported".
            LOG.debug("NVE disabled due to error code: %s - %s",
                      e.code, e.message)
            return False

        return strutils.bool_from_string(vol_encryption_supported)

    @na_utils.trace
    def list_node_data_ports(self, node):
        # Port names only, in the same speed-sorted order as
        # get_node_data_ports.
        ports = self.get_node_data_ports(node)
        return [port.get('port') for port in ports]

    @na_utils.trace
    def get_node_data_ports(self, node):
        """Get applicable data ports on the node."""
        api_args = {
            'query': {
                'net-port-info': {
                    'node': node,
                    'link-status': 'up',
                    'port-type': 'physical|if_group',
                    'role': 'data',
                },
            },
            'desired-attributes': {
                'net-port-info': {
                    'port': None,
                    'node': None,
                    'operational-speed': None,
                    'ifgrp-port': None,
                },
            },
        }
        result = self.send_iter_request('net-port-get-iter',
                                        api_args)
        net_port_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')

        ports = []
        for port_info in net_port_info_list.get_children():

            # Skip physical ports that are part of interface groups.
            if port_info.get_child_content('ifgrp-port'):
                continue

            port = {
                'node': port_info.get_child_content('node'),
                'port': port_info.get_child_content('port'),
                'speed': port_info.get_child_content('operational-speed'),
            }
            ports.append(port)

        return self._sort_data_ports_by_speed(ports)

    @na_utils.trace
    def _sort_data_ports_by_speed(self, ports):
        """Sort port dicts by speed, fastest first."""

        def sort_key(port):
            # Sort rank: numeric speed string -> its int value; 'auto' -> 3;
            # 'undef' -> 2; any other string -> 1; missing/non-string -> 0.
            value = port.get('speed')
            if not (value and isinstance(value, str)):
                return 0
            elif value.isdigit():
                return int(value)
            elif value == 'auto':
                return 3
            elif value == 'undef':
                return 2
            else:
                return 1

        return sorted(ports, key=sort_key, reverse=True)

    @na_utils.trace
    def list_root_aggregates(self):
        """Get names of all aggregates that contain node root volumes."""

        desired_attributes = {
            'aggr-attributes': {
                'aggregate-name': None,
                'aggr-raid-attributes': {
                    'has-local-root': None,
                    'has-partner-root': None,
                },
            },
        }
        aggrs = self._get_aggregates(desired_attributes=desired_attributes)
        root_aggregates = []
        for aggr in aggrs:
            aggr_name = aggr.get_child_content('aggregate-name')
            aggr_raid_attrs = aggr.get_child_by_name('aggr-raid-attributes')

            local_root = strutils.bool_from_string(
                aggr_raid_attrs.get_child_content('has-local-root'))
            partner_root = strutils.bool_from_string(
                aggr_raid_attrs.get_child_content('has-partner-root'))

            if local_root or partner_root:
                root_aggregates.append(aggr_name)

        return root_aggregates

    @na_utils.trace
    def list_non_root_aggregates(self):
        """Get names of all aggregates that don't contain node root volumes."""

        query = {
            'aggr-attributes': {
                'aggr-raid-attributes': {
                    'has-local-root': 'false',
                    'has-partner-root': 'false',
                }
            },
        }
        return self._list_aggregates(query=query)

    @na_utils.trace
    def _list_aggregates(self, query=None):
        """Get names of all aggregates."""
        try:
            api_args = {
                'desired-attributes': {
                    'aggr-attributes': {
                        'aggregate-name': None,
                    },
                },
            }
            if query:
                api_args['query'] = query
            result = self.send_iter_request('aggr-get-iter', api_args)
            aggr_list = result.get_child_by_name(
                'attributes-list').get_children()
        except AttributeError:
            # Raised when 'attributes-list' is absent (None) in the result.
            msg = _("Could not list aggregates.")
            raise exception.NetAppException(msg)
        return [aggr.get_child_content('aggregate-name') for aggr
                in aggr_list]

    @na_utils.trace
    def list_vserver_aggregates(self):
        """Returns a list of aggregates available to a vserver.

        This must be called against a Vserver LIF.
        """
        return list(self.get_vserver_aggregate_capacities().keys())

    @na_utils.trace
    def create_port_and_broadcast_domain(self, node, port, vlan, mtu, ipspace):
        # Optionally create a VLAN on the port, then make sure the resulting
        # home port belongs to the right broadcast domain.  Returns the home
        # port name ('<port>' or '<port>-<vlan>').
        home_port_name = port
        if vlan:
            self._create_vlan(node, port, vlan)
            home_port_name = '%(port)s-%(tag)s' % {'port': port, 'tag': vlan}

        if self.features.BROADCAST_DOMAINS:
            self._ensure_broadcast_domain_for_port(
                node, home_port_name, mtu, ipspace=ipspace)

        return home_port_name

    @na_utils.trace
    def create_network_interface(self, ip, netmask, node, port,
                                 vserver_name, lif_name):
        """Creates LIF on VLAN port."""
        LOG.debug('Creating LIF %(lif)s for Vserver %(vserver)s '
                  'node/port %(node)s:%(port)s.',
                  {'lif': lif_name, 'vserver': vserver_name, 'node': node,
                   'port': port})

        api_args = {
            'address': ip,
            'administrative-status': 'up',
            'data-protocols': [
                {'data-protocol': 'nfs'},
                {'data-protocol': 'cifs'},
            ],
            'home-node': node,
            'home-port': port,
            'netmask': netmask,
            'interface-name': lif_name,
            'role': 'data',
            'vserver': vserver_name,
        }
        self.send_request('net-interface-create', api_args)

    @na_utils.trace
    def _create_vlan(self, node, port, vlan):
        try:
            api_args = {
                'vlan-info': {
                    'parent-interface': port,
                    'node': node,
                    'vlanid': vlan,
                },
            }
            self.send_request('net-vlan-create', api_args)
        except netapp_api.NaApiError as e:
            # An already-existing VLAN is not an error.
            if e.code == netapp_api.EDUPLICATEENTRY:
                LOG.debug('VLAN %(vlan)s already exists on port %(port)s',
                          {'vlan': vlan, 'port': port})
            else:
                msg = _('Failed to '
                        'create VLAN %(vlan)s on '
                        'port %(port)s. %(err_msg)s')
                msg_args = {'vlan': vlan, 'port': port, 'err_msg': e.message}
                raise exception.NetAppException(msg % msg_args)

    @na_utils.trace
    def delete_vlan(self, node, port, vlan):
        try:
            api_args = {
                'vlan-info': {
                    'parent-interface': port,
                    'node': node,
                    'vlanid': vlan,
                },
            }
            self.send_request('net-vlan-delete', api_args)
        except netapp_api.NaApiError as e:
            # A VLAN still bound to a LIF cannot be deleted; log and move on.
            p = re.compile('port already has a lif bound.*', re.IGNORECASE)
            if (e.code == netapp_api.EAPIERROR and re.match(p, e.message)):
                LOG.debug('VLAN %(vlan)s on port %(port)s node %(node)s '
                          'still used by LIF and cannot be deleted.',
                          {'vlan': vlan, 'port': port, 'node': node})
            else:
                msg = _('Failed to delete VLAN %(vlan)s on '
                        'port %(port)s node %(node)s: %(err_msg)s')
                msg_args = {
                    'vlan': vlan,
                    'port': port,
                    'node': node,
                    'err_msg': e.message
                }
                raise exception.NetAppException(msg % msg_args)

    @na_utils.trace
    def get_degraded_ports(self, broadcast_domains, ipspace):
        """Get degraded ports for broadcast domains and an ipspace."""
        valid_domains = self._get_valid_broadcast_domains(broadcast_domains)
        api_args = {
            'query': {
                'net-port-info': {
                    'broadcast-domain': '|'.join(valid_domains),
                    'health-degraded-reasons': {
                        'netport-degraded-reason': 'l2_reachability'
                    },
                    'health-status': 'degraded',
                    'ipspace': ipspace,
                    'port-type': 'vlan',
                },
            },
            'desired-attributes': {
                'net-port-info': {
                    'port': None,
                    'node': None,
                },
            },
        }
        result = self.send_iter_request('net-port-get-iter', api_args)
        net_port_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        ports = []
        for port_info in net_port_info_list.get_children():
            # making it a net-qualified-port-name
            # compatible with ports result from net-ipspaces-get-iter
            ports.append(f"{port_info.get_child_content('node')}:"
                         f"{port_info.get_child_content('port')}")
        return ports

    @na_utils.trace
    def _get_valid_broadcast_domains(_self, broadcast_domains):
        # NOTE(review): first parameter is named '_self' instead of the
        # conventional 'self'; behavior is unaffected, but consider renaming.
        # Keep only domains managed by (or defaulted for) this driver.
        valid_domains = []
        for broadcast_domain in broadcast_domains:
            if (
                broadcast_domain == 'OpenStack' or
                broadcast_domain == DEFAULT_BROADCAST_DOMAIN or
                broadcast_domain.startswith(BROADCAST_DOMAIN_PREFIX)
            ):
                valid_domains.append(broadcast_domain)
        return valid_domains

    @na_utils.trace
    def create_route(self, gateway, destination=None):
        if not gateway:
            return
        if not destination:
            # Default route; pick the IPv6 or IPv4 form based on the gateway.
            if ':' in gateway:
                destination = '::/0'
            else:
                destination = '0.0.0.0/0'
        try:
            api_args = {
                'destination': destination,
                'gateway': gateway,
                'return-record': 'true',
            }
            self.send_request('net-routes-create', api_args)
        except netapp_api.NaApiError as e:
            # An already-existing route is not an error.
            p = re.compile('.*Duplicate route exists.*', re.IGNORECASE)
            if (e.code == netapp_api.EAPIERROR and re.match(p, e.message)):
                LOG.debug('Route to %(destination)s via gateway %(gateway)s '
                          'exists.',
                          {'destination': destination, 'gateway': gateway})
            else:
                msg = _('Failed to create a route to %(destination)s via '
                        'gateway %(gateway)s: %(err_msg)s')
                msg_args = {
                    'destination': destination,
                    'gateway': gateway,
                    'err_msg': e.message,
                }
                raise exception.NetAppException(msg % msg_args)

    @na_utils.trace
    def _ensure_broadcast_domain_for_port(self, node, port, mtu,
                                          ipspace=DEFAULT_IPSPACE):
        """Ensure a port is in a broadcast domain.  Create one if necessary.

        If the IPspace:domain pair match for the given port, which commonly
        happens in multi-node clusters, then there isn't anything to do.
        Otherwise, we can assume the IPspace is correct and extant by this
        point, so the remaining task is to remove the port from any domain it
        is already in, create the domain for the IPspace if it doesn't exist,
        and add the port to this domain.
        """

        # Derive the broadcast domain name from the IPspace name since they
        # need to be 1-1 and the default for both is the same name, 'Default'.
        domain = re.sub(IPSPACE_PREFIX, BROADCAST_DOMAIN_PREFIX, ipspace)

        port_info = self._get_broadcast_domain_for_port(node, port)

        # Port already in desired ipspace and broadcast domain.
        if (port_info['ipspace'] == ipspace and
                port_info['broadcast-domain'] == domain):
            # Still refresh the MTU in case it changed.
            self._modify_broadcast_domain(domain, ipspace, mtu)
            return

        # If in another broadcast domain, remove port from it.
        if port_info['broadcast-domain']:
            self._remove_port_from_broadcast_domain(
                node, port, port_info['broadcast-domain'],
                port_info['ipspace'])

        # If desired broadcast domain doesn't exist, create it.
        if not self._broadcast_domain_exists(domain, ipspace):
            self._create_broadcast_domain(domain, ipspace, mtu)
        else:
            self._modify_broadcast_domain(domain, ipspace, mtu)

        # Move the port into the broadcast domain where it is needed.
        self._add_port_to_broadcast_domain(node, port, domain, ipspace)

    @na_utils.trace
    def _get_broadcast_domain_for_port(self, node, port):
        """Get broadcast domain for a specific port."""
        api_args = {
            'query': {
                'net-port-info': {
                    'node': node,
                    'port': port,
                },
            },
            'desired-attributes': {
                'net-port-info': {
                    'broadcast-domain': None,
                    'ipspace': None,
                },
            },
        }
        result = self.send_iter_request('net-port-get-iter', api_args)

        net_port_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        port_info = net_port_info_list.get_children()
        if not port_info:
            msg = _('Could not find port %(port)s on node %(node)s.')
            msg_args = {'port': port, 'node': node}
            raise exception.NetAppException(msg % msg_args)

        port = {
            'broadcast-domain':
                port_info[0].get_child_content('broadcast-domain'),
            'ipspace': port_info[0].get_child_content('ipspace')
        }
        return port

    @na_utils.trace
    def _broadcast_domain_exists(self, domain, ipspace):
        """Check if a broadcast domain exists."""
        api_args = {
            'query': {
                'net-port-broadcast-domain-info': {
                    'ipspace': ipspace,
                    'broadcast-domain': domain,
                },
            },
            'desired-attributes': {
                'net-port-broadcast-domain-info': None,
            },
        }
        result = self.send_iter_request('net-port-broadcast-domain-get-iter',
                                        api_args)
        return self._has_records(result)

    @na_utils.trace
    def _create_broadcast_domain(self, domain, ipspace, mtu):
        """Create a broadcast
        domain."""
        api_args = {
            'ipspace': ipspace,
            'broadcast-domain': domain,
            'mtu': mtu,
        }
        self.send_request('net-port-broadcast-domain-create', api_args)

    @na_utils.trace
    def _modify_broadcast_domain(self, domain, ipspace, mtu):
        """Modify a broadcast domain."""
        api_args = {
            'ipspace': ipspace,
            'broadcast-domain': domain,
            'mtu': mtu,
        }
        self.send_request('net-port-broadcast-domain-modify', api_args)

    @na_utils.trace
    def _delete_broadcast_domain(self, domain, ipspace):
        """Delete a broadcast domain."""
        api_args = {
            'ipspace': ipspace,
            'broadcast-domain': domain,
        }
        self.send_request('net-port-broadcast-domain-destroy', api_args)

    @na_utils.trace
    def _delete_broadcast_domains_for_ipspace(self, ipspace_name):
        """Deletes all broadcast domains in an IPspace."""
        ipspaces = self.get_ipspaces(ipspace_name=ipspace_name)
        if not ipspaces:
            return

        ipspace = ipspaces[0]
        for broadcast_domain_name in ipspace['broadcast-domains']:
            self._delete_broadcast_domain(broadcast_domain_name, ipspace_name)

    @na_utils.trace
    def _add_port_to_broadcast_domain(self, node, port, domain, ipspace):

        qualified_port_name = ':'.join([node, port])
        try:
            api_args = {
                'ipspace': ipspace,
                'broadcast-domain': domain,
                'ports': {
                    'net-qualified-port-name': qualified_port_name,
                }
            }
            self.send_request('net-port-broadcast-domain-add-ports', api_args)
        except netapp_api.NaApiError as e:
            # A port already assigned to the domain is not an error.
            if e.code == (netapp_api.
                          E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN):
                LOG.debug('Port %(port)s already exists in broadcast domain '
                          '%(domain)s', {'port': port, 'domain': domain})
            else:
                msg = _('Failed to add port %(port)s to broadcast domain '
                        '%(domain)s. '
                        '%(err_msg)s')
                msg_args = {
                    'port': qualified_port_name,
                    'domain': domain,
                    'err_msg': e.message,
                }
                raise exception.NetAppException(msg % msg_args)

    @na_utils.trace
    def _remove_port_from_broadcast_domain(self, node, port, domain, ipspace):

        qualified_port_name = ':'.join([node, port])
        api_args = {
            'ipspace': ipspace,
            'broadcast-domain': domain,
            'ports': {
                'net-qualified-port-name': qualified_port_name,
            }
        }
        self.send_request('net-port-broadcast-domain-remove-ports', api_args)

    @na_utils.trace
    def network_interface_exists(self, vserver_name, node, port, ip, netmask,
                                 vlan=None, home_port=None):
        """Checks if LIF exists."""
        if not home_port:
            # Derive the home port from the physical port and optional VLAN.
            home_port = port if not vlan else f'{port}-{vlan}'

        api_args = {
            'query': {
                'net-interface-info': {
                    'address': ip,
                    'home-node': node,
                    'home-port': home_port,
                    'netmask': netmask,
                    'vserver': vserver_name,
                },
            },
            'desired-attributes': {
                'net-interface-info': {
                    'interface-name': None,
                },
            },
        }
        result = self.send_iter_request('net-interface-get-iter', api_args)
        return self._has_records(result)

    @na_utils.trace
    def list_network_interfaces(self):
        """Get the names of available LIFs."""
        api_args = {
            'desired-attributes': {
                'net-interface-info': {
                    'interface-name': None,
                },
            },
        }
        result = self.send_iter_request('net-interface-get-iter', api_args)
        lif_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        return [lif_info.get_child_content('interface-name')
                for lif_info in lif_info_list.get_children()]

    @na_utils.trace
    def get_network_interfaces(self, protocols=None):
        """Get available LIFs."""
        protocols = na_utils.convert_to_list(protocols)
        protocols = [protocol.lower() for protocol in protocols]

        # Only filter by data protocol when one or more were supplied.
        api_args = {
            'query': {
                'net-interface-info': {
                    'data-protocols': {
                        'data-protocol': '|'.join(protocols),
                    }
                }
            }
        } if protocols else None

        result = self.send_iter_request('net-interface-get-iter', api_args)
        lif_info_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')

        interfaces = []
        for lif_info in \
                lif_info_list.get_children():
            lif = {
                'administrative-status': lif_info.get_child_content(
                    'administrative-status'),
                'address': lif_info.get_child_content('address'),
                'home-node': lif_info.get_child_content('home-node'),
                'home-port': lif_info.get_child_content('home-port'),
                'interface-name': lif_info.get_child_content(
                    'interface-name'),
                'netmask': lif_info.get_child_content('netmask'),
                'role': lif_info.get_child_content('role'),
                'vserver': lif_info.get_child_content('vserver'),
            }
            interfaces.append(lif)

        return interfaces

    @na_utils.trace
    def disable_network_interface(self, vserver_name, interface_name):
        # Administratively bring the LIF down.
        api_args = {
            'administrative-status': 'down',
            'interface-name': interface_name,
            'vserver': vserver_name,
        }
        self.send_request('net-interface-modify', api_args)

    @na_utils.trace
    def delete_network_interface(self, vserver_name, interface_name):
        # The LIF must be disabled before it can be deleted.
        self.disable_network_interface(vserver_name, interface_name)
        api_args = {
            'interface-name': interface_name,
            'vserver': vserver_name
        }
        self.send_request('net-interface-delete', api_args)

    @na_utils.trace
    def get_ipspace_name_for_vlan_port(self, vlan_node, vlan_port, vlan_id):
        """Gets IPSpace name for specified VLAN"""

        if not self.features.IPSPACES:
            return None

        port = vlan_port if not vlan_id else '%(port)s-%(id)s' % {
            'port': vlan_port,
            'id': vlan_id,
        }
        api_args = {'node': vlan_node, 'port': port}

        try:
            result = self.send_request('net-port-get', api_args)
        except netapp_api.NaApiError as e:
            # A missing port is expected on first use; the caller creates it.
            if e.code == netapp_api.EOBJECTNOTFOUND:
                msg = _('No pre-existing port or ipspace was found for '
                        '%(port)s, will attempt to create one.')
                msg_args = {'port': port}
                LOG.debug(msg, msg_args)
                return None
            else:
                raise

        attributes = result.get_child_by_name('attributes')
        net_port_info = attributes.get_child_by_name('net-port-info')
        ipspace_name = net_port_info.get_child_content('ipspace')

        return ipspace_name

    @na_utils.trace
    def get_ipspaces(self, ipspace_name=None, vserver_name=None):
        """Gets one or more IPSpaces.
parameters ipspace_name and vserver_name are mutually exclusive """ if ipspace_name and vserver_name: msg = ('The parameters "ipspace_name" and "vserver_name" cannot ' 'both be used at the same time.') raise exception.InvalidInput(reason=msg) if not self.features.IPSPACES: return [] api_args = {} if ipspace_name: api_args['query'] = { 'net-ipspaces-info': { 'ipspace': ipspace_name, } } elif vserver_name: api_args['query'] = { 'net-ipspaces-info': { 'vservers': { 'vserver_name': vserver_name, } } } result = self.send_iter_request('net-ipspaces-get-iter', api_args) if not self._has_records(result): return [] ipspaces = [] for net_ipspaces_info in result.get_child_by_name( 'attributes-list').get_children(): ipspace = { 'ports': [], 'vservers': [], 'broadcast-domains': [], } ports = net_ipspaces_info.get_child_by_name( 'ports') or netapp_api.NaElement('none') for port in ports.get_children(): ipspace['ports'].append(port.get_content()) vservers = net_ipspaces_info.get_child_by_name( 'vservers') or netapp_api.NaElement('none') for vserver in vservers.get_children(): ipspace['vservers'].append(vserver.get_content()) broadcast_domains = net_ipspaces_info.get_child_by_name( 'broadcast-domains') or netapp_api.NaElement('none') for broadcast_domain in broadcast_domains.get_children(): ipspace['broadcast-domains'].append( broadcast_domain.get_content()) ipspace['ipspace'] = net_ipspaces_info.get_child_content('ipspace') ipspace['id'] = net_ipspaces_info.get_child_content('id') ipspace['uuid'] = net_ipspaces_info.get_child_content('uuid') ipspaces.append(ipspace) return ipspaces @na_utils.trace def ipspace_exists(self, ipspace_name): """Checks if IPspace exists.""" if not self.features.IPSPACES: return False api_args = { 'query': { 'net-ipspaces-info': { 'ipspace': ipspace_name, }, }, 'desired-attributes': { 'net-ipspaces-info': { 'ipspace': None, }, }, } result = self.send_iter_request('net-ipspaces-get-iter', api_args) return self._has_records(result) @na_utils.trace def 
create_ipspace(self, ipspace_name): """Creates an IPspace.""" api_args = {'ipspace': ipspace_name} self.send_request('net-ipspaces-create', api_args) @na_utils.trace def delete_ipspace(self, ipspace_name): """Deletes an IPspace. Returns: True if ipspace was deleted, False if validation or error prevented deletion """ if not self.features.IPSPACES: return False if not ipspace_name: return False if ( ipspace_name in CLUSTER_IPSPACES or self.ipspace_has_data_vservers(ipspace_name) ): LOG.debug('IPspace %(ipspace)s not deleted: still in use.', {'ipspace': ipspace_name}) return False try: self._delete_broadcast_domains_for_ipspace(ipspace_name) except netapp_api.NaApiError as e: msg = _('Broadcast Domains of IPspace %s not deleted. ' 'Reason: %s') % (ipspace_name, e) LOG.warning(msg) return False api_args = {'ipspace': ipspace_name} try: self.send_request('net-ipspaces-destroy', api_args) except netapp_api.NaApiError as e: msg = _('IPspace %s not deleted. Reason: %s') % (ipspace_name, e) LOG.warning(msg) return False return True @na_utils.trace def add_vserver_to_ipspace(self, ipspace_name, vserver_name): """Assigns a vserver to an IPspace.""" api_args = {'ipspace': ipspace_name, 'vserver': vserver_name} self.send_request('net-ipspaces-assign-vserver', api_args) @na_utils.trace def get_node_for_aggregate(self, aggregate_name): """Get home node for the specified aggregate. This API could return None, most notably if it was sent to a Vserver LIF, so the caller must be able to handle that case. 
""" if not aggregate_name: return None desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-ownership-attributes': { 'home-name': None, }, }, } try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], desired_attributes=desired_attributes) except netapp_api.NaApiError as e: if e.code == netapp_api.EAPINOTFOUND: return None else: raise if len(aggrs) < 1: return None aggr_ownership_attrs = aggrs[0].get_child_by_name( 'aggr-ownership-attributes') or netapp_api.NaElement('none') return aggr_ownership_attrs.get_child_content('home-name') @na_utils.trace def get_cluster_aggregate_capacities(self, aggregate_names): """Calculates capacity of one or more aggregates. Returns dictionary of aggregate capacity metrics. 'size-used' is the actual space consumed on the aggregate. 'size-available' is the actual space remaining. 'size-total' is the defined total aggregate size, such that used + available = total. """ if aggregate_names is not None and len(aggregate_names) == 0: return {} desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-space-attributes': { 'size-available': None, 'size-total': None, 'size-used': None, }, }, } aggrs = self._get_aggregates(aggregate_names=aggregate_names, desired_attributes=desired_attributes) aggr_space_dict = dict() for aggr in aggrs: aggr_name = aggr.get_child_content('aggregate-name') aggr_space_attrs = aggr.get_child_by_name('aggr-space-attributes') aggr_space_dict[aggr_name] = { 'available': int(aggr_space_attrs.get_child_content('size-available')), 'total': int(aggr_space_attrs.get_child_content('size-total')), 'used': int(aggr_space_attrs.get_child_content('size-used')), } return aggr_space_dict @na_utils.trace def get_vserver_aggregate_capacities(self, aggregate_names=None): """Calculates capacity of one or more aggregates for a vserver. Returns dictionary of aggregate capacity metrics. This must be called against a Vserver LIF. 
""" if aggregate_names is not None and len(aggregate_names) == 0: return {} api_args = { 'desired-attributes': { 'vserver-info': { 'vserver-name': None, 'vserver-aggr-info-list': { 'vserver-aggr-info': { 'aggr-name': None, 'aggr-availsize': None, }, }, }, }, } result = self.send_request('vserver-get', api_args) attributes = result.get_child_by_name('attributes') if not attributes: raise exception.NetAppException('Failed to read Vserver info') vserver_info = attributes.get_child_by_name('vserver-info') vserver_name = vserver_info.get_child_content('vserver-name') vserver_aggr_info_element = vserver_info.get_child_by_name( 'vserver-aggr-info-list') or netapp_api.NaElement('none') vserver_aggr_info_list = vserver_aggr_info_element.get_children() if not vserver_aggr_info_list: LOG.warning('No aggregates assigned to Vserver %s.', vserver_name) # Return dict of key-value pair of aggr_name:aggr_size_available. aggr_space_dict = {} for aggr_info in vserver_aggr_info_list: aggr_name = aggr_info.get_child_content('aggr-name') if aggregate_names is None or aggr_name in aggregate_names: aggr_size = int(aggr_info.get_child_content('aggr-availsize')) aggr_space_dict[aggr_name] = {'available': aggr_size} LOG.debug('Found available Vserver aggregates: %s', aggr_space_dict) return aggr_space_dict @na_utils.trace def _get_aggregates(self, aggregate_names=None, desired_attributes=None): query = { 'aggr-attributes': { 'aggregate-name': '|'.join(aggregate_names), } } if aggregate_names else None api_args = {} if query: api_args['query'] = query if desired_attributes: api_args['desired-attributes'] = desired_attributes result = self.send_iter_request('aggr-get-iter', api_args) if not self._has_records(result): return [] else: return result.get_child_by_name('attributes-list').get_children() def get_performance_instance_uuids(self, object_name, node_name): """Get UUIDs of performance instances for a cluster node.""" api_args = { 'objectname': object_name, 'query': { 'instance-info': { 
'uuid': node_name + ':*', } } } result = self.send_request('perf-object-instance-list-info-iter', api_args) uuids = [] instances = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('None') for instance_info in instances.get_children(): uuids.append(instance_info.get_child_content('uuid')) return uuids def get_performance_counter_info(self, object_name, counter_name): """Gets info about one or more Data ONTAP performance counters.""" api_args = {'objectname': object_name} result = self.send_request('perf-object-counter-list-info', api_args) counters = result.get_child_by_name( 'counters') or netapp_api.NaElement('None') for counter in counters.get_children(): if counter.get_child_content('name') == counter_name: labels = [] label_list = counter.get_child_by_name( 'labels') or netapp_api.NaElement('None') for label in label_list.get_children(): labels.extend(label.get_content().split(',')) base_counter = counter.get_child_content('base-counter') return { 'name': counter_name, 'labels': labels, 'base-counter': base_counter, } else: raise exception.NotFound(_('Counter %s not found') % counter_name) def get_performance_counters(self, object_name, instance_uuids, counter_names): """Gets one or more cDOT performance counters.""" api_args = { 'objectname': object_name, 'instance-uuids': [ {'instance-uuid': instance_uuid} for instance_uuid in instance_uuids ], 'counters': [ {'counter': counter} for counter in counter_names ], } result = self.send_request('perf-object-get-instances', api_args) counter_data = [] timestamp = result.get_child_content('timestamp') instances = result.get_child_by_name( 'instances') or netapp_api.NaElement('None') for instance in instances.get_children(): instance_name = instance.get_child_content('name') instance_uuid = instance.get_child_content('uuid') node_name = instance_uuid.split(':')[0] counters = instance.get_child_by_name( 'counters') or netapp_api.NaElement('None') for counter in counters.get_children(): counter_name 
= counter.get_child_content('name') counter_value = counter.get_child_content('value') counter_data.append({ 'instance-name': instance_name, 'instance-uuid': instance_uuid, 'node-name': node_name, 'timestamp': timestamp, counter_name: counter_value, }) return counter_data @na_utils.trace def setup_security_services(self, security_services, vserver_client, vserver_name, aes_encryption, timeout=30): api_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'} ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'} ], 'vserver-name': vserver_name, } self.send_request('vserver-modify', api_args) for security_service in security_services: if security_service['type'].lower() == 'ldap': vserver_client.configure_ldap(security_service, timeout=timeout) elif security_service['type'].lower() == 'active_directory': vserver_client.configure_active_directory(security_service, vserver_name, aes_encryption) vserver_client.configure_cifs_options(security_service) elif security_service['type'].lower() == 'kerberos': vserver_client.create_kerberos_realm(security_service) vserver_client.configure_kerberos(security_service, vserver_name) else: msg = _('Unsupported security service type %s for ' 'Data ONTAP driver') raise exception.NetAppException(msg % security_service['type']) @na_utils.trace def update_showmount(self, showmount): """Update show mount for vserver. """ nfs_service_modify_arg = { 'showmount': showmount } self.send_request('nfs-service-modify', nfs_service_modify_arg) @na_utils.trace def update_pnfs(self, pnfs): """Update pNFS for vserver. 
""" nfs_service_modify_arg = { 'is-nfsv41-pnfs-enabled': pnfs } self.send_request('nfs-service-modify', nfs_service_modify_arg) @na_utils.trace def enable_nfs(self, versions, nfs_config=None): """Enables NFS on Vserver.""" self.send_request('nfs-enable') self._enable_nfs_protocols(versions) if nfs_config: self._configure_nfs(nfs_config) self._create_default_nfs_export_rules() @na_utils.trace def _enable_nfs_protocols(self, versions): """Set the enabled NFS protocol versions.""" nfs3 = 'true' if 'nfs3' in versions else 'false' nfs40 = 'true' if 'nfs4.0' in versions else 'false' nfs41 = 'true' if 'nfs4.1' in versions else 'false' nfs_service_modify_args = { 'is-nfsv3-enabled': nfs3, 'is-nfsv40-enabled': nfs40, 'is-nfsv41-enabled': nfs41, 'showmount': 'true', 'is-v3-ms-dos-client-enabled': 'true', 'is-nfsv3-connection-drop-enabled': 'false', 'enable-ejukebox': 'false', } self.send_request('nfs-service-modify', nfs_service_modify_args) @na_utils.trace def _configure_nfs(self, nfs_config): """Sets the nfs configuraton""" self.send_request('nfs-service-modify', nfs_config) @na_utils.trace def _create_default_nfs_export_rules(self): """Create the default export rule for the NFS service.""" export_rule_create_args = { 'client-match': '0.0.0.0/0', 'policy-name': 'default', 'ro-rule': { 'security-flavor': 'any', }, 'rw-rule': { 'security-flavor': 'never', }, } self.send_request('export-rule-create', export_rule_create_args) export_rule_create_args['client-match'] = '::/0' self.send_request('export-rule-create', export_rule_create_args) @na_utils.trace def _create_ldap_client(self, security_service): ad_domain = security_service.get('domain') ldap_servers = security_service.get('server') bind_dn = security_service.get('user') ldap_schema = 'RFC-2307' if ad_domain: if ldap_servers: msg = _("LDAP client cannot be configured with both 'server' " "and 'domain' parameters. 
Use 'server' for Linux/Unix " "LDAP servers or 'domain' for Active Directory LDAP " "servers.") LOG.exception(msg) raise exception.NetAppException(msg) # RFC2307bis, for MS Active Directory LDAP server ldap_schema = 'MS-AD-BIS' bind_dn = (security_service.get('user') + '@' + ad_domain) else: if not ldap_servers: msg = _("LDAP client cannot be configured without 'server' " "or 'domain' parameters. Use 'server' for Linux/Unix " "LDAP servers or 'domain' for Active Directory LDAP " "server.") LOG.exception(msg) raise exception.NetAppException(msg) if security_service.get('dns_ip'): self.configure_dns(security_service) config_name = hashlib.md5( security_service['id'].encode("latin-1"), usedforsecurity=False).hexdigest() api_args = { 'ldap-client-config': config_name, 'tcp-port': '389', 'schema': ldap_schema, 'bind-dn': bind_dn, 'bind-password': security_service.get('password'), } if security_service.get('ou'): api_args['base-dn'] = security_service['ou'] if ad_domain: # Active Directory LDAP server api_args['ad-domain'] = ad_domain else: # Linux/Unix LDAP servers if self.features.LDAP_LDAP_SERVERS: servers_key, servers_key_type = 'ldap-servers', 'string' else: servers_key, servers_key_type = 'servers', 'ip-address' api_args[servers_key] = [] for server in ldap_servers.split(','): api_args[servers_key].append( {servers_key_type: server.strip()}) self.send_request('ldap-client-create', api_args) @na_utils.trace def _enable_ldap_client(self, client_config_name, timeout=30): # ONTAP ldap query timeout is 3 seconds by default interval = 3 retries = int(timeout / interval) or 1 api_args = {'client-config': client_config_name, 'client-enabled': 'true'} @manila_utils.retry(retry_param=exception.ShareBackendException, interval=interval, retries=retries, backoff_rate=1) def try_enable_ldap_client(): try: self.send_request('ldap-config-create', api_args) except netapp_api.NaApiError as e: msg = _('Unable to enable ldap client configuration. Will ' 'retry the operation. 
Error details: %s') % e.message LOG.warning(msg) raise exception.ShareBackendException(msg=msg) try: try_enable_ldap_client() except exception.ShareBackendException: msg = _("Unable to enable ldap client configuration %s. " "Retries exhausted. Aborting.") % client_config_name LOG.exception(msg) raise exception.NetAppException(message=msg) @na_utils.trace def _delete_ldap_client(self, security_service): config_name = ( hashlib.md5(security_service['id'].encode("latin-1"), usedforsecurity=False).hexdigest()) api_args = {'ldap-client-config': config_name} self.send_request('ldap-client-delete', api_args) @na_utils.trace def configure_ldap(self, security_service, timeout=30): """Configures LDAP on Vserver.""" config_name = hashlib.md5( security_service['id'].encode("latin-1"), usedforsecurity=False).hexdigest() self._create_ldap_client(security_service) self._enable_ldap_client(config_name, timeout=timeout) @na_utils.trace def modify_ldap(self, new_security_service, current_security_service): """Modifies LDAP client on a Vserver.""" # Create a new ldap client self._create_ldap_client(new_security_service) # Delete current ldap config try: self.send_request('ldap-config-delete') except netapp_api.NaApiError as e: if e.code != netapp_api.EOBJECTNOTFOUND: # Delete previously created ldap client self._delete_ldap_client(new_security_service) msg = _("An error occurred while deleting original LDAP " "configuration. %s") raise exception.NetAppException(msg % e.message) else: msg = _("Original LDAP configuration was not found. 
" "LDAP modification will continue.") LOG.debug(msg) new_config_name = ( hashlib.md5( new_security_service['id'].encode("latin-1"), usedforsecurity=False).hexdigest()) # Create ldap config with the new client api_args = {'client-config': new_config_name, 'client-enabled': 'true'} self.send_request('ldap-config-create', api_args) # Delete old client configuration try: self._delete_ldap_client(current_security_service) except netapp_api.NaApiError as e: if e.code != netapp_api.EOBJECTNOTFOUND: current_config_name = ( hashlib.md5( current_security_service['id'].encode( "latin-1"), usedforsecurity=False).hexdigest()) msg = _("An error occurred while deleting original LDAP " "client configuration %(current_config)s. " "Error details: %(e_msg)s") msg_args = { 'current_config': current_config_name, 'e_msg': e.message, } LOG.warning(msg, msg_args) else: msg = _("Original LDAP client configuration was not found.") LOG.debug(msg) @na_utils.trace def _get_cifs_server_name(self, vserver_name): # 'cifs-server' is CIFS Server NetBIOS Name, max length is 15. # Should be unique within each domain (data['domain']). 
# Cut to 15 char with begin and end, attempt to make valid DNS hostname cifs_server = (vserver_name[0:8] + '-' + vserver_name[-6:]).replace('_', '-').upper() return cifs_server @na_utils.trace def configure_active_directory(self, security_service, vserver_name, aes_encryption): """Configures AD on Vserver.""" self.configure_dns(security_service) self.configure_cifs_aes_encryption(aes_encryption) self.set_preferred_dc(security_service) cifs_server = self._get_cifs_server_name(vserver_name) api_args = { 'admin-username': security_service['user'], 'admin-password': security_service['password'], 'force-account-overwrite': 'true', 'cifs-server': cifs_server, 'domain': security_service['domain'], } if security_service['ou'] is not None: api_args['organizational-unit'] = security_service['ou'] if security_service.get('default_ad_site'): api_args['default-site'] = security_service['default_ad_site'] try: LOG.debug("Trying to setup CIFS server with data: %s", api_args) self.send_request('cifs-server-create', api_args) except netapp_api.NaApiError as e: credential_msg = "could not authenticate" privilege_msg = "insufficient access" if (e.code == netapp_api.EAPIERROR and ( credential_msg in e.message.lower() or privilege_msg in e.message.lower())): auth_msg = _("Failed to create CIFS server entry. " "Please double check your user credentials " "or privileges. %s") raise exception.SecurityServiceFailedAuth(auth_msg % e.message) msg = _("Failed to create CIFS server entry. 
%s") raise exception.NetAppException(msg % e.message) @na_utils.trace def modify_active_directory_security_service( self, vserver_name, differring_keys, new_security_service, current_security_service): cifs_server = self._get_cifs_server_name(vserver_name) current_user_name = current_security_service['user'] new_username = new_security_service['user'] current_cifs_username = cifs_server + '\\' + current_user_name if 'password' in differring_keys: api_args = { 'user-name': current_cifs_username, 'user-password': new_security_service['password'] } try: self.send_request('cifs-local-user-set-password', api_args) except netapp_api.NaApiError as e: msg = _("Failed to modify existing CIFS server password. %s") raise exception.NetAppException(msg % e.message) if 'user' in differring_keys: api_args = { 'user-name': current_cifs_username, 'new-user-name': new_username } try: self.send_request('cifs-local-user-rename', api_args) except netapp_api.NaApiError as e: msg = _("Failed to modify existing CIFS server user-name. %s") raise exception.NetAppException(msg % e.message) if 'default_ad_site' in differring_keys: if new_security_service['default_ad_site'] is not None: cifs_server = self._get_cifs_server_name(vserver_name) api_args = { 'admin-username': new_security_service['user'], 'admin-password': new_security_service['password'], 'force-account-overwrite': 'true', 'cifs-server': cifs_server, 'default-site': new_security_service['default_ad_site'] } try: LOG.debug("Trying to modify CIFS server with data: %s", api_args) self.send_request('cifs-server-modify', api_args) except netapp_api.NaApiError as e: msg = _("Failed to modify CIFS server entry. 
%s") raise exception.NetAppException(msg % e.message) self.configure_cifs_options(new_security_service) if 'server' in differring_keys: if current_security_service['server'] is not None: self.remove_preferred_dcs(current_security_service) if new_security_service['server'] is not None: self.set_preferred_dc(new_security_service) self.configure_cifs_options(new_security_service) @na_utils.trace def create_kerberos_realm(self, security_service): """Creates Kerberos realm on cluster.""" if not self.features.KERBEROS_VSERVER: msg = _('Kerberos realms owned by Vserver are supported on ONTAP ' '8.3 or later.') raise exception.NetAppException(msg) api_args = { 'admin-server-ip': security_service['server'], 'admin-server-port': '749', 'clock-skew': '5', 'comment': '', 'kdc-ip': security_service['server'], 'kdc-port': '88', 'kdc-vendor': 'other', 'password-server-ip': security_service['server'], 'password-server-port': '464', 'realm': security_service['domain'].upper(), } try: self.send_request('kerberos-realm-create', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EDUPLICATEENTRY: LOG.debug('Kerberos realm config already exists.') else: msg = _('Failed to create Kerberos realm. %s') raise exception.NetAppException(msg % e.message) @na_utils.trace def configure_kerberos(self, security_service, vserver_name): """Configures Kerberos for NFS on Vserver.""" if not self.features.KERBEROS_VSERVER: msg = _('Kerberos realms owned by Vserver are supported on ONTAP ' '8.3 or later.') raise exception.NetAppException(msg) self.configure_dns(security_service) spn = self._get_kerberos_service_principal_name( security_service, vserver_name) lifs = self.list_network_interfaces() if not lifs: msg = _("Cannot set up Kerberos. 
There are no LIFs configured.") raise exception.NetAppException(msg) for lif_name in lifs: api_args = { 'admin-password': security_service['password'], 'admin-user-name': security_service['user'], 'interface-name': lif_name, 'is-kerberos-enabled': 'true', 'service-principal-name': spn } self.send_request('kerberos-config-modify', api_args) @na_utils.trace def _get_kerberos_service_principal_name(self, security_service, vserver_name): return ('nfs/' + vserver_name.replace('_', '-') + '.' + security_service['domain'] + '@' + security_service['domain'].upper()) @na_utils.trace def update_kerberos_realm(self, security_service): """Update Kerberos realm info. Only KDC IP can be changed.""" if not self.features.KERBEROS_VSERVER: msg = _('Kerberos realms owned by Vserver are supported on ONTAP ' '8.3 or later.') raise exception.NetAppException(msg) api_args = { 'admin-server-ip': security_service['server'], 'kdc-ip': security_service['server'], 'password-server-ip': security_service['server'], 'realm': security_service['domain'].upper(), } try: self.send_request('kerberos-realm-modify', api_args) except netapp_api.NaApiError as e: msg = _('Failed to update Kerberos realm. %s') raise exception.NetAppException(msg % e.message) @na_utils.trace def disable_kerberos(self, security_service): """Disable Kerberos in all Vserver LIFs.""" lifs = self.list_network_interfaces() # NOTE(dviroel): If the Vserver has no LIFs, there are no Kerberos # to be disabled. for lif_name in lifs: api_args = { 'admin-password': security_service['password'], 'admin-user-name': security_service['user'], 'interface-name': lif_name, 'is-kerberos-enabled': 'false', } try: self.send_request('kerberos-config-modify', api_args) except netapp_api.NaApiError as e: disabled_msg = "Kerberos is already disabled" if (e.code == netapp_api.EAPIERROR and disabled_msg in e.message): # NOTE(dviroel): do not raise an error for 'Kerberos is # already disabled in this LIF'. 
continue msg = _("Failed to disable Kerberos: %s.") raise exception.NetAppException(msg % e.message) @na_utils.trace def is_kerberos_enabled(self): """Check if Kerberos in enabled in all LIFs.""" if not self.features.KERBEROS_VSERVER: msg = _('Kerberos realms owned by Vserver are supported on ONTAP ' '8.3 or later.') raise exception.NetAppException(msg) lifs_info = self.get_network_interfaces(protocols=['NFS', 'CIFS']) if len(lifs_info) == 0: LOG.debug("There are no LIFs configured for this Vserver. " "Kerberos is disabled.") return False # NOTE(dviroel): All LIFs must have kerberos enabled for lif in lifs_info: api_args = { 'interface-name': lif.get('interface-name'), 'desired-attributes': { 'kerberos-config-info': { 'is-kerberos-enabled': None, } } } result = None # Catch the exception in case kerberos is not configured with LIF. try: result = self.send_request('kerberos-config-get', api_args) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if "entry doesn't exist" in e.message: exc_context.reraise = False return False attributes = result.get_child_by_name('attributes') kerberos_info = attributes.get_child_by_name( 'kerberos-config-info') kerberos_enabled = kerberos_info.get_child_content( 'is-kerberos-enabled') if kerberos_enabled == 'false': return False return True @na_utils.trace def configure_dns(self, security_service): """Configure DNS address and servers for a vserver.""" api_args = { 'domains': [], 'name-servers': [], 'dns-state': 'enabled', } # NOTE(dviroel): Read the current dns configuration and merge with the # new one. This scenario is expected when 2 security services provide # a DNS configuration, like 'active_directory' and 'ldap'. 
current_dns_config = self.get_dns_config() domains = set(current_dns_config.get('domains', [])) dns_ips = set(current_dns_config.get('dns-ips', [])) domains.add(security_service['domain']) for domain in domains: api_args['domains'].append({'string': domain}) for dns_ip in security_service['dns_ip'].split(','): dns_ips.add(dns_ip.strip()) for dns_ip in dns_ips: api_args['name-servers'].append({'ip-address': dns_ip}) try: if current_dns_config: self.send_request('net-dns-modify', api_args) else: self.send_request('net-dns-create', api_args) except netapp_api.NaApiError as e: msg = _("Failed to configure DNS. %s") raise exception.NetAppException(msg % e.message) @na_utils.trace def get_dns_config(self): """Read DNS servers and domains currently configured in the vserver·""" api_args = {} try: result = self.send_request('net-dns-get', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EOBJECTNOTFOUND: return {} msg = _("Failed to retrieve DNS configuration. %s") raise exception.NetAppException(msg % e.message) dns_config = {} attributes = result.get_child_by_name('attributes') dns_info = attributes.get_child_by_name('net-dns-info') dns_config['dns-state'] = dns_info.get_child_content( 'dns-state') domains = dns_info.get_child_by_name( 'domains') or netapp_api.NaElement('None') dns_config['domains'] = [domain.get_content() for domain in domains.get_children()] servers = dns_info.get_child_by_name( 'name-servers') or netapp_api.NaElement('None') dns_config['dns-ips'] = [server.get_content() for server in servers.get_children()] return dns_config @na_utils.trace def update_dns_configuration(self, dns_ips, domains): """Overrides DNS configuration with the specified IPs and domains.""" current_dns_config = self.get_dns_config() api_args = { 'domains': [], 'name-servers': [], 'dns-state': 'enabled', } for domain in domains: api_args['domains'].append({'string': domain}) for dns_ip in dns_ips: api_args['name-servers'].append({'ip-address': dns_ip}) 
empty_dns_config = (not api_args['domains'] and not api_args['name-servers']) if current_dns_config: api_name, api_args = ( ('net-dns-destroy', {}) if empty_dns_config else ('net-dns-modify', api_args)) else: api_name, api_args = 'net-dns-create', api_args try: self.send_request(api_name, api_args) except netapp_api.NaApiError as e: msg = _("Failed to update DNS configuration. %s") raise exception.NetAppException(msg % e.message) @na_utils.trace def configure_cifs_options(self, security_service): if security_service.get('server'): api_args = {'mode': 'none'} elif security_service.get('default_ad_site'): api_args = {'mode': 'site'} else: api_args = {'mode': 'all'} try: self.send_request( 'cifs-domain-server-discovery-mode-modify', api_args) except netapp_api.NaApiError as e: msg = ('Failed to set cifs domain server discovery mode to ' '%(mode)s. Exception: %(exception)s') msg_args = {'mode': api_args['mode'], 'exception': e.message} LOG.warning(msg, msg_args) @na_utils.trace def configure_cifs_aes_encryption(self, aes_encryption): if self.features.AES_ENCRYPTION_TYPES: if aes_encryption: api_args = { 'advertised-enc-types': [{'cifskrbenctypes': 'aes_128'}, {'cifskrbenctypes': 'aes_256'}] } else: api_args = { 'advertised-enc-types': [{'cifskrbenctypes': 'des'}, {'cifskrbenctypes': 'rc4'}] } else: api_args = { 'is-aes-encryption-enabled': ( 'true' if aes_encryption else 'false'), } try: self.send_request('cifs-security-modify', api_args) except netapp_api.NaApiError as e: msg = _("Failed to set aes encryption. 
%s") raise exception.NetAppException(msg % e.message) @na_utils.trace def set_preferred_dc(self, security_service): # server is optional if not security_service['server']: return api_args = { 'preferred-dc': [], 'domain': security_service['domain'], } for dc_ip in security_service['server'].split(','): api_args['preferred-dc'].append({'string': dc_ip.strip()}) if self.features.CIFS_DC_ADD_SKIP_CHECK: api_args['skip-config-validation'] = 'false' try: self.send_request('cifs-domain-preferred-dc-add', api_args) except netapp_api.NaApiError as e: msg = _("Failed to set preferred DC. %s") raise exception.NetAppException(msg % e.message) @na_utils.trace def remove_preferred_dcs(self, security_service): """Drops all preferred DCs at once.""" api_args = { 'domain': security_service['domain'], } try: self.send_request('cifs-domain-preferred-dc-remove', api_args) except netapp_api.NaApiError as e: msg = _("Failed to unset preferred DCs. %s") raise exception.NetAppException(msg % e.message) @na_utils.trace def create_volume(self, aggregate_name, volume_name, size_gb, thin_provisioned=False, snapshot_policy=None, language=None, dedup_enabled=False, compression_enabled=False, max_files=None, snapshot_reserve=None, volume_type='rw', qos_policy_group=None, adaptive_qos_policy_group=None, encrypt=False, mount_point_name=None, snaplock_type=None, **options): """Creates a volume.""" if adaptive_qos_policy_group and not self.features.ADAPTIVE_QOS: msg = 'Adaptive QoS not supported on this backend ONTAP version.' 
raise exception.NetAppException(msg) api_args = { 'containing-aggr-name': aggregate_name, 'size': str(size_gb) + 'g', 'volume': volume_name, } api_args.update(self._get_create_volume_api_args( volume_name, thin_provisioned, snapshot_policy, language, snapshot_reserve, volume_type, qos_policy_group, encrypt, adaptive_qos_policy_group, mount_point_name, snaplock_type)) self.send_request('volume-create', api_args) efficiency_policy = options.get('efficiency_policy', None) self.update_volume_efficiency_attributes( volume_name, dedup_enabled, compression_enabled, efficiency_policy=efficiency_policy ) if volume_type != 'dp': if options.get('max_files_multiplier') is not None: max_files_multiplier = options.pop('max_files_multiplier') max_files = na_utils.calculate_max_files(size_gb, max_files_multiplier, max_files) if max_files is not None: self.set_volume_max_files(volume_name, max_files) if snaplock_type is not None: self.set_snaplock_attributes(volume_name, **options) @na_utils.trace def create_volume_async(self, aggregate_list, volume_name, size_gb, thin_provisioned=False, snapshot_policy=None, language=None, snapshot_reserve=None, volume_type='rw', qos_policy_group=None, encrypt=False, adaptive_qos_policy_group=None, auto_provisioned=False, mount_point_name=None, snaplock_type=None, **options): """Creates a volume asynchronously.""" if adaptive_qos_policy_group and not self.features.ADAPTIVE_QOS: msg = 'Adaptive QoS not supported on this backend ONTAP version.' 
    def _get_create_volume_api_args(self, volume_name, thin_provisioned,
                                    snapshot_policy, language,
                                    snapshot_reserve, volume_type,
                                    qos_policy_group, encrypt,
                                    adaptive_qos_policy_group,
                                    mount_point_name=None,
                                    snaplock_type=None):
        """Build the arguments shared by volume-create and -create-async.

        Optional attributes are only added when a value was provided, so
        ONTAP defaults apply otherwise.

        :raises NetAppException: if encryption is requested but FlexVol
            encryption is not supported by this backend.
        """
        api_args = {
            'volume-type': volume_type,
            'space-reserve': ('none' if thin_provisioned else 'volume'),
        }
        # DP (data-protection) volumes must not be junctioned at creation.
        if volume_type != 'dp':
            api_args['junction-path'] = '/%s' % (mount_point_name
                                                or volume_name)
        if snapshot_policy is not None:
            api_args['snapshot-policy'] = snapshot_policy
        if language is not None:
            api_args['language-code'] = language
        if snapshot_reserve is not None:
            api_args['percentage-snapshot-reserve'] = str(snapshot_reserve)
        if qos_policy_group is not None:
            api_args['qos-policy-group-name'] = qos_policy_group
        if adaptive_qos_policy_group is not None:
            api_args['qos-adaptive-policy-group-name'] = (
                adaptive_qos_policy_group)

        if encrypt is True:
            if not self.features.FLEXVOL_ENCRYPTION:
                msg = 'Flexvol encryption is not supported on this backend.'
                raise exception.NetAppException(msg)
            else:
                api_args['encrypt'] = 'true'
        else:
            # Explicitly request an unencrypted volume.
            api_args['encrypt'] = 'false'

        if snaplock_type is not None:
            api_args['snaplock-type'] = snaplock_type

        return api_args

    @na_utils.trace
    def update_volume_snapshot_policy(self, volume_name, snapshot_policy):
        """Set snapshot policy for the specified volume."""
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'attributes': {
                'volume-attributes': {
                    'volume-snapshot-attributes': {
                        'snapshot-policy': snapshot_policy,
                    },
                },
            },
        }
        self.send_request('volume-modify-iter', api_args)

    @na_utils.trace
    @manila_utils.retry(retry_param=exception.NetAppException,
                        interval=3,
                        retries=5,
                        backoff_rate=1)
    def enable_dedup(self, volume_name):
        """Enable deduplication on volume.

        Retries (via the decorator) while another sis operation is active;
        treats "already enabled" as success.
        """
        api_args = {'path': '/vol/%s' % volume_name}
        try:
            self.send_request('sis-enable', api_args)
            return
        except netapp_api.NaApiError as e:
            # Already enabled: nothing to do.
            enabled_msg = "has already been enabled"
            if (e.code == netapp_api.OPERATION_ALREADY_ENABLED and
                    enabled_msg in e.message):
                return

            # A sis operation is in flight: raise NetAppException so the
            # retry decorator tries again.
            active_msg = "sis operation is currently active"
            if (e.code == netapp_api.OPERATION_ALREADY_ENABLED and
                    active_msg in e.message):
                msg = _('Unable to enable dedup. Will retry the '
                        'operation. Error details: %s') % e.message
                LOG.warning(msg)
                raise exception.NetAppException(msg=msg)
            raise e

    @na_utils.trace
    @manila_utils.retry(retry_param=exception.NetAppException,
                        interval=3,
                        retries=5,
                        backoff_rate=1)
    def disable_dedup(self, volume_name):
        """Disable deduplication on volume.

        Retries (via the decorator) while another sis operation is active.
        """
        api_args = {'path': '/vol/%s' % volume_name}
        try:
            self.send_request('sis-disable', api_args)
            return
        except netapp_api.NaApiError as e:
            # A sis operation is in flight: raise NetAppException so the
            # retry decorator tries again.
            active_msg = "sis operation is currently active"
            if (e.code == netapp_api.OPERATION_ALREADY_ENABLED and
                    active_msg in e.message):
                msg = _('Unable to disable dedup. Will retry the '
                        'operation. Error details: %s') % e.message
                LOG.warning(msg)
                raise exception.NetAppException(msg=msg)
            raise e
Error details: %s') % e.message LOG.warning(msg) raise exception.NetAppException(msg=msg) raise e @na_utils.trace def enable_compression(self, volume_name): """Enable compression on volume.""" api_args = { 'path': '/vol/%s' % volume_name, 'enable-compression': 'true' } self.send_request('sis-set-config', api_args) @na_utils.trace def disable_compression(self, volume_name): """Disable compression on volume.""" api_args = { 'path': '/vol/%s' % volume_name, 'enable-compression': 'false' } self.send_request('sis-set-config', api_args) @na_utils.trace def enable_dedupe_async(self, volume_name): """Enable deduplication on FlexVol/FlexGroup volume asynchronously.""" api_args = {'volume-name': volume_name} self.send_request('sis-enable-async', api_args) @na_utils.trace def disable_dedupe_async(self, volume_name): """Disable deduplication on FlexVol/FlexGroup volume asynchronously.""" api_args = {'volume-name': volume_name} self.send_request('sis-disable-async', api_args) @na_utils.trace def enable_compression_async(self, volume_name): """Enable compression on FlexVol/FlexGroup volume asynchronously.""" api_args = { 'volume-name': volume_name, 'enable-compression': 'true' } self.send_request('sis-set-config-async', api_args) @na_utils.trace def disable_compression_async(self, volume_name): """Disable compression on FlexVol/FlexGroup volume asynchronously.""" api_args = { 'volume-name': volume_name, 'enable-compression': 'false' } self.send_request('sis-set-config-async', api_args) @na_utils.trace def apply_volume_efficiency_policy(self, volume_name, efficiency_policy=None): """Apply efficiency policy to FlexVol/FlexGroup volume.""" if efficiency_policy: api_args = { 'path': f'/vol/{volume_name}', 'policy-name': efficiency_policy } self.send_request('sis-set-config', api_args) @na_utils.trace def apply_volume_efficiency_policy_async(self, volume_name, efficiency_policy=None): """Apply efficiency policy to FlexVol volume asynchronously.""" if efficiency_policy: api_args = { 
'path': f'/vol/{volume_name}', 'policy-name': efficiency_policy } self.connection.send_request('sis-set-config-async', api_args) @na_utils.trace def get_volume_efficiency_status(self, volume_name): """Get dedupe & compression status for a volume.""" api_args = { 'query': { 'sis-status-info': { 'path': '/vol/%s' % volume_name, }, }, 'desired-attributes': { 'sis-status-info': { 'state': None, 'is-compression-enabled': None, }, }, } try: result = self.send_iter_request('sis-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') sis_status_info = attributes_list.get_child_by_name( 'sis-status-info') or netapp_api.NaElement('none') except exception.NetAppException: msg = _('Failed to get volume efficiency status for %s.') LOG.error(msg, volume_name) sis_status_info = netapp_api.NaElement('none') return { 'dedupe': True if 'enabled' == sis_status_info.get_child_content( 'state') else False, 'compression': True if 'true' == sis_status_info.get_child_content( 'is-compression-enabled') else False, } @na_utils.trace def set_volume_max_files(self, volume_name, max_files, retry_allocated=False): """Set flexvol file limit.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': { 'files-total': max_files, }, }, }, } result = self.send_request('volume-modify-iter', api_args) failures = result.get_child_content('num-failed') if failures and int(failures) > 0: failure_list = result.get_child_by_name( 'failure-list') or netapp_api.NaElement('none') errors = failure_list.get_children() if not errors: return error_code = errors[0].get_child_content('error-code') if retry_allocated: if error_code == netapp_api.EVOLOPNOTSUPP: alloc_files = self.get_volume_allocated_files( volume_name) new_max_files = alloc_files['used'] # no need to act if current max files are set to # allocated files if new_max_files == 
alloc_files['max']: return msg = _('Set higher max files %(new_max_files)s ' 'on %(vol)s. The current allocated inodes ' 'are larger than requested %(max_files)s.') msg_args = {'vol': volume_name, 'max_files': max_files, 'new_max_files': new_max_files} LOG.info(msg, msg_args) self.set_volume_max_files(volume_name, new_max_files, retry_allocated=False) else: raise netapp_api.NaApiError( error_code, errors[0].get_child_content('error-message')) @na_utils.trace def set_volume_size(self, volume_name, size_gb): """Set volume size.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-space-attributes': { 'size': int(size_gb) * units.Gi, }, }, }, } result = self.send_request('volume-modify-iter', api_args) failures = result.get_child_content('num-failed') if failures and int(failures) > 0: failure_list = result.get_child_by_name( 'failure-list') or netapp_api.NaElement('none') errors = failure_list.get_children() if errors: raise netapp_api.NaApiError( errors[0].get_child_content('error-code'), errors[0].get_child_content('error-message')) @na_utils.trace def set_volume_snapdir_access(self, volume_name, hide_snapdir): """Set volume snapshot directory visibility.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-snapshot-attributes': { 'snapdir-access-enabled': str( not hide_snapdir).lower(), }, }, }, } result = self.send_request('volume-modify-iter', api_args) failures = result.get_child_content('num-failed') if failures and int(failures) > 0: failure_list = result.get_child_by_name( 'failure-list') or netapp_api.NaElement('none') errors = failure_list.get_children() if errors: raise netapp_api.NaApiError( errors[0].get_child_content('error-code'), errors[0].get_child_content('error-message')) @na_utils.trace def set_volume_filesys_size_fixed(self, volume_name, 
filesys_size_fixed=False): """Set volume file system size fixed to true/false.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-space-attributes': { 'is-filesys-size-fixed': str( filesys_size_fixed).lower(), }, }, }, } result = self.send_request('volume-modify-iter', api_args) failures = result.get_child_content('num-failed') if failures and int(failures) > 0: failure_list = result.get_child_by_name( 'failure-list') or netapp_api.NaElement('none') errors = failure_list.get_children() if errors: raise netapp_api.NaApiError( errors[0].get_child_content('error-code'), errors[0].get_child_content('error-message')) @na_utils.trace def set_volume_security_style(self, volume_name, security_style='unix'): """Set volume security style""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-security-attributes': { 'style': security_style, }, }, }, } result = self.send_request('volume-modify-iter', api_args) failures = result.get_child_content('num-failed') if failures and int(failures) > 0: failure_list = result.get_child_by_name( 'failure-list') or netapp_api.NaElement('none') errors = failure_list.get_children() if errors: raise netapp_api.NaApiError( errors[0].get_child_content('error-code'), errors[0].get_child_content('error-message')) @na_utils.trace def set_volume_name(self, volume_name, new_volume_name): """Set flexvol name.""" api_args = { 'volume': volume_name, 'new-volume-name': new_volume_name, } self.send_request('volume-rename', api_args) @na_utils.trace def rename_vserver(self, vserver_name, new_vserver_name): """Rename a vserver.""" api_args = { 'vserver-name': vserver_name, 'new-name': new_vserver_name, } self.send_request('vserver-rename', api_args) @na_utils.trace def modify_volume(self, aggregate_name, volume_name, thin_provisioned=False, 
snapshot_policy=None, language=None, dedup_enabled=False, compression_enabled=False, max_files=None, qos_policy_group=None, hide_snapdir=None, autosize_attributes=None, adaptive_qos_policy_group=None, **options): """Update backend volume for a share as necessary. :param aggregate_name: either a list or a string. List for aggregate names where the FlexGroup resides, while a string for the aggregate name where FlexVol volume is. :param volume_name: name of the modified volume. :param thin_provisioned: volume is thin. :param snapshot_policy: policy of volume snapshot. :param language: language of the volume. :param dedup_enabled: is the deduplication enabled for the volume. :param compression_enabled: is the compression enabled for the volume. :param max_files: number of maximum files in the volume. :param qos_policy_group: name of the QoS policy. :param hide_snapdir: hide snapshot directory. :param autosize_attributes: autosize for the volume. :param adaptive_qos_policy_group: name of the adaptive QoS policy. """ if adaptive_qos_policy_group and not self.features.ADAPTIVE_QOS: msg = 'Adaptive QoS not supported on this backend ONTAP version.' 
raise exception.NetAppException(msg) api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': {}, 'volume-language-attributes': {}, 'volume-snapshot-attributes': {}, 'volume-autosize-attributes': (autosize_attributes if autosize_attributes else {}), 'volume-space-attributes': { 'space-guarantee': ('none' if thin_provisioned else 'volume'), }, }, }, } if isinstance(aggregate_name, str): is_flexgroup = False api_args['query']['volume-attributes']['volume-id-attributes'][ 'containing-aggregate-name'] = aggregate_name elif isinstance(aggregate_name, list): is_flexgroup = True aggr_list = [{'aggr-name': aggr_name} for aggr_name in aggregate_name] api_args['query']['volume-attributes']['volume-id-attributes'][ 'aggr-list'] = aggr_list if language: api_args['attributes']['volume-attributes'][ 'volume-language-attributes']['language'] = language if max_files: api_args['attributes']['volume-attributes'][ 'volume-inode-attributes']['files-total'] = max_files if snapshot_policy: api_args['attributes']['volume-attributes'][ 'volume-snapshot-attributes'][ 'snapshot-policy'] = snapshot_policy if qos_policy_group: api_args['attributes']['volume-attributes'][ 'volume-qos-attributes'] = { 'policy-group-name': qos_policy_group, } if adaptive_qos_policy_group: api_args['attributes']['volume-attributes'][ 'volume-qos-attributes'] = { 'adaptive-policy-group-name': adaptive_qos_policy_group, } if hide_snapdir in (True, False): # Value of hide_snapdir needs to be inverted for ZAPI parameter api_args['attributes']['volume-attributes'][ 'volume-snapshot-attributes'][ 'snapdir-access-enabled'] = str( not hide_snapdir).lower() self.send_request('volume-modify-iter', api_args) efficiency_policy = options.get('efficiency_policy', None) # Efficiency options must be handled separately self.update_volume_efficiency_attributes( volume_name, dedup_enabled, compression_enabled, 
is_flexgroup=is_flexgroup, efficiency_policy=efficiency_policy ) if self._is_snaplock_enabled_volume(volume_name): self.set_snaplock_attributes(volume_name, **options) @na_utils.trace def update_volume_efficiency_attributes(self, volume_name, dedup_enabled, compression_enabled, is_flexgroup=False, efficiency_policy=None): """Update dedupe & compression attributes to match desired values.""" efficiency_status = self.get_volume_efficiency_status(volume_name) # cDOT compression requires dedup to be enabled dedup_enabled = dedup_enabled or compression_enabled # enable/disable dedup if needed if dedup_enabled and not efficiency_status['dedupe']: if is_flexgroup: self.enable_dedupe_async(volume_name) else: self.enable_dedup(volume_name) elif not dedup_enabled and efficiency_status['dedupe']: if is_flexgroup: self.disable_dedupe_async(volume_name) else: self.disable_dedup(volume_name) # enable/disable compression if needed if compression_enabled and not efficiency_status['compression']: if is_flexgroup: self.enable_compression_async(volume_name) else: self.enable_compression(volume_name) elif not compression_enabled and efficiency_status['compression']: if is_flexgroup: self.disable_compression_async(volume_name) else: self.disable_compression(volume_name) if is_flexgroup: self.apply_volume_efficiency_policy_async( volume_name, efficiency_policy=efficiency_policy) else: self.apply_volume_efficiency_policy( volume_name, efficiency_policy=efficiency_policy) @na_utils.trace def volume_exists(self, volume_name): """Checks if volume exists.""" LOG.debug('Checking if volume %s exists', volume_name) api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) return self._has_records(result) @na_utils.trace def is_flexvol_encrypted(self, volume_name, vserver_name): """Checks 
    @na_utils.trace
    def get_aggregate_for_volume(self, volume_name):
        """Get the name of the aggregate containing a volume.

        :returns: aggregate name (str) for a FlexVol, or a list of
            aggregate names for a FlexGroup.
        :raises NetAppException: if no aggregate can be determined.
        """
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'aggr-list': {
                            'aggr-name': None,
                        },
                        'containing-aggregate-name': None,
                        'name': None,
                    },
                },
            },
        }
        result = self.send_iter_request('volume-get-iter', api_args)

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        volume_attributes = attributes_list.get_child_by_name(
            'volume-attributes') or netapp_api.NaElement('none')
        volume_id_attributes = volume_attributes.get_child_by_name(
            'volume-id-attributes') or netapp_api.NaElement('none')

        aggregate = volume_id_attributes.get_child_content(
            'containing-aggregate-name')

        # FlexGroups have no single containing aggregate; fall back to the
        # aggregate list.
        if not aggregate:
            aggr_list_attr = volume_id_attributes.get_child_by_name(
                'aggr-list') or netapp_api.NaElement('none')
            aggregate = [aggr_elem.get_content()
                         for aggr_elem in aggr_list_attr.get_children()]

        if not aggregate:
            msg = _('Could not find aggregate for volume %s.')
            raise exception.NetAppException(msg % volume_name)

        return aggregate

    @na_utils.trace
    def volume_has_luns(self, volume_name):
        """Checks if volume has LUNs."""
        LOG.debug('Checking if volume %s has LUNs', volume_name)

        api_args = {
            'query': {
                'lun-info': {
                    'volume': volume_name,
                },
            },
            'desired-attributes': {
                'lun-info': {
                    'path': None,
                },
            },
        }
        result = self.send_iter_request('lun-get-iter', api_args)
        return self._has_records(result)

    @na_utils.trace
    def volume_has_junctioned_volumes(self, junction_path):
        """Checks if volume has volumes mounted beneath its junction path."""
        if not junction_path:
            return False
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        # Wildcard match for any junction below this path.
                        'junction-path': junction_path + '/*',
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None,
                    },
                },
            },
        }
        result = self.send_iter_request('volume-get-iter', api_args)
        return self._has_records(result)

    @na_utils.trace
    def get_volume_autosize_attributes(self, volume_name):
        """Returns autosize attributes for a given volume name.

        :returns: dict with 'mode', grow/shrink threshold percentages and
            minimum/maximum sizes, as reported by 'volume-autosize-get'.
        """
        api_args = {
            'volume': volume_name,
        }
        result = self.send_request('volume-autosize-get', api_args)
        # NOTE(dviroel): 'is-enabled' is deprecated since ONTAP 8.2, use
        # 'mode' to identify if autosize is enabled or not.
        return {
            'mode': result.get_child_content('mode'),
            'grow-threshold-percent': result.get_child_content(
                'grow-threshold-percent'),
            'shrink-threshold-percent': result.get_child_content(
                'shrink-threshold-percent'),
            'maximum-size': result.get_child_content('maximum-size'),
            'minimum-size': result.get_child_content('minimum-size'),
        }
return { 'mode': result.get_child_content('mode'), 'grow-threshold-percent': result.get_child_content( 'grow-threshold-percent'), 'shrink-threshold-percent': result.get_child_content( 'shrink-threshold-percent'), 'maximum-size': result.get_child_content('maximum-size'), 'minimum-size': result.get_child_content('minimum-size'), } @na_utils.trace def get_volume_snapshot_attributes(self, volume_name): """Returns snapshot attributes""" desired_snapshot_attributes = { 'snapshot-policy': None, 'snapdir-access-enabled': None, } api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-snapshot-attributes': desired_snapshot_attributes, }, }, } result = self.send_request('volume-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') volume_attributes_list = attributes_list.get_children() if not self._has_records(result): raise exception.StorageResourceNotFound(name=volume_name) elif len(volume_attributes_list) > 1: msg = _('Could not find unique volume %(vol)s.') msg_args = {'vol': volume_name} raise exception.NetAppException(msg % msg_args) vol_attr = volume_attributes_list[0] vol_snapshot_attr = vol_attr.get_child_by_name( "volume-snapshot-attributes") or netapp_api.NaElement('none') return {key: vol_snapshot_attr.get_child_content(key) for key in desired_snapshot_attributes.keys()} def get_volume_allocated_files(self, volume_name): """Get flexvol allocated files""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-inode-attributes': { 'inodefile-public-capacity': None, 'files-total': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) if not self._has_records(result): return None attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') 
volume_attributes = attributes_list.get_child_by_name( 'volume-attributes') or netapp_api.NaElement('none') volume_inode_attributes = volume_attributes.get_child_by_name( 'volume-inode-attributes') or netapp_api.NaElement('none') return { 'used': volume_inode_attributes.get_child_content( 'inodefile-public-capacity'), 'max': volume_inode_attributes.get_child_content( 'files-total'), } @na_utils.trace def get_volume(self, volume_name): """Returns the volume with the specified name, if present.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'aggr-list': { 'aggr-name': None, }, 'containing-aggregate-name': None, 'junction-path': None, 'name': None, 'owning-vserver-name': None, 'type': None, 'style': None, 'style-extended': None, }, 'volume-qos-attributes': { 'policy-group-name': None, }, 'volume-space-attributes': { 'size': None, 'size-used': None, }, 'volume-snaplock-attributes': { 'snaplock-type': None, }, }, }, } result = self.send_request('volume-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') volume_attributes_list = attributes_list.get_children() if not self._has_records(result): raise exception.StorageResourceNotFound(name=volume_name) elif len(volume_attributes_list) > 1: msg = _('Could not find unique volume %(vol)s.') msg_args = {'vol': volume_name} raise exception.NetAppException(msg % msg_args) volume_attributes = volume_attributes_list[0] volume_id_attributes = volume_attributes.get_child_by_name( 'volume-id-attributes') or netapp_api.NaElement('none') volume_qos_attributes = volume_attributes.get_child_by_name( 'volume-qos-attributes') or netapp_api.NaElement('none') volume_space_attributes = volume_attributes.get_child_by_name( 'volume-space-attributes') or netapp_api.NaElement('none') volume_snaplock_attributes = volume_attributes.get_child_by_name( 
    @na_utils.trace
    def get_volume_at_junction_path(self, junction_path):
        """Returns the volume with the specified junction path, if present.

        :returns: dict with the volume 'name', or None when not found or
            when junction_path is empty.
        """
        if not junction_path:
            return None
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'junction-path': junction_path,
                        # Match either FlexGroup or FlexVol styles ('|' is
                        # the ZAPI query OR operator).
                        'style-extended': '%s|%s' % (
                            na_utils.FLEXGROUP_STYLE_EXTENDED,
                            na_utils.FLEXVOL_STYLE_EXTENDED),
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None,
                    },
                },
            },
        }
        result = self.send_iter_request('volume-get-iter', api_args)
        if not self._has_records(result):
            return None

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        volume_attributes = attributes_list.get_child_by_name(
            'volume-attributes') or netapp_api.NaElement('none')
        volume_id_attributes = volume_attributes.get_child_by_name(
            'volume-id-attributes') or netapp_api.NaElement('none')

        volume = {
            'name': volume_id_attributes.get_child_content('name'),
        }
        return volume

    @na_utils.trace
    def get_volume_to_manage(self, aggregate_name, volume_name):
        """Get flexvol to be managed by Manila.

        :param aggregate_name: either a list or a string. List for aggregate
            names where the FlexGroup resides, while a string for the
            aggregate name where FlexVol volume is.
        :param volume_name: name of the managed volume.
        """
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'aggr-list': {
                            'aggr-name': None,
                        },
                        'containing-aggregate-name': None,
                        'junction-path': None,
                        'name': None,
                        'type': None,
                        'style': None,
                        'owning-vserver-name': None,
                    },
                    'volume-qos-attributes': {
                        'policy-group-name': None,
                    },
                    'volume-space-attributes': {
                        'size': None,
                    },
                },
            },
        }
        # Narrow the query by aggregate: single name for FlexVol, list for
        # FlexGroup.
        if isinstance(aggregate_name, str):
            api_args['query']['volume-attributes']['volume-id-attributes'][
                'containing-aggregate-name'] = aggregate_name
        elif isinstance(aggregate_name, list):
            aggr_list = [{'aggr-name': aggr_name} for aggr_name in
                         aggregate_name]
            api_args['query']['volume-attributes']['volume-id-attributes'][
                'aggr-list'] = aggr_list

        result = self.send_iter_request('volume-get-iter', api_args)
        if not self._has_records(result):
            return None

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        volume_attributes = attributes_list.get_child_by_name(
            'volume-attributes') or netapp_api.NaElement('none')
        volume_id_attributes = volume_attributes.get_child_by_name(
            'volume-id-attributes') or netapp_api.NaElement('none')
        volume_qos_attributes = volume_attributes.get_child_by_name(
            'volume-qos-attributes') or netapp_api.NaElement('none')
        volume_space_attributes = volume_attributes.get_child_by_name(
            'volume-space-attributes') or netapp_api.NaElement('none')

        aggregate = volume_id_attributes.get_child_content(
            'containing-aggregate-name')
        aggregate_list = []
        # FlexGroups report no containing aggregate; use the aggr-list.
        if not aggregate:
            aggregate = ''
            aggr_list_attr = volume_id_attributes.get_child_by_name(
                'aggr-list') or netapp_api.NaElement('none')
            aggregate_list = [aggr_elem.get_content()
                              for aggr_elem in aggr_list_attr.get_children()]

        volume = {
            'aggregate': aggregate,
            'aggr-list': aggregate_list,
            'junction-path': volume_id_attributes.get_child_content(
                'junction-path'),
            'name': volume_id_attributes.get_child_content('name'),
            'type': volume_id_attributes.get_child_content('type'),
            'style': volume_id_attributes.get_child_content('style'),
            'owning-vserver-name': volume_id_attributes.get_child_content(
                'owning-vserver-name'),
            'size': volume_space_attributes.get_child_content('size'),
            'qos-policy-group-name': volume_qos_attributes.get_child_content(
                'policy-group-name')
        }
        return volume

    @na_utils.trace
    def create_volume_clone(self, volume_name, parent_volume_name,
                            parent_snapshot_name=None, qos_policy_group=None,
                            adaptive_qos_policy_group=None,
                            mount_point_name=None, **options):
        """Clones a volume from a parent volume/snapshot."""
        api_args = {
            'volume': volume_name,
            'parent-volume': parent_volume_name,
            'parent-snapshot': parent_snapshot_name,
            'junction-path': '/%s' % (mount_point_name or volume_name),
        }
        if qos_policy_group is not None:
            api_args['qos-policy-group-name'] = qos_policy_group
        self.send_request('volume-clone-create', api_args)
        # Adaptive QoS cannot be passed to volume-clone-create; apply it
        # afterwards.
        if adaptive_qos_policy_group is not None:
            self.set_qos_adaptive_policy_group_for_volume(
                volume_name, adaptive_qos_policy_group)

    @na_utils.trace
    def volume_clone_split_start(self, volume_name):
        """Begins splitting a clone from its parent.

        A split that is already in progress is treated as success.
        """
        try:
            api_args = {'volume': volume_name}
            self.send_request('volume-clone-split-start', api_args)
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EVOL_CLONE_BEING_SPLIT:
                return
            raise

    @na_utils.trace
    def volume_clone_split_status(self, volume_name):
        """Status of splitting a clone from its parent.

        :returns: completion percentage (int); 100 when the split is done,
            was never started, or the status cannot be determined.
        """
        try:
            api_args = {'volume': volume_name}
            result = self.send_request('volume-clone-split-status', api_args)
        except netapp_api.NaApiError:
            # any exception in status is considered either clone split is
            # completed or not triggred on this volume
            return 100

        clone_split_details = result.get_child_by_name(
            'clone-split-details') or netapp_api.NaElement('none')
        # Only the first reported detail record is considered.
        for clone_split_details_info in clone_split_details.get_children():
            percentage = clone_split_details_info.get_child_content(
                'block-percentage-complete')
            try:
                return int(percentage)
            except Exception:
                return 100
        return 100
    @na_utils.trace
    def volume_clone_split_stop(self, volume_name):
        """Stop splitting a clone from its parent.

        Missing volume, non-clone volume or no split under way are all
        treated as success.
        """
        try:
            api_args = {'volume': volume_name}
            self.send_request('volume-clone-split-stop', api_args)
        except netapp_api.NaApiError as e:
            if e.code in (netapp_api.EVOLUMEDOESNOTEXIST,
                          netapp_api.EVOLNOTCLONE,
                          netapp_api.EVOLOPNOTUNDERWAY):
                return
            raise

    @na_utils.trace
    def check_volume_clone_split_completed(self, volume_name):
        """Check if volume clone split operation already finished"""
        # A volume with no clone-parent snapshot is either fully split or
        # was never a clone.
        return self.get_volume_clone_parent_snaphot(volume_name) is None

    @na_utils.trace
    def get_volume_clone_parent_snaphot(self, volume_name):
        """Gets volume's clone parent.

        Return the snapshot name of a volume's clone parent, or None if it
        doesn't exist.
        """
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name
                    }
                }
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-clone-attributes': {
                        'volume-clone-parent-attributes': {
                            'snapshot-name': ''
                        }
                    }
                }
            }
        }
        result = self.send_iter_request('volume-get-iter', api_args)
        if not self._has_records(result):
            return None

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        volume_attributes = attributes_list.get_child_by_name(
            'volume-attributes') or netapp_api.NaElement('none')
        vol_clone_attrs = volume_attributes.get_child_by_name(
            'volume-clone-attributes') or netapp_api.NaElement('none')
        vol_clone_parent_atts = vol_clone_attrs.get_child_by_name(
            'volume-clone-parent-attributes') or netapp_api.NaElement(
            'none')
        snapshot_name = vol_clone_parent_atts.get_child_content(
            'snapshot-name')
        return snapshot_name

    @na_utils.trace
    def get_clone_children_for_snapshot(self, volume_name, snapshot_name):
        """Returns volumes that are keeping a snapshot locked.

        :returns: list of dicts with the 'name' of each clone child.
        """
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-clone-attributes': {
                        'volume-clone-parent-attributes': {
                            'name': volume_name,
                            'snapshot-name': snapshot_name,
                        },
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None,
                    },
                },
            },
        }
        result = self.send_iter_request('volume-get-iter', api_args)
        if not self._has_records(result):
            return []

        volume_list = []
        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        for volume_attributes in attributes_list.get_children():
            volume_id_attributes = volume_attributes.get_child_by_name(
                'volume-id-attributes') or netapp_api.NaElement('none')
            volume_list.append({
                'name': volume_id_attributes.get_child_content('name'),
            })
        return volume_list

    @na_utils.trace
    def get_volume_junction_path(self, volume_name, is_style_cifs=False):
        """Gets a volume junction path."""
        api_args = {
            'volume': volume_name,
            'is-style-cifs': str(is_style_cifs).lower(),
        }
        result = self.send_request('volume-get-volume-path', api_args)
        return result.get_child_content('junction')
str(is_style_cifs).lower(), } result = self.send_request('volume-get-volume-path', api_args) return result.get_child_content('junction') @na_utils.trace def mount_volume(self, volume_name, junction_path=None): """Mounts a volume on a junction path.""" api_args = { 'volume-name': volume_name, 'junction-path': (junction_path if junction_path else '/%s' % volume_name) } self.send_request('volume-mount', api_args) @na_utils.trace def offline_volume(self, volume_name): """Offlines a volume.""" try: self.send_request('volume-offline', {'name': volume_name}) except netapp_api.NaApiError as e: if e.code == netapp_api.EVOLUMEOFFLINE: return raise @na_utils.trace def _unmount_volume(self, volume_name, force=False): """Unmounts a volume.""" api_args = { 'volume-name': volume_name, 'force': str(force).lower(), } try: self.send_request('volume-unmount', api_args) except netapp_api.NaApiError as e: if e.code == netapp_api.EVOL_NOT_MOUNTED: return raise @na_utils.trace def unmount_volume(self, volume_name, force=False, wait_seconds=30): """Unmounts a volume, retrying if a clone split is ongoing. NOTE(cknight): While unlikely to happen in normal operation, any client that tries to delete volumes immediately after creating volume clones is likely to experience failures if cDOT isn't quite ready for the delete. The volume unmount is the first operation in the delete path that fails in this case, and there is no proactive check we can use to reliably predict the failure. And there isn't a specific error code from volume-unmount, so we have to check for a generic error code plus certain language in the error code. It's ugly, but it works, and it's better than hard-coding a fixed delay. """ # Do the unmount, handling split-related errors with retries. 
retry_interval = 3 # seconds for retry in range(int(wait_seconds / retry_interval)): try: self._unmount_volume(volume_name, force=force) LOG.debug('Volume %s unmounted.', volume_name) return except netapp_api.NaApiError as e: if e.code == netapp_api.EAPIERROR and 'job ID' in e.message: msg = ('Could not unmount volume %(volume)s due to ' 'ongoing volume operation: %(exception)s') msg_args = {'volume': volume_name, 'exception': e} LOG.warning(msg, msg_args) time.sleep(retry_interval) continue raise msg = _('Failed to unmount volume %(volume)s after ' 'waiting for %(wait_seconds)s seconds.') msg_args = {'volume': volume_name, 'wait_seconds': wait_seconds} LOG.error(msg, msg_args) raise exception.NetAppException(msg % msg_args) @na_utils.trace def delete_volume(self, volume_name): """Deletes a volume.""" self.send_request('volume-destroy', {'name': volume_name}) @na_utils.trace def create_snapshot(self, volume_name, snapshot_name, snapmirror_label=None): """Creates a volume snapshot.""" api_args = {'volume': volume_name, 'snapshot': snapshot_name} if snapmirror_label is not None: api_args['snapmirror-label'] = snapmirror_label self.send_request('snapshot-create', api_args) @na_utils.trace def snapshot_exists(self, snapshot_name, volume_name): """Checks if Snapshot exists for a specified volume.""" LOG.debug('Checking if snapshot %(snapshot)s exists for ' 'volume %(volume)s', {'snapshot': snapshot_name, 'volume': volume_name}) # Gets a single snapshot. 
        api_args = {
            'query': {
                'snapshot-info': {
                    'name': snapshot_name,
                    'volume': volume_name,
                },
            },
            'desired-attributes': {
                'snapshot-info': {
                    'name': None,
                    'volume': None,
                    'busy': None,
                    'snapshot-owners-list': {
                        'snapshot-owner': None,
                    }
                },
            },
        }
        result = self.send_request('snapshot-get-iter', api_args)

        # ONTAP reports per-volume errors in a 'volume-errors' element
        # rather than failing the whole call; surface them explicitly.
        error_record_list = result.get_child_by_name(
            'volume-errors') or netapp_api.NaElement('none')
        errors = error_record_list.get_children()

        if errors:
            error = errors[0]
            error_code = error.get_child_content('errno')
            error_reason = error.get_child_content('reason')
            msg = _('Could not read information for snapshot %(name)s. '
                    'Code: %(code)s. Reason: %(reason)s')
            msg_args = {
                'name': snapshot_name,
                'code': error_code,
                'reason': error_reason
            }
            if error_code == netapp_api.ESNAPSHOTNOTALLOWED:
                raise exception.SnapshotUnavailable(msg % msg_args)
            else:
                raise exception.NetAppException(msg % msg_args)

        return self._has_records(result)

    @na_utils.trace
    def get_snapshot(self, volume_name, snapshot_name):
        """Gets a single snapshot."""
        api_args = {
            'query': {
                'snapshot-info': {
                    'name': snapshot_name,
                    'volume': volume_name,
                },
            },
            'desired-attributes': {
                'snapshot-info': {
                    'access-time': None,
                    'name': None,
                    'volume': None,
                    'busy': None,
                    'snapshot-owners-list': {
                        'snapshot-owner': None,
                    }
                },
            },
        }
        result = self.send_request('snapshot-get-iter', api_args)

        # Same per-volume error handling as snapshot_exists above.
        error_record_list = result.get_child_by_name(
            'volume-errors') or netapp_api.NaElement('none')
        errors = error_record_list.get_children()

        if errors:
            error = errors[0]
            error_code = error.get_child_content('errno')
            error_reason = error.get_child_content('reason')
            msg = _('Could not read information for snapshot %(name)s. '
                    'Code: %(code)s. Reason: %(reason)s')
            msg_args = {
                'name': snapshot_name,
                'code': error_code,
                'reason': error_reason
            }
            if error_code == netapp_api.ESNAPSHOTNOTALLOWED:
                raise exception.SnapshotUnavailable(msg % msg_args)
            else:
                raise exception.NetAppException(msg % msg_args)

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        snapshot_info_list = attributes_list.get_children()

        if not self._has_records(result):
            raise exception.SnapshotResourceNotFound(name=snapshot_name)
        elif len(snapshot_info_list) > 1:
            msg = _('Could not find unique snapshot %(snap)s on '
                    'volume %(vol)s.')
            msg_args = {'snap': snapshot_name, 'vol': volume_name}
            raise exception.NetAppException(msg % msg_args)

        snapshot_info = snapshot_info_list[0]
        snapshot = {
            'access-time': snapshot_info.get_child_content('access-time'),
            'name': snapshot_info.get_child_content('name'),
            'volume': snapshot_info.get_child_content('volume'),
            'busy': strutils.bool_from_string(
                snapshot_info.get_child_content('busy')),
        }

        snapshot_owners_list = snapshot_info.get_child_by_name(
            'snapshot-owners-list') or netapp_api.NaElement('none')
        snapshot_owners = set([
            snapshot_owner.get_child_content('owner')
            for snapshot_owner in snapshot_owners_list.get_children()])
        snapshot['owners'] = snapshot_owners
        # Locked only when a volume clone is the *sole* owner.
        snapshot['locked_by_clone'] = snapshot['owners'] == {'volume clone'}

        return snapshot

    @na_utils.trace
    def rename_snapshot(self, volume_name, snapshot_name, new_snapshot_name):
        """Renames a snapshot on the given volume."""
        api_args = {
            'volume': volume_name,
            'current-name': snapshot_name,
            'new-name': new_snapshot_name
        }
        self.send_request('snapshot-rename', api_args)

    @na_utils.trace
    def restore_snapshot(self, volume_name, snapshot_name):
        """Reverts a volume to the specified snapshot."""
        api_args = {
            'volume': volume_name,
            'snapshot': snapshot_name,
        }
        self.send_request('snapshot-restore-volume', api_args)

    @na_utils.trace
    def delete_snapshot(self, volume_name, snapshot_name,
                        ignore_owners=False):
        """Deletes a volume snapshot."""
        # ZAPI expects a lowercase string boolean.
        ignore_owners = ('true' if strutils.bool_from_string(ignore_owners)
                         else 'false')
        api_args = {
            'volume': volume_name,
            'snapshot': snapshot_name,
            'ignore-owners': ignore_owners,
        }
        self.send_request('snapshot-delete', api_args)

    @na_utils.trace
    def soft_delete_snapshot(self, volume_name, snapshot_name):
        """Deletes a volume snapshot, or renames it if delete fails."""
        try:
            self.delete_snapshot(volume_name, snapshot_name)
        except netapp_api.NaApiError:
            self.rename_snapshot(volume_name,
                                 snapshot_name,
                                 DELETED_PREFIX + snapshot_name)
            msg = _('Soft-deleted snapshot %(snapshot)s on volume %(volume)s.')
            msg_args = {'snapshot': snapshot_name, 'volume': volume_name}
            LOG.info(msg, msg_args)
            # Snapshots are locked by clone(s), so split the clone(s)
            snapshot_children = self.get_clone_children_for_snapshot(
                volume_name, DELETED_PREFIX + snapshot_name)
            for snapshot_child in snapshot_children:
                self.volume_clone_split_start(snapshot_child['name'])

    @na_utils.trace
    def prune_deleted_snapshots(self):
        """Deletes non-busy snapshots that were previously soft-deleted."""
        deleted_snapshots_map = self._get_deleted_snapshots()

        for vserver in deleted_snapshots_map:
            # Use a vserver-scoped copy of this client for each vserver.
            client = copy.deepcopy(self)
            client.set_vserver(vserver)
            for snapshot in deleted_snapshots_map[vserver]:
                try:
                    client.delete_snapshot(snapshot['volume'],
                                           snapshot['name'])
                except netapp_api.NaApiError:
                    # Best-effort reaping; log and continue with the rest.
                    msg = _('Could not delete snapshot %(snap)s on '
                            'volume %(volume)s.')
                    msg_args = {
                        'snap': snapshot['name'],
                        'volume': snapshot['volume'],
                    }
                    LOG.exception(msg, msg_args)

    @na_utils.trace
    def _get_deleted_snapshots(self):
        """Returns non-busy, soft-deleted snapshots suitable for reaping."""
        api_args = {
            'query': {
                'snapshot-info': {
                    'name': DELETED_PREFIX + '*',
                    'busy': 'false',
                },
            },
            'desired-attributes': {
                'snapshot-info': {
                    'name': None,
                    'vserver': None,
                    'volume': None,
                },
            },
        }
        result = self.send_iter_request('snapshot-get-iter', api_args)

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')

        # Build a map of snapshots, one
        # list of snapshots per vserver.
        snapshot_map = {}
        for snapshot_info in attributes_list.get_children():
            vserver = snapshot_info.get_child_content('vserver')
            snapshot_list = snapshot_map.get(vserver, [])
            snapshot_list.append({
                'name': snapshot_info.get_child_content('name'),
                'volume': snapshot_info.get_child_content('volume'),
                'vserver': vserver,
            })
            snapshot_map[vserver] = snapshot_list

        return snapshot_map

    @na_utils.trace
    def create_cg_snapshot(self, volume_names, snapshot_name):
        """Creates a consistency group snapshot of one or more flexvols."""
        cg_id = self._start_cg_snapshot(volume_names, snapshot_name)
        if not cg_id:
            msg = _('Could not start consistency group snapshot %s.')
            raise exception.NetAppException(msg % snapshot_name)
        self._commit_cg_snapshot(cg_id)

    @na_utils.trace
    def _start_cg_snapshot(self, volume_names, snapshot_name):
        # Starts the CG snapshot and returns the consistency group id.
        api_args = {
            'snapshot': snapshot_name,
            'timeout': 'relaxed',
            'volumes': [
                {'volume-name': volume_name} for volume_name in volume_names
            ],
        }
        result = self.send_request('cg-start', api_args)
        return result.get_child_content('cg-id')

    @na_utils.trace
    def _commit_cg_snapshot(self, cg_id):
        # Commits a previously started CG snapshot.
        api_args = {'cg-id': cg_id}
        self.send_request('cg-commit', api_args)

    @na_utils.trace
    def create_cifs_share(self, share_name, path):
        """Creates a CIFS share exporting the given path."""
        api_args = {'path': path, 'share-name': share_name}
        self.send_request('cifs-share-create', api_args)

    @na_utils.trace
    def cifs_share_exists(self, share_name):
        """Check that a cifs share already exists."""
        share_path = '/%s' % share_name
        api_args = {
            'query': {
                'cifs-share': {
                    'share-name': share_name,
                    'path': share_path,
                },
            },
            'desired-attributes': {
                'cifs-share': {
                    'share-name': None
                }
            },
        }
        result = self.send_iter_request('cifs-share-get-iter', api_args)
        return self._has_records(result)

    @na_utils.trace
    def get_cifs_share_access(self, share_name):
        """Returns a dict mapping user-or-group name to permission."""
        api_args = {
            'query': {
                'cifs-share-access-control': {
                    'share': share_name,
                },
            },
            'desired-attributes': {
                'cifs-share-access-control': {
                    'user-or-group': None,
                    'permission': None,
                },
            },
        }
        result = self.send_iter_request('cifs-share-access-control-get-iter',
                                        api_args)
        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        rules = {}

        for rule in attributes_list.get_children():
            user_or_group = rule.get_child_content('user-or-group')
            permission = rule.get_child_content('permission')
            rules[user_or_group] = permission

        return rules

    @na_utils.trace
    def add_cifs_share_access(self, share_name, user_name, readonly):
        """Grants a user or group access to a CIFS share."""
        try:
            api_args = {
                'permission': 'read' if readonly else 'full_control',
                'share': share_name,
                'user-or-group': user_name,
            }
            self.send_request('cifs-share-access-control-create', api_args)
        except netapp_api.NaApiError as e:
            # An already-existing rule is not an error.
            if e.code != netapp_api.EDUPLICATEENTRY:
                raise

    @na_utils.trace
    def modify_cifs_share_access(self, share_name, user_name, readonly):
        """Modifies an existing CIFS share access rule."""
        api_args = {
            'permission': 'read' if readonly else 'full_control',
            'share': share_name,
            'user-or-group': user_name,
        }
        self.send_request('cifs-share-access-control-modify', api_args)

    @na_utils.trace
    def remove_cifs_share_access(self, share_name, user_name):
        """Removes a user or group's access to a CIFS share."""
        api_args = {'user-or-group': user_name, 'share': share_name}
        self.send_request('cifs-share-access-control-delete', api_args)

    @na_utils.trace
    def remove_cifs_share(self, share_name):
        """Deletes a CIFS share; already-deleted shares are ignored."""
        try:
            self.send_request('cifs-share-delete', {'share-name': share_name})
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EOBJECTNOTFOUND:
                return
            raise

    @na_utils.trace
    def add_nfs_export_rule(self, policy_name, client_match, readonly,
                            auth_methods):
        """Adds (or updates in place) an NFS export rule for a client."""
        rule_indices = self._get_nfs_export_rule_indices(policy_name,
                                                         client_match)
        if not rule_indices:
            self._add_nfs_export_rule(policy_name, client_match, readonly,
                                      auth_methods)
        else:
            # Update first rule and delete the rest
            self._update_nfs_export_rule(
                policy_name, client_match, readonly, rule_indices.pop(0),
                auth_methods)
            self._remove_nfs_export_rules(policy_name, rule_indices)

    @na_utils.trace
    def _add_nfs_export_rule(self, policy_name, client_match, readonly,
                             auth_methods):
        # Build the ro/rw/superuser security-flavor lists from auth_methods.
        api_args = {
            'policy-name': policy_name,
            'client-match': client_match,
            'ro-rule': [],
            'rw-rule': [],
            'super-user-security': [],
        }
        for am in auth_methods:
            api_args['ro-rule'].append({'security-flavor': am})
            api_args['rw-rule'].append({'security-flavor': am})
            api_args['super-user-security'].append({'security-flavor': am})
        if readonly:
            # readonly, overwrite with auth method 'never'
            api_args['rw-rule'] = [{'security-flavor': 'never'}]
        self.send_request('export-rule-create', api_args)

    @na_utils.trace
    def _update_nfs_export_rule(self, policy_name, client_match, readonly,
                                rule_index, auth_methods):
        # Modifies an existing export rule in place (same flavor logic as
        # _add_nfs_export_rule).
        api_args = {
            'policy-name': policy_name,
            'rule-index': rule_index,
            'client-match': client_match,
            'ro-rule': [],
            'rw-rule': [],
            'super-user-security': [],
        }
        for am in auth_methods:
            api_args['ro-rule'].append({'security-flavor': am})
            api_args['rw-rule'].append({'security-flavor': am})
            api_args['super-user-security'].append({'security-flavor': am})
        if readonly:
            api_args['rw-rule'] = [{'security-flavor': 'never'}]
        self.send_request('export-rule-modify', api_args)

    @na_utils.trace
    def _get_nfs_export_rule_indices(self, policy_name, client_match):
        # Returns the sorted rule indices (as strings) matching the
        # policy/client pair.
        api_args = {
            'query': {
                'export-rule-info': {
                    'policy-name': policy_name,
                    'client-match': client_match,
                },
            },
            'desired-attributes': {
                'export-rule-info': {
                    'vserver-name': None,
                    'policy-name': None,
                    'client-match': None,
                    'rule-index': None,
                },
            },
        }
        result = self.send_iter_request('export-rule-get-iter', api_args)

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        export_rule_info_list = attributes_list.get_children()

        rule_indices = [int(export_rule_info.get_child_content('rule-index'))
                        for export_rule_info in export_rule_info_list]
        rule_indices.sort()
        return [str(rule_index) for rule_index in rule_indices]

    @na_utils.trace
    def remove_nfs_export_rule(self, policy_name, client_match):
        """Removes all export rules for the given policy/client pair."""
        rule_indices = self._get_nfs_export_rule_indices(policy_name,
                                                         client_match)
        self._remove_nfs_export_rules(policy_name, rule_indices)

    @na_utils.trace
    def _remove_nfs_export_rules(self, policy_name, rule_indices):
        # Deletes export rules by index; already-deleted rules are ignored.
        for rule_index in rule_indices:
            api_args = {
                'policy-name': policy_name,
                'rule-index': rule_index
            }
            try:
                self.send_request('export-rule-destroy', api_args)
            except netapp_api.NaApiError as e:
                if e.code != netapp_api.EOBJECTNOTFOUND:
                    raise

    @na_utils.trace
    def clear_nfs_export_policy_for_volume(self, volume_name):
        """Resets a volume's export policy to the 'default' policy."""
        self.set_nfs_export_policy_for_volume(volume_name, 'default')

    @na_utils.trace
    def set_nfs_export_policy_for_volume(self, volume_name, policy_name):
        """Assigns an export policy to a volume."""
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'attributes': {
                'volume-attributes': {
                    'volume-export-attributes': {
                        'policy': policy_name,
                    },
                },
            },
        }
        self.send_request('volume-modify-iter', api_args)

    @na_utils.trace
    def set_qos_policy_group_for_volume(self, volume_name,
                                        qos_policy_group_name):
        """Assigns a QoS policy group to a volume."""
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'attributes': {
                'volume-attributes': {
                    'volume-qos-attributes': {
                        'policy-group-name': qos_policy_group_name,
                    },
                },
            },
        }
        self.send_request('volume-modify-iter', api_args)

    @na_utils.trace
    def set_qos_adaptive_policy_group_for_volume(self, volume_name,
                                                 qos_policy_group_name):
        """Assigns an adaptive QoS policy group to a volume."""
        if not self.features.ADAPTIVE_QOS:
            msg = 'Adaptive QoS not supported on this backend ONTAP version.'
            raise exception.NetAppException(msg)
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'attributes': {
                'volume-attributes': {
                    'volume-qos-attributes': {
                        'adaptive-policy-group-name': qos_policy_group_name,
                    },
                },
            },
        }
        self.send_request('volume-modify-iter', api_args)

    @na_utils.trace
    def get_nfs_export_policy_for_volume(self, volume_name):
        """Get the name of the export policy for a volume."""
        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-export-attributes': {
                        'policy': None,
                    },
                },
            },
        }
        result = self.send_iter_request('volume-get-iter', api_args)

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')
        volume_attributes = attributes_list.get_child_by_name(
            'volume-attributes') or netapp_api.NaElement('none')
        volume_export_attributes = volume_attributes.get_child_by_name(
            'volume-export-attributes') or netapp_api.NaElement('none')

        export_policy = volume_export_attributes.get_child_content('policy')

        if not export_policy:
            msg = _('Could not find export policy for volume %s.')
            raise exception.NetAppException(msg % volume_name)

        return export_policy

    @na_utils.trace
    def create_nfs_export_policy(self, policy_name):
        """Creates an NFS export policy; duplicates are ignored."""
        api_args = {'policy-name': policy_name}
        try:
            self.send_request('export-policy-create', api_args)
        except netapp_api.NaApiError as e:
            if e.code != netapp_api.EDUPLICATEENTRY:
                raise

    @na_utils.trace
    def soft_delete_nfs_export_policy(self, policy_name):
        """Deletes an export policy, or renames it if delete fails."""
        try:
            self.delete_nfs_export_policy(policy_name)
        except netapp_api.NaApiError:
            # NOTE(cknight): Policy deletion can fail if called too soon after
            # removing from a flexvol. So rename for later harvesting.
            self.rename_nfs_export_policy(policy_name,
                                          DELETED_PREFIX + policy_name)

    @na_utils.trace
    def delete_nfs_export_policy(self, policy_name):
        """Deletes an NFS export policy; missing policies are ignored."""
        api_args = {'policy-name': policy_name}
        try:
            self.send_request('export-policy-destroy', api_args)
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EOBJECTNOTFOUND:
                return
            raise

    @na_utils.trace
    def rename_nfs_export_policy(self, policy_name, new_policy_name):
        """Renames an NFS export policy."""
        api_args = {
            'policy-name': policy_name,
            'new-policy-name': new_policy_name
        }
        self.send_request('export-policy-rename', api_args)

    @na_utils.trace
    def prune_deleted_nfs_export_policies(self):
        """Deletes export policies that were previously soft-deleted."""
        deleted_policy_map = self._get_deleted_nfs_export_policies()
        for vserver in deleted_policy_map:
            # Use a vserver-scoped copy of this client per vserver.
            client = copy.deepcopy(self)
            client.set_vserver(vserver)
            for policy in deleted_policy_map[vserver]:
                try:
                    client.delete_nfs_export_policy(policy)
                except netapp_api.NaApiError:
                    # Best-effort reaping; try again on the next prune.
                    LOG.debug('Could not delete export policy %s.', policy)

    @na_utils.trace
    def _get_deleted_nfs_export_policies(self):
        # Returns soft-deleted export policies, grouped by vserver.
        api_args = {
            'query': {
                'export-policy-info': {
                    'policy-name': DELETED_PREFIX + '*',
                },
            },
            'desired-attributes': {
                'export-policy-info': {
                    'policy-name': None,
                    'vserver': None,
                },
            },
        }
        result = self.send_iter_request('export-policy-get-iter', api_args)

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')

        policy_map = {}
        for export_info in attributes_list.get_children():
            vserver = export_info.get_child_content('vserver')
            policies = policy_map.get(vserver, [])
            policies.append(export_info.get_child_content('policy-name'))
            policy_map[vserver] = policies

        return policy_map

    @na_utils.trace
    def _get_ems_log_destination_vserver(self):
        """Returns the best vserver destination for EMS messages."""
        major, minor = self.get_ontapi_version(cached=True)
        if (major > 1) or (major == 1 and minor > 15):
            # Prefer admin Vserver (requires cluster credentials).
            admin_vservers = self.list_vservers(vserver_type='admin')
            if admin_vservers:
                return admin_vservers[0]

            # Fall back to data Vserver.
            data_vservers = self.list_vservers(vserver_type='data')
            if data_vservers:
                return data_vservers[0]

        # If older API version, or no other Vservers found, use node Vserver.
        node_vservers = self.list_vservers(vserver_type='node')
        if node_vservers:
            return node_vservers[0]

        raise exception.NotFound("No Vserver found to receive EMS messages.")

    @na_utils.trace
    def send_ems_log_message(self, message_dict):
        """Sends a message to the Data ONTAP EMS log."""

        # NOTE(cknight): Cannot use deepcopy on the connection context
        node_client = copy.copy(self)
        node_client.connection = copy.copy(self.connection.get_client())
        node_client.connection.set_timeout(25)

        try:
            node_client.set_vserver(self._get_ems_log_destination_vserver())
            node_client.send_request('ems-autosupport-log', message_dict)
            LOG.debug('EMS executed successfully.')
        except netapp_api.NaApiError as e:
            # EMS logging is best-effort; never fail the caller.
            LOG.warning('Failed to invoke EMS. %s', e)

    @na_utils.trace
    def get_aggregate(self, aggregate_name):
        """Get aggregate attributes needed for the storage service catalog."""
        if not aggregate_name:
            return {}

        desired_attributes = {
            'aggr-attributes': {
                'aggregate-name': None,
                'aggr-raid-attributes': {
                    'raid-type': None,
                    'is-hybrid': None,
                },
                'aggr-ownership-attributes': {
                    'home-id': None,
                    'owner-id': None,
                },
            },
        }
        if self.features.SNAPLOCK:
            snaplock_attributes = {'is-snaplock': None, 'snaplock-type': None}
            desired_attributes['aggr-attributes'][
                'aggr-snaplock-attributes'] = snaplock_attributes

        try:
            aggrs = self._get_aggregates(
                aggregate_names=[aggregate_name],
                desired_attributes=desired_attributes
            )
        except netapp_api.NaApiError:
            msg = _('Failed to get info for aggregate %s.')
            LOG.exception(msg, aggregate_name)
            return {}

        if len(aggrs) < 1:
            return {}

        aggr_attributes = aggrs[0]
        aggr_raid_attrs = aggr_attributes.get_child_by_name(
            'aggr-raid-attributes') or netapp_api.NaElement('none')
        aggr_owner_attrs = aggr_attributes.get_child_by_name(
            'aggr-ownership-attributes') or netapp_api.NaElement('none')
        aggr_snaplock_attrs = aggr_attributes.get_child_by_name(
            'aggr-snaplock-attributes') or netapp_api.NaElement('none')

        aggregate = {
            'name': aggr_attributes.get_child_content('aggregate-name'),
            'raid-type': aggr_raid_attrs.get_child_content('raid-type'),
            'is-hybrid': strutils.bool_from_string(
                aggr_raid_attrs.get_child_content('is-hybrid')
            ),
            # Home when the current owner is the home node.
            'is-home': (aggr_owner_attrs.get_child_content('owner-id') ==
                        aggr_owner_attrs.get_child_content('home-id')),
            'is-snaplock': aggr_snaplock_attrs.get_child_content(
                'is-snaplock',
            ),
            'snaplock-type': aggr_snaplock_attrs.get_child_content(
                'snaplock-type',
            ),
        }

        return aggregate

    @na_utils.trace
    def get_aggregate_disk_types(self, aggregate_name):
        """Get the disk type(s) of an aggregate."""
        disk_types = set()
        disk_types.update(self._get_aggregate_disk_types(aggregate_name))
        if self.features.ADVANCED_DISK_PARTITIONING:
            disk_types.update(self._get_aggregate_disk_types(aggregate_name,
                                                            shared=True))
        return list(disk_types) if disk_types else None

    @na_utils.trace
    def _get_aggregate_disk_types(self, aggregate_name, shared=False):
        """Get the disk type(s) of an aggregate."""
        disk_types = set()

        # Shared (partitioned) disks are reported under a different
        # raid-info query shape than dedicated disks.
        if shared:
            disk_raid_info = {
                'disk-shared-info': {
                    'aggregate-list': {
                        'shared-aggregate-info': {
                            'aggregate-name': aggregate_name,
                        },
                    },
                },
            }
        else:
            disk_raid_info = {
                'disk-aggregate-info': {
                    'aggregate-name': aggregate_name,
                },
            }

        api_args = {
            'query': {
                'storage-disk-info': {
                    'disk-raid-info': disk_raid_info,
                },
            },
            'desired-attributes': {
                'storage-disk-info': {
                    'disk-raid-info': {
                        'effective-disk-type': None,
                    },
                },
            },
        }

        try:
            result = self.send_iter_request('storage-disk-get-iter', api_args)
        except netapp_api.NaApiError:
            msg = _('Failed to get disk info for aggregate %s.')
            LOG.exception(msg, aggregate_name)
            return disk_types

        attributes_list = result.get_child_by_name(
            'attributes-list') or netapp_api.NaElement('none')

        for storage_disk_info in attributes_list.get_children():
            disk_raid_info = storage_disk_info.get_child_by_name(
                'disk-raid-info') or netapp_api.NaElement('none')
            disk_type = disk_raid_info.get_child_content(
                'effective-disk-type')
            if disk_type:
                disk_types.add(disk_type)

        return disk_types

    @na_utils.trace
    def check_for_cluster_credentials(self):
        """Returns True when connected with cluster-scoped credentials."""
        try:
            self.list_cluster_nodes()
            # API succeeded, so definitely a cluster management LIF
            return True
        except netapp_api.NaApiError as e:
            if e.code == netapp_api.EAPINOTFOUND:
                LOG.debug('Not connected to cluster management LIF.')
                return False
            else:
                raise

    @na_utils.trace
    def get_cluster_name(self):
        """Gets cluster name."""
        api_args = {
            'desired-attributes': {
                'cluster-identity-info': {
                    'cluster-name': None,
                }
            }
        }
        result = self.send_request('cluster-identity-get', api_args,
                                   enable_tunneling=False)
        attributes = result.get_child_by_name('attributes')
        cluster_identity = attributes.get_child_by_name(
            'cluster-identity-info')
        return cluster_identity.get_child_content('cluster-name')

    @na_utils.trace
    def create_cluster_peer(self, addresses, username=None, password=None,
                            passphrase=None):
        """Creates a cluster peer relationship."""
        api_args = {
            'peer-addresses': [
                {'remote-inet-address': address} for address in addresses
            ],
        }
        if username:
            api_args['user-name'] = username
        if password:
            api_args['password'] = password
        if passphrase:
            api_args['passphrase'] = passphrase

        self.send_request('cluster-peer-create', api_args,
                          enable_tunneling=False)

    @na_utils.trace
    def get_cluster_peers(self, remote_cluster_name=None):
        """Gets one or more cluster peer relationships."""
        api_args = {}
        if remote_cluster_name:
            api_args['query'] = {
                'cluster-peer-info': {
                    'remote-cluster-name': remote_cluster_name,
                }
            }

        result = self.send_iter_request('cluster-peer-get-iter', api_args)
        if not self._has_records(result):
            return []

        cluster_peers = []

        for cluster_peer_info in result.get_child_by_name(
                'attributes-list').get_children():

            cluster_peer = {
                'active-addresses': [],
                'peer-addresses': []
            }

            active_addresses = cluster_peer_info.get_child_by_name(
                'active-addresses') or netapp_api.NaElement('none')
            for address in active_addresses.get_children():
                cluster_peer['active-addresses'].append(address.get_content())

            peer_addresses = cluster_peer_info.get_child_by_name(
                'peer-addresses') or netapp_api.NaElement('none')
            for address in peer_addresses.get_children():
                cluster_peer['peer-addresses'].append(address.get_content())

            cluster_peer['availability'] = cluster_peer_info.get_child_content(
                'availability')
            cluster_peer['cluster-name'] = cluster_peer_info.get_child_content(
                'cluster-name')
            cluster_peer['cluster-uuid'] = cluster_peer_info.get_child_content(
                'cluster-uuid')
            cluster_peer['remote-cluster-name'] = (
                cluster_peer_info.get_child_content('remote-cluster-name'))
            cluster_peer['serial-number'] = (
                cluster_peer_info.get_child_content('serial-number'))
            cluster_peer['timeout'] = cluster_peer_info.get_child_content(
                'timeout')

            cluster_peers.append(cluster_peer)

        return cluster_peers

    @na_utils.trace
    def delete_cluster_peer(self, cluster_name):
        """Deletes a cluster peer relationship."""
        api_args = {'cluster-name': cluster_name}
        self.send_request('cluster-peer-delete', api_args,
                          enable_tunneling=False)

    @na_utils.trace
    def get_cluster_peer_policy(self):
        """Gets the cluster peering policy configuration."""
        if not self.features.CLUSTER_PEER_POLICY:
            return {}

        result = self.send_request('cluster-peer-policy-get')

        attributes = result.get_child_by_name(
            'attributes') or netapp_api.NaElement('none')
        cluster_peer_policy = attributes.get_child_by_name(
            'cluster-peer-policy') or netapp_api.NaElement('none')

        policy = {
            'is-unauthenticated-access-permitted':
                cluster_peer_policy.get_child_content(
                    'is-unauthenticated-access-permitted'),
            'passphrase-minimum-length':
                cluster_peer_policy.get_child_content(
                    'passphrase-minimum-length'),
        }

        # Convert string payloads to native types where present.
        if policy['is-unauthenticated-access-permitted'] is not None:
            policy['is-unauthenticated-access-permitted'] = (
                strutils.bool_from_string(
                    policy['is-unauthenticated-access-permitted']))
        if policy['passphrase-minimum-length'] is not None:
            policy['passphrase-minimum-length'] = int(
                policy['passphrase-minimum-length'])

        return policy

    @na_utils.trace
    def set_cluster_peer_policy(self,
                                is_unauthenticated_access_permitted=None,
                                passphrase_minimum_length=None):
        """Modifies the cluster peering policy configuration."""
        if not self.features.CLUSTER_PEER_POLICY:
            return

        if (is_unauthenticated_access_permitted is None and
                passphrase_minimum_length is None):
            return

        api_args = {}
        if is_unauthenticated_access_permitted is not None:
            api_args['is-unauthenticated-access-permitted'] = (
                'true' if strutils.bool_from_string(
                    is_unauthenticated_access_permitted) else 'false')
        if passphrase_minimum_length is not None:
            api_args['passphrase-minlength'] = str(
                passphrase_minimum_length)

        self.send_request('cluster-peer-policy-modify', api_args)

    @na_utils.trace
    def create_vserver_peer(self, vserver_name, peer_vserver_name,
                            peer_cluster_name=None):
        """Creates a Vserver peer relationship for SnapMirrors."""
        api_args = {
            'vserver': vserver_name,
            'peer-vserver': peer_vserver_name,
            'applications': [
                {'vserver-peer-application': 'snapmirror'},
            ],
        }
        if peer_cluster_name:
            api_args['peer-cluster'] = peer_cluster_name
        self.send_request('vserver-peer-create', api_args,
                          enable_tunneling=False)

    @na_utils.trace
    def delete_vserver_peer(self, vserver_name, peer_vserver_name):
        """Deletes a Vserver peer relationship."""
        api_args = {'vserver': vserver_name,
                    'peer-vserver': peer_vserver_name}
        self.send_request('vserver-peer-delete', api_args,
                          enable_tunneling=False)

    @na_utils.trace
    def accept_vserver_peer(self, vserver_name, peer_vserver_name):
        """Accepts a pending Vserver peer relationship."""
        api_args = {'vserver': vserver_name,
                    'peer-vserver': peer_vserver_name}
        self.send_request('vserver-peer-accept', api_args,
                          enable_tunneling=False)

    @na_utils.trace
    def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None):
        """Gets one or more Vserver peer relationships."""
        api_args = None
        if vserver_name or peer_vserver_name:
            api_args = {'query': {'vserver-peer-info': {}}}
            if vserver_name:
                api_args['query']['vserver-peer-info']['vserver'] = (
                    vserver_name)
            if peer_vserver_name:
                api_args['query']['vserver-peer-info']['peer-vserver'] = (
                    peer_vserver_name)

        result = self.send_iter_request('vserver-peer-get-iter', api_args)
        if not self._has_records(result):
            return []

        vserver_peers = []

        for vserver_peer_info in result.get_child_by_name(
                'attributes-list').get_children():

            vserver_peer = {
                'vserver':
                    vserver_peer_info.get_child_content('vserver'),
                'peer-vserver':
                    vserver_peer_info.get_child_content('peer-vserver'),
                'peer-state':
                    vserver_peer_info.get_child_content('peer-state'),
                'peer-cluster':
                    vserver_peer_info.get_child_content('peer-cluster'),
            }
            vserver_peers.append(vserver_peer)

        return vserver_peers

    def _ensure_snapmirror_v2(self):
        """Verify support for SnapMirror control plane v2."""
        if not self.features.SNAPMIRROR_V2:
            msg = _('SnapMirror features require Data ONTAP 8.2 or later.')
            raise exception.NetAppException(msg)

    @na_utils.trace
    def create_snapmirror_vol(self, source_vserver, source_volume,
                              destination_vserver, destination_volume,
                              relationship_type, schedule=None,
                              policy=na_utils.MIRROR_ALL_SNAP_POLICY):
        """Creates a SnapMirror relationship between volumes."""
        self._create_snapmirror(source_vserver, destination_vserver,
                                source_volume=source_volume,
                                destination_volume=destination_volume,
                                schedule=schedule, policy=policy,
                                relationship_type=relationship_type)

    @na_utils.trace
    def create_snapmirror_svm(self, source_vserver, destination_vserver,
                              schedule=None, policy=None,
                              relationship_type=na_utils.DATA_PROTECTION_TYPE,
                              identity_preserve=True,
                              max_transfer_rate=None):
        """Creates a SnapMirror relationship between vServers."""
        self._create_snapmirror(source_vserver, destination_vserver,
                                schedule=schedule, policy=policy,
                                relationship_type=relationship_type,
                                identity_preserve=identity_preserve,
                                max_transfer_rate=max_transfer_rate)

    @na_utils.trace
    def _create_snapmirror(self, source_vserver, destination_vserver,
                           source_volume=None, destination_volume=None,
                           schedule=None, policy=None,
                           relationship_type=na_utils.DATA_PROTECTION_TYPE,
                           identity_preserve=None, max_transfer_rate=None):
        """Creates a SnapMirror relationship (cDOT 8.2 or later only)."""
        self._ensure_snapmirror_v2()

        api_args = {
            'source-vserver': source_vserver,
            'destination-vserver': destination_vserver,
            'relationship-type': relationship_type,
        }
        if source_volume:
            api_args['source-volume'] = source_volume
        if destination_volume:
            api_args['destination-volume'] = destination_volume
        if schedule:
            api_args['schedule'] = schedule
        if policy:
            api_args['policy'] = policy
        if identity_preserve is not None:
            api_args['identity-preserve'] = (
                'true' if identity_preserve is True else 'false')
        if max_transfer_rate is not None:
            api_args['max-transfer-rate'] = max_transfer_rate

        try:
            self.send_request('snapmirror-create', api_args)
        except netapp_api.NaApiError as e:
            # An already-existing relationship is not an error.
            if e.code != netapp_api.ERELATION_EXISTS:
                raise

    def _build_snapmirror_request(self, source_path=None, dest_path=None,
                                  source_vserver=None, dest_vserver=None,
                                  source_volume=None, dest_volume=None):
        """Build a default SnapMirror request."""
        req_args = {}
        if source_path:
            req_args['source-location'] = source_path
        if dest_path:
            req_args['destination-location'] = dest_path
        if source_vserver:
            req_args['source-vserver'] = source_vserver
        if source_volume:
            req_args['source-volume'] = source_volume
        if dest_vserver:
            req_args['destination-vserver'] = dest_vserver
        if dest_volume:
            req_args['destination-volume'] = dest_volume

        return req_args

    @na_utils.trace
    def initialize_snapmirror_vol(self, source_vserver, source_volume,
                                  dest_vserver, dest_volume,
                                  source_snapshot=None,
                                  transfer_priority=None):
        """Initializes a SnapMirror relationship between volumes."""
        return self._initialize_snapmirror(
            source_vserver=source_vserver, dest_vserver=dest_vserver,
            source_volume=source_volume, dest_volume=dest_volume,
            source_snapshot=source_snapshot,
            transfer_priority=transfer_priority)

    @na_utils.trace
    def initialize_snapmirror_svm(self, source_vserver, dest_vserver,
                                  transfer_priority=None):
        """Initializes a SnapMirror relationship between vServer."""
        # SVM-scoped relationships are addressed as '<vserver>:'.
        source_path = source_vserver + ':'
        dest_path = dest_vserver + ':'
        return self._initialize_snapmirror(
            source_path=source_path, dest_path=dest_path,
            transfer_priority=transfer_priority)

    @na_utils.trace
    def _initialize_snapmirror(self, source_path=None, dest_path=None,
                               source_vserver=None, dest_vserver=None,
                               source_volume=None, dest_volume=None,
                               source_snapshot=None, transfer_priority=None):
        """Initializes a SnapMirror relationship."""
        self._ensure_snapmirror_v2()

        api_args = self._build_snapmirror_request(
            source_path, dest_path, source_vserver, dest_vserver,
            source_volume, dest_volume)
        if source_snapshot:
            api_args['source-snapshot'] = source_snapshot
        if transfer_priority:
            api_args['transfer-priority'] = transfer_priority

        result = self.send_request('snapmirror-initialize', api_args)

        result_info = {}
        result_info['operation-id'] = result.get_child_content(
            'result-operation-id')
        result_info['status'] = result.get_child_content('result-status')
        result_info['jobid'] = result.get_child_content('result-jobid')
        result_info['error-code'] = result.get_child_content(
            'result-error-code')
        result_info['error-message'] = result.get_child_content(
            'result-error-message')

        return result_info

    @na_utils.trace
    def release_snapmirror_vol(self, source_vserver, source_volume,
                               dest_vserver, dest_volume,
                               relationship_info_only=False):
        """Removes a SnapMirror relationship on the source endpoint."""
        self._ensure_snapmirror_v2()
        snapmirror_destinations_list = self.get_snapmirror_destinations(
            source_vserver=source_vserver,
            dest_vserver=dest_vserver,
            source_volume=source_volume,
            dest_volume=dest_volume,
            desired_attributes=['relationship-id'])
        if len(snapmirror_destinations_list) > 1:
            msg = ("Expected snapmirror relationship to be unique. "
                   "List returned: %s." % snapmirror_destinations_list)
            raise exception.NetAppException(msg)

        api_args = self._build_snapmirror_request(
            source_vserver=source_vserver, dest_vserver=dest_vserver,
            source_volume=source_volume, dest_volume=dest_volume)
        api_args['relationship-info-only'] = (
            'true' if relationship_info_only else 'false')

        # NOTE(nahimsouza): This verification is needed because an empty list
        # is returned in snapmirror_destinations_list when a single share is
        # created with only one replica and this replica is deleted, thus there
        # will be no relationship-id in that case.
if len(snapmirror_destinations_list) == 1: api_args['relationship-id'] = ( snapmirror_destinations_list[0]['relationship-id']) self.send_request('snapmirror-release', api_args, enable_tunneling=True) @na_utils.trace def release_snapmirror_svm(self, source_vserver, dest_vserver, relationship_info_only=False): """Removes a SnapMirror relationship on the source endpoint.""" source_path = source_vserver + ':' dest_path = dest_vserver + ':' dest_info = self._build_snapmirror_request( source_path=source_path, dest_path=dest_path) self._ensure_snapmirror_v2() api_args = { 'query': { 'snapmirror-destination-info': dest_info, }, 'relationship-info-only': ( 'true' if relationship_info_only else 'false'), } self.send_request('snapmirror-release-iter', api_args, enable_tunneling=False) @na_utils.trace def quiesce_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Disables future transfers to a SnapMirror destination.""" self._quiesce_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def quiesce_snapmirror_svm(self, source_vserver, dest_vserver): """Disables future transfers to a SnapMirror destination.""" source_path = source_vserver + ':' dest_path = dest_vserver + ':' self._quiesce_snapmirror(source_path=source_path, dest_path=dest_path) @na_utils.trace def _quiesce_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Disables future transfers to a SnapMirror destination.""" self._ensure_snapmirror_v2() api_args = self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) self.send_request('snapmirror-quiesce', api_args) @na_utils.trace def abort_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume, clear_checkpoint=False): """Stops ongoing transfers for a SnapMirror relationship.""" 
self._abort_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume, clear_checkpoint=clear_checkpoint) @na_utils.trace def abort_snapmirror_svm(self, source_vserver, dest_vserver, clear_checkpoint=False): """Stops ongoing transfers for a SnapMirror relationship.""" source_path = source_vserver + ':' dest_path = dest_vserver + ':' self._abort_snapmirror(source_path=source_path, dest_path=dest_path, clear_checkpoint=clear_checkpoint) @na_utils.trace def _abort_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, clear_checkpoint=False): """Stops ongoing transfers for a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) api_args['clear-checkpoint'] = 'true' if clear_checkpoint else 'false' try: self.send_request('snapmirror-abort', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.ENOTRANSFER_IN_PROGRESS: raise @na_utils.trace def break_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Breaks a data protection SnapMirror relationship.""" self._break_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def break_snapmirror_svm(self, source_vserver=None, dest_vserver=None): """Breaks a data protection SnapMirror relationship.""" source_path = source_vserver + ':' if source_vserver else None dest_path = dest_vserver + ':' if dest_vserver else None self._break_snapmirror(source_path=source_path, dest_path=dest_path) @na_utils.trace def _break_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Breaks a data protection SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = 
self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) try: self.send_request('snapmirror-break', api_args) except netapp_api.NaApiError as e: break_in_progress = 'SnapMirror operation status is "Breaking"' if not (e.code == netapp_api.ESVMDR_CANNOT_PERFORM_OP_FOR_STATUS and break_in_progress in e.message): raise @na_utils.trace def modify_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume, schedule=None, policy=None, tries=None, max_transfer_rate=None): """Modifies a SnapMirror relationship between volumes.""" self._modify_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume, schedule=schedule, policy=policy, tries=tries, max_transfer_rate=max_transfer_rate) @na_utils.trace def _modify_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, schedule=None, policy=None, tries=None, max_transfer_rate=None): """Modifies a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) if schedule: api_args['schedule'] = schedule if policy: api_args['policy'] = policy if tries is not None: api_args['tries'] = tries if max_transfer_rate is not None: api_args['max-transfer-rate'] = max_transfer_rate self.send_request('snapmirror-modify', api_args) @na_utils.trace def delete_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Destroys a SnapMirror relationship between volumes.""" self._delete_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def delete_snapmirror_svm(self, source_vserver, dest_vserver): """Destroys a SnapMirror relationship between vServers.""" source_path = source_vserver + ':' dest_path = 
dest_vserver + ':' self._delete_snapmirror(source_path=source_path, dest_path=dest_path) @na_utils.trace def _delete_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Destroys a SnapMirror relationship.""" self._ensure_snapmirror_v2() snapmirror_info = self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) api_args = { 'query': { 'snapmirror-info': snapmirror_info } } self.send_request('snapmirror-destroy-iter', api_args) @na_utils.trace def update_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Schedules a snapmirror update between volumes.""" self._update_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def update_snapmirror_svm(self, source_vserver, dest_vserver): """Schedules a snapmirror update between vServers.""" source_path = source_vserver + ':' dest_path = dest_vserver + ':' self._update_snapmirror(source_path=source_path, dest_path=dest_path) @na_utils.trace def _update_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Schedules a snapmirror update.""" self._ensure_snapmirror_v2() api_args = self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) try: self.send_request('snapmirror-update', api_args) except netapp_api.NaApiError as e: if (e.code != netapp_api.ETRANSFER_IN_PROGRESS and e.code != netapp_api.EANOTHER_OP_ACTIVE): raise @na_utils.trace def resume_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Resume a SnapMirror relationship if it is quiesced.""" self._resume_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def 
resume_snapmirror_svm(self, source_vserver, dest_vserver): """Resume a SnapMirror relationship if it is quiesced.""" source_path = source_vserver + ':' dest_path = dest_vserver + ':' self._resume_snapmirror(source_path=source_path, dest_path=dest_path) @na_utils.trace def _resume_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Resume a SnapMirror relationship if it is quiesced.""" self._ensure_snapmirror_v2() api_args = self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) try: self.send_request('snapmirror-resume', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.ERELATION_NOT_QUIESCED: raise @na_utils.trace def resync_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Resync a SnapMirror relationship between volumes.""" self._resync_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def resync_snapmirror_svm(self, source_vserver, dest_vserver): """Resync a SnapMirror relationship between vServers.""" source_path = source_vserver + ':' dest_path = dest_vserver + ':' self._resync_snapmirror(source_path=source_path, dest_path=dest_path) @na_utils.trace def _resync_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Resync a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) self.send_request('snapmirror-resync', api_args) @na_utils.trace def _get_snapmirrors(self, source_path=None, dest_path=None, source_vserver=None, source_volume=None, dest_vserver=None, dest_volume=None, desired_attributes=None): """Gets one or more SnapMirror relationships.""" snapmirror_info = 
self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) api_args = {} if snapmirror_info: api_args['query'] = { 'snapmirror-info': snapmirror_info } if desired_attributes: api_args['desired-attributes'] = desired_attributes result = self.send_iter_request('snapmirror-get-iter', api_args) if not self._has_records(result): return [] else: return result.get_child_by_name('attributes-list').get_children() @na_utils.trace def get_snapmirrors_svm(self, source_vserver=None, dest_vserver=None, desired_attributes=None): source_path = source_vserver + ':' if source_vserver else None dest_path = dest_vserver + ':' if dest_vserver else None return self.get_snapmirrors(source_path=source_path, dest_path=dest_path, desired_attributes=desired_attributes) @na_utils.trace def get_snapmirrors(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, desired_attributes=None): """Gets one or more SnapMirror relationships. Either the source or destination info may be omitted. Desired attributes should be a flat list of attribute names. 
""" self._ensure_snapmirror_v2() if desired_attributes is not None: desired_attributes = { 'snapmirror-info': {attr: None for attr in desired_attributes}, } result = self._get_snapmirrors( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, source_volume=source_volume, dest_vserver=dest_vserver, dest_volume=dest_volume, desired_attributes=desired_attributes) snapmirrors = [] for snapmirror_info in result: snapmirror = {} for child in snapmirror_info.get_children(): name = self._strip_xml_namespace(child.get_name()) snapmirror[name] = child.get_content() snapmirrors.append(snapmirror) return snapmirrors @na_utils.trace def _get_snapmirror_destinations(self, source_path=None, dest_path=None, source_vserver=None, source_volume=None, dest_vserver=None, dest_volume=None, desired_attributes=None): """Gets one or more SnapMirror at source endpoint.""" snapmirror_info = self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) api_args = {} if snapmirror_info: api_args['query'] = { 'snapmirror-destination-info': snapmirror_info } if desired_attributes: api_args['desired-attributes'] = desired_attributes result = self.send_iter_request('snapmirror-get-destination-iter', api_args) if not self._has_records(result): return [] else: return result.get_child_by_name('attributes-list').get_children() @na_utils.trace def get_snapmirror_destinations(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, desired_attributes=None): """Gets one or more SnapMirror relationships in the source endpoint. Either the source or destination info may be omitted. Desired attributes should be a flat list of attribute names. 
""" self._ensure_snapmirror_v2() if desired_attributes is not None: desired_attributes = { 'snapmirror-destination-info': { attr: None for attr in desired_attributes}, } result = self._get_snapmirror_destinations( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, source_volume=source_volume, dest_vserver=dest_vserver, dest_volume=dest_volume, desired_attributes=desired_attributes) snapmirrors = [] for snapmirror_info in result: snapmirror = {} for child in snapmirror_info.get_children(): name = self._strip_xml_namespace(child.get_name()) snapmirror[name] = child.get_content() snapmirrors.append(snapmirror) return snapmirrors @na_utils.trace def get_snapmirror_destinations_svm(self, source_vserver=None, dest_vserver=None, desired_attributes=None): source_path = source_vserver + ':' if source_vserver else None dest_path = dest_vserver + ':' if dest_vserver else None return self.get_snapmirror_destinations( source_path=source_path, dest_path=dest_path, desired_attributes=desired_attributes) def volume_has_snapmirror_relationships(self, volume): """Return True if snapmirror relationships exist for a given volume. If we have snapmirror control plane license, we can verify whether the given volume is part of any snapmirror relationships. 
""" try: # Check if volume is a source snapmirror volume snapmirrors = self.get_snapmirrors( source_vserver=volume['owning-vserver-name'], source_volume=volume['name']) # Check if volume is a destination snapmirror volume if not snapmirrors: snapmirrors = self.get_snapmirrors( dest_vserver=volume['owning-vserver-name'], dest_volume=volume['name']) has_snapmirrors = len(snapmirrors) > 0 except netapp_api.NaApiError: msg = ("Could not determine if volume %s is part of " "existing snapmirror relationships.") LOG.exception(msg, volume['name']) has_snapmirrors = False return has_snapmirrors def list_snapmirror_snapshots(self, volume_name, newer_than=None): """Gets SnapMirror snapshots on a volume.""" api_args = { 'query': { 'snapshot-info': { 'dependency': 'snapmirror', 'volume': volume_name, }, }, } if newer_than: api_args['query']['snapshot-info'][ 'access-time'] = '>' + newer_than result = self.send_iter_request('snapshot-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [snapshot_info.get_child_content('name') for snapshot_info in attributes_list.get_children()] @na_utils.trace def create_snapmirror_policy(self, policy_name, policy_type='async_mirror', discard_network_info=True, preserve_snapshots=True, snapmirror_label='all_source_snapshots', keep=1 ): """Creates a SnapMirror policy for a vServer.""" self._ensure_snapmirror_v2() api_args = { 'policy-name': policy_name, 'type': policy_type, } if discard_network_info: api_args['discard-configs'] = { 'svmdr-config-obj': 'network' } self.send_request('snapmirror-policy-create', api_args) if preserve_snapshots: api_args = { 'policy-name': policy_name, 'snapmirror-label': snapmirror_label, 'keep': keep, 'preserve': 'false' } self.send_request('snapmirror-policy-add-rule', api_args) @na_utils.trace def delete_snapmirror_policy(self, policy_name): """Deletes a SnapMirror policy.""" api_args = { 'policy-name': policy_name, } try: 
self.send_request('snapmirror-policy-delete', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.EOBJECTNOTFOUND: raise @na_utils.trace def get_snapmirror_policies(self, vserver_name): """Get all SnapMirror policies associated to a vServer.""" api_args = { 'query': { 'snapmirror-policy-info': { 'vserver-name': vserver_name, }, }, 'desired-attributes': { 'snapmirror-policy-info': { 'policy-name': None, }, }, } result = self.send_iter_request('snapmirror-policy-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [policy_info.get_child_content('policy-name') for policy_info in attributes_list.get_children()] @na_utils.trace def start_volume_move(self, volume_name, vserver, destination_aggregate, cutover_action='wait', encrypt_destination=None): """Moves a FlexVol across Vserver aggregates. Requires cluster-scoped credentials. """ self._send_volume_move_request( volume_name, vserver, destination_aggregate, cutover_action=cutover_action, encrypt_destination=encrypt_destination) @na_utils.trace def check_volume_move(self, volume_name, vserver, destination_aggregate, encrypt_destination=None): """Moves a FlexVol across Vserver aggregates. Requires cluster-scoped credentials. """ self._send_volume_move_request( volume_name, vserver, destination_aggregate, validation_only=True, encrypt_destination=encrypt_destination) @na_utils.trace def _send_volume_move_request(self, volume_name, vserver, destination_aggregate, cutover_action='wait', validation_only=False, encrypt_destination=None): """Send request to check if vol move is possible, or start it. :param volume_name: Name of the FlexVol to be moved. :param destination_aggregate: Name of the destination aggregate :param cutover_action: can have one of ['force', 'defer', 'abort', 'wait']. 'force' will force a cutover despite errors (causing possible client disruptions), 'wait' will wait for cutover to be triggered manually. 
'abort' will rollback move on errors on cutover, 'defer' will attempt a cutover, but wait for manual intervention in case of errors. :param validation_only: If set to True, only validates if the volume move is possible, does not trigger data copy. :param encrypt_destination: If set to True, it encrypts the Flexvol after the volume move is complete. """ api_args = { 'source-volume': volume_name, 'vserver': vserver, 'dest-aggr': destination_aggregate, 'cutover-action': CUTOVER_ACTION_MAP[cutover_action], } if self.features.FLEXVOL_ENCRYPTION: if encrypt_destination: api_args['encrypt-destination'] = 'true' else: api_args['encrypt-destination'] = 'false' elif encrypt_destination: msg = 'Flexvol encryption is not supported on this backend.' raise exception.NetAppException(msg) if validation_only: api_args['perform-validation-only'] = 'true' self.send_request('volume-move-start', api_args) @na_utils.trace def abort_volume_move(self, volume_name, vserver): """Aborts an existing volume move operation.""" api_args = { 'source-volume': volume_name, 'vserver': vserver, } self.send_request('volume-move-trigger-abort', api_args) @na_utils.trace def trigger_volume_move_cutover(self, volume_name, vserver, force=True): """Triggers the cut-over for a volume in data motion.""" api_args = { 'source-volume': volume_name, 'vserver': vserver, 'force': 'true' if force else 'false', } self.send_request('volume-move-trigger-cutover', api_args) @na_utils.trace def get_volume_move_status(self, volume_name, vserver): """Gets the current state of a volume move operation.""" api_args = { 'query': { 'volume-move-info': { 'volume': volume_name, 'vserver': vserver, }, }, 'desired-attributes': { 'volume-move-info': { 'percent-complete': None, 'estimated-completion-time': None, 'state': None, 'details': None, 'cutover-action': None, 'phase': None, }, }, } result = self.send_iter_request('volume-move-get-iter', api_args) if not self._has_records(result): msg = _("Volume %(vol)s in Vserver %(server)s 
is not part of any " "data motion operations.") msg_args = {'vol': volume_name, 'server': vserver} raise exception.NetAppException(msg % msg_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') volume_move_info = attributes_list.get_child_by_name( 'volume-move-info') or netapp_api.NaElement('none') status_info = { 'percent-complete': volume_move_info.get_child_content( 'percent-complete'), 'estimated-completion-time': volume_move_info.get_child_content( 'estimated-completion-time'), 'state': volume_move_info.get_child_content('state'), 'details': volume_move_info.get_child_content('details'), 'cutover-action': volume_move_info.get_child_content( 'cutover-action'), 'phase': volume_move_info.get_child_content('phase'), } return status_info @na_utils.trace def qos_policy_group_exists(self, qos_policy_group_name): """Checks if a QoS policy group exists.""" try: self.qos_policy_group_get(qos_policy_group_name) except exception.NetAppException: return False return True @na_utils.trace def qos_policy_group_get(self, qos_policy_group_name): """Checks if a QoS policy group exists.""" api_args = { 'query': { 'qos-policy-group-info': { 'policy-group': qos_policy_group_name, }, }, 'desired-attributes': { 'qos-policy-group-info': { 'policy-group': None, 'vserver': None, 'max-throughput': None, 'num-workloads': None }, }, } try: result = self.send_request('qos-policy-group-get-iter', api_args, False) except netapp_api.NaApiError as e: if e.code == netapp_api.EAPINOTFOUND: msg = _("Configured ONTAP login user cannot retrieve " "QoS policies.") LOG.error(msg) raise exception.NetAppException(msg) else: raise if not self._has_records(result): msg = _("No QoS policy group found with name %s.") raise exception.NetAppException(msg % qos_policy_group_name) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') qos_policy_group_info = attributes_list.get_child_by_name( 'qos-policy-group-info') or 
netapp_api.NaElement('none') policy_info = { 'policy-group': qos_policy_group_info.get_child_content( 'policy-group'), 'vserver': qos_policy_group_info.get_child_content('vserver'), 'max-throughput': qos_policy_group_info.get_child_content( 'max-throughput'), 'num-workloads': int(qos_policy_group_info.get_child_content( 'num-workloads')), } return policy_info @na_utils.trace def qos_policy_group_create(self, qos_policy_group_name, vserver, max_throughput=None): """Creates a QoS policy group.""" api_args = { 'policy-group': qos_policy_group_name, 'vserver': vserver, } if max_throughput: api_args['max-throughput'] = max_throughput return self.send_request('qos-policy-group-create', api_args, False) @na_utils.trace def qos_policy_group_modify(self, qos_policy_group_name, max_throughput): """Modifies a QoS policy group.""" api_args = { 'policy-group': qos_policy_group_name, 'max-throughput': max_throughput, } return self.send_request('qos-policy-group-modify', api_args, False) @na_utils.trace def qos_policy_group_delete(self, qos_policy_group_name): """Attempts to delete a QoS policy group.""" api_args = {'policy-group': qos_policy_group_name} return self.send_request('qos-policy-group-delete', api_args, False) @na_utils.trace def qos_policy_group_rename(self, qos_policy_group_name, new_name): """Renames a QoS policy group.""" if qos_policy_group_name == new_name: return api_args = { 'policy-group-name': qos_policy_group_name, 'new-name': new_name, } return self.send_request('qos-policy-group-rename', api_args, False) @na_utils.trace def mark_qos_policy_group_for_deletion(self, qos_policy_group_name): """Soft delete backing QoS policy group for a manila share.""" # NOTE(gouthamr): ONTAP deletes storage objects asynchronously. As # long as garbage collection hasn't occurred, assigned QoS policy may # still be tagged "in use". 
So, we rename the QoS policy group using a # specific pattern and later attempt on a best effort basis to # delete any QoS policy groups matching that pattern. if self.qos_policy_group_exists(qos_policy_group_name): new_name = DELETED_PREFIX + qos_policy_group_name try: self.qos_policy_group_rename(qos_policy_group_name, new_name) except netapp_api.NaApiError as ex: msg = ('Rename failure in cleanup of cDOT QoS policy ' 'group %(name)s: %(ex)s') msg_args = {'name': qos_policy_group_name, 'ex': ex} LOG.warning(msg, msg_args) # Attempt to delete any QoS policies named "deleted_manila-*". self.remove_unused_qos_policy_groups() @na_utils.trace def remove_unused_qos_policy_groups(self): """Deletes all QoS policy groups that are marked for deletion.""" api_args = { 'query': { 'qos-policy-group-info': { 'policy-group': '%s*' % DELETED_PREFIX, } }, 'max-records': 3500, 'continue-on-failure': 'true', 'return-success-list': 'false', 'return-failure-list': 'false', } try: self.send_request('qos-policy-group-delete-iter', api_args, False) except netapp_api.NaApiError as ex: msg = 'Could not delete QoS policy groups. Details: %(ex)s' msg_args = {'ex': ex} LOG.debug(msg, msg_args) @na_utils.trace def get_net_options(self): result = self.send_request('net-options-get', None, False) options = result.get_child_by_name('net-options') ipv6_enabled = False ipv6_info = options.get_child_by_name('ipv6-options-info') if ipv6_info: ipv6_enabled = ipv6_info.get_child_content('enabled') == 'true' return { 'ipv6-enabled': ipv6_enabled, } @na_utils.trace def rehost_volume(self, volume_name, vserver, destination_vserver): """Rehosts a volume from one Vserver into another Vserver. :param volume_name: Name of the FlexVol to be rehosted. :param vserver: Source Vserver name to which target volume belongs. :param destination_vserver: Destination Vserver name where target volume must reside after successful volume rehost operation. 
""" api_args = { 'volume': volume_name, 'vserver': vserver, 'destination-vserver': destination_vserver, } self.send_request('volume-rehost', api_args) @na_utils.trace def get_nfs_config(self, desired_args, vserver): """Gets the NFS config of the given vserver with the desired params""" api_args = { 'query': { 'nfs-info': { 'vserver': vserver, }, }, } nfs_info = {} for arg in desired_args: nfs_info[arg] = None if nfs_info: api_args['desired-attributes'] = {'nfs-info': nfs_info} result = self.send_request('nfs-service-get-iter', api_args) child_elem = result.get_child_by_name('attributes-list') return self.parse_nfs_config(child_elem, desired_args) @na_utils.trace def get_nfs_config_default(self, desired_args): """Gets the default NFS config with the desired params""" result = self.send_request('nfs-service-get-create-defaults', None) child_elem = result.get_child_by_name('defaults') return self.parse_nfs_config(child_elem, desired_args) @na_utils.trace def parse_nfs_config(self, parent_elem, desired_args): """Parse the get NFS config operation returning the desired params""" nfs_info_elem = parent_elem.get_child_by_name('nfs-info') nfs_config = {} for arg in desired_args: nfs_config[arg] = nfs_info_elem.get_child_content(arg) return nfs_config @na_utils.trace def start_vserver(self, vserver, force=None): """Starts a vServer.""" api_args = { 'vserver-name': vserver, } if force is not None: api_args['force'] = 'true' if force is True else 'false' try: self.send_request('vserver-start', api_args, enable_tunneling=False) except netapp_api.NaApiError as e: if e.code == netapp_api.EVSERVERALREADYSTARTED: msg = _("Vserver %s is already started.") LOG.debug(msg, vserver) else: raise @na_utils.trace def stop_vserver(self, vserver): """Stops a vServer.""" api_args = { 'vserver-name': vserver, } self.send_request('vserver-stop', api_args, enable_tunneling=False) def is_svm_dr_supported(self): return self.features.SVM_DR def create_fpolicy_event(self, share_name, event_name, 
                             protocol, file_operations):
        """Creates a new fpolicy policy event.

        :param event_name: name of the new fpolicy event
        :param protocol: name of protocol for which event is created.
            Possible values are: 'nfsv3', 'nfsv4' or 'cifs'.
        :param file_operations: name of file operations to be monitored.
            Values should be provided as list of strings.
        :param share_name: name of share associated with the vserver where
            the fpolicy event should be added.
        """
        api_args = {
            'event-name': event_name,
            'protocol': protocol,
            'file-operations': [],
        }
        # ZAPI expects each monitored operation wrapped in its own
        # 'fpolicy-operation' element.
        for file_op in file_operations:
            api_args['file-operations'].append({'fpolicy-operation': file_op})

        self.send_request('fpolicy-policy-event-create', api_args)

    def delete_fpolicy_event(self, share_name, event_name):
        """Deletes a fpolicy policy event.

        :param event_name: name of the event to be deleted
        :param share_name: name of share associated with the vserver where
            the fpolicy event should be deleted.
        """
        try:
            self.send_request('fpolicy-policy-event-delete',
                              {'event-name': event_name})
        except netapp_api.NaApiError as e:
            # Deleting an already-absent event is treated as success;
            # any other backend error is surfaced as a NetAppException.
            if e.code in [netapp_api.EEVENTNOTFOUND,
                          netapp_api.EOBJECTNOTFOUND]:
                msg = _("FPolicy event %s not found.")
                LOG.debug(msg, event_name)
            else:
                raise exception.NetAppException(message=e.message)

    def get_fpolicy_events(self, event_name=None, protocol=None,
                           file_operations=None):
        """Retrieves a list of fpolicy events.

        :param event_name: name of the fpolicy event
        :param protocol: name of protocol. Possible values are: 'nfsv3',
            'nfsv4' or 'cifs'.
        :param file_operations: name of file operations to be monitored.
            Values should be provided as list of strings.
        :returns List of policy events or empty list
        """
        # Build the query only with the filters the caller provided; an
        # empty query returns every configured event.
        event_options_config = {}
        if event_name:
            event_options_config['event-name'] = event_name
        if protocol:
            event_options_config['protocol'] = protocol
        if file_operations:
            event_options_config['file-operations'] = []
            for file_op in file_operations:
                event_options_config['file-operations'].append(
                    {'fpolicy-operation': file_op})
        api_args = {
            'query': {
                'fpolicy-event-options-config': event_options_config,
            },
        }
        result = self.send_iter_request('fpolicy-policy-event-get-iter',
                                        api_args)
        fpolicy_events = []
        if self._has_records(result):
            try:
                fpolicy_events = []
                attributes_list = result.get_child_by_name(
                    'attributes-list') or netapp_api.NaElement('none')
                for event_info in attributes_list.get_children():
                    name = event_info.get_child_content('event-name')
                    proto = event_info.get_child_content('protocol')
                    file_operations_child = event_info.get_child_by_name(
                        'file-operations') or netapp_api.NaElement('none')
                    operations = [operation.get_content()
                                  for operation
                                  in file_operations_child.get_children()]
                    fpolicy_events.append({
                        'event-name': name,
                        'protocol': proto,
                        'file-operations': operations
                    })
            except AttributeError:
                msg = _('Could not retrieve fpolicy policy event information.')
                raise exception.NetAppException(msg)

        return fpolicy_events

    def create_fpolicy_policy(self, fpolicy_name, share_name, events,
                              engine='native'):
        """Creates a fpolicy policy resource.

        :param fpolicy_name: name of the fpolicy policy to be created.
        :param share_name: name of the share to be associated with the new
            fpolicy policy.
        :param events: list of event names for file access monitoring.
        :param engine: name of the engine to be used.
        """
        api_args = {
            'policy-name': fpolicy_name,
            'events': [],
            'engine-name': engine
        }
        # Each event name must be wrapped in an 'event-name' element.
        for event in events:
            api_args['events'].append({'event-name': event})

        self.send_request('fpolicy-policy-create', api_args)

    def delete_fpolicy_policy(self, share_name, policy_name):
        """Deletes a fpolicy policy.
:param policy_name: name of the policy to be deleted. """ try: self.send_request('fpolicy-policy-delete', {'policy-name': policy_name}) except netapp_api.NaApiError as e: if e.code in [netapp_api.EPOLICYNOTFOUND, netapp_api.EOBJECTNOTFOUND]: msg = _("FPolicy policy %s not found.") LOG.debug(msg, policy_name) else: raise exception.NetAppException(message=e.message) def get_fpolicy_policies(self, share_name, policy_name=None, engine_name='native', event_names=[]): """Retrieve one or more fpolicy policies. :param policy_name: name of the policy to be retrieved :param engine_name: name of the engine :param share_name: name of the share associated with the fpolicy policy. :param event_names: list of event names that must be associated to the fpolicy policy :return: list of fpolicy policies or empty list """ policy_info = {} if policy_name: policy_info['policy-name'] = policy_name if engine_name: policy_info['engine-name'] = engine_name if event_names: policy_info['events'] = [] for event_name in event_names: policy_info['events'].append({'event-name': event_name}) api_args = { 'query': { 'fpolicy-policy-info': policy_info, }, } result = self.send_iter_request('fpolicy-policy-get-iter', api_args) fpolicy_policies = [] if self._has_records(result): try: attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for policy_info in attributes_list.get_children(): name = policy_info.get_child_content('policy-name') engine = policy_info.get_child_content('engine-name') events_child = policy_info.get_child_by_name( 'events') or netapp_api.NaElement('none') events = [event.get_content() for event in events_child.get_children()] fpolicy_policies.append({ 'policy-name': name, 'engine-name': engine, 'events': events }) except AttributeError: msg = _('Could not retrieve fpolicy policy information.') raise exception.NetAppException(message=msg) return fpolicy_policies def create_fpolicy_scope(self, policy_name, share_name, 
extensions_to_include=None, extensions_to_exclude=None): """Assings a file scope to an existing fpolicy policy. :param policy_name: name of the policy to associate with the new scope. :param share_name: name of the share to be associated with the new scope. :param extensions_to_include: file extensions included for screening. Values should be provided as comma separated list :param extensions_to_exclude: file extensions excluded for screening. Values should be provided as comma separated list """ api_args = { 'policy-name': policy_name, 'shares-to-include': { 'string': share_name, }, 'file-extensions-to-include': [], 'file-extensions-to-exclude': [], } if extensions_to_include: for file_ext in extensions_to_include.split(','): api_args['file-extensions-to-include'].append( {'string': file_ext.strip()}) if extensions_to_exclude: for file_ext in extensions_to_exclude.split(','): api_args['file-extensions-to-exclude'].append( {'string': file_ext.strip()}) self.send_request('fpolicy-policy-scope-create', api_args) def modify_fpolicy_scope(self, share_name, policy_name, shares_to_include=[], extensions_to_include=None, extensions_to_exclude=None): """Modify an existing fpolicy scope. :param policy_name: name of the policy associated to the scope. :param share_name: name of the share associated with the fpolicy scope. :param shares_to_include: list of shares to include for file access monitoring. :param extensions_to_include: file extensions included for screening. Values should be provided as comma separated list :param extensions_to_exclude: file extensions excluded for screening. 
            Values should be provided as comma separated list
        """
        api_args = {
            'policy-name': policy_name,
        }
        # Only the filters the caller provided are sent, so unspecified
        # scope attributes are left untouched by the modify call.
        if extensions_to_include:
            api_args['file-extensions-to-include'] = []
            for file_ext in extensions_to_include.split(','):
                api_args['file-extensions-to-include'].append(
                    {'string': file_ext.strip()})
        if extensions_to_exclude:
            api_args['file-extensions-to-exclude'] = []
            for file_ext in extensions_to_exclude.split(','):
                api_args['file-extensions-to-exclude'].append(
                    {'string': file_ext.strip()})
        if shares_to_include:
            api_args['shares-to-include'] = [
                {'string': share} for share in shares_to_include
            ]

        self.send_request('fpolicy-policy-scope-modify', api_args)

    def delete_fpolicy_scope(self, policy_name):
        """Deletes a fpolicy policy scope.

        :param policy_name: name of the policy associated to the scope to be
            deleted.
        """
        try:
            self.send_request('fpolicy-policy-scope-delete',
                              {'policy-name': policy_name})
        except netapp_api.NaApiError as e:
            # Deleting an already-absent scope is treated as success;
            # any other backend error is surfaced as a NetAppException.
            if e.code in [netapp_api.ESCOPENOTFOUND,
                          netapp_api.EOBJECTNOTFOUND]:
                msg = _("FPolicy scope %s not found.")
                LOG.debug(msg, policy_name)
            else:
                raise exception.NetAppException(message=e.message)

    def get_fpolicy_scopes(self, share_name, policy_name=None,
                           extensions_to_include=None,
                           extensions_to_exclude=None,
                           shares_to_include=None):
        """Retrieve fpolicy scopes.

        :param policy_name: name of the policy associated with a scope.
        :param share_name: name of the share associated with the fpolicy
            scope.
        :param extensions_to_include: file extensions included for
            screening. Values should be provided as comma separated list
        :param extensions_to_exclude: file extensions excluded for
            screening. Values should be provided as comma separated list
        :param shares_to_include: list of shares to include for file access
            monitoring.
:return: list of fpolicy scopes or empty list """ policy_scope_info = {} if policy_name: policy_scope_info['policy-name'] = policy_name if shares_to_include: policy_scope_info['shares-to-include'] = [ {'string': share} for share in shares_to_include ] if extensions_to_include: policy_scope_info['file-extensions-to-include'] = [] for file_op in extensions_to_include.split(','): policy_scope_info['file-extensions-to-include'].append( {'string': file_op.strip()}) if extensions_to_exclude: policy_scope_info['file-extensions-to-exclude'] = [] for file_op in extensions_to_exclude.split(','): policy_scope_info['file-extensions-to-exclude'].append( {'string': file_op.strip()}) api_args = { 'query': { 'fpolicy-scope-config': policy_scope_info, }, } result = self.send_iter_request('fpolicy-policy-scope-get-iter', api_args) fpolicy_scopes = [] if self._has_records(result): try: fpolicy_scopes = [] attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for policy_scope in attributes_list.get_children(): name = policy_scope.get_child_content('policy-name') ext_include_child = policy_scope.get_child_by_name( 'file-extensions-to-include') or netapp_api.NaElement( 'none') ext_include = [ext.get_content() for ext in ext_include_child.get_children()] ext_exclude_child = policy_scope.get_child_by_name( 'file-extensions-to-exclude') or netapp_api.NaElement( 'none') ext_exclude = [ext.get_content() for ext in ext_exclude_child.get_children()] shares_child = policy_scope.get_child_by_name( 'shares-to-include') or netapp_api.NaElement('none') shares_include = [ext.get_content() for ext in shares_child.get_children()] fpolicy_scopes.append({ 'policy-name': name, 'file-extensions-to-include': ext_include, 'file-extensions-to-exclude': ext_exclude, 'shares-to-include': shares_include, }) except AttributeError: msg = _('Could not retrieve fpolicy policy information.') raise exception.NetAppException(msg) return fpolicy_scopes def 
enable_fpolicy_policy(self, share_name, policy_name, sequence_number): """Enables a specific named policy. :param policy_name: name of the policy to be enabled :param share_name: name of the share associated with the vserver and the fpolicy :param sequence_number: policy sequence number """ api_args = { 'policy-name': policy_name, 'sequence-number': sequence_number, } self.send_request('fpolicy-enable-policy', api_args) def disable_fpolicy_policy(self, policy_name): """Disables a specific policy. :param policy_name: name of the policy to be disabled """ try: self.send_request('fpolicy-disable-policy', {'policy-name': policy_name}) except netapp_api.NaApiError as e: disabled = "policy is already disabled" if (e.code in [netapp_api.EPOLICYNOTFOUND, netapp_api.EOBJECTNOTFOUND] or (e.code == netapp_api.EINVALIDINPUTERROR and disabled in e.message)): msg = _("FPolicy policy %s not found or already disabled.") LOG.debug(msg, policy_name) else: raise exception.NetAppException(message=e.message) def get_fpolicy_policies_status(self, share_name, policy_name=None, status='true'): policy_status_info = {} if policy_name: policy_status_info['policy-name'] = policy_name policy_status_info['status'] = status api_args = { 'query': { 'fpolicy-policy-status-info': policy_status_info, }, } result = self.send_iter_request('fpolicy-policy-status-get-iter', api_args) fpolicy_status = [] if self._has_records(result): try: fpolicy_status = [] attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for policy_status in attributes_list.get_children(): name = policy_status.get_child_content('policy-name') status = policy_status.get_child_content('status') seq = policy_status.get_child_content('sequence-number') fpolicy_status.append({ 'policy-name': name, 'status': strutils.bool_from_string(status), 'sequence-number': seq }) except AttributeError: msg = _('Could not retrieve fpolicy status information.') raise exception.NetAppException(msg) return 
fpolicy_status @na_utils.trace def is_svm_migrate_supported(self): """Checks if the cluster supports SVM Migrate.""" return self.features.SVM_MIGRATE def get_volume_state(self, name): """Returns volume state for a given name""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-state-attributes': { 'state': None } } }, } result = self.send_iter_request('volume-get-iter', api_args) volume_state = '' if self._has_records(result): attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') volume_attributes = attributes_list.get_child_by_name( 'volume-attributes') or netapp_api.NaElement('none') volume_state_attributes = volume_attributes.get_child_by_name( 'volume-state-attributes') or netapp_api.NaElement('none') volume_state = volume_state_attributes.get_child_content('state') return volume_state @na_utils.trace def is_flexgroup_volume(self, volume_name): """Determines if the ONTAP volume is FlexGroup.""" if not self.is_flexgroup_supported(): return False api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'style-extended': None, }, }, }, } result = self.send_request('volume-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') volume_attributes_list = attributes_list.get_children() if not self._has_records(result): raise exception.StorageResourceNotFound(name=volume_name) elif len(volume_attributes_list) > 1: msg = _('More than one volume with volume name %(vol)s found.') msg_args = {'vol': volume_name} raise exception.NetAppException(msg % msg_args) volume_attributes = volume_attributes_list[0] volume_id_attributes = volume_attributes.get_child_by_name( 'volume-id-attributes') or netapp_api.NaElement('none') return 
na_utils.is_style_extended_flexgroup( volume_id_attributes.get_child_content('style-extended')) @na_utils.trace def is_flexgroup_supported(self): return self.features.FLEXGROUP @na_utils.trace def is_flexgroup_fan_out_supported(self): return self.features.FLEXGROUP_FAN_OUT @na_utils.trace def get_job_state(self, job_id): """Returns job state for a given job id.""" api_args = { 'query': { 'job-info': { 'job-id': job_id, }, }, 'desired-attributes': { 'job-info': { 'job-state': None, }, }, } result = self.send_iter_request('job-get-iter', api_args, enable_tunneling=False) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') job_info_list = attributes_list.get_children() if not self._has_records(result): msg = _('Could not find job with ID %(id)s.') msg_args = {'id': job_id} raise exception.NetAppException(msg % msg_args) elif len(job_info_list) > 1: msg = _('Could not find unique job for ID %(id)s.') msg_args = {'id': job_id} raise exception.NetAppException(msg % msg_args) return job_info_list[0].get_child_content('job-state') @na_utils.trace def create_fpolicy_policy_with_scope(self, fpolicy_name, share_name, events, engine='native', extensions_to_include=None, extensions_to_exclude=None): # Create a fpolicy policy self.create_fpolicy_policy(fpolicy_name, share_name, events, engine='native') # Assign a scope to the fpolicy policy self.create_fpolicy_scope(fpolicy_name, share_name, extensions_to_include, extensions_to_exclude) @na_utils.trace def check_snaprestore_license(self): """Check SnapRestore license for SVM scoped user.""" # NOTE(felipe_rodrigues): workaround to find out whether the # backend has the license: since without cluster credentials it # cannot retrieve the ontap licenses, it sends a fake ONTAP # "snapshot-restore-volume" request which is only available when # the license exists. By the got error, it checks whether license # is installed or not. 
try: self.restore_snapshot( "fake_%s" % uuidutils.generate_uuid(dashed=False), "") except netapp_api.NaApiError as e: no_license = 'is not licensed' LOG.debug('Fake restore_snapshot request failed: %s', e) return not (e.code == netapp_api.EAPIERROR and no_license in e.message) # since it passed an empty snapshot, it should never get here msg = _("Caught an unexpected behavior: the fake restore to " "snapshot request using 'fake' volume and empty string " "snapshot as argument has not failed.") LOG.exception(msg) raise exception.NetAppException(msg) # ------------------------ REST CALLS ONLY ------------------------ # NOTE(nahimsouza): For ONTAP 9.12.1 and newer, if the option # `netapp_use_legacy_client` is False, REST API client will be used. This # code was kept here to avoid breaking the SVM migrate feature on older # ONTAP versions. In the future, when ZAPI is deprecated, this code can # also be removed. @na_utils.trace def _format_request(self, request_data, headers={}, query={}, url_params={}): """Receives the request data and formats it into a request pattern. :param request_data: the body to be sent to the request. :param headers: additional headers to the request. :param query: filters to the request. :param url_params: parameters to be added to the request. """ request = { "body": request_data, "headers": headers, "query": query, "url_params": url_params } return request @na_utils.trace def svm_migration_start( self, source_cluster_name, source_share_server_name, dest_aggregates, dest_ipspace=None, check_only=False): """Send a request to start the SVM migration in the backend. :param source_cluster_name: the name of the source cluster. :param source_share_server_name: the name of the source server. :param dest_aggregates: the aggregates where volumes will be placed in the migration. :param dest_ipspace: created IPspace for the migration. :param check_only: If the call will only check the feasibility. deleted after the cutover or not. 
""" request = { "auto_cutover": False, "auto_source_cleanup": True, "check_only": check_only, "source": { "cluster": {"name": source_cluster_name}, "svm": {"name": source_share_server_name}, }, "destination": { "volume_placement": { "aggregates": dest_aggregates, }, }, } if dest_ipspace: ipspace_data = { "ipspace": { "name": dest_ipspace, } } request["destination"].update(ipspace_data) api_args = self._format_request(request) return self.send_request( 'svm-migration-start', api_args=api_args, use_zapi=False) @na_utils.trace def get_migration_check_job_state(self, job_id): """Get the job state of a share server migration. :param job_id: id of the job to be searched. """ try: job = self.get_job(job_id) return job except netapp_api.NaApiError as e: if e.code == netapp_api.ENFS_V4_0_ENABLED_MIGRATION_FAILURE: msg = _( 'NFS v4.0 is not supported while migrating vservers.') LOG.error(msg) raise exception.NetAppException(message=e.message) if e.code == netapp_api.EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER: msg = _('Both source and destination clusters must be AFF ' 'systems.') LOG.error(msg) raise exception.NetAppException(message=e.message) msg = (_('Failed to check migration support. Reason: ' '%s' % e.message)) LOG.error(msg) raise exception.NetAppException(msg) @na_utils.trace def svm_migrate_complete(self, migration_id): """Send a request to complete the SVM migration. :param migration_id: the id of the migration provided by the storage. """ request = { "action": "cutover" } url_params = { "svm_migration_id": migration_id } api_args = self._format_request( request, url_params=url_params) return self.send_request( 'svm-migration-complete', api_args=api_args, use_zapi=False) @na_utils.trace def svm_migrate_cancel(self, migration_id): """Send a request to cancel the SVM migration. :param migration_id: the id of the migration provided by the storage. 
""" request = {} url_params = { "svm_migration_id": migration_id } api_args = self._format_request(request, url_params=url_params) return self.send_request( 'svm-migration-cancel', api_args=api_args, use_zapi=False) @na_utils.trace def svm_migration_get(self, migration_id): """Send a request to get the progress of the SVM migration. :param migration_id: the id of the migration provided by the storage. """ request = {} url_params = { "svm_migration_id": migration_id } api_args = self._format_request(request, url_params=url_params) return self.send_request( 'svm-migration-get', api_args=api_args, use_zapi=False) @na_utils.trace def svm_migrate_pause(self, migration_id): """Send a request to pause a migration. :param migration_id: the id of the migration provided by the storage. """ request = { "action": "pause" } url_params = { "svm_migration_id": migration_id } api_args = self._format_request( request, url_params=url_params) return self.send_request( 'svm-migration-pause', api_args=api_args, use_zapi=False) @na_utils.trace def get_job(self, job_uuid): """Get a job in ONTAP. :param job_uuid: uuid of the job to be searched. """ request = {} url_params = { "job_uuid": job_uuid } api_args = self._format_request(request, url_params=url_params) return self.send_request( 'get-job', api_args=api_args, use_zapi=False) @na_utils.trace def get_svm_volumes_total_size(self, svm_name): """Gets volumes sizes sum (GB) from all volumes in SVM by svm_name""" request = {} query = { 'svm.name': svm_name, 'fields': 'size' } api_args = self._format_request(request, query=query) response = self.send_request( 'svm-migration-get-progress', api_args=api_args, use_zapi=False) svm_volumes = response.get('records', []) if len(svm_volumes) > 0: total_volumes_size = 0 for volume in svm_volumes: # Root volumes are not taking account because they are part of # SVM creation. if volume['name'] != 'root': total_volumes_size = total_volumes_size + volume['size'] else: return 0 # Convert Bytes to GBs. 
return (total_volumes_size / 1024**3) @na_utils.trace def snapmirror_restore_vol(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, des_cluster=None, source_snapshot=None): """Restore snapshot copy from destination volume to source volume""" self._ensure_snapmirror_v2() api_args = self._build_snapmirror_request( source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) if source_snapshot: api_args["source-snapshot"] = source_snapshot self.send_request('snapmirror-restore', api_args) @na_utils.trace def list_volume_snapshots(self, volume_name, snapmirror_label=None, newer_than=None): """Gets SnapMirror snapshots on a volume.""" api_args = { 'query': { 'snapshot-info': { 'volume': volume_name, }, }, } if newer_than: api_args['query']['snapshot-info'][ 'access-time'] = '>' + newer_than if snapmirror_label: api_args['query']['snapshot-info'][ 'snapmirror-label'] = snapmirror_label result = self.send_iter_request('snapshot-get-iter', api_args) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [snapshot_info.get_child_content('name') for snapshot_info in attributes_list.get_children()] @na_utils.trace def is_snaplock_compliance_clock_configured(self, node_name): """Get the Snaplock compliance is configured for each node""" api_args = {'node': node_name} result = self.send_request('snaplock-get-node-compliance-clock', api_args) node_compliance_clock = result.get_child_by_name( "snaplock-node-compliance-clock" ) if not node_compliance_clock: raise exception.NetAppException( "Compliance clock is not configured for node %s", node_name, ) clock_info = node_compliance_clock.get_child_by_name( "compliance-clock-info") clock_fmt_value = clock_info.get_child_content( "formatted-snaplock-compliance-clock") return 'not configured' not in clock_fmt_value.lower() @na_utils.trace def set_snaplock_attributes(self, volume_name, **options): """Set 
the retention period for SnapLock enabled volume""" api_args = {} snaplock_attribute_mapping = { 'snaplock_autocommit_period': 'autocommit-period', 'snaplock_min_retention_period': 'minimum-retention-period', 'snaplock_max_retention_period': 'maximum-retention-period', 'snaplock_default_retention_period': 'default-retention-period', } for share_type_attr, na_api_attr in snaplock_attribute_mapping.items(): if options.get(share_type_attr): api_args[na_api_attr] = options.get(share_type_attr) if all(value is None for value in api_args.values()): LOG.debug("All SnapLock attributes are None, not" " updating SnapLock attributes") return api_args['volume'] = volume_name default_retention_period = options.get( 'snaplock_default_retention_period' ) if default_retention_period and default_retention_period == "max": api_args['default-retention-period'] = ( api_args['maximum-retention-period'] ) elif default_retention_period and default_retention_period == "min": api_args['default-retention-period'] = ( api_args['minimum-retention-period'] ) self.send_request('volume-set-snaplock-attrs', api_args) @na_utils.trace def _is_snaplock_enabled_volume(self, volume_name): """Get whether volume is SnapLock enabled or disabled""" vol_attr = self.get_volume(volume_name) return vol_attr.get('snaplock-type') in ("compliance", "enterprise") @na_utils.trace def get_vserver_aggr_snaplock_type(self, aggr_name): """Get SnapLock type for vserver aggregate""" api_args = { 'query': { 'show-aggregates': { 'aggregate-name': aggr_name, }, }, 'desired-attributes': { 'show-aggregates': { 'snaplock-type': None, }, }, } if self.features.SNAPLOCK: result = self.send_iter_request('vserver-show-aggr-get-iter', api_args) else: return None if result is not None and self._has_records(result): attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') vs_aggr_attributes = attributes_list.get_child_by_name( 'show-aggregates') or netapp_api.NaElement('none') return 
vs_aggr_attributes.get_child_content('snaplock-type') @na_utils.trace def get_storage_failover_partner(self, node_name): """Get the partner node of HA pair""" api_args = {'node': node_name} result = self.send_request('cf-get-partner', api_args) partner_node = result.get_child_content("partner") return partner_node @na_utils.trace def get_migratable_data_lif_for_node(self, node): """Get available LIFs that can be migrated to another node.""" failover_policy = ['system-defined', 'sfo-partner-only'] protocols = ['nfs', 'cifs'] api_args = { 'query': { 'net-interface-info': { 'failover-policy': '|'.join(failover_policy), 'home-node': node, 'data-protocols': { 'data-protocol': '|'.join(protocols), } } } } result = self.send_iter_request('net-interface-get-iter', api_args) lif_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [lif_info.get_child_content('interface-name') for lif_info in lif_info_list.get_children()] @na_utils.trace def get_data_lif_details_for_nodes(self): """Get the data LIF details for each node.""" api_args = { 'desired-attributes': { 'data-lif-capacity-details-info': { 'limit-for-node': None, 'count-for-node': None, 'node': None }, }, } result = self.send_iter_request('data-lif-capacity-details', api_args) data_lif_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') data_lif_info = [] for lif_info in data_lif_info_list.get_children(): lif_info_node = { 'limit-for-node': lif_info.get_child_content('limit-for-node'), 'count-for-node': lif_info.get_child_content('count-for-node'), 'node': lif_info.get_child_content('node'), } data_lif_info.append(lif_info_node) return data_lif_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py0000664000175000017500000070074000000000000027663 0ustar00zuulzuul00000000000000# Copyright (c) 2023 NetApp, 
Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from datetime import datetime from http import client as http_client import math import re import time from oslo_log import log from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units from manila import exception from manila.i18n import _ from manila.share.drivers.netapp.dataontap.client import client_base from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.client import rest_api as netapp_api from manila.share.drivers.netapp import utils as na_utils from manila import utils LOG = log.getLogger(__name__) DELETED_PREFIX = 'deleted_manila_' DEFAULT_IPSPACE = 'Default' CLUSTER_IPSPACES = ('Cluster', DEFAULT_IPSPACE) DEFAULT_BROADCAST_DOMAIN = 'Default' BROADCAST_DOMAIN_PREFIX = 'domain_' DEFAULT_MAX_PAGE_LENGTH = 10000 CIFS_USER_GROUP_TYPE = 'windows' SNAPSHOT_CLONE_OWNER = 'volume_clone' CUTOVER_ACTION_MAP = { 'defer': 'defer_on_failure', 'abort': 'abort_on_failure', 'force': 'force', 'wait': 'wait', } DEFAULT_TIMEOUT = 15 DEFAULT_TCP_MAX_XFER_SIZE = 65536 DEFAULT_UDP_MAX_XFER_SIZE = 32768 DEFAULT_SECURITY_CERT_EXPIRE_DAYS = 365 class NetAppRestClient(object): def __init__(self, **kwargs): self.connection = netapp_api.RestNaServer( host=kwargs['hostname'], transport_type=kwargs['transport_type'], ssl_cert_path=kwargs['ssl_cert_path'], port=kwargs['port'], username=kwargs['username'], 
password=kwargs['password'], trace=kwargs.get('trace', False), api_trace_pattern=kwargs.get('api_trace_pattern', na_utils.API_TRACE_PATTERN), private_key_file=kwargs['private_key_file'], certificate_file=kwargs['certificate_file'], ca_certificate_file=kwargs['ca_certificate_file'], certificate_host_validation=kwargs['certificate_host_validation']) self.async_rest_timeout = kwargs['async_rest_timeout'] self.vserver = kwargs.get('vserver') self.connection.set_vserver(self.vserver) # NOTE(nahimsouza): Set this flag to False to ensure get_ontap_version # will be called without SVM tunneling. This is necessary because # requests with SVM scoped account can not be tunneled in REST API. self._have_cluster_creds = False ontap_version = self.get_ontap_version(cached=False) if ontap_version['version-tuple'] < (9, 12, 1): msg = _('This driver can communicate with ONTAP via REST APIs ' 'exclusively only when paired with a NetApp ONTAP storage ' 'system running release 9.12.1 or newer. ' 'To use ZAPI and supported REST APIs instead, ' 'set "netapp_use_legacy_client" to True.') raise exception.NetAppException(msg) self.connection.set_ontap_version(ontap_version) # NOTE(nahimsouza): ZAPI Client is needed to implement the fallback # when a REST method is not supported. self.zapi_client = client_cmode.NetAppCmodeClient(**kwargs) self._have_cluster_creds = self._check_for_cluster_credentials() self._init_features() def _init_features(self): """Initialize feature support map.""" self.features = client_base.Features() # NOTE(felipe_rodrigues): REST client only runs with ONTAP 9.11.1 or # upper, so all features below are supported with this client. 
self.features.add_feature('SNAPMIRROR_V2', supported=True) self.features.add_feature('SYSTEM_METRICS', supported=True) self.features.add_feature('SYSTEM_CONSTITUENT_METRICS', supported=True) self.features.add_feature('BROADCAST_DOMAINS', supported=True) self.features.add_feature('IPSPACES', supported=True) self.features.add_feature('SUBNETS', supported=True) self.features.add_feature('CLUSTER_PEER_POLICY', supported=True) self.features.add_feature('ADVANCED_DISK_PARTITIONING', supported=True) self.features.add_feature('KERBEROS_VSERVER', supported=True) self.features.add_feature('FLEXVOL_ENCRYPTION', supported=True) self.features.add_feature('SVM_DR', supported=True) self.features.add_feature('ADAPTIVE_QOS', supported=True) self.features.add_feature('TRANSFER_LIMIT_NFS_CONFIG', supported=True) self.features.add_feature('CIFS_DC_ADD_SKIP_CHECK', supported=True) self.features.add_feature('LDAP_LDAP_SERVERS', supported=True) self.features.add_feature('FLEXGROUP', supported=True) self.features.add_feature('FLEXGROUP_FAN_OUT', supported=True) self.features.add_feature('SVM_MIGRATE', supported=True) self.features.add_feature('UNIFIED_AGGR', supported=True) def __getattr__(self, name): """If method is not implemented for REST, try to call the ZAPI.""" LOG.debug("The %s call is not supported for REST, falling back to " "ZAPI.", name) # Don't use self.zapi_client to avoid reentrant call to __getattr__() zapi_client = object.__getattribute__(self, 'zapi_client') return getattr(zapi_client, name) def _wait_job_result(self, job_url): interval = 2 retries = (self.async_rest_timeout / interval) @utils.retry(netapp_api.NaRetryableError, interval=interval, retries=retries, backoff_rate=1) def _waiter(): response = self.send_request(job_url, 'get', enable_tunneling=False) job_state = response.get('state') if job_state == 'success': return response elif job_state == 'failure': message = response['error']['message'] code = response['error']['code'] raise 
netapp_api.NaRetryableError(message=message, code=code) msg_args = {'job': job_url, 'state': job_state} LOG.debug("Job %(job)s has not finished: %(state)s", msg_args) raise netapp_api.NaRetryableError(message='Job is running.') try: return _waiter() except netapp_api.NaRetryableError: msg = _("Job %s did not reach the expected state. Retries " "exhausted. Aborting.") % job_url raise na_utils.NetAppDriverException(msg) def send_request(self, action_url, method, body=None, query=None, enable_tunneling=True, max_page_length=DEFAULT_MAX_PAGE_LENGTH, wait_on_accepted=True): """Sends REST request to ONTAP. :param action_url: action URL for the request :param method: HTTP method for the request ('get', 'post', 'put', 'delete' or 'patch') :param body: dict of arguments to be passed as request body :param query: dict of arguments to be passed as query string :param enable_tunneling: enable tunneling to the ONTAP host :param max_page_length: size of the page during pagination :param wait_on_accepted: if True, wait until the job finishes when HTTP code 202 (Accepted) is returned :returns: parsed REST response """ # NOTE(felipe_rodrigues): disable tunneling when running in SVM scoped # context, otherwise REST API fails. if not self._have_cluster_creds: enable_tunneling = False response = None if method == 'get': response = self.get_records( action_url, query, enable_tunneling, max_page_length) else: code, response = self.connection.invoke_successfully( action_url, method, body=body, query=query, enable_tunneling=enable_tunneling) if code == http_client.ACCEPTED and wait_on_accepted: # get job URL and discard '/api' job_url = response['job']['_links']['self']['href'][4:] response = self._wait_job_result(job_url) return response def get_records(self, action_url, query=None, enable_tunneling=True, max_page_length=DEFAULT_MAX_PAGE_LENGTH): """Retrieves ONTAP resources using pagination REST request. 
:param action_url: action URL for the request :param query: dict of arguments to be passed as query string :param enable_tunneling: enable tunneling to the ONTAP host :param max_page_length: size of the page during pagination :returns: dict containing records and num_records """ # NOTE(felipe_rodrigues): disable tunneling when running in SVM scoped # context, otherwise REST API fails. if not self._have_cluster_creds: enable_tunneling = False # Initialize query variable if it is None query = query if query else {} query['max_records'] = max_page_length _, response = self.connection.invoke_successfully( action_url, 'get', query=query, enable_tunneling=enable_tunneling) # NOTE(nahimsouza): if all records are returned in the first call, # 'next_url' will be None. next_url = response.get('_links', {}).get('next', {}).get('href') next_url = next_url[4:] if next_url else None # discard '/api' # Get remaining pages, saving data into first page while next_url: # NOTE(nahimsouza): clean the 'query', because the parameters are # already included in 'next_url'. 
_, next_response = self.connection.invoke_successfully( next_url, 'get', query=None, enable_tunneling=enable_tunneling) response['num_records'] += next_response.get('num_records', 0) response['records'].extend(next_response.get('records')) next_url = ( next_response.get('_links', {}).get('next', {}).get('href')) next_url = next_url[4:] if next_url else None # discard '/api' return response @na_utils.trace def get_ontap_version(self, cached=True): """Get the current Data ONTAP version.""" if cached: return self.connection.get_ontap_version() query = { 'fields': 'version' } try: response = self.send_request('/cluster/nodes', 'get', query=query, enable_tunneling=False) records = response.get('records')[0] return { 'version': records['version']['full'], 'version-tuple': (records['version']['generation'], records['version']['major'], records['version']['minor']), } except netapp_api.api.NaApiError as e: if e.code != netapp_api.EREST_NOT_AUTHORIZED: raise # NOTE(nahimsouza): SVM scoped account is not authorized to access # the /cluster/nodes endpoint, that's why we use /private/cli response = self.send_request('/private/cli/version', 'get', query=query) # Response is formatted as: # 'NetApp Release 9.12.1: Wed Feb 01 01:10:18 UTC 2023' version_full = response['records'][0]['version']['full'] version_parsed = re.findall(r'\d+\.\d+\.\d+', version_full)[0] version_splited = version_parsed.split('.') return { 'version': version_full, 'version-tuple': (int(version_splited[0]), int(version_splited[1]), int(version_splited[2])), } @na_utils.trace def get_job(self, job_uuid): """Get a job in ONTAP. :param job_uuid: uuid of the job to be searched. 
""" action_url = f'/cluster/jobs/{job_uuid}' return self.send_request(action_url, 'get', enable_tunneling=False) @na_utils.trace def _has_records(self, api_response): """Check if API response contains any records.""" if (not api_response['num_records'] or api_response['num_records'] == 0): return False else: return True @na_utils.trace def get_licenses(self): """Get list of ONTAP licenses.""" try: result = self.send_request('/cluster/licensing/licenses', 'get') except netapp_api.api.NaApiError: with excutils.save_and_reraise_exception(): LOG.exception("Could not get list of ONTAP licenses.") return sorted( [license['name'] for license in result.get('records', [])]) @na_utils.trace def _get_security_key_manager_nve_support(self): """Determine whether the cluster platform supports Volume Encryption""" query = {'fields': 'volume_encryption.*'} try: response = self.send_request('/security/key-managers', 'get', query=query) records = response.get('records', []) if records: if records[0]['volume_encryption']['supported']: return True except netapp_api.api.NaApiError as e: LOG.debug("NVE disabled due to error code: %s - %s", e.code, e.message) return False LOG.debug("NVE disabled - Key management is not " "configured on the admin Vserver.") return False @na_utils.trace def is_nve_supported(self): """Determine whether NVE is supported on this platform.""" nodes = self.list_cluster_nodes() system_version = self.get_ontap_version() version = system_version.get('version') # Not all platforms support this feature. NVE is not supported if the # version includes the substring '<1no-DARE>' (no Data At Rest # Encryption). 
if "<1no-DARE>" not in version: if nodes is not None: return self._get_security_key_manager_nve_support() else: LOG.warning('Cluster credentials are required in order to ' 'determine whether NetApp Volume Encryption is ' 'supported or not on this platform.') return False else: LOG.warning('NetApp Volume Encryption is not supported on this ' 'ONTAP version: %(version)s. ', {'version': version}) return False @na_utils.trace def check_for_cluster_credentials(self): """Check if credentials to connect to ONTAP from cached value.""" return self._have_cluster_creds @na_utils.trace def _check_for_cluster_credentials(self): """Check if credentials to connect to ONTAP are defined correctly.""" try: self.list_cluster_nodes() # API succeeded, so definitely a cluster management LIF return True except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_NOT_AUTHORIZED: LOG.debug('Not connected to cluster management LIF.') return False else: raise @na_utils.trace def list_cluster_nodes(self): """Get all available cluster nodes.""" result = self.send_request('/cluster/nodes', 'get') return [node['name'] for node in result.get('records', [])] @na_utils.trace def _get_volume_by_args(self, vol_name=None, aggregate_name=None, vol_path=None, vserver=None, fields=None, is_root=None): """Get info from a single volume according to the args.""" query = { 'style': 'flex*', # Match both 'flexvol' and 'flexgroup' 'error_state.is_inconsistent': 'false', 'fields': 'name,style,svm.name,svm.uuid' } if vol_name: query['name'] = vol_name if aggregate_name: query['aggregates.name'] = aggregate_name if vol_path: query['nas.path'] = vol_path if vserver: query['svm.name'] = vserver if fields: query['fields'] = fields if is_root is not None: query['is_svm_root'] = is_root volumes_response = self.send_request( '/storage/volumes/', 'get', query=query) records = volumes_response.get('records', []) if len(records) != 1: msg = _('Could not find unique share. 
Shares found: %(shares)s.') msg_args = {'shares': records} raise exception.NetAppException(message=msg % msg_args) return records[0] @na_utils.trace def restore_snapshot(self, volume_name, snapshot_name): """Reverts a volume to the specified snapshot.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'restore_to.snapshot.name': snapshot_name } # Update volume self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def vserver_exists(self, vserver_name): """Checks if Vserver exists.""" LOG.debug('Checking if Vserver %s exists', vserver_name) query = { 'name': vserver_name } try: result = self.send_request('/svm/svms', 'get', query=query, enable_tunneling=False) except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_VSERVER_NOT_FOUND: return False else: raise return self._has_records(result) @na_utils.trace def list_root_aggregates(self): """Get names of all aggregates that contain node root volumes.""" response = self.send_request('/private/cli/aggr', 'get', query={'root': 'true'}) return [aggr['aggregate'] for aggr in response['records']] @na_utils.trace def list_non_root_aggregates(self): """Get names of all aggregates that don't contain node root volumes.""" # NOTE(nahimsouza): According to REST API doc, only data aggregates are # returned by the /storage/aggregates endpoint, which means no System # owned root aggregate will be included in the output. Also, note that # this call does not work for users with SVM scoped account. response = self.send_request('/storage/aggregates', 'get') aggr_list = response['records'] return [aggr['name'] for aggr in aggr_list] @na_utils.trace def get_cluster_aggregate_capacities(self, aggregate_names): """Calculates capacity of one or more aggregates. Returns dictionary of aggregate capacity metrics. 'used' is the actual space consumed on the aggregate. 'available' is the actual space remaining. 
'size' is the defined total aggregate size, such that used + available = total. """ if aggregate_names is not None and len(aggregate_names) == 0: return {} fields = 'name,space' aggrs = self._get_aggregates(aggregate_names=aggregate_names, fields=fields) aggr_space_dict = dict() for aggr in aggrs: aggr_name = aggr['name'] aggr_space_attrs = aggr['space'] aggr_space_dict[aggr_name] = { 'available': int(aggr_space_attrs["block_storage"]["available"]), 'total': int(aggr_space_attrs["block_storage"]["size"]), 'used': int(aggr_space_attrs["block_storage"]["used"]), } return aggr_space_dict @na_utils.trace def _get_aggregates(self, aggregate_names=None, fields=None): """Get a list of aggregates and their attributes. :param aggregate_names: List of aggregate names. :param fields: List of fields to be retrieved from each aggregate. :return: List of aggregates. """ query = {} if aggregate_names: query['name'] = ','.join(aggregate_names) if fields: query['fields'] = fields # NOTE(nahimsouza): This endpoint returns only data aggregates. Also, # it does not work with SVM scoped account. 
response = self.send_request('/storage/aggregates', 'get', query=query) if not self._has_records(response): return [] else: return response.get('records', []) @na_utils.trace def get_aggregate(self, aggregate_name): """Get aggregate attributes needed for the storage service catalog.""" if not aggregate_name: return {} fields = ('name,block_storage.primary.raid_type,' 'block_storage.storage_type,snaplock_type') try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], fields=fields) except netapp_api.api.NaApiError: LOG.exception('Failed to get info for aggregate %s.', aggregate_name) return {} if len(aggrs) == 0: return {} aggr_attributes = aggrs[0] aggregate = { 'name': aggr_attributes['name'], 'raid-type': aggr_attributes['block_storage']['primary']['raid_type'], 'is-hybrid': aggr_attributes['block_storage']['storage_type'] == 'hybrid', 'snaplock-type': aggr_attributes.get('snaplock_type'), 'is-snaplock': False if (aggr_attributes.get('snaplock_type') == 'non_snaplock') else True } return aggregate @na_utils.trace def get_node_for_aggregate(self, aggregate_name): """Get home node for the specified aggregate. This API could return None, most notably if it was sent to a Vserver LIF, so the caller must be able to handle that case. 
""" if not aggregate_name: return None fields = 'name,home_node.name' try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], fields=fields) except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_NOT_AUTHORIZED: LOG.debug("Could not get the home node of aggregate %s: " "command not authorized.", aggregate_name) return None else: raise return aggrs[0]['home_node']['name'] if aggrs else None @na_utils.trace def get_aggregate_disk_types(self, aggregate_name): """Get the disk type(s) of an aggregate.""" disk_types = set() disk_types.update(self._get_aggregate_disk_types(aggregate_name)) return list(disk_types) if disk_types else None @na_utils.trace def _get_aggregate_disk_types(self, aggregate_name): """Get the disk type(s) of an aggregate (may be a list).""" disk_types = set() query = { 'aggregates.name': aggregate_name, 'fields': 'effective_type' } try: response = self.send_request( '/storage/disks', 'get', query=query) except netapp_api.api.NaApiError: LOG.exception('Failed to get disk info for aggregate %s.', aggregate_name) return disk_types for storage_disk_info in response['records']: disk_types.add(storage_disk_info['effective_type']) return disk_types @na_utils.trace def volume_exists(self, volume_name): """Checks if volume exists.""" LOG.debug('Checking if volume %s exists', volume_name) query = { 'name': volume_name } result = self.send_request( '/storage/volumes', 'get', query=query) return self._has_records(result) @na_utils.trace def list_vserver_aggregates(self): """Returns a list of aggregates available to a vserver. This must be called against a Vserver LIF. """ return list(self.get_vserver_aggregate_capacities().keys()) @na_utils.trace def get_vserver_aggregate_capacities(self, aggregate_names=None): """Calculates capacity of one or more aggregates for a vserver. Returns dictionary of aggregate capacity metrics. This must be called against a Vserver LIF. 
""" if aggregate_names is not None and len(aggregate_names) == 0: return {} query = { 'fields': 'name,aggregates.name,aggregates.available_size' } response = self.send_request('/svm/svms', 'get', query=query) if not response['records']: msg = _('Could not find information of vserver.') raise exception.NetAppException(message=msg) vserver = response['records'][0] aggr_space_dict = dict() for aggr in vserver.get('aggregates', []): available_size = aggr.get('available_size') if available_size is None: # NOTE(felipe_rodrigues): available_size not returned means # the vserver does not have any aggregate assigned to it. REST # API returns all non root aggregates of the cluster to vserver # that does not have any, but without the space information. LOG.warning('No aggregates assigned to Vserver %s.', vserver['name']) return {} aggr_name = aggr['name'] if aggregate_names is None or aggr_name in aggregate_names: aggr_space_dict[aggr['name']] = {'available': available_size} if not aggr_space_dict: LOG.warning('No aggregates assigned to Vserver %s.', vserver['name']) return {} LOG.debug('Found available Vserver aggregates: %s.', aggr_space_dict) return aggr_space_dict @na_utils.trace def qos_policy_group_create(self, qos_policy_group_name, vserver, max_throughput=None): """Creates a QoS policy group.""" body = { 'name': qos_policy_group_name, 'svm.name': vserver, } if max_throughput: value = max_throughput.lower() if 'iops' in max_throughput: value = value.replace('iops', '') value = int(value) body['fixed.max_throughput_iops'] = value else: value = value.replace('b/s', '') value = int(value) body['fixed.max_throughput_mbps'] = math.ceil(value / units.Mi) return self.send_request('/storage/qos/policies', 'post', body=body) @na_utils.trace def list_network_interfaces(self): """Get the names of available LIFs.""" query = { 'fields': 'name' } result = self.send_request('/network/ip/interfaces', 'get', query=query) if self._has_records(result): return [lif['name'] for lif in 
result.get('records', [])] @na_utils.trace def get_network_interfaces(self, protocols=None): """Get available LIFs.""" protocols = na_utils.convert_to_list(protocols) protocols = [f"data_{protocol.lower()}" for protocol in protocols] if protocols: query = { 'services': ','.join(protocols), 'fields': 'ip.address,location.home_node.name,' 'location.home_port.name,ip.netmask,' 'services,svm.name,enabled' } else: query = { 'fields': 'ip.address,location.home_node.name,' 'location.home_port.name,ip.netmask,' 'services,svm.name,enabled' } result = self.send_request('/network/ip/interfaces', 'get', query=query) interfaces = [] for lif_info in result.get('records', []): lif = { 'administrative-status': ( 'up' if lif_info['enabled'] else 'down'), 'uuid': lif_info['uuid'], 'address': lif_info['ip']['address'], 'home-node': lif_info['location']['home_node']['name'], 'home-port': lif_info['location']['home_port']['name'], 'interface-name': lif_info['name'], 'netmask': lif_info['ip']['netmask'], 'role': lif_info['services'], 'vserver': lif_info['svm']['name'], } interfaces.append(lif) return interfaces @na_utils.trace def clear_nfs_export_policy_for_volume(self, volume_name): """Clear NFS export policy for volume, i.e. sets it to default.""" self.set_nfs_export_policy_for_volume(volume_name, 'default') @na_utils.trace def set_nfs_export_policy_for_volume(self, volume_name, policy_name): """Set NFS the export policy for the specified volume.""" query = {"name": volume_name} body = {'nas.export_policy.name': policy_name} try: self.send_request('/storage/volumes/', 'patch', query=query, body=body) except netapp_api.api.NaApiError as e: # NOTE(nahimsouza): Since this error is ignored in ZAPI, we are # replicating the behavior here. 
if e.code == netapp_api.EREST_CANNOT_MODITY_OFFLINE_VOLUME: LOG.debug('Cannot modify offline volume: %s', volume_name) return @na_utils.trace def create_nfs_export_policy(self, policy_name): """Create an NFS export policy.""" body = {'name': policy_name} try: self.send_request('/protocols/nfs/export-policies', 'post', body=body) except netapp_api.api.NaApiError as e: if e.code != netapp_api.EREST_DUPLICATE_ENTRY: msg = _("Create NFS export policy %s fail.") LOG.debug(msg, policy_name) raise @na_utils.trace def soft_delete_nfs_export_policy(self, policy_name): """Try to delete export policy or mark it to be deleted later.""" try: self.delete_nfs_export_policy(policy_name) except netapp_api.api.NaApiError: # NOTE(cknight): Policy deletion can fail if called too soon after # removing from a flexvol. So rename for later harvesting. LOG.warning("Fail to delete NFS export policy %s." "Export policy will be renamed instead.", policy_name) self.rename_nfs_export_policy(policy_name, DELETED_PREFIX + policy_name) @na_utils.trace def rename_nfs_export_policy(self, policy_name, new_policy_name): """Rename NFS export policy.""" response = self.send_request( '/protocols/nfs/export-policies', 'get', query={'name': policy_name}) if not self._has_records(response): msg = _('Could not rename policy %(policy_name)s. 
' 'Entry does not exist.') msg_args = {'policy_name': policy_name} raise exception.NetAppException(msg % msg_args) uuid = response['records'][0]['id'] body = {'name': new_policy_name} self.send_request(f'/protocols/nfs/export-policies/{uuid}', 'patch', body=body) @na_utils.trace def get_volume_junction_path(self, volume_name, is_style_cifs=False): """Gets a volume junction path.""" query = { 'name': volume_name, 'fields': 'nas.path' } result = self.send_request('/storage/volumes/', 'get', query=query) return result['records'][0]['nas']['path'] @na_utils.trace def get_volume_snapshot_attributes(self, volume_name): """Returns snapshot attributes""" volume = self._get_volume_by_args(vol_name=volume_name) vol_uuid = volume['uuid'] query = { 'fields': 'snapshot_directory_access_enabled,snapshot_policy.name' } result = self.send_request( f'/storage/volumes/{vol_uuid}', 'get', query=query) snap_attributes = {} snap_attributes['snapshot-policy'] = result.get( 'snapshot_policy', '').get('name') snap_attributes['snapdir-access-enabled'] = result.get( 'snapshot_directory_access_enabled', 'false') return snap_attributes @na_utils.trace def get_volume(self, volume_name): """Returns the volume with the specified name, if present.""" query = { 'name': volume_name, 'fields': 'aggregates.name,nas.path,name,svm.name,type,style,' 'qos.policy.name,space.size,space.used,snaplock.type' } result = self.send_request('/storage/volumes', 'get', query=query) if not self._has_records(result): raise exception.StorageResourceNotFound(name=volume_name) elif result['num_records'] > 1: msg = _('Could not find unique volume %(vol)s.') msg_args = {'vol': volume_name} raise exception.NetAppException(msg % msg_args) volume_infos = result['records'][0] aggregates = volume_infos.get('aggregates', []) if len(aggregates) == 0: aggregate = '' aggregate_list = [] else: aggregate = aggregates[0]['name'] aggregate_list = [aggr['name'] for aggr in aggregates] volume = { 'aggregate': aggregate, 'aggr-list': 
aggregate_list, 'junction-path': volume_infos.get('nas', {}).get('path'), 'name': volume_infos.get('name'), 'owning-vserver-name': volume_infos.get('svm', {}).get('name'), 'type': volume_infos.get('type'), 'style': volume_infos.get('style'), 'size': volume_infos.get('space', {}).get('size'), 'size-used': volume_infos.get('space', {}).get('used'), 'qos-policy-group-name': ( volume_infos.get('qos', {}).get('policy', {}).get('name')), 'style-extended': volume_infos.get('style'), 'snaplock-type': volume_infos.get('snaplock', {}).get('type'), } return volume @na_utils.trace def cifs_share_exists(self, share_name): """Check that a CIFS share already exists.""" share_path = f'/{share_name}' query = { 'name': share_name, 'path': share_path, } result = self.send_request('/protocols/cifs/shares', 'get', query=query) return self._has_records(result) @na_utils.trace def create_cifs_share(self, share_name, path): """Create a CIFS share.""" body = { 'name': share_name, 'path': path, 'svm.name': self.vserver, } self.send_request('/protocols/cifs/shares', 'post', body=body) @na_utils.trace def set_volume_security_style(self, volume_name, security_style='unix'): """Set volume security style""" query = { 'name': volume_name, } body = { 'nas.security_style': security_style } self.send_request('/storage/volumes', 'patch', body=body, query=query) @na_utils.trace def remove_cifs_share_access(self, share_name, user_name): """Remove CIFS share access.""" query = { 'name': share_name, 'fields': 'svm.uuid' } get_uuid = self.send_request('/protocols/cifs/shares', 'get', query=query) svm_uuid = get_uuid['records'][0]['svm']['uuid'] self.send_request( f'/protocols/cifs/shares/{svm_uuid}/{share_name}' f'/acls/{user_name}/{CIFS_USER_GROUP_TYPE}', 'delete') # TODO(caique): when ZAPI is dropped, this method should be removed and # the callers should start calling directly the "create_volume_async" @na_utils.trace def create_volume(self, aggregate_name, volume_name, size_gb, thin_provisioned=False, 
snapshot_policy=None, language=None, dedup_enabled=False, compression_enabled=False, max_files=None, snapshot_reserve=None, volume_type='rw', qos_policy_group=None, adaptive_qos_policy_group=None, encrypt=False, mount_point_name=None, snaplock_type=None, **options): """Creates a FlexVol volume synchronously.""" # NOTE(nahimsouza): In REST API, both FlexVol and FlexGroup volumes are # created asynchronously. However, we kept the synchronous process for # FlexVols to replicate the behavior from ZAPI and avoid changes in the # layers above. self.create_volume_async( [aggregate_name], volume_name, size_gb, is_flexgroup=False, thin_provisioned=thin_provisioned, snapshot_policy=snapshot_policy, language=language, max_files=max_files, snapshot_reserve=snapshot_reserve, volume_type=volume_type, qos_policy_group=qos_policy_group, encrypt=encrypt, adaptive_qos_policy_group=adaptive_qos_policy_group, mount_point_name=mount_point_name, snaplock_type=snaplock_type, **options) efficiency_policy = options.get('efficiency_policy', None) self.update_volume_efficiency_attributes( volume_name, dedup_enabled, compression_enabled, efficiency_policy=efficiency_policy ) if max_files is not None: self.set_volume_max_files(volume_name, max_files) if snaplock_type is not None: self.set_snaplock_attributes(volume_name, **options) @na_utils.trace def create_volume_async(self, aggregate_list, volume_name, size_gb, is_flexgroup=False, thin_provisioned=False, snapshot_policy=None, language=None, snapshot_reserve=None, volume_type='rw', qos_policy_group=None, encrypt=False, adaptive_qos_policy_group=None, auto_provisioned=False, mount_point_name=None, snaplock_type=None, **options): """Creates FlexGroup/FlexVol volumes. If the parameter `is_flexgroup` is False, the creation process is made synchronously to replicate ZAPI behavior for FlexVol creation. 
""" body = { 'size': size_gb * units.Gi, 'name': volume_name, } body['style'] = 'flexgroup' if is_flexgroup else 'flexvol' if aggregate_list and not auto_provisioned: body['aggregates'] = [{'name': aggr} for aggr in aggregate_list] body.update(self._get_create_volume_body( volume_name, thin_provisioned, snapshot_policy, language, snapshot_reserve, volume_type, qos_policy_group, encrypt, adaptive_qos_policy_group, mount_point_name, snaplock_type)) # NOTE(nahimsouza): When a volume is not a FlexGroup, volume creation # is made synchronously to replicate old ZAPI behavior. When ZAPI is # deprecated, this can be changed to be made asynchronously. wait_on_accepted = (not is_flexgroup) result = self.send_request('/storage/volumes', 'post', body=body, wait_on_accepted=wait_on_accepted) job_info = { 'jobid': result.get('job', {}).get('uuid', {}), # NOTE(caiquemello): remove error-code and error-message # when zapi is dropped. 'error-code': '', 'error-message': '' } return job_info @na_utils.trace def _get_create_volume_body(self, volume_name, thin_provisioned, snapshot_policy, language, snapshot_reserve, volume_type, qos_policy_group, encrypt, adaptive_qos_policy_group, mount_point_name, snaplock_type): """Builds the body to volume creation request.""" body = { 'type': volume_type, 'guarantee.type': ('none' if thin_provisioned else 'volume'), 'svm.name': self.connection.get_vserver() } if volume_type != 'dp': mount_point_name = mount_point_name or volume_name body['nas.path'] = f'/{mount_point_name}' if snapshot_policy is not None: body['snapshot_policy.name'] = snapshot_policy if language is not None: body['language'] = language if snapshot_reserve is not None: body['space.snapshot.reserve_percent'] = str(snapshot_reserve) if qos_policy_group is not None: body['qos.policy.name'] = qos_policy_group if adaptive_qos_policy_group is not None: body['qos.policy.name'] = adaptive_qos_policy_group if encrypt is True: if not self.features.FLEXVOL_ENCRYPTION: msg = 'Flexvol 
encryption is not supported on this backend.' raise exception.NetAppException(msg) else: body['encryption.enabled'] = 'true' else: body['encryption.enabled'] = 'false' if snaplock_type is not None: body['snaplock.type'] = snaplock_type return body @na_utils.trace def get_job_state(self, job_id): """Returns job state for a given job id.""" query = { 'uuid': job_id, 'fields': 'state' } result = self.send_request('/cluster/jobs/', 'get', query=query, enable_tunneling=False) job_info = result.get('records', []) if not self._has_records(result): msg = _('Could not find job with ID %(id)s.') msg_args = {'id': job_id} raise exception.NetAppException(msg % msg_args) elif len(job_info) > 1: msg = _('Could not find unique job for ID %(id)s.') msg_args = {'id': job_id} raise exception.NetAppException(msg % msg_args) return job_info[0]['state'] @na_utils.trace def get_volume_efficiency_status(self, volume_name): """Get dedupe & compression status for a volume.""" query = { 'efficiency.volume_path': f'/vol/{volume_name}', 'fields': 'efficiency.state,efficiency.compression' } dedupe = False compression = False try: response = self.send_request('/storage/volumes', 'get', query=query) if self._has_records(response): efficiency = response['records'][0]['efficiency'] dedupe = (efficiency['state'] == 'enabled') compression = (efficiency['compression'] != 'none') except netapp_api.api.NaApiError: msg = _('Failed to get volume efficiency status for %s.') LOG.error(msg, volume_name) return { 'dedupe': dedupe, 'compression': compression, } @na_utils.trace def update_volume_snapshot_policy(self, volume_name, snapshot_policy): """Set snapshot policy for the specified volume.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'snapshot_policy.name': snapshot_policy } # update snapshot policy self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def update_volume_efficiency_attributes(self, volume_name, dedup_enabled, 
compression_enabled, is_flexgroup=False, efficiency_policy=None): """Update dedupe & compression attributes to match desired values.""" efficiency_status = self.get_volume_efficiency_status(volume_name) # cDOT compression requires dedup to be enabled dedup_enabled = dedup_enabled or compression_enabled # enable/disable compression if needed if compression_enabled and not efficiency_status['compression']: self.enable_compression_async(volume_name) elif not compression_enabled and efficiency_status['compression']: self.disable_compression_async(volume_name) # enable/disable dedup if needed if dedup_enabled and not efficiency_status['dedupe']: self.enable_dedupe_async(volume_name) elif not dedup_enabled and efficiency_status['dedupe']: self.disable_dedupe_async(volume_name) self.apply_volume_efficiency_policy( volume_name, efficiency_policy=efficiency_policy) @na_utils.trace def enable_dedupe_async(self, volume_name): """Enable deduplication on FlexVol/FlexGroup volume asynchronously.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'efficiency': {'dedupe': 'background'} } # update volume efficiency self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def disable_dedupe_async(self, volume_name): """Disable deduplication on FlexVol/FlexGroup volume asynchronously.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'efficiency': {'dedupe': 'none'} } # update volume efficiency self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def enable_compression_async(self, volume_name): """Enable compression on FlexVol/FlexGroup volume asynchronously.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'efficiency': {'compression': 'background'} } # update volume efficiency self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def disable_compression_async(self, volume_name): 
"""Disable compression on FlexVol/FlexGroup volume asynchronously.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'efficiency': {'compression': 'none'} } # update volume efficiency self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def apply_volume_efficiency_policy(self, volume_name, efficiency_policy=None): if efficiency_policy: """Apply volume efficiency policy to FlexVol""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'efficiency': {'policy': efficiency_policy} } # update volume efficiency policy only if policy_name is provided self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def set_volume_max_files(self, volume_name, max_files, retry_allocated=False): """Set share file limit.""" try: volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'files.maximum': int(max_files) } self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) except netapp_api.api.NaApiError as e: if e.code != netapp_api.EREST_CANNOT_MODITY_SPECIFIED_FIELD: return if retry_allocated: alloc_files = self.get_volume_allocated_files(volume_name) new_max_files = alloc_files['used'] # no need to act if current max files are set to # allocated files if new_max_files == alloc_files['maximum']: return msg = _('Set higher max files %(new_max_files)s ' 'on %(vol)s. 
The current allocated inodes ' 'are larger than requested %(max_files)s.') msg_args = {'vol': volume_name, 'max_files': max_files, 'new_max_files': new_max_files} LOG.info(msg, msg_args) self.set_volume_max_files(volume_name, new_max_files, retry_allocated=False) else: raise exception.NetAppException(message=e.message) @na_utils.trace def get_volume_allocated_files(self, volume_name): """Get share allocated files.""" try: volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] query = { 'fields': 'files.maximum,files.used' } response = self.send_request(f'/storage/volumes/{uuid}', 'get', query=query) if self._has_records(response): return response['records'][0]['files'] except netapp_api.api.NaApiError: msg = _('Failed to get volume allocated files for %s.') LOG.error(msg, volume_name) return {'maximum': 0, 'used': 0} @na_utils.trace def set_volume_snapdir_access(self, volume_name, hide_snapdir): """Set volume snapshot directory visibility.""" try: volume = self._get_volume_by_args(vol_name=volume_name) except exception.NetAppException: msg = _('Could not find volume %s to set snapdir access') LOG.error(msg, volume_name) raise exception.SnapshotResourceNotFound(name=volume_name) uuid = volume['uuid'] body = { 'snapshot_directory_access_enabled': str(not hide_snapdir).lower() } self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def get_fpolicy_scopes(self, share_name, policy_name=None, extensions_to_include=None, extensions_to_exclude=None, shares_to_include=None): """Retrieve fpolicy scopes. :param policy_name: name of the policy associated with a scope. :param share_name: name of the share associated with the fpolicy scope. :param extensions_to_include: file extensions included for screening. Values should be provided as comma separated list :param extensions_to_exclude: file extensions excluded for screening. 
Values should be provided as comma separated list :param shares_to_include: list of shares to include for file access monitoring. :return: list of fpolicy scopes or empty list """ try: volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] except exception.NetAppException: LOG.debug('Could not find fpolicy. Share not found: %s.', share_name) return [] query = {} if policy_name: query['name'] = policy_name if shares_to_include: query['scope.include_shares'] = ','.join( [str(share) for share in shares_to_include]) if extensions_to_include: query['scope.include_extension'] = ','.join( [str(ext_include) for ext_include in extensions_to_include]) if extensions_to_exclude: query['scope.exclude_extension'] = ','.join( [str(ext_exclude) for ext_exclude in extensions_to_exclude]) result = self.send_request( f'/protocols/fpolicy/{svm_uuid}/policies', 'get', query=query) fpolicy_scopes = [] if self._has_records(result): for fpolicy_scope_result in result['records']: name = fpolicy_scope_result['name'] policy_scope = fpolicy_scope_result.get('scope') if policy_scope: ext_include = policy_scope.get('include_extension', []) ext_exclude = policy_scope.get('exclude_extension', []) shares_include = policy_scope.get('include_shares', []) fpolicy_scopes.append({ 'policy-name': name, 'file-extensions-to-include': ext_include, 'file-extensions-to-exclude': ext_exclude, 'shares-to-include': shares_include, }) return fpolicy_scopes @na_utils.trace def get_fpolicy_policies_status(self, share_name, policy_name=None, status='true'): """Get fpolicy polices status currently configured in the vserver·""" volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] query = {} if policy_name: query['name'] = policy_name query['enabled'] = status result = self.send_request( f'/protocols/fpolicy/{svm_uuid}/policies', 'get', query=query) fpolicy_status = [] if self._has_records(result): for fpolicy_status_result in result['records']: name 
= fpolicy_status_result['name'] status = fpolicy_status_result.get('enabled', '') seq = fpolicy_status_result.get('priority', '') fpolicy_status.append({ 'policy-name': name, 'status': strutils.bool_from_string(status), 'sequence-number': int(seq) }) return fpolicy_status @na_utils.trace def get_fpolicy_policies(self, share_name, policy_name=None, engine_name='native', event_names=[]): """Retrieve one or more fpolicy policies. :param policy_name: name of the policy to be retrieved :param engine_name: name of the engine :param share_name: name of the share associated with the fpolicy policy. :param event_names: list of event names that must be associated to the fpolicy policy :return: list of fpolicy policies or empty list """ volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] query = {} if policy_name: query['name'] = policy_name if engine_name: query['engine.name'] = engine_name if event_names: query['events'] = ','.join( [str(events) for events in event_names]) result = self.send_request( f'/protocols/fpolicy/{svm_uuid}/policies', 'get', query=query) fpolicy_policies = [] if self._has_records(result): for fpolicy_policies_result in result['records']: name = fpolicy_policies_result['name'] engine = (fpolicy_policies_result.get( 'engine', {}).get('name', '')) events = ([event['name'] for event in fpolicy_policies_result.get('events', [])]) fpolicy_policies.append({ 'policy-name': name, 'engine-name': engine, 'events': events }) return fpolicy_policies @na_utils.trace def get_fpolicy_events(self, share_name, event_name=None, protocol=None, file_operations=None): """Retrives a list of fpolicy events. :param event_name: name of the fpolicy event :param protocol: name of protocol. Possible values are: 'nfsv3', 'nfsv4' or 'cifs'. :param file_operations: name of file operations to be monitored. Values should be provided as list of strings. 
:returns List of policy events or empty list """ volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] query = {} if event_name: query['name'] = event_name if protocol: query['protocol'] = protocol if file_operations: query['fields'] = (','.join([str(f'file_operations.{file_op}') for file_op in file_operations])) result = self.send_request( f'/protocols/fpolicy/{svm_uuid}/events', 'get', query=query) fpolicy_events = [] if self._has_records(result): for fpolicy_events_result in result['records']: name = fpolicy_events_result['name'] proto = fpolicy_events_result.get('protocol', '') file_operations = [] operations = fpolicy_events_result.get('file_operations', {}) for key, value in operations.items(): if value: file_operations.append(key) fpolicy_events.append({ 'event-name': name, 'protocol': proto, 'file-operations': file_operations }) return fpolicy_events @na_utils.trace def create_fpolicy_event(self, share_name, event_name, protocol, file_operations): """Creates a new fpolicy policy event. :param event_name: name of the new fpolicy event :param protocol: name of protocol for which event is created. Possible values are: 'nfsv3', 'nfsv4' or 'cifs'. :param file_operations: name of file operations to be monitored. Values should be provided as list of strings. :param share_name: name of share associated with the vserver where the fpolicy event should be added. """ volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] body = { 'name': event_name, 'protocol': protocol, } for file_op in file_operations: body[f'file_operations.{file_op}'] = 'true' self.send_request(f'/protocols/fpolicy/{svm_uuid}/events', 'post', body=body) @na_utils.trace def delete_fpolicy_event(self, share_name, event_name): """Deletes a fpolicy policy event. :param event_name: name of the event to be deleted :param share_name: name of share associated with the vserver where the fpolicy event should be deleted. 
""" try: volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] except exception.NetAppException: msg = _("FPolicy event %s not found.") LOG.debug(msg, event_name) return try: self.send_request( f'/protocols/fpolicy/{svm_uuid}/events/{event_name}', 'delete') except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_ENTRY_NOT_FOUND: msg = _("FPolicy event %s not found.") LOG.debug(msg, event_name) else: raise exception.NetAppException(message=e.message) @na_utils.trace def delete_fpolicy_policy(self, share_name, policy_name): """Deletes a fpolicy policy. :param policy_name: name of the policy to be deleted. """ try: volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] except exception.NetAppException: msg = _("FPolicy policy %s not found.") LOG.debug(msg, policy_name) return try: self.send_request( f'/protocols/fpolicy/{svm_uuid}/policies/{policy_name}', 'delete') except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_ENTRY_NOT_FOUND: msg = _("FPolicy policy %s not found.") LOG.debug(msg, policy_name) else: raise exception.NetAppException(message=e.message) @na_utils.trace def enable_fpolicy_policy(self, share_name, policy_name, sequence_number): """Enables a specific named policy. :param policy_name: name of the policy to be enabled :param share_name: name of the share associated with the vserver and the fpolicy :param sequence_number: policy sequence number """ volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] body = { 'priority': sequence_number, } self.send_request( f'/protocols/fpolicy/{svm_uuid}/policies/{policy_name}', 'patch', body=body) @na_utils.trace def modify_fpolicy_scope(self, share_name, policy_name, shares_to_include=[], extensions_to_include=None, extensions_to_exclude=None): """Modify an existing fpolicy scope. :param policy_name: name of the policy associated to the scope. 
:param share_name: name of the share associated with the fpolicy scope. :param shares_to_include: list of shares to include for file access monitoring. :param extensions_to_include: file extensions included for screening. Values should be provided as comma separated list :param extensions_to_exclude: file extensions excluded for screening. Values should be provided as comma separated list """ volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] body = {} if policy_name: body['name'] = policy_name if shares_to_include: body['scope.include_shares'] = ','.join( [str(share) for share in shares_to_include]) if extensions_to_include: body['scope.include_extension'] = ','.join( [str(ext_include) for ext_include in extensions_to_include]) if extensions_to_exclude: body['scope.exclude_extension'] = ','.join( [str(ext_exclude) for ext_exclude in extensions_to_exclude]) self.send_request(f'/protocols/fpolicy/{svm_uuid}/policies/', 'patch', body=body) @na_utils.trace def create_fpolicy_policy_with_scope(self, fpolicy_name, share_name, events, engine='native', extensions_to_include=None, extensions_to_exclude=None): """Creates a fpolicy policy resource with scopes. :param fpolicy_name: name of the fpolicy policy to be created. :param share_name: name of the share to be associated with the new scope. :param events: list of event names for file access monitoring. :param engine: name of the engine to be used. :param extensions_to_include: file extensions included for screening. Values should be provided as comma separated list :param extensions_to_exclude: file extensions excluded for screening. 
Values should be provided as comma separated list """ volume = self._get_volume_by_args(vol_name=share_name) svm_uuid = volume['svm']['uuid'] body = { 'name': fpolicy_name, 'events.name': events, 'engine.name': engine, 'scope.include_shares': [share_name] } if extensions_to_include: body['scope.include_extension'] = extensions_to_include.split(',') if extensions_to_exclude: body['scope.exclude_extension'] = extensions_to_exclude.split(',') self.send_request(f'/protocols/fpolicy/{svm_uuid}/policies', 'post', body=body) @na_utils.trace def delete_nfs_export_policy(self, policy_name): """Delete NFS export policy.""" # Get policy id. query = { 'name': policy_name, } response = self.send_request('/protocols/nfs/export-policies', 'get', query=query) if not response.get('records'): return policy_id = response.get('records')[0]['id'] # Remove policy. self.send_request(f'/protocols/nfs/export-policies/{policy_id}', 'delete') @na_utils.trace def remove_cifs_share(self, share_name): """Remove CIFS share from the CIFS server.""" # Get SVM UUID. query = { 'name': self.vserver, 'fields': 'uuid' } res = self.send_request('/svm/svms', 'get', query=query) if not res.get('records'): msg = _('Vserver %s not found.') % self.vserver raise exception.NetAppException(msg) svm_id = res.get('records')[0]['uuid'] # Remove CIFS share. try: self.send_request(f'/protocols/cifs/shares/{svm_id}/{share_name}', 'delete') except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_ENTRY_NOT_FOUND: return raise @na_utils.trace def _unmount_volume(self, volume_name): """Unmounts a volume.""" # Get volume UUID. volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] # Unmount volume async operation. body = {"nas": {"path": ""}} self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace # TODO(felipe_rodrigues): remove the force parameter when ZAPI is dropped. 
def unmount_volume(self, volume_name, force=False, wait_seconds=30): """Unmounts a volume, retrying if a clone split is ongoing. NOTE(cknight): While unlikely to happen in normal operation, any client that tries to delete volumes immediately after creating volume clones is likely to experience failures if cDOT isn't quite ready for the delete. The volume unmount is the first operation in the delete path that fails in this case, and there is no proactive check we can use to reliably predict the failure. And there isn't a specific error code from volume-unmount, so we have to check for a generic error code plus certain language in the error code. It's ugly, but it works, and it's better than hard-coding a fixed delay. """ # Do the unmount, handling split-related errors with retries. retry_interval = 3 # seconds for retry in range(int(wait_seconds / retry_interval)): try: self._unmount_volume(volume_name) LOG.debug('Volume %s unmounted.', volume_name) return except netapp_api.api.NaApiError as e: if (e.code == netapp_api.EREST_UNMOUNT_FAILED_LOCK and 'job ID' in e.message): msg = ('Could not unmount volume %(volume)s due to ' 'ongoing volume operation: %(exception)s') msg_args = {'volume': volume_name, 'exception': e} LOG.warning(msg, msg_args) time.sleep(retry_interval) continue raise msg = _('Failed to unmount volume %(volume)s after ' 'waiting for %(wait_seconds)s seconds.') msg_args = {'volume': volume_name, 'wait_seconds': wait_seconds} LOG.error(msg, msg_args) raise exception.NetAppException(msg % msg_args) @na_utils.trace def offline_volume(self, volume_name): """Offlines a volume.""" # Get volume UUID. volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = {'state': 'offline'} self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def delete_volume(self, volume_name): """Deletes a volume.""" # Get volume UUID. 
volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] # delete volume async operation. self.send_request(f'/storage/volumes/{uuid}', 'delete') @na_utils.trace def qos_policy_group_get(self, qos_policy_group_name): """Checks if a QoS policy group exists.""" query = { 'name': qos_policy_group_name, 'fields': 'name,object_count,fixed.max_throughput_iops,' 'fixed.max_throughput_mbps,svm.name', } try: res = self.send_request('/storage/qos/policies', 'get', query=query) except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_NOT_AUTHORIZED: msg = _("Configured ONTAP login user cannot retrieve " "QoS policies.") LOG.error(msg) raise exception.NetAppException(msg) else: raise if not res.get('records'): msg = _('QoS %s not found.') % qos_policy_group_name raise exception.NetAppException(msg) qos_policy_group_info = res.get('records')[0] policy_info = { 'policy-group': qos_policy_group_info.get('name'), 'vserver': qos_policy_group_info.get('svm', {}).get('name'), 'num-workloads': int(qos_policy_group_info.get('object_count')), } iops = qos_policy_group_info.get('fixed', {}).get( 'max_throughput_iops') mbps = qos_policy_group_info.get('fixed', {}).get( 'max_throughput_mbps') if iops: policy_info['max-throughput'] = f'{iops}iops' elif mbps: policy_info['max-throughput'] = f'{mbps * 1024 * 1024}b/s' else: policy_info['max-throughput'] = None return policy_info @na_utils.trace def qos_policy_group_exists(self, qos_policy_group_name): """Checks if a QoS policy group exists.""" try: self.qos_policy_group_get(qos_policy_group_name) except exception.NetAppException: return False return True @na_utils.trace def qos_policy_group_rename(self, qos_policy_group_name, new_name): """Renames a QoS policy group.""" if qos_policy_group_name == new_name: return # Get QoS UUID. 
query = { 'name': qos_policy_group_name, 'fields': 'uuid', } res = self.send_request('/storage/qos/policies', 'get', query=query) if not res.get('records'): msg = _('QoS %s not found.') % qos_policy_group_name raise exception.NetAppException(msg) uuid = res.get('records')[0]['uuid'] body = {"name": new_name} self.send_request(f'/storage/qos/policies/{uuid}', 'patch', body=body) @na_utils.trace def remove_unused_qos_policy_groups(self): """Deletes all QoS policy groups that are marked for deletion.""" # Get QoS policies. query = { 'name': '%s*' % DELETED_PREFIX, 'fields': 'uuid,name', } res = self.send_request('/storage/qos/policies', 'get', query=query) for qos in res.get('records'): uuid = qos['uuid'] try: self.send_request(f'/storage/qos/policies/{uuid}', 'delete') except netapp_api.api.NaApiError as ex: msg = ('Could not delete QoS policy group %(qos_name)s. ' 'Details: %(ex)s') msg_args = {'qos_name': qos['name'], 'ex': ex} LOG.debug(msg, msg_args) @na_utils.trace def mark_qos_policy_group_for_deletion(self, qos_policy_group_name): """Soft delete backing QoS policy group for a manila share.""" # NOTE(gouthamr): ONTAP deletes storage objects asynchronously. As # long as garbage collection hasn't occurred, assigned QoS policy may # still be tagged "in use". So, we rename the QoS policy group using a # specific pattern and later attempt on a best effort basis to # delete any QoS policy groups matching that pattern. if self.qos_policy_group_exists(qos_policy_group_name): new_name = DELETED_PREFIX + qos_policy_group_name try: self.qos_policy_group_rename(qos_policy_group_name, new_name) except netapp_api.api.NaApiError as ex: msg = ('Rename failure in cleanup of cDOT QoS policy ' 'group %(name)s: %(ex)s') msg_args = {'name': qos_policy_group_name, 'ex': ex} LOG.warning(msg, msg_args) # Attempt to delete any QoS policies named "deleted_manila-*". 
self.remove_unused_qos_policy_groups() @na_utils.trace def qos_policy_group_modify(self, qos_policy_group_name, max_throughput): """Modifies a QoS policy group.""" query = { 'name': qos_policy_group_name, } body = {} value = max_throughput.lower() if 'iops' in value: value = value.replace('iops', '') value = int(value) body['fixed.max_throughput_iops'] = value body['fixed.max_throughput_mbps'] = 0 elif 'b/s' in value: value = value.replace('b/s', '') value = int(value) body['fixed.max_throughput_mbps'] = math.ceil(value / units.Mi) body['fixed.max_throughput_iops'] = 0 res = self.send_request('/storage/qos/policies', 'get', query=query) if not res.get('records'): msg = ('QoS %s not found.') % qos_policy_group_name raise exception.NetAppException(msg) uuid = res.get('records')[0]['uuid'] self.send_request(f'/storage/qos/policies/{uuid}', 'patch', body=body) @na_utils.trace def set_volume_size(self, volume_name, size_gb): """Set volume size.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'space.size': int(size_gb) * units.Gi } self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def set_volume_filesys_size_fixed(self, volume_name, filesys_size_fixed=False): """Set volume file system size fixed to true/false.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'space.filesystem_size_fixed': filesys_size_fixed } self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def create_snapshot(self, volume_name, snapshot_name, snapmirror_label=None): """Creates a volume snapshot.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'name': snapshot_name, } if snapmirror_label is not None: body['snapmirror_label'] = snapmirror_label self.send_request(f'/storage/volumes/{uuid}/snapshots', 'post', body=body) @na_utils.trace def is_flexgroup_supported(self): return self.features.FLEXGROUP @na_utils.trace def 
is_flexgroup_volume(self, volume_name): """Determines if the ONTAP volume is FlexGroup.""" query = { 'name': volume_name, 'fields': 'style' } result = self.send_request('/storage/volumes/', 'get', query=query) if not self._has_records(result): raise exception.StorageResourceNotFound(name=volume_name) vols = result.get('records', []) if len(vols) > 1: msg = _('More than one volume with volume name %(vol)s found.') msg_args = {'vol': volume_name} raise exception.NetAppException(msg % msg_args) return na_utils.is_style_extended_flexgroup(vols[0]['style']) @staticmethod def _is_busy_snapshot(snapshot_owners): """Checks if the owners means that the snapshot is busy. Snapshot is busy when any of the owners doesn't end with 'dependent'. """ for owner in snapshot_owners: if not owner.endswith('dependent'): return True return False @na_utils.trace def get_snapshot(self, volume_name, snapshot_name): """Gets a single snapshot.""" try: volume = self._get_volume_by_args(vol_name=volume_name) except exception.NetAppException: msg = _('Could not find volume %s to get snapshot') LOG.error(msg, volume_name) raise exception.SnapshotResourceNotFound(name=snapshot_name) uuid = volume['uuid'] query = { 'name': snapshot_name, 'fields': 'name,volume,create_time,owners' } result = self.send_request(f'/storage/volumes/{uuid}/snapshots', 'get', query=query) if not self._has_records(result): raise exception.SnapshotResourceNotFound(name=snapshot_name) snapshots = result.get('records', []) if len(snapshots) > 1: msg = _('Could not find unique snapshot %(snap)s on ' 'volume %(vol)s.') msg_args = {'snap': snapshot_name, 'vol': volume_name} raise exception.NetAppException(msg % msg_args) snapshot_info = snapshots[0] # NOTE(felipe_rodrigues): even requesting the field owners, it is not # sent back in case no owners. 
owners = set(snapshot_info.get('owners', [])) return { 'access-time': snapshot_info['create_time'], 'name': snapshot_info['name'], 'volume': snapshot_info['volume']['name'], 'owners': owners, 'busy': self._is_busy_snapshot(owners), 'locked_by_clone': SNAPSHOT_CLONE_OWNER in owners, } @na_utils.trace def get_clone_children_for_snapshot(self, volume_name, snapshot_name): """Returns volumes that are keeping a snapshot locked.""" query = { 'clone.parent_snapshot.name': snapshot_name, 'clone.parent_volume.name': volume_name, 'fields': 'name' } result = self.get_records('/storage/volumes', query=query) return [{'name': volume['name']} for volume in result.get('records', [])] @na_utils.trace def volume_clone_split_start(self, volume_name): """Begins splitting a clone from its parent.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'clone.split_initiated': 'true', } self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body, wait_on_accepted=False) @na_utils.trace def volume_clone_split_status(self, volume_name): """Status of splitting a clone from its parent.""" query = { 'name': volume_name, 'fields': 'clone.split_complete_percent' } try: res = self.send_request('/storage/volumes/', 'get', query=query) percent = res.get('clone.split_complete_percent') if not percent: return 100 return percent except netapp_api.NaApiError: msg = ("Failed to get clone split status for volume %s ") LOG.warning(msg, volume_name) return 100 @na_utils.trace def volume_clone_split_stop(self, volume_name): """Stops splitting a clone from its parent.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'clone.split_initiated': 'false', } try: self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body, wait_on_accepted=False) except netapp_api.NaApiError as e: if e.code in (netapp_api.EVOLUMEDOESNOTEXIST, netapp_api.EVOLNOTCLONE, netapp_api.EVOLOPNOTUNDERWAY): return raise @na_utils.trace def 
delete_snapshot(self, volume_name, snapshot_name, ignore_owners=False): """Deletes a volume snapshot.""" try: volume = self._get_volume_by_args(vol_name=volume_name) except exception.NetAppException: msg = _('Could not find volume %s to delete snapshot') LOG.warning(msg, volume_name) return uuid = volume['uuid'] query = { 'name': snapshot_name, 'fields': 'uuid' } snapshot = self.send_request(f'/storage/volumes/{uuid}/snapshots', 'get', query=query) if self._has_records(snapshot): snapshot_uuid = snapshot['records'][0]['uuid'] # NOTE(rfluisa): The CLI passthrough was used here, because the # REST API endpoint used to delete snapshots does not an equivalent # to the ignore_owners field if ignore_owners: query_cli = { 'vserver': self.vserver, 'volume': volume_name, 'snapshot': snapshot_name, 'ignore-owners': 'true' } self.send_request( '/private/cli/snapshot', 'delete', query=query_cli) else: self.send_request( f'/storage/volumes/{uuid}/snapshots/{snapshot_uuid}', 'delete') @na_utils.trace def soft_delete_snapshot(self, volume_name, snapshot_name): """Deletes a volume snapshot, or renames it if delete fails.""" try: self.delete_snapshot(volume_name, snapshot_name) except netapp_api.NaApiError: self.rename_snapshot(volume_name, snapshot_name, DELETED_PREFIX + snapshot_name) msg = _('Soft-deleted snapshot %(snapshot)s on volume %(volume)s.') msg_args = {'snapshot': snapshot_name, 'volume': volume_name} LOG.info(msg, msg_args) # Snapshots are locked by clone(s), so split the clone(s) snapshot_children = self.get_clone_children_for_snapshot( volume_name, snapshot_name) for snapshot_child in snapshot_children: self.volume_clone_split_start(snapshot_child['name']) @na_utils.trace def rename_snapshot(self, volume_name, snapshot_name, new_snapshot_name): """Renames the snapshot.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] query = { 'name': snapshot_name, } body = { 'name': new_snapshot_name, } 
self.send_request(f'/storage/volumes/{uuid}/snapshots', 'patch', query=query, body=body) @na_utils.trace def _get_soft_deleted_snapshots(self): """Returns non-busy, soft-deleted snapshots suitable for reaping.""" query = { 'name': DELETED_PREFIX + '*', 'fields': 'uuid,volume,owners,svm.name' } result = self.get_records('/storage/volumes/*/snapshots', query=query) snapshot_map = {} for snapshot_info in result.get('records', []): if self._is_busy_snapshot(snapshot_info['owners']): continue vserver = snapshot_info['svm']['name'] snapshot_list = snapshot_map.get(vserver, []) snapshot_list.append({ 'uuid': snapshot_info['uuid'], 'volume_uuid': snapshot_info['volume']['uuid'], }) snapshot_map[vserver] = snapshot_list return snapshot_map @na_utils.trace def prune_deleted_snapshots(self): """Deletes non-busy snapshots that were previously soft-deleted.""" deleted_snapshots_map = self._get_soft_deleted_snapshots() for vserver in deleted_snapshots_map: client = copy.deepcopy(self) client.set_vserver(vserver) for snapshot in deleted_snapshots_map[vserver]: try: vol_uuid = snapshot['volume_uuid'] snap_uuid = snapshot['uuid'] self.send_request(f'/storage/volumes/{vol_uuid}/snapshots/' f'{snap_uuid}', 'delete') except netapp_api.api.NaApiError: msg = _('Could not delete snapshot %(snap)s on ' 'volume %(volume)s.') msg_args = { 'snap': snapshot['uuid'], 'volume': snapshot['volume_uuid'], } LOG.exception(msg, msg_args) @na_utils.trace def snapshot_exists(self, snapshot_name, volume_name): """Checks if Snapshot exists for a specified volume.""" LOG.debug('Checking if snapshot %(snapshot)s exists for ' 'volume %(volume)s', {'snapshot': snapshot_name, 'volume': volume_name}) volume = self._get_volume_by_args(vol_name=volume_name, fields='uuid,state') if volume['state'] == 'offline': msg = _('Could not read information for snapshot %(name)s. 
' 'Volume %(volume)s is offline.') msg_args = { 'name': snapshot_name, 'volume': volume_name, } LOG.debug(msg, msg_args) raise exception.SnapshotUnavailable(msg % msg_args) query = {'name': snapshot_name} vol_uuid = volume['uuid'] result = self.send_request( f'/storage/volumes/{vol_uuid}/snapshots/', 'get', query=query) return self._has_records(result) @na_utils.trace def volume_has_luns(self, volume_name): """Checks if volume has LUNs.""" LOG.debug('Checking if volume %s has LUNs', volume_name) query = { 'location.volume.name': volume_name, } response = self.send_request('/storage/luns/', 'get', query=query) return self._has_records(response) @na_utils.trace def volume_has_junctioned_volumes(self, junction_path): """Checks if volume has volumes mounted beneath its junction path.""" if not junction_path: return False query = { 'nas.path': junction_path + '/*' } response = self.send_request('/storage/volumes/', 'get', query=query) return self._has_records(response) @na_utils.trace def set_volume_name(self, volume_name, new_volume_name): """Set volume name.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'name': new_volume_name } self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def mount_volume(self, volume_name, junction_path=None): """Mounts a volume on a junction path.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'nas.path': (junction_path if junction_path else '/%s' % volume_name) } try: self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) except netapp_api.api.NaApiError as e: # NOTE(rfluisa): This verification was added to keep the error code # compatible with the one that was returned by ZAPI if e.code == netapp_api.EREST_SNAPMIRROR_INITIALIZING: raise netapp_api.api.NaApiError(message=e.message, code=netapp_api.api.EAPIERROR) raise @na_utils.trace def get_volume_at_junction_path(self, junction_path): """Returns the volume with 
the specified junction path, if present.""" if not junction_path: return None query = { 'nas.path': junction_path, 'fields': 'name', } response = self.send_request('/storage/volumes/', 'get', query=query) if not self._has_records(response): return None vol = response.get('records')[0] volume = { 'name': vol.get('name'), } return volume @na_utils.trace def get_aggregate_for_volume(self, volume_name): """Get the name of the aggregate containing a volume.""" query = { 'name': volume_name, 'fields': 'aggregates', } res = self.send_request('/storage/volumes/', 'get', query=query) aggregate = res.get('aggregates') if not aggregate: msg = _('Could not find aggregate for volume %s.') raise exception.NetAppException(msg % volume_name) aggregate_size = len(res.get('aggregates')) if aggregate_size > 1: aggregate = [aggr.get('name') for aggr in res.get('aggregates')] return aggregate @na_utils.trace def get_volume_to_manage(self, aggregate_name, volume_name): """Get existing volume info to be managed.""" query = { 'name': volume_name, 'fields': 'name,aggregates.name,nas.path,name,type,style,' 'svm.name,qos.policy.name,space.size', 'aggregates.name': aggregate_name } response = self.send_request('/storage/volumes', 'get', query=query) if not self._has_records(response): return None res = response.get('records', [])[0] aggregate = '' aggr_list = [] aggregate_size = len(res.get('aggregates', [])) if aggregate_size == 1: aggregate = res.get('aggregates', [])[0].get('name', '') else: aggr_list = [aggr.get('name') for aggr in res.get('aggregates')] volume = { 'aggregate': aggregate, 'aggr-list': aggr_list, 'junction-path': res.get('nas', {}).get('path', ''), 'name': res.get('name'), 'type': res.get('type'), # NOTE(caiquemello): REST no longer uses flex or infinitevol as # styles. In onder to keep compatibility style is set to 'flex'. 
'style': 'flex', 'owning-vserver-name': res.get('svm', {}).get('name', ''), 'size': res.get('space', {}).get('size', 0), 'qos-policy-group-name': ( res.get('qos', {}).get('policy', {}).get('name', '')) } return volume @na_utils.trace def _parse_timestamp(self, time_str): """Parse timestamp string into a number.""" try: dt = datetime.fromisoformat(time_str) return dt.timestamp() except Exception: LOG.debug("Failed to parse timestamp: %s", time_str) raise @na_utils.trace def _get_snapmirrors(self, source_path=None, dest_path=None, source_vserver=None, source_volume=None, dest_vserver=None, dest_volume=None, list_destinations_only=False, enable_tunneling=True, desired_attributes=None): """Get a list of snapmirrors.""" fields = ['state', 'source.svm.name', 'source.path', 'destination.svm.name', 'destination.path', 'transfer.end_time', 'uuid', 'policy.type', 'transfer_schedule.name', 'transfer.state', 'last_transfer_type', 'transfer.bytes_transferred', 'healthy'] query = {} query['fields'] = ','.join(fields) if source_path: query['source.path'] = source_path else: query_src_vol = source_volume if source_volume else '*' query_src_vserver = source_vserver if source_vserver else '*' query['source.path'] = query_src_vserver + ':' + query_src_vol if dest_path: query['destination.path'] = dest_path else: query_dst_vol = dest_volume if dest_volume else '*' query_dst_vserver = dest_vserver if dest_vserver else '*' query['destination.path'] = query_dst_vserver + ':' + query_dst_vol if list_destinations_only: query['list_destinations_only'] = 'true' response = self.send_request( '/snapmirror/relationships', 'get', query=query, enable_tunneling=enable_tunneling) snapmirrors = [] for record in response.get('records', []): snapmirrors.append({ 'relationship-status': ( 'idle' if record.get('state') == 'snapmirrored' else record.get('state')), 'transferring-state': record.get('transfer', {}).get('state'), 'mirror-state': record.get('state'), 'schedule': ( 
record['transfer_schedule']['name'] if record.get('transfer_schedule') else None), 'source-vserver': record['source']['svm']['name'], 'source-volume': (record['source']['path'].split(':')[1] if record.get('source') else None), 'destination-vserver': record['destination']['svm']['name'], 'destination-volume': ( record['destination']['path'].split(':')[1] if record.get('destination') else None), 'last-transfer-end-timestamp': (self._parse_timestamp(record['transfer']['end_time']) if record.get('transfer', {}).get('end_time') else 0), 'uuid': record['uuid'], 'policy-type': record.get('policy', {}).get('type'), 'is-healthy': ( 'true' if record.get('healthy', {}) is True else 'false'), 'last-transfer-type': record.get('last_transfer_type', None), 'last-transfer-size': record.get('transfer', {}).get('bytes_transferred'), }) return snapmirrors @na_utils.trace def get_snapmirrors_svm(self, source_vserver=None, dest_vserver=None, desired_attributes=None): """Get all snapmirrors from specified SVMs source/destination.""" source_path = source_vserver + ':*' if source_vserver else None dest_path = dest_vserver + ':*' if dest_vserver else None return self.get_snapmirrors(source_path=source_path, dest_path=dest_path, desired_attributes=desired_attributes) @na_utils.trace def get_snapmirrors(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, desired_attributes=None, enable_tunneling=None, list_destinations_only=None): """Gets one or more SnapMirror relationships. Either the source or destination info may be omitted. Desired attributes exists only to keep consistency with ZAPI client signature and has no effect in the output. 
""" snapmirrors = self._get_snapmirrors( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, source_volume=source_volume, dest_vserver=dest_vserver, dest_volume=dest_volume, enable_tunneling=enable_tunneling, list_destinations_only=list_destinations_only) return snapmirrors @na_utils.trace def volume_has_snapmirror_relationships(self, volume): """Return True if snapmirror relationships exist for a given volume. If we have snapmirror control plane license, we can verify whether the given volume is part of any snapmirror relationships. """ try: # Check if volume is a source snapmirror volume snapmirrors = self.get_snapmirrors( source_vserver=volume['owning-vserver-name'], source_volume=volume['name']) # Check if volume is a destination snapmirror volume if not snapmirrors: snapmirrors = self.get_snapmirrors( dest_vserver=volume['owning-vserver-name'], dest_volume=volume['name']) has_snapmirrors = len(snapmirrors) > 0 except netapp_api.api.NaApiError: msg = ("Could not determine if volume %s is part of " "existing snapmirror relationships.") LOG.exception(msg, volume['name']) has_snapmirrors = False return has_snapmirrors @na_utils.trace def modify_volume(self, aggregate_name, volume_name, thin_provisioned=False, snapshot_policy=None, language=None, dedup_enabled=False, compression_enabled=False, max_files=None, qos_policy_group=None, hide_snapdir=None, autosize_attributes=None, adaptive_qos_policy_group=None, **options): """Update backend volume for a share as necessary. :param aggregate_name: either a list or a string. List for aggregate names where the FlexGroup resides, while a string for the aggregate name where FlexVol volume is. :param volume_name: name of the modified volume. :param thin_provisioned: volume is thin. :param snapshot_policy: policy of volume snapshot. :param language: language of the volume. :param dedup_enabled: is the deduplication enabled for the volume. 
:param compression_enabled: is the compression enabled for the volume. :param max_files: number of maximum files in the volume. :param qos_policy_group: name of the QoS policy. :param hide_snapdir: hide snapshot directory. :param autosize_attributes: autosize for the volume. :param adaptive_qos_policy_group: name of the adaptive QoS policy. """ body = { 'guarantee': {'type': 'none' if thin_provisioned else 'volume'} } if autosize_attributes: attributes = autosize_attributes body['autosize'] = { 'mode': attributes['mode'], 'grow_threshold': attributes['grow-threshold-percent'], 'shrink_threshold': attributes['shrink-threshold-percent'], 'maximum': attributes['maximum-size'], 'minimum': attributes['minimum-size'], } if language: body['language'] = language if max_files: body['files'] = {'maximum': max_files} if snapshot_policy: body['snapshot_policy'] = {'name': snapshot_policy} qos_policy_name = qos_policy_group or adaptive_qos_policy_group if qos_policy_name: body['qos'] = {'policy': {'name': qos_policy_name}} if hide_snapdir in (True, False): # Value of hide_snapdir needs to be inverted for ZAPI parameter body['snapshot_directory_access_enabled'] = ( str(not hide_snapdir).lower()) aggregates = None if isinstance(aggregate_name, list): is_flexgroup = True aggregates = ','.join(aggregate_name) else: is_flexgroup = False aggregates = aggregate_name volume = self._get_volume_by_args(vol_name=volume_name, aggregate_name=aggregates) self.send_request('/storage/volumes/' + volume['uuid'], 'patch', body=body) # Extract efficiency_policy from provisioning_options efficiency_policy = options.get('efficiency_policy', None) # Efficiency options must be handled separately self.update_volume_efficiency_attributes( volume_name, dedup_enabled, compression_enabled, is_flexgroup=is_flexgroup, efficiency_policy=efficiency_policy ) if self._is_snaplock_enabled_volume(volume_name): self.set_snaplock_attributes(volume_name, **options) @na_utils.trace def start_volume_move(self, 
volume_name, vserver, destination_aggregate, cutover_action='wait', encrypt_destination=None): """Moves a FlexVol across Vserver aggregates. Requires cluster-scoped credentials. """ self._send_volume_move_request( volume_name, vserver, destination_aggregate, cutover_action=cutover_action, encrypt_destination=encrypt_destination) @na_utils.trace def check_volume_move(self, volume_name, vserver, destination_aggregate, encrypt_destination=None): """Moves a FlexVol across Vserver aggregates. Requires cluster-scoped credentials. """ self._send_volume_move_request( volume_name, vserver, destination_aggregate, validation_only=True, encrypt_destination=encrypt_destination) @na_utils.trace def _send_volume_move_request(self, volume_name, vserver, destination_aggregate, cutover_action='wait', validation_only=False, encrypt_destination=None): """Send request to check if vol move is possible, or start it. :param volume_name: Name of the FlexVol to be moved. :param destination_aggregate: Name of the destination aggregate :param cutover_action: can have one of [cutover_wait]. 'cutover_wait' to go into cutover manually. :param validation_only: If set to True, only validates if the volume move is possible, does not trigger data copy. :param encrypt_destination: If set to True, it encrypts the Flexvol after the volume move is complete. """ body = { 'movement.destination_aggregate.name': destination_aggregate, } # NOTE(caiquemello): In REST 'cutover_action'was deprecated. Now the # equivalant behavior is represented by 'movement.state'. The # equivalent in ZAPI for 'defer_on_failure' is the default value # for 'movement.state' in REST. So, there is no need to set 'defer' in # the body. Remove this behavior when ZAPI is removed. 
if cutover_action != 'defer': body['movement.state'] = CUTOVER_ACTION_MAP[cutover_action] query = { 'name': volume_name, } if encrypt_destination is True: body['encryption.enabled'] = 'true' elif encrypt_destination is False: body['encryption.enabled'] = 'false' if validation_only: body['validate_only'] = 'true' self.send_request('/storage/volumes/', 'patch', query=query, body=body, wait_on_accepted=False) @na_utils.trace def get_nfs_export_policy_for_volume(self, volume_name): """Get the actual export policy for a share.""" query = { 'name': volume_name, 'fields': 'nas.export_policy.name' } response = self.send_request('/storage/volumes/', 'get', query=query) if not self._has_records(response): msg = _('Could not find export policy for volume %s.') raise exception.NetAppException(msg % volume_name) volume = response['records'][0] return volume['nas']['export_policy']['name'] @na_utils.trace def get_unique_export_policy_id(self, policy_name): """Get export policy uuid for a given policy name""" get_uuid = self.send_request( '/protocols/nfs/export-policies', 'get', query={'name': policy_name}) if not self._has_records(get_uuid): msg = _('Could not find export policy with name %s.') raise exception.NetAppException(msg % policy_name) uuid = get_uuid['records'][0]['id'] return uuid @na_utils.trace def _get_nfs_export_rule_indices(self, policy_name, client_match): """Get index of the rule within the export policy.""" uuid = self.get_unique_export_policy_id(policy_name) query = { 'clients.match': client_match, 'fields': 'clients.match,index' } response = self.send_request( f'/protocols/nfs/export-policies/{uuid}/rules', 'get', query=query) rules = response['records'] indices = [rule['index'] for rule in rules] indices.sort() return [str(i) for i in indices] @na_utils.trace def _add_nfs_export_rule(self, policy_name, client_match, readonly, auth_methods): """Add rule to NFS export policy.""" uuid = self.get_unique_export_policy_id(policy_name) body = { 'clients': 
[{'match': client_match}], 'ro_rule': [], 'rw_rule': [], 'superuser': [] } for am in auth_methods: body['ro_rule'].append(am) body['rw_rule'].append(am) body['superuser'].append(am) if readonly: # readonly, overwrite with auth method 'never' body['rw_rule'] = ['never'] self.send_request(f'/protocols/nfs/export-policies/{uuid}/rules', 'post', body=body) @na_utils.trace def _update_nfs_export_rule(self, policy_name, client_match, readonly, rule_index, auth_methods): """Update rule of NFS export policy.""" uuid = self.get_unique_export_policy_id(policy_name) body = { 'client_match': client_match, 'ro_rule': [], 'rw_rule': [], 'superuser': [] } for am in auth_methods: body['ro_rule'].append(am) body['rw_rule'].append(am) body['superuser'].append(am) if readonly: # readonly, overwrite with auth method 'never' body['rw_rule'] = ['never'] self.send_request( f'/protocols/nfs/export-policies/{uuid}/rules/{rule_index}', 'patch', body=body) @na_utils.trace def _remove_nfs_export_rules(self, policy_name, rule_indices): """Remove rule from NFS export policy.""" uuid = self.get_unique_export_policy_id(policy_name) for index in rule_indices: body = { 'index': index } try: self.send_request( f'/protocols/nfs/export-policies/{uuid}/rules/{index}', 'delete', body=body) except netapp_api.api.NaApiError as e: if e.code != netapp_api.EREST_ENTRY_NOT_FOUND: msg = _("Fail to delete export rule %s.") LOG.debug(msg, policy_name) raise @na_utils.trace def get_cifs_share_access(self, share_name): """Get CIFS share access rules.""" query = { 'name': share_name, } get_uuid = self.send_request('/protocols/cifs/shares', 'get', query=query) svm_uuid = get_uuid['records'][0]['svm']['uuid'] query = {'fields': 'user_or_group,permission'} result = self.send_request( f'/protocols/cifs/shares/{svm_uuid}/{share_name}/acls', 'get', query=query) rules = {} for records in result["records"]: user_or_group = records['user_or_group'] permission = records['permission'] rules[user_or_group] = permission return 
rules @na_utils.trace def add_cifs_share_access(self, share_name, user_name, readonly): """Add CIFS share access rules.""" query = { 'name': share_name } get_uuid = self.send_request('/protocols/cifs/shares', 'get', query=query) svm_uuid = get_uuid['records'][0]['svm']['uuid'] body = { 'permission': 'read' if readonly else 'full_control', 'user_or_group': user_name, } self.send_request( f'/protocols/cifs/shares/{svm_uuid}/{share_name}/acls', 'post', body=body) @na_utils.trace def modify_cifs_share_access(self, share_name, user_name, readonly): """Modify CIFS share access rules.""" query = { 'name': share_name } get_uuid = self.send_request('/protocols/cifs/shares', 'get', query=query) svm_uuid = get_uuid['records'][0]['svm']['uuid'] body = { 'permission': 'read' if readonly else 'full_control', } self.send_request( f'/protocols/cifs/shares/{svm_uuid}/{share_name}' f'/acls/{user_name}/{CIFS_USER_GROUP_TYPE}', 'patch', body=body) @na_utils.trace def check_snaprestore_license(self): """Check SnapRestore license for SVM scoped user.""" try: body = { 'restore_to.snapshot.name': '' } query = { # NOTE(felipe_rodrigues): Acting over all volumes to prevent # entry not found error. So, the error comes either by license # not installed or snapshot not specified. 'name': '*' } self.send_request('/storage/volumes', 'patch', body=body, query=query) except netapp_api.api.NaApiError as e: LOG.debug('Fake restore snapshot request failed: %s', e) if e.code == netapp_api.EREST_LICENSE_NOT_INSTALLED: return False elif e.code == netapp_api.EREST_SNAPSHOT_NOT_SPECIFIED: return True else: # unexpected error. raise e # since it passed an empty snapshot, it should never get here. 
msg = _("Caught an unexpected behavior: the fake restore to " "snapshot request using all volumes and empty string " "snapshot as argument has not failed.") LOG.exception(msg) raise exception.NetAppException(msg) @na_utils.trace def trigger_volume_move_cutover(self, volume_name, vserver, force=True): """Triggers the cut-over for a volume in data motion.""" query = { 'name': volume_name } body = { 'movement.state': 'cutover' } self.send_request('/storage/volumes/', 'patch', query=query, body=body) @na_utils.trace def abort_volume_move(self, volume_name, vserver): """Abort volume move operation.""" volume = self._get_volume_by_args(vol_name=volume_name) vol_uuid = volume['uuid'] self.send_request(f'/storage/volumes/{vol_uuid}', 'patch') @na_utils.trace def get_volume_move_status(self, volume_name, vserver): """Gets the current state of a volume move operation.""" fields = 'movement.percent_complete,movement.state' query = { 'name': volume_name, 'svm.name': vserver, 'fields': fields } result = self.send_request('/storage/volumes/', 'get', query=query) if not self._has_records(result): msg = ("Volume %(vol)s in Vserver %(server)s is not part of any " "data motion operations.") msg_args = {'vol': volume_name, 'server': vserver} raise exception.NetAppException(msg % msg_args) volume_move_info = result.get('records')[0] volume_movement = volume_move_info['movement'] status_info = { 'percent-complete': volume_movement.get('percent_complete', 0), 'estimated-completion-time': '', 'state': volume_movement['state'], 'details': '', 'cutover-action': '', 'phase': volume_movement['state'], } return status_info @na_utils.trace def list_snapmirror_snapshots(self, volume_name, newer_than=None): """Gets SnapMirror snapshots on a volume.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] query = { 'owners': 'snapmirror_dependent', } if newer_than: query['create_time'] = '>' + newer_than response = self.send_request( f'/storage/volumes/{uuid}/snapshots/', 
'get', query=query) return [snapshot_info['name'] for snapshot_info in response['records']] @na_utils.trace def abort_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume, clear_checkpoint=False): """Stops ongoing transfers for a SnapMirror relationship.""" self._abort_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume, clear_checkpoint=clear_checkpoint) @na_utils.trace def _abort_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, clear_checkpoint=False): """Stops ongoing transfers for a SnapMirror relationship.""" snapmirror = self.get_snapmirrors( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, source_volume=source_volume, dest_vserver=dest_vserver, dest_volume=dest_volume) if snapmirror: snapmirror_uuid = snapmirror[0]['uuid'] query = {'state': 'transferring'} transfers = self.send_request('/snapmirror/relationships/' + snapmirror_uuid + '/transfers/', 'get', query=query) if not transfers.get('records'): raise netapp_api.api.NaApiError( code=netapp_api.EREST_ENTRY_NOT_FOUND) body = {'state': 'hard_aborted' if clear_checkpoint else 'aborted'} for transfer in transfers['records']: transfer_uuid = transfer['uuid'] self.send_request('/snapmirror/relationships/' + snapmirror_uuid + '/transfers/' + transfer_uuid, 'patch', body=body) @na_utils.trace def delete_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Destroys a SnapMirror relationship between volumes.""" self._delete_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def _delete_snapmirror(self, source_vserver=None, source_volume=None, dest_vserver=None, dest_volume=None): """Deletes an SnapMirror relationship on destination.""" query_uuid = {} query_uuid['source.path'] = source_vserver + ':' 
+ source_volume query_uuid['destination.path'] = (dest_vserver + ':' + dest_volume) query_uuid['fields'] = 'uuid' response = self.send_request('/snapmirror/relationships/', 'get', query=query_uuid) records = response.get('records') if records: # 'destination_only' deletes the snapmirror on destination # but does not release it on source. query_delete = {"destination_only": "true"} snapmirror_uuid = records[0].get('uuid') self.send_request('/snapmirror/relationships/' + snapmirror_uuid, 'delete', query=query_delete) @na_utils.trace def get_snapmirror_destinations(self, source_path=None, dest_path=None, source_vserver=None, source_volume=None, dest_vserver=None, dest_volume=None, desired_attributes=None, enable_tunneling=None): """Gets one or more SnapMirror at source endpoint.""" snapmirrors = self.get_snapmirrors( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, source_volume=source_volume, dest_vserver=dest_vserver, dest_volume=dest_volume, # NOTE (nahimsouza): From ONTAP 9.12.1 the snapmirror destinations # can only be retrieved with no tunneling. enable_tunneling=False, list_destinations_only=True) return snapmirrors @na_utils.trace def release_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume, relationship_info_only=False): """Removes a SnapMirror relationship on the source endpoint.""" snapmirror_destinations_list = self.get_snapmirror_destinations( source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume, desired_attributes=['relationship-id']) if len(snapmirror_destinations_list) > 1: msg = ("Expected snapmirror relationship to be unique. " "List returned: %s." 
% snapmirror_destinations_list) raise exception.NetAppException(msg) query = {} if relationship_info_only: query["source_info_only"] = 'true' else: query["source_only"] = 'true' if len(snapmirror_destinations_list) == 1: uuid = snapmirror_destinations_list[0].get("uuid") self.send_request(f'/snapmirror/relationships/{uuid}', 'delete', query=query) @na_utils.trace def disable_fpolicy_policy(self, policy_name): """Disables a specific policy. :param policy_name: name of the policy to be disabled """ # Get SVM UUID. query = { 'name': self.vserver, 'fields': 'uuid' } res = self.send_request('/svm/svms', 'get', query=query, enable_tunneling=False) if not res.get('records'): msg = _('Vserver %s not found.') % self.vserver raise exception.NetAppException(msg) svm_id = res.get('records')[0]['uuid'] try: self.send_request(f'/protocols/fpolicy/{svm_id}/policies' f'/{policy_name}', 'patch') except netapp_api.api.NaApiError as e: if (e.code in [netapp_api.EREST_POLICY_ALREADY_DISABLED, netapp_api.EREST_FPOLICY_MODIF_POLICY_DISABLED, netapp_api.EREST_ENTRY_NOT_FOUND]): msg = _("FPolicy policy %s not found or already disabled.") LOG.debug(msg, policy_name) else: raise exception.NetAppException(message=e.message) @na_utils.trace def delete_fpolicy_scope(self, policy_name): """Delete fpolicy scope This method is not implemented since the REST API design does not allow for the deletion of the scope only. When deleting a fpolicy policy, the scope will be deleted along with it. 
""" pass @na_utils.trace def create_snapmirror_vol(self, source_vserver, source_volume, destination_vserver, destination_volume, relationship_type, schedule=None, policy=na_utils.MIRROR_ALL_SNAP_POLICY): """Creates a SnapMirror relationship between volumes.""" self._create_snapmirror(source_vserver, destination_vserver, source_volume=source_volume, destination_volume=destination_volume, schedule=schedule, policy=policy, relationship_type=relationship_type) @na_utils.trace def _create_snapmirror(self, source_vserver, destination_vserver, source_volume=None, destination_volume=None, schedule=None, policy=None, relationship_type=na_utils.DATA_PROTECTION_TYPE, identity_preserve=None, max_transfer_rate=None): """Creates a SnapMirror relationship.""" # NOTE(nahimsouza): Extended Data Protection (XDP) SnapMirror # relationships are the only relationship types that are supported # through the REST API. The arg relationship_type was kept due to # compatibility with ZAPI implementation. # NOTE(nahimsouza): The argument identity_preserve is always None # and it is not available on REST API. It was kept in the signature # due to compatilbity with ZAPI implementation. # TODO(nahimsouza): Tests what happens if volume is None. This happens # when a snapmirror from SVM is created. body = { 'source': { 'path': source_vserver + ':' + source_volume }, 'destination': { 'path': destination_vserver + ':' + destination_volume } } if schedule: body['transfer_schedule.name'] = schedule if policy: body['policy.name'] = policy if max_transfer_rate is not None: body['throttle'] = max_transfer_rate try: self.send_request('/snapmirror/relationships/', 'post', body=body) except netapp_api.api.NaApiError as e: if e.code != netapp_api.EREST_ERELATION_EXISTS: LOG.debug('Failed to create snapmirror. Error: %s. 
Code: %s', e.message, e.code) raise def _set_snapmirror_state(self, state, source_path, destination_path, source_vserver, source_volume, destination_vserver, destination_volume, wait_result=True, schedule=None): """Change the snapmirror state between two volumes.""" snapmirror = self.get_snapmirrors(source_path=source_path, dest_path=destination_path, source_vserver=source_vserver, source_volume=source_volume, dest_vserver=destination_vserver, dest_volume=destination_volume) if not snapmirror: msg = _('Failed to get information about relationship between ' 'source %(src_vserver)s:%(src_volume)s and ' 'destination %(dst_vserver)s:%(dst_volume)s.') % { 'src_vserver': source_vserver, 'src_volume': source_volume, 'dst_vserver': destination_vserver, 'dst_volume': destination_volume} raise na_utils.NetAppDriverException(msg) uuid = snapmirror[0]['uuid'] body = {} if state: body.update({'state': state}) if schedule: body.update({"transfer_schedule": {'name': schedule}}) result = self.send_request(f'/snapmirror/relationships/{uuid}', 'patch', body=body, wait_on_accepted=wait_result) job = result['job'] job_info = { 'operation-id': None, 'status': None, 'jobid': job.get('uuid'), 'error-code': None, 'error-message': None, 'relationship-uuid': uuid, } return job_info @na_utils.trace def initialize_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume, source_snapshot=None, transfer_priority=None): """Initializes a SnapMirror relationship between volumes.""" return self._initialize_snapmirror( source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume, source_snapshot=source_snapshot, transfer_priority=transfer_priority) @na_utils.trace def _initialize_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, source_snapshot=None, transfer_priority=None): """Initializes a SnapMirror relationship.""" # NOTE(nahimsouza): The args 
source_snapshot and transfer_priority are # always None and they are not available on REST API, they were # kept in the signature due to compatilbity with ZAPI implementation. return self._set_snapmirror_state( 'snapmirrored', source_path, dest_path, source_vserver, source_volume, dest_vserver, dest_volume, wait_result=False) @na_utils.trace def modify_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume, schedule=None, policy=None, tries=None, max_transfer_rate=None): """Modifies a SnapMirror relationship between volumes.""" return self._modify_snapmirror( source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume, schedule=schedule) @na_utils.trace def _modify_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, schedule=None): """Modifies a SnapMirror relationship.""" return self._set_snapmirror_state( None, source_path, dest_path, source_vserver, source_volume, dest_vserver, dest_volume, wait_result=False, schedule=schedule) @na_utils.trace def create_volume_clone(self, volume_name, parent_volume_name, parent_snapshot_name=None, qos_policy_group=None, adaptive_qos_policy_group=None, mount_point_name=None, **options): """Create volume clone in the same aggregate as parent volume.""" body = { 'name': volume_name, 'clone.parent_volume.name': parent_volume_name, 'clone.parent_snapshot.name': parent_snapshot_name, 'nas.path': '/%s' % (mount_point_name or volume_name), 'clone.is_flexclone': 'true', 'svm.name': self.connection.get_vserver(), } self.send_request('/storage/volumes', 'post', body=body) # NOTE(nahimsouza): QoS policy can not be set during the cloning # process, so we need to make a separate request. 
if qos_policy_group is not None: volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'qos.policy.name': qos_policy_group, } self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) if adaptive_qos_policy_group is not None: self.set_qos_adaptive_policy_group_for_volume( volume_name, adaptive_qos_policy_group) @na_utils.trace def quiesce_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Disables future transfers to a SnapMirror destination.""" self._quiesce_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def _quiesce_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Disables future transfers to a SnapMirror destination.""" snapmirror = self.get_snapmirrors( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, source_volume=source_volume, dest_vserver=dest_vserver, dest_volume=dest_volume) if snapmirror: uuid = snapmirror[0]['uuid'] body = {'state': 'paused'} self.send_request(f'/snapmirror/relationships/{uuid}', 'patch', body=body) @na_utils.trace def break_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Breaks a data protection SnapMirror relationship.""" self._break_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def _break_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Breaks a data protection SnapMirror relationship.""" interval = 2 retries = (10 / interval) @utils.retry(netapp_api.NaRetryableError, interval=interval, retries=retries, backoff_rate=1) def _waiter(): snapmirror = self.get_snapmirrors( source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, 
source_volume=source_volume, dest_vserver=dest_vserver, dest_volume=dest_volume) snapmirror_state = snapmirror[0].get('transferring-state') if snapmirror_state == 'success': uuid = snapmirror[0]['uuid'] body = {'state': 'broken_off'} self.send_request(f'/snapmirror/relationships/{uuid}', 'patch', body=body) return else: message = 'Waiting for transfer state to be SUCCESS.' code = '' raise netapp_api.NaRetryableError(message=message, code=code) try: return _waiter() except netapp_api.NaRetryableError: msg = _("Transfer state did not reach the expected state. Retries " "exhausted. Aborting.") raise na_utils.NetAppDriverException(msg) @na_utils.trace def resume_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Resume a SnapMirror relationship if it is quiesced.""" self._resume_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def resync_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Resync a SnapMirror relationship between volumes.""" self._resync_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def _resume_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Resume a SnapMirror relationship if it is quiesced.""" response = self.get_snapmirrors(source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) if not response: # NOTE(nahimsouza): As ZAPI returns this error code, it was kept # to avoid changes in the layer above. 
raise netapp_api.api.NaApiError( code=netapp_api.api.EOBJECTNOTFOUND) snapmirror_uuid = response[0]['uuid'] snapmirror_policy = response[0]['policy-type'] body_resync = {} if snapmirror_policy == 'async': body_resync['state'] = 'snapmirrored' elif snapmirror_policy == 'sync': body_resync['state'] = 'in_sync' self.send_request('/snapmirror/relationships/' + snapmirror_uuid, 'patch', body=body_resync, wait_on_accepted=False) @na_utils.trace def _resync_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Resync a SnapMirror relationship.""" # We reuse the resume operation for resync since both are handled in # the same way in the REST API, by setting the snapmirror relationship # to the snapmirrored state. self._resume_snapmirror(source_path, dest_path, source_vserver, dest_vserver, source_volume, dest_volume) @na_utils.trace def add_nfs_export_rule(self, policy_name, client_match, readonly, auth_methods): """Add rule to NFS export policy.""" rule_indices = self._get_nfs_export_rule_indices(policy_name, client_match) if not rule_indices: self._add_nfs_export_rule(policy_name, client_match, readonly, auth_methods) else: # Update first rule and delete the rest self._update_nfs_export_rule( policy_name, client_match, readonly, rule_indices.pop(0), auth_methods) self._remove_nfs_export_rules(policy_name, rule_indices) @na_utils.trace def set_qos_policy_group_for_volume(self, volume_name, qos_policy_group_name): """Set QoS policy group for volume.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] body = { 'qos.policy.name': qos_policy_group_name } self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def update_snapmirror_vol(self, source_vserver, source_volume, dest_vserver, dest_volume): """Schedules a snapmirror update between volumes.""" self._update_snapmirror(source_vserver=source_vserver, dest_vserver=dest_vserver, 
source_volume=source_volume, dest_volume=dest_volume) @na_utils.trace def _update_snapmirror(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None): """Update a snapmirror relationship asynchronously.""" snapmirrors = self.get_snapmirrors(source_path=source_path, dest_path=dest_path, source_vserver=source_vserver, dest_vserver=dest_vserver, source_volume=source_volume, dest_volume=dest_volume) if not snapmirrors: msg = _('Failed to get snapmirror relationship information') raise na_utils.NetAppDriverException(msg) snapmirror_uuid = snapmirrors[0]['uuid'] # NOTE(nahimsouza): A POST with an empty body starts the update # snapmirror operation. try: self.send_request('/snapmirror/relationships/' + snapmirror_uuid + '/transfers/', 'post', wait_on_accepted=False) except netapp_api.api.NaApiError as e: transfer_in_progress = 'Another transfer is in progress' if (e.code == netapp_api.EREST_SNAPMIRROR_NOT_INITIALIZED and transfer_in_progress in e.message): # NOTE (nahimsouza): Raise this message to keep compatibility # with ZAPI and avoid change the driver layer. raise netapp_api.api.NaApiError(message='not initialized', code=netapp_api.api.EAPIERROR) if not (e.code == netapp_api.EREST_UPDATE_SNAPMIRROR_FAILED and transfer_in_progress in e.message): raise @na_utils.trace def get_cluster_name(self): """Gets cluster name.""" result = self.send_request('/cluster', 'get', enable_tunneling=False) return result.get('name') @na_utils.trace def check_volume_clone_split_completed(self, volume_name): """Check if volume clone split operation already finished.""" volume = self._get_volume_by_args(vol_name=volume_name, fields='clone.is_flexclone') return volume['clone']['is_flexclone'] is False @na_utils.trace def rehost_volume(self, volume_name, vserver, destination_vserver): """Rehosts a volume from one Vserver into another Vserver. :param volume_name: Name of the FlexVol to be rehosted. 
:param vserver: Source Vserver name to which target volume belongs. :param destination_vserver: Destination Vserver name where target volume must reside after successful volume rehost operation. """ # TODO(raffaelacunha): As soon NetApp REST API supports "volume_rehost" # the current endpoint (using CLI passthrough) must be replaced. body = { "vserver": vserver, "volume": volume_name, "destination_vserver": destination_vserver } self.send_request('/private/cli/volume/rehost', 'post', body=body) @na_utils.trace def get_net_options(self): """Retrives the IPv6 support.""" return { 'ipv6-enabled': True, } @na_utils.trace def set_qos_adaptive_policy_group_for_volume(self, volume_name, qos_policy_group_name): """Set QoS adaptive policy group for volume.""" # NOTE(renanpiranguinho): For REST API, adaptive QoS is set the same # way as normal QoS. self.set_qos_policy_group_for_volume(volume_name, qos_policy_group_name) def get_performance_counter_info(self, object_name, counter_name): """Gets info about one or more Data ONTAP performance counters.""" # NOTE(nahimsouza): This conversion is nedeed because different names # are used in ZAPI and we want to avoid changes in the driver for now. 
        # Map ZAPI-era counter names to their REST API equivalents; names
        # not in this map are passed through unchanged.
        rest_counter_names = {
            'domain_busy': 'domain_busy_percent',
            'processor_elapsed_time': 'elapsed_time',
            'avg_processor_busy': 'average_processor_busy_percent',
        }

        rest_counter_name = counter_name
        if counter_name in rest_counter_names:
            rest_counter_name = rest_counter_names[counter_name]

        # Get counter table info
        query = {
            'counter_schemas.name': rest_counter_name,
            'fields': 'counter_schemas.*'
        }

        try:
            table = self.send_request(
                f'/cluster/counter/tables/{object_name}', 'get', query=query)

            name = counter_name  # use the original name (ZAPI compatible)
            base_counter = table['counter_schemas'][0]['denominator']['name']

            query = {
                'counters.name': rest_counter_name,
                'fields': 'counters.*'
            }
            response = self.send_request(
                f'/cluster/counter/tables/{object_name}/rows',
                'get', query=query, enable_tunneling=False)

            table_rows = response.get('records', [])
            labels = []
            if len(table_rows) != 0:
                # Labels come from the first row's first counter entry.
                labels = table_rows[0]['counters'][0].get('labels', [])

            # NOTE(nahimsouza): Values have a different format on REST API
            # and we want to keep compatibility with ZAPI for a while
            if object_name == 'wafl' and counter_name == 'cp_phase_times':
                # discard the prefix 'cp_'
                labels = [label[3:] for label in labels]

            return {
                'name': name,
                'labels': labels,
                'base-counter': base_counter,
            }
        except netapp_api.api.NaApiError:
            # Any API failure is surfaced as "counter not found" to callers.
            raise exception.NotFound(_('Counter %s not found') % counter_name)

    def get_performance_instance_uuids(self, object_name, node_name):
        """Get UUIDs of performance instances for a cluster node.

        :param object_name: name of the counter table to query.
        :param node_name: node whose instances are wanted; row ids are
            prefixed with '<node_name>:'.
        :returns: list of row id strings for the given node.
        """
        query = {
            'id': node_name + ':*',
        }
        response = self.send_request(
            f'/cluster/counter/tables/{object_name}/rows',
            'get', query=query, enable_tunneling=False)

        records = response.get('records', [])

        uuids = []
        for record in records:
            uuids.append(record['id'])

        return uuids

    def get_performance_counters(self, object_name, instance_uuids,
                                 counter_names):
        """Gets more cDOT performance counters."""
        # NOTE(nahimsouza): This conversion is needed because different names
        # are used in ZAPI and we want to avoid
changes in the driver for now. rest_counter_names = { 'domain_busy': 'domain_busy_percent', 'processor_elapsed_time': 'elapsed_time', 'avg_processor_busy': 'average_processor_busy_percent', } zapi_counter_names = { 'domain_busy_percent': 'domain_busy', 'elapsed_time': 'processor_elapsed_time', 'average_processor_busy_percent': 'avg_processor_busy', } for i in range(len(counter_names)): if counter_names[i] in rest_counter_names: counter_names[i] = rest_counter_names[counter_names[i]] query = { 'id': '|'.join(instance_uuids), 'counters.name': '|'.join(counter_names), 'fields': 'id,counter_table.name,counters.*', } response = self.send_request( f'/cluster/counter/tables/{object_name}/rows', 'get', query=query) counter_data = [] for record in response.get('records', []): for counter in record['counters']: counter_name = counter['name'] # Reverts the name conversion if counter_name in zapi_counter_names: counter_name = zapi_counter_names[counter_name] counter_value = '' if counter.get('value'): counter_value = counter.get('value') elif counter.get('values'): # NOTE(nahimsouza): Conversion made to keep compatibility # with old ZAPI format values = counter.get('values') counter_value = ','.join([str(v) for v in values]) counter_data.append({ 'instance-name': record['counter_table']['name'], 'instance-uuid': record['id'], 'node-name': record['id'].split(':')[0], 'timestamp': int(time.time()), counter_name: counter_value, }) return counter_data @na_utils.trace def _list_vservers(self): """Get the names of vservers present""" query = { 'fields': 'name', } response = self.send_request('/svm/svms', 'get', query=query, enable_tunneling=False) return [svm['name'] for svm in response.get('records', [])] @na_utils.trace def _get_ems_log_destination_vserver(self): """Returns the best vserver destination for EMS messages.""" # NOTE(nahimsouza): Differently from ZAPI, only 'data' SVMs can be # managed by the SVM REST APIs - that's why the vserver type is not # specified. 
vservers = self._list_vservers() if vservers: return vservers[0] raise exception.NotFound("No Vserver found to receive EMS messages.") @na_utils.trace def send_ems_log_message(self, message_dict): """Sends a message to the Data ONTAP EMS log.""" body = { 'computer_name': message_dict['computer-name'], 'event_source': message_dict['event-source'], 'app_version': message_dict['app-version'], 'category': message_dict['category'], 'severity': 'notice', 'autosupport_required': message_dict['auto-support'] == 'true', 'event_id': message_dict['event-id'], 'event_description': message_dict['event-description'], } bkp_connection = copy.copy(self.connection) bkp_timeout = self.connection.get_timeout() bkp_vserver = self.vserver self.connection.set_timeout(25) try: # TODO(nahimsouza): Vserver is being set to replicate the ZAPI # behavior, but need to check if this could be removed in REST API self.connection.set_vserver( self._get_ems_log_destination_vserver()) self.send_request('/support/ems/application-logs', 'post', body=body) LOG.debug('EMS executed successfully.') except netapp_api.api.NaApiError as e: LOG.warning('Failed to invoke EMS. 
%s', e) finally: # Restores the data timeout = ( bkp_timeout if bkp_timeout is not None else DEFAULT_TIMEOUT) self.connection = copy.copy(bkp_connection) self.connection.set_timeout(timeout) self.connection.set_vserver(bkp_vserver) @na_utils.trace def _get_deleted_nfs_export_policies(self): """Get soft deleted NFS export policies.""" query = { 'name': DELETED_PREFIX + '*', 'fields': 'name,svm.name', } response = self.send_request('/protocols/nfs/export-policies', 'get', query=query) policy_map = {} for record in response['records']: vserver = record['svm']['name'] policies = policy_map.get(vserver, []) policies.append(record['name']) policy_map[vserver] = policies return policy_map @na_utils.trace def prune_deleted_nfs_export_policies(self): """Delete export policies that were marked for deletion.""" deleted_policy_map = self._get_deleted_nfs_export_policies() for vserver in deleted_policy_map: client = copy.copy(self) client.connection = copy.copy(self.connection) client.connection.set_vserver(vserver) for policy in deleted_policy_map[vserver]: try: client.delete_nfs_export_policy(policy) except netapp_api.api.NaApiError: LOG.debug('Could not delete export policy %s.', policy) @na_utils.trace def get_nfs_config_default(self, desired_args=None): """Gets the default NFS config with the desired params""" query = {'fields': 'transport.*'} if self.vserver: query['svm.name'] = self.vserver response = self.send_request('/protocols/nfs/services/', 'get', query=query) # NOTE(nahimsouza): Default values to replicate ZAPI behavior when # response is empty. Also, REST API does not have an equivalent to # 'udp-max-xfer-size', so the default is always returned. 
        # Defaults replicate ZAPI behavior when no NFS service is returned.
        nfs_info = {
            'tcp-max-xfer-size': str(DEFAULT_TCP_MAX_XFER_SIZE),
            'udp-max-xfer-size': str(DEFAULT_UDP_MAX_XFER_SIZE),
        }
        records = response.get('records', [])
        if records:
            nfs_info['tcp-max-xfer-size'] = (
                str(records[0]['transport']['tcp_max_transfer_size']))

        return nfs_info

    @na_utils.trace
    def create_kerberos_realm(self, security_service):
        """Creates Kerberos realm on cluster.

        :param security_service: dict with 'server' (KDC IP) and 'domain';
            the realm name is the upper-cased domain.
        :raises NetAppException: if creation fails for any reason other
            than the realm already existing (duplicates are tolerated).
        """
        body = {
            'comment': '',
            'kdc.ip': security_service['server'],
            'kdc.port': '88',
            'kdc.vendor': 'other',
            'name': security_service['domain'].upper(),
        }
        try:
            self.send_request('/protocols/nfs/kerberos/realms', 'post',
                              body=body)
        except netapp_api.api.NaApiError as e:
            if e.code == netapp_api.EREST_DUPLICATE_ENTRY:
                # Realm already configured for this domain: treat as success.
                LOG.debug('Kerberos realm config already exists.')
            else:
                msg = _('Failed to create Kerberos realm. %s')
                raise exception.NetAppException(msg % e.message)

    @na_utils.trace
    def configure_kerberos(self, security_service, vserver_name):
        """Configures Kerberos for NFS on Vserver.

        Configures DNS first, then enables Kerberos (with a single service
        principal name) on every network interface of the Vserver.

        :raises NetAppException: if the Vserver has no LIFs configured.
        """
        self.configure_dns(security_service, vserver_name=vserver_name)
        spn = self._get_kerberos_service_principal_name(
            security_service, vserver_name)

        lifs = self.get_network_interfaces()
        if not lifs:
            msg = _("Cannot set up Kerberos. There are no LIFs configured.")
            raise exception.NetAppException(msg)

        for lif in lifs:
            body = {
                'password': security_service['password'],
                'user': security_service['user'],
                'interface.name': lif['interface-name'],
                'enabled': True,
                'spn': spn
            }
            interface_uuid = lif['uuid']
            self.send_request(
                f'/protocols/nfs/kerberos/interfaces/{interface_uuid}',
                'patch', body=body)

    @na_utils.trace
    def _get_kerberos_service_principal_name(self, security_service,
                                             vserver_name):
        """Build Kerberos service principal name.

        Format: nfs/<vserver-name-with-dashes>.<domain>@<DOMAIN-UPPERCASE>
        (underscores in the vserver name are replaced with dashes).
        """
        return ('nfs/' + vserver_name.replace('_', '-') + '.'
                + security_service['domain'] + '@'
                + security_service['domain'].upper())

    @na_utils.trace
    def _get_cifs_server_name(self, vserver_name):
        """Build CIFS server name."""
        # 'cifs-server' is CIFS Server NetBIOS Name, max length is 15.
# Should be unique within each domain (data['domain']). # Cut to 15 char with begin and end, attempt to make valid DNS hostname cifs_server = (vserver_name[0:8] + '-' + vserver_name[-6:]).replace('_', '-').upper() return cifs_server @na_utils.trace def configure_ldap(self, security_service, timeout=30, vserver_name=None): """Configures LDAP on Vserver.""" self._create_ldap_client(security_service, vserver_name=vserver_name) @na_utils.trace def configure_active_directory(self, security_service, vserver_name, aes_encryption): """Configures AD on Vserver.""" self.configure_dns(security_service, vserver_name=vserver_name) self.configure_cifs_aes_encryption(vserver_name, aes_encryption) self.set_preferred_dc(security_service, vserver_name) cifs_server = self._get_cifs_server_name(vserver_name) body = { 'ad_domain.user': security_service['user'], 'ad_domain.password': security_service['password'], 'force': 'true', 'name': cifs_server, 'ad_domain.fqdn': security_service['domain'], } if security_service['ou'] is not None: body['ad_domain.organizational_unit'] = security_service['ou'] try: LOG.debug("Trying to setup CIFS server with data: %s", body) self.send_request('/protocols/cifs/services', 'post', body=body) except netapp_api.api.NaApiError as e: credential_msg = "could not authenticate" privilege_msg = "insufficient access" if (e.code == netapp_api.api.EAPIERROR and ( credential_msg in e.message.lower() or privilege_msg in e.message.lower())): auth_msg = _("Failed to create CIFS server entry. " "Please double check your user credentials " "or privileges. %s") raise exception.SecurityServiceFailedAuth(auth_msg % e.message) msg = _("Failed to create CIFS server entry. 
%s") raise exception.NetAppException(msg % e.message) @na_utils.trace def _get_unique_svm_by_name(self, vserver_name=None): """Get the specified SVM UUID.""" query = { 'name': vserver_name if vserver_name else self.vserver, 'fields': 'uuid' } response = self.send_request('/svm/svms', 'get', query=query) if not response.get('records'): msg = ('Vserver %s not found.') % self.vserver raise exception.NetAppException(msg) svm_uuid = response['records'][0]['uuid'] return svm_uuid @na_utils.trace def get_dns_config(self, vserver_name=None): """Read DNS servers and domains currently configured in the vserver·""" svm_uuid = self._get_unique_svm_by_name(vserver_name) try: result = self.send_request(f'/name-services/dns/{svm_uuid}', 'get') except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_ENTRY_NOT_FOUND: return {} msg = ("Failed to retrieve DNS configuration. %s") raise exception.NetAppException(msg % e.message) dns_config = {} dns_info = result.get('dynamic_dns', {}) dns_config['dns-state'] = dns_info.get('enabled', '') dns_config['domains'] = result.get('domains', []) dns_config['dns-ips'] = result.get('servers', []) return dns_config @na_utils.trace def configure_dns(self, security_service, vserver_name=None): """Configure DNS address and servers for a vserver.""" body = { 'domains': [], 'servers': [] } # NOTE(dviroel): Read the current dns configuration and merge with the # new one. This scenario is expected when 2 security services provide # a DNS configuration, like 'active_directory' and 'ldap'. 
current_dns_config = self.get_dns_config(vserver_name=vserver_name) domains = set(current_dns_config.get('domains', [])) dns_ips = set(current_dns_config.get('dns-ips', [])) svm_uuid = self._get_unique_svm_by_name(vserver_name) domains.add(security_service['domain']) for domain in domains: body['domains'].append(domain) for dns_ip in security_service['dns_ip'].split(','): dns_ips.add(dns_ip.strip()) body['servers'] = [] for dns_ip in sorted(dns_ips): body['servers'].append(dns_ip) try: if current_dns_config: self.send_request(f'/name-services/dns/{svm_uuid}', 'patch', body=body) else: self.send_request('/name-services/dns', 'post', body=body) except netapp_api.api.NaApiError as e: msg = _("Failed to configure DNS. %s") raise exception.NetAppException(msg % e.message) @na_utils.trace def setup_security_services(self, security_services, vserver_client, vserver_name, aes_encryption, timeout=30): """Setup SVM security services.""" body = { 'nsswitch.namemap': ['ldap', 'files'], 'nsswitch.group': ['ldap', 'files'], 'nsswitch.netgroup': ['ldap', 'files'], 'nsswitch.passwd': ['ldap', 'files'], } svm_uuid = self._get_unique_svm_by_name(vserver_name) self.send_request(f'/svm/svms/{svm_uuid}', 'patch', body=body) for security_service in security_services: if security_service['type'].lower() == 'ldap': vserver_client.configure_ldap(security_service, timeout=timeout, vserver_name=vserver_name) elif security_service['type'].lower() == 'active_directory': vserver_client.configure_active_directory(security_service, vserver_name, aes_encryption) vserver_client.configure_cifs_options(security_service) elif security_service['type'].lower() == 'kerberos': vserver_client.create_kerberos_realm(security_service) vserver_client.configure_kerberos(security_service, vserver_name) else: msg = _('Unsupported security service type %s for ' 'Data ONTAP driver') raise exception.NetAppException(msg % security_service['type']) @na_utils.trace def _create_ldap_client(self, security_service, 
vserver_name=None): ad_domain = security_service.get('domain') ldap_servers = security_service.get('server') bind_dn = security_service.get('user') ldap_schema = 'RFC-2307' if ad_domain: if ldap_servers: msg = _("LDAP client cannot be configured with both 'server' " "and 'domain' parameters. Use 'server' for Linux/Unix " "LDAP servers or 'domain' for Active Directory LDAP " "servers.") LOG.exception(msg) raise exception.NetAppException(msg) # RFC2307bis, for MS Active Directory LDAP server ldap_schema = 'MS-AD-BIS' bind_dn = (security_service.get('user') + '@' + ad_domain) else: if not ldap_servers: msg = _("LDAP client cannot be configured without 'server' " "or 'domain' parameters. Use 'server' for Linux/Unix " "LDAP servers or 'domain' for Active Directory LDAP " "server.") LOG.exception(msg) raise exception.NetAppException(msg) if security_service.get('dns_ip'): self.configure_dns(security_service) body = { 'port': '389', 'schema': ldap_schema, 'bind_dn': bind_dn, 'bind_password': security_service.get('password'), 'svm.name': vserver_name } if security_service.get('ou'): body['base_dn'] = security_service['ou'] if ad_domain: # Active Directory LDAP server body['ad_domain'] = ad_domain else: body['servers'] = [] for server in ldap_servers.split(','): body['servers'].append(server.strip()) self.send_request('/name-services/ldap', 'post', body=body) @na_utils.trace def modify_ldap(self, new_security_service, current_security_service): """Modifies LDAP client on a Vserver.""" ad_domain = new_security_service.get('domain') ldap_servers = new_security_service.get('server') bind_dn = new_security_service.get('user') ldap_schema = 'RFC-2307' svm_uuid = self._get_unique_svm_by_name(self.vserver) if ad_domain: if ldap_servers: msg = _("LDAP client cannot be configured with both 'server' " "and 'domain' parameters. 
Use 'server' for Linux/Unix " "LDAP servers or 'domain' for Active Directory LDAP " "servers.") LOG.exception(msg) raise exception.NetAppException(msg) # RFC2307bis, for MS Active Directory LDAP server ldap_schema = 'MS-AD-BIS' bind_dn = (new_security_service.get('user') + '@' + ad_domain) else: if not ldap_servers: msg = _("LDAP client cannot be configured without 'server' " "or 'domain' parameters. Use 'server' for Linux/Unix " "LDAP servers or 'domain' for Active Directory LDAP " "server.") LOG.exception(msg) raise exception.NetAppException(msg) body = { 'port': '389', 'schema': ldap_schema, 'bind_dn': bind_dn, 'bind_password': new_security_service.get('password') } if new_security_service.get('ou'): body['base_dn'] = new_security_service['ou'] if ad_domain: # Active Directory LDAP server body['ad_domain'] = ad_domain else: body['servers'] = [] for server in ldap_servers.split(','): body['servers'].append(server.strip()) self.send_request(f'/name-services/ldap/{svm_uuid}', 'patch', body=body) @na_utils.trace def update_kerberos_realm(self, security_service): """Update Kerberos realm info. Only KDC IP can be changed.""" realm_name = security_service['domain'] svm_uuid = self._get_unique_svm_by_name(self.vserver) body = { 'kdc-ip': security_service['server'], } try: self.send_request( f'/protocols/nfs/kerberos/realms/{svm_uuid}/{realm_name}', 'patch', body=body) except netapp_api.api.NaApiError as e: msg = _('Failed to update Kerberos realm. 
%s')
            raise exception.NetAppException(msg % e.message)

    @na_utils.trace
    def update_dns_configuration(self, dns_ips, domains):
        """Overrides DNS configuration with the specified IPs and domains.

        Deletes the SVM's DNS entry when both lists are empty, patches it
        when a configuration already exists, and creates it otherwise.

        :raises NetAppException: if the REST call fails.
        """
        current_dns_config = self.get_dns_config(vserver_name=self.vserver)
        body = {
            'domains': [],
            'servers': [],
        }
        for domain in domains:
            body['domains'].append(domain)
        for dns_ip in dns_ips:
            body['servers'].append(dns_ip)

        empty_dns_config = (not body['domains'] and not body['servers'])
        svm_uuid = self._get_unique_svm_by_name(self.vserver)
        if current_dns_config:
            # Existing config: delete it entirely when the new config is
            # empty, otherwise patch it in place.
            endpoint, operation, body = (
                (f'/name-services/dns/{svm_uuid}', 'delete', {})
                if empty_dns_config
                else (f'/name-services/dns/{svm_uuid}', 'patch', body))
        else:
            # No DNS configured yet: create a new entry.
            endpoint, operation, body = '/name-services/dns', 'post', body

        try:
            # Third positional argument is the request body.
            self.send_request(endpoint, operation, body)
        except netapp_api.api.NaApiError as e:
            msg = ("Failed to update DNS configuration. %s")
            raise exception.NetAppException(msg % e.message)

    @na_utils.trace
    def remove_preferred_dcs(self, security_service, svm_uuid):
        """Drops all preferred DCs at once."""
        query = {
            'fqdn': security_service['domain'],
        }
        # NOTE(review): the GET response is read directly via .get('fqdn')
        # and .get('server_ip'); REST collection endpoints normally wrap
        # entries in a 'records' list, so these may come back None here —
        # verify against the actual API response shape.
        records = self.send_request(f'/protocols/cifs/domains/{svm_uuid}/'
                                    f'preferred-domain-controllers/', 'get')
        fqdn = records.get('fqdn')
        server_ip = records.get('server_ip')
        try:
            self.send_request(
                f'/protocols/cifs/domains/{svm_uuid}/'
                f'preferred-domain-controllers/{fqdn}/{server_ip}',
                'delete', query=query)
        except netapp_api.api.NaApiError as e:
            msg = _("Failed to unset preferred DCs.
%s") raise exception.NetAppException(msg % e.message) @na_utils.trace def modify_active_directory_security_service( self, vserver_name, differring_keys, new_security_service, current_security_service): """Modify Active Directory security service.""" svm_uuid = self._get_unique_svm_by_name(vserver_name) new_username = new_security_service['user'] records = self.send_request( f'/protocols/cifs/local-users/{svm_uuid}', 'get') sid = records.get('sid') if 'password' in differring_keys: query = { 'password': new_security_service['password'] } try: self.send_request( f'/protocols/cifs/local-users/{svm_uuid}/{sid}', 'patch', query=query ) except netapp_api.api.NaApiError as e: msg = _("Failed to modify existing CIFS server password. %s") raise exception.NetAppException(msg % e.message) if 'user' in differring_keys: query = { 'name': new_username } try: self.send_request( f'/protocols/cifs/local-users/{svm_uuid}/{sid}', 'patch', query=query ) except netapp_api.api.NaApiError as e: msg = _("Failed to modify existing CIFS server user-name. %s") raise exception.NetAppException(msg % e.message) if 'server' in differring_keys: if current_security_service['server'] is not None: self.remove_preferred_dcs(current_security_service, svm_uuid) if new_security_service['server'] is not None: self.set_preferred_dc(new_security_service, svm_uuid) @na_utils.trace def configure_cifs_aes_encryption(self, vserver_name, aes_encryption): try: svm_uuid = self._get_unique_svm_by_name(vserver_name) body = { 'security.advertised_kdc_encryptions': ( ['aes-128', 'aes-256'] if aes_encryption else ['des', 'rc4']), } self.send_request( f'/protocols/cifs/services/{svm_uuid}', 'patch', body=body) except netapp_api.api.NaApiError as e: msg = _("Failed to set aes encryption. 
%s") raise exception.NetAppException(msg % e.message) @na_utils.trace def set_preferred_dc(self, security_service, vserver_name): """Set preferred domain controller.""" # server is optional if not security_service['server']: return query = { 'server_ip': [], 'fqdn': security_service['domain'], 'skip_config_validation': 'false', } for dc_ip in security_service['server'].split(','): query['server_ip'].append(dc_ip.strip()) svm_uuid = self._get_unique_svm_by_name(vserver_name) try: self.send_request( f'/protocols/cifs/domains/{svm_uuid}' '/preferred-domain-controllers', 'post', query=query) except netapp_api.api.NaApiError as e: msg = _("Failed to set preferred DC. %s") raise exception.NetAppException(msg % e.message) @na_utils.trace def create_vserver_peer(self, vserver_name, peer_vserver_name, peer_cluster_name=None): """Creates a Vserver peer relationship for SnapMirrors.""" body = { 'svm.name': vserver_name, 'peer.svm.name': peer_vserver_name, 'applications': ['snapmirror'] } if peer_cluster_name: body['peer.cluster.name'] = peer_cluster_name self.send_request('/svm/peers', 'post', body=body, enable_tunneling=False) @na_utils.trace def _get_svm_peer_uuid(self, vserver_name, peer_vserver_name): """Get UUID of SVM peer.""" query = { 'svm.name': vserver_name, 'peer.svm.name': peer_vserver_name, 'fields': 'uuid' } res = self.send_request('/svm/peers', 'get', query=query) if not res.get('records'): msg = ('Vserver peer not found.') raise exception.NetAppException(msg) peer_uuid = res.get('records')[0]['uuid'] return peer_uuid @na_utils.trace def accept_vserver_peer(self, vserver_name, peer_vserver_name): """Accepts a pending Vserver peer relationship.""" uuid = self._get_svm_peer_uuid(vserver_name, peer_vserver_name) body = {'state': 'peered'} self.send_request(f'/svm/peers/{uuid}', 'patch', body=body, enable_tunneling=False) @na_utils.trace def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None): """Gets one or more Vserver peer relationships.""" query 
= {} if peer_vserver_name: query['name'] = peer_vserver_name if vserver_name: query['svm.name'] = vserver_name query['fields'] = 'uuid,svm.name,peer.svm.name,state,peer.cluster.name' result = self.send_request('/svm/peers', 'get', query=query) if not self._has_records(result): return [] vserver_peers = [] for vserver_peer in result['records']: vserver_peer_info = { 'uuid': vserver_peer['uuid'], 'vserver': vserver_peer['svm']['name'], 'peer-vserver': vserver_peer['peer']['svm']['name'], 'peer-state': vserver_peer['state'], 'peer-cluster': vserver_peer['peer']['cluster']['name'], } vserver_peers.append(vserver_peer_info) return vserver_peers @na_utils.trace def delete_vserver_peer(self, vserver_name, peer_vserver_name): """Deletes a Vserver peer relationship.""" vserver_peer = self.get_vserver_peers(vserver_name, peer_vserver_name) uuid = vserver_peer[0].get('uuid') self.send_request(f'/svm/peers/{uuid}', 'delete', enable_tunneling=False) @na_utils.trace def create_vserver(self, vserver_name, root_volume_aggregate_name, root_volume_name, aggregate_names, ipspace_name, security_cert_expire_days, delete_retention_hours, logical_space_reporting): """Creates new vserver and assigns aggregates.""" # NOTE(nahimsouza): root_volume_aggregate_name and root_volume_name # were kept due to compatibility issues, but they are not used in # the vserver creation by REST API self._create_vserver( vserver_name, aggregate_names, ipspace_name, delete_retention_hours, name_server_switch=['files'], logical_space_reporting=logical_space_reporting) self._modify_security_cert(vserver_name, security_cert_expire_days) @na_utils.trace def create_vserver_dp_destination(self, vserver_name, aggregate_names, ipspace_name, delete_retention_hours): """Creates new 'dp_destination' vserver and assigns aggregates.""" self._create_vserver( vserver_name, aggregate_names, ipspace_name, delete_retention_hours, subtype='dp_destination') @na_utils.trace def _create_vserver(self, vserver_name, aggregate_names, 
ipspace_name, delete_retention_hours, name_server_switch=None, subtype=None, logical_space_reporting=False): """Creates new vserver and assigns aggregates.""" body = { 'name': vserver_name, } if name_server_switch: body['nsswitch.namemap'] = name_server_switch if subtype: body['subtype'] = subtype if ipspace_name: body['ipspace.name'] = ipspace_name body['aggregates'] = [] for aggr_name in aggregate_names: body['aggregates'].append({'name': aggr_name}) body['is_space_reporting_logical'] = ( 'true' if logical_space_reporting else 'false') body['is_space_enforcement_logical'] = ( 'true' if logical_space_reporting else 'false') self.send_request('/svm/svms', 'post', body=body) if delete_retention_hours != 0: try: svm_uuid = self._get_unique_svm_by_name(vserver_name) body = { 'retention_period': delete_retention_hours } self.send_request(f'/svm/svms/{svm_uuid}', 'patch', body=body) except netapp_api.api.NaApiError: LOG.warning('Failed to modify retention period for vserver ' '%(server)s.', {'server': vserver_name}) @na_utils.trace def create_barbican_kms_config_for_specified_vserver(self, vserver_name, config_name, key_id, keystone_url, app_cred_id, app_cred_secret): """Creates a Barbican KMS configuration for the specified vserver.""" body = { 'svm.name': vserver_name, 'configuration.name': config_name, 'key_id': key_id, 'keystone_url': keystone_url, 'application_cred_id': app_cred_id, 'application_cred_secret': app_cred_secret, } self.send_request('/security/barbican-kms', 'post', body=body) @na_utils.trace def get_key_store_config_uuid(self, config_name): """Retrieves keystore configuration uuid for the specified config name. 
""" query = { 'configuration.name': config_name } response = self.send_request('/security/key-stores', 'get', query=query) if not response.get('records'): return None return response.get('records')[0]['configuration']['uuid'] @na_utils.trace def enable_key_store_config(self, config_uuid): """Enables a keystore configuration""" body = { "enabled": True } # Update key-store self.send_request(f'/security/key-stores/{config_uuid}', 'patch', body=body) @na_utils.trace def _modify_security_cert(self, vserver_name, security_cert_expire_days): """Create new security certificate with given expire days.""" # Do not modify security certificate if specified expire days are # equal to default security certificate expire days i.e. 365. if security_cert_expire_days == DEFAULT_SECURITY_CERT_EXPIRE_DAYS: return query = { 'common-name': vserver_name, 'ca': vserver_name, 'type': 'server', 'svm.name': vserver_name, } result = self.send_request('/security/certificates', 'get', query=query) old_certificate_info_list = result.get('records', []) if not old_certificate_info_list: LOG.warning("Unable to retrieve certificate-info for vserver " "%(server)s'. Cannot set the certificate expiry to " "%s(conf)s. 
", {'server': vserver_name, 'conf': security_cert_expire_days}) return body = { 'common-name': vserver_name, 'type': 'server', 'svm.name': vserver_name, 'expiry_time': f'P{security_cert_expire_days}DT', } query = { 'return_records': 'true' } result = self.send_request('/security/certificates', 'post', body=body, query=query) new_certificate_info_list = result.get('records', []) if not new_certificate_info_list: LOG.warning('Failed to create new security certificate for ' 'vserver %(server)s.', {'server': vserver_name}) return for certificate_info in new_certificate_info_list: cert_uuid = certificate_info.get('uuid', None) svm = certificate_info.get('svm', []) svm_uuid = svm.get('uuid', None) if not svm_uuid or not cert_uuid: continue try: body = { 'certificate': { 'uuid': cert_uuid, }, 'client_enabled': 'false', } self.send_request(f'/svm/svms/{svm_uuid}', 'patch', body=body) except netapp_api.api.NaApiError: LOG.debug('Failed to modify SSL for vserver ' '%(server)s.', {'server': vserver_name}) # Delete all old security certificates for certificate_info in old_certificate_info_list: uuid = certificate_info.get('uuid', None) try: self.send_request(f'/security/certificates/{uuid}', 'delete') except netapp_api.api.NaApiError: LOG.error("Failed to delete security certificate for vserver " "%s.", vserver_name) @na_utils.trace def list_node_data_ports(self, node): """List data ports from node.""" ports = self.get_node_data_ports(node) return [port.get('port') for port in ports] @na_utils.trace def _sort_data_ports_by_speed(self, ports): """Sort ports by speed.""" def sort_key(port): value = port.get('speed') if not (value and isinstance(value, str)): return 0 elif value.isdigit(): return int(value) elif value == 'auto': return 3 elif value == 'undef': return 2 else: return 1 return sorted(ports, key=sort_key, reverse=True) @na_utils.trace def get_node_data_ports(self, node): """Get applicable data ports on the node.""" query = { 'node.name': node, 'state': 'up', 'type': 
'physical', 'broadcast_domain.name': 'Default', 'fields': 'node.name,speed,name' } result = self.send_request('/network/ethernet/ports', 'get', query=query) net_port_info_list = result.get('records', []) ports = [] if net_port_info_list: # NOTE(pulluri): This query selects the ports that are # being exclusively used for data management query_interfaces = { 'service_policy.name': '!default-management', 'services': 'data_*', 'fields': 'location.port.name' } response = self.send_request('/network/ip/interfaces', 'get', query=query_interfaces, enable_tunneling=False) data_ports = set( [record['location']['port']['name'] for record in response.get('records', [])] ) for port_info in net_port_info_list: if port_info['name'] in data_ports: port = { 'node': port_info['node']['name'], 'port': port_info['name'], 'speed': port_info['speed'], } ports.append(port) ports = self._sort_data_ports_by_speed(ports) return ports @na_utils.trace def get_ipspace_name_for_vlan_port(self, vlan_node, vlan_port, vlan_id): """Gets IPSpace name for specified VLAN""" port = vlan_port if not vlan_id else '%(port)s-%(id)s' % { 'port': vlan_port, 'id': vlan_id, } query = { 'name': port, 'node.name': vlan_node, 'fields': 'broadcast_domain.ipspace.name' } result = self.send_request('/network/ethernet/ports/', 'get', query=query) records = result.get('records', []) if not records: return None ipspace_name = records[0]['broadcast_domain']['ipspace']['name'] return ipspace_name @na_utils.trace def create_ipspace(self, ipspace_name): """Creates an IPspace.""" body = {'name': ipspace_name} self.send_request('/network/ipspaces', 'post', body=body) @na_utils.trace def create_port_and_broadcast_domain(self, node, port, vlan, mtu, ipspace): """Create port and broadcast domain, if they don't exist.""" home_port_name = port if vlan: self._create_vlan(node, port, vlan) home_port_name = '%(port)s-%(tag)s' % {'port': port, 'tag': vlan} self._ensure_broadcast_domain_for_port( node, home_port_name, mtu, 
ipspace=ipspace) return home_port_name @na_utils.trace def _create_vlan(self, node, port, vlan): """Create VLAN port if it does not exist.""" try: body = { 'vlan.base_port.name': port, 'node.name': node, 'vlan.tag': vlan, 'type': 'vlan' } self.send_request('/network/ethernet/ports', 'post', body=body) except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_DUPLICATE_ENTRY: LOG.debug('VLAN %(vlan)s already exists on port %(port)s', {'vlan': vlan, 'port': port}) else: msg = _('Failed to create VLAN %(vlan)s on ' 'port %(port)s. %(err_msg)s') msg_args = {'vlan': vlan, 'port': port, 'err_msg': e.message} raise exception.NetAppException(msg % msg_args) @na_utils.trace def _ensure_broadcast_domain_for_port(self, node, port, mtu, ipspace=DEFAULT_IPSPACE): """Ensure a port is in a broadcast domain. Create one if necessary. If the IPspace:domain pair match for the given port, which commonly happens in multi-node clusters, then there isn't anything to do. Otherwise, we can assume the IPspace is correct and extant by this point, so the remaining task is to remove the port from any domain it is already in, create the domain for the IPspace if it doesn't exist, and add the port to this domain. """ # Derive the broadcast domain name from the IPspace name since they # need to be 1-1 and the default for both is the same name, 'Default'. domain = re.sub(r'ipspace', 'domain', ipspace) port_info = self._get_broadcast_domain_for_port(node, port) # Port already in desired ipspace and broadcast domain. if (port_info['ipspace'] == ipspace and port_info['broadcast-domain'] == domain): self._modify_broadcast_domain(domain, ipspace, mtu) return # If desired broadcast domain doesn't exist, create it. if not self._broadcast_domain_exists(domain, ipspace): self._create_broadcast_domain(domain, ipspace, mtu) else: self._modify_broadcast_domain(domain, ipspace, mtu) # Move the port into the broadcast domain where it is needed. 
self._add_port_to_broadcast_domain(node, port, domain, ipspace) @na_utils.trace def _get_broadcast_domain_for_port(self, node, port): """Get broadcast domain for a specific port.""" query = { 'node.name': node, 'name': port, 'fields': 'broadcast_domain.name,broadcast_domain.ipspace.name' } result = self.send_request( '/network/ethernet/ports', 'get', query=query) net_port_info_list = result.get('records', []) port_info = net_port_info_list[0] if not port_info: msg = _('Could not find port %(port)s on node %(node)s.') msg_args = {'port': port, 'node': node} raise exception.NetAppException(msg % msg_args) broadcast_domain = port_info.get('broadcast_domain', {}) broadcast_domain_name = broadcast_domain.get('name') ipspace_name = broadcast_domain.get('ipspace', {}).get('name') port = { 'broadcast-domain': broadcast_domain_name, 'ipspace': ipspace_name } return port @na_utils.trace def _create_broadcast_domain(self, domain, ipspace, mtu): """Create a broadcast domain.""" body = { 'ipspace.name': ipspace, 'name': domain, 'mtu': mtu, } self.send_request( '/network/ethernet/broadcast-domains', 'post', body=body) @na_utils.trace def _modify_broadcast_domain(self, domain, ipspace, mtu): """Modify a broadcast domain.""" query = { 'name': domain } body = { 'ipspace.name': ipspace, 'mtu': mtu, } self.send_request( '/network/ethernet/broadcast-domains', 'patch', body=body, query=query) @na_utils.trace def _delete_port_by_ipspace_and_broadcast_domain(self, port, domain, ipspace): query = { 'broadcast_domain.ipspace.name': ipspace, 'broadcast_domain.name': domain, 'name': port } self.send_request('/network/ethernet/ports/', 'delete', query=query) @na_utils.trace def _broadcast_domain_exists(self, domain, ipspace): """Check if a broadcast domain exists.""" query = { 'ipspace.name': ipspace, 'name': domain, } result = self.send_request( '/network/ethernet/broadcast-domains', 'get', query=query) return self._has_records(result) @na_utils.trace def _add_port_to_broadcast_domain(self, 
node, port, domain, ipspace): """Set a broadcast domain for a given port.""" try: query = { 'name': port, 'node.name': node, } body = { 'broadcast_domain.ipspace.name': ipspace, 'broadcast_domain.name': domain, } self.send_request('/network/ethernet/ports/', 'patch', query=query, body=body) except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_FAIL_ADD_PORT_BROADCAST: LOG.debug('Port %(port)s already exists in broadcast domain ' '%(domain)s', {'port': port, 'domain': domain}) else: msg = _('Failed to add port %(port)s to broadcast domain ' '%(domain)s. %(err_msg)s') msg_args = { 'port': port, 'domain': domain, 'err_msg': e.message, } raise exception.NetAppException(msg % msg_args) @na_utils.trace def update_showmount(self, showmount): """Update show mount for vserver. """ # Get SVM UUID. query = { 'name': self.vserver, 'fields': 'uuid' } res = self.send_request('/svm/svms', 'get', query=query) if not res.get('records'): msg = _('Vserver %s not found.') % self.vserver raise exception.NetAppException(msg) svm_id = res.get('records')[0]['uuid'] body = { 'showmount_enabled': showmount, } self.send_request(f'/protocols/nfs/services/{svm_id}', 'patch', body=body) @na_utils.trace def update_pnfs(self, pnfs): """Update pNFS for vserver. """ # Get SVM UUID. 
query = { 'name': self.vserver, 'fields': 'uuid' } res = self.send_request('/svm/svms', 'get', query=query) if not res.get('records'): msg = _('Vserver %s not found.') % self.vserver raise exception.NetAppException(msg) svm_id = res.get('records')[0]['uuid'] body = { 'protocol.v41_features.pnfs_enabled': pnfs, } self.send_request(f'/protocols/nfs/services/{svm_id}', 'patch', body=body) @na_utils.trace def enable_nfs(self, versions, nfs_config=None): """Enables NFS on Vserver.""" svm_id = self._get_unique_svm_by_name() body = { 'svm.uuid': svm_id, 'enabled': 'true', } self.send_request('/protocols/nfs/services/', 'post', body=body) self._enable_nfs_protocols(versions, svm_id) if nfs_config: self._configure_nfs(nfs_config, svm_id) self._create_default_nfs_export_rules() @na_utils.trace def _enable_nfs_protocols(self, versions, svm_id): """Set the enabled NFS protocol versions.""" nfs3 = 'true' if 'nfs3' in versions else 'false' nfs40 = 'true' if 'nfs4.0' in versions else 'false' nfs41 = 'true' if 'nfs4.1' in versions else 'false' body = { 'protocol.v3_enabled': nfs3, 'protocol.v40_enabled': nfs40, 'protocol.v41_enabled': nfs41, 'showmount_enabled': 'true', 'windows.v3_ms_dos_client_enabled': 'true', 'protocol.v3_features.connection_drop': 'false', 'protocol.v3_features.ejukebox_enabled': 'false', } self.send_request(f'/protocols/nfs/services/{svm_id}', 'patch', body=body) @na_utils.trace def _create_default_nfs_export_rules(self): """Create the default export rule for the NFS service.""" body = { 'clients': [{'match': '0.0.0.0/0'}], 'ro_rule': [ 'any', ], 'rw_rule': [ 'never' ], } uuid = self.get_unique_export_policy_id('default') self.send_request(f'/protocols/nfs/export-policies/{uuid}/rules', "post", body=body) body['clients'] = [{'match': '::/0'}] self.send_request(f'/protocols/nfs/export-policies/{uuid}/rules', "post", body=body) @na_utils.trace def _configure_nfs(self, nfs_config, svm_id): """Sets the nfs configuraton""" if ('udp-max-xfer-size' in nfs_config 
and (nfs_config['udp-max-xfer-size'] != str(DEFAULT_UDP_MAX_XFER_SIZE))): msg = _('Failed to configure NFS. REST API does not support ' 'setting udp-max-xfer-size default value %(default)s ' 'is not equal to actual value %(actual)s') msg_args = { 'default': DEFAULT_UDP_MAX_XFER_SIZE, 'actual': nfs_config['udp-max-xfer-size'], } raise exception.NetAppException(msg % msg_args) nfs_config_value = int(nfs_config['tcp-max-xfer-size']) body = { 'transport.tcp_max_transfer_size': nfs_config_value } self.send_request(f'/protocols/nfs/services/{svm_id}', 'patch', body=body) @na_utils.trace def create_network_interface(self, ip, netmask, node, port, vserver_name, lif_name): """Creates LIF on VLAN port.""" LOG.debug('Creating LIF %(lif)s for Vserver %(vserver)s ' 'node/port %(node)s:%(port)s.', {'lif': lif_name, 'vserver': vserver_name, 'node': node, 'port': port}) query = { 'name': 'default-data-files', 'svm.name': vserver_name, 'fields': 'uuid,name,services,svm.name' } result = self.send_request('/network/ip/service-policies/', 'get', query=query) if result.get('records'): policy = result['records'][0] # NOTE(nahimsouza): Workaround to add services in the policy # in the case ONTAP does not create it automatically if 'data_nfs' not in policy['services']: policy['services'].append('data_nfs') if 'data_cifs' not in policy['services']: policy['services'].append('data_cifs') uuid = policy['uuid'] body = {'services': policy['services']} self.send_request( f'/network/ip/service-policies/{uuid}', 'patch', body=body) body = { 'ip.address': ip, 'ip.netmask': netmask, 'enabled': 'true', 'service_policy.name': 'default-data-files', 'location.home_node.name': node, 'location.home_port.name': port, 'name': lif_name, 'svm.name': vserver_name, } self.send_request('/network/ip/interfaces', 'post', body=body) @na_utils.trace def network_interface_exists(self, vserver_name, node, port, ip, netmask, vlan=None, home_port=None): """Checks if LIF exists.""" if not home_port: home_port = port if 
not vlan else f'{port}-{vlan}' query = { 'ip.address': ip, 'location.home_node.name': node, 'location.home_port.name': home_port, 'ip.netmask': netmask, 'svm.name': vserver_name, 'fields': 'name', } result = self.send_request('/network/ip/interfaces', 'get', query=query) return self._has_records(result) @na_utils.trace def create_route(self, gateway, destination=None): """Create a network route.""" if not gateway: return address = None netmask = None if not destination: if ':' in gateway: destination = '::/0' else: destination = '0.0.0.0/0' if '/' in destination: address, netmask = destination.split('/') else: address = destination body = { 'destination.address': address, 'gateway': gateway, } if netmask: body['destination.netmask'] = netmask try: self.send_request('/network/ip/routes', 'post', body=body) except netapp_api.api.NaApiError as e: if (e.code == netapp_api.EREST_DUPLICATE_ROUTE): LOG.debug('Route to %(destination)s via gateway %(gateway)s ' 'exists.', {'destination': destination, 'gateway': gateway}) else: msg = _('Failed to create a route to %(destination)s via ' 'gateway %(gateway)s: %(err_msg)s') msg_args = { 'destination': destination, 'gateway': gateway, 'err_msg': e.message, } raise exception.NetAppException(msg % msg_args) @na_utils.trace def rename_vserver(self, vserver_name, new_vserver_name): """Rename a vserver.""" body = { 'name': new_vserver_name } svm_uuid = self._get_unique_svm_by_name(vserver_name) self.send_request(f'/svm/svms/{svm_uuid}', 'patch', body=body) @na_utils.trace def get_vserver_info(self, vserver_name): """Retrieves Vserver info.""" LOG.debug('Retrieving Vserver %s information.', vserver_name) query = { 'name': vserver_name, 'fields': 'state,subtype' } response = self.send_request('/svm/svms', 'get', query=query) if not response.get('records'): return vserver = response['records'][0] vserver_info = { 'name': vserver_name, 'subtype': vserver['subtype'], 'operational_state': vserver['state'], 'state': vserver['state'], } 
return vserver_info @na_utils.trace def get_nfs_config(self, desired_args, vserver): """Gets the NFS config of the given vserver with the desired params""" query = {'fields': 'transport.*'} query['svm.name'] = vserver nfs_info = { 'tcp-max-xfer-size': str(DEFAULT_TCP_MAX_XFER_SIZE), 'udp-max-xfer-size': str(DEFAULT_UDP_MAX_XFER_SIZE) } response = self.send_request('/protocols/nfs/services/', 'get', query=query) records = response.get('records', []) if records: nfs_info['tcp-max-xfer-size'] = ( str(records[0]['transport']['tcp_max_transfer_size'])) return nfs_info @na_utils.trace def get_vserver_ipspace(self, vserver_name): """Get the IPspace of the vserver, or None if not supported.""" query = { 'name': vserver_name, 'fields': 'ipspace.name' } try: response = self.send_request('/svm/svms', 'get', query=query) except netapp_api.api.NaApiError: msg = _('Could not determine IPspace for Vserver %s.') raise exception.NetAppException(msg % vserver_name) if self._has_records(response): return response['records'][0].get('ipspace', {}).get('name') return None @na_utils.trace def get_snapmirror_policies(self, vserver_name): """Get all SnapMirror policies associated to a vServer.""" query = { 'svm.name': vserver_name, 'fields': 'name' } response = self.send_request( '/snapmirror/policies', 'get', query=query) records = response.get('records') policy_name = [] for record in records: policy_name.append(record.get('name')) return policy_name @na_utils.trace def create_snapmirror_policy(self, policy_name, policy_type='async', discard_network_info=True, preserve_snapshots=True, snapmirror_label='all_source_snapshots', keep=1): """Create SnapMirror Policy""" if policy_type == "vault": body = {"name": policy_name, "type": "async", "create_snapshot_on_source": False} else: body = {"name": policy_name, "type": policy_type} if discard_network_info: body["exclude_network_config"] = {'svmdr-config-obj': 'network'} if preserve_snapshots: body["retention"] = [{"label": snapmirror_label, 
"count": keep}] try: self.send_request('/snapmirror/policies/', 'post', body=body) except netapp_api.api.NaApiError as e: LOG.debug('Failed to create SnapMirror policy. ' 'Error: %s. Code: %s', e.message, e.code) raise @na_utils.trace def delete_snapmirror_policy(self, policy_name): """Deletes a SnapMirror policy.""" query = { 'name': policy_name, 'fields': 'uuid,name' } response = self.send_request('/snapmirror/policies', 'get', query=query) if self._has_records(response): uuid = response['records'][0]['uuid'] try: self.send_request(f'/snapmirror/policies/{uuid}', 'delete') except netapp_api.api.NaApiError as e: if e.code != netapp_api.EREST_ENTRY_NOT_FOUND: raise @na_utils.trace def delete_vserver(self, vserver_name, vserver_client, security_services=None): """Deletes a Vserver. Checks if Vserver exists and does not have active shares. Offlines and destroys root volumes. Deletes Vserver. """ vserver_info = self.get_vserver_info(vserver_name) if vserver_info is None: LOG.error("Vserver %s does not exist.", vserver_name) return svm_uuid = self._get_unique_svm_by_name(vserver_name) is_dp_destination = vserver_info.get('subtype') == 'dp_destination' root_volume_name = self.get_vserver_root_volume_name(vserver_name) volumes_count = vserver_client.get_vserver_volume_count() # NOTE(dviroel): 'dp_destination' vservers don't allow to delete its # root volume. We can just call vserver-destroy directly. if volumes_count == 1 and not is_dp_destination: try: vserver_client.offline_volume(root_volume_name) except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_ENTRY_NOT_FOUND: LOG.error("Cannot delete Vserver %s. " "Failed to put volumes offline. " "Entry doesn't exist.", vserver_name) else: raise vserver_client.delete_volume(root_volume_name) elif volumes_count > 1: msg = _("Cannot delete Vserver. 
Vserver %s has shares.") raise exception.NetAppException(msg % vserver_name) if security_services and not is_dp_destination: self._terminate_vserver_services(vserver_name, vserver_client, security_services) self.send_request(f'/svm/svms/{svm_uuid}', 'delete') @na_utils.trace def get_vserver_volume_count(self): """Get number of volumes in SVM.""" query = {'return_records': 'false'} response = self.send_request('/storage/volumes', 'get', query=query) return response['num_records'] @na_utils.trace def _terminate_vserver_services(self, vserver_name, vserver_client, security_services): """Terminate SVM security services.""" svm_uuid = self._get_unique_svm_by_name(vserver_name) for service in security_services: if service['type'].lower() == 'active_directory': body = { 'ad_domain.password': service['password'], 'ad_domain.user': service['user'], } body_force = { 'ad_domain.password': service['password'], 'ad_domain.user': service['user'], 'force': True } try: vserver_client.send_request( f'/protocols/cifs/services/{svm_uuid}', 'delete', body=body) except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_ENTRY_NOT_FOUND: LOG.error('CIFS server does not exist for ' 'Vserver %s.', vserver_name) else: vserver_client.send_request( f'/protocols/cifs/services/{svm_uuid}', 'delete', body=body_force) elif service['type'].lower() == 'kerberos': vserver_client.disable_kerberos(service) @na_utils.trace def disable_kerberos(self, security_service): """Disable Kerberos in all Vserver LIFs.""" lifs = self.get_network_interfaces() # NOTE(dviroel): If the Vserver has no LIFs, there are no Kerberos # to be disabled. 
for lif in lifs: body = { 'password': security_service['password'], 'user': security_service['user'], 'interface.name': lif['interface-name'], 'enabled': False } interface_uuid = lif['uuid'] try: self.send_request( f'/protocols/nfs/kerberos/interfaces/{interface_uuid}', 'patch', body=body) except netapp_api.api.NaApiError as e: disabled_msg = ( "Kerberos is already enabled/disabled on this LIF") if (e.code == netapp_api.EREST_KERBEROS_IS_ENABLED_DISABLED and disabled_msg in e.message): # NOTE(dviroel): do not raise an error for 'Kerberos is # already disabled in this LIF'. continue msg = ("Failed to disable Kerberos: %s.") raise exception.NetAppException(msg % e.message) @na_utils.trace def get_vserver_root_volume_name(self, vserver_name): """Get the root volume name of the vserver.""" unique_volume = self._get_volume_by_args(vserver=vserver_name, is_root=True) return unique_volume['name'] @na_utils.trace def ipspace_has_data_vservers(self, ipspace_name): """Check whether an IPspace has any data Vservers assigned to it.""" query = {'ipspace.name': ipspace_name} result = self.send_request('/svm/svms', 'get', query=query) return self._has_records(result) @na_utils.trace def delete_vlan(self, node, port, vlan): """Delete VLAN port if not in use.""" query = { 'vlan.base_port.name': port, 'node.name': node, 'vlan.tag': vlan, } try: self.send_request('/network/ethernet/ports/', 'delete', query=query) except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_ENTRY_NOT_FOUND: LOG.debug('VLAN %(vlan)s on port %(port)s node %(node)s ' 'was not found') elif (e.code == netapp_api.EREST_INTERFACE_BOUND or e.code == netapp_api.EREST_PORT_IN_USE): LOG.debug('VLAN %(vlan)s on port %(port)s node %(node)s ' 'still used by LIF and cannot be deleted.', {'vlan': vlan, 'port': port, 'node': node}) else: msg = _('Failed to delete VLAN %(vlan)s on ' 'port %(port)s node %(node)s: %(err_msg)s') msg_args = { 'vlan': vlan, 'port': port, 'node': node, 'err_msg': e.message } raise 
exception.NetAppException(msg % msg_args) @na_utils.trace def get_degraded_ports(self, broadcast_domains, ipspace_name): """Get degraded ports for broadcast domains and an ipspace.""" valid_domains = self._get_valid_broadcast_domains(broadcast_domains) query = { 'broadcast_domain.name': '|'.join(valid_domains), 'broadcast_domain.ipspace.name': ipspace_name, 'state': 'degraded', 'type': 'vlan', 'fields': 'node.name,name' } result = self.send_request('/network/ethernet/ports', 'get', query=query) net_port_info_list = result.get('records', []) ports = [] for port_info in net_port_info_list: ports.append(f"{port_info['node']['name']}:" f"{port_info['name']}") return ports @na_utils.trace def _get_valid_broadcast_domains(_self, broadcast_domains): valid_domains = [] for broadcast_domain in broadcast_domains: if ( broadcast_domain == 'OpenStack' or broadcast_domain == DEFAULT_BROADCAST_DOMAIN or broadcast_domain.startswith(BROADCAST_DOMAIN_PREFIX) ): valid_domains.append(broadcast_domain) return valid_domains @na_utils.trace def svm_migration_start( self, source_cluster_name, source_share_server_name, dest_aggregates, dest_ipspace=None, check_only=False): """Send a request to start the SVM migration in the backend. :param source_cluster_name: the name of the source cluster. :param source_share_server_name: the name of the source server. :param dest_aggregates: the aggregates where volumes will be placed in the migration. :param dest_ipspace: created IPspace for the migration. :param check_only: If the call will only check the feasibility. deleted after the cutover or not. 
""" body = { "auto_cutover": False, "auto_source_cleanup": True, "check_only": check_only, "source": { "cluster": {"name": source_cluster_name}, "svm": {"name": source_share_server_name}, }, "destination": { "volume_placement": { "aggregates": dest_aggregates, }, }, } if dest_ipspace: ipspace_data = { "ipspace": { "name": dest_ipspace, } } body["destination"].update(ipspace_data) return self.send_request('/svm/migrations', 'post', body=body, wait_on_accepted=False) @na_utils.trace def get_migration_check_job_state(self, job_id): """Get the job state of a share server migration. :param job_id: id of the job to be searched. """ try: job = self.get_job(job_id) return job except netapp_api.api.NaApiError as e: if e.code == netapp_api.EREST_NFS_V4_0_ENABLED_MIGRATION_FAILURE: msg = _( 'NFS v4.0 is not supported while migrating vservers.') LOG.error(msg) raise exception.NetAppException(message=e.message) if e.code == netapp_api.EREST_VSERVER_MIGRATION_TO_NON_AFF_CLUSTER: msg = _('Both source and destination clusters must be AFF ' 'systems.') LOG.error(msg) raise exception.NetAppException(message=e.message) msg = (_('Failed to check migration support. Reason: ' '%s' % e.message)) raise exception.NetAppException(msg) @na_utils.trace def svm_migrate_complete(self, migration_id): """Send a request to complete the SVM migration. :param migration_id: the id of the migration provided by the storage. """ body = { "action": "cutover" } return self.send_request( f'/svm/migrations/{migration_id}', 'patch', body=body, wait_on_accepted=False) @na_utils.trace def svm_migrate_cancel(self, migration_id): """Send a request to cancel the SVM migration. :param migration_id: the id of the migration provided by the storage. """ return self.send_request(f'/svm/migrations/{migration_id}', 'delete', wait_on_accepted=False) @na_utils.trace def svm_migration_get(self, migration_id): """Send a request to get the progress of the SVM migration. 
:param migration_id: the id of the migration provided by the storage. """ return self.send_request(f'/svm/migrations/{migration_id}', 'get') @na_utils.trace def svm_migrate_pause(self, migration_id): """Send a request to pause a migration. :param migration_id: the id of the migration provided by the storage. """ body = { "action": "pause" } return self.send_request( f'/svm/migrations/{migration_id}', 'patch', body=body, wait_on_accepted=False) @na_utils.trace def delete_network_interface(self, vserver_name, interface_name): """Delete the LIF, disabling it before.""" self.disable_network_interface(vserver_name, interface_name) query = { 'svm.name': vserver_name, 'name': interface_name } self.send_request('/network/ip/interfaces', 'delete', query=query) @na_utils.trace def disable_network_interface(self, vserver_name, interface_name): """Disable the LIF.""" body = { 'enabled': 'false' } query = { 'svm.name': vserver_name, 'name': interface_name } self.send_request('/network/ip/interfaces', 'patch', body=body, query=query) @na_utils.trace def get_ipspaces(self, ipspace_name=None, vserver_name=None): """Gets one or more IPSpaces.""" query = { 'name': ipspace_name } result = self.send_request('/network/ipspaces', 'get', query=query) if not self._has_records(result): return [] ipspace_info = result.get('records')[0] query = { 'broadcast_domain.ipspace.name': ipspace_name } ports = self.send_request('/network/ethernet/ports', 'get', query=query) query = { 'ipspace.name': ipspace_name } vservers = self.send_request('/svm/svms', 'get', query=query) br_domains = self.send_request('/network/ethernet/broadcast-domains', 'get', query=query) ipspace = { 'ports': [], 'vservers': [], 'broadcast-domains': [], } for port in ports.get('records'): ipspace['ports'].append(port.get('name')) for vserver in vservers.get('records'): ipspace['vservers'].append(vserver.get('name')) for broadcast in br_domains.get('records'): ipspace['broadcast-domains'].append(broadcast.get('name')) 
ipspace['ipspace'] = ipspace_info.get('name') ipspace['uuid'] = ipspace_info.get('uuid') return ipspace @na_utils.trace def _delete_port_and_broadcast_domain(self, domain, ipspace): """Delete a broadcast domain and its ports.""" ipspace_name = ipspace['ipspace'] ports = ipspace['ports'] for port in ports: self._delete_port_by_ipspace_and_broadcast_domain( port, domain, ipspace_name) query = { 'name': domain, 'ipspace.name': ipspace_name } self.send_request('/network/ethernet/broadcast-domains', 'delete', query=query) @na_utils.trace def _delete_port_and_broadcast_domains_for_ipspace(self, ipspace_name): """Deletes all broadcast domains in an IPspace.""" ipspace = self.get_ipspaces(ipspace_name) if not ipspace: return for broadcast_domain_name in ipspace['broadcast-domains']: self._delete_port_and_broadcast_domain(broadcast_domain_name, ipspace) @na_utils.trace def delete_ipspace(self, ipspace_name): """Deletes an IPspace Returns: True if ipspace was deleted, False if validation or error prevented deletion """ if not self.features.IPSPACES: return False if not ipspace_name: return False if ( ipspace_name in CLUSTER_IPSPACES or self.ipspace_has_data_vservers(ipspace_name) ): LOG.debug('IPspace %(ipspace)s not deleted: still in use.', {'ipspace': ipspace_name}) return False try: self._delete_port_and_broadcast_domains_for_ipspace(ipspace_name) except netapp_api.NaApiError as e: msg = _('Broadcast Domains of IPspace %s not deleted. ' 'Reason: %s') % (ipspace_name, e) LOG.warning(msg) return False query = { 'name': ipspace_name } try: self.send_request('/network/ipspaces', 'delete', query=query) except netapp_api.NaApiError as e: msg = _('IPspace %s not deleted. 
Reason: %s') % (ipspace_name, e) LOG.warning(msg) return False return True @na_utils.trace def get_svm_volumes_total_size(self, svm_name): """Gets volumes sizes sum (GB) from all volumes in SVM by svm_name""" query = { 'svm.name': svm_name, 'fields': 'size' } response = self.send_request('/storage/volumes/', 'get', query=query) svm_volumes = response.get('records', []) if len(svm_volumes) > 0: total_volumes_size = 0 for volume in svm_volumes: # Root volumes are not taking account because they are part of # SVM creation. if volume['name'] != 'root': total_volumes_size = total_volumes_size + volume['size'] else: return 0 # Convert Bytes to GBs. return (total_volumes_size / 1024**3) def snapmirror_restore_vol(self, source_path=None, dest_path=None, source_vserver=None, dest_vserver=None, source_volume=None, dest_volume=None, des_cluster=None, source_snapshot=None): """Restore snapshot copy from destination volume to source volume""" snapmirror_info = self.get_snapmirrors(dest_path, source_path) if not snapmirror_info: msg = _("There is no relationship between source " "'%(source_path)s' and destination cluster" " '%(des_path)s'") msg_args = {'source_path': source_path, 'des_path': dest_path, } raise exception.NetAppException(msg % msg_args) uuid = snapmirror_info[0].get('uuid') body = {"destination": {"path": dest_path, "cluster": {"name": des_cluster}}, "source_snapshot": source_snapshot} try: self.send_request(f"/snapmirror/relationships/{uuid}/restore", 'post', body=body) except netapp_api.api.NaApiError as e: LOG.debug('Snapmirror restore has failed. Error: %s. 
Code: %s', e.message, e.code) raise @na_utils.trace def list_volume_snapshots(self, volume_name, snapmirror_label=None, newer_than=None): """Gets list of snapshots of volume.""" volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] query = {} if snapmirror_label: query = { 'snapmirror_label': snapmirror_label, } if newer_than: query['create_time'] = '>' + newer_than response = self.send_request( f'/storage/volumes/{uuid}/snapshots/', 'get', query=query) return [snapshot_info['name'] for snapshot_info in response['records']] @na_utils.trace def is_snaplock_compliance_clock_configured(self, node_name): """Get the SnapLock compliance clock is configured for each node""" node_uuid = self._get_cluster_node_uuid(node_name) response = self.send_request( f'/storage/snaplock/compliance-clocks/{node_uuid}', 'get' ) clock_fmt_value = response.get('time') if clock_fmt_value is None: return False return True @na_utils.trace def set_snaplock_attributes(self, volume_name, **options): """Set the retention period for SnapLock enabled volume""" body = {} snaplock_attribute_mapping = { 'snaplock_autocommit_period': 'snaplock.autocommit_period', 'snaplock_min_retention_period': 'snaplock.retention.minimum', 'snaplock_max_retention_period': 'snaplock.retention.maximum', 'snaplock_default_retention_period': 'snaplock.retention.default', } for share_type_attr, na_api_attr in snaplock_attribute_mapping.items(): if options.get(share_type_attr): if share_type_attr == 'snaplock_default_retention_period': default_retention_period = options.get( 'snaplock_default_retention_period' ) if default_retention_period == "max": options[share_type_attr] =\ options.get('snaplock_max_retention_period') elif default_retention_period == "min": options[share_type_attr] = \ options.get('snaplock_min_retention_period') body[na_api_attr] = utils.convert_time_duration_to_iso_format( options.get(share_type_attr)) if all(value is None for value in body.values()): LOG.debug("All SnapLock 
attributes are None, not" " updating SnapLock attributes") return volume = self._get_volume_by_args(vol_name=volume_name) uuid = volume['uuid'] self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) @na_utils.trace def _is_snaplock_enabled_volume(self, volume_name): """Get whether volume is SnapLock enabled or disabled""" vol_attr = self.get_volume(volume_name) return vol_attr.get('snaplock-type') in ("compliance", "enterprise") @na_utils.trace def _get_cluster_node_uuid(self, node_name): query = { 'name': node_name } response = self.send_request('/cluster/nodes', 'get', query=query) return response.get('records')[0].get('uuid') @na_utils.trace def get_storage_failover_partner(self, node_name): """Get the partner node of HA pair""" node_uuid = self._get_cluster_node_uuid(node_name) node_details = self.send_request(f'/cluster/nodes/{node_uuid}', 'get') return node_details['ha']['partners'][0]['name'] @na_utils.trace def get_migratable_data_lif_for_node(self, node): """Get available LIFs that can be migrated to another node.""" protocols = ['data_nfs', 'data_cifs'] query = { 'services': '|'.join(protocols), 'location.home_node.name': node, 'fields': 'name', } result = self.send_request('/network/ip/interfaces', 'get', query=query) migratable_lif = [] if self._has_records(result): result = result.get('records', []) for lif in result: lif_result = self.send_request( f'/network/ip/interfaces/{lif.get("uuid")}', 'get' ) failover_policy = lif_result['location']['failover'] if failover_policy in ('default', 'sfo_partners_only'): migratable_lif.append(lif["name"]) return migratable_lif ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/client/rest_api.py0000664000175000017500000002711300000000000026003 0ustar00zuulzuul00000000000000# Copyright 2023 NetApp, Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp API for REST Data ONTAP. Contains classes required to issue REST API calls to Data ONTAP. """ import re from oslo_log import log from oslo_serialization import jsonutils import requests from requests.adapters import HTTPAdapter from requests import auth from urllib3.util import retry from manila.share.drivers.netapp.dataontap.client import api from manila.share.drivers.netapp import utils LOG = log.getLogger(__name__) EREST_DUPLICATE_ENTRY = '1' EREST_ENTRY_NOT_FOUND = '4' EREST_NOT_AUTHORIZED = '6' EREST_SNAPMIRROR_INITIALIZING = '917536' EREST_VSERVER_NOT_FOUND = '13434920' EREST_ANOTHER_VOLUME_OPERATION = '13107406' EREST_LICENSE_NOT_INSTALLED = '1115127' EREST_SNAPSHOT_NOT_SPECIFIED = '1638515' EREST_FPOLICY_MODIF_POLICY_DISABLED = '9765029' EREST_POLICY_ALREADY_DISABLED = '9764907' EREST_ERELATION_EXISTS = '6619637' EREST_BREAK_SNAPMIRROR_FAILED = '13303808' EREST_UPDATE_SNAPMIRROR_FAILED = '13303844' EREST_SNAPMIRROR_NOT_INITIALIZED = '13303812' EREST_DUPLICATE_ROUTE = '1966345' EREST_FAIL_ADD_PORT_BROADCAST = '1967149' EREST_KERBEROS_IS_ENABLED_DISABLED = '3276861' EREST_INTERFACE_BOUND = '1376858' EREST_PORT_IN_USE = '1966189' EREST_NFS_V4_0_ENABLED_MIGRATION_FAILURE = '13172940' EREST_VSERVER_MIGRATION_TO_NON_AFF_CLUSTER = '13172984' EREST_UNMOUNT_FAILED_LOCK = '917536' EREST_CANNOT_MODITY_OFFLINE_VOLUME = '917533' EREST_CANNOT_MODITY_SPECIFIED_FIELD = '917628' class NaRetryableError(api.NaApiError): def 
__str__(self, *args, **kwargs): return 'NetApp API failed. Try again. Reason - %s:%s' % ( self.code, self.message) class RestNaServer(object): TRANSPORT_TYPE_HTTP = 'http' TRANSPORT_TYPE_HTTPS = 'https' HTTP_PORT = '80' HTTPS_PORT = '443' TUNNELING_HEADER_KEY = "X-Dot-SVM-Name" def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP, ssl_cert_path=None, username=None, password=None, port=None, trace=False, api_trace_pattern=utils.API_TRACE_PATTERN, private_key_file=None, certificate_file=None, ca_certificate_file=None, certificate_host_validation=None): self._host = host if private_key_file and certificate_file: transport_type = RestNaServer.TRANSPORT_TYPE_HTTPS self.set_transport_type(transport_type) self.set_port(port=port) self._username = username self._password = password self._trace = trace self._api_trace_pattern = api_trace_pattern self._timeout = None if ssl_cert_path is not None: self._ssl_verify = ssl_cert_path else: # Note(felipe_rodrigues): it will verify with the mozila CA roots, # given by certifi package. self._ssl_verify = True self._private_key_file = private_key_file self._certificate_file = certificate_file self._ca_certificate_file = ca_certificate_file self._certificate_host_validation = certificate_host_validation LOG.debug('Using REST with NetApp controller: %s', self._host) def set_transport_type(self, transport_type): """Set the transport type protocol for API. Supports http and https transport types. """ if transport_type is None or transport_type.lower() not in ( RestNaServer.TRANSPORT_TYPE_HTTP, RestNaServer.TRANSPORT_TYPE_HTTPS): raise ValueError('Unsupported transport type') self._protocol = transport_type.lower() def get_transport_type(self): """Get the transport type protocol.""" return self._protocol def set_api_version(self, major, minor): """Set the API version.""" try: self._api_major_version = int(major) self._api_minor_version = int(minor) self._api_version = str(major) + "." 
+ str(minor) except ValueError: raise ValueError('Major and minor versions must be integers') def get_api_version(self): """Gets the API version tuple.""" if hasattr(self, '_api_version'): return (self._api_major_version, self._api_minor_version) return None def set_ontap_version(self, ontap_version): """Set the ONTAP version.""" self._ontap_version = ontap_version def get_ontap_version(self): """Gets the ONTAP version.""" if hasattr(self, '_ontap_version'): return self._ontap_version return None def set_port(self, port=None): """Set the ONTAP port, if not informed, set with default one.""" if port is None and self._protocol == RestNaServer.TRANSPORT_TYPE_HTTP: self._port = RestNaServer.HTTP_PORT elif port is None: self._port = RestNaServer.HTTPS_PORT else: try: int(port) except ValueError: raise ValueError('Port must be integer') self._port = str(port) def get_port(self): """Get the server communication port.""" return self._port def set_timeout(self, seconds): """Sets the timeout in seconds.""" try: self._timeout = int(seconds) except ValueError: raise ValueError('timeout in seconds must be integer') def get_timeout(self): """Gets the timeout in seconds if set.""" return self._timeout def set_vserver(self, vserver): """Set the vserver to use if tunneling gets enabled.""" self._vserver = vserver def get_vserver(self): """Get the vserver to use in tunneling.""" return self._vserver def __str__(self): """Gets a representation of the client.""" return "server: %s" % (self._host) def _get_request_method(self, method, session): """Returns the request method to be used in the REST call.""" request_methods = { 'post': session.post, 'get': session.get, 'put': session.put, 'delete': session.delete, 'patch': session.patch, } return request_methods[method] def _add_query_params_to_url(self, url, query): """Populates the URL with specified filters.""" filters = '&'.join([f"{k}={v}" for k, v in query.items()]) url += "?" 
+ filters return url def _get_base_url(self): """Get the base URL for REST requests.""" host = self._host if ':' in host: host = '[%s]' % host return f'{self._protocol}://{host}:{self._port}/api' def _build_session(self, headers): """Builds a session in the client.""" self._session = requests.Session() # NOTE(felipe_rodrigues): request resilient of temporary network # failures (like name resolution failure), retrying until 5 times. max_retries = retry.Retry(total=5, connect=5, read=2, backoff_factor=1) adapter = HTTPAdapter(max_retries=max_retries) self._session.mount('%s://' % self._protocol, adapter) if self._private_key_file and self._certificate_file: self._session.cert, self._session.verify = ( self._create_certificate_auth_handler()) else: self._session.auth = self._create_basic_auth_handler() self._session.verify = self._ssl_verify self._session.headers = headers def _build_headers(self, enable_tunneling): """Build and return headers for a REST request.""" headers = { "Accept": "application/json", "Content-Type": "application/json" } # enable tunneling only if vserver is set by upper layer if enable_tunneling and self.get_vserver: headers[RestNaServer.TUNNELING_HEADER_KEY] = self.get_vserver() return headers def _create_basic_auth_handler(self): """Creates and returns a basic HTTP auth handler.""" return auth.HTTPBasicAuth(self._username, self._password) def _create_certificate_auth_handler(self): """Creates and returns a certificate auth handler.""" self._session.verify = self._certificate_host_validation if self._certificate_file and self._private_key_file: self._session.cert = (self._certificate_file, self._private_key_file) # Assigning _session.verify to ca cert file to validate the certs # when we have host validation set to true if self._certificate_host_validation and self._ca_certificate_file: self._session.verify = self._ca_certificate_file return self._session.cert, self._session.verify def send_http_request(self, method, url, body, headers): 
"""Invoke the API on the server.""" data = jsonutils.dumps(body) if body else {} self._build_session(headers) request_method = self._get_request_method(method, self._session) api_name_matches_regex = (re.match(self._api_trace_pattern, url) is not None) if self._trace and api_name_matches_regex: svm = headers.get(RestNaServer.TUNNELING_HEADER_KEY) message = ("Request: %(method)s Header=%(header)s %(url)s " "Body=%(body)s") msg_args = { "method": method.upper(), "url": url, "body": body, "header": ({RestNaServer.TUNNELING_HEADER_KEY: svm} if svm else {}), } LOG.debug(message, msg_args) try: if self._timeout is not None: response = request_method( url, data=data, timeout=self._timeout) else: response = request_method(url, data=data) except requests.HTTPError as e: raise api.NaApiError(e.errno, e.strerror) except Exception as e: raise api.NaApiError(message=e) code = response.status_code res = jsonutils.loads(response.content) if response.content else {} if self._trace and api_name_matches_regex: message = "Response: %(code)s Body=%(body)s" msg_args = { "code": code, "body": res } LOG.debug(message, msg_args) return code, res def invoke_successfully(self, action_url, method, body=None, query=None, enable_tunneling=False): """Invokes REST API and checks execution status as success.""" headers = self._build_headers(enable_tunneling) if query: action_url = self._add_query_params_to_url(action_url, query) url = self._get_base_url() + action_url code, response = self.send_http_request(method, url, body, headers) if not response.get('error'): return code, response result_error = response.get('error') code = result_error.get('code') or 'ESTATUSFAILED' msg = (result_error.get('message') or 'Execution failed due to unknown reason') raise api.NaApiError(code, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/share/drivers/netapp/dataontap/client/rest_endpoints.py0000664000175000017500000000321300000000000027230 0ustar00zuulzuul00000000000000# Copyright 2021 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ENDPOINT_MIGRATION_ACTIONS = 'svm/migrations/%(svm_migration_id)s' ENDPOINT_MIGRATIONS = 'svm/migrations' ENDPOINT_JOB_ACTIONS = 'cluster/jobs/%(job_uuid)s' ENDPOINT_MIGRATION_GET_PROGRESS = '/storage/volumes/' endpoints = { 'system-get-version': { 'method': 'get', 'url': 'cluster?fields=version', }, 'svm-migration-start': { 'method': 'post', 'url': ENDPOINT_MIGRATIONS }, 'svm-migration-complete': { 'method': 'patch', 'url': ENDPOINT_MIGRATION_ACTIONS }, 'svm-migration-cancel': { 'method': 'delete', 'url': ENDPOINT_MIGRATION_ACTIONS }, 'svm-migration-get': { 'method': 'get', 'url': ENDPOINT_MIGRATION_ACTIONS }, 'get-job': { 'method': 'get', 'url': ENDPOINT_JOB_ACTIONS }, 'svm-migration-pause': { 'method': 'patch', 'url': ENDPOINT_MIGRATION_ACTIONS }, 'svm-migration-get-progress': { 'method': 'get', 'url': ENDPOINT_MIGRATION_GET_PROGRESS }, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9296715 manila-21.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/0000775000175000017500000000000000000000000025026 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/__init__.py0000664000175000017500000000000000000000000027125 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py0000664000175000017500000013300500000000000027700 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Alex Meade. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP data motion library. This library handles transferring data from a source to a destination. Its responsibility is to handle this as efficiently as possible given the location of the data's source and destination. This includes cloning, SnapMirror, and copy-offload as improvements to brute force data transfer. 
""" from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from manila import exception from manila.i18n import _ from manila.share import configuration from manila.share import driver from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.client import client_cmode_rest from manila.share.drivers.netapp import options as na_opts from manila.share.drivers.netapp import utils as na_utils from manila.share import utils as share_utils from manila import utils LOG = log.getLogger(__name__) CONF = cfg.CONF def get_backend_configuration(backend_name): config = configuration.Configuration(driver.share_opts, config_group=backend_name) if config.driver_handles_share_servers is None: msg = _("Could not find backend stanza %(backend_name)s in " "configuration which is required for replication or migration " "workflows with the source backend.") params = { "backend_name": backend_name, } raise exception.BadConfigurationException(reason=msg % params) if config.driver_handles_share_servers is True: # NOTE(dviroel): avoid using a pre-create vserver on DHSS == True mode # when retrieving remote backend configuration. 
config.netapp_vserver = None config.append_config_values(na_opts.netapp_cluster_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_certificateauth_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_support_opts) config.append_config_values(na_opts.netapp_provisioning_opts) config.append_config_values(na_opts.netapp_data_motion_opts) config.append_config_values(na_opts.netapp_proxy_opts) config.append_config_values(na_opts.netapp_backup_opts) return config def get_backup_configuration(backup_type): config_stanzas = CONF.list_all_sections() if backup_type not in config_stanzas: msg = _("Could not find backup_type stanza %(backup_type)s in " "configuration which is required for backup workflows " "with the source share. Available stanzas are " "%(stanzas)s") params = { "stanzas": config_stanzas, "backup_type": backup_type, } raise exception.BadConfigurationException(reason=msg % params) config = configuration.Configuration(driver.share_opts, config_group=backup_type) config.append_config_values(na_opts.netapp_backup_opts) return config def get_client_for_backend(backend_name, vserver_name=None, force_rest_client=False): config = get_backend_configuration(backend_name) if config.netapp_use_legacy_client and not force_rest_client: client = client_cmode.NetAppCmodeClient( transport_type=config.netapp_transport_type, ssl_cert_path=config.netapp_ssl_cert_path, username=config.netapp_login, password=config.netapp_password, hostname=config.netapp_server_hostname, port=config.netapp_server_port, vserver=vserver_name or config.netapp_vserver, trace=na_utils.TRACE_API, private_key_file=config.netapp_private_key_file, certificate_file=config.netapp_certificate_file, ca_certificate_file=config.netapp_ca_certificate_file, certificate_host_validation=( config.netapp_certificate_host_validation)) else: client = 
client_cmode_rest.NetAppRestClient( transport_type=config.netapp_transport_type, ssl_cert_path=config.netapp_ssl_cert_path, username=config.netapp_login, password=config.netapp_password, hostname=config.netapp_server_hostname, port=config.netapp_server_port, vserver=vserver_name or config.netapp_vserver, async_rest_timeout=config.netapp_rest_operation_timeout, trace=na_utils.TRACE_API, private_key_file=config.netapp_private_key_file, certificate_file=config.netapp_certificate_file, ca_certificate_file=config.netapp_ca_certificate_file, certificate_host_validation=( config.netapp_certificate_host_validation)) return client def get_client_for_host(host): """Returns a cluster client to the desired host.""" backend_name = share_utils.extract_host(host, level='backend_name') client = get_client_for_backend(backend_name) return client class DataMotionSession(object): def _get_backend_volume_name(self, config, share_obj): """Return the calculated backend name of the share. Uses the netapp_volume_name_template configuration value for the backend to calculate the volume name on the array for the share. 
""" volume_name = config.netapp_volume_name_template % { 'share_id': share_obj['id'].replace('-', '_')} return volume_name def _get_backend_qos_policy_group_name(self, share): """Get QoS policy name according to QoS policy group name template.""" __, config = self.get_backend_name_and_config_obj(share['host']) return config.netapp_qos_policy_group_name_template % { 'share_id': share['id'].replace('-', '_')} def _get_backend_snapmirror_policy_name_svm(self, share_server_id, backend_name): config = get_backend_configuration(backend_name) return (config.netapp_snapmirror_policy_name_svm_template % {'share_server_id': share_server_id.replace('-', '_')}) def get_vserver_from_share_server(self, share_server): backend_details = share_server.get('backend_details') if backend_details: return backend_details.get('vserver_name') def get_vserver_from_share(self, share_obj): share_server = share_obj.get('share_server') if share_server: return self.get_vserver_from_share_server(share_server) def get_backend_name_and_config_obj(self, host): backend_name = share_utils.extract_host(host, level='backend_name') config = get_backend_configuration(backend_name) return backend_name, config def get_backend_info_for_share(self, share_obj): backend_name, config = self.get_backend_name_and_config_obj( share_obj['host']) vserver = (self.get_vserver_from_share(share_obj) or config.netapp_vserver) volume_name = self._get_backend_volume_name(config, share_obj) return volume_name, vserver, backend_name def get_client_and_vserver_name(self, share_server): destination_host = share_server.get('host') vserver = self.get_vserver_from_share_server(share_server) backend, __ = self.get_backend_name_and_config_obj(destination_host) client = get_client_for_backend(backend, vserver_name=vserver) return client, vserver def get_snapmirrors(self, source_share_obj, dest_share_obj): dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = 
get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) snapmirrors = dest_client.get_snapmirrors( source_vserver=src_vserver, dest_vserver=dest_vserver, source_volume=src_volume_name, dest_volume=dest_volume_name, desired_attributes=['relationship-status', 'mirror-state', 'schedule', 'source-vserver', 'source-volume', 'last-transfer-end-timestamp', 'last-transfer-size', 'last-transfer-error']) return snapmirrors def create_snapmirror(self, source_share_obj, dest_share_obj, relationship_type, mount=False): """Sets up a SnapMirror relationship between two volumes. 1. Create SnapMirror relationship. 2. Initialize data transfer asynchronously. 3. Mount destination volume if requested. """ dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) # 1. Create SnapMirror relationship config = get_backend_configuration(dest_backend) schedule = config.netapp_snapmirror_schedule dest_client.create_snapmirror_vol(src_vserver, src_volume_name, dest_vserver, dest_volume_name, relationship_type, schedule=schedule) # 2. Initialize async transfer of the initial data dest_client.initialize_snapmirror_vol(src_vserver, src_volume_name, dest_vserver, dest_volume_name) # 3. Mount the destination volume and create a junction path if mount: replica_config = get_backend_configuration(dest_backend) self.wait_for_mount_replica( dest_client, dest_volume_name, timeout=replica_config.netapp_mount_replica_timeout) def delete_snapmirror(self, source_share_obj, dest_share_obj, release=True, relationship_info_only=False): """Ensures all information about a SnapMirror relationship is removed. 1. Abort snapmirror 2. Delete the snapmirror 3. 
Release snapmirror to cleanup snapmirror metadata and snapshots """ dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, src_backend = ( self.get_backend_info_for_share(source_share_obj)) # 1. Abort any ongoing transfers try: dest_client.abort_snapmirror_vol(src_vserver, src_volume_name, dest_vserver, dest_volume_name, clear_checkpoint=False) except netapp_api.NaApiError: # Snapmirror is already deleted pass # 2. Delete SnapMirror Relationship and cleanup destination snapshots try: dest_client.delete_snapmirror_vol(src_vserver, src_volume_name, dest_vserver, dest_volume_name) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or "(entry doesn't exist)" in e.message): LOG.info('No snapmirror relationship to delete') exc_context.reraise = False if release: # If the source is unreachable, do not perform the release try: src_client = get_client_for_backend(src_backend, vserver_name=src_vserver) except Exception: src_client = None # 3. 
Cleanup SnapMirror relationship on source if src_client: src_config = get_backend_configuration(src_backend) release_timeout = ( src_config.netapp_snapmirror_release_timeout) self.wait_for_snapmirror_release_vol( src_vserver, dest_vserver, src_volume_name, dest_volume_name, relationship_info_only, src_client, timeout=release_timeout) def update_snapmirror(self, source_share_obj, dest_share_obj): """Schedule a snapmirror update to happen on the backend.""" dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) # Update SnapMirror dest_client.update_snapmirror_vol(src_vserver, src_volume_name, dest_vserver, dest_volume_name) def quiesce_then_abort_svm(self, source_share_server, dest_share_server): source_client, source_vserver = self.get_client_and_vserver_name( source_share_server) dest_client, dest_vserver = self.get_client_and_vserver_name( dest_share_server) # 1. 
Attempt to quiesce, then abort dest_client.quiesce_snapmirror_svm(source_vserver, dest_vserver) dest_backend = share_utils.extract_host(dest_share_server['host'], level='backend_name') config = get_backend_configuration(dest_backend) retries = config.netapp_snapmirror_quiesce_timeout / 5 @utils.retry(retry_param=exception.ReplicationException, interval=5, retries=retries, backoff_rate=1) def wait_for_quiesced(): snapmirror = dest_client.get_snapmirrors_svm( source_vserver=source_vserver, dest_vserver=dest_vserver, desired_attributes=['relationship-status', 'mirror-state'] )[0] if snapmirror.get('relationship-status') not in ['quiesced', 'paused']: raise exception.ReplicationException( reason="Snapmirror relationship is not quiesced.") try: wait_for_quiesced() except exception.ReplicationException: dest_client.abort_snapmirror_svm(source_vserver, dest_vserver, clear_checkpoint=False) def quiesce_then_abort(self, source_share_obj, dest_share_obj, quiesce_wait_time=None): dest_volume, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) # 1. 
Attempt to quiesce, then abort dest_client.quiesce_snapmirror_vol(src_vserver, src_volume, dest_vserver, dest_volume) config = get_backend_configuration(dest_backend) timeout = ( quiesce_wait_time or config.netapp_snapmirror_quiesce_timeout) retries = int(timeout / 5) or 1 @utils.retry(retry_param=exception.ReplicationException, interval=5, retries=retries, backoff_rate=1) def wait_for_quiesced(): snapmirror = dest_client.get_snapmirrors( source_vserver=src_vserver, dest_vserver=dest_vserver, source_volume=src_volume, dest_volume=dest_volume, desired_attributes=['relationship-status', 'mirror-state'] )[0] if snapmirror.get('relationship-status') not in ['quiesced', 'paused']: raise exception.ReplicationException( reason="Snapmirror relationship is not quiesced.") try: wait_for_quiesced() except exception.ReplicationException: dest_client.abort_snapmirror_vol(src_vserver, src_volume, dest_vserver, dest_volume, clear_checkpoint=False) def break_snapmirror(self, source_share_obj, dest_share_obj, mount=True, quiesce_wait_time=None): """Breaks SnapMirror relationship. 1. Quiesce any ongoing snapmirror transfers 2. Wait until snapmirror finishes transfers and enters quiesced state 3. Break snapmirror 4. Mount the destination volume so it is exported as a share """ dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) # 1. Attempt to quiesce, then abort self.quiesce_then_abort(source_share_obj, dest_share_obj, quiesce_wait_time=quiesce_wait_time) # 2. Break SnapMirror dest_client.break_snapmirror_vol(src_vserver, src_volume_name, dest_vserver, dest_volume_name) # 3. 
Mount the destination volume and create a junction path if mount: dest_client.mount_volume(dest_volume_name) def resync_snapmirror(self, source_share_obj, dest_share_obj): """Resync SnapMirror relationship. """ dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) dest_client.resync_snapmirror_vol(src_vserver, src_volume_name, dest_vserver, dest_volume_name) def modify_snapmirror(self, source_share_obj, dest_share_obj, schedule=None): """Modify SnapMirror relationship: set schedule""" dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) if schedule is None: config = get_backend_configuration(dest_backend) schedule = config.netapp_snapmirror_schedule dest_client.modify_snapmirror_vol(src_vserver, src_volume_name, dest_vserver, dest_volume_name, schedule=schedule) def resume_snapmirror(self, source_share_obj, dest_share_obj): """Resume SnapMirror relationship from a quiesced state.""" dest_volume_name, dest_vserver, dest_backend = ( self.get_backend_info_for_share(dest_share_obj)) dest_client = get_client_for_backend(dest_backend, vserver_name=dest_vserver) src_volume_name, src_vserver, __ = self.get_backend_info_for_share( source_share_obj) dest_client.resume_snapmirror_vol(src_vserver, src_volume_name, dest_vserver, dest_volume_name) def change_snapmirror_source(self, replica, orig_source_replica, new_source_replica, replica_list, is_flexgroup=False): """Creates SnapMirror relationship from the new source to destination. 1. Delete all snapmirrors involving the replica, but maintain snapmirror metadata and snapshots for efficiency 2. 
For DHSS=True scenarios, creates a new vserver peer relationship if it does not exists 3. Ensure a new source -> replica snapmirror exists 4. Resync new source -> replica snapmirror relationship """ replica_volume_name, replica_vserver, replica_backend = ( self.get_backend_info_for_share(replica)) replica_client = get_client_for_backend(replica_backend, vserver_name=replica_vserver) new_src_volume_name, new_src_vserver, new_src_backend = ( self.get_backend_info_for_share(new_source_replica)) # 1. delete for other_replica in replica_list: if other_replica['id'] == replica['id']: continue # deletes all snapmirror relationships involving this replica to # ensure new relation can be set. For efficient snapmirror, it # does not remove the snapshots, only releasing the relationship # info if FlexGroup volume. self.delete_snapmirror(other_replica, replica, release=is_flexgroup, relationship_info_only=is_flexgroup) self.delete_snapmirror(replica, other_replica, release=is_flexgroup, relationship_info_only=is_flexgroup) # 2. vserver operations when driver handles share servers replica_config = get_backend_configuration(replica_backend) if (replica_config.driver_handles_share_servers and replica_vserver != new_src_vserver): # create vserver peering if does not exists if not replica_client.get_vserver_peers(replica_vserver, new_src_vserver): new_src_client = get_client_for_backend( new_src_backend, vserver_name=new_src_vserver) # Cluster name is needed for setting up the vserver peering new_src_cluster_name = new_src_client.get_cluster_name() replica_cluster_name = replica_client.get_cluster_name() replica_client.create_vserver_peer( replica_vserver, new_src_vserver, peer_cluster_name=new_src_cluster_name) if new_src_cluster_name != replica_cluster_name: new_src_client.accept_vserver_peer(new_src_vserver, replica_vserver) # 3. 
create relationship_type = na_utils.get_relationship_type(is_flexgroup) schedule = replica_config.netapp_snapmirror_schedule replica_client.create_snapmirror_vol(new_src_vserver, new_src_volume_name, replica_vserver, replica_volume_name, relationship_type, schedule=schedule) # 4. resync replica_client.resync_snapmirror_vol(new_src_vserver, new_src_volume_name, replica_vserver, replica_volume_name) @na_utils.trace def remove_qos_on_old_active_replica(self, orig_active_replica): old_active_replica_qos_policy = ( self._get_backend_qos_policy_group_name(orig_active_replica) ) replica_volume_name, replica_vserver, replica_backend = ( self.get_backend_info_for_share(orig_active_replica)) replica_client = get_client_for_backend( replica_backend, vserver_name=replica_vserver) try: replica_client.set_qos_policy_group_for_volume( replica_volume_name, 'none') replica_client.mark_qos_policy_group_for_deletion( old_active_replica_qos_policy) except exception.StorageCommunicationException: LOG.exception("Could not communicate with the backend " "for replica %s to unset QoS policy and mark " "the QoS policy group for deletion.", orig_active_replica['id']) def create_snapmirror_svm(self, source_share_server, dest_share_server): """Sets up a SnapMirror relationship between two vServers. 1. Create a SnapMirror policy for SVM DR 2. Create SnapMirror relationship 3. Initialize data transfer asynchronously """ dest_client, dest_vserver = self.get_client_and_vserver_name( dest_share_server) src_vserver = self.get_vserver_from_share_server(source_share_server) # 1: Create SnapMirror policy for SVM DR dest_backend_name = share_utils.extract_host(dest_share_server['host'], level='backend_name') policy_name = self._get_backend_snapmirror_policy_name_svm( dest_share_server['id'], dest_backend_name, ) dest_client.create_snapmirror_policy(policy_name) # 2. Create SnapMirror relationship dest_client.create_snapmirror_svm(src_vserver, dest_vserver, policy=policy_name, schedule='hourly') # 2. 
Initialize async transfer of the initial data dest_client.initialize_snapmirror_svm(src_vserver, dest_vserver) def get_snapmirrors_svm(self, source_share_server, dest_share_server): """Get SnapMirrors between two vServers.""" dest_client, dest_vserver = self.get_client_and_vserver_name( dest_share_server) src_vserver = self.get_vserver_from_share_server(source_share_server) snapmirrors = dest_client.get_snapmirrors_svm( source_vserver=src_vserver, dest_vserver=dest_vserver, desired_attributes=['relationship-status', 'mirror-state', 'last-transfer-end-timestamp']) return snapmirrors def get_snapmirror_destinations_svm(self, source_share_server, dest_share_server): """Get SnapMirrors between two vServers.""" dest_client, dest_vserver = self.get_client_and_vserver_name( dest_share_server) src_vserver = self.get_vserver_from_share_server(source_share_server) snapmirrors = dest_client.get_snapmirror_destinations_svm( source_vserver=src_vserver, dest_vserver=dest_vserver) return snapmirrors def update_snapmirror_svm(self, source_share_server, dest_share_server): """Schedule a SnapMirror update to happen on the backend.""" dest_client, dest_vserver = self.get_client_and_vserver_name( dest_share_server) src_vserver = self.get_vserver_from_share_server(source_share_server) # Update SnapMirror dest_client.update_snapmirror_svm(src_vserver, dest_vserver) def quiesce_and_break_snapmirror_svm(self, source_share_server, dest_share_server): """Abort and break a SnapMirror relationship between vServers. 1. Quiesce SnapMirror 2. Break SnapMirror """ dest_client, dest_vserver = self.get_client_and_vserver_name( dest_share_server) src_vserver = self.get_vserver_from_share_server(source_share_server) # 1. Attempt to quiesce, then abort self.quiesce_then_abort_svm(source_share_server, dest_share_server) # 2. 
Break SnapMirror dest_client.break_snapmirror_svm(src_vserver, dest_vserver) def cancel_snapmirror_svm(self, source_share_server, dest_share_server): """Cancels SnapMirror relationship between vServers.""" dest_backend = share_utils.extract_host(dest_share_server['host'], level='backend_name') dest_config = get_backend_configuration(dest_backend) server_timeout = ( dest_config.netapp_server_migration_state_change_timeout) dest_client, dest_vserver = self.get_client_and_vserver_name( dest_share_server) snapmirrors = self.get_snapmirrors_svm(source_share_server, dest_share_server) if snapmirrors: # 1. Attempt to quiesce and break snapmirror self.quiesce_and_break_snapmirror_svm(source_share_server, dest_share_server) # NOTE(dviroel): Lets wait until the destination vserver be # promoted to 'default' and state 'running', before starting # shutting down the source self.wait_for_vserver_state(dest_vserver, dest_client, subtype='default', state='running', operational_state='stopped', timeout=server_timeout) # 2. Delete SnapMirror self.delete_snapmirror_svm(source_share_server, dest_share_server) else: dest_info = dest_client.get_vserver_info(dest_vserver) if dest_info is None: # NOTE(dviroel): Nothing to cancel since the destination does # not exist. return if dest_info.get('subtype') == 'dp_destination': # NOTE(dviroel): Can be a corner case where no snapmirror # relationship was found but the destination vserver is stuck # in DP mode. We need to convert it to 'default' to release # its resources later. 
self.convert_svm_to_default_subtype(dest_vserver, dest_client, timeout=server_timeout) def convert_svm_to_default_subtype(self, vserver_name, client, is_dest_path=True, timeout=300): interval = 10 retries = (timeout / interval or 1) @utils.retry(retry_param=exception.VserverNotReady, interval=interval, retries=retries, backoff_rate=1) def wait_for_state(): vserver_info = client.get_vserver_info(vserver_name) if vserver_info.get('subtype') != 'default': if is_dest_path: client.break_snapmirror_svm(dest_vserver=vserver_name) else: client.break_snapmirror_svm(source_vserver=vserver_name) raise exception.VserverNotReady(vserver=vserver_name) try: wait_for_state() except exception.VserverNotReady: msg = _("Vserver %s did not reach the expected state. Retries " "exhausted. Aborting.") % vserver_name raise exception.NetAppException(message=msg) def delete_snapmirror_svm(self, src_share_server, dest_share_server, release=True): """Ensures all information about a SnapMirror relationship is removed. 1. Abort SnapMirror 2. Delete the SnapMirror 3. Release SnapMirror to cleanup SnapMirror metadata and snapshots """ src_client, src_vserver = self.get_client_and_vserver_name( src_share_server) dest_client, dest_vserver = self.get_client_and_vserver_name( dest_share_server) # 1. Abort any ongoing transfers try: dest_client.abort_snapmirror_svm(src_vserver, dest_vserver) except netapp_api.NaApiError: # SnapMirror is already deleted pass # 2. Delete SnapMirror Relationship and cleanup destination snapshots try: dest_client.delete_snapmirror_svm(src_vserver, dest_vserver) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or "(entry doesn't exist)" in e.message): LOG.info('No snapmirror relationship to delete') exc_context.reraise = False # 3. 
Release SnapMirror if release: src_backend = share_utils.extract_host(src_share_server['host'], level='backend_name') src_config = get_backend_configuration(src_backend) release_timeout = ( src_config.netapp_snapmirror_release_timeout) self.wait_for_snapmirror_release_svm(src_vserver, dest_vserver, src_client, timeout=release_timeout) def wait_for_vserver_state(self, vserver_name, client, state=None, operational_state=None, subtype=None, timeout=300): interval = 10 retries = (timeout / interval or 1) expected = {} if state: expected['state'] = state if operational_state: expected['operational_state'] = operational_state if subtype: expected['subtype'] = subtype @utils.retry(retry_param=exception.VserverNotReady, interval=interval, retries=retries, backoff_rate=1) def wait_for_state(): vserver_info = client.get_vserver_info(vserver_name) if not all(item in vserver_info.items() for item in expected.items()): raise exception.VserverNotReady(vserver=vserver_name) try: wait_for_state() except exception.VserverNotReady: msg = _("Vserver %s did not reach the expected state. Retries " "exhausted. 
Aborting.") % vserver_name raise exception.NetAppException(message=msg) def wait_for_snapmirror_release_svm(self, source_vserver, dest_vserver, src_client, timeout=300): interval = 10 retries = (timeout / interval or 1) @utils.retry(retry_param=exception.NetAppException, interval=interval, retries=retries, backoff_rate=1) def release_snapmirror(): snapmirrors = src_client.get_snapmirror_destinations_svm( source_vserver=source_vserver, dest_vserver=dest_vserver) if not snapmirrors: LOG.debug("No snapmirrors to be released in source location.") else: try: src_client.release_snapmirror_svm(source_vserver, dest_vserver) except netapp_api.NaApiError as e: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or "(entry doesn't exist)" in e.message): LOG.debug('Snapmirror relationship does not exists ' 'anymore.') msg = _('Snapmirror release sent to source vserver. We will ' 'wait for it to be released.') raise exception.NetAppException(vserver=msg) try: release_snapmirror() except exception.NetAppException: msg = _("Unable to release the snapmirror from source vserver %s. " "Retries exhausted. Aborting") % source_vserver raise exception.NetAppException(message=msg) def wait_for_mount_replica(self, vserver_client, share_name, timeout=300): """Mount a replica share that is waiting for snapmirror initialize.""" interval = 10 retries = (timeout // interval or 1) @utils.retry(exception.ShareBusyException, interval=interval, retries=retries, backoff_rate=1) def try_mount_volume(): try: vserver_client.mount_volume(share_name) except netapp_api.NaApiError as e: undergoing_snap_init = 'snapmirror initialize' msg_args = {'name': share_name} if (e.code == netapp_api.EAPIERROR and undergoing_snap_init in e.message): msg = _('The share %(name)s is undergoing a snapmirror ' 'initialize. 
Will retry the operation.') % msg_args LOG.warning(msg) raise exception.ShareBusyException(reason=msg) else: msg = _("Unable to perform mount operation for the share " "%(name)s. Caught an unexpected error. Not " "retrying.") % msg_args raise exception.NetAppException(message=msg) try: try_mount_volume() except exception.ShareBusyException: msg_args = {'name': share_name} msg = _("Unable to perform mount operation for the share %(name)s " "because a snapmirror initialize operation is still in " "progress. Retries exhausted. Not retrying.") % msg_args raise exception.NetAppException(message=msg) def wait_for_snapmirror_release_vol(self, src_vserver, dest_vserver, src_volume_name, dest_volume_name, relationship_info_only, src_client, timeout=300): interval = 10 retries = (timeout / interval or 1) @utils.retry(exception.NetAppException, interval=interval, retries=retries, backoff_rate=1) def release_snapmirror(): snapmirrors = src_client.get_snapmirror_destinations( source_vserver=src_vserver, dest_vserver=dest_vserver, source_volume=src_volume_name, dest_volume=dest_volume_name) if not snapmirrors: LOG.debug("No snapmirrors to be released in source volume.") else: try: src_client.release_snapmirror_vol( src_vserver, src_volume_name, dest_vserver, dest_volume_name, relationship_info_only=relationship_info_only) except netapp_api.NaApiError as e: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or "(entry doesn't exist)" in e.message): LOG.debug('Snapmirror relationship does not exist ' 'anymore.') msg = _('Snapmirror release sent to source volume. Waiting ' 'until it has been released.') raise exception.NetAppException(vserver=msg) try: release_snapmirror() except exception.NetAppException: msg = _("Unable to release the snapmirror from source volume %s. " "Retries exhausted. 
Aborting") % src_volume_name raise exception.NetAppException(message=msg) def cleanup_previous_snapmirror_relationships(self, replica, replica_list): """Cleanup previous snapmirrors relationships for replica.""" LOG.debug("Cleaning up old snapmirror relationships for replica %s.", replica['id']) src_vol_name, src_vserver, src_backend = ( self.get_backend_info_for_share(replica)) src_client = get_client_for_backend(src_backend, vserver_name=src_vserver) # replica_list may contain the replica we are trying to clean up destinations = (r for r in replica_list if r['id'] != replica['id']) for destination in destinations: dest_vol_name, dest_vserver, _ = ( self.get_backend_info_for_share(destination)) try: src_client.release_snapmirror_vol( src_vserver, src_vol_name, dest_vserver, dest_vol_name) except netapp_api.NaApiError as e: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or "(entry doesn't exist)" in e.message): LOG.debug( 'Snapmirror destination %s no longer exists for ' 'replica %s.', destination['id'], replica['id']) else: LOG.exception( 'Error releasing snapmirror destination %s for ' 'replica %s.', destination['id'], replica['id']) def get_most_available_aggr_of_vserver(self, vserver_client): """Get most available aggregate""" aggrs_space_attr = vserver_client.get_vserver_aggregate_capacities() if not aggrs_space_attr: return None aggr_list = list(aggrs_space_attr.keys()) most_available_aggr = aggr_list[0] for aggr in aggr_list: if (aggrs_space_attr.get(aggr).get('available') > aggrs_space_attr.get( most_available_aggr).get('available')): most_available_aggr = aggr return most_available_aggr def initialize_and_wait_snapmirror_vol(self, vserver_client, source_vserver, source_volume, dest_vserver, dest_volume, source_snapshot=None, transfer_priority=None, timeout=300): """Initialize and wait for SnapMirror relationship""" interval = 10 retries = (timeout / interval or 1) vserver_client.initialize_snapmirror_vol( 
source_vserver, source_volume, dest_vserver, dest_volume, source_snapshot=source_snapshot, transfer_priority=transfer_priority, ) @utils.retry(exception.NetAppException, interval=interval, retries=retries, backoff_rate=1) def wait_for_initialization(): source_path = f"{source_vserver}:{source_volume}" des_path = f"{dest_vserver}:{dest_volume}" snapmirror_info = vserver_client.get_snapmirrors( source_path=source_path, dest_path=des_path) relationship_status = snapmirror_info[0].get("relationship-status") if relationship_status == "idle": return else: msg = (_('Snapmirror relationship status is: %s. Waiting ' 'until it has been initialized.') % relationship_status) raise exception.NetAppException(message=msg) try: wait_for_initialization() except exception.NetAppException: msg = _("Timed out while wait for SnapMirror relationship to " "be initialized") raise exception.NetAppException(message=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py0000664000175000017500000004752200000000000030304 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT multi-SVM storage driver. This driver requires a Data ONTAP (Cluster-mode) storage system with installed CIFS and/or NFS licenses, as well as a FlexClone license. 
This driver manages share servers, meaning it creates Data ONTAP storage virtual machines (i.e. 'vservers') for each share network for provisioning shares. This driver supports NFS & CIFS protocols. """ from manila.share import driver from manila.share.drivers.netapp.dataontap.cluster_mode import lib_multi_svm class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver): """NetApp Cluster-mode multi-SVM share driver.""" DRIVER_NAME = 'NetApp_Cluster_MultiSVM' def __init__(self, *args, **kwargs): super(NetAppCmodeMultiSvmShareDriver, self).__init__( True, *args, **kwargs) self.library = lib_multi_svm.NetAppCmodeMultiSVMFileStorageLibrary( self.DRIVER_NAME, **kwargs) # NetApp driver supports updating security service for in use share # networks. self.security_service_update_support = True self.dhss_mandatory_security_service_association = { 'nfs': None, 'cifs': ['active_directory', ] } # NetApp driver supports multiple subnets including update existing # share servers. self.network_allocation_update_support = True self.share_replicas_migration_support = True # NetApp driver supports share server encryption and enables encryption # on the created share. 
self.encryption_support = ["share_server"] def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def get_pool(self, share): return self.library.get_pool(share) def create_share(self, context, share, **kwargs): return self.library.create_share(context, share, **kwargs) def create_share_from_snapshot(self, context, share, snapshot, **kwargs): return self.library.create_share_from_snapshot(context, share, snapshot, **kwargs) def create_snapshot(self, context, snapshot, **kwargs): return self.library.create_snapshot(context, snapshot, **kwargs) def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, **kwargs): return self.library.revert_to_snapshot(context, snapshot, **kwargs) def delete_share(self, context, share, **kwargs): self.library.delete_share(context, share, **kwargs) def delete_snapshot(self, context, snapshot, **kwargs): self.library.delete_snapshot(context, snapshot, **kwargs) def extend_share(self, share, new_size, **kwargs): self.library.extend_share(share, new_size, **kwargs) def shrink_share(self, share, new_size, **kwargs): self.library.shrink_share(share, new_size, **kwargs) def manage_existing(self, share, driver_options): raise NotImplementedError def unmanage(self, share): raise NotImplementedError def manage_existing_snapshot(self, snapshot, driver_options): raise NotImplementedError def unmanage_snapshot(self, snapshot): raise NotImplementedError def manage_existing_with_server( self, share, driver_options, share_server=None): return self.library.manage_existing( share, driver_options, share_server=share_server) def unmanage_with_server(self, share, share_server=None): self.library.unmanage(share, share_server=share_server) def manage_existing_snapshot_with_server( self, snapshot, driver_options, share_server=None): return self.library.manage_existing_snapshot( snapshot, driver_options, share_server=share_server) def 
unmanage_snapshot_with_server(self, snapshot, share_server=None): self.library.unmanage_snapshot(snapshot, share_server=share_server) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, **kwargs): self.library.update_access(context, share, access_rules, add_rules, delete_rules, update_rules, **kwargs) def _update_share_stats(self, data=None): data = self.library.get_share_stats( get_filter_function=self.get_filter_function, goodness_function=self.get_goodness_function()) super(NetAppCmodeMultiSvmShareDriver, self)._update_share_stats( data=data) def get_default_filter_function(self, pool=None): return self.library.get_default_filter_function(pool=pool) def get_default_goodness_function(self): return self.library.get_default_goodness_function() def get_share_server_pools(self, share_server): return self.library.get_share_server_pools(share_server) def get_network_allocations_number(self): return self.library.get_network_allocations_number() def get_admin_network_allocations_number(self): return self.library.get_admin_network_allocations_number( self.admin_network_api) def _setup_server(self, network_info, metadata=None): return self.library.setup_server(network_info, metadata) def _teardown_server(self, server_details, **kwargs): self.library.teardown_server(server_details, **kwargs) def create_replica(self, context, replica_list, new_replica, access_rules, replica_snapshots, **kwargs): return self.library.create_replica(context, replica_list, new_replica, access_rules, replica_snapshots) def delete_replica(self, context, replica_list, replica_snapshots, replica, **kwargs): self.library.delete_replica(context, replica_list, replica, replica_snapshots) def promote_replica(self, context, replica_list, replica, access_rules, share_server=None, quiesce_wait_time=None): return self.library.promote_replica( context, replica_list, replica, access_rules, share_server=share_server, quiesce_wait_time=quiesce_wait_time) def 
update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): return self.library.update_replica_state(context, replica_list, replica, access_rules, replica_snapshots, share_server) def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): return self.library.create_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): return self.library.delete_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): return self.library.update_replicated_snapshot( replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=share_server) def revert_to_replicated_snapshot(self, context, active_replica, replica_list, active_replica_snapshot, replica_snapshots, share_access_rules, snapshot_access_rules, **kwargs): return self.library.revert_to_replicated_snapshot( context, active_replica, replica_list, active_replica_snapshot, replica_snapshots, **kwargs) def migration_check_compatibility(self, context, source_share, destination_share, share_server=None, destination_share_server=None): return self.library.migration_check_compatibility( context, source_share, destination_share, share_server=share_server, destination_share_server=destination_share_server) def migration_start(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_start( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_continue(self, context, source_share, destination_share, 
source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_continue( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_get_progress(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_get_progress( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_cancel(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_cancel( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_complete(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_complete( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def create_share_group_snapshot(self, context, snap_dict, share_server=None): fallback_create = super(NetAppCmodeMultiSvmShareDriver, self).create_share_group_snapshot return self.library.create_group_snapshot(context, snap_dict, fallback_create, share_server) def delete_share_group_snapshot(self, context, snap_dict, share_server=None): fallback_delete = super(NetAppCmodeMultiSvmShareDriver, self).delete_share_group_snapshot return self.library.delete_group_snapshot(context, snap_dict, fallback_delete, share_server) def create_share_group_from_share_group_snapshot( self, context, share_group_dict, snapshot_dict, share_server=None): 
fallback_create = super( NetAppCmodeMultiSvmShareDriver, self).create_share_group_from_share_group_snapshot return self.library.create_group_from_snapshot(context, share_group_dict, snapshot_dict, fallback_create, share_server) def get_configured_ip_versions(self): return self.library.get_configured_ip_versions() def get_backend_info(self, context): return self.library.get_backend_info(context) def ensure_shares(self, context, shares): return self.library.ensure_shares(context, shares) def get_share_server_network_info( self, context, share_server, identifier, driver_options): return self.library.get_share_server_network_info( context, share_server, identifier, driver_options) def manage_server(self, context, share_server, identifier, driver_options): return self.library.manage_server( context, share_server, identifier, driver_options) def unmanage_server(self, server_details, security_services=None): return self.library.unmanage_server(server_details, security_services) def get_share_status(self, share_instance, share_server=None): return self.library.get_share_status(share_instance, share_server) def share_server_migration_check_compatibility( self, context, share_server, dest_host, old_share_network, new_share_network, shares_request_spec): return self.library.share_server_migration_check_compatibility( context, share_server, dest_host, old_share_network, new_share_network, shares_request_spec) def share_server_migration_start(self, context, src_share_server, dest_share_server, shares, snapshots): return self.library.share_server_migration_start( context, src_share_server, dest_share_server, shares, snapshots) def share_server_migration_continue(self, context, src_share_server, dest_share_server, shares, snapshots): return self.library.share_server_migration_continue( context, src_share_server, dest_share_server, shares, snapshots) def share_server_migration_complete(self, context, src_share_server, dest_share_server, shares, snapshots, new_network_info): return 
self.library.share_server_migration_complete( context, src_share_server, dest_share_server, shares, snapshots, new_network_info) def share_server_migration_cancel(self, context, src_share_server, dest_share_server, shares, snapshots): self.library.share_server_migration_cancel( context, src_share_server, dest_share_server, shares, snapshots) def choose_share_server_compatible_with_share(self, context, share_servers, share, snapshot=None, share_group=None, encryption_key_ref=None): return self.library.choose_share_server_compatible_with_share( context, share_servers, share, snapshot=snapshot, share_group=share_group, encryption_key_ref=encryption_key_ref) def choose_share_server_compatible_with_share_group( self, context, share_servers, share_group_ref, share_group_snapshot=None): return self.library.choose_share_server_compatible_with_share_group( context, share_servers, share_group_ref, share_group_snapshot=share_group_snapshot) def share_server_migration_get_progress(self, context, src_share_server, dest_share_server, shares, snapshots): return self.library.share_server_migration_get_progress( context, src_share_server, dest_share_server, shares, snapshots) def update_share_server_security_service( self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): return self.library.update_share_server_security_service( context, share_server, network_info, new_security_service, current_security_service=current_security_service) def check_update_share_server_security_service( self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): return self.library.check_update_share_server_security_service( context, share_server, network_info, new_security_service, current_security_service=current_security_service) def check_update_share_server_network_allocations( self, context, share_server, current_network_allocations, 
new_share_network_subnet, security_services, share_instances, share_instances_rules): return self.library.check_update_share_server_network_allocations( context, share_server, current_network_allocations, new_share_network_subnet, security_services, share_instances, share_instances_rules) def update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_network_allocations, security_services, shares, snapshots): return self.library.update_share_server_network_allocations( context, share_server, current_network_allocations, new_network_allocations, security_services, shares, snapshots) def create_backup(self, context, share, backup, **kwargs): return self.library.create_backup(context, share, backup, **kwargs) def create_backup_continue(self, context, share, backup, **kwargs): return self.library.create_backup_continue(context, share, backup, **kwargs) def restore_backup(self, context, backup, share, **kwargs): return self.library.restore_backup(context, backup, share, **kwargs) def restore_backup_continue(self, context, backup, share, **kwargs): return self.library.restore_backup_continue(context, backup, share, **kwargs) def delete_backup(self, context, backup, share, **kwargs): return self.library.delete_backup(context, backup, share, **kwargs) def update_share_from_metadata(self, context, share, metadata, share_server=None): self.library.update_share_from_metadata( context, share, metadata, share_server=share_server) def update_share_network_subnet_from_metadata(self, context, share_network, share_network_subnet, share_server, metadata): self.library.update_share_network_subnet_from_metadata( context, share_network, share_network_subnet, share_server, metadata) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py0000664000175000017500000004216300000000000030427 
0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT single-SVM storage driver. This driver requires a Data ONTAP (Cluster-mode) storage system with installed CIFS and/or NFS licenses, as well as a FlexClone license. This driver does not manage share servers, meaning it uses a single Data ONTAP storage virtual machine (i.e. 'vserver') as defined in manila.conf to provision shares. This driver supports NFS & CIFS protocols. 
""" from manila.share import driver from manila.share.drivers.netapp.dataontap.cluster_mode import lib_single_svm class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver): """NetApp Cluster-mode single-SVM share driver.""" DRIVER_NAME = 'NetApp_Cluster_SingleSVM' def __init__(self, *args, **kwargs): super(NetAppCmodeSingleSvmShareDriver, self).__init__( False, *args, **kwargs) self.library = lib_single_svm.NetAppCmodeSingleSVMFileStorageLibrary( self.DRIVER_NAME, **kwargs) self.dhss_mandatory_security_service_association = {} def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def get_pool(self, share): return self.library.get_pool(share) def create_share(self, context, share, **kwargs): return self.library.create_share(context, share, **kwargs) def create_share_from_snapshot(self, context, share, snapshot, **kwargs): return self.library.create_share_from_snapshot(context, share, snapshot, **kwargs) def create_snapshot(self, context, snapshot, **kwargs): return self.library.create_snapshot(context, snapshot, **kwargs) def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, **kwargs): return self.library.revert_to_snapshot(context, snapshot, **kwargs) def delete_share(self, context, share, **kwargs): self.library.delete_share(context, share, **kwargs) def delete_snapshot(self, context, snapshot, **kwargs): self.library.delete_snapshot(context, snapshot, **kwargs) def extend_share(self, share, new_size, **kwargs): self.library.extend_share(share, new_size, **kwargs) def shrink_share(self, share, new_size, **kwargs): self.library.shrink_share(share, new_size, **kwargs) def manage_existing(self, share, driver_options): return self.library.manage_existing(share, driver_options) def unmanage(self, share): self.library.unmanage(share) def manage_existing_snapshot(self, snapshot, driver_options): return self.library.manage_existing_snapshot(snapshot, 
driver_options) def unmanage_snapshot(self, snapshot): self.library.unmanage_snapshot(snapshot) def manage_existing_with_server( self, share, driver_options, share_server=None): raise NotImplementedError def unmanage_with_server(self, share, share_server=None): raise NotImplementedError def manage_existing_snapshot_with_server( self, snapshot, driver_options, share_server=None): raise NotImplementedError def unmanage_snapshot_with_server(self, snapshot, share_server=None): raise NotImplementedError def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, **kwargs): self.library.update_access(context, share, access_rules, add_rules, delete_rules, update_rules, **kwargs) def _update_share_stats(self, data=None): data = self.library.get_share_stats( get_filter_function=self.get_filter_function, goodness_function=self.get_goodness_function()) super(NetAppCmodeSingleSvmShareDriver, self)._update_share_stats( data=data) def get_default_filter_function(self, pool=None): return self.library.get_default_filter_function(pool=pool) def get_default_goodness_function(self): return self.library.get_default_goodness_function() def get_share_server_pools(self, share_server): return self.library.get_share_server_pools(share_server) def get_network_allocations_number(self): return self.library.get_network_allocations_number() def get_admin_network_allocations_number(self): return self.library.get_admin_network_allocations_number() def _setup_server(self, network_info, metadata=None): return self.library.setup_server(network_info, metadata) def _teardown_server(self, server_details, **kwargs): self.library.teardown_server(server_details, **kwargs) def create_replica(self, context, replica_list, replica, access_rules, replica_snapshots, **kwargs): return self.library.create_replica(context, replica_list, replica, access_rules, replica_snapshots, **kwargs) def delete_replica(self, context, replica_list, replica_snapshots, replica, **kwargs): 
self.library.delete_replica(context, replica_list, replica, replica_snapshots, **kwargs) def promote_replica(self, context, replica_list, replica, access_rules, share_server=None, quiesce_wait_time=None): return self.library.promote_replica( context, replica_list, replica, access_rules, share_server=share_server, quiesce_wait_time=quiesce_wait_time) def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): return self.library.update_replica_state(context, replica_list, replica, access_rules, replica_snapshots, share_server=share_server) def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): return self.library.create_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): return self.library.delete_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): return self.library.update_replicated_snapshot( replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=share_server) def revert_to_replicated_snapshot(self, context, active_replica, replica_list, active_replica_snapshot, replica_snapshots, share_access_rules, snapshot_access_rules, **kwargs): return self.library.revert_to_replicated_snapshot( context, active_replica, replica_list, active_replica_snapshot, replica_snapshots, **kwargs) def migration_check_compatibility(self, context, source_share, destination_share, share_server=None, destination_share_server=None): return self.library.migration_check_compatibility( context, source_share, destination_share, share_server=share_server, destination_share_server=destination_share_server) def migration_start(self, context, source_share, destination_share, 
source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_start( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_continue(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_continue( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_get_progress(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_get_progress( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_cancel(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_cancel( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def migration_complete(self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): return self.library.migration_complete( context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=share_server, destination_share_server=destination_share_server) def create_share_group_snapshot(self, context, snap_dict, share_server=None): fallback_create = super(NetAppCmodeSingleSvmShareDriver, self).create_share_group_snapshot return self.library.create_group_snapshot(context, snap_dict, fallback_create, share_server) def 
delete_share_group_snapshot(self, context, snap_dict, share_server=None): fallback_delete = super(NetAppCmodeSingleSvmShareDriver, self).delete_share_group_snapshot return self.library.delete_group_snapshot(context, snap_dict, fallback_delete, share_server) def create_share_group_from_share_group_snapshot( self, context, share_group_dict, snapshot_dict, share_server=None): fallback_create = super( NetAppCmodeSingleSvmShareDriver, self).create_share_group_from_share_group_snapshot return self.library.create_group_from_snapshot(context, share_group_dict, snapshot_dict, fallback_create, share_server) def get_configured_ip_versions(self): return self.library.get_configured_ip_versions() def get_backend_info(self, context): return self.library.get_backend_info(context) def ensure_shares(self, context, shares): return self.library.ensure_shares(context, shares) def get_share_server_network_info( self, context, share_server, identifier, driver_options): raise NotImplementedError def manage_server(self, context, share_server, identifier, driver_options): raise NotImplementedError def unmanage_server(self, server_details, security_services=None): raise NotImplementedError def get_share_status(self, share_instance, share_server=None): return self.library.get_share_status(share_instance, share_server) def share_server_migration_start(self, context, src_share_server, dest_share_server, shares, snapshots): raise NotImplementedError def share_server_migration_continue(self, context, src_share_server, dest_share_server, shares, snapshots): raise NotImplementedError def share_server_migration_complete(self, context, src_share_server, dest_share_server, shares, snapshots, new_network_info): raise NotImplementedError def share_server_migration_cancel(self, context, src_share_server, dest_share_server, shares, snapshots): raise NotImplementedError def share_server_migration_check_compatibility( self, context, share_server, dest_host, old_share_network, new_share_network, 
shares_request_spec): raise NotImplementedError def share_server_migration_get_progress(self, context, src_share_server, dest_share_server): raise NotImplementedError def choose_share_server_compatible_with_share(self, context, share_servers, share, snapshot=None, share_group=None, encryption_key_ref=None): raise NotImplementedError def choose_share_server_compatible_with_share_group( self, context, share_servers, share_group_ref, share_group_snapshot=None): raise NotImplementedError def update_share_server_security_service( self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): raise NotImplementedError def check_update_share_server_security_service( self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): raise NotImplementedError def check_update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_share_network_subnet, security_services, share_instances, share_instances_rules): raise NotImplementedError def update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_network_allocations, security_services, shares, snapshots): raise NotImplementedError def create_backup(self, context, share, backup, **kwargs): return self.library.create_backup(context, share, backup, **kwargs) def create_backup_continue(self, context, share, backup, **kwargs): return self.library.create_backup_continue(context, share, backup, **kwargs) def restore_backup(self, context, backup, share, **kwargs): return self.library.restore_backup(context, backup, share, **kwargs) def restore_backup_continue(self, context, backup, share, **kwargs): return self.library.restore_backup_continue(context, backup, share, **kwargs) def delete_backup(self, context, backup, share, **kwargs): return self.library.delete_backup(context, backup, share, **kwargs) def 
update_share_from_metadata(self, context, share, metadata, share_server=None): self.library.update_share_from_metadata( context, share, metadata, share_server=share_server) def update_share_network_subnet_from_metadata(self, context, share_network, share_network_subnet, share_server, metadata): raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py0000664000175000017500000074476300000000000027165 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT base storage driver library. This library is the abstract base for subclasses that complete the single-SVM or multi-SVM functionality needed by the cDOT Manila drivers. 
""" import copy import datetime from enum import Enum import json import math import re import socket from manila.exception import SnapshotResourceNotFound from oslo_config import cfg from oslo_log import log from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils from manila.common import constants from manila import coordination from manila import exception from manila.i18n import _ from manila.message import api as message_api from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.client import client_cmode_rest from manila.share.drivers.netapp.dataontap.client import rest_api as rest_api from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp.dataontap.cluster_mode import performance from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode from manila.share.drivers.netapp import options as na_opts from manila.share.drivers.netapp import utils as na_utils from manila.share import share_types from manila.share import utils as share_utils from manila import utils as manila_utils LOG = log.getLogger(__name__) CONF = cfg.CONF class Backup(Enum): """Enum for share backup""" BACKUP_TYPE = "backup_type" BACKEND_NAME = "netapp_backup_backend_section_name" DES_VSERVER = "netapp_backup_vserver" DES_VOLUME = "netapp_backup_volume" SM_LABEL = "backup" DES_VSERVER_PREFIX = "backup" DES_VOLUME_PREFIX = "backup_volume" VOLUME_TYPE = "dp" SM_POLICY = "os_backup_policy" TOTAL_PROGRESS_HUNDRED = "100" TOTAL_PROGRESS_ZERO = "0" class NetAppCmodeFileStorageLibrary(object): AUTOSUPPORT_INTERVAL_SECONDS = 3600 # hourly SSC_UPDATE_INTERVAL_SECONDS = 3600 # hourly HOUSEKEEPING_INTERVAL_SECONDS = 600 # ten minutes 
SUPPORTED_PROTOCOLS = ('nfs', 'cifs') DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70' DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization' DEFAULT_FLEXGROUP_FILTER_FUNCTION = 'share.size >= %s' # ONTAP requires 100G per FlexGroup member, since the driver is deploying # with default number of members (four), the min size is 400G. FLEXGROUP_MIN_SIZE_PER_AGGR = 400 # Internal states when dealing with data motion STATE_SPLITTING_VOLUME_CLONE = 'splitting_volume_clone' STATE_MOVING_VOLUME = 'moving_volume' STATE_SNAPMIRROR_DATA_COPYING = 'snapmirror_data_copying' # Maximum number of FPolicis per vServer FPOLICY_MAX_VSERVER_POLICIES = 10 # Maps NetApp qualified extra specs keys to corresponding backend API # client library argument keywords. When we expose more backend # capabilities here, we will add them to this map. BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP = { 'netapp:thin_provisioned': 'thin_provisioned', 'netapp:dedup': 'dedup_enabled', 'netapp:compression': 'compression_enabled', 'netapp:split_clone_on_create': 'split', 'netapp:hide_snapdir': 'hide_snapdir', } STRING_QUALIFIED_EXTRA_SPECS_MAP = { 'netapp:snapshot_policy': 'snapshot_policy', 'netapp:language': 'language', 'netapp:max_files': 'max_files', 'netapp:max_files_multiplier': 'max_files_multiplier', 'netapp:adaptive_qos_policy_group': 'adaptive_qos_policy_group', 'netapp:fpolicy_extensions_to_include': 'fpolicy_extensions_to_include', 'netapp:fpolicy_extensions_to_exclude': 'fpolicy_extensions_to_exclude', 'netapp:fpolicy_file_operations': 'fpolicy_file_operations', 'netapp:efficiency_policy': 'efficiency_policy', 'netapp_snaplock_type': 'snaplock_type', 'netapp:snaplock_autocommit_period': 'snaplock_autocommit_period', 'netapp:snaplock_min_retention_period': 'snaplock_min_retention_period', 'netapp:snaplock_max_retention_period': 'snaplock_max_retention_period', 'netapp:snaplock_default_retention_period': 'snaplock_default_retention_period', } # Maps standard extra spec keys to legacy NetApp 
keys STANDARD_BOOLEAN_EXTRA_SPECS_MAP = { 'thin_provisioning': 'netapp:thin_provisioned', 'dedupe': 'netapp:dedup', 'compression': 'netapp:compression', } QOS_SPECS = { 'netapp:maxiops': 'maxiops', 'netapp:maxiopspergib': 'maxiopspergib', 'netapp:maxbps': 'maxbps', 'netapp:maxbpspergib': 'maxbpspergib', } HIDE_SNAPDIR_CFG_MAP = { 'visible': False, 'hidden': True, 'default': None, } SIZE_DEPENDENT_QOS_SPECS = {'maxiopspergib', 'maxbpspergib'} # Maps the NFS config used by share-servers NFS_CONFIG_EXTRA_SPECS_MAP = { 'netapp:tcp_max_xfer_size': 'tcp-max-xfer-size', 'netapp:udp_max_xfer_size': 'udp-max-xfer-size', } FPOLICY_FILE_OPERATIONS_LIST = [ 'close', 'create', 'create_dir', 'delete', 'delete_dir', 'getattr', 'link', 'lookup', 'open', 'read', 'write', 'rename', 'rename_dir', 'setattr', 'symlink'] SNAPLOCK_TYPE = ['compliance', 'enterprise'] def __init__(self, driver_name, **kwargs): na_utils.validate_driver_instantiation(**kwargs) self.driver_name = driver_name self.private_storage = kwargs['private_storage'] self.configuration = kwargs['configuration'] self.configuration.append_config_values(na_opts.netapp_connection_opts) self.configuration.append_config_values(na_opts.netapp_basicauth_opts) self.configuration.append_config_values( na_opts.netapp_certificateauth_opts) self.configuration.append_config_values(na_opts.netapp_transport_opts) self.configuration.append_config_values(na_opts.netapp_support_opts) self.configuration.append_config_values(na_opts.netapp_cluster_opts) self.configuration.append_config_values( na_opts.netapp_provisioning_opts) self.configuration.append_config_values( na_opts.netapp_data_motion_opts) self._licenses = [] self._client = None self._clients = {} self._backend_clients = {} self._ssc_stats = {} self._have_cluster_creds = None self._revert_to_snapshot_support = False self._cluster_info = {} self._default_nfs_config = None self.is_nfs_config_supported = False self._cache_pool_status = None self._flexgroup_pools = {} 
self._is_flexgroup_auto = False self._is_snaplock_compliance_configured = False self._app_version = kwargs.get('app_version', 'unknown') na_utils.setup_tracing(self.configuration.netapp_trace_flags, self.configuration.netapp_api_trace_pattern) self._backend_name = self.configuration.safe_get( 'share_backend_name') or driver_name self.message_api = message_api.API() self._snapmirror_schedule = self._convert_schedule_to_seconds( schedule=self.configuration.netapp_snapmirror_schedule) self._cluster_name = self.configuration.netapp_cluster_name self.is_volume_backup_before = False @na_utils.trace def do_setup(self, context): self._client = self._get_api_client() self._have_cluster_creds = self._client.check_for_cluster_credentials() if self._have_cluster_creds is True: self._set_cluster_info() # Set SnapLock compliance clock configured on both the nodes nodes = self._client.list_cluster_nodes() for node in nodes: self._is_snaplock_compliance_configured = ( self._client.is_snaplock_compliance_clock_configured(node) ) if not self._is_snaplock_compliance_configured: break self._licenses = self._get_licenses() self._revert_to_snapshot_support = self._check_snaprestore_license() # Performance monitoring library self._perf_library = performance.PerformanceLibrary(self._client) # NOTE(felipe_rodrigues): In case adding a parameter that can be # configured in old versions too, the "is_nfs_config_supported" should # be removed (always supporting), adding the logic of skipping the # transfer limit parameters when building the server nfs_config. 
if self._client.features.TRANSFER_LIMIT_NFS_CONFIG: self.is_nfs_config_supported = True self._default_nfs_config = self._client.get_nfs_config_default( list(self.NFS_CONFIG_EXTRA_SPECS_MAP.values())) LOG.debug('The default NFS configuration: %s', self._default_nfs_config) self._cache_pool_status = na_utils.DataCache( self.configuration.netapp_cached_aggregates_status_lifetime) @na_utils.trace def _set_cluster_info(self): self._cluster_info['nve_support'] = ( self._client.is_nve_supported() and self._client.features.FLEXVOL_ENCRYPTION) @na_utils.trace def check_for_setup_error(self): self._start_periodic_tasks() def _get_vserver(self, share_server=None): raise NotImplementedError() def _get_client(self, config, vserver=None): if config.netapp_use_legacy_client: client = client_cmode.NetAppCmodeClient( transport_type=config.netapp_transport_type, ssl_cert_path=config.netapp_ssl_cert_path, username=config.netapp_login, password=config.netapp_password, hostname=config.netapp_server_hostname, port=config.netapp_server_port, vserver=vserver, trace=na_utils.TRACE_API, api_trace_pattern=na_utils.API_TRACE_PATTERN, private_key_file=config.netapp_private_key_file, certificate_file=config.netapp_certificate_file, ca_certificate_file=config.netapp_ca_certificate_file, certificate_host_validation=( config.netapp_certificate_host_validation)) else: client = client_cmode_rest.NetAppRestClient( transport_type=config.netapp_transport_type, ssl_cert_path=config.netapp_ssl_cert_path, username=config.netapp_login, password=config.netapp_password, hostname=config.netapp_server_hostname, port=config.netapp_server_port, vserver=vserver, trace=na_utils.TRACE_API, async_rest_timeout=( config.netapp_rest_operation_timeout), api_trace_pattern=na_utils.API_TRACE_PATTERN, private_key_file=config.netapp_private_key_file, certificate_file=config.netapp_certificate_file, ca_certificate_file=config.netapp_ca_certificate_file, certificate_host_validation=( 
config.netapp_certificate_host_validation)) return client @na_utils.trace def _get_api_client(self, vserver=None): # Use cached value to prevent redo calls during client initialization. client = self._clients.get(vserver) if not client: client = self._get_client(self.configuration, vserver=vserver) self._clients[vserver] = client return client @na_utils.trace def _get_api_client_for_backend(self, backend_name, vserver=None): key = f"{backend_name}-{vserver}" client = self._backend_clients.get(key) if not client: config = data_motion.get_backend_configuration(backend_name) client = self._get_client(config, vserver=vserver) self._backend_clients[key] = client return client @na_utils.trace def _get_licenses(self): if not self._have_cluster_creds: LOG.debug('License info not available without cluster credentials') return [] self._licenses = self._client.get_licenses() log_data = { 'backend': self._backend_name, 'licenses': ', '.join(self._licenses), } LOG.info('Available licenses on %(backend)s ' 'are %(licenses)s.', log_data) if 'nfs' not in self._licenses and 'cifs' not in self._licenses: msg = 'Neither NFS nor CIFS is licensed on %(backend)s' msg_args = {'backend': self._backend_name} LOG.error(msg, msg_args) return self._licenses @na_utils.trace def _start_periodic_tasks(self): # Run the task once in the current thread so prevent a race with # the first invocation of get_share_stats. 
self._update_ssc_info() # Start the task that updates the slow-changing storage service catalog ssc_periodic_task = loopingcall.FixedIntervalLoopingCall( self._update_ssc_info) ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL_SECONDS, initial_delay=self.SSC_UPDATE_INTERVAL_SECONDS) # Start the task that logs autosupport (EMS) data to the controller ems_periodic_task = loopingcall.FixedIntervalLoopingCall( self._handle_ems_logging) ems_periodic_task.start(interval=self.AUTOSUPPORT_INTERVAL_SECONDS, initial_delay=0) # Start the task that runs other housekeeping tasks, such as deletion # of previously soft-deleted storage artifacts. housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall( self._handle_housekeeping_tasks) housekeeping_periodic_task.start( interval=self.HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0) def _get_backend_share_name(self, share_id): """Get share name according to share name template.""" return self.configuration.netapp_volume_name_template % { 'share_id': share_id.replace('-', '_')} def _get_backend_snapshot_name(self, snapshot_id): """Get snapshot name according to snapshot name template.""" return 'share_snapshot_' + snapshot_id.replace('-', '_') def _get_backend_cg_snapshot_name(self, snapshot_id): """Get snapshot name according to snapshot name template.""" return 'share_cg_snapshot_' + snapshot_id.replace('-', '_') def _get_backend_qos_policy_group_name(self, share_id): """Get QoS policy name according to QoS policy group name template.""" return self.configuration.netapp_qos_policy_group_name_template % { 'share_id': share_id.replace('-', '_')} def _get_backend_snapmirror_policy_name_svm(self, share_server_id): return (self.configuration.netapp_snapmirror_policy_name_svm_template % {'share_server_id': share_server_id.replace('-', '_')}) def _get_backend_fpolicy_policy_name(self, share_id): """Get FPolicy policy name according with the configured template.""" return 
(self.configuration.netapp_fpolicy_policy_name_template % {'share_id': share_id.replace('-', '_')}) def _get_backend_fpolicy_event_name(self, share_id, protocol): """Get FPolicy event name according with the configured template.""" return (self.configuration.netapp_fpolicy_event_name_template % {'protocol': protocol.lower(), 'share_id': share_id.replace('-', '_')}) @na_utils.trace def _get_aggregate_space(self, aggr_set): if self._have_cluster_creds: return self._client.get_cluster_aggregate_capacities(aggr_set) else: return self._client.get_vserver_aggregate_capacities(aggr_set) @na_utils.trace def _check_snaprestore_license(self): """Check if snaprestore license is enabled.""" if self._have_cluster_creds: return 'snaprestore' in self._licenses else: return self._client.check_snaprestore_license() @na_utils.trace def _get_aggregate_node(self, aggregate_name, cluster_client=None): """Get home node for the specified aggregate, or None.""" if cluster_client: return cluster_client.get_node_for_aggregate(aggregate_name) elif self._have_cluster_creds: return self._client.get_node_for_aggregate(aggregate_name) else: return None def get_default_filter_function(self, pool=None): """Get the default filter_function string.""" if not self._is_flexgroup_pool(pool): return self.DEFAULT_FILTER_FUNCTION min_size = self._get_minimum_flexgroup_size(pool) return self.DEFAULT_FLEXGROUP_FILTER_FUNCTION % min_size def get_default_goodness_function(self): """Get the default goodness_function string.""" return self.DEFAULT_GOODNESS_FUNCTION @na_utils.trace def get_share_stats(self, get_filter_function=None, goodness_function=None): """Retrieve stats info from Data ONTAP backend.""" # NOTE(felipe_rodrigues): the share group stats is reported to the # entire backend, not per pool. So, if there is a FlexGroup pool, the # driver drops for all pools the consistent snapshot support. 
consistent_snapshot_support = 'host' if self._flexgroup_pools: consistent_snapshot_support = None data = { 'share_backend_name': self._backend_name, 'driver_name': self.driver_name, 'vendor_name': 'NetApp', 'driver_version': '1.0', 'netapp_storage_family': 'ontap_cluster', 'storage_protocol': 'NFS_CIFS', 'pools': self._get_pools(get_filter_function=get_filter_function, goodness_function=goodness_function), 'share_group_stats': { 'consistent_snapshot_support': consistent_snapshot_support, }, } if self.configuration.replication_domain: data['replication_type'] = [constants.REPLICATION_TYPE_DR, constants.REPLICATION_TYPE_READABLE] data['replication_domain'] = self.configuration.replication_domain return data @na_utils.trace def get_share_server_pools(self, share_server): """Return list of pools related to a particular share server. Note that the multi-SVM cDOT driver assigns all available pools to each Vserver, so there is no need to filter the pools any further by share_server. :param share_server: ShareServer class instance. """ if self._cache_pool_status.is_expired(): return self._get_pools() return self._cache_pool_status.get_data() @na_utils.trace def _get_pools(self, get_filter_function=None, goodness_function=None): """Retrieve list of pools available to this backend.""" pools = [] cached_pools = [] aggr_pool = set(self._find_matching_aggregates()) flexgroup_pools = self._flexgroup_pools flexgroup_aggr = self._get_flexgroup_aggr_set() aggr_space = self._get_aggregate_space(aggr_pool.union(flexgroup_aggr)) cluster_name = self._cluster_name if self._have_cluster_creds and not cluster_name: # Get up-to-date node utilization metrics just once. self._perf_library.update_performance_cache({}, self._ssc_stats) cluster_name = self._client.get_cluster_name() self._cluster_name = cluster_name # Add FlexVol pools. 
filter_function = (get_filter_function() if get_filter_function else None) for aggr_name in sorted(aggr_pool): total_gb, free_gb, used_gb = self._get_flexvol_pool_space( aggr_space, aggr_name) pool = self._get_pool(aggr_name, total_gb, free_gb, used_gb) cached_pools.append(pool) pool_with_func = copy.deepcopy(pool) pool_with_func['filter_function'] = filter_function pool_with_func['goodness_function'] = goodness_function pool_with_func['netapp_cluster_name'] = self._cluster_name pools.append(pool_with_func) # Add FlexGroup pools. for pool_name, aggr_list in flexgroup_pools.items(): filter_function = (get_filter_function(pool=pool_name) if get_filter_function else None) total_gb, free_gb, used_gb = self._get_flexgroup_pool_space( aggr_space, aggr_list) pool = self._get_pool(pool_name, total_gb, free_gb, used_gb) cached_pools.append(pool) pool_with_func = copy.deepcopy(pool) pool_with_func['filter_function'] = filter_function pool_with_func['goodness_function'] = goodness_function pool_with_func['netapp_cluster_name'] = self._cluster_name pools.append(pool_with_func) self._cache_pool_status.update_data(cached_pools) return pools @na_utils.trace def _get_pool(self, pool_name, total_capacity_gb, free_capacity_gb, allocated_capacity_gb): """Gets the pool dictionary.""" if self._have_cluster_creds: qos_support = True else: qos_support = False # Share-server/share encryption support with NetApp will only be # possible with DHSS=True encryption_support = None if self.configuration.safe_get( 'netapp_vserver') else ['share_server'] netapp_flexvol_encryption = self._cluster_info.get( 'nve_support', False) reserved_percentage = self.configuration.reserved_share_percentage reserved_snapshot_percentage = ( self.configuration.reserved_share_from_snapshot_percentage or reserved_percentage) reserved_shr_extend_percentage = ( self.configuration.reserved_share_extend_percentage or reserved_percentage) max_over_ratio = self.configuration.max_over_subscription_ratio if 
                total_capacity_gb == 0.0:
            total_capacity_gb = 'unknown'
        pool = {
            'pool_name': pool_name,
            'filter_function': None,
            'goodness_function': None,
            'netapp_cluster_name': '',
            'total_capacity_gb': total_capacity_gb,
            'free_capacity_gb': free_capacity_gb,
            'allocated_capacity_gb': allocated_capacity_gb,
            'qos': qos_support,
            'reserved_percentage': reserved_percentage,
            'reserved_snapshot_percentage': reserved_snapshot_percentage,
            'reserved_share_extend_percentage':
                reserved_shr_extend_percentage,
            'max_over_subscription_ratio': max_over_ratio,
            'dedupe': [True, False],
            'compression': [True, False],
            'netapp_flexvol_encryption': netapp_flexvol_encryption,
            'thin_provisioning': [True, False],
            'snapshot_support': True,
            'create_share_from_snapshot_support': True,
            'revert_to_snapshot_support': self._revert_to_snapshot_support,
            'security_service_update_support': True,
            'share_server_multiple_subnet_support': True,
            'mount_point_name_support': True,
            'share_replicas_migration_support': True,
            'encryption_support': encryption_support,
        }

        # Add storage service catalog data.
        pool_ssc_stats = self._ssc_stats.get(pool_name)
        if pool_ssc_stats:
            pool.update(pool_ssc_stats)

        # Add utilization info, or nominal value if not available.
        utilization = self._perf_library.get_node_utilization_for_pool(
            pool_name)
        pool['utilization'] = na_utils.round_down(utilization)

        return pool

    @na_utils.trace
    def _get_flexvol_pool_space(self, aggr_space_map, aggr):
        """Returns the space info tuple for a FlexVol pool.

        :param aggr_space_map: space info dict for the driver aggregates.
        :param aggr: name of the aggregate backing the FlexVol pool.
        :returns: (total_capacity_gb, free_capacity_gb,
            allocated_capacity_gb) rounded down, in GiB.
        """
        total_capacity_gb = na_utils.round_down(float(
            aggr_space_map[aggr].get('total', 0)) / units.Gi)
        free_capacity_gb = na_utils.round_down(float(
            aggr_space_map[aggr].get('available', 0)) / units.Gi)
        allocated_capacity_gb = na_utils.round_down(float(
            aggr_space_map[aggr].get('used', 0)) / units.Gi)
        return total_capacity_gb, free_capacity_gb, allocated_capacity_gb

    @na_utils.trace
    def _get_flexgroup_pool_space(self, aggr_space_map, aggr_pool):
        """Returns the space info tuple for a FlexGroup pool.

        Given that the set of aggregates that form a FlexGroup pool may have
        different space information, the space info for the pool is not
        calculated by summing their values. The FlexGroup share must have its
        total size divided equally through the pool aggregates. So, the pool
        size is limited by the least aggregate size:

        - free_size = least_aggregate_free_size * number_aggregates
        - total_size = least_aggregate_total_size * number_aggregates
        - used_size = total_size - free_size

        :param aggr_space_map: space info dict for the driver aggregates.
        :param aggr_pool: list of aggregate names for the FlexGroup pool.
        """
        # Track the aggregate entries with the smallest total and smallest
        # available space; aggregates missing from the map are skipped.
        min_total = None
        min_free = None
        for aggr_name in aggr_pool:
            if aggr_name not in aggr_space_map:
                continue
            aggr_total = aggr_space_map[aggr_name].get('total', 0)
            aggr_free = aggr_space_map[aggr_name].get('available', 0)
            if not min_total or min_total.get('total', 0) > aggr_total:
                min_total = aggr_space_map[aggr_name]
            if not min_free or min_free.get('available', 0) > aggr_free:
                min_free = aggr_space_map[aggr_name]

        total_gb = na_utils.round_down(0)
        if min_total:
            min_total_pool = min_total.get('total', 0) * len(aggr_pool)
            total_gb = na_utils.round_down(float(min_total_pool) / units.Gi)

        free_gb = na_utils.round_down(0)
        if min_free:
            min_free_pool = min_free.get('available', 0) * len(aggr_pool)
            free_gb = na_utils.round_down(float(min_free_pool) / units.Gi)

        used_gb = na_utils.round_down(0)
        if total_gb > free_gb:
            used_gb = na_utils.round_down(total_gb - free_gb)

        return total_gb, free_gb, used_gb

    @na_utils.trace
    def _handle_ems_logging(self):
        """Build and send an EMS log message."""
        self._client.send_ems_log_message(self._build_ems_log_message_0())
        self._client.send_ems_log_message(self._build_ems_log_message_1())

    def _build_base_ems_log_message(self):
        """Construct EMS Autosupport log message common to all events."""
        ems_log = {
            'computer-name': socket.gethostname() or 'Manila_node',
            'event-source': 'Manila driver %s' % self.driver_name,
            'app-version': self._app_version,
            'category': 'provisioning',
            'log-level': '5',
            'auto-support': 'false',
        }
        return ems_log

    @na_utils.trace
    def _build_ems_log_message_0(self):
        """Construct EMS Autosupport log message with deployment info."""
        ems_log = self._build_base_ems_log_message()
        ems_log.update({
            'event-id': '0',
            'event-description': 'OpenStack Manila connected to cluster node',
        })
        return ems_log

    @na_utils.trace
    def _build_ems_log_message_1(self):
        """Construct EMS Autosupport log message with storage pool info."""
        message = self._get_ems_pool_info()

        ems_log = self._build_base_ems_log_message()
        ems_log.update({
            'event-id': '1',
            'event-description': json.dumps(message),
        })
        return ems_log

    def _get_ems_pool_info(self):
        # Subclass hook: return the storage pool info dict embedded in EMS
        # log message 1.
        raise NotImplementedError()

    @na_utils.trace
    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""
        # Intentionally a no-op in the base library; subclasses may override.

    def _find_matching_aggregates(self, aggregate_names=None):
        """Find all aggregates match pattern."""
        raise NotImplementedError()

    def _get_backup_vserver(self, backup, share_server=None):
        """Get/Create the vserver for backup """
        raise NotImplementedError()

    def _delete_backup_vserver(self, backup, des_vserver):
        """Delete the vserver for backup """
        raise NotImplementedError()

    @na_utils.trace
    def _get_flexgroup_aggr_set(self):
        """Return the set union of aggregates used by all FlexGroup pools."""
        aggr = set()
        for aggr_list in self._flexgroup_pools.values():
            aggr = aggr.union(aggr_list)
        return aggr

    @na_utils.trace
    def _get_helper(self, share):
        """Returns driver which implements share protocol."""
        share_protocol = share['share_proto'].lower()

        if share_protocol not in self.SUPPORTED_PROTOCOLS:
            err_msg = _("Invalid NAS protocol supplied: %s.") % share_protocol
            raise exception.NetAppException(err_msg)

        self._check_license_for_protocol(share_protocol)

        if share_protocol == 'nfs':
            return nfs_cmode.NetAppCmodeNFSHelper()
        elif share_protocol == 'cifs':
            return cifs_cmode.NetAppCmodeCIFSHelper()

    @na_utils.trace
    def _check_license_for_protocol(self, share_protocol):
        """Validates protocol license if cluster APIs are accessible."""
        if not self._have_cluster_creds:
            return

        # Refresh the cached license list before failing the request.
        if share_protocol.lower() not in self._licenses:
            current_licenses = self._get_licenses()
            if share_protocol.lower() not in current_licenses:
                msg_args = {
                    'protocol': share_protocol,
                    'host': self.configuration.netapp_server_hostname
                }
                msg = _('The protocol %(protocol)s is not licensed on '
                        'controller %(host)s') % msg_args
                LOG.error(msg)
                raise exception.NetAppException(msg)

    @na_utils.trace
    def get_pool(self, share):
        """Returns the name of the pool for a given share."""
        pool = share_utils.extract_host(share['host'], level='pool')
        if pool:
            return pool

        # Fall back to asking the backend which aggregate(s) hold the volume;
        # a list result means the volume is a FlexGroup.
        share_name = self._get_backend_share_name(share['id'])
        aggr = self._client.get_aggregate_for_volume(share_name)
        if isinstance(aggr, list):
            pool = self._get_flexgroup_pool_name(aggr)
        else:
            pool = aggr
        if not pool:
            msg = _('Could not find out the pool name for the share %s.')
            raise exception.NetAppException(msg % share_name)
        return pool

    @na_utils.trace
    def create_share(self, context, share, share_server):
        """Creates new share."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        self._allocate_container(share, vserver, vserver_client)
        return self._create_export(share, share_server, vserver,
                                   vserver_client)

    @na_utils.trace
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        """Creates new share from snapshot."""
        # TODO(dviroel) return progress info in asynchronous answers
        # NOTE(felipe_rodrigues): when using FlexGroup, the NetApp driver will
        # drop consistent snapshot support, calling this create from snap
        # method for each member (no parent share is set).
        is_group_snapshot = share.get('source_share_group_snapshot_member_id')
        if is_group_snapshot or parent_share['host'] == share['host']:
            src_vserver, src_vserver_client = self._get_vserver(
                share_server=share_server)
            # Creating a new share from snapshot in the source share's pool
            self._allocate_container_from_snapshot(
                share, snapshot, src_vserver, src_vserver_client)
            return self._create_export(share, share_server, src_vserver,
                                       src_vserver_client)

        parent_share_server = {}
        if parent_share['share_server'] is not None:
            # Get only the information needed by Data Motion
            ss_keys = ['id', 'identifier', 'backend_details', 'host']
            for key in ss_keys:
                parent_share_server[key] = (
                    parent_share['share_server'].get(key))

        # Information to be saved in the private_storage that will need to be
        # retrieved later, in order to continue with the share creation flow
        src_share_instance = {
            'id': share['id'],
            'host': parent_share.get('host'),
            'share_server': parent_share_server or None
        }
        # NOTE(dviroel): Data Motion functions access share's 'share_server'
        # attribute to get vserver information.
        dest_share = copy.copy(share.to_dict())
        dest_share['share_server'] = share_server

        dm_session = data_motion.DataMotionSession()
        # Source host info
        __, src_vserver, src_backend = (
            dm_session.get_backend_info_for_share(parent_share))
        src_vserver_client = data_motion.get_client_for_backend(
            src_backend, vserver_name=src_vserver)

        # Destination host info
        dest_vserver, dest_vserver_client = self._get_vserver(share_server)

        src_cluster_name = None
        dest_cluster_name = None
        if self._have_cluster_creds:
            src_cluster_name = src_vserver_client.get_cluster_name()
            dest_cluster_name = dest_vserver_client.get_cluster_name()

        # the parent share and new share must reside on same pool type: either
        # FlexGroup or FlexVol.
        parent_share_name = self._get_backend_share_name(parent_share['id'])
        parent_is_flexgroup = self._is_flexgroup_share(src_vserver_client,
                                                       parent_share_name)
        dest_pool_name = share_utils.extract_host(share['host'], level='pool')
        dest_is_flexgroup = self._is_flexgroup_pool(dest_pool_name)
        if parent_is_flexgroup != dest_is_flexgroup:
            parent_type = 'FlexGroup' if parent_is_flexgroup else 'FlexVol'
            dest_type = 'FlexGroup' if dest_is_flexgroup else 'FlexVol'
            msg = _('Could not create share %(share_id)s from snapshot '
                    '%(snapshot_id)s in the destination host %(dest_host)s. '
                    'The snapshot is from %(parent_type)s style, while the '
                    'destination host is %(dest_type)s style.')
            msg_args = {'share_id': share['id'],
                        'snapshot_id': snapshot['id'],
                        'dest_host': dest_share['host'],
                        'parent_type': parent_type,
                        'dest_type': dest_type}
            raise exception.NetAppException(msg % msg_args)

        # FlexGroup pools must have the same number of aggregates
        if dest_is_flexgroup:
            parent_aggr = src_vserver_client.get_aggregate_for_volume(
                parent_share_name)

            # NOTE(felipe_rodrigues): when FlexGroup is auto provisioned the
            # match of number of aggregates cannot be checked. So, it might
            # fail during snapmirror setup.
            if not self._is_flexgroup_auto:
                dest_aggr = self._get_flexgroup_aggregate_list(dest_pool_name)
                if len(parent_aggr) != len(dest_aggr):
                    msg = _('Could not create share %(share_id)s from '
                            'snapshot %(snapshot_id)s in the destination host '
                            '%(dest_host)s. The source and destination '
                            'FlexGroup pools must have the same number of '
                            'aggregates.')
                    msg_args = {'share_id': share['id'],
                                'snapshot_id': snapshot['id'],
                                'dest_host': dest_share['host']}
                    raise exception.NetAppException(msg % msg_args)
        else:
            parent_aggr = share_utils.extract_host(parent_share['host'],
                                                   level='pool')

        try:
            # NOTE(felipe_rodrigues): no support to move volumes that are
            # FlexGroup or without the cluster credential. So, performs the
            # workaround using snapmirror even being in the same cluster.
            if (src_cluster_name != dest_cluster_name or dest_is_flexgroup or
                    not self._have_cluster_creds):
                # 1. Create a clone on source (temporary volume). We don't
                # need to split from clone in order to replicate data. We
                # don't need to create fpolicies since this copy will be
                # deleted.
                temp_share = copy.deepcopy(dest_share)
                temp_uuid = uuidutils.generate_uuid()
                temp_share['id'] = str(temp_uuid)
                self._allocate_container_from_snapshot(
                    temp_share, snapshot, src_vserver, src_vserver_client,
                    split=False, create_fpolicy=False)
                # 2. Create a replica in destination host.
                self._allocate_container(
                    dest_share, dest_vserver, dest_vserver_client,
                    replica=True, set_qos=False)
                # 3. Initialize snapmirror relationship with cloned share.
                src_share_instance['replica_state'] = (
                    constants.REPLICA_STATE_ACTIVE)
                relationship_type = na_utils.get_relationship_type(
                    dest_is_flexgroup)
                src_share_instance["id"] = temp_share['id']
                dm_session.create_snapmirror(
                    src_share_instance, dest_share, relationship_type)
                # The snapmirror data copy can take some time to be concluded,
                # we'll answer this call asynchronously.
                state = self.STATE_SNAPMIRROR_DATA_COPYING
            else:
                # NOTE(dviroel): there's a need to split the cloned share from
                # its parent in order to move it to a different aggregate or
                # vserver.
                self._allocate_container_from_snapshot(
                    dest_share, snapshot, src_vserver, src_vserver_client,
                    split=True)
                # The split volume clone operation can take some time to be
                # concluded and we'll answer the call asynchronously.
                state = self.STATE_SPLITTING_VOLUME_CLONE
        except Exception:
            # If the share exists on the source vserver, we need to delete it
            # since it's a temporary share, not managed by the system.
            dm_session.delete_snapmirror(src_share_instance, dest_share)
            self._delete_share(src_share_instance, src_vserver,
                               src_vserver_client, remove_export=False)
            msg = _('Could not create share %(share_id)s from snapshot '
                    '%(snapshot_id)s in the destination host %(dest_host)s.')
            msg_args = {'share_id': dest_share['id'],
                        'snapshot_id': snapshot['id'],
                        'dest_host': dest_share['host']}
            raise exception.NetAppException(msg % msg_args)

        # Store source share info on private storage using destination share
        # id
        src_share_instance['aggregate'] = parent_aggr
        src_share_instance['internal_state'] = state
        src_share_instance['status'] = constants.STATUS_ACTIVE
        self.private_storage.update(dest_share['id'], {
            'source_share': json.dumps(src_share_instance)
        })
        return {
            'status': constants.STATUS_CREATING_FROM_SNAPSHOT,
        }

    def _update_create_from_snapshot_status(self, share, share_server=None):
        """Check and advance an async create-from-snapshot operation.

        Reads the source-share state previously stored in private storage
        and delegates to _create_from_snapshot_continue; on any failure,
        tears down the temporary clone and reports STATUS_ERROR.
        """
        # TODO(dviroel) return progress info in asynchronous answers
        # If the share is creating from snapshot and copying data in
        # background we'd verify if the operation has finished and trigger new
        # operations if necessary.
        source_share_str = self.private_storage.get(share['id'],
                                                    'source_share')
        if source_share_str is None:
            msg = _('Could not update share %(share_id)s status due to invalid'
                    ' internal state. Aborting share creation.')
            msg_args = {'share_id': share['id']}
            LOG.error(msg, msg_args)
            return {'status': constants.STATUS_ERROR}

        try:
            # Check if current operation had finished and continue to move the
            # source share towards its destination
            return self._create_from_snapshot_continue(share, share_server)
        except Exception:
            # Delete everything associated to the temporary clone created on
            # the source host.
            source_share = json.loads(source_share_str)
            dm_session = data_motion.DataMotionSession()

            dm_session.delete_snapmirror(source_share, share)
            __, src_vserver, src_backend = (
                dm_session.get_backend_info_for_share(source_share))
            src_vserver_client = data_motion.get_client_for_backend(
                src_backend, vserver_name=src_vserver)
            self._delete_share(source_share, src_vserver, src_vserver_client,
                               remove_export=False)
            # Delete private storage info
            self.private_storage.delete(share['id'])
            msg = _('Could not complete share %(share_id)s creation due to an '
                    'internal error.')
            msg_args = {'share_id': share['id']}
            LOG.error(msg, msg_args)
            return {'status': constants.STATUS_ERROR}

    def _create_from_snapshot_continue(self, share, share_server=None):
        """Drive the create-from-snapshot state machine one step forward.

        Dispatches on the 'internal_state' stored in private storage
        (splitting clone, moving volume, or snapmirror copying) and, when the
        current phase has finished, performs the follow-up actions (rehost,
        volume move, snapmirror break/delete, QoS application, export
        creation). Returns a dict with at least a 'status' key.
        """
        return_values = {
            'status': constants.STATUS_CREATING_FROM_SNAPSHOT
        }
        apply_qos_on_dest = False
        # Data motion session used to extract host info and manage
        # snapmirrors
        dm_session = data_motion.DataMotionSession()
        # Get info from private storage
        src_share_str = self.private_storage.get(share['id'], 'source_share')
        src_share = json.loads(src_share_str)
        current_state = src_share['internal_state']
        share['share_server'] = share_server

        # Source host info
        __, src_vserver, src_backend = (
            dm_session.get_backend_info_for_share(src_share))
        src_aggr = src_share['aggregate']
        src_vserver_client = data_motion.get_client_for_backend(
            src_backend, vserver_name=src_vserver)

        # Destination host info
        dest_vserver, dest_vserver_client = self._get_vserver(share_server)
        dest_aggr = share_utils.extract_host(share['host'], level='pool')
        if self._is_flexgroup_pool(dest_aggr):
            if self._is_flexgroup_auto:
                dest_share_name = self._get_backend_share_name(share['id'])
                dest_aggr = dest_vserver_client.get_aggregate_for_volume(
                    dest_share_name)
            else:
                dest_aggr = self._get_flexgroup_aggregate_list(dest_aggr)

        if current_state == self.STATE_SPLITTING_VOLUME_CLONE:
            if self._check_volume_clone_split_completed(
                    src_share, src_vserver_client):
                # Rehost volume if source and destination are hosted in
                # different vservers
                if src_vserver != dest_vserver:
                    # NOTE(dviroel): some volume policies, policy rules and
                    # configurations are lost from the source volume after
                    # rehost operation.
                    qos_policy_for_share = (
                        self._get_backend_qos_policy_group_name(share['id']))
                    src_vserver_client.mark_qos_policy_group_for_deletion(
                        qos_policy_for_share)
                    # Apply QoS on destination share
                    apply_qos_on_dest = True

                    self._rehost_and_mount_volume(
                        share, src_vserver, src_vserver_client,
                        dest_vserver, dest_vserver_client)
                # Move the share to the expected aggregate
                if set(src_aggr) != set(dest_aggr):
                    # Move volume and 'defer' the cutover. If it fails, the
                    # share will be deleted afterwards
                    self._move_volume_after_splitting(
                        src_share, share, share_server,
                        cutover_action='defer')
                    # Move a volume can take longer, we'll answer
                    # asynchronously
                    current_state = self.STATE_MOVING_VOLUME
                else:
                    return_values['status'] = constants.STATUS_AVAILABLE

        elif current_state == self.STATE_MOVING_VOLUME:
            if self._check_volume_move_completed(share, share_server):
                if src_vserver != dest_vserver:
                    # NOTE(dviroel): at this point we already rehosted the
                    # share, but we missed applying the qos since it was
                    # moving the share between aggregates
                    apply_qos_on_dest = True
                return_values['status'] = constants.STATUS_AVAILABLE

        elif current_state == self.STATE_SNAPMIRROR_DATA_COPYING:
            replica_state = self.update_replica_state(
                None,  # no context is needed
                [src_share],
                share,
                [],  # access_rules
                [],  # snapshot list
                share_server,
                replication=False)
            if replica_state in [None, constants.STATUS_ERROR]:
                msg = _("Destination share has failed on replicating data "
                        "from source share.")
                LOG.exception(msg)
                raise exception.NetAppException(msg)
            elif replica_state == constants.REPLICA_STATE_IN_SYNC:
                try:
                    # 1. Start an update to try to get a last minute
                    # transfer before we quiesce and break
                    dm_session.update_snapmirror(src_share, share)
                except exception.StorageCommunicationException:
                    # Ignore any errors since the current source replica
                    # may be unreachable
                    pass
                # 2. Break SnapMirror
                # NOTE(dviroel): if it fails on break/delete a snapmirror
                # relationship, we won't be able to delete the share.
                dm_session.break_snapmirror(src_share, share)
                dm_session.delete_snapmirror(src_share, share)
                # 3. Delete the source volume
                self._delete_share(src_share, src_vserver, src_vserver_client,
                                   remove_export=False)
                share_name = self._get_backend_share_name(src_share['id'])
                # 4. Set File system size fixed to false
                dest_vserver_client.set_volume_filesys_size_fixed(
                    share_name, filesys_size_fixed=False)
                apply_qos_on_dest = True
                return_values['status'] = constants.STATUS_AVAILABLE
        else:
            # Delete this share from private storage since we'll abort this
            # operation.
            self.private_storage.delete(share['id'])
            msg_args = {
                'state': current_state,
                'id': share['id'],
            }
            msg = _("Caught an unexpected internal state '%(state)s' for "
                    "share %(id)s. Aborting operation.") % msg_args
            LOG.exception(msg)
            raise exception.NetAppException(msg)

        if return_values['status'] == constants.STATUS_AVAILABLE:
            if apply_qos_on_dest:
                extra_specs = share_types.get_extra_specs_from_share(share)
                provisioning_options = self._get_provisioning_options(
                    extra_specs)
                qos_policy_group_name = (
                    self._modify_or_create_qos_for_existing_share(
                        share, extra_specs, dest_vserver,
                        dest_vserver_client))
                if qos_policy_group_name:
                    provisioning_options['qos_policy_group'] = (
                        qos_policy_group_name)
                share_name = self._get_backend_share_name(share['id'])
                # Modify volume to match extra specs
                dest_vserver_client.modify_volume(
                    dest_aggr, share_name, **provisioning_options)

            self.private_storage.delete(share['id'])
            return_values['export_locations'] = self._create_export(
                share, share_server, dest_vserver, dest_vserver_client,
                clear_current_export_policy=False)
        else:
            new_src_share = copy.deepcopy(src_share)
            new_src_share['internal_state'] = current_state
            self.private_storage.update(share['id'], {
                'source_share': json.dumps(new_src_share)
            })
        return return_values

    @na_utils.trace
    def _allocate_container(self, share, vserver, vserver_client,
                            replica=False, create_fpolicy=True,
                            set_qos=True):
        """Create new share on aggregate."""
        share_name = self._get_backend_share_name(share['id'])

        # Get Data ONTAP aggregate name as pool name.
        pool_name = share_utils.extract_host(share['host'], level='pool')
        if pool_name is None:
            msg = _("Pool is not available in the share host field.")
            raise exception.InvalidHost(reason=msg)

        provisioning_options = self._get_provisioning_options_for_share(
            share, vserver, vserver_client=vserver_client, set_qos=set_qos)

        if provisioning_options.get('snaplock_type'):
            self._check_snaplock_compatibility()

        if replica:
            # If this volume is intended to be a replication destination,
            # create it as the 'data-protection' type
            provisioning_options['volume_type'] = 'dp'

        hide_snapdir = provisioning_options.pop('hide_snapdir')
        mount_point_name = share.get('mount_point_name')

        if share.get('encryption_key_ref'):
            provisioning_options['encrypt'] = True
            LOG.debug('Creating an encrypted share %(share)s on pool '
                      '%(pool)s with provisioning options %(options)s',
                      {'share': share_name, 'pool': pool_name,
                       'options': provisioning_options})
        else:
            LOG.debug('Creating share %(share)s on pool %(pool)s with '
                      'provisioning options %(options)s',
                      {'share': share_name, 'pool': pool_name,
                       'options': provisioning_options})
        if self._is_flexgroup_pool(pool_name):
            aggr_list = self._get_flexgroup_aggregate_list(pool_name)
            self._create_flexgroup_share(
                vserver_client, aggr_list, share_name, share['size'],
                self.configuration.netapp_volume_snapshot_reserve_percent,
                mount_point_name=mount_point_name, **provisioning_options)
        else:
            vserver_client.create_volume(
                pool_name, share_name, share['size'],
                snapshot_reserve=self.configuration.
                netapp_volume_snapshot_reserve_percent,
                mount_point_name=mount_point_name, **provisioning_options)

        if hide_snapdir:
            self._apply_snapdir_visibility(
                hide_snapdir, share_name, vserver_client)

        if create_fpolicy:
            fpolicy_ext_to_include = provisioning_options.get(
                'fpolicy_extensions_to_include')
            fpolicy_ext_to_exclude = provisioning_options.get(
                'fpolicy_extensions_to_exclude')
            if fpolicy_ext_to_include or fpolicy_ext_to_exclude:
                self._create_fpolicy_for_share(share, vserver, vserver_client,
                                               **provisioning_options)

    def _apply_snapdir_visibility(
            self, hide_snapdir, share_name, vserver_client):
        # Toggle the .snapshot directory visibility on the backend volume.
        LOG.debug('Applying snapshot visibility according to hide_snapdir '
                  'value of %(hide_snapdir)s on share %(share)s.',
                  {'hide_snapdir': hide_snapdir, 'share': share_name})
        vserver_client.set_volume_snapdir_access(share_name, hide_snapdir)

    @na_utils.trace
    def _create_flexgroup_share(self, vserver_client, aggr_list, share_name,
                                size, snapshot_reserve, dedup_enabled=False,
                                compression_enabled=False, max_files=None,
                                mount_point_name=None, snaplock_type=None,
                                **provisioning_options):
        """Create a FlexGroup share using async API with job."""
        start_timeout = (
            self.configuration.netapp_flexgroup_aggregate_not_busy_timeout)
        job_info = self.wait_for_start_create_flexgroup(
            start_timeout, vserver_client, aggr_list, share_name, size,
            snapshot_reserve, mount_point_name, snaplock_type,
            **provisioning_options)

        if not job_info['jobid'] or job_info['error-code']:
            msg = "Error creating FlexGroup share: %s."
            raise exception.NetAppException(msg % job_info['error-message'])

        timeout = self.configuration.netapp_flexgroup_volume_online_timeout
        self.wait_for_flexgroup_deployment(vserver_client, job_info['jobid'],
                                           timeout)

        efficiency_policy = provisioning_options.get('efficiency_policy',
                                                     None)

        vserver_client.update_volume_efficiency_attributes(
            share_name, dedup_enabled, compression_enabled,
            is_flexgroup=True, efficiency_policy=efficiency_policy)
        if provisioning_options.get('max_files_multiplier') is not None:
            max_files_multiplier = provisioning_options.pop(
                'max_files_multiplier')
            # A multiplier, when given, overrides/derives the max_files value.
            max_files = na_utils.calculate_max_files(
                size, max_files_multiplier, max_files)
        if max_files is not None:
            vserver_client.set_volume_max_files(share_name, max_files)

        if snaplock_type is not None:
            vserver_client.set_snaplock_attributes(share_name,
                                                   **provisioning_options)

    @na_utils.trace
    def wait_for_start_create_flexgroup(self, start_timeout, vserver_client,
                                        aggr_list, share_name, size,
                                        snapshot_reserve, mount_point_name,
                                        snaplock_type,
                                        **provisioning_options):
        """Wait for starting create FlexGroup volume succeed.

        Create FlexGroup volume fails in case any of the selected aggregates
        are being used for provision another volume. Instead of failing,
        tries several times.

        :param start_timeout: time in seconds to try create.
        :param vserver_client: the client to call the create operation.
        :param aggr_list: list of aggregates to create the FlexGroup.
        :param share_name: name of the FlexGroup volume.
        :param size: size to be provisioned.
        :param snapshot_reserve: snapshot reserve option.
        :param mount_point_name: junction_path_name.
        :param snaplock_type: SnapLock type
        :param provisioning_options: other provision not required options.
        """
        interval = 5
        retries = (start_timeout / interval or 1)

        @manila_utils.retry(
            exception.NetAppBusyAggregateForFlexGroupException,
            interval=interval, retries=retries, backoff_rate=1)
        def _start_create_flexgroup_volume():
            try:
                return vserver_client.create_volume_async(
                    aggr_list, share_name, size, is_flexgroup=True,
                    snapshot_reserve=snapshot_reserve,
                    auto_provisioned=self._is_flexgroup_auto,
                    mount_point_name=mount_point_name,
                    snaplock_type=snaplock_type, **provisioning_options)
            except netapp_api.NaApiError as e:
                with excutils.save_and_reraise_exception() as raise_ctxt:
                    # A busy-aggregate error is retriable; any other API
                    # error is re-raised by the context manager.
                    try_msg = "try the command again" in e.message
                    if ((e.code == netapp_api.EAPIERROR or
                         e.code == rest_api.EREST_ANOTHER_VOLUME_OPERATION)
                            and try_msg):
                        msg = _("Another volume is currently being "
                                "provisioned using one or more of the "
                                "aggregates selected to provision FlexGroup "
                                "volume %(share_name)s.")
                        msg_args = {'share_name': share_name}
                        LOG.error(msg, msg_args)
                        raise_ctxt.reraise = False
                        raise (exception.
                               NetAppBusyAggregateForFlexGroupException())

        try:
            return _start_create_flexgroup_volume()
        except exception.NetAppBusyAggregateForFlexGroupException:
            msg_err = _("Unable to start provision the FlexGroup volume %s. "
                        "Retries exhausted. Aborting")
            raise exception.NetAppException(message=msg_err % share_name)

    @na_utils.trace
    def wait_for_flexgroup_deployment(self, vserver_client, job_id, timeout):
        """Wait for creating FlexGroup share job to get finished.

        :param vserver_client: the client to call the get job status.
        :param job_id: create FlexGroup job ID.
        :param timeout: time in seconds to wait create.
        """
        interval = 5
        retries = (timeout / interval or 1)

        @manila_utils.retry(exception.ShareBackendException,
                            interval=interval, retries=retries,
                            backoff_rate=1)
        def _wait_job_is_completed():
            job_state = vserver_client.get_job_state(job_id)
            LOG.debug("Waiting for creating FlexGroup job %(job_id)s in "
                      "state: %(job_state)s.",
                      {'job_id': job_id, 'job_state': job_state})
            if job_state == 'failure' or job_state == 'error':
                msg_error = "Error performing the create FlexGroup job %s."
                raise exception.NetAppException(msg_error % job_id)
            elif job_state != 'success':
                # Raising ShareBackendException triggers another retry.
                msg = _('FlexGroup share is being created. Will wait the '
                        'job.')
                LOG.warning(msg)
                raise exception.ShareBackendException(msg=msg)

        try:
            _wait_job_is_completed()
        except exception.ShareBackendException:
            msg = _("Timeout waiting for FlexGroup create job %s to be "
                    "finished.")
            raise exception.NetAppException(msg % job_id)

    @na_utils.trace
    def _remap_standard_boolean_extra_specs(self, extra_specs):
        """Replace standard boolean extra specs with NetApp-specific ones."""
        specs = copy.deepcopy(extra_specs)
        for (key, netapp_key) in (
                self.STANDARD_BOOLEAN_EXTRA_SPECS_MAP.items()):
            if key in specs:
                bool_value = share_types.parse_boolean_extra_spec(
                    key, specs[key])
                specs[netapp_key] = 'true' if bool_value else 'false'
                del specs[key]
        return specs

    @na_utils.trace
    def _check_extra_specs_validity(self, share, extra_specs):
        """Check if the extra_specs have valid values."""
        self._check_boolean_extra_specs_validity(
            share, extra_specs, list(self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP))
        self._check_string_extra_specs_validity(share, extra_specs)

    @na_utils.trace
    def _check_string_extra_specs_validity(self, share, extra_specs):
        """Check if the string_extra_specs have valid values."""
        if 'netapp:max_files' in extra_specs:
            self._check_if_max_files_is_valid(
                share, extra_specs['netapp:max_files'])
        if 'netapp:max_files_multiplier' in extra_specs:
            self._check_if_max_files_multiplier_is_valid(
                share, extra_specs['netapp:max_files_multiplier'])
        if 'netapp:fpolicy_file_operations' in extra_specs:
            self._check_fpolicy_file_operations(
                share, extra_specs['netapp:fpolicy_file_operations'])
        # Validate extra_specs for SnapLock
        snaplock_attributes = [
            'netapp:snaplock_autocommit_period',
            'netapp:snaplock_min_retention_period',
            'netapp:snaplock_max_retention_period',
            'netapp:snaplock_default_retention_period'
        ]
        for attribute in snaplock_attributes:
            if attribute in extra_specs:
                self._check_snaplock_attributes(share, attribute,
                                                extra_specs[attribute])

    @na_utils.trace
    def _check_if_max_files_is_valid(self, share, value):
        """Check if max_files has a valid value."""
        if int(value) < 0:
            args = {'value': value, 'key': 'netapp:max_files',
                    'type_id': share['share_type_id'],
                    'share_id': share['id']}
            msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
                    'in share_type %(type_id)s for share %(share_id)s.')
            raise exception.NetAppException(msg % args)

    @na_utils.trace
    def _check_if_max_files_multiplier_is_valid(self, share, value):
        """Check if max_files_multiplier has a valid value."""
        try:
            if float(value) <= 0 or float(value) > 8:
                args = {'value': value,
                        'key': 'netapp:max_files_multiplier',
                        'type_id': share['share_type_id'],
                        'share_id': share['id']}
                msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
                        'in share_type %(type_id)s for share %(share_id)s. '
                        'Must be between "0" and "8".')
                raise exception.NetAppException(msg % args)
        except ValueError as e:
            # float() failed: the spec value is not numeric at all.
            raise exception.InvalidInput(e)

    @na_utils.trace
    def _check_fpolicy_file_operations(self, share, value):
        """Check if the provided fpolicy file operations are valid."""
        for file_op in value.split(','):
            if file_op.strip() not in self.FPOLICY_FILE_OPERATIONS_LIST:
                args = {'file_op': file_op,
                        'extra_spec': 'netapp:fpolicy_file_operations',
                        'type_id': share['share_type_id'],
                        'share_id': share['id']}
                msg = _('Invalid value "%(file_op)s" for extra_spec '
                        '"%(extra_spec)s" in share_type %(type_id)s for '
                        'share %(share_id)s.')
                raise exception.NetAppException(msg % args)

    @na_utils.trace
    def _check_snaplock_attributes(self, share, key, value):
        """Validate the SnapLock retention periods"""
        valid_units_for_period = ["minutes", "hours", "days", "months",
                                  "years"]
        pattern = re.compile(r'^\d+\s*(minutes|hours|days|months|years)$')
        common_msg = ("a number followed suffix, valid suffix are: "
                      f"{valid_units_for_period}. For example, a value"
                      f" if '2hours' represents a {key}"
                      " of 2 hours.")
        if key == 'netapp:snaplock_autocommit_period':
            is_matched = pattern.match(value)
            extra_msg = (f"The value of the {key} should be"
                         f" {common_msg} ")
            if not is_matched:
                self._raise_snaplock_exception(share, key, value, extra_msg)
        elif (key == 'netapp:snaplock_min_retention_period'
                or key == 'netapp:snaplock_max_retention_period'):
            # Min/max retention also accept the literal "infinite".
            is_matched = pattern.match(value) or value == "infinite"
            extra_msg = (f"The value of the {key} should be "
                         f"'infinite' or {common_msg}")
            if not is_matched:
                self._raise_snaplock_exception(share, key, value, extra_msg)
        elif key == 'netapp:snaplock_default_retention_period':
            # Default retention additionally accepts "min" and "max".
            is_matched = (pattern.match(value) or value == "infinite"
                          or value == "min" or value == "max")
            extra_msg = (f"The value of the {key} should be "
                         f"'infinite', 'min', 'max', or {common_msg}")
            if not is_matched:
                self._raise_snaplock_exception(share, key, value, extra_msg)

    def _raise_snaplock_exception(self, share, key, value, extra_msg):
        """Raise NetAppException for an invalid SnapLock extra-spec value."""
        args = {'value': value, 'extra_spec': key,
                'type_id': share['share_type_id'],
                'share_id': share['id'],
                'extra_msg': extra_msg}
        msg = _('Invalid value "%(value)s" for extra_spec '
                '"%(extra_spec)s" in share_type %(type_id)s for share '
                '%(share_id)s. %(extra_msg)s')
        raise exception.NetAppException(msg % args)

    @na_utils.trace
    def _check_snaplock_compatibility(self):
        """Check SnapLock license and compliance clock sync with the nodes"""
        # Check SnapLock license is enabled on cluster
        if self._have_cluster_creds:
            if 'snaplock' not in self._licenses:
                # NOTE(review): this exception object is constructed but
                # never raised, so a missing SnapLock license is silently
                # ignored here -- confirm whether a 'raise' was intended.
                exception.NetAppException("SnapLock License is not"
                                          " available on ONTAP")
            if not self._is_snaplock_compliance_configured:
                msg = _('Compliance clock is not configured for one'
                        ' of the nodes.')
                raise exception.NetAppException(msg)
        else:
            LOG.warning("Unable to verify if SnapLock is enabled for"
                        " the cluster.")

    @na_utils.trace
    def _check_boolean_extra_specs_validity(self, share, specs,
                                            keys_of_interest):
        # cDOT compression requires deduplication.
        dedup = specs.get('netapp:dedup', None)
        compression = specs.get('netapp:compression', None)
        if dedup is not None and compression is not None:
            if dedup.lower() == 'false' and compression.lower() == 'true':
                spec = {'netapp:dedup': dedup,
                        'netapp:compression': compression}
                type_id = share['share_type_id']
                share_id = share['id']
                args = {'type_id': type_id, 'share_id': share_id,
                        'spec': spec}
                msg = _('Invalid combination of extra_specs in share_type '
                        '%(type_id)s for share %(share_id)s: %(spec)s: '
                        'deduplication must be enabled in order for '
                        'compression to be enabled.')
                raise exception.Invalid(msg % args)
        """Check if the boolean_extra_specs have valid values."""
        # Extra spec values must be (ignoring case) 'true' or 'false'.
        for key in keys_of_interest:
            value = specs.get(key)
            if value is not None and value.lower() not in ['true', 'false']:
                type_id = share['share_type_id']
                share_id = share['id']
                arg_map = {'value': value, 'key': key, 'type_id': type_id,
                           'share_id': share_id}
                msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
                        'in share_type %(type_id)s for share %(share_id)s.')
                raise exception.Invalid(msg % arg_map)

    @na_utils.trace
    def _get_boolean_provisioning_options(self, extra_specs,
                                          boolean_specs_map):
        """Given extra specs, return corresponding client library kwargs.

        Build a full set of client library provisioning kwargs, filling in a
        default value if an explicit value has not been supplied via a
        corresponding extra spec. Boolean extra spec values are "true" or
        "false", with missing specs treated as "false". Provisioning kwarg
        values are True or False.
        """
        specs = copy.deepcopy(extra_specs)

        # Extract the extra spec keys of concern and their corresponding
        # kwarg keys as lists.
        keys_of_interest = list(boolean_specs_map)
        provisioning_args = [boolean_specs_map[key]
                             for key in keys_of_interest]

        # Set missing spec values to 'false'
        for key in keys_of_interest:
            if key not in specs:
                specs[key] = 'false'

        # Build a list of Boolean provisioning arguments from the string
        # equivalents in the spec values.
        provisioning_values = [specs[key].lower() == 'true'
                               for key in keys_of_interest]

        # Combine the list of provisioning args and the list of provisioning
        # values into a dictionary suitable for use as kwargs when invoking
        # provisioning methods from the client API library.
        return dict(zip(provisioning_args, provisioning_values))

    @na_utils.trace
    def get_string_provisioning_options(self, extra_specs, string_specs_map):
        """Given extra specs, return corresponding client library kwargs.

        Build a full set of client library provisioning kwargs, filling in a
        default value if an explicit value has not been supplied via a
        corresponding extra spec.
        """
        specs = copy.deepcopy(extra_specs)

        # Extract the extra spec keys of concern and their corresponding
        # kwarg keys as lists.
        keys_of_interest = list(string_specs_map)
        provisioning_args = [string_specs_map[key]
                             for key in keys_of_interest]

        # Set missing spec values to None (there is no string default).
        for key in keys_of_interest:
            if key not in specs:
                specs[key] = None
        provisioning_values = [specs[key] for key in keys_of_interest]

        # Combine the list of provisioning args and the list of provisioning
        # values into a dictionary suitable for use as kwargs when invoking
        # provisioning methods from the client API library.
        return dict(zip(provisioning_args, provisioning_values))

    def _get_normalized_qos_specs(self, extra_specs):
        # Map recognized QoS extra-spec keys (case-insensitive) to their
        # normalized names; requires the 'qos' extra-spec to be truthy.
        if not extra_specs.get('qos'):
            return {}

        normalized_qos_specs = {
            self.QOS_SPECS[key.lower()]: value
            for key, value in extra_specs.items()
            if self.QOS_SPECS.get(key.lower())
        }
        if not normalized_qos_specs:
            msg = _("The extra-spec 'qos' is set to True, but no netapp "
                    "supported qos-specs have been specified in the share "
                    "type. Cannot provision a QoS policy. Specify any of "
                    "the following extra-specs and try again: %s")
            raise exception.NetAppException(msg % list(self.QOS_SPECS))

        # TODO(gouthamr): Modify check when throughput floors are allowed
        if len(normalized_qos_specs) > 1:
            msg = _('Only one NetApp QoS spec can be set at a time. '
                    'Specified QoS limits: %s')
            raise exception.NetAppException(msg % normalized_qos_specs)

        return normalized_qos_specs

    def _get_max_throughput(self, share_size, qos_specs):
        # QoS limits are exclusive of one another.
        # Returns None when no recognized limit key is present.
        if 'maxiops' in qos_specs:
            return '%siops' % qos_specs['maxiops']
        elif 'maxiopspergib' in qos_specs:
            return '%siops' % str(
                int(qos_specs['maxiopspergib']) * int(share_size))
        elif 'maxbps' in qos_specs:
            return '%sB/s' % qos_specs['maxbps']
        elif 'maxbpspergib' in qos_specs:
            return '%sB/s' % str(
                int(qos_specs['maxbpspergib']) * int(share_size))

    @na_utils.trace
    def _create_qos_policy_group(self, share, vserver, qos_specs,
                                 vserver_client=None):
        # Create a backend QoS policy group limited by the single normalized
        # QoS spec, falling back to the cluster client when no vserver
        # client is given.
        max_throughput = self._get_max_throughput(share['size'], qos_specs)
        qos_policy_group_name = self._get_backend_qos_policy_group_name(
            share['id'])
        client = vserver_client or self._client
        client.qos_policy_group_create(qos_policy_group_name, vserver,
                                       max_throughput=max_throughput)
        return qos_policy_group_name

    @na_utils.trace
    def _get_provisioning_options_for_share(
            self, share, vserver, vserver_client=None, set_qos=True):
        """Return provisioning options from a share.

        Starting with a share, this method gets the extra specs, rationalizes
        NetApp vs. standard extra spec values, ensures their validity, and
        returns them in a form suitable for passing to various API client
        methods.
""" extra_specs = share_types.get_extra_specs_from_share(share) extra_specs = self._remap_standard_boolean_extra_specs(extra_specs) self._check_extra_specs_validity(share, extra_specs) provisioning_options = self._get_provisioning_options(extra_specs) qos_specs = self._get_normalized_qos_specs(extra_specs) self.validate_provisioning_options_for_share(provisioning_options, extra_specs=extra_specs, qos_specs=qos_specs) if qos_specs and set_qos: qos_policy_group = self._create_qos_policy_group( share, vserver, qos_specs, vserver_client) provisioning_options['qos_policy_group'] = qos_policy_group return provisioning_options @na_utils.trace def _get_provisioning_options(self, specs): """Return a merged result of string and binary provisioning options.""" boolean_args = self._get_boolean_provisioning_options( specs, self.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP) string_args = self.get_string_provisioning_options( specs, self.STRING_QUALIFIED_EXTRA_SPECS_MAP) result = boolean_args.copy() result.update(string_args) result['encrypt'] = self._get_nve_option(specs) return result @na_utils.trace def validate_provisioning_options_for_share(self, provisioning_options, extra_specs=None, qos_specs=None): """Checks if provided provisioning options are valid.""" adaptive_qos = provisioning_options.get('adaptive_qos_policy_group') max_files = provisioning_options.get('max_files') max_files_multiplier = provisioning_options.get('max_files_multiplier') replication_type = (extra_specs.get('replication_type') if extra_specs else None) if adaptive_qos and qos_specs: msg = _('Share cannot be provisioned with both qos_specs ' '%(qos_specs_string)s and adaptive_qos_policy_group ' '%(adaptive_qos_policy_group)s.') qos_specs_string = "" for key in qos_specs: qos_specs_string += key + "=" + str(qos_specs[key]) + " " msg_args = { 'adaptive_qos_policy_group': provisioning_options['adaptive_qos_policy_group'], 'qos_specs_string': qos_specs_string } raise exception.NetAppException(msg % msg_args) if 
adaptive_qos and replication_type: msg = _("The extra spec 'adaptive_qos_policy_group' is not " "supported by share replication feature.") raise exception.NetAppException(msg) if max_files and max_files_multiplier: msg = _("Share cannot be provisioned with both 'max_files' and " "'max_files_multiplier' extra specs.") raise exception.NetAppException(msg) # NOTE(dviroel): This validation will need to be updated if newer # versions of ONTAP stop requiring cluster credentials to associate # QoS to volumes. if (adaptive_qos or qos_specs) and not self._have_cluster_creds: msg = _('Share cannot be provisioned with QoS without having ' 'cluster credentials.') raise exception.NetAppException(msg) fpolicy_ext_to_include = provisioning_options.get( 'fpolicy_extensions_to_include') fpolicy_ext_to_exclude = provisioning_options.get( 'fpolicy_extensions_to_exclude') if provisioning_options.get('fpolicy_file_operations') and not ( fpolicy_ext_to_include or fpolicy_ext_to_exclude): msg = _('The extra spec "fpolicy_file_operations" can only ' 'be configured together with ' '"fpolicy_extensions_to_include" or ' '"fpolicy_extensions_to_exclude".') raise exception.NetAppException(msg) if replication_type and ( fpolicy_ext_to_include or fpolicy_ext_to_exclude): msg = _("The extra specs 'fpolicy_extensions_to_include' and " "'fpolicy_extensions_to_exclude' are not " "supported by share replication feature.") raise exception.NetAppException(msg) def _get_nve_option(self, specs): if 'netapp_flexvol_encryption' in specs: nve = specs['netapp_flexvol_encryption'].lower() == 'true' else: nve = False return nve @na_utils.trace def _check_aggregate_extra_specs_validity(self, pool_name, specs): for specs_key in ('netapp_disk_type', 'netapp_raid_type'): aggr_value = self._ssc_stats.get(pool_name, {}).get(specs_key) specs_value = specs.get(specs_key) if aggr_value and specs_value and aggr_value != specs_value: msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" ' 'in pool %(pool)s.') 
msg_args = { 'value': specs_value, 'key': specs_key, 'pool': pool_name } raise exception.NetAppException(msg % msg_args) @na_utils.trace def _allocate_container_from_snapshot( self, share, snapshot, vserver, vserver_client, snapshot_name_func=_get_backend_snapshot_name, split=None, create_fpolicy=True): """Clones existing share.""" share_name = self._get_backend_share_name(share['id']) parent_share_name = self._get_backend_share_name(snapshot['share_id']) if snapshot.get('provider_location') is None: parent_snapshot_name = snapshot_name_func(self, snapshot['id']) else: parent_snapshot_name = snapshot['provider_location'] provisioning_options = self._get_provisioning_options_for_share( share, vserver, vserver_client=vserver_client) hide_snapdir = provisioning_options.pop('hide_snapdir') mount_point_name = share.get('mount_point_name') # split in args takes precedence over split in provisioning_options if split is None: split = provisioning_options.pop('split') LOG.debug('Creating share from snapshot %s', snapshot['id']) vserver_client.create_volume_clone( share_name, parent_share_name, parent_snapshot_name, mount_point_name=mount_point_name, **provisioning_options) if share['size'] > snapshot['size']: vserver_client.set_volume_size(share_name, share['size']) if provisioning_options.get('max_files_multiplier') is not None: max_files_multiplier = provisioning_options.pop( 'max_files_multiplier') max_files = na_utils.calculate_max_files(share['size'], max_files_multiplier) vserver_client.set_volume_max_files(share_name, max_files) if hide_snapdir: self._apply_snapdir_visibility( hide_snapdir, share_name, vserver_client) if create_fpolicy: fpolicy_ext_to_include = provisioning_options.get( 'fpolicy_extensions_to_include') fpolicy_ext_to_exclude = provisioning_options.get( 'fpolicy_extensions_to_exclude') if fpolicy_ext_to_include or fpolicy_ext_to_exclude: self._create_fpolicy_for_share(share, vserver, vserver_client, **provisioning_options) # split at the end: not be 
blocked by a busy volume if split: vserver_client.volume_clone_split_start(share_name) @na_utils.trace def _share_exists(self, share_name, vserver_client): return vserver_client.volume_exists(share_name) @na_utils.trace def _delete_share(self, share, vserver, vserver_client, remove_export=True, remove_qos=True): share_name = self._get_backend_share_name(share['id']) # Share doesn't need to exist to be assigned to a fpolicy scope self._delete_fpolicy_for_share(share, vserver, vserver_client) if self._share_exists(share_name, vserver_client): if vserver_client.volume_clone_split_status(share_name) != 100: vserver_client.volume_clone_split_stop(share_name) if remove_export: self._remove_export(share, vserver_client) self._deallocate_container(share_name, vserver_client) if remove_qos: qos_policy_for_share = self._get_backend_qos_policy_group_name( share['id']) vserver_client.mark_qos_policy_group_for_deletion( qos_policy_for_share) else: LOG.info("Share %s does not exist.", share['id']) @na_utils.trace def delete_share(self, context, share, share_server=None): """Deletes share.""" try: vserver, vserver_client = self._get_vserver( share_server=share_server) except (exception.InvalidInput, exception.VserverNotSpecified, exception.VserverNotFound) as error: LOG.warning("Could not determine share server for share being " "deleted: %(share)s. Deletion of share record " "will proceed anyway. 
Error: %(error)s", {'share': share['id'], 'error': error}) return self._delete_share(share, vserver, vserver_client) @na_utils.trace def _deallocate_container(self, share_name, vserver_client): """Free share space.""" vserver_client.unmount_volume(share_name, force=True) vserver_client.offline_volume(share_name) vserver_client.delete_volume(share_name) @na_utils.trace def _create_export(self, share, share_server, vserver, vserver_client, cluster_client=None, clear_current_export_policy=True, ensure_share_already_exists=False, replica=False, share_host=None): """Creates NAS storage.""" helper = self._get_helper(share) helper.set_client(vserver_client) share_name = self._get_backend_share_name(share['id']) interfaces = vserver_client.get_network_interfaces( protocols=[share['share_proto']]) if not interfaces: msg = _('Cannot find network interfaces for Vserver %(vserver)s ' 'and protocol %(proto)s.') msg_args = {'vserver': vserver, 'proto': share['share_proto']} raise exception.NetAppException(msg % msg_args) host = share_host if share_host else share['host'] # Get LIF addresses with metadata export_addresses = self._get_export_addresses_with_metadata( share, share_server, interfaces, host, cluster_client) # Create the share and get a callback for generating export locations pool = share_utils.extract_host(share['host'], level='pool') callback = helper.create_share( share, share_name, clear_current_export_policy=clear_current_export_policy, ensure_share_already_exists=ensure_share_already_exists, replica=replica, is_flexgroup=self._is_flexgroup_pool(pool)) # Generate export locations using addresses, metadata and callback export_locations = [ { 'path': callback(export_address), 'is_admin_only': metadata.pop('is_admin_only', False), 'metadata': metadata, } for export_address, metadata in copy.deepcopy(export_addresses).items() ] # Sort the export locations to report preferred paths first export_locations = self._sort_export_locations_by_preferred_paths( 
export_locations) return export_locations @na_utils.trace def _get_export_addresses_with_metadata(self, share, share_server, interfaces, share_host, cluster_client=None): """Return interface addresses with locality and other metadata.""" # Get home nodes so we can identify preferred paths pool = share_utils.extract_host(share_host, level='pool') home_node_set = set() if self._is_flexgroup_pool(pool): for aggregate_name in self._get_flexgroup_aggregate_list(pool): home_node = self._get_aggregate_node( aggregate_name, cluster_client) if home_node: home_node_set.add(home_node) else: home_node = self._get_aggregate_node(pool, cluster_client) if home_node: home_node_set.add(home_node) # Get admin LIF addresses so we can identify admin export locations admin_addresses = self._get_admin_addresses_for_share_server( share_server) addresses = {} for interface in interfaces: address = interface['address'] is_admin_only = address in admin_addresses preferred = interface.get('home-node') in home_node_set addresses[address] = { 'is_admin_only': is_admin_only, 'preferred': preferred, } return addresses @na_utils.trace def _get_admin_addresses_for_share_server(self, share_server): if not share_server: return [] admin_addresses = [] for network_allocation in share_server.get('network_allocations'): if network_allocation['label'] == 'admin': admin_addresses.append(network_allocation['ip_address']) return admin_addresses @na_utils.trace def _sort_export_locations_by_preferred_paths(self, export_locations): """Sort the export locations to report preferred paths first.""" sort_key = lambda location: location.get( # noqa: E731 'metadata', {}).get('preferred') is not True return sorted(export_locations, key=sort_key) @na_utils.trace def _remove_export(self, share, vserver_client): """Deletes NAS storage.""" helper = self._get_helper(share) helper.set_client(vserver_client) share_name = self._get_backend_share_name(share['id']) target = helper.get_target(share) # Share may be in error 
state, so there's no share and target. if target: helper.delete_share(share, share_name) @na_utils.trace def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot of a share.""" vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(snapshot['share_id']) snapshot_name = self._get_backend_snapshot_name(snapshot['id']) LOG.debug('Creating snapshot %s', snapshot_name) vserver_client.create_snapshot(share_name, snapshot_name) if not vserver_client.snapshot_exists(snapshot_name, share_name): raise exception.SnapshotResourceNotFound( name=snapshot_name) return {'provider_location': snapshot_name} def revert_to_snapshot(self, context, snapshot, share_server=None): """Reverts a share (in place) to the specified snapshot.""" vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(snapshot['share_id']) snapshot_name = (snapshot.get('provider_location') or self._get_backend_snapshot_name(snapshot['id'])) LOG.debug('Restoring snapshot %s', snapshot_name) vserver_client.restore_snapshot(share_name, snapshot_name) volume = vserver_client.get_volume(share_name) # When calculating the size, round up to the next GB. return int(math.ceil(float(volume['size']) / units.Gi)) @na_utils.trace def delete_snapshot(self, context, snapshot, share_server=None, snapshot_name=None): """Deletes a snapshot of a share.""" try: vserver, vserver_client = self._get_vserver( share_server=share_server) except (exception.InvalidInput, exception.VserverNotSpecified, exception.VserverNotFound) as error: LOG.warning("Could not determine share server for snapshot " "being deleted: %(snap)s. Deletion of snapshot " "record will proceed anyway. 
Error: %(error)s", {'snap': snapshot['id'], 'error': error}) return share_name = self._get_backend_share_name(snapshot['share_id']) snapshot_name = (snapshot.get('provider_location') or snapshot_name or self._get_backend_snapshot_name(snapshot['id'])) try: is_flexgroup = self._is_flexgroup_share(vserver_client, share_name) except exception.ShareNotFound: msg = _('Could not determine if the share %(share)s is FlexGroup ' 'or FlexVol style. Share does not exist.') msg_args = {'share': share_name} LOG.info(msg, msg_args) is_flexgroup = False try: self._delete_snapshot(vserver_client, share_name, snapshot_name, is_flexgroup=is_flexgroup) except exception.SnapshotResourceNotFound: msg = "Snapshot %(snap)s does not exist on share %(share)s." msg_args = {'snap': snapshot_name, 'share': share_name} LOG.info(msg, msg_args) def _delete_snapshot(self, vserver_client, share_name, snapshot_name, is_flexgroup=False): """Deletes a backend snapshot, handling busy snapshots as needed.""" backend_snapshot = vserver_client.get_snapshot(share_name, snapshot_name) LOG.debug('Deleting snapshot %(snap)s for share %(share)s.', {'snap': snapshot_name, 'share': share_name}) if not backend_snapshot['busy']: vserver_client.delete_snapshot(share_name, snapshot_name) elif backend_snapshot['locked_by_clone']: if is_flexgroup: # Snapshots are locked by clone(s), so split the clone(s) snap_children = vserver_client.get_clone_children_for_snapshot( share_name, snapshot_name) for snapshot_child in snap_children: vserver_client.volume_clone_split_start( snapshot_child['name']) # NOTE(felipe_rodrigues): ONTAP does not allow rename a # FlexGroup snapshot, so it cannot be soft deleted. It will # wait for all split clones complete. 
self._delete_busy_snapshot(vserver_client, share_name, snapshot_name) else: vserver_client.soft_delete_snapshot(share_name, snapshot_name) else: raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot_name) @na_utils.trace def _delete_busy_snapshot(self, vserver_client, share_name, snapshot_name): """Delete the snapshot waiting for it to not be busy.""" timeout = (self.configuration. netapp_delete_busy_flexgroup_snapshot_timeout) interval = 5 retries = (int(timeout / interval) or 1) @manila_utils.retry(exception.ShareSnapshotIsBusy, interval=interval, retries=retries, backoff_rate=1) def _wait_snapshot_is_not_busy(): backend_snapshot = vserver_client.get_snapshot(share_name, snapshot_name) if backend_snapshot['busy']: msg = _("Cannot delete snapshot %s that is busy. Will wait it " "for not be busy.") LOG.debug(msg, snapshot_name) raise exception.ShareSnapshotIsBusy( snapshot_name=snapshot_name) try: _wait_snapshot_is_not_busy() vserver_client.delete_snapshot(share_name, snapshot_name) except exception.ShareSnapshotIsBusy: msg = _("Error deleting the snapshot %s: timeout waiting for " "FlexGroup snapshot to not be busy.") raise exception.NetAppException(msg % snapshot_name) @na_utils.trace def manage_existing(self, share, driver_options, share_server=None): vserver, vserver_client = self._get_vserver(share_server=share_server) share_size = self._manage_container(share, vserver, vserver_client) export_locations = self._create_export(share, share_server, vserver, vserver_client) return {'size': share_size, 'export_locations': export_locations} @na_utils.trace def unmanage(self, share, share_server=None): pass @na_utils.trace def _manage_container(self, share, vserver, vserver_client): """Bring existing volume under management as a share.""" protocol_helper = self._get_helper(share) protocol_helper.set_client(vserver_client) volume_name = protocol_helper.get_share_name_for_share(share) if not volume_name: msg = _('Volume could not be determined from export location 
' '%(export)s.') msg_args = {'export': share['export_location']} raise exception.ManageInvalidShare(reason=msg % msg_args) # NOTE(felipe_rodrigues): depending on volume style, the aggregate_name # is a string (FlexVol) or a list (FlexGroup). pool_name = share_utils.extract_host(share['host'], level='pool') flexgroup_pool = False aggregate_name = pool_name if self._is_flexgroup_pool(pool_name): flexgroup_pool = True if self._is_flexgroup_auto: aggregate_name = self._client.get_aggregate_for_volume( volume_name) else: aggregate_name = self._get_flexgroup_aggregate_list(pool_name) # Check that share and pool are from same style. flexgroup_vol = self._is_flexgroup_share(vserver_client, volume_name) if flexgroup_vol != flexgroup_pool: share_style = 'FlexGroup' if flexgroup_vol else 'FlexVol' pool_style = 'FlexGroup' if flexgroup_pool else 'FlexVol' msg = _('Could not manage share %(share)s on the specified pool ' '%(pool_name)s. The share is from %(share_style)s style, ' 'while the pool is for %(pool_style)s style.') msg_args = {'share': volume_name, 'pool_name': pool_name, 'share_style': share_style, 'pool_style': pool_style} raise exception.ManageInvalidShare(reason=msg % msg_args) # Get existing volume info. volume = vserver_client.get_volume_to_manage(aggregate_name, volume_name) if not volume: msg = _('Volume %(volume)s not found on pool %(pool)s.') msg_args = {'volume': volume_name, 'pool': pool_name} raise exception.ManageInvalidShare(reason=msg % msg_args) # When calculating the size, round up to the next GB. volume_size = int(math.ceil(float(volume['size']) / units.Gi)) # Validate extra specs. 
extra_specs = share_types.get_extra_specs_from_share(share) extra_specs = self._remap_standard_boolean_extra_specs(extra_specs) try: self._check_extra_specs_validity(share, extra_specs) self._check_aggregate_extra_specs_validity(pool_name, extra_specs) except exception.ManilaException as ex: raise exception.ManageExistingShareTypeMismatch( reason=str(ex)) # Ensure volume is manageable. self._validate_volume_for_manage(volume, vserver_client) provisioning_options = self._get_provisioning_options(extra_specs) qos_specs = self._get_normalized_qos_specs(extra_specs) self.validate_provisioning_options_for_share(provisioning_options, extra_specs=extra_specs, qos_specs=qos_specs) # Check fpolicy extra-specs. fpolicy_ext_include = provisioning_options.get( 'fpolicy_extensions_to_include') fpolicy_ext_exclude = provisioning_options.get( 'fpolicy_extensions_to_exclude') fpolicy_file_operations = provisioning_options.get( 'fpolicy_file_operations') fpolicy_scope = None if fpolicy_ext_include or fpolicy_ext_include: fpolicy_scope = self._find_reusable_fpolicy_scope( share, vserver_client, fpolicy_extensions_to_include=fpolicy_ext_include, fpolicy_extensions_to_exclude=fpolicy_ext_exclude, fpolicy_file_operations=fpolicy_file_operations, shares_to_include=[volume_name] ) if fpolicy_scope is None: msg = _('Volume %(volume)s does not contains the expected ' 'fpolicy configuration.') msg_args = {'volume': volume_name} raise exception.ManageExistingShareTypeMismatch( reason=msg % msg_args) share_name = self._get_backend_share_name(share['id']) mount_point_name = share.get('mount_point_name') # Rename & remount volume on new path. 
vserver_client.unmount_volume(volume_name) vserver_client.set_volume_name(volume_name, share_name) vserver_client.mount_volume(share_name, mount_point_name) qos_policy_group_name = self._modify_or_create_qos_for_existing_share( share, extra_specs, vserver, vserver_client) if qos_policy_group_name: provisioning_options['qos_policy_group'] = qos_policy_group_name snap_attributes = self._get_provisioning_options_for_snap_attributes( vserver_client, share_name) provisioning_options.update(snap_attributes) debug_args = { 'share': share_name, 'aggr': (",".join(aggregate_name) if flexgroup_vol else aggregate_name), 'options': provisioning_options } LOG.debug('Managing share %(share)s on aggregate(s) %(aggr)s with ' 'provisioning options %(options)s', debug_args) # Modify volume to match extra specs. vserver_client.modify_volume(aggregate_name, share_name, **provisioning_options) # Update fpolicy to include the new share name and remove the old one. if fpolicy_scope is not None: shares_to_include = copy.deepcopy( fpolicy_scope.get('shares-to-include', [])) shares_to_include.remove(volume_name) shares_to_include.append(share_name) policy_name = fpolicy_scope.get('policy-name') # Update. vserver_client.modify_fpolicy_scope( share_name, policy_name, shares_to_include=shares_to_include) if provisioning_options.get('max_files_multiplier') is not None: max_files_multiplier = provisioning_options.pop( 'max_files_multiplier') max_files = na_utils.calculate_max_files(volume_size, max_files_multiplier) vserver_client.set_volume_max_files(share_name, max_files, retry_allocated=True) # Save original volume info to private storage. 
original_data = { 'original_name': volume['name'], 'original_junction_path': volume['junction-path'] } self.private_storage.update(share['id'], original_data) return volume_size @na_utils.trace def _get_provisioning_options_for_snap_attributes(self, vserver_client, volume_name): provisioning_options = {} snapshot_attributes = vserver_client.get_volume_snapshot_attributes( volume_name) if ( snapshot_attributes['snapshot-policy'].lower() in self.configuration.netapp_volume_snapshot_policy_exceptions ): provisioning_options['snapshot_policy'] = ( snapshot_attributes['snapshot-policy']) provisioning_options['hide_snapdir'] = ( snapshot_attributes['snapdir-access-enabled'].lower() != 'true' ) return provisioning_options @na_utils.trace def _validate_volume_for_manage(self, volume, vserver_client): """Ensure volume is a candidate for becoming a share.""" # Check volume info, extra specs validity if volume['type'] != 'rw' or volume['style'] != 'flex': msg = _('Volume %(volume)s must be a read-write flexible volume.') msg_args = {'volume': volume['name']} raise exception.ManageInvalidShare(reason=msg % msg_args) if vserver_client.volume_has_luns(volume['name']): msg = _('Volume %(volume)s must not contain LUNs.') msg_args = {'volume': volume['name']} raise exception.ManageInvalidShare(reason=msg % msg_args) if vserver_client.volume_has_junctioned_volumes( volume['junction-path']): msg = _('Volume %(volume)s must not have junctioned volumes.') msg_args = {'volume': volume['name']} raise exception.ManageInvalidShare(reason=msg % msg_args) if vserver_client.volume_has_snapmirror_relationships(volume): msg = _('Volume %(volume)s must not be in any snapmirror ' 'relationships.') msg_args = {'volume': volume['name']} raise exception.ManageInvalidShare(reason=msg % msg_args) @na_utils.trace def manage_existing_snapshot( self, snapshot, driver_options, share_server=None): """Brings an existing snapshot under Manila management. 
The managed snapshot keeps with its name, not renaming to the driver snapshot name pattern (share_snapshot_), because the rename is lost when reverting to the snapshot, causing some issues (bug #1936648). """ vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(snapshot['share_id']) existing_snapshot_name = snapshot.get('provider_location') if not existing_snapshot_name: msg = _('provider_location not specified.') raise exception.ManageInvalidShareSnapshot(reason=msg) # Get the volume containing the snapshot so we can report its size. try: volume = vserver_client.get_volume(share_name) except (netapp_api.NaApiError, exception.StorageResourceNotFound, exception.NetAppException): msg = _('Could not determine snapshot %(snap)s size from ' 'volume %(vol)s.') msg_args = {'snap': existing_snapshot_name, 'vol': share_name} LOG.exception(msg, msg_args) raise exception.ShareNotFound(share_id=snapshot['share_id']) # Ensure snapshot is from the share. if not vserver_client.snapshot_exists( existing_snapshot_name, share_name): msg = _('Snapshot %(snap)s is not from the share %(vol)s.') msg_args = {'snap': existing_snapshot_name, 'vol': share_name} raise exception.ManageInvalidShareSnapshot(reason=msg % msg_args) # Ensure there aren't any mirrors on this volume. if vserver_client.volume_has_snapmirror_relationships(volume): msg = _('Share %s has SnapMirror relationships.') msg_args = {'vol': share_name} raise exception.ManageInvalidShareSnapshot(reason=msg % msg_args) # When calculating the size, round up to the next GB. 
size = int(math.ceil(float(volume['size']) / units.Gi)) return {'size': size} @na_utils.trace def unmanage_snapshot(self, snapshot, share_server=None): """Removes the specified snapshot from Manila management.""" @na_utils.trace def create_consistency_group_from_cgsnapshot( self, context, cg_dict, cgsnapshot_dict, share_server=None): """Creates a consistency group from an existing CG snapshot.""" vserver, vserver_client = self._get_vserver(share_server=share_server) # Ensure there is something to do if not cgsnapshot_dict['share_group_snapshot_members']: return None, None clone_list = self._collate_cg_snapshot_info(cg_dict, cgsnapshot_dict) share_update_list = [] LOG.debug('Creating consistency group from CG snapshot %s.', cgsnapshot_dict['id']) for clone in clone_list: self._allocate_container_from_snapshot( clone['share'], clone['snapshot'], vserver, vserver_client, NetAppCmodeFileStorageLibrary._get_backend_cg_snapshot_name) export_locations = self._create_export(clone['share'], share_server, vserver, vserver_client) share_update_list.append({ 'id': clone['share']['id'], 'export_locations': export_locations, }) return None, share_update_list def _collate_cg_snapshot_info(self, cg_dict, cgsnapshot_dict): """Collate the data for a clone of a CG snapshot. Given two data structures, a CG snapshot (cgsnapshot_dict) and a new CG to be cloned from the snapshot (cg_dict), match up both structures into a list of dicts (share & snapshot) suitable for use by existing driver methods that clone individual share snapshots. 
""" clone_list = list() for share in cg_dict['shares']: clone_info = {'share': share} for cgsnapshot_member in ( cgsnapshot_dict['share_group_snapshot_members']): if (share['source_share_group_snapshot_member_id'] == cgsnapshot_member['id']): clone_info['snapshot'] = { 'share_id': cgsnapshot_member['share_id'], 'id': cgsnapshot_dict['id'], 'size': cgsnapshot_member['size'], } break else: msg = _("Invalid data supplied for creating consistency group " "from CG snapshot %s.") % cgsnapshot_dict['id'] raise exception.InvalidShareGroup(reason=msg) clone_list.append(clone_info) return clone_list @na_utils.trace def create_cgsnapshot(self, context, snap_dict, share_server=None): """Creates a consistency group snapshot.""" vserver, vserver_client = self._get_vserver(share_server=share_server) share_names = [self._get_backend_share_name(member['share_id']) for member in snap_dict.get('share_group_snapshot_members', [])] snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id']) if share_names: LOG.debug('Creating CG snapshot %s.', snapshot_name) vserver_client.create_cg_snapshot(share_names, snapshot_name) return None, None @na_utils.trace def delete_cgsnapshot(self, context, snap_dict, share_server=None): """Deletes a consistency group snapshot.""" try: vserver, vserver_client = self._get_vserver( share_server=share_server) except (exception.InvalidInput, exception.VserverNotSpecified, exception.VserverNotFound) as error: LOG.warning("Could not determine share server for CG snapshot " "being deleted: %(snap)s. Deletion of CG snapshot " "record will proceed anyway. 
Error: %(error)s", {'snap': snap_dict['id'], 'error': error}) return None, None share_names = [self._get_backend_share_name(member['share_id']) for member in ( snap_dict.get('share_group_snapshot_members', []))] snapshot_name = self._get_backend_cg_snapshot_name(snap_dict['id']) for share_name in share_names: try: self._delete_snapshot( vserver_client, share_name, snapshot_name) except exception.SnapshotResourceNotFound: msg = ("Snapshot %(snap)s does not exist on share " "%(share)s.") msg_args = {'snap': snapshot_name, 'share': share_name} LOG.info(msg, msg_args) continue return None, None @staticmethod def _is_group_cg(context, share_group): return 'host' == share_group.consistent_snapshot_support @na_utils.trace def create_group_snapshot(self, context, snap_dict, fallback_create, share_server=None): share_group = snap_dict['share_group'] if self._is_group_cg(context, share_group): return self.create_cgsnapshot(context, snap_dict, share_server=share_server) else: return fallback_create(context, snap_dict, share_server=share_server) @na_utils.trace def delete_group_snapshot(self, context, snap_dict, fallback_delete, share_server=None): share_group = snap_dict['share_group'] if self._is_group_cg(context, share_group): return self.delete_cgsnapshot(context, snap_dict, share_server=share_server) else: return fallback_delete(context, snap_dict, share_server=share_server) @na_utils.trace def create_group_from_snapshot(self, context, share_group, snapshot_dict, fallback_create, share_server=None): share_group2 = snapshot_dict['share_group'] if self._is_group_cg(context, share_group2): return self.create_consistency_group_from_cgsnapshot( context, share_group, snapshot_dict, share_server=share_server) else: return fallback_create(context, share_group, snapshot_dict, share_server=share_server) @na_utils.trace def _adjust_qos_policy_with_volume_resize(self, share, new_size, vserver_client): # Adjust QoS policy on a share if any if self._have_cluster_creds: share_name = 
self._get_backend_share_name(share['id']) share_on_the_backend = vserver_client.get_volume(share_name) qos_policy_on_share = share_on_the_backend['qos-policy-group-name'] if qos_policy_on_share is None: return extra_specs = share_types.get_extra_specs_from_share(share) qos_specs = self._get_normalized_qos_specs(extra_specs) size_dependent_specs = {k: v for k, v in qos_specs.items() if k in self.SIZE_DEPENDENT_QOS_SPECS} if size_dependent_specs: max_throughput = self._get_max_throughput( new_size, size_dependent_specs) self._client.qos_policy_group_modify( qos_policy_on_share, max_throughput) @na_utils.trace def extend_share(self, share, new_size, share_server=None): """Extends size of existing share.""" vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(share['id']) extra_specs = share_types.get_extra_specs_from_share(share) provisioning_options = self._get_provisioning_options(extra_specs) vserver_client.set_volume_filesys_size_fixed(share_name, filesys_size_fixed=False) LOG.debug('Extending share %(name)s to %(size)s GB.', {'name': share_name, 'size': new_size}) vserver_client.set_volume_size(share_name, new_size) if provisioning_options.get('max_files_multiplier') is not None: max_files_multiplier = provisioning_options.pop( 'max_files_multiplier') max_files = na_utils.calculate_max_files(new_size, max_files_multiplier) vserver_client.set_volume_max_files(share_name, max_files) self._adjust_qos_policy_with_volume_resize(share, new_size, vserver_client) @na_utils.trace def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(share['id']) extra_specs = share_types.get_extra_specs_from_share(share) provisioning_options = self._get_provisioning_options(extra_specs) vserver_client.set_volume_filesys_size_fixed(share_name, filesys_size_fixed=False) 
LOG.debug('Shrinking share %(name)s to %(size)s GB.', {'name': share_name, 'size': new_size}) try: vserver_client.set_volume_size(share_name, new_size) except netapp_api.NaApiError as e: if e.code == netapp_api.EVOLOPNOTSUPP: msg = _('Failed to shrink share %(share_id)s. ' 'The current used space is larger than the the size' ' requested.') msg_args = {'share_id': share['id']} LOG.error(msg, msg_args) raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) self._adjust_qos_policy_with_volume_resize( share, new_size, vserver_client) if provisioning_options.get('max_files_multiplier') is not None: max_files_multiplier = provisioning_options.pop( 'max_files_multiplier') max_files = na_utils.calculate_max_files(new_size, max_files_multiplier) vserver_client.set_volume_max_files(share_name, max_files, retry_allocated=True) @na_utils.trace def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Updates access rules for a share.""" # NOTE(felipe_rodrigues): do not add export rules to a non-active # replica that is DR type, it might not have its policy yet. replica_state = share.get('replica_state') if (replica_state is not None and replica_state != constants.REPLICA_STATE_ACTIVE and not self._is_readable_replica(share)): return try: vserver, vserver_client = self._get_vserver( share_server=share_server) except (exception.InvalidInput, exception.VserverNotSpecified, exception.VserverNotFound) as error: LOG.warning("Could not determine share server for share " "%(share)s during access rules update. 
" "Error: %(error)s", {'share': share['id'], 'error': error}) return share_name = self._get_backend_share_name(share['id']) if self._share_exists(share_name, vserver_client): helper = self._get_helper(share) helper.set_client(vserver_client) helper.update_access(share, share_name, access_rules) else: raise exception.ShareResourceNotFound(share_id=share['id']) def setup_server(self, network_info, metadata=None): raise NotImplementedError() def teardown_server(self, server_details, security_services=None): raise NotImplementedError() def get_network_allocations_number(self): """Get number of network interfaces to be created.""" raise NotImplementedError() @na_utils.trace def _update_ssc_info(self): """Periodically runs to update Storage Service Catalog data. The self._ssc_stats attribute is updated with the following format. { : {: }} """ LOG.info("Updating storage service catalog information for " "backend '%s'", self._backend_name) # Work on a copy and update the ssc data atomically before returning. ssc_stats = copy.deepcopy(self._ssc_stats) aggregate_names = self._find_matching_aggregates() # Initialize entries for each aggregate. 
for aggregate_name in aggregate_names: if aggregate_name not in ssc_stats: ssc_stats[aggregate_name] = { 'netapp_aggregate': aggregate_name, 'netapp_flexgroup': False, } # Initialize entries for each FlexGroup pool flexgroup_pools = self._flexgroup_pools for pool_name, aggr_list in flexgroup_pools.items(): if pool_name not in ssc_stats: ssc_stats[pool_name] = { 'netapp_aggregate': " ".join(aggr_list), 'netapp_flexgroup': True, } # Add the SnapLock info for FlexVol for aggr_name in aggregate_names: if self._client.features.UNIFIED_AGGR: snaplock_dict = {'netapp_snaplock_type': self.SNAPLOCK_TYPE} else: snaplock_dict = { 'netapp_snaplock_type': self._get_aggregate_snaplock_type(aggr_name) } ssc_stats[aggr_name].update(snaplock_dict) # Add aggregate specs for pools aggr_set = set(aggregate_names).union(self._get_flexgroup_aggr_set()) if self._have_cluster_creds and aggr_set: aggr_info = self._get_aggregate_info(aggr_set) # FlexVol pools aggr_info_flexvol = copy.deepcopy(aggr_info) for aggr_name in aggregate_names: if self._client.features.UNIFIED_AGGR: aggr_info_flexvol[aggr_name]['netapp_snaplock_type'] = \ self.SNAPLOCK_TYPE ssc_stats[aggr_name].update(aggr_info_flexvol[aggr_name]) # FlexGroup pools for pool_name, aggr_list in flexgroup_pools.items(): raid_type = set() hybrid = set() disk_type = set() snaplock_type = set() for aggr in aggr_list: raid_type.add(aggr_info[aggr]['netapp_raid_type']) hybrid.add(aggr_info[aggr]['netapp_hybrid_aggregate']) disk_type = disk_type.union( aggr_info[aggr]['netapp_disk_type']) snaplock_type.add(aggr_info[aggr]['netapp_snaplock_type']) ssc_stats[pool_name].update({ 'netapp_raid_type': " ".join(sorted(raid_type)), 'netapp_hybrid_aggregate': " ".join(sorted(hybrid)), 'netapp_disk_type': sorted(list(disk_type)), 'netapp_snaplock_type': self.SNAPLOCK_TYPE if self._client.features.UNIFIED_AGGR else " ".join(sorted(snaplock_type)), }) self._ssc_stats = ssc_stats @na_utils.trace def _get_aggregate_info(self, aggregate_names): """Gets 
the disk type information for the driver aggregates. :param aggregate_names: The aggregates this driver cares about """ aggr_info = {} for aggregate_name in aggregate_names: aggregate = self._client.get_aggregate(aggregate_name) hybrid = (str(aggregate.get('is-hybrid')).lower() if 'is-hybrid' in aggregate else None) disk_types = self._client.get_aggregate_disk_types(aggregate_name) aggr_info[aggregate_name] = { 'netapp_raid_type': aggregate.get('raid-type'), 'netapp_hybrid_aggregate': hybrid, 'netapp_disk_type': disk_types, 'netapp_is_home': aggregate.get('is-home'), 'netapp_snaplock_type': aggregate.get('snaplock-type'), } return aggr_info def find_active_replica(self, replica_list): # NOTE(ameade): Find current active replica. There can only be one # active replica (SnapMirror source volume) at a time in cDOT. for r in replica_list: if r['replica_state'] == constants.REPLICA_STATE_ACTIVE: return r def _find_nonactive_replicas(self, replica_list): """Returns a list of all except the active replica.""" return [replica for replica in replica_list if replica['replica_state'] != constants.REPLICA_STATE_ACTIVE] def create_replica(self, context, replica_list, new_replica, access_rules, share_snapshots, share_server=None): """Creates the new replica on this backend and sets up SnapMirror.""" active_replica = self.find_active_replica(replica_list) dm_session = data_motion.DataMotionSession() # check that the source and new replica reside in the same pool type: # either FlexGroup or FlexVol. 
src_share_name, src_vserver, src_backend = ( dm_session.get_backend_info_for_share(active_replica)) src_client = data_motion.get_client_for_backend( src_backend, vserver_name=src_vserver) src_is_flexgroup = self._is_flexgroup_share(src_client, src_share_name) pool_name = share_utils.extract_host(new_replica['host'], level='pool') dest_is_flexgroup = self._is_flexgroup_pool(pool_name) if src_is_flexgroup != dest_is_flexgroup: src_type = 'FlexGroup' if src_is_flexgroup else 'FlexVol' dest_type = 'FlexGroup' if dest_is_flexgroup else 'FlexVol' msg = _('Could not create replica %(replica_id)s from share ' '%(share_id)s in the destination host %(dest_host)s. The ' 'source share is from %(src_type)s style, while the ' 'destination replica host is %(dest_type)s style.') msg_args = {'replica_id': new_replica['id'], 'share_id': new_replica['share_id'], 'dest_host': new_replica['host'], 'src_type': src_type, 'dest_type': dest_type} raise exception.NetAppException(msg % msg_args) # NOTE(felipe_rodrigues): The FlexGroup replication does not support # several replicas (fan-out) in some ONTAP versions, while FlexVol is # always supported. if dest_is_flexgroup: fan_out = (src_client.is_flexgroup_fan_out_supported() and self._client.is_flexgroup_fan_out_supported()) if not fan_out and len(replica_list) > 2: msg = _('Could not create replica %(replica_id)s from share ' '%(share_id)s in the destination host %(dest_host)s. ' 'The share does not support more than one replica.') msg_args = {'replica_id': new_replica['id'], 'share_id': new_replica['share_id'], 'dest_host': new_replica['host']} raise exception.NetAppException(msg % msg_args) # 1. 
Create the destination share dest_backend = share_utils.extract_host(new_replica['host'], level='backend_name') vserver = (dm_session.get_vserver_from_share(new_replica) or self.configuration.netapp_vserver) vserver_client = data_motion.get_client_for_backend( dest_backend, vserver_name=vserver) is_readable = self._is_readable_replica(new_replica) self._allocate_container(new_replica, vserver, vserver_client, replica=True, create_fpolicy=False, set_qos=is_readable) # 2. Setup SnapMirror with mounting replica whether 'readable' type. relationship_type = na_utils.get_relationship_type(dest_is_flexgroup) dm_session.create_snapmirror(active_replica, new_replica, relationship_type, mount=is_readable) # 3. Create export location model_update = { 'export_locations': [], 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, 'access_rules_status': constants.STATUS_ACTIVE, } if is_readable: model_update['export_locations'] = self._create_export( new_replica, share_server, vserver, vserver_client, replica=True) if access_rules: helper = self._get_helper(new_replica) helper.set_client(vserver_client) share_name = self._get_backend_share_name(new_replica['id']) try: helper.update_access(new_replica, share_name, access_rules) except Exception: model_update['access_rules_status'] = ( constants.SHARE_INSTANCE_RULES_ERROR) return model_update def delete_replica(self, context, replica_list, replica, share_snapshots, share_server=None): """Removes the replica on this backend and destroys SnapMirror.""" dm_session = data_motion.DataMotionSession() # 1. Remove SnapMirror dest_backend = share_utils.extract_host(replica['host'], level='backend_name') vserver = (dm_session.get_vserver_from_share(replica) or self.configuration.netapp_vserver) # Ensure that all potential snapmirror relationships and their metadata # involving the replica are destroyed. 
for other_replica in replica_list: if other_replica['id'] != replica['id']: dm_session.delete_snapmirror(other_replica, replica) dm_session.delete_snapmirror(replica, other_replica) # 2. Delete share is_readable = self._is_readable_replica(replica) vserver_client = data_motion.get_client_for_backend( dest_backend, vserver_name=vserver) self._delete_share(replica, vserver, vserver_client, remove_export=is_readable, remove_qos=is_readable) @na_utils.trace def _convert_schedule_to_seconds(self, schedule='hourly'): """Convert snapmirror schedule to seconds.""" results = re.findall(r'[\d]+|[^d]+', schedule) if not results or len(results) > 2: return 3600 # default (1 hour) if len(results) == 2: try: num = int(results[0]) except ValueError: return 3600 schedule = results[1] else: num = 1 schedule = results[0] schedule = schedule.lower() if schedule in ['min', 'minute']: return (num * 60) if schedule in ['hour', 'hourly']: return (num * 3600) if schedule in ['day', 'daily']: return (num * 24 * 3600) if schedule in ['week', 'weekly']: return (num * 7 * 24 * 3600) if schedule in ['month', 'monthly']: return (num * 30 * 24 * 2600) return 3600 def update_replica_state(self, context, replica_list, replica, access_rules, share_snapshots, share_server=None, replication=True): """Returns the status of the given replica on this backend.""" active_replica = self.find_active_replica(replica_list) share_name = self._get_backend_share_name(replica['id']) vserver, vserver_client = self._get_vserver(share_server=share_server) if not vserver_client.volume_exists(share_name): msg = _("Volume %(share_name)s does not exist on vserver " "%(vserver)s.") msg_args = {'share_name': share_name, 'vserver': vserver} raise exception.ShareResourceNotFound(msg % msg_args) # NOTE(cknight): The SnapMirror may have been intentionally broken by # a revert-to-snapshot operation, in which case this method should not # attempt to change anything. 
if active_replica['status'] == constants.STATUS_REVERTING: return None dm_session = data_motion.DataMotionSession() try: snapmirrors = dm_session.get_snapmirrors(active_replica, replica) except netapp_api.NaApiError: LOG.exception("Could not get snapmirrors for replica %s.", replica['id']) return constants.STATUS_ERROR is_readable = replication and self._is_readable_replica(replica) if not snapmirrors: if replica['status'] != constants.STATUS_CREATING: try: pool_name = share_utils.extract_host(replica['host'], level='pool') relationship_type = na_utils.get_relationship_type( self._is_flexgroup_pool(pool_name)) dm_session.create_snapmirror(active_replica, replica, relationship_type, mount=is_readable) except netapp_api.NaApiError: LOG.exception("Could not create snapmirror for " "replica %s.", replica['id']) return constants.STATUS_ERROR return constants.REPLICA_STATE_OUT_OF_SYNC snapmirror = snapmirrors[0] # NOTE(dviroel): Don't try to resume or resync a SnapMirror that has # one of the in progress transfer states, because the storage will # answer with an error. 
in_progress_status = ['preparing', 'transferring', 'finalizing', 'synchronizing'] if (snapmirror.get('mirror-state') != 'snapmirrored' and (snapmirror.get('relationship-status') in in_progress_status or snapmirror.get('transferring-state') in in_progress_status)): return constants.REPLICA_STATE_OUT_OF_SYNC if snapmirror.get('mirror-state') != 'snapmirrored': try: vserver_client.resume_snapmirror_vol( snapmirror['source-vserver'], snapmirror['source-volume'], vserver, share_name) vserver_client.resync_snapmirror_vol( snapmirror['source-vserver'], snapmirror['source-volume'], vserver, share_name) return constants.REPLICA_STATE_OUT_OF_SYNC except netapp_api.NaApiError: LOG.exception("Could not resync snapmirror.") return constants.STATUS_ERROR current_schedule = snapmirror.get('schedule') new_schedule = self.configuration.netapp_snapmirror_schedule if current_schedule != new_schedule: dm_session.modify_snapmirror(active_replica, replica, schedule=new_schedule) LOG.debug('Modify snapmirror schedule for replica:' '%(replica)s from %(from)s to %(to)s', {'replica': replica['id'], 'from': current_schedule, 'to': new_schedule}) last_update_timestamp = float( snapmirror.get('last-transfer-end-timestamp', 0)) # Recovery Point Objective (RPO) indicates the point in time to # which data can be recovered. The RPO target is typically less # than twice the replication schedule. 
if (last_update_timestamp and (timeutils.is_older_than( datetime.datetime.fromtimestamp( last_update_timestamp, tz=datetime.timezone.utc).replace(tzinfo=None) .isoformat(), (2 * self._snapmirror_schedule)))): return constants.REPLICA_STATE_OUT_OF_SYNC last_transfer_error = snapmirror.get('last-transfer-error', None) if last_transfer_error: LOG.debug('Found last-transfer-error: %(error)s for replica: ' '%(replica)s.', {'replica': replica['id'], 'error': last_transfer_error}) return constants.REPLICA_STATE_OUT_OF_SYNC # Check all snapshots exist snapshots = [snap['share_replica_snapshot'] for snap in share_snapshots] for snap in snapshots: snapshot_name = snap.get('provider_location') if (not snapshot_name or not vserver_client.snapshot_exists(snapshot_name, share_name)): return constants.REPLICA_STATE_OUT_OF_SYNC # NOTE(sfernand): When promoting replicas, the previous source volume # and its destinations are put in an 'out of sync' state and must be # cleaned up once to avoid retaining unused snapshots from the previous # relationship. Replicas already 'in-sync' won't try another cleanup # attempt. if replica['replica_state'] == constants.REPLICA_STATE_OUT_OF_SYNC: dm_session.cleanup_previous_snapmirror_relationships( replica, replica_list) return constants.REPLICA_STATE_IN_SYNC def promote_replica(self, context, replica_list, replica, access_rules, share_server=None, quiesce_wait_time=None): """Switch SnapMirror relationships and allow r/w ops on replica. Creates a DataMotion session and switches the direction of the SnapMirror relationship between the currently 'active' instance ( SnapMirror source volume) and the replica. Also attempts setting up SnapMirror relationships between the other replicas and the new SnapMirror source volume ('active' instance). For DR style, the promotion creates the QoS policy and export policy for the new active replica. While for 'readable', those specs are only updated without unmounting. 
:param context: Request Context :param replica_list: List of replicas, including the 'active' instance :param replica: Replica to promote to SnapMirror source :param access_rules: Access rules to apply to the replica :param share_server: ShareServer class instance of replica :param quiesce_wait_time: Wait time in seconds for snapmirror quiesce :return: Updated replica_list """ orig_active_replica = self.find_active_replica(replica_list) dm_session = data_motion.DataMotionSession() new_replica_list = [] # Setup the new active replica try: new_active_replica = ( self._convert_destination_replica_to_independent( context, dm_session, orig_active_replica, replica, access_rules, share_server=share_server, quiesce_wait_time=quiesce_wait_time)) except exception.StorageCommunicationException: LOG.exception("Could not communicate with the backend " "for replica %s during promotion.", replica['id']) new_active_replica = replica.copy() new_active_replica['replica_state'] = ( constants.STATUS_ERROR) new_active_replica['status'] = constants.STATUS_ERROR return [new_active_replica] new_replica_list.append(new_active_replica) # Change the source replica for all destinations to the new # active replica. is_dr = not self._is_readable_replica(replica) pool_name = share_utils.extract_host(replica['host'], level='pool') is_flexgroup = self._is_flexgroup_pool(pool_name) for r in replica_list: if r['id'] != replica['id']: r = self._safe_change_replica_source(dm_session, r, orig_active_replica, replica, replica_list, is_dr, access_rules, share_server=share_server, is_flexgroup=is_flexgroup) new_replica_list.append(r) if is_dr: # NOTE(felipe_rodrigues): non active DR replica does not have the # export location set, so during replica deletion the driver cannot # delete the ONTAP export. Clean up it when becoming non active. 
orig_active_vserver = dm_session.get_vserver_from_share( orig_active_replica) orig_active_replica_backend = ( share_utils.extract_host(orig_active_replica['host'], level='backend_name')) orig_active_replica_name = self._get_backend_share_name( orig_active_replica['id']) orig_active_vserver_client = data_motion.get_client_for_backend( orig_active_replica_backend, vserver_name=orig_active_vserver) orig_active_replica_helper = self._get_helper(orig_active_replica) orig_active_replica_helper.set_client(orig_active_vserver_client) try: orig_active_replica_helper.cleanup_demoted_replica( orig_active_replica, orig_active_replica_name) except exception.StorageCommunicationException: LOG.exception( "Could not cleanup the original active replica export %s.", orig_active_replica['id']) self._unmount_orig_active_replica(orig_active_replica, orig_active_vserver) self._handle_qos_on_replication_change(dm_session, new_active_replica, orig_active_replica, is_dr, share_server=share_server) extra_specs = share_types.get_extra_specs_from_share( new_active_replica) provisioning_options = self._get_provisioning_options(extra_specs) if provisioning_options.get('max_files_multiplier') is not None: max_files_multiplier = provisioning_options.pop( 'max_files_multiplier') max_files = na_utils.calculate_max_files( new_active_replica['size'], max_files_multiplier) new_active_replica_share_name = self._get_backend_share_name( new_active_replica['id']) __, vserver_client = self._get_vserver( share_server=share_server) vserver_client.set_volume_max_files(new_active_replica_share_name, max_files) self._update_autosize_attributes_after_promote_replica( orig_active_replica, new_active_replica, dm_session) return new_replica_list def _unmount_orig_active_replica(self, orig_active_replica, orig_active_vserver=None): orig_active_replica_backend = ( share_utils.extract_host(orig_active_replica['host'], level='backend_name')) orig_active_vserver_client = data_motion.get_client_for_backend( 
orig_active_replica_backend, vserver_name=orig_active_vserver) share_name = self._get_backend_share_name( orig_active_replica['id']) try: orig_active_vserver_client.unmount_volume(share_name, force=True) LOG.info("Unmount of the original active replica %s successful.", orig_active_replica['id']) except exception.StorageCommunicationException: LOG.exception("Could not unmount the original active replica %s.", orig_active_replica['id']) def _get_replica_info(self, replica, dm_session): """Retrieves a dict with the replica information. :param replica: Share replica. :param dm_session: Data Motion session. :return: A dict with the replica information. """ extra_specs = share_types.get_extra_specs_from_share(replica) provisioning_options = self._get_provisioning_options(extra_specs) replica_name = self._get_backend_share_name(replica['id']) vserver = dm_session.get_vserver_from_share(replica) replica_backend = share_utils.extract_host(replica['host'], level='backend_name') replica_client = data_motion.get_client_for_backend( replica_backend, vserver_name=vserver) pool_name = share_utils.extract_host(replica['host'], level='pool') is_flexgroup = self._is_flexgroup_pool(pool_name) if is_flexgroup: replica_aggregate = self._get_flexgroup_aggregate_list(pool_name) else: replica_aggregate = share_utils.extract_host( replica['host'], level='pool') replica_info = { 'client': replica_client, 'aggregate': replica_aggregate, 'name': replica_name, 'provisioning_options': provisioning_options, } return replica_info def _update_autosize_attributes_after_promote_replica( self, orig_active_replica, new_active_replica, dm_session): """Update autosize attributes after replica is promoted""" # 1. Get the info from original active replica orig_active_replica_info = self._get_replica_info( orig_active_replica, dm_session) # 2. Get the info from the promoted replica (new_active_replica) new_active_replica_info = self._get_replica_info( new_active_replica, dm_session) # 3. 
Set autosize attributes for orig_active_replica # Reset the autosize attributes according to the volume type (RW or DP) orig_active_replica_autosize_attributes = {'reset': 'true'} orig_provisioning_opts = ( orig_active_replica_info['provisioning_options']) orig_provisioning_opts['autosize_attributes'] = ( orig_active_replica_autosize_attributes) orig_active_replica_info['client'].modify_volume( orig_active_replica_info['aggregate'], orig_active_replica_info['name'], **orig_provisioning_opts) # 4. Set autosize attributes for new_active_replica # Reset the autosize attributes according to the volume type (RW or DP) new_active_replica_autosize_attributes = {'reset': 'true'} new_provisioning_opts = ( new_active_replica_info['provisioning_options']) new_provisioning_opts['autosize_attributes'] = ( new_active_replica_autosize_attributes) new_active_replica_info['client'].modify_volume( new_active_replica_info['aggregate'], new_active_replica_info['name'], **new_provisioning_opts) def _handle_qos_on_replication_change(self, dm_session, new_active_replica, orig_active_replica, is_dr, share_server=None): """Handle QoS change while promoting a replica.""" # QoS is only available for cluster credentials. 
if not self._have_cluster_creds: return extra_specs = share_types.get_extra_specs_from_share( orig_active_replica) qos_specs = self._get_normalized_qos_specs(extra_specs) if is_dr and qos_specs: dm_session.remove_qos_on_old_active_replica(orig_active_replica) if qos_specs: # Check if a QoS policy already exists for the promoted replica, # if it does, modify it as necessary, else create it: try: new_active_replica_qos_policy = ( self._get_backend_qos_policy_group_name( new_active_replica['id'])) vserver, vserver_client = self._get_vserver( share_server=share_server) volume_name_on_backend = self._get_backend_share_name( new_active_replica['id']) if not self._client.qos_policy_group_exists( new_active_replica_qos_policy): self._create_qos_policy_group( new_active_replica, vserver, qos_specs) else: max_throughput = self._get_max_throughput( new_active_replica['size'], qos_specs) self._client.qos_policy_group_modify( new_active_replica_qos_policy, max_throughput) vserver_client.set_qos_policy_group_for_volume( volume_name_on_backend, new_active_replica_qos_policy) LOG.info("QoS policy applied successfully for promoted " "replica: %s", new_active_replica['id']) except Exception: LOG.exception("Could not apply QoS to the promoted replica.") def _convert_destination_replica_to_independent( self, context, dm_session, orig_active_replica, replica, access_rules, share_server=None, quiesce_wait_time=None): """Breaks SnapMirror and allows r/w ops on the destination replica. For promotion, the existing SnapMirror relationship must be broken and access rules have to be granted to the broken off replica to use it as an independent share. 
:param context: Request Context :param dm_session: Data motion object for SnapMirror operations :param orig_active_replica: Original SnapMirror source :param replica: Replica to promote to SnapMirror source :param access_rules: Access rules to apply to the replica :param share_server: ShareServer class instance of replica :param quiesce_wait_time: Wait time in seconds for snapmirror quiesce :return: Updated replica """ vserver, vserver_client = self._get_vserver(share_server=share_server) share_name = self._get_backend_share_name(replica['id']) try: # 1. Start an update to try to get a last minute transfer before we # quiesce and break dm_session.update_snapmirror(orig_active_replica, replica) except exception.StorageCommunicationException: # Ignore any errors since the current source replica may be # unreachable pass # 2. Break SnapMirror dm_session.break_snapmirror(orig_active_replica, replica, quiesce_wait_time=quiesce_wait_time) # 3. Setup access rules new_active_replica = replica.copy() new_active_replica['export_locations'] = self._create_export( new_active_replica, share_server, vserver, vserver_client) helper = self._get_helper(replica) helper.set_client(vserver_client) try: helper.update_access(replica, share_name, access_rules) except Exception: new_active_replica['access_rules_status'] = ( constants.SHARE_INSTANCE_RULES_SYNCING) else: new_active_replica['access_rules_status'] = constants.STATUS_ACTIVE new_active_replica['replica_state'] = constants.REPLICA_STATE_ACTIVE # 4. Set File system size fixed to false vserver_client.set_volume_filesys_size_fixed(share_name, filesys_size_fixed=False) return new_active_replica def _safe_change_replica_source(self, dm_session, replica, orig_source_replica, new_source_replica, replica_list, is_dr, access_rules, share_server=None, is_flexgroup=False): """Attempts to change the SnapMirror source to new source. If the attempt fails, 'replica_state' is set to 'error'. 
:param dm_session: Data motion object for SnapMirror operations. :param replica: Replica that requires a change of source. :param orig_source_replica: Original SnapMirror source volume. :param new_source_replica: New SnapMirror source volume. :param is_dr: the replication type is dr, otherwise it is readable. :param access_rules: share access rules to be applied. :param share_server: share server. :param is_flexgroup: the replication is over FlexGroup style :return: Updated replica. """ try: dm_session.change_snapmirror_source(replica, orig_source_replica, new_source_replica, replica_list, is_flexgroup=is_flexgroup) except exception.StorageCommunicationException: replica['status'] = constants.STATUS_ERROR replica['replica_state'] = constants.STATUS_ERROR if is_dr: replica['export_locations'] = [] msg = ("Failed to change replica (%s) to a SnapMirror " "destination. Replica backend is unreachable.") LOG.exception(msg, replica['id']) return replica except netapp_api.NaApiError: replica['status'] = constants.STATUS_ERROR replica['replica_state'] = constants.STATUS_ERROR if is_dr: replica['export_locations'] = [] msg = ("Failed to change replica (%s) to a SnapMirror " "destination.") LOG.exception(msg, replica['id']) return replica replica['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC replica['status'] = constants.STATUS_AVAILABLE if is_dr: replica['export_locations'] = [] return replica # NOTE(felipe_rodrigues): readable replica might be in an error # state without mounting and export. Retries to recover it. 
        # NOTE(review): this chunk begins inside an enclosing readable-replica
        # setup method whose signature lies above this excerpt; dm_session,
        # replica, share_server and access_rules come from that scope —
        # confirm against the full file.
        replica_volume_name, replica_vserver, replica_backend = (
            dm_session.get_backend_info_for_share(replica))
        replica_client = data_motion.get_client_for_backend(
            replica_backend, vserver_name=replica_vserver)
        try:
            replica_config = data_motion.get_backend_configuration(
                replica_backend)
            dm_session.wait_for_mount_replica(
                replica_client, replica_volume_name,
                timeout=replica_config.netapp_mount_replica_timeout)
        except netapp_api.NaApiError:
            # Mount failed: flag both status fields so the share manager
            # surfaces the error, and stop here.
            replica['status'] = constants.STATUS_ERROR
            replica['replica_state'] = constants.STATUS_ERROR
            msg = "Failed to mount readable replica (%s)."
            LOG.exception(msg, replica['id'])
            return replica
        try:
            replica_cluster_client = self._get_api_client_for_backend(
                replica_backend)
            replica['export_locations'] = self._create_export(
                replica, share_server, replica_vserver, replica_client,
                cluster_client=replica_cluster_client, replica=True)
        except netapp_api.NaApiError:
            replica['status'] = constants.STATUS_ERROR
            replica['replica_state'] = constants.STATUS_ERROR
            msg = "Failed to create export for readable replica (%s)."
            LOG.exception(msg, replica['id'])
            return replica
        # Apply access rules to the replica; failures are reported through
        # access_rules_status rather than raised.
        helper = self._get_helper(replica)
        helper.set_client(replica_client)
        try:
            helper.update_access(
                replica, replica_volume_name, access_rules)
        except Exception:
            replica['access_rules_status'] = (
                constants.SHARE_INSTANCE_RULES_ERROR)
        else:
            replica['access_rules_status'] = constants.STATUS_ACTIVE

        return replica

    def create_replicated_snapshot(self, context, replica_list,
                                   snapshot_instances, share_server=None):
        """Create a snapshot of a replicated share.

        The snapshot is taken on the active replica only; SnapMirror updates
        are then requested so each non-active replica receives the snapshot
        asynchronously. Returns the snapshot instance models with updated
        status/provider_location.
        """
        active_replica = self.find_active_replica(replica_list)
        active_snapshot = [x for x in snapshot_instances
                           if x['share_id'] == active_replica['id']][0]
        snapshot_name = self._get_backend_snapshot_name(active_snapshot['id'])

        self.create_snapshot(context, active_snapshot,
                             share_server=share_server)

        active_snapshot['status'] = constants.STATUS_AVAILABLE
        active_snapshot['provider_location'] = snapshot_name
        snapshots = [active_snapshot]
        # Pair each replica with its snapshot instance; both lists are keyed
        # by the replica's share id, hence the parallel sorts.
        instances = zip(sorted(replica_list, key=lambda x: x['id']),
                        sorted(snapshot_instances,
                               key=lambda x: x['share_id']))
        for replica, snapshot in instances:
            if snapshot['id'] != active_snapshot['id']:
                snapshot['provider_location'] = snapshot_name
                snapshots.append(snapshot)
                dm_session = data_motion.DataMotionSession()
                if replica.get('host'):
                    try:
                        dm_session.update_snapmirror(active_replica,
                                                     replica)
                    except netapp_api.NaApiError as e:
                        # Tolerate a missing or not-yet-initialized
                        # SnapMirror relationship; the update happens later.
                        not_initialized = 'not initialized'
                        if (e.code != netapp_api.EOBJECTNOTFOUND and
                                not_initialized not in e.message):
                            raise

        return snapshots

    def delete_replicated_snapshot(self, context, replica_list,
                                   snapshot_instances, share_server=None):
        """Delete a snapshot from all replicas of a replicated share.

        The snapshot is deleted on the active replica; SnapMirror updates
        propagate the deletion to the non-active replicas.
        """
        active_replica = self.find_active_replica(replica_list)
        active_snapshot = [x for x in snapshot_instances
                           if x['share_id'] == active_replica['id']][0]

        self.delete_snapshot(
            context, active_snapshot, share_server=share_server,
            snapshot_name=active_snapshot['provider_location'])
        active_snapshot['status'] = constants.STATUS_DELETED
        instances = zip(sorted(replica_list, key=lambda x: x['id']),
                        sorted(snapshot_instances,
                               key=lambda x: x['share_id']))

        for replica, snapshot in instances:
            if snapshot['id'] != active_snapshot['id']:
                dm_session = data_motion.DataMotionSession()
                if replica.get('host'):
                    try:
                        dm_session.update_snapmirror(active_replica,
                                                     replica)
                    except netapp_api.NaApiError as e:
                        # Tolerate a missing or not-yet-initialized
                        # SnapMirror relationship.
                        not_initialized = 'not initialized'
                        if (e.code != netapp_api.EOBJECTNOTFOUND and
                                not_initialized not in e.message):
                            raise

        return [active_snapshot]

    def update_replicated_snapshot(self, replica_list, share_replica,
                                   snapshot_instances, snapshot_instance,
                                   share_server=None):
        """Poll the status of a snapshot instance on a non-active replica.

        Returns an update dict when the snapshot transitioned to available,
        raises SnapshotResourceNotFound when a deleting snapshot is gone,
        and otherwise re-triggers a SnapMirror update.
        """
        active_replica = self.find_active_replica(replica_list)
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_name = self._get_backend_share_name(
            snapshot_instance['share_id'])
        snapshot_name = snapshot_instance.get('provider_location')
        # NOTE(ameade): If there is no provider location,
        # then grab from active snapshot instance
        if snapshot_name is None:
            active_snapshot = [x for x in snapshot_instances
                               if x['share_id'] == active_replica['id']][0]
            snapshot_name = active_snapshot.get('provider_location')

        if not snapshot_name:
            return

        try:
            snapshot_exists = vserver_client.snapshot_exists(snapshot_name,
                                                             share_name)
        except exception.SnapshotUnavailable:
            # The volume must still be offline
            return

        if (snapshot_exists and
                snapshot_instance['status'] == constants.STATUS_CREATING):
            return {
                'status': constants.STATUS_AVAILABLE,
                'provider_location': snapshot_name,
            }
        elif (not snapshot_exists and
              snapshot_instance['status'] == constants.STATUS_DELETING):
            raise exception.SnapshotResourceNotFound(
                name=snapshot_instance.get('provider_location'))

        dm_session = data_motion.DataMotionSession()
        try:
            dm_session.update_snapmirror(active_replica, share_replica)
        except netapp_api.NaApiError as e:
            # ignore exception in case the relationship does not exist or it
            # is not completely initialized yet.
            not_initialized = 'not initialized'
            if (e.code != netapp_api.EOBJECTNOTFOUND and
                    not_initialized not in e.message):
                raise

    def revert_to_replicated_snapshot(self, context, active_replica,
                                      replica_list, active_replica_snapshot,
                                      replica_snapshots, share_server=None):
        """Reverts a replicated share (in place) to the specified snapshot."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_name = self._get_backend_share_name(
            active_replica_snapshot['share_id'])
        snapshot_name = (
            active_replica_snapshot.get('provider_location') or
            self._get_backend_snapshot_name(active_replica_snapshot['id']))

        LOG.debug('Restoring snapshot %s', snapshot_name)

        dm_session = data_motion.DataMotionSession()
        non_active_replica_list = self._find_nonactive_replicas(replica_list)

        # Ensure source snapshot exists
        vserver_client.get_snapshot(share_name, snapshot_name)

        # Break all mirrors
        for replica in non_active_replica_list:
            try:
                dm_session.break_snapmirror(
                    active_replica, replica, mount=False)
            except netapp_api.NaApiError as e:
                # A missing relationship is fine; anything else is fatal.
                if e.code != netapp_api.EOBJECTNOTFOUND:
                    raise

        # Delete source SnapMirror snapshots that will prevent a snap restore
        snapmirror_snapshot_names = vserver_client.list_snapmirror_snapshots(
            share_name)
        for snapmirror_snapshot_name in snapmirror_snapshot_names:
            vserver_client.delete_snapshot(
                share_name, snapmirror_snapshot_name, ignore_owners=True)

        # Restore source snapshot of interest
        vserver_client.restore_snapshot(share_name, snapshot_name)

        # Reestablish mirrors
        for replica in non_active_replica_list:
            try:
                dm_session.resync_snapmirror(active_replica, replica)
            except netapp_api.NaApiError as e:
                if e.code != netapp_api.EOBJECTNOTFOUND:
                    raise

    def _is_readable_replica(self, replica):
        """Check the replica type to find out if the replica is readable."""
        extra_specs = share_types.get_extra_specs_from_share(replica)
        return (extra_specs.get('replication_type') ==
                constants.REPLICATION_TYPE_READABLE)

    def _check_destination_vserver_for_vol_move(self, source_share,
                                                source_vserver,
                                                dest_share_server):
        """Raise NetAppException unless source and destination Vservers match.

        Volume move is only possible within a single Vserver, so migration
        to a share server backed by a different (or unresolvable) Vserver is
        rejected.
        """
        try:
            destination_vserver, __ = self._get_vserver(
                share_server=dest_share_server)
        except exception.InvalidParameterValue:
            destination_vserver = None

        if source_vserver != destination_vserver:
            msg = _("Cannot migrate %(shr)s efficiently from source "
                    "VServer %(src)s to destination VServer %(dest)s.")
            msg_args = {
                'shr': source_share['id'],
                'src': source_vserver,
                'dest': destination_vserver,
            }
            raise exception.NetAppException(msg % msg_args)

    def migration_check_compatibility(self, context, source_share,
                                      destination_share, share_server=None,
                                      destination_share_server=None):
        """Checks compatibility between self.host and destination host."""
        # We need cluster creds to perform an intra-cluster data motion
        compatible = False
        destination_host = destination_share['host']

        if self._have_cluster_creds:
            try:
                backend = share_utils.extract_host(
                    destination_host, level='backend_name')
                destination_aggregate = share_utils.extract_host(
                    destination_host, level='pool')
                source_pool = share_utils.extract_host(
                    source_share['host'], level='pool')

                # Check the source/destination pool type, they must be
                # FlexVol.
if self._is_flexgroup_pool(source_pool): msg = _("Cannot migrate share because it resides on a " "FlexGroup pool.") raise exception.NetAppException(msg) dm_session = data_motion.DataMotionSession() if self.is_flexgroup_destination_host(destination_host, dm_session): msg = _("Cannot migrate share because the destination " "pool is FlexGroup type.") raise exception.NetAppException(msg) # Check the source/destination pool SnapLock type, for # ONTAP version < 9.10.1 if not self._is_snaplock_compatible_for_migration( source_pool, destination_aggregate ): msg = _("Cannot migrate share because the source and " "destination pool support different SnapLock" " type.") raise exception.NetAppException(msg) # Validate new extra-specs are valid on the destination extra_specs = share_types.get_extra_specs_from_share( destination_share) self._check_extra_specs_validity( destination_share, extra_specs) # NOTE(dviroel): Check if the destination share-type has valid # provisioning options. provisioning_options = self._get_provisioning_options( extra_specs) qos_specs = self._get_normalized_qos_specs(extra_specs) self.validate_provisioning_options_for_share( provisioning_options, extra_specs=extra_specs, qos_specs=qos_specs) # Validate destination against fpolicy extra specs fpolicy_ext_include = provisioning_options.get( 'fpolicy_extensions_to_include') fpolicy_ext_exclude = provisioning_options.get( 'fpolicy_extensions_to_exclude') fpolicy_file_operations = provisioning_options.get( 'fpolicy_file_operations') if fpolicy_ext_include or fpolicy_ext_include: __, dest_client = self._get_vserver( share_server=destination_share_server) fpolicies = dest_client.get_fpolicy_policies_status() if len(fpolicies) >= self.FPOLICY_MAX_VSERVER_POLICIES: # If we can't create a new policy for the new share, # we need to reuse an existing one. 
                        # NOTE(review): continuation of
                        # migration_check_compatibility, still inside its
                        # "try" block and the destination-fpolicy check.
                        reusable_scopes = self._find_reusable_fpolicy_scope(
                            destination_share, dest_client,
                            fpolicy_extensions_to_include=fpolicy_ext_include,
                            fpolicy_extensions_to_exclude=fpolicy_ext_exclude,
                            fpolicy_file_operations=fpolicy_file_operations)
                        if not reusable_scopes:
                            msg = _(
                                "Cannot migrate share because the destination "
                                "reached its maximum number of policies.")
                            raise exception.NetAppException(msg)

                # NOTE (felipe_rodrigues): NetApp only can migrate within the
                # same server, so it does not need to check that the
                # destination share has the same NFS config as the destination
                # server.

                # TODO(gouthamr): Check whether QoS min-throughputs can be
                # honored on the destination aggregate when supported.
                self._check_aggregate_extra_specs_validity(
                    destination_aggregate, extra_specs)

                data_motion.get_backend_configuration(backend)

                source_vserver, __ = self._get_vserver(
                    share_server=share_server)
                share_volume = self._get_backend_share_name(
                    source_share['id'])

                # NOTE(dviroel): If source and destination vservers are
                # compatible for volume move, the provisioning option
                # 'adaptive_qos_policy_group' will also be supported since the
                # share will remain in the same vserver.
                self._check_destination_vserver_for_vol_move(
                    source_share, source_vserver, destination_share_server)

                encrypt_dest = self._get_dest_flexvol_encryption_value(
                    destination_share)
                self._client.check_volume_move(
                    share_volume, source_vserver, destination_aggregate,
                    encrypt_destination=encrypt_dest)
            except Exception:
                # Any failed check makes the hosts incompatible; log and
                # report rather than raise.
                msg = ("Cannot migrate share %(shr)s efficiently between "
                       "%(src)s and %(dest)s.")
                msg_args = {
                    'shr': source_share['id'],
                    'src': source_share['host'],
                    'dest': destination_host,
                }
                LOG.exception(msg, msg_args)
            else:
                compatible = True
        else:
            msg = ("Cluster credentials have not been configured "
                   "with this share driver. Cannot perform volume move "
                   "operations.")
            LOG.warning(msg)

        compatibility = {
            'compatible': compatible,
            'writable': compatible,
            'nondisruptive': compatible,
            'preserve_metadata': compatible,
            'preserve_snapshots': compatible,
        }

        return compatibility

    def _move_volume_after_splitting(self, source_share, destination_share,
                                     share_server=None,
                                     cutover_action='wait'):
        """Start a volume move, retrying while a clone split is in progress.

        Raises NetAppException when retries are exhausted or on an
        unexpected ONTAP error.
        """
        retries = (self.configuration.netapp_start_volume_move_timeout / 5
                   or 1)

        @manila_utils.retry(retry_param=exception.ShareBusyException,
                            interval=5, retries=retries, backoff_rate=1)
        def try_move_volume():
            try:
                self._move_volume(source_share, destination_share,
                                  share_server, cutover_action)
            except netapp_api.NaApiError as e:
                undergoing_split = 'undergoing a clone split'
                msg_args = {'id': source_share['id']}
                if (e.code == netapp_api.EAPIERROR and
                        undergoing_split in e.message):
                    # Busy: surface as ShareBusyException so the retry
                    # decorator re-invokes this function.
                    msg = _('The volume %(id)s is undergoing a clone split '
                            'operation. Will retry the operation.') % msg_args
                    LOG.warning(msg)
                    raise exception.ShareBusyException(reason=msg)
                else:
                    msg = _("Unable to perform move operation for the volume "
                            "%(id)s. Caught an unexpected error. Not "
                            "retrying.") % msg_args
                    raise exception.NetAppException(message=msg)

        try:
            try_move_volume()
        except exception.ShareBusyException:
            msg_args = {'id': source_share['id']}
            msg = _("Unable to perform move operation for the volume %(id)s "
                    "because a clone split operation is still in progress. "
                    "Retries exhausted. Not retrying.") % msg_args
            raise exception.NetAppException(message=msg)

    def _move_volume(self, source_share, destination_share, share_server=None,
                     cutover_action='wait'):
        """Begin an intra-cluster volume move to the destination pool."""
        # Intra-cluster migration
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_volume = self._get_backend_share_name(source_share['id'])
        destination_aggregate = share_utils.extract_host(
            destination_share['host'], level='pool')

        # If the destination's share type extra-spec for Flexvol encryption
        # is different than the source's, then specify the volume-move
        # operation to set the correct 'encrypt' attribute on the destination
        # volume.
        encrypt_dest = self._get_dest_flexvol_encryption_value(
            destination_share)

        self._client.start_volume_move(
            share_volume,
            vserver,
            destination_aggregate,
            cutover_action=cutover_action,
            encrypt_destination=encrypt_dest)

        msg = ("Began volume move operation of share %(shr)s from %(src)s "
               "to %(dest)s.")
        msg_args = {
            'shr': source_share['id'],
            'src': source_share['host'],
            'dest': destination_share['host'],
        }
        LOG.info(msg, msg_args)

    def migration_start(self, context, source_share, destination_share,
                        source_snapshots, snapshot_mappings,
                        share_server=None, destination_share_server=None):
        """Begins data motion from source_share to destination_share."""
        self._move_volume(source_share, destination_share, share_server)

    def _get_volume_move_status(self, source_share, share_server):
        """Return the ONTAP volume-move status dict for the share volume."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_volume = self._get_backend_share_name(source_share['id'])
        status = self._client.get_volume_move_status(share_volume, vserver)
        return status

    def _check_volume_clone_split_completed(self, share, vserver_client):
        """Return True when the share volume's clone split has finished."""
        share_volume = self._get_backend_share_name(share['id'])
        return vserver_client.check_volume_clone_split_completed(share_volume)

    def _get_dest_flexvol_encryption_value(self, destination_share):
        """Return the boolean netapp_flexvol_encryption extra-spec value."""
        dest_share_type_encrypted_val = share_types.get_share_type_extra_specs(
            destination_share['share_type_id'],
            'netapp_flexvol_encryption')
        encrypt_destination = share_types.parse_boolean_extra_spec(
            'netapp_flexvol_encryption', dest_share_type_encrypted_val)

        return encrypt_destination

    def _check_volume_move_completed(self, source_share, share_server):
        """Check progress of volume move operation."""
        status = self._get_volume_move_status(source_share, share_server)
        completed_phases = (
            'cutover_hard_deferred', 'cutover_soft_deferred', 'completed',
            'success')

        move_phase = status['phase'].lower()
        if move_phase == 'failed':
            msg_args = {
                'shr': source_share['id'],
                'reason': status['details'],
            }
            msg = _("Volume move operation for share %(shr)s failed. Reason: "
                    "%(reason)s") % msg_args
            LOG.exception(msg)
            raise exception.NetAppException(msg)
        elif move_phase in completed_phases:
            return True

        return False

    def migration_continue(self, context, source_share, destination_share,
                           source_snapshots, snapshot_mappings,
                           share_server=None, destination_share_server=None):
        """Check progress of migration, try to repair data motion errors."""
        return self._check_volume_move_completed(source_share, share_server)

    def _get_volume_move_progress(self, source_share, share_server):
        """Return a progress report dict for the ongoing volume move."""
        status = self._get_volume_move_status(source_share, share_server)

        # NOTE (gouthamr): If the volume move is waiting for a manual
        # intervention to cut-over, the copy is done with respect to the
        # user. Volume move copies the rest of the data before cut-over
        # anyway.
        if status['phase'] in ('cutover_hard_deferred',
                               'cutover_soft_deferred'):
            status['percent-complete'] = 100

        msg = ("Volume move status for share %(share)s: (State) %(state)s. "
               "(Phase) %(phase)s. Details: %(details)s")
        msg_args = {
            'state': status['state'],
            'details': status['details'],
            'share': source_share['id'],
            'phase': status['phase'],
        }
        LOG.info(msg, msg_args)

        return {
            'total_progress': status['percent-complete'] or 0,
            'state': status['state'],
            'estimated_completion_time': status['estimated-completion-time'],
            'phase': status['phase'],
            'details': status['details'],
        }

    def migration_get_progress(self, context, source_share,
                               destination_share, source_snapshots,
                               snapshot_mappings, share_server=None,
                               destination_share_server=None):
        """Return detailed progress of the migration in progress."""
        return self._get_volume_move_progress(source_share, share_server)

    def migration_cancel(self, context, source_share, destination_share,
                         source_snapshots, snapshot_mappings,
                         share_server=None, destination_share_server=None):
        """Abort an ongoing migration."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_volume = self._get_backend_share_name(source_share['id'])
        retries = (math.ceil(
            self.configuration.netapp_migration_cancel_timeout / 5) or 1)

        try:
            self._get_volume_move_status(source_share, share_server)
        except exception.NetAppException:
            # No move status: nothing to abort.
            LOG.exception("Could not get volume move status.")
            return

        self._client.abort_volume_move(share_volume, vserver)

        @manila_utils.retry(retry_param=exception.InUse, interval=5,
                            retries=retries, backoff_rate=1)
        def wait_for_migration_cancel_complete():
            # The abort is complete once ONTAP reports the move as 'failed'.
            move_status = self._get_volume_move_status(source_share,
                                                       share_server)
            if move_status['state'] == 'failed':
                return
            else:
                msg = "Migration cancelation isn't finished yet."
                raise exception.InUse(message=msg)

        try:
            wait_for_migration_cancel_complete()
        except exception.InUse:
            move_status = self._get_volume_move_status(source_share,
                                                       share_server)
            msg_args = {
                'share_move_state': move_status['state']
            }
            msg = _("Migration cancellation was not successful. The share "
                    "migration state failed while transitioning from "
                    "%(share_move_state)s state to 'failed'. Retries "
                    "exhausted. Not retrying.") % msg_args
            raise exception.NetAppException(message=msg)
        except exception.NetAppException:
            LOG.exception("Could not get volume move status.")

        msg = ("Share volume move operation for share %(shr)s from host "
               "%(src)s to %(dest)s was successfully aborted.")
        msg_args = {
            'shr': source_share['id'],
            'src': source_share['host'],
            'dest': destination_share['host'],
        }
        LOG.info(msg, msg_args)

    def migration_complete(self, context, source_share, destination_share,
                           source_snapshots, snapshot_mappings,
                           share_server=None, destination_share_server=None):
        """Initiate the cutover to destination share after move is complete."""
        vserver, vserver_client = self._get_vserver(share_server=share_server)
        share_volume = self._get_backend_share_name(source_share['id'])

        status = self._get_volume_move_status(source_share, share_server)
        move_phase = status['phase'].lower()
        if move_phase == 'completed':
            LOG.debug("Volume move operation was already successfully "
                      "completed for share %(shr)s.",
                      {'shr': source_share['id']})
        elif move_phase in ('cutover_hard_deferred', 'cutover_soft_deferred'):
            # Deferred cutover: trigger it explicitly and wait.
            self._client.trigger_volume_move_cutover(share_volume, vserver)
            self._wait_for_cutover_completion(
                source_share, share_server)
        else:
            msg_args = {
                'shr': source_share['id'],
                'status': status['state'],
                'phase': status['phase'],
                'details': status['details'],
            }
            msg = _("Cannot complete volume move operation for share %(shr)s. "
                    "Current volume move status: %(status)s, phase: "
                    "%(phase)s. Details: %(details)s") % msg_args
            LOG.exception(msg)
            raise exception.NetAppException(msg)

        new_share_volume_name = self._get_backend_share_name(
            destination_share['id'])
        vserver_client.set_volume_name(share_volume, new_share_volume_name)

        # Modify volume properties per share type extra-specs
        extra_specs = share_types.get_extra_specs_from_share(
            destination_share)
        extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
        self._check_extra_specs_validity(destination_share, extra_specs)
        provisioning_options = self._get_provisioning_options(extra_specs)
        qos_policy_group_name = self._modify_or_create_qos_for_existing_share(
            destination_share, extra_specs, vserver, vserver_client)
        if qos_policy_group_name:
            provisioning_options['qos_policy_group'] = qos_policy_group_name
        else:
            # Removing the QOS Policy on the migrated share as the
            # new extra-spec for which this share is being migrated to
            # does not specify any QOS settings.
            provisioning_options['qos_policy_group'] = "none"

            qos_policy_of_src_share = self._get_backend_qos_policy_group_name(
                source_share['id'])
            self._client.mark_qos_policy_group_for_deletion(
                qos_policy_of_src_share)

        snap_attributes = self._get_provisioning_options_for_snap_attributes(
            vserver_client, new_share_volume_name)

        provisioning_options.update(snap_attributes)

        destination_aggregate = share_utils.extract_host(
            destination_share['host'], level='pool')

        # Modify volume to match extra specs
        vserver_client.modify_volume(destination_aggregate,
                                     new_share_volume_name,
                                     **provisioning_options)

        # Create or reuse fpolicy
        fpolicy_ext_to_include = provisioning_options.get(
            'fpolicy_extensions_to_include')
        fpolicy_ext_to_exclude = provisioning_options.get(
            'fpolicy_extensions_to_exclude')
        if fpolicy_ext_to_include or fpolicy_ext_to_exclude:
            self._create_fpolicy_for_share(destination_share, vserver,
                                           vserver_client,
                                           **provisioning_options)
        # Delete old fpolicies if needed
        self._delete_fpolicy_for_share(source_share, vserver, vserver_client)

        msg = ("Volume move operation for share %(shr)s has completed "
               "successfully. Share has been moved from %(src)s to "
               "%(dest)s.")
        msg_args = {
            'shr': source_share['id'],
            'src': source_share['host'],
            'dest': destination_share['host'],
        }
        LOG.info(msg, msg_args)

        # NOTE(gouthamr): For NFS nondisruptive migration, current export
        # policy will not be cleared, the export policy will be renamed to
        # match the name of the share.
        # NOTE (caiquemello): For CIFS nondisruptive migration, current CIFS
        # share cannot be renamed, so keep the previous CIFS share.
        share_protocol = source_share['share_proto'].lower()
        if share_protocol != 'cifs':
            export_locations = self._create_export(
                destination_share, share_server, vserver, vserver_client,
                clear_current_export_policy=False)
        else:
            export_locations = []
            for item in source_share['export_locations']:
                export_locations.append(
                    {
                        'path': item['path'],
                        'is_admin_only': item['is_admin_only'],
                        'metadata': item['el_metadata']
                    }
                )

        # Sort the export locations to report preferred paths first
        export_locations = self._sort_export_locations_by_preferred_paths(
            export_locations)

        src_snaps_dict = {s['id']: s for s in source_snapshots}
        snapshot_updates = {}

        for source_snap_id, destination_snap in snapshot_mappings.items():
            p_location = src_snaps_dict[source_snap_id]['provider_location']

            snapshot_updates.update(
                {destination_snap['id']: {'provider_location': p_location}})

        return {
            'export_locations': export_locations,
            'snapshot_updates': snapshot_updates,
        }

    @na_utils.trace
    def _modify_or_create_qos_for_existing_share(self, share, extra_specs,
                                                 vserver, vserver_client):
        """Gets/Creates QoS policy for an existing FlexVol.

        The share's assigned QoS policy is renamed and adjusted if the policy
        is exclusive to the FlexVol. If the policy includes other workloads
        besides the FlexVol, a new policy is created with the specs necessary.
        """
        qos_specs = self._get_normalized_qos_specs(extra_specs)
        if not qos_specs:
            return

        backend_share_name = self._get_backend_share_name(share['id'])
        qos_policy_group_name = self._get_backend_qos_policy_group_name(
            share['id'])

        create_new_qos_policy_group = True

        backend_volume = vserver_client.get_volume(
            backend_share_name)
        backend_volume_size = int(
            math.ceil(float(backend_volume['size']) / units.Gi))

        LOG.debug("Checking for a pre-existing QoS policy group that "
                  "is exclusive to the volume %s.", backend_share_name)

        # Does the volume have an exclusive QoS policy that we can rename?
        if backend_volume['qos-policy-group-name'] is not None:
            existing_qos_policy_group = self._client.qos_policy_group_get(
                backend_volume['qos-policy-group-name'])
            if existing_qos_policy_group['num-workloads'] == 1:
                # Yay, can set max-throughput and rename

                msg = ("Found pre-existing QoS policy %(policy)s and it is "
                       "exclusive to the volume %(volume)s. Modifying and "
                       "renaming this policy to %(new_policy)s.")
                msg_args = {
                    'policy': backend_volume['qos-policy-group-name'],
                    'volume': backend_share_name,
                    'new_policy': qos_policy_group_name,
                }
                LOG.debug(msg, msg_args)

                max_throughput = self._get_max_throughput(
                    backend_volume_size, qos_specs)
                if (existing_qos_policy_group['max-throughput'] !=
                        max_throughput):
                    self._client.qos_policy_group_modify(
                        backend_volume['qos-policy-group-name'],
                        max_throughput)
                self._client.qos_policy_group_rename(
                    backend_volume['qos-policy-group-name'],
                    qos_policy_group_name)
                create_new_qos_policy_group = False

        if create_new_qos_policy_group:
            share_obj = {
                'size': backend_volume_size,
                'id': share['id'],
            }
            LOG.debug("No existing QoS policy group found for "
                      "volume. Creating a new one with name %s.",
                      qos_policy_group_name)
            self._create_qos_policy_group(share_obj, vserver, qos_specs,
                                          vserver_client=vserver_client)
        return qos_policy_group_name

    def _wait_for_cutover_completion(self, source_share, share_server):
        """Poll until a triggered volume-move cutover reaches 'completed'."""
        retries = (self.configuration.netapp_volume_move_cutover_timeout / 5
                   or 1)

        @manila_utils.retry(retry_param=exception.ShareBusyException,
                            interval=5, retries=retries, backoff_rate=1)
        def check_move_completion():
            status = self._get_volume_move_status(source_share,
                                                  share_server)
            if status['phase'].lower() != 'completed':
                msg_args = {
                    'shr': source_share['id'],
                    'phs': status['phase'],
                }
                msg = _('Volume move operation for share %(shr)s is not '
                        'complete. Current Phase: %(phs)s. '
                        'Retrying.') % msg_args
                LOG.warning(msg)
                raise exception.ShareBusyException(reason=msg)

        try:
            check_move_completion()
        except exception.ShareBusyException:
            msg = _("Volume move operation did not complete after cut-over "
                    "was triggered. Retries exhausted. Not retrying.")
            raise exception.NetAppException(message=msg)

    def get_backend_info(self, context):
        """Report backend configuration relevant to ensure_shares."""
        snapdir_visibility = self.configuration.netapp_reset_snapdir_visibility
        return {
            'snapdir_visibility': snapdir_visibility,
        }

    def ensure_shares(self, context, shares):
        """Re-apply the configured snapdir visibility to existing shares."""
        cfg_snapdir = self.configuration.netapp_reset_snapdir_visibility
        hide_snapdir = self.HIDE_SNAPDIR_CFG_MAP[cfg_snapdir.lower()]
        # A None mapping means "leave snapdir visibility untouched".
        if hide_snapdir is not None:
            for share in shares:
                share_server = share.get('share_server')
                vserver, vserver_client = self._get_vserver(
                    share_server=share_server)
                share_name = self._get_backend_share_name(share['id'])
                self._apply_snapdir_visibility(
                    hide_snapdir, share_name, vserver_client)

    def get_share_status(self, share, share_server=None):
        """Poll a share in a transitional status; only handles
        creating_from_snapshot, any other status is logged and ignored.
        """
        if share['status'] == constants.STATUS_CREATING_FROM_SNAPSHOT:
            return self._update_create_from_snapshot_status(share,
                                                            share_server)
        else:
            LOG.warning("Caught an unexpected share status '%s' during share "
                        "status update routine. Skipping.", share['status'])

    def volume_rehost(self, share, src_vserver, dest_vserver):
        """Rehost the share's backing volume from one Vserver to another."""
        volume_name = self._get_backend_share_name(share['id'])
        msg = ("Rehosting volume of share %(shr)s from vserver %(src)s "
               "to vserver %(dest)s.")
        msg_args = {
            'shr': share['id'],
            'src': src_vserver,
            'dest': dest_vserver,
        }
        LOG.info(msg, msg_args)
        self._client.rehost_volume(volume_name, src_vserver, dest_vserver)

    def _rehost_and_mount_volume(self, share, src_vserver, src_vserver_client,
                                 dest_vserver, dest_vserver_client):
        """Unmount, rehost, and remount the share's volume on dest_vserver."""
        volume_name = self._get_backend_share_name(share['id'])
        mount_point_name = share.get('mount_point_name')
        # Unmount volume in the source vserver:
        src_vserver_client.unmount_volume(volume_name)
        # Rehost the volume
        self.volume_rehost(share, src_vserver, dest_vserver)
        # Mount the volume on the destination vserver
        dest_vserver_client.mount_volume(volume_name, mount_point_name)

    def _check_capacity_compatibility(self, pools, thin_provision, size):
        """Check if the size requested is suitable for the available pools"""

        backend_free_capacity = 0.0

        for pool in pools:
            if "unknown" in (pool['free_capacity_gb'],
                             pool['total_capacity_gb']):
                return False
            reserved = float(pool['reserved_percentage']) / 100

            total_pool_free = math.floor(
                pool['free_capacity_gb'] -
                pool['total_capacity_gb'] * reserved)

            if thin_provision:
                # If thin provision is enabled it's necessary recalculate the
                # total_pool_free considering the max over subscription ratio
                # for each pool. After summing the free space for each pool we
                # have the total backend free capacity to compare with the
                # requested size.
                # NOTE(review): continuation of _check_capacity_compatibility;
                # this runs inside its per-pool loop, thin_provision branch.
                if pool['max_over_subscription_ratio'] >= 1:
                    total_pool_free = math.floor(
                        total_pool_free * pool['max_over_subscription_ratio'])
            backend_free_capacity += total_pool_free

        return size <= backend_free_capacity

    def _find_reusable_fpolicy_scope(
            self, share, vserver_client, fpolicy_extensions_to_include=None,
            fpolicy_extensions_to_exclude=None, fpolicy_file_operations=None,
            shares_to_include=None):
        """Searches a fpolicy scope that can be reused for a share.

        Returns the first scope whose include/exclude extensions and per
        protocol event file operations exactly match the request, or None.
        """
        # Extension/operation lists are sorted so they can be compared for
        # exact equality regardless of ordering.
        protocols = (
            ['nfsv3', 'nfsv4']
            if share['share_proto'].lower() == 'nfs' else ['cifs'])
        protocols.sort()
        requested_ext_to_include = []
        if fpolicy_extensions_to_include:
            requested_ext_to_include = na_utils.convert_string_to_list(
                fpolicy_extensions_to_include)
        requested_ext_to_include.sort()

        requested_ext_to_exclude = []
        if fpolicy_extensions_to_exclude:
            requested_ext_to_exclude = na_utils.convert_string_to_list(
                fpolicy_extensions_to_exclude)
        requested_ext_to_exclude.sort()

        if fpolicy_file_operations:
            requested_file_operations = na_utils.convert_string_to_list(
                fpolicy_file_operations)
        else:
            requested_file_operations = (
                self.configuration.netapp_fpolicy_default_file_operations)
        requested_file_operations.sort()

        share_name = self._get_backend_share_name(share['id'])
        reusable_scopes = vserver_client.get_fpolicy_scopes(
            share_name=share_name,
            extensions_to_exclude=fpolicy_extensions_to_exclude,
            extensions_to_include=fpolicy_extensions_to_include,
            shares_to_include=shares_to_include)

        # NOTE(dviroel): get_fpolicy_scopes can return scopes that don't match
        # the exact requirements.
        # Iterate over a copy since non-matching scopes are removed in place.
        for scope in reusable_scopes[:]:
            scope_ext_include = copy.deepcopy(
                scope.get('file-extensions-to-include', []))
            scope_ext_include.sort()
            scope_ext_exclude = copy.deepcopy(
                scope.get('file-extensions-to-exclude', []))
            scope_ext_exclude.sort()

            if scope_ext_include != requested_ext_to_include:
                LOG.debug(
                    "Excluding scope for policy %(policy_name)s because "
                    "it doesn't match 'file-extensions-to-include' "
                    "configuration.",
                    {'policy_name': scope['policy-name']})
                reusable_scopes.remove(scope)
            elif scope_ext_exclude != requested_ext_to_exclude:
                LOG.debug(
                    "Excluding scope for policy %(policy_name)s because "
                    "it doesn't match 'file-extensions-to-exclude' "
                    "configuration.",
                    {'policy_name': scope['policy-name']})
                reusable_scopes.remove(scope)

        # Second pass: the surviving scopes' policies must also cover the
        # requested file operations for every required protocol.
        for scope in reusable_scopes[:]:
            fpolicy_policy = vserver_client.get_fpolicy_policies(
                share_name=share_name, policy_name=scope['policy-name'])
            for policy in fpolicy_policy:
                event_names = copy.deepcopy(policy.get('events', []))
                match_event_protocols = []
                for event_name in event_names:
                    events = vserver_client.get_fpolicy_events(
                        share_name=share_name, event_name=event_name)
                    for event in events:
                        event_file_ops = copy.deepcopy(
                            event.get('file-operations', []))
                        event_file_ops.sort()
                        if event_file_ops == requested_file_operations:
                            # Event has same file operations
                            match_event_protocols.append(
                                event.get('protocol'))
                match_event_protocols.sort()
                if match_event_protocols != protocols:
                    LOG.debug(
                        "Excluding scope for policy %(policy_name)s because "
                        "it doesn't match 'events' configuration of file "
                        "operations per protocol.",
                        {'policy_name': scope['policy-name']})
                    reusable_scopes.remove(scope)

        return reusable_scopes[0] if reusable_scopes else None

    def _create_fpolicy_for_share(
            self, share, vserver, vserver_client,
            fpolicy_extensions_to_include=None,
            fpolicy_extensions_to_exclude=None,
            fpolicy_file_operations=None, **options):
        """Creates or reuses a fpolicy for a new share."""
        # NOTE(review): this method continues past the end of this excerpt.
        share_name = self._get_backend_share_name(share['id'])
@manila_utils.synchronized('netapp-fpolicy-%s' % vserver, external=True) def _create_fpolicy_with_lock(): nonlocal share_name # 1. Try to reuse an existing FPolicy if matches the same # requirements reusable_scope = self._find_reusable_fpolicy_scope( share, vserver_client, fpolicy_extensions_to_include=fpolicy_extensions_to_include, fpolicy_extensions_to_exclude=fpolicy_extensions_to_exclude, fpolicy_file_operations=fpolicy_file_operations) if reusable_scope: shares_to_include = copy.deepcopy( reusable_scope.get('shares-to-include')) shares_to_include.append(share_name) # Add the new share to the existing policy scope vserver_client.modify_fpolicy_scope( share_name, reusable_scope.get('policy-name'), shares_to_include=shares_to_include) LOG.debug("Share %(share_id)s was added to an existing " "fpolicy scope.", {'share_id': share['id']}) return # 2. Since we can't reuse any scope, start creating a new fpolicy protocols = ( ['nfsv3', 'nfsv4'] if share['share_proto'].lower() == 'nfs' else ['cifs']) if fpolicy_file_operations: file_operations = na_utils.convert_string_to_list( fpolicy_file_operations) else: file_operations = ( self.configuration.netapp_fpolicy_default_file_operations) # NOTE(dviroel): ONTAP limit of fpolicies for a vserser is 10. # DHSS==True backends can create new share servers or fail earlier # in choose_share_server_for_share. share_name = self._get_backend_share_name(share['id']) vserver_policies = (vserver_client.get_fpolicy_policies_status( share_name=share_name)) if len(vserver_policies) >= self.FPOLICY_MAX_VSERVER_POLICIES: msg_args = {'share_id': share['id']} msg = _("Cannot configure a new FPolicy for share " "%(share_id)s. 
The maximum number of fpolicies was " "already reached.") % msg_args LOG.exception(msg) raise exception.NetAppException(message=msg) seq_number_list = [int(policy['sequence-number']) for policy in vserver_policies] available_seq_number = None for number in range(1, self.FPOLICY_MAX_VSERVER_POLICIES + 1): if number not in seq_number_list: available_seq_number = number break events = [] policy_name = self._get_backend_fpolicy_policy_name(share['id']) try: for protocol in protocols: event_name = self._get_backend_fpolicy_event_name( share['id'], protocol) vserver_client.create_fpolicy_event(share_name, event_name, protocol, file_operations) events.append(event_name) # 2. Create a fpolicy policy and assign a scope vserver_client.create_fpolicy_policy_with_scope( policy_name, share_name, events, extensions_to_include=fpolicy_extensions_to_include, extensions_to_exclude=fpolicy_extensions_to_exclude) except Exception: # NOTE(dviroel): Rollback fpolicy policy and events creation # since they won't be linked to the share, which is made by # the scope creation. # Delete fpolicy policy vserver_client.delete_fpolicy_policy(share_name, policy_name) # Delete fpolicy events for event in events: vserver_client.delete_fpolicy_event(share_name, event) msg = _("Failed to configure a FPolicy resources for share " "%(share_id)s. ") % {'share_id': share['id']} LOG.exception(msg) raise exception.NetAppException(message=msg) # 4. 
Enable fpolicy policy vserver_client.enable_fpolicy_policy(share_name, policy_name, available_seq_number) _create_fpolicy_with_lock() LOG.debug('A new fpolicy was successfully created and associated to ' 'share %(share_id)s', {'share_id': share['id']}) def _delete_fpolicy_for_share(self, share, vserver, vserver_client): """Delete all associated fpolicy resources from a share.""" share_name = self._get_backend_share_name(share['id']) @coordination.synchronized('netapp-fpolicy-%s' % vserver) def _delete_fpolicy_with_lock(): fpolicy_scopes = vserver_client.get_fpolicy_scopes( share_name=share_name, shares_to_include=[share_name]) if fpolicy_scopes: shares_to_include = copy.copy( fpolicy_scopes[0].get('shares-to-include')) shares_to_include.remove(share_name) policy_name = fpolicy_scopes[0].get('policy-name') if shares_to_include: vserver_client.modify_fpolicy_scope( share_name, policy_name, shares_to_include=shares_to_include) else: # Delete an empty fpolicy # 1. Disable fpolicy policy vserver_client.disable_fpolicy_policy(policy_name) # 2. Retrieve fpoliocy info fpolicy_policies = vserver_client.get_fpolicy_policies( share_name=share_name, policy_name=policy_name) # 3. Delete fpolicy scope vserver_client.delete_fpolicy_scope(policy_name) # 4. Delete fpolicy policy vserver_client.delete_fpolicy_policy(share_name, policy_name) # 5. 
Delete fpolicy events for policy in fpolicy_policies: events = policy.get('events', []) for event in events: vserver_client.delete_fpolicy_event(event) _delete_fpolicy_with_lock() @na_utils.trace def _initialize_flexgroup_pools(self, cluster_aggr_set): """Initialize the FlexGroup pool map.""" flexgroup_pools = self.configuration.safe_get('netapp_flexgroup_pools') if not self.configuration.netapp_enable_flexgroup and flexgroup_pools: msg = _('Invalid configuration for FlexGroup: ' 'netapp_enable_flexgroup option must be True to configure ' 'its custom pools using netapp_flexgroup_pools.') raise exception.NetAppException(msg) elif not self.configuration.netapp_enable_flexgroup: return if not self._client.is_flexgroup_supported(): msg = _('FlexGroup pool is only supported with ONTAP version ' 'greater than or equal to 9.8.') raise exception.NetAppException(msg) if flexgroup_pools: self._flexgroup_pools = na_utils.parse_flexgroup_pool_config( flexgroup_pools, cluster_aggr_set=cluster_aggr_set, check=True) self._is_flexgroup_auto = False else: self._flexgroup_pools[na_utils.FLEXGROUP_DEFAULT_POOL_NAME] = ( sorted(cluster_aggr_set)) self._is_flexgroup_auto = True @na_utils.trace def _get_flexgroup_pool_name(self, aggr_list): """Gets the FlexGroup pool name that has the given aggregate list.""" # for FlexGroup auto provisioned is over the same single pool. 
if self._is_flexgroup_auto: return na_utils.FLEXGROUP_DEFAULT_POOL_NAME pool_name = '' aggr_list.sort() for pool, aggr_pool in self._flexgroup_pools.items(): if aggr_pool == aggr_list: pool_name = pool break return pool_name @na_utils.trace def _is_flexgroup_pool(self, pool_name): """Check if the given pool name is a FlexGroup pool.""" return pool_name in self._flexgroup_pools @na_utils.trace def _get_flexgroup_aggregate_list(self, pool_name): """Returns the aggregate list of a given FlexGroup pool name.""" return (self._flexgroup_pools[pool_name] if self._is_flexgroup_pool(pool_name) else []) @staticmethod def _is_flexgroup_share(vserver_client, share_name): """Determines if a given share name is a FlexGroup style or not.""" try: return vserver_client.is_flexgroup_volume(share_name) except (netapp_api.NaApiError, exception.StorageResourceNotFound, exception.NetAppException): msg = _('Could not determine if the volume %s is a FlexGroup or ' 'a FlexVol style.') LOG.exception(msg, share_name) raise exception.ShareNotFound(share_id=share_name) @na_utils.trace def is_flexvol_pool_configured(self): """Determines if the driver has FlexVol pools.""" return (not self.configuration.netapp_enable_flexgroup or not self.configuration.netapp_flexgroup_pool_only) @na_utils.trace def _get_minimum_flexgroup_size(self, pool_name): """Returns the minimum size for a FlexGroup share inside a pool.""" aggr_list = set(self._get_flexgroup_aggregate_list(pool_name)) return self.FLEXGROUP_MIN_SIZE_PER_AGGR * len(aggr_list) @na_utils.trace def is_flexgroup_destination_host(self, host, dm_session): """Returns if the destination host is over a FlexGroup pool.""" __, config = dm_session.get_backend_name_and_config_obj(host) if not config.safe_get('netapp_enable_flexgroup'): return False flexgroup_pools = config.safe_get('netapp_flexgroup_pools') pools = {} if flexgroup_pools: pools = na_utils.parse_flexgroup_pool_config(flexgroup_pools) else: pools[na_utils.FLEXGROUP_DEFAULT_POOL_NAME] = {} 
pool_name = share_utils.extract_host(host, level='pool') return pool_name in pools @na_utils.trace def create_backup(self, context, share_instance, backup, share_server=None): """Create backup for NetApp share""" src_vserver, src_vserver_client = self._get_vserver( share_server=share_server) src_cluster = src_vserver_client.get_cluster_name() src_vol = self._get_backend_share_name(share_instance['id']) backup_options = backup.get('backup_options', {}) backup_type = backup_options.get(Backup.BACKUP_TYPE.value) # Check if valid backup type is provided if not backup_type: raise exception.BackupException("Driver needs a valid backup type" " from command line or API.") # check the backend is related to NetApp try: backup_config = data_motion.get_backup_configuration(backup_type) except exception.BadConfigurationException: msg = _("Could not find backup_type '%(backup_type)s' stanza" " in config file") % {'backup_type': backup_type} raise exception.BadConfigurationException(reason=msg) backend_name = backup_config.safe_get(Backup.BACKEND_NAME.value) try: backend_config = data_motion.get_backend_configuration( backend_name ) except exception.BadConfigurationException: msg = _("Could not find backend '%(backend_name)s' stanza" " in config file") % {'backend_name': backend_name} raise exception.BadConfigurationException(reason=msg) if (backend_config.safe_get("netapp_storage_family") != 'ontap_cluster'): err_msg = _("Wrong vendor backend '%s' is provided, provide" " only NetApp backend.") % backend_name raise exception.BackupException(err_msg) # Check backend has compatible backup type if (backend_config.safe_get("netapp_enabled_backup_types") is None or backup_type not in backend_config.safe_get( "netapp_enabled_backup_types")): err_msg = _("Backup type '%(backup_type)s' is not compatible with" " backend '%(backend_name)s'.") msg_args = { 'backup_type': backup_type, 'backend_name': backend_name, } raise exception.BackupException(err_msg % msg_args) # Verify that both 
source and destination cluster are peered des_cluster_api_client = self._get_api_client_for_backend( backend_name) des_cluster = des_cluster_api_client.get_cluster_name() if src_cluster != des_cluster: cluster_peer_info = self._client.get_cluster_peers( remote_cluster_name=des_cluster) if not cluster_peer_info: err_msg = _("Source cluster '%(src_cluster)s' and destination" " cluster '%(des_cluster)s' are not peered" " backend %(backend_name)s.") msg_args = { 'src_cluster': src_cluster, 'des_cluster': des_cluster, 'backend_name': backend_name } raise exception.NetAppException(err_msg % msg_args) # Get the destination vserver and volume for relationship source_path = f"{src_vserver}:{src_vol}" snapmirror_info = src_vserver_client.get_snapmirror_destinations( source_path=source_path ) if len(snapmirror_info) > 1: msg = _("Source path '%(path)s' has more than one relationships." " To create the share backup, delete the all source" " volume's SnapMirror relationships using 'snapmirror'" " ONTAP CLI or System Manager.") msg_args = { 'path': source_path } raise exception.NetAppException(msg % msg_args) elif len(snapmirror_info) == 1: des_vserver, des_volume = self._get_destination_vserver_and_vol( src_vserver_client, source_path, False) des_vserver_client = self._get_api_client_for_backend( backend_name, vserver=des_vserver) else: if (backup_config.safe_get(Backup.DES_VOLUME.value) and not backup_config.safe_get(Backup.DES_VSERVER.value)): msg = _("Could not find vserver name under stanza" " '%(backup_type)s' in configuration while volume" " name is provided.") params = {"backup_type": backup_type} raise exception.BadConfigurationException(reason=msg % params) des_vserver = self._get_vserver_for_backup( backup, share_server=share_server) des_vserver_client = self._get_api_client_for_backend( backend_name, vserver=des_vserver) try: des_volume = self._get_volume_for_backup(backup, share_instance, src_vserver_client, des_vserver_client) except (netapp_api.NaApiError, 
exception.NetAppException): # Delete the vserver if share_server: self._delete_backup_vserver(backup, des_vserver) msg = _("Failed to create a volume in vserver '%(des_vserver)s" "'") msg_args = {'des_vserver': des_vserver} raise exception.NetAppException(msg % msg_args) if (src_vserver != des_vserver and len(src_vserver_client.get_vserver_peers( src_vserver, des_vserver)) == 0): src_vserver_client.create_vserver_peer( src_vserver, des_vserver, peer_cluster_name=des_cluster) if des_cluster is not None and src_cluster != des_cluster: des_vserver_client.accept_vserver_peer(des_vserver, src_vserver) des_snapshot_list = (des_vserver_client. list_volume_snapshots(des_volume)) snap_list_with_backup = [ snap for snap in des_snapshot_list if snap.startswith( Backup.SM_LABEL.value) ] if len(snap_list_with_backup) == 1: self._set_volume_has_backup_before(True) policy_name = f"{Backup.SM_POLICY.value}_{share_instance['id']}" try: des_vserver_client.create_snapmirror_policy( policy_name, policy_type="vault", discard_network_info=False, snapmirror_label=Backup.SM_LABEL.value, keep=250) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if 'policy with this name already exists' in e.message: exc_context.reraise = False try: des_vserver_client.create_snapmirror_vol( src_vserver, src_vol, des_vserver, des_volume, "extended_data_protection", policy=policy_name, ) db_session = data_motion.DataMotionSession() db_session.initialize_and_wait_snapmirror_vol( des_vserver_client, src_vserver, src_vol, des_vserver, des_volume, timeout=backup_config.netapp_snapmirror_job_timeout ) except netapp_api.NaApiError: self._resource_cleanup_for_backup(backup, share_instance, des_vserver, des_volume, share_server=share_server) msg = _("SnapVault relationship creation or initialization" " failed between source '%(source_vserver)s:" "%(source_volume)s' and destination '%(des_vserver)s:" "%(des_volume)s' for share id %(share_id)s.") msg_args = { 
'source_vserver': src_vserver, 'source_volume': src_vol, 'des_vserver': des_vserver, 'des_volume': des_volume, 'share_id': share_instance['share_id'] } raise exception.NetAppException(msg % msg_args) snapshot_name = self._get_backup_snapshot_name(backup, share_instance['id']) src_vserver_client.create_snapshot( src_vol, snapshot_name, snapmirror_label=Backup.SM_LABEL.value) # Update the SnapMirror relationship des_vserver_client.update_snapmirror_vol(src_vserver, src_vol, des_vserver, des_volume) LOG.debug("SnapMirror relationship updated successfully.") @na_utils.trace def create_backup_continue(self, context, share_instance, backup, share_server=None): """Keep tracking the status of share backup""" progress_status = {'total_progress': Backup.TOTAL_PROGRESS_ZERO.value} src_vserver, src_vserver_client = self._get_vserver( share_server=share_server) src_vol_name = self._get_backend_share_name(share_instance['id']) backend_name = self._get_backend(backup) source_path = f"{src_vserver}:{src_vol_name}" LOG.debug("SnapMirror source path: %s", source_path) backup_type = backup.get(Backup.BACKUP_TYPE.value) backup_config = data_motion.get_backup_configuration(backup_type) # Make sure SnapMirror relationship is created snapmirror_info = src_vserver_client.get_snapmirror_destinations( source_path=source_path, ) if not snapmirror_info: LOG.warning("There is no SnapMirror relationship available for" " source path yet %s.", source_path) return progress_status des_vserver, des_vol = self._get_destination_vserver_and_vol( src_vserver_client, source_path, ) if not des_vserver or not des_vol: raise exception.NetAppException("Not able to find vserver " " and volume from SnpMirror" " relationship.") des_path = f"{des_vserver}:{des_vol}" LOG.debug("SnapMirror destination path: %s", des_path) des_vserver_client = self._get_api_client_for_backend( backend_name, vserver=des_vserver, ) snapmirror_info = des_vserver_client.get_snapmirrors( source_path=source_path, dest_path=des_path) if 
not snapmirror_info: msg_args = { 'source_path': source_path, 'des_path': des_path, } msg = _("There is no SnapMirror relationship available for" " source path '%(source_path)s' and destination path" " '%(des_path)s' yet.") LOG.warning(msg, msg_args) return progress_status LOG.debug("SnapMirror details %s:", snapmirror_info) progress_status["total_progress"] = (Backup. TOTAL_PROGRESS_HUNDRED.value) if snapmirror_info[0].get("last-transfer-type") != "update": progress_status["total_progress"] = (Backup. TOTAL_PROGRESS_ZERO.value) return progress_status if snapmirror_info[0].get("relationship-status") != "idle": progress_status = self._get_backup_progress_status( des_vserver_client, snapmirror_info) LOG.debug("Progress status: %(progress_status)s", {'progress_status': progress_status}) return progress_status if snapmirror_info[0].get("is-healthy") == 'false': self._resource_cleanup_for_backup(backup, share_instance, des_vserver, des_vol, share_server=share_server) msg_args = { 'source_path': source_path, 'des_path': des_path, } msg = _("There is an issue with SnapMirror relationship with" " source path '%(source_path)s' and destination path" " '%(des_path)s'. Make sure destination volume is or was " "not part of any other SnapMirror relationship.") raise exception.NetAppException(message=msg % msg_args) # Verify that snapshot is transferred to destination volume snap_name = self._get_backup_snapshot_name(backup, share_instance['id']) self._verify_and_wait_for_snapshot_to_transfer(des_vserver_client, des_vol, snap_name) LOG.debug("Snapshot '%(snap_name)s' transferred successfully to" " destination.", {'snap_name': snap_name}) # previously if volume was part of some relationship and if we delete # all the backup of share then last snapshot will be left on # destination volume, and we can't delete that snapshot due to ONTAP # restriction. 
Next time if user create the first backup then we # update the destination volume with latest backup and delete the last # leftover snapshot is_backup_completed = (progress_status["total_progress"] == Backup.TOTAL_PROGRESS_HUNDRED.value) if backup_config.get(Backup.DES_VOLUME.value) and is_backup_completed: snap_list_with_backup = self._get_des_volume_backup_snapshots( des_vserver_client, des_vol, share_instance['id'] ) LOG.debug("Snapshot list for backup %(snap_list)s.", {'snap_list': snap_list_with_backup}) if (self.is_volume_backup_before and len(snap_list_with_backup) == 2): if snap_name == snap_list_with_backup[0]: snap_to_delete = snap_list_with_backup[1] else: snap_to_delete = snap_list_with_backup[0] self._set_volume_has_backup_before(False) des_vserver_client.delete_snapshot(des_vol, snap_to_delete, True) LOG.debug("Previous snapshot %{snap_name}s deleted" " successfully.", {'snap_name': snap_to_delete}) return progress_status @na_utils.trace def restore_backup(self, context, backup, share_instance, share_server=None): """Restore the share backup""" src_vserver, src_vserver_client = self._get_vserver( share_server=share_server, ) src_vol_name = self._get_backend_share_name(share_instance['id']) source_path = f"{src_vserver}:{src_vol_name}" des_vserver, des_vol = self._get_destination_vserver_and_vol( src_vserver_client, source_path, ) if not des_vserver or not des_vol: raise exception.NetAppException("Not able to find vserver " " and volume from SnpMirror" " relationship.") backend_name = self._get_backend(backup) des_vserver_client = self._get_api_client_for_backend( backend_name, vserver=des_vserver, ) vserver_client = src_vserver_client backend_config = data_motion.get_backend_configuration( backend_name) if not backend_config.netapp_use_legacy_client: vserver_client = des_vserver_client snap_name = self._get_backup_snapshot_name(backup, share_instance['id']) source_path = src_vserver + ":" + src_vol_name des_path = des_vserver + ":" + des_vol 
source_cluster = src_vserver_client.get_cluster_name() vserver_client.snapmirror_restore_vol(source_path=des_path, dest_path=source_path, source_snapshot=snap_name, des_cluster=source_cluster) @na_utils.trace def restore_backup_continue(self, context, backup, share_instance, share_server=None): """Keep checking the restore operation status""" progress_status = {} src_vserver, src_vserver_client = self._get_vserver( share_server=share_server) src_vol_name = self._get_backend_share_name(share_instance['id']) source_path = f"{src_vserver}:{src_vol_name}" snapmirror_info = src_vserver_client.get_snapmirrors( dest_path=source_path, ) if snapmirror_info: progress_status = { "total_progress": Backup.TOTAL_PROGRESS_ZERO.value } return progress_status LOG.debug("SnapMirror relationship of type RST is deleted.") snap_name = self._get_backup_snapshot_name(backup, share_instance['id']) snapshot_list = src_vserver_client.list_volume_snapshots(src_vol_name) for snapshot in snapshot_list: if snap_name in snapshot: progress_status["total_progress"] = ( Backup.TOTAL_PROGRESS_HUNDRED.value) return progress_status if not progress_status: err_msg = _("Failed to restore the snapshot %s.") % snap_name raise exception.NetAppException(err_msg) @na_utils.trace def delete_backup(self, context, backup, share_instance, share_server=None): """Delete the share backup for netapp share""" try: src_vserver, src_vserver_client = self._get_vserver( share_server=share_server, ) except exception.VserverNotFound: LOG.warning("Vserver associated with share '%s' was not found.", share_instance['id']) return src_vol_name = self._get_backend_share_name(share_instance['id']) backend_name = self._get_backend(backup) if backend_name is None: return source_path = f"{src_vserver}:{src_vol_name}" des_vserver, des_vol = self._get_destination_vserver_and_vol( src_vserver_client, source_path, False, ) if not des_vserver or not des_vol: LOG.debug("Not able to find vserver and volume from SnpMirror" " relationship.") 
return des_path = f"{des_vserver}:{des_vol}" # Delete the snapshot from destination volume snap_name = self._get_backup_snapshot_name(backup, share_instance['id']) des_vserver_client = self._get_api_client_for_backend( backend_name, vserver=des_vserver, ) try: list_snapshots = self._get_des_volume_backup_snapshots( des_vserver_client, des_vol, share_instance['id'], ) except netapp_api.NaApiError: LOG.exception("Failed to get the snapshots from cluster," " provide the right backup type or check the" " backend details are properly configured in" " manila.conf file.") return snapmirror_info = des_vserver_client.get_snapmirrors( source_path=source_path, dest_path=des_path, ) is_snapshot_deleted = self._is_snapshot_deleted(True) if snapmirror_info and len(list_snapshots) == 1: self._resource_cleanup_for_backup(backup, share_instance, des_vserver, des_vol, share_server=share_server) elif len(list_snapshots) > 1: try: des_vserver_client.delete_snapshot(des_vol, snap_name, True) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if "entry doesn't exist" in e.message: exc_context.reraise = False try: des_vserver_client.get_snapshot(des_vol, snap_name) is_snapshot_deleted = self._is_snapshot_deleted(False) except (SnapshotResourceNotFound, netapp_api.NaApiError): LOG.debug("Snapshot '%s' deleted successfully.", snap_name) if not is_snapshot_deleted: err_msg = _("Snapshot '%(snapshot_name)s' is not deleted" " successfully on ONTAP." 
% {"snapshot_name": snap_name}) LOG.exception(err_msg) raise exception.NetAppException(err_msg) @na_utils.trace def _set_volume_has_backup_before(self, value): self.is_volume_backup_before = value @na_utils.trace def _is_snapshot_deleted(self, is_deleted): return is_deleted @na_utils.trace def _get_backup_snapshot_name(self, backup, share_id): backup_id = backup.get('id', "") return f"{Backup.SM_LABEL.value}_{share_id}_{backup_id}" @na_utils.trace def _get_backend(self, backup): backup_type = backup.get(Backup.BACKUP_TYPE.value) try: backup_config = data_motion.get_backup_configuration(backup_type) except Exception: LOG.exception("There is some issue while getting the" " backup configuration. Make sure correct" " backup type is provided while creating the" " backup.") return None return backup_config.safe_get(Backup.BACKEND_NAME.value) @na_utils.trace def _get_des_volume_backup_snapshots(self, des_vserver_client, des_vol, share_id): """Get the list of snapshot from destination volume""" des_snapshot_list = (des_vserver_client. 
list_volume_snapshots(des_vol, Backup.SM_LABEL.value)) backup_filter = f"{Backup.SM_LABEL.value}_{share_id}" snap_list_with_backup = [snap for snap in des_snapshot_list if snap.startswith(backup_filter)] return snap_list_with_backup @na_utils.trace def _get_vserver_for_backup(self, backup, share_server=None): """Get the destination vserver if vserver not provided we are creating the new one in case of dhss_true """ backup_type_config = data_motion.get_backup_configuration( backup.get(Backup.BACKUP_TYPE.value)) if backup_type_config.get(Backup.DES_VSERVER.value): return backup_type_config.get(Backup.DES_VSERVER.value) else: return self._get_backup_vserver(backup, share_server=share_server) @na_utils.trace def _get_volume_for_backup(self, backup, share_instance, src_vserver_client, des_vserver_client): """Get the destination volume if volume is not provided in config file under backup_type stanza then create the new one """ dm_session = data_motion.DataMotionSession() backup_type = backup.get(Backup.BACKUP_TYPE.value) backup_type_config = data_motion.get_backup_configuration(backup_type) if (backup_type_config.get(Backup.DES_VSERVER.value) and backup_type_config.get(Backup.DES_VOLUME.value)): return backup_type_config.get(Backup.DES_VOLUME.value) else: des_aggr = dm_session.get_most_available_aggr_of_vserver( des_vserver_client) if not des_aggr: msg = _("Not able to find any aggregate from ONTAP" " to create the volume. 
Make sure aggregates are" " added to destination vserver") raise exception.NetAppException(msg) src_vol = self._get_backend_share_name(share_instance['id']) vol_attr = src_vserver_client.get_volume(src_vol) source_vol_size = vol_attr.get('size') vol_size_in_gb = int(source_vol_size) / units.Gi share_id = share_instance['id'].replace('-', '_') des_volume = f"{Backup.DES_VOLUME_PREFIX.value}_{share_id}" des_vserver_client.create_volume(des_aggr, des_volume, vol_size_in_gb, volume_type='dp') return des_volume @na_utils.trace def _get_destination_vserver_and_vol(self, src_vserver_client, source_path, validate_relation=True): """Get Destination vserver and volume from SM relationship""" des_vserver, des_vol = None, None snapmirror_info = src_vserver_client.get_snapmirror_destinations( source_path=source_path) if validate_relation and len(snapmirror_info) != 1: msg = _("There are more then one relationship with the source" " '%(source_path)s'." % {'source_path': source_path}) raise exception.NetAppException(msg) if len(snapmirror_info) == 1: des_vserver = snapmirror_info[0].get("destination-vserver") des_vol = snapmirror_info[0].get("destination-volume") return des_vserver, des_vol @na_utils.trace def _verify_and_wait_for_snapshot_to_transfer(self, des_vserver_client, des_vol, snap_name, timeout=300, ): """Wait and verify that snapshot is moved to destination""" interval = 5 retries = (timeout / interval or 1) @manila_utils.retry(retry_param=(netapp_api.NaApiError, SnapshotResourceNotFound), interval=interval, retries=retries, backoff_rate=1) def _wait_for_snapshot_to_transfer(): des_vserver_client.get_snapshot(des_vol, snap_name) try: _wait_for_snapshot_to_transfer() except (netapp_api.NaApiError, SnapshotResourceNotFound): msg = _("Timed out while wait for snapshot to transfer") raise exception.NetAppException(message=msg) @na_utils.trace def _get_backup_progress_status(self, des_vserver_client, snapmirror_details): """Calculate percentage of SnapMirror data 
transferred""" des_vol = snapmirror_details[0].get("destination-volume") vol_attr = des_vserver_client.get_volume(des_vol) size_used = vol_attr.get('size-used') sm_data_transferred = snapmirror_details[0].get( "last-transfer-size") if size_used and sm_data_transferred: progress_status_percent = (int(sm_data_transferred) / int( size_used)) * 100 return str(round(progress_status_percent, 2)) else: return Backup.TOTAL_PROGRESS_ZERO.value @na_utils.trace def _resource_cleanup_for_backup(self, backup, share_instance, des_vserver, des_vol, share_server=None): """Cleanup the created resources cleanup all created ONTAP resources when delete the last backup or in case of exception throw while creating the backup. """ src_vserver, src_vserver_client = self._get_vserver( share_server=share_server) dm_session = data_motion.DataMotionSession() backup_type_config = data_motion.get_backup_configuration( backup.get(Backup.BACKUP_TYPE.value)) backend_name = backup_type_config.safe_get(Backup.BACKEND_NAME.value) des_vserver_client = self._get_api_client_for_backend( backend_name, vserver=des_vserver, ) src_vol_name = self._get_backend_share_name(share_instance['id']) # Abort relationship try: des_vserver_client.abort_snapmirror_vol(src_vserver, src_vol_name, des_vserver, des_vol, clear_checkpoint=False) except netapp_api.NaApiError: pass try: des_vserver_client.delete_snapmirror_vol(src_vserver, src_vol_name, des_vserver, des_vol) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or "(entry doesn't exist)" in e.message): exc_context.reraise = False dm_session.wait_for_snapmirror_release_vol( src_vserver, des_vserver, src_vol_name, des_vol, False, src_vserver_client, timeout=backup_type_config.netapp_snapmirror_job_timeout) try: policy_name = f"{Backup.SM_POLICY.value}_{share_instance['id']}" des_vserver_client.delete_snapmirror_policy(policy_name) except 
netapp_api.NaApiError: pass # Delete the vserver peering try: if src_vserver != des_vserver: src_vserver_client.delete_vserver_peer(src_vserver, des_vserver) except netapp_api.NaApiError: pass # Delete volume if not backup_type_config.safe_get(Backup.DES_VOLUME.value): try: des_vserver_client.offline_volume(des_vol) des_vserver_client.delete_volume(des_vol) except netapp_api.NaApiError: pass # Delete Vserver if share_server is not None: self._delete_backup_vserver(backup, des_vserver) @na_utils.trace def update_volume_snapshot_policy(self, share, snapshot_policy, share_server=None): share_name = self._get_backend_share_name(share['id']) _, vserver_client = self._get_vserver(share_server=share_server) vserver_client.update_volume_snapshot_policy(share_name, snapshot_policy) @na_utils.trace def update_showmount(self, showmount, share_server=None): showmount = showmount.lower() if showmount not in ('true', 'false'): err_msg = _("Invalid showmount value supplied: %s.") % showmount raise exception.NetAppException(err_msg) vserver, vserver_client = self._get_vserver(share_server=share_server) vserver_client.update_showmount(showmount) def update_pnfs(self, pnfs, share_server=None): pnfs = pnfs.lower() if pnfs not in ('true', 'false'): err_msg = _("Invalid pnfs value supplied: %s.") % pnfs raise exception.NetAppException(err_msg) vserver, vserver_client = self._get_vserver(share_server=share_server) vserver_client.update_pnfs(pnfs) @na_utils.trace def update_share_from_metadata(self, context, share, metadata, share_server=None): metadata_update_func_map = { "snapshot_policy": "update_volume_snapshot_policy", } for k, v in metadata.items(): metadata_update_method = ( getattr(self, metadata_update_func_map.get(k)) if k in metadata_update_func_map.keys() else None) if metadata_update_method: metadata_update_method(share, v, share_server=share_server) def update_share_network_subnet_from_metadata(self, context, share_network, share_network_subnet, share_server, metadata): 
metadata_update_func_map = { "showmount": "update_showmount", "pnfs": "update_pnfs", } for k, v in metadata.items(): metadata_update_method = ( getattr(self, metadata_update_func_map.get(k)) if k in metadata_update_func_map.keys() else None) if metadata_update_method: metadata_update_method(v, share_server=share_server) @na_utils.trace def _get_aggregate_snaplock_type(self, aggr_name): if self._have_cluster_creds: aggr_attributes = self._client.get_aggregate(aggr_name) snaplock_type = aggr_attributes.get('snaplock-type') else: snaplock_type = self._client.get_vserver_aggr_snaplock_type( aggr_name, ) return snaplock_type @na_utils.trace def _is_snaplock_compatible_for_migration(self, source_pool, des_pool): if self._client.features.UNIFIED_AGGR: return True if (self.configuration.netapp_use_legacy_client and self._client.features.SNAPLOCK): source_snaplock_type = self._ssc_stats.get(source_pool, {}).get( 'netapp_snaplock_type') des_snaplock_type = self._ssc_stats.get(des_pool, {}).get( 'netapp_snaplock_type') if source_snaplock_type != des_snaplock_type: return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py0000664000175000017500000034666200000000000030266 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" NetApp Data ONTAP cDOT multi-SVM storage driver library. This library extends the abstract base library and completes the multi-SVM functionality needed by the cDOT multi-SVM Manila driver. This library variant creates Data ONTAP storage virtual machines (i.e. 'vservers') as needed to provision shares. """ import re from manila.share.drivers.netapp.dataontap.cluster_mode.lib_base import Backup from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import units from oslo_utils import uuidutils from manila.common import constants from manila import exception from manila.i18n import _ from manila.message import message_field from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.client import client_cmode_rest from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp import utils as na_utils from manila.share import share_types from manila.share import utils as share_utils from manila import utils LOG = log.getLogger(__name__) SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan') SEGMENTED_NETWORK_TYPES = ('vlan',) DEFAULT_MTU = 1500 SERVER_MIGRATE_SVM_DR = 'svm_dr' SERVER_MIGRATE_SVM_MIGRATE = 'svm_migrate' METADATA_VLAN = 'set_vlan' METADATA_MTU = 'set_mtu' class NetAppCmodeMultiSVMFileStorageLibrary( lib_base.NetAppCmodeFileStorageLibrary): @na_utils.trace def check_for_setup_error(self): if self._have_cluster_creds: if self.configuration.netapp_vserver: msg = ('Vserver is specified in the configuration. 
This is ' 'ignored when the driver is managing share servers.') LOG.warning(msg) else: # only have vserver creds, which is an error in multi_svm mode msg = _('Cluster credentials must be specified in the ' 'configuration when the driver is managing share servers.') raise exception.InvalidInput(reason=msg) # Ensure FlexGroup support aggr_list = self._client.list_non_root_aggregates() self._initialize_flexgroup_pools(set(aggr_list)) # Ensure one or more aggregates are available. if (self.is_flexvol_pool_configured() and not self._find_matching_aggregates(aggregate_names=aggr_list)): msg = _('No aggregates are available for provisioning shares. ' 'Ensure that the configuration option ' 'netapp_aggregate_name_search_pattern is set correctly.') raise exception.NetAppException(msg) (super(NetAppCmodeMultiSVMFileStorageLibrary, self). check_for_setup_error()) @na_utils.trace def _get_vserver(self, share_server=None, vserver_name=None, backend_name=None): if share_server: backend_details = share_server.get('backend_details') vserver = backend_details.get( 'vserver_name') if backend_details else None if not vserver: msg = _('Vserver name is absent in backend details. 
Please ' 'check whether Vserver was created properly.') raise exception.VserverNotSpecified(msg) elif vserver_name: vserver = vserver_name else: msg = _('Share server or vserver name not provided') raise exception.InvalidInput(reason=msg) if backend_name: vserver_client = data_motion.get_client_for_backend( backend_name, vserver ) else: vserver_client = self._get_api_client(vserver) if not vserver_client.vserver_exists(vserver): raise exception.VserverNotFound(vserver=vserver) return vserver, vserver_client def _get_ems_pool_info(self): return { 'pools': { 'vserver': None, 'aggregates': self._find_matching_aggregates(), 'flexgroup_aggregates': self._flexgroup_pools, }, } @na_utils.trace def _handle_housekeeping_tasks(self): """Handle various cleanup activities.""" self._client.prune_deleted_nfs_export_policies() self._client.prune_deleted_snapshots() self._client.remove_unused_qos_policy_groups() (super(NetAppCmodeMultiSVMFileStorageLibrary, self). _handle_housekeeping_tasks()) @na_utils.trace def _find_matching_aggregates(self, aggregate_names=None): """Find all aggregates match pattern.""" if not self.is_flexvol_pool_configured(): return [] if not aggregate_names: aggregate_names = self._client.list_non_root_aggregates() pattern = self.configuration.netapp_aggregate_name_search_pattern return [aggr_name for aggr_name in aggregate_names if re.match(pattern, aggr_name)] @na_utils.trace def _set_network_with_metadata(self, network_info): """Set the subnet metadata information for network_info object.""" for network in network_info: metadata = network.get('subnet_metadata') if not metadata: continue metadata_vlan = metadata.get(METADATA_VLAN) if not metadata_vlan: continue if int(metadata_vlan) > 4094 or int(metadata_vlan) < 1: msg = _( 'A segmentation ID %s was specified but is not valid for ' 'a VLAN network type; the segmentation ID must be an ' 'integer value in the range of [1,4094]') raise exception.NetworkBadConfigurationException( reason=msg % metadata_vlan) 
if metadata.get(METADATA_MTU) is not None: try: int(metadata.get(METADATA_MTU)) except ValueError: msg = _('Metadata network MTU must be an integer value.') raise exception.NetworkBadConfigurationException(msg) network['network_type'] = 'vlan' network['segmentation_id'] = metadata_vlan for allocation in network['network_allocations']: allocation['network_type'] = 'vlan' allocation['segmentation_id'] = metadata_vlan allocation['mtu'] = int(metadata.get(METADATA_MTU) or allocation['mtu']) @na_utils.trace def setup_server(self, network_info, metadata=None): """Creates and configures new Vserver.""" # only changes network_info if one of networks has metadata set. self._set_network_with_metadata(network_info) ports = {} server_id = network_info[0]['server_id'] LOG.debug("Setting up server %s.", server_id) for network in network_info: for network_allocation in network['network_allocations']: ports[network_allocation['id']] = ( network_allocation['ip_address']) nfs_config = self._default_nfs_config if (self.is_nfs_config_supported and metadata and 'share_type_id' in metadata): extra_specs = share_types.get_share_type_extra_specs( metadata['share_type_id']) self._check_nfs_config_extra_specs_validity(extra_specs) nfs_config = self._get_nfs_config_provisioning_options(extra_specs) vlan = network_info[0]['segmentation_id'] @utils.synchronized('netapp-VLAN-%s' % vlan, external=True) def setup_server_with_lock(): self._validate_network_type(network_info) # Before proceeding, make sure subnet configuration is valid self._validate_share_network_subnets(network_info) vserver_name = self._get_vserver_name(server_id) server_details = { 'vserver_name': vserver_name, 'ports': jsonutils.dumps(ports), } if self.is_nfs_config_supported: server_details['nfs_config'] = jsonutils.dumps(nfs_config) if self.configuration.netapp_restrict_lif_creation_per_ha_pair: self._check_data_lif_count_limit_reached_for_ha_pair( self._client ) try: self._create_vserver(vserver_name, network_info, 
metadata, nfs_config=nfs_config) except Exception as e: e.detail_data = {'server_details': server_details} raise if metadata.get('encryption_key_ref'): self._create_barbican_kms_config_for_specified_vserver( vserver_name, metadata) return server_details return setup_server_with_lock() @na_utils.trace def _check_nfs_config_extra_specs_validity(self, extra_specs): """Check if the nfs config extra_spec has valid values.""" int_extra_specs = ['netapp:tcp_max_xfer_size', 'netapp:udp_max_xfer_size'] for key in int_extra_specs: if key in extra_specs: self._check_if_extra_spec_is_positive( extra_specs[key], key) @na_utils.trace def _check_if_extra_spec_is_positive(self, value, key): """Check if extra_spec has a valid positive int value.""" if int(value) < 0: args = {'value': value, 'key': key} msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" ' 'used by share server setup.') raise exception.NetAppException(msg % args) @na_utils.trace def _get_nfs_config_provisioning_options(self, specs): """Return the nfs config provisioning option.""" nfs_config = self.get_string_provisioning_options( specs, self.NFS_CONFIG_EXTRA_SPECS_MAP) # Changes the no set config to the default value for k, v in nfs_config.items(): if v is None: nfs_config[k] = self._default_nfs_config[k] return nfs_config @na_utils.trace def _validate_network_type(self, network_info): """Raises exception if the segmentation type is incorrect.""" unsupported_nets = [network for network in network_info if network['network_type'] not in SUPPORTED_NETWORK_TYPES] if unsupported_nets: msg = _('The specified network type %s is unsupported by the ' 'NetApp clustered Data ONTAP driver') raise exception.NetworkBadConfigurationException( reason=msg % unsupported_nets[0]['network_type']) @na_utils.trace def _get_vserver_name(self, server_id): return self.configuration.netapp_vserver_name_template % server_id @na_utils.trace def _validate_share_network_subnets(self, network_info): """Raises exception if subnet 
configuration isn't valid."""
        # Driver supports multiple subnets only if in the same network
        # segment
        ref_vlan = network_info[0]['segmentation_id']
        if not all([network['segmentation_id'] == ref_vlan
                    for network in network_info]):
            msg = _("The specified network configuration isn't supported by "
                    "the NetApp clustered Data ONTAP driver. All subnets "
                    "must reside in the same network segment.")
            raise exception.NetworkBadConfigurationException(reason=msg)

    @na_utils.trace
    def _create_vserver(self, vserver_name, network_info, metadata=None,
                        nfs_config=None):
        """Creates Vserver with given parameters if it doesn't exist.

        :raises NetAppException: if a vserver with this name already exists.
        """
        if self._client.vserver_exists(vserver_name):
            msg = _('Vserver %s already exists.')
            raise exception.NetAppException(msg % vserver_name)

        # NOTE(dviroel): check if this vserver will be a data protection
        # server
        is_dp_destination = False
        if metadata and metadata.get('migration_destination') is True:
            is_dp_destination = True
            msg = _("Starting creation of a vserver with 'dp_destination' "
                    "subtype.")
            LOG.debug(msg)

        # NOTE(lseki): If there's already an ipspace created for the same
        # VLAN port, reuse it. It will be named after the previously created
        # share server's neutron subnet id.
        node_name = self._client.list_cluster_nodes()[0]
        port = self._get_node_data_port(node_name)
        # NOTE(sfernand): ONTAP driver currently supports multiple subnets
        # only in a same network segment. A validation is performed in a
        # earlier step to make sure all subnets have the same
        # segmentation_id.
        vlan = network_info[0]['segmentation_id']
        ipspace_name = self._client.get_ipspace_name_for_vlan_port(
            node_name, port, vlan)
        if (
            ipspace_name is None
            or ipspace_name in client_cmode.CLUSTER_IPSPACES
        ):
            ipspace_name = self._create_ipspace(network_info[0])

        aggregate_names = self._find_matching_aggregates()
        if is_dp_destination:
            # Get Data ONTAP aggregate name as pool name.
            LOG.debug('Creating a new Vserver (%s) for data protection.',
                      vserver_name)
            self._client.create_vserver_dp_destination(
                vserver_name,
                aggregate_names,
                ipspace_name,
                self.configuration.netapp_delete_retention_hours,
                self.configuration.netapp_enable_logical_space_reporting)
            # Set up port and broadcast domain for the current ipspace
            self._create_port_and_broadcast_domain(
                ipspace_name, network_info[0])
        else:
            LOG.debug('Vserver %s does not exist, creating.', vserver_name)
            aggr_set = set(aggregate_names).union(
                self._get_flexgroup_aggr_set())
            self._client.create_vserver(
                vserver_name,
                self.configuration.netapp_root_volume_aggregate,
                self.configuration.netapp_root_volume,
                aggr_set,
                ipspace_name,
                self.configuration.netapp_security_cert_expire_days,
                self.configuration.netapp_delete_retention_hours,
                self.configuration.netapp_enable_logical_space_reporting)

            vserver_client = self._get_api_client(vserver=vserver_name)
            security_services = network_info[0].get('security_services')
            try:
                self._setup_network_for_vserver(
                    vserver_name, vserver_client, network_info, ipspace_name,
                    security_services=security_services,
                    nfs_config=nfs_config)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error("Failed to configure Vserver.")
                    # NOTE(dviroel): At this point, the lock was already
                    # acquired by the caller of _create_vserver.
                    self._delete_vserver(
                        vserver_name,
                        security_services=security_services,
                        needs_lock=False)

    @na_utils.trace
    def _create_barbican_kms_config_for_specified_vserver(self, vserver_name,
                                                          metadata):
        """Creates a Barbican KMS configuration for the specified vserver.

        Requires a REST client; reads the key reference, Keystone URL and
        application credentials from the supplied metadata.
        """
        key_href = metadata.get('encryption_key_ref')
        config_name = "barbican_config_" + uuidutils.generate_uuid()
        keystone_auth_url = metadata.get('keystone_url')
        keystone_auth_token_path = self.configuration.safe_get(
            'netapp_identity_auth_token_path')
        keystone_url = keystone_auth_url + keystone_auth_token_path
        app_cred_id = metadata.get('application_credential_id')
        app_cred_secret = metadata.get('application_credential_secret')
        backend_name = share_utils.extract_host(
            metadata.get('request_host'), level='backend_name')
        try:
            rest_client = data_motion.get_client_for_backend(
                backend_name, vserver_name=None, force_rest_client=True)
        except exception.NetAppException:
            LOG.error("Failed to get REST client for backend %s. "
                      "Please ensure that REST is enabled in the cluster.",
                      backend_name)
            raise
        LOG.debug('Creating a Barbican KMS configuration for the vserver '
                  '%(vserver)s', {'vserver': vserver_name})
        rest_client.create_barbican_kms_config_for_specified_vserver(
            vserver_name, config_name, key_href, keystone_url, app_cred_id,
            app_cred_secret)
        LOG.debug('Getting the key store configuration uuid for the config '
                  '%(config)s', {'config': config_name})
        config_uuid = rest_client.get_key_store_config_uuid(config_name)
        LOG.debug('Enabling the key store configuration the config '
                  '%(config)s', {'config': config_name})
        rest_client.enable_key_store_config(config_uuid)

    def _setup_network_for_vserver(self, vserver_name, vserver_client,
                                   network_info, ipspace_name,
                                   enable_nfs=True, security_services=None,
                                   nfs_config=None):
        """Setup Vserver network configuration"""
        # segmentation_id and mtu are the same for all allocations and can
        # be extracted from the first index, as subnets were previously
        # checked at this point to ensure they are all
in the same network segment and
        # consequently belongs to the same Neutron network (which holds L2
        # information).
        ref_subnet_allocation = network_info[0]['network_allocations'][0]
        vlan = ref_subnet_allocation['segmentation_id']
        mtu = ref_subnet_allocation['mtu'] or DEFAULT_MTU

        # One VLAN port per cluster node; LIFs are homed on these ports.
        home_ports = {}
        nodes = self._client.list_cluster_nodes()
        for node in nodes:
            port = self._get_node_data_port(node)
            vlan_port_name = self._client.create_port_and_broadcast_domain(
                node, port, vlan, mtu, ipspace_name)
            home_ports[node] = vlan_port_name

        for network in network_info:
            self._create_vserver_lifs(vserver_name, vserver_client, network,
                                      ipspace_name,
                                      lif_home_ports=home_ports)
            self._create_vserver_routes(vserver_client, network)

        self._create_vserver_admin_lif(vserver_name, vserver_client,
                                       network_info[0], ipspace_name,
                                       lif_home_ports=home_ports)
        if enable_nfs:
            vserver_client.enable_nfs(
                self.configuration.netapp_enabled_share_protocols,
                nfs_config=nfs_config)

        if security_services:
            self._client.setup_security_services(
                security_services, vserver_client, vserver_name,
                self.configuration.netapp_cifs_aes_encryption)

    def _get_valid_ipspace_name(self, network_id):
        """Get IPspace name according to network id."""
        return client_cmode.IPSPACE_PREFIX + network_id.replace('-', '_')

    @na_utils.trace
    def _create_ipspace(self, network_info, client=None):
        """If supported, create an IPspace for a new Vserver."""
        desired_client = client if client else self._client
        if not desired_client.features.IPSPACES:
            return None

        if (network_info['network_allocations'][0]['network_type']
                not in SEGMENTED_NETWORK_TYPES):
            return client_cmode.DEFAULT_IPSPACE

        # NOTE(cknight): Neutron needs cDOT IP spaces because it can provide
        # overlapping IP address ranges for different subnets. That is not
        # believed to be an issue for any of Manila's other network plugins.
        ipspace_id = network_info.get('neutron_net_id')
        if not ipspace_id:
            return client_cmode.DEFAULT_IPSPACE

        ipspace_name = self._get_valid_ipspace_name(ipspace_id)
        desired_client.create_ipspace(ipspace_name)
        return ipspace_name

    @na_utils.trace
    def _create_vserver_lifs(self, vserver_name, vserver_client,
                             network_info, ipspace_name,
                             lif_home_ports=None):
        """Create Vserver data logical interfaces (LIFs)."""
        # We can get node names directly from lif_home_ports in case
        # it was passed as parameter, otherwise a request to the cluster is
        # required
        nodes = (list(lif_home_ports.keys()) if lif_home_ports
                 else self._client.list_cluster_nodes())
        node_network_info = zip(nodes, network_info['network_allocations'])
        # Creating LIF per node
        for node_name, network_allocation in node_network_info:
            lif_home_port = (lif_home_ports[node_name]
                             if lif_home_ports else None)
            lif_name = self._get_lif_name(node_name, network_allocation)
            self._create_lif(vserver_client, vserver_name, ipspace_name,
                             node_name, lif_name, network_allocation,
                             lif_home_port=lif_home_port)

    @na_utils.trace
    def _create_vserver_admin_lif(self, vserver_name, vserver_client,
                                  network_info, ipspace_name,
                                  lif_home_ports=None):
        """Create Vserver admin LIF, if defined."""
        network_allocations = network_info.get('admin_network_allocations')
        if not network_allocations:
            return
        LOG.info('Admin network defined for Vserver %s.', vserver_name)
        home_port = None
        if lif_home_ports:
            # Admin LIF is placed on the first available home port/node.
            node_name, home_port = list(lif_home_ports.items())[0]
        else:
            nodes = self._client.list_cluster_nodes()
            node_name = nodes[0]
        network_allocation = network_allocations[0]
        lif_name = self._get_lif_name(node_name, network_allocation)
        self._create_lif(vserver_client, vserver_name, ipspace_name,
                         node_name, lif_name, network_allocation,
                         lif_home_port=home_port)

    @na_utils.trace
    def _create_vserver_routes(self, vserver_client, network_info):
        """Create Vserver route and set gateways."""
        route_gateways = []
        # NOTE(gouthamr): Use the gateway from the tenant subnet/s
        # for the static routes. Do not configure a route for the admin
        # subnet because fast path routing will work for incoming
        # connections and there are no requirements for outgoing
        # connections on the admin network yet.
        for net_allocation in (network_info['network_allocations']):
            if net_allocation['gateway'] not in route_gateways:
                vserver_client.create_route(net_allocation['gateway'])
                route_gateways.append(net_allocation['gateway'])

    @na_utils.trace
    def _get_node_data_port(self, node):
        # Return the first data port on the node matching the configured
        # search pattern.
        port_names = self._client.list_node_data_ports(node)
        pattern = self.configuration.netapp_port_name_search_pattern
        matched_port_names = [port_name for port_name in port_names
                              if re.match(pattern, port_name)]
        if not matched_port_names:
            raise exception.NetAppException(
                _('Could not find eligible network ports on node %s on '
                  'which to create Vserver LIFs.') % node)
        return matched_port_names[0]

    def _get_lif_name(self, node_name, network_allocation):
        """Get LIF name based on template from manila.conf file."""
        lif_name_args = {
            'node': node_name,
            'net_allocation_id': network_allocation['id'],
        }
        return self.configuration.netapp_lif_name_template % lif_name_args

    @na_utils.trace
    def _create_lif(self, vserver_client, vserver_name, ipspace_name,
                    node_name, lif_name, network_allocation,
                    lif_home_port=None):
        """Creates LIF for Vserver."""
        port = lif_home_port or self._get_node_data_port(node_name)
        vlan = network_allocation['segmentation_id']
        ip_address = network_allocation['ip_address']
        netmask = utils.cidr_to_netmask(network_allocation['cidr'])

        # We can skip the operation if an lif already exists with the same
        # configuration
        if vserver_client.network_interface_exists(
                vserver_name, node_name, port, ip_address, netmask, vlan,
                home_port=lif_home_port):
            msg = ('LIF %(ip)s netmask %(mask)s already exists for '
                   'node %(node)s port %(port)s in vserver %(vserver)s.'
                   % {
                       'ip': ip_address,
                       'mask': netmask,
                       'node': node_name,
                       'vserver': vserver_name,
                       'port': '%(port)s-%(vlan)s' % {'port': port,
                                                      'vlan': vlan}})
            LOG.debug(msg)
            return

        if not lif_home_port:
            mtu = network_allocation.get('mtu') or DEFAULT_MTU
            lif_home_port = (
                self._client.create_port_and_broadcast_domain(
                    node_name, port, vlan, mtu, ipspace_name))

        self._client.create_network_interface(
            ip_address, netmask, node_name, lif_home_port, vserver_name,
            lif_name)

    @na_utils.trace
    def _create_port_and_broadcast_domain(self, ipspace_name, network_info):
        # Create a VLAN port + broadcast domain on every cluster node,
        # pairing nodes with the network allocations in order.
        nodes = self._client.list_cluster_nodes()
        node_network_info = zip(nodes, network_info['network_allocations'])
        for node_name, network_allocation in node_network_info:
            port = self._get_node_data_port(node_name)
            vlan = network_allocation['segmentation_id']
            network_mtu = network_allocation.get('mtu')
            mtu = network_mtu or DEFAULT_MTU
            self._client.create_port_and_broadcast_domain(
                node_name, port, vlan, mtu, ipspace_name)

    @na_utils.trace
    def get_network_allocations_number(self):
        """Get number of network interfaces to be created."""
        return len(self._client.list_cluster_nodes())

    @na_utils.trace
    def get_admin_network_allocations_number(self, admin_network_api):
        """Get number of network allocations for creating admin LIFs."""
        return 1 if admin_network_api else 0

    @na_utils.trace
    def teardown_server(self, server_details, security_services=None):
        """Teardown share server."""
        vserver = server_details.get(
            'vserver_name') if server_details else None

        if not vserver:
            LOG.warning("Vserver not specified for share server being "
                        "deleted. Deletion of share server record will "
                        "proceed anyway.")
            return

        elif not self._client.vserver_exists(vserver):
            LOG.warning("Could not find Vserver for share server being "
                        "deleted: %s.
Deletion of share server "
                        "record will proceed anyway.", vserver)
            return

        self._delete_vserver(vserver, security_services=security_services)

    @na_utils.trace
    def _delete_vserver(self, vserver, security_services=None,
                        needs_lock=True):
        """Delete a Vserver plus IPspace and security services as needed."""
        ipspace_name = None
        ipspaces = self._client.get_ipspaces(vserver_name=vserver)
        if ipspaces:
            ipspace = ipspaces[0]
            ipspace_name = ipspace['ipspace']

        vserver_client = self._get_api_client(vserver=vserver)
        network_interfaces = vserver_client.get_network_interfaces()
        snapmirror_policies = self._client.get_snapmirror_policies(vserver)

        # A '-' in home-port denotes a VLAN port (e.g. e0a-100).
        interfaces_on_vlans = []
        vlans = []
        for interface in network_interfaces:
            if '-' in interface['home-port']:
                interfaces_on_vlans.append(interface)
                vlans.append(interface['home-port'])

        if vlans:
            # NOTE(review): the lock id is derived from the last '-'
            # segment of the joined, sorted port names — with multiple
            # VLANs only one id is used; presumably acceptable for lock
            # granularity. TODO confirm.
            vlans = '-'.join(sorted(set(vlans))) if vlans else None
            vlan_id = vlans.split('-')[-1]
        else:
            vlan_id = None

        def _delete_vserver_without_lock():
            # we already deleted the neutron network allocations,
            # make sure those are no longer used
            for interface in network_interfaces:
                if interface['administrative-status'] != 'down':
                    self._client.disable_network_interface(
                        vserver, interface['interface-name'])
            # NOTE(dviroel): always delete all policies before deleting the
            # vserver
            for policy in snapmirror_policies:
                vserver_client.delete_snapmirror_policy(policy)
            # NOTE(dviroel): Attempt to delete all vserver peering
            # created by replication
            self._delete_vserver_peers(vserver)

            self._client.delete_vserver(
                vserver, vserver_client,
                security_services=security_services)

            if ipspace_name is None:
                return

            ipspace_deleted = self._client.delete_ipspace(ipspace_name)
            ports = set()
            if ipspace_deleted:
                # we don't want to leave any ports behind, clean them all up
                ports.update(ipspace['ports'])
            # NOTE(dviroel): only delete vlans if they are not being used
            # by any ipspaces and data vservers.
            else:
                broadcast_domains = ipspace['broadcast-domains']
                # NOTE(carthaca): filter for degraded ports, those where
                # reachability is down (e.g. because neutron port is gone)
                ports.update(self._client.get_degraded_ports(
                    broadcast_domains, ipspace_name))
                # make sure to delete ports of the vserver we are currently
                # deleting (may not be marked degraded, yet)
                for interface in interfaces_on_vlans:
                    port = f"{interface['home-node']}:{interface['home-port']}"
                    ports.add(port)
            self._delete_port_vlans(self._client, ports)

        @utils.synchronized('netapp-VLAN-%s' % vlan_id, external=True)
        def _delete_vserver_with_lock():
            _delete_vserver_without_lock()

        if needs_lock:
            return _delete_vserver_with_lock()
        else:
            return _delete_vserver_without_lock()

    def _delete_port_vlans(_self, client, ports):
        """Delete Port's VLAN configuration

        must be called with a cluster client
        """
        # Each entry is 'node:port-vlan'; failures are logged, not raised.
        for port_name in ports:
            try:
                node, port = port_name.split(':')
                port, vlan = port.split('-')
                client.delete_vlan(node, port, vlan)
            except exception.NetAppException:
                LOG.exception("Deleting Vserver VLAN failed.")

    @na_utils.trace
    def _delete_vserver_peers(self, vserver):
        # Best-effort removal of every peer relationship of this vserver.
        vserver_peers = self._get_vserver_peers(vserver=vserver)
        for peer in vserver_peers:
            self._delete_vserver_peer(peer.get('vserver'),
                                      peer.get('peer-vserver'))

    def get_configured_ip_versions(self):
        # IPv4 always; IPv6 only when enabled in the cluster net options.
        versions = [4]
        options = self._client.get_net_options()
        if options['ipv6-enabled']:
            versions.append(6)
        return versions

    @na_utils.trace
    def create_replica(self, context, replica_list, new_replica,
                       access_rules, share_snapshots, share_server=None):
        """Creates the new replica on this backend and sets up SnapMirror.

        It creates the peering between the associated vservers before
        creating the share replica and setting up the SnapMirror.
        """
        # 1. Retrieve source and destination vservers from both replicas,
        # active and and new_replica
        src_vserver, dst_vserver = self._get_vservers_from_replicas(
            context, replica_list, new_replica)

        # 2.
Retrieve the active replica host's client and cluster name
        src_replica = self.find_active_replica(replica_list)
        src_replica_host = share_utils.extract_host(
            src_replica['host'], level='backend_name')
        src_replica_client = data_motion.get_client_for_backend(
            src_replica_host, vserver_name=src_vserver)
        # Cluster name is needed for setting up the vserver peering
        src_replica_cluster_name = src_replica_client.get_cluster_name()

        # 3. Retrieve new replica host's client
        new_replica_host = share_utils.extract_host(
            new_replica['host'], level='backend_name')
        new_replica_client = data_motion.get_client_for_backend(
            new_replica_host, vserver_name=dst_vserver)
        new_replica_cluster_name = new_replica_client.get_cluster_name()

        if (dst_vserver != src_vserver
                and not self._get_vserver_peers(dst_vserver, src_vserver)):
            # 3.1. Request vserver peer creation from new_replica's host
            # to active replica's host
            new_replica_client.create_vserver_peer(
                dst_vserver, src_vserver,
                peer_cluster_name=src_replica_cluster_name)

            # 3.2. Accepts the vserver peering using active replica host's
            # client (inter-cluster only)
            if new_replica_cluster_name != src_replica_cluster_name:
                src_replica_client.accept_vserver_peer(src_vserver,
                                                       dst_vserver)

        return (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
                create_replica(context, replica_list, new_replica,
                               access_rules, share_snapshots))

    def delete_replica(self, context, replica_list, replica,
                       share_snapshots, share_server=None):
        """Removes the replica on this backend and destroys SnapMirror.

        Removes the replica, destroys the SnapMirror and delete the vserver
        peering if needed.
        """
        vserver, peer_vserver = self._get_vservers_from_replicas(
            context, replica_list, replica)
        super(NetAppCmodeMultiSVMFileStorageLibrary, self).delete_replica(
            context, replica_list, replica, share_snapshots)

        # Fix for bug 1996907- If snapmirror relationship still exist,
        # deletes those again.
        snapmirrors_des_list = self._get_snapmirrors_destinations(
            vserver, peer_vserver)
        snapmirrors_des_list_from_peer = self._get_snapmirrors_destinations(
            peer_vserver, vserver)
        if snapmirrors_des_list or snapmirrors_des_list_from_peer:
            super(NetAppCmodeMultiSVMFileStorageLibrary, self).\
                delete_replica(context, replica_list, replica,
                               share_snapshots)

        # Check if there are no remaining SnapMirror connections and if a
        # vserver peering exists and delete it.
        snapmirrors_from_local = self._get_snapmirrors(vserver, peer_vserver)
        snapmirrors_from_peer = self._get_snapmirrors(peer_vserver, vserver)
        peers = self._get_vserver_peers(peer_vserver, vserver)
        if (not (snapmirrors_from_local or snapmirrors_from_peer
                 or snapmirrors_des_list or snapmirrors_des_list_from_peer)
                and peers):
            self._delete_vserver_peer(peer_vserver, vserver)

    def manage_server(self, context, share_server, identifier,
                      driver_options):
        """Manages a vserver by renaming it and returning backend_details."""
        new_vserver_name = self._get_vserver_name(share_server['id'])
        old_vserver_name = self._get_correct_vserver_old_name(identifier)

        if new_vserver_name != old_vserver_name:
            self._client.rename_vserver(old_vserver_name, new_vserver_name)

        backend_details = {
            'vserver_name': new_vserver_name,
        }
        if self.is_nfs_config_supported:
            nfs_config = self._client.get_nfs_config(
                list(self.NFS_CONFIG_EXTRA_SPECS_MAP.values()),
                new_vserver_name)
            backend_details['nfs_config'] = jsonutils.dumps(nfs_config)

        return new_vserver_name, backend_details

    def unmanage_server(self, server_details, security_services=None):
        # Nothing to undo on the backend; the vserver is left in place.
        pass

    def get_share_server_network_info(
            self, context, share_server, identifier, driver_options):
        """Returns a list of IPs for each vserver network interface."""
        vserver_name = self._get_correct_vserver_old_name(identifier)
        vserver, vserver_client = self._get_vserver(
            vserver_name=vserver_name)
        interfaces = vserver_client.get_network_interfaces()
        allocations = []
        for lif in interfaces:
            allocations.append(lif['address'])
        return allocations

    def _get_correct_vserver_old_name(self, identifier):
        # In case vserver_name includes the template, we check and add it
        # here
        if not self._client.vserver_exists(identifier):
            return self._get_vserver_name(identifier)
        return identifier

    def _get_snapmirrors(self, vserver, peer_vserver):
        return self._client.get_snapmirrors(
            source_vserver=vserver, dest_vserver=peer_vserver)

    def _get_snapmirrors_destinations(self, vserver, peer_vserver):
        return self._client.get_snapmirror_destinations(
            source_vserver=vserver, dest_vserver=peer_vserver)

    def _get_vservers_from_replicas(self, context, replica_list,
                                    new_replica):
        # Resolve (active replica vserver, new replica vserver) names.
        active_replica = self.find_active_replica(replica_list)
        dm_session = data_motion.DataMotionSession()
        vserver = dm_session.get_vserver_from_share(active_replica)
        peer_vserver = dm_session.get_vserver_from_share(new_replica)
        return vserver, peer_vserver

    def _get_vserver_peers(self, vserver=None, peer_vserver=None):
        return self._client.get_vserver_peers(vserver, peer_vserver)

    def _create_vserver_peer(self, context, vserver, peer_vserver):
        self._client.create_vserver_peer(vserver, peer_vserver)

    def _delete_vserver_peer(self, vserver, peer_vserver):
        self._client.delete_vserver_peer(vserver, peer_vserver)

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        # NOTE(dviroel): If both parent and child shares are in the same
        # host, they belong to the same cluster, and we can skip all the
        # processing below. Group snapshot is always to the same host too,
        # so we can skip.
        is_group_snapshot = share.get(
            'source_share_group_snapshot_member_id')
        if not is_group_snapshot and parent_share['host'] != share['host']:
            # 1. Retrieve source and destination vservers from source and
            # destination shares
            dm_session = data_motion.DataMotionSession()
            src_vserver = dm_session.get_vserver_from_share(parent_share)
            dest_vserver = dm_session.get_vserver_from_share_server(
                share_server)

            # 2.
Retrieve the source share host's client and cluster name
            src_share_host = share_utils.extract_host(
                parent_share['host'], level='backend_name')
            src_share_client = data_motion.get_client_for_backend(
                src_share_host, vserver_name=src_vserver)
            # Cluster name is needed for setting up the vserver peering
            src_share_cluster_name = src_share_client.get_cluster_name()

            # 3. Retrieve new share host's client
            dest_share_host = share_utils.extract_host(
                share['host'], level='backend_name')
            dest_share_client = data_motion.get_client_for_backend(
                dest_share_host, vserver_name=dest_vserver)
            dest_share_cluster_name = dest_share_client.get_cluster_name()

            # If source and destination shares are placed in a different
            # clusters, we'll need the both vserver peered.
            if src_share_cluster_name != dest_share_cluster_name:
                if not self._get_vserver_peers(dest_vserver, src_vserver):
                    # 3.1. Request vserver peer creation from new_replica's
                    # host to active replica's host
                    dest_share_client.create_vserver_peer(
                        dest_vserver, src_vserver,
                        peer_cluster_name=src_share_cluster_name)

                    # 3.2. Accepts the vserver peering using active replica
                    # host's client
                    src_share_client.accept_vserver_peer(src_vserver,
                                                         dest_vserver)

        return (super(NetAppCmodeMultiSVMFileStorageLibrary, self)
                .create_share_from_snapshot(
                    context, share, snapshot, share_server=share_server,
                    parent_share=parent_share))

    @na_utils.trace
    def _is_share_server_compatible(self, share_server,
                                    expected_nfs_config):
        """Check if the share server has the given nfs config

        The None and the default_nfs_config should be considered as the
        same configuration.
        """
        nfs_config = share_server.get('backend_details', {}).get(
            'nfs_config')
        share_server_nfs = (jsonutils.loads(nfs_config)
                            if nfs_config else None)
        if share_server_nfs == expected_nfs_config:
            return True
        elif (share_server_nfs is None
              and expected_nfs_config == self._default_nfs_config):
            return True
        elif (expected_nfs_config is None
              and share_server_nfs == self._default_nfs_config):
            return True

        return False

    def choose_share_server_compatible_with_share(self, context,
                                                  share_servers, share,
                                                  snapshot=None,
                                                  share_group=None,
                                                  encryption_key_ref=None):
        """Method that allows driver to choose share server for provided share.

        If compatible share-server is not found, method should return None.

        :param context: Current context
        :param share_servers: list with share-server models
        :param share: share model
        :param snapshot: snapshot model
        :param share_group: ShareGroup model with shares
        :param encryption_key_ref: Encryption key reference
        :returns: share-server or None
        """
        if not share_servers:
            # No share server to reuse
            return None
        nfs_config = None
        extra_specs = share_types.get_extra_specs_from_share(share)
        if self.is_nfs_config_supported:
            nfs_config = self._get_nfs_config_provisioning_options(
                extra_specs)

        provisioning_options = self._get_provisioning_options(extra_specs)
        # Get FPolicy extra specs to avoid incompatible share servers
        fpolicy_ext_to_include = provisioning_options.get(
            'fpolicy_extensions_to_include')
        fpolicy_ext_to_exclude = provisioning_options.get(
            'fpolicy_extensions_to_exclude')
        fpolicy_file_operations = provisioning_options.get(
            'fpolicy_file_operations')

        # Avoid the reuse of 'dp_protection' vservers:
        for share_server in share_servers:
            if self._check_reuse_share_server(
                    share_server, nfs_config, share=share,
                    share_group=share_group,
                    fpolicy_ext_include=fpolicy_ext_to_include,
                    fpolicy_ext_exclude=fpolicy_ext_to_exclude,
                    fpolicy_file_operations=fpolicy_file_operations,
                    encryption_key_ref=encryption_key_ref):
                return share_server

        # There is no compatible share server to be reused
        return None

    @na_utils.trace
    def _check_reuse_share_server(self, share_server, nfs_config,
                                  share=None, share_group=None,
                                  fpolicy_ext_include=None,
                                  fpolicy_ext_exclude=None,
                                  fpolicy_file_operations=None,
                                  encryption_key_ref=None):
        """Check whether the share_server can be reused or not."""
        LOG.debug('Checking if the encryption key ref passed is already '
                  'configured for the existing share_servers')
        if (encryption_key_ref and
                encryption_key_ref != share_server['encryption_key_ref']):
            # NOTE(review): _() is being called with two arguments here and
            # the '%(server_id)s' placeholder is never interpolated (should
            # be _('...') % {...}); flagged but left unchanged since this
            # method continues beyond the visible chunk.
            msg = _('The available share server %(server_id)s is already'
                    'configured with a different encryption-key-ref',
                    {'server_id': share_server['id']})
            LOG.warning(msg)
            return False
        if (share_group and
                share_group.get('share_server_id') != share_server['id']):
            return False

        backend_name = share_utils.extract_host(share_server['host'],
                                                level='backend_name')
        try:
            vserver_name, client = self._get_vserver(
                share_server, backend_name=backend_name)
        except (exception.InvalidInput, exception.VserverNotSpecified,
                exception.VserverNotFound) as error:
            LOG.warning("Could not determine vserver for reuse of "
                        "share server. Share server: %(ss)s - Error: %(err)s",
                        {'ss': share_server, 'err': error})
            return False
        vserver_info = client.get_vserver_info(vserver_name)
        if (vserver_info.get('operational_state') != 'running'
                or vserver_info.get('state') != 'running'
                or vserver_info.get('subtype') != 'default'):
            return False

        if share:
            share_pool = share_utils.extract_host(share['host'],
                                                  level='pool')
            if self._is_flexgroup_pool(share_pool):
                share_pool_list = self._get_flexgroup_aggregate_list(
                    share_pool)
            else:
                share_pool_list = [share_pool]
            aggr_list = client.list_vserver_aggregates()
            if not set(share_pool_list).issubset(set(aggr_list)):
                return False

        if self.is_nfs_config_supported:
            # NOTE(felipe_rodrigues): Do not check that the share nfs_config
            # matches with the group nfs_config, because the API guarantees
            # that the share type is an element of the group types.
            return self._is_share_server_compatible(share_server, nfs_config)

        if fpolicy_ext_include or fpolicy_ext_exclude:
            fpolicies = client.get_fpolicy_policies_status()
            if len(fpolicies) >= self.FPOLICY_MAX_VSERVER_POLICIES:
                # This share server already reached its maximum number of
                # policies; we need to check if we can reuse one, otherwise
                # it is not suitable for this share.
                reusable_scope = self._find_reusable_fpolicy_scope(
                    share, client,
                    fpolicy_extensions_to_include=fpolicy_ext_include,
                    fpolicy_extensions_to_exclude=fpolicy_ext_exclude,
                    fpolicy_file_operations=fpolicy_file_operations)
                if not reusable_scope:
                    return False
        return True

    @na_utils.trace
    def choose_share_server_compatible_with_share_group(
            self, context, share_servers, share_group_ref,
            share_group_snapshot=None):
        """Choose the server compatible with group.

        If the NFS configuration is supported, it will check that the group
        types agree for the NFS extra-specs values.
        """
        if not share_servers:
            # No share server to reuse
            return None

        nfs_config = None
        if self.is_nfs_config_supported:
            nfs_config = self._get_nfs_config_share_group(share_group_ref)

        # NOTE(dviroel): FPolicy extra-specs won't be conflicting, since
        # multiple policies can be created. The maximum number of policies or
        # the reusability of existing ones, can only be analyzed at share
        # instance creation.
        for share_server in share_servers:
            if self._check_reuse_share_server(share_server, nfs_config):
                return share_server
        return None

    @na_utils.trace
    def _get_nfs_config_share_group(self, share_group_ref):
        """Get the NFS config of the share group.

        In case the group types do not agree for the NFS config, it throws
        an exception.
        """
        nfs_config = None
        first = True
        # The first group type establishes the expected NFS config; every
        # other type must produce exactly the same config.
        for st in share_group_ref.get('share_types', []):
            extra_specs = share_types.get_share_type_extra_specs(
                st['share_type_id'])
            if first:
                self._check_nfs_config_extra_specs_validity(extra_specs)
                nfs_config = self._get_nfs_config_provisioning_options(
                    extra_specs)
                first = False
                continue
            type_nfs_config = self._get_nfs_config_provisioning_options(
                extra_specs)
            if nfs_config != type_nfs_config:
                msg = _("The specified share_types cannot have "
                        "conflicting values for the NFS configuration "
                        "extra-specs.")
                raise exception.InvalidInput(reason=msg)
        return nfs_config

    @na_utils.trace
    def manage_existing(self, share, driver_options, share_server=None):
        # In case NFS config is supported, the share's nfs_config must be the
        # same as the server
        if share_server and self.is_nfs_config_supported:
            extra_specs = share_types.get_extra_specs_from_share(share)
            nfs_config = self._get_nfs_config_provisioning_options(extra_specs)
            if not self._is_share_server_compatible(share_server, nfs_config):
                args = {'server_id': share_server['id']}
                msg = _('Invalid NFS configuration for the server '
                        '%(server_id)s . The extra-specs must match the '
                        'values of NFS of the server.')
                raise exception.NetAppException(msg % args)

        return (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
                manage_existing(share, driver_options,
                                share_server=share_server))

    @na_utils.trace
    def _check_compatibility_using_svm_dr(
            self, src_client, dest_client, shares_request_spec, pools):
        """Check whether the share server migration can use SVM DR.

        :param src_client: source cluster client.
        :param dest_client: destination cluster client.
        :param shares_request_spec: shares specifications.
        :param pools: pools to be used during the migration.
        :returns: server migration mechanism name and compatibility result
            example: (svm_dr, True).
""" method = SERVER_MIGRATE_SVM_DR if (not src_client.is_svm_dr_supported() or not dest_client.is_svm_dr_supported()): msg = _("Cannot perform server migration because at leat one of " "the backends doesn't support SVM DR.") LOG.error(msg) return method, False # Check that server does not have any FlexGroup volume. if src_client.is_flexgroup_supported(): dm_session = data_motion.DataMotionSession() for req_spec in shares_request_spec.get('shares_req_spec', []): share_instance = req_spec.get('share_instance_properties', {}) host = share_instance.get('host') if self.is_flexgroup_destination_host(host, dm_session): msg = _("Cannot perform server migration since a " "FlexGroup was encountered in share server to be " "migrated.") LOG.error(msg) return method, False # Check capacity. server_total_size = (shares_request_spec.get('shares_size', 0) + shares_request_spec.get('snapshots_size', 0)) # NOTE(dviroel): If the backend has a 'max_over_subscription_ratio' # configured and greater than 1, we'll consider thin provisioning # enable for all shares. thin_provisioning = self.configuration.max_over_subscription_ratio > 1 if self.configuration.netapp_server_migration_check_capacity is True: if not self._check_capacity_compatibility(pools, thin_provisioning, server_total_size): msg = _("Cannot perform server migration because destination " "host doesn't have enough free space.") LOG.error(msg) return method, False return method, True @na_utils.trace def _get_job_uuid(self, job): """Get the uuid of a job.""" job = job.get("job", {}) return job.get("uuid") @na_utils.trace def _wait_for_operation_status( self, operation_id, func_get_operation, desired_status='success', timeout=None): """Waits until a given operation reachs the desired status. :param operation_id: ID of the operation to be searched. :param func_get_operation: Function to be used to get the operation details. :param desired_status: Operation expected status. 
:param timeout: How long (in seconds) should the driver wait for the status to be reached. """ if not timeout: timeout = ( self.configuration.netapp_server_migration_state_change_timeout ) interval = 10 retries = int(timeout / interval) or 1 @utils.retry(exception.ShareBackendException, interval=interval, retries=retries, backoff_rate=1) def wait_for_status(): # Get the job based on its id. operation = func_get_operation(operation_id) status = operation.get("status") or operation.get("state") if status != desired_status: msg = _( "Operation %(operation_id)s didn't reach status " "%(desired_status)s. Current status is %(status)s.") % { 'operation_id': operation_id, 'desired_status': desired_status, 'status': status } LOG.debug(msg) # Failed, no need to retry. if status == 'error': msg = _('Operation %(operation_id)s is in error status.' 'Reason: %(message)s') raise exception.NetAppException( msg % {'operation_id': operation_id, 'message': operation.get('message')}) # Didn't fail, so we can retry. raise exception.ShareBackendException(msg) elif status == desired_status: msg = _( 'Operation %(operation_id)s reached status %(status)s.') LOG.debug( msg, {'operation_id': operation_id, 'status': status}) return try: wait_for_status() except exception.NetAppException: raise except exception.ShareBackendException: msg_args = {'operation_id': operation_id, 'status': desired_status} msg = _('Timed out while waiting for operation %(operation_id)s ' 'to reach status %(status)s') % msg_args raise exception.NetAppException(msg) @na_utils.trace def _check_compatibility_for_svm_migrate( self, source_cluster_name, source_share_server_name, source_share_server, dest_aggregates, dest_client): """Checks if the migration can be performed using SVM Migrate. 1. Send the request to the backed to check if the migration is possible 2. 
Wait until the job finishes checking the migration status """ # Reuse network information from the source share server in the SVM # Migrate if the there was no share network changes. network_info = { 'network_allocations': source_share_server['network_allocations'], 'neutron_subnet_id': source_share_server['share_network_subnets'][0].get( 'neutron_subnet_id') } # Check the LIF creation on destination cluster when # 'netapp_restrict_lif_creation_per_ha_pair' option is set # to true. if self.configuration.netapp_restrict_lif_creation_per_ha_pair: self._check_data_lif_count_limit_reached_for_ha_pair(dest_client) # 2. Create new ipspace, port and broadcast domain. node_name = self._client.list_cluster_nodes()[0] port = self._get_node_data_port(node_name) vlan = network_info['network_allocations'][0]['segmentation_id'] destination_ipspace = self._client.get_ipspace_name_for_vlan_port( node_name, port, vlan) or self._create_ipspace( network_info, client=dest_client) self._create_port_and_broadcast_domain( destination_ipspace, network_info) def _cleanup_ipspace(ipspace): if not dest_client.delete_ipspace(ipspace): LOG.info( 'Did not delete ipspace used to check the compatibility ' 'for SVM Migrate. It is possible that it was reused and ' 'there are other entities consuming it.') if vlan: port = None for node in dest_client.list_cluster_nodes(): port = port or self._get_node_data_port(node) dest_client.delete_vlan(node, port, vlan) # 1. Sends the request to the backend. try: job = dest_client.svm_migration_start( source_cluster_name, source_share_server_name, dest_aggregates, dest_ipspace=destination_ipspace, check_only=True) except Exception: LOG.error('Failed to check compatibility for migration.') _cleanup_ipspace(destination_ipspace) raise job_id = self._get_job_uuid(job) try: # 2. Wait until the job to check the migration status concludes. 
self._wait_for_operation_status( job_id, dest_client.get_migration_check_job_state) _cleanup_ipspace(destination_ipspace) return True except exception.NetAppException: # Performed the check with the given parameters and the backend # returned an error, so the migration is not compatible _cleanup_ipspace(destination_ipspace) return False @na_utils.trace def _check_for_migration_support( self, src_client, dest_client, source_share_server, shares_request_spec, src_cluster_name, pools): """Checks if the migration is supported and chooses the way to do it In terms of performance, SVM Migrate is more adequate and it should be prioritised over a SVM DR migration. If both source and destination clusters do not support SVM Migrate, then SVM DR is the option to be used. 1. Checks if both source and destination clients support SVM Migrate. 2. Requests the migration. """ # 1. Checks if both source and destination clients support SVM Migrate. if (dest_client.is_svm_migrate_supported() and src_client.is_svm_migrate_supported()): source_share_server_name = ( source_share_server["backend_details"]["vserver_name"]) # Check if the migration is supported. try: result = self._check_compatibility_for_svm_migrate( src_cluster_name, source_share_server_name, source_share_server, self._find_matching_aggregates(), dest_client) return SERVER_MIGRATE_SVM_MIGRATE, result except Exception: LOG.error('Failed to check the compatibility for the share ' 'server migration using SVM Migrate.') return SERVER_MIGRATE_SVM_MIGRATE, False # SVM Migrate is not supported, try to check the compatibility using # SVM DR. 
return self._check_compatibility_using_svm_dr( src_client, dest_client, shares_request_spec, pools) @na_utils.trace def share_server_migration_check_compatibility( self, context, source_share_server, dest_host, old_share_network, new_share_network, shares_request_spec): not_compatible = { 'compatible': False, 'writable': None, 'nondisruptive': None, 'preserve_snapshots': None, 'migration_cancel': None, 'migration_get_progress': None, 'share_network_id': None, } # We need cluster creds, of course if not self._have_cluster_creds: msg = _("Cluster credentials have not been configured with this " "share driver. Cannot perform server migration operation.") LOG.error(msg) return not_compatible # Vserver will spread across aggregates in this implementation if share_utils.extract_host(dest_host, level='pool') is not None: msg = _("Cannot perform server migration to a specific pool. " "Please choose a destination host 'host@backend' as " "destination.") LOG.error(msg) return not_compatible src_backend_name = share_utils.extract_host( source_share_server['host'], level='backend_name') src_vserver, src_client = self._get_vserver( source_share_server, backend_name=src_backend_name) dest_backend_name = share_utils.extract_host(dest_host, level='backend_name') # Block migration within the same backend. if src_backend_name == dest_backend_name: msg = _("Cannot perform server migration within the same backend. " "Please choose a destination host different from the " "source.") LOG.error(msg) return not_compatible src_cluster_name = src_client.get_cluster_name() # NOTE(dviroel): This call is supposed to made in the destination host dest_cluster_name = self._client.get_cluster_name() # Must be in different clusters too, SVM-DR restriction if src_cluster_name == dest_cluster_name: msg = _("Cannot perform server migration within the same cluster. 
" "Please choose a destination host that's in a different " "cluster.") LOG.error(msg) return not_compatible # Blocking multiple subnets new_subnets = new_share_network.get('share_network_subnets', []) old_subnets = old_share_network.get('share_network_subnets', []) if (len(new_subnets) != 1) or (len(old_subnets) != 1): msg = _("Cannot perform server migration for share network" "with multiple subnets.") LOG.error(msg) return not_compatible pools = self._get_pools() # NOTE(dviroel): These clients can only be used for non-tunneling # requests. dst_client = data_motion.get_client_for_backend(dest_backend_name, vserver_name=None) migration_method, compatibility = self._check_for_migration_support( src_client, dst_client, source_share_server, shares_request_spec, src_cluster_name, pools) if not compatibility: return not_compatible # Blocking different security services for now if old_share_network['id'] != new_share_network['id']: new_sec_services = new_share_network.get('security_services', []) old_sec_services = old_share_network.get('security_services', []) if new_sec_services or old_sec_services: new_sec_serv_ids = [ss['id'] for ss in new_sec_services] old_sec_serv_ids = [ss['id'] for ss in old_sec_services] if not set(new_sec_serv_ids) == set(old_sec_serv_ids): msg = _("Cannot perform server migration for different " "security services. 
Please choose a suitable " "share network that matches the source security " "service.") LOG.error(msg) return not_compatible # Check 'netapp_flexvol_encryption' and 'revert_to_snapshot_support' specs_to_validate = ('netapp_flexvol_encryption', 'revert_to_snapshot_support') for req_spec in shares_request_spec.get('shares_req_spec', []): extra_specs = req_spec.get('share_type', {}).get('extra_specs', {}) for spec in specs_to_validate: if extra_specs.get(spec) and not pools[0][spec]: msg = _("Cannot perform server migration since the " "destination host doesn't support the required " "extra-spec %s.") % spec LOG.error(msg) return not_compatible # TODO(dviroel): disk_type extra-spec nondisruptive = (migration_method == SERVER_MIGRATE_SVM_MIGRATE) compatibility = { 'compatible': True, 'writable': True, 'nondisruptive': nondisruptive, 'preserve_snapshots': True, 'share_network_id': new_share_network['id'], 'migration_cancel': True, 'migration_get_progress': False, } return compatibility @na_utils.trace def _migration_start_using_svm_dr( self, source_share_server, dest_share_server): """Start share server migration using SVM DR. 1. Create vserver peering between source and destination 2. Create SnapMirror """ src_backend_name = share_utils.extract_host( source_share_server['host'], level='backend_name') src_vserver, src_client = self._get_vserver( share_server=source_share_server, backend_name=src_backend_name) src_cluster = src_client.get_cluster_name() dest_backend_name = share_utils.extract_host( dest_share_server['host'], level='backend_name') dest_vserver, dest_client = self._get_vserver( share_server=dest_share_server, backend_name=dest_backend_name) dest_cluster = dest_client.get_cluster_name() # 1. Check and create vserver peer if needed if not self._get_vserver_peers(dest_vserver, src_vserver): # Request vserver peer creation from destination to source # NOTE(dviroel): vserver peering rollback is handled by # '_delete_vserver' function. 
dest_client.create_vserver_peer( dest_vserver, src_vserver, peer_cluster_name=src_cluster) # Accepts the vserver peering using active replica host's # client (inter-cluster only) if dest_cluster != src_cluster: src_client.accept_vserver_peer(src_vserver, dest_vserver) # 2. Create SnapMirror dm_session = data_motion.DataMotionSession() try: dm_session.create_snapmirror_svm(source_share_server, dest_share_server) except Exception: # NOTE(dviroel): vserver peer delete will be handled on vserver # teardown dm_session.cancel_snapmirror_svm(source_share_server, dest_share_server) msg_args = { 'src': source_share_server['id'], 'dest': dest_share_server['id'], } msg = _('Could not initialize SnapMirror between %(src)s and ' '%(dest)s vservers.') % msg_args raise exception.NetAppException(message=msg) return None @na_utils.trace def _migration_start_using_svm_migrate( self, context, source_share_server, dest_share_server, src_client, dest_client): """Start share server migration using SVM Migrate. 1. Check if share network reusage is supported 2. Create a new ipspace, port and broadcast domain to the dest server 3. Send the request start the share server migration 4. Read the job id and get the id of the migration 5. Set the migration uuid in the backend details """ # 1. Check if share network reusage is supported # NOTE(carloss): if share network was not changed, SVM migrate can # reuse the network allocation from the source share server, so as # Manila haven't made new allocations, we can just get allocation data # from the source share server. if not dest_share_server['network_allocations']: share_server_network_info = source_share_server else: share_server_network_info = dest_share_server # Reuse network information from the source share server in the SVM # Migrate if the there was no share network changes. 
network_info = { 'network_allocations': share_server_network_info['network_allocations'], 'neutron_subnet_id': share_server_network_info['share_network_subnets'][0].get( 'neutron_subnet_id') } # 2. Create new ipspace, port and broadcast domain. node_name = self._client.list_cluster_nodes()[0] port = self._get_node_data_port(node_name) vlan = network_info['network_allocations'][0]['segmentation_id'] destination_ipspace = self._client.get_ipspace_name_for_vlan_port( node_name, port, vlan) or self._create_ipspace( network_info, client=dest_client) self._create_port_and_broadcast_domain( destination_ipspace, network_info) # Prepare the migration request. src_cluster_name = src_client.get_cluster_name() source_share_server_name = ( source_share_server["backend_details"]["vserver_name"]) # 3. Send the migration request to ONTAP. try: result = dest_client.svm_migration_start( src_cluster_name, source_share_server_name, self._find_matching_aggregates(), dest_ipspace=destination_ipspace) # 4. Read the job id and get the id of the migration. result_job = result.get("job", {}) job_details = dest_client.get_job(result_job.get("uuid")) job_description = job_details.get('description') migration_uuid = job_description.split('/')[-1] except Exception: # As it failed, we must remove the ipspace, ports and broadcast # domain. dest_client.delete_ipspace(destination_ipspace) msg = _("Unable to start the migration for share server %s." % source_share_server['id']) raise exception.NetAppException(msg) # 5. Returns migration data to be saved as backend details. server_info = { "backend_details": { na_utils.MIGRATION_OPERATION_ID_KEY: migration_uuid } } return server_info @na_utils.trace def share_server_migration_start( self, context, source_share_server, dest_share_server, share_intances, snapshot_instances): """Start share server migration. This method will choose the best migration strategy to perform the migration, based on the storage functionalities support. 
""" src_backend_name = share_utils.extract_host( source_share_server['host'], level='backend_name') dest_backend_name = share_utils.extract_host( dest_share_server['host'], level='backend_name') dest_client = data_motion.get_client_for_backend( dest_backend_name, vserver_name=None) __, src_client = self._get_vserver( share_server=source_share_server, backend_name=src_backend_name) use_svm_migrate = ( src_client.is_svm_migrate_supported() and dest_client.is_svm_migrate_supported()) if use_svm_migrate: result = self._migration_start_using_svm_migrate( context, source_share_server, dest_share_server, src_client, dest_client) else: result = self._migration_start_using_svm_dr( source_share_server, dest_share_server) msg_args = { 'src': source_share_server['id'], 'dest': dest_share_server['id'], 'migration_method': 'SVM Migrate' if use_svm_migrate else 'SVM DR' } msg = _('Starting share server migration from %(src)s to %(dest)s ' 'using %(migration_method)s as migration method.') LOG.info(msg, msg_args) return result def _get_snapmirror_svm(self, source_share_server, dest_share_server): dm_session = data_motion.DataMotionSession() try: snapmirrors = dm_session.get_snapmirrors_svm( source_share_server, dest_share_server) except netapp_api.NaApiError: msg_args = { 'src': source_share_server['id'], 'dest': dest_share_server['id'] } msg = _("Could not retrieve snapmirrors between source " "%(src)s and destination %(dest)s vServers.") % msg_args LOG.exception(msg) raise exception.NetAppException(message=msg) return snapmirrors @na_utils.trace def _share_server_migration_continue_svm_dr( self, source_share_server, dest_share_server): """Continues a share server migration using SVM DR.""" snapmirrors = self._get_snapmirror_svm(source_share_server, dest_share_server) if not snapmirrors: msg_args = { 'src': source_share_server['id'], 'dest': dest_share_server['id'] } msg = _("No snapmirror relationship was found between source " "%(src)s and destination %(dest)s vServers.") % 
msg_args LOG.exception(msg) raise exception.NetAppException(message=msg) snapmirror = snapmirrors[0] in_progress_status = ['preparing', 'transferring', 'finalizing'] mirror_state = snapmirror.get('mirror-state') status = snapmirror.get('relationship-status') if mirror_state != 'snapmirrored' and status in in_progress_status: LOG.debug("Data transfer still in progress.") return False elif mirror_state == 'snapmirrored' and status == 'idle': LOG.info("Source and destination vServers are now snapmirrored.") return True msg = _("Snapmirror is not ready yet. The current mirror state is " "'%(mirror_state)s' and relationship status is '%(status)s'.") msg_args = { 'mirror_state': mirror_state, 'status': status, } LOG.debug(msg, msg_args) return False @na_utils.trace def _share_server_migration_continue_svm_migrate(self, dest_share_server, migration_id): """Continues the migration for a share server. :param dest_share_server: reference for the destination share server. :param migration_id: ID of the migration. """ dest_client = data_motion.get_client_for_host( dest_share_server['host']) try: result = dest_client.svm_migration_get(migration_id) except netapp_api.NaApiError as e: msg = (_('Failed to continue the migration for share server ' '%(server_id)s. Reason: %(reason)s' ) % {'server_id': dest_share_server['id'], 'reason': e.message} ) raise exception.NetAppException(message=msg) return ( result.get("state") == na_utils.MIGRATION_STATE_READY_FOR_CUTOVER) @na_utils.trace def share_server_migration_continue(self, context, source_share_server, dest_share_server, share_instances, snapshot_instances): """Continues the migration of a share server.""" # If the migration operation was started using SVM migrate, it # returned a migration ID to get information about the job afterwards. migration_id = self._get_share_server_migration_id( dest_share_server) # Checks the progress for a SVM migrate migration. 
        if migration_id:
            return self._share_server_migration_continue_svm_migrate(
                dest_share_server, migration_id)

        # Checks the progress of a SVM DR Migration.
        return self._share_server_migration_continue_svm_dr(
            source_share_server, dest_share_server)

    def _setup_networking_for_destination_vserver(
            self, vserver_client, vserver_name, new_net_allocations):
        # Configure the destination vserver's network with the new
        # allocations, serializing per-VLAN to avoid concurrent setup races.
        ipspace_name = vserver_client.get_vserver_ipspace(vserver_name)
        # NOTE(dviroel): Security service and NFS configuration should be
        # handled by SVM DR, so no changes will be made here.
        vlan = new_net_allocations[0]['segmentation_id']

        @utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
        def setup_network_for_destination_vserver():
            self._setup_network_for_vserver(
                vserver_name, vserver_client, new_net_allocations,
                ipspace_name, enable_nfs=False,
                security_services=None)

        setup_network_for_destination_vserver()

    @na_utils.trace
    def _share_server_migration_complete_svm_dr(
            self, source_share_server, dest_share_server, src_vserver,
            src_client, share_instances, new_net_allocations):
        """Perform steps to complete the SVM DR migration.

        1. Do a last SnapMirror update.
        2. Quiesce, abort and then break the relationship.
        3. Stop the source vserver
        4. Configure network interfaces in the destination vserver
        5. Start the destination vserver
        6. Delete and release the snapmirror
        """
        dest_backend_name = share_utils.extract_host(
            dest_share_server['host'], level='backend_name')
        dest_vserver, dest_client = self._get_vserver(
            share_server=dest_share_server, backend_name=dest_backend_name)

        dm_session = data_motion.DataMotionSession()
        try:
            # 1. Start an update to try to get a last minute transfer before
            # we quiesce and break
            dm_session.update_snapmirror_svm(source_share_server,
                                             dest_share_server)
        except exception.StorageCommunicationException:
            # Ignore any errors since the current source may be unreachable
            pass

        try:
            # 2. Attempt to quiesce, abort and then break SnapMirror
            dm_session.quiesce_and_break_snapmirror_svm(source_share_server,
                                                        dest_share_server)

            # NOTE(dviroel): Lets wait until the destination vserver be
            # promoted to 'default' and state 'running', before starting
            # shutting down the source
            dm_session.wait_for_vserver_state(
                dest_vserver, dest_client, subtype='default',
                state='running', operational_state='stopped',
                timeout=(self.configuration.
                         netapp_server_migration_state_change_timeout))

            # 3. Stop source vserver
            src_client.stop_vserver(src_vserver)

            # 4. Setup network configuration
            self._setup_networking_for_destination_vserver(
                dest_client, dest_vserver, new_net_allocations)

            # 5. Start the destination.
            dest_client.start_vserver(dest_vserver)
        except Exception:
            # Try to recover source vserver
            try:
                src_client.start_vserver(src_vserver)
            except Exception:
                LOG.warning("Unable to recover source share server after a "
                            "migration failure.")

            # Destroy any snapmirror and make destination vserver to have its
            # subtype set to 'default'
            dm_session.cancel_snapmirror_svm(source_share_server,
                                             dest_share_server)

            # Rollback resources transferred to the destination
            for instance in share_instances:
                self._delete_share(instance, dest_vserver, dest_client,
                                   remove_export=False)

            msg_args = {
                'src': source_share_server['id'],
                'dest': dest_share_server['id'],
            }
            msg = _('Could not complete the migration between %(src)s and '
                    '%(dest)s vservers.') % msg_args
            raise exception.NetAppException(message=msg)

        # 6. Delete/release snapmirror
        dm_session.delete_snapmirror_svm(source_share_server,
                                         dest_share_server)

    @na_utils.trace
    def _share_server_migration_complete_svm_migrate(
            self, migration_id, dest_share_server):
        """Completes share server migration using SVM Migrate.

        1. Call functions to conclude the migration for SVM Migrate
        2. Waits until the job gets a success status
        3. Wait until the migration cancellation reach the desired status
        """
        dest_client = data_motion.get_client_for_host(
            dest_share_server['host'])
        try:
            # Triggers the migration completion.
            job = dest_client.svm_migrate_complete(migration_id)
            job_id = self._get_job_uuid(job)

            # Wait until the job is successful.
            self._wait_for_operation_status(
                job_id, dest_client.get_job)

            # Wait until the migration is entirely finished.
            self._wait_for_operation_status(
                migration_id, dest_client.svm_migration_get,
                desired_status=na_utils.MIGRATION_STATE_MIGRATE_COMPLETE)
        except exception.NetAppException:
            msg = _(
                "Failed to complete the migration for "
                "share server %s.") % dest_share_server['id']
            raise exception.NetAppException(msg)

    @na_utils.trace
    def share_server_migration_complete(self, context, source_share_server,
                                        dest_share_server, share_instances,
                                        snapshot_instances,
                                        new_network_alloc):
        """Completes share server migration.

        1. Call functions to conclude the migration for SVM DR or SVM Migrate
        2. Build the list of export_locations for each share
        3. Release all resources from the source share server
        """
        src_backend_name = share_utils.extract_host(
            source_share_server['host'], level='backend_name')
        src_vserver, src_client = self._get_vserver(
            share_server=source_share_server, backend_name=src_backend_name)
        src_ipspace_name = src_client.get_vserver_ipspace(src_vserver)

        dest_backend_name = share_utils.extract_host(
            dest_share_server['host'], level='backend_name')
        migration_id = self._get_share_server_migration_id(dest_share_server)

        # For SVM Migrate the vserver kept its original (source) name, so
        # resolve the destination vserver from the source share server.
        share_server_to_get_vserver_name_from = (
            source_share_server if migration_id else dest_share_server)
        dest_vserver, dest_client = self._get_vserver(
            share_server=share_server_to_get_vserver_name_from,
            backend_name=dest_backend_name)

        server_backend_details = {}
        # 1. Call functions to conclude the migration for SVM DR or SVM
        # Migrate.
if migration_id: self._share_server_migration_complete_svm_migrate( migration_id, dest_share_server) server_backend_details = source_share_server['backend_details'] # If there are new network allocations to be added, do so, and add # them to the share server's backend details. if dest_share_server['network_allocations']: # Teardown the current network allocations current_network_interfaces = ( dest_client.list_network_interfaces()) # Need a cluster client to be able to remove the current # network interfaces dest_cluster_client = data_motion.get_client_for_host( dest_share_server['host']) for interface_name in current_network_interfaces: dest_cluster_client.delete_network_interface( src_vserver, interface_name) self._setup_networking_for_destination_vserver( dest_client, src_vserver, new_network_alloc) server_backend_details.pop('ports') ports = {} for allocation in dest_share_server['network_allocations']: ports[allocation['id']] = allocation['ip_address'] server_backend_details['ports'] = jsonutils.dumps(ports) # Delete ipspace on source cluster when possible src_cluster_client = data_motion.get_client_for_host( source_share_server['host']) def _delete_ipspace_and_vlan(): src_ipspace = src_cluster_client.get_ipspaces( src_ipspace_name)[0] ports = src_ipspace['ports'] broadcast_domains = src_ipspace['broadcast-domains'] ipspace_deleted = src_cluster_client.delete_ipspace( src_ipspace_name) if not ipspace_deleted: ports = src_cluster_client.get_degraded_ports( broadcast_domains, src_ipspace_name) self._delete_port_vlans(src_cluster_client, ports) try: _delete_ipspace_and_vlan() except Exception as e: msg = _('Could not delete ipspace %s on SVM migration ' 'source. Reason: %s') % (src_ipspace_name, e) LOG.warning(msg) else: self._share_server_migration_complete_svm_dr( source_share_server, dest_share_server, src_vserver, src_client, share_instances, new_network_alloc) # 2. Build a dict with shares/snapshot location updates. 
# NOTE(dviroel): For SVM DR, the share names aren't modified, only the # export_locations are updated due to network changes. share_updates = {} for instance in share_instances: # Get the volume to find out the associated aggregate # Update post-migration info that can't be replicated try: share_name = self._get_backend_share_name(instance['id']) volume = dest_client.get_volume(share_name) dest_aggregate = volume.get('aggregate') if not migration_id: # Update share attributes according with share extra specs. self._update_share_attributes_after_server_migration( instance, src_client, dest_aggregate, dest_client) except Exception: msg_args = { 'src': source_share_server['id'], 'dest': dest_share_server['id'], } msg = _('Could not complete the migration between %(src)s and ' '%(dest)s vservers. One of the shares was not found ' 'in the destination vserver.') % msg_args raise exception.NetAppException(message=msg) new_share_data = { 'pool_name': volume.get('aggregate') } share_host = instance['host'] # If using SVM migrate, must already ensure the export policies # using the new host information. if migration_id: old_aggregate = share_host.split('#')[1] share_host = share_host.replace( old_aggregate, dest_aggregate) export_locations = self._create_export( instance, dest_share_server, dest_vserver, dest_client, clear_current_export_policy=False, ensure_share_already_exists=True, share_host=share_host) new_share_data.update({'export_locations': export_locations}) share_updates.update({instance['id']: new_share_data}) # NOTE(dviroel): Nothing to update in snapshot instances since the # provider location didn't change. # NOTE(carloss): as SVM DR works like a replica, we must delete the # source shares after the migration. In case of SVM Migrate, the shares # were moved to the destination, so there's no need to remove them. # Then, we need to delete the source server if not migration_id: # 3. Release source share resources. 
for instance in share_instances: self._delete_share(instance, src_vserver, src_client, remove_export=True) # NOTE(dviroel): source share server deletion must be triggered by # the manager after finishing the migration LOG.info('Share server migration completed.') return { 'share_updates': share_updates, 'server_backend_details': server_backend_details } @na_utils.trace def _get_share_server_migration_id(self, dest_share_server): return dest_share_server['backend_details'].get( na_utils.MIGRATION_OPERATION_ID_KEY) @na_utils.trace def _migration_cancel_using_svm_dr( self, source_share_server, dest_share_server, shares): """Cancel a share server migration that is using SVM DR.""" dm_session = data_motion.DataMotionSession() dest_backend_name = share_utils.extract_host(dest_share_server['host'], level='backend_name') dest_vserver, dest_client = self._get_vserver( share_server=dest_share_server, backend_name=dest_backend_name) try: snapmirrors = self._get_snapmirror_svm(source_share_server, dest_share_server) if snapmirrors: dm_session.cancel_snapmirror_svm(source_share_server, dest_share_server) # Do a simple volume cleanup in the destination vserver for instance in shares: self._delete_share(instance, dest_vserver, dest_client, remove_export=False) except Exception: msg_args = { 'src': source_share_server['id'], 'dest': dest_share_server['id'], } msg = _('Unable to cancel SnapMirror relationship between %(src)s ' 'and %(dest)s vservers.') % msg_args raise exception.NetAppException(message=msg) @na_utils.trace def _migration_cancel_using_svm_migrate(self, migration_id, dest_share_server): """Cancel a share server migration that is using SVM migrate. 1. Gets information about the migration 2. Pauses the migration, as it can't be cancelled without pausing 3. Ask to ONTAP to actually cancel the migration """ # 1. Gets information about the migration. 
dest_client = data_motion.get_client_for_host( dest_share_server['host']) migration_information = dest_client.svm_migration_get(migration_id) # Gets the ipspace that was created so we can delete it if it's not # being used anymore. dest_ipspace_name = ( migration_information["destination"]["ipspace"]["name"]) # 2. Pauses the migration. try: # Request the migration to be paused and wait until the job is # successful. job = dest_client.svm_migrate_pause(migration_id) job_id = self._get_job_uuid(job) self._wait_for_operation_status(job_id, dest_client.get_job) # Wait until the migration get actually paused. self._wait_for_operation_status( migration_id, dest_client.svm_migration_get, desired_status=na_utils.MIGRATION_STATE_MIGRATE_PAUSED) except exception.NetAppException: msg = _("Failed to pause the share server migration.") raise exception.NetAppException(message=msg) try: # 3. Ask to ONTAP to actually cancel the migration. job = dest_client.svm_migrate_cancel(migration_id) job_id = self._get_job_uuid(job) self._wait_for_operation_status( job_id, dest_client.get_job) except exception.NetAppException: msg = _("Failed to cancel the share server migration.") raise exception.NetAppException(message=msg) # TODO(chuan) Wait until the ipspace is not being used by vserver # anymore, which is not deleted immediately after migration # cancelled. 
dest_client.delete_ipspace(dest_ipspace_name) network_info = dest_share_server.get('network_allocations') vlan = network_info[0]['segmentation_id'] if network_info else None if vlan: port = None for node in dest_client.list_cluster_nodes(): port = port or self._get_node_data_port(node) dest_client.delete_vlan(node, port, vlan) return @na_utils.trace def share_server_migration_cancel(self, context, source_share_server, dest_share_server, shares, snapshots): """Send the request to cancel the SVM migration.""" migration_id = self._get_share_server_migration_id(dest_share_server) if migration_id: return self._migration_cancel_using_svm_migrate( migration_id, dest_share_server) self._migration_cancel_using_svm_dr( source_share_server, dest_share_server, shares) LOG.info('Share server migration was cancelled.') @na_utils.trace def share_server_migration_get_progress(self, context, src_share_server, dest_share_server, shares, snapshots): """Compare source SVM total shares size with the destination SVM. 1. Gets the total size of the source SVM shares 2. Gets the total size of the destination SVM shares 3. Return the progress up to 99%, because 100% migration will be returned when SVM migration phase 1 is finished. """ # Get the total size of the source share server shares. src_shares_total_size = 0 for instance in shares: src_shares_total_size = ( src_shares_total_size + instance.get('size', 0)) if src_shares_total_size > 0: # Destination share server has the same name as the source share # server. dest_share_server_name = self._get_vserver_name( dest_share_server['source_share_server_id']) # Get current volume total size in the destination SVM. dest_shares_total_size = self._client.get_svm_volumes_total_size( dest_share_server_name) # The 100% progress will be return only when the SVM migration # phase 1 is completed. 99% is an arbitrary number. 
total_progress = ( (99 * dest_shares_total_size) / src_shares_total_size) return {'total_progress': round(total_progress)} return {'total_progress': 0} def _update_share_attributes_after_server_migration( self, src_share_instance, src_client, dest_aggregate, dest_client): """Updates destination share instance with share type extra specs.""" extra_specs = share_types.get_extra_specs_from_share( src_share_instance) provisioning_options = self._get_provisioning_options(extra_specs) volume_name = self._get_backend_share_name(src_share_instance['id']) # NOTE(dviroel): Need to retrieve current autosize attributes since # they aren't being updated by SVM DR. autosize_attrs = src_client.get_volume_autosize_attributes(volume_name) # NOTE(dviroel): In order to modify maximum and minimum size, we must # convert from Kbytes to bytes. for key in ('minimum-size', 'maximum-size'): autosize_attrs[key] = int(autosize_attrs[key]) * units.Ki provisioning_options['autosize_attributes'] = autosize_attrs # NOTE(dviroel): SVM DR already creates a copy of the snapshot policies # at the destination, using a different name. If we update the snapshot # policy in these volumes, might end up with an error if the policy # still does not exist in the destination cluster. Administrators will # have the opportunity to add the snapshot policy after a successful # migration. 
provisioning_options.pop('snapshot_policy', None) # Modify volume to match extra specs dest_client.modify_volume(dest_aggregate, volume_name, **provisioning_options) def validate_provisioning_options_for_share(self, provisioning_options, extra_specs=None, qos_specs=None): if provisioning_options.get('adaptive_qos_policy_group') is not None: msg = _("The extra spec 'adaptive_qos_policy_group' is not " "supported by backends configured with " "'driver_handles_share_server' == True mode.") raise exception.NetAppException(msg) if (self.configuration.netapp_enable_logical_space_reporting and not provisioning_options.get('thin_provisioned')): msg = _("Logical space reporting is only available if thin " "provisioning is enabled. Set 'thin_provisioning=True' " "in your provisioning options") raise exception.NetAppException(msg) (super(NetAppCmodeMultiSVMFileStorageLibrary, self) .validate_provisioning_options_for_share(provisioning_options, extra_specs=extra_specs, qos_specs=qos_specs)) def _get_different_keys_for_equal_ss_type(self, current_sec_service, new_sec_service): different_keys = [] valid_keys = ['dns_ip', 'server', 'domain', 'user', 'password', 'ou', 'default_ad_site'] for key, value in current_sec_service.items(): if (current_sec_service[key] != new_sec_service[key] and key in valid_keys): different_keys.append(key) return different_keys def _is_security_service_valid(self, security_service): mandatory_params = { 'ldap': ['user', 'password'], 'active_directory': ['dns_ip', 'domain', 'user', 'password'], 'kerberos': ['dns_ip', 'domain', 'user', 'password', 'server'], } ss_type = security_service['type'] if ss_type == 'ldap': ad_domain = security_service.get('domain') ldap_servers = security_service.get('server') if not bool(ad_domain) ^ bool(ldap_servers): msg = _("LDAP security service must have either 'server' or " "'domain' parameters. 
Use 'server' for Linux/Unix " "LDAP servers or 'domain' for Active Directory LDAP " "server.") LOG.error(msg) return False if ss_type == 'active_directory': server = security_service.get('server') default_ad_site = security_service.get('default_ad_site') if server and default_ad_site: msg = _("Active directory security service must not have " "both 'server' and 'default_ad_site' parameters.") LOG.error(msg) return False if not all([security_service[key] is not None for key in mandatory_params[ss_type]]): msg = _("The security service %s does not have all the " "parameters needed to used by the share driver." ) % security_service['id'] LOG.error(msg) return False return True def update_share_server_security_service(self, context, share_server, network_info, new_security_service, current_security_service=None): current_type = ( current_security_service['type'].lower() if current_security_service else '') new_type = new_security_service['type'].lower() vserver_name, vserver_client = self._get_vserver( share_server=share_server) # Check if this update is supported by our driver if not self.check_update_share_server_security_service( context, share_server, network_info, new_security_service, current_security_service=current_security_service): msg = _("The requested security service update is not supported " "by the NetApp driver.") LOG.error(msg) raise exception.NetAppException(msg) if current_security_service is None: self._client.setup_security_services( [new_security_service], vserver_client, vserver_name, self.configuration.netapp_cifs_aes_encryption) LOG.info("A new security service configuration was added to share " "server '%(share_server_id)s'", {'share_server_id': share_server['id']}) return different_keys = self._get_different_keys_for_equal_ss_type( current_security_service, new_security_service) if not different_keys: msg = _("The former and the latter security services are " "equal. 
Nothing to do.") LOG.debug(msg) return if 'dns_ip' in different_keys: dns_ips = set() domains = set() # Read all dns-ips and domains from other security services for sec_svc in network_info[0]['security_services']: if sec_svc['type'] == current_type: # skip the one that we are replacing continue if sec_svc.get('dns_ip') is not None: for dns_ip in sec_svc['dns_ip'].split(','): dns_ips.add(dns_ip.strip()) if sec_svc.get('domain') is not None: domains.add(sec_svc['domain']) # Merge with the new dns configuration if new_security_service.get('dns_ip') is not None: for dns_ip in new_security_service['dns_ip'].split(','): dns_ips.add(dns_ip.strip()) if new_security_service.get('domain') is not None: domains.add(new_security_service['domain']) # Update vserver DNS configuration vserver_client.update_dns_configuration(dns_ips, domains) if new_type == 'kerberos': if 'server' in different_keys: # NOTE(dviroel): Only IPs will be updated here, new principals # won't be configured here. It is expected that only the IP was # changed, but the KDC remains the same. 
LOG.debug('Updating kerberos realm on NetApp backend.') vserver_client.update_kerberos_realm(new_security_service) elif new_type == 'active_directory': vserver_client.modify_active_directory_security_service( vserver_name, different_keys, new_security_service, current_security_service) else: vserver_client.modify_ldap(new_security_service, current_security_service) LOG.info("Security service configuration was updated for share server " "'%(share_server_id)s'", {'share_server_id': share_server['id']}) def check_update_share_server_security_service( self, context, share_server, network_info, new_security_service, current_security_service=None): current_type = ( current_security_service['type'].lower() if current_security_service else '') if not self._is_security_service_valid(new_security_service): self.message_api.create( context, message_field.Action.ADD_UPDATE_SECURITY_SERVICE, new_security_service['project_id'], resource_type=message_field.Resource.SECURITY_SERVICE, resource_id=new_security_service['id'], detail=(message_field.Detail .UNSUPPORTED_ADD_UDPATE_SECURITY_SERVICE)) return False if current_security_service: if current_type != 'ldap': # NOTE(dviroel): We don't support domain/realm updates for # Kerberos security service, because it might require a new SPN # to be created and to destroy the old one, thus disrupting all # shares hosted by this share server. Same issue can happen # with AD domain modifications. 
if (current_security_service['domain'].lower() != new_security_service['domain'].lower()): msg = _("Currently the driver does not support updates " "in the security service 'domain'.") LOG.info(msg) return False return True def check_update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_share_network_subnet, security_services, share_instances, share_instances_rules): """Check if new network configuration is valid.""" LOG.debug('Checking if network configuration is valid to update share' 'server %s.', share_server['id']) # Get segmentation_id from current allocations to check if added # subnet is in the same network segment as the others. ref_subnet = current_network_allocations['subnets'][0] ref_subnet_allocation = ref_subnet['network_allocations'][0] seg_id = ref_subnet_allocation['segmentation_id'] new_subnet_seg_id = new_share_network_subnet['segmentation_id'] network_info = [dict(segmentation_id=seg_id), dict(segmentation_id=new_subnet_seg_id)] is_valid_configuration = True try: self._validate_network_type([new_share_network_subnet]) self._validate_share_network_subnets(network_info) except exception.NetworkBadConfigurationException as e: LOG.error('Invalid share server network allocation. 
%s', e) is_valid_configuration = False return is_valid_configuration def _build_model_update(self, current_network_allocations, new_network_allocations, export_locations=None): """Updates server details for a new set of network allocations""" ports = {} for subnet in current_network_allocations['subnets']: for alloc in subnet['network_allocations']: ports[alloc['id']] = alloc['ip_address'] for alloc in new_network_allocations['network_allocations']: ports[alloc['id']] = alloc['ip_address'] model_update = {'server_details': {'ports': jsonutils.dumps(ports)}} if export_locations: model_update.update({'share_updates': export_locations}) return model_update def update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_network_allocations, security_services, shares, snapshots): """Update network allocations for the share server.""" vserver_name = self._get_vserver_name(share_server['id']) vserver_client = self._get_api_client(vserver=vserver_name) ipspace_name = self._client.get_vserver_ipspace(vserver_name) network_info = [new_network_allocations] LOG.debug('Adding new subnet allocations to share server %s', share_server['id']) try: self._setup_network_for_vserver( vserver_name, vserver_client, network_info, ipspace_name, enable_nfs=False, security_services=None, nfs_config=None) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error("Failed to update vserver network configuration.") updates = self._build_model_update( current_network_allocations, new_network_allocations, export_locations=None) e.detail_data = updates updated_export_locations = {} for share in shares: if share["replica_state"] in (None, constants.REPLICA_STATE_ACTIVE): host = share['host'] export_locations = self._create_export( share, share_server, vserver_name, vserver_client, clear_current_export_policy=False, ensure_share_already_exists=True, share_host=host) updated_export_locations.update( {share['id']: export_locations}) updates 
= self._build_model_update( current_network_allocations, new_network_allocations, updated_export_locations) return updates def _get_backup_vserver(self, backup, share_server=None): backend_name = self._get_backend(backup) backend_config = data_motion.get_backend_configuration(backend_name) des_cluster_api_client = self._get_api_client_for_backend( backend_name) aggr_list = des_cluster_api_client.list_non_root_aggregates() aggr_pattern = (backend_config. netapp_aggregate_name_search_pattern) if aggr_pattern: aggr_matching_list = [ element for element in aggr_list if re.search(aggr_pattern, element) ] aggr_list = aggr_matching_list share_server_id = share_server['id'] des_vserver = f"{Backup.DES_VSERVER_PREFIX.value}_{share_server_id}" LOG.debug("Creating vserver %s:", des_vserver) try: des_cluster_api_client.create_vserver( des_vserver, None, None, aggr_list, 'Default', client_cmode_rest.DEFAULT_SECURITY_CERT_EXPIRE_DAYS, ) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if 'already used' in e.message: exc_context.reraise = False return des_vserver def _delete_backup_vserver(self, backup, des_vserver): """Delete the vserver """ backend_name = self._get_backend(backup) des_vserver_client = self._get_api_client_for_backend( backend_name, vserver=des_vserver) try: des_cluster_api_client = self._get_api_client_for_backend( backend_name) des_cluster_api_client.delete_vserver(des_vserver, des_vserver_client) except exception.NetAppException as e: with excutils.save_and_reraise_exception() as exc_context: if 'has shares' in e.msg: exc_context.reraise = False def _check_data_lif_count_limit_reached_for_ha_pair(self, client): ha_pair = {node: client.get_storage_failover_partner(node) for node in client.list_cluster_nodes()} # TODO(agireesh): Get the data LIFs details for node using REST call # The 'get_data_lif_details_for_nodes' method is missing for REST # workflow because there is no REST available to retrieve the data # LIF's 
capacity and details for the nodes. Filed the RFE on ONTAP # to implement the corresponding REST, and once it is available, the # REST workflow will be added as part of the fix (bug #2100673). lif_info_for_node = client.get_data_lif_details_for_nodes() lif_info_dict = {info['node']: info for info in lif_info_for_node} for node, ha_partner in ha_pair.items(): if node in lif_info_dict: data_lif_count = int(lif_info_dict[node].get( 'count-for-node', 0) ) lif_limit_for_node = int(lif_info_dict[node].get( 'limit-for-node') ) migratable_data_lifs = ( client.get_migratable_data_lif_for_node(ha_partner) ) expected_lif_count_after_failover = ( data_lif_count + len(migratable_data_lifs) ) if expected_lif_count_after_failover > lif_limit_for_node: msg_args = { 'data_lif': expected_lif_count_after_failover, 'lif_limit': lif_limit_for_node, } msg = _("If a partner node fails, the number of data LIFs" " {%(data_lif)s} will exceed the node's maximum " "data LIF limit {%(lif_limit)s}") % msg_args LOG.error(msg) raise exception.NetAppException(msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/lib_single_svm.py0000664000175000017500000001641600000000000030404 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP cDOT single-SVM storage driver library. 
This library extends the abstract base library and completes the single-SVM functionality needed by the cDOT single-SVM Manila driver. This library variant uses a single Data ONTAP storage virtual machine (i.e. 'vserver') as defined in manila.conf to provision shares. """ import re from oslo_log import log from manila import exception from manila.i18n import _ from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) class NetAppCmodeSingleSVMFileStorageLibrary( lib_base.NetAppCmodeFileStorageLibrary): def __init__(self, driver_name, **kwargs): super(NetAppCmodeSingleSVMFileStorageLibrary, self).__init__( driver_name, **kwargs) self._vserver = self.configuration.netapp_vserver @na_utils.trace def check_for_setup_error(self): # Ensure vserver is specified in configuration. if not self._vserver: msg = _('Vserver must be specified in the configuration ' 'when the driver is not managing share servers.') raise exception.InvalidInput(reason=msg) # Ensure vserver exists. if not self._client.vserver_exists(self._vserver): raise exception.VserverNotFound(vserver=self._vserver) # If we have vserver credentials, ensure the vserver they connect # to matches the vserver specified in the configuration. if not self._have_cluster_creds: if self._vserver not in self._client.list_vservers(): msg = _('Vserver specified in the configuration does not ' 'match supplied credentials.') raise exception.InvalidInput(reason=msg) # Ensure FlexGroup support vserver_client = self._get_api_client(vserver=self._vserver) aggr_list = vserver_client.list_vserver_aggregates() self._initialize_flexgroup_pools(set(aggr_list)) # Ensure one or more aggregates are available to the vserver. 
if (self.is_flexvol_pool_configured() and not self._find_matching_aggregates(aggregate_names=aggr_list)): msg = _('No aggregates are available to Vserver %s for ' 'provisioning shares. Ensure that one or more aggregates ' 'are assigned to the Vserver and that the configuration ' 'option netapp_aggregate_name_search_pattern is set ' 'correctly.') % self._vserver raise exception.NetAppException(msg) msg = ('Using Vserver %(vserver)s for backend %(backend)s with ' '%(creds)s credentials.') msg_args = {'vserver': self._vserver, 'backend': self._backend_name} msg_args['creds'] = ('cluster' if self._have_cluster_creds else 'Vserver') LOG.info(msg, msg_args) (super(NetAppCmodeSingleSVMFileStorageLibrary, self). check_for_setup_error()) @na_utils.trace def _get_vserver(self, share_server=None): if share_server is not None: msg = _('Share server must not be passed to the driver ' 'when the driver is not managing share servers.') raise exception.InvalidParameterValue(err=msg) if not self._vserver: msg = _('Vserver not specified in configuration.') raise exception.InvalidInput(reason=msg) if not self._client.vserver_exists(self._vserver): raise exception.VserverNotFound(vserver=self._vserver) vserver_client = self._get_api_client(self._vserver) return self._vserver, vserver_client def _get_ems_pool_info(self): return { 'pools': { 'vserver': self._vserver, 'aggregates': self._find_matching_aggregates(), 'flexgroup_aggregates': self._flexgroup_pools, }, } @na_utils.trace def _handle_housekeeping_tasks(self): """Handle various cleanup activities.""" vserver_client = self._get_api_client(vserver=self._vserver) vserver_client.prune_deleted_nfs_export_policies() vserver_client.prune_deleted_snapshots() if self._have_cluster_creds: # Harvest soft-deleted QoS policy groups vserver_client.remove_unused_qos_policy_groups() (super(NetAppCmodeSingleSVMFileStorageLibrary, self). 
_handle_housekeeping_tasks()) @na_utils.trace def _find_matching_aggregates(self, aggregate_names=None): """Find all aggregates match pattern if FlexVol pool is configured.""" if not self.is_flexvol_pool_configured(): return [] if not aggregate_names: vserver_client = self._get_api_client(vserver=self._vserver) aggregate_names = vserver_client.list_vserver_aggregates() root_aggregate_names = [] if self._have_cluster_creds: root_aggregate_names = self._client.list_root_aggregates() pattern = self.configuration.netapp_aggregate_name_search_pattern return [aggr_name for aggr_name in aggregate_names if re.match(pattern, aggr_name) and aggr_name not in root_aggregate_names] @na_utils.trace def get_network_allocations_number(self): """Get number of network interfaces to be created.""" return 0 @na_utils.trace def get_admin_network_allocations_number(self): """Get number of network allocations for creating admin LIFs.""" return 0 @na_utils.trace def get_configured_ip_versions(self): ipv4 = False ipv6 = False vserver_client = self._get_api_client(vserver=self._vserver) interfaces = vserver_client.get_network_interfaces() for interface in interfaces: address = interface['address'] if ':' in address: ipv6 = True else: ipv4 = True versions = [] if ipv4: versions.append(4) if ipv6: versions.append(6) return versions def _get_backup_vserver(self, backup, share_server=None): backend_name = self._get_backend(backup) backend_config = data_motion.get_backend_configuration(backend_name) if share_server is not None: msg = _('Share server must not be passed to the driver ' 'when the driver is not managing share servers.') raise exception.InvalidParameterValue(err=msg) return backend_config.netapp_vserver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/cluster_mode/performance.py0000664000175000017500000004210700000000000027705 0ustar00zuulzuul00000000000000# Copyright (c) 
2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Performance metrics functions and cache for NetApp systems. """ import copy from oslo_log import log as logging from manila import exception from manila.i18n import _ from manila.share.drivers.netapp.dataontap.client import api as netapp_api LOG = logging.getLogger(__name__) DEFAULT_UTILIZATION = 50 class PerformanceLibrary(object): def __init__(self, zapi_client): self.zapi_client = zapi_client self.performance_counters = {} self.pool_utilization = {} self._init_counter_info() def _init_counter_info(self): """Set a few counter names based on Data ONTAP version.""" self.system_object_name = None self.avg_processor_busy_base_counter_name = None try: if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS: self.system_object_name = 'system:constituent' self.avg_processor_busy_base_counter_name = ( self._get_base_counter_name('system:constituent', 'avg_processor_busy')) elif self.zapi_client.features.SYSTEM_METRICS: self.system_object_name = 'system' self.avg_processor_busy_base_counter_name = ( self._get_base_counter_name('system', 'avg_processor_busy')) except netapp_api.NaApiError: if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS: self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time' else: self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1' LOG.exception('Could not get performance base counter ' 'name. 
Performance-based scheduler ' 'functions may not be available.') def update_performance_cache(self, flexvol_pools, aggregate_pools): """Called periodically to update per-pool node utilization metrics.""" # Nothing to do on older systems if not (self.zapi_client.features.SYSTEM_METRICS or self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS): return # Get aggregates and nodes for all known pools aggr_names = self._get_aggregates_for_pools(flexvol_pools, aggregate_pools) node_names, aggr_node_map = self._get_nodes_for_aggregates(aggr_names) # Update performance counter cache for each node node_utilization = {} for node_name in node_names: if node_name not in self.performance_counters: self.performance_counters[node_name] = [] # Get new performance counters and save only the last 10 counters = self._get_node_utilization_counters(node_name) if not counters: continue self.performance_counters[node_name].append(counters) self.performance_counters[node_name] = ( self.performance_counters[node_name][-10:]) # Update utilization for each node using newest & oldest sample counters = self.performance_counters[node_name] if len(counters) < 2: node_utilization[node_name] = DEFAULT_UTILIZATION else: node_utilization[node_name] = self._get_node_utilization( counters[0], counters[-1], node_name) # Update pool utilization map atomically pool_utilization = {} all_pools = copy.deepcopy(flexvol_pools) all_pools.update(aggregate_pools) for pool_name, pool_info in all_pools.items(): aggr_name = pool_info.get('netapp_aggregate', 'unknown') node_name = aggr_node_map.get(aggr_name) if node_name: pool_utilization[pool_name] = node_utilization.get( node_name, DEFAULT_UTILIZATION) else: pool_utilization[pool_name] = DEFAULT_UTILIZATION self.pool_utilization = pool_utilization def get_node_utilization_for_pool(self, pool_name): """Get the node utilization for the specified pool, if available.""" return self.pool_utilization.get(pool_name, DEFAULT_UTILIZATION) def update_for_failover(self, 
zapi_client, flexvol_pools, aggregate_pools): """Change API client after a whole-backend failover event.""" self.zapi_client = zapi_client self.update_performance_cache(flexvol_pools, aggregate_pools) def _get_aggregates_for_pools(self, flexvol_pools, aggregate_pools): """Get the set of aggregates that contain the specified pools.""" aggr_names = set() for pool_name, pool_info in aggregate_pools.items(): if pool_info.get('netapp_flexgroup', False): continue aggr_names.add(pool_info.get('netapp_aggregate')) for pool_name, pool_info in flexvol_pools.items(): if pool_info.get('netapp_flexgroup', False): continue aggr_names.add(pool_info.get('netapp_aggregate')) return list(aggr_names) def _get_nodes_for_aggregates(self, aggr_names): """Get the cluster nodes that own the specified aggregates.""" node_names = set() aggr_node_map = {} for aggr_name in aggr_names: node_name = self.zapi_client.get_node_for_aggregate(aggr_name) if node_name: node_names.add(node_name) aggr_node_map[aggr_name] = node_name return list(node_names), aggr_node_map def _get_node_utilization(self, counters_t1, counters_t2, node_name): """Get node utilization from two sets of performance counters.""" try: # Time spent in the single-threaded Kahuna domain kahuna_percent = self._get_kahuna_utilization(counters_t1, counters_t2) # If Kahuna is using >60% of the CPU, the controller is fully busy if kahuna_percent > 60: return 100.0 # Average CPU busyness across all processors avg_cpu_percent = 100.0 * self._get_average_cpu_utilization( counters_t1, counters_t2) # Total Consistency Point (CP) time total_cp_time_msec = self._get_total_consistency_point_time( counters_t1, counters_t2) # Time spent in CP Phase 2 (buffer flush) p2_flush_time_msec = self._get_consistency_point_p2_flush_time( counters_t1, counters_t2) # Wall-clock time between the two counter sets poll_time_msec = self._get_total_time(counters_t1, counters_t2, 'total_cp_msecs') # If two polls happened in quick succession, use CPU utilization if 
total_cp_time_msec == 0 or poll_time_msec == 0: return max(min(100.0, avg_cpu_percent), 0) # Adjusted Consistency Point time adjusted_cp_time_msec = self._get_adjusted_consistency_point_time( total_cp_time_msec, p2_flush_time_msec) adjusted_cp_percent = (100.0 * adjusted_cp_time_msec / poll_time_msec) # Utilization is the greater of CPU busyness & CP time node_utilization = max(avg_cpu_percent, adjusted_cp_percent) return max(min(100.0, node_utilization), 0) except Exception: LOG.exception('Could not calculate node utilization for ' 'node %s.', node_name) return DEFAULT_UTILIZATION def _get_kahuna_utilization(self, counters_t1, counters_t2): """Get time spent in the single-threaded Kahuna domain.""" # Note(cknight): Because Kahuna is single-threaded, running only on # one CPU at a time, we can safely sum the Kahuna CPU usage # percentages across all processors in a node. return sum(self._get_performance_counter_average_multi_instance( counters_t1, counters_t2, 'domain_busy:kahuna', 'processor_elapsed_time')) * 100.0 def _get_average_cpu_utilization(self, counters_t1, counters_t2): """Get average CPU busyness across all processors.""" return self._get_performance_counter_average( counters_t1, counters_t2, 'avg_processor_busy', self.avg_processor_busy_base_counter_name) def _get_total_consistency_point_time(self, counters_t1, counters_t2): """Get time spent in Consistency Points in msecs.""" return float(self._get_performance_counter_delta( counters_t1, counters_t2, 'total_cp_msecs')) def _get_consistency_point_p2_flush_time(self, counters_t1, counters_t2): """Get time spent in CP Phase 2 (buffer flush) in msecs.""" return float(self._get_performance_counter_delta( counters_t1, counters_t2, 'cp_phase_times:p2_flush')) def _get_total_time(self, counters_t1, counters_t2, counter_name): """Get wall clock time between two successive counters in msecs.""" timestamp_t1 = float(self._find_performance_counter_timestamp( counters_t1, counter_name)) timestamp_t2 = 
float(self._find_performance_counter_timestamp( counters_t2, counter_name)) return (timestamp_t2 - timestamp_t1) * 1000.0 def _get_adjusted_consistency_point_time(self, total_cp_time, p2_flush_time): """Get adjusted CP time by limiting CP phase 2 flush time to 20%.""" return (total_cp_time - p2_flush_time) * 1.20 def _get_performance_counter_delta(self, counters_t1, counters_t2, counter_name): """Calculate a delta value from two performance counters.""" counter_t1 = int( self._find_performance_counter_value(counters_t1, counter_name)) counter_t2 = int( self._find_performance_counter_value(counters_t2, counter_name)) return counter_t2 - counter_t1 def _get_performance_counter_average(self, counters_t1, counters_t2, counter_name, base_counter_name, instance_name=None): """Calculate an average value from two performance counters.""" counter_t1 = float(self._find_performance_counter_value( counters_t1, counter_name, instance_name)) counter_t2 = float(self._find_performance_counter_value( counters_t2, counter_name, instance_name)) base_counter_t1 = float(self._find_performance_counter_value( counters_t1, base_counter_name, instance_name)) base_counter_t2 = float(self._find_performance_counter_value( counters_t2, base_counter_name, instance_name)) return (counter_t2 - counter_t1) / (base_counter_t2 - base_counter_t1) def _get_performance_counter_average_multi_instance(self, counters_t1, counters_t2, counter_name, base_counter_name): """Calculate an average value from multiple counter instances.""" averages = [] instance_names = [] for counter in counters_t1: if counter_name in counter: instance_names.append(counter['instance-name']) for instance_name in instance_names: average = self._get_performance_counter_average( counters_t1, counters_t2, counter_name, base_counter_name, instance_name) averages.append(average) return averages def _find_performance_counter_value(self, counters, counter_name, instance_name=None): """Given a counter set, return the value of a named 
instance.""" for counter in counters: if counter_name in counter: if (instance_name is None or counter['instance-name'] == instance_name): return counter[counter_name] else: raise exception.NotFound(_('Counter %s not found') % counter_name) def _find_performance_counter_timestamp(self, counters, counter_name, instance_name=None): """Given a counter set, return the timestamp of a named instance.""" for counter in counters: if counter_name in counter: if (instance_name is None or counter['instance-name'] == instance_name): return counter['timestamp'] else: raise exception.NotFound(_('Counter %s not found') % counter_name) def _expand_performance_array(self, object_name, counter_name, counter): """Get array labels and expand counter data array.""" # Get array labels for counter value counter_info = self.zapi_client.get_performance_counter_info( object_name, counter_name) array_labels = [counter_name + ':' + label.lower() for label in counter_info['labels']] array_values = counter[counter_name].split(',') # Combine labels and values, and then mix into existing counter array_data = dict(zip(array_labels, array_values)) counter.update(array_data) def _get_base_counter_name(self, object_name, counter_name): """Get the name of the base counter for the specified counter.""" counter_info = self.zapi_client.get_performance_counter_info( object_name, counter_name) return counter_info['base-counter'] def _get_node_utilization_counters(self, node_name): """Get all performance counters for calculating node utilization.""" try: return (self._get_node_utilization_system_counters(node_name) + self._get_node_utilization_wafl_counters(node_name) + self._get_node_utilization_processor_counters(node_name)) except netapp_api.NaApiError: LOG.exception('Could not get utilization counters from node ' '%s', node_name) return None def _get_node_utilization_system_counters(self, node_name): """Get the system counters for calculating node utilization.""" system_instance_uuids = ( 
self.zapi_client.get_performance_instance_uuids( self.system_object_name, node_name)) system_counter_names = [ 'avg_processor_busy', self.avg_processor_busy_base_counter_name, ] if 'cpu_elapsed_time1' in system_counter_names: system_counter_names.append('cpu_elapsed_time') system_counters = self.zapi_client.get_performance_counters( self.system_object_name, system_instance_uuids, system_counter_names) return system_counters def _get_node_utilization_wafl_counters(self, node_name): """Get the WAFL counters for calculating node utilization.""" wafl_instance_uuids = self.zapi_client.get_performance_instance_uuids( 'wafl', node_name) wafl_counter_names = ['total_cp_msecs', 'cp_phase_times'] wafl_counters = self.zapi_client.get_performance_counters( 'wafl', wafl_instance_uuids, wafl_counter_names) # Expand array data so we can use wafl:cp_phase_times[P2_FLUSH] for counter in wafl_counters: if 'cp_phase_times' in counter: self._expand_performance_array( 'wafl', 'cp_phase_times', counter) return wafl_counters def _get_node_utilization_processor_counters(self, node_name): """Get the processor counters for calculating node utilization.""" processor_instance_uuids = ( self.zapi_client.get_performance_instance_uuids('processor', node_name)) processor_counter_names = ['domain_busy', 'processor_elapsed_time'] processor_counters = self.zapi_client.get_performance_counters( 'processor', processor_instance_uuids, processor_counter_names) # Expand array data so we can use processor:domain_busy[kahuna] for counter in processor_counters: if 'domain_busy' in counter: self._expand_performance_array( 'processor', 'domain_busy', counter) return processor_counters ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9296715 manila-21.0.0/manila/share/drivers/netapp/dataontap/protocols/0000775000175000017500000000000000000000000024365 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/protocols/__init__.py0000664000175000017500000000000000000000000026464 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/protocols/base.py0000664000175000017500000000605700000000000025661 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Abstract base class for NetApp NAS protocol helper classes. """ import abc from manila.common import constants from manila import utils def access_rules_synchronized(f): """Decorator for synchronizing share access rule modification methods.""" def wrapped_func(self, *args, **kwargs): # The first argument is always a share, which has an ID key = "share-access-%s" % args[0]['id'] @utils.synchronized(key) def source_func(self, *args, **kwargs): return f(self, *args, **kwargs) return source_func(self, *args, **kwargs) return wrapped_func class NetAppBaseHelper(metaclass=abc.ABCMeta): """Interface for protocol-specific NAS drivers.""" def __init__(self): self._client = None def set_client(self, client): self._client = client def _is_readonly(self, access_level): """Returns whether an access rule specifies read-only access.""" return access_level == constants.ACCESS_LEVEL_RO @staticmethod def _get_share_export_location(share): """Returns the export location of the share. 
The share may contain only the list of export location, depending on the entity provided by the manager. """ export_location = share.get('export_location') if not export_location: export_location_list = share.get('export_locations') if (isinstance(export_location_list, list) and len(export_location_list) > 0): export_location = export_location_list[0]['path'] return export_location @abc.abstractmethod def create_share(self, share, share_name, clear_current_export_policy=True, ensure_share_already_exists=False, replica=False, is_flexgroup=False): """Creates NAS share.""" @abc.abstractmethod def delete_share(self, share, share_name): """Deletes NAS share.""" @abc.abstractmethod def update_access(self, share, share_name, rules): """Replaces the list of access rules known to the backend storage.""" @abc.abstractmethod def get_target(self, share): """Returns host where the share located.""" @abc.abstractmethod def get_share_name_for_share(self, share): """Returns the flexvol name that hosts a share.""" @abc.abstractmethod def cleanup_demoted_replica(self, share, share_name): """Do some cleanup regarding the former active replica""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py0000664000175000017500000002013700000000000027035 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ NetApp cDOT CIFS protocol helper class. """ import re from manila.common import constants from manila import exception from manila.i18n import _ from manila.share.drivers.netapp.dataontap.protocols import base from manila.share.drivers.netapp import utils as na_utils class NetAppCmodeCIFSHelper(base.NetAppBaseHelper): """NetApp cDOT CIFS protocol helper class.""" @na_utils.trace def create_share(self, share, share_name, clear_current_export_policy=True, ensure_share_already_exists=False, replica=False, is_flexgroup=False): """Creates CIFS share if does not exist on Data ONTAP Vserver. The new CIFS share has Everyone access, so it removes all access after creating. :param share: share entity. :param share_name: share name that must be the CIFS share name. :param clear_current_export_policy: ignored, NFS only. :param ensure_share_already_exists: ensures that CIFS share exists. :param replica: it is a replica volume (DP type). :param is_flexgroup: whether the share is a FlexGroup or not. """ cifs_exist = self._client.cifs_share_exists(share_name) export_path = self._client.get_volume_junction_path(share_name) if ensure_share_already_exists and not cifs_exist: msg = _("The expected CIFS share %(share_name)s was not found.") msg_args = {'share_name': share_name} raise exception.NetAppException(msg % msg_args) elif not cifs_exist: self._client.create_cifs_share(share_name, export_path) self._client.remove_cifs_share_access(share_name, 'Everyone') # Ensure 'ntfs' security style for RW volume. DP volumes cannot set it. if not replica: self._client.set_volume_security_style(share_name, security_style='ntfs') # Return a callback that may be used for generating export paths # for this share. 
return (lambda export_address, export_path=export_path: r'\\%s%s' % (export_address, export_path.replace('/', '\\'))) @na_utils.trace def delete_share(self, share, share_name): """Deletes CIFS share on Data ONTAP Vserver.""" host_ip, share_name = self._get_export_location(share) self._client.remove_cifs_share(share_name) @na_utils.trace @base.access_rules_synchronized def update_access(self, share, share_name, rules): """Replaces the list of access rules known to the backend storage.""" _, cifs_share_name = self._get_export_location(share) # Ensure rules are valid for rule in rules: self._validate_access_rule(rule) new_rules = {rule['access_to']: rule['access_level'] for rule in rules} # Get rules from share existing_rules = self._get_access_rules(share, cifs_share_name) # Update rules in an order that will prevent transient disruptions self._handle_added_rules(cifs_share_name, existing_rules, new_rules) self._handle_ro_to_rw_rules(cifs_share_name, existing_rules, new_rules) self._handle_rw_to_ro_rules(cifs_share_name, existing_rules, new_rules) self._handle_deleted_rules(cifs_share_name, existing_rules, new_rules) @na_utils.trace def _validate_access_rule(self, rule): """Checks whether access rule type and level are valid.""" if rule['access_type'] != 'user': msg = _("Clustered Data ONTAP supports only 'user' type for " "share access rules with CIFS protocol.") raise exception.InvalidShareAccess(reason=msg) if rule['access_level'] not in constants.ACCESS_LEVELS: raise exception.InvalidShareAccessLevel(level=rule['access_level']) @na_utils.trace def _handle_added_rules(self, share_name, existing_rules, new_rules): """Updates access rules added between two rule sets.""" added_rules = { user_or_group: permission for user_or_group, permission in new_rules.items() if user_or_group not in existing_rules } for user_or_group, permission in added_rules.items(): self._client.add_cifs_share_access( share_name, user_or_group, self._is_readonly(permission)) @na_utils.trace def 
_handle_ro_to_rw_rules(self, share_name, existing_rules, new_rules): """Updates access rules modified (RO-->RW) between two rule sets.""" modified_rules = { user_or_group: permission for user_or_group, permission in new_rules.items() if (user_or_group in existing_rules and permission == constants.ACCESS_LEVEL_RW and existing_rules[user_or_group] != 'full_control') } for user_or_group, permission in modified_rules.items(): self._client.modify_cifs_share_access( share_name, user_or_group, self._is_readonly(permission)) @na_utils.trace def _handle_rw_to_ro_rules(self, share_name, existing_rules, new_rules): """Returns access rules modified (RW-->RO) between two rule sets.""" modified_rules = { user_or_group: permission for user_or_group, permission in new_rules.items() if (user_or_group in existing_rules and permission == constants.ACCESS_LEVEL_RO and existing_rules[user_or_group] != 'read') } for user_or_group, permission in modified_rules.items(): self._client.modify_cifs_share_access( share_name, user_or_group, self._is_readonly(permission)) @na_utils.trace def _handle_deleted_rules(self, share_name, existing_rules, new_rules): """Returns access rules deleted between two rule sets.""" deleted_rules = { user_or_group: permission for user_or_group, permission in existing_rules.items() if user_or_group not in new_rules } for user_or_group, permission in deleted_rules.items(): self._client.remove_cifs_share_access(share_name, user_or_group) @na_utils.trace def _get_access_rules(self, share, share_name): """Returns the list of access rules known to the backend storage.""" return self._client.get_cifs_share_access(share_name) @na_utils.trace def get_target(self, share): """Returns OnTap target IP based on share export location.""" return self._get_export_location(share)[0] @na_utils.trace def get_share_name_for_share(self, share): """Returns the flexvol name that hosts a share.""" _, volume_junction_path = self._get_export_location(share) volume = 
self._client.get_volume_at_junction_path( f"/{volume_junction_path}") return volume.get('name') if volume else None @na_utils.trace def _get_export_location(self, share): """Returns host ip and share name for a given CIFS share.""" export_location = self._get_share_export_location(share) or '\\\\\\' regex = r'^(?:\\\\|//)(?P.*)(?:\\|/)(?P.*)$' match = re.match(regex, export_location) if match: return match.group('host_ip'), match.group('share_name') else: return '', '' @na_utils.trace def cleanup_demoted_replica(self, share, share_name): """Cleans up CIFS share for a demoted replica.""" self._client.remove_cifs_share(share_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py0000664000175000017500000002217200000000000026700 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp cDOT NFS protocol helper class. 
""" import uuid from oslo_log import log from manila.common import constants from manila import exception from manila.share.drivers.netapp.dataontap.protocols import base from manila.share.drivers.netapp import utils as na_utils LOG = log.getLogger(__name__) class NetAppCmodeNFSHelper(base.NetAppBaseHelper): """NetApp cDOT NFS protocol helper class.""" @staticmethod def _escaped_address(address): if ':' in address: return '[%s]' % address else: return address @na_utils.trace def create_share(self, share, share_name, clear_current_export_policy=True, ensure_share_already_exists=False, replica=False, is_flexgroup=False): """Ensures the share export policy is set correctly. The export policy must have the same name as the share. If it matches, nothing is done. Otherwise, the possible scenarios: 1. policy as 'default': a new export policy is created. 2. policy as any name: renames the assigned policy to match the name. :param share: share entity. :param share_name: share name that must be the export policy name. :param clear_current_export_policy: set the policy to 'default' before the check. :param ensure_share_already_exists: ignored, CIFS only. :param replica: it is a replica volume (DP type). :param is_flexgroup: whether the share is a FlexGroup or not. """ if clear_current_export_policy: self._client.clear_nfs_export_policy_for_volume(share_name) self._ensure_export_policy(share, share_name) if is_flexgroup: volume_info = self._client.get_volume(share_name) export_path = volume_info['junction-path'] else: export_path = self._client.get_volume_junction_path(share_name) # Return a callback that may be used for generating export paths # for this share. 
return (lambda export_address, export_path=export_path: ':'.join([self._escaped_address(export_address), export_path])) @na_utils.trace @base.access_rules_synchronized def delete_share(self, share, share_name): """Deletes NFS share.""" LOG.debug('Deleting NFS export policy for share %s', share['id']) export_policy_name = self._get_export_policy_name(share) self._client.clear_nfs_export_policy_for_volume(share_name) self._client.soft_delete_nfs_export_policy(export_policy_name) @na_utils.trace @base.access_rules_synchronized def update_access(self, share, share_name, rules): """Replaces the list of access rules known to the backend storage.""" # Ensure rules are valid for rule in rules: self._validate_access_rule(rule) # Sort rules by ascending network size new_rules = {rule['access_to']: rule['access_level'] for rule in rules} addresses = sorted(new_rules, reverse=True) # Ensure current export policy has the name we expect self._ensure_export_policy(share, share_name) export_policy_name = self._get_export_policy_name(share) # Make temp policy names so this non-atomic workflow remains resilient # across process interruptions. 
temp_new_export_policy_name = self._get_temp_export_policy_name() temp_old_export_policy_name = self._get_temp_export_policy_name() # Create new export policy self._client.create_nfs_export_policy(temp_new_export_policy_name) # Get authentication methods, based on Vserver configuration auth_methods = self._get_auth_methods() # Add new rules to new policy for address in addresses: self._client.add_nfs_export_rule( temp_new_export_policy_name, address, self._is_readonly(new_rules[address]), auth_methods) # Rename policy currently in force LOG.info('Renaming NFS export policy for share %(share)s to ' '%(policy)s.', {'share': share_name, 'policy': temp_old_export_policy_name}) self._client.rename_nfs_export_policy(export_policy_name, temp_old_export_policy_name) # Switch share to the new policy LOG.info('Setting NFS export policy for share %(share)s to ' '%(policy)s.', {'share': share_name, 'policy': temp_new_export_policy_name}) self._client.set_nfs_export_policy_for_volume( share_name, temp_new_export_policy_name) # Delete old policy self._client.soft_delete_nfs_export_policy(temp_old_export_policy_name) # Rename new policy to its final name LOG.info('Renaming NFS export policy for share %(share)s to ' '%(policy)s.', {'share': share_name, 'policy': export_policy_name}) self._client.rename_nfs_export_policy(temp_new_export_policy_name, export_policy_name) @na_utils.trace def _validate_access_rule(self, rule): """Checks whether access rule type and level are valid.""" if rule['access_type'] != 'ip': msg = ("Clustered Data ONTAP supports only 'ip' type for share " "access rules with NFS protocol.") raise exception.InvalidShareAccess(reason=msg) if rule['access_level'] not in constants.ACCESS_LEVELS: raise exception.InvalidShareAccessLevel(level=rule['access_level']) @na_utils.trace def get_target(self, share): """Returns ID of target ONTAP device based on export location.""" return self._get_export_location(share)[0] @na_utils.trace def get_share_name_for_share(self, 
share): """Returns the flexvol name that hosts a share.""" _, volume_junction_path = self._get_export_location(share) volume = self._client.get_volume_at_junction_path(volume_junction_path) return volume.get('name') if volume else None @na_utils.trace def _get_export_location(self, share): """Returns IP address and export location of an NFS share.""" export_location = self._get_share_export_location(share) or ':' result = export_location.rsplit(':', 1) if len(result) != 2: return ['', ''] return result @staticmethod def _get_temp_export_policy_name(): """Builds export policy name for an NFS share.""" return 'temp_' + str(uuid.uuid1()).replace('-', '_') @staticmethod def _get_export_policy_name(share): """Builds export policy name for an NFS share.""" return 'policy_' + share['id'].replace('-', '_') @na_utils.trace def _ensure_export_policy(self, share, share_name): """Ensures a flexvol/share has an export policy. This method ensures a flexvol has an export policy with a name containing the share ID. For legacy reasons, this may not always be the case. """ expected_export_policy = self._get_export_policy_name(share) actual_export_policy = self._client.get_nfs_export_policy_for_volume( share_name) if actual_export_policy == expected_export_policy: return elif actual_export_policy == 'default': self._client.create_nfs_export_policy(expected_export_policy) self._client.set_nfs_export_policy_for_volume( share_name, expected_export_policy) else: self._client.rename_nfs_export_policy(actual_export_policy, expected_export_policy) @na_utils.trace def _get_auth_methods(self): """Returns authentication methods for export policy rules. This method returns the authentication methods to be configure in an export policy rule, based on security services configuration set in the current Vserver. If Kerberos is enabled in vServer LIFs, the auth methods will be configure to support 'krb5', 'krb5i' and 'krb5p'. The default authentication method is 'sys' (AUTH_SYS). 
""" kerberos_enabled = self._client.is_kerberos_enabled() return ['krb5', 'krb5i', 'krb5p'] if kerberos_enabled else ['sys'] @na_utils.trace def cleanup_demoted_replica(self, share, share_name): """Cleans up export NFS policy for a demoted replica.""" self.delete_share(share, share_name) return ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/options.py0000664000175000017500000005520700000000000022444 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contains configuration options for NetApp drivers. Common place to hold configuration options for all NetApp drivers. Options need to be grouped into granular units to be able to be reused by different modules and classes. This does not restrict declaring options in individual modules. If options are not re usable then can be declared in individual modules. It is recommended to Keep options at a single place to ensure re usability and better management of configuration options. 
""" from oslo_config import cfg from oslo_config import types netapp_proxy_opts = [ cfg.StrOpt('netapp_storage_family', default='ontap_cluster', help=('The storage family type used on the storage system; ' 'valid values include ontap_cluster for using ' 'clustered Data ONTAP.')), ] netapp_connection_opts = [ cfg.HostAddressOpt('netapp_server_hostname', help='The hostname (or IP address) for the storage ' 'system.'), cfg.PortOpt('netapp_server_port', help=('The TCP port to use for communication with the storage ' 'system or proxy server. If not specified, Data ONTAP ' 'drivers will use 80 for HTTP and 443 for HTTPS.')), cfg.BoolOpt('netapp_use_legacy_client', default=True, help=('The ONTAP client used for retrieving and modifying ' 'data on the storage. The legacy client relies mostly ' 'on ZAPI calls, only using REST calls for SVM migrate ' 'feature. If set to False, the new REST client is used, ' 'which runs REST calls if supported, otherwise falls ' 'back to the equivalent ZAPI call.')), ] netapp_transport_opts = [ cfg.StrOpt('netapp_transport_type', default='http', help=('The transport protocol used when communicating with ' 'the storage system or proxy server. Valid values are ' 'http or https.')), cfg.StrOpt('netapp_ssl_cert_path', help=("The path to a CA_BUNDLE file or directory with " "certificates of trusted CA. If set to a directory, it " "must have been processed using the c_rehash utility " "supplied with OpenSSL. 
If not informed, it will use the " "Mozilla's carefully curated collection of Root " "Certificates for validating the trustworthiness of SSL " "certificates.")), ] netapp_basicauth_opts = [ cfg.StrOpt('netapp_login', help=('Administrative user account name used to access the ' 'storage system.')), cfg.StrOpt('netapp_password', help=('Password for the administrative user account ' 'specified in the netapp_login option.'), secret=True), ] netapp_certificateauth_opts = [ cfg.StrOpt('netapp_private_key_file', sample_default='/path/to/private_key.key,', regex=r'^/.+', help=('For self signed certificate: This file contains the' ' private key associated with the self-signed' ' certificate. It is a sensitive file that should be' ' kept secure and protected. The private key is used to' ' sign the certificate and establish the authenticity' ' and integrity of the certificate during the' ' authentication process.' ' For ca verified certificate: This file contains the' ' private key associated with the certificate. It is' ' generated when creating the certificate signing' ' request (CSR) and should be kept secure and protected.' ' The private key is used to sign the CSR and later used' ' to establish secure connections and authenticate the' ' entity.')), cfg.StrOpt('netapp_certificate_file', sample_default='/path/to/certificate.pem', regex=r'^/.+', help=('For self signed certificate: This file contains the' ' self-signed digital certificate itself. It includes' ' information about the entity such as the common name' ' (e.g., domain name), organization details, validity' ' period, and public key. The certificate file is' ' generated based on the private key and is used by' ' clients or systems to verify the entity identity' ' during the authentication process.' ' For ca verified certificate: This file contains the' ' digital certificate issued by the trusted third-party' ' certificate authority (CA). 
It includes information' ' about the entity identity, public key, and the CA that' ' issued the certificate. The certificate file is used' ' by clients or systems to verify the authenticity and' ' integrity of the entity during the authentication' ' process.')), cfg.StrOpt('netapp_ca_certificate_file', sample_default='/path/to/ca_certificate.crt', regex=r'^/.+', help=('This is applicable only for ca verified certificate.' ' This file contains the public key certificate of' ' the trusted third-party certificate authority (CA) that' ' issued the certificate. It is used by clients or ' ' systems to validate the authenticity of the certificate' ' presented by the entity. The CA certificate file' ' is typically pre-configured in the trust store' ' of clients or systems to establish trust in' ' certificates issued by that CA.')), cfg.BoolOpt('netapp_certificate_host_validation', default=False, help=('Enable certificate verification')), ] netapp_provisioning_opts = [ cfg.ListOpt('netapp_enabled_share_protocols', default=['nfs3', 'nfs4.0'], help='The NFS protocol versions that will be enabled. ' 'Supported values include nfs3, nfs4.0, nfs4.1. This ' 'option only applies when the option ' 'driver_handles_share_servers is set to True. '), cfg.StrOpt('netapp_volume_name_template', help='NetApp volume name template.', default='share_%(share_id)s'), cfg.StrOpt('netapp_vserver_name_template', default='os_%s', help='Name template to use for new Vserver. 
' 'When using CIFS protocol make sure to not ' 'configure characters illegal in DNS hostnames.'), cfg.StrOpt('netapp_qos_policy_group_name_template', help='NetApp QoS policy group name template.', default='qos_share_%(share_id)s'), cfg.StrOpt('netapp_port_name_search_pattern', default='(.*)', help='Pattern for overriding the selection of network ports ' 'on which to create Vserver LIFs.'), cfg.StrOpt('netapp_lif_name_template', default='os_%(net_allocation_id)s', help='Logical interface (LIF) name template'), cfg.StrOpt('netapp_identity_auth_token_path', default='', help='Path to interact with auth tokens'), cfg.StrOpt('netapp_aggregate_name_search_pattern', default='(.*)', help='Pattern for searching available aggregates ' 'for provisioning.'), cfg.StrOpt('netapp_root_volume_aggregate', help='Name of aggregate to create Vserver root volumes on. ' 'This option only applies when the option ' 'driver_handles_share_servers is set to True.'), cfg.StrOpt('netapp_root_volume', default='root', help='Root volume name.'), cfg.IntOpt('netapp_delete_retention_hours', min=0, default=12, help='The number of hours that a deleted volume should be ' 'retained before the delete is completed.'), cfg.IntOpt('netapp_volume_snapshot_reserve_percent', min=0, max=90, default=5, help='The percentage of share space set aside as reserve for ' 'snapshot usage; valid values range from 0 to 90.'), cfg.StrOpt('netapp_reset_snapdir_visibility', choices=['visible', 'hidden', 'default'], default="default", help="This option forces all existing shares to have their " "snapshot directory visibility set to either 'visible' or " "'hidden' during driver startup. If set to 'default', " "nothing will be changed during startup. 
This will not " "affect new shares, which will have their snapshot " "directory always visible, unless toggled by the share " "type extra spec 'netapp:hide_snapdir'."), cfg.ListOpt('netapp_volume_snapshot_policy_exceptions', help='NetApp volume Snapshot policy names which will not ' 'be overriden by extra-specs.', default=['ec2_backups']), cfg.StrOpt('netapp_snapmirror_policy_name_svm_template', help='NetApp SnapMirror policy name template for Storage ' 'Virtual Machines (Vservers).', default='snapmirror_policy_%(share_server_id)s'), cfg.ListOpt('netapp_fpolicy_default_file_operations', help='NetApp FPolicy file operations to apply to a FPolicy ' 'event, when not provided by the user using ' '"netapp:fpolicy_file_operations" extra-spec.', default=['create', 'write', 'rename']), cfg.StrOpt('netapp_fpolicy_policy_name_template', help='NetApp FPolicy policy name template.', default='fpolicy_policy_%(share_id)s'), cfg.StrOpt('netapp_fpolicy_event_name_template', help='NetApp FPolicy policy name template.', default='fpolicy_event_%(protocol)s_%(share_id)s'), cfg.IntOpt('netapp_cached_aggregates_status_lifetime', min=0, default=60, help='The maximum time in seconds that the cached aggregates ' 'status will be considered valid. Trying to read the ' 'expired cache leads to refreshing it.'), cfg.BoolOpt('netapp_enable_flexgroup', default=False, help='Specify if the FlexGroup pool is enabled. When it is ' 'enabled, the driver will report a single pool ' 'representing all aggregates (ONTAP chooses on which the ' 'share will be allocated). If you want to Manila control ' 'the aggregate selection, you can configure its custom ' 'FlexGroup pools through netapp_flexgroup_pools option. ' 'The FlexGroup placement is done either by ONTAP or ' 'Manila, not both.'), cfg.MultiOpt('netapp_flexgroup_pools', item_type=types.Dict(value_type=types.String()), default={}, help="Multi opt of dict to represent the FlexGroup pools. 
" "A FlexGroup pool is configured with its name and its " "list of aggregates. Specify this option as many times " "as you have FlexGroup pools. Each entry takes the " "dict config form: " "netapp_flexgroup_pools = " ": .."), cfg.BoolOpt('netapp_flexgroup_pool_only', default=False, help='Specify if the FlexVol pools must not be reported when ' 'the netapp_enable_flexgroup is enabled.'), cfg.IntOpt('netapp_flexgroup_volume_online_timeout', min=60, default=360, # Default to six minutes help='Sets time in seconds to wait for a FlexGroup volume ' 'create to complete and go online.'), cfg.IntOpt('netapp_flexgroup_aggregate_not_busy_timeout', min=60, default=360, # Default to six minutes help='Provisioning FlexGroup share requires that all of its ' 'aggregates to not be busy deploying another volume. So, ' 'sets time in seconds to retry to create the FlexGroup ' 'share.'), cfg.IntOpt('netapp_delete_busy_flexgroup_snapshot_timeout', min=60, default=360, # Default to six minutes help='Sets time in seconds to wait for a FlexGroup snapshot ' 'to not be busy with clones after splitting them.'), cfg.IntOpt('netapp_rest_operation_timeout', min=60, default=60, # Default to one minutes help='Sets maximum amount of time in seconds to wait for a ' 'synchronous ONTAP REST API operation to be completed.'), cfg.IntOpt('netapp_security_cert_expire_days', min=1, max=3652, default=365, help='Defines the expiration time (in days) for the ' 'certificate created during the vserver creation. This ' 'option only applies when the option ' 'driver_handles_share_servers is set to True.'), cfg.BoolOpt('netapp_restrict_lif_creation_per_ha_pair', default=False, help='Prevent the creation of a share server if total number' ' of data LIFs on one node of HA pair, including those' ' that can be migrated in case of failure, exceeds the ' 'maximum data LIFs supported by the node. 
This option ' 'guarantees that, in the event of a node failure, the' ' partner node will be able to takeover all data LIFs.'), cfg.BoolOpt('netapp_cifs_aes_encryption', default=False, help='This option enable/disable AES encryption for the share' ' server based on the parameter value (True/False).'), cfg.BoolOpt('netapp_enable_logical_space_reporting', default=False, help='This option enables the logical space reporting on a ' 'newly created vserver and logical space accounting ' 'on newly created volumes on this vserver. ') ] netapp_cluster_opts = [ cfg.StrOpt('netapp_vserver', help=('This option specifies the Storage Virtual Machine ' '(i.e. Vserver) name on the storage cluster on which ' 'provisioning of file storage shares should occur. This ' 'option should only be specified when the option ' 'driver_handles_share_servers is set to False (i.e. the ' 'driver is managing shares on a single pre-configured ' 'Vserver).')), cfg.StrOpt('netapp_cluster_name', help=('This option specifies the Cluster Name on which ' 'provisioning of file storage shares should occur. ' 'If not set, the driver will try to discover by ' 'API call.')), ] netapp_support_opts = [ cfg.StrOpt('netapp_trace_flags', help=('Comma-separated list of options that control which ' 'trace info is written to the debug logs. Values ' 'include method and api. API logging can further be ' 'filtered with the ' '``netapp_api_trace_pattern option``.')), cfg.StrOpt('netapp_api_trace_pattern', default='(.*)', help=('A regular expression to limit the API tracing. This ' 'option is honored only if enabling ``api`` tracing ' 'with the ``netapp_trace_flags`` option. 
By default, ' 'all APIs will be traced.')), ] netapp_data_motion_opts = [ cfg.IntOpt('netapp_snapmirror_quiesce_timeout', min=0, default=3600, # One Hour help='The maximum time in seconds to wait for existing ' 'snapmirror transfers to complete before aborting when ' 'promoting a replica.'), cfg.IntOpt('netapp_snapmirror_release_timeout', min=0, default=3600, # One Hour help='The maximum time in seconds to wait for a snapmirror ' 'release when breaking snapmirror relationships.'), cfg.StrOpt('netapp_snapmirror_schedule', default='hourly', # One Hour help='An interval in either minutes or hours used to update ' 'the SnapMirror relationship. Few valid values are: ' '5min, 10min, 30min, hourly etc. The schedule at the ' '"destination" host will be the one that will be ' 'considered when creating a new replica, or promoting ' 'a replica'), cfg.IntOpt('netapp_volume_move_cutover_timeout', min=0, default=3600, # One Hour, help='The maximum time in seconds to wait for the completion ' 'of a volume move operation after the cutover ' 'was triggered.'), cfg.IntOpt('netapp_start_volume_move_timeout', min=0, default=3600, # One Hour, help='The maximum time in seconds to wait for the completion ' 'of a volume clone split operation in order to start a ' 'volume move.'), cfg.IntOpt('netapp_migration_cancel_timeout', min=0, default=3600, # One Hour, help='The maximum time in seconds that migration cancel ' 'waits for all migration operations be completely ' 'aborted.'), cfg.IntOpt('netapp_server_migration_state_change_timeout', min=0, default=3600, # One hour, help='The maximum time in seconds that a share server ' 'migration waits for a vserver to change its internal ' 'states.'), cfg.BoolOpt('netapp_server_migration_check_capacity', default=True, help='Specify if the capacity check must be made by the ' 'driver while performing a share server migration. 
' 'If enabled, the driver will validate if the destination ' 'backend can hold all shares and snapshots capacities ' 'from the source share server.'), cfg.IntOpt('netapp_server_migration_state_change_timeout', min=0, default=3600, # One hour, help='The maximum time in seconds that a share server ' 'migration waits for a vserver to change its internal ' 'states.'), cfg.IntOpt('netapp_mount_replica_timeout', min=0, default=3600, # One Hour help='The maximum time in seconds to wait for mounting ' 'a replica.'), ] netapp_backup_opts = [ cfg.ListOpt('netapp_enabled_backup_types', default=[], help='Specify compatible backup_types for backend to provision' ' backup share for SnapVault relationship. Multiple ' 'backup_types can be provided. If multiple backup types ' 'are enabled, create separate config sections for each ' 'backup type specifying the "netapp_backup_vserver", ' '"netapp_backup_backend_section_name", ' '"netapp_backup_volume", and ' '"netapp_snapmirror_job_timeout" as appropriate.' ' Example- netapp_enabled_backup_types = eng_backup,' ' finance_backup'), cfg.StrOpt('netapp_backup_backend_section_name', help='Backend (ONTAP cluster) name where backup volume will be ' 'provisioned. This is one of the backend which is enabled ' 'in manila.conf file.'), cfg.StrOpt('netapp_backup_vserver', default='', help='vserver name of backend that is use for backup the share.' ' When user provide vserver value then backup volume will ' ' be created under this vserver '), cfg.StrOpt('netapp_backup_volume', default='', help='Specify backup share name in case user wanted to backup ' 'the share. Some case user has dedicated volume for backup' ' in this case use can provide dedicated volume. 
' 'backup_share_server must be specified if backup_share is' ' provided'), cfg.IntOpt('netapp_snapmirror_job_timeout', min=0, default=1800, # 30 minutes help='The maximum time in seconds to wait for a snapmirror ' 'related operation to backup to complete.'), ] CONF = cfg.CONF CONF.register_opts(netapp_proxy_opts) CONF.register_opts(netapp_connection_opts) CONF.register_opts(netapp_transport_opts) CONF.register_opts(netapp_basicauth_opts) CONF.register_opts(netapp_certificateauth_opts) CONF.register_opts(netapp_provisioning_opts) CONF.register_opts(netapp_support_opts) CONF.register_opts(netapp_data_motion_opts) CONF.register_opts(netapp_backup_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/netapp/utils.py0000664000175000017500000003221200000000000022100 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Bob Callaway. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utilities for NetApp drivers.""" from collections import abc import decimal import platform import re from oslo_concurrency import processutils as putils from oslo_log import log from oslo_utils import timeutils from oslo_utils import units from manila import exception from manila.i18n import _ from manila import version LOG = log.getLogger(__name__) VALID_TRACE_FLAGS = ['method', 'api'] TRACE_METHOD = False TRACE_API = False API_TRACE_PATTERN = '(.*)' SVM_MIGRATE_POLICY_TYPE_NAME = 'migrate' MIGRATION_OPERATION_ID_KEY = 'migration_operation_id' MIGRATION_STATE_READY_FOR_CUTOVER = 'ready_for_cutover' MIGRATION_STATE_READY_FOR_SOURCE_CLEANUP = 'ready_for_source_cleanup' MIGRATION_STATE_MIGRATE_COMPLETE = 'migrate_complete' MIGRATION_STATE_MIGRATE_PAUSED = 'migrate_paused' EXTENDED_DATA_PROTECTION_TYPE = 'extended_data_protection' MIRROR_ALL_SNAP_POLICY = 'MirrorAllSnapshots' DATA_PROTECTION_TYPE = 'data_protection' FLEXGROUP_STYLE_EXTENDED = 'flexgroup' FLEXVOL_STYLE_EXTENDED = 'flexvol' FLEXGROUP_DEFAULT_POOL_NAME = 'flexgroup_auto' ABSOLUTE_MAX_INODES = 2_040_109_45 class NetAppDriverException(exception.ShareBackendException): message = _("NetApp Manila Driver exception.") def validate_driver_instantiation(**kwargs): """Checks if a driver is instantiated other than by the unified driver. Helps check direct instantiation of netapp drivers. Call this function in every netapp block driver constructor. 
""" if kwargs and kwargs.get('netapp_mode') == 'proxy': return LOG.warning('Please use NetAppDriver in the configuration file ' 'to load the driver instead of directly specifying ' 'the driver module name.') def check_flags(required_flags, configuration): """Ensure that the flags we care about are set.""" for flag in required_flags: if getattr(configuration, flag, None) is None: msg = _('Configuration value %s is not set.') % flag raise exception.InvalidInput(reason=msg) def round_down(value, precision='0.00'): """Round a number downward using a specified level of precision. Example: round_down(float(total_space_in_bytes) / units.Gi, '0.01') """ return float(decimal.Decimal(str(value)).quantize( decimal.Decimal(precision), rounding=decimal.ROUND_DOWN)) def setup_tracing(trace_flags_string, api_trace_pattern=API_TRACE_PATTERN): global TRACE_METHOD global TRACE_API global API_TRACE_PATTERN TRACE_METHOD = False TRACE_API = False API_TRACE_PATTERN = api_trace_pattern if trace_flags_string: flags = trace_flags_string.split(',') flags = [flag.strip() for flag in flags] for invalid_flag in list(set(flags) - set(VALID_TRACE_FLAGS)): LOG.warning('Invalid trace flag: %s', invalid_flag) try: re.compile(api_trace_pattern) except re.error: msg = _('Cannot parse the API trace pattern. 
%s is not a ' 'valid python regular expression.') % api_trace_pattern raise exception.BadConfigurationException(reason=msg) TRACE_METHOD = 'method' in flags TRACE_API = 'api' in flags def trace(f): def trace_wrapper(self, *args, **kwargs): if TRACE_METHOD: LOG.debug('Entering method %s', f.__name__) result = f(self, *args, **kwargs) if TRACE_METHOD: LOG.debug('Leaving method %s', f.__name__) return result return trace_wrapper def convert_to_list(value): if value is None: return [] elif isinstance(value, str): return [value] elif isinstance(value, abc.Iterable): return list(value) else: return [value] def convert_string_to_list(string, separator=','): return [elem.strip() for elem in string.split(separator)] def get_relationship_type(is_flexgroup): """Returns the snapmirror relationship type.""" return (EXTENDED_DATA_PROTECTION_TYPE if is_flexgroup else DATA_PROTECTION_TYPE) def is_style_extended_flexgroup(style_extended): """Returns whether the style is extended type or not.""" return style_extended == FLEXGROUP_STYLE_EXTENDED def parse_flexgroup_pool_config(config, cluster_aggr_set={}, check=False): """Returns the dict with the FlexGroup pools and if it is auto provisioned. :param config: the configuration flexgroup list of dict. :param cluster_aggr_set: the set of aggregates in the cluster. :param check: should check the config is correct. """ flexgroup_pools_map = {} aggr_list_used = [] for pool_dic in config: for pool_name, aggr_str in pool_dic.items(): aggr_name_list = aggr_str.split() if not check: aggr_name_list.sort() flexgroup_pools_map[pool_name] = aggr_name_list continue if pool_name in cluster_aggr_set: msg = _('The %s FlexGroup pool name is not valid, because ' 'it is a cluster aggregate name. 
Ensure that the ' 'configuration option netapp_flexgroup_pools is ' 'set correctly.') raise exception.NetAppException(msg % pool_name) aggr_name_set = set(aggr_name_list) if len(aggr_name_set) != len(aggr_name_list): msg = _('There is a repeated aggregate name in the ' 'FlexGroup pool %s definition. Ensure that the ' 'configuration option netapp_flexgroup_pools is ' 'set correctly.') raise exception.NetAppException(msg % pool_name) not_found_aggr = aggr_name_set - cluster_aggr_set if not_found_aggr: not_found_list = [str(s) for s in not_found_aggr] not_found_str = ", ".join(not_found_list) msg = _('There is an aggregate name in the FlexGroup pool ' '%(pool)s that is not in the cluster: %(aggr)s. ' 'Ensure that the configuration option ' 'netapp_flexgroup_pools is set correctly.') msg_args = {'pool': pool_name, 'aggr': not_found_str} raise exception.NetAppException(msg % msg_args) aggr_name_list.sort() aggr_name_list_str = "".join(aggr_name_list) if aggr_name_list_str in aggr_list_used: msg = _('The FlexGroup pool %s is duplicated. Ensure that ' 'the configuration option netapp_flexgroup_pools ' 'is set correctly.') raise exception.NetAppException(msg % pool_name) aggr_list_used.append(aggr_name_list_str) flexgroup_pools_map[pool_name] = aggr_name_list return flexgroup_pools_map def calculate_max_files(size, max_files_multiplier, max_files=None): """Returns the max_files as integer or None. 
:param size: volume size in gb :param max_files_multiplier: config out of string extra spec :param max_files: pass max_files option """ if size is None or max_files_multiplier is None: return None if max_files is not None: msg = _('Something went wrong: ' 'validate_provisioning_options_for_share should have made ' 'sure that max_files and max_files_multiplier cannot be set ' 'at same time.') raise exception.NetAppException(msg) # size_gb * units.Mi = size_kib # calculation based upon TR-4617 max_files = int(size * units.Mi * float(max_files_multiplier) / 33.6925) return min(max_files, ABSOLUTE_MAX_INODES) class OpenStackInfo(object): """OS/distribution, release, and version. NetApp uses these fields as content for EMS log entry. """ PACKAGE_NAME = 'python3-manila' def __init__(self): self._version = 'unknown version' self._release = 'unknown release' self._vendor = 'unknown vendor' self._platform = 'unknown platform' def _update_version_from_version_string(self): try: self._version = version.version_info.version_string() except Exception: pass def _update_release_from_release_string(self): try: self._release = version.version_info.release_string() except Exception: pass def _update_platform(self): try: self._platform = platform.platform() except Exception: pass @staticmethod def _get_version_info_version(): return version.version_info.version @staticmethod def _get_version_info_release(): return version.version_info.release_string() def _update_info_from_version_info(self): try: ver = self._get_version_info_version() if ver: self._version = ver except Exception: pass try: rel = self._get_version_info_release() if rel: self._release = rel except Exception: pass # RDO, RHEL-OSP, Mirantis on Redhat, SUSE. 
def _update_info_from_rpm(self): LOG.debug('Trying rpm command.') try: out, err = putils.execute("rpm", "-q", "--queryformat", "'%{version}\t%{release}\t%{vendor}'", self.PACKAGE_NAME) if not out: LOG.info('No rpm info found for %(pkg)s package.', { 'pkg': self.PACKAGE_NAME}) return False parts = out.split() self._version = parts[0] self._release = parts[1] self._vendor = ' '.join(parts[2::]) return True except Exception as e: LOG.info('Could not run rpm command: %(msg)s.', { 'msg': e}) return False # Ubuntu, Mirantis on Ubuntu. def _update_info_from_dpkg(self): LOG.debug('Trying dpkg-query command.') try: _vendor = None out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'", self.PACKAGE_NAME) if not out: LOG.info( 'No dpkg-query info found for %(pkg)s package.', { 'pkg': self.PACKAGE_NAME}) return False # Debian format: [epoch:]upstream_version[-debian_revision] deb_version = out # In case epoch or revision is missing, copy entire string. _release = deb_version if ':' in deb_version: deb_epoch, upstream_version = deb_version.split(':') _release = upstream_version if '-' in deb_version: deb_revision = deb_version.split('-')[1] _vendor = deb_revision self._release = _release if _vendor: self._vendor = _vendor return True except Exception as e: LOG.info('Could not run dpkg-query command: %(msg)s.', { 'msg': e}) return False def _update_openstack_info(self): self._update_version_from_version_string() self._update_release_from_release_string() self._update_platform() # Some distributions override with more meaningful information. self._update_info_from_version_info() # See if we have still more targeted info from rpm or apt. 
found_package = self._update_info_from_rpm() if not found_package: self._update_info_from_dpkg() def info(self): self._update_openstack_info() return '%(version)s|%(release)s|%(vendor)s|%(platform)s' % { 'version': self._version, 'release': self._release, 'vendor': self._vendor, 'platform': self._platform} class DataCache(object): """DataCache class for caching NetApp information. The cache validity is measured by a stop watch that is not thread-safe. """ def __init__(self, duration): self._stop_watch = timeutils.StopWatch(duration) self._cached_data = None def is_expired(self): return not self._stop_watch.has_started() or self._stop_watch.expired() def get_data(self): return self._cached_data def update_data(self, cached_data): if not self._stop_watch.has_started(): self._stop_watch.start() else: self._stop_watch.restart() self._cached_data = cached_data ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9296715 manila-21.0.0/manila/share/drivers/nexenta/0000775000175000017500000000000000000000000020541 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/nexenta/__init__.py0000664000175000017500000000000000000000000022640 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9336715 manila-21.0.0/manila/share/drivers/nexenta/ns4/0000775000175000017500000000000000000000000021245 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/nexenta/ns4/__init__.py0000664000175000017500000000000000000000000023344 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/share/drivers/nexenta/ns4/jsonrpc.py0000664000175000017500000000573700000000000023311 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client ===================================================================== .. automodule:: nexenta.jsonrpc """ import base64 import json import requests from oslo_log import log from oslo_serialization import jsonutils from manila import exception from manila import utils LOG = log.getLogger(__name__) class NexentaJSONProxy(object): retry_exc_tuple = (requests.exceptions.ConnectionError,) def __init__(self, scheme, host, port, path, user, password, auto=False, obj=None, method=None): self.scheme = scheme.lower() self.host = host self.port = port self.path = path self.user = user self.password = password self.auto = auto self.obj = obj self.method = method def __getattr__(self, name): if not self.obj: obj, method = name, None elif not self.method: obj, method = self.obj, name else: obj, method = '%s.%s' % (self.obj, self.method), name return NexentaJSONProxy(self.scheme, self.host, self.port, self.path, self.user, self.password, self.auto, obj, method) @property def url(self): return '%s://%s:%s%s' % (self.scheme, self.host, self.port, self.path) def __hash__(self): return self.url.__hash__() def __repr__(self): return 'NMS proxy: %s' % self.url 
@utils.retry(retry_param=retry_exc_tuple, retries=6) def __call__(self, *args): data = jsonutils.dumps({ 'object': self.obj, 'method': self.method, 'params': args, }) auth = base64.b64encode( ('%s:%s' % (self.user, self.password)).encode('utf-8')) headers = { 'Content-Type': 'application/json', 'Authorization': 'Basic %s' % auth, } LOG.debug('Sending JSON data: %s', data) r = requests.post(self.url, data=data, headers=headers, timeout=60) response = json.loads(r.content) if r.content else None LOG.debug('Got response: %s', response) if response.get('error') is not None: message = response['error'].get('message', '') raise exception.NexentaException(reason=message) return response.get('result') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/nexenta/ns4/nexenta_nas.py0000664000175000017500000001325600000000000024131 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.nexenta.ns4 import nexenta_nfs_helper from manila.share.drivers.nexenta import options VERSION = '1.0' LOG = log.getLogger(__name__) class NexentaNasDriver(driver.ShareDriver): """Nexenta Share Driver. Executes commands relating to Shares. API version history: 1.0 - Initial version. 
""" def __init__(self, *args, **kwargs): """Do initialization.""" LOG.debug('Initializing Nexenta driver.') super(NexentaNasDriver, self).__init__(False, *args, **kwargs) self.configuration = kwargs.get('configuration') if self.configuration: self.configuration.append_config_values( options.nexenta_connection_opts) self.configuration.append_config_values( options.nexenta_nfs_opts) self.configuration.append_config_values( options.nexenta_dataset_opts) self.helper = nexenta_nfs_helper.NFSHelper(self.configuration) else: raise exception.BadConfigurationException( reason=_('Nexenta configuration missing.')) @property def share_backend_name(self): if not hasattr(self, '_share_backend_name'): self._share_backend_name = None if self.configuration: self._share_backend_name = self.configuration.safe_get( 'share_backend_name') if not self._share_backend_name: self._share_backend_name = 'NexentaStor4' return self._share_backend_name def do_setup(self, context): """Any initialization the Nexenta NAS driver does while starting.""" LOG.debug('Setting up the NexentaStor4 plugin.') return self.helper.do_setup() def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" self.helper.check_for_setup_error() def create_share(self, context, share, share_server=None): """Create a share.""" LOG.debug('Creating share %s.', share['name']) return self.helper.create_filesystem(share) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create share from snapshot.""" LOG.debug('Creating share from snapshot %s.', snapshot['name']) return self.helper.create_share_from_snapshot(share, snapshot) def delete_share(self, context, share, share_server=None): """Delete a share.""" LOG.debug('Deleting share %s.', share['name']) self.helper.delete_share(share['name']) def extend_share(self, share, new_size, share_server=None): """Extends a share.""" LOG.debug('Extending share %(name)s to %(size)sG.', { 'name': 
share['name'], 'size': new_size}) self.helper.set_quota(share['name'], new_size) def create_snapshot(self, context, snapshot, share_server=None): """Create a snapshot.""" LOG.debug('Creating a snapshot of share %s.', snapshot['share_name']) snap_id = self.helper.create_snapshot( snapshot['share_name'], snapshot['name']) LOG.info('Created snapshot %s.', snap_id) def delete_snapshot(self, context, snapshot, share_server=None): """Delete a snapshot.""" LOG.debug('Deleting snapshot %(shr_name)s@%(snap_name)s.', { 'shr_name': snapshot['share_name'], 'snap_name': snapshot['name']}) self.helper.delete_snapshot(snapshot['share_name'], snapshot['name']) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share. :param context: The `context.RequestContext` object for the request :param share: Share that will have its access rules updated. :param access_rules: All access rules for given share. This list is enough to update the access rules for given share. :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. Not used by this driver. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. Not used by this driver. :param update_rules: Empty List or List of access rules which should be updated. access_rules already contains these rules. :param share_server: Data structure with share server information. Not used by this driver. 
""" self.helper.update_access(share['name'], access_rules) def _update_share_stats(self, data=None): super(NexentaNasDriver, self)._update_share_stats() data = self.helper.update_share_stats() data['driver_version'] = VERSION data['share_backend_name'] = self.share_backend_name self._stats.update(data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/nexenta/ns4/nexenta_nfs_helper.py0000664000175000017500000002272500000000000025476 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from oslo_utils import excutils from manila.common import constants as common from manila import exception from manila.i18n import _ from manila.share.drivers.nexenta.ns4 import jsonrpc from manila.share.drivers.nexenta import utils LOG = log.getLogger(__name__) NOT_EXIST = 'does not exist' DEP_CLONES = 'has dependent clones' class NFSHelper(object): def __init__(self, configuration): self.configuration = configuration self.nfs_mount_point_base = ( self.configuration.nexenta_mount_point_base) self.dataset_compression = ( self.configuration.nexenta_dataset_compression) self.dataset_dedupe = self.configuration.nexenta_dataset_dedupe self.nms = None self.nms_protocol = self.configuration.nexenta_rest_protocol self.nms_host = self.configuration.nexenta_nas_host self.volume = self.configuration.nexenta_volume self.share = self.configuration.nexenta_nfs_share self.nms_port = self.configuration.nexenta_rest_port self.nms_user = self.configuration.nexenta_user self.nfs = self.configuration.nexenta_nfs self.nms_password = self.configuration.nexenta_password self.storage_protocol = 'NFS' def do_setup(self): if self.nms_protocol == 'auto': protocol, auto = 'http', True else: protocol, auto = self.nms_protocol, False path = '/rest/nms/' self.nms = jsonrpc.NexentaJSONProxy( protocol, self.nms_host, self.nms_port, path, self.nms_user, self.nms_password, auto=auto) def check_for_setup_error(self): if not self.nms.volume.object_exists(self.volume): raise exception.NexentaException(reason=_( "Volume %s does not exist in NexentaStor appliance.") % self.volume) folder = '%s/%s' % (self.volume, self.share) create_folder_props = { 'recordsize': '4K', 'quota': 'none', 'compression': self.dataset_compression, } if not self.nms.folder.object_exists(folder): self.nms.folder.create_with_props( self.volume, self.share, create_folder_props) def create_filesystem(self, share): """Create file system.""" create_folder_props = { 'recordsize': '4K', 'quota': '%sG' % 
share['size'], 'compression': self.dataset_compression, } if not self.configuration.nexenta_thin_provisioning: create_folder_props['reservation'] = '%sG' % share['size'] parent_path = '%s/%s' % (self.volume, self.share) self.nms.folder.create_with_props( parent_path, share['name'], create_folder_props) path = self._get_share_path(share['name']) return [self._get_location_path(path, share['share_proto'])] def set_quota(self, share_name, new_size): if self.configuration.nexenta_thin_provisioning: quota = '%sG' % new_size self.nms.folder.set_child_prop( self._get_share_path(share_name), 'quota', quota) def _get_location_path(self, path, protocol): location = None if protocol == 'NFS': location = {'path': '%s:/volumes/%s' % (self.nms_host, path)} else: raise exception.InvalidShare( reason=(_('Only NFS protocol is currently supported.'))) return location def delete_share(self, share_name): """Delete share.""" folder = self._get_share_path(share_name) try: self.nms.folder.destroy(folder.strip(), '-r') except exception.NexentaException as e: with excutils.save_and_reraise_exception() as exc: if NOT_EXIST in e.args[0]: LOG.info('Folder %s does not exist, it was ' 'already deleted.', folder) exc.reraise = False def _get_share_path(self, share_name): return '%s/%s/%s' % (self.volume, self.share, share_name) def _get_snapshot_name(self, snapshot_name): return 'snapshot-%s' % snapshot_name def create_snapshot(self, share_name, snapshot_name): """Create a snapshot.""" folder = self._get_share_path(share_name) self.nms.folder.create_snapshot(folder, snapshot_name, '-r') model_update = {'provider_location': '%s@%s' % (folder, snapshot_name)} return model_update def delete_snapshot(self, share_name, snapshot_name): """Deletes snapshot.""" try: self.nms.snapshot.destroy('%s@%s' % ( self._get_share_path(share_name), snapshot_name), '') except exception.NexentaException as e: with excutils.save_and_reraise_exception() as exc: if NOT_EXIST in e.args[0]: LOG.info('Snapshot 
%(folder)s@%(snapshot)s does not ' 'exist, it was already deleted.', { 'folder': share_name, 'snapshot': snapshot_name, }) exc.reraise = False elif DEP_CLONES in e.args[0]: LOG.info( 'Snapshot %(folder)s@%(snapshot)s has dependent ' 'clones, it will be deleted later.', { 'folder': share_name, 'snapshot': snapshot_name }) exc.reraise = False def create_share_from_snapshot(self, share, snapshot): snapshot_name = '%s/%s/%s@%s' % ( self.volume, self.share, snapshot['share_name'], snapshot['name']) self.nms.folder.clone( snapshot_name, '%s/%s/%s' % (self.volume, self.share, share['name'])) path = self._get_share_path(share['name']) return [self._get_location_path(path, share['share_proto'])] def update_access(self, share_name, access_rules): """Update access to the share.""" rw_list = [] ro_list = [] for rule in access_rules: if rule['access_type'].lower() != 'ip': msg = _('Only IP access type is supported.') raise exception.InvalidShareAccess(reason=msg) else: if rule['access_level'] == common.ACCESS_LEVEL_RW: rw_list.append(rule['access_to']) else: ro_list.append(rule['access_to']) share_opts = { 'auth_type': 'none', 'read_write': ':'.join(rw_list), 'read_only': ':'.join(ro_list), 'recursive': 'true', 'anonymous_rw': 'true', 'anonymous': 'true', 'extra_options': 'anon=0', } self.nms.netstorsvc.share_folder( 'svc:/network/nfs/server:default', self._get_share_path(share_name), share_opts) def _get_capacity_info(self): """Calculate available space on the NFS share.""" folder_props = self.nms.folder.get_child_props( '%s/%s' % (self.volume, self.share), 'used|available') free = utils.str2gib_size(folder_props['available']) allocated = utils.str2gib_size(folder_props['used']) return free + allocated, free, allocated def update_share_stats(self): """Update driver capabilities. No way of tracking provisioned capacity on this appliance, not returning any to let the scheduler estimate it. 
""" total, free, allocated = self._get_capacity_info() compression = not self.dataset_compression == 'off' dedupe = not self.dataset_dedupe == 'off' return { 'vendor_name': 'Nexenta', 'storage_protocol': self.storage_protocol, 'nfs_mount_point_base': self.nfs_mount_point_base, 'pools': [{ 'pool_name': self.volume, 'total_capacity_gb': total, 'free_capacity_gb': free, 'reserved_percentage': self.configuration.reserved_share_percentage, 'reserved_snapshot_percentage': (self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), 'reserved_share_extend_percentage': (self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), 'compression': compression, 'dedupe': dedupe, 'max_over_subscription_ratio': ( self.configuration.safe_get( 'max_over_subscription_ratio')), 'thin_provisioning': self.configuration.nexenta_thin_provisioning, }], } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9336715 manila-21.0.0/manila/share/drivers/nexenta/ns5/0000775000175000017500000000000000000000000021246 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/nexenta/ns5/__init__.py0000664000175000017500000000000000000000000023345 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/nexenta/ns5/jsonrpc.py0000664000175000017500000005335000000000000023304 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta by DDN, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import json import posixpath from urllib import parse as urlparse from eventlet import greenthread from oslo_log import log as logging import requests from manila import exception from manila.i18n import _ LOG = logging.getLogger(__name__) class NefException(exception.ManilaException): def __init__(self, data=None, **kwargs): defaults = { 'name': 'NexentaError', 'code': 'EBADMSG', 'source': 'ManilaDriver', 'message': 'Unknown error' } if isinstance(data, dict): for key in defaults: if key in kwargs: continue if key in data: kwargs[key] = data[key] else: kwargs[key] = defaults[key] elif isinstance(data, str): if 'message' not in kwargs: kwargs['message'] = data for key in defaults: if key not in kwargs: kwargs[key] = defaults[key] message = (_('%(message)s (source: %(source)s, ' 'name: %(name)s, code: %(code)s)') % kwargs) self.code = kwargs['code'] del kwargs['message'] super(NefException, self).__init__(message, **kwargs) class NefRequest(object): def __init__(self, proxy, method): self.proxy = proxy self.method = method self.path = None self.lock = False self.time = 0 self.data = [] self.payload = {} self.stat = {} self.hooks = { 'response': self.hook } self.kwargs = { 'hooks': self.hooks, 'timeout': self.proxy.timeout } def __call__(self, path, payload=None): LOG.debug('NEF request start: %(method)s %(path)s %(payload)s', {'method': self.method, 'path': path, 'payload': payload}) if self.method not in ['get', 'delete', 'put', 'post']: message = (_('NEF API does not support %(method)s method'), {'method': self.method}) raise 
NefException(code='EINVAL', message=message) if not path: message = (_('NEF API call requires collection path')) raise NefException(code='EINVAL', message=message) self.path = path if payload: if not isinstance(payload, dict): message = (_('NEF API call payload must be a dictionary')) raise NefException(code='EINVAL', message=message) if self.method in ['get', 'delete']: self.payload = {'params': payload} elif self.method in ['put', 'post']: self.payload = {'data': json.dumps(payload)} try: response = self.request(self.method, self.path, **self.payload) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as error: LOG.debug('Failed to %(method)s %(path)s %(payload)s: %(error)s', {'method': self.method, 'path': self.path, 'payload': self.payload, 'error': error}) if not self.failover(): raise error LOG.debug('Retry initial request after failover: ' '%(method)s %(path)s %(payload)s', {'method': self.method, 'path': self.path, 'payload': self.payload}) response = self.request(self.method, self.path, **self.payload) LOG.debug('NEF request done: %(method)s %(path)s %(payload)s, ' 'total response time: %(time)s seconds, ' 'total requests count: %(count)s, ' 'requests statistics: %(stat)s', {'method': self.method, 'path': self.path, 'payload': self.payload, 'time': self.time, 'count': sum(self.stat.values()), 'stat': self.stat}) if response.ok and not response.content: return None content = json.loads(response.content) if not response.ok: raise NefException(content) if isinstance(content, dict) and 'data' in content: return self.data return content def request(self, method, path, **kwargs): url = self.proxy.url(path) LOG.debug('Perform session request: %(method)s %(url)s %(body)s', {'method': method, 'url': url, 'body': kwargs}) kwargs.update(self.kwargs) return self.proxy.session.request(method, url, **kwargs) def hook(self, response, **kwargs): initial_text = (_('initial request %(method)s %(path)s %(body)s') % {'method': self.method, 'path': 
self.path, 'body': self.payload}) request_text = (_('session request %(method)s %(url)s %(body)s') % {'method': response.request.method, 'url': response.request.url, 'body': response.request.body}) response_text = (_('session response %(code)s %(content)s') % {'code': response.status_code, 'content': response.content}) text = (_('%(request_text)s and %(response_text)s') % {'request_text': request_text, 'response_text': response_text}) LOG.debug('Hook start on %(text)s', {'text': text}) if response.status_code not in self.stat: self.stat[response.status_code] = 0 self.stat[response.status_code] += 1 self.time += response.elapsed.total_seconds() if response.ok and not response.content: LOG.debug('Hook done on %(text)s: ' 'empty response content', {'text': text}) return response if not response.content: message = (_('There is no response content ' 'is available for %(text)s') % {'text': text}) raise NefException(code='ENODATA', message=message) try: content = json.loads(response.content) except (TypeError, ValueError) as error: message = (_('Failed to decode JSON for %(text)s: %(error)s') % {'text': text, 'error': error}) raise NefException(code='ENOMSG', message=message) method = 'get' # pylint: disable=no-member if response.status_code == requests.codes.unauthorized: if self.stat[response.status_code] > self.proxy.retries: raise NefException(content) self.auth() request = response.request.copy() request.headers.update(self.proxy.session.headers) LOG.debug('Retry last %(text)s after authentication', {'text': request_text}) return self.proxy.session.send(request, **kwargs) elif response.status_code == requests.codes.not_found: if self.lock: LOG.debug('Hook done on %(text)s: ' 'nested failover is detected', {'text': text}) return response if self.stat[response.status_code] > self.proxy.retries: raise NefException(content) if not self.failover(): LOG.debug('Hook done on %(text)s: ' 'no valid hosts found', {'text': text}) return response LOG.debug('Retry %(text)s after 
failover', {'text': initial_text}) return self.request(self.method, self.path, **self.payload) elif response.status_code == requests.codes.server_error: if not (isinstance(content, dict) and 'code' in content and content['code'] == 'EBUSY'): raise NefException(content) if self.stat[response.status_code] > self.proxy.retries: raise NefException(content) self.proxy.delay(self.stat[response.status_code]) LOG.debug('Retry %(text)s after delay', {'text': initial_text}) return self.request(self.method, self.path, **self.payload) elif response.status_code == requests.codes.accepted: path = self.getpath(content, 'monitor') if not path: message = (_('There is no monitor path ' 'available for %(text)s') % {'text': text}) raise NefException(code='ENOMSG', message=message) self.proxy.delay(self.stat[response.status_code]) return self.request(method, path) elif response.status_code == requests.codes.ok: if not (isinstance(content, dict) and 'data' in content): LOG.debug('Hook done on %(text)s: there ' 'is no JSON data available', {'text': text}) return response LOG.debug('Append %(count)s data items to response', {'count': len(content['data'])}) self.data += content['data'] path = self.getpath(content, 'next') if not path: LOG.debug('Hook done on %(text)s: there ' 'is no next path available', {'text': text}) return response LOG.debug('Perform next session request %(method)s %(path)s', {'method': method, 'path': path}) return self.request(method, path) LOG.debug('Hook done on %(text)s and ' 'returned original response', {'text': text}) return response def auth(self): method = 'post' path = 'auth/login' payload = {'username': self.proxy.username, 'password': self.proxy.password} data = json.dumps(payload) kwargs = {'data': data} self.proxy.delete_bearer() response = self.request(method, path, **kwargs) content = json.loads(response.content) if not (isinstance(content, dict) and 'token' in content): message = (_('There is no authentication token available ' 'for authentication 
request %(method)s %(url)s ' '%(body)s and response %(code)s %(content)s') % {'method': response.request.method, 'url': response.request.url, 'body': response.request.body, 'code': response.status_code, 'content': response.content}) raise NefException(code='ENODATA', message=message) token = content['token'] self.proxy.update_token(token) def failover(self): result = False self.lock = True method = 'get' host = self.proxy.host root = self.proxy.root for item in self.proxy.hosts: if item == host: continue self.proxy.update_host(item) LOG.debug('Try to failover path ' '%(root)s to host %(host)s', {'root': root, 'host': item}) try: response = self.request(method, root) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as error: LOG.debug('Skip unavailable host %(host)s ' 'due to error: %(error)s', {'host': item, 'error': error}) continue LOG.debug('Failover result: %(code)s %(content)s', {'code': response.status_code, 'content': response.content}) # pylint: disable=no-member if response.status_code == requests.codes.ok: LOG.debug('Successful failover path ' '%(root)s to host %(host)s', {'root': root, 'host': item}) self.proxy.update_lock() result = True break else: LOG.debug('Skip unsuitable host %(host)s: ' 'there is no %(root)s path found', {'host': item, 'root': root}) self.lock = False return result @staticmethod def getpath(content, name): if isinstance(content, dict) and 'links' in content: for link in content['links']: if not isinstance(link, dict): continue if 'rel' in link and 'href' in link: if link['rel'] == name: return link['href'] return None class NefCollections(object): subj = 'collection' root = '/collections' def __init__(self, proxy): self.proxy = proxy def path(self, name): quoted_name = urlparse.quote_plus(name) return posixpath.join(self.root, quoted_name) def get(self, name, payload=None): LOG.debug('Get properties of %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = 
self.path(name) return self.proxy.get(path, payload) def set(self, name, payload=None): LOG.debug('Modify properties of %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = self.path(name) return self.proxy.put(path, payload) def list(self, payload=None): LOG.debug('List of %(subj)ss: %(payload)s', {'subj': self.subj, 'payload': payload}) return self.proxy.get(self.root, payload) def create(self, payload=None): LOG.debug('Create %(subj)s: %(payload)s', {'subj': self.subj, 'payload': payload}) try: return self.proxy.post(self.root, payload) except NefException as error: if error.code != 'EEXIST': raise error def delete(self, name, payload=None): LOG.debug('Delete %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = self.path(name) try: return self.proxy.delete(path, payload) except NefException as error: if error.code != 'ENOENT': raise error class NefSettings(NefCollections): subj = 'setting' root = '/settings/properties' def create(self, payload=None): return NotImplemented def delete(self, name, payload=None): return NotImplemented class NefDatasets(NefCollections): subj = 'dataset' root = '/storage/datasets' def rename(self, name, payload=None): LOG.debug('Rename %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'rename') return self.proxy.post(path, payload) class NefSnapshots(NefDatasets, NefCollections): subj = 'snapshot' root = '/storage/snapshots' def clone(self, name, payload=None): LOG.debug('Clone %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'clone') return self.proxy.post(path, payload) class NefFilesystems(NefDatasets, NefCollections): subj = 'filesystem' root = '/storage/filesystems' def rollback(self, name, payload=None): LOG.debug('Rollback %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 
'payload': payload}) path = posixpath.join(self.path(name), 'rollback') return self.proxy.post(path, payload) def mount(self, name, payload=None): LOG.debug('Mount %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'mount') return self.proxy.post(path, payload) def unmount(self, name, payload=None): LOG.debug('Unmount %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'unmount') return self.proxy.post(path, payload) def acl(self, name, payload=None): LOG.debug('Set %(subj)s %(name)s ACL: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'acl') return self.proxy.post(path, payload) def promote(self, name, payload=None): LOG.debug('Promote %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'promote') return self.proxy.post(path, payload) class NefHpr(NefCollections): subj = 'HPR service' root = '/hpr' def activate(self, payload=None): LOG.debug('Activate %(payload)s', {'payload': payload}) path = posixpath.join(self.root, 'activate') return self.proxy.post(path, payload) def start(self, name, payload=None): LOG.debug('Start %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'start') return self.proxy.post(path, payload) class NefServices(NefCollections): subj = 'service' root = '/services' class NefNfs(NefCollections): subj = 'NFS' root = '/nas/nfs' class NefNetAddresses(NefCollections): subj = 'network address' root = '/network/addresses' class NefProxy(object): def __init__(self, proto, path, conf): self.session = requests.Session() self.settings = NefSettings(self) self.filesystems = NefFilesystems(self) self.snapshots = NefSnapshots(self) self.services = NefServices(self) self.hpr = NefHpr(self) self.nfs = 
NefNfs(self) self.netaddrs = NefNetAddresses(self) self.proto = proto self.path = path self.lock = None self.tokens = {} self.headers = { 'Content-Type': 'application/json', 'X-XSS-Protection': '1' } if conf.nexenta_use_https: self.scheme = 'https' else: self.scheme = 'http' self.username = conf.nexenta_user self.password = conf.nexenta_password self.hosts = [] if conf.nexenta_rest_addresses: for host in conf.nexenta_rest_addresses: self.hosts.append(host.strip()) self.root = self.filesystems.path(path) if not self.hosts: self.hosts.append(conf.nexenta_nas_host) self.host = self.hosts[0] if conf.nexenta_rest_port: self.port = conf.nexenta_rest_port else: if conf.nexenta_use_https: self.port = 8443 else: self.port = 8080 self.backoff_factor = conf.nexenta_rest_backoff_factor self.retries = len(self.hosts) * conf.nexenta_rest_retry_count self.timeout = ( conf.nexenta_rest_connect_timeout, conf.nexenta_rest_read_timeout) # pylint: disable=no-member max_retries = requests.packages.urllib3.util.retry.Retry( total=conf.nexenta_rest_retry_count, backoff_factor=conf.nexenta_rest_backoff_factor) adapter = requests.adapters.HTTPAdapter(max_retries=max_retries) self.session.verify = conf.nexenta_ssl_cert_verify self.session.headers.update(self.headers) self.session.mount('%s://' % self.scheme, adapter) if not conf.nexenta_ssl_cert_verify: requests.packages.urllib3.disable_warnings() self.update_lock() def __getattr__(self, name): return NefRequest(self, name) def delete_bearer(self): if 'Authorization' in self.session.headers: del self.session.headers['Authorization'] def update_bearer(self, token): bearer = 'Bearer %s' % token self.session.headers['Authorization'] = bearer def update_token(self, token): self.tokens[self.host] = token self.update_bearer(token) def update_host(self, host): self.host = host if host in self.tokens: token = self.tokens[host] self.update_bearer(token) def update_lock(self): prop = self.settings.get('system.guid') guid = prop.get('value') path = 
'%s:%s' % (guid, self.path) if isinstance(path, str): path = path.encode('utf-8') self.lock = hashlib.md5(path).hexdigest() # nosec B324 def url(self, path): netloc = '%s:%d' % (self.host, int(self.port)) components = (self.scheme, netloc, str(path), None, None) url = urlparse.urlunsplit(components) return url def delay(self, attempt): interval = int(self.backoff_factor * (2 ** (attempt - 1))) LOG.debug('Waiting for %(interval)s seconds', {'interval': interval}) greenthread.sleep(interval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/nexenta/ns5/nexenta_nas.py0000664000175000017500000005666000000000000024140 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta by DDN, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import posixpath from oslo_log import log from oslo_utils import units from manila.common import constants as common from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.nexenta.ns5 import jsonrpc from manila.share.drivers.nexenta import options from manila.share.drivers.nexenta import utils VERSION = '1.1' LOG = log.getLogger(__name__) ZFS_MULTIPLIER = 1.1 # ZFS quotas do not take metadata into account. class NexentaNasDriver(driver.ShareDriver): """Nexenta Share Driver. Executes commands relating to Shares. API version history: 1.0 - Initial version. 1.1 - Failover support. 
- Unshare filesystem completely after last securityContext is removed. - Moved all http/url code to jsonrpc. - Manage existing support. - Revert to snapshot support. """ driver_prefix = 'nexenta' def __init__(self, *args, **kwargs): """Do initialization.""" LOG.debug('Initializing Nexenta driver.') super(NexentaNasDriver, self).__init__(False, *args, **kwargs) self.configuration = kwargs.get('configuration') if self.configuration: self.configuration.append_config_values( options.nexenta_connection_opts) self.configuration.append_config_values( options.nexenta_nfs_opts) self.configuration.append_config_values( options.nexenta_dataset_opts) else: raise exception.BadConfigurationException( reason=_('Nexenta configuration missing.')) self.nef = None self.verify_ssl = self.configuration.nexenta_ssl_cert_verify self.nas_host = self.configuration.nexenta_nas_host self.nef_port = self.configuration.nexenta_rest_port self.nef_user = self.configuration.nexenta_user self.nef_password = self.configuration.nexenta_password self.pool_name = self.configuration.nexenta_pool self.parent_fs = self.configuration.nexenta_folder self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base self.dataset_compression = ( self.configuration.nexenta_dataset_compression) self.provisioned_capacity = 0 @property def storage_protocol(self): protocol = '' if self.configuration.nexenta_nfs: protocol = 'NFS' else: msg = _('At least 1 storage protocol must be enabled.') raise exception.NexentaException(msg) return protocol @property def root_path(self): return posixpath.join(self.pool_name, self.parent_fs) @property def share_backend_name(self): if not hasattr(self, '_share_backend_name'): self._share_backend_name = None if self.configuration: self._share_backend_name = self.configuration.safe_get( 'share_backend_name') if not self._share_backend_name: self._share_backend_name = 'NexentaStor5' return self._share_backend_name def do_setup(self, context): self.nef = 
jsonrpc.NefProxy(self.storage_protocol, self.root_path, self.configuration) def check_for_setup_error(self): """Check root filesystem, NFS service and NFS share.""" filesystem = self.nef.filesystems.get(self.root_path) if filesystem['mountPoint'] == 'none': message = (_('NFS root filesystem %(path)s is not writable') % {'path': filesystem['mountPoint']}) raise jsonrpc.NefException(code='ENOENT', message=message) if not filesystem['isMounted']: message = (_('NFS root filesystem %(path)s is not mounted') % {'path': filesystem['mountPoint']}) raise jsonrpc.NefException(code='ENOTDIR', message=message) payload = {} if filesystem['nonBlockingMandatoryMode']: payload['nonBlockingMandatoryMode'] = False if filesystem['smartCompression']: payload['smartCompression'] = False if payload: self.nef.filesystems.set(self.root_path, payload) service = self.nef.services.get('nfs') if service['state'] != 'online': message = (_('NFS server service is not online: %(state)s') % {'state': service['state']}) raise jsonrpc.NefException(code='ESRCH', message=message) self._get_provisioned_capacity() def _get_provisioned_capacity(self): payload = {'fields': 'referencedQuotaSize'} self.provisioned_capacity += self.nef.filesystems.get( self.root_path, payload)['referencedQuotaSize'] def ensure_share(self, context, share, share_server=None): pass def create_share(self, context, share, share_server=None): """Create a share.""" LOG.debug('Creating share: %s.', self._get_share_name(share)) dataset_path = self._get_dataset_path(share) size = int(share['size'] * units.Gi * ZFS_MULTIPLIER) payload = { 'recordSize': self.configuration.nexenta_dataset_record_size, 'compressionMode': self.dataset_compression, 'path': dataset_path, 'referencedQuotaSize': size, 'nonBlockingMandatoryMode': False } if not self.configuration.nexenta_thin_provisioning: payload['referencedReservationSize'] = size self.nef.filesystems.create(payload) try: mount_path = self._mount_filesystem(share) except jsonrpc.NefException 
as create_error: try: payload = {'force': True} self.nef.filesystems.delete(dataset_path, payload) except jsonrpc.NefException as delete_error: LOG.debug('Failed to delete share %(path)s: %(error)s', {'path': dataset_path, 'error': delete_error}) raise create_error self.provisioned_capacity += share['size'] location = { 'path': mount_path, 'id': self._get_share_name(share) } return [location] def _mount_filesystem(self, share): """Ensure that filesystem is activated and mounted on the host.""" dataset_path = self._get_dataset_path(share) payload = {'fields': 'mountPoint,isMounted'} filesystem = self.nef.filesystems.get(dataset_path, payload) if filesystem['mountPoint'] == 'none': payload = {'datasetName': dataset_path} self.nef.hpr.activate(payload) filesystem = self.nef.filesystems.get(dataset_path, payload) elif not filesystem['isMounted']: self.nef.filesystems.mount(dataset_path) return '%s:%s' % (self.nas_host, filesystem['mountPoint']) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create share from snapshot.""" snapshot_path = self._get_snapshot_path(snapshot) LOG.debug('Creating share from snapshot %s.', snapshot_path) clone_path = self._get_dataset_path(share) size = int(share['size'] * units.Gi * ZFS_MULTIPLIER) payload = { 'targetPath': clone_path, 'referencedQuotaSize': size, 'recordSize': self.configuration.nexenta_dataset_record_size, 'compressionMode': self.dataset_compression, 'nonBlockingMandatoryMode': False } if not self.configuration.nexenta_thin_provisioning: payload['referencedReservationSize'] = size self.nef.snapshots.clone(snapshot_path, payload) self._remount_filesystem(clone_path) self.provisioned_capacity += share['size'] try: mount_path = self._mount_filesystem(share) except jsonrpc.NefException as create_error: try: payload = {'force': True} self.nef.filesystems.delete(clone_path, payload) except jsonrpc.NefException as delete_error: LOG.debug('Failed to delete share 
%(path)s: %(error)s', {'path': clone_path, 'error': delete_error}) raise create_error location = { 'path': mount_path, 'id': self._get_share_name(share) } return [location] def _remount_filesystem(self, clone_path): """Workaround for NEF bug: cloned share has offline NFS status""" self.nef.filesystems.unmount(clone_path) self.nef.filesystems.mount(clone_path) def _get_dataset_path(self, share): share_name = self._get_share_name(share) return posixpath.join(self.root_path, share_name) def _get_share_name(self, share): """Get share name with share name prefix.""" return ('%(prefix)s%(share_id)s' % { 'prefix': self.configuration.nexenta_share_name_prefix, 'share_id': share['share_id']}) def _get_snapshot_path(self, snapshot): """Return ZFS snapshot path for the snapshot.""" snapshot_id = ( snapshot['snapshot_id'] or snapshot['share_group_snapshot_id']) share = snapshot.get('share') or snapshot.get('share_instance') fs_path = self._get_dataset_path(share) return '%s@snapshot-%s' % (fs_path, snapshot_id) def delete_share(self, context, share, share_server=None): """Delete a share.""" LOG.debug('Deleting share: %s.', self._get_share_name(share)) share_path = self._get_dataset_path(share) delete_payload = {'force': True, 'snapshots': True} try: self.nef.filesystems.delete(share_path, delete_payload) except jsonrpc.NefException as error: if error.code != 'EEXIST': raise error snapshots_tree = {} snapshots_payload = {'parent': share_path, 'fields': 'path'} snapshots = self.nef.snapshots.list(snapshots_payload) for snapshot in snapshots: clones_payload = {'fields': 'clones,creationTxg'} data = self.nef.snapshots.get(snapshot['path'], clones_payload) if data['clones']: snapshots_tree[data['creationTxg']] = data['clones'][0] if snapshots_tree: clone_path = snapshots_tree[max(snapshots_tree)] self.nef.filesystems.promote(clone_path) self.nef.filesystems.delete(share_path, delete_payload) self.provisioned_capacity -= share['size'] def extend_share(self, share, new_size, 
share_server=None): """Extends a share.""" LOG.debug( 'Extending share: %(name)s to %(size)sG.', ( {'name': self._get_share_name(share), 'size': new_size})) self._set_quota(share, new_size) if not self.configuration.nexenta_thin_provisioning: self._set_reservation(share, new_size) self.provisioned_capacity += (new_size - share['size']) def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" LOG.debug( 'Shrinking share: %(name)s to %(size)sG.', { 'name': self._get_share_name(share), 'size': new_size}) share_path = self._get_dataset_path(share) share_data = self.nef.filesystems.get(share_path) used = share_data['bytesUsedBySelf'] / units.Gi if used > new_size: raise exception.ShareShrinkingPossibleDataLoss( share_id=self._get_share_name(share)) if not self.configuration.nexenta_thin_provisioning: self._set_reservation(share, new_size) self._set_quota(share, new_size) self.provisioned_capacity += (share['size'] - new_size) def create_snapshot(self, context, snapshot, share_server=None): """Create a snapshot.""" snapshot_path = self._get_snapshot_path(snapshot) LOG.debug('Creating snapshot: %s.', snapshot_path) payload = {'path': snapshot_path} self.nef.snapshots.create(payload) def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot. :param snapshot: snapshot reference """ snapshot_path = self._get_snapshot_path(snapshot) LOG.debug('Deleting snapshot: %s.', snapshot_path) payload = {'defer': True} self.nef.snapshots.delete(snapshot_path, payload) def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): """Reverts a share (in place) to the specified snapshot. Does not delete the share snapshot. The share and snapshot must both be 'available' for the restore to be attempted. The snapshot must be the most recent one taken by Manila; the API layer performs this check so the driver doesn't have to. 
The share must be reverted in place to the contents of the snapshot. Application admins should quiesce or otherwise prepare the application for the shared file system contents to change suddenly. :param context: Current context :param snapshot: The snapshot to be restored :param share_access_rules: List of all access rules for the affected share :param snapshot_access_rules: List of all access rules for the affected snapshot :param share_server: Optional -- Share server model or None """ snapshot_path = self._get_snapshot_path(snapshot).split('@')[1] LOG.debug('Reverting to snapshot: %s.', snapshot_path) share_path = self._get_dataset_path(snapshot['share']) payload = {'snapshot': snapshot_path} self.nef.filesystems.rollback(share_path, payload) def manage_existing(self, share, driver_options): """Brings an existing share under Manila management. If the provided share is not valid, then raise a ManageInvalidShare exception, specifying a reason for the failure. If the provided share is not in a state that can be managed, such as being replicated on the backend, the driver *MUST* raise ManageInvalidShare exception with an appropriate message. The share has a share_type, and the driver can inspect that and compare against the properties of the referenced backend share. If they are incompatible, raise a ManageExistingShareTypeMismatch, specifying a reason for the failure. :param share: Share model :param driver_options: Driver-specific options provided by admin. :return: share_update dictionary with required key 'size', which should contain size of the share. """ LOG.debug('Manage share %s.', self._get_share_name(share)) export_path = share['export_locations'][0]['path'] # check that filesystem with provided export exists. fs_path = export_path.split(':/')[1] fs_data = self.nef.filesystems.get(fs_path) if not fs_data: # wrong export path, raise exception. 
msg = _('Share %s does not exist on Nexenta Store appliance, ' 'cannot manage.') % export_path raise exception.NexentaException(msg) # get dataset properties. if fs_data['referencedQuotaSize']: size = (fs_data['referencedQuotaSize'] / units.Gi) + 1 else: size = fs_data['bytesReferenced'] / units.Gi + 1 # rename filesystem on appliance to correlate with manila ID. new_path = '%s/%s' % (self.root_path, self._get_share_name(share)) self.nef.filesystems.rename(fs_path, {'newPath': new_path}) # make sure quotas and reservations are correct. if not self.configuration.nexenta_thin_provisioning: self._set_reservation(share, size) self._set_quota(share, size) return {'size': size, 'export_locations': [{ 'path': '%s:/%s' % (self.nas_host, new_path) }]} def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share. Using access_rules list for both adding and deleting rules. :param context: The `context.RequestContext` object for the request :param share: Share that will have its access rules updated. :param access_rules: All access rules for given share. This list is enough to update the access rules for given share. :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. Not used by this driver. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. Not used by this driver. :param update_rules: Empty List or List of access rules which should be updated. access_rules already contains these rules. :param share_server: Data structure with share server information. Not used by this driver. 
""" LOG.debug('Updating access to share %(id)s with following access ' 'rules: %(rules)s', { 'id': self._get_share_name(share), 'rules': [( rule.get('access_type'), rule.get('access_level'), rule.get('access_to')) for rule in access_rules]}) rw_list = [] ro_list = [] update_dict = {} if share['share_proto'] == 'NFS': for rule in access_rules: if rule['access_type'].lower() != 'ip': msg = _( 'Only IP access control type is supported for NFS.') LOG.warning(msg) update_dict[rule['access_id']] = { 'state': 'error', } else: update_dict[rule['access_id']] = { 'state': 'active', } if rule['access_level'] == common.ACCESS_LEVEL_RW: rw_list.append(rule['access_to']) else: ro_list.append(rule['access_to']) self._update_nfs_access(share, rw_list, ro_list) return update_dict def _update_nfs_access(self, share, rw_list, ro_list): # Define allowed security context types to be able to tell whether # the 'security_contexts' dict contains any rules at all context_types = {'none', 'root', 'readOnlyList', 'readWriteList'} security_contexts = {'securityModes': ['sys']} def add_sc(addr_list, sc_type): if sc_type not in context_types: return rule_list = [] for addr in addr_list: address_mask = addr.strip().split('/', 1) address = address_mask[0] ls = {"allow": True, "etype": "fqdn", "entity": address} if len(address_mask) == 2: mask = int(address_mask[1]) if 0 <= mask < 31: ls['mask'] = mask ls['etype'] = 'network' rule_list.append(ls) # Context type with no addresses will result in an API error if rule_list: security_contexts[sc_type] = rule_list add_sc(rw_list, 'readWriteList') add_sc(ro_list, 'readOnlyList') payload = {'securityContexts': [security_contexts]} share_path = self._get_dataset_path(share) if self.nef.nfs.list({'filesystem': share_path}): if not set(security_contexts.keys()) & context_types: self.nef.nfs.delete(share_path) else: self.nef.nfs.set(share_path, payload) else: payload['filesystem'] = share_path self.nef.nfs.create(payload) payload = { 'flags': ['file_inherit', 
'dir_inherit'], 'permissions': ['full_set'], 'principal': 'everyone@', 'type': 'allow' } self.nef.filesystems.acl(share_path, payload) def _set_quota(self, share, new_size): quota = int(new_size * units.Gi * ZFS_MULTIPLIER) share_path = self._get_dataset_path(share) payload = {'referencedQuotaSize': quota} LOG.debug('Setting quota for dataset %s.', share_path) self.nef.filesystems.set(share_path, payload) def _set_reservation(self, share, new_size): res_size = int(new_size * units.Gi * ZFS_MULTIPLIER) share_path = self._get_dataset_path(share) payload = {'referencedReservationSize': res_size} self.nef.filesystems.set(share_path, payload) def _update_share_stats(self, data=None): super(NexentaNasDriver, self)._update_share_stats() total, free, allocated = self._get_capacity_info() compression = not self.dataset_compression == 'off' data = { 'vendor_name': 'Nexenta', 'storage_protocol': self.storage_protocol, 'share_backend_name': self.share_backend_name, 'nfs_mount_point_base': self.nfs_mount_point_base, 'driver_version': VERSION, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'pools': [{ 'pool_name': self.pool_name, 'compression': compression, 'total_capacity_gb': total, 'free_capacity_gb': free, 'reserved_percentage': ( self.configuration.reserved_share_percentage), 'reserved_snapshot_percentage': (self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), 'reserved_share_extend_percentage': (self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), 'max_over_subscription_ratio': ( self.configuration.safe_get( 'max_over_subscription_ratio')), 'thin_provisioning': self.configuration.nexenta_thin_provisioning, 'provisioned_capacity_gb': self.provisioned_capacity, }], } self._stats.update(data) def _get_capacity_info(self): """Calculate available space on the NFS share.""" data = 
self.nef.filesystems.get(self.root_path) free = int(utils.bytes_to_gb(data['bytesAvailable'])) allocated = int(utils.bytes_to_gb(data['bytesUsed'])) total = free + allocated return total, free, allocated ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/nexenta/options.py0000664000175000017500000001233100000000000022606 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta by DDN, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nexenta.options` -- Contains configuration options for Nexenta drivers. ============================================================================= .. 
automodule:: nexenta.options """ from oslo_config import cfg nexenta_connection_opts = [ cfg.ListOpt('nexenta_rest_addresses', help='One or more comma delimited IP addresses for management ' 'communication with NexentaStor appliance.'), cfg.IntOpt('nexenta_rest_port', default=8443, help='Port to connect to Nexenta REST API server.'), cfg.StrOpt('nexenta_rest_protocol', default='auto', choices=['http', 'https', 'auto'], help='Use http or https for REST connection (default auto).'), cfg.BoolOpt('nexenta_use_https', default=True, help='Use HTTP secure protocol for NexentaStor ' 'management REST API connections'), cfg.StrOpt('nexenta_user', default='admin', help='User name to connect to Nexenta SA.'), cfg.StrOpt('nexenta_password', help='Password to connect to Nexenta SA.', required=True, secret=True), cfg.StrOpt('nexenta_volume', default='volume1', help='Volume name on NexentaStor.'), cfg.StrOpt('nexenta_pool', default='pool1', help='Pool name on NexentaStor.'), cfg.BoolOpt('nexenta_nfs', default=True, help='Defines whether share over NFS is enabled.'), cfg.BoolOpt('nexenta_ssl_cert_verify', default=False, help='Defines whether the driver should check ssl cert.'), cfg.FloatOpt('nexenta_rest_connect_timeout', default=30, help='Specifies the time limit (in seconds), within ' 'which the connection to NexentaStor management ' 'REST API server must be established'), cfg.FloatOpt('nexenta_rest_read_timeout', default=300, help='Specifies the time limit (in seconds), ' 'within which NexentaStor management ' 'REST API server must send a response'), cfg.FloatOpt('nexenta_rest_backoff_factor', default=1, help='Specifies the backoff factor to apply ' 'between connection attempts to NexentaStor ' 'management REST API server'), cfg.IntOpt('nexenta_rest_retry_count', default=5, help='Specifies the number of times to repeat NexentaStor ' 'management REST API call in case of connection errors ' 'and NexentaStor appliance EBUSY or ENOENT errors'), ] nexenta_nfs_opts = [ 
cfg.HostAddressOpt('nexenta_nas_host', help='Data IP address of Nexenta storage appliance.', required=True), cfg.StrOpt('nexenta_mount_point_base', default='$state_path/mnt', help='Base directory that contains NFS share mount points.'), ] nexenta_dataset_opts = [ cfg.StrOpt('nexenta_nfs_share', default='nfs_share', help='Parent filesystem where all the shares will be created. ' 'This parameter is only used by NexentaStor4 driver.'), cfg.StrOpt('nexenta_share_name_prefix', help='Nexenta share name prefix.', default='share-'), cfg.StrOpt('nexenta_folder', default='folder', help='Parent folder on NexentaStor.'), cfg.StrOpt('nexenta_dataset_compression', default='on', choices=['on', 'off', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lzjb', 'zle', 'lz4'], help='Compression value for new ZFS folders.'), cfg.StrOpt('nexenta_dataset_dedupe', default='off', choices=['on', 'off', 'sha256', 'verify'], help='Deduplication value for new ZFS folders. ' 'Only used by NexentaStor4 driver.'), cfg.BoolOpt('nexenta_thin_provisioning', default=True, help=('If True shares will not be space guaranteed and ' 'overprovisioning will be enabled.')), cfg.IntOpt('nexenta_dataset_record_size', default=131072, help='Specifies a suggested block size in for files in a file ' 'system. (bytes)'), ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/nexenta/utils.py0000664000175000017500000000302400000000000022252 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta by DDN, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re


def str2size(s, scale=1024):
    """Convert a size string to a number of bytes.

    String format: ``<number>[<whitespace>]<unit>`` where ``<unit>`` is one
    of B/K/M/G/T/P/E/Z/Y (case-insensitive).  A missing unit means bytes.

    :param s: size string; an ``int`` is returned unchanged, a falsy value
        (``None``, ``''``, ``0``) yields ``0``
    :param scale: multiplier between consecutive units (1024 by default)
    :returns: size in bytes as a float (or the int passthrough)
    :raises ValueError: if the string cannot be parsed
    """
    if not s:
        return 0
    if isinstance(s, int):
        return s
    match = re.match(r'^([\.\d]+)\s*([BbKkMmGgTtPpEeZzYy]?)', s)
    if match is None:
        raise ValueError('Invalid value: %s' % s)
    value = float(match.group(1))
    # An empty capture means "no unit" -> bytes.
    suffix = (match.group(2) or 'B').upper()
    # Explicit table lookup instead of a linear scan with an implicit
    # None fall-through; the regex guarantees the suffix is in the table.
    types = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    return float(value * pow(scale, types.index(suffix)))


def str2gib_size(s):
    """Convert a size string to an integer number of whole gibibytes."""
    # Deferred import keeps this module importable without oslo.utils.
    from oslo_utils import units
    size_in_bytes = str2size(s)
    return size_in_bytes // units.Gi


def bytes_to_gb(size):
    """Convert a byte count to gibibytes as a float."""
    # Deferred import keeps this module importable without oslo.utils.
    from oslo_utils import units
    return float(size) / units.Gi


# Copyright 2021 Pure Storage Inc.
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Pure Storage FlashBlade Share Driver """ import functools import platform from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from manila import exception from manila.i18n import _ from manila.share import driver try: import purity_fb except ImportError: purity_fb = None LOG = logging.getLogger(__name__) flashblade_connection_opts = [ cfg.HostAddressOpt( "flashblade_mgmt_vip", help="The name (or IP address) for the Pure Storage " "FlashBlade storage system management VIP.", ), cfg.ListOpt( "flashblade_data_vip", help="The names (or IP address) for the Pure Storage " "FlashBlade storage system data VIPs. " "The first listed name or IP address will be considered " "to be the preferred IP address, although is not " "enforced.", ), ] flashblade_auth_opts = [ cfg.StrOpt( "flashblade_api", help=("API token for an administrative user account"), secret=True, ), ] flashblade_extra_opts = [ cfg.BoolOpt( "flashblade_eradicate", default=True, help="When enabled, all FlashBlade file systems and snapshots " "will be eradicated at the time of deletion in Manila. " "Data will NOT be recoverable after a delete with this " "set to True! 
When disabled, file systems and snapshots " "will go into pending eradication state and can be " "recovered.)", ), ] CONF = cfg.CONF CONF.register_opts(flashblade_connection_opts) CONF.register_opts(flashblade_auth_opts) CONF.register_opts(flashblade_extra_opts) def purity_fb_to_manila_exceptions(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except purity_fb.rest.ApiException as ex: msg = _("Caught exception from purity_fb: %s") % ex LOG.exception(msg) raise exception.ShareBackendException(msg=msg) return wrapper class FlashBladeShareDriver(driver.ShareDriver): """Version hisotry: 1.0.0 - Initial version 2.0.0 - Xena release 3.0.0 - Yoga release 4.0.0 - Zed release 5.0.0 - Antelope release 6.0.0 - Bobcat release 7.0.0 - 2024.1 (Caracal) release 8.0.0 - 2025.1 (Epoxy) release 9.0.0 - 2025.2 (Flamingo) release """ VERSION = "9.0" # driver version USER_AGENT_BASE = "OpenStack Manila" def __init__(self, *args, **kwargs): super(FlashBladeShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(flashblade_connection_opts) self.configuration.append_config_values(flashblade_auth_opts) self.configuration.append_config_values(flashblade_extra_opts) self._user_agent = "%(base)s %(class)s/%(version)s (%(platform)s)" % { "base": self.USER_AGENT_BASE, "class": self.__class__.__name__, "version": self.VERSION, "platform": platform.platform(), } def do_setup(self, context): """Driver initialization""" if purity_fb is None: msg = _( "Missing 'purity_fb' python module, ensure the library" " is installed and available." 
) raise exception.ManilaException(message=msg) self.api = self._safe_get_from_config_or_fail("flashblade_api") self.management_address = self._safe_get_from_config_or_fail( "flashblade_mgmt_vip" ) self.data_address = self._safe_get_from_config_or_fail( "flashblade_data_vip" ) self._sys = purity_fb.PurityFb(self.management_address) self._sys.disable_verify_ssl() try: self._sys.login(self.api) self._sys._api_client.user_agent = self._user_agent except purity_fb.rest.ApiException as ex: msg = _("Exception when logging into the array: %s\n") % ex LOG.exception(msg) raise exception.ManilaException(message=msg) backend_name = self.configuration.safe_get("share_backend_name") self._backend_name = backend_name or self.__class__.__name__ LOG.debug("setup complete") def _update_share_stats(self, data=None): """Retrieve stats info from share group.""" ( free_capacity_bytes, physical_capacity_bytes, provisioned_cap_bytes, data_reduction, ) = self._get_available_capacity() reserved_share_percentage = self.configuration.safe_get( "reserved_share_percentage" ) if reserved_share_percentage is None: reserved_share_percentage = 0 reserved_share_from_snapshot_percentage = self.configuration.safe_get( "reserved_share_from_snapshot_percentage" ) if reserved_share_from_snapshot_percentage is None: reserved_share_from_snapshot_percentage = reserved_share_percentage reserved_share_extend_percentage = self.configuration.safe_get( "reserved_share_extend_percentage" ) if reserved_share_extend_percentage is None: reserved_share_extend_percentage = reserved_share_percentage data = dict( share_backend_name=self._backend_name, vendor_name="PURE STORAGE", driver_version=self.VERSION, storage_protocol="NFS", data_reduction=data_reduction, reserved_percentage=reserved_share_percentage, reserved_snapshot_percentage=( reserved_share_from_snapshot_percentage), reserved_share_extend_percentage=( reserved_share_extend_percentage), total_capacity_gb=float(physical_capacity_bytes) / units.Gi, 
free_capacity_gb=float(free_capacity_bytes) / units.Gi, provisioned_capacity_gb=float(provisioned_cap_bytes) / units.Gi, snapshot_support=True, create_share_from_snapshot_support=False, mount_snapshot_support=False, revert_to_snapshot_support=True, thin_provisioning=True, ) super(FlashBladeShareDriver, self)._update_share_stats(data) def _get_available_capacity(self): try: space = self._sys.arrays.list_arrays_space() except purity_fb.rest.ApiException: message = "Connection failure. Retrying login..." LOG.warning(message) try: self._sys.login(self.api) self._sys._api_client.user_agent = self._user_agent except purity_fb.rest.ApiException as ex: msg = _("Exception when logging into the array: %s\n") % ex LOG.exception(msg) raise exception.ManilaException(message=msg) space = self._sys.arrays.list_arrays_space() array_space = space.items[0] data_reduction = array_space.space.data_reduction physical_capacity_bytes = array_space.capacity used_capacity_bytes = array_space.space.total_physical free_capacity_bytes = physical_capacity_bytes - used_capacity_bytes provisioned_capacity_bytes = array_space.space.unique return ( free_capacity_bytes, physical_capacity_bytes, provisioned_capacity_bytes, data_reduction, ) def _safe_get_from_config_or_fail(self, config_parameter): config_value = self.configuration.safe_get(config_parameter) if not config_value: reason = _( "%(config_parameter)s configuration parameter " "must be specified" ) % {"config_parameter": config_parameter} LOG.exception(reason) raise exception.BadConfigurationException(reason=reason) return config_value def _make_source_name(self, snapshot): base_name = CONF.share_name_template + "-manila" return base_name % snapshot["share_id"] def _make_share_name(self, manila_share): base_name = CONF.share_name_template + "-manila" return base_name % manila_share["id"] def _get_full_nfs_export_path(self, export_path, location): return "{subnet_ip}:/{export_path}".format( subnet_ip=location, export_path=export_path ) def 
_get_flashblade_filesystem_by_name(self, name): filesys = [] filesys.append(name) try: res = self._sys.file_systems.list_file_systems(names=filesys) except purity_fb.rest.ApiException as ex: msg = _("Share not found on FlashBlade: %s\n") % ex LOG.exception(msg) raise exception.ManilaException(message=msg) message = "Filesystem %(share_name)s exists. Continuing..." LOG.debug(message, {"share_name": res.items[0].name}) def _get_flashblade_snapshot_by_name(self, name): try: self._sys.file_system_snapshots.list_file_system_snapshots( filter=name ) except purity_fb.rest.ApiException as ex: msg = _("Snapshot not found on FlashBlade: %s\n") % ex LOG.exception(msg) raise exception.ManilaException(message=msg) @purity_fb_to_manila_exceptions def _resize_share(self, share, new_size): dataset_name = self._make_share_name(share) self._get_flashblade_filesystem_by_name(dataset_name) consumed_size = ( self._sys.file_systems.list_file_systems(names=[dataset_name]) .items[0] .space.virtual ) attr = {} if consumed_size >= new_size * units.Gi: raise exception.ShareShrinkingPossibleDataLoss( share_id=share["id"] ) attr["provisioned"] = new_size * units.Gi n_attr = purity_fb.FileSystem(**attr) LOG.debug("Resizing filesystem...") self._sys.file_systems.update_file_systems( name=dataset_name, attributes=n_attr ) def _update_nfs_access(self, share, access_rules): dataset_name = self._make_share_name(share) self._get_flashblade_filesystem_by_name(dataset_name) nfs_rules = "" rule_state = {} for access in access_rules: if access["access_type"] == "ip": line = ( access["access_to"] + "(" + access["access_level"] + ",no_root_squash) " ) rule_state[access["access_id"]] = {"state": "active"} nfs_rules += line else: message = _( 'Only "ip" access type is allowed for NFS protocol.' 
) LOG.error(message) rule_state[access["access_id"]] = {"state": "error"} try: self._sys.file_systems.update_file_systems( name=dataset_name, attributes=purity_fb.FileSystem( nfs=purity_fb.NfsRule(rules=nfs_rules) ), ) message = "Set nfs rules %(nfs_rules)s for %(share_name)s" LOG.debug( message, {"nfs_rules": nfs_rules, "share_name": dataset_name} ) except purity_fb.rest.ApiException as ex: msg = _("Failed to set NFS access rules: %s\n") % ex LOG.exception(msg) raise exception.ManilaException(message=msg) return rule_state @purity_fb_to_manila_exceptions def create_share(self, context, share, share_server=None): """Create a share and export it based on protocol used.""" size = share["size"] * units.Gi share_name = self._make_share_name(share) if share["share_proto"] == "NFS": flashblade_fs = purity_fb.FileSystem( name=share_name, provisioned=size, hard_limit_enabled=True, fast_remove_directory_enabled=True, snapshot_directory_enabled=True, nfs=purity_fb.NfsRule( v3_enabled=True, rules="", v4_1_enabled=True ), ) self._sys.file_systems.create_file_systems(flashblade_fs) locations = [] preferred = True for address in self.data_address: export_location = { "path": self._get_full_nfs_export_path( share_name, address, ), "is_admin_only": False, "metadata": { "preferred": preferred, }, } LOG.debug("pref %(pref)s", {"pref": preferred}) preferred = False locations.append(export_location) else: message = _("Unsupported share protocol: %(proto)s.") % { "proto": share["share_proto"] } LOG.exception(message) raise exception.InvalidShare(reason=message) LOG.info("FlashBlade created share %(name)s", {"name": share_name}) return locations def create_snapshot(self, context, snapshot, share_server=None): """Called to create a snapshot""" source = [] flashblade_filesystem = self._make_source_name(snapshot) source.append(flashblade_filesystem) try: self._sys.file_system_snapshots.create_file_system_snapshots( sources=source, suffix=purity_fb.SnapshotSuffix(snapshot["id"]) ) except 
purity_fb.rest.ApiException as ex: msg = ( _("Snapshot failed. Share not found on FlashBlade: %s\n") % ex ) LOG.exception(msg) raise exception.ManilaException(message=msg) def delete_share(self, context, share, share_server=None): """Called to delete a share""" dataset_name = self._make_share_name(share) try: self._get_flashblade_filesystem_by_name(dataset_name) except purity_fb.rest.ApiException: message = ( "share %(dataset_name)s not found on FlashBlade, skip " "delete" ) LOG.warning(message, {"dataset_name": dataset_name}) return self._sys.file_systems.update_file_systems( name=dataset_name, attributes=purity_fb.FileSystem( nfs=purity_fb.NfsRule(v3_enabled=False, v4_1_enabled=False), smb=purity_fb.ProtocolRule(enabled=False), destroyed=True, ), ) if self.configuration.flashblade_eradicate: self._sys.file_systems.delete_file_systems(name=dataset_name) LOG.info( "FlashBlade eradicated share %(name)s", {"name": dataset_name} ) @purity_fb_to_manila_exceptions def delete_snapshot(self, context, snapshot, share_server=None): """Called to delete a snapshot""" dataset_name = self._make_source_name(snapshot) filt = "source_display_name='{0}' and suffix='{1}'".format( dataset_name, snapshot["id"] ) name = "{0}.{1}".format(dataset_name, snapshot["id"]) LOG.debug("FlashBlade filter %(name)s", {"name": filt}) try: self._get_flashblade_snapshot_by_name(filt) except exception.ShareResourceNotFound: message = ( "snapshot %(snapshot)s not found on FlashBlade, skip delete" ) LOG.warning( message, {"snapshot": dataset_name + "." + snapshot["id"]} ) return self._sys.file_system_snapshots.update_file_system_snapshots( name=name, attributes=purity_fb.FileSystemSnapshot(destroyed=True) ) LOG.debug( "Snapshot %(name)s deleted successfully", {"name": dataset_name + "." 
+ snapshot["id"]}, ) if self.configuration.flashblade_eradicate: self._sys.file_system_snapshots.delete_file_system_snapshots( name=name ) LOG.debug( "Snapshot %(name)s eradicated successfully", {"name": dataset_name + "." + snapshot["id"]}, ) def ensure_share(self, context, share, share_server=None): """Dummy - called to ensure share is exported. All shares created on a FlashBlade are guaranteed to be exported so this check is redundant """ def update_access( self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None, ): """Update access of share""" # We will use the access_rules list to bulk update access state_map = self._update_nfs_access(share, access_rules) return state_map def extend_share(self, share, new_size, share_server=None): """uses resize_share to extend a share""" self._resize_share(share, new_size) def shrink_share(self, share, new_size, share_server=None): """uses resize_share to shrink a share""" self._resize_share(share, new_size) @purity_fb_to_manila_exceptions def revert_to_snapshot( self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None, ): dataset_name = self._make_source_name(snapshot) filt = "source_display_name='{0}' and suffix='{1}'".format( dataset_name, snapshot["id"] ) LOG.debug("FlashBlade filter %(name)s", {"name": filt}) name = "{0}.{1}".format(dataset_name, snapshot["id"]) self._get_flashblade_snapshot_by_name(filt) fs_attr = purity_fb.FileSystem( name=dataset_name, source=purity_fb.Reference(name=name) ) try: self._sys.file_systems.create_file_systems( overwrite=True, discard_non_snapshotted_data=True, file_system=fs_attr, ) except purity_fb.rest.ApiException as ex: msg = _("Failed to revert snapshot: %s\n") % ex LOG.exception(msg) raise exception.ManilaException(message=msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9336715 
manila-21.0.0/manila/share/drivers/qnap/0000775000175000017500000000000000000000000020036 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/qnap/__init__.py0000664000175000017500000000000000000000000022135 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/qnap/api.py0000664000175000017500000006444400000000000021175 0ustar00zuulzuul00000000000000# Copyright (c) 2016 QNAP Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ API for QNAP Storage. 
""" import base64 import functools from http import client as http_client import re import ssl from urllib import parse as urlparse from defusedxml import ElementTree as etree from oslo_log import log as logging from manila import exception from manila.i18n import _ from manila import utils LOG = logging.getLogger(__name__) MSG_SESSION_EXPIRED = _("Session ID expired") MSG_UNEXPECT_RESP = _("Unexpected response from QNAP API") def _connection_checker(func): """Decorator to check session has expired or not.""" @utils.retry(retry_param=exception.ShareBackendException, retries=5) @functools.wraps(func) def inner_connection_checker(self, *args, **kwargs): LOG.debug('in _connection_checker') pattern = re.compile(r".*Session ID expired.$") try: return func(self, *args, **kwargs) except exception.ShareBackendException as e: matches = pattern.match(str(e)) if matches: LOG.debug('Session might have expired.' ' Trying to relogin') self._login() raise return inner_connection_checker class QnapAPIExecutor(object): """Makes QNAP API calls for ES NAS.""" def __init__(self, *args, **kwargs): self.sid = None self.username = kwargs['username'] self.password = kwargs['password'] self.ip, self.port, self.ssl = ( self._parse_management_url(kwargs['management_url'])) self._login() def _parse_management_url(self, management_url): pattern = re.compile(r"(http|https)\:\/\/(\S+)\:(\d+)") matches = pattern.match(management_url) if matches.group(1) == 'http': management_ssl = False else: management_ssl = True management_ip = matches.group(2) management_port = matches.group(3) return management_ip, management_port, management_ssl def _prepare_connection(self, isSSL, ip, port): if isSSL: if hasattr(ssl, '_create_unverified_context'): context = ssl._create_unverified_context() # nosec B323 connection = http_client.HTTPSConnection(ip, port=port, context=context) else: connection = http_client.HTTPSConnection(ip, port=port) else: connection = http_client.HTTPConnection(ip, port) return connection 
def get_basic_info(self, management_url): """Get the basic information of NAS.""" LOG.debug('in get_basic_info') management_ip, management_port, management_ssl = ( self._parse_management_url(management_url)) connection = self._prepare_connection(management_ssl, management_ip, management_port) connection.request('GET', '/cgi-bin/authLogin.cgi') response = connection.getresponse() data = response.read() LOG.debug('response data: %s', data) root = etree.fromstring(data) display_model_name = root.find('model/displayModelName').text internal_model_name = root.find('model/internalModelName').text fw_version = root.find('firmware/version').text connection.close() return display_model_name, internal_model_name, fw_version def _execute_and_get_response_details(self, nas_ip, url): """Will prepare response after executing a http request.""" LOG.debug('port: %(port)s, ssl: %(ssl)s', {'port': self.port, 'ssl': self.ssl}) res_details = {} # Prepare the connection connection = self._prepare_connection(self.ssl, nas_ip, self.port) # Make the connection LOG.debug('url : %s', url) connection.request('GET', url) # Extract the response as the connection was successful response = connection.getresponse() # Read the response data = response.read() LOG.debug('response data: %s', data) res_details['data'] = data res_details['error'] = None res_details['http_status'] = response.status connection.close() return res_details def execute_login(self): """Login and return sid.""" params = { 'user': self.username, 'pwd': base64.b64encode(self.password.encode("utf-8")), 'serviceKey': '1', } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/authLogin.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) session_id = root.find('authSid').text 
return session_id def _login(self): """Execute Https Login API.""" self.sid = self.execute_login() LOG.debug('sid: %s', self.sid) def _sanitize_params(self, params): sanitized_params = {} for key in params: value = params[key] if value is not None: if isinstance(value, list): sanitized_params[key] = [str(v) for v in value] else: sanitized_params[key] = str(value) return sanitized_params @_connection_checker def create_share(self, share, pool_name, create_share_name, share_proto, **kwargs): """Create share.""" LOG.debug('create_share_name: %s', create_share_name) params = { 'wiz_func': 'share_create', 'action': 'add_share', 'vol_name': create_share_name, 'vol_size': str(share['size']) + 'GB', 'threshold': '80', 'dedup': ('sha512' if kwargs['qnap_deduplication'] is True else 'off'), 'compression': '1' if kwargs['qnap_compression'] is True else '0', 'thin_pro': '1' if kwargs['qnap_thin_provision'] is True else '0', 'cache': '1' if kwargs['qnap_ssd_cache'] is True else '0', 'cifs_enable': '0' if share_proto == 'NFS' else '1', 'nfs_enable': '0' if share_proto == 'CIFS' else '1', 'afp_enable': '0', 'ftp_enable': '0', 'encryption': '0', 'hidden': '0', 'oplocks': '1', 'sync': 'always', 'userrw0': 'admin', 'userrd_len': '0', 'userrw_len': '1', 'userno_len': '0', 'access_r': 'setup_users', 'path_type': 'auto', 'recycle_bin': '1', 'recycle_bin_administrators_only': '0', 'pool_name': pool_name, 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/wizReq.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('ES_RET_CODE').text < '0': msg = _("Fail to create share %s on NAS.") % create_share_name LOG.error(msg) raise exception.ShareBackendException(msg=msg) vol_list = 
root.find('func').find('ownContent').find('volumeList') vol_info_tree = vol_list.findall('volume') for vol in vol_info_tree: LOG.debug('Iterating vol name: %(name)s, index: %(id)s', {'name': vol.find('volumeLabel').text, 'id': vol.find('volumeValue').text}) if (create_share_name == vol.find('volumeLabel').text): LOG.debug('volumeLabel:%s', vol.find('volumeLabel').text) return vol.find('volumeValue').text return res_details['data'] @_connection_checker def delete_share(self, vol_id, *args, **kwargs): """Execute delete share API.""" params = { 'func': 'volume_mgmt', 'vol_remove': '1', 'volumeID': vol_id, 'stop_service': 'no', 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': msg = _('Delete share id: %s failed') % vol_id raise exception.ShareBackendException(msg=msg) @_connection_checker def get_specific_poolinfo(self, pool_id): """Execute get_specific_poolinfo API.""" params = { 'store': 'poolInfo', 'func': 'extra_get', 'poolID': pool_id, 'Pool_Info': '1', 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': msg = _('get_specific_poolinfo failed') raise exception.ShareBackendException(msg=msg) pool_list = root.find('Pool_Index') pool_info_tree = pool_list.findall('row') for pool in pool_info_tree: if pool_id == 
pool.find('poolID').text: LOG.debug('poolID: %s', pool.find('poolID').text) return pool @_connection_checker def get_share_info(self, pool_id, **kwargs): """Execute get_share_info API.""" for key, value in kwargs.items(): LOG.debug('%(key)s = %(val)s', {'key': key, 'val': value}) params = { 'store': 'poolVolumeList', 'poolID': pool_id, 'func': 'extra_get', 'Pool_Vol_Info': '1', 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) vol_list = root.find('Volume_Info') vol_info_tree = vol_list.findall('row') for vol in vol_info_tree: LOG.debug('Iterating vol name: %(name)s, index: %(id)s', {'name': vol.find('vol_label').text, 'id': vol.find('vol_no').text}) if 'vol_no' in kwargs: if kwargs['vol_no'] == vol.find('vol_no').text: LOG.debug('vol_no:%s', vol.find('vol_no').text) return vol elif 'vol_label' in kwargs: if kwargs['vol_label'] == vol.find('vol_label').text: LOG.debug('vol_label:%s', vol.find('vol_label').text) return vol return None @_connection_checker def get_specific_volinfo(self, vol_id, **kwargs): """Execute get_specific_volinfo API.""" params = { 'store': 'volumeInfo', 'volumeID': vol_id, 'func': 'extra_get', 'Volume_Info': '1', 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) vol_list = root.find('Volume_Info') vol_info_tree = vol_list.findall('row') for vol in 
vol_info_tree: if vol_id == vol.find('vol_no').text: LOG.debug('vol_no: %s', vol.find('vol_no').text) return vol @_connection_checker def get_snapshot_info(self, **kwargs): """Execute get_snapshot_info API.""" params = { 'func': 'extra_get', 'volumeID': kwargs['volID'], 'snapshot_list': '1', 'snap_start': '0', 'snap_count': '100', 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': raise exception.ShareBackendException(msg=MSG_UNEXPECT_RESP) snapshot_list = root.find('SnapshotList') # if snapshot_list is None: if not snapshot_list: return None if ('snapshot_name' in kwargs): snapshot_tree = snapshot_list.findall('row') for snapshot in snapshot_tree: if (kwargs['snapshot_name'] == snapshot.find('snapshot_name').text): LOG.debug('snapshot_name:%s', kwargs['snapshot_name']) return snapshot if (snapshot is snapshot_tree[-1]): return None return res_details['data'] @_connection_checker def create_snapshot_api(self, volumeID, snapshot_name): """Execute CGI to create snapshot from source share.""" LOG.debug('volumeID: %s', volumeID) LOG.debug('snapshot_name: %s', snapshot_name) params = { 'func': 'create_snapshot', 'volumeID': volumeID, 'snapshot_name': snapshot_name, 'expire_min': '0', 'vital': '1', 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if 
root.find('ES_RET_CODE').text < '0': msg = _('Create snapshot failed') raise exception.ShareBackendException(msg=msg) @_connection_checker def delete_snapshot_api(self, snapshot_id): """Execute CGI to delete snapshot from snapshot_id.""" params = { 'func': 'del_snapshots', 'snapshotID': snapshot_id, 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) # snapshot not exist if root.find('result').text == '-206021': LOG.warning('Snapshot id %s does not exist', snapshot_id) return # share not exist if root.find('result').text == '-200005': LOG.warning('Share of snapshot id %s does not exist', snapshot_id) return if root.find('result').text < '0': msg = _('Failed to delete snapshot.') raise exception.ShareBackendException(msg=msg) @_connection_checker def clone_snapshot(self, snapshot_id, new_sharename, clone_size): """Execute CGI to clone snapshot as share.""" params = { 'func': 'clone_qsnapshot', 'by_vol': '1', 'snapshotID': snapshot_id, 'new_name': new_sharename, 'clone_size': '{}g'.format(clone_size), 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': msg = _('Failed to clone snapshot.') raise exception.ShareBackendException(msg=msg) @_connection_checker def edit_share(self, share_dict): """Edit share properties.""" LOG.debug('share_dict[sharename]: 
%s', share_dict['sharename']) params = { 'wiz_func': 'share_property', 'action': 'share_property', 'sharename': share_dict['sharename'], 'old_sharename': share_dict['old_sharename'], 'dedup': 'sha512' if share_dict['deduplication'] else 'off', 'compression': '1' if share_dict['compression'] else '0', 'thin_pro': '1' if share_dict['thin_provision'] else '0', 'cache': '1' if share_dict['ssd_cache'] else '0', 'cifs_enable': '1' if share_dict['share_proto'] == 'CIFS' else '0', 'nfs_enable': '1' if share_dict['share_proto'] == 'NFS' else '0', 'afp_enable': '0', 'ftp_enable': '0', 'hidden': '0', 'oplocks': '1', 'sync': 'always', 'recycle_bin': '1', 'recycle_bin_administrators_only': '0', 'sid': self.sid, } if share_dict.get('new_size'): params['vol_size'] = str(share_dict['new_size']) + 'GB' sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/priv/privWizard.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('ES_RET_CODE').text < '0': msg = _('Edit sharename %s failed') % share_dict['sharename'] raise exception.ShareBackendException(msg=msg) @_connection_checker def get_host_list(self, **kwargs): """Execute get_host_list API.""" params = { 'module': 'hosts', 'func': 'get_hostlist', 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/accessrights/accessrightsRequest.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': raise exception.ShareBackendException(msg=MSG_UNEXPECT_RESP) host_list = 
root.find('content').find('host_list') # if host_list is None: if not host_list: return None return_hosts = [] host_tree = host_list.findall('host') for host in host_tree: LOG.debug('host:%s', host) return_hosts.append(host) return return_hosts @_connection_checker def add_host(self, hostname, ipv4): """Execute add_host API.""" params = { 'module': 'hosts', 'func': 'apply_addhost', 'name': hostname, 'ipaddr_v4': ipv4, 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/accessrights/accessrightsRequest.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': raise exception.ShareBackendException(msg=MSG_UNEXPECT_RESP) @_connection_checker def edit_host(self, hostname, ipv4_list): """Execute edit_host API.""" params = { 'module': 'hosts', 'func': 'apply_sethost', 'name': hostname, 'ipaddr_v4': ipv4_list, 'sid': self.sid, } sanitized_params = self._sanitize_params(params) # urlencode with True parameter to parse ipv4_list sanitized_params = urlparse.urlencode(sanitized_params, True) url = ('/cgi-bin/accessrights/accessrightsRequest.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': raise exception.ShareBackendException(msg=MSG_UNEXPECT_RESP) @_connection_checker def delete_host(self, hostname): """Execute delete_host API.""" params = { 'module': 'hosts', 'func': 'apply_delhost', 'host_name': hostname, 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = 
('/cgi-bin/accessrights/accessrightsRequest.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': raise exception.ShareBackendException(msg=MSG_UNEXPECT_RESP) @_connection_checker def set_nfs_access(self, sharename, access, host_name): """Execute set_nfs_access API.""" params = { 'wiz_func': 'share_nfs_control', 'action': 'share_nfs_control', 'sharename': sharename, 'access': access, 'host_name': host_name, 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/priv/privWizard.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': raise exception.ShareBackendException(msg=MSG_UNEXPECT_RESP) class QnapAPIExecutorTS(QnapAPIExecutor): """Makes QNAP API calls for TS NAS.""" @_connection_checker def get_snapshot_info(self, **kwargs): """Execute get_snapshot_info API.""" for key, value in kwargs.items(): LOG.debug('%(key)s = %(val)s', {'key': key, 'val': value}) params = { 'func': 'extra_get', 'LUNIndex': kwargs['lun_index'], 'smb_snapshot_list': '1', 'smb_snapshot': '1', 'snapshot_list': '1', 'sid': self.sid, } sanitized_params = self._sanitize_params(params) sanitized_params = urlparse.urlencode(sanitized_params) url = ('/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) res_details = self._execute_and_get_response_details(self.ip, url) root = etree.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.ShareBackendException(msg=MSG_SESSION_EXPIRED) if root.find('result').text < '0': raise 
exception.ShareBackendException(msg=MSG_UNEXPECT_RESP) snapshot_list = root.find('SnapshotList') if snapshot_list is None: return None snapshot_tree = snapshot_list.findall('row') for snapshot in snapshot_tree: if (kwargs['snapshot_name'] == snapshot.find('snapshot_name').text): LOG.debug('snapshot_name:%s', kwargs['snapshot_name']) return snapshot return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/qnap/qnap.py0000664000175000017500000012070300000000000021352 0ustar00zuulzuul00000000000000# Copyright (c) 2016 QNAP Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Share driver for QNAP Storage. This driver supports QNAP Storage for NFS. 
""" import datetime import math import re import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from oslo_utils import units from manila.common import constants from manila import exception from manila.i18n import _ from manila import share from manila.share import driver from manila.share.drivers.qnap import api from manila.share import share_types from manila import utils LOG = logging.getLogger(__name__) qnap_manila_opts = [ cfg.StrOpt('qnap_management_url', required=True, help='The URL to manage QNAP Storage.'), cfg.HostAddressOpt('qnap_share_ip', required=True, help='NAS share IP for mounting shares.'), cfg.StrOpt('qnap_nas_login', required=True, help='Username for QNAP storage.'), cfg.StrOpt('qnap_nas_password', required=True, secret=True, help='Password for QNAP storage.'), cfg.StrOpt('qnap_poolname', required=True, help='Pool within which QNAP shares must be created.'), ] CONF = cfg.CONF CONF.register_opts(qnap_manila_opts) class QnapShareDriver(driver.ShareDriver): """OpenStack driver to enable QNAP Storage. Version history: 1.0.0 - Initial driver (Only NFS) 1.0.1 - Add support for QES fw 1.1.4. 1.0.2 - Fix bug #1736370, QNAP Manila driver: Access rule setting is override by the another access rule. 1.0.3 - Add supports for Thin Provisioning, SSD Cache, Deduplication and Compression. 1.0.4 - Add support for QES fw 2.0.0. 1.0.5 - Fix bug #1773761, when user tries to manage share, the size of managed share should not be changed. 1.0.6 - Add support for QES fw 2.1.0. 1.0.7 - Add support for QES fw on TDS series NAS model. 1.0.8 - Fix bug, driver should not manage snapshot which does not exist in NAS. Fix bug, driver should create share from snapshot with specified size. 
""" DRIVER_VERSION = '1.0.8' def __init__(self, *args, **kwargs): """Initialize QnapShareDriver.""" super(QnapShareDriver, self).__init__(False, *args, **kwargs) self.private_storage = kwargs.get('private_storage') self.api_executor = None self.group_stats = {} self.configuration.append_config_values(qnap_manila_opts) self.share_api = share.API() def do_setup(self, context): """Setup the QNAP Manila share driver.""" self.ctxt = context LOG.debug('context: %s', context) # Setup API Executor try: self.api_executor = self._create_api_executor() except Exception: LOG.exception('Failed to create HTTP client. Check IP ' 'address, port, username, password and make ' 'sure the array version is compatible.') raise def check_for_setup_error(self): """Check the status of setup.""" if self.api_executor is None: msg = _("Failed to instantiate API client to communicate with " "QNAP storage systems.") raise exception.ShareBackendException(msg=msg) def _create_api_executor(self): """Create API executor by NAS model.""" """LOG.debug('CONF.qnap_nas_login=%(conf)s', {'conf': CONF.qnap_nas_login}) LOG.debug('self.configuration.qnap_nas_login=%(conf)s', {'conf': self.configuration.qnap_nas_login})""" self.api_executor = api.QnapAPIExecutor( username=self.configuration.qnap_nas_login, password=self.configuration.qnap_nas_password, management_url=self.configuration.qnap_management_url) display_model_name, internal_model_name, fw_version = ( self.api_executor.get_basic_info( self.configuration.qnap_management_url)) pattern = re.compile(r"^([A-Z]+)-?[A-Z]{0,2}(\d+)\d{2}(U|[a-z]*)") matches = pattern.match(display_model_name) if not matches: return None model_type = matches.group(1) ts_model_types = ( "TS", "SS", "IS", "TVS", "TBS" ) tes_model_types = ( "TES", "TDS" ) es_model_types = ( "ES", ) if model_type in ts_model_types: if (fw_version.startswith("4.2") or fw_version.startswith("4.3")): LOG.debug('Create TS API Executor') # modify the pool name to pool index 
self.configuration.qnap_poolname = ( self._get_ts_model_pool_id( self.configuration.qnap_poolname)) return api.QnapAPIExecutorTS( username=self.configuration.qnap_nas_login, password=self.configuration.qnap_nas_password, management_url=self.configuration.qnap_management_url) elif model_type in tes_model_types: if 'TS' in internal_model_name: if (fw_version.startswith("4.2") or fw_version.startswith("4.3")): LOG.debug('Create TS API Executor') # modify the pool name to pool index self.configuration.qnap_poolname = ( self._get_ts_model_pool_id( self.configuration.qnap_poolname)) return api.QnapAPIExecutorTS( username=self.configuration.qnap_nas_login, password=self.configuration.qnap_nas_password, management_url=self.configuration.qnap_management_url) elif "1.1.2" <= fw_version <= "2.1.9999": LOG.debug('Create ES API Executor') return api.QnapAPIExecutor( username=self.configuration.qnap_nas_login, password=self.configuration.qnap_nas_password, management_url=self.configuration.qnap_management_url) elif model_type in es_model_types: if "1.1.2" <= fw_version <= "2.1.9999": LOG.debug('Create ES API Executor') return api.QnapAPIExecutor( username=self.configuration.qnap_nas_login, password=self.configuration.qnap_nas_password, management_url=self.configuration.qnap_management_url) msg = _('QNAP Storage model is not supported by this driver.') raise exception.ShareBackendException(msg=msg) def _get_ts_model_pool_id(self, pool_name): """Modify the pool name to pool index.""" pattern = re.compile(r"^(\d+)+|^Storage Pool (\d+)+") matches = pattern.match(pool_name) if matches.group(1): return matches.group(1) else: return matches.group(2) @utils.synchronized('qnap-gen_name') def _gen_random_name(self, type): if type == 'share': infix = "shr-" elif type == 'snapshot': infix = "snp-" elif type == 'host': infix = "hst-" else: infix = "" return ("manila-%(ifx)s%(time)s" % {'ifx': infix, 'time': timeutils.utcnow().strftime('%Y%m%d%H%M%S%f')}) def _gen_host_name(self, 
vol_name_timestamp, access_level): # host_name will be manila-{vol_name_timestamp}-ro or # manila-{vol_name_timestamp}-rw return 'manila-{}-{}'.format(vol_name_timestamp, access_level) def _get_timestamp_from_vol_name(self, vol_name): vol_name_split = vol_name.split('-') dt = datetime.datetime.strptime(vol_name_split[2], '%Y%m%d%H%M%S%f') return int(time.mktime(dt.timetuple())) def _get_location_path(self, share_name, share_proto, ip, vol_id): if share_proto == 'NFS': vol = self.api_executor.get_specific_volinfo(vol_id) vol_mount_path = vol.find('vol_mount_path').text location = '%s:%s' % (ip, vol_mount_path) else: msg = _('Invalid NAS protocol: %s') % share_proto raise exception.InvalidInput(reason=msg) export_location = { 'path': location, 'is_admin_only': False, } return export_location def _update_share_stats(self): """Get latest share stats.""" backend_name = (self.configuration.safe_get( 'share_backend_name') or self.__class__.__name__) LOG.debug('backend_name=%(backend_name)s', {'backend_name': backend_name}) selected_pool = self.api_executor.get_specific_poolinfo( self.configuration.qnap_poolname) total_capacity_gb = (int(selected_pool.find('capacity_bytes').text) / units.Gi) LOG.debug('total_capacity_gb: %s GB', total_capacity_gb) free_capacity_gb = (int(selected_pool.find('freesize_bytes').text) / units.Gi) LOG.debug('free_capacity_gb: %s GB', free_capacity_gb) alloc_capacity_gb = (int(selected_pool.find('allocated_bytes').text) / units.Gi) LOG.debug('allocated_capacity_gb: %s GB', alloc_capacity_gb) reserved_percentage = self.configuration.safe_get( 'reserved_share_percentage') reserved_snapshot_percentage = self.configuration.safe_get( 'reserved_share_from_snapshot_percentage') or reserved_percentage reserved_shr_extend_percentage = self.configuration.safe_get( 'reserved_share_extend_percentage') or reserved_percentage # single pool now, need support multiple pools in the future single_pool = { "pool_name": self.configuration.qnap_poolname, 
"total_capacity_gb": total_capacity_gb, "free_capacity_gb": free_capacity_gb, "allocated_capacity_gb": alloc_capacity_gb, "reserved_percentage": reserved_percentage, "reserved_snapshot_percentage": reserved_snapshot_percentage, "reserved_share_extend_percentage": reserved_shr_extend_percentage, "qos": False, "dedupe": [True, False], "compression": [True, False], "thin_provisioning": [True, False], "qnap_ssd_cache": [True, False] } data = { "share_backend_name": backend_name, "vendor_name": "QNAP", "driver_version": self.DRIVER_VERSION, "storage_protocol": "NFS", "snapshot_support": True, "create_share_from_snapshot_support": True, "driver_handles_share_servers": self.configuration.safe_get( 'driver_handles_share_servers'), 'pools': [single_pool], } super(QnapShareDriver, self)._update_share_stats(data) @utils.retry(retry_param=exception.ShareBackendException, interval=3, retries=5) @utils.synchronized('qnap-create_share') def create_share(self, context, share, share_server=None): """Create a new share.""" LOG.debug('share: %s', share.__dict__) extra_specs = share_types.get_extra_specs_from_share(share) LOG.debug('extra_specs: %s', extra_specs) qnap_thin_provision = share_types.parse_boolean_extra_spec( 'thin_provisioning', extra_specs.get("thin_provisioning") or extra_specs.get('capabilities:thin_provisioning') or 'true') qnap_compression = share_types.parse_boolean_extra_spec( 'compression', extra_specs.get("compression") or extra_specs.get('capabilities:compression') or 'true') qnap_deduplication = share_types.parse_boolean_extra_spec( 'dedupe', extra_specs.get("dedupe") or extra_specs.get('capabilities:dedupe') or 'false') qnap_ssd_cache = share_types.parse_boolean_extra_spec( 'qnap_ssd_cache', extra_specs.get("qnap_ssd_cache") or extra_specs.get("capabilities:qnap_ssd_cache") or 'false') LOG.debug('qnap_thin_provision: %(qnap_thin_provision)s ' 'qnap_compression: %(qnap_compression)s ' 'qnap_deduplication: %(qnap_deduplication)s ' 'qnap_ssd_cache: 
%(qnap_ssd_cache)s', {'qnap_thin_provision': qnap_thin_provision, 'qnap_compression': qnap_compression, 'qnap_deduplication': qnap_deduplication, 'qnap_ssd_cache': qnap_ssd_cache}) share_proto = share['share_proto'] # User could create two shares with the same name on horizon. # Therefore, we should not use displayname to create shares on NAS. create_share_name = self._gen_random_name("share") # If share name exists, need to change to another name. created_share = self.api_executor.get_share_info( self.configuration.qnap_poolname, vol_label=create_share_name) LOG.debug('created_share: %s', created_share) if created_share is not None: msg = (_("The share name %s is used by other share on NAS.") % create_share_name) LOG.error(msg) raise exception.ShareBackendException(msg=msg) if (qnap_deduplication and not qnap_thin_provision): msg = _("Dedupe cannot be enabled without thin_provisioning.") LOG.debug('Dedupe cannot be enabled without thin_provisioning.') raise exception.InvalidExtraSpec(reason=msg) self.api_executor.create_share( share, self.configuration.qnap_poolname, create_share_name, share_proto, qnap_thin_provision=qnap_thin_provision, qnap_compression=qnap_compression, qnap_deduplication=qnap_deduplication, qnap_ssd_cache=qnap_ssd_cache) created_share = self._get_share_info(create_share_name) volID = created_share.find('vol_no').text # Use private_storage to record volume ID and Name created in the NAS. 
LOG.debug('volID: %(volID)s ' 'volName: %(create_share_name)s', {'volID': volID, 'create_share_name': create_share_name}) _metadata = {'volID': volID, 'volName': create_share_name, 'thin_provision': qnap_thin_provision, 'compression': qnap_compression, 'deduplication': qnap_deduplication, 'ssd_cache': qnap_ssd_cache} self.private_storage.update(share['id'], _metadata) return self._get_location_path(create_share_name, share['share_proto'], self.configuration.qnap_share_ip, volID) @utils.retry(retry_param=exception.ShareBackendException, interval=5, retries=5, backoff_rate=1) def _get_share_info(self, share_name): share = self.api_executor.get_share_info( self.configuration.qnap_poolname, vol_label=share_name) if share is None: msg = _("Fail to get share info of %s on NAS.") % share_name LOG.error(msg) raise exception.ShareBackendException(msg=msg) else: return share @utils.synchronized('qnap-delete_share') def delete_share(self, context, share, share_server=None): """Delete the specified share.""" # Use private_storage to retrieve volume ID created in the NAS. volID = self.private_storage.get(share['id'], 'volID') if not volID: LOG.warning('volID for Share %s does not exist', share['id']) return LOG.debug('volID: %s', volID) del_share = self.api_executor.get_share_info( self.configuration.qnap_poolname, vol_no=volID) if del_share is None: LOG.warning('Share %s does not exist', share['id']) return vol_no = del_share.find('vol_no').text self.api_executor.delete_share(vol_no) self.private_storage.delete(share['id']) @utils.synchronized('qnap-extend_share') def extend_share(self, share, new_size, share_server=None): """Extend an existing share.""" LOG.debug('Entering extend_share share_name=%(share_name)s ' 'share_id=%(share_id)s ' 'new_size=%(size)s', {'share_name': share['display_name'], 'share_id': share['id'], 'size': new_size}) # Use private_storage to retrieve volume Name created in the NAS. 
volName = self.private_storage.get(share['id'], 'volName') if not volName: LOG.debug('Share %s does not exist', share['id']) raise exception.ShareResourceNotFound(share_id=share['id']) LOG.debug('volName: %s', volName) thin_provision = self.private_storage.get( share['id'], 'thin_provision') compression = self.private_storage.get(share['id'], 'compression') deduplication = self.private_storage.get(share['id'], 'deduplication') ssd_cache = self.private_storage.get(share['id'], 'ssd_cache') LOG.debug('thin_provision: %(thin_provision)s ' 'compression: %(compression)s ' 'deduplication: %(deduplication)s ' 'ssd_cache: %(ssd_cache)s', {'thin_provision': thin_provision, 'compression': compression, 'deduplication': deduplication, 'ssd_cache': ssd_cache}) share_dict = { 'sharename': volName, 'old_sharename': volName, 'new_size': new_size, 'thin_provision': thin_provision == 'True', 'compression': compression == 'True', 'deduplication': deduplication == 'True', 'ssd_cache': ssd_cache == 'True', 'share_proto': share['share_proto'] } self.api_executor.edit_share(share_dict) @utils.retry(retry_param=exception.ShareBackendException, interval=3, retries=5) @utils.synchronized('qnap-create_snapshot') def create_snapshot(self, context, snapshot, share_server=None): """Create a snapshot.""" LOG.debug('snapshot[share][share_id]: %s', snapshot['share']['share_id']) LOG.debug('snapshot id: %s', snapshot['id']) # Use private_storage to retrieve volume ID created in the NAS. volID = self.private_storage.get(snapshot['share']['id'], 'volID') if not volID: LOG.warning( 'volID for Share %s does not exist', snapshot['share']['id']) raise exception.ShareResourceNotFound( share_id=snapshot['share']['id']) LOG.debug('volID: %s', volID) # User could create two snapshot with the same name on horizon. # Therefore, we should not use displayname to create snapshot on NAS. 
@utils.synchronized('qnap-delete_snapshot')
def delete_snapshot(self, context, snapshot, share_server=None):
    """Delete a snapshot."""
    LOG.debug('Entering delete_snapshot. The deleted snapshot=%(snap)s',
              {'snap': snapshot['id']})
    # Prefer the id recorded in provider_location; fall back to the
    # private-storage entry written at snapshot-creation time.
    snap_id = snapshot.get('provider_location')
    if not snap_id:
        snap_id = self.private_storage.get(snapshot['id'], 'snapshot_id')
    if not snap_id:
        # Nothing recorded for this snapshot: treat delete as a no-op.
        LOG.warning('Snapshot %s does not exist', snapshot['id'])
        return
    LOG.debug('snapshot_id: %s', snap_id)
    self.api_executor.delete_snapshot_api(snap_id)
    self.private_storage.delete(snapshot['id'])
self.private_storage.get( snapshot['share_instance_id'], 'compression') deduplication = self.private_storage.get( snapshot['share_instance_id'], 'deduplication') ssd_cache = self.private_storage.get( snapshot['share_instance_id'], 'ssd_cache') LOG.debug('thin_provision: %(thin_provision)s ' 'compression: %(compression)s ' 'deduplication: %(deduplication)s ' 'ssd_cache: %(ssd_cache)s', {'thin_provision': thin_provision, 'compression': compression, 'deduplication': deduplication, 'ssd_cache': ssd_cache}) # Use private_storage to record volume ID and Name created in the NAS. _metadata = { 'volID': create_volID, 'volName': create_share_name, 'thin_provision': thin_provision, 'compression': compression, 'deduplication': deduplication, 'ssd_cache': ssd_cache } self.private_storage.update(share['id'], _metadata) # Test to get value from private_storage. volName = self.private_storage.get(share['id'], 'volName') LOG.debug('volName: %s', volName) return self._get_location_path(create_share_name, share['share_proto'], self.configuration.qnap_share_ip, create_volID) def _get_vol_host(self, host_list, vol_name_timestamp): vol_host_list = [] if host_list is None: return vol_host_list for host in host_list: # Check host alias name with prefix "manila-{vol_name_timestamp}" # to find the host of this manila share. LOG.debug('_get_vol_host name:%s', host.find('name').text) # Because driver supports only IPv4 now, check "netaddrs" # have "ipv4" tag to get address. 
if re.match("^manila-{}".format(vol_name_timestamp), host.find('name').text): host_dict = { 'index': host.find('index').text, 'hostid': host.find('hostid').text, 'name': host.find('name').text, 'ipv4': [], } for ipv4 in host.findall('netaddrs/ipv4'): host_dict['ipv4'].append(ipv4.text) vol_host_list.append(host_dict) LOG.debug('_get_vol_host vol_host_list:%s', vol_host_list) return vol_host_list @utils.synchronized('qnap-update_access') def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): if not (add_rules or delete_rules): volName = self.private_storage.get(share['id'], 'volName') LOG.debug('volName: %s', volName) if volName is None: LOG.debug('Share %s does not exist', share['id']) raise exception.ShareResourceNotFound(share_id=share['id']) # Clear all current ACLs self.api_executor.set_nfs_access(volName, 2, "all") vol_name_timestamp = self._get_timestamp_from_vol_name(volName) host_list = self.api_executor.get_host_list() LOG.debug('host_list:%s', host_list) vol_host_list = self._get_vol_host(host_list, vol_name_timestamp) # If host already exist, delete the host if len(vol_host_list) > 0: for vol_host in vol_host_list: self.api_executor.delete_host(vol_host['name']) # Add each one through all rules. 
for access in access_rules: self._allow_access(context, share, access, share_server) else: # Adding/Deleting specific rules for access in delete_rules: self._deny_access(context, share, access, share_server) for access in add_rules: self._allow_access(context, share, access, share_server) def _allow_access(self, context, share, access, share_server=None): """Allow access to the share.""" share_proto = share['share_proto'] access_type = access['access_type'] access_level = access['access_level'] access_to = access['access_to'] LOG.debug('share_proto: %(share_proto)s ' 'access_type: %(access_type)s ' 'access_level: %(access_level)s ' 'access_to: %(access_to)s', {'share_proto': share_proto, 'access_type': access_type, 'access_level': access_level, 'access_to': access_to}) self._check_share_access(share_proto, access_type) vol_name = self.private_storage.get(share['id'], 'volName') vol_name_timestamp = self._get_timestamp_from_vol_name(vol_name) host_name = self._gen_host_name(vol_name_timestamp, access_level) host_list = self.api_executor.get_host_list() LOG.debug('vol_name: %(vol_name)s ' 'access_level: %(access_level)s ' 'host_name: %(host_name)s ' 'host_list: %(host_list)s ', {'vol_name': vol_name, 'access_level': access_level, 'host_name': host_name, 'host_list': host_list}) filter_host_list = self._get_vol_host(host_list, vol_name_timestamp) if len(filter_host_list) == 0: # if host does not exist, create a host for the share self.api_executor.add_host(host_name, access_to) elif (len(filter_host_list) == 1 and filter_host_list[0]['name'] == host_name): # if the host exist, and this host is for the same access right, # add ip to the host. 
ipv4_list = filter_host_list[0]['ipv4'] if access_to not in ipv4_list: ipv4_list.append(access_to) LOG.debug('vol_host["ipv4"]: %s', filter_host_list[0]['ipv4']) LOG.debug('ipv4_list: %s', ipv4_list) self.api_executor.edit_host(host_name, ipv4_list) else: # Until now, share of QNAP NAS can only apply one access level for # all ips. "rw" for some ips and "ro" for else is not allowed. support_level = (constants.ACCESS_LEVEL_RW if access_level == constants.ACCESS_LEVEL_RO else constants.ACCESS_LEVEL_RO) reason = _('Share only supports one access ' 'level: %s') % support_level LOG.error(reason) raise exception.InvalidShareAccess(reason=reason) access = 1 if access_level == constants.ACCESS_LEVEL_RO else 0 self.api_executor.set_nfs_access(vol_name, access, host_name) def _deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" share_proto = share['share_proto'] access_type = access['access_type'] access_level = access['access_level'] access_to = access['access_to'] LOG.debug('share_proto: %(share_proto)s ' 'access_type: %(access_type)s ' 'access_level: %(access_level)s ' 'access_to: %(access_to)s', {'share_proto': share_proto, 'access_type': access_type, 'access_level': access_level, 'access_to': access_to}) try: self._check_share_access(share_proto, access_type) except exception.InvalidShareAccess: LOG.warning('The denied rule is invalid and does not exist.') return vol_name = self.private_storage.get(share['id'], 'volName') vol_name_timestamp = self._get_timestamp_from_vol_name(vol_name) host_name = self._gen_host_name(vol_name_timestamp, access_level) host_list = self.api_executor.get_host_list() LOG.debug('vol_name: %(vol_name)s ' 'access_level: %(access_level)s ' 'host_name: %(host_name)s ' 'host_list: %(host_list)s ', {'vol_name': vol_name, 'access_level': access_level, 'host_name': host_name, 'host_list': host_list}) filter_host_list = self._get_vol_host(host_list, vol_name_timestamp) # if share already have host, remove ip 
from host for vol_host in filter_host_list: if vol_host['name'] == host_name: ipv4_list = vol_host['ipv4'] if access_to in ipv4_list: ipv4_list.remove(access_to) LOG.debug('vol_host["ipv4"]: %s', vol_host['ipv4']) LOG.debug('ipv4_list: %s', ipv4_list) if len(ipv4_list) == 0: # if list empty, remove the host self.api_executor.set_nfs_access( vol_name, 2, host_name) self.api_executor.delete_host(host_name) else: self.api_executor.edit_host(host_name, ipv4_list) break def _check_share_access(self, share_proto, access_type): if share_proto == 'NFS' and access_type != 'ip': reason = _('Only "ip" access type is allowed for ' 'NFS shares.') LOG.warning(reason) raise exception.InvalidShareAccess(reason=reason) elif share_proto != 'NFS': reason = _('Invalid NAS protocol: %s') % share_proto raise exception.InvalidShareAccess(reason=reason) def manage_existing(self, share, driver_options): """Manages a share that exists on backend.""" if share['share_proto'].lower() == 'nfs': # 10.0.0.1:/share/example LOG.info("Share %(shr_path)s will be managed with ID " "%(shr_id)s.", {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) old_path_info = share['export_locations'][0]['path'].split( ':/share/') if len(old_path_info) == 2: ip = old_path_info[0] share_name = old_path_info[1] else: msg = _("Incorrect path. 
It should have the following format: " "IP:/share/share_name.") raise exception.ShareBackendException(msg=msg) else: msg = _('Invalid NAS protocol: %s') % share['share_proto'] raise exception.InvalidInput(reason=msg) if ip != self.configuration.qnap_share_ip: msg = _("The NAS IP %(ip)s is not configured.") % {'ip': ip} raise exception.ShareBackendException(msg=msg) existing_share = self.api_executor.get_share_info( self.configuration.qnap_poolname, vol_label=share_name) if existing_share is None: msg = _("The share %s trying to be managed was not found on " "backend.") % share['id'] raise exception.ManageInvalidShare(reason=msg) extra_specs = share_types.get_extra_specs_from_share(share) qnap_thin_provision = share_types.parse_boolean_extra_spec( 'thin_provisioning', extra_specs.get("thin_provisioning") or extra_specs.get('capabilities:thin_provisioning') or 'true') qnap_compression = share_types.parse_boolean_extra_spec( 'compression', extra_specs.get("compression") or extra_specs.get('capabilities:compression') or 'true') qnap_deduplication = share_types.parse_boolean_extra_spec( 'dedupe', extra_specs.get("dedupe") or extra_specs.get('capabilities:dedupe') or 'false') qnap_ssd_cache = share_types.parse_boolean_extra_spec( 'qnap_ssd_cache', extra_specs.get("qnap_ssd_cache") or extra_specs.get("capabilities:qnap_ssd_cache") or 'false') LOG.debug('qnap_thin_provision: %(qnap_thin_provision)s ' 'qnap_compression: %(qnap_compression)s ' 'qnap_deduplication: %(qnap_deduplication)s ' 'qnap_ssd_cache: %(qnap_ssd_cache)s', {'qnap_thin_provision': qnap_thin_provision, 'qnap_compression': qnap_compression, 'qnap_deduplication': qnap_deduplication, 'qnap_ssd_cache': qnap_ssd_cache}) if (qnap_deduplication and not qnap_thin_provision): msg = _("Dedupe cannot be enabled without thin_provisioning.") LOG.debug('Dedupe cannot be enabled without thin_provisioning.') raise exception.InvalidExtraSpec(reason=msg) vol_no = existing_share.find('vol_no').text vol = 
self.api_executor.get_specific_volinfo(vol_no) vol_size_gb = math.ceil(float(vol.find('size').text) / units.Gi) share_dict = { 'sharename': share_name, 'old_sharename': share_name, 'thin_provision': qnap_thin_provision, 'compression': qnap_compression, 'deduplication': qnap_deduplication, 'ssd_cache': qnap_ssd_cache, 'share_proto': share['share_proto'] } self.api_executor.edit_share(share_dict) _metadata = {} _metadata['volID'] = vol_no _metadata['volName'] = share_name _metadata['thin_provision'] = qnap_thin_provision _metadata['compression'] = qnap_compression _metadata['deduplication'] = qnap_deduplication _metadata['ssd_cache'] = qnap_ssd_cache self.private_storage.update(share['id'], _metadata) LOG.info("Share %(shr_path)s was successfully managed with ID " "%(shr_id)s.", {'shr_path': share['export_locations'][0]['path'], 'shr_id': share['id']}) export_locations = self._get_location_path( share_name, share['share_proto'], self.configuration.qnap_share_ip, vol_no) return {'size': vol_size_gb, 'export_locations': export_locations} def unmanage(self, share): """Remove the specified share from Manila management.""" self.private_storage.delete(share['id']) def manage_existing_snapshot(self, snapshot, driver_options): """Manage existing share snapshot with manila.""" volID = self.private_storage.get(snapshot['share']['id'], 'volID') LOG.debug('volID: %s', volID) existing_share = self.api_executor.get_share_info( self.configuration.qnap_poolname, vol_no=volID) if existing_share is None: msg = _("The share id %s was not found on backend.") % volID LOG.error(msg) raise exception.ShareNotFound(msg) snapshot_id = snapshot.get('provider_location') snapshot_id_info = snapshot_id.split('@') if len(snapshot_id_info) == 2: share_name = snapshot_id_info[0] snapshot_name = snapshot_id_info[1] else: msg = _("Incorrect provider_location format. 
It should have the " "following format: share_name@snapshot_name.") LOG.error(msg) raise exception.InvalidParameterValue(msg) if share_name != existing_share.find('vol_label').text: msg = (_("The assigned share %(share_name)s was not matched " "%(vol_label)s on backend.") % {'share_name': share_name, 'vol_label': existing_share.find('vol_label').text}) LOG.error(msg) raise exception.ShareNotFound(msg) check_snapshot = self.api_executor.get_snapshot_info( volID=volID, snapshot_name=snapshot_name) if check_snapshot is None: msg = (_("The snapshot %(snapshot_name)s was not " "found on backend.") % {'snapshot_name': snapshot_name}) LOG.error(msg) raise exception.InvalidParameterValue(err=msg) _metadata = { 'snapshot_id': snapshot_id, } self.private_storage.update(snapshot['id'], _metadata) parent_size = check_snapshot.find('parent_size') snap_size_gb = None if parent_size is not None: snap_size_gb = math.ceil(float(parent_size.text) / units.Gi) return {'size': snap_size_gb} def unmanage_snapshot(self, snapshot): """Remove the specified snapshot from Manila management.""" self.private_storage.delete(snapshot['id']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9336715 manila-21.0.0/manila/share/drivers/quobyte/0000775000175000017500000000000000000000000020567 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/quobyte/__init__.py0000664000175000017500000000000000000000000022666 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/quobyte/jsonrpc.py0000664000175000017500000001114300000000000022617 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Quobyte Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quobyte driver helper. Control Quobyte over its JSON RPC API. """ import requests from requests import auth from requests import codes from urllib import parse as urlparse from oslo_log import log from oslo_serialization import jsonutils from manila import exception from manila import utils LOG = log.getLogger(__name__) ERROR_ENOENT = 2 ERROR_ENTITY_NOT_FOUND = -24 ERROR_GARBAGE_ARGS = -3 class JsonRpc(object): def __init__(self, url, user_credentials, ca_file=None, key_file=None, cert_file=None): parsedurl = urlparse.urlparse(url) self._url = parsedurl.geturl() self._netloc = parsedurl.netloc self._ca_file = ca_file self._url_scheme = parsedurl.scheme if self._url_scheme == 'https': if not self._ca_file: self._ca_file = False LOG.warning( "Will not verify the server certificate of the API service" " because the CA certificate is not available.") self._id = 0 self._credentials = auth.HTTPBasicAuth( user_credentials[0], user_credentials[1]) self._key_file = key_file self._cert_file = cert_file @utils.synchronized('quobyte-request') def call(self, method_name, user_parameters, expected_errors=None): if expected_errors is None: expected_errors = [] # prepare request self._id += 1 parameters = {'retry': 'INFINITELY'} # Backend specific setting if user_parameters: parameters.update(user_parameters) post_data = { 'jsonrpc': '2.0', 'method': method_name, 'params': parameters, 'id': str(self._id), } LOG.debug("Request payload to be send 
is: %s", jsonutils.dumps(post_data)) # send request if self._url_scheme == 'https': if self._cert_file: result = requests.post(url=self._url, json=post_data, auth=self._credentials, verify=self._ca_file, cert=(self._cert_file, self._key_file), timeout=60) else: result = requests.post(url=self._url, json=post_data, auth=self._credentials, verify=self._ca_file, timeout=60) else: result = requests.post(url=self._url, json=post_data, auth=self._credentials, timeout=60) # eval request response if result.status_code == codes['OK']: LOG.debug("Retrieved data from Quobyte backend: %s", result.text) response = result.json() return self._checked_for_application_error(response, expected_errors) # If things did not work out provide error info LOG.debug("Backend request resulted in error: %s", result.text) result.raise_for_status() def _checked_for_application_error(self, result, expected_errors=None): if expected_errors is None: expected_errors = [] if 'error' in result and result['error']: if 'message' in result['error'] and 'code' in result['error']: if result["error"]["code"] in expected_errors: # hit an expected error, return empty result return None else: raise exception.QBRpcException( result=result["error"]["message"], qbcode=result["error"]["code"]) else: raise exception.QBException(str(result["error"])) return result["result"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/quobyte/quobyte.py0000664000175000017500000004304100000000000022633 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Quobyte Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Quobyte driver. Manila shares are directly mapped to Quobyte volumes. The access to the shares is provided by the Quobyte NFS proxy (a Ganesha NFS server). """ import math from oslo_config import cfg from oslo_log import log from oslo_utils import units from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.quobyte import jsonrpc LOG = log.getLogger(__name__) quobyte_manila_share_opts = [ cfg.StrOpt('quobyte_api_url', help='URL of the Quobyte API server (http or https)'), cfg.StrOpt('quobyte_api_ca', help='The X.509 CA file to verify the server cert.'), cfg.BoolOpt('quobyte_delete_shares', default=False, help='Actually deletes shares (vs. unexport)'), cfg.StrOpt('quobyte_api_username', default='admin', help='Username for Quobyte API server.'), cfg.StrOpt('quobyte_api_password', default='quobyte', secret=True, help='Password for Quobyte API server'), cfg.StrOpt('quobyte_volume_configuration', default='BASE', help='Name of volume configuration used for new shares.'), cfg.StrOpt('quobyte_default_volume_user', default='root', help='Default owning user for new volumes.'), cfg.StrOpt('quobyte_default_volume_group', default='root', help='Default owning group for new volumes.'), cfg.StrOpt('quobyte_export_path', default='/quobyte', help='Export path for shares of this bacckend. 
This needs ' 'to match the quobyte-nfs services "Pseudo" option.'), ] CONF = cfg.CONF CONF.register_opts(quobyte_manila_share_opts) class QuobyteShareDriver(driver.ExecuteMixin, driver.ShareDriver,): """Map share commands to Quobyte volumes. Version history: 1.0 - Initial driver. 1.0.1 - Adds ensure_share() implementation. 1.1 - Adds extend_share() and shrink_share() implementation. 1.2 - Adds update_access() implementation and related methods 1.2.1 - Improved capacity calculation 1.2.2 - Minor optimizations 1.2.3 - Updated RPC layer for improved stability 1.2.4 - Fixed handling updated QB API error codes 1.2.5 - Fixed two quota handling bugs 1.2.6 - Fixed volume resize and jsonrpc code style bugs 1.2.7 - Add quobyte_export_path option """ DRIVER_VERSION = '1.2.7' def __init__(self, *args, **kwargs): super(QuobyteShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(quobyte_manila_share_opts) self.backend_name = (self.configuration.safe_get('share_backend_name') or CONF.share_backend_name or 'Quobyte') def _fetch_existing_access(self, context, share): volume_uuid = self._resolve_volume_name(share['name'], share['project_id']) result = self.rpc.call('getConfiguration', {}) if result is None: raise exception.QBException( "Could not retrieve Quobyte configuration data!") tenant_configs = result['tenant_configuration'] qb_access_list = [] for tc in tenant_configs: for va in tc['volume_access']: if va['volume_uuid'] == volume_uuid: a_level = constants.ACCESS_LEVEL_RW if va['read_only']: a_level = constants.ACCESS_LEVEL_RO qb_access_list.append({ 'access_to': va['restrict_to_network'], 'access_level': a_level, 'access_type': 'ip' }) return qb_access_list def do_setup(self, context): """Prepares the backend.""" self.rpc = jsonrpc.JsonRpc( url=self.configuration.quobyte_api_url, ca_file=self.configuration.quobyte_api_ca, user_credentials=( self.configuration.quobyte_api_username, self.configuration.quobyte_api_password)) try: 
self.rpc.call('getInformation', {}) except Exception as exc: LOG.error("Could not connect to API: %s", exc) raise exception.QBException( _('Could not connect to API: %s') % exc) def _update_share_stats(self): total_gb, free_gb = self._get_capacities() data = dict( storage_protocol='NFS', vendor_name='Quobyte', share_backend_name=self.backend_name, driver_version=self.DRIVER_VERSION, total_capacity_gb=total_gb, free_capacity_gb=free_gb, reserved_percentage=self.configuration.reserved_share_percentage, reserved_snapshot_percentage=( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), reserved_share_extend_percentage=( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage)) super(QuobyteShareDriver, self)._update_share_stats(data) def _get_capacities(self): result = self.rpc.call('getSystemStatistics', {}) total = float(result['total_physical_capacity']) used = float(result['total_physical_usage']) LOG.info('Read capacity of %(cap)s bytes and ' 'usage of %(use)s bytes from backend. ', {'cap': total, 'use': used}) free = total - used if free < 0: free = 0 # no space available free_replicated = free / self._get_qb_replication_factor() # floor numbers to nine digits (bytes) total = math.floor((total / units.Gi) * units.G) / units.G free = math.floor((free_replicated / units.Gi) * units.G) / units.G return total, free def _get_qb_replication_factor(self): result = self.rpc.call('getEffectiveVolumeConfiguration', {'configuration_name': self. configuration.quobyte_volume_configuration}) return int(result['configuration']['volume_metadata_configuration'] ['replication_factor']) def check_for_setup_error(self): pass def get_network_allocations_number(self): return 0 def _get_project_name(self, context, project_id): """Retrieve the project name. TODO (kaisers): retrieve the project name in order to store and use in the backend for better usability. 
""" return project_id def _resize_share(self, share, new_size): newsize_bytes = new_size * units.Gi self.rpc.call('setQuota', {"quotas": [ {"consumer": [{"type": "VOLUME", "identifier": self._resolve_volume_name(share["name"], share['project_id']), "tenant_id": share["project_id"]}], "limits": [{"type": "LOGICAL_DISK_SPACE", "value": newsize_bytes}]} ]}) def _resolve_volume_name(self, volume_name, tenant_domain): """Resolve a volume name to the global volume uuid.""" result = self.rpc.call('resolveVolumeName', dict( volume_name=volume_name, tenant_domain=tenant_domain), [jsonrpc.ERROR_ENOENT, jsonrpc.ERROR_ENTITY_NOT_FOUND]) if result: return result['volume_uuid'] return None # not found def _subtract_access_lists(self, list_a, list_b): """Returns a list of elements in list_a that are not in list_b :param list_a: Base list of access rules :param list_b: List of access rules not to be returned :return: List of elements of list_a not present in list_b """ sub_tuples_list = [{"to": s.get('access_to'), "type": s.get('access_type'), "level": s.get('access_level')} for s in list_b] return [r for r in list_a if ( {"to": r.get("access_to"), "type": r.get("access_type"), "level": r.get("access_level")} not in sub_tuples_list)] def create_share(self, context, share, share_server=None): """Create or export a volume that is usable as a Manila share.""" if share['share_proto'] != 'NFS': raise exception.QBException( _('Quobyte driver only supports NFS shares')) volume_uuid = self._resolve_volume_name(share['name'], share['project_id']) if not volume_uuid: # create tenant, expect ERROR_GARBAGE_ARGS if it already exists self.rpc.call('setTenant', dict(tenant=dict(tenant_id=share['project_id'])), expected_errors=[jsonrpc.ERROR_GARBAGE_ARGS]) result = self.rpc.call('createVolume', dict( name=share['name'], tenant_domain=share['project_id'], root_user_id=self.configuration.quobyte_default_volume_user, root_group_id=self.configuration.quobyte_default_volume_group, 
configuration_name=(self.configuration. quobyte_volume_configuration))) volume_uuid = result['volume_uuid'] result = self.rpc.call('exportVolume', dict( volume_uuid=volume_uuid, protocol='NFS')) self._resize_share(share, share['size']) return self._build_share_export_string(result) def delete_share(self, context, share, share_server=None): """Delete the corresponding Quobyte volume.""" volume_uuid = self._resolve_volume_name(share['name'], share['project_id']) if not volume_uuid: LOG.warning("No volume found for " "share %(project_id)s/%(name)s", {"project_id": share['project_id'], "name": share['name']}) return if self.configuration.quobyte_delete_shares: self.rpc.call('deleteVolume', {'volume_uuid': volume_uuid}) else: self.rpc.call('exportVolume', {"volume_uuid": volume_uuid, "remove_export": True, }) def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported. :param context: The `context.RequestContext` object for the request :param share: Share instance that will be checked. :param share_server: Data structure with share server information. Not used by this driver. 
:returns: IP: of share :raises: :ShareResourceNotFound: If the share instance cannot be found in the backend """ volume_uuid = self._resolve_volume_name(share['name'], share['project_id']) LOG.debug("Ensuring Quobyte share %s", share['name']) if not volume_uuid: raise (exception.ShareResourceNotFound( share_id=share['id'])) result = self.rpc.call('exportVolume', dict( volume_uuid=volume_uuid, protocol='NFS')) return self._build_share_export_string(result) def _allow_access(self, context, share, access, share_server=None): """Allow access to a share.""" if access['access_type'] != 'ip': raise exception.InvalidShareAccess( _('Quobyte driver only supports ip access control')) volume_uuid = self._resolve_volume_name(share['name'], share['project_id']) ro = access['access_level'] == (constants.ACCESS_LEVEL_RO) call_params = { "volume_uuid": volume_uuid, "read_only": ro, "add_allow_ip": access['access_to']} self.rpc.call('exportVolume', call_params) def _build_share_export_string(self, rpc_result): return '%(nfs_server_ip)s:%(qb_exp_path)s%(nfs_export_path)s' % { "nfs_server_ip": rpc_result["nfs_server_ip"], "qb_exp_path": self.configuration.quobyte_export_path, "nfs_export_path": rpc_result["nfs_export_path"]} def _deny_access(self, context, share, access, share_server=None): """Remove white-list ip from a share.""" if access['access_type'] != 'ip': LOG.debug('Quobyte driver only supports ip access control. ' 'Ignoring deny access call for %s , %s', share['name'], self._get_project_name(context, share['project_id'])) return volume_uuid = self._resolve_volume_name(share['name'], share['project_id']) call_params = { "volume_uuid": volume_uuid, "remove_allow_ip": access['access_to']} self.rpc.call('exportVolume', call_params) def extend_share(self, ext_share, ext_size, share_server=None): """Uses _resize_share to extend a share. :param ext_share: Share model. :param ext_size: New size of share (new_size > share['size']). :param share_server: Currently not used. 
""" self._resize_share(share=ext_share, new_size=ext_size) def shrink_share(self, shrink_share, shrink_size, share_server=None): """Uses _resize_share to shrink a share. Quobyte uses soft quotas. If a shares current size is bigger than the new shrunken size no data is lost. Data can be continuously read from the share but new writes receive out of disk space replies. :param shrink_share: Share model. :param shrink_size: New size of share (new_size < share['size']). :param share_server: Currently not used. """ self._resize_share(share=shrink_share, new_size=shrink_size) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share. Two different cases are supported in here: 1. Recovery after error - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' are empty. Driver should apply all access rules for given share. 2. Adding/Deleting of several access rules - 'access_rules' contains all access_rules, 'add_rules' and 'delete_rules' contain rules which should be added/deleted. Driver can ignore rules in 'access_rules' and apply only rules from 'add_rules' and 'delete_rules'. :param context: Current context :param share: Share model with share data. :param access_rules: All access rules for given share :param add_rules: Empty List or List of access rules which should be added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. :param update_rules: Empty List or List of access rules which should be updated. access_rules already contains these rules. 
:param share_server: None or Share server model :raises If all of the *_rules params are None the method raises an InvalidShareAccess exception """ if (add_rules or delete_rules): # Handling access rule update for d_rule in delete_rules: self._deny_access(context, share, d_rule) for a_rule in add_rules: self._allow_access(context, share, a_rule) else: if not access_rules: LOG.warning("No access rules provided in update_access.") else: # Handling access rule recovery existing_rules = self._fetch_existing_access(context, share) missing_rules = self._subtract_access_lists(access_rules, existing_rules) for a_rule in missing_rules: LOG.debug("Adding rule %s in recovery.", str(a_rule)) self._allow_access(context, share, a_rule) superfluous_rules = self._subtract_access_lists(existing_rules, access_rules) for d_rule in superfluous_rules: LOG.debug("Removing rule %s in recovery.", str(d_rule)) self._deny_access(context, share, d_rule) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/service_instance.py0000664000175000017500000013752300000000000023010 0ustar00zuulzuul00000000000000# Copyright (c) 2014 NetApp, Inc. # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Module for managing nova instances for share drivers.""" import abc import os import time import netaddr from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import netutils from manila.common import constants as const from manila import compute from manila import context from manila import coordination from manila import exception from manila.i18n import _ from manila import image from manila.network.linux import ip_lib from manila.network.neutron import api as neutron from manila import ssh_utils from manila import utils LOG = log.getLogger(__name__) NEUTRON_NAME = "neutron" share_servers_handling_mode_opts = [ cfg.StrOpt( "service_image_name", default="manila-service-image", help="Name of image in Glance, that will be used for service instance " "creation. Only used if driver_handles_share_servers=True."), cfg.StrOpt( "service_instance_name_template", default="%s", help="Name of service instance. " "Only used if driver_handles_share_servers=True."), cfg.StrOpt( "manila_service_keypair_name", default="manila-service", help="Keypair name that will be created and used for service " "instances. Only used if driver_handles_share_servers=True."), cfg.StrOpt( "path_to_public_key", default="~/.ssh/id_rsa.pub", help="Path to hosts public key. " "Only used if driver_handles_share_servers=True."), cfg.StrOpt( "service_instance_security_group", default="manila-service", help="Security group name, that will be used for " "service instance creation. " "Only used if driver_handles_share_servers=True."), cfg.StrOpt( "service_instance_flavor_id", default="100", help="ID of flavor, that will be used for service instance " "creation. Only used if driver_handles_share_servers=True."), cfg.StrOpt( "service_network_name", default="manila_service_network", help="Name of manila service network. Used only with Neutron. 
" "Only used if driver_handles_share_servers=True."), cfg.HostAddressOpt( "service_network_host", sample_default="", help="Hostname to be used for service network binding. Used only with " "Neutron and if driver_handles_share_servers=True."), cfg.StrOpt( "service_network_cidr", default="10.254.0.0/16", help="CIDR of manila service network. Used only with Neutron and " "if driver_handles_share_servers=True."), cfg.IntOpt( "service_network_division_mask", default=28, help="This mask is used for dividing service network into " "subnets, IP capacity of subnet with this mask directly " "defines possible amount of created service VMs " "per tenant's subnet. Used only with Neutron " "and if driver_handles_share_servers=True."), cfg.StrOpt( "interface_driver", default="manila.network.linux.interface.OVSInterfaceDriver", help="Module path to the Virtual Interface (VIF) driver class. This " "option is used only by drivers operating in " "`driver_handles_share_servers=True` mode that provision " "OpenStack compute instances as share servers. This option is " "only supported with Neutron networking. " "Drivers provided in tree work with Linux Bridge " "(manila.network.linux.interface.BridgeInterfaceDriver) and OVS " "(manila.network.linux.interface.OVSInterfaceDriver). If the " "manila-share service is running on a host that is connected to " "the administrator network, a no-op driver " "(manila.network.linux.interface.NoopInterfaceDriver) may " "be used."), cfg.BoolOpt( "connect_share_server_to_tenant_network", default=False, help="Attach share server directly to share network. " "Used only with Neutron and " "if driver_handles_share_servers=True."), cfg.StrOpt( "admin_network_id", help="ID of neutron network used to communicate with admin network," " to create additional admin export locations on."), cfg.StrOpt( "admin_subnet_id", help="ID of neutron subnet used to communicate with admin network," " to create additional admin export locations on. 
" "Related to 'admin_network_id'."), ] no_share_servers_handling_mode_opts = [ cfg.StrOpt( "service_instance_name_or_id", help="Name or ID of service instance in Nova to use for share " "exports. Used only when share servers handling is disabled."), cfg.HostAddressOpt( "service_net_name_or_ip", help="Can be either name of network that is used by service " "instance within Nova to get IP address or IP address itself " "(either IPv4 or IPv6) for managing shares there. " "Used only when share servers handling is disabled."), cfg.HostAddressOpt( "tenant_net_name_or_ip", help="Can be either name of network that is used by service " "instance within Nova to get IP address or IP address itself " "(either IPv4 or IPv6) for exporting shares. " "Used only when share servers handling is disabled."), ] common_opts = [ cfg.StrOpt( "service_instance_user", help="User in service instance that will be used for authentication."), cfg.StrOpt( "service_instance_password", secret=True, help="Password for service instance user."), cfg.StrOpt( "path_to_private_key", help="Path to host's private key."), cfg.IntOpt( "max_time_to_build_instance", default=300, help="Maximum time in seconds to wait for creating service instance."), cfg.BoolOpt( "limit_ssh_access", default=False, help="Block SSH connection to the service instance from other " "networks than service network."), ] CONF = cfg.CONF class ServiceInstanceManager(object): """Manages nova instances for various share drivers. This class provides following external methods: 1. set_up_service_instance: creates instance and sets up share infrastructure. 2. ensure_service_instance: ensure service instance is available. 3. delete_service_instance: removes service instance and network infrastructure. """ _INSTANCE_CONNECTION_PROTO = "SSH" def get_config_option(self, key): """Returns value of config option. :param key: key of config' option. :returns: str -- value of config's option. 
first priority is driver's config, second priority is global config. """ if self.driver_config: return self.driver_config.safe_get(key) return CONF.get(key) def _get_network_helper(self): # Historically, there were multiple types of network helper, # but currently the only network helper type is Neutron. return NeutronNetworkHelper(self) def __init__(self, driver_config=None): super(ServiceInstanceManager, self).__init__() self.driver_config = driver_config if self.driver_config: self.driver_config.append_config_values(common_opts) if self.get_config_option("driver_handles_share_servers"): self.driver_config.append_config_values( share_servers_handling_mode_opts) else: self.driver_config.append_config_values( no_share_servers_handling_mode_opts) else: CONF.register_opts(common_opts) if self.get_config_option("driver_handles_share_servers"): CONF.register_opts(share_servers_handling_mode_opts) else: CONF.register_opts(no_share_servers_handling_mode_opts) if not self.get_config_option("service_instance_user"): raise exception.ServiceInstanceException( _('Service instance user is not specified.')) self.admin_context = context.get_admin_context() self.image_api = image.API() self.compute_api = compute.API() self.path_to_private_key = self.get_config_option( "path_to_private_key") self.max_time_to_build_instance = self.get_config_option( "max_time_to_build_instance") self.availability_zone = self.get_config_option( 'backend_availability_zone') or CONF.storage_availability_zone if self.get_config_option("driver_handles_share_servers"): self.path_to_public_key = self.get_config_option( "path_to_public_key") self._network_helper = None @property @utils.synchronized("instantiate_network_helper") def network_helper(self): if not self._network_helper: self._network_helper = self._get_network_helper() self._network_helper.setup_connectivity_with_service_instances() return self._network_helper def get_common_server(self): data = { 'public_address': None, 'private_address': 
None, 'service_net_name_or_ip': self.get_config_option( 'service_net_name_or_ip'), 'tenant_net_name_or_ip': self.get_config_option( 'tenant_net_name_or_ip'), } data['instance'] = self.compute_api.server_get_by_name_or_id( self.admin_context, self.get_config_option('service_instance_name_or_id')) if netutils.is_valid_ip(data['service_net_name_or_ip']): data['private_address'] = [data['service_net_name_or_ip']] else: data['private_address'] = self._get_addresses_by_network_name( data['service_net_name_or_ip'], data['instance']) if netutils.is_valid_ip(data['tenant_net_name_or_ip']): data['public_address'] = [data['tenant_net_name_or_ip']] else: data['public_address'] = self._get_addresses_by_network_name( data['tenant_net_name_or_ip'], data['instance']) if not (data['public_address'] and data['private_address']): raise exception.ManilaException( "Can not find one of net addresses for service instance. " "Instance: %(instance)s, " "private_address: %(private_address)s, " "public_address: %(public_address)s." 
% data) share_server = { 'username': self.get_config_option('service_instance_user'), 'password': self.get_config_option('service_instance_password'), 'pk_path': self.path_to_private_key, 'instance_id': data['instance']['id'], } for key in ('private_address', 'public_address'): data[key + '_first'] = None for address in data[key]: if netutils.is_valid_ip(address): data[key + '_first'] = address break share_server['ip'] = data['private_address_first'] share_server['public_address'] = data['public_address_first'] return {'backend_details': share_server} def _get_addresses_by_network_name(self, net_name, server): net_ips = [] if 'networks' in server and net_name in server['networks']: net_ips = server['networks'][net_name] elif 'addresses' in server and net_name in server['addresses']: net_ips = [addr['addr'] for addr in server['addresses'][net_name]] return net_ips def _get_service_instance_name(self, share_server_id): """Returns service vms name.""" if self.driver_config and self.driver_config.config_group: # Make service instance name unique for multibackend installation name = "%s_%s" % (self.driver_config.config_group, share_server_id) else: name = share_server_id return self.get_config_option("service_instance_name_template") % name def _get_server_ip(self, server, net_name): """Returns service IP address of service instance.""" net_ips = self._get_addresses_by_network_name(net_name, server) if not net_ips: msg = _("Failed to get service instance IP address. " "Service network name is '%(net_name)s' " "and provided data are '%(data)s'.") msg = msg % {'net_name': net_name, 'data': str(server)} raise exception.ServiceInstanceException(msg) return net_ips[0] def _get_or_create_security_groups(self, context, name=None, description=None, allow_ssh_subnet=False): """Get or create security group for service_instance. 
:param context: context, that should be used :param name: this is used for selection/creation of sec.group :param description: this is used on sec.group creation step only :param allow_ssh_subnet: subnet details to allow ssh connection from, if not supplied ssh will be allowed from any host :returns: SecurityGroup -- security group instance from Nova :raises: exception.ServiceInstanceException. """ sgs = [] # Common security group name = name or self.get_config_option( "service_instance_security_group") if not name: LOG.warning("Name for service instance security group is not " "provided. Skipping security group step.") return None if not description: description = ("This security group is intended " "to be used by share service.") sec_group_data = const.SERVICE_INSTANCE_SECGROUP_DATA if not allow_ssh_subnet: sec_group_data += const.SSH_PORTS sgs.append(self._get_or_create_security_group(name, description, sec_group_data)) if allow_ssh_subnet: if "cidr" not in allow_ssh_subnet or 'id' not in allow_ssh_subnet: raise exception.ManilaException( "Unable to limit SSH access") ssh_sg_name = "manila-service-subnet-{}".format( allow_ssh_subnet["id"]) sgs.append(self._get_or_create_security_group( ssh_sg_name, description, const.SSH_PORTS, allow_ssh_subnet["cidr"])) return sgs @utils.synchronized( "service_instance_get_or_create_security_group", external=True) def _get_or_create_security_group(self, name, description, sec_group_data, cidr="0.0.0.0/0"): s_groups = self.network_helper.neutron_api.security_group_list({ "name": name, })['security_groups'] s_groups = [s for s in s_groups if s['name'] == name] if not s_groups: LOG.debug("Creating security group with name '%s'.", name) sg = self.network_helper.neutron_api.security_group_create( name, description)['security_group'] for protocol, ports in sec_group_data: self.network_helper.neutron_api.security_group_rule_create( parent_group_id=sg['id'], ip_protocol=protocol, from_port=ports[0], to_port=ports[1], cidr=cidr, ) elif 
len(s_groups) > 1: msg = _("Ambiguous security_groups.") raise exception.ServiceInstanceException(msg) else: sg = s_groups[0] return sg def ensure_service_instance(self, context, server): """Ensures that server exists and active.""" if 'instance_id' not in server: LOG.warning("Unable to check server existence since " "'instance_id' key is not set in share server " "backend details.") return False try: inst = self.compute_api.server_get(self.admin_context, server['instance_id']) except exception.InstanceNotFound: LOG.warning("Service instance %s does not exist.", server['instance_id']) return False if inst['status'] == 'ACTIVE': return self._check_server_availability(server) return False def _delete_server(self, context, server_id): """Deletes the server.""" try: self.compute_api.server_get(context, server_id) except exception.InstanceNotFound: LOG.debug("Service instance '%s' was not found. " "Nothing to delete, skipping.", server_id) return self.compute_api.server_delete(context, server_id) t = time.time() while time.time() - t < self.max_time_to_build_instance: try: inst = self.compute_api.server_get(context, server_id) if inst.get("status").lower() == "soft_deleted": LOG.debug("Service instance '%s' was soft-deleted " "successfully.", server_id) break except exception.InstanceNotFound: LOG.debug("Service instance '%s' was deleted " "successfully.", server_id) break time.sleep(2) else: raise exception.ServiceInstanceException( _("Instance '%(id)s' has not been deleted in %(s)ss. " "Giving up.") % { 'id': server_id, 's': self.max_time_to_build_instance}) def set_up_service_instance(self, context, network_info): """Finds or creates and sets up service vm. 
:param context: defines context, that should be used :param network_info: network info for getting allocations :returns: dict with service instance details :raises: exception.ServiceInstanceException """ instance_name = self._get_service_instance_name( network_info['server_id']) server = self._create_service_instance( context, instance_name, network_info) instance_details = self._get_new_instance_details(server) if not self._check_server_availability(instance_details): e = exception.ServiceInstanceException( _('%(conn_proto)s connection has not been ' 'established to %(server)s in %(time)ss. Giving up.') % { 'conn_proto': self._INSTANCE_CONNECTION_PROTO, 'server': server['ip'], 'time': self.max_time_to_build_instance}) e.detail_data = {'server_details': instance_details} raise e return instance_details def _get_new_instance_details(self, server): instance_details = { 'instance_id': server['id'], 'ip': server['ip'], 'pk_path': server.get('pk_path'), 'subnet_id': server.get('subnet_id'), 'password': self.get_config_option('service_instance_password'), 'username': self.get_config_option('service_instance_user'), 'public_address': server['public_address'], } if server.get('admin_ip'): instance_details['admin_ip'] = server['admin_ip'] if server.get('router_id'): instance_details['router_id'] = server['router_id'] if server.get('service_port_id'): instance_details['service_port_id'] = server['service_port_id'] if server.get('public_port_id'): instance_details['public_port_id'] = server['public_port_id'] if server.get('admin_port_id'): instance_details['admin_port_id'] = server['admin_port_id'] for key in ('password', 'pk_path', 'subnet_id'): if not instance_details[key]: instance_details.pop(key) return instance_details def _load_public_key(self, path): with open(path, 'r') as f: public_key = f.read() return public_key @utils.synchronized("service_instance_get_key", external=True) def _get_key(self, context): """Get ssh key. 
:param context: defines context, that should be used :returns: tuple with keypair name and path to private key. """ if not (self.path_to_public_key and self.path_to_private_key): return (None, None) path_to_public_key = os.path.expanduser(self.path_to_public_key) path_to_private_key = os.path.expanduser(self.path_to_private_key) if (not os.path.exists(path_to_public_key) or not os.path.exists(path_to_private_key)): return (None, None) keypair_name = self.get_config_option("manila_service_keypair_name") keypairs = [k for k in self.compute_api.keypair_list(context) if k.name == keypair_name] if len(keypairs) > 1: raise exception.ServiceInstanceException(_('Ambiguous keypairs.')) public_key = self._load_public_key(path_to_public_key) if not keypairs: keypair = self.compute_api.keypair_import( context, keypair_name, public_key) else: keypair = keypairs[0] if keypair.public_key != public_key: LOG.debug('Public key differs from existing keypair. ' 'Creating new keypair.') self.compute_api.keypair_delete(context, keypair.id) keypair = self.compute_api.keypair_import( context, keypair_name, public_key) return keypair.name, path_to_private_key def _get_service_image(self, context): """Returns ID of service image for service vm creating.""" service_image_name = self.get_config_option("service_image_name") images = [image.id for image in self.image_api.image_list(context) if image.name == service_image_name and image.status == 'active'] if not images: raise exception.ServiceInstanceException( _("Image with name '%s' was not found or is not in " "'active' state.") % service_image_name) if len(images) != 1: raise exception.ServiceInstanceException( _("Multiple 'active' state images found with name '%s'!") % service_image_name) return images[0] def _create_service_instance(self, context, instance_name, network_info): """Creates service vm and sets up networking for it.""" service_image_id = self._get_service_image(context) key_name, key_path = self._get_key(context) if not 
(self.get_config_option("service_instance_password") or key_name): raise exception.ServiceInstanceException( _('Neither service instance password nor key are available.')) if not key_path: LOG.warning( 'No key path is available. May be non-existent key path is ' 'provided. Check path_to_private_key (current value ' '%(private_path)s) and path_to_public_key (current value ' '%(public_path)s) in manila configuration file.', dict( private_path=self.path_to_private_key, public_path=self.path_to_public_key)) network_data = self.network_helper.setup_network(network_info) fail_safe_data = dict( router_id=network_data.get('router_id'), subnet_id=network_data.get('subnet_id')) if network_data.get('service_port'): fail_safe_data['service_port_id'] = ( network_data['service_port']['id']) if network_data.get('public_port'): fail_safe_data['public_port_id'] = ( network_data['public_port']['id']) if network_data.get('admin_port'): fail_safe_data['admin_port_id'] = ( network_data['admin_port']['id']) try: create_kwargs = self._get_service_instance_create_kwargs() service_instance = self.compute_api.server_create( context, name=instance_name, image=service_image_id, flavor=self.get_config_option("service_instance_flavor_id"), key_name=key_name, nics=network_data['nics'], availability_zone=self.availability_zone, **create_kwargs) fail_safe_data['instance_id'] = service_instance['id'] service_instance = self.wait_for_instance_to_be_active( service_instance['id'], self.max_time_to_build_instance) if self.get_config_option("limit_ssh_access"): try: service_subnet = network_data['service_subnet'] except KeyError: LOG.error( "Unable to limit ssh access to instance id: '%s'!", fail_safe_data['instance_id']) raise exception.ManilaException( "Unable to limit SSH access - " "invalid service subnet details provided") else: service_subnet = False sec_groups = self._get_or_create_security_groups( context, allow_ssh_subnet=service_subnet) for sg in sec_groups: sg_id = sg['id'] LOG.debug( 
"Adding security group '%(sg)s' to server '%(si)s'.", dict(sg=sg_id, si=service_instance["id"])) self.compute_api.add_security_group_to_server( context, service_instance["id"], sg_id) ip = (network_data.get('service_port', network_data.get( 'admin_port'))['fixed_ips']) service_instance['ip'] = ip[0]['ip_address'] public_ip = (network_data.get('public_port', network_data.get( 'service_port'))['fixed_ips']) service_instance['public_address'] = public_ip[0]['ip_address'] except Exception as e: e.detail_data = {'server_details': fail_safe_data} raise service_instance.update(fail_safe_data) service_instance['pk_path'] = key_path for pair in [('router', 'router_id'), ('service_subnet', 'subnet_id')]: if pair[0] in network_data and 'id' in network_data[pair[0]]: service_instance[pair[1]] = network_data[pair[0]]['id'] admin_port = network_data.get('admin_port') if admin_port: try: service_instance['admin_ip'] = ( admin_port['fixed_ips'][0]['ip_address']) except Exception: msg = _("Admin port is being used but Admin IP was not found.") LOG.exception(msg) raise exception.AdminIPNotFound(reason=msg) return service_instance def _get_service_instance_create_kwargs(self): """Specify extra arguments used when creating the service instance. Classes inheriting the service instance manager can use this to easily pass extra arguments such as user data or metadata. 
""" return {} def _check_server_availability(self, instance_details, interval=5): t = time.time() ssh_pool = ssh_utils.SSHPool(instance_details['ip'], 22, interval, instance_details['username'], instance_details.get('password'), instance_details.get('pk_path'), max_size=1) while time.time() - t < self.max_time_to_build_instance: LOG.debug('Checking server availability.') if not self._test_server_connection(instance_details, ssh_pool): time.sleep(interval) else: return True return False def _test_server_connection(self, server, ssh_pool): conn = None try: conn = ssh_pool.create(quiet=True) return True except Exception as e: LOG.debug(e) LOG.debug("Could not login to server %s over SSH. Waiting...", server["ip"]) return False finally: if conn: conn.close() def delete_service_instance(self, context, server_details): """Removes share infrastructure. Deletes service vm and subnet, associated to share network. """ instance_id = server_details.get("instance_id") self._delete_server(context, instance_id) self.network_helper.teardown_network(server_details) def wait_for_instance_to_be_active(self, instance_id, timeout): t = time.time() while time.time() - t < timeout: try: service_instance = self.compute_api.server_get( self.admin_context, instance_id) except exception.InstanceNotFound as e: LOG.debug(e) time.sleep(1) continue instance_status = service_instance['status'] # NOTE(vponomaryov): emptiness of 'networks' field checked as # workaround for nova/neutron bug #1210483. if (instance_status == 'ACTIVE' and service_instance.get('networks', {})): return service_instance elif service_instance['status'] == 'ERROR': break LOG.debug("Waiting for instance %(instance_id)s to be active. " "Current status: %(instance_status)s.", dict(instance_id=instance_id, instance_status=instance_status)) time.sleep(1) raise exception.ServiceInstanceException( _("Instance %(instance_id)s failed to reach active state " "in %(timeout)s seconds. 
" "Current status: %(instance_status)s.") % dict(instance_id=instance_id, timeout=timeout, instance_status=instance_status)) def reboot_server(self, server, soft_reboot=False): self.compute_api.server_reboot(self.admin_context, server['instance_id'], soft_reboot) class BaseNetworkhelper(metaclass=abc.ABCMeta): @property @abc.abstractmethod def NAME(self): """Returns code name of network helper.""" @abc.abstractmethod def __init__(self, service_instance_manager): """Instantiates class and its attrs.""" @abc.abstractmethod def get_network_name(self, network_info): """Returns name of network for service instance.""" @abc.abstractmethod def setup_connectivity_with_service_instances(self): """Sets up connectivity between Manila host and service instances.""" @abc.abstractmethod def setup_network(self, network_info): """Sets up network for service instance.""" @abc.abstractmethod def teardown_network(self, server_details): """Teardowns network resources provided for service instance.""" class NeutronNetworkHelper(BaseNetworkhelper): def __init__(self, service_instance_manager): self.get_config_option = service_instance_manager.get_config_option self.vif_driver = importutils.import_class( self.get_config_option("interface_driver"))() if service_instance_manager.driver_config: self._network_config_group = ( service_instance_manager.driver_config.network_config_group or service_instance_manager.driver_config.config_group) else: self._network_config_group = None self.use_admin_port = False self.use_service_network = True self._neutron_api = None self._service_network_id = None self.connect_share_server_to_tenant_network = ( self.get_config_option('connect_share_server_to_tenant_network')) self.admin_network_id = self.get_config_option('admin_network_id') self.admin_subnet_id = self.get_config_option('admin_subnet_id') if self.admin_network_id and self.admin_subnet_id: self.use_admin_port = True if self.use_admin_port and self.connect_share_server_to_tenant_network: 
        # When False, no dedicated service network/subnet is created for
        # service instances (plumbing happens on the tenant network only).
        self.use_service_network = False

    @property
    def NAME(self):
        # Identifier of this network helper flavor (neutron-backed).
        return NEUTRON_NAME

    @property
    def admin_project_id(self):
        # Project that owns all service-side neutron resources we create.
        return self.neutron_api.admin_project_id

    @property
    @utils.synchronized("instantiate_neutron_api_neutron_net_helper")
    def neutron_api(self):
        # Lazily instantiated neutron client; the lock prevents two threads
        # from racing to build it.
        if not self._neutron_api:
            self._neutron_api = neutron.API(
                config_group_name=self._network_config_group)
        return self._neutron_api

    @property
    @utils.synchronized("service_network_id_neutron_net_helper")
    def service_network_id(self):
        # Cached id of the (found-or-created) service network.
        if not self._service_network_id:
            self._service_network_id = self._get_service_network_id()
        return self._service_network_id

    def get_network_name(self, network_info):
        """Returns name of network for service instance."""
        net = self.neutron_api.get_network(network_info['neutron_net_id'])
        return net['name']

    @coordination.synchronized("service_instance_get_service_network")
    def _get_service_network_id(self):
        """Finds existing or creates new service network.

        :returns: id of the service network.
        :raises ServiceInstanceException: if more than one network with
            the configured service network name exists.
        """
        service_network_name = self.get_config_option("service_network_name")
        networks = []
        for network in self.neutron_api.get_all_admin_project_networks():
            if network['name'] == service_network_name:
                networks.append(network)
        if len(networks) > 1:
            raise exception.ServiceInstanceException(
                _('Ambiguous service networks.'))
        elif not networks:
            return self.neutron_api.network_create(
                self.admin_project_id, service_network_name)['id']
        else:
            return networks[0]['id']

    @utils.synchronized(
        "service_instance_setup_and_teardown_network_for_instance",
        external=True)
    def teardown_network(self, server_details):
        """Removes ports/router-interface created for a share server.

        Best-effort: a 404 from neutron on any delete is treated as
        "already gone" and only logged. The subnet is detached from the
        router and marked unused (renamed to '') only when no other
        compute port still lives on it.

        :param server_details: dict with the ids recorded at setup time
            (subnet_id, router_id, service/public/admin port ids).
        """
        subnet_id = server_details.get("subnet_id")
        router_id = server_details.get("router_id")
        service_port_id = server_details.get("service_port_id")
        public_port_id = server_details.get("public_port_id")
        admin_port_id = server_details.get("admin_port_id")
        for port_id in (service_port_id, public_port_id, admin_port_id):
            if port_id:
                try:
                    self.neutron_api.delete_port(port_id)
                except exception.NetworkException as e:
                    # Re-raise anything that is not "port not found".
                    if e.kwargs.get('code') != 404:
                        raise
                    LOG.debug("Failed to delete port %(port_id)s with error: "
                              "\n %(exc)s",
                              {"port_id": port_id, "exc": e})
        if subnet_id:
            ports = self.neutron_api.list_ports(
                fields=['device_id', 'device_owner'],
                fixed_ips=['subnet_id=%s' % subnet_id])
            # NOTE(vponomaryov): iterate ports to get to know whether current
            # subnet is used or not. We will not remove it from router if it
            # is used.
            for port in ports:
                # NOTE(vponomaryov): if device_id is present, then we know that
                # this port is used. Also, if device owner is 'compute:*', then
                # we know that it is VM. We continue only if both are 'True'.
                if (port['device_id'] and
                        port['device_owner'].startswith('compute:')):
                    # NOTE(vponomaryov): There are other share servers
                    # exist that use this subnet. So, do not remove it
                    # from router.
                    return
            if router_id:
                try:
                    # NOTE(vponomaryov): there is no other share servers or
                    # some VMs that use this subnet. So, remove it from router.
                    self.neutron_api.router_remove_interface(
                        router_id, subnet_id)
                except exception.NetworkException as e:
                    if e.kwargs['code'] != 404:
                        raise
                    LOG.debug('Subnet %(subnet_id)s is not attached to the '
                              'router %(router_id)s.',
                              {'subnet_id': subnet_id,
                               'router_id': router_id})
            # Renaming to '' marks the subnet as reusable by
            # _get_service_subnet() (it matches on an empty name).
            self.neutron_api.update_subnet(subnet_id, '')

    @utils.synchronized(
        "service_instance_setup_and_teardown_network_for_instance",
        external=True)
    def setup_network(self, network_info):
        """Creates service-side networking for a new share server.

        Depending on configuration this creates a service subnet, wires it
        into the tenant's router (or plugs a port straight into the tenant
        network), and optionally adds an admin-network port. On failure of
        host connectivity setup, all ports created here are rolled back.

        :param network_info: dict with 'neutron_net_id' and
            'neutron_subnet_id' of the tenant network.
        :returns: dict with created resources: 'ports', 'nics',
            'ip_address' and, when applicable, 'service_subnet', 'router',
            'public_port', 'service_port', 'admin_port'.
        """
        neutron_net_id = network_info['neutron_net_id']
        neutron_subnet_id = network_info['neutron_subnet_id']
        network_data = dict()
        subnet_name = ('service_subnet_for_handling_of_share_server_for_'
                       'tenant_subnet_%s' % neutron_subnet_id)

        if self.use_service_network:
            # Reuse an existing (possibly recycled) service subnet, or
            # carve a fresh CIDR out of the configured service network.
            network_data['service_subnet'] = self._get_service_subnet(
                subnet_name)
            if not network_data['service_subnet']:
                network_data['service_subnet'] = (
                    self.neutron_api.subnet_create(
                        self.admin_project_id, self.service_network_id,
                        subnet_name, self._get_cidr_for_subnet(),
                        self.connect_share_server_to_tenant_network))

        network_data['ports'] = []

        if not self.connect_share_server_to_tenant_network:
            # Route between service subnet and tenant subnet via the
            # tenant's private router.
            network_data['router'] = self._get_private_router(
                neutron_net_id, neutron_subnet_id)
            try:
                self.neutron_api.router_add_interface(
                    network_data['router']['id'],
                    network_data['service_subnet']['id'])
            except exception.NetworkException as e:
                # 400 here means the interface already exists; anything
                # else is a real error.
                if e.kwargs['code'] != 400:
                    raise
                LOG.debug('Subnet %(subnet_id)s is already attached to the '
                          'router %(router_id)s.',
                          {'subnet_id': network_data['service_subnet']['id'],
                           'router_id': network_data['router']['id']})
        else:
            # Direct attachment: create a port on the tenant network.
            network_data['public_port'] = self.neutron_api.create_port(
                self.admin_project_id, neutron_net_id,
                subnet_id=neutron_subnet_id, device_owner='manila')
            network_data['ports'].append(network_data['public_port'])

        if self.use_service_network:
            network_data['service_port'] = self.neutron_api.create_port(
                self.admin_project_id, self.service_network_id,
                subnet_id=network_data['service_subnet']['id'],
                device_owner='manila')
            network_data['ports'].append(network_data['service_port'])

        if self.use_admin_port:
            network_data['admin_port'] = self.neutron_api.create_port(
                self.admin_project_id, self.admin_network_id,
                subnet_id=self.admin_subnet_id, device_owner='manila')
            network_data['ports'].append(network_data['admin_port'])

        try:
            self.setup_connectivity_with_service_instances()
        except Exception:
            # Roll back every port created above before propagating.
            for port in network_data['ports']:
                self.neutron_api.delete_port(port['id'])
            raise

        network_data['nics'] = [
            {'port-id': port['id']} for port in network_data['ports']]
        # Prefer the tenant-facing port's address; fall back to the
        # service port when no public port was created.
        public_ip = network_data.get(
            'public_port', network_data.get('service_port'))
        network_data['ip_address'] = public_ip['fixed_ips'][0]['ip_address']

        return network_data

    def _get_cidr_for_subnet(self):
        """Returns not used cidr for service subnet creating.

        Walks candidate subnets of the configured service network CIDR at
        the configured division mask and returns the first one whose CIDR
        is not already used by an existing service subnet.

        :raises ServiceInstanceException: when every candidate CIDR is
            already in use.
        """
        subnets = self._get_all_service_subnets()
        used_cidrs = set(subnet['cidr'] for subnet in subnets)
        serv_cidr = netaddr.IPNetwork(
            self.get_config_option("service_network_cidr"))
        division_mask = self.get_config_option("service_network_division_mask")
        # for/else: the else fires only when no free CIDR was found.
        for subnet in serv_cidr.subnet(division_mask):
            cidr = str(subnet.cidr)
            if cidr not in used_cidrs:
                return cidr
        else:
            raise exception.ServiceInstanceException(_('No available cidrs.'))

    def setup_connectivity_with_service_instances(self):
        """Sets up connectivity with service instances.

        Creates host port in service network and/or admin network, creating
        and setting up required network devices.
        """
        if self.use_service_network:
            LOG.debug("Plugging service instance into service network %s.",
                      self.service_network_id)
            port = self._get_service_port(
                self.service_network_id, None, 'manila-share')
            port = self._add_fixed_ips_to_service_port(port)
            interface_name = self.vif_driver.get_device_name(port)
            device = ip_lib.IPDevice(interface_name)
            self._plug_interface_in_host(interface_name, device, port)

        if self.use_admin_port:
            LOG.debug("Plugging service instance into admin network %s.",
                      self.admin_network_id)
            port = self._get_service_port(
                self.admin_network_id, self.admin_subnet_id,
                'manila-admin-share')
            interface_name = self.vif_driver.get_device_name(port)
            device = ip_lib.IPDevice(interface_name)
            # Outdated routes are cleared only for the admin network path.
            self._plug_interface_in_host(interface_name, device, port,
                                         clear_outdated_routes=True)

    @utils.synchronized("service_instance_plug_interface_in_host",
                        external=True)
    def _plug_interface_in_host(self, interface_name, device, port,
                                clear_outdated_routes=False):
        """Plugs the given neutron port into the manila host.

        Computes an ip/prefix pair per fixed IP of the port and configures
        them on the host interface via the vif driver.

        :param clear_outdated_routes: when True, routes for the port's
            subnet CIDRs are cleared during l3 init.
        """
        LOG.debug("Plug interface into host - interface_name: %s, "
                  "device: %s, port: %s", interface_name, device, port)
        self.vif_driver.plug(interface_name, port['id'], port['mac_address'])
        cidrs_to_clear = []
        ip_cidrs = []
        for fixed_ip in port['fixed_ips']:
            subnet = self.neutron_api.get_subnet(fixed_ip['subnet_id'])
            if clear_outdated_routes:
                cidrs_to_clear.append(subnet['cidr'])
            net = netaddr.IPNetwork(subnet['cidr'])
            ip_cidr = '%s/%s' % (fixed_ip['ip_address'], net.prefixlen)
            ip_cidrs.append(ip_cidr)

        self.vif_driver.init_l3(interface_name, ip_cidrs,
                                clear_cidrs=cidrs_to_clear)

    @utils.synchronized("service_instance_get_service_port", external=True)
    def _get_service_port(self, network_id, subnet_id, device_id):
        """Find or creates service neutron port.

        This port will be used for connectivity with service instances.

        :param network_id: network to search/create the port in.
        :param subnet_id: subnet for a newly created port (may be None).
        :param device_id: device id used both as search key and as the
            new port's device_id.
        :raises ServiceInstanceException: when more than one matching
            port exists on this host.
        """
        host = self.get_config_option("service_network_host") or CONF.host
        search_opts = {'device_id': device_id,
                       'binding:host_id': host}
        ports = [port for port in self.neutron_api.
                 list_ports(**search_opts)]
        if len(ports) > 1:
            raise exception.ServiceInstanceException(
                _('Error. Ambiguous service ports.'))
        elif not ports:
            # Port security is disabled: this host-side port carries
            # traffic for many service instances.
            port = self.neutron_api.create_port(
                self.admin_project_id, network_id, subnet_id=subnet_id,
                device_id=device_id, device_owner='manila:share',
                host_id=host, port_security_enabled=False)
        else:
            port = ports[0]
        return port

    @utils.synchronized(
        "service_instance_add_fixed_ips_to_service_port", external=True)
    def _add_fixed_ips_to_service_port(self, port):
        """Ensures the host port has a fixed IP on every service subnet.

        :param port: neutron port dict for the host's service port.
        :returns: the (possibly updated) port dict.
        """
        network = self.neutron_api.get_network(self.service_network_id)
        subnets = set(network['subnets'])
        port_fixed_ips = []
        for fixed_ip in port['fixed_ips']:
            port_fixed_ips.append({'subnet_id': fixed_ip['subnet_id'],
                                   'ip_address': fixed_ip['ip_address']})
            if fixed_ip['subnet_id'] in subnets:
                subnets.remove(fixed_ip['subnet_id'])

        # If there are subnets here that means that
        # we need to add those to the port and call update.
        if subnets:
            port_fixed_ips.extend([dict(subnet_id=s) for s in subnets])
            port = self.neutron_api.update_port_fixed_ips(
                port['id'], {'fixed_ips': port_fixed_ips})

        return port

    @utils.synchronized("service_instance_get_private_router", external=True)
    def _get_private_router(self, neutron_net_id, neutron_subnet_id):
        """Returns router attached to private subnet gateway.

        The router is located by finding the port on the tenant network
        whose fixed IP equals the subnet's gateway IP; that port's
        device_id is the router id.

        :raises ServiceInstanceException: when the subnet has no gateway
            or its gateway is not attached to any router.
        """
        private_subnet = self.neutron_api.get_subnet(neutron_subnet_id)
        if not private_subnet['gateway_ip']:
            raise exception.ServiceInstanceException(
                _('Subnet must have gateway.'))
        private_network_ports = [p for p in self.neutron_api.list_ports(
            network_id=neutron_net_id)]
        # for/else: the else fires only when no gateway port was found.
        for p in private_network_ports:
            fixed_ip = p['fixed_ips'][0]
            if (fixed_ip['subnet_id'] == private_subnet['id'] and
                    fixed_ip['ip_address'] == private_subnet['gateway_ip']):
                private_subnet_gateway_port = p
                break
        else:
            raise exception.ServiceInstanceException(
                _('Subnet gateway is not attached to the router.'))
        private_subnet_router = self.neutron_api.show_router(
            private_subnet_gateway_port['device_id'])
        return private_subnet_router

    @utils.synchronized("service_instance_get_service_subnet", external=True)
    def _get_service_subnet(self, subnet_name):
        """Returns a service subnet named ``subnet_name``, recycling if possible.

        Falls back to renaming an unused subnet (name == '') when no exact
        match exists; returns None when neither is available.

        :raises ServiceInstanceException: when several subnets share the
            requested name.
        """
        all_service_subnets = self._get_all_service_subnets()
        service_subnets = [subnet for subnet in all_service_subnets
                           if subnet['name'] == subnet_name]
        if len(service_subnets) == 1:
            return service_subnets[0]
        elif not service_subnets:
            # Subnets released by teardown_network() carry an empty name
            # and may be reused by renaming them.
            unused_service_subnets = [subnet for subnet in all_service_subnets
                                      if subnet['name'] == '']
            if unused_service_subnets:
                service_subnet = unused_service_subnets[0]
                self.neutron_api.update_subnet(
                    service_subnet['id'], subnet_name)
                return service_subnet
            return None
        else:
            raise exception.ServiceInstanceException(
                _('Ambiguous service subnets.'))

    @utils.synchronized(
        "service_instance_get_all_service_subnets", external=True)
    def _get_all_service_subnets(self):
        # Returns full subnet dicts for every subnet of the service network.
        service_network = self.neutron_api.get_network(self.service_network_id)
        subnets = []
        for subnet_id in
service_network['subnets']: subnets.append(self.neutron_api.get_subnet(subnet_id)) return subnets ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9336715 manila-21.0.0/manila/share/drivers/tegile/0000775000175000017500000000000000000000000020350 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/tegile/__init__.py0000664000175000017500000000000000000000000022447 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/tegile/tegile.py0000664000175000017500000004736600000000000022213 0ustar00zuulzuul00000000000000# Copyright (c) 2016 by Tegile Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Share driver for Tegile storage. 
""" import json import requests from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.share import driver from manila.share import utils as share_utils from manila import utils tegile_opts = [ cfg.HostAddressOpt('tegile_nas_server', help='Tegile NAS server hostname or IP address.'), cfg.StrOpt('tegile_nas_login', help='User name for the Tegile NAS server.'), cfg.StrOpt('tegile_nas_password', secret=True, help='Password for the Tegile NAS server.'), cfg.StrOpt('tegile_default_project', help='Create shares in this project')] CONF = cfg.CONF CONF.register_opts(tegile_opts) LOG = log.getLogger(__name__) DEFAULT_API_SERVICE = 'openstack' TEGILE_API_PATH = 'zebi/api' TEGILE_LOCAL_CONTAINER_NAME = 'Local' TEGILE_SNAPSHOT_PREFIX = 'Manual-S-' VENDOR = 'Tegile Systems Inc.' DEFAULT_BACKEND_NAME = 'Tegile' VERSION = '1.0.0' DEBUG_LOGGING = False # For debugging purposes def debugger(func): """Returns a wrapper that wraps func. The wrapper will log the entry and exit points of the function. 
""" def wrapper(*args, **kwds): if DEBUG_LOGGING: LOG.debug('Entering %(classname)s.%(funcname)s', { 'classname': args[0].__class__.__name__, 'funcname': func.__name__, }) LOG.debug('Arguments: %(args)s, %(kwds)s', { 'args': args[1:], 'kwds': kwds, }) f_result = func(*args, **kwds) if DEBUG_LOGGING: LOG.debug('Exiting %(classname)s.%(funcname)s', { 'classname': args[0].__class__.__name__, 'funcname': func.__name__, }) LOG.debug('Results: %(result)s', {'result': f_result}) return f_result return wrapper class TegileAPIExecutor(object): def __init__(self, classname, hostname, username, password): self._classname = classname self._hostname = hostname self._username = username self._password = password def __call__(self, *args, **kwargs): return self._send_api_request(*args, **kwargs) @debugger @utils.retry(retry_param=(requests.ConnectionError, requests.Timeout), interval=30, retries=3, backoff_rate=1) def _send_api_request(self, method, params=None, request_type='post', api_service=DEFAULT_API_SERVICE, fine_logging=DEBUG_LOGGING): if params is not None: params = json.dumps(params) url = 'https://%s/%s/%s/%s' % (self._hostname, TEGILE_API_PATH, api_service, method) if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, ' 'url: %(url)s', { 'classname': self._classname, 'method': method, 'url': url, }) if request_type == 'post': if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) ' 'method: %(method)s, payload: %(payload)s', { 'classname': self._classname, 'method': method, 'payload': params, }) req = requests.post(url, data=params, auth=(self._username, self._password), verify=False) else: req = requests.get(url, auth=(self._username, self._password), verify=False) if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, ' 'return code: %(retcode)s', { 'classname': self._classname, 'method': method, 'retcode': req, }) try: response = req.json() if fine_logging: LOG.debug('TegileAPIExecutor(%(classname)s) ' 
'method: %(method)s, response: %(response)s', { 'classname': self._classname, 'method': method, 'response': response, }) except ValueError: # Some APIs don't return output and that's fine response = '' req.close() if req.status_code != 200: raise exception.TegileAPIException(response=req.text) return response class TegileShareDriver(driver.ShareDriver): """Tegile NAS driver. Allows for NFS and CIFS NAS storage usage.""" def __init__(self, *args, **kwargs): super(TegileShareDriver, self).__init__(False, *args, **kwargs) LOG.warning('Tegile share driver has been deprecated and will be ' 'removed in a future release.') self.configuration.append_config_values(tegile_opts) self._default_project = (self.configuration.safe_get( "tegile_default_project") or 'openstack') self._backend_name = (self.configuration.safe_get('share_backend_name') or CONF.share_backend_name or DEFAULT_BACKEND_NAME) self._hostname = self.configuration.safe_get('tegile_nas_server') username = self.configuration.safe_get('tegile_nas_login') password = self.configuration.safe_get('tegile_nas_password') self._api = TegileAPIExecutor(self.__class__.__name__, self._hostname, username, password) @debugger def create_share(self, context, share, share_server=None): """Is called to create share.""" share_name = share['name'] share_proto = share['share_proto'] pool_name = share_utils.extract_host(share['host'], level='pool') params = (pool_name, self._default_project, share_name, share_proto) # Share name coming from the backend is the most reliable. Sometimes # a few options in Tegile array could cause sharename to be different # from the one passed to it. Eg. 'projectname-sharename' instead # of 'sharename' if inherited share properties are selected. 
        # The array replies "<ip> <real_share_name>".
        ip, real_share_name = self._api('createShare', params).split()
        LOG.info("Created share %(sharename)s, share id %(shid)s.",
                 {'sharename': share_name, 'shid': share['id']})
        return self._get_location_path(real_share_name, share_proto, ip)

    @debugger
    def extend_share(self, share, new_size, share_server=None):
        """Is called to extend share.

        There is no resize for Tegile shares.
        We just adjust the quotas. The API is still called 'resizeShare'.
        """
        self._adjust_size(share, new_size, share_server)

    @debugger
    def shrink_share(self, shrink_share, shrink_size, share_server=None):
        """Uses resize_share to shrink a share.

        There is no shrink for Tegile shares.
        We just adjust the quotas. The API is still called 'resizeShare'.
        """
        self._adjust_size(shrink_share, shrink_size, share_server)

    @debugger
    def _adjust_size(self, share, new_size, share_server=None):
        # Sets the share quota to new_size GB via 'resizeShare'.
        pool, project, share_name = self._get_pool_project_share_name(share)
        params = ('%s/%s/%s/%s' % (pool,
                                   TEGILE_LOCAL_CONTAINER_NAME,
                                   project,
                                   share_name),
                  str(new_size),
                  'GB')
        self._api('resizeShare', params)

    @debugger
    def delete_share(self, context, share, share_server=None):
        """Is called to remove share."""
        pool, project, share_name = self._get_pool_project_share_name(share)
        params = ('%s/%s/%s/%s' % (pool,
                                   TEGILE_LOCAL_CONTAINER_NAME,
                                   project,
                                   share_name),
                  True,
                  False)
        self._api('deleteShare', params)

    @debugger
    def create_snapshot(self, context, snapshot, share_server=None):
        """Is called to create snapshot."""
        snap_name = snapshot['name']
        pool, project, share_name = self._get_pool_project_share_name(
            snapshot['share'])
        # The API expects a share descriptor dict; sizes are irrelevant for
        # snapshot creation and are sent as 0.
        share = {
            'poolName': '%s' % pool,
            'projectName': '%s' % project,
            'name': share_name,
            'availableSize': 0,
            'totalSize': 0,
            'datasetPath': '%s/%s/%s' % (pool,
                                         TEGILE_LOCAL_CONTAINER_NAME,
                                         project),
            'mountpoint': share_name,
            'local': 'true',
        }
        params = (share, snap_name, False)
        LOG.info('Creating snapshot for share_name=%(shr)s'
                 ' snap_name=%(name)s',
                 {'shr': share_name, 'name': snap_name})
        self._api('createShareSnapshot', params)

    @debugger
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        """Create a share from a snapshot - clone a snapshot."""
        pool, project, share_name = self._get_pool_project_share_name(share)
        params = ('%s/%s/%s/%s@%s%s' % (pool,
                                        TEGILE_LOCAL_CONTAINER_NAME,
                                        project,
                                        snapshot['share_name'],
                                        TEGILE_SNAPSHOT_PREFIX,
                                        snapshot['name'],
                                        ),
                  share_name,
                  True,
                  )
        ip, real_share_name = self._api('cloneShareSnapshot',
                                        params).split()
        share_proto = share['share_proto']
        return self._get_location_path(real_share_name, share_proto, ip)

    @debugger
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Is called to remove snapshot."""
        pool, project, share_name = self._get_pool_project_share_name(
            snapshot['share'])
        params = ('%s/%s/%s/%s@%s%s' % (pool,
                                        TEGILE_LOCAL_CONTAINER_NAME,
                                        project,
                                        share_name,
                                        TEGILE_SNAPSHOT_PREFIX,
                                        snapshot['name']),
                  False)
        self._api('deleteShareSnapshot', params)

    @debugger
    def ensure_share(self, context, share, share_server=None):
        """Invoked to ensure that share is exported."""
        # Fetching share name from server, because some configuration
        # options can cause sharename different from the OpenStack share name
        pool, project, share_name = self._get_pool_project_share_name(share)
        params = [
            '%s/%s/%s/%s' % (pool,
                             TEGILE_LOCAL_CONTAINER_NAME,
                             project,
                             share_name),
        ]
        ip, real_share_name = self._api('getShareIPAndMountPoint',
                                        params).split()
        share_proto = share['share_proto']
        location = self._get_location_path(real_share_name, share_proto, ip)
        return [location]

    @debugger
    def _allow_access(self, context, share, access, share_server=None):
        """Allow access to the share."""
        share_proto = share['share_proto']
        access_type = access['access_type']
        access_level = access['access_level']
        access_to = access['access_to']
        self._check_share_access(share_proto, access_type)
        pool, project, share_name = self._get_pool_project_share_name(share)
        params = ('%s/%s/%s/%s' % (pool,
                                   TEGILE_LOCAL_CONTAINER_NAME,
                                   project,
                                   share_name),
                  share_proto,
                  access_type,
                  access_to,
                  access_level)
        self._api('shareAllowAccess', params)

    @debugger
    def _deny_access(self, context, share, access, share_server=None):
        """Deny access to the share."""
        share_proto = share['share_proto']
        access_type = access['access_type']
        access_level = access['access_level']
        access_to = access['access_to']
        self._check_share_access(share_proto, access_type)
        pool, project, share_name = self._get_pool_project_share_name(share)
        params = ('%s/%s/%s/%s' % (pool,
                                   TEGILE_LOCAL_CONTAINER_NAME,
                                   project,
                                   share_name),
                  share_proto,
                  access_type,
                  access_to,
                  access_level)
        self._api('shareDenyAccess', params)

    def _check_share_access(self, share_proto, access_type):
        # Validates (protocol, access_type) combinations; CIFS supports
        # only 'user', NFS supports 'ip' and 'user'.
        if share_proto == 'CIFS' and access_type != 'user':
            reason = ('Only USER access type is allowed for '
                      'CIFS shares.')
            LOG.warning(reason)
            raise exception.InvalidShareAccess(reason=reason)
        elif share_proto == 'NFS' and access_type not in ('ip', 'user'):
            reason = ('Only IP or USER access types are allowed for '
                      'NFS shares.')
            LOG.warning(reason)
            raise exception.InvalidShareAccess(reason=reason)
        elif share_proto not in ('NFS', 'CIFS'):
            reason = ('Unsupported protocol \"%s\" specified for '
                      'access rule.') % share_proto
            raise exception.InvalidShareAccess(reason=reason)

    @debugger
    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, update_rules, share_server=None):
        """Syncs access rules; recovery mode re-applies the full rule set."""
        if not (add_rules or delete_rules):
            # Recovery mode
            pool, project, share_name = (
                self._get_pool_project_share_name(share))
            share_proto = share['share_proto']
            params = ('%s/%s/%s/%s' % (pool,
                                       TEGILE_LOCAL_CONTAINER_NAME,
                                       project,
                                       share_name),
                      share_proto)
            # Clears all current ACLs
            # Remove ip and user ACLs if share_proto is NFS
            # Remove user ACLs if share_proto is CIFS
            self._api('clearAccessRules', params)
            # Looping through all rules.
            # Will have one API call per rule.
            for access in access_rules:
                self._allow_access(context, share, access, share_server)
        else:
            # Adding/Deleting specific rules
            for access in delete_rules:
                self._deny_access(context, share, access, share_server)
            for access in add_rules:
                self._allow_access(context, share, access, share_server)

    @debugger
    def _update_share_stats(self, **kwargs):
        """Retrieve stats info."""
        try:
            data = self._api(method='getArrayStats',
                             request_type='get',
                             fine_logging=False)
            # fixing values coming back here as String to float
            for pool in data.get('pools', []):
                pool['total_capacity_gb'] = float(
                    pool.get('total_capacity_gb', 0))
                pool['free_capacity_gb'] = float(
                    pool.get('free_capacity_gb', 0))
                pool['allocated_capacity_gb'] = float(
                    pool.get('allocated_capacity_gb', 0))
                pool['qos'] = pool.pop('QoS_support', False)
                pool['reserved_percentage'] = (
                    self.configuration.reserved_share_percentage)
                pool['reserved_snapshot_percentage'] = (
                    self.configuration.reserved_share_from_snapshot_percentage
                    or self.configuration.reserved_share_percentage)
                pool['reserved_share_extend_percentage'] = (
                    self.configuration.reserved_share_extend_percentage
                    or self.configuration.reserved_share_percentage)
                pool['dedupe'] = True
                pool['compression'] = True
                pool['thin_provisioning'] = True
                pool['max_over_subscription_ratio'] = (
                    self.configuration.max_over_subscription_ratio)

            data['share_backend_name'] = self._backend_name
            data['vendor_name'] = VENDOR
            data['driver_version'] = VERSION
            data['storage_protocol'] = 'NFS_CIFS'
            data['snapshot_support'] = True
            data['create_share_from_snapshot_support'] = True
            data['qos'] = False
            super(TegileShareDriver, self)._update_share_stats(data)
        except Exception:
            msg = _('Unexpected error while trying to get the '
                    'usage stats from array.')
            LOG.exception(msg)
            raise

    @debugger
    def get_pool(self, share):
        """Returns pool name where share resides.

        :param share: The share hosted by the driver.
        :return: Name of the pool where given share is hosted.
""" pool = share_utils.extract_host(share['host'], level='pool') return pool @debugger def get_network_allocations_number(self): """Get number of network interfaces to be created.""" return 0 @debugger def _get_location_path(self, share_name, share_proto, ip=None): if ip is None: ip = self._hostname if share_proto == 'NFS': location = '%s:%s' % (ip, share_name) elif share_proto == 'CIFS': location = r'\\%s\%s' % (ip, share_name) else: message = _('Invalid NAS protocol supplied: %s.') % share_proto raise exception.InvalidInput(message) export_location = { 'path': location, 'is_admin_only': False, 'metadata': { 'preferred': True, }, } return export_location @debugger def _get_pool_project_share_name(self, share): pool = share_utils.extract_host(share['host'], level='pool') project = self._default_project share_name = share['name'] return pool, project, share_name ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9376714 manila-21.0.0/manila/share/drivers/vastdata/0000775000175000017500000000000000000000000020706 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/vastdata/__init__.py0000664000175000017500000000000000000000000023005 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/vastdata/driver.py0000664000175000017500000003420100000000000022553 0ustar00zuulzuul00000000000000# Copyright 2024 VAST Data Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
VAST's Share Driver


Configuration:

[DEFAULT]
enabled_share_backends = vast

[vast]
share_driver = manila.share.drivers.vastdata.driver.VASTShareDriver
share_backend_name = vast
snapshot_support = true
driver_handles_share_servers = false
vast_mgmt_host = v11
vast_vippool_name = vippool-1
vast_root_export = manila
vast_mgmt_user = admin
vast_mgmt_password = 123456
"""
import collections

import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units

from manila.common import constants
from manila import exception
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.vastdata import driver_util
import manila.share.drivers.vastdata.rest as vast_rest

LOG = logging.getLogger(__name__)

OPTS = [
    cfg.HostAddressOpt(
        "vast_mgmt_host",
        help="Hostname or IP address VAST storage system management VIP.",
    ),
    cfg.PortOpt(
        "vast_mgmt_port", help="Port for VAST management", default=443
    ),
    cfg.StrOpt(
        "vast_vippool_name", help="Name of Virtual IP pool"
    ),
    cfg.StrOpt(
        "vast_root_export", default="manila", help="Base path for shares"
    ),
    cfg.StrOpt(
        "vast_mgmt_user", help="Username for VAST management"
    ),
    cfg.StrOpt(
        "vast_mgmt_password", help="Password for VAST management", secret=True
    ),
    cfg.StrOpt(
        "vast_api_token",
        default="",
        secret=True,
        help=(
            "API token for accessing VAST mgmt. "
            "If provided, it will be used instead "
            "of 'san_login' and 'san_password'."
        )
    ),
]

CONF = cfg.CONF
CONF.register_opts(OPTS)

# Maps manila access levels to VAST view-policy host-list keys.
MANILA_TO_VAST_ACCESS_LEVEL = {
    constants.ACCESS_LEVEL_RW: "nfs_read_write",
    constants.ACCESS_LEVEL_RO: "nfs_read_only",
}


@driver_util.decorate_methods_with(
    driver_util.verbose_driver_trace
)
class VASTShareDriver(driver.ShareDriver):
    """Driver for the VastData Filesystem."""

    VERSION = "1.0"  # driver version

    def __init__(self, *args, **kwargs):
        super().__init__(False, *args, config_opts=[OPTS], **kwargs)

    def do_setup(self, context):
        """Driver initialization

        Validates configuration (host, VIP pool, and either user/password
        or API token) and builds the REST client.

        :raises VastDriverException: on missing required options.
        """
        backend_name = self.configuration.safe_get("share_backend_name")
        root_export = self.configuration.vast_root_export
        vip_pool_name = self.configuration.safe_get("vast_vippool_name")
        if not vip_pool_name:
            raise exception.VastDriverException(
                reason="vast_vippool_name must be set"
            )
        self._backend_name = backend_name or self.__class__.__name__
        self._vippool_name = vip_pool_name
        self._root_export = "/" + root_export.strip("/")
        username = self.configuration.safe_get("vast_mgmt_user")
        password = self.configuration.safe_get("vast_mgmt_password")
        api_token = self.configuration.safe_get("vast_api_token")
        host = self.configuration.safe_get("vast_mgmt_host")
        port = self.configuration.safe_get("vast_mgmt_port")
        if not host:
            raise exception.VastDriverException(
                reason="`vast_mgmt_host` must be set in manila.conf."
            )
        # Require either (username & password) OR (API token)
        if not ((username and password) or api_token):
            raise exception.VastDriverException(
                reason="Authentication failed: You must specify either "
                       "`vast_mgmt_user` and `vast_mgmt_password`, "
                       "or provide `vast_api_token` in manila.conf."
            )
        if port:
            host = f"{host}:{port}"
        self.rest = vast_rest.RestApi(
            host=host,
            username=username,
            password=password,
            api_token=api_token,
            ssl_verify=False,
            plugin_version=self.VERSION,
        )
        LOG.debug("VAST Data driver setup is complete.")

    def _update_share_stats(self, data=None):
        """Retrieve stats info from share group."""
        metrics_list = [
            "Capacity,drr",
            "Capacity,logical_space",
            "Capacity,logical_space_in_use",
            "Capacity,physical_space",
            "Capacity,physical_space_in_use",
        ]
        metrics = self.rest.capacity_metrics.get(metrics_list)
        # Capacities are reported in bytes; convert to GiB for manila.
        data = dict(
            share_backend_name=self._backend_name,
            vendor_name="VAST STORAGE",
            driver_version=self.VERSION,
            storage_protocol="NFS",
            data_reduction=metrics.drr,
            total_capacity_gb=float(metrics.logical_space) / units.Gi,
            free_capacity_gb=float(
                metrics.logical_space - metrics.logical_space_in_use
            ) / units.Gi,
            provisioned_capacity_gb=float(
                metrics.logical_space_in_use) / units.Gi,
            snapshot_support=True,
            create_share_from_snapshot_support=False,
            mount_snapshot_support=False,
            revert_to_snapshot_support=False,
        )
        super()._update_share_stats(data)

    def _to_volume_path(self, share_id, root=None):
        # Backend folder path for a share, e.g. "/manila/manila-<id>".
        if not root:
            root = self._root_export
        return f"{root}/manila-{share_id}"

    def create_share(self, context, share, share_server=None):
        return self._ensure_share(share)

    def delete_share(self, context, share, share_server=None):
        """Called to delete a share"""
        share_id = share["id"]
        src = self._to_volume_path(share_id)
        LOG.debug(f"Deleting '{src}'.")
        # Remove folder, view, quota and policy — all are named/path'd
        # after the share id.
        self.rest.folders.delete(path=src)
        self.rest.views.delete(name=share_id)
        self.rest.quotas.delete(name=share_id)
        self.rest.view_policies.delete(name=share_id)

    def update_access(
            self, context, share,
            access_rules, add_rules,
            delete_rules, update_rules,
            share_server=None
    ):
        """Update access rules for share.

        Invalid add-rules are reported back in 'error' state rather than
        aborting the whole update.

        :returns: map of rule id -> {'state': ...} for failed rules.
        """
        rule_state_map = {}
        if not (add_rules or delete_rules):
            # Recovery mode: re-apply the complete rule set.
            add_rules = access_rules
        if share["share_proto"] != "NFS":
            LOG.error("The share protocol flavor is invalid. Please use NFS.")
            return
        valid_add_rules = []
        for rule in (add_rules or []):
            try:
                validate_access_rule(rule)
            except (
                    exception.InvalidShareAccess,
                    exception.InvalidShareAccessLevel,
            ) as exc:
                rule_id = rule["access_id"]
                access_level = rule["access_level"]
                access_to = rule["access_to"]
                LOG.exception(
                    f"Failed to provide {access_level} access to "
                    f"{access_to} (Rule ID: {rule_id}, Reason: {exc}). "
                    "Setting rule to 'error' state."
                )
                rule_state_map[rule['id']] = {'state': 'error'}
            else:
                valid_add_rules.append(rule)

        share_id = share["id"]
        export = self._to_volume_path(share_id)
        LOG.debug(f"Changing access on {share_id}.")
        data = {
            "name": share_id,
            "nfs_no_squash": ["*"],
            "nfs_root_squash": ["*"]
        }
        policy = self.rest.view_policies.one(name=share_id)
        if not policy:
            raise exception.VastDriverException(
                reason=f"Policy not found for share {share_id}."
            )
        if valid_add_rules:
            policy_rules = policy_payload_from_rules(
                rules=valid_add_rules, policy=policy, action="update"
            )
            data.update(policy_rules)
            LOG.debug(f"Changing access on {export}. Rules: {policy_rules}.")
            self.rest.view_policies.update(policy.id, **data)
        if delete_rules:
            policy_rules = policy_payload_from_rules(
                rules=delete_rules, policy=policy, action="deny"
            )
            LOG.debug(f"Changing access on {export}. Rules: {policy_rules}.")
            data.update(policy_rules)
            self.rest.view_policies.update(policy.id, **data)
        return rule_state_map

    def extend_share(self, share, new_size, share_server=None):
        """uses resize_share to extend a share"""
        self._resize_share(share, new_size)

    def shrink_share(self, share, new_size, share_server=None):
        """uses resize_share to shrink a share"""
        self._resize_share(share, new_size)

    def create_snapshot(self, context, snapshot, share_server=None):
        """Is called to create snapshot."""
        path = self._to_volume_path(snapshot["share_instance_id"])
        self.rest.snapshots.create(path=path, name=snapshot["name"])

    def delete_snapshot(self, context, snapshot, share_server=None):
        """Is called to remove share."""
        self.rest.snapshots.delete(name=snapshot["name"])

    def get_network_allocations_number(self):
        # DHSS=False driver: no per-share network allocations needed.
        return 0

    def ensure_shares(self, context, shares):
        """Re-creates/validates backend resources for all given shares."""
        updates = {}
        for share in shares:
            export_locations = self._ensure_share(share)
            updates[share["id"]] = {
                'export_locations': export_locations
            }
        return updates

    def get_backend_info(self, context):
        # Values that, when changed, trigger ensure_shares on restart.
        backend_info = {
            "vast_vippool_name": self.configuration.vast_vippool_name,
            "vast_mgmt_host": self.configuration.vast_mgmt_host,
        }
        return backend_info

    def _resize_share(self, share, new_size):
        """Adjusts the share's quota hard limit to new_size GiB.

        :raises ShareNotFound: when no quota exists for the share.
        :raises ShareShrinkingPossibleDataLoss: when shrinking below
            current usage.
        """
        share_id = share["id"]
        quota = self.rest.quotas.one(name=share_id)
        if not quota:
            raise exception.ShareNotFound(
                reason="Share not found", share_id=share_id
            )
        requested_capacity = new_size * units.Gi
        if requested_capacity < quota.used_effective_capacity:
            raise exception.ShareShrinkingPossibleDataLoss(
                share_id=share['id'])
        self.rest.quotas.update(quota.id, hard_limit=requested_capacity)

    def _ensure_share(self, share):
        """Idempotently creates policy, quota and view for a share.

        :returns: list of export location dicts, one per VIP of the
            configured pool.
        :raises InvalidShare: for non-NFS protocols.
        :raises VastDriverException: when the share exists with a
            different capacity.
        """
        share_proto = share["share_proto"]
        if share_proto != "NFS":
            raise exception.InvalidShare(
                reason=_(
                    "Invalid NAS protocol supplied: {}.".format(share_proto)
                )
            )
        vips = self.rest.vip_pools.vips(pool_name=self._vippool_name)
        share_id = share["id"]
        requested_capacity = share["size"] * units.Gi
        path = self._to_volume_path(share_id)
        policy = self.rest.view_policies.ensure(name=share_id)
        quota = self.rest.quotas.ensure(
            name=share_id,
            path=path,
            create_dir=True,
            hard_limit=requested_capacity
        )
        # An existing quota with a different hard limit means a share with
        # this id already exists at another size — refuse to adopt it.
        if quota.hard_limit != requested_capacity:
            raise exception.VastDriverException(
                reason=f"Share already exists with different capacity"
                       f" (requested={requested_capacity},"
                       f" exists={quota.hard_limit})"
            )
        view = self.rest.views.ensure(
            name=share_id, path=path, policy_id=policy.id
        )
        if view.policy != share_id:
            self.rest.views.update(view.id, policy_id=policy.id)
        return [
            dict(path=f"{vip}:{path}", is_admin_only=False) for vip in vips
        ]


def policy_payload_from_rules(rules, policy, action):
    """Convert list of manila rules into vast compatible payload
    for updating/creating policy.

    :param rules: manila access rules (ip-type, rw/ro levels).
    :param policy: current VAST view policy (provides existing host lists).
    :param action: "update" to add hosts, "deny" to remove them.
    :raises ValueError: for any other action.
    """
    # Expand each rule's CIDR into individual host IPs, bucketed by the
    # VAST access-level key (nfs_read_write / nfs_read_only).
    hosts = collections.defaultdict(set)
    for rule in rules:
        addr_list = map(
            str, netaddr.IPNetwork(rule["access_to"]).iter_hosts()
        )
        hosts[
            MANILA_TO_VAST_ACCESS_LEVEL[rule["access_level"]]
        ].update(addr_list)
    _default_rules = set()
    # Delete default_vast_policy on each update.
    # There is no sense to keep * in list of allowed/denied hosts
    # as user want to set particular ip/ips only.
    _default_vast_policy = {"*"}
    if action == "update":
        rw = set(policy.nfs_read_write).union(
            hosts.get("nfs_read_write", _default_rules)
        )
        ro = set(policy.nfs_read_only).union(
            hosts.get("nfs_read_only", _default_rules)
        )
    elif action == "deny":
        rw = set(policy.nfs_read_write).difference(
            hosts.get("nfs_read_write", _default_rules)
        )
        ro = set(policy.nfs_read_only).difference(
            hosts.get("nfs_read_only", _default_rules)
        )
    else:
        raise ValueError("Invalid action")
    # When policy created default access is
    # "*" for read-write and read-only operations.
    # After updating any of rules (rw or ro)
    # we need to delete "*" to prevent ambiguous state when
    # resource available for certain ip and for all range of ip addresses.
if len(rw) > 1: rw -= _default_vast_policy if len(ro) > 1: ro -= _default_vast_policy return {"nfs_read_write": list(rw), "nfs_read_only": list(ro)} def validate_access_rule(access_rule): allowed_types = {"ip"} allowed_levels = MANILA_TO_VAST_ACCESS_LEVEL.keys() access_type = access_rule["access_type"] access_level = access_rule["access_level"] if access_type not in allowed_types: reason = _("Only {} access type allowed.").format( ", ".join(tuple([f"'{x}'" for x in allowed_types])) ) raise exception.InvalidShareAccess(reason=reason) if access_level not in allowed_levels: raise exception.InvalidShareAccessLevel(level=access_level) try: netaddr.IPNetwork(access_rule["access_to"]) except (netaddr.core.AddrFormatError, OSError) as exc: raise exception.InvalidShareAccess(reason=str(exc)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/vastdata/driver_util.py0000664000175000017500000001314300000000000023612 0ustar00zuulzuul00000000000000# Copyright 2024 VAST Data Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class Bunch(dict):
    """A dict whose items are also reachable as attributes.

    Adapted from https://github.com/real-easypy/easypy.  All-digit keys
    can be read both as ``b[5]`` / ``b["5"]`` and via the underscore
    alias ``b._5``.
    """

    # Slot used only by __repr__ to break recursion on self-referencing
    # bunches; it is set via dict.__setattr__ so it never appears in the
    # mapping itself.
    __slots__ = ("__stop_recursing__",)

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            pass
        if name[0] == "_" and name[1:].isdigit():
            # "_5" is an attribute-safe alias for the key "5".
            try:
                return self[name[1:]]
            except KeyError:
                pass
        # Bug fix: always raise AttributeError here.  The original
        # leaked KeyError from the digit-alias lookup, which broke
        # hasattr()/getattr(obj, name, default).
        raise AttributeError(
            "%s has no attribute %r" % (self.__class__, name))

    def __getitem__(self, key):
        try:
            return super(Bunch, self).__getitem__(key)
        except KeyError:
            from numbers import Integral
            # Allow integer lookup of string-digit keys (b[5] -> b["5"]).
            if isinstance(key, Integral):
                return self[str(key)]
            raise

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        try:
            del self[name]
        except KeyError:
            raise AttributeError(
                "%s has no attribute %r" % (self.__class__, name))

    def __getstate__(self):
        return self

    def __setstate__(self, dict):
        self.update(dict)

    def __repr__(self):
        if getattr(self, "__stop_recursing__", False):
            # Already rendering this bunch further up the stack: list
            # only the keys to avoid infinite recursion.
            items = sorted(
                "%s" % k
                for k in self
                if isinstance(k, str) and not k.startswith("__"))
            attrs = ", ".join(items)
        else:
            dict.__setattr__(self, "__stop_recursing__", True)
            try:
                attrs = self.render()
            finally:
                dict.__delattr__(self, "__stop_recursing__")
        return "%s(%s)" % (self.__class__.__name__, attrs)

    def render(self):
        """Render sorted ``key=value`` pairs for public string keys."""
        items = sorted(
            "%s=%r" % (k, v)
            for k, v in self.items()
            if isinstance(k, str) and not k.startswith("__"))
        return ", ".join(items)

    def to_dict(self):
        """Recursively convert this bunch to plain dicts."""
        return unbunchify(self)

    def to_json(self):
        import json
        return json.dumps(self.to_dict())

    def copy(self, deep=False):
        """Shallow copy by default; ``deep=True`` copies nested containers."""
        if deep:
            return _convert(self, self.__class__)
        else:
            return self.__class__(self)

    @classmethod
    def from_dict(cls, d):
        return _convert(d, cls)

    @classmethod
    def from_json(cls, d):
        import json
        return cls.from_dict(json.loads(d))

    def __dir__(self):
        members = set(
            k for k in self
            if isinstance(k, str)
            and (k[0] == "_" or k.replace("_", "").isalnum()))
        members.update(dict.__dir__(self))
        return sorted(members)

    def without(self, *keys):
        "Return a shallow copy of the bunch without the specified keys"
        return Bunch((k, v) for k, v in self.items() if k not in keys)

    def but_with(self, **kw):
        "Return a shallow copy of the bunch with the specified keys"
        return Bunch(self, **kw)


def _convert(d, typ):
    """Recursively rebuild mappings as ``typ`` (keys coerced to str)."""
    if isinstance(d, dict):
        return typ({str(k): _convert(v, typ) for k, v in d.items()})
    elif isinstance(d, (tuple, list, set)):
        return type(d)(_convert(e, typ) for e in d)
    else:
        return d


def unbunchify(d):
    """Recursively convert Bunches in `d` to regular dicts."""
    return _convert(d, dict)


def bunchify(d=None, **kw):
    """Recursively convert dicts in `d` to Bunches.

    If `kw` given, recursively convert dicts in it to Bunches and update
    `d` with it.  If `d` is None, an empty Bunch is made.
    """
    d = _convert(d, Bunch) if d is not None else Bunch()
    if kw:
        d.update(bunchify(kw))
    return d


def generate_ip_range(ip_ranges):
    """Generate list of ips from provided ip ranges.

    `ip_ranges` should be a list of ranges where the first ip in each
    range is the start ip and the second is the end ip, e.g.:
    [["15.0.0.1", "15.0.0.4"], ["10.0.0.27", "10.0.0.30"]]
    """
    return [
        ip.compressed
        for start_ip, end_ip in ip_ranges
        for net in ipaddress.summarize_address_range(
            ipaddress.ip_address(start_ip), ipaddress.ip_address(end_ip))
        for ip in net
    ]


def decorate_methods_with(dec):
    """Class decorator applying ``dec`` to every public method.

    No-op unless debug logging is enabled.
    """
    if not CONF.debug:
        return lambda cls: cls

    def inner(cls):
        for attr_name, attr_val in cls.__dict__.items():
            if (isinstance(attr_val, types.FunctionType)
                    and not attr_name.startswith("_")):
                setattr(cls, attr_name, dec(attr_val))
        return cls
    return inner


def verbose_driver_trace(fn):
    """Wrap a method with entry/exit debug logging (debug mode only)."""
    if not CONF.debug:
        return fn

    def inner(self, *args, **kwargs):
        start = timeutils.utcnow()
        LOG.debug(f"[{fn.__name__}] >>>")
        res = fn(self, *args, **kwargs)
        end = timeutils.utcnow()
        LOG.debug(
            f"Spent {timeutils.delta_seconds(start, end)} sec. "
            f"Return {res}.\n"
            f"<<< [{fn.__name__}]"
        )
        return res
    return inner
authentication" " (Bearer token will be acquired)." ) self.headers["authorization"] = "Bearer" if not ssl_verify: import urllib3 urllib3.disable_warnings() def refresh_auth_token(self): try: resp = super().request( "POST", f"{self.base_url}/token/", verify=self.ssl_verify, timeout=5, json={"username": self.username, "password": self.password}, ) resp.raise_for_status() token = resp.json()["access"] self.headers["authorization"] = f"Bearer {token}" except ConnectionError as e: raise exception.VastApiException( reason=f"The vms on the designated host {self.base_url} " f"cannot be accessed. Please verify the specified endpoint. " f"origin error: {e}" ) @manila_utils.retry(retry_param=exception.VastApiRetry, retries=3) def request( self, verb, api_method, params=None, log_result=True, **kwargs ): verb = verb.upper() api_method = api_method.strip("/") url = f"{self.base_url}/{api_method}/" log_pref = f"\n[{verb}] {url}" if "data" in kwargs: kwargs["data"] = json.dumps(kwargs["data"]) if log_result and (params or kwargs): payload = dict(kwargs, params=params) formatted_request = textwrap.indent( pprint.pformat(payload), prefix="| " ) LOG.debug(f"{log_pref} >>>:\n{formatted_request}") else: LOG.debug(f"{log_pref} >>> (request)") ret = super().request( verb, url, verify=self.ssl_verify, params=params, **kwargs ) # No refresh for token based auth. Token should be long-lived. 
if ret.status_code == 403 and not self.token: self.refresh_auth_token() raise exception.VastApiRetry(reason="Token is invalid or expired.") if ret.status_code in (400, 503) and ret.text: raise exception.VastApiException(reason=ret.text) try: ret.raise_for_status() except Exception as exc: raise exception.VastApiException(reason=str(exc)) ret = ret.json() if ret.content else {} if ret and log_result: formatted_response = textwrap.indent( pprint.pformat(ret), prefix="| " ) LOG.debug(f"{log_pref} <<<:\n{formatted_response}") else: LOG.debug(f"{log_pref} <<< (response)") return driver_util.Bunch.from_dict(ret) def __getattr__(self, attr): if attr.startswith("_"): raise AttributeError(attr) def func(**params): return self.request("get", attr, params=params) func.__name__ = attr setattr(self, attr, func) return func def requisite(semver: str, operation: str = None): """Use this decorator to indicate the minimum required version cluster for invoking the API that is being decorated. Decorator works in two modes: 1. When ignore == False and version mismatch detected then `OperationNotSupported` exception will be thrown 2. When ignore == True and version mismatch detected then method decorated method execution never happened """ def dec(fn): def _args_wrapper(self, *args, **kwargs): version = packaging_version.parse( self.rest.get_sw_version().replace("-", ".") ) sw_version = f"{version.major}.{version.minor}.{version.micro}" if not versionutils.is_compatible( semver, sw_version, same_major=False ): op = operation or fn.__name__ raise exception.VastDriverException( f"Operation {op} is not supported" f" on VAST version {sw_version}." f" Required version is {semver}" ) return fn(self, *args, **kwargs) return _args_wrapper return dec class VastResource(ABC): resource_name = None def __init__(self, rest): self.rest = rest # For intercommunication between resources. 
self.session = rest.session def list(self, **params): """Get list of entries with optional filtering params""" return self.session.get(self.resource_name, params=params) def create(self, **params): """Create new entry with provided params""" return self.session.post(self.resource_name, data=params) def update(self, entry_id, **params): """Update entry by id with provided params""" return self.session.patch( f"{self.resource_name}/{entry_id}", data=params ) def delete(self, name): """Delete entry by name. Skip if entry not found.""" entry = self.one(name) if not entry: resource = self.__class__.__name__.lower() LOG.warning( f"{resource} {name} not found on VAST, skipping delete" ) return return self.session.delete(f"{self.resource_name}/{entry.id}") def one(self, name): """Get single entry by name. Raise exception if multiple entries found. """ entries = self.list(name=name) if not entries: return if len(entries) > 1: resource = self.__class__.__name__.lower() + "s" raise exception.VastDriverException( reason=f"Too many {resource} found with name {name}" ) return entries[0] def ensure(self, name, **params): entry = self.one(name) if not entry: entry = self.create(name=name, **params) return entry class View(VastResource): resource_name = "views" def create(self, name, path, policy_id): data = dict( name=name, path=path, policy_id=policy_id, create_dir=True, protocols=["NFS"], ) return super().create(**data) class ViewPolicy(VastResource): resource_name = "viewpolicies" class CapacityMetrics(VastResource): def get(self, metrics, object_type="cluster", time_frame="1m"): """Get capacity metrics for the cluster""" params = dict( prop_list=metrics, object_type=object_type, time_frame=time_frame ) ret = self.session.get("monitors/ad_hoc_query", params=params) last_sample = ret.data[-1] return driver_util.Bunch( { name.partition(",")[-1]: value for name, value in zip(ret.prop_list, last_sample) } ) class Quota(VastResource): resource_name = "quotas" class 
class VipPool(VastResource):
    resource_name = "vippools"

    def vips(self, pool_name):
        """Get list of ip addresses from vip pool.

        :raises VastDriverException: when the pool does not exist or
            contains no addresses.
        """
        vippool = self.one(name=pool_name)
        if not vippool:
            raise exception.VastDriverException(
                reason=f"No vip pool found with name {pool_name}"
            )
        vips = driver_util.generate_ip_range(vippool.ip_ranges)
        if not vips:
            raise exception.VastDriverException(
                reason=f"Pool {pool_name} has no available vips"
            )
        return vips


class Snapshots(VastResource):
    resource_name = "snapshots"


class Folders(VastResource):
    resource_name = "folders"

    @requisite(semver="4.7.0")
    def delete(self, path):
        """Delete a folder by path (requires VAST >= 4.7.0).

        A missing directory is tolerated; a disabled trash folder is
        reported as a driver error; anything else is re-raised.
        """
        try:
            self.session.delete(
                f"{self.resource_name}/delete_folder/", data=dict(path=path)
            )
        except exception.VastApiException as e:
            exc_msg = str(e)
            if "no such directory" in exc_msg:
                LOG.debug(f"remote directory "
                          f"might have been removed earlier. ({e})")
            elif "trash folder disabled" in exc_msg:
                raise exception.VastDriverException(
                    reason="Trash Folder Access is disabled"
                    " (see Settings/Cluster/Features in VMS)"
                )
            else:
                # unpredictable error
                raise


class RestApi:
    """Facade owning the HTTP session plus one handle per VAST resource."""

    def __init__(
            self,
            host,
            username,
            password,
            api_token,
            ssl_verify,
            plugin_version,
    ):
        self.session = Session(
            host=host,
            username=username,
            password=password,
            api_token=api_token,
            ssl_verify=ssl_verify,
            plugin_version=plugin_version,
        )
        self.views = View(self)
        self.view_policies = ViewPolicy(self)
        self.capacity_metrics = CapacityMetrics(self)
        self.quotas = Quota(self)
        self.vip_pools = VipPool(self)
        self.snapshots = Snapshots(self)
        self.folders = Folders(self)
        # Prime the Bearer token to avoid an initial "forbidden" error —
        # but only for username/password auth.  Bug fix: the original
        # refreshed unconditionally, which clobbered the 'Api-Token'
        # Authorization header configured in Session.__init__ when an
        # API token was supplied (Session.request itself only refreshes
        # when no token is configured).
        if not self.session.token:
            self.session.refresh_auth_token()

    @cachetools.cached(cache=cachetools.TTLCache(ttl=60 * 60, maxsize=1))
    def get_sw_version(self):
        """Software version of cluster Rest API interacts with.

        Cached for one hour.
        """
        return self.session.versions(status="success")[0].sys_version
Limitation: 1) single tenant """ import hashlib from http import client as http_client import json from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from random import shuffle import requests import requests.auth from manila.common import constants as const from manila import exception from manila.share import driver LOG = logging.getLogger(__name__) va_share_opts = [ cfg.StrOpt('va_server_ip', help='Console IP of Veritas Access server.'), cfg.IntOpt('va_port', default=14161, help='Veritas Access server REST port.'), cfg.StrOpt('va_user', help='Veritas Access server REST login name.'), cfg.StrOpt('va_pwd', secret=True, help='Veritas Access server REST password.'), cfg.StrOpt('va_pool', help='Veritas Access storage pool from which ' 'shares are served.'), cfg.StrOpt('va_fstype', default='simple', help='Type of VA file system to be created.') ] CONF = cfg.CONF CONF.register_opts(va_share_opts) class NoAuth(requests.auth.AuthBase): """This is a 'authentication' handler. It exists for use with custom authentication systems, such as the one for the Access API, it simply passes the Authorization header as-is. The default authentication handler for requests will clobber the Authorization header. """ def __call__(self, r): return r class ACCESSShareDriver(driver.ExecuteMixin, driver.ShareDriver): """ACCESS Share Driver. Executes commands relating to Manila Shares. Supports creation of shares on ACCESS. API version history: 1.0 - Initial version. 
""" VA_SHARE_PATH_STR = '/vx/' def __init__(self, *args, **kwargs): """Do initialization.""" super(ACCESSShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(va_share_opts) self.backend_name = self.configuration.safe_get( 'share_backend_name') or "VeritasACCESS" self._va_ip = None self._va_url = None self._pool = None self._fstype = None self._port = None self._user = None self._pwd = None self._cred = None self._connect_resp = None self._verify_ssl_cert = None self._fs_create_str = '/fs/create' self._fs_list_str = '/fs' self._fs_delete_str = '/fs/destroy' self._fs_extend_str = '/fs/grow' self._fs_shrink_str = '/fs/shrink' self._snap_create_str = '/snapshot/create' self._snap_delete_str = '/snapshot/delete' self._snap_list_str = '/snapshot/getSnapShotList' self._nfs_add_str = '/share/create' self._nfs_delete_str = '/share/delete' self._nfs_share_list_str = '/share/all_shares_details_by_path/?path=' self._ip_addr_show_str = '/common/get_all_ips' self._pool_free_str = '/storage/pool' self._update_object = '/objecttags' self.session = None self.host = None LOG.debug("ACCESSShareDriver called") def do_setup(self, context): """Any initialization the share driver does while starting.""" super(ACCESSShareDriver, self).do_setup(context) self._va_ip = self.configuration.va_server_ip self._pool = self.configuration.va_pool self._user = self.configuration.va_user self._pwd = self.configuration.va_pwd self._port = self.configuration.va_port self._fstype = self.configuration.va_fstype self.session = self._authenticate_access(self._va_ip, self._user, self._pwd) def _get_va_share_name(self, name): length = len(name) index = int(length / 2) name1 = name[:index] name2 = name[index:] crc1 = hashlib.md5(name1.encode('utf-8'), usedforsecurity=False).hexdigest()[:8] crc2 = hashlib.md5(name2.encode('utf-8'), usedforsecurity=False).hexdigest()[:8] return crc1 + '-' + crc2 def _get_va_snap_name(self, name): return self._get_va_share_name(name) def 
_get_va_share_path(self, name): return self.VA_SHARE_PATH_STR + name def _does_item_exist_at_va_backend(self, item_name, path_given): """Check given share is exists on backend""" path = path_given provider = '%s:%s' % (self.host, self._port) data = {} item_list = self._access_api(self.session, provider, path, json.dumps(data), 'GET') for item in item_list: if item['name'] == item_name: return True return False def _return_access_lists_difference(self, list_a, list_b): """Returns a list of elements in list_a that are not in list_b""" sub_list = [{"access_to": s.get('access_to'), "access_type": s.get('access_type'), "access_level": s.get('access_level')} for s in list_b] return [r for r in list_a if ( {"access_to": r.get("access_to"), "access_type": r.get("access_type"), "access_level": r.get("access_level")} not in sub_list)] def _fetch_existing_rule(self, share_name): """Return list of access rules on given share""" share_path = self._get_va_share_path(share_name) path = self._nfs_share_list_str + share_path provider = '%s:%s' % (self.host, self._port) data = {} share_list = self._access_api(self.session, provider, path, json.dumps(data), 'GET') va_access_list = [] for share in share_list: if share['shareType'] == 'NFS': for share_info in share['shares']: if share_info['name'] == share_path: access_to = share_info['host_name'] a_level = const.ACCESS_LEVEL_RO if const.ACCESS_LEVEL_RW in share_info['privilege']: a_level = const.ACCESS_LEVEL_RW va_access_list.append({ 'access_to': access_to, 'access_level': a_level, 'access_type': 'ip' }) return va_access_list def create_share(self, ctx, share, share_server=None): """Create an ACCESS file system that will be represented as share.""" sharename = share['name'] sizestr = '%sg' % share['size'] LOG.debug("ACCESSShareDriver create_share sharename %s sizestr %r", sharename, sizestr) va_sharename = self._get_va_share_name(sharename) va_sharepath = self._get_va_share_path(va_sharename) va_fs_type = self._fstype path = 
self._fs_create_str provider = '%s:%s' % (self.host, self._port) data1 = { "largefs": "no", "blkSize": "blksize=8192", "pdirEnable": "pdir_enable=yes" } data1["layout"] = va_fs_type data1["fs_name"] = va_sharename data1["fs_size"] = sizestr data1["pool_disks"] = self._pool result = self._access_api(self.session, provider, path, json.dumps(data1), 'POST') if not result: message = (('ACCESSShareDriver create share failed %s'), sharename) LOG.error(message) raise exception.ShareBackendException(msg=message) data2 = {"type": "FS", "key": "manila"} data2["id"] = va_sharename data2["value"] = 'manila_fs' path = self._update_object result = self._access_api(self.session, provider, path, json.dumps(data2), 'POST') vip = self._get_vip() location = vip + ':' + va_sharepath LOG.debug("ACCESSShareDriver create_share location %s", location) return location def _get_vip(self): """Get a virtual IP from ACCESS.""" ip_list = self._get_access_ips(self.session, self.host) vip = [] for ips in ip_list: if ips['isconsoleip'] == 1: continue if ips['type'] == 'Virtual' and ips['status'] == 'ONLINE': vip.append(ips['ip']) shuffle(vip) return str(vip[0]) def delete_share(self, context, share, share_server=None): """Delete a share from ACCESS.""" sharename = share['name'] va_sharename = self._get_va_share_name(sharename) LOG.debug("ACCESSShareDriver delete_share %s called", sharename) if share['snapshot_id']: message = (('ACCESSShareDriver delete share %s' ' early return'), sharename) LOG.debug(message) return ret_val = self._does_item_exist_at_va_backend(va_sharename, self._fs_list_str) if not ret_val: return path = self._fs_delete_str provider = '%s:%s' % (self.host, self._port) data = {} data["fs_name"] = va_sharename result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') if not result: message = (('ACCESSShareDriver delete share failed %s'), sharename) LOG.error(message) raise exception.ShareBackendException(msg=message) data2 = {"type": "FS", "key": "manila"} 
data2["id"] = va_sharename path = self._update_object result = self._access_api(self.session, provider, path, json.dumps(data2), 'DELETE') def extend_share(self, share, new_size, share_server=None): """Extend existing share to new size.""" sharename = share['name'] size = '%s%s' % (str(new_size), 'g') va_sharename = self._get_va_share_name(sharename) path = self._fs_extend_str provider = '%s:%s' % (self.host, self._port) data1 = {"operationOption": "growto", "tier": "primary"} data1["fs_name"] = va_sharename data1["fs_size"] = size result = self._access_api(self.session, provider, path, json.dumps(data1), 'POST') if not result: message = (('ACCESSShareDriver extend share failed %s'), sharename) LOG.error(message) raise exception.ShareBackendException(msg=message) LOG.debug('ACCESSShareDriver extended share' ' successfully %s', sharename) def shrink_share(self, share, new_size, share_server=None): """Shrink existing share to new size.""" sharename = share['name'] va_sharename = self._get_va_share_name(sharename) size = '%s%s' % (str(new_size), 'g') path = self._fs_extend_str provider = '%s:%s' % (self.host, self._port) data1 = {"operationOption": "shrinkto", "tier": "primary"} data1["fs_name"] = va_sharename data1["fs_size"] = size result = self._access_api(self.session, provider, path, json.dumps(data1), 'POST') if not result: message = (('ACCESSShareDriver shrink share failed %s'), sharename) LOG.error(message) raise exception.ShareBackendException(msg=message) LOG.debug('ACCESSShareDriver shrunk share successfully %s', sharename) def _allow_access(self, context, share, access, share_server=None): """Give access of a share to an IP.""" access_type = access['access_type'] server = access['access_to'] if access_type != 'ip': raise exception.InvalidShareAccess('Only ip access type ' 'supported.') access_level = access['access_level'] if access_level not in (const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO): raise exception.InvalidShareAccessLevel(level=access_level) 
export_path = share['export_locations'][0]['path'].split(':', 1) va_sharepath = str(export_path[1]) access_level = '%s,%s' % (str(access_level), 'sync,no_root_squash') path = self._nfs_add_str provider = '%s:%s' % (self.host, self._port) data = {} va_share_info = ("{\"share\":[{\"fileSystemPath\":\"" + va_sharepath + "\",\"shareType\":\"NFS\",\"shareDetails\":" + "[{\"client\":\"" + server + "\",\"exportOptions\":\"" + access_level + "\"}]}]}") data["shareDetails"] = va_share_info result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') if not result: message = (('ACCESSShareDriver access failed sharepath %s ' 'server %s'), va_sharepath, server) LOG.error(message) raise exception.ShareBackendException(msg=message) LOG.debug("ACCESSShareDriver allow_access sharepath %s server %s", va_sharepath, server) data2 = {"type": "SHARE", "key": "manila"} data2["id"] = va_sharepath data2["value"] = 'manila_share' path = self._update_object result = self._access_api(self.session, provider, path, json.dumps(data2), 'POST') def _deny_access(self, context, share, access, share_server=None): """Deny access to the share.""" server = access['access_to'] access_type = access['access_type'] if access_type != 'ip': return export_path = share['export_locations'][0]['path'].split(':', 1) va_sharepath = str(export_path[1]) LOG.debug("ACCESSShareDriver deny_access sharepath %s server %s", va_sharepath, server) path = self._nfs_delete_str provider = '%s:%s' % (self.host, self._port) data = {} va_share_info = ("{\"share\":[{\"fileSystemPath\":\"" + va_sharepath + "\",\"shareType\":\"NFS\",\"shareDetails\":" + "[{\"client\":\"" + server + "\"}]}]}") data["shareDetails"] = va_share_info result = self._access_api(self.session, provider, path, json.dumps(data), 'DELETE') if not result: message = (('ACCESSShareDriver deny failed' ' sharepath %s server %s'), va_sharepath, server) LOG.error(message) raise exception.ShareBackendException(msg=message) 
LOG.debug("ACCESSShareDriver deny_access sharepath %s server %s", va_sharepath, server) data2 = {"type": "SHARE", "key": "manila"} data2["id"] = va_sharepath path = self._update_object result = self._access_api(self.session, provider, path, json.dumps(data2), 'DELETE') def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access to the share.""" if (add_rules or delete_rules): # deleting rules for rule in delete_rules: self._deny_access(context, share, rule, share_server) # adding rules for rule in add_rules: self._allow_access(context, share, rule, share_server) else: if not access_rules: LOG.warning("No access rules provided in update_access.") else: sharename = self._get_va_share_name(share['name']) existing_a_rules = self._fetch_existing_rule(sharename) d_rule = self._return_access_lists_difference(existing_a_rules, access_rules) for rule in d_rule: LOG.debug("Removing rule %s in recovery.", str(rule)) self._deny_access(context, share, rule, share_server) a_rule = self._return_access_lists_difference(access_rules, existing_a_rules) for rule in a_rule: LOG.debug("Adding rule %s in recovery.", str(rule)) self._allow_access(context, share, rule, share_server) def create_snapshot(self, context, snapshot, share_server=None): """create snapshot of a share.""" LOG.debug('ACCESSShareDriver create_snapshot called ' 'for snapshot ID %s.', snapshot['snapshot_id']) sharename = snapshot['share_name'] va_sharename = self._get_va_share_name(sharename) snapname = snapshot['name'] va_snapname = self._get_va_snap_name(snapname) path = self._snap_create_str provider = '%s:%s' % (self.host, self._port) data = {} data["snapShotname"] = va_snapname data["fileSystem"] = va_sharename data["removable"] = 'yes' result = self._access_api(self.session, provider, path, json.dumps(data), 'PUT') if not result: message = (('ACCESSShareDriver create snapshot failed snapname %s' ' sharename %s'), snapname, va_sharename) 
LOG.error(message) raise exception.ShareBackendException(msg=message) data2 = {"type": "SNAPSHOT", "key": "manila"} data2["id"] = va_snapname data2["value"] = 'manila_snapshot' path = self._update_object result = self._access_api(self.session, provider, path, json.dumps(data2), 'POST') def delete_snapshot(self, context, snapshot, share_server=None): """Deletes a snapshot.""" sharename = snapshot['share_name'] va_sharename = self._get_va_share_name(sharename) snapname = snapshot['name'] va_snapname = self._get_va_snap_name(snapname) ret_val = self._does_item_exist_at_va_backend(va_snapname, self._snap_list_str) if not ret_val: return path = self._snap_delete_str provider = '%s:%s' % (self.host, self._port) data = {} data["name"] = va_snapname data["fsName"] = va_sharename data_to_send = {"snapShotDetails": {"snapshot": [data]}} result = self._access_api(self.session, provider, path, json.dumps(data_to_send), 'DELETE') if not result: message = (('ACCESSShareDriver delete snapshot failed snapname %s' ' sharename %s'), snapname, va_sharename) LOG.error(message) raise exception.ShareBackendException(msg=message) data2 = {"type": "SNAPSHOT", "key": "manila"} data2["id"] = va_snapname path = self._update_object result = self._access_api(self.session, provider, path, json.dumps(data2), 'DELETE') def create_share_from_snapshot(self, ctx, share, snapshot, share_server=None, parent_share=None): """create share from a snapshot.""" sharename = snapshot['share_name'] va_sharename = self._get_va_share_name(sharename) snapname = snapshot['name'] va_snapname = self._get_va_snap_name(snapname) va_sharepath = self._get_va_share_path(va_sharename) LOG.debug(('ACCESSShareDriver create_share_from_snapshot snapname %s' ' sharename %s'), va_snapname, va_sharename) vip = self._get_vip() location = vip + ':' + va_sharepath + ':' + va_snapname LOG.debug("ACCESSShareDriver create_share location %s", location) return location def _get_api(self, provider, tail): api_root = 'https://%s/api' % 
(provider) return api_root + tail def _access_api(self, session, provider, path, input_data, method): """Returns False if failure occurs.""" kwargs = {'data': input_data} if not isinstance(input_data, dict): kwargs['headers'] = {'Content-Type': 'application/json'} full_url = self._get_api(provider, path) response = session.request(method, full_url, **kwargs) if response.status_code != http_client.OK: LOG.debug('Access API operation Failed.') return False if path == self._update_object: return True result = response.json() return result def _get_access_ips(self, session, host): path = self._ip_addr_show_str provider = '%s:%s' % (host, self._port) data = {} ip_list = self._access_api(session, provider, path, json.dumps(data), 'GET') return ip_list def _authenticate_access(self, address, username, password): session = requests.session() session.verify = False session.auth = NoAuth() response = session.post('https://%s:%s/api/rest/authenticate' % (address, self._port), data={'username': username, 'password': password}) if response.status_code != http_client.OK: LOG.debug(('failed to authenticate to remote cluster at %s as %s'), address, username) raise exception.NotAuthorized('Authentication failure.') result = response.json() session.headers.update({'Authorization': 'Bearer {}' .format(result['token'])}) session.headers.update({'Content-Type': 'application/json'}) return session def _get_access_pool_details(self): """Get access pool details.""" path = self._pool_free_str provider = '%s:%s' % (self.host, self._port) data = {} pool_details = self._access_api(self.session, provider, path, json.dumps(data), 'GET') for pool in pool_details: if pool['device_group_name'] == str(self._pool): total_capacity = (int(pool['capacity']) / units.Gi) used_size = (int(pool['used_size']) / units.Gi) return (total_capacity, (total_capacity - used_size)) message = 'Fetching pool details operation failed.' 
LOG.error(message) raise exception.ShareBackendException(msg=message) def _update_share_stats(self): """Retrieve status info from share volume group.""" LOG.debug("VRTSISA Updating share status.") self.host = str(self._va_ip) self.session = self._authenticate_access(self._va_ip, self._user, self._pwd) total_capacity, free_capacity = self._get_access_pool_details() data = { 'share_backend_name': self.backend_name, 'vendor_name': 'Veritas', 'driver_version': '1.0', 'storage_protocol': 'NFS', 'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'QoS_support': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True } super(ACCESSShareDriver, self)._update_share_stats(data) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9376714 manila-21.0.0/manila/share/drivers/windows/0000775000175000017500000000000000000000000020571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/windows/__init__.py0000664000175000017500000000000000000000000022670 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/windows/service_instance.py0000664000175000017500000003102200000000000024465 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from manila import exception from manila.i18n import _ from manila.share.drivers import service_instance from manila.share.drivers.windows import windows_utils from manila.share.drivers.windows import winrm_helper CONF = cfg.CONF LOG = log.getLogger(__name__) windows_share_server_opts = [ cfg.StrOpt( "winrm_cert_pem_path", default="~/.ssl/cert.pem", help="Path to the x509 certificate used for accessing the service " "instance."), cfg.StrOpt( "winrm_cert_key_pem_path", default="~/.ssl/key.pem", help="Path to the x509 certificate key."), cfg.BoolOpt( "winrm_use_cert_based_auth", default=False, help="Use x509 certificates in order to authenticate to the " "service instance.") ] CONF = cfg.CONF CONF.register_opts(windows_share_server_opts) class WindowsServiceInstanceManager(service_instance.ServiceInstanceManager): """"Manages Windows Nova instances.""" _INSTANCE_CONNECTION_PROTO = "WinRM" _CBS_INIT_RUN_PLUGIN_AFTER_REBOOT = 2 _CBS_INIT_WINRM_PLUGIN = "ConfigWinRMListenerPlugin" _DEFAULT_MINIMUM_PASS_LENGTH = 6 def __init__(self, driver_config=None, remote_execute=None): super(WindowsServiceInstanceManager, self).__init__( driver_config=driver_config) driver_config.append_config_values(windows_share_server_opts) self._use_cert_auth = self.get_config_option( "winrm_use_cert_based_auth") self._cert_pem_path = self.get_config_option( "winrm_cert_pem_path") self._cert_key_pem_path = self.get_config_option( "winrm_cert_key_pem_path") self._check_auth_mode() 
self._remote_execute = (remote_execute or winrm_helper.WinRMHelper( configuration=driver_config).execute) self._windows_utils = windows_utils.WindowsUtils( remote_execute=self._remote_execute) def _check_auth_mode(self): if self._use_cert_auth: if not (os.path.exists(self._cert_pem_path) and os.path.exists(self._cert_key_pem_path)): msg = _("Certificate based authentication was configured " "but one or more certificates are missing.") raise exception.ServiceInstanceException(msg) LOG.debug("Using certificate based authentication for " "service instances.") else: instance_password = self.get_config_option( "service_instance_password") if not self._check_password_complexity(instance_password): msg = _("The configured service instance password does not " "match the minimum complexity requirements. " "The password must contain at least %s characters. " "Also, it must contain at least one digit, " "one lower case and one upper case character.") raise exception.ServiceInstanceException( msg % self._DEFAULT_MINIMUM_PASS_LENGTH) LOG.debug("Using password based authentication for " "service instances.") def _get_auth_info(self): auth_info = {'use_cert_auth': self._use_cert_auth} if self._use_cert_auth: auth_info.update(cert_pem_path=self._cert_pem_path, cert_key_pem_path=self._cert_key_pem_path) return auth_info def get_common_server(self): data = super(WindowsServiceInstanceManager, self).get_common_server() data['backend_details'].update(self._get_auth_info()) return data def _get_new_instance_details(self, server): instance_details = super(WindowsServiceInstanceManager, self)._get_new_instance_details(server) instance_details.update(self._get_auth_info()) return instance_details def _check_password_complexity(self, password): # Make sure that the Windows complexity requirements are met: # http://technet.microsoft.com/en-us/library/cc786468(v=ws.10).aspx if len(password) < self._DEFAULT_MINIMUM_PASS_LENGTH: return False for r in ("[a-z]", "[A-Z]", "[0-9]"): if not 
re.search(r, password): return False return True def _test_server_connection(self, server): try: self._remote_execute(server, "whoami", retry=False) LOG.debug("Service VM %s is available via WinRM", server['ip']) return True except Exception as ex: LOG.debug("Server %(ip)s is not available via WinRM. " "Exception: %(ex)s ", dict(ip=server['ip'], ex=ex)) return False def _get_service_instance_create_kwargs(self): create_kwargs = {} if self._use_cert_auth: # At the moment, we pass the x509 certificate via user data. # We'll use keypairs instead as soon as the nova client will # support x509 certificates. with open(self._cert_pem_path, 'r') as f: cert_pem_data = f.read() create_kwargs['user_data'] = cert_pem_data else: # The admin password has to be specified via instance metadata in # order to be passed to the instance via the metadata service or # configdrive. admin_pass = self.get_config_option("service_instance_password") create_kwargs['meta'] = {'admin_pass': admin_pass} return create_kwargs def set_up_service_instance(self, context, network_info): instance_details = super(WindowsServiceInstanceManager, self).set_up_service_instance(context, network_info) security_services = network_info['security_services'] security_service = self.get_valid_security_service(security_services) if security_service: self._setup_security_service(instance_details, security_service) instance_details['joined_domain'] = bool(security_service) return instance_details def _setup_security_service(self, server, security_service): domain = security_service['domain'] admin_username = security_service['user'] admin_password = security_service['password'] dns_ip = security_service['dns_ip'] self._windows_utils.set_dns_client_search_list(server, [domain]) if_index = self._windows_utils.get_interface_index_by_ip(server, server['ip']) self._windows_utils.set_dns_client_server_addresses(server, if_index, [dns_ip]) # Joining an AD domain will alter the WinRM Listener configuration. 
# Cloudbase-init is required to be running on the Windows service # instance, so we re-enable the plugin configuring the WinRM listener. # # TODO(lpetrut): add a config option so that we may rely on the AD # group policies taking care of the WinRM configuration. self._run_cloudbase_init_plugin_after_reboot( server, plugin_name=self._CBS_INIT_WINRM_PLUGIN) self._join_domain(server, domain, admin_username, admin_password) def _join_domain(self, server, domain, admin_username, admin_password): # As the WinRM configuration may be altered and existing connections # closed, we may not be able to retrieve the result of this operation. # Instead, we'll ensure that the instance actually joined the domain # after the reboot. try: self._windows_utils.join_domain(server, domain, admin_username, admin_password) except processutils.ProcessExecutionError: raise except Exception as exc: LOG.debug("Unexpected error while attempting to join domain " "%(domain)s. Verifying the result of the operation " "after instance reboot. Exception: %(exc)s", dict(domain=domain, exc=exc)) # We reboot the service instance using the Compute API so that # we can wait for it to become active. self.reboot_server(server, soft_reboot=True) self.wait_for_instance_to_be_active( server['instance_id'], timeout=self.max_time_to_build_instance) if not self._check_server_availability(server): raise exception.ServiceInstanceException( _('%(conn_proto)s connection has not been ' 'established to %(server)s in %(time)ss. Giving up.') % { 'conn_proto': self._INSTANCE_CONNECTION_PROTO, 'server': server['ip'], 'time': self.max_time_to_build_instance}) current_domain = self._windows_utils.get_current_domain(server) if current_domain != domain: err_msg = _("Failed to join domain %(requested_domain)s. 
" "Current domain: %(current_domain)s") raise exception.ServiceInstanceException( err_msg % dict(requested_domain=domain, current_domain=current_domain)) def get_valid_security_service(self, security_services): if not security_services: LOG.info("No security services provided.") elif len(security_services) > 1: LOG.warning("Multiple security services provided. Only one " "security service of type 'active_directory' " "is supported.") else: security_service = security_services[0] security_service_type = security_service['type'] if security_service_type == 'active_directory': return security_service else: LOG.warning("Only security services of type " "'active_directory' are supported. " "Retrieved security " "service type: %(sec_type)s.", {'sec_type': security_service_type}) return None def _run_cloudbase_init_plugin_after_reboot(self, server, plugin_name): cbs_init_reg_section = self._get_cbs_init_reg_section(server) plugin_key_path = "%(cbs_init_section)s\\%(instance_id)s\\Plugins" % { 'cbs_init_section': cbs_init_reg_section, 'instance_id': server['instance_id'] } self._windows_utils.set_win_reg_value( server, path=plugin_key_path, key=plugin_name, value=self._CBS_INIT_RUN_PLUGIN_AFTER_REBOOT) def _get_cbs_init_reg_section(self, server): base_path = 'hklm:\\SOFTWARE' cbs_section = 'Cloudbase Solutions\\Cloudbase-Init' for upper_section in ('', 'Wow6432Node'): cbs_init_section = self._windows_utils.normalize_path( os.path.join(base_path, upper_section, cbs_section)) try: self._windows_utils.get_win_reg_value( server, path=cbs_init_section) return cbs_init_section except processutils.ProcessExecutionError as ex: # The exit code will always be '1' in case of errors, so the # only way to determine the error type is checking stderr. 
if 'Cannot find path' in ex.stderr: continue else: raise raise exception.ServiceInstanceException( _("Could not retrieve Cloudbase Init registry section")) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/windows/windows_smb_driver.py0000664000175000017500000001706000000000000025055 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log from oslo_utils import units from manila.share import driver as base_driver from manila.share.drivers import generic from manila.share.drivers.windows import service_instance from manila.share.drivers.windows import windows_smb_helper from manila.share.drivers.windows import windows_utils from manila.share.drivers.windows import winrm_helper LOG = log.getLogger(__name__) class WindowsSMBDriver(generic.GenericShareDriver): # NOTE(lpetrut): The first partition will be reserved by the OS. 
_DEFAULT_SHARE_PARTITION = 2 def __init__(self, *args, **kwargs): super(WindowsSMBDriver, self).__init__(*args, **kwargs) LOG.warning('Windows SMB share driver has been deprecated and is ' 'expected to be removed in a future release.') self._remote_execute = winrm_helper.WinRMHelper( configuration=self.configuration).execute self._windows_utils = windows_utils.WindowsUtils( remote_execute=self._remote_execute) self._smb_helper = windows_smb_helper.WindowsSMBHelper( remote_execute=self._remote_execute, configuration=self.configuration) def _update_share_stats(self, data=None): base_driver.ShareDriver._update_share_stats( self, data=dict(storage_protocol="CIFS")) def _setup_service_instance_manager(self): self.service_instance_manager = ( service_instance.WindowsServiceInstanceManager( driver_config=self.configuration)) def _setup_helpers(self): self._helpers = {key: self._smb_helper for key in ("SMB", "CIFS")} def _teardown_server(self, server_details, security_services=None): security_service = ( self.service_instance_manager.get_valid_security_service( security_services)) if server_details.get('joined_domain') and security_service: try: self._windows_utils.unjoin_domain(server_details, security_service['user'], security_service['password']) except Exception as exc: LOG.warning("Failed to remove service instance " "%(instance_id)s from domain %(domain)s. 
" "Exception: %(exc)s.", dict(instance_id=server_details['instance_id'], domain=security_service['domain'], exc=exc)) super(WindowsSMBDriver, self)._teardown_server(server_details, security_services) def _format_device(self, server_details, volume): disk_number = self._get_disk_number(server_details, volume) self._windows_utils.initialize_disk(server_details, disk_number) self._windows_utils.create_partition(server_details, disk_number) self._windows_utils.format_partition( server_details, disk_number, self._DEFAULT_SHARE_PARTITION) def _mount_device(self, share, server_details, volume): mount_path = self._get_mount_path(share) if not self._is_device_mounted(mount_path, server_details, volume): disk_number = self._get_disk_number(server_details, volume) self._windows_utils.ensure_directory_exists(server_details, mount_path) self._ensure_disk_online_and_writable(server_details, disk_number) self._windows_utils.add_access_path(server_details, mount_path, disk_number, self._DEFAULT_SHARE_PARTITION) def _unmount_device(self, share, server_details): mount_path = self._get_mount_path(share) disk_number = self._windows_utils.get_disk_number_by_mount_path( server_details, mount_path) self._windows_utils.remove(server_details, mount_path, is_junction=True) if disk_number: self._windows_utils.set_disk_online_status( server_details, disk_number, online=False) def _resize_filesystem(self, server_details, volume, new_size=None): disk_number = self._get_disk_number(server_details, volume) self._ensure_disk_online_and_writable(server_details, disk_number) if not new_size: new_size_bytes = self._windows_utils.get_partition_maximum_size( server_details, disk_number, self._DEFAULT_SHARE_PARTITION) else: new_size_bytes = new_size * units.Gi self._windows_utils.resize_partition(server_details, new_size_bytes, disk_number, self._DEFAULT_SHARE_PARTITION) def _ensure_disk_online_and_writable(self, server_details, disk_number): self._windows_utils.update_disk(server_details, disk_number) 
self._windows_utils.set_disk_readonly_status( server_details, disk_number, readonly=False) self._windows_utils.set_disk_online_status( server_details, disk_number, online=True) def _get_mounted_share_size(self, mount_path, server_details): total_bytes = self._windows_utils.get_disk_space_by_path( server_details, mount_path)[0] return float(total_bytes) / units.Gi def _get_consumed_space(self, mount_path, server_details): total_bytes, free_bytes = self._windows_utils.get_disk_space_by_path( server_details, mount_path) return float(total_bytes - free_bytes) / units.Gi def _get_mount_path(self, share): mount_path = os.path.join(self.configuration.share_mount_path, share['name']) return self._windows_utils.normalize_path(mount_path) def _get_disk_number(self, server_details, volume): disk_number = self._windows_utils.get_disk_number_by_serial_number( server_details, volume['id']) if disk_number is None: LOG.debug("Could not identify the mounted disk by serial number " "using the volume id %(volume_id)s. Attempting to " "retrieve it by the volume mount point %(mountpoint)s.", dict(volume_id=volume['id'], mountpoint=volume['mountpoint'])) # Assumes the mount_point will be something like /dev/hdX mount_point = volume['mountpoint'] disk_number = ord(mount_point[-1]) - ord('a') return disk_number def _is_device_mounted(self, mount_path, server_details, volume=None): disk_number = self._windows_utils.get_disk_number_by_mount_path( server_details, mount_path) return disk_number is not None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/windows/windows_smb_helper.py0000664000175000017500000002565400000000000025051 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# Copyright (c) 2015 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import os

from oslo_log import log

from manila.common import constants
from manila import exception
from manila.share.drivers import helpers
from manila.share.drivers.windows import windows_utils

LOG = log.getLogger(__name__)


class WindowsSMBHelper(helpers.CIFSHelperBase):
    """Manages SMB shares on a Windows service instance via PowerShell."""

    _SHARE_ACCESS_RIGHT_MAP = {
        constants.ACCESS_LEVEL_RW: "Change",
        constants.ACCESS_LEVEL_RO: "Read"}

    _NULL_SID = "S-1-0-0"

    _WIN_ACL_ALLOW = 0
    _WIN_ACL_DENY = 1

    _WIN_ACCESS_RIGHT_FULL = 0
    _WIN_ACCESS_RIGHT_CHANGE = 1
    _WIN_ACCESS_RIGHT_READ = 2
    _WIN_ACCESS_RIGHT_CUSTOM = 3

    _ACCESS_LEVEL_CUSTOM = 'custom'

    _WIN_ACL_MAP = {
        _WIN_ACCESS_RIGHT_CHANGE: constants.ACCESS_LEVEL_RW,
        _WIN_ACCESS_RIGHT_FULL: constants.ACCESS_LEVEL_RW,
        _WIN_ACCESS_RIGHT_READ: constants.ACCESS_LEVEL_RO,
        _WIN_ACCESS_RIGHT_CUSTOM: _ACCESS_LEVEL_CUSTOM,
    }

    _SUPPORTED_ACCESS_LEVELS = (constants.ACCESS_LEVEL_RO,
                                constants.ACCESS_LEVEL_RW)
    _SUPPORTED_ACCESS_TYPES = ('user', )

    def __init__(self, remote_execute, configuration):
        self._remote_exec = remote_execute
        self.configuration = configuration
        self._windows_utils = windows_utils.WindowsUtils(
            remote_execute=remote_execute)

    def init_helper(self, server):
        # A cheap smoke test that the SMB cmdlets are available.
        self._remote_exec(server, "Get-SmbShare")

    def create_exports(self, server, share_name, recreate=False):
        export_location = '\\\\%s\\%s' % (server['public_address'],
                                          share_name)
        if not self._share_exists(server, share_name):
            share_path = self._windows_utils.normalize_path(
                os.path.join(self.configuration.share_mount_path,
                             share_name))
            # If no access rules are requested, 'Everyone' will have read
            # access, by default. We set read access for the 'NULL SID' in
            # order to avoid this.
            cmd = ['New-SmbShare', '-Name', share_name, '-Path', share_path,
                   '-ReadAccess', "*%s" % self._NULL_SID]
            self._remote_exec(server, cmd)
        else:
            LOG.info("Skipping creating export %s as it already exists.",
                     share_name)
        return self.get_exports_for_share(server, export_location)

    def remove_exports(self, server, share_name):
        if self._share_exists(server, share_name):
            cmd = ['Remove-SmbShare', '-Name', share_name, "-Force"]
            self._remote_exec(server, cmd)
        else:
            LOG.debug("Skipping removing export %s as it does not exist.",
                      share_name)

    def _get_volume_path_by_share_name(self, server, share_name):
        share_path = self._get_share_path_by_name(server, share_name)
        volume_path = self._windows_utils.get_volume_path_by_mount_path(
            server, share_path)
        return volume_path

    def _get_acls(self, server, share_name):
        """Return the raw share ACL entries as a list of dicts."""
        cmd = ('Get-SmbShareAccess -Name %(share_name)s | '
               'Select-Object @("Name", "AccountName", '
               '"AccessControlType", "AccessRight") | '
               'ConvertTo-JSON -Compress' % {'share_name': share_name})
        (out, err) = self._remote_exec(server, cmd)

        if not out.strip():
            return []

        raw_acls = json.loads(out)
        # ConvertTo-JSON emits a bare object (not a list) for a single ACE.
        if isinstance(raw_acls, dict):
            return [raw_acls]
        return raw_acls

    def get_access_rules(self, server, share_name):
        """Translate the Windows share ACL into Manila access rules."""
        raw_acls = self._get_acls(server, share_name)
        acls = []

        for raw_acl in raw_acls:
            access_to = raw_acl['AccountName']
            access_right = raw_acl['AccessRight']
            access_level = self._WIN_ACL_MAP[access_right]
            access_allow = raw_acl["AccessControlType"] == self._WIN_ACL_ALLOW

            if not access_allow:
                if access_to.lower() == 'everyone' and len(raw_acls) == 1:
                    LOG.debug("No access rules are set yet for share %s",
                              share_name)
                else:
                    LOG.warning(
                        "Found explicit deny ACE rule that was not "
                        "created by Manila and will be ignored: %s",
                        raw_acl)
                continue
            if access_level == self._ACCESS_LEVEL_CUSTOM:
                LOG.warning(
                    "Found 'custom' ACE rule that will be ignored: %s",
                    raw_acl)
                continue
            elif access_right == self._WIN_ACCESS_RIGHT_FULL:
                LOG.warning(
                    "Account '%(access_to)s' was given full access "
                    "right on share %(share_name)s. Manila only "
                    "grants 'change' access.",
                    {'access_to': access_to,
                     'share_name': share_name})

            acl = {
                'access_to': access_to,
                'access_level': access_level,
                'access_type': 'user',
            }
            acls.append(acl)
        return acls

    def _grant_share_access(self, server, share_name, access_level,
                            access_to):
        access_right = self._SHARE_ACCESS_RIGHT_MAP[access_level]
        # Single quotes: PowerShell must not expand '$' in account names
        # (machine account names end with '$').
        cmd = ["Grant-SmbShareAccess", "-Name", share_name,
               "-AccessRight", access_right,
               "-AccountName", "'%s'" % access_to, "-Force"]
        self._remote_exec(server, cmd)
        self._refresh_acl(server, share_name)
        LOG.info("Granted %(access_level)s access to '%(access_to)s' "
                 "on share %(share_name)s",
                 {'access_level': access_level,
                  'access_to': access_to,
                  'share_name': share_name})

    def _refresh_acl(self, server, share_name):
        cmd = ['Set-SmbPathAcl', '-ShareName', share_name]
        self._remote_exec(server, cmd)

    def _revoke_share_access(self, server, share_name, access_to):
        # NOTE: use single quotes, consistent with _grant_share_access;
        # double quotes would let PowerShell expand '$' in account names
        # such as machine accounts ('DOMAIN\host$').
        cmd = ['Revoke-SmbShareAccess', '-Name', share_name,
               '-AccountName', "'%s'" % access_to, '-Force']
        self._remote_exec(server, cmd)
        self._refresh_acl(server, share_name)
        LOG.info("Revoked access to '%(access_to)s' "
                 "on share %(share_name)s",
                 {'access_to': access_to, 'share_name': share_name})

    def update_access(self, server, share_name, access_rules, add_rules,
                      delete_rules, update_rules):
        """Apply the requested access rule changes on the share."""
        self.validate_access_rules(
            access_rules + add_rules, self._SUPPORTED_ACCESS_TYPES,
            self._SUPPORTED_ACCESS_LEVELS)

        if not (add_rules or delete_rules):
            # Full sync requested: diff against the existing ACL.
            existing_rules = self.get_access_rules(server, share_name)
            add_rules, delete_rules = self._get_rule_updates(
                existing_rules=existing_rules,
                requested_rules=access_rules)
            LOG.debug(("Missing rules: %(add_rules)s, "
                       "superfluous rules: %(delete_rules)s"),
                      {'add_rules': add_rules,
                       'delete_rules': delete_rules})

        # Some rules may have changed, so we'll
        # treat the deleted rules first.
        for deleted_rule in delete_rules:
            try:
                self.validate_access_rules(
                    [deleted_rule], self._SUPPORTED_ACCESS_TYPES,
                    self._SUPPORTED_ACCESS_LEVELS)
            except (exception.InvalidShareAccess,
                    exception.InvalidShareAccessLevel):
                # This check will allow invalid rules to be deleted.
                LOG.warning(
                    "Unsupported access level %(level)s or access type "
                    "%(type)s, skipping removal of access rule to "
                    "%(to)s.", {'level': deleted_rule['access_level'],
                                'type': deleted_rule['access_type'],
                                'to': deleted_rule['access_to']})
                continue
            self._revoke_share_access(server, share_name,
                                      deleted_rule['access_to'])

        for added_rule in add_rules:
            self._grant_share_access(server, share_name,
                                     added_rule['access_level'],
                                     added_rule['access_to'])

    def _subtract_access_rules(self, access_rules, subtracted_rules):
        # Account names are case insensitive on Windows.
        filter_rules = lambda rules: [  # noqa: E731
            {'access_to': access_rule['access_to'].lower(),
             'access_level': access_rule['access_level'],
             'access_type': access_rule['access_type']}
            for access_rule in rules]

        return [rule for rule in filter_rules(access_rules)
                if rule not in filter_rules(subtracted_rules)]

    def _get_rule_updates(self, existing_rules, requested_rules):
        """Return (rules_to_add, rules_to_delete) to reach requested state."""
        added_rules = self._subtract_access_rules(requested_rules,
                                                  existing_rules)
        deleted_rules = self._subtract_access_rules(existing_rules,
                                                    requested_rules)
        return added_rules, deleted_rules

    def _get_share_name(self, export_location):
        return self._windows_utils.normalize_path(
            export_location).split('\\')[-1]

    def _get_export_location_template(self, old_export_location):
        share_name = self._get_share_name(old_export_location)
        return '\\\\%s' + ('\\%s' % share_name)

    def _get_share_path_by_name(self, server, share_name,
                                ignore_missing=False):
        cmd = ('Get-SmbShare -Name %s | '
               'Select-Object -ExpandProperty Path' % share_name)

        check_exit_code = not ignore_missing
        (share_path, err) = self._remote_exec(
            server, cmd, check_exit_code=check_exit_code)
        return share_path.strip() if share_path else None

    def get_share_path_by_export_location(self, server, export_location):
        share_name = self._get_share_name(export_location)
        return self._get_share_path_by_name(server, share_name)

    def _share_exists(self, server, share_name):
        share_path = self._get_share_path_by_name(server, share_name,
                                                  ignore_missing=True)
        return bool(share_path)
import re

from oslo_log import log

LOG = log.getLogger(__name__)


class WindowsUtils(object):
    """Thin PowerShell/fsutil wrapper executed over a remote channel."""

    def __init__(self, remote_execute):
        self._remote_exec = remote_execute
        self._fsutil_total_space_regex = re.compile('of bytes *: ([0-9]*)')
        self._fsutil_free_space_regex = re.compile(
            'of avail free bytes *: ([0-9]*)')

    def initialize_disk(self, server, disk_number):
        cmd = ["Initialize-Disk", "-Number", disk_number]
        self._remote_exec(server, cmd)

    def create_partition(self, server, disk_number):
        cmd = ["New-Partition", "-DiskNumber", disk_number,
               "-UseMaximumSize"]
        self._remote_exec(server, cmd)

    def format_partition(self, server, disk_number, partition_number):
        cmd = ("Get-Partition -DiskNumber %(disk_number)s "
               "-PartitionNumber %(partition_number)s | "
               "Format-Volume -FileSystem NTFS -Force -Confirm:$false" % {
                   'disk_number': disk_number,
                   'partition_number': partition_number,
               })
        self._remote_exec(server, cmd)

    def add_access_path(self, server, mount_path, disk_number,
                        partition_number):
        cmd = ["Add-PartitionAccessPath", "-DiskNumber", disk_number,
               "-PartitionNumber", partition_number,
               "-AccessPath", self.quote_string(mount_path)]
        self._remote_exec(server, cmd)

    def resize_partition(self, server, size_bytes, disk_number,
                         partition_number):
        cmd = ['Resize-Partition', '-DiskNumber', disk_number,
               '-PartitionNumber', partition_number,
               '-Size', size_bytes]
        self._remote_exec(server, cmd)

    def get_disk_number_by_serial_number(self, server, serial_number):
        """Return the disk number for a volume serial number, or None."""
        # Only the first 15 characters of the serial number are matched,
        # as longer serials may be truncated by Windows.
        pattern = "%s*" % serial_number[:15]
        cmd = ("Get-Disk | "
               "Where-Object {$_.SerialNumber -like '%s'} | "
               "Select-Object -ExpandProperty Number" % pattern)
        (out, err) = self._remote_exec(server, cmd)
        return int(out) if (len(out) > 0) else None

    def get_disk_number_by_mount_path(self, server, mount_path):
        """Return the disk number backing a mount path, or None."""
        cmd = ('Get-Partition | '
               'Where-Object {$_.AccessPaths -contains "%s"} | '
               'Select-Object -ExpandProperty DiskNumber' %
               (mount_path + "\\"))
        (out, err) = self._remote_exec(server, cmd)
        return int(out) if (len(out) > 0) else None

    def get_volume_path_by_mount_path(self, server, mount_path):
        cmd = ('Get-Partition | '
               'Where-Object {$_.AccessPaths -contains "%s"} | '
               'Get-Volume | '
               'Select-Object -ExpandProperty Path' % (mount_path + "\\"))
        (out, err) = self._remote_exec(server, cmd)
        return out.strip()

    def get_disk_space_by_path(self, server, mount_path):
        """Return (total_bytes, free_bytes) for the given volume path."""
        cmd = ["fsutil", "volume", "diskfree",
               self.quote_string(mount_path)]
        (out, err) = self._remote_exec(server, cmd)

        total_bytes = int(self._fsutil_total_space_regex.findall(out)[0])
        free_bytes = int(self._fsutil_free_space_regex.findall(out)[0])
        return total_bytes, free_bytes

    def get_partition_maximum_size(self, server, disk_number,
                                   partition_number):
        cmd = ('Get-PartitionSupportedSize -DiskNumber %(disk_number)s '
               '-PartitionNumber %(partition_number)s | '
               'Select-Object -ExpandProperty SizeMax' %
               dict(disk_number=disk_number,
                    partition_number=partition_number))
        (out, err) = self._remote_exec(server, cmd)
        max_bytes = int(out)
        return max_bytes

    def set_disk_online_status(self, server, disk_number, online=True):
        is_offline = int(not online)
        cmd = ["Set-Disk", "-Number", disk_number,
               "-IsOffline", is_offline]
        self._remote_exec(server, cmd)

    def set_disk_readonly_status(self, server, disk_number, readonly=False):
        cmd = ["Set-Disk", "-Number", disk_number,
               "-IsReadOnly", int(readonly)]
        self._remote_exec(server, cmd)

    def update_disk(self, server, disk_number):
        """Updates cached disk information."""
        cmd = ["Update-Disk", disk_number]
        self._remote_exec(server, cmd)

    def join_domain(self, server, domain, admin_username, admin_password):
        # NOTE(lpetrut): An instance reboot is needed but this will be
        # performed using Nova so that the instance state can be
        # retrieved easier.
        LOG.info("Joining server %(ip)s to Active Directory "
                 "domain %(domain)s", dict(ip=server['ip'], domain=domain))
        cmds = [
            ('$password = "%s" | '
             'ConvertTo-SecureString -asPlainText -Force' % admin_password),
            ('$credential = '
             'New-Object System.Management.Automation.PSCredential('
             '"%s", $password)' % admin_username),
            ('Add-Computer -DomainName "%s" -Credential $credential' %
             domain)]
        cmd = ";".join(cmds)
        self._remote_exec(server, cmd)

    def unjoin_domain(self, server, admin_username, admin_password,
                      reboot=False):
        cmds = [
            ('$password = "%s" | '
             'ConvertTo-SecureString -asPlainText -Force' % admin_password),
            ('$credential = '
             'New-Object System.Management.Automation.PSCredential('
             '"%s", $password)' % admin_username),
            ('Remove-Computer -UnjoinDomaincredential $credential '
             '-Passthru -Verbose -Force')]
        cmd = ";".join(cmds)
        self._remote_exec(server, cmd)

    def get_current_domain(self, server):
        cmd = "(Get-WmiObject Win32_ComputerSystem).Domain"
        (out, err) = self._remote_exec(server, cmd)
        return out.strip()

    def ensure_directory_exists(self, server, path):
        cmd = ["New-Item", "-ItemType", "Directory",
               "-Force", "-Path", self.quote_string(path)]
        self._remote_exec(server, cmd)

    def remove(self, server, path, force=True, recurse=False,
               is_junction=False):
        if self.path_exists(server, path):
            if is_junction:
                # Remove-Item recurses into junction targets, so junctions
                # are removed via the .NET API instead.
                cmd = ('[System.IO.Directory]::Delete('
                       '%(path)s, %(recurse)d)'
                       % dict(path=self.quote_string(path),
                              recurse=recurse))
            else:
                cmd = ["Remove-Item", "-Confirm:$false",
                       "-Path", self.quote_string(path)]
                if force:
                    cmd += ['-Force']
                if recurse:
                    cmd += ['-Recurse']
            self._remote_exec(server, cmd)
        else:
            LOG.debug("Skipping deleting path %s as it does "
                      "not exist.", path)

    def path_exists(self, server, path):
        # Quote the path, consistent with the other methods; an unquoted
        # path containing spaces would be split into multiple arguments.
        cmd = ["Test-Path", self.quote_string(path)]
        (out, _) = self._remote_exec(server, cmd)
        return out.strip() == "True"

    def normalize_path(self, path):
        return path.replace('/', '\\')

    def get_interface_index_by_ip(self, server, ip):
        cmd = ('Get-NetIPAddress | '
               'Where-Object {$_.IPAddress -eq "%(ip)s"} | '
               'Select-Object -ExpandProperty InterfaceIndex' %
               dict(ip=ip))
        (out, err) = self._remote_exec(server, cmd)
        if_index = int(out)
        return if_index

    def set_dns_client_search_list(self, server, search_list):
        src_list = ",".join(["'%s'" % domain for domain in search_list])
        cmd = ["Set-DnsClientGlobalSetting",
               "-SuffixSearchList", "@(%s)" % src_list]
        self._remote_exec(server, cmd)

    def set_dns_client_server_addresses(self, server, if_index, dns_servers):
        dns_sv_list = ",".join(["'%s'" % dns_sv for dns_sv in dns_servers])
        cmd = ["Set-DnsClientServerAddress",
               "-InterfaceIndex", if_index,
               "-ServerAddresses", "(%s)" % dns_sv_list]
        self._remote_exec(server, cmd)

    def set_win_reg_value(self, server, path, key, value):
        cmd = ['Set-ItemProperty', '-Path', self.quote_string(path),
               '-Name', key, '-Value', value]
        self._remote_exec(server, cmd)

    def get_win_reg_value(self, server, path, name=None):
        cmd = "Get-ItemProperty -Path %s" % self.quote_string(path)
        if name:
            cmd += " | Select-Object -ExpandProperty %s" % name
        return self._remote_exec(server, cmd, retry=False)[0]

    def quote_string(self, string):
        return '"%s"' % string
import base64 from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import strutils from manila import exception from manila.i18n import _ from manila import utils LOG = log.getLogger(__name__) CONF = cfg.CONF winrm_opts = [ cfg.IntOpt( 'winrm_conn_timeout', default=60, help='WinRM connection timeout.'), cfg.IntOpt( 'winrm_operation_timeout', default=60, help='WinRM operation timeout.'), cfg.IntOpt( 'winrm_retry_count', default=3, help='WinRM retry count.'), cfg.IntOpt( 'winrm_retry_interval', default=5, help='WinRM retry interval in seconds'), ] CONF.register_opts(winrm_opts) DEFAULT_PORT_HTTP = 5985 DEFAULT_PORT_HTTPS = 5986 TRANSPORT_PLAINTEXT = 'plaintext' TRANSPORT_SSL = 'ssl' winrm = None def setup_winrm(): global winrm if not winrm: try: winrm = importutils.import_module('winrm') except ImportError: raise exception.ShareBackendException( _("PyWinrm is not installed")) class WinRMHelper(object): def __init__(self, configuration=None): if configuration: configuration.append_config_values(winrm_opts) self._config = configuration else: self._config = CONF setup_winrm() def _get_conn(self, server): auth = self._get_auth(server) conn = WinRMConnection( ip=server['ip'], conn_timeout=self._config.winrm_conn_timeout, operation_timeout=self._config.winrm_operation_timeout, **auth) return conn def execute(self, server, command, check_exit_code=True, retry=True): retries = self._config.winrm_retry_count if retry else 1 conn = self._get_conn(server) @utils.retry(retry_param=Exception, interval=self._config.winrm_retry_interval, retries=retries) def _execute(): parsed_cmd, sanitized_cmd = self._parse_command(command) LOG.debug("Executing command: %s", sanitized_cmd) (stdout, stderr, exit_code) = conn.execute(parsed_cmd) sanitized_stdout = strutils.mask_password(stdout) sanitized_stderr = strutils.mask_password(stderr) LOG.debug("Executed command: %(cmd)s. Stdout: %(stdout)s. 
" "Stderr: %(stderr)s. Exit code %(exit_code)s", dict(cmd=sanitized_cmd, stdout=sanitized_stdout, stderr=sanitized_stderr, exit_code=exit_code)) if check_exit_code and exit_code != 0: raise processutils.ProcessExecutionError( stdout=sanitized_stdout, stderr=sanitized_stderr, exit_code=exit_code, cmd=sanitized_cmd) return (stdout, stderr) return _execute() def _parse_command(self, command): if isinstance(command, list) or isinstance(command, tuple): command = " ".join([str(c) for c in command]) sanitized_cmd = strutils.mask_password(command) b64_command = base64.b64encode(command.encode("utf_16_le")) command = ("powershell.exe -ExecutionPolicy RemoteSigned " "-NonInteractive -EncodedCommand %s" % b64_command) return command, sanitized_cmd def _get_auth(self, server): auth = {'username': server['username']} if server['use_cert_auth']: auth['cert_pem_path'] = server['cert_pem_path'] auth['cert_key_pem_path'] = server['cert_key_pem_path'] else: auth['password'] = server['password'] return auth class WinRMConnection(object): _URL_TEMPLATE = '%(protocol)s://%(ip)s:%(port)s/wsman' def __init__(self, ip=None, port=None, use_ssl=False, transport=None, username=None, password=None, cert_pem_path=None, cert_key_pem_path=None, operation_timeout=None, conn_timeout=None): setup_winrm() use_cert = bool(cert_pem_path and cert_key_pem_path) transport = (TRANSPORT_SSL if use_cert else TRANSPORT_PLAINTEXT) _port = port or self._get_default_port(use_cert) _url = self._get_url(ip, _port, use_cert) self._conn = winrm.protocol.Protocol( endpoint=_url, transport=transport, username=username, password=password, cert_pem=cert_pem_path, cert_key_pem=cert_key_pem_path) self._conn.transport.timeout = conn_timeout self._conn.set_timeout(operation_timeout) def _get_default_port(self, use_ssl): port = (DEFAULT_PORT_HTTPS if use_ssl else DEFAULT_PORT_HTTP) return port def _get_url(self, ip, port, use_ssl): if not ip: err_msg = _("No IP provided.") raise exception.ShareBackendException(msg=err_msg) 
protocol = 'https' if use_ssl else 'http' return self._URL_TEMPLATE % {'protocol': protocol, 'ip': ip, 'port': port} def execute(self, cmd): shell_id = None cmd_id = None try: shell_id = self._conn.open_shell() cmd_id = self._conn.run_command(shell_id, cmd) (stdout, stderr, exit_code) = self._conn.get_command_output(shell_id, cmd_id) finally: if cmd_id: self._conn.cleanup_command(shell_id, cmd_id) if shell_id: self._conn.close_shell(shell_id) return (stdout, stderr, exit_code) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9376714 manila-21.0.0/manila/share/drivers/zadara/0000775000175000017500000000000000000000000020341 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zadara/__init__.py0000664000175000017500000000000000000000000022440 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zadara/common.py0000664000175000017500000004771600000000000022222 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Zadara Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import re from oslo_config import cfg from oslo_log import log as logging import requests LOG = logging.getLogger(__name__) # Number of seconds the repsonse for the request sent to # vpsa is expected. Else the request will be timed out. # Setting it to 300 seconds initially. vpsa_timeout = 300 # Common exception class for all the exceptions that # are used to redirect to the driver specific exceptions. class CommonException(Exception): def __init__(self): pass class UnknownCmd(Exception): def __init__(self, cmd): self.cmd = cmd class BadHTTPResponseStatus(Exception): def __init__(self, status): self.status = status class FailedCmdWithDump(Exception): def __init__(self, status, data): self.status = status self.data = data class SessionRequestException(Exception): def __init__(self, msg): self.msg = msg class ZadaraInvalidAccessKey(Exception): pass exception = CommonException() zadara_opts = [ cfg.HostAddressOpt('zadara_vpsa_host', default=None, help='VPSA - Management Host name or IP address'), cfg.PortOpt('zadara_vpsa_port', default=None, help='VPSA - Port number'), cfg.BoolOpt('zadara_vpsa_use_ssl', default=False, help='VPSA - Use SSL connection'), cfg.BoolOpt('zadara_ssl_cert_verify', default=True, help='If set to True the http client will validate the SSL ' 'certificate of the VPSA endpoint.'), cfg.StrOpt('zadara_access_key', default=None, help='VPSA access key', secret=True), cfg.StrOpt('zadara_vpsa_poolname', default=None, help='VPSA - Storage Pool assigned for volumes'), cfg.BoolOpt('zadara_vol_encrypt', default=False, help='VPSA - Default encryption policy for volumes. ' 'If the option is neither configured nor provided ' 'as metadata, the VPSA will inherit the default value.'), cfg.BoolOpt('zadara_gen3_vol_dedupe', default=False, help='VPSA - Enable deduplication for volumes. 
' 'If the option is neither configured nor provided ' 'as metadata, the VPSA will inherit the default value.'), cfg.BoolOpt('zadara_gen3_vol_compress', default=False, help='VPSA - Enable compression for volumes. ' 'If the option is neither configured nor provided ' 'as metadata, the VPSA will inherit the default value.'), cfg.BoolOpt('zadara_default_snap_policy', default=False, help="VPSA - Attach snapshot policy for volumes. " "If the option is neither configured nor provided " "as metadata, the VPSA will inherit the default value.")] # Class used to connect and execute the commands on # Zadara Virtual Private Storage Array (VPSA). class ZadaraVPSAConnection(object): """Executes driver commands on VPSA.""" def __init__(self, conf, driver_ssl_cert_path, block): self.conf = conf self.access_key = conf.zadara_access_key if not self.access_key: raise exception.ZadaraInvalidAccessKey() self.driver_ssl_cert_path = driver_ssl_cert_path # Choose the volume type of either block or file-type # that will help to filter volumes. 
self.vol_type_str = 'showonlyblock' if block else 'showonlyfile' def _generate_vpsa_cmd(self, cmd, **kwargs): """Generate command to be sent to VPSA.""" # Dictionary of applicable VPSA commands in the following format: # 'command': (method, API_URL, {optional parameters}) vpsa_commands = { # Volume operations 'create_volume': ('POST', '/api/volumes.json', {'name': kwargs.get('name'), 'capacity': kwargs.get('size'), 'pool': self.conf.zadara_vpsa_poolname, 'block': 'YES' if self.vol_type_str == 'showonlyblock' else 'NO', 'thin': 'YES', 'crypt': 'YES' if self.conf.zadara_vol_encrypt else 'NO', 'compress': 'YES' if self.conf.zadara_gen3_vol_compress else 'NO', 'dedupe': 'YES' if self.conf.zadara_gen3_vol_dedupe else 'NO', 'attachpolicies': 'NO' if not self.conf.zadara_default_snap_policy else 'YES'}), 'delete_volume': ('DELETE', '/api/volumes/%s.json' % kwargs.get('vpsa_vol'), {'force': 'YES'}), 'expand_volume': ('POST', '/api/volumes/%s/expand.json' % kwargs.get('vpsa_vol'), {'capacity': kwargs.get('size')}), 'rename_volume': ('POST', '/api/volumes/%s/rename.json' % kwargs.get('vpsa_vol'), {'new_name': kwargs.get('new_name')}), # Snapshot operations # Snapshot request is triggered for a single volume though the # API call implies that snapshot is triggered for CG (legacy API). 
'create_snapshot': ('POST', '/api/consistency_groups/%s/snapshots.json' % kwargs.get('cg_name'), {'display_name': kwargs.get('snap_name')}), 'delete_snapshot': ('DELETE', '/api/snapshots/%s.json' % kwargs.get('snap_id'), {}), 'rename_snapshot': ('POST', '/api/snapshots/%s/rename.json' % kwargs.get('snap_id'), {'newname': kwargs.get('new_name')}), 'create_clone_from_snap': ('POST', '/api/consistency_groups/%s/clone.json' % kwargs.get('cg_name'), {'name': kwargs.get('name'), 'snapshot': kwargs.get('snap_id')}), 'create_clone': ('POST', '/api/consistency_groups/%s/clone.json' % kwargs.get('cg_name'), {'name': kwargs.get('name')}), # Server operations 'create_server': ('POST', '/api/servers.json', {'iqn': kwargs.get('iqn'), 'iscsi': kwargs.get('iscsi_ip'), 'display_name': kwargs.get('iqn') if kwargs.get('iqn') else kwargs.get('iscsi_ip')}), # Attach/Detach operations 'attach_volume': ('POST', '/api/servers/%s/volumes.json' % kwargs.get('vpsa_srv'), {'volume_name[]': kwargs.get('vpsa_vol'), 'access_type': kwargs.get('share_proto'), 'readonly': kwargs.get('read_only'), 'force': 'YES'}), 'detach_volume': ('POST', '/api/volumes/%s/detach.json' % kwargs.get('vpsa_vol'), {'server_name[]': kwargs.get('vpsa_srv'), 'force': 'YES'}), # Update volume comment 'update_volume': ('POST', '/api/volumes/%s/update_comment.json' % kwargs.get('vpsa_vol'), {'new_comment': kwargs.get('new_comment')}), # Get operations 'list_volumes': ('GET', '/api/volumes.json?%s=YES' % self.vol_type_str, {}), 'get_volume': ('GET', '/api/volumes/%s.json' % kwargs.get('vpsa_vol'), {}), 'get_volume_by_name': ('GET', '/api/volumes.json?display_name=%s' % kwargs.get('display_name'), {}), 'get_pool': ('GET', '/api/pools/%s.json' % kwargs.get('pool_name'), {}), 'list_controllers': ('GET', '/api/vcontrollers.json', {}), 'list_servers': ('GET', '/api/servers.json', {}), 'list_vol_snapshots': ('GET', '/api/consistency_groups/%s/snapshots.json' % kwargs.get('cg_name'), {}), 'list_vol_attachments': ('GET', 
'/api/volumes/%s/servers.json' % kwargs.get('vpsa_vol'), {}), 'list_snapshots': ('GET', '/api/snapshots.json', {}), # Put operations 'change_export_name': ('PUT', '/api/volumes/%s/export_name.json' % kwargs.get('vpsa_vol'), {'exportname': kwargs.get('exportname')})} try: method, url, params = vpsa_commands[cmd] # Populate the metadata for the volume creation metadata = kwargs.get('metadata') if metadata: for key, value in metadata.items(): params[key] = value except KeyError: raise exception.UnknownCmd(cmd=cmd) if method == 'GET': params = dict(page=1, start=0, limit=0) body = None elif method in ['DELETE', 'POST', 'PUT']: body = params params = None else: msg = ('Method %(method)s is not defined' % {'method': method}) LOG.error(msg) raise AssertionError(msg) # 'access_key' was generated using username and password # or it was taken from the input file headers = {'X-Access-Key': self.access_key} return method, url, params, body, headers def send_cmd(self, cmd, **kwargs): """Send command to VPSA Controller.""" if not self.access_key: raise exception.ZadaraInvalidAccessKey() method, url, params, body, headers = self._generate_vpsa_cmd(cmd, **kwargs) LOG.debug('Invoking %(cmd)s using %(method)s request.', {'cmd': cmd, 'method': method}) host = self._get_target_host(self.conf.zadara_vpsa_host) port = int(self.conf.zadara_vpsa_port) protocol = "https" if self.conf.zadara_vpsa_use_ssl else "http" if protocol == "https": if not self.conf.zadara_ssl_cert_verify: verify = False else: verify = (self.driver_ssl_cert_path if self.driver_ssl_cert_path else True) else: verify = False if port: api_url = "%s://%s:%d%s" % (protocol, host, port, url) else: api_url = "%s://%s%s" % (protocol, host, url) try: with requests.Session() as session: session.headers.update(headers) response = session.request(method, api_url, params=params, data=body, headers=headers, verify=verify, timeout=vpsa_timeout) except requests.exceptions.RequestException as e: msg = ('Exception: %s') % e raise 
exception.SessionRequestException(msg=msg) if response.status_code != 200: raise exception.BadHTTPResponseStatus( status=response.status_code) data = response.content json_data = json.loads(data) response = json_data['response'] status = int(response['status']) if status == 5: # Invalid Credentials raise exception.ZadaraInvalidAccessKey() if status != 0: raise exception.FailedCmdWithDump(status=status, data=data) if method in ['POST', 'DELETE']: LOG.debug('Operation completed with status code %(status)s', {'status': status}) return response def _get_target_host(self, vpsa_host): """Helper for target host formatting.""" ipv6_without_brackets = ':' in vpsa_host and vpsa_host[-1] != ']' if ipv6_without_brackets: return ('[%s]' % vpsa_host) return ('%s' % vpsa_host) def _get_active_controller_details(self): """Return details of VPSA's active controller.""" data = self.send_cmd('list_controllers') ctrl = None vcontrollers = data.get('vcontrollers', []) for controller in vcontrollers: if controller['state'] == 'active': ctrl = controller break if ctrl is not None: target_ip = (ctrl['iscsi_ipv6'] if ctrl['iscsi_ipv6'] else ctrl['iscsi_ip']) return dict(target=ctrl['target'], ip=target_ip, chap_user=ctrl['vpsa_chap_user'], chap_passwd=ctrl['vpsa_chap_secret']) return None def _check_access_key_validity(self): """Check VPSA access key""" if not self.access_key: raise exception.ZadaraInvalidAccessKey() active_ctrl = self._get_active_controller_details() if active_ctrl is None: raise exception.ZadaraInvalidAccessKey() def _get_vpsa_volume(self, name): """Returns a single vpsa volume based on the display name""" volume = None display_name = name if re.search(r"\s", name): display_name = re.split(r"\s", name)[0] data = self.send_cmd('get_volume_by_name', display_name=display_name) if data['status'] != 0: return None volumes = data['volumes'] for vol in volumes: if vol['display_name'] == name: volume = vol break return volume def _get_vpsa_volume_by_id(self, vpsa_vol): 
"""Returns a single vpsa volume based on name""" data = self.send_cmd('get_volume', vpsa_vol=vpsa_vol) return data['volume'] def _get_volume_cg_name(self, name): """Return name of the consistency group for the volume. cg-name is a volume uniqe identifier (legacy attribute) and not consistency group as it may imply. """ volume = self._get_vpsa_volume(name) if volume is not None: return volume['cg_name'] return None def _get_all_vpsa_snapshots(self): """Returns snapshots from all vpsa volumes""" data = self.send_cmd('list_snapshots') return data['snapshots'] def _get_all_vpsa_volumes(self): """Returns all vpsa block volumes from the configured pool""" data = self.send_cmd('list_volumes') # FIXME: Work around to filter volumes belonging to given pool # Remove this when we have the API fixed to filter based # on pools. This API today does not have virtual_capacity field volumes = [] for volume in data['volumes']: if volume['pool_name'] == self.conf.zadara_vpsa_poolname: volumes.append(volume) return volumes def _get_server_name(self, initiator, share): """Return VPSA's name for server object. 
'share' will be true to search for filesystem volumes """ data = self.send_cmd('list_servers') servers = data.get('servers', []) for server in servers: if share: if server['iscsi_ip'] == initiator: return server['name'] else: if server['iqn'] == initiator: return server['name'] return None def _create_vpsa_server(self, iqn=None, iscsi_ip=None): """Create server object within VPSA (if doesn't exist).""" initiator = iscsi_ip if iscsi_ip else iqn share = True if iscsi_ip else False vpsa_srv = self._get_server_name(initiator, share) if not vpsa_srv: data = self.send_cmd('create_server', iqn=iqn, iscsi_ip=iscsi_ip) if data['status'] != 0: return None vpsa_srv = data['server_name'] return vpsa_srv def _get_servers_attached_to_volume(self, vpsa_vol): """Return all servers attached to volume.""" servers = vpsa_vol.get('server_ext_names') list_servers = [] if servers: list_servers = servers.split(',') return list_servers def _detach_vpsa_volume(self, vpsa_vol, vpsa_srv=None): """Detach volume from all attached servers.""" if vpsa_srv: list_servers_ids = [vpsa_srv] else: list_servers_ids = self._get_servers_attached_to_volume(vpsa_vol) for server_id in list_servers_ids: # Detach volume from server self.send_cmd('detach_volume', vpsa_srv=server_id, vpsa_vol=vpsa_vol['name']) def _get_volume_snapshots(self, cg_name): """Get snapshots in the consistency group""" data = self.send_cmd('list_vol_snapshots', cg_name=cg_name) snapshots = data.get('snapshots', []) return snapshots def _get_snap_id(self, cg_name, snap_name): """Return snapshot ID for particular volume.""" snapshots = self._get_volume_snapshots(cg_name) for snap_vol in snapshots: if snap_vol['display_name'] == snap_name: return snap_vol['name'] return None def _get_pool_capacity(self, pool_name): """Return pool's total and available capacities.""" data = self.send_cmd('get_pool', pool_name=pool_name) pool = data.get('pool') if pool is not None: total = int(pool['capacity']) free = int(pool['available_capacity']) 
provisioned = int(pool['provisioned_capacity']) LOG.debug('Pool %(name)s: %(total)sGB total, %(free)sGB free, ' '%(provisioned)sGB provisioned', {'name': pool_name, 'total': total, 'free': free, 'provisioned': provisioned}) return total, free, provisioned return 'unknown', 'unknown', 'unknown' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zadara/zadara.py0000664000175000017500000010254500000000000022164 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Shared File system services driver for Zadara Virtual Private Storage Array (VPSA). 
""" import socket from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils from manila import exception as manila_exception from manila.i18n import _ from manila.share import api from manila.share import driver from manila.share.drivers.zadara import common CONF = cfg.CONF CONF.register_opts(common.zadara_opts) LOG = logging.getLogger(__name__) manila_opts = [ cfg.StrOpt('zadara_share_name_template', default='OS_share-%s', help='VPSA - Default template for VPSA share names'), cfg.StrOpt('zadara_share_snap_name_template', default='OS_share-snapshot-%s', help='VPSA - Default template for VPSA share names'), cfg.StrOpt('zadara_driver_ssl_cert_path', default=None, help='Can be used to specify a non default path to a ' 'CA_BUNDLE file or directory with certificates ' 'of trusted CAs, which will be used to validate ' 'the backend')] class ZadaraVPSAShareDriver(driver.ShareDriver): """Zadara VPSA Share driver. Version history:: 20.12-01 - Driver changes intended and aligned with openstack latest release. 20.12-02 - Fixed #18723 - Manila: Parsing the export location in a more generic way while managing the vpsa share 20.12-03 - Adding the metadata support while creating share to configure vpsa. 20.12-20 - IPv6 connectivity support for Manila driver 20.12-21 - Adding unit tests and fixing review comments from the openstack community. 20.12-22 - Addressing review comments from the manila community. 20.12-23 - Addressing review comments from the manila community. 20.12-24 - Addressing review comments from the manila community. 
20.12-25 - Support host assisted share migration """ VERSION = '20.12-25' # ThirdPartySystems wiki page CI_WIKI_NAME = "ZadaraStorage_VPSA_CI" def __init__(self, *args, **kwargs): """Do initialization.""" super(ZadaraVPSAShareDriver, self).__init__(False, *args, **kwargs) self.vpsa = None self.configuration.append_config_values(common.zadara_opts) self.configuration.append_config_values(manila_opts) self.api = api.API() # The valid list of share options that can be specified # as the metadata while creating manila share self.share_options = ['smbguest', 'smbonly', 'smbwindowsacl', 'smbfilecreatemask', 'smbbrowseable', 'smbhiddenfiles', 'smbhideunreadable', 'smbhideunwriteable', 'smbhidedotfiles', 'smbstoredosattributes', 'smbdircreatemask', 'smbmaparchive', 'smbencryptionmode', 'smbenableoplocks', 'smbaiosize', 'nfsrootsquash', 'nfsallsquash', 'nfsanongid', 'nfsanonuid', 'atimeupdate', 'readaheadkb', 'crypt', 'compress', 'dedupe', 'attachpolicies'] def _check_access_key_validity(self): try: self.vpsa._check_access_key_validity() except common.exception.ZadaraInvalidAccessKey: raise manila_exception.ZadaraManilaInvalidAccessKey() def do_setup(self, context): """Any initialization the share driver does while starting. Establishes initial connection with VPSA and retrieves access_key. Need to pass driver_ssl_cert_path here (and not fetch it from the config opts directly in common code), because this config option is different for different drivers and so cannot be figured in the common code. 
""" driver_ssl_cert_path = self.configuration.zadara_driver_ssl_cert_path self.vpsa = common.ZadaraVPSAConnection(self.configuration, driver_ssl_cert_path, False) def check_for_setup_error(self): """Returns an error (exception) if prerequisites aren't met.""" self._check_access_key_validity() def vpsa_send_cmd(self, cmd, **kwargs): try: response = self.vpsa.send_cmd(cmd, **kwargs) except common.exception.UnknownCmd as e: raise manila_exception.ZadaraUnknownCmd(cmd=e.cmd) except common.exception.SessionRequestException as e: raise manila_exception.ZadaraSessionRequestException(msg=e.msg) except common.exception.BadHTTPResponseStatus as e: raise manila_exception.ZadaraBadHTTPResponseStatus(status=e.status) except common.exception.FailedCmdWithDump as e: raise manila_exception.ZadaraFailedCmdWithDump(status=e.status, data=e.data) except common.exception.ZadaraInvalidAccessKey: raise manila_exception.ZadaraManilaInvalidAccessKey() return response def _get_zadara_share_template_name(self, share_id): return self.configuration.zadara_share_name_template % share_id def _get_share_export_location(self, share): export_location = '' share_proto = share['share_proto'].upper() share_name = self._get_zadara_share_template_name(share['id']) vpsa_volume = self.vpsa._get_vpsa_volume(share_name) if not vpsa_volume: msg = (_('VPSA volume for share %s ' 'could not be found.') % share['id']) LOG.error(msg) raise manila_exception.ZadaraShareNotFound(name=share['id']) if share_proto == 'NFS': export_location = vpsa_volume['nfs_export_path'] if share_proto == 'CIFS': export_location = vpsa_volume['smb_export_path'] return export_location def _check_share_protocol(self, share): share_proto = share['share_proto'].upper() if share_proto not in ('NFS', 'CIFS'): msg = _("Only NFS or CIFS protocol are currently supported. 
" "Share provided %(share)s with protocol " "%(proto)s.") % {'share': share['id'], 'proto': share['share_proto']} LOG.error(msg) raise manila_exception.ZadaraInvalidProtocol( protocol_type=share_proto) def is_valid_metadata(self, metadata): LOG.debug('Metadata while creating share: %(metadata)s', {'metadata': metadata}) for key, value in metadata.items(): if key in self.share_options: # Check for the values allowed with provided metadata if key in ['smbguest', 'smbonly', 'smbwindowsacl', 'smbbrowseable', 'smbhideunreadable', 'smbhideunwriteable', 'smbhidedotfiles', 'smbstoredosattributes', 'smbmaparchive', 'smbenableoplocks', 'nfsrootsquash', 'nfsallsquash', 'atimeupdate', 'crypt', 'compress', 'dedupe', 'attachpolicies']: if value in ['YES', 'NO']: continue else: return False if key in ['smbfilecreatemask', 'smbdircreatemask']: if value.isdigit(): # The valid permissions should be for user,group,other # with another special digit for attributes. Ex:0755 if len(value) != 4: return False # No special permission bits for suid,sgid, # stickybit are allowed for vpsa share. if int(value[0]) != 0: return False # The permissions are always specified in octal for i in range(1, len(value)): if int(value[i]) > 7: return False continue else: return False if key == 'smbaiosize': if value.isdigit() and value in ['16384', '1']: continue else: return False if key == 'smbencryptionmode': if value in ['off', 'desired', 'required']: continue else: return False if key in ['nfsanongid', 'nfsanonuid']: if value.isdigit() and int(value) != 0: continue else: return False if key == 'readaheadkb': if value in ['16', '64', '128', '256', '512']: continue else: return False return True def create_share(self, context, share, share_server=None): """Create a Zadara share and export it. :param context: A RequestContext. :param share: A Share. :param share_server: Not used currently :return: The export locations dictionary. """ # Check share's protocol. 
# Throw an exception immediately if it is an invalid protocol. self._check_share_protocol(share) share_name = self._get_zadara_share_template_name(share['id']) # Collect the share metadata provided and validate it metadata = self.api.get_share_metadata(context, {'id': share['share_id']}) if not self.is_valid_metadata(metadata): raise manila_exception.ManilaException(_( "Not a valid metadata provided for the share %s") % share['id']) data = self.vpsa_send_cmd('create_volume', name=share_name, size=share['size'], metadata=metadata) if data['status'] != 0: raise manila_exception.ZadaraVPSAVolumeShareFailed( error=data['status']) export_location = self._get_share_export_location(share) return {'path': export_location} def _allow_access(self, context, share, access): """Allow access to the share.""" access_type = access['access_type'] share_proto = share['share_proto'].upper() if share_proto == 'CIFS': share_proto = 'SMB' if access_type != 'ip': raise manila_exception.ZadaraInvalidShareAccessType() access_ip = access['access_to'] access_level = 'YES' if access['access_level'] == 'rw': access_level = 'NO' # First: Check Active controller: if not valid, raise exception ctrl = self.vpsa._get_active_controller_details() if not ctrl: raise manila_exception.ZadaraVPSANoActiveController() # Get volume name vol_name = self._get_zadara_share_template_name(share['id']) vpsa_volume = self.vpsa._get_vpsa_volume(vol_name) if not vpsa_volume: msg = (_('VPSA volume for share %s ' 'could not be found.') % share['id']) LOG.error(msg) raise manila_exception.ZadaraShareNotFound(name=share['id']) # Get/Create server name for given IP vpsa_srv = self.vpsa._create_vpsa_server(iscsi_ip=access_ip) if not vpsa_srv: raise manila_exception.ZadaraServerCreateFailure(name=access_ip) servers = self.vpsa._get_servers_attached_to_volume(vpsa_volume) attach = None for server in servers: if server == vpsa_srv: attach = server break # Attach volume to server if attach is None: 
self.vpsa_send_cmd('attach_volume', vpsa_srv=vpsa_srv, vpsa_vol=vpsa_volume['name'], share_proto=share_proto, read_only=access_level) data = self.vpsa_send_cmd('list_vol_attachments', vpsa_vol=vpsa_volume['name']) server = None servers = data.get('servers', []) for srv in servers: if srv['iscsi_ip'] == access_ip: server = srv break if server is None: raise manila_exception.ZadaraAttachmentsNotFound( name=vpsa_volume['name']) ctrl_ip = self.vpsa._get_target_host(ctrl['ip']) properties = {'target_discovered': False, 'target_portal': (('%s:%s') % (ctrl_ip, '3260')), 'target_ip': server['iscsi_ip'], 'id': share['id'], 'auth_method': 'CHAP', 'auth_username': ctrl['chap_user'], 'auth_password': ctrl['chap_passwd']} LOG.debug('Attach properties: %(properties)s', {'properties': strutils.mask_password(properties)}) return {'driver_volume_type': share['share_proto'], 'data': properties} def delete_share(self, context, share, share_server=None): """Delete share. Auto detach from all servers. """ # Get share name share_name = self._get_zadara_share_template_name(share['id']) volume = self.vpsa._get_vpsa_volume(share_name) if not volume: LOG.warning('Volume %s could not be found. ' 'It might be already deleted', share['id']) return self.vpsa._detach_vpsa_volume(vpsa_vol=volume) # Delete volume associate with the share self.vpsa_send_cmd('delete_volume', vpsa_vol=volume['name']) def _deny_access(self, context, share, access, share_server=None): """Deny access to the share from the host. 
""" access_type = access['access_type'] if access_type != 'ip': LOG.warning('Only ip access type is allowed for zadara vpsa.') return access_ip = access['access_to'] # First: Check Active controller: if not valid, raise exception ctrl = self.vpsa._get_active_controller_details() if not ctrl: raise manila_exception.ZadaraVPSANoActiveController() # Get share name share_name = self._get_zadara_share_template_name(share['id']) volume = self.vpsa._get_vpsa_volume(share_name) if not volume: LOG.warning('Volume %s could not be found. ' 'It might be already deleted', share['id']) return vpsa_srv = self.vpsa._get_server_name(access_ip, True) if not vpsa_srv: LOG.warning('VPSA server %s could not be found.', access_ip) return servers_list = self.vpsa._get_servers_attached_to_volume(volume) if vpsa_srv not in servers_list: LOG.warning('VPSA server %(access_ip)s not attached ' 'to volume %(volume)s.', {'access_ip': access_ip, 'volume': share['id']}) return self.vpsa._detach_vpsa_volume(vpsa_vol=volume, vpsa_srv=vpsa_srv) def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): access_updates = {} if not (add_rules or delete_rules): # add_rules and delete_rules can be empty lists, in cases # like share migration for zadara driver, when the access # level is to be changed for all existing rules. For zadara # backend, we delete and re-add all the existing rules. for access_rule in access_rules: self._deny_access(context, share, access_rule) try: self._allow_access(context, share, access_rule) except manila_exception.ZadaraInvalidShareAccessType: LOG.error("Only ip access type allowed for Zadara share. " "Failed to allow %(access_level)s access to " "%(access_to)s for rule %(id)s. 
Setting rule " "to 'error' state.", {'access_level': access_rule['access_level'], 'access_to': access_rule['access_to'], 'id': access_rule['access_id']}) access_updates.update( {access_rule['access_id']: {'state': 'error'}}) else: if add_rules: # Add rules for accessing share for access_rule in add_rules: try: self._allow_access(context, share, access_rule) except manila_exception.ZadaraInvalidShareAccessType: LOG.error("Only ip access type allowed for Zadara " "share. Failed to allow %(access_level)s " "access to %(access_to)s for rule %(id)s. " "Setting rule to 'error' state.", {'access_level': access_rule['access_level'], 'access_to': access_rule['access_to'], 'id': access_rule['access_id']}) access_updates.update( {access_rule['access_id']: {'state': 'error'}}) if delete_rules: # Delete access rules for provided share for access_rule in delete_rules: self._deny_access(context, share, access_rule) return access_updates def extend_share(self, share, new_size, share_server=None): """Extend an existing share. """ # Get the backend volume name for the share share_name = self._get_zadara_share_template_name(share['id']) vpsa_volume = self.vpsa._get_vpsa_volume(share_name) if not vpsa_volume: msg = (_('VPSA volume for share %s ' 'could not be found.') % share['id']) LOG.error(msg) raise manila_exception.ZadaraShareNotFound(name=share['id']) size = vpsa_volume['virtual_capacity'] expand_size = new_size - size data = self.vpsa_send_cmd('expand_volume', vpsa_vol=vpsa_volume['name'], size=expand_size) if data['status'] != 0: raise manila_exception.ZadaraExtendShareFailed( error=data['status']) def _ensure_share(self, context, share, share_server=None): """Ensure that the share has a backend volume and it is exported. 
""" # Get the backend volume name for the share share_name = self._get_zadara_share_template_name(share['id']) vpsa_volume = self.vpsa._get_vpsa_volume(share_name) if not vpsa_volume: msg = (_('VPSA volume for share %s ' 'could not be found.') % share['id']) LOG.error(msg) raise manila_exception.ZadaraShareNotFound(name=share['id']) export_locations = share['export_locations'] if export_locations: return export_locations else: servers_list = (self.vpsa._get_servers_attached_to_volume( vpsa_volume)) if len(servers_list) != 0: msg = (_('Servers attached to the VPSA volume %s without ' 'any locations exported.') % vpsa_volume['name']) LOG.error(msg) raise manila_exception.ZadaraShareNotValid( name=share['id']) def _update_share_stats(self): backend_name = self.configuration.share_backend_name dhss = self.configuration.driver_handles_share_servers vpsa_poolname = self.configuration.zadara_vpsa_poolname (total, free, provisioned) = ( self.vpsa._get_pool_capacity(vpsa_poolname)) ctrl = self.vpsa._get_active_controller_details() if not ctrl: raise manila_exception.ZadaraVPSANoActiveController() ipv4_support = False if ':' in ctrl['ip'] else True # VPSA backend pool single_pool = dict( pool_name=vpsa_poolname, total_capacity_gb=total, free_capacity_gb=free, allocated_capacity_gb=(total - free), provisioned_capacity_gb=provisioned, reserved_percentage=self.configuration.reserved_share_percentage, reserved_snapshot_percentage=( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), reserved_share_extend_percentage=( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), compression=[True, False], dedupe=[True, False], thin_provisioning=True ) data = dict( share_backend_name=backend_name, driver_handles_share_servers=dhss, vendor_name='Zadara Storage', driver_version=self.VERSION, storage_protocol='NFS_CIFS', pools=[single_pool], snapshot_support=True, 
            create_share_from_snapshot_support=True,
            revert_to_snapshot_support=False,
            mount_snapshot_support=False,
            ipv4_support=ipv4_support,
            ipv6_support=not ipv4_support
        )
        super(ZadaraVPSAShareDriver, self)._update_share_stats(data)

    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot."""
        LOG.debug('Create snapshot: %s', snapshot['id'])

        # Retrieve the CG name for the base volume
        share = snapshot['share']
        volume_name = self._get_zadara_share_template_name(share['id'])
        cg_name = self.vpsa._get_volume_cg_name(volume_name)
        if not cg_name:
            msg = (_('VPSA volume for share %s '
                     'could not be found.') % share['id'])
            LOG.error(msg)
            raise manila_exception.ZadaraShareNotFound(name=share['id'])

        snap_name = (self.configuration.zadara_share_snap_name_template
                     % snapshot['id'])
        data = self.vpsa_send_cmd('create_snapshot',
                                  cg_name=cg_name,
                                  snap_name=snap_name)
        if data['status'] != 0:
            raise manila_exception.ZadaraVPSASnapshotCreateFailed(
                name=share['id'], error=data['status'])

        # The backend-assigned snapshot name is kept as provider_location
        # so later operations can find the snapshot again.
        return {'provider_location': data['snapshot_name']}

    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""
        LOG.debug('Delete snapshot: %s', snapshot['id'])

        # Retrieve the CG name for the base volume
        share = snapshot['share']
        volume_name = self._get_zadara_share_template_name(share['id'])
        cg_name = self.vpsa._get_volume_cg_name(volume_name)
        if not cg_name:
            # If the volume isn't present, then don't attempt to delete
            LOG.warning('snapshot: original volume %s not found, '
                        'skipping delete operation', volume_name)
            return

        snap_name = (self.configuration.zadara_share_snap_name_template
                     % snapshot['id'])
        snap_id = self.vpsa._get_snap_id(cg_name, snap_name)
        if not snap_id:
            # If the snapshot isn't present, then don't attempt to delete
            LOG.warning('snapshot: snapshot %s not found, '
                        'skipping delete operation', snap_name)
            return

        self.vpsa_send_cmd('delete_snapshot',
                           snap_id=snap_id)

    def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None): """Creates a share from a snapshot. """ LOG.debug('Creating share from snapshot: %s', snapshot['id']) # Retrieve the CG name for the base volume volume_name = (self._get_zadara_share_template_name( snapshot['share_instance_id'])) cg_name = self.vpsa._get_volume_cg_name(volume_name) if not cg_name: msg = (_('VPSA volume for share %s ' 'could not be found.') % share['id']) LOG.error(msg) raise manila_exception.ZadaraShareNotFound(name=share['id']) snap_name = (self.configuration.zadara_share_snap_name_template % snapshot['id']) snap_id = self.vpsa._get_snap_id(cg_name, snap_name) if not snap_id: msg = _('Snapshot %(name)s not found') % {'name': snap_name} LOG.error(msg) raise manila_exception.ShareSnapshotNotFound( snapshot_id=snap_name) self._check_share_protocol(share) share_name = self._get_zadara_share_template_name(share['id']) self.vpsa_send_cmd('create_clone_from_snap', cg_name=cg_name, name=share_name, snap_id=snap_id) if share['size'] > snapshot['size']: self.extend_share(share, share['size']) export_location = self._get_share_export_location(share) return [{'path': export_location}] def _get_export_name_from_export_path(self, proto, export_path): if proto == 'nfs' and '\\' in export_path: return None if proto == 'cifs' and '/' in export_path: return None # Extract the export name from the provided export path if proto == 'nfs': separator = '/' export_location = export_path.strip(separator) export_name = export_location.split(separator)[-1] else: separator = '\\' export_location = export_path.strip(separator) export_name = export_location.split(separator)[-1] return export_name def _extract_vpsa_volume_from_share(self, share): """Returns a vpsa volume based on the export location""" if not share['export_locations'][0]['path']: return None share_proto = share['share_proto'].lower() export_path = share['export_locations'][0]['path'] export_name = self._get_export_name_from_export_path(share_proto, export_path) if 
export_name is None: msg = (_('Please verify the specifed protocol and export path.')) LOG.error(msg) raise manila_exception.ManilaException(msg) volume = None volumes = self.vpsa._get_all_vpsa_volumes() # Find the volume with the corresponding export name for vol in volumes: if share_proto == 'nfs': vol_export_path = vol.get('nfs_export_path', None) else: vol_export_path = vol.get('smb_export_path', None) vol_export_name = self._get_export_name_from_export_path( share_proto, vol_export_path) if export_name == vol_export_name: volume = vol break # Check the additional smb export paths of the volume if (share_proto == 'cifs' and vol['additional_smb_export_paths_count'] > 0): for additional_path in vol['additional_smb_export_paths']: vol_export_name = self._get_export_name_from_export_path( share_proto, additional_path) if export_name == vol_export_name: volume = vol break if volume: return volume else: msg = (_('Manage backend share could not be found. It might be ' 'deleted or please verify the specifed protocol and ' 'export path.')) LOG.error(msg) raise manila_exception.ManilaException(msg) def manage_existing(self, share, driver_options): # Check whether the specified protocol is supported or not. 
        self._check_share_protocol(share)
        LOG.info("Share %(shr_path)s will be managed with share %(shr_name)s.",
                 {'shr_path': share['export_locations'][0]['path'],
                  'shr_name': share['id']})

        # Find the backend vpsa volume for the provided export location
        vpsa_volume = self._extract_vpsa_volume_from_share(share)

        # Check if the volume is available
        if vpsa_volume['status'] != 'Available':
            msg = (_('Existing share %(name)s is not available')
                   % {'name': vpsa_volume['name']})
            LOG.error(msg)
            raise manila_exception.ManilaException(msg)

        # Refuse to manage if a volume with the manila-derived name exists.
        new_share_name = self._get_zadara_share_template_name(share['id'])
        new_vpsa_share = self.vpsa._get_vpsa_volume(new_share_name)
        if new_vpsa_share:
            msg = (_('Share %(new_name)s already exists')
                   % {'new_name': new_share_name})
            LOG.error(msg)
            raise manila_exception.ManilaException(msg)

        # Rename the volume to the manila share specified name
        data = self.vpsa_send_cmd('rename_volume',
                                  vpsa_vol=vpsa_volume['name'],
                                  new_name=new_share_name)
        if data['status'] != 0:
            msg = (_('Renaming volume %(old_name)s to %(new_name)s '
                     'has failed.')
                   % {'old_name': vpsa_volume['name'],
                      'new_name': new_share_name})
            LOG.error(msg)
            raise manila_exception.ManilaException(msg)

        return {'size': vpsa_volume['provisioned_capacity'],
                'export_locations': share['export_locations'][0]['path']}

    def unmanage(self, share):
        """Removes the specified volume from Manila management"""
        pass

    def manage_existing_snapshot(self, snapshot, driver_options):
        """Bring an existing backend snapshot under manila management.

        The backend snapshot id must be supplied via the snapshot's
        'provider_location'; the snapshot is renamed to the manila
        naming template.
        """
        share = snapshot['share']
        share_name = self._get_zadara_share_template_name(share['id'])
        vpsa_volume = self.vpsa._get_vpsa_volume(share_name)
        if not vpsa_volume:
            msg = (_('Volume %(name)s could not be found. '
                     'It might be already deleted')
                   % {'name': share_name})
            LOG.error(msg)
            raise manila_exception.ZadaraShareNotFound(name=share['id'])

        # Check if the provider_location is specified
        if not snapshot['provider_location']:
            msg = (_('Provider location as snap id of the VPSA backend '
                     'should be provided'))
            LOG.error(msg)
            raise manila_exception.ManilaException(msg)

        new_name = (self.configuration.zadara_share_snap_name_template
                    % snapshot['id'])
        new_snap_id = self.vpsa._get_snap_id(vpsa_volume['cg_name'],
                                             new_name)
        if new_snap_id:
            # Already renamed — treat as managed (idempotent).
            msg = (_('Snapshot with name %s already exists') % new_name)
            LOG.debug(msg)
            return

        data = self.vpsa_send_cmd('rename_snapshot',
                                  snap_id=snapshot['provider_location'],
                                  new_name=new_name)
        if data['status'] != 0:
            raise manila_exception.ZadaraVPSASnapshotManageFailed(
                snap_id=snapshot['provider_location'],
                error=data['status'])

    def unmanage_snapshot(self, snapshot):
        """Removes the specified snapshot from Manila management"""
        pass

    def get_configured_ip_versions(self):
        """Get allowed IP versions.

        The shares created should have export location as per the
        IP version. Currently, zadara backend doesn't support both
        ipv4 and ipv6.
Collect the supported IP version from the vpsa's active controller """ ctrl = self.vpsa._get_active_controller_details() if not ctrl: raise manila_exception.ZadaraVPSANoActiveController() if ':' in ctrl['ip']: return [6] else: return [4] def get_backend_info(self, context): return { 'version': self.VERSION, 'vsa_feip': socket.gethostbyname(self.vpsa.conf.zadara_vpsa_host), 'vsa_port': self.vpsa.conf.zadara_vpsa_port } def ensure_shares(self, context, shares): updates = {} for share in shares: updates[share['id']] = { 'export_locations': self._ensure_share(context, share)} return updates ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9376714 manila-21.0.0/manila/share/drivers/zfsonlinux/0000775000175000017500000000000000000000000021316 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zfsonlinux/__init__.py0000664000175000017500000000000000000000000023415 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zfsonlinux/driver.py0000664000175000017500000020666600000000000023203 0ustar00zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Module with ZFSonLinux share driver that utilizes ZFS filesystem resources and exports them as shares. """ import math import os import time from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import configuration from manila.share import driver from manila.share.drivers.zfsonlinux import utils as zfs_utils from manila.share.manager import share_manager_opts # noqa from manila.share import share_types from manila.share import utils as share_utils from manila import utils zfsonlinux_opts = [ cfg.HostAddressOpt( "zfs_share_export_ip", required=True, help="IP to be added to user-facing export location. Required."), cfg.HostAddressOpt( "zfs_service_ip", required=True, help="IP to be added to admin-facing export location. Required."), cfg.ListOpt( "zfs_zpool_list", required=True, help="Specify list of zpools that are allowed to be used by backend. " "Can contain nested datasets. Examples: " "Without nested dataset: 'zpool_name'. " "With nested dataset: 'zpool_name/nested_dataset_name'. " "Required."), cfg.ListOpt( "zfs_dataset_creation_options", help="Define here list of options that should be applied " "for each dataset creation if needed. Example: " "compression=gzip,dedup=off. " "Note that, for secondary replicas option 'readonly' will be set " "to 'on' and for active replicas to 'off' in any way. " "Also, 'quota' will be equal to share size. Optional."), cfg.StrOpt( "zfs_dataset_name_prefix", default='manila_share_', help="Prefix to be used in each dataset name. Optional."), cfg.StrOpt( "zfs_dataset_snapshot_name_prefix", default='manila_share_snapshot_', help="Prefix to be used in each dataset snapshot name. Optional."), cfg.BoolOpt( "zfs_use_ssh", default=False, help="Remote ZFS storage hostname that should be used for SSH'ing. 
" "Optional."), cfg.StrOpt( "zfs_ssh_username", help="SSH user that will be used in 2 cases: " "1) By manila-share service in case it is located on different " "host than its ZFS storage. " "2) By manila-share services with other ZFS backends that " "perform replication. " "It is expected that SSH'ing will be key-based, passwordless. " "This user should be passwordless sudoer. Optional."), cfg.StrOpt( "zfs_ssh_user_password", secret=True, help="Password for user that is used for SSH'ing ZFS storage host. " "Not used for replication operations. They require " "passwordless SSH access. Optional."), cfg.StrOpt( "zfs_ssh_private_key_path", help="Path to SSH private key that should be used for SSH'ing ZFS " "storage host. Not used for replication operations. Optional."), cfg.ListOpt( "zfs_share_helpers", default=[ "NFS=manila.share.drivers.zfsonlinux.utils.NFSviaZFSHelper", ], help="Specify list of share export helpers for ZFS storage. " "It should look like following: " "'FOO_protocol=foo.FooClass,BAR_protocol=bar.BarClass'. " "Required."), cfg.StrOpt( "zfs_replica_snapshot_prefix", default="tmp_snapshot_for_replication_", help="Set snapshot prefix for usage in ZFS replication. Required."), cfg.StrOpt( "zfs_migration_snapshot_prefix", default="tmp_snapshot_for_share_migration_", help="Set snapshot prefix for usage in ZFS migration. Required."), ] CONF = cfg.CONF CONF.register_opts(zfsonlinux_opts) LOG = log.getLogger(__name__) def ensure_share_server_not_provided(f): def wrap(self, context, *args, **kwargs): server = kwargs.get( "share_server", kwargs.get("destination_share_server")) if server: raise exception.InvalidInput( reason=_("Share server handling is not available. " "But 'share_server' was provided. '%s'. 
" "Share network should not be used.") % server.get( "id", server)) return f(self, context, *args, **kwargs) return wrap def get_backend_configuration(backend_name): config_stanzas = CONF.list_all_sections() if backend_name not in config_stanzas: msg = _("Could not find backend stanza %(backend_name)s in " "configuration which is required for share replication and " "migration. Available stanzas are %(stanzas)s") params = { "stanzas": config_stanzas, "backend_name": backend_name, } raise exception.BadConfigurationException(reason=msg % params) config = configuration.Configuration( driver.share_opts, config_group=backend_name) config.append_config_values(zfsonlinux_opts) config.append_config_values(share_manager_opts) config.append_config_values(driver.ssh_opts) return config class ZFSonLinuxShareDriver(zfs_utils.ExecuteMixin, driver.ShareDriver): def __init__(self, *args, **kwargs): super(ZFSonLinuxShareDriver, self).__init__( [False], *args, config_opts=[zfsonlinux_opts], **kwargs) self.replica_snapshot_prefix = ( self.configuration.zfs_replica_snapshot_prefix) self.migration_snapshot_prefix = ( self.configuration.zfs_migration_snapshot_prefix) self.backend_name = self.configuration.safe_get( 'share_backend_name') or 'ZFSonLinux' self.zpool_list = self._get_zpool_list() self.dataset_creation_options = ( self.configuration.zfs_dataset_creation_options) self.share_export_ip = self.configuration.zfs_share_export_ip self.service_ip = self.configuration.zfs_service_ip self.private_storage = kwargs.get('private_storage') self._helpers = {} # Set config based capabilities self._init_common_capabilities() self._shell_executors = {} def _get_shell_executor_by_host(self, host): backend_name = share_utils.extract_host(host, level='backend_name') if backend_name in CONF.enabled_share_backends: # Return executor of this host return self.execute elif backend_name not in self._shell_executors: config = get_backend_configuration(backend_name) self._shell_executors[backend_name] = 
( zfs_utils.get_remote_shell_executor( ip=config.zfs_service_ip, port=22, conn_timeout=config.ssh_conn_timeout, login=config.zfs_ssh_username, password=config.zfs_ssh_user_password, privatekey=config.zfs_ssh_private_key_path, max_size=10, ) ) # Return executor of remote host return self._shell_executors[backend_name] def _init_common_capabilities(self): self.common_capabilities = {} if 'dedup=on' in self.dataset_creation_options: self.common_capabilities['dedupe'] = [True] elif 'dedup=off' in self.dataset_creation_options: self.common_capabilities['dedupe'] = [False] else: self.common_capabilities['dedupe'] = [True, False] if 'compression=off' in self.dataset_creation_options: self.common_capabilities['compression'] = [False] elif any('compression=' in option for option in self.dataset_creation_options): self.common_capabilities['compression'] = [True] else: self.common_capabilities['compression'] = [True, False] # NOTE(vponomaryov): Driver uses 'quota' approach for # ZFS dataset. So, we can consider it as # 'always thin provisioned' because this driver never reserves # space for dataset. self.common_capabilities['thin_provisioning'] = [True] self.common_capabilities['max_over_subscription_ratio'] = ( self.configuration.max_over_subscription_ratio) self.common_capabilities['qos'] = [False] def _get_zpool_list(self): zpools = [] for zpool in self.configuration.zfs_zpool_list: zpool_name = zpool.split('/')[0] if zpool_name in zpools: raise exception.BadConfigurationException( reason=_("Using the same zpool twice is prohibited. " "Duplicate is '%(zpool)s'. List of zpools: " "%(zpool_list)s.") % { 'zpool': zpool, 'zpool_list': ', '.join( self.configuration.zfs_zpool_list)}) zpools.append(zpool_name) return zpools @zfs_utils.zfs_dataset_synchronized def _delete_dataset_or_snapshot_with_retry(self, name): """Attempts to destroy some dataset or snapshot with retries.""" # NOTE(vponomaryov): it is possible to see 'dataset is busy' error # under the load. 
So, we are ok to perform retry in this case. mountpoint = self.get_zfs_option(name, 'mountpoint') if '@' not in name: # NOTE(vponomaryov): check that dataset has no open files. start_point = time.time() while time.time() - start_point < 60: try: out, err = self.execute('lsof', '-w', mountpoint) except exception.ProcessExecutionError: # NOTE(vponomaryov): lsof returns code 1 if search # didn't give results. break LOG.debug("Cannot destroy dataset '%(name)s', it has " "opened files. Will wait 2 more seconds. " "Out: \n%(out)s", { 'name': name, 'out': out}) time.sleep(2) else: raise exception.ZFSonLinuxException( msg=_("Could not destroy '%s' dataset, " "because it had opened files.") % name) @utils.retry(retry_param=exception.ProcessExecutionError, retries=10) def _zfs_destroy_with_retry(): """Retry destroying dataset ten times with exponential backoff.""" # NOTE(bswartz): There appears to be a bug in ZFS when creating and # destroying datasets concurrently where the filesystem remains # mounted even though ZFS thinks it's unmounted. The most reliable # workaround I've found is to force the unmount, then attempt the # destroy, with short pauses around the unmount. (See bug#1546723) try: self.execute('sudo', 'umount', mountpoint) except exception.ProcessExecutionError: # Ignore failed umount, it's normal pass time.sleep(2) # NOTE(vponomaryov): Now, when no file usages and mounts of dataset # exist, destroy dataset. self.zfs('destroy', '-f', name) _zfs_destroy_with_retry() def _setup_helpers(self): """Setups share helper for ZFS backend.""" self._helpers = {} helpers = self.configuration.zfs_share_helpers if helpers: for helper_str in helpers: share_proto, __, import_str = helper_str.partition('=') helper = importutils.import_class(import_str) self._helpers[share_proto.upper()] = helper( self.configuration) else: raise exception.BadConfigurationException( reason=_( "No share helpers selected for ZFSonLinux Driver. 
" "Please specify using config option 'zfs_share_helpers'.")) def _get_share_helper(self, share_proto): """Returns share helper specific for used share protocol.""" helper = self._helpers.get(share_proto) if helper: return helper else: raise exception.InvalidShare( reason=_("Wrong, unsupported or disabled protocol - " "'%s'.") % share_proto) def do_setup(self, context): """Perform basic setup and checks.""" super(ZFSonLinuxShareDriver, self).do_setup(context) self._setup_helpers() for ip in (self.share_export_ip, self.service_ip): if not utils.is_valid_ip_address(ip, 4): raise exception.BadConfigurationException( reason=_("Wrong IP address provided: " "%s") % self.share_export_ip) if not self.zpool_list: raise exception.BadConfigurationException( reason=_("No zpools specified for usage: " "%s") % self.zpool_list) # Make pool mounts shared so that cloned namespaces receive unmounts # and don't prevent us from unmounting datasets for zpool in self.configuration.zfs_zpool_list: self.execute('sudo', 'mount', '--make-rshared', ('/%s' % zpool)) if self.configuration.zfs_use_ssh: # Check workability of SSH executor self.ssh_executor('whoami') def _get_pools_info(self): """Returns info about all pools used by backend.""" pools = [] for zpool in self.zpool_list: free_size = self.get_zpool_option(zpool, 'free') free_size = utils.translate_string_size_to_float(free_size) total_size = self.get_zpool_option(zpool, 'size') total_size = utils.translate_string_size_to_float(total_size) pool = { 'pool_name': zpool, 'total_capacity_gb': float(total_size), 'free_capacity_gb': float(free_size), 'reserved_percentage': self.configuration.reserved_share_percentage, 'reserved_snapshot_percentage': ( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), 'reserved_share_extend_percentage': ( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), } pool.update(self.common_capabilities) if 
self.configuration.replication_domain: pool['replication_type'] = 'readable' pools.append(pool) return pools def _update_share_stats(self): """Retrieves share stats info.""" data = { 'share_backend_name': self.backend_name, 'storage_protocol': 'NFS', 'reserved_percentage': self.configuration.reserved_share_percentage, 'reserved_snapshot_percentage': ( self.configuration.reserved_share_from_snapshot_percentage or self.configuration.reserved_share_percentage), 'reserved_share_extend_percentage': ( self.configuration.reserved_share_extend_percentage or self.configuration.reserved_share_percentage), 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'driver_name': 'ZFS', 'pools': self._get_pools_info(), } if self.configuration.replication_domain: data['replication_type'] = 'readable' super(ZFSonLinuxShareDriver, self)._update_share_stats(data) def _get_share_name(self, share_id): """Returns name of dataset used for given share.""" prefix = self.configuration.zfs_dataset_name_prefix or '' return prefix + share_id.replace('-', '_') def _get_snapshot_name(self, snapshot_id): """Returns name of dataset snapshot used for given share snapshot.""" prefix = self.configuration.zfs_dataset_snapshot_name_prefix or '' return prefix + snapshot_id.replace('-', '_') def _get_dataset_creation_options(self, share, is_readonly=False): """Returns list of options to be used for dataset creation.""" options = ['quota=%sG' % share['size']] extra_specs = share_types.get_extra_specs_from_share(share) dedupe_set = False dedupe = extra_specs.get('dedupe') if dedupe: dedupe = strutils.bool_from_string( dedupe.lower().split(' ')[-1], default=dedupe) if (dedupe in self.common_capabilities['dedupe']): options.append('dedup=%s' % ('on' if dedupe else 'off')) dedupe_set = True else: raise exception.ZFSonLinuxException(msg=_( "Cannot use requested '%(requested)s' value of 'dedupe' " "extra spec. 
It does not fit allowed value '%(allowed)s' " "that is configured for backend.") % { 'requested': dedupe, 'allowed': self.common_capabilities['dedupe']}) compression_set = False compression_type = extra_specs.get('zfsonlinux:compression') if compression_type: if (compression_type == 'off' and False in self.common_capabilities['compression']): options.append('compression=off') compression_set = True elif (compression_type != 'off' and True in self.common_capabilities['compression']): options.append('compression=%s' % compression_type) compression_set = True else: raise exception.ZFSonLinuxException(msg=_( "Cannot use value '%s' of extra spec " "'zfsonlinux:compression' because compression is disabled " "for this backend. Set extra spec 'compression=True' to " "make scheduler pick up appropriate backend." ) % compression_type) for option in self.dataset_creation_options or []: if any(v in option for v in ( 'readonly', 'sharenfs', 'sharesmb', 'quota')): continue if 'dedup' in option and dedupe_set is True: continue if 'compression' in option and compression_set is True: continue options.append(option) if is_readonly: options.append('readonly=on') else: options.append('readonly=off') return options def _get_dataset_name(self, share): """Returns name of dataset used for given share.""" pool_name = share_utils.extract_host(share['host'], level='pool') # Pick pool with nested dataset name if set up for pool in self.configuration.zfs_zpool_list: pool_data = pool.split('/') if (pool_name == pool_data[0] and len(pool_data) > 1): pool_name = pool if pool_name[-1] == '/': pool_name = pool_name[0:-1] break dataset_name = self._get_share_name(share['id']) full_dataset_name = '%(pool)s/%(dataset)s' % { 'pool': pool_name, 'dataset': dataset_name} return full_dataset_name @ensure_share_server_not_provided def create_share(self, context, share, share_server=None): """Is called to create a share.""" options = self._get_dataset_creation_options(share, is_readonly=False) cmd = 
['create'] for option in options: cmd.extend(['-o', option]) dataset_name = self._get_dataset_name(share) cmd.append(dataset_name) ssh_cmd = '%(username)s@%(host)s' % { 'username': self.configuration.zfs_ssh_username, 'host': self.service_ip, } pool_name = share_utils.extract_host(share['host'], level='pool') self.private_storage.update( share['id'], { 'entity_type': 'share', 'dataset_name': dataset_name, 'ssh_cmd': ssh_cmd, # used with replication and migration 'pool_name': pool_name, # used in replication 'used_options': ' '.join(options), } ) self.zfs(*cmd) return self._get_share_helper( share['share_proto']).create_exports(dataset_name) @ensure_share_server_not_provided def delete_share(self, context, share, share_server=None): """Is called to remove a share.""" pool_name = self.private_storage.get(share['id'], 'pool_name') pool_name = pool_name or share_utils.extract_host( share["host"], level="pool") dataset_name = self.private_storage.get(share['id'], 'dataset_name') if not dataset_name: dataset_name = self._get_dataset_name(share) out, err = self.zfs('list', '-r', pool_name) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] != dataset_name: continue # Delete dataset's snapshots first out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name) snapshots = self.parse_zfs_answer(out) full_snapshot_prefix = ( dataset_name + '@') for snap in snapshots: if full_snapshot_prefix in snap['NAME']: self._delete_dataset_or_snapshot_with_retry(snap['NAME']) self._get_share_helper( share['share_proto']).remove_exports(dataset_name) self._delete_dataset_or_snapshot_with_retry(dataset_name) break else: LOG.warning( "Share with '%(id)s' ID and '%(name)s' NAME is " "absent on backend. 
Nothing has been deleted.", {'id': share['id'], 'name': dataset_name}) self.private_storage.delete(share['id']) @ensure_share_server_not_provided def create_snapshot(self, context, snapshot, share_server=None): """Is called to create a snapshot.""" dataset_name = self.private_storage.get( snapshot['share_instance_id'], 'dataset_name') snapshot_tag = self._get_snapshot_name(snapshot['id']) snapshot_name = dataset_name + '@' + snapshot_tag self.private_storage.update( snapshot['snapshot_id'], { 'entity_type': 'snapshot', 'snapshot_tag': snapshot_tag, } ) self.zfs('snapshot', snapshot_name) return {"provider_location": snapshot_name} @ensure_share_server_not_provided def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove a snapshot.""" self._delete_snapshot(context, snapshot) self.private_storage.delete(snapshot['snapshot_id']) def _get_saved_snapshot_name(self, snapshot_instance): snapshot_tag = self.private_storage.get( snapshot_instance['snapshot_id'], 'snapshot_tag') dataset_name = self.private_storage.get( snapshot_instance['share_instance_id'], 'dataset_name') snapshot_name = dataset_name + '@' + snapshot_tag return snapshot_name def _delete_snapshot(self, context, snapshot): snapshot_name = self._get_saved_snapshot_name(snapshot) out, err = self.zfs('list', '-r', '-t', 'snapshot', snapshot_name) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] == snapshot_name: self._delete_dataset_or_snapshot_with_retry(snapshot_name) break else: LOG.warning( "Snapshot with '%(id)s' ID and '%(name)s' NAME is " "absent on backend. 
Nothing has been deleted.", {'id': snapshot['id'], 'name': snapshot_name}) @ensure_share_server_not_provided def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create a share from snapshot.""" src_backend_name = share_utils.extract_host( snapshot.share_instance['host'], level='backend_name' ) src_snapshot_name = self._get_saved_snapshot_name(snapshot) dataset_name = self._get_dataset_name(share) dst_backend_ssh_cmd = '%(username)s@%(host)s' % { 'username': self.configuration.zfs_ssh_username, 'host': self.service_ip, } dst_backend_pool_name = share_utils.extract_host(share['host'], level='pool') options = self._get_dataset_creation_options(share, is_readonly=False) self.private_storage.update( share['id'], { 'entity_type': 'share', 'dataset_name': dataset_name, 'ssh_cmd': dst_backend_ssh_cmd, # used in replication 'pool_name': dst_backend_pool_name, # used in replication 'used_options': options, } ) # NOTE(andrebeltrami): Implementing the support for create share # from snapshot in different backends in different hosts src_config = get_backend_configuration(src_backend_name) src_backend_ssh_cmd = '%(username)s@%(host)s' % { 'username': src_config.zfs_ssh_username, 'host': src_config.zfs_service_ip, } self.execute( # NOTE(vponomaryov): SSH is used as workaround for 'execute' # implementation restriction that does not support usage # of '|'. 'ssh', src_backend_ssh_cmd, 'sudo', 'zfs', 'send', '-vD', src_snapshot_name, '|', 'ssh', dst_backend_ssh_cmd, 'sudo', 'zfs', 'receive', '-v', dataset_name, ) # Apply options based on used share type that may differ from # one used for original share. for option in options: self.zfs('set', option, dataset_name) # Delete with retry as right after creation it may be temporary busy. 
self.execute_with_retry( 'sudo', 'zfs', 'destroy', dataset_name + '@' + src_snapshot_name.split('@')[-1]) return self._get_share_helper( share['share_proto']).create_exports(dataset_name) def get_pool(self, share): """Return pool name where the share resides on. :param share: The share hosted by the driver. """ pool_name = share_utils.extract_host(share['host'], level='pool') return pool_name @ensure_share_server_not_provided def ensure_share(self, context, share, share_server=None): """Invoked to ensure that given share is exported.""" dataset_name = self.private_storage.get(share['id'], 'dataset_name') if not dataset_name: dataset_name = self._get_dataset_name(share) pool_name = share_utils.extract_host(share['host'], level='pool') out, err = self.zfs('list', '-r', pool_name) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] == dataset_name: ssh_cmd = '%(username)s@%(host)s' % { 'username': self.configuration.zfs_ssh_username, 'host': self.service_ip, } self.private_storage.update( share['id'], {'ssh_cmd': ssh_cmd}) sharenfs = self.get_zfs_option(dataset_name, 'sharenfs') if sharenfs != 'off': self.zfs('share', dataset_name) export_locations = self._get_share_helper( share['share_proto']).get_exports(dataset_name) return export_locations else: raise exception.ShareResourceNotFound(share_id=share['id']) def get_network_allocations_number(self): """ZFS does not handle networking. 
Return 0.""" return 0 @ensure_share_server_not_provided def extend_share(self, share, new_size, share_server=None): """Extends size of existing share.""" dataset_name = self._get_dataset_name(share) self.zfs('set', 'quota=%sG' % new_size, dataset_name) @ensure_share_server_not_provided def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" dataset_name = self._get_dataset_name(share) consumed_space = self.get_zfs_option(dataset_name, 'used') consumed_space = utils.translate_string_size_to_float(consumed_space) if consumed_space >= new_size: raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) self.zfs('set', 'quota=%sG' % new_size, dataset_name) @ensure_share_server_not_provided def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Updates access rules for given share.""" dataset_name = self._get_dataset_name(share) executor = self._get_shell_executor_by_host(share['host']) return self._get_share_helper(share['share_proto']).update_access( dataset_name, access_rules, add_rules, delete_rules, executor=executor) def manage_existing(self, share, driver_options): """Manage existing ZFS dataset as manila share. ZFSonLinux driver accepts only one driver_option 'size'. If an administrator provides this option, then such quota will be set to dataset and used as share size. Otherwise, driver will set quota equal to nearest bigger rounded integer of usage size. Driver does not expect mountpoint to be changed (should be equal to default that is "/%(dataset_name)s"). :param share: share data :param driver_options: Empty dict or dict with 'size' option. :return: dict with share size and its export locations. 
""" old_export_location = share["export_locations"][0]["path"] old_dataset_name = old_export_location.split(":/")[-1] scheduled_pool_name = share_utils.extract_host( share["host"], level="pool") actual_pool_name = old_dataset_name.split("/")[0] new_dataset_name = self._get_dataset_name(share) # Calculate quota for managed dataset quota = driver_options.get("size") if not quota: consumed_space = self.get_zfs_option(old_dataset_name, "used") consumed_space = utils.translate_string_size_to_float( consumed_space) quota = int(consumed_space) + 1 share["size"] = int(quota) # Save dataset-specific data in private storage options = self._get_dataset_creation_options(share, is_readonly=False) ssh_cmd = "%(username)s@%(host)s" % { "username": self.configuration.zfs_ssh_username, "host": self.service_ip, } # Perform checks on requested dataset if actual_pool_name != scheduled_pool_name: raise exception.ZFSonLinuxException( _("Cannot manage share '%(share_id)s' " "(share_instance '%(si_id)s'), because scheduled " "pool '%(sch)s' and actual '%(actual)s' differ.") % { "share_id": share["share_id"], "si_id": share["id"], "sch": scheduled_pool_name, "actual": actual_pool_name}) out, err = self.zfs("list", "-r", actual_pool_name) data = self.parse_zfs_answer(out) for datum in data: if datum["NAME"] == old_dataset_name: break else: raise exception.ZFSonLinuxException( _("Cannot manage share '%(share_id)s' " "(share_instance '%(si_id)s'), because dataset " "'%(dataset)s' not found in zpool '%(zpool)s'.") % { "share_id": share["share_id"], "si_id": share["id"], "dataset": old_dataset_name, "zpool": actual_pool_name}) # Unmount the dataset before attempting to rename and mount try: self._unmount_share_with_retry(old_dataset_name) except exception.ZFSonLinuxException: msg = _("Unable to unmount share before renaming and re-mounting.") raise exception.ZFSonLinuxException(message=msg) # Rename the dataset and mount with new name self.zfs_with_retry("rename", old_dataset_name, 
new_dataset_name) try: self.zfs("mount", new_dataset_name) except exception.ProcessExecutionError: # Workaround for bug/1785180 out, err = self.zfs("mount") mounted = any([new_dataset_name in mountedfs for mountedfs in out.splitlines()]) if not mounted: raise # Apply options to dataset for option in options: self.zfs("set", option, new_dataset_name) # Get new export locations of renamed dataset export_locations = self._get_share_helper( share["share_proto"]).get_exports(new_dataset_name) self.private_storage.update( share["id"], { "entity_type": "share", "dataset_name": new_dataset_name, "ssh_cmd": ssh_cmd, # used in replication "pool_name": actual_pool_name, # used in replication "used_options": " ".join(options), } ) return {"size": share["size"], "export_locations": export_locations} def unmanage(self, share): """Removes the specified share from Manila management.""" self.private_storage.delete(share['id']) def manage_existing_snapshot(self, snapshot_instance, driver_options): """Manage existing share snapshot with manila. :param snapshot_instance: SnapshotInstance data :param driver_options: expects only one optional key 'size'. 
:return: dict with share snapshot instance fields for update, example:: { 'size': 1, 'provider_location': 'path/to/some/dataset@some_snapshot_tag', } """ snapshot_size = int(driver_options.get("size", 0)) old_provider_location = snapshot_instance.get("provider_location") old_snapshot_tag = old_provider_location.split("@")[-1] new_snapshot_tag = self._get_snapshot_name(snapshot_instance["id"]) self.private_storage.update( snapshot_instance["snapshot_id"], { "entity_type": "snapshot", "old_snapshot_tag": old_snapshot_tag, "snapshot_tag": new_snapshot_tag, } ) try: self.zfs("list", "-r", "-t", "snapshot", old_provider_location) except exception.ProcessExecutionError as e: raise exception.ManageInvalidShareSnapshot(reason=e.stderr) if not snapshot_size: consumed_space = self.get_zfs_option(old_provider_location, "used") consumed_space = utils.translate_string_size_to_float( consumed_space) snapshot_size = int(math.ceil(consumed_space)) dataset_name = self.private_storage.get( snapshot_instance["share_instance_id"], "dataset_name") new_provider_location = dataset_name + "@" + new_snapshot_tag self.zfs("rename", old_provider_location, new_provider_location) return { "size": snapshot_size, "provider_location": new_provider_location, } def unmanage_snapshot(self, snapshot_instance): """Unmanage dataset snapshot.""" self.private_storage.delete(snapshot_instance["snapshot_id"]) @utils.retry(retry_param=exception.ZFSonLinuxException, retries=10) def _unmount_share_with_retry(self, share_name): out, err = self.execute("sudo", "mount") if "%s " % share_name not in out: return self.zfs_with_retry("umount", "-f", share_name) out, err = self.execute("sudo", "mount") if "%s " % share_name in out: raise exception.ZFSonLinuxException( _("Unable to unmount dataset %s"), share_name) def _get_replication_snapshot_prefix(self, replica): """Returns replica-based snapshot prefix.""" replication_snapshot_prefix = "%s_%s" % ( self.replica_snapshot_prefix, replica['id'].replace('-', '_')) 
return replication_snapshot_prefix def _get_replication_snapshot_tag(self, replica): """Returns replica- and time-based snapshot tag.""" current_time = timeutils.utcnow().isoformat() snapshot_tag = "%s_time_%s" % ( self._get_replication_snapshot_prefix(replica), current_time) return snapshot_tag def _get_active_replica(self, replica_list): for replica in replica_list: if replica['replica_state'] == constants.REPLICA_STATE_ACTIVE: return replica msg = _("Active replica not found.") raise exception.ReplicationException(reason=msg) def _get_migration_snapshot_prefix(self, share_instance): """Returns migration-based snapshot prefix.""" migration_snapshot_prefix = "%s_%s" % ( self.migration_snapshot_prefix, share_instance['id'].replace('-', '_')) return migration_snapshot_prefix def _get_migration_snapshot_tag(self, share_instance): """Returns migration- and time-based snapshot tag.""" current_time = timeutils.utcnow().isoformat() snapshot_tag = "%s_time_%s" % ( self._get_migration_snapshot_prefix(share_instance), current_time) snapshot_tag = ( snapshot_tag.replace('-', '_').replace('.', '_').replace(':', '_')) return snapshot_tag @ensure_share_server_not_provided def create_replica(self, context, replica_list, new_replica, access_rules, replica_snapshots, share_server=None): """Replicates the active replica to a new replica on this backend.""" active_replica = self._get_active_replica(replica_list) src_dataset_name = self.private_storage.get( active_replica['id'], 'dataset_name') ssh_to_src_cmd = self.private_storage.get( active_replica['id'], 'ssh_cmd') dst_dataset_name = self._get_dataset_name(new_replica) ssh_cmd = '%(username)s@%(host)s' % { 'username': self.configuration.zfs_ssh_username, 'host': self.service_ip, } snapshot_tag = self._get_replication_snapshot_tag(new_replica) src_snapshot_name = ( '%(dataset_name)s@%(snapshot_tag)s' % { 'snapshot_tag': snapshot_tag, 'dataset_name': src_dataset_name, } ) # Save valuable data to DB 
self.private_storage.update(active_replica['id'], { 'repl_snapshot_tag': snapshot_tag, }) self.private_storage.update(new_replica['id'], { 'entity_type': 'replica', 'replica_type': 'readable', 'dataset_name': dst_dataset_name, 'ssh_cmd': ssh_cmd, 'pool_name': share_utils.extract_host( new_replica['host'], level='pool'), 'repl_snapshot_tag': snapshot_tag, }) # Create temporary snapshot. It will exist until following replica sync # After it - new one will appear and so in loop. self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'snapshot', src_snapshot_name, ) # Send/receive temporary snapshot out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDR', src_snapshot_name, '|', 'ssh', ssh_cmd, 'sudo', 'zfs', 'receive', '-v', dst_dataset_name, ) msg = ("Info about replica '%(replica_id)s' creation is following: " "\n%(out)s") LOG.debug(msg, {'replica_id': new_replica['id'], 'out': out}) # Make replica readonly self.zfs('set', 'readonly=on', dst_dataset_name) # Set original share size as quota to new replica self.zfs('set', 'quota=%sG' % active_replica['size'], dst_dataset_name) # Apply access rules from original share self._get_share_helper(new_replica['share_proto']).update_access( dst_dataset_name, access_rules, add_rules=[], delete_rules=[], make_all_ro=True) return { 'export_locations': self._get_share_helper( new_replica['share_proto']).create_exports(dst_dataset_name), 'replica_state': constants.REPLICA_STATE_IN_SYNC, 'access_rules_status': constants.STATUS_ACTIVE, } @ensure_share_server_not_provided def delete_replica(self, context, replica_list, replica_snapshots, replica, share_server=None): """Deletes a replica. 
This is called on the destination backend.""" pool_name = self.private_storage.get(replica['id'], 'pool_name') dataset_name = self.private_storage.get(replica['id'], 'dataset_name') if not dataset_name: dataset_name = self._get_dataset_name(replica) # Delete dataset's snapshots first out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name) data = self.parse_zfs_answer(out) for datum in data: if dataset_name in datum['NAME']: self._delete_dataset_or_snapshot_with_retry(datum['NAME']) # Now we delete dataset itself out, err = self.zfs('list', '-r', pool_name) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] == dataset_name: self._get_share_helper( replica['share_proto']).remove_exports(dataset_name) self._delete_dataset_or_snapshot_with_retry(dataset_name) break else: LOG.warning( "Share replica with '%(id)s' ID and '%(name)s' NAME is " "absent on backend. Nothing has been deleted.", {'id': replica['id'], 'name': dataset_name}) self.private_storage.delete(replica['id']) @ensure_share_server_not_provided def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): """Syncs replica and updates its 'replica_state'.""" return self._update_replica_state( context, replica_list, replica, replica_snapshots, access_rules) def _update_replica_state(self, context, replica_list, replica, replica_snapshots=None, access_rules=None): active_replica = self._get_active_replica(replica_list) src_dataset_name = self.private_storage.get( active_replica['id'], 'dataset_name') ssh_to_src_cmd = self.private_storage.get( active_replica['id'], 'ssh_cmd') ssh_to_dst_cmd = self.private_storage.get( replica['id'], 'ssh_cmd') dst_dataset_name = self.private_storage.get( replica['id'], 'dataset_name') # Create temporary snapshot previous_snapshot_tag = self.private_storage.get( replica['id'], 'repl_snapshot_tag') snapshot_tag = self._get_replication_snapshot_tag(replica) src_snapshot_name = src_dataset_name + '@' + 
snapshot_tag self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'snapshot', src_snapshot_name, ) # Make sure it is readonly self.zfs('set', 'readonly=on', dst_dataset_name) # Send/receive diff between previous snapshot and last one out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDRI', previous_snapshot_tag, src_snapshot_name, '|', 'ssh', ssh_to_dst_cmd, 'sudo', 'zfs', 'receive', '-vF', dst_dataset_name, ) msg = ("Info about last replica '%(replica_id)s' sync is following: " "\n%(out)s") LOG.debug(msg, {'replica_id': replica['id'], 'out': out}) # Update DB data that will be used on following replica sync self.private_storage.update(active_replica['id'], { 'repl_snapshot_tag': snapshot_tag, }) self.private_storage.update( replica['id'], {'repl_snapshot_tag': snapshot_tag}) # Destroy all snapshots on dst filesystem except referenced ones. snap_references = set() for repl in replica_list: snap_references.add( self.private_storage.get(repl['id'], 'repl_snapshot_tag')) dst_pool_name = dst_dataset_name.split('/')[0] out, err = self.zfs('list', '-r', '-t', 'snapshot', dst_pool_name) data = self.parse_zfs_answer(out) for datum in data: if (dst_dataset_name in datum['NAME'] and '@' + self.replica_snapshot_prefix in datum['NAME'] and datum['NAME'].split('@')[-1] not in snap_references): self._delete_dataset_or_snapshot_with_retry(datum['NAME']) # Destroy all snapshots on src filesystem except referenced ones. 
src_pool_name = src_snapshot_name.split('/')[0] out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', src_pool_name, ) data = self.parse_zfs_answer(out) full_src_snapshot_prefix = ( src_dataset_name + '@' + self._get_replication_snapshot_prefix(replica)) for datum in data: if (full_src_snapshot_prefix in datum['NAME'] and datum['NAME'].split('@')[-1] not in snap_references): self.execute_with_retry( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'destroy', '-f', datum['NAME'], ) if access_rules: # Apply access rules from original share # TODO(vponomaryov): we should remove somehow rules that were # deleted on active replica after creation of secondary replica. # For the moment there will be difference and it can be considered # as a bug. self._get_share_helper(replica['share_proto']).update_access( dst_dataset_name, access_rules, add_rules=[], delete_rules=[], make_all_ro=True) # Return results return constants.REPLICA_STATE_IN_SYNC @ensure_share_server_not_provided def promote_replica(self, context, replica_list, replica, access_rules, share_server=None, quiesce_wait_time=None): """Promotes secondary replica to active and active to secondary.""" active_replica = self._get_active_replica(replica_list) src_dataset_name = self.private_storage.get( active_replica['id'], 'dataset_name') ssh_to_src_cmd = self.private_storage.get( active_replica['id'], 'ssh_cmd') dst_dataset_name = self.private_storage.get( replica['id'], 'dataset_name') replica_dict = { r['id']: { 'id': r['id'], # NOTE(vponomaryov): access rules will be updated in next # 'sync' operation. 
'access_rules_status': constants.SHARE_INSTANCE_RULES_SYNCING, } for r in replica_list } try: # Mark currently active replica as readonly self.execute( 'ssh', ssh_to_src_cmd, 'set', 'readonly=on', src_dataset_name, ) # Create temporary snapshot of currently active replica snapshot_tag = self._get_replication_snapshot_tag(active_replica) src_snapshot_name = src_dataset_name + '@' + snapshot_tag self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'snapshot', src_snapshot_name, ) # Apply temporary snapshot to all replicas for repl in replica_list: if repl['replica_state'] == constants.REPLICA_STATE_ACTIVE: continue previous_snapshot_tag = self.private_storage.get( repl['id'], 'repl_snapshot_tag') dataset_name = self.private_storage.get( repl['id'], 'dataset_name') ssh_to_dst_cmd = self.private_storage.get( repl['id'], 'ssh_cmd') try: # Send/receive diff between previous snapshot and last one out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDRI', previous_snapshot_tag, src_snapshot_name, '|', 'ssh', ssh_to_dst_cmd, 'sudo', 'zfs', 'receive', '-vF', dataset_name, ) except exception.ProcessExecutionError as e: LOG.warning("Failed to sync replica %(id)s. %(e)s", {'id': repl['id'], 'e': e}) replica_dict[repl['id']]['replica_state'] = ( constants.REPLICA_STATE_OUT_OF_SYNC) continue msg = ("Info about last replica '%(replica_id)s' " "sync is following: \n%(out)s") LOG.debug(msg, {'replica_id': repl['id'], 'out': out}) # Update latest replication snapshot for replica self.private_storage.update( repl['id'], {'repl_snapshot_tag': snapshot_tag}) # Update latest replication snapshot for currently active replica self.private_storage.update( active_replica['id'], {'repl_snapshot_tag': snapshot_tag}) replica_dict[active_replica['id']]['replica_state'] = ( constants.REPLICA_STATE_IN_SYNC) except Exception as e: LOG.warning( "Failed to update currently active replica. 
\n%s", e) replica_dict[active_replica['id']]['replica_state'] = ( constants.REPLICA_STATE_OUT_OF_SYNC) # Create temporary snapshot of new replica and sync it with other # secondary replicas. snapshot_tag = self._get_replication_snapshot_tag(replica) src_snapshot_name = dst_dataset_name + '@' + snapshot_tag ssh_to_src_cmd = self.private_storage.get(replica['id'], 'ssh_cmd') self.zfs('snapshot', src_snapshot_name) for repl in replica_list: if (repl['replica_state'] == constants.REPLICA_STATE_ACTIVE or repl['id'] == replica['id']): continue previous_snapshot_tag = self.private_storage.get( repl['id'], 'repl_snapshot_tag') dataset_name = self.private_storage.get( repl['id'], 'dataset_name') ssh_to_dst_cmd = self.private_storage.get( repl['id'], 'ssh_cmd') try: # Send/receive diff between previous snapshot and last one out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDRI', previous_snapshot_tag, src_snapshot_name, '|', 'ssh', ssh_to_dst_cmd, 'sudo', 'zfs', 'receive', '-vF', dataset_name, ) except exception.ProcessExecutionError as e: LOG.warning("Failed to sync replica %(id)s. 
%(e)s", {'id': repl['id'], 'e': e}) replica_dict[repl['id']]['replica_state'] = ( constants.REPLICA_STATE_OUT_OF_SYNC) continue msg = ("Info about last replica '%(replica_id)s' " "sync is following: \n%(out)s") LOG.debug(msg, {'replica_id': repl['id'], 'out': out}) # Update latest replication snapshot for replica self.private_storage.update( repl['id'], {'repl_snapshot_tag': snapshot_tag}) # Update latest replication snapshot for new active replica self.private_storage.update( replica['id'], {'repl_snapshot_tag': snapshot_tag}) replica_dict[replica['id']]['replica_state'] = ( constants.REPLICA_STATE_ACTIVE) self._get_share_helper(replica['share_proto']).update_access( dst_dataset_name, access_rules, add_rules=[], delete_rules=[]) replica_dict[replica['id']]['access_rules_status'] = ( constants.STATUS_ACTIVE) self.zfs('set', 'readonly=off', dst_dataset_name) return list(replica_dict.values()) @ensure_share_server_not_provided def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Create a snapshot and update across the replicas.""" active_replica = self._get_active_replica(replica_list) src_dataset_name = self.private_storage.get( active_replica['id'], 'dataset_name') ssh_to_src_cmd = self.private_storage.get( active_replica['id'], 'ssh_cmd') replica_snapshots_dict = { si['id']: {'id': si['id']} for si in replica_snapshots} active_snapshot_instance_id = [ si['id'] for si in replica_snapshots if si['share_instance_id'] == active_replica['id']][0] snapshot_tag = self._get_snapshot_name(active_snapshot_instance_id) # Replication should not be dependent on manually created snapshots # so, create additional one, newer, that will be used for replication # synchronizations. 
repl_snapshot_tag = self._get_replication_snapshot_tag(active_replica) src_snapshot_name = src_dataset_name + '@' + repl_snapshot_tag self.private_storage.update( replica_snapshots[0]['snapshot_id'], { 'entity_type': 'snapshot', 'snapshot_tag': snapshot_tag, } ) for tag in (snapshot_tag, repl_snapshot_tag): self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'snapshot', src_dataset_name + '@' + tag, ) # Populate snapshot to all replicas for replica_snapshot in replica_snapshots: replica_id = replica_snapshot['share_instance_id'] if replica_id == active_replica['id']: replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_AVAILABLE) continue previous_snapshot_tag = self.private_storage.get( replica_id, 'repl_snapshot_tag') dst_dataset_name = self.private_storage.get( replica_id, 'dataset_name') ssh_to_dst_cmd = self.private_storage.get(replica_id, 'ssh_cmd') try: # Send/receive diff between previous snapshot and last one out, err = self.execute( 'ssh', ssh_to_src_cmd, 'sudo', 'zfs', 'send', '-vDRI', previous_snapshot_tag, src_snapshot_name, '|', 'ssh', ssh_to_dst_cmd, 'sudo', 'zfs', 'receive', '-vF', dst_dataset_name, ) except exception.ProcessExecutionError as e: LOG.warning( "Failed to sync snapshot instance %(id)s. 
%(e)s", {'id': replica_snapshot['id'], 'e': e}) replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_ERROR) continue replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_AVAILABLE) msg = ("Info about last replica '%(replica_id)s' " "sync is following: \n%(out)s") LOG.debug(msg, {'replica_id': replica_id, 'out': out}) # Update latest replication snapshot for replica self.private_storage.update( replica_id, {'repl_snapshot_tag': repl_snapshot_tag}) # Update latest replication snapshot for currently active replica self.private_storage.update( active_replica['id'], {'repl_snapshot_tag': repl_snapshot_tag}) return list(replica_snapshots_dict.values()) @ensure_share_server_not_provided def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Delete a snapshot by deleting its instances across the replicas.""" active_replica = self._get_active_replica(replica_list) replica_snapshots_dict = { si['id']: {'id': si['id']} for si in replica_snapshots} for replica_snapshot in replica_snapshots: replica_id = replica_snapshot['share_instance_id'] snapshot_name = self._get_saved_snapshot_name(replica_snapshot) if active_replica['id'] == replica_id: self._delete_snapshot(context, replica_snapshot) replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_DELETED) continue ssh_cmd = self.private_storage.get(replica_id, 'ssh_cmd') out, err = self.execute( 'ssh', ssh_cmd, 'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', snapshot_name, ) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] != snapshot_name: continue self.execute_with_retry( 'ssh', ssh_cmd, 'sudo', 'zfs', 'destroy', '-f', datum['NAME'], ) self.private_storage.delete(replica_snapshot['id']) replica_snapshots_dict[replica_snapshot['id']]['status'] = ( constants.STATUS_DELETED) self.private_storage.delete(replica_snapshot['snapshot_id']) return list(replica_snapshots_dict.values()) 
@ensure_share_server_not_provided def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): """Update the status of a snapshot instance that lives on a replica.""" self._update_replica_state(context, replica_list, share_replica) snapshot_name = self._get_saved_snapshot_name(replica_snapshot) out, err = self.zfs('list', '-r', '-t', 'snapshot', snapshot_name) data = self.parse_zfs_answer(out) snapshot_found = False for datum in data: if datum['NAME'] == snapshot_name: snapshot_found = True break return_dict = {'id': replica_snapshot['id']} if snapshot_found: return_dict.update({'status': constants.STATUS_AVAILABLE}) else: return_dict.update({'status': constants.STATUS_ERROR}) return return_dict @ensure_share_server_not_provided def migration_check_compatibility( self, context, source_share, destination_share, share_server=None, destination_share_server=None): """Is called to test compatibility with destination backend.""" backend_name = share_utils.extract_host( destination_share['host'], level='backend_name') config = get_backend_configuration(backend_name) compatible = self.configuration.share_driver == config.share_driver return { 'compatible': compatible, 'writable': False, 'preserve_metadata': True, 'nondisruptive': True, } @ensure_share_server_not_provided def migration_start( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Is called to start share migration.""" src_dataset_name = self.private_storage.get( source_share['id'], 'dataset_name') dst_dataset_name = self._get_dataset_name(destination_share) backend_name = share_utils.extract_host( destination_share['host'], level='backend_name') ssh_cmd = '%(username)s@%(host)s' % { 'username': self.configuration.zfs_ssh_username, 'host': self.configuration.zfs_service_ip, } config = get_backend_configuration(backend_name) remote_ssh_cmd = 
'%(username)s@%(host)s' % { 'username': config.zfs_ssh_username, 'host': config.zfs_service_ip, } snapshot_tag = self._get_migration_snapshot_tag(destination_share) src_snapshot_name = ( '%(dataset_name)s@%(snapshot_tag)s' % { 'snapshot_tag': snapshot_tag, 'dataset_name': src_dataset_name, } ) # Save valuable data to DB self.private_storage.update(source_share['id'], { 'migr_snapshot_tag': snapshot_tag, }) self.private_storage.update(destination_share['id'], { 'entity_type': 'share', 'dataset_name': dst_dataset_name, 'ssh_cmd': remote_ssh_cmd, 'pool_name': share_utils.extract_host( destination_share['host'], level='pool'), 'migr_snapshot_tag': snapshot_tag, }) # Create temporary snapshot on src host. self.execute('sudo', 'zfs', 'snapshot', src_snapshot_name) # Send/receive temporary snapshot cmd = ( 'ssh ' + ssh_cmd + ' ' 'sudo zfs send -vDR ' + src_snapshot_name + ' ' '| ssh ' + remote_ssh_cmd + ' ' 'sudo zfs receive -v ' + dst_dataset_name ) filename = dst_dataset_name.replace('/', '_') with utils.tempdir() as tmpdir: tmpfilename = os.path.join(tmpdir, '%s.sh' % filename) with open(tmpfilename, "w") as migr_script: migr_script.write(cmd) self.execute('sudo', 'chmod', '755', tmpfilename) self.execute('nohup', tmpfilename, '&') @ensure_share_server_not_provided def migration_continue( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Is called in source share's backend to continue migration.""" snapshot_tag = self.private_storage.get( destination_share['id'], 'migr_snapshot_tag') out, err = self.execute('ps', 'aux') if not '@%s' % snapshot_tag in out: dst_dataset_name = self.private_storage.get( destination_share['id'], 'dataset_name') try: self.execute( 'sudo', 'zfs', 'get', 'quota', dst_dataset_name, executor=self._get_shell_executor_by_host( destination_share['host']), ) return True except exception.ProcessExecutionError as e: raise exception.ZFSonLinuxException(msg=_( 
'Migration process is absent and dst dataset ' 'returned following error: %s') % e) @ensure_share_server_not_provided def migration_complete( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Is called to perform 2nd phase of driver migration of a given share. """ dst_dataset_name = self.private_storage.get( destination_share['id'], 'dataset_name') snapshot_tag = self.private_storage.get( destination_share['id'], 'migr_snapshot_tag') dst_snapshot_name = ( '%(dataset_name)s@%(snapshot_tag)s' % { 'snapshot_tag': snapshot_tag, 'dataset_name': dst_dataset_name, } ) dst_executor = self._get_shell_executor_by_host( destination_share['host']) # Destroy temporary migration snapshot on dst host self.execute( 'sudo', 'zfs', 'destroy', dst_snapshot_name, executor=dst_executor, ) # Get export locations of new share instance export_locations = self._get_share_helper( destination_share['share_proto']).create_exports( dst_dataset_name, executor=dst_executor) # Destroy src share and temporary migration snapshot on src (this) host self.delete_share(context, source_share) return {'export_locations': export_locations} @ensure_share_server_not_provided def migration_cancel( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Is called to cancel driver migration.""" src_dataset_name = self.private_storage.get( source_share['id'], 'dataset_name') dst_dataset_name = self.private_storage.get( destination_share['id'], 'dataset_name') ssh_cmd = self.private_storage.get( destination_share['id'], 'ssh_cmd') snapshot_tag = self.private_storage.get( destination_share['id'], 'migr_snapshot_tag') # Kill migration process if exists try: out, err = self.execute('ps', 'aux') lines = out.split('\n') for line in lines: if '@%s' % snapshot_tag in line: migr_pid = [ x for x in line.strip().split(' ') if x != ''][1] 
self.execute('sudo', 'kill', '-9', migr_pid) except exception.ProcessExecutionError as e: LOG.warning( "Caught following error trying to kill migration process: %s", e) # Sleep couple of seconds before destroying updated objects time.sleep(2) # Destroy snapshot on source host self._delete_dataset_or_snapshot_with_retry( src_dataset_name + '@' + snapshot_tag) # Destroy dataset and its migration snapshot on destination host try: self.execute( 'ssh', ssh_cmd, 'sudo', 'zfs', 'destroy', '-r', dst_dataset_name, ) except exception.ProcessExecutionError as e: LOG.warning( "Failed to destroy destination dataset with following error: " "%s", e) LOG.debug( "Migration of share with ID '%s' has been canceled.", source_share["id"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zfsonlinux/utils.py0000664000175000017500000002732300000000000023037 0ustar00zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Module for storing ZFSonLinux driver utility stuff such as: - Common ZFS code - Share helpers """ # TODO(vponomaryov): add support of SaMBa import abc from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.ganesha import utils as ganesha_utils from manila import utils LOG = log.getLogger(__name__) def zfs_dataset_synchronized(f): def wrapped_func(self, *args, **kwargs): key = "zfs-dataset-%s" % args[0] @utils.synchronized(key, external=True) def source_func(self, *args, **kwargs): return f(self, *args, **kwargs) return source_func(self, *args, **kwargs) return wrapped_func def get_remote_shell_executor( ip, port, conn_timeout, login=None, password=None, privatekey=None, max_size=10): return ganesha_utils.SSHExecutor( ip=ip, port=port, conn_timeout=conn_timeout, login=login, password=password, privatekey=privatekey, max_size=max_size, ) class ExecuteMixin(driver.ExecuteMixin): def init_execute_mixin(self, *args, **kwargs): """Init method for mixin called in the end of driver's __init__().""" super(ExecuteMixin, self).init_execute_mixin(*args, **kwargs) if self.configuration.zfs_use_ssh: self.ssh_executor = get_remote_shell_executor( ip=self.configuration.zfs_service_ip, port=22, conn_timeout=self.configuration.ssh_conn_timeout, login=self.configuration.zfs_ssh_username, password=self.configuration.zfs_ssh_user_password, privatekey=self.configuration.zfs_ssh_private_key_path, max_size=10, ) else: self.ssh_executor = None def execute(self, *cmd, **kwargs): """Common interface for running shell commands.""" if kwargs.get('executor'): executor = kwargs.get('executor') elif self.ssh_executor: executor = self.ssh_executor else: executor = self._execute kwargs.pop('executor', None) if cmd[0] == 'sudo': kwargs['run_as_root'] = True cmd = cmd[1:] return executor(*cmd, **kwargs) @utils.retry(retry_param=exception.ProcessExecutionError, interval=5, 
retries=36, backoff_rate=1) def execute_with_retry(self, *cmd, **kwargs): """Retry wrapper over common shell interface.""" try: return self.execute(*cmd, **kwargs) except exception.ProcessExecutionError as e: LOG.warning("Failed to run command, got error: %s", e) raise def _get_option(self, resource_name, option_name, pool_level=False, **kwargs): """Returns value of requested zpool or zfs dataset option.""" app = 'zpool' if pool_level else 'zfs' out, err = self.execute( 'sudo', app, 'get', option_name, resource_name, **kwargs) data = self.parse_zfs_answer(out) option = data[0]['VALUE'] msg_payload = {'option': option_name, 'value': option} LOG.debug("ZFS option %(option)s's value is %(value)s.", msg_payload) return option def parse_zfs_answer(self, string): """Returns list of dicts with data returned by ZFS shell commands.""" lines = string.split('\n') if len(lines) < 2: return [] keys = list(filter(None, lines[0].split(' '))) data = [] for line in lines[1:]: values = list(filter(None, line.split(' '))) if not values: continue data.append(dict(zip(keys, values))) return data def get_zpool_option(self, zpool_name, option_name, **kwargs): """Returns value of requested zpool option.""" return self._get_option(zpool_name, option_name, True, **kwargs) def get_zfs_option(self, dataset_name, option_name, **kwargs): """Returns value of requested zfs dataset option.""" return self._get_option(dataset_name, option_name, False, **kwargs) def zfs(self, *cmd, **kwargs): """ZFS shell commands executor.""" return self.execute('sudo', 'zfs', *cmd, **kwargs) def zfs_with_retry(self, *cmd, **kwargs): """ZFS shell commands executor.""" return self.execute_with_retry('sudo', 'zfs', *cmd, **kwargs) class NASHelperBase(metaclass=abc.ABCMeta): """Base class for share helpers of 'ZFS on Linux' driver.""" def __init__(self, configuration): """Init share helper. :param configuration: share driver 'configuration' instance :return: share helper instance. 
""" self.configuration = configuration self.init_execute_mixin() # pylint: disable=no-member self.verify_setup() @abc.abstractmethod def verify_setup(self): """Performs checks for required stuff.""" @abc.abstractmethod def create_exports(self, dataset_name, executor): """Creates share exports.""" @abc.abstractmethod def get_exports(self, dataset_name, service, executor): """Gets/reads share exports.""" @abc.abstractmethod def remove_exports(self, dataset_name, executor): """Removes share exports.""" @abc.abstractmethod def update_access(self, dataset_name, access_rules, add_rules, delete_rules, executor): """Update access rules for specified ZFS dataset.""" class NFSviaZFSHelper(ExecuteMixin, NASHelperBase): """Helper class for handling ZFS datasets as NFS shares. Kernel and Fuse versions of ZFS have different syntax for setting up access rules, and this Helper designed to satisfy both making autodetection. """ @property def is_kernel_version(self): """Says whether Kernel version of ZFS is used or not.""" if not hasattr(self, '_is_kernel_version'): try: self.execute('modinfo', 'zfs') self._is_kernel_version = True except exception.ProcessExecutionError as e: LOG.info( "Looks like ZFS kernel module is absent. " "Assuming FUSE version is installed. 
Error: %s", e) self._is_kernel_version = False return self._is_kernel_version def verify_setup(self): """Performs checks for required stuff.""" out, err = self.execute('which', 'exportfs') if not out: raise exception.ZFSonLinuxException( msg=_("Utility 'exportfs' is not installed.")) try: self.execute('sudo', 'exportfs') except exception.ProcessExecutionError: LOG.exception("Call of 'exportfs' utility returned error.") raise # Init that class instance attribute on start of manila-share service self.is_kernel_version def create_exports(self, dataset_name, executor=None): """Creates NFS share exports for given ZFS dataset.""" return self.get_exports(dataset_name, executor=executor) def get_exports(self, dataset_name, executor=None): """Gets/reads NFS share export for given ZFS dataset.""" mountpoint = self.get_zfs_option( dataset_name, 'mountpoint', executor=executor) return [ { "path": "%(ip)s:%(mp)s" % {"ip": ip, "mp": mountpoint}, "metadata": { }, "is_admin_only": is_admin_only, } for ip, is_admin_only in ( (self.configuration.zfs_share_export_ip, False), (self.configuration.zfs_service_ip, True)) ] @zfs_dataset_synchronized def remove_exports(self, dataset_name, executor=None): """Removes NFS share exports for given ZFS dataset.""" sharenfs = self.get_zfs_option( dataset_name, 'sharenfs', executor=executor) if sharenfs == 'off': return self.zfs("set", "sharenfs=off", dataset_name, executor=executor) def _get_parsed_access_to(self, access_to): netmask = utils.cidr_to_netmask(access_to) if netmask == '255.255.255.255': return access_to.split('/')[0] return access_to.split('/')[0] + '/' + netmask @zfs_dataset_synchronized def update_access(self, dataset_name, access_rules, add_rules, delete_rules, make_all_ro=False, executor=None): """Update access rules for given ZFS dataset exported as NFS share.""" rw_rules = [] ro_rules = [] for rule in access_rules: if rule['access_type'].lower() != 'ip': msg = _("Only IP access type allowed for NFS protocol.") raise 
exception.InvalidShareAccess(reason=msg) if (rule['access_level'] == constants.ACCESS_LEVEL_RW and not make_all_ro): rw_rules.append(self._get_parsed_access_to(rule['access_to'])) elif (rule['access_level'] in (constants.ACCESS_LEVEL_RW, constants.ACCESS_LEVEL_RO)): ro_rules.append(self._get_parsed_access_to(rule['access_to'])) else: msg = _("Unsupported access level provided - " "%s.") % rule['access_level'] raise exception.InvalidShareAccess(reason=msg) rules = [] if self.is_kernel_version: if rw_rules: rules.append( "rw=%s,no_root_squash" % ":".join(rw_rules)) if ro_rules: rules.append("ro=%s,no_root_squash" % ":".join(ro_rules)) rules_str = "sharenfs=" + (','.join(rules) or 'off') else: for rule in rw_rules: rules.append("%s:rw,no_root_squash" % rule) for rule in ro_rules: rules.append("%s:ro,no_root_squash" % rule) rules_str = "sharenfs=" + (' '.join(rules) or 'off') out, err = self.zfs( 'list', '-r', dataset_name.split('/')[0], executor=executor) data = self.parse_zfs_answer(out) for datum in data: if datum['NAME'] == dataset_name: self.zfs("set", rules_str, dataset_name) break else: LOG.warning( "Dataset with '%(name)s' NAME is absent on backend. " "Access rules were not applied.", {'name': dataset_name}) # NOTE(vponomaryov): Setting of ZFS share options does not remove rules # that were added and then removed. So, remove them explicitly. 
if delete_rules and access_rules: mountpoint = self.get_zfs_option(dataset_name, 'mountpoint') for rule in delete_rules: if rule['access_type'].lower() != 'ip': continue access_to = self._get_parsed_access_to(rule['access_to']) export_location = access_to + ':' + mountpoint self.execute( 'sudo', 'exportfs', '-u', export_location, executor=executor, ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9416714 manila-21.0.0/manila/share/drivers/zfssa/0000775000175000017500000000000000000000000020225 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zfssa/__init__.py0000664000175000017500000000000000000000000022324 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zfssa/restclient.py0000664000175000017500000003111100000000000022750 0ustar00zuulzuul00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ZFS Storage Appliance REST API Client Programmatic Interface TODO(diemtran): this module needs to be placed in a library common to OpenStack services. When this happens, the file should be removed from Manila code base and imported from the relevant library. 
""" from http import client as http_client import io import time from urllib import error as urlerror from urllib import request as urlrequest from oslo_serialization import jsonutils def log_debug_msg(obj, message): if obj.log_function: obj.log_function(message) class Status(object): """Result HTTP Status.""" #: Request return OK OK = http_client.OK # pylint: disable=invalid-name #: New resource created successfully CREATED = http_client.CREATED #: Command accepted ACCEPTED = http_client.ACCEPTED #: Command returned OK but no data will be returned NO_CONTENT = http_client.NO_CONTENT #: Bad Request BAD_REQUEST = http_client.BAD_REQUEST #: User is not authorized UNAUTHORIZED = http_client.UNAUTHORIZED #: The request is not allowed FORBIDDEN = http_client.FORBIDDEN #: The requested resource was not found NOT_FOUND = http_client.NOT_FOUND #: The request is not allowed NOT_ALLOWED = http_client.METHOD_NOT_ALLOWED #: Request timed out TIMEOUT = http_client.REQUEST_TIMEOUT #: Invalid request CONFLICT = http_client.CONFLICT #: Service Unavailable BUSY = http_client.SERVICE_UNAVAILABLE class RestResult(object): """Result from a REST API operation.""" def __init__(self, logfunc=None, response=None, err=None): """Initialize a RestResult containing the results from a REST call. :param logfunc: debug log function. :param response: HTTP response. :param err: HTTP error. """ self.response = response self.log_function = logfunc self.error = err self.data = "" self.status = 0 if self.response: self.status = self.response.getcode() result = self.response.read() while result: self.data += result result = self.response.read() if self.error: self.status = self.error.code self.data = http_client.responses[self.status] log_debug_msg(self, 'Response code: %s' % self.status) log_debug_msg(self, 'Response data: %s' % self.data) def get_header(self, name): """Get an HTTP header with the given name from the results. :param name: HTTP header name. 
:return: The header value or None if no value is found. """ if self.response is None: return None info = self.response.info() return info.getheader(name) class RestClientError(Exception): """Exception for ZFS REST API client errors.""" def __init__(self, status, name="ERR_INTERNAL", message=None): """Create a REST Response exception. :param status: HTTP response status. :param name: The name of the REST API error type. :param message: Descriptive error message returned from REST call. """ super(RestClientError, self).__init__(message) self.code = status self.name = name self.msg = message if status in http_client.responses: self.msg = http_client.responses[status] def __str__(self): return "%d %s %s" % (self.code, self.name, self.msg) class RestClientURL(object): # pylint: disable=R0902 """ZFSSA urllib client.""" def __init__(self, url, logfunc=None, **kwargs): """Initialize a REST client. :param url: The ZFSSA REST API URL. :key session: HTTP Cookie value of x-auth-session obtained from a normal BUI login. :key timeout: Time in seconds to wait for command to complete. (Default is 60 seconds). 
""" self.url = url self.log_function = logfunc self.local = kwargs.get("local", False) self.base_path = kwargs.get("base_path", "/api") self.timeout = kwargs.get("timeout", 60) self.headers = None if kwargs.get('session'): self.headers['x-auth-session'] = kwargs.get('session') self.headers = {"content-type": "application/json"} self.do_logout = False self.auth_str = None def _path(self, path, base_path=None): """Build rest url path.""" if path.startswith("http://") or path.startswith("https://"): return path if base_path is None: base_path = self.base_path if not path.startswith(base_path) and not ( self.local and ("/api" + path).startswith(base_path)): path = "%s%s" % (base_path, path) if self.local and path.startswith("/api"): path = path[4:] return self.url + path def _authorize(self): """Performs authorization setting x-auth-session.""" self.headers['authorization'] = 'Basic %s' % self.auth_str if 'x-auth-session' in self.headers: del self.headers['x-auth-session'] try: result = self.post("/access/v1") del self.headers['authorization'] if result.status == http_client.CREATED: self.headers['x-auth-session'] = ( result.get_header('x-auth-session')) self.do_logout = True log_debug_msg(self, ('ZFSSA version: %s') % result.get_header('x-zfssa-version')) elif result.status == http_client.NOT_FOUND: raise RestClientError(result.status, name="ERR_RESTError", message=("REST Not Available:" "Please Upgrade")) except RestClientError: del self.headers['authorization'] raise def login(self, auth_str): """Login to an appliance using a user name and password. Start a session like what is done logging into the BUI. This is not a requirement to run REST commands, since the protocol is stateless. What is does is set up a cookie session so that some server side caching can be done. If login is used remember to call logout when finished. :param auth_str: Authorization string (base64). 
""" self.auth_str = auth_str self._authorize() def logout(self): """Logout of an appliance.""" result = None try: result = self.delete("/access/v1", base_path="/api") except RestClientError: pass self.headers.clear() self.do_logout = False return result def islogin(self): """return if client is login.""" return self.do_logout @staticmethod def mkpath(*args, **kwargs): """Make a path?query string for making a REST request. :cmd_params args: The path part. :cmd_params kwargs: The query part. """ buf = io.StringIO() query = "?" for arg in args: buf.write("/") buf.write(arg) for k in kwargs: buf.write(query) if query == "?": query = "&" buf.write(k) buf.write("=") buf.write(kwargs[k]) return buf.getvalue() # pylint: disable=R0912 def request(self, path, request, body=None, **kwargs): """Make an HTTP request and return the results. :param path: Path used with the initialized URL to make a request. :param request: HTTP request type (GET, POST, PUT, DELETE). :param body: HTTP body of request. :key accept: Set HTTP 'Accept' header with this value. :key base_path: Override the base_path for this request. :key content: Set HTTP 'Content-Type' header with this value. 
""" out_hdrs = dict.copy(self.headers) if kwargs.get("accept"): out_hdrs['accept'] = kwargs.get("accept") if body: if isinstance(body, dict): body = str(jsonutils.dumps(body)) if body and len(body): out_hdrs['content-length'] = len(body) zfssaurl = self._path(path, kwargs.get("base_path")) req = urlrequest.Request(zfssaurl, body, out_hdrs) req.get_method = lambda: request maxreqretries = kwargs.get("maxreqretries", 10) retry = 0 response = None log_debug_msg(self, 'Request: %s %s' % (request, zfssaurl)) log_debug_msg(self, 'Out headers: %s' % out_hdrs) if body and body != '': log_debug_msg(self, 'Body: %s' % body) while retry < maxreqretries: try: response = urlrequest.urlopen(req, # nosec B310 timeout=self.timeout) except urlerror.HTTPError as err: if err.code == http_client.NOT_FOUND: log_debug_msg(self, 'REST Not Found: %s' % err.code) else: log_debug_msg(self, ('REST Not Available: %s') % err.code) if (err.code == http_client.SERVICE_UNAVAILABLE and retry < maxreqretries): retry += 1 time.sleep(1) log_debug_msg(self, ('Server Busy retry request: %s') % retry) continue if ((err.code == http_client.UNAUTHORIZED or err.code == http_client.INTERNAL_SERVER_ERROR) and '/access/v1' not in zfssaurl): try: log_debug_msg(self, ('Authorizing request: ' '%(zfssaurl)s ' 'retry: %(retry)d .') % {'zfssaurl': zfssaurl, 'retry': retry}) self._authorize() req.add_header('x-auth-session', self.headers['x-auth-session']) except RestClientError: log_debug_msg(self, ('Cannot authorize.')) retry += 1 time.sleep(1) continue return RestResult(self.log_function, err=err) except urlerror.URLError as err: log_debug_msg(self, ('URLError: %s') % err.reason) raise RestClientError(-1, name="ERR_URLError", message=err.reason) break if ((response and response.getcode() == http_client.SERVICE_UNAVAILABLE) and retry >= maxreqretries): raise RestClientError(response.getcode(), name="ERR_HTTPError", message="REST Not Available: Disabled") return RestResult(self.log_function, response=response) def 
get(self, path, **kwargs): """Make an HTTP GET request. :param path: Path to resource. """ return self.request(path, "GET", **kwargs) def post(self, path, body="", **kwargs): """Make an HTTP POST request. :param path: Path to resource. :param body: Post data content. """ return self.request(path, "POST", body, **kwargs) def put(self, path, body="", **kwargs): """Make an HTTP PUT request. :param path: Path to resource. :param body: Put data content. """ return self.request(path, "PUT", body, **kwargs) def delete(self, path, **kwargs): """Make an HTTP DELETE request. :param path: Path to resource that will be deleted. """ return self.request(path, "DELETE", **kwargs) def head(self, path, **kwargs): """Make an HTTP HEAD request. :param path: Path to resource. """ return self.request(path, "HEAD", **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zfssa/zfssarest.py0000664000175000017500000004331200000000000022626 0ustar00zuulzuul00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" ZFS Storage Appliance Proxy """ from oslo_log import log from oslo_serialization import jsonutils from manila import exception from manila.i18n import _ from manila.share.drivers.zfssa import restclient LOG = log.getLogger(__name__) def factory_restclient(url, logfunc, **kwargs): return restclient.RestClientURL(url, logfunc, **kwargs) class ZFSSAApi(object): """ZFSSA API proxy class.""" pools_path = '/api/storage/v1/pools' pool_path = pools_path + '/%s' projects_path = pool_path + '/projects' project_path = projects_path + '/%s' shares_path = project_path + '/filesystems' share_path = shares_path + '/%s' snapshots_path = share_path + '/snapshots' snapshot_path = snapshots_path + '/%s' clone_path = snapshot_path + '/clone' service_path = '/api/service/v1/services/%s/enable' def __init__(self): self.host = None self.url = None self.rclient = None def __del__(self): if self.rclient: del self.rclient def rest_get(self, path, expected): ret = self.rclient.get(path) if ret.status != expected: exception_msg = (_('Rest call to %(host)s %(path)s failed.' 
'Status: %(status)d Message: %(data)s') % {'host': self.host, 'path': path, 'status': ret.status, 'data': ret.data}) LOG.error(exception_msg) raise exception.ShareBackendException(msg=exception_msg) return ret def _is_pool_owned(self, pdata): """returns True if the pool's owner is the same as the host.""" svc = '/api/system/v1/version' ret = self.rest_get(svc, restclient.Status.OK) vdata = jsonutils.loads(ret.data) return (vdata['version']['asn'] == pdata['pool']['asn'] and vdata['version']['nodename'] == pdata['pool']['owner']) def set_host(self, host, timeout=None): self.host = host self.url = "https://%s:215" % self.host self.rclient = factory_restclient(self.url, LOG.debug, timeout=timeout) def login(self, auth_str): """Login to the appliance.""" if self.rclient and not self.rclient.islogin(): self.rclient.login(auth_str) def enable_service(self, service): """Enable the specified service.""" svc = self.service_path % service ret = self.rclient.put(svc) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_("Cannot enable %s service.") % service) raise exception.ShareBackendException(msg=exception_msg) def verify_avail_space(self, pool, project, share, size): """Check if there is enough space available to a new share.""" self.verify_project(pool, project) avail = self.get_project_stats(pool, project) if avail < size: exception_msg = (_('Error creating ' 'share: %(share)s on ' 'pool: %(pool)s. ' 'Not enough space.') % {'share': share, 'pool': pool}) raise exception.ShareBackendException(msg=exception_msg) def get_pool_stats(self, pool): """Get space_available and used properties of a pool. returns (avail, used). 
""" svc = self.pool_path % pool ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error getting pool stats: ' 'pool: %(pool)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'pool': pool, 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.InvalidInput(reason=exception_msg) val = jsonutils.loads(ret.data) if not self._is_pool_owned(val): exception_msg = (_('Error pool ownership: ' 'pool %(pool)s is not owned ' 'by %(host)s.') % {'pool': pool, 'host': self.host}) raise exception.InvalidInput(reason=pool) avail = val['pool']['usage']['available'] used = val['pool']['usage']['used'] return avail, used def get_project_stats(self, pool, project): """Get space_available of a project. Used to check whether a project has enough space (after reservation) or not. """ svc = self.project_path % (pool, project) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: exception_msg = (_('Error getting project stats: ' 'pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.InvalidInput(reason=exception_msg) val = jsonutils.loads(ret.data) avail = val['project']['space_available'] return avail def create_project(self, pool, project, arg): """Create a project on a pool. 
Check first whether the pool exists.""" self.verify_pool(pool) svc = self.project_path % (pool, project) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svc = self.projects_path % pool ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error creating project: ' '%(project)s on ' 'pool: %(pool)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'project': project, 'pool': pool, 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.ShareBackendException(msg=exception_msg) def verify_pool(self, pool): """Checks whether pool exists.""" svc = self.pool_path % pool self.rest_get(svc, restclient.Status.OK) def verify_project(self, pool, project): """Checks whether project exists.""" svc = self.project_path % (pool, project) ret = self.rest_get(svc, restclient.Status.OK) return ret def create_share(self, pool, project, share): """Create a share in the specified pool and project.""" self.verify_avail_space(pool, project, share, share['quota']) svc = self.share_path % (pool, project, share['name']) ret = self.rclient.get(svc) if ret.status != restclient.Status.OK: svc = self.shares_path % (pool, project) ret = self.rclient.post(svc, share) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error creating ' 'share: %(name)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'name': share['name'], 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.ShareBackendException(msg=exception_msg) else: exception_msg = (_('Share with name %s already exists.') % share['name']) raise exception.ShareBackendException(msg=exception_msg) def get_share(self, pool, project, share): """Return share properties.""" svc = self.share_path % (pool, project, share) ret = self.rest_get(svc, restclient.Status.OK) val = jsonutils.loads(ret.data) return val['filesystem'] def modify_share(self, pool, project, share, arg): """Modify a set of properties of a share.""" svc = 
self.share_path % (pool, project, share) ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.ACCEPTED: exception_msg = (_('Error modifying %(arg)s ' ' of share %(id)s.') % {'arg': arg, 'id': share}) raise exception.ShareBackendException(msg=exception_msg) def delete_share(self, pool, project, share): """Delete a share. The function assumes the share has no clone or snapshot. """ svc = self.share_path % (pool, project, share) ret = self.rclient.delete(svc) if ret.status != restclient.Status.NO_CONTENT: exception_msg = (('Error deleting ' 'share: %(share)s to ' 'pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.'), {'share': share, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) def create_snapshot(self, pool, project, share, snapshot): """Create a snapshot of the given share.""" svc = self.snapshots_path % (pool, project, share) arg = {'name': snapshot} ret = self.rclient.post(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error creating ' 'snapshot: %(snapshot)s on ' 'share: %(share)s to ' 'pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'snapshot': snapshot, 'share': share, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) raise exception.ShareBackendException(msg=exception_msg) def delete_snapshot(self, pool, project, share, snapshot): """Delete a snapshot that has no clone.""" svc = self.snapshot_path % (pool, project, share, snapshot) ret = self.rclient.delete(svc) if ret.status != restclient.Status.NO_CONTENT: exception_msg = (_('Error deleting ' 'snapshot: %(snapshot)s on ' 'share: %(share)s to ' 'pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'snapshot': snapshot, 'share': share, 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) 
LOG.error(exception_msg) raise exception.ShareBackendException(msg=exception_msg) def clone_snapshot(self, pool, project, snapshot, clone, arg): """Create a new share from the given snapshot.""" self.verify_avail_space(pool, project, clone['id'], clone['size']) svc = self.clone_path % (pool, project, snapshot['share_id'], snapshot['id']) ret = self.rclient.put(svc, arg) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error cloning ' 'snapshot: %(snapshot)s on ' 'share: %(share)s of ' 'Pool: %(pool)s ' 'project: %(project)s ' 'return code: %(ret.status)d ' 'message: %(ret.data)s.') % {'snapshot': snapshot['id'], 'share': snapshot['share_id'], 'pool': pool, 'project': project, 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.ShareBackendException(msg=exception_msg) def has_clones(self, pool, project, share, snapshot): """Check whether snapshot has existing clones.""" svc = self.snapshot_path % (pool, project, share, snapshot) ret = self.rest_get(svc, restclient.Status.OK) val = jsonutils.loads(ret.data) return val['snapshot']['numclones'] != 0 def allow_access_nfs(self, pool, project, share, access): """Allow an IP access to a share through NFS.""" if access['access_type'] != 'ip': reason = _('Only ip access type allowed.') raise exception.InvalidShareAccess(reason) ip = access['access_to'] details = self.get_share(pool, project, share) sharenfs = details['sharenfs'] if sharenfs == 'on' or sharenfs == 'rw': LOG.debug('Share %s has read/write permission ' 'open to all.', share) return if sharenfs == 'off': sharenfs = 'sec=sys' if ip in sharenfs: LOG.debug('Access to share %(share)s via NFS ' 'already granted to %(ip)s.', {'share': share, 'ip': ip}) return entry = (',rw=@%s' % ip) if '/' not in ip: entry = "%s/32" % entry arg = {'sharenfs': sharenfs + entry} self.modify_share(pool, project, share, arg) def deny_access_nfs(self, pool, project, share, access): """Denies access of an IP to a share through NFS. 
Since sharenfs property allows a combination of mutiple syntaxes: sharenfs="sec=sys,rw=@first_ip,rw=@second_ip" sharenfs="sec=sys,rw=@first_ip:@second_ip" sharenfs="sec=sys,rw=@first_ip:@second_ip,rw=@third_ip" The function checks what syntax is used and remove the IP accordingly. """ if access['access_type'] != 'ip': reason = _('Only ip access type allowed.') raise exception.InvalidShareAccess(reason) ip = access['access_to'] entry = ('@%s' % ip) if '/' not in ip: entry = "%s/32" % entry details = self.get_share(pool, project, share) if entry not in details['sharenfs']: LOG.debug('IP %(ip)s does not have access ' 'to Share %(share)s via NFS.', {'ip': ip, 'share': share}) return sharenfs = str(details['sharenfs']) argval = '' if sharenfs.find((',rw=%s:' % entry)) >= 0: argval = sharenfs.replace(('%s:' % entry), '') elif sharenfs.find((',rw=%s' % entry)) >= 0: argval = sharenfs.replace((',rw=%s' % entry), '') elif sharenfs.find((':%s' % entry)) >= 0: argval = sharenfs.replace((':%s' % entry), '') arg = {'sharenfs': argval} LOG.debug('deny_access: %s', argval) self.modify_share(pool, project, share, arg) def create_schema(self, schema): """Create a custom ZFSSA schema.""" base = '/api/storage/v1/schema' svc = "%(base)s/%(prop)s" % {'base': base, 'prop': schema['property']} ret = self.rclient.get(svc) if ret.status == restclient.Status.OK: LOG.warning('Property %s already exists.', schema['property']) return ret = self.rclient.post(base, schema) if ret.status != restclient.Status.CREATED: exception_msg = (_('Error Creating ' 'Property: %(property)s ' 'Type: %(type)s ' 'Description: %(description)s ' 'Return code: %(ret.status)d ' 'Message: %(ret.data)s.') % {'property': schema['property'], 'type': schema['type'], 'description': schema['description'], 'ret.status': ret.status, 'ret.data': ret.data}) LOG.error(exception_msg) raise exception.ShareBackendException(msg=exception_msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315554.0 manila-21.0.0/manila/share/drivers/zfssa/zfssashare.py0000664000175000017500000005103700000000000022756 0ustar00zuulzuul00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ZFS Storage Appliance Manila Share Driver """ import base64 import math from oslo_config import cfg from oslo_log import log from oslo_utils import units from manila import exception from manila.i18n import _ from manila.share import driver from manila.share.drivers.zfssa import zfssarest ZFSSA_OPTS = [ cfg.HostAddressOpt('zfssa_host', help='ZFSSA management IP address.'), cfg.HostAddressOpt('zfssa_data_ip', help='IP address for data.'), cfg.StrOpt('zfssa_auth_user', help='ZFSSA management authorized username.'), cfg.StrOpt('zfssa_auth_password', secret=True, help='ZFSSA management authorized user\'s password.'), cfg.StrOpt('zfssa_pool', help='ZFSSA storage pool name.'), cfg.StrOpt('zfssa_project', help='ZFSSA project name.'), cfg.StrOpt('zfssa_nas_checksum', default='fletcher4', help='Controls checksum used for data blocks.'), cfg.StrOpt('zfssa_nas_compression', default='off', help='Data compression-off, lzjb, gzip-2, gzip, gzip-9.'), cfg.StrOpt('zfssa_nas_logbias', default='latency', help='Controls behavior when servicing synchronous writes.'), cfg.StrOpt('zfssa_nas_mountpoint', default='', help='Location of project in ZFS/SA.'), cfg.StrOpt('zfssa_nas_quota_snap', default='true', help='Controls whether a 
share quota includes snapshot.'), cfg.StrOpt('zfssa_nas_rstchown', default='true', help='Controls whether file ownership can be changed.'), cfg.StrOpt('zfssa_nas_vscan', default='false', help='Controls whether the share is scanned for viruses.'), cfg.StrOpt('zfssa_rest_timeout', help='REST connection timeout (in seconds).'), cfg.StrOpt('zfssa_manage_policy', default='loose', choices=['loose', 'strict'], help='Driver policy for share manage. A strict policy checks ' 'for a schema named manila_managed, and makes sure its ' 'value is true. A loose policy does not check for the ' 'schema.') ] cfg.CONF.register_opts(ZFSSA_OPTS) LOG = log.getLogger(__name__) def factory_zfssa(): return zfssarest.ZFSSAApi() class ZFSSAShareDriver(driver.ShareDriver): """ZFSSA share driver: Supports NFS and CIFS protocols. Uses ZFSSA RESTful API to create shares and snapshots on backend. API version history: 1.0 - Initial version. 1.0.1 - Add share shrink/extend feature. 1.0.2 - Add share manage/unmanage feature. 
""" VERSION = '1.0.2' PROTOCOL = 'NFS_CIFS' def __init__(self, *args, **kwargs): super(ZFSSAShareDriver, self).__init__(False, *args, **kwargs) self.configuration.append_config_values(ZFSSA_OPTS) self.zfssa = None self._stats = None self.mountpoint = '/export/' lcfg = self.configuration required = [ 'zfssa_host', 'zfssa_data_ip', 'zfssa_auth_user', 'zfssa_auth_password', 'zfssa_pool', 'zfssa_project' ] for prop in required: if not getattr(lcfg, prop, None): exception_msg = _('%s is required in manila.conf') % prop LOG.error(exception_msg) raise exception.InvalidParameterValue(exception_msg) self.default_args = { 'compression': lcfg.zfssa_nas_compression, 'logbias': lcfg.zfssa_nas_logbias, 'checksum': lcfg.zfssa_nas_checksum, 'vscan': lcfg.zfssa_nas_vscan, 'rstchown': lcfg.zfssa_nas_rstchown, } self.share_args = { 'sharedav': 'off', 'shareftp': 'off', 'sharesftp': 'off', 'sharetftp': 'off', 'root_permissions': '777', 'sharenfs': 'sec=sys', 'sharesmb': 'off', 'quota_snap': self.configuration.zfssa_nas_quota_snap, 'reservation_snap': self.configuration.zfssa_nas_quota_snap, 'custom:manila_managed': True, } def do_setup(self, context): """Login, create project, no sharing option enabled.""" lcfg = self.configuration LOG.debug("Connecting to host: %s.", lcfg.zfssa_host) self.zfssa = factory_zfssa() self.zfssa.set_host(lcfg.zfssa_host, timeout=lcfg.zfssa_rest_timeout) creds = '%s:%s' % (lcfg.zfssa_auth_user, lcfg.zfssa_auth_password) auth_str = base64.encodebytes(creds.encode("latin-1"))[:-1] self.zfssa.login(auth_str) if lcfg.zfssa_nas_mountpoint == '': self.mountpoint += lcfg.zfssa_project else: self.mountpoint += lcfg.zfssa_nas_mountpoint arg = { 'name': lcfg.zfssa_project, 'sharesmb': 'off', 'sharenfs': 'off', 'mountpoint': self.mountpoint, } arg.update(self.default_args) self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project, arg) self.zfssa.enable_service('nfs') self.zfssa.enable_service('smb') schema = { 'property': 'manila_managed', 'description': 'Managed 
by Manila', 'type': 'Boolean', } self.zfssa.create_schema(schema) def check_for_setup_error(self): """Check for properly configured pool, project.""" lcfg = self.configuration LOG.debug("Verifying pool %s.", lcfg.zfssa_pool) self.zfssa.verify_pool(lcfg.zfssa_pool) LOG.debug("Verifying project %s.", lcfg.zfssa_project) self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project) def _export_location(self, share): """Export share's location based on protocol used.""" lcfg = self.configuration arg = { 'host': lcfg.zfssa_data_ip, 'mountpoint': self.mountpoint, 'name': share['id'], } location = '' proto = share['share_proto'] if proto == 'NFS': location = ("%(host)s:%(mountpoint)s/%(name)s" % arg) elif proto == 'CIFS': location = ("\\\\%(host)s\\%(name)s" % arg) else: exception_msg = _('Protocol %s is not supported.') % proto LOG.error(exception_msg) raise exception.InvalidParameterValue(exception_msg) LOG.debug("Export location: %s.", location) return location def create_arg(self, size): size = units.Gi * int(size) arg = { 'quota': size, 'reservation': size, } arg.update(self.share_args) return arg def create_share(self, context, share, share_server=None): """Create a share and export it based on protocol used. The created share inherits properties from its project. """ lcfg = self.configuration arg = self.create_arg(share['size']) arg.update(self.default_args) arg.update({'name': share['id']}) if share['share_proto'] == 'CIFS': arg.update({'sharesmb': 'on'}) LOG.debug("ZFSSAShareDriver.create_share: id=%(name)s, size=%(quota)s", {'name': arg['name'], 'quota': arg['quota']}) self.zfssa.create_share(lcfg.zfssa_pool, lcfg.zfssa_project, arg) return self._export_location(share) def delete_share(self, context, share, share_server=None): """Delete a share. Shares with existing snapshots can't be deleted. 
""" LOG.debug("ZFSSAShareDriver.delete_share: id=%s", share['id']) lcfg = self.configuration self.zfssa.delete_share(lcfg.zfssa_pool, lcfg.zfssa_project, share['id']) def create_snapshot(self, context, snapshot, share_server=None): """Creates a snapshot of the snapshot['share_id'].""" LOG.debug("ZFSSAShareDriver.create_snapshot: " "id=%(snap)s share=%(share)s", {'snap': snapshot['id'], 'share': snapshot['share_id']}) lcfg = self.configuration self.zfssa.create_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['share_id'], snapshot['id']) def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Create a share from a snapshot - clone a snapshot.""" lcfg = self.configuration LOG.debug("ZFSSAShareDriver.create_share_from_snapshot: clone=%s", share['id']) LOG.debug("ZFSSAShareDriver.create_share_from_snapshot: snapshot=%s", snapshot['id']) arg = self.create_arg(share['size']) details = { 'share': share['id'], 'project': lcfg.zfssa_project, } arg.update(details) if share['share_proto'] == 'CIFS': arg.update({'sharesmb': 'on'}) self.zfssa.clone_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot, share, arg) return self._export_location(share) def delete_snapshot(self, context, snapshot, share_server=None): """Delete a snapshot. Snapshots with existing clones cannot be deleted. """ LOG.debug("ZFSSAShareDriver.delete_snapshot: id=%s", snapshot['id']) lcfg = self.configuration has_clones = self.zfssa.has_clones(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['share_id'], snapshot['id']) if has_clones: LOG.error("snapshot %s: has clones", snapshot['id']) raise exception.ShareSnapshotIsBusy(snapshot_name=snapshot['id']) self.zfssa.delete_snapshot(lcfg.zfssa_pool, lcfg.zfssa_project, snapshot['share_id'], snapshot['id']) def manage_existing(self, share, driver_options): """Manage an existing ZFSSA share. This feature requires an option 'zfssa_name', which specifies the name of the share as appeared in ZFSSA. 
The driver automatically retrieves information from the ZFSSA backend and returns the correct share size and export location. """ if 'zfssa_name' not in driver_options: msg = _('Name of the share in ZFSSA share has to be ' 'specified in option zfssa_name.') LOG.error(msg) raise exception.ShareBackendException(msg=msg) name = driver_options['zfssa_name'] try: details = self._get_share_details(name) except Exception: LOG.error('Cannot manage share %s', name) raise lcfg = self.configuration input_export_loc = share['export_locations'][0]['path'] proto = share['share_proto'] self._verify_share_to_manage(name, details) # Get and verify share size: size_byte = details['quota'] size_gb = int(math.ceil(size_byte / float(units.Gi))) if size_byte % units.Gi != 0: # Round up the size: new_size_byte = size_gb * units.Gi free_space = self.zfssa.get_project_stats(lcfg.zfssa_pool, lcfg.zfssa_project) diff_space = int(new_size_byte - size_byte) if diff_space > free_space: msg = (_('Quota and reservation of share %(name)s need to be ' 'rounded up to %(size)d. But there is not enough ' 'space in the backend.') % {'name': name, 'size': size_gb}) LOG.error(msg) raise exception.ManageInvalidShare(reason=msg) size_byte = new_size_byte # Get and verify share export location, also update share properties. arg = { 'host': lcfg.zfssa_data_ip, 'mountpoint': input_export_loc, 'name': share['id'], } manage_args = self.default_args.copy() manage_args.update(self.share_args) # The ZFSSA share name has to be updated, as Manila generates a new # share id for each share to be managed. 
manage_args.update({'name': share['id'], 'quota': size_byte, 'reservation': size_byte}) if proto == 'NFS': export_loc = ("%(host)s:%(mountpoint)s/%(name)s" % arg) manage_args.update({'sharenfs': 'sec=sys', 'sharesmb': 'off'}) elif proto == 'CIFS': export_loc = ("\\\\%(host)s\\%(name)s" % arg) manage_args.update({'sharesmb': 'on', 'sharenfs': 'off'}) else: msg = _('Protocol %s is not supported.') % proto LOG.error(msg) raise exception.ManageInvalidShare(reason=msg) self.zfssa.modify_share(lcfg.zfssa_pool, lcfg.zfssa_project, name, manage_args) return {'size': size_gb, 'export_locations': export_loc} def _verify_share_to_manage(self, name, details): lcfg = self.configuration if lcfg.zfssa_manage_policy == 'loose': return if 'custom:manila_managed' not in details: msg = (_("Unknown if the share: %s to be managed is " "already being managed by Manila. Aborting manage " "share. Please add 'manila_managed' custom schema " "property to the share and set its value to False." "Alternatively, set Manila config property " "'zfssa_manage_policy' to 'loose' to remove this " "restriction.") % name) LOG.error(msg) raise exception.ManageInvalidShare(reason=msg) if details['custom:manila_managed'] is True: msg = (_("Share %s is already being managed by Manila.") % name) LOG.error(msg) raise exception.ManageInvalidShare(reason=msg) def unmanage(self, share): """Removes the specified share from Manila management. This task involves only changing the custom:manila_managed property to False. Current accesses to the share will be removed in ZFSSA, as these accesses are removed in Manila. 
""" name = share['id'] lcfg = self.configuration managed = 'custom:manila_managed' details = self._get_share_details(name) if (managed not in details) or (details[managed] is not True): msg = (_("Share %s is not being managed by the current Manila " "instance.") % name) LOG.error(msg) raise exception.UnmanageInvalidShare(reason=msg) arg = {'custom:manila_managed': False} if share['share_proto'] == 'NFS': arg.update({'sharenfs': 'off'}) elif share['share_proto'] == 'CIFS': arg.update({'sharesmb': 'off'}) else: msg = (_("ZFSSA does not support %s protocol.") % share['share_proto']) LOG.error(msg) raise exception.UnmanageInvalidShare(reason=msg) self.zfssa.modify_share(lcfg.zfssa_pool, lcfg.zfssa_project, name, arg) def _get_share_details(self, name): lcfg = self.configuration details = self.zfssa.get_share(lcfg.zfssa_pool, lcfg.zfssa_project, name) if not details: msg = (_("Share %s doesn't exist in ZFSSA.") % name) LOG.error(msg) raise exception.ShareResourceNotFound(share_id=name) return details def ensure_share(self, context, share, share_server=None): self._get_share_details(share['id']) def shrink_share(self, share, new_size, share_server=None): """Shrink a share to new_size.""" lcfg = self.configuration details = self.zfssa.get_share(lcfg.zfssa_pool, lcfg.zfssa_project, share['id']) used_space = details['space_data'] new_size_byte = int(new_size) * units.Gi if used_space > new_size_byte: LOG.error('%(used).1fGB of share %(id)s is already used. 
' 'Cannot shrink to %(newsize)dGB.', {'used': float(used_space) / units.Gi, 'id': share['id'], 'newsize': new_size}) raise exception.ShareShrinkingPossibleDataLoss( share_id=share['id']) arg = self.create_arg(new_size) self.zfssa.modify_share(lcfg.zfssa_pool, lcfg.zfssa_project, share['id'], arg) def extend_share(self, share, new_size, share_server=None): """Extend a share to new_size.""" lcfg = self.configuration free_space = self.zfssa.get_project_stats(lcfg.zfssa_pool, lcfg.zfssa_project) diff_space = int(new_size - share['size']) * units.Gi if diff_space > free_space: msg = (_('There is not enough free space in project %s') % (lcfg.zfssa_project)) LOG.error(msg) raise exception.ShareExtendingError(share_id=share['id'], reason=msg) arg = self.create_arg(new_size) self.zfssa.modify_share(lcfg.zfssa_pool, lcfg.zfssa_project, share['id'], arg) def allow_access(self, context, share, access, share_server=None): """Allows access to an NFS share for the specified IP.""" LOG.debug("ZFSSAShareDriver.allow_access: share=%s", share['id']) lcfg = self.configuration if share['share_proto'] == 'NFS': self.zfssa.allow_access_nfs(lcfg.zfssa_pool, lcfg.zfssa_project, share['id'], access) def deny_access(self, context, share, access, share_server=None): """Deny access to an NFS share for the specified IP.""" LOG.debug("ZFSSAShareDriver.deny_access: share=%s", share['id']) lcfg = self.configuration if share['share_proto'] == 'NFS': self.zfssa.deny_access_nfs(lcfg.zfssa_pool, lcfg.zfssa_project, share['id'], access) elif share['share_proto'] == 'CIFS': return def _update_share_stats(self): """Retrieve stats info from a share.""" backend_name = self.configuration.safe_get('share_backend_name') data = dict( share_backend_name=backend_name or self.__class__.__name__, vendor_name='Oracle', driver_version=self.VERSION, storage_protocol=self.PROTOCOL) lcfg = self.configuration (avail, used) = self.zfssa.get_pool_stats(lcfg.zfssa_pool) if avail: data['free_capacity_gb'] = int(avail) / 
units.Gi if used: total = int(avail) + int(used) data['total_capacity_gb'] = total / units.Gi else: data['total_capacity_gb'] = 0 else: data['free_capacity_gb'] = 0 data['total_capacity_gb'] = 0 super(ZFSSAShareDriver, self)._update_share_stats(data) def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/drivers_private_data.py0000664000175000017500000001370500000000000022202 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module provides possibility for share drivers to store private information related to common Manila models like Share or Snapshot. 
""" import abc from oslo_config import cfg from oslo_utils import importutils from oslo_utils import uuidutils from manila.db import api as db_api from manila.i18n import _ private_data_opts = [ cfg.StrOpt( 'drivers_private_storage_class', default='manila.share.drivers_private_data.SqlStorageDriver', help='The full class name of the Private Data Driver class to use.'), ] CONF = cfg.CONF class StorageDriver(metaclass=abc.ABCMeta): def __init__(self, context, backend_host): # Backend shouldn't access data stored by another backend self.backend_host = backend_host self.context = context @abc.abstractmethod def get(self, entity_id, key, default): """Backend implementation for DriverPrivateData.get() method. Should return all keys for given 'entity_id' if 'key' is None. Otherwise should return value for provided 'key'. If values for provided 'entity_id' or 'key' not found, should return 'default'. See DriverPrivateData.get() method for more details. """ @abc.abstractmethod def update(self, entity_id, details, delete_existing): """Backend implementation for DriverPrivateData.update() method. Should update details for given 'entity_id' with behaviour defined by 'delete_existing' boolean flag. See DriverPrivateData.update() method for more details. """ @abc.abstractmethod def delete(self, entity_id, key): """Backend implementation for DriverPrivateData.delete() method. Should return delete all keys if 'key' is None. Otherwise should delete value for provided 'key'. See DriverPrivateData.update() method for more details. 
""" class SqlStorageDriver(StorageDriver): def update(self, entity_id, details, delete_existing): return db_api.driver_private_data_update( self.context, entity_id, details, delete_existing ) def get(self, entity_id, key, default): return db_api.driver_private_data_get( self.context, entity_id, key, default ) def delete(self, entity_id, key): return db_api.driver_private_data_delete( self.context, entity_id, key ) class DriverPrivateData(object): def __init__(self, storage=None, *args, **kwargs): """Init method. :param storage: None or inheritor of StorageDriver abstract class :param config_group: Optional -- Config group used for loading settings :param context: Optional -- Current context :param backend_host: Optional -- Driver host """ config_group_name = kwargs.get('config_group') CONF.register_opts(private_data_opts, group=config_group_name) if storage is not None: self._storage = storage elif 'context' in kwargs and 'backend_host' in kwargs: if config_group_name: conf = getattr(CONF, config_group_name) else: conf = CONF storage_class = conf.drivers_private_storage_class cls = importutils.import_class(storage_class) self._storage = cls(kwargs.get('context'), kwargs.get('backend_host')) else: msg = _("You should provide 'storage' parameter or" " 'context' and 'backend_host' parameters.") raise ValueError(msg) def get(self, entity_id, key=None, default=None): """Get one, list or all key-value pairs. :param entity_id: Model UUID :param key: Key string or list of keys :param default: Default value for case when key(s) not found :returns: string or dict """ self._validate_entity_id(entity_id) return self._storage.get(entity_id, key, default) def update(self, entity_id, details, delete_existing=False): """Update or create specified key-value pairs. :param entity_id: Model UUID :param details: dict with key-value pairs data. Keys and values should be strings. 
:param delete_existing: boolean flag which determines behaviour for existing key-value pairs: True - remove all existing key-value pairs False (default) - leave as is """ self._validate_entity_id(entity_id) if not isinstance(details, dict): msg = (_("Provided details %s is not valid dict.") % details) raise ValueError(msg) return self._storage.update( entity_id, details, delete_existing) def delete(self, entity_id, key=None): """Delete one, list or all key-value pairs. :param entity_id: Model UUID :param key: Key string or list of keys """ self._validate_entity_id(entity_id) return self._storage.delete(entity_id, key) @staticmethod def _validate_entity_id(entity_id): if not uuidutils.is_uuid_like(entity_id): msg = (_("Provided entity_id %s is not valid UUID.") % entity_id) raise ValueError(msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/hook.py0000664000175000017500000001276200000000000016743 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module with hook interface for actions performed by share driver. All available hooks are placed in manila/share/hooks dir. Hooks are used by share services and can serve several use cases such as any kind of notification and performing additional backend-specific actions. 
""" import abc from oslo_config import cfg from oslo_log import log from manila import context as ctxt hook_options = [ cfg.BoolOpt( "enable_pre_hooks", default=False, help="Whether to enable pre hooks or not."), cfg.BoolOpt( "enable_post_hooks", default=False, help="Whether to enable post hooks or not."), cfg.BoolOpt( "enable_periodic_hooks", default=False, help="Whether to enable periodic hooks or not."), cfg.BoolOpt( "suppress_pre_hooks_errors", default=False, help="Whether to suppress pre hook errors (allow driver perform " "actions) or not."), cfg.BoolOpt( "suppress_post_hooks_errors", default=False, help="Whether to suppress post hook errors (allow driver's results " "to pass through) or not."), cfg.FloatOpt( "periodic_hooks_interval", default=300.0, help="Interval in seconds between execution of periodic hooks. " "Used when option 'enable_periodic_hooks' is set to True. " "Default is 300."), ] CONF = cfg.CONF CONF.register_opts(hook_options) LOG = log.getLogger(__name__) class HookBase(metaclass=abc.ABCMeta): def get_config_option(self, key): if self.configuration: return self.configuration.safe_get(key) return CONF.get(key) def __init__(self, configuration, host): self.host = host self.configuration = configuration if self.configuration: self.configuration.append_config_values(hook_options) self.pre_hooks_enabled = self.get_config_option("enable_pre_hooks") self.post_hooks_enabled = self.get_config_option("enable_post_hooks") self.periodic_hooks_enabled = self.get_config_option( "enable_periodic_hooks") self.suppress_pre_hooks_errors = self.get_config_option( "suppress_pre_hooks_errors") self.suppress_post_hooks_errors = self.get_config_option( "suppress_post_hooks_errors") def execute_pre_hook(self, context=None, func_name=None, *args, **kwargs): """Hook called before driver's action.""" if not self.pre_hooks_enabled: return LOG.debug("Running 'pre hook'.") context = context or ctxt.get_admin_context() try: pre_data = self._execute_pre_hook( 
context=context, func_name=func_name, *args, **kwargs) except Exception as e: if self.suppress_pre_hooks_errors: LOG.warning("\nSuppressed exception in pre hook. %s\n", e) pre_data = e else: raise return pre_data def execute_post_hook(self, context=None, func_name=None, pre_hook_data=None, driver_action_results=None, *args, **kwargs): """Hook called after driver's action.""" if not self.post_hooks_enabled: return LOG.debug("Running 'post hook'.") context = context or ctxt.get_admin_context() try: post_data = self._execute_post_hook( context=context, func_name=func_name, pre_hook_data=pre_hook_data, driver_action_results=driver_action_results, *args, **kwargs) except Exception as e: if self.suppress_post_hooks_errors: LOG.warning( "\nSuppressed exception in post hook. %s\n", e) post_data = e else: raise return post_data def execute_periodic_hook(self, context, periodic_hook_data, *args, **kwargs): """Hook called on periodic basis.""" if not self.periodic_hooks_enabled: return LOG.debug("Running 'periodic hook'.") context = context or ctxt.get_admin_context() return self._execute_periodic_hook( context, periodic_hook_data, *args, **kwargs) @abc.abstractmethod def _execute_pre_hook(self, context, func_name, *args, **kwargs): """Redefine this method for pre hook action.""" @abc.abstractmethod def _execute_post_hook(self, context, func_name, pre_hook_data, driver_action_results, *args, **kwargs): """Redefine this method for post hook action.""" @abc.abstractmethod def _execute_periodic_hook(self, context, periodic_hook_data, *args, **kwargs): """Redefine this method for periodic hook action.""" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9416714 manila-21.0.0/manila/share/hooks/0000775000175000017500000000000000000000000016544 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/share/hooks/__init__.py0000664000175000017500000000000000000000000020643 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/manager.py0000664000175000017500000120200500000000000017405 0ustar00zuulzuul00000000000000# Copyright (c) 2014 NetApp Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NAS share manager managers creating shares and access rights. **Related Flags** :share_driver: Used by :class:`ShareManager`. 
""" import copy import datetime import functools import hashlib import json from operator import xor from keystoneauth1 import loading as ks_loading from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import timeutils from manila.common import constants from manila import context from manila import coordination from manila.data import rpcapi as data_rpcapi from manila import exception from manila.i18n import _ from manila.keymgr import barbican as barbican_api from manila import manager from manila.message import api as message_api from manila.message import message_field from manila import quota from manila.share import access from manila.share import api from manila.share import configuration from manila.share import drivers_private_data from manila.share import migration from manila.share import rpcapi as share_rpcapi from manila.share import share_types from manila.share import snapshot_access from manila.share import utils as share_utils from manila.transfer import api as transfer_api from manila import utils profiler = importutils.try_import('osprofiler.profiler') LOG = log.getLogger(__name__) share_manager_opts = [ cfg.StrOpt('share_driver', default='manila.share.drivers.generic.GenericShareDriver', help='Driver to use for share creation.'), cfg.ListOpt('hook_drivers', default=[], help='Driver(s) to perform some additional actions before and ' 'after share driver actions and on a periodic basis. ' 'Default is [].'), cfg.BoolOpt('delete_share_server_with_last_share', default=False, help='Whether share servers will ' 'be deleted on deletion of the last share.'), cfg.BoolOpt('unmanage_remove_access_rules', default=False, help='If set to True, then manila will deny access and remove ' 'all access rules on share unmanage.' 
'If set to False - nothing will be changed.'), cfg.BoolOpt('automatic_share_server_cleanup', default=True, help='If set to True, then Manila will delete all share ' 'servers which were unused more than specified time .' 'If set to False - automatic deletion of share servers ' 'will be disabled.'), cfg.IntOpt('unused_share_server_cleanup_interval', default=10, help='Unallocated share servers reclamation time interval ' '(minutes). Minimum value is 10 minutes, maximum is 720 ' 'minutes. The reclamation function is run every ' '10 minutes and delete share servers which were unused ' 'more than unused_share_server_cleanup_interval option ' 'defines. This value reflects the shortest time Manila ' 'will wait for a share server to go unutilized before ' 'deleting it.', min=10, max=720), cfg.IntOpt('replica_state_update_interval', default=300, help='This value, specified in seconds, determines how often ' 'the share manager will poll for the health ' '(replica_state) of each replica instance.'), cfg.IntOpt('migration_driver_continue_update_interval', default=60, help='This value, specified in seconds, determines how often ' 'the share manager will poll the driver to perform the ' 'next step of migration in the storage backend, for a ' 'migrating share.'), cfg.IntOpt('server_migration_driver_continue_update_interval', default=900, help='This value, specified in seconds, determines how often ' 'the share manager will poll the driver to perform the ' 'next step of migration in the storage backend, for a ' 'migrating share server.'), cfg.BoolOpt('server_migration_extend_neutron_network', default=False, help='If set to True, neutron network are extended to ' 'destination host during share server migration. 
This ' 'option should only be enabled if using ' 'NeutronNetworkPlugin or its derivatives and when ' 'multiple bindings of Manila ports are supported by ' 'Neutron ML2 plugin.'), cfg.IntOpt('share_usage_size_update_interval', default=300, help='This value, specified in seconds, determines how often ' 'the share manager will poll the driver to update the ' 'share usage size in the storage backend, for shares in ' 'that backend.'), cfg.BoolOpt('enable_gathering_share_usage_size', default=False, help='If set to True, share usage size will be polled for in ' 'the interval specified with ' '"share_usage_size_update_interval". Usage data can be ' 'consumed by telemetry integration. If telemetry is not ' 'configured, this option must be set to False. ' 'If set to False - gathering share usage size will be' ' disabled.'), cfg.BoolOpt('share_service_inithost_offload', default=False, help='Offload pending share ensure during ' 'share service startup'), cfg.IntOpt('check_for_expired_shares_in_recycle_bin_interval', default=3600, help='This value, specified in seconds, determines how often ' 'the share manager will check for expired shares and ' 'delete them from the Recycle bin.'), cfg.IntOpt('check_for_expired_transfers', default=300, help='This value, specified in seconds, determines how often ' 'the share manager will check for expired transfers and ' 'destroy them and roll back share state.'), cfg.IntOpt('driver_backup_continue_update_interval', default=60, help='This value, specified in seconds, determines how often ' 'the share manager will poll to perform the next steps ' 'of backup such as fetch the progress of backup.'), cfg.IntOpt('driver_restore_continue_update_interval', default=60, help='This value, specified in seconds, determines how often ' 'the share manager will poll to perform the next steps ' 'of restore such as fetch the progress of restore.'), cfg.IntOpt('periodic_deferred_delete_interval', default=300, help='This value, specified in seconds, determines 
how often ' 'the share manager will try to delete the share and share ' 'snapshots in backend driver.'), ] ks_opts = [ cfg.StrOpt('auth_url', help='Keystone authentication URL.'), ] CONF = cfg.CONF CONF.register_opts(share_manager_opts) CONF.import_opt('periodic_hooks_interval', 'manila.share.hook') CONF.import_opt('periodic_interval', 'manila.service') KEYSTONE_AUTHTOKEN_GROUP = 'keystone_authtoken' CONF.register_opts(ks_opts, KEYSTONE_AUTHTOKEN_GROUP) ks_loading.register_auth_conf_options(CONF, KEYSTONE_AUTHTOKEN_GROUP) keystone_url = getattr(CONF.keystone_authtoken, 'auth_url') # Drivers that need to change module paths or class names can add their # old/new path here to maintain backward compatibility. MAPPING = { 'manila.share.drivers.netapp.cluster_mode.NetAppClusteredShareDriver': 'manila.share.drivers.netapp.common.NetAppDriver', 'manila.share.drivers.hp.hp_3par_driver.HP3ParShareDriver': 'manila.share.drivers.hpe.hpe_3par_driver.HPE3ParShareDriver', 'manila.share.drivers.hitachi.hds_hnas.HDSHNASDriver': 'manila.share.drivers.hitachi.hnas.driver.HitachiHNASDriver', 'manila.share.drivers.glusterfs_native.GlusterfsNativeShareDriver': 'manila.share.drivers.glusterfs.glusterfs_native.' 'GlusterfsNativeShareDriver', 'manila.share.drivers.emc.driver.EMCShareDriver': 'manila.share.drivers.dell_emc.driver.EMCShareDriver', 'manila.share.drivers.cephfs.cephfs_native.CephFSNativeDriver': 'manila.share.drivers.cephfs.driver.CephFSDriver', } QUOTAS = quota.QUOTAS def locked_share_replica_operation(operation): """Lock decorator for share replica operations. Takes a named lock prior to executing the operation. The lock is named with the id of the share to which the replica belongs. Intended use: If a replica operation uses this decorator, it will block actions on all share replicas of the share until the named lock is free. This is used to protect concurrent operations on replicas of the same share e.g. 
promote ReplicaA while deleting ReplicaB, both belonging to the same share. """ def wrapped(*args, **kwargs): share_id = kwargs.get('share_id') @coordination.synchronized( 'locked-share-replica-operation-for-share-%s' % share_id) def locked_replica_operation(*_args, **_kwargs): return operation(*_args, **_kwargs) return locked_replica_operation(*args, **kwargs) return wrapped def locked_share_network_operation(operation): """Lock decorator for share network operations. Takes a named lock prior to executing the operation. The lock is named with the id of the share network. """ def wrapped(*args, **kwargs): share_network_id = kwargs.get('share_network_id') @coordination.synchronized( 'locked-share-network-operation-%s' % share_network_id) def locked_network_operation(*_args, **_kwargs): return operation(*_args, **_kwargs) return locked_network_operation(*args, **kwargs) return wrapped def add_hooks(f): """Hook decorator to perform action before and after a share method call The hook decorator can perform actions before some share driver methods calls and after a call with results of driver call and preceding hook call. 
""" @functools.wraps(f) def wrapped(self, *args, **kwargs): if not self.hooks: return f(self, *args, **kwargs) pre_hook_results = [] for hook in self.hooks: pre_hook_results.append( hook.execute_pre_hook( func_name=f.__name__, *args, **kwargs)) wrapped_func_results = f(self, *args, **kwargs) for i, hook in enumerate(self.hooks): hook.execute_post_hook( func_name=f.__name__, driver_action_results=wrapped_func_results, pre_hook_data=pre_hook_results[i], *args, **kwargs) return wrapped_func_results return wrapped class ShareManager(manager.SchedulerDependentManager): """Manages NAS storages.""" RPC_API_VERSION = '1.30' def __init__(self, share_driver=None, service_name=None, *args, **kwargs): """Load the driver from args, or from flags.""" self.configuration = configuration.Configuration( share_manager_opts, config_group=service_name) super(ShareManager, self).__init__(service_name='share', *args, **kwargs) if not share_driver: share_driver = self.configuration.share_driver if share_driver in MAPPING: msg_args = {'old': share_driver, 'new': MAPPING[share_driver]} LOG.warning("Driver path %(old)s is deprecated, update your " "configuration to the new path %(new)s", msg_args) share_driver = MAPPING[share_driver] ctxt = context.get_admin_context() private_storage = drivers_private_data.DriverPrivateData( context=ctxt, backend_host=self.host, config_group=self.configuration.config_group ) self.driver = importutils.import_object( share_driver, private_storage=private_storage, configuration=self.configuration, ) backend_availability_zone = self.driver.configuration.safe_get( 'backend_availability_zone') self.availability_zone = ( backend_availability_zone or CONF.storage_availability_zone ) self.access_helper = access.ShareInstanceAccess(self.db, self.driver) self.snapshot_access_helper = ( snapshot_access.ShareSnapshotInstanceAccess(self.db, self.driver)) self.migration_wait_access_rules_timeout = ( CONF.migration_wait_access_rules_timeout) self.message_api = 
message_api.API() self.share_api = api.API() self.transfer_api = transfer_api.API() if CONF.profiler.enabled and profiler is not None: self.driver = profiler.trace_cls("driver")(self.driver) self.hooks = [] self._init_hook_drivers() self.service_id = None def _init_hook_drivers(self): # Try to initialize hook driver(s). hook_drivers = self.configuration.safe_get("hook_drivers") for hook_driver in hook_drivers: self.hooks.append( importutils.import_object( hook_driver, configuration=self.configuration, host=self.host, ) ) def _ensure_share_instance_has_pool(self, ctxt, share_instance): pool = share_utils.extract_host(share_instance['host'], 'pool') if pool is None: # No pool name encoded in host, so this is a legacy # share created before pool is introduced, ask # driver to provide pool info if it has such # knowledge and update the DB. try: pool = self.driver.get_pool(share_instance) except Exception: LOG.exception("Failed to fetch pool name for share: " "%(share)s.", {'share': share_instance['id']}) return if pool: new_host = share_utils.append_host( share_instance['host'], pool) self.db.share_instance_update( ctxt, share_instance['id'], {'host': new_host}) return pool @add_hooks def init_host(self, service_id=None): """Initialization for a standalone service.""" self.service_id = service_id ctxt = context.get_admin_context() driver_host_pair = "{}@{}".format( self.driver.__class__.__name__, self.host) # we want to retry to setup the driver. In case of a multi-backend # scenario, working backends are usable and the non-working ones (where # do_setup() or check_for_setup_error() fail) retry. 
@utils.retry(interval=2, backoff_rate=2, infinite=True, backoff_sleep_max=600) def _driver_setup(): self.driver.initialized = False LOG.debug("Start initialization of driver: '%s'", driver_host_pair) try: self.driver.do_setup(ctxt) self.driver.check_for_setup_error() except Exception: LOG.exception("Error encountered during initialization of " "driver %s", driver_host_pair) raise else: self.driver.initialized = True _driver_setup() if (self.driver.driver_handles_share_servers and hasattr(self.driver, 'service_instance_manager')): (self.driver.service_instance_manager.network_helper. setup_connectivity_with_service_instances()) self.ensure_driver_resources(ctxt) self.publish_service_capabilities(ctxt) LOG.info("Finished initialization of driver: '%(driver)s" "@%(host)s'", {"driver": self.driver.__class__.__name__, "host": self.host}) def is_service_ready(self): """Return if Manager is ready to accept requests. This is to inform Service class that in case of manila driver initialization failure the manager is actually down and not ready to accept any requests. 
""" return self.driver.initialized def ensure_driver_resources(self, ctxt, skip_backend_info_check=False): update_instances_status = CONF.update_shares_status_on_ensure old_backend_info = self.db.backend_info_get(ctxt, self.host) old_backend_info_hash = (old_backend_info.get('info_hash') if old_backend_info is not None else None) new_backend_info = None new_backend_info_hash = None backend_info_implemented = True update_share_instances = [] if not skip_backend_info_check: try: new_backend_info = self.driver.get_backend_info(ctxt) except Exception as e: if not isinstance(e, NotImplementedError): LOG.exception( "The backend %(host)s could not get backend info.", {'host': self.host}) raise else: backend_info_implemented = False LOG.debug( ("The backend %(host)s does not support get backend" " info method."), {'host': self.host}) if new_backend_info: new_backend_info_hash = hashlib.sha1( str(sorted(new_backend_info.items())).encode( 'utf-8')).hexdigest() if ((old_backend_info_hash == new_backend_info_hash and backend_info_implemented) and not skip_backend_info_check): LOG.debug( ("Ensure shares is being skipped because the %(host)s's " "old backend info is the same as its new backend info."), {'host': self.host}) return share_instances = self.db.share_instance_get_all_by_host( ctxt, self.host) LOG.debug("Re-exporting %s shares", len(share_instances)) for share_instance in share_instances: share_ref = self.db.share_get(ctxt, share_instance['share_id']) if share_ref.is_busy: LOG.info( "Share instance %(id)s: skipping export, " "because it is busy with an active task: %(task)s.", {'id': share_instance['id'], 'task': share_ref['task_state']}, ) continue # If the share's status is 'ensuring', we must allow re-doing the # ensuring operation otherwise it will be stuck if (share_instance['status'] not in [constants.STATUS_AVAILABLE, constants.STATUS_ENSURING]): LOG.info( "Share instance %(id)s: skipping export, " "because it has '%(status)s' status.", {'id': 
share_instance['id'], 'status': share_instance['status']}, ) continue self._ensure_share_instance_has_pool(ctxt, share_instance) share_instance = self.db.share_instance_get( ctxt, share_instance['id'], with_share_data=True) share_instance_dict = self._get_share_instance_dict( ctxt, share_instance) update_share_instances.append(share_instance_dict) do_service_status_update = False if update_share_instances: # No reason to update the shares status if nothing will be done. do_service_status_update = True service = self.db.service_get_by_args( ctxt, self.host, 'manila-share') self.db.service_update(ctxt, service['id'], {'ensuring': True}) if update_instances_status: for instance in update_share_instances: self.db.share_instance_update( ctxt, instance['id'], {'status': constants.STATUS_ENSURING} ) try: update_share_instances = self.driver.ensure_shares( ctxt, update_share_instances) or {} except Exception as e: if not isinstance(e, NotImplementedError): LOG.exception("Caught exception trying ensure " "share instances.") else: for share_instance in update_share_instances: if CONF.share_service_inithost_offload: self._add_to_threadpool(self._ensure_share, ctxt, share_instance) else: self._ensure_share(ctxt, share_instance) if new_backend_info: self.db.backend_info_update( ctxt, self.host, new_backend_info_hash) shares_with_metadata_already_updated = set() for share_instance in share_instances: if share_instance['id'] not in update_share_instances: continue share_instance_update_dict = ( update_share_instances[share_instance['id']] ) backend_provided_status = share_instance_update_dict.get('status') if backend_provided_status: self.db.share_instance_update( ctxt, share_instance['id'], {'status': backend_provided_status, 'host': share_instance['host']} ) metadata_updates = share_instance_update_dict.get('metadata') if metadata_updates: share_id = share_instance['share_id'] # NOTE(carloss): Multiple instances might exist, and in such # cases, a single share metadata update 
would be enough. if share_id not in shares_with_metadata_already_updated: self.db.share_metadata_update( ctxt, share_id, metadata_updates, False) shares_with_metadata_already_updated.add(share_id) update_export_locations = ( share_instance_update_dict.get('export_locations') ) if update_export_locations: self.db.export_locations_update( ctxt, share_instance['id'], update_export_locations) share_server = self._get_share_server(ctxt, share_instance) driver_has_to_reapply_access_rules = ( share_instance_update_dict.get('reapply_access_rules') is True ) share_instance_has_pending_rules = ( share_instance['access_rules_status'] != constants.STATUS_ACTIVE ) if (driver_has_to_reapply_access_rules or share_instance_has_pending_rules): try: # Cast any existing 'applying' rules to 'new' self.access_helper.reset_rules_to_queueing_states( ctxt, share_instance['id'], reset_active=driver_has_to_reapply_access_rules) self.access_helper.update_access_rules( ctxt, share_instance['id'], share_server=share_server) except Exception: LOG.exception( ("Unexpected error occurred while updating access " "rules for share instance %(s_id)s."), {'s_id': share_instance['id']}, ) snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( ctxt, {'share_instance_ids': share_instance['id']}, with_share_data=True)) for snap_instance in snapshot_instances: rules = ( self.db. 
share_snapshot_access_get_all_for_snapshot_instance( ctxt, snap_instance['id'])) # NOTE(ganso): We don't invoke update_access for snapshots if # we don't have invalid rules or pending updates if any(r['state'] in (constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_QUEUED_TO_DENY, constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_QUEUED_TO_APPLY) for r in rules): try: self.snapshot_access_helper.update_access_rules( ctxt, snap_instance['id'], share_server) except Exception: LOG.exception( "Unexpected error occurred while updating " "access rules for snapshot instance %s.", snap_instance['id']) if not backend_provided_status and update_instances_status: self.db.share_instance_update( ctxt, share_instance['id'], {'status': constants.STATUS_AVAILABLE} ) if do_service_status_update: self.db.service_update(ctxt, service['id'], {'ensuring': False}) def _ensure_share(self, ctxt, share_instance): export_locations = None try: export_locations = self.driver.ensure_share( ctxt, share_instance, share_server=share_instance['share_server']) except Exception: LOG.exception("Caught exception trying ensure " "share '%(s_id)s'.", {'s_id': share_instance['id']}) if export_locations: self.db.export_locations_update( ctxt, share_instance['id'], export_locations) def _check_share_server_backend_limits( self, context, available_share_servers, share_instance=None): max_shares_limit = self.driver.max_shares_per_share_server max_server_size = self.driver.max_share_server_size if max_server_size == max_shares_limit == -1: return available_share_servers for ss in available_share_servers[:]: share_instances = self.db.share_instance_get_all_by_share_server( context, ss['id'], with_share_data=True) if not share_instances: continue share_instance_ids = [si['id'] for si in share_instances] share_snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {"share_instance_ids": share_instance_ids}, with_share_data=True)) server_instances_size_sum = 0 num_instances 
= 0 server_instances_size_sum += sum( instance['size'] for instance in share_instances) server_instances_size_sum += sum( instance['size'] for instance in share_snapshot_instances) num_instances += len(share_instances) # NOTE(carloss): If a share instance was not provided, means that # a share group is being requested and there aren't shares to # be added to to the sum yet. if share_instance: server_instances_size_sum += share_instance['size'] num_instances += 1 achieved_gigabytes_limit = ( max_server_size != -1 and ( server_instances_size_sum > max_server_size)) achieved_instances_limit = num_instances > max_shares_limit > -1 providing_server_for_share_migration = ( share_instance and share_instance['status'] == constants.STATUS_MIGRATING_TO) src_server_id_equals_current_iteration = False if providing_server_for_share_migration: share = self.db.share_get(context, share_instance['share_id']) src_instance_id, dest_instance_id = ( self.share_api.get_migrating_instances(share)) src_instance = self.db.share_instance_get( context, src_instance_id) src_server_id_equals_current_iteration = ( src_instance['share_server_id'] == ss['id']) if (not src_server_id_equals_current_iteration and ( achieved_gigabytes_limit or achieved_instances_limit)): available_share_servers.remove(ss) return available_share_servers def _provide_share_server_for_share(self, context, share_network_id, share_instance, snapshot=None, share_group=None, create_on_backend=True): """Gets or creates share_server and updates share with its id. Active share_server can be deleted if there are no dependent shares on it. So we need avoid possibility to delete share_server in time gap between reaching active state for share_server and setting up share_server_id for share. It is possible, for example, with first share creation, which starts share_server creation. For this purpose used shared lock between this method and the one with deletion of share_server. 
:param context: Current context :param share_network_id: Share network where existing share server should be found or created. If share_network_id is None method use share_network_id from provided snapshot. :param share_instance: Share Instance model :param snapshot: Optional -- Snapshot model :param create_on_backend: Boolean. If True, driver will be asked to create the share server if no share server is available. :returns: dict, dict -- first value is share_server, that has been chosen for share schedule. Second value is share updated with share_server_id. """ if not (share_network_id or snapshot): msg = _("'share_network_id' parameter or 'snapshot'" " should be provided. ") raise ValueError(msg) def error(msg, *args): LOG.error(msg, *args) self.db.share_instance_update(context, share_instance['id'], {'status': constants.STATUS_ERROR}) parent_share_server = None parent_share_same_dest = False if snapshot: parent_share_server_id = ( snapshot['share']['instance']['share_server_id']) try: parent_share_server = self.db.share_server_get( context, parent_share_server_id) except exception.ShareServerNotFound: with excutils.save_and_reraise_exception(): error("Parent share server %s does not exist.", parent_share_server_id) if parent_share_server['status'] != constants.STATUS_ACTIVE: error_params = { 'id': parent_share_server_id, 'status': parent_share_server['status'], } msg = _("Parent share server %(id)s has invalid status " "'%(status)s'.") error(msg, error_params) raise exception.InvalidShareServer(reason=msg % error_params) parent_share_same_dest = (snapshot['share']['instance']['host'] == share_instance['host']) share_network_subnets = None if share_network_id: share_network_subnets = ( self.db.share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, availability_zone_id=share_instance.get( 'availability_zone_id'))) if not share_network_subnets: raise exception.ShareNetworkSubnetNotFound( share_network_subnet_id=None) elif 
parent_share_server: share_network_subnets = ( parent_share_server['share_network_subnets']) # NOTE(felipe_rodrigues): it can retrieve the available share # servers using one single subnet_id from the availability zone # subnets, because if the share server has one, it will have # all subnets on that availability zone. share_network_subnet_id = share_network_subnets[0]['id'] def get_available_share_servers(): if parent_share_server and parent_share_same_dest: return [parent_share_server] else: return ( self.db .share_server_get_all_by_host_and_share_subnet_valid( context, self.host, share_network_subnet_id) ) @utils.synchronized("share_manager_%s" % share_network_subnet_id, external=True) def _wrapped_provide_share_server_for_share(): try: available_share_servers = get_available_share_servers() except exception.ShareServerNotFound: available_share_servers = None # creating from snapshot in the same host must reuse the server, # so it ignores the server limits. if available_share_servers and not parent_share_same_dest: available_share_servers = ( self._check_share_server_backend_limits( context, available_share_servers, share_instance=share_instance)) encryption_key_ref = share_instance.get('encryption_key_ref') compatible_share_server = None if available_share_servers: try: compatible_share_server = ( self.driver.choose_share_server_compatible_with_share( context, available_share_servers, share_instance, snapshot=snapshot.instance if snapshot else None, share_group=share_group, encryption_key_ref=encryption_key_ref, ) ) except Exception as e: with excutils.save_and_reraise_exception(): error("Cannot choose compatible share server: %s", e) share_server_should_be_encrypted = ( (encryption_key_ref and self.driver.encryption_support) and ("share_server" in self.driver.encryption_support)) app_cred = None if not compatible_share_server: if share_server_should_be_encrypted: # Create secret_ref ACL for Barbican User try: barbican_api.create_secret_access(context, 
encryption_key_ref) LOG.debug('Created Barbican ACL for encryption key ' 'reference %s.', encryption_key_ref) except Exception as e: self._delete_encryption_keys_quota(context) with excutils.save_and_reraise_exception(): error("Cannot create ACL for Barbican user %s", e) # Create application credentials for barbican user try: app_cred = ( barbican_api.create_application_credentials( context, encryption_key_ref).to_dict()) LOG.debug("Created app cred id %s", app_cred['id']) except Exception as e: self._delete_encryption_keys_quota(context) barbican_api.delete_secret_access(context, encryption_key_ref) with excutils.save_and_reraise_exception(): error("Cannot create application credential: " "%s", e) compatible_share_server = self.db.share_server_create( context, { 'host': self.host, 'share_network_subnets': share_network_subnets, 'status': constants.STATUS_CREATING, 'security_service_update_support': ( self.driver.security_service_update_support), 'network_allocation_update_support': ( self.driver.network_allocation_update_support), 'share_replicas_migration_support': ( self.driver.share_replicas_migration_support), 'encryption_key_ref': ( encryption_key_ref if share_server_should_be_encrypted else None), 'application_credential_id': ( app_cred['id'] if app_cred else None), } ) else: if share_server_should_be_encrypted: # Get application credentials for barbican user try: app_cred = barbican_api.get_application_credentials( context, compatible_share_server.get( 'application_credential_id')).to_dict() LOG.debug('Got app cred id %s', app_cred['id']) except Exception as e: with excutils.save_and_reraise_exception(): error("Cannot get application credential: %s", e) msg = ("Using share_server %(share_server)s for share instance" " %(share_instance_id)s") LOG.debug(msg, { 'share_server': compatible_share_server['id'], 'share_instance_id': share_instance['id'] }) share_instance_ref = self.db.share_instance_update( context, share_instance['id'], {'share_server_id': 
compatible_share_server['id']}, with_share_data=True ) if create_on_backend: metadata = self._build_server_metadata( context, share_instance['host'], share_instance['share_type_id'], app_cred=app_cred, encryption_key_ref=encryption_key_ref ) compatible_share_server = ( self._create_share_server_in_backend( context, compatible_share_server, metadata)) return compatible_share_server, share_instance_ref return _wrapped_provide_share_server_for_share() def _build_server_metadata(self, context, host, share_type_id, app_cred=None, encryption_key_ref=None): encryption_key_href = None if encryption_key_ref: encryption_key_href = barbican_api.get_secret_href( context, encryption_key_ref) LOG.debug("Generated encryption_key_href %s for backend share " "server.", encryption_key_href) metadata = { 'request_host': host, 'share_type_id': share_type_id, 'encryption_key_ref': encryption_key_href, 'keystone_url': keystone_url, } if app_cred: metadata.update({ 'application_credential_id': app_cred.get('id'), 'application_credential_secret': encryption_key_ref, }) return metadata def _provide_share_server_for_migration(self, context, source_share_server, new_share_network_id, availability_zone_id, destination_host, create_on_backend=True, server_metadata=None): """Gets or creates share_server for a migration procedure. Active share_server can be deleted if there are no dependent shares on it. So we need avoid possibility to delete share_server in time gap between reaching active state for share_server and setting up share_server_id for share. It is possible, for example, with first share creation, which starts share_server creation. For this purpose used shared lock between this method and the one with deletion of share_server. :param context: Current context :param source_share_server: Share server model that will be migrated. :param new_share_network_id: Share network where existing share server should be found or created. 
:param availability_zone_id: Id of the availability zone where the new share server will be placed. :param destination_host: The destination host where the new share server will be created or retrieved. :param create_on_backend: Boolean. If True, driver will be asked to create the share server if no share server is available. :param server_metadata: dict. Holds some important information that can help drivers whether to create a new share server or not. :returns: Share server that has been chosen for share server migration. """ share_network_subnets = ( self.db.share_network_subnets_get_all_by_availability_zone_id( context, new_share_network_id, availability_zone_id=availability_zone_id)) if not share_network_subnets: raise exception.ShareNetworkSubnetNotFound( share_network_subnet_id=None) server_metadata = {} if not server_metadata else server_metadata @utils.synchronized( "share_manager_%s" % share_network_subnets[0]['id'], external=True) def _wrapped_provide_share_server_for_migration(): destination_share_server = self.db.share_server_create( context, { 'host': self.host, 'share_network_subnets': share_network_subnets, 'status': constants.STATUS_CREATING, 'security_service_update_support': ( self.driver.security_service_update_support), 'network_allocation_update_support': ( self.driver.network_allocation_update_support), 'share_replicas_migration_support': ( self.driver.share_replicas_migration_support), } ) msg = ("Using share_server %(share_server)s as destination for " "migration.") LOG.debug(msg, { 'share_server': destination_share_server['id'], }) if create_on_backend: # NOTE(carloss): adding some information about the request, so # backends that support share server migration and need to know # if the request share server is from a share server migration # request can use this metadata to take actions. 
server_metadata['migration_destination'] = True server_metadata['request_host'] = destination_host server_metadata['source_share_server'] = ( source_share_server) destination_share_server = ( self._create_share_server_in_backend( context, destination_share_server, metadata=server_metadata)) return destination_share_server return _wrapped_provide_share_server_for_migration() def _create_share_server_in_backend(self, context, share_server, metadata): """Perform setup_server on backend :param metadata: A dictionary, to be passed to driver's setup_server() """ if share_server['status'] == constants.STATUS_CREATING: # Create share server on backend with data from db. share_server = self._setup_server(context, share_server, metadata) LOG.info("Share server created successfully.") else: LOG.info("Using preexisting share server: " "'%(share_server_id)s'", {'share_server_id': share_server['id']}) return share_server def create_share_server( self, context, share_server_id, share_instance_id): """Invoked to create a share server in this backend. This method is invoked to create the share server defined in the model obtained by the supplied id. :param context: The 'context.RequestContext' object for the request. :param share_server_id: The id of the server to be created. :param share_instance_id: The id of the share instance """ share_server = self.db.share_server_get(context, share_server_id) share = self.db.share_instance_get( context, share_instance_id, with_share_data=True) metadata = self._build_server_metadata(context, share['host'], share['share_type_id']) self._create_share_server_in_backend(context, share_server, metadata) def provide_share_server(self, context, share_instance_id, share_network_id, snapshot_id=None): """Invoked to provide a compatible share server. 
This method is invoked to find a compatible share server among the existing ones or create a share server database instance with the share server properties that will be used to create the share server later. :param context: The 'context.RequestContext' object for the request. :param share_instance_id: The id of the share instance whose model attributes will be used to provide the share server. :param share_network_id: The id of the share network the share server to be provided has to be related to. :param snapshot_id: The id of the snapshot to be used to obtain the share server if applicable. :return: The id of the share server that is being provided. """ share_instance = self.db.share_instance_get(context, share_instance_id, with_share_data=True) snapshot_ref = None if snapshot_id: snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) share_group_ref = None if share_instance.get('share_group_id'): share_group_ref = self.db.share_group_get( context, share_instance['share_group_id']) share_server, share_instance = self._provide_share_server_for_share( context, share_network_id, share_instance, snapshot_ref, share_group_ref, create_on_backend=False) return share_server['id'] def _provide_share_server_for_share_group(self, context, share_network_id, share_network_subnets, share_group_ref, share_group_snapshot=None): """Gets or creates share_server and updates share group with its id. Active share_server can be deleted if there are no shares or share groups dependent on it. So we need avoid possibility to delete share_server in time gap between reaching active state for share_server and setting up share_server_id for share group. It is possible, for example, with first share group creation, which starts share_server creation. For this purpose used shared lock between this method and the one with deletion of share_server. :param context: Current context :param share_network_id: Share network where existing share server should be found or created. 
:param share_network_subnets: Share network subnets where existing share server should be found or created. :param share_group_ref: Share Group model :param share_group_snapshot: Optional -- ShareGroupSnapshot model. If supplied, driver will use it to choose the appropriate share server. :returns: dict, dict -- first value is share_server, that has been chosen for share group schedule. Second value is share group updated with share_server_id. """ if not share_network_id: msg = _("'share_network_id' parameter should be provided. ") raise exception.InvalidInput(reason=msg) def error(msg, *args): LOG.error(msg, *args) self.db.share_group_update( context, share_group_ref['id'], {'status': constants.STATUS_ERROR}) @utils.synchronized("share_manager_%s" % share_network_id, external=True) def _wrapped_provide_share_server_for_share_group(): # NOTE(felipe_rodrigues): it can retrieve the available share # servers using one single subnet_id from the availability zone # subnets, because if the share server has one, it will have # all subnets on that availability zone. 
share_network_subnet_id = share_network_subnets[0]['id'] try: available_share_servers = ( self.db .share_server_get_all_by_host_and_share_subnet_valid( context, self.host, share_network_subnet_id)) except exception.ShareServerNotFound: available_share_servers = None compatible_share_server = None if available_share_servers: available_share_servers = ( self._check_share_server_backend_limits( context, available_share_servers)) choose_share_server = ( self.driver.choose_share_server_compatible_with_share_group) if available_share_servers: try: compatible_share_server = choose_share_server( context, available_share_servers, share_group_ref, share_group_snapshot=share_group_snapshot, ) except Exception as e: with excutils.save_and_reraise_exception(): error("Cannot choose compatible share-server: %s", e) if not compatible_share_server: compatible_share_server = self.db.share_server_create( context, { 'host': self.host, 'share_network_subnets': share_network_subnets, 'status': constants.STATUS_CREATING, 'security_service_update_support': ( self.driver.security_service_update_support), 'network_allocation_update_support': ( self.driver.network_allocation_update_support), 'share_replicas_migration_support': ( self.driver.share_replicas_migration_support), } ) msg = ("Using share_server %(share_server)s for share " "group %(group_id)s") LOG.debug(msg, { 'share_server': compatible_share_server['id'], 'group_id': share_group_ref['id'] }) updated_share_group = self.db.share_group_update( context, share_group_ref['id'], {'share_server_id': compatible_share_server['id']}, ) if compatible_share_server['status'] == constants.STATUS_CREATING: # Create share server on backend with data from db. 
metadata = self._build_server_metadata( context, share_group_ref['host'], share_group_ref['share_types'][0]['share_type_id']) compatible_share_server = self._setup_server( context, compatible_share_server, metadata) LOG.info("Share server created successfully.") else: LOG.info("Used preexisting share server " "'%(share_server_id)s'", {'share_server_id': compatible_share_server['id']}) return compatible_share_server, updated_share_group return _wrapped_provide_share_server_for_share_group() def _get_share_server(self, context, share_instance): if share_instance['share_server_id']: return self.db.share_server_get( context, share_instance['share_server_id']) else: return None @utils.require_driver_initialized def connection_get_info(self, context, share_instance_id): share_instance = self.db.share_instance_get( context, share_instance_id, with_share_data=True) share_server = None if share_instance.get('share_server_id'): share_server = self.db.share_server_get( context, share_instance['share_server_id']) return self.driver.connection_get_info(context, share_instance, share_server) def _migration_start_driver( self, context, share_ref, src_share_instance, dest_host, writable, preserve_metadata, nondisruptive, preserve_snapshots, new_share_network_id, new_az_id, new_share_type_id): share_server = self._get_share_server(context, src_share_instance) request_spec, dest_share_instance = ( self.share_api.create_share_instance_and_get_request_spec( context, share_ref, new_az_id, None, dest_host, new_share_network_id, new_share_type_id)) self.db.share_instance_update( context, dest_share_instance['id'], {'status': constants.STATUS_MIGRATING_TO}) # refresh and obtain proxified properties dest_share_instance = self.db.share_instance_get( context, dest_share_instance['id'], with_share_data=True) helper = migration.ShareMigrationHelper( context, self.db, self.access_helper) try: if dest_share_instance['share_network_id']: # NOTE(carloss): For a nondisruptive migration request, we 
must # not change the share server, otherwise the share's export # location will change, disconnecting the user. Disruptive # migration requests the share server from the driver. if nondisruptive: dest_share_server = self._get_share_server_dict( context, share_server) dest_share_instance = self.db.share_instance_update( context, dest_share_instance['id'], {'share_server_id': dest_share_server['id']}, with_share_data=True ) else: rpcapi = share_rpcapi.ShareAPI() # NOTE(ganso): Obtaining the share_server_id asynchronously # so we can wait for it to be ready. dest_share_server_id = rpcapi.provide_share_server( context, dest_share_instance, dest_share_instance['share_network_id']) rpcapi.create_share_server( context, dest_share_instance, dest_share_server_id) dest_share_server = helper.wait_for_share_server( dest_share_server_id) else: dest_share_server = None compatibility = self.driver.migration_check_compatibility( context, src_share_instance, dest_share_instance, share_server, dest_share_server) if not compatibility.get('compatible'): msg = _("Destination host %(host)s is not compatible with " "share %(share)s's source backend for driver-assisted " "migration.") % { 'host': dest_host, 'share': share_ref['id'], } raise exception.ShareMigrationFailed(reason=msg) if (not compatibility.get('nondisruptive') and nondisruptive): msg = _("Driver cannot perform a non-disruptive migration of " "share %s.") % share_ref['id'] raise exception.ShareMigrationFailed(reason=msg) if (not compatibility.get('preserve_metadata') and preserve_metadata): msg = _("Driver cannot perform migration of share %s while " "preserving all metadata.") % share_ref['id'] raise exception.ShareMigrationFailed(reason=msg) if not compatibility.get('writable') and writable: msg = _("Driver cannot perform migration of share %s while " "remaining writable.") % share_ref['id'] raise exception.ShareMigrationFailed(reason=msg) if (not compatibility.get('preserve_snapshots') and preserve_snapshots): msg = 
_("Driver cannot perform migration of share %s while " "preserving snapshots.") % share_ref['id'] raise exception.ShareMigrationFailed(reason=msg) snapshot_mapping = {} src_snap_instances = [] src_snapshots = self.db.share_snapshot_get_all_for_share( context, share_ref['id']) if compatibility.get('preserve_snapshots'): # Make sure all snapshots are 'available' if any(x['status'] != constants.STATUS_AVAILABLE for x in src_snapshots): msg = _( "All snapshots must have '%(status)s' status to be " "migrated by the driver along with share " "%(share)s.") % { 'share': share_ref['id'], 'status': constants.STATUS_AVAILABLE } raise exception.ShareMigrationFailed(reason=msg) src_snap_instances = [x.instance for x in src_snapshots] dest_snap_instance_data = { 'status': constants.STATUS_MIGRATING_TO, 'progress': '0%', 'share_instance_id': dest_share_instance['id'], } for snap_instance in src_snap_instances: snapshot_mapping[snap_instance['id']] = ( self.db.share_snapshot_instance_create( context, snap_instance['snapshot_id'], dest_snap_instance_data)) self.db.share_snapshot_instance_update( context, snap_instance['id'], {'status': constants.STATUS_MIGRATING}) else: if src_snapshots: msg = _("Driver does not support preserving snapshots, " "driver-assisted migration cannot proceed while " "share %s has snapshots.") % share_ref['id'] raise exception.ShareMigrationFailed(reason=msg) if not compatibility.get('writable'): self._cast_access_rules_to_readonly( context, src_share_instance, share_server) LOG.debug("Initiating driver migration for share %s.", share_ref['id']) self.db.share_update( context, share_ref['id'], {'task_state': ( constants.TASK_STATE_MIGRATION_DRIVER_STARTING)}) self.driver.migration_start( context, src_share_instance, dest_share_instance, src_snap_instances, snapshot_mapping, share_server, dest_share_server) self.db.share_update( context, share_ref['id'], {'task_state': ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)}) except Exception: # NOTE(ganso): 
Cleaning up error'ed destination share instance from # database. It is assumed that driver cleans up leftovers in # backend when migration fails. share_types.revert_allocated_share_type_quotas_during_migration( context, src_share_instance, src_share_instance['share_type_id']) self._migration_delete_instance(context, dest_share_instance['id']) self._restore_migrating_snapshots_status( context, src_share_instance['id']) # NOTE(ganso): Read only access rules and share instance status # will be restored in migration_start's except block. # NOTE(ganso): For now source share instance should remain in # migrating status for host-assisted migration. msg = _("Driver-assisted migration of share %s " "failed.") % share_ref['id'] LOG.exception(msg) raise exception.ShareMigrationFailed(reason=msg) return True def _cast_access_rules_to_readonly(self, context, src_share_instance, share_server, dest_host=None): self._cast_access_rules_to_readonly_for_server( context, [src_share_instance], share_server, dest_host=dest_host) def _cast_access_rules_to_readonly_for_server( self, context, src_share_instances, share_server, dest_host=None): for src_share_instance in src_share_instances: self.db.share_instance_update( context, src_share_instance['id'], {'cast_rules_to_readonly': True}) # Set all 'applying' or 'active' rules to 'queued_to_apply'. Since # the share instance has its cast_rules_to_readonly attribute set # to True, existing rules will be cast to read/only. 
acceptable_past_states = (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_ACTIVE) new_state = constants.ACCESS_STATE_QUEUED_TO_APPLY conditionally_change = {k: new_state for k in acceptable_past_states} self.access_helper.get_and_update_share_instance_access_rules( context, share_instance_id=src_share_instance['id'], conditionally_change=conditionally_change) src_share_instance_ids = [x.id for x in src_share_instances] share_server_id = share_server['id'] if share_server else None if dest_host: rpcapi = share_rpcapi.ShareAPI() rpcapi.update_access_for_instances(context, dest_host, src_share_instance_ids, share_server_id) else: self.update_access_for_instances(context, src_share_instance_ids, share_server_id=share_server_id) for src_share_instance in src_share_instances: utils.wait_for_access_update( context, self.db, src_share_instance, self.migration_wait_access_rules_timeout) def _reset_read_only_access_rules( self, context, share_instance_id, supress_errors=True, helper=None): instance = self.db.share_instance_get(context, share_instance_id, with_share_data=True) share_server = self._get_share_server(context, instance) self._reset_read_only_access_rules_for_server( context, [instance], share_server, supress_errors, helper) def _reset_read_only_access_rules_for_server( self, context, share_instances, share_server, supress_errors=True, helper=None, dest_host=None): if helper is None: helper = migration.ShareMigrationHelper( context, self.db, self.access_helper) instances_to_update = [] for share_instance in share_instances: instance = self.db.share_instance_get(context, share_instance['id'], with_share_data=True) if instance['cast_rules_to_readonly']: update = {'cast_rules_to_readonly': False} instances_to_update.append(share_instance) self.db.share_instance_update( context, share_instance['id'], update) if instances_to_update: if supress_errors: helper.cleanup_access_rules(instances_to_update, share_server, dest_host) else: 
helper.revert_access_rules(instances_to_update, share_server, dest_host) @periodic_task.periodic_task( spacing=CONF.migration_driver_continue_update_interval) @utils.require_driver_initialized def migration_driver_continue(self, context): """Invokes driver to continue migration of shares.""" instances = self.db.share_instance_get_all_by_host( context, self.host, with_share_data=True) for instance in instances: if instance['status'] != constants.STATUS_MIGRATING: continue share = self.db.share_get(context, instance['share_id']) if share['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): src_share_instance_id, dest_share_instance_id = ( self.share_api.get_migrating_instances(share)) src_share_instance = instance dest_share_instance = self.db.share_instance_get( context, dest_share_instance_id, with_share_data=True) src_share_server = self._get_share_server( context, src_share_instance) dest_share_server = self._get_share_server( context, dest_share_instance) src_snap_instances, snapshot_mappings = ( self._get_migrating_snapshots(context, src_share_instance, dest_share_instance)) try: finished = self.driver.migration_continue( context, src_share_instance, dest_share_instance, src_snap_instances, snapshot_mappings, src_share_server, dest_share_server) if finished: self.db.share_update( context, instance['share_id'], {'task_state': (constants. TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)}) LOG.info("Share Migration for share %s completed " "first phase successfully.", share['id']) else: share = self.db.share_get( context, instance['share_id']) if (share['task_state'] == constants.TASK_STATE_MIGRATION_CANCELLED): LOG.warning( "Share Migration for share %s was cancelled.", share['id']) except Exception: (share_types. revert_allocated_share_type_quotas_during_migration( context, src_share_instance, dest_share_instance['share_type_id'])) # NOTE(ganso): Cleaning up error'ed destination share # instance from database. 
It is assumed that driver cleans # up leftovers in backend when migration fails. self._migration_delete_instance( context, dest_share_instance['id']) self._restore_migrating_snapshots_status( context, src_share_instance['id']) self._reset_read_only_access_rules( context, src_share_instance_id) self.db.share_instance_update( context, src_share_instance_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_update( context, instance['share_id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) msg = _("Driver-assisted migration of share %s " "failed.") % share['id'] LOG.exception(msg) def _get_migrating_snapshots( self, context, src_share_instance, dest_share_instance): dest_snap_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': [dest_share_instance['id']]})) snapshot_mappings = {} src_snap_instances = [] if len(dest_snap_instances) > 0: src_snap_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': [src_share_instance['id']]})) for snap in src_snap_instances: dest_snap_instance = next( x for x in dest_snap_instances if snap['snapshot_id'] == x['snapshot_id']) snapshot_mappings[snap['id']] = dest_snap_instance return src_snap_instances, snapshot_mappings def _restore_migrating_snapshots_status( self, context, src_share_instance_id, errored_dest_instance_id=None): filters = {'share_instance_ids': [src_share_instance_id]} status = constants.STATUS_AVAILABLE if errored_dest_instance_id: filters['share_instance_ids'].append(errored_dest_instance_id) status = constants.STATUS_ERROR snap_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, filters) ) for instance in snap_instances: if instance['status'] == constants.STATUS_MIGRATING: self.db.share_snapshot_instance_update( context, instance['id'], {'status': status}) elif (errored_dest_instance_id and instance['status'] == constants.STATUS_MIGRATING_TO): self.db.share_snapshot_instance_update( 
context, instance['id'], {'status': status}) @utils.require_driver_initialized def migration_start( self, context, share_id, dest_host, force_host_assisted_migration, preserve_metadata, writable, nondisruptive, preserve_snapshots, new_share_network_id=None, new_share_type_id=None): """Migrates a share from current host to another host.""" LOG.debug("Entered migration_start method for share %s.", share_id) self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}) share_ref = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share_ref) success = False host_value = share_utils.extract_host(dest_host) service = self.db.service_get_by_args( context, host_value, 'manila-share') new_az_id = service['availability_zone_id'] if not force_host_assisted_migration: try: success = self._migration_start_driver( context, share_ref, share_instance, dest_host, writable, preserve_metadata, nondisruptive, preserve_snapshots, new_share_network_id, new_az_id, new_share_type_id) except Exception as e: if not isinstance(e, NotImplementedError): LOG.exception( ("The driver could not migrate the share %(shr)s"), {'shr': share_id}) try: if not success: if (writable or preserve_metadata or nondisruptive or preserve_snapshots): msg = _("Migration for share %s could not be " "performed because host-assisted migration is not " "allowed when share must remain writable, " "preserve snapshots and/or file metadata or be " "performed nondisruptively.") % share_id raise exception.ShareMigrationFailed(reason=msg) # We only handle shares without snapshots for now snaps = self.db.share_snapshot_get_all_for_share( context, share_id) if snaps: msg = _("Share %s must not have snapshots in order to " "perform a host-assisted migration.") % share_id raise exception.ShareMigrationFailed(reason=msg) LOG.debug("Starting host-assisted migration " "for share %s.", share_id) self.db.share_update( context, share_id, {'task_state': 
constants.TASK_STATE_MIGRATION_IN_PROGRESS}) self._migration_start_host_assisted( context, share_ref, share_instance, dest_host, new_share_network_id, new_az_id, new_share_type_id) except Exception: msg = _("Host-assisted migration failed for share %s.") % share_id LOG.exception(msg) self.db.share_update( context, share_id, {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) self._reset_read_only_access_rules( context, share_instance['id']) self.db.share_instance_update( context, share_instance['id'], {'status': constants.STATUS_AVAILABLE}) raise exception.ShareMigrationFailed(reason=msg) def _migration_start_host_assisted( self, context, share, src_share_instance, dest_host, new_share_network_id, new_az_id, new_share_type_id): rpcapi = share_rpcapi.ShareAPI() helper = migration.ShareMigrationHelper( context, self.db, self.access_helper) share_server = self._get_share_server(context.elevated(), src_share_instance) self._cast_access_rules_to_readonly( context, src_share_instance, share_server) try: dest_share_instance = helper.create_instance_and_wait( share, dest_host, new_share_network_id, new_az_id, new_share_type_id) self.db.share_instance_update( context, dest_share_instance['id'], {'status': constants.STATUS_MIGRATING_TO}) except Exception: msg = _("Failed to create instance on destination " "backend during migration of share %s.") % share['id'] LOG.exception(msg) raise exception.ShareMigrationFailed(reason=msg) ignore_list = self.driver.configuration.safe_get( 'migration_ignore_files') data_rpc = data_rpcapi.DataAPI() try: src_connection_info = self.driver.connection_get_info( context, src_share_instance, share_server) dest_connection_info = rpcapi.connection_get_info( context, dest_share_instance) LOG.debug("Time to start copying in migration" " for share %s.", share['id']) data_rpc.migration_start( context, share['id'], ignore_list, src_share_instance['id'], dest_share_instance['id'], src_connection_info, dest_connection_info) except Exception: msg = 
_("Failed to obtain migration info from backends or" " invoking Data Service for migration of " "share %s.") % share['id'] LOG.exception(msg) helper.cleanup_new_instance(dest_share_instance) raise exception.ShareMigrationFailed(reason=msg) def _migration_complete_driver( self, context, share_ref, src_share_instance, dest_share_instance): share_server = self._get_share_server(context, src_share_instance) dest_share_server = self._get_share_server( context, dest_share_instance) self.db.share_update( context, share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) src_snap_instances, snapshot_mappings = ( self._get_migrating_snapshots(context, src_share_instance, dest_share_instance)) data_updates = self.driver.migration_complete( context, src_share_instance, dest_share_instance, src_snap_instances, snapshot_mappings, share_server, dest_share_server) or {} if data_updates.get('export_locations'): self.db.export_locations_update( context, dest_share_instance['id'], data_updates['export_locations']) snapshot_updates = data_updates.get('snapshot_updates') or {} dest_extra_specs = self._get_extra_specs_from_share_type( context, dest_share_instance['share_type_id']) for src_snap_ins, dest_snap_ins in snapshot_mappings.items(): model_update = snapshot_updates.get(dest_snap_ins['id']) or {} snapshot_export_locations = model_update.pop( 'export_locations', []) model_update['status'] = constants.STATUS_AVAILABLE model_update['progress'] = '100%' self.db.share_snapshot_instance_update( context, dest_snap_ins['id'], model_update) if dest_extra_specs['mount_snapshot_support']: for el in snapshot_export_locations: values = { 'share_snapshot_instance_id': dest_snap_ins['id'], 'path': el['path'], 'is_admin_only': el['is_admin_only'], } self.db.share_snapshot_instance_export_location_create( context, values) helper = migration.ShareMigrationHelper( context, self.db, self.access_helper) helper.apply_new_access_rules(dest_share_instance, share_ref['id']) 
self._migration_complete_instance(context, share_ref, src_share_instance['id'], dest_share_instance['id']) share_types.revert_allocated_share_type_quotas_during_migration( context, dest_share_instance, src_share_instance['share_type_id'], allow_deallocate_from_current_type=True) self._migration_delete_instance(context, src_share_instance['id']) def _migration_complete_instance(self, context, share_ref, src_instance_id, dest_instance_id): dest_updates = { 'status': constants.STATUS_AVAILABLE, 'progress': '100%' } if share_ref.get('replication_type'): dest_updates['replica_state'] = constants.REPLICA_STATE_ACTIVE self.db.share_instance_update(context, dest_instance_id, dest_updates) self.db.share_instance_update(context, src_instance_id, {'status': constants.STATUS_INACTIVE}) def _migration_delete_instance(self, context, instance_id): # refresh the share instance model share_instance = self.db.share_instance_get( context, instance_id, with_share_data=True) rules = self.access_helper.get_and_update_share_instance_access_rules( context, share_instance_id=instance_id) self.access_helper.delete_share_instance_access_rules( context, rules, instance_id) snap_instances = self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': [instance_id]}) for instance in snap_instances: self.db.share_snapshot_instance_delete(context, instance['id']) self.db.share_instance_delete(context, instance_id) LOG.info("Share instance %s: deleted successfully.", instance_id) self._check_delete_share_server(context, share_instance=share_instance) @utils.require_driver_initialized def migration_complete(self, context, src_instance_id, dest_instance_id): src_share_instance = self.db.share_instance_get( context, src_instance_id, with_share_data=True) dest_share_instance = self.db.share_instance_get( context, dest_instance_id, with_share_data=True) share_ref = self.db.share_get(context, src_share_instance['share_id']) LOG.info("Received request to finish Share Migration 
for " "share %s.", share_ref['id']) if share_ref['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): try: self._migration_complete_driver( context, share_ref, src_share_instance, dest_share_instance) except Exception: msg = _("Driver migration completion failed for" " share %s.") % share_ref['id'] LOG.exception(msg) # NOTE(ganso): If driver fails during migration-complete, # all instances are set to error and it is up to the admin # to fix the problem to either complete migration # manually or clean it up. At this moment, data # preservation at the source backend cannot be # guaranteed. self._restore_migrating_snapshots_status( context, src_share_instance['id'], errored_dest_instance_id=dest_share_instance['id']) self.db.share_instance_update( context, src_instance_id, {'status': constants.STATUS_ERROR}) self.db.share_instance_update( context, dest_instance_id, {'status': constants.STATUS_ERROR}) self.db.share_update( context, share_ref['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) # NOTE(carloss): No need to deallocate quotas allocated during # the migration request, since both share instances still exist # even they are set to an error state. 
            raise exception.ShareMigrationFailed(reason=msg)
        else:
            # Host-assisted migration path: completion is delegated to the
            # helper-based flow below.
            try:
                self._migration_complete_host_assisted(
                    context, share_ref, src_instance_id,
                    dest_instance_id)
            except Exception:
                msg = _("Host-assisted migration completion failed for"
                        " share %s.") % share_ref['id']
                LOG.exception(msg)
                # NOTE(carloss): No need to deallocate quotas allocated during
                # the migration request, since both source and destination
                # instances will still exist
                self.db.share_update(
                    context, share_ref['id'],
                    {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
                self.db.share_instance_update(
                    context, src_instance_id,
                    {'status': constants.STATUS_AVAILABLE})
                raise exception.ShareMigrationFailed(reason=msg)

        # Refresh share attributes from the destination share type and mark
        # the migration as successful in a single DB update.
        model_update = self._get_extra_specs_from_share_type(
            context, dest_share_instance['share_type_id'])
        model_update['task_state'] = constants.TASK_STATE_MIGRATION_SUCCESS
        self.db.share_update(
            context, dest_share_instance['share_id'], model_update)

        LOG.info("Share Migration for share %s"
                 " completed successfully.", share_ref['id'])

    def _get_extra_specs_from_share_type(self, context, share_type_id):
        """Return share attributes derived from a share type's extra specs.

        :param share_type_id: ID of the share type to read.
        :returns: dict of share attributes (as produced by the share API).
        """
        share_type = share_types.get_share_type(context, share_type_id)
        return self.share_api.get_share_attributes_from_share_type(
            share_type)

    def _migration_complete_host_assisted(self, context, share_ref,
                                          src_instance_id, dest_instance_id):
        """Complete (or roll back) a host-assisted share migration.

        Validates the data-copy task state, applies the access rules to the
        destination instance and finally deletes the source instance.  On
        cancellation or data-copy failure the destination instance is cleaned
        up and the source is restored to 'available'.

        :raises: ShareMigrationFailed when the data copy did not complete,
            errored out, or the new access rules could not be applied.
        """
        src_share_instance = self.db.share_instance_get(
            context, src_instance_id, with_share_data=True)
        dest_share_instance = self.db.share_instance_get(
            context, dest_instance_id, with_share_data=True)

        helper = migration.ShareMigrationHelper(
            context, self.db, self.access_helper)

        task_state = share_ref['task_state']
        if task_state in (constants.TASK_STATE_DATA_COPYING_ERROR,
                          constants.TASK_STATE_DATA_COPYING_CANCELLED):
            msg = _("Data copy of host assisted migration for share %s has not"
                    " completed successfully.") % share_ref['id']
            LOG.warning(msg)
            helper.cleanup_new_instance(dest_share_instance)
            cancelled = (
                task_state == constants.TASK_STATE_DATA_COPYING_CANCELLED)
            # On an explicit cancel, surface errors from resetting access
            # rules; on a copy error, suppress them (best effort cleanup).
            suppress_errors = True
            if cancelled:
                suppress_errors = False
            # NOTE(review): the keyword 'supress_errors' (sic) matches the
            # misspelled parameter name in _reset_read_only_access_rules's
            # signature (defined elsewhere in this file) — kept as-is.
            self._reset_read_only_access_rules(
                context, src_instance_id,
                supress_errors=suppress_errors, helper=helper)
            self.db.share_instance_update(
                context, src_instance_id,
                {'status': constants.STATUS_AVAILABLE})
            if cancelled:
                self.db.share_update(
                    context, share_ref['id'],
                    {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})

                LOG.info("Share Migration for share %s"
                         " was cancelled.", share_ref['id'])
                return
            else:
                raise exception.ShareMigrationFailed(reason=msg)

        elif task_state != constants.TASK_STATE_DATA_COPYING_COMPLETED:
            msg = _("Data copy for migration of share %s has not completed"
                    " yet.") % share_ref['id']
            LOG.error(msg)
            raise exception.ShareMigrationFailed(reason=msg)

        self.db.share_update(
            context, share_ref['id'],
            {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING})

        try:
            helper.apply_new_access_rules(dest_share_instance,
                                          share_ref['id'])
        except Exception:
            msg = _("Failed to apply new access rules during migration "
                    "of share %s.") % share_ref['id']
            LOG.exception(msg)
            helper.cleanup_new_instance(dest_share_instance)
            self._reset_read_only_access_rules(
                context, src_instance_id, helper=helper,
                supress_errors=True)
            self.db.share_instance_update(
                context, src_instance_id,
                {'status': constants.STATUS_AVAILABLE})
            raise exception.ShareMigrationFailed(reason=msg)

        self._migration_complete_instance(context, share_ref,
                                          src_share_instance['id'],
                                          dest_share_instance['id'])

        # NOTE(carloss): Won't revert allocated quotas for the share type here
        # because the delete_instance_and_wait method will end up calling the
        # delete_share_instance method here in the share manager. When the
        # share instance deletion is requested in the share manager, Manila
        # itself will take care of deallocating the existing quotas for the
        # share instance
        helper.delete_instance_and_wait(src_share_instance)

    @utils.require_driver_initialized
    def migration_cancel(self, context, src_instance_id, dest_instance_id):
        """Cancel an in-progress share migration.

        Only valid while the data copy is completed/phase-1 done/driver in
        progress; otherwise raises InvalidShare.  Cleans up the destination
        instance, restores read/write access on the source and reverts the
        share-type quotas allocated for the migration.
        """
        src_share_instance = self.db.share_instance_get(
            context, src_instance_id, with_share_data=True)
        dest_share_instance = self.db.share_instance_get(
            context, dest_instance_id, with_share_data=True)

        share_ref = self.db.share_get(context, src_share_instance['share_id'])

        if share_ref['task_state'] not in (
                constants.TASK_STATE_DATA_COPYING_COMPLETED,
                constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
                constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
            msg = _("Migration of share %s cannot be cancelled at this "
                    "moment.") % share_ref['id']
            raise exception.InvalidShare(reason=msg)

        share_server = self._get_share_server(context, src_share_instance)

        dest_share_server = self._get_share_server(
            context, dest_share_instance)

        helper = migration.ShareMigrationHelper(
            context, self.db, self.access_helper)

        if share_ref['task_state'] == (
                constants.TASK_STATE_DATA_COPYING_COMPLETED):
            # Host-assisted flow: the destination is just a regular instance
            # that can be deactivated and removed via the helper.
            self.db.share_instance_update(
                context, dest_share_instance['id'],
                {'status': constants.STATUS_INACTIVE})
            helper.cleanup_new_instance(dest_share_instance)
        else:
            # Driver-assisted flow: let the driver undo its own work, then
            # remove the destination instance and restore snapshot statuses.
            src_snap_instances, snapshot_mappings = (
                self._get_migrating_snapshots(context, src_share_instance,
                                              dest_share_instance))
            self.driver.migration_cancel(
                context, src_share_instance, dest_share_instance,
                src_snap_instances, snapshot_mappings, share_server,
                dest_share_server)
            self._migration_delete_instance(context, dest_share_instance['id'])
            self._restore_migrating_snapshots_status(
                context, src_share_instance['id'])

        self._reset_read_only_access_rules(
            context, src_instance_id, supress_errors=False, helper=helper)

        self.db.share_instance_update(
            context, src_instance_id,
            {'status': constants.STATUS_AVAILABLE})

        self.db.share_update(
            context, share_ref['id'],
            {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})

        share_types.revert_allocated_share_type_quotas_during_migration(
            context, src_share_instance, dest_share_instance['share_type_id'])

        LOG.info("Share Migration for share %s"
                 " was cancelled.", share_ref['id'])

    @utils.require_driver_initialized
    def migration_get_progress(self, context, src_instance_id,
                               dest_instance_id):
        """Query the driver for migration progress of a share.

        Only meaningful for a driver-assisted migration that is currently in
        progress; raises InvalidShare otherwise.
        :returns: whatever the driver's migration_get_progress returns.
        """
        src_share_instance = self.db.share_instance_get(
            context, src_instance_id, with_share_data=True)
        dest_share_instance = self.db.share_instance_get(
            context, dest_instance_id, with_share_data=True)

        share_ref = self.db.share_get(context, src_share_instance['share_id'])

        # Confirm that it is driver migration scenario
        if share_ref['task_state'] != (
                constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS):
            msg = _("Driver is not performing migration for"
                    " share %s at this moment.") % share_ref['id']
            raise exception.InvalidShare(reason=msg)

        share_server = None
        # NOTE(review): the check reads share_ref.instance while the fetch
        # uses src_share_instance — presumably both refer to the source
        # instance; confirm they cannot diverge.
        if share_ref.instance.get('share_server_id'):
            share_server = self.db.share_server_get(
                context, src_share_instance['share_server_id'])

        dest_share_server = None
        if dest_share_instance.get('share_server_id'):
            dest_share_server = self.db.share_server_get(
                context, dest_share_instance['share_server_id'])

        src_snap_instances, snapshot_mappings = (
            self._get_migrating_snapshots(context, src_share_instance,
                                          dest_share_instance))

        return self.driver.migration_get_progress(
            context, src_share_instance, dest_share_instance,
            src_snap_instances, snapshot_mappings, share_server,
            dest_share_server)

    def _get_share_instance(self, context, share):
        """Resolve a share instance from either an instance ID or a share.

        :param share: either a share-instance ID string or a share object
            carrying an ``instance`` attribute.
        :returns: the share instance with share data loaded.
        """
        if isinstance(share, str):
            # NOTE(review): local 'id' shadows the builtin; kept to preserve
            # the original tokens.
            id = share
        else:
            id = share.instance['id']
        return self.db.share_instance_get(context, id, with_share_data=True)

    @add_hooks
    @utils.require_driver_initialized
    def create_share_instance(self, context, share_instance_id,
                              request_spec=None, filter_properties=None,
                              snapshot_id=None):
        """Creates a share instance."""
        context = context.elevated()

        share_instance =
self._get_share_instance(context, share_instance_id)
        share_id = share_instance.get('share_id')
        share_network_id = share_instance.get('share_network_id')
        share = self.db.share_get(context, share_id)

        self._notify_about_share_usage(context, share,
                                       share_instance, "create.start")

        # Default the instance AZ to this service's AZ if none was chosen.
        if not share_instance['availability_zone']:
            share_instance = self.db.share_instance_update(
                context, share_instance_id,
                {'availability_zone': self.availability_zone},
                with_share_data=True
            )

        # A share network only makes sense when the driver manages share
        # servers (DHSS=True); reject the combination otherwise.
        if share_network_id and not self.driver.driver_handles_share_servers:
            self.db.share_instance_update(
                context, share_instance_id,
                {'status': constants.STATUS_ERROR})
            self.message_api.create(
                context,
                message_field.Action.CREATE,
                share['project_id'],
                resource_type=message_field.Resource.SHARE,
                resource_id=share_id,
                detail=message_field.Detail.UNEXPECTED_NETWORK)
            raise exception.ManilaException(_(
                "Creation of share instance %s failed: driver does not expect "
                "share-network to be provided with current "
                "configuration.") % share_instance_id)

        if snapshot_id is not None:
            snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
            parent_share_server_id = (
                snapshot_ref['share']['instance']['share_server_id'])
        else:
            snapshot_ref = None
            parent_share_server_id = None

        share_group_ref = None
        if share_instance.get('share_group_id'):
            share_group_ref = self.db.share_group_get(
                context, share_instance['share_group_id'])

        if share_network_id or parent_share_server_id:
            # Provision (or reuse) a share server; each failure mode gets a
            # distinct user message, the instance is set to 'error' and the
            # original exception is re-raised.
            try:
                share_server, share_instance = (
                    self._provide_share_server_for_share(
                        context, share_network_id, share_instance,
                        snapshot=snapshot_ref,
                        share_group=share_group_ref,
                    )
                )
            except exception.PortLimitExceeded:
                with excutils.save_and_reraise_exception():
                    error = ("Creation of share instance %s failed: "
                             "failed to allocate network")
                    LOG.error(error, share_instance_id)
                    self.db.share_instance_update(
                        context, share_instance_id,
                        {'status': constants.STATUS_ERROR}
                    )
                    self.message_api.create(
                        context,
                        message_field.Action.CREATE,
                        share['project_id'],
                        resource_type=message_field.Resource.SHARE,
                        resource_id=share_id,
                        detail=(message_field.Detail
                                .SHARE_NETWORK_PORT_QUOTA_LIMIT_EXCEEDED))
            except exception.SecurityServiceFailedAuth:
                with excutils.save_and_reraise_exception():
                    error = ("Provision of share server failed: "
                             "failed to authenticate user "
                             "against security server.")
                    LOG.error(error)
                    self.db.share_instance_update(
                        context, share_instance_id,
                        {'status': constants.STATUS_ERROR}
                    )
                    self.message_api.create(
                        context,
                        message_field.Action.CREATE,
                        share['project_id'],
                        resource_type=message_field.Resource.SHARE,
                        resource_id=share_id,
                        detail=(message_field.Detail
                                .SECURITY_SERVICE_FAILED_AUTH))
            except exception.IpAddressGenerationFailureClient:
                with excutils.save_and_reraise_exception():
                    error = ("Creation of share instance %s failed: "
                             "No Free IP's in neutron subnet.")
                    LOG.error(error, share_instance_id)
                    self.db.share_instance_update(
                        context, share_instance_id,
                        {'status': constants.STATUS_ERROR}
                    )
                    self.message_api.create(
                        context,
                        message_field.Action.CREATE,
                        share['project_id'],
                        resource_type=message_field.Resource.SHARE,
                        resource_id=share_id,
                        detail=message_field.Detail.NEUTRON_SUBNET_FULL)
            except Exception:
                with excutils.save_and_reraise_exception():
                    error = ("Creation of share instance %s failed: "
                             "failed to get share server.")
                    LOG.error(error, share_instance_id)
                    self.db.share_instance_update(
                        context, share_instance_id,
                        {'status': constants.STATUS_ERROR}
                    )
                    self.message_api.create(
                        context,
                        message_field.Action.CREATE,
                        share['project_id'],
                        resource_type=message_field.Resource.SHARE,
                        resource_id=share_id,
                        detail=message_field.Detail.NO_SHARE_SERVER)
        else:
            share_server = None

        # Enforce any protocol-mandated security service association
        # (e.g. a protocol that requires an AD/LDAP service configured on
        # the share network) before calling the driver.
        if share_network_id and self.driver.driver_handles_share_servers:
            proto = share_instance.get('share_proto').lower()
            ret_types = (
                self.driver.dhss_mandatory_security_service_association.get(
                    proto))
            if ret_types:
                share_network = self.db.share_network_get(context,
                                                          share_network_id)
                share_network_ss = []
                for security_service in share_network['security_services']:
                    share_network_ss.append(security_service['type'].lower())
                for types in ret_types:
                    if types not in share_network_ss:
                        self.db.share_instance_update(
                            context, share_instance_id,
                            {'status': constants.STATUS_ERROR}
                        )
                        self.message_api.create(
                            context,
                            message_field.Action.CREATE,
                            share['project_id'],
                            resource_type=message_field.Resource.SHARE,
                            resource_id=share_id,
                            detail=(message_field.Detail
                                    .MISSING_SECURITY_SERVICE))
                        raise exception.InvalidRequest(_(
                            "Share network security service association is "
                            "mandatory for protocol %s.")
                            % share_instance.get('share_proto'))

        status = constants.STATUS_AVAILABLE
        try:
            if snapshot_ref:
                # NOTE(dviroel): we need to provide the parent share info to
                # assist drivers that create shares from snapshot in different
                # pools or back ends
                parent_share_instance = self.db.share_instance_get(
                    context, snapshot_ref['share']['instance']['id'],
                    with_share_data=True)
                parent_share_dict = self._get_share_instance_dict(
                    context, parent_share_instance)
                model_update = self.driver.create_share_from_snapshot(
                    context, share_instance, snapshot_ref.instance,
                    share_server=share_server, parent_share=parent_share_dict)
                if isinstance(model_update, list):
                    # NOTE(dviroel): the driver that doesn't implement the new
                    # model_update will return only the export locations
                    export_locations = model_update
                else:
                    # NOTE(dviroel): share status is mandatory when answering
                    # a model update. If not provided, won't be possible to
                    # determine if was successfully created.
                    status = model_update.get('status')
                    if status is None:
                        msg = _("Driver didn't provide a share status.")
                        raise exception.InvalidShareInstance(reason=msg)
                    export_locations = model_update.get('export_locations')
            else:
                export_locations = self.driver.create_share(
                    context, share_instance, share_server=share_server)

            if status not in [constants.STATUS_AVAILABLE,
                              constants.STATUS_CREATING_FROM_SNAPSHOT]:
                msg = _('Driver returned an invalid status: %s') % status
                raise exception.InvalidShareInstance(reason=msg)

            # Drivers may return extra creation data (currently metadata)
            # to persist alongside the share.
            share_backend_info = (
                self.driver.get_optional_share_creation_data(
                    share, share_server=share_server))
            if share_backend_info:
                metadata_updates = share_backend_info.get("metadata")
                if metadata_updates:
                    self.db.share_metadata_update(
                        context, share_id, metadata_updates, False)
            if export_locations:
                self.db.export_locations_update(
                    context, share_instance['id'], export_locations)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error("Share instance %s failed on creation.",
                          share_instance_id)
                # Drivers may attach partial results (export locations) to
                # the exception; persist them when present so the admin can
                # inspect what was actually created.
                detail_data = getattr(e, 'detail_data', {})

                def get_export_location(details):
                    if not isinstance(details, dict):
                        return None
                    return details.get('export_locations',
                                       details.get('export_location'))

                export_locations = get_export_location(detail_data)
                if export_locations:
                    self.db.export_locations_update(
                        context, share_instance['id'], export_locations)
                else:
                    LOG.warning('Share instance information in exception '
                                'can not be written to db because it '
                                'contains %s and it is not a dictionary.',
                                detail_data)
                self.db.share_instance_update(
                    context, share_instance_id,
                    {'status': constants.STATUS_ERROR}
                )
                self.message_api.create(
                    context,
                    message_field.Action.CREATE,
                    share['project_id'],
                    resource_type=message_field.Resource.SHARE,
                    resource_id=share_id,
                    exception=e)
        else:
            LOG.info("Share instance %s created successfully.",
                     share_instance_id)
            # A share still 'creating_from_snapshot' reports 0% progress.
            progress = '100%' if status == constants.STATUS_AVAILABLE else '0%'
            updates = {
                'status': status,
                'launched_at': timeutils.utcnow(),
                'progress': progress
            }
            if share.get('replication_type'):
                updates['replica_state'] = constants.REPLICA_STATE_ACTIVE
            self.db.share_instance_update(context, share_instance_id,
                                          updates)

            self._notify_about_share_usage(context, share,
                                           share_instance, "create.end")

    def _update_share_instance_access_rules_state(self, context,
                                                  share_instance_id, state):
        """Update the access_rules_status for the share instance."""
        self.access_helper.get_and_update_share_instance_access_rules_status(
            context, status=state, share_instance_id=share_instance_id)

    def _get_replica_snapshots_for_snapshot(self, context, snapshot_id,
                                            active_replica_id,
                                            share_replica_id,
                                            with_share_data=True):
        """Return dict of snapshot instances of active and replica instances.

        This method returns a dict of snapshot instances for snapshot
        referred to by snapshot_id. The dict contains the snapshot instance
        pertaining to the 'active' replica and the snapshot instance
        pertaining to the replica referred to by share_replica_id.
        """
        filters = {
            'snapshot_ids': snapshot_id,
            'share_instance_ids': (share_replica_id, active_replica_id),
        }
        instance_list = self.db.share_snapshot_instance_get_all_with_filters(
            context, filters, with_share_data=with_share_data)

        snapshots = {
            'active_replica_snapshot': self._get_snapshot_instance_dict(
                context,
                list(filter(lambda x:
                            x['share_instance_id'] == active_replica_id,
                            instance_list))[0]),
            'share_replica_snapshot': self._get_snapshot_instance_dict(
                context,
                list(filter(lambda x:
                            x['share_instance_id'] == share_replica_id,
                            instance_list))[0]),
        }

        return snapshots

    @add_hooks
    @utils.require_driver_initialized
    @locked_share_replica_operation
    def create_share_replica(self, context, share_replica_id, share_id=None,
                             request_spec=None, filter_properties=None):
        """Create a share replica."""
        context = context.elevated()

        share_replica = self.db.share_replica_get(
            context, share_replica_id, with_share_data=True,
            with_share_server=True)

        if not share_replica['availability_zone']:
            share_replica =
self.db.share_replica_update(
                context, share_replica['id'],
                {'availability_zone': self.availability_zone},
                with_share_data=True
            )

        # A replica can only be created against an existing 'active' replica
        # in 'available' state.
        _active_replica = (
            self.db.share_replicas_get_available_active_replica(
                context, share_replica['share_id'], with_share_data=True,
                with_share_server=True))

        if not _active_replica:
            self.db.share_replica_update(
                context, share_replica['id'],
                {'status': constants.STATUS_ERROR,
                 'replica_state': constants.STATUS_ERROR})
            self.message_api.create(
                context,
                message_field.Action.CREATE,
                share_replica['project_id'],
                resource_type=message_field.Resource.SHARE_REPLICA,
                resource_id=share_replica['id'],
                detail=message_field.Detail.NO_ACTIVE_REPLICA)
            msg = _("An 'active' replica must exist in 'available' "
                    "state to create a new replica for share %s.")
            raise exception.ReplicationException(
                reason=msg % share_replica['share_id'])

        # We need the share_network_id in case of
        # driver_handles_share_server=True
        share_network_id = share_replica.get('share_network_id', None)

        # Exactly one of (share network provided, DHSS=True) being set is an
        # inconsistent configuration.
        if xor(bool(share_network_id),
               self.driver.driver_handles_share_servers):
            self.db.share_replica_update(
                context, share_replica['id'],
                {'status': constants.STATUS_ERROR,
                 'replica_state': constants.STATUS_ERROR})
            self.message_api.create(
                context,
                message_field.Action.CREATE,
                share_replica['project_id'],
                resource_type=message_field.Resource.SHARE_REPLICA,
                resource_id=share_replica['id'],
                detail=message_field.Detail.UNEXPECTED_NETWORK)
            raise exception.InvalidDriverMode(
                "The share-network value provided does not match with the "
                "current driver configuration.")

        if share_network_id:
            try:
                share_server, share_replica = (
                    self._provide_share_server_for_share(
                        context, share_network_id, share_replica)
                )
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error("Failed to get share server "
                              "for share replica creation.")
                    self.db.share_replica_update(
                        context, share_replica['id'],
                        {'status': constants.STATUS_ERROR,
                         'replica_state': constants.STATUS_ERROR})
                    self.message_api.create(
                        context,
                        message_field.Action.CREATE,
                        share_replica['project_id'],
                        resource_type=message_field.Resource.SHARE_REPLICA,
                        resource_id=share_replica['id'],
                        detail=message_field.Detail.NO_SHARE_SERVER)
        else:
            share_server = None

        # Map the existing access rules for the share to
        # the replica in the DB.
        share_access_rules = self.db.share_instance_access_copy(
            context, share_replica['share_id'], share_replica['id'])

        # Get snapshots for the share.
        share_snapshots = self.db.share_snapshot_get_all_for_share(
            context, share_id)
        # Get the required data for snapshots that have 'aggregate_status'
        # set to 'available'.
        available_share_snapshots = [
            self._get_replica_snapshots_for_snapshot(
                context, x['id'], _active_replica['id'], share_replica_id)
            for x in share_snapshots
            if x['aggregate_status'] == constants.STATUS_AVAILABLE]

        replica_list = (
            self.db.share_replicas_get_all_by_share(
                context, share_replica['share_id'],
                with_share_data=True, with_share_server=True)
        )

        replica_list = [self._get_share_instance_dict(context, r)
                        for r in replica_list]
        share_replica = self._get_share_instance_dict(context, share_replica)

        try:
            # A driver returning None is treated as "no model update".
            replica_ref = self.driver.create_replica(
                context, replica_list, share_replica,
                share_access_rules, available_share_snapshots,
                share_server=share_server) or {}

        except Exception as excep:
            with excutils.save_and_reraise_exception():
                LOG.error("Share replica %s failed on creation.",
                          share_replica['id'])
                self.db.share_replica_update(
                    context, share_replica['id'],
                    {'status': constants.STATUS_ERROR,
                     'replica_state': constants.STATUS_ERROR})
                self._update_share_instance_access_rules_state(
                    context, share_replica['id'], constants.STATUS_ERROR)
                self.message_api.create(
                    context,
                    message_field.Action.CREATE,
                    share_replica['project_id'],
                    resource_type=message_field.Resource.SHARE_REPLICA,
                    resource_id=share_replica['id'],
                    exception=excep)

        if replica_ref.get('export_locations'):
            if isinstance(replica_ref.get('export_locations'), list):
                self.db.export_locations_update(
                    context, share_replica['id'],
                    replica_ref.get('export_locations'))
            else:
                msg = ('Invalid export locations passed to the share '
                       'manager.')
                LOG.warning(msg)

        if replica_ref.get('replica_state'):
            self.db.share_replica_update(
                context, share_replica['id'],
                {'status': constants.STATUS_AVAILABLE,
                 'replica_state': replica_ref.get('replica_state'),
                 'progress': '100%'})

        reported_access_rules_status = replica_ref.get('access_rules_status')
        if reported_access_rules_status in (None, "active"):
            # update all rules to "active"
            conditionally_change = {'queued_to_apply': 'active'}
            self.access_helper.get_and_update_share_instance_access_rules(
                context, share_instance_id=share_replica['id'],
                conditionally_change=conditionally_change)
            # update "access_rules_status" on the replica
            self._update_share_instance_access_rules_state(
                context, share_replica['id'], constants.STATUS_ACTIVE)
        elif replica_ref.get('share_access_rules'):
            # driver would like to update individual access rules
            share_access_rules_dict = {
                rule['id']: rule for rule in share_access_rules}
            for rule_update in replica_ref.get('share_access_rules'):
                self.access_helper.get_and_update_share_instance_access_rule(
                    context, rule_update['id'],
                    {'state': rule_update['state']},
                    share_instance_id=share_replica['id'])
                share_access_rules_dict.pop(rule_update['id'])
            # Any rule the driver did not mention is considered active.
            for rule_id in share_access_rules_dict:
                self.access_helper.get_and_update_share_instance_access_rule(
                    context, rule_id, {'state': 'active'},
                    share_instance_id=share_replica['id'])
            self._update_share_instance_access_rules_state(
                context, share_replica['id'],
                replica_ref.get('access_rules_status'))

        LOG.info("Share replica %s created successfully.",
                 share_replica['id'])

    @add_hooks
    @utils.require_driver_initialized
    @locked_share_replica_operation
    def delete_share_replica(self, context, share_replica_id, share_id=None,
                             force=False):
        """Delete a share replica."""
        context = context.elevated()
        share_replica = self.db.share_replica_get(
            context, share_replica_id, with_share_data=True,
with_share_server=True)

        # Grab all the snapshot instances that belong to this replica.
        replica_snapshots = (
            self.db.share_snapshot_instance_get_all_with_filters(
                context, {'share_instance_ids': share_replica_id},
                with_share_data=True)
        )

        replica_list = (
            self.db.share_replicas_get_all_by_share(
                context, share_replica['share_id'],
                with_share_data=True, with_share_server=True)
        )

        replica_list = [self._get_share_instance_dict(context, r)
                        for r in replica_list]
        replica_snapshots = [self._get_snapshot_instance_dict(context, s)
                             for s in replica_snapshots]
        share_server = self._get_share_server(context, share_replica)
        share_replica = self._get_share_instance_dict(context, share_replica)

        try:
            # Remove all access rules first; with force=True a failure here
            # is logged but does not abort the deletion.
            self.access_helper.update_access_rules(
                context,
                share_replica_id,
                delete_all_rules=True,
                share_server=share_server
            )
        except Exception as excep:
            with excutils.save_and_reraise_exception() as exc_context:
                # Set status to 'error' from 'deleting' since
                # access_rules_status has been set to 'error'.
                self.db.share_replica_update(
                    context, share_replica['id'],
                    {'status': constants.STATUS_ERROR})
                self.message_api.create(
                    context,
                    message_field.Action.DELETE_ACCESS_RULES,
                    share_replica['project_id'],
                    resource_type=message_field.Resource.SHARE_REPLICA,
                    resource_id=share_replica['id'],
                    exception=excep)
                if force:
                    msg = _("The driver was unable to delete access rules "
                            "for the replica: %s. Will attempt to delete "
                            "the replica anyway.")
                    LOG.exception(msg, share_replica['id'])
                    exc_context.reraise = False

        try:
            self.driver.delete_replica(
                context, replica_list, replica_snapshots, share_replica,
                share_server=share_server)
        except Exception as excep:
            with excutils.save_and_reraise_exception() as exc_context:
                if force:
                    # Forced delete: drop the DB records even though the
                    # backend resource may linger.
                    msg = _("The driver was unable to delete the share "
                            "replica: %s on the backend. Since "
                            "this operation is forced, the replica will be "
                            "deleted from Manila's database. A cleanup on "
                            "the backend may be necessary.")
                    LOG.exception(msg, share_replica['id'])
                    exc_context.reraise = False
                else:
                    self.db.share_replica_update(
                        context, share_replica['id'],
                        {'status': constants.STATUS_ERROR_DELETING,
                         'replica_state': constants.STATUS_ERROR})
                    self.message_api.create(
                        context,
                        message_field.Action.DELETE,
                        share_replica['project_id'],
                        resource_type=message_field.Resource.SHARE_REPLICA,
                        resource_id=share_replica['id'],
                        exception=excep)

        for replica_snapshot in replica_snapshots:
            self.db.share_snapshot_instance_delete(
                context, replica_snapshot['id'])

        self.db.share_replica_delete(context, share_replica['id'])
        LOG.info("Share replica %s deleted successfully.",
                 share_replica['id'])

    @add_hooks
    @utils.require_driver_initialized
    @locked_share_replica_operation
    def promote_share_replica(self, context, share_replica_id, share_id=None,
                              quiesce_wait_time=None):
        """Promote a share replica to active state."""
        context = context.elevated()
        share_replica = self.db.share_replica_get(
            context, share_replica_id, with_share_data=True,
            with_share_server=True)
        replication_type = share_replica['replication_type']
        # For 'readable' replication the demoted replica must be cast to
        # read-only after the promotion.
        if replication_type == constants.REPLICATION_TYPE_READABLE:
            ensure_old_active_replica_to_readonly = True
        else:
            ensure_old_active_replica_to_readonly = False
        share_server = self._get_share_server(context, share_replica)

        # Get list of all replicas for share
        replica_list = (
            self.db.share_replicas_get_all_by_share(
                context, share_replica['share_id'],
                with_share_data=True, with_share_server=True)
        )

        try:
            old_active_replica = list(filter(
                lambda r: (
                    r['replica_state'] == constants.REPLICA_STATE_ACTIVE),
                replica_list))[0]
        except IndexError:
            # No active replica exists — nothing can be promoted.
            self.db.share_replica_update(
                context, share_replica['id'],
                {'status': constants.STATUS_AVAILABLE})
            msg = _("Share %(share)s has no replica with 'replica_state' "
                    "set to %(state)s. Promoting %(replica)s is not "
                    "possible.")
            self.message_api.create(
                context,
                message_field.Action.PROMOTE,
                share_replica['project_id'],
                resource_type=message_field.Resource.SHARE_REPLICA,
                resource_id=share_replica['id'],
                detail=message_field.Detail.NO_ACTIVE_REPLICA)
            raise exception.ReplicationException(
                reason=msg % {'share': share_replica['share_id'],
                              'state': constants.REPLICA_STATE_ACTIVE,
                              'replica': share_replica['id']})

        access_rules = self.db.share_access_get_all_for_share(
            context, share_replica['share_id'])

        replica_list = [self._get_share_instance_dict(context, r)
                        for r in replica_list]
        share_replica = self._get_share_instance_dict(context, share_replica)

        try:
            updated_replica_list = (
                self.driver.promote_replica(
                    context, replica_list, share_replica, access_rules,
                    share_server=share_server,
                    quiesce_wait_time=quiesce_wait_time)
            )
        except Exception as excep:
            with excutils.save_and_reraise_exception():
                # (NOTE) gouthamr: If the driver throws an exception at
                # this stage, there is a good chance that the replicas are
                # somehow altered on the backend. We loop through the
                # replicas and set their 'status's to 'error' and
                # leave the 'replica_state' unchanged. This also changes the
                # 'status' of the replica that failed to promote to 'error' as
                # before this operation. The backend may choose to update
                # the actual replica_state during the replica_monitoring
                # stage.
                updates = {'status': constants.STATUS_ERROR}
                for replica_ref in replica_list:
                    self.db.share_replica_update(
                        context, replica_ref['id'], updates)
                    self.message_api.create(
                        context,
                        message_field.Action.PROMOTE,
                        replica_ref['project_id'],
                        resource_type=message_field.Resource.SHARE_REPLICA,
                        resource_id=replica_ref['id'],
                        exception=excep)

        # Set any 'creating' snapshots on the currently active replica to
        # 'error' since we cannot guarantee they will finish 'creating'.
        active_replica_snapshot_instances = (
            self.db.share_snapshot_instance_get_all_with_filters(
                context, {'share_instance_ids': share_replica['id']})
        )
        for instance in active_replica_snapshot_instances:
            if instance['status'] in (constants.STATUS_CREATING,
                                      constants.STATUS_DELETING):
                msg = ("The replica snapshot instance %(instance)s was "
                       "in %(state)s. Since it was not in %(available)s "
                       "state when the replica was promoted, it will be "
                       "set to %(error)s.")
                payload = {
                    'instance': instance['id'],
                    'state': instance['status'],
                    'available': constants.STATUS_AVAILABLE,
                    'error': constants.STATUS_ERROR,
                }
                LOG.info(msg, payload)
                self.db.share_snapshot_instance_update(
                    context, instance['id'],
                    {'status': constants.STATUS_ERROR})

        if not updated_replica_list:
            # Driver returned no per-replica updates: apply the default
            # post-promotion states ourselves.
            self.db.share_replica_update(
                context, old_active_replica['id'],
                {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC,
                 'cast_rules_to_readonly':
                     ensure_old_active_replica_to_readonly})
            self.db.share_replica_update(
                context, share_replica['id'],
                {'status': constants.STATUS_AVAILABLE,
                 'replica_state': constants.REPLICA_STATE_ACTIVE,
                 'cast_rules_to_readonly': False})
        else:
            while updated_replica_list:
                # NOTE(vponomaryov): update 'active' replica last.
                for updated_replica in updated_replica_list:
                    if (updated_replica['id'] == share_replica['id'] and
                            len(updated_replica_list) > 1):
                        continue
                    updated_replica_list.remove(updated_replica)
                    break

                updated_export_locs = updated_replica.get(
                    'export_locations')
                if updated_export_locs is not None \
                        and isinstance(updated_export_locs, list):
                    self.db.export_locations_update(
                        context, updated_replica['id'],
                        updated_export_locs)

                updated_replica_state = updated_replica.get(
                    'replica_state')
                updates = {}
                # Change the promoted replica's status from 'available' to
                # 'replication_change' and unset cast_rules_to_readonly
                if updated_replica['id'] == share_replica['id']:
                    updates['cast_rules_to_readonly'] = False
                    updates['status'] = constants.STATUS_AVAILABLE
                elif updated_replica['id'] == old_active_replica['id']:
                    updates['cast_rules_to_readonly'] = (
                        ensure_old_active_replica_to_readonly)
                if updated_replica_state == constants.STATUS_ERROR:
                    updates['status'] = constants.STATUS_ERROR
                if updated_replica_state is not None:
                    updates['replica_state'] = updated_replica_state
                if updates:
                    self.db.share_replica_update(
                        context, updated_replica['id'], updates)

                if updated_replica.get('access_rules_status'):
                    self._update_share_instance_access_rules_state(
                        context, share_replica['id'],
                        updated_replica.get('access_rules_status'))

        LOG.info("Share replica %s: promoted to active state "
                 "successfully.", share_replica['id'])

    @periodic_task.periodic_task(spacing=CONF.replica_state_update_interval)
    @utils.require_driver_initialized
    def periodic_share_replica_update(self, context):
        """Periodically poll the driver for non-active replica states."""
        LOG.debug("Updating status of share replica instances.")
        # we will need: id, host, replica_state, share_id
        replicas = self.db.share_replicas_get_all(context,
                                                  with_share_data=False,
                                                  with_share_server=False)

        # Filter only non-active replicas belonging to this backend
        def qualified_replica(r):
            return (share_utils.extract_host(r['host']) ==
                    share_utils.extract_host(self.host) and
                    r['replica_state'] != constants.REPLICA_STATE_ACTIVE)
        replicas = list(filter(lambda x: qualified_replica(x),
                        replicas))

        for replica in replicas:
            self._share_replica_update(
                context, replica['id'], share_id=replica['share_id'])

    @add_hooks
    @utils.require_driver_initialized
    def update_share_replica(self, context, share_replica_id, share_id=None):
        """Initiated by the force_update API."""
        self._share_replica_update(
            context, share_replica_id, share_id=share_id)

    @locked_share_replica_operation
    def _share_replica_update(self, context, share_replica_id, share_id=None):
        """Ask the driver for the current replica_state and persist it."""
        # share_id is used by the locked_share_replica_operation decorator
        # Grab the replica:
        try:
            # _get_share_instance_dict will fetch share server
            share_replica = self.db.share_replica_get(
                context, share_replica_id, with_share_data=True,
                with_share_server=False)
        except exception.ShareReplicaNotFound:
            # Replica may have been deleted, nothing to do here
            return

        # We don't poll for replicas that are busy in some operation,
        # or if they are the 'active' instance.
        if (share_replica['status'] in constants.TRANSITIONAL_STATUSES
                or share_replica['status'] == constants.STATUS_ERROR_DELETING
                or share_replica['replica_state'] ==
                constants.REPLICA_STATE_ACTIVE):
            return

        share_server = self._get_share_server(context, share_replica)

        access_rules = self.db.share_access_get_all_for_share(
            context, share_replica['share_id'])

        LOG.debug("Updating status of share share_replica %s: ",
                  share_replica['id'])

        # _get_share_instance_dict will fetch share server
        replica_list = (
            self.db.share_replicas_get_all_by_share(
                context, share_replica['share_id'],
                with_share_data=True, with_share_server=False)
        )

        _active_replica = next((x for x in replica_list
                               if x['replica_state'] ==
                               constants.REPLICA_STATE_ACTIVE), None)

        if _active_replica is None:
            if share_replica['replica_state'] != constants.STATUS_ERROR:
                # only log warning if replica_state was not already in error
                msg = (("Replica parent share %(id)s has no active "
                        "replica.") % {'id': share_replica['share_id']})
                LOG.warning(msg)
                self.db.share_replica_update(context, share_replica['id'],
                                             {'replica_state':
                                                 constants.STATUS_ERROR})
            # without a related active replica, we cannot act on any
            # non-active replica
            return

        # Get snapshots for the share.
        share_snapshots = self.db.share_snapshot_get_all_for_share(
            context, share_replica['share_id'])

        # Get the required data for snapshots that have 'aggregate_status'
        # set to 'available'.
        available_share_snapshots = [
            self._get_replica_snapshots_for_snapshot(
                context, x['id'], _active_replica['id'], share_replica['id'])
            for x in share_snapshots
            if x['aggregate_status'] == constants.STATUS_AVAILABLE]

        replica_list = [self._get_share_instance_dict(context, r)
                        for r in replica_list]

        share_replica = self._get_share_instance_dict(context, share_replica)

        try:
            replica_state = self.driver.update_replica_state(
                context, replica_list, share_replica, access_rules,
                available_share_snapshots, share_server=share_server)
        except Exception as excep:
            msg = ("Driver error when updating replica "
                   "state for replica %s.")
            LOG.exception(msg, share_replica['id'])
            self.db.share_replica_update(
                context, share_replica['id'],
                {'replica_state': constants.STATUS_ERROR,
                 'status': constants.STATUS_ERROR})
            self.message_api.create(
                context,
                message_field.Action.UPDATE,
                share_replica['project_id'],
                resource_type=message_field.Resource.SHARE_REPLICA,
                resource_id=share_replica['id'],
                exception=excep)
            return

        # Only the well-known replica states may be persisted from an
        # update call; anything else is logged and discarded.
        if replica_state in (constants.REPLICA_STATE_IN_SYNC,
                             constants.REPLICA_STATE_OUT_OF_SYNC,
                             constants.STATUS_ERROR):
            self.db.share_replica_update(context, share_replica['id'],
                                         {'replica_state': replica_state})
        elif replica_state:
            msg = (("Replica %(id)s cannot be set to %(state)s "
                    "through update call.") %
                   {'id': share_replica['id'], 'state': replica_state})
            LOG.warning(msg)

    def _validate_share_and_driver_mode(self, share_instance):
        """Check that the share's DHSS extra spec matches the driver's mode.

        :returns: the driver's driver_handles_share_servers value.
        :raises: InvalidShare when the share type's DHSS setting and the
            configured driver mode disagree.
        """
        driver_dhss = self.driver.driver_handles_share_servers

        share_dhss = share_types.parse_boolean_extra_spec(
            'driver_handles_share_servers',
share_types.get_share_type_extra_specs( share_instance['share_type_id'], constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS)) if driver_dhss != share_dhss: msg = _("Driver mode of share %(share)s being managed is " "incompatible with mode DHSS=%(dhss)s configured for" " this backend.") % {'share': share_instance['share_id'], 'dhss': driver_dhss} raise exception.InvalidShare(reason=msg) return driver_dhss @add_hooks @utils.require_driver_initialized def manage_share(self, context, share_id, driver_options): context = context.elevated() share_ref = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share_ref) share_type = share_types.get_share_type( context, share_instance['share_type_id']) share_type_extra_specs = self._get_extra_specs_from_share_type( context, share_instance['share_type_id']) share_type_supports_replication = share_type_extra_specs.get( 'replication_type', None) project_id = share_ref['project_id'] try: driver_dhss = self._validate_share_and_driver_mode(share_instance) if driver_dhss is True: share_server = self._get_share_server(context, share_instance) share_update = ( self.driver.manage_existing_with_server( share_instance, driver_options, share_server) or {} ) else: share_update = ( self.driver.manage_existing( share_instance, driver_options) or {} ) if not share_update.get('size'): # NOTE(haixin)if failed to get real size of share, will not # commit quota usages. 
                msg = _("Driver cannot calculate share size.")
                raise exception.InvalidShare(reason=msg)
            else:
                # Enforce the share type's min/max size extra-specs against
                # the size reported by the driver.
                share_types.provision_filter_on_size(context, share_type,
                                                     share_update.get('size'))

            try:
                values = {'per_share_gigabytes': share_update.get('size')}
                QUOTAS.limit_check(context, project_id=context.project_id,
                                   **values)
            except exception.OverQuota as e:
                # Exceeding per-share size limit is logged but does not
                # abort the manage operation.
                quotas = e.kwargs['quotas']
                LOG.warning("Requested share size %(size)d is larger than "
                            "maximum allowed limit %(limit)d.",
                            {'size': share_update.get('size'),
                             'limit': quotas['per_share_gigabytes']})

            deltas = {
                'project_id': project_id,
                'user_id': context.user_id,
                'shares': 1,
                'gigabytes': share_update['size'],
                'share_type_id': share_instance['share_type_id'],
            }
            if share_type_supports_replication:
                deltas.update({'share_replicas': 1,
                               'replica_gigabytes': share_update['size']})

            # NOTE(carloss): Allowing OverQuota to do not compromise this
            # operation. If this hit OverQuota error while managing a share,
            # the admin would need to reset the state of the share and
            # delete or force delete the share (bug 1863298). Allowing
            # OverQuota makes this operation work properly and the admin will
            # need to adjust quotas afterwards.
            reservations = QUOTAS.reserve(context, overquota_allowed=True,
                                          **deltas)
            QUOTAS.commit(
                context, reservations, project_id=project_id,
                share_type_id=share_instance['share_type_id'],
            )

            share_update.update({
                'status': constants.STATUS_AVAILABLE,
                'launched_at': timeutils.utcnow(),
                'availability_zone': self.availability_zone,
            })

            # If the share was managed with `replication_type` extra-spec, the
            # instance becomes an `active` replica.
            if share_ref.get('replication_type'):
                share_update['replica_state'] = constants.REPLICA_STATE_ACTIVE

            # NOTE(vponomaryov): we should keep only those export locations
            # that driver has calculated to avoid incompatibilities with one
            # provided by user.
            if 'export_locations' in share_update:
                self.db.export_locations_update(
                    context, share_instance['id'],
                    share_update.pop('export_locations'),
                    delete=True)

            self.db.share_update(context, share_id, share_update)
        except Exception:
            # NOTE(haixin) we should set size 0 because we don't know the real
            # size of the share, and we will skip quota cuts when
            # delete/unmanage share.
            self.db.share_update(
                context, share_id,
                {'status': constants.STATUS_MANAGE_ERROR, 'size': 0})
            raise

    @add_hooks
    @utils.require_driver_initialized
    def manage_snapshot(self, context, snapshot_id, driver_options):
        """Bring a pre-existing backend snapshot under Manila management.

        :param snapshot_id: ID of the snapshot record created for the import.
        :param driver_options: driver-specific options for locating the
            existing snapshot on the backend.
        """
        context = context.elevated()
        snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
        snapshot_instance = self.db.share_snapshot_instance_get(
            context, snapshot_ref.instance['id'], with_share_data=True
        )
        project_id = snapshot_ref['project_id']

        driver_dhss = self.driver.driver_handles_share_servers

        try:
            if driver_dhss is True:
                share_server = self._get_share_server(context,
                                                      snapshot_ref['share'])
                snapshot_update = (
                    self.driver.manage_existing_snapshot_with_server(
                        snapshot_instance, driver_options, share_server)
                    or {}
                )
            else:
                snapshot_update = (
                    self.driver.manage_existing_snapshot(
                        snapshot_instance, driver_options)
                    or {}
                )

            if not snapshot_update.get('size'):
                # Fall back to the parent share's size when the driver
                # cannot report the snapshot size.
                snapshot_update['size'] = snapshot_ref['share']['size']
                LOG.warning("Cannot get the size of the snapshot "
                            "%(snapshot_id)s. Using the size of "
                            "the share instead.",
                            {'snapshot_id': snapshot_id})

            self._update_quota_usages(context, project_id, {
                "snapshots": 1,
                "snapshot_gigabytes": snapshot_update['size'],
            })

            snapshot_export_locations = snapshot_update.pop(
                'export_locations', [])

            if snapshot_instance['share']['mount_snapshot_support']:
                for el in snapshot_export_locations:
                    values = {
                        'share_snapshot_instance_id': snapshot_instance['id'],
                        'path': el['path'],
                        'is_admin_only': el['is_admin_only'],
                    }
                    self.db.share_snapshot_instance_export_location_create(
                        context, values)

            snapshot_update.update({
                'status': constants.STATUS_AVAILABLE,
                'progress': '100%',
            })
            snapshot_update.pop('id', None)
            self.db.share_snapshot_update(context, snapshot_id,
                                          snapshot_update)
        except Exception:
            # NOTE(vponomaryov): set size as 1 because design expects size
            # to be set, it also will allow us to handle delete/unmanage
            # operations properly with this errored snapshot according to
            # quotas.
            self.db.share_snapshot_update(
                context, snapshot_id,
                {'status': constants.STATUS_MANAGE_ERROR, 'size': 1})
            raise

    def _update_quota_usages(self, context, project_id, usages):
        """Add the given deltas directly to per-user quota usage records.

        :param usages: mapping of resource name to usage delta to add.
        """
        user_id = context.user_id
        for resource, usage in usages.items():
            try:
                current_usage = self.db.quota_usage_get(
                    context, project_id, resource, user_id)
                self.db.quota_usage_update(
                    context, project_id, user_id, resource,
                    in_use=current_usage['in_use'] + usage)
            except exception.QuotaUsageNotFound:
                # First usage for this resource/user: create the record.
                self.db.quota_usage_create(context, project_id,
                                           user_id, resource, usage)

    @add_hooks
    @utils.require_driver_initialized
    def unmanage_share(self, context, share_id):
        """Remove a share from Manila management, leaving it on the backend.

        :param share_id: ID of the share to unmanage.
        """
        context = context.elevated()
        share_ref = self.db.share_get(context, share_id)
        share_instance = self._get_share_instance(context, share_ref)
        share_server = None
        project_id = share_ref['project_id']
        replicas = self.db.share_replicas_get_all_by_share(
            context, share_id)
        supports_replication = len(replicas) > 0

        def share_manage_set_error_status(msg, exception):
            # Helper: flag the share as unmanage-errored and log the cause.
            status = {'status': constants.STATUS_UNMANAGE_ERROR}
            self.db.share_update(context, share_id, status)
            LOG.error(msg, exception)

        dhss = self.driver.driver_handles_share_servers

        try:
            if dhss is True:
                share_server = self._get_share_server(context, share_instance)
                self.driver.unmanage_with_server(share_instance, share_server)
            else:
                self.driver.unmanage(share_instance)
        except exception.InvalidShare as e:
            share_manage_set_error_status(
                ("Share can not be unmanaged: %s."), e)
            return

        # NOTE(haixin) we will skip quota cuts when unmanaging a share with
        # 'error_manage' status, because we have not commit quota usages when
        # we failed to manage the share.
        if share_ref['status'] != constants.STATUS_MANAGE_ERROR_UNMANAGING:
            deltas = {
                'project_id': project_id,
                'shares': -1,
                'gigabytes': -share_ref['size'],
                'share_type_id': share_instance['share_type_id'],
            }
            # NOTE(carloss): while unmanaging a share, a share will not
            # contain replicas other than the active one. So there is no need
            # to recalculate the amount of share replicas to be deallocated.
            if supports_replication:
                deltas.update({'share_replicas': -1,
                               'replica_gigabytes': -share_ref['size']})
            try:
                reservations = QUOTAS.reserve(context, **deltas)
                QUOTAS.commit(
                    context, reservations, project_id=project_id,
                    share_type_id=share_instance['share_type_id'],
                )
            except Exception as e:
                # Note(imalinovskiy):
                # Quota reservation errors here are not fatal, because
                # unmanage is administrator API and he/she could update user
                # quota usages later if it's required.
                LOG.warning("Failed to update quota usages: %s.", e)

        if self.configuration.safe_get('unmanage_remove_access_rules'):
            try:
                self.access_helper.update_access_rules(
                    context,
                    share_instance['id'],
                    delete_all_rules=True,
                    share_server=share_server
                )
            except Exception as e:
                share_manage_set_error_status(
                    ("Can not remove access rules of share: %s."), e)
                return

        self.db.share_instance_delete(context, share_instance['id'])

        # NOTE(ganso): Since we are unmanaging a share that is still within a
        # share server, we need to prevent the share server from being
        # auto-deleted.
        if share_server and share_server['is_auto_deletable']:
            self.db.share_server_update(context, share_server['id'],
                                        {'is_auto_deletable': False})
            msg = ("Since share %(share)s has been un-managed from share "
                   "server %(server)s. This share server must be removed "
                   "manually, either by un-managing or by deleting it. The "
                   "share network subnets %(subnets)s and share network "
                   "%(network)s cannot be deleted unless this share server "
                   "has been removed.")
            msg_args = {
                'share': share_id,
                'server': share_server['id'],
                'subnets': share_server['share_network_subnet_ids'],
                'network': share_instance['share_network_id']
            }
            LOG.warning(msg, msg_args)

        LOG.info("Share %s: unmanaged successfully.", share_id)

    @add_hooks
    @utils.require_driver_initialized
    def unmanage_snapshot(self, context, snapshot_id):
        """Remove a snapshot from Manila management, leaving it on backend.

        :param snapshot_id: ID of the snapshot to unmanage.
        """
        status = {'status': constants.STATUS_UNMANAGE_ERROR}
        context = context.elevated()
        snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
        share_server = self._get_share_server(context,
                                              snapshot_ref['share'])

        snapshot_instance = self.db.share_snapshot_instance_get(
            context, snapshot_ref.instance['id'], with_share_data=True
        )

        project_id = snapshot_ref['project_id']

        if self.configuration.safe_get('unmanage_remove_access_rules'):
            try:
                self.snapshot_access_helper.update_access_rules(
                    context,
                    snapshot_instance['id'],
                    delete_all_rules=True,
                    share_server=share_server)
            except Exception:
                LOG.exception(
                    ("Cannot remove access rules of snapshot %s."),
                    snapshot_id)
                self.db.share_snapshot_update(context, snapshot_id, status)
                return

        dhss = self.driver.driver_handles_share_servers

        try:
            if dhss:
                self.driver.unmanage_snapshot_with_server(
                    snapshot_instance, share_server)
            else:
                self.driver.unmanage_snapshot(snapshot_instance)
        except exception.UnmanageInvalidShareSnapshot as e:
            self.db.share_snapshot_update(context, snapshot_id, status)
            LOG.error("Share snapshot cannot be unmanaged: %s.", e)
            return

        try:
            share_type_id = snapshot_ref['share']['instance']['share_type_id']
            reservations = QUOTAS.reserve(
                context,
                project_id=project_id,
                snapshots=-1,
                snapshot_gigabytes=-snapshot_ref['size'],
                share_type_id=share_type_id,
            )
            QUOTAS.commit(
                context, reservations, project_id=project_id,
                share_type_id=share_type_id,
            )
        except Exception as e:
            # Note(imalinovskiy):
            # Quota reservation errors here are not fatal, because
            # unmanage is administrator API and he/she could update user
            # quota usages later if it's required.
            LOG.warning("Failed to update quota usages: %s.", e)

        self.db.share_snapshot_instance_delete(
            context, snapshot_instance['id'])

    @add_hooks
    @utils.require_driver_initialized
    def manage_share_server(self, context, share_server_id, identifier,
                            driver_opts):
        """Bring a pre-existing backend share server under Manila management.

        :param share_server_id: ID of the share server record to populate.
        :param identifier: backend-specific identifier of the server.
        :param driver_opts: driver-specific options for locating the server.
        :raises: ManageShareServerError when the backend runs in DHSS=False
            mode or when the server's network allocations cannot be managed.
        """
        if self.driver.driver_handles_share_servers is False:
            msg = _("Cannot manage share server %s in a "
                    "backend configured with driver_handles_share_servers"
                    " set to False.") % share_server_id
            raise exception.ManageShareServerError(reason=msg)

        server = self.db.share_server_get(context, share_server_id)

        try:
            # NOTE(felipe_rodrigues): Manila does not support manage share
            # server with multiple allocations, so it can get the first
            # subnet_id element.
            share_network_subnet = self.db.share_network_subnet_get(
                context, server['share_network_subnet_ids'][0])
            share_network = self.db.share_network_get(
                context, share_network_subnet['share_network_id'])

            number_allocations = (
                self.driver.get_network_allocations_number())

            if self.driver.admin_network_api:
                number_allocations += (
                    self.driver.get_admin_network_allocations_number())

            if number_allocations > 0:

                # allocations obtained from the driver that still need to
                # be validated
                remaining_allocations = (
                    self.driver.get_share_server_network_info(
                        context, server, identifier, driver_opts))

                if len(remaining_allocations) > 0:

                    if self.driver.admin_network_api:
                        remaining_allocations = (
                            self.driver.admin_network_api.
                            manage_network_allocations(
                                context, remaining_allocations, server))

                    # allocations that are managed are removed from
                    # remaining_allocations

                    remaining_allocations = (
                        self.driver.network_api.
                        manage_network_allocations(
                            context, remaining_allocations, server,
                            share_network, share_network_subnet))

                    # We require that all allocations are managed, else we
                    # may have problems deleting this share server
                    if len(remaining_allocations) > 0:
                        msg = ("Failed to manage all allocations. "
                               "Allocations %s were not "
                               "managed." % remaining_allocations)
                        raise exception.ManageShareServerError(reason=msg)

                else:
                    # if there should be allocations, but the driver
                    # doesn't return any something is wrong
                    msg = ("Driver did not return required network "
                           "allocations to be managed. Required number "
                           "of allocations is %s." % number_allocations)
                    raise exception.ManageShareServerError(reason=msg)

            new_identifier, backend_details = self.driver.manage_server(
                context, server, identifier, driver_opts)

            if not new_identifier:
                new_identifier = server['id']

            if backend_details is None or not isinstance(
                    backend_details, dict):
                backend_details = {}

            # Persist the share network's security services in the server's
            # backend details, keyed by service type.
            for security_service in share_network['security_services']:
                ss_type = security_service['type']
                data = {
                    'name': security_service['name'],
                    'ou': security_service['ou'],
                    'default_ad_site': security_service['default_ad_site'],
                    'domain': security_service['domain'],
                    'server': security_service['server'],
                    'dns_ip': security_service['dns_ip'],
                    'user': security_service['user'],
                    'type': ss_type,
                    'password': security_service['password'],
                }
                backend_details.update({
                    'security_service_' + ss_type: jsonutils.dumps(data)
                })

            if backend_details:
                self.db.share_server_backend_details_set(
                    context, server['id'], backend_details)

            self.db.share_server_update(
                context, share_server_id,
                {'status': constants.STATUS_ACTIVE,
                 'identifier': new_identifier,
                 'network_allocation_update_support': (
                     self.driver.network_allocation_update_support),
                 'share_replicas_migration_support': (
                     self.driver.share_replicas_migration_support)})
        except Exception:
            msg = "Error managing share server %s"
            LOG.exception(msg, share_server_id)
            self.db.share_server_update(
                context, share_server_id,
                {'status': constants.STATUS_MANAGE_ERROR})
            raise

        LOG.info("Share server %s managed successfully.", share_server_id)

    @add_hooks
    @utils.require_driver_initialized
    def unmanage_share_server(self, context, share_server_id, force=False):
        """Remove a share server from Manila management.

        :param share_server_id: ID of the share server to unmanage.
        :param force: proceed even if the driver does not implement
            unmanage_server.
        """
        server = self.db.share_server_get(
            context, share_server_id)
        server_details = server['backend_details']

        # Recover the security services previously serialized into the
        # backend details (see manage_share_server).
        security_services = []
        for ss_name in constants.SECURITY_SERVICES_ALLOWED_TYPES:
            ss = server_details.get('security_service_' + ss_name)
            if ss:
                security_services.append(jsonutils.loads(ss))

        try:
            self.driver.unmanage_server(server_details, security_services)
        except NotImplementedError:
            if not force:
                LOG.error("Did not unmanage share server %s since the driver "
                          "does not support managing share servers and no "
                          "``force`` option was supplied.",
                          share_server_id)
                self.db.share_server_update(
                    context, share_server_id,
                    {'status': constants.STATUS_UNMANAGE_ERROR})
                return

        try:
            if self.driver.get_network_allocations_number() > 0:
                # NOTE(ganso): This will already remove admin allocations.
                self.driver.network_api.unmanage_network_allocations(
                    context, share_server_id)
            elif (self.driver.get_admin_network_allocations_number() > 0
                  and self.driver.admin_network_api):
                # NOTE(ganso): This is here in case there are only admin
                # allocations.
                self.driver.admin_network_api.unmanage_network_allocations(
                    context, share_server_id)
            self.db.share_server_delete(context, share_server_id)
        except Exception:
            msg = "Error unmanaging share server %s"
            LOG.exception(msg, share_server_id)
            self.db.share_server_update(
                context, share_server_id,
                {'status': constants.STATUS_UNMANAGE_ERROR})
            raise

        LOG.info("Share server %s unmanaged successfully.", share_server_id)

    @add_hooks
    @utils.require_driver_initialized
    def revert_to_snapshot(self, context, snapshot_id,
                           reservations):
        """Revert a share to the state captured in one of its snapshots.

        Dispatches to the replicated or non-replicated implementation
        depending on whether the share has replicas.

        :param snapshot_id: ID of the snapshot to revert to.
        :param reservations: quota reservations made by the API layer for a
            possible size change; committed or rolled back downstream.
        """
        context = context.elevated()
        snapshot = self.db.share_snapshot_get(context, snapshot_id)
        share = snapshot['share']
        share_id = share['id']
        share_instance_id = snapshot.instance.share_instance_id
        share_access_rules = (
            self.access_helper.get_share_instance_access_rules(
                context, filters={'state': constants.STATUS_ACTIVE},
                share_instance_id=share_instance_id))
        snapshot_access_rules = (
            self.snapshot_access_helper.get_snapshot_instance_access_rules(
                context, snapshot.instance['id']))

        if share.get('has_replicas'):
            self._revert_to_replicated_snapshot(
                context, share, snapshot, reservations, share_access_rules,
                snapshot_access_rules, share_id=share_id)
        else:
            self._revert_to_snapshot(context, share, snapshot, reservations,
                                     share_access_rules,
                                     snapshot_access_rules)

    def _revert_to_snapshot(self, context, share, snapshot, reservations,
                            share_access_rules, snapshot_access_rules):
        """Revert a non-replicated share to a snapshot via the driver.

        On driver failure the share is set to 'reverting_error', the
        snapshot back to 'available', and any reservations are rolled back.
        """
        share_server = self._get_share_server(context, share)
        share_id = share['id']
        snapshot_id = snapshot['id']
        project_id = share['project_id']
        user_id = share['user_id']

        snapshot_instance = self.db.share_snapshot_instance_get(
            context, snapshot.instance['id'], with_share_data=True)
        share_type_id = snapshot_instance["share_instance"]["share_type_id"]

        # Make primitive to pass the information to the driver
        snapshot_instance_dict = self._get_snapshot_instance_dict(
            context, snapshot_instance, snapshot=snapshot)

        try:
            updated_share_size = self.driver.revert_to_snapshot(
                context,
                snapshot_instance_dict,
                share_access_rules,
                snapshot_access_rules,
                share_server=share_server)
        except Exception as excep:
            with excutils.save_and_reraise_exception():
                msg = ('Share %(share)s could not be reverted '
                       'to snapshot %(snap)s.')
                msg_args = {'share': share_id, 'snap': snapshot_id}
                LOG.exception(msg, msg_args)

                if reservations:
                    QUOTAS.rollback(
                        context, reservations, project_id=project_id,
                        user_id=user_id,
                        share_type_id=share_type_id,
                    )

                self.db.share_update(
                    context, share_id,
                    {'status': constants.STATUS_REVERTING_ERROR})
                self.db.share_snapshot_update(
                    context, snapshot_id,
                    {'status': constants.STATUS_AVAILABLE})
                self.message_api.create(
                    context,
                    message_field.Action.REVERT_TO_SNAPSHOT,
                    share['project_id'],
                    resource_type=message_field.Resource.SHARE,
                    resource_id=share_id,
                    exception=excep)

        # fail-safe in case driver returned size is None or invalid
        if not updated_share_size:
            updated_share_size = snapshot['size']
        else:
            try:
                int(updated_share_size)
            except ValueError:
                updated_share_size = snapshot['size']

        if reservations:
            if updated_share_size == snapshot['size']:
                QUOTAS.commit(
                    context, reservations, project_id=project_id,
                    user_id=user_id, share_type_id=share_type_id,
                )
            else:
                # NOTE(kpdev): The driver tells us that the share size wasn't
                # modified to the snapshot's size; so no need to commit quota
                # changes
                QUOTAS.rollback(
                    context, reservations, project_id=project_id,
                    user_id=user_id, share_type_id=share_type_id,
                )
                if updated_share_size != share['size']:
                    LOG.error("Driver returned an unexpected size %d on "
                              "revert to snapshot operation. You need to "
                              "adjust the quota", updated_share_size)

        self.db.share_update(
            context, share_id,
            {'status': constants.STATUS_AVAILABLE,
             'size': updated_share_size})
        self.db.share_snapshot_update(
            context, snapshot_id,
            {'status': constants.STATUS_AVAILABLE})

        msg = ('Share %(share)s reverted to snapshot %(snap)s '
               'successfully.')
        msg_args = {'share': share_id, 'snap': snapshot_id}
        LOG.info(msg, msg_args)

    def _get_share_details_from_instance(self, context, share_instance_id):
        """Return (share, share_instance, share_server) for an instance ID."""
        share_instance = self._get_share_instance(context, share_instance_id)
        share = self.db.share_get(context, share_instance.get('share_id'))
        share_server = self._get_share_server(context, share_instance)
        return (share, share_instance, share_server)

    @add_hooks
    @utils.require_driver_initialized
    def delete_share_instance(self, context, share_instance_id, force=False,
                              deferred_delete=False):
        """Delete a share instance."""
        context = context.elevated()
        share, share_instance, share_server = (
            self._get_share_details_from_instance(context,
                                                  share_instance_id))

        self._notify_about_share_usage(context, share,
                                       share_instance, "delete.start")
        error_state = None

        if deferred_delete:
            # Deferred delete: reclaim quota immediately; the backend
            # deletion continues below (or via the periodic task).
            try:
                self.db.update_share_instance_quota_usages(
                    context, share_instance_id)
                LOG.info(
                    "Share instance %s had its deletion deferred. Quota was "
                    "reclaimed and the share driver will proceed with the "
                    "deletion.", share_instance_id
                )
            except Exception:
                LOG.warning(
                    "Error occurred during quota usage update. Administrator "
                    "must rectify quotas.")

            snap_instances = (
                self.db.share_snapshot_instance_get_all_with_filters(
                    context, {'share_instance_ids': share_instance_id}))

            if snap_instances:
                # The share has some snapshot instances whose deletion
                # was deferred. We relegate deletion of the share to
                # a periodic task so it can be processed after
                # all its snapshots are deleted. So we're deliberately
                # setting the share instance's status to
                # "error_deferred_deleting"
                self.db.share_instance_update(
                    context, share_instance_id,
                    {'status': constants.STATUS_ERROR_DEFERRED_DELETING})
                return

        try:
            self.access_helper.update_access_rules(
                context,
                share_instance_id,
                delete_all_rules=True,
                share_server=share_server
            )
        except exception.ShareResourceNotFound:
            LOG.warning("Share instance %s does not exist in the "
                        "backend.", share_instance_id)
        except Exception as excep:
            with excutils.save_and_reraise_exception() as exc_context:
                if force:
                    msg = ("The driver was unable to delete access rules "
                           "for the instance: %s. Will attempt to delete "
                           "the instance anyway.")
                    LOG.error(msg, share_instance_id)
                    exc_context.reraise = False
                else:
                    error_state = constants.STATUS_ERROR_DELETING
                    if deferred_delete:
                        error_state = constants.STATUS_ERROR_DEFERRED_DELETING
                        exc_context.reraise = False
                    self.db.share_instance_update(
                        context,
                        share_instance_id,
                        {'status': error_state})
                self.message_api.create(
                    context,
                    message_field.Action.DELETE_ACCESS_RULES,
                    share_instance['project_id'],
                    resource_type=message_field.Resource.SHARE,
                    resource_id=share_instance_id,
                    exception=excep)
        if error_state == constants.STATUS_ERROR_DEFERRED_DELETING and (
                not force):
            return

        try:
            self.driver.delete_share(context, share_instance,
                                     share_server=share_server)
        except exception.ShareResourceNotFound:
            LOG.warning("Share instance %s does not exist in the "
                        "backend.", share_instance_id)
        except Exception as excep:
            with excutils.save_and_reraise_exception() as exc_context:
                if force:
                    msg = ("The driver was unable to delete the share "
                           "instance: %s on the backend. Since this "
                           "operation is forced, the instance will be "
                           "deleted from Manila's database. A cleanup on "
                           "the backend may be necessary.")
                    LOG.error(msg, share_instance_id)
                    exc_context.reraise = False
                else:
                    error_state = constants.STATUS_ERROR_DELETING
                    if deferred_delete:
                        error_state = constants.STATUS_ERROR_DEFERRED_DELETING
                        exc_context.reraise = False
                    self.db.share_instance_update(
                        context,
                        share_instance_id,
                        {'status': error_state})
                self.message_api.create(
                    context,
                    message_field.Action.DELETE,
                    share_instance['project_id'],
                    resource_type=message_field.Resource.SHARE,
                    resource_id=share_instance_id,
                    exception=excep)
        if error_state == constants.STATUS_ERROR_DEFERRED_DELETING and (
                not force):
            return

        # Quota was already reclaimed up front for deferred deletions; do
        # not reclaim it a second time on final DB removal.
        need_to_update_usages = True
        if share_instance['status'] in (
            constants.STATUS_DEFERRED_DELETING,
            constants.STATUS_ERROR_DEFERRED_DELETING
        ):
            need_to_update_usages = False

        self.db.share_instance_delete(
            context, share_instance_id,
            need_to_update_usages=need_to_update_usages)

        LOG.info("Share instance %s: deleted successfully.",
                 share_instance_id)

        self._check_delete_share_server(context,
                                        share_instance=share_instance)

        self._notify_about_share_usage(context, share,
                                       share_instance, "delete.end")

    def _check_delete_share_server(self, context, share_instance=None,
                                   share_server=None, remote_host=False):
        """Delete the share server if its last share instance is gone.

        Only acts when the delete_share_server_with_last_share option is
        enabled and the server is marked auto-deletable.
        """
        if CONF.delete_share_server_with_last_share:
            if share_instance and not share_server:
                share_server = self._get_share_server(context, share_instance)
            if (share_server and len(share_server.share_instances) == 0
                    and share_server.is_auto_deletable is True):
                LOG.debug("Scheduled deletion of share-server "
                          "with id '%s' automatically by "
                          "deletion of last share.", share_server['id'])
                if remote_host:
                    rpcapi = share_rpcapi.ShareAPI()
                    rpcapi.delete_share_server(context, share_server)
                else:
                    self.delete_share_server(context, share_server)

    @periodic_task.periodic_task(
        spacing=CONF.periodic_deferred_delete_interval)
    @utils.require_driver_initialized
    def do_deferred_share_deletion(self, ctxt):
        """Periodic task: retry backend deletion of deferred-deleted shares.

        Processes share instances on this host stuck in
        'error_deferred_deleting'; skips any that still have snapshot
        instances.
        """
        LOG.debug("Checking for shares in 'deferred_deleting' status to "
                  "process their deletion.")
        ctxt = ctxt.elevated()
        share_instances = self.db.share_instance_get_all(
            ctxt,
            filters={
                'status': constants.STATUS_ERROR_DEFERRED_DELETING,
                'host': self.host,
            },
        )
        for share_instance in share_instances:
            share_instance_id = share_instance['id']
            share, share_instance, share_server = (
                self._get_share_details_from_instance(
                    ctxt, share_instance_id
                )
            )
            snap_instances = (
                self.db.share_snapshot_instance_get_all_with_filters(
                    ctxt, {'share_instance_ids': share_instance_id}))
            if snap_instances:
                LOG.warning("Snapshot instances are present for the "
                            "share instance: %s.", share_instance_id)
                continue

            try:
                self.access_helper.update_access_rules(
                    ctxt,
                    share_instance_id,
                    delete_all_rules=True,
                    share_server=share_server
                )
            except Exception:
                msg = ("The driver was unable to delete access rules "
                       "for the instance: %s.")
                LOG.error(msg, share_instance_id)
                continue

            try:
                self.driver.delete_share(ctxt, share_instance,
                                         share_server=share_server)
            except exception.ShareResourceNotFound:
                LOG.warning("Share instance %s does not exist in the "
                            "backend.", share_instance_id)
            except Exception:
                msg = ("The driver was unable to delete the share "
                       "instance: %s on the backend. ")
                LOG.error(msg, share_instance_id)
                continue

            self.db.share_instance_delete(ctxt, share_instance_id)
            LOG.info("Share instance %s: deferred deleted successfully.",
                     share_instance_id)
            self._check_delete_share_server(ctxt,
                                            share_instance=share_instance)
            self._notify_about_share_usage(ctxt, share,
                                           share_instance, "delete.end")

    @periodic_task.periodic_task(spacing=600)
    @utils.require_driver_initialized
    def delete_free_share_servers(self, ctxt):
        """Periodic task: clean up share servers with no shares.

        Only runs for DHSS=True backends with automatic cleanup enabled;
        deletes deletable servers unused longer than the configured
        interval.
        """
        if not (self.driver.driver_handles_share_servers and
                self.configuration.automatic_share_server_cleanup):
            return
        LOG.info("Check for unused share servers to delete.")
        updated_before = timeutils.utcnow() - datetime.timedelta(
            minutes=self.configuration.unused_share_server_cleanup_interval)
        servers = self.db.share_server_get_all_unused_deletable(
            ctxt, self.host, updated_before)
        for server in servers:
            try:
                self.delete_share_server(ctxt, server)
            except exception.ShareServerNotFound:
                # Already gone; nothing to clean up.
                continue
            except Exception:
                LOG.exception(
                    "Unable to delete share server %s, will retry in the "
                    "next run.", server['id'])

    @periodic_task.periodic_task(
        spacing=CONF.check_for_expired_shares_in_recycle_bin_interval)
    @utils.require_driver_initialized
    def delete_expired_share(self, ctxt):
        """Periodic task: delete soft-deleted shares past their expiry."""
        LOG.debug("Check for expired share in recycle bin to delete.")
        expired_shares = self.db.share_get_all_expired(ctxt)
        for share in expired_shares:
            if share['status'] == constants.STATUS_ERROR_DELETING:
                LOG.info("Share %s was soft-deleted but a prior deletion "
                         "attempt failed. Resetting status and re-attempting "
                         "deletion", share['id'])
                # reset share status to error in order to try deleting again
                update_data = {'status': constants.STATUS_ERROR}
                self.db.share_update(ctxt, share['id'], update_data)
            else:
                LOG.info("share %s has expired, will be deleted", share['id'])
            self.share_api.delete(ctxt, share)

    @periodic_task.periodic_task(
        spacing=CONF.check_for_expired_transfers)
    def delete_expired_transfers(self, ctxt):
        """Periodic task: destroy share transfers past their expiry."""
        LOG.info("Checking for expired transfers.")
        expired_transfers = self.db.transfer_get_all_expired(ctxt)
        for transfer in expired_transfers:
            LOG.debug("Transfer %s has expired, will be destroyed.",
                      transfer['id'])
            self.transfer_api.delete(ctxt, transfer_id=transfer['id'])

    @utils.require_driver_initialized
    def transfer_accept(self, context, share_id, new_user, new_project,
                        clear_rules):
        """Finalize acceptance of a share transfer on the backend.

        :param new_user: user ID receiving the share.
        :param new_project: project ID receiving the share.
        :param clear_rules: when True, delete all existing access rules
            before handing the share over.
        """
        # need elevated context as we haven't "given" the share yet
        elevated_context = context.elevated()
        share_ref = self.db.share_get(elevated_context, share_id)
        access_rules = self.db.share_access_get_all_for_share(
            elevated_context, share_id)
        share_instances = self.db.share_instance_get_all_by_share(
            elevated_context, share_id)
        share_server = self._get_share_server(context, share_ref)
        for share_instance in share_instances:
            share_instance = self.db.share_instance_get(
                context, share_instance['id'], with_share_data=True)
            if clear_rules and access_rules:
                try:
                    self.access_helper.update_access_rules(
                        context,
                        share_instance['id'],
                        delete_all_rules=True
                    )
                    access_rules = []
                except Exception:
                    with excutils.save_and_reraise_exception():
                        msg = (
                            "Can not remove access rules for share "
                            "instance %(si)s belonging to share %(shr)s.")
                        msg_payload = {
                            'si': share_instance['id'],
                            'shr': share_id,
                        }
                        LOG.error(msg, msg_payload)

            try:
                self.driver.transfer_accept(context,
                                            share_instance,
                                            new_user,
                                            new_project,
                                            access_rules=access_rules,
                                            share_server=share_server)
            except exception.DriverCannotTransferShareWithRules as e:
                with excutils.save_and_reraise_exception():
                    self.message_api.create(
                        context,
                        message_field.Action.TRANSFER_ACCEPT,
                        new_project,
                        resource_type=message_field.Resource.SHARE,
                        resource_id=share_id,
                        detail=(message_field.Detail.
                                DRIVER_FAILED_TRANSFER_ACCEPT))
                    msg = _("The backend failed to accept the share: %s.")
                    LOG.error(msg, e)

        msg = ('Share %(share_id)s has transfer from %(old_project_id)s to '
               '%(new_project_id)s completed successfully.')
        msg_args = {
            "share_id": share_id,
            "old_project_id": share_ref['project_id'],
            "new_project_id": context.project_id
        }
        LOG.info(msg, msg_args)

    @add_hooks
    @utils.require_driver_initialized
    def create_snapshot(self, context, share_id, snapshot_id):
        """Create snapshot for share."""
        snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)
        share_server = self._get_share_server(
            context, snapshot_ref['share']['instance'])
        snapshot_instance = self.db.share_snapshot_instance_get(
            context, snapshot_ref.instance['id'], with_share_data=True
        )
        snapshot_instance_id = snapshot_instance['id']

        # Make primitive to pass the information to the driver.
        snapshot_instance = self._get_snapshot_instance_dict(
            context, snapshot_instance)

        try:
            model_update = self.driver.create_snapshot(
                context, snapshot_instance,
                share_server=share_server) or {}
        except Exception as excep:
            with excutils.save_and_reraise_exception():
                self.db.share_snapshot_instance_update(
                    context,
                    snapshot_instance_id,
                    {'status': constants.STATUS_ERROR})
                self.message_api.create(
                    context,
                    message_field.Action.CREATE,
                    snapshot_ref['project_id'],
                    resource_type=message_field.Resource.SHARE_SNAPSHOT,
                    resource_id=snapshot_instance_id,
                    exception=excep)

        snapshot_export_locations = model_update.pop('export_locations', [])

        if snapshot_instance['share']['mount_snapshot_support']:
            for el in snapshot_export_locations:
                values = {
                    'share_snapshot_instance_id': snapshot_instance_id,
                    'path': el['path'],
                    'is_admin_only': el['is_admin_only'],
                }
                self.db.share_snapshot_instance_export_location_create(
                    context, values)

        if model_update.get('status') in (None, constants.STATUS_AVAILABLE):
model_update['status'] = constants.STATUS_AVAILABLE model_update['progress'] = '100%' self.db.share_snapshot_instance_update( context, snapshot_instance_id, model_update) def _delete_snapshot_quota(self, context, snapshot): share_type_id = snapshot['share']['instance']['share_type_id'] reservations = None try: reservations = QUOTAS.reserve( context, project_id=snapshot['project_id'], snapshots=-1, snapshot_gigabytes=-snapshot['size'], user_id=snapshot['user_id'], share_type_id=share_type_id, ) except Exception: LOG.exception("Failed to update quota usages while deleting " "snapshot %s.", snapshot['id']) if reservations: QUOTAS.commit( context, reservations, project_id=snapshot['project_id'], user_id=snapshot['user_id'], share_type_id=share_type_id, ) @add_hooks @utils.require_driver_initialized def delete_snapshot(self, context, snapshot_id, force=False, deferred_delete=False): """Delete share snapshot.""" context = context.elevated() snapshot_ref = self.db.share_snapshot_get(context, snapshot_id) share_server = self._get_share_server( context, snapshot_ref['share']['instance']) snapshot_instance = self.db.share_snapshot_instance_get( context, snapshot_ref.instance['id'], with_share_data=True) snapshot_instance_id = snapshot_instance['id'] snapshot_instance = self._get_snapshot_instance_dict( context, snapshot_instance) share_ref = self.db.share_get(context, snapshot_ref['share_id']) if deferred_delete: try: self._delete_snapshot_quota(context, snapshot_ref) LOG.info( "Snapshot instance %s had its deletion deferred. Quota " "was reclaimed and the share driver will proceed with " "the deletion.", snapshot_instance['id'] ) except Exception: LOG.warning( "Error occured during quota usage update. 
Administrator " "must rectify quotas.") if share_ref['mount_snapshot_support']: try: self.snapshot_access_helper.update_access_rules( context, snapshot_instance['id'], delete_all_rules=True, share_server=share_server) except Exception: LOG.exception( ("Failed to remove access rules for snapshot %s."), snapshot_instance['id']) LOG.warning("The driver was unable to remove access rules " "for snapshot %s. Moving on.", snapshot_instance['snapshot_id']) try: self.driver.delete_snapshot(context, snapshot_instance, share_server=share_server) except Exception as excep: with excutils.save_and_reraise_exception() as exc: if force: msg = _("The driver was unable to delete the " "snapshot %s on the backend. Since this " "operation is forced, the snapshot will " "be deleted from Manila's database. A cleanup on " "the backend may be necessary.") LOG.exception(msg, snapshot_id) exc.reraise = False else: error_state = constants.STATUS_ERROR_DELETING if deferred_delete: error_state = constants.STATUS_ERROR_DEFERRED_DELETING exc.reraise = False self.db.share_snapshot_instance_update( context, snapshot_instance_id, {'status': error_state}) self.message_api.create( context, message_field.Action.DELETE, snapshot_ref['project_id'], resource_type=message_field.Resource.SHARE_SNAPSHOT, resource_id=snapshot_instance_id, exception=excep) snapshot_instance = self.db.share_snapshot_instance_get( context, snapshot_ref.instance['id']) if snapshot_instance['status'] == ( constants.STATUS_ERROR_DEFERRED_DELETING) and not force: return self.db.share_snapshot_instance_delete(context, snapshot_instance_id) if snapshot_instance['status'] not in ( constants.STATUS_DEFERRED_DELETING, constants.STATUS_ERROR_DEFERRED_DELETING ): self._delete_snapshot_quota(context, snapshot_ref) @periodic_task.periodic_task( spacing=CONF.periodic_deferred_delete_interval) @utils.require_driver_initialized def do_deferred_snapshot_deletion(self, ctxt): LOG.debug("Checking for snapshots in 'deferred_deleting' status to " 
"process their deletion.") ctxt = ctxt.elevated() snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( ctxt, {'statuses': constants.STATUS_ERROR_DEFERRED_DELETING})) for snapshot_instance in snapshot_instances: snapshot_instance_id = snapshot_instance['id'] share_server = self._get_share_server( ctxt, snapshot_instance['share_instance']) snapshot_instance = self._get_snapshot_instance_dict( ctxt, snapshot_instance) try: self.driver.delete_snapshot(ctxt, snapshot_instance, share_server=share_server) except Exception: continue self.db.share_snapshot_instance_delete(ctxt, snapshot_instance_id) @add_hooks @utils.require_driver_initialized @locked_share_replica_operation def create_replicated_snapshot(self, context, snapshot_id, share_id=None): """Create a snapshot for a replicated share.""" # Grab the snapshot and replica information from the DB. snapshot = self.db.share_snapshot_get(context, snapshot_id) share_server = self._get_share_server(context, snapshot['share']) replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': snapshot['id']}, with_share_data=True) ) replica_list = ( self.db.share_replicas_get_all_by_share( context, share_id, with_share_data=True, with_share_server=True) ) # Make primitives to pass the information to the driver. 
replica_list = [self._get_share_instance_dict(context, r) for r in replica_list] replica_snapshots = [self._get_snapshot_instance_dict(context, s) for s in replica_snapshots] updated_instances = [] try: updated_instances = self.driver.create_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) or [] except Exception: with excutils.save_and_reraise_exception(): for instance in replica_snapshots: self.db.share_snapshot_instance_update( context, instance['id'], {'status': constants.STATUS_ERROR}) for instance in updated_instances: if instance['status'] == constants.STATUS_AVAILABLE: instance.update({'progress': '100%'}) self.db.share_snapshot_instance_update( context, instance['id'], instance) def _find_active_replica_on_host(self, replica_list): """Find the active replica matching this manager's host.""" for replica in replica_list: if (replica['replica_state'] == constants.REPLICA_STATE_ACTIVE and share_utils.extract_host(replica['host']) == self.host): return replica @locked_share_replica_operation def _revert_to_replicated_snapshot(self, context, share, snapshot, reservations, share_access_rules, snapshot_access_rules, share_id=None): share_server = self._get_share_server(context, share) snapshot_id = snapshot['id'] project_id = share['project_id'] user_id = share['user_id'] # Get replicas, including an active replica replica_list = self.db.share_replicas_get_all_by_share( context, share_id, with_share_data=True, with_share_server=True) active_replica = self._find_active_replica_on_host(replica_list) # Get snapshot instances, including one on an active replica replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': snapshot_id}, with_share_data=True)) snapshot_instance_filters = { 'share_instance_ids': active_replica['id'], 'snapshot_ids': snapshot_id, } active_replica_snapshot = ( self.db.share_snapshot_instance_get_all_with_filters( context, snapshot_instance_filters))[0] # Make 
primitives to pass the information to the driver replica_list = [self._get_share_instance_dict(context, replica) for replica in replica_list] active_replica = self._get_share_instance_dict(context, active_replica) replica_snapshots = [self._get_snapshot_instance_dict(context, s) for s in replica_snapshots] active_replica_snapshot = self._get_snapshot_instance_dict( context, active_replica_snapshot, snapshot=snapshot) try: self.driver.revert_to_replicated_snapshot( context, active_replica, replica_list, active_replica_snapshot, replica_snapshots, share_access_rules, snapshot_access_rules, share_server=share_server) except Exception: with excutils.save_and_reraise_exception(): msg = ('Share %(share)s could not be reverted ' 'to snapshot %(snap)s.') msg_args = {'share': share_id, 'snap': snapshot_id} LOG.exception(msg, msg_args) if reservations: QUOTAS.rollback( context, reservations, project_id=project_id, user_id=user_id, share_type_id=active_replica['share_type_id'], ) self.db.share_replica_update( context, active_replica['id'], {'status': constants.STATUS_REVERTING_ERROR}) self.db.share_snapshot_instance_update( context, active_replica_snapshot['id'], {'status': constants.STATUS_AVAILABLE}) if reservations: QUOTAS.commit( context, reservations, project_id=project_id, user_id=user_id, share_type_id=active_replica['share_type_id'], ) self.db.share_update(context, share_id, {'size': snapshot['size']}) self.db.share_replica_update( context, active_replica['id'], {'status': constants.STATUS_AVAILABLE}) self.db.share_snapshot_instance_update( context, active_replica_snapshot['id'], {'status': constants.STATUS_AVAILABLE}) msg = ('Share %(share)s reverted to snapshot %(snap)s ' 'successfully.') msg_args = {'share': share_id, 'snap': snapshot_id} LOG.info(msg, msg_args) @add_hooks @utils.require_driver_initialized @locked_share_replica_operation def delete_replicated_snapshot(self, context, snapshot_id, share_id=None, force=False): """Delete a snapshot from a replicated 
share.""" # Grab the replica and snapshot information from the DB. snapshot = self.db.share_snapshot_get(context, snapshot_id) share_server = self._get_share_server(context, snapshot['share']) replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': snapshot['id']}, with_share_data=True) ) replica_list = ( self.db.share_replicas_get_all_by_share( context, share_id, with_share_data=True, with_share_server=True) ) replica_list = [self._get_share_instance_dict(context, r) for r in replica_list] replica_snapshots = [self._get_snapshot_instance_dict(context, s) for s in replica_snapshots] deleted_instances = [] updated_instances = [] db_force_delete_msg = _('The driver was unable to delete some or all ' 'of the share replica snapshots on the ' 'backend/s. Since this operation is forced, ' 'the replica snapshots will be deleted from ' 'Manila.') try: updated_instances = self.driver.delete_replicated_snapshot( context, replica_list, replica_snapshots, share_server=share_server) or [] except Exception: with excutils.save_and_reraise_exception() as e: if force: # Can delete all instances if forced. deleted_instances = replica_snapshots LOG.exception(db_force_delete_msg) e.reraise = False else: for instance in replica_snapshots: self.db.share_snapshot_instance_update( context, instance['id'], {'status': constants.STATUS_ERROR_DELETING}) if not deleted_instances: if force: # Ignore model updates on 'force' delete. 
                # Forced delete: remove every replica snapshot record even
                # though the driver returned no model updates.
                LOG.warning(db_force_delete_msg)
                deleted_instances = replica_snapshots
            else:
                # Split the driver's updates into instances reported as
                # deleted and instances that only changed state.
                deleted_instances = list(filter(
                    lambda x: x['status'] == constants.STATUS_DELETED,
                    updated_instances))
                updated_instances = list(filter(
                    lambda x: x['status'] != constants.STATUS_DELETED,
                    updated_instances))

        for instance in deleted_instances:
            self.db.share_snapshot_instance_delete(context, instance['id'])

        for instance in updated_instances:
            self.db.share_snapshot_instance_update(
                context, instance['id'], instance)

    @periodic_task.periodic_task(spacing=CONF.replica_state_update_interval)
    @utils.require_driver_initialized
    def periodic_share_replica_snapshot_update(self, context):
        """Poll the driver for replica snapshots stuck in transition.

        Collects snapshot instances in 'creating'/'deleting' status on
        non-active replicas hosted by this backend and asks the driver for
        their current state via _update_replica_snapshot.
        """
        LOG.debug("Updating status of share replica snapshots.")
        transitional_statuses = (constants.STATUS_CREATING,
                                 constants.STATUS_DELETING)
        # we will need: id, host, replica_state
        replicas = self.db.share_replicas_get_all(context,
                                                  with_share_data=False,
                                                  with_share_server=False)

        def qualified_replica(r):
            # Filter non-active replicas belonging to this backend
            return (share_utils.extract_host(r['host']) ==
                    share_utils.extract_host(self.host) and
                    r['replica_state'] != constants.REPLICA_STATE_ACTIVE)

        host_replicas = list(filter(
            lambda x: qualified_replica(x), replicas))
        transitional_replica_snapshots = []

        # Get snapshot instances for each replica that are in 'creating' or
        # 'deleting' states.
for replica in host_replicas: filters = { 'share_instance_ids': replica['id'], 'statuses': transitional_statuses, } # we will need: id, snapshot_id, share_instance_id and # share['share_id'] replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, filters, with_share_data=True) ) transitional_replica_snapshots.extend(replica_snapshots) for replica_snapshot in transitional_replica_snapshots: replica_snapshots = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'snapshot_ids': replica_snapshot['snapshot_id']}, with_share_data=False) ) share_id = replica_snapshot['share']['share_id'] self._update_replica_snapshot( context, replica_snapshot, replica_snapshots=replica_snapshots, share_id=share_id) @locked_share_replica_operation def _update_replica_snapshot(self, context, replica_snapshot, replica_snapshots=None, share_id=None): # share_id is used by the locked_share_replica_operation decorator # Re-grab the replica, now with share data: try: # _get_share_instance_dict will fetch share server share_replica = self.db.share_replica_get( context, replica_snapshot['share_instance_id'], with_share_data=True, with_share_server=False) replica_snapshot = self.db.share_snapshot_instance_get( context, replica_snapshot['id'], with_share_data=True) except exception.NotFound: # Replica may have been deleted, try to cleanup the snapshot # instance try: self.db.share_snapshot_instance_delete( context, replica_snapshot['id']) except exception.ShareSnapshotInstanceNotFound: # snapshot instance has been deleted, nothing to do here pass return msg_payload = { 'snapshot_instance': replica_snapshot['id'], 'replica': share_replica['id'], } LOG.debug("Updating status of replica snapshot %(snapshot_instance)s: " "on replica: %(replica)s", msg_payload) # Grab all the replica and snapshot information. 
# _get_share_instance_dict will fetch share server replica_list = ( self.db.share_replicas_get_all_by_share( context, share_replica['share_id'], with_share_data=True, with_share_server=False) ) replica_list = [self._get_share_instance_dict(context, r) for r in replica_list] replica_snapshots = replica_snapshots or [] # Convert data to primitives to send to the driver. replica_snapshots = [self._get_snapshot_instance_dict(context, s) for s in replica_snapshots] replica_snapshot = self._get_snapshot_instance_dict( context, replica_snapshot) share_replica = self._get_share_instance_dict(context, share_replica) share_server = share_replica['share_server'] snapshot_update = None try: snapshot_update = self.driver.update_replicated_snapshot( context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=share_server) or {} except exception.SnapshotResourceNotFound: if replica_snapshot['status'] == constants.STATUS_DELETING: LOG.info('Snapshot %(snapshot_instance)s on replica ' '%(replica)s has been deleted.', msg_payload) self.db.share_snapshot_instance_delete( context, replica_snapshot['id']) else: LOG.exception("Replica snapshot %s was not found on " "the backend.", replica_snapshot['id']) self.db.share_snapshot_instance_update( context, replica_snapshot['id'], {'status': constants.STATUS_ERROR}) except Exception: LOG.exception("Driver error while updating replica snapshot: " "%s", replica_snapshot['id']) self.db.share_snapshot_instance_update( context, replica_snapshot['id'], {'status': constants.STATUS_ERROR}) if snapshot_update: snapshot_status = snapshot_update.get('status') if snapshot_status == constants.STATUS_AVAILABLE: snapshot_update['progress'] = '100%' self.db.share_snapshot_instance_update( context, replica_snapshot['id'], snapshot_update) @add_hooks @utils.require_driver_initialized def update_access(self, context, share_instance_id): """Allow/Deny/Update access to some share.""" share_instance = self._get_share_instance(context, 
share_instance_id) share_server_id = share_instance.get('share_server_id') self.update_access_for_instances(context, [share_instance_id], share_server_id=share_server_id) def update_access_for_instances(self, context, share_instance_ids, share_server_id=None): """Allow/Deny access to shares that belong to the same share server.""" share_server = None if share_server_id: share_server = self.db.share_server_get(context, share_server_id) for instance_id in share_instance_ids: LOG.debug("Received request to update access for share instance" " %s.", instance_id) self.access_helper.update_access_rules( context, instance_id, share_server=share_server) @periodic_task.periodic_task(spacing=CONF.periodic_interval) @utils.require_driver_initialized def _report_driver_status(self, context): LOG.info('Updating share status') share_stats = self.driver.get_share_stats(refresh=True) if not share_stats: return if self.driver.driver_handles_share_servers: share_stats['server_pools_mapping'] = ( self._get_servers_pool_mapping(context) ) self.update_service_capabilities(share_stats) @periodic_task.periodic_task(spacing=CONF.periodic_hooks_interval) @utils.require_driver_initialized def _execute_periodic_hook(self, context): """Executes periodic-based hooks.""" # TODO(vponomaryov): add also access rules and share servers share_instances = ( self.db.share_instance_get_all_by_host( context=context, host=self.host)) periodic_hook_data = self.driver.get_periodic_hook_data( context=context, share_instances=share_instances) for hook in self.hooks: hook.execute_periodic_hook( context=context, periodic_hook_data=periodic_hook_data) def _get_servers_pool_mapping(self, context): """Get info about relationships between pools and share_servers.""" share_servers = self.db.share_server_get_all_by_host(context, self.host) return {server['id']: self.driver.get_share_server_pools(server) for server in share_servers} @add_hooks @utils.require_driver_initialized def publish_service_capabilities(self, 
context): """Collect driver status and then publish it.""" self._report_driver_status(context) self._publish_service_capabilities(context) def _form_server_setup_info(self, context, share_server, share_network, share_network_subnets): share_server_id = share_server['id'] # Network info is used by driver for setting up share server # and getting server info on share creation. admin_network_allocations = ( self.db.network_allocations_get_for_share_server( context, share_server_id, label='admin')) # NOTE(felipe_rodrigues): items in the network_info list contain # same values for the keys: server_id, admin_network_allocations, # security_services and backend_details. network_info = [] for share_network_subnet in share_network_subnets: network_allocations = ( self.db.network_allocations_get_for_share_server( context, share_server_id, label='user', subnet_id=share_network_subnet['id'])) # NOTE(vponomaryov): following network_info fields are deprecated: # 'segmentation_id', 'cidr' and 'network_type'. # And they should be used from network allocations directly. # They should be removed right after no one uses them. 
            # One entry per subnet; server-scoped keys (server_id,
            # security_services, admin allocations, backend_details) repeat
            # across entries.
            network_info.append({
                'server_id': share_server['id'],
                'segmentation_id': share_network_subnet['segmentation_id'],
                'cidr': share_network_subnet['cidr'],
                'neutron_net_id': share_network_subnet['neutron_net_id'],
                'neutron_subnet_id':
                    share_network_subnet['neutron_subnet_id'],
                'security_services': share_network['security_services'],
                'network_allocations': network_allocations,
                'admin_network_allocations': admin_network_allocations,
                'backend_details': share_server.get('backend_details'),
                'network_type': share_network_subnet['network_type'],
                'subnet_metadata': share_network_subnet['subnet_metadata']
            })
        return network_info

    def _handle_setup_server_error(self, context, share_server_id, e):
        """Persist diagnostics from a failed share server setup.

        Saves any 'server_details' the driver attached to the exception
        (via its 'detail_data' attribute) into the share server's backend
        details, then marks the server with ERROR status. Non-dict detail
        payloads and unsaveable entries are logged and skipped.
        """
        details = getattr(e, "detail_data", {})

        if isinstance(details, dict):
            server_details = details.get("server_details", {})
            if not isinstance(server_details, dict):
                LOG.debug(
                    ("Cannot save non-dict data (%(data)s) provided as "
                     "'server details' of failed share server '%(server)s'."),
                    {"server": share_server_id, "data": server_details})
            else:
                invalid_details = []
                for key, value in server_details.items():
                    try:
                        # Save entries one at a time so a single bad value
                        # does not discard the rest.
                        self.db.share_server_backend_details_set(
                            context, share_server_id, {key: value})
                    except Exception:
                        invalid_details.append("%(key)s: %(value)s" % {
                            'key': str(key),
                            'value': str(value)
                        })
                if invalid_details:
                    LOG.debug(
                        ("Following server details cannot be written to db : "
                         "%s"), str("\n".join(invalid_details)))
        else:
            LOG.debug(
                ("Cannot save non-dict data (%(data)s) provided as 'detail "
                 "data' of failed share server '%(server)s'."),
                {"server": share_server_id, "data": details})

        self.db.share_server_update(
            context, share_server_id, {'status': constants.STATUS_ERROR})

    def _setup_server(self, context, share_server, metadata):
        """Allocate networks and have the driver set up a share server."""
        subnets = share_server['share_network_subnets']
        if not subnets:
            raise exception.NetworkBadConfigurationException(
                reason="share server does not have subnet")
        # all subnets reside on same share network, get it from the first one.
share_network_id = subnets[0]['share_network_id'] try: share_network = self.db.share_network_get(context, share_network_id) for share_network_subnet in subnets: self.driver.allocate_network( context, share_server, share_network, share_network_subnet) self.driver.allocate_admin_network(context, share_server) # Get share_network_subnets in case they were updated. share_network_subnets = ( self.db.share_network_subnet_get_all_by_share_server_id( context, share_server['id'])) network_info_list = self._form_server_setup_info( context, share_server, share_network, share_network_subnets) for network_info in network_info_list: self._validate_segmentation_id(network_info) # NOTE(vponomaryov): Save security services data to share server # details table to remove dependency from share network after # creation operation. It will allow us to delete share server and # share network separately without dependency on each other. for security_service in network_info_list[0]['security_services']: ss_type = security_service['type'] data = { 'name': security_service['name'], 'ou': security_service['ou'], 'domain': security_service['domain'], 'server': security_service['server'], 'dns_ip': security_service['dns_ip'], 'user': security_service['user'], 'type': ss_type, 'password': security_service['password'], 'default_ad_site': security_service['default_ad_site'], } self.db.share_server_backend_details_set( context, share_server['id'], {'security_service_' + ss_type: jsonutils.dumps(data)}) server_info = self.driver.setup_server( network_info_list, metadata=metadata) self.driver.update_network_allocation(context, share_server) self.driver.update_admin_network_allocation(context, share_server) if server_info and isinstance(server_info, dict): self.db.share_server_backend_details_set( context, share_server['id'], server_info) return self.db.share_server_update( context, share_server['id'], {'status': constants.STATUS_ACTIVE, 'identifier': server_info.get( 'identifier', 
share_server['id'])}) except Exception as e: with excutils.save_and_reraise_exception(): self._handle_setup_server_error(context, share_server['id'], e) self.driver.deallocate_network(context, share_server['id']) def _validate_segmentation_id(self, network_info): """Raises exception if the segmentation type is incorrect.""" if (network_info['network_type'] in (None, 'flat') and network_info['segmentation_id']): msg = _('A segmentation ID %(vlan_id)s was specified but can not ' 'be used with a network of type %(seg_type)s; the ' 'segmentation ID option must be omitted or set to 0') raise exception.NetworkBadConfigurationException( reason=msg % {'vlan_id': network_info['segmentation_id'], 'seg_type': network_info['network_type']}) elif (network_info['network_type'] == 'vlan' and (network_info['segmentation_id'] is None or int(network_info['segmentation_id']) > 4094 or int(network_info['segmentation_id']) < 1)): msg = _('A segmentation ID %s was specified but is not valid for ' 'a VLAN network type; the segmentation ID must be an ' 'integer value in the range of [1,4094]') raise exception.NetworkBadConfigurationException( reason=msg % network_info['segmentation_id']) elif (network_info['network_type'] == 'vxlan' and (network_info['segmentation_id'] is None or int(network_info['segmentation_id']) > 16777215 or int(network_info['segmentation_id']) < 1)): msg = _('A segmentation ID %s was specified but is not valid for ' 'a VXLAN network type; the segmentation ID must be an ' 'integer value in the range of [1,16777215]') raise exception.NetworkBadConfigurationException( reason=msg % network_info['segmentation_id']) elif (network_info['network_type'] == 'gre' and (network_info['segmentation_id'] is None or int(network_info['segmentation_id']) > 4294967295 or int(network_info['segmentation_id']) < 1)): msg = _('A segmentation ID %s was specified but is not valid for ' 'a GRE network type; the segmentation ID must be an ' 'integer value in the range of [1, 4294967295]') 
raise exception.NetworkBadConfigurationException( reason=msg % network_info['segmentation_id']) @add_hooks @utils.require_driver_initialized def delete_share_server(self, context, share_server): subnet_id = (share_server['share_network_subnet_ids'][0] if share_server['share_network_subnet_ids'] else None) @utils.synchronized( "share_manager_%s" % subnet_id) def _wrapped_delete_share_server(): # NOTE(vponomaryov): Verify that there are no dependent shares. # Without this verification we can get here exception in next case: # share-server-delete API was called after share creation scheduled # and share_server reached ACTIVE status, but before update # of share_server_id field for share. If so, after lock realese # this method starts executing when amount of dependent shares # has been changed. server_id = share_server['id'] try: server = self.db.share_server_get( context, server_id) except exception.ShareServerNotFound: raise shares = self.db.share_instance_get_all_by_share_server( context, server_id) if shares: raise exception.ShareServerInUse(share_server_id=server_id) server_details = share_server['backend_details'] self.db.share_server_update(context, server_id, {'status': constants.STATUS_DELETING}) try: LOG.debug("Deleting network of share server '%s'", server_id) share_net = None share_net_subnet = None if subnet_id: try: share_net_subnet = self.db.share_network_subnet_get( context, subnet_id) share_net = self.db.share_network_get( context, share_net_subnet['share_network_id']) except Exception: LOG.warning('Share network subnet not found during ' 'deletion of share server.') self.driver.deallocate_network(context, share_server['id'], share_net, share_net_subnet) application_credential_id = server.get( 'application_credential_id') if application_credential_id: # Delete application credentials for barbican user try: barbican_api.delete_application_credentials( context, application_credential_id) except Exception: LOG.warning('Application credentials not found ' 
'during deletion of share server.') encryption_key_ref = server.get('encryption_key_ref') barbican_api.delete_secret_access(context, encryption_key_ref) LOG.debug("Deleting share server '%s'", server_id) security_services = [] for ss_name in constants.SECURITY_SERVICES_ALLOWED_TYPES: ss = server_details.get('security_service_' + ss_name) if ss: security_services.append(jsonutils.loads(ss)) self.driver.teardown_server( server_details=server_details, security_services=security_services) except Exception: with excutils.save_and_reraise_exception(): LOG.error( "Share server '%s' failed on deletion.", server_id) self.db.share_server_update( context, server_id, {'status': constants.STATUS_ERROR}) else: encryption_key_ref = server.get('encryption_key_ref') if encryption_key_ref: self._delete_encryption_keys_quota(context) self.db.share_server_delete(context, share_server['id']) _wrapped_delete_share_server() LOG.info( "Share server '%s' has been deleted successfully.", share_server['id']) def _delete_encryption_keys_quota(self, context): reservations = None try: reservations = QUOTAS.reserve( context, project_id=context.project_id, encryption_keys=-1, ) except Exception: LOG.exception("Failed to update encryption_keys quota " "usages while deleting share server.") if reservations: QUOTAS.commit( context, reservations, project_id=context.project_id, ) @add_hooks @utils.require_driver_initialized def extend_share(self, context, share_id, new_size, reservations): context = context.elevated() share = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share) share_server = self._get_share_server(context, share_instance) project_id = share['project_id'] user_id = share['user_id'] self._notify_about_share_usage(context, share, share_instance, "extend.start") try: self.driver.extend_share( share_instance, new_size, share_server=share_server) except Exception as e: LOG.exception("Extend share failed.", resource=share) self.message_api.create( 
context, message_field.Action.EXTEND, project_id, resource_type=message_field.Resource.SHARE, resource_id=share_id, detail=message_field.Detail.DRIVER_FAILED_EXTEND) try: self.db.share_update( context, share['id'], {'status': constants.STATUS_EXTENDING_ERROR} ) raise exception.ShareExtendingError( reason=str(e), share_id=share_id) finally: QUOTAS.rollback( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_instance['share_type_id'], ) # we give the user_id of the share, to update the quota usage # for the user, who created the share, because on share delete # only this quota will be decreased QUOTAS.commit( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_instance['share_type_id'], ) share_update = { 'size': int(new_size), # NOTE(u_glide): translation to lower case should be removed in # a row with usage of upper case of share statuses in all places 'status': constants.STATUS_AVAILABLE.lower() } share = self.db.share_update(context, share['id'], share_update) LOG.info("Extend share completed successfully.", resource=share) self._notify_about_share_usage(context, share, share_instance, "extend.end") @add_hooks @utils.require_driver_initialized def shrink_share(self, context, share_id, new_size): context = context.elevated() share = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share) share_server = self._get_share_server(context, share_instance) project_id = share['project_id'] user_id = share['user_id'] new_size = int(new_size) replicas = self.db.share_replicas_get_all_by_share( context, share['id']) supports_replication = len(replicas) > 0 self._notify_about_share_usage(context, share, share_instance, "shrink.start") def error_occurred(exc, msg, status=constants.STATUS_SHRINKING_ERROR): if isinstance(exc, NotImplementedError): msg = _("Shrink share operation not supported.") status = constants.STATUS_AVAILABLE self.message_api.create( context, 
message_field.Action.SHRINK, share['project_id'], resource_type=message_field.Resource.SHARE, resource_id=share['id'], detail=message_field.Detail.DRIVER_FAILED_SHRINK) LOG.exception(msg, resource=share) self.db.share_update(context, share['id'], {'status': status}) raise exception.ShareShrinkingError( reason=str(exc), share_id=share_id) reservations = None try: size_decrease = int(share['size']) - new_size # we give the user_id of the share, to update the quota usage # for the user, who created the share, because on share delete # only this quota will be decreased deltas = { 'project_id': project_id, 'user_id': user_id, 'share_type_id': share_instance['share_type_id'], 'gigabytes': -size_decrease, } # NOTE(carloss): if the share supports replication we need # to query all its replicas and calculate the final size to # deallocate (amount of replicas * size to decrease). if supports_replication: replica_gigs_to_deallocate = len(replicas) * size_decrease deltas.update( {'replica_gigabytes': -replica_gigs_to_deallocate}) reservations = QUOTAS.reserve(context, **deltas) except Exception as e: error_occurred( e, ("Failed to update quota on share shrinking.")) try: self.driver.shrink_share( share_instance, new_size, share_server=share_server) # NOTE(u_glide): Replace following except block by error notification # when Manila has such mechanism. It's possible because drivers # shouldn't shrink share when this validation error occurs. 
except Exception as e: if isinstance(e, exception.ShareShrinkingPossibleDataLoss): msg = ("Shrink share failed due to possible data loss.") status = constants.STATUS_AVAILABLE error_params = {'msg': msg, 'status': status} self.message_api.create( context, message_field.Action.SHRINK, share['project_id'], resource_type=message_field.Resource.SHARE, resource_id=share_id, detail=message_field.Detail.DRIVER_REFUSED_SHRINK) else: error_params = {'msg': ("Shrink share failed.")} try: error_occurred(e, **error_params) finally: QUOTAS.rollback( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_instance['share_type_id'], ) QUOTAS.commit( context, reservations, project_id=project_id, user_id=user_id, share_type_id=share_instance['share_type_id'], ) share_update = { 'size': new_size, 'status': constants.STATUS_AVAILABLE } share = self.db.share_update(context, share['id'], share_update) LOG.info("Shrink share completed successfully.", resource=share) self._notify_about_share_usage(context, share, share_instance, "shrink.end") @utils.require_driver_initialized def create_share_group(self, context, share_group_id): context = context.elevated() share_group_ref = self.db.share_group_get(context, share_group_id) share_group_ref['host'] = self.host shares = self.db.share_instance_get_all_by_share_group_id( context, share_group_id) source_share_group_snapshot_id = share_group_ref.get( "source_share_group_snapshot_id") snap_ref = None parent_share_server_id = None if source_share_group_snapshot_id: snap_ref = self.db.share_group_snapshot_get( context, source_share_group_snapshot_id) for member in snap_ref['share_group_snapshot_members']: member['share'] = self.db.share_instance_get( context, member['share_instance_id'], with_share_data=True) if 'share_group' in snap_ref: parent_share_server_id = snap_ref['share_group'][ 'share_server_id'] status = constants.STATUS_AVAILABLE share_network_id = share_group_ref.get('share_network_id') share_server = None 
        # Reuse the parent group's share server when creating from a group
        # snapshot on a driver that handles share servers.
        if parent_share_server_id and self.driver.driver_handles_share_servers:
            share_server = self.db.share_server_get(context,
                                                    parent_share_server_id)
            share_network_id = (
                share_server['share_network_id'])

        # A share network cannot be honored when the driver does not manage
        # share servers; mark the group in error and reject the request.
        if share_network_id and not self.driver.driver_handles_share_servers:
            self.db.share_group_update(
                context, share_group_id, {'status': constants.STATUS_ERROR})
            msg = _("Driver does not expect share-network to be provided "
                    "with current configuration.")
            raise exception.InvalidInput(reason=msg)

        if not share_server and share_network_id:
            availability_zone_id = self._get_az_for_share_group(
                context, share_group_ref)
            subnets = (
                self.db.share_network_subnets_get_all_by_availability_zone_id(
                    context, share_network_id, availability_zone_id))

            if not subnets:
                raise exception.ShareNetworkSubnetNotFound(
                    share_network_subnet_id=None)

            try:
                share_server, share_group_ref = (
                    self._provide_share_server_for_share_group(
                        context, share_network_id, subnets,
                        share_group_ref, share_group_snapshot=snap_ref,
                    )
                )
            except Exception:
                # Record the failure (status + user message) and re-raise.
                with excutils.save_and_reraise_exception():
                    LOG.error("Failed to get share server"
                              " for share group creation.")
                    self.db.share_group_update(
                        context, share_group_id,
                        {'status': constants.STATUS_ERROR})
                    self.message_api.create(
                        context,
                        message_field.Action.CREATE,
                        share_group_ref['project_id'],
                        resource_type=message_field.Resource.SHARE_GROUP,
                        resource_id=share_group_id,
                        detail=message_field.Detail.NO_SHARE_SERVER)

        try:
            # TODO(ameade): Add notification for create.start
            LOG.info("Share group %s: creating", share_group_id)
            model_update, share_update_list = None, None

            share_group_ref['shares'] = shares
            if snap_ref:
                model_update, share_update_list = (
                    self.driver.create_share_group_from_share_group_snapshot(
                        context, share_group_ref, snap_ref,
                        share_server=share_server))
            else:
                model_update = self.driver.create_share_group(
                    context, share_group_ref, share_server=share_server)

            if model_update:
                share_group_ref = self.db.share_group_update(
                    context, share_group_ref['id'],
                    model_update)

            # Per-share updates returned by the driver when creating from a
            # group snapshot (status, progress, export locations).
            if share_update_list:
                for share in share_update_list:
                    values = copy.deepcopy(share)
                    # NOTE(dviroel): To keep backward compatibility we can't
                    # keep 'status' as a mandatory parameter. We'll set its
                    # value to 'available' as default.
                    i_status = values.get('status', constants.STATUS_AVAILABLE)
                    if i_status not in [
                            constants.STATUS_AVAILABLE,
                            constants.STATUS_CREATING_FROM_SNAPSHOT]:
                        msg = _(
                            'Driver returned an invalid status %s') % i_status
                        raise exception.InvalidShareInstance(reason=msg)
                    values['status'] = i_status
                    values['progress'] = (
                        '100%'
                        if i_status == constants.STATUS_AVAILABLE else '0%')
                    values.pop('id')
                    export_locations = values.pop('export_locations')
                    self.db.share_instance_update(context, share['id'],
                                                  values)
                    self.db.export_locations_update(
                        context, share['id'], export_locations)

        except Exception:
            with excutils.save_and_reraise_exception():
                # Mark the group and all of its member instances as errored.
                self.db.share_group_update(
                    context,
                    share_group_ref['id'],
                    {'status': constants.STATUS_ERROR,
                     'availability_zone_id': self._get_az_for_share_group(
                         context, share_group_ref),
                     'consistent_snapshot_support': self.driver._stats[
                         'share_group_stats'].get(
                             'consistent_snapshot_support')})
                for share in shares:
                    self.db.share_instance_update(
                        context, share['id'],
                        {'status': constants.STATUS_ERROR})
                LOG.error("Share group %s: create failed", share_group_id)

        now = timeutils.utcnow()
        for share in shares:
            self.db.share_instance_update(
                context, share['id'],
                {'status': constants.STATUS_AVAILABLE})
        self.db.share_group_update(
            context,
            share_group_ref['id'],
            {'status': status,
             'created_at': now,
             'availability_zone_id': self._get_az_for_share_group(
                 context, share_group_ref),
             'consistent_snapshot_support': self.driver._stats[
                 'share_group_stats'].get('consistent_snapshot_support')})
        LOG.info("Share group %s: created successfully", share_group_id)

        # TODO(ameade): Add notification for create.end
        return share_group_ref['id']

    def _get_az_for_share_group(self, context, share_group_ref):
        """Return the group's AZ id, defaulting to this service's AZ."""
        if not share_group_ref['availability_zone_id']:
            return self.db.availability_zone_get(
                context, self.availability_zone)['id']
        return share_group_ref['availability_zone_id']

    @utils.require_driver_initialized
    def delete_share_group(self, context, share_group_id):
        """Delete a share group via the driver, then destroy its DB record."""
        context = context.elevated()
        share_group_ref = self.db.share_group_get(context, share_group_id)
        share_group_ref['host'] = self.host
        share_group_ref['shares'] = (
            self.db.share_instance_get_all_by_share_group_id(
                context, share_group_id))

        # TODO(ameade): Add notification for delete.start

        try:
            LOG.info("Share group %s: deleting", share_group_id)
            share_server = None
            if share_group_ref.get('share_server_id'):
                share_server = self.db.share_server_get(
                    context, share_group_ref['share_server_id'])
            model_update = self.driver.delete_share_group(
                context, share_group_ref, share_server=share_server)

            if model_update:
                share_group_ref = self.db.share_group_update(
                    context, share_group_ref['id'], model_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.share_group_update(
                    context,
                    share_group_ref['id'],
                    {'status': constants.STATUS_ERROR})
                LOG.error("Share group %s: delete failed",
                          share_group_ref['id'])

        self.db.share_group_destroy(context, share_group_id)
        LOG.info("Share group %s: deleted successfully", share_group_id)

        # TODO(ameade): Add notification for delete.end

    @utils.require_driver_initialized
    def create_share_group_snapshot(self, context, share_group_snapshot_id):
        """Create a share group snapshot and update its member records."""
        context = context.elevated()
        snap_ref = self.db.share_group_snapshot_get(
            context, share_group_snapshot_id)
        for member in snap_ref['share_group_snapshot_members']:
            member['share'] = self.db.share_instance_get(
                context, member['share_instance_id'], with_share_data=True)

        status = constants.STATUS_AVAILABLE
        now = timeutils.utcnow()
        # Members the driver explicitly updated; the rest are defaulted to
        # 'status'/'now' after the driver call succeeds.
        updated_members_ids = []

        try:
            LOG.info("Share group snapshot %s: creating",
                     share_group_snapshot_id)
            share_server = None
            if snap_ref['share_group'].get('share_server_id'):
                share_server = self.db.share_server_get(
                    context, snap_ref['share_group']['share_server_id'])
            snapshot_update, member_update_list = (
                self.driver.create_share_group_snapshot(
                    context, snap_ref, share_server=share_server))

            for update in (member_update_list or []):
                # NOTE(vponomaryov): we expect that each member is a dict
                # and has required 'id' key and some optional keys
                # to be updated such as 'provider_location'. It is planned
                # to have here also 'export_locations' when it is supported.
                member_id = update.pop('id', None)
                if not member_id:
                    LOG.warning(
                        "One of share group snapshot '%s' members does not "
                        "have reference ID. Its update was skipped.",
                        share_group_snapshot_id)
                    continue
                # TODO(vponomaryov): remove following condition when
                # sgs members start supporting export locations.
                if 'export_locations' in update:
                    LOG.debug(
                        "Removing 'export_locations' data from "
                        "share group snapshot member '%s' update because "
                        "export locations are not supported.",
                        member_id)
                    update.pop('export_locations')

                # Only a whitelisted set of keys is persisted; anything left
                # over in 'update' is logged below and discarded.
                db_update = {
                    'updated_at': now,
                    'status': update.pop('status', status)
                }
                if 'provider_location' in update:
                    db_update['provider_location'] = (
                        update.pop('provider_location'))
                if 'size' in update:
                    db_update['size'] = int(update.pop('size'))

                updated_members_ids.append(member_id)
                self.db.share_group_snapshot_member_update(
                    context, member_id, db_update)

                if update:
                    LOG.debug(
                        "Share group snapshot ID='%(sgs_id)s', "
                        "share group snapshot member ID='%(sgsm_id)s'. "
                        "Following keys of sgs member were not updated "
                        "as not allowed: %(keys)s.",
                        {'sgs_id': share_group_snapshot_id,
                         'sgsm_id': member_id,
                         'keys': ', '.join(update)})

            if snapshot_update:
                snap_ref = self.db.share_group_snapshot_update(
                    context, snap_ref['id'], snapshot_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.share_group_snapshot_update(
                    context, snap_ref['id'],
                    {'status': constants.STATUS_ERROR})
                LOG.error("Share group snapshot %s: create failed",
                          share_group_snapshot_id)

        # Members not updated by the driver get the default status/timestamp.
        for member in (snap_ref.get('share_group_snapshot_members') or []):
            if member['id'] in updated_members_ids:
                continue
            update = {'status': status, 'updated_at': now}
            self.db.share_group_snapshot_member_update(
                context, member['id'], update)

        self.db.share_group_snapshot_update(
            context, snap_ref['id'],
            {'status': status, 'updated_at': now})
        LOG.info("Share group snapshot %s: created successfully",
                 share_group_snapshot_id)
        return snap_ref['id']

    @utils.require_driver_initialized
    def delete_share_group_snapshot(self, context, share_group_snapshot_id):
        """Delete a share group snapshot via the driver and the DB."""
        context = context.elevated()
        snap_ref = self.db.share_group_snapshot_get(
            context, share_group_snapshot_id)
        for member in snap_ref['share_group_snapshot_members']:
            member['share'] = self.db.share_instance_get(
                context, member['share_instance_id'], with_share_data=True)

        snapshot_update = False

        try:
            LOG.info("Share group snapshot %s: deleting",
                     share_group_snapshot_id)

            share_server = None
            if snap_ref['share_group'].get('share_server_id'):
                share_server = self.db.share_server_get(
                    context, snap_ref['share_group']['share_server_id'])

            snapshot_update, member_update_list = (
                self.driver.delete_share_group_snapshot(
                    context, snap_ref, share_server=share_server))

            if member_update_list:
                snapshot_update = snapshot_update or {}
                snapshot_update['share_group_snapshot_members'] = []
            for update in (member_update_list or []):
                snapshot_update['share_group_snapshot_members'].append(update)

            if snapshot_update:
                snap_ref = self.db.share_group_snapshot_update(
                    context, snap_ref['id'], snapshot_update)

        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.share_group_snapshot_update(
                    context, snap_ref['id'],
                    {'status': constants.STATUS_ERROR})
                LOG.error("Share group snapshot %s: delete failed",
                          snap_ref['name'])

        self.db.share_group_snapshot_destroy(context, share_group_snapshot_id)

        LOG.info("Share group snapshot %s: deleted successfully",
                 share_group_snapshot_id)

    def _get_share_server_dict(self, context, share_server):
        """Flatten a share server DB object into a plain dict."""
        share_server_ref = {
            'id': share_server.get('id'),
            'project_id': share_server.get('project_id'),
            'updated_at': share_server.get('updated_at'),
            'status': share_server.get('status'),
            'host': share_server.get('host'),
            'share_network_name': share_server.get('share_network_name'),
            'share_network_id': share_server.get('share_network_id'),
            'created_at': share_server.get('created_at'),
            'backend_details': share_server.get('backend_details'),
            'share_network_subnet_ids':
                share_server.get('share_network_subnet_ids', []),
            'is_auto_deletable': share_server.get('is_auto_deletable', None),
            'identifier': share_server.get('identifier', None),
            'network_allocations': share_server.get('network_allocations',
                                                    None),
        }
        return share_server_ref

    def _get_export_location_dict(self, context, export_location):
        """Flatten an export location DB object into a plain dict."""
        export_location_ref = {
            # NOTE(review): the DB object exposes 'uuid'; it is surfaced
            # here under the 'id' key.
            'id': export_location.get('uuid'),
            'path': export_location.get('path'),
            'created_at': export_location.get('created_at'),
            'updated_at': export_location.get('updated_at'),
            'share_instance_id':
                export_location.get('share_instance_id', None),
            'is_admin_only': export_location.get('is_admin_only', None),
        }
        return export_location_ref

    def _get_share_instance_dict(self, context, share_instance):
        """Flatten a share instance (plus share data) into a plain dict."""
        # TODO(gouthamr): remove method when the db layer returns primitives
        share_instance_ref = {
            'id': share_instance.get('id'),
            'name': share_instance.get('name'),
            'share_id': share_instance.get('share_id'),
            'host': share_instance.get('host'),
            'status':
                share_instance.get('status'),
            'replica_state': share_instance.get('replica_state'),
            'availability_zone_id':
                share_instance.get('availability_zone_id'),
            'share_network_id': share_instance.get('share_network_id'),
            'share_server_id': share_instance.get('share_server_id'),
            'deleted': share_instance.get('deleted'),
            'terminated_at': share_instance.get('terminated_at'),
            'launched_at': share_instance.get('launched_at'),
            'scheduled_at': share_instance.get('scheduled_at'),
            'updated_at': share_instance.get('updated_at'),
            'deleted_at': share_instance.get('deleted_at'),
            'created_at': share_instance.get('created_at'),
            'share_server': self._get_share_server(context, share_instance),
            'access_rules_status':
                share_instance.get('access_rules_status'),
            # Share details
            'user_id': share_instance.get('user_id'),
            'project_id': share_instance.get('project_id'),
            'size': share_instance.get('size'),
            'display_name': share_instance.get('display_name'),
            'display_description':
                share_instance.get('display_description'),
            'snapshot_id': share_instance.get('snapshot_id'),
            'share_proto': share_instance.get('share_proto'),
            'share_type_id': share_instance.get('share_type_id'),
            'is_public': share_instance.get('is_public'),
            'share_group_id': share_instance.get('share_group_id'),
            'source_share_group_snapshot_member_id': share_instance.get(
                'source_share_group_snapshot_member_id'),
            'availability_zone': share_instance.get('availability_zone'),
        }
        # Replace the nested share server object with its primitive form.
        if share_instance_ref['share_server']:
            share_instance_ref['share_server'] = self._get_share_server_dict(
                context, share_instance_ref['share_server']
            )
        share_instance_ref['export_locations'] = [
            self._get_export_location_dict(context, el) for
            el in share_instance.get('export_locations') or []
        ]
        return share_instance_ref

    def _get_snapshot_instance_dict(self, context, snapshot_instance,
                                    snapshot=None):
        """Flatten a snapshot instance into a plain dict.

        If the parent ``snapshot`` is given, its size is merged in.
        """
        # TODO(gouthamr): remove method when the db layer returns primitives
        snapshot_instance_ref = {
            'name': snapshot_instance.get('name'),
            'share_id':
                snapshot_instance.get('share_id'),
            'share_name': snapshot_instance.get('share_name'),
            'status': snapshot_instance.get('status'),
            'id': snapshot_instance.get('id'),
            'deleted': snapshot_instance.get('deleted') or False,
            'created_at': snapshot_instance.get('created_at'),
            'share': snapshot_instance.get('share'),
            'updated_at': snapshot_instance.get('updated_at'),
            'share_instance_id':
                snapshot_instance.get('share_instance_id'),
            'snapshot_id': snapshot_instance.get('snapshot_id'),
            'progress': snapshot_instance.get('progress'),
            'deleted_at': snapshot_instance.get('deleted_at'),
            'provider_location':
                snapshot_instance.get('provider_location'),
        }
        if snapshot:
            snapshot_instance_ref.update({
                'size': snapshot.get('size'),
            })
        return snapshot_instance_ref

    def snapshot_update_access(self, context, snapshot_instance_id):
        """Apply pending access rule changes for a snapshot instance."""
        snapshot_instance = self.db.share_snapshot_instance_get(
            context, snapshot_instance_id, with_share_data=True)
        share_server = self._get_share_server(
            context, snapshot_instance['share_instance'])
        self.snapshot_access_helper.update_access_rules(
            context, snapshot_instance['id'], share_server=share_server)

    def _notify_about_share_usage(self, context, share, share_instance,
                                  event_suffix, extra_usage_info=None):
        """Emit a usage notification for the given share instance."""
        share_utils.notify_about_share_usage(
            context, share, share_instance, event_suffix,
            extra_usage_info=extra_usage_info, host=self.host)

    @utils.require_driver_initialized
    def create_backup(self, context, backup):
        """Start a backup of a share via the driver.

        Progress/completion is handled asynchronously by
        ``create_backup_continue``; on driver failure the share is put back
        to 'available' and the backup is marked as errored.
        """
        share_id = backup['share_id']
        backup_id = backup['id']
        share = self.db.share_get(context, share_id)
        share_instance = self._get_share_instance(context, share)

        LOG.info('Create backup started, backup: %(backup)s share: '
                 '%(share)s.', {'backup': backup_id, 'share': share_id})

        try:
            share_server = self._get_share_server(context, share)
            self.driver.create_backup(context, share_instance, backup,
                                      share_server=share_server)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to create share backup %s by driver.",
                          backup_id)
self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_ERROR, 'fail_reason': err}) @periodic_task.periodic_task( spacing=CONF.driver_backup_continue_update_interval) @utils.require_driver_initialized def create_backup_continue(self, context): """Invokes driver to continue backup of share.""" filters = {'status': constants.STATUS_CREATING, 'host': self.host, 'topic': CONF.share_topic} backups = self.db.share_backups_get_all(context, filters) for backup in backups: backup_id = backup['id'] share_id = backup['share_id'] share = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share) result = {} try: share_server = self._get_share_server(context, share_instance) result = self.driver.create_backup_continue( context, share_instance, backup, share_server=share_server) progress = result.get('total_progress', '0') self.db.share_backup_update(context, backup_id, {'progress': progress}) if progress == '100': self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_AVAILABLE}) LOG.info("Created share backup %s successfully.", backup_id) except Exception: LOG.warning("Failed to get progress of share %(share)s " "backing up in share_backup %(backup).", {'share': share_id, 'backup': backup_id}) self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_ERROR, 'progress': '0'}) def delete_backup(self, context, backup): LOG.info('Delete backup started, backup: %s.', backup['id']) backup_id = backup['id'] project_id = backup['project_id'] share_id = backup['share_id'] share = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share) try: share_server = self._get_share_server(context, share_instance) 
self.driver.delete_backup(context, backup, share_instance, share_server=share_server) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to delete share backup %s.", backup_id) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_ERROR_DELETING}) try: reserve_opts = { 'backups': -1, 'backup_gigabytes': -backup['size'], } reservations = QUOTAS.reserve( context, project_id=project_id, **reserve_opts) except Exception as e: reservations = None LOG.warning("Failed to update backup quota for %(pid)s: %(err)s.", {'pid': project_id, 'err': e}) if reservations: QUOTAS.commit(context, reservations, project_id=project_id) self.db.share_backup_delete(context, backup_id) LOG.info("Share backup %s deleted successfully.", backup_id) def restore_backup(self, context, backup, share_id): LOG.info('Restore backup started, backup: %(backup_id)s ' 'share: %(share_id)s.', {'backup_id': backup['id'], 'share_id': share_id}) backup_id = backup['id'] backup_share_id = backup['share_id'] share = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share) try: if (self.driver.restore_to_target_support is False and share_id != backup_share_id): self.message_api.create( context, message_field.Action.RESTORE_BACKUP, share['project_id'], resource_type=message_field.Resource.SHARE, resource_id=share['id'], detail=message_field.Detail.TARGETED_RESTORE_UNSUPPORTED ) msg = _("Cannot restore backup %(backup)s to target share " "%(share)s as share driver does not provide support " " for targeted restores") % ( {'backup': backup_id, 'share': share_id}) LOG.exception(msg) raise exception.BackupException(reason=msg) share_server = self._get_share_server(context, share_instance) self.driver.restore_backup(context, backup, share_instance, share_server=share_server) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to restore backup %(backup)s to share " "%(share)s by driver.", {'backup': 
backup_id, 'share': share_id}) self.db.share_update( context, share_id, {'status': constants.STATUS_BACKUP_RESTORING_ERROR}) self.db.share_backup_update( context, backup['id'], {'status': constants.STATUS_ERROR}) @periodic_task.periodic_task( spacing=CONF.driver_restore_continue_update_interval) @utils.require_driver_initialized def restore_backup_continue(self, context): filters = {'status': constants.STATUS_RESTORING, 'host': self.host, 'topic': CONF.share_topic} backups = self.db.share_backups_get_all(context, filters) for backup in backups: backup_id = backup['id'] try: filters = { 'source_backup_id': backup_id, } shares = self.db.share_get_all(context, filters) except Exception: LOG.warning('Failed to get shares for backup %s', backup_id) continue for share in shares: if share['status'] != constants.STATUS_BACKUP_RESTORING: continue share_id = share['id'] share_instance = self._get_share_instance(context, share) try: share_server = self._get_share_server( context, share_instance) result = self.driver.restore_backup_continue( context, backup, share_instance, share_server=share_server) progress = result.get('total_progress', '0') self.db.share_backup_update( context, backup_id, {'restore_progress': progress}) if progress == '100': self.db.share_update( context, share_id, {'status': constants.STATUS_AVAILABLE}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_AVAILABLE}) LOG.info("Share backup %s restored successfully.", backup_id) except Exception: LOG.exception("Failed to get progress of share_backup " "%(backup)s restoring in share %(share).", {'share': share_id, 'backup': backup_id}) self.db.share_update( context, share_id, {'status': constants.STATUS_BACKUP_RESTORING_ERROR}) self.db.share_backup_update( context, backup_id, {'status': constants.STATUS_AVAILABLE, 'restore_progress': '0'}) @periodic_task.periodic_task( spacing=CONF.share_usage_size_update_interval, enabled=CONF.enable_gathering_share_usage_size) 
    @utils.require_driver_initialized
    def update_share_usage_size(self, context):
        """Invokes driver to gather usage size of shares."""
        updated_share_instances = []
        share_instances = self.db.share_instance_get_all_by_host(
            context, host=self.host, with_share_data=True)
        if share_instances:
            try:
                updated_share_instances = self.driver.update_share_usage_size(
                    context, share_instances)
            except Exception:
                LOG.exception("Gather share usage size failure.")
        # Each driver-reported instance produces a 'consumed.size'
        # usage notification.
        for si in updated_share_instances:
            share_instance = self._get_share_instance(context, si['id'])
            share = self.db.share_get(context, share_instance['share_id'])
            self._notify_about_share_usage(
                context, share, share_instance, "consumed.size",
                extra_usage_info={'used_size': si['used_size'],
                                  'gathered_at': si['gathered_at']})

    @periodic_task.periodic_task(spacing=CONF.periodic_interval)
    @utils.require_driver_initialized
    def periodic_share_status_update(self, context):
        """Invokes share driver to update shares status."""
        LOG.debug("Updating status of share instances.")
        # Only instances still being created from a snapshot need polling.
        share_instances = self.db.share_instance_get_all_by_host(
            context, self.host, with_share_data=True,
            status=constants.STATUS_CREATING_FROM_SNAPSHOT)

        for si in share_instances:
            si_dict = self._get_share_instance_dict(context, si)
            self._update_share_status(context, si_dict)

    def _update_share_status(self, context, share_instance):
        """Ask the driver for one instance's status and persist updates."""
        share_server = self._get_share_server(context, share_instance)
        if share_server is not None:
            share_server = self._get_share_server_dict(context,
                                                       share_server)
        try:
            data_updates = self.driver.get_share_status(share_instance,
                                                        share_server)
        except Exception:
            LOG.exception(
                ("Unexpected driver error occurred while updating status "
                 "for share instance %(id)s that belongs to share "
                 "'%(share_id)s'"),
                {'id': share_instance['id'],
                 'share_id': share_instance['share_id']}
            )
            # Treat a driver failure as an errored instance.
            data_updates = {
                'status': constants.STATUS_ERROR
            }

        status = data_updates.get('status')
        if status == constants.STATUS_ERROR:
            msg = ("Status of share instance %(id)s that belongs to share "
                   "%(share_id)s was updated to '%(status)s'."
                   % {'id': share_instance['id'],
                      'share_id': share_instance['share_id'],
                      'status': status})
            LOG.error(msg)
            self.db.share_instance_update(
                context, share_instance['id'],
                {'status': constants.STATUS_ERROR,
                 'progress': None})

            self.message_api.create(
                context,
                message_field.Action.UPDATE,
                share_instance['project_id'],
                resource_type=message_field.Resource.SHARE,
                resource_id=share_instance['share_id'],
                detail=message_field.Detail.DRIVER_FAILED_CREATING_FROM_SNAP)
            return

        export_locations = data_updates.get('export_locations')
        progress = data_updates.get('progress')

        statuses_requiring_update = [
            constants.STATUS_AVAILABLE,
            constants.STATUS_CREATING_FROM_SNAPSHOT]

        if status in statuses_requiring_update:
            si_updates = {
                'status': status,
            }
            # A share that became available is, by definition, 100% done.
            progress = ('100%' if status == constants.STATUS_AVAILABLE
                        else progress)
            if progress is not None:
                si_updates.update({'progress': progress})
            self.db.share_instance_update(
                context, share_instance['id'], si_updates)
            msg = ("Status of share instance %(id)s that belongs to share "
                   "%(share_id)s was updated to '%(status)s'."
                   % {'id': share_instance['id'],
                      'share_id': share_instance['share_id'],
                      'status': status})
            LOG.debug(msg)

        if export_locations:
            self.db.export_locations_update(
                context, share_instance['id'], export_locations)

    def _validate_check_compatibility_result(
            self, context, resource_id, share_instances,
            snapshot_instances, driver_compatibility, dest_host,
            nondisruptive, writable, preserve_snapshots,
            resource_type='share'):
        """Raise a migration failure if the driver can't meet the request.

        Compares the driver's reported capabilities with what the user
        asked for (non-disruptive, writable, preserve snapshots); raises
        ShareMigrationFailed or ShareServerMigrationFailed accordingly.
        """
        resource_exception = (
            exception.ShareMigrationFailed
            if resource_type == 'share'
            else exception.ShareServerMigrationFailed)
        if not driver_compatibility.get('compatible'):
            msg = _("Destination host %(host)s is not compatible with "
                    "%(resource_type)s %(resource_id)s's source backend "
                    "for driver-assisted migration.") % {
                'host': dest_host,
                'resource_id': resource_id,
                'resource_type': resource_type,
            }
            raise resource_exception(reason=msg)
        if (not driver_compatibility.get('nondisruptive') and
                nondisruptive):
            msg = _("Driver cannot perform a non-disruptive migration of "
                    "%(resource_type)s %(resource_id)s.") % {
                'resource_type': resource_type,
                'resource_id': resource_id
            }
            raise resource_exception(reason=msg)
        if not driver_compatibility.get('writable') and writable:
            msg = _("Driver cannot perform migration of %(resource_type)s "
                    "%(resource_id)s while remaining writable.") % {
                'resource_type': resource_type,
                'resource_id': resource_id
            }
            raise resource_exception(reason=msg)
        if (not driver_compatibility.get('preserve_snapshots') and
                preserve_snapshots):
            msg = _("Driver cannot perform migration of %(resource_type)s "
                    "%(resource_id)s while preserving snapshots.") % {
                'resource_type': resource_type,
                'resource_id': resource_id
            }
            raise resource_exception(reason=msg)
        if (not driver_compatibility.get('preserve_snapshots') and
                snapshot_instances):
            msg = _("Driver does not support preserving snapshots. The "
                    "migration of the %(resource_type)s %(resource_id)s "
                    "cannot proceed while it has snapshots.") % {
                'resource_type': resource_type,
                'resource_id': resource_id
            }
            raise resource_exception(reason=msg)

    def _update_resource_status(self, context, status, task_state=None,
                                share_instance_ids=None,
                                snapshot_instance_ids=None):
        """Bulk-update status (and optional task_state) of instances."""
        fields = {'status': status}
        if task_state:
            fields['task_state'] = task_state
        if share_instance_ids:
            self.db.share_instance_status_update(
                context, share_instance_ids, fields)
        if snapshot_instance_ids:
            self.db.share_snapshot_instances_status_update(
                context, snapshot_instance_ids, fields)

    def _share_server_migration_start_driver(
            self, context, source_share_server, dest_host, writable,
            nondisruptive, preserve_snapshots, new_share_network_id):
        """Begin a driver-assisted share server migration.

        Validates driver compatibility, provisions (or reuses) the
        destination share server, flips task states and kicks off the
        driver migration. On any failure, all affected resources are
        rolled back and ShareServerMigrationFailed is raised.
        """
        share_instances = self.db.share_instance_get_all_by_share_server(
            context, source_share_server['id'], with_share_data=True)
        share_instance_ids = [x.id for x in share_instances]

        snapshot_instances = (
            self.db.share_snapshot_instance_get_all_with_filters(
                context, {'share_instance_ids': share_instance_ids}))
        snapshot_instance_ids = [x.id for x in snapshot_instances]

        old_share_network = self.db.share_network_get(
            context, share_instances[0]['share_network_id'])
        new_share_network = self.db.share_network_get(
            context, new_share_network_id)

        service_host = share_utils.extract_host(dest_host)
        service = self.db.service_get_by_args(
            context, service_host, 'manila-share')

        # NOTE(dviroel): We'll build a list of request specs and send it to
        # the driver so vendors have a chance to validate if the destination
        # host meets the requirements before starting the migration.
        shares_request_spec = (
            self.share_api.get_share_server_migration_request_spec_dict(
                context,
                share_instances,
                snapshot_instances,
                availability_zone_id=service['availability_zone_id'],
                share_network_id=new_share_network_id))

        extended_allocs = None
        dest_share_server = None
        try:
            # NOTE: Extend network allocations to destination host, i.e.
            # create inactive port bindings on the destination host. Refresh
            # network_allocations field in source_share_server with the new
            # bindings, so that correct segmentation id is used during
            # compatibility check and migration.
            if CONF.server_migration_extend_neutron_network:
                extended_allocs = (
                    self.driver.network_api.extend_network_allocations(
                        context, source_share_server))
                source_share_server['network_allocations'] = extended_allocs

            compatibility = (
                self.driver.share_server_migration_check_compatibility(
                    context, source_share_server, dest_host,
                    old_share_network, new_share_network,
                    shares_request_spec))

            self._validate_check_compatibility_result(
                context, source_share_server, share_instances,
                snapshot_instances, compatibility, dest_host,
                nondisruptive, writable, preserve_snapshots,
                resource_type='share server')

            # A nondisruptive migration keeps the same backend server; a
            # disruptive one provisions a new server on the destination.
            create_server_on_backend = not compatibility.get('nondisruptive')
            dest_share_server = self._provide_share_server_for_migration(
                context, source_share_server, new_share_network_id,
                service['availability_zone_id'], dest_host,
                create_on_backend=create_server_on_backend)

            net_changes_identified = False
            if not create_server_on_backend:
                dest_share_server = self.db.share_server_get(
                    context, dest_share_server['id'])
                net_changes_identified = (
                    not share_utils.is_az_subnets_compatible(
                        dest_share_server['share_network_subnets'],
                        source_share_server['share_network_subnets']))

            # NOTE(carloss): Even though the share back end won't need to
            # create a share server, if a network change was identified,
            # there is need to allocate new interfaces to the share server,
            # so the back end can set up the new ips considering
            # the new networking parameters when completing the migration.
            # In such case, the migration will be disruptive, since the old
            # allocations will be replaced by the new ones.
            if net_changes_identified:
                share_network_subnets = (
                    self.db.share_network_subnet_get_all_by_share_server_id(
                        context, dest_share_server['id']))
                for share_network_subnet in share_network_subnets:
                    self.driver.allocate_network(
                        context, dest_share_server, new_share_network,
                        share_network_subnet)
                self.driver.allocate_admin_network(
                    context, dest_share_server)
                # Refresh the share server so it will have the network
                # allocations when sent to the driver
                dest_share_server = self.db.share_server_get(
                    context, dest_share_server['id'])

            self.db.share_server_update(
                context, dest_share_server['id'],
                {'status': constants.STATUS_SERVER_MIGRATING_TO,
                 'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS,
                 'source_share_server_id': source_share_server['id']})

            if not compatibility.get('writable'):
                # NOTE(dviroel): Only modify access rules to read-only if
                # the driver doesn't support 'writable'.
                self._cast_access_rules_to_readonly_for_server(
                    context, share_instances, source_share_server,
                    dest_host=source_share_server['host'])

            LOG.debug("Initiating driver migration for share server %s.",
                      source_share_server['id'])

            self.db.share_server_update(
                context, source_share_server['id'],
                {'task_state': (
                    constants.TASK_STATE_MIGRATION_DRIVER_STARTING)})
            self.db.share_server_update(
                context, dest_share_server['id'],
                {'task_state': (
                    constants.TASK_STATE_MIGRATION_DRIVER_STARTING)})

            server_info = self.driver.share_server_migration_start(
                context, source_share_server, dest_share_server,
                share_instances, snapshot_instances)

            backend_details = (
                server_info.get('backend_details') if server_info else None)
            if extended_allocs:
                backend_details = backend_details or {}
                backend_details['segmentation_id'] = (
                    extended_allocs[0]['segmentation_id'])
            if backend_details:
                self.db.share_server_backend_details_set(
                    context, dest_share_server['id'], backend_details)

            self.db.share_server_update(
                context, source_share_server['id'],
                {'task_state': (
                    constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)})
            self.db.share_server_update(
                context, dest_share_server['id'],
                {'task_state': (
                    constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)})
        except Exception:
            # Rollback status changes for affected resources
            self._update_resource_status(
                context, constants.STATUS_AVAILABLE,
                share_instance_ids=share_instance_ids,
                snapshot_instance_ids=snapshot_instance_ids)
            # Rollback port bindings on destination host
            if extended_allocs:
                self.driver.network_api.delete_extended_allocations(
                    context, source_share_server, dest_share_server)
            # Rollback read only access rules
            self._reset_read_only_access_rules_for_server(
                context, share_instances, source_share_server,
                dest_host=source_share_server['host'])
            if dest_share_server:
                self.db.share_server_update(
                    context, dest_share_server['id'],
                    {'task_state': constants.TASK_STATE_MIGRATION_ERROR,
                     'status': constants.STATUS_ERROR})
                if not create_server_on_backend:
                    # Undo any interface allocations made for the reused
                    # server before deleting its DB record.
                    if net_changes_identified:
                        self.driver.deallocate_network(
                            context, dest_share_server['id'])
                    self.db.share_server_delete(
                        context, dest_share_server['id'])
                else:
                    self.delete_share_server(context, dest_share_server)
            msg = _("Driver-assisted migration of share server %s "
                    "failed.") % source_share_server['id']
            LOG.exception(msg)
            raise exception.ShareServerMigrationFailed(reason=msg)

        return True

    @add_hooks
    @utils.require_driver_initialized
    def share_server_migration_check(self, context, share_server_id,
                                     dest_host, writable, nondisruptive,
                                     preserve_snapshots,
                                     new_share_network_id):
        """Check if a share server migration to dest_host is feasible."""
        driver_result = {}
        # Pessimistic defaults: not compatible until the driver says so.
        result = {
            'compatible': False,
            'writable': None,
            'preserve_snapshots': None,
            'nondisruptive': None,
            'share_network_id': new_share_network_id,
            'migration_cancel': None,
            'migration_get_progress': None
        }

        if not self.driver.driver_handles_share_servers:
            LOG.error('This operation is supported only on backends that '
                      'handles share servers.')
            return result

        share_server = self.db.share_server_get(context, share_server_id)
        share_instances = self.db.share_instance_get_all_by_share_server(
            context, share_server_id, with_share_data=True)
        share_instance_ids = [x.id for x
in share_instances] snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_instance_ids})) old_share_network = self.db.share_network_get( context, share_instances[0]['share_network_id']) new_share_network = self.db.share_network_get( context, new_share_network_id) service_host = share_utils.extract_host(dest_host) service = self.db.service_get_by_args( context, service_host, 'manila-share') # NOTE: Extend network allocations to destination host, i.e. create # inactive port bindings on destination host with the same ports in the # share network subnet. Refresh share_server with new network # allocations, so that correct segmentation id is used in the # compatibility check. if CONF.server_migration_extend_neutron_network: try: allocs = self.driver.network_api.extend_network_allocations( context, share_server) share_server['network_allocations'] = allocs except Exception: LOG.warning( 'Failed to extend network allocations for ' 'share server %s.', share_server['id']) return result # NOTE(dviroel): We'll build a list of request specs and send it to # the driver so vendors have a chance to validate if the destination # host meets the requirements before starting the migration. shares_request_spec = ( self.share_api.get_share_server_migration_request_spec_dict( context, share_instances, snapshot_instances, availability_zone_id=service['availability_zone_id'], share_network_id=new_share_network_id)) try: driver_result = ( self.driver.share_server_migration_check_compatibility( context, share_server, dest_host, old_share_network, new_share_network, shares_request_spec)) self._validate_check_compatibility_result( context, share_server, share_instances, snapshot_instances, driver_result, dest_host, nondisruptive, writable, preserve_snapshots, resource_type='share server') except Exception: # Update driver result to not compatible since it didn't pass in # the validations. 
driver_result['compatible'] = False # NOTE: Delete port bindings on destination host after compatibility # check if CONF.server_migration_extend_neutron_network: self.driver.network_api.delete_extended_allocations( context, share_server) result.update(driver_result) return result @add_hooks @utils.require_driver_initialized def share_server_migration_start( self, context, share_server_id, dest_host, writable, nondisruptive, preserve_snapshots, new_share_network_id=None): """Migrates a share server from current host to another host.""" LOG.debug("Entered share_server_migration_start method for share " "server %s.", share_server_id) self.db.share_server_update( context, share_server_id, {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}) share_server = self.db.share_server_get(context, share_server_id) try: if not self.driver.driver_handles_share_servers: LOG.error('This operation is supported only on backends that ' 'handle share servers.') raise exception.OperationNotSupportedByDriverMode() self._share_server_migration_start_driver( context, share_server, dest_host, writable, nondisruptive, preserve_snapshots, new_share_network_id) except Exception: LOG.exception( ("The driver could not migrate the share server " "%(server)s"), {'server': share_server_id}) self.db.share_server_update( context, share_server_id, {'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ACTIVE}) @periodic_task.periodic_task( spacing=CONF.server_migration_driver_continue_update_interval) @add_hooks @utils.require_driver_initialized def share_server_migration_driver_continue(self, context): """Invokes driver to continue migration of share server.""" # Searching for destination share servers share_servers = self.db.share_server_get_all_by_host( context, self.host, filters={'status': constants.STATUS_SERVER_MIGRATING_TO}) dest_updates_on_error = { 'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ERROR, } src_updates_on_error = { 
'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ACTIVE, } updates_on_finished = { 'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE } for dest_share_server in share_servers: if dest_share_server['task_state'] == ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): src_share_server_id = dest_share_server.get( 'source_share_server_id') if src_share_server_id is None: msg = _('Destination share server %s does not have a ' 'source share server id.' ) % dest_share_server['id'] LOG.error(msg) self.db.share_server_update( context, dest_share_server['id'], dest_updates_on_error) continue msg_args = { 'src_id': src_share_server_id, 'dest_id': dest_share_server['id'] } src_share_server = self.db.share_server_get( context, src_share_server_id) if not src_share_server: msg = _('Destination share server %(dest_id)s refers to ' 'a source share server %(src_id)s that does not ' 'exists.') % msg_args LOG.error(msg) self.db.share_server_update( context, dest_share_server['id'], dest_updates_on_error) continue if (src_share_server['status'] != constants.STATUS_SERVER_MIGRATING): msg = _('Destination share server %(dest_id)s refers to ' 'a source share server %(src_id)s that is not ' ' being migrated.') % msg_args LOG.error(msg) self.db.share_server_update( context, dest_share_server['id'], dest_updates_on_error) continue share_instances = ( self.db.share_instance_get_all_by_share_server( context, src_share_server_id, with_share_data=True)) share_instance_ids = [x.id for x in share_instances] snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_instance_ids})) snapshot_instance_ids = [x.id for x in snapshot_instances] try: finished = self.driver.share_server_migration_continue( context, src_share_server, dest_share_server, share_instances, snapshot_instances) if finished: self.db.share_server_update( context, src_share_server['id'], updates_on_finished) 
self.db.share_server_update( context, dest_share_server['id'], updates_on_finished) msg = _("Share server migration for share %s " "completed first phase successfully." ) % src_share_server['id'] LOG.info(msg) else: src_share_server = self.db.share_server_get( context, src_share_server['id']) if (src_share_server['task_state'] == constants.TASK_STATE_MIGRATION_CANCELLED): msg = _("Share server migration for share %s was " "cancelled.") % src_share_server['id'] LOG.warning(msg) except Exception: self._update_resource_status( context, constants.STATUS_AVAILABLE, share_instance_ids=share_instance_ids, snapshot_instance_ids=snapshot_instance_ids) self._reset_read_only_access_rules_for_server( context, share_instances, src_share_server, dest_host=dest_share_server['host']) self.db.share_server_update( context, dest_share_server['id'], dest_updates_on_error) if src_share_server: self.db.share_server_update( context, src_share_server['id'], src_updates_on_error) msg = _("Migration of share server %s has failed.") LOG.exception(msg, src_share_server['id']) @add_hooks @utils.require_driver_initialized def share_server_migration_complete(self, context, src_share_server_id, dest_share_server_id): """Invokes driver to complete the migration of share server.""" dest_server = self.db.share_server_get(context, dest_share_server_id) src_server = self.db.share_server_get(context, src_share_server_id) share_instances = ( self.db.share_instance_get_all_by_share_server( context, src_share_server_id, with_share_data=True)) share_instance_ids = [x.id for x in share_instances] snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_instance_ids})) snapshot_instance_ids = [x.id for x in snapshot_instances] updates_on_error = { 'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ERROR, } try: self._server_migration_complete_driver(context, src_server, share_instances, snapshot_instances, dest_server) 
except Exception: msg = _("Driver migration completion failed for" " share server %s.") % src_share_server_id LOG.exception(msg) self._update_resource_status( context, constants.STATUS_ERROR, share_instance_ids=share_instance_ids, snapshot_instance_ids=snapshot_instance_ids) self.db.share_server_update( context, src_share_server_id, updates_on_error) self.db.share_server_update( context, dest_share_server_id, updates_on_error) msg_args = { 'source_id': src_share_server_id, 'dest_id': dest_share_server_id } msg = _('Share server migration from %(source_id)s to %(dest_id)s ' 'has failed in migration-complete phase.') % msg_args raise exception.ShareServerMigrationFailed(reason=msg) server_update_args = { 'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, 'status': constants.STATUS_ACTIVE } # Migration mechanism reused the share server if not dest_server['identifier']: server_update_args['identifier'] = src_server['identifier'] # Update share server status for success scenario self.db.share_server_update( context, dest_share_server_id, server_update_args) self._update_resource_status( context, constants.STATUS_AVAILABLE, share_instance_ids=share_instance_ids, snapshot_instance_ids=snapshot_instance_ids) LOG.info("Share Server Migration for share server %s was completed " "with success.", src_share_server_id) def _server_migration_complete_driver(self, context, source_share_server, share_instances, snapshot_instances, dest_share_server): self.db.share_server_update( context, source_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) self.db.share_server_update( context, dest_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) # Retrieve network allocations reserved for the new share server dest_snss = dest_share_server['share_network_subnets'] dest_sn_id = dest_snss[0]['share_network_id'] dest_sn = self.db.share_network_get(context, dest_sn_id) dest_snss = self.db.share_network_subnet_get_all_by_share_server_id( 
context, dest_share_server['id']) existing_allocations = ( self.db.network_allocations_get_for_share_server( context, dest_share_server['id'])) migration_reused_network_allocations = len(existing_allocations) == 0 migration_extended_network_allocations = ( CONF.server_migration_extend_neutron_network) server_to_get_allocations = ( dest_share_server if not migration_reused_network_allocations else source_share_server) new_network_allocations = self._form_server_setup_info( context, server_to_get_allocations, dest_sn, dest_snss) model_update = self.driver.share_server_migration_complete( context, source_share_server, dest_share_server, share_instances, snapshot_instances, new_network_allocations) alloc_update = { 'share_server_id': dest_share_server['id'] } subnet_update = {} if migration_extended_network_allocations: # NOTE: Network allocations are extended to the destination host on # previous (migration_start) step, i.e. port bindings are created # on destination host with existing ports. The network allocations # will be cut over on this (migration_complete) step, i.e. port # bindings on destination host will be activated and bindings on # source host will be deleted. 
updated_allocations = ( self.driver.network_api.cutover_network_allocations( context, source_share_server)) segmentation_id = self.db.share_server_backend_details_get_item( context, dest_share_server['id'], 'segmentation_id') alloc_update.update({ 'segmentation_id': segmentation_id }) subnet_update.update({ 'segmentation_id': segmentation_id, }) elif migration_reused_network_allocations: updated_allocations = ( self.db.network_allocations_get_for_share_server( context, source_share_server["id"])) else: network_allocations = [] for net_allocation in new_network_allocations: network_allocations += net_allocation['network_allocations'] updated_allocations = [ *network_allocations, *new_network_allocations[0]['admin_network_allocations'] ] for allocation in updated_allocations: allocation_id = allocation['id'] self.db.network_allocation_update( context, allocation_id, alloc_update) if subnet_update: for subnet in dest_snss: self.db.share_network_subnet_update(context, subnet['id'], subnet_update) # If share server doesn't have an identifier, we didn't ask the driver # to create a brand new server - this was a nondisruptive migration share_server_was_reused = not dest_share_server['identifier'] if share_server_was_reused: driver_backend_details = model_update.get( 'server_backend_details') # Clean up the previous backend details set for migration details if driver_backend_details: self.db.share_server_backend_details_delete( context, dest_share_server['id']) backend_details = ( driver_backend_details or source_share_server.get("backend_details")) if backend_details: for k, v in backend_details.items(): self.db.share_server_backend_details_set( context, dest_share_server['id'], {k: v}) host_value = share_utils.extract_host(dest_share_server['host']) service = self.db.service_get_by_args( context, host_value, 'manila-share') new_az_id = service['availability_zone_id'] share_updates = model_update.get('share_updates', {}) for share_instance in share_instances: share_update 
= share_updates.get(share_instance['id'], {}) new_share_host = share_utils.append_host( dest_share_server['host'], share_update.get('pool_name')) # Update share instance with new values instance_update = { 'share_server_id': dest_share_server['id'], 'host': new_share_host, 'share_network_id': dest_sn_id, 'availability_zone_id': new_az_id, } self.db.share_instance_update( context, share_instance['id'], instance_update) # Try to update info returned in the model update if not share_update: continue # Update export locations update_export_location = ( share_updates[share_instance['id']].get('export_locations')) if update_export_location: self.db.export_locations_update( context, share_instance['id'], update_export_location) snapshot_updates = model_update.get('snapshot_updates', {}) for snap_instance in snapshot_instances: model_update = snapshot_updates.get(snap_instance['id'], {}) snapshot_export_locations = model_update.pop( 'export_locations', []) if model_update: self.db.share_snapshot_instance_update( context, snap_instance['id'], model_update) if snapshot_export_locations: export_locations_update = [] for exp_location in snapshot_export_locations: updated_el = { 'path': exp_location['path'], 'is_admin_only': exp_location['is_admin_only'], } export_locations_update.append(updated_el) self.db.share_snapshot_instance_export_locations_update( context, snap_instance['id'], export_locations_update) # Reset read only access since migration has finished self._reset_read_only_access_rules_for_server( context, share_instances, source_share_server, dest_host=source_share_server['host']) # NOTE(dviroel): Setting the source share server to INACTIVE to avoid # being reused for new shares, since it may have some invalid # configurations and most of the drivers don't check for compatible # share servers on share creation. 
self.db.share_server_update( context, source_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, 'status': constants.STATUS_INACTIVE}) if share_server_was_reused: self.driver.deallocate_network(context, source_share_server['id']) self.db.share_server_delete(context, source_share_server['id']) else: source_share_server = self._get_share_server_dict( context, source_share_server) rpcapi = share_rpcapi.ShareAPI() rpcapi.delete_share_server(context, source_share_server) @add_hooks @utils.require_driver_initialized def share_server_migration_cancel(self, context, src_share_server_id, dest_share_server_id): share_server = self.db.share_server_get(context, src_share_server_id) dest_share_server = self.db.share_server_get(context, dest_share_server_id) if share_server['task_state'] not in ( constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): msg = _("Migration of share server %s cannot be cancelled at this " "moment.") % src_share_server_id raise exception.InvalidShareServer(reason=msg) share_instances = ( self.db.share_instance_get_all_by_share_server( context, src_share_server_id, with_share_data=True)) share_instance_ids = [x.id for x in share_instances] snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_instance_ids})) snapshot_instance_ids = [x.id for x in snapshot_instances] # Avoid new migration continue and cancel calls while cancelling the # migration, which can take some time to finish. The cancel in progress # state will help administrator to identify if the operation is still # in progress. 
self.db.share_server_update( context, share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS}) self.driver.share_server_migration_cancel( context, share_server, dest_share_server, share_instances, snapshot_instances) if CONF.server_migration_extend_neutron_network: self.driver.network_api.delete_extended_allocations( context, share_server) # NOTE(dviroel): After cancelling the migration we should set the new # share server to INVALID since it may contain an invalid configuration # to be reused. We also cleanup the source_share_server_id to unblock # new migrations. self.db.share_server_update( context, dest_share_server_id, {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED, 'status': constants.STATUS_INACTIVE}) self._check_delete_share_server(context, share_server=dest_share_server) self._update_resource_status( context, constants.STATUS_AVAILABLE, share_instance_ids=share_instance_ids, snapshot_instance_ids=snapshot_instance_ids) self._reset_read_only_access_rules_for_server( context, share_instances, share_server, dest_host=share_server['host']) self.db.share_server_update( context, share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED, 'status': constants.STATUS_ACTIVE}) LOG.info("Share Server Migration for share server %s was cancelled.", share_server['id']) @add_hooks @utils.require_driver_initialized def share_server_migration_get_progress( self, context, src_share_server_id, dest_share_server_id): src_share_server = self.db.share_server_get(context, src_share_server_id) if src_share_server['task_state'] != ( constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): msg = _("Driver is not performing migration for" " share server %s at this moment.") % src_share_server_id raise exception.InvalidShareServer(reason=msg) dest_share_server = self.db.share_server_get(context, dest_share_server_id) share_instances = ( self.db.share_instance_get_all_by_share_server( context, src_share_server_id, 
with_share_data=True)) share_instance_ids = [x.id for x in share_instances] snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_instance_ids})) return self.driver.share_server_migration_get_progress( context, src_share_server, dest_share_server, share_instances, snapshot_instances) @locked_share_network_operation def _check_share_network_update_finished( self, context, share_network_id=None): # Check if this share network is already active share_network = self.db.share_network_get(context, share_network_id) if share_network['status'] == constants.STATUS_NETWORK_ACTIVE: return share_servers = self.db.share_server_get_all_with_filters( context, {'share_network_id': share_network_id} ) if all([ss['status'] != constants.STATUS_SERVER_NETWORK_CHANGE for ss in share_servers]): # All share servers have updated their configuration self.db.share_network_update( context, share_network_id, {'status': constants.STATUS_NETWORK_ACTIVE}) def _update_share_network_security_service( self, context, share_network_id, new_security_service_id, current_security_service_id=None, check_only=False): new_security_service = self.db.security_service_get( context, new_security_service_id) current_security_service = None if current_security_service_id: current_security_service = self.db.security_service_get( context, current_security_service_id) new_ss_type = new_security_service['type'] backend_details_data = { 'name': new_security_service['name'], 'ou': new_security_service['ou'], 'default_ad_site': new_security_service['default_ad_site'], 'domain': new_security_service['domain'], 'server': new_security_service['server'], 'dns_ip': new_security_service['dns_ip'], 'user': new_security_service['user'], 'type': new_ss_type, 'password': new_security_service['password'], } share_network = self.db.share_network_get( context, share_network_id) share_servers = self.db.share_server_get_all_by_host( context, self.host, 
filters={'share_network_id': share_network_id}) for share_server in share_servers: # Get share_network_subnet in case it was updated. share_network_subnets = ( self.db.share_network_subnet_get_all_by_share_server_id( context, share_server['id'])) network_info = self._form_server_setup_info( context, share_server, share_network, share_network_subnets) share_instances = ( self.db.share_instance_get_all_by_share_server( context, share_server['id'], with_share_data=True)) share_instance_ids = [sn.id for sn in share_instances] share_instances_rules = [] for share_instance_id in share_instance_ids: instance_rules = { 'share_instance_id': share_instance_id, 'access_rules': ( self.db.share_access_get_all_for_instance( context, share_instance_id)) } share_instances_rules.append(instance_rules) # Only check if the driver supports this kind of update. if check_only: if self.driver.check_update_share_server_security_service( context, share_server, network_info, share_instances, share_instances_rules, new_security_service, current_security_service=current_security_service): # Check the next share server. continue else: # At least one share server doesn't support this update return False # NOTE(dviroel): We always do backend details update since it # should be the expected configuration for this share server. Any # issue with this operation should be fixed by the admin which will # guarantee that storage and backend_details configurations match. 
self.db.share_server_backend_details_set( context, share_server['id'], {'security_service_' + new_ss_type: jsonutils.dumps( backend_details_data)}) try: updates = self.driver.update_share_server_security_service( context, share_server, network_info, share_instances, share_instances_rules, new_security_service, current_security_service=current_security_service) or {} except Exception: operation = 'add' sec_serv_info = ('new security service %s' % new_security_service_id) if current_security_service_id: operation = 'update' sec_serv_info = ('current security service %s and ' % current_security_service_id + sec_serv_info) msg = _("Share server %(server_id)s has failed on security " "service %(operation)s operation for " "%(sec_serv_ids)s.") % { 'server_id': share_server['id'], 'operation': operation, 'sec_serv_ids': sec_serv_info, } LOG.exception(msg) # Set share server to error. Security service configuration # must be fixed before restoring it to active again. self.db.share_server_update( context, share_server['id'], {'status': constants.STATUS_ERROR}) if current_security_service: # NOTE(dviroel): An already configured security service has # failed on update operation. We will set all share # instances to 'error'. 
if share_instance_ids: self.db.share_instance_status_update( context, share_instance_ids, {'status': constants.STATUS_ERROR}) # Update share instance access rules status (self.access_helper .update_share_instances_access_rules_status( context, constants.SHARE_INSTANCE_RULES_ERROR, share_instance_ids)) # Go to the next share server continue # Update access rules based on drivers updates for instance_id, rules_updates in updates.items(): self.access_helper.process_driver_rule_updates( context, rules_updates, instance_id) msg = _("Security service was successfully updated on share " "server %s.") % share_server['id'] LOG.info(msg) self.db.share_server_update( context, share_server['id'], {'status': constants.STATUS_ACTIVE}) if check_only: # All share servers support the requested update return True # Check if all share servers have already finished their updates in # order to properly update share network status self._check_share_network_update_finished( context, share_network_id=share_network['id']) def update_share_network_security_service( self, context, share_network_id, new_security_service_id, current_security_service_id=None): self._update_share_network_security_service( context, share_network_id, new_security_service_id, current_security_service_id=current_security_service_id, check_only=False) def check_update_share_network_security_service( self, context, share_network_id, new_security_service_id, current_security_service_id=None): is_supported = self._update_share_network_security_service( context, share_network_id, new_security_service_id, current_security_service_id=current_security_service_id, check_only=True) self._update_share_network_security_service_operations( context, share_network_id, is_supported, new_security_service_id=new_security_service_id, current_security_service_id=current_security_service_id) @api.locked_security_service_update_operation def _update_share_network_security_service_operations( self, context, share_network_id, is_supported, 
new_security_service_id=None, current_security_service_id=None): update_check_key = self.share_api.get_security_service_update_key( 'hosts_check', new_security_service_id, current_security_service_id) current_hosts_info = self.db.async_operation_data_get( context, share_network_id, update_check_key) if current_hosts_info: current_hosts = json.loads(current_hosts_info) current_hosts[self.host] = is_supported self.db.async_operation_data_update( context, share_network_id, {update_check_key: json.dumps(current_hosts)}) else: LOG.debug('A share network security service check was requested ' 'but no entries were found in database. Ignoring call ' 'and returning.') @api.locked_share_server_update_allocations_operation def _update_share_server_allocations_check_operation( self, context, is_supported, share_network_id=None, availability_zone_id=None): update_key = self.share_api.get_share_server_update_allocations_key( share_network_id, availability_zone_id) current_hosts_info = self.db.async_operation_data_get( context, share_network_id, update_key) if current_hosts_info: current_hosts = json.loads(current_hosts_info) current_hosts[self.host] = is_supported self.db.async_operation_data_update( context, share_network_id, {update_key: json.dumps(current_hosts)}) else: LOG.debug('A share network subnet create check was requested ' 'but no entries were found in database. 
Ignoring call ' 'and returning.') def _get_subnet_allocations(self, context, share_server_id, share_network_subnet): network_allocations = ( self.db.network_allocations_get_for_share_server( context, share_server_id, label='user', subnet_id=share_network_subnet['id'])) return { 'share_network_subnet_id': share_network_subnet['id'], 'neutron_net_id': share_network_subnet['neutron_net_id'], 'neutron_subnet_id': share_network_subnet['neutron_subnet_id'], 'network_allocations': network_allocations, } def _form_network_allocations(self, context, share_server_id, share_network_subnets): subnet_allocations = [] for share_network_subnet in share_network_subnets: subnet_allocations.append(self._get_subnet_allocations( context, share_server_id, share_network_subnet)) admin_network_allocations = ( self.db.network_allocations_get_for_share_server( context, share_server_id, label='admin')) return { 'admin_network_allocations': admin_network_allocations, 'subnets': subnet_allocations, } def check_update_share_server_network_allocations( self, context, share_network_id, new_share_network_subnet): share_network = self.db.share_network_get( context, share_network_id) az_subnets = ( self.db.share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, new_share_network_subnet['availability_zone_id'], fallback_to_default=False) ) self.driver.network_api.include_network_info(new_share_network_subnet) # all subnets have the same set of share servers, so do the check from # servers in the first subnet. 
share_servers = az_subnets[0]['share_servers'] if az_subnets else [] is_supported = True for share_server in share_servers: current_network_allocations = self._form_network_allocations( context, share_server['id'], az_subnets) share_instances = ( self.db.share_instance_get_all_by_share_server( context, share_server['id'], with_share_data=True)) share_instance_ids = [sn.id for sn in share_instances] share_instances_rules = [] for share_instance_id in share_instance_ids: instance_rules = { 'share_instance_id': share_instance_id, 'access_rules': ( self.db.share_access_get_all_for_instance( context, share_instance_id)) } share_instances_rules.append(instance_rules) if self.driver.check_update_share_server_network_allocations( context, share_server, current_network_allocations, new_share_network_subnet, share_network['security_services'], share_instances, share_instances_rules): # Check the next share server. continue else: # At least one share server doesn't support this update. is_supported = False break self._update_share_server_allocations_check_operation( context, is_supported, share_network_id=share_network_id, availability_zone_id=( new_share_network_subnet['availability_zone_id'])) def _do_update_share_server_network_allocations( self, context, share_server, share_network, new_subnet, current_network_allocations, share_instances, snapshot_instance_ids): self.driver.allocate_network( context, share_server, share_network, new_subnet) new_network_allocations = self._get_subnet_allocations( context, share_server['id'], new_subnet) if not new_network_allocations['network_allocations']: raise exception.AllocationsNotFoundForShareServer( share_server_id=share_server['id']) # NOTE(felipe_rodrigues): all allocations have the same network # segmentation info, so validation from the first one. 
self._validate_segmentation_id( new_network_allocations['network_allocations'][0]) model_update = self.driver.update_share_server_network_allocations( context, share_server, current_network_allocations, new_network_allocations, share_network['security_services'], share_instances, snapshot_instance_ids) self.driver.update_network_allocation(context, share_server) driver_backend_details = model_update.get('server_details') if driver_backend_details: self.db.share_server_backend_details_set( context, share_server['id'], driver_backend_details) share_updates = model_update.get('share_updates', {}) for share_instance_id, export_locations in share_updates.items(): self.db.export_locations_update( context, share_instance_id, export_locations) snapshot_updates = model_update.get('snapshot_updates', {}) for snap_instance_id, model_update in snapshot_updates.items(): snapshot_export_locations = model_update.pop( 'export_locations', []) if model_update: self.db.share_snapshot_instance_update( context, snap_instance_id, model_update) if snapshot_export_locations: export_locations_update = [] for exp_location in snapshot_export_locations: updated_el = { 'path': exp_location['path'], 'is_admin_only': exp_location['is_admin_only'], } export_locations_update.append(updated_el) self.db.share_snapshot_instance_export_locations_update( context, snap_instance_id, export_locations_update) def update_share_server_network_allocations( self, context, share_network_id, new_share_network_subnet_id): share_network = self.db.share_network_get( context, share_network_id) new_subnet = self.db.share_network_subnet_get( context, new_share_network_subnet_id) current_subnets = ( self.db.share_network_subnets_get_all_by_availability_zone_id( context, share_network_id, new_subnet['availability_zone_id'], fallback_to_default=False) ) current_subnets = [subnet for subnet in current_subnets if subnet['id'] != new_share_network_subnet_id] share_servers = ( 
self.db.share_server_get_all_by_host_and_or_share_subnet( context, host=self.host, share_subnet_id=new_share_network_subnet_id)) for share_server in share_servers: share_server_id = share_server['id'] current_network_allocations = self._form_network_allocations( context, share_server_id, current_subnets) share_instances = ( self.db.share_instance_get_all_by_share_server( context, share_server_id, with_share_data=True)) share_instance_ids = [x['id'] for x in share_instances] snapshot_instances = ( self.db.share_snapshot_instance_get_all_with_filters( context, {'share_instance_ids': share_instance_ids})) snapshot_instance_ids = [x['id'] for x in snapshot_instances] try: self._do_update_share_server_network_allocations( context, share_server, share_network, new_subnet, current_network_allocations, share_instances, snapshot_instances) except Exception as e: msg = ('Failed to update allocations of share server ' '%(server_id)s on subnet %(subnet_id)s: %(e)s.') data = { 'server_id': share_server_id, 'subnet_id': new_share_network_subnet_id, 'e': str(e), } LOG.exception(msg, data) # Set resources to error. Allocations configuration must be # fixed before restoring it to active again. self._handle_setup_server_error(context, share_server_id, e) self._update_resource_status( context, constants.STATUS_ERROR, share_instance_ids=share_instance_ids, snapshot_instance_ids=snapshot_instance_ids) continue msg = _( "Network allocations was successfully updated on share " "server %s.") % share_server['id'] LOG.info(msg) self.db.share_server_update( context, share_server['id'], {'status': constants.STATUS_ACTIVE}) # Check if all share servers have already finished their updates in # order to properly update share network status. 
self._check_share_network_update_finished( context, share_network_id=share_network['id']) def update_share_from_metadata(self, context, share_id, metadata): share = self.db.share_get(context, share_id) share_instance = self._get_share_instance(context, share) share_server = self._get_share_server(context, share_instance) try: self.driver.update_share_from_metadata(context, share_instance, metadata, share_server) self.message_api.create( context, message_field.Action.UPDATE_METADATA, share['project_id'], resource_type=message_field.Resource.SHARE, resource_id=share_id, detail=message_field.Detail.UPDATE_METADATA_SUCCESS) except Exception: self.message_api.create( context, message_field.Action.UPDATE_METADATA, share['project_id'], resource_type=message_field.Resource.SHARE, resource_id=share_id, detail=message_field.Detail.UPDATE_METADATA_FAILURE) def update_share_network_subnet_from_metadata(self, context, share_network_id, share_network_subnet_id, share_server_id, metadata): share_network = self.db.share_network_get(context, share_network_id) share_network_subnet = self.db.share_network_subnet_get( context, share_network_subnet_id) share_server = self.db.share_server_get(context, share_server_id) try: self.driver.update_share_network_subnet_from_metadata( context, share_network, share_network_subnet, share_server, metadata) self.message_api.create( context, message_field.Action.UPDATE_METADATA, share_network['project_id'], resource_type=message_field.Resource.SHARE_NETWORK_SUBNET, resource_id=share_network_subnet_id, detail=message_field.Detail.UPDATE_METADATA_SUCCESS) except Exception as e: if isinstance(e, NotImplementedError): LOG.debug("Not passing the updates of share network subnet " "metadata to share driver since the required driver " "interface is not implemented.") self.message_api.create( context, message_field.Action.UPDATE_METADATA, share_network['project_id'], resource_type=message_field.Resource.SHARE_NETWORK_SUBNET, 
resource_id=share_network_subnet_id, detail=message_field.Detail.UPDATE_METADATA_FAILURE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/migration.py0000664000175000017500000001765500000000000020002 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Helper class for Share Migration.""" import time from oslo_config import cfg from oslo_log import log from manila.common import constants from manila import exception from manila.i18n import _ from manila.share import api as share_api from manila.share import rpcapi as share_rpcapi import manila.utils as utils LOG = log.getLogger(__name__) migration_opts = [ cfg.IntOpt( 'migration_wait_access_rules_timeout', default=180, help="Time to wait for access rules to be allowed/denied on backends " "when migrating shares using generic approach (seconds)."), cfg.IntOpt( 'migration_create_delete_share_timeout', default=300, help='Timeout for creating and deleting share instances ' 'when performing share migration (seconds).'), ] CONF = cfg.CONF CONF.register_opts(migration_opts) class ShareMigrationHelper(object): def __init__(self, context, db, access_helper): self.db = db self.context = context self.access_helper = access_helper self.api = share_api.API() self.access_helper = access_helper self.migration_create_delete_share_timeout = ( 
CONF.migration_create_delete_share_timeout) self.migration_wait_access_rules_timeout = ( CONF.migration_wait_access_rules_timeout) def delete_instance_and_wait(self, share_instance): self.api.delete_instance(self.context, share_instance, True) # Wait for deletion. starttime = time.time() deadline = starttime + self.migration_create_delete_share_timeout tries = 0 instance = "Something not None" while instance is not None: try: instance = self.db.share_instance_get(self.context, share_instance['id']) tries += 1 now = time.time() if now > deadline: msg = _("Timeout trying to delete instance " "%s") % share_instance['id'] raise exception.ShareMigrationFailed(reason=msg) except exception.NotFound: instance = None else: # 1.414 = square-root of 2 time.sleep(1.414 ** tries) def create_instance_and_wait(self, share, dest_host, new_share_network_id, new_az_id, new_share_type_id): new_share_instance = self.api.create_instance( self.context, share, new_share_network_id, dest_host, new_az_id, share_type_id=new_share_type_id) # Wait for new_share_instance to become ready starttime = time.time() deadline = starttime + self.migration_create_delete_share_timeout new_share_instance = self.db.share_instance_get( self.context, new_share_instance['id'], with_share_data=True) tries = 0 while new_share_instance['status'] != constants.STATUS_AVAILABLE: tries += 1 now = time.time() if new_share_instance['status'] == constants.STATUS_ERROR: msg = _("Failed to create new share instance" " (from %(share_id)s) on " "destination host %(host_name)s") % { 'share_id': share['id'], 'host_name': dest_host} self.cleanup_new_instance(new_share_instance) raise exception.ShareMigrationFailed(reason=msg) elif now > deadline: msg = _("Timeout creating new share instance " "(from %(share_id)s) on " "destination host %(host_name)s") % { 'share_id': share['id'], 'host_name': dest_host} self.cleanup_new_instance(new_share_instance) raise exception.ShareMigrationFailed(reason=msg) else: # 1.414 = square-root 
of 2 time.sleep(1.414 ** tries) new_share_instance = self.db.share_instance_get( self.context, new_share_instance['id'], with_share_data=True) return new_share_instance # NOTE(ganso): Cleanup methods do not throw exceptions, since the # exceptions that should be thrown are the ones that call the cleanup def cleanup_new_instance(self, new_instance): try: self.delete_instance_and_wait(new_instance) except Exception: LOG.warning("Failed to cleanup new instance during generic " "migration for share %s.", new_instance['share_id']) def cleanup_access_rules(self, share_instances, share_server, dest_host=None): try: self.revert_access_rules(share_instances, share_server, dest_host) except Exception: LOG.warning("Failed to cleanup access rules during generic" " migration.") def revert_access_rules(self, share_instances, share_server, dest_host=None): shares_instance_ids = [] for share_instance in share_instances: # Cast all rules to 'queued_to_apply' so that they can be # re-applied. shares_instance_ids.append(share_instance['id']) updates = {'state': constants.ACCESS_STATE_QUEUED_TO_APPLY} self.access_helper.get_and_update_share_instance_access_rules( self.context, updates=updates, share_instance_id=share_instance['id']) if dest_host: rpcapi = share_rpcapi.ShareAPI() rpcapi.update_access_for_instances(self.context, dest_host, shares_instance_ids, share_server) else: for share_instance in share_instances: self.access_helper.update_access_rules( self.context, share_instance['id'], share_server=share_server) for share_instance in share_instances: utils.wait_for_access_update( self.context, self.db, share_instance, self.migration_wait_access_rules_timeout) def apply_new_access_rules(self, new_share_instance, share_id): rules = self.db.share_instance_access_copy( self.context, share_id, new_share_instance['id']) if rules: self.api.allow_access_to_instance(self.context, new_share_instance) utils.wait_for_access_update( self.context, self.db, new_share_instance, 
self.migration_wait_access_rules_timeout) else: LOG.debug("No access rules to sync to destination share instance.") @utils.retry(retry_param=exception.ShareServerNotReady, retries=8) def wait_for_share_server(self, share_server_id): share_server = self.db.share_server_get(self.context, share_server_id) if share_server['status'] == constants.STATUS_ERROR: raise exception.ShareServerNotCreated( share_server_id=share_server_id) elif share_server['status'] == constants.STATUS_ACTIVE: return share_server else: raise exception.ShareServerNotReady( share_server_id=share_server_id, time=511, state=constants.STATUS_AVAILABLE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/rpcapi.py0000664000175000017500000006537500000000000017271 0ustar00zuulzuul00000000000000# Copyright 2012, Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the share RPC API. """ from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils from manila import rpc from manila.share import utils CONF = cfg.CONF class ShareAPI(object): """Client side of the share rpc API. API version history: 1.0 - Initial version. 
1.1 - Add manage_share() and unmanage_share() methods 1.2 - Add extend_share() method 1.3 - Add shrink_share() method 1.4 - Introduce Share Instances: create_share() -> create_share_instance() delete_share() -> delete_share_instance() Add share_instance argument to allow_access() & deny_access() 1.5 - Add create_consistency_group, delete_consistency_group create_cgsnapshot, and delete_cgsnapshot methods 1.6 - Introduce Share migration: migrate_share() get_migration_info() get_driver_migration_info() 1.7 - Update target call API in allow/deny access methods (Removed in 1.14) 1.8 - Introduce Share Replication: create_share_replica() delete_share_replica() promote_share_replica() update_share_replica() 1.9 - Add manage_snapshot() and unmanage_snapshot() methods 1.10 - Add migration_complete(), migration_cancel() and migration_get_progress(), rename migrate_share() to migration_start(), rename get_migration_info() to migration_get_info(), rename get_driver_migration_info() to migration_get_driver_info() 1.11 - Add create_replicated_snapshot() and delete_replicated_snapshot() methods 1.12 - Add provide_share_server(), create_share_server() and migration_driver_recovery(), remove migration_get_driver_info(), update migration_cancel(), migration_complete() and migration_get_progress method signature, rename migration_get_info() to connection_get_info() 1.13 - Introduce share revert to snapshot: revert_to_snapshot() 1.14 - Add update_access() and remove allow_access() and deny_access(). 
1.15 - Updated migration_start() method with new parameter "preserve_snapshots" 1.16 - Convert create_consistency_group, delete_consistency_group create_cgsnapshot, and delete_cgsnapshot methods to create_share_group, delete_share_group create_share_group_snapshot, and delete_share_group_snapshot 1.17 - Add snapshot_update_access() 1.18 - Remove unused "share_id" parameter from revert_to_snapshot() 1.19 - Add manage_share_server() and unmanage_share_server() 1.20 - Add share_instance_id parameter for create_share_server() method 1.21 - Add share_server_migration_start, share_server_migration_check() and share_server_get_progress() 1.22 - Add update_share_network_security_service() and check_update_share_network_security_service() 1.23 - Add update_share_server_network_allocations() and check_update_share_server_network_allocations() 1.24 - Add quiesce_wait_time paramater to promote_share_replica() 1.25 - Add transfer_accept() 1.26 - Add create_backup() and delete_backup() restore_backup() methods 1.27 - Update delete_share_instance() and delete_snapshot() methods 1.28 - Add update_share_from_metadata() method 1.29 - Add ensure_shares() 1.30 - Add update_share_network_subnet_from_metadata() method """ BASE_RPC_API_VERSION = '1.0' def __init__(self, topic=None): super(ShareAPI, self).__init__() target = messaging.Target(topic=CONF.share_topic, version=self.BASE_RPC_API_VERSION) self.client = rpc.get_client(target, version_cap='1.30') def create_share_instance(self, context, share_instance, host, request_spec, filter_properties, snapshot_id=None): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.4') request_spec_p = jsonutils.to_primitive(request_spec) call_context.cast(context, 'create_share_instance', share_instance_id=share_instance['id'], request_spec=request_spec_p, filter_properties=filter_properties, snapshot_id=snapshot_id) def manage_share(self, context, share, driver_options=None): host = 
utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.1') call_context.cast(context, 'manage_share', share_id=share['id'], driver_options=driver_options) def unmanage_share(self, context, share): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.1') call_context.cast(context, 'unmanage_share', share_id=share['id']) def manage_snapshot(self, context, snapshot, host, driver_options=None): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.9') call_context.cast(context, 'manage_snapshot', snapshot_id=snapshot['id'], driver_options=driver_options) def unmanage_snapshot(self, context, snapshot, host): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.9') call_context.cast(context, 'unmanage_snapshot', snapshot_id=snapshot['id']) def manage_share_server( self, context, share_server, identifier, driver_opts): host = utils.extract_host(share_server['host']) call_context = self.client.prepare(server=host, version='1.19') call_context.cast(context, 'manage_share_server', share_server_id=share_server['id'], identifier=identifier, driver_opts=driver_opts) def unmanage_share_server(self, context, share_server, force=False): host = utils.extract_host(share_server['host']) call_context = self.client.prepare(server=host, version='1.19') call_context.cast(context, 'unmanage_share_server', share_server_id=share_server['id'], force=force) def revert_to_snapshot(self, context, share, snapshot, host, reservations): host = utils.extract_host(host) call_context = self.client.prepare(server=host, version='1.18') call_context.cast(context, 'revert_to_snapshot', snapshot_id=snapshot['id'], reservations=reservations) def delete_share_instance(self, context, share_instance, force=False, deferred_delete=False): host = utils.extract_host(share_instance['host']) call_context = 
self.client.prepare(server=host, version='1.27') call_context.cast(context, 'delete_share_instance', share_instance_id=share_instance['id'], force=force, deferred_delete=deferred_delete) def migration_start(self, context, share, dest_host, force_host_assisted_migration, preserve_metadata, writable, nondisruptive, preserve_snapshots, new_share_network_id, new_share_type_id): new_host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=new_host, version='1.15') call_context.cast( context, 'migration_start', share_id=share['id'], dest_host=dest_host, force_host_assisted_migration=force_host_assisted_migration, preserve_metadata=preserve_metadata, writable=writable, nondisruptive=nondisruptive, preserve_snapshots=preserve_snapshots, new_share_network_id=new_share_network_id, new_share_type_id=new_share_type_id) def share_server_migration_start(self, context, share_server, dest_host, writable, nondisruptive, preserve_snapshots, new_share_network_id): host = utils.extract_host(dest_host) call_context = self.client.prepare(server=host, version='1.21') call_context.cast( context, 'share_server_migration_start', share_server_id=share_server['id'], dest_host=dest_host, writable=writable, nondisruptive=nondisruptive, preserve_snapshots=preserve_snapshots, new_share_network_id=new_share_network_id) def share_server_migration_check(self, context, share_server_id, dest_host, writable, nondisruptive, preserve_snapshots, new_share_network_id): host = utils.extract_host(dest_host) call_context = self.client.prepare(server=host, version='1.21') return call_context.call( context, 'share_server_migration_check', share_server_id=share_server_id, dest_host=dest_host, writable=writable, nondisruptive=nondisruptive, preserve_snapshots=preserve_snapshots, new_share_network_id=new_share_network_id) def share_server_migration_cancel(self, context, dest_host, share_server, dest_share_server): host = utils.extract_host(dest_host) call_context = 
self.client.prepare(server=host, version='1.21') call_context.cast( context, 'share_server_migration_cancel', src_share_server_id=share_server['id'], dest_share_server_id=dest_share_server['id']) def share_server_migration_get_progress(self, context, dest_host, share_server, dest_share_server): host = utils.extract_host(dest_host) call_context = self.client.prepare(server=host, version='1.21') return call_context.call(context, 'share_server_migration_get_progress', src_share_server_id=share_server['id'], dest_share_server_id=dest_share_server['id']) def share_server_migration_complete(self, context, dest_host, share_server, dest_share_server): host = utils.extract_host(dest_host) call_context = self.client.prepare(server=host, version='1.21') call_context.cast(context, 'share_server_migration_complete', src_share_server_id=share_server['id'], dest_share_server_id=dest_share_server['id']) def connection_get_info(self, context, share_instance): new_host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.12') return call_context.call(context, 'connection_get_info', share_instance_id=share_instance['id']) def delete_share_server(self, context, share_server): host = utils.extract_host(share_server['host']) call_context = self.client.prepare(server=host, version='1.0') call_context.cast(context, 'delete_share_server', share_server=share_server) def create_snapshot(self, context, share, snapshot): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host) call_context.cast(context, 'create_snapshot', share_id=share['id'], snapshot_id=snapshot['id']) def delete_snapshot(self, context, snapshot, host, force=False, deferred_delete=False): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.27') call_context.cast(context, 'delete_snapshot', snapshot_id=snapshot['id'], force=force, deferred_delete=deferred_delete) def 
create_replicated_snapshot(self, context, share, replicated_snapshot): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.11') call_context.cast(context, 'create_replicated_snapshot', snapshot_id=replicated_snapshot['id'], share_id=share['id']) def delete_replicated_snapshot(self, context, replicated_snapshot, host, share_id=None, force=False): host = utils.extract_host(host) call_context = self.client.prepare(server=host, version='1.11') call_context.cast(context, 'delete_replicated_snapshot', snapshot_id=replicated_snapshot['id'], share_id=share_id, force=force) def update_access(self, context, share_instance): host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=host, version='1.14') call_context.cast(context, 'update_access', share_instance_id=share_instance['id']) def update_access_for_instances(self, context, dest_host, share_instance_ids, share_server_id=None): host = utils.extract_host(dest_host) call_context = self.client.prepare(server=host, version='1.21') call_context.cast(context, 'update_access_for_instances', share_instance_ids=share_instance_ids, share_server_id=share_server_id) def publish_service_capabilities(self, context): call_context = self.client.prepare(fanout=True, version='1.0') call_context.cast(context, 'publish_service_capabilities') def transfer_accept(self, ctxt, share, new_user, new_project, clear_rules=False): msg_args = { 'share_id': share['id'], 'new_user': new_user, 'new_project': new_project, 'clear_rules': clear_rules } host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.25') call_context.call(ctxt, 'transfer_accept', **msg_args) def extend_share(self, context, share, new_size, reservations): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.2') call_context.cast(context, 'extend_share', share_id=share['id'], 
new_size=new_size, reservations=reservations) def shrink_share(self, context, share, new_size): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.3') call_context.cast(context, 'shrink_share', share_id=share['id'], new_size=new_size) def create_share_group(self, context, share_group, host): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.16') call_context.cast( context, 'create_share_group', share_group_id=share_group['id']) def delete_share_group(self, context, share_group): new_host = utils.extract_host(share_group['host']) call_context = self.client.prepare(server=new_host, version='1.16') call_context.cast( context, 'delete_share_group', share_group_id=share_group['id']) def create_share_group_snapshot(self, context, share_group_snapshot, host): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.16') call_context.cast( context, 'create_share_group_snapshot', share_group_snapshot_id=share_group_snapshot['id']) def delete_share_group_snapshot(self, context, share_group_snapshot, host): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.16') call_context.cast( context, 'delete_share_group_snapshot', share_group_snapshot_id=share_group_snapshot['id']) def create_share_replica(self, context, share_replica, host, request_spec, filter_properties): new_host = utils.extract_host(host) call_context = self.client.prepare(server=new_host, version='1.8') request_spec_p = jsonutils.to_primitive(request_spec) call_context.cast(context, 'create_share_replica', share_replica_id=share_replica['id'], request_spec=request_spec_p, filter_properties=filter_properties, share_id=share_replica['share_id']) def delete_share_replica(self, context, share_replica, force=False): host = utils.extract_host(share_replica['host']) call_context = self.client.prepare(server=host, 
version='1.8') call_context.cast(context, 'delete_share_replica', share_replica_id=share_replica['id'], share_id=share_replica['share_id'], force=force) def promote_share_replica(self, context, share_replica, quiesce_wait_time=None): host = utils.extract_host(share_replica['host']) call_context = self.client.prepare(server=host, version='1.24') call_context.cast(context, 'promote_share_replica', share_replica_id=share_replica['id'], share_id=share_replica['share_id'], quiesce_wait_time=quiesce_wait_time) def update_share_replica(self, context, share_replica): host = utils.extract_host(share_replica['host']) call_context = self.client.prepare(server=host, version='1.8') call_context.cast(context, 'update_share_replica', share_replica_id=share_replica['id'], share_id=share_replica['share_id']) def migration_complete(self, context, src_share_instance, dest_instance_id): new_host = utils.extract_host(src_share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.12') call_context.cast(context, 'migration_complete', src_instance_id=src_share_instance['id'], dest_instance_id=dest_instance_id) def migration_cancel(self, context, src_share_instance, dest_instance_id): new_host = utils.extract_host(src_share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.12') call_context.cast(context, 'migration_cancel', src_instance_id=src_share_instance['id'], dest_instance_id=dest_instance_id) def migration_get_progress(self, context, src_share_instance, dest_instance_id): new_host = utils.extract_host(src_share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.12') return call_context.call(context, 'migration_get_progress', src_instance_id=src_share_instance['id'], dest_instance_id=dest_instance_id) def provide_share_server(self, context, share_instance, share_network_id, snapshot_id=None): new_host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=new_host, 
version='1.12') return call_context.call(context, 'provide_share_server', share_instance_id=share_instance['id'], share_network_id=share_network_id, snapshot_id=snapshot_id) def create_share_server(self, context, share_instance, share_server_id): new_host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.20') call_context.cast(context, 'create_share_server', share_server_id=share_server_id, share_instance_id=share_instance['id']) def snapshot_update_access(self, context, snapshot_instance): host = utils.extract_host(snapshot_instance['share_instance']['host']) call_context = self.client.prepare(server=host, version='1.17') call_context.cast(context, 'snapshot_update_access', snapshot_instance_id=snapshot_instance['id']) def update_share_network_security_service( self, context, dest_host, share_network_id, new_security_service_id, current_security_service_id=None): host = utils.extract_host(dest_host) call_context = self.client.prepare(server=host, version='1.22') call_context.cast( context, 'update_share_network_security_service', share_network_id=share_network_id, new_security_service_id=new_security_service_id, current_security_service_id=current_security_service_id) def check_update_share_network_security_service( self, context, dest_host, share_network_id, new_security_service_id, current_security_service_id=None): host = utils.extract_host(dest_host) call_context = self.client.prepare(server=host, version='1.22') call_context.cast( context, 'check_update_share_network_security_service', share_network_id=share_network_id, new_security_service_id=new_security_service_id, current_security_service_id=current_security_service_id) def check_update_share_server_network_allocations( self, context, dest_host, share_network_id, new_share_network_subnet): host = utils.extract_host(dest_host) call_context = self.client.prepare(server=host, version='1.23') call_context.cast( context, 
'check_update_share_server_network_allocations', share_network_id=share_network_id, new_share_network_subnet=new_share_network_subnet) def update_share_server_network_allocations( self, context, dest_host, share_network_id, new_share_network_subnet_id): host = utils.extract_host(dest_host) call_context = self.client.prepare(server=host, version='1.23') call_context.cast( context, 'update_share_server_network_allocations', share_network_id=share_network_id, new_share_network_subnet_id=new_share_network_subnet_id) def create_backup(self, context, backup): host = utils.extract_host(backup['host']) call_context = self.client.prepare(server=host, version='1.26') return call_context.cast(context, 'create_backup', backup=backup) def delete_backup(self, context, backup): host = utils.extract_host(backup['host']) call_context = self.client.prepare(server=host, version='1.26') return call_context.cast(context, 'delete_backup', backup=backup) def restore_backup(self, context, backup, share_id): host = utils.extract_host(backup['host']) call_context = self.client.prepare(server=host, version='1.26') return call_context.cast(context, 'restore_backup', backup=backup, share_id=share_id) def update_share_from_metadata(self, context, share, metadata): host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=host, version='1.28') return call_context.cast(context, 'update_share_from_metadata', share_id=share['id'], metadata=metadata) def update_share_network_subnet_from_metadata(self, context, share_network_id, share_network_subnet_id, share_server, metadata): host = utils.extract_host(share_server['host']) call_context = self.client.prepare(server=host, version='1.30') call_context.cast( context, 'update_share_network_subnet_from_metadata', share_network_id=share_network_id, share_network_subnet_id=share_network_subnet_id, share_server_id=share_server['id'], metadata=metadata) def ensure_driver_resources(self, context, host): host = 
utils.extract_host(host) call_context = self.client.prepare(server=host, version='1.29') return call_context.cast( context, 'ensure_driver_resources', skip_backend_info_check=True ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/share_types.py0000664000175000017500000004477300000000000020340 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Built-in share type properties.""" import re from oslo_config import cfg from oslo_db import exception as db_exception from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils from manila.api import common from manila.common import constants from manila import context from manila import db from manila import exception from manila.i18n import _ from manila import quota CONF = cfg.CONF LOG = log.getLogger(__name__) QUOTAS = quota.QUOTAS MIN_SIZE_KEY = "provisioning:min_share_size" MAX_SIZE_KEY = "provisioning:max_share_size" MAX_EXTEND_SIZE_KEY = "provisioning:max_share_extend_size" def create(context, name, extra_specs=None, is_public=True, projects=None, description=None): """Creates share types.""" extra_specs = extra_specs or {} projects = projects or [] try: get_valid_required_extra_specs(extra_specs) get_valid_optional_extra_specs(extra_specs) except exception.InvalidExtraSpec as e: raise exception.InvalidShareType(reason=e.message) extra_specs = sanitize_extra_specs(extra_specs) try: type_ref = db.share_type_create(context, dict(name=name, description=description, extra_specs=extra_specs, is_public=is_public), projects=projects) except db_exception.DBError: LOG.exception('DB error.') raise exception.ShareTypeCreateFailed(name=name, extra_specs=extra_specs) return type_ref def sanitize_extra_specs(extra_specs): """Post process extra specs here if necessary""" az_spec = constants.ExtraSpecs.AVAILABILITY_ZONES if az_spec in extra_specs: extra_specs[az_spec] = sanitize_csv(extra_specs[az_spec]) return extra_specs def update(context, id, name, description, is_public=None): """Update share type by id.""" values = {} if name: values.update({'name': name}) if description == "": values.update({'description': None}) elif description: values.update({'description': description}) if is_public is not None: values.update({'is_public': is_public}) try: db.share_type_update(context, id, values) except db_exception.DBError: LOG.exception('DB 
error.') raise exception.ShareTypeUpdateFailed(id=id) def destroy(context, id): """Marks share types as deleted.""" if id is None: msg = _("id cannot be None") raise exception.InvalidShareType(reason=msg) else: db.share_type_destroy(context, id) def get_all_types(context, inactive=0, search_opts=None): """Get all non-deleted share_types. """ search_opts = search_opts or {} filters = {} if 'is_public' in search_opts: filters['is_public'] = search_opts.pop('is_public') share_types = db.share_type_get_all(context, inactive, filters=filters) for type_name, type_args in share_types.items(): required_extra_specs = {} try: required_extra_specs = get_valid_required_extra_specs( type_args['extra_specs']) except exception.InvalidExtraSpec: LOG.exception('Share type %(share_type)s has invalid required' ' extra specs.', {'share_type': type_name}) type_args['required_extra_specs'] = required_extra_specs search_vars = {} availability_zones = search_opts.get('extra_specs', {}).pop( 'availability_zones', None) extra_specs = search_opts.pop('extra_specs', {}) if extra_specs: search_vars['extra_specs'] = extra_specs if availability_zones: search_vars['availability_zones'] = availability_zones.split(',') if search_opts: # No other search options are currently supported return {} elif not search_vars: return share_types LOG.debug("Searching by: %s", search_vars) def _check_extra_specs_match(share_type, searchdict): for k, v in searchdict.items(): if (k not in share_type['extra_specs'].keys() or share_type['extra_specs'][k] != v): return False return True def _check_availability_zones_match(share_type, availability_zones): type_azs = share_type['extra_specs'].get('availability_zones') if type_azs: type_azs = type_azs.split(',') return set(availability_zones).issubset(set(type_azs)) return True # search_option to filter_name mapping. 
filter_mapping = { 'extra_specs': _check_extra_specs_match, 'availability_zones': _check_availability_zones_match, } result = {} for type_name, type_args in share_types.items(): # go over all filters in the list (*AND* operation) type_matches = True for opt, value in search_vars.items(): try: filter_func = filter_mapping[opt] except KeyError: # no such filter - ignore it, go to next filter continue else: if not filter_func(type_args, value): type_matches = False break if type_matches: result[type_name] = type_args return result def get_share_type(ctxt, id, expected_fields=None): """Retrieves single share type by id.""" if id is None: msg = _("id cannot be None") raise exception.InvalidShareType(reason=msg) if ctxt is None: ctxt = context.get_admin_context() return db.share_type_get(ctxt, id, expected_fields=expected_fields) def get_share_type_by_name(context, name): """Retrieves single share type by name.""" if name is None: msg = _("name cannot be None") raise exception.InvalidShareType(reason=msg) if not isinstance(name, str): msg = _("the share type's name parameter was badly formatted") raise exception.InvalidShareType(reason=msg) return db.share_type_get_by_name(context, name) def get_share_type_by_name_or_id(context, share_type=None): if not share_type: share_type_ref = get_default_share_type(context) if not share_type_ref: msg = _("Default share type not found.") raise exception.ShareTypeNotFound(message=msg) return share_type_ref if uuidutils.is_uuid_like(share_type): return get_share_type(context, share_type) else: return get_share_type_by_name(context, share_type) def get_default_share_type(ctxt=None): """Get the default share type.""" name = CONF.default_share_type if name is None: return {} if ctxt is None: ctxt = context.get_admin_context() share_type = {} try: share_type = get_share_type_by_name(ctxt, name) required_extra_specs = get_valid_required_extra_specs( share_type['extra_specs']) share_type['required_extra_specs'] = required_extra_specs return 
share_type except exception.ShareTypeNotFoundByName as e: # Couldn't find share type with the name in default_share_type # flag, record this issue and move on # TODO(zhiteng) consider add notification to warn admin LOG.exception('Default share type is not found, ' 'please check default_share_type config: %s', e) except exception.InvalidExtraSpec as ex: LOG.exception('Default share type has invalid required extra' ' specs: %s', ex) def get_share_type_extra_specs(share_type_id, key=False): share_type = get_share_type(context.get_admin_context(), share_type_id) extra_specs = share_type['extra_specs'] if key: if extra_specs.get(key): return extra_specs.get(key) else: return False else: return extra_specs def get_required_extra_specs(): return constants.ExtraSpecs.REQUIRED def get_optional_extra_specs(): return constants.ExtraSpecs.OPTIONAL def get_tenant_visible_extra_specs(): return constants.ExtraSpecs.TENANT_VISIBLE def get_boolean_extra_specs(): return constants.ExtraSpecs.BOOLEAN def is_valid_required_extra_spec(key, value): """Validates required extra_spec value. :param key: extra_spec name :param value: extra_spec value :return: None if provided extra_spec is not required True/False if extra_spec is required and valid or not. """ if key not in get_required_extra_specs(): return if key == constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: return strutils.bool_from_string(value, default=None) is not None return False def get_valid_required_extra_specs(extra_specs): """Validates and returns required extra specs from dict. Raises InvalidExtraSpec if extra specs are not valid, or if any required extra specs are missing. 
""" extra_specs = extra_specs or {} missed_extra_specs = set(get_required_extra_specs()) - set(extra_specs) if missed_extra_specs: specs = ",".join(missed_extra_specs) msg = _("Required extra specs '%s' not specified.") % specs raise exception.InvalidExtraSpec(reason=msg) required_extra_specs = {} for k in get_required_extra_specs(): value = extra_specs.get(k, '') if not is_valid_required_extra_spec(k, value): msg = _("Value of required extra_spec %s is not valid") % k raise exception.InvalidExtraSpec(reason=msg) required_extra_specs[k] = value return required_extra_specs def is_valid_csv(extra_spec_value): if not isinstance(extra_spec_value, str): extra_spec_value = str(extra_spec_value) values = extra_spec_value.split(',') return all([v.strip() for v in values]) def is_valid_string(v, min_length=1, max_length=256): return isinstance(v, str) and min_length <= len(v) and len(v) < max_length def sanitize_csv(csv_string): return ','.join(value.strip() for value in csv_string.split(',') if (csv_string and value)) def is_valid_optional_extra_spec(key, value): """Validates optional but standardized extra_spec value. :param key: extra_spec name :param value: extra_spec value :return: None if provided extra_spec is not required True/False if extra_spec is required and valid or not. 
""" if key not in get_optional_extra_specs(): return if key == constants.ExtraSpecs.SNAPSHOT_SUPPORT: return parse_boolean_extra_spec(key, value) is not None elif key == constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: return parse_boolean_extra_spec(key, value) is not None elif key == constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT: return parse_boolean_extra_spec(key, value) is not None elif key == constants.ExtraSpecs.REPLICATION_TYPE_SPEC: return value in constants.ExtraSpecs.REPLICATION_TYPES elif key == constants.ExtraSpecs.MOUNT_SNAPSHOT_SUPPORT: return parse_boolean_extra_spec(key, value) is not None elif key == constants.ExtraSpecs.MOUNT_POINT_NAME_SUPPORT: return parse_boolean_extra_spec(key, value) is not None elif key == constants.ExtraSpecs.AVAILABILITY_ZONES: return is_valid_csv(value) elif key == constants.ExtraSpecs.PROVISIONING_MOUNT_POINT_PREFIX: return is_valid_string(value, min_length=0) elif key == constants.ExtraSpecs.ENCRYPTION_SUPPORT: return value in constants.ExtraSpecs.ENCRYPTION_TYPES elif key in [constants.ExtraSpecs.PROVISIONING_MAX_SHARE_SIZE, constants.ExtraSpecs.PROVISIONING_MIN_SHARE_SIZE, constants.ExtraSpecs.PROVISIONING_MAX_SHARE_EXTEND_SIZE]: try: common.validate_integer(value, 'share_size', min_value=1) return True except ValueError: return False return False def get_valid_optional_extra_specs(extra_specs): """Validates and returns optional/standard extra specs from dict. Raises InvalidExtraSpec if extra specs are not valid. 
""" extra_specs = extra_specs or {} present_optional_extra_spec_keys = set(extra_specs).intersection( set(get_optional_extra_specs())) optional_extra_specs = {} for key in present_optional_extra_spec_keys: value = extra_specs.get(key, '') if not is_valid_optional_extra_spec(key, value): msg = _("Value of optional extra_spec %s is not valid.") % key raise exception.InvalidExtraSpec(reason=msg) optional_extra_specs[key] = value return optional_extra_specs def add_share_type_access(context, share_type_id, project_id): """Add access to share type for project_id.""" if share_type_id is None: msg = _("share_type_id cannot be None") raise exception.InvalidShareType(reason=msg) return db.share_type_access_add(context, share_type_id, project_id) def remove_share_type_access(context, share_type_id, project_id): """Remove access to share type for project_id.""" if share_type_id is None: msg = _("share_type_id cannot be None") raise exception.InvalidShareType(reason=msg) return db.share_type_access_remove(context, share_type_id, project_id) def get_extra_specs_from_share(share): type_id = share.get('share_type_id', None) return get_share_type_extra_specs(type_id) def parse_boolean_extra_spec(extra_spec_key, extra_spec_value): """Parse extra spec values of the form ' True' or ' False' This method returns the boolean value of an extra spec value. If the value does not conform to the standard boolean pattern, it raises an InvalidExtraSpec exception. 
""" if not isinstance(extra_spec_value, str): extra_spec_value = str(extra_spec_value) match = re.match(r'^\s*(?PTrue|False)$', extra_spec_value.strip(), re.IGNORECASE) if match: extra_spec_value = match.group('value') try: return strutils.bool_from_string(extra_spec_value, strict=True) except ValueError: msg = (_('Invalid boolean extra spec %(key)s : %(value)s') % {'key': extra_spec_key, 'value': extra_spec_value}) raise exception.InvalidExtraSpec(reason=msg) def provision_filter_on_size(context, share_type, size, operation='create'): """This function filters share provisioning requests on size limits. If a share type has provisioning size min/max set, this filter will ensure that the share size requested is within the size limits specified in the share type. """ if not share_type: share_type = get_default_share_type() if not share_type: return size_int = int(size) extra_specs = share_type.get('extra_specs', {}) if operation in ['create', 'shrink']: min_size = extra_specs.get(MIN_SIZE_KEY) if min_size and size_int < int(min_size): msg = _("Specified share size of '%(req_size)d' is less " "than the minimum required size of '%(min_size)s' " "for share type '%(sha_type)s'." ) % {'req_size': size_int, 'min_size': min_size, 'sha_type': share_type['name']} raise exception.InvalidInput(reason=msg) if operation in ['create', 'extend']: max_size = extra_specs.get(MAX_SIZE_KEY) if max_size and size_int > int(max_size): msg = _("Specified share size of '%(req_size)d' is " "greater than the maximum allowable size of " "'%(max_size)s' for share type '%(sha_type)s'." 
) % {'req_size': size_int, 'max_size': max_size, 'sha_type': share_type['name']} raise exception.InvalidInput(reason=msg) if operation in ['admin-extend']: max_extend_size = extra_specs.get(MAX_EXTEND_SIZE_KEY) if max_extend_size and size_int > int(max_extend_size): msg = _("Specified share size of '%(req_size)d' is " "greater than the maximum allowable extend size of " "'%(max_extend_size)s' for share type '%(sha_type)s'." ) % {'req_size': size_int, 'max_extend_size': max_extend_size, 'sha_type': share_type['name']} raise exception.InvalidInput(reason=msg) def revert_allocated_share_type_quotas_during_migration( context, share, share_type_id, allow_deallocate_from_current_type=False): # If both new share type and share's share type ID, there is no need # to revert quotas because new quotas weren't allocated, as share # type changes weren't identified, unless it is a migration that was # successfully completed if ((share_type_id == share['share_type_id']) and not allow_deallocate_from_current_type): return new_share_type = get_share_type(context, share_type_id) new_type_extra_specs = new_share_type.get('extra_specs', None) new_type_replication_type = None if new_type_extra_specs: new_type_replication_type = new_type_extra_specs.get( 'replication_type', None) deltas = {} if new_type_replication_type: deltas['share_replicas'] = -1 deltas['replica_gigabytes'] = -share['size'] deltas.update({ 'share_type_id': new_share_type['id'], 'shares': -1, 'gigabytes': -share['size'] }) try: reservations = QUOTAS.reserve( context, project_id=share['project_id'], user_id=share['user_id'], **deltas) except Exception: LOG.exception("Failed to update usages for share_replicas and " "replica_gigabytes.") else: QUOTAS.commit( context, reservations, project_id=share['project_id'], user_id=share['user_id'], share_type_id=share_type_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/share/snapshot_access.py0000664000175000017500000001621700000000000021162 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.common import constants from manila import utils LOG = log.getLogger(__name__) class ShareSnapshotInstanceAccess(object): def __init__(self, db, driver): self.db = db self.driver = driver def update_access_rules(self, context, snapshot_instance_id, delete_all_rules=False, share_server=None): """Update driver and database access rules for given snapshot instance. :param context: current context :param snapshot_instance_id: Id of the snapshot instance model :param delete_all_rules: Whether all rules should be deleted. 
:param share_server: Share server model or None """ snapshot_instance = self.db.share_snapshot_instance_get( context, snapshot_instance_id, with_share_data=True) snapshot_id = snapshot_instance['snapshot_id'] @utils.synchronized( "update_access_rules_for_snapshot_%s" % snapshot_id, external=True) def _update_access_rules_locked(*args, **kwargs): return self._update_access_rules(*args, **kwargs) _update_access_rules_locked( context=context, snapshot_instance=snapshot_instance, delete_all_rules=delete_all_rules, share_server=share_server, ) def _update_access_rules(self, context, snapshot_instance, delete_all_rules=None, share_server=None): # NOTE(ganso): First let's get all the rules and the mappings. rules = self.db.share_snapshot_access_get_all_for_snapshot_instance( context, snapshot_instance['id']) add_rules = [] delete_rules = [] if delete_all_rules: # NOTE(ganso): We want to delete all rules. delete_rules = rules rules_to_be_on_snapshot = [] # NOTE(ganso): We select all deletable mappings. for rule in rules: # NOTE(ganso): No need to update the state if already set. if rule['state'] != constants.ACCESS_STATE_DENYING: self.db.share_snapshot_instance_access_update( context, rule['access_id'], snapshot_instance['id'], {'state': constants.ACCESS_STATE_DENYING}) else: # NOTE(ganso): error'ed rules are to be left alone until # reset back to "queued_to_deny" by API. rules_to_be_on_snapshot = [ r for r in rules if r['state'] not in ( constants.ACCESS_STATE_QUEUED_TO_DENY, # NOTE(ganso): We select denying rules as a recovery # mechanism for invalid rules during a restart. constants.ACCESS_STATE_DENYING, # NOTE(ganso): We do not re-send error-ed access rules to # drivers. constants.ACCESS_STATE_ERROR ) ] # NOTE(ganso): Process queued rules for rule in rules: # NOTE(ganso): We are barely handling recovery, so if any rule # exists in 'applying' or 'denying' state, we add them again. 
if rule['state'] in (constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_APPLYING): if rule['state'] == ( constants.ACCESS_STATE_QUEUED_TO_APPLY): self.db.share_snapshot_instance_access_update( context, rule['access_id'], snapshot_instance['id'], {'state': constants.ACCESS_STATE_APPLYING}) add_rules.append(rule) elif rule['state'] in ( constants.ACCESS_STATE_QUEUED_TO_DENY, constants.ACCESS_STATE_DENYING): if rule['state'] == ( constants.ACCESS_STATE_QUEUED_TO_DENY): self.db.share_snapshot_instance_access_update( context, rule['access_id'], snapshot_instance['id'], {'state': constants.ACCESS_STATE_DENYING}) delete_rules.append(rule) try: self.driver.snapshot_update_access( context, snapshot_instance, rules_to_be_on_snapshot, add_rules=add_rules, delete_rules=delete_rules, share_server=share_server) # NOTE(ganso): successfully added rules transition to "active". for rule in add_rules: self.db.share_snapshot_instance_access_update( context, rule['access_id'], snapshot_instance['id'], {'state': constants.STATUS_ACTIVE}) except Exception: # NOTE(ganso): if we failed, we set all the transitional rules # to ERROR. 
for rule in add_rules + delete_rules: self.db.share_snapshot_instance_access_update( context, rule['access_id'], snapshot_instance['id'], {'state': constants.STATUS_ERROR}) raise self._remove_access_rules( context, delete_rules, snapshot_instance['id']) if self._check_needs_refresh(context, snapshot_instance['id']): self._update_access_rules(context, snapshot_instance, share_server=share_server) else: LOG.info("Access rules were successfully applied for " "snapshot instance: %s", snapshot_instance['id']) def _check_needs_refresh(self, context, snapshot_instance_id): rules = self.db.share_snapshot_access_get_all_for_snapshot_instance( context, snapshot_instance_id) return (any(rule['state'] in ( constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_QUEUED_TO_DENY) for rule in rules)) def _remove_access_rules(self, context, rules, snapshot_instance_id): if not rules: return for rule in rules: self.db.share_snapshot_instance_access_delete( context, rule['access_id'], snapshot_instance_id) def get_snapshot_instance_access_rules(self, context, snapshot_instance_id): return self.db.share_snapshot_access_get_all_for_snapshot_instance( context, snapshot_instance_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share/utils.py0000664000175000017500000001353300000000000017140 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2015 Rushil Chugh # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Share-related Utilities and helpers.""" from oslo_config import cfg from manila.common import constants from manila.db import migration from manila import rpc from manila import utils DEFAULT_POOL_NAME = '_pool0' CONF = cfg.CONF def extract_host(host, level='backend', use_default_pool_name=False): """Extract Host, Backend or Pool information from host string. :param host: String for host, which could include host@backend#pool info :param level: Indicate which level of information should be extracted from host string. Level can be 'host', 'backend', 'pool', or 'backend_name', default value is 'backend' :param use_default_pool_name: This flag specifies what to do if level == 'pool' and there is no 'pool' info encoded in host string. default_pool_name=True will return DEFAULT_POOL_NAME, otherwise it will return None. Default value of this parameter is False. :return: expected level of information For example: host = 'HostA@BackendB#PoolC' ret = extract_host(host, 'host') # ret is 'HostA' ret = extract_host(host, 'backend') # ret is 'HostA@BackendB' ret = extract_host(host, 'pool') # ret is 'PoolC' ret = extract_host(host, 'backend_name') # ret is 'BackendB' host = 'HostX@BackendY' ret = extract_host(host, 'pool') # ret is None ret = extract_host(host, 'pool', True) # ret is '_pool0' """ if level == 'host': # Make sure pool is not included hst = host.split('#')[0] return hst.split('@')[0] if level == 'backend_name': hst = host.split('#')[0] return hst.split('@')[1] elif level == 'backend': return host.split('#')[0] elif level == 'pool': lst = host.split('#') if len(lst) == 2: return lst[1] elif use_default_pool_name is True: return DEFAULT_POOL_NAME else: return None def append_host(host, pool): """Encode pool into host info.""" if not host or not pool: return host new_host = "#".join([host, pool]) return new_host def get_active_replica(replica_list): """Returns 
the first 'active' replica in the list of replicas provided.""" for replica in replica_list: if replica['replica_state'] == constants.REPLICA_STATE_ACTIVE: return replica def change_rules_to_readonly(access_rules, add_rules, delete_rules): dict_access_rules = cast_access_object_to_dict_in_readonly(access_rules) dict_add_rules = cast_access_object_to_dict_in_readonly(add_rules) dict_delete_rules = cast_access_object_to_dict_in_readonly(delete_rules) return dict_access_rules, dict_add_rules, dict_delete_rules def cast_access_object_to_dict_in_readonly(rules): dict_rules = [] for rule in rules: dict_rules.append({ 'access_level': constants.ACCESS_LEVEL_RO, 'access_type': rule['access_type'], 'access_to': rule['access_to'] }) return dict_rules @utils.if_notifications_enabled def notify_about_share_usage(context, share, share_instance, event_suffix, extra_usage_info=None, host=None): if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_share(share, share_instance, **extra_usage_info) rpc.get_notifier("share", host).info(context, 'share.%s' % event_suffix, usage_info) def _usage_from_share(share_ref, share_instance_ref, **extra_usage_info): usage_info = { 'share_id': share_ref['id'], 'user_id': share_ref['user_id'], 'project_id': share_ref['project_id'], 'snapshot_id': share_ref['snapshot_id'], 'share_group_id': share_ref['share_group_id'], 'size': share_ref['size'], 'name': share_ref['display_name'], 'description': share_ref['display_description'], 'proto': share_ref['share_proto'], 'is_public': share_ref['is_public'], 'availability_zone': share_instance_ref['availability_zone'], 'host': share_instance_ref['host'], 'status': share_instance_ref['status'], 'share_type_id': share_instance_ref['share_type_id'], 'share_type': share_instance_ref['share_type']['name'], } usage_info.update(extra_usage_info) return usage_info def get_recent_db_migration_id(): return migration.version() def is_az_subnets_compatible(subnet_list, 
new_subnet_list): if len(subnet_list) != len(new_subnet_list): return False for subnet in subnet_list: found_compatible = False for new_subnet in new_subnet_list: if (subnet.get('neutron_net_id') == new_subnet.get('neutron_net_id') and subnet.get('neutron_subnet_id') == new_subnet.get('neutron_subnet_id')): found_compatible = True break if not found_compatible: return False return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9416714 manila-21.0.0/manila/share_group/0000775000175000017500000000000000000000000016635 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share_group/__init__.py0000664000175000017500000000000000000000000020734 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/share_group/api.py0000664000175000017500000005250300000000000017765 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to share groups. 
""" from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from oslo_utils import strutils from manila.api import common as api_common from manila.common import constants from manila.db import base from manila import exception from manila.i18n import _ from manila import quota from manila.scheduler import rpcapi as scheduler_rpcapi from manila import share from manila.share import rpcapi as share_rpcapi from manila.share import share_types CONF = cfg.CONF LOG = log.getLogger(__name__) QUOTAS = quota.QUOTAS class API(base.Base): """API for interacting with the share manager.""" def __init__(self, db_driver=None): self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.share_rpcapi = share_rpcapi.ShareAPI() self.share_api = share.API() super(API, self).__init__(db_driver) def create(self, context, name=None, description=None, share_type_ids=None, source_share_group_snapshot_id=None, share_network_id=None, share_group_type_id=None, availability_zone_id=None, availability_zone=None): """Create new share group.""" share_group_snapshot = None original_share_group = None # NOTE(gouthamr): share_server_id is inherited from the # parent share group if a share group snapshot is specified, # else, it will be set in the share manager. 
share_server_id = None if source_share_group_snapshot_id: share_group_snapshot = self.db.share_group_snapshot_get( context, source_share_group_snapshot_id) if share_group_snapshot['status'] != constants.STATUS_AVAILABLE: msg = (_("Share group snapshot status must be %s.") % constants.STATUS_AVAILABLE) raise exception.InvalidShareGroupSnapshot(reason=msg) original_share_group = self.db.share_group_get( context, share_group_snapshot['share_group_id']) share_type_ids = [ s['share_type_id'] for s in original_share_group['share_types']] share_network_id = original_share_group['share_network_id'] share_server_id = original_share_group['share_server_id'] availability_zone_id = original_share_group['availability_zone_id'] # Get share_type_objects share_type_objects = [] driver_handles_share_servers = None for share_type_id in (share_type_ids or []): try: share_type_object = share_types.get_share_type( context, share_type_id) except exception.ShareTypeNotFound: msg = _("Share type with id %s could not be found.") raise exception.InvalidInput(msg % share_type_id) share_type_objects.append(share_type_object) extra_specs = share_type_object.get('extra_specs') if extra_specs: share_type_handle_ss = strutils.bool_from_string( extra_specs.get( constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS)) if driver_handles_share_servers is None: driver_handles_share_servers = share_type_handle_ss elif not driver_handles_share_servers == share_type_handle_ss: # NOTE(ameade): if the share types have conflicting values # for driver_handles_share_servers then raise bad request msg = _("The specified share_types cannot have " "conflicting values for the " "driver_handles_share_servers extra spec.") raise exception.InvalidInput(reason=msg) if (not share_type_handle_ss) and share_network_id: msg = _("When using a share types with the " "driver_handles_share_servers extra spec as " "False, a share_network_id must not be provided.") raise exception.InvalidInput(reason=msg) share_network = {} try: if 
share_network_id: share_network = self.db.share_network_get( context, share_network_id) except exception.ShareNetworkNotFound: msg = _("The specified share network does not exist.") raise exception.InvalidInput(reason=msg) if share_network: # Check if share network is active, otherwise raise a BadRequest api_common.check_share_network_is_active(share_network) if (driver_handles_share_servers and not (source_share_group_snapshot_id or share_network_id)): msg = _("When using a share type with the " "driver_handles_share_servers extra spec as " "True, a share_network_id must be provided.") raise exception.InvalidInput(reason=msg) try: share_group_type = self.db.share_group_type_get( context, share_group_type_id) except exception.ShareGroupTypeNotFound: msg = _("The specified share group type %s does not exist.") raise exception.InvalidInput(reason=msg % share_group_type_id) supported_share_types = set( [x['share_type_id'] for x in share_group_type['share_types']]) supported_share_type_objects = [ share_types.get_share_type(context, share_type_id) for share_type_id in supported_share_types ] if not set(share_type_ids or []) <= supported_share_types: msg = _("The specified share types must be a subset of the share " "types supported by the share group type.") raise exception.InvalidInput(reason=msg) # Grab share type AZs for scheduling share_types_of_new_group = ( share_type_objects or supported_share_type_objects ) stype_azs_of_new_group = [] stypes_unsupported_in_az = [] for stype in share_types_of_new_group: stype_azs = stype.get('extra_specs', {}).get( 'availability_zones', '') if stype_azs: stype_azs = stype_azs.split(',') stype_azs_of_new_group.extend(stype_azs) if availability_zone and availability_zone not in stype_azs: # If an AZ is requested, it must be supported by the AZs # configured in each of the share types requested stypes_unsupported_in_az.append((stype['name'], stype['id'])) if stypes_unsupported_in_az: msg = _("Share group cannot be created since the 
following share " "types are not supported within the availability zone " "'%(az)s': (%(stypes)s)") payload = {'az': availability_zone, 'stypes': ''} for type_name, type_id in set(stypes_unsupported_in_az): if payload['stypes']: payload['stypes'] += ', ' type_name = '%s ' % (type_name or '') payload['stypes'] += type_name + '(ID: %s)' % type_id raise exception.InvalidInput(reason=msg % payload) try: reservations = QUOTAS.reserve(context, share_groups=1) except exception.OverQuota as e: overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'share_groups' in overs: msg = ("Quota exceeded for '%(s_uid)s' user in '%(s_pid)s' " "project. (%(d_consumed)d of " "%(d_quota)d already consumed).") LOG.warning(msg, { 's_pid': context.project_id, 's_uid': context.user_id, 'd_consumed': _consumed('share_groups'), 'd_quota': quotas['share_groups'], }) raise exception.ShareGroupsLimitExceeded() options = { 'share_group_type_id': share_group_type_id, 'source_share_group_snapshot_id': source_share_group_snapshot_id, 'share_network_id': share_network_id, 'share_server_id': share_server_id, 'availability_zone_id': availability_zone_id, 'name': name, 'description': description, 'user_id': context.user_id, 'project_id': context.project_id, 'status': constants.STATUS_CREATING, 'share_types': share_type_ids or supported_share_types } if original_share_group: options['host'] = original_share_group['host'] share_group = {} try: share_group = self.db.share_group_create(context, options) if share_group_snapshot: members = self.db.share_group_snapshot_members_get_all( context, source_share_group_snapshot_id) for member in members: share_instance = self.db.share_instance_get( context, member['share_instance_id']) share_type = share_types.get_share_type( context, share_instance['share_type_id']) self.share_api.create( context, member['share_proto'], member['size'], None, None, 
share_group_id=share_group['id'], share_group_snapshot_member=member, share_type=share_type, availability_zone=availability_zone_id, share_network_id=share_network_id) except Exception: with excutils.save_and_reraise_exception(): if share_group: self.db.share_group_destroy( context.elevated(), share_group['id']) QUOTAS.rollback(context, reservations) try: QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) request_spec = {'share_group_id': share_group['id']} request_spec.update(options) request_spec['availability_zones'] = set(stype_azs_of_new_group) request_spec['share_types'] = share_type_objects request_spec['resource_type'] = share_group_type if share_group_snapshot and original_share_group: self.share_rpcapi.create_share_group( context, share_group, original_share_group['host']) else: self.scheduler_rpcapi.create_share_group( context, share_group_id=share_group['id'], request_spec=request_spec, filter_properties={}) return share_group def delete(self, context, share_group): """Delete share group.""" share_group_id = share_group['id'] if not share_group['host']: self.db.share_group_destroy(context.elevated(), share_group_id) return statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR) if not share_group['status'] in statuses: msg = (_("Share group status must be one of %(statuses)s") % {"statuses": statuses}) raise exception.InvalidShareGroup(reason=msg) # NOTE(ameade): check for group_snapshots in the group if self.db.count_share_group_snapshots_in_share_group( context, share_group_id): msg = (_("Cannot delete a share group with snapshots")) raise exception.InvalidShareGroup(reason=msg) # NOTE(ameade): check for shares in the share group if self.db.count_shares_in_share_group(context, share_group_id): msg = (_("Cannot delete a share group with shares")) raise exception.InvalidShareGroup(reason=msg) share_group = self.db.share_group_update( context, share_group_id, 
            {'status': constants.STATUS_DELETING})

        # --- tail of delete(): quota bookkeeping + RPC cast ---
        try:
            # Release one share_groups quota unit for this group's owner.
            reservations = QUOTAS.reserve(
                context,
                share_groups=-1,
                project_id=share_group['project_id'],
                user_id=share_group['user_id'],
            )
        except exception.OverQuota as e:
            # Quota bookkeeping failure is logged but must not block the
            # delete itself.
            reservations = None
            LOG.exception(
                ("Failed to update quota for deleting share group: %s"), e)

        try:
            self.share_rpcapi.delete_share_group(context, share_group)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Undo the reservation if the cast to the manager failed.
                QUOTAS.rollback(context, reservations)

        if reservations:
            QUOTAS.commit(
                context, reservations,
                project_id=share_group['project_id'],
                user_id=share_group['user_id'],
            )

    def update(self, context, group, fields):
        """Update the given fields of a share group DB record."""
        return self.db.share_group_update(context, group['id'], fields)

    def get(self, context, share_group_id):
        """Fetch a single share group by id."""
        return self.db.share_group_get(context, share_group_id)

    def get_all(self, context, detailed=True, search_opts=None,
                sort_key=None, sort_dir=None):
        """List share groups.

        With 'all_tenants' in search_opts and an admin context, lists
        groups across all projects; otherwise only the caller's project.
        """
        if search_opts is None:
            search_opts = {}
        LOG.debug("Searching for share_groups by: %s", search_opts)

        # Get filtered list of share_groups
        if search_opts.pop('all_tenants', 0) and context.is_admin:
            share_groups = self.db.share_group_get_all(
                context, detailed=detailed, filters=search_opts,
                sort_key=sort_key, sort_dir=sort_dir)
        else:
            share_groups = self.db.share_group_get_all_by_project(
                context, context.project_id, detailed=detailed,
                filters=search_opts, sort_key=sort_key, sort_dir=sort_dir)

        return share_groups

    def create_share_group_snapshot(self, context, name=None,
                                    description=None, share_group_id=None):
        """Create new share group snapshot.

        The group and every share inside it must be AVAILABLE; a snapshot
        member record is created for each share and the work is cast to the
        share manager on the group's host.
        """
        options = {
            'share_group_id': share_group_id,
            'name': name,
            'description': description,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': constants.STATUS_CREATING,
        }
        share_group = self.db.share_group_get(context, share_group_id)
        # Check status of group, must be active
        if not share_group['status'] == constants.STATUS_AVAILABLE:
            msg = (_("Share group status must be %s")
                   % constants.STATUS_AVAILABLE)
            raise exception.InvalidShareGroup(reason=msg)
        # Create members for every share in the group
        shares = self.db.share_get_all_by_share_group_id(
            context, share_group_id)

        # Check status of all shares, they must be active in order to snap
        # the group
        for s in shares:
            if not s['status'] == constants.STATUS_AVAILABLE:
                msg = (_("Share %(s)s in share group must have status "
                         "of %(status)s in order to create a group snapshot")
                       % {"s": s['id'],
                          "status": constants.STATUS_AVAILABLE})
                raise exception.InvalidShareGroup(reason=msg)

        try:
            # Reserve one share_group_snapshots quota unit up front.
            reservations = QUOTAS.reserve(context, share_group_snapshots=1)
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            usages = e.kwargs['usages']
            quotas = e.kwargs['quotas']

            def _consumed(name):
                # Both reserved and in-use units count against the quota.
                return (usages[name]['reserved'] + usages[name]['in_use'])

            if 'share_group_snapshots' in overs:
                msg = ("Quota exceeded for '%(s_uid)s' user in '%(s_pid)s' "
                       "project. (%(d_consumed)d of "
                       "%(d_quota)d already consumed).")
                LOG.warning(msg, {
                    's_pid': context.project_id,
                    's_uid': context.user_id,
                    'd_consumed': _consumed('share_group_snapshots'),
                    'd_quota': quotas['share_group_snapshots'],
                })
            raise exception.ShareGroupSnapshotsLimitExceeded()

        snap = {}
        try:
            snap = self.db.share_group_snapshot_create(context, options)
            members = []
            for s in shares:
                member_options = {
                    'share_group_snapshot_id': snap['id'],
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': constants.STATUS_CREATING,
                    'size': s['size'],
                    'share_proto': s['share_proto'],
                    'share_instance_id': s.instance['id']
                }
                member = self.db.share_group_snapshot_member_create(
                    context, member_options)
                members.append(member)

            # Cast to share manager
            self.share_rpcapi.create_share_group_snapshot(
                context, snap, share_group['host'])
        except Exception:
            with excutils.save_and_reraise_exception():
                # This will delete the snapshot and all of its members
                if snap:
                    self.db.share_group_snapshot_destroy(context, snap['id'])
                QUOTAS.rollback(context, reservations)

        try:
            QUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                QUOTAS.rollback(context, reservations)

        return snap

    def delete_share_group_snapshot(self, context, snap):
        """Delete share group snapshot.

        The snapshot must be AVAILABLE or ERROR; it is marked DELETING and
        the deletion is cast to the share manager on the group's host.
        """
        snap_id = snap['id']
        statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR)
        share_group = self.db.share_group_get(context, snap['share_group_id'])
        if not snap['status'] in statuses:
            msg = (_("Share group snapshot status must be one of"
                     " %(statuses)s") % {"statuses": statuses})
            raise exception.InvalidShareGroupSnapshot(reason=msg)

        self.db.share_group_snapshot_update(
            context, snap_id, {'status': constants.STATUS_DELETING})

        try:
            # Release one share_group_snapshots quota unit.
            reservations = QUOTAS.reserve(
                context,
                share_group_snapshots=-1,
                project_id=snap['project_id'],
                user_id=snap['user_id'],
            )
        except exception.OverQuota as e:
            # Best effort: log and continue with the delete.
            reservations = None
            LOG.exception(
                ("Failed to update quota for deleting share group snapshot: "
                 "%s"), e)

        # Cast to share manager
        self.share_rpcapi.delete_share_group_snapshot(
            context, snap, share_group['host'])

        if reservations:
            QUOTAS.commit(
                context, reservations,
                project_id=snap['project_id'],
                user_id=snap['user_id'],
            )

    def update_share_group_snapshot(self, context, share_group_snapshot,
                                    fields):
        """Update fields of a share group snapshot DB record."""
        return self.db.share_group_snapshot_update(
            context, share_group_snapshot['id'], fields)

    def get_share_group_snapshot(self, context, snapshot_id):
        """Fetch a single share group snapshot by id."""
        return self.db.share_group_snapshot_get(context, snapshot_id)

    def get_all_share_group_snapshots(self, context, detailed=True,
                                      search_opts=None, sort_key=None,
                                      sort_dir=None):
        """List share group snapshots; 'all_tenants' works for admins only."""
        if search_opts is None:
            search_opts = {}
        LOG.debug("Searching for share group snapshots by: %s", search_opts)

        # Get filtered list of share group snapshots
        if search_opts.pop('all_tenants', 0) and context.is_admin:
            share_group_snapshots = self.db.share_group_snapshot_get_all(
                context, detailed=detailed, filters=search_opts,
                sort_key=sort_key, sort_dir=sort_dir)
        else:
            share_group_snapshots = (
                self.db.share_group_snapshot_get_all_by_project(
                    context, context.project_id, detailed=detailed,
                    filters=search_opts, sort_key=sort_key,
                    sort_dir=sort_dir,
                )
            )
        return share_group_snapshots

    def get_all_share_group_snapshot_members(self, context,
                                             share_group_snapshot_id):
        """List all member records of a share group snapshot."""
        members = self.db.share_group_snapshot_members_get_all(
            context, share_group_snapshot_id)
        return members


# (next archive member: manila/share_group/share_group_types.py)

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_log import log
from oslo_utils import uuidutils

from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.i18n import _

CONF = cfg.CONF
LOG = log.getLogger(__name__)


def create(context, name, share_types, group_specs=None, is_public=True,
           projects=None):
    """Creates share group types.

    :param context: request context
    :param name: name of the new share group type
    :param share_types: share type ids the group type supports
    :param group_specs: optional dict of group specs
    :param is_public: whether the type is visible to all projects
    :param projects: projects granted access when the type is not public
    :returns: the created share group type reference
    :raises ShareGroupTypeCreateFailed: on any underlying DB error
    """
    group_specs = group_specs or {}
    projects = projects or []
    try:
        type_ref = db.share_group_type_create(
            context,
            {"name": name,
             "group_specs": group_specs,
             "is_public": is_public,
             "share_types": share_types},
            projects=projects)
    except db_exception.DBError:
        LOG.exception('DB error')
        raise exception.ShareGroupTypeCreateFailed(
            name=name, group_specs=group_specs)
    return type_ref


def destroy(context, type_id):
    """Marks share group types as deleted.

    :raises InvalidShareGroupType: if ``type_id`` is None
    """
    # BUGFIX: the original tested ``id`` -- the Python builtin function,
    # which is never None -- instead of the ``type_id`` parameter, so the
    # guard could never fire and None was passed straight to the DB layer.
    if type_id is None:
        msg = _("Share group type ID cannot be None.")
        raise exception.InvalidShareGroupType(reason=msg)
    else:
        db.share_group_type_destroy(context, type_id)


def get_all(context, inactive=0, search_opts=None):
    """Get all non-deleted share group types."""
    search_opts = search_opts or {}
    filters = {}

    if 'is_public' in search_opts:
        filters['is_public'] = search_opts.pop('is_public')

    share_group_types = db.share_group_type_get_all(
        context, inactive, filters=filters)

    if search_opts:
        LOG.debug("Searching by: %s", search_opts)

        def _check_group_specs_match(share_group_type, searchdict):
            # True only if every searched key exists with an equal value.
            for k, v in searchdict.items():
                if (k not in share_group_type['group_specs'].keys()
                        or share_group_type['group_specs'][k] != v):
                    return False
            return True

        # search_option to filter_name mapping.
        filter_mapping = {'group_specs': _check_group_specs_match}

        result = {}
        for type_name, type_args in share_group_types.items():
            # go over all filters in the list
            for opt, values in search_opts.items():
                try:
                    filter_func = filter_mapping[opt]
                except KeyError:
                    # no such filter - ignore it, go to next filter
                    continue
                else:
                    if filter_func(type_args, values):
                        result[type_name] = type_args
                        break
        share_group_types = result
    return share_group_types


def get(ctxt, type_id, expected_fields=None):
    """Retrieves single share group type by id."""
    if type_id is None:
        msg = _("Share type ID cannot be None.")
        raise exception.InvalidShareGroupType(reason=msg)

    if ctxt is None:
        # Fall back to an elevated context when none is supplied.
        ctxt = context.get_admin_context()

    return db.share_group_type_get(
        ctxt, type_id, expected_fields=expected_fields)


def get_by_name(context, name):
    """Retrieves single share group type by name."""
    if name is None:
        msg = _("name cannot be None.")
        raise exception.InvalidShareGroupType(reason=msg)

    return db.share_group_type_get_by_name(context, name)


def get_by_name_or_id(context, share_group_type=None):
    """Resolve a share group type by name, by id, or fall back to default."""
    if not share_group_type:
        share_group_type_ref = get_default(context)
        if not share_group_type_ref:
            msg = _("Default share group type not found.")
            raise exception.ShareGroupTypeNotFound(msg)
        return share_group_type_ref

    if uuidutils.is_uuid_like(share_group_type):
        return get(context, share_group_type)
    else:
        return get_by_name(context, share_group_type)


def get_default(ctxt=None):
    """Get the default share group type."""
    name = CONF.default_share_group_type
    if name is None:
        return {}
    if ctxt is None:
        ctxt = context.get_admin_context()
    try:
        return get_by_name(ctxt, name)
    except exception.ShareGroupTypeNotFoundByName:
        # NOTE(review): falls through and implicitly returns None when the
        # configured default type does not exist.
        LOG.exception(
            "Default share group type '%s' is not found, "
            "please check 'default_share_group_type' config.",
            name,
        )


def get_tenant_visible_group_specs():
    # Group spec keys that regular tenants are allowed to see.
    return constants.ExtraSpecs.TENANT_VISIBLE


def get_boolean_group_specs():
    # Group spec keys whose values are boolean.
    return constants.ExtraSpecs.BOOLEAN


def add_share_group_type_access(context, share_group_type_id, project_id):
    """Add access to share group type for project_id."""
    if share_group_type_id is None:
        msg = _("share_group_type_id cannot be None.")
        raise exception.InvalidShareGroupType(reason=msg)
    return db.share_group_type_access_add(
        context, share_group_type_id, project_id)


def remove_share_group_type_access(context, share_group_type_id, project_id):
    """Remove access to share group type for project_id."""
    if share_group_type_id is None:
        msg = _("share_group_type_id cannot be None.")
        raise exception.InvalidShareGroupType(reason=msg)
    return db.share_group_type_access_remove(
        context, share_group_type_id, project_id)


# (next archive member: manila/ssh_utils.py)

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Ssh utilities."""

import hashlib
import logging
import os

from eventlet import pools
from oslo_config import cfg
from oslo_log import log

from manila import exception
from manila.i18n import _

try:
    import paramiko
except ImportError:
    paramiko = None

CONF = cfg.CONF

LOG = log.getLogger(__name__)

# Mirror oslo debug level onto paramiko's own logger.
if getattr(CONF, 'debug', False):
    logging.getLogger("paramiko").setLevel(logging.DEBUG)


def get_fingerprint(self):
    """Patch paramiko

    This method needs to be patched to allow paramiko to work under
    FIPS. Until the patch to do this merges, patch paramiko here.
    TODO(carloss) Remove this when paramiko is patched. See
    https://github.com/paramiko/paramiko/pull/1928
    """
    # usedforsecurity=False keeps md5 usable under FIPS-enabled OpenSSL.
    return hashlib.md5(self.asbytes(), usedforsecurity=False).digest()


if paramiko is None:
    raise exception.RequirementMissing(req='paramiko')

# Monkey-patch the fingerprint method onto paramiko's key base class.
paramiko.pkey.PKey.get_fingerprint = get_fingerprint


class SSHPool(pools.Pool):
    """A simple eventlet pool to hold ssh connections."""

    def __init__(self, ip, port, conn_timeout, login, password=None,
                 privatekey=None, *args, **kwargs):
        self.ip = ip
        self.port = port
        self.login = login
        self.password = password
        # A falsy conn_timeout (0) is normalized to None (no timeout).
        self.conn_timeout = conn_timeout if conn_timeout else None
        self.path_to_private_key = privatekey
        super(SSHPool, self).__init__(*args, **kwargs)

    def create(self, quiet=False):  # pylint: disable=method-hidden
        # Builds a new, connected paramiko SSHClient. Auth preference:
        # explicit private key, then password, then agent/default keys.
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        look_for_keys = True
        if self.path_to_private_key:
            self.path_to_private_key = os.path.expanduser(
                self.path_to_private_key)
            look_for_keys = False
        elif self.password:
            look_for_keys = False
        try:
            LOG.debug("ssh.connect: ip: %s, port: %s, look_for_keys: %s, "
                      "timeout: %s, banner_timeout: %s",
                      self.ip,
                      self.port,
                      look_for_keys,
                      self.conn_timeout,
                      self.conn_timeout)
            ssh.connect(self.ip,
                        port=self.port,
                        username=self.login,
                        password=self.password,
                        key_filename=self.path_to_private_key,
                        look_for_keys=look_for_keys,
                        timeout=self.conn_timeout,
                        banner_timeout=self.conn_timeout)
            if self.conn_timeout:
                # Reuse the connect timeout as the keepalive interval.
                transport = ssh.get_transport()
                transport.set_keepalive(self.conn_timeout)
            return ssh
        except Exception as e:
            msg = _("Check whether private key or password are correctly "
                    "set. Error connecting via ssh: %s") % e
            if quiet:
                LOG.debug(msg)
            else:
                LOG.error(msg)
            raise exception.SSHException(msg)

    def get(self):
        """Return an item from the pool, when one is available.

        This may cause the calling greenthread to block. Check if a
        connection is active before returning it. For dead connections
        create and return a new connection.
        """
        # free_items/current_size/max_size/channel come from the eventlet
        # pools.Pool base class (not visible in this file).
        if self.free_items:
            conn = self.free_items.popleft()
            if conn:
                if conn.get_transport().is_active():
                    return conn
                else:
                    # Dead connection: close it and hand out a fresh one.
                    conn.close()
            return self.create()
        if self.current_size < self.max_size:
            created = self.create()
            self.current_size += 1
            return created
        # Pool exhausted: block until another greenthread returns one.
        return self.channel.get()

    def remove(self, ssh):
        """Close an ssh client and remove it from free_items."""
        ssh.close()
        if ssh in self.free_items:
            self.free_items.remove(ssh)
        if self.current_size > 0:
            self.current_size -= 1


# (next archive member: manila/test.py)

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base classes for our unit tests.

Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
""" import os import shutil from unittest import mock import warnings import fixtures from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as config_fixture import oslo_messaging from oslo_messaging import conffixture as messaging_conffixture from oslo_utils import uuidutils import oslotest.base as base_test from sqlalchemy import exc as sqla_exc from manila.api.openstack import api_version_request as api_version from manila import coordination from manila.db import migration from manila.db.sqlalchemy import api as db_api from manila.db.sqlalchemy import models as db_models from manila import policy from manila import rpc from manila import service from manila.tests import conf_fixture from manila.tests import fake_notifier test_opts = [ cfg.StrOpt('sqlite_clean_db', default='clean.sqlite', help='File name of clean sqlite database.'), cfg.StrOpt('sqlite_db', default='manila.sqlite', help='The filename to use with sqlite.'), ] CONF = cfg.CONF CONF.register_opts(test_opts) _DB_CACHE = None class DatabaseFixture(fixtures.Fixture): def __init__(self, db_session, db_migrate, sql_connection, sqlite_db, sqlite_clean_db): self.sql_connection = sql_connection self.sqlite_db = sqlite_db self.sqlite_clean_db = sqlite_clean_db self.engine = db_session.get_engine() self.engine.dispose() conn = self.engine.connect() # FIXME(stephenfin): This is an issue. We're not applying our # migrations on SQLite in-memory backends and because the model schemas # and migration schemas don't currently match exactly, we are not # testing against something resembling what our customers would see. # We should (a) start applying the migrations for all backends (which # will require reworking the migrations since SQLite doesn't support # ALTER fully, meaning batch mode must be used) and (b) get the two # different sets of schemas in sync and keep them in sync. 
if sql_connection == "sqlite://": self.setup_sqlite(db_migrate) else: testdb = os.path.join(CONF.state_path, sqlite_db) db_migrate.upgrade('head') if os.path.exists(testdb): return if sql_connection == "sqlite://": conn = self.engine.connect() self._DB = "".join(line for line in conn.connection.iterdump()) self.engine.dispose() else: cleandb = os.path.join(CONF.state_path, sqlite_clean_db) shutil.copyfile(testdb, cleandb) def setUp(self): super().setUp() if self.sql_connection == "sqlite://": conn = self.engine.connect() conn.connection.executescript(self._DB) self.addCleanup(self.engine.dispose) # pylint: disable=no-member else: shutil.copyfile( os.path.join(CONF.state_path, self.sqlite_clean_db), os.path.join(CONF.state_path, self.sqlite_db), ) def setup_sqlite(self, db_migrate): if db_migrate.version(): return db_models.BASE.metadata.create_all(self.engine) db_migrate.stamp('head') class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" def setUp(self): super().setUp() self._original_warning_filters = warnings.filters[:] # NOTE(sdague): Make deprecation warnings only happen once. Otherwise # this gets kind of crazy given the way that upstream python libs use # this. warnings.simplefilter('once', DeprecationWarning) # NOTE(sdague): this remains an unresolved item around the way # forward on is_admin, the deprecation is definitely really premature. warnings.filterwarnings( 'ignore', message=( 'Policy enforcement is depending on the value of is_admin. ' 'This key is deprecated. Please update your policy ' 'file to use the standard policy values.' ), ) # NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy. warnings.filterwarnings( 'ignore', message='Policy .* failed scope check', category=UserWarning, ) # NOTE(gibi): The UUIDFields emits a warning if the value is not a # valid UUID. Let's escalate that to an exception in the test to # prevent adding violations. 
warnings.filterwarnings('error', message='.*invalid UUID.*') # NOTE(mriedem): Avoid adding anything which tries to convert an # object to a primitive which jsonutils.to_primitive() does not know # how to handle (or isn't given a fallback callback). warnings.filterwarnings( 'error', message=( 'Cannot convert = api_version.APIVersionRequest(right)) def is_microversion_lt(self, left, right): return (api_version.APIVersionRequest(left) < api_version.APIVersionRequest(right)) def assert_notify_called(self, mock_notify, calls): for i in range(0, len(calls)): mock_call = mock_notify.call_args_list[i] call = calls[i] posargs = mock_call[0] self.assertEqual(call[0], posargs[0]) self.assertEqual(call[1], posargs[2]) def override_config(self, name, override, group=None): """Cleanly override CONF variables.""" CONF.set_override(name, override, group) self.addCleanup(CONF.clear_override, name, group) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9416714 manila-21.0.0/manila/testing/0000775000175000017500000000000000000000000015774 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/testing/README.rst0000664000175000017500000000374100000000000017470 0ustar00zuulzuul00000000000000======================================= OpenStack Manila Testing Infrastructure ======================================= A note of clarification is in order, to help those who are new to testing in OpenStack Manila: - actual unit tests are created in the "tests" directory; - the "testing" directory is used to house the infrastructure needed to support testing in OpenStack Manila. This README file attempts to provide current and prospective contributors with everything they need to know in order to start creating unit tests and utilizing the convenience code provided in manila.testing. 
Writing Unit Tests ------------------ - All new unit tests are to be written in python-mock. - Old tests that are still written in mox should be updated to use python-mock. Usage of mox has been deprecated for writing Manila unit tests. - use addCleanup in favor of tearDown test.TestCase ------------- The TestCase class from manila.test (generally imported as test) will automatically manage self.stubs using the stubout module. They will automatically verify and clean up during the tearDown step. If using test.TestCase, calling the super class setUp is required and calling the super class tearDown is required to be last if tearDown is overridden. Running Tests ------------- The preferred way to run the unit tests is using ``tox``. Tox executes tests in isolated environment, by creating separate virtualenv and installing dependencies from the ``requirements.txt`` and ``test-requirements.txt`` files, so the only package you install is ``tox`` itself:: sudo pip install tox Run the unit tests by doing:: tox -e py3 Tests and assertRaises ---------------------- When asserting that a test should raise an exception, test against the most specific exception possible. An overly broad exception type (like Exception) can mask errors in the unit test itself. Example:: self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, elevated, instance_uuid) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9496713 manila-21.0.0/manila/tests/0000775000175000017500000000000000000000000015461 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/__init__.py0000664000175000017500000000163500000000000017577 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`manila.tests` -- Manila Unittests ===================================================== .. automodule:: manila.tests :platform: Unix """ import eventlet eventlet.monkey_patch() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9536712 manila-21.0.0/manila/tests/api/0000775000175000017500000000000000000000000016232 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/__init__.py0000664000175000017500000000000000000000000020331 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/common.py0000664000175000017500000000225600000000000020101 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.


def compare_links(actual, expected):
    """Compare xml atom links."""
    return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type'))


def compare_media_types(actual, expected):
    """Compare xml media types."""
    return compare_tree_to_dict(actual, expected, ('base', 'type'))


def compare_tree_to_dict(actual, expected, keys):
    """Compare parts of lxml.etree objects to dicts.

    Elements of ``actual`` and ``expected`` are paired positionally; the
    comparison succeeds only when every listed key carries an equal value
    in each pair (extra trailing elements on either side are ignored, as
    with ``zip``).
    """
    return all(
        elem.get(key) == data.get(key)
        for elem, data in zip(actual, expected)
        for key in keys
    )


# (next archive member: manila/tests/api/contrib/stubs.py)

# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import abc
import datetime

from manila.common import constants
from manila import exception as exc

FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}


def stub_share(id, **kwargs):
    """Build a fake share object for API tests.

    kwargs override share fields; instance-level fields may be supplied
    either via an 'instance' dict or directly as top-level kwargs.
    """
    share = {
        'id': id,
        'share_proto': 'FAKEPROTO',
        'export_location': 'fake_location',
        'export_locations': ['fake_location', 'fake_location2'],
        'user_id': 'fakeuser',
        'project_id': 'fakeproject',
        'size': 1,
        'access_rules_status': 'active',
        'status': 'fakestatus',
        'display_name': 'displayname',
        'display_description': 'displaydesc',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'snapshot_id': '2',
        'share_type_id': '1',
        'is_public': False,
        'snapshot_support': True,
        'create_share_from_snapshot_support': True,
        'revert_to_snapshot_support': False,
        'mount_snapshot_support': False,
        'replication_type': None,
        'has_replicas': False,
        'is_soft_deleted': False,
    }
    share_instance = {
        'host': 'fakehost',
        'availability_zone': 'fakeaz',
        'share_network_id': None,
        'share_server_id': 'fake_share_server_id',
        'access_rules_status': 'active',
        'share_type_id': '1',
        'encryption_key_ref': None,
    }
    if 'instance' in kwargs:
        share_instance.update(kwargs.pop('instance'))
    else:
        # remove any share instance kwargs so they don't go into the share
        for inst_key in share_instance.keys():
            if inst_key in kwargs:
                share_instance[inst_key] = kwargs.pop(inst_key)

    share.update(kwargs)

    # NOTE(ameade): We must wrap the dictionary in an class in order to stub
    # object attributes.
    class wrapper(abc.Mapping):

        def __getitem__(self, name):
            if hasattr(self, name):
                return getattr(self, name)
            return self.__dict__[name]

        def __iter__(self):
            return iter(self.__dict__)

        def __len__(self):
            return len(self.__dict__)

        def update(self, other):
            self.__dict__.update(other)

    fake_share = wrapper()
    fake_share.instance = {'id': "fake_instance_id"}
    fake_share.instance.update(share_instance)
    fake_share.update(share)

    # stub out is_busy based on task_state, this mimics what's in the Share
    # data model type
    if share.get('task_state') in constants.BUSY_TASK_STATES:
        fake_share.is_busy = True
    else:
        fake_share.is_busy = False

    fake_share.instances = [fake_share.instance]
    return fake_share


def stub_snapshot(id, **kwargs):
    """Build a fake snapshot dict; kwargs override defaults."""
    snapshot = {
        'id': id,
        'share_id': 'fakeshareid',
        'share_proto': 'fakesnapproto',
        'export_location': 'fakesnaplocation',
        'user_id': 'fakesnapuser',
        'project_id': 'fakesnapproject',
        'host': 'fakesnaphost',
        'share_size': 1,
        'size': 1,
        'status': 'fakesnapstatus',
        'aggregate_status': 'fakesnapstatus',
        'share_name': 'fakesharename',
        'display_name': 'displaysnapname',
        'display_description': 'displaysnapdesc',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
        'metadata': {}
    }
    snapshot.update(kwargs)
    return snapshot


def stub_share_type(id, **kwargs):
    """Build a fake share type dict; kwargs override defaults."""
    share_type = {
        'id': id,
        'name': 'fakesharetype',
        'description': 'fakesharetypedescription',
        'is_public': True,
    }
    share_type.update(kwargs)
    return share_type


def stub_share_type_get(context, share_type_id, **kwargs):
    return stub_share_type(share_type_id, **kwargs)


def stub_share_get(self, context, share_id, **kwargs):
    return stub_share(share_id, **kwargs)


def stub_share_get_notfound(self, context, share_id, **kwargs):
    raise exc.NotFound


def stub_share_delete(self, context, *args, **param):
    pass


def stub_share_soft_delete(self, context, *args, **param):
    pass


def stub_share_restore(self, context, *args, **param):
    pass


def stub_share_update(self, context, *args, **param):
    share = stub_share('1')
    return share


def stub_snapshot_update(self, context, *args, **param):
    share = stub_share('1')
    return share


def stub_share_get_all_by_project(self, context, sort_key=None, sort_dir=None,
                                  search_opts=None):
    # BUGFIX: the default was a mutable dict literal (search_opts={}),
    # which is shared across calls; use None instead. The argument is
    # not used by this stub, so behavior is unchanged.
    return [stub_share_get(self, context, '1')]


def stub_get_all_shares(self, context):
    return [stub_share(100, project_id='fake'),
            stub_share(101, project_id='superfake'),
            stub_share(102, project_id='superduperfake')]


def stub_snapshot_get(self, context, snapshot_id):
    return stub_snapshot(snapshot_id)


def stub_snapshot_get_notfound(self, context, snapshot_id):
    raise exc.NotFound


def stub_snapshot_create(self, context, share, display_name,
                         display_description):
    return stub_snapshot(200,
                         share_id=share['id'],
                         display_name=display_name,
                         display_description=display_description)


def stub_snapshot_delete(self, context, *args, **param):
    pass


def stub_snapshot_get_all_by_project(self, context, search_opts=None,
                                     limit=None, offset=None, sort_key=None,
                                     sort_dir=None):
    return [stub_snapshot_get(self, context, 2)]


def stub_share_group_snapshot_member(id, **kwargs):
    """Build a fake share group snapshot member dict."""
    member = {
        'id': id,
        'share_id': 'fakeshareid',
        'share_instance_id': 'fakeshareinstanceid',
        'share_proto': 'fakesnapproto',
        'share_type_id': 'fake_share_type_id',
        'export_location': 'fakesnaplocation',
        'user_id': 'fakesnapuser',
        'project_id': 'fakesnapproject',
        'host': 'fakesnaphost',
        'share_size': 1,
        'size': 1,
        'status': 'fakesnapstatus',
        'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
    }
    member.update(kwargs)
    return member
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/extensions/foxinsocks.py0000664000175000017500000000555500000000000023203 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from manila.api import extensions from manila.api.openstack import wsgi class FoxInSocksController(object): def index(self, req): return "Try to say this Mr. Knox, sir..." class FoxInSocksServerControllerExtension(wsgi.Controller): @wsgi.action('add_tweedle') def _add_tweedle(self, req, id, body): return "Tweedle Beetle Added." @wsgi.action('delete_tweedle') def _delete_tweedle(self, req, id, body): return "Tweedle Beetle Deleted." @wsgi.action('fail') def _fail(self, req, id, body): raise webob.exc.HTTPBadRequest(explanation='Tweedle fail') class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): # NOTE: This only handles JSON responses. # You can use content type header to test for XML. resp_obj.obj['big_bands'] = 'Pig Bands!' 
class Foxinsocks(extensions.ExtensionDescriptor): """The Fox In Socks Extension.""" name = "Fox In Socks" alias = "FOXNSOX" namespace = "http://www.fox.in.socks/api/ext/pie/v1.0" updated = "2011-01-22T13:25:27-06:00" def __init__(self, ext_mgr): ext_mgr.register(self) def get_resources(self): resources = [] resource = extensions.ResourceExtension('foxnsocks', FoxInSocksController()) resources.append(resource) return resources def get_controller_extensions(self): extension_list = [] extension_set = [ (FoxInSocksServerControllerExtension, 'servers'), (FoxInSocksFlavorGooseControllerExtension, 'flavors'), (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ] for klass, collection in extension_set: controller = klass() ext = extensions.ControllerExtension(self, collection, controller) extension_list.append(ext) return extension_list ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/fakes.py0000664000175000017500000002237600000000000017707 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_service import wsgi from oslo_utils import timeutils from oslo_utils import uuidutils import routes import webob import webob.dec import webob.request from manila.api import common as api_common from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi as os_wsgi from manila.api import urlmap from manila.api.v1 import router as router_v1 from manila.api.v2 import router as router_v2 from manila.common import constants from manila import context from manila import exception CONTEXT = context.get_admin_context() driver_opts = {} FAKE_UUID = '123e4567-e89b-12d3-a456-426614174000' FAKE_UUIDS = {} host = 'host_name' identifier = '7cf7c200-d3af-4e05-b87e-9167c95dfcad' class Context(object): pass class FakeRouter(wsgi.Router): def __init__(self, ext_mgr=None): pass @webob.dec.wsgify def __call__(self, req): res = webob.Response() res.status = '200' res.headers['X-Test-Success'] = 'True' return res @webob.dec.wsgify def fake_wsgi(self, req): return self.application class FakeToken(object): id_count = 0 def __getitem__(self, key): return getattr(self, key) def __init__(self, **kwargs): FakeToken.id_count += 1 self.id = FakeToken.id_count self.token_hash = None for k, v in kwargs.items(): setattr(self, k, v) class FakeRequestContext(context.RequestContext): def __init__(self, *args, **kwargs): kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token') super(FakeRequestContext, self).__init__(*args, **kwargs) class HTTPRequest(os_wsgi.Request): @classmethod def blank(cls, *args, **kwargs): if not kwargs.get('base_url'): method_url = args[0] if method_url.startswith('/v2'): kwargs['base_url'] = 'http://localhost/share/v2' else: kwargs['base_url'] = 'http://localhost/share/v1' use_admin_context = kwargs.pop('use_admin_context', False) version = kwargs.pop('version', api_version.DEFAULT_API_VERSION) experimental = kwargs.pop('experimental', False) out = os_wsgi.Request.blank(*args, **kwargs) 
out.environ['manila.context'] = FakeRequestContext( 'fake_user', 'fake', is_admin=use_admin_context) out.api_version_request = api_version.APIVersionRequest( version, experimental=experimental) return out class TestRouter(wsgi.Router): def __init__(self, controller): mapper = routes.Mapper() mapper.resource("test", "tests", controller=os_wsgi.Resource(controller)) super(TestRouter, self).__init__(mapper) class FakeAuthDatabase(object): data = {} @staticmethod def auth_token_get(context, token_hash): return FakeAuthDatabase.data.get(token_hash, None) @staticmethod def auth_token_create(context, token): fake_token = FakeToken(created_at=timeutils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token @staticmethod def auth_token_destroy(context, token_id): token = FakeAuthDatabase.data.get('id_%i' % token_id) if token and token.token_hash in FakeAuthDatabase.data: del FakeAuthDatabase.data[token.token_hash] del FakeAuthDatabase.data['id_%i' % token_id] class FakeRateLimiter(object): def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): return self.application def get_fake_uuid(token=0): if token not in FAKE_UUIDS: FAKE_UUIDS[token] = uuidutils.generate_uuid() return FAKE_UUIDS[token] def app(): """API application. No auth, just let environ['manila.context'] pass through. 
""" mapper = urlmap.URLMap() mapper['/v1'] = router_v1.APIRouter() mapper['/v2'] = router_v2.APIRouter() return mapper fixture_reset_status_with_different_roles_v1 = ( { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.STATUS_AVAILABLE, }, ) fixture_reset_status_with_different_roles = ( { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, 'version': '2.6', }, { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, 'version': '2.7', }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.STATUS_AVAILABLE, 'version': '2.6', }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.STATUS_AVAILABLE, 'version': '2.7', }, ) fixture_reset_replica_status_with_different_roles = ( { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, 'microversion': '2.55' }, { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, 'microversion': '2.56' }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.STATUS_AVAILABLE, 'microversion': '2.55' }, ) fixture_reset_replica_state_with_different_roles = ( { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.REPLICA_STATE_ACTIVE, 'microversion': '2.55' }, { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.REPLICA_STATE_OUT_OF_SYNC, 'microversion': '2.56' }, { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.REPLICA_STATE_IN_SYNC, 'microversion': '2.55' }, { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, 'microversion': '2.56' }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.REPLICA_STATE_IN_SYNC, 'microversion': '2.55' }, ) fixture_force_delete_with_different_roles = ( {'role': 'admin', 'resp_code': 202, 'version': '2.6'}, {'role': 'admin', 'resp_code': 202, 'version': '2.7'}, {'role': 'member', 'resp_code': 403, 'version': '2.6'}, {'role': 
'member', 'resp_code': 403, 'version': '2.7'}, ) fixture_invalid_reset_status_body = ( {'os-reset_status': {'x-status': 'bad'}}, {'os-reset_status': {'status': 'invalid'}} ) fixture_valid_reset_status_body = ( ({'os-reset_status': {'status': 'creating'}}, '2.6'), ({'os-reset_status': {'status': 'available'}}, '2.6'), ({'os-reset_status': {'status': 'deleting'}}, '2.6'), ({'os-reset_status': {'status': 'error_deleting'}}, '2.6'), ({'os-reset_status': {'status': 'error'}}, '2.6'), ({'os-reset_status': {'status': 'migrating'}}, '2.6'), ({'os-reset_status': {'status': 'migrating_to'}}, '2.6'), ({'reset_status': {'status': 'creating'}}, '2.7'), ({'reset_status': {'status': 'available'}}, '2.7'), ({'reset_status': {'status': 'deleting'}}, '2.7'), ({'reset_status': {'status': 'error_deleting'}}, '2.7'), ({'reset_status': {'status': 'error'}}, '2.7'), ({'reset_status': {'status': 'migrating'}}, '2.7'), ({'reset_status': {'status': 'migrating_to'}}, '2.7'), ) def mock_fake_admin_check(context, resource_name, action, *args, **kwargs): if context.is_admin: return else: raise exception.PolicyNotAuthorized(action=action) class FakeResourceViewBuilder(api_common.ViewBuilder): _collection_name = 'fake_resource' _detail_version_modifiers = [ "add_field_xyzzy", "add_field_spoon_for_admins", "remove_field_foo", ] def view(self, req, resource): keys = ('id', 'foo', 'fred', 'alice') resource_dict = {key: resource.get(key) for key in keys} self.update_versioned_resource_dict(req, resource_dict, resource) return resource_dict @api_common.ViewBuilder.versioned_method("1.41") def add_field_xyzzy(self, context, resource_dict, resource): resource_dict['xyzzy'] = resource.get('xyzzy') @api_common.ViewBuilder.versioned_method("1.6") def add_field_spoon_for_admins(self, context, resource_dict, resource): if context.is_admin: resource_dict['spoon'] = resource.get('spoon') @api_common.ViewBuilder.versioned_method("3.14") def remove_field_foo(self, context, resource_dict, resource): 
resource_dict.pop('foo', None) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9536712 manila-21.0.0/manila/tests/api/middleware/0000775000175000017500000000000000000000000020347 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/middleware/__init__.py0000664000175000017500000000000000000000000022446 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/middleware/test_auth.py0000664000175000017500000000440200000000000022721 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import webob import manila.api.middleware.auth from manila import test class TestManilaKeystoneContextMiddleware(test.TestCase): def setUp(self): super(TestManilaKeystoneContextMiddleware, self).setUp() @webob.dec.wsgify() def fake_app(req): self.context = req.environ['manila.context'] return webob.Response() self.context = None self.middleware = (manila.api.middleware.auth .ManilaKeystoneContext(fake_app)) self.request = webob.Request.blank('/') self.request.headers['X_TENANT_ID'] = 'testtenantid' self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' def test_no_user_or_user_id(self): response = self.request.get_response(self.middleware) self.assertEqual('401 Unauthorized', response.status) def test_user_only(self): self.request.headers['X_USER_ID'] = 'testuserid' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testuserid', self.context.user_id) def test_user_id_only(self): self.request.headers['X_USER'] = 'testuser' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testuser', self.context.user_id) def test_user_id_trumps_user(self): self.request.headers['X_USER_ID'] = 'testuserid' self.request.headers['X_USER'] = 'testuser' response = self.request.get_response(self.middleware) self.assertEqual('200 OK', response.status) self.assertEqual('testuserid', self.context.user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/middleware/test_faults.py0000664000175000017500000001607400000000000023266 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils import webob import webob.dec import webob.exc from manila.api.middleware import fault from manila.api.openstack import wsgi from manila import exception from manila import test class TestFaults(test.TestCase): """Tests covering `manila.api.openstack.faults:Fault` class.""" def _prepare_xml(self, xml_string): """Remove characters from string which hinder XML equality testing.""" xml_string = xml_string.replace(" ", "") xml_string = xml_string.replace("\n", "") xml_string = xml_string.replace("\t", "") return xml_string def test_400_fault_json(self): """Test fault serialized to JSON via file-extension and/or header.""" requests = [ webob.Request.blank('/.json'), webob.Request.blank('/', headers={"Accept": "application/json"}), ] for request in requests: fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram')) response = request.get_response(fault) expected = { "badRequest": { "message": "scram", "code": 400, }, } actual = jsonutils.loads(response.body) self.assertEqual("application/json", response.content_type) self.assertEqual(expected, actual) def test_413_fault_json(self): """Test fault serialized to JSON via file-extension and/or header.""" requests = [ webob.Request.blank('/.json'), webob.Request.blank('/', headers={"Accept": "application/json"}), ] for request in requests: exc = webob.exc.HTTPRequestEntityTooLarge fault = wsgi.Fault(exc(explanation='sorry', headers={'Retry-After': 4})) response = request.get_response(fault) expected = { "overLimit": { "message": "sorry", "code": 413, "retryAfter": '4', }, } 
actual = jsonutils.loads(response.body) self.assertEqual("application/json", response.content_type) self.assertEqual(expected, actual) def test_raise(self): """Ensure the ability to raise :class:`Fault` in WSGI-ified methods.""" @webob.dec.wsgify def raiser(req): raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?')) req = webob.Request.blank('/.json') resp = req.get_response(raiser) self.assertEqual("application/json", resp.content_type) self.assertEqual(404, resp.status_int) self.assertIn('whut?'.encode("utf-8"), resp.body) def test_raise_403(self): """Ensure the ability to raise :class:`Fault` in WSGI-ified methods.""" @webob.dec.wsgify def raiser(req): raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?')) req = webob.Request.blank('/.json') resp = req.get_response(raiser) self.assertEqual("application/json", resp.content_type) self.assertEqual(403, resp.status_int) self.assertNotIn('resizeNotAllowed'.encode("utf-8"), resp.body) self.assertIn('forbidden'.encode("utf-8"), resp.body) def test_fault_has_status_int(self): """Ensure the status_int is set correctly on faults.""" fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?')) self.assertEqual(400, fault.status_int) class ExceptionTest(test.TestCase): def _wsgi_app(self, inner_app): return fault.FaultWrapper(inner_app) def _do_test_exception_safety_reflected_in_faults(self, expose): class ExceptionWithSafety(exception.ManilaException): safe = expose @webob.dec.wsgify def fail(req): raise ExceptionWithSafety('some explanation') api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertIn('{"computeFault', str(resp.body), resp.body) expected = ('ExceptionWithSafety: some explanation' if expose else 'The server has either erred or is incapable ' 'of performing the requested operation.') self.assertIn(expected, str(resp.body), resp.body) self.assertEqual(500, resp.status_int, resp.body) def test_safe_exceptions_are_described_in_faults(self): 
self._do_test_exception_safety_reflected_in_faults(True) def test_unsafe_exceptions_are_not_described_in_faults(self): self._do_test_exception_safety_reflected_in_faults(False) def _do_test_exception_mapping(self, exception_type, msg): @webob.dec.wsgify def fail(req): raise exception_type(msg) api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertIn(msg, str(resp.body), resp.body) self.assertEqual(exception_type.code, resp.status_int, resp.body) if hasattr(exception_type, 'headers'): for (key, value) in exception_type.headers.items(): self.assertIn(key, resp.headers) self.assertEqual(value, resp.headers[key]) def test_quota_error_mapping(self): self._do_test_exception_mapping(exception.QuotaError, 'too many used') def test_non_manila_notfound_exception_mapping(self): class ExceptionWithCode(Exception): code = 404 self._do_test_exception_mapping(ExceptionWithCode, 'NotFound') def test_non_manila_exception_mapping(self): class ExceptionWithCode(Exception): code = 417 self._do_test_exception_mapping(ExceptionWithCode, 'Expectation failed') def test_exception_with_none_code_throws_500(self): class ExceptionWithNoneCode(Exception): code = None @webob.dec.wsgify def fail(req): raise ExceptionWithNoneCode() api = self._wsgi_app(fail) resp = webob.Request.blank('/').get_response(api) self.assertEqual(500, resp.status_int) def test_validate_request_unicode_decode_fault(self): @webob.dec.wsgify def unicode_error(req): raise UnicodeDecodeError("ascii", "test".encode(), 0, 1, "bad") api = self._wsgi_app(unicode_error) resp = webob.Request.blank('/test?foo=%88').get_response(api) self.assertEqual(400, resp.status_int) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9536712 manila-21.0.0/manila/tests/api/openstack/0000775000175000017500000000000000000000000020221 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315554.0 manila-21.0.0/manila/tests/api/openstack/__init__.py0000664000175000017500000000000000000000000022320 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/openstack/test_api_version_request.py0000664000175000017500000001601700000000000025725 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ddt from manila.api.openstack import api_version_request from manila.api.openstack import versioned_method from manila import exception from manila import test @ddt.ddt class APIVersionRequestTests(test.TestCase): def test_init(self): result = api_version_request.APIVersionRequest() self.assertIsNone(result._ver_major) self.assertIsNone(result._ver_minor) self.assertFalse(result._experimental) def test_min_version(self): self.assertEqual( api_version_request.APIVersionRequest( api_version_request._MIN_API_VERSION), api_version_request.min_api_version()) def test_max_api_version(self): self.assertEqual( api_version_request.APIVersionRequest( api_version_request._MAX_API_VERSION), api_version_request.max_api_version()) @ddt.data( ('1.1', 1, 1), ('2.10', 2, 10), ('5.234', 5, 234), ('12.5', 12, 5), ('2.0', 2, 0), ('2.200', 2, 200) ) @ddt.unpack def test_valid_version_strings(self, version_string, major, minor): request = api_version_request.APIVersionRequest(version_string) self.assertEqual(major, request._ver_major) self.assertEqual(minor, request._ver_minor) def test_null_version(self): v = api_version_request.APIVersionRequest() self.assertTrue(v.is_null()) @ddt.data('2', '200', '2.1.4', '200.23.66.3', '5 .3', '5. 
3', '5.03', '02.1', '2.001', '', ' 2.1', '2.1 ') def test_invalid_version_strings(self, version_string): self.assertRaises(exception.InvalidAPIVersionString, api_version_request.APIVersionRequest, version_string) def test_cmpkey(self): request = api_version_request.APIVersionRequest('1.2') self.assertEqual((1, 2), request._cmpkey()) @ddt.data(True, False) def test_experimental_property(self, experimental): request = api_version_request.APIVersionRequest() request.experimental = experimental self.assertEqual(experimental, request.experimental) def test_experimental_property_value_error(self): request = api_version_request.APIVersionRequest() def set_non_boolean(): request.experimental = 'non_bool_value' self.assertRaises(exception.InvalidParameterValue, set_non_boolean) def test_version_comparisons(self): v1 = api_version_request.APIVersionRequest('2.0') v2 = api_version_request.APIVersionRequest('2.5') v3 = api_version_request.APIVersionRequest('5.23') v4 = api_version_request.APIVersionRequest('2.0') v_null = api_version_request.APIVersionRequest() self.assertTrue(v1 < v2) self.assertTrue(v1 <= v2) self.assertTrue(v3 > v2) self.assertTrue(v3 >= v2) self.assertTrue(v1 != v2) self.assertTrue(v1 == v4) self.assertTrue(v1 != v_null) self.assertTrue(v_null == v_null) self.assertFalse(v1 == '2.0') def test_version_matches(self): v1 = api_version_request.APIVersionRequest('2.0') v2 = api_version_request.APIVersionRequest('2.5') v3 = api_version_request.APIVersionRequest('2.45') v4 = api_version_request.APIVersionRequest('3.3') v5 = api_version_request.APIVersionRequest('3.23') v6 = api_version_request.APIVersionRequest('2.0') v7 = api_version_request.APIVersionRequest('3.3') v8 = api_version_request.APIVersionRequest('4.0') v_null = api_version_request.APIVersionRequest() self.assertTrue(v2.matches(v1, v3)) self.assertTrue(v2.matches(v1, v_null)) self.assertTrue(v1.matches(v6, v2)) self.assertTrue(v4.matches(v2, v7)) self.assertTrue(v4.matches(v_null, v7)) 
self.assertTrue(v4.matches(v_null, v8)) self.assertFalse(v1.matches(v2, v3)) self.assertFalse(v5.matches(v2, v4)) self.assertFalse(v2.matches(v3, v1)) self.assertTrue(v1.matches(v_null, v_null)) self.assertRaises(ValueError, v_null.matches, v1, v3) def test_version_matches_experimental_request(self): experimental_request = api_version_request.APIVersionRequest('2.0') experimental_request.experimental = True non_experimental_request = api_version_request.APIVersionRequest('2.0') experimental_function = versioned_method.VersionedMethod( 'experimental_function', api_version_request.APIVersionRequest('2.0'), api_version_request.APIVersionRequest('2.1'), True, None) non_experimental_function = versioned_method.VersionedMethod( 'non_experimental_function', api_version_request.APIVersionRequest('2.0'), api_version_request.APIVersionRequest('2.1'), False, None) self.assertTrue(experimental_request.matches_versioned_method( experimental_function)) self.assertTrue(experimental_request.matches_versioned_method( non_experimental_function)) self.assertTrue(non_experimental_request.matches_versioned_method( non_experimental_function)) self.assertFalse(non_experimental_request.matches_versioned_method( experimental_function)) def test_matches_versioned_method(self): request = api_version_request.APIVersionRequest('2.0') self.assertRaises(exception.InvalidParameterValue, request.matches_versioned_method, 'fake_method') def test_get_string(self): v1_string = '3.23' v1 = api_version_request.APIVersionRequest(v1_string) self.assertEqual(v1_string, v1.get_string()) self.assertRaises(ValueError, api_version_request.APIVersionRequest().get_string) @ddt.data(('1', '0', False), ('1', '1', False), ('1', '0', True)) @ddt.unpack def test_str(self, major, minor, experimental): request_input = '%s.%s' % (major, minor) request = api_version_request.APIVersionRequest( request_input, experimental=experimental) request_string = str(request) self.assertEqual('API Version Request ' 'Major: %s, 
Minor: %s, Experimental: %s' % (major, minor, experimental), request_string) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/openstack/test_versioned_method.py0000664000175000017500000000242000000000000025166 0ustar00zuulzuul00000000000000# Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.api.openstack import versioned_method from manila import test class VersionedMethodTestCase(test.TestCase): def test_str(self): args = ('fake_name', 'fake_min', 'fake_max') method = versioned_method.VersionedMethod(*(args + (False, None))) method_string = str(method) self.assertEqual('Version Method %s: min: %s, max: %s' % args, method_string) def test_cmpkey(self): method = versioned_method.VersionedMethod( 'fake_name', 'fake_start_version', 'fake_end_version', False, 'fake_func') self.assertEqual('fake_start_version', method._cmpkey()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/openstack/test_wsgi.py0000664000175000017500000011010500000000000022601 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect from unittest import mock import ddt import webob from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes @ddt.ddt class RequestTest(test.TestCase): def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = "".encode("utf-8") self.assertIsNone(request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') request.headers["Content-Type"] = "text/html" request.body = "asdf
    ".encode("utf-8") self.assertRaises(exception.InvalidContentType, request.get_content_type) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept(self): content_type = 'application/json' request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = content_type result = request.best_match_content_type() self.assertEqual(content_type, result) def test_content_type_from_accept_best(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3, " "application/xml; q=0.9") result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.json') result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123.invalid') result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_cache_and_retrieve_resources(self): request = wsgi.Request.blank('/foo') # Test that trying to retrieve a cached object on # an empty cache fails gracefully self.assertIsNone(request.cached_resource()) self.assertIsNone(request.cached_resource_by_id('r-0')) resources = [{'id': 'r-%s' % x} for x in range(3)] # Cache an empty list of resources using the default name request.cache_resource([]) 
self.assertEqual({}, request.cached_resource()) self.assertIsNone(request.cached_resource('r-0')) # Cache some resources request.cache_resource(resources[:2]) # Cache one resource request.cache_resource(resources[2]) # Cache a different resource name other_resource = {'id': 'o-0'} request.cache_resource(other_resource, name='other-resource') self.assertEqual(resources[0], request.cached_resource_by_id('r-0')) self.assertEqual(resources[1], request.cached_resource_by_id('r-1')) self.assertEqual(resources[2], request.cached_resource_by_id('r-2')) self.assertIsNone(request.cached_resource_by_id('r-3')) self.assertEqual( {'r-0': resources[0], 'r-1': resources[1], 'r-2': resources[2]}, request.cached_resource()) self.assertEqual( other_resource, request.cached_resource_by_id('o-0', name='other-resource')) @ddt.data( 'share_type', ) def test_cache_and_retrieve_resources_by_resource(self, resource_name): cache_all_func = 'cache_db_%ss' % resource_name cache_one_func = 'cache_db_%s' % resource_name get_db_all_func = 'get_db_%ss' % resource_name get_db_one_func = 'get_db_%s' % resource_name r = wsgi.Request.blank('/foo') amount = 5 res_range = range(amount) resources = [{'id': 'id%s' % x} for x in res_range] # Store 2 getattr(r, cache_all_func)(resources[:amount - 1]) # Store 1 getattr(r, cache_one_func)(resources[amount - 1]) for i in res_range: self.assertEqual( resources[i], getattr(r, get_db_one_func)('id%s' % i), ) self.assertIsNone(getattr(r, get_db_one_func)('id%s' % amount)) self.assertEqual( {'id%s' % i: resources[i] for i in res_range}, getattr(r, get_db_all_func)()) def test_set_api_version_request_exception(self): min_version = api_version.APIVersionRequest('2.0') max_version = api_version.APIVersionRequest('2.45') self.mock_object(api_version, 'max_api_version', mock.Mock(return_value=max_version)) self.mock_object(api_version, 'min_api_version', mock.Mock(return_value=min_version)) headers = {'X-OpenStack-Manila-API-Version': '2.51'} request = 
wsgi.Request.blank( 'https://openstack.acme.com/v2/shares', method='GET', headers=headers, script_name='/v2/shares') self.assertRaises(exception.InvalidGlobalAPIVersion, request.set_api_version_request) self.assertEqual(api_version.APIVersionRequest('2.51'), request.api_version_request) @ddt.data('', '/share', '/v1', '/v2/shares', '/v1.1/', '/share/v1', '/shared-file-sytems/v2', '/share/v3.5/share-replicas', '/shared-file-sytems/v2/shares/xyzzy/action') def test_set_api_version_request(self, resource): min_version = api_version.APIVersionRequest('2.0') max_version = api_version.APIVersionRequest('3.0') self.mock_object(api_version, 'max_api_version', mock.Mock(return_value=max_version)) self.mock_object(api_version, 'min_api_version', mock.Mock(return_value=min_version)) request = wsgi.Request.blank( 'https://openstack.acme.com%s' % resource, method='GET', headers={'X-OpenStack-Manila-API-Version': '2.117'}, script_name=resource) self.assertIsNone(request.set_api_version_request()) if not resource or not ('/v1' in resource or '/v2' in resource): self.assertEqual(api_version.APIVersionRequest(), request.api_version_request) elif 'v1' in resource: self.assertEqual(api_version.APIVersionRequest('1.0'), request.api_version_request) else: self.assertEqual(api_version.APIVersionRequest('2.117'), request.api_version_request) def test_set_api_version_request_no_version_header(self): min_version = api_version.APIVersionRequest('2.0') max_version = api_version.APIVersionRequest('2.45') self.mock_object(api_version, 'max_api_version', mock.Mock(return_value=max_version)) self.mock_object(api_version, 'min_api_version', mock.Mock(return_value=min_version)) headers = {} request = wsgi.Request.blank( 'https://openstack.acme.com/v2/shares', method='GET', headers=headers, script_name='/v2/shares') self.assertIsNone(request.set_api_version_request()) self.assertEqual(api_version.APIVersionRequest('2.0'), request.api_version_request) @ddt.data(None, 'true', 'false') def 
test_set_api_version_request_experimental_header(self, experimental): min_version = api_version.APIVersionRequest('2.0') max_version = api_version.APIVersionRequest('2.45') self.mock_object(api_version, 'max_api_version', mock.Mock(return_value=max_version)) self.mock_object(api_version, 'min_api_version', mock.Mock(return_value=min_version)) headers = {'X-OpenStack-Manila-API-Version': '2.38'} if experimental: headers['X-OpenStack-Manila-API-Experimental'] = experimental request = wsgi.Request.blank( 'https://openstack.acme.com/v2/shares', method='GET', headers=headers, script_name='/v2/shares') self.assertIsNone(request.set_api_version_request()) self.assertEqual(request.api_version_request, api_version.APIVersionRequest('2.38')) expected_experimental = experimental == 'true' or False self.assertEqual(expected_experimental, request.api_version_request.experimental) class ActionDispatcherTest(test.TestCase): def test_dispatch(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' self.assertEqual('pants', serializer.dispatch({}, action='create')) def test_dispatch_action_None(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action=None)) def test_dispatch_default(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action='update')) class DictSerializerTest(test.TestCase): def test_dispatch_default(self): serializer = wsgi.DictSerializer() self.assertEqual('', serializer.serialize({}, 'update')) class JSONDictSerializerTest(test.TestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = '{"servers":{"a":[2,3]}}'.encode("utf-8") serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace( '\n'.encode("utf-8"), 
''.encode("utf-8")).replace( ' '.encode("utf-8"), ''.encode("utf-8")) self.assertEqual(expected_json, result) class TextDeserializerTest(test.TestCase): def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() self.assertEqual({}, deserializer.deserialize({}, 'update')) class JSONDeserializerTest(test.TestCase): def test_json(self): data = """{"a": { "a1": "1", "a2": "2", "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" as_dict = { 'body': { 'a': { 'a1': '1', 'a2': '2', 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], 'd': {'e': '1'}, 'f': '1', }, }, } deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) class ResourceTest(test.TestCase): def test_resource_call(self): class Controller(object): def index(self, req): return 'off' req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual('off'.encode("utf-8"), response.body) self.assertEqual(200, response.status_int) def test_resource_not_authorized(self): class Controller(object): def index(self, req): raise exception.NotAuthorized() req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(403, response.status_int) def test_dispatch(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'index', None, '') actual = resource.dispatch(method, None, {'pants': 'off'}) expected = 'off' self.assertEqual(expected, actual) def test_get_method_undefined_controller_action(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(AttributeError, resource.get_method, None, 'create', None, '') def test_get_method_action_json(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') 
def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'action', 'application/json', '{"fooAction": true}') self.assertEqual(controller._action_foo, method) def test_get_method_action_bad_body(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(exception.MalformedRequestBody, resource.get_method, None, 'action', 'application/json', '{}') def test_get_method_unknown_controller_action(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(KeyError, resource.get_method, None, 'action', 'application/json', '{"barAction": true}') def test_get_method_action_method(self): class Controller(object): def action(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) method, extensions = resource.get_method(None, 'action', 'application/xml', 'true', False], ['user', '+=*?group', False], ['ip', '1001::1001/256', False], ['ip', '1001:1001/256', False],) @ddt.unpack def test_validate_access_exception(self, access_type, access_to, ceph): self.assertRaises(webob.exc.HTTPBadRequest, common.validate_access, access_type=access_type, access_to=access_to, enable_ceph=ceph) def test_validate_public_share_policy_no_is_public(self): api_params = {'foo': 'bar', 'clemson': 'tigers'} self.mock_object(policy, 'check_policy') actual_params = common.validate_public_share_policy( 'fake_context', api_params) self.assertDictEqual(api_params, actual_params) policy.check_policy.assert_not_called() @ddt.data('foo', 123, 'all', None) def test_validate_public_share_policy_invalid_value(self, is_public): api_params = {'is_public': is_public} self.mock_object(policy, 
'check_policy') self.assertRaises(exception.InvalidParameterValue, common.validate_public_share_policy, 'fake_context', api_params) policy.check_policy.assert_not_called() @ddt.data('1', True, 'true', 'yes') def test_validate_public_share_not_authorized(self, is_public): api_params = {'is_public': is_public, 'size': '16'} self.mock_object(policy, 'check_policy', mock.Mock(return_value=False)) self.assertRaises(exception.NotAuthorized, common.validate_public_share_policy, 'fake_context', api_params) policy.check_policy.assert_called_once_with( 'fake_context', 'share', 'create_public_share', do_raise=False) @ddt.data('0', False, 'false', 'no') def test_validate_public_share_is_public_False(self, is_public): api_params = {'is_public': is_public, 'size': '16'} self.mock_object(policy, 'check_policy', mock.Mock(return_value=False)) actual_params = common.validate_public_share_policy( 'fake_context', api_params, api='update') self.assertDictEqual({'is_public': False, 'size': '16'}, actual_params) policy.check_policy.assert_called_once_with( 'fake_context', 'share', 'set_public_share', do_raise=False) @ddt.data('1', True, 'true', 'yes') def test_validate_public_share_is_public_True(self, is_public): api_params = {'is_public': is_public, 'size': '16'} self.mock_object(policy, 'check_policy', mock.Mock(return_value=True)) actual_params = common.validate_public_share_policy( 'fake_context', api_params, api='update') self.assertDictEqual({'is_public': True, 'size': '16'}, actual_params) policy.check_policy.assert_called_once_with( 'fake_context', 'share', 'set_public_share', do_raise=False) @ddt.data(({}, True), ({'neutron_net_id': 'fake_nn_id'}, False), ({'neutron_subnet_id': 'fake_sn_id'}, False), ({'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_sn_id'}, True)) @ddt.unpack def test__check_net_id_and_subnet_id(self, body, expected): if not expected: self.assertRaises(webob.exc.HTTPBadRequest, common.check_net_id_and_subnet_id, body) else: result = 
common.check_net_id_and_subnet_id(body) self.assertIsNone(result) @ddt.data(None, True, 'true', 'false', 'all') def test_parse_is_public_valid(self, value): result = common.parse_is_public(value) self.assertIn(result, (True, False, None)) def test_parse_is_public_invalid(self): self.assertRaises(webob.exc.HTTPBadRequest, common.parse_is_public, 'fakefakefake') @ddt.data(None, 'fake_az') def test__get_existing_subnets(self, az): default_subnets = 'fake_default_subnets' mock_get_default_subnets = self.mock_object( db_api, 'share_network_subnet_get_default_subnets', mock.Mock(return_value=default_subnets)) subnets = 'fake_subnets' mock_get_subnets = self.mock_object( db_api, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=subnets)) net_id = 'fake_net' context = 'fake_context' res_subnets = common._get_existing_subnets(context, net_id, az) if az: self.assertEqual(subnets, res_subnets) mock_get_subnets.assert_called_once_with(context, net_id, az, fallback_to_default=False) mock_get_default_subnets.assert_not_called() else: self.assertEqual(default_subnets, res_subnets) mock_get_subnets.assert_not_called() mock_get_default_subnets.assert_called_once_with(context, net_id) def test_validate_subnet_create(self): mock_check_net = self.mock_object(common, 'check_net_id_and_subnet_id') net = 'fake_net' mock_get_net = self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=net)) az_id = 'fake_az_id' az = {'id': az_id} mock_get_az = self.mock_object(db_api, 'availability_zone_get', mock.Mock(return_value=az)) subnets = 'fake_subnets' mock_get_subnets = self.mock_object(common, '_get_existing_subnets', mock.Mock(return_value=subnets)) net_id = 'fake_net_id' context = 'fake_context' az_name = 'fake_az' data = {'availability_zone': az_name} res_net, res_subnets = common.validate_subnet_create( context, net_id, data, True) self.assertEqual(net, res_net) self.assertEqual(subnets, res_subnets) self.assertEqual(data['availability_zone_id'], 
az_id) mock_check_net.assert_called_once_with(data) mock_get_net.assert_called_once_with(context, net_id) mock_get_az.assert_called_once_with(context, az_name) mock_get_subnets.assert_called_once_with(context, net_id, az_id) def test_validate_subnet_create_net_not_found(self): self.mock_object(common, 'check_net_id_and_subnet_id') self.mock_object(db_api, 'share_network_get', mock.Mock(side_effect=exception.ShareNetworkNotFound( share_network_id="fake_id"))) net_id = 'fake_net_id' context = 'fake_context' az_name = 'fake_az' data = {'availability_zone': az_name} self.assertRaises(webob.exc.HTTPNotFound, common.validate_subnet_create, context, net_id, data, True) def test_validate_subnet_create_az_not_found(self): self.mock_object(common, 'check_net_id_and_subnet_id') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value='fake_net')) self.mock_object( db_api, 'availability_zone_get', mock.Mock(side_effect=exception.AvailabilityZoneNotFound( id='fake_id'))) net_id = 'fake_net_id' context = 'fake_context' az_name = 'fake_az' data = {'availability_zone': az_name} self.assertRaises(webob.exc.HTTPBadRequest, common.validate_subnet_create, context, net_id, data, True) def test_validate_subnet_create_multiple_subnet_not_support(self): self.mock_object(common, 'check_net_id_and_subnet_id') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value='fake_net')) self.mock_object(db_api, 'availability_zone_get', mock.Mock(return_value={'id': 'fake_az_id'})) self.mock_object(common, '_get_existing_subnets', mock.Mock(return_value='fake_subnets')) net_id = 'fake_net_id' context = 'fake_context' az_name = 'fake_az' data = {'availability_zone': az_name} self.assertRaises(webob.exc.HTTPConflict, common.validate_subnet_create, context, net_id, data, False) @ddt.ddt class ViewBuilderTest(test.TestCase): def setUp(self): super(ViewBuilderTest, self).setUp() self.expected_resource_dict = { 'id': 'fake_resource_id', 'foo': 'quz', 'fred': 'bob', 'alice': 'waldo', 
'spoon': 'spam', 'xyzzy': 'qwerty', } self.fake_resource = db_fakes.FakeModel(self.expected_resource_dict) self.view_builder = fakes.FakeResourceViewBuilder() @ddt.data('1.0', '1.40') def test_versioned_method_no_updates(self, version): req = fakes.HTTPRequest.blank('/my_resource', version=version) actual_resource = self.view_builder.view(req, self.fake_resource) self.assertEqual(set({'id', 'foo', 'fred', 'alice'}), set(actual_resource.keys())) @ddt.data(True, False) def test_versioned_method_v1_6(self, is_admin): req = fakes.HTTPRequest.blank('/my_resource', version='1.6', use_admin_context=is_admin) expected_keys = set({'id', 'foo', 'fred', 'alice'}) if is_admin: expected_keys.add('spoon') actual_resource = self.view_builder.view(req, self.fake_resource) self.assertEqual(expected_keys, set(actual_resource.keys())) @ddt.unpack @ddt.data({'is_admin': True, 'version': '3.14'}, {'is_admin': False, 'version': '3.14'}, {'is_admin': False, 'version': '6.2'}, {'is_admin': True, 'version': '6.2'}) def test_versioned_method_all_match(self, is_admin, version): req = fakes.HTTPRequest.blank( '/my_resource', version=version, use_admin_context=is_admin) expected_keys = set({'id', 'fred', 'xyzzy', 'alice'}) if is_admin: expected_keys.add('spoon') actual_resource = self.view_builder.view(req, self.fake_resource) self.assertEqual(expected_keys, set(actual_resource.keys())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/test_extensions.py0000664000175000017500000001452200000000000022046 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils import webob from manila.api import extensions from manila.api.v1 import router from manila import policy from manila import test CONF = cfg.CONF class ExtensionTestCase(test.TestCase): def setUp(self): super(ExtensionTestCase, self).setUp() ext_list = CONF.osapi_share_extension[:] fox = ('manila.tests.api.extensions.foxinsocks.Foxinsocks') if fox not in ext_list: ext_list.append(fox) self.flags(osapi_share_extension=ext_list) class ExtensionControllerTest(ExtensionTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() self.ext_list = [] self.ext_list.sort() def test_list_extensions_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions") response = request.get_response(app) self.assertEqual(200, response.status_int) # Make sure we have all the extensions, extra extensions being OK. data = jsonutils.loads(response.body) names = [str(x['name']) for x in data['extensions'] if str(x['name']) in self.ext_list] names.sort() self.assertEqual(self.ext_list, names) # Ensure all the timestamps are valid according to iso8601 for ext in data['extensions']: timeutils.parse_isotime(ext['updated']) # Make sure that at least Fox in Sox is correct. 
(fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual( {'name': 'Fox In Socks', 'updated': '2011-01-22T13:25:27-06:00', 'description': 'The Fox In Socks Extension.', 'alias': 'FOXNSOX', 'links': []}, fox_ext) for ext in data['extensions']: url = '/fake/extensions/%s' % ext['alias'] request = webob.Request.blank(url) response = request.get_response(app) output = jsonutils.loads(response.body) self.assertEqual(ext['alias'], output['extension']['alias']) def test_get_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/FOXNSOX") response = request.get_response(app) self.assertEqual(200, response.status_int) data = jsonutils.loads(response.body) self.assertEqual( {"name": "Fox In Socks", "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension.", "alias": "FOXNSOX", "links": []}, data['extension']) def test_get_non_existing_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/4") response = request.get_response(app) self.assertEqual(404, response.status_int) @ddt.ddt class ExtensionAuthorizeTestCase(test.TestCase): @ddt.unpack @ddt.data({'action': 'fake', 'valid': 'api_extension:fake:fake'}, {'action': None, 'valid': 'api_extension:fake'}) def test_extension_authorizer(self, action, valid): self.mock_object(policy, 'enforce') target = 'fake' extensions.extension_authorizer('api', 'fake')( {}, target, action) policy.enforce.assert_called_once_with(mock.ANY, valid, target) def test_extension_authorizer_empty_target(self): self.mock_object(policy, 'enforce') target = None context = mock.Mock() context.project_id = 'fake' context.user_id = 'fake' extensions.extension_authorizer('api', 'fake')( context, target, 'fake') policy.enforce.assert_called_once_with( mock.ANY, mock.ANY, {'project_id': 'fake', 'user_id': 'fake'}) class StubExtensionManager(object): """Provides access to Tweedle Beetles.""" name = "Tweedle Beetle Extension" 
alias = "TWDLBETL" def __init__(self, resource_ext=None, action_ext=None, request_ext=None, controller_ext=None): self.resource_ext = resource_ext self.controller_ext = controller_ext self.extra_resource_ext = None def get_resources(self): resource_exts = [] if self.resource_ext: resource_exts.append(self.resource_ext) if self.extra_resource_ext: resource_exts.append(self.extra_resource_ext) return resource_exts def get_controller_extensions(self): controller_extensions = [] if self.controller_ext: controller_extensions.append(self.controller_ext) return controller_extensions class ExtensionControllerIdFormatTest(test.TestCase): def _bounce_id(self, test_id): class BounceController(object): def show(self, req, id): return id res_ext = extensions.ResourceExtension('bounce', BounceController()) manager = StubExtensionManager(res_ext) app = router.APIRouter(manager) request = webob.Request.blank("/fake/bounce/%s" % test_id) response = request.get_response(app) return response.body def test_id_with_xml_format(self): result = self._bounce_id('foo.xml') self.assertEqual('foo', result.decode('UTF-8')) def test_id_with_json_format(self): result = self._bounce_id('foo.json') self.assertEqual('foo', result.decode('UTF-8')) def test_id_with_bad_format(self): result = self._bounce_id('foo.bad') self.assertEqual('foo.bad', result.decode('UTF-8')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/test_middleware.py0000664000175000017500000000750400000000000021766 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing permissions and # limitations under the License. import ddt from oslo_config import cfg from oslo_serialization import jsonutils import requests from manila.tests.integrated import integrated_helpers @ddt.ddt class TestCORSMiddleware(integrated_helpers._IntegratedTestBase): '''Provide a basic smoke test to ensure CORS middleware is active. The tests below provide minimal confirmation that the CORS middleware is active, and may be configured. For comprehensive tests, please consult the test suite in oslo_middleware. ''' def setUp(self): # Here we monkeypatch GroupAttr.__getattr__, necessary because the # paste.ini method of initializing this middleware creates its own # ConfigOpts instance, bypassing the regular config fixture. # Mocking also does not work, as accessing an attribute on a mock # object will return a MagicMock instance, which will fail # configuration type checks. def _mock_getattr(instance, key): if key != 'allowed_origin': return self._original_call_method(instance, key) return ["http://valid.example.com"] self._original_call_method = cfg.ConfigOpts.GroupAttr.__getattr__ cfg.ConfigOpts.GroupAttr.__getattr__ = _mock_getattr # Initialize the application after all the config overrides are in # place. super(TestCORSMiddleware, self).setUp() def tearDown(self): super(TestCORSMiddleware, self).tearDown() # Reset the configuration overrides. 
cfg.ConfigOpts.GroupAttr.__getattr__ = self._original_call_method @ddt.data( ('http://valid.example.com', 'http://valid.example.com'), ('http://invalid.example.com', None), ) @ddt.unpack def test_options_request(self, origin_url, acao_header_expected): response = self.api.api_request( '', method='OPTIONS', headers={ 'Origin': origin_url, 'Access-Control-Request-Method': 'GET', } ) self.assertEqual(200, response.status_code) self.assertEqual(acao_header_expected, response.headers.get('Access-Control-Allow-Origin')) @ddt.data( ('http://valid.example.com', 'http://valid.example.com'), ('http://invalid.example.com', None), ) @ddt.unpack def test_get_request(self, origin_url, acao_header_expected): response = self.api.api_request( '', method='GET', headers={ 'Origin': origin_url } ) self.assertEqual(404, response.status_code) self.assertEqual(acao_header_expected, response.headers.get('Access-Control-Allow-Origin')) class TestHealthCheckMiddleware(integrated_helpers._IntegratedTestBase): def test_healthcheck(self): # We verify that we return a HTTP200 when calling api_get url = 'http://%s:%s/healthcheck' % (self.osapi.host, self.osapi.port) response = requests.request( 'GET', url, headers={'Accept': 'application/json'}) output = jsonutils.loads(response.content) self.assertEqual(200, response.status_code) self.assertEqual(['OK'], output['reasons']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/test_schemas.py0000664000175000017500000001416700000000000021277 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import jsonschema.exceptions from oslo_log import log from manila.api.v2 import router from manila.api.validation import validators from manila import test LOG = log.getLogger(__name__) class SchemaTest(test.TestCase): def setUp(self): super().setUp() self.router = router.APIRouter() self.meta_schema = validators._SchemaValidator.validator_org def test_schemas(self): missing_request_schemas = set() missing_query_schemas = set() missing_response_schemas = set() invalid_schemas = set() def _validate_schema(func, schema): try: self.meta_schema.check_schema(schema) except jsonschema.exceptions.SchemaError: LOG.exception('schema validation failed') invalid_schemas.add(func.__qualname__) def _validate_func(func, method): if getattr(func, 'removed', False): return if method in ("POST", "PUT", "PATCH"): # request body validation if not hasattr(func, '_request_body_schema'): missing_request_schemas.add(func.__qualname__) else: _validate_schema(func, func._request_body_schema) elif method in ("GET",): # request query string validation if not hasattr(func, '_request_query_schema'): missing_query_schemas.add(func.__qualname__) else: _validate_schema(func, func._request_query_schema) # response body validation if not hasattr(func, '_response_body_schema'): missing_response_schemas.add(func.__qualname__) else: _validate_schema(func, func._response_body_schema) for route in self.router.map.matchlist: if 'controller' not in route.defaults: continue controller = route.defaults['controller'] if not getattr(controller.controller, '_validated', False): continue # NOTE: This is 
effectively a reimplementation of # 'routes.route.Route.make_full_route' that uses OpenAPI-compatible # template strings instead of regexes for paramters path = "" for part in route.routelist: if isinstance(part, dict): path += "{" + part["name"] + "}" else: path += part method = ( route.conditions.get("method", "GET")[0] if route.conditions else "GET" ) action = route.defaults["action"] if path.endswith('/action'): # all actions should use POST assert method == 'POST' wsgi_actions = [ (k, v, controller.controller) for k, v in controller.controller.wsgi_actions.items() ] for ( wsgi_action, wsgi_method, action_controller ) in wsgi_actions: versioned_methods = getattr( action_controller, 'versioned_methods', {} ) if wsgi_method in versioned_methods: # versioned method for versioned_method in sorted( versioned_methods[action], key=lambda v: v.start_version ): func = versioned_method.func _validate_func(func, method) else: # unversioned method func = controller.wsgi_actions[wsgi_action] _validate_func(func, method) else: # body validation versioned_methods = getattr( controller.controller, 'versioned_methods', {} ) if action in versioned_methods: # versioned method for versioned_method in sorted( versioned_methods[action], key=lambda v: v.start_version ): func = versioned_method.func _validate_func(func, method) else: if not hasattr(controller.controller, action): # these are almost certainly because of use of # routes.mapper.Mapper.resource, which we should remove continue # unversioned method func = getattr(controller.controller, action) _validate_func(func, method) if missing_request_schemas: raise self.failureException( f"Found API resources without request body schemas: " f"{sorted(missing_request_schemas)}" ) if missing_query_schemas: raise self.failureException( f"Found API resources without request query schemas: " f"{sorted(missing_query_schemas)}" ) if missing_response_schemas: raise self.failureException( f"Found API resources without response body schemas: 
" f"{sorted(missing_response_schemas)}" ) if invalid_schemas: raise self.failureException( f"Found API resources with invalid schemas: " f"{sorted(invalid_schemas)}" ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/test_validation.py0000664000175000017500000004073300000000000022004 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http import re from manila.api.openstack import api_version_request as api_version from manila.api import validation from manila.api.validation import parameter_types from manila import exception from manila import test class FakeRequest(object): api_version_request = api_version.APIVersionRequest("3.0") environ = {} class APIValidationTestCase(test.TestCase): def setUp(self, schema=None): super().setUp() self.post = None if schema is not None: @validation.request_body_schema(schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def check_validation_error(self, method, body, expected_detail, req=None): if not req: req = FakeRequest() try: method( body=body, req=req, ) except exception.ValidationError as ex: self.assertEqual(http.BAD_REQUEST, ex.kwargs['code']) if isinstance(expected_detail, list): self.assertIn( ex.kwargs['detail'], expected_detail, 'Exception details did not match expected', ) elif not re.match(expected_detail, ex.kwargs['detail']): self.assertEqual( expected_detail, ex.kwargs['detail'], 'Exception details did not match expected', ) except Exception as ex: self.fail('An unexpected exception happens: %s' % ex) else: self.fail('Any exception did not happen.') class RequiredDisableTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, } super().setUp(schema=schema) def test_validate_required_disable(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest()), ) class RequiredEnableTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], } super().setUp(schema=schema) def test_validate_required_enable(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest()), ) def test_validate_required_enable_fails(self): detail = "'foo' is a required property" self.check_validation_error( self.post, body={'abc': 1}, expected_detail=detail ) class AdditionalPropertiesEnableTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], } super().setUp(schema=schema) def test_validate_additionalProperties_enable(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 1, 'ext': 1}, req=FakeRequest()), ) class 
AdditionalPropertiesDisableTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], 'additionalProperties': False, } super().setUp(schema=schema) def test_validate_additionalProperties_disable(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest()), ) def test_validate_additionalProperties_disable_fails(self): detail = "Additional properties are not allowed ('ext' was unexpected)" self.check_validation_error( self.post, body={'foo': 1, 'ext': 1}, expected_detail=detail ) class PatternPropertiesTestCase(APIValidationTestCase): def setUp(self): schema = { 'patternProperties': { '^[a-zA-Z0-9]{1,10}$': {'type': 'string'}, }, 'additionalProperties': False, } super().setUp(schema=schema) def test_validate_patternProperties(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 'bar'}, req=FakeRequest()), ) def test_validate_patternProperties_fails(self): details = [ "Additional properties are not allowed ('__' was unexpected)", "'__' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'", ] self.check_validation_error( self.post, body={'__': 'bar'}, expected_detail=details ) details = [ "'' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'", "Additional properties are not allowed ('' was unexpected)", ] self.check_validation_error( self.post, body={'': 'bar'}, expected_detail=details ) details = [ ( "'0123456789a' does not match any of the regexes: " "'^[a-zA-Z0-9]{1,10}$'" ), ( "Additional properties are not allowed ('0123456789a' was " "unexpected)" ), ] self.check_validation_error( self.post, body={'0123456789a': 'bar'}, expected_detail=details ) detail = "expected string or bytes-like object" self.check_validation_error( self.post, body={None: 'bar'}, expected_detail=detail ) class StringTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 
}, }, } super().setUp(schema=schema) def test_validate_string(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 'abc'}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': '0'}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': ''}, req=FakeRequest()), ) def test_validate_string_fails(self): detail = ( "Invalid input for field/attribute foo. Value: 1. " "1 is not of type 'string'" ) self.check_validation_error( self.post, body={'foo': 1}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 1.5. " "1.5 is not of type 'string'" ) self.check_validation_error( self.post, body={'foo': 1.5}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: True. " "True is not of type 'string'" ) self.check_validation_error( self.post, body={'foo': True}, expected_detail=detail ) class StringLengthTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'minLength': 1, 'maxLength': 10, }, }, } super().setUp(schema=schema) def test_validate_string_length(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': '0'}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': '0123456789'}, req=FakeRequest()), ) def test_validate_string_length_fails(self): # checks for jsonschema output from 3.2.x and 4.21.x detail = ( "Invalid input for field/attribute foo. Value: . " "'' (is too short|should be non-empty)" ) self.check_validation_error( self.post, body={'foo': ''}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 0123456789a. 
" "'0123456789a' is too long" ) self.check_validation_error( self.post, body={'foo': '0123456789a'}, expected_detail=detail ) class IntegerTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', }, }, } super().setUp(schema=schema) def test_validate_integer(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': '0123456789'}, req=FakeRequest()), ) def test_validate_integer_fails(self): detail = ( "Invalid input for field/attribute foo. Value: abc. " "'abc' does not match '^[0-9]+$'" ) self.check_validation_error( self.post, body={'foo': 'abc'}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: True. " "True is not of type 'integer', 'string'" ) self.check_validation_error( self.post, body={'foo': True}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 0xffff. " "'0xffff' does not match '^[0-9]+$'" ) self.check_validation_error( self.post, body={'foo': '0xffff'}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 1.01. " "1.01 is not of type 'integer', 'string'" ) self.check_validation_error( self.post, body={'foo': 1.01}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 1.0. 
" "'1.0' does not match '^[0-9]+$'" ) self.check_validation_error( self.post, body={'foo': '1.0'}, expected_detail=detail ) class IntegerRangeTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1, 'maximum': 10, }, }, } super().setUp(schema=schema) def test_validate_integer_range(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 10}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest()), ) def test_validate_integer_range_fails(self): detail = ( "Invalid input for field/attribute foo. Value: 0. " "0(.0)? is less than the minimum of 1" ) self.check_validation_error( self.post, body={'foo': 0}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 11. " "11(.0)? is greater than the maximum of 10" ) self.check_validation_error( self.post, body={'foo': 11}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 0. " "0(.0)? is less than the minimum of 1" ) self.check_validation_error( self.post, body={'foo': '0'}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 11. " "11(.0)? 
is greater than the maximum of 10" ) self.check_validation_error( self.post, body={'foo': '11'}, expected_detail=detail ) class BooleanTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': parameter_types.boolean, }, } super().setUp(schema=schema) def test_validate_boolean(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': True}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': False}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 'True'}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': 'False'}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': '0'}, req=FakeRequest()), ) def test_validate_boolean_fails(self): enum_boolean = ( "[True, 'True', 'TRUE', 'true', '1', 'ON', 'On', " "'on', 'YES', 'Yes', 'yes', 'y', 't', " "False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', " "'off', 'NO', 'No', 'no', 'n', 'f']" ) detail = ( "Invalid input for field/attribute foo. Value: bar. " "'bar' is not one of %s" ) % enum_boolean self.check_validation_error( self.post, body={'foo': 'bar'}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 2. 
" "'2' is not one of %s" ) % enum_boolean self.check_validation_error( self.post, body={'foo': '2'}, expected_detail=detail ) class DatetimeTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['string', 'null'], 'format': 'date-time', }, }, } super().setUp(schema=schema) def test_validate_datetime(self): self.assertEqual( 'Validation succeeded.', self.post(body={'foo': '2017-01-14T01:00:00Z'}, req=FakeRequest()), ) self.assertEqual( 'Validation succeeded.', self.post(body={'foo': None}, req=FakeRequest()), ) def test_validate_datetime_fails(self): detail = ( "Invalid input for field/attribute foo. Value: True. " "True is not of type 'string', 'null'" ) self.check_validation_error( self.post, body={'foo': True}, expected_detail=detail ) detail = ( "Invalid input for field/attribute foo. Value: 123. " "'123' is not a 'date-time'" ) self.check_validation_error( self.post, body={'foo': '123'}, expected_detail=detail ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/test_versions.py0000664000175000017500000003123400000000000021516 0ustar00zuulzuul00000000000000# Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_serialization import jsonutils from oslo_utils import encodeutils from manila.api.openstack import api_version_request from manila.api.openstack import wsgi from manila.api.v1 import router from manila.api import versions from manila import test from manila.tests.api import fakes version_header_name = 'X-OpenStack-Manila-API-Version' experimental_header_name = 'X-OpenStack-Manila-API-Experimental' @ddt.ddt class VersionsControllerTestCase(test.TestCase): def setUp(self): super(VersionsControllerTestCase, self).setUp() self.wsgi_apps = (versions.VersionsRouter(), router.APIRouter()) @ddt.data('1.0', '1.1', '2.0', '3.0') def test_versions_root(self, version): req = fakes.HTTPRequest.blank('/', base_url='http://localhost') req.method = 'GET' req.content_type = 'application/json' req.headers = {version_header_name: version} response = req.get_response(versions.VersionsRouter()) self.assertEqual(300, response.status_int) body = jsonutils.loads(response.body) version_list = body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v1.0', 'v2.0'}, set(ids)) self.assertNotIn(version_header_name, response.headers) self.assertNotIn('Vary', response.headers) v1 = [v for v in version_list if v['id'] == 'v1.0'][0] self.assertEqual('', v1.get('min_version')) self.assertEqual('', v1.get('version')) self.assertEqual('DEPRECATED', v1.get('status')) v2 = [v for v in version_list if v['id'] == 'v2.0'][0] self.assertEqual(api_version_request._MIN_API_VERSION, v2.get('min_version')) self.assertEqual(api_version_request._MAX_API_VERSION, v2.get('version')) self.assertEqual('CURRENT', v2.get('status')) @ddt.data('1.0', '1.1', api_version_request._MIN_API_VERSION, api_version_request._MAX_API_VERSION) def test_versions_v1(self, version): req = fakes.HTTPRequest.blank('/', base_url='http://localhost/v1') req.method = 'GET' req.content_type = 'application/json' req.headers = {version_header_name: version} response = 
req.get_response(router.APIRouter()) self.assertEqual(200, response.status_int) body = jsonutils.loads(response.body) version_list = body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v1.0'}, set(ids)) self.assertEqual('1.0', response.headers[version_header_name]) self.assertEqual(version_header_name, response.headers['Vary']) self.assertEqual('', version_list[0].get('min_version')) self.assertEqual('', version_list[0].get('version')) self.assertEqual('DEPRECATED', version_list[0].get('status')) @ddt.data(api_version_request._MIN_API_VERSION, api_version_request._MAX_API_VERSION) def test_versions_v2(self, version): req = fakes.HTTPRequest.blank('/', base_url='http://localhost/v2') req.method = 'GET' req.content_type = 'application/json' req.headers = {version_header_name: version} response = req.get_response(router.APIRouter()) self.assertEqual(200, response.status_int) body = jsonutils.loads(response.body) version_list = body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v2.0'}, set(ids)) self.assertEqual(version, response.headers[version_header_name]) self.assertEqual(version_header_name, response.headers['Vary']) v2 = [v for v in version_list if v['id'] == 'v2.0'][0] self.assertEqual(api_version_request._MIN_API_VERSION, v2.get('min_version')) self.assertEqual(api_version_request._MAX_API_VERSION, v2.get('version')) def test_versions_version_invalid(self): req = fakes.HTTPRequest.blank('/', base_url='http://localhost/v2') req.method = 'GET' req.content_type = 'application/json' req.headers = {version_header_name: '2.0.1'} for app in self.wsgi_apps: response = req.get_response(app) self.assertEqual(400, response.status_int) def test_versions_version_not_found(self): api_version_request_3_0 = api_version_request.APIVersionRequest('3.0') self.mock_object(api_version_request, 'max_api_version', mock.Mock(return_value=api_version_request_3_0)) class Controller(wsgi.Controller): @wsgi.Controller.api_version('2.0', '2.0') 
def index(self, req): return 'off' req = fakes.HTTPRequest.blank('/tests', base_url='http://localhost/v2') req.headers = {version_header_name: '2.5'} app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(404, response.status_int) def test_versions_version_not_acceptable(self): req = fakes.HTTPRequest.blank('/', base_url='http://localhost/v2') req.method = 'GET' req.content_type = 'application/json' req.headers = {version_header_name: '3.0'} response = req.get_response(router.APIRouter()) self.assertEqual(406, response.status_int) self.assertEqual('3.0', response.headers[version_header_name]) self.assertEqual(version_header_name, response.headers['Vary']) @ddt.data(['2.5', 200], ['2.55', 404]) @ddt.unpack def test_req_version_matches(self, version, HTTP_ret): version_request = api_version_request.APIVersionRequest(version) self.mock_object(api_version_request, 'max_api_version', mock.Mock(return_value=version_request)) class Controller(wsgi.Controller): @wsgi.Controller.api_version('2.0', '2.6') def index(self, req): return 'off' req = fakes.HTTPRequest.blank('/tests', base_url='http://localhost/v2') req.headers = {version_header_name: version} app = fakes.TestRouter(Controller()) response = req.get_response(app) if HTTP_ret == 200: self.assertEqual(b'off', response.body) elif HTTP_ret == 404: self.assertNotEqual(b'off', response.body) self.assertEqual(HTTP_ret, response.status_int) @ddt.data(['2.5', 'older'], ['2.37', 'newer']) @ddt.unpack def test_req_version_matches_with_if(self, version, ret_val): version_request = api_version_request.APIVersionRequest(version) self.mock_object(api_version_request, 'max_api_version', mock.Mock(return_value=version_request)) class Controller(wsgi.Controller): def index(self, req): req_version = req.api_version_request if req_version.matches('2.1', '2.8'): return 'older' if req_version.matches('2.9', '2.88'): return 'newer' req = fakes.HTTPRequest.blank('/tests', base_url='http://localhost/v2') 
req.headers = {version_header_name: version} app = fakes.TestRouter(Controller()) response = req.get_response(app) resp = encodeutils.safe_decode(response.body, incoming='utf-8') self.assertEqual(ret_val, resp) self.assertEqual(200, response.status_int) @ddt.data(['2.5', 'older'], ['2.37', 'newer']) @ddt.unpack def test_req_version_matches_with_None(self, version, ret_val): version_request = api_version_request.APIVersionRequest(version) self.mock_object(api_version_request, 'max_api_version', mock.Mock(return_value=version_request)) class Controller(wsgi.Controller): def index(self, req): req_version = req.api_version_request if req_version.matches(None, '2.8'): return 'older' if req_version.matches('2.9', None): return 'newer' req = fakes.HTTPRequest.blank('/tests', base_url='http://localhost/v2') req.headers = {version_header_name: version} app = fakes.TestRouter(Controller()) response = req.get_response(app) resp = encodeutils.safe_decode(response.body, incoming='utf-8') self.assertEqual(ret_val, resp) self.assertEqual(200, response.status_int) def test_req_version_matches_with_None_None(self): version_request = api_version_request.APIVersionRequest('2.39') self.mock_object(api_version_request, 'max_api_version', mock.Mock(return_value=version_request)) class Controller(wsgi.Controller): def index(self, req): req_version = req.api_version_request # This case is artificial, and will return True if req_version.matches(None, None): return "Pass" req = fakes.HTTPRequest.blank('/tests', base_url='http://localhost/v2') req.headers = {version_header_name: '2.39'} app = fakes.TestRouter(Controller()) response = req.get_response(app) resp = encodeutils.safe_decode(response.body, incoming='utf-8') self.assertEqual("Pass", resp) self.assertEqual(200, response.status_int) @ddt.ddt class ExperimentalAPITestCase(test.TestCase): class Controller(wsgi.Controller): @wsgi.Controller.api_version('2.0', '2.0') def index(self, req): return {'fake_key': 'fake_value'} 
@wsgi.Controller.api_version('2.1', '2.1', experimental=True) # noqa def index(self, req): # pylint: disable=function-redefined # noqa F811 return {'fake_key': 'fake_value'} def setUp(self): super(ExperimentalAPITestCase, self).setUp() self.app = fakes.TestRouter(ExperimentalAPITestCase.Controller()) self.req = fakes.HTTPRequest.blank('/tests', base_url='http://localhost/v2') @ddt.data(True, False) def test_stable_api_always_called(self, experimental): self.req.headers = {version_header_name: '2.0'} if experimental: self.req.headers[experimental_header_name] = experimental response = self.req.get_response(self.app) self.assertEqual(200, response.status_int) self.assertEqual('2.0', response.headers[version_header_name]) if experimental: self.assertEqual('%s' % experimental, response.headers.get(experimental_header_name)) else: self.assertNotIn(experimental_header_name, response.headers) def test_experimental_api_called_when_requested(self): self.req.headers = { version_header_name: '2.1', experimental_header_name: 'True', } response = self.req.get_response(self.app) self.assertEqual(200, response.status_int) self.assertEqual('2.1', response.headers[version_header_name]) self.assertTrue(response.headers.get(experimental_header_name)) def test_experimental_api_not_called_when_not_requested(self): self.req.headers = {version_header_name: '2.1'} response = self.req.get_response(self.app) self.assertEqual(404, response.status_int) self.assertNotIn(experimental_header_name, response.headers) def test_experimental_header_returned_in_exception(self): api_version_request_3_0 = api_version_request.APIVersionRequest('3.0') self.mock_object(api_version_request, 'max_api_version', mock.Mock(return_value=api_version_request_3_0)) self.req.headers = { version_header_name: '2.2', experimental_header_name: 'True', } response = self.req.get_response(self.app) self.assertEqual(404, response.status_int) self.assertTrue(response.headers.get(experimental_header_name)) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/test_wsgi.py0000664000175000017500000000340000000000000020611 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test WSGI basics and provide some helper functions for other WSGI tests. 
""" from oslo_service import wsgi import routes import webob from manila import test from manila.wsgi import common as common_wsgi class Test(test.TestCase): def test_router(self): class Application(common_wsgi.Application): """Test application to call from router.""" def __call__(self, environ, start_response): start_response("200", []) return ['Router result'] class Router(wsgi.Router): """Test router.""" def __init__(self): mapper = routes.Mapper() mapper.connect("/test", controller=Application()) super(Router, self).__init__(mapper) result = webob.Request.blank('/test').get_response(Router()) self.assertEqual("Router result", result.body) result = webob.Request.blank('/bad').get_response(Router()) self.assertNotEqual(result.body, "Router result") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9576712 manila-21.0.0/manila/tests/api/v1/0000775000175000017500000000000000000000000016560 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/__init__.py0000664000175000017500000000000000000000000020657 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/stubs.py0000664000175000017500000000763500000000000020305 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime from manila.common import constants from manila import exception as exc FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' FAKE_UUIDS = {} def stub_volume(id, **kwargs): volume = { 'id': id, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'host': 'fakehost', 'size': 1, 'availability_zone': 'fakeaz', 'instance_uuid': 'fakeuuid', 'mountpoint': '/', 'status': 'fakestatus', 'attach_status': 'attached', 'bootable': 'false', 'name': 'vol name', 'display_name': 'displayname', 'display_description': 'displaydesc', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'snapshot_id': None, 'source_volid': None, 'share_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', 'volume_metadata': [], 'share_type': {'name': 'share_type_name'}, 'volume_type': {'name': 'share_type_name'}} volume.update(kwargs) return volume def stub_volume_create(self, context, size, name, description, snapshot, **param): vol = stub_volume('1') vol['size'] = size vol['display_name'] = name vol['display_description'] = description vol['source_volid'] = None try: vol['snapshot_id'] = snapshot['id'] except (KeyError, TypeError): vol['snapshot_id'] = None vol['availability_zone'] = param.get('availability_zone', 'fakeaz') return vol def stub_volume_create_from_image(self, context, size, name, description, snapshot, volume_type, metadata, availability_zone): vol = stub_volume('1') vol['status'] = 'creating' vol['size'] = size vol['display_name'] = name vol['display_description'] = description vol['availability_zone'] = 'manila' return vol def stub_volume_update(self, context, *args, **param): pass def stub_volume_delete(self, context, *args, **param): pass def stub_volume_get(self, context, volume_id): return stub_volume(volume_id) def stub_volume_get_notfound(self, context, volume_id): raise exc.NotFound def 
stub_volume_get_all(context, search_opts=None): return [stub_volume(100, project_id='fake'), stub_volume(101, project_id='superfake'), stub_volume(102, project_id='superduperfake')] def stub_volume_get_all_by_project(self, context, search_opts=None): return [stub_volume_get(self, context, '1')] def stub_snapshot(id, **kwargs): snapshot = {'id': id, 'volume_id': 12, 'status': constants.STATUS_AVAILABLE, 'volume_size': 100, 'created_at': None, 'display_name': 'Default name', 'display_description': 'Default description', 'project_id': 'fake'} snapshot.update(kwargs) return snapshot def stub_snapshot_get_all(self): return [stub_snapshot(100, project_id='fake'), stub_snapshot(101, project_id='superfake'), stub_snapshot(102, project_id='superduperfake')] def stub_snapshot_get_all_by_project(self, context): return [stub_snapshot(1)] def stub_snapshot_update(self, context, *args, **param): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_limits.py0000664000175000017500000007411600000000000021503 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests dealing with HTTP rate-limiting. 
""" import ddt import http.client as http_client import io from oslo_serialization import jsonutils import webob from manila.api.openstack import api_version_request as api_version from manila.api.v1 import limits from manila.api import views import manila.context from manila import test from manila.tests.api import fakes TEST_LIMITS = [ limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE), limits.Limit("POST", "/shares", "^/shares", 3, limits.PER_MINUTE), limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), limits.Limit("PUT", "/shares", "^/shares", 5, limits.PER_MINUTE), ] SHARE_REPLICAS_LIMIT_MICROVERSION = "2.58" SHARE_GROUP_QUOTA_MICROVERSION = "2.40" class BaseLimitTestSuite(test.TestCase): """Base test suite which provides relevant stubs and time abstraction.""" def setUp(self): super(BaseLimitTestSuite, self).setUp() self.time = 0.0 self.mock_object(limits.Limit, "_get_time", self._get_time) self.absolute_limits = {} def stub_get_project_quotas(context, project_id, usages=True): quotas = {} for mapping_key in ('limit', 'in_use'): for k, v in self.absolute_limits.get(mapping_key, {}).items(): if k not in quotas: quotas[k] = {} quotas[k].update({mapping_key: v}) return quotas self.mock_object(manila.quota.QUOTAS, "get_project_quotas", stub_get_project_quotas) def _get_time(self): """Return the "time" according to this test suite.""" return self.time @ddt.ddt class LimitsControllerTest(BaseLimitTestSuite): """Tests for `limits.LimitsController` class.""" def setUp(self): """Run before each test.""" super(LimitsControllerTest, self).setUp() self.controller = limits.LimitsController() def _get_index_request(self, accept_header="application/json", microversion=api_version.DEFAULT_API_VERSION): """Helper to set routing arguments.""" request = fakes.HTTPRequest.blank('/limit', version=microversion) request.accept = accept_header return request def _populate_limits(self, request): """Put limit info 
into a request.""" _limits = [ limits.Limit("GET", "*", ".*", 10, 60).display(), limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), limits.Limit("GET", "changes-since*", "changes-since", 5, 60).display(), ] request.environ["manila.limits"] = _limits return request def test_empty_index_json(self): """Test getting empty limit details in JSON.""" request = self._get_index_request() response = self.controller.index(request) expected = { "limits": { "rate": [], "absolute": {}, }, } self.assertEqual(expected, response) @ddt.data(api_version.DEFAULT_API_VERSION, SHARE_REPLICAS_LIMIT_MICROVERSION) def test_index_json(self, microversion): """Test getting limit details in JSON.""" request = self._get_index_request(microversion=microversion) request = self._populate_limits(request) self.absolute_limits = { 'limit': { 'shares': 11, 'gigabytes': 22, 'snapshots': 33, 'snapshot_gigabytes': 44, 'share_networks': 55, }, 'in_use': { 'shares': 3, 'gigabytes': 4, 'snapshots': 5, 'snapshot_gigabytes': 6, 'share_networks': 7, }, } if microversion == SHARE_GROUP_QUOTA_MICROVERSION: self.absolute_limits['limit']['share_groups'] = 20 self.absolute_limits['limit']['share_group_snapshots'] = 20 self.absolute_limits['in_use']['share_groups'] = 3 self.absolute_limits['in_use']['share_group_snapshots'] = 3 if microversion == SHARE_REPLICAS_LIMIT_MICROVERSION: self.absolute_limits['limit']['share_replicas'] = 20 self.absolute_limits['limit']['replica_gigabytes'] = 20 self.absolute_limits['in_use']['share_replicas'] = 3 self.absolute_limits['in_use']['replica_gigabytes'] = 3 response = self.controller.index(request) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, { "verb": "POST", "next-available": "1970-01-01T00:00:00Z", "unit": "HOUR", "value": 5, "remaining": 5, }, ], }, { "regex": "changes-since", "uri": "changes-since*", "limit": [ { "verb": "GET", 
"next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 5, "remaining": 5, }, ], }, ], "absolute": { "totalSharesUsed": 3, "totalShareGigabytesUsed": 4, "totalShareSnapshotsUsed": 5, "totalSnapshotGigabytesUsed": 6, "totalShareNetworksUsed": 7, "maxTotalShares": 11, "maxTotalShareGigabytes": 22, "maxTotalShareSnapshots": 33, "maxTotalSnapshotGigabytes": 44, "maxTotalShareNetworks": 55, }, }, } if microversion == SHARE_GROUP_QUOTA_MICROVERSION: expected['limits']['absolute']["maxTotalShareGroups"] = 20 expected['limits']['absolute']["totalShareGroupsUsed"] = 3 expected['limits']['absolute']["maxTotalShareGroupSnapshots"] = 20 expected['limits']['absolute']["totalShareGroupSnapshots"] = 3 if microversion == SHARE_REPLICAS_LIMIT_MICROVERSION: expected['limits']['absolute']["maxTotalShareReplicas"] = 20 expected['limits']['absolute']["totalShareReplicasUsed"] = 3 expected['limits']['absolute']["maxTotalReplicaGigabytes"] = 20 expected['limits']['absolute']["totalReplicaGigabytesUsed"] = 3 # body = jsonutils.loads(response.body) self.assertEqual(expected, response) def _populate_limits_diff_regex(self, request): """Put limit info into a request.""" _limits = [ limits.Limit("GET", "*", ".*", 10, 60).display(), limits.Limit("GET", "*", "*.*", 10, 60).display(), ] request.environ["manila.limits"] = _limits return request def test_index_diff_regex(self): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits_diff_regex(request) response = self.controller.index(request) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, { "regex": "*.*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00Z", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, ], "absolute": {}, }, } self.assertEqual(expected, response) def 
_test_index_absolute_limits_json(self, expected): request = self._get_index_request() response = self.controller.index(request) self.assertEqual(expected, response['limits']['absolute']) def test_index_ignores_extra_absolute_limits_json(self): self.absolute_limits = { 'in_use': {'unknown_limit': 9000}, 'limit': {'unknown_limit': 9001}, } self._test_index_absolute_limits_json({}) class TestLimiter(limits.Limiter): pass class LimitMiddlewareTest(BaseLimitTestSuite): """Tests for the `limits.RateLimitingMiddleware` class.""" @webob.dec.wsgify def _empty_app(self, request): """Do-nothing WSGI app.""" pass def setUp(self): """Prepare middleware for use through fake WSGI app.""" super(LimitMiddlewareTest, self).setUp() _limits = '(GET, *, .*, 1, MINUTE)' self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, "%s.TestLimiter" % self.__class__.__module__) def test_limit_class(self): """Test that middleware selected correct limiter class.""" assert isinstance(self.app._limiter, TestLimiter) def test_good_request(self): """Test successful GET request through middleware.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) def test_limited_request_json(self): """Test a rate-limited (413) GET request through middleware.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(200, response.status_int) request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(413, response.status_int) self.assertIn('Retry-After', response.headers) retry_after = int(response.headers['Retry-After']) self.assertAlmostEqual(retry_after, 60, 1) body = jsonutils.loads(response.body) expected = "Only 1 GET request(s) can be made to * every minute." 
value = body["overLimitFault"]["details"].strip() self.assertEqual(expected, value) class LimitTest(BaseLimitTestSuite): """Tests for the `limits.Limit` class.""" def test_GET_no_delay(self): """Test a limit handles 1 GET per second.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(0, limit.next_request) self.assertEqual(0, limit.last_request) def test_GET_delay(self): """Test two calls to 1 GET per second limit.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertIsNone(delay) delay = limit("GET", "/anything") self.assertEqual(1, delay) self.assertEqual(1, limit.next_request) self.assertEqual(0, limit.last_request) self.time += 4 delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(4, limit.next_request) self.assertEqual(4, limit.last_request) class ParseLimitsTest(BaseLimitTestSuite): """Test default limits parser. Tests for the default limits parser in the in-memory `limits.Limiter` class. 
""" def test_invalid(self): """Test that parse_limits() handles invalid input correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, ';;;;;') def test_bad_rule(self): """Test that parse_limits() handles bad rules correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, 'GET, *, .*, 20, minute') def test_missing_arg(self): """Test that parse_limits() handles missing args correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20)') def test_bad_value(self): """Test that parse_limits() handles bad values correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, foo, minute)') def test_bad_unit(self): """Test that parse_limits() handles bad units correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20, lightyears)') def test_multiple_rules(self): """Test that parse_limits() handles multiple rules correctly.""" try: lim = limits.Limiter.parse_limits( '(get, *, .*, 20, minute);' '(PUT, /foo*, /foo.*, 10, hour);' '(POST, /bar*, /bar.*, 5, second);' '(Say, /derp*, /derp.*, 1, day)') except ValueError as e: assert False, str(e) # Make sure the number of returned limits are correct self.assertEqual(4, len(lim)) # Check all the verbs... expected = ['GET', 'PUT', 'POST', 'SAY'] self.assertEqual(expected, [t.verb for t in lim]) # ...the URIs... expected = ['*', '/foo*', '/bar*', '/derp*'] self.assertEqual(expected, [t.uri for t in lim]) # ...the regexes... expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] self.assertEqual(expected, [t.regex for t in lim]) # ...the values... expected = [20, 10, 5, 1] self.assertEqual(expected, [t.value for t in lim]) # ...and the units... 
expected = [limits.PER_MINUTE, limits.PER_HOUR, limits.PER_SECOND, limits.PER_DAY] self.assertEqual(expected, [t.unit for t in lim]) class LimiterTest(BaseLimitTestSuite): """Tests for the in-memory `limits.Limiter` class.""" def setUp(self): """Run before each test.""" super(LimiterTest, self).setUp() userlimits = {'user:user3': ''} self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) def _check(self, num, verb, url, username=None): """Check and yield results from checks.""" for x in range(num): yield self.limiter.check_for_delay(verb, url, username)[0] def _check_sum(self, num, verb, url, username=None): """Check and sum results from checks.""" results = self._check(num, verb, url, username) return sum(item for item in results if item) def test_no_delay_GET(self): """Test no delay on GET for single call. Simple test to ensure no delay on a single call for a limit verb we didn"t set. """ delay = self.limiter.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_no_delay_PUT(self): """Test no delay on single call. Simple test to ensure no delay on a single call for a known limit. """ delay = self.limiter.check_for_delay("PUT", "/anything") self.assertEqual((None, None), delay) def test_delay_PUT(self): """Ensure 11th PUT will be delayed. Ensure the 11th PUT will result in a delay of 6.0 seconds until the next request will be granted. """ expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) def test_delay_POST(self): """Ensure 8th POST will be delayed. Ensure the 8th POST will result in a delay of 6.0 seconds until the next request will be granced. 
""" expected = [None] * 7 results = list(self._check(7, "POST", "/anything")) self.assertEqual(expected, results) expected = 60.0 / 7.0 results = self._check_sum(1, "POST", "/anything") self.assertAlmostEqual(expected, results, 8) def test_delay_GET(self): """Ensure the 11th GET will result in NO delay.""" expected = [None] * 11 results = list(self._check(11, "GET", "/anything")) self.assertEqual(expected, results) def test_delay_PUT_volumes(self): """Ensure PUT limits. Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is still OK after 5 requests...but then after 11 total requests, PUT limiting kicks in. """ # First 6 requests on PUT /volumes expected = [None] * 5 + [12.0] results = list(self._check(6, "PUT", "/shares")) self.assertEqual(expected, results) # Next 5 request on PUT /anything expected = [None] * 4 + [6.0] results = list(self._check(5, "PUT", "/anything")) self.assertEqual(expected, results) def test_delay_PUT_wait(self): """Test limit handling. Ensure after hitting the limit and then waiting for the correct amount of time, the limit will be lifted. 
""" expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) # Advance time self.time += 6.0 expected = [None, 6.0] results = list(self._check(2, "PUT", "/anything")) self.assertEqual(expected, results) def test_multiple_delays(self): """Ensure multiple requests still get a delay.""" expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything")) self.assertEqual(expected, results) self.time += 1.0 expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything")) self.assertEqual(expected, results) def test_user_limit(self): """Test user-specific limits.""" self.assertEqual([], self.limiter.levels['user3']) def test_multiple_users(self): """Tests involving multiple users.""" # User1 expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything", "user1")) self.assertEqual(expected, results) # User2 expected = [None] * 10 + [6.0] * 5 results = list(self._check(15, "PUT", "/anything", "user2")) self.assertEqual(expected, results) # User3 expected = [None] * 20 results = list(self._check(20, "PUT", "/anything", "user3")) self.assertEqual(expected, results) self.time += 1.0 # User1 again expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything", "user1")) self.assertEqual(expected, results) self.time += 1.0 # User1 again expected = [4.0] * 5 results = list(self._check(5, "PUT", "/anything", "user2")) self.assertEqual(expected, results) class WsgiLimiterTest(BaseLimitTestSuite): """Tests for `limits.WsgiLimiter` class.""" def setUp(self): """Run before each test.""" super(WsgiLimiterTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) def _request_data(self, verb, path): """Get data describing a limit request verb/path.""" return jsonutils.dumps({"verb": verb, "path": path}).encode("utf-8") def _request(self, verb, url, username=None): """Send request. 
Make sure that POSTing to the given url causes the given username to perform the given action. Make the internal rate limiter return delay and make sure that the WSGI app returns the correct response. """ if username: request = webob.Request.blank("/%s" % username) else: request = webob.Request.blank("/") request.method = "POST" request.body = self._request_data(verb, url) response = request.get_response(self.app) if "X-Wait-Seconds" in response.headers: self.assertEqual(403, response.status_int) return response.headers["X-Wait-Seconds"] self.assertEqual(204, response.status_int) def test_invalid_methods(self): """Only POSTs should work.""" for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: request = webob.Request.blank("/", method=method) response = request.get_response(self.app) self.assertEqual(405, response.status_int) def test_good_url(self): delay = self._request("GET", "/something") self.assertIsNone(delay) def test_escaping(self): delay = self._request("GET", "/something/jump%20up") self.assertIsNone(delay) def test_response_to_delays(self): delay = self._request("GET", "/delayed") self.assertIsNone(delay) delay = self._request("GET", "/delayed") self.assertEqual('60.00', delay) def test_response_to_delays_usernames(self): delay = self._request("GET", "/delayed", "user1") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user2") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user1") self.assertEqual('60.00', delay) delay = self._request("GET", "/delayed", "user2") self.assertEqual('60.00', delay) class FakeHttplibSocket(object): """Fake `http_client.HTTPResponse` replacement.""" def __init__(self, response_string): """Initialize new `FakeHttplibSocket`.""" self._buffer = io.BytesIO(response_string.encode("utf-8")) def makefile(self, _mode, _other=None): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """Fake `http_client.HTTPConnection`.""" def __init__(self, 
app, host): """Initialize `FakeHttplibConnection`.""" self.app = app self.host = host def request(self, method, path, body="", headers=None): """Translate request to WSGI app. Requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into an `http_client.HTTPResponse`. """ if not headers: headers = {} req = webob.Request.blank(path) req.method = method req.headers = headers req.host = self.host req.body = body.encode("utf-8") resp = str(req.get_response(self.app)) resp = "HTTP/1.0 %s" % resp sock = FakeHttplibSocket(resp) self.http_response = http_client.HTTPResponse(sock) self.http_response.begin() def getresponse(self): """Return our generated response from the request.""" return self.http_response def wire_HTTPConnection_to_WSGI(host, app): """Wire HTTPConnection to WSGI app. Monkeypatches HTTPConnection so that if you try to connect to host, you are instead routed straight to the given WSGI app. After calling this method, when any code calls http_client.HTTPConnection(host) the connection object will be a fake. Its requests will be sent directly to the given WSGI app rather than through a socket. Code connecting to hosts other than host will not be affected. This method may be called multiple times to map different hosts to different apps. This method returns the original HTTPConnection object, so that the caller can restore the default HTTPConnection interface (for all hosts). """ class HTTPConnectionDecorator(object): """Wrapper for HTTPConnection class Wraps the real HTTPConnection class so that when you instantiate the class you might instead get a fake instance. 
""" def __init__(self, wrapped): self.wrapped = wrapped def __call__(self, connection_host, *args, **kwargs): if connection_host == host: return FakeHttplibConnection(app, host) else: return self.wrapped(connection_host, *args, **kwargs) oldHTTPConnection = http_client.HTTPConnection http_client.HTTPConnection = HTTPConnectionDecorator( http_client.HTTPConnection) return oldHTTPConnection class WsgiLimiterProxyTest(BaseLimitTestSuite): """Tests for the `limits.WsgiLimiterProxy` class.""" def setUp(self): """Set up HTTP/WSGI magic. Do some nifty HTTP/WSGI magic which allows for WSGI to be called directly by something like the `http_client` library. """ super(WsgiLimiterProxyTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) self.oldHTTPConnection = ( wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") def test_200(self): """Successful request test.""" delay = self.proxy.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_403(self): """Forbidden request test.""" delay = self.proxy.check_for_delay("GET", "/delayed") self.assertEqual((None, None), delay) delay, error = self.proxy.check_for_delay("GET", "/delayed") error = error.strip() expected = ("60.00", ( "403 Forbidden\n\nOnly 1 GET request(s) can be made to /delayed " "every minute.").encode("utf-8")) self.assertEqual(expected, (delay, error)) def tearDown(self): # restore original HTTPConnection object http_client.HTTPConnection = self.oldHTTPConnection super(WsgiLimiterProxyTest, self).tearDown() class LimitsViewBuilderTest(test.TestCase): def setUp(self): super(LimitsViewBuilderTest, self).setUp() self.view_builder = views.limits.ViewBuilder() self.rate_limits = [{"URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226}, {"URI": "*/shares", "regex": "^/shares", "value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "resetTime": 1311272226}] 
self.absolute_limits = { "limit": { "shares": 111, "gigabytes": 222, "snapshots": 333, "snapshot_gigabytes": 444, "share_networks": 555, }, "in_use": { "shares": 65, "gigabytes": 76, "snapshots": 87, "snapshot_gigabytes": 98, "share_networks": 107, }, } def test_build_limits(self): request = fakes.HTTPRequest.blank('/') tdate = "2011-07-21T18:17:06Z" expected_limits = { "limits": { "rate": [ {"uri": "*", "regex": ".*", "limit": [{"value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": tdate}]}, {"uri": "*/shares", "regex": "^/shares", "limit": [{"value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "next-available": tdate}]} ], "absolute": { "totalSharesUsed": 65, "totalShareGigabytesUsed": 76, "totalShareSnapshotsUsed": 87, "totalSnapshotGigabytesUsed": 98, "totalShareNetworksUsed": 107, "maxTotalShares": 111, "maxTotalShareGigabytes": 222, "maxTotalShareSnapshots": 333, "maxTotalSnapshotGigabytes": 444, "maxTotalShareNetworks": 555, } } } output = self.view_builder.build(request, self.rate_limits, self.absolute_limits) self.assertDictEqual(expected_limits, output) def test_build_limits_empty_limits(self): request = fakes.HTTPRequest.blank('/') expected_limits = {"limits": {"rate": [], "absolute": {}}} abs_limits = {} rate_limits = [] output = self.view_builder.build(request, rate_limits, abs_limits) self.assertDictEqual(expected_limits, output) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_scheduler_stats.py0000664000175000017500000003146400000000000023375 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_utils import uuidutils from webob import exc from manila.api.openstack import api_version_request as api_version from manila.api.v1 import scheduler_stats from manila import context from manila import exception from manila import policy from manila.scheduler import rpcapi from manila.share import share_types from manila import test from manila.tests.api import fakes FAKE_POOLS = [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', 'capabilities': { 'updated': None, 'total_capacity': 1024, 'free_capacity': 100, 'share_backend_name': 'pool1', 'reserved_percentage': 0, 'driver_version': '1.0.0', 'storage_protocol': 'iSCSI', 'qos': 'False', }, }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', 'capabilities': { 'updated': None, 'total_capacity': 512, 'free_capacity': 200, 'share_backend_name': 'pool2', 'reserved_percentage': 0, 'driver_version': '1.0.1', 'storage_protocol': 'iSER', 'qos': 'True', }, }, ] @ddt.ddt class SchedulerStatsControllerTestCase(test.TestCase): def setUp(self): super(SchedulerStatsControllerTestCase, self).setUp() self.flags(host='fake') self.controller = scheduler_stats.SchedulerStatsController() self.resource_name = self.controller.resource_name self.ctxt = context.RequestContext('admin', 'fake', True) self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) def test_pools_index(self): mock_get_pools = self.mock_object(rpcapi.SchedulerAPI, 'get_pools', 
mock.Mock(return_value=FAKE_POOLS)) req = fakes.HTTPRequest.blank('/v1/fake_project/scheduler_stats/pools') req.environ['manila.context'] = self.ctxt result = self.controller.pools_index(req) expected = { 'pools': [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', } ] } self.assertDictEqual(result, expected) mock_get_pools.assert_called_once_with(self.ctxt, filters={}, cached=True) self.mock_policy_check.assert_called_once_with( self.ctxt, self.resource_name, 'index') @ddt.data(('index', False), ('detail', True)) @ddt.unpack def test_pools_with_share_type_disabled(self, action, detail): mock_get_pools = self.mock_object(rpcapi.SchedulerAPI, 'get_pools', mock.Mock(return_value=FAKE_POOLS)) url = '/v1/fake_project/scheduler-stats/pools/%s' % action url += '?backend=back1&host=host1&pool=pool1' req = fakes.HTTPRequest.blank(url) req.environ['manila.context'] = self.ctxt expected_filters = { 'host': 'host1', 'pool': 'pool1', 'backend': 'back1', } if detail: expected_result = {"pools": FAKE_POOLS} else: expected_result = { 'pools': [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', } ] } result = self.controller._pools(req, action, False) self.assertDictEqual(result, expected_result) mock_get_pools.assert_called_once_with(self.ctxt, filters=expected_filters, cached=True) @ddt.data(('index', False, True), ('index', False, False), ('detail', True, True), ('detail', True, False)) @ddt.unpack def test_pools_with_share_type_enable(self, action, detail, uuid): mock_get_pools = self.mock_object(rpcapi.SchedulerAPI, 'get_pools', mock.Mock(return_value=FAKE_POOLS)) if uuid: share_type = uuidutils.generate_uuid() else: share_type = 'test_type' self.mock_object( share_types, 'get_share_type_by_name_or_id', 
mock.Mock(return_value={'extra_specs': {'snapshot_support': True}})) url = '/v1/fake_project/scheduler-stats/pools/%s' % action url += ('?backend=back1&host=host1&pool=pool1&share_type=%s' % share_type) req = fakes.HTTPRequest.blank(url) req.environ['manila.context'] = self.ctxt expected_filters = { 'host': 'host1', 'pool': 'pool1', 'backend': 'back1', 'capabilities': { 'snapshot_support': True } } if detail: expected_result = {"pools": FAKE_POOLS} else: expected_result = { 'pools': [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', } ] } result = self.controller._pools(req, action, True) self.assertDictEqual(result, expected_result) mock_get_pools.assert_called_once_with(self.ctxt, filters=expected_filters, cached=True) @ddt.data('index', 'detail') def test_pools_with_share_type_not_found(self, action): url = '/v1/fake_project/scheduler-stats/pools/%s' % action url += '?backend=.%2A&host=host1&pool=pool%2A&share_type=fake_name_1' req = fakes.HTTPRequest.blank(url) self.assertRaises(exc.HTTPBadRequest, self.controller._pools, req, action, True) @ddt.data("1.0", "2.22", "2.23") def test_pools_index_with_filters(self, microversion): mock_get_pools = self.mock_object(rpcapi.SchedulerAPI, 'get_pools', mock.Mock(return_value=FAKE_POOLS)) self.mock_object( share_types, 'get_share_type_by_name', mock.Mock(return_value={'extra_specs': {'snapshot_support': True}})) url = '/v1/fake_project/scheduler-stats/pools/detail' url += '?backend=.%2A&host=host1&pool=pool%2A&share_type=test_type' req = fakes.HTTPRequest.blank(url, version=microversion) req.environ['manila.context'] = self.ctxt result = self.controller.pools_index(req) expected = { 'pools': [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', } ] } 
expected_filters = { 'host': 'host1', 'pool': 'pool*', 'backend': '.*', 'share_type': 'test_type', } if (api_version.APIVersionRequest(microversion) >= api_version.APIVersionRequest('2.23')): expected_filters.update( {'capabilities': {'snapshot_support': True}}) expected_filters.pop('share_type', None) self.assertDictEqual(result, expected) mock_get_pools.assert_called_once_with(self.ctxt, filters=expected_filters, cached=True) self.mock_policy_check.assert_called_once_with( self.ctxt, self.resource_name, 'index') def test_get_pools_detail(self): mock_get_pools = self.mock_object(rpcapi.SchedulerAPI, 'get_pools', mock.Mock(return_value=FAKE_POOLS)) req = fakes.HTTPRequest.blank( '/v1/fake_project/scheduler_stats/pools/detail') req.environ['manila.context'] = self.ctxt result = self.controller.pools_detail(req) expected = { 'pools': [ { 'name': 'host1@backend1#pool1', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool1', 'capabilities': { 'updated': None, 'total_capacity': 1024, 'free_capacity': 100, 'share_backend_name': 'pool1', 'reserved_percentage': 0, 'driver_version': '1.0.0', 'storage_protocol': 'iSCSI', 'qos': 'False', }, }, { 'name': 'host1@backend1#pool2', 'host': 'host1', 'backend': 'backend1', 'pool': 'pool2', 'capabilities': { 'updated': None, 'total_capacity': 512, 'free_capacity': 200, 'share_backend_name': 'pool2', 'reserved_percentage': 0, 'driver_version': '1.0.1', 'storage_protocol': 'iSER', 'qos': 'True', }, }, ], } self.assertDictEqual(expected, result) mock_get_pools.assert_called_once_with(self.ctxt, filters={}, cached=True) self.mock_policy_check.assert_called_once_with( self.ctxt, self.resource_name, 'detail') @ddt.data('index', 'detail') def test_pools_forbidden(self, subresource): mock_get_pools = self.mock_object( rpcapi.SchedulerAPI, 'get_pools', mock.Mock(side_effect=exception.AdminRequired( "some traceback here"))) path = '/v1/fake_project/scheduler_stats/pools' path = path + ('/%s' % subresource if subresource == 'detail' else '') 
req = fakes.HTTPRequest.blank(path) req.environ['manila.context'] = self.ctxt self.assertRaises(exc.HTTPForbidden, getattr(self.controller, 'pools_%s' % subresource), req) mock_get_pools.assert_called_once_with(self.ctxt, filters={}, cached=True) class SchedulerStatsTestCase(test.TestCase): def test_create_resource(self): result = scheduler_stats.create_resource() self.assertIsInstance(result.controller, scheduler_stats.SchedulerStatsController) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_security_service.py0000664000175000017500000005040600000000000023565 0ustar00zuulzuul00000000000000# Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from urllib import parse import ddt import webob from manila.api.v1 import security_service from manila.common import constants from manila import db from manila import exception from manila import policy from manila import test from manila.tests.api import fakes @ddt.ddt class ShareApiTest(test.TestCase): """Share Api Test.""" def setUp(self): super(ShareApiTest, self).setUp() self.controller = security_service.SecurityServiceController() self.maxDiff = None self.ss_active_directory = { "created_at": "fake-time", "updated_at": "fake-time-2", "id": 1, "name": "fake-name", "description": "Fake Security Service Desc", "type": constants.SECURITY_SERVICES_ALLOWED_TYPES[0], "dns_ip": "1.1.1.1", "server": "fake-server", "domain": "fake-domain", "user": "fake-user", "password": "fake-password", "status": constants.STATUS_NEW, "project_id": "fake", } self.ss_ldap = { "created_at": "fake-time", "updated_at": "fake-time-2", "id": 2, "name": "ss-ldap", "description": "Fake Security Service Desc", "type": constants.SECURITY_SERVICES_ALLOWED_TYPES[1], "dns_ip": "2.2.2.2", "server": "test-server", "domain": "test-domain", "user": "test-user", "password": "test-password", "status": "active", "project_id": "fake", } self.valid_search_opts = { 'user': 'fake-user', 'server': 'fake-server', 'dns_ip': '1.1.1.1', 'domain': 'fake-domain', 'type': constants.SECURITY_SERVICES_ALLOWED_TYPES[0], } self.check_policy_patcher = mock.patch( 'manila.api.v1.security_service.policy.check_policy') self.check_policy_patcher.start() self.addCleanup(self._stop_started_patcher, self.check_policy_patcher) self.security_service_list_expected_resp = { 'security_services': [{ 'id': self.ss_active_directory['id'], 'name': self.ss_active_directory['name'], 'type': self.ss_active_directory['type'], 'status': self.ss_active_directory['status'] }, ] } self.fake_share_network_list_with_share_servers = [{ 'id': 'fake_sn_id', 'share_network_subnets': [{ 'id': 'fake_sns_id', 
'share_servers': [{'id': 'fake_ss_id'}] }] }] self.fake_share_network_list_without_share_servers = [{ 'id': 'fake_sn_id', 'share_network_subnets': [{ 'id': 'fake_sns_id', 'share_servers': [] }] }] def _stop_started_patcher(self, patcher): if hasattr(patcher, 'is_local'): patcher.stop() def test_security_service_show(self): db.security_service_get = mock.Mock( return_value=self.ss_active_directory) req = fakes.HTTPRequest.blank('/security-services/1') res_dict = self.controller.show(req, '1') expected = self.ss_active_directory.copy() expected.update() self.assertEqual({'security_service': self.ss_active_directory}, res_dict) def test_security_service_show_not_found(self): db.security_service_get = mock.Mock(side_effect=exception.NotFound) req = fakes.HTTPRequest.blank('/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '1') def test_security_service_create(self): sec_service = self.ss_active_directory.copy() create_stub = mock.Mock( return_value=sec_service) self.mock_object(db, 'security_service_create', create_stub) req = fakes.HTTPRequest.blank('/security-services') res_dict = self.controller.create( req, {"security_service": sec_service}) expected = self.ss_active_directory.copy() self.assertEqual({'security_service': expected}, res_dict) def test_security_service_create_invalid_types(self): sec_service = self.ss_active_directory.copy() sec_service['type'] = 'invalid' req = fakes.HTTPRequest.blank('/security-services') self.assertRaises(exception.InvalidInput, self.controller.create, req, {"security_service": sec_service}) @ddt.data('2.76') def test_security_service_create_invalid_active_directory(self, version): sec_service = self.ss_active_directory.copy() sec_service['default_ad_site'] = 'fake_default_ad_site' req = fakes.HTTPRequest.blank('/security-services', version=version) self.assertRaises(exception.InvalidInput, self.controller.create, req, {"security_service": sec_service}) def test_create_security_service_no_body(self): 
body = {} req = fakes.HTTPRequest.blank('/security-services') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) def test_security_service_delete(self): db.security_service_delete = mock.Mock() db.security_service_get = mock.Mock() db.share_network_get_all_by_security_service = mock.Mock( return_value=[]) req = fakes.HTTPRequest.blank('/security_services/1') resp = self.controller.delete(req, 1) db.security_service_delete.assert_called_once_with( req.environ['manila.context'], 1) self.assertEqual(202, resp.status_int) def test_security_service_delete_not_found(self): db.security_service_get = mock.Mock(side_effect=exception.NotFound) req = fakes.HTTPRequest.blank('/security_services/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1) def test_security_service_delete_has_share_networks(self): db.security_service_get = mock.Mock() db.share_network_get_all_by_security_service = mock.Mock( return_value=[{'share_network': 'fake_share_network'}]) req = fakes.HTTPRequest.blank('/security_services/1') self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, req, 1) def test_security_service_update_name(self): new = self.ss_active_directory.copy() updated = self.ss_active_directory.copy() updated['name'] = 'new' self.mock_object(security_service.policy, 'check_policy') db.security_service_get = mock.Mock(return_value=new) db.security_service_update = mock.Mock(return_value=updated) fake_sns = {'id': 'fake_sns_id', 'share_servers': ['fake_ss']} db.share_network_get_all_by_security_service = mock.Mock( return_value=[{ 'id': 'fake_id', 'share_network_subnets': [fake_sns] }]) body = {"security_service": {"name": "new"}} req = fakes.HTTPRequest.blank('/security_service/1') res_dict = self.controller.update(req, 1, body)['security_service'] self.assertEqual(updated['name'], res_dict['name']) db.share_network_get_all_by_security_service.assert_called_once_with( req.environ['manila.context'], 1) 
self.assertEqual(2, security_service.policy.check_policy.call_count) security_service.policy.check_policy.assert_has_calls([ mock.call(req.environ['manila.context'], security_service.RESOURCE_NAME, 'update', new) ]) def test_security_service_update_description(self): new = self.ss_active_directory.copy() updated = self.ss_active_directory.copy() updated['description'] = 'new' self.mock_object(security_service.policy, 'check_policy') db.security_service_get = mock.Mock(return_value=new) db.security_service_update = mock.Mock(return_value=updated) fake_sns = {'id': 'fake_sns_id', 'share_servers': ['fake_ss']} db.share_network_get_all_by_security_service = mock.Mock( return_value=[{ 'id': 'fake_id', 'share_network_subnets': [fake_sns] }]) body = {"security_service": {"description": "new"}} req = fakes.HTTPRequest.blank('/security_service/1') res_dict = self.controller.update(req, 1, body)['security_service'] self.assertEqual(updated['description'], res_dict['description']) db.share_network_get_all_by_security_service.assert_called_once_with( req.environ['manila.context'], 1) self.assertEqual(2, security_service.policy.check_policy.call_count) security_service.policy.check_policy.assert_has_calls([ mock.call(req.environ['manila.context'], security_service.RESOURCE_NAME, 'update', new) ]) @mock.patch.object(db, 'security_service_get', mock.Mock()) @mock.patch.object(db, 'share_network_get_all_by_security_service', mock.Mock()) def test_security_service_update_invalid_keys_sh_server_exists(self): self.mock_object(security_service.policy, 'check_policy') fake_sns = {'id': 'fake_sns_id', 'share_servers': ['fake_ss']} db.share_network_get_all_by_security_service.return_value = [ {'id': 'fake_id', 'share_network_subnets': [fake_sns]}, ] db.security_service_get.return_value = self.ss_active_directory.copy() body = {'security_service': {'user_id': 'new_user'}} req = fakes.HTTPRequest.blank('/security_services/1') self.assertRaises(webob.exc.HTTPForbidden, 
self.controller.update, req, 1, body) db.security_service_get.assert_called_once_with( req.environ['manila.context'], 1) db.share_network_get_all_by_security_service.assert_called_once_with( req.environ['manila.context'], 1) self.assertEqual(1, security_service.policy.check_policy.call_count) security_service.policy.check_policy.assert_has_calls([ mock.call(req.environ['manila.context'], security_service.RESOURCE_NAME, 'update', db.security_service_get.return_value) ]) @mock.patch.object(db, 'security_service_get', mock.Mock()) @mock.patch.object(db, 'security_service_update', mock.Mock()) @mock.patch.object(db, 'share_network_get_all_by_security_service', mock.Mock()) def test_security_service_update_valid_keys_sh_server_exists(self): self.mock_object(security_service.policy, 'check_policy') fake_sns = {'id': 'fake_sns_id', 'share_servers': ['fake_ss']} db.share_network_get_all_by_security_service.return_value = [ {'id': 'fake_id', 'share_network_subnets': [fake_sns]}, ] old = self.ss_active_directory.copy() updated = self.ss_active_directory.copy() updated['name'] = 'new name' updated['description'] = 'new description' db.security_service_get.return_value = old db.security_service_update.return_value = updated body = { 'security_service': { 'description': 'new description', 'name': 'new name', }, } req = fakes.HTTPRequest.blank('/security_services/1') res_dict = self.controller.update(req, 1, body)['security_service'] self.assertEqual(updated['description'], res_dict['description']) self.assertEqual(updated['name'], res_dict['name']) db.security_service_get.assert_called_once_with( req.environ['manila.context'], 1) db.share_network_get_all_by_security_service.assert_called_once_with( req.environ['manila.context'], 1) db.security_service_update.assert_called_once_with( req.environ['manila.context'], 1, body['security_service']) self.assertEqual(2, security_service.policy.check_policy.call_count) security_service.policy.check_policy.assert_has_calls([ 
mock.call(req.environ['manila.context'], security_service.RESOURCE_NAME, 'update', old) ]) @mock.patch.object(db, 'security_service_get', mock.Mock()) def test_security_service_update_has_share_servers(self): db.security_service_get = mock.Mock() self.mock_object( self.controller, '_share_servers_dependent_on_sn_exist', mock.Mock(return_value=True)) body = {"security_service": {"type": "ldap"}} req = fakes.HTTPRequest.blank('/security_services/1') self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, req, 1, body) @ddt.data(True, False) def test_security_service_update_share_server_dependent_exists(self, expected): req = fakes.HTTPRequest.blank('/security_services/1') context = req.environ['manila.context'] db.security_service_get = mock.Mock() network = (self.fake_share_network_list_with_share_servers if expected else self.fake_share_network_list_without_share_servers) db.share_network_get_all_by_security_service = mock.Mock( return_value=network) result = self.controller._share_servers_dependent_on_sn_exist( context, 'fake_id') self.assertEqual(expected, result) def test_security_service_list(self): db.security_service_get_all_by_project = mock.Mock( return_value=[self.ss_active_directory.copy()]) req = fakes.HTTPRequest.blank('/security_services') res_dict = self.controller.index(req) self.assertEqual(self.security_service_list_expected_resp, res_dict) @mock.patch.object(db, 'share_network_get', mock.Mock()) def test_security_service_list_filter_by_sn(self): sn = { 'id': 'fake_sn_id', 'security_services': [self.ss_active_directory, ], } db.share_network_get.return_value = sn req = fakes.HTTPRequest.blank( '/security-services?share_network_id=fake_sn_id') res_dict = self.controller.index(req) self.assertEqual(self.security_service_list_expected_resp, res_dict) db.share_network_get.assert_called_once_with( req.environ['manila.context'], sn['id']) @mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock()) @mock.patch.object(db, 
'security_service_get_all', mock.Mock()) def test_security_services_list_all_tenants_policy_authorized(self): self.check_policy_patcher.stop() db.security_service_get_all.return_value = [ self.ss_active_directory, self.ss_ldap, ] req = fakes.HTTPRequest.blank( '/security-services?all_tenants=1') self.mock_object(policy, "check_policy", mock.Mock(return_value=True)) fake_context = req.environ['manila.context'] self.controller.index(req) db.security_service_get_all_by_project.assert_not_called() db.security_service_get_all.assert_called_once_with(fake_context) @mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock()) @mock.patch.object(db, 'security_service_get_all', mock.Mock()) def test_security_services_list_all_tenants_policy_not_authorized(self): self.check_policy_patcher.stop() db.security_service_get_all.return_value = [ self.ss_active_directory, self.ss_ldap, ] req = fakes.HTTPRequest.blank( '/security-services?all_tenants=1') self.mock_object(policy, "check_policy", mock.Mock(side_effect=exception.NotAuthorized())) self.assertRaises(exception.NotAuthorized, self.controller.index, req) db.security_service_get_all_by_project.assert_not_called() db.security_service_get_all.assert_not_called() @mock.patch.object(db, 'security_service_get_all', mock.Mock()) def test_security_services_list_all_tenants_with_invalid_value(self): req = fakes.HTTPRequest.blank( '/security-services?all_tenants=nerd', use_admin_context=True) self.assertRaises(exception.InvalidInput, self.controller.index, req) @mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock()) def test_security_services_list_all_tenants_with_value_zero(self): db.security_service_get_all_by_project.return_value = [] req = fakes.HTTPRequest.blank( '/security-services?all_tenants=0', use_admin_context=True) res_dict = self.controller.index(req) self.assertEqual({'security_services': []}, res_dict) db.security_service_get_all_by_project.assert_called_once_with( 
req.environ['manila.context'], req.environ['manila.context'].project_id) @mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock()) def test_security_services_list_admin_context_invalid_opts(self): db.security_service_get_all_by_project.return_value = [ self.ss_active_directory, self.ss_ldap, ] req = fakes.HTTPRequest.blank( '/security-services?fake_opt=fake_value', use_admin_context=True) res_dict = self.controller.index(req) self.assertEqual({'security_services': []}, res_dict) db.security_service_get_all_by_project.assert_called_once_with( req.environ['manila.context'], req.environ['manila.context'].project_id) @mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock()) def test_security_service_list_all_filter_opts_separately(self): db.security_service_get_all_by_project.return_value = [ self.ss_active_directory, self.ss_ldap, ] for opt, val in self.valid_search_opts.items(): for use_admin_context in [True, False]: req = fakes.HTTPRequest.blank( '/security-services?' + opt + '=' + val, use_admin_context=use_admin_context) res_dict = self.controller.index(req) self.assertEqual(self.security_service_list_expected_resp, res_dict) db.security_service_get_all_by_project.assert_called_with( req.environ['manila.context'], req.environ['manila.context'].project_id) @mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock()) def test_security_service_list_all_filter_opts(self): db.security_service_get_all_by_project.return_value = [ self.ss_active_directory, self.ss_ldap, ] query_string = '/security-services?' 
+ parse.urlencode(sorted( [(k, v) for (k, v) in list(self.valid_search_opts.items())])) for use_admin_context in [True, False]: req = fakes.HTTPRequest.blank(query_string, use_admin_context=use_admin_context) res_dict = self.controller.index(req) self.assertEqual(self.security_service_list_expected_resp, res_dict) db.security_service_get_all_by_project.assert_called_with( req.environ['manila.context'], req.environ['manila.context'].project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_share_manage.py0000664000175000017500000002553400000000000022614 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt import webob from manila.api import common from manila.api.v1 import share_manage from manila.db import api as db_api from manila import exception from manila import policy from manila.share import api as share_api from manila.share import share_types from manila import test from manila.tests.api import fakes from manila import utils def get_fake_manage_body(export_path='/fake', service_host='fake@host#POOL', protocol='fake', share_type='fake', **kwargs): fake_share = { 'export_path': export_path, 'service_host': service_host, 'protocol': protocol, 'share_type': share_type, } fake_share.update(kwargs) return {'share': fake_share} @ddt.ddt class ShareManageTest(test.TestCase): """Share Manage Test.""" def setUp(self): super(ShareManageTest, self).setUp() self.controller = share_manage.ShareManageController() self.resource_name = self.controller.resource_name self.request = fakes.HTTPRequest.blank('/share/manage', use_admin_context=True) self.context = self.request.environ['manila.context'] self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.mock_object( common, 'validate_public_share_policy', mock.Mock(side_effect=lambda *args, **kwargs: args[1])) @ddt.data({}, {'shares': {}}, {'share': get_fake_manage_body('', None, None)}, {'share': get_fake_manage_body( export_path={'not_path': '/fake'})}) def test_share_manage_invalid_body(self, body): self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_service_not_found(self): body = get_fake_manage_body() self.mock_object(db_api, 'service_get_by_host_and_topic', mock.Mock( side_effect=exception.ServiceNotFound(service_id='fake'))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, 
self.resource_name, 'manage') def test_share_manage_share_type_not_found(self): body = get_fake_manage_body() self.mock_object(db_api, 'service_get_by_host_and_topic', mock.Mock()) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object(db_api, 'share_type_get_by_name', mock.Mock( side_effect=exception.ShareTypeNotFoundByName( share_type_name='fake'))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def _setup_manage_mocks(self, service_is_up=True): self.mock_object(db_api, 'service_get_by_host_and_topic', mock.Mock( return_value={'host': 'fake'})) self.mock_object(share_types, 'get_share_type_by_name_or_id', mock.Mock(return_value={'id': 'fake'})) self.mock_object(utils, 'service_is_up', mock.Mock( return_value=service_is_up)) @ddt.data({'service_is_up': False, 'service_host': 'fake@host#POOL'}, {'service_is_up': True, 'service_host': 'fake@host'}) def test_share_manage_bad_request(self, settings): body = get_fake_manage_body(service_host=settings.pop('service_host')) self._setup_manage_mocks(**settings) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_duplicate_share(self): body = get_fake_manage_body() exc = exception.InvalidShare(reason="fake") self._setup_manage_mocks() self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=exc)) self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_forbidden_manage(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object(share_api.API, 'manage', error) 
self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_forbidden_validate_service_host(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object(utils, 'service_is_up', mock.Mock(side_effect=error)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_invalid_input(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.InvalidInput(message="", reason="fake")) self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=error)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_invalid_share_server(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock( side_effect=exception.InvalidShareServer(reason="") ) self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=error)) self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') @ddt.data( get_fake_manage_body(name='foo', description='bar'), get_fake_manage_body(display_name='foo', description='bar'), get_fake_manage_body(name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar', driver_options=dict(volume_id='quuz')), get_fake_manage_body(display_name='foo', display_description='bar', export_path={'path': '/fake'}), ) def test_share_manage(self, data): 
self._setup_manage_mocks() return_share = {'share_type_id': '', 'id': 'fake'} self.mock_object( share_api.API, 'manage', mock.Mock(return_value=return_share)) share = { 'host': data['share']['service_host'], 'export_location_path': data['share']['export_path'], 'share_proto': data['share']['protocol'].upper(), 'share_type_id': 'fake', 'display_name': 'foo', 'display_description': 'bar', } data['share']['is_public'] = 'foo' driver_options = data['share'].get('driver_options', {}) if isinstance(share['export_location_path'], dict): share['export_location_path'] = ( share['export_location_path']['path'] ) actual_result = self.controller.create(self.request, data) share_api.API.manage.assert_called_once_with( mock.ANY, share, driver_options) self.assertIsNotNone(actual_result) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_share_manage_allow_dhss_true(self): self._setup_manage_mocks() data = get_fake_manage_body(name='foo', description='bar') return_share = {'share_type_id': '', 'id': 'fake'} self.mock_object( share_api.API, 'manage', mock.Mock(return_value=return_share)) share = { 'host': data['share']['service_host'], 'export_location_path': data['share']['export_path'], 'share_proto': data['share']['protocol'].upper(), 'share_type_id': 'fake', 'display_name': 'foo', 'display_description': 'bar', 'share_server_id': 'fake' } data['share']['share_server_id'] = 'fake' driver_options = data['share'].get('driver_options', {}) self.controller._manage(self.request, data, allow_dhss_true=True) share_api.API.manage.assert_called_once_with( self.context, share, driver_options ) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'manage') def test_wrong_permissions(self): body = get_fake_manage_body() fake_req = fakes.HTTPRequest.blank( '/share/manage', use_admin_context=False) self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, fake_req, body) 
self.mock_policy_check.assert_called_once_with( fake_req.environ['manila.context'], self.resource_name, 'manage') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_share_metadata.py0000664000175000017500000004477100000000000023150 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from oslo_config import cfg from oslo_serialization import jsonutils import webob from manila.api.v1 import share_metadata from manila.api.v1 import shares from manila.common import constants from manila import context from manila import db from manila.share import api from manila import test from manila.tests.api import fakes CONF = cfg.CONF AFFINITY_KEY = constants.AdminOnlyMetadata.AFFINITY_KEY ANTI_AFFINITY_KEY = constants.AdminOnlyMetadata.ANTI_AFFINITY_KEY @ddt.ddt class ShareMetaDataTest(test.TestCase): def setUp(self): super(ShareMetaDataTest, self).setUp() self.share_api = api.API() self.share_controller = shares.ShareController() self.controller = share_metadata.ShareMetadataController() self.ctxt = context.RequestContext('admin', 'fake', True) self.origin_metadata = { "key1": "value1", "key2": "value2", "key3": "value3", } self.share = db.share_create(self.ctxt, {}) self.share_id = self.share['id'] self.url = '/shares/%s/metadata' % self.share_id db.share_metadata_update( self.ctxt, self.share_id, 
self.origin_metadata, delete=False) def test_index(self): req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.share_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } self.assertEqual(expected, res_dict) def test_index_nonexistent_share(self): req = fakes.HTTPRequest.blank(self.url) self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, req, self.url) def test_index_no_data(self): db.share_metadata_update( self.ctxt, self.share_id, {}, delete=True) req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.share_id) expected = {'metadata': {}} self.assertEqual(expected, res_dict) def test_show(self): req = fakes.HTTPRequest.blank(self.url + '/key2') res_dict = self.controller.show(req, self.share_id, 'key2') expected = {'meta': {'key2': 'value2'}} self.assertEqual(expected, res_dict) def test_show_nonexistent_share(self): req = fakes.HTTPRequest.blank(self.url + '/key2') self.assertRaises( webob.exc.HTTPNotFound, self.controller.show, req, "nonexistent_share", 'key2') def test_show_meta_not_found(self): req = fakes.HTTPRequest.blank(self.url + '/key6') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, self.share_id, 'key6') def test_delete(self): req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' res = self.controller.delete(req, self.share_id, 'key2') self.assertEqual(200, res.status_int) def test_delete_nonexistent_share(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' self.assertRaises( webob.exc.HTTPNotFound, self.controller.delete, req, "nonexistent_share", 'key1') def test_delete_meta_not_found(self): req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, self.share_id, 'key6') @ddt.data((AFFINITY_KEY, '/' + AFFINITY_KEY), (ANTI_AFFINITY_KEY, '/' + ANTI_AFFINITY_KEY)) @ddt.unpack def 
test_delete_affinities_user(self, key, path): self.userctxt = context.RequestContext('demo', 'fake', False) req = fakes.HTTPRequest.blank(self.url + path) req.method = 'DELETE' req.content_type = "application/json" req.environ['manila.context'] = self.userctxt establish = {key: 'share1'} db.share_metadata_update( self.ctxt, self.share_id, establish, delete=False) self.assertRaises( webob.exc.HTTPForbidden, self.controller.delete, req, self.share_id, key) # test that nothing was deleted data = db.share_metadata_get(self.userctxt, self.share_id) if key in data: res_dict = {'meta': {key: data[key]}} self.assertEqual(res_dict, {'meta': establish}) @ddt.data((AFFINITY_KEY, '/' + AFFINITY_KEY), (ANTI_AFFINITY_KEY, '/' + ANTI_AFFINITY_KEY)) @ddt.unpack def test_delete_affinities_admin(self, key, path): req = fakes.HTTPRequest.blank(self.url + path) req.method = 'DELETE' req.content_type = "application/json" admin_context = req.environ['manila.context'].elevated() req.environ['manila.context'] = admin_context establish = {key: 'share1'} db.share_metadata_update( self.ctxt, self.share_id, establish, delete=False) self.controller.delete( req, self.share_id, key) # test that key was deleted data = db.share_metadata_get(self.ctxt, self.share_id) res_dict = {'meta': data} self.assertEqual(res_dict, {'meta': self.origin_metadata}) def test_create(self): req = fakes.HTTPRequest.blank('/v1/share_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dumps(body).encode("utf-8") res_dict = self.controller.create(req, self.share_id, body) expected = self.origin_metadata expected.update(body['metadata']) self.assertEqual({'metadata': expected}, res_dict) def test_create_empty_body(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, None) def 
test_create_item_empty_key(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, body) def test_create_item_key_too_long(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, body) def test_create_nonexistent_share(self): req = fakes.HTTPRequest.blank('/v1/share_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises( webob.exc.HTTPNotFound, self.controller.create, req, "nonexistent_share", body) def test_update_all(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', }, } req.body = jsonutils.dumps(expected).encode("utf-8") res_dict = self.controller.update_all(req, self.share_id, expected) self.assertEqual(expected, res_dict) def test_update_all_empty_container(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': {}} req.body = jsonutils.dumps(expected).encode("utf-8") res_dict = self.controller.update_all(req, self.share_id, expected) self.assertEqual(expected, res_dict) def test_update_all_malformed_container(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'meta': {}} req.body = jsonutils.dumps(expected).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, 
self.share_id, expected) @ddt.data(['asdf'], {'key': None}, {None: 'value'}, {None: None}) def test_update_all_malformed_data(self, metadata): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': metadata} req.body = jsonutils.dumps(expected).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.share_id, expected) def test_update_all_nonexistent_share(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPNotFound, self.controller.update_all, req, '100', body) @ddt.data({AFFINITY_KEY: 'foo'}, {ANTI_AFFINITY_KEY: 'foo'}, {AFFINITY_KEY: 'foo', ANTI_AFFINITY_KEY: 'bar'}, {AFFINITY_KEY: 'foo', ANTI_AFFINITY_KEY: 'bar', 'foo': 'bar'}) def test_update_all_affinities_user(self, metadata): body = {'metadata': metadata} self.userctxt = context.RequestContext('demo', 'fake', False) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" req.environ['manila.context'] = self.userctxt establish = {AFFINITY_KEY: 'share1'} db.share_metadata_update( self.ctxt, self.share_id, establish, delete=False) before_update_all = db.share_metadata_get(self.userctxt, self.share_id) body = {'metadata': metadata} req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises( webob.exc.HTTPForbidden, self.controller.update_all, req, self.share_id, body) # test nothing was deleted or updated after_update_all = db.share_metadata_get(self.userctxt, self.share_id) self.assertEqual(after_update_all, before_update_all) @ddt.data({AFFINITY_KEY: 'foo'}, {ANTI_AFFINITY_KEY: 'foo'}, {AFFINITY_KEY: 'foo', ANTI_AFFINITY_KEY: 'bar'}, {AFFINITY_KEY: 'foo', ANTI_AFFINITY_KEY: 'bar', 'foo': 'bar'}) def test_update_all_affinities_admin(self, metadata): req = 
fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" admin_context = req.environ['manila.context'].elevated() req.environ['manila.context'] = admin_context establish = {AFFINITY_KEY: 'share1'} db.share_metadata_update( self.ctxt, self.share_id, establish, delete=False) body = {'metadata': metadata} req.body = jsonutils.dumps(body).encode("utf-8") res_dict = self.controller.update_all(req, self.share_id, body) expected = body self.assertEqual(res_dict, expected) def test_update_item(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers["content-type"] = "application/json" res_dict = self.controller.update(req, self.share_id, 'key1', body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) def test_update_item_nonexistent_share(self): req = fakes.HTTPRequest.blank('/v1.1/fake/shares/asdf/metadata/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers["content-type"] = "application/json" self.assertRaises( webob.exc.HTTPNotFound, self.controller.update, req, "nonexistent_share", 'key1', body) def test_update_item_empty_body(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, 'key1', None) def test_update_item_empty_key(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, '', body) def test_update_item_key_too_long(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): 
"value1"}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, ("a" * 260), body) def test_update_item_value_too_long(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": ("a" * 1025)}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, "key1", body) def test_update_item_too_many_keys(self): req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1", "key2": "value2"}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, 'key1', body) def test_update_item_body_uri_mismatch(self): req = fakes.HTTPRequest.blank(self.url + '/bad') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.share_id, 'bad', body) @ddt.data((AFFINITY_KEY, '/' + AFFINITY_KEY), (ANTI_AFFINITY_KEY, '/' + ANTI_AFFINITY_KEY)) @ddt.unpack def test_update_item_affinities_user(self, key, path): self.userctxt = context.RequestContext('demo', 'fake', False) req = fakes.HTTPRequest.blank(self.url + path) req.method = 'PUT' req.content_type = "application/json" req.environ['manila.context'] = self.userctxt establish = {AFFINITY_KEY: 'share1'} db.share_metadata_update( self.ctxt, self.share_id, establish, delete=False) body = {'meta': {key: 'share1,share2'}} req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises( webob.exc.HTTPForbidden, self.controller.update, req, self.share_id, key, body) # test that nothing was updated data = 
db.share_metadata_get(self.ctxt, self.share_id) if AFFINITY_KEY in data: res_dict = {'meta': {AFFINITY_KEY: data[AFFINITY_KEY]}} self.assertEqual(res_dict, {'meta': establish}) @ddt.data((AFFINITY_KEY, '/' + AFFINITY_KEY), (ANTI_AFFINITY_KEY, '/' + ANTI_AFFINITY_KEY)) @ddt.unpack def test_update_item_affinities_admin(self, key, path): req = fakes.HTTPRequest.blank(self.url + path) req.method = 'PUT' req.content_type = "application/json" admin_context = req.environ['manila.context'].elevated() req.environ['manila.context'] = admin_context establish = {AFFINITY_KEY: 'share1'} db.share_metadata_update( self.ctxt, self.share_id, establish, delete=False) body = {'meta': {key: 'share1,share2'}} req.body = jsonutils.dumps(body).encode("utf-8") res_dict = self.controller.update( req, self.share_id, key, body) expected = body self.assertEqual(res_dict, expected) def test_invalid_metadata_items_on_create(self): req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dumps(data).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, data) # test for long value data = {"metadata": {"key": "v" * 1025}} req.body = jsonutils.dumps(data).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, data) # test for empty key. data = {"metadata": {"": "value1"}} req.body = jsonutils.dumps(data).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.share_id, data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_share_servers.py0000664000175000017500000005634500000000000023061 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import copy import ddt from webob import exc from manila.api.openstack import api_version_request as api_version from manila.api.v1 import share_servers from manila.common import constants from manila import context from manila.db import api as db_api from manila import exception from manila import policy from manila import test from manila.tests.api import fakes fake_share_server_list = { 'share_servers': [ { 'status': constants.STATUS_ACTIVE, 'updated_at': None, 'host': 'fake_host', 'share_network_name': 'fake_sn_name', 'share_network_id': 'fake_sn_id', 'share_network_subnet_ids': ['fake_sns_id'], 'project_id': 'fake_project_id', 'id': 'fake_server_id', 'is_auto_deletable': False, 'task_state': None, 'source_share_server_id': None, 'identifier': 'fake_id', 'security_service_update_support': False, 'network_allocation_update_support': False, 'encryption_key_ref': None }, { 'status': constants.STATUS_ERROR, 'updated_at': None, 'host': 'fake_host_2', 'share_network_name': 'fake_sn_id_2', 'share_network_id': 'fake_sn_id_2', 'share_network_subnet_ids': ['fake_sns_id_2'], 'project_id': 'fake_project_id_2', 'id': 'fake_server_id_2', 'is_auto_deletable': True, 'task_state': None, 'source_share_server_id': None, 'identifier': 'fake_id_2', 'security_service_update_support': False, 'network_allocation_update_support': False, 'encryption_key_ref': None }, ] } fake_share_network_get_list = { 'share_networks': [ { 
'name': 'fake_sn_name', 'id': 'fake_sn_id', 'project_id': 'fake_project_id', }, { 'name': None, 'id': 'fake_sn_id_2', 'project_id': 'fake_project_id_2', } ] } fake_share_server_get_result = { 'share_server': { 'status': constants.STATUS_ACTIVE, 'created_at': None, 'updated_at': None, 'host': 'fake_host', 'share_network_name': 'fake_sn_name', 'share_network_id': 'fake_sn_id', 'share_network_subnet_ids': ['fake_sns_id'], 'project_id': 'fake_project_id', 'id': 'fake_server_id', 'backend_details': { 'fake_key_1': 'fake_value_1', 'fake_key_2': 'fake_value_2', }, 'is_auto_deletable': False, 'task_state': None, 'source_share_server_id': None, 'identifier': 'fake_id', 'security_service_update_support': False, 'network_allocation_update_support': False, 'encryption_key_ref': None } } share_server_backend_details = { 'fake_key_1': 'fake_value_1', 'fake_key_2': 'fake_value_2', } fake_share_server_backend_details_get_result = { 'details': share_server_backend_details } CONTEXT = context.get_admin_context() class FakeShareServer(object): def __init__(self, *args, **kwargs): super(FakeShareServer, self).__init__() self.id = kwargs.get('id', 'fake_server_id') if 'created_at' in kwargs: self.created_at = kwargs.get('created_at', None) self.updated_at = kwargs.get('updated_at', None) self.host = kwargs.get('host', 'fake_host') self.share_network_subnets = kwargs.get('share_network_subnets', [{ 'id': 'fake_sns_id', 'share_network_id': 'fake_sn_id'}]) self.share_network_subnet_ids = kwargs.get( 'share_network_subnet_ids', [sn['id'] for sn in self.share_network_subnets]) self.status = kwargs.get('status', constants.STATUS_ACTIVE) self.project_id = 'fake_project_id' self.identifier = kwargs.get('identifier', 'fake_id') self.is_auto_deletable = kwargs.get('is_auto_deletable', False) self.task_state = kwargs.get('task_state') self.source_share_server_id = kwargs.get('source_share_server_id') self.backend_details = share_server_backend_details self.security_service_update_support = 
kwargs.get( 'security_service_update_support', False) self.network_allocation_update_support = kwargs.get( 'network_allocation_update_support', False) self.share_network_id = kwargs.get('share_network_id', 'fake_sn_id') self.encryption_key_ref = kwargs.get('encryption_key_ref', None) def __getitem__(self, item): return getattr(self, item) def fake_share_server_get_all(): fake_share_servers = [ FakeShareServer(), FakeShareServer(id='fake_server_id_2', host='fake_host_2', share_network_subnets=[{ 'id': 'fake_sns_id_2', 'share_network_id': 'fake_sn_id_2', }], share_network_id='fake_sn_id_2', identifier='fake_id_2', task_state=None, is_auto_deletable=True, status=constants.STATUS_ERROR, security_service_update_support=False, network_allocation_update_support=False), ] return fake_share_servers def fake_share_server_get(): return FakeShareServer(created_at=None) class FakeRequestAdmin(object): environ = {"manila.context": CONTEXT} GET = {} class FakeRequestWithHost(FakeRequestAdmin): GET = {'host': fake_share_server_list['share_servers'][0]['host']} class FakeRequestWithStatus(FakeRequestAdmin): GET = {'status': constants.STATUS_ERROR} class FakeRequestWithProjectId(FakeRequestAdmin): GET = {'project_id': fake_share_server_get_all()[0].project_id} class FakeRequestWithShareNetworkSubnetId(FakeRequestAdmin): GET = { 'share_network_subnet_id': fake_share_server_get_all()[0].share_network_subnet_ids, } class FakeRequestWithFakeFilter(FakeRequestAdmin): GET = {'fake_key': 'fake_value'} @ddt.ddt class ShareServerAPITest(test.TestCase): def setUp(self): super(ShareServerAPITest, self).setUp() self.controller = share_servers.ShareServerController() self.resource_name = self.controller.resource_name self.mock_object(policy, 'check_policy', mock.Mock(return_value=True)) self.mock_object(db_api, 'share_server_get_all', mock.Mock(return_value=fake_share_server_get_all())) def _prepare_request(self, url, use_admin_context, version=api_version._MAX_API_VERSION): request = 
fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context, version=version) ctxt = request.environ['manila.context'] return request, ctxt def test_index_no_filters(self): request, ctxt = self._prepare_request(url='/v2/share-servers/', use_admin_context=True) self.mock_object(db_api, 'share_network_get', mock.Mock( side_effect=[fake_share_network_get_list['share_networks'][0], fake_share_network_get_list['share_networks'][1]])) result = self.controller.index(request) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(ctxt) self.assertEqual(fake_share_server_list, result) def test_index_host_filter(self): request, ctxt = self._prepare_request( url='/index?host=%s' % fake_share_server_list['share_servers'][0]['host'], use_admin_context=True) self.mock_object(db_api, 'share_network_get', mock.Mock( side_effect=[fake_share_network_get_list['share_networks'][0], fake_share_network_get_list['share_networks'][1]])) result = self.controller.index(request) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(ctxt) self.assertEqual([fake_share_server_list['share_servers'][0]], result['share_servers']) def test_index_status_filter(self): request, ctxt = self._prepare_request(url='/index?status=%s' % constants.STATUS_ERROR, use_admin_context=True) self.mock_object(db_api, 'share_network_get', mock.Mock( side_effect=[fake_share_network_get_list['share_networks'][0], fake_share_network_get_list['share_networks'][1]])) result = self.controller.index(request) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(ctxt) self.assertEqual([fake_share_server_list['share_servers'][1]], result['share_servers']) def test_index_project_id_filter(self): request, ctxt = self._prepare_request( url='/index?project_id=%s' % 
fake_share_server_get_all()[0].project_id, use_admin_context=True) self.mock_object(db_api, 'share_network_get', mock.Mock( side_effect=[fake_share_network_get_list['share_networks'][0], fake_share_network_get_list['share_networks'][1]])) result = self.controller.index(request) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(ctxt) self.assertEqual([fake_share_server_list['share_servers'][0]], result['share_servers']) def test_index_share_network_filter_by_name(self): request, ctxt = self._prepare_request( url='/index?host=%s' % fake_share_server_list['share_servers'][0]['host'], use_admin_context=True) self.mock_object(db_api, 'share_network_get', mock.Mock( side_effect=[fake_share_network_get_list['share_networks'][0], fake_share_network_get_list['share_networks'][1]])) result = self.controller.index(request) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(ctxt) self.assertEqual([fake_share_server_list['share_servers'][0]], result['share_servers']) def test_index_share_network_filter_by_id(self): request, ctxt = self._prepare_request( url='/index?share_network=%s' % fake_share_network_get_list['share_networks'][0]['id'], use_admin_context=True) self.mock_object(db_api, 'share_network_get', mock.Mock( side_effect=[fake_share_network_get_list['share_networks'][0], fake_share_network_get_list['share_networks'][1]])) result = self.controller.index(request) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(ctxt) self.assertEqual([fake_share_server_list['share_servers'][0]], result['share_servers']) def test_index_fake_filter(self): request, ctxt = self._prepare_request(url='/index?fake_key=fake_value', use_admin_context=True) self.mock_object(db_api, 'share_network_get', mock.Mock( 
side_effect=[fake_share_network_get_list['share_networks'][0], fake_share_network_get_list['share_networks'][1]])) result = self.controller.index(request) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'index') db_api.share_server_get_all.assert_called_once_with(ctxt) self.assertEqual(0, len(result['share_servers'])) def test_index_share_network_not_found(self): request, ctxt = self._prepare_request( url='/index?identifier=%s' % fake_share_server_get_all()[0].identifier, use_admin_context=True) self.mock_object( db_api, 'share_network_get', mock.Mock(side_effect=exception.ShareNetworkNotFound( share_network_id='fake'))) result = self.controller.index(request) db_api.share_server_get_all.assert_called_once_with(ctxt) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'index') exp_share_server = fake_share_server_list['share_servers'][0].copy() exp_share_server['project_id'] = '' exp_share_server['share_network_name'] = '' self.assertEqual([exp_share_server], result['share_servers']) def test_index_share_network_not_found_filter_project(self): request, ctxt = self._prepare_request( url='/index?project_id=%s' % fake_share_server_get_all()[0].project_id, use_admin_context=True) self.mock_object( db_api, 'share_network_get', mock.Mock(side_effect=exception.ShareNetworkNotFound( share_network_id='fake'))) result = self.controller.index(request) db_api.share_server_get_all.assert_called_once_with(ctxt) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'index') self.assertEqual(0, len(result['share_servers'])) @ddt.data({'version': '2.70', 'share_network_name': ''}, {'version': '2.90', 'share_network_name': 'fake_sn_name'}, {'version': '2.70', 'share_network_name': 'fake_sn_name'}, {'version': '2.68', 'share_network_name': 'fake_sn_name'}) @ddt.unpack def test_show(self, version, share_network_name): self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server_get())) request, ctxt = 
self._prepare_request('/show', use_admin_context=True, version=version) share_network = copy.deepcopy( fake_share_network_get_list['share_networks'][0]) share_server = copy.deepcopy( fake_share_server_get_result['share_server']) if version == '2.68': share_server['share_network_subnet_id'] = \ share_server['share_network_subnet_ids'][0] share_server.pop('share_network_subnet_ids') share_server.pop('network_allocation_update_support') share_network['name'] = share_network_name if share_network['name']: share_server['share_network_name'] = share_network['name'] else: share_server['share_network_name'] = share_network['id'] if version < '2.90': share_server.pop('encryption_key_ref') self.mock_object(db_api, 'share_network_get', mock.Mock( return_value=share_network)) result = self.controller.show( request, share_server['id']) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'show') db_api.share_server_get.assert_called_once_with( ctxt, share_server['id']) self.assertEqual(share_server, result['share_server']) @ddt.data( {'share_server_side_effect': exception.ShareServerNotFound( share_server_id="foo"), 'share_net_side_effect': mock.Mock()}, {'share_server_side_effect': mock.Mock( return_value=fake_share_server_get()), 'share_net_side_effect': exception.ShareNetworkNotFound( share_network_id="foo")}) @ddt.unpack def test_show_server_not_found(self, share_server_side_effect, share_net_side_effect): self.mock_object(db_api, 'share_server_get', mock.Mock(side_effect=share_server_side_effect)) request, ctxt = self._prepare_request('/show', use_admin_context=True) self.mock_object(db_api, 'share_network_get', mock.Mock( side_effect=share_net_side_effect)) self.assertRaises( exc.HTTPNotFound, self.controller.show, request, fake_share_server_get_result['share_server']['id']) policy.check_policy.assert_called_once_with( ctxt, self.resource_name, 'show') db_api.share_server_get.assert_called_once_with( ctxt, 
fake_share_server_get_result['share_server']['id']) if isinstance(share_net_side_effect, exception.ShareNetworkNotFound): exp_share_net_id = (fake_share_server_get() .share_network_subnets[0]['share_network_id']) db_api.share_network_get.assert_called_once_with( ctxt, exp_share_net_id) def test_details(self): self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server_get())) result = self.controller.details( FakeRequestAdmin, fake_share_server_get_result['share_server']['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'details') db_api.share_server_get.assert_called_once_with( CONTEXT, fake_share_server_get_result['share_server']['id']) self.assertEqual(fake_share_server_backend_details_get_result, result) def test_details_share_server_not_found(self): share_server_id = 'fake' self.mock_object( db_api, 'share_server_get', mock.Mock(side_effect=exception.ShareServerNotFound( share_server_id=share_server_id))) self.assertRaises(exc.HTTPNotFound, self.controller.details, FakeRequestAdmin, share_server_id) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'details') db_api.share_server_get.assert_called_once_with( CONTEXT, share_server_id) def test_delete_active_server(self): share_server = FakeShareServer(status=constants.STATUS_ACTIVE) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.controller.share_api, 'delete_share_server') self.controller.delete( FakeRequestAdmin, fake_share_server_get_result['share_server']['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') db_api.share_server_get.assert_called_once_with( CONTEXT, fake_share_server_get_result['share_server']['id']) self.controller.share_api.delete_share_server.assert_called_once_with( CONTEXT, share_server) def test_delete_error_server(self): share_server = FakeShareServer(status=constants.STATUS_ERROR) self.mock_object(db_api, 
'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.controller.share_api, 'delete_share_server') self.controller.delete( FakeRequestAdmin, fake_share_server_get_result['share_server']['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') db_api.share_server_get.assert_called_once_with( CONTEXT, fake_share_server_get_result['share_server']['id']) self.controller.share_api.delete_share_server.assert_called_once_with( CONTEXT, share_server) def test_delete_used_server(self): share_server_id = fake_share_server_get_result['share_server']['id'] def raise_not_share_server_in_use(*args, **kwargs): raise exception.ShareServerInUse(share_server_id=share_server_id) share_server = fake_share_server_get() self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.controller.share_api, 'delete_share_server', mock.Mock(side_effect=raise_not_share_server_in_use)) self.assertRaises(exc.HTTPConflict, self.controller.delete, FakeRequestAdmin, share_server_id) db_api.share_server_get.assert_called_once_with(CONTEXT, share_server_id) self.controller.share_api.delete_share_server.assert_called_once_with( CONTEXT, share_server) def test_delete_not_found(self): share_server_id = fake_share_server_get_result['share_server']['id'] def raise_not_found(*args, **kwargs): raise exception.ShareServerNotFound( share_server_id=share_server_id) self.mock_object(db_api, 'share_server_get', mock.Mock(side_effect=raise_not_found)) self.assertRaises(exc.HTTPNotFound, self.controller.delete, FakeRequestAdmin, share_server_id) db_api.share_server_get.assert_called_once_with( CONTEXT, share_server_id) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') def test_delete_creating_server(self): share_server = FakeShareServer(status=constants.STATUS_CREATING) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=share_server)) 
self.assertRaises(exc.HTTPForbidden, self.controller.delete, FakeRequestAdmin, share_server['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') def test_delete_deleting_server(self): share_server = FakeShareServer(status=constants.STATUS_DELETING) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=share_server)) self.assertRaises(exc.HTTPForbidden, self.controller.delete, FakeRequestAdmin, share_server['id']) policy.check_policy.assert_called_once_with( CONTEXT, self.resource_name, 'delete') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_share_snapshots.py0000664000175000017500000004422500000000000023404 0ustar00zuulzuul00000000000000# Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_serialization import jsonutils import webob from manila.api.v1 import share_snapshots from manila.common import constants from manila import context from manila import db from manila import exception from manila.share import api as share_api from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes from manila.tests import db_utils from manila.tests import fake_share @ddt.ddt class ShareSnapshotAPITest(test.TestCase): """Share Snapshot API Test.""" def setUp(self): super(ShareSnapshotAPITest, self).setUp() self.controller = share_snapshots.ShareSnapshotsController() self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'get_all_snapshots', stubs.stub_snapshot_get_all_by_project) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'snapshot_update', stubs.stub_snapshot_update) self.snp_example = { 'share_id': 100, 'size': 12, 'force': False, 'display_name': 'updated_share_name', 'display_description': 'updated_share_description', } self.maxDiff = None def test_snapshot_show_status_none(self): return_snapshot = { 'share_id': 100, 'name': 'fake_share_name', 'description': 'fake_share_description', 'status': None, } self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=return_snapshot)) req = fakes.HTTPRequest.blank('/fake/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '200') @ddt.data('true', 'True', ' True', '1') def test_snapshot_create(self, snapshot_support): self.mock_object(share_api.API, 'create_snapshot', stubs.stub_snapshot_create) body = { 'snapshot': { 'share_id': 'fakeshareid', 'force': False, 'name': 'displaysnapname', 'description': 'displaysnapdesc', } } req = fakes.HTTPRequest.blank('/fake/snapshots') res_dict = self.controller.create(req, body) expected = fake_share.expected_snapshot(id=200) self.assertEqual(expected, 
res_dict) @ddt.data( {'name': 'name1', 'description': 'x' * 256}, {'name': 'x' * 256, 'description': 'description1'}, ) @ddt.unpack def test_snapshot_create_invalid_input(self, name, description): self.mock_object(share_api.API, 'create_snapshot') self.mock_object( share_api.API, 'get', mock.Mock(return_value={'snapshot_support': True, 'is_soft_deleted': False})) body = { 'snapshot': { 'share_id': 200, 'force': False, 'name': name, 'description': description, } } req = fakes.HTTPRequest.blank('/fake/snapshots') self.assertRaises( exception.InvalidInput, self.controller.create, req, body) @ddt.data(0, False) def test_snapshot_create_no_support(self, snapshot_support): self.mock_object(share_api.API, 'create_snapshot') self.mock_object( share_api.API, 'get', mock.Mock(return_value={'snapshot_support': snapshot_support})) body = { 'snapshot': { 'share_id': 100, 'force': False, 'name': 'fake_share_name', 'description': 'fake_share_description', } } req = fakes.HTTPRequest.blank('/fake/snapshots') self.assertRaises( webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) self.assertFalse(share_api.API.create_snapshot.called) def test_snapshot_create_in_recycle_bin(self): self.mock_object(share_api.API, 'create_snapshot') self.mock_object( share_api.API, 'get', mock.Mock(return_value={'snapshot_support': True, 'is_soft_deleted': True})) body = { 'snapshot': { 'share_id': 200, 'force': False, 'name': 'fake_share_name', 'description': 'fake_share_description', } } req = fakes.HTTPRequest.blank('/fake/snapshots') self.assertRaises( webob.exc.HTTPForbidden, self.controller.create, req, body) self.assertFalse(share_api.API.create_snapshot.called) def test_snapshot_create_no_body(self): body = {} req = fakes.HTTPRequest.blank('/fake/snapshots') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) def test_snapshot_delete(self): self.mock_object(share_api.API, 'delete_snapshot', stubs.stub_snapshot_delete) req = 
fakes.HTTPRequest.blank('/fake/snapshots/200') resp = self.controller.delete(req, 200) self.assertEqual(202, resp.status_int) def test_snapshot_delete_nofound(self): self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get_notfound) req = fakes.HTTPRequest.blank('/fake/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 200) def test_snapshot_show(self): req = fakes.HTTPRequest.blank('/fake/snapshots/200') res_dict = self.controller.show(req, 200) expected = fake_share.expected_snapshot(id=200) self.assertEqual(expected, res_dict) def test_snapshot_show_nofound(self): self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get_notfound) req = fakes.HTTPRequest.blank('/fake/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '200') def test_snapshot_list_summary(self): self.mock_object(share_api.API, 'get_all_snapshots', stubs.stub_snapshot_get_all_by_project) req = fakes.HTTPRequest.blank('/fake/snapshots') res_dict = self.controller.index(req) expected = { 'snapshots': [ { 'name': 'displaysnapname', 'id': 2, 'links': [ { 'href': 'http://localhost/share/v1/fake/' 'snapshots/2', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/snapshots/2', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) def _snapshot_list_summary_with_search_opts(self, use_admin_context): search_opts = fake_share.search_opts() # fake_key should be filtered for non-admin url = '/fake/snapshots?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) db_snapshots = [ {'id': 'id1', 'display_name': 'n1', 'status': 'fake_status', 'share_id': 'fake_share_id'}, {'id': 'id2', 'display_name': 'n2', 'status': 'fake_status', 'share_id': 'fake_share_id'}, {'id': 'id3', 'display_name': 'n3', 'status': 'fake_status', 'share_id': 'fake_share_id'}, ] snapshots = [db_snapshots[1]] 
self.mock_object(share_api.API, 'get_all_snapshots', mock.Mock(return_value=snapshots)) result = self.controller.index(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_id': search_opts['share_id'], } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all_snapshots.assert_called_once_with( req.environ['manila.context'], limit=int(search_opts['limit']), offset=int(search_opts['offset']), sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(snapshots[0]['id'], result['snapshots'][0]['id']) self.assertEqual( snapshots[0]['display_name'], result['snapshots'][0]['name']) def test_snapshot_list_summary_with_search_opts_by_non_admin(self): self._snapshot_list_summary_with_search_opts(use_admin_context=False) def test_snapshot_list_summary_with_search_opts_by_admin(self): self._snapshot_list_summary_with_search_opts(use_admin_context=True) def _snapshot_list_detail_with_search_opts(self, use_admin_context): search_opts = fake_share.search_opts() # fake_key should be filtered for non-admin url = '/fake/shares/detail?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) db_snapshots = [ { 'id': 'id1', 'display_name': 'n1', 'status': 'fake_status_other', 'aggregate_status': 'fake_status', 'share_id': 'fake_share_id', }, { 'id': 'id2', 'display_name': 'n2', 'status': 'fake_status', 'aggregate_status': 'fake_status', 'share_id': 'fake_share_id', }, { 'id': 'id3', 'display_name': 'n3', 'status': 'fake_status_other', 'aggregate_status': 'fake_status', 'share_id': 'fake_share_id', }, ] snapshots = [db_snapshots[1]] self.mock_object(share_api.API, 'get_all_snapshots', mock.Mock(return_value=snapshots)) result = self.controller.detail(req) search_opts_expected = { 
'display_name': search_opts['name'], 'status': search_opts['status'], 'share_id': search_opts['share_id'], } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all_snapshots.assert_called_once_with( req.environ['manila.context'], limit=int(search_opts['limit']), offset=int(search_opts['offset']), sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(snapshots[0]['id'], result['snapshots'][0]['id']) self.assertEqual( snapshots[0]['display_name'], result['snapshots'][0]['name']) self.assertEqual( snapshots[0]['status'], result['snapshots'][0]['status']) self.assertEqual( snapshots[0]['share_id'], result['snapshots'][0]['share_id']) def test_snapshot_list_detail_with_search_opts_by_non_admin(self): self._snapshot_list_detail_with_search_opts(use_admin_context=False) def test_snapshot_list_detail_with_search_opts_by_admin(self): self._snapshot_list_detail_with_search_opts(use_admin_context=True) def test_snapshot_list_detail(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/fake/shares/detail', environ=env) res_dict = self.controller.detail(req) expected_s = fake_share.expected_snapshot(id=2) expected = {'snapshots': [expected_s['snapshot']]} self.assertEqual(expected, res_dict) def test_snapshot_list_status_none(self): snapshots = [ { 'id': 3, 'share_id': 'fakeshareid', 'size': 1, 'status': None, 'name': 'displaysnapname', 'description': 'displaysnapdesc', } ] self.mock_object(share_api.API, 'get_all_snapshots', mock.Mock(return_value=snapshots)) req = fakes.HTTPRequest.blank('/fake/snapshots') result = self.controller.index(req) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(snapshots[0]['id'], result['snapshots'][0]['id']) def test_snapshot_updates_description(self): snp = self.snp_example body = {"snapshot": snp} req = 
fakes.HTTPRequest.blank('/fake/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(snp["display_name"], res_dict['snapshot']["name"]) def test_snapshot_updates_display_descr(self): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/fake/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(snp["display_description"], res_dict['snapshot']["description"]) def test_share_not_updates_size(self): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/fake/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertNotEqual(snp["size"], res_dict['snapshot']["size"]) @ddt.ddt class ShareSnapshotAdminActionsAPITest(test.TestCase): def setUp(self): super(ShareSnapshotAdminActionsAPITest, self).setUp() self.controller = share_snapshots.ShareSnapshotsController() self.flags(transport_url='rabbit://fake:fake@mqhost:5672') self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_snapshot_data(self, snapshot=None): if snapshot is None: share = db_utils.create_share() snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) req = fakes.HTTPRequest.blank('/v1/fake/snapshots/%s/action' % snapshot['id']) return snapshot, req def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None): action_name = 'os-reset_status' if body is None: body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) actual_model = db_access_method(ctxt, model['id']) 
self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_reset_status_with_different_roles_v1) @ddt.unpack def test_snapshot_reset_status_with_different_roles(self, role, valid_code, valid_status): ctxt = self._get_context(role) snapshot, req = self._setup_snapshot_data() self._reset_status(ctxt, snapshot, req, db.share_snapshot_get, valid_code, valid_status) @ddt.data( {'os-reset_status': {'x-status': 'bad'}}, {'os-reset_status': {'status': 'invalid'}}, ) def test_snapshot_invalid_reset_status_body(self, body): snapshot, req = self._setup_snapshot_data() self._reset_status(self.admin_context, snapshot, req, db.share_snapshot_get, 400, constants.STATUS_AVAILABLE, body) def _force_delete(self, ctxt, model, req, db_access_method, valid_code): action_name = 'os-force_delete' req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dumps({action_name: {}}).encode("utf-8") req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # Validate response self.assertEqual(valid_code, resp.status_int) @ddt.data( {'role': 'admin', 'resp_code': 202}, {'role': 'member', 'resp_code': 403}, ) @ddt.unpack def test_snapshot_force_delete_with_different_roles(self, role, resp_code): ctxt = self._get_context(role) snapshot, req = self._setup_snapshot_data() self._force_delete(ctxt, snapshot, req, db.share_snapshot_get, resp_code) def test_snapshot_force_delete_missing(self): ctxt = self._get_context('admin') snapshot, req = self._setup_snapshot_data(snapshot={'id': 'fake'}) self._force_delete(ctxt, snapshot, req, db.share_snapshot_get, 404) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_share_types_extra_specs.py0000664000175000017500000003622200000000000025124 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. 
# Copyright (c) 2011 OpenStack Foundation # Copyright 2011 University of Southern California # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_utils import strutils import webob from manila.api.v1 import share_types_extra_specs from manila.common import constants from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import fake_notifier import manila.wsgi DRIVER_HANDLES_SHARE_SERVERS = ( constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS) SNAPSHOT_SUPPORT = constants.ExtraSpecs.SNAPSHOT_SUPPORT def return_create_share_type_extra_specs(context, share_type_id, extra_specs): return stub_share_type_extra_specs() def return_share_type_extra_specs(context, share_type_id): return stub_share_type_extra_specs() def return_empty_share_type_extra_specs(context, share_type_id): return {} def delete_share_type_extra_specs(context, share_type_id, key): pass def delete_share_type_extra_specs_not_found(context, share_type_id, key): raise exception.ShareTypeExtraSpecsNotFound("Not Found") def stub_share_type_extra_specs(): specs = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} return specs def share_type_get(context, id, inactive=False, expected_fields=None): pass def get_large_string(): return "s" * 256 def get_extra_specs_dict(extra_specs, include_required=True): if not extra_specs: extra_specs = {} if 
include_required: extra_specs[DRIVER_HANDLES_SHARE_SERVERS] = False return {'extra_specs': extra_specs} @ddt.ddt class ShareTypesExtraSpecsTest(test.TestCase): def setUp(self): super(ShareTypesExtraSpecsTest, self).setUp() self.flags(host='fake') self.mock_object(manila.db, 'share_type_get', share_type_get) self.api_path = '/v2/fake/os-share-types/1/extra_specs' self.controller = ( share_types_extra_specs.ShareTypeExtraSpecsController()) self.resource_name = self.controller.resource_name self.mock_policy_check = self.mock_object(policy, 'check_policy') """to reset notifier drivers left over from other api/contrib tests""" self.addCleanup(fake_notifier.reset) def test_index(self): self.mock_object(manila.db, 'share_type_extra_specs_get', return_share_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.index(req, 1) self.assertEqual('value1', res_dict['extra_specs']['key1']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'index') def test_index_no_data(self): self.mock_object(manila.db, 'share_type_extra_specs_get', return_empty_share_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.index(req, 1) self.assertEqual(0, len(res_dict['extra_specs'])) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'index') def test_show(self): self.mock_object(manila.db, 'share_type_extra_specs_get', return_share_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/key5') req_context = req.environ['manila.context'] res_dict = self.controller.show(req, 1, 'key5') self.assertEqual('value5', res_dict['key5']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'show') def test_show_spec_not_found(self): self.mock_object(manila.db, 'share_type_extra_specs_get', return_empty_share_type_extra_specs) req = 
fakes.HTTPRequest.blank(self.api_path + '/key6') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 1, 'key6') self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'show') @ddt.data( ('1.0', 'key5'), ('2.23', 'key5'), ('2.24', 'key5'), ('2.24', SNAPSHOT_SUPPORT), ) @ddt.unpack def test_delete(self, version, key): self.mock_object(manila.db, 'share_type_extra_specs_delete', delete_share_type_extra_specs) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path + '/' + key, version=version) req_context = req.environ['manila.context'] self.controller.delete(req, 1, key) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') def test_delete_not_found(self): self.mock_object(manila.db, 'share_type_extra_specs_delete', delete_share_type_extra_specs_not_found) req = fakes.HTTPRequest.blank(self.api_path + '/key6') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1, 'key6') self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') @ddt.data( ('1.0', DRIVER_HANDLES_SHARE_SERVERS), ('1.0', SNAPSHOT_SUPPORT), ('2.23', DRIVER_HANDLES_SHARE_SERVERS), ('2.23', SNAPSHOT_SUPPORT), ('2.24', DRIVER_HANDLES_SHARE_SERVERS), ) @ddt.unpack def test_delete_forbidden(self, version, key): req = fakes.HTTPRequest.blank( self.api_path + '/' + key, version=version) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, req, 1, key) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') @ddt.data( get_extra_specs_dict({}), {'foo': 'bar'}, {DRIVER_HANDLES_SHARE_SERVERS + 'foo': True}, {'foo' + DRIVER_HANDLES_SHARE_SERVERS: False}, *[{DRIVER_HANDLES_SHARE_SERVERS: v} for v in 
strutils.TRUE_STRINGS + strutils.FALSE_STRINGS] ) def test_create(self, data): body = {'extra_specs': data} self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=return_create_share_type_extra_specs)) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, 1, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) for k, v in data.items(): self.assertIn(k, res_dict['extra_specs']) self.assertEqual(v, res_dict['extra_specs'][k]) (manila.db.share_type_extra_specs_update_or_create. assert_called_once_with( req.environ['manila.context'], 1, body['extra_specs'])) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') @ddt.data( {"": "value"}, {"k" * 256: "value"}, {"key": ""}, {"key": "v" * 256}, {constants.ExtraSpecs.SNAPSHOT_SUPPORT: "non_boolean"}, ) def test_create_with_invalid_extra_specs(self, extra_specs): self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=return_create_share_type_extra_specs)) body = {"extra_specs": extra_specs} self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, body) self.assertFalse( manila.db.share_type_extra_specs_update_or_create.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_create_key_allowed_chars(self): mock_return_value = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=mock_return_value)) body = get_extra_specs_dict({"other_alphanum.-_:": "value1"}) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = 
fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, 1, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) self.assertEqual(mock_return_value['key1'], res_dict['extra_specs']['other_alphanum.-_:']) (manila.db.share_type_extra_specs_update_or_create. assert_called_once_with( req.environ['manila.context'], 1, body['extra_specs'])) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_create_too_many_keys_allowed_chars(self): mock_return_value = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=mock_return_value)) body = get_extra_specs_dict({ "other_alphanum.-_:": "value1", "other2_alphanum.-_:": "value2", "other3_alphanum.-_:": "value3" }) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, 1, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) self.assertEqual(mock_return_value['key1'], res_dict['extra_specs']['other_alphanum.-_:']) self.assertEqual(mock_return_value['key2'], res_dict['extra_specs']['other2_alphanum.-_:']) self.assertEqual(mock_return_value['key3'], res_dict['extra_specs']['other3_alphanum.-_:']) (manila.db.share_type_extra_specs_update_or_create. 
assert_called_once_with(req_context, 1, body['extra_specs'])) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_update_item(self): self.mock_object( manila.db, 'share_type_extra_specs_update_or_create', mock.Mock(return_value=return_create_share_type_extra_specs)) body = {DRIVER_HANDLES_SHARE_SERVERS: True} self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank( self.api_path + '/' + DRIVER_HANDLES_SHARE_SERVERS) req_context = req.environ['manila.context'] res_dict = self.controller.update( req, 1, DRIVER_HANDLES_SHARE_SERVERS, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) self.assertTrue(res_dict[DRIVER_HANDLES_SHARE_SERVERS]) (manila.db.share_type_extra_specs_update_or_create. assert_called_once_with(req_context, 1, body)) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'update') def test_update_item_too_many_keys(self): self.mock_object(manila.db, 'share_type_extra_specs_update_or_create') body = {"key1": "value1", "key2": "value2"} req = fakes.HTTPRequest.blank(self.api_path + '/key1') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'key1', body) self.assertFalse( manila.db.share_type_extra_specs_update_or_create.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'update') def test_update_item_body_uri_mismatch(self): self.mock_object(manila.db, 'share_type_extra_specs_update_or_create') body = {"key1": "value1"} req = fakes.HTTPRequest.blank(self.api_path + '/bad') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'bad', body) self.assertFalse( manila.db.share_type_extra_specs_update_or_create.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'update') @ddt.data(None, {}, {"extra_specs": {DRIVER_HANDLES_SHARE_SERVERS: 
""}}) def test_update_invalid_body(self, body): req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') req_context = req.environ['manila.context'] req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, '1', body) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'update') @ddt.data( None, {}, {'foo': {'a': 'b'}}, {'extra_specs': 'string'}, {"extra_specs": {"ke/y1": "value1"}}, {"key1": "value1", "ke/y2": "value2", "key3": "value3"}, {"extra_specs": {DRIVER_HANDLES_SHARE_SERVERS: ""}}, {"extra_specs": {DRIVER_HANDLES_SHARE_SERVERS: "111"}}, {"extra_specs": {"": "value"}}, {"extra_specs": {"t": get_large_string()}}, {"extra_specs": {get_large_string(): get_large_string()}}, {"extra_specs": {get_large_string(): "v"}}, {"extra_specs": {"k": ""}}) def test_create_invalid_body(self, body): req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs') req_context = req.environ['manila.context'] req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, '1', body) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_share_unmanage.py0000664000175000017500000002723300000000000023155 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt import webob from manila.api.v1 import share_unmanage from manila.common import constants from manila import exception from manila import policy from manila.share import api as share_api from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes @ddt.ddt class ShareUnmanageTest(test.TestCase): """Share Unmanage Test.""" def setUp(self): super(ShareUnmanageTest, self).setUp() self.controller = share_unmanage.ShareUnmanageController() self.resource_name = self.controller.resource_name self.mock_object(share_api.API, 'get_all', stubs.stub_get_all_shares) self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'update', stubs.stub_share_update) self.mock_object(share_api.API, 'delete', stubs.stub_share_delete) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.share_id = 'fake' self.request = fakes.HTTPRequest.blank( '/share/%s/unmanage' % self.share_id, use_admin_context=True ) self.context = self.request.environ['manila.context'] self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) @ddt.data(constants.STATUS_AVAILABLE, constants.STATUS_MANAGE_ERROR) def test_unmanage_share(self, status): share = dict(status=status, id='foo_id', instance={}) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[])) self.mock_object( self.controller.share_api.db, 'share_backups_get_all', mock.Mock(return_value=[])) actual_result = self.controller.unmanage(self.request, share['id']) self.assertEqual(202, actual_result.status_int) (self.controller.share_api.db.share_snapshot_get_all_for_share. 
assert_called_once_with( self.request.environ['manila.context'], share['id'])) filters = {'share_id': 'foo_id'} (self.controller.share_api.db.share_backups_get_all. assert_called_once_with( self.request.environ['manila.context'], filters)) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) share_api.API.unmanage.assert_called_once_with( self.request.environ['manila.context'], share) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_unmanage_share_that_has_snapshots(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) snapshots = ['foo', 'bar'] self.mock_object(self.controller.share_api, 'unmanage') self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=snapshots)) self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.assertFalse(self.controller.share_api.unmanage.called) (self.controller.share_api.db.share_snapshot_get_all_for_share. 
assert_called_once_with( self.request.environ['manila.context'], share['id'])) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_unmanage_share_that_has_backups(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) backups = ['foo', 'bar'] self.mock_object(self.controller.share_api, 'unmanage') self.mock_object( self.controller.share_api.db, 'share_backups_get_all', mock.Mock(return_value=backups)) self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.assertFalse(self.controller.share_api.unmanage.called) filters = {'share_id': 'foo_id'} (self.controller.share_api.db.share_backups_get_all. assert_called_once_with( self.request.environ['manila.context'], filters)) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_unmanage_share_that_has_replicas(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}, has_replicas=True) mock_api_unmanage = self.mock_object(self.controller.share_api, 'unmanage') mock_db_snapshots_get = self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share') mock_db_backups_get = self.mock_object( self.controller.share_api.db, 'share_backups_get_all') self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPConflict, self.controller.unmanage, self.request, share['id']) self.assertFalse(mock_api_unmanage.called) self.assertFalse(mock_db_snapshots_get.called) self.assertFalse(mock_db_backups_get.called) self.controller.share_api.get.assert_called_once_with( 
self.request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_unmanage_share_that_has_been_soft_deleted(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}, is_soft_deleted=True) mock_api_unmanage = self.mock_object(self.controller.share_api, 'unmanage') mock_db_snapshots_get = self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share') self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.assertFalse(mock_api_unmanage.called) self.assertFalse(mock_db_snapshots_get.called) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_unmanage_share_based_on_share_server(self): share = dict(instance=dict(share_server_id='foo_id'), id='bar_id') self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') @ddt.data(*constants.TRANSITIONAL_STATUSES) def test_unmanage_share_with_transitional_state(self, share_status): share = dict(status=share_status, id='foo_id', instance={}) self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.context, 
self.resource_name, 'unmanage') def test_unmanage_share_not_found(self): self.mock_object(share_api.API, 'get', mock.Mock( side_effect=exception.NotFound)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.unmanage, self.request, self.share_id) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') @ddt.data(exception.InvalidShare(reason="fake"), exception.PolicyNotAuthorized(action="fake"),) def test_unmanage_share_invalid(self, side_effect): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock( side_effect=side_effect)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, self.request, self.share_id) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_unmanage_allow_dhss_true_with_share_server(self): share = { 'status': constants.STATUS_AVAILABLE, 'id': 'foo_id', 'instance': '', 'share_server_id': 'fake' } self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[])) actual_result = self.controller._unmanage(self.request, share['id'], allow_dhss_true=True) self.assertEqual(202, actual_result.status_int) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') def test_wrong_permissions(self): share_id = 'fake' req = fakes.HTTPRequest.blank('/share/%s/unmanage' % share_id, use_admin_context=False) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, req, share_id) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'unmanage') 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v1/test_shares.py0000664000175000017500000022723200000000000021466 0ustar00zuulzuul00000000000000# Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils import webob from manila.api import common from manila.api.v1 import shares from manila.common import constants from manila import context from manila import db from manila import exception from manila.lock import api as resource_locks from manila import policy from manila.share import api as share_api from manila.share import share_types from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes from manila.tests import db_utils from manila import utils CONF = cfg.CONF @ddt.ddt class ShareAPITest(test.TestCase): """Share API Test.""" def setUp(self): super(ShareAPITest, self).setUp() self.controller = shares.ShareController() self.mock_object(db, 'availability_zone_get') self.mock_object(share_api.API, 'get_all', stubs.stub_get_all_shares) self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'update', stubs.stub_share_update) self.mock_object(share_api.API, 'delete', stubs.stub_share_delete) self.mock_object(share_api.API, 'get_snapshot', 
stubs.stub_snapshot_get) self.mock_object(share_types, 'get_share_type', stubs.stub_share_type_get) self.mock_object( common, 'validate_public_share_policy', mock.Mock(side_effect=lambda *args, **kwargs: args[1])) self.resource_name = self.controller.resource_name self.mock_policy_check = self.mock_object(policy, 'check_policy') self.maxDiff = None self.share = { "size": 100, "display_name": "Share Test Name", "display_description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "is_public": False, } self.create_mock = mock.Mock( return_value=stubs.stub_share( '1', display_name=self.share['display_name'], display_description=self.share['display_description'], size=100, share_proto=self.share['share_proto'].upper(), availability_zone=self.share['availability_zone']) ) self.vt = { 'id': 'fake_volume_type_id', 'name': 'fake_volume_type_name', 'required_extra_specs': { 'driver_handles_share_servers': 'False' }, 'extra_specs': { 'driver_handles_share_servers': 'False' } } CONF.set_default("default_share_type", None) def _get_expected_share_detailed_response(self, values=None, admin=False): share = { 'id': '1', 'name': 'displayname', 'availability_zone': 'fakeaz', 'description': 'displaydesc', 'export_location': 'fake_location', 'export_locations': ['fake_location', 'fake_location2'], 'project_id': 'fakeproject', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'share_proto': 'FAKEPROTO', 'metadata': {}, 'size': 1, 'snapshot_id': '2', 'share_network_id': None, 'status': 'fakestatus', 'share_type': '1', 'volume_type': '1', 'snapshot_support': True, 'is_public': False, 'links': [ { 'href': 'http://localhost/share/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/shares/1', 'rel': 'bookmark' } ], } if values: if 'display_name' in values: values['name'] = values.pop('display_name') if 'display_description' in values: values['description'] = values.pop('display_description') share.update(values) if 
share.get('share_proto'): share['share_proto'] = share['share_proto'].upper() if admin: share['share_server_id'] = 'fake_share_server_id' share['host'] = 'fakehost' return {'share': share} @ddt.data("1.0", "2.0", "2.1") def test_share_create_original(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/fake/shares', version=microversion) res_dict = self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') expected = self._get_expected_share_detailed_response(self.share) expected['share'].pop('snapshot_support') self.assertEqual(expected, res_dict) @ddt.data("2.2", "2.3") def test_share_create_with_snapshot_support_without_cg(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/v1/fake/shares', version=microversion) res_dict = self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') expected = self._get_expected_share_detailed_response(self.share) self.assertEqual(expected, res_dict) def test_share_create_with_valid_default_share_type(self): self.mock_object(share_types, 'get_share_type_by_name', mock.Mock(return_value=self.vt)) CONF.set_default("default_share_type", self.vt['name']) self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/v1/fake/shares') res_dict = self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') expected = self._get_expected_share_detailed_response(self.share) expected['share'].pop('snapshot_support') share_types.get_share_type_by_name.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 
self.vt['name']) self.assertEqual(expected, res_dict) def test_share_create_with_invalid_default_share_type(self): self.mock_object( share_types, 'get_default_share_type', mock.Mock(side_effect=exception.ShareTypeNotFoundByName( self.vt['name'])), ) CONF.set_default("default_share_type", self.vt['name']) req = fakes.HTTPRequest.blank('/v1/fake/shares') self.assertRaises(exception.ShareTypeNotFoundByName, self.controller.create, req, {'share': self.share}) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') share_types.get_default_share_type.assert_called_once_with() def test_share_create_with_dhss_true_and_network_notexist(self): fake_share_type = { 'id': 'fake_volume_type_id', 'name': 'fake_volume_type_name', 'extra_specs': { 'driver_handles_share_servers': True, } } self.mock_object( share_types, 'get_default_share_type', mock.Mock(return_value=fake_share_type), ) CONF.set_default("default_share_type", fake_share_type['name']) req = fakes.HTTPRequest.blank('/v1/fake/shares') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, {'share': self.share}) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') share_types.get_default_share_type.assert_called_once_with() def test_share_create_with_share_net(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "share_network_id": "fakenetid" } fake_network = {'id': 'fakenetid'} create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id'])) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=fake_network)) 
self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value={'id': 'fakesubnetid'})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v1/fake/shares') res_dict = self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') expected = self._get_expected_share_detailed_response(shr) expected['share'].pop('snapshot_support') common.check_share_network_is_active.assert_called_once_with( fake_network) self.assertEqual(expected, res_dict) # pylint: disable=unsubscriptable-object self.assertEqual("fakenetid", create_mock.call_args[1]['share_network_id']) def test_share_create_mount_point_name(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "mount_point_name": "fake_mp" } fake_network = {'id': 'fakenetid'} create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), mount_point_name=shr['mount_point_name'])) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=fake_network)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value={'id': 'fakesubnetid'})) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v1/fake/shares') self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') def test_share_create_with_share_net_not_active(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", 
"availability_zone": "zone1:host1", "share_network_id": "fakenetid" } share_network = db_utils.create_share_network( status=constants.STATUS_NETWORK_CHANGE) create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id'])) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=share_network)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(side_effect=webob.exc.HTTPBadRequest())) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/shares') self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') common.check_share_network_is_active.assert_called_once_with( share_network) def test_share_create_from_snapshot_without_share_net_no_parent(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), availability_zone=shr['availability_zone'], snapshot_id=shr['snapshot_id'], share_network_id=shr['share_network_id'])) self.mock_object(share_api.API, 'create', create_mock) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v1/fake/shares') res_dict = self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') expected = self._get_expected_share_detailed_response(shr) expected['share'].pop('snapshot_support') 
self.assertEqual(expected, res_dict) def test_share_create_from_snapshot_without_share_net_parent_exists(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } parent_share_net = 444 fake_share_net = {'id': parent_share_net} share_net_subnets = [db_utils.create_share_network_subnet( id='fake_subnet_id', share_network_id=fake_share_net['id'])] create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) parent_share = stubs.stub_share( '1', instance={'share_network_id': parent_share_net}, create_share_from_snapshot_support=True) self.mock_object(share_api.API, 'get', mock.Mock( return_value=parent_share)) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=fake_share_net)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=share_net_subnets)) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v1/fake/shares') res_dict = self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') expected = self._get_expected_share_detailed_response(shr) expected['share'].pop('snapshot_support') common.check_share_network_is_active.assert_called_once_with( fake_share_net) self.assertEqual(expected, res_dict) # pylint: disable=unsubscriptable-object self.assertEqual(parent_share_net, 
create_mock.call_args[1]['share_network_id']) def test_share_create_from_snapshot_with_share_net_equals_parent(self): parent_share_net = 444 shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": parent_share_net } fake_share_net = {'id': parent_share_net} share_net_subnets = [db_utils.create_share_network_subnet( id='fake_subnet_id', share_network_id=fake_share_net['id'])] create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) parent_share = stubs.stub_share( '1', instance={'share_network_id': parent_share_net}, create_share_from_snapshot_support=True) self.mock_object(share_api.API, 'get', mock.Mock( return_value=parent_share)) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=fake_share_net)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=share_net_subnets)) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v1/fake/shares') res_dict = self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') expected = self._get_expected_share_detailed_response(shr) expected['share'].pop('snapshot_support') common.check_share_network_is_active.assert_called_once_with( fake_share_net) self.assertEqual(expected, res_dict) # pylint: disable=unsubscriptable-object self.assertEqual(parent_share_net, 
create_mock.call_args[1]['share_network_id']) def test_share_create_from_snapshot_invalid_share_net(self): self.mock_object(share_api.API, 'create') shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": 1234 } body = {"share": shr} req = fakes.HTTPRequest.blank('/v1/fake/shares') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') def test_share_create_from_mount_point_name(self): parent_share_net = 444 shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": parent_share_net, "mount_point_name": "fake_mp" } fake_share_net = {'id': parent_share_net} share_net_subnets = [db_utils.create_share_network_subnet( id='fake_subnet_id', share_network_id=fake_share_net['id'])] create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], mount_point_name=shr['mount_point_name'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id'], ))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) parent_share = stubs.stub_share( '1', instance={'share_network_id': parent_share_net}, create_share_from_snapshot_support=True) self.mock_object(share_api.API, 'get', mock.Mock( return_value=parent_share)) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=fake_share_net)) self.mock_object( db, 
'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=share_net_subnets)) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v1/fake/shares', version='2.84') res_dict = self.controller.create(req, body) self.assertEqual(res_dict['share']['project_id'], 'fakeproject') @ddt.data( {'name': 'name1', 'description': 'x' * 256}, {'name': 'x' * 256, 'description': 'description1'}, ) @ddt.unpack def test_share_create_invalid_input(self, name, description): self.mock_object(share_api.API, 'create') shr = { "size": 100, "name": name, "description": description, "share_proto": "fakeproto", "availability_zone": "zone1:host1", } body = {"share": shr} req = fakes.HTTPRequest.blank('/v1/fake/shares') self.assertRaises(exception.InvalidInput, self.controller.create, req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') @ddt.data("1.0", "2.0") def test_share_create_from_snapshot_not_supported(self, microversion): # This create operation should work, because the 1.0 API doesn't check # create_share_from_snapshot_support. 
parent_share_net = 444 shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": parent_share_net } fake_share_net = {'id': parent_share_net} share_net_subnets = [db_utils.create_share_network_subnet( id='fake_subnet_id', share_network_id=fake_share_net['id'])] create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) parent_share = stubs.stub_share( '1', instance={'share_network_id': parent_share_net}, create_share_from_snapshot_support=False) self.mock_object(share_api.API, 'get', mock.Mock( return_value=parent_share)) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=fake_share_net)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=share_net_subnets)) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v1/fake/shares', version=microversion) res_dict = self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') expected = self._get_expected_share_detailed_response(shr) expected['share'].pop('snapshot_support') common.check_share_network_is_active.assert_called_once_with( fake_share_net) self.assertDictEqual(expected, res_dict) # pylint: disable=unsubscriptable-object self.assertEqual(parent_share_net, create_mock.call_args[1]['share_network_id']) def 
test_share_creation_fails_with_bad_size(self): shr = {"size": '', "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1"} body = {"share": shr} req = fakes.HTTPRequest.blank('/fake/shares') self.assertRaises(exception.InvalidInput, self.controller.create, req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') def test_share_create_no_body(self): body = {} req = fakes.HTTPRequest.blank('/fake/shares') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') def test_share_creation_fails_with_invalid_share_type(self): shr = { "size": 1, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "share_type": "Invalid share type" } body = {"share": shr} req = fakes.HTTPRequest.blank('/fake/shares') with mock.patch('manila.share.share_types.get_share_type_by_name', side_effect=exception.InvalidShareType(reason='')): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'create') def test_share_create_invalid_availability_zone(self): self.mock_object( db, 'availability_zone_get', mock.Mock(side_effect=exception.AvailabilityZoneNotFound(id='id')) ) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/fake/shares') self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) @ddt.data((exception.ShareNetworkNotFound(share_network_id='fake'), webob.exc.HTTPNotFound), (mock.Mock(), webob.exc.HTTPBadRequest)) @ddt.unpack def test_share_create_invalid_subnet(self, share_network_side_effect, exception_to_raise): fake_share_with_sn = copy.deepcopy(self.share) 
fake_share_with_sn['share_network_id'] = 'fakenetid' self.mock_object(db, 'share_network_get', mock.Mock(side_effect=share_network_side_effect)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=None)) self.mock_object(common, 'check_share_network_is_active') body = {"share": fake_share_with_sn} req = fakes.HTTPRequest.blank('/fake/shares') self.assertRaises(exception_to_raise, self.controller.create, req, body) def test_share_show(self): req = fakes.HTTPRequest.blank('/fake/shares/1') expected = self._get_expected_share_detailed_response() expected['share'].pop('snapshot_support') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_with_share_type_name(self): req = fakes.HTTPRequest.blank('/fake/shares/1', version='2.6') res_dict = self.controller.show(req, '1') expected = self._get_expected_share_detailed_response() expected['share']['share_type_name'] = None expected['share']['task_state'] = None self.assertEqual(expected, res_dict) def test_share_show_admin(self): req = fakes.HTTPRequest.blank('/fake/shares/1', use_admin_context=True) expected = self._get_expected_share_detailed_response(admin=True) expected['share'].pop('snapshot_support') res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/fake/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '1') def test_share_delete(self): req = fakes.HTTPRequest.blank('/fake/shares/1') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_share_update(self): shr = self.share body = {"share": shr} req = fakes.HTTPRequest.blank('/share/1') res_dict = self.controller.update(req, 1, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'update') 
self.assertEqual(shr["display_name"], res_dict['share']["name"]) self.assertEqual(shr["display_description"], res_dict['share']["description"]) self.assertEqual(shr['is_public'], res_dict['share']['is_public']) def test_share_not_updates_size(self): req = fakes.HTTPRequest.blank('/share/1') res_dict = self.controller.update(req, 1, {"share": self.share}) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'update') self.assertNotEqual(res_dict['share']["size"], self.share["size"]) def test_share_delete_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/fake/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1) def _share_list_summary_with_search_opts(self, use_admin_context): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', } if use_admin_context: search_opts['host'] = 'fake_host' # fake_key should be filtered for non-admin url = '/fake/shares?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) shares = [ {'id': 'id1', 'display_name': 'n1'}, {'id': 'id2', 'display_name': 'n2'}, {'id': 'id3', 'display_name': 'n3'}, ] self.mock_object(share_api.API, 'get_all', mock.Mock(return_value=[shares[1]])) result = self.controller.index(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': 
search_opts['share_type_id'], 'snapshot_id': search_opts['snapshot_id'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', 'limit': '1', 'offset': '1' } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) search_opts_expected['host'] = search_opts['host'] share_api.API.get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) def test_share_list_summary_with_search_opts_by_non_admin(self): self._share_list_summary_with_search_opts(use_admin_context=False) def test_share_list_summary_with_search_opts_by_admin(self): self._share_list_summary_with_search_opts(use_admin_context=True) def test_share_list_summary(self): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) req = fakes.HTTPRequest.blank('/fake/shares') res_dict = self.controller.index(req) expected = { 'shares': [ { 'name': 'displayname', 'id': '1', 'links': [ { 'href': 'http://localhost/share/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/shares/1', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) def _share_list_detail_with_search_opts(self, use_admin_context): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', } if use_admin_context: 
search_opts['host'] = 'fake_host' # fake_key should be filtered for non-admin url = '/fake/shares/detail?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) shares = [ {'id': 'id1', 'display_name': 'n1'}, { 'id': 'id2', 'display_name': 'n2', 'status': constants.STATUS_AVAILABLE, 'snapshot_id': 'fake_snapshot_id', 'instance': {'host': 'fake_host', 'share_network_id': 'fake_share_network_id', 'share_type_id': 'fake_share_type_id'}, }, {'id': 'id3', 'display_name': 'n3'}, ] self.mock_object(share_api.API, 'get_all', mock.Mock(return_value=[shares[1]])) result = self.controller.detail(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': search_opts['share_type_id'], 'snapshot_id': search_opts['snapshot_id'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', 'limit': '1', 'offset': '1' } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) search_opts_expected['host'] = search_opts['host'] share_api.API.get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) self.assertEqual( shares[1]['status'], result['shares'][0]['status']) self.assertEqual( shares[1]['instance']['share_type_id'], result['shares'][0]['share_type']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) if use_admin_context: self.assertEqual( shares[1]['instance']['host'], 
result['shares'][0]['host']) self.assertEqual( shares[1]['instance']['share_network_id'], result['shares'][0]['share_network_id']) def test_share_list_detail_with_search_opts_by_non_admin(self): self._share_list_detail_with_search_opts(use_admin_context=False) def test_share_list_detail_with_search_opts_by_admin(self): self._share_list_detail_with_search_opts(use_admin_context=True) def _list_detail_common_expected(self, admin=False): share_dict = { 'status': 'fakestatus', 'description': 'displaydesc', 'export_location': 'fake_location', 'export_locations': ['fake_location', 'fake_location2'], 'availability_zone': 'fakeaz', 'name': 'displayname', 'share_proto': 'FAKEPROTO', 'metadata': {}, 'project_id': 'fakeproject', 'id': '1', 'snapshot_id': '2', 'snapshot_support': True, 'share_network_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'size': 1, 'share_type': '1', 'volume_type': '1', 'is_public': False, 'links': [ { 'href': 'http://localhost/share/v1/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/shares/1', 'rel': 'bookmark' } ], } if admin: share_dict['host'] = 'fakehost' return {'shares': [share_dict]} def _list_detail_test_common(self, req, expected): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) res_dict = self.controller.detail(req) self.assertEqual(expected, res_dict) self.assertEqual(res_dict['shares'][0]['volume_type'], res_dict['shares'][0]['share_type']) def test_share_list_detail(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/fake/shares/detail', environ=env) expected = self._list_detail_common_expected() expected['shares'][0].pop('snapshot_support') self._list_detail_test_common(req, expected) def test_share_list_detail_with_task_state(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/fake/shares/detail', environ=env, version="2.5") expected = self._list_detail_common_expected() 
expected['shares'][0]['task_state'] = None self._list_detail_test_common(req, expected) def test_remove_invalid_options(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=False) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'c': 'c'} allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def test_remove_invalid_options_admin(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=True) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def _fake_access_get(self, ctxt, access_id): class Access(object): def __init__(self, **kwargs): self.STATE_NEW = 'fake_new' self.STATE_ACTIVE = 'fake_active' self.STATE_ERROR = 'fake_error' self.params = kwargs self.params['state'] = self.STATE_NEW self.share_id = kwargs.get('share_id') self.id = access_id def __getitem__(self, item): return self.params[item] access = Access(access_id=access_id, share_id='fake_share_id') return access @ddt.ddt class ShareActionsTest(test.TestCase): def setUp(self): super(ShareActionsTest, self).setUp() self.controller = shares.ShareController() self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_policy_check = self.mock_object(policy, 'check_policy') @ddt.data( {'access_type': 'ip', 'access_to': '127.0.0.1'}, {'access_type': 'user', 'access_to': '1' * 4}, {'access_type': 'user', 'access_to': '1' * 255}, {'access_type': 'user', 'access_to': 'fake{.-_\'`}'}, {'access_type': 'user', 'access_to': 'MYDOMAIN-Administrator'}, {'access_type': 'user', 'access_to': 'test group name'}, {'access_type': 'user', 'access_to': 'group$.-_\'`{}'}, {'access_type': 'cert', 'access_to': 'x'}, {'access_type': 'cert', 'access_to': 'tenant.example.com'}, 
{'access_type': 'cert', 'access_to': 'x' * 64}, {'access_type': 'cert', 'access_to': 'x' * 64, 'lock_visibility': True}, {'access_type': 'cert', 'access_to': 'x' * 64, 'lock_deletion': True}, {'access_type': 'cert', 'access_to': 'x' * 64, 'lock_deletion': True}, {'access_type': 'cert', 'access_to': 'x' * 64, 'lock_deletion': True, 'lock_visibility': True, 'lock_reason': 'locked_for_testing'}, ) def test_allow_access(self, access): self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value={'fake': 'fake'})) self.mock_object(self.controller._access_view_builder, 'view', mock.Mock(return_value={'access': {'fake': 'fake'}})) self.mock_object(self.controller, '_create_access_locks') id = 'fake_share_id' body = {'os-allow_access': access} expected = {'access': {'fake': 'fake'}} req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id) lock_visibility = access.pop('lock_visibility', None) lock_deletion = access.pop('lock_deletion', None) lock_reason = access.pop('lock_reason', None) res = self.controller._allow_access( req, id, body, lock_visibility=lock_visibility, lock_deletion=lock_deletion, lock_reason=lock_reason ) self.assertEqual(expected, res) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 'share', 'allow_access') if lock_visibility or lock_deletion: self.controller._create_access_locks.assert_called_once_with( req.environ['manila.context'], expected['access'], lock_deletion=lock_deletion, lock_visibility=lock_visibility, lock_reason=lock_reason ) @ddt.data( {'lock_visibility': True, 'lock_deletion': True, 'lock_reason': 'test lock reason'}, {'lock_visibility': True, 'lock_deletion': False, 'lock_reason': None}, {'lock_visibility': False, 'lock_deletion': True, 'lock_reason': None}, ) @ddt.unpack def test__create_access_locks(self, lock_visibility, lock_deletion, lock_reason): access = { 'id': 'fake', 'access_type': 'ip', 'access_to': '127.0.0.1', 'share_id': 'fake_share_id' } mock_deletion_lock_create = 
mock.Mock() lock_id = 'fake_lock_id' if lock_deletion: mock_deletion_lock_create = mock.Mock( side_effect=[ {'id': lock_id}, {'id': f'{lock_id}2'}, {'id': f'{lock_id}3'} ] ) self.mock_object( resource_locks.API, 'create', mock_deletion_lock_create ) id = 'fake_share_id' req = fakes.HTTPRequest.blank( '/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] access['project_id'] = context.project_id access['user_id'] = context.user_id self.controller._create_access_locks( req.environ['manila.context'], access, lock_deletion=lock_deletion, lock_visibility=lock_visibility, lock_reason=lock_reason ) restrict_calls = [] if lock_deletion: share_lock_reason = ( constants.SHARE_LOCKED_BY_ACCESS_LOCK_REASON % {'lock_id': lock_id} ) restrict_calls.append( mock.call( context, resource_id=access['id'], resource_type='access_rule', resource_action=constants.RESOURCE_ACTION_DELETE, resource=access, lock_reason=lock_reason ) ) restrict_calls.append( mock.call( context, resource_id=access['share_id'], resource_type='share', resource_action=constants.RESOURCE_ACTION_DELETE, lock_reason=share_lock_reason ) ) if lock_visibility: restrict_calls.append( mock.call( context, resource_id=access['id'], resource_type='access_rule', resource_action=constants.RESOURCE_ACTION_SHOW, resource=access, lock_reason=lock_reason ) ) resource_locks.API.create.assert_has_calls(restrict_calls) def test__create_access_visibility_locks_creation_failed(self): access = { 'id': 'fake', 'access_type': 'ip', 'access_to': '127.0.0.1', } lock_reason = 'locked for testing' self.mock_object( resource_locks.API, 'create', mock.Mock(side_effect=exception.NotAuthorized) ) id = 'fake_share_id' req = fakes.HTTPRequest.blank( '/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] access['project_id'] = context.project_id access['user_id'] = context.user_id self.assertRaises( webob.exc.HTTPBadRequest, self.controller._create_access_locks, 
req.environ['manila.context'], access, lock_deletion=False, lock_visibility=True, lock_reason=lock_reason ) resource_locks.API.create.assert_called_once_with( context, resource_id=access['id'], resource_type='access_rule', resource_action=constants.RESOURCE_ACTION_SHOW, resource=access, lock_reason=lock_reason) def test__create_access_deletion_locks_creation_failed(self): access = { 'id': 'fake', 'access_type': 'ip', 'access_to': '127.0.0.1', } lock_reason = 'locked for testing' self.mock_object( resource_locks.API, 'create', mock.Mock(side_effect=exception.NotAuthorized) ) id = 'fake_share_id' req = fakes.HTTPRequest.blank( '/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] access['project_id'] = context.project_id access['user_id'] = context.user_id self.assertRaises( webob.exc.HTTPBadRequest, self.controller._create_access_locks, req.environ['manila.context'], access, lock_deletion=True, lock_visibility=False, lock_reason=lock_reason ) resource_locks.API.create.assert_called_once_with( context, resource_id=access['id'], resource_type='access_rule', resource_action=constants.RESOURCE_ACTION_DELETE, resource=access, lock_reason=lock_reason) @ddt.data( {'lock_visibility': True, 'lock_deletion': True, 'lock_reason': 'test lock reason'}, {'lock_visibility': True, 'lock_deletion': False, 'lock_reason': None}, {'lock_visibility': False, 'lock_deletion': True, 'lock_reason': None}, ) @ddt.unpack def test_allow_access_visibility_restrictions(self, lock_visibility, lock_deletion, lock_reason): access = {'id': 'fake', 'share_id': 'fake_share_id'} expected_access = {'access': {'fake_key': 'fake_value'}} self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value=access)) self.mock_object(self.controller._access_view_builder, 'view', mock.Mock(return_value=expected_access)) self.mock_object(self.controller, '_create_access_locks') id = 'fake_share_id' body = { 'allow_access': { 'access_type': 'ip', 'access_to': '127.0.0.1', 
'lock_visibility': lock_visibility, 'lock_deletion': lock_deletion, 'lock_reason': lock_reason } } req = fakes.HTTPRequest.blank( '/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] access['project_id'] = context.project_id access['user_id'] = context.user_id res = self.controller._allow_access( req, id, body, lock_visibility=lock_visibility, lock_deletion=lock_deletion, lock_reason=lock_reason) self.assertEqual(expected_access, res) self.mock_policy_check.assert_called_once_with( context, 'share', 'allow_access') self.controller._create_access_locks.assert_called_once_with( context, access, lock_deletion=lock_deletion, lock_visibility=lock_visibility, lock_reason=lock_reason ) def test_allow_access_with_network_id(self): share_network = db_utils.create_share_network() share = db_utils.create_share(share_network_id=share_network['id']) access = {'access_type': 'user', 'access_to': '1' * 4} self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value={'fake': 'fake'})) self.mock_object(self.controller._access_view_builder, 'view', mock.Mock(return_value={'access': {'fake': 'fake'}})) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) id = 'fake_share_id' body = {'os-allow_access': access} expected = {'access': {'fake': 'fake'}} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) res = self.controller._allow_access(req, id, body) self.assertEqual(expected, res) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 'share', 'allow_access') @ddt.data( {'access_type': 'error_type', 'access_to': '127.0.0.1'}, {'access_type': 'ip', 'access_to': 'localhost'}, {'access_type': 'ip', 'access_to': '127.0.0.*'}, {'access_type': 'ip', 'access_to': '127.0.0.0/33'}, {'access_type': 'ip', 'access_to': '127.0.0.256'}, {'access_type': 'user', 'access_to': '1'}, {'access_type': 'user', 'access_to': '1' * 3}, {'access_type': 'user', 'access_to': '1' * 256}, {'access_type': 'user', 
'access_to': 'root<>'}, {'access_type': 'user', 'access_to': 'group\\'}, {'access_type': 'user', 'access_to': '+=*?group'}, {'access_type': 'cert', 'access_to': ''}, {'access_type': 'cert', 'access_to': ' '}, {'access_type': 'cert', 'access_to': 'x' * 65}, {'access_type': 'cephx', 'access_to': 'alice'}, {'access_type': 'ip', 'access_to': '127.0.0.0/24', 'lock_reason': 'fake_lock_reason'}, ) def test_allow_access_error(self, access): id = 'fake_share_id' lock_reason = access.pop('lock_reason', None) body = {'os-allow_access': access} req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._allow_access, req, id, body, lock_reason=lock_reason) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 'share', 'allow_access') def test_deny_access(self): def _stub_deny_access(*args, **kwargs): pass self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) self.mock_object(self.controller, '_check_for_access_rule_locks') id = 'fake_share_id' body = {"os-deny_access": {"access_id": 'fake_acces_id'}} req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id) res = self.controller._deny_access(req, id, body) self.assertEqual(202, res.status_int) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 'share', 'deny_access') def test_deny_access_with_share_network_id(self): self.mock_object(share_api.API, "deny_access", mock.Mock()) self.mock_object(share_api.API, "access_get", _fake_access_get) share_network = db_utils.create_share_network() share = db_utils.create_share(share_network_id=share_network['id']) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(self.controller, '_check_for_access_rule_locks') id = 'fake_share_id' access_data = {"access_id": 'fake_acces_id'} body = {"os-deny_access": access_data} req = 
fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) res = self.controller._deny_access(req, id, body) self.assertEqual(202, res.status_int) self.controller._check_for_access_rule_locks.assert_called_once_with( req.environ['manila.context'], access_data, access_data['access_id'], id ) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 'share', 'deny_access') def test_deny_access_not_found(self): def _stub_deny_access(*args, **kwargs): pass self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) self.mock_object(self.controller, '_check_for_access_rule_locks') id = 'super_fake_share_id' body = {"os-deny_access": {"access_id": 'fake_acces_id'}} req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPNotFound, self.controller._deny_access, req, id, body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 'share', 'deny_access') def test_deny_access_delete_locks(self): def _stub_deny_access(*args, **kwargs): pass self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) self.mock_object(self.controller, '_check_for_access_rule_locks') id = 'fake_share_id' body_data = {"access_id": 'fake_acces_id'} body = {"deny_access": body_data} req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] res = self.controller._deny_access(req, id, body) self.assertEqual(202, res.status_int) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 'share', 'deny_access') self.controller._check_for_access_rule_locks.assert_called_once_with( context, body['deny_access'], body_data['access_id'], id ) def test__check_for_access_rule_locks_no_locks(self): self.mock_object( resource_locks.API, "get_all", mock.Mock(return_value=([], 0))) req = 
fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] access_id = 'fake_access_id' share_id = 'fake_share_id' self.mock_object(context, 'elevated', mock.Mock(return_value=context)) self.controller._check_for_access_rule_locks( context, {}, access_id, share_id) delete_search_opts = { 'resource_id': access_id, 'resource_action': constants.RESOURCE_ACTION_DELETE, 'all_projects': True, } resource_locks.API.get_all.assert_called_once_with( context, search_opts=delete_search_opts, show_count=True ) def test__check_for_access_rules_locks_too_many_locks(self): locks = [{'id': f'lock_id_{i}'} for i in range(4)] self.mock_object( resource_locks.API, "get_all", mock.Mock(return_value=(locks, len(locks)))) req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] access_id = 'fake_access_id' share_id = 'fake_share_id' self.mock_object(context, 'elevated', mock.Mock(return_value=context)) self.assertRaises( webob.exc.HTTPForbidden, self.controller._check_for_access_rule_locks, context, {}, access_id, share_id) delete_search_opts = { 'resource_id': access_id, 'resource_action': constants.RESOURCE_ACTION_DELETE, 'all_projects': True, } resource_locks.API.get_all.assert_called_once_with( context, search_opts=delete_search_opts, show_count=True ) def test__check_for_access_rules_cant_manipulate_lock(self): locks = [{ 'id': 'fake_lock_id', 'resource_action': constants.RESOURCE_ACTION_DELETE }] self.mock_object( resource_locks.API, "get_all", mock.Mock(return_value=(locks, len(locks)))) self.mock_object( resource_locks.API, "ensure_context_can_delete_lock", mock.Mock(side_effect=exception.NotAuthorized)) req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] access_id = 'fake_access_id' share_id = 'fake_share_id' self.mock_object(context, 'elevated', mock.Mock(return_value=context)) 
self.assertRaises( webob.exc.HTTPForbidden, self.controller._check_for_access_rule_locks, context, {'unrestrict': True}, access_id, share_id) delete_search_opts = { 'resource_id': access_id, 'resource_action': constants.RESOURCE_ACTION_DELETE, 'all_projects': True, } resource_locks.API.get_all.assert_called_once_with( context, search_opts=delete_search_opts, show_count=True ) (resource_locks.API.ensure_context_can_delete_lock .assert_called_once_with( context, locks[0]['id'])) def test__check_for_access_rules_locks_unauthorized(self): locks = [{ 'id': 'fake_lock_id', 'resource_action': constants.RESOURCE_ACTION_DELETE }] self.mock_object( resource_locks.API, "get_all", mock.Mock(return_value=(locks, len(locks)))) self.mock_object( resource_locks.API, "ensure_context_can_delete_lock", mock.Mock(side_effect=exception.NotAuthorized)) self.mock_object( resource_locks.API, "delete", mock.Mock(side_effect=exception.NotAuthorized)) req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] access_id = 'fake_access_id' share_id = 'fake_share_id' self.mock_object(context, 'elevated', mock.Mock(return_value=context)) self.assertRaises( webob.exc.HTTPForbidden, self.controller._check_for_access_rule_locks, context, {'unrestrict': True}, access_id, share_id ) delete_search_opts = { 'resource_id': access_id, 'resource_action': constants.RESOURCE_ACTION_DELETE, 'all_projects': True, } resource_locks.API.get_all.assert_called_once_with( context, search_opts=delete_search_opts, show_count=True ) (resource_locks.API.ensure_context_can_delete_lock .assert_called_once_with( context, locks[0]['id'])) def test_check_for_access_rules_locks(self): locks = [{ 'id': 'fake_lock_id', 'resource_action': constants.RESOURCE_ACTION_DELETE }] self.mock_object( resource_locks.API, "get_all", mock.Mock(return_value=(locks, len(locks)))) self.mock_object( resource_locks.API, "ensure_context_can_delete_lock") 
self.mock_object(resource_locks.API, "delete") req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id, version='2.82') context = req.environ['manila.context'] access_id = 'fake_access_id' share_id = 'fake_share_id' self.mock_object(context, 'elevated', mock.Mock(return_value=context)) self.controller._check_for_access_rule_locks( context, {'unrestrict': True}, access_id, share_id) delete_search_opts = { 'resource_id': access_id, 'resource_action': constants.RESOURCE_ACTION_DELETE, 'all_projects': True, } resource_locks.API.get_all.assert_called_once_with( context.elevated(), search_opts=delete_search_opts, show_count=True ) (resource_locks.API.ensure_context_can_delete_lock .assert_called_once_with( context, locks[0]['id'])) @ddt.data('_allow_access', '_deny_access') def test_allow_access_deny_access_policy_not_authorized(self, method): req = fakes.HTTPRequest.blank('/tenant1/shares/someuuid/action') action = method[1:] body = {action: None} noauthexc = exception.PolicyNotAuthorized(action=action) with mock.patch.object( policy, 'check_policy', mock.Mock(side_effect=noauthexc)): method = getattr(self.controller, method) self.assertRaises( webob.exc.HTTPForbidden, method, req, body, 'someuuid') policy.check_policy.assert_called_once_with( req.environ['manila.context'], 'share', action) def test_access_list(self): fake_access_list = [ { "state": "fakestatus", "id": "fake_access_id", "access_type": "fakeip", "access_to": "127.0.0.1", } ] self.mock_object(self.controller._access_view_builder, 'list_view', mock.Mock(return_value={'access_list': fake_access_list})) id = 'fake_share_id' body = {"os-access_list": None} req = fakes.HTTPRequest.blank('/tenant1/shares/%s/action' % id) res_dict = self.controller._access_list(req, id, body) self.assertEqual({'access_list': fake_access_list}, res_dict) def test_extend(self): id = 'fake_share_id' share = stubs.stub_share_get(None, None, id) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) 
self.mock_object(share_api.API, "extend") size = '123' body = {"os-extend": {'new_size': size}} req = fakes.HTTPRequest.blank('/fake/shares/%s/action' % id) actual_response = self.controller._extend(req, id, body) share_api.API.get.assert_called_once_with(mock.ANY, id) share_api.API.extend.assert_called_once_with( mock.ANY, share, int(size), force=False) self.assertEqual(202, actual_response.status_int) @ddt.data({"os-extend": ""}, {"os-extend": {"new_size": "foo"}}, {"os-extend": {"new_size": {'foo': 'bar'}}}) def test_extend_invalid_body(self, body): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/fake/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._extend, req, id, body) @ddt.data({'source': exception.InvalidInput, 'target': webob.exc.HTTPBadRequest}, {'source': exception.InvalidShare, 'target': webob.exc.HTTPBadRequest}, {'source': exception.ShareSizeExceedsAvailableQuota, 'target': webob.exc.HTTPForbidden}) @ddt.unpack def test_extend_exception(self, source, target): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/fake/shares/%s/action' % id) body = {"os-extend": {'new_size': '123'}} self.mock_object(share_api.API, "extend", mock.Mock(side_effect=source('fake'))) self.assertRaises(target, self.controller._extend, req, id, body) def test_shrink(self): id = 'fake_share_id' share = stubs.stub_share_get(None, None, id) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, "shrink") size = '123' body = {"os-shrink": {'new_size': size}} req = fakes.HTTPRequest.blank('/fake/shares/%s/action' % id) actual_response = self.controller._shrink(req, id, body) share_api.API.get.assert_called_once_with(mock.ANY, id) share_api.API.shrink.assert_called_once_with( mock.ANY, share, int(size)) self.assertEqual(202, actual_response.status_int) @ddt.data({"os-shrink": ""}, {"os-shrink": {"new_size": "foo"}}, {"os-shrink": {"new_size": {'foo': 'bar'}}}) def test_shrink_invalid_body(self, 
body): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/fake/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._shrink, req, id, body) @ddt.data({'source': exception.InvalidInput, 'target': webob.exc.HTTPBadRequest}, {'source': exception.InvalidShare, 'target': webob.exc.HTTPBadRequest}) @ddt.unpack def test_shrink_exception(self, source, target): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/fake/shares/%s/action' % id) body = {"os-shrink": {'new_size': '123'}} self.mock_object(share_api.API, "shrink", mock.Mock(side_effect=source('fake'))) self.assertRaises(target, self.controller._shrink, req, id, body) @ddt.ddt class ShareAdminActionsAPITest(test.TestCase): def setUp(self): super(ShareAdminActionsAPITest, self).setUp() CONF.set_default("default_share_type", None) self.flags(transport_url='rabbit://fake:fake@mqhost:5672') self.share_api = share_api.API() self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_share_data(self, share=None): if share is None: share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size='1', override_defaults=True) req = webob.Request.blank('/v2/fake/shares/%s/action' % share['id']) return share, req def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None): if body is None: body = {'os-reset_status': {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) if valid_code == 404: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) else: actual_model = db_access_method(ctxt, model['id']) 
self.assertEqual(valid_status, actual_model['status']) @ddt.data( { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.STATUS_AVAILABLE, }, ) @ddt.unpack def test_share_reset_status_with_different_roles(self, role, valid_code, valid_status): share, req = self._setup_share_data() ctxt = self._get_context(role) self._reset_status(ctxt, share, req, db.share_get, valid_code, valid_status) @ddt.data(*fakes.fixture_invalid_reset_status_body) def test_share_invalid_reset_status_body(self, body): share, req = self._setup_share_data() ctxt = self.admin_context self._reset_status(ctxt, share, req, db.share_get, 400, constants.STATUS_AVAILABLE, body) def test_share_reset_status_for_missing(self): fake_share = {'id': 'missing-share-id'} req = webob.Request.blank('/fake/shares/%s/action' % fake_share['id']) self._reset_status(self.admin_context, fake_share, req, db.share_snapshot_get, 404) def _force_delete(self, ctxt, model, req, db_access_method, valid_code, check_model_in_db=False): req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dumps({'os-force_delete': {}}).encode("utf-8") req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response self.assertEqual(valid_code, resp.status_int) if valid_code == 202 and check_model_in_db: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) @ddt.data( {'role': 'admin', 'resp_code': 202}, {'role': 'member', 'resp_code': 403}, ) @ddt.unpack def test_share_force_delete_with_different_roles(self, role, resp_code): share, req = self._setup_share_data() ctxt = self._get_context(role) self._force_delete(ctxt, share, req, db.share_get, resp_code, check_model_in_db=True) def test_share_force_delete_missing(self): share, req = self._setup_share_data(share={'id': 'fake'}) ctxt = self._get_context('admin') self._force_delete(ctxt, share, req, db.share_get, 404) 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.965671 manila-21.0.0/manila/tests/api/v2/0000775000175000017500000000000000000000000016561 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/__init__.py0000664000175000017500000000000000000000000020660 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/stubs.py0000664000175000017500000000447600000000000020306 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from manila.message import message_field from manila.message import message_levels from manila.tests.api import fakes FAKE_UUID = fakes.FAKE_UUID def stub_message(id, **kwargs): message = { 'id': id, 'project_id': 'fake_project', 'action_id': message_field.Action.ALLOCATE_HOST[0], 'message_level': message_levels.ERROR, 'request_id': FAKE_UUID, 'resource_type': message_field.Resource.SHARE, 'resource_id': FAKE_UUID, 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=datetime.timezone.utc), 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=datetime.timezone.utc), 'expires_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=datetime.timezone.utc), 'detail_id': message_field.Detail.NO_VALID_HOST[0], } message.update(kwargs) return message def stub_message_get(self, context, message_id): return stub_message(message_id) def stub_lock(id, **kwargs): lock = { 'id': id, 'project_id': 'f63f7a159f404cfc8604b7065c609691', 'user_id': 'e78f4294e3534e00ae176bd989d6a682', 'resource_id': 'c474badd-f06e-4ff9-ae26-daa00e19867b', 'resource_action': 'delete', 'resource_type': 'share', 'lock_context': 'user', 'lock_reason': 'for the tests', 'updated_at': datetime.datetime(2023, 8, 10, 20, 4, 39, tzinfo=datetime.timezone.utc), 'created_at': datetime.datetime(2023, 1, 10, 15, 3, 1, tzinfo=datetime.timezone.utc), } lock.update(kwargs) return lock ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_availability_zones.py0000664000175000017500000001004400000000000024061 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_utils.fixture import uuidsentinel as uuids from manila.api.v2 import availability_zones from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes @ddt.ddt class AvailabilityZonesAPITest(test.TestCase): @ddt.data( availability_zones.AvailabilityZoneControllerLegacy, availability_zones.AvailabilityZoneController, ) def test_instantiate_controller(self, controller): az_controller = controller() self.assertTrue(hasattr(az_controller, "resource_name")) self.assertEqual("availability_zone", az_controller.resource_name) self.assertTrue(hasattr(az_controller, "_view_builder")) self.assertTrue(hasattr(az_controller._view_builder, "detail_list")) @ddt.data( ('1.0', availability_zones.AvailabilityZoneControllerLegacy), ('2.0', availability_zones.AvailabilityZoneControllerLegacy), ('2.6', availability_zones.AvailabilityZoneControllerLegacy), ('2.7', availability_zones.AvailabilityZoneController), ) @ddt.unpack def test_index(self, version, controller): azs = [ { "id": uuids.fake_id1, "name": "fake_name1", "created_at": "2023-05-03T13:10:50.000000", "updated_at": None, }, { "id": uuids.fake_id2, "name": "fake_name2", "created_at": "2023-05-03T13:10:50.000000", "updated_at": "2023-05-04T23:56:01.000000", "deleted": "False", "redundant_key": "redundant_value", }, ] mock_policy_check = self.mock_object(policy, 'check_policy') self.mock_object(availability_zones.db, 'availability_zone_get_all', mock.Mock(return_value=azs)) az_controller = controller() 
ctxt = context.RequestContext("admin", "fake", True) req = fakes.HTTPRequest.blank('/shares', version=version) req.environ['manila.context'] = ctxt result = az_controller.index(req) (availability_zones.db.availability_zone_get_all. assert_called_once_with(ctxt)) mock_policy_check.assert_called_once_with( ctxt, controller.resource_name, 'index') self.assertIsInstance(result, dict) self.assertEqual(["availability_zones"], list(result.keys())) self.assertIsInstance(result["availability_zones"], list) self.assertEqual(2, len(result["availability_zones"])) self.assertIn(azs[0], result["availability_zones"]) azs[1].pop("deleted") azs[1].pop("redundant_key") self.assertIn(azs[1], result["availability_zones"]) @ddt.data( ('1.0', availability_zones.AvailabilityZoneController), ('2.0', availability_zones.AvailabilityZoneController), ('2.6', availability_zones.AvailabilityZoneController), ('2.7', availability_zones.AvailabilityZoneControllerLegacy), ) @ddt.unpack def test_index_with_unsupported_versions(self, version, controller): ctxt = context.RequestContext("admin", "fake", True) req = fakes.HTTPRequest.blank('/shares', version=version) req.environ['manila.context'] = ctxt az_controller = controller() self.assertRaises( exception.VersionNotFoundForAPIMethod, az_controller.index, req) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_messages.py0000664000175000017500000002127600000000000022011 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import datetime from oslo_config import cfg import webob from manila.api.v2 import messages from manila import context from manila import exception from manila.message import api as message_api from manila.message import message_field from manila import policy from manila import test from manila.tests.api import fakes from manila.tests.api.v2 import stubs CONF = cfg.CONF class MessageApiTest(test.TestCase): def setUp(self): super(MessageApiTest, self).setUp() self.controller = messages.MessagesController() self.maxDiff = None self.ctxt = context.RequestContext('admin', 'fake', True) self.mock_object(policy, 'check_policy', mock.Mock(return_value=True)) def _expected_message_from_controller(self, id, **kwargs): message = stubs.stub_message(id, **kwargs) links = [ {'href': 'http://localhost/share/v2/fake/messages/%s' % id, 'rel': 'self'}, {'href': 'http://localhost/share/fake/messages/%s' % id, 'rel': 'bookmark'}, ] return { 'message': { 'id': message.get('id'), 'project_id': message.get('project_id'), 'user_message': "%s: %s" % ( message_field.translate_action(message.get('action_id')), message_field.translate_detail(message.get('detail_id'))), 'request_id': message.get('request_id'), 'action_id': message.get('action_id'), 'detail_id': message.get('detail_id'), 'created_at': message.get('created_at'), 'message_level': message.get('message_level'), 'expires_at': message.get('expires_at'), 'links': links, 'resource_type': message.get('resource_type'), 'resource_id': message.get('resource_id'), } } def test_show(self): self.mock_object(message_api.API, 'get', stubs.stub_message_get) req = fakes.HTTPRequest.blank( '/v2/fake/messages/%s' % fakes.FAKE_UUID, version=messages.MESSAGES_BASE_MICRO_VERSION) req.environ['manila.context'] = self.ctxt res_dict = self.controller.show(req, fakes.FAKE_UUID) ex = 
self._expected_message_from_controller(fakes.FAKE_UUID) self.assertEqual(ex, res_dict) def test_show_with_resource(self): resource_type = "FAKE_RESOURCE" resource_id = "b1872cb2-4c5f-4072-9828-8a51b02926a3" fake_message = stubs.stub_message(fakes.FAKE_UUID, resource_type=resource_type, resource_id=resource_id) mock_get = mock.Mock(return_value=fake_message) self.mock_object(message_api.API, 'get', mock_get) req = fakes.HTTPRequest.blank( '/v2/fake/messages/%s' % fakes.FAKE_UUID, version=messages.MESSAGES_BASE_MICRO_VERSION, base_url='http://localhost/share/v2') req.environ['manila.context'] = self.ctxt res_dict = self.controller.show(req, fakes.FAKE_UUID) self.assertEqual(resource_type, res_dict['message']['resource_type']) self.assertEqual(resource_id, res_dict['message']['resource_id']) def test_show_not_found(self): fake_not_found = exception.MessageNotFound(message_id=fakes.FAKE_UUID) self.mock_object(message_api.API, 'get', mock.Mock(side_effect=fake_not_found)) req = fakes.HTTPRequest.blank( '/v2/fake/messages/%s' % fakes.FAKE_UUID, version=messages.MESSAGES_BASE_MICRO_VERSION, base_url='http://localhost/share/v2') req.environ['manila.context'] = self.ctxt self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, fakes.FAKE_UUID) def test_show_pre_microversion(self): self.mock_object(message_api.API, 'get', stubs.stub_message_get) req = fakes.HTTPRequest.blank('/v2/fake/messages/%s' % fakes.FAKE_UUID, version='2.35', base_url='http://localhost/share/v2') req.environ['manila.context'] = self.ctxt self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.show, req, fakes.FAKE_UUID) def test_delete(self): self.mock_object(message_api.API, 'get', stubs.stub_message_get) self.mock_object(message_api.API, 'delete') req = fakes.HTTPRequest.blank( '/v2/fake/messages/%s' % fakes.FAKE_UUID, version=messages.MESSAGES_BASE_MICRO_VERSION) req.environ['manila.context'] = self.ctxt resp = self.controller.delete(req, fakes.FAKE_UUID) 
self.assertEqual(204, resp.status_int) self.assertTrue(message_api.API.delete.called) def test_delete_not_found(self): fake_not_found = exception.MessageNotFound(message_id=fakes.FAKE_UUID) self.mock_object(message_api.API, 'get', mock.Mock(side_effect=fake_not_found)) req = fakes.HTTPRequest.blank( '/v2/fake/messages/%s' % fakes.FAKE_UUID, version=messages.MESSAGES_BASE_MICRO_VERSION) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, fakes.FAKE_UUID) def test_index(self): msg1 = stubs.stub_message(fakes.get_fake_uuid()) msg2 = stubs.stub_message(fakes.get_fake_uuid()) self.mock_object(message_api.API, 'get_all', mock.Mock( return_value=[msg1, msg2])) req = fakes.HTTPRequest.blank( '/v2/fake/messages', version=messages.MESSAGES_BASE_MICRO_VERSION, base_url='http://localhost/share/v2') req.environ['manila.context'] = self.ctxt res_dict = self.controller.index(req) ex1 = self._expected_message_from_controller(msg1['id'])['message'] ex2 = self._expected_message_from_controller(msg2['id'])['message'] expected = {'messages': [ex1, ex2]} self.assertDictEqual(expected, res_dict) def test_index_with_limit_and_offset(self): msg2 = stubs.stub_message(fakes.get_fake_uuid()) self.mock_object(message_api.API, 'get_all', mock.Mock( return_value=[msg2])) req = fakes.HTTPRequest.blank( '/v2/fake/messages?limit=1&offset=1', version=messages.MESSAGES_BASE_MICRO_VERSION) req.environ['manila.context'] = self.ctxt res_dict = self.controller.index(req) ex2 = self._expected_message_from_controller(msg2['id'])['message'] self.assertEqual([ex2], res_dict['messages']) def test_index_with_created_since_and_created_before(self): msg = stubs.stub_message( fakes.get_fake_uuid(), created_at=datetime.datetime( 1900, 2, 1, 1, 1, 1, tzinfo=datetime.timezone.utc)) self.mock_object(message_api.API, 'get_all', mock.Mock( return_value=[msg])) req = fakes.HTTPRequest.blank( '/fake/messages?created_since=1900-01-01T01:01:01&' 'created_before=1900-03-01T01:01:01', 
version=messages.MESSAGES_QUERY_BY_TIMESTAMP, base_url='http://localhost/share/v2') req.environ['manila.context'] = self.ctxt res_dict = self.controller.index(req) ex2 = self._expected_message_from_controller( msg['id'], created_at=datetime.datetime( 1900, 2, 1, 1, 1, 1, tzinfo=datetime.timezone.utc))['message'] self.assertEqual([ex2], res_dict['messages']) def test_index_with_invalid_time_format(self): req = fakes.HTTPRequest.blank( '/messages?created_since=invalid_time_str', version=messages.MESSAGES_QUERY_BY_TIMESTAMP, base_url='http://localhost/share/v2') req.environ['manila.context'] = self.ctxt self.assertRaises(exception.ValidationError, self.controller.index, req) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_metadata.py0000664000175000017500000001560200000000000021756 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt import webob from manila.api.v2 import metadata from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import db_utils @ddt.ddt class MetadataAPITest(test.TestCase): def _get_request(self, version="2.65", use_admin_context=False): req = fakes.HTTPRequest.blank( '/v2/shares/{resource_id}/metadata', version=version, use_admin_context=use_admin_context) return req def setUp(self): super(MetadataAPITest, self).setUp() self.controller = ( metadata.MetadataController()) self.controller.resource_name = 'share' self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.resource = db_utils.create_share(size=1) def test__get_resource_policy_not_authorized_pubic_resource(self): fake_context = context.RequestContext('fake', 'fake', True) policy_exception = exception.PolicyNotAuthorized(action='share:get') mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(side_effect=policy_exception)) share_obj = db_utils.create_share(size=1, is_public=True) self.assertRaises( exception.PolicyNotAuthorized, self.controller._get_resource, fake_context, share_obj['id'], for_modification=True, ) mock_policy_check.assert_called_once_with( fake_context, 'share', 'get', mock.ANY) policy_resource_obj = mock_policy_check.call_args[0][3] self.assertEqual(share_obj['id'], policy_resource_obj['id']) @ddt.data(True, False) def test__get_resource_policy_not_authorized_private_resource(self, formd): fake_context = context.RequestContext('fake', 'fake', True) mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=False)) share_obj = db_utils.create_share(size=1, is_public=False) self.assertRaises( webob.exc.HTTPNotFound, self.controller._get_resource, fake_context, share_obj['id'], for_modification=formd, ) mock_policy_check.assert_called_once_with( fake_context, 'share', 
'get', mock.ANY, do_raise=False) policy_resource_obj = mock_policy_check.call_args[0][3] self.assertEqual(share_obj['id'], policy_resource_obj['id']) def test_create_index_metadata(self): url = self._get_request() body = {'metadata': {'test_key1': 'test_v1', 'test_key2': 'test_v2'}} update = self.controller._create_metadata( url, self.resource['id'], body=body) get = self.controller._index_metadata(url, self.resource['id']) self.assertEqual(2, len(get['metadata'])) self.assertEqual(update['metadata'], get['metadata']) @ddt.data(({'metadata': {'key1': 'v1'}}, 'key1'), ({'metadata': {'test_key1': 'test_v1'}}, 'test_key1'), ({'metadata': {'key1': 'v2'}}, 'key1')) @ddt.unpack def test_update_get_metadata_item(self, body, key): url = self._get_request() update = self.controller._update_metadata_item( url, self.resource['id'], body=body, key=key) self.assertEqual(body, update) get = self.controller._index_metadata(url, self.resource['id']) self.assertEqual(1, len(get)) self.assertEqual(body['metadata'], get['metadata']) get_item = self.controller._show_metadata(url, self.resource['id'], key=key) self.assertEqual({'meta': body['metadata']}, get_item) @ddt.data({'metadata': {'key1': 'v1', 'key2': 'v2'}}, {'metadata': {'test_key1': 'test_v1'}}, {'metadata': {'key1': 'v2'}}) def test_update_all_metadata(self, body): url = self._get_request() update = self.controller._update_all_metadata( url, self.resource['id'], body=body) self.assertEqual(body, update) get = self.controller._index_metadata(url, self.resource['id']) self.assertEqual(len(body['metadata']), len(get['metadata'])) self.assertEqual(body['metadata'], get['metadata']) def test_delete_metadata(self): body = {'metadata': {'test_key3': 'test_v3', 'testkey': 'testval'}} url = self._get_request() self.controller._create_metadata(url, self.resource['id'], body=body) self.controller._delete_metadata(url, self.resource['id'], 'test_key3') show_result = self.controller._index_metadata(url, self.resource[ 'id']) 
self.assertEqual(1, len(show_result['metadata'])) self.assertNotIn('test_key3', show_result['metadata']) def test_update_metadata_with_resource_id_not_found(self): url = self._get_request() id = 'invalid_id' body = {'metadata': {'key1': 'v1'}} self.assertRaises( webob.exc.HTTPNotFound, self.controller._create_metadata, url, id, body) def test_update_metadata_with_body_error(self): self.assertRaises( webob.exc.HTTPBadRequest, self.controller._create_metadata, self._get_request(), self.resource['id'], {'metadata_error': {'key1': 'v1'}}) @ddt.data({'metadata': {'key1': 'v1', 'key2': None}}, {'metadata': {None: 'v1', 'key2': 'v2'}}, {'metadata': {'k' * 256: 'v2'}}, {'metadata': {'key1': 'v' * 1024}}) @ddt.unpack def test_update_metadata_with_invalid_metadata(self, metadata): self.assertRaises( webob.exc.HTTPBadRequest, self.controller._create_metadata, self._get_request(), self.resource['id'], {'metadata': metadata}) def test_delete_metadata_not_found(self): body = {'metadata': {'test_key_exist': 'test_v_exist'}} update = self.controller._update_all_metadata( self._get_request(), self.resource['id'], body=body) self.assertEqual(body, update) self.assertRaises( exception.MetadataItemNotFound, self.controller._delete_metadata, self._get_request(), self.resource['id'], 'key1') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_quota_class_sets.py0000664000175000017500000002130100000000000023543 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for manila.api.v1.quota_class_sets.py """ import copy from unittest import mock import ddt from oslo_config import cfg import webob.exc import webob.response from manila.api.openstack import api_version_request as api_version from manila.api.v2 import quota_class_sets from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes CONF = cfg.CONF REQ = mock.MagicMock() REQ.environ = {'manila.context': context.get_admin_context()} REQ.environ['manila.context'].is_admin = True REQ.environ['manila.context'].auth_token = 'foo_auth_token' REQ.environ['manila.context'].project_id = 'foo_project_id' REQ_MEMBER = copy.deepcopy(REQ) REQ_MEMBER.environ['manila.context'].is_admin = False @ddt.ddt class QuotaSetsControllerTest(test.TestCase): def setUp(self): super(QuotaSetsControllerTest, self).setUp() self.controller = quota_class_sets.QuotaClassSetsController() self.resource_name = self.controller.resource_name self.class_name = 'foo_class_name' self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) @ddt.data( ('os-', '1.0', quota_class_sets.QuotaClassSetsControllerLegacy), ('os-', '2.6', quota_class_sets.QuotaClassSetsControllerLegacy), ('', '2.7', quota_class_sets.QuotaClassSetsController), ('', '2.53', quota_class_sets.QuotaClassSetsController), ('', '2.62', quota_class_sets.QuotaClassSetsController), ('', '2.80', quota_class_sets.QuotaClassSetsController), ('', '2.90', quota_class_sets.QuotaClassSetsController), ) @ddt.unpack def 
test_show_quota(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%squota-class-sets' % url, version=version, use_admin_context=True) quotas = { "shares": 23, "snapshots": 34, "gigabytes": 45, "snapshot_gigabytes": 56, "share_networks": 67, } expected = { 'quota_class_set': { 'id': self.class_name, 'shares': quotas.get('shares', 50), 'gigabytes': quotas.get('gigabytes', 1000), 'snapshots': quotas.get('snapshots', 50), 'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000), 'share_networks': quotas.get('share_networks', 10), } } for k, v in quotas.items(): CONF.set_default(k, v, 'quota') if req.api_version_request >= api_version.APIVersionRequest("2.40"): expected['quota_class_set']['share_groups'] = 50 expected['quota_class_set']['share_group_snapshots'] = 50 if req.api_version_request >= api_version.APIVersionRequest("2.53"): expected['quota_class_set']['share_replicas'] = 100 expected['quota_class_set']['replica_gigabytes'] = 1000 if req.api_version_request >= api_version.APIVersionRequest("2.62"): expected['quota_class_set']['per_share_gigabytes'] = -1 if req.api_version_request >= api_version.APIVersionRequest("2.80"): expected['quota_class_set']['backups'] = 10 expected['quota_class_set']['backup_gigabytes'] = 1000 if req.api_version_request >= api_version.APIVersionRequest("2.90"): expected['quota_class_set']['encryption_keys'] = 100 result = controller().show(req, self.class_name) self.assertEqual(expected, result) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'show') def test_show_quota_not_authorized(self): self.mock_object( quota_class_sets.db, 'authorize_quota_class_context', mock.Mock(side_effect=exception.NotAuthorized)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.show, REQ, self.class_name) self.mock_policy_check.assert_called_once_with( REQ.environ['manila.context'], self.resource_name, 'show') @ddt.data( ('os-', '1.0', 
quota_class_sets.QuotaClassSetsControllerLegacy), ('os-', '2.6', quota_class_sets.QuotaClassSetsControllerLegacy), ('', '2.7', quota_class_sets.QuotaClassSetsController), ('', '2.53', quota_class_sets.QuotaClassSetsController), ('', '2.62', quota_class_sets.QuotaClassSetsController), ) @ddt.unpack def test_update_quota(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%squota-class-sets' % url, version=version, use_admin_context=True) CONF.set_default('shares', 789, 'quota') body = { 'quota_class_set': { 'class_name': self.class_name, 'shares': 788, } } expected = { 'quota_class_set': { 'shares': body['quota_class_set']['shares'], 'gigabytes': 1000, 'snapshots': 50, 'snapshot_gigabytes': 1000, 'share_networks': 10, } } if req.api_version_request >= api_version.APIVersionRequest("2.40"): expected['quota_class_set']['share_groups'] = 50 expected['quota_class_set']['share_group_snapshots'] = 50 if req.api_version_request >= api_version.APIVersionRequest("2.53"): expected['quota_class_set']['share_replicas'] = 100 expected['quota_class_set']['replica_gigabytes'] = 1000 if req.api_version_request >= api_version.APIVersionRequest("2.62"): expected['quota_class_set']['per_share_gigabytes'] = -1 if req.api_version_request >= api_version.APIVersionRequest("2.80"): expected['quota_class_set']['backups'] = 10 expected['quota_class_set']['backup_gigabytes'] = 1000 update_result = controller().update( req, self.class_name, body=body) self.assertEqual(expected, update_result) show_result = controller().show(req, self.class_name) expected['quota_class_set']['id'] = self.class_name self.assertEqual(expected, show_result) self.mock_policy_check.assert_has_calls([mock.call( req.environ['manila.context'], self.resource_name, action_name) for action_name in ('update', 'show')]) def test_update_quota_not_authorized(self): body = { 'quota_class_set': { 'class_name': self.class_name, 'shares': 13, } } self.assertRaises( webob.exc.HTTPForbidden, 
self.controller.update, REQ_MEMBER, self.class_name, body=body) self.mock_policy_check.assert_called_once_with( REQ_MEMBER.environ['manila.context'], self.resource_name, 'update') @ddt.data( ('os-', '2.7', quota_class_sets.QuotaClassSetsControllerLegacy), ('', '2.6', quota_class_sets.QuotaClassSetsController), ('', '2.0', quota_class_sets.QuotaClassSetsController), ) @ddt.unpack def test_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%squota-class-sets' % url, version=version) for method_name in ('show', 'update'): self.assertRaises( exception.VersionNotFoundForAPIMethod, getattr(controller(), method_name), req, self.class_name) @ddt.data( ('os-', '2.7', quota_class_sets.QuotaClassSetsControllerLegacy), ('', '2.6', quota_class_sets.QuotaClassSetsController), ('', '2.0', quota_class_sets.QuotaClassSetsController), ) @ddt.unpack def test_update_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%squota-class-sets' % url, version=version) self.assertRaises( exception.VersionNotFoundForAPIMethod, controller().update, req, self.class_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_quota_sets.py0000664000175000017500000007356400000000000022400 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Tests for manila.api.v2.quota_sets.py """ from unittest import mock import ddt from oslo_config import cfg import webob.exc import webob.response from manila.api.openstack import api_version_request as api_version from manila.api.v2 import quota_sets from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila import utils CONF = cfg.CONF sg_quota_keys = ['share_groups', 'share_group_snapshots'] replica_quota_keys = ['share_replicas'] per_share_size_quota_keys = ['per_share_gigabytes'] def _get_request(is_admin, user_in_url): req = mock.MagicMock( api_version_request=api_version.APIVersionRequest("2.40")) req.environ = {'manila.context': context.get_admin_context()} req.environ['manila.context'].is_admin = is_admin req.environ['manila.context'].auth_token = 'foo_auth_token' req.environ['manila.context'].project_id = 'foo_project_id' if user_in_url: req.environ['manila.context'].user_id = 'foo_user_id' req.environ['QUERY_STRING'] = 'user_id=foo_user_id' return req @ddt.ddt class QuotaSetsControllerTest(test.TestCase): def setUp(self): super(QuotaSetsControllerTest, self).setUp() self.controller = quota_sets.QuotaSetsController() self.resource_name = self.controller.resource_name self.project_id = 'foo_project_id' self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) @ddt.data( {"shares": 3, "snapshots": 4, "gigabytes": 5, "snapshot_gigabytes": 6, "share_networks": 7}, {"shares": -1, "snapshots": -1, "gigabytes": -1, "snapshot_gigabytes": -1, "share_networks": -1}, {"shares": 13}, {"snapshots": 24}, {"gigabytes": 7}, {"snapshot_gigabytes": 10001}, {"share_networks": 12345}, {"share_groups": 123456}, {"share_group_snapshots": 123456}, ) def test_defaults(self, quotas): req = _get_request(True, False) for k, v in quotas.items(): 
CONF.set_default(k, v, 'quota') expected = { 'quota_set': { 'id': self.project_id, 'shares': quotas.get('shares', 50), 'gigabytes': quotas.get('gigabytes', 1000), 'snapshots': quotas.get('snapshots', 50), 'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000), 'share_networks': quotas.get('share_networks', 10), 'share_groups': quotas.get('share_groups', 50), 'share_group_snapshots': quotas.get( 'share_group_snapshots', 50), } } result = self.controller.defaults(req, self.project_id) self.assertEqual(expected, result) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'show') @ddt.data( ('os-', '1.0', quota_sets.QuotaSetsControllerLegacy, 'defaults'), ('os-', '2.6', quota_sets.QuotaSetsControllerLegacy, 'defaults'), ('', '2.7', quota_sets.QuotaSetsController, 'defaults'), ('os-', '1.0', quota_sets.QuotaSetsControllerLegacy, 'show'), ('os-', '2.6', quota_sets.QuotaSetsControllerLegacy, 'show'), ('', '2.7', quota_sets.QuotaSetsController, 'show'), ) @ddt.unpack def test_get_quotas_with_different_api_versions(self, url, version, controller, method_name): expected = { 'quota_set': { 'id': self.project_id, 'shares': 50, 'gigabytes': 1000, 'snapshots': 50, 'snapshot_gigabytes': 1000, 'share_networks': 10, } } req = fakes.HTTPRequest.blank( '/fooproject/%squota-sets' % url, version=version, use_admin_context=True) result = getattr(controller(), method_name)(req, self.project_id) self.assertEqual(expected, result) @staticmethod def _get_share_type_request_object(microversion=None): req = _get_request(True, False) req.environ['QUERY_STRING'] = 'share_type=fake_share_type_name_or_id' req.api_version_request = api_version.APIVersionRequest( microversion or '2.39') return req @ddt.data('2.39', '2.40') def test_share_type_quota_detail(self, microversion): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock(return_value={'id': 'fake_st_id'})) req = self._get_share_type_request_object(microversion) quotas 
= { "shares": 23, "snapshots": 34, "gigabytes": 45, "snapshot_gigabytes": 56, } expected = {'quota_set': { 'id': self.project_id, 'shares': { 'in_use': 0, 'limit': quotas['shares'], 'reserved': 0, }, 'gigabytes': { 'in_use': 0, 'limit': quotas['gigabytes'], 'reserved': 0, }, 'snapshots': { 'in_use': 0, 'limit': quotas['snapshots'], 'reserved': 0, }, 'snapshot_gigabytes': { 'in_use': 0, 'limit': quotas['snapshot_gigabytes'], 'reserved': 0, }, }} for k, v in quotas.items(): CONF.set_default(k, v, 'quota') result = self.controller.detail(req, self.project_id) self.assertEqual(expected, result) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'show') quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with( req.environ['manila.context'], 'fake_share_type_name_or_id') @ddt.data('2.39', '2.40') def test_show_share_type_quota(self, microversion): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock(return_value={'id': 'fake_st_id'})) req = self._get_share_type_request_object(microversion) quotas = { "shares": 23, "snapshots": 34, "gigabytes": 45, "snapshot_gigabytes": 56, } expected = { 'quota_set': { 'id': self.project_id, 'shares': quotas.get('shares', 50), 'gigabytes': quotas.get('gigabytes', 1000), 'snapshots': quotas.get('snapshots', 50), 'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000), } } for k, v in quotas.items(): CONF.set_default(k, v, 'quota') result = self.controller.show(req, self.project_id) self.assertEqual(expected, result) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'show') quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with( req.environ['manila.context'], 'fake_share_type_name_or_id') @ddt.data('show', 'detail') def test_get_share_type_quota_with_old_microversion(self, method): req = self._get_share_type_request_object('2.38') self.assertRaises( webob.exc.HTTPBadRequest, 
getattr(self.controller, method), req, self.project_id) @ddt.data((None, None), (None, 'foo'), ('bar', None)) @ddt.unpack def test__validate_user_id_and_share_type_args(self, user_id, st_id): result = self.controller._validate_user_id_and_share_type_args( user_id, st_id) self.assertIsNone(result) def test__validate_user_id_and_share_type_args_exception(self): self.assertRaises( webob.exc.HTTPBadRequest, self.controller._validate_user_id_and_share_type_args, 'foo', 'bar') def test__get_share_type_id_found(self): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock(return_value={'id': 'fake_st_id'})) ctxt = 'fake_context' share_type = 'fake_share_type_name_or_id' result = self.controller._get_share_type_id(ctxt, share_type) self.assertEqual('fake_st_id', result) def test__get_share_type_id_not_found(self): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock(return_value=None)) ctxt = 'fake_context' share_type = 'fake_share_type_name_or_id' self.assertRaises( webob.exc.HTTPNotFound, self.controller._get_share_type_id, ctxt, share_type) def test__get_share_type_id_is_not_provided(self): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock(return_value={'id': 'fake_st_id'})) ctxt = 'fake_context' result = self.controller._get_share_type_id(ctxt, None) self.assertIsNone(result) @ddt.data( ({}, sg_quota_keys, '2.40'), ({"quota_set": {}}, sg_quota_keys, '2.40'), ({"quota_set": {"foo": "bar"}}, sg_quota_keys, '2.40'), ({"foo": "bar"}, replica_quota_keys, '2.53'), ({"quota_set": {"foo": "bar"}}, replica_quota_keys, '2.53'), ({"quota_set": {"foo": "bar"}}, per_share_size_quota_keys, '2.62'), ) @ddt.unpack def test__ensure_specific_microversion_args_are_absent_success( self, body, keys, microversion): result = self.controller._ensure_specific_microversion_args_are_absent( body, keys, microversion) self.assertIsNone(result) @ddt.data( ({"share_groups": 5}, sg_quota_keys, '2.40'), ({"share_group_snapshots": 6}, 
sg_quota_keys, '2.40'), ({"quota_set": {"share_groups": 7}}, sg_quota_keys, '2.40'), ({"quota_set": {"share_group_snapshots": 8}}, sg_quota_keys, '2.40'), ({"quota_set": {"share_replicas": 9}}, replica_quota_keys, '2.53'), ({"quota_set": {"share_replicas": 10}}, replica_quota_keys, '2.53'), ({"quota_set": {"per_share_gigabytes": 10}}, per_share_size_quota_keys, '2.62'), ) @ddt.unpack def test__ensure_specific_microversion_args_are_absent_error( self, body, keys, microversion): self.assertRaises( webob.exc.HTTPBadRequest, self.controller._ensure_specific_microversion_args_are_absent, body, keys, microversion ) @ddt.data(_get_request(True, True), _get_request(True, False)) def test__ensure_share_type_arg_is_absent(self, req): result = self.controller._ensure_share_type_arg_is_absent(req) self.assertIsNone(result) def test__ensure_share_type_arg_is_absent_exception(self): req = self._get_share_type_request_object('2.39') self.assertRaises( webob.exc.HTTPBadRequest, self.controller._ensure_share_type_arg_is_absent, req) @ddt.data(_get_request(True, True), _get_request(True, False)) def test_quota_detail(self, request): request.api_version_request = api_version.APIVersionRequest('2.25') quotas = { "shares": 23, "snapshots": 34, "gigabytes": 45, "snapshot_gigabytes": 56, "share_networks": 67, } expected = { 'quota_set': { 'id': self.project_id, 'shares': {'in_use': 0, 'limit': quotas['shares'], 'reserved': 0}, 'gigabytes': {'in_use': 0, 'limit': quotas['gigabytes'], 'reserved': 0}, 'snapshots': {'in_use': 0, 'limit': quotas['snapshots'], 'reserved': 0}, 'snapshot_gigabytes': { 'in_use': 0, 'limit': quotas['snapshot_gigabytes'], 'reserved': 0, }, 'share_networks': { 'in_use': 0, 'limit': quotas['share_networks'], 'reserved': 0 }, } } for k, v in quotas.items(): CONF.set_default(k, v, 'quota') result = self.controller.detail(request, self.project_id) self.assertEqual(expected, result) self.mock_policy_check.assert_called_once_with( request.environ['manila.context'], 
self.resource_name, 'show') @ddt.data(_get_request(True, True), _get_request(True, False)) def test_show_quota(self, request): quotas = { "shares": 23, "snapshots": 34, "gigabytes": 45, "snapshot_gigabytes": 56, "share_networks": 67, "share_groups": 53, "share_group_snapshots": 57, } expected = { 'quota_set': { 'id': self.project_id, 'shares': quotas.get('shares', 50), 'gigabytes': quotas.get('gigabytes', 1000), 'snapshots': quotas.get('snapshots', 50), 'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000), 'share_networks': quotas.get('share_networks', 10), 'share_groups': quotas.get('share_groups', 50), 'share_group_snapshots': quotas.get( 'share_group_snapshots', 50), } } for k, v in quotas.items(): CONF.set_default(k, v, 'quota') result = self.controller.show(request, self.project_id) self.assertEqual(expected, result) self.mock_policy_check.assert_called_once_with( request.environ['manila.context'], self.resource_name, 'show') def test_show_quota_not_authorized(self): req = _get_request(True, False) self.mock_object( quota_sets.db, 'authorize_project_context', mock.Mock(side_effect=exception.NotAuthorized)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.show, req, self.project_id) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'show') @ddt.data(_get_request(True, True), _get_request(True, False)) def test_update_quota(self, request): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock( return_value={'id': 'fake_st_id', 'name': 'fake_st_name'})) CONF.set_default('shares', 789, 'quota') body = {'quota_set': {'tenant_id': self.project_id, 'shares': 788}} expected = { 'quota_set': { 'shares': body['quota_set']['shares'], 'gigabytes': 1000, 'snapshots': 50, 'snapshot_gigabytes': 1000, 'share_networks': 10, 'share_groups': 50, 'share_group_snapshots': 50, } } mock_policy_update_check_call = mock.call( request.environ['manila.context'], self.resource_name, 'update') 
mock_policy_show_check_call = mock.call( request.environ['manila.context'], self.resource_name, 'show') update_result = self.controller.update( request, self.project_id, body=body) self.assertEqual(expected, update_result) show_result = self.controller.show(request, self.project_id) expected['quota_set']['id'] = self.project_id self.assertEqual(expected, show_result) self.mock_policy_check.assert_has_calls([ mock_policy_update_check_call, mock_policy_show_check_call]) quota_sets.db.share_type_get_by_name_or_id.assert_not_called() @ddt.data(_get_request(True, True), _get_request(True, False)) def test_update_quota_with_value_greater_than_2147483647(self, req): value = 2147483648 body = {'quota_set': {'tenant_id': self.project_id, 'shares': value}} if req == _get_request(True, True): self.mock_policy_update_check_call = mock.call( req.environ['manila.context'], self.resource_name, 'update') self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, req, self.project_id, body ) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'update') if req == _get_request(True, False): self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, req, self.project_id, body ) self.mock_policy_check.assert_not_called() @ddt.data('2.39', '2.40') def test_update_share_type_quota(self, microversion): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock( return_value={'id': 'fake_st_id', 'name': 'fake_st_name'})) req = self._get_share_type_request_object(microversion) CONF.set_default('shares', 789, 'quota') body = {'quota_set': {'tenant_id': self.project_id, 'shares': 788}} expected = { 'quota_set': { 'shares': body['quota_set']['shares'], 'gigabytes': 1000, 'snapshots': 50, 'snapshot_gigabytes': 1000, } } update_result = self.controller.update(req, self.project_id, body=body) self.assertEqual(expected, update_result) quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with( 
req.environ['manila.context'], req.environ['QUERY_STRING'].split('=')[-1]) quota_sets.db.share_type_get_by_name_or_id.reset_mock() show_result = self.controller.show(req, self.project_id) expected['quota_set']['id'] = self.project_id self.assertEqual(expected, show_result) self.mock_policy_check.assert_has_calls([ mock.call(req.environ['manila.context'], self.resource_name, key) for key in ('update', 'show') ]) quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with( req.environ['manila.context'], req.environ['QUERY_STRING'].split('=')[-1]) def test_update_share_type_quota_using_too_old_microversion(self): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock( return_value={'id': 'fake_st_id', 'name': 'fake_st_name'})) req = self._get_share_type_request_object('2.38') body = {'quota_set': {'tenant_id': self.project_id, 'shares': 788}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, req, self.project_id, body=body) quota_sets.db.share_type_get_by_name_or_id.assert_not_called() def test_update_share_type_quota_for_share_networks(self): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock( return_value={'id': 'fake_st_id', 'name': 'fake_st_name'})) req = self._get_share_type_request_object('2.39') body = {'quota_set': { 'tenant_id': self.project_id, 'share_networks': 788, }} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, req, self.project_id, body=body) quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with( req.environ['manila.context'], req.environ['QUERY_STRING'].split('=')[-1]) @ddt.data(-2, 'foo', {1: 2}, [1]) def test_update_quota_with_invalid_value(self, value): req = _get_request(True, False) body = {'quota_set': {'tenant_id': self.project_id, 'shares': value}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, req, self.project_id, body=body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 
self.resource_name, 'update') def test_user_quota_can_not_be_bigger_than_tenant_quota(self): value = 777 CONF.set_default('shares', value, 'quota') body = { 'quota_set': { 'tenant_id': self.project_id, 'shares': value + 1, } } req = _get_request(True, True) self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, req, self.project_id, body=body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'update') def test_update_inexistent_quota(self): body = { 'quota_set': { 'tenant_id': self.project_id, 'fake_quota': 13, } } req = _get_request(True, False) self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, req, self.project_id, body=body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'update') def test_update_quota_not_authorized(self): body = {'quota_set': {'tenant_id': self.project_id, 'shares': 13}} req = _get_request(False, False) self.assertRaises( webob.exc.HTTPForbidden, self.controller.update, req, self.project_id, body=body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'update') @ddt.data( ('os-quota-sets', '1.0', quota_sets.QuotaSetsControllerLegacy), ('os-quota-sets', '2.6', quota_sets.QuotaSetsControllerLegacy), ('quota-sets', '2.7', quota_sets.QuotaSetsController), ) @ddt.unpack def test_update_all_quotas_with_force(self, url, version, controller): req = fakes.HTTPRequest.blank( '/fooproject/%s' % url, version=version, use_admin_context=True) quotas = ( ('quota_shares', 13), ('quota_gigabytes', 14), ('quota_snapshots', 15), ('quota_snapshot_gigabytes', 16), ('quota_share_networks', 17), ) for quota, value in quotas: CONF.set_default(quota, value) expected = { 'quota_set': { 'tenant_id': self.project_id, 'shares': quotas[0][1], 'gigabytes': quotas[1][1], 'snapshots': quotas[2][1], 'snapshot_gigabytes': quotas[3][1], 'share_networks': quotas[4][1], 'force': True, } } 
update_result = controller().update( req, self.project_id, body=expected) expected['quota_set'].pop('force') expected['quota_set'].pop('tenant_id') self.assertEqual(expected, update_result) show_result = controller().show(req, self.project_id) expected['quota_set']['id'] = self.project_id self.assertEqual(expected, show_result) self.mock_policy_check.assert_has_calls([ mock.call(req.environ['manila.context'], self.resource_name, action) for action in ('update', 'show') ]) @ddt.data( ('os-quota-sets', '1.0', quota_sets.QuotaSetsControllerLegacy), ('os-quota-sets', '2.6', quota_sets.QuotaSetsControllerLegacy), ('quota-sets', '2.7', quota_sets.QuotaSetsController), ) @ddt.unpack def test_delete_tenant_quota(self, url, version, controller): self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project_and_user') self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project') req = fakes.HTTPRequest.blank( '/fooproject/%s' % url, version=version, use_admin_context=True) result = controller().delete(req, self.project_id) self.assertTrue( utils.IsAMatcher(webob.response.Response) == result ) self.assertTrue(hasattr(result, 'status_code')) self.assertEqual(202, result.status_code) self.assertFalse( quota_sets.QUOTAS.destroy_all_by_project_and_user.called) quota_sets.QUOTAS.destroy_all_by_project.assert_called_once_with( req.environ['manila.context'], self.project_id) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'delete') def test_delete_user_quota(self): project_id = 'foo_project_id' self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project_and_user') self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project') req = _get_request(True, True) result = self.controller.delete(req, project_id) self.assertTrue( utils.IsAMatcher(webob.response.Response) == result ) self.assertTrue(hasattr(result, 'status_code')) self.assertEqual(202, result.status_code) (quota_sets.QUOTAS.destroy_all_by_project_and_user. 
assert_called_once_with( req.environ['manila.context'], project_id, req.environ['manila.context'].user_id)) self.assertFalse(quota_sets.QUOTAS.destroy_all_by_project.called) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'delete') def test_delete_share_type_quota(self): req = self._get_share_type_request_object('2.39') self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project') self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project_and_user') mock_delete_st_quotas = self.mock_object( quota_sets.QUOTAS, 'destroy_all_by_project_and_share_type') self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock( return_value={'id': 'fake_st_id', 'name': 'fake_st_name'})) result = self.controller.delete(req, self.project_id) self.assertEqual(utils.IsAMatcher(webob.response.Response), result) self.assertTrue(hasattr(result, 'status_code')) self.assertEqual(202, result.status_code) mock_delete_st_quotas.assert_called_once_with( req.environ['manila.context'], self.project_id, 'fake_st_id') quota_sets.QUOTAS.destroy_all_by_project.assert_not_called() quota_sets.QUOTAS.destroy_all_by_project_and_user.assert_not_called() self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'delete') quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with( req.environ['manila.context'], req.environ['QUERY_STRING'].split('=')[-1]) def test_delete_share_type_quota_using_too_old_microversion(self): self.mock_object( quota_sets.db, 'share_type_get_by_name_or_id', mock.Mock( return_value={'id': 'fake_st_id', 'name': 'fake_st_name'})) req = self._get_share_type_request_object('2.38') self.assertRaises( webob.exc.HTTPBadRequest, self.controller.delete, req, self.project_id) quota_sets.db.share_type_get_by_name_or_id.assert_not_called() def test_delete_not_authorized(self): req = _get_request(False, False) self.assertRaises( webob.exc.HTTPForbidden, self.controller.delete, req, 
self.project_id) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'delete') @ddt.data( ('os-quota-sets', '2.7', quota_sets.QuotaSetsControllerLegacy), ('quota-sets', '2.6', quota_sets.QuotaSetsController), ('quota-sets', '2.0', quota_sets.QuotaSetsController), ) @ddt.unpack def test_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version) for method_name in ('show', 'defaults', 'delete'): self.assertRaises( exception.VersionNotFoundForAPIMethod, getattr(controller(), method_name), req, self.project_id) @ddt.data( ('os-quota-sets', '2.7', quota_sets.QuotaSetsControllerLegacy), ('quota-sets', '2.6', quota_sets.QuotaSetsController), ('quota-sets', '2.0', quota_sets.QuotaSetsController), ) @ddt.unpack def test_update_api_not_found(self, url, version, controller): req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version) self.assertRaises( exception.VersionNotFoundForAPIMethod, controller().update, req, self.project_id) def test_update_without_quota(self): body = { 'quota_set': { 'tenant_id': self.project_id, } } req = _get_request(True, False) self.assertRaises( webob.exc.HTTPBadRequest, self.controller.update, req, self.project_id, body=body) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'update') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_resource_locks.py0000664000175000017500000003541200000000000023221 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import ddt from oslo_config import cfg from oslo_utils import uuidutils import webob from manila.api.v2 import resource_locks from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests.api.v2 import stubs from manila.tests import utils as test_utils from manila import utils CONF = cfg.CONF @ddt.ddt class ResourceLockApiTest(test.TestCase): def setUp(self): super(ResourceLockApiTest, self).setUp() self.controller = resource_locks.ResourceLocksController() self.maxDiff = None self.ctxt = context.RequestContext('demo', 'fake', False) self.req = fakes.HTTPRequest.blank( '/resource-locks', version=resource_locks.RESOURCE_LOCKS_MIN_API_VERSION ) self.mock_object( policy, 'check_policy', mock.Mock(return_value=True) ) @ddt.data( test_utils.annotated( 'no_body_content', { 'body': {}, 'resource_type': 'share' } ), test_utils.annotated( 'invalid_body', { 'body': { 'share': 'somedata' }, 'resource_type': 'share' } ), test_utils.annotated( 'invalid_action', { 'body': { 'resource_lock': { 'resource_action': 'invalid_action', } }, 'resource_type': 'share' }, ), test_utils.annotated( 'invalid_reason', { 'body': { 'resource_lock': { 'lock_reason': 'xyzzyspoon!' 
* 94, } }, 'resource_type': 'share' }, ), test_utils.annotated( 'disallowed_attributes', { 'body': { 'resource_lock': { 'lock_reason': 'the reason is you', 'resource_action': 'delete', 'resource_id': uuidutils.generate_uuid(), } }, 'resource_type': 'share' }, ), ) @ddt.unpack def test__check_body_for_update_invalid(self, body, resource_type): current_lock = {'resource_type': resource_type} self.assertRaises(webob.exc.HTTPBadRequest, self.controller._check_body, body, lock_to_update=current_lock) @ddt.data( test_utils.annotated('no_body_content', {}), test_utils.annotated('invalid_body', {'share': 'somedata'}), test_utils.annotated( 'invalid_action', { 'resource_lock': { 'resource_action': 'invalid_action', }, }, ), test_utils.annotated( 'invalid_reason', { 'resource_lock': { 'lock_reason': 'xyzzyspoon!' * 94, }, }, ), test_utils.annotated( 'invalid_resource_id', { 'resource_lock': { 'resource_id': 'invalid-id', 'resource_action': 'delete', }, }, ), test_utils.annotated( 'invalid_resource_type', { 'resource_lock': { 'resource_id': uuidutils.generate_uuid(), 'resource_type': 'invalid-resource-type', }, }, ), ) def test__check_body_for_create_invalid(self, body): self.assertRaises(webob.exc.HTTPBadRequest, self.controller._check_body, body) @ddt.data( test_utils.annotated( 'action_and_lock_reason', { 'body': { 'resource_lock': { 'resource_action': 'delete', 'lock_reason': 'the reason is you', } }, 'resource_type': 'share', }, ), test_utils.annotated( 'lock_reason', { 'body': { 'resource_lock': { 'lock_reason': 'tienes razon', } }, 'resource_type': 'share', }, ), test_utils.annotated( 'resource_action', { 'body': { 'resource_lock': { 'resource_action': 'delete', } }, 'resource_type': 'access_rule', }, ), ) @ddt.unpack def test__check_body_for_update(self, body, resource_type): current_lock = copy.copy(body['resource_lock']) current_lock['resource_type'] = resource_type result = self.controller._check_body( body, lock_to_update=current_lock) self.assertIsNone(result) 
def test__check_body_for_create(self): body = { 'resource_lock': { 'resource_id': uuidutils.generate_uuid(), 'resource_type': 'share', }, } result = self.controller._check_body(body) self.assertIsNone(result) @ddt.data({'created_since': None, 'created_before': None}, {'created_since': '2222-22-22', 'created_before': 'a_year_ago'}, {'created_since': 'epoch'}, {'created_before': 'december'}) def test_index_invalid_time_filters(self, filters): url = '/resource-locks?' for key, value in filters.items(): url += f'{key}={value}&' url.rstrip('&') req = fakes.HTTPRequest.blank( url, version=resource_locks.RESOURCE_LOCKS_MIN_API_VERSION) req.environ['manila.context'] = self.ctxt self.assertRaises(exception.ValidationError, self.controller.index, req) @ddt.data({'limit': 'a', 'offset': 'test'}, {'limit': -1}, {'with_count': 'oh-noes', 'limit': 0}) def test_index_invalid_pagination(self, filters): url = '/resource-locks?' for key, value in filters.items(): url += f'{key}={value}&' url.rstrip('&') req = fakes.HTTPRequest.blank( url, version=resource_locks.RESOURCE_LOCKS_MIN_API_VERSION) req.environ['manila.context'] = self.ctxt self.assertRaises(exception.ValidationError, self.controller.index, req) def test_index(self): url = ('/resource-locks?sort_dir=asc&sort_key=resource_id&limit=3' '&offset=1&project_id=f63f7a159f404cfc8604b7065c609691' '&with_count=1') req = fakes.HTTPRequest.blank( url, version=resource_locks.RESOURCE_LOCKS_MIN_API_VERSION) locks = [ stubs.stub_lock('68e2e33d-0f0c-49b7-aee3-f0696ab90360'), stubs.stub_lock('93748a9f-6dfe-4baf-ad4c-b9c82d6063ef'), stubs.stub_lock('44f8dd68-2eeb-41df-b5d1-9e7654212527'), ] self.mock_object(self.controller.resource_locks_api, 'get_all', mock.Mock(return_value=(locks, 3))) actual_locks = self.controller.index(req) expected_filters = { 'project_id': 'f63f7a159f404cfc8604b7065c609691', } self.controller.resource_locks_api.get_all.assert_called_once_with( utils.IsAMatcher(context.RequestContext), search_opts=mock.ANY, limit=3, 
offset=1, sort_key='resource_id', sort_dir='asc', show_count=True, ) # webob uses a "MultiDict" for request params actual_filters = {} call_args = self.controller.resource_locks_api.get_all.call_args[1] search_opts = call_args['search_opts'] for key, value in search_opts.dict_of_lists().items(): actual_filters[key] = value[0] self.assertEqual(expected_filters, actual_filters) self.assertEqual(3, len(actual_locks['resource_locks'])) for lock in actual_locks['resource_locks']: for key in locks[0].keys(): self.assertIn(key, lock) self.assertIn('links', lock) self.assertIn('resource_locks_links', actual_locks) self.assertEqual(3, actual_locks['count']) def test_show_not_found(self): url = '/resource-locks/fake-lock-id' req = fakes.HTTPRequest.blank( url, version=resource_locks.RESOURCE_LOCKS_MIN_API_VERSION) self.mock_object( self.controller.resource_locks_api, 'get', mock.Mock(side_effect=exception.ResourceLockNotFound(lock_id='1'))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 'fake-lock-id') def test_show(self): url = '/resource-locks/c6aef27b-f583-48c7-aac1-bd8fb570ce16' req = fakes.HTTPRequest.blank( url, version=resource_locks.RESOURCE_LOCKS_MIN_API_VERSION) expected_lock = stubs.stub_lock( 'c6aef27b-f583-48c7-aac1-bd8fb570ce16' ) self.mock_object( self.controller.resource_locks_api, 'get', mock.Mock(return_value=expected_lock) ) actual_lock = self.controller.show( req, 'c6aef27b-f583-48c7-aac1-bd8fb570ce16') self.assertSubDictMatch(expected_lock, actual_lock['resource_lock']) self.assertIn('links', actual_lock['resource_lock']) def test_delete_not_found(self): url = '/resource-locks/fake-lock-id' req = fakes.HTTPRequest.blank( url, version=resource_locks.RESOURCE_LOCKS_MIN_API_VERSION) self.mock_object( self.controller.resource_locks_api, 'delete', mock.Mock(side_effect=exception.ResourceLockNotFound(lock_id='1')), ) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 'fake-lock-id') def test_delete(self): url = 
'/resource-locks/c6aef27b-f583-48c7-aac1-bd8fb570ce16' req = fakes.HTTPRequest.blank( url, version=resource_locks.RESOURCE_LOCKS_MIN_API_VERSION) self.mock_object(self.controller.resource_locks_api, 'delete') result = self.controller.delete(req, 'c6aef27b-f583-48c7-aac1-bd8fb570ce16') self.assertEqual(204, result.status_int) def test_create_no_such_resource(self): self.mock_object(self.controller, '_check_body') body = { 'resource_lock': { 'resource_id': '27e14086-16e1-445b-ad32-b2ebb07225a8', 'resource_type': 'share', }, } self.mock_object(self.controller.resource_locks_api, 'create', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.req, body=body) def test_create_visibility_already_locked(self): self.mock_object(self.controller, '_check_body') resource_id = '27e14086-16e1-445b-ad32-b2ebb07225a8' body = { 'resource_lock': { 'resource_id': resource_id, 'resource_type': 'share', }, } self.mock_object( self.controller.resource_locks_api, 'create', mock.Mock( side_effect=exception.ResourceVisibilityLockExists( resource_id=resource_id)) ) self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.req, body=body) def test_create(self): self.mock_object(self.controller, '_check_body') expected_lock = stubs.stub_lock( '04512dae-18c2-45b5-bbab-50b775ba6f1d', lock_reason=None, ) body = { 'resource_lock': { 'resource_id': expected_lock['resource_id'], 'resource_type': expected_lock['resource_type'], }, } self.mock_object(self.controller.resource_locks_api, 'create', mock.Mock(return_value=expected_lock)) actual_lock = self.controller.create( self.req, body=body )['resource_lock'] self.controller.resource_locks_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), resource_id=expected_lock['resource_id'], resource_type=expected_lock['resource_type'], resource_action='delete', lock_reason=None, ) self.assertSubDictMatch(expected_lock, actual_lock) self.assertIn('links', 
actual_lock) def test_update(self): expected_lock = stubs.stub_lock( '04512dae-18c2-45b5-bbab-50b775ba6f1d', lock_reason=None, ) self.mock_object(self.controller, '_check_body') self.mock_object(self.controller.resource_locks_api, 'get', mock.Mock(return_value=expected_lock)) self.mock_object(self.controller.resource_locks_api, 'update', mock.Mock(return_value=expected_lock)) body = { 'resource_lock': { 'lock_reason': None }, } actual_lock = self.controller.update( self.req, '04512dae-18c2-45b5-bbab-50b775ba6f1d', body=body )['resource_lock'] self.controller.resource_locks_api.update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), expected_lock, {'lock_reason': None} ) self.assertSubDictMatch(expected_lock, actual_lock) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_security_services.py0000664000175000017500000000646600000000000023760 0ustar00zuulzuul00000000000000# Copyright 2018 SAP SE # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import datetime import ddt from manila.api.v1 import security_service from manila.common import constants from manila import context from manila import test from manila.tests.api import fakes def stub_security_service(self, version, id): ss_dict = dict( id=id, name='security_service_%s' % str(id), type=constants.SECURITY_SERVICES_ALLOWED_TYPES[0], description='Fake Security Service Desc', dns_ip='1.1.1.1', server='fake-server', domain='fake-domain', user='fake-user', password='fake-password', status=constants.STATUS_NEW, share_networks=[], created_at=datetime.datetime(2017, 8, 24, 1, 1, 1, 1), updated_at=datetime.datetime(2017, 8, 24, 1, 1, 1, 1), project_id='fake-project' ) if self.is_microversion_ge(version, '2.44'): ss_dict['ou'] = 'fake-ou' if self.is_microversion_ge(version, '2.76'): ss_dict['default_ad_site'] = 'fake-default_ad_site' return ss_dict @ddt.ddt class SecurityServicesAPITest(test.TestCase): @ddt.data( ('2.0'), ('2.43'), ('2.44'), ('2.76'), ) def test_index(self, version): ss = [ stub_security_service(self, version, 1), stub_security_service(self, version, 2), ] ctxt = context.RequestContext('admin', 'fake', True) request = fakes.HTTPRequest.blank('/security-services?all_tenants=1', version=version) request.headers['X-Openstack-Manila-Api-Version'] = version request.environ['manila.context'] = ctxt self.mock_object(security_service.db, 'security_service_get_all', mock.Mock(return_value=ss)) self.mock_object(security_service.db, 'share_network_get_all_by_security_service', mock.Mock(return_value=[])) ss_controller = security_service.SecurityServiceController() result = ss_controller.detail(request) self.assertIsInstance(result, dict) self.assertEqual(['security_services'], list(result.keys())) self.assertIsInstance(result['security_services'], list) self.assertEqual(2, len(result['security_services'])) self.assertIn(ss[0], result['security_services']) ss_keys = list(result['security_services'][0].keys()) if 
self.is_microversion_ge(version, '2.44'): self.assertIn('ou', ss_keys) else: self.assertNotIn('ou', ss_keys) if self.is_microversion_ge(version, '2.76'): self.assertIn('default_ad_site', ss_keys) else: self.assertNotIn('default_ad_site', ss_keys) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_services.py0000664000175000017500000003774500000000000022035 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime
from unittest import mock

import webob

import ddt
from oslo_utils import timeutils

from manila.api.openstack import api_version_request as api_version
from manila.api.v2 import services
from manila import context
from manila import db
from manila import exception
from manila import policy
from manila import test
from manila.tests.api import fakes


# Canned db.service_get_all() result: a scheduler and a share service on
# each of two hosts/AZs, mixing disabled flags and up/down states.
fake_services_list = [
    {
        'binary': 'manila-scheduler',
        'host': 'host1',
        'availability_zone': {'name': 'manila1'},
        'id': 1,
        'disabled': True,
        'disabled_reason': 'test1',
        'state': 'up',
        'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
        'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
    },
    {
        'binary': 'manila-share',
        'host': 'host1',
        'availability_zone': {'name': 'manila1'},
        'id': 2,
        'disabled': True,
        'disabled_reason': 'test2',
        'state': 'up',
        'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
        'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
    },
    {
        'binary': 'manila-scheduler',
        'host': 'host2',
        'availability_zone': {'name': 'manila2'},
        'id': 3,
        'disabled': False,
        'disabled_reason': '',
        'state': 'down',
        'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
        'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
    },
    {
        'binary': 'manila-share',
        'host': 'host2',
        'availability_zone': {'name': 'manila2'},
        'id': 4,
        'disabled': True,
        'disabled_reason': 'test4',
        'state': 'down',
        'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
        'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
    },
]

# Expected API view of fake_services_list before microversion 2.83
# ('disabled' is rendered as a 'status' of enabled/disabled).
fake_response_service_list = {'services': [
    {
        'id': 1,
        'binary': 'manila-scheduler',
        'host': 'host1',
        'zone': 'manila1',
        'status': 'disabled',
        'state': 'up',
        'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
    },
    {
        'id': 2,
        'binary': 'manila-share',
        'host': 'host1',
        'zone': 'manila1',
        'status': 'disabled',
        'state': 'up',
        'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
    },
    {
        'id': 3,
        'binary': 'manila-scheduler',
        'host': 'host2',
        'zone': 'manila2',
        'status': 'enabled',
        'state': 'down',
        'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
    },
    {
        'id': 4,
        'binary': 'manila-share',
        'host': 'host2',
        'zone': 'manila2',
        'status': 'disabled',
        'state': 'down',
        'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
    },
]}

# Expected API view from microversion 2.83 on, which adds 'disabled_reason'.
fake_response_service_list_with_disabled_reason = {'services': [
    {
        'id': 1,
        'binary': 'manila-scheduler',
        'host': 'host1',
        'zone': 'manila1',
        'status': 'disabled',
        'disabled_reason': 'test1',
        'state': 'up',
        'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
    },
    {
        'id': 2,
        'binary': 'manila-share',
        'host': 'host1',
        'zone': 'manila1',
        'status': 'disabled',
        'disabled_reason': 'test2',
        'state': 'up',
        'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
    },
    {
        'id': 3,
        'binary': 'manila-scheduler',
        'host': 'host2',
        'zone': 'manila2',
        'status': 'enabled',
        'disabled_reason': '',
        'state': 'down',
        'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
    },
    {
        'id': 4,
        'binary': 'manila-share',
        'host': 'host2',
        'zone': 'manila2',
        'status': 'disabled',
        'disabled_reason': 'test4',
        'state': 'down',
        'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
    },
]}

# Microversion that introduced the POST /services/ensure action.
ENSURE_SHARES_VERSION = "2.86"


def fake_service_get_all(context):
    """Stub for db.service_get_all: return the canned service list."""
    return fake_services_list


def fake_service_get_by_host_binary(context, host, binary):
    """Stub for db.service_get_by_args: match on (host, binary) or None."""
    for service in fake_services_list:
        if service['host'] == host and service['binary'] == binary:
            return service
    return None


def fake_service_get_by_id(value):
    """Look a canned service up by primary key; None when absent."""
    for service in fake_services_list:
        if service['id'] == value:
            return service
    return None


def fake_service_update(context, service_id, values):
    """Stub for db.service_update.

    Raises ServiceNotFound for unknown ids; otherwise returns the updated
    service representation.
    """
    service = fake_service_get_by_id(service_id)
    if service is None:
        raise exception.ServiceNotFound(service_id=service_id)
    # BUG FIX: the original built this dict as a bare expression and
    # discarded it, so the stub always returned None. Return it, matching
    # what a db update stub is expected to hand back.
    return {'host': 'host1', 'binary': 'manila-share',
            'disabled': values['disabled']}


def fake_utcnow():
    """Pin 'now' so up/down state computation is deterministic."""
    return datetime.datetime(2012, 10, 29, 13, 42, 11)


@ddt.ddt
class ServicesTest(test.TestCase):
    """Tests for the /services (and legacy /os-services) API."""

    def setUp(self):
        super(ServicesTest, self).setUp()
        self.mock_object(db, "service_get_all", fake_service_get_all)
        self.mock_object(timeutils, "utcnow", fake_utcnow)
        self.mock_object(db, "service_get_by_args",
                         fake_service_get_by_host_binary)
        self.mock_object(db, "service_update", fake_service_update)
        self.context = context.get_admin_context()
        self.controller = services.ServiceController()
        self.controller_legacy = services.ServiceControllerLegacy()
        self.resource_name = self.controller.resource_name
        self.mock_policy_check = self.mock_object(
            policy, 'check_policy', mock.Mock(return_value=True))

    @ddt.data(
        ('os-services', '1.0', services.ServiceControllerLegacy),
        ('os-services', '2.6', services.ServiceControllerLegacy),
        ('services', '2.7', services.ServiceController),
        ('services', '2.83', services.ServiceController),
    )
    @ddt.unpack
    def test_services_list(self, url, version, controller):
        """Listing renders the microversion-appropriate representation."""
        req = fakes.HTTPRequest.blank('/%s' % url, version=version)
        req.environ['manila.context'] = self.context
        res_dict = controller().index(req)
        if (api_version.APIVersionRequest(version) >=
                api_version.APIVersionRequest('2.83')):
            self.assertEqual(fake_response_service_list_with_disabled_reason,
                             res_dict)
        else:
            self.assertEqual(fake_response_service_list, res_dict)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'index')

    def test_services_list_with_host(self):
        """?host= filter keeps only that host's services."""
        req = fakes.HTTPRequest.blank('/services?host=host1', version='2.7')
        req.environ['manila.context'] = self.context
        res_dict = self.controller.index(req)
        response = {'services': [
            fake_response_service_list['services'][0],
            fake_response_service_list['services'][1],
        ]}
        self.assertEqual(response, res_dict)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'index')

    def test_services_list_with_binary(self):
        """?binary= filter keeps only services of that binary."""
        req = fakes.HTTPRequest.blank(
            '/services?binary=manila-share', version='2.7')
        req.environ['manila.context'] = self.context
        res_dict = self.controller.index(req)
        response = {'services': [
            fake_response_service_list['services'][1],
            fake_response_service_list['services'][3],
        ]}
        self.assertEqual(response, res_dict)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'index')

    def test_services_list_with_zone(self):
        """?zone= filter keeps only that availability zone."""
        req = fakes.HTTPRequest.blank('/services?zone=manila1', version='2.7')
        req.environ['manila.context'] = self.context
        res_dict = self.controller.index(req)
        response = {'services': [
            fake_response_service_list['services'][0],
            fake_response_service_list['services'][1],
        ]}
        self.assertEqual(response, res_dict)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'index')

    def test_services_list_with_status(self):
        """?status= filter keeps only enabled/disabled services."""
        req = fakes.HTTPRequest.blank(
            '/services?status=enabled', version='2.7')
        req.environ['manila.context'] = self.context
        res_dict = self.controller.index(req)
        response = {'services': [
            fake_response_service_list['services'][2],
        ]}
        self.assertEqual(response, res_dict)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'index')

    def test_services_list_with_state(self):
        """?state= filter keeps only services in that up/down state."""
        req = fakes.HTTPRequest.blank('/services?state=up', version='2.7')
        req.environ['manila.context'] = self.context
        res_dict = self.controller.index(req)
        response = {'services': [
            fake_response_service_list['services'][0],
            fake_response_service_list['services'][1],
        ]}
        self.assertEqual(response, res_dict)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'index')

    def test_services_list_with_host_binary(self):
        """Multiple filters combine with AND semantics."""
        req = fakes.HTTPRequest.blank(
            "/services?binary=manila-share&state=up", version='2.7')
        req.environ['manila.context'] = self.context
        res_dict = self.controller.index(req)
        response = {'services': [fake_response_service_list['services'][1], ]}
        self.assertEqual(response, res_dict)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'index')

    @ddt.data(
        ('os-services', '1.0', services.ServiceControllerLegacy),
        ('os-services', '2.6', services.ServiceControllerLegacy),
        ('services', '2.7', services.ServiceController),
    )
    @ddt.unpack
    def test_services_enable(self, url, version, controller):
        """PUT .../enable reports the service as enabled."""
        body = {'host': 'host1', 'binary': 'manila-share'}
        req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version)
        res_dict = controller().update(req, "enable", body)
        self.assertEqual('enabled', res_dict['status'])
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'update')

    @ddt.data(
        ('os-services', '1.0', services.ServiceControllerLegacy),
        ('os-services', '2.6', services.ServiceControllerLegacy),
        ('services', '2.7', services.ServiceController),
        ('services', '2.83', services.ServiceController),
    )
    @ddt.unpack
    def test_services_disable(self, url, version, controller):
        """PUT .../disable reports disabled; >2.83 echoes disabled_reason."""
        req = fakes.HTTPRequest.blank(
            '/fooproject/%s/disable' % url, version=version)
        body = {'host': 'host1', 'binary': 'manila-share'}
        if (api_version.APIVersionRequest(version) >
                api_version.APIVersionRequest("2.83")):
            body['disabled_reason'] = 'test1'
        res_dict = controller().update(req, "disable", body)
        self.assertEqual('disabled', res_dict['status'])
        if (api_version.APIVersionRequest(version) >
                api_version.APIVersionRequest("2.83")):
            self.assertEqual(res_dict['disabled_reason'], 'test1')
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'update')

    @ddt.data(
        ('os-services', '2.7', services.ServiceControllerLegacy),
        ('services', '2.6', services.ServiceController),
        ('services', '1.0', services.ServiceController),
    )
    @ddt.unpack
    def test_services_update_legacy_url_2_dot_7_api_not_found(self, url,
                                                              version,
                                                              controller):
        """Update on a URL/version mismatch raises version-not-found."""
        req = fakes.HTTPRequest.blank(
            '/fooproject/%s/fake' % url, version=version)
        body = {'host': 'host1', 'binary': 'manila-share'}
        self.assertRaises(
            exception.VersionNotFoundForAPIMethod,
            controller().update, req, "disable", body,
        )

    @ddt.data(
        ('os-services', '2.7', services.ServiceControllerLegacy),
        ('services', '2.6', services.ServiceController),
        ('services', '1.0', services.ServiceController),
    )
    @ddt.unpack
    def test_services_list_api_not_found(self, url, version, controller):
        """Listing on a URL/version mismatch raises version-not-found."""
        req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version)
        self.assertRaises(
            exception.VersionNotFoundForAPIMethod, controller().index, req)

    def test_ensure_shares_no_host_param(self):
        """ensure requires a 'host' in the body."""
        req = fakes.HTTPRequest.blank(
            '/fooproject/services/ensure', version=ENSURE_SHARES_VERSION)
        body = {}
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller.ensure_shares,
            req,
            body
        )

    def test_ensure_shares_host_not_found(self):
        """An unknown host maps to HTTP 404."""
        req = fakes.HTTPRequest.blank(
            '/fooproject/services/ensure', version=ENSURE_SHARES_VERSION)
        req_context = req.environ['manila.context']
        body = {'host': 'host1'}
        mock_service_get = self.mock_object(
            db, 'service_get_by_args',
            mock.Mock(side_effect=exception.NotFound())
        )
        self.assertRaises(
            webob.exc.HTTPNotFound,
            self.controller.ensure_shares,
            req,
            body
        )
        mock_service_get.assert_called_once_with(
            req_context, body['host'], 'manila-share'
        )

    def test_ensure_shares_conflict(self):
        """A conflicting ensure operation propagates HTTP 409."""
        req = fakes.HTTPRequest.blank(
            '/fooproject/services/ensure', version=ENSURE_SHARES_VERSION)
        req_context = req.environ['manila.context']
        body = {'host': 'host1'}
        fake_service = {'id': 'fake_service_id'}
        mock_service_get = self.mock_object(
            db, 'service_get_by_args',
            mock.Mock(return_value=fake_service)
        )
        mock_ensure = self.mock_object(
            self.controller.service_api, 'ensure_shares',
            mock.Mock(side_effect=webob.exc.HTTPConflict)
        )
        self.assertRaises(
            webob.exc.HTTPConflict,
            self.controller.ensure_shares,
            req,
            body
        )
        mock_service_get.assert_called_once_with(
            req_context, body['host'], 'manila-share'
        )
        mock_ensure.assert_called_once_with(
            req_context, fake_service, body['host']
        )

    def test_ensure_shares(self):
        """A successful ensure returns HTTP 202 and calls the service API."""
        req = fakes.HTTPRequest.blank(
            '/fooproject/services/ensure', version=ENSURE_SHARES_VERSION)
        req_context = req.environ['manila.context']
        body = {'host': 'host1'}
        fake_service = {'id': 'fake_service_id'}
        mock_service_get = self.mock_object(
            db, 'service_get_by_args',
            mock.Mock(return_value=fake_service)
        )
        mock_ensure = self.mock_object(
            self.controller.service_api, 'ensure_shares',
        )
        response = self.controller.ensure_shares(req, body)
        self.assertEqual(202, response.status_int)
        mock_service_get.assert_called_once_with(
            req_context, body['host'], 'manila-share'
        )
        mock_ensure.assert_called_once_with(
            req_context, fake_service, body['host']
        )
from unittest import mock

import ddt
import webob

from manila.api.v2 import share_access_metadata
from manila.api.v2 import share_accesses
from manila import context
from manila import exception
from manila import policy
from manila import test
from manila.tests.api import fakes
from manila.tests import db_utils
from oslo_utils import uuidutils


@ddt.ddt
class ShareAccessesMetadataAPITest(test.TestCase):
    """Tests for the share-access-rule metadata API controller."""

    def _get_request(self, version="2.45", use_admin_context=True):
        """Build a share-access-rules request at the given microversion."""
        return fakes.HTTPRequest.blank(
            '/v2/share-access-rules',
            version=version,
            use_admin_context=use_admin_context)

    def setUp(self):
        super(ShareAccessesMetadataAPITest, self).setUp()
        self.controller = (
            share_access_metadata.ShareAccessMetadataController())
        self.access_controller = (
            share_accesses.ShareAccessesController())
        self.resource_name = self.controller.resource_name
        self.admin_context = context.RequestContext('admin', 'fake', True)
        self.member_context = context.RequestContext('fake', 'fake')
        self.mock_policy_check = self.mock_object(
            policy, 'check_policy', mock.Mock(return_value=True))
        # One share with one access rule to attach metadata to.
        self.share = db_utils.create_share()
        self.access = db_utils.create_share_access(
            id=uuidutils.generate_uuid(), share_id=self.share['id'])

    @ddt.data(
        {'body': {'metadata': {'key1': 'v1'}}},
        {'body': {'metadata': {'test_key1': 'test_v1'}}},
        {'body': {'metadata': {'key1': 'v2'}}},
    )
    @ddt.unpack
    def test_update_metadata(self, body):
        """Updated metadata is echoed back and visible via show()."""
        req = self._get_request()
        updated = self.controller.update(req, self.access['id'], body=body)
        self.assertEqual(body, updated)

        shown = self.access_controller.show(req, self.access['id'])
        self.assertEqual(1, len(shown))
        self.assertIn(self.access['id'], shown['access']['id'])
        self.assertEqual(body['metadata'], shown['access']['metadata'])

    def test_delete_metadata(self):
        """A deleted metadata key no longer appears in show()."""
        body = {'metadata': {'test_key3': 'test_v3'}}
        req = self._get_request()
        self.controller.update(req, self.access['id'], body=body)
        self.controller.delete(req, self.access['id'], 'test_key3')

        shown = self.access_controller.show(req, self.access['id'])
        self.assertEqual(1, len(shown))
        self.assertIn(self.access['id'], shown['access']['id'])
        self.assertNotIn('test_key3', shown['access']['metadata'])

    def test_update_access_metadata_with_access_id_not_found(self):
        """Unknown access rule id maps to HTTP 404."""
        self.assertRaises(
            webob.exc.HTTPNotFound,
            self.controller.update,
            self._get_request(),
            'not_exist_access_id',
            {'metadata': {'key1': 'v1'}})

    def test_update_access_metadata_with_body_error(self):
        """A body without a 'metadata' key is rejected with HTTP 400."""
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller.update,
            self._get_request(),
            self.access['id'],
            {'metadata_error': {'key1': 'v1'}})

    @ddt.data(
        {'metadata': {'key1': 'v1', 'key2': None}},
        {'metadata': {None: 'v1', 'key2': 'v2'}},
        {'metadata': {'k' * 256: 'v2'}},      # key over 255 chars
        {'metadata': {'key1': 'v' * 1024}},   # value over 1023 chars
    )
    @ddt.unpack
    def test_update_metadata_with_invalid_metadata(self, metadata):
        """Null or oversized keys/values are rejected with HTTP 400."""
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller.update,
            self._get_request(),
            self.access['id'],
            {'metadata': metadata})

    def test_delete_access_metadata_not_found(self):
        """Deleting a key that was never set maps to HTTP 404."""
        body = {'metadata': {'test_key_exist': 'test_v_exsit'}}
        updated = self.controller.update(
            self._get_request(), self.access['id'], body=body)
        self.assertEqual(body, updated)
        self.assertRaises(
            webob.exc.HTTPNotFound,
            self.controller.delete,
            self._get_request(),
            self.access['id'],
            'key1')

    @ddt.data('1.0', '2.0', '2.8', '2.44')
    def test_update_metadata_unsupported_version(self, version):
        """update() is not exposed before microversion 2.45."""
        self.assertRaises(
            exception.VersionNotFoundForAPIMethod,
            self.controller.update,
            self._get_request(version=version),
            self.access['id'],
            {'metadata': {'key1': 'v1'}})

    @ddt.data('1.0', '2.0', '2.43')
    def test_delete_metadata_with_unsupported_version(self, version):
        """delete() is not exposed before microversion 2.45."""
        self.assertRaises(
            exception.VersionNotFoundForAPIMethod,
            self.controller.delete,
            self._get_request(version=version),
            self.access['id'],
            'key1')
from unittest import mock

import copy

import ddt
from webob import exc

from manila.api.v2 import share_accesses
from manila.common import constants
from manila import exception
from manila import policy
from manila import test
from manila.tests.api import fakes
from manila.tests import db_utils
from oslo_utils import uuidutils


@ddt.ddt
class ShareAccessesAPITest(test.TestCase):
    """Tests for the /share-access-rules API controller."""

    def _get_index_request(self, share_id=None, filters='', version="2.45",
                           use_admin_context=True):
        """Build a list request, optionally appending extra query filters."""
        share_id = share_id or self.share['id']
        req = fakes.HTTPRequest.blank(
            '/v2/share-access-rules?share_id=%s' % share_id + filters,
            version=version, use_admin_context=use_admin_context)
        return req

    def _get_show_request(self, access_id=None, version="2.45",
                          use_admin_context=True):
        """Build a show request for the given (or default) access rule."""
        access_id = access_id or self.access['id']
        req = fakes.HTTPRequest.blank(
            '/v2/share-access-rules/%s' % access_id,
            version=version, use_admin_context=use_admin_context)
        return req

    def setUp(self):
        super(ShareAccessesAPITest, self).setUp()
        self.controller = (
            share_accesses.ShareAccessesController())
        self.resource_name = self.controller.resource_name
        self.mock_policy_check = self.mock_object(
            policy, 'check_policy', mock.Mock(return_value=True))
        # One share with two access rules; the second carries metadata so
        # metadata-based filtering can be exercised.
        self.share = db_utils.create_share()
        self.access = db_utils.create_share_access(
            id=uuidutils.generate_uuid(),
            share_id=self.share['id'],
        )
        db_utils.create_share_access(
            id=uuidutils.generate_uuid(),
            share_id=self.share['id'],
            metadata={'k1': 'v1'}
        )

    @ddt.data({'role': 'admin', 'version': '2.45',
               'filters': '&metadata=%7B%27k1%27%3A+%27v1%27%7D'},
              {'role': 'user', 'version': '2.45', 'filters': ''})
    @ddt.unpack
    def test_list_and_show(self, role, version, filters):
        """Index and show agree on the summary view of each rule."""
        summary_keys = ['id', 'access_level', 'access_to',
                        'access_type', 'state', 'metadata']
        self._test_list_and_show(role, filters, version, summary_keys)

    def _test_list_and_show(self, role, filters, version, summary_keys):
        """List rules, then show each one and compare the shared keys."""
        req = self._get_index_request(
            filters=filters, version=version,
            use_admin_context=(role == 'admin'))
        index_result = self.controller.index(req)

        self.assertIn('access_list', index_result)
        self.assertEqual(1, len(index_result))
        # The metadata filter matches only the second rule.
        access_count = 1 if filters else 2
        self.assertEqual(access_count, len(index_result['access_list']))

        for index_access in index_result['access_list']:
            self.assertIn('id', index_access)
            req = self._get_show_request(
                index_access['id'], version=version,
                use_admin_context=(role == 'admin'))
            show_result = self.controller.show(req, index_access['id'])
            self.assertIn('access', show_result)
            self.assertEqual(1, len(show_result))
            show_el = show_result['access']
            # Ensure keys common to index & show results have matching values
            for key in summary_keys:
                self.assertEqual(index_access[key], show_el[key])

    @ddt.data(True, False)
    def test_list_accesses_restricted(self, restricted):
        """Restricted rules have access_to/access_key masked in listings."""
        req = self._get_index_request(version='2.82')
        rule_list = [{
            'access_to': '0.0.0.0/0',
            'id': 'fakeid',
            'access_key': 'fake_key'
        }]
        self.mock_object(
            self.controller.share_api, 'access_get_all',
            mock.Mock(return_value=rule_list))
        self.mock_object(
            self.controller, '_is_rule_restricted',
            mock.Mock(return_value=restricted))

        index_result = self.controller.index(req)

        self.assertIn('access_list', index_result)
        self.controller._is_rule_restricted.assert_called_once_with(
            req.environ['manila.context'], rule_list[0]['id'])
        if restricted:
            for access in index_result['access_list']:
                self.assertEqual('******', access['access_key'])
                self.assertEqual('******', access['access_to'])

    @ddt.data(True, False)
    def test_show_restricted(self, restricted):
        """Restricted rules have access_to masked in show()."""
        req = self._get_show_request(
            version='2.82', use_admin_context=False)
        self.mock_object(
            self.controller, '_is_rule_restricted',
            mock.Mock(return_value=restricted))

        show_result = self.controller.show(req, self.access['id'])

        expected_access_to = (
            '******' if restricted else self.access['access_to'])
        self.assertEqual(
            expected_access_to, show_result['access']['access_to'])

    @ddt.data(True, False)
    def test__is_rule_restricted(self, is_rule_restricted):
        """_is_rule_restricted reflects the resource-locks API verdict."""
        req = self._get_show_request(
            version='2.82', use_admin_context=False)
        context = req.environ['manila.context']
        fake_lock = {
            'lock_context': 'user',
            'user_id': 'fake',
            'project_id': 'fake',
            'resource_id': 'fake',
            'resource_action': constants.RESOURCE_ACTION_DELETE,
            'lock_reason': 'fake reason',
        }
        lock = fake_lock if is_rule_restricted else {}
        locks = [lock]
        self.mock_object(
            self.controller.resource_locks_api, 'get_all',
            mock.Mock(return_value=(locks, len(locks))))
        self.mock_object(
            self.controller.resource_locks_api, 'access_is_restricted',
            mock.Mock(return_value=is_rule_restricted))

        result_rule_restricted = self.controller._is_rule_restricted(
            context, self.access['id'])

        self.assertEqual(
            is_rule_restricted, result_rule_restricted)

    def test_list_accesses_share_not_found(self):
        """Listing for a nonexistent share id is a 400."""
        self.assertRaises(
            exc.HTTPBadRequest,
            self.controller.index,
            self._get_index_request(share_id='inexistent_share_id'))

    def test_list_accesses_share_req_share_id_not_exist(self):
        """Listing without a share_id query parameter is a 400."""
        req = fakes.HTTPRequest.blank('/v2/share-access-rules?',
                                      version="2.45")
        self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)

    def test_show_access_not_authorized(self):
        """A policy denial on the share surfaces as NotAuthorized."""
        share = db_utils.create_share(
            project_id='c3c5ec1ccc4640d0af1914cbf11f05ad',
            is_public=False)
        access = db_utils.create_access(
            id='76699c6b-f3da-47d7-b468-364f1347ba04',
            share_id=share['id'])
        req = fakes.HTTPRequest.blank(
            '/v2/share-access-rules/%s' % access['id'], version="2.45")
        # First two policy checks pass; the final share 'get' check denies.
        self.mock_object(
            policy, 'check_policy',
            mock.Mock(side_effect=[None, None, exception.NotAuthorized]))

        self.assertRaises(exception.NotAuthorized,
                          self.controller.show, req, access['id'])
        policy.check_policy.assert_has_calls([
            mock.call(req.environ['manila.context'],
                      'share_access_rule', 'get'),
            mock.call(req.environ['manila.context'], 'share', 'access_get'),
            mock.call(req.environ['manila.context'], 'share', 'get',
                      mock.ANY, do_raise=False)])
        policy_check_call_args_list = policy.check_policy.call_args_list[2][0]
        share_being_checked = policy_check_call_args_list[3]
        self.assertEqual('c3c5ec1ccc4640d0af1914cbf11f05ad',
                         share_being_checked['project_id'])
        self.assertIs(False, share_being_checked['is_public'])

    def test_show_access_not_found(self):
        """Showing a nonexistent access rule is a 404."""
        req = self._get_show_request('inexistent_id')
        # BUG FIX: removed a leftover debug statement
        # (print(req.environ)) that spammed the test output.
        self.assertRaises(
            exc.HTTPNotFound,
            self.controller.show,
            req, 'inexistent_id')

    @ddt.data('1.0', '2.0', '2.8', '2.44')
    def test_list_with_unsupported_version(self, version):
        """index() is not exposed before microversion 2.45."""
        self.assertRaises(
            exception.VersionNotFoundForAPIMethod,
            self.controller.index,
            self._get_index_request(version=version))

    @ddt.data('1.0', '2.0', '2.44')
    def test_show_with_unsupported_version(self, version):
        """show() is not exposed before microversion 2.45."""
        self.assertRaises(
            exception.VersionNotFoundForAPIMethod,
            self.controller.show,
            self._get_show_request(version=version),
            self.access['id'])

    def _get_update_request(self, access_id=None):
        """Build an experimental 2.88 update request for an access rule."""
        access_id = access_id or self.access['id']
        req = fakes.HTTPRequest.blank(
            '/v2/share-access-rules/%s' % access_id,
            version="2.88", experimental=True)
        return req

    def test_update_access_level(self):
        """A valid access-level update is reflected in the response."""
        update_share_access = copy.deepcopy(self.access)
        update_share_access.update({'access_level': 'ro'})
        self.mock_object(
            self.controller.share_api, 'update_access',
            mock.Mock(return_value=update_share_access))
        body = {'update_access': {'access_level': 'ro'}}
        url = self._get_update_request()
        ret = self.controller.update(url, self.access['id'], body=body)
        self.assertEqual(update_share_access['access_level'],
                         ret['access']['access_level'])

    def test_update_access_level_invalid_access_level(self):
        """An unrecognized access level is rejected with HTTP 400."""
        body = {'access': {'access_level': 'fake_access'}}
        self.assertRaises(
            exc.HTTPBadRequest,
            self.controller.update,
            self._get_update_request(), self.access['id'], body=body)

    def test_update_access_level_invalid_update_request(self):
        """A body updating a non-updatable field is rejected with 400."""
        body = {'access': {'access_key': 'xxxx'}}
        self.assertRaises(
            exc.HTTPBadRequest,
            self.controller.update,
            self._get_update_request(), self.access['id'], body=body)
import ddt from oslo_config import cfg from unittest import mock from webob import exc from manila.api.openstack import api_version_request as api_version from manila.api.v2 import share_backups from manila.common import constants from manila import context from manila import exception from manila import policy from manila import share from manila import test from manila.tests.api import fakes from manila.tests import db_utils from manila.tests import fake_share CONF = cfg.CONF @ddt.ddt class ShareBackupsApiTest(test.TestCase): """Share backups API Test Cases.""" def setUp(self): super(ShareBackupsApiTest, self).setUp() self.controller = share_backups.ShareBackupController() self.resource_name = self.controller.resource_name self.api_version = share_backups.MIN_SUPPORTED_API_VERSION self.backups_req = fakes.HTTPRequest.blank( '/share-backups', version=self.api_version, experimental=True) self.member_context = context.RequestContext('fake', 'fake') self.backups_req.environ['manila.context'] = self.member_context self.backups_req_admin = fakes.HTTPRequest.blank( '/share-backups', version=self.api_version, experimental=True, use_admin_context=True) self.admin_context = self.backups_req_admin.environ['manila.context'] self.mock_policy_check = self.mock_object(policy, 'check_policy') def _get_context(self, role): return getattr(self, '%s_context' % role) def _create_backup_get_req(self, **kwargs): if 'status' not in kwargs: kwargs['status'] = constants.STATUS_AVAILABLE backup = db_utils.create_share_backup(**kwargs) req = fakes.HTTPRequest.blank( '/v2/fake/share-backups/%s/action' % backup['id'], version=self.api_version) req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = self.api_version req.headers['X-Openstack-Manila-Api-Experimental'] = True return backup, req def _get_fake_backup(self, admin=False, summary=False, apiversion=share_backups.MIN_SUPPORTED_API_VERSION, **values): backup = 
fake_share.fake_backup(**values) backup['updated_at'] = '2016-06-12T19:57:56.506805' expected_keys = {'id', 'share_id', 'status'} expected_backup = {key: backup[key] for key in backup if key in expected_keys} expected_backup.update({'name': backup.get('display_name')}) if not summary: expected_backup.update({ 'id': backup.get('id'), 'share_id': backup.get('share_id'), 'status': backup.get('status'), 'description': backup.get('display_description'), 'size': backup.get('size'), 'created_at': backup.get('created_at'), 'updated_at': backup.get('updated_at'), 'availability_zone': backup.get('availability_zone'), 'progress': backup.get('progress'), 'restore_progress': backup.get('restore_progress'), }) if self.is_microversion_ge(apiversion, '2.85'): expected_backup.update({ 'backup_type': backup.get('backup_type'), }) if admin: expected_backup.update({ 'host': backup.get('host'), 'topic': backup.get('topic'), }) return backup, expected_backup def test_list_backups_summary(self): fake_backup, expected_backup = self._get_fake_backup(summary=True) self.mock_object(share_backups.db, 'share_backups_get_all', mock.Mock(return_value=[fake_backup])) res_dict = self.controller.index(self.backups_req) self.assertEqual([expected_backup], res_dict['share_backups']) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'get_all') def test_list_backups_summary_with_share_id(self): fake_backup, expected_backup = self._get_fake_backup(summary=True) self.mock_object(share.API, 'get', mock.Mock(return_value={'id': 'FAKE_SHAREID'})) self.mock_object(share_backups.db, 'share_backups_get_all', mock.Mock(return_value=[fake_backup])) req = fakes.HTTPRequest.blank( '/share-backups?share_id=FAKE_SHARE_ID', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual([expected_backup], res_dict['share_backups']) self.mock_policy_check.assert_called_once_with( req_context, 
self.resource_name, 'get_all') @ddt.data(True, False) def test_list_backups_detail(self, is_admin): fake_backup, expected_backup = self._get_fake_backup(admin=is_admin) self.mock_object(share_backups.db, 'share_backups_get_all', mock.Mock(return_value=[fake_backup])) req = self.backups_req if not is_admin else self.backups_req_admin req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual([expected_backup], res_dict['share_backups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_share_backups_detail_with_limit(self): fake_backup_1, expected_backup_1 = self._get_fake_backup() fake_backup_2, expected_backup_2 = self._get_fake_backup( id="fake_id2") self.mock_object( share_backups.db, 'share_backups_get_all', mock.Mock(return_value=[fake_backup_1])) req = fakes.HTTPRequest.blank('/share-backups?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['share_backups'])) self.assertEqual([expected_backup_1], res_dict['share_backups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_share_backups_detail_with_limit_and_offset(self): fake_backup_1, expected_backup_1 = self._get_fake_backup() fake_backup_2, expected_backup_2 = self._get_fake_backup( id="fake_id2") self.mock_object( share_backups.db, 'share_backups_get_all', mock.Mock(return_value=[fake_backup_2])) req = fakes.HTTPRequest.blank( '/share-backups/detail?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['share_backups'])) self.assertEqual([expected_backup_2], res_dict['share_backups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def 
test_list_share_backups_detail_invalid_share(self): self.mock_object(share_backups.db, 'share_backups_get_all', mock.Mock(side_effect=exception.NotFound)) mock__view_builder_call = self.mock_object( share_backups.backup_view.BackupViewBuilder, 'detail_list') req = self.backups_req req.GET['share_id'] = 'FAKE_SHARE_ID' self.assertRaises(exc.HTTPBadRequest, self.controller.detail, req) self.assertFalse(mock__view_builder_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'get_all') @ddt.data(share_backups.MIN_SUPPORTED_API_VERSION, api_version._MAX_API_VERSION) def test_list_share_backups_detail(self, apiversion): fake_backup, expected_backup = self._get_fake_backup( apiversion=apiversion, ) self.mock_object(share.API, 'get', mock.Mock(return_value={'id': 'FAKE_SHAREID'})) self.mock_object(share_backups.db, 'share_backups_get_all', mock.Mock(return_value=[fake_backup])) req = fakes.HTTPRequest.blank( '/share-backups?share_id=FAKE_SHARE_ID', version=apiversion, experimental=True) req.environ['manila.context'] = ( self.member_context) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual([expected_backup], res_dict['share_backups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_share_backups_with_limit(self): fake_backup_1, expected_backup_1 = self._get_fake_backup() fake_backup_2, expected_backup_2 = self._get_fake_backup( id="fake_id2") self.mock_object(share.API, 'get', mock.Mock(return_value={'id': 'FAKE_SHAREID'})) self.mock_object( share_backups.db, 'share_backups_get_all', mock.Mock(return_value=[fake_backup_1])) req = fakes.HTTPRequest.blank( '/share-backups?share_id=FAKE_SHARE_ID&limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['share_backups'])) self.assertEqual([expected_backup_1], 
res_dict['share_backups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_share_backups_with_limit_and_offset(self): fake_backup_1, expected_backup_1 = self._get_fake_backup() fake_backup_2, expected_backup_2 = self._get_fake_backup( id="fake_id2") self.mock_object(share.API, 'get', mock.Mock(return_value={'id': 'FAKE_SHAREID'})) self.mock_object( share_backups.db, 'share_backups_get_all', mock.Mock(return_value=[fake_backup_2])) req = fakes.HTTPRequest.blank( '/share-backups?share_id=FAKE_SHARE_ID&limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['share_backups'])) self.assertEqual([expected_backup_2], res_dict['share_backups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_show(self): fake_backup, expected_backup = self._get_fake_backup() self.mock_object( share_backups.db, 'share_backup_get', mock.Mock(return_value=fake_backup)) req = self.backups_req res_dict = self.controller.show(req, fake_backup.get('id')) self.assertEqual(expected_backup, res_dict['share_backup']) def test_show_no_backup(self): mock__view_builder_call = self.mock_object( share_backups.backup_view.BackupViewBuilder, 'detail') fake_exception = exception.ShareBackupNotFound( backup_id='FAKE_backup_ID') self.mock_object(share_backups.db, 'share_backup_get', mock.Mock( side_effect=fake_exception)) self.assertRaises(exc.HTTPNotFound, self.controller.show, self.backups_req, 'FAKE_backup_ID') self.assertFalse(mock__view_builder_call.called) def test_create_invalid_body(self): body = {} mock__view_builder_call = self.mock_object( share_backups.backup_view.BackupViewBuilder, 'detail_list') self.assertRaises(exc.HTTPUnprocessableEntity, self.controller.create, self.backups_req, body) self.assertEqual(0, mock__view_builder_call.call_count) def 
test_create_no_share_id(self): body = { 'share_backup': { 'share_id': None, 'availability_zone': None, } } mock__view_builder_call = self.mock_object( share_backups.backup_view.BackupViewBuilder, 'detail_list') self.mock_object(share_backups.db, 'share_get', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(exc.HTTPBadRequest, self.controller.create, self.backups_req, body) self.assertFalse(mock__view_builder_call.called) def test_create_invalid_share_id(self): body = { 'share_backup': { 'share_id': None, } } mock__view_builder_call = self.mock_object( share_backups.backup_view.BackupViewBuilder, 'detail_list') self.mock_object(share.API, 'get', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(exc.HTTPBadRequest, self.controller.create, self.backups_req, body) self.assertFalse(mock__view_builder_call.called) @ddt.data(exception.InvalidBackup, exception.ShareBusyException) def test_create_exception_path(self, exception_type): fake_backup, _ = self._get_fake_backup() mock__view_builder_call = self.mock_object( share_backups.backup_view.BackupViewBuilder, 'detail_list') body = { 'share_backup': { 'share_id': 'FAKE_SHAREID', } } exc_args = {'id': 'xyz', 'reason': 'abc'} self.mock_object(share.API, 'get', mock.Mock(return_value={'id': 'FAKE_SHAREID'})) self.mock_object(share.API, 'create_share_backup', mock.Mock(side_effect=exception_type(**exc_args))) if exception_type == exception.InvalidBackup: expected_exception = exc.HTTPBadRequest else: expected_exception = exc.HTTPConflict self.assertRaises(expected_exception, self.controller.create, self.backups_req, body) self.assertFalse(mock__view_builder_call.called) def test_create(self): fake_backup, expected_backup = self._get_fake_backup() body = { 'share_backup': { 'share_id': 'FAKE_SHAREID', } } self.mock_object(share.API, 'get', mock.Mock(return_value={'id': 'FAKE_SHAREID'})) self.mock_object(share.API, 'create_share_backup', mock.Mock(return_value=fake_backup)) req = self.backups_req res_dict = 
self.controller.create(req, body) self.assertEqual(expected_backup, res_dict['share_backup']) def test_delete_invalid_backup(self): fake_exception = exception.ShareBackupNotFound( backup_id='FAKE_backup_ID') self.mock_object(share_backups.db, 'share_backup_get', mock.Mock(side_effect=fake_exception)) mock_delete_backup_call = self.mock_object( share.API, 'delete_share_backup') self.assertRaises( exc.HTTPNotFound, self.controller.delete, self.backups_req, 'FAKE_backup_ID') self.assertFalse(mock_delete_backup_call.called) def test_delete_exception(self): fake_backup_1 = self._get_fake_backup( share_id='FAKE_SHARE_ID', status=constants.STATUS_BACKUP_CREATING)[0] fake_backup_2 = self._get_fake_backup( share_id='FAKE_SHARE_ID', status=constants.STATUS_BACKUP_CREATING)[0] exception_type = exception.InvalidBackup(reason='xyz') self.mock_object(share_backups.db, 'share_backup_get', mock.Mock(return_value=fake_backup_1)) self.mock_object( share_backups.db, 'share_backups_get_all', mock.Mock(return_value=[fake_backup_1, fake_backup_2])) self.mock_object(share.API, 'delete_share_backup', mock.Mock(side_effect=exception_type)) self.assertRaises(exc.HTTPBadRequest, self.controller.delete, self.backups_req, 'FAKE_backup_ID') def test_delete(self): fake_backup = self._get_fake_backup( share_id='FAKE_SHARE_ID', status=constants.STATUS_AVAILABLE)[0] self.mock_object(share_backups.db, 'share_backup_get', mock.Mock(return_value=fake_backup)) self.mock_object(share.API, 'delete_share_backup') resp = self.controller.delete( self.backups_req, 'FAKE_backup_ID') self.assertEqual(202, resp.status_code) def test_restore_invalid_backup_id(self): body = {'restore': None} fake_exception = exception.ShareBackupNotFound( backup_id='FAKE_BACKUP_ID') self.mock_object(share.API, 'restore', mock.Mock(side_effect=fake_exception)) self.assertRaises(exc.HTTPNotFound, self.controller.restore, self.backups_req, 'FAKE_BACKUP_ID', body) def test_restore(self): body = {'restore': None} fake_share_obj = 
fake_share.fake_share( id='FAKE_SHARE_ID', status=constants.STATUS_AVAILABLE, size=1 ) fake_backup = self._get_fake_backup( share_id=fake_share_obj['id'], status=constants.STATUS_AVAILABLE)[0] fake_backup_restore = { 'share_id': fake_share_obj['id'], 'backup_id': fake_backup['id'], } mock_api_restore_backup_call = self.mock_object( share.API, 'restore_share_backup', mock.Mock(return_value=fake_backup_restore)) self.mock_object(share_backups.db, 'share_get', mock.Mock(return_value=fake_share_obj)) self.mock_object(share_backups.db, 'share_backup_get', mock.Mock(return_value=fake_backup)) self.mock_object(share.API, 'get', mock.Mock(return_value={'id': 'FAKE_SHARE_ID'})) resp = self.controller.restore(self.backups_req, fake_backup['id'], body) self.assertEqual(fake_backup_restore, resp['restore']) self.assertTrue(mock_api_restore_backup_call.called) def test_restore_to_target_share(self): body = {'restore': 'FAKE_TRGT_SHARE_ID'} # overide req version to microversion with targeted restore. 
self.backups_req = fakes.HTTPRequest.blank( '/share-backups', version='2.91', experimental=True) fake_target_share_obj = fake_share.fake_share( id='FAKE_TRGT_SHARE_ID', status=constants.STATUS_AVAILABLE, size=1 ) fake_share_obj = fake_share.fake_share( id='FAKE_SHARE_ID', status=constants.STATUS_AVAILABLE, size=1 ) fake_backup = self._get_fake_backup( share_id=fake_share_obj['id'], status=constants.STATUS_AVAILABLE)[0] fake_backup_restore = { 'share_id': fake_target_share_obj['id'], 'backup_id': fake_backup['id'], } mock_api_restore_backup_call = self.mock_object( share.API, 'restore_share_backup', mock.Mock(return_value=fake_backup_restore)) self.mock_object(share_backups.db, 'share_get', mock.Mock(return_value=fake_target_share_obj)) self.mock_object(share_backups.db, 'share_backup_get', mock.Mock(return_value=fake_backup)) self.mock_object(share.API, 'get', mock.Mock(return_value={'id': 'FAKE_TRGT_SHARE_ID'})) resp = self.controller.restore(self.backups_req, fake_backup['id'], body) self.assertEqual(fake_backup_restore, resp['restore']) self.assertTrue(mock_api_restore_backup_call.called) def test_update(self): fake_backup = self._get_fake_backup( share_id='FAKE_SHARE_ID', status=constants.STATUS_AVAILABLE)[0] self.mock_object(share_backups.db, 'share_backup_get', mock.Mock(return_value=fake_backup)) body = {'share_backup': {'name': 'backup1'}} fake_backup_update = { 'share_id': 'FAKE_SHARE_ID', 'backup_id': fake_backup['id'], 'display_name': 'backup1' } mock_api_update_backup_call = self.mock_object( share.API, 'update_share_backup', mock.Mock(return_value=fake_backup_update)) resp = self.controller.update(self.backups_req, fake_backup['id'], body) self.assertEqual(fake_backup_update['display_name'], resp['share_backup']['name']) self.assertTrue(mock_api_update_backup_call.called) @ddt.data('index', 'detail') def test_policy_not_authorized(self, method_name): method = getattr(self.controller, method_name) arguments = { 'id': 'FAKE_backup_ID', 'body': 
{'FAKE_KEY': 'FAKE_VAL'}, } if method_name in ('index', 'detail'): arguments.clear() noauthexc = exception.PolicyNotAuthorized(action=method) with mock.patch.object( policy, 'check_policy', mock.Mock(side_effect=noauthexc)): self.assertRaises( exc.HTTPForbidden, method, self.backups_req, **arguments) @ddt.data('index', 'detail', 'show', 'create', 'delete') def test_upsupported_microversion(self, method_name): unsupported_microversions = ('1.0', '2.2', '2.18') method = getattr(self.controller, method_name) arguments = { 'id': 'FAKE_BACKUP_ID', 'body': {'FAKE_KEY': 'FAKE_VAL'}, } if method_name in ('index', 'detail'): arguments.clear() for microversion in unsupported_microversions: req = fakes.HTTPRequest.blank( '/share-backups', version=microversion, experimental=True) self.assertRaises(exception.VersionNotFoundForAPIMethod, method, req, **arguments) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_export_locations.py0000664000175000017500000003415500000000000024760 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from webob import exc from manila.api.openstack import api_version_request as api_version from manila.api.v2 import share_export_locations as export_locations from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import db_utils @ddt.ddt class ShareExportLocationsAPITest(test.TestCase): def _get_request(self, version="2.9", use_admin_context=True): req = fakes.HTTPRequest.blank( '/v2/shares/%s/export_locations' % self.share_instance_id, version=version, use_admin_context=use_admin_context) return req def setUp(self): super(ShareExportLocationsAPITest, self).setUp() self.controller = ( export_locations.ShareExportLocationController()) self.resource_name = self.controller.resource_name self.ctxt = { 'admin': context.RequestContext('admin', 'fake', True), 'user': context.RequestContext('fake', 'fake'), } self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.share = db_utils.create_share() self.share_instance_id = self.share.instance.id self.req = self._get_request() paths = ['fake1/1/', 'fake2/2', 'fake3/3'] db.export_locations_update( self.ctxt['admin'], self.share_instance_id, paths, False) @ddt.data({'role': 'admin', 'version': '2.9'}, {'role': 'user', 'version': '2.9'}, {'role': 'admin', 'version': '2.13'}, {'role': 'user', 'version': '2.13'}) @ddt.unpack def test_list_and_show(self, role, version): summary_keys = ['id', 'path'] admin_summary_keys = summary_keys + [ 'share_instance_id', 'is_admin_only'] detail_keys = summary_keys + ['created_at', 'updated_at'] admin_detail_keys = admin_summary_keys + ['created_at', 'updated_at'] self._test_list_and_show(role, version, summary_keys, detail_keys, admin_summary_keys, admin_detail_keys) @ddt.data('admin', 'user') def test_list_and_show_with_preferred_flag(self, role): 
summary_keys = ['id', 'path', 'preferred'] admin_summary_keys = summary_keys + [ 'share_instance_id', 'is_admin_only'] detail_keys = summary_keys + ['created_at', 'updated_at'] admin_detail_keys = admin_summary_keys + ['created_at', 'updated_at'] self._test_list_and_show(role, '2.14', summary_keys, detail_keys, admin_summary_keys, admin_detail_keys) def _test_list_and_show(self, role, version, summary_keys, detail_keys, admin_summary_keys, admin_detail_keys): req = self._get_request(version=version, use_admin_context=(role == 'admin')) index_result = self.controller.index(req, self.share['id']) self.assertIn('export_locations', index_result) self.assertEqual(1, len(index_result)) self.assertEqual(3, len(index_result['export_locations'])) for index_el in index_result['export_locations']: self.assertIn('id', index_el) show_result = self.controller.show( req, self.share['id'], index_el['id']) self.assertIn('export_location', show_result) self.assertEqual(1, len(show_result)) show_el = show_result['export_location'] # Check summary keys in index result & detail keys in show result if role == 'admin': self.assertEqual(len(admin_summary_keys), len(index_el)) for key in admin_summary_keys: self.assertIn(key, index_el) self.assertEqual(len(admin_detail_keys), len(show_el)) for key in admin_detail_keys: self.assertIn(key, show_el) else: self.assertEqual(len(summary_keys), len(index_el)) for key in summary_keys: self.assertIn(key, index_el) self.assertEqual(len(detail_keys), len(show_el)) for key in detail_keys: self.assertIn(key, show_el) # Ensure keys common to index & show results have matching values for key in summary_keys: self.assertEqual(index_el[key], show_el[key]) def test_list_export_locations_share_not_found(self): self.assertRaises( exc.HTTPNotFound, self.controller.index, self.req, 'inexistent_share_id', ) def test_show_export_location_share_not_found(self): index_result = self.controller.index(self.req, self.share['id']) el_id = 
index_result['export_locations'][0]['id'] self.assertRaises( exc.HTTPNotFound, self.controller.show, self.req, 'inexistent_share_id', el_id, ) def test_show_export_location_not_found(self): self.assertRaises( exc.HTTPNotFound, self.controller.show, self.req, self.share['id'], 'inexistent_export_location', ) def test_get_admin_export_location(self): el_data = { 'path': '/admin/export/location', 'is_admin_only': True, 'metadata': {'foo': 'bar'}, } db.export_locations_update( self.ctxt['admin'], self.share_instance_id, el_data, True) index_result = self.controller.index(self.req, self.share['id']) el_id = index_result['export_locations'][0]['id'] # Not found for member member_req = self._get_request(use_admin_context=False) self.assertRaises( exc.HTTPForbidden, self.controller.show, member_req, self.share['id'], el_id, ) # Ok for admin el = self.controller.show(self.req, self.share['id'], el_id) for k, v in el.items(): self.assertEqual(v, el[k]) @ddt.data(*set(('2.46', '2.47', api_version._MAX_API_VERSION))) def test_list_export_locations_replicated_share(self, version): """Test the export locations API changes between 2.46 and 2.47 For API version <= 2.46, non-active replica export locations are included in the API response. They are not included in and beyond version 2.47. 
""" # Setup data share = db_utils.create_share( replication_type=constants.REPLICATION_TYPE_READABLE, replica_state=constants.REPLICA_STATE_ACTIVE) active_replica_id = share.instance.id exports = [ {'path': 'myshare.mydomain/active-replica-exp1', 'is_admin_only': False}, {'path': 'myshare.mydomain/active-replica-exp2', 'is_admin_only': False}, ] db.export_locations_update( self.ctxt['user'], active_replica_id, exports) # Replicas share_replica2 = db_utils.create_share_replica( share_id=share.id, replica_state=constants.REPLICA_STATE_IN_SYNC) share_replica3 = db_utils.create_share_replica( share_id=share.id, replica_state=constants.REPLICA_STATE_OUT_OF_SYNC) replica2_exports = [ {'path': 'myshare.mydomain/insync-replica-exp', 'is_admin_only': False} ] replica3_exports = [ {'path': 'myshare.mydomain/outofsync-replica-exp', 'is_admin_only': False} ] db.export_locations_update( self.ctxt['user'], share_replica2.id, replica2_exports) db.export_locations_update( self.ctxt['user'], share_replica3.id, replica3_exports) req = self._get_request(version=version) index_result = self.controller.index(req, share['id']) actual_paths = [el['path'] for el in index_result['export_locations']] if self.is_microversion_ge(version, '2.47'): self.assertEqual(2, len(index_result['export_locations'])) self.assertNotIn( 'myshare.mydomain/insync-replica-exp', actual_paths) self.assertNotIn( 'myshare.mydomain/outofsync-replica-exp', actual_paths) else: self.assertEqual(4, len(index_result['export_locations'])) self.assertIn('myshare.mydomain/insync-replica-exp', actual_paths) self.assertIn( 'myshare.mydomain/outofsync-replica-exp', actual_paths) @ddt.data('1.0', '2.0', '2.8') def test_list_with_unsupported_version(self, version): self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.index, self._get_request(version), self.share_instance_id, ) @ddt.data('1.0', '2.0', '2.8') def test_show_with_unsupported_version(self, version): index_result = self.controller.index(self.req, 
self.share['id']) self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.show, self._get_request(version), self.share['id'], index_result['export_locations'][0]['id'] ) def test_validate_metadata_for_update(self): index_result = self.controller.index(self.req, self.share['id']) el_id = index_result['export_locations'][0]['id'] metadata = {"foo": "bar", "preferred": "False"} req = fakes.HTTPRequest.blank( '/v2/shares/%s/export_locations/%s/metadata' % ( self.share_instance_id, el_id), version="2.87", use_admin_context=True) result = self.controller._validate_metadata_for_update( req, el_id, metadata) self.assertEqual(metadata, result) def test_validate_metadata_for_update_invalid(self): index_result = self.controller.index(self.req, self.share['id']) el_id = index_result['export_locations'][0]['id'] metadata = {"foo": "bar", "preferred": "False"} self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock( side_effect=exception.PolicyNotAuthorized( action="update_admin_only_metadata"))) req = fakes.HTTPRequest.blank( '/v2/shares/%s/export_locations/%s/metadata' % ( self.share_instance_id, el_id), version="2.87", use_admin_context=False) self.assertRaises(exc.HTTPForbidden, self.controller._validate_metadata_for_update, req, el_id, metadata) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 'share_export_location', 'update_admin_only_metadata') def test_create_metadata(self): index_result = self.controller.index(self.req, self.share['id']) el_id = index_result['export_locations'][0]['id'] body = {'metadata': {'key1': 'val1', 'key2': 'val2'}} mock_validate = self.mock_object( self.controller, '_validate_metadata_for_update', mock.Mock(return_value=body['metadata'])) mock_create = self.mock_object( self.controller, '_create_metadata', mock.Mock(return_value=body)) req = fakes.HTTPRequest.blank( '/v2/shares/%s/export_locations/%s/metadata' % ( self.share_instance_id, el_id), version="2.87", 
use_admin_context=True) res = self.controller.create_metadata(req, self.share['id'], el_id, body) self.assertEqual(body, res) mock_validate.assert_called_once_with(req, el_id, body['metadata'], delete=False) mock_create.assert_called_once_with(req, el_id, body) def test_update_all_metadata(self): index_result = self.controller.index(self.req, self.share['id']) el_id = index_result['export_locations'][0]['id'] body = {'metadata': {'key1': 'val1', 'key2': 'val2'}} mock_validate = self.mock_object( self.controller, '_validate_metadata_for_update', mock.Mock(return_value=body['metadata'])) mock_update = self.mock_object( self.controller, '_update_all_metadata', mock.Mock(return_value=body)) req = fakes.HTTPRequest.blank( '/v2/shares/%s/export_locations/%s/metadata' % ( self.share_instance_id, el_id), version="2.87", use_admin_context=True) res = self.controller.update_all_metadata(req, self.share['id'], el_id, body) self.assertEqual(body, res) mock_validate.assert_called_once_with(req, el_id, body['metadata']) mock_update.assert_called_once_with(req, el_id, body) def test_delete_metadata(self): index_result = self.controller.index(self.req, self.share['id']) el_id = index_result['export_locations'][0]['id'] mock_delete = self.mock_object( self.controller, '_delete_metadata', mock.Mock()) req = fakes.HTTPRequest.blank( '/v2/shares/%s/export_locations/%s/metadata/fake_key' % ( self.share_instance_id, el_id), version="2.87", use_admin_context=True) self.controller.delete_metadata(req, self.share['id'], el_id, 'fake_key') mock_delete.assert_called_once_with(req, el_id, 'fake_key') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_group_snapshots.py0000664000175000017500000006626500000000000024631 0ustar00zuulzuul00000000000000# Copyright 2016 Alex Meade # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import uuidutils import webob from manila.api.openstack import wsgi from manila.api.v2 import share_group_snapshots from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import db_utils CONF = cfg.CONF SG_GRADUATION_VERSION = '2.55' @ddt.ddt class ShareGroupSnapshotAPITest(test.TestCase): def setUp(self): super(ShareGroupSnapshotAPITest, self).setUp() self.controller = share_group_snapshots.ShareGroupSnapshotController() self.resource_name = self.controller.resource_name self.api_version = '2.31' self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.request = fakes.HTTPRequest.blank( '/share-groups', version=self.api_version, experimental=True) self.context = self.request.environ['manila.context'] self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') self.flags(transport_url='rabbit://fake:fake@mqhost:5672') def _get_fake_share_group_snapshot(self, **values): snap = { 'id': 'fake_id', 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'name': None, 'description': None, 
'share_group_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'members': [], } snap.update(**values) expected_snap = copy.deepcopy(snap) del expected_snap['user_id'] return snap, expected_snap def _get_fake_simple_share_group_snapshot(self, **values): snap = { 'id': 'fake_id', 'name': None, } snap.update(**values) expected_snap = copy.deepcopy(snap) return snap, expected_snap def _get_fake_share_group_snapshot_member(self, **values): member = { 'id': 'fake_id', 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'share_group_snapshot_id': None, 'share_proto': None, 'share_id': None, 'size': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), } member.update(**values) expected_member = copy.deepcopy(member) del expected_member['user_id'] del expected_member['status'] expected_member['share_protocol'] = member['share_proto'] del expected_member['share_proto'] return member, expected_member def _get_fake_custom_request_and_context(self, microversion, experimental): req = fakes.HTTPRequest.blank( '/share-group-snapshots', version=microversion, experimental=experimental) req_context = req.environ['manila.context'] return req, req_context def test_create_invalid_body(self): body = {"not_group_snapshot": {}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_no_share_group_id(self): body = {"share_group_snapshot": {}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_create(self, microversion, experimental): fake_snap, expected_snap = self._get_fake_share_group_snapshot() fake_id = 
uuidutils.generate_uuid() body = {"share_group_snapshot": {"share_group_id": fake_id}} mock_create = self.mock_object( self.controller.share_group_api, 'create_share_group_snapshot', mock.Mock(return_value=fake_snap)) req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) res_dict = self.controller.create(req, body) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') mock_create.assert_called_once_with( req_context, share_group_id=fake_id) res_dict['share_group_snapshot'].pop('links') self.assertEqual(expected_snap, res_dict['share_group_snapshot']) def test_create_group_does_not_exist(self): fake_id = uuidutils.generate_uuid() body = {"share_group_snapshot": {"share_group_id": fake_id}} self.mock_object( self.controller.share_group_api, 'create_share_group_snapshot', mock.Mock(side_effect=exception.ShareGroupNotFound( share_group_id=uuidutils.generate_uuid()))) self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_group_does_not_a_uuid(self): self.mock_object( self.controller.share_group_api, 'create_share_group_snapshot', mock.Mock(side_effect=exception.ShareGroupNotFound( share_group_id='not_a_uuid', ))) body = {"share_group_snapshot": {"share_group_id": "not_a_uuid"}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_invalid_share_group(self): fake_id = uuidutils.generate_uuid() body = {"share_group_snapshot": {"share_group_id": fake_id}} self.mock_object( self.controller.share_group_api, 'create_share_group_snapshot', mock.Mock(side_effect=exception.InvalidShareGroup( reason='bad_status'))) self.assertRaises( webob.exc.HTTPConflict, self.controller.create, self.request, body) 
self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_with_name(self): fake_name = 'fake_name' fake_snap, expected_snap = self._get_fake_share_group_snapshot( name=fake_name) fake_id = uuidutils.generate_uuid() mock_create = self.mock_object( self.controller.share_group_api, 'create_share_group_snapshot', mock.Mock(return_value=fake_snap)) body = { "share_group_snapshot": { "share_group_id": fake_id, "name": fake_name, } } res_dict = self.controller.create(self.request, body) res_dict['share_group_snapshot'].pop('links') mock_create.assert_called_once_with( self.context, share_group_id=fake_id, name=fake_name) self.assertEqual(expected_snap, res_dict['share_group_snapshot']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_with_description(self): fake_description = 'fake_description' fake_snap, expected_snap = self._get_fake_share_group_snapshot( description=fake_description) fake_id = uuidutils.generate_uuid() mock_create = self.mock_object( self.controller.share_group_api, 'create_share_group_snapshot', mock.Mock(return_value=fake_snap)) body = { "share_group_snapshot": { "share_group_id": fake_id, "description": fake_description, } } res_dict = self.controller.create(self.request, body) res_dict['share_group_snapshot'].pop('links') mock_create.assert_called_once_with( self.context, share_group_id=fake_id, description=fake_description) self.assertEqual(expected_snap, res_dict['share_group_snapshot']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_create_with_name_and_description(self): fake_name = 'fake_name' fake_description = 'fake_description' fake_id = uuidutils.generate_uuid() fake_snap, expected_snap = self._get_fake_share_group_snapshot( description=fake_description, name=fake_name) mock_create = self.mock_object( self.controller.share_group_api, 'create_share_group_snapshot', 
mock.Mock(return_value=fake_snap)) body = { "share_group_snapshot": { "share_group_id": fake_id, "description": fake_description, "name": fake_name, } } res_dict = self.controller.create(self.request, body) res_dict['share_group_snapshot'].pop('links') mock_create.assert_called_once_with( self.context, share_group_id=fake_id, name=fake_name, description=fake_description) self.assertEqual(expected_snap, res_dict['share_group_snapshot']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_update_with_name_and_description(self, microversion, experimental): fake_name = 'fake_name' fake_description = 'fake_description' fake_id = uuidutils.generate_uuid() fake_snap, expected_snap = self._get_fake_share_group_snapshot( description=fake_description, name=fake_name) self.mock_object( self.controller.share_group_api, 'get_share_group_snapshot', mock.Mock(return_value=fake_snap)) mock_update = self.mock_object( self.controller.share_group_api, 'update_share_group_snapshot', mock.Mock(return_value=fake_snap)) req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) body = { "share_group_snapshot": { "description": fake_description, "name": fake_name, } } res_dict = self.controller.update(req, fake_id, body) res_dict['share_group_snapshot'].pop('links') mock_update.assert_called_once_with( req_context, fake_snap, {"name": fake_name, "description": fake_description}) self.assertEqual(expected_snap, res_dict['share_group_snapshot']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'update') def test_update_snapshot_not_found(self): body = {"share_group_snapshot": {}} self.mock_object( self.controller.share_group_api, 'get_share_group_snapshot', mock.Mock(side_effect=exception.NotFound)) self.assertRaises( webob.exc.HTTPNotFound, 
self.controller.update, self.request, 'fake_id', body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_update_invalid_body(self): body = {"not_group_snapshot": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_update_invalid_body_invalid_field(self): body = {"share_group_snapshot": {"unknown_field": ""}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.assertIn('unknown_field', str(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_update_invalid_body_readonly_field(self): body = {"share_group_snapshot": {"created_at": []}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.assertIn('created_at', str(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_list_index(self, microversion, experimental): fake_snap, expected_snap = self._get_fake_simple_share_group_snapshot() self.mock_object( self.controller.share_group_api, 'get_all_share_group_snapshots', mock.Mock(return_value=[fake_snap])) req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) res_dict = self.controller.index(req) res_dict['share_group_snapshots'][0].pop('links') self.assertEqual([expected_snap], res_dict['share_group_snapshots']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_index_no_share_groups(self): self.mock_object( self.controller.share_group_api, 'get_all_share_group_snapshots', mock.Mock(return_value=[])) res_dict = 
self.controller.index(self.request) self.assertEqual([], res_dict['share_group_snapshots']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_list_index_with_limit(self): fake_snap, expected_snap = self._get_fake_simple_share_group_snapshot() fake_snap2, expected_snap2 = ( self._get_fake_simple_share_group_snapshot( id="fake_id2")) self.mock_object( self.controller.share_group_api, 'get_all_share_group_snapshots', mock.Mock(return_value=[fake_snap, fake_snap2])) req = fakes.HTTPRequest.blank('/share-group-snapshots?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) res_dict['share_group_snapshots'][0].pop('links') self.assertEqual(1, len(res_dict['share_group_snapshots'])) self.assertEqual([expected_snap], res_dict['share_group_snapshots']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_index_with_limit_and_offset(self): fake_snap, expected_snap = self._get_fake_simple_share_group_snapshot() fake_snap2, expected_snap2 = ( self._get_fake_simple_share_group_snapshot(id="fake_id2")) self.mock_object( self.controller.share_group_api, 'get_all_share_group_snapshots', mock.Mock(return_value=[fake_snap, fake_snap2])) req = fakes.HTTPRequest.blank( '/share-group-snapshots?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) res_dict['share_group_snapshots'][0].pop('links') self.assertEqual(1, len(res_dict['share_group_snapshots'])) self.assertEqual([expected_snap2], res_dict['share_group_snapshots']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_list_detail(self, microversion, experimental): fake_snap, 
expected_snap = self._get_fake_share_group_snapshot() self.mock_object( self.controller.share_group_api, 'get_all_share_group_snapshots', mock.Mock(return_value=[fake_snap])) req, context = self._get_fake_custom_request_and_context( microversion, experimental) res_dict = self.controller.detail(req) res_dict['share_group_snapshots'][0].pop('links') self.assertEqual(1, len(res_dict['share_group_snapshots'])) self.assertEqual(expected_snap, res_dict['share_group_snapshots'][0]) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'get_all') def test_list_detail_no_share_groups(self): self.mock_object( self.controller.share_group_api, 'get_all_share_group_snapshots', mock.Mock(return_value=[])) res_dict = self.controller.detail(self.request) self.assertEqual([], res_dict['share_group_snapshots']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_list_detail_with_limit(self): fake_snap, expected_snap = self._get_fake_share_group_snapshot() fake_snap2, expected_snap2 = self._get_fake_share_group_snapshot( id="fake_id2") self.mock_object( self.controller.share_group_api, 'get_all_share_group_snapshots', mock.Mock(return_value=[fake_snap, fake_snap2])) req = fakes.HTTPRequest.blank('/share-group-snapshots?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) res_dict['share_group_snapshots'][0].pop('links') self.assertEqual(1, len(res_dict['share_group_snapshots'])) self.assertEqual([expected_snap], res_dict['share_group_snapshots']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_detail_with_limit_and_offset(self): fake_snap, expected_snap = self._get_fake_share_group_snapshot() fake_snap2, expected_snap2 = self._get_fake_share_group_snapshot( id="fake_id2") self.mock_object( self.controller.share_group_api, 'get_all_share_group_snapshots', 
mock.Mock(return_value=[fake_snap, fake_snap2])) req = fakes.HTTPRequest.blank( '/share-group-snapshots?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) res_dict['share_group_snapshots'][0].pop('links') self.assertEqual(1, len(res_dict['share_group_snapshots'])) self.assertEqual([expected_snap2], res_dict['share_group_snapshots']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_delete(self, microversion, experimental): fake_snap, expected_snap = self._get_fake_share_group_snapshot() self.mock_object( self.controller.share_group_api, 'get_share_group_snapshot', mock.Mock(return_value=fake_snap)) self.mock_object( self.controller.share_group_api, 'delete_share_group_snapshot') req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) res = self.controller.delete(req, fake_snap['id']) self.assertEqual(202, res.status_code) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') def test_delete_not_found(self): fake_snap, expected_snap = self._get_fake_share_group_snapshot() self.mock_object( self.controller.share_group_api, 'get_share_group_snapshot', mock.Mock(side_effect=exception.NotFound)) self.assertRaises( webob.exc.HTTPNotFound, self.controller.delete, self.request, fake_snap['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') def test_delete_in_conflicting_status(self): fake_snap, expected_snap = self._get_fake_share_group_snapshot() self.mock_object( self.controller.share_group_api, 'get_share_group_snapshot', mock.Mock(return_value=fake_snap)) self.mock_object( self.controller.share_group_api, 'delete_share_group_snapshot', 
mock.Mock(side_effect=exception.InvalidShareGroupSnapshot( reason='blah'))) self.assertRaises( webob.exc.HTTPConflict, self.controller.delete, self.request, fake_snap['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_show(self, microversion, experimental): fake_snap, expected_snap = self._get_fake_share_group_snapshot() self.mock_object( self.controller.share_group_api, 'get_share_group_snapshot', mock.Mock(return_value=fake_snap)) req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) res_dict = self.controller.show(req, fake_snap['id']) res_dict['share_group_snapshot'].pop('links') self.assertEqual(expected_snap, res_dict['share_group_snapshot']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get') def test_show_share_group_not_found(self): fake_snap, expected_snap = self._get_fake_share_group_snapshot() self.mock_object( self.controller.share_group_api, 'get_share_group_snapshot', mock.Mock(side_effect=exception.NotFound)) self.assertRaises( webob.exc.HTTPNotFound, self.controller.show, self.request, fake_snap['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get') def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_share_group_snapshot_data(self, share_group_snapshot=None, version='2.31'): if share_group_snapshot is None: share_group_snapshot = db_utils.create_share_group_snapshot( 'fake_id', status=constants.STATUS_AVAILABLE) path = ('/v2/fake/share-group-snapshots/%s/action' % share_group_snapshot['id']) req = fakes.HTTPRequest.blank(path, script_name=path, version=version) req.headers[wsgi.API_VERSION_REQUEST_HEADER] = version return share_group_snapshot, req @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack 
def test_share_group_snapshot_force_delete_with_different_roles( self, role, resp_code, version): group_snap, req = self._setup_share_group_snapshot_data() ctxt = self._get_context(role) req.method = 'POST' req.headers['content-type'] = 'application/json' action_name = 'force_delete' body = {action_name: {'status': constants.STATUS_ERROR}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers['X-Openstack-Manila-Api-Version'] = self.api_version req.headers['X-Openstack-Manila-Api-Experimental'] = True req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # Validate response self.assertEqual(resp_code, resp.status_int) @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test__force_delete_call(self, microversion, experimental): self.mock_object(self.controller, '_force_delete') req, _junk = self._get_fake_custom_request_and_context( microversion, experimental) sg_id = 'fake' body = {'force_delete': {}} self.controller.share_group_snapshot_force_delete(req, sg_id, body) self.controller._force_delete.assert_called_once_with(req, sg_id, body) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_share_group_snapshot_reset_status_with_different_roles( self, role, valid_code, valid_status, version): ctxt = self._get_context(role) group_snap, req = self._setup_share_group_snapshot_data() action_name = 'reset_status' body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") req.headers['X-Openstack-Manila-Api-Version'] = self.api_version req.headers['X-Openstack-Manila-Api-Experimental'] = True req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) 
# Validate response code and model status self.assertEqual(valid_code, resp.status_int) actual_model = db.share_group_snapshot_get(ctxt, group_snap['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test__reset_status_call(self, microversion, experimental): self.mock_object(self.controller, '_reset_status') req, _junk = self._get_fake_custom_request_and_context( microversion, experimental) sg_id = 'fake' body = {'reset_status': {'status': constants.STATUS_ERROR}} self.controller.share_group_snapshot_reset_status(req, sg_id, body) self.controller._reset_status.assert_called_once_with(req, sg_id, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_group_type_specs.py0000664000175000017500000003725700000000000024764 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_utils import strutils import webob from manila.api.v2 import share_group_type_specs from manila import exception from manila import policy from manila import test from manila.tests.api import fakes import manila.wsgi CONSISTENT_SNAPSHOTS = 'consistent_snapshots' def return_create_share_group_type_specs(context, share_group_type_id, group_specs): return stub_share_group_type_specs() def return_share_group_type_specs(context, share_group_type_id): return stub_share_group_type_specs() def return_empty_share_group_type_specs(context, share_group_type_id): return {} def delete_share_group_type_specs(context, share_group_type_id, key): pass def delete_share_group_type_specs_not_found(context, share_group_type_id, key): raise exception.ShareGroupTypeSpecsNotFound("Not Found") def stub_share_group_type_specs(): return {"key%d" % i: "value%d" % i for i in (1, 2, 3, 4, 5)} def get_large_string(): return "s" * 256 def get_group_specs_dict(group_specs, include_required=True): if not group_specs: group_specs = {} return {'group_specs': group_specs} def fake_request(url, admin=False, version='2.31', experimental=True, **kwargs): return fakes.HTTPRequest.blank( url, use_admin_context=admin, version=version, experimental=experimental, **kwargs) SG_GRADUATION_VERSION = '2.55' @ddt.ddt class ShareGroupTypesSpecsTest(test.TestCase): def setUp(self): super(ShareGroupTypesSpecsTest, self).setUp() self.flags(host='fake') self.mock_object(manila.db, 'share_group_type_get') self.api_path = '/v2/fake/share-group-types/1/group_specs' self.controller = ( share_group_type_specs.ShareGroupTypeSpecsController()) self.resource_name = self.controller.resource_name self.mock_policy_check = self.mock_object(policy, 'check_policy') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_index(self, microversion, experimental): self.mock_object( manila.db, 
'share_group_type_specs_get', return_share_group_type_specs) req = fake_request(self.api_path, version=microversion, experimental=experimental) req_context = req.environ['manila.context'] res_dict = self.controller.index(req, 1) self.assertEqual('value1', res_dict['group_specs']['key1']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'index') def test_index_no_data(self): self.mock_object(manila.db, 'share_group_type_specs_get', return_empty_share_group_type_specs) req = fake_request(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.index(req, 1) self.assertEqual(0, len(res_dict['group_specs'])) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'index') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_show(self, microversion, experimental): self.mock_object(manila.db, 'share_group_type_specs_get', return_share_group_type_specs) req = fake_request(self.api_path + '/key5', version=microversion, experimental=experimental) req_context = req.environ['manila.context'] res_dict = self.controller.show(req, 1, 'key5') self.assertEqual('value5', res_dict['key5']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'show') def test_show_spec_not_found(self): self.mock_object(manila.db, 'share_group_type_specs_get', return_empty_share_group_type_specs) req = fake_request(self.api_path + '/key6') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 1, 'key6') self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'show') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_delete(self, microversion, experimental): self.mock_object(manila.db, 'share_group_type_specs_delete', 
delete_share_group_type_specs) req = fake_request(self.api_path + '/key5', version=microversion, experimental=experimental) req_context = req.environ['manila.context'] self.controller.delete(req, 1, 'key5') self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') def test_delete_not_found(self): self.mock_object(manila.db, 'share_group_type_specs_delete', delete_share_group_type_specs_not_found) req = fake_request(self.api_path + '/key6') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1, 'key6') self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') @ddt.data( get_group_specs_dict({}), {'foo': 'bar'}, {CONSISTENT_SNAPSHOTS + 'foo': True}, {'foo' + CONSISTENT_SNAPSHOTS: False}, *[{CONSISTENT_SNAPSHOTS: v} for v in strutils.TRUE_STRINGS + strutils.FALSE_STRINGS] ) def test_create_experimental(self, data): self._validate_create(data) @ddt.data( get_group_specs_dict({}), {'foo': 'bar'}, {CONSISTENT_SNAPSHOTS + 'foo': True}, {'foo' + CONSISTENT_SNAPSHOTS: False} ) def test_create_non_experimental(self, data): self._validate_create(data, microversion=SG_GRADUATION_VERSION, experimental=False) def _validate_create(self, data, microversion='2.31', experimental=True): body = {'group_specs': data} mock_spec_update_or_create = self.mock_object( manila.db, 'share_group_type_specs_update_or_create', mock.Mock(return_value=return_create_share_group_type_specs)) req = fake_request(self.api_path, version=microversion, experimental=experimental) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, 1, body) for k, v in data.items(): self.assertIn(k, res_dict['group_specs']) self.assertEqual(v, res_dict['group_specs'][k]) mock_spec_update_or_create.assert_called_once_with( req.environ['manila.context'], 1, body['group_specs']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') 
def test_create_with_too_small_key(self): self.mock_object( manila.db, 'share_group_type_specs_update_or_create', mock.Mock(return_value=return_create_share_group_type_specs)) too_small_key = "" body = {"group_specs": {too_small_key: "value"}} req = fake_request(self.api_path) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, body) self.assertFalse( manila.db.share_group_type_specs_update_or_create.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_create_with_too_big_key(self): self.mock_object( manila.db, 'share_group_type_specs_update_or_create', mock.Mock(return_value=return_create_share_group_type_specs)) too_big_key = "k" * 256 body = {"group_specs": {too_big_key: "value"}} req = fake_request(self.api_path) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, body) self.assertFalse( manila.db.share_group_type_specs_update_or_create.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_create_with_too_small_value(self): self.mock_object( manila.db, 'share_group_type_specs_update_or_create', mock.Mock(return_value=return_create_share_group_type_specs)) too_small_value = "" body = {"group_specs": {"key": too_small_value}} req = fake_request(self.api_path) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, body) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') self.assertFalse( manila.db.share_group_type_specs_update_or_create.called) def test_create_with_too_big_value(self): self.mock_object( manila.db, 'share_group_type_specs_update_or_create', mock.Mock(return_value=return_create_share_group_type_specs)) too_big_value = "v" * 256 body = {"extra_specs": {"key": too_big_value}} req = 
fake_request(self.api_path) req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, 1, body) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') self.assertFalse( manila.db.share_group_type_specs_update_or_create.called) def test_create_key_allowed_chars(self): mock_return_value = stub_share_group_type_specs() mock_spec_update_or_create = self.mock_object( manila.db, 'share_group_type_specs_update_or_create', mock.Mock(return_value=mock_return_value)) body = get_group_specs_dict({"other_alphanum.-_:": "value1"}) req = fake_request(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, 1, body) self.assertEqual(mock_return_value['key1'], res_dict['group_specs']['other_alphanum.-_:']) mock_spec_update_or_create.assert_called_once_with( req.environ['manila.context'], 1, body['group_specs']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_create_too_many_keys_allowed_chars(self): mock_return_value = stub_share_group_type_specs() mock_spec_update_or_create = self.mock_object( manila.db, 'share_group_type_specs_update_or_create', mock.Mock(return_value=mock_return_value)) body = get_group_specs_dict({ "other_alphanum.-_:": "value1", "other2_alphanum.-_:": "value2", "other3_alphanum.-_:": "value3", }) req = fake_request(self.api_path) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, 1, body) self.assertEqual(mock_return_value['key1'], res_dict['group_specs']['other_alphanum.-_:']) self.assertEqual(mock_return_value['key2'], res_dict['group_specs']['other2_alphanum.-_:']) self.assertEqual(mock_return_value['key3'], res_dict['group_specs']['other3_alphanum.-_:']) mock_spec_update_or_create.assert_called_once_with( req_context, 1, body['group_specs']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') 
@ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test__update_call(self, microversion, experimental): req = fake_request(self.api_path + '/key1', version=microversion, experimental=experimental) sg_id = 'fake_id' key = 'fake_key' body = {"group_specs": {"key1": "fake_value"}} self.mock_object(self.controller, '_update') self.controller.update(req, sg_id, key, body) self.controller._update.assert_called_once_with(req, sg_id, key, body) def test_update_item_too_many_keys(self): self.mock_object(manila.db, 'share_group_type_specs_update_or_create') body = {"key1": "value1", "key2": "value2"} req = fake_request(self.api_path + '/key1') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'key1', body) self.assertFalse( manila.db.share_group_type_specs_update_or_create.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'update') def test_update_item_body_uri_mismatch(self): self.mock_object(manila.db, 'share_group_type_specs_update_or_create') body = {"key1": "value1"} req = fake_request(self.api_path + '/bad') req_context = req.environ['manila.context'] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 1, 'bad', body) self.assertFalse( manila.db.share_group_type_specs_update_or_create.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'update') @ddt.data(None, {}, {"group_specs": {CONSISTENT_SNAPSHOTS: ""}}) def test_update_invalid_body(self, body): req = fake_request('/v2/fake/share-group-types/1/group_specs') req_context = req.environ['manila.context'] req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, '1', body) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'update') @ddt.data( None, {}, {'foo': {'a': 'b'}}, {'group_specs': 'string'}, 
{"group_specs": {"ke/y1": "value1"}}, {"key1": "value1", "ke/y2": "value2", "key3": "value3"}, {"group_specs": {CONSISTENT_SNAPSHOTS: ""}}, {"group_specs": {"": "value"}}, {"group_specs": {"t": get_large_string()}}, {"group_specs": {get_large_string(): get_large_string()}}, {"group_specs": {get_large_string(): "v"}}, {"group_specs": {"k": ""}}) def test_create_invalid_body(self, body): req = fake_request('/v2/fake/share-group-types/1/group_specs') req_context = req.environ['manila.context'] req.method = 'POST' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, '1', body) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_group_types.py0000664000175000017500000007533400000000000023750 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime from unittest import mock import ddt from oslo_config import cfg import webob from manila.api.v2 import share_group_types as types from manila import exception from manila import policy from manila.share_group import share_group_types from manila import test from manila.tests.api import fakes CONF = cfg.CONF PROJ1_UUID = '11111111-1111-1111-1111-111111111111' PROJ2_UUID = '22222222-2222-2222-2222-222222222222' PROJ3_UUID = '33333333-3333-3333-3333-333333333333' SHARE_TYPE_ID = '4b1e460f-8bc5-4a97-989b-739a2eceaec6' GROUP_TYPE_1 = { 'id': 'c8d7bf70-0db9-4b3e-8498-055dd0306461', 'name': u'group type 1', 'deleted': False, 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1), 'updated_at': None, 'deleted_at': None, 'is_public': True, 'group_specs': {}, 'share_types': [], } GROUP_TYPE_2 = { 'id': 'f93f7a1f-62d7-4e7e-b9e6-72eec95a47f5', 'name': u'group type 2', 'deleted': False, 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1), 'updated_at': None, 'deleted_at': None, 'is_public': False, 'group_specs': {'consistent_snapshots': 'true'}, 'share_types': [{'share_type_id': SHARE_TYPE_ID}], } GROUP_TYPE_3 = { 'id': '61fdcbed-db27-4cc0-8938-8b4f74c2ae59', 'name': u'group type 3', 'deleted': False, 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1), 'updated_at': None, 'deleted_at': None, 'is_public': True, 'group_specs': {}, 'share_types': [], } SG_GRADUATION_VERSION = '2.55' def fake_request(url, admin=False, version='2.31', experimental=True, **kwargs): return fakes.HTTPRequest.blank( url, use_admin_context=admin, experimental=experimental, version=version, **kwargs ) @ddt.ddt class ShareGroupTypesAPITest(test.TestCase): def setUp(self): super(ShareGroupTypesAPITest, self).setUp() self.flags(host='fake') self.controller = types.ShareGroupTypesController() self.resource_name = self.controller.resource_name self.mock_object(policy, 'check_policy', mock.Mock(return_value=True)) @ddt.data({'microversion': '2.31', 'experimental': True}, 
{'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_share_group_types_index(self, microversion, experimental): fake_types = {GROUP_TYPE_1['name']: GROUP_TYPE_1} mock_get_all = self.mock_object( share_group_types, 'get_all', mock.Mock(return_value=fake_types)) req = fake_request('/v2/fake/share-group-types', admin=False, version=microversion, experimental=experimental) expected_list = [{ 'id': GROUP_TYPE_1['id'], 'name': GROUP_TYPE_1['name'], 'is_public': True, 'group_specs': {}, 'share_types': [], }] if self.is_microversion_ge(microversion, '2.46'): expected_list[0]['is_default'] = False res_dict = self.controller.index(req) mock_get_all.assert_called_once_with( mock.ANY, search_opts={"is_public": True}) self.assertEqual(1, len(res_dict['share_group_types'])) self.assertEqual(expected_list, res_dict['share_group_types']) def test_share_group_types_index_as_admin(self): fake_types = { GROUP_TYPE_1['name']: GROUP_TYPE_1, GROUP_TYPE_2['name']: GROUP_TYPE_2, } mock_get_all = self.mock_object( share_group_types, 'get_all', mock.Mock(return_value=fake_types)) req = fake_request( '/v2/fake/share-group-types?is_public=all', admin=True) expected_type_1 = { 'id': GROUP_TYPE_1['id'], 'name': GROUP_TYPE_1['name'], 'is_public': True, 'group_specs': {}, 'share_types': [], } expected_type_2 = { 'id': GROUP_TYPE_2['id'], 'name': GROUP_TYPE_2['name'], 'is_public': False, 'group_specs': {'consistent_snapshots': 'true'}, 'share_types': [SHARE_TYPE_ID], } res_dict = self.controller.index(req) mock_get_all.assert_called_once_with( mock.ANY, search_opts={'is_public': None}) self.assertEqual(2, len(res_dict['share_group_types'])) self.assertIn(expected_type_1, res_dict['share_group_types']) self.assertIn(expected_type_2, res_dict['share_group_types']) def test_share_group_types_index_as_admin_default_public_only(self): fake_types = {} mock_get_all = self.mock_object( share_group_types, 'get_all', mock.Mock(return_value=fake_types)) req = 
fake_request('/v2/fake/share-group-types', admin=True) self.controller.index(req) mock_get_all.assert_called_once_with( mock.ANY, search_opts={'is_public': True}) def test_share_group_types_index_not_experimental(self): self.mock_object( share_group_types, 'get_all', mock.Mock(return_value={})) req = fake_request('/v2/fake/share-group-types', experimental=False, version='2.54') self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.index, req) self.assertFalse(share_group_types.get_all.called) def test_share_group_types_index_older_api_version(self): self.mock_object( share_group_types, 'get_all', mock.Mock(return_value={})) req = fake_request('/v2/fake/share-group-types', version='2.1') self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.index, req) @ddt.data(True, False) def test_share_group_types_index_no_data(self, admin): self.mock_object( share_group_types, 'get_all', mock.Mock(return_value={})) req = fake_request('/v2/fake/share-group-types', admin=admin) res_dict = self.controller.index(req) self.assertEqual(0, len(res_dict['share_group_types'])) @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_share_group_types_show(self, microversion, experimental): mock_get = self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_1)) req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_1['id'], version=microversion, experimental=experimental) expected_type = { 'id': GROUP_TYPE_1['id'], 'name': GROUP_TYPE_1['name'], 'is_public': True, 'group_specs': {}, 'share_types': [], } if self.is_microversion_ge(microversion, '2.46'): expected_type['is_default'] = False res_dict = self.controller.show(req, GROUP_TYPE_1['id']) mock_get.assert_called_once_with(mock.ANY, GROUP_TYPE_1['id']) self.assertEqual(expected_type, res_dict['share_group_type']) def test_share_group_types_show_with_share_types(self): mock_get = 
self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_2)) req = fake_request('/v2/fake/group-types/%s' % GROUP_TYPE_2['id']) expected_type = { 'id': GROUP_TYPE_2['id'], 'name': GROUP_TYPE_2['name'], 'is_public': False, 'group_specs': {'consistent_snapshots': 'true'}, 'share_types': [SHARE_TYPE_ID], } res_dict = self.controller.show(req, GROUP_TYPE_2['id']) mock_get.assert_called_once_with(mock.ANY, GROUP_TYPE_2['id']) self.assertEqual(expected_type, res_dict['share_group_type']) def test_share_group_types_show_not_found(self): mock_get = self.mock_object( share_group_types, 'get', mock.Mock(side_effect=exception.ShareGroupTypeNotFound( type_id=GROUP_TYPE_2['id']))) req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id']) self.assertRaises( webob.exc.HTTPNotFound, self.controller.show, req, GROUP_TYPE_2['id']) mock_get.assert_called_once_with(mock.ANY, GROUP_TYPE_2['id']) @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_share_group_types_default(self, microversion, experimental): mock_get = self.mock_object( share_group_types, 'get_default', mock.Mock(return_value=GROUP_TYPE_2)) req = fake_request('/v2/fake/share-group-types/default', version=microversion, experimental=experimental) expected_type = { 'id': GROUP_TYPE_2['id'], 'name': GROUP_TYPE_2['name'], 'is_public': False, 'group_specs': {'consistent_snapshots': 'true'}, 'share_types': [SHARE_TYPE_ID], } if self.is_microversion_ge(microversion, '2.46'): expected_type['is_default'] = False res_dict = self.controller.default(req) mock_get.assert_called_once_with(mock.ANY) self.assertEqual(expected_type, res_dict['share_group_type']) def test_share_group_types_default_not_found(self): mock_get = self.mock_object( share_group_types, 'get_default', mock.Mock(return_value=None)) req = fake_request('/v2/fake/share-group-types/default') self.assertRaises(webob.exc.HTTPNotFound, 
self.controller.default, req) mock_get.assert_called_once_with(mock.ANY) @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_share_group_types_delete(self, microversion, experimental): mock_get = self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_1)) mock_destroy = self.mock_object(share_group_types, 'destroy') req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_1['id'], version=microversion, experimental=experimental) self.controller.delete(req, GROUP_TYPE_1['id']) mock_get.assert_called_once_with(mock.ANY, GROUP_TYPE_1['id']) mock_destroy.assert_called_once_with(mock.ANY, GROUP_TYPE_1['id']) def test_share_group_types_delete_not_found(self): mock_get = self.mock_object( share_group_types, 'get', mock.Mock(side_effect=exception.ShareGroupTypeNotFound( type_id=GROUP_TYPE_2['id']))) req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id']) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, GROUP_TYPE_2['id']) mock_get.assert_called_once_with(mock.ANY, GROUP_TYPE_2['id']) @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_create_minimal(self, microversion, experimental): fake_type = copy.deepcopy(GROUP_TYPE_1) fake_type['share_types'] = [{'share_type_id': SHARE_TYPE_ID}] mock_create = self.mock_object(share_group_types, 'create') mock_get = self.mock_object( share_group_types, 'get_by_name', mock.Mock(return_value=fake_type)) req = fake_request('/v2/fake/share-group-types', version=microversion, experimental=experimental) fake_body = {'share_group_type': { 'name': GROUP_TYPE_1['name'], 'share_types': [SHARE_TYPE_ID], }} expected_type = { 'id': GROUP_TYPE_1['id'], 'name': GROUP_TYPE_1['name'], 'is_public': True, 'group_specs': {}, 'share_types': [SHARE_TYPE_ID], } if self.is_microversion_ge(microversion, '2.46'): 
expected_type['is_default'] = False res_dict = self.controller.create(req, fake_body) mock_create.assert_called_once_with( mock.ANY, GROUP_TYPE_1['name'], [SHARE_TYPE_ID], {}, True) mock_get.assert_called_once_with(mock.ANY, GROUP_TYPE_1['name']) self.assertEqual(expected_type, res_dict['share_group_type']) @ddt.data( None, {'my_fake_group_spec': 'false'}, ) def test_create_with_group_specs(self, specs): fake_type = copy.deepcopy(GROUP_TYPE_1) fake_type['share_types'] = [{'share_type_id': SHARE_TYPE_ID}] fake_type['group_specs'] = specs mock_create = self.mock_object(share_group_types, 'create') mock_get = self.mock_object( share_group_types, 'get_by_name', mock.Mock(return_value=fake_type)) req = fake_request('/v2/fake/share-group-types') fake_body = {'share_group_type': { 'name': GROUP_TYPE_1['name'], 'share_types': [SHARE_TYPE_ID], 'group_specs': specs, }} expected_type = { 'id': GROUP_TYPE_1['id'], 'name': GROUP_TYPE_1['name'], 'is_public': True, 'group_specs': specs, 'share_types': [SHARE_TYPE_ID], } res_dict = self.controller.create(req, fake_body) mock_create.assert_called_once_with( mock.ANY, GROUP_TYPE_1['name'], [SHARE_TYPE_ID], specs, True) mock_get.assert_called_once_with(mock.ANY, GROUP_TYPE_1['name']) self.assertEqual(expected_type, res_dict['share_group_type']) @ddt.data( 'str', ['l', 'i', 's', 't'], set([1]), ('t', 'u', 'p', 'l', 'e'), 1, {"foo": 1}, {1: "foo"}, {"foo": "bar", "quuz": []} ) def test_create_with_wrong_group_specs(self, specs): fake_type = copy.deepcopy(GROUP_TYPE_1) fake_type['share_types'] = [{'share_type_id': SHARE_TYPE_ID}] fake_type['group_specs'] = specs mock_create = self.mock_object(share_group_types, 'create') mock_get = self.mock_object( share_group_types, 'get_by_name', mock.Mock(return_value=fake_type)) req = fake_request('/v2/fake/share-group-types') fake_body = {'share_group_type': { 'name': GROUP_TYPE_1['name'], 'share_types': [SHARE_TYPE_ID], 'group_specs': specs, }} self.assertRaises( webob.exc.HTTPBadRequest, 
self.controller.create, req, fake_body) self.assertEqual(0, mock_create.call_count) self.assertEqual(0, mock_get.call_count) def test_create_private_share_group_type(self): fake_type = copy.deepcopy(GROUP_TYPE_1) fake_type['share_types'] = [{'share_type_id': SHARE_TYPE_ID}] fake_type['is_public'] = False mock_create = self.mock_object(share_group_types, 'create') mock_get = self.mock_object( share_group_types, 'get_by_name', mock.Mock(return_value=fake_type)) req = fake_request('/v2/fake/share-group-types') fake_body = {'share_group_type': { 'name': GROUP_TYPE_1['name'], 'share_types': [SHARE_TYPE_ID], 'is_public': False }} expected_type = { 'id': GROUP_TYPE_1['id'], 'name': GROUP_TYPE_1['name'], 'is_public': False, 'group_specs': {}, 'share_types': [SHARE_TYPE_ID], } res_dict = self.controller.create(req, fake_body) mock_create.assert_called_once_with( mock.ANY, GROUP_TYPE_1['name'], [SHARE_TYPE_ID], {}, False) mock_get.assert_called_once_with(mock.ANY, GROUP_TYPE_1['name']) self.assertEqual(expected_type, res_dict['share_group_type']) def test_create_invalid_request_duplicate_name(self): mock_create = self.mock_object( share_group_types, 'create', mock.Mock(side_effect=exception.ShareGroupTypeExists( type_id=GROUP_TYPE_1['name']))) req = fake_request('/v2/fake/sahre-group-types') fake_body = {'share_group_type': { 'name': GROUP_TYPE_1['name'], 'share_types': [SHARE_TYPE_ID], }} self.assertRaises( webob.exc.HTTPConflict, self.controller.create, req, fake_body) mock_create.assert_called_once_with( mock.ANY, GROUP_TYPE_1['name'], [SHARE_TYPE_ID], {}, True) def test_create_invalid_request_missing_name(self): req = fake_request('/v2/fake/share-group-types') fake_body = {'share_group_type': {'share_types': [SHARE_TYPE_ID]}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, req, fake_body) def test_create_invalid_request_missing_share_types(self): req = fake_request('/v2/fake/share-group-types') fake_body = {'share_group_type': {'name': 
GROUP_TYPE_1['name']}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, req, fake_body) def test_create_provided_share_type_does_not_exist(self): req = fake_request('/v2/fake/share-group-types', admin=True) fake_body = { 'share_group_type': { 'name': GROUP_TYPE_1['name'], 'share_types': SHARE_TYPE_ID + '_does_not_exist', } } self.assertRaises( webob.exc.HTTPNotFound, self.controller.create, req, fake_body) @ddt.data(('2.45', True), ('2.45', False), ('2.46', True), ('2.46', False)) @ddt.unpack def test_share_group_types_create_with_is_default_key(self, version, admin): # is_default is false fake_type = copy.deepcopy(GROUP_TYPE_1) fake_type['share_types'] = [{'share_type_id': SHARE_TYPE_ID}] self.mock_object(share_group_types, 'create') self.mock_object( share_group_types, 'get_by_name', mock.Mock(return_value=fake_type)) req = fake_request('/v2/fake/share-group-types', version=version, admin=admin) fake_body = {'share_group_type': { 'name': GROUP_TYPE_1['name'], 'share_types': [SHARE_TYPE_ID], }} res_dict = self.controller.create(req, fake_body) if self.is_microversion_ge(version, '2.46'): self.assertIn('is_default', res_dict['share_group_type']) self.assertIs(False, res_dict['share_group_type']['is_default']) else: self.assertNotIn('is_default', res_dict['share_group_type']) # is_default is true default_type_name = 'group type 3' CONF.set_default('default_share_group_type', default_type_name) fake_type = copy.deepcopy(GROUP_TYPE_3) fake_type['share_types'] = [{'share_type_id': SHARE_TYPE_ID}] self.mock_object(share_group_types, 'create') self.mock_object( share_group_types, 'get_by_name', mock.Mock(return_value=fake_type)) req = fake_request('/v2/fake/share-group-types', version=version, admin=admin) fake_body = {'share_group_type': { 'name': GROUP_TYPE_3['name'], 'share_types': [SHARE_TYPE_ID], }} res_dict = self.controller.create(req, fake_body) if self.is_microversion_ge(version, '2.46'): self.assertIn('is_default', 
res_dict['share_group_type']) self.assertIs(True, res_dict['share_group_type']['is_default']) else: self.assertNotIn('is_default', res_dict['share_group_type']) @ddt.data(('2.45', True), ('2.45', False), ('2.46', True), ('2.46', False)) @ddt.unpack def test_share_group_types_list_with_is_default_key(self, version, admin): fake_types = { GROUP_TYPE_1['name']: GROUP_TYPE_1, GROUP_TYPE_2['name']: GROUP_TYPE_2, } self.mock_object( share_group_types, 'get_all', mock.Mock(return_value=fake_types)) req = fake_request( '/v2/fake/share-group-types?is_public=all', version=version, admin=admin) res_dict = self.controller.index(req) for res in res_dict['share_group_types']: if self.is_microversion_ge(version, '2.46'): self.assertIn('is_default', res) self.assertIs(False, res['is_default']) else: self.assertNotIn('is_default', res) self.assertEqual(2, len(res_dict['share_group_types'])) @ddt.data(('2.45', True), ('2.45', False), ('2.46', True), ('2.46', False)) @ddt.unpack def test_shares_group_types_show_with_is_default_key(self, version, admin): self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_2)) req = fake_request('/v2/fake/group-types/%s' % GROUP_TYPE_2['id'], version=version, admin=admin) res_dict = self.controller.show(req, GROUP_TYPE_2['id']) if self.is_microversion_ge(version, '2.46'): self.assertIn('is_default', res_dict['share_group_type']) self.assertIs(False, res_dict['share_group_type']['is_default']) else: self.assertNotIn('is_default', res_dict['share_group_type']) @ddt.ddt class ShareGroupTypeAccessTest(test.TestCase): def setUp(self): super(ShareGroupTypeAccessTest, self).setUp() self.controller = types.ShareGroupTypesController() def test_list_type_access_public(self): self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_1)) req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_1['id'], admin=True) self.assertRaises( webob.exc.HTTPNotFound, self.controller.share_group_type_access, req, 
GROUP_TYPE_1['id']) def test_list_type_access_private(self): fake_type = copy.deepcopy(GROUP_TYPE_2) fake_type['projects'] = [PROJ2_UUID, PROJ3_UUID] mock_get = self.mock_object( share_group_types, 'get', mock.Mock(return_value=fake_type)) expected = {'share_group_type_access': [ {'share_group_type_id': fake_type['id'], 'project_id': PROJ2_UUID}, {'share_group_type_id': fake_type['id'], 'project_id': PROJ3_UUID}, ]} req = fake_request( '/v2/fake/share-group-types/%s' % fake_type['id'], admin=True) actual = self.controller.share_group_type_access(req, fake_type['id']) mock_get.assert_called_once_with( mock.ANY, fake_type['id'], expected_fields=['projects']) self.assertEqual(expected, actual) def test_list_type_access_type_not_found(self): self.mock_object( share_group_types, 'get', mock.Mock(side_effect=exception.ShareGroupTypeNotFound( type_id=GROUP_TYPE_2['id']))) req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True) self.assertRaises( webob.exc.HTTPNotFound, self.controller.share_group_type_access, req, GROUP_TYPE_2['id']) @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_add_project_access(self, microversion, experimental): self.mock_object(share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_2)) mock_add_access = self.mock_object( share_group_types, 'add_share_group_type_access') body = {'addProjectAccess': {'project': PROJ1_UUID}} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True, experimental=experimental, version=microversion ) response = self.controller.add_project_access( req, GROUP_TYPE_2['id'], body) mock_add_access.assert_called_once_with( mock.ANY, GROUP_TYPE_2['id'], PROJ1_UUID) self.assertEqual(202, response.status_code) def test_add_project_access_non_existent_type(self): self.mock_object( share_group_types, 'get', mock.Mock(side_effect=exception.ShareGroupTypeNotFound( 
type_id=GROUP_TYPE_2['id']))) body = {'addProjectAccess': {'project': PROJ1_UUID}} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True) self.assertRaises( webob.exc.HTTPNotFound, self.controller.add_project_access, req, GROUP_TYPE_2['id'], body) def test_add_project_access_missing_project_in_body(self): body = {'addProjectAccess': {}} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True) self.assertRaises( webob.exc.HTTPBadRequest, self.controller.add_project_access, req, GROUP_TYPE_2['id'], body) def test_add_project_access_missing_add_project_access_in_body(self): body = {} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True) self.assertRaises( webob.exc.HTTPBadRequest, self.controller.add_project_access, req, GROUP_TYPE_2['id'], body) def test_add_project_access_with_already_added_access(self): self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_2)) mock_add_access = self.mock_object( share_group_types, 'add_share_group_type_access', mock.Mock(side_effect=exception.ShareGroupTypeAccessExists( type_id=GROUP_TYPE_2['id'], project_id=PROJ1_UUID)) ) body = {'addProjectAccess': {'project': PROJ1_UUID}} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True) self.assertRaises( webob.exc.HTTPConflict, self.controller.add_project_access, req, GROUP_TYPE_2['id'], body) mock_add_access.assert_called_once_with( mock.ANY, GROUP_TYPE_2['id'], PROJ1_UUID) def test_add_project_access_to_public_share_type(self): self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_1)) body = {'addProjectAccess': {'project': PROJ1_UUID}} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_1['id'], admin=True) self.assertRaises( webob.exc.HTTPConflict, self.controller.add_project_access, req, GROUP_TYPE_1['id'], body) @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': 
SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_remove_project_access(self, microversion, experimental): self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_2)) mock_remove_access = self.mock_object( share_group_types, 'remove_share_group_type_access') body = {'removeProjectAccess': {'project': PROJ1_UUID}} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True, version=microversion, experimental=experimental) response = self.controller.remove_project_access( req, GROUP_TYPE_2['id'], body) mock_remove_access.assert_called_once_with( mock.ANY, GROUP_TYPE_2['id'], PROJ1_UUID) self.assertEqual(202, response.status_code) def test_remove_project_access_nonexistent_rule(self): self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_2)) mock_remove_access = self.mock_object( share_group_types, 'remove_share_group_type_access', mock.Mock( side_effect=exception.ShareGroupTypeAccessNotFound( type_id=GROUP_TYPE_2['id'], project_id=PROJ1_UUID))) body = {'removeProjectAccess': {'project': PROJ1_UUID}} req = fake_request('/v2/fake/group-types/%s' % GROUP_TYPE_2['id'], admin=True) self.assertRaises( webob.exc.HTTPNotFound, self.controller.remove_project_access, req, GROUP_TYPE_2['id'], body) mock_remove_access.assert_called_once_with( mock.ANY, GROUP_TYPE_2['id'], PROJ1_UUID) def test_remove_project_access_from_public_share_type(self): self.mock_object( share_group_types, 'get', mock.Mock(return_value=GROUP_TYPE_1)) body = {'removeProjectAccess': {'project': PROJ1_UUID}} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_1['id'], admin=True) self.assertRaises(webob.exc.HTTPConflict, self.controller.remove_project_access, req, GROUP_TYPE_1['id'], body) def test_remove_project_access_non_existent_type(self): self.mock_object( share_group_types, 'get', mock.Mock(side_effect=exception.ShareGroupTypeNotFound( type_id=GROUP_TYPE_2['id']))) body = {'removeProjectAccess': 
{'project': PROJ1_UUID}} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True) self.assertRaises(webob.exc.HTTPNotFound, self.controller.remove_project_access, req, GROUP_TYPE_2['id'], body) def test_remove_project_access_missing_project_in_body(self): body = {'removeProjectAccess': {}} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.remove_project_access, req, GROUP_TYPE_2['id'], body) def test_remove_project_access_missing_remove_project_access_in_body(self): body = {} req = fake_request( '/v2/fake/share-group-types/%s' % GROUP_TYPE_2['id'], admin=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.remove_project_access, req, GROUP_TYPE_2['id'], body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_groups.py0000664000175000017500000012722500000000000022704 0ustar00zuulzuul00000000000000# Copyright 2015 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import uuidutils import webob from manila.api.openstack import wsgi import manila.api.v2.share_groups as share_groups from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila.share import share_types from manila.share_group import api as share_group_api from manila.share_group import share_group_types from manila import test from manila.tests.api import fakes from manila.tests import db_utils CONF = cfg.CONF SG_GRADUATION_VERSION = '2.55' @ddt.ddt class ShareGroupAPITest(test.TestCase): """Consistency Groups API Test suite.""" def setUp(self): super(ShareGroupAPITest, self).setUp() self.controller = share_groups.ShareGroupController() self.resource_name = self.controller.resource_name self.fake_share_type = {'id': uuidutils.generate_uuid()} self.fake_share_group_type = { 'id': uuidutils.generate_uuid()} self.api_version = '2.34' self.request = fakes.HTTPRequest.blank( '/share-groups', version=self.api_version, experimental=True) self.flags(transport_url='rabbit://fake:fake@mqhost:5672') self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.context = self.request.environ['manila.context'] self.mock_object(share_group_types, 'get_default', mock.Mock(return_value=self.fake_share_group_type)) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_share_group_data(self, share_group=None, version='2.31'): if share_group is None: share_group = db_utils.create_share_group( status=constants.STATUS_AVAILABLE) path = 
'/v2/fake/share-groups/%s/action' % share_group['id'] req = fakes.HTTPRequest.blank(path, script_name=path, version=version) req.headers[wsgi.API_VERSION_REQUEST_HEADER] = version req.headers[wsgi.EXPERIMENTAL_API_REQUEST_HEADER] = 'True' return share_group, req def _get_fake_share_group(self, ctxt=None, **values): if ctxt is None: ctxt = self.context share_group_db_dict = { 'id': 'fake_id', 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'name': 'fake name', 'description': 'fake description', 'host': None, 'availability_zone': None, 'consistent_snapshot_support': None, 'source_share_group_snapshot_id': None, 'share_group_type_id': self.fake_share_group_type.get('id'), 'share_network_id': uuidutils.generate_uuid(), 'share_server_id': uuidutils.generate_uuid(), 'share_types': [], 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), } share_group_db_dict.update(**values) expected_share_group = { 'id': share_group_db_dict['id'], 'project_id': share_group_db_dict['project_id'], 'status': share_group_db_dict['status'], 'name': share_group_db_dict['name'], 'description': share_group_db_dict['description'], 'host': share_group_db_dict['host'], 'availability_zone': share_group_db_dict['availability_zone'], 'consistent_snapshot_support': share_group_db_dict[ 'consistent_snapshot_support'], 'source_share_group_snapshot_id': share_group_db_dict[ 'source_share_group_snapshot_id'], 'share_group_type_id': share_group_db_dict['share_group_type_id'], 'share_network_id': share_group_db_dict['share_network_id'], 'share_server_id': share_group_db_dict['share_server_id'], 'share_types': [st['share_type_id'] for st in share_group_db_dict.get('share_types')], 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'links': mock.ANY, } if not ctxt.is_admin: del expected_share_group['share_server_id'] return share_group_db_dict, expected_share_group def _get_fake_simple_share_group(self, **values): share_group = {'id': 'fake_id', 'name': None} 
share_group.update(**values) expected_share_group = copy.deepcopy(share_group) expected_share_group['links'] = mock.ANY return share_group, expected_share_group def _get_fake_custom_request_and_context(self, microversion, experimental): req = fakes.HTTPRequest.blank( '/share-groups', version=microversion, experimental=experimental) req_context = req.environ['manila.context'] return req, req_context @ddt.data({'microversion': '2.34', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_share_group_create(self, microversion, experimental): fake, expected = self._get_fake_share_group() self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.share_group_api, 'create', mock.Mock(return_value=fake)) req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) body = {"share_group": {}} res_dict = self.controller.create(req, body) self.controller.share_group_api.create.assert_called_once_with( req_context, share_group_type_id=self.fake_share_group_type['id'], share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') def test_group_create_invalid_group_snapshot_state(self): fake_snap_id = uuidutils.generate_uuid() self.mock_object( self.controller.share_group_api, 'create', mock.Mock(side_effect=exception.InvalidShareGroupSnapshot( reason='bad status', ))) body = { "share_group": { "source_share_group_snapshot_id": fake_snap_id } } self.assertRaises(webob.exc.HTTPConflict, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_no_default_share_type(self): fake_group, expected_group = self._get_fake_share_group() self.mock_object(share_types, 'get_default_share_type', 
mock.Mock(return_value=None)) self.mock_object(self.controller.share_group_api, 'create', mock.Mock(return_value=fake_group)) body = {"share_group": {}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_no_default_group_type(self): fake_group, expected_group = self._get_fake_share_group() self.mock_object( share_group_types, 'get_default', mock.Mock(return_value=None)) self.mock_object( self.controller.share_group_api, 'create', mock.Mock(return_value=fake_group)) body = {"share_group": {}} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_group_type_specified(self): fake_share_group, expected_group = self._get_fake_share_group() self.mock_object( share_group_types, 'get_default', mock.Mock(return_value=None)) self.mock_object( self.controller.share_group_api, 'create', mock.Mock(return_value=fake_share_group)) body = { "share_group": { "share_group_type_id": self.fake_share_group_type.get('id'), } } self.controller.create(self.request, body) self.controller.share_group_api.create.assert_called_once_with( self.context, share_group_type_id=self.fake_share_group_type['id'], share_type_ids=[self.fake_share_type['id']]) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_invalid_group_type_specified(self): fake_share_group, expected_share_group = self._get_fake_share_group() self.mock_object( share_group_types, 'get_default', mock.Mock(return_value=None)) self.mock_object(self.controller.share_group_api, 'create', mock.Mock(return_value=fake_share_group)) body = {"share_group": {"group_type_id": "invalid"}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, 
self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_az(self): fake_az_name = 'fake_az_name' fake_az_id = 'fake_az_id' fake_share_group, expected_share_group = self._get_fake_share_group( availability_zone_id=fake_az_id) self.mock_object( self.controller.share_group_api, 'create', mock.Mock(return_value=fake_share_group)) self.mock_object( share_groups.db, 'availability_zone_get', mock.Mock(return_value=type( 'FakeAZ', (object, ), { 'id': fake_az_id, 'name': fake_az_name, }))) body = {"share_group": {"availability_zone": fake_az_name}} res_dict = self.controller.create(self.request, body) self.controller.share_group_api.create.assert_called_once_with( self.context, availability_zone_id=fake_az_id, availability_zone=fake_az_name, share_group_type_id=self.fake_share_group_type['id'], share_type_ids=[self.fake_share_type['id']]) share_groups.db.availability_zone_get.assert_called_once_with( self.context, fake_az_name) self.assertEqual(expected_share_group, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_az_and_source_share_group_snapshot(self): fake_az_name = 'fake_az_name' fake_az_id = 'fake_az_id' fake_share_group, expected_share_group = self._get_fake_share_group( availability_zone_id=fake_az_id) self.mock_object( self.controller.share_group_api, 'create', mock.Mock(return_value=fake_share_group)) self.mock_object( share_groups.db, 'availability_zone_get', mock.Mock(return_value=type( 'FakeAZ', (object, ), { 'id': fake_az_id, 'name': fake_az_name, }))) body = {"share_group": { "availability_zone": fake_az_name, "source_share_group_snapshot_id": 'fake_sgs_id', }} self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.controller.share_group_api.create.assert_not_called() share_groups.db.availability_zone_get.assert_not_called() 
self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_nonexistent_az(self): fake_az_name = 'fake_az_name' fake_az_id = 'fake_az_id' fake_share_group, expected_share_group = self._get_fake_share_group( availability_zone_id=fake_az_id) self.mock_object( self.controller.share_group_api, 'create', mock.Mock(return_value=fake_share_group)) self.mock_object( share_groups.db, 'availability_zone_get', mock.Mock( side_effect=exception.AvailabilityZoneNotFound(id=fake_az_id))) body = {"share_group": {"availability_zone": fake_az_name}} self.assertRaises( webob.exc.HTTPNotFound, self.controller.create, self.request, body) self.assertEqual(0, self.controller.share_group_api.create.call_count) share_groups.db.availability_zone_get.assert_called_once_with( self.context, fake_az_name) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_name(self): fake_name = 'fake_name' fake_share_group, expected_share_group = self._get_fake_share_group( name=fake_name) self.mock_object(self.controller.share_group_api, 'create', mock.Mock(return_value=fake_share_group)) body = {"share_group": {"name": fake_name}} res_dict = self.controller.create(self.request, body) self.controller.share_group_api.create.assert_called_once_with( self.context, name=fake_name, share_group_type_id=self.fake_share_group_type['id'], share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected_share_group, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_description(self): fake_description = 'fake_description' fake_share_group, expected_share_group = self._get_fake_share_group( description=fake_description) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.share_group_api, 'create', 
mock.Mock(return_value=fake_share_group)) body = {"share_group": {"description": fake_description}} res_dict = self.controller.create(self.request, body) self.controller.share_group_api.create.assert_called_once_with( self.context, description=fake_description, share_group_type_id=self.fake_share_group_type['id'], share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected_share_group, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_share_types(self): fake_share_types = [{"share_type_id": self.fake_share_type['id']}] fake_group, expected_group = self._get_fake_share_group( share_types=fake_share_types) self.mock_object(self.controller.share_group_api, 'create', mock.Mock(return_value=fake_group)) body = { "share_group": { "share_types": [self.fake_share_type['id']] } } res_dict = self.controller.create(self.request, body) self.controller.share_group_api.create.assert_called_once_with( self.context, share_group_type_id=self.fake_share_group_type['id'], share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected_group, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_sg_create_with_source_sg_snapshot_id_and_share_network(self): fake_snap_id = uuidutils.generate_uuid() fake_net_id = uuidutils.generate_uuid() self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) mock_api_call = self.mock_object( self.controller.share_group_api, 'create') body = { "share_group": { "source_share_group_snapshot_id": fake_snap_id, "share_network_id": fake_net_id, } } self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.assertFalse(mock_api_call.called) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_source_sg_snapshot_id(self): 
fake_snap_id = uuidutils.generate_uuid() fake_share_group, expected_group = self._get_fake_share_group( source_share_group_snapshot_id=fake_snap_id) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.share_group_api, 'create', mock.Mock(return_value=fake_share_group)) body = { "share_group": { "source_share_group_snapshot_id": fake_snap_id, } } res_dict = self.controller.create(self.request, body) self.controller.share_group_api.create.assert_called_once_with( self.context, share_group_type_id=self.fake_share_group_type['id'], source_share_group_snapshot_id=fake_snap_id) self.assertEqual(expected_group, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_share_network_id(self): fake_net_id = uuidutils.generate_uuid() fake_group, expected_group = self._get_fake_share_group( share_network_id=fake_net_id) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.share_group_api, 'create', mock.Mock(return_value=fake_group)) body = { "share_group": { "share_network_id": fake_net_id, } } res_dict = self.controller.create(self.request, body) self.controller.share_group_api.create.assert_called_once_with( self.context, share_network_id=fake_net_id, share_group_type_id=self.fake_share_group_type['id'], share_type_ids=mock.ANY) self.assertEqual(expected_group, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_sg_create_no_default_share_type_with_share_group_snapshot(self): fake_snap_id = uuidutils.generate_uuid() fake, expected = self._get_fake_share_group() self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=None)) self.mock_object(self.controller.share_group_api, 'create', mock.Mock(return_value=fake)) body = { 
"share_group": { "source_share_group_snapshot_id": fake_snap_id, } } res_dict = self.controller.create(self.request, body) self.controller.share_group_api.create.assert_called_once_with( self.context, share_group_type_id=self.fake_share_group_type['id'], source_share_group_snapshot_id=fake_snap_id) self.assertEqual(expected, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_name_and_description(self): fake_name = 'fake_name' fake_description = 'fake_description' fake_group, expected_group = self._get_fake_share_group( name=fake_name, description=fake_description) self.mock_object(share_types, 'get_default_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object(self.controller.share_group_api, 'create', mock.Mock(return_value=fake_group)) body = { "share_group": { "name": fake_name, "description": fake_description } } res_dict = self.controller.create(self.request, body) self.controller.share_group_api.create.assert_called_once_with( self.context, name=fake_name, description=fake_description, share_group_type_id=self.fake_share_group_type['id'], share_type_ids=[self.fake_share_type['id']]) self.assertEqual(expected_group, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_invalid_body(self): body = {"not_group": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_group_create_invalid_body_share_types_and_source_group_snapshot( self): body = { "share_group": { "share_types": [], "source_share_group_snapshot_id": "", } } self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def 
test_share_group_create_source_group_snapshot_not_in_available(self): fake_snap_id = uuidutils.generate_uuid() body = { "share_group": { "source_share_group_snapshot_id": fake_snap_id, } } self.mock_object(self.controller.share_group_api, 'create', mock.Mock( side_effect=exception.InvalidShareGroupSnapshot(reason='blah'))) self.assertRaises( webob.exc.HTTPConflict, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_source_group_snapshot_does_not_exist(self): fake_snap_id = uuidutils.generate_uuid() body = { "share_group": {"source_share_group_snapshot_id": fake_snap_id} } self.mock_object( self.controller.share_group_api, 'create', mock.Mock(side_effect=exception.ShareGroupSnapshotNotFound( share_group_snapshot_id=fake_snap_id))) self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_invalid_input(self): fake_snap_id = uuidutils.generate_uuid() body = { "share_group": {"source_share_group_snapshot_id": fake_snap_id} } self.mock_object( self.controller.share_group_api, 'create', mock.Mock(side_effect=exception.InvalidInput( reason='invalid input'))) self.assertRaises( webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_source_group_snapshot_not_a_uuid(self): fake_snap_id = "Not a uuid" body = { "share_group": { "source_share_group_snapshot_id": fake_snap_id, } } self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_share_network_id_not_a_uuid(self): fake_net_id = "Not a uuid" body = {"share_group": 
{"share_network_id": fake_net_id}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_invalid_body_share_types_not_a_list(self): body = {"share_group": {"share_types": ""}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_invalid_body_invalid_field(self): body = {"share_group": {"unknown_field": ""}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.assertIn('unknown_field', str(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_invalid_share_types_field(self): body = {"share_group": {"share_types": 'iamastring'}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') def test_share_group_create_with_invalid_share_types_field_not_uuids(self): body = {"share_group": {"share_types": ['iamastring']}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.request, body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'create') @ddt.data({'microversion': '2.34', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_share_group_update_with_name_and_description( self, microversion, experimental): fake_name = 'fake_name' fake_description = 'fake_description' fake_group, expected_group = self._get_fake_share_group( name=fake_name, description=fake_description) self.mock_object(self.controller.share_group_api, 'get', mock.Mock(return_value=fake_group)) self.mock_object(self.controller.share_group_api, 'update', 
mock.Mock(return_value=fake_group)) req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) body = { "share_group": { "name": fake_name, "description": fake_description, } } res_dict = self.controller.update(req, fake_group['id'], body) self.controller.share_group_api.update.assert_called_once_with( req_context, fake_group, {"name": fake_name, "description": fake_description}) self.assertEqual(expected_group, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'update') def test_share_group_update_group_not_found(self): body = {"share_group": {}} self.mock_object(self.controller.share_group_api, 'get', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, self.request, 'fake_id', body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_share_group_update_invalid_body(self): body = {"not_group": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_share_group_update_invalid_body_invalid_field(self): body = {"share_group": {"unknown_field": ""}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.assertIn('unknown_field', str(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') def test_share_group_update_invalid_body_readonly_field(self): body = {"share_group": {"share_types": []}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.request, 'fake_id', body) self.assertIn('share_types', str(exc)) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'update') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': 
False}) @ddt.unpack def test_share_group_list_index(self, microversion, experimental): fake, expected = self._get_fake_simple_share_group() self.mock_object( share_group_api.API, 'get_all', mock.Mock(return_value=[fake])) req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) res_dict = self.controller.index(req) self.assertEqual([expected], res_dict['share_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_share_group_list_index_no_groups(self): self.mock_object( share_group_api.API, 'get_all', mock.Mock(return_value=[])) res_dict = self.controller.index(self.request) self.assertEqual([], res_dict['share_groups']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_share_group_list_index_with_limit(self): fake, expected = self._get_fake_simple_share_group() fake2, expected2 = self._get_fake_simple_share_group(id="fake_id2") self.mock_object( share_group_api.API, 'get_all', mock.Mock(return_value=[fake, fake2])) req = fakes.HTTPRequest.blank( '/share-groups?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict['share_groups'])) self.assertEqual([expected], res_dict['share_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_share_group_list_index_with_limit_and_offset(self): fake, expected = self._get_fake_simple_share_group() fake2, expected2 = self._get_fake_simple_share_group( id="fake_id2") self.mock_object(share_group_api.API, 'get_all', mock.Mock(return_value=[fake, fake2])) req = fakes.HTTPRequest.blank( '/share-groups?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict['share_groups'])) self.assertEqual([expected2], 
res_dict['share_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_share_group_list_index_with_like_filter(self): fake, expected = self._get_fake_simple_share_group( name='fake_1', description='fake_ds_1') fake2, expected2 = self._get_fake_simple_share_group( name='fake_2', description='fake_ds_2') self.mock_object(share_group_api.API, 'get_all', mock.Mock(return_value=[fake, fake2])) req = fakes.HTTPRequest.blank( '/share-groups?name~=fake&description~=fake', version='2.36', experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) expected.pop('description') expected2.pop('description') self.assertEqual(2, len(res_dict['share_groups'])) self.assertEqual([expected, expected2], res_dict['share_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') @ddt.data({'microversion': '2.34', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_share_group_list_detail(self, microversion, experimental): fake, expected = self._get_fake_share_group() self.mock_object( share_group_api.API, 'get_all', mock.Mock(return_value=[fake])) req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) res_dict = self.controller.detail(req) self.assertEqual([expected], res_dict['share_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_share_group_list_detail_no_groups(self): self.mock_object( share_group_api.API, 'get_all', mock.Mock(return_value=[])) res_dict = self.controller.detail(self.request) self.assertEqual([], res_dict['share_groups']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'get_all') def test_share_group_list_detail_with_limit(self): req = fakes.HTTPRequest.blank('/share-groups?limit=1', version=self.api_version, experimental=True) req_context = 
req.environ['manila.context'] fake_group, expected_group = self._get_fake_share_group( ctxt=req_context) fake_group2, expected_group2 = self._get_fake_share_group( ctxt=req_context, id="fake_id2") self.mock_object(share_group_api.API, 'get_all', mock.Mock(return_value=[fake_group, fake_group2])) res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['share_groups'])) self.assertEqual([expected_group], res_dict['share_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_share_group_list_detail_with_limit_and_offset(self): req = fakes.HTTPRequest.blank('/share-groups?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] fake_group, expected_group = self._get_fake_share_group( ctxt=req_context) fake_group2, expected_group2 = self._get_fake_share_group( id="fake_id2", ctxt=req_context) self.mock_object(share_group_api.API, 'get_all', mock.Mock(return_value=[fake_group, fake_group2])) res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['share_groups'])) self.assertEqual([expected_group2], res_dict['share_groups']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_share_group_delete(self, microversion, experimental): fake_group, expected_group = self._get_fake_share_group() self.mock_object(share_group_api.API, 'get', mock.Mock(return_value=fake_group)) self.mock_object(share_group_api.API, 'delete') req, req_context = self._get_fake_custom_request_and_context( microversion, experimental) res = self.controller.delete(req, fake_group['id']) self.assertEqual(202, res.status_code) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'delete') def test_share_group_delete_group_not_found(self): fake_group, expected_group 
= self._get_fake_share_group() self.mock_object(share_group_api.API, 'get', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, self.request, fake_group['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') def test_share_group_delete_in_conflicting_status(self): fake, expected = self._get_fake_share_group() self.mock_object( share_group_api.API, 'get', mock.Mock(return_value=fake)) self.mock_object(share_group_api.API, 'delete', mock.Mock( side_effect=exception.InvalidShareGroup(reason='blah'))) self.assertRaises( webob.exc.HTTPConflict, self.controller.delete, self.request, fake['id']) self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'delete') @ddt.data({'microversion': '2.34', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test_share_group_show(self, microversion, experimental): fake, expected = self._get_fake_share_group() self.mock_object( share_group_api.API, 'get', mock.Mock(return_value=fake)) req = fakes.HTTPRequest.blank( '/share-groupss/%s' % fake['id'], version=microversion, experimental=experimental) req_context = req.environ['manila.context'] res_dict = self.controller.show(req, fake['id']) self.assertEqual(expected, res_dict['share_group']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get') def test_share_group_show_as_admin(self): req = fakes.HTTPRequest.blank( '/share-groupss/my_group_id', version=self.api_version, experimental=True) admin_context = req.environ['manila.context'].elevated() req.environ['manila.context'] = admin_context fake_group, expected_group = self._get_fake_share_group( ctxt=admin_context, id='my_group_id') self.mock_object(share_group_api.API, 'get', mock.Mock(return_value=fake_group)) res_dict = self.controller.show(req, fake_group['id']) self.assertEqual(expected_group, res_dict['share_group']) 
self.assertIsNotNone(res_dict['share_group']['share_server_id']) self.mock_policy_check.assert_called_once_with( admin_context, self.resource_name, 'get') def test_share_group_show_group_not_found(self): req = fakes.HTTPRequest.blank( '/share-groupss/myfakegroup', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] fake, expected = self._get_fake_share_group( ctxt=req_context, id='myfakegroup') self.mock_object(share_group_api.API, 'get', mock.Mock(side_effect=exception.NotFound)) self.assertRaises( webob.exc.HTTPNotFound, self.controller.show, req, fake['id']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get') @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test__reset_status_call(self, microversion, experimental): self.mock_object(self.controller, '_reset_status') req, _junk = self._get_fake_custom_request_and_context( microversion, experimental) sg_id = 'fake' body = {'reset_status': {'status': constants.STATUS_ERROR}} self.controller.share_group_reset_status(req, sg_id, body) self.controller._reset_status.assert_called_once_with(req, sg_id, body) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_share_groups_reset_status_with_different_roles( self, role, valid_code, valid_status, version): ctxt = self._get_context(role) share_group, req = self._setup_share_group_data() action_name = 'reset_status' body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") req.headers['X-Openstack-Manila-Api-Version'] = self.api_version req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) 
actual_model = db.share_group_get(ctxt, share_group['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_share_group_force_delete_with_different_roles(self, role, resp_code, version): ctxt = self._get_context(role) share_group, req = self._setup_share_group_data() req.method = 'POST' req.headers['content-type'] = 'application/json' action_name = 'force_delete' body = {action_name: {}} req.body = jsonutils.dumps(body).encode("utf-8") req.headers['X-Openstack-Manila-Api-Version'] = self.api_version req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response self.assertEqual(resp_code, resp.status_int) @ddt.data({'microversion': '2.31', 'experimental': True}, {'microversion': SG_GRADUATION_VERSION, 'experimental': False}) @ddt.unpack def test__force_delete_call(self, microversion, experimental): self.mock_object(self.controller, '_force_delete') req, _junk = self._get_fake_custom_request_and_context( microversion, experimental) sg_id = 'fake' body = {'force_delete': {}} self.controller.share_group_force_delete(req, sg_id, body) self.controller._force_delete.assert_called_once_with(req, sg_id, body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_instance_export_locations.py0000664000175000017500000001453300000000000026642 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from webob import exc from manila.api.v2 import share_instance_export_locations as export_locations from manila import context from manila import db from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import db_utils @ddt.ddt class ShareInstanceExportLocationsAPITest(test.TestCase): def _get_request(self, version="2.9", use_admin_context=True): req = fakes.HTTPRequest.blank( '/v2/share_instances/%s/export_locations' % self.share_instance_id, version=version, use_admin_context=use_admin_context) return req def setUp(self): super(ShareInstanceExportLocationsAPITest, self).setUp() self.controller = ( export_locations.ShareInstanceExportLocationController()) self.resource_name = self.controller.resource_name self.ctxt = { 'admin': context.RequestContext('admin', 'fake', True), 'user': context.RequestContext('fake', 'fake'), } self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.share = db_utils.create_share() self.share_instance_id = self.share.instance.id self.req = self._get_request() paths = ['fake1/1/', 'fake2/2', 'fake3/3'] db.export_locations_update( self.ctxt['admin'], self.share_instance_id, paths, False) @ddt.data({'role': 'admin', 'version': '2.9'}, {'role': 'user', 'version': '2.9'}, {'role': 'admin', 'version': '2.13'}, {'role': 'user', 'version': '2.13'}) @ddt.unpack def test_list_and_show(self, role, version): summary_keys = ['id', 'path'] admin_summary_keys = summary_keys + [ 
'share_instance_id', 'is_admin_only'] detail_keys = summary_keys + ['created_at', 'updated_at'] admin_detail_keys = admin_summary_keys + ['created_at', 'updated_at'] self._test_list_and_show(role, version, summary_keys, detail_keys, admin_summary_keys, admin_detail_keys) @ddt.data('admin', 'user') def test_list_and_show_with_preferred_flag(self, role): summary_keys = ['id', 'path', 'preferred'] admin_summary_keys = summary_keys + [ 'share_instance_id', 'is_admin_only'] detail_keys = summary_keys + ['created_at', 'updated_at'] admin_detail_keys = admin_summary_keys + ['created_at', 'updated_at'] self._test_list_and_show(role, '2.14', summary_keys, detail_keys, admin_summary_keys, admin_detail_keys) def _test_list_and_show(self, role, version, summary_keys, detail_keys, admin_summary_keys, admin_detail_keys): req = self._get_request(version=version, use_admin_context=(role == 'admin')) index_result = self.controller.index(req, self.share_instance_id) self.assertIn('export_locations', index_result) self.assertEqual(1, len(index_result)) self.assertEqual(3, len(index_result['export_locations'])) for index_el in index_result['export_locations']: self.assertIn('id', index_el) show_result = self.controller.show( req, self.share_instance_id, index_el['id']) self.assertIn('export_location', show_result) self.assertEqual(1, len(show_result)) show_el = show_result['export_location'] # Check summary keys in index result & detail keys in show result if role == 'admin': self.assertEqual(len(admin_summary_keys), len(index_el)) for key in admin_summary_keys: self.assertIn(key, index_el) self.assertEqual(len(admin_detail_keys), len(show_el)) for key in admin_detail_keys: self.assertIn(key, show_el) else: self.assertEqual(len(summary_keys), len(index_el)) for key in summary_keys: self.assertIn(key, index_el) self.assertEqual(len(detail_keys), len(show_el)) for key in detail_keys: self.assertIn(key, show_el) # Ensure keys common to index & show results have matching values for key in 
summary_keys: self.assertEqual(index_el[key], show_el[key]) def test_list_export_locations_share_instance_not_found(self): self.assertRaises( exc.HTTPNotFound, self.controller.index, self.req, 'inexistent_share_instance_id', ) def test_show_export_location_share_instance_not_found(self): index_result = self.controller.index(self.req, self.share_instance_id) el_id = index_result['export_locations'][0]['id'] self.assertRaises( exc.HTTPNotFound, self.controller.show, self.req, 'inexistent_share_id', el_id, ) @ddt.data('1.0', '2.0', '2.8') def test_list_with_unsupported_version(self, version): self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.index, self._get_request(version), self.share_instance_id, ) @ddt.data('1.0', '2.0', '2.8') def test_show_with_unsupported_version(self, version): index_result = self.controller.index(self.req, self.share_instance_id) self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.show, self._get_request(version), self.share_instance_id, index_result['export_locations'][0]['id'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_instances.py0000664000175000017500000003620700000000000023353 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils from webob import exc as webob_exc from manila.api.openstack import api_version_request from manila.api.v2 import share_instances from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import db_utils CONF = cfg.CONF @ddt.ddt class ShareInstancesAPITest(test.TestCase): """Share instances API Test.""" def setUp(self): super(ShareInstancesAPITest, self).setUp() self.controller = share_instances.ShareInstancesController() self.resource_name = self.controller.resource_name self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_share_instance_data(self, instance=None, version='2.7'): if instance is None: instance = db_utils.create_share(status=constants.STATUS_AVAILABLE, size='1').instance path = '/v2/fake/share_instances/%s/action' % instance['id'] req = fakes.HTTPRequest.blank(path, script_name=path, version=version) return instance, req def _get_request(self, uri, context=None, version="2.3"): if context is None: context = self.admin_context req = fakes.HTTPRequest.blank(uri, version=version) req.environ['manila.context'] = context return req def _validate_ids_in_share_instances_list(self, expected, actual): self.assertEqual(len(expected), len(actual)) self.assertEqual([i['id'] for i in expected], [i['id'] for i in actual]) @ddt.data("2.3", "2.34", "2.35", "2.69") def test_index(self, version): url = '/share_instances' if (api_version_request.APIVersionRequest(version) >= api_version_request.APIVersionRequest('2.35')): url += 
"?export_location_path=/admin/export/location" if (api_version_request.APIVersionRequest(version) >= api_version_request.APIVersionRequest('2.69')): url += "&is_soft_deleted=true" req = self._get_request(url, version=version) req_context = req.environ['manila.context'] last_instance = [db_utils.create_share(size=1, is_soft_deleted=True).instance] share_instances_count = 3 other_instances = [ db_utils.create_share(size=s + 1).instance for s in range(0, share_instances_count) ] test_instances = other_instances + last_instance db.export_locations_update( self.admin_context, test_instances[0]['id'], '/admin/export/location', False) actual_result = self.controller.index(req) if (api_version_request.APIVersionRequest(version) >= api_version_request.APIVersionRequest('2.69')): test_instances = [] elif (api_version_request.APIVersionRequest(version) >= api_version_request.APIVersionRequest('2.35')): test_instances = test_instances[:1] else: test_instances = other_instances self._validate_ids_in_share_instances_list( test_instances, actual_result['share_instances']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'index') def test_index_with_limit(self): req = self._get_request('/v2/fake/share_instances') req_context = req.environ['manila.context'] share_instances_count = 3 test_instances = [ db_utils.create_share(size=s + 1).instance for s in range(0, share_instances_count) ] expect_links = [ { 'href': ( 'http://localhost/share/v2/fake/share_instances?' 
'limit=3&marker=%s' % test_instances[2]['id']), 'rel': 'next', } ] url = '/v2/fake/share_instances?limit=3' req = self._get_request(url) actual_result = self.controller.index(req) self._validate_ids_in_share_instances_list( test_instances, actual_result['share_instances']) self.assertEqual(expect_links, actual_result['share_instances_links']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'index') @ddt.data('2.3', '2.54', '2.71') def test_show(self, version): test_instance = db_utils.create_share(size=1).instance id = test_instance['id'] actual_result = self.controller.show( self._get_request('fake', version=version), id) self.assertEqual(id, actual_result['share_instance']['id']) if (api_version_request.APIVersionRequest(version) >= api_version_request.APIVersionRequest("2.54")): self.assertIn("progress", actual_result['share_instance']) else: self.assertNotIn("progress", actual_result['share_instance']) if (api_version_request.APIVersionRequest(version) >= api_version_request.APIVersionRequest("2.71")): self.assertIn("updated_at", actual_result['share_instance']) else: self.assertNotIn("updated_at", actual_result['share_instance']) self.mock_policy_check.assert_called_once_with( self.admin_context, self.resource_name, 'show') def test_show_with_export_locations(self): test_instance = db_utils.create_share(size=1).instance req = self._get_request('fake', version="2.8") id = test_instance['id'] actual_result = self.controller.show(req, id) self.assertEqual(id, actual_result['share_instance']['id']) self.assertIn("export_location", actual_result['share_instance']) self.assertIn("export_locations", actual_result['share_instance']) self.mock_policy_check.assert_called_once_with( self.admin_context, self.resource_name, 'show') def test_show_without_export_locations(self): test_instance = db_utils.create_share(size=1).instance req = self._get_request('fake', version="2.9") id = test_instance['id'] actual_result = 
self.controller.show(req, id) self.assertEqual(id, actual_result['share_instance']['id']) self.assertNotIn("export_location", actual_result['share_instance']) self.assertNotIn("export_locations", actual_result['share_instance']) self.mock_policy_check.assert_called_once_with( self.admin_context, self.resource_name, 'show') def test_show_with_replica_state(self): test_instance = db_utils.create_share(size=1).instance req = self._get_request('fake', version="2.11") id = test_instance['id'] actual_result = self.controller.show(req, id) self.assertEqual(id, actual_result['share_instance']['id']) self.assertIn("replica_state", actual_result['share_instance']) self.mock_policy_check.assert_called_once_with( self.admin_context, self.resource_name, 'show') @ddt.data("2.3", "2.8", "2.9", "2.11") def test_get_share_instances(self, version): test_share = db_utils.create_share(size=1) id = test_share['id'] req = self._get_request('fake', version=version) req_context = req.environ['manila.context'] share_policy_check_call = mock.call( req_context, 'share', 'get', mock.ANY, do_raise=False) get_instances_policy_check_call = mock.call( req_context, 'share_instance', 'index') actual_result = self.controller.get_share_instances(req, id) self._validate_ids_in_share_instances_list( [test_share.instance], actual_result['share_instances'] ) self.assertEqual(1, len(actual_result.get("share_instances", 0))) for instance in actual_result["share_instances"]: if (api_version_request.APIVersionRequest(version) > api_version_request.APIVersionRequest("2.8")): assert_method = self.assertNotIn else: assert_method = self.assertIn assert_method("export_location", instance) assert_method("export_locations", instance) if (api_version_request.APIVersionRequest(version) > api_version_request.APIVersionRequest("2.10")): self.assertIn("replica_state", instance) self.mock_policy_check.assert_has_calls([ get_instances_policy_check_call, share_policy_check_call]) @ddt.data('show', 'get_share_instances') 
def test_not_found(self, target_method_name): method = getattr(self.controller, target_method_name) action = (target_method_name if target_method_name == 'show' else 'index') self.assertRaises(webob_exc.HTTPNotFound, method, self._get_request('fake'), 'fake') self.mock_policy_check.assert_called_once_with( self.admin_context, self.resource_name, action) @ddt.data(('show', 2), ('get_share_instances', 2), ('index', 1)) @ddt.unpack def test_access(self, target_method_name, args_count): user_context = context.RequestContext('fake', 'fake') req = self._get_request('fake', user_context) policy_exception = exception.PolicyNotAuthorized( action=target_method_name) target_method = getattr(self.controller, target_method_name) args = [i for i in range(1, args_count)] with mock.patch.object(policy, 'check_policy', mock.Mock( side_effect=policy_exception)): self.assertRaises( webob_exc.HTTPForbidden, target_method, req, *args) def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None, version='2.7'): if float(version) > 2.6: action_name = 'reset_status' else: action_name = 'os-reset_status' if body is None: body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) actual_model = db_access_method(ctxt, model['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_share_instances_reset_status_with_different_roles(self, role, valid_code, valid_status, version): ctxt = self._get_context(role) instance, req = 
self._setup_share_instance_data(version=version) self._reset_status(ctxt, instance, req, db.share_instance_get, valid_code, valid_status, version=version) @ddt.data(*fakes.fixture_valid_reset_status_body) @ddt.unpack def test_share_instance_reset_status(self, body, version): instance, req = self._setup_share_instance_data() req.headers['X-Openstack-Manila-Api-Version'] = version if float(version) > 2.6: state = body['reset_status']['status'] else: state = body['os-reset_status']['status'] self._reset_status(self.admin_context, instance, req, db.share_instance_get, 202, state, body, version=version) @ddt.data( ({'os-reset_status': {'x-status': 'bad'}}, '2.6'), ({'os-reset_status': {'status': 'invalid'}}, '2.6'), ({'reset_status': {'x-status': 'bad'}}, '2.7'), ({'reset_status': {'status': 'invalid'}}, '2.7'), ) @ddt.unpack def test_share_instance_invalid_reset_status_body(self, body, version): instance, req = self._setup_share_instance_data() req.headers['X-Openstack-Manila-Api-Version'] = version self._reset_status(self.admin_context, instance, req, db.share_instance_get, 400, constants.STATUS_AVAILABLE, body, version=version) def _force_delete(self, ctxt, model, req, db_access_method, valid_code, check_model_in_db=False, version='2.7'): if float(version) > 2.6: action_name = 'force_delete' else: action_name = 'os-force_delete' body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = ctxt with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response self.assertEqual(valid_code, resp.status_int) if valid_code == 202 and check_model_in_db: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def 
test_instance_force_delete_with_different_roles(self, role, resp_code, version): instance, req = self._setup_share_instance_data(version=version) ctxt = self._get_context(role) self._force_delete(ctxt, instance, req, db.share_instance_get, resp_code, version=version) def test_instance_force_delete_missing(self): instance, req = self._setup_share_instance_data( instance={'id': 'fake'}) ctxt = self._get_context('admin') self._force_delete(ctxt, instance, req, db.share_instance_get, 404) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_network_subnets.py0000664000175000017500000006601300000000000024616 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock import ddt from oslo_config import cfg from oslo_db import exception as db_exception from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.v2 import share_network_subnets from manila.db import api as db_api from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import db_utils from webob import exc CONF = cfg.CONF fake_az = { 'id': 'ae525e12-07e8-4ddc-a2fd-4a89ad4a65ff', 'name': 'fake_az_name' } fake_default_subnet = { 'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsn_id', 'availability_zone_id': None } fake_subnet_with_az = { 'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsn_id', 'availability_zone_id': 'fake_az_id' } @ddt.ddt class ShareNetworkSubnetControllerTest(test.TestCase): """Share network subnet api test""" def setUp(self): super(ShareNetworkSubnetControllerTest, self).setUp() self.controller = share_network_subnets.ShareNetworkSubnetController() self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.resource_name = self.controller.resource_name self.mock_az_get = self.mock_object(db_api, 'availability_zone_get', mock.Mock(return_value=fake_az)) self.share_network = db_utils.create_share_network( name='fake_network', id='fake_sn_id') self.subnet_metadata = {'fake_key': 'fake_value'} self.subnet = db_utils.create_share_network_subnet( share_network_id=self.share_network['id'], metadata=self.subnet_metadata) self.share_server = db_utils.create_share_server( share_network_subnets=[self.subnet]) self.share = db_utils.create_share() def test_share_network_subnet_delete(self): req = fakes.HTTPRequest.blank('/subnets/%s' % self.subnet['id'], version="2.51") context = req.environ['manila.context'] self.subnet['share_servers'] = [self.share_server] mock_sns_get = self.mock_object( db_api, 'share_network_subnet_get', 
mock.Mock(return_value=self.subnet)) mock_all_get_all_shares_by_ss = self.mock_object( db_api, 'share_instance_get_all_by_share_server', mock.Mock(return_value=[])) mock_all_ss_are_auto_deletable = self.mock_object( self.controller, '_all_share_servers_are_auto_deletable', mock.Mock(return_value=True)) mock_delete_share_server = self.mock_object( self.controller.share_rpcapi, 'delete_share_server') mock_subnet_delete = self.mock_object(db_api, 'share_network_subnet_delete') result = self.controller.delete(req, self.share_network['id'], self.subnet['id']) self.assertEqual(202, result.status_int) mock_sns_get.assert_called_once_with( context, self.subnet['id']) mock_all_get_all_shares_by_ss.assert_called_once_with( context, self.subnet['share_servers'][0].id ) mock_all_ss_are_auto_deletable.assert_called_once_with( self.subnet) mock_delete_share_server.assert_called_once_with( context, self.subnet['share_servers'][0]) mock_subnet_delete.assert_called_once_with( context, self.subnet['id']) policy.check_policy.assert_called_once_with( context, self.resource_name, 'delete') def test_share_network_subnet_delete_network_not_found(self): req = fakes.HTTPRequest.blank('/subnets/%s' % self.subnet['id'], version="2.51") context = req.environ['manila.context'] mock_sn_get = self.mock_object( db_api, 'share_network_get', mock.Mock(side_effect=exception.ShareNetworkNotFound( share_network_id=self.share_network['id'] ))) self.assertRaises(exc.HTTPNotFound, self.controller.delete, req, self.share_network['id'], self.subnet['id']) mock_sn_get.assert_called_once_with( context, self.share_network['id']) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'delete') def test_share_network_subnet_delete_subnet_not_found(self): req = fakes.HTTPRequest.blank('/subnets/%s' % self.subnet['id'], version="2.51") context = req.environ['manila.context'] mock_sns_get = self.mock_object( db_api, 'share_network_subnet_get', 
mock.Mock(side_effect=exception.ShareNetworkSubnetNotFound( share_network_subnet_id=self.subnet['id'] ))) self.assertRaises(exc.HTTPNotFound, self.controller.delete, req, self.share_network['id'], self.subnet['id']) mock_sns_get.assert_called_once_with( context, self.subnet['id']) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'delete') def test_delete_subnet_with_share_servers_fail(self): req = fakes.HTTPRequest.blank('/subnets/%s' % self.subnet['id'], version="2.51") context = req.environ['manila.context'] self.subnet['share_servers'] = [self.share_server] mock_sns_get = self.mock_object( db_api, 'share_network_subnet_get', mock.Mock(return_value=self.subnet)) mock_all_get_all_shares_by_ss = self.mock_object( db_api, 'share_instance_get_all_by_share_server', mock.Mock(return_value=[])) mock_all_ss_are_auto_deletable = self.mock_object( self.controller, '_all_share_servers_are_auto_deletable', mock.Mock(return_value=False)) self.assertRaises(exc.HTTPConflict, self.controller.delete, req, self.share_network['id'], self.subnet['id']) mock_sns_get.assert_called_once_with( context, self.subnet['id']) mock_all_get_all_shares_by_ss.assert_called_once_with( context, self.subnet['share_servers'][0].id ) mock_all_ss_are_auto_deletable.assert_called_once_with( self.subnet ) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'delete') def test_delete_subnet_with_shares_fail(self): req = fakes.HTTPRequest.blank('/subnets/%s' % self.subnet['id'], version="2.51") context = req.environ['manila.context'] self.subnet['share_servers'] = [self.share_server] mock_network_get = self.mock_object( db_api, 'share_network_get') mock_sns_get = self.mock_object( db_api, 'share_network_subnet_get', mock.Mock(return_value=self.subnet)) mock_all_get_all_shares_by_ss = self.mock_object( db_api, 'share_instance_get_all_by_share_server', mock.Mock(return_value=[self.share])) self.assertRaises(exc.HTTPConflict, self.controller.delete, req, 
self.share_network['id'], self.subnet['id']) mock_network_get.assert_called_once_with( context, self.share_network['id']) mock_sns_get.assert_called_once_with( context, self.subnet['id']) mock_all_get_all_shares_by_ss.assert_called_once_with( context, self.subnet['share_servers'][0].id ) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'delete') def _setup_create_test_request_body(self, metadata=False): body = { 'share_network_id': self.share_network['id'], 'availability_zone': fake_az['name'], 'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsn_id' } if metadata: body['metadata'] = self.subnet_metadata return body @ddt.data({'version': "2.51", 'has_share_servers': False}, {'version': "2.70", 'has_share_servers': False}, {'version': "2.70", 'has_share_servers': True}, {'version': "2.78", 'has_share_servers': False}) @ddt.unpack def test_subnet_create(self, version, has_share_servers): req = fakes.HTTPRequest.blank('/subnets', version=version) multiple_subnet_support = (req.api_version_request >= api_version.APIVersionRequest("2.70")) metadata_support = (req.api_version_request >= api_version.APIVersionRequest("2.78")) context = req.environ['manila.context'] body = { 'share-network-subnet': self._setup_create_test_request_body( metadata=metadata_support) } sn_id = body['share-network-subnet']['share_network_id'] expected_subnet = copy.deepcopy(self.subnet) if has_share_servers: expected_subnet['share_servers'] = [self.share_server] mock_validate_subnet_create = self.mock_object( common, 'validate_subnet_create', mock.Mock(return_value=(self.share_network, [expected_subnet]))) mock_subnet_create = self.mock_object( db_api, 'share_network_subnet_create', mock.Mock(return_value=expected_subnet)) mock_update_net_allocations = self.mock_object( self.controller.share_api, 'update_share_server_network_allocations', mock.Mock(return_value=expected_subnet)) mock_share_network_subnet_get = self.mock_object( db_api, 
'share_network_subnet_get', mock.Mock(return_value=expected_subnet)) mock_check_metadata_properties = self.mock_object( common, 'check_metadata_properties') fake_data = body['share-network-subnet'] fake_data['share_network_id'] = self.share_network['id'] res = self.controller.create( req, body['share-network-subnet']['share_network_id'], body) view_subnet = { 'id': expected_subnet.get('id'), 'availability_zone': expected_subnet.get('availability_zone'), 'share_network_id': expected_subnet.get('share_network_id'), 'share_network_name': expected_subnet['share_network_name'], 'created_at': expected_subnet.get('created_at'), 'segmentation_id': expected_subnet.get('segmentation_id'), 'neutron_subnet_id': expected_subnet.get('neutron_subnet_id'), 'updated_at': expected_subnet.get('updated_at'), 'neutron_net_id': expected_subnet.get('neutron_net_id'), 'ip_version': expected_subnet.get('ip_version'), 'cidr': expected_subnet.get('cidr'), 'network_type': expected_subnet.get('network_type'), 'mtu': expected_subnet.get('mtu'), 'gateway': expected_subnet.get('gateway') } if metadata_support: view_subnet['metadata'] = self.subnet_metadata self.assertEqual(view_subnet, res['share_network_subnet']) mock_share_network_subnet_get.assert_called_once_with( context, expected_subnet['id']) mock_validate_subnet_create.assert_called_once_with( context, sn_id, fake_data, multiple_subnet_support) if has_share_servers: fake_data['share_servers'] = [self.share_server] mock_update_net_allocations.assert_called_once_with( context, self.share_network, fake_data) else: mock_subnet_create.assert_called_once_with( context, fake_data) self.assertEqual(metadata_support, mock_check_metadata_properties.called) @ddt.data({'exception1': exception.ServiceIsDown(service='fake_srv'), 'exc_raise': exc.HTTPInternalServerError}, {'exception1': exception.InvalidShareNetwork( reason='fake_reason'), 'exc_raise': exc.HTTPBadRequest}, {'exception1': db_exception.DBError(), 'exc_raise': exc.HTTPInternalServerError}) 
@ddt.unpack def test_subnet_create_fail_update_network_allocation(self, exception1, exc_raise): req = fakes.HTTPRequest.blank('/subnets', version="2.70") multiple_subnet_support = (req.api_version_request >= api_version.APIVersionRequest("2.70")) context = req.environ['manila.context'] body = { 'share-network-subnet': self._setup_create_test_request_body() } sn_id = body['share-network-subnet']['share_network_id'] expected_subnet = copy.deepcopy(self.subnet) expected_subnet['share_servers'] = [self.share_server] mock_validate_subnet_create = self.mock_object( common, 'validate_subnet_create', mock.Mock(return_value=(self.share_network, [expected_subnet]))) mock_update_net_allocations = self.mock_object( self.controller.share_api, 'update_share_server_network_allocations', mock.Mock(side_effect=exception1)) fake_data = body['share-network-subnet'] fake_data['share_network_id'] = self.share_network['id'] fake_data['share_servers'] = [self.share_server] self.assertRaises(exc_raise, self.controller.create, req, body['share-network-subnet']['share_network_id'], body) mock_validate_subnet_create.assert_called_once_with( context, sn_id, fake_data, multiple_subnet_support) mock_update_net_allocations.assert_called_once_with( context, self.share_network, fake_data) def test_subnet_create_invalid_body(self): fake_sn_id = 'fake_id' req = fakes.HTTPRequest.blank('/subnets', version="2.51") body = {} self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, fake_sn_id, body) @ddt.data("2.51", "2.70") def test_subnet_create_subnet_db_error(self, version): req = fakes.HTTPRequest.blank('/subnets', version=version) body = { 'share-network-subnet': self._setup_create_test_request_body() } expected_subnet = copy.deepcopy(self.subnet) self.mock_object( common, 'validate_subnet_create', mock.Mock(return_value=(self.share_network, [expected_subnet]))) self.mock_object( db_api, 'share_network_subnet_create', mock.Mock(side_effect=db_exception.DBError())) 
self.assertRaises(exc.HTTPInternalServerError, self.controller.create, req, 'fake_sn_id', body) def test_show_subnet(self): subnet = db_utils.create_share_network_subnet( id='fake_sns_2', share_network_id=self.share_network['id']) expected_result = { 'share_network_subnet': { "created_at": subnet['created_at'], "id": subnet['id'], "share_network_id": subnet['share_network_id'], "share_network_name": self.share_network['name'], "availability_zone": subnet['availability_zone'], "segmentation_id": subnet['segmentation_id'], "neutron_subnet_id": subnet['neutron_subnet_id'], "updated_at": subnet['updated_at'], "neutron_net_id": subnet['neutron_net_id'], "ip_version": subnet['ip_version'], "cidr": subnet['cidr'], "network_type": subnet['network_type'], "gateway": subnet['gateway'], "mtu": subnet['mtu'], } } req = fakes.HTTPRequest.blank('/subnets/%s' % subnet['id'], version="2.51") context = req.environ['manila.context'] mock_sn_get = self.mock_object( db_api, 'share_network_get', mock.Mock( return_value=self.share_network)) mock_sns_get = self.mock_object( db_api, 'share_network_subnet_get', mock.Mock( return_value=subnet)) result = self.controller.show(req, self.share_network['id'], subnet['id']) self.assertEqual(expected_result, result) mock_sn_get.assert_called_once_with(context, self.share_network['id']) mock_sns_get.assert_called_once_with(context, subnet['id']) @ddt.data( (mock.Mock(side_effect=exception.ShareNetworkNotFound( share_network_id='fake_net_id')), None), (mock.Mock(), mock.Mock( side_effect=exception.ShareNetworkSubnetNotFound( share_network_subnet_id='fake_subnet_id')))) @ddt.unpack def test_show_subnet_not_found(self, sn_get_side_effect, sns_get_side_effect): req = fakes.HTTPRequest.blank('/subnets/%s' % self.subnet['id'], version="2.51") context = req.environ['manila.context'] mock_sn_get = self.mock_object( db_api, 'share_network_get', sn_get_side_effect) mock_sns_get = self.mock_object( db_api, 'share_network_subnet_get', sns_get_side_effect) 
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, self.share_network['id'], self.subnet['id']) mock_sn_get.assert_called_once_with(context, self.share_network['id']) if sns_get_side_effect: mock_sns_get.assert_called_once_with(context, self.subnet['id']) def test_list_subnet(self): share_network_id = 'fake_id' subnet = db_utils.create_share_network_subnet( share_network_id=share_network_id, id='fake_id') fake_sn = db_utils.create_share_network(id=share_network_id) expected_result = { 'share_network_subnets': [{ "created_at": subnet['created_at'], "id": subnet['id'], "share_network_id": subnet['id'], "share_network_name": fake_sn["name"], "availability_zone": subnet['availability_zone'], "segmentation_id": subnet['segmentation_id'], "neutron_subnet_id": subnet['neutron_subnet_id'], "updated_at": subnet['updated_at'], "neutron_net_id": subnet['neutron_net_id'], "ip_version": subnet['ip_version'], "cidr": subnet['cidr'], "network_type": subnet['network_type'], "gateway": subnet['gateway'], "mtu": subnet['mtu'], }] } req = fakes.HTTPRequest.blank('/subnets/', version="2.51") context = req.environ['manila.context'] mock_sn_get = self.mock_object( db_api, 'share_network_get', mock.Mock( return_value=fake_sn)) result = self.controller.index(req, self.share_network['id']) self.assertEqual(expected_result, result) mock_sn_get.assert_called_once_with(context, self.share_network['id']) def test_list_subnet_share_network_not_found(self): req = fakes.HTTPRequest.blank('/subnets/', version="2.51") context = req.environ['manila.context'] mock_sn_get = self.mock_object( db_api, 'share_network_get', mock.Mock( side_effect=exception.ShareNetworkNotFound( share_network_id=self.share_network['id']))) self.assertRaises(exc.HTTPNotFound, self.controller.index, req, self.share_network['id']) mock_sn_get.assert_called_once_with(context, self.share_network['id']) def test_index_metadata(self): req = fakes.HTTPRequest.blank('/subnets/', version="2.78") mock_index = 
self.mock_object( self.controller, '_index_metadata', mock.Mock(return_value='fake_metadata')) result = self.controller.index_metadata(req, self.share_network['id'], self.subnet['id']) self.assertEqual('fake_metadata', result) mock_index.assert_called_once_with(req, self.subnet['id'], parent_id=self.share_network['id']) @ddt.data("2.78", "2.89") def test_create_metadata(self, version): req = fakes.HTTPRequest.blank('/subnets/', version=version) context = req.environ['manila.context'] mock_index = self.mock_object( self.controller, '_create_metadata', mock.Mock(return_value={'metadata': 'fake_metadata'})) mock_update = self.mock_object( self.controller.share_api, 'update_share_network_subnet_from_metadata') body = 'fake_metadata_body' result = self.controller.create_metadata(req, self.share_network['id'], self.subnet['id'], body) self.assertEqual('fake_metadata', result['metadata']) mock_index.assert_called_once_with(req, self.subnet['id'], body, parent_id=self.share_network['id']) metadata_support = (req.api_version_request >= api_version.APIVersionRequest("2.89")) if metadata_support: mock_update.assert_called_once_with( context, self.share_network['id'], self.subnet['id'], 'fake_metadata') else: mock_update.assert_not_called() @ddt.data("2.78", "2.89") def test_update_all_metadata(self, version): req = fakes.HTTPRequest.blank('/subnets/', version=version) context = req.environ['manila.context'] mock_index = self.mock_object( self.controller, '_update_all_metadata', mock.Mock(return_value={'metadata': 'fake_metadata'})) mock_update = self.mock_object( self.controller.share_api, 'update_share_network_subnet_from_metadata') body = 'fake_metadata_body' result = self.controller.update_all_metadata( req, self.share_network['id'], self.subnet['id'], body) self.assertEqual('fake_metadata', result['metadata']) mock_index.assert_called_once_with(req, self.subnet['id'], body, parent_id=self.share_network['id']) metadata_support = (req.api_version_request >= 
api_version.APIVersionRequest("2.89")) if metadata_support: mock_update.assert_called_once_with( context, self.share_network['id'], self.subnet['id'], 'fake_metadata') else: mock_update.assert_not_called() @ddt.data("2.78", "2.89") def test_update_metadata_item(self, version): req = fakes.HTTPRequest.blank('/subnets/', version=version) context = req.environ['manila.context'] mock_index = self.mock_object( self.controller, '_update_metadata_item', mock.Mock(return_value={'metadata': 'fake_metadata'})) mock_update = self.mock_object( self.controller.share_api, 'update_share_network_subnet_from_metadata') body = 'fake_metadata_body' key = 'fake_key' result = self.controller.update_metadata_item( req, self.share_network['id'], self.subnet['id'], body, key) self.assertEqual('fake_metadata', result['metadata']) mock_index.assert_called_once_with(req, self.subnet['id'], body, key, parent_id=self.share_network['id']) metadata_support = (req.api_version_request >= api_version.APIVersionRequest("2.89")) if metadata_support: mock_update.assert_called_once_with( context, self.share_network['id'], self.subnet['id'], 'fake_metadata') else: mock_update.assert_not_called() def test_show_metadata(self): req = fakes.HTTPRequest.blank('/subnets/', version="2.78") mock_index = self.mock_object( self.controller, '_show_metadata', mock.Mock(return_value='fake_metadata')) key = 'fake_key' result = self.controller.show_metadata( req, self.share_network['id'], self.subnet['id'], key) self.assertEqual('fake_metadata', result) mock_index.assert_called_once_with(req, self.subnet['id'], key, parent_id=self.share_network['id']) @ddt.data("2.78", "2.89") def test_delete_metadata(self, version): req = fakes.HTTPRequest.blank('/subnets/', version=version) context = req.environ['manila.context'] mock_index = self.mock_object( self.controller, '_delete_metadata', mock.Mock(return_value='fake_metadata')) mock_sn_get = self.mock_object( db_api, 'share_network_get', mock.Mock( 
return_value=self.share_network)) key = 'fake_key' CONF.set_default( "driver_updatable_subnet_metadata", ['fake_key', 'fake_key2']) result = self.controller.delete_metadata( req, self.share_network['id'], self.subnet['id'], key) self.assertEqual('fake_metadata', result) mock_index.assert_called_once_with(req, self.subnet['id'], key, parent_id=self.share_network['id']) metadata_support = (req.api_version_request >= api_version.APIVersionRequest("2.89")) if metadata_support: mock_sn_get.assert_called_once_with( context, self.share_network['id']) else: mock_sn_get.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_networks.py0000664000175000017500000022741000000000000023236 0ustar00zuulzuul00000000000000# Copyright 2014 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock from urllib import parse import ddt from oslo_db import exception as db_exception from oslo_utils import timeutils from webob import exc as webob_exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.v2 import share_networks from manila.db import api as db_api from manila import exception from manila import quota from manila.share import api as share_api from manila import test from manila.tests.api import fakes from manila.tests import db_utils fake_share_network_subnet = { 'id': 'fake subnet id', 'neutron_net_id': 'fake net id', 'neutron_subnet_id': 'fake subnet id', 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'share_network_id': 'fake network id', 'availability_zone_id': None, 'share_servers': [], 'availability_zone': [] } fake_share_network = { 'id': 'fake network id', 'project_id': 'fake project', 'created_at': timeutils.parse_strtime('2002-02-02', fmt="%Y-%m-%d"), 'updated_at': None, 'name': 'fake name', 'description': 'fake description', 'security_services': [], 'share_network_subnets': [], 'security_service_update_support': True, 'status': 'active' } fake_share_network_shortened = { 'id': 'fake network id', 'name': 'fake name', } fake_share_network_with_ss = { 'id': 'sn-id', 'project_id': 'fake', 'created_at': timeutils.parse_strtime('2001-01-01', fmt="%Y-%m-%d"), 'updated_at': None, 'name': 'test-sn', 'description': 'fake description', 'share_network_subnets': [], 'security_services': [{'id': 'fake-ss-id'}] } fake_sn_with_ss_shortened = { 'id': 'sn-id', 'name': 'test-sn', } ADD_UPDATE_SEC_SERVICE_VERSION = '2.63' QUOTAS = quota.QUOTAS @ddt.ddt class ShareNetworkAPITest(test.TestCase): def setUp(self): super(ShareNetworkAPITest, self).setUp() self.controller = share_networks.ShareNetworkController() self.req = fakes.HTTPRequest.blank('/share-networks') self.body = {share_networks.RESOURCE_NAME: {'name': 'fake 
name'}} self.context = self.req.environ['manila.context'] self.share_api = share_api.API() def _check_share_network_view_shortened(self, view, share_nw): self.assertEqual(share_nw['id'], view['id']) self.assertEqual(share_nw['name'], view['name']) def _check_share_network_view(self, view, share_nw): self.assertEqual(share_nw['id'], view['id']) self.assertEqual(share_nw['project_id'], view['project_id']) self.assertEqual(share_nw['created_at'], view['created_at']) self.assertEqual(share_nw['updated_at'], view['updated_at']) self.assertEqual(share_nw['name'], view['name']) self.assertEqual(share_nw['description'], view['description']) self.assertNotIn('shares', view) self.assertNotIn('network_allocations', view) self.assertNotIn('security_services', view) def _setup_body_for_create_test(self, data): data.update({'user_id': 'fake_user_id'}) body = {share_networks.RESOURCE_NAME: data} return body @ddt.data( {'neutron_net_id': 'fake', 'neutron_subnet_id': 'fake'}) def test_create_valid_cases(self, data): body = self._setup_body_for_create_test(data) result = self.controller.create(self.req, body) data.pop('user_id', None) for k, v in data.items(): self.assertIn(data[k], result['share_network'][k]) @ddt.data( {'neutron_net_id': 'fake', 'neutron_subnet_id': 'fake', 'availability_zone': 'fake'}) def test_create_valid_cases_upper_2_50(self, data): req = fakes.HTTPRequest.blank('/share-networks', version="2.51") context = req.environ['manila.context'] body = self._setup_body_for_create_test(data) fake_az = { 'name': 'fake', 'id': 'fake_id' } self.mock_object(db_api, 'availability_zone_get', mock.Mock(return_value=fake_az)) result = self.controller.create(req, body) result_subnet = result['share_network']['share_network_subnets'][0] data.pop('user_id', None) data.pop('project_id', None) data.pop('availability_zone_id', None) data.pop('id', None) data['availability_zone'] = result_subnet['availability_zone'] for k, v in data.items(): self.assertIn(k, result_subnet.keys()) 
db_api.availability_zone_get.assert_called_once_with( context, fake_az['name'] ) @ddt.data( {'nova_net_id': 'foo', 'neutron_net_id': 'bar'}, {'nova_net_id': 'foo', 'neutron_subnet_id': 'quuz'}, {'nova_net_id': 'foo', 'neutron_net_id': 'bar', 'neutron_subnet_id': 'quuz'}, {'nova_net_id': 'fake_nova_net_id'}, {'neutron_net_id': 'bar'}, {'neutron_subnet_id': 'quuz'}) def test_create_invalid_cases(self, data): data.update({'user_id': 'fake_user_id'}) body = {share_networks.RESOURCE_NAME: data} self.assertRaises( webob_exc.HTTPBadRequest, self.controller.create, self.req, body) @ddt.data( {'name': 'new fake name'}, {'description': 'new fake description'}, {'name': 'new fake name', 'description': 'new fake description'}) def test_update_valid_cases(self, data): body = {share_networks.RESOURCE_NAME: {'user_id': 'fake_user'}} created = self.controller.create(self.req, body) body = {share_networks.RESOURCE_NAME: data} result = self.controller.update( self.req, created['share_network']['id'], body) for k, v in data.items(): self.assertIn(data[k], result['share_network'][k]) self._check_share_network_view( result[share_networks.RESOURCE_NAME], result['share_network']) @ddt.data( {'nova_net_id': 'foo', 'neutron_net_id': 'bar'}, {'nova_net_id': 'foo', 'neutron_subnet_id': 'quuz'}, {'nova_net_id': 'foo', 'neutron_net_id': 'bar', 'neutron_subnet_id': 'quuz'}, {'nova_net_id': 'fake_nova_net_id'}, ) def test_update_invalid_cases(self, data): body = {share_networks.RESOURCE_NAME: {'user_id': 'fake_user'}} created = self.controller.create(self.req, body) body = {share_networks.RESOURCE_NAME: data} self.assertRaises( webob_exc.HTTPBadRequest, self.controller.update, self.req, created['share_network']['id'], body) @ddt.data( ({'share_network_subnets': [ {'share_network_id': fake_share_network['id']}]}, True), ({'share_network_subnets': []}, False)) @ddt.unpack def test__subnet_has_search_opt(self, network, has_search_opt): search_opts = { 'share_network_id': fake_share_network['id'] } 
result = None for key, value in search_opts.items(): result = self.controller._subnet_has_search_opt( key, value, network) self.assertEqual(has_search_opt, result) def test_create_nominal(self): self.mock_object(db_api, 'share_network_subnet_create') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) self.mock_object(common, 'check_net_id_and_subnet_id') with mock.patch.object(db_api, 'share_network_create', mock.Mock(return_value=fake_share_network)): result = self.controller.create(self.req, self.body) db_api.share_network_create.assert_called_once_with( self.req.environ['manila.context'], self.body[share_networks.RESOURCE_NAME]) self._check_share_network_view( result[share_networks.RESOURCE_NAME], fake_share_network) def test_create_db_api_exception(self): with mock.patch.object(db_api, 'share_network_create', mock.Mock(side_effect=db_exception.DBError)): self.assertRaises(webob_exc.HTTPInternalServerError, self.controller.create, self.req, self.body) def test_create_wrong_body(self): body = None self.assertRaises(webob_exc.HTTPUnprocessableEntity, self.controller.create, self.req, body) @ddt.data( {'availability_zone': 'fake-zone'}) def test_create_az_not_found(self, data): req = fakes.HTTPRequest.blank('/share-networks', version="2.51") self.mock_object( db_api, 'availability_zone_get', mock.Mock( side_effect=exception.AvailabilityZoneNotFound(id='fake'))) body = {share_networks.RESOURCE_NAME: data} self.assertRaises(webob_exc.HTTPBadRequest, self.controller.create, req, body) def test_create_error_on_network_creation(self): self.mock_object(share_networks.QUOTAS, 'reserve', mock.Mock(return_value='fake_reservation')) self.mock_object(share_networks.QUOTAS, 'rollback') self.mock_object(db_api, 'share_network_create', mock.Mock(side_effect=db_exception.DBError())) self.assertRaises(webob_exc.HTTPInternalServerError, self.controller.create, self.req, self.body) self.assertTrue(share_networks.QUOTAS.rollback.called) def 
test_create_error_on_subnet_creation(self): data = { 'neutron_net_id': 'fake', 'neutron_subnet_id': 'fake', 'id': fake_share_network['id'] } subnet_data = copy.deepcopy(data) self.mock_object(share_networks.QUOTAS, 'reserve', mock.Mock(return_value='fake_reservation')) self.mock_object(share_networks.QUOTAS, 'rollback') self.mock_object(db_api, 'share_network_create', mock.Mock(return_value=fake_share_network)) self.mock_object(db_api, 'share_network_subnet_create', mock.Mock(side_effect=db_exception.DBError())) self.mock_object(db_api, 'share_network_delete') body = {share_networks.RESOURCE_NAME: data} self.assertRaises(webob_exc.HTTPInternalServerError, self.controller.create, self.req, body) db_api.share_network_create.assert_called_once_with(self.context, data) subnet_data['share_network_id'] = data['id'] subnet_data.pop('id') db_api.share_network_subnet_create.assert_called_once_with( self.context, subnet_data) db_api.share_network_delete.assert_called_once_with( self.context, fake_share_network['id']) self.assertTrue(share_networks.QUOTAS.rollback.called) def test_delete_nominal(self): share_nw = fake_share_network.copy() subnet = fake_share_network_subnet.copy() subnet['share_servers'] = ['foo', 'bar'] share_nw['share_network_subnets'] = [subnet] self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'share_instance_get_all_by_share_network', mock.Mock(return_value=[])) self.mock_object(self.controller.share_rpcapi, 'delete_share_server') self.mock_object(self.controller, '_all_share_servers_are_auto_deletable', mock.Mock(return_value=True)) self.mock_object(db_api, 'share_network_delete') self.controller.delete(self.req, share_nw['id']) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) (db_api.share_instance_get_all_by_share_network. 
assert_called_once_with(self.req.environ['manila.context'], share_nw['id'])) self.controller.share_rpcapi.delete_share_server.assert_has_calls([ mock.call(self.req.environ['manila.context'], 'foo'), mock.call(self.req.environ['manila.context'], 'bar')]) db_api.share_network_delete.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) def test_delete_not_found(self): share_nw = 'fake network id' self.mock_object(db_api, 'share_network_get', mock.Mock(side_effect=exception.ShareNetworkNotFound( share_network_id=share_nw))) self.assertRaises(webob_exc.HTTPNotFound, self.controller.delete, self.req, share_nw) def test_quota_delete_reservation_failed(self): share_nw = fake_share_network.copy() subnet = fake_share_network_subnet.copy() subnet['share_servers'] = ['foo', 'bar'] share_nw['share_network_subnets'] = [subnet] share_nw['user_id'] = 'fake_user_id' self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'share_instance_get_all_by_share_network', mock.Mock(return_value=[])) self.mock_object(self.controller, '_all_share_servers_are_auto_deletable', mock.Mock(return_value=True)) self.mock_object(self.controller.share_rpcapi, 'delete_share_server') self.mock_object(db_api, 'share_network_delete') self.mock_object(share_networks.QUOTAS, 'reserve', mock.Mock(side_effect=Exception)) self.mock_object(share_networks.QUOTAS, 'commit') self.controller.delete(self.req, share_nw['id']) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) (db_api.share_instance_get_all_by_share_network. 
assert_called_once_with(self.req.environ['manila.context'], share_nw['id'])) self.controller.share_rpcapi.delete_share_server.assert_has_calls([ mock.call(self.req.environ['manila.context'], 'foo'), mock.call(self.req.environ['manila.context'], 'bar')]) db_api.share_network_delete.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) share_networks.QUOTAS.reserve.assert_called_once_with( self.req.environ['manila.context'], project_id=share_nw['project_id'], share_networks=-1, user_id=share_nw['user_id'] ) self.assertFalse(share_networks.QUOTAS.commit.called) def test_delete_in_use_by_share(self): share_nw = fake_share_network.copy() self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'share_instance_get_all_by_share_network', mock.Mock(return_value=['foo', 'bar'])) self.assertRaises(webob_exc.HTTPConflict, self.controller.delete, self.req, share_nw['id']) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) (db_api.share_instance_get_all_by_share_network. 
assert_called_once_with(self.req.environ['manila.context'], share_nw['id'])) def test_delete_in_use_by_share_group(self): share_nw = fake_share_network.copy() self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'count_share_groups_in_share_network', mock.Mock(return_value=2)) self.assertRaises(webob_exc.HTTPConflict, self.controller.delete, self.req, share_nw['id']) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) def test_delete_contains_is_auto_deletable_false_servers(self): share_nw = fake_share_network.copy() self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'count_share_groups_in_share_network') self.mock_object(share_networks.ShareNetworkController, '_all_share_servers_are_auto_deletable', mock.Mock(return_value=False)) self.assertRaises(webob_exc.HTTPConflict, self.controller.delete, self.req, share_nw['id']) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw['id']) def test_delete_contains_more_than_one_subnet(self): share_nw = fake_share_network.copy() self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'share_instance_get_all_by_share_network', mock.Mock(return_value=None)) self.mock_object(db_api, 'count_share_groups_in_share_network', mock.Mock(return_value=None)) self.mock_object(self.controller, '_share_network_contains_subnets', mock.Mock(return_value=True)) self.assertRaises(webob_exc.HTTPConflict, self.controller.delete, self.req, share_nw['id']) db_api.share_network_get.assert_called_once_with( self.context, share_nw['id']) (db_api.share_instance_get_all_by_share_network .assert_called_once_with(self.context, share_nw['id'])) db_api.count_share_groups_in_share_network.assert_called_once_with( self.context, share_nw['id'] ) (self.controller._share_network_contains_subnets 
.assert_called_once_with(share_nw)) def test_delete_subnet_contains_share_server(self): share_nw = fake_share_network.copy() share_nw['share_network_subnets'].append({ 'id': 'fake_sns_id', 'share_servers': [{'id': 'fake_share_server_id'}] }) self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_nw)) self.mock_object(db_api, 'count_share_groups_in_share_network', mock.Mock(return_value=0)) self.mock_object(self.controller, '_share_network_contains_subnets', mock.Mock(return_value=False)) self.mock_object( self.controller, '_all_share_servers_are_auto_deletable', mock.Mock(return_value=False)) self.assertRaises(webob_exc.HTTPConflict, self.controller.delete, self.req, share_nw['id']) @ddt.data( ({'share_servers': [{'is_auto_deletable': True}, {'is_auto_deletable': True}]}, True), ({'share_servers': [{'is_auto_deletable': True}, {'is_auto_deletable': False}]}, False), ) @ddt.unpack def test__share_servers_are_auto_deletable(self, fake_share_network, expected_result): self.assertEqual( expected_result, self.controller._all_share_servers_are_auto_deletable( fake_share_network)) @ddt.data( ({'share_network_subnets': [{'share_servers': [{}, {}]}]}, True), ({'share_network_subnets': [{'share_servers': []}]}, False), ) @ddt.unpack def test__share_network_subnets_contain_share_servers(self, share_network, expected_result): self.assertEqual( expected_result, self.controller._share_network_subnets_contain_share_servers( share_network)) def test_show_nominal(self): share_nw = 'fake network id' with mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)): result = self.controller.show(self.req, share_nw) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_nw) self._check_share_network_view( result[share_networks.RESOURCE_NAME], fake_share_network) def test_show_not_found(self): share_nw = 'fake network id' test_exception = exception.ShareNetworkNotFound( share_network_id=share_nw) 
with mock.patch.object(db_api, 'share_network_get', mock.Mock(side_effect=test_exception)): self.assertRaises(webob_exc.HTTPNotFound, self.controller.show, self.req, share_nw) def test_index_no_filters(self): networks = [fake_share_network] with mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock(return_value=networks)): result = self.controller.index(self.req) db_api.share_network_get_all_by_filter.assert_called_once_with( self.context, filters={}) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_share_network_shortened) def test_index_detailed(self): networks = [fake_share_network] with mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock(return_value=networks)): result = self.controller.detail(self.req) db_api.share_network_get_all_by_filter.assert_called_once_with( self.context, filters={}) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view( result[share_networks.RESOURCES_NAME][0], fake_share_network) @mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock()) def test_index_filter_by_security_service(self): db_api.share_network_get_all_by_filter.return_value = [ fake_share_network_with_ss] req = fakes.HTTPRequest.blank( '/share_networks?security_service_id=fake-ss-id') result = self.controller.index(req) filters = {'security_service_id': 'fake-ss-id'} (db_api.share_network_get_all_by_filter. 
assert_called_once_with(req.environ['manila.context'], filters=filters)) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_sn_with_ss_shortened) @mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock()) def test_index_all_tenants_non_admin_context(self): req = fakes.HTTPRequest.blank( '/share_networks?all_tenants=1') fake_context = req.environ['manila.context'] db_api.share_network_get_all_by_filter.return_value = [] self.controller.index(req) db_api.share_network_get_all_by_filter.assert_called_with( fake_context, filters={}) @mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock()) def test_index_all_tenants_admin_context(self): db_api.share_network_get_all_by_filter.return_value = [ fake_share_network] req = fakes.HTTPRequest.blank( '/share_networks?all_tenants=1', use_admin_context=True) result = self.controller.index(req) db_api.share_network_get_all_by_filter.assert_called_once_with( req.environ['manila.context'], filters={}) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_share_network_shortened) @mock.patch.object(db_api, 'share_network_get_all', mock.Mock()) def test_index_all_tenants_with_invaild_value(self): req = fakes.HTTPRequest.blank( '/share_networks?all_tenants=wonk', use_admin_context=True) self.assertRaises(exception.InvalidInput, self.controller.index, req) @mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock()) def test_index_all_tenants_with_value_zero(self): db_api.share_network_get_all_by_filter.return_value = [ fake_share_network] req = fakes.HTTPRequest.blank( '/share_networks?all_tenants=0', use_admin_context=True) result = self.controller.index(req) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( 
result[share_networks.RESOURCES_NAME][0], fake_share_network_shortened) filters = {'project_id': 'fake'} db_api.share_network_get_all_by_filter.assert_called_once_with( req.environ['manila.context'], filters=filters) @mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock()) def test_index_filter_by_project_id_non_admin_context(self): req = fakes.HTTPRequest.blank( '/share_networks?project_id=fake project') fake_context = req.environ['manila.context'] db_api.share_network_get_all_by_filter.return_value = [] self.controller.index(req) db_api.share_network_get_all_by_filter.assert_called_with( fake_context, filters={}) @mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock()) def test_index_filter_by_project_id_admin_context(self): db_api.share_network_get_all_by_filter.return_value = [ fake_share_network_with_ss ] req = fakes.HTTPRequest.blank( '/share_networks?project_id=fake', use_admin_context=True) result = self.controller.index(req) filters = {'project_id': 'fake'} db_api.share_network_get_all_by_filter.assert_called_once_with( req.environ['manila.context'], filters=filters) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_sn_with_ss_shortened) @mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock()) def test_index_filter_by_ss_and_project_id_admin_context(self): db_api.share_network_get_all_by_filter.return_value = [ fake_share_network_with_ss ] req = fakes.HTTPRequest.blank( '/share_networks?security_service_id=fake-ss-id&project_id=fake', use_admin_context=True) result = self.controller.index(req) filters = {'project_id': 'fake', 'security_service_id': 'fake-ss-id'} db_api.share_network_get_all_by_filter.assert_called_once_with( req.environ['manila.context'], filters=filters) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( 
result[share_networks.RESOURCES_NAME][0], fake_sn_with_ss_shortened) @ddt.data(('name=fo', 0), ('description=d', 0), ('name=foo&description=d', 0), ('name=foo', 1), ('description=ds', 1), ('name~=foo&description~=ds', 2), ('name=foo&description~=ds', 1), ('name~=foo&description=ds', 1)) @ddt.unpack @mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock()) def test_index_filter_by_name_and_description( self, filter, share_network_number): fake_objs = [{'name': 'fo2', 'description': 'd2', 'id': 'fake1'}, {'name': 'foo', 'description': 'ds', 'id': 'fake2'}, {'name': 'foo1', 'description': 'ds1', 'id': 'fake3'}] db_api.share_network_get_all_by_filter.return_value = fake_objs req = fakes.HTTPRequest.blank( '/share_networks?' + filter, use_admin_context=True, version='2.36') result = self.controller.index(req) filters = {'project_id': self.context.project_id} db_api.share_network_get_all_by_filter.assert_called_with( req.environ['manila.context'], filters=filters) self.assertEqual(share_network_number, len(result[share_networks.RESOURCES_NAME])) if share_network_number > 0: self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_objs[1]) if share_network_number > 1: self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][1], fake_objs[2]) @mock.patch.object(db_api, 'share_network_get_all_by_filter', mock.Mock()) def test_index_all_filter_opts(self): valid_filter_opts = { 'created_before': '2001-02-02', 'created_since': '1999-01-01', 'name': 'test-sn' } db_api.share_network_get_all_by_filter.return_value = [ fake_share_network_with_ss] query_string = '/share-networks?' 
+ parse.urlencode(sorted( [(k, v) for (k, v) in list(valid_filter_opts.items())])) for use_admin_context in [True, False]: req = fakes.HTTPRequest.blank(query_string, use_admin_context=use_admin_context) result = self.controller.index(req) parsed_created_before = timeutils.parse_strtime( valid_filter_opts['created_before'], fmt="%Y-%m-%d") parsed_created_since = timeutils.parse_strtime( valid_filter_opts['created_since'], fmt="%Y-%m-%d") filters = {'created_before': parsed_created_before, 'created_since': parsed_created_since} if use_admin_context: filters['project_id'] = 'fake' db_api.share_network_get_all_by_filter.assert_called_with( req.environ['manila.context'], filters=filters) self.assertEqual(1, len(result[share_networks.RESOURCES_NAME])) self._check_share_network_view_shortened( result[share_networks.RESOURCES_NAME][0], fake_sn_with_ss_shortened) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_nominal(self): share_nw = 'fake network id' db_api.share_network_get.return_value = fake_share_network body = {share_networks.RESOURCE_NAME: {'name': 'new name'}} with mock.patch.object(db_api, 'share_network_update', mock.Mock(return_value=fake_share_network)): result = self.controller.update(self.req, share_nw, body) db_api.share_network_update.assert_called_once_with( self.req.environ['manila.context'], share_nw, body[share_networks.RESOURCE_NAME]) self._check_share_network_view( result[share_networks.RESOURCE_NAME], fake_share_network) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_not_found(self): share_nw = 'fake network id' db_api.share_network_get.side_effect = exception.ShareNetworkNotFound( share_network_id=share_nw) self.assertRaises(webob_exc.HTTPNotFound, self.controller.update, self.req, share_nw, self.body) def test_update_invalid_body(self): self.assertRaises(webob_exc.HTTPUnprocessableEntity, self.controller.update, self.req, 'fake_sn_id', None) @mock.patch.object(db_api, 'share_network_get', 
mock.Mock()) def test_update_invalid_key_in_use(self): share_nw = fake_share_network.copy() subnet = fake_share_network_subnet.copy() subnet['share_servers'] = [{'id': 1}] share_nw['share_network_subnets'] = [subnet] db_api.share_network_get.return_value = share_nw body = { share_networks.RESOURCE_NAME: { 'name': 'new name', 'user_id': 'new id', }, } self.assertRaises(webob_exc.HTTPForbidden, self.controller.update, self.req, share_nw['id'], body) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_update_valid_keys_in_use(self): share_nw = fake_share_network.copy() subnet = fake_share_network_subnet.copy() subnet['share_servers'] = [{'id': 1}] share_nw['share_network_subnets'] = [subnet] updated_share_nw = share_nw.copy() updated_share_nw['name'] = 'new name' updated_share_nw['description'] = 'new description' db_api.share_network_get.return_value = share_nw db_api.share_network_update.return_value = updated_share_nw body = { share_networks.RESOURCE_NAME: { 'name': updated_share_nw['name'], 'description': updated_share_nw['description'], }, } self.controller.update(self.req, share_nw['id'], body) db_api.share_network_get.assert_called_once_with(self.context, share_nw['id']) db_api.share_network_update.assert_called_once_with( self.context, share_nw['id'], body['share_network']) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) def test_update_db_api_exception(self): share_nw = 'fake network id' db_api.share_network_get.return_value = fake_share_network self.mock_object( self.controller, '_share_network_subnets_contain_share_servers', mock.Mock(return_value=False)) self.mock_object(db_api, 'share_network_subnet_get_default_subnets', mock.Mock(return_value=[fake_share_network_subnet])) self.mock_object(db_api, 'share_network_subnet_update') body = {share_networks.RESOURCE_NAME: {'neutron_subnet_id': 'new subnet'}} with mock.patch.object(db_api, 'share_network_update', 
mock.Mock(side_effect=db_exception.DBError)): self.assertRaises(webob_exc.HTTPBadRequest, self.controller.update, self.req, share_nw, body) (db_api.share_network_subnet_get_default_subnets. assert_called_once_with(self.context, share_nw)) db_api.share_network_subnet_update.assert_called_once_with( self.context, fake_share_network_subnet['id'], body['share_network']) @ddt.data((webob_exc.HTTPBadRequest, 1, None, 'new subnet'), (webob_exc.HTTPBadRequest, 2, None, 'new subnet'), (webob_exc.HTTPBadRequest, None, 'neutron net', None)) @ddt.unpack def test_update_default_subnet_errors(self, exception_to_raise, get_default_subnet_return_length, neutron_net_id, neutron_subnet_id): share_nw = 'fake network id' self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) self.mock_object( self.controller, '_share_network_subnets_contain_share_servers', mock.Mock(return_value=False)) self.mock_object(db_api, 'share_network_subnet_get_default_subnets', mock.Mock(return_value=None)) if get_default_subnet_return_length: fake_subnet = copy.deepcopy(fake_share_network_subnet) fake_subnet['neutron_net_id'] = None fake_subnet['neutron_subnet_id'] = None if get_default_subnet_return_length == 1: (db_api.share_network_subnet_get_default_subnets. return_value) = [fake_subnet] elif get_default_subnet_return_length == 2: (db_api.share_network_subnet_get_default_subnets. return_value) = [fake_subnet, fake_subnet] body = { share_networks.RESOURCE_NAME: { 'neutron_net_id': neutron_net_id, 'neutron_subnet_id': neutron_subnet_id } } self.assertRaises(exception_to_raise, self.controller.update, self.req, share_nw, body) (db_api.share_network_subnet_get_default_subnets. 
assert_called_once_with(self.context, share_nw)) @ddt.data(*set(("1.0", "2.25", "2.26", api_version._MAX_API_VERSION))) def test_add_security_service(self, microversion): share_network_id = 'fake network id' security_service_id = 'fake ss id' self.mock_object( self.controller, '_share_network_subnets_contain_share_servers') body = {'add_security_service': {'security_service_id': security_service_id}} req = fakes.HTTPRequest.blank('/share-networks', version=microversion) with mock.patch.object(self.controller, 'add_security_service', mock.Mock()): self.controller.add_security_service(req, share_network_id, body) self.controller.add_security_service.assert_called_once_with( req, share_network_id, body) def _setup_add_sec_services_with_servers_tests( self, share_network, security_service, network_is_active=True, version=ADD_UPDATE_SEC_SERVICE_VERSION, share_api_update_services_action=mock.Mock()): self.mock_object( db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db_api, 'security_service_get', mock.Mock(return_value=security_service)) self.mock_object( self.controller, '_share_network_subnets_contain_share_servers', mock.Mock(return_value=True)) self.mock_object( self.controller.share_api, 'update_share_network_security_service', share_api_update_services_action) self.mock_object( common, 'check_share_network_is_active', mock.Mock(return_value=network_is_active)) self.mock_object(db_api, 'share_network_add_security_service') self.mock_object(self.controller._view_builder, 'build_share_network') body = { 'add_security_service': { 'security_service_id': security_service['id'] } } req = fakes.HTTPRequest.blank( '/add_security_service', version=version, use_admin_context=True) context = req.environ['manila.context'] return req, context, body def test_add_security_service_with_servers(self): security_service = db_utils.create_security_service() security_service_id = security_service['id'] share_network = db_utils.create_share_network() 
share_network_id = share_network['id'] req, context, body = self._setup_add_sec_services_with_servers_tests( share_network, security_service) self.controller.add_security_service(req, share_network_id, body) db_api.security_service_get.assert_called_once_with( context, security_service_id) (self.controller._share_network_subnets_contain_share_servers. assert_called_once_with(share_network)) db_api.share_network_get.assert_called_once_with( context, share_network_id) (self.controller.share_api.update_share_network_security_service. assert_called_once_with(context, share_network, security_service)) def test_add_security_service_with_server_invalid_version(self): security_service = db_utils.create_security_service() security_service_id = security_service['id'] share_network = db_utils.create_share_network() share_network_id = share_network['id'] req, context, body = self._setup_add_sec_services_with_servers_tests( share_network, security_service, version='2.59') self.assertRaises( webob_exc.HTTPForbidden, self.controller.add_security_service, req, share_network_id, body ) db_api.security_service_get.assert_called_once_with( context, security_service_id) (self.controller._share_network_subnets_contain_share_servers. 
assert_called_once_with(share_network)) db_api.share_network_get.assert_called_once_with( context, share_network_id) @ddt.data( (exception.ServiceIsDown(message='fake'), webob_exc.HTTPConflict), (exception.InvalidShareNetwork(message='fake'), webob_exc.HTTPBadRequest) ) @ddt.unpack def test_add_security_service_with_server_update_failed( self, side_effect, exception_to_raise): security_service = db_utils.create_security_service() security_service_id = security_service['id'] share_network_id = fake_share_network['id'] fake_share_network['security_service_update_support'] = True action = mock.Mock(side_effect=side_effect) req, context, body = self._setup_add_sec_services_with_servers_tests( fake_share_network, security_service, share_api_update_services_action=action) self.assertRaises( exception_to_raise, self.controller.add_security_service, req, share_network_id, body ) db_api.security_service_get.assert_called_once_with( context, security_service_id) db_api.share_network_get.assert_called_once_with( context, share_network_id) (self.controller.share_api.update_share_network_security_service. 
assert_called_once_with(context, fake_share_network, security_service)) @ddt.data( (exception.NotFound(message='fake'), webob_exc.HTTPNotFound), (exception.ShareNetworkSecurityServiceAssociationError(message='fake'), webob_exc.HTTPBadRequest)) @ddt.unpack def test_action_add_security_service_conflict(self, captured_exception, expected_raised_exception): share_network = fake_share_network.copy() share_network['security_services'] = [{'id': 'security_service_1', 'type': 'ldap'}] security_service = {'id': ' security_service_2', 'type': 'ldap'} body = {'add_security_service': {'security_service_id': security_service['id']}} request = fakes.HTTPRequest.blank( '/share-networks', use_admin_context=True) self.mock_object( self.controller, '_share_network_subnets_contain_share_servers', mock.Mock(return_value=False)) update_sec_serv_mock = self.mock_object( self.controller.share_api, 'update_share_network_security_service') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(db_api, 'security_service_get', mock.Mock(return_value=security_service)) self.mock_object(share_networks.policy, 'check_policy') self.mock_object( db_api, 'share_network_add_security_service', mock.Mock(side_effect=captured_exception)) db_api.security_service_get.return_value = security_service db_api.share_network_get.return_value = share_network self.assertRaises(expected_raised_exception, self.controller.add_security_service, request, share_network['id'], body) db_api.share_network_get.assert_called_once_with( request.environ['manila.context'], share_network['id']) db_api.security_service_get.assert_called_once_with( request.environ['manila.context'], security_service['id']) share_networks.policy.check_policy.assert_called_once_with( request.environ['manila.context'], share_networks.RESOURCE_NAME, 'add_security_service', target_obj=share_network) update_sec_serv_mock.assert_called_once_with( request.environ['manila.context'], share_network, 
security_service) def _setup_update_sec_services_with_servers_tests( self, share_network, security_services, version=ADD_UPDATE_SEC_SERVICE_VERSION, share_api_update_services_action=mock.Mock()): self.mock_object( db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db_api, 'security_service_get', mock.Mock(side_effect=security_services)) self.mock_object( self.controller.share_api, 'update_share_network_security_service', share_api_update_services_action) self.mock_object(self.controller._view_builder, 'build_share_network') self.mock_object(db_api, 'share_network_update_security_service') body = { 'update_security_service': { 'current_service_id': security_services[0]['id'], 'new_service_id': security_services[1]['id'] } } req = fakes.HTTPRequest.blank( '/add_security_service', version=version, use_admin_context=True) context = req.environ['manila.context'] return req, context, body def test_update_security_service_service_not_found(self): security_services = [ db_utils.create_security_service() for i in range(2)] share_network = copy.deepcopy(fake_share_network) share_network['security_service_update_support'] = True req, context, body = ( self._setup_update_sec_services_with_servers_tests( share_network, security_services)) db_api.security_service_get.side_effect = exception.NotFound('fake') self.assertRaises( webob_exc.HTTPBadRequest, self.controller.update_security_service, req, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id']) db_api.security_service_get.assert_has_calls( [mock.call(context, security_services[0]['id'])] ) @ddt.data( (exception.ServiceIsDown(message='fake'), webob_exc.HTTPConflict), (exception.InvalidShareNetwork(message='fake'), webob_exc.HTTPBadRequest)) @ddt.unpack def test_update_security_service_share_api_failure(self, side_effect, exc): security_services = [ db_utils.create_security_service() for i in range(2)] share_network = 
copy.deepcopy(fake_share_network) share_network['security_service_update_support'] = True req, context, body = ( self._setup_update_sec_services_with_servers_tests( share_network, security_services, share_api_update_services_action=mock.Mock( side_effect=side_effect))) self.assertRaises( exc, self.controller.update_security_service, req, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id']) db_api.security_service_get.assert_has_calls( [mock.call(context, security_services[0]['id']), mock.call(context, security_services[1]['id'])] ) def test_update_security_service(self): security_services = [ db_utils.create_security_service() for i in range(2)] share_network = copy.copy(fake_share_network) share_network['security_service_update_support'] = True req, context, body = ( self._setup_update_sec_services_with_servers_tests( share_network, security_services)) self.controller.update_security_service( req, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id']) db_api.security_service_get.assert_has_calls( [mock.call(context, security_services[0]['id']), mock.call(context, security_services[1]['id'])] ) (self.controller.share_api.update_share_network_security_service. 
assert_called_once_with( context, share_network, security_services[1], current_security_service=security_services[0])) db_api.share_network_update_security_service.assert_called_once_with( context, share_network['id'], security_services[0]['id'], security_services[1]['id']) @ddt.data(*set(("1.0", "2.25", "2.26", api_version._MAX_API_VERSION))) def test_action_remove_security_service(self, microversion): share_network_id = 'fake network id' security_service_id = 'fake ss id' self.mock_object( self.controller, '_share_network_subnets_contain_share_servers') body = {'remove_security_service': {'security_service_id': security_service_id}} req = fakes.HTTPRequest.blank('/share-networks', version=microversion) with mock.patch.object(self.controller, 'remove_security_service', mock.Mock()): self.controller.remove_security_service( req, share_network_id, body) self.controller.remove_security_service.assert_called_once_with( req, share_network_id, body) @mock.patch.object(db_api, 'share_network_get', mock.Mock()) @mock.patch.object(share_networks.policy, 'check_policy', mock.Mock()) def test_action_remove_security_service_forbidden(self): share_network = fake_share_network.copy() subnet = fake_share_network_subnet.copy() subnet['share_servers'] = ['foo'] share_network['share_network_subnets'] = [subnet] db_api.share_network_get.return_value = share_network self.mock_object( self.controller, '_share_network_subnets_contain_share_servers', mock.Mock(return_value=True)) body = { 'remove_security_service': { 'security_service_id': 'fake id', }, } self.assertRaises(webob_exc.HTTPForbidden, self.controller.remove_security_service, self.req, share_network['id'], body) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_network['id']) share_networks.policy.check_policy.assert_called_once_with( self.req.environ['manila.context'], share_networks.RESOURCE_NAME, 'remove_security_service', target_obj=share_network) 
@mock.patch.object(share_networks.policy, 'check_policy', mock.Mock()) def test_action_remove_security_service_share_no_share_servers(self): share_network = fake_share_network.copy() subnet = fake_share_network_subnet.copy() share_network['share_network_subnets'] = [subnet] mock_share_net_get = self.mock_object( db_api, 'share_network_get', mock.Mock(return_value=share_network)) mock_contains_servers = self.mock_object( self.controller, '_share_network_subnets_contain_share_servers', mock.Mock(return_value=False)) mock_api_remove = self.mock_object( db_api, 'share_network_remove_security_service', mock.Mock(return_value=share_network)) mock_view = self.mock_object( self.controller._view_builder, 'build_share_network', mock.Mock(return_value='fake_view')) body = { 'remove_security_service': { 'security_service_id': 'fake_ss_id', }, } view = self.controller.remove_security_service( self.req, share_network['id'], body) self.assertEqual('fake_view', view) mock_share_net_get.assert_called_once_with( self.req.environ['manila.context'], share_network['id']) share_networks.policy.check_policy.assert_called_once_with( self.req.environ['manila.context'], share_networks.RESOURCE_NAME, 'remove_security_service', target_obj=share_network) mock_contains_servers.assert_called_once_with(share_network) mock_api_remove.assert_called_once_with( self.req.environ['manila.context'], share_network['id'], 'fake_ss_id') mock_view.assert_called_once_with(self.req, share_network) @ddt.data( (KeyError, webob_exc.HTTPBadRequest), (exception.NotFound(message='fake'), webob_exc.HTTPNotFound), (exception.ShareNetworkSecurityServiceDissociationError( message='fake'), webob_exc.HTTPBadRequest)) @ddt.unpack def test_action_remove_security_service_share_api_exception(self, api_exp, expect_exp): share_network = fake_share_network.copy() subnet = fake_share_network_subnet.copy() share_network['share_network_subnets'] = [subnet] mock_policy = self.mock_object( share_networks.policy, 'check_policy', 
mock.Mock()) mock_share_net_get = self.mock_object( db_api, 'share_network_get', mock.Mock(return_value=share_network)) mock_contains_servers = self.mock_object( self.controller, '_share_network_subnets_contain_share_servers', mock.Mock(return_value=False)) mock_api_remove = self.mock_object( db_api, 'share_network_remove_security_service', mock.Mock(side_effect=api_exp)) body = { 'remove_security_service': { 'security_service_id': 'fake_ss_id', }, } self.assertRaises(expect_exp, self.controller.remove_security_service, self.req, share_network['id'], body) mock_share_net_get.assert_called_once_with( self.req.environ['manila.context'], share_network['id']) db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_network['id']) mock_policy.assert_called_once_with( self.req.environ['manila.context'], share_networks.RESOURCE_NAME, 'remove_security_service', target_obj=share_network) mock_contains_servers.assert_called_once_with(share_network) mock_api_remove.assert_called_once_with( self.req.environ['manila.context'], share_network['id'], 'fake_ss_id') @ddt.data('add_security_service', 'remove_security_service') def test_action_security_service_contains_share_servers(self, action): share_network = fake_share_network.copy() security_service = {'id': ' security_service_2', 'type': 'ldap'} method_to_call = ( self.controller.add_security_service if action == 'add_security_service' else self.controller.remove_security_service) body = { action: { 'security_service_id': security_service['id'] } } self.mock_object(share_networks.policy, 'check_policy') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(db_api, 'security_service_get', mock.Mock(return_value=security_service)) self.mock_object( self.controller, '_share_network_subnets_contain_share_servers', mock.Mock(return_value=True)) self.assertRaises(webob_exc.HTTPForbidden, method_to_call, self.req, share_network['id'], body) 
db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], share_network['id']) share_networks.policy.check_policy.assert_called_once_with( self.req.environ['manila.context'], share_networks.RESOURCE_NAME, action, target_obj=share_network) def _setup_data_for_update_tests(self, is_check=False): security_services = [ db_utils.create_security_service() for i in range(2)] share_network = db_utils.create_share_network() action = ('update_security_service_check' if is_check else 'update_security_service') body = { action: { 'current_service_id': security_services[0]['id'], 'new_service_id': security_services[1]['id'], } } if is_check: body[action]['reset_operation'] = False request = fakes.HTTPRequest.blank( '/v2/fake/share-networks/%s/action' % share_network['id'], use_admin_context=True, version='2.63') return security_services, share_network, body, request def test_check_update_security_service_not_found(self): security_services, share_network, body, request = ( self._setup_data_for_update_tests(is_check=True)) context = request.environ['manila.context'] self.mock_object(share_networks.policy, 'check_policy') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(db_api, 'security_service_get', mock.Mock(side_effect=exception.NotFound())) self.assertRaises( webob_exc.HTTPBadRequest, self.controller.check_update_security_service, request, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id'] ) db_api.security_service_get.assert_called_once_with( context, security_services[0]['id']) def test_check_update_security_service(self): security_services, share_network, body, request = ( self._setup_data_for_update_tests(is_check=True)) context = request.environ['manila.context'] share_api_return = {'fake_key': 'fake_value'} self.mock_object(share_networks.policy, 'check_policy') self.mock_object(db_api, 'share_network_get', 
mock.Mock(return_value=share_network)) self.mock_object( db_api, 'security_service_get', mock.Mock( side_effect=[security_services[0], security_services[1]])) self.mock_object( self.controller.share_api, 'check_share_network_security_service_update', mock.Mock(return_vale=share_api_return)) self.mock_object( self.controller._view_builder, 'build_security_service_update_check') self.controller.check_update_security_service( request, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id']) db_api.security_service_get.assert_has_calls( [mock.call(context, security_services[0]['id']), mock.call(context, security_services[1]['id'])]) (self.controller.share_api.check_share_network_security_service_update. assert_called_once_with( context, share_network, security_services[1], current_security_service=security_services[0], reset_operation=False)) @ddt.data( (exception.ServiceIsDown(message='fake'), webob_exc.HTTPConflict), (exception.InvalidShareNetwork(message='fake'), webob_exc.HTTPBadRequest), (exception.InvalidSecurityService(message='fake'), webob_exc.HTTPConflict)) @ddt.unpack def test_check_update_security_service_share_api_failed( self, captured_exception, exception_to_be_raised): security_services, share_network, body, request = ( self._setup_data_for_update_tests(is_check=True)) context = request.environ['manila.context'] self.mock_object(share_networks.policy, 'check_policy') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db_api, 'security_service_get', mock.Mock( side_effect=[security_services[0], security_services[1]])) self.mock_object( self.controller.share_api, 'check_share_network_security_service_update', mock.Mock(side_effect=captured_exception)) self.assertRaises( exception_to_be_raised, self.controller.check_update_security_service, request, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id']) 
db_api.security_service_get.assert_has_calls( [mock.call(context, security_services[0]['id']), mock.call(context, security_services[1]['id'])]) (self.controller.share_api.check_share_network_security_service_update. assert_called_once_with( context, share_network, security_services[1], current_security_service=security_services[0], reset_operation=False)) def _setup_data_for_check_add_tests(self): security_service = db_utils.create_security_service() share_network = db_utils.create_share_network() body = { 'add_security_service_check': { 'reset_operation': False, 'security_service_id': security_service['id'], } } request = fakes.HTTPRequest.blank( '/share-networks', use_admin_context=True, version='2.63') return security_service, share_network, body, request def test_check_add_security_service_not_found(self): security_service, share_network, body, request = ( self._setup_data_for_check_add_tests()) context = request.environ['manila.context'] self.mock_object(share_networks.policy, 'check_policy') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(db_api, 'security_service_get', mock.Mock(side_effect=exception.NotFound())) self.assertRaises( webob_exc.HTTPBadRequest, self.controller.check_add_security_service, request, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id'] ) db_api.security_service_get.assert_called_once_with( context, security_service['id'], project_only=True) def test_check_add_security_service(self): security_service, share_network, body, request = ( self._setup_data_for_check_add_tests()) context = request.environ['manila.context'] share_api_return = {'fake_key': 'fake_value'} self.mock_object(share_networks.policy, 'check_policy') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db_api, 'security_service_get', mock.Mock(return_value=security_service)) self.mock_object( 
self.controller.share_api, 'check_share_network_security_service_update', mock.Mock(return_vale=share_api_return)) self.mock_object( self.controller._view_builder, 'build_security_service_update_check') self.controller.check_add_security_service( request, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id']) db_api.security_service_get.assert_called_once_with( context, security_service['id'], project_only=True) (self.controller.share_api.check_share_network_security_service_update. assert_called_once_with( context, share_network, security_service, reset_operation=False)) @ddt.data( (exception.ServiceIsDown(message='fake'), webob_exc.HTTPConflict), (exception.InvalidShareNetwork(message='fake'), webob_exc.HTTPBadRequest), (exception.InvalidSecurityService(message='fake'), webob_exc.HTTPConflict)) @ddt.unpack def test_check_add_security_service_share_api_failed( self, captured_exception, exception_to_be_raised): security_service, share_network, body, request = ( self._setup_data_for_check_add_tests()) context = request.environ['manila.context'] self.mock_object(share_networks.policy, 'check_policy') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db_api, 'security_service_get', mock.Mock(return_value=security_service)) self.mock_object( self.controller.share_api, 'check_share_network_security_service_update', mock.Mock(side_effect=captured_exception)) self.assertRaises( exception_to_be_raised, self.controller.check_add_security_service, request, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id']) db_api.security_service_get.assert_called_once_with( context, security_service['id'], project_only=True) (self.controller.share_api.check_share_network_security_service_update. 
assert_called_once_with( context, share_network, security_service, reset_operation=False)) @ddt.data( (exception.NotFound(message='fake'), webob_exc.HTTPBadRequest)) @ddt.unpack def test_check_add_security_service_failed_project_id( self, captured_exception, exception_to_be_raised): security_service, share_network, body, request = ( self._setup_data_for_check_add_tests()) share_network = fake_share_network context = request.environ['manila.context'] share_api_return = {'fake_key': 'fake_value'} self.mock_object(share_networks.policy, 'check_policy') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db_api, 'security_service_get', mock.Mock(side_effect=captured_exception)) self.mock_object( self.controller.share_api, 'check_share_network_security_service_update', mock.Mock(return_vale=share_api_return)) self.mock_object( self.controller._view_builder, 'build_security_service_update_check') self.assertRaises( exception_to_be_raised, self.controller.check_add_security_service, request, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id']) db_api.security_service_get.assert_called_once_with( context, security_service['id'], project_only=True) @ddt.data( (exception.ServiceIsDown(message='fake'), webob_exc.HTTPConflict), (exception.InvalidShareNetwork(message='fake'), webob_exc.HTTPBadRequest), (exception.InvalidSecurityService(message='fake'), webob_exc.HTTPConflict)) @ddt.unpack def test_update_security_service_share_api_failed( self, captured_exception, exception_to_be_raised): security_services, share_network, body, request = ( self._setup_data_for_update_tests()) context = request.environ['manila.context'] self.mock_object(share_networks.policy, 'check_policy') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db_api, 'security_service_get', mock.Mock( side_effect=[security_services[0], security_services[1]])) 
self.mock_object( self.controller.share_api, 'update_share_network_security_service', mock.Mock(side_effect=captured_exception)) self.assertRaises(exception_to_be_raised, self.controller.update_security_service, request, share_network['id'], body) db_api.share_network_get.assert_called_once_with( context, share_network['id']) db_api.security_service_get.assert_has_calls( [mock.call(context, security_services[0]['id']), mock.call(context, security_services[1]['id'])]) (self.controller.share_api.update_share_network_security_service. assert_called_once_with( context, share_network, security_services[1], current_security_service=security_services[0])) def test_reset_status(self): share_network = db_utils.create_share_network() request = fakes.HTTPRequest.blank( '/v2/fake/share-networks/%s/action' % share_network['id'], use_admin_context=True, version='2.63') self.mock_object(db_api, 'share_network_update') status = {'status': 'active'} body = {'reset_status': status} response = self.controller.reset_status(request, share_network['id'], body) self.assertEqual(202, response.status_int) db_api.share_network_update.assert_called_once_with( request.environ['manila.context'], share_network['id'], {'status': 'active'}) @ddt.data([], ['fake_server']) def test_share_network_subnet_create_check(self, servers): body = { 'share_network_subnet_create_check': { 'reset_operation': False, 'availability_zone': 'fake_az', } } request = fakes.HTTPRequest.blank( '/share-networks', use_admin_context=True, version='2.70') context = request.environ['manila.context'] share_net = 'fake_net' subnet = {'share_servers': servers} existing_subnets = [subnet] mock_validate_subnet = self.mock_object( common, 'validate_subnet_create', mock.Mock(return_value=(share_net, existing_subnets))) share_api_return = { 'compatible': not bool(servers), 'hosts_check_result': {} } mock_check_update = self.mock_object( self.controller.share_api, 'check_update_share_server_network_allocations', 
mock.Mock(return_value=share_api_return)) subnet_view = 'fake_subnet' mock_view = self.mock_object( self.controller._view_builder, 'build_share_network_subnet_create_check', mock.Mock(return_value=subnet_view)) net_id = 'fake_net_id' response = self.controller.share_network_subnet_create_check( request, net_id, body) self.assertEqual(subnet_view, response) data = body['share_network_subnet_create_check'] mock_validate_subnet.assert_called_once_with( context, net_id, data, True) if servers: data['share_servers'] = servers mock_check_update.assert_called_once_with( context, share_net, data, False) else: mock_check_update.assert_not_called() mock_view.assert_called_once_with(request, share_api_return) @ddt.data( (exception.ServiceIsDown(message='fake'), webob_exc.HTTPInternalServerError), (exception.InvalidShareNetwork(message='fake'), webob_exc.HTTPBadRequest)) @ddt.unpack def test_share_network_subnet_create_check_api_failed( self, captured_exception, exception_to_be_raised): body = { 'share_network_subnet_create_check': { 'reset_operation': False, 'availability_zone': 'fake_az', } } request = fakes.HTTPRequest.blank( '/share-networks', use_admin_context=True, version='2.70') share_net = 'fake_net' subnet = {'share_servers': 'fake_server'} existing_subnets = [subnet] self.mock_object( common, 'validate_subnet_create', mock.Mock(return_value=(share_net, existing_subnets))) self.mock_object( self.controller.share_api, 'check_update_share_server_network_allocations', mock.Mock(side_effect=captured_exception)) self.assertRaises(exception_to_be_raised, self.controller.share_network_subnet_create_check, request, 'fake_net_id', body) def test_share_network_sec_service_delete(self): self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network_with_ss)) self.mock_object(db_api, 'share_network_delete') self.mock_object(db_api, 'share_network_remove_security_service') self.controller.delete(self.req, fake_share_network_with_ss['id']) 
db_api.share_network_get.assert_called_once_with( self.req.environ['manila.context'], fake_share_network_with_ss['id']) db_api.share_network_remove_security_service.assert_called_once_with( self.req.environ['manila.context'], fake_share_network_with_ss['id'], fake_share_network_with_ss['security_services'][0]['id']) db_api.share_network_delete.assert_called_once_with( self.req.environ['manila.context'], fake_share_network_with_ss['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_replica_export_locations.py0000664000175000017500000002045500000000000026455 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from webob import exc from manila.api.v2 import share_replica_export_locations as export_locations from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import db_utils GRADUATION_VERSION = '2.56' @ddt.ddt class ShareReplicaExportLocationsAPITest(test.TestCase): def _get_request(self, version="2.47", use_admin_context=False): req = fakes.HTTPRequest.blank( '/v2/share-replicas/%s/export-locations' % self.active_replica_id, version=version, use_admin_context=use_admin_context, experimental=True) return req def setUp(self): super(ShareReplicaExportLocationsAPITest, self).setUp() self.controller = ( export_locations.ShareReplicaExportLocationController()) self.resource_name = 'share_replica_export_location' self.ctxt = context.RequestContext('fake', 'fake') self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.share = db_utils.create_share( replication_type=constants.REPLICATION_TYPE_READABLE, replica_state=constants.REPLICA_STATE_ACTIVE) self.active_replica_id = self.share.instance.id self.req = self._get_request() exports = [ {'path': 'myshare.mydomain/active-replica-exp1', 'is_admin_only': False}, {'path': 'myshare.mydomain/active-replica-exp2', 'is_admin_only': False}, ] db.export_locations_update( self.ctxt, self.active_replica_id, exports) # Replicas self.share_replica2 = db_utils.create_share_replica( share_id=self.share.id, replica_state=constants.REPLICA_STATE_IN_SYNC) self.share_replica3 = db_utils.create_share_replica( share_id=self.share.id, replica_state=constants.REPLICA_STATE_OUT_OF_SYNC) replica2_exports = [ {'path': 'myshare.mydomain/insync-replica-exp', 'is_admin_only': False}, {'path': 'myshare.mydomain/insync-replica-exp2', 'is_admin_only': False} ] replica3_exports = [ {'path': 
'myshare.mydomain/outofsync-replica-exp', 'is_admin_only': False}, {'path': 'myshare.mydomain/outofsync-replica-exp2', 'is_admin_only': False} ] db.export_locations_update( self.ctxt, self.share_replica2.id, replica2_exports) db.export_locations_update( self.ctxt, self.share_replica3.id, replica3_exports) @ddt.data(('user', '2.47'), ('admin', GRADUATION_VERSION)) @ddt.unpack def test_list_and_show(self, role, microversion): summary_keys = [ 'id', 'path', 'replica_state', 'availability_zone', 'preferred' ] admin_summary_keys = summary_keys + [ 'share_instance_id', 'is_admin_only' ] detail_keys = summary_keys + ['created_at', 'updated_at'] admin_detail_keys = admin_summary_keys + ['created_at', 'updated_at'] self._test_list_and_show(role, summary_keys, detail_keys, admin_summary_keys, admin_detail_keys, microversion=microversion) def _test_list_and_show(self, role, summary_keys, detail_keys, admin_summary_keys, admin_detail_keys, microversion='2.47'): req = self._get_request(version=microversion, use_admin_context=(role == 'admin')) for replica_id in (self.active_replica_id, self.share_replica2.id, self.share_replica3.id): index_result = self.controller.index(req, replica_id) self.assertIn('export_locations', index_result) self.assertEqual(1, len(index_result)) self.assertEqual(2, len(index_result['export_locations'])) for index_el in index_result['export_locations']: self.assertIn('id', index_el) show_result = self.controller.show( req, replica_id, index_el['id']) self.assertIn('export_location', show_result) self.assertEqual(1, len(show_result)) show_el = show_result['export_location'] # Check summary keys in index result & detail keys in show if role == 'admin': self.assertEqual(len(admin_summary_keys), len(index_el)) for key in admin_summary_keys: self.assertIn(key, index_el) self.assertEqual(len(admin_detail_keys), len(show_el)) for key in admin_detail_keys: self.assertIn(key, show_el) else: self.assertEqual(len(summary_keys), len(index_el)) for key in 
summary_keys: self.assertIn(key, index_el) self.assertEqual(len(detail_keys), len(show_el)) for key in detail_keys: self.assertIn(key, show_el) # Ensure keys common to index & show have matching values for key in summary_keys: self.assertEqual(index_el[key], show_el[key]) def test_list_and_show_with_non_replicas(self): non_replicated_share = db_utils.create_share() instance_id = non_replicated_share.instance.id exports = [ {'path': 'myshare.mydomain/non-replicated-share', 'is_admin_only': False}, {'path': 'myshare.mydomain/non-replicated-share-2', 'is_admin_only': False}, ] db.export_locations_update(self.ctxt, instance_id, exports) updated_exports = db.export_location_get_all_by_share_id( self.ctxt, non_replicated_share.id) self.assertRaises(exc.HTTPNotFound, self.controller.index, self.req, instance_id) for export in updated_exports: self.assertRaises(exc.HTTPNotFound, self.controller.show, self.req, instance_id, export['id']) def test_list_export_locations_share_replica_not_found(self): self.assertRaises( exc.HTTPNotFound, self.controller.index, self.req, 'non-existent-share-replica-id') def test_show_export_location_share_replica_not_found(self): index_result = self.controller.index(self.req, self.active_replica_id) el_id = index_result['export_locations'][0]['id'] self.assertRaises( exc.HTTPNotFound, self.controller.show, self.req, 'non-existent-share-replica-id', el_id) self.assertRaises( exc.HTTPNotFound, self.controller.show, self.req, self.active_replica_id, 'non-existent-export-location-id') @ddt.data('1.0', '2.0', '2.46') def test_list_with_unsupported_version(self, version): self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.index, self._get_request(version), self.active_replica_id) @ddt.data('1.0', '2.0', '2.46') def test_show_with_unsupported_version(self, version): index_result = self.controller.index(self.req, self.active_replica_id) self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.show, 
self._get_request(version), self.active_replica_id, index_result['export_locations'][0]['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_replicas.py0000664000175000017500000012417700000000000023172 0ustar00zuulzuul00000000000000# Copyright 2015 Goutham Pacha Ravi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import copy import ddt from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import strutils from webob import exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.v2 import share_replicas from manila.common import constants from manila import context from manila import exception from manila import policy from manila import share from manila import test from manila.tests.api import fakes from manila.tests import db_utils from manila.tests import fake_share CONF = cfg.CONF CAST_RULES_READONLY_VERSION = '2.30' PRE_GRADUATION_VERSION = '2.55' GRADUATION_VERSION = '2.56' PROMOTE_QUIESCE_WAIT_VERSION = '2.75' @ddt.ddt class ShareReplicasApiTest(test.TestCase): """Share Replicas API Test Cases.""" def setUp(self): super(ShareReplicasApiTest, self).setUp() self.controller = share_replicas.ShareReplicationController() self.resource_name = self.controller.resource_name self.api_version = share_replicas.MIN_SUPPORTED_API_VERSION 
self.replicas_req = fakes.HTTPRequest.blank( '/share-replicas', version=self.api_version, experimental=True) self.member_context = context.RequestContext('fake', 'fake') self.replicas_req.environ['manila.context'] = self.member_context self.replicas_req_admin = fakes.HTTPRequest.blank( '/share-replicas', version=self.api_version, experimental=True, use_admin_context=True) self.admin_context = self.replicas_req_admin.environ['manila.context'] self.mock_policy_check = self.mock_object(policy, 'check_policy') self.fake_share_network = { 'id': 'fake network id', 'project_id': 'fake project', 'updated_at': None, 'name': 'fake name', 'description': 'fake description', 'security_services': [], 'share_network_subnets': [], 'security_service_update_support': True, 'status': 'active' } def _get_context(self, role): return getattr(self, '%s_context' % role) def _create_replica_get_req(self, **kwargs): if 'status' not in kwargs: kwargs['status'] = constants.STATUS_AVAILABLE if 'replica_state' not in kwargs: kwargs['replica_state'] = constants.REPLICA_STATE_IN_SYNC replica = db_utils.create_share_replica(**kwargs) path = '/v2/fake/share-replicas/%s/action' % replica['id'] microversion = kwargs.get('microversion', self.api_version) experimental = True if (api_version.APIVersionRequest(microversion) >= api_version.APIVersionRequest(GRADUATION_VERSION)): experimental = False req = fakes.HTTPRequest.blank(path, script_name=path, version=microversion, experimental=experimental) req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = microversion req.headers['X-Openstack-Manila-Api-Experimental'] = True return replica, req def _get_fake_replica( self, summary=False, admin=False, microversion=share_replicas.MIN_SUPPORTED_API_VERSION, **values): replica = fake_share.fake_replica(**values) replica['updated_at'] = '2016-02-11T19:57:56.506805' expected_keys = {'id', 'share_id', 'status', 'replica_state'} expected_replica = {key: 
replica[key] for key in replica if key in expected_keys} if not summary: expected_replica.update({ 'availability_zone': None, 'created_at': None, 'share_network_id': replica['share_network_id'], 'updated_at': replica['updated_at'], }) if admin: expected_replica['share_server_id'] = replica['share_server_id'] expected_replica['host'] = replica['host'] if (api_version.APIVersionRequest(microversion) >= api_version.APIVersionRequest(CAST_RULES_READONLY_VERSION) and admin): expected_replica['cast_rules_to_readonly'] = False return replica, expected_replica def _get_request(self, microversion, is_admin=False): experimental = (api_version.APIVersionRequest(microversion) <= api_version.APIVersionRequest(GRADUATION_VERSION)) req = fakes.HTTPRequest.blank( '/share-replicas', version=microversion, experimental=experimental, use_admin_context=is_admin) return req @ddt.data((False, PRE_GRADUATION_VERSION), (False, GRADUATION_VERSION)) @ddt.unpack def test_list_replicas_summary(self, is_admin, microversion): fake_replica, expected_replica = self._get_fake_replica( summary=True, admin=is_admin, microversion=microversion) self.mock_object(share_replicas.db, 'share_replicas_get_all', mock.Mock(return_value=[fake_replica])) req = self._get_request(is_admin=is_admin, microversion=microversion) context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual([expected_replica], res_dict['share_replicas']) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'get_all') def test_list_share_replicas_summary(self): fake_replica, expected_replica = self._get_fake_replica(summary=True) self.mock_object(share_replicas.db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[fake_replica])) req = fakes.HTTPRequest.blank( '/share-replicas?share_id=FAKE_SHARE_ID', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual([expected_replica], 
res_dict['share_replicas']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') @ddt.data((True, PRE_GRADUATION_VERSION), (False, GRADUATION_VERSION)) @ddt.unpack def test_list_replicas_detail(self, is_admin, microversion): fake_replica, expected_replica = self._get_fake_replica( admin=is_admin, microversion=microversion) self.mock_object(share_replicas.db, 'share_replicas_get_all', mock.Mock(return_value=[fake_replica])) req = self._get_request(is_admin=is_admin, microversion=microversion) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual([expected_replica], res_dict['share_replicas']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_replicas_detail_with_limit(self): fake_replica_1, expected_replica_1 = self._get_fake_replica() fake_replica_2, expected_replica_2 = self._get_fake_replica( id="fake_id2") self.mock_object( share_replicas.db, 'share_replicas_get_all', mock.Mock(return_value=[fake_replica_1, fake_replica_2])) req = fakes.HTTPRequest.blank('/share-replicas?limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['share_replicas'])) self.assertEqual([expected_replica_1], res_dict['share_replicas']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_replicas_detail_with_limit_and_offset(self): fake_replica_1, expected_replica_1 = self._get_fake_replica() fake_replica_2, expected_replica_2 = self._get_fake_replica( id="fake_id2") self.mock_object( share_replicas.db, 'share_replicas_get_all', mock.Mock(return_value=[fake_replica_1, fake_replica_2])) req = fakes.HTTPRequest.blank( '/share-replicas/detail?limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) 
self.assertEqual(1, len(res_dict['share_replicas'])) self.assertEqual([expected_replica_2], res_dict['share_replicas']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_share_replicas_detail_invalid_share(self): self.mock_object(share_replicas.db, 'share_replicas_get_all_by_share', mock.Mock(side_effect=exception.NotFound)) mock__view_builder_call = self.mock_object( share_replicas.replication_view.ReplicationViewBuilder, 'detail_list') req = self.replicas_req req.GET['share_id'] = 'FAKE_SHARE_ID' self.assertRaises(exc.HTTPNotFound, self.controller.detail, req) self.assertFalse(mock__view_builder_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'get_all') @ddt.data(True, False) def test_list_share_replicas_detail(self, is_admin): fake_replica, expected_replica = self._get_fake_replica(admin=is_admin) self.mock_object(share_replicas.db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[fake_replica])) req = fakes.HTTPRequest.blank( '/share-replicas?share_id=FAKE_SHARE_ID', version=self.api_version, experimental=True) req.environ['manila.context'] = ( self.member_context if not is_admin else self.admin_context) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual([expected_replica], res_dict['share_replicas']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_share_replicas_with_limit(self): fake_replica_1, expected_replica_1 = self._get_fake_replica() fake_replica_2, expected_replica_2 = self._get_fake_replica( id="fake_id2") self.mock_object( share_replicas.db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[fake_replica_1, fake_replica_2])) req = fakes.HTTPRequest.blank( '/share-replicas?share_id=FAKE_SHARE_ID&limit=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = 
self.controller.detail(req) self.assertEqual(1, len(res_dict['share_replicas'])) self.assertEqual([expected_replica_1], res_dict['share_replicas']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') def test_list_share_replicas_with_limit_and_offset(self): fake_replica_1, expected_replica_1 = self._get_fake_replica() fake_replica_2, expected_replica_2 = self._get_fake_replica( id="fake_id2") self.mock_object( share_replicas.db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[fake_replica_1, fake_replica_2])) req = fakes.HTTPRequest.blank( '/share-replicas?share_id=FAKE_SHARE_ID&limit=1&offset=1', version=self.api_version, experimental=True) req_context = req.environ['manila.context'] res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['share_replicas'])) self.assertEqual([expected_replica_2], res_dict['share_replicas']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'get_all') @ddt.data((True, PRE_GRADUATION_VERSION), (False, GRADUATION_VERSION)) @ddt.unpack def test_show(self, is_admin, microversion): fake_replica, expected_replica = self._get_fake_replica( admin=is_admin, microversion=microversion) self.mock_object( share_replicas.db, 'share_replica_get', mock.Mock(return_value=fake_replica)) req = self._get_request(microversion, is_admin) req_context = req.environ['manila.context'] res_dict = self.controller.show(req, fake_replica.get('id')) self.assertEqual(expected_replica, res_dict['share_replica']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'show') def test_show_no_replica(self): mock__view_builder_call = self.mock_object( share_replicas.replication_view.ReplicationViewBuilder, 'detail') fake_exception = exception.ShareReplicaNotFound( replica_id='FAKE_REPLICA_ID') self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock( side_effect=fake_exception)) self.assertRaises(exc.HTTPNotFound, self.controller.show, 
self.replicas_req, 'FAKE_REPLICA_ID') self.assertFalse(mock__view_builder_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'show') def test_create_invalid_body(self): body = {} mock__view_builder_call = self.mock_object( share_replicas.replication_view.ReplicationViewBuilder, 'detail_list') self.assertRaises(exc.HTTPUnprocessableEntity, self.controller.create, self.replicas_req, body) self.assertEqual(0, mock__view_builder_call.call_count) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'create') def test_create_no_share_id(self): body = { 'share_replica': { 'share_id': None, 'availability_zone': None, } } mock__view_builder_call = self.mock_object( share_replicas.replication_view.ReplicationViewBuilder, 'detail_list') self.assertRaises(exc.HTTPBadRequest, self.controller.create, self.replicas_req, body) self.assertFalse(mock__view_builder_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'create') def test_create_invalid_share_id(self): body = { 'share_replica': { 'share_id': 'FAKE_SHAREID', 'availability_zone': 'FAKE_AZ' } } mock__view_builder_call = self.mock_object( share_replicas.replication_view.ReplicationViewBuilder, 'detail_list') self.mock_object(share_replicas.db, 'share_get', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(exc.HTTPNotFound, self.controller.create, self.replicas_req, body) self.assertFalse(mock__view_builder_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'create') def test_create_has_been_soft_deleted(self): share_ref = fake_share.fake_share(is_soft_deleted=True) body = { 'share_replica': { 'share_id': 'FAKE_SHAREID', 'availability_zone': 'FAKE_AZ' } } mock__view_builder_call = self.mock_object( share_replicas.replication_view.ReplicationViewBuilder, 'detail_list') self.mock_object(share_replicas.db, 'share_get', 
mock.Mock(return_value=share_ref)) self.assertRaises(exc.HTTPForbidden, self.controller.create, self.replicas_req, body) self.assertFalse(mock__view_builder_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'create') def test_create_has_been_encrypted(self): share_ref = fake_share.fake_share(encryption_key_ref='fake') body = { 'share_replica': { 'share_id': 'FAKE_SHAREID', 'availability_zone': 'FAKE_AZ' } } mock__view_builder_call = self.mock_object( share_replicas.replication_view.ReplicationViewBuilder, 'detail_list') self.mock_object(share_replicas.db, 'share_get', mock.Mock(return_value=share_ref)) self.assertRaises(exc.HTTPForbidden, self.controller.create, self.replicas_req, body) self.assertFalse(mock__view_builder_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'create') def test_create_invalid_network_id(self): fake_replica, _ = self._get_fake_replica( replication_type='writable') req = self._get_request("2.72", False) req_context = req.environ['manila.context'] body = { 'share_replica': { 'share_id': 'FAKE_SHAREID', 'availability_zone': 'FAKE_AZ', 'share_network_id': 'FAKE_NETID' } } mock__view_builder_call = self.mock_object( share_replicas.replication_view.ReplicationViewBuilder, 'detail_list') self.mock_object(share_replicas.db, 'share_get', mock.Mock(return_value=fake_replica)) self.mock_object(share_replicas.db, 'share_network_get', mock.Mock(side_effect=exception.ShareNetworkNotFound( share_network_id='FAKE_NETID'))) self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body) self.assertFalse(mock__view_builder_call.called) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') @ddt.data(exception.AvailabilityZoneNotFound, exception.ReplicationException, exception.ShareBusyException) def test_create_exception_path(self, exception_type): fake_replica, _ = self._get_fake_replica( 
replication_type='writable') mock__view_builder_call = self.mock_object( share_replicas.replication_view.ReplicationViewBuilder, 'detail_list') share_network = db_utils.create_share_network() body = { 'share_replica': { 'share_id': 'FAKE_SHAREID', 'availability_zone': 'FAKE_AZ' } } exc_args = {'id': 'xyz', 'reason': 'abc'} self.mock_object(share_replicas.db, 'share_get', mock.Mock(return_value=fake_replica)) self.mock_object(share.API, 'create_share_replica', mock.Mock(side_effect=exception_type(**exc_args))) self.mock_object(share_replicas.db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) self.assertRaises(exc.HTTPBadRequest, self.controller.create, self.replicas_req, body) self.assertFalse(mock__view_builder_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'create') share_replicas.db.share_network_get.assert_called_once_with( self.member_context, fake_replica['share_network_id']) common.check_share_network_is_active.assert_called_once_with( share_network) @ddt.data((True, PRE_GRADUATION_VERSION), (False, GRADUATION_VERSION), (False, "2.72")) @ddt.unpack def test_create(self, is_admin, microversion): fake_replica, expected_replica = self._get_fake_replica( replication_type='writable', admin=is_admin, microversion=microversion) body = { 'share_replica': { 'share_id': 'FAKE_SHAREID', 'availability_zone': 'FAKE_AZ' } } if self.is_microversion_ge(microversion, '2.72'): body["share_replica"].update({"share_network_id": 'FAKE_NETID'}) share_network = {'id': 'FAKE_NETID'} else: share_network = db_utils.create_share_network() self.mock_object(share_replicas.db, 'share_get', mock.Mock(return_value=fake_replica)) self.mock_object(share.API, 'create_share_replica', mock.Mock(return_value=fake_replica)) self.mock_object(share_replicas.db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=[{'id': 
'active1'}])) self.mock_object(share_replicas.db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) req = self._get_request(microversion, is_admin) req_context = req.environ['manila.context'] res_dict = self.controller.create(req, body) self.assertEqual(expected_replica, res_dict['share_replica']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'create') if self.is_microversion_ge(microversion, '2.72'): share_replicas.db.share_network_get.assert_called_once_with( req_context, 'FAKE_NETID') else: share_replicas.db.share_network_get.assert_called_once_with( req_context, fake_replica['share_network_id']) common.check_share_network_is_active.assert_called_once_with( share_network) def test_delete_invalid_replica(self): fake_exception = exception.ShareReplicaNotFound( replica_id='FAKE_REPLICA_ID') self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(side_effect=fake_exception)) mock_delete_replica_call = self.mock_object( share.API, 'delete_share_replica') self.assertRaises( exc.HTTPNotFound, self.controller.delete, self.replicas_req, 'FAKE_REPLICA_ID') self.assertFalse(mock_delete_replica_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'delete') def test_delete_exception(self): fake_replica_1 = self._get_fake_replica( share_id='FAKE_SHARE_ID', replica_state=constants.REPLICA_STATE_ACTIVE)[0] fake_replica_2 = self._get_fake_replica( share_id='FAKE_SHARE_ID', replica_state=constants.REPLICA_STATE_ACTIVE)[0] exception_type = exception.ReplicationException(reason='xyz') self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(return_value=fake_replica_1)) self.mock_object( share_replicas.db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[fake_replica_1, fake_replica_2])) self.mock_object(share.API, 'delete_share_replica', 
mock.Mock(side_effect=exception_type)) self.assertRaises(exc.HTTPBadRequest, self.controller.delete, self.replicas_req, 'FAKE_REPLICA_ID') self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'delete') @ddt.data(PRE_GRADUATION_VERSION, GRADUATION_VERSION) def test_delete(self, microversion): fake_replica = self._get_fake_replica( share_id='FAKE_SHARE_ID', replica_state=constants.REPLICA_STATE_ACTIVE)[0] req = self._get_request(microversion=microversion) context = req.environ['manila.context'] self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(return_value=fake_replica)) self.mock_object(share.API, 'delete_share_replica') resp = self.controller.delete( req, 'FAKE_REPLICA_ID') self.assertEqual(202, resp.status_code) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'delete') def test_promote_invalid_replica_id(self): body = {'promote': None} fake_exception = exception.ShareReplicaNotFound( replica_id='FAKE_REPLICA_ID') self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(side_effect=fake_exception)) self.assertRaises(exc.HTTPNotFound, self.controller.promote, self.replicas_req, 'FAKE_REPLICA_ID', body) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'promote') def test_promote_already_active(self): body = {'promote': None} replica, expected_replica = self._get_fake_replica( replica_state=constants.REPLICA_STATE_ACTIVE) self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(share_replicas.db, 'share_network_get', mock.Mock(return_value=self.fake_share_network)) mock_api_promote_replica_call = self.mock_object( share.API, 'promote_share_replica') resp = self.controller.promote(self.replicas_req, replica['id'], body) self.assertEqual(200, resp.status_code) self.assertFalse(mock_api_promote_replica_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, 
self.resource_name, 'promote') def test_promote_replication_exception(self): body = {'promote': None} replica, expected_replica = self._get_fake_replica( replica_state=constants.REPLICA_STATE_IN_SYNC) exception_type = exception.ReplicationException(reason='xyz') self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(share_replicas.db, 'share_network_get', mock.Mock(return_value=self.fake_share_network)) mock_api_promote_replica_call = self.mock_object( share.API, 'promote_share_replica', mock.Mock(side_effect=exception_type)) self.assertRaises(exc.HTTPBadRequest, self.controller.promote, self.replicas_req, replica['id'], body) self.assertTrue(mock_api_promote_replica_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'promote') def test_promote_share_network_not_active(self): body = {'promote': None} replica, expected_replica = self._get_fake_replica( replica_state=constants.REPLICA_STATE_IN_SYNC) fake_share_network = copy.deepcopy(self.fake_share_network) fake_share_network['status'] = constants.STATUS_NETWORK_CHANGE self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(share_replicas.db, 'share_network_get', mock.Mock(return_value=fake_share_network)) self.assertRaises(exc.HTTPBadRequest, self.controller.promote, self.replicas_req, replica['id'], body) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'promote') def test_promote_admin_required_exception(self): body = {'promote': None} replica, expected_replica = self._get_fake_replica( replica_state=constants.REPLICA_STATE_IN_SYNC) self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(share_replicas.db, 'share_network_get', mock.Mock(return_value=self.fake_share_network)) mock_api_promote_replica_call = self.mock_object( share.API, 'promote_share_replica', 
mock.Mock(side_effect=exception.AdminRequired)) self.assertRaises(exc.HTTPForbidden, self.controller.promote, self.replicas_req, replica['id'], body) self.assertTrue(mock_api_promote_replica_call.called) self.mock_policy_check.assert_called_once_with( self.member_context, self.resource_name, 'promote') @ddt.data(PRE_GRADUATION_VERSION, GRADUATION_VERSION) def test_promote(self, microversion): body = {'promote': None} replica, expected_replica = self._get_fake_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, microversion=microversion) self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(share_replicas.db, 'share_network_get', mock.Mock(return_value=self.fake_share_network)) mock_api_promote_replica_call = self.mock_object( share.API, 'promote_share_replica', mock.Mock(return_value=replica)) req = self._get_request(microversion=microversion) context = req.environ['manila.context'] resp = self.controller.promote(req, replica['id'], body) self.assertEqual(expected_replica, resp['share_replica']) self.assertTrue(mock_api_promote_replica_call.called) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'promote') @ddt.data(('2.74', None), (PROMOTE_QUIESCE_WAIT_VERSION, None), (PROMOTE_QUIESCE_WAIT_VERSION, 10), (PROMOTE_QUIESCE_WAIT_VERSION, 'foobar'), ) @ddt.unpack def test_promote_quiesce_wait_time(self, microversion, time): body = {'promote': {'quiesce_wait_time': time}} replica, expected_replica = self._get_fake_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, microversion=microversion) self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(share_replicas.db, 'share_network_get', mock.Mock(return_value=self.fake_share_network)) req = self._get_request(microversion=microversion) allow_quiesce_wait_time = False if (api_version.APIVersionRequest(microversion) >= api_version.APIVersionRequest(PROMOTE_QUIESCE_WAIT_VERSION)): 
allow_quiesce_wait_time = True if time and allow_quiesce_wait_time: if strutils.is_int_like(time): mock_api_promote_replica_call = self.mock_object( share.API, 'promote_share_replica', mock.Mock(return_value=replica)) resp = self.controller.promote(req, replica['id'], body) self.assertEqual(expected_replica, resp['share_replica']) self.assertTrue(mock_api_promote_replica_call.called) else: self.assertRaises(exc.HTTPBadRequest, self.controller.promote, req, replica['id'], body) @ddt.data('index', 'detail', '_show', '_create', '_delete_share_replica', '_promote', 'reset_replica_state', 'reset_status', '_resync') def test_policy_not_authorized(self, method_name): method = getattr(self.controller, method_name) arguments = { 'id': 'FAKE_REPLICA_ID', 'body': {'FAKE_KEY': 'FAKE_VAL'}, } if method_name in ('index', 'detail'): arguments.clear() noauthexc = exception.PolicyNotAuthorized(action=method_name) with mock.patch.object( policy, 'check_policy', mock.Mock(side_effect=noauthexc)): self.assertRaises( exc.HTTPForbidden, method, self.replicas_req, **arguments) @ddt.data('index', 'detail', 'show', 'create', 'delete', 'promote', 'reset_replica_state', 'reset_status', 'resync') def test_upsupported_microversion(self, method_name): unsupported_microversions = ('1.0', '2.2', '2.10') method = getattr(self.controller, method_name) arguments = { 'id': 'FAKE_REPLICA_ID', 'body': {'FAKE_KEY': 'FAKE_VAL'}, } if method_name in ('index', 'detail'): arguments.clear() for microversion in unsupported_microversions: req = fakes.HTTPRequest.blank( '/share-replicas', version=microversion, experimental=True) self.assertRaises(exception.VersionNotFoundForAPIMethod, method, req, **arguments) def _reset_status(self, context, replica, req, valid_code=202, status_attr='status', valid_status=None, body=None): if status_attr == 'status': action_name = 'reset_status' body = body or {action_name: {'status': constants.STATUS_ERROR}} else: action_name = 'reset_replica_state' body = body or { 
action_name: {'replica_state': constants.STATUS_ERROR}, } req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = context with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) actual_replica = share_replicas.db.share_replica_get( context, replica['id']) self.assertEqual(valid_status, actual_replica[status_attr]) @ddt.data(*fakes.fixture_reset_replica_status_with_different_roles) @ddt.unpack def test_reset_status_with_different_roles(self, role, valid_code, valid_status, microversion): context = self._get_context(role) replica, action_req = self._create_replica_get_req( microversion=microversion) self._reset_status(context, replica, action_req, valid_code=valid_code, status_attr='status', valid_status=valid_status) @ddt.data( {'os-reset_status': {'x-status': 'bad'}}, {'os-reset_status': {'status': constants.STATUS_AVAILABLE}}, {'reset_status': {'x-status': 'bad'}}, {'reset_status': {'status': 'invalid'}}, ) def test_reset_status_invalid_body(self, body): replica, action_req = self._create_replica_get_req() self._reset_status(self.admin_context, replica, action_req, valid_code=400, status_attr='status', valid_status=constants.STATUS_AVAILABLE, body=body) @ddt.data(*fakes.fixture_reset_replica_state_with_different_roles) @ddt.unpack def test_reset_replica_state_with_different_roles( self, role, valid_code, valid_status, microversion): context = self._get_context(role) replica, action_req = self._create_replica_get_req( microversion=microversion) body = {'reset_replica_state': {'replica_state': valid_status}} self._reset_status(context, replica, action_req, valid_code=valid_code, status_attr='replica_state', valid_status=valid_status, body=body) def test_reset_replica_with_active_state(self): body = { 'reset_replica_state': { 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, } } replica, 
action_req = self._create_replica_get_req( replica_state=constants.REPLICA_STATE_ACTIVE) self._reset_status(self.admin_context, replica, action_req, status_attr='replica_state', valid_code=400, valid_status=constants.REPLICA_STATE_ACTIVE, body=body) @ddt.data( {'os-reset_replica_state': {'x-replica_state': 'bad'}}, {'os-reset_replica_state': {'replica_state': constants.STATUS_ERROR}}, {'reset_replica_state': {'x-replica_state': 'bad'}}, {'reset_replica_state': {'replica_state': constants.STATUS_AVAILABLE}}, ) def test_reset_replica_state_invalid_body(self, body): replica, action_req = self._create_replica_get_req() self._reset_status(self.admin_context, replica, action_req, valid_code=400, status_attr='status', valid_status=constants.STATUS_AVAILABLE, body=body) def _force_delete(self, context, req, valid_code=202): body = {'force_delete': {}} req.environ['manila.context'] = context req.body = jsonutils.dumps(body).encode("utf-8") with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) # validate response self.assertEqual(valid_code, resp.status_int) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_force_delete_replica_with_different_roles(self, role, resp_code, version): replica, req = self._create_replica_get_req() context = self._get_context(role) self._force_delete(context, req, valid_code=resp_code) @ddt.data((PRE_GRADUATION_VERSION, 202), (GRADUATION_VERSION, 202)) @ddt.unpack def test_force_delete_replica(self, microversion, resp_code): replica, req = self._create_replica_get_req(microversion=microversion) context = self.admin_context self._force_delete(context, req, valid_code=resp_code) def test_force_delete_missing_replica(self): replica, req = self._create_replica_get_req() share_replicas.db.share_replica_delete( self.admin_context, replica['id'], need_to_update_usages=False) self._force_delete(self.admin_context, req, valid_code=404) def 
test_resync_replica_not_found(self): replica, req = self._create_replica_get_req() share_replicas.db.share_replica_delete( self.admin_context, replica['id'], need_to_update_usages=False) share_api_call = self.mock_object(self.controller.share_api, 'update_share_replica') body = {'resync': {}} req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = self.admin_context with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) self.assertEqual(404, resp.status_int) self.assertFalse(share_api_call.called) def test_resync_API_exception(self): replica, req = self._create_replica_get_req( replica_state=constants.REPLICA_STATE_OUT_OF_SYNC) self.mock_object(share_replicas.db, 'share_replica_get', mock.Mock(return_value=replica)) share_api_call = self.mock_object( share.API, 'update_share_replica', mock.Mock( side_effect=exception.InvalidHost(reason=''))) body = {'resync': None} req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = self.admin_context with mock.patch.object( policy, 'check_policy', fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) self.assertEqual(400, resp.status_int) share_api_call.assert_called_once_with(self.admin_context, replica) @ddt.data((constants.REPLICA_STATE_ACTIVE, PRE_GRADUATION_VERSION), (constants.REPLICA_STATE_IN_SYNC, PRE_GRADUATION_VERSION), (constants.REPLICA_STATE_OUT_OF_SYNC, GRADUATION_VERSION), (constants.STATUS_ERROR, GRADUATION_VERSION)) @ddt.unpack def test_resync(self, replica_state, microversion): replica, req = self._create_replica_get_req( replica_state=replica_state, host='skywalker@jedi#temple', microversion=microversion) share_api_call = self.mock_object( share.API, 'update_share_replica', mock.Mock(return_value=None)) body = {'resync': {}} req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = self.admin_context with mock.patch.object( policy, 'check_policy', 
fakes.mock_fake_admin_check): resp = req.get_response(fakes.app()) if replica_state == constants.REPLICA_STATE_ACTIVE: self.assertEqual(200, resp.status_int) self.assertFalse(share_api_call.called) else: self.assertEqual(202, resp.status_int) self.assertTrue(share_api_call.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_servers.py0000664000175000017500000015076200000000000023060 0ustar00zuulzuul00000000000000# Copyright 2019 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt import webob from manila.api import common from manila.api.v2 import share_servers from manila.common import constants from manila import context as ctx_api from manila.db import api as db_api from manila import exception from manila import policy from manila.share import api as share_api from manila import test from manila.tests.api import fakes from manila.tests import db_utils from manila import utils @ddt.ddt class ShareServerControllerTest(test.TestCase): """Share server api test""" def setUp(self): super(ShareServerControllerTest, self).setUp() self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.controller = share_servers.ShareServerController() self.resource_name = self.controller.resource_name @ddt.data(constants.STATUS_ACTIVE, constants.STATUS_ERROR, constants.STATUS_DELETING, constants.STATUS_CREATING, constants.STATUS_MANAGING, constants.STATUS_UNMANAGING, constants.STATUS_UNMANAGE_ERROR, constants.STATUS_MANAGE_ERROR) def test_share_server_reset_status(self, status): req = fakes.HTTPRequest.blank('/v2/share-servers/fake-share-server/', use_admin_context=True, version="2.49") body = {'reset_status': {'status': status}} context = req.environ['manila.context'] self.mock_object(self.controller, '_get', mock.Mock( return_value={'share_server': 'object'})) mock_update = self.mock_object(db_api, 'share_server_update') result = self.controller.share_server_reset_status( req, 'fake_server_id', body) self.assertEqual(202, result.status_int) policy.check_policy.assert_has_calls([ mock.call(context, self.resource_name, 'reset_status'), mock.call(context, self.resource_name, 'reset_status', target_obj={'share_server': 'object'}), ]) mock_update.assert_called_once_with( context, 'fake_server_id', {'status': status}) def test_share_reset_server_status_invalid(self): req = fakes.HTTPRequest.blank('/reset_status', use_admin_context=True, version="2.49") body = {'reset_status': 
{'status': constants.STATUS_EXTENDING}} context = req.environ['manila.context'] self.assertRaises( webob.exc.HTTPBadRequest, self.controller.share_server_reset_status, req, id='fake_server_id', body=body) policy.check_policy.assert_called_once_with( context, self.resource_name, 'reset_status') def test_share_server_reset_status_no_body(self): req = fakes.HTTPRequest.blank('/reset_status', use_admin_context=True, version="2.49") context = req.environ['manila.context'] self.assertRaises( webob.exc.HTTPBadRequest, self.controller.share_server_reset_status, req, id='fake_server_id', body={}) policy.check_policy.assert_called_once_with( context, self.resource_name, 'reset_status') def test_share_server_reset_status_no_status(self): req = fakes.HTTPRequest.blank('/reset_status', use_admin_context=True, version="2.49") context = req.environ['manila.context'] self.assertRaises( webob.exc.HTTPBadRequest, self.controller.share_server_reset_status, req, id='fake_server_id', body={'reset_status': {}}) policy.check_policy.assert_called_once_with( context, self.resource_name, 'reset_status') def _setup_manage_test_request_body(self): body = { 'share_network_id': 'fake_net_id', 'share_network_subnet_id': 'fake_subnet_id', 'host': 'fake_host', 'identifier': 'fake_identifier', 'driver_options': {'opt1': 'fake_opt1', 'opt2': 'fake_opt2'}, } return body @ddt.data('fake_net_name', '') def test_manage(self, share_net_name): """Tests share server manage""" req = fakes.HTTPRequest.blank('/v2/share-servers/', use_admin_context=True, version="2.49") context = req.environ['manila.context'] share_network = db_utils.create_share_network(name=share_net_name) share_net_subnet = [db_utils.create_share_network_subnet( share_network_id=share_network['id'])] share_server = db_utils.create_share_server( share_network_subnet_id=share_net_subnet[0]['id'], host='fake_host', identifier='fake_identifier', is_auto_deletable=False, share_network_subnets=share_net_subnet) self.mock_object(db_api, 
'share_network_get', mock.Mock( return_value=share_network)) self.mock_object(db_api, 'share_network_subnet_get_default_subnets', mock.Mock(return_value=share_net_subnet)) self.mock_object(utils, 'validate_service_host') body = { 'share_server': self._setup_manage_test_request_body() } manage_share_server_mock = self.mock_object( share_api.API, 'manage_share_server', mock.Mock(return_value=share_server)) result = self.controller.manage(req, body) expected_result = { 'share_server': { 'id': share_server['id'], 'project_id': 'fake', 'updated_at': share_server['updated_at'], 'status': constants.STATUS_ACTIVE, 'host': 'fake_host', 'share_network_id': (share_server['share_network_subnets'][0] ['share_network_id']), 'created_at': share_server['created_at'], 'backend_details': {}, 'identifier': share_server['identifier'], 'is_auto_deletable': share_server['is_auto_deletable'], } } if share_net_name != '': expected_result['share_server']['share_network_name'] = ( 'fake_net_name') else: expected_result['share_server']['share_network_name'] = ( share_net_subnet[0]['share_network_id']) req_params = body['share_server'] manage_share_server_mock.assert_called_once_with( context, req_params['identifier'], req_params['host'], share_net_subnet[0], req_params['driver_options']) self.assertEqual(expected_result, result) self.mock_policy_check.assert_called_once_with( context, self.resource_name, 'manage_share_server') def test_manage_invalid(self): req = fakes.HTTPRequest.blank('/manage_share_server', use_admin_context=True, version="2.49") share_network = db_utils.create_share_network() share_net_subnet = [db_utils.create_share_network_subnet( share_network_id=share_network['id'])] body = { 'share_server': self._setup_manage_test_request_body() } body['share_server']['driver_options'] = [] self.mock_object(utils, 'validate_service_host') self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(db_api, 
'share_network_subnet_get_default_subnets', mock.Mock(return_value=share_net_subnet)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, req, body) def test_manage_forbidden(self): """Tests share server manage without admin privileges""" req = fakes.HTTPRequest.blank('/manage_share_server', version="2.49") error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object(share_api.API, 'manage_share_server', error) share_network = db_utils.create_share_network() share_net_subnet = [db_utils.create_share_network_subnet( share_network_id=share_network['id'])] self.mock_object(db_api, 'share_network_get', mock.Mock( return_value=share_network)) self.mock_object(db_api, 'share_network_subnet_get_default_subnets', mock.Mock(return_value=share_net_subnet)) self.mock_object(utils, 'validate_service_host') body = { 'share_server': self._setup_manage_test_request_body() } self.assertRaises(webob.exc.HTTPForbidden, self.controller.manage, req, body) def test__validate_manage_share_server_validate_no_body(self): """Tests share server manage""" req = fakes.HTTPRequest.blank('/manage', version="2.49") body = {} self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.manage, req, body) @ddt.data({'empty': False, 'key': 'host'}, {'empty': False, 'key': 'share_network_id'}, {'empty': False, 'key': 'identifier'}, {'empty': True, 'key': 'host'}, {'empty': True, 'key': 'share_network_id'}, {'empty': True, 'key': 'identifier'}) @ddt.unpack def test__validate_manage_share_server_validate_without_parameters( self, empty, key): """Tests share server manage without some parameters""" req = fakes.HTTPRequest.blank('/manage_share_server', version="2.49") self.mock_object(share_api.API, 'manage_share_server', mock.Mock()) body = { 'share_server': self._setup_manage_test_request_body(), } if empty: body['share_server'][key] = None else: body['share_server'].pop(key) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, req, 
body) @ddt.data( (webob.exc.HTTPBadRequest, exception.ServiceNotFound('foobar')), (webob.exc.HTTPBadRequest, exception.ServiceIsDown('foobar')), (webob.exc.HTTPForbidden, exception.PolicyNotAuthorized('foobar')), (webob.exc.HTTPForbidden, exception.AdminRequired()) ) @ddt.unpack def test__validate_manage_share_server_validate_service_host( self, exception_to_raise, side_effect_exception): req = fakes.HTTPRequest.blank('/manage', version="2.49") context = req.environ['manila.context'] error = mock.Mock(side_effect=side_effect_exception) self.mock_object(utils, 'validate_service_host', error) share_network = db_utils.create_share_network() share_net_subnet = [db_utils.create_share_network_subnet( share_network_id=share_network['id'])] self.mock_object(db_api, 'share_network_get', mock.Mock( return_value=share_network)) self.mock_object(db_api, 'share_network_subnet_get_default_subnets', mock.Mock(return_value=share_net_subnet)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) self.assertRaises( exception_to_raise, self.controller.manage, req, {'share_server': self._setup_manage_test_request_body()}) common.check_share_network_is_active.assert_called_once_with( share_net_subnet[0]['share_network']) policy.check_policy.assert_called_once_with( context, self.resource_name, 'manage_share_server') def test__validate_manage_share_network_not_active(self): req = fakes.HTTPRequest.blank('/manage', version="2.49") context = req.environ['manila.context'] share_network = db_utils.create_share_network() share_net_subnet = [db_utils.create_share_network_subnet( share_network_id=share_network['id'])] self.mock_object(db_api, 'share_network_get', mock.Mock( return_value=share_network)) self.mock_object(db_api, 'share_network_subnet_get_default_subnets', mock.Mock(return_value=share_net_subnet)) self.mock_object(utils, 'validate_service_host') self.mock_object(common, 'check_share_network_is_active', 
mock.Mock(side_effect=webob.exc.HTTPBadRequest())) self.assertRaises( webob.exc.HTTPBadRequest, self.controller.manage, req, {'share_server': self._setup_manage_test_request_body()}) common.check_share_network_is_active.assert_called_once_with( share_net_subnet[0]['share_network']) policy.check_policy.assert_called_once_with( context, self.resource_name, 'manage_share_server') def test__validate_manage_share_server_share_network_not_found(self): req = fakes.HTTPRequest.blank('/manage', version="2.49") context = req.environ['manila.context'] self.mock_object(utils, 'validate_service_host') error = mock.Mock( side_effect=exception.ShareNetworkNotFound(share_network_id="foo")) self.mock_object(db_api, 'share_network_get', error) body = self._setup_manage_test_request_body() self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, req, {'share_server': body}) policy.check_policy.assert_called_once_with( context, self.resource_name, 'manage_share_server') def test__validate_manage_share_server_driver_opts_not_instance_dict(self): req = fakes.HTTPRequest.blank('/manage', version="2.49") context = req.environ['manila.context'] self.mock_object(utils, 'validate_service_host') self.mock_object(db_api, 'share_network_get') body = self._setup_manage_test_request_body() body['driver_options'] = 'incorrect' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, req, {'share_server': body}) policy.check_policy.assert_called_once_with( context, self.resource_name, 'manage_share_server') def test__validate_manage_share_server_error_extract_host(self): req = fakes.HTTPRequest.blank('/manage', version="2.49") context = req.environ['manila.context'] body = self._setup_manage_test_request_body() body['host'] = 'fake@backend#pool' self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, req, {'share_server': body}) policy.check_policy.assert_called_once_with( context, self.resource_name, 'manage_share_server') @ddt.data(True, False) def 
test__validate_manage_share_server_error_subnet_not_found( self, body_contains_subnet): req = fakes.HTTPRequest.blank('/manage', version="2.51") context = req.environ['manila.context'] share_network = db_utils.create_share_network() body = {'share_server': self._setup_manage_test_request_body()} share_net_subnet = [db_utils.create_share_network_subnet( share_network_id=share_network['id'])] body['share_server']['share_network_subnet_id'] = ( share_net_subnet[0]['id'] if body_contains_subnet else None) self.mock_object( db_api, 'share_network_subnet_get_all_with_same_az', mock.Mock(side_effect=exception.ShareNetworkSubnetNotFound( share_network_subnet_id='fake'))) self.mock_object(db_api, 'share_network_subnet_get_default_subnets', mock.Mock(return_value=None)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, req, body) policy.check_policy.assert_called_once_with( context, self.resource_name, 'manage_share_server') if body_contains_subnet: (db_api.share_network_subnet_get_all_with_same_az. 
assert_called_once_with(context, share_net_subnet[0]['id'])) else: (db_api.share_network_subnet_get_default_subnets .assert_called_once_with( context, body['share_server']['share_network_id'])) @ddt.data(True, False) def test__validate_manage_share_server_error_multiple_subnet( self, body_contains_subnet): req = fakes.HTTPRequest.blank('/manage', version="2.70") context = req.environ['manila.context'] share_network = db_utils.create_share_network() body = {'share_server': self._setup_manage_test_request_body()} share_net_subnets = [ db_utils.create_share_network_subnet( share_network_id=share_network['id']), db_utils.create_share_network_subnet( share_network_id=share_network['id'], id='fake_sns_id_2'), ] body['share_server']['share_network_subnet_id'] = ( share_net_subnets[0]['id'] if body_contains_subnet else None) self.mock_object( db_api, 'share_network_subnet_get_all_with_same_az', mock.Mock(return_value=share_net_subnets)) self.mock_object(db_api, 'share_network_subnet_get_default_subnets', mock.Mock(return_value=share_net_subnets)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, req, body) policy.check_policy.assert_called_once_with( context, self.resource_name, 'manage_share_server') if body_contains_subnet: (db_api.share_network_subnet_get_all_with_same_az. 
assert_called_once_with(context, share_net_subnets[0]['id'])) else: (db_api.share_network_subnet_get_default_subnets .assert_called_once_with( context, body['share_server']['share_network_id'])) @ddt.data(True, False) def test_unmanage(self, force): server = self._setup_unmanage_tests() req = fakes.HTTPRequest.blank('/unmanage', version="2.49") context = req.environ['manila.context'] mock_get = self.mock_object( db_api, 'share_server_get', mock.Mock(return_value=server)) mock_unmanage = self.mock_object( share_api.API, 'unmanage_share_server', mock.Mock(return_value=202)) body = {'unmanage': {'force': force}} resp = self.controller.unmanage(req, server['id'], body) self.assertEqual(202, resp.status_int) mock_get.assert_called_once_with(context, server['id']) mock_unmanage.assert_called_once_with(context, server, force=force) def test_unmanage_share_server_not_found(self): """Tests unmanaging share servers""" req = fakes.HTTPRequest.blank('/v2/share-servers/fake_server_id/', version="2.49") context = req.environ['manila.context'] share_server_error = mock.Mock( side_effect=exception.ShareServerNotFound( share_server_id='fake_server_id')) get_mock = self.mock_object( db_api, 'share_server_get', share_server_error) body = {'unmanage': {'force': True}} self.assertRaises(webob.exc.HTTPNotFound, self.controller.unmanage, req, 'fake_server_id', body) get_mock.assert_called_once_with(context, 'fake_server_id') def test_unmanage_share_server_multiple_subnets_fail(self): """Tests unmanaging share servers""" server = self._setup_unmanage_tests(multiple_subnets=True) get_mock = self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) req = fakes.HTTPRequest.blank('/unmanage_share_server', version="2.70") context = req.environ['manila.context'] body = {'unmanage': {'force': True}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.unmanage, req, server['id'], body) get_mock.assert_called_once_with(context, server['id']) 
@ddt.data(constants.STATUS_MANAGING, constants.STATUS_DELETING, constants.STATUS_CREATING, constants.STATUS_UNMANAGING) def test_unmanage_share_server_invalid_statuses(self, status): """Tests unmanaging share servers""" server = self._setup_unmanage_tests(status=status) get_mock = self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) req = fakes.HTTPRequest.blank('/unmanage_share_server', version="2.49") context = req.environ['manila.context'] body = {'unmanage': {'force': True}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.unmanage, req, server['id'], body) get_mock.assert_called_once_with(context, server['id']) def _setup_unmanage_tests(self, status=constants.STATUS_ACTIVE, multiple_subnets=False): share_network = db_utils.create_share_network() network_subnets = [db_utils.create_share_network_subnet( id='fake_sns_id', share_network_id=share_network['id'])] if multiple_subnets: share_network1 = db_utils.create_share_network() network_subnets.append(db_utils.create_share_network_subnet( share_network_id=share_network1['id'], id='fake_sns_id_2')) server = db_utils.create_share_server( id='fake_server_id', status=status, share_network_subnets=network_subnets) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(db_api, 'share_network_subnet_get', mock.Mock(return_value=network_subnets)) return server @ddt.data(exception.ShareServerInUse, exception.PolicyNotAuthorized) def test_unmanage_share_server_badrequest(self, exc): req = fakes.HTTPRequest.blank('/unmanage', version="2.49") server = self._setup_unmanage_tests() context = req.environ['manila.context'] error = mock.Mock(side_effect=exc('foobar')) mock_unmanage = self.mock_object( share_api.API, 'unmanage_share_server', error) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) body = {'unmanage': {'force': True}} 
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.unmanage, req, 'fake_server_id', body) mock_unmanage.assert_called_once_with(context, server, force=True) db_api.share_network_get.assert_called() common.check_share_network_is_active.assert_called() policy.check_policy.assert_called_once_with( context, self.resource_name, 'unmanage_share_server') def test_unmanage_share_server_network_not_active(self): """Tests unmanaging share servers""" req = fakes.HTTPRequest.blank( '/v2/share-servers/fake_server_id/', version="2.63") context = req.environ['manila.context'] share_server = db_utils.create_share_server() network_subnets = [db_utils.create_share_network_subnet()] share_server['share_network_subnets'] = network_subnets share_network = db_utils.create_share_network() get_mock = self.mock_object( db_api, 'share_server_get', mock.Mock(return_value=share_server)) get_network_mock = self.mock_object( db_api, 'share_network_get', mock.Mock(return_value=share_network)) is_active_mock = self.mock_object( common, 'check_share_network_is_active', mock.Mock(side_effect=webob.exc.HTTPBadRequest())) body = {'unmanage': {'force': True}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.unmanage, req, 'fake_server_id', body) get_mock.assert_called_once_with(context, 'fake_server_id') get_network_mock.assert_called_once_with( context, share_server['share_network_subnets'][0]['share_network_id']) is_active_mock.assert_called_once_with(share_network) def _get_server_migration_request(self, server_id, version='2.57'): req = fakes.HTTPRequest.blank( '/share-servers/%s/action' % server_id, use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True return req def test__share_server_migration_start(self): server = db_utils.create_share_server(id='fake_server_id', status=constants.STATUS_ACTIVE) share_network = db_utils.create_share_network() req = 
self._get_server_migration_request(server['id']) context = req.environ['manila.context'] self.mock_object(db_api, 'share_network_get', mock.Mock( return_value=share_network)) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) self.mock_object(share_api.API, 'share_server_migration_start') body = { 'migration_start': { 'host': 'fake_host', 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, 'new_share_network_id': 'fake_net_id', } } self.controller.share_server_migration_start( req, server['id'], body) db_api.share_server_get.assert_called_once_with( context, server['id']) share_api.API.share_server_migration_start.assert_called_once_with( context, server, 'fake_host', True, True, True, new_share_network=share_network) db_api.share_network_get.assert_called_once_with( context, 'fake_net_id') common.check_share_network_is_active.assert_called_once_with( share_network) @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'), 'expected_exception': webob.exc.HTTPBadRequest}, {'api_exception': exception.InvalidShareServer( reason='fake_reason'), 'expected_exception': webob.exc.HTTPConflict}, {'api_exception': exception.InvalidInput(reason='fake_reason'), 'expected_exception': webob.exc.HTTPBadRequest}) @ddt.unpack def test__share_server_migration_start_conflict(self, api_exception, expected_exception): share_network = db_utils.create_share_network() share_network_subnet = [db_utils.create_share_network_subnet( share_network_id=share_network['id'])] server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE, share_network_subnet_id=share_network_subnet[0]['id']) server['share_network_subnets'] = share_network_subnet req = self._get_server_migration_request(server['id']) context = req.environ['manila.context'] body = { 'migration_start': { 'host': 'fake_host', 'preserve_snapshots': True, 
'writable': True, 'nondisruptive': True } } self.mock_object(share_api.API, 'share_server_migration_start', mock.Mock(side_effect=api_exception)) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) self.mock_object(db_api, 'share_network_get', mock.Mock(return_value=share_network)) self.assertRaises(expected_exception, self.controller.share_server_migration_start, req, server['id'], body) db_api.share_server_get.assert_called_once_with(context, server['id']) migration_start_params = body['migration_start'] common.check_share_network_is_active.assert_called_once_with( share_network) db_api.share_network_get.assert_called_once_with( context, share_network['id']) share_api.API.share_server_migration_start.assert_called_once_with( context, server, migration_start_params['host'], migration_start_params['writable'], migration_start_params['nondisruptive'], migration_start_params['preserve_snapshots'], new_share_network=None) @ddt.data('host', 'body') def test__share_server_migration_start_missing_mandatory(self, param): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) context = req.environ['manila.context'] body = { 'migration_start': { 'host': 'fake_host', 'preserve_metadata': True, 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True } } if param == 'body': body.pop('migration_start') else: body['migration_start'].pop(param) self.mock_object(share_api.API, 'share_server_migration_start') self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) self.assertRaises( webob.exc.HTTPBadRequest, getattr(self.controller, 'share_server_migration_start'), req, server['id'], body) db_api.share_server_get.assert_called_once_with(context, server['id']) @ddt.data('nondisruptive', 'writable', 'preserve_snapshots') def 
test__share_server_migration_start_non_boolean(self, param): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) context = req.environ['manila.context'] body = { 'migration_start': { 'host': 'fake_host', 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True } } body['migration_start'][param] = None self.mock_object(share_api.API, 'share_server_migration_start') self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) self.assertRaises( webob.exc.HTTPBadRequest, getattr(self.controller, 'share_server_migration_start'), req, server['id'], body) db_api.share_server_get.assert_called_once_with(context, server['id']) def test__share_server_migration_start_share_server_not_found(self): fake_id = 'fake_server_id' req = self._get_server_migration_request(fake_id) context = req.environ['manila.context'] body = {'migration_start': {'host': 'fake_host'}} self.mock_object(db_api, 'share_server_get', mock.Mock(side_effect=exception.ShareServerNotFound( share_server_id=fake_id))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.share_server_migration_start, req, fake_id, body) db_api.share_server_get.assert_called_once_with(context, fake_id) def test__share_server_migration_start_new_share_network_not_found(self): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) context = req.environ['manila.context'] body = { 'migration_start': { 'host': 'fake_host', 'preserve_metadata': True, 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, 'new_share_network_id': 'nonexistent'}} self.mock_object(db_api, 'share_network_get', mock.Mock(side_effect=exception.NotFound())) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.share_server_migration_start, req, 
server['id'], body) db_api.share_network_get.assert_called_once_with(context, 'nonexistent') db_api.share_server_get.assert_called_once_with(context, server['id']) def test__share_server_migration_start_host_with_pool(self): server = db_utils.create_share_server(id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) body = { 'migration_start': { 'host': 'fake_host@fakebackend#pool', 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, 'new_share_network_id': 'fake_net_id', } } self.assertRaises(webob.exc.HTTPBadRequest, self.controller.share_server_migration_start, req, server['id'], body) def test_share_server_migration_check_host_with_pool(self): server = db_utils.create_share_server(id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) body = { 'migration_start': { 'host': 'fake_host@fakebackend#pool', 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, 'new_share_network_id': 'fake_net_id', } } self.assertRaises(webob.exc.HTTPBadRequest, self.controller.share_server_migration_check, req, server['id'], body) @ddt.data(constants.TASK_STATE_MIGRATION_ERROR, None) def test_reset_task_state(self, task_state): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) update = {'task_state': task_state} body = {'reset_task_state': update} self.mock_object(db_api, 'share_server_update') response = self.controller.share_server_reset_task_state( req, server['id'], body) self.assertEqual(202, response.status_int) db_api.share_server_update.assert_called_once_with(utils.IsAMatcher( ctx_api.RequestContext), server['id'], update) def test_reset_task_state_error_body(self): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) update = {'error': 'error'} body 
= {'reset_task_state': update} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.share_server_reset_task_state, req, server['id'], body) def test_reset_task_state_error_invalid(self): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) update = {'task_state': 'error'} body = {'reset_task_state': update} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.share_server_reset_task_state, req, server['id'], body) def test_reset_task_state_not_found(self): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR} body = {'reset_task_state': update} self.mock_object(db_api, 'share_server_get', mock.Mock(side_effect=exception.ShareServerNotFound( share_server_id='fake_server_id'))) self.mock_object(db_api, 'share_server_update') self.assertRaises(webob.exc.HTTPNotFound, self.controller.share_server_reset_task_state, req, server['id'], body) db_api.share_server_get.assert_called_once_with(utils.IsAMatcher( ctx_api.RequestContext), server['id']) db_api.share_server_update.assert_not_called() def test_share_server_migration_complete(self): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) context = req.environ['manila.context'] body = {'migration_complete': None} api_return = { 'destination_share_server_id': 'fake_destination_id' } self.mock_object(share_api.API, 'share_server_migration_complete', mock.Mock(return_value=api_return)) self.mock_object(db_api, 'share_server_get', mock.Mock(return_value=server)) result = self.controller.share_server_migration_complete( req, server['id'], body) self.assertEqual(api_return, result) share_api.API.share_server_migration_complete.assert_called_once_with( 
utils.IsAMatcher(ctx_api.RequestContext), server) db_api.share_server_get.assert_called_once_with(context, server['id']) def test_share_server_migration_complete_not_found(self): fake_id = 'fake_server_id' req = self._get_server_migration_request(fake_id) context = req.environ['manila.context'] body = {'migration_complete': None} self.mock_object(db_api, 'share_server_get', mock.Mock(side_effect=exception.ShareServerNotFound( share_server_id=fake_id))) self.mock_object(share_api.API, 'share_server_migration_complete') self.assertRaises(webob.exc.HTTPNotFound, self.controller.share_server_migration_complete, req, fake_id, body) db_api.share_server_get.assert_called_once_with(context, fake_id) @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'), 'expected_exception': webob.exc.HTTPBadRequest}, {'api_exception': exception.InvalidShareServer(reason=""), 'expected_exception': webob.exc.HTTPBadRequest}) @ddt.unpack def test_share_server_migration_complete_exceptions(self, api_exception, expected_exception): fake_id = 'fake_server_id' req = self._get_server_migration_request(fake_id) context = req.environ['manila.context'] body = {'migration_complete': None} self.mock_object(db_api, 'share_server_get', mock.Mock(return_value='fake_share_server')) self.mock_object(share_api.API, 'share_server_migration_complete', mock.Mock(side_effect=api_exception)) self.assertRaises(expected_exception, self.controller.share_server_migration_complete, req, fake_id, body) db_api.share_server_get.assert_called_once_with(context, fake_id) share_api.API.share_server_migration_complete.assert_called_once_with( context, 'fake_share_server') def test_share_server_migration_cancel(self): server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) req = self._get_server_migration_request(server['id']) context = req.environ['manila.context'] body = {'migration_cancel': None} self.mock_object(db_api, 'share_server_get', 
mock.Mock(return_value=server)) self.mock_object(share_api.API, 'share_server_migration_cancel') self.controller.share_server_migration_cancel( req, server['id'], body) share_api.API.share_server_migration_cancel.assert_called_once_with( utils.IsAMatcher(ctx_api.RequestContext), server) db_api.share_server_get.assert_called_once_with(context, server['id']) def test_share_server_migration_cancel_not_found(self): fake_id = 'fake_server_id' req = self._get_server_migration_request(fake_id) context = req.environ['manila.context'] body = {'migration_cancel': None} self.mock_object(db_api, 'share_server_get', mock.Mock(side_effect=exception.ShareServerNotFound( share_server_id=fake_id))) self.mock_object(share_api.API, 'share_server_migration_cancel') self.assertRaises(webob.exc.HTTPNotFound, self.controller.share_server_migration_cancel, req, fake_id, body) db_api.share_server_get.assert_called_once_with(context, fake_id) @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'), 'expected_exception': webob.exc.HTTPBadRequest}, {'api_exception': exception.InvalidShareServer(reason=""), 'expected_exception': webob.exc.HTTPBadRequest}) @ddt.unpack def test_share_server_migration_cancel_exceptions(self, api_exception, expected_exception): fake_id = 'fake_server_id' req = self._get_server_migration_request(fake_id) context = req.environ['manila.context'] body = {'migration_complete': None} self.mock_object(db_api, 'share_server_get', mock.Mock(return_value='fake_share_server')) self.mock_object(share_api.API, 'share_server_migration_cancel', mock.Mock(side_effect=api_exception)) self.assertRaises(expected_exception, self.controller.share_server_migration_cancel, req, fake_id, body) db_api.share_server_get.assert_called_once_with(context, fake_id) share_api.API.share_server_migration_cancel.assert_called_once_with( context, 'fake_share_server') def test_share_server_migration_get_progress(self): server = db_utils.create_share_server( id='fake_server_id', 
    @ddt.data({'api_exception': exception.ServiceIsDown(service='fake_srv'),
               'expected_exception': webob.exc.HTTPConflict},
              {'api_exception': exception.InvalidShareServer(reason=""),
               'expected_exception': webob.exc.HTTPBadRequest})
    @ddt.unpack
    def test_share_server_migration_get_progress_exceptions(
            self, api_exception, expected_exception):
        # Errors raised by the share API while fetching migration progress
        # must be translated into the corresponding HTTP error responses.
        fake_id = 'fake_server_id'
        req = self._get_server_migration_request(fake_id)
        context = req.environ['manila.context']
        # NOTE(review): the body key says 'migration_complete' although this
        # test exercises migration_get_progress -- looks like a copy-paste;
        # confirm whether the controller reads the body key at all.
        body = {'migration_complete': None}
        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(return_value='fake_share_server'))
        mock_get_progress = self.mock_object(
            share_api.API, 'share_server_migration_get_progress',
            mock.Mock(side_effect=api_exception))
        self.assertRaises(expected_exception,
                          self.controller.share_server_migration_get_progress,
                          req, fake_id, body)
        # The API layer is invoked with the id, not the fetched server object.
        mock_get_progress.assert_called_once_with(context, fake_id)
'migration_check': { 'writable': requested_writable, 'nondisruptive': requested_nondisruptive, 'preserve_snapshots': requested_preserve_snapshots, 'new_share_network_id': fake_share_network['id'], 'host': fake_host } } driver_result = { 'compatible': False, 'writable': False, 'nondisruptive': True, 'preserve_snapshots': False, 'share_network_id': 'fake_network_uuid', 'migration_cancel': False, 'migration_get_progress': False, } mock_server_get = self.mock_object( db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) mock_network_get = self.mock_object( db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) mock_migration_check = self.mock_object( share_api.API, 'share_server_migration_check', mock.Mock(return_value=driver_result)) result = self.controller.share_server_migration_check( req, fake_id, body) expected_result_keys = ['compatible', 'requested_capabilities', 'supported_capabilities'] [self.assertIn(key, result) for key in expected_result_keys] mock_server_get.assert_called_once_with( context, fake_share_server['id']) mock_network_get.assert_called_once_with( context, fake_share_network['id']) common.check_share_network_is_active.assert_called_once_with( fake_share_network) mock_migration_check.assert_called_once_with( context, fake_share_server, fake_host, requested_writable, requested_nondisruptive, requested_preserve_snapshots, new_share_network=fake_share_network) @ddt.data( (webob.exc.HTTPNotFound, True, False, {'migration_check': {}}), (webob.exc.HTTPBadRequest, False, True, {'migration_check': {'new_share_network_id': 'fake_id'}}), (webob.exc.HTTPBadRequest, False, False, None) ) @ddt.unpack def test_share_server_migration_check_exception( self, exception_to_raise, raise_server_get_exception, raise_network_get_action, body): req = self._get_server_migration_request('fake_id') context = req.environ['manila.context'] if body: 
body['migration_check']['writable'] = False body['migration_check']['nondisruptive'] = False body['migration_check']['preserve_snapshots'] = False body['migration_check']['host'] = 'fakehost@fakebackend' else: body = {} server = db_utils.create_share_server( id='fake_server_id', status=constants.STATUS_ACTIVE) server_get = mock.Mock(return_value=server) network_get = mock.Mock() if raise_server_get_exception: server_get = mock.Mock( side_effect=exception.ShareServerNotFound( share_server_id='fake')) if raise_network_get_action: network_get = mock.Mock( side_effect=exception.ShareNetworkNotFound( share_network_id='fake')) mock_server_get = self.mock_object( db_api, 'share_server_get', server_get) mock_network_get = self.mock_object( db_api, 'share_network_get', network_get) self.assertRaises( exception_to_raise, self.controller.share_server_migration_check, req, 'fake_id', body ) mock_server_get.assert_called_once_with( context, 'fake_id') if raise_network_get_action: mock_network_get.assert_called_once_with(context, 'fake_id') @ddt.data( {'api_exception': exception.ServiceIsDown(service='fake_srv'), 'expected_exception': webob.exc.HTTPBadRequest}, {'api_exception': exception.InvalidShareServer(reason=""), 'expected_exception': webob.exc.HTTPBadRequest}) @ddt.unpack def test_share_server_migration_complete_exceptions_from_api( self, api_exception, expected_exception): req = self._get_server_migration_request('fake_id') context = req.environ['manila.context'] body = { 'migration_check': { 'writable': False, 'nondisruptive': False, 'preserve_snapshots': True, 'host': 'fakehost@fakebackend', } } self.mock_object(db_api, 'share_server_get', mock.Mock(return_value='fake_share_server')) self.mock_object(share_api.API, 'share_server_migration_complete', mock.Mock(side_effect=api_exception)) self.assertRaises( expected_exception, self.controller.share_server_migration_complete, req, 'fake_id', body ) db_api.share_server_get.assert_called_once_with(context, 'fake_id') 
share_api.API.share_server_migration_complete.assert_called_once_with( context, 'fake_share_server', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_snapshot_export_locations.py0000664000175000017500000001032300000000000026666 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from manila.api.v2 import share_snapshot_export_locations as export_locations from manila.common import constants from manila import context from manila.db.sqlalchemy import api as db_api from manila import exception from manila import test from manila.tests.api import fakes from manila.tests import db_utils @ddt.ddt class ShareSnapshotExportLocationsAPITest(test.TestCase): def _get_request(self, version="2.32", use_admin_context=True): req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/export-locations' % self.snapshot['id'], version=version, use_admin_context=use_admin_context) return req def setUp(self): super(ShareSnapshotExportLocationsAPITest, self).setUp() self.controller = ( export_locations.ShareSnapshotExportLocationController()) self.share = db_utils.create_share() self.snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=self.share['id']) self.snapshot_instance = db_utils.create_snapshot_instance( status=constants.STATUS_AVAILABLE, 
share_instance_id=self.share['instance']['id'], snapshot_id=self.snapshot['id']) self.values = { 'share_snapshot_instance_id': self.snapshot_instance['id'], 'path': 'fake/user_path', 'is_admin_only': True, } self.exp_loc = db_api.share_snapshot_instance_export_location_create( context.get_admin_context(), self.values) self.req = self._get_request() def test_index(self): self.mock_object( db_api, 'share_snapshot_instance_export_locations_get_all', mock.Mock(return_value=[self.exp_loc])) out = self.controller.index(self._get_request('2.32'), self.snapshot['id']) values = { 'share_snapshot_export_locations': [{ 'share_snapshot_instance_id': self.snapshot_instance['id'], 'path': 'fake/user_path', 'is_admin_only': True, 'id': self.exp_loc['id'], 'links': [{ 'href': 'http://localhost/share/v2/fake/' 'share_snapshot_export_locations/' + self.exp_loc['id'], 'rel': 'self' }, { 'href': 'http://localhost/share/fake/' 'share_snapshot_export_locations/' + self.exp_loc['id'], 'rel': 'bookmark' }], }] } self.assertSubDictMatch(values, out) def test_show(self): out = self.controller.show(self._get_request('2.32'), self.snapshot['id'], self.exp_loc['id']) self.assertSubDictMatch( {'share_snapshot_export_location': self.values}, out) @ddt.data('1.0', '2.0', '2.5', '2.8', '2.31') def test_list_with_unsupported_version(self, version): self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.index, self._get_request(version), self.snapshot_instance['id'], ) @ddt.data('1.0', '2.0', '2.5', '2.8', '2.31') def test_show_with_unsupported_version(self, version): self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.show, self._get_request(version), self.snapshot['id'], self.exp_loc['id'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_snapshot_instance_export_locations.py0000664000175000017500000001031300000000000030551 
0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from manila.api.v2 import share_snapshot_instance_export_locations as exp_loc from manila.common import constants from manila import context from manila.db.sqlalchemy import api as db_api from manila import exception from manila import test from manila.tests.api import fakes from manila.tests import db_utils @ddt.ddt class ShareSnapshotInstanceExportLocationsAPITest(test.TestCase): def _get_request(self, version="2.32", use_admin_context=True): req = fakes.HTTPRequest.blank( '/v2/fake/snapshot-instances/%s/export-locations' % self.snapshot_instance['id'], version=version, use_admin_context=use_admin_context) return req def setUp(self): super(ShareSnapshotInstanceExportLocationsAPITest, self).setUp() self.controller = ( exp_loc.ShareSnapshotInstanceExportLocationController()) self.share = db_utils.create_share() self.snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=self.share['id']) self.snapshot_instance = db_utils.create_snapshot_instance( 'fake_snapshot_id_1', status=constants.STATUS_CREATING, share_instance_id=self.share['instance']['id']) self.values = { 'share_snapshot_instance_id': self.snapshot_instance['id'], 'path': 'fake/user_path', 'is_admin_only': True, } self.el = db_api.share_snapshot_instance_export_location_create( context.get_admin_context(), self.values) 
self.req = self._get_request() def test_index(self): self.mock_object( db_api, 'share_snapshot_instance_export_locations_get_all', mock.Mock(return_value=[self.el])) out = self.controller.index(self._get_request('2.32'), self.snapshot_instance['id']) values = { 'share_snapshot_export_locations': [{ 'share_snapshot_instance_id': self.snapshot_instance['id'], 'path': 'fake/user_path', 'is_admin_only': True, 'id': self.el['id'], 'links': [{ 'href': 'http://localhost/share/v2/fake/' 'share_snapshot_export_locations/' + self.el['id'], 'rel': 'self' }, { 'href': 'http://localhost/share/fake/' 'share_snapshot_export_locations/' + self.el['id'], 'rel': 'bookmark' }], }] } self.assertSubDictMatch(values, out) def test_show(self): out = self.controller.show(self._get_request('2.32'), self.snapshot_instance['id'], self.el['id']) self.assertSubDictMatch( {'share_snapshot_export_location': self.values}, out) @ddt.data('1.0', '2.0', '2.5', '2.8', '2.31') def test_list_with_unsupported_version(self, version): self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.index, self._get_request(version), self.snapshot_instance['id'], ) @ddt.data('1.0', '2.0', '2.5', '2.8', '2.31') def test_show_with_unsupported_version(self, version): self.assertRaises( exception.VersionNotFoundForAPIMethod, self.controller.show, self._get_request(version), self.snapshot['id'], self.el['id'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_snapshot_instances.py0000664000175000017500000002406600000000000025272 0ustar00zuulzuul00000000000000# Copyright 2016 Huawei Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils from webob import exc from manila.api.v2 import share_snapshot_instances from manila.common import constants from manila import context from manila import exception from manila import policy from manila import test from manila.tests.api import fakes from manila.tests import db_utils from manila.tests import fake_share CONF = cfg.CONF @ddt.ddt class ShareSnapshotInstancesApiTest(test.TestCase): """Share snapshot instance Api Test.""" def setUp(self): super(ShareSnapshotInstancesApiTest, self).setUp() self.controller = (share_snapshot_instances. 
    def _get_fake_snapshot_instance(self, summary=False, **values):
        """Return (fake_instance, expected_api_view) for list/show tests.

        :param summary: when True the expected view keeps only the summary
            keys (id, snapshot_id, status); otherwise detail-only fields
            are added as well.
        :param values: NOTE(review): accepted but never used -- this method
            does not forward extra values anywhere; confirm intent.
        """
        snapshot_instance = fake_share.fake_snapshot_instance(
            as_primitive=True)
        # Keys present in both the summary and the detail API views.
        expected_keys = {
            'id',
            'snapshot_id',
            'status',
        }
        expected_snapshot_instance = {key: snapshot_instance[key] for key in
                                      snapshot_instance if key in
                                      expected_keys}
        if not summary:
            # The detail view additionally exposes the parent share id and
            # the instance bookkeeping fields.
            expected_snapshot_instance['share_id'] = (
                snapshot_instance.get('share_instance').get('share_id'))
            expected_snapshot_instance.update({
                'created_at': snapshot_instance.get('created_at'),
                'updated_at': snapshot_instance.get('updated_at'),
                'progress': snapshot_instance.get('progress'),
                'provider_location': snapshot_instance.get(
                    'provider_location'),
                'share_instance_id': snapshot_instance.get(
                    'share_instance_id'),
            })
        return snapshot_instance, expected_snapshot_instance
req.headers['X-Openstack-Manila-Api-Version'] = self.api_version return instance, req def _get_context(self, role): return getattr(self, '%s_context' % role) @ddt.data(None, 'FAKE_SNAPSHOT_ID') def test_list_snapshot_instances_summary(self, snapshot_id): snapshot_instance, expected_snapshot_instance = ( self._get_fake_snapshot_instance(summary=True)) self.mock_object(share_snapshot_instances.db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[snapshot_instance])) url = '/snapshot-instances' if snapshot_id: url += '?snapshot_id=%s' % snapshot_id req = fakes.HTTPRequest.blank(url, version=self.api_version) req_context = req.environ['manila.context'] res_dict = self.controller.index(req) self.assertEqual([expected_snapshot_instance], res_dict['snapshot_instances']) self.mock_policy_check.assert_called_once_with( req_context, self.resource_name, 'index') def test_list_snapshot_instances_detail(self): snapshot_instance, expected_snapshot_instance = ( self._get_fake_snapshot_instance()) self.mock_object(share_snapshot_instances.db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[snapshot_instance])) res_dict = self.controller.detail(self.snapshot_instances_req) self.assertEqual([expected_snapshot_instance], res_dict['snapshot_instances']) self.mock_policy_check.assert_called_once_with( self.admin_context, self.resource_name, 'detail') def test_list_snapshot_instances_detail_invalid_snapshot(self): self.mock_object(share_snapshot_instances.db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[])) req = self.snapshot_instances_req req.GET['snapshot_id'] = 'FAKE_SNAPSHOT_ID' res_dict = self.controller.detail(req) self.assertEqual([], res_dict['snapshot_instances']) self.mock_policy_check.assert_called_once_with( self.admin_context, self.resource_name, 'detail') def test_show(self): snapshot_instance, expected_snapshot_instance = ( self._get_fake_snapshot_instance()) self.mock_object( 
share_snapshot_instances.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) res_dict = self.controller.show(self.snapshot_instances_req, snapshot_instance.get('id')) self.assertEqual(expected_snapshot_instance, res_dict['snapshot_instance']) self.mock_policy_check.assert_called_once_with( self.admin_context, self.resource_name, 'show') def test_show_snapshot_instance_not_found(self): mock__view_builder_call = self.mock_object( share_snapshot_instances.instance_view.ViewBuilder, 'detail') fake_exception = exception.ShareSnapshotInstanceNotFound( instance_id='FAKE_SNAPSHOT_INSTANCE_ID') self.mock_object(share_snapshot_instances.db, 'share_snapshot_instance_get', mock.Mock(side_effect=fake_exception)) self.assertRaises(exc.HTTPNotFound, self.controller.show, self.snapshot_instances_req, 'FAKE_SNAPSHOT_INSTANCE_ID') self.assertFalse(mock__view_builder_call.called) @ddt.data('index', 'detail', 'show', 'reset_status') def test_policy_not_authorized(self, method_name): method = getattr(self.controller, method_name) if method_name in ('index', 'detail'): arguments = {} else: arguments = { 'id': 'FAKE_SNAPSHOT_ID', 'body': {'FAKE_KEY': 'FAKE_VAL'}, } noauthexc = exception.PolicyNotAuthorized(action=method_name) with mock.patch.object( policy, 'check_policy', mock.Mock(side_effect=noauthexc)): self.assertRaises( exc.HTTPForbidden, method, self.snapshot_instances_req, **arguments) @ddt.data('index', 'show', 'detail', 'reset_status') def test_upsupported_microversion(self, method_name): unsupported_microversions = ('1.0', '2.18') method = getattr(self.controller, method_name) arguments = { 'id': 'FAKE_SNAPSHOT_ID', } if method_name in ('index'): arguments.clear() for microversion in unsupported_microversions: req = fakes.HTTPRequest.blank( '/snapshot-instances', version=microversion) self.assertRaises(exception.VersionNotFoundForAPIMethod, method, req, **arguments) def _reset_status(self, context, instance, req, valid_code=202, valid_status=None, 
    @ddt.data(*fakes.fixture_reset_status_with_different_roles)
    @ddt.unpack
    def test_reset_status_with_different_roles(self, role, valid_code,
                                               valid_status, version):
        # Drive the reset-status action as different user roles and verify
        # both the HTTP response code and the resulting model status via
        # the _reset_status helper.
        # NOTE(review): `version` is unpacked from the shared fixture but
        # unused here -- the request built by _setup_snapshot_instance_data
        # already carries self.api_version.
        instance, action_req = self._setup_snapshot_instance_data()
        ctxt = self._get_context(role)
        self._reset_status(ctxt, instance, action_req, valid_code=valid_code,
                           valid_status=valid_status)
from unittest import mock import ast import ddt from oslo_serialization import jsonutils import webob from manila.api.openstack import api_version_request as api_version from manila.api.v2 import share_snapshots from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila.share import api as share_api from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes from manila.tests import db_utils from manila.tests import fake_share from manila import utils MIN_MANAGE_SNAPSHOT_API_VERSION = '2.12' def get_fake_manage_body(share_id=None, provider_location=None, driver_options=None, **kwargs): fake_snapshot = { 'share_id': share_id, 'provider_location': provider_location, 'driver_options': driver_options, 'user_id': 'fake_user_id', 'project_id': 'fake_project_id', } fake_snapshot.update(kwargs) return {'snapshot': fake_snapshot} @ddt.ddt class ShareSnapshotAPITest(test.TestCase): """Share Snapshot API Test.""" def setUp(self): super(ShareSnapshotAPITest, self).setUp() self.controller = share_snapshots.ShareSnapshotsController() self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'get_all_snapshots', stubs.stub_snapshot_get_all_by_project) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_api.API, 'snapshot_update', stubs.stub_snapshot_update) self.mock_object( policy, 'check_policy', mock.Mock(return_value=True) ) self.snp_example = { 'share_id': 100, 'size': 12, 'force': False, 'display_name': 'updated_snapshot_name', 'display_description': 'updated_snapshot_description', } @ddt.data('1.0', '2.16', '2.17') def test_snapshot_create(self, version): self.mock_object(share_api.API, 'create_snapshot', stubs.stub_snapshot_create) body = { 'snapshot': { 'share_id': 'fakeshareid', 'force': False, 'name': 'displaysnapname', 'description': 'displaysnapdesc', } } 
url = ('/v2/fake/snapshots' if version.startswith('2.') else '/v1/fake/snapshots') req = fakes.HTTPRequest.blank(url, version=version) res_dict = self.controller.create(req, body) expected = fake_share.expected_snapshot(version=version, id=200) self.assertEqual(expected, res_dict) @ddt.data(0, False) def test_snapshot_create_no_support(self, snapshot_support): self.mock_object(share_api.API, 'create_snapshot') self.mock_object( share_api.API, 'get', mock.Mock(return_value={'snapshot_support': snapshot_support})) body = { 'snapshot': { 'share_id': 100, 'force': False, 'name': 'fake_share_name', 'description': 'fake_share_description', } } req = fakes.HTTPRequest.blank('/v2/fake/snapshots') self.assertRaises( webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) self.assertFalse(share_api.API.create_snapshot.called) def test_snapshot_create_no_body(self): body = {} req = fakes.HTTPRequest.blank('/v2/fake/snapshots') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, body) def test_snapshot_delete(self): self.mock_object(share_api.API, 'delete_snapshot', stubs.stub_snapshot_delete) req = fakes.HTTPRequest.blank('/v2/fake/snapshots/200') resp = self.controller.delete(req, 200) self.assertEqual(202, resp.status_int) def test_snapshot_delete_nofound(self): self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get_notfound) req = fakes.HTTPRequest.blank('/v2/fake/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 200) @ddt.data('2.0', '2.16', '2.17', '2.73') def test_snapshot_show(self, version): req = fakes.HTTPRequest.blank('/v2/fake/snapshots/200', version=version) expected = fake_share.expected_snapshot(version=version, id=200) res_dict = self.controller.show(req, 200) self.assertEqual(expected, res_dict) def test_snapshot_show_nofound(self): self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get_notfound) req = 
fakes.HTTPRequest.blank('/v2/fake/snapshots/200') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '200') def test_snapshot_list_summary(self): self.mock_object(share_api.API, 'get_all_snapshots', stubs.stub_snapshot_get_all_by_project) req = fakes.HTTPRequest.blank('/v2/fake/snapshots') res_dict = self.controller.index(req) expected = { 'snapshots': [ { 'name': 'displaysnapname', 'id': 2, 'links': [ { 'href': 'http://localhost/share/v2/fake/' 'snapshots/2', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/snapshots/2', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) def _snapshot_list_summary_with_search_opts(self, version, use_admin_context): search_opts = fake_share.search_opts() if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.36')): search_opts.pop('name') search_opts['display_name~'] = 'fake_name' if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.79')): search_opts.update({'with_count': 'true'}) # fake_key should be filtered for non-admin url = '/v2/fake/snapshots?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank( url, use_admin_context=use_admin_context, version=version) method = 'get_all_snapshots' db_snapshots = [ {'id': 'id1', 'display_name': 'n1', 'status': 'fake_status', }, {'id': 'id2', 'display_name': 'n2', 'status': 'fake_status', }, {'id': 'id3', 'display_name': 'n3', 'status': 'fake_status', }, ] mock_action = {'return_value': [db_snapshots[1]]} if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.79')): method = 'get_all_snapshots_with_count' mock_action = {'side_effect': [(1, [db_snapshots[1]])]} mock_get_all_snapshots = ( self.mock_object(share_api.API, method, mock.Mock(**mock_action))) result = self.controller.index(req) search_opts_expected = { 'status': search_opts['status'], 'share_id': search_opts['share_id'], } if 
(api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.36')): search_opts_expected['display_name~'] = 'fake_name' else: search_opts_expected['display_name'] = search_opts['name'] if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) mock_get_all_snapshots.assert_called_once_with( req.environ['manila.context'], limit=int(search_opts['limit']), offset=int(search_opts['offset']), sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(db_snapshots[1]['id'], result['snapshots'][0]['id']) self.assertEqual( db_snapshots[1]['display_name'], result['snapshots'][0]['name']) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.79')): self.assertEqual(1, result['count']) @ddt.data({'version': '2.35', 'use_admin_context': True}, {'version': '2.36', 'use_admin_context': True}, {'version': '2.79', 'use_admin_context': True}, {'version': '2.35', 'use_admin_context': False}, {'version': '2.36', 'use_admin_context': False}, {'version': '2.79', 'use_admin_context': False}) @ddt.unpack def test_snapshot_list_summary_with_search_opts(self, version, use_admin_context): self._snapshot_list_summary_with_search_opts( version=version, use_admin_context=use_admin_context) def test_snapshot_list_metadata_filter(self, version='2.73', use_admin_context=True): search_opts = { 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'offset': '1', 'limit': '1', 'metadata': "{'foo': 'bar'}" } # fake_key should be filtered for non-admin url = '/v2/fake/snapshots?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank( url, use_admin_context=use_admin_context, version=version) snapshots = [ {'id': 'id1', 'metadata': {'foo': 'bar'}} ] self.mock_object(share_api.API, 'get_all_snapshots', mock.Mock(return_value=snapshots)) result = self.controller.index(req) 
search_opts_expected = { 'metadata': ast.literal_eval(search_opts['metadata']) } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) share_api.API.get_all_snapshots.assert_called_once_with( req.environ['manila.context'], limit=int(search_opts['limit']), offset=int(search_opts['offset']), sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(snapshots[0]['id'], result['snapshots'][0]['id']) def _snapshot_list_detail_with_search_opts(self, version, use_admin_context): search_opts = fake_share.search_opts() if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.79')): search_opts.update({'with_count': 'true'}) # fake_key should be filtered for non-admin url = '/v2/fake/shares/detail?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context) method = 'get_all_snapshots' db_snapshots = [ { 'id': 'id1', 'display_name': 'n1', 'status': 'fake_status', 'aggregate_status': 'fake_status', }, { 'id': 'id2', 'display_name': 'n2', 'status': 'someotherstatus', 'aggregate_status': 'fake_status', 'share_id': 'fake_share_id', }, { 'id': 'id3', 'display_name': 'n3', 'status': 'fake_status', 'aggregate_status': 'fake_status', }, ] mock_action = {'return_value': [db_snapshots[1]]} if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.79')): method = 'get_all_snapshots_with_count' mock_action = {'side_effect': [(1, [db_snapshots[1]])]} mock_get_all_snapshots = ( self.mock_object(share_api.API, method, mock.Mock(**mock_action))) result = self.controller.detail(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_id': search_opts['share_id'], } if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) 
mock_get_all_snapshots.assert_called_once_with( req.environ['manila.context'], limit=int(search_opts['limit']), offset=int(search_opts['offset']), sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['snapshots'])) self.assertEqual(db_snapshots[1]['id'], result['snapshots'][0]['id']) self.assertEqual( db_snapshots[1]['display_name'], result['snapshots'][0]['name']) self.assertEqual( db_snapshots[1]['aggregate_status'], result['snapshots'][0]['status']) self.assertEqual( db_snapshots[1]['share_id'], result['snapshots'][0]['share_id']) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.79')): self.assertEqual(1, result['count']) @ddt.data({'version': '2.78', 'use_admin_context': True}, {'version': '2.78', 'use_admin_context': False}, {'version': '2.79', 'use_admin_context': True}, {'version': '2.79', 'use_admin_context': False}) @ddt.unpack def test_snapshot_list_detail_with_search_opts(self, version, use_admin_context): self._snapshot_list_detail_with_search_opts( version=version, use_admin_context=use_admin_context) @ddt.data('2.0', '2.16', '2.17') def test_snapshot_list_detail(self, version): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/v2/fake/snapshots/detail', environ=env, version=version) expected_s = fake_share.expected_snapshot(version=version, id=2) expected = {'snapshots': [expected_s['snapshot']]} res_dict = self.controller.detail(req) self.assertEqual(expected, res_dict) @ddt.data('2.0', '2.16', '2.17') def test_snapshot_updates_display_name_and_description(self, version): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/v2/fake/snapshot/1', version=version) res_dict = self.controller.update(req, 1, body) self.assertEqual(snp["display_name"], res_dict['snapshot']["name"]) if (api_version.APIVersionRequest(version) <= api_version.APIVersionRequest('2.16')): 
self.assertNotIn('user_id', res_dict['snapshot']) self.assertNotIn('project_id', res_dict['snapshot']) else: self.assertIn('user_id', res_dict['snapshot']) self.assertIn('project_id', res_dict['snapshot']) def test_share_update_invalid_key(self): snp = self.snp_example body = {"snapshot": snp} req = fakes.HTTPRequest.blank('/v2/fake/snapshot/1') res_dict = self.controller.update(req, 1, body) self.assertNotEqual(snp["size"], res_dict['snapshot']["size"]) def test_access_list(self): share = db_utils.create_share(mount_snapshot_support=True) snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) expected = [] self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.mock_object(share_api.API, 'snapshot_access_get_all', mock.Mock(return_value=expected)) id = 'fake_snap_id' req = fakes.HTTPRequest.blank('/v2/fake/snapshots/%s/action' % id, version='2.32') actual = self.controller.access_list(req, id) self.assertEqual(expected, actual['snapshot_access_list']) @ddt.data(('1.1.1.1', '2.32'), ('1.1.1.1', '2.38'), ('1001::1001', '2.38')) @ddt.unpack def test_allow_access(self, ip_address, version): share = db_utils.create_share(mount_snapshot_support=True) snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) access = { 'id': 'fake_id', 'access_type': 'ip', 'access_to': ip_address, 'state': 'new', } get = self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) get_snapshot = self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) allow_access = self.mock_object(share_api.API, 'snapshot_allow_access', mock.Mock(return_value=access)) body = {'allow_access': access} req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/action' % snapshot['id'], version=version) actual = self.controller.allow_access(req, snapshot['id'], body) self.assertEqual(access, 
actual['snapshot_access']) get.assert_called_once_with(utils.IsAMatcher(context.RequestContext), share['id']) get_snapshot.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) allow_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot, access['access_type'], access['access_to']) def test_allow_access_data_not_found_exception(self): share = db_utils.create_share(mount_snapshot_support=True) snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/action' % snapshot['id'], version='2.32') body = {} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.allow_access, req, snapshot['id'], body) def test_allow_access_exists_exception(self): share = db_utils.create_share(mount_snapshot_support=True) snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/action' % snapshot['id'], version='2.32') access = { 'id': 'fake_id', 'access_type': 'ip', 'access_to': '1.1.1.1', 'state': 'new', } msg = "Share snapshot access exists." 
get = self.mock_object(share_api.API, 'get', mock.Mock( return_value=share)) get_snapshot = self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) allow_access = self.mock_object( share_api.API, 'snapshot_allow_access', mock.Mock( side_effect=exception.ShareSnapshotAccessExists(msg))) body = {'allow_access': access} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.allow_access, req, snapshot['id'], body) get.assert_called_once_with(utils.IsAMatcher(context.RequestContext), share['id']) get_snapshot.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) allow_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot, access['access_type'], access['access_to']) def test_allow_access_share_without_mount_snap_support(self): share = db_utils.create_share(mount_snapshot_support=False) snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) access = { 'id': 'fake_id', 'access_type': 'ip', 'access_to': '1.1.1.1', 'state': 'new', } get_snapshot = self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) get = self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) body = {'allow_access': access} req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/action' % snapshot['id'], version='2.32') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.allow_access, req, snapshot['id'], body) get.assert_called_once_with(utils.IsAMatcher(context.RequestContext), share['id']) get_snapshot.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) def test_allow_access_empty_parameters(self): share = db_utils.create_share(mount_snapshot_support=True) snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) access = {'id': 'fake_id', 'access_type': '', 'access_to': ''} body = {'allow_access': access} req = fakes.HTTPRequest.blank( 
'/v2/fake/snapshots/%s/action' % snapshot['id'], version='2.32') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.allow_access, req, snapshot['id'], body) def test_deny_access(self): share = db_utils.create_share(mount_snapshot_support=True) snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) access = db_utils.create_snapshot_access( share_snapshot_id=snapshot['id']) get = self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) get_snapshot = self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) access_get = self.mock_object(share_api.API, 'snapshot_access_get', mock.Mock(return_value=access)) deny_access = self.mock_object(share_api.API, 'snapshot_deny_access') body = {'deny_access': {'access_id': access.id}} req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/action' % snapshot['id'], version='2.32') resp = self.controller.deny_access(req, snapshot['id'], body) self.assertEqual(202, resp.status_int) get.assert_called_once_with(utils.IsAMatcher(context.RequestContext), share['id']) get_snapshot.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) access_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), body['deny_access']['access_id']) deny_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot, access) def test_deny_access_data_not_found_exception(self): share = db_utils.create_share(mount_snapshot_support=True) snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/action' % snapshot['id'], version='2.32') body = {} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.deny_access, req, snapshot['id'], body) def test_deny_access_access_rule_not_found(self): share = db_utils.create_share(mount_snapshot_support=True) snapshot = db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, 
share_id=share['id']) access = db_utils.create_snapshot_access( share_snapshot_id=snapshot['id']) wrong_access = { 'access_type': 'fake_type', 'access_to': 'fake_IP', 'share_snapshot_id': 'fake_id' } get = self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) get_snapshot = self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) access_get = self.mock_object(share_api.API, 'snapshot_access_get', mock.Mock(return_value=wrong_access)) body = {'deny_access': {'access_id': access.id}} req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/action' % snapshot['id'], version='2.32') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.deny_access, req, snapshot['id'], body) get.assert_called_once_with(utils.IsAMatcher(context.RequestContext), share['id']) get_snapshot.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) access_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), body['deny_access']['access_id']) @ddt.ddt class ShareSnapshotAdminActionsAPITest(test.TestCase): def setUp(self): super(ShareSnapshotAdminActionsAPITest, self).setUp() self.controller = share_snapshots.ShareSnapshotsController() self.flags(transport_url='rabbit://fake:fake@mqhost:5672') self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') self.resource_name = self.controller.resource_name self.manage_request = fakes.HTTPRequest.blank( '/v2/fake/snapshots/manage', use_admin_context=True, version=MIN_MANAGE_SNAPSHOT_API_VERSION) self.snapshot_id = 'fake' self.unmanage_request = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/unmanage' % self.snapshot_id, use_admin_context=True, version=MIN_MANAGE_SNAPSHOT_API_VERSION) def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_snapshot_data(self, snapshot=None, version='2.7'): if snapshot is None: share = db_utils.create_share() snapshot = 
db_utils.create_snapshot( status=constants.STATUS_AVAILABLE, share_id=share['id']) path = '/v2/fake/snapshots/%s/action' % snapshot['id'] req = fakes.HTTPRequest.blank(path, script_name=path, version=version) return snapshot, req def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None, version='2.7'): if float(version) > 2.6: action_name = 'reset_status' else: action_name = 'os-reset_status' if body is None: body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response code and model status self.assertEqual(valid_code, resp.status_int) actual_model = db_access_method(ctxt, model['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_snapshot_reset_status_with_different_roles(self, role, valid_code, valid_status, version): ctxt = self._get_context(role) snapshot, req = self._setup_snapshot_data(version=version) self._reset_status(ctxt, snapshot, req, db.share_snapshot_get, valid_code, valid_status, version=version) @ddt.data( ({'os-reset_status': {'x-status': 'bad'}}, '2.6'), ({'reset_status': {'x-status': 'bad'}}, '2.7'), ({'os-reset_status': {'status': 'invalid'}}, '2.6'), ({'reset_status': {'status': 'invalid'}}, '2.7'), ) @ddt.unpack def test_snapshot_invalid_reset_status_body(self, body, version): snapshot, req = self._setup_snapshot_data(version=version) self._reset_status(self.admin_context, snapshot, req, db.share_snapshot_get, 400, constants.STATUS_AVAILABLE, body, version=version) def _force_delete(self, ctxt, model, req, db_access_method, valid_code, version='2.7'): if float(version) > 2.6: action_name = 'force_delete' else: action_name = 'os-force_delete' req.method 
= 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = jsonutils.dumps({action_name: {}}).encode("utf-8") req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # Validate response self.assertEqual(valid_code, resp.status_int) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_snapshot_force_delete_with_different_roles(self, role, resp_code, version): ctxt = self._get_context(role) snapshot, req = self._setup_snapshot_data(version=version) self._force_delete(ctxt, snapshot, req, db.share_snapshot_get, resp_code, version=version) def test_snapshot_force_delete_missing(self): ctxt = self._get_context('admin') snapshot, req = self._setup_snapshot_data(snapshot={'id': 'fake'}) self._force_delete(ctxt, snapshot, req, db.share_snapshot_get, 404) @ddt.data( {}, {'snapshots': {}}, {'snapshot': get_fake_manage_body(share_id='xxxxxxxx')}, {'snapshot': get_fake_manage_body(provider_location='xxxxxxxx')}, {'snapshot': {'provider_location': {'x': 'y'}, 'share_id': 'xyzzy'}}, ) def test_snapshot_manage_invalid_body(self, body): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.manage, self.manage_request, body) self.mock_policy_check.assert_called_once_with( self.manage_request.environ['manila.context'], self.resource_name, 'manage_snapshot') @ddt.data( {'version': '2.12', 'data': get_fake_manage_body(name='foo', display_description='bar')}, {'version': '2.12', 'data': get_fake_manage_body(display_name='foo', description='bar')}, {'version': '2.17', 'data': get_fake_manage_body(display_name='foo', description='bar')}, {'version': '2.17', 'data': get_fake_manage_body(name='foo', display_description='bar')}, ) @ddt.unpack def test_snapshot_manage(self, version, data): self.mock_policy_check = self.mock_object( policy, 'check_policy', 
mock.Mock(return_value=True)) data['snapshot']['share_id'] = 'fake' data['snapshot']['provider_location'] = 'fake_volume_snapshot_id' data['snapshot']['driver_options'] = {} return_share = fake_share.fake_share(is_soft_deleted=False, id='fake') return_snapshot = fake_share.fake_snapshot( create_instance=True, id='fake_snap', provider_location='fake_volume_snapshot_id') self.mock_object( share_api.API, 'get', mock.Mock( return_value=return_share)) self.mock_object( share_api.API, 'manage_snapshot', mock.Mock( return_value=return_snapshot)) share_snapshot = { 'share_id': 'fake', 'provider_location': 'fake_volume_snapshot_id', 'display_name': 'foo', 'display_description': 'bar', } req = fakes.HTTPRequest.blank('/v2/fake/snapshots/manage', use_admin_context=True, version=version) actual_result = self.controller.manage(req, data) actual_snapshot = actual_result['snapshot'] share_api.API.manage_snapshot.assert_called_once_with( mock.ANY, share_snapshot, data['snapshot']['driver_options'], share=return_share) self.assertEqual(return_snapshot['id'], actual_result['snapshot']['id']) self.assertEqual('fake_volume_snapshot_id', actual_result['snapshot']['provider_location']) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.17')): self.assertEqual(return_snapshot['user_id'], actual_snapshot['user_id']) self.assertEqual(return_snapshot['project_id'], actual_snapshot['project_id']) else: self.assertNotIn('user_id', actual_snapshot) self.assertNotIn('project_id', actual_snapshot) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'manage_snapshot') @ddt.data(exception.ShareNotFound(share_id='fake'), exception.ShareSnapshotNotFound(snapshot_id='fake'), exception.ManageInvalidShareSnapshot(reason='error'), exception.InvalidShare(reason='error')) def test_manage_exception(self, exception_type): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) body = 
get_fake_manage_body( share_id='fake', provider_location='fake_volume_snapshot_id', driver_options={}) return_share = fake_share.fake_share(is_soft_deleted=False, id='fake') self.mock_object( share_api.API, 'get', mock.Mock( return_value=return_share)) self.mock_object( share_api.API, 'manage_snapshot', mock.Mock( side_effect=exception_type)) http_ex = webob.exc.HTTPNotFound if (isinstance(exception_type, exception.ManageInvalidShareSnapshot) or isinstance(exception_type, exception.InvalidShare)): http_ex = webob.exc.HTTPConflict self.assertRaises(http_ex, self.controller.manage, self.manage_request, body) self.mock_policy_check.assert_called_once_with( self.manage_request.environ['manila.context'], self.resource_name, 'manage_snapshot') def test_manage_share_has_been_soft_deleted(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) body = get_fake_manage_body( share_id='fake', provider_location='fake_volume_snapshot_id', driver_options={}) return_share = fake_share.fake_share(is_soft_deleted=True, id='fake') self.mock_object( share_api.API, 'get', mock.Mock( return_value=return_share)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.manage, self.manage_request, body) self.mock_policy_check.assert_called_once_with( self.manage_request.environ['manila.context'], self.resource_name, 'manage_snapshot') @ddt.data('1.0', '2.6', '2.11') def test_manage_version_not_found(self, version): body = get_fake_manage_body( share_id='fake', provider_location='fake_volume_snapshot_id', driver_options={}) fake_req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/manage', use_admin_context=True, version=version) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.manage, fake_req, body) def test_snapshot__unmanage(self): body = {} snapshot = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id', 'share_id': 'bar_id'} fake_req = fakes.HTTPRequest.blank('/v2/fake/snapshots/unmanage', 
use_admin_context=True, version='2.49') mock_unmanage = self.mock_object(self.controller, '_unmanage') self.controller.unmanage(fake_req, snapshot['id'], body) mock_unmanage.assert_called_once_with(fake_req, snapshot['id'], body, allow_dhss_true=True) def test_snapshot_unmanage_share_server(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id', 'share_server_id': 'fake_server_id'} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) snapshot = {'status': constants.STATUS_AVAILABLE, 'id': 'foo_id', 'share_id': 'bar_id'} self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, self.unmanage_request, snapshot['id']) self.controller.share_api.get_snapshot.assert_called_once_with( self.unmanage_request.environ['manila.context'], snapshot['id']) self.controller.share_api.get.assert_called_once_with( self.unmanage_request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') def test_snapshot_unmanage_replicated_snapshot(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id', 'has_replicas': True} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) snapshot = {'status': constants.STATUS_AVAILABLE, 'id': 'foo_id', 'share_id': 'bar_id'} self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.assertRaises(webob.exc.HTTPConflict, self.controller.unmanage, self.unmanage_request, snapshot['id']) self.controller.share_api.get_snapshot.assert_called_once_with( self.unmanage_request.environ['manila.context'], snapshot['id']) self.controller.share_api.get.assert_called_once_with( 
self.unmanage_request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') @ddt.data(*constants.TRANSITIONAL_STATUSES) def test_snapshot_unmanage_with_transitional_state(self, status): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id'} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) snapshot = {'status': status, 'id': 'foo_id', 'share_id': 'bar_id'} self.mock_object( self.controller.share_api, 'get_snapshot', mock.Mock(return_value=snapshot)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.unmanage_request, snapshot['id']) self.controller.share_api.get_snapshot.assert_called_once_with( self.unmanage_request.environ['manila.context'], snapshot['id']) self.controller.share_api.get.assert_called_once_with( self.unmanage_request.environ['manila.context'], share['id']) self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') def test_snapshot_unmanage(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id', 'host': 'fake_host'} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) snapshot = {'status': constants.STATUS_AVAILABLE, 'id': 'foo_id', 'share_id': 'bar_id'} self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.mock_object(share_api.API, 'unmanage_snapshot', mock.Mock()) actual_result = self.controller.unmanage(self.unmanage_request, snapshot['id']) self.assertEqual(202, actual_result.status_int) self.controller.share_api.get_snapshot.assert_called_once_with( self.unmanage_request.environ['manila.context'], snapshot['id']) 
share_api.API.unmanage_snapshot.assert_called_once_with( mock.ANY, snapshot, 'fake_host') self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') def test_unmanage_share_not_found(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) self.mock_object( share_api.API, 'get', mock.Mock( side_effect=exception.ShareNotFound(share_id='fake'))) snapshot = {'status': constants.STATUS_AVAILABLE, 'id': 'foo_id', 'share_id': 'bar_id'} self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.mock_object(share_api.API, 'unmanage_snapshot', mock.Mock()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.unmanage, self.unmanage_request, 'foo_id') self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') def test_unmanage_snapshot_not_found(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id'} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object( share_api.API, 'get_snapshot', mock.Mock( side_effect=exception.ShareSnapshotNotFound( snapshot_id='foo_id'))) self.mock_object(share_api.API, 'unmanage_snapshot', mock.Mock()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.unmanage, self.unmanage_request, 'foo_id') self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') @ddt.data('1.0', '2.6', '2.11') def test_unmanage_version_not_found(self, version): snapshot_id = 'fake' fake_req = fakes.HTTPRequest.blank( '/v2/fake/snapshots/%s/unmanage' % snapshot_id, use_admin_context=True, version=version) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.unmanage, fake_req, 'fake') def 
test_snapshot_unmanage_dhss_true_with_share_server(self): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id', 'host': 'fake_host', 'share_server_id': 'fake'} mock_get = self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) snapshot = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id', 'share_id': 'bar_id'} self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.mock_object(share_api.API, 'unmanage_snapshot') actual_result = self.controller._unmanage(self.unmanage_request, snapshot['id'], allow_dhss_true=True) self.assertEqual(202, actual_result.status_int) self.controller.share_api.get_snapshot.assert_called_once_with( self.unmanage_request.environ['manila.context'], snapshot['id']) share_api.API.unmanage_snapshot.assert_called_once_with( mock.ANY, snapshot, 'fake_host') mock_get.assert_called_once_with( self.unmanage_request.environ['manila.context'], snapshot['id'] ) self.mock_policy_check.assert_called_once_with( self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_transfer.py0000664000175000017500000005323700000000000023212 0ustar00zuulzuul00000000000000# Copyright (c) 2022 China Telecom Digital Intelligence. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import http.client as http_client from unittest import mock import ddt from oslo_serialization import jsonutils import webob from manila.api.v2 import share_transfer from manila import context from manila import db from manila import exception from manila import quota from manila.share import api as share_api from manila.share import rpcapi as share_rpcapi from manila.share import share_types from manila import test from manila.tests.api import fakes from manila.tests import db_utils from manila.transfer import api as transfer_api SHARE_TRANSFER_VERSION = "2.77" @ddt.ddt class ShareTransferAPITestCase(test.TestCase): """Test Case for transfers V3 API.""" microversion = SHARE_TRANSFER_VERSION def setUp(self): super(ShareTransferAPITestCase, self).setUp() self.share_transfer_api = transfer_api.API() self.v2_controller = share_transfer.ShareTransferController() self.ctxt = context.RequestContext( 'fake_user_id', 'fake_project_id', auth_token=True, is_admin=True) def _create_transfer(self, share_id='fake_share_id', display_name='test_transfer'): transfer = self.share_transfer_api.create(context.get_admin_context(), share_id, display_name) return transfer def _create_share(self, display_name='test_share', display_description='this is a test share', status='available', size=1, project_id='fake_project_id', user_id='fake_user_id', share_network_id=None, mount_point_name=None): """Create a share object.""" share_type = db_utils.create_share_type() instance_list = [] if mount_point_name: instance_list.append( db_utils.create_share_instance( status=status, share_id='fake_id', mount_point_name=mount_point_name ) ) share = db_utils.create_share( display_name=display_name, display_description=display_description, status=status, size=size, project_id=project_id, user_id=user_id, share_type_id=share_type['id'], share_network_id=share_network_id, instances=instance_list, 
mount_point_name=mount_point_name ) share_id = share['id'] return share_id def test_show_transfer(self): share_id = self._create_share(size=5) transfer = self._create_transfer(share_id) path = '/v2/fake_project_id/share-transfers/%s' % transfer['id'] req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'GET' req.headers['Content-Type'] = 'application/json' res_dict = self.v2_controller.show(req, transfer['id']) self.assertEqual('test_transfer', res_dict['transfer']['name']) self.assertEqual(transfer['id'], res_dict['transfer']['id']) self.assertEqual(share_id, res_dict['transfer']['resource_id']) def test_list_transfers(self): share_id_1 = self._create_share(size=5) share_id_2 = self._create_share(size=5) transfer1 = self._create_transfer(share_id_1) transfer2 = self._create_transfer(share_id_2) path = '/v2/fake_project_id/share-transfers' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'GET' req.headers['Content-Type'] = 'application/json' res_dict = self.v2_controller.index(req) self.assertEqual(transfer1['id'], res_dict['transfers'][1]['id']) self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) self.assertEqual(transfer2['id'], res_dict['transfers'][0]['id']) self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) def test_list_transfers_with_all_tenants(self): share_id_1 = self._create_share(size=5) share_id_2 = self._create_share(size=5, project_id='fake_project_id2', user_id='fake_user_id2') self._create_transfer(share_id_1) self._create_transfer(share_id_2) path = '/v2/fake_project_id/share-transfers?all_tenants=true' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = context.get_admin_context() req.method = 'GET' req.headers['Content-Type'] = 'application/json' res_dict = self.v2_controller.index(req) self.assertEqual(2, len(res_dict['transfers'])) def 
test_list_transfers_with_limit(self): share_id_1 = self._create_share(size=5) share_id_2 = self._create_share(size=5) self._create_transfer(share_id_1) self._create_transfer(share_id_2) path = '/v2/fake_project_id/share-transfers?limit=1' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'GET' req.headers['Content-Type'] = 'application/json' res_dict = self.v2_controller.index(req) self.assertEqual(1, len(res_dict['transfers'])) @ddt.data("desc", "asc") def test_list_transfers_with_sort(self, sort_dir): share_id_1 = self._create_share(size=5) share_id_2 = self._create_share(size=5) transfer1 = self._create_transfer(share_id_1) transfer2 = self._create_transfer(share_id_2) path = \ '/v2/fake_project_id/share-transfers?sort_key=id&sort_dir=%s' % ( sort_dir) req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'GET' req.headers['Content-Type'] = 'application/json' res_dict = self.v2_controller.index(req) self.assertEqual(2, len(res_dict['transfers'])) order_ids = sorted([transfer1['id'], transfer2['id']]) expect_result = order_ids[1] if sort_dir == "desc" else order_ids[0] self.assertEqual(expect_result, res_dict['transfers'][0]['id']) def test_list_transfers_detail(self): share_id_1 = self._create_share(size=5) share_id_2 = self._create_share(size=5) transfer1 = self._create_transfer(share_id_1) transfer2 = self._create_transfer(share_id_2) path = '/v2/fake_project_id/share-transfers/detail' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res_dict = self.v2_controller.detail(req) self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) self.assertEqual(transfer1['id'], res_dict['transfers'][1]['id']) self.assertEqual(share_id_1, 
res_dict['transfers'][1]['resource_id']) self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) self.assertEqual(transfer2['id'], res_dict['transfers'][0]['id']) self.assertEqual(share_id_2, res_dict['transfers'][0]['resource_id']) def test_create_transfer(self): share_id = self._create_share(status='available', size=5) body = {"transfer": {"name": "transfer1", "share_id": share_id}} path = '/v2/fake_project_id/share-transfers' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") res_dict = self.v2_controller.create(req, body) self.assertIn('id', res_dict['transfer']) self.assertIn('auth_key', res_dict['transfer']) self.assertIn('created_at', res_dict['transfer']) self.assertIn('name', res_dict['transfer']) self.assertIn('resource_id', res_dict['transfer']) @ddt.data({}, {"transfer": {"name": "transfer1"}}, {"transfer": {"name": "transfer1", "share_id": "invalid_share_id"}}) def test_create_transfer_with_invalid_body(self, body): path = '/v2/fake_project_id/share-transfers' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.v2_controller.create, req, body) def test_create_transfer_with_invalid_share_status(self): share_id = self._create_share() body = {"transfer": {"name": "transfer1", "share_id": share_id}} db.share_update(context.get_admin_context(), share_id, {'status': 'error'}) path = '/v2/fake_project_id/share-transfers' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") 
self.assertRaises(webob.exc.HTTPBadRequest, self.v2_controller.create, req, body) def test_create_transfer_with_invalid_mount_point_name(self): share_id = self._create_share( project_id='fake_pid', mount_point_name='fake_pid_mount_point_name') body = {"transfer": {"name": "transfer1", "share_id": share_id}} db.share_update(context.get_admin_context(), share_id, {'status': 'error'}) path = '/v2/fake_project_id/share-transfers' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.v2_controller.create, req, body) def test_create_transfer_with_project_id_prefix_mount_point_name(self): share_id = self._create_share(project_id='fake', mount_point_name='fake_mp') self.assertRaises(exception.Invalid, self.share_transfer_api.create, context.get_admin_context(), share_id, 'test_missing_share_type') def test_create_transfer_share_with_network_id(self): share_id = self._create_share(share_network_id='fake_id') body = {"transfer": {"name": "transfer1", "share_id": share_id}} path = '/v2/fake_project_id/share-transfers' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.v2_controller.create, req, body) def test_create_transfer_share_with_invalid_snapshot(self): share_id = self._create_share(share_network_id='fake_id') db_utils.create_snapshot(share_id=share_id) body = {"transfer": {"name": "transfer1", "share_id": share_id}} path = '/v2/fake_project_id/share-transfers' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' 
req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.v2_controller.create, req, body) def test_delete_transfer_awaiting_transfer(self): share_id = self._create_share() transfer = self.share_transfer_api.create(context.get_admin_context(), share_id, 'test_transfer') path = '/v2/fake_project_id/share-transfers/%s' % transfer['id'] req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' self.v2_controller.delete(req, transfer['id']) # verify transfer has been deleted req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.app()) self.assertEqual(http_client.NOT_FOUND, res.status_int) self.assertEqual(db.share_get(context.get_admin_context(), share_id)['status'], 'available') def test_delete_transfer_not_awaiting_transfer(self): share_id = self._create_share() transfer = self.share_transfer_api.create(context.get_admin_context(), share_id, 'test_transfer') db.share_update(context.get_admin_context(), share_id, {'status': 'available'}) path = '/v2/fake_project_id/share-transfers/%s' % transfer['id'] req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' self.assertRaises(exception.InvalidShare, self.v2_controller.delete, req, transfer['id']) def test_transfer_accept_share_id_specified(self): share_id = self._create_share() transfer = self.share_transfer_api.create(context.get_admin_context(), share_id, 'test_transfer') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock()) self.mock_object(quota.QUOTAS, 'commit', mock.Mock()) self.mock_object(share_api.API, 'check_is_share_size_within_per_share_quota_limit', mock.Mock()) 
self.mock_object(share_rpcapi.ShareAPI, 'transfer_accept', mock.Mock()) fake_share_type = {'id': 'fake_id', 'name': 'fake_name', 'is_public': True} self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_share_type)) self.mock_object(db, 'share_snapshot_get_all_for_share', mock.Mock(return_value={})) body = {"accept": {"auth_key": transfer['auth_key']}} path = '/v2/fake_project_id/share-transfers/%s/accept' % transfer['id'] req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") self.v2_controller.accept(req, transfer['id'], body) def test_transfer_accept_with_not_public_share_type(self): share_id = self._create_share() transfer = self.share_transfer_api.create(context.get_admin_context(), share_id, 'test_transfer') fake_share_type = {'id': 'fake_id', 'name': 'fake_name', 'is_public': False, 'projects': ['project_id1', 'project_id2']} self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_share_type)) body = {"accept": {"auth_key": transfer['auth_key']}} path = '/v2/fake_project_id/share-transfers/%s/accept' % transfer['id'] req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.v2_controller.accept, req, transfer['id'], body) @ddt.data({}, {"accept": {}}, {"accept": {"auth_key": "fake_auth_key", "clear_access_rules": "invalid_bool"}}) def test_transfer_accept_with_invalid_body(self, body): path = '/v2/fake_project_id/share-transfers/fake_transfer_id/accept' req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = 
jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.v2_controller.accept, req, 'fake_transfer_id', body) def test_transfer_accept_with_invalid_auth_key(self): share_id = self._create_share(size=5) transfer = self._create_transfer(share_id) body = {"accept": {"auth_key": "invalid_auth_key"}} path = '/v2/fake_project_id/share-transfers/%s/accept' % transfer['id'] req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.v2_controller.accept, req, transfer['id'], body) def test_transfer_accept_with_invalid_share_status(self): share_id = self._create_share(size=5) transfer = self._create_transfer(share_id) db.share_update(context.get_admin_context(), share_id, {'status': 'error'}) body = {"accept": {"auth_key": transfer['auth_key']}} path = '/v2/fake_project_id/share-transfers/%s/accept' % transfer['id'] req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPBadRequest, self.v2_controller.accept, req, transfer['id'], body) @ddt.data({'overs': {'gigabytes': 'fake'}}, {'overs': {'shares': 'fake'}}, {'overs': {'snapshot_gigabytes': 'fake'}}, {'overs': {'snapshots': 'fake'}}) @ddt.unpack def test_accept_share_over_quota(self, overs): share_id = self._create_share() db_utils.create_snapshot(share_id=share_id, status='available') transfer = self.share_transfer_api.create(context.get_admin_context(), share_id, 'test_transfer') usages = {'gigabytes': {'reserved': 5, 'in_use': 5}, 'shares': {'reserved': 10, 'in_use': 10}, 'snapshot_gigabytes': {'reserved': 5, 'in_use': 5}, 'snapshots': {'reserved': 10, 'in_use': 10}} quotas = 
{'gigabytes': 5, 'shares': 10, 'snapshot_gigabytes': 5, 'snapshots': 10} exc = exception.OverQuota(overs=overs, usages=usages, quotas=quotas) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc)) self.mock_object(quota.QUOTAS, 'commit', mock.Mock()) self.mock_object(share_api.API, 'check_is_share_size_within_per_share_quota_limit', mock.Mock()) self.mock_object(share_rpcapi.ShareAPI, 'transfer_accept', mock.Mock()) fake_share_type = {'id': 'fake_id', 'name': 'fake_name', 'is_public': True} self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_share_type)) body = {"accept": {"auth_key": transfer['auth_key']}} path = '/v2/fake_project_id/share-transfers/%s/accept' % transfer['id'] req = fakes.HTTPRequest.blank(path, version=self.microversion) req.environ['manila.context'] = self.ctxt req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dumps(body).encode("utf-8") self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.v2_controller.accept, req, transfer['id'], body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_share_types.py0000664000175000017500000012423200000000000022524 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import random from unittest import mock import ddt from oslo_config import cfg from oslo_utils import timeutils import webob from manila.api.v2 import share_types as types from manila.api.views import types as views_types from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila.share import share_types from manila import test from manila.tests.api import fakes from manila.tests import fake_notifier CONF = cfg.CONF def stub_share_type(id): specs = { "key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5", constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: "true", } if id == 4: name = 'update_share_type_%s' % str(id) description = 'update_description_%s' % str(id) is_public = False else: name = 'share_type_%s' % str(id) description = 'description_%s' % str(id) is_public = True share_type = { 'id': str(id), 'name': name, 'description': description, 'is_public': is_public, 'extra_specs': specs, 'required_extra_specs': { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: "true", } } return share_type def return_share_types_get_all_types(context, search_opts=None): return dict( share_type_1=stub_share_type(1), share_type_2=stub_share_type(2), share_type_3=stub_share_type(3) ) def stub_default_name(): return 'default_share_type' def stub_default_share_type(id): return dict( id=id, name=stub_default_name(), description='description_%s' % str(id), required_extra_specs={ constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: "true", } ) def return_all_share_types(context, search_opts=None): mock_value = dict( share_type_1=stub_share_type(1), share_type_2=stub_share_type(2), share_type_3=stub_default_share_type(3) ) return mock_value def return_default_share_type(context, search_opts=None): return stub_default_share_type(3) def return_empty_share_types_get_all_types(context, search_opts=None): return {} def 
return_share_types_get_share_type(context, id=1): if id == "777": raise exception.ShareTypeNotFound(share_type_id=id) return stub_share_type(int(id)) def return_share_type_update(context, id=4, name=None, description=None, is_public=None): if id == 888: raise exception.ShareTypeUpdateFailed(id=id) if id == 999: raise exception.ShareTypeNotFound(share_type_id=id) pre_share_type = stub_share_type(int(id)) new_name = name new_description = description return pre_share_type.update({"name": new_name, "description": new_description, "is_public": is_public}) def return_share_types_get_by_name(context, name): if name == "777": raise exception.ShareTypeNotFoundByName(share_type_name=name) return stub_share_type(int(name.split("_")[2])) def return_share_types_destroy(context, name): if name == "777": raise exception.ShareTypeNotFoundByName(share_type_name=name) pass def return_share_types_with_volumes_destroy(context, id): if id == "1": raise exception.ShareTypeInUse(share_type_id=id) pass def return_share_types_create(context, name, specs, is_public, description): pass def make_create_body(name="test_share_1", extra_specs=None, spec_driver_handles_share_servers=True, description=None): if not extra_specs: extra_specs = {} if spec_driver_handles_share_servers is not None: extra_specs[constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS] = ( spec_driver_handles_share_servers) body = { "share_type": { "name": name, "extra_specs": extra_specs, } } if description: body["share_type"].update({"description": description}) return body def generate_long_description(des_length=256): random_str = '' base_str = 'ABCDEFGHIGKLMNOPQRSTUVWXYZabcdefghigklmnopqrstuvwxyz' length = len(base_str) - 1 for i in range(des_length): random_str += base_str[random.randint(0, length)] return random_str def make_update_body(name=None, description=None, is_public=None): body = {"share_type": {}} if name: body["share_type"].update({"name": name}) if description: body["share_type"].update({"description": 
description}) if is_public is not None: body["share_type"].update( {"share_type_access:is_public": is_public}) return body @ddt.ddt class ShareTypesAPITest(test.TestCase): def setUp(self): super(ShareTypesAPITest, self).setUp() self.flags(host='fake') self.controller = types.ShareTypesController() self.resource_name = self.controller.resource_name self.mock_object(policy, 'check_policy', mock.Mock(return_value=True)) fake_notifier.reset() self.addCleanup(fake_notifier.reset) self.mock_object( share_types, 'create', mock.Mock(side_effect=return_share_types_create)) self.mock_object( share_types, 'get_share_type_by_name', mock.Mock(side_effect=return_share_types_get_by_name)) self.mock_object( share_types, 'get_share_type', mock.Mock(side_effect=return_share_types_get_share_type)) self.mock_object( share_types, 'update', mock.Mock(side_effect=return_share_type_update)) self.mock_object( share_types, 'destroy', mock.Mock(side_effect=return_share_types_destroy)) @ddt.data(True, False) def test_share_types_index(self, admin): self.mock_object(share_types, 'get_all_types', return_share_types_get_all_types) req = fakes.HTTPRequest.blank('/v2/fake/types', use_admin_context=admin) res_dict = self.controller.index(req) self.assertEqual(3, len(res_dict['share_types'])) expected_names = ['share_type_1', 'share_type_2', 'share_type_3'] actual_names = map(lambda e: e['name'], res_dict['share_types']) self.assertEqual(set(expected_names), set(actual_names)) for entry in res_dict['share_types']: if admin: self.assertEqual('value1', entry['extra_specs'].get('key1')) else: self.assertIsNone(entry['extra_specs'].get('key1')) self.assertIn('required_extra_specs', entry) required_extra_spec = entry['required_extra_specs'].get( constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS, '') self.assertEqual('true', required_extra_spec) policy.check_policy.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'index') def test_share_types_index_no_data(self): 
self.mock_object(share_types, 'get_all_types', return_empty_share_types_get_all_types) req = fakes.HTTPRequest.blank('/v2/fake/types') res_dict = self.controller.index(req) self.assertEqual(0, len(res_dict['share_types'])) policy.check_policy.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'index') def test_share_types_show(self): self.mock_object(share_types, 'get_share_type', return_share_types_get_share_type) req = fakes.HTTPRequest.blank('/v2/fake/types/1') res_dict = self.controller.show(req, 1) self.assertEqual(2, len(res_dict)) self.assertEqual('1', res_dict['share_type']['id']) self.assertEqual('share_type_1', res_dict['share_type']['name']) expect = {constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: "true"} self.assertEqual(expect, res_dict['share_type']['required_extra_specs']) policy.check_policy.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'show') def test_share_types_show_not_found(self): self.mock_object(share_types, 'get_share_type', return_share_types_get_share_type) req = fakes.HTTPRequest.blank('/v2/fake/types/777') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '777') policy.check_policy.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'show') def test_share_types_default(self): self.mock_object(share_types, 'get_default_share_type', return_share_types_get_share_type) req = fakes.HTTPRequest.blank('/v2/fake/types/default') res_dict = self.controller.default(req) self.assertEqual(2, len(res_dict)) self.assertEqual('1', res_dict['share_type']['id']) self.assertEqual('share_type_1', res_dict['share_type']['name']) policy.check_policy.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'default') def test_share_types_default_not_found(self): self.mock_object(share_types, 'get_default_share_type', mock.Mock(side_effect=exception.ShareTypeNotFound( share_type_id="fake"))) req = 
fakes.HTTPRequest.blank('/v2/fake/types/default') self.assertRaises(webob.exc.HTTPNotFound, self.controller.default, req) policy.check_policy.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'default') @ddt.data( ('1.0', 'os-share-type-access', True), ('1.0', 'os-share-type-access', False), ('2.0', 'os-share-type-access', True), ('2.0', 'os-share-type-access', False), ('2.6', 'os-share-type-access', True), ('2.6', 'os-share-type-access', False), ('2.7', 'share_type_access', True), ('2.7', 'share_type_access', False), ('2.23', 'share_type_access', True), ('2.23', 'share_type_access', False), ('2.24', 'share_type_access', True), ('2.24', 'share_type_access', False), ('2.27', 'share_type_access', True), ('2.27', 'share_type_access', False), ('2.41', 'share_type_access', True), ('2.41', 'share_type_access', False), ) @ddt.unpack def test_view_builder_show(self, version, prefix, admin): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_share_type = dict( name='new_type', description='description_test', deleted=False, created_at=now, updated_at=now, extra_specs={}, deleted_at=None, required_extra_specs={}, id=42, ) request = fakes.HTTPRequest.blank("/v%s" % version[0], version=version, use_admin_context=admin) request.headers['X-Openstack-Manila-Api-Version'] = version output = view_builder.show(request, raw_share_type) self.assertIn('share_type', output) expected_share_type = { 'name': 'new_type', 'extra_specs': {}, '%s:is_public' % prefix: True, 'required_extra_specs': {}, 'id': 42, } if self.is_microversion_ge(version, '2.24') and not admin: for extra_spec in constants.ExtraSpecs.INFERRED_OPTIONAL_MAP: expected_share_type['extra_specs'][extra_spec] = ( constants.ExtraSpecs.INFERRED_OPTIONAL_MAP[extra_spec]) if self.is_microversion_ge(version, '2.41'): expected_share_type['description'] = 'description_test' self.assertDictEqual(expected_share_type, output['share_type']) @ddt.data( ('1.0', 
'os-share-type-access', True), ('1.0', 'os-share-type-access', False), ('2.0', 'os-share-type-access', True), ('2.0', 'os-share-type-access', False), ('2.6', 'os-share-type-access', True), ('2.6', 'os-share-type-access', False), ('2.7', 'share_type_access', True), ('2.7', 'share_type_access', False), ('2.23', 'share_type_access', True), ('2.23', 'share_type_access', False), ('2.24', 'share_type_access', True), ('2.24', 'share_type_access', False), ('2.27', 'share_type_access', True), ('2.27', 'share_type_access', False), ('2.41', 'share_type_access', True), ('2.41', 'share_type_access', False), ) @ddt.unpack def test_view_builder_list(self, version, prefix, admin): view_builder = views_types.ViewBuilder() extra_specs = { constants.ExtraSpecs.SNAPSHOT_SUPPORT: True, constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: False, constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT: True, constants.ExtraSpecs.MOUNT_SNAPSHOT_SUPPORT: True, constants.ExtraSpecs.MOUNT_POINT_NAME_SUPPORT: True, } now = timeutils.utcnow().isoformat() raw_share_types = [] for i in range(0, 10): raw_share_types.append( dict( name='new_type', description='description_test', deleted=False, created_at=now, updated_at=now, extra_specs=extra_specs, required_extra_specs={}, deleted_at=None, id=42 + i ) ) request = fakes.HTTPRequest.blank("/v%s" % version[0], version=version, use_admin_context=admin) output = view_builder.index(request, raw_share_types) self.assertIn('share_types', output) expected_share_type = { 'name': 'new_type', 'extra_specs': extra_specs, '%s:is_public' % prefix: True, 'required_extra_specs': {}, } if self.is_microversion_ge(version, '2.41'): expected_share_type['description'] = 'description_test' for i in range(0, 10): expected_share_type['id'] = 42 + i self.assertDictEqual(expected_share_type, output['share_types'][i]) @ddt.data( ("new_name", "new_description", "wrong_bool"), (" ", "new_description", "true"), (" ", generate_long_description(256), "true"), (None, None, None), ) 
@ddt.unpack def test_share_types_update_with_invalid_parameter( self, name, description, is_public): req = fakes.HTTPRequest.blank('/v2/fake/types/4', version='2.50') body = make_update_body(name, description, is_public) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 4, body) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) def test_share_types_update_with_invalid_body(self): req = fakes.HTTPRequest.blank('/v2/fake/types/4', version='2.50') body = {'share_type': 'i_am_invalid_body'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 4, body) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) def test_share_types_update(self): req = fakes.HTTPRequest.blank('/v2/fake/types/4', version='2.50') self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) body = make_update_body("update_share_type_4", "update_description_4", is_public=False) res_dict = self.controller.update(req, 4, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) self.assertEqual(2, len(res_dict)) self.assertEqual('update_share_type_4', res_dict['share_type']['name']) self.assertEqual('update_share_type_4', res_dict['volume_type']['name']) self.assertIs(False, res_dict['share_type']['share_type_access:is_public']) self.assertEqual('update_description_4', res_dict['share_type']['description']) self.assertEqual('update_description_4', res_dict['volume_type']['description']) def test_share_types_update_pre_v250(self): req = fakes.HTTPRequest.blank('/v2/fake/types/4', version='2.49') self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) body = make_update_body("update_share_type_4", "update_description_4", is_public=False) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.update, req, 4, body) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) def test_share_types_update_failed(self): self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank('/v2/fake/types/888', version='2.50') body = 
make_update_body("update_share_type_888", "update_description_888", is_public=False) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.update, req, 888, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) def test_share_types_update_not_found(self): self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank('/v2/fake/types/999', version='2.50') body = make_update_body("update_share_type_999", "update_description_999", is_public=False) self.assertRaises(exception.ShareTypeNotFound, self.controller.update, req, 999, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) def test_share_types_delete(self): req = fakes.HTTPRequest.blank('/v2/fake/types/1') self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) self.controller._delete(req, 1) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) def test_share_types_delete_not_found(self): self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) req = fakes.HTTPRequest.blank('/v2/fake/types/777') self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete, req, '777') self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) def test_share_types_delete_in_use(self): req = fakes.HTTPRequest.blank('/v2/fake/types/1') self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) side_effect = exception.ShareTypeInUse(share_type_id='fake_id') self.mock_object(share_types, 'destroy', mock.Mock(side_effect=side_effect)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._delete, req, 1) def test_share_types_with_volumes_destroy(self): req = fakes.HTTPRequest.blank('/v2/fake/types/1') self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) self.controller._delete(req, 1) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) @ddt.data( (make_create_body("share_type_1"), "2.24"), (make_create_body(spec_driver_handles_share_servers=True), "2.24"), (make_create_body(spec_driver_handles_share_servers=False), "2.24"), (make_create_body("share_type_1"), "2.23"), 
(make_create_body(spec_driver_handles_share_servers=True), "2.23"), (make_create_body(spec_driver_handles_share_servers=False), "2.23"), (make_create_body(description="description_1"), "2.41")) @ddt.unpack def test_create(self, body, version): req = fakes.HTTPRequest.blank('/v2/fake/types', version=version) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) res_dict = self.controller.create(req, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) self.assertEqual(2, len(res_dict)) self.assertEqual('share_type_1', res_dict['share_type']['name']) self.assertEqual('share_type_1', res_dict['volume_type']['name']) if self.is_microversion_ge(version, '2.41'): self.assertEqual(body['share_type']['description'], res_dict['share_type']['description']) self.assertEqual(body['share_type']['description'], res_dict['volume_type']['description']) for extra_spec in constants.ExtraSpecs.REQUIRED: self.assertIn(extra_spec, res_dict['share_type']['required_extra_specs']) expected_extra_specs = { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: True, } if self.is_microversion_lt(version, '2.24'): expected_extra_specs[constants.ExtraSpecs.SNAPSHOT_SUPPORT] = True expected_extra_specs.update(body['share_type']['extra_specs']) share_types.create.assert_called_once_with( mock.ANY, body['share_type']['name'], expected_extra_specs, True, description=body['share_type'].get('description')) @ddt.data(None, make_create_body(""), make_create_body("n" * 256), {'foo': {'a': 'b'}}, {'share_type': 'string'}, make_create_body(spec_driver_handles_share_servers=None), make_create_body(spec_driver_handles_share_servers=""), make_create_body(spec_driver_handles_share_servers=[]), ) def test_create_invalid_request_1_0(self, body): req = fakes.HTTPRequest.blank('/v2/fake/types', version="1.0") self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) 
@ddt.data(*constants.ExtraSpecs.REQUIRED) def test_create_invalid_request_2_23(self, required_extra_spec): req = fakes.HTTPRequest.blank('/v2/fake/types', version="2.24") self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) body = make_create_body("share_type_1") del body['share_type']['extra_specs'][required_extra_spec] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) def test_create_already_exists(self): side_effect = exception.ShareTypeExists(id='fake_id') self.mock_object(share_types, 'create', mock.Mock(side_effect=side_effect)) req = fakes.HTTPRequest.blank('/v2/fake/types', version="2.24") self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) body = make_create_body('share_type_1') self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) def test_create_not_found(self): self.mock_object(share_types, 'create', mock.Mock(side_effect=exception.NotFound)) req = fakes.HTTPRequest.blank('/v2/fake/types', version="2.24") self.assertEqual(0, len(fake_notifier.NOTIFICATIONS)) body = make_create_body('share_type_1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) self.assertEqual(1, len(fake_notifier.NOTIFICATIONS)) def assert_share_type_list_equal(self, expected, observed): self.assertEqual(len(expected), len(observed)) expected = sorted(expected, key=lambda item: item['id']) observed = sorted(observed, key=lambda item: item['id']) for d1, d2 in zip(expected, observed): self.assertEqual(d1['id'], d2['id']) @ddt.data(('2.45', True), ('2.45', False), ('2.46', True), ('2.46', False)) @ddt.unpack def test_share_types_create_with_is_default_key(self, version, admin): req = fakes.HTTPRequest.blank('/v2/fake/types', version=version, use_admin_context=admin) body = make_create_body() res_dict = self.controller.create(req, body) if self.is_microversion_ge(version, '2.46'): 
self.assertIn('is_default', res_dict['share_type']) self.assertIs(False, res_dict['share_type']['is_default']) else: self.assertNotIn('is_default', res_dict['share_type']) @ddt.data(('2.45', True), ('2.45', False), ('2.46', True), ('2.46', False)) @ddt.unpack def test_share_types_index_with_is_default_key(self, version, admin): default_type_name = stub_default_name() CONF.set_default("default_share_type", default_type_name) self.mock_object(share_types, 'get_all_types', return_all_share_types) req = fakes.HTTPRequest.blank('/v2/fake/types', version=version, use_admin_context=admin) res_dict = self.controller.index(req) self.assertEqual(3, len(res_dict['share_types'])) for res in res_dict['share_types']: if self.is_microversion_ge(version, '2.46'): self.assertIn('is_default', res) expected = res['name'] == default_type_name self.assertIs(res['is_default'], expected) else: self.assertNotIn('is_default', res) @ddt.data(('2.45', True), ('2.45', False), ('2.46', True), ('2.46', False)) @ddt.unpack def test_share_types_default_with_is_default_key(self, version, admin): default_type_name = stub_default_name() CONF.set_default("default_share_type", default_type_name) self.mock_object(share_types, 'get_default_share_type', return_default_share_type) req = fakes.HTTPRequest.blank('/v2/fake/types/default_share_type', version=version, use_admin_context=admin) res_dict = self.controller.default(req) if self.is_microversion_ge(version, '2.46'): self.assertIn('is_default', res_dict['share_type']) self.assertIs(True, res_dict['share_type']['is_default']) else: self.assertNotIn('is_default', res_dict['share_type']) def generate_type(type_id, is_public): return { 'id': type_id, 'name': u'test', 'description': u'ds_test', 'deleted': False, 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1), 'updated_at': None, 'deleted_at': None, 'is_public': bool(is_public), 'extra_specs': {} } SHARE_TYPES = { '0': generate_type('0', True), '1': generate_type('1', True), '2': generate_type('2', 
False), '3': generate_type('3', False)} PROJ1_UUID = '11111111-1111-1111-1111-111111111111' PROJ2_UUID = '22222222-2222-2222-2222-222222222222' PROJ3_UUID = '33333333-3333-3333-3333-333333333333' ACCESS_LIST = [{'share_type_id': '2', 'project_id': PROJ2_UUID}, {'share_type_id': '2', 'project_id': PROJ3_UUID}, {'share_type_id': '3', 'project_id': PROJ3_UUID}] def fake_share_type_get(context, id, inactive=False, expected_fields=None): vol = SHARE_TYPES[id] if expected_fields and 'projects' in expected_fields: vol['projects'] = [a['project_id'] for a in ACCESS_LIST if a['share_type_id'] == id] return vol def _has_type_access(type_id, project_id): for access in ACCESS_LIST: if (access['share_type_id'] == type_id and access['project_id'] == project_id): return True return False def fake_share_type_get_all(context, inactive=False, filters=None): if filters is None or filters.get('is_public', None) is None: return SHARE_TYPES res = {} for k, v in SHARE_TYPES.items(): if filters['is_public'] and _has_type_access(k, context.project_id): res.update({k: v}) continue if v['is_public'] == filters['is_public']: res.update({k: v}) return res class FakeResponse(object): obj = {'share_type': {'id': '0'}, 'share_types': [{'id': '0'}, {'id': '2'}]} def attach(self, **kwargs): pass class FakeRequest(object): environ = {"manila.context": context.get_admin_context()} def get_db_share_type(self, resource_id): return SHARE_TYPES[resource_id] @ddt.ddt class ShareTypeAccessTest(test.TestCase): def setUp(self): super(ShareTypeAccessTest, self).setUp() self.controller = types.ShareTypesController() self.req = FakeRequest() self.mock_object(db, 'share_type_get', fake_share_type_get) self.mock_object(db, 'share_type_get_all', fake_share_type_get_all) def assertShareTypeListEqual(self, expected, observed): self.assertEqual(len(expected), len(observed)) expected = sorted(expected, key=lambda item: item['id']) observed = sorted(observed, key=lambda item: item['id']) for d1, d2 in zip(expected, 
observed): self.assertEqual(d1['id'], d2['id']) def test_list_type_access_public(self): """Querying os-share-type-access on public type should return 404.""" req = fakes.HTTPRequest.blank('/v1/fake/types/os-share-type-access', use_admin_context=True) self.assertRaises(webob.exc.HTTPNotFound, self.controller.share_type_access, req, '1') def test_list_type_access_private(self): expected = {'share_type_access': [ {'share_type_id': '2', 'project_id': PROJ2_UUID}, {'share_type_id': '2', 'project_id': PROJ3_UUID}, ]} result = self.controller.share_type_access(self.req, '2') self.assertEqual(expected, result) def test_list_with_no_context(self): req = fakes.HTTPRequest.blank('/v1/types/fake/types') self.assertRaises(webob.exc.HTTPForbidden, self.controller.share_type_access, req, 'fake') def test_list_not_found(self): side_effect = exception.ShareTypeNotFound(share_type_id='fake_id') self.mock_object(share_types, 'get_share_type', mock.Mock(side_effect=side_effect)) self.assertRaises(webob.exc.HTTPNotFound, self.controller.share_type_access, self.req, 'fake') def test_list_type_with_admin_default_proj1(self): expected = {'share_types': [{'id': '0'}, {'id': '1'}]} req = fakes.HTTPRequest.blank('/v1/fake/types', use_admin_context=True) req.environ['manila.context'].project_id = PROJ1_UUID result = self.controller.index(req) self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_list_type_with_admin_default_proj2(self): expected = {'share_types': [{'id': '0'}, {'id': '1'}, {'id': '2'}]} req = fakes.HTTPRequest.blank('/v2/fake/types', use_admin_context=True) req.environ['manila.context'].project_id = PROJ2_UUID result = self.controller.index(req) self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_list_type_with_admin_ispublic_true(self): expected = {'share_types': [{'id': '0'}, {'id': '1'}]} req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=true', use_admin_context=True) result = 
self.controller.index(req) self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_list_type_with_admin_ispublic_false(self): expected = {'share_types': [{'id': '2'}, {'id': '3'}]} req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=false', use_admin_context=True) result = self.controller.index(req) self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_list_type_with_admin_ispublic_false_proj2(self): expected = {'share_types': [{'id': '2'}, {'id': '3'}]} req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=false', use_admin_context=True) req.environ['manila.context'].project_id = PROJ2_UUID result = self.controller.index(req) self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_list_type_with_admin_ispublic_none(self): expected = {'share_types': [ {'id': '0'}, {'id': '1'}, {'id': '2'}, {'id': '3'}, ]} req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=all', use_admin_context=True) result = self.controller.index(req) self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_list_type_with_no_admin_default(self): expected = {'share_types': [{'id': '0'}, {'id': '1'}]} req = fakes.HTTPRequest.blank('/v2/fake/types', use_admin_context=False) result = self.controller.index(req) self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_list_type_with_no_admin_ispublic_true(self): expected = {'share_types': [{'id': '0'}, {'id': '1'}]} req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=true', use_admin_context=False) result = self.controller.index(req) self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_list_type_with_no_admin_ispublic_false(self): expected = {'share_types': [{'id': '0'}, {'id': '1'}]} req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=false', use_admin_context=False) result = self.controller.index(req) 
self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_list_type_with_no_admin_ispublic_none(self): expected = {'share_types': [{'id': '0'}, {'id': '1'}]} req = fakes.HTTPRequest.blank('/v2/fake/types?is_public=all', use_admin_context=False) result = self.controller.index(req) self.assertShareTypeListEqual(expected['share_types'], result['share_types']) def test_add_project_access(self): def stub_add_share_type_access(context, type_id, project_id): self.assertEqual('3', type_id, "type_id") self.assertEqual(PROJ2_UUID, project_id, "project_id") self.mock_object(db, 'share_type_access_add', stub_add_share_type_access) body = {'addProjectAccess': {'project': PROJ2_UUID}} req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', use_admin_context=True) result = self.controller._add_project_access(req, '3', body) self.assertEqual(202, result.status_code) @ddt.data({'addProjectAccess': {'project': 'fake_project'}}, {'invalid': {'project': PROJ2_UUID}}) def test_add_project_access_bad_request(self, body): req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._add_project_access, req, '2', body) def test_add_project_access_with_no_admin_user(self): req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', use_admin_context=False) body = {'addProjectAccess': {'project': PROJ2_UUID}} self.assertRaises(webob.exc.HTTPForbidden, self.controller._add_project_access, req, '2', body) def test_add_project_access_with_already_added_access(self): def stub_add_share_type_access(context, type_id, project_id): raise exception.ShareTypeAccessExists(share_type_id=type_id, project_id=project_id) self.mock_object(db, 'share_type_access_add', stub_add_share_type_access) body = {'addProjectAccess': {'project': PROJ2_UUID}} req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', use_admin_context=True) self.assertRaises(webob.exc.HTTPConflict, 
self.controller._add_project_access, req, '3', body) def test_add_project_access_to_public_share_type(self): share_type_id = '3' body = {'addProjectAccess': {'project': PROJ2_UUID}} self.mock_object(share_types, 'get_share_type', mock.Mock(return_value={"is_public": True})) req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', use_admin_context=True) self.assertRaises(webob.exc.HTTPConflict, self.controller._add_project_access, req, share_type_id, body) share_types.get_share_type.assert_called_once_with( mock.ANY, share_type_id) def test_remove_project_access(self): share_type = stub_share_type(2) share_type['is_public'] = False self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=share_type)) self.mock_object(share_types, 'remove_share_type_access') body = {'removeProjectAccess': {'project': PROJ2_UUID}} req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', use_admin_context=True) result = self.controller._remove_project_access(req, '2', body) self.assertEqual(202, result.status_code) @ddt.data({'removeProjectAccess': {'project': 'fake_project'}}, {'invalid': {'project': PROJ2_UUID}}) def test_remove_project_access_bad_request(self, body): req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._remove_project_access, req, '2', body) def test_remove_project_access_with_bad_access(self): def stub_remove_share_type_access(context, type_id, project_id): raise exception.ShareTypeAccessNotFound(share_type_id=type_id, project_id=project_id) self.mock_object(db, 'share_type_access_remove', stub_remove_share_type_access) body = {'removeProjectAccess': {'project': PROJ2_UUID}} req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', use_admin_context=True) self.assertRaises(webob.exc.HTTPNotFound, self.controller._remove_project_access, req, '3', body) def test_remove_project_access_with_no_admin_user(self): req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', 
use_admin_context=False) body = {'removeProjectAccess': {'project': PROJ2_UUID}} self.assertRaises(webob.exc.HTTPForbidden, self.controller._remove_project_access, req, '2', body) def test_remove_project_access_from_public_share_type(self): share_type_id = '3' body = {'removeProjectAccess': {'project': PROJ2_UUID}} self.mock_object(share_types, 'get_share_type', mock.Mock(return_value={"is_public": True})) req = fakes.HTTPRequest.blank('/v2/fake/types/2/action', use_admin_context=True) self.assertRaises(webob.exc.HTTPConflict, self.controller._remove_project_access, req, share_type_id, body) share_types.get_share_type.assert_called_once_with( mock.ANY, share_type_id) def test_remove_project_access_by_nonexistent_share_type(self): self.mock_object(share_types, 'get_share_type', return_share_types_get_share_type) body = {'removeProjectAccess': {'project': PROJ2_UUID}} req = fakes.HTTPRequest.blank('/v2/fake/types/777/action', use_admin_context=True) self.assertRaises(webob.exc.HTTPNotFound, self.controller._remove_project_access, req, '777', body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/v2/test_shares.py0000664000175000017500000042472700000000000021477 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime import itertools from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import uuidutils import webob import webob.exc from manila.api import common from manila.api.openstack import api_version_request as api_version from manila.api.v2 import share_replicas from manila.api.v2 import shares from manila.common import constants from manila import context from manila import db from manila import exception from manila import policy from manila.share import api as share_api from manila.share import share_types from manila import test from manila.tests.api.contrib import stubs from manila.tests.api import fakes from manila.tests import db_utils from manila.tests import fake_share from manila.tests import utils as test_utils from manila import utils CONF = cfg.CONF LATEST_MICROVERSION = api_version._MAX_API_VERSION @ddt.ddt class ShareAPITest(test.TestCase): """Share API Test.""" def setUp(self): super(ShareAPITest, self).setUp() self.controller = shares.ShareController() self.mock_object(db, 'availability_zone_get') self.mock_object(share_api.API, 'get_all', stubs.stub_get_all_shares) self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'update', stubs.stub_share_update) self.mock_object(share_api.API, 'delete', stubs.stub_share_delete) self.mock_object(share_api.API, 'soft_delete', stubs.stub_share_soft_delete) self.mock_object(share_api.API, 'restore', stubs.stub_share_restore) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(share_types, 'get_share_type', stubs.stub_share_type_get) self.maxDiff = None self.share = { "id": "1", "size": 100, "display_name": "Share Test Name", "display_description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "is_public": False, "task_state": None } self.share_in_recycle_bin = { "id": "1", "size": 100, "display_name": "Share 
Test Name", "display_description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "is_public": False, "task_state": None, "is_soft_deleted": True, "status": "available" } self.share_in_recycle_bin_is_deleting = { "id": "1", "size": 100, "display_name": "Share Test Name", "display_description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "is_public": False, "task_state": None, "is_soft_deleted": True, "status": "deleting" } self.create_mock = mock.Mock( return_value=stubs.stub_share( '1', display_name=self.share['display_name'], display_description=self.share['display_description'], size=100, share_proto=self.share['share_proto'].upper(), instance={ 'availability_zone': self.share['availability_zone'], }) ) self.vt = { 'id': 'fake_volume_type_id', 'name': 'fake_volume_type_name', 'required_extra_specs': { 'driver_handles_share_servers': 'False' }, 'extra_specs': { 'driver_handles_share_servers': 'False' } } self.snapshot = { 'id': '2', 'share_id': '1', 'status': constants.STATUS_AVAILABLE, } CONF.set_default("default_share_type", None) self.mock_object(policy, 'check_policy') def _process_expected_share_detailed_response(self, shr_dict, req_version): """Sets version based parameters on share dictionary.""" share_dict = copy.deepcopy(shr_dict) changed_parameters = { '2.2': {'snapshot_support': True}, '2.5': {'task_state': None}, '2.6': {'share_type_name': None}, '2.10': {'access_rules_status': constants.ACCESS_STATE_ACTIVE}, '2.11': {'replication_type': None, 'has_replicas': False}, '2.16': {'user_id': 'fakeuser'}, '2.24': {'create_share_from_snapshot_support': True}, '2.27': {'revert_to_snapshot_support': False}, '2.31': {'share_group_id': None, 'source_share_group_snapshot_member_id': None}, '2.32': {'mount_snapshot_support': False}, '2.90': {'encryption_key_ref': None}, } # Apply all the share transformations if self.is_microversion_ge(req_version, '2.9'): share_dict.pop('export_locations', 
None) share_dict.pop('export_location', None) for version, parameters in changed_parameters.items(): for param, default in parameters.items(): if self.is_microversion_ge(req_version, version): share_dict[param] = share_dict.get(param, default) else: share_dict.pop(param, None) return share_dict def _get_expected_share_detailed_response(self, values=None, admin=False, version='2.0'): share = { 'id': '1', 'name': 'displayname', 'availability_zone': 'fakeaz', 'description': 'displaydesc', 'export_location': 'fake_location', 'export_locations': ['fake_location', 'fake_location2'], 'project_id': 'fakeproject', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'share_proto': 'FAKEPROTO', 'metadata': {}, 'size': 1, 'snapshot_id': '2', 'share_network_id': None, 'status': 'fakestatus', 'share_type': '1', 'volume_type': '1', 'snapshot_support': True, 'is_public': False, 'task_state': None, 'share_type_name': None, 'links': [ { 'href': 'http://localhost/share/v2/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/shares/1', 'rel': 'bookmark' } ], } if values: if 'display_name' in values: values['name'] = values.pop('display_name') if 'display_description' in values: values['description'] = values.pop('display_description') share.update(values) if share.get('share_proto'): share['share_proto'] = share['share_proto'].upper() if admin: share['share_server_id'] = 'fake_share_server_id' share['host'] = 'fakehost' return { 'share': self._process_expected_share_detailed_response( share, version) } def test__revert(self): share = copy.deepcopy(self.share) share['status'] = constants.STATUS_AVAILABLE share['revert_to_snapshot_support'] = True share["instances"] = [ { "id": "fakeid", "access_rules_status": constants.ACCESS_STATE_ACTIVE, }, ] share = fake_share.fake_share(**share) snapshot = copy.deepcopy(self.snapshot) snapshot['status'] = constants.STATUS_AVAILABLE body = {'revert': {'snapshot_id': '2'}} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', 
use_admin_context=False, version='2.27') mock_validate_revert_parameters = self.mock_object( self.controller, '_validate_revert_parameters', mock.Mock(return_value=body['revert'])) mock_get = self.mock_object( share_api.API, 'get', mock.Mock(return_value=share)) mock_get_snapshot = self.mock_object( share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) mock_get_latest_snapshot_for_share = self.mock_object( share_api.API, 'get_latest_snapshot_for_share', mock.Mock(return_value=snapshot)) mock_revert_to_snapshot = self.mock_object( share_api.API, 'revert_to_snapshot') response = self.controller._revert(req, '1', body=body) self.assertEqual(202, response.status_int) mock_validate_revert_parameters.assert_called_once_with( utils.IsAMatcher(context.RequestContext), body) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), '1') mock_get_snapshot.assert_called_once_with( utils.IsAMatcher(context.RequestContext), '2') mock_get_latest_snapshot_for_share.assert_called_once_with( utils.IsAMatcher(context.RequestContext), '1') mock_revert_to_snapshot.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share, snapshot) def test__revert_share_has_been_soft_deleted(self): snapshot = copy.deepcopy(self.snapshot) body = {'revert': {'snapshot_id': '2'}} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', use_admin_context=False, version='2.27') self.mock_object( self.controller, '_validate_revert_parameters', mock.Mock(return_value=body['revert'])) self.mock_object(share_api.API, 'get', mock.Mock(return_value=self.share_in_recycle_bin)) self.mock_object( share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.assertRaises( webob.exc.HTTPForbidden, self.controller._revert, req, 1, body) def test__revert_not_supported(self): share = copy.deepcopy(self.share) share['revert_to_snapshot_support'] = False share = fake_share.fake_share(**share) snapshot = copy.deepcopy(self.snapshot) snapshot['status'] = 
constants.STATUS_AVAILABLE snapshot['share_id'] = 'wrong_id' body = {'revert': {'snapshot_id': '2'}} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', use_admin_context=False, version='2.27') self.mock_object( self.controller, '_validate_revert_parameters', mock.Mock(return_value=body['revert'])) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object( share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._revert, req, '1', body=body) def test__revert_id_mismatch(self): share = copy.deepcopy(self.share) share['status'] = constants.STATUS_AVAILABLE share['revert_to_snapshot_support'] = True share = fake_share.fake_share(**share) snapshot = copy.deepcopy(self.snapshot) snapshot['status'] = constants.STATUS_AVAILABLE snapshot['share_id'] = 'wrong_id' body = {'revert': {'snapshot_id': '2'}} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', use_admin_context=False, version='2.27') self.mock_object( self.controller, '_validate_revert_parameters', mock.Mock(return_value=body['revert'])) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object( share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._revert, req, '1', body=body) @ddt.data( { 'share_status': constants.STATUS_ERROR, 'share_is_busy': False, 'snapshot_status': constants.STATUS_AVAILABLE, }, { 'share_status': constants.STATUS_AVAILABLE, 'share_is_busy': True, 'snapshot_status': constants.STATUS_AVAILABLE, }, { 'share_status': constants.STATUS_AVAILABLE, 'share_is_busy': False, 'snapshot_status': constants.STATUS_ERROR, }) @ddt.unpack def test__revert_invalid_status(self, share_status, share_is_busy, snapshot_status): share = copy.deepcopy(self.share) share['status'] = share_status share['is_busy'] = share_is_busy share['revert_to_snapshot_support'] = True share = fake_share.fake_share(**share) 
snapshot = copy.deepcopy(self.snapshot) snapshot['status'] = snapshot_status body = {'revert': {'snapshot_id': '2'}} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', use_admin_context=False, version='2.27') self.mock_object( self.controller, '_validate_revert_parameters', mock.Mock(return_value=body['revert'])) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object( share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.assertRaises(webob.exc.HTTPConflict, self.controller._revert, req, '1', body=body) def test__revert_snapshot_latest_not_found(self): share = copy.deepcopy(self.share) share['status'] = constants.STATUS_AVAILABLE share['revert_to_snapshot_support'] = True share = fake_share.fake_share(**share) snapshot = copy.deepcopy(self.snapshot) snapshot['status'] = constants.STATUS_AVAILABLE body = {'revert': {'snapshot_id': '2'}} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', use_admin_context=False, version='2.27') self.mock_object( self.controller, '_validate_revert_parameters', mock.Mock(return_value=body['revert'])) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object( share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.mock_object( share_api.API, 'get_latest_snapshot_for_share', mock.Mock(return_value=None)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._revert, req, '1', body=body) def test__revert_snapshot_access_applying(self): share = copy.deepcopy(self.share) share['status'] = constants.STATUS_AVAILABLE share['revert_to_snapshot_support'] = True share["instances"] = [ { "id": "fakeid", "access_rules_status": constants.SHARE_INSTANCE_RULES_SYNCING, }, ] share = fake_share.fake_share(**share) snapshot = copy.deepcopy(self.snapshot) snapshot['status'] = constants.STATUS_AVAILABLE body = {'revert': {'snapshot_id': '2'}} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', use_admin_context=False, version='2.27') 
self.mock_object( self.controller, '_validate_revert_parameters', mock.Mock(return_value=body['revert'])) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.mock_object(share_api.API, 'get_latest_snapshot_for_share', mock.Mock(return_value=snapshot)) self.mock_object(share_api.API, 'revert_to_snapshot') self.assertRaises(webob.exc.HTTPConflict, self.controller._revert, req, '1', body=body) def test__revert_snapshot_not_latest(self): share = copy.deepcopy(self.share) share['status'] = constants.STATUS_AVAILABLE share['revert_to_snapshot_support'] = True share = fake_share.fake_share(**share) snapshot = copy.deepcopy(self.snapshot) snapshot['status'] = constants.STATUS_AVAILABLE latest_snapshot = copy.deepcopy(self.snapshot) latest_snapshot['status'] = constants.STATUS_AVAILABLE latest_snapshot['id'] = '3' body = {'revert': {'snapshot_id': '2'}} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', use_admin_context=False, version='2.27') self.mock_object( self.controller, '_validate_revert_parameters', mock.Mock(return_value=body['revert'])) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object( share_api.API, 'get_snapshot', mock.Mock(return_value=snapshot)) self.mock_object( share_api.API, 'get_latest_snapshot_for_share', mock.Mock(return_value=latest_snapshot)) self.assertRaises(webob.exc.HTTPConflict, self.controller._revert, req, '1', body=body) @ddt.data( { 'caught': exception.ShareNotFound, 'exc_args': { 'share_id': '1', }, 'thrown': webob.exc.HTTPNotFound, }, { 'caught': exception.ShareSnapshotNotFound, 'exc_args': { 'snapshot_id': '2', }, 'thrown': webob.exc.HTTPBadRequest, }, { 'caught': exception.ShareSizeExceedsAvailableQuota, 'exc_args': {}, 'thrown': webob.exc.HTTPForbidden, }, { 'caught': exception.ReplicationException, 'exc_args': { 'reason': 'catastrophic failure', }, 'thrown': webob.exc.HTTPBadRequest, }) 
@ddt.unpack def test__revert_exception(self, caught, exc_args, thrown): body = {'revert': {'snapshot_id': '2'}} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', use_admin_context=False, version='2.27') self.mock_object( self.controller, '_validate_revert_parameters', mock.Mock(return_value=body['revert'])) self.mock_object( share_api.API, 'get', mock.Mock(side_effect=caught(**exc_args))) self.assertRaises(thrown, self.controller._revert, req, '1', body=body) def test_validate_revert_parameters(self): body = {'revert': {'snapshot_id': 'fake_snapshot_id'}} result = self.controller._validate_revert_parameters( 'fake_context', body) self.assertEqual(body['revert'], result) @ddt.data( None, {}, {'manage': {'snapshot_id': 'fake_snapshot_id'}}, {'revert': {'share_id': 'fake_snapshot_id'}}, {'revert': {'snapshot_id': ''}}, ) def test_validate_revert_parameters_invalid(self, body): self.assertRaises(webob.exc.HTTPBadRequest, self.controller._validate_revert_parameters, 'fake_context', body) @ddt.data("2.0", "2.1") def test_share_create_original(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/v2/fake/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( self.share, version=microversion) self.assertEqual(expected, res_dict) @ddt.data("2.2", "2.3") def test_share_create_with_snapshot_support_without_cg(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/v2/fake/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( self.share, version=microversion) self.assertEqual(expected, res_dict) def test_share_create_with_share_group(self): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": 
copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/v2/fake/shares', version="2.31", experimental=True) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( self.share, version="2.31") self.assertEqual(expected, res_dict) def test_share_create_with_sg_and_availability_zone(self): sg_id = 'fake_sg_id' az_id = 'bar_az_id' az_name = 'fake_name' self.mock_object(share_api.API, 'create', self.create_mock) self.mock_object( db, 'availability_zone_get', mock.Mock(return_value=type( 'ReqAZ', (object, ), {"id": az_id, "name": az_name}))) self.mock_object( db, 'share_group_get', mock.Mock(return_value={"availability_zone_id": az_id})) body = {"share": { "size": 100, "share_proto": "fakeproto", "availability_zone": az_id, "share_group_id": sg_id, }} req = fakes.HTTPRequest.blank( '/v2/fake/shares', version="2.31", experimental=True) self.controller.create(req, body) self.assertEqual(db.availability_zone_get.call_count, 2) db.availability_zone_get.assert_called_with( req.environ['manila.context'], az_id) db.share_group_get.assert_called_once_with( req.environ['manila.context'], sg_id) share_api.API.create.assert_called_once_with( req.environ['manila.context'], body['share']['share_proto'].upper(), body['share']['size'], None, None, share_group_id=body['share']['share_group_id'], is_public=False, metadata=None, snapshot_id=None, availability_zone=az_name, scheduler_hints=None, encryption_key_ref=None) def test_share_create_with_sg_and_different_availability_zone(self): sg_id = 'fake_sg_id' sg_az = 'foo_az_id' req_az = 'bar_az_id' req_az_name = 'fake_az_name' self.mock_object(share_api.API, 'create', self.create_mock) self.mock_object( db, 'availability_zone_get', mock.Mock(return_value=type('ReqAZ', (object, ), { "id": req_az, "name": req_az_name}))) self.mock_object( db, 'share_group_get', mock.Mock(return_value={"availability_zone_id": sg_az})) body = {"share": { "size": 100, "share_proto": "fakeproto", 
"availability_zone": req_az, "share_group_id": sg_id, }} req = fakes.HTTPRequest.blank( '/v2/fake/shares', version="2.31", experimental=True) self.assertRaises( exception.InvalidInput, self.controller.create, req, body) db.availability_zone_get.assert_called_once_with( req.environ['manila.context'], req_az) db.share_group_get.assert_called_once_with( req.environ['manila.context'], sg_id) self.assertEqual(0, share_api.API.create.call_count) def test_share_create_with_nonexistent_share_group(self): sg_id = 'fake_sg_id' self.mock_object(share_api.API, 'create', self.create_mock) self.mock_object(db, 'availability_zone_get') self.mock_object( db, 'share_group_get', mock.Mock(side_effect=exception.ShareGroupNotFound( share_group_id=sg_id))) body = {"share": { "size": 100, "share_proto": "fakeproto", "share_group_id": sg_id, }} req = fakes.HTTPRequest.blank( '/v2/fake/shares', version="2.31", experimental=True) self.assertRaises( webob.exc.HTTPNotFound, self.controller.create, req, body) self.assertEqual(0, db.availability_zone_get.call_count) self.assertEqual(0, share_api.API.create.call_count) db.share_group_get.assert_called_once_with( req.environ['manila.context'], sg_id) @ddt.data({'encryption_support': True, 'dhss': True}, {'encryption_support': True, 'dhss': False}, {'encryption_support': False, 'dhss': True}, {'encryption_support': False, 'dhss': False}) @ddt.unpack def test_share_create_with_encryption(self, encryption_support, dhss): share = { "id": "1", "size": 100, "display_name": "Share Test Name", "display_description": "Share Test Desc", "share_proto": "fakeproto", "share_network_id": "fakenetid" } fake_network = {'id': 'fakenetid'} create_mock = mock.Mock( return_value=stubs.stub_share( '1', size=100, display_name=share['display_name'], display_description=share['display_description'], share_proto=share['share_proto'].upper(), share_network_id=share['share_network_id'], instance={ 'encryption_key_ref': 'fake_key_uuid', }) ) self.mock_object(share_api.API, 
'create', create_mock) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=fake_network)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) stype = copy.deepcopy(self.vt) if dhss: stype['extra_specs']['driver_handles_share_servers'] = 'true' else: stype['extra_specs']['driver_handles_share_servers'] = 'false' if encryption_support: stype['extra_specs']['encryption_support'] = 'share' else: stype['extra_specs']['encryption_support'] = 'fake' self.mock_object(share_types, 'get_share_type_by_name', mock.Mock( return_value=stype)) share['encryption_key_ref'] = 'fake_key_uuid' request_args = copy.deepcopy(share) request_args['share_type'] = 'fake_volume_type_name' body = {"share": request_args} req = fakes.HTTPRequest.blank('/v2/fake/shares', version='2.90') if dhss and encryption_support: result = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( share, version='2.90') self.assertEqual( expected['share']['encryption_key_ref'], result['share']['encryption_key_ref']) else: self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_share_create_with_valid_default_share_type(self): self.mock_object(share_types, 'get_share_type_by_name', mock.Mock(return_value=self.vt)) CONF.set_default("default_share_type", self.vt['name']) self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/v2/fake/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response(self.share, version='2.7') share_types.get_share_type_by_name.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.vt['name']) self.assertEqual(expected, res_dict) def test_share_create_with_invalid_default_share_type(self): self.mock_object( share_types, 'get_default_share_type', mock.Mock(side_effect=exception.ShareTypeNotFoundByName( 
self.vt['name'])), ) CONF.set_default("default_share_type", self.vt['name']) req = fakes.HTTPRequest.blank('/v2/fake/shares', version='2.7') self.assertRaises(exception.ShareTypeNotFoundByName, self.controller.create, req, {'share': self.share}) share_types.get_default_share_type.assert_called_once_with() def test_share_create_with_replication(self): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank( '/v2/fake/shares', version=share_replicas.MIN_SUPPORTED_API_VERSION) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( self.share, version=share_replicas.MIN_SUPPORTED_API_VERSION) self.assertEqual(expected, res_dict) def test_share_create_with_share_net(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "share_network_id": "fakenetid" } fake_network = {'id': 'fakenetid'} share_net_subnets = [db_utils.create_share_network_subnet( id='fake_subnet_id', share_network_id=fake_network['id'])] create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id'])) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=fake_network)) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=share_net_subnets)) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v2/fake/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( shr, version='2.7') 
self.assertDictEqual(expected, res_dict) # pylint: disable=unsubscriptable-object self.assertEqual("fakenetid", create_mock.call_args[1]['share_network_id']) common.check_share_network_is_active.assert_called_once_with( fake_network) @ddt.data("2.15", "2.16") def test_share_create_original_with_user_id(self, microversion): self.mock_object(share_api.API, 'create', self.create_mock) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/v2/fake/shares', version=microversion) res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( self.share, version=microversion) self.assertEqual(expected, res_dict) @ddt.data(test_utils.annotated('/v2.0_az_unsupported', ('2.0', False)), test_utils.annotated('/v2.0_az_supported', ('2.0', True)), test_utils.annotated('/v2.47_az_unsupported', ('2.47', False)), test_utils.annotated('/v2.47_az_supported', ('2.47', True))) @ddt.unpack def test_share_create_with_share_type_azs(self, version, az_supported): """For API version<2.48, AZ validation should not be performed.""" self.mock_object(share_api.API, 'create', self.create_mock) create_args = copy.deepcopy(self.share) create_args['availability_zone'] = 'az1' if az_supported else 'az2' create_args['share_type'] = uuidutils.generate_uuid() stype_with_azs = copy.deepcopy(self.vt) stype_with_azs['extra_specs']['availability_zones'] = 'az1,az3' self.mock_object(share_types, 'get_share_type', mock.Mock( return_value=stype_with_azs)) req = fakes.HTTPRequest.blank('/v2/fake/shares', version=version) res_dict = self.controller.create(req, {'share': create_args}) expected = self._get_expected_share_detailed_response( values=self.share, version=version) self.assertEqual(expected, res_dict) @ddt.data(*set([ test_utils.annotated('v2.48_share_from_snap', ('2.48', True)), test_utils.annotated('v2.48_share_not_from_snap', ('2.48', False)), test_utils.annotated('v%s_share_from_snap' % LATEST_MICROVERSION, (LATEST_MICROVERSION, True)), 
test_utils.annotated('v%s_share_not_from_snap' % LATEST_MICROVERSION, (LATEST_MICROVERSION, False))])) @ddt.unpack def test_share_create_az_not_in_share_type(self, version, snap): """For API version>=2.48, AZ validation should be performed.""" self.mock_object(share_api.API, 'create', self.create_mock) create_args = copy.deepcopy(self.share) create_args['availability_zone'] = 'az2' create_args['share_type'] = (uuidutils.generate_uuid() if not snap else None) create_args['snapshot_id'] = (uuidutils.generate_uuid() if snap else None) stype_with_azs = copy.deepcopy(self.vt) stype_with_azs['extra_specs']['availability_zones'] = 'az1 , az3' self.mock_object(share_types, 'get_share_type', mock.Mock( return_value=stype_with_azs)) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) req = fakes.HTTPRequest.blank('/v2/fake/shares', version=version) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, {'share': create_args}) share_api.API.create.assert_not_called() def test_migration_start(self): share = db_utils.create_share() share_network = db_utils.create_share_network() share_type = {'share_type_id': 'fake_type_id'} req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.29') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True context = req.environ['manila.context'] self.mock_object(db, 'share_network_get', mock.Mock( return_value=share_network)) self.mock_object(db, 'share_type_get', mock.Mock( return_value=share_type)) body = { 'migration_start': { 'host': 'fake_host', 'preserve_metadata': True, 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, 'new_share_network_id': 'fake_net_id', 'new_share_type_id': 'fake_type_id', } } method = 'migration_start' self.mock_object(share_api.API, 'migration_start', mock.Mock(return_value=202)) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) 
response = getattr(self.controller, method)(req, share['id'], body) self.assertEqual(202, response.status_int) share_api.API.get.assert_called_once_with(context, share['id']) share_api.API.migration_start.assert_called_once_with( context, share, 'fake_host', False, True, True, True, True, new_share_network=share_network, new_share_type=share_type) db.share_network_get.assert_called_once_with( context, 'fake_net_id') db.share_type_get.assert_called_once_with( context, 'fake_type_id') def test_migration_start_conflict(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request = api_version.APIVersionRequest('2.29') req.api_version_request.experimental = True body = { 'migration_start': { 'host': 'fake_host', 'preserve_metadata': True, 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, } } self.mock_object(share_api.API, 'migration_start', mock.Mock(side_effect=exception.Conflict(err='err'))) self.assertRaises(webob.exc.HTTPConflict, self.controller.migration_start, req, share['id'], body) @ddt.data('nondisruptive', 'writable', 'preserve_metadata', 'preserve_snapshots', 'host', 'body') def test_migration_start_missing_mandatory(self, param): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.29') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = { 'migration_start': { 'host': 'fake_host', 'preserve_metadata': True, 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, } } if param == 'body': body.pop('migration_start') else: body['migration_start'].pop(param) method = 'migration_start' self.mock_object(share_api.API, 'migration_start') self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) 
self.assertRaises(webob.exc.HTTPBadRequest, getattr(self.controller, method), req, 'fake_id', body) @ddt.data('nondisruptive', 'writable', 'preserve_metadata', 'preserve_snapshots', 'force_host_assisted_migration') def test_migration_start_non_boolean(self, param): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.29') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = { 'migration_start': { 'host': 'fake_host', 'preserve_metadata': True, 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, } } body['migration_start'][param] = None method = 'migration_start' self.mock_object(share_api.API, 'migration_start') self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.assertRaises(webob.exc.HTTPBadRequest, getattr(self.controller, method), req, 'fake_id', body) def test_migration_start_no_share_id(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/%s/action' % 'fake_id', use_admin_context=True, version='2.29') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host'}} method = 'migration_start' self.mock_object(share_api.API, 'get', mock.Mock(side_effect=[exception.NotFound])) self.assertRaises(webob.exc.HTTPNotFound, getattr(self.controller, method), req, 'fake_id', body) def test_migration_start_new_share_network_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.29') context = req.environ['manila.context'] req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = { 'migration_start': { 'host': 'fake_host', 'preserve_metadata': True, 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, 
'new_share_network_id': 'nonexistent'}} self.mock_object(db, 'share_network_get', mock.Mock(side_effect=exception.NotFound())) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.migration_start, req, share['id'], body) db.share_network_get.assert_called_once_with(context, 'nonexistent') def test_migration_start_new_share_type_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.29') context = req.environ['manila.context'] req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = { 'migration_start': { 'host': 'fake_host', 'preserve_metadata': True, 'preserve_snapshots': True, 'writable': True, 'nondisruptive': True, 'new_share_type_id': 'nonexistent'}} self.mock_object(db, 'share_type_get', mock.Mock(side_effect=exception.NotFound())) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.migration_start, req, share['id'], body) db.share_type_get.assert_called_once_with(context, 'nonexistent') def test_migration_start_invalid_force_host_assisted_migration(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.29') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host', 'force_host_assisted_migration': 'fake'}} method = 'migration_start' self.assertRaises(webob.exc.HTTPBadRequest, getattr(self.controller, method), req, share['id'], body) @ddt.data('writable', 'preserve_metadata') def test_migration_start_invalid_writable_preserve_metadata( self, parameter): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.29') req.method = 'POST' req.headers['content-type'] = 'application/json' 
req.api_version_request.experimental = True body = {'migration_start': {'host': 'fake_host', parameter: 'invalid'}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.migration_start, req, share['id'], body) @ddt.data(constants.TASK_STATE_MIGRATION_ERROR, None) def test_reset_task_state(self, task_state): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': task_state} body = {'reset_task_state': update} self.mock_object(db, 'share_update') response = self.controller.reset_task_state(req, share['id'], body) self.assertEqual(202, response.status_int) db.share_update.assert_called_once_with(utils.IsAMatcher( context.RequestContext), share['id'], update) def test_reset_task_state_error_body(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'error': 'error'} body = {'reset_task_state': update} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.reset_task_state, req, share['id'], body) def test_reset_task_state_error_invalid(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': 'error'} body = {'reset_task_state': update} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.reset_task_state, req, share['id'], body) def test_reset_task_state_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], 
use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR} body = {'reset_task_state': update} self.mock_object(share_api.API, 'get', mock.Mock(side_effect=exception.NotFound)) self.mock_object(db, 'share_update') self.assertRaises(exception.NotFound, self.controller.reset_task_state, req, share['id'], body) share_api.API.get.assert_called_once_with(utils.IsAMatcher( context.RequestContext), share['id']) db.share_update.assert_not_called() def test_reset_task_state_share_other_project_public_share(self): share = db_utils.create_share(is_public=True) req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version=LATEST_MICROVERSION) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR} body = {'reset_task_state': update} # NOTE(gouthamr): we're testing a scenario where someone has access # to the RBAC rule share:reset_task_state, but doesn't own the share. 
# Ideally we'd override the default policy, but it's a shared # resource and we'll bleed into other tests, so we'll mock the # policy check to return False instead rbac_checks = [None, None, exception.NotAuthorized] with mock.patch.object(policy, 'check_policy', side_effect=rbac_checks): self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.reset_task_state, req, share['id'], body) def test_reset_task_state_share_has_been_soft_deleted(self): share = self.share_in_recycle_bin req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR} body = {'reset_task_state': update} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.reset_task_state, req, share['id'], body) def test_migration_complete(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_complete': None} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'migration_complete') response = self.controller.migration_complete(req, share['id'], body) self.assertEqual(202, response.status_int) share_api.API.migration_complete.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share) def test_migration_complete_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' 
req.api_version_request.experimental = True body = {'migration_complete': None} self.mock_object(share_api.API, 'get', mock.Mock(side_effect=exception.NotFound())) self.mock_object(share_api.API, 'migration_complete') self.assertRaises(webob.exc.HTTPNotFound, self.controller.migration_complete, req, share['id'], body) def test_migration_cancel(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_cancel': None} self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'migration_cancel') response = self.controller.migration_cancel(req, share['id'], body) self.assertEqual(202, response.status_int) share_api.API.migration_cancel.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share) def test_migration_cancel_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_cancel': None} self.mock_object(share_api.API, 'get', mock.Mock(side_effect=exception.NotFound())) self.mock_object(share_api.API, 'migration_cancel') self.assertRaises(webob.exc.HTTPNotFound, self.controller.migration_cancel, req, share['id'], body) def test_migration_get_progress(self): share = db_utils.create_share( task_state=constants.TASK_STATE_MIGRATION_SUCCESS) req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_get_progress': None} expected = { 'total_progress': 'fake', 'task_state': 
constants.TASK_STATE_MIGRATION_SUCCESS, } self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'migration_get_progress', mock.Mock(return_value=copy.deepcopy(expected))) response = self.controller.migration_get_progress(req, share['id'], body) self.assertEqual(expected, response) share_api.API.migration_get_progress.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share) def test_migration_get_progress_not_found(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % share['id'], use_admin_context=True, version='2.22') req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True body = {'migration_get_progress': None} self.mock_object(share_api.API, 'get', mock.Mock(side_effect=exception.NotFound())) self.mock_object(share_api.API, 'migration_get_progress') self.assertRaises(webob.exc.HTTPNotFound, self.controller.migration_get_progress, req, share['id'], body) def test_share_create_from_snapshot_without_share_net_no_parent(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v2/fake/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( shr, version='2.7') self.assertEqual(expected, res_dict) def 
test_share_create_from_snapshot_without_share_net_parent_exists(self): shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": None, } parent_share_net = 444 fake_network = {'id': parent_share_net} share_net_subnets = [db_utils.create_share_network_subnet( id='fake_subnet_id', share_network_id=fake_network['id'])] create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) parent_share = stubs.stub_share( '1', instance={'share_network_id': parent_share_net}, create_share_from_snapshot_support=True) self.mock_object(share_api.API, 'get', mock.Mock( return_value=parent_share)) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value=fake_network)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=share_net_subnets)) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v2/fake/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( shr, version='2.7') self.assertEqual(expected, res_dict) # pylint: disable=unsubscriptable-object self.assertEqual(parent_share_net, create_mock.call_args[1]['share_network_id']) common.check_share_network_is_active.assert_called_once_with( fake_network) def test_share_create_from_snapshot_with_share_net_equals_parent(self): parent_share_net = 444 shr = { "size": 100, "name": "Share Test Name", 
"description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": parent_share_net, } share_net_subnets = [db_utils.create_share_network_subnet( id='fake_subnet_id', share_network_id=parent_share_net)] create_mock = mock.Mock(return_value=stubs.stub_share('1', display_name=shr['name'], display_description=shr['description'], size=shr['size'], share_proto=shr['share_proto'].upper(), snapshot_id=shr['snapshot_id'], instance=dict( availability_zone=shr['availability_zone'], share_network_id=shr['share_network_id']))) self.mock_object(share_api.API, 'create', create_mock) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) parent_share = stubs.stub_share( '1', instance={'share_network_id': parent_share_net}, create_share_from_snapshot_support=True) self.mock_object(share_api.API, 'get', mock.Mock( return_value=parent_share)) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': parent_share_net})) self.mock_object(common, 'check_share_network_is_active', mock.Mock(return_value=True)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=share_net_subnets)) body = {"share": copy.deepcopy(shr)} req = fakes.HTTPRequest.blank('/v2/fake/shares', version='2.7') res_dict = self.controller.create(req, body) expected = self._get_expected_share_detailed_response( shr, version='2.7') self.assertDictEqual(expected, res_dict) # pylint: disable=unsubscriptable-object self.assertEqual(parent_share_net, create_mock.call_args[1]['share_network_id']) def test_share_create_from_snapshot_invalid_share_net(self): self.mock_object(share_api.API, 'create') shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": 1234, } body = {"share": shr} req = fakes.HTTPRequest.blank('/v2/fake/shares', 
version='2.7') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_share_create_from_snapshot_not_supported(self): parent_share_net = 444 self.mock_object(share_api.API, 'create') shr = { "size": 100, "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1", "snapshot_id": 333, "share_network_id": parent_share_net, } parent_share = stubs.stub_share( '1', instance={'share_network_id': parent_share_net}, create_share_from_snapshot_support=False) self.mock_object(share_api.API, 'get', mock.Mock( return_value=parent_share)) self.mock_object(share_api.API, 'get_share_network', mock.Mock( return_value={'id': parent_share_net})) body = {"share": shr} req = fakes.HTTPRequest.blank('/shares', version='2.24') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body) def test_share_creation_fails_with_bad_size(self): shr = {"size": '', "name": "Share Test Name", "description": "Share Test Desc", "share_proto": "fakeproto", "availability_zone": "zone1:host1"} body = {"share": shr} req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(exception.InvalidInput, self.controller.create, req, body) def test_share_create_no_body(self): req = fakes.HTTPRequest.blank('/shares', version='2.7') self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.create, req, {}) def test_share_create_invalid_availability_zone(self): self.mock_object( db, 'availability_zone_get', mock.Mock(side_effect=exception.AvailabilityZoneNotFound(id='id')) ) body = {"share": copy.deepcopy(self.share)} req = fakes.HTTPRequest.blank('/v2/shares', version='2.7') self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, body) def test_share_show(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1') expected = self._get_expected_share_detailed_response() res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def 
test_share_show_with_share_group(self): req = fakes.HTTPRequest.blank( '/v2/fake/shares/1', version='2.31', experimental=True) expected = self._get_expected_share_detailed_response(version='2.31') res_dict = self.controller.show(req, '1') self.assertDictEqual(expected, res_dict) def test_share_show_with_share_group_earlier_version(self): req = fakes.HTTPRequest.blank( '/v2/fake/shares/1', version='2.23', experimental=True) expected = self._get_expected_share_detailed_response(version='2.23') res_dict = self.controller.show(req, '1') self.assertDictEqual(expected, res_dict) def test_share_show_with_share_type_name(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1', version='2.6') res_dict = self.controller.show(req, '1') expected = self._get_expected_share_detailed_response(version='2.6') self.assertEqual(expected, res_dict) @ddt.data("2.15", "2.16") def test_share_show_with_user_id(self, microversion): req = fakes.HTTPRequest.blank('/v2/fake/shares/1', version=microversion) res_dict = self.controller.show(req, '1') expected = self._get_expected_share_detailed_response( version=microversion) self.assertEqual(expected, res_dict) def test_share_show_admin(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1', use_admin_context=True) expected = self._get_expected_share_detailed_response(admin=True) res_dict = self.controller.show(req, '1') self.assertEqual(expected, res_dict) def test_share_show_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/v2/fake/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '1') def test_share_show_with_replication_type(self): api_vers = share_replicas.MIN_SUPPORTED_API_VERSION req = fakes.HTTPRequest.blank('/v2/fake/shares/1', version=api_vers) res_dict = self.controller.show(req, '1') expected = self._get_expected_share_detailed_response(version=api_vers) self.assertEqual(expected, res_dict) @ddt.data(('2.10', True), ('2.27', True), 
('2.28', False)) @ddt.unpack def test_share_show_access_rules_status_translated(self, version, translated): share = db_utils.create_share( access_rules_status=constants.SHARE_INSTANCE_RULES_SYNCING, status=constants.STATUS_AVAILABLE) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s' % share['id'], version=version) res_dict = self.controller.show(req, share['id']) expected = (constants.STATUS_OUT_OF_SYNC if translated else constants.SHARE_INSTANCE_RULES_SYNCING) self.assertEqual(expected, res_dict['share']['access_rules_status']) def test_share_soft_delete(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', version='2.69') body = {"soft_delete": None} resp = self.controller.share_soft_delete(req, 1, body) self.assertEqual(202, resp.status_int) def test_share_soft_delete_has_been_soft_deleted_already(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', version='2.69') body = {"soft_delete": None} self.mock_object(share_api.API, 'get', mock.Mock(return_value=self.share_in_recycle_bin)) self.mock_object(share_api.API, 'soft_delete', mock.Mock( side_effect=exception.InvalidShare(reason='err'))) self.assertRaises( webob.exc.HTTPForbidden, self.controller.share_soft_delete, req, 1, body) def test_share_soft_delete_has_replicas(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', version='2.69') body = {"soft_delete": None} self.mock_object(share_api.API, 'get', mock.Mock(return_value=self.share)) self.mock_object(share_api.API, 'soft_delete', mock.Mock(side_effect=exception.Conflict(err='err'))) self.assertRaises( webob.exc.HTTPConflict, self.controller.share_soft_delete, req, 1, body) def test_share_restore(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', version='2.69') body = {"restore": None} self.mock_object(share_api.API, 'get', mock.Mock(return_value=self.share_in_recycle_bin)) resp = self.controller.share_restore(req, 1, body) 
self.assertEqual(202, resp.status_int) def test_share_restore_with_deleting_status(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', version='2.69') body = {"restore": None} self.mock_object( share_api.API, 'get', mock.Mock(return_value=self.share_in_recycle_bin_is_deleting)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.share_restore, req, 1, body) def test_share_delete(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_share_delete_has_replicas(self): req = fakes.HTTPRequest.blank('/v2/fake/shares/1') self.mock_object(share_api.API, 'get', mock.Mock(return_value=self.share)) self.mock_object(share_api.API, 'delete', mock.Mock(side_effect=exception.Conflict(err='err'))) self.assertRaises( webob.exc.HTTPConflict, self.controller.delete, req, 1) def test_share_delete_in_share_group_param_not_provided(self): fake_share = stubs.stub_share('fake_share', share_group_id='fake_group_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank('/v2/fake/shares/1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, 1) def test_share_delete_in_share_group(self): fake_share = stubs.stub_share('fake_share', share_group_id='fake_group_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank( '/v2/fake/shares/1?share_group_id=fake_group_id') resp = self.controller.delete(req, 1) self.assertEqual(202, resp.status_int) def test_share_delete_in_share_group_wrong_id(self): fake_share = stubs.stub_share('fake_share', share_group_id='fake_group_id') self.mock_object(share_api.API, 'get', mock.Mock(return_value=fake_share)) req = fakes.HTTPRequest.blank( '/v2/fake/shares/1?share_group_id=not_fake_group_id') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, 1) def test_share_update(self): shr = self.share body = {"share": 
shr} req = fakes.HTTPRequest.blank('/v2/fake/share/1') res_dict = self.controller.update(req, 1, body) self.assertEqual(shr["display_name"], res_dict['share']["name"]) self.assertEqual(shr["display_description"], res_dict['share']["description"]) self.assertEqual(shr['is_public'], res_dict['share']['is_public']) def test_share_update_with_share_group(self): shr = self.share body = {"share": shr} req = fakes.HTTPRequest.blank( '/v2/fake/share/1', version="2.31", experimental=True) res_dict = self.controller.update(req, 1, body) self.assertIsNone(res_dict['share']["share_group_id"]) self.assertIsNone( res_dict['share']["source_share_group_snapshot_member_id"]) def test_share_not_updates_size(self): req = fakes.HTTPRequest.blank('/v2/fake/share/1') res_dict = self.controller.update(req, 1, {"share": self.share}) self.assertNotEqual(res_dict['share']["size"], self.share["size"]) def test_share_delete_no_share(self): self.mock_object(share_api.API, 'get', stubs.stub_share_get_notfound) req = fakes.HTTPRequest.blank('/v2/fake/shares/1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1) @ddt.data({'use_admin_context': False, 'version': '2.4'}, {'use_admin_context': True, 'version': '2.4'}, {'use_admin_context': True, 'version': '2.35'}, {'use_admin_context': False, 'version': '2.35'}, {'use_admin_context': True, 'version': '2.36'}, {'use_admin_context': False, 'version': '2.36'}, {'use_admin_context': True, 'version': '2.42'}, {'use_admin_context': False, 'version': '2.42'}, {'use_admin_context': False, 'version': '2.69'}, {'use_admin_context': True, 'version': '2.69'}) @ddt.unpack def test_share_list_summary_with_search_opts(self, use_admin_context, version): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized 
k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', 'export_location_id': 'fake_export_location_id', 'export_location_path': 'fake_export_location_path', } if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.36')): search_opts.update( {'display_name~': 'fake', 'display_description~': 'fake'}) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.69')): search_opts.update({'is_soft_deleted': True}) method = 'get_all' shares = [ {'id': 'id1', 'display_name': 'n1'}, {'id': 'id2', 'display_name': 'n2'}, {'id': 'id3', 'display_name': 'n3'}, ] mock_action = {'return_value': [shares[1]]} if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.42')): search_opts.update({'with_count': 'true'}) method = 'get_all_with_count' mock_action = {'side_effect': [(1, [shares[1]])]} if use_admin_context: search_opts['host'] = 'fake_host' # fake_key should be filtered for non-admin url = '/v2/fake/shares?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + str(v) req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=use_admin_context) mock_get_all = ( self.mock_object(share_api.API, method, mock.Mock(**mock_action))) result = self.controller.index(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': search_opts['share_type_id'], 'snapshot_id': search_opts['snapshot_id'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', 'limit': '1', 'offset': '1' } if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.35')): search_opts_expected['export_location_id'] = ( search_opts['export_location_id']) 
search_opts_expected['export_location_path'] = ( search_opts['export_location_path']) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.36')): search_opts_expected.update( {'display_name~': search_opts['display_name~'], 'display_description~': search_opts['display_description~']}) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.69')): search_opts_expected['is_soft_deleted'] = ( search_opts['is_soft_deleted']) policy.check_policy.assert_called_once_with( req.environ['manila.context'], 'share', 'get_all') if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) search_opts_expected['host'] = search_opts['host'] mock_get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.42')): self.assertEqual(1, result['count']) @ddt.data({'use_admin_context': True, 'version': '2.42'}, {'use_admin_context': False, 'version': '2.42'}) @ddt.unpack def test_share_list_summary_with_search_opt_count_0(self, use_admin_context, version): search_opts = { 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'with_count': 'true' } if use_admin_context: search_opts['host'] = 'fake_host' # fake_key should be filtered url = '/v2/fake/shares?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + v req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=use_admin_context) self.mock_object(share_api.API, 'get_all_with_count', mock.Mock(side_effect=[(0, [])])) result = self.controller.index(req) search_opts_expected = {} policy.check_policy.assert_called_once_with( req.environ['manila.context'], 'share', 
'get_all') if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) search_opts_expected['host'] = search_opts['host'] share_api.API.get_all_with_count.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(0, len(result['shares'])) self.assertEqual(0, result['count']) def test_share_list_summary(self): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) req = fakes.HTTPRequest.blank('/v2/fake/shares') res_dict = self.controller.index(req) expected = { 'shares': [ { 'name': 'displayname', 'id': '1', 'links': [ { 'href': 'http://localhost/share/v2/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/shares/1', 'rel': 'bookmark' } ], } ] } policy.check_policy.assert_called_once_with( req.environ['manila.context'], 'share', 'get_all') self.assertEqual(expected, res_dict) @ddt.data({'use_admin_context': False, 'version': '2.4'}, {'use_admin_context': True, 'version': '2.4'}, {'use_admin_context': True, 'version': '2.35'}, {'use_admin_context': False, 'version': '2.35'}, {'use_admin_context': True, 'version': '2.42'}, {'use_admin_context': False, 'version': '2.42'}, {'use_admin_context': True, 'version': '2.69'}, {'use_admin_context': False, 'version': '2.69'}) @ddt.unpack def test_share_list_detail_with_search_opts(self, use_admin_context, version): search_opts = { 'name': 'fake_name', 'status': constants.STATUS_AVAILABLE, 'share_server_id': 'fake_share_server_id', 'share_type_id': 'fake_share_type_id', 'snapshot_id': 'fake_snapshot_id', 'share_network_id': 'fake_share_network_id', 'metadata': '%7B%27k1%27%3A+%27v1%27%7D', # serialized k1=v1 'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D', # serialized k2=v2 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'limit': '1', 'offset': '1', 'is_public': 'False', 'export_location_id': 'fake_export_location_id', 'export_location_path': 
'fake_export_location_path', } shares = [ {'id': 'id1', 'display_name': 'n1'}, { 'id': 'id2', 'display_name': 'n2', 'status': constants.STATUS_AVAILABLE, 'snapshot_id': 'fake_snapshot_id', 'instance': { 'host': 'fake_host', 'share_network_id': 'fake_share_network_id', 'share_type_id': 'fake_share_type_id', }, 'has_replicas': False, 'is_soft_deleted': True, 'scheduled_to_be_deleted_at': 'fake_datatime', }, {'id': 'id3', 'display_name': 'n3'}, ] method = 'get_all' mock_action = {'return_value': [shares[1]]} if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.42')): search_opts.update({'with_count': 'true'}) method = 'get_all_with_count' mock_action = {'side_effect': [(1, [shares[1]])]} if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.69')): search_opts.update({'is_soft_deleted': True}) if use_admin_context: search_opts['host'] = 'fake_host' # fake_key should be filtered for non-admin url = '/v2/fake/shares/detail?fake_key=fake_value' for k, v in search_opts.items(): url = url + '&' + k + '=' + str(v) req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=use_admin_context) mock_get_all = self.mock_object(share_api.API, method, mock.Mock(**mock_action)) result = self.controller.detail(req) search_opts_expected = { 'display_name': search_opts['name'], 'status': search_opts['status'], 'share_server_id': search_opts['share_server_id'], 'share_type_id': search_opts['share_type_id'], 'snapshot_id': search_opts['snapshot_id'], 'share_network_id': search_opts['share_network_id'], 'metadata': {'k1': 'v1'}, 'extra_specs': {'k2': 'v2'}, 'is_public': 'False', 'limit': '1', 'offset': '1' } if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.35')): search_opts_expected['export_location_id'] = ( search_opts['export_location_id']) search_opts_expected['export_location_path'] = ( search_opts['export_location_path']) if (api_version.APIVersionRequest(version) >= 
api_version.APIVersionRequest('2.69')): search_opts_expected['is_soft_deleted'] = ( search_opts['is_soft_deleted']) if use_admin_context: search_opts_expected.update({'fake_key': 'fake_value'}) search_opts_expected['host'] = search_opts['host'] policy.check_policy.assert_called_once_with( req.environ['manila.context'], 'share', 'get_all') mock_get_all.assert_called_once_with( req.environ['manila.context'], sort_key=search_opts['sort_key'], sort_dir=search_opts['sort_dir'], search_opts=search_opts_expected, ) self.assertEqual(1, len(result['shares'])) self.assertEqual(shares[1]['id'], result['shares'][0]['id']) self.assertEqual( shares[1]['display_name'], result['shares'][0]['name']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) self.assertEqual( shares[1]['status'], result['shares'][0]['status']) self.assertEqual( shares[1]['instance']['share_type_id'], result['shares'][0]['share_type']) self.assertEqual( shares[1]['snapshot_id'], result['shares'][0]['snapshot_id']) if use_admin_context: self.assertEqual( shares[1]['instance']['host'], result['shares'][0]['host']) self.assertEqual( shares[1]['instance']['share_network_id'], result['shares'][0]['share_network_id']) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.42')): self.assertEqual(1, result['count']) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.69')): self.assertEqual( shares[1]['scheduled_to_be_deleted_at'], result['shares'][0]['scheduled_to_be_deleted_at']) def _list_detail_common_expected(self, admin=False): share_dict = { 'status': 'fakestatus', 'description': 'displaydesc', 'export_location': 'fake_location', 'export_locations': ['fake_location', 'fake_location2'], 'availability_zone': 'fakeaz', 'name': 'displayname', 'share_proto': 'FAKEPROTO', 'metadata': {}, 'project_id': 'fakeproject', 'id': '1', 'snapshot_id': '2', 'snapshot_support': True, 'share_network_id': None, 'created_at': datetime.datetime(1, 1, 
1, 1, 1, 1), 'size': 1, 'share_type': '1', 'volume_type': '1', 'is_public': False, 'links': [ { 'href': 'http://localhost/share/v2/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/shares/1', 'rel': 'bookmark' } ], } if admin: share_dict['host'] = 'fakehost' return {'shares': [share_dict]} def _list_detail_test_common(self, req, expected): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) res_dict = self.controller.detail(req) policy.check_policy.assert_called_once_with( req.environ['manila.context'], 'share', 'get_all') self.assertDictListMatch(expected['shares'], res_dict['shares']) self.assertEqual(res_dict['shares'][0]['volume_type'], res_dict['shares'][0]['share_type']) def test_share_list_detail(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/v2/fake/shares/detail', environ=env) expected = self._list_detail_common_expected() expected['shares'][0].pop('snapshot_support') self._list_detail_test_common(req, expected) def test_share_list_detail_with_share_group(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/v2/fake/shares/detail', environ=env, version="2.31", experimental=True) expected = self._list_detail_common_expected() expected['shares'][0]['task_state'] = None expected['shares'][0]['share_type_name'] = None expected['shares'][0].pop('export_location') expected['shares'][0].pop('export_locations') expected['shares'][0]['access_rules_status'] = 'active' expected['shares'][0]['replication_type'] = None expected['shares'][0]['has_replicas'] = False expected['shares'][0]['user_id'] = 'fakeuser' expected['shares'][0]['create_share_from_snapshot_support'] = True expected['shares'][0]['revert_to_snapshot_support'] = False expected['shares'][0]['share_group_id'] = None expected['shares'][0]['source_share_group_snapshot_member_id'] = None self._list_detail_test_common(req, expected) def test_share_list_detail_with_task_state(self): env = 
{'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/v2/fake/shares/detail', environ=env, version="2.5") expected = self._list_detail_common_expected() expected['shares'][0]['task_state'] = None self._list_detail_test_common(req, expected) def test_share_list_detail_without_export_locations(self): env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank('/v2/fake/shares/detail', environ=env, version="2.9") expected = self._list_detail_common_expected() expected['shares'][0]['task_state'] = None expected['shares'][0]['share_type_name'] = None expected['shares'][0].pop('export_location') expected['shares'][0].pop('export_locations') self._list_detail_test_common(req, expected) def test_share_list_detail_with_replication_type(self): self.mock_object(share_api.API, 'get_all', stubs.stub_share_get_all_by_project) env = {'QUERY_STRING': 'name=Share+Test+Name'} req = fakes.HTTPRequest.blank( '/v2/fake/shares/detail', environ=env, version=share_replicas.MIN_SUPPORTED_API_VERSION) res_dict = self.controller.detail(req) policy.check_policy.assert_called_once_with( req.environ['manila.context'], 'share', 'get_all') expected = { 'shares': [ { 'status': 'fakestatus', 'description': 'displaydesc', 'availability_zone': 'fakeaz', 'name': 'displayname', 'share_proto': 'FAKEPROTO', 'metadata': {}, 'project_id': 'fakeproject', 'access_rules_status': 'active', 'id': '1', 'snapshot_id': '2', 'share_network_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'size': 1, 'share_type_name': None, 'share_type': '1', 'volume_type': '1', 'is_public': False, 'snapshot_support': True, 'has_replicas': False, 'replication_type': None, 'task_state': None, 'links': [ { 'href': 'http://localhost/share/v2/fake/shares/1', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/shares/1', 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) self.assertEqual(res_dict['shares'][0]['volume_type'], res_dict['shares'][0]['share_type']) def 
test_remove_invalid_options(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=False) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'c': 'c'} allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def test_remove_invalid_options_admin(self): ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=True) search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} expected_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} allowed_opts = ['a', 'c'] common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) def test_create_metadata(self): id = 'fake_share_id' body = {'metadata': {'key1': 'val1', 'key2': 'val2'}} mock_validate = self.mock_object( self.controller, '_validate_metadata_for_update', mock.Mock(return_value=body['metadata'])) mock_create = self.mock_object( self.controller, '_create_metadata', mock.Mock(return_value=body)) self.mock_object(share_api.API, 'update_share_from_metadata') req = fakes.HTTPRequest.blank( '/v2/shares/%s/metadata' % id) res = self.controller.create_metadata(req, id, body) self.assertEqual(body, res) mock_validate.assert_called_once_with(req, id, body['metadata'], delete=False) mock_create.assert_called_once_with(req, id, body) def test_update_all_metadata(self): id = 'fake_share_id' body = {'metadata': {'key1': 'val1', 'key2': 'val2'}} mock_validate = self.mock_object( self.controller, '_validate_metadata_for_update', mock.Mock(return_value=body['metadata'])) mock_update = self.mock_object( self.controller, '_update_all_metadata', mock.Mock(return_value=body)) self.mock_object(share_api.API, 'update_share_from_metadata') req = fakes.HTTPRequest.blank( '/v2/shares/%s/metadata' % id) res = self.controller.update_all_metadata(req, id, body) self.assertEqual(body, res) mock_validate.assert_called_once_with(req, id, body['metadata']) 
mock_update.assert_called_once_with(req, id, body) def test_delete_metadata(self): mock_delete = self.mock_object( self.controller, '_delete_metadata', mock.Mock()) req = fakes.HTTPRequest.blank( '/v2/shares/%s/metadata/fake_key' % id) self.controller.delete_metadata(req, id, 'fake_key') mock_delete.assert_called_once_with(req, id, 'fake_key') def _fake_access_get(self, ctxt, access_id): class Access(object): def __init__(self, **kwargs): self.STATE_NEW = 'fake_new' self.STATE_ACTIVE = 'fake_active' self.STATE_ERROR = 'fake_error' self.params = kwargs self.params['state'] = self.STATE_NEW self.share_id = kwargs.get('share_id') self.id = access_id def __getitem__(self, item): return self.params[item] access = Access(access_id=access_id, share_id='fake_share_id') return access @ddt.ddt class ShareActionsTest(test.TestCase): def setUp(self): super(ShareActionsTest, self).setUp() self.controller = shares.ShareController() self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(policy, 'check_policy') @ddt.unpack @ddt.data( {"access": {'access_type': 'ip', 'access_to': '127.0.0.1'}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': '1' * 4}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': '1' * 255}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': 'fake{.-_\'`}'}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': 'MYDOMAIN-Administrator'}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': 'test group name'}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': 'group$.-_\'`{}'}, "version": "2.7"}, {"access": {'access_type': 'cert', 'access_to': 'x'}, "version": "2.7"}, {"access": {'access_type': 'cert', 'access_to': 'tenant.example.com'}, "version": "2.7"}, {"access": {'access_type': 'cert', 'access_to': 'x' * 64}, "version": "2.7"}, {"access": {'access_type': 'ip', 'access_to': 'ad80::abaa:0:c2:2'}, "version": "2.38"}, {"access": {'access_type': 
'ip', 'access_to': 'AD80:ABAA::'}, "version": "2.38"}, {"access": {'access_type': 'ip', 'access_to': 'AD80::/36'}, "version": "2.38"}, {"access": {'access_type': 'ip', 'access_to': 'AD80:ABAA::/128'}, "version": "2.38"}, {"access": {'access_type': 'ip', 'access_to': '127.0.0.1'}, "version": "2.38"}, {"access": {'access_type': 'ip', 'access_to': '127.0.0.1', 'metadata': {'test_key': 'test_value'}}, "version": "2.45"}, {"access": {'access_type': 'ip', 'access_to': '127.0.0.1', 'metadata': {'k' * 255: 'v' * 1023}}, "version": "2.45"}, ) def test_allow_access(self, access, version): self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value={'fake': 'fake'})) self.mock_object(self.controller._access_view_builder, 'view', mock.Mock(return_value={'access': {'fake': 'fake'}})) id = 'fake_share_id' body = {'allow_access': access} expected = {'access': {'fake': 'fake'}} req = fakes.HTTPRequest.blank( '/v2/tenant1/shares/%s/action' % id, version=version) res = self.controller.allow_access(req, id, body) self.assertEqual(expected, res) @ddt.unpack @ddt.data( {"access": {'access_type': 'error_type', 'access_to': '127.0.0.1'}, "version": "2.7"}, {"access": {'access_type': 'ip', 'access_to': 'localhost'}, "version": "2.7"}, {"access": {'access_type': 'ip', 'access_to': '127.0.0.*'}, "version": "2.7"}, {"access": {'access_type': 'ip', 'access_to': '127.0.0.0/33'}, "version": "2.7"}, {"access": {'access_type': 'ip', 'access_to': '127.0.0.256'}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': '1'}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': '1' * 3}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': '1' * 256}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': 'root<>'}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': 'group\\'}, "version": "2.7"}, {"access": {'access_type': 'user', 'access_to': '+=*?group'}, "version": "2.7"}, {"access": {'access_type': 'cert', 'access_to': 
''}, "version": "2.7"}, {"access": {'access_type': 'cert', 'access_to': ' '}, "version": "2.7"}, {"access": {'access_type': 'cert', 'access_to': 'x' * 65}, "version": "2.7"}, {"access": {'access_type': 'ip', 'access_to': 'ad80::abaa:0:c2:2'}, "version": "2.37"}, {"access": {'access_type': 'ip', 'access_to': '127.4.0.3/33'}, "version": "2.38"}, {"access": {'access_type': 'ip', 'access_to': 'AD80:ABAA::*'}, "version": "2.38"}, {"access": {'access_type': 'ip', 'access_to': 'AD80::/129'}, "version": "2.38"}, {"access": {'access_type': 'ip', 'access_to': 'ad80::abaa:0:c2:2/64'}, "version": "2.38"}, {"access": {'access_type': 'ip', 'access_to': '127.0.0.1', 'metadata': {'k' * 256: 'v' * 1024}}, "version": "2.45"}, {"access": {'access_type': 'ip', 'access_to': '127.0.0.1', 'metadata': {'key': None}}, "version": "2.45"}, ) def test_allow_access_error(self, access, version): id = 'fake_share_id' body = {'allow_access': access} req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id, version=version) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.allow_access, req, id, body) @ddt.unpack @ddt.data( {'exc': None, 'access_to': 'alice', 'version': '2.13'}, {'exc': webob.exc.HTTPBadRequest, 'access_to': 'alice', 'version': '2.11'} ) def test_allow_access_ceph(self, exc, access_to, version): share_id = "fake_id" self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value={'fake': 'fake'})) self.mock_object(self.controller._access_view_builder, 'view', mock.Mock(return_value={'access': {'fake': 'fake'}})) req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % share_id, version=version) body = {'allow_access': { 'access_type': 'cephx', 'access_to': access_to, 'access_level': 'rw' }} if exc: self.assertRaises(exc, self.controller.allow_access, req, share_id, body) else: expected = {'access': {'fake': 'fake'}} res = self.controller.allow_access(req, id, body) self.assertEqual(expected, res) @ddt.data('2.1', '2.27') def 
test_allow_access_access_rules_status_is_in_error(self, version): share = db_utils.create_share( access_rules_status=constants.SHARE_INSTANCE_RULES_ERROR) req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % share['id'], version=version) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'allow_access') if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.7')): key = 'allow_access' method = self.controller.allow_access else: key = 'os-allow_access' method = self.controller.allow_access_legacy body = { key: { 'access_type': 'user', 'access_to': 'crimsontide', 'access_level': 'rw', } } self.assertRaises(webob.exc.HTTPBadRequest, method, req, share['id'], body) self.assertFalse(share_api.API.allow_access.called) @ddt.data(*itertools.product( ('2.1', '2.27'), (constants.SHARE_INSTANCE_RULES_SYNCING, constants.STATUS_ACTIVE))) @ddt.unpack def test_allow_access_no_transitional_states(self, version, status): share = db_utils.create_share(access_rules_status=status, status=constants.STATUS_AVAILABLE) req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % share['id'], version=version) ctxt = req.environ['manila.context'] access = { 'access_type': 'user', 'access_to': 'clemsontigers', 'access_level': 'rw', } expected_mapping = { constants.SHARE_INSTANCE_RULES_SYNCING: constants.STATUS_NEW, constants.SHARE_INSTANCE_RULES_ERROR: constants.ACCESS_STATE_ERROR, constants.STATUS_ACTIVE: constants.ACCESS_STATE_ACTIVE, } share = db.share_get(ctxt, share['id']) updated_access = db_utils.create_access(share_id=share['id'], **access) expected_access = access expected_access.update( { 'id': updated_access['id'], 'state': expected_mapping[share['access_rules_status']], 'share_id': updated_access['share_id'], }) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.7')): key = 'allow_access' method = self.controller.allow_access else: key = 'os-allow_access' method = 
self.controller.allow_access_legacy if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.13')): expected_access['access_key'] = None self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value=updated_access)) body = {key: access} access = method(req, share['id'], body) self.assertEqual(expected_access, access['access']) share_api.API.allow_access.assert_called_once_with( req.environ['manila.context'], share, 'user', 'clemsontigers', 'rw', None, False) @ddt.data(*itertools.product( set(['2.28', api_version._MAX_API_VERSION]), (constants.SHARE_INSTANCE_RULES_ERROR, constants.SHARE_INSTANCE_RULES_SYNCING, constants.STATUS_ACTIVE))) @ddt.unpack def test_allow_access_access_rules_status_dont_care(self, version, status): access = { 'access_type': 'user', 'access_to': 'clemsontigers', 'access_level': 'rw', } updated_access = db_utils.create_access(**access) expected_access = access expected_access.update( { 'id': updated_access['id'], 'state': updated_access['state'], 'share_id': updated_access['share_id'], 'access_key': None, }) share = db_utils.create_share(access_rules_status=status) req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % share['id'], version=version) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'allow_access', mock.Mock(return_value=updated_access)) body = {'allow_access': access} access = self.controller.allow_access(req, share['id'], body) if api_version.APIVersionRequest(version) >= ( api_version.APIVersionRequest("2.33")): expected_access.update( { 'created_at': updated_access['created_at'], 'updated_at': updated_access['updated_at'], }) if api_version.APIVersionRequest(version) >= ( api_version.APIVersionRequest("2.45")): expected_access.update( { 'metadata': {}, }) if api_version.APIVersionRequest(version) >= ( api_version.APIVersionRequest("2.74")): allow_on_error_state = True else: 
allow_on_error_state = False self.assertEqual(expected_access, access['access']) share_api.API.allow_access.assert_called_once_with( req.environ['manila.context'], share, 'user', 'clemsontigers', 'rw', None, allow_on_error_state) def test_deny_access(self): def _stub_deny_access(*args, **kwargs): pass self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) id = 'fake_share_id' body = {"os-deny_access": {"access_id": 'fake_acces_id'}} req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id) res = self.controller._deny_access(req, id, body) self.assertEqual(202, res.status_int) def test_deny_access_not_found(self): def _stub_deny_access(*args, **kwargs): pass self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) id = 'super_fake_share_id' body = {"os-deny_access": {"access_id": 'fake_acces_id'}} req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPNotFound, self.controller._deny_access, req, id, body) def test_access_list(self): fake_access_list = [ { "state": "fakestatus", "id": "fake_access_id", "access_type": "fakeip", "access_to": "127.0.0.1", } ] self.mock_object(self.controller._access_view_builder, 'list_view', mock.Mock(return_value={'access_list': fake_access_list})) id = 'fake_share_id' body = {"os-access_list": None} req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id) res_dict = self.controller._access_list(req, id, body) self.assertEqual({'access_list': fake_access_list}, res_dict) @ddt.unpack @ddt.data( {'body': {'os-extend': {'new_size': 2}}, 'version': '2.6'}, {'body': {'extend': {'new_size': 2}}, 'version': '2.7'}, ) def test_extend(self, body, version): id = 'fake_share_id' share = stubs.stub_share_get(None, None, id) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, "extend") size 
= '2' req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % id, version=version) actual_response = self.controller._extend(req, id, body) share_api.API.get.assert_called_once_with(mock.ANY, id) share_api.API.extend.assert_called_once_with( mock.ANY, share, int(size), force=False) self.assertEqual(202, actual_response.status_int) @ddt.data({"os-extend": ""}, {"os-extend": {"new_size": "foo"}}, {"os-extend": {"new_size": {'foo': 'bar'}}}) def test_extend_invalid_body(self, body): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v2/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._extend, req, id, body) @ddt.data({'source': exception.InvalidInput, 'target': webob.exc.HTTPBadRequest}, {'source': exception.InvalidShare, 'target': webob.exc.HTTPBadRequest}, {'source': exception.ShareSizeExceedsAvailableQuota, 'target': webob.exc.HTTPForbidden}) @ddt.unpack def test_extend_exception(self, source, target): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v2/shares/%s/action' % id) body = {"os-extend": {'new_size': '123'}} self.mock_object(share_api.API, "extend", mock.Mock(side_effect=source('fake'))) self.assertRaises(target, self.controller._extend, req, id, body) @ddt.unpack @ddt.data( {'body': {'os-shrink': {'new_size': 1}}, 'version': '2.6'}, {'body': {'shrink': {'new_size': 1}}, 'version': '2.7'}, ) def test_shrink(self, body, version): id = 'fake_share_id' share = stubs.stub_share_get(None, None, id) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, "shrink") size = '1' req = fakes.HTTPRequest.blank( '/v2/shares/%s/action' % id, version=version) actual_response = self.controller._shrink(req, id, body) share_api.API.get.assert_called_once_with(mock.ANY, id) share_api.API.shrink.assert_called_once_with( mock.ANY, share, int(size)) self.assertEqual(202, actual_response.status_int) @ddt.data({"os-shrink": ""}, {"os-shrink": {"new_size": "foo"}}, {"os-shrink": {"new_size": 
{'foo': 'bar'}}}) def test_shrink_invalid_body(self, body): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v2/shares/%s/action' % id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._shrink, req, id, body) @ddt.data({'source': exception.InvalidInput, 'target': webob.exc.HTTPBadRequest}, {'source': exception.InvalidShare, 'target': webob.exc.HTTPBadRequest}) @ddt.unpack def test_shrink_exception(self, source, target): id = 'fake_share_id' req = fakes.HTTPRequest.blank('/v2/shares/%s/action' % id) body = {"os-shrink": {'new_size': '123'}} self.mock_object(share_api.API, "shrink", mock.Mock(side_effect=source('fake'))) self.assertRaises(target, self.controller._shrink, req, id, body) @ddt.ddt class ShareAdminActionsAPITest(test.TestCase): def setUp(self): super(ShareAdminActionsAPITest, self).setUp() CONF.set_default("default_share_type", None) self.flags(transport_url='rabbit://fake:fake@mqhost:5672') self.share_api = share_api.API() self.admin_context = context.RequestContext('admin', 'fake', True) self.member_context = context.RequestContext('fake', 'fake') def _get_context(self, role): return getattr(self, '%s_context' % role) def _setup_share_data(self, share=None, version='2.7'): if share is None: share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size='1', override_defaults=True) path = '/v2/fake/shares/%s/action' % share['id'] req = fakes.HTTPRequest.blank(path, script_name=path, version=version) return share, req def _reset_status(self, ctxt, model, req, db_access_method, valid_code, valid_status=None, body=None, version='2.7'): if float(version) > 2.6: action_name = 'reset_status' else: action_name = 'os-reset_status' if body is None: body = {action_name: {'status': constants.STATUS_ERROR}} req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = jsonutils.dumps(body).encode("utf-8") req.environ['manila.context'] = ctxt resp = 
req.get_response(fakes.app(), catch_exc_info=True) # validate response code and model status self.assertEqual(valid_code, resp.status_int) if valid_code == 404 and db_access_method is not None: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) elif db_access_method: actual_model = db_access_method(ctxt, model['id']) self.assertEqual(valid_status, actual_model['status']) @ddt.data(*fakes.fixture_reset_status_with_different_roles) @ddt.unpack def test_share_reset_status_with_different_roles(self, role, valid_code, valid_status, version): share, req = self._setup_share_data(version=version) ctxt = self._get_context(role) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self._reset_status(ctxt, share, req, db.share_get, valid_code, valid_status, version=version) @ddt.data(*fakes.fixture_invalid_reset_status_body) def test_share_invalid_reset_status_body(self, body): share, req = self._setup_share_data(version='2.6') ctxt = self.admin_context self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self._reset_status(ctxt, share, req, db.share_get, 400, constants.STATUS_AVAILABLE, body, version='2.6') @ddt.data('2.6', '2.7') def test_share_reset_status_for_missing(self, version): fake_share = {'id': 'missing-share-id', 'is_soft_deleted': False} req = fakes.HTTPRequest.blank( '/v2/fake/shares/%s/action' % fake_share['id'], version=version) self._reset_status(self.admin_context, fake_share, req, db.share_get, 404, version=version) @ddt.data('2.6', '2.7') def test_reset_status_other_project_public_share(self, version): # NOTE(gouthamr): we're testing a scenario where someone has access # to the RBAC rule share:reset_status, but doesn't own the share. 
# Ideally we'd override the default policy, but it's a shared # resource and we'll bleed into other tests, so we'll mock the # policy check to return False instead share, req = self._setup_share_data(version=version) share['is_public'] = True rbac_checks = [None, exception.NotAuthorized] with mock.patch.object(policy, 'authorize', side_effect=rbac_checks): self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self._reset_status( self.member_context, share, req, None, 403, version=version) def _force_delete(self, ctxt, model, req, db_access_method, valid_code, check_model_in_db=False, version='2.7'): if float(version) > 2.6: action_name = 'force_delete' else: action_name = 'os-force_delete' req.method = 'POST' req.headers['content-type'] = 'application/json' req.headers['X-Openstack-Manila-Api-Version'] = version req.body = jsonutils.dumps({action_name: {}}).encode("utf-8") req.environ['manila.context'] = ctxt resp = req.get_response(fakes.app()) # validate response self.assertEqual(valid_code, resp.status_int) if valid_code == 202 and check_model_in_db: self.assertRaises(exception.NotFound, db_access_method, ctxt, model['id']) @ddt.data(*fakes.fixture_force_delete_with_different_roles) @ddt.unpack def test_share_force_delete_with_different_roles(self, role, resp_code, version): share, req = self._setup_share_data(version=version) ctxt = self._get_context(role) self._force_delete(ctxt, share, req, db.share_get, resp_code, check_model_in_db=True, version=version) @ddt.data('2.6', '2.7') def test_share_force_delete_missing(self, version): share, req = self._setup_share_data( share={'id': 'fake'}, version=version) ctxt = self._get_context('admin') self._force_delete( ctxt, share, req, db.share_get, 404, version=version) @ddt.ddt class ShareUnmanageTest(test.TestCase): def setUp(self): super(ShareUnmanageTest, self).setUp() self.controller = shares.ShareController() self.mock_object(share_api.API, 'get_all', stubs.stub_get_all_shares) 
self.mock_object(share_api.API, 'get', stubs.stub_share_get) self.mock_object(share_api.API, 'update', stubs.stub_share_update) self.mock_object(share_api.API, 'delete', stubs.stub_share_delete) self.mock_object(share_api.API, 'get_snapshot', stubs.stub_snapshot_get) self.share_id = 'fake' self.request = fakes.HTTPRequest.blank( '/v2/fake/share/%s/unmanage' % self.share_id, use_admin_context=True, version='2.7', ) def test_unmanage_share(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[])) actual_result = self.controller.unmanage(self.request, share['id']) self.assertEqual(202, actual_result.status_int) (self.controller.share_api.db.share_snapshot_get_all_for_share. assert_called_once_with( self.request.environ['manila.context'], share['id'])) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) share_api.API.unmanage.assert_called_once_with( self.request.environ['manila.context'], share) def test__unmanage(self): body = {} req = fakes.HTTPRequest.blank('/v2/fake/shares/1/action', use_admin_context=False, version='2.49') share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) mock_unmanage = self.mock_object(self.controller, '_unmanage') self.controller.unmanage(req, share['id'], body) mock_unmanage.assert_called_once_with( req, share['id'], body, allow_dhss_true=True ) def test_unmanage_share_that_has_snapshots(self): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) snapshots = ['foo', 'bar'] self.mock_object(self.controller.share_api, 'unmanage') self.mock_object( self.controller.share_api.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=snapshots)) self.mock_object( 
self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.assertFalse(self.controller.share_api.unmanage.called) (self.controller.share_api.db.share_snapshot_get_all_for_share. assert_called_once_with( self.request.environ['manila.context'], share['id'])) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) def test_unmanage_share_based_on_share_server(self): share = dict(instance=dict(share_server_id='foo_id'), id='bar_id') self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) @ddt.data(*constants.TRANSITIONAL_STATUSES) def test_unmanage_share_with_transitional_state(self, share_status): share = dict(status=share_status, id='foo_id', instance={}) self.mock_object( self.controller.share_api, 'get', mock.Mock(return_value=share)) self.assertRaises( webob.exc.HTTPForbidden, self.controller.unmanage, self.request, share['id']) self.controller.share_api.get.assert_called_once_with( self.request.environ['manila.context'], share['id']) def test_unmanage_share_not_found(self): self.mock_object(share_api.API, 'get', mock.Mock( side_effect=exception.NotFound)) self.mock_object(share_api.API, 'unmanage', mock.Mock()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.unmanage, self.request, self.share_id) @ddt.data(exception.InvalidShare(reason="fake"), exception.PolicyNotAuthorized(action="fake"),) def test_unmanage_share_invalid(self, side_effect): share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', instance={}) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) self.mock_object(share_api.API, 'unmanage', mock.Mock( side_effect=side_effect)) 
self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, self.request, self.share_id) def test_wrong_permissions(self): share_id = 'fake' req = fakes.HTTPRequest.blank('/v2/fake/share/%s/unmanage' % share_id, use_admin_context=False, version='2.7') self.assertRaises(webob.exc.HTTPForbidden, self.controller.unmanage, req, share_id) def test_unsupported_version(self): share_id = 'fake' req = fakes.HTTPRequest.blank('/v2/fake/share/%s/unmanage' % share_id, use_admin_context=False, version='2.6') self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.unmanage, req, share_id) def get_fake_manage_body(export_path='/fake', service_host='fake@host#POOL', protocol='fake', share_type='fake', **kwargs): fake_share = { 'export_path': export_path, 'service_host': service_host, 'protocol': protocol, 'share_type': share_type, } fake_share.update(kwargs) return {'share': fake_share} @ddt.ddt class ShareManageTest(test.TestCase): def setUp(self): super(ShareManageTest, self).setUp() self.controller = shares.ShareController() self.resource_name = self.controller.resource_name self.request = fakes.HTTPRequest.blank( '/v2/shares/manage', use_admin_context=True, version='2.7') self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) def _setup_manage_mocks(self, service_is_up=True): self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock( return_value={'host': 'fake'})) self.mock_object(share_types, 'get_share_type_by_name_or_id', mock.Mock(return_value={'id': 'fake'})) self.mock_object(utils, 'service_is_up', mock.Mock( return_value=service_is_up)) if service_is_up: self.mock_object(utils, 'validate_service_host') else: self.mock_object( utils, 'validate_service_host', mock.Mock(side_effect=exception.ServiceIsDown(service='fake'))) def test__manage(self): body = {} req = fakes.HTTPRequest.blank( '/v2/shares/manage', use_admin_context=True, version='2.49') mock_manage = self.mock_object(self.controller, 
'_manage') self.controller.manage(req, body) mock_manage.assert_called_once_with( req, body, allow_dhss_true=True ) @ddt.data({}, {'shares': {}}, {'share': get_fake_manage_body('', None, None)}) def test_share_manage_invalid_body(self, body): self.assertRaises(webob.exc.HTTPUnprocessableEntity, self.controller.manage, self.request, body) def test_share_manage_service_not_found(self): body = get_fake_manage_body() self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock( side_effect=exception.ServiceNotFound(service_id='fake'))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.manage, self.request, body) def test_share_manage_share_type_not_found(self): body = get_fake_manage_body() self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock()) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object(db, 'share_type_get_by_name', mock.Mock( side_effect=exception.ShareTypeNotFoundByName( share_type_name='fake'))) self.assertRaises(webob.exc.HTTPNotFound, self.controller.manage, self.request, body) @ddt.data({'service_is_up': False, 'service_host': 'fake@host#POOL'}, {'service_is_up': True, 'service_host': 'fake@host'}) def test_share_manage_bad_request(self, settings): body = get_fake_manage_body(service_host=settings.pop('service_host')) self._setup_manage_mocks(**settings) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.manage, self.request, body) def test_share_manage_duplicate_share(self): body = get_fake_manage_body() exc = exception.InvalidShare(reason="fake") self._setup_manage_mocks() self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=exc)) self.assertRaises(webob.exc.HTTPConflict, self.controller.manage, self.request, body) def test_share_manage_forbidden_manage(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object(share_api.API, 'manage', error) 
self.assertRaises(webob.exc.HTTPForbidden, self.controller.manage, self.request, body) def test_share_manage_forbidden_validate_service_host(self): body = get_fake_manage_body() self._setup_manage_mocks() error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action='')) self.mock_object( utils, 'validate_service_host', mock.Mock(side_effect=error)) self.assertRaises(webob.exc.HTTPForbidden, self.controller.manage, self.request, body) @ddt.data( get_fake_manage_body(name='foo', description='bar'), get_fake_manage_body(display_name='foo', description='bar'), get_fake_manage_body(name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar'), get_fake_manage_body(display_name='foo', display_description='bar', driver_options=dict(volume_id='quuz')), ) def test_share_manage(self, data): self._test_share_manage(data, "2.7") @ddt.data( get_fake_manage_body(name='foo', description='bar', is_public=True), get_fake_manage_body(name='foo', description='bar', is_public=False) ) def test_share_manage_with_is_public(self, data): self._test_share_manage(data, "2.8") def test_share_manage_with_user_id(self): self._test_share_manage(get_fake_manage_body( name='foo', description='bar', is_public=True), "2.16") def _test_share_manage(self, data, version): expected = { 'share': { 'status': 'fakestatus', 'description': 'displaydesc', 'availability_zone': 'fakeaz', 'name': 'displayname', 'share_proto': 'FAKEPROTO', 'metadata': {}, 'project_id': 'fakeproject', 'host': 'fakehost', 'id': 'fake', 'snapshot_id': '2', 'share_network_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'size': 1, 'share_type_name': None, 'share_server_id': 'fake_share_server_id', 'share_type': '1', 'volume_type': '1', 'is_public': False, 'snapshot_support': True, 'task_state': None, 'links': [ { 'href': 'http://localhost/share/v2/fake/shares/fake', 'rel': 'self' }, { 'href': 'http://localhost/share/fake/shares/fake', 'rel': 'bookmark' } ], } } 
self._setup_manage_mocks() return_share = mock.Mock( return_value=stubs.stub_share( 'fake', instance={ 'share_type_id': '1', }) ) self.mock_object( share_api.API, 'manage', return_share) self.mock_object( common, 'validate_public_share_policy', mock.Mock(side_effect=lambda *args, **kwargs: args[1])) share = { 'host': data['share']['service_host'], 'export_location_path': data['share']['export_path'], 'share_proto': data['share']['protocol'].upper(), 'share_type_id': 'fake', 'display_name': 'foo', 'display_description': 'bar', } driver_options = data['share'].get('driver_options', {}) if (api_version.APIVersionRequest(version) <= api_version.APIVersionRequest('2.8')): expected['share']['export_location'] = 'fake_location' expected['share']['export_locations'] = ( ['fake_location', 'fake_location2']) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.10')): expected['share']['access_rules_status'] = ( constants.STATUS_ACTIVE) if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.11')): expected['share']['has_replicas'] = False expected['share']['replication_type'] = None if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.16')): expected['share']['user_id'] = 'fakeuser' if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.8')): share['is_public'] = data['share']['is_public'] if (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.80')): share['source_backup_id'] = None req = fakes.HTTPRequest.blank('/v2/fake/shares/manage', version=version, use_admin_context=True) actual_result = self.controller.manage(req, data) share_api.API.manage.assert_called_once_with( mock.ANY, share, driver_options) self.assertIsNotNone(actual_result) self.assertEqual(expected, actual_result) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], self.resource_name, 'manage') def test_wrong_permissions(self): body = get_fake_manage_body() 
self.assertRaises( webob.exc.HTTPForbidden, self.controller.manage, fakes.HTTPRequest.blank('/v2/fake/share/manage', use_admin_context=False, version='2.7'), body, ) def test_unsupported_version(self): share_id = 'fake' req = fakes.HTTPRequest.blank( '/v2/fake/share/manage', use_admin_context=False, version='2.6') self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.manage, req, share_id) def test_revert(self): mock_revert = self.mock_object( self.controller, '_revert', mock.Mock(return_value='fake_response')) req = fakes.HTTPRequest.blank('/v2/fake/shares/fake_id/action', use_admin_context=False, version='2.27') result = self.controller.revert(req, 'fake_id', 'fake_body') self.assertEqual('fake_response', result) mock_revert.assert_called_once_with( req, 'fake_id', 'fake_body') def test_revert_unsupported(self): req = fakes.HTTPRequest.blank('/v2/shares/fake_id/action', use_admin_context=False, version='2.24') self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.revert, req, 'fake_id', 'fake_body') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.965671 manila-21.0.0/manila/tests/api/views/0000775000175000017500000000000000000000000017367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/views/__init__.py0000664000175000017500000000000000000000000021466 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/views/test_quota_class_sets.py0000664000175000017500000001101300000000000024350 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from manila.api.openstack import api_version_request as api_version from manila.api.views import quota_class_sets from manila import test from manila.tests.api import fakes @ddt.ddt class ViewBuilderTestCase(test.TestCase): def setUp(self): super(ViewBuilderTestCase, self).setUp() self.builder = quota_class_sets.ViewBuilder() def test__collection_name(self): self.assertEqual('quota_class_set', self.builder._collection_name) @ddt.data( ("fake_quota_class", "2.40"), (None, "2.40"), ("fake_quota_class", "2.39"), (None, "2.39"), ("fake_quota_class", "2.53"), (None, "2.53"), ("fake_quota_class", "2.62"), (None, "2.62"), ("fake_quota_class", "2.80"), (None, "2.80"), ("fake_quota_class", "2.90"), (None, "2.90"), ) @ddt.unpack def test_detail_list_with_share_type(self, quota_class, microversion): req = fakes.HTTPRequest.blank('/quota-sets', version=microversion) quota_class_set = { "shares": 13, "gigabytes": 31, "snapshots": 14, "snapshot_gigabytes": 41, "share_groups": 15, "share_group_snapshots": 51, "share_networks": 16, } expected = {self.builder._collection_name: { "shares": quota_class_set["shares"], "gigabytes": quota_class_set["gigabytes"], "snapshots": quota_class_set["snapshots"], "snapshot_gigabytes": quota_class_set["snapshot_gigabytes"], "share_networks": quota_class_set["share_networks"], }} if quota_class: expected[self.builder._collection_name]['id'] = quota_class if (api_version.APIVersionRequest(microversion) >= ( api_version.APIVersionRequest("2.40"))): expected[self.builder._collection_name][ "share_groups"] = quota_class_set["share_groups"] 
expected[self.builder._collection_name][ "share_group_snapshots"] = quota_class_set[ "share_group_snapshots"] if req.api_version_request >= api_version.APIVersionRequest("2.53"): fake_share_replicas_value = 46 fake_replica_gigabytes_value = 100 expected[self.builder._collection_name]["share_replicas"] = ( fake_share_replicas_value) expected[self.builder._collection_name][ "replica_gigabytes"] = fake_replica_gigabytes_value quota_class_set['share_replicas'] = fake_share_replicas_value quota_class_set['replica_gigabytes'] = fake_replica_gigabytes_value if req.api_version_request >= api_version.APIVersionRequest("2.62"): fake_per_share_gigabytes = 10 expected[self.builder._collection_name][ "per_share_gigabytes"] = fake_per_share_gigabytes quota_class_set['per_share_gigabytes'] = fake_per_share_gigabytes if req.api_version_request >= api_version.APIVersionRequest("2.80"): fake_share_backups_value = 46 fake_backup_gigabytes_value = 100 expected[self.builder._collection_name]["backups"] = ( fake_share_backups_value) expected[self.builder._collection_name][ "backup_gigabytes"] = fake_backup_gigabytes_value quota_class_set['backups'] = fake_share_backups_value quota_class_set['backup_gigabytes'] = fake_backup_gigabytes_value if req.api_version_request >= api_version.APIVersionRequest("2.90"): fake_encryption_keys = 10 expected[self.builder._collection_name][ "encryption_keys"] = fake_encryption_keys quota_class_set['encryption_keys'] = ( fake_encryption_keys) result = self.builder.detail_list( req, quota_class_set, quota_class=quota_class) self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/views/test_quota_sets.py0000664000175000017500000001177500000000000023202 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Mirantis, Inc. # All Rights Reserved. 
import ddt

from manila.api.openstack import api_version_request as api_version
from manila.api.views import quota_sets
from manila import test
from manila.tests.api import fakes


@ddt.ddt
class ViewBuilderTestCase(test.TestCase):
    """Tests for the quota-sets API view builder."""

    def setUp(self):
        super(ViewBuilderTestCase, self).setUp()
        self.builder = quota_sets.ViewBuilder()

    def test__collection_name(self):
        self.assertEqual('quota_set', self.builder._collection_name)

    @ddt.data(
        ('fake_project_id', 'fake_share_type_id', "2.40"),
        (None, 'fake_share_type_id', "2.40"),
        ('fake_project_id', None, "2.40"),
        (None, None, "2.40"),
        ('fake_project_id', 'fake_share_type_id', "2.39"),
        (None, 'fake_share_type_id', "2.39"),
        ('fake_project_id', None, "2.39"),
        (None, None, "2.39"),
        (None, 'fake_share_type_id', "2.53"),
        ('fake_project_id', None, "2.53"),
        (None, None, "2.53"),
        (None, 'fake_share_type_id', "2.62"),
        ('fake_project_id', None, "2.62"),
        (None, None, "2.62"),
        ('fake_project_id', None, "2.80"),
        (None, None, "2.80"),
        ('fake_project_id', None, "2.90"),
        (None, None, "2.90"),
    )
    @ddt.unpack
    def test_detail_list_with_share_type(self, project_id, share_type,
                                         microversion):
        """Verify detail_list output across project/share-type/microversion.

        Each API microversion adds quota keys to the view; the expected
        dict is built up branch by branch to mirror that history.
        """
        req = fakes.HTTPRequest.blank('/quota-sets', version=microversion)
        quota_set = {
            "shares": 13,
            "gigabytes": 31,
            "snapshots": 14,
            "snapshot_gigabytes": 41,
            "share_groups": 15,
            "share_group_snapshots": 51,
            "share_networks": 16,
        }
        expected = {self.builder._collection_name: {
            "shares": quota_set["shares"],
            "gigabytes": quota_set["gigabytes"],
            "snapshots": quota_set["snapshots"],
            "snapshot_gigabytes": quota_set["snapshot_gigabytes"],
        }}
        if project_id:
            expected[self.builder._collection_name]['id'] = project_id
        if not share_type:
            # share_networks (and, as of 2.40, share-group quotas) are only
            # reported for project-wide quota sets, never per share type.
            expected[self.builder._collection_name][
                "share_networks"] = quota_set["share_networks"]
            if (api_version.APIVersionRequest(microversion) >= (
                    api_version.APIVersionRequest("2.40"))):
                expected[self.builder._collection_name][
                    "share_groups"] = quota_set["share_groups"]
                expected[self.builder._collection_name][
                    "share_group_snapshots"] = quota_set[
                        "share_group_snapshots"]
        if req.api_version_request >= api_version.APIVersionRequest("2.53"):
            # 2.53 introduced share replica quotas.
            fake_share_replicas_value = 46
            fake_replica_gigabytes_value = 100
            expected[self.builder._collection_name]["share_replicas"] = (
                fake_share_replicas_value)
            expected[self.builder._collection_name][
                "replica_gigabytes"] = fake_replica_gigabytes_value
            quota_set['share_replicas'] = fake_share_replicas_value
            quota_set['replica_gigabytes'] = fake_replica_gigabytes_value
        if req.api_version_request >= api_version.APIVersionRequest("2.62"):
            # 2.62 introduced the per-share size quota.
            fake_per_share_gigabytes = 10
            expected[self.builder._collection_name]["per_share_gigabytes"] = (
                fake_per_share_gigabytes)
            quota_set['per_share_gigabytes'] = fake_per_share_gigabytes
        if req.api_version_request >= api_version.APIVersionRequest("2.80"):
            # 2.80 introduced share backup quotas.
            fake_share_backups_value = 46
            fake_backup_gigabytes_value = 100
            expected[self.builder._collection_name]["backups"] = (
                fake_share_backups_value)
            expected[self.builder._collection_name][
                "backup_gigabytes"] = fake_backup_gigabytes_value
            quota_set['backups'] = fake_share_backups_value
            quota_set['backup_gigabytes'] = fake_backup_gigabytes_value
        if req.api_version_request >= api_version.APIVersionRequest("2.90"):
            # 2.90 introduced the encryption-keys quota.
            fake_encryption_keys = 10
            # Bug fix: the expected value previously referenced
            # fake_per_share_gigabytes (the 2.62 fixture) instead of
            # fake_encryption_keys; the test only passed because both
            # happened to be 10.
            expected[self.builder._collection_name][
                "encryption_keys"] = fake_encryption_keys
            quota_set['encryption_keys'] = fake_encryption_keys

        result = self.builder.detail_list(
            req, quota_set, project_id=project_id,
            share_type=share_type)

        self.assertEqual(expected, result)
'backend': 'backend1', 'pool': 'pool2', 'capabilities': { 'pool_name': 'pool2', 'driver_handles_share_servers': False, 'qos': 'False', 'timestamp': '2015-03-15T19:15:42.611690', 'allocated_capacity_gb': 15, 'total_capacity_gb': 20, } } ] } class ViewBuilderTestCase(test.TestCase): def setUp(self): super(ViewBuilderTestCase, self).setUp() self.builder = scheduler_stats.ViewBuilder() def test_pools(self): result = self.builder.pools(POOLS) # Remove capabilities for summary view expected = copy.deepcopy(POOLS_DETAIL_VIEW) for pool in expected['pools']: del pool['capabilities'] self.assertDictEqual(expected, result) def test_pools_with_details(self): result = self.builder.pools(POOLS, detail=True) expected = POOLS_DETAIL_VIEW self.assertDictEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/api/views/test_share_accesses.py0000664000175000017500000000715300000000000023761 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock

import ddt

from manila.api.openstack import api_version_request as api_version
from manila.api.views import share_accesses
from manila.share import api
from manila import test
from manila.tests.api import fakes


@ddt.ddt
class ViewBuilderTestCase(test.TestCase):
    """Tests for the share-access API view builder."""

    def setUp(self):
        super(ViewBuilderTestCase, self).setUp()
        self.builder = share_accesses.ViewBuilder()
        self.fake_access = {
            'id': 'fakeaccessid',
            'share_id': 'fakeshareid',
            'access_level': 'fakeaccesslevel',
            'access_to': 'fakeacccessto',
            'access_type': 'fakeaccesstype',
            'state': 'fakeaccessstate',
            'access_key': 'fakeaccesskey',
            'created_at': 'fakecreated_at',
            'updated_at': 'fakeupdated_at',
            'metadata': {},
        }
        self.fake_share = {
            'access_rules_status': self.fake_access['state'],
        }

    def test_collection_name(self):
        self.assertEqual('share_accesses', self.builder._collection_name)

    @ddt.data("2.20", "2.21", "2.33", "2.45")
    def test_view(self, version):
        req = fakes.HTTPRequest.blank('/shares', version=version)
        self.mock_object(api.API, 'get',
                         mock.Mock(return_value=self.fake_share))

        result = self.builder.view(req, self.fake_access)

        # The full view keeps share_id; trim keys the microversion hides.
        self._prune_unsupported_keys(version, keep_share_id=True)
        self.assertEqual({'access': self.fake_access}, result)

    @ddt.data("2.20", "2.21", "2.33", "2.45")
    def test_summary_view(self, version):
        req = fakes.HTTPRequest.blank('/shares', version=version)
        self.mock_object(api.API, 'get',
                         mock.Mock(return_value=self.fake_share))

        result = self.builder.summary_view(req, self.fake_access)

        self._prune_unsupported_keys(version)
        self.assertEqual({'access': self.fake_access}, result)

    @ddt.data("2.20", "2.21", "2.33", "2.45")
    def test_list_view(self, version):
        req = fakes.HTTPRequest.blank('/shares', version=version)
        self.mock_object(api.API, 'get',
                         mock.Mock(return_value=self.fake_share))
        accesses = [self.fake_access, ]

        result = self.builder.list_view(req, accesses)

        self._prune_unsupported_keys(version)
        self.assertEqual({'access_list': accesses}, result)

    def _prune_unsupported_keys(self, version, keep_share_id=False):
        """Drop keys from the expected dict that ``version`` predates.

        Mutates ``self.fake_access`` in place so it matches what the view
        builder returns for the requested microversion.
        """
        requested = api_version.APIVersionRequest(version)
        if requested < api_version.APIVersionRequest("2.21"):
            self.fake_access.pop('access_key')
        if requested < api_version.APIVersionRequest("2.33"):
            self.fake_access.pop('created_at')
            self.fake_access.pop('updated_at')
        if requested < api_version.APIVersionRequest("2.45"):
            self.fake_access.pop('metadata')
        if not keep_share_id:
            self.fake_access.pop('share_id')
import ddt

from manila.api.openstack import api_version_request as api_version
from manila.api.views import share_network_subnets
from manila import test
from manila.tests.api import fakes
from manila.tests import db_utils


@ddt.ddt
class ViewBuilderTestCase(test.TestCase):
    """Tests for the share-network-subnet API view builder."""

    # Keys every detail view must carry regardless of microversion.
    _DETAIL_KEYS = ('id', 'created_at', 'updated_at', 'neutron_net_id',
                    'neutron_subnet_id', 'network_type', 'cidr',
                    'segmentation_id', 'ip_version', 'share_network_id',
                    'availability_zone', 'gateway', 'mtu')

    def setUp(self):
        super(ViewBuilderTestCase, self).setUp()
        self.builder = share_network_subnets.ViewBuilder()
        self.share_network = db_utils.create_share_network(
            name='fake_network', id='fake_sn_id')

    def _assert_has_detail_keys(self, result, metadata_support=False):
        """Assert ``result`` carries every detail-view key."""
        required = list(self._DETAIL_KEYS)
        if metadata_support:
            required.append('metadata')
        for key in required:
            self.assertIn(key, result)

    def test_build_share_network_subnet(self):
        req = fakes.HTTPRequest.blank('/subnets', version='2.51')
        subnet = db_utils.create_share_network_subnet(
            share_network_id=self.share_network['id'])

        result = self.builder.build_share_network_subnet(req, subnet)

        self.assertEqual(1, len(result))
        self.assertIn('share_network_subnet', result)
        view = result['share_network_subnet']
        self.assertEqual(subnet['id'], view['id'])
        self.assertEqual(subnet['share_network_id'],
                         view['share_network_id'])
        self.assertIsNone(view['availability_zone'])
        self._assert_has_detail_keys(view)

    @ddt.data("2.51", "2.78")
    def test_build_share_network_subnets(self, microversion):
        # Subnet metadata appeared in the view with microversion 2.78.
        metadata_support = (api_version.APIVersionRequest(microversion) >=
                            api_version.APIVersionRequest('2.78'))
        req = fakes.HTTPRequest.blank('/subnets', version=microversion)
        share_network = db_utils.create_share_network(
            name='fake_network', id='fake_sn_id_1')
        expected_metadata = {'fake_key': 'fake_value'}
        subnet = db_utils.create_share_network_subnet(
            share_network_id=share_network['id'],
            metadata=expected_metadata)

        result = self.builder.build_share_network_subnets(req, [subnet])

        self.assertIn('share_network_subnets', result)
        self.assertEqual(1, len(result['share_network_subnets']))
        for view in result['share_network_subnets']:
            self._assert_has_detail_keys(
                view, metadata_support=metadata_support)
            if metadata_support:
                self.assertEqual(expected_metadata, view['metadata'])
import copy
import ddt
import itertools

from manila.api.openstack import api_version_request as api_version
from manila.api.views import share_networks
from manila import test
from manila.tests.api import fakes


@ddt.ddt
class ViewBuilderTestCase(test.TestCase):
    """Tests for the share-networks API view builder.

    The view's key set changed repeatedly across microversions; each test
    recomputes feature flags from the requested microversion and builds the
    expected payload accordingly.
    """

    def setUp(self):
        super(ViewBuilderTestCase, self).setUp()
        self.builder = share_networks.ViewBuilder()

    def test__collection_name(self):
        self.assertEqual('share_networks', self.builder._collection_name)

    @ddt.data(*itertools.product(
        [
            {'id': 'fake_sn_id', 'name': 'fake_sn_name',
             'share_network_subnets': []},
            {'id': 'fake_sn_id', 'name': 'fake_sn_name',
             'share_network_subnets': [], 'fake_extra_key': 'foo'},
            {'id': 'fake_sn_id', 'name': 'fake_sn_name',
             'share_network_subnets': [
                 {'availability_zone_id': None,
                  'id': 'fake',
                  'availability_zone': None,
                  'is_default': False
                  }],
             'fake_extra_key': 'foo'},
        ],
        ["1.0", "2.0", "2.18", "2.20", "2.25", "2.26", "2.49",
         api_version._MAX_API_VERSION])
    )
    @ddt.unpack
    def test_build_share_network(self, share_network_data, microversion):
        """Check the single-network view exposes exactly the expected keys."""
        # Feature flags derived from the microversion under test:
        # gateway appeared in 2.18, mtu in 2.20, nova_net_id was dropped
        # in 2.26, inline network info was replaced by subnets after 2.49,
        # status/security-service fields arrived in 2.63, and the
        # network-allocation-update flag in 2.69.
        gateway_support = (api_version.APIVersionRequest(microversion) >=
                           api_version.APIVersionRequest('2.18'))
        mtu_support = (api_version.APIVersionRequest(microversion) >=
                       api_version.APIVersionRequest('2.20'))
        nova_net_support = (api_version.APIVersionRequest(microversion) <
                            api_version.APIVersionRequest('2.26'))
        default_net_info_support = (
            api_version.APIVersionRequest(microversion) <=
            api_version.APIVersionRequest('2.49'))
        subnets_support = (api_version.APIVersionRequest(microversion) >
                           api_version.APIVersionRequest('2.49'))
        status_and_sec_serv_update = (
            api_version.APIVersionRequest(microversion) >=
            api_version.APIVersionRequest('2.63'))
        network_allocation_update_support = (
            api_version.APIVersionRequest(microversion) >=
            api_version.APIVersionRequest('2.69'))
        req = fakes.HTTPRequest.blank('/share-networks',
                                      version=microversion)
        expected_keys = {
            'id', 'name', 'project_id', 'created_at', 'updated_at',
            'description'}
        if subnets_support:
            expected_keys.add('share_network_subnets')
        else:
            if default_net_info_support:
                network_info = {
                    'neutron_net_id', 'neutron_subnet_id', 'network_type',
                    'segmentation_id', 'cidr', 'ip_version'}
                expected_keys.update(network_info)
            if gateway_support:
                expected_keys.add('gateway')
            if mtu_support:
                expected_keys.add('mtu')
            if nova_net_support:
                expected_keys.add('nova_net_id')
        if status_and_sec_serv_update:
            expected_keys.update({'status',
                                  'security_service_update_support'})
        if network_allocation_update_support:
            expected_keys.add('network_allocation_update_support')

        result = self.builder.build_share_network(req, share_network_data)

        self.assertEqual(1, len(result))
        self.assertIn('share_network', result)
        self.assertEqual(share_network_data['id'],
                         result['share_network']['id'])
        self.assertEqual(share_network_data['name'],
                         result['share_network']['name'])
        # Two-way containment check: the view has exactly expected_keys.
        self.assertEqual(len(expected_keys), len(result['share_network']))
        for key in expected_keys:
            self.assertIn(key, result['share_network'])
        for key in result['share_network']:
            self.assertIn(key, expected_keys)

    @ddt.data(*itertools.product(
        [
            [],
            [{'id': 'fake_id',
              'name': 'fake_name',
              'project_id': 'fake_project_id',
              'created_at': 'fake_created_at',
              'updated_at': 'fake_updated_at',
              'neutron_net_id': 'fake_neutron_net_id',
              'neutron_subnet_id': 'fake_neutron_subnet_id',
              'network_type': 'fake_network_type',
              'segmentation_id': 'fake_segmentation_id',
              'cidr': 'fake_cidr',
              'ip_version': 'fake_ip_version',
              'description': 'fake_description'},
             {'id': 'fake_id2', 'name': 'fake_name2'}],
        ],
        set(["1.0", "2.0", "2.18", "2.20", "2.25", "2.26", "2.49",
             api_version._MAX_API_VERSION]))
    )
    @ddt.unpack
    def test_build_share_networks_with_details(self, share_networks,
                                               microversion):
        """Check the detailed list view across the microversion matrix.

        NOTE: this test mutates the input ``share_networks`` dicts while
        building ``expected_data`` — the update order below is significant.
        """
        gateway_support = (api_version.APIVersionRequest(microversion) >=
                           api_version.APIVersionRequest('2.18'))
        mtu_support = (api_version.APIVersionRequest(microversion) >=
                       api_version.APIVersionRequest('2.20'))
        nova_net_support = (api_version.APIVersionRequest(microversion) <
                            api_version.APIVersionRequest('2.26'))
        default_net_info_support = (
            api_version.APIVersionRequest(microversion) <=
            api_version.APIVersionRequest('2.49'))
        subnets_support = (api_version.APIVersionRequest(microversion) >
                           api_version.APIVersionRequest('2.49'))
        status_and_sec_serv_update = (
            api_version.APIVersionRequest(microversion) >=
            api_version.APIVersionRequest('2.63'))
        network_allocation_update_support = (
            api_version.APIVersionRequest(microversion) >=
            api_version.APIVersionRequest('2.69'))
        subnet_metadata_support = (
            api_version.APIVersionRequest(microversion) >=
            api_version.APIVersionRequest('2.78'))
        req = fakes.HTTPRequest.blank('/share-networks',
                                      version=microversion)
        expected_networks_list = []
        for share_network in share_networks:
            expected_data = {
                'id': share_network.get('id'),
                'name': share_network.get('name'),
                'project_id': share_network.get('project_id'),
                'created_at': share_network.get('created_at'),
                'updated_at': share_network.get('updated_at'),
                'description': share_network.get('description'),
            }
            if subnets_support:
                expected_subnet = {
                    'id': 'fake_subnet_id',
                    'availability_zone': 'fake_az',
                    'created_at': share_network.get('created_at'),
                    'updated_at': share_network.get('updated_at'),
                    'segmentation_id': share_network.get('segmentation_id'),
                    'neutron_net_id': share_network.get('neutron_net_id'),
                    'neutron_subnet_id': share_network.get(
                        'neutron_subnet_id'),
                    'ip_version': share_network.get('ip_version'),
                    'cidr': share_network.get('cidr'),
                    'network_type': share_network.get('network_type'),
                    'mtu': share_network.get('mtu'),
                    'gateway': share_network.get('gateway'),
                }
                subnet = expected_subnet
                if subnet_metadata_support:
                    # From 2.78 the input carries 'subnet_metadata' while
                    # the view exposes it as 'metadata' — so the input and
                    # expected dicts must diverge (hence the deepcopy).
                    subnet = copy.deepcopy(expected_subnet)
                    expected_subnet['metadata'] = {'fake_key': 'fake_value'}
                    subnet['subnet_metadata'] = expected_subnet['metadata']
                expected_data.update(
                    {'share_network_subnets': [expected_subnet]})
                share_network.update({'share_network_subnets': [subnet]})
            else:
                if default_net_info_support:
                    network_data = {
                        'neutron_net_id': share_network.get(
                            'neutron_net_id'),
                        'neutron_subnet_id': share_network.get(
                            'neutron_subnet_id'),
                        'network_type': share_network.get('network_type'),
                        'segmentation_id': share_network.get(
                            'segmentation_id'),
                        'cidr': share_network.get('cidr'),
                        'ip_version': share_network.get('ip_version'),
                    }
                    expected_data.update(network_data)
                if gateway_support:
                    share_network.update({'gateway': 'fake_gateway'})
                    expected_data.update(
                        {'gateway': share_network.get('gateway')})
                if mtu_support:
                    share_network.update({'mtu': 1509})
                    expected_data.update({'mtu': share_network.get('mtu')})
                if nova_net_support:
                    # nova_net_id is always rendered as None by the view.
                    share_network.update({'nova_net_id': 'fake_nova_net_id'})
                    expected_data.update({'nova_net_id': None})
            if status_and_sec_serv_update:
                share_network.update(
                    {'status': 'active',
                     'security_service_update_support': False})
                expected_data.update(
                    {'status': 'active',
                     'security_service_update_support': False})
            if network_allocation_update_support:
                share_network.update(
                    {'network_allocation_update_support': None})
                expected_data.update(
                    {'network_allocation_update_support': None})
            expected_networks_list.append(expected_data)
        expected = {'share_networks': expected_networks_list}

        result = self.builder.build_share_networks(req, share_networks,
                                                   is_detail=True)

        self.assertEqual(expected, result)

    @ddt.data(*itertools.product(
        [
            [],
            [{'id': 'foo', 'name': 'bar'}],
            [{'id': 'id1', 'name': 'name1'},
             {'id': 'id2', 'name': 'name2'}],
            [{'id': 'id1', 'name': 'name1'},
             {'id': 'id2', 'name': 'name2',
              'fake': 'I should not be returned'}]
        ],
        set(["1.0", "2.0", "2.18", "2.20", "2.25", "2.26", "2.49",
             api_version._MAX_API_VERSION]))
    )
    @ddt.unpack
    def test_build_share_networks_without_details(self, share_networks,
                                                  microversion):
        """The summary list view only ever exposes 'id' and 'name'."""
        req = fakes.HTTPRequest.blank('/share-networks',
                                      version=microversion)
        expected = []
        for share_network in share_networks:
            expected.append({
                'id': share_network.get('id'),
                'name': share_network.get('name')
            })
        expected = {'share_networks': expected}

        result = self.builder.build_share_networks(req, share_networks,
                                                   is_detail=False)

        self.assertEqual(expected, result)

    @ddt.data(('update_security_service', True),
              ('add_security_service', False))
    @ddt.unpack
    def test_build_security_service_update_check(self, operation, is_admin):
        """Per-host check results are only shown to admin requests."""
        req = fakes.HTTPRequest.blank('/share-networks',
                                      use_admin_context=is_admin)
        params = {'new_service_id': 'new_id'}
        if operation == 'update_security_service':
            # Only the update operation carries a current service id.
            params['current_service_id'] = 'current_id'
        hosts_result = {
            'compatible': True,
            'hosts_check_result': {'hostA': True}
        }
        expected = {
            'compatible': True,
            'requested_operation': {
                'operation': operation,
                'current_security_service': params.get(
                    'current_service_id'),
                'new_security_service': params.get('new_service_id'),
            },
        }
        if is_admin:
            expected['hosts_check_result'] = hosts_result[
                'hosts_check_result']

        result = self.builder.build_security_service_update_check(
            req, params, hosts_result)

        self.assertEqual(expected, result)

    @ddt.data(True, False)
    def test_build_share_network_subnet_create_check(self, is_admin):
        """Per-host check results are only shown to admin requests."""
        req = fakes.HTTPRequest.blank('/share-networks',
                                      use_admin_context=is_admin)
        hosts_result = {
            'compatible': True,
            'hosts_check_result': {'hostA': True}
        }
        expected = {'compatible': True}
        if is_admin:
            expected['hosts_check_result'] = hosts_result[
                'hosts_check_result']

        result = self.builder.build_share_network_subnet_create_check(
            req, hosts_result)

        self.assertEqual(expected, result)
import copy

import ddt

from manila.api.views import shares
from manila.common import constants
from manila import test
from manila.tests.api.contrib import stubs
from manila.tests.api import fakes


@ddt.ddt
class ViewBuilderTestCase(test.TestCase):
    """Tests for the shares API view builder."""

    def setUp(self):
        super(ViewBuilderTestCase, self).setUp()
        self.builder = shares.ViewBuilder()
        self.fake_share = self._get_fake_share()

    def _get_fake_share(self):
        """Build a stub share with every view-relevant field populated."""
        overrides = {
            'share_type_id': 'fake_share_type_id',
            'share_type': {
                'name': 'fake_share_type_name',
            },
            'export_location': 'fake_export_location',
            'export_locations': ['fake_export_location'],
            'access_rules_status': 'fake_rule_status',
            'instance': {
                'share_type': {
                    'name': 'fake_share_type_name',
                },
                'share_type_id': 'fake_share_type_id',
                'progress': '100%',
            },
            'replication_type': 'fake_replication_type',
            'has_replicas': False,
            'user_id': 'fake_userid',
            'snapshot_support': True,
            'create_share_from_snapshot_support': True,
            'revert_to_snapshot_support': True,
            'progress': '100%',
            'scheduled_to_be_deleted_at': 'fake_datetime',
        }
        return stubs.stub_share('fake_id', **overrides)

    def test__collection_name(self):
        self.assertEqual('shares', self.builder._collection_name)

    @ddt.data('2.6', '2.9', '2.10', '2.11', '2.16', '2.24', '2.27', '2.54',
              '2.69')
    def test_detail(self, microversion):
        req = fakes.HTTPRequest.blank('/shares', version=microversion)

        result = self.builder.detail(req, self.fake_share)

        expected = {
            'id': self.fake_share['id'],
            'share_type': self.fake_share['share_type_id'],
            'share_type_name': self.fake_share['share_type']['name'],
            'export_location': 'fake_export_location',
            'export_locations': ['fake_export_location'],
            'snapshot_support': True,
        }
        if self.is_microversion_ge(microversion, '2.9'):
            # Export locations moved out of the share view as of 2.9.
            del expected['export_location']
            del expected['export_locations']
        # Fields added at each microversion, oldest first.
        additions_by_version = (
            ('2.10', {'access_rules_status': 'fake_rule_status'}),
            ('2.11', {'replication_type': 'fake_replication_type',
                      'has_replicas': False}),
            ('2.16', {'user_id': 'fake_userid'}),
            ('2.24', {'create_share_from_snapshot_support': True}),
            ('2.27', {'revert_to_snapshot_support': True}),
            ('2.54', {'progress': '100%'}),
            ('2.69', {'scheduled_to_be_deleted_at': 'fake_datetime'}),
        )
        for version, additions in additions_by_version:
            if self.is_microversion_ge(microversion, version):
                expected.update(additions)
        self.assertSubDictMatch(expected, result['share'])

    @ddt.data('1.0', '2.51', '2.54')
    def test_detail_translate_creating_from_snapshot_status(self,
                                                            microversion):
        req = fakes.HTTPRequest.blank('/shares', version=microversion)
        granular_status = constants.STATUS_CREATING_FROM_SNAPSHOT
        share = copy.deepcopy(self.fake_share)
        share.update({'status': granular_status})

        result = self.builder.detail(req, share)

        if self.is_microversion_ge(microversion, '2.54'):
            expected = {'status': granular_status}
        else:
            # Before 2.54 the granular status was masked as plain 'creating'.
            expected = {'status': constants.STATUS_CREATING}
        self.assertSubDictMatch(expected, result['share'])
import copy
from unittest import mock

import ddt

from manila.api.views import versions
from manila import test


class FakeRequest(object):
    """Minimal stand-in for a WSGI request — only application_url is read."""

    def __init__(self, application_url):
        self.application_url = application_url


URL_BASE = 'http://localhost/'
FAKE_HREF = URL_BASE + 'v1/'

FAKE_VERSIONS = {
    "v1.0": {
        "id": "v1.0",
        "status": "CURRENT",
        "version": "1.1",
        "min_version": "1.0",
        "updated": "2015-07-30T11:33:21Z",
        "links": [
            {
                "rel": "describedby",
                "type": "text/html",
                "href": 'http://docs.openstack.org/',
            },
        ],
        "media-types": [
            {
                "base": "application/json",
                "type": "application/vnd.openstack.share+json;version=1",
            }
        ],
    },
}

FAKE_LINKS = [
    {
        "rel": "describedby",
        "type": "text/html",
        "href": 'http://docs.openstack.org/',
    },
    {
        'rel': 'self',
        'href': FAKE_HREF
    },
]


@ddt.ddt
class ViewBuilderTestCase(test.TestCase):
    """Tests for the versions API view builder."""

    def _get_builder(self):
        return versions.get_view_builder(FakeRequest('fake'))

    def test_build_versions(self):
        self.mock_object(versions.ViewBuilder, '_build_links',
                         mock.Mock(return_value=FAKE_LINKS))

        result = self._get_builder().build_versions(FAKE_VERSIONS)

        expected = {'versions': list(FAKE_VERSIONS.values())}
        expected['versions'][0]['links'] = FAKE_LINKS
        self.assertEqual(expected, result)

    def test_build_version(self):
        self.mock_object(versions.ViewBuilder, '_build_links',
                         mock.Mock(return_value=FAKE_LINKS))

        result = self._get_builder()._build_version(FAKE_VERSIONS['v1.0'])

        expected = copy.deepcopy(FAKE_VERSIONS['v1.0'])
        expected['links'] = FAKE_LINKS
        self.assertEqual(expected, result)

    def test_build_links(self):
        self.mock_object(versions.ViewBuilder, '_generate_href',
                         mock.Mock(return_value=FAKE_HREF))

        result = self._get_builder()._build_links(FAKE_VERSIONS['v1.0'])

        self.assertEqual(FAKE_LINKS, result)

    def test_generate_href_defaults(self):
        self.mock_object(versions.ViewBuilder,
                         '_get_base_url_without_version',
                         mock.Mock(return_value=URL_BASE))

        result = self._get_builder()._generate_href()

        self.assertEqual('http://localhost/v1/', result)

    @ddt.data(
        ('v2', None, URL_BASE + 'v2/'),
        ('/v2/', None, URL_BASE + 'v2/'),
        ('/v2/', 'fake_path', URL_BASE + 'v2/fake_path'),
        ('/v2/', '/fake_path/', URL_BASE + 'v2/fake_path/'),
    )
    @ddt.unpack
    def test_generate_href_no_path(self, version, path, expected):
        self.mock_object(versions.ViewBuilder,
                         '_get_base_url_without_version',
                         mock.Mock(return_value=URL_BASE))

        actual = self._get_builder()._generate_href(version=version,
                                                    path=path)

        self.assertEqual(expected, actual)

    @ddt.data(
        ('http://1.1.1.1/', 'http://1.1.1.1/'),
        ('http://localhost/', 'http://localhost/'),
        ('http://1.1.1.1/v1/', 'http://1.1.1.1/'),
        ('http://1.1.1.1/v1', 'http://1.1.1.1/'),
        ('http://1.1.1.1/v11', 'http://1.1.1.1/'),
    )
    @ddt.unpack
    def test_get_base_url_without_version(self, base_url,
                                          base_url_no_version):
        builder = versions.get_view_builder(FakeRequest(base_url))

        result = builder._get_base_url_without_version()

        self.assertEqual(base_url_no_version, result)
manila-21.0.0/manila/tests/cmd/test_api.py0000664000175000017500000000352500000000000020413 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from manila.cmd import api as manila_api from manila import test from manila import version CONF = manila_api.CONF class ManilaCmdApiTestCase(test.TestCase): def setUp(self): super(ManilaCmdApiTestCase, self).setUp() sys.argv = ['manila-api'] def test_main(self): self.mock_object(manila_api.log, 'setup') self.mock_object(manila_api.log, 'register_options') self.mock_object(manila_api.utils, 'monkey_patch') self.mock_object(manila_api.service, 'process_launcher') self.mock_object(manila_api.service, 'WSGIService') manila_api.main() process_launcher = manila_api.service.process_launcher process_launcher.assert_called_once_with() self.assertTrue(process_launcher.return_value.launch_service.called) self.assertTrue(process_launcher.return_value.wait.called) self.assertEqual('manila', CONF.project) self.assertEqual(version.version_string(), CONF.version) manila_api.log.setup.assert_called_once_with(CONF, "manila") manila_api.log.register_options.assert_called_once_with(CONF) manila_api.utils.monkey_patch.assert_called_once_with() manila_api.service.WSGIService.assert_called_once_with('osapi_share') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/cmd/test_data.py0000664000175000017500000000344500000000000020554 0ustar00zuulzuul00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from manila.cmd import data as manila_data from manila import test from manila import version CONF = manila_data.CONF class ManilaCmdDataTestCase(test.TestCase): def test_main(self): sys.argv = ['manila-data'] self.mock_object(manila_data.log, 'setup') self.mock_object(manila_data.log, 'register_options') self.mock_object(manila_data.utils, 'monkey_patch') self.mock_object(manila_data.service.Service, 'create') self.mock_object(manila_data.service, 'serve') self.mock_object(manila_data.service, 'wait') manila_data.main() self.assertEqual('manila', CONF.project) self.assertEqual(version.version_string(), CONF.version) manila_data.log.setup.assert_called_once_with(CONF, "manila") manila_data.log.register_options.assert_called_once_with(CONF) manila_data.utils.monkey_patch.assert_called_once_with() manila_data.service.Service.create.assert_called_once_with( binary='manila-data') manila_data.service.wait.assert_called_once_with() manila_data.service.serve.assert_called_once_with( manila_data.service.Service.create.return_value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/cmd/test_manage.py0000664000175000017500000005726000000000000021077 0ustar00zuulzuul00000000000000# 
Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import code import io import readline import sys from unittest import mock import yaml import ddt from oslo_config import cfg from oslo_serialization import jsonutils from manila.cmd import manage as manila_manage from manila import context from manila import db from manila.db import migration from manila import test from manila import utils from manila import version CONF = cfg.CONF @ddt.ddt class ManilaCmdManageTestCase(test.TestCase): def setUp(self): super(ManilaCmdManageTestCase, self).setUp() sys.argv = ['manila-share'] CONF(sys.argv[1:], project='manila', version=version.version_string()) self.shell_commands = manila_manage.ShellCommands() self.host_commands = manila_manage.HostCommands() self.db_commands = manila_manage.DbCommands() self.version_commands = manila_manage.VersionCommands() self.config_commands = manila_manage.ConfigCommands() self.get_log_cmds = manila_manage.GetLogCommands() self.service_cmds = manila_manage.ServiceCommands() self.share_cmds = manila_manage.ShareCommands() self.server_cmds = manila_manage.ShareServerCommands() self.list_commands = manila_manage.ListCommand() @mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock()) def test_shell_commands_bpython(self): self.shell_commands.bpython() manila_manage.ShellCommands.run.assert_called_once_with('bpython') @mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock()) def 
test_shell_commands_ipython(self): self.shell_commands.ipython() manila_manage.ShellCommands.run.assert_called_once_with('ipython') @mock.patch.object(manila_manage.ShellCommands, 'run', mock.Mock()) def test_shell_commands_python(self): self.shell_commands.python() manila_manage.ShellCommands.run.assert_called_once_with('python') @ddt.data({}, {'shell': 'bpython'}) def test_run_bpython(self, kwargs): try: import bpython except ImportError as e: self.skipTest(str(e)) self.mock_object(bpython, 'embed') self.shell_commands.run(**kwargs) bpython.embed.assert_called_once_with() def test_run_bpython_import_error(self): try: import bpython import IPython except ImportError as e: self.skipTest(str(e)) self.mock_object(bpython, 'embed', mock.Mock(side_effect=ImportError())) self.mock_object(IPython, 'embed') self.shell_commands.run(shell='bpython') IPython.embed.assert_called_once_with() def test_run(self): try: import bpython except ImportError as e: self.skipTest(str(e)) self.mock_object(bpython, 'embed') self.shell_commands.run() bpython.embed.assert_called_once_with() def test_run_ipython(self): try: import IPython except ImportError as e: self.skipTest(str(e)) self.mock_object(IPython, 'embed') self.shell_commands.run(shell='ipython') IPython.embed.assert_called_once_with() def test_run_ipython_import_error(self): try: import IPython if not hasattr(IPython, 'Shell'): setattr(IPython, 'Shell', mock.Mock()) setattr(IPython.Shell, 'IPShell', mock.Mock(side_effect=ImportError())) except ImportError as e: self.skipTest(str(e)) self.mock_object(IPython, 'embed', mock.Mock(side_effect=ImportError())) self.mock_object(readline, 'parse_and_bind') self.mock_object(code, 'interact') shell = IPython.embed.return_value self.shell_commands.run(shell='ipython') IPython.Shell.IPShell.assert_called_once_with(argv=[]) self.assertFalse(shell.mainloop.called) self.assertTrue(readline.parse_and_bind.called) code.interact.assert_called_once_with() def test_run_python(self): 
self.mock_object(readline, 'parse_and_bind') self.mock_object(code, 'interact') self.shell_commands.run(shell='python') readline.parse_and_bind.assert_called_once_with("tab:complete") code.interact.assert_called_once_with() def test_run_python_import_error(self): self.mock_object(readline, 'parse_and_bind') self.mock_object(code, 'interact') self.shell_commands.run(shell='python') readline.parse_and_bind.assert_called_once_with("tab:complete") code.interact.assert_called_once_with() @mock.patch('builtins.print') def test_list(self, print_mock): serv_1 = { 'host': 'fake_host1', 'availability_zone': {'name': 'avail_zone1'}, } serv_2 = { 'host': 'fake_host2', 'availability_zone': {'name': 'avail_zone2'}, } self.mock_object(db, 'service_get_all', mock.Mock(return_value=[serv_1, serv_2])) self.mock_object(context, 'get_admin_context', mock.Mock(return_value='admin_ctxt')) self.host_commands.list(zone='avail_zone1') context.get_admin_context.assert_called_once_with() db.service_get_all.assert_called_once_with('admin_ctxt') print_mock.assert_has_calls([ mock.call(u'host \tzone '), mock.call('fake_host1 \tavail_zone1 ')]) @mock.patch('builtins.print') def test_list_zone_is_none(self, print_mock): serv_1 = { 'host': 'fake_host1', 'availability_zone': {'name': 'avail_zone1'}, } serv_2 = { 'host': 'fake_host2', 'availability_zone': {'name': 'avail_zone2'}, } self.mock_object(db, 'service_get_all', mock.Mock(return_value=[serv_1, serv_2])) self.mock_object(context, 'get_admin_context', mock.Mock(return_value='admin_ctxt')) self.host_commands.list() context.get_admin_context.assert_called_once_with() db.service_get_all.assert_called_once_with('admin_ctxt') print_mock.assert_has_calls([ mock.call(u'host \tzone '), mock.call('fake_host1 \tavail_zone1 '), mock.call('fake_host2 \tavail_zone2 ')]) def test_sync(self): self.mock_object(migration, 'upgrade') self.db_commands.sync(version='123') migration.upgrade.assert_called_once_with('123') def test_version(self): 
self.mock_object(migration, 'version') self.db_commands.version() migration.version.assert_called_once_with() def test_downgrade(self): self.mock_object(migration, 'downgrade') self.db_commands.downgrade(version='123') migration.downgrade.assert_called_once_with('123') def test_revision(self): self.mock_object(migration, 'revision') self.db_commands.revision('message', True) migration.revision.assert_called_once_with('message', True) def test_stamp(self): self.mock_object(migration, 'stamp') self.db_commands.stamp(version='123') migration.stamp.assert_called_once_with('123') def test_version_commands_list(self): self.mock_object(version, 'version_string', mock.Mock(return_value='123')) with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: self.version_commands.list() version.version_string.assert_called_once_with() self.assertEqual('123\n', fake_out.getvalue()) def test_version_commands_call(self): self.mock_object(version, 'version_string', mock.Mock(return_value='123')) with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: self.version_commands() version.version_string.assert_called_once_with() self.assertEqual('123\n', fake_out.getvalue()) def test_get_log_commands_no_errors(self): with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: CONF.set_override('log_dir', None) expected_out = 'No errors in logfiles!\n' self.get_log_cmds.errors() self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('builtins.open') @mock.patch('os.listdir') def test_get_log_commands_errors(self, listdir, open): CONF.set_override('log_dir', 'fake-dir') listdir.return_value = ['fake-error.log'] with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: open.return_value = io.StringIO( '[ ERROR ] fake-error-message') expected_out = ('fake-dir/fake-error.log:-\n' 'Line 1 : [ ERROR ] fake-error-message\n') self.get_log_cmds.errors() self.assertEqual(expected_out, fake_out.getvalue()) open.assert_called_once_with('fake-dir/fake-error.log', 'r') 
listdir.assert_called_once_with(CONF.log_dir) @mock.patch('builtins.open') @mock.patch('os.path.exists') def test_get_log_commands_syslog_no_log_file(self, path_exists, open): path_exists.return_value = False exit = self.assertRaises(SystemExit, self.get_log_cmds.syslog) self.assertEqual(1, exit.code) path_exists.assert_any_call('/var/log/syslog') path_exists.assert_any_call('/var/log/messages') @mock.patch('manila.utils.service_is_up') @mock.patch('manila.db.service_get_all') @mock.patch('manila.context.get_admin_context') def test_service_commands_list(self, get_admin_context, service_get_all, service_is_up): ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt service = {'binary': 'manila-binary', 'host': 'fake-host.fake-domain', 'availability_zone': {'name': 'fake-zone'}, 'updated_at': '2014-06-30 11:22:33', 'disabled': False} service_get_all.return_value = [service] service_is_up.return_value = True with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: format = "%-16s %-36s %-16s %-10s %-5s %-10s" print_format = format % ('Binary', 'Host', 'Zone', 'Status', 'State', 'Updated at') service_format = format % (service['binary'], service['host'].partition('.')[0], service['availability_zone']['name'], 'enabled', ':-)', service['updated_at']) expected_out = print_format + '\n' + service_format + '\n' self.service_cmds.list(format_output='table') self.assertEqual(expected_out, fake_out.getvalue()) get_admin_context.assert_called_with() service_get_all.assert_called_with(ctxt) service_is_up.assert_called_with(service) @ddt.data('json', 'yaml') def test_service_commands_list_format(self, format_output): ctxt = context.RequestContext('fake-user', 'fake-project') format_method_name = f'list_{format_output}' mock_list_method = self.mock_object( self.service_cmds, format_method_name) get_admin_context = self.mock_object(context, 'get_admin_context') service_get_all = self.mock_object(db, 'service_get_all') service_is_up = 
self.mock_object(utils, 'service_is_up') get_admin_context.return_value = ctxt service = {'binary': 'manila-binary', 'host': 'fake-host.fake-domain', 'availability_zone': {'name': 'fake-zone'}, 'updated_at': '2014-06-30 11:22:33', 'disabled': False} services = [service] service_get_all.return_value = services service_is_up.return_value = True with mock.patch('sys.stdout', new=io.StringIO()): self.service_cmds.list(format_output=format_output) get_admin_context.assert_called_with() service_get_all.assert_called_with(ctxt) service_is_up.assert_called_with(service) service_format = { 'binary': service['binary'], 'host': service['host'].partition('.')[0], 'zone': service['availability_zone']['name'], 'status': 'enabled', 'state': ':-)', 'updated_at': service['updated_at'], } mock_list_method.assert_called_once_with( 'services', [service_format]) @ddt.data(True, False) def test_service_commands_cleanup(self, service_is_up): ctxt = context.RequestContext('fake-user', 'fake-project') self.mock_object(context, 'get_admin_context', mock.Mock(return_value=ctxt)) service = {'id': 17, 'binary': 'manila-binary', 'host': 'fake-host.fake-domain', 'availability_zone': {'name': 'fake-zone'}, 'updated_at': '2020-06-17 07:22:33', 'disabled': False} self.mock_object(db, 'service_get_all', mock.Mock(return_value=[service])) self.mock_object(db, 'service_destroy') self.mock_object(utils, 'service_is_up', mock.Mock(return_value=service_is_up)) with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: if not service_is_up: expected_out = "Cleaned up service %s" % service['host'] else: expected_out = '' self.service_cmds.cleanup() self.assertEqual(expected_out, fake_out.getvalue().strip()) context.get_admin_context.assert_called_with() db.service_get_all.assert_called_with(ctxt) utils.service_is_up.assert_called_with(service) if not service_is_up: db.service_destroy.assert_called_with(ctxt, service['id']) else: self.assertFalse(db.service_destroy.called) def test_methods_of(self): obj 
= type('Fake', (object,), {name: lambda: 'fake_' for name in ('_a', 'b', 'c')}) expected = [('b', obj.b), ('c', obj.c)] self.assertEqual(expected, manila_manage.methods_of(obj)) @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_argv_lt_2(self, register_cli_opt): script_name = 'manila-manage' sys.argv = [script_name] CONF(sys.argv[1:], project='manila', version=version.version_string()) exit = self.assertRaises(SystemExit, manila_manage.main) self.assertTrue(register_cli_opt.called) self.assertEqual(2, exit.code) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_log.log.register_options') @mock.patch('oslo_log.log.setup') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_sudo_failed(self, register_cli_opt, log_setup, register_log_opts, config_opts_call): script_name = 'manila-manage' sys.argv = [script_name, 'fake_category', 'fake_action'] config_opts_call.side_effect = cfg.ConfigFilesNotFoundError( mock.sentinel._namespace) exit = self.assertRaises(SystemExit, manila_manage.main) self.assertTrue(register_cli_opt.called) register_log_opts.assert_called_once_with(CONF) config_opts_call.assert_called_once_with( sys.argv[1:], project='manila', version=version.version_string()) self.assertFalse(log_setup.called) self.assertEqual(2, exit.code) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') @mock.patch('oslo_log.log.register_options') def test_main(self, register_log_opts, register_cli_opt, config_opts_call): script_name = 'manila-manage' sys.argv = [script_name, 'config', 'list'] action_fn = mock.MagicMock() CONF.category = mock.MagicMock(action_fn=action_fn) manila_manage.main() self.assertTrue(register_cli_opt.called) register_log_opts.assert_called_once_with(CONF) config_opts_call.assert_called_once_with( sys.argv[1:], project='manila', version=version.version_string()) self.assertTrue(action_fn.called) @ddt.data('bar', '-bar', '--bar') def 
test_get_arg_string(self, arg): parsed_arg = manila_manage.get_arg_string(arg) self.assertEqual('bar', parsed_arg) @ddt.data({'current_host': 'controller-0@fancystore01#pool100', 'new_host': 'controller-0@fancystore01'}, {'current_host': 'controller-0@fancystore01', 'new_host': 'controller-0'}) @ddt.unpack def test_share_update_host_fail_validation(self, current_host, new_host): self.mock_object(context, 'get_admin_context', mock.Mock(return_value='admin_ctxt')) self.mock_object(db, 'share_resources_host_update') self.assertRaises(SystemExit, self.share_cmds.update_host, current_host, new_host) self.assertFalse(db.share_resources_host_update.called) @ddt.data({'current_host': 'controller-0@fancystore01#pool100', 'new_host': 'controller-0@fancystore02#pool0'}, {'current_host': 'controller-0@fancystore01', 'new_host': 'controller-1@fancystore01'}, {'current_host': 'controller-0', 'new_host': 'controller-1'}, {'current_host': 'controller-0@fancystore01#pool100', 'new_host': 'controller-1@fancystore02', 'force': True}) @ddt.unpack def test_share_update_host(self, current_host, new_host, force=False): db_op = {'instances': 3, 'groups': 4, 'servers': 2} self.mock_object(context, 'get_admin_context', mock.Mock(return_value='admin_ctxt')) self.mock_object(db, 'share_resources_host_update', mock.Mock(return_value=db_op)) with mock.patch('sys.stdout', new=io.StringIO()) as intercepted_op: self.share_cmds.update_host(current_host, new_host, force) expected_op = ("Updated host of 3 share instances, 4 share groups and " "2 share servers on %(chost)s to %(nhost)s." 
% {'chost': current_host, 'nhost': new_host}) self.assertEqual(expected_op, intercepted_op.getvalue().strip()) db.share_resources_host_update.assert_called_once_with( 'admin_ctxt', current_host, new_host) def test_share_delete(self): share_id = "fake_share_id" share = { 'id': share_id, 'instances': [ {'id': 'instance_id1', 'replica_state': 'active'}, {'id': 'instance_id2', 'replica_state': 'error'}, {'id': 'instance_id3', 'replica_state': 'active'}, ] } self.mock_object(context, 'get_admin_context', mock.Mock(return_value='admin_ctxt')) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(db, 'share_instance_delete', mock.Mock(return_value=None)) self.share_cmds.delete(share_id) db.share_instance_delete.assert_has_calls([ mock.call('admin_ctxt', 'instance_id2'), mock.call('admin_ctxt', 'instance_id1'), mock.call('admin_ctxt', 'instance_id3'), ]) self.assertEqual(3, db.share_instance_delete.call_count) def test_share_server_update_capability(self): self.mock_object(context, 'get_admin_context', mock.Mock(return_value='admin_ctxt')) self.mock_object(db, 'share_servers_update') share_servers = 'server_id_a,server_id_b' share_server_list = [server.strip() for server in share_servers.split(",")] capabilities = "security_service_update_support" \ ",network_allocation_update_support" capabilities_list = capabilities.split(",") values_to_update = [ {capabilities_list[0]: True, capabilities_list[1]: True}] with mock.patch('sys.stdout', new=io.StringIO()) as output: self.server_cmds.update_share_server_capabilities( share_servers, capabilities, True) expected_op = ("The capability(ies) %(cap)s of the following share " "server(s) %(servers)s was(were) updated to " "%(value)s.") % { 'cap': capabilities_list, 'servers': share_server_list, 'value': True, } self.assertEqual(expected_op, output.getvalue().strip()) db.share_servers_update.assert_called_once_with( 'admin_ctxt', share_server_list, values_to_update[0]) def 
test_share_server_update_capability_not_supported(self): share_servers = 'server_id_a' capabilities = 'invalid_capability' exit = self.assertRaises( SystemExit, self.server_cmds.update_share_server_capabilities, share_servers, capabilities, True) self.assertEqual(1, exit.code) @mock.patch('builtins.print') def test_list_commands_json(self, mock_print): resource_name = 'service' service_format = [{ 'binary': 'manila-binary', 'host': 'fake-host', 'availability_zone': 'fakeaz', 'status': 'enabled', 'state': ':-)', 'updated_at': '13 04:57:49 PM -03 2023' }] mock_json_dumps = self.mock_object( jsonutils, 'dumps', mock.Mock(return_value=service_format[0])) services = {resource_name: service_format} self.list_commands.list_json('service', service_format) mock_json_dumps.assert_called_once_with( services, indent=4) mock_print.assert_called_once_with(service_format[0]) @mock.patch('builtins.print') def test_list_commands_yaml(self, mock_print): resource_name = 'service' service_format = [{ 'binary': 'manila-binary', 'host': 'fake-host', 'availability_zone': 'fakeaz', 'status': 'enabled', 'state': ':-)', 'updated_at': '13 04:57:49 PM -03 2023' }] mock_yaml_dump = self.mock_object( yaml, 'dump', mock.Mock(return_value=service_format[0])) services = {resource_name: service_format} self.list_commands.list_yaml('service', service_format) mock_yaml_dump.assert_called_once_with( services) mock_print.assert_called_once_with(service_format[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/cmd/test_scheduler.py0000664000175000017500000000361200000000000021615 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from manila.cmd import scheduler as manila_scheduler from manila import test from manila import version CONF = manila_scheduler.CONF class ManilaCmdSchedulerTestCase(test.TestCase): def test_main(self): sys.argv = ['manila-scheduler'] self.mock_object(manila_scheduler.log, 'setup') self.mock_object(manila_scheduler.log, 'register_options') self.mock_object(manila_scheduler.utils, 'monkey_patch') self.mock_object(manila_scheduler.service.Service, 'create') self.mock_object(manila_scheduler.service, 'serve') self.mock_object(manila_scheduler.service, 'wait') manila_scheduler.main() self.assertEqual('manila', CONF.project) self.assertEqual(version.version_string(), CONF.version) manila_scheduler.log.setup.assert_called_once_with(CONF, "manila") manila_scheduler.log.register_options.assert_called_once_with(CONF) manila_scheduler.utils.monkey_patch.assert_called_once_with() manila_scheduler.service.Service.create.assert_called_once_with( binary='manila-scheduler', coordination=True) manila_scheduler.service.wait.assert_called_once_with() manila_scheduler.service.serve.assert_called_once_with( manila_scheduler.service.Service.create.return_value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/cmd/test_share.py0000664000175000017500000000515000000000000020740 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from unittest import mock import ddt from manila.cmd import share as manila_share from manila import test CONF = manila_share.CONF @ddt.ddt class ManilaCmdShareTestCase(test.TestCase): @ddt.data(None, [], ['foo', ], ['foo', 'bar', ]) def test_main(self, backends): self.mock_object(manila_share.log, 'setup') self.mock_object(manila_share.log, 'register_options') self.mock_object(manila_share.utils, 'monkey_patch') self.mock_object(manila_share.service, 'process_launcher') self.mock_object(manila_share.service.Service, 'create') self.launcher = manila_share.service.process_launcher.return_value self.mock_object(self.launcher, 'launch_service') self.mock_object(self.launcher, 'wait') self.server = manila_share.service.Service.create.return_value fake_host = 'fake.host' CONF.set_override('enabled_share_backends', backends) CONF.set_override('host', fake_host) sys.argv = ['manila-share'] manila_share.main() manila_share.log.setup.assert_called_once_with(CONF, "manila") manila_share.log.register_options.assert_called_once_with(CONF) manila_share.utils.monkey_patch.assert_called_once_with() manila_share.service.process_launcher.assert_called_once_with() self.launcher.wait.assert_called_once_with() if backends: manila_share.service.Service.create.assert_has_calls([ mock.call( host=fake_host + '@' + backend, service_name=backend, binary='manila-share', coordination=True, ) for backend in backends ]) self.launcher.launch_service.assert_has_calls([ mock.call(self.server) for backend in backends]) else: manila_share.service.Service.create.assert_called_once_with( 
binary='manila-share') self.launcher.launch_service.assert_called_once_with(self.server) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/cmd/test_status.py0000664000175000017500000000144700000000000021166 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.cmd import status from manila import test class TestUpgradeChecks(test.TestCase): def setUp(self): super(TestUpgradeChecks, self).setUp() self.cmd = status.Checks() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.969671 manila-21.0.0/manila/tests/common/0000775000175000017500000000000000000000000016751 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/common/__init__.py0000664000175000017500000000000000000000000021050 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/common/test_client_auth.py0000664000175000017500000001000200000000000022652 0ustar00zuulzuul00000000000000# Copyright 2016 SAP SE # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from keystoneauth1 import loading as auth from oslo_config import cfg from manila.common import client_auth from manila import exception from manila import test from manila.tests import fake_client_exception_class class ClientAuthTestCase(test.TestCase): def setUp(self): super(ClientAuthTestCase, self).setUp() self.context = mock.Mock() self.fake_client = mock.Mock() self.exception_mod = fake_client_exception_class self.auth = client_auth.AuthClientLoader(self.fake_client, 'foo_group') def test_get_client_admin_true(self): mock_load_session = self.mock_object(auth, 'load_session_from_conf_options') self.auth.get_client(self.context, admin=True) mock_load_session.assert_called_once_with(client_auth.CONF, 'foo_group') self.fake_client.assert_called_once_with( session=mock_load_session(), auth=auth.load_auth_from_conf_options( client_auth.CONF, 'foo_group')) def test_get_client_admin_false(self): self.mock_object(auth, 'load_session_from_conf_options') self.assertRaises(exception.ManilaException, self.auth.get_client, self.context, admin=False) def test_load_auth_plugin_caching(self): self.auth.admin_auth = 'admin obj' result = self.auth._load_auth_plugin() self.assertEqual(self.auth.admin_auth, result) def test_load_auth_plugin_no_auth(self): auth.load_auth_from_conf_options.return_value = None self.assertRaises(exception.BadConfigurationException, self.auth._load_auth_plugin) @mock.patch.object(auth, 'get_session_conf_options') @mock.patch.object(auth, 'get_auth_common_conf_options') @mock.patch.object(auth, 'get_auth_plugin_conf_options') 
def test_list_opts(self, auth_conf, common_conf, session_conf): session_conf.return_value = [cfg.StrOpt('username'), cfg.StrOpt('password')] common_conf.return_value = ([cfg.StrOpt('auth_url')]) auth_conf.return_value = [cfg.StrOpt('password')] result = client_auth.AuthClientLoader.list_opts("foo_group") self.assertEqual('foo_group', result[0][0]) for entry in result[0][1]: self.assertIn(entry.name, ['username', 'auth_url', 'password']) common_conf.assert_called_once_with() auth_conf.assert_called_once_with('password') @mock.patch.object(auth, 'get_session_conf_options') @mock.patch.object(auth, 'get_auth_common_conf_options') @mock.patch.object(auth, 'get_auth_plugin_conf_options') def test_list_opts_not_found(self, auth_conf, common_conf, session_conf): session_conf.return_value = [cfg.StrOpt('username'), cfg.StrOpt('password')] common_conf.return_value = ([cfg.StrOpt('auth_url')]) auth_conf.return_value = [cfg.StrOpt('tenant')] result = client_auth.AuthClientLoader.list_opts("foo_group") self.assertEqual('foo_group', result[0][0]) for entry in result[0][1]: self.assertIn(entry.name, ['username', 'auth_url', 'password', 'tenant']) common_conf.assert_called_once_with() auth_conf.assert_called_once_with('password') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/common/test_config.py0000664000175000017500000000325000000000000021627 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import ddt from manila.common import config from manila.common import constants from manila import exception from manila import test from manila.tests import utils as test_utils VALID_CASES = [proto.lower() for proto in constants.SUPPORTED_SHARE_PROTOCOLS] VALID_CASES.extend([proto.upper() for proto in VALID_CASES]) VALID_CASES.append(','.join(case for case in VALID_CASES)) @ddt.ddt class VerifyConfigShareProtocolsTestCase(test.TestCase): @ddt.data(*VALID_CASES) def test_verify_share_protocols_valid_cases(self, proto): data = dict(DEFAULT=dict(enabled_share_protocols=proto)) with test_utils.create_temp_config_with_opts(data): config.verify_share_protocols() @ddt.data(None, '', 'fake', [], ['fake'], [VALID_CASES[0] + 'fake']) def test_verify_share_protocols_invalid_cases(self, proto): data = dict(DEFAULT=dict(enabled_share_protocols=proto)) with test_utils.create_temp_config_with_opts(data): self.assertRaises( exception.ManilaException, config.verify_share_protocols) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.969671 manila-21.0.0/manila/tests/compute/0000775000175000017500000000000000000000000017135 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/compute/__init__.py0000664000175000017500000000000000000000000021234 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/compute/test_nova.py0000664000175000017500000002670200000000000021520 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from novaclient import exceptions as nova_exception from novaclient import utils from novaclient.v2 import servers as nova_servers from manila.compute import nova from manila import context from manila import exception from manila import test from manila.tests import utils as test_utils class Volume(object): def __init__(self, volume_id): self.id = volume_id self.volumeId = volume_id class Network(object): def __init__(self, net_id): self.id = net_id self.label = 'fake_label_%s' % net_id class FakeNovaClient(object): class Servers(object): def get(self, instance_id): return {'id': instance_id} def list(self, *args, **kwargs): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None class Volumes(object): def get(self, volume_id): return Volume(volume_id) def list(self, detailed, *args, **kwargs): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None class Networks(object): def get(self, net_id): return Network(net_id) def __init__(self): self.servers = self.Servers() self.volumes = self.Volumes() self.keypairs = self.servers self.networks = self.Networks() @nova.translate_server_exception def decorated_by_translate_server_exception(self, context, instance_id, exc): if exc: raise exc(instance_id) else: return 'OK' @ddt.ddt class TranslateServerExceptionTestCase(test.TestCase): def test_translate_server_exception(self): result = 
decorated_by_translate_server_exception( 'foo_self', 'foo_ctxt', 'foo_instance_id', None) self.assertEqual('OK', result) def test_translate_server_exception_not_found(self): self.assertRaises( exception.InstanceNotFound, decorated_by_translate_server_exception, 'foo_self', 'foo_ctxt', 'foo_instance_id', nova_exception.NotFound) def test_translate_server_exception_bad_request(self): self.assertRaises( exception.InvalidInput, decorated_by_translate_server_exception, 'foo_self', 'foo_ctxt', 'foo_instance_id', nova_exception.BadRequest) @ddt.data( nova_exception.HTTPNotImplemented, nova_exception.RetryAfterException, nova_exception.Unauthorized, nova_exception.Forbidden, nova_exception.MethodNotAllowed, nova_exception.OverLimit, nova_exception.RateLimit, ) def test_translate_server_exception_other_exception(self, exc): self.assertRaises( exception.ManilaException, decorated_by_translate_server_exception, 'foo_self', 'foo_ctxt', 'foo_instance_id', exc) def get_fake_auth_obj(): return type('FakeAuthObj', (object, ), {'get_client': mock.Mock()}) class NovaclientTestCase(test.TestCase): @mock.patch('manila.compute.nova.AUTH_OBJ', None) def test_no_auth_obj(self): mock_client_loader = self.mock_object( nova.client_auth, 'AuthClientLoader') fake_context = 'fake_context' data = { 'nova': { 'api_microversion': 'foo_api_microversion', 'endpoint_type': 'internalURL', 'region_name': 'foo_region_name', } } with test_utils.create_temp_config_with_opts(data): nova.novaclient(fake_context) mock_client_loader.assert_called_once_with( client_class=nova.nova_client.Client, cfg_group=nova.NOVA_GROUP ) mock_client_loader.return_value.get_client.assert_called_once_with( fake_context, version=data['nova']['api_microversion'], endpoint_type=data['nova']['endpoint_type'], region_name=data['nova']['region_name'], ) @mock.patch('manila.compute.nova.AUTH_OBJ', get_fake_auth_obj()) def test_with_auth_obj(self): fake_context = 'fake_context' data = { 'nova': { 'api_microversion': 
'foo_api_microversion', 'endpoint_type': 'internalURL', 'region_name': 'foo_region_name', } } with test_utils.create_temp_config_with_opts(data): nova.novaclient(fake_context) nova.AUTH_OBJ.get_client.assert_called_once_with( fake_context, version=data['nova']['api_microversion'], endpoint_type=data['nova']['endpoint_type'], region_name=data['nova']['region_name'], ) @ddt.ddt class NovaApiTestCase(test.TestCase): def setUp(self): super(NovaApiTestCase, self).setUp() self.api = nova.API() self.novaclient = FakeNovaClient() self.ctx = context.get_admin_context() self.mock_object(nova, 'novaclient', mock.Mock(return_value=self.novaclient)) self.mock_object(nova, '_untranslate_server_summary_view', lambda server: server) def test_server_create(self): result = self.api.server_create(self.ctx, 'server_name', 'fake_image', 'fake_flavor', None, None, None) self.assertEqual('created_id', result['id']) def test_server_delete(self): self.mock_object(self.novaclient.servers, 'delete') self.api.server_delete(self.ctx, 'id1') self.novaclient.servers.delete.assert_called_once_with('id1') def test_server_get(self): instance_id = 'instance_id1' result = self.api.server_get(self.ctx, instance_id) self.assertEqual(instance_id, result['id']) def test_server_get_by_name_or_id(self): instance_id = 'instance_id1' server = {'id': instance_id, 'fake_key': 'fake_value'} self.mock_object(utils, 'find_resource', mock.Mock(return_value=server)) result = self.api.server_get_by_name_or_id(self.ctx, instance_id) self.assertEqual(instance_id, result['id']) utils.find_resource.assert_called_once_with(mock.ANY, instance_id) def test_server_get_by_name_or_id_failed(self): instance_id = 'instance_id1' server = {'id': instance_id, 'fake_key': 'fake_value'} self.mock_object(utils, 'find_resource', mock.Mock(return_value=server, side_effect=nova_exception.CommandError)) self.assertRaises(exception.ManilaException, self.api.server_get_by_name_or_id, self.ctx, instance_id) 
utils.find_resource.assert_any_call(mock.ANY, instance_id) utils.find_resource.assert_called_with(mock.ANY, instance_id, all_tenants=True) @ddt.data( {'nova_e': nova_exception.NotFound(404), 'manila_e': exception.InstanceNotFound}, {'nova_e': nova_exception.BadRequest(400), 'manila_e': exception.InvalidInput}, ) @ddt.unpack def test_server_get_failed(self, nova_e, manila_e): nova.novaclient.side_effect = nova_e instance_id = 'instance_id' self.assertRaises(manila_e, self.api.server_get, self.ctx, instance_id) def test_server_reboot_hard(self): self.mock_object(self.novaclient.servers, 'reboot') self.api.server_reboot(self.ctx, 'id1') self.novaclient.servers.reboot.assert_called_once_with( 'id1', nova_servers.REBOOT_HARD) def test_server_reboot_soft(self): self.mock_object(self.novaclient.servers, 'reboot') self.api.server_reboot(self.ctx, 'id1', True) self.novaclient.servers.reboot.assert_called_once_with( 'id1', nova_servers.REBOOT_SOFT) def test_instance_volume_attach(self): self.mock_object(self.novaclient.volumes, 'create_server_volume') self.api.instance_volume_attach(self.ctx, 'instance_id', 'vol_id', 'device') (self.novaclient.volumes.create_server_volume. assert_called_once_with('instance_id', 'vol_id', 'device')) def test_instance_volume_detach(self): self.mock_object(self.novaclient.volumes, 'delete_server_volume') self.api.instance_volume_detach(self.ctx, 'instance_id', 'att_id') (self.novaclient.volumes.delete_server_volume. 
assert_called_once_with('instance_id', 'att_id')) def test_instance_volumes_list(self): self.mock_object( self.novaclient.volumes, 'get_server_volumes', mock.Mock(return_value=[Volume('id1'), Volume('id2')])) result = self.api.instance_volumes_list(self.ctx, 'instance_id') self.assertEqual(['id1', 'id2'], result) def test_server_update(self): self.mock_object(self.novaclient.servers, 'update') self.api.server_update(self.ctx, 'id1', 'new_name') self.novaclient.servers.update.assert_called_once_with('id1', name='new_name') def test_keypair_import(self): self.mock_object(self.novaclient.keypairs, 'create') self.api.keypair_import(self.ctx, 'keypair_name', 'fake_pub_key') (self.novaclient.keypairs.create. assert_called_once_with('keypair_name', 'fake_pub_key')) def test_keypair_delete(self): self.mock_object(self.novaclient.keypairs, 'delete') self.api.keypair_delete(self.ctx, 'fake_keypair_id') (self.novaclient.keypairs.delete. assert_called_once_with('fake_keypair_id')) def test_keypair_list(self): self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], self.api.keypair_list(self.ctx)) class ToDictTestCase(test.TestCase): def test_dict_provided(self): fake_dict = {'foo_key': 'foo_value', 'bar_key': 'bar_value'} result = nova._to_dict(fake_dict) self.assertEqual(fake_dict, result) def test_obj_provided_with_to_dict_method(self): expected = {'foo': 'bar'} class FakeObj(object): def __init__(self): self.fake_attr = 'fake_attr_value' def to_dict(self): return expected fake_obj = FakeObj() result = nova._to_dict(fake_obj) self.assertEqual(expected, result) def test_obj_provided_without_to_dict_method(self): expected = {'foo': 'bar'} class FakeObj(object): def __init__(self): self.foo = expected['foo'] fake_obj = FakeObj() result = nova._to_dict(fake_obj) self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/conf_fixture.py0000664000175000017500000001011100000000000020520 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_policy import opts from oslo_service import wsgi # some of these are imported for their side-effects from manila.api import openstack # noqa from manila.common import config CONF = config.CONF def set_defaults(conf): _safe_set_of_opts(conf, 'verbose', True) _safe_set_of_opts(conf, 'state_path', os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..'))) _safe_set_of_opts(conf, 'connection', "sqlite://", group='database') _safe_set_of_opts(conf, 'sqlite_synchronous', False) _POLICY_PATH = os.path.abspath(os.path.join(CONF.state_path, 'manila/tests/policy.yaml')) opts.set_defaults(conf, policy_file=_POLICY_PATH) _safe_set_of_opts(conf, 'share_export_ip', '0.0.0.0') _safe_set_of_opts(conf, 'service_instance_user', 'fake_user') _API_PASTE_PATH = os.path.abspath(os.path.join(CONF.state_path, 'etc/manila/api-paste.ini')) wsgi.register_opts(conf) _safe_set_of_opts(conf, 'api_paste_config', _API_PASTE_PATH) # we use "fake" and "openstack" as project ID in a number of tests _safe_set_of_opts(conf, 'project_id_regex', r"[0-9a-fopnstk\-]+") _safe_set_of_opts(conf, 'share_driver', 'manila.tests.fake_driver.FakeShareDriver') 
_safe_set_of_opts(conf, 'auth_strategy', 'noauth') _safe_set_of_opts(conf, 'zfs_share_export_ip', '1.1.1.1') _safe_set_of_opts(conf, 'zfs_service_ip', '2.2.2.2') _safe_set_of_opts(conf, 'zfs_zpool_list', ['foo', 'bar']) _safe_set_of_opts(conf, 'zfs_share_helpers', 'NFS=foo.bar.Helper') _safe_set_of_opts(conf, 'zfs_replica_snapshot_prefix', 'foo_prefix_') _safe_set_of_opts(conf, 'hitachi_hsp_host', '172.24.47.190') _safe_set_of_opts(conf, 'hitachi_hsp_username', 'hsp_user') _safe_set_of_opts(conf, 'hitachi_hsp_password', 'hsp_password') _safe_set_of_opts(conf, 'as13000_nas_ip', '1.1.1.1') _safe_set_of_opts(conf, 'as13000_nas_login', 'admin') _safe_set_of_opts(conf, 'as13000_nas_password', 'password') _safe_set_of_opts(conf, 'as13000_share_pools', 'pool0') _safe_set_of_opts(conf, 'instorage_nas_ip', '1.1.1.1') _safe_set_of_opts(conf, 'instorage_nas_login', 'admin') _safe_set_of_opts(conf, 'instorage_nas_password', 'password') _safe_set_of_opts(conf, 'instorage_nas_pools', 'pool0') _safe_set_of_opts(conf, 'infortrend_nas_ip', '172.27.1.1') _safe_set_of_opts(conf, 'infortrend_share_pools', 'share-pool-01') _safe_set_of_opts(conf, 'infortrend_share_channels', '0,1') _safe_set_of_opts(conf, 'macrosan_nas_ip', '1.1.1.1') _safe_set_of_opts(conf, 'macrosan_share_pools', 'pool0') _safe_set_of_opts(conf, 'qnap_management_url', 'http://1.2.3.4:8080') _safe_set_of_opts(conf, 'qnap_share_ip', '1.2.3.4') _safe_set_of_opts(conf, 'qnap_nas_login', 'admin') _safe_set_of_opts(conf, 'qnap_nas_password', 'qnapadmin') _safe_set_of_opts(conf, 'qnap_poolname', 'Storage Pool 1') _safe_set_of_opts(conf, 'unity_server_meta_pool', 'nas_server_pool') conf.set_default('response_validation', 'error', group='api') def _safe_set_of_opts(conf, *args, **kwargs): try: conf.set_default(*args, **kwargs) except config.cfg.NoSuchOptError: # Assumed that opt is not imported and not used pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.969671 
manila-21.0.0/manila/tests/data/0000775000175000017500000000000000000000000016372 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/data/__init__.py0000664000175000017500000000000000000000000020471 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/data/test_helper.py0000664000175000017500000003314500000000000021270 0ustar00zuulzuul00000000000000# Copyright 2015 Hitachi Data Systems inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from unittest import mock import ddt from manila.common import constants from manila import context from manila.data import helper as data_copy_helper from manila import db from manila import exception from manila.share import rpcapi as share_rpc from manila import test from manila.tests import db_utils from manila import utils @ddt.ddt class DataServiceHelperTestCase(test.TestCase): """Tests DataServiceHelper.""" def setUp(self): super(DataServiceHelperTestCase, self).setUp() self.share = db_utils.create_share() self.share_instance = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE) self.context = context.get_admin_context() self.share_instance = db.share_instance_get( self.context, self.share_instance['id'], with_share_data=True) self.access = db_utils.create_access(share_id=self.share['id']) self.helper = data_copy_helper.DataServiceHelper( self.context, db, self.share) @ddt.data(True, False) def test_allow_access_to_data_service(self, allow_dest_instance): access = db_utils.create_access(share_id=self.share['id']) info_src = { 'access_mapping': { 'ip': ['nfs'], 'user': ['cifs', 'nfs'], } } info_dest = { 'access_mapping': { 'ip': ['nfs', 'cifs'], 'user': ['cifs'], } } if allow_dest_instance: mapping = {'ip': ['nfs'], 'user': ['cifs']} else: mapping = info_src['access_mapping'] fake_access = { 'access_to': 'fake_ip', 'access_level': constants.ACCESS_LEVEL_RW, 'access_type': 'ip', } access_values = fake_access access_values['share_id'] = self.share['id'] self.mock_object( self.helper, '_get_access_entries_according_to_mapping', mock.Mock(return_value=[fake_access])) self.mock_object( self.helper.db, 'share_access_get_all_by_type_and_access', mock.Mock(return_value=[access])) change_data_access_call = self.mock_object( self.helper, '_change_data_access_to_instance') self.mock_object(self.helper.db, 'share_instance_access_create', mock.Mock(return_value=access)) if allow_dest_instance: result = 
self.helper.allow_access_to_data_service( self.share_instance, info_src, self.share_instance, info_dest) else: result = self.helper.allow_access_to_data_service( self.share_instance, info_src) self.assertEqual([access], result) (self.helper._get_access_entries_according_to_mapping. assert_called_once_with(mapping)) (self.helper.db.share_access_get_all_by_type_and_access. assert_called_once_with( self.context, self.share['id'], fake_access['access_type'], fake_access['access_to'])) access_create_calls = [ mock.call(self.context, access_values, self.share_instance['id']) ] if allow_dest_instance: access_create_calls.append(mock.call( self.context, access_values, self.share_instance['id'])) self.helper.db.share_instance_access_create.assert_has_calls( access_create_calls) change_access_calls = [ mock.call(self.share_instance, [access], deny=True), mock.call(self.share_instance), ] if allow_dest_instance: change_access_calls.append( mock.call(self.share_instance)) self.assertEqual(len(change_access_calls), change_data_access_call.call_count) change_data_access_call.assert_has_calls(change_access_calls) @ddt.data({'ip': []}, {'cert': []}, {'user': []}, {'cephx': []}, {'x': []}) def test__get_access_entries_according_to_mapping(self, mapping): data_copy_helper.CONF.data_node_access_cert = 'fake' data_copy_helper.CONF.data_node_access_ips = 'fake' data_copy_helper.CONF.data_node_access_admin_user = 'fake' expected = [{ 'access_type': list(mapping.keys())[0], 'access_level': constants.ACCESS_LEVEL_RW, 'access_to': 'fake', }] exists = [x for x in mapping if x in ('ip', 'user', 'cert')] if exists: result = self.helper._get_access_entries_according_to_mapping( mapping) self.assertEqual(expected, result) else: self.assertRaises( exception.ShareDataCopyFailed, self.helper._get_access_entries_according_to_mapping, mapping) def test__get_access_entries_according_to_mapping_exception_not_set(self): data_copy_helper.CONF.data_node_access_ips = None self.assertRaises( 
exception.ShareDataCopyFailed, self.helper._get_access_entries_according_to_mapping, {'ip': []}) def test__get_access_entries_according_to_mapping_ip_list(self): ips = ['fake1', 'fake2'] data_copy_helper.CONF.data_node_access_ips = ips expected = [{ 'access_type': 'ip', 'access_level': constants.ACCESS_LEVEL_RW, 'access_to': x, } for x in ips] result = self.helper._get_access_entries_according_to_mapping( {'ip': []}) self.assertEqual(expected, result) def test_deny_access_to_data_service(self): # mocks self.mock_object(self.helper, '_change_data_access_to_instance') # run self.helper.deny_access_to_data_service( [self.access], self.share_instance['id']) # asserts self.helper._change_data_access_to_instance.assert_called_once_with( self.share_instance['id'], [self.access], deny=True) @ddt.data(None, Exception('fake')) def test_cleanup_data_access(self, exc): # mocks self.mock_object(self.helper, 'deny_access_to_data_service', mock.Mock(side_effect=exc)) self.mock_object(data_copy_helper.LOG, 'warning') # run self.helper.cleanup_data_access([self.access], self.share_instance['id']) # asserts self.helper.deny_access_to_data_service.assert_called_once_with( [self.access], self.share_instance['id']) if exc: self.assertTrue(data_copy_helper.LOG.warning.called) @ddt.data(False, True) def test_cleanup_temp_folder(self, exc): fake_path = ''.join(('/fake_path/', self.share_instance['id'])) # mocks self.mock_object(os.path, 'exists', mock.Mock(side_effect=[True, True, exc])) self.mock_object(os, 'rmdir') self.mock_object(data_copy_helper.LOG, 'warning') # run self.helper.cleanup_temp_folder( '/fake_path/', self.share_instance['id']) # asserts os.rmdir.assert_called_once_with(fake_path) os.path.exists.assert_has_calls([ mock.call(fake_path), mock.call(fake_path), mock.call(fake_path) ]) if exc: self.assertTrue(data_copy_helper.LOG.warning.called) @ddt.data(None, Exception('fake')) def test_cleanup_unmount_temp_folder(self, exc): # mocks self.mock_object(self.helper, 
'unmount_share_instance_or_backup', mock.Mock(side_effect=exc)) self.mock_object(data_copy_helper.LOG, 'warning') unmount_info = { 'unmount': 'unmount_template', 'share_instance_id': self.share_instance['id'] } # run self.helper.cleanup_unmount_temp_folder(unmount_info, 'fake_path') # asserts self.helper.unmount_share_instance_or_backup.assert_called_once_with( unmount_info, 'fake_path') if exc: self.assertTrue(data_copy_helper.LOG.warning.called) @ddt.data(True, False) def test__change_data_access_to_instance(self, deny): access_rule = db_utils.create_access(share_id=self.share['id']) access_rule = db.share_instance_access_get( self.context, access_rule['id'], self.share_instance['id']) # mocks self.mock_object(share_rpc.ShareAPI, 'update_access') self.mock_object(utils, 'wait_for_access_update') mock_access_rules_status_update = self.mock_object( self.helper.access_helper, 'get_and_update_share_instance_access_rules_status') mock_rules_update = self.mock_object( self.helper.access_helper, 'get_and_update_share_instance_access_rules') # run self.helper._change_data_access_to_instance( self.share_instance, access_rule, deny=deny) # asserts if deny: mock_rules_update.assert_called_once_with( self.context, share_instance_id=self.share_instance['id'], filters={'access_id': [access_rule['id']]}, updates={'state': constants.ACCESS_STATE_QUEUED_TO_DENY}) else: self.assertFalse(mock_rules_update.called) share_rpc.ShareAPI.update_access.assert_called_once_with( self.context, self.share_instance) mock_access_rules_status_update.assert_called_once_with( self.context, status=constants.SHARE_INSTANCE_RULES_SYNCING, share_instance_id=self.share_instance['id']) utils.wait_for_access_update.assert_called_once_with( self.context, self.helper.db, self.share_instance, data_copy_helper.CONF.data_access_wait_access_rules_timeout) @ddt.data('migration', 'backup', 'restore') def test_mount_share_instance_or_backup(self, op): # mocks self.mock_object(utils, 'execute') exists_calls = 
[False, True] if op == 'backup': exists_calls.extend([False, True]) if op == 'restore': exists_calls.append([True]) self.mock_object(os.path, 'exists', mock.Mock(side_effect=exists_calls)) self.mock_object(os, 'makedirs') mount_info = {'mount': 'mount %(path)s'} if op in ('backup', 'restore'): fake_path = '/fake_backup_path/' mount_info.update( {'backup_id': 'fake_backup_id', 'mount_point': '/fake_backup_path/', op: True}) if op == 'migration': share_instance_id = self.share_instance['id'] fake_path = ''.join(('/fake_path/', share_instance_id)) mount_info.update({'share_instance_id': share_instance_id}) # run self.helper.mount_share_instance_or_backup(mount_info, '/fake_path') # asserts utils.execute.assert_called_once_with('mount', fake_path, run_as_root=True) if op == 'migration': os.makedirs.assert_called_once_with(fake_path) os.path.exists.assert_has_calls([ mock.call(fake_path), mock.call(fake_path), ]) if op == 'backup': os.makedirs.assert_has_calls([ mock.call(fake_path), mock.call(fake_path + 'fake_backup_id') ]) os.path.exists.assert_has_calls([ mock.call(fake_path), mock.call(fake_path), mock.call(fake_path + 'fake_backup_id'), mock.call(fake_path + 'fake_backup_id'), ]) if op == 'restore': os.makedirs.assert_called_once_with(fake_path) os.path.exists.assert_has_calls([ mock.call(fake_path), mock.call(fake_path), mock.call(fake_path + 'fake_backup_id'), ]) @ddt.data([True, True], [True, False], [True, Exception('fake')]) def test_unmount_share_instance_or_backup(self, side_effect): fake_path = ''.join(('/fake_path/', self.share_instance['id'])) # mocks self.mock_object(utils, 'execute') self.mock_object(os.path, 'exists', mock.Mock( side_effect=side_effect)) self.mock_object(os, 'rmdir') self.mock_object(data_copy_helper.LOG, 'warning') unmount_info = { 'unmount': 'unmount %(path)s', 'share_instance_id': self.share_instance['id'] } # run self.helper.unmount_share_instance_or_backup( unmount_info, '/fake_path') # asserts 
utils.execute.assert_called_once_with('unmount', fake_path, run_as_root=True) os.rmdir.assert_called_once_with(fake_path) os.path.exists.assert_has_calls([ mock.call(fake_path), mock.call(fake_path), ]) if any(isinstance(x, Exception) for x in side_effect): self.assertTrue(data_copy_helper.LOG.warning.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/data/test_manager.py0000664000175000017500000011401400000000000021416 0ustar00zuulzuul00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For Data Manager """ from unittest import mock import ddt from oslo_config import cfg from manila.common import constants from manila import context from manila.data import helper from manila.data import manager from manila.data import utils as data_utils from manila import db from manila import exception from manila import quota from manila.share import rpcapi as share_rpc from manila import test from manila.tests import db_utils from manila import utils CONF = cfg.CONF @ddt.ddt class DataManagerTestCase(test.TestCase): """Test case for data manager.""" def setUp(self): super(DataManagerTestCase, self).setUp() manager.CONF.set_default('mount_tmp_location', '/tmp/') manager.CONF.set_default('backup_mount_tmp_location', '/tmp/') manager.CONF.set_default( 'backup_driver', 'manila.tests.fake_backup_driver.FakeBackupDriver') self.manager = manager.DataManager() self.context = context.get_admin_context() self.topic = 'fake_topic' self.share = db_utils.create_share() def test_init(self): manager = self.manager self.assertIsNotNone(manager) @ddt.data(constants.TASK_STATE_DATA_COPYING_COMPLETING, constants.TASK_STATE_DATA_COPYING_STARTING, constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) def test_init_host(self, status): share = db_utils.create_share( task_state=status) # mocks self.mock_object(db, 'share_get_all', mock.Mock( return_value=[share])) self.mock_object(db, 'share_update') # run self.manager.init_host() # asserts db.share_get_all.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) db.share_update.assert_called_with( utils.IsAMatcher(context.RequestContext), share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) @ddt.data(None, Exception('fake'), exception.ShareDataCopyCancelled()) def test_migration_start(self, exc): migration_info_src = { 'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src', } migration_info_dest = { 'mount': 'mount_cmd_dest', 'unmount': 'unmount_cmd_dest', } # mocks self.mock_object(db, 'share_get', 
mock.Mock(return_value=self.share)) self.mock_object(db, 'share_instance_get', mock.Mock( return_value=self.share.instance)) self.mock_object(data_utils, 'Copy', mock.Mock(return_value='fake_copy')) if exc is None: self.manager.busy_tasks_shares[self.share['id']] = 'fake_copy' self.mock_object(self.manager, '_copy_share_data', mock.Mock(side_effect=exc)) self.mock_object(share_rpc.ShareAPI, 'migration_complete') if exc is not None and not isinstance( exc, exception.ShareDataCopyCancelled): self.mock_object(db, 'share_update') # run if exc is None or isinstance(exc, exception.ShareDataCopyCancelled): self.manager.migration_start( self.context, [], self.share['id'], 'ins1_id', 'ins2_id', migration_info_src, migration_info_dest) else: self.assertRaises( exception.ShareDataCopyFailed, self.manager.migration_start, self.context, [], self.share['id'], 'ins1_id', 'ins2_id', migration_info_src, migration_info_dest) db.share_update.assert_called_once_with( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) # asserts self.assertFalse(self.manager.busy_tasks_shares.get(self.share['id'])) if exc: share_rpc.ShareAPI.migration_complete.assert_called_once_with( self.context, self.share.instance, 'ins2_id') @ddt.data( {'cancelled': False, 'exc': None, 'case': 'migration'}, {'cancelled': False, 'exc': Exception('fake'), 'case': 'migration'}, {'cancelled': True, 'exc': None, 'case': 'migration'}, {'cancelled': False, 'exc': None, 'case': 'backup'}, {'cancelled': False, 'exc': Exception('fake'), 'case': 'backup'}, {'cancelled': True, 'exc': None, 'case': 'backup'}, {'cancelled': False, 'exc': None, 'case': 'restore'}, {'cancelled': False, 'exc': Exception('fake'), 'case': 'restore'}, {'cancelled': True, 'exc': None, 'case': 'restore'}, ) @ddt.unpack def test__copy_share_data(self, cancelled, exc, case): access = db_utils.create_access(share_id=self.share['id']) if case == 'migration': connection_info_src = { 'mount': 'mount_cmd_src', 'unmount': 
'unmount_cmd_src', 'share_id': self.share['id'], 'share_instance_id': 'ins1_id', 'mount_point': '/tmp/ins1_id', } connection_info_dest = { 'mount': 'mount_cmd_dest', 'unmount': 'unmount_cmd_dest', 'share_id': None, 'share_instance_id': 'ins2_id', 'mount_point': '/tmp/ins2_id', } if case == 'backup': connection_info_src = { 'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src', 'share_id': self.share['id'], 'share_instance_id': 'ins1_id', 'mount_point': '/tmp/ins1_id', } connection_info_dest = { 'mount': 'mount_cmd_dest', 'unmount': 'unmount_cmd_dest', 'share_id': None, 'share_instance_id': None, 'mount_point': '/tmp/backup_id', 'backup': True } if case == 'restore': connection_info_src = { 'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src', 'share_id': None, 'share_instance_id': None, 'mount_point': '/tmp/backup_id', 'restore': True } connection_info_dest = { 'mount': 'mount_cmd_dest', 'unmount': 'unmount_cmd_dest', 'share_id': self.share['id'], 'share_instance_id': 'ins2_id', 'mount_point': '/tmp/ins2_id', } get_progress = {'total_progress': 100} # mocks fake_copy = mock.MagicMock(cancelled=cancelled) self.mock_object(db, 'share_update') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[self.share['instance'], self.share['instance']])) self.mock_object(helper.DataServiceHelper, 'allow_access_to_data_service', mock.Mock(return_value=[access])) self.mock_object(helper.DataServiceHelper, 'mount_share_instance_or_backup') self.mock_object(fake_copy, 'run', mock.Mock(side_effect=exc)) self.mock_object(fake_copy, 'get_progress', mock.Mock(return_value=get_progress)) self.mock_object(helper.DataServiceHelper, 'unmount_share_instance_or_backup', mock.Mock(side_effect=Exception('fake'))) self.mock_object(helper.DataServiceHelper, 'deny_access_to_data_service', mock.Mock(side_effect=Exception('fake'))) extra_updates = None # run if cancelled: self.assertRaises( exception.ShareDataCopyCancelled, self.manager._copy_share_data, self.context, fake_copy, 
connection_info_src, connection_info_dest) extra_updates = [ mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETING}), mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_CANCELLED}) ] elif exc: self.assertRaises( exception.ShareDataCopyFailed, self.manager._copy_share_data, self.context, fake_copy, connection_info_src, connection_info_dest) else: self.manager._copy_share_data( self.context, fake_copy, connection_info_src, connection_info_dest) extra_updates = [ mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETING}), mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED}) ] # asserts self.assertEqual( self.manager.busy_tasks_shares[self.share['id']], fake_copy) update_list = [ mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}), mock.call( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_IN_PROGRESS}), ] if extra_updates: update_list = update_list + extra_updates db.share_update.assert_has_calls(update_list) helper.DataServiceHelper.\ mount_share_instance_or_backup.assert_has_calls([ mock.call(connection_info_src, '/tmp/'), mock.call(connection_info_dest, '/tmp/')]) fake_copy.run.assert_called_once_with() if exc is None: fake_copy.get_progress.assert_called_once_with() helper.DataServiceHelper.\ unmount_share_instance_or_backup.assert_has_calls([ mock.call(connection_info_src, '/tmp/'), mock.call(connection_info_dest, '/tmp/')]) def test__copy_share_data_exception_access(self): connection_info_src = { 'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src', 'share_id': self.share['id'], 'share_instance_id': 'ins1_id', 'mount_point': '/tmp/ins1_id', } connection_info_dest = { 'mount': 'mount_cmd_dest', 'unmount': 'unmount_cmd_dest', 'share_id': None, 'share_instance_id': 'ins2_id', 'mount_point': 
'/tmp/ins2_id', } fake_copy = mock.MagicMock(cancelled=False) # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[self.share['instance'], self.share['instance']])) self.mock_object( helper.DataServiceHelper, 'allow_access_to_data_service', mock.Mock( side_effect=exception.ShareDataCopyFailed(reason='fake'))) self.mock_object(helper.DataServiceHelper, 'cleanup_data_access') # run self.assertRaises(exception.ShareDataCopyFailed, self.manager._copy_share_data, self.context, fake_copy, connection_info_src, connection_info_dest) # asserts db.share_update.assert_called_once_with( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) (helper.DataServiceHelper.allow_access_to_data_service. assert_called_once_with( self.share['instance'], connection_info_src, self.share['instance'], connection_info_dest)) def test__copy_share_data_exception_mount_1(self): access = db_utils.create_access(share_id=self.share['id']) connection_info_src = { 'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src', 'share_id': self.share['id'], 'share_instance_id': 'ins1_id', 'mount_point': '/tmp/ins1_id', } connection_info_dest = { 'mount': 'mount_cmd_dest', 'unmount': 'unmount_cmd_dest', 'share_id': None, 'share_instance_id': 'ins2_id', 'mount_point': '/tmp/ins2_id', } fake_copy = mock.MagicMock(cancelled=False) # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[self.share['instance'], self.share['instance']])) self.mock_object(helper.DataServiceHelper, 'allow_access_to_data_service', mock.Mock(return_value=[access])) self.mock_object(helper.DataServiceHelper, 'mount_share_instance_or_backup', mock.Mock(side_effect=Exception('fake'))) self.mock_object(helper.DataServiceHelper, 'cleanup_data_access') self.mock_object(helper.DataServiceHelper, 'cleanup_temp_folder') # run self.assertRaises(exception.ShareDataCopyFailed, 
self.manager._copy_share_data, self.context, fake_copy, connection_info_src, connection_info_dest) # asserts db.share_update.assert_called_once_with( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) helper.DataServiceHelper.\ mount_share_instance_or_backup.assert_called_once_with( connection_info_src, '/tmp/') helper.DataServiceHelper.cleanup_temp_folder.assert_called_once_with( '/tmp/', 'ins1_id') helper.DataServiceHelper.cleanup_data_access.assert_has_calls([ mock.call([access], self.share['instance']), mock.call([access], self.share['instance'])]) def test__copy_share_data_exception_mount_2(self): access = db_utils.create_access(share_id=self.share['id']) connection_info_src = { 'mount': 'mount_cmd_src', 'unmount': 'unmount_cmd_src', 'share_id': self.share['id'], 'share_instance_id': 'ins1_id', 'mount_point': '/tmp/ins1_id', } connection_info_dest = { 'mount': 'mount_cmd_dest', 'unmount': 'unmount_cmd_dest', 'share_id': None, 'share_instance_id': 'ins2_id', 'mount_point': '/tmp/ins2_id', } fake_copy = mock.MagicMock(cancelled=False) # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[self.share['instance'], self.share['instance']])) self.mock_object(helper.DataServiceHelper, 'allow_access_to_data_service', mock.Mock(return_value=[access])) self.mock_object(helper.DataServiceHelper, 'mount_share_instance_or_backup', mock.Mock(side_effect=[None, Exception('fake')])) self.mock_object(helper.DataServiceHelper, 'cleanup_data_access') self.mock_object(helper.DataServiceHelper, 'cleanup_temp_folder') self.mock_object(helper.DataServiceHelper, 'cleanup_unmount_temp_folder') # run self.assertRaises(exception.ShareDataCopyFailed, self.manager._copy_share_data, self.context, fake_copy, connection_info_src, connection_info_dest) # asserts db.share_update.assert_called_once_with( self.context, self.share['id'], {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) 
helper.DataServiceHelper.\ mount_share_instance_or_backup.assert_has_calls([ mock.call(connection_info_src, '/tmp/'), mock.call(connection_info_dest, '/tmp/')]) helper.DataServiceHelper.cleanup_unmount_temp_folder.\ assert_called_once_with(connection_info_src, '/tmp/') helper.DataServiceHelper.cleanup_temp_folder.assert_has_calls([ mock.call('/tmp/', 'ins2_id'), mock.call('/tmp/', 'ins1_id')]) def test_data_copy_cancel(self): share = db_utils.create_share() self.manager.busy_tasks_shares[share['id']] = data_utils.Copy # mocks self.mock_object(data_utils.Copy, 'cancel') # run self.manager.data_copy_cancel(self.context, share['id']) # asserts data_utils.Copy.cancel.assert_called_once_with() def test_data_copy_cancel_not_copying(self): self.assertRaises(exception.InvalidShare, self.manager.data_copy_cancel, self.context, 'fake_id') def test_data_copy_get_progress(self): share = db_utils.create_share() self.manager.busy_tasks_shares[share['id']] = data_utils.Copy expected = 'fake_progress' # mocks self.mock_object(data_utils.Copy, 'get_progress', mock.Mock(return_value=expected)) # run result = self.manager.data_copy_get_progress(self.context, share['id']) # asserts self.assertEqual(expected, result) data_utils.Copy.get_progress.assert_called_once_with() def test_data_copy_get_progress_not_copying(self): self.assertRaises(exception.InvalidShare, self.manager.data_copy_get_progress, self.context, 'fake_id') def test_create_share_backup(self): share_info = db_utils.create_share( status=constants.STATUS_BACKUP_CREATING) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_CREATING) # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_backup_update') self.mock_object(db, 'share_get', mock.Mock(return_value=share_info)) self.mock_object(db, 'share_backup_get', mock.Mock(return_value=backup_info)) self.mock_object(self.manager, '_run_backup', mock.Mock(side_effect=None)) self.manager.create_backup(self.context, backup_info) 
db.share_update.assert_called_with( self.context, share_info['id'], {'status': constants.STATUS_AVAILABLE}) db.share_backup_update.assert_called_with( self.context, backup_info['id'], {'status': constants.STATUS_AVAILABLE, 'progress': '100'}) def test_create_share_backup_exception(self): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_backup_update') self.mock_object(db, 'share_get', mock.Mock(return_value=share_info)) self.mock_object(db, 'share_backup_get', mock.Mock(return_value=backup_info)) self.mock_object( self.manager, '_run_backup', mock.Mock( side_effect=exception.ShareDataCopyFailed(reason='fake'))) self.assertRaises(exception.ManilaException, self.manager.create_backup, self.context, backup_info) db.share_update.assert_called_with( self.context, share_info['id'], {'status': constants.STATUS_AVAILABLE}) db.share_backup_update.assert_called() @ddt.data('90', '100') def test_create_share_backup_continue(self, progress): share_info = db_utils.create_share( status=constants.STATUS_BACKUP_CREATING) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_CREATING, topic=CONF.data_topic) # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_backup_update') self.mock_object(db, 'share_backups_get_all', mock.Mock(return_value=[backup_info])) self.mock_object(self.manager, 'data_copy_get_progress', mock.Mock(return_value={'total_progress': progress})) self.manager.create_backup_continue(self.context) if progress == '100': db.share_backup_update.assert_called_with( self.context, backup_info['id'], {'status': constants.STATUS_AVAILABLE, 'progress': '100'}) db.share_update.assert_called_with( self.context, share_info['id'], {'status': constants.STATUS_AVAILABLE}) else: db.share_backup_update.assert_called_with( self.context, 
backup_info['id'], {'progress': progress}) def test_create_share_backup_continue_exception(self): share_info = db_utils.create_share( status=constants.STATUS_BACKUP_CREATING) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_CREATING, topic=CONF.data_topic) # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_backup_update') self.mock_object(db, 'share_backups_get_all', mock.Mock(return_value=[backup_info])) self.mock_object(self.manager, 'data_copy_get_progress', mock.Mock(side_effect=exception.ManilaException)) self.manager.create_backup_continue(self.context) db.share_backup_update.assert_called_with( self.context, backup_info['id'], {'status': constants.STATUS_ERROR, 'progress': '0'}) db.share_update.assert_called_with( self.context, share_info['id'], {'status': constants.STATUS_AVAILABLE}) @ddt.data(None, exception.ShareDataCopyFailed(reason='fake')) def test__run_backup(self, exc): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) share_instance = { 'export_locations': [{ 'path': 'test_path', "is_admin_only": False }, ], 'share_proto': 'nfs', } # mocks self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share_instance)) self.mock_object(data_utils, 'Copy', mock.Mock(return_value='fake_copy')) self.manager.busy_tasks_shares[self.share['id']] = 'fake_copy' self.mock_object(self.manager, '_copy_share_data', mock.Mock(side_effect=exc)) self.mock_object(self.manager, '_run_backup') if exc is isinstance(exc, exception.ShareDataCopyFailed): self.assertRaises(exception.ShareDataCopyFailed, self.manager._run_backup, self.context, backup_info, share_info) else: self.manager._run_backup(self.context, backup_info, share_info) def test_delete_share_backup(self): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], 
status=constants.STATUS_AVAILABLE, size=2) # mocks self.mock_object(db, 'share_backup_delete') self.mock_object(db, 'share_backup_get', mock.Mock(return_value=backup_info)) self.mock_object(utils, 'execute') reservation = 'fake' self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=reservation)) self.mock_object(quota.QUOTAS, 'commit') self.manager.delete_backup(self.context, backup_info) db.share_backup_delete.assert_called_with( self.context, backup_info['id']) def test_delete_share_backup_exception(self): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) # mocks self.mock_object(db, 'share_backup_get', mock.Mock(return_value=backup_info)) self.mock_object(utils, 'execute') self.mock_object( quota.QUOTAS, 'reserve', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self.manager.delete_backup, self.context, backup_info) def test_restore_share_backup(self): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) share_id = share_info['id'] # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_backup_update') self.mock_object(db, 'share_get', mock.Mock(return_value=share_info)) self.mock_object(db, 'share_backup_get', mock.Mock(return_value=backup_info)) self.mock_object(self.manager, '_run_restore') self.manager.restore_backup(self.context, backup_info, share_id) db.share_update.assert_called_with( self.context, share_info['id'], {'status': constants.STATUS_AVAILABLE}) db.share_backup_update.assert_called_with( self.context, backup_info['id'], {'status': constants.STATUS_AVAILABLE, 'restore_progress': '100'}) def test_restore_share_backup_respects_restore_to_target(self): target_share_info = db_utils.create_share( status=constants.STATUS_AVAILABLE) share_info = 
db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) target_share_id = target_share_info['id'] # mocks with mock.patch.object(self.manager.backup_driver, 'restore_to_target_support', False): self.mock_object(self.manager, '_run_restore') self.mock_object(self.manager.backup_driver, 'restore') self.mock_object(db, 'share_get', mock.Mock(return_value=target_share_info)) self.mock_object(db, 'share_backup_get', mock.Mock(return_value=backup_info)) self.assertRaises(exception.BackupException, self.manager.restore_backup, self.context, backup_info, target_share_id) self.manager._run_restore.assert_not_called() self.manager.backup_driver.restore.assert_not_called() def test_restore_share_backup_exception(self): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) share_id = share_info['id'] # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_get', mock.Mock(return_value=share_info)) self.mock_object(db, 'share_backup_get', mock.Mock(return_value=backup_info)) self.mock_object( self.manager, '_run_restore', mock.Mock( side_effect=exception.ShareDataCopyFailed(reason='fake'))) self.assertRaises(exception.ManilaException, self.manager.restore_backup, self.context, backup_info, share_id) db.share_update.assert_called_with( self.context, share_info['id'], {'status': constants.STATUS_BACKUP_RESTORING_ERROR}) @ddt.data('90', '100') def test_restore_share_backup_continue(self, progress): share_info = db_utils.create_share( status=constants.STATUS_BACKUP_RESTORING) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_RESTORING, topic=CONF.data_topic) share_info['source_backup_id'] = backup_info['id'] # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_backup_update') self.mock_object(db, 
'share_get_all', mock.Mock(return_value=[share_info])) self.mock_object(db, 'share_backups_get_all', mock.Mock(return_value=[backup_info])) self.mock_object(self.manager, 'data_copy_get_progress', mock.Mock(return_value={'total_progress': progress})) self.manager.restore_backup_continue(self.context) if progress == '100': db.share_backup_update.assert_called_with( self.context, backup_info['id'], {'status': constants.STATUS_AVAILABLE, 'restore_progress': '100'}) db.share_update.assert_called_with( self.context, share_info['id'], {'status': constants.STATUS_AVAILABLE}) else: db.share_backup_update.assert_called_with( self.context, backup_info['id'], {'restore_progress': progress}) def test_restore_share_backup_continue_exception(self): share_info = db_utils.create_share( status=constants.STATUS_BACKUP_RESTORING) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_RESTORING, topic=CONF.data_topic) share_info['source_backup_id'] = backup_info['id'] # mocks self.mock_object(db, 'share_update') self.mock_object(db, 'share_backup_update') self.mock_object(db, 'share_get_all', mock.Mock(return_value=[share_info])) self.mock_object(db, 'share_backups_get_all', mock.Mock(return_value=[backup_info])) self.mock_object(self.manager, 'data_copy_get_progress', mock.Mock(side_effect=exception.ManilaException)) self.manager.restore_backup_continue(self.context) db.share_backup_update.assert_called_with( self.context, backup_info['id'], {'status': constants.STATUS_AVAILABLE, 'restore_progress': '0'}) db.share_update.assert_called_with( self.context, share_info['id'], {'status': constants.STATUS_BACKUP_RESTORING_ERROR}) @ddt.data(None, exception.ShareDataCopyFailed(reason='fake')) def test__run_restore(self, exc): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) share_instance = { 'export_locations': [{ 'path': 'test_path', 
"is_admin_only": False }, ], 'share_proto': 'nfs', } # mocks self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share_instance)) self.mock_object(data_utils, 'Copy', mock.Mock(return_value='fake_copy')) self.manager.busy_tasks_shares[self.share['id']] = 'fake_copy' self.mock_object(self.manager, '_copy_share_data', mock.Mock(side_effect=exc)) self.mock_object(self.manager, '_run_restore') if exc is isinstance(exc, exception.ShareDataCopyFailed): self.assertRaises(exception.ShareDataCopyFailed, self.manager._run_restore, self.context, backup_info, share_info) else: self.manager._run_restore(self.context, backup_info, share_info) def test_create_backup_respects_use_data_manager(self): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) # mocks self.mock_object(self.manager, '_run_backup') self.mock_object(self.manager.backup_driver, 'backup') self.mock_object(self.manager.db, 'share_get', mock.Mock(return_value=share_info)) self.mock_object(self.manager.db, 'share_backup_get', mock.Mock(return_value=backup_info)) # tests with mock.patch.object( self.manager.backup_driver, 'use_data_manager', False): self.manager.create_backup(self.context, backup_info) self.assertFalse(self.manager.backup_driver.use_data_manager) self.manager._run_backup.assert_not_called() self.manager.backup_driver.backup.assert_called_with( self.context, backup_info, share_info) def test_restore_backup_respects_use_data_manager(self): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) share_id = share_info['id'] backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) # mocks self.mock_object(self.manager, '_run_restore') self.mock_object(self.manager.backup_driver, 'restore') self.mock_object(self.manager.db, 'share_get', mock.Mock(return_value=share_info)) self.mock_object(self.manager.db, 
'share_backup_get', mock.Mock(return_value=backup_info)) # tests with mock.patch.object( self.manager.backup_driver, 'use_data_manager', False): self.manager.restore_backup(self.context, backup_info, share_id) self.assertFalse(self.manager.backup_driver.use_data_manager) self.manager._run_restore.assert_not_called() self.manager.backup_driver.restore.assert_called_with( self.context, backup_info, share_info) def test_delete_backup_respects_use_data_manager(self): share_info = db_utils.create_share(status=constants.STATUS_AVAILABLE) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_AVAILABLE, size=2) # mocks self.mock_object(self.manager.backup_driver, 'delete') self.mock_object(self.manager.db, 'share_backup_get', mock.Mock(return_value=backup_info)) # tests with mock.patch.object( self.manager.backup_driver, 'use_data_manager', False): self.manager.delete_backup(self.context, backup_info) self.assertFalse(self.manager.backup_driver.use_data_manager) self.manager.backup_driver.delete.assert_called_with( self.context, backup_info) def test_create_backup_continue_respects_use_data_manager(self): share_info = db_utils.create_share( status=constants.STATUS_BACKUP_RESTORING) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_CREATING, size=2) # mocks self.mock_object(self.manager, 'data_copy_get_progress') self.mock_object(self.manager.backup_driver, 'get_backup_progress') self.mock_object(self.manager.db, 'share_backups_get_all', mock.Mock(return_value=[backup_info])) self.mock_object(self.manager.db, 'share_backup_update') self.mock_object(self.manager.db, 'share_get', mock.Mock(return_value=share_info)) # tests with mock.patch.object( self.manager.backup_driver, 'use_data_manager', False): self.manager.create_backup_continue(self.context) self.assertFalse(self.manager.backup_driver.use_data_manager) self.manager.data_copy_get_progress.assert_not_called() 
self.manager.backup_driver.get_backup_progress.assert_called_with( self.context, backup_info, share_info) def test_restore_backup_continue_respects_use_data_manager(self): share_info = db_utils.create_share( status=constants.STATUS_BACKUP_RESTORING) backup_info = db_utils.create_backup( share_info['id'], status=constants.STATUS_BACKUP_RESTORING, size=2) # mocks self.mock_object(self.manager, 'data_copy_get_progress') self.mock_object(self.manager.backup_driver, 'get_restore_progress') self.mock_object(self.manager.db, 'share_get_all', mock.Mock(return_value=[share_info])) self.mock_object(self.manager.db, 'share_backups_get_all', mock.Mock(return_value=[backup_info])) self.mock_object(self.manager.db, 'share_backup_update') # tests with mock.patch.object( self.manager.backup_driver, 'use_data_manager', False): self.manager.restore_backup_continue(self.context) self.assertFalse(self.manager.backup_driver.use_data_manager) self.manager.data_copy_get_progress.assert_not_called() self.manager.backup_driver.get_restore_progress.assert_called_with( self.context, backup_info, share_info) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/data/test_rpcapi.py0000664000175000017500000001100100000000000021252 0ustar00zuulzuul00000000000000# Copyright 2015, Hitachi Data Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for manila.data.rpcapi """ import copy from unittest import mock from oslo_config import cfg from oslo_serialization import jsonutils from manila.common import constants from manila import context from manila.data import rpcapi as data_rpcapi from manila import test from manila.tests import db_utils CONF = cfg.CONF class DataRpcAPITestCase(test.TestCase): def setUp(self): super(DataRpcAPITestCase, self).setUp() share = db_utils.create_share( availability_zone=CONF.storage_availability_zone, status=constants.STATUS_AVAILABLE ) self.fake_share = jsonutils.to_primitive(share) self.backup = db_utils.create_backup( share_id=self.fake_share['id'], status=constants.STATUS_AVAILABLE) def tearDown(self): super(DataRpcAPITestCase, self).tearDown() def _test_data_api(self, method, rpc_method, fanout=False, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = data_rpcapi.DataAPI() expected_retval = 'foo' if method == 'call' else None target = { "fanout": fanout, "version": kwargs.pop('version', '1.0'), } expected_msg = copy.deepcopy(kwargs) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwds): for kwd in kwds: self.assertEqual(target[kwd], kwds[kwd]) return rpcapi.client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval with mock.patch.object(rpcapi.client, "prepare") as mock_prepared: mock_prepared.side_effect = _fake_prepare_method with mock.patch.object(rpcapi.client, rpc_method) as mock_method: mock_method.side_effect = _fake_rpc_method retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(expected_retval, retval) expected_args = [ctxt, method, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) def test_migration_start(self): self._test_data_api('migration_start', rpc_method='cast', version='1.0', share_id=self.fake_share['id'], ignore_list=[], 
share_instance_id='fake_ins_id', dest_share_instance_id='dest_fake_ins_id', connection_info_src={}, connection_info_dest={}) def test_data_copy_cancel(self): self._test_data_api('data_copy_cancel', rpc_method='call', version='1.0', share_id=self.fake_share['id']) def test_data_copy_get_progress(self): self._test_data_api('data_copy_get_progress', rpc_method='call', version='1.0', share_id=self.fake_share['id']) def test_create_backup(self): self._test_data_api('create_backup', rpc_method='cast', version='1.1', backup=self.backup) def test_delete_backup(self): self._test_data_api('delete_backup', rpc_method='cast', version='1.1', backup=self.backup) def test_restore_backup(self): self._test_data_api('restore_backup', rpc_method='cast', version='1.1', backup=self.backup, share_id=self.fake_share['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/data/test_utils.py0000664000175000017500000002640600000000000021153 0ustar00zuulzuul00000000000000# Copyright 2015 Hitachi Data Systems inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import time from unittest import mock from manila.data import utils as data_utils from manila import exception from manila import test from manila import utils class CopyClassTestCase(test.TestCase): def setUp(self): super(CopyClassTestCase, self).setUp() src = '/path/fake/src' dest = '/path/fake/dst' ignore_list = ['item'] self._copy = data_utils.Copy(src, dest, ignore_list) self._copy.total_size = 10000 self._copy.current_size = 100 self._copy.current_copy = {'file_path': '/fake/path', 'size': 100} self._copy.check_hash = True self.mock_log = self.mock_object(data_utils, 'LOG') def test_get_progress(self): expected = {'total_progress': 1, 'current_file_path': '/fake/path', 'current_file_progress': 100} # mocks self.mock_object(utils, 'execute', mock.Mock(return_value=("100", ""))) # run self._copy.initialized = True out = self._copy.get_progress() # asserts self.assertEqual(expected, out) utils.execute.assert_called_once_with("stat", "-c", "%s", "/fake/path", run_as_root=True) def test_get_progress_not_initialized(self): expected = {'total_progress': 0} # run self._copy.initialized = False out = self._copy.get_progress() # asserts self.assertEqual(expected, out) def test_get_progress_completed_empty(self): expected = {'total_progress': 100} # run self._copy.initialized = True self._copy.completed = True self._copy.total_size = 0 out = self._copy.get_progress() # asserts self.assertEqual(expected, out) def test_get_progress_current_copy_none(self): self._copy.current_copy = None expected = {'total_progress': 0} # run self._copy.initialized = True out = self._copy.get_progress() # asserts self.assertEqual(expected, out) def test_get_progress_exception(self): expected = {'total_progress': 1, 'current_file_path': '/fake/path', 'current_file_progress': 0} # mocks self.mock_object( utils, 'execute', mock.Mock(side_effect=utils.processutils.ProcessExecutionError())) # run self._copy.initialized = True out = self._copy.get_progress() # asserts 
self.assertEqual(expected, out) utils.execute.assert_called_once_with("stat", "-c", "%s", "/fake/path", run_as_root=True) def test_cancel(self): self._copy.cancelled = False # run self._copy.cancel() # asserts self.assertTrue(self._copy.cancelled) # reset self._copy.cancelled = False def test_get_total_size(self): self._copy.total_size = 0 values = [("folder1/\nitem/\nfile1\nitem", ""), ("", ""), ("10000", "")] def get_output(*args, **kwargs): return values.pop(0) # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=get_output)) # run self._copy.get_total_size(self._copy.src) # asserts self.assertEqual(10000, self._copy.total_size) utils.execute.assert_has_calls([ mock.call("ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True), mock.call("ls", "-pA1", "--group-directories-first", os.path.join(self._copy.src, "folder1/"), run_as_root=True), mock.call("stat", "-c", "%s", os.path.join(self._copy.src, "file1"), run_as_root=True) ]) def test_get_total_size_cancelled_1(self): self._copy.total_size = 0 self._copy.cancelled = True # run self._copy.get_total_size(self._copy.src) # asserts self.assertEqual(0, self._copy.total_size) # reset self._copy.total_size = 10000 self._copy.cancelled = False def test_get_total_size_cancelled_2(self): self._copy.total_size = 0 def ls_output(*args, **kwargs): self._copy.cancelled = True return "folder1/", "" # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=ls_output)) # run self._copy.get_total_size(self._copy.src) # asserts self.assertEqual(0, self._copy.total_size) utils.execute.assert_called_once_with( "ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True) # reset self._copy.total_size = 10000 self._copy.cancelled = False def test_copy_data(self): values = [("folder1/\nitem/\nfile1\nitem", ""), "", ("", ""), ("10000", ""), "", ""] def get_output(*args, **kwargs): return values.pop(0) # mocks self.mock_object(data_utils, '_validate_item', 
mock.Mock(side_effect=[exception.ShareDataCopyFailed( reason='fake'), None])) self.mock_object(utils, 'execute', mock.Mock( side_effect=get_output)) self.mock_object(self._copy, 'get_progress') self.mock_object(time, 'sleep') # run self._copy.copy_data(self._copy.src) # asserts self._copy.get_progress.assert_called_once_with() utils.execute.assert_has_calls([ mock.call("ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True), mock.call("mkdir", "-p", os.path.join(self._copy.dest, "folder1/"), run_as_root=True), mock.call("ls", "-pA1", "--group-directories-first", os.path.join(self._copy.src, "folder1/"), run_as_root=True), mock.call("stat", "-c", "%s", os.path.join(self._copy.src, "file1"), run_as_root=True), mock.call("cp", "-P", "--preserve=all", os.path.join(self._copy.src, "file1"), os.path.join(self._copy.dest, "file1"), run_as_root=True), mock.call("cp", "-P", "--preserve=all", os.path.join(self._copy.src, "file1"), os.path.join(self._copy.dest, "file1"), run_as_root=True) ]) def test__validate_item(self): self.mock_object(utils, 'execute', mock.Mock( side_effect=[("abcxyz", ""), ("defrst", "")])) self.assertRaises(exception.ShareDataCopyFailed, data_utils._validate_item, 'src', 'dest') utils.execute.assert_has_calls([ mock.call("sha256sum", "src", run_as_root=True), mock.call("sha256sum", "dest", run_as_root=True), ]) def test_copy_data_cancelled_1(self): self._copy.cancelled = True # run self._copy.copy_data(self._copy.src) # reset self._copy.cancelled = False def test_copy_data_cancelled_2(self): def ls_output(*args, **kwargs): self._copy.cancelled = True return "folder1/", "" # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=ls_output)) # run self._copy.copy_data(self._copy.src) # asserts utils.execute.assert_called_once_with( "ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True) # reset self._copy.cancelled = False def test_copy_stats(self): values = [("folder1/\nitem/\nfile1\nitem", ""), ("", 
""), "", "", "", "", "", ""] def get_output(*args, **kwargs): return values.pop(0) # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=get_output)) # run self._copy.copy_stats(self._copy.src) # asserts utils.execute.assert_has_calls([ mock.call("ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True), mock.call("ls", "-pA1", "--group-directories-first", os.path.join(self._copy.src, "folder1/"), run_as_root=True), mock.call( "chmod", "--reference=%s" % os.path.join(self._copy.src, "folder1/"), os.path.join(self._copy.dest, "folder1/"), run_as_root=True), mock.call( "touch", "--reference=%s" % os.path.join(self._copy.src, "folder1/"), os.path.join(self._copy.dest, "folder1/"), run_as_root=True), mock.call( "chown", "--reference=%s" % os.path.join(self._copy.src, "folder1/"), os.path.join(self._copy.dest, "folder1/"), run_as_root=True), ]) def test_copy_stats_cancelled_1(self): self._copy.cancelled = True # run self._copy.copy_stats(self._copy.src) # reset self._copy.cancelled = False def test_copy_stats_cancelled_2(self): def ls_output(*args, **kwargs): self._copy.cancelled = True return "folder1/", "" # mocks self.mock_object(utils, 'execute', mock.Mock( side_effect=ls_output)) # run self._copy.copy_stats(self._copy.src) # asserts utils.execute.assert_called_once_with( "ls", "-pA1", "--group-directories-first", self._copy.src, run_as_root=True) # reset self._copy.cancelled = False def test_run(self): # mocks self.mock_object(self._copy, 'get_total_size') self.mock_object(self._copy, 'copy_data') self.mock_object(self._copy, 'copy_stats') self.mock_object(self._copy, 'get_progress') # run self._copy.run() # asserts self.assertTrue(data_utils.LOG.info.called) self._copy.get_total_size.assert_called_once_with(self._copy.src) self._copy.copy_data.assert_called_once_with(self._copy.src) self._copy.copy_stats.assert_called_once_with(self._copy.src) self._copy.get_progress.assert_called_once_with() 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.973671 manila-21.0.0/manila/tests/db/0000775000175000017500000000000000000000000016046 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/__init__.py0000664000175000017500000000000000000000000020145 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/fakes.py0000664000175000017500000000307300000000000017514 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Stubouts, mocks and fixtures for the test suite.""" from manila import db class FakeModel(object): """Stubs out for model.""" def __init__(self, values): self.values = values def __getattr__(self, name): return self.values.get(name) def __getitem__(self, key): if key in self.values: return self.values[key] else: raise NotImplementedError() def __repr__(self): return '' % self.values def get(self, key, default=None): return self.__getattr__(key) or default def __contains__(self, key): return self._getattr__(key) def to_dict(self): return self.values def stub_out(stubs, funcs): """Set the stubs in mapping in the db api.""" for func in funcs: func_name = '_'.join(func.__name__.split('_')[1:]) stubs.Set(db, func_name, func) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.973671 manila-21.0.0/manila/tests/db/migrations/0000775000175000017500000000000000000000000020222 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/migrations/__init__.py0000664000175000017500000000000000000000000022321 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.973671 manila-21.0.0/manila/tests/db/migrations/alembic/0000775000175000017500000000000000000000000021616 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/migrations/alembic/__init__.py0000664000175000017500000000000000000000000023715 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/migrations/alembic/migrations_data_checks.py0000664000175000017500000040711600000000000026666 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests data for database migrations. All database migrations with data manipulation (like moving data from column to the table) should have data check class: @map_to_migration('1f0bd302c1a6') # Revision of checked db migration class FooMigrationChecks(BaseMigrationChecks): def setup_upgrade_data(self, conn): ... def check_upgrade(self, conn, data): ... def check_downgrade(self, conn): ... See BaseMigrationChecks class for more information. 
""" import abc import copy import datetime from oslo_db import exception as oslo_db_exc from oslo_utils import uuidutils from sqlalchemy import exc as sa_exc from manila.common import constants from manila.db.migrations import utils class DbMigrationsData(object): migration_mappings = {} methods_mapping = { 'pre': 'setup_upgrade_data', 'check': 'check_upgrade', 'post': 'check_downgrade', } def __getattr__(self, item): parts = item.split('_') is_mapping_method = ( len(parts) > 2 and parts[0] == '' and parts[1] in self.methods_mapping ) if not is_mapping_method: return super(DbMigrationsData, self).__getattribute__(item) check_obj = self.migration_mappings.get(parts[-1], None) if check_obj is None: raise AttributeError check_obj.set_test_case(self) return getattr(check_obj, self.methods_mapping.get(parts[1])) def map_to_migration(revision): def decorator(cls): DbMigrationsData.migration_mappings[revision] = cls() return cls return decorator class BaseMigrationChecks(metaclass=abc.ABCMeta): def __init__(self): self.test_case = None def set_test_case(self, test_case): self.test_case = test_case @abc.abstractmethod def setup_upgrade_data(self, conn): """This method should be used to insert test data for migration. :param conn: SQLAlchemy conn :return: any data which will be passed to 'check_upgrade' as 'data' arg """ @abc.abstractmethod def check_upgrade(self, conn, data): """This method should be used to do assertions after upgrade method. To perform assertions use 'self.test_case' instance property: self.test_case.assertTrue(True) :param conn: SQLAlchemy conn :param data: data returned by 'setup_upgrade_data' """ @abc.abstractmethod def check_downgrade(self, conn): """This method should be used to do assertions after downgrade method. 
To perform assertions use 'self.test_case' instance property: self.test_case.assertTrue(True) :param conn: SQLAlchemy conn """ def fake_share(**kwargs): share = { 'id': uuidutils.generate_uuid(), 'display_name': 'fake_share', 'display_description': 'my fake share', 'snapshot_id': uuidutils.generate_uuid(), 'is_public': True, 'size': 1, 'deleted': 'False', 'share_proto': 'fake_proto', 'user_id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid(), 'snapshot_support': True, 'task_state': None, } share.update(kwargs) return share def fake_instance(share_id=None, **kwargs): instance = { 'id': uuidutils.generate_uuid(), 'share_id': share_id or uuidutils.generate_uuid(), 'deleted': 'False', 'host': 'openstack@BackendZ#PoolA', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'access_rules_status': 'active', } instance.update(kwargs) return instance @map_to_migration('38e632621e5a') class ShareTypeMigrationChecks(BaseMigrationChecks): def _get_fake_data(self): extra_specs = [] self.share_type_ids = [] volume_types = [ { 'id': uuidutils.generate_uuid(), 'deleted': 'False', 'name': 'vol-type-A', }, { 'id': uuidutils.generate_uuid(), 'deleted': 'False', 'name': 'vol-type-B', }, ] for idx, volume_type in enumerate(volume_types): extra_specs.append({ 'volume_type_id': volume_type['id'], 'key': 'foo', 'value': 'bar%s' % idx, 'deleted': False, }) extra_specs.append({ 'volume_type_id': volume_type['id'], 'key': 'xyzzy', 'value': 'spoon_%s' % idx, 'deleted': False, }) self.share_type_ids.append(volume_type['id']) return volume_types, extra_specs def setup_upgrade_data(self, conn): (self.volume_types, self.extra_specs) = self._get_fake_data() volume_types_table = utils.load_table('volume_types', conn) conn.execute(volume_types_table.insert().values(self.volume_types)) extra_specs_table = utils.load_table('volume_type_extra_specs', conn) 
conn.execute(extra_specs_table.insert().values(self.extra_specs)) def check_upgrade(self, conn, data): # Verify table transformations share_types_table = utils.load_table('share_types', conn) share_types_specs_table = utils.load_table( 'share_type_extra_specs', conn) self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table, 'volume_types', conn) self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table, 'volume_type_extra_specs', conn) # Verify presence of data share_type_ids = [ st._mapping['id'] for st in conn.execute( share_types_table.select() ) if st._mapping['id'] in self.share_type_ids ] self.test_case.assertEqual(sorted(self.share_type_ids), sorted(share_type_ids)) extra_specs = [ { 'type': es._mapping['share_type_id'], 'key': es._mapping['spec_key']} for es in conn.execute( share_types_specs_table.select() ) if es._mapping['share_type_id'] in self.share_type_ids ] self.test_case.assertEqual(4, len(extra_specs)) def check_downgrade(self, conn): # Verify table transformations volume_types_table = utils.load_table('volume_types', conn) volume_types_specs_table = utils.load_table( 'volume_type_extra_specs', conn) self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table, 'share_types', conn) self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table, 'share_type_extra_specs', conn) # Verify presence of data volume_type_ids = [ vt._mapping['id'] for vt in conn.execute(volume_types_table.select()) if vt._mapping['id'] in self.share_type_ids ] self.test_case.assertEqual(sorted(self.share_type_ids), sorted(volume_type_ids)) extra_specs = [ {'type': es._mapping['volume_type_id'], 'key': es._mapping['key']} for es in conn.execute(volume_types_specs_table.select()) if es._mapping['volume_type_id'] in self.share_type_ids ] self.test_case.assertEqual(4, len(extra_specs)) @map_to_migration('5077ffcc5f1c') class ShareInstanceMigrationChecks(BaseMigrationChecks): def _prepare_fake_data(self): time = datetime.datetime(2017, 
1, 12, 12, 12, 12) self.share = { 'id': uuidutils.generate_uuid(), 'host': 'fake_host', 'status': 'fake_status', 'scheduled_at': time, 'launched_at': time, 'terminated_at': time, 'availability_zone': 'fake_az'} self.share_snapshot = { 'id': uuidutils.generate_uuid(), 'status': 'fake_status', 'share_id': self.share['id'], 'progress': 'fake_progress'} self.share_export_location = { 'id': 1001, 'share_id': self.share['id']} def setup_upgrade_data(self, conn): self._prepare_fake_data() share_table = utils.load_table('shares', conn) conn.execute(share_table.insert().values(self.share)) snapshot_table = utils.load_table('share_snapshots', conn) conn.execute(snapshot_table.insert().values(self.share_snapshot)) el_table = utils.load_table('share_export_locations', conn) conn.execute(el_table.insert().values(self.share_export_location)) def check_upgrade(self, conn, data): share_table = utils.load_table('shares', conn) s_instance_table = utils.load_table('share_instances', conn) ss_instance_table = utils.load_table('share_snapshot_instances', conn) snapshot_table = utils.load_table('share_snapshots', conn) instance_el_table = utils.load_table('share_instance_export_locations', conn) # Check shares table for column in ['host', 'status', 'scheduled_at', 'launched_at', 'terminated_at', 'share_network_id', 'share_server_id', 'availability_zone']: rows = conn.execute(share_table.select()) for row in rows: self.test_case.assertFalse(hasattr(row, column)) # Check share instance table s_instance_record = conn.execute(s_instance_table.select().where( s_instance_table.c.share_id == self.share['id'])).first() self.test_case.assertTrue(s_instance_record is not None) for column in ['host', 'status', 'scheduled_at', 'launched_at', 'terminated_at', 'availability_zone']: self.test_case.assertEqual(self.share[column], s_instance_record._mapping[column]) # Check snapshot table for column in ['status', 'progress']: rows = conn.execute(snapshot_table.select()) for row in rows: 
self.test_case.assertFalse(hasattr(row, column)) # Check snapshot instance table ss_instance_record = conn.execute(ss_instance_table.select().where( ss_instance_table.c.snapshot_id == self.share_snapshot['id']) ).first() self.test_case.assertEqual( s_instance_record._mapping['id'], ss_instance_record._mapping['share_instance_id']) for column in ['status', 'progress']: self.test_case.assertEqual(self.share_snapshot[column], ss_instance_record._mapping[column]) # Check share export location table self.test_case.assertRaises( sa_exc.NoSuchTableError, utils.load_table, 'share_export_locations', conn) # Check share instance export location table el_record = conn.execute(instance_el_table.select().where( instance_el_table.c.share_instance_id == s_instance_record._mapping['id']) ).first() self.test_case.assertFalse(el_record is None) self.test_case.assertTrue(hasattr(el_record, 'share_instance_id')) self.test_case.assertFalse(hasattr(el_record, 'share_id')) def check_downgrade(self, conn): self.test_case.assertRaises( sa_exc.NoSuchTableError, utils.load_table, 'share_snapshot_instances', conn) self.test_case.assertRaises( sa_exc.NoSuchTableError, utils.load_table, 'share_instances', conn) self.test_case.assertRaises( sa_exc.NoSuchTableError, utils.load_table, 'share_instance_export_locations', conn) share_table = utils.load_table('shares', conn) snapshot_table = utils.load_table('share_snapshots', conn) share_el_table = utils.load_table('share_export_locations', conn) for column in ['host', 'status', 'scheduled_at', 'launched_at', 'terminated_at', 'share_network_id', 'share_server_id', 'availability_zone']: rows = conn.execute(share_table.select()) for row in rows: self.test_case.assertTrue(hasattr(row, column)) for column in ['status', 'progress']: rows = conn.execute(snapshot_table.select()) for row in rows: self.test_case.assertTrue(hasattr(row, column)) rows = conn.execute(share_el_table.select()) for row in rows: self.test_case.assertFalse(hasattr(row, 
'share_instance_id')) self.test_case.assertTrue( hasattr(row, 'share_id')) @map_to_migration('1f0bd302c1a6') class AvailabilityZoneMigrationChecks(BaseMigrationChecks): valid_az_names = ('az1', 'az2') def _get_service_data(self, options): base_dict = { 'binary': 'manila-share', 'topic': 'share', 'disabled': False, 'report_count': '100', } base_dict.update(options) return base_dict def setup_upgrade_data(self, conn): service_fixture = [ self._get_service_data( {'deleted': 0, 'host': 'fake1', 'availability_zone': 'az1'} ), self._get_service_data( {'deleted': 0, 'host': 'fake2', 'availability_zone': 'az1'} ), self._get_service_data( {'deleted': 1, 'host': 'fake3', 'availability_zone': 'az2'} ), ] services_table = utils.load_table('services', conn) for fixture in service_fixture: conn.execute(services_table.insert().values(fixture)) def check_upgrade(self, conn, _): az_table = utils.load_table('availability_zones', conn) for az in conn.execute(az_table.select()): self.test_case.assertTrue(uuidutils.is_uuid_like(az.id)) self.test_case.assertIn(az.name, self.valid_az_names) self.test_case.assertEqual('False', az.deleted) services_table = utils.load_table('services', conn) for service in conn.execute(services_table.select()): self.test_case.assertTrue( uuidutils.is_uuid_like(service.availability_zone_id) ) def check_downgrade(self, conn): services_table = utils.load_table('services', conn) for service in conn.execute(services_table.select()): self.test_case.assertIn( service.availability_zone, self.valid_az_names ) @map_to_migration('dda6de06349') class ShareInstanceExportLocationMetadataChecks(BaseMigrationChecks): el_table_name = 'share_instance_export_locations' elm_table_name = 'share_instance_export_locations_metadata' def setup_upgrade_data(self, conn): # Setup shares share_fixture = [{'id': 'foo_share_id'}, {'id': 'bar_share_id'}] share_table = utils.load_table('shares', conn) for fixture in share_fixture: conn.execute(share_table.insert().values(fixture)) # Setup 
share instances si_fixture = [ {'id': 'foo_share_instance_id_oof', 'share_id': share_fixture[0]['id']}, {'id': 'bar_share_instance_id_rab', 'share_id': share_fixture[1]['id']}, ] si_table = utils.load_table('share_instances', conn) for fixture in si_fixture: conn.execute(si_table.insert().values(fixture)) # Setup export locations el_fixture = [ {'id': 1, 'path': '/1', 'share_instance_id': si_fixture[0]['id']}, {'id': 2, 'path': '/2', 'share_instance_id': si_fixture[1]['id']}, ] el_table = utils.load_table(self.el_table_name, conn) for fixture in el_fixture: conn.execute(el_table.insert().values(fixture)) def check_upgrade(self, conn, data): el_table = utils.load_table( 'share_instance_export_locations', conn) for el in conn.execute(el_table.select()): self.test_case.assertTrue(hasattr(el, 'is_admin_only')) self.test_case.assertTrue(hasattr(el, 'uuid')) self.test_case.assertEqual(False, el.is_admin_only) self.test_case.assertTrue(uuidutils.is_uuid_like(el.uuid)) # Write export location metadata el_metadata = [ {'key': 'foo_key', 'value': 'foo_value', 'export_location_id': 1}, {'key': 'bar_key', 'value': 'bar_value', 'export_location_id': 2}, ] elm_table = utils.load_table(self.elm_table_name, conn) conn.execute(elm_table.insert().values(el_metadata)) # Verify values of written metadata for el_meta_datum in el_metadata: el_id = el_meta_datum['export_location_id'] records = conn.execute(elm_table.select().where( elm_table.c.export_location_id == el_id)) self.test_case.assertEqual(1, records.rowcount) record = records.first() expected_keys = ( 'id', 'created_at', 'updated_at', 'deleted_at', 'deleted', 'export_location_id', 'key', 'value', ) self.test_case.assertEqual( len(expected_keys), len(record._mapping.keys())) for key in expected_keys: self.test_case.assertIn(key, record._mapping.keys()) for k, v in el_meta_datum.items(): self.test_case.assertTrue(hasattr(record, k)) self.test_case.assertEqual(v, getattr(record, k)) def check_downgrade(self, conn): el_table = 
utils.load_table( 'share_instance_export_locations', conn) for el in conn.execute(el_table.select()): self.test_case.assertFalse(hasattr(el, 'is_admin_only')) self.test_case.assertFalse(hasattr(el, 'uuid')) self.test_case.assertRaises( sa_exc.NoSuchTableError, utils.load_table, self.elm_table_name, conn) @map_to_migration('344c1ac4747f') class AccessRulesStatusMigrationChecks(BaseMigrationChecks): def _get_instance_data(self, data): base_dict = {} base_dict.update(data) return base_dict def setup_upgrade_data(self, conn): share_table = utils.load_table('shares', conn) share = { 'id': 1, 'share_proto': "NFS", 'size': 0, 'snapshot_id': None, 'user_id': 'fake', 'project_id': 'fake', } conn.execute(share_table.insert().values(share)) rules1 = [ {'id': 'r1', 'share_instance_id': 1, 'state': 'active', 'deleted': 'False'}, {'id': 'r2', 'share_instance_id': 1, 'state': 'active', 'deleted': 'False'}, {'id': 'r3', 'share_instance_id': 1, 'state': 'deleting', 'deleted': 'False'}, ] rules2 = [ {'id': 'r4', 'share_instance_id': 2, 'state': 'active', 'deleted': 'False'}, {'id': 'r5', 'share_instance_id': 2, 'state': 'error', 'deleted': 'False'}, ] rules3 = [ {'id': 'r6', 'share_instance_id': 3, 'state': 'new', 'deleted': 'False'}, ] instance_fixtures = [ {'id': 1, 'deleted': 'False', 'host': 'fake1', 'share_id': 1, 'status': 'available', 'rules': rules1}, {'id': 2, 'deleted': 'False', 'host': 'fake2', 'share_id': 1, 'status': 'available', 'rules': rules2}, {'id': 3, 'deleted': 'False', 'host': 'fake3', 'share_id': 1, 'status': 'available', 'rules': rules3}, {'id': 4, 'deleted': 'False', 'host': 'fake4', 'share_id': 1, 'status': 'deleting', 'rules': []}, ] share_instances_table = utils.load_table('share_instances', conn) share_instances_rules_table = utils.load_table( 'share_instance_access_map', conn) for fixture in instance_fixtures: rules = fixture.pop('rules') conn.execute(share_instances_table.insert().values(fixture)) for rule in rules: 
conn.execute(share_instances_rules_table.insert().values(rule)) def check_upgrade(self, conn, _): instances_table = utils.load_table('share_instances', conn) valid_statuses = { '1': 'active', '2': 'error', '3': 'out_of_sync', '4': None, } instances = conn.execute(instances_table.select().where( instances_table.c.id in valid_statuses.keys())) for instance in instances: self.test_case.assertEqual(valid_statuses[instance['id']], instance['access_rules_status']) def check_downgrade(self, conn): share_instances_rules_table = utils.load_table( 'share_instance_access_map', conn) share_instance_rules_to_check = conn.execute( share_instances_rules_table.select().where( share_instances_rules_table.c.id.in_(('1', '2', '3', '4')))) valid_statuses = { '1': 'active', '2': 'error', '3': 'error', '4': None, } for rule in share_instance_rules_to_check: valid_state = valid_statuses[rule['share_instance_id']] self.test_case.assertEqual(valid_state, rule['state']) @map_to_migration('293fac1130ca') class ShareReplicationMigrationChecks(BaseMigrationChecks): valid_share_display_names = ('FAKE_SHARE_1', 'FAKE_SHARE_2', 'FAKE_SHARE_3') valid_share_ids = [] valid_replication_types = ('writable', 'readable', 'dr') def _load_tables_and_get_data(self, conn): share_table = utils.load_table('shares', conn) share_instances_table = utils.load_table('share_instances', conn) shares = conn.execute( share_table.select().where(share_table.c.id.in_( self.valid_share_ids)) ).fetchall() share_instances = conn.execute(share_instances_table.select().where( share_instances_table.c.share_id.in_(self.valid_share_ids)) ).fetchall() return shares, share_instances def setup_upgrade_data(self, conn): shares_data = [] instances_data = [] self.valid_share_ids = [] for share_display_name in self.valid_share_display_names: share_ref = fake_share(display_name=share_display_name) shares_data.append(share_ref) instances_data.append(fake_instance(share_id=share_ref['id'])) shares_table = utils.load_table('shares', conn) 
for share in shares_data: self.valid_share_ids.append(share['id']) conn.execute(shares_table.insert().values(share)) shares_instances_table = utils.load_table('share_instances', conn) for share_instance in instances_data: conn.execute( shares_instances_table.insert().values(share_instance)) def check_upgrade(self, conn, _): shares, share_instances = self._load_tables_and_get_data(conn) share_ids = [share._mapping['id'] for share in shares] share_instance_share_ids = [share_instance._mapping['share_id'] for share_instance in share_instances] # Assert no data is lost for sid in self.valid_share_ids: self.test_case.assertIn(sid, share_ids) self.test_case.assertIn(sid, share_instance_share_ids) for share in shares: self.test_case.assertIn(share._mapping['display_name'], self.valid_share_display_names) self.test_case.assertEqual('False', share.deleted) self.test_case.assertTrue(hasattr(share, 'replication_type')) for share_instance in share_instances: self.test_case.assertTrue(hasattr(share_instance, 'replica_state')) def check_downgrade(self, conn): shares, share_instances = self._load_tables_and_get_data(conn) share_ids = [share._mapping['id'] for share in shares] share_instance_share_ids = [share_instance._mapping['share_id'] for share_instance in share_instances] # Assert no data is lost for sid in self.valid_share_ids: self.test_case.assertIn(sid, share_ids) self.test_case.assertIn(sid, share_instance_share_ids) for share in shares: self.test_case.assertEqual('False', share.deleted) self.test_case.assertIn(share.display_name, self.valid_share_display_names) self.test_case.assertFalse(hasattr(share, 'replication_type')) for share_instance in share_instances: self.test_case.assertEqual('False', share_instance.deleted) self.test_case.assertIn(share_instance.share_id, self.valid_share_ids) self.test_case.assertFalse( hasattr(share_instance, 'replica_state')) @map_to_migration('5155c7077f99') class NetworkAllocationsNewLabelColumnChecks(BaseMigrationChecks): table_name 
= 'network_allocations' ids = ['fake_network_allocation_id_%d' % i for i in (1, 2, 3)] def setup_upgrade_data(self, conn): user_id = 'user_id' project_id = 'project_id' share_server_id = 'foo_share_server_id' # Create share network share_network_data = { 'id': 'foo_share_network_id', 'user_id': user_id, 'project_id': project_id, } sn_table = utils.load_table('share_networks', conn) conn.execute(sn_table.insert().values(share_network_data)) # Create share server share_server_data = { 'id': share_server_id, 'share_network_id': share_network_data['id'], 'host': 'fake_host', 'status': 'active', } ss_table = utils.load_table('share_servers', conn) conn.execute(ss_table.insert().values(share_server_data)) # Create network allocations network_allocations = [ {'id': self.ids[0], 'share_server_id': share_server_id, 'ip_address': '1.1.1.1'}, {'id': self.ids[1], 'share_server_id': share_server_id, 'ip_address': '2.2.2.2'}, ] na_table = utils.load_table(self.table_name, conn) for network_allocation in network_allocations: conn.execute(na_table.insert().values(network_allocation)) def check_upgrade(self, conn, data): na_table = utils.load_table(self.table_name, conn) for na in conn.execute(na_table.select()): self.test_case.assertTrue(hasattr(na, 'label')) self.test_case.assertEqual(na.label, 'user') # Create admin network allocation network_allocations = [ {'id': self.ids[2], 'share_server_id': na.share_server_id, 'ip_address': '3.3.3.3', 'label': 'admin', 'network_type': 'vlan', 'segmentation_id': 1005, 'ip_version': 4, 'cidr': '240.0.0.0/16'}, ] conn.execute(na_table.insert().values(network_allocations)) # Select admin network allocations for na in conn.execute( na_table.select().where(na_table.c.label == 'admin')): self.test_case.assertTrue(hasattr(na, 'label')) self.test_case.assertEqual('admin', na.label) for col_name in ('network_type', 'segmentation_id', 'ip_version', 'cidr'): self.test_case.assertTrue(hasattr(na, col_name)) self.test_case.assertEqual( 
network_allocations[0][col_name], getattr(na, col_name)) def check_downgrade(self, conn): na_table = utils.load_table(self.table_name, conn) db_result = conn.execute(na_table.select()) self.test_case.assertTrue(db_result.rowcount >= len(self.ids)) for na in db_result: for col_name in ('label', 'network_type', 'segmentation_id', 'ip_version', 'cidr'): self.test_case.assertFalse(hasattr(na, col_name)) @map_to_migration('eb6d5544cbbd') class ShareSnapshotInstanceNewProviderLocationColumnChecks( BaseMigrationChecks): table_name = 'share_snapshot_instances' def setup_upgrade_data(self, conn): # Setup shares share_data = {'id': 'new_share_id'} s_table = utils.load_table('shares', conn) conn.execute(s_table.insert().values(share_data)) # Setup share instances share_instance_data = { 'id': 'new_share_instance_id', 'share_id': share_data['id'] } si_table = utils.load_table('share_instances', conn) conn.execute(si_table.insert().values(share_instance_data)) # Setup share snapshots share_snapshot_data = { 'id': 'new_snapshot_id', 'share_id': share_data['id']} snap_table = utils.load_table('share_snapshots', conn) conn.execute(snap_table.insert().values(share_snapshot_data)) # Setup snapshot instances snapshot_instance_data = { 'id': 'new_snapshot_instance_id', 'snapshot_id': share_snapshot_data['id'], 'share_instance_id': share_instance_data['id'] } snap_i_table = utils.load_table('share_snapshot_instances', conn) conn.execute(snap_i_table.insert().values(snapshot_instance_data)) def check_upgrade(self, conn, data): ss_table = utils.load_table(self.table_name, conn) db_result = conn.execute(ss_table.select().where( ss_table.c.id == 'new_snapshot_instance_id')) self.test_case.assertTrue(db_result.rowcount > 0) for ss in db_result: self.test_case.assertTrue(hasattr(ss, 'provider_location')) self.test_case.assertEqual('new_snapshot_id', ss.snapshot_id) def check_downgrade(self, conn): ss_table = utils.load_table(self.table_name, conn) db_result = 
conn.execute(ss_table.select().where( ss_table.c.id == 'new_snapshot_instance_id')) self.test_case.assertTrue(db_result.rowcount > 0) for ss in db_result: self.test_case.assertFalse(hasattr(ss, 'provider_location')) self.test_case.assertEqual('new_snapshot_id', ss.snapshot_id) @map_to_migration('221a83cfd85b') class ShareNetworksFieldLengthChecks(BaseMigrationChecks): def setup_upgrade_data(self, conn): user_id = '123456789123456789' project_id = 'project_id' # Create share network data share_network_data = { 'id': 'foo_share_network_id_2', 'user_id': user_id, 'project_id': project_id, } sn_table = utils.load_table('share_networks', conn) conn.execute(sn_table.insert().values(share_network_data)) # Create security_service data security_services_data = { 'id': 'foo_security_services_id', 'type': 'foo_type', 'project_id': project_id } ss_table = utils.load_table('security_services', conn) conn.execute(ss_table.insert().values(security_services_data)) def _check_length_for_table_columns(self, table_name, conn, cols, length): table = utils.load_table(table_name, conn) db_result = conn.execute(table.select()) self.test_case.assertTrue(db_result.rowcount > 0) for col in cols: self.test_case.assertEqual(table.columns.get(col).type.length, length) def check_upgrade(self, conn, data): self._check_length_for_table_columns('share_networks', conn, ('user_id', 'project_id'), 255) self._check_length_for_table_columns('security_services', conn, ('project_id',), 255) def check_downgrade(self, conn): self._check_length_for_table_columns('share_networks', conn, ('user_id', 'project_id'), 36) self._check_length_for_table_columns('security_services', conn, ('project_id',), 36) @map_to_migration('fdfb668d19e1') class NewGatewayColumnChecks(BaseMigrationChecks): na_table_name = 'network_allocations' sn_table_name = 'share_networks' na_ids = ['network_allocation_id_fake_%d' % i for i in (1, 2, 3)] sn_ids = ['share_network_id_fake_%d' % i for i in (1, 2)] def setup_upgrade_data(self, 
conn): user_id = 'user_id' project_id = 'project_id' share_server_id = 'share_server_id_foo' # Create share network share_network_data = { 'id': self.sn_ids[0], 'user_id': user_id, 'project_id': project_id, } sn_table = utils.load_table(self.sn_table_name, conn) conn.execute(sn_table.insert().values(share_network_data)) # Create share server share_server_data = { 'id': share_server_id, 'share_network_id': share_network_data['id'], 'host': 'fake_host', 'status': 'active', } ss_table = utils.load_table('share_servers', conn) conn.execute(ss_table.insert().values(share_server_data)) # Create network allocations network_allocations = [ { 'id': self.na_ids[0], 'share_server_id': share_server_id, 'ip_address': '1.1.1.1', }, { 'id': self.na_ids[1], 'share_server_id': share_server_id, 'ip_address': '2.2.2.2', }, ] na_table = utils.load_table(self.na_table_name, conn) conn.execute(na_table.insert().values(network_allocations)) def check_upgrade(self, conn, data): na_table = utils.load_table(self.na_table_name, conn) for na in conn.execute(na_table.select()): self.test_case.assertTrue(hasattr(na, 'gateway')) # Create network allocation network_allocations = [ { 'id': self.na_ids[2], 'share_server_id': na.share_server_id, 'ip_address': '3.3.3.3', 'gateway': '3.3.3.1', 'network_type': 'vlan', 'segmentation_id': 1005, 'ip_version': 4, 'cidr': '240.0.0.0/16', }, ] conn.execute(na_table.insert().values(network_allocations)) # Select network allocations with gateway info for na in conn.execute( na_table.select().where(na_table.c.gateway == '3.3.3.1')): self.test_case.assertTrue(hasattr(na, 'gateway')) self.test_case.assertEqual(network_allocations[0]['gateway'], getattr(na, 'gateway')) sn_table = utils.load_table(self.sn_table_name, conn) for sn in conn.execute(sn_table.select()): self.test_case.assertTrue(hasattr(sn, 'gateway')) # Create share network share_networks = [ { 'id': self.sn_ids[1], 'user_id': sn.user_id, 'project_id': sn.project_id, 'gateway': '1.1.1.1', 'name': 
'name_foo', }, ] conn.execute(sn_table.insert().values(share_networks)) # Select share network for sn in conn.execute( sn_table.select().where(sn_table.c.name == 'name_foo')): self.test_case.assertTrue(hasattr(sn, 'gateway')) self.test_case.assertEqual(share_networks[0]['gateway'], getattr(sn, 'gateway')) def check_downgrade(self, conn): for table_name, ids in ((self.na_table_name, self.na_ids), (self.sn_table_name, self.sn_ids)): table = utils.load_table(table_name, conn) db_result = conn.execute(table.select()) self.test_case.assertTrue(db_result.rowcount >= len(ids)) for record in db_result: self.test_case.assertFalse(hasattr(record, 'gateway')) @map_to_migration('e8ea58723178') class RemoveHostFromDriverPrivateDataChecks(BaseMigrationChecks): table_name = 'drivers_private_data' host_column_name = 'host' def setup_upgrade_data(self, conn): dpd_data = { 'created_at': datetime.datetime(2016, 7, 14, 22, 31, 22), 'deleted': 0, 'host': 'host1', 'entity_uuid': 'entity_uuid1', 'key': 'key1', 'value': 'value1' } dpd_table = utils.load_table(self.table_name, conn) conn.execute(dpd_table.insert().values(dpd_data)) def check_upgrade(self, conn, data): dpd_table = utils.load_table(self.table_name, conn) rows = conn.execute(dpd_table.select()) for row in rows: self.test_case.assertFalse(hasattr(row, self.host_column_name)) def check_downgrade(self, conn): dpd_table = utils.load_table(self.table_name, conn) rows = conn.execute(dpd_table.select()) for row in rows: self.test_case.assertTrue(hasattr(row, self.host_column_name)) self.test_case.assertEqual( 'unknown', row._mapping[self.host_column_name]) @map_to_migration('493eaffd79e1') class NewMTUColumnChecks(BaseMigrationChecks): na_table_name = 'network_allocations' sn_table_name = 'share_networks' na_ids = ['network_allocation_id_fake_3_%d' % i for i in (1, 2, 3)] sn_ids = ['share_network_id_fake_3_%d' % i for i in (1, 2)] def setup_upgrade_data(self, conn): user_id = 'user_id' project_id = 'project_id' share_server_id = 
'share_server_id_foo_2' # Create share network share_network_data = { 'id': self.sn_ids[0], 'user_id': user_id, 'project_id': project_id, } sn_table = utils.load_table(self.sn_table_name, conn) conn.execute(sn_table.insert().values(share_network_data)) # Create share server share_server_data = { 'id': share_server_id, 'share_network_id': share_network_data['id'], 'host': 'fake_host', 'status': 'active', } ss_table = utils.load_table('share_servers', conn) conn.execute(ss_table.insert().values(share_server_data)) # Create network allocations network_allocations = [ { 'id': self.na_ids[0], 'share_server_id': share_server_id, 'ip_address': '1.1.1.1', }, { 'id': self.na_ids[1], 'share_server_id': share_server_id, 'ip_address': '2.2.2.2', }, ] na_table = utils.load_table(self.na_table_name, conn) conn.execute(na_table.insert().values(network_allocations)) def check_upgrade(self, conn, data): na_table = utils.load_table(self.na_table_name, conn) for na in conn.execute(na_table.select()): self.test_case.assertTrue(hasattr(na, 'mtu')) # Create network allocation network_allocations = [ { 'id': self.na_ids[2], 'share_server_id': na.share_server_id, 'ip_address': '3.3.3.3', 'gateway': '3.3.3.1', 'network_type': 'vlan', 'segmentation_id': 1005, 'ip_version': 4, 'cidr': '240.0.0.0/16', 'mtu': 1509, }, ] conn.execute(na_table.insert().values(network_allocations)) # Select network allocations with mtu info for na in conn.execute( na_table.select().where(na_table.c.mtu == '1509')): self.test_case.assertTrue(hasattr(na, 'mtu')) self.test_case.assertEqual(network_allocations[0]['mtu'], getattr(na, 'mtu')) # Select all entries and check for the value for na in conn.execute(na_table.select()): self.test_case.assertTrue(hasattr(na, 'mtu')) if na._mapping['id'] == self.na_ids[2]: self.test_case.assertEqual(network_allocations[0]['mtu'], getattr(na, 'mtu')) else: self.test_case.assertIsNone(na._mapping['mtu']) sn_table = utils.load_table(self.sn_table_name, conn) for sn in 
conn.execute(sn_table.select()): self.test_case.assertTrue(hasattr(sn, 'mtu')) # Create share network share_networks = [ { 'id': self.sn_ids[1], 'user_id': sn.user_id, 'project_id': sn.project_id, 'gateway': '1.1.1.1', 'name': 'name_foo_2', 'mtu': 1509, }, ] conn.execute(sn_table.insert().values(share_networks)) # Select share network with MTU set for sn in conn.execute( sn_table.select().where(sn_table.c.name == 'name_foo_2')): self.test_case.assertTrue(hasattr(sn, 'mtu')) self.test_case.assertEqual(share_networks[0]['mtu'], getattr(sn, 'mtu')) # Select all entries and check for the value for sn in conn.execute(sn_table.select()): self.test_case.assertTrue(hasattr(sn, 'mtu')) if sn._mapping['id'] == self.sn_ids[1]: self.test_case.assertEqual(network_allocations[0]['mtu'], getattr(sn, 'mtu')) else: self.test_case.assertIsNone(sn._mapping['mtu']) def check_downgrade(self, conn): for table_name, ids in ((self.na_table_name, self.na_ids), (self.sn_table_name, self.sn_ids)): table = utils.load_table(table_name, conn) db_result = conn.execute(table.select()) self.test_case.assertTrue(db_result.rowcount >= len(ids)) for record in db_result: self.test_case.assertFalse(hasattr(record, 'mtu')) @map_to_migration('63809d875e32') class AddAccessKeyToShareAccessMapping(BaseMigrationChecks): table_name = 'share_access_map' access_key_column_name = 'access_key' def setup_upgrade_data(self, conn): share_data = { 'id': uuidutils.generate_uuid(), 'share_proto': "CEPHFS", 'size': 1, 'snapshot_id': None, 'user_id': 'fake', 'project_id': 'fake' } share_table = utils.load_table('shares', conn) conn.execute(share_table.insert().values(share_data)) share_instance_data = { 'id': uuidutils.generate_uuid(), 'deleted': 'False', 'host': 'fake', 'share_id': share_data['id'], 'status': 'available', 'access_rules_status': 'active' } share_instance_table = utils.load_table('share_instances', conn) conn.execute(share_instance_table.insert().values(share_instance_data)) share_access_data = { 'id': 
uuidutils.generate_uuid(), 'share_id': share_data['id'], 'access_type': 'cephx', 'access_to': 'alice', 'deleted': 'False' } share_access_table = utils.load_table(self.table_name, conn) conn.execute(share_access_table.insert().values(share_access_data)) share_instance_access_data = { 'id': uuidutils.generate_uuid(), 'share_instance_id': share_instance_data['id'], 'access_id': share_access_data['id'], 'deleted': 'False' } share_instance_access_table = utils.load_table( 'share_instance_access_map', conn) conn.execute(share_instance_access_table.insert().values( share_instance_access_data)) def check_upgrade(self, conn, data): share_access_table = utils.load_table(self.table_name, conn) rows = conn.execute(share_access_table.select()) for row in rows: self.test_case.assertTrue(hasattr(row, self.access_key_column_name)) def check_downgrade(self, conn): share_access_table = utils.load_table(self.table_name, conn) rows = conn.execute(share_access_table.select()) for row in rows: self.test_case.assertFalse(hasattr(row, self.access_key_column_name)) @map_to_migration('48a7beae3117') class MoveShareTypeIdToInstancesCheck(BaseMigrationChecks): some_shares = [ { 'id': 's1', 'share_type_id': 't1', }, { 'id': 's2', 'share_type_id': 't2', }, { 'id': 's3', 'share_type_id': 't3', }, ] share_ids = [x['id'] for x in some_shares] some_instances = [ { 'id': 'i1', 'share_id': 's3', }, { 'id': 'i2', 'share_id': 's2', }, { 'id': 'i3', 'share_id': 's2', }, { 'id': 'i4', 'share_id': 's1', }, ] instance_ids = [x['id'] for x in some_instances] some_share_types = [ {'id': 't1'}, {'id': 't2'}, {'id': 't3'}, ] def setup_upgrade_data(self, conn): shares_table = utils.load_table('shares', conn) share_instances_table = utils.load_table('share_instances', conn) share_types_table = utils.load_table('share_types', conn) for stype in self.some_share_types: conn.execute(share_types_table.insert().values(stype)) for share in self.some_shares: conn.execute(shares_table.insert().values(share)) for instance 
in self.some_instances: conn.execute(share_instances_table.insert().values(instance)) def check_upgrade(self, conn, data): shares_table = utils.load_table('shares', conn) share_instances_table = utils.load_table('share_instances', conn) for instance in conn.execute(share_instances_table.select().where( share_instances_table.c.id in self.instance_ids)): share = conn.execute(shares_table.select().where( instance['share_id'] == shares_table.c.id)).first() self.test_case.assertEqual( next((x for x in self.some_shares if share['id'] == x['id']), None)['share_type_id'], instance['share_type_id']) for share in conn.execute(share_instances_table.select().where( shares_table.c.id in self.share_ids)): self.test_case.assertNotIn('share_type_id', share) def check_downgrade(self, conn): shares_table = utils.load_table('shares', conn) share_instances_table = utils.load_table('share_instances', conn) for instance in conn.execute(share_instances_table.select().where( share_instances_table.c.id in self.instance_ids)): self.test_case.assertNotIn('share_type_id', instance) for share in conn.execute(share_instances_table.select().where( shares_table.c.id in self.share_ids)): self.test_case.assertEqual( next((x for x in self.some_shares if share['id'] == x['id']), None)['share_type_id'], share['share_type_id']) @map_to_migration('3e7d62517afa') class CreateFromSnapshotExtraSpecAndShareColumn(BaseMigrationChecks): expected_attr = constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT snap_support_attr = constants.ExtraSpecs.SNAPSHOT_SUPPORT def _get_fake_data(self): extra_specs = [] shares = [] share_instances = [] share_types = [ { 'id': uuidutils.generate_uuid(), 'deleted': 'False', 'name': 'share-type-1', 'is_public': False, }, { 'id': uuidutils.generate_uuid(), 'deleted': 'False', 'name': 'share-type-2', 'is_public': True, }, ] snapshot_support = (False, True) dhss = ('True', 'False') for idx, share_type in enumerate(share_types): extra_specs.append({ 'share_type_id': 
share_type['id'], 'spec_key': 'snapshot_support', 'spec_value': snapshot_support[idx], 'deleted': 0, }) extra_specs.append({ 'share_type_id': share_type['id'], 'spec_key': 'driver_handles_share_servers', 'spec_value': dhss[idx], 'deleted': 0, }) share = fake_share(snapshot_support=snapshot_support[idx]) shares.append(share) share_instances.append( fake_instance(share_id=share['id'], share_type_id=share_type['id']) ) return share_types, extra_specs, shares, share_instances def setup_upgrade_data(self, conn): (self.share_types, self.extra_specs, self.shares, self.share_instances) = self._get_fake_data() share_types_table = utils.load_table('share_types', conn) conn.execute(share_types_table.insert().values(self.share_types)) extra_specs_table = utils.load_table('share_type_extra_specs', conn) conn.execute(extra_specs_table.insert().values(self.extra_specs)) shares_table = utils.load_table('shares', conn) conn.execute(shares_table.insert().values(self.shares)) share_instances_table = utils.load_table('share_instances', conn) conn.execute( share_instances_table.insert().values(self.share_instances)) def check_upgrade(self, conn, data): share_type_ids = [st['id'] for st in self.share_types] share_ids = [s['id'] for s in self.shares] shares_table = utils.load_table('shares', conn) share_types_table = utils.load_table('share_types', conn) extra_specs_table = utils.load_table('share_type_extra_specs', conn) # Pre-existing Shares must be present shares_in_db = conn.execute(shares_table.select()).fetchall() share_ids_in_db = [s._mapping['id'] for s in shares_in_db] self.test_case.assertTrue(len(share_ids_in_db) > 1) for share_id in share_ids: self.test_case.assertIn(share_id, share_ids_in_db) # new shares attr must match snapshot support for share in shares_in_db: self.test_case.assertTrue(hasattr(share, self.expected_attr)) self.test_case.assertEqual(share._mapping[self.snap_support_attr], share._mapping[self.expected_attr]) # Pre-existing Share types must be present 
share_types_in_db = ( conn.execute(share_types_table.select()).fetchall()) share_type_ids_in_db = [s._mapping['id'] for s in share_types_in_db] for share_type_id in share_type_ids: self.test_case.assertIn(share_type_id, share_type_ids_in_db) # Pre-existing extra specs must be present extra_specs_in_db = ( conn.execute(extra_specs_table.select().where( extra_specs_table.c.deleted == 0)).fetchall()) self.test_case.assertGreaterEqual(len(extra_specs_in_db), len(self.extra_specs)) # New Extra spec for share types must match snapshot support for share_type_id in share_type_ids: new_extra_spec = [x for x in extra_specs_in_db if x._mapping['spec_key'] == self.expected_attr and x._mapping['share_type_id'] == share_type_id] snapshot_support_spec = [ x for x in extra_specs_in_db if x._mapping['spec_key'] == self.snap_support_attr and x._mapping['share_type_id'] == share_type_id] self.test_case.assertEqual(1, len(new_extra_spec)) self.test_case.assertEqual(1, len(snapshot_support_spec)) self.test_case.assertEqual( snapshot_support_spec[0]._mapping['spec_value'], new_extra_spec[0]._mapping['spec_value']) def check_downgrade(self, conn): share_type_ids = [st['id'] for st in self.share_types] share_ids = [s['id'] for s in self.shares] shares_table = utils.load_table('shares', conn) share_types_table = utils.load_table('share_types', conn) extra_specs_table = utils.load_table('share_type_extra_specs', conn) # Pre-existing Shares must be present shares_in_db = conn.execute(shares_table.select()).fetchall() share_ids_in_db = [s._mapping['id'] for s in shares_in_db] self.test_case.assertTrue(len(share_ids_in_db) > 1) for share_id in share_ids: self.test_case.assertIn(share_id, share_ids_in_db) # Shares should have no attr to create share from snapshot for share in shares_in_db: self.test_case.assertFalse(hasattr(share, self.expected_attr)) # Pre-existing Share types must be present share_types_in_db = ( conn.execute(share_types_table.select()).fetchall()) share_type_ids_in_db = 
[s._mapping['id'] for s in share_types_in_db] for share_type_id in share_type_ids: self.test_case.assertIn(share_type_id, share_type_ids_in_db) # Pre-existing extra specs must be present extra_specs_in_db = ( conn.execute(extra_specs_table.select().where( extra_specs_table.c.deleted == 0)).fetchall()) self.test_case.assertGreaterEqual(len(extra_specs_in_db), len(self.extra_specs)) # Share types must not have create share from snapshot extra spec for share_type_id in share_type_ids: new_extra_spec = [x for x in extra_specs_in_db if x._mapping['spec_key'] == self.expected_attr and x._mapping['share_type_id'] == share_type_id] self.test_case.assertEqual(0, len(new_extra_spec)) @map_to_migration('87ce15c59bbe') class RevertToSnapshotShareColumn(BaseMigrationChecks): expected_attr = constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT def _get_fake_data(self): extra_specs = [] shares = [] share_instances = [] share_types = [ { 'id': uuidutils.generate_uuid(), 'deleted': 'False', 'name': 'revert-1', 'is_public': False, }, { 'id': uuidutils.generate_uuid(), 'deleted': 'False', 'name': 'revert-2', 'is_public': True, }, ] snapshot_support = (False, True) dhss = ('True', 'False') for idx, share_type in enumerate(share_types): extra_specs.append({ 'share_type_id': share_type['id'], 'spec_key': 'snapshot_support', 'spec_value': snapshot_support[idx], 'deleted': 0, }) extra_specs.append({ 'share_type_id': share_type['id'], 'spec_key': 'driver_handles_share_servers', 'spec_value': dhss[idx], 'deleted': 0, }) share = fake_share(snapshot_support=snapshot_support[idx]) shares.append(share) share_instances.append( fake_instance(share_id=share['id'], share_type_id=share_type['id']) ) return share_types, extra_specs, shares, share_instances def setup_upgrade_data(self, conn): (self.share_types, self.extra_specs, self.shares, self.share_instances) = self._get_fake_data() share_types_table = utils.load_table('share_types', conn) 
conn.execute(share_types_table.insert().values(self.share_types)) extra_specs_table = utils.load_table('share_type_extra_specs', conn) conn.execute(extra_specs_table.insert().values(self.extra_specs)) shares_table = utils.load_table('shares', conn) conn.execute(shares_table.insert().values(self.shares)) share_instances_table = utils.load_table('share_instances', conn) conn.execute( share_instances_table.insert().values(self.share_instances)) def check_upgrade(self, conn, data): share_ids = [s['id'] for s in self.shares] shares_table = utils.load_table('shares', conn) # Pre-existing Shares must be present shares_in_db = conn.execute(shares_table.select().where( shares_table.c.deleted == 'False')).fetchall() share_ids_in_db = [s._mapping['id'] for s in shares_in_db] self.test_case.assertTrue(len(share_ids_in_db) > 1) for share_id in share_ids: self.test_case.assertIn(share_id, share_ids_in_db) # New shares attr must be present and set to False for share in shares_in_db: self.test_case.assertTrue(hasattr(share, self.expected_attr)) self.test_case.assertEqual( False, share._mapping[self.expected_attr]) def check_downgrade(self, conn): share_ids = [s['id'] for s in self.shares] shares_table = utils.load_table('shares', conn) # Pre-existing Shares must be present shares_in_db = conn.execute(shares_table.select()).fetchall() share_ids_in_db = [s._mapping['id'] for s in shares_in_db] self.test_case.assertTrue(len(share_ids_in_db) > 1) for share_id in share_ids: self.test_case.assertIn(share_id, share_ids_in_db) # Shares should have no attr to revert share to snapshot for share in shares_in_db: self.test_case.assertFalse(hasattr(share, self.expected_attr)) @map_to_migration('95e3cf760840') class RemoveNovaNetIdColumnFromShareNetworks(BaseMigrationChecks): table_name = 'share_networks' nova_net_column_name = 'nova_net_id' def setup_upgrade_data(self, conn): user_id = 'user_id' project_id = 'project_id' nova_net_id = 'foo_nova_net_id' share_network_data = { 'id': 
'foo_share_network_id_3', 'user_id': user_id, 'project_id': project_id, 'nova_net_id': nova_net_id, } sn_table = utils.load_table(self.table_name, conn) conn.execute(sn_table.insert().values(share_network_data)) def check_upgrade(self, conn, data): sn_table = utils.load_table(self.table_name, conn) rows = conn.execute(sn_table.select()) self.test_case.assertGreater(rows.rowcount, 0) for row in rows: self.test_case.assertFalse(hasattr(row, self.nova_net_column_name)) def check_downgrade(self, conn): sn_table = utils.load_table(self.table_name, conn) rows = conn.execute(sn_table.select()) self.test_case.assertGreater(rows.rowcount, 0) for row in rows: self.test_case.assertTrue(hasattr(row, self.nova_net_column_name)) self.test_case.assertIsNone( row._mapping[self.nova_net_column_name]) @map_to_migration('54667b9cade7') class RestoreStateToShareInstanceAccessMap(BaseMigrationChecks): new_instance_mapping_state = { constants.STATUS_ACTIVE: constants.STATUS_ACTIVE, constants.SHARE_INSTANCE_RULES_SYNCING: constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.STATUS_OUT_OF_SYNC: constants.ACCESS_STATE_QUEUED_TO_APPLY, 'updating': constants.ACCESS_STATE_QUEUED_TO_APPLY, 'updating_multiple': constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.SHARE_INSTANCE_RULES_ERROR: constants.ACCESS_STATE_ERROR, } new_access_rules_status = { constants.STATUS_ACTIVE: constants.STATUS_ACTIVE, constants.STATUS_OUT_OF_SYNC: constants.SHARE_INSTANCE_RULES_SYNCING, 'updating': constants.SHARE_INSTANCE_RULES_SYNCING, 'updating_multiple': constants.SHARE_INSTANCE_RULES_SYNCING, constants.SHARE_INSTANCE_RULES_ERROR: constants.SHARE_INSTANCE_RULES_ERROR, } @staticmethod def generate_share_instance(sid, access_rules_status): share_instance_data = { 'id': uuidutils.generate_uuid(), 'deleted': 'False', 'host': 'fake', 'share_id': sid, 'status': constants.STATUS_AVAILABLE, 'access_rules_status': access_rules_status } return share_instance_data @staticmethod def 
generate_share_instance_access_map(share_access_data_id, share_instance_id): share_instance_access_data = { 'id': uuidutils.generate_uuid(), 'share_instance_id': share_instance_id, 'access_id': share_access_data_id, 'deleted': 'False' } return share_instance_access_data def setup_upgrade_data(self, conn): share_data = { 'id': uuidutils.generate_uuid(), 'share_proto': 'fake', 'size': 1, 'snapshot_id': None, 'user_id': 'fake', 'project_id': 'fake' } share_table = utils.load_table('shares', conn) conn.execute(share_table.insert().values(share_data)) share_instances = [ self.generate_share_instance( share_data['id'], constants.STATUS_ACTIVE), self.generate_share_instance( share_data['id'], constants.STATUS_OUT_OF_SYNC), self.generate_share_instance( share_data['id'], constants.STATUS_ERROR), self.generate_share_instance( share_data['id'], 'updating'), self.generate_share_instance( share_data['id'], 'updating_multiple'), ] self.updating_share_instance = share_instances[3] self.updating_multiple_share_instance = share_instances[4] share_instance_table = utils.load_table('share_instances', conn) for share_instance_data in share_instances: conn.execute( share_instance_table.insert().values(share_instance_data)) share_access_data = { 'id': uuidutils.generate_uuid(), 'share_id': share_data['id'], 'access_type': 'fake', 'access_to': 'alice', 'deleted': 'False' } share_access_table = utils.load_table('share_access_map', conn) conn.execute(share_access_table.insert().values(share_access_data)) share_instance_access_data = [] for share_instance in share_instances: sia_map = self.generate_share_instance_access_map( share_access_data['id'], share_instance['id']) share_instance_access_data.append(sia_map) share_instance_access_table = utils.load_table( 'share_instance_access_map', conn) for sia_map in share_instance_access_data: conn.execute(share_instance_access_table.insert().values(sia_map)) def check_upgrade(self, conn, data): share_instance_table = 
utils.load_table('share_instances', conn) sia_table = utils.load_table('share_instance_access_map', conn) for rule in conn.execute(sia_table.select()): self.test_case.assertTrue(hasattr(rule, 'state')) correlated_share_instances = conn.execute( share_instance_table.select().where( share_instance_table.c.id == rule._mapping['share_instance_id'])) access_rules_status = getattr(correlated_share_instances.first(), 'access_rules_status') self.test_case.assertEqual( self.new_instance_mapping_state[access_rules_status], rule._mapping['state']) for instance in conn.execute(share_instance_table.select()): self.test_case.assertTrue(instance._mapping['access_rules_status'] not in ('updating', 'updating_multiple', constants.STATUS_OUT_OF_SYNC)) if instance._mapping['id'] in ( self.updating_share_instance['id'], self.updating_multiple_share_instance['id'] ): self.test_case.assertEqual( constants.SHARE_INSTANCE_RULES_SYNCING, instance._mapping['access_rules_status']) def check_downgrade(self, conn): share_instance_table = utils.load_table('share_instances', conn) sia_table = utils.load_table('share_instance_access_map', conn) for rule in conn.execute(sia_table.select()): self.test_case.assertFalse(hasattr(rule, 'state')) for instance in conn.execute(share_instance_table.select()): if instance._mapping['id'] in ( self.updating_share_instance['id'], self.updating_multiple_share_instance['id'] ): self.test_case.assertEqual( constants.STATUS_OUT_OF_SYNC, instance._mapping['access_rules_status']) @map_to_migration('e9f79621d83f') class AddCastRulesToReadonlyToInstances(BaseMigrationChecks): share_type = { 'id': uuidutils.generate_uuid(), } shares = [ { 'id': uuidutils.generate_uuid(), 'replication_type': constants.REPLICATION_TYPE_READABLE, }, { 'id': uuidutils.generate_uuid(), 'replication_type': constants.REPLICATION_TYPE_READABLE, }, { 'id': uuidutils.generate_uuid(), 'replication_type': constants.REPLICATION_TYPE_WRITABLE, }, { 'id': uuidutils.generate_uuid(), }, ] share_ids = 
[x['id'] for x in shares] correct_instance = { 'id': uuidutils.generate_uuid(), 'share_id': share_ids[1], 'replica_state': constants.REPLICA_STATE_IN_SYNC, 'status': constants.STATUS_AVAILABLE, 'share_type_id': share_type['id'], } instances = [ { 'id': uuidutils.generate_uuid(), 'share_id': share_ids[0], 'replica_state': constants.REPLICA_STATE_ACTIVE, 'status': constants.STATUS_AVAILABLE, 'share_type_id': share_type['id'], }, { 'id': uuidutils.generate_uuid(), 'share_id': share_ids[0], 'replica_state': constants.REPLICA_STATE_IN_SYNC, 'status': constants.STATUS_REPLICATION_CHANGE, 'share_type_id': share_type['id'], }, { 'id': uuidutils.generate_uuid(), 'share_id': share_ids[1], 'replica_state': constants.REPLICA_STATE_ACTIVE, 'status': constants.STATUS_REPLICATION_CHANGE, 'share_type_id': share_type['id'], }, correct_instance, { 'id': uuidutils.generate_uuid(), 'share_id': share_ids[2], 'replica_state': constants.REPLICA_STATE_ACTIVE, 'status': constants.STATUS_REPLICATION_CHANGE, 'share_type_id': share_type['id'], }, { 'id': uuidutils.generate_uuid(), 'share_id': share_ids[2], 'replica_state': constants.REPLICA_STATE_IN_SYNC, 'status': constants.STATUS_AVAILABLE, 'share_type_id': share_type['id'], }, { 'id': uuidutils.generate_uuid(), 'share_id': share_ids[3], 'status': constants.STATUS_AVAILABLE, 'share_type_id': share_type['id'], }, ] instance_ids = share_ids = [x['id'] for x in instances] def setup_upgrade_data(self, conn): shares_table = utils.load_table('shares', conn) share_instances_table = utils.load_table('share_instances', conn) share_types_table = utils.load_table('share_types', conn) conn.execute(share_types_table.insert().values(self.share_type)) for share in self.shares: conn.execute(shares_table.insert().values(share)) for instance in self.instances: conn.execute(share_instances_table.insert().values(instance)) def check_upgrade(self, conn, data): shares_table = utils.load_table('shares', conn) share_instances_table = 
utils.load_table('share_instances', conn) for instance in conn.execute(share_instances_table.select().where( share_instances_table.c.id in self.instance_ids)): self.test_case.assertIn('cast_rules_to_readonly', instance) share = conn.execute(shares_table.select().where( instance._mapping['share_id'] == shares_table.c.id)).first() if (instance['replica_state'] != constants.REPLICA_STATE_ACTIVE and share._mapping['replication_type'] == constants.REPLICATION_TYPE_READABLE and instance._mapping['status'] != constants.STATUS_REPLICATION_CHANGE): self.test_case.assertTrue( instance._mapping['cast_rules_to_readonly']) self.test_case.assertEqual(instance._mapping['id'], self.correct_instance['id']) else: self.test_case.assertEqual( False, instance._mapping['cast_rules_to_readonly']) def check_downgrade(self, conn): share_instances_table = utils.load_table('share_instances', conn) for instance in conn.execute(share_instances_table.select()): self.test_case.assertNotIn('cast_rules_to_readonly', instance) @map_to_migration('03da71c0e321') class ShareGroupMigrationChecks(BaseMigrationChecks): def setup_upgrade_data(self, conn): # Create share type self.share_type_id = uuidutils.generate_uuid() st_fixture = { 'deleted': "False", 'id': self.share_type_id, } st_table = utils.load_table('share_types', conn) conn.execute(st_table.insert().values(st_fixture)) # Create CG self.cg_id = uuidutils.generate_uuid() cg_fixture = { 'deleted': "False", 'id': self.cg_id, 'user_id': 'fake_user', 'project_id': 'fake_project_id', } cg_table = utils.load_table('consistency_groups', conn) conn.execute(cg_table.insert().values(cg_fixture)) # Create share_type group mapping self.mapping_id = uuidutils.generate_uuid() mapping_fixture = { 'deleted': "False", 'id': self.mapping_id, 'consistency_group_id': self.cg_id, 'share_type_id': self.share_type_id, } mapping_table = utils.load_table( 'consistency_group_share_type_mappings', conn) conn.execute(mapping_table.insert().values(mapping_fixture)) # Create 
share self.share_id = uuidutils.generate_uuid() share_fixture = { 'deleted': "False", 'id': self.share_id, 'consistency_group_id': self.cg_id, 'user_id': 'fake_user', 'project_id': 'fake_project_id', } share_table = utils.load_table('shares', conn) conn.execute(share_table.insert().values(share_fixture)) # Create share instance self.share_instance_id = uuidutils.generate_uuid() share_instance_fixture = { 'deleted': "False", 'share_type_id': self.share_type_id, 'id': self.share_instance_id, 'share_id': self.share_id, 'cast_rules_to_readonly': False, } share_instance_table = utils.load_table('share_instances', conn) conn.execute( share_instance_table.insert().values(share_instance_fixture)) # Create cgsnapshot self.cgsnapshot_id = uuidutils.generate_uuid() cg_snap_fixture = { 'deleted': "False", 'id': self.cgsnapshot_id, 'consistency_group_id': self.cg_id, 'user_id': 'fake_user', 'project_id': 'fake_project_id', } cgsnapshots_table = utils.load_table('cgsnapshots', conn) conn.execute(cgsnapshots_table.insert().values(cg_snap_fixture)) # Create cgsnapshot member self.cgsnapshot_member_id = uuidutils.generate_uuid() cg_snap_member_fixture = { 'deleted': "False", 'id': self.cgsnapshot_member_id, 'cgsnapshot_id': self.cgsnapshot_id, 'share_type_id': self.share_type_id, 'share_instance_id': self.share_instance_id, 'share_id': self.share_id, 'user_id': 'fake_user', 'project_id': 'fake_project_id', } cgsnapshot_members_table = utils.load_table( 'cgsnapshot_members', conn) conn.execute( cgsnapshot_members_table.insert().values(cg_snap_member_fixture)) def check_upgrade(self, conn, data): sg_table = utils.load_table("share_groups", conn) db_result = conn.execute(sg_table.select().where( sg_table.c.id == self.cg_id)) self.test_case.assertEqual(1, db_result.rowcount) sg = db_result.first() self.test_case.assertIsNone( sg._mapping['source_share_group_snapshot_id']) share_table = utils.load_table("shares", conn) share_result = conn.execute(share_table.select().where( 
share_table.c.id == self.share_id)) self.test_case.assertEqual(1, share_result.rowcount) share = share_result.first() self.test_case.assertEqual( self.cg_id, share._mapping['share_group_id']) self.test_case.assertIsNone( share._mapping['source_share_group_snapshot_member_id']) mapping_table = utils.load_table( "share_group_share_type_mappings", conn) mapping_result = conn.execute(mapping_table.select().where( mapping_table.c.id == self.mapping_id)) self.test_case.assertEqual(1, mapping_result.rowcount) mapping_record = mapping_result.first() self.test_case.assertEqual( self.cg_id, mapping_record._mapping['share_group_id']) self.test_case.assertEqual( self.share_type_id, mapping_record._mapping['share_type_id']) sgs_table = utils.load_table("share_group_snapshots", conn) db_result = conn.execute(sgs_table.select().where( sgs_table.c.id == self.cgsnapshot_id)) self.test_case.assertEqual(1, db_result.rowcount) sgs = db_result.first() self.test_case.assertEqual(self.cg_id, sgs._mapping['share_group_id']) sgsm_table = utils.load_table("share_group_snapshot_members", conn) db_result = conn.execute(sgsm_table.select().where( sgsm_table.c.id == self.cgsnapshot_member_id)) self.test_case.assertEqual(1, db_result.rowcount) sgsm = db_result.first() self.test_case.assertEqual( self.cgsnapshot_id, sgsm._mapping['share_group_snapshot_id']) self.test_case.assertNotIn('share_type_id', sgsm) def check_downgrade(self, conn): cg_table = utils.load_table("consistency_groups", conn) db_result = conn.execute(cg_table.select().where( cg_table.c.id == self.cg_id)) self.test_case.assertEqual(1, db_result.rowcount) cg = db_result.first() self.test_case.assertIsNone(cg._mapping['source_cgsnapshot_id']) share_table = utils.load_table("shares", conn) share_result = conn.execute(share_table.select().where( share_table.c.id == self.share_id)) self.test_case.assertEqual(1, share_result.rowcount) share = share_result.first() self.test_case.assertEqual( self.cg_id, 
share._mapping['consistency_group_id']) self.test_case.assertIsNone( share._mapping['source_cgsnapshot_member_id']) mapping_table = utils.load_table( "consistency_group_share_type_mappings", conn) mapping_result = conn.execute(mapping_table.select().where( mapping_table.c.id == self.mapping_id)) self.test_case.assertEqual(1, mapping_result.rowcount) cg_st_mapping = mapping_result.first() self.test_case.assertEqual( self.cg_id, cg_st_mapping._mapping['consistency_group_id']) self.test_case.assertEqual( self.share_type_id, cg_st_mapping._mapping['share_type_id']) cg_snapshots_table = utils.load_table("cgsnapshots", conn) db_result = conn.execute(cg_snapshots_table.select().where( cg_snapshots_table.c.id == self.cgsnapshot_id)) self.test_case.assertEqual(1, db_result.rowcount) cgsnap = db_result.first() self.test_case.assertEqual( self.cg_id, cgsnap._mapping['consistency_group_id']) cg_snap_member_table = utils.load_table("cgsnapshot_members", conn) db_result = conn.execute(cg_snap_member_table.select().where( cg_snap_member_table.c.id == self.cgsnapshot_member_id)) self.test_case.assertEqual(1, db_result.rowcount) member = db_result.first() self.test_case.assertEqual( self.cgsnapshot_id, member._mapping['cgsnapshot_id']) self.test_case.assertIn('share_type_id', member._mapping) self.test_case.assertEqual( self.share_type_id, member._mapping['share_type_id']) @map_to_migration('927920b37453') class ShareGroupSnapshotMemberNewProviderLocationColumnChecks( BaseMigrationChecks): table_name = 'share_group_snapshot_members' share_group_type_id = uuidutils.generate_uuid() share_group_id = uuidutils.generate_uuid() share_id = uuidutils.generate_uuid() share_instance_id = uuidutils.generate_uuid() share_group_snapshot_id = uuidutils.generate_uuid() share_group_snapshot_member_id = uuidutils.generate_uuid() def setup_upgrade_data(self, conn): # Setup share group type sgt_data = { 'id': self.share_group_type_id, 'name': uuidutils.generate_uuid(), } sgt_table = 
utils.load_table('share_group_types', conn) conn.execute(sgt_table.insert().values(sgt_data)) # Setup share group sg_data = { 'id': self.share_group_id, 'project_id': 'fake_project_id', 'user_id': 'fake_user_id', 'share_group_type_id': self.share_group_type_id, } sg_table = utils.load_table('share_groups', conn) conn.execute(sg_table.insert().values(sg_data)) # Setup shares share_data = { 'id': self.share_id, 'share_group_id': self.share_group_id, } s_table = utils.load_table('shares', conn) conn.execute(s_table.insert().values(share_data)) # Setup share instances share_instance_data = { 'id': self.share_instance_id, 'share_id': share_data['id'], 'cast_rules_to_readonly': False, } si_table = utils.load_table('share_instances', conn) conn.execute(si_table.insert().values(share_instance_data)) # Setup share group snapshot sgs_data = { 'id': self.share_group_snapshot_id, 'share_group_id': self.share_group_id, 'project_id': 'fake_project_id', 'user_id': 'fake_user_id', } sgs_table = utils.load_table('share_group_snapshots', conn) conn.execute(sgs_table.insert().values(sgs_data)) # Setup share group snapshot member sgsm_data = { 'id': self.share_group_snapshot_member_id, 'share_group_snapshot_id': self.share_group_snapshot_id, 'share_id': self.share_id, 'share_instance_id': self.share_instance_id, 'project_id': 'fake_project_id', 'user_id': 'fake_user_id', } sgsm_table = utils.load_table(self.table_name, conn) conn.execute(sgsm_table.insert().values(sgsm_data)) def check_upgrade(self, conn, data): sgsm_table = utils.load_table(self.table_name, conn) db_result = conn.execute(sgsm_table.select().where( sgsm_table.c.id == self.share_group_snapshot_member_id)) self.test_case.assertEqual(1, db_result.rowcount) for sgsm in db_result: self.test_case.assertTrue(hasattr(sgsm, 'provider_location')) # Check that we can write string data to the new field # pylint: disable=no-value-for-parameter conn.execute(sgsm_table.update().where( sgsm_table.c.id == 
self.share_group_snapshot_member_id, ).values({ 'provider_location': ('z' * 255), })) def check_downgrade(self, conn): sgsm_table = utils.load_table(self.table_name, conn) db_result = conn.execute(sgsm_table.select().where( sgsm_table.c.id == self.share_group_snapshot_member_id)) self.test_case.assertEqual(1, db_result.rowcount) for sgsm in db_result: self.test_case.assertFalse(hasattr(sgsm, 'provider_location')) @map_to_migration('d5db24264f5c') class ShareGroupNewConsistentSnapshotSupportColumnChecks(BaseMigrationChecks): table_name = 'share_groups' new_attr_name = 'consistent_snapshot_support' share_group_type_id = uuidutils.generate_uuid() share_group_id = uuidutils.generate_uuid() def setup_upgrade_data(self, conn): # Setup share group type sgt_data = { 'id': self.share_group_type_id, 'name': uuidutils.generate_uuid(), } sgt_table = utils.load_table('share_group_types', conn) conn.execute(sgt_table.insert().values(sgt_data)) # Setup share group sg_data = { 'id': self.share_group_id, 'project_id': 'fake_project_id', 'user_id': 'fake_user_id', 'share_group_type_id': self.share_group_type_id, } sg_table = utils.load_table('share_groups', conn) conn.execute(sg_table.insert().values(sg_data)) def check_upgrade(self, conn, data): sg_table = utils.load_table(self.table_name, conn) db_result = conn.execute(sg_table.select().where( sg_table.c.id == self.share_group_id)) self.test_case.assertEqual(1, db_result.rowcount) for sg in db_result: self.test_case.assertTrue(hasattr(sg, self.new_attr_name)) # Check that we can write proper enum data to the new field for value in (None, 'pool', 'host'): # pylint: disable=no-value-for-parameter conn.execute(sg_table.update().where( sg_table.c.id == self.share_group_id, ).values({self.new_attr_name: value})) # Check that we cannot write values that are not allowed by enum. 
@map_to_migration('7d142971c4ef')
class ReservationExpireIndexChecks(BaseMigrationChecks):
    """Checks the (deleted, expire) index on the 'reservations' table."""

    def setup_upgrade_data(self, conn):
        # No fixture data needed; only schema (index) presence is checked.
        pass

    def _get_reservations_expire_delete_index(self, conn):
        # Return the index whose column set is exactly {'deleted', 'expire'},
        # or None (implicitly) when no such index exists.
        reservation_table = utils.load_table('reservations', conn)
        members = ['deleted', 'expire']
        for idx in reservation_table.indexes:
            if sorted(idx.columns.keys()) == members:
                return idx

    def check_upgrade(self, conn, data):
        self.test_case.assertTrue(
            self._get_reservations_expire_delete_index(conn))

    def check_downgrade(self, conn):
        self.test_case.assertFalse(
            self._get_reservations_expire_delete_index(conn))


@map_to_migration('5237b6625330')
class ShareGroupNewAvailabilityZoneIDColumnChecks(BaseMigrationChecks):
    """Checks the 'availability_zone_id' column added to 'share_groups'."""

    table_name = 'share_groups'
    new_attr_name = 'availability_zone_id'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    availability_zone_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Insert an AZ, a share group type and a share group fixture."""
        # Setup AZ
        az_data = {
            'id': self.availability_zone_id,
            'name': uuidutils.generate_uuid(),
        }
        az_table = utils.load_table('availability_zones', conn)
        conn.execute(az_table.insert().values(az_data))

        # Setup share group type
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', conn)
        conn.execute(sgt_table.insert().values(sgt_data))

        # Setup share group
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', conn)
        conn.execute(sg_table.insert().values(sg_data))

    def check_upgrade(self, conn, data):
        """Verify the new column exists and accepts NULL and an AZ id."""
        sg_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertTrue(hasattr(sg, self.new_attr_name))

        # Check that we can write proper data to the new field
        for value in (None, self.availability_zone_id):
            # pylint: disable=no-value-for-parameter
            conn.execute(sg_table.update().where(
                sg_table.c.id == self.share_group_id,
            ).values({self.new_attr_name: value}))

    def check_downgrade(self, conn):
        """Verify the column is removed again on downgrade."""
        sg_table = utils.load_table(self.table_name, conn)
        db_result = conn.execute(sg_table.select().where(
            sg_table.c.id == self.share_group_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sg in db_result:
            self.test_case.assertFalse(hasattr(sg, self.new_attr_name))
@map_to_migration('31252d671ae5')
class SquashSGSnapshotMembersAndSSIModelsChecks(BaseMigrationChecks):
    """Checks the squash of 'share_group_snapshot_members' into the
    'share_snapshot_instances' table and its reversal.
    """

    old_table_name = 'share_group_snapshot_members'
    new_table_name = 'share_snapshot_instances'
    share_group_type_id = uuidutils.generate_uuid()
    share_group_id = uuidutils.generate_uuid()
    share_id = uuidutils.generate_uuid()
    share_instance_id = uuidutils.generate_uuid()
    share_group_snapshot_id = uuidutils.generate_uuid()
    share_group_snapshot_member_id = uuidutils.generate_uuid()
    # Columns that must be carried over in both migration directions.
    keys = (
        'user_id', 'project_id',
        'size', 'share_proto', 'share_group_snapshot_id',
    )

    def setup_upgrade_data(self, conn):
        """Build the full fixture chain down to one SG snapshot member."""
        # Setup share group type
        sgt_data = {
            'id': self.share_group_type_id,
            'name': uuidutils.generate_uuid(),
        }
        sgt_table = utils.load_table('share_group_types', conn)
        conn.execute(sgt_table.insert().values(sgt_data))

        # Setup share group
        sg_data = {
            'id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
            'share_group_type_id': self.share_group_type_id,
        }
        sg_table = utils.load_table('share_groups', conn)
        conn.execute(sg_table.insert().values(sg_data))

        # Setup shares
        share_data = {
            'id': self.share_id,
            'share_group_id': self.share_group_id,
        }
        s_table = utils.load_table('shares', conn)
        conn.execute(s_table.insert().values(share_data))

        # Setup share instances
        share_instance_data = {
            'id': self.share_instance_id,
            'share_id': share_data['id'],
            'cast_rules_to_readonly': False,
        }
        si_table = utils.load_table('share_instances', conn)
        conn.execute(si_table.insert().values(share_instance_data))

        # Setup share group snapshot
        sgs_data = {
            'id': self.share_group_snapshot_id,
            'share_group_id': self.share_group_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgs_table = utils.load_table('share_group_snapshots', conn)
        conn.execute(sgs_table.insert().values(sgs_data))

        # Setup share group snapshot member
        sgsm_data = {
            'id': self.share_group_snapshot_member_id,
            'share_group_snapshot_id': self.share_group_snapshot_id,
            'share_id': self.share_id,
            'share_instance_id': self.share_instance_id,
            'project_id': 'fake_project_id',
            'user_id': 'fake_user_id',
        }
        sgsm_table = utils.load_table(self.old_table_name, conn)
        conn.execute(sgsm_table.insert().values(sgsm_data))

    def check_upgrade(self, conn, data):
        """Verify the member row moved into SSI and the old table is gone."""
        ssi_table = utils.load_table(self.new_table_name, conn)
        db_result = conn.execute(ssi_table.select().where(
            ssi_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for ssi in db_result:
            for key in self.keys:
                self.test_case.assertTrue(hasattr(ssi, key))

        # Check that we can write string data to the new fields
        # pylint: disable=no-value-for-parameter
        conn.execute(ssi_table.update().where(
            ssi_table.c.id == self.share_group_snapshot_member_id,
        ).values({
            'user_id': ('u' * 255),
            'project_id': ('p' * 255),
            'share_proto': ('s' * 255),
            'size': 123456789,
            'share_group_snapshot_id': self.share_group_snapshot_id,
        }))

        # Check that table 'share_group_snapshot_members' does not
        # exist anymore
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError,
            utils.load_table, 'share_group_snapshot_members', conn)

    def check_downgrade(self, conn):
        """Verify the member row moved back and is absent from SSI."""
        sgsm_table = utils.load_table(self.old_table_name, conn)
        db_result = conn.execute(sgsm_table.select().where(
            sgsm_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(1, db_result.rowcount)
        for sgsm in db_result:
            for key in self.keys:
                self.test_case.assertTrue(hasattr(sgsm, key))

        # Check that create SGS member is absent in SSI table
        ssi_table = utils.load_table(self.new_table_name, conn)
        db_result = conn.execute(ssi_table.select().where(
            ssi_table.c.id == self.share_group_snapshot_member_id))
        self.test_case.assertEqual(0, db_result.rowcount)
@map_to_migration('238720805ce1')
class MessagesTableChecks(BaseMigrationChecks):
    """Checks creation and removal of the 'messages' table."""

    new_table_name = 'messages'

    def setup_upgrade_data(self, conn):
        # No pre-existing data needed; the upgrade check inserts its own row.
        pass

    def check_upgrade(self, conn, data):
        """Insert a fully populated row to prove the new schema accepts it."""
        message_data = {
            'id': uuidutils.generate_uuid(),
            'project_id': 'x' * 255,
            'request_id': 'x' * 255,
            'resource_type': 'x' * 255,
            'resource_id': 'y' * 36,
            'action_id': 'y' * 10,
            'detail_id': 'y' * 10,
            'message_level': 'x' * 255,
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'expires_at': datetime.datetime(2017, 7, 11, 18, 5, 58),
        }
        new_table = utils.load_table(self.new_table_name, conn)
        conn.execute(new_table.insert().values(message_data))

    def check_downgrade(self, conn):
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    'messages', conn)


@map_to_migration('b516de97bfee')
class ProjectShareTypesQuotasChecks(BaseMigrationChecks):
    """Checks the 'project_share_type_quotas' table plus the
    'share_type_id' column added to quota usages and reservations.
    """

    new_table_name = 'project_share_type_quotas'
    usages_table = 'quota_usages'
    reservations_table = 'reservations'
    st_record_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Insert the share type that quota rows will reference."""
        # Create share type
        self.st_data = {
            'id': self.st_record_id,
            'name': uuidutils.generate_uuid(),
            'deleted': "False",
        }
        st_table = utils.load_table('share_types', conn)
        conn.execute(st_table.insert().values(self.st_data))

    def check_upgrade(self, conn, data):
        """Insert quota, usage and reservation rows carrying share_type_id."""
        # Create share type quota
        self.quota_data = {
            'project_id': 'x' * 255,
            'resource': 'y' * 255,
            'hard_limit': 987654321,
            'created_at': datetime.datetime(2017, 4, 11, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'share_type_id': self.st_record_id,
        }
        new_table = utils.load_table(self.new_table_name, conn)
        conn.execute(new_table.insert().values(self.quota_data))

        # Create usage record
        self.usages_data = {
            'project_id': 'x' * 255,
            'user_id': None,
            'share_type_id': self.st_record_id,
            'resource': 'y' * 255,
            'in_use': 13,
            'reserved': 15,
        }
        usages_table = utils.load_table(self.usages_table, conn)
        conn.execute(usages_table.insert().values(self.usages_data))

        # Create reservation record
        self.reservations_data = {
            'uuid': uuidutils.generate_uuid(),
            'usage_id': 1,
            'project_id': 'x' * 255,
            'user_id': None,
            'share_type_id': self.st_record_id,
            'resource': 'y' * 255,
            'delta': 13,
            'expire': datetime.datetime(2399, 4, 11, 18, 5, 58),
        }
        reservations_table = utils.load_table(self.reservations_table, conn)
        conn.execute(
            reservations_table.insert().values(self.reservations_data))

    def check_downgrade(self, conn):
        """Verify the table is dropped and share_type_id removed elsewhere."""
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError, utils.load_table, self.new_table_name,
            conn)
        for table_name in (self.usages_table, self.reservations_table):
            table = utils.load_table(table_name, conn)
            db_result = conn.execute(table.select())
            self.test_case.assertGreater(db_result.rowcount, 0)
            for row in db_result:
                self.test_case.assertFalse(hasattr(row, 'share_type_id'))
@map_to_migration('829a09b0ddd4')
class FixProjectShareTypesQuotasUniqueConstraintChecks(BaseMigrationChecks):
    """Checks the relaxed unique constraint on 'project_share_type_quotas':
    after the upgrade, two quota rows that differ only in project id must
    both be insertable for the same share type and resource.
    """

    st_record_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Insert the share type the quota rows will reference."""
        # Create share type
        self.st_data = {
            'id': self.st_record_id,
            'name': uuidutils.generate_uuid(),
            'deleted': "False",
        }
        share_types_table = utils.load_table('share_types', conn)
        conn.execute(share_types_table.insert().values(self.st_data))

    def check_upgrade(self, conn, data):
        """Insert two quota rows whose project ids share a prefix."""
        for project_id in ('x' * 255, 'x'):
            # Create share type quota
            self.quota_data = {
                'project_id': project_id,
                'resource': 'y' * 255,
                'hard_limit': 987654321,
                'created_at': datetime.datetime(2017, 4, 11, 18, 5, 58),
                'updated_at': None,
                'deleted_at': None,
                'deleted': 0,
                'share_type_id': self.st_record_id,
            }
            quotas_table = utils.load_table('project_share_type_quotas', conn)
            conn.execute(quotas_table.insert().values(self.quota_data))

    def check_downgrade(self, conn):
        # Nothing to verify: the downgrade only restores the constraint.
        pass
@map_to_migration('27cb96d991fa')
class NewDescriptionColumnChecks(BaseMigrationChecks):
    """Checks the 'description' column added to the 'share_types' table."""

    st_table_name = 'share_types'
    st_ids = ['share_type_id_fake_3_%d' % i for i in (1, 2)]

    def setup_upgrade_data(self, conn):
        """Insert one share type without a description."""
        # Create share type
        share_type_data = {
            'id': self.st_ids[0],
            'name': 'name_1',
        }
        st_table = utils.load_table(self.st_table_name, conn)
        conn.execute(st_table.insert().values(share_type_data))

    def check_upgrade(self, conn, data):
        """Verify the column exists and round-trips an inserted value."""
        st_table = utils.load_table(self.st_table_name, conn)
        for na in conn.execute(st_table.select()):
            self.test_case.assertTrue(hasattr(na, 'description'))

        share_type_data_ds = {
            'id': self.st_ids[1],
            'name': 'name_1',
            'description': 'description_1',
        }
        conn.execute(st_table.insert().values(share_type_data_ds))
        st = conn.execute(st_table.select().where(
            share_type_data_ds['id'] == st_table.c.id)).first()
        self.test_case.assertEqual(
            share_type_data_ds['description'], st._mapping['description'])

    def check_downgrade(self, conn):
        """Verify the column is removed again on downgrade."""
        table = utils.load_table(self.st_table_name, conn)
        db_result = conn.execute(table.select())
        for record in db_result:
            self.test_case.assertFalse(hasattr(record, 'description'))


@map_to_migration('4a482571410f')
class BackenInfoTableChecks(BaseMigrationChecks):
    """Checks creation of the 'backend_info' table.

    NOTE(review): the class name misspells "Backend"; presumably nothing
    references it by name outside this module — verify before renaming.
    """

    new_table_name = 'backend_info'

    def setup_upgrade_data(self, conn):
        # No fixture data needed; the upgrade check inserts its own row.
        pass

    def check_upgrade(self, conn, data):
        """Insert a fully populated row to prove the new schema accepts it."""
        data = {
            'host': 'test_host',
            'info_hash': 'test_hash',
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
        }
        new_table = utils.load_table(self.new_table_name, conn)
        conn.execute(new_table.insert().values(data))

    def check_downgrade(self, conn):
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    self.new_table_name, conn)
@map_to_migration('579c267fbb4d')
class ShareInstanceAccessMapTableChecks(BaseMigrationChecks):
    """Checks moving the access rule 'state' from 'share_access_map' to the
    per-instance 'share_instance_access_map' table.
    """

    share_access_table = 'share_access_map'
    share_instance_access_table = 'share_instance_access_map'

    @staticmethod
    def generate_share_instance(share_id, **kwargs):
        """Build a share instance row dict; kwargs override defaults."""
        share_instance_data = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': share_id,
            'status': constants.STATUS_AVAILABLE,
        }
        share_instance_data.update(**kwargs)
        return share_instance_data

    @staticmethod
    def generate_share_access_map(share_id, **kwargs):
        """Build a share access row dict; kwargs override defaults."""
        share_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_id': share_id,
            'deleted': 'False',
            'access_type': 'ip',
            'access_to': '192.0.2.10',
        }
        share_access_data.update(**kwargs)
        return share_access_data

    def setup_upgrade_data(self, conn):
        """Insert a share, two instances and two access rules (active/error)."""
        share = {
            'id': uuidutils.generate_uuid(),
            'share_proto': 'fake',
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        share_table = utils.load_table('shares', conn)
        conn.execute(share_table.insert().values(share))

        share_instances = [
            self.generate_share_instance(share['id']),
            self.generate_share_instance(share['id']),
        ]
        share_instance_table = utils.load_table('share_instances', conn)
        for share_instance in share_instances:
            conn.execute(
                share_instance_table.insert().values(share_instance))

        share_accesses = [
            self.generate_share_access_map(
                share['id'], state=constants.ACCESS_STATE_ACTIVE),
            self.generate_share_access_map(
                share['id'], state=constants.ACCESS_STATE_ERROR),
        ]
        # Remember both rows so the checks below can find them by id.
        self.active_share_access = share_accesses[0]
        self.error_share_access = share_accesses[1]
        share_access_table = utils.load_table('share_access_map', conn)
        conn.execute(share_access_table.insert().values(share_accesses))

    def check_upgrade(self, conn, data):
        """Verify 'state' left the access table and landed per-instance."""
        share_access_table = utils.load_table(
            self.share_access_table, conn)
        share_instance_access_table = utils.load_table(
            self.share_instance_access_table, conn)
        share_accesses = conn.execute(share_access_table.select())
        share_instance_accesses = conn.execute(
            share_instance_access_table.select())

        for share_access in share_accesses:
            self.test_case.assertFalse(hasattr(share_access, 'state'))

        for si_access in share_instance_accesses:
            if si_access._mapping['access_id'] in (
                self.active_share_access['id'],
                self.error_share_access['id']
            ):
                self.test_case.assertIn(si_access._mapping['state'],
                                        (self.active_share_access['state'],
                                         self.error_share_access['state']))

    def check_downgrade(self, conn):
        """Verify the instance table is gone and 'state' is restored."""
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError, utils.load_table,
            self.share_instance_access_table, conn)
        share_access_table = utils.load_table(
            self.share_access_table, conn)
        share_accesses = conn.execute(share_access_table.select().where(
            share_access_table.c.id.in_((self.active_share_access['id'],
                                         self.error_share_access['id']))))

        for share_access in share_accesses:
            self.test_case.assertTrue(hasattr(share_access, 'state'))
            if share_access._mapping['id'] == self.active_share_access['id']:
                self.test_case.assertEqual(
                    constants.ACCESS_STATE_ACTIVE,
                    share_access._mapping['state'])
            elif share_access._mapping['id'] == self.error_share_access['id']:
                self.test_case.assertEqual(
                    constants.ACCESS_STATE_ERROR,
                    share_access._mapping['state'])
@map_to_migration('097fad24d2fc')
class ShareInstancesShareIdIndexChecks(BaseMigrationChecks):
    """Checks the 'share_instances_share_id_idx' index on share_instances."""

    def setup_upgrade_data(self, conn):
        # No data required; only the presence of the index is checked.
        pass

    def _get_share_instances_share_id_index(self, conn):
        """Return the named index when present, otherwise None."""
        table = utils.load_table('share_instances', conn)
        return next(
            (index for index in table.indexes
             if index.name == 'share_instances_share_id_idx'),
            None)

    def check_upgrade(self, conn, data):
        self.test_case.assertTrue(
            self._get_share_instances_share_id_index(conn))

    def check_downgrade(self, conn):
        self.test_case.assertFalse(
            self._get_share_instances_share_id_index(conn))
@map_to_migration('11ee96se625f3')
class AccessMetadataTableChecks(BaseMigrationChecks):
    """Checks creation of the 'share_access_rules_metadata' table."""

    new_table_name = 'share_access_rules_metadata'
    record_access_id = uuidutils.generate_uuid()

    def setup_upgrade_data(self, conn):
        """Build a share -> instance -> access rule fixture chain."""
        share_data = {
            'id': uuidutils.generate_uuid(),
            'share_proto': "NFS",
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        share_table = utils.load_table('shares', conn)
        conn.execute(share_table.insert().values(share_data))

        share_instance_data = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': share_data['id'],
            'status': 'available',
            'access_rules_status': 'active',
            'cast_rules_to_readonly': False,
        }
        share_instance_table = utils.load_table('share_instances', conn)
        conn.execute(
            share_instance_table.insert().values(share_instance_data))

        share_access_data = {
            'id': self.record_access_id,
            'share_id': share_data['id'],
            'access_type': 'NFS',
            'access_to': '10.0.0.1',
            'deleted': 'False'
        }
        share_access_table = utils.load_table('share_access_map', conn)
        conn.execute(share_access_table.insert().values(share_access_data))

        share_instance_access_data = {
            'id': uuidutils.generate_uuid(),
            'share_instance_id': share_instance_data['id'],
            'access_id': share_access_data['id'],
            'deleted': 'False'
        }
        share_instance_access_table = utils.load_table(
            'share_instance_access_map', conn)
        conn.execute(share_instance_access_table.insert().values(
            share_instance_access_data))

    def check_upgrade(self, conn, data):
        """Insert a metadata row referencing the fixture access rule."""
        data = {
            'id': 1,
            'key': 't' * 255,
            'value': 'v' * 1023,
            'access_id': self.record_access_id,
            'created_at': datetime.datetime(2017, 7, 10, 18, 5, 58),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 'False',
        }
        new_table = utils.load_table(self.new_table_name, conn)
        conn.execute(new_table.insert().values(data))

    def check_downgrade(self, conn):
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    self.new_table_name, conn)
@map_to_migration('6a3fd2984bc31')
class ShareServerIsAutoDeletableAndIdentifierChecks(BaseMigrationChecks):
    """Checks the 'is_auto_deletable' and 'identifier' columns added to
    the 'share_servers' table.
    """

    def setup_upgrade_data(self, conn):
        """Insert one share network and one share server fixture."""
        user_id = 'user_id'
        project_id = 'project_id'
        # Create share network
        share_network_data = {
            'id': 'fake_sn_id',
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table('share_networks', conn)
        conn.execute(sn_table.insert().values(share_network_data))

        # Create share server
        share_server_data = {
            'id': 'fake_ss_id',
            'share_network_id': share_network_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', conn)
        conn.execute(ss_table.insert().values(share_server_data))

    def check_upgrade(self, conn, data):
        """Verify both columns exist with their migration-set defaults."""
        ss_table = utils.load_table('share_servers', conn)
        for ss in conn.execute(ss_table.select()):
            self.test_case.assertTrue(hasattr(ss, 'is_auto_deletable'))
            self.test_case.assertEqual(1, ss.is_auto_deletable)
            self.test_case.assertTrue(hasattr(ss, 'identifier'))
            # The migration backfills 'identifier' with the server's own id.
            self.test_case.assertEqual(ss.id, ss.identifier)

    def check_downgrade(self, conn):
        """Verify both columns are removed again on downgrade."""
        ss_table = utils.load_table('share_servers', conn)
        for ss in conn.execute(ss_table.select()):
            self.test_case.assertFalse(hasattr(ss, 'is_auto_deletable'))
            self.test_case.assertFalse(hasattr(ss, 'identifier'))


@map_to_migration('805685098bd2')
class ShareNetworkSubnetMigrationChecks(BaseMigrationChecks):
    """Checks the move of network attributes from 'share_networks' into the
    new 'share_network_subnets' table, and the re-pointing of share servers
    from 'share_network_id' to 'share_network_subnet_id'.
    """

    user_id = '6VFQ87wnV24lg1c2q1q0lJkTbQBPFZ1m4968'
    project_id = '19HAW8w58yeUPBy8zGex4EGulWZHd8zZGtHk'
    share_network = {
        'id': uuidutils.generate_uuid(),
        'user_id': user_id,
        'project_id': project_id,
        'neutron_net_id': uuidutils.generate_uuid(),
        'neutron_subnet_id': uuidutils.generate_uuid(),
        'cidr': '203.0.113.0/24',
        'ip_version': 4,
        'network_type': 'vxlan',
        'segmentation_id': 100,
        'gateway': 'fake_gateway',
        'mtu': 1500,
    }
    share_networks = [share_network]
    sns_table_name = 'share_network_subnets'
    sn_table_name = 'share_networks'
    ss_table_name = 'share_servers'
    # Attributes that must migrate from share networks to subnets.
    expected_keys = ['neutron_net_id', 'neutron_subnet_id', 'cidr',
                     'ip_version', 'network_type', 'segmentation_id',
                     'gateway', 'mtu']

    def _setup_data_for_empty_neutron_net_and_subnet_id_test(self, network):
        """Return a variant of the network dict with all net info nulled."""
        network['id'] = uuidutils.generate_uuid()
        for key in self.expected_keys:
            network[key] = None
        return network

    def setup_upgrade_data(self, conn):
        """Insert two share networks (with/without net info) plus servers."""
        share_network_data_without_net_info = (
            self._setup_data_for_empty_neutron_net_and_subnet_id_test(
                copy.deepcopy(self.share_network)))
        self.share_networks.append(share_network_data_without_net_info)
        # Load the table to be used below
        sn_table = utils.load_table(self.sn_table_name, conn)
        ss_table = utils.load_table(self.ss_table_name, conn)
        # Share server data
        share_server_data = {
            'host': 'acme@controller-ostk-0',
            'status': 'active',
        }
        # Create share share networks and one share server for each of them
        for network in self.share_networks:
            share_server_data['share_network_id'] = network['id']
            share_server_data['id'] = uuidutils.generate_uuid()
            conn.execute(sn_table.insert().values(network))
            conn.execute(ss_table.insert().values(share_server_data))

    def check_upgrade(self, conn, data):
        """Verify attributes moved to subnets and servers were re-pointed."""
        # Load the necessary tables
        sn_table = utils.load_table(self.sn_table_name, conn)
        sns_table = utils.load_table(self.sns_table_name, conn)
        ss_table = utils.load_table(self.ss_table_name, conn)

        for network in self.share_networks:
            sn_record = conn.execute(sn_table.select().where(
                sn_table.c.id == network['id'])).first()

            # Network attributes must be gone from the share network...
            for key in self.expected_keys:
                self.test_case.assertFalse(hasattr(sn_record, key))

            sns_record = conn.execute(sns_table.select().where(
                sns_table.c.share_network_id == network['id'])).first()

            # ...and present, with the original values, on the subnet.
            for key in self.expected_keys:
                self.test_case.assertTrue(hasattr(sns_record, key))
                self.test_case.assertEqual(
                    network[key], sns_record._mapping[key])

            ss_record = (
                conn.execute(
                    ss_table.select().where(
                        ss_table.c.share_network_subnet_id ==
                        sns_record._mapping['id'])
                ).first())

            self.test_case.assertIs(
                True, hasattr(ss_record, 'share_network_subnet_id'))
            self.test_case.assertEqual(
                ss_record._mapping['share_network_subnet_id'],
                sns_record._mapping['id']
            )
            self.test_case.assertIs(
                False, hasattr(ss_record, 'share_network_id'))

    def check_downgrade(self, conn):
        """Verify attributes and server FKs are restored and subnets gone."""
        sn_table = utils.load_table(self.sn_table_name, conn)

        # Check if the share network table contains the expected keys
        for sn in conn.execute(sn_table.select()):
            for key in self.expected_keys:
                self.test_case.assertTrue(hasattr(sn, key))

        ss_table = utils.load_table(self.ss_table_name, conn)
        for network in self.share_networks:
            for ss in conn.execute(ss_table.select().where(
                    ss_table.c.share_network_id == network['id'])):
                self.test_case.assertFalse(
                    hasattr(ss, 'share_network_subnet_id'))
                self.test_case.assertTrue(hasattr(ss, 'share_network_id'))
                # BUG FIX: this previously asserted against ss['id'] (the
                # share server's own primary key, read via string indexing
                # that SQLAlchemy 2.x Rows no longer support). The intent is
                # to verify the restored 'share_network_id' FK matches the
                # network the server was created for.
                self.test_case.assertEqual(
                    network['id'], ss._mapping['share_network_id'])

        # Check if the created table doesn't exists anymore
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError, utils.load_table, self.sns_table_name,
            conn)
@map_to_migration('e6d88547b381')
class ShareInstanceProgressFieldChecks(BaseMigrationChecks):
    """Checks the 'progress' column added to the 'share_instances' table."""

    si_table_name = 'share_instances'
    progress_field_name = 'progress'

    def setup_upgrade_data(self, conn):
        # Relies on rows inserted by earlier checks; nothing to add here.
        pass

    def check_upgrade(self, conn, data):
        """Verify 'progress' is backfilled to '100%' for available shares."""
        si_table = utils.load_table(self.si_table_name, conn)
        for si_record in conn.execute(si_table.select()):
            self.test_case.assertTrue(hasattr(si_record,
                                              self.progress_field_name))
            if si_record._mapping['status'] == constants.STATUS_AVAILABLE:
                self.test_case.assertEqual(
                    '100%',
                    si_record._mapping[self.progress_field_name])
            else:
                self.test_case.assertIsNone(
                    si_record._mapping[self.progress_field_name])

    def check_downgrade(self, conn):
        """Verify the column is removed again on downgrade."""
        si_table = utils.load_table(self.si_table_name, conn)
        for si_record in conn.execute(si_table.select()):
            self.test_case.assertFalse(hasattr(si_record,
                                               self.progress_field_name))


@map_to_migration('5aa813ae673d')
class ShareServerTaskState(BaseMigrationChecks):
    """Checks 'task_state' and 'source_share_server_id' columns added to
    the 'share_servers' table.
    """

    def setup_upgrade_data(self, conn):
        """Insert one share server fixture."""
        # Create share server
        share_server_data = {
            'id': uuidutils.generate_uuid(),
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', conn)
        conn.execute(ss_table.insert().values(share_server_data))

    def check_upgrade(self, conn, data):
        """Verify both columns exist and default to NULL."""
        ss_table = utils.load_table('share_servers', conn)
        for ss in conn.execute(ss_table.select()):
            self.test_case.assertTrue(hasattr(ss, 'task_state'))
            self.test_case.assertTrue(hasattr(ss, 'source_share_server_id'))
            self.test_case.assertIsNone(ss._mapping['task_state'])
            self.test_case.assertIsNone(
                ss._mapping['source_share_server_id'])

    def check_downgrade(self, conn):
        """Verify both columns are removed again on downgrade."""
        ss_table = utils.load_table('share_servers', conn)
        for ss in conn.execute(ss_table.select()):
            self.test_case.assertFalse(hasattr(ss, 'task_state'))
            self.test_case.assertFalse(
                hasattr(ss, 'source_share_server_id'))
@map_to_migration('478c445d8d3e')
class AddUpdateSecurityServiceControlFields(BaseMigrationChecks):
    """Checks 'security_service_update_support' on share servers, 'status'
    on share networks, and the new 'async_operation_data' table.
    """

    def setup_upgrade_data(self, conn):
        """Insert a share network, a subnet and a share server fixture."""
        user_id = 'user_id'
        project_id = 'project_id'
        # Create share network
        share_network_data = {
            'id': uuidutils.generate_uuid(),
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table('share_networks', conn)
        conn.execute(sn_table.insert().values(share_network_data))

        share_network_subnet_data = {
            'id': uuidutils.generate_uuid(),
            'share_network_id': share_network_data['id']
        }
        sns_table = utils.load_table('share_network_subnets', conn)
        conn.execute(sns_table.insert().values(share_network_subnet_data))

        # Create share server
        share_server_data = {
            'id': uuidutils.generate_uuid(),
            'share_network_subnet_id': share_network_subnet_data['id'],
            'host': 'fake_host',
            'status': 'active',
        }
        ss_table = utils.load_table('share_servers', conn)
        conn.execute(ss_table.insert().values(share_server_data))

    def check_upgrade(self, conn, data):
        """Verify new columns/defaults and that the new table accepts rows."""
        ss_table = utils.load_table('share_servers', conn)
        for ss in conn.execute(ss_table.select()):
            self.test_case.assertTrue(
                hasattr(ss, 'security_service_update_support'))
            self.test_case.assertEqual(
                False, ss.security_service_update_support)

        sn_table = utils.load_table('share_networks', conn)
        for sn in conn.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'status'))
            self.test_case.assertEqual(constants.STATUS_NETWORK_ACTIVE,
                                       sn.status)

        async_op_data = {
            'created_at': datetime.datetime(2021, 3, 12, 17, 40, 34),
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'entity_uuid': uuidutils.generate_uuid(),
            'key': 't' * 255,
            'value': 'v' * 1023,
        }
        async_op_data_table = utils.load_table('async_operation_data', conn)
        conn.execute(async_op_data_table.insert().values(async_op_data))

    def check_downgrade(self, conn):
        """Verify columns are removed and the new table is dropped."""
        ss_table = utils.load_table('share_servers', conn)
        for ss in conn.execute(ss_table.select()):
            self.test_case.assertFalse(
                hasattr(ss, 'security_service_update_support'))
        sn_table = utils.load_table('share_networks', conn)
        for sn in conn.execute(sn_table.select()):
            self.test_case.assertFalse(hasattr(sn, 'status'))
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError, utils.load_table,
            'async_operation_data', conn)
@map_to_migration('1946cb97bb8d')
class ShareIsSoftDeleted(BaseMigrationChecks):
    """Checks 'is_soft_deleted' and 'scheduled_to_be_deleted_at' columns
    added to the 'shares' table.
    """

    def setup_upgrade_data(self, conn):
        """Insert two shares and one instance for each of them."""
        # Setup shares
        share_fixture = [{'id': 'foo_share_id1'}, {'id': 'bar_share_id1'}]
        share_table = utils.load_table('shares', conn)
        for fixture in share_fixture:
            conn.execute(share_table.insert().values(fixture))

        # Setup share instances
        si_fixture = [
            {'id': 'foo_share_instance_id_oof1',
             'share_id': share_fixture[0]['id'],
             'cast_rules_to_readonly': False},
            {'id': 'bar_share_instance_id_rab1',
             'share_id': share_fixture[1]['id'],
             'cast_rules_to_readonly': False},
        ]
        si_table = utils.load_table('share_instances', conn)
        for fixture in si_fixture:
            conn.execute(si_table.insert().values(fixture))

    def check_upgrade(self, conn, data):
        """Verify both columns exist with falsy/NULL backfill values."""
        s_table = utils.load_table('shares', conn)
        for s in conn.execute(s_table.select()):
            self.test_case.assertTrue(hasattr(s, 'is_soft_deleted'))
            self.test_case.assertTrue(hasattr(s,
                                              'scheduled_to_be_deleted_at'))
            # Depending on backend, the flag may surface as 0 or False.
            self.test_case.assertIn(s._mapping['is_soft_deleted'], (0, False))
            self.test_case.assertIsNone(
                s._mapping['scheduled_to_be_deleted_at'])

    def check_downgrade(self, conn):
        """Verify both columns are removed again on downgrade."""
        s_table = utils.load_table('shares', conn)
        for s in conn.execute(s_table.select()):
            self.test_case.assertFalse(hasattr(s, 'is_soft_deleted'))
            self.test_case.assertFalse(hasattr(s,
                                               'scheduled_to_be_deleted_at'))
@map_to_migration('a87e0fb17dee')
class ShareServerMultipleSubnets(BaseMigrationChecks):
    """Checks the many-to-many mapping table between share servers and
    share network subnets introduced for multiple-subnet support.
    """

    def setup_upgrade_data(self, conn):
        """Insert a network, a subnet and a server pointing at the subnet."""
        user_id = 'user_id_multiple_subnets'
        project_id = 'project_id_multiple_subnets'
        # Create share network
        share_network_data = {
            'id': uuidutils.generate_uuid(),
            'user_id': user_id,
            'project_id': project_id,
        }
        sn_table = utils.load_table('share_networks', conn)
        conn.execute(sn_table.insert().values(share_network_data))

        # Create share network subnets
        share_network_subnet_data = {
            'id': uuidutils.generate_uuid(),
            'share_network_id': share_network_data['id']
        }
        sns_table = utils.load_table('share_network_subnets', conn)
        conn.execute(sns_table.insert().values(share_network_subnet_data))

        # Create share server
        share_server_data = {
            'id': uuidutils.generate_uuid(),
            'host': 'fake_host',
            'status': 'active',
            'share_network_subnet_id': share_network_subnet_data['id'],
        }
        ss_table = utils.load_table('share_servers', conn)
        conn.execute(ss_table.insert().values(share_server_data))

    def check_upgrade(self, conn, data):
        """Verify mapping rows reference valid servers and subnets."""
        ss_sns_map_table = utils.load_table(
            'share_server_share_network_subnet_mappings', conn)
        ss_table = utils.load_table('share_servers', conn)
        sns_table = utils.load_table('share_network_subnets', conn)
        na_table = utils.load_table('network_allocations', conn)

        na_record = conn.execute(na_table.select()).first()
        self.test_case.assertFalse(na_record is None)
        self.test_case.assertTrue(
            hasattr(na_record, 'share_network_subnet_id'))

        for map_record in conn.execute(ss_sns_map_table.select()):
            self.test_case.assertTrue(
                hasattr(map_record, 'share_network_subnet_id'))
            self.test_case.assertTrue(
                hasattr(map_record, 'share_server_id'))

            # Each mapping row must point at an existing share server that
            # no longer carries the direct subnet FK.
            ss_record = conn.execute(
                ss_table
                .select()
                .where(ss_table.c.id ==
                       map_record._mapping['share_server_id'])
            ).first()
            self.test_case.assertFalse(ss_record is None)
            self.test_case.assertFalse(
                hasattr(ss_record, 'share_network_subnet_id'))
            self.test_case.assertTrue(
                hasattr(ss_record, 'network_allocation_update_support'))

            # ...and at an existing share network subnet.
            sns_record = conn.execute(
                sns_table
                .select()
                .where(sns_table.c.id ==
                       map_record._mapping['share_network_subnet_id'])
            ).first()
            self.test_case.assertFalse(sns_record is None)

    def check_downgrade(self, conn):
        """Verify the mapping table is dropped and the direct FK restored."""
        ss_table = utils.load_table('share_servers', conn)
        na_table = utils.load_table('network_allocations', conn)
        self.test_case.assertRaises(
            sa_exc.NoSuchTableError, utils.load_table,
            'share_server_share_network_subnet_mappings', conn)

        for ss_record in conn.execute(ss_table.select()):
            self.test_case.assertTrue(
                hasattr(ss_record, 'share_network_subnet_id'))
            self.test_case.assertFalse(
                hasattr(ss_record, 'network_allocation_update_support'))

        na_record = conn.execute(
            na_table
            .select()
        ).first()
        self.test_case.assertFalse(
            hasattr(na_record, 'share_network_subnet_id'))
@map_to_migration('bb5938d74b73')
class AddSnapshotMetadata(BaseMigrationChecks):
    """Checks creation of the 'share_snapshot_metadata' table."""

    snapshot_id = uuidutils.generate_uuid()
    new_table_name = 'share_snapshot_metadata'

    def setup_upgrade_data(self, conn):
        """Build a share -> instance -> snapshot -> snapshot instance chain."""
        # Setup Share
        share_data = {
            'id': uuidutils.generate_uuid(),
            'share_proto': "NFS",
            'size': 1,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake'
        }
        share_table = utils.load_table('shares', conn)
        conn.execute(share_table.insert().values(share_data))

        share_instance_data = {
            'id': uuidutils.generate_uuid(),
            'deleted': 'False',
            'host': 'fake',
            'share_id': share_data['id'],
            'status': 'available',
            'access_rules_status': 'active',
            'cast_rules_to_readonly': False,
        }
        share_instance_table = utils.load_table('share_instances', conn)
        conn.execute(
            share_instance_table.insert().values(share_instance_data))

        # Setup Share Snapshot
        share_snapshot_data = {
            'id': self.snapshot_id,
            'share_id': share_data['id']
        }
        snapshot_table = utils.load_table('share_snapshots', conn)
        conn.execute(snapshot_table.insert().values(share_snapshot_data))

        # Setup snapshot instances
        snapshot_instance_data = {
            'id': uuidutils.generate_uuid(),
            'snapshot_id': share_snapshot_data['id'],
            'share_instance_id': share_instance_data['id']
        }
        snap_i_table = utils.load_table('share_snapshot_instances', conn)
        conn.execute(snap_i_table.insert().values(snapshot_instance_data))

    def check_upgrade(self, conn, data):
        """Insert a metadata row and verify every column round-trips."""
        data = {
            'id': 1,
            'key': 't' * 255,
            'value': 'v' * 1023,
            'share_snapshot_id': self.snapshot_id,
            'deleted': 'False',
        }
        new_table = utils.load_table(self.new_table_name, conn)
        conn.execute(new_table.insert().values(data))

        item = conn.execute(
            new_table.select().where(new_table.c.id == data['id'])).first()
        self.test_case.assertTrue(hasattr(item, 'id'))
        self.test_case.assertEqual(data['id'], item._mapping['id'])
        self.test_case.assertTrue(hasattr(item, 'key'))
        self.test_case.assertEqual(data['key'], item._mapping['key'])
        self.test_case.assertTrue(hasattr(item, 'value'))
        self.test_case.assertEqual(data['value'], item._mapping['value'])
        self.test_case.assertTrue(hasattr(item, 'share_snapshot_id'))
        self.test_case.assertEqual(self.snapshot_id,
                                   item._mapping['share_snapshot_id'])
        self.test_case.assertTrue(hasattr(item, 'deleted'))
        self.test_case.assertEqual('False', item._mapping['deleted'])

    def check_downgrade(self, conn):
        self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                    self.new_table_name, conn)
@map_to_migration('ac0620cbe74d')
class AddSubnetMetadata(BaseMigrationChecks):
    """Checks creation of the 'share_network_subnet_metadata' table."""

    share_subnet_id = uuidutils.generate_uuid()
    new_table_name = 'share_network_subnet_metadata'

    def setup_upgrade_data(self, conn):
        # Setup Share network.
share_network_data = { 'id': uuidutils.generate_uuid(), 'user_id': 'fake', 'project_id': 'fake' } network_table = utils.load_table('share_networks', conn) conn.execute(network_table.insert().values(share_network_data)) # Setup share network subnet. share_network_subnet_data = { 'id': self.share_subnet_id, 'share_network_id': share_network_data['id'] } network_table = utils.load_table('share_network_subnets', conn) conn.execute(network_table.insert().values(share_network_subnet_data)) def check_upgrade(self, conn, data): data = { 'id': 1, 'key': 't' * 255, 'value': 'v' * 1023, 'share_network_subnet_id': self.share_subnet_id, 'deleted': 'False', } new_table = utils.load_table(self.new_table_name, conn) conn.execute(new_table.insert().values(data)) item = conn.execute( new_table.select().where(new_table.c.id == data['id'])).first() self.test_case.assertTrue(hasattr(item, 'id')) self.test_case.assertEqual(data['id'], item._mapping['id']) self.test_case.assertTrue(hasattr(item, 'key')) self.test_case.assertEqual(data['key'], item._mapping['key']) self.test_case.assertTrue(hasattr(item, 'value')) self.test_case.assertEqual(data['value'], item._mapping['value']) self.test_case.assertTrue(hasattr(item, 'share_network_subnet_id')) self.test_case.assertEqual(self.share_subnet_id, item._mapping['share_network_subnet_id']) self.test_case.assertTrue(hasattr(item, 'deleted')) self.test_case.assertEqual('False', item._mapping['deleted']) def check_downgrade(self, conn): self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table, self.new_table_name, conn) @map_to_migration('aebe2a413e13') class AddServiceState(BaseMigrationChecks): def _get_service_data(self, options): base_dict = { 'binary': 'manila-share', 'topic': 'share', 'disabled': False, 'report_count': '100', } base_dict.update(options) return base_dict def setup_upgrade_data(self, conn): service_fixture = [ self._get_service_data({'host': 'fake1'}), self._get_service_data({'host': 'fake2'}), ] services_table = 
utils.load_table('services', conn) for fixture in service_fixture: conn.execute(services_table.insert().values(fixture)) def check_upgrade(self, conn, data): s_table = utils.load_table('services', conn) for s in conn.execute(s_table.select()): self.test_case.assertTrue(hasattr(s, 'state')) def check_downgrade(self, conn): s_table = utils.load_table('services', conn) for s in conn.execute(s_table.select()): self.test_case.assertFalse(hasattr(s, 'state')) @map_to_migration('cb20f743ca7b') class AddResourceLocks(BaseMigrationChecks): def setup_upgrade_data(self, conn): pass def check_upgrade(self, conn, data): lock_data = { 'id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid(dashed=False), 'user_id': uuidutils.generate_uuid(dashed=False), 'resource_id': uuidutils.generate_uuid(), 'created_at': datetime.datetime(2023, 7, 18, 12, 6, 30), 'updated_at': None, 'deleted_at': None, 'deleted': 'False', 'resource_type': 'share', 'resource_action': 'delete', 'lock_reason': 'xyzzy' * 200, 'lock_context': 'user', } locks_table = utils.load_table('resource_locks', conn) conn.execute(locks_table.insert().values(lock_data)) def check_downgrade(self, conn): self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table, 'resource_locks', conn) @map_to_migration('99d328f0a3d2') class ServiceDisabledReason(BaseMigrationChecks): def _get_service_data(self, options): base_dict = { 'binary': 'manila-share', 'topic': 'share', 'disabled': False, 'report_count': '100', } base_dict.update(options) return base_dict def setup_upgrade_data(self, conn): service_fixture = [ self._get_service_data({'host': 'fake1'}), self._get_service_data({'host': 'fake2'}), ] services_table = utils.load_table('services', conn) for fixture in service_fixture: conn.execute(services_table.insert().values(fixture)) def check_upgrade(self, conn, data): service_table = utils.load_table('services', conn) for s in conn.execute(service_table.select()): self.test_case.assertTrue(hasattr(s, 
'disabled_reason')) def check_downgrade(self, conn): service_table = utils.load_table('services', conn) for s in conn.execute(service_table.select()): self.test_case.assertFalse(hasattr(s, 'disabled_reason')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/migrations/alembic/test_migration.py0000664000175000017500000001756100000000000025232 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. 
""" from unittest import mock from alembic import script from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import test_fixtures from oslo_log import log from oslotest import base as test_base from sqlalchemy.sql import text from manila.db.migrations.alembic import migration from manila.tests.db.migrations.alembic import migrations_data_checks from manila.tests import utils as test_utils LOG = log.getLogger('manila.tests.test_migrations') class ManilaMigrationsCheckers(migrations_data_checks.DbMigrationsData): """Test alembic migrations.""" def setUp(self): super().setUp() self.engine = enginefacade.writer.get_engine() @property def snake_walk(self): return True @property def downgrade(self): return True @property def INIT_VERSION(self): pass @property def REPOSITORY(self): pass @property def migration_api(self): return migration @property def migrate_engine(self): return self.engine def _walk_versions(self, snake_walk=False, downgrade=True): # Determine latest version script from the repo, then # upgrade from 1 through to the latest, with no data # in the databases. This just checks that the schema itself # upgrades successfully. 
# Place the database under version control alembic_cfg = migration._alembic_config() script_directory = script.ScriptDirectory.from_config(alembic_cfg) self.assertIsNone(self.migration_api.version()) versions = [ver for ver in script_directory.walk_revisions()] LOG.debug('latest version is %s', versions[0].revision) for version in reversed(versions): self._migrate_up(version.revision, with_data=True) if snake_walk: downgraded = self._migrate_down( version, with_data=True) if downgraded: self._migrate_up(version.revision) if downgrade: for version in versions: downgraded = self._migrate_down(version) if snake_walk and downgraded: self._migrate_up(version.revision) self._migrate_down(version) def _migrate_down(self, version, with_data=False): try: self.migration_api.downgrade(version.down_revision) except NotImplementedError: # NOTE(sirp): some migrations, namely release-level # migrations, don't support a downgrade. return False self.assertEqual(version.down_revision, self.migration_api.version()) if with_data: post_downgrade = getattr( self, "_post_downgrade_%s" % version.revision, None) if post_downgrade: with self.engine.begin() as conn: post_downgrade(conn) return True def _migrate_up(self, version, with_data=False): """migrate up to a new version of the db. We allow for data insertion and post checks at every migration version with special _pre_upgrade_### and _check_### functions in the main test. 
""" # NOTE(sdague): try block is here because it's impossible to debug # where a failed data migration happens otherwise try: if with_data: data = None pre_upgrade = getattr( self, "_pre_upgrade_%s" % version, None) if pre_upgrade: with self.engine.begin() as conn: data = pre_upgrade(conn) self.migration_api.upgrade(version) self.assertEqual(version, self.migration_api.version()) if with_data: check = getattr(self, "_check_%s" % version, None) if check: with self.engine.begin() as conn: check(conn, data) except Exception as e: LOG.error("Failed to migrate to version %(version)s on engine " "%(engine)s. Exception while running the migration: " "%(exception)s", {'version': version, 'engine': self.engine, 'exception': e}) raise # NOTE(vponomaryov): set 12 minutes timeout for case of running it on # very slow nodes/VMs. Note, that this test becomes slower with each # addition of new DB migration. On fast nodes it can take about 5-10 secs # having Mitaka set of migrations. # 'pymysql' works much slower on slow nodes than 'psycopg2'. And such # timeout mostly required for testing of 'mysql' backend. @test_utils.set_timeout(720) def test_walk_versions(self): """Walks all version scripts for each tested database. While walking, ensure that there are no errors in the version scripts for each engine. 
""" with mock.patch('manila.db.sqlalchemy.api.get_engine', return_value=self.engine): self._walk_versions(snake_walk=self.snake_walk, downgrade=self.downgrade) def test_single_branch(self): alembic_cfg = migration._alembic_config() script_directory = script.ScriptDirectory.from_config(alembic_cfg) actual_result = script_directory.get_heads() self.assertEqual(1, len(actual_result), "Db migrations should have only one branch.") class TestManilaMigrationsMySQL( ManilaMigrationsCheckers, test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): """Run migration tests on MySQL backend.""" FIXTURE = test_fixtures.MySQLOpportunisticFixture @test_utils.set_timeout(300) def test_mysql_innodb(self): """Test that table creation on mysql only builds InnoDB tables.""" with mock.patch('manila.db.sqlalchemy.api.get_engine', return_value=self.engine): self._walk_versions(snake_walk=False, downgrade=False) with self.engine.begin() as conn: # sanity check sanity_check = """SELECT count(*) FROM information_schema.tables WHERE table_schema = :database;""" total = conn.execute( text(sanity_check), {"database": self.engine.url.database}) self.assertGreater( total.scalar(), 0, "No tables found. 
Wrong schema?") noninnodb_query = """ SELECT count(*) FROM information_schema.TABLES WHERE table_schema = :database AND engine != 'InnoDB' AND table_name != 'alembic_version';""" count = conn.execute( text(noninnodb_query), {"database": self.engine.url.database} ).scalar() self.assertEqual(0, count, "%d non InnoDB tables created" % count) class TestManilaMigrationsPostgreSQL( ManilaMigrationsCheckers, test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): """Run migration tests on PostgreSQL backend.""" FIXTURE = test_fixtures.PostgresqlOpportunisticFixture ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/migrations/test_utils.py0000664000175000017500000000176400000000000023003 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.db.migrations import utils from manila.db.sqlalchemy import api from manila import test class MigrationUtilsTestCase(test.TestCase): def test_load_table(self): connection = api.get_engine() table_name = 'shares' actual_result = utils.load_table(table_name, connection) self.assertIsNotNone(actual_result) self.assertEqual(table_name, actual_result.name) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.973671 manila-21.0.0/manila/tests/db/sqlalchemy/0000775000175000017500000000000000000000000020210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/sqlalchemy/__init__.py0000664000175000017500000000000000000000000022307 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/sqlalchemy/test_api.py0000664000175000017500000074512700000000000022412 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright (c) 2014 NetApp, Inc. # Copyright (c) 2015 Rushil Chugh # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Testing of SQLAlchemy backend.""" import copy import datetime import random from unittest import mock import ddt from oslo_db import exception as db_exception from oslo_utils import timeutils from oslo_utils import uuidutils from manila.common import constants from manila import context from manila.db.sqlalchemy import api as db_api from manila.db.sqlalchemy import models from manila import exception from manila import quota from manila import test from manila.tests import db_utils QUOTAS = quota.QUOTAS security_service_dict = { 'id': 'fake id', 'project_id': 'fake project', 'type': 'ldap', 'dns_ip': 'fake dns', 'server': 'fake ldap server', 'domain': 'fake ldap domain', 'default_ad_site': 'fake ldap default_ad_site', 'ou': 'fake ldap ou', 'user': 'fake user', 'password': 'fake password', 'name': 'whatever', 'description': 'nevermind', } class BaseDatabaseAPITestCase(test.TestCase): def _check_fields(self, expected, actual): for key in expected: self.assertEqual(expected[key], actual[key]) @ddt.ddt class GenericDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(GenericDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() @ddt.unpack @ddt.data( {'values': {'test': 'fake'}, 'call_count': 1}, {'values': {'test': 'fake', 'id': 'fake'}, 'call_count': 0}, {'values': {'test': 'fake', 'fooid': 'fake'}, 'call_count': 1}, {'values': {'test': 'fake', 'idfoo': 'fake'}, 'call_count': 1}, ) def test_ensure_model_values_has_id(self, values, call_count): self.mock_object(uuidutils, 'generate_uuid') db_api.ensure_model_dict_has_id(values) self.assertEqual(call_count, uuidutils.generate_uuid.call_count) self.assertIn('id', values) def test_custom_query(self): share = db_utils.create_share() share_access = db_utils.create_access(share_id=share['id']) db_api.share_instance_access_delete( self.ctxt, share_access.instance_mappings[0].id) self.assertRaises(exception.NotFound, db_api.share_access_get, self.ctxt, share_access.id) 
@ddt.ddt class ShareAccessDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(ShareAccessDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() @ddt.data(0, 3) def test_share_access_get_all_for_share(self, len_rules): share = db_utils.create_share() rules = [db_utils.create_access(share_id=share['id']) for i in range(0, len_rules)] rule_ids = [r['id'] for r in rules] result = db_api.share_access_get_all_for_share(self.ctxt, share['id']) self.assertEqual(len_rules, len(result)) result_ids = [r['id'] for r in result] self.assertEqual(rule_ids, result_ids) def test_share_access_get_all_for_share_no_instance_mappings(self): share = db_utils.create_share() share_instance = share['instance'] rule = db_utils.create_access(share_id=share['id']) # Mark instance mapping soft deleted db_api.share_instance_access_update( self.ctxt, rule['id'], share_instance['id'], {'deleted': "True"}) result = db_api.share_access_get_all_for_share(self.ctxt, share['id']) self.assertEqual([], result) def test_share_instance_access_update(self): share = db_utils.create_share() access = db_utils.create_access(share_id=share['id']) instance_access_mapping = db_api.share_instance_access_get( self.ctxt, access['id'], share.instance['id']) self.assertEqual(constants.ACCESS_STATE_QUEUED_TO_APPLY, access['state']) self.assertIsNone(access['access_key']) db_api.share_instance_access_update( self.ctxt, access['id'], share.instance['id'], {'state': constants.STATUS_ERROR, 'access_key': 'watson4heisman'}) instance_access_mapping = db_api.share_instance_access_get( self.ctxt, access['id'], share.instance['id']) access = db_api.share_access_get(self.ctxt, access['id']) self.assertEqual(constants.STATUS_ERROR, instance_access_mapping['state']) self.assertEqual('watson4heisman', access['access_key']) self.assertIsNotNone(access['updated_at']) time_now = timeutils.utcnow() self.assertTrue(access['updated_at'] < time_now) 
self.assertTrue(instance_access_mapping['updated_at'] < time_now) @ddt.data(True, False) def test_share_access_get_all_for_instance_with_share_access_data( self, with_share_access_data): share = db_utils.create_share() access_1 = db_utils.create_access(share_id=share['id']) access_2 = db_utils.create_access(share_id=share['id']) share_access_keys = ('access_to', 'access_type', 'access_level', 'share_id') rules = db_api.share_access_get_all_for_instance( self.ctxt, share.instance['id'], with_share_access_data=with_share_access_data) share_access_keys_present = True if with_share_access_data else False actual_access_ids = [r['access_id'] for r in rules] self.assertIsInstance(actual_access_ids, list) expected = [access_1['id'], access_2['id']] self.assertEqual(len(expected), len(actual_access_ids)) for pool in expected: self.assertIn(pool, actual_access_ids) for rule in rules: for key in share_access_keys: self.assertEqual(share_access_keys_present, key in rule) self.assertIn('state', rule) def test_share_access_get_all_for_instance_with_filters(self): share = db_utils.create_share() new_share_instance = db_utils.create_share_instance( share_id=share['id']) access_1 = db_utils.create_access(share_id=share['id']) access_2 = db_utils.create_access(share_id=share['id']) share_access_keys = ('access_to', 'access_type', 'access_level', 'share_id') db_api.share_instance_access_update( self.ctxt, access_1['id'], new_share_instance['id'], {'state': constants.STATUS_ACTIVE}) rules = db_api.share_access_get_all_for_instance( self.ctxt, new_share_instance['id'], filters={'state': constants.ACCESS_STATE_QUEUED_TO_APPLY}) self.assertEqual(1, len(rules)) self.assertEqual(access_2['id'], rules[0]['access_id']) for rule in rules: for key in share_access_keys: self.assertIn(key, rule) def test_share_instance_access_delete(self): share = db_utils.create_share() access = db_utils.create_access(share_id=share['id'], metadata={'key1': 'v1'}) instance_access_mapping = 
db_api.share_instance_access_get( self.ctxt, access['id'], share.instance['id']) db_api.share_instance_access_delete( self.ctxt, instance_access_mapping['id']) rules = db_api.share_access_get_all_for_instance( self.ctxt, share.instance['id']) self.assertEqual([], rules) self.assertRaises(exception.NotFound, db_api.share_instance_access_get, self.ctxt, access['id'], share['instance']['id']) def test_share_instance_access_delete_with_locks(self): share = db_utils.create_share() access = db_utils.create_access(share_id=share['id'], metadata={'key1': 'v1'}) # create a share and an access lock to ensure they'll be deleted access_lock = db_utils.create_lock(resource_id=access['id']) share_lock_reason = ( constants.SHARE_LOCKED_BY_ACCESS_LOCK_REASON % {'lock_id': access_lock['id']} ) share_lock = db_utils.create_lock( resource_id=share['id'], lock_reason=share_lock_reason ) # create another share lock, to ensure it won't be deleted unrelated_share_lock = db_utils.create_lock(resource_id=share['id']) instance_access_mapping = db_api.share_instance_access_get( self.ctxt, access['id'], share.instance['id']) db_api.share_instance_access_delete( self.ctxt, instance_access_mapping['id']) rules = db_api.share_access_get_all_for_instance( self.ctxt, share.instance['id']) unrelated_share_lock_get = ( db_api.resource_lock_get(self.ctxt, unrelated_share_lock['id']) ) self.assertEqual([], rules) self.assertEqual(unrelated_share_lock['id'], unrelated_share_lock_get['id']) # ensure the access rules and the locks have been dropped self.assertRaises(exception.NotFound, db_api.share_instance_access_get, self.ctxt, access['id'], share['instance']['id']) self.assertRaises(exception.NotFound, db_api.resource_lock_get, self.ctxt, access_lock['id']) self.assertRaises(exception.NotFound, db_api.resource_lock_get, self.ctxt, share_lock['id']) def test_one_share_with_two_share_instance_access_delete(self): metadata = {'key2': 'v2', 'key3': 'v3'} share = db_utils.create_share() instance = 
db_utils.create_share_instance(share_id=share['id']) access = db_utils.create_access(share_id=share['id'], metadata=metadata) instance_access_mapping1 = db_api.share_instance_access_get( self.ctxt, access['id'], share.instance['id']) instance_access_mapping2 = db_api.share_instance_access_get( self.ctxt, access['id'], instance['id']) self.assertEqual(instance_access_mapping1['access_id'], instance_access_mapping2['access_id']) db_api.share_instance_delete(self.ctxt, instance['id']) get_accesses = db_api.share_access_get_all_for_share(self.ctxt, share['id']) self.assertEqual(1, len(get_accesses)) get_metadata = ( get_accesses[0].get('share_access_rules_metadata') or {}) get_metadata = {item['key']: item['value'] for item in get_metadata} self.assertEqual(metadata, get_metadata) self.assertEqual(access['id'], get_accesses[0]['id']) db_api.share_instance_delete(self.ctxt, share['instance']['id']) self.assertRaises(exception.NotFound, db_api.share_instance_access_get, self.ctxt, access['id'], share['instance']['id']) get_accesses = db_api.share_access_get_all_for_share(self.ctxt, share['id']) self.assertEqual(0, len(get_accesses)) @ddt.data(True, False) def test_share_instance_access_get_with_share_access_data( self, with_share_access_data): share = db_utils.create_share() access = db_utils.create_access(share_id=share['id']) instance_access = db_api.share_instance_access_get( self.ctxt, access['id'], share['instance']['id'], with_share_access_data=with_share_access_data) for key in ('share_id', 'access_type', 'access_to', 'access_level', 'access_key'): self.assertEqual(with_share_access_data, key in instance_access) @ddt.data({'existing': {'access_type': 'cephx', 'access_to': 'alice'}, 'new': {'access_type': 'user', 'access_to': 'alice'}, 'result': False}, {'existing': {'access_type': 'user', 'access_to': 'bob'}, 'new': {'access_type': 'user', 'access_to': 'bob'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': '10.0.0.10/32'}, 'new': {'access_type': 
'ip', 'access_to': '10.0.0.10'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': '10.10.0.11'}, 'new': {'access_type': 'ip', 'access_to': '10.10.0.11'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': 'fd21::11'}, 'new': {'access_type': 'ip', 'access_to': 'fd21::11'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': 'fd21::10'}, 'new': {'access_type': 'ip', 'access_to': 'fd21::10/128'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': '10.10.0.0/22'}, 'new': {'access_type': 'ip', 'access_to': '10.10.0.0/24'}, 'result': False}, {'existing': {'access_type': 'ip', 'access_to': '2620:52::/48'}, 'new': {'access_type': 'ip', 'access_to': '2620:52:0:13b8::/64'}, 'result': False}) @ddt.unpack def test_share_access_check_for_existing_access(self, existing, new, result): share = db_utils.create_share() db_utils.create_access(share_id=share['id'], access_type=existing['access_type'], access_to=existing['access_to']) rule_exists = db_api.share_access_check_for_existing_access( self.ctxt, share['id'], new['access_type'], new['access_to']) self.assertEqual(result, rule_exists) def test_share_access_get_all_for_share_with_metadata(self): share = db_utils.create_share() rules = [db_utils.create_access( share_id=share['id'], metadata={'key1': i}) for i in range(0, 3)] rule_ids = [r['id'] for r in rules] result = db_api.share_access_get_all_for_share(self.ctxt, share['id']) self.assertEqual(3, len(result)) result_ids = [r['id'] for r in result] self.assertEqual(rule_ids, result_ids) result = db_api.share_access_get_all_for_share( self.ctxt, share['id'], {'metadata': {'key1': '2'}}) self.assertEqual(1, len(result)) self.assertEqual(rules[2]['id'], result[0]['id']) def test_share_access_metadata_update(self): share = db_utils.create_share() new_metadata = {'key1': 'test_update', 'key2': 'v2'} rule = db_utils.create_access(share_id=share['id'], metadata={'key1': 'v1'}) result_metadata = 
db_api.share_access_metadata_update( self.ctxt, rule['id'], metadata=new_metadata) result = db_api.share_access_get(self.ctxt, rule['id']) self.assertEqual(new_metadata, result_metadata) metadata = result.get('share_access_rules_metadata') if metadata: metadata = {item['key']: item['value'] for item in metadata} else: metadata = {} self.assertEqual(new_metadata, metadata) def test_share_access_get_with_context(self): ctxt = context.RequestContext('demo', 'fake', False) share = db_utils.create_share(project_id=ctxt.project_id) rules = [db_utils.create_access(share_id=share['id'])] result = db_api.share_access_get_with_context(ctxt, rules[0]['id']) self.assertEqual(result['project_id'], ctxt.project_id) def test_share_access_get_with_context_not_found(self): self.assertRaises( exception.NotFound, db_api.share_access_get_with_context, self.ctxt, 'fake_rule_id') @ddt.ddt class ShareDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(ShareDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() def test_share_create(self): share = db_api.share_create( self.ctxt, {'user_id': 'user', 'project_id': 'project', 'host': 'foo'}, ) self.assertEqual('user', share.user_id) self.assertEqual('project', share.project_id) self.assertEqual('foo', share.host) self.assertEqual(1, len(share.instances)) self.assertIsInstance(share.instances[0], models.ShareInstance) self.assertEqual(share.instances[0], share.instance) def test_share_create__no_instance(self): share = db_api.share_create( self.ctxt, {'user_id': 'user', 'project_id': 'project', 'host': 'foo'}, create_share_instance=False, ) self.assertEqual('user', share.user_id) self.assertEqual('project', share.project_id) self.assertIsNone(share.host) self.assertEqual(0, len(share.instances)) @ddt.data('yes', 'no', 'only') def test_share_read_deleted(self, read_deleted): share = db_utils.create_share() test_ctxt = context.get_admin_context(read_deleted=read_deleted) admin_ctxt = 
context.get_admin_context(read_deleted='yes') if read_deleted in ('yes', 'no'): self.assertIsNotNone(db_api.share_get(test_ctxt, share['id'])) elif read_deleted == 'only': self.assertRaises(exception.NotFound, db_api.share_get, test_ctxt, share['id']) # we don't use the to be tested context here and # we need to delete the share instance before we can delete the share db_api.share_instance_delete(admin_ctxt, share['instance']['id']) db_api.share_delete(admin_ctxt, share['id']) if read_deleted in ('yes', 'only'): self.assertIsNotNone(db_api.share_get(test_ctxt, share['id'])) elif read_deleted == 'no': self.assertRaises(exception.NotFound, db_api.share_get, test_ctxt, share['id']) def test_share_filter_by_host_with_pools(self): share_instances = [[ db_api.share_create(self.ctxt, {'host': value}).instance for value in ('foo', 'foo#pool0')]] db_utils.create_share() self._assertEqualListsOfObjects(share_instances[0], db_api.share_instance_get_all_by_host( self.ctxt, 'foo'), ignored_keys=['share_type', 'share_type_id', 'export_locations']) def test_share_filter_all_by_host_with_pools_multiple_hosts(self): share_instances = [[ db_api.share_create(self.ctxt, {'host': value}).instance for value in ('foo', 'foo#pool0', 'foo', 'foo#pool1')]] db_utils.create_share() self._assertEqualListsOfObjects(share_instances[0], db_api.share_instance_get_all_by_host( self.ctxt, 'foo'), ignored_keys=['share_type', 'share_type_id', 'export_locations']) def test_share_filter_all_by_share_server(self): share_network = db_utils.create_share_network() share_server = db_utils.create_share_server() share = db_utils.create_share(share_server_id=share_server['id'], share_network_id=share_network['id']) actual_result = db_api.share_get_all_by_share_server( self.ctxt, share_server['id']) self.assertEqual(1, len(actual_result)) self.assertEqual(share['id'], actual_result[0].id) def test_share_in_recycle_bin_filter_all_by_share_server(self): share_network = db_utils.create_share_network() share_server 
= db_utils.create_share_server() share = db_utils.create_share(share_server_id=share_server['id'], share_network_id=share_network['id'], is_soft_deleted=True) actual_result = db_api.share_get_all_soft_deleted( self.ctxt, share_server['id']) self.assertEqual(1, len(actual_result)) self.assertEqual(share['id'], actual_result[0].id) def test_share_in_recycle_bin_filter_all_by_share_network(self): share_network = db_utils.create_share_network() share_server = db_utils.create_share_server() share = db_utils.create_share(share_server_id=share_server['id'], share_network_id=share_network['id'], is_soft_deleted=True) actual_result = db_api.share_get_all_soft_deleted_by_network( self.ctxt, share_network['id']) self.assertEqual(1, len(actual_result)) self.assertEqual(share['id'], actual_result[0].id) def test_share_filter_all_by_share_group(self): group = db_utils.create_share_group() share = db_utils.create_share(share_group_id=group['id']) actual_result = db_api.share_get_all_by_share_group_id( self.ctxt, group['id']) self.assertEqual(1, len(actual_result)) self.assertEqual(share['id'], actual_result[0].id) def test_share_instance_delete_with_share(self): share = db_utils.create_share() self.assertIsNotNone(db_api.share_get(self.ctxt, share['id'])) self.assertIsNotNone(db_api.share_metadata_get(self.ctxt, share['id'])) db_api.share_instance_delete(self.ctxt, share.instance['id']) self.assertRaises(exception.NotFound, db_api.share_get, self.ctxt, share['id']) self.assertRaises(exception.NotFound, db_api.share_metadata_get, self.ctxt, share['id']) def test_share_instance_delete_with_share_need_to_update_usages(self): share = db_utils.create_share() self.assertIsNotNone(db_api.share_get(self.ctxt, share['id'])) self.assertIsNotNone(db_api.share_metadata_get(self.ctxt, share['id'])) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value='reservation')) self.mock_object(quota.QUOTAS, 'commit') db_api.share_instance_delete( self.ctxt, share.instance['id'], 
need_to_update_usages=True) self.assertRaises(exception.NotFound, db_api.share_get, self.ctxt, share['id']) self.assertRaises(exception.NotFound, db_api.share_metadata_get, self.ctxt, share['id']) quota.QUOTAS.reserve.assert_called_once_with( self.ctxt, project_id=share['project_id'], shares=-1, gigabytes=-share['size'], share_type_id=None, user_id=share['user_id'] ) quota.QUOTAS.commit.assert_called_once_with( self.ctxt, mock.ANY, project_id=share['project_id'], share_type_id=None, user_id=share['user_id'] ) def test_share_instance_get(self): share = db_utils.create_share() instance = db_api.share_instance_get(self.ctxt, share.instance['id']) self.assertEqual('share-%s' % instance['id'], instance['name']) @ddt.data({'with_share_data': True, 'status': constants.STATUS_AVAILABLE}, {'with_share_data': False, 'status': None}) @ddt.unpack def test_share_instance_get_all_by_host(self, with_share_data, status): kwargs = {'status': status} if status else {} db_utils.create_share(**kwargs) instances = db_api.share_instance_get_all_by_host( self.ctxt, 'fake_host', with_share_data=with_share_data, status=status) self.assertEqual(1, len(instances)) instance = instances[0] self.assertEqual('share-%s' % instance['id'], instance['name']) if with_share_data: self.assertEqual('NFS', instance['share_proto']) self.assertEqual(0, instance['size']) else: self.assertNotIn('share_proto', instance) def test_share_instance_get_all_by_host_not_found_exception(self): db_utils.create_share() instances = db_api.share_instance_get_all_by_host( self.ctxt, 'not_found_host', True) self.assertEqual(0, len(instances)) @ddt.data( {'status': constants.STATUS_AVAILABLE}, {'status': None}) @ddt.unpack def test_share_instance_get_all_by_host_no_instance(self, status): db_utils.create_share_without_instance() instances = db_api.share_instance_get_all_by_host( self.ctxt, "fake_host", with_share_data=True, status=status ) self.assertEqual(0, len(instances)) def 
test_share_instance_get_all_by_share_group(self): group = db_utils.create_share_group() db_utils.create_share(share_group_id=group['id']) db_utils.create_share() instances = db_api.share_instance_get_all_by_share_group_id( self.ctxt, group['id']) self.assertEqual(1, len(instances)) instance = instances[0] self.assertEqual('share-%s' % instance['id'], instance['name']) @ddt.data('id', 'path') def test_share_instance_get_all_by_export_location(self, type): share = db_utils.create_share() initial_location = ['fake_export_location'] db_api.export_locations_update( self.ctxt, share.instance['id'], initial_location, False) if type == 'id': export_location = db_api.export_location_get_all_by_share_id( self.ctxt, share['id']) value = export_location[0]['uuid'] else: value = 'fake_export_location' instances = db_api.share_instance_get_all( self.ctxt, filters={'export_location_' + type: value}) self.assertEqual(1, len(instances)) instance = instances[0] self.assertEqual('share-%s' % instance['id'], instance['name']) def test_share_instance_get_all_by_is_soft_deleted(self): db_utils.create_share() db_utils.create_share(is_soft_deleted=True) instances = db_api.share_instance_get_all( self.ctxt, filters={'is_soft_deleted': True}) self.assertEqual(1, len(instances)) instance = instances[0] self.assertEqual('share-%s' % instance['id'], instance['name']) def test_share_instance_get_all_by_status(self): share = db_utils.create_share() db_utils.create_share_instance( share_id=share['id'], status='creating') share2 = db_utils.create_share() db_utils.create_share_instance( share_id=share2['id'], status='error_deferred_deleting') instances = db_api.share_instance_get_all( self.ctxt, filters={'status': 'error_deferred_deleting'}) self.assertEqual(1, len(instances)) def test_share_instance_get_all_by_ids(self): fake_share = db_utils.create_share() expected_share_instance = db_utils.create_share_instance( share_id=fake_share['id']) # Populate the db with a dummy share 
db_utils.create_share_instance(share_id=fake_share['id']) instances = db_api.share_instance_get_all( self.ctxt, filters={'instance_ids': [expected_share_instance['id']]}) self.assertEqual(1, len(instances)) instance = instances[0] self.assertEqual('share-%s' % instance['id'], instance['name']) @ddt.data('host', 'share_group_id') def test_share_get_all_sort_by_share_instance_fields(self, sort_key): shares = [db_utils.create_share(**{sort_key: n, 'size': 1}) for n in ('test1', 'test2')] actual_result = db_api.share_get_all( self.ctxt, sort_key=sort_key, sort_dir='desc') self.assertEqual(2, len(actual_result)) self.assertEqual(shares[0]['id'], actual_result[1]['id']) @ddt.data('id') def test_share_get_all_sort_by_share_fields(self, sort_key): shares = [db_utils.create_share(**{sort_key: n, 'size': 1}) for n in ('FAKE_UUID1', 'FAKE_UUID2')] actual_result = db_api.share_get_all( self.ctxt, sort_key=sort_key, sort_dir='desc') self.assertEqual(2, len(actual_result)) self.assertEqual(shares[0]['id'], actual_result[1]['id']) @ddt.data('id', 'path') def test_share_get_all_by_export_location(self, type): share = db_utils.create_share() initial_location = ['fake_export_location'] db_api.export_locations_update( self.ctxt, share.instance['id'], initial_location, False) if type == 'id': export_location = db_api.export_location_get_all_by_share_id( self.ctxt, share['id']) value = export_location[0]['uuid'] else: value = 'fake_export_location' actual_result = db_api.share_get_all( self.ctxt, filters={'export_location_' + type: value}) self.assertEqual(1, len(actual_result)) self.assertEqual(share['id'], actual_result[0]['id']) @ddt.data('id', 'path') def test_share_get_all_by_export_location_not_exist(self, type): share = db_utils.create_share() initial_location = ['fake_export_location'] db_api.export_locations_update( self.ctxt, share.instance['id'], initial_location, False) filter = {'export_location_' + type: 'export_location_not_exist'} actual_result = 
db_api.share_get_all(self.ctxt, filters=filter) self.assertEqual(0, len(actual_result)) @ddt.data((10, 5), (20, 5)) @ddt.unpack def test_share_get_all_with_limit(self, limit, offset): for i in range(limit + 5): db_utils.create_share() filters = {'limit': offset, 'offset': 0} shares_not_requested = db_api.share_get_all( self.ctxt, filters=filters) filters = {'limit': limit, 'offset': offset} shares_requested = db_api.share_get_all(self.ctxt, filters=filters) shares_not_requested_ids = [s['id'] for s in shares_not_requested] shares_requested_ids = [s['id'] for s in shares_requested] self.assertEqual(offset, len(shares_not_requested_ids)) self.assertEqual(limit, len(shares_requested_ids)) self.assertEqual(0, len( set(shares_requested_ids) & set(shares_not_requested_ids))) @ddt.data( ({'display_name~': 'fake_name'}, 3, 3), ({'display_name~': 'fake_name', 'limit': 2}, 3, 2) ) @ddt.unpack def test_share_get_all_with_count(self, filters, amount_of_shares, expected_shares_len): c_shares = [ db_utils.create_share(display_name='fake_name_%s' % str(i)) for i in range(amount_of_shares)] # create one more share instance db_utils.create_share_instance(share_id=c_shares[0]['id']) db_utils.create_share_instance(share_id=c_shares[1]['id']) count, shares = db_api.share_get_all_with_count( self.ctxt, filters=filters) self.assertEqual(count, amount_of_shares) for share in shares: self.assertIn('fake_name', share['display_name']) self.assertEqual(expected_shares_len, len(shares)) def test_share_get_all_by_share_group_id_with_count(self): share_groups = [db_utils.create_share_group() for i in range(2)] shares = [ db_utils.create_share(share_group_id=share_group['id']) for share_group in share_groups] count, result = db_api.share_get_all_by_share_group_id_with_count( self.ctxt, share_groups[0]['id']) self.assertEqual(count, 1) self.assertEqual(shares[0]['id'], result[0]['id']) self.assertEqual(1, len(result)) def test_share_get_all_by_share_server_with_count(self): share_servers = 
[db_utils.create_share_server() for i in range(2)] shares = [ db_utils.create_share(share_server_id=share_server['id']) for share_server in share_servers] count, result = db_api.share_get_all_by_share_server_with_count( self.ctxt, share_servers[0]['id']) self.assertEqual(count, 1) self.assertEqual(shares[0]['id'], result[0]['id']) self.assertEqual(1, len(result)) def test_share_get_all_by_project_with_count(self): project_ids = ['fake_id_1', 'fake_id_2'] shares = [ db_utils.create_share(project_id=project_id) for project_id in project_ids] count, result = db_api.share_get_all_by_project_with_count( self.ctxt, project_ids[0]) self.assertEqual(count, 1) self.assertEqual(shares[0]['id'], result[0]['id']) self.assertEqual(1, len(result)) def test_share_get_all_expired(self): now_time = timeutils.utcnow() time_delta = datetime.timedelta(seconds=3600) time1 = now_time + time_delta time2 = now_time - time_delta share1 = db_utils.create_share(status=constants.STATUS_AVAILABLE, is_soft_deleted=False, scheduled_to_be_deleted_at=None) share2 = db_utils.create_share(status=constants.STATUS_AVAILABLE, is_soft_deleted=True, scheduled_to_be_deleted_at=time1) share3 = db_utils.create_share(status=constants.STATUS_AVAILABLE, is_soft_deleted=True, scheduled_to_be_deleted_at=time2) shares = [share1, share2, share3] result = db_api.share_get_all_expired(self.ctxt) self.assertEqual(1, len(result)) self.assertEqual(shares[2]['id'], result[0]['id']) @ddt.data( ({'status': constants.STATUS_AVAILABLE}, 'status', [constants.STATUS_AVAILABLE, constants.STATUS_ERROR]), ({'share_group_id': 'fake_group_id'}, 'share_group_id', ['fake_group_id', 'group_id']), ({'snapshot_id': 'fake_snapshot_id'}, 'snapshot_id', ['fake_snapshot_id', 'snapshot_id']), ({'share_type_id': 'fake_type_id'}, 'share_type_id', ['fake_type_id', 'type_id']), ({'host': 'fakehost@fakebackend#fakepool'}, 'host', ['fakehost@fakebackend#fakepool', 'foo@bar#test']), ({'share_network_id': 'fake_net_id'}, 'share_network_id', 
['fake_net_id', 'net_id']), ({'display_name': 'fake_share_name'}, 'display_name', ['fake_share_name', 'share_name']), ({'display_description': 'fake description'}, 'display_description', ['fake description', 'description']), ({'is_soft_deleted': True}, 'is_soft_deleted', [True, False]) ) @ddt.unpack def test_share_get_all_with_filters(self, filters, key, share_values): for value in share_values: kwargs = {key: value} db_utils.create_share(**kwargs) results = db_api.share_get_all(self.ctxt, filters=filters) for share in results: self.assertEqual(share[key], filters[key]) @ddt.data( ('display_name~', 'display_name', ['fake_name_1', 'fake_name_2', 'fake_name_3'], 'fake_name'), ('display_description~', 'display_description', ['fake desc 1', 'fake desc 2', 'fake desc 3'], 'fake desc') ) @ddt.unpack def test_share_get_all_like_filters( self, filter_name, key, share_values, like_value): for value in share_values: kwargs = {key: value} db_utils.create_share(**kwargs) db_utils.create_share( display_name='irrelevant_name', display_description='should not be queried') filters = {filter_name: like_value} results = db_api.share_get_all(self.ctxt, filters=filters) self.assertEqual(len(share_values), len(results)) @ddt.data( ('display_name~', 'display_name', ['fake_name_1', 'fake_name_2', 'fake_name_%'], '%'), ('display_description~', 'display_description', ['fake desc 1', 'fake desc 2', 'fake desc %'], '%') ) @ddt.unpack def test_share_get_all_like_filters_with_percent_sign( self, filter_name, key, share_values, like_value): for value in share_values: kwargs = {key: value} db_utils.create_share(**kwargs) db_utils.create_share( display_name='irrelevant_name', display_description='should not be queried') filters = {filter_name: like_value} results = db_api.share_get_all(self.ctxt, filters=filters) self.assertEqual(1, len(results)) @ddt.data(None, 'writable') def test_share_get_has_replicas_field(self, replication_type): share = 
db_utils.create_share(replication_type=replication_type) db_share = db_api.share_get(self.ctxt, share['id']) self.assertIn('has_replicas', db_share) @ddt.data({'with_share_data': False, 'with_share_server': False}, {'with_share_data': False, 'with_share_server': True}, {'with_share_data': True, 'with_share_server': False}, {'with_share_data': True, 'with_share_server': True}) @ddt.unpack def test_share_replicas_get_all(self, with_share_data, with_share_server): share_server = db_utils.create_share_server() share_1 = db_utils.create_share() share_2 = db_utils.create_share() db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_ACTIVE, share_id=share_1['id'], share_server_id=share_server['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, share_id=share_1['id'], share_server_id=share_server['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_OUT_OF_SYNC, share_id=share_2['id'], share_server_id=share_server['id']) db_utils.create_share_replica(share_id=share_2['id']) expected_ss_keys = { 'backend_details', 'host', 'id', 'share_network_subnet_ids', 'status', } expected_share_keys = { 'project_id', 'share_type_id', 'display_name', 'name', 'share_proto', 'is_public', 'source_share_group_snapshot_member_id', } share_replicas = db_api.share_replicas_get_all( self.ctxt, with_share_server=with_share_server, with_share_data=with_share_data, ) self.assertEqual(3, len(share_replicas)) for replica in share_replicas: if with_share_server: self.assertTrue(expected_ss_keys.issubset( replica['share_server'].keys())) else: self.assertNotIn('share_server', replica.keys()) self.assertEqual( with_share_data, expected_share_keys.issubset(replica.keys())) @ddt.data({'with_share_data': False, 'with_share_server': False}, {'with_share_data': False, 'with_share_server': True}, {'with_share_data': True, 'with_share_server': False}, {'with_share_data': True, 'with_share_server': True}) @ddt.unpack def 
test_share_replicas_get_all_by_share(self, with_share_data, with_share_server): share_server = db_utils.create_share_server() share = db_utils.create_share() db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_ACTIVE, share_id=share['id'], share_server_id=share_server['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, share_id=share['id'], share_server_id=share_server['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_OUT_OF_SYNC, share_id=share['id'], share_server_id=share_server['id']) expected_ss_keys = { 'backend_details', 'host', 'id', 'share_network_subnet_ids', 'status', } expected_share_keys = { 'project_id', 'share_type_id', 'display_name', 'name', 'share_proto', 'is_public', 'source_share_group_snapshot_member_id', } share_replicas = db_api.share_replicas_get_all_by_share( self.ctxt, share['id'], with_share_server=with_share_server, with_share_data=with_share_data) self.assertEqual(3, len(share_replicas)) for replica in share_replicas: if with_share_server: self.assertTrue(expected_ss_keys.issubset( replica['share_server'].keys())) else: self.assertNotIn('share_server', replica.keys()) self.assertEqual(with_share_data, expected_share_keys.issubset(replica.keys())) def test_share_replicas_get_available_active_replica(self): share_server = db_utils.create_share_server() share_1 = db_utils.create_share() share_2 = db_utils.create_share() share_3 = db_utils.create_share() db_utils.create_share_replica( id='Replica1', share_id=share_1['id'], status=constants.STATUS_AVAILABLE, replica_state=constants.REPLICA_STATE_ACTIVE, share_server_id=share_server['id']) db_utils.create_share_replica( id='Replica2', status=constants.STATUS_AVAILABLE, share_id=share_1['id'], replica_state=constants.REPLICA_STATE_ACTIVE, share_server_id=share_server['id']) db_utils.create_share_replica( id='Replica3', status=constants.STATUS_AVAILABLE, share_id=share_2['id'], 
replica_state=constants.REPLICA_STATE_ACTIVE) db_utils.create_share_replica( id='Replica4', status=constants.STATUS_ERROR, share_id=share_2['id'], replica_state=constants.REPLICA_STATE_ACTIVE) db_utils.create_share_replica( id='Replica5', status=constants.STATUS_AVAILABLE, share_id=share_2['id'], replica_state=constants.REPLICA_STATE_IN_SYNC) db_utils.create_share_replica( id='Replica6', share_id=share_3['id'], status=constants.STATUS_AVAILABLE, replica_state=constants.REPLICA_STATE_IN_SYNC) expected_ss_keys = { 'backend_details', 'host', 'id', 'share_network_subnet_ids', 'status', } expected_share_keys = { 'project_id', 'share_type_id', 'display_name', 'name', 'share_proto', 'is_public', 'source_share_group_snapshot_member_id', } replica_share_1 = ( db_api.share_replicas_get_available_active_replica( self.ctxt, share_1['id'], with_share_server=True, ) ) replica_share_2 = ( db_api.share_replicas_get_available_active_replica( self.ctxt, share_2['id'], with_share_server=True, with_share_data=True) ) replica_share_3 = ( db_api.share_replicas_get_available_active_replica( self.ctxt, share_3['id'], ) ) self.assertIn(replica_share_1.get('id'), ['Replica1', 'Replica2']) self.assertTrue(expected_ss_keys.issubset( replica_share_1['share_server'].keys())) self.assertFalse( expected_share_keys.issubset(replica_share_1.keys())) self.assertEqual(replica_share_2.get('id'), 'Replica3') self.assertFalse(replica_share_2['share_server']) self.assertTrue( expected_share_keys.issubset(replica_share_2.keys())) self.assertIsNone(replica_share_3) def test_share_replica_get_exception(self): replica = db_utils.create_share_replica(share_id='FAKE_SHARE_ID') self.assertRaises(exception.ShareReplicaNotFound, db_api.share_replica_get, self.ctxt, replica['id']) def test_share_replica_get_without_share_data(self): share = db_utils.create_share() replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE) expected_extra_keys = { 'project_id', 
'share_type_id', 'display_name', 'name', 'share_proto', 'is_public', 'source_share_group_snapshot_member_id', } share_replica = db_api.share_replica_get(self.ctxt, replica['id']) self.assertIsNotNone(share_replica['replica_state']) self.assertEqual(share['id'], share_replica['share_id']) self.assertFalse(expected_extra_keys.issubset(share_replica.keys())) def test_share_replica_get_with_share_data(self): share = db_utils.create_share() replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE) expected_extra_keys = { 'project_id', 'share_type_id', 'display_name', 'name', 'share_proto', 'is_public', 'source_share_group_snapshot_member_id', } share_replica = db_api.share_replica_get( self.ctxt, replica['id'], with_share_data=True) self.assertIsNotNone(share_replica['replica_state']) self.assertEqual(share['id'], share_replica['share_id']) self.assertTrue(expected_extra_keys.issubset(share_replica.keys())) def test_share_replica_get_with_share_server(self): share_server = db_utils.create_share_server() share = db_utils.create_share() replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE, share_server_id=share_server['id'] ) expected_extra_keys = { 'backend_details', 'host', 'id', 'share_network_subnet_ids', 'status', } share_replica = db_api.share_replica_get( self.ctxt, replica['id'], with_share_server=True, ) self.assertIsNotNone(share_replica['replica_state']) self.assertEqual( share_server['id'], share_replica['share_server_id']) self.assertTrue(expected_extra_keys.issubset( share_replica['share_server'].keys())) def test_share_replica_update(self): share = db_utils.create_share() replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE) updated_replica = db_api.share_replica_update( self.ctxt, replica['id'], {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC}) 
self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, updated_replica['replica_state']) def test_share_replica_delete(self): share = db_utils.create_share() share = db_api.share_get(self.ctxt, share['id']) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value='reservation')) self.mock_object(quota.QUOTAS, 'commit') replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE) self.assertEqual(1, len( db_api.share_replicas_get_all_by_share(self.ctxt, share['id']))) db_api.share_replica_delete(self.ctxt, replica['id']) self.assertEqual( [], db_api.share_replicas_get_all_by_share(self.ctxt, share['id'])) share_type_id = share['instances'][0].get('share_type_id', None) quota.QUOTAS.reserve.assert_called_once_with( self.ctxt, project_id=share['project_id'], user_id=share['user_id'], share_type_id=share_type_id, share_replicas=-1, replica_gigabytes=share['size']) quota.QUOTAS.commit.assert_called_once_with( self.ctxt, 'reservation', project_id=share['project_id'], user_id=share['user_id'], share_type_id=share_type_id) @ddt.data( (True, {"share_replicas": -1, "replica_gigabytes": 0}, 'active'), (False, {"shares": -1, "gigabytes": 0}, None), (False, {"shares": -1, "gigabytes": 0, "share_replicas": -1, "replica_gigabytes": 0}, 'active') ) @ddt.unpack def test_share_instance_delete_quota_error(self, is_replica, deltas, replica_state): share = db_utils.create_share(replica_state=replica_state) share = db_api.share_get(self.ctxt, share['id']) instance_id = share['instances'][0]['id'] if is_replica: replica = db_utils.create_share_replica( share_id=share['id'], replica_state=constants.REPLICA_STATE_ACTIVE) instance_id = replica['id'] reservation = 'fake' share_type_id = share['instances'][0]['share_type_id'] self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=reservation)) self.mock_object(quota.QUOTAS, 'commit', mock.Mock( side_effect=exception.QuotaError('fake'))) self.mock_object(quota.QUOTAS, 'rollback') 
# NOTE(silvacarlose): not calling with assertRaises since the # _update_share_instance_usages method is not raising an exception db_api.share_instance_delete( self.ctxt, instance_id, need_to_update_usages=True) quota.QUOTAS.reserve.assert_called_once_with( self.ctxt, project_id=share['project_id'], user_id=share['user_id'], share_type_id=share_type_id, **deltas) quota.QUOTAS.commit.assert_called_once_with( self.ctxt, reservation, project_id=share['project_id'], user_id=share['user_id'], share_type_id=share_type_id) quota.QUOTAS.rollback.assert_called_once_with( self.ctxt, reservation, share_type_id=share_type_id) def test_share_instance_access_copy(self): share = db_utils.create_share() rules = [] for i in range(0, 5): rules.append(db_utils.create_access(share_id=share['id'])) instance = db_utils.create_share_instance(share_id=share['id']) share_access_rules = db_api.share_instance_access_copy( self.ctxt, share['id'], instance['id']) share_access_rule_ids = [a['id'] for a in share_access_rules] self.assertEqual(5, len(share_access_rules)) for rule_id in share_access_rule_ids: self.assertIsNotNone( db_api.share_instance_access_get( self.ctxt, rule_id, instance['id'])) def test_share_soft_delete(self): share = db_utils.create_share() db_api.share_soft_delete(self.ctxt, share['id']) share = db_api.share_get(self.ctxt, share['id']) self.assertEqual(share['is_soft_deleted'], True) def test_share_restore(self): share = db_utils.create_share(is_soft_deleted=True) db_api.share_restore(self.ctxt, share['id']) share = db_api.share_get(self.ctxt, share['id']) self.assertEqual(share['is_soft_deleted'], False) def test_share_metadata_get(self): metadata = {'a': 'b', 'c': 'd'} share_1 = db_utils.create_share(size=1) db_api.share_metadata_update( self.ctxt, share_id=share_1['id'], metadata=metadata, delete=False) self.assertEqual( metadata, db_api.share_metadata_get( self.ctxt, share_id=share_1['id'])) def test_share_metadata_get_item(self): metadata = {'a': 'b', 'c': 'd'} key = 
'a' shouldbe = {'a': 'b'} share_1 = db_utils.create_share(size=1) db_api.share_metadata_update( self.ctxt, share_id=share_1['id'], metadata=metadata, delete=False) self.assertEqual( shouldbe, db_api.share_metadata_get_item( self.ctxt, share_id=share_1['id'], key=key)) def test_share_metadata_update(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '5'} should_be = {'a': '3', 'c': '2', 'd': '5'} share_1 = db_utils.create_share(size=1) db_api.share_metadata_update( self.ctxt, share_id=share_1['id'], metadata=metadata1, delete=False) db_api.share_metadata_update( self.ctxt, share_id=share_1['id'], metadata=metadata2, delete=False) self.assertEqual( should_be, db_api.share_metadata_get( self.ctxt, share_id=share_1['id'])) def test_share_metadata_update_item(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3'} should_be = {'a': '3', 'c': '2'} share_1 = db_utils.create_share(size=1) db_api.share_metadata_update( self.ctxt, share_id=share_1['id'], metadata=metadata1, delete=False) db_api.share_metadata_update_item( self.ctxt, share_id=share_1['id'], item=metadata2) self.assertEqual( should_be, db_api.share_metadata_get( self.ctxt, share_id=share_1['id'])) def test_share_metadata_delete(self): key = 'a' metadata = {'a': '1', 'c': '2'} should_be = {'c': '2'} share_1 = db_utils.create_share(size=1) db_api.share_metadata_update( self.ctxt, share_id=share_1['id'], metadata=metadata, delete=False) db_api.share_metadata_delete( self.ctxt, share_id=share_1['id'], key=key) self.assertEqual( should_be, db_api.share_metadata_get( self.ctxt, share_id=share_1['id'])) @ddt.ddt class ShareGroupDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(ShareGroupDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() def test_share_group_create_with_share_type(self): fake_share_types = ["fake_share_type"] share_group = db_utils.create_share_group(share_types=fake_share_types) share_group = 
db_api.share_group_get(self.ctxt, share_group['id']) self.assertEqual(1, len(share_group['share_types'])) def test_share_group_get(self): share_group = db_utils.create_share_group() self.assertDictEqual( dict(share_group), dict(db_api.share_group_get(self.ctxt, share_group['id']))) def test_count_share_groups_in_share_network(self): share_network = db_utils.create_share_network() db_utils.create_share_group() db_utils.create_share_group(share_network_id=share_network['id']) count = db_api.count_share_groups_in_share_network( self.ctxt, share_network_id=share_network['id']) self.assertEqual(1, count) def test_share_group_get_all(self): expected_share_group = db_utils.create_share_group() share_groups = db_api.share_group_get_all(self.ctxt, detailed=False) self.assertEqual(1, len(share_groups)) share_group = share_groups[0] self.assertEqual(2, len(dict(share_group).keys())) self.assertEqual(expected_share_group['id'], share_group['id']) self.assertEqual(expected_share_group['name'], share_group['name']) def test_share_group_get_all_with_detail(self): expected_share_group = db_utils.create_share_group() share_groups = db_api.share_group_get_all(self.ctxt, detailed=True) self.assertEqual(1, len(share_groups)) self.assertDictEqual(dict(expected_share_group), dict(share_groups[0])) def test_share_group_get_all_by_host(self): fake_host = 'my_fake_host' expected_share_group = db_utils.create_share_group(host=fake_host) db_utils.create_share_group() share_groups = db_api.share_group_get_all_by_host( self.ctxt, fake_host, detailed=False) self.assertEqual(1, len(share_groups)) share_group = share_groups[0] self.assertEqual(2, len(dict(share_group).keys())) self.assertEqual(expected_share_group['id'], share_group['id']) self.assertEqual(expected_share_group['name'], share_group['name']) def test_share_group_get_all_by_host_with_details(self): fake_host = 'my_fake_host' expected_share_group = db_utils.create_share_group(host=fake_host) db_utils.create_share_group() share_groups 
= db_api.share_group_get_all_by_host( self.ctxt, fake_host, detailed=True) self.assertEqual(1, len(share_groups)) share_group = share_groups[0] self.assertDictEqual(dict(expected_share_group), dict(share_group)) self.assertEqual(fake_host, share_group['host']) def test_share_group_get_all_by_project(self): fake_project = 'fake_project' expected_group = db_utils.create_share_group( project_id=fake_project) db_utils.create_share_group() groups = db_api.share_group_get_all_by_project(self.ctxt, fake_project, detailed=False) self.assertEqual(1, len(groups)) group = groups[0] self.assertEqual(2, len(dict(group).keys())) self.assertEqual(expected_group['id'], group['id']) self.assertEqual(expected_group['name'], group['name']) def test_share_group_get_all_by_share_server(self): fake_server = 123 expected_group = db_utils.create_share_group( share_server_id=fake_server) db_utils.create_share_group() groups = db_api.share_group_get_all_by_share_server(self.ctxt, fake_server) self.assertEqual(1, len(groups)) group = groups[0] self.assertEqual(expected_group['id'], group['id']) self.assertEqual(expected_group['name'], group['name']) def test_share_group_get_all_by_project_with_details(self): fake_project = 'fake_project' expected_group = db_utils.create_share_group( project_id=fake_project) db_utils.create_share_group() groups = db_api.share_group_get_all_by_project(self.ctxt, fake_project, detailed=True) self.assertEqual(1, len(groups)) group = groups[0] self.assertDictEqual(dict(expected_group), dict(group)) self.assertEqual(fake_project, group['project_id']) @ddt.data(({'name': 'fo'}, 0), ({'description': 'd'}, 0), ({'name': 'foo', 'description': 'd'}, 0), ({'name': 'foo'}, 1), ({'description': 'ds'}, 1), ({'name~': 'foo', 'description~': 'ds'}, 2), ({'name': 'foo', 'description~': 'ds'}, 1), ({'name~': 'foo', 'description': 'ds'}, 1)) @ddt.unpack def test_share_group_get_all_by_name_and_description( self, search_opts, group_number): 
db_utils.create_share_group(name='fo1', description='d1') expected_group1 = db_utils.create_share_group(name='foo', description='ds') expected_group2 = db_utils.create_share_group(name='foo1', description='ds2') groups = db_api.share_group_get_all( self.ctxt, detailed=True, filters=search_opts) self.assertEqual(group_number, len(groups)) if group_number == 1: self.assertDictEqual(dict(expected_group1), dict(groups[0])) elif group_number == 2: self.assertDictEqual(dict(expected_group1), dict(groups[1])) self.assertDictEqual(dict(expected_group2), dict(groups[0])) def test_share_group_update(self): fake_name = "my_fake_name" expected_group = db_utils.create_share_group() expected_group['name'] = fake_name db_api.share_group_update(self.ctxt, expected_group['id'], {'name': fake_name}) group = db_api.share_group_get(self.ctxt, expected_group['id']) self.assertEqual(fake_name, group['name']) def test_share_group_destroy(self): group = db_utils.create_share_group() db_api.share_group_get(self.ctxt, group['id']) db_api.share_group_destroy(self.ctxt, group['id']) self.assertRaises(exception.NotFound, db_api.share_group_get, self.ctxt, group['id']) def test_count_shares_in_share_group(self): sg = db_utils.create_share_group() db_utils.create_share(share_group_id=sg['id']) db_utils.create_share() count = db_api.count_shares_in_share_group(self.ctxt, sg['id']) self.assertEqual(1, count) def test_count_sg_snapshots_in_share_group(self): sg = db_utils.create_share_group() db_utils.create_share_group_snapshot(sg['id']) db_utils.create_share_group_snapshot(sg['id']) count = db_api.count_share_group_snapshots_in_share_group( self.ctxt, sg['id']) self.assertEqual(2, count) def test_share_group_snapshot_get(self): sg = db_utils.create_share_group() sg_snap = dict(db_utils.create_share_group_snapshot(sg['id'])) sg_snap_source_group = sg_snap.pop('share_group', {}) get_sg_snap = dict( db_api.share_group_snapshot_get(self.ctxt, sg_snap['id'])) get_sg_snap_source_group = 
get_sg_snap.pop('share_group', {}) self.assertDictEqual( dict(sg_snap_source_group), dict(get_sg_snap_source_group)) self.assertDictEqual(sg_snap, get_sg_snap) def test_share_group_snapshot_get_all(self): sg = db_utils.create_share_group() expected_sg_snap = db_utils.create_share_group_snapshot(sg['id']) snaps = db_api.share_group_snapshot_get_all(self.ctxt, detailed=False) self.assertEqual(1, len(snaps)) snap = snaps[0] self.assertEqual(2, len(dict(snap).keys())) self.assertEqual(expected_sg_snap['id'], snap['id']) self.assertEqual(expected_sg_snap['name'], snap['name']) def test_share_group_snapshot_get_all_with_detail(self): sg = db_utils.create_share_group() expected_sg_snap = dict(db_utils.create_share_group_snapshot(sg['id'])) sg_snap_source_group = expected_sg_snap.pop('share_group', {}) snaps = db_api.share_group_snapshot_get_all(self.ctxt, detailed=True) self.assertEqual(1, len(snaps)) actual_sg_snap = dict(snaps[0]) get_sg_snap_source = actual_sg_snap.pop('share_group', {}) self.assertDictEqual( dict(sg_snap_source_group), dict(get_sg_snap_source)) self.assertDictEqual(expected_sg_snap, actual_sg_snap) def test_share_group_snapshot_get_all_by_project(self): fake_project = uuidutils.generate_uuid() sg = db_utils.create_share_group() expected_sg_snap = db_utils.create_share_group_snapshot( sg['id'], project_id=fake_project) snaps = db_api.share_group_snapshot_get_all_by_project( self.ctxt, fake_project, detailed=False) self.assertEqual(1, len(snaps)) snap = snaps[0] self.assertEqual(2, len(dict(snap).keys())) self.assertEqual(expected_sg_snap['id'], snap['id']) self.assertEqual(expected_sg_snap['name'], snap['name']) def test_share_group_snapshot_get_all_by_project_with_details(self): fake_project = uuidutils.generate_uuid() sg = db_utils.create_share_group() expected_sg_snap = dict(db_utils.create_share_group_snapshot( sg['id'], project_id=fake_project)) sg_snap_source_group = expected_sg_snap.pop( 'share_group', {}) snaps = 
db_api.share_group_snapshot_get_all_by_project( self.ctxt, fake_project, detailed=True) self.assertEqual(1, len(snaps)) actual_snap = dict(snaps[0]) get_sg_snap_source = actual_snap.pop('share_group', {}) self.assertDictEqual( dict(sg_snap_source_group), dict(get_sg_snap_source)) self.assertEqual(expected_sg_snap, actual_snap) self.assertEqual(fake_project, actual_snap['project_id']) def test_share_group_snapshot_update(self): fake_name = "my_fake_name" sg = db_utils.create_share_group() expected_sg_snap = db_utils.create_share_group_snapshot(sg['id']) expected_sg_snap['name'] = fake_name db_api.share_group_snapshot_update( self.ctxt, expected_sg_snap['id'], {'name': fake_name}) sg_snap = db_api.share_group_snapshot_get( self.ctxt, expected_sg_snap['id']) self.assertEqual(fake_name, sg_snap['name']) def test_share_group_snapshot_destroy(self): sg = db_utils.create_share_group() sg_snap = db_utils.create_share_group_snapshot(sg['id']) db_api.share_group_snapshot_get(self.ctxt, sg_snap['id']) db_api.share_group_snapshot_destroy(self.ctxt, sg_snap['id']) self.assertRaises( exception.NotFound, db_api.share_group_snapshot_get, self.ctxt, sg_snap['id']) def test_share_group_snapshot_members_get_all(self): sg = db_utils.create_share_group() share = db_utils.create_share(share_group_id=sg['id']) si = db_utils.create_share_instance(share_id=share['id']) sg_snap = db_utils.create_share_group_snapshot(sg['id']) expected_member = dict(db_utils.create_share_group_snapshot_member( sg_snap['id'], share_instance_id=si['id'])) sg_snap_source_member = expected_member.pop( 'share_group_snapshot', {}) sg_snap_source_member = expected_member.pop('share_instance', {}) members = db_api.share_group_snapshot_members_get_all( self.ctxt, sg_snap['id']) self.assertEqual(1, len(members)) member = dict(members[0]) get_sg_snap_source_member = member.pop( 'share_group_snapshot', {}) get_sg_snap_source_member = member.pop('share_instance', {}) self.assertDictEqual(dict( sg_snap_source_member), 
dict(get_sg_snap_source_member)) self.assertDictEqual(expected_member, member) def test_count_share_group_snapshot_members_in_share(self): sg = db_utils.create_share_group() share = db_utils.create_share(share_group_id=sg['id']) si = db_utils.create_share_instance(share_id=share['id']) share2 = db_utils.create_share(share_group_id=sg['id']) si2 = db_utils.create_share_instance(share_id=share2['id']) sg_snap = db_utils.create_share_group_snapshot(sg['id']) db_utils.create_share_group_snapshot_member( sg_snap['id'], share_instance_id=si['id']) db_utils.create_share_group_snapshot_member( sg_snap['id'], share_instance_id=si2['id']) count = db_api.count_share_group_snapshot_members_in_share( self.ctxt, share['id']) self.assertEqual(1, count) def test_share_group_snapshot_members_get(self): sg = db_utils.create_share_group() share = db_utils.create_share(share_group_id=sg['id']) si = db_utils.create_share_instance(share_id=share['id']) sg_snap = db_utils.create_share_group_snapshot(sg['id']) expected_member = dict(db_utils.create_share_group_snapshot_member( sg_snap['id'], share_instance_id=si['id'])) sg_snap_source_member = expected_member.pop('share_group_snapshot', {}) sg_snap_source_member = expected_member.pop('share_instance', {}) member = dict(db_api.share_group_snapshot_member_get( self.ctxt, expected_member['id'])) get_sg_snap_source_member = member.pop('share_group_snapshot', {}) get_sg_snap_source_member = member.pop('share_instance', {}) self.assertDictEqual(dict( sg_snap_source_member), dict(get_sg_snap_source_member)) self.assertDictEqual(expected_member, member) def test_share_group_snapshot_members_get_not_found(self): self.assertRaises( exception.ShareGroupSnapshotMemberNotFound, db_api.share_group_snapshot_member_get, self.ctxt, 'fake_id') def test_share_group_snapshot_member_update(self): sg = db_utils.create_share_group() share = db_utils.create_share(share_group_id=sg['id']) si = db_utils.create_share_instance(share_id=share['id']) sg_snap = 
db_utils.create_share_group_snapshot(sg['id']) expected_member = db_utils.create_share_group_snapshot_member( sg_snap['id'], share_instance_id=si['id']) db_api.share_group_snapshot_member_update( self.ctxt, expected_member['id'], {'status': constants.STATUS_AVAILABLE}) member = db_api.share_group_snapshot_member_get( self.ctxt, expected_member['id']) self.assertEqual(constants.STATUS_AVAILABLE, member['status']) @ddt.ddt class ShareGroupTypeAPITestCase(test.TestCase): def setUp(self): super(ShareGroupTypeAPITestCase, self).setUp() self.ctxt = context.RequestContext( user_id='user_id', project_id='project_id', is_admin=True) @ddt.data(True, False) def test_share_type_destroy_in_use(self, used_by_groups): share_type_1 = db_utils.create_share_type(name='fike') share_type_2 = db_utils.create_share_type(name='bowman') share_group_type_1 = db_utils.create_share_group_type( name='orange', is_public=False, share_types=[share_type_1['id']], group_specs={'dabo': 'allin', 'cadence': 'count'}, override_defaults=True) db_api.share_group_type_access_add(self.ctxt, share_group_type_1['id'], "2018ndaetfigovnsaslcahfavmrpions") db_api.share_group_type_access_add(self.ctxt, share_group_type_1['id'], "2016ndaetfigovnsaslcahfavmrpions") share_group_type_2 = db_utils.create_share_group_type( name='regalia', share_types=[share_type_2['id']]) if used_by_groups: share_group_1 = db_utils.create_share_group( share_group_type_id=share_group_type_1['id'], share_types=[share_type_1['id']]) share_group_2 = db_utils.create_share_group( share_group_type_id=share_group_type_2['id'], share_types=[share_type_2['id']]) self.assertRaises(exception.ShareGroupTypeInUse, db_api.share_group_type_destroy, self.ctxt, share_group_type_1['id']) self.assertRaises(exception.ShareGroupTypeInUse, db_api.share_group_type_destroy, self.ctxt, share_group_type_2['id']) # Cleanup share groups db_api.share_group_destroy(self.ctxt, share_group_1['id']) db_api.share_group_destroy(self.ctxt, share_group_2['id']) # Let's 
cleanup share_group_type_1 and verify it is gone self.assertIsNone(db_api.share_group_type_destroy( self.ctxt, share_group_type_1['id'])) self.assertDictEqual( {}, db_api.share_group_type_specs_get( self.ctxt, share_group_type_1['id'])) self.assertRaises(exception.ShareGroupTypeNotFound, db_api.share_group_type_access_get_all, self.ctxt, share_group_type_1['id']) self.assertRaises(exception.ShareGroupTypeNotFound, db_api.share_group_type_get, self.ctxt, share_group_type_1['id']) # share_group_type_2 must still be around self.assertEqual(share_group_type_2['id'], db_api.share_group_type_get( self.ctxt, share_group_type_2['id'])['id']) @ddt.ddt class ShareSnapshotDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(ShareSnapshotDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() self.share_instances = [ db_utils.create_share_instance( status=constants.STATUS_REPLICATION_CHANGE, share_id='fake_share_id_1'), db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake_share_id_1'), db_utils.create_share_instance( status=constants.STATUS_ERROR_DELETING, share_id='fake_share_id_2'), db_utils.create_share_instance( status=constants.STATUS_MANAGING, share_id='fake_share_id_2'), ] self.share_1 = db_utils.create_share( id='fake_share_id_1', instances=self.share_instances[0:2]) self.share_2 = db_utils.create_share( id='fake_share_id_2', instances=self.share_instances[2:-1]) self.snapshot_instances = [ db_utils.create_snapshot_instance( 'fake_snapshot_id_1', status=constants.STATUS_CREATING, share_instance_id=self.share_instances[0]['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id_1', status=constants.STATUS_ERROR, share_instance_id=self.share_instances[1]['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id_1', status=constants.STATUS_DELETING, share_instance_id=self.share_instances[2]['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id_2', 
status=constants.STATUS_AVAILABLE, id='fake_snapshot_instance_id', provider_location='hogsmeade:snapshot1', progress='87%', share_instance_id=self.share_instances[3]['id']), ] self.snapshot_1 = db_utils.create_snapshot( id='fake_snapshot_id_1', share_id=self.share_1['id'], instances=self.snapshot_instances[0:3]) self.snapshot_2 = db_utils.create_snapshot( id='fake_snapshot_id_2', share_id=self.share_2['id'], instances=self.snapshot_instances[3:4], metadata={'foo': 'bar'}) self.snapshot_instance_export_locations = [ db_utils.create_snapshot_instance_export_locations( self.snapshot_instances[0].id, path='1.1.1.1:/fake_path', is_admin_only=True), db_utils.create_snapshot_instance_export_locations( self.snapshot_instances[1].id, path='2.2.2.2:/fake_path', is_admin_only=True), db_utils.create_snapshot_instance_export_locations( self.snapshot_instances[2].id, path='3.3.3.3:/fake_path', is_admin_only=True), db_utils.create_snapshot_instance_export_locations( self.snapshot_instances[3].id, path='4.4.4.4:/fake_path', is_admin_only=True) ] def test_create(self): share = db_utils.create_share(size=1) values = { 'share_id': share['id'], 'size': share['size'], 'user_id': share['user_id'], 'project_id': share['project_id'], 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_size': share['size'], 'display_name': 'fake', 'display_description': 'fake', 'share_proto': share['share_proto'] } actual_result = db_api.share_snapshot_create( self.ctxt, values, create_snapshot_instance=True) self.assertEqual(1, len(actual_result.instances)) self.assertSubDictMatch(values, actual_result.to_dict()) @ddt.data( ({'with_count': True}, 3, 3), ({'with_count': True, 'limit': 2}, 3, 2) ) @ddt.unpack def test_share_snapshot_get_all_with_count(self, filters, amount_of_share_snapshots, expected_share_snapshots_len): share = db_utils.create_share(size=1) values = { 'share_id': share['id'], 'size': share['size'], 'user_id': share['user_id'], 'project_id': share['project_id'], 'status': 
constants.STATUS_CREATING, 'progress': '0%', 'share_size': share['size'], 'display_description': 'fake_count_test', 'share_proto': share['share_proto'], } # consider only shares created in this function filters.update({'share_id': share['id']}) for i in range(amount_of_share_snapshots): tmp_values = copy.deepcopy(values) tmp_values['display_name'] = 'fake_name_%s' % str(i) db_api.share_snapshot_create(self.ctxt, tmp_values) limit = filters.get('limit') count, share_snapshots = db_api.share_snapshot_get_all_with_count( self.ctxt, filters=filters, limit=limit) self.assertEqual(count, amount_of_share_snapshots) self.assertEqual(expected_share_snapshots_len, len(share_snapshots)) def test_share_snapshot_get_all_with_filters_some(self): expected_status = constants.STATUS_AVAILABLE filters = { 'status': expected_status, 'metadata': {'foo': 'bar'} } snapshots = db_api.share_snapshot_get_all(self.ctxt, filters=filters) self.assertEqual(1, len(snapshots)) for snapshot in snapshots: s = snapshot.get('share_snapshot_metadata') for k, v in filters['metadata'].items(): filter_meta_key = k filter_meta_val = v self.assertEqual('fake_snapshot_id_2', snapshot['id']) self.assertEqual(snapshot['status'], filters['status']) self.assertEqual(s[0]['key'], filter_meta_key) self.assertEqual(s[0]['value'], filter_meta_val) def test_share_snapshot_get_latest_for_share(self): share = db_utils.create_share(size=1) values = { 'share_id': share['id'], 'size': share['size'], 'user_id': share['user_id'], 'project_id': share['project_id'], 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_size': share['size'], 'display_description': 'fake', 'share_proto': share['share_proto'], } values1 = copy.deepcopy(values) values1['display_name'] = 'snap1' db_api.share_snapshot_create(self.ctxt, values1) values2 = copy.deepcopy(values) values2['display_name'] = 'snap2' db_api.share_snapshot_create(self.ctxt, values2) values3 = copy.deepcopy(values) values3['display_name'] = 'snap3' 
db_api.share_snapshot_create(self.ctxt, values3) result = db_api.share_snapshot_get_latest_for_share(self.ctxt, share['id']) self.assertSubDictMatch(values3, result.to_dict()) def test_get_instance(self): snapshot = db_utils.create_snapshot(with_share=True) instance = db_api.share_snapshot_instance_get( self.ctxt, snapshot.instance['id'], with_share_data=True) instance_dict = instance.to_dict() self.assertTrue(hasattr(instance, 'name')) self.assertTrue(hasattr(instance, 'share_name')) self.assertTrue(hasattr(instance, 'share_id')) self.assertIn('name', instance_dict) self.assertIn('share_name', instance_dict) @ddt.data(None, constants.STATUS_ERROR) def test_share_snapshot_instance_get_all_with_filters_some(self, status): expected_status = status or (constants.STATUS_CREATING, constants.STATUS_DELETING) expected_number = 1 if status else 2 filters = { 'snapshot_ids': 'fake_snapshot_id_1', 'statuses': expected_status } instances = db_api.share_snapshot_instance_get_all_with_filters( self.ctxt, filters) for instance in instances: self.assertEqual('fake_snapshot_id_1', instance['snapshot_id']) self.assertIn(instance['status'], filters['statuses']) self.assertEqual(expected_number, len(instances)) def test_share_snapshot_instance_get_all_with_filters_all_filters(self): filters = { 'snapshot_ids': 'fake_snapshot_id_2', 'instance_ids': 'fake_snapshot_instance_id', 'statuses': constants.STATUS_AVAILABLE, 'share_instance_ids': self.share_instances[3]['id'], } instances = db_api.share_snapshot_instance_get_all_with_filters( self.ctxt, filters, with_share_data=True) self.assertEqual(1, len(instances)) self.assertEqual('fake_snapshot_instance_id', instances[0]['id']) self.assertEqual( self.share_2['id'], instances[0]['share_instance']['share_id']) def test_share_snapshot_instance_get_all_with_filters_wrong_filters(self): filters = { 'some_key': 'some_value', 'some_other_key': 'some_other_value', } instances = db_api.share_snapshot_instance_get_all_with_filters( self.ctxt, 
filters) self.assertEqual(4, len(instances)) def test_share_snapshot_instance_create(self): snapshot = db_utils.create_snapshot(with_share=True) share = snapshot['share'] share_instance = db_utils.create_share_instance(share_id=share['id']) values = { 'snapshot_id': snapshot['id'], 'share_instance_id': share_instance['id'], 'status': constants.STATUS_MANAGING, 'progress': '88%', 'provider_location': 'whomping_willow', } actual_result = db_api.share_snapshot_instance_create( self.ctxt, snapshot['id'], values) snapshot = db_api.share_snapshot_get(self.ctxt, snapshot['id']) self.assertSubDictMatch(values, actual_result.to_dict()) self.assertEqual(2, len(snapshot['instances'])) def test_share_snapshot_instance_update(self): snapshot = db_utils.create_snapshot(with_share=True) values = { 'snapshot_id': snapshot['id'], 'status': constants.STATUS_ERROR, 'progress': '18%', 'provider_location': 'godrics_hollow', } actual_result = db_api.share_snapshot_instance_update( self.ctxt, snapshot['instance']['id'], values) self.assertSubDictMatch(values, actual_result.to_dict()) @ddt.data(2, 1) def test_share_snapshot_instance_delete(self, instances): snapshot = db_utils.create_snapshot(with_share=True) first_instance_id = snapshot['instance']['id'] if instances > 1: instance = db_utils.create_snapshot_instance( snapshot['id'], share_instance_id=snapshot['share']['instance']['id']) else: instance = snapshot['instance'] retval = db_api.share_snapshot_instance_delete( self.ctxt, instance['id']) self.assertIsNone(retval) if instances == 1: self.assertRaises(exception.ShareSnapshotNotFound, db_api.share_snapshot_get, self.ctxt, snapshot['id']) else: snapshot = db_api.share_snapshot_get(self.ctxt, snapshot['id']) self.assertEqual(1, len(snapshot['instances'])) self.assertEqual(first_instance_id, snapshot['instance']['id']) def test_share_snapshot_access_create(self): values = { 'share_snapshot_id': self.snapshot_1['id'], } actual_result = db_api.share_snapshot_access_create(self.ctxt, 
values) self.assertSubDictMatch(values, actual_result.to_dict()) def test_share_snapshot_instance_access_get_all(self): access = db_utils.create_snapshot_access( share_snapshot_id=self.snapshot_1['id']) # NOTE(zzzeek) the create_snapshot_access routine iterates through the # ShareSnapshot.instances collection and creates a new # ShareSnapshotInstanceAccessMapping for each ShareSnapshotInstance. # however, this collection is unordered and does not have any guarantee # that its ordering would match that of our self.snapshot_instances # collection. Therefore key the fixture values and the resulting # ShareSnapshotInstanceAccessMapping objects by share_snapshot_id # and compare individually. values_by_sid = { snapshot_instance.id: { 'share_snapshot_instance_id': snapshot_instance.id, 'access_id': access['id'] } for snapshot_instance in self.snapshot_instances if "fake" not in snapshot_instance.id } rules_by_sid = { rule.share_snapshot_instance_id: rule for rule in db_api.share_snapshot_instance_access_get_all( self.ctxt, access['id'] ) } for sid in values_by_sid: self.assertSubDictMatch( values_by_sid[sid], rules_by_sid[sid].to_dict() ) def test_share_snapshot_access_get(self): access = db_utils.create_snapshot_access( share_snapshot_id=self.snapshot_1['id']) values = {'share_snapshot_id': self.snapshot_1['id']} actual_value = db_api.share_snapshot_access_get( self.ctxt, access['id']) self.assertSubDictMatch(values, actual_value.to_dict()) def test_share_snapshot_access_get_all_for_share_snapshot(self): access = db_utils.create_snapshot_access( share_snapshot_id=self.snapshot_1['id']) values = {'access_type': access['access_type'], 'access_to': access['access_to'], 'share_snapshot_id': self.snapshot_1['id']} actual_value = db_api.share_snapshot_access_get_all_for_share_snapshot( self.ctxt, self.snapshot_1['id'], {}) self.assertSubDictMatch(values, actual_value[0].to_dict()) @ddt.data({'existing': {'access_type': 'cephx', 'access_to': 'alice'}, 'new': {'access_type': 
'user', 'access_to': 'alice'}, 'result': False}, {'existing': {'access_type': 'user', 'access_to': 'bob'}, 'new': {'access_type': 'user', 'access_to': 'bob'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': '10.0.0.10/32'}, 'new': {'access_type': 'ip', 'access_to': '10.0.0.10'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': '10.10.0.11'}, 'new': {'access_type': 'ip', 'access_to': '10.10.0.11'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': 'fd21::11'}, 'new': {'access_type': 'ip', 'access_to': 'fd21::11'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': 'fd21::10'}, 'new': {'access_type': 'ip', 'access_to': 'fd21::10/128'}, 'result': True}, {'existing': {'access_type': 'ip', 'access_to': '10.10.0.0/22'}, 'new': {'access_type': 'ip', 'access_to': '10.10.0.0/24'}, 'result': False}, {'existing': {'access_type': 'ip', 'access_to': '2620:52::/48'}, 'new': {'access_type': 'ip', 'access_to': '2620:52:0:13b8::/64'}, 'result': False}) @ddt.unpack def test_share_snapshot_check_for_existing_access(self, existing, new, result): db_utils.create_snapshot_access( share_snapshot_id=self.snapshot_1['id'], access_type=existing['access_type'], access_to=existing['access_to']) rule_exists = db_api.share_snapshot_check_for_existing_access( self.ctxt, self.snapshot_1['id'], new['access_type'], new['access_to']) self.assertEqual(result, rule_exists) def test_share_snapshot_access_get_all_for_snapshot_instance(self): access = db_utils.create_snapshot_access( share_snapshot_id=self.snapshot_1['id']) values = {'access_type': access['access_type'], 'access_to': access['access_to'], 'share_snapshot_id': self.snapshot_1['id']} out = db_api.share_snapshot_access_get_all_for_snapshot_instance( self.ctxt, self.snapshot_instances[0].id) self.assertSubDictMatch(values, out[0].to_dict()) def test_share_snapshot_instance_access_update_state(self): access = db_utils.create_snapshot_access( share_snapshot_id=self.snapshot_1['id']) 
values = {'state': constants.STATUS_ACTIVE, 'access_id': access['id'], 'share_snapshot_instance_id': self.snapshot_instances[0].id} actual_result = db_api.share_snapshot_instance_access_update( self.ctxt, access['id'], self.snapshot_1.instance['id'], {'state': constants.STATUS_ACTIVE}) self.assertSubDictMatch(values, actual_result.to_dict()) self.assertIsNotNone(actual_result['updated_at']) time_now = timeutils.utcnow() self.assertTrue(actual_result['updated_at'] < time_now) def test_share_snapshot_instance_access_get(self): access = db_utils.create_snapshot_access( share_snapshot_id=self.snapshot_1['id']) values = {'access_id': access['id'], 'share_snapshot_instance_id': self.snapshot_instances[0].id} actual_result = db_api.share_snapshot_instance_access_get( self.ctxt, access['id'], self.snapshot_instances[0].id) self.assertSubDictMatch(values, actual_result.to_dict()) def test_share_snapshot_instance_access_delete(self): access = db_utils.create_snapshot_access( share_snapshot_id=self.snapshot_1['id']) db_api.share_snapshot_instance_access_delete( self.ctxt, access['id'], self.snapshot_1.instance['id']) def test_share_snapshot_instance_export_location_create(self): values = { 'share_snapshot_instance_id': self.snapshot_instances[0].id, } actual_result = db_api.share_snapshot_instance_export_location_create( self.ctxt, values) self.assertSubDictMatch(values, actual_result.to_dict()) def test_share_snapshot_export_locations_get(self): out = db_api.share_snapshot_export_locations_get( self.ctxt, self.snapshot_1['id']) keys = ['share_snapshot_instance_id', 'path', 'is_admin_only'] for expected, actual in zip(self.snapshot_instance_export_locations, out): [self.assertEqual(expected[k], actual[k]) for k in keys] def test_share_snapshot_instance_export_locations_get(self): out = db_api.share_snapshot_instance_export_locations_get_all( self.ctxt, self.snapshot_instances[0].id) keys = ['share_snapshot_instance_id', 'path', 'is_admin_only'] for key in keys: 
self.assertEqual(self.snapshot_instance_export_locations[0][key], out[0][key]) def test_share_snapshot_instance_export_locations_update(self): snapshot = db_utils.create_snapshot(with_share=True) initial_locations = ['fake1/1/', 'fake2/2', 'fake3/3'] update_locations = ['fake4/4', 'fake2/2', 'fake3/3'] # add initial locations db_api.share_snapshot_instance_export_locations_update( self.ctxt, snapshot.instance['id'], initial_locations, False) # update locations db_api.share_snapshot_instance_export_locations_update( self.ctxt, snapshot.instance['id'], update_locations, True) get_result = db_api.share_snapshot_instance_export_locations_get_all( self.ctxt, snapshot.instance['id']) result_locations = [el['path'] for el in get_result] self.assertEqual(sorted(result_locations), sorted(update_locations)) def test_share_snapshot_instance_export_locations_update_wrong_type(self): snapshot = db_utils.create_snapshot(with_share=True) new_export_locations = [1] self.assertRaises( exception.ManilaException, db_api.share_snapshot_instance_export_locations_update, self.ctxt, snapshot.instance['id'], new_export_locations, False) def test_share_snapshot_metadata_get(self): metadata = {'a': 'b', 'c': 'd'} self.share_1 = db_utils.create_share(size=1) self.snapshot_1 = db_utils.create_snapshot( share_id=self.share_1['id']) db_api.share_snapshot_metadata_update( self.ctxt, share_snapshot_id=self.snapshot_1['id'], metadata=metadata, delete=False) self.assertEqual( metadata, db_api.share_snapshot_metadata_get( self.ctxt, share_snapshot_id=self.snapshot_1['id'])) def test_share_snapshot_metadata_get_item(self): metadata = {'a': 'b', 'c': 'd'} key = 'a' shouldbe = {'a': 'b'} self.share_1 = db_utils.create_share(size=1) self.snapshot_1 = db_utils.create_snapshot( share_id=self.share_1['id']) db_api.share_snapshot_metadata_update( self.ctxt, share_snapshot_id=self.snapshot_1['id'], metadata=metadata, delete=False) self.assertEqual( shouldbe, db_api.share_snapshot_metadata_get_item( 
self.ctxt, share_snapshot_id=self.snapshot_1['id'], key=key)) def test_share_snapshot_metadata_update(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '5'} should_be = {'a': '3', 'c': '2', 'd': '5'} self.share_1 = db_utils.create_share(size=1) self.snapshot_1 = db_utils.create_snapshot( share_id=self.share_1['id']) db_api.share_snapshot_metadata_update( self.ctxt, share_snapshot_id=self.snapshot_1['id'], metadata=metadata1, delete=False) db_api.share_snapshot_metadata_update( self.ctxt, share_snapshot_id=self.snapshot_1['id'], metadata=metadata2, delete=False) self.assertEqual( should_be, db_api.share_snapshot_metadata_get( self.ctxt, share_snapshot_id=self.snapshot_1['id'])) def test_share_snapshot_metadata_delete(self): key = 'a' metadata = {'a': '1', 'c': '2'} should_be = {'c': '2'} self.share_1 = db_utils.create_share(size=1) self.snapshot_1 = db_utils.create_snapshot( share_id=self.share_1['id']) db_api.share_snapshot_metadata_update( self.ctxt, share_snapshot_id=self.snapshot_1['id'], metadata=metadata, delete=False) db_api.share_snapshot_metadata_delete( self.ctxt, share_snapshot_id=self.snapshot_1['id'], key=key) self.assertEqual( should_be, db_api.share_snapshot_metadata_get( self.ctxt, share_snapshot_id=self.snapshot_1['id'])) class ShareExportLocationsDatabaseAPITestCase(test.TestCase): def setUp(self): super(ShareExportLocationsDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() def test_update_valid_order(self): share = db_utils.create_share() initial_locations = ['fake1/1/', 'fake2/2', 'fake3/3'] update_locations = ['fake4/4', 'fake2/2', 'fake3/3'] # add initial locations db_api.export_locations_update( self.ctxt, share.instance['id'], initial_locations, False) # update locations db_api.export_locations_update( self.ctxt, share.instance['id'], update_locations, True) actual_result = db_api.export_location_get_all( self.ctxt, share['id']) # actual result should contain locations in exact same order 
self.assertEqual(actual_result, update_locations) def test_update_export_locations_with_metadata(self): share = db_utils.create_share() original_export_locations = [ { 'path': 'fake1/1/', 'is_admin_only': True, 'metadata': { 'foo': 'bar', 'preferred': '1' }, }, { 'path': 'fake2/1/', 'is_admin_only': True, 'metadata': { 'clem': 'son', 'preferred': '0' }, }, ] # add initial locations db_api.export_locations_update( self.ctxt, share.instance['id'], original_export_locations, False) updated_export_locations = [ { 'path': 'fake1/1/', 'is_admin_only': True, 'metadata': { 'foo': 'quz', 'preferred': '0' }, }, { 'path': 'fake2/1/', 'is_admin_only': True, 'metadata': { 'clem': 'son', 'preferred': '1', }, }, ] # update locations db_api.export_locations_update( self.ctxt, share.instance['id'], updated_export_locations, True) actual_result = db_api.export_location_get_all_by_share_id( self.ctxt, share['id']) actual_export_locations = [ { 'path': el['path'], 'is_admin_only': el['is_admin_only'], 'metadata': el['el_metadata'], } for el in actual_result ] self.assertEqual(updated_export_locations, actual_export_locations) def test_update_string(self): share = db_utils.create_share() initial_location = 'fake1/1/' db_api.export_locations_update( self.ctxt, share.instance['id'], initial_location, False) actual_result = db_api.export_location_get_all( self.ctxt, share['id']) self.assertEqual(actual_result, [initial_location]) def test_get_admin_export_locations(self): ctxt_user = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) share = db_utils.create_share() locations = [ {'path': 'fake1/1/', 'is_admin_only': True}, {'path': 'fake2/2/', 'is_admin_only': True}, {'path': 'fake3/3/', 'is_admin_only': True}, ] db_api.export_locations_update( self.ctxt, share.instance['id'], locations, delete=False) user_result = db_api.export_location_get_all(ctxt_user, share['id']) self.assertEqual([], user_result) admin_result = db_api.export_location_get_all( 
self.ctxt, share['id']) self.assertEqual(3, len(admin_result)) for location in locations: self.assertIn(location['path'], admin_result) def test_get_user_export_locations(self): ctxt_user = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) share = db_utils.create_share() locations = [ {'path': 'fake1/1/', 'is_admin_only': False}, {'path': 'fake2/2/', 'is_admin_only': False}, {'path': 'fake3/3/', 'is_admin_only': False}, ] db_api.export_locations_update( self.ctxt, share.instance['id'], locations, delete=False) user_result = db_api.export_location_get_all(ctxt_user, share['id']) self.assertEqual(3, len(user_result)) for location in locations: self.assertIn(location['path'], user_result) admin_result = db_api.export_location_get_all( self.ctxt, share['id']) self.assertEqual(3, len(admin_result)) for location in locations: self.assertIn(location['path'], admin_result) def test_get_user_export_locations_old_view(self): ctxt_user = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) share = db_utils.create_share() locations = ['fake1/1/', 'fake2/2', 'fake3/3'] db_api.export_locations_update( self.ctxt, share.instance['id'], locations, delete=False) user_result = db_api.export_location_get_all(ctxt_user, share['id']) self.assertEqual(locations, user_result) admin_result = db_api.export_location_get_all( self.ctxt, share['id']) self.assertEqual(locations, admin_result) @ddt.ddt class ShareInstanceExportLocationsMetadataDatabaseAPITestCase(test.TestCase): def setUp(self): clname = ShareInstanceExportLocationsMetadataDatabaseAPITestCase super(clname, self).setUp() self.ctxt = context.get_admin_context() share_id = 'fake_share_id' instances = [ db_utils.create_share_instance( share_id=share_id, status=constants.STATUS_AVAILABLE), db_utils.create_share_instance( share_id=share_id, status=constants.STATUS_MIGRATING), db_utils.create_share_instance( share_id=share_id, 
status=constants.STATUS_MIGRATING_TO), ] self.share = db_utils.create_share( id=share_id, instances=instances) self.initial_locations = ['/fake/foo/', '/fake/bar', '/fake/quuz'] self.shown_locations = ['/fake/foo/', '/fake/bar'] for i in range(0, 3): db_api.export_locations_update( self.ctxt, instances[i]['id'], self.initial_locations[i], delete=False) def _get_export_location_uuid_by_path(self, path): els = db_api.export_location_get_all_by_share_id( self.ctxt, self.share.id) export_location_uuid = None for el in els: if el.path == path: export_location_uuid = el.uuid self.assertIsNotNone(export_location_uuid) return export_location_uuid def test_get_export_locations_by_share_id(self): els = db_api.export_location_get_all_by_share_id( self.ctxt, self.share.id) self.assertEqual(3, len(els)) for path in self.shown_locations: self.assertTrue(any([path in el.path for el in els])) def test_get_export_locations_by_share_id_ignore_migration_dest(self): els = db_api.export_location_get_all_by_share_id( self.ctxt, self.share.id, ignore_migration_destination=True) self.assertEqual(2, len(els)) for path in self.shown_locations: self.assertTrue(any([path in el.path for el in els])) def test_get_export_locations_by_share_instance_id(self): els = db_api.export_location_get_all_by_share_instance_id( self.ctxt, self.share.instance.id) self.assertEqual(1, len(els)) for path in [self.shown_locations[1]]: self.assertTrue(any([path in el.path for el in els])) def test_export_location_metadata_update_delete(self): export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[0]) metadata = { 'foo_key': 'foo_value', 'bar_key': 'bar_value', 'quuz_key': 'quuz_value', } db_api.export_location_metadata_update( self.ctxt, export_location_uuid, metadata, False) db_api.export_location_metadata_delete( self.ctxt, export_location_uuid, list(metadata.keys())[0:-1]) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) key = 
list(metadata.keys())[-1] self.assertEqual({key: metadata[key]}, result) db_api.export_location_metadata_delete( self.ctxt, export_location_uuid) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) self.assertEqual({}, result) def test_export_location_metadata_update_get(self): # Write metadata for target export location export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[0]) metadata = {'foo_key': 'foo_value', 'bar_key': 'bar_value'} db_api.export_location_metadata_update( self.ctxt, export_location_uuid, metadata, False) # Write metadata for some concurrent export location other_export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[1]) other_metadata = {'key_from_other_el': 'value_of_key_from_other_el'} db_api.export_location_metadata_update( self.ctxt, other_export_location_uuid, other_metadata, False) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) self.assertEqual(metadata, result) updated_metadata = { 'foo_key': metadata['foo_key'], 'quuz_key': 'quuz_value', } db_api.export_location_metadata_update( self.ctxt, export_location_uuid, updated_metadata, True) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) self.assertEqual(updated_metadata, result) def test_export_location_metadata_get_item(self): export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[0]) metadata = {'foo_key': 'foo_value', 'bar_key': 'bar_value'} db_api.export_location_metadata_update( self.ctxt, export_location_uuid, metadata, False) result = db_api.export_location_metadata_get_item( self.ctxt, export_location_uuid, 'foo_key') self.assertEqual( {'foo_key': 'foo_value'}, result) def test_export_location_metadata_get_item_invalid(self): export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[0]) metadata = {'foo_key': 'foo_value', 'bar_key': 'bar_value'} 
db_api.export_location_metadata_update( self.ctxt, export_location_uuid, metadata, False) self.assertRaises(exception.MetadataItemNotFound, db_api.export_location_metadata_get_item, self.ctxt, export_location_uuid, 'foo') @ddt.data( ("k", "v"), ("k" * 256, "v"), ("k", "v" * 1024), ("k" * 256, "v" * 1024), ) @ddt.unpack def test_set_metadata_with_different_length(self, key, value): export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[1]) metadata = {key: value} db_api.export_location_metadata_update( self.ctxt, export_location_uuid, metadata, False) result = db_api.export_location_metadata_get( self.ctxt, export_location_uuid) self.assertEqual(metadata, result) @ddt.ddt class DriverPrivateDataDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(DriverPrivateDataDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() def _get_driver_test_data(self): return uuidutils.generate_uuid() @ddt.data({"details": {"foo": "bar", "tee": "too"}, "valid": {"foo": "bar", "tee": "too"}}, {"details": {"foo": "bar", "tee": ["test"]}, "valid": {"foo": "bar", "tee": str(["test"])}}) @ddt.unpack def test_update(self, details, valid): test_id = self._get_driver_test_data() initial_data = db_api.driver_private_data_get(self.ctxt, test_id) db_api.driver_private_data_update(self.ctxt, test_id, details) actual_data = db_api.driver_private_data_get(self.ctxt, test_id) self.assertEqual({}, initial_data) self.assertEqual(valid, actual_data) @ddt.data({'with_deleted': True, 'append': False}, {'with_deleted': True, 'append': True}, {'with_deleted': False, 'append': False}, {'with_deleted': False, 'append': True}) @ddt.unpack def test_update_with_more_values(self, with_deleted, append): test_id = self._get_driver_test_data() details = {"tee": "too"} more_details = {"foo": "bar"} result = {"tee": "too", "foo": "bar"} db_api.driver_private_data_update(self.ctxt, test_id, details) if with_deleted: 
db_api.driver_private_data_delete(self.ctxt, test_id) if append: more_details.update(details) if with_deleted and not append: result.pop("tee") db_api.driver_private_data_update(self.ctxt, test_id, more_details) actual_result = db_api.driver_private_data_get(self.ctxt, test_id) self.assertEqual(result, actual_result) @ddt.data(True, False) def test_update_with_duplicate(self, with_deleted): test_id = self._get_driver_test_data() details = {"tee": "too"} db_api.driver_private_data_update(self.ctxt, test_id, details) if with_deleted: db_api.driver_private_data_delete(self.ctxt, test_id) db_api.driver_private_data_update(self.ctxt, test_id, details) actual_result = db_api.driver_private_data_get(self.ctxt, test_id) self.assertEqual(details, actual_result) def test_update_with_delete_existing(self): test_id = self._get_driver_test_data() details = {"key1": "val1", "key2": "val2", "key3": "val3"} details_update = {"key1": "val1_upd", "key4": "new_val"} # Create new details db_api.driver_private_data_update(self.ctxt, test_id, details) db_api.driver_private_data_update(self.ctxt, test_id, details_update, delete_existing=True) actual_result = db_api.driver_private_data_get( self.ctxt, test_id) self.assertEqual(details_update, actual_result) def test_get(self): test_id = self._get_driver_test_data() test_key = "foo" test_keys = [test_key, "tee"] details = {test_keys[0]: "val", test_keys[1]: "val", "mee": "foo"} db_api.driver_private_data_update(self.ctxt, test_id, details) actual_result_all = db_api.driver_private_data_get( self.ctxt, test_id) actual_result_single_key = db_api.driver_private_data_get( self.ctxt, test_id, test_key) actual_result_list = db_api.driver_private_data_get( self.ctxt, test_id, test_keys) self.assertEqual(details, actual_result_all) self.assertEqual(details[test_key], actual_result_single_key) self.assertEqual(dict.fromkeys(test_keys, "val"), actual_result_list) def test_delete_single(self): test_id = self._get_driver_test_data() test_key = "foo" 
details = {test_key: "bar", "tee": "too"} valid_result = {"tee": "too"} db_api.driver_private_data_update(self.ctxt, test_id, details) db_api.driver_private_data_delete(self.ctxt, test_id, test_key) actual_result = db_api.driver_private_data_get( self.ctxt, test_id) self.assertEqual(valid_result, actual_result) def test_delete_all(self): test_id = self._get_driver_test_data() details = {"foo": "bar", "tee": "too"} db_api.driver_private_data_update(self.ctxt, test_id, details) db_api.driver_private_data_delete(self.ctxt, test_id) actual_result = db_api.driver_private_data_get( self.ctxt, test_id) self.assertEqual({}, actual_result) @ddt.ddt class ShareNetworkDatabaseAPITestCase(BaseDatabaseAPITestCase): def __init__(self, *args, **kwargs): super(ShareNetworkDatabaseAPITestCase, self).__init__(*args, **kwargs) self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) def setUp(self): super(ShareNetworkDatabaseAPITestCase, self).setUp() self.share_nw_dict = {'id': 'fake network id', 'project_id': self.fake_context.project_id, 'user_id': 'fake_user_id', 'name': 'whatever', 'description': 'fake description'} def test_create_one_network(self): result = db_api.share_network_create(self.fake_context, self.share_nw_dict) self._check_fields(expected=self.share_nw_dict, actual=result) self.assertEqual(0, len(result['share_instances'])) self.assertEqual(0, len(result['security_services'])) def test_create_two_networks_in_different_tenants(self): share_nw_dict2 = self.share_nw_dict.copy() share_nw_dict2['id'] = None share_nw_dict2['project_id'] = 'fake project 2' result1 = db_api.share_network_create(self.fake_context, self.share_nw_dict) result2 = db_api.share_network_create(self.fake_context.elevated(), share_nw_dict2) self._check_fields(expected=self.share_nw_dict, actual=result1) self._check_fields(expected=share_nw_dict2, actual=result2) def test_create_two_networks_in_one_tenant(self): share_nw_dict2 = 
self.share_nw_dict.copy() share_nw_dict2['id'] += "suffix" result1 = db_api.share_network_create(self.fake_context, self.share_nw_dict) result2 = db_api.share_network_create(self.fake_context, share_nw_dict2) self._check_fields(expected=self.share_nw_dict, actual=result1) self._check_fields(expected=share_nw_dict2, actual=result2) def test_create_with_duplicated_id(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(db_exception.DBDuplicateEntry, db_api.share_network_create, self.fake_context, self.share_nw_dict) def test_get(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self._check_fields(expected=self.share_nw_dict, actual=result) self.assertEqual(0, len(result['share_instances'])) self.assertEqual(0, len(result['security_services'])) def _create_share_network_for_project(self, project_id): ctx = context.RequestContext(user_id='fake user', project_id=project_id, is_admin=False) share_data = self.share_nw_dict.copy() share_data['project_id'] = project_id db_api.share_network_create(ctx, share_data) return share_data def test_get_other_tenant_as_admin(self): expected = self._create_share_network_for_project('fake project 2') result = db_api.share_network_get(self.fake_context.elevated(), self.share_nw_dict['id']) self._check_fields(expected=expected, actual=result) self.assertEqual(0, len(result['share_instances'])) self.assertEqual(0, len(result['security_services'])) def test_get_other_tenant(self): self._create_share_network_for_project('fake project 2') self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_get, self.fake_context, self.share_nw_dict['id']) @ddt.data([{'id': 'fake share id1'}], [{'id': 'fake share id1'}, {'id': 'fake share id2'}],) def test_get_with_shares(self, shares): db_api.share_network_create(self.fake_context, self.share_nw_dict) share_instances = [] for share in shares: 
share.update({'share_network_id': self.share_nw_dict['id']}) share_instances.append( db_api.share_create(self.fake_context, share).instance ) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(shares), len(result['share_instances'])) for index, share_instance in enumerate(share_instances): self.assertEqual( share_instance['share_network_id'], result['share_instances'][index]['share_network_id'] ) @ddt.data([{'id': 'fake security service id1', 'type': 'fake type'}], [{'id': 'fake security service id1', 'type': 'fake type'}, {'id': 'fake security service id2', 'type': 'fake type'}]) def test_get_with_security_services(self, security_services): db_api.share_network_create(self.fake_context, self.share_nw_dict) for service in security_services: service.update({'project_id': self.fake_context.project_id}) db_api.security_service_create(self.fake_context, service) db_api.share_network_add_security_service( self.fake_context, self.share_nw_dict['id'], service['id']) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(security_services), len(result['security_services'])) for index, service in enumerate(security_services): self._check_fields(expected=service, actual=result['security_services'][index]) @ddt.data([{'id': 'fake_id_1', 'availability_zone_id': 'None'}], [{'id': 'fake_id_2', 'availability_zone_id': 'None'}, {'id': 'fake_id_3', 'availability_zone_id': 'fake_az_id'}]) def test_get_with_subnets(self, subnets): db_api.share_network_create(self.fake_context, self.share_nw_dict) for subnet in subnets: subnet['share_network_id'] = self.share_nw_dict['id'] db_api.share_network_subnet_create(self.fake_context, subnet) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(len(subnets), len(result['share_network_subnets'])) for index, subnet in enumerate(subnets): self._check_fields(expected=subnet, 
actual=result['share_network_subnets'][index]) def test_get_not_found(self): self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_get, self.fake_context, 'fake id') def test_delete(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_network_delete(self.fake_context, self.share_nw_dict['id']) self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_get, self.fake_context, self.share_nw_dict['id']) @ddt.data([{'id': 'fake_id_1', 'availability_zone_id': 'None'}], [{'id': 'fake_id_2', 'availability_zone_id': 'None'}, {'id': 'fake_id_3', 'availability_zone_id': 'fake_az_id'}]) def test_delete_with_subnets(self, subnets): db_api.share_network_create(self.fake_context, self.share_nw_dict) for subnet in subnets: subnet['share_network_id'] = self.share_nw_dict['id'] db_api.share_network_subnet_create(self.fake_context, subnet) db_api.share_network_delete(self.fake_context, self.share_nw_dict['id']) self.assertRaises(exception.ShareNetworkSubnetNotFound, db_api.share_network_subnet_get, self.fake_context, subnets[0]['id']) def test_delete_not_found(self): self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_delete, self.fake_context, 'fake id') def test_update(self): new_name = 'fake_new_name' db_api.share_network_create(self.fake_context, self.share_nw_dict) result_update = db_api.share_network_update(self.fake_context, self.share_nw_dict['id'], {'name': new_name}) result_get = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(new_name, result_update['name']) self._check_fields(expected=dict(result_update.items()), actual=dict(result_get.items())) def test_update_not_found(self): self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_update, self.fake_context, 'fake id', {}) @ddt.data(1, 2) def test_get_all_one_record(self, records_count): index = 0 share_networks = [] while index < records_count: share_network_dict = 
dict(self.share_nw_dict) fake_id = 'fake_id%s' % index share_network_dict.update({'id': fake_id, 'project_id': fake_id}) share_networks.append(share_network_dict) db_api.share_network_create(self.fake_context.elevated(), share_network_dict) index += 1 result = db_api.share_network_get_all(self.fake_context.elevated()) self.assertEqual(len(share_networks), len(result)) for index, net in enumerate(share_networks): self._check_fields(expected=net, actual=result[index]) def test_get_all_by_filter_with_project_id(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) share_nw_dict2 = dict(self.share_nw_dict) share_nw_dict2['id'] = 'fake share nw id2' share_nw_dict2['project_id'] = 'fake project 2' new_context = context.RequestContext(user_id='fake user 2', project_id='fake project 2', is_admin=False) db_api.share_network_create(new_context, share_nw_dict2) filters = {'project_id': share_nw_dict2['project_id']} result = db_api.share_network_get_all_by_filter( self.fake_context.elevated(), filters=filters) self.assertEqual(1, len(result)) self._check_fields(expected=share_nw_dict2, actual=result[0]) def test_get_all_with_created_since_or_before_filter(self): now = timeutils.utcnow() share_nw1 = dict(self.share_nw_dict) share_nw2 = dict(self.share_nw_dict) share_nw3 = dict(self.share_nw_dict) share_nw1['created_at'] = (now - datetime.timedelta(seconds=1)) share_nw2['created_at'] = (now + datetime.timedelta(seconds=1)) share_nw3['created_at'] = (now + datetime.timedelta(seconds=2)) share_nw1['id'] = 'fake share nw id1' share_nw2['id'] = 'fake share nw id2' share_nw3['id'] = 'fake share nw id3' db_api.share_network_create(self.fake_context, share_nw1) db_api.share_network_create(self.fake_context, share_nw2) db_api.share_network_create(self.fake_context, share_nw3) filters1 = {'created_before': now} filters2 = {'created_since': now} result1 = db_api.share_network_get_all_by_filter( self.fake_context.elevated(), filters=filters1) result2 = 
db_api.share_network_get_all_by_filter( self.fake_context.elevated(), filters=filters2) self.assertEqual(1, len(result1)) self.assertEqual(2, len(result2)) def test_get_all_by_project(self): db_api.share_network_create(self.fake_context, self.share_nw_dict) share_nw_dict2 = dict(self.share_nw_dict) share_nw_dict2['id'] = 'fake share nw id2' share_nw_dict2['project_id'] = 'fake project 2' new_context = context.RequestContext(user_id='fake user 2', project_id='fake project 2', is_admin=False) db_api.share_network_create(new_context, share_nw_dict2) result = db_api.share_network_get_all_by_project( self.fake_context.elevated(), share_nw_dict2['project_id']) self.assertEqual(1, len(result)) self._check_fields(expected=share_nw_dict2, actual=result[0]) def test_add_security_service(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) with db_api.context_manager.reader.using(self.fake_context): result = (db_api.model_query( self.fake_context, models.ShareNetworkSecurityServiceAssociation). filter_by(security_service_id=security_dict1['id']). filter_by(share_network_id=self.share_nw_dict['id']). 
first()) self.assertIsNotNone(result) def test_add_security_service_not_found_01(self): security_service_id = 'unknown security service' db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(exception.SecurityServiceNotFound, db_api.share_network_add_security_service, self.fake_context, self.share_nw_dict['id'], security_service_id) def test_add_security_service_not_found_02(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} share_nw_id = 'unknown share network' db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_add_security_service, self.fake_context, share_nw_id, security_dict1['id']) def test_add_security_service_association_error_already_associated(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) self.assertRaises( exception.ShareNetworkSecurityServiceAssociationError, db_api.share_network_add_security_service, self.fake_context, self.share_nw_dict['id'], security_dict1['id']) def test_remove_security_service(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) db_api.share_network_add_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) db_api.share_network_remove_security_service(self.fake_context, self.share_nw_dict['id'], security_dict1['id']) with db_api.context_manager.reader.using(self.fake_context): result = 
(db_api.model_query( self.fake_context, models.ShareNetworkSecurityServiceAssociation). filter_by(security_service_id=security_dict1['id']). filter_by(share_network_id=self.share_nw_dict['id']). first()) self.assertIsNone(result) share_nw_ref = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(0, len(share_nw_ref['security_services'])) def test_remove_security_service_not_found_01(self): security_service_id = 'unknown security service' db_api.share_network_create(self.fake_context, self.share_nw_dict) self.assertRaises(exception.SecurityServiceNotFound, db_api.share_network_remove_security_service, self.fake_context, self.share_nw_dict['id'], security_service_id) def test_remove_security_service_not_found_02(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} share_nw_id = 'unknown share network' db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises(exception.ShareNetworkNotFound, db_api.share_network_remove_security_service, self.fake_context, share_nw_id, security_dict1['id']) def test_remove_security_service_dissociation_error(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) self.assertRaises( exception.ShareNetworkSecurityServiceDissociationError, db_api.share_network_remove_security_service, self.fake_context, self.share_nw_dict['id'], security_dict1['id']) def test_security_services_relation(self): security_dict1 = {'id': 'fake security service id1', 'project_id': self.fake_context.project_id, 'type': 'fake type'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.security_service_create(self.fake_context, security_dict1) result = db_api.share_network_get(self.fake_context, 
self.share_nw_dict['id']) self.assertEqual(0, len(result['security_services'])) def test_shares_relation(self): share_dict = {'id': 'fake share id1'} db_api.share_network_create(self.fake_context, self.share_nw_dict) db_api.share_create(self.fake_context, share_dict) result = db_api.share_network_get(self.fake_context, self.share_nw_dict['id']) self.assertEqual(0, len(result['share_instances'])) def test_association_get(self): network = db_api.share_network_create( self.fake_context, self.share_nw_dict) security_service = db_api.security_service_create( self.fake_context, security_service_dict) network_id = network['id'] security_service_id = security_service['id'] db_api.share_network_add_security_service( self.fake_context, network_id, security_service_id) result = db_api.share_network_security_service_association_get( self.fake_context, network_id, security_service_id) self.assertEqual(result['share_network_id'], network_id) self.assertEqual(result['security_service_id'], security_service_id) def test_share_network_update_security_service(self): new_sec_service = copy.copy(security_service_dict) new_sec_service['id'] = 'fakeid' share_network_id = self.share_nw_dict['id'] db_api.share_network_create( self.fake_context, self.share_nw_dict) db_api.security_service_create( self.fake_context, security_service_dict) db_api.security_service_create(self.fake_context, new_sec_service) db_api.share_network_add_security_service( self.fake_context, share_network_id, security_service_dict['id']) db_api.share_network_update_security_service( self.fake_context, share_network_id, security_service_dict['id'], new_sec_service['id']) association = db_api.share_network_security_service_association_get( self.fake_context, share_network_id, new_sec_service['id']) self.assertEqual(association['share_network_id'], share_network_id) self.assertEqual( association['security_service_id'], new_sec_service['id']) @ddt.ddt class ShareNetworkSubnetDatabaseAPITestCase(BaseDatabaseAPITestCase): 
def __init__(self, *args, **kwargs): super(ShareNetworkSubnetDatabaseAPITestCase, self).__init__( *args, **kwargs) self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) def setUp(self): super(ShareNetworkSubnetDatabaseAPITestCase, self).setUp() self.subnet_dict = {'id': 'fake network id', 'neutron_net_id': 'fake net id', 'neutron_subnet_id': 'fake subnet id', 'network_type': 'vlan', 'segmentation_id': 1000, 'share_network_id': 'fake_id', 'cidr': '10.0.0.0/24', 'ip_version': 4, 'availability_zone_id': None} def test_create(self): result = db_api.share_network_subnet_create( self.fake_context, self.subnet_dict) self._check_fields(expected=self.subnet_dict, actual=result) def test_create_duplicated_id(self): db_api.share_network_subnet_create(self.fake_context, self.subnet_dict) self.assertRaises(db_exception.DBDuplicateEntry, db_api.share_network_subnet_create, self.fake_context, self.subnet_dict) def test_get(self): db_api.share_network_subnet_create(self.fake_context, self.subnet_dict) result = db_api.share_network_subnet_get(self.fake_context, self.subnet_dict['id']) self._check_fields(expected=self.subnet_dict, actual=result) @ddt.data([{'id': 'fake_id_1', 'identifier': 'fake_identifier', 'host': 'fake_host'}], [{'id': 'fake_id_2', 'identifier': 'fake_identifier', 'host': 'fake_host'}, {'id': 'fake_id_3', 'identifier': 'fake_identifier', 'host': 'fake_host'}]) def test_get_with_share_servers(self, share_servers): share_net_subnets = [ db_api.share_network_subnet_create( self.fake_context, self.subnet_dict)] for share_server in share_servers: share_server['share_network_subnets'] = share_net_subnets db_api.share_server_create(self.fake_context, share_server) result = db_api.share_network_subnet_get(self.fake_context, self.subnet_dict['id']) self.assertEqual(len(share_servers), len(result['share_servers'])) for index, share_server in enumerate(share_servers): result = 
db_api.share_network_subnet_get_all_by_share_server_id( self.fake_context, share_server['id']) for key, value in share_server['share_network_subnets'][0].items(): if key != 'share_servers': self.assertEqual(value, result[0][key]) def test_get_not_found(self): db_api.share_network_subnet_create(self.fake_context, self.subnet_dict) self.assertRaises(exception.ShareNetworkSubnetNotFound, db_api.share_network_subnet_get, self.fake_context, 'fake_id') def test_delete(self): db_api.share_network_subnet_create(self.fake_context, self.subnet_dict) db_api.share_network_subnet_delete(self.fake_context, self.subnet_dict['id']) self.assertRaises(exception.ShareNetworkSubnetNotFound, db_api.share_network_subnet_delete, self.fake_context, self.subnet_dict['id']) def test_delete_not_found(self): self.assertRaises(exception.ShareNetworkSubnetNotFound, db_api.share_network_subnet_delete, self.fake_context, 'fake_id') def test_update(self): update_dict = { 'gateway': 'fake_gateway', 'ip_version': 6, 'mtu': '' } db_api.share_network_subnet_create(self.fake_context, self.subnet_dict) db_api.share_network_subnet_update( self.fake_context, self.subnet_dict['id'], update_dict) result = db_api.share_network_subnet_get(self.fake_context, self.subnet_dict['id']) self._check_fields(expected=update_dict, actual=result) def test_update_not_found(self): self.assertRaises(exception.ShareNetworkSubnetNotFound, db_api.share_network_subnet_update, self.fake_context, self.subnet_dict['id'], {}) @ddt.data([ { 'id': 'sn_id1', 'project_id': 'fake project', 'user_id': 'fake' } ], [ { 'id': 'fake_id', 'project_id': 'fake project', 'user_id': 'fake' }, { 'id': 'sn_id2', 'project_id': 'fake project', 'user_id': 'fake' } ]) def test_get_all_by_share_network(self, share_networks): for idx, share_network in enumerate(share_networks): self.subnet_dict['share_network_id'] = share_network['id'] self.subnet_dict['id'] = 'fake_id%s' % idx db_api.share_network_create(self.fake_context, share_network) 
db_api.share_network_subnet_create(self.fake_context, self.subnet_dict) for share_network in share_networks: subnets = db_api.share_network_subnet_get_all_by_share_network( self.fake_context, share_network['id']) self.assertEqual(1, len(subnets)) def test_get_by_availability_zone_id(self): with db_api.context_manager.writer.using(self.fake_context): az = db_api._availability_zone_create_if_not_exist( self.fake_context, 'fake_zone_id', ) self.subnet_dict['availability_zone_id'] = az['id'] db_api.share_network_subnet_create(self.fake_context, self.subnet_dict) result = db_api.share_network_subnets_get_all_by_availability_zone_id( self.fake_context, self.subnet_dict['share_network_id'], az['id']) self._check_fields(expected=self.subnet_dict, actual=result[0]) def test_get_az_subnets(self): with db_api.context_manager.writer.using(self.fake_context): az = db_api._availability_zone_create_if_not_exist( self.fake_context, 'fake_zone_id', ) self.subnet_dict['availability_zone_id'] = az['id'] db_api.share_network_subnet_create(self.fake_context, self.subnet_dict) result = db_api.share_network_subnet_get_all_with_same_az( self.fake_context, self.subnet_dict['id']) self.subnet_dict['share_network'] = None self._check_fields(expected=self.subnet_dict, actual=result[0]) def test_get_az_subnets_not_found(self): self.assertRaises( exception.ShareNetworkSubnetNotFound, db_api.share_network_subnet_get_all_with_same_az, self.fake_context, 'share_network_subnet_id') def test_get_default_subnet(self): db_api.share_network_subnet_create(self.fake_context, self.subnet_dict) result = db_api.share_network_subnet_get_default_subnets( self.fake_context, self.subnet_dict['share_network_id']) self._check_fields(expected=self.subnet_dict, actual=result[0]) def test_get_by_share_server_id_not_found(self): self.assertRaises( exception.ShareNetworkSubnetNotFoundByShareServer, db_api.share_network_subnet_get_all_by_share_server_id, self.fake_context, 'share_server_id') def 
test_share_network_subnet_metadata_get(self): metadata = {'a': 'b', 'c': 'd'} subnet_1 = db_api.share_network_subnet_create( self.fake_context, self.subnet_dict) db_api.share_network_subnet_metadata_update( self.fake_context, share_network_subnet_id=subnet_1['id'], metadata=metadata, delete=False) self.assertEqual( metadata, db_api.share_network_subnet_metadata_get( self.fake_context, share_network_subnet_id=subnet_1['id'])) def test_share_network_subnet_metadata_get_item(self): metadata = {'a': 'b', 'c': 'd'} key = 'a' shouldbe = {'a': 'b'} subnet_1 = db_api.share_network_subnet_create( self.fake_context, self.subnet_dict) db_api.share_network_subnet_metadata_update( self.fake_context, share_network_subnet_id=subnet_1['id'], metadata=metadata, delete=False) self.assertEqual( shouldbe, db_api.share_network_subnet_metadata_get_item( self.fake_context, share_network_subnet_id=subnet_1['id'], key=key)) def test_share_network_subnet_metadata_update(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '5'} should_be = {'a': '3', 'c': '2', 'd': '5'} subnet_1 = db_api.share_network_subnet_create( self.fake_context, self.subnet_dict) db_api.share_network_subnet_metadata_update( self.fake_context, share_network_subnet_id=subnet_1['id'], metadata=metadata1, delete=False) db_api.share_network_subnet_metadata_update( self.fake_context, share_network_subnet_id=subnet_1['id'], metadata=metadata2, delete=False) self.assertEqual( should_be, db_api.share_network_subnet_metadata_get( self.fake_context, share_network_subnet_id=subnet_1['id'])) def test_share_network_subnet_metadata_delete(self): key = 'a' metadata = {'a': '1', 'c': '2'} should_be = {'c': '2'} subnet_1 = db_api.share_network_subnet_create( self.fake_context, self.subnet_dict) db_api.share_network_subnet_metadata_update( self.fake_context, share_network_subnet_id=subnet_1['id'], metadata=metadata, delete=False) db_api.share_network_subnet_metadata_delete( self.fake_context, 
share_network_subnet_id=subnet_1['id'], key=key) self.assertEqual( should_be, db_api.share_network_subnet_metadata_get( self.fake_context, share_network_subnet_id=subnet_1['id'])) @ddt.ddt class SecurityServiceDatabaseAPITestCase(BaseDatabaseAPITestCase): def __init__(self, *args, **kwargs): super(SecurityServiceDatabaseAPITestCase, self).__init__(*args, **kwargs) self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) def _check_expected_fields(self, result, expected): for key in expected: self.assertEqual(expected[key], result[key]) def test_create(self): result = db_api.security_service_create(self.fake_context, security_service_dict) self._check_expected_fields(result, security_service_dict) def test_create_with_duplicated_id(self): db_api.security_service_create(self.fake_context, security_service_dict) self.assertRaises(db_exception.DBDuplicateEntry, db_api.security_service_create, self.fake_context, security_service_dict) def test_get(self): db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_get(self.fake_context, security_service_dict['id']) self._check_expected_fields(result, security_service_dict) def test_get_not_found(self): self.assertRaises(exception.SecurityServiceNotFound, db_api.security_service_get, self.fake_context, 'wrong id') def test_get_all_by_share_network(self): dict1 = security_service_dict dict2 = security_service_dict.copy() dict2['id'] = 'fake id 2' db_api.security_service_create(self.fake_context, dict1) db_api.security_service_create(self.fake_context, dict2) share_nw_dict = {'id': 'fake network id', 'project_id': 'fake project', 'user_id': 'fake_user_id'} db_api.share_network_create(self.fake_context, share_nw_dict) db_api.share_network_add_security_service( self.fake_context, share_nw_dict['id'], dict1['id']) result = db_api.security_service_get_all_by_share_network( self.fake_context, share_nw_dict['id']) 
self._check_expected_fields(result[0], dict1) self.assertEqual(1, len(result)) def test_delete(self): db_api.security_service_create(self.fake_context, security_service_dict) db_api.security_service_delete(self.fake_context, security_service_dict['id']) self.assertRaises(exception.SecurityServiceNotFound, db_api.security_service_get, self.fake_context, security_service_dict['id']) def test_update(self): update_dict = { 'dns_ip': 'new dns', 'server': 'new ldap server', 'domain': 'new ldap domain', 'default_ad_site': 'new ldap default_ad_site', 'ou': 'new ldap ou', 'user': 'new user', 'password': 'new password', 'name': 'new whatever', 'description': 'new nevermind', } db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_update(self.fake_context, security_service_dict['id'], update_dict) self._check_expected_fields(result, update_dict) def test_update_no_updates(self): db_api.security_service_create(self.fake_context, security_service_dict) result = db_api.security_service_update(self.fake_context, security_service_dict['id'], {}) self._check_expected_fields(result, security_service_dict) def test_update_not_found(self): self.assertRaises(exception.SecurityServiceNotFound, db_api.security_service_update, self.fake_context, 'wrong id', {}) def test_get_all_no_records(self): result = db_api.security_service_get_all(self.fake_context) self.assertEqual(0, len(result)) @ddt.data(1, 2) def test_get_all(self, records_count): index = 0 services = [] while index < records_count: service_dict = dict(security_service_dict) service_dict.update({'id': 'fake_id%s' % index}) services.append(service_dict) db_api.security_service_create(self.fake_context, service_dict) index += 1 result = db_api.security_service_get_all(self.fake_context) self.assertEqual(len(services), len(result)) for index, service in enumerate(services): self._check_fields(expected=service, actual=result[index]) def test_get_all_two_records(self): dict1 = 
security_service_dict dict2 = security_service_dict.copy() dict2['id'] = 'fake id 2' db_api.security_service_create(self.fake_context, dict1) db_api.security_service_create(self.fake_context, dict2) result = db_api.security_service_get_all(self.fake_context) self.assertEqual(2, len(result)) def test_get_all_by_project(self): dict1 = security_service_dict dict2 = security_service_dict.copy() dict2['id'] = 'fake id 2' dict2['project_id'] = 'fake project 2' db_api.security_service_create(self.fake_context, dict1) db_api.security_service_create(self.fake_context, dict2) result1 = db_api.security_service_get_all_by_project( self.fake_context, dict1['project_id']) self.assertEqual(1, len(result1)) self._check_expected_fields(result1[0], dict1) result2 = db_api.security_service_get_all_by_project( self.fake_context, dict2['project_id']) self.assertEqual(1, len(result2)) self._check_expected_fields(result2[0], dict2) @ddt.ddt class ShareServerDatabaseAPITestCase(test.TestCase): def setUp(self): super(ShareServerDatabaseAPITestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id', is_admin=True) self.share_net_subnets = [ db_utils.create_share_network_subnet( id=uuidutils.generate_uuid(), share_network_id=uuidutils.generate_uuid())] def test_share_server_get(self): expected = db_utils.create_share_server( share_network_subnets=self.share_net_subnets) server = db_api.share_server_get(self.ctxt, expected['id']) self.assertEqual(expected['id'], server['id']) self.assertEqual(expected.share_network_subnets[0]['id'], server.share_network_subnets[0]['id']) self.assertEqual( expected.share_network_subnets[0]['share_network_id'], server.share_network_subnets[0]['share_network_id']) self.assertEqual(expected.host, server.host) self.assertEqual(expected.status, server.status) def test_get_not_found(self): fake_id = 'FAKE_UUID' self.assertRaises(exception.ShareServerNotFound, db_api.share_server_get, self.ctxt, fake_id) def 
test_create(self): server = db_utils.create_share_server( share_network_subnets=self.share_net_subnets) self.assertTrue(server['id']) self.assertEqual(server.share_network_subnets[0]['id'], server['share_network_subnets'][0]['id']) self.assertEqual( server.share_network_subnets[0]['share_network_id'], server['share_network_subnets'][0]['share_network_id']) self.assertEqual(server.host, server['host']) self.assertEqual(server.status, server['status']) def test_delete(self): server = db_utils.create_share_server() num_records = len(db_api.share_server_get_all(self.ctxt)) db_api.share_server_delete(self.ctxt, server['id']) self.assertEqual(num_records - 1, len(db_api.share_server_get_all(self.ctxt))) def test_delete_not_found(self): fake_id = 'FAKE_UUID' self.assertRaises(exception.ShareServerNotFound, db_api.share_server_delete, self.ctxt, fake_id) def test_update(self): share_net_subnets_update = [ db_utils.create_share_network_subnet( id=uuidutils.generate_uuid(), share_network_id=uuidutils.generate_uuid())] update = { 'share_network_subnets': share_net_subnets_update, 'host': 'update_host', 'status': constants.STATUS_ACTIVE, } server = db_utils.create_share_server( share_network_subnets=self.share_net_subnets) updated_server = db_api.share_server_update(self.ctxt, server['id'], update) self.assertEqual(server['id'], updated_server['id']) self.assertEqual( update['share_network_subnets'][0]['share_network_id'], updated_server.share_network_subnets[0]['share_network_id']) self.assertEqual(update['host'], updated_server.host) self.assertEqual(update['status'], updated_server.status) def test_update_not_found(self): fake_id = 'FAKE_UUID' self.assertRaises(exception.ShareServerNotFound, db_api.share_server_update, self.ctxt, fake_id, {}) def test_get_all_by_host_and_share_subnet_valid(self): subnet_1 = { 'id': '1', 'share_network_id': '1', } subnet_2 = { 'id': '2', 'share_network_id': '2', } share_net_subnets1 = db_utils.create_share_network_subnet(**subnet_1) 
share_net_subnets2 = db_utils.create_share_network_subnet(**subnet_2) valid = { 'share_network_subnets': [share_net_subnets1], 'host': 'host1', 'status': constants.STATUS_ACTIVE, } invalid = { 'share_network_subnets': [share_net_subnets2], 'host': 'host1', 'status': constants.STATUS_ERROR, } other = { 'share_network_subnets': [share_net_subnets1], 'host': 'host2', 'status': constants.STATUS_ACTIVE, } valid = db_utils.create_share_server(**valid) db_utils.create_share_server(**invalid) db_utils.create_share_server(**other) servers = db_api.share_server_get_all_by_host_and_share_subnet_valid( self.ctxt, host='host1', share_subnet_id='1') self.assertEqual(valid['id'], servers[0]['id']) def test_get_all_by_host_and_share_subnet_valid_not_found(self): self.assertRaises( exception.ShareServerNotFound, db_api.share_server_get_all_by_host_and_share_subnet_valid, self.ctxt, host='fake', share_subnet_id='fake' ) def test_get_all_by_host_and_share_subnet(self): subnet_1 = { 'id': '1', 'share_network_id': '1', } share_net_subnets1 = db_utils.create_share_network_subnet(**subnet_1) valid = { 'share_network_subnets': [share_net_subnets1], 'host': 'host1', 'status': constants.STATUS_SERVER_NETWORK_CHANGE, } other = { 'share_network_subnets': [share_net_subnets1], 'host': 'host1', 'status': constants.STATUS_ERROR, } invalid = { 'share_network_subnets': [share_net_subnets1], 'host': 'host2', 'status': constants.STATUS_ACTIVE, } valid = db_utils.create_share_server(**valid) invalid = db_utils.create_share_server(**invalid) other = db_utils.create_share_server(**other) servers = db_api.share_server_get_all_by_host_and_or_share_subnet( self.ctxt, host='host1', share_subnet_id='1') self.assertEqual(2, len(servers)) ids = [s['id'] for s in servers] self.assertIn(valid['id'], ids) self.assertIn(other['id'], ids) self.assertNotIn(invalid['id'], ids) def test_get_all_by_host_and_share_subnet_not_found(self): self.assertRaises( exception.ShareServerNotFound, 
db_api.share_server_get_all_by_host_and_or_share_subnet, self.ctxt, host='fake', share_subnet_id='fake' ) def test_get_all(self): srv1 = { 'host': 'host1', 'status': constants.STATUS_ACTIVE, } srv2 = { 'host': 'host1', 'status': constants.STATUS_ERROR, } srv3 = { 'host': 'host2', 'status': constants.STATUS_ACTIVE, } servers = db_api.share_server_get_all(self.ctxt) self.assertEqual(0, len(servers)) to_delete = db_utils.create_share_server(**srv1) db_utils.create_share_server(**srv2) db_utils.create_share_server(**srv3) servers = db_api.share_server_get_all(self.ctxt) self.assertEqual(3, len(servers)) db_api.share_server_delete(self.ctxt, to_delete['id']) servers = db_api.share_server_get_all(self.ctxt) self.assertEqual(2, len(servers)) def test_backend_details_set(self): details = { 'value1': '1', 'value2': '2', } server = db_utils.create_share_server() db_api.share_server_backend_details_set(self.ctxt, server['id'], details) self.assertDictEqual( details, db_api.share_server_get(self.ctxt, server['id'])['backend_details'] ) details.update({'value2': '4'}) db_api.share_server_backend_details_set(self.ctxt, server['id'], details) self.assertDictEqual( details, db_api.share_server_get(self.ctxt, server['id'])['backend_details'] ) def test_backend_details_set_not_found(self): fake_id = 'FAKE_UUID' self.assertRaises(exception.ShareServerNotFound, db_api.share_server_backend_details_set, self.ctxt, fake_id, {}) def test_get_with_details(self): values = { 'share_network_subnets': [ db_utils.create_share_network_subnet( id='fake_subnet_id', share_network_id='fake_share_net_id')], 'host': 'hostname', 'status': constants.STATUS_ACTIVE, } details = { 'value1': '1', 'value2': '2', } srv_id = db_utils.create_share_server(**values)['id'] db_api.share_server_backend_details_set(self.ctxt, srv_id, details) server = db_api.share_server_get(self.ctxt, srv_id) self.assertEqual(srv_id, server['id']) self.assertEqual(values['share_network_subnets'][0]['id'], 
server.share_network_subnets[0]['id']) self.assertEqual( values['share_network_subnets'][0]['share_network_id'], server.share_network_subnets[0]['share_network_id']) self.assertEqual(values['host'], server.host) self.assertEqual(values['status'], server.status) self.assertDictEqual(server['backend_details'], details) self.assertIn('backend_details', server.to_dict()) def test_delete_with_details(self): server = db_utils.create_share_server(backend_details={ 'value1': '1', 'value2': '2', }) num_records = len(db_api.share_server_get_all(self.ctxt)) db_api.share_server_delete(self.ctxt, server['id']) self.assertEqual(num_records - 1, len(db_api.share_server_get_all(self.ctxt))) @ddt.data('foobar', 'fake', '-fake-', 'some_fake_', 'some-fake-', '-fake-identifier', '_fake_identifier', 'some-fake-identifier', 'some_fake_identifier') def test_share_server_search_by_identifier(self, identifier): server = { 'host': 'hostname', 'status': constants.STATUS_ACTIVE, 'is_auto_deletable': True, 'updated_at': datetime.datetime(2018, 5, 1), 'identifier': 'some_fake_identifier', } server = db_utils.create_share_server(**server) if identifier in ('foobar', 'fake', '-fake-', 'some_fake_', 'some-fake-'): self.assertRaises(exception.ShareServerNotFound, db_api.share_server_search_by_identifier, self.ctxt, identifier) else: result = db_api.share_server_search_by_identifier( self.ctxt, identifier) self.assertEqual(server['id'], result[0]['id']) @ddt.data((True, True, True, 3), (True, True, False, 2), (True, False, False, 1), (False, False, False, 0)) @ddt.unpack def test_share_server_get_all_unused_deletable(self, server_1_is_auto_deletable, server_2_is_auto_deletable, server_3_is_auto_deletable, expected_len): server1 = { 'host': 'hostname', 'status': constants.STATUS_ACTIVE, 'is_auto_deletable': server_1_is_auto_deletable, 'updated_at': datetime.datetime(2018, 5, 1) } server2 = { 'host': 'hostname', 'status': constants.STATUS_ACTIVE, 'is_auto_deletable': server_2_is_auto_deletable, 
'updated_at': datetime.datetime(2018, 5, 1) } server3 = { 'host': 'hostname', 'status': constants.STATUS_ACTIVE, 'is_auto_deletable': server_3_is_auto_deletable, 'updated_at': datetime.datetime(2018, 5, 1) } db_utils.create_share_server(**server1) db_utils.create_share_server(**server2) db_utils.create_share_server(**server3) host = 'hostname' updated_before = datetime.datetime(2019, 5, 1) unused_deletable = db_api.share_server_get_all_unused_deletable( self.ctxt, host, updated_before) self.assertEqual(expected_len, len(unused_deletable)) @ddt.data({'host': 'fakepool@fakehost'}, {'status': constants.STATUS_SERVER_MIGRATING_TO}, {'source_share_server_id': 'fake_ss_id'}, {'share_network_id': uuidutils.generate_uuid()}) def test_share_server_get_all_with_filters(self, filters): server_data = copy.copy(filters) share_network_id = server_data.pop('share_network_id', None) share_network_subnet = {} if share_network_id: db_utils.create_share_network(id=share_network_id) share_network_subnet = db_utils.create_share_network_subnet( id=uuidutils.generate_uuid(), share_network_id=share_network_id) server_data['share_network_subnets'] = [share_network_subnet] db_utils.create_share_server(**server_data) db_utils.create_share_server() filter_keys = filters.keys() results = db_api.share_server_get_all_with_filters(self.ctxt, filters) self.assertEqual(1, len(results)) for result in results: for key in filter_keys: if key == 'share_network_id': self.assertEqual(share_network_subnet['share_network_id'], filters[key]) self.assertEqual(share_network_subnet['id'], result['share_network_subnets'][0]['id']) else: self.assertEqual(result[key], filters[key]) @ddt.data('fake@fake', 'host1@backend1') def test_share_server_get_all_by_host(self, host): db_utils.create_share_server(host='fake@fake') db_utils.create_share_server(host='host1@backend1') share_servers = db_api.share_server_get_all_by_host(self.ctxt, host) self.assertEqual(1, len(share_servers)) for share_server in share_servers: 
self.assertEqual(host, share_server['host']) def test_share_servers_update(self): servers = [db_utils.create_share_server() for __ in range(1, 3)] server_ids = [server['id'] for server in servers] values = {'status': constants.STATUS_NETWORK_CHANGE} db_api.share_servers_update( self.ctxt, server_ids, values) share_servers = [ db_api.share_server_get(self.ctxt, server_id) for server_id in server_ids] for ss in share_servers: self.assertEqual(constants.STATUS_NETWORK_CHANGE, ss['status']) def test_encryption_keys_get_count(self): servers = [db_utils.create_share_server( encryption_key_ref=uuidutils.generate_uuid()) for __ in range(1, 4)] count = db_api.encryption_keys_get_count(context.get_admin_context()) self.assertEqual(count, len(servers)) class ServiceDatabaseAPITestCase(test.TestCase): def setUp(self): super(ServiceDatabaseAPITestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id', is_admin=True) self.service_data = {'host': "fake_host", 'binary': "fake_binary", 'topic': "fake_topic", 'report_count': 0, 'availability_zone': "fake_zone"} def test_create(self): """Ensure we create a new AZ if necessary.""" service = db_api.service_create(self.ctxt, self.service_data) az = db_api.availability_zone_get(self.ctxt, "fake_zone") self.assertEqual(az.id, service.availability_zone_id) self.assertSubDictMatch(self.service_data, service.to_dict()) def test_create__az_exists(self): """Ensure we use an AZ is it already exists.""" with db_api.context_manager.writer.using(self.ctxt): az = db_api._availability_zone_create_if_not_exist( self.ctxt, 'fake_zone', ) service = db_api.service_create(self.ctxt, self.service_data) self.assertEqual(az.id, service.availability_zone_id) self.assertSubDictMatch(self.service_data, service.to_dict()) def test_create__az_missing(self): """Ensure we fail if AZ info is missing.""" self.service_data.pop('availability_zone') exc = self.assertRaises( ValueError, db_api.service_create, self.ctxt, 
self.service_data, ) self.assertIn( "Values dict should have 'availability_zone' field.", str(exc), ) def test_update(self): az_name = 'fake_zone2' update_data = {"availability_zone": az_name} service = db_api.service_create(self.ctxt, self.service_data) db_api.service_update(self.ctxt, service['id'], update_data) service = db_api.service_get(self.ctxt, service['id']) az = db_api.availability_zone_get(self.ctxt, az_name) self.assertEqual(az.id, service.availability_zone_id) valid_values = self.service_data valid_values.update(update_data) self.assertSubDictMatch(valid_values, service.to_dict()) @ddt.ddt class AvailabilityZonesDatabaseAPITestCase(test.TestCase): def setUp(self): super(AvailabilityZonesDatabaseAPITestCase, self).setUp() self.ctxt = context.RequestContext(user_id='user_id', project_id='project_id', is_admin=True) @ddt.data({'fake': 'fake'}, {}, {'fakeavailability_zone': 'fake'}, {'availability_zone': None}, {'availability_zone': ''}) def test_ensure_availability_zone_exists_invalid(self, test_values): self.assertRaises(ValueError, db_api.ensure_availability_zone_exists, self.ctxt, test_values) def test_az_get(self): az_name = 'test_az' with db_api.context_manager.writer.using(self.ctxt): az = db_api._availability_zone_create_if_not_exist( self.ctxt, az_name ) az_by_id = db_api.availability_zone_get(self.ctxt, az['id']) az_by_name = db_api.availability_zone_get(self.ctxt, az_name) self.assertEqual(az_name, az_by_id['name']) self.assertEqual(az_name, az_by_name['name']) self.assertEqual(az['id'], az_by_id['id']) self.assertEqual(az['id'], az_by_name['id']) def test_az_get_all(self): with db_api.context_manager.writer.using(self.ctxt): db_api._availability_zone_create_if_not_exist(self.ctxt, 'test1') db_api._availability_zone_create_if_not_exist(self.ctxt, 'test2') db_api._availability_zone_create_if_not_exist(self.ctxt, 'test3') db_api.service_create(self.ctxt, {'availability_zone': 'test2'}) actual_result = db_api.availability_zone_get_all(self.ctxt) 
self.assertEqual(1, len(actual_result)) self.assertEqual('test2', actual_result[0]['name']) @ddt.ddt class NetworkAllocationsDatabaseAPITestCase(test.TestCase): def setUp(self): super(NetworkAllocationsDatabaseAPITestCase, self).setUp() self.user_id = 'user_id' self.project_id = 'project_id' self.share_server_id = 'foo_share_server_id' self.share_network_subnet_id = 'foo_share_network_subnet_id' self.ctxt = context.RequestContext( user_id=self.user_id, project_id=self.project_id, is_admin=True) self.user_network_allocations = [ {'share_server_id': self.share_server_id, 'ip_address': '1.1.1.1', 'status': constants.STATUS_ACTIVE, 'label': None, 'share_network_subnet_id': self.share_network_subnet_id}, {'share_server_id': self.share_server_id, 'ip_address': '2.2.2.2', 'status': constants.STATUS_ACTIVE, 'label': 'user', 'share_network_subnet_id': self.share_network_subnet_id}, ] self.admin_network_allocations = [ {'share_server_id': self.share_server_id, 'ip_address': '3.3.3.3', 'status': constants.STATUS_ACTIVE, 'label': 'admin', 'share_network_subnet_id': None}, {'share_server_id': self.share_server_id, 'ip_address': '4.4.4.4', 'status': constants.STATUS_ACTIVE, 'label': 'admin', 'share_network_subnet_id': None}, ] def _setup_network_allocations_get_for_share_server(self): # Create share network share_network_data = { 'id': 'foo_share_network_id', 'user_id': self.user_id, 'project_id': self.project_id, } db_api.share_network_create(self.ctxt, share_network_data) # Create share network subnet share_network_subnet_data = { 'id': self.share_network_subnet_id, 'share_network_id': self.user_id, } db_api.share_network_subnet_create(self.ctxt, share_network_subnet_data) # Create share server share_server_data = { 'id': self.share_server_id, 'host': 'fake_host', 'status': 'active', } db_api.share_server_create(self.ctxt, share_server_data) # Create user network allocations for user_network_allocation in self.user_network_allocations: db_api.network_allocation_create( 
self.ctxt, user_network_allocation) # Create admin network allocations for admin_network_allocation in self.admin_network_allocations: db_api.network_allocation_create( self.ctxt, admin_network_allocation) def test_get_only_user_network_allocations(self): self._setup_network_allocations_get_for_share_server() result = db_api.network_allocations_get_for_share_server( self.ctxt, self.share_server_id, label='user') self.assertEqual( len(self.user_network_allocations), len(result)) for na in result: self.assertIn(na.label, (None, 'user')) def test_get_only_admin_network_allocations(self): self._setup_network_allocations_get_for_share_server() result = db_api.network_allocations_get_for_share_server( self.ctxt, self.share_server_id, label='admin') self.assertEqual( len(self.admin_network_allocations), len(result)) for na in result: self.assertEqual(na.label, 'admin') def test_get_all_network_allocations(self): self._setup_network_allocations_get_for_share_server() result = db_api.network_allocations_get_for_share_server( self.ctxt, self.share_server_id, label=None) self.assertEqual( len(self.user_network_allocations + self.admin_network_allocations), len(result) ) for na in result: self.assertIn(na.label, ('admin', 'user', None)) def test_network_allocation_get(self): self._setup_network_allocations_get_for_share_server() for allocation in self.admin_network_allocations: result = db_api.network_allocation_get(self.ctxt, allocation['id']) self.assertIsInstance(result, models.NetworkAllocation) self.assertEqual(allocation['id'], result.id) for allocation in self.user_network_allocations: result = db_api.network_allocation_get(self.ctxt, allocation['id']) self.assertIsInstance(result, models.NetworkAllocation) self.assertEqual(allocation['id'], result.id) def test_network_allocation_get_no_result(self): self._setup_network_allocations_get_for_share_server() self.assertRaises(exception.NotFound, db_api.network_allocation_get, self.ctxt, id='fake') def 
test_network_allocation_get_by_subnet_id(self): self._setup_network_allocations_get_for_share_server() result = db_api.network_allocations_get_for_share_server( self.ctxt, self.share_server_id, subnet_id=self.share_network_subnet_id) self.assertEqual(2, len(result)) for network_allocation in result: self.assertIsInstance(network_allocation, models.NetworkAllocation) self.assertEqual(self.share_network_subnet_id, network_allocation.share_network_subnet_id) @ddt.data(True, False) def test_network_allocation_get_read_deleted(self, read_deleted): self._setup_network_allocations_get_for_share_server() deleted_allocation = { 'share_server_id': self.share_server_id, 'ip_address': '1.1.1.1', 'status': constants.STATUS_ACTIVE, 'label': None, 'deleted': True, } new_obj = db_api.network_allocation_create(self.ctxt, deleted_allocation) if read_deleted: result = db_api.network_allocation_get(self.ctxt, new_obj.id, read_deleted=read_deleted) self.assertIsInstance(result, models.NetworkAllocation) self.assertEqual(new_obj.id, result.id) else: self.assertRaises(exception.NotFound, db_api.network_allocation_get, self.ctxt, id=self.share_server_id) def test_network_allocation_update(self): self._setup_network_allocations_get_for_share_server() for allocation in self.admin_network_allocations: old_obj = db_api.network_allocation_get(self.ctxt, allocation['id']) self.assertEqual('False', old_obj.deleted) updated_object = db_api.network_allocation_update( self.ctxt, allocation['id'], {'deleted': 'True'}) self.assertEqual('True', updated_object.deleted) @ddt.data(True, False) def test_network_allocation_update_read_deleted(self, read_deleted): self._setup_network_allocations_get_for_share_server() db_api.network_allocation_update( self.ctxt, self.admin_network_allocations[0]['id'], {'deleted': 'True'} ) if read_deleted: updated_object = db_api.network_allocation_update( self.ctxt, self.admin_network_allocations[0]['id'], {'deleted': 'False'}, read_deleted=read_deleted ) 
self.assertEqual('False', updated_object.deleted) else: self.assertRaises(exception.NotFound, db_api.network_allocation_update, self.ctxt, id=self.share_server_id, values={'deleted': read_deleted}, read_deleted=read_deleted) class ReservationDatabaseAPITest(test.TestCase): def setUp(self): super(ReservationDatabaseAPITest, self).setUp() self.context = context.get_admin_context() def test_reservation_expire(self): quota_usage = db_api.quota_usage_create(self.context, 'fake_project', 'fake_user', 'fake_resource', 0, 12, until_refresh=None) with db_api.context_manager.writer.using(self.context): for time_s in (-1, 1): reservation = db_api._reservation_create( self.context, 'fake_uuid', quota_usage, 'fake_project', 'fake_user', 'fake_resource', 10, timeutils.utcnow() + datetime.timedelta(days=time_s), ) db_api.reservation_expire(self.context) with db_api.context_manager.reader.using(self.context): reservations = db_api._quota_reservations_query( self.context, ['fake_uuid'], ).all() quota_usage = db_api.quota_usage_get(self.context, 'fake_project', 'fake_resource') self.assertEqual(1, len(reservations)) self.assertEqual(reservation['id'], reservations[0]['id']) self.assertEqual(2, quota_usage['reserved']) @ddt.ddt class PurgeDeletedTest(test.TestCase): def setUp(self): super(PurgeDeletedTest, self).setUp() self.context = context.get_admin_context() def _days_ago(self, begin, end): return timeutils.utcnow() - datetime.timedelta( days=random.randint(begin, end)) def _turn_on_foreign_key(self): engine = db_api.get_engine() connection = engine.raw_connection() try: cursor = connection.cursor() cursor.execute("PRAGMA foreign_keys = ON") finally: connection.close() @ddt.data({"del_days": 0, "num_left": 0}, {"del_days": 10, "num_left": 2}, {"del_days": 20, "num_left": 4}) @ddt.unpack def test_purge_records_with_del_days(self, del_days, num_left): fake_now = timeutils.utcnow() with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=fake_now)): # create resources 
soft-deleted in 0~9, 10~19 days ago for start, end in ((0, 9), (10, 19)): for unused in range(2): # share type db_utils.create_share_type(id=uuidutils.generate_uuid(), deleted_at=self._days_ago(start, end)) # share share = db_utils.create_share_without_instance( metadata={}, deleted_at=self._days_ago(start, end)) # create share network network = db_utils.create_share_network( id=uuidutils.generate_uuid(), deleted_at=self._days_ago(start, end)) # create security service db_utils.create_security_service( id=uuidutils.generate_uuid(), share_network_id=network.id, deleted_at=self._days_ago(start, end)) # create share instance s_instance = db_utils.create_share_instance( id=uuidutils.generate_uuid(), share_network_id=network.id, share_id=share.id) # share access db_utils.create_share_access( id=uuidutils.generate_uuid(), share_id=share['id'], deleted_at=self._days_ago(start, end)) # create share server db_utils.create_share_server( id=uuidutils.generate_uuid(), deleted_at=self._days_ago(start, end)) # create snapshot db_api.share_snapshot_create( self.context, {'share_id': share['id'], 'deleted_at': self._days_ago(start, end)}, create_snapshot_instance=False) # update share instance db_api.share_instance_update( self.context, s_instance.id, {'deleted_at': self._days_ago(start, end)}) db_api.purge_deleted_records(self.context, age_in_days=del_days) for model in [models.ShareTypes, models.Share, models.ShareNetwork, models.ShareAccessMapping, models.ShareInstance, models.ShareServer, models.ShareSnapshot, models.SecurityService]: with db_api.context_manager.reader.using(self.context): rows = db_api.model_query(self.context, model).count() self.assertEqual(num_left, rows) def test_purge_records_with_illegal_args(self): self.assertRaises(TypeError, db_api.purge_deleted_records, self.context) self.assertRaises(exception.InvalidParameterValue, db_api.purge_deleted_records, self.context, age_in_days=-1) def test_purge_records_with_constraint(self): self._turn_on_foreign_key() 
type_id = uuidutils.generate_uuid() # create share type1 db_utils.create_share_type(id=type_id, deleted_at=self._days_ago(1, 1)) # create share type2 db_utils.create_share_type(id=uuidutils.generate_uuid(), deleted_at=self._days_ago(1, 1)) # create share share = db_utils.create_share(share_type_id=type_id) db_api.purge_deleted_records(self.context, age_in_days=0) with db_api.context_manager.reader.using(self.context): type_row = db_api.model_query( self.context, models.ShareTypes ).count() # share type1 should not be deleted self.assertEqual(1, type_row) with db_api.context_manager.writer.using(self.context): db_api.model_query(self.context, models.ShareInstance).delete() db_api.share_delete(self.context, share['id']) db_api.purge_deleted_records(self.context, age_in_days=0) with db_api.context_manager.reader.using(self.context): s_row = db_api.model_query(self.context, models.Share).count() type_row = db_api.model_query( self.context, models.ShareTypes ).count() self.assertEqual(0, s_row + type_row) @ddt.ddt class ShareTypeAPITestCase(test.TestCase): def setUp(self): super(ShareTypeAPITestCase, self).setUp() self.ctxt = context.RequestContext( user_id='user_id', project_id='project_id', is_admin=True) @ddt.data({'used_by_shares': True, 'used_by_group_types': False}, {'used_by_shares': False, 'used_by_group_types': True}, {'used_by_shares': True, 'used_by_group_types': True}) @ddt.unpack def test_share_type_destroy_in_use(self, used_by_shares, used_by_group_types): share_type_1 = db_utils.create_share_type( name='orange', extra_specs={'somekey': 'someval'}, is_public=False, override_defaults=True) share_type_2 = db_utils.create_share_type( name='regalia', override_defaults=True) db_api.share_type_access_add(self.ctxt, share_type_1['id'], "2018ndaetfigovnsaslcahfavmrpions") db_api.share_type_access_add(self.ctxt, share_type_1['id'], "2016ndaetfigovnsaslcahfavmrpions") if used_by_shares: share_1 = db_utils.create_share(share_type_id=share_type_1['id']) 
db_utils.create_share(share_type_id=share_type_2['id']) if used_by_group_types: group_type_1 = db_utils.create_share_group_type( name='crimson', share_types=[share_type_1['id']]) db_utils.create_share_group_type( name='tide', share_types=[share_type_2['id']]) share_group_1 = db_utils.create_share_group( share_group_type_id=group_type_1['id'], share_types=[share_type_1['id']]) self.assertRaises(exception.ShareTypeInUse, db_api.share_type_destroy, self.ctxt, share_type_1['id']) self.assertRaises(exception.ShareTypeInUse, db_api.share_type_destroy, self.ctxt, share_type_2['id']) # Let's cleanup share_type_1 and verify it is gone if used_by_shares: db_api.share_instance_delete(self.ctxt, share_1.instance.id) if used_by_group_types: db_api.share_group_destroy(self.ctxt, share_group_1['id']) db_api.share_group_type_destroy(self.ctxt, group_type_1['id']) self.assertIsNone( db_api.share_type_destroy(self.ctxt, share_type_1['id'])) self.assertDictEqual( {}, db_api.share_type_extra_specs_get( self.ctxt, share_type_1['id'])) self.assertRaises(exception.ShareTypeNotFound, db_api.share_type_access_get_all, self.ctxt, share_type_1['id']) self.assertRaises(exception.ShareTypeNotFound, db_api.share_type_get, self.ctxt, share_type_1['id']) # share_type_2 must still be around self.assertEqual( share_type_2['id'], db_api.share_type_get(self.ctxt, share_type_2['id'])['id']) @ddt.data({'usages': False, 'reservations': False}, {'usages': False, 'reservations': True}, {'usages': True, 'reservations': False}) @ddt.unpack def test_share_type_destroy_quotas_and_reservations(self, usages, reservations): share_type = db_utils.create_share_type(name='clemsontigers') shares_quota = db_api.quota_create( self.ctxt, "fake-project-id", 'shares', 10, share_type_id=share_type['id']) snapshots_quota = db_api.quota_create( self.ctxt, "fake-project-id", 'snapshots', 30, share_type_id=share_type['id']) if reservations: resources = { 'shares': quota.ReservableResource('shares', '_sync_shares'), 
'snapshots': quota.ReservableResource( 'snapshots', '_sync_snapshots'), } project_quotas = { 'shares': shares_quota.hard_limit, 'snapshots': snapshots_quota.hard_limit, } user_quotas = { 'shares': shares_quota.hard_limit, 'snapshots': snapshots_quota.hard_limit, } deltas = {'shares': 1, 'snapshots': 3} expire = timeutils.utcnow() + datetime.timedelta(seconds=86400) reservation_uuids = db_api.quota_reserve( self.ctxt, resources, project_quotas, user_quotas, project_quotas, deltas, expire, False, 30, project_id='fake-project-id', share_type_id=share_type['id']) with db_api.context_manager.reader.using(self.ctxt): q_reservations = db_api._quota_reservations_query( self.ctxt, reservation_uuids, ).all() # There should be 2 "user" reservations and 2 "share-type" # quota reservations self.assertEqual(4, len(q_reservations)) q_share_type_reservations = [qr for qr in q_reservations if qr['share_type_id'] is not None] # There should be exactly two "share type" quota reservations self.assertEqual(2, len(q_share_type_reservations)) for q_reservation in q_share_type_reservations: self.assertEqual(q_reservation['share_type_id'], share_type['id']) if usages: db_api.quota_usage_create(self.ctxt, 'fake-project-id', 'fake-user-id', 'shares', 3, 2, False, share_type_id=share_type['id']) db_api.quota_usage_create(self.ctxt, 'fake-project-id', 'fake-user-id', 'snapshots', 2, 2, False, share_type_id=share_type['id']) q_usages = db_api.quota_usage_get_all_by_project_and_share_type( self.ctxt, 'fake-project-id', share_type['id']) self.assertEqual(3, q_usages['shares']['in_use']) self.assertEqual(2, q_usages['shares']['reserved']) self.assertEqual(2, q_usages['snapshots']['in_use']) self.assertEqual(2, q_usages['snapshots']['reserved']) # Validate that quotas exist share_type_quotas = db_api.quota_get_all_by_project_and_share_type( self.ctxt, 'fake-project-id', share_type['id']) expected_quotas = { 'project_id': 'fake-project-id', 'share_type_id': share_type['id'], 'shares': 10, 
'snapshots': 30, } self.assertDictEqual(expected_quotas, share_type_quotas) db_api.share_type_destroy(self.ctxt, share_type['id']) self.assertRaises(exception.ShareTypeNotFound, db_api.share_type_get, self.ctxt, share_type['id']) # Quotas must be gone share_type_quotas = db_api.quota_get_all_by_project_and_share_type( self.ctxt, 'fake-project-id', share_type['id']) self.assertEqual({'project_id': 'fake-project-id', 'share_type_id': share_type['id']}, share_type_quotas) # Check usages and reservations if usages: q_usages = db_api.quota_usage_get_all_by_project_and_share_type( self.ctxt, 'fake-project-id', share_type['id']) expected_q_usages = {'project_id': 'fake-project-id', 'share_type_id': share_type['id']} self.assertDictEqual(expected_q_usages, q_usages) if reservations: with db_api.context_manager.reader.using(self.ctxt): q_reservations = db_api._quota_reservations_query( self.ctxt, reservation_uuids, ).all() # just "user" quota reservations should be left, since we didn't # clean them up. 
self.assertEqual(2, len(q_reservations)) for q_reservation in q_reservations: self.assertIsNone(q_reservation['share_type_id']) @ddt.data( (None, None, 5), ('fake2', None, 2), (None, 'fake', 3), ) @ddt.unpack def test_share_replica_data_get_for_project( self, user_id, share_type_id, expected_result): kwargs = {} if share_type_id: kwargs.update({'id': share_type_id}) share_type_1 = db_utils.create_share_type(**kwargs) share_type_2 = db_utils.create_share_type() share_1 = db_utils.create_share(size=1, user_id='fake', share_type_id=share_type_1['id']) share_2 = db_utils.create_share(size=1, user_id='fake2', share_type_id=share_type_2['id']) project_id = share_1['project_id'] db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_ACTIVE, share_id=share_1['id'], share_type_id=share_type_1['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, share_id=share_1['id'], share_type_id=share_type_1['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, share_id=share_1['id'], share_type_id=share_type_1['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_ACTIVE, share_id=share_2['id'], share_type_id=share_type_2['id']) db_utils.create_share_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, share_id=share_2['id'], share_type_id=share_type_2['id']) kwargs = {} if user_id: kwargs.update({'user_id': user_id}) if share_type_id: kwargs.update({'share_type_id': share_type_id}) with db_api.context_manager.reader.using(self.ctxt): total_amt, total_size = db_api._share_replica_data_get_for_project( self.ctxt, project_id, **kwargs, ) self.assertEqual(expected_result, total_amt) self.assertEqual(expected_result, total_size) def test_share_type_get_by_name_or_id_found_by_id(self): share_type = db_utils.create_share_type() result = db_api.share_type_get_by_name_or_id( self.ctxt, share_type['id']) self.assertIsNotNone(result) self.assertEqual(share_type['id'], result['id']) def 
test_share_type_get_by_name_or_id_found_by_name(self): name = uuidutils.generate_uuid() db_utils.create_share_type(name=name) result = db_api.share_type_get_by_name_or_id(self.ctxt, name) self.assertIsNotNone(result) self.assertEqual(name, result['name']) self.assertNotEqual(name, result['id']) def test_share_type_get_by_name_or_id_when_does_not_exist(self): fake_id = uuidutils.generate_uuid() result = db_api.share_type_get_by_name_or_id(self.ctxt, fake_id) self.assertIsNone(result) def test_share_type_get_with_none_id(self): self.assertRaises(exception.DefaultShareTypeNotConfigured, db_api.share_type_get, self.ctxt, None) @ddt.data( {'name': 'st_1', 'description': 'des_1', 'is_public': True}, {'name': 'st_2', 'description': 'des_2', 'is_public': None}, {'name': 'st_3', 'description': None, 'is_public': False}, {'name': None, 'description': 'des_4', 'is_public': True}, ) @ddt.unpack def test_share_type_update(self, name, description, is_public): values = {} if name: values.update({'name': name}) if description: values.update({'description': description}) if is_public is not None: values.update({'is_public': is_public}) share_type = db_utils.create_share_type(name='st_name') db_api.share_type_update(self.ctxt, share_type['id'], values) updated_st = db_api.share_type_get_by_name_or_id(self.ctxt, share_type['id']) if name: self.assertEqual(name, updated_st['name']) if description: self.assertEqual(description, updated_st['description']) if is_public is not None: self.assertEqual(is_public, updated_st['is_public']) def test_share_type_update_not_found(self): share_type = db_utils.create_share_type(name='st_update_test') db_api.share_type_destroy(self.ctxt, share_type['id']) values = {"name": "not_exist"} self.assertRaises(exception.ShareTypeNotFound, db_api.share_type_update, self.ctxt, share_type['id'], values) class MessagesDatabaseAPITestCase(test.TestCase): def setUp(self): super(MessagesDatabaseAPITestCase, self).setUp() self.user_id = uuidutils.generate_uuid() 
self.project_id = uuidutils.generate_uuid() self.ctxt = context.RequestContext( user_id=self.user_id, project_id=self.project_id, is_admin=False) def test_message_create(self): result = db_utils.create_message(project_id=self.project_id, action_id='001') self.assertIsNotNone(result['id']) def test_message_delete(self): result = db_utils.create_message(project_id=self.project_id, action_id='001') db_api.message_destroy(self.ctxt, result) self.assertRaises(exception.NotFound, db_api.message_get, self.ctxt, result['id']) def test_message_get(self): message = db_utils.create_message(project_id=self.project_id, action_id='001') result = db_api.message_get(self.ctxt, message['id']) self.assertEqual(message['id'], result['id']) self.assertEqual(message['action_id'], result['action_id']) self.assertEqual(message['detail_id'], result['detail_id']) self.assertEqual(message['project_id'], result['project_id']) self.assertEqual(message['message_level'], result['message_level']) def test_message_get_not_found(self): self.assertRaises(exception.MessageNotFound, db_api.message_get, self.ctxt, 'fake_id') def test_message_get_different_project(self): message = db_utils.create_message(project_id='another-project', action_id='001') self.assertRaises(exception.MessageNotFound, db_api.message_get, self.ctxt, message['id']) def test_message_get_all(self): db_utils.create_message(project_id=self.project_id, action_id='001') db_utils.create_message(project_id=self.project_id, action_id='001') db_utils.create_message(project_id='another-project', action_id='001') result = db_api.message_get_all(self.ctxt) self.assertEqual(2, len(result)) def test_message_get_all_as_admin(self): db_utils.create_message(project_id=self.project_id, action_id='001') db_utils.create_message(project_id=self.project_id, action_id='001') db_utils.create_message(project_id='another-project', action_id='001') result = db_api.message_get_all(self.ctxt.elevated()) self.assertEqual(3, len(result)) def 
test_message_get_all_with_filter(self): for i in ['001', '002', '002']: db_utils.create_message(project_id=self.project_id, action_id=i) result = db_api.message_get_all(self.ctxt, filters={'action_id': '002'}) self.assertEqual(2, len(result)) def test_message_get_all_with_created_since_or_before_filter(self): now = timeutils.utcnow() db_utils.create_message(project_id=self.project_id, action_id='001', created_at=now - datetime.timedelta(seconds=1)) db_utils.create_message(project_id=self.project_id, action_id='001', created_at=now + datetime.timedelta(seconds=1)) db_utils.create_message(project_id=self.project_id, action_id='001', created_at=now + datetime.timedelta(seconds=2)) result1 = db_api.message_get_all(self.ctxt, filters={'created_before': now}) result2 = db_api.message_get_all(self.ctxt, filters={'created_since': now}) self.assertEqual(1, len(result1)) self.assertEqual(2, len(result2)) def test_message_get_all_with_invalid_sort_key(self): self.assertRaises(exception.InvalidInput, db_api.message_get_all, self.ctxt, sort_key='invalid_key') def test_message_get_all_sorted_asc(self): ids = [] for i in ['001', '002', '003']: msg = db_utils.create_message(project_id=self.project_id, action_id=i) ids.append(msg.id) result = db_api.message_get_all(self.ctxt, sort_key='action_id', sort_dir='asc') result_ids = [r.id for r in result] self.assertEqual(result_ids, ids) def test_message_get_all_with_limit_and_offset(self): for i in ['001', '002']: db_utils.create_message(project_id=self.project_id, action_id=i) result = db_api.message_get_all(self.ctxt, limit=1, offset=1) self.assertEqual(1, len(result)) def test_message_get_all_sorted(self): ids = [] for i in ['003', '002', '001']: msg = db_utils.create_message(project_id=self.project_id, action_id=i) ids.append(msg.id) # Default the sort direction to descending result = db_api.message_get_all(self.ctxt, sort_key='action_id') result_ids = [r.id for r in result] self.assertEqual(result_ids, ids) def 
test_cleanup_expired_messages(self): adm_context = self.ctxt.elevated() now = timeutils.utcnow() db_utils.create_message(project_id=self.project_id, action_id='001', expires_at=now) db_utils.create_message(project_id=self.project_id, action_id='001', expires_at=now - datetime.timedelta(days=1)) db_utils.create_message(project_id=self.project_id, action_id='001', expires_at=now + datetime.timedelta(days=1)) with mock.patch.object(timeutils, 'utcnow') as mock_time_now: mock_time_now.return_value = now db_api.cleanup_expired_messages(adm_context) messages = db_api.message_get_all(adm_context) self.assertEqual(2, len(messages)) class BackendInfoDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(BackendInfoDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() def test_create(self): host = "fake_host" value = "fake_hash_value" initial_data = db_api.backend_info_get(self.ctxt, host) db_api.backend_info_update(self.ctxt, host, value) actual_data = db_api.backend_info_get(self.ctxt, host) self.assertIsNone(initial_data) self.assertEqual(value, actual_data['info_hash']) self.assertEqual(host, actual_data['host']) def test_get(self): host = "fake_host" value = "fake_hash_value" db_api.backend_info_update(self.ctxt, host, value, False) actual_result = db_api.backend_info_get(self.ctxt, host) self.assertEqual(value, actual_result['info_hash']) self.assertEqual(host, actual_result['host']) def test_delete(self): host = "fake_host" value = "fake_hash_value" db_api.backend_info_update(self.ctxt, host, value) initial_data = db_api.backend_info_get(self.ctxt, host) db_api.backend_info_update(self.ctxt, host, delete_existing=True) actual_data = db_api.backend_info_get(self.ctxt, host) self.assertEqual(value, initial_data['info_hash']) self.assertEqual(host, initial_data['host']) self.assertIsNone(actual_data) def test_double_update(self): host = "fake_host" value_1 = "fake_hash_value_1" value_2 = "fake_hash_value_2" 
initial_data = db_api.backend_info_get(self.ctxt, host) db_api.backend_info_update(self.ctxt, host, value_1) db_api.backend_info_update(self.ctxt, host, value_2) actual_data = db_api.backend_info_get(self.ctxt, host) self.assertIsNone(initial_data) self.assertEqual(value_2, actual_data['info_hash']) self.assertEqual(host, actual_data['host']) @ddt.ddt class ShareResourcesAPITestCase(test.TestCase): def setUp(self): super(ShareResourcesAPITestCase, self).setUp() self.context = context.get_admin_context() @ddt.data('controller-100', 'controller-0@otherstore03', 'controller-0@otherstore01#pool200') def test_share_resources_host_update_no_matches(self, current_host): share_id = uuidutils.generate_uuid() share_network_id = uuidutils.generate_uuid() share_network_subnet_id = uuidutils.generate_uuid() share_net_subnets = [db_utils.create_share_network_subnet( id=share_network_subnet_id, share_network_id=share_network_id)] if '@' in current_host: if '#' in current_host: new_host = 'new-controller-X@backendX#poolX' else: new_host = 'new-controller-X@backendX' else: new_host = 'new-controller-X' resources = [ # noqa # share db_utils.create_share_without_instance( id=share_id, status=constants.STATUS_AVAILABLE), # share instances db_utils.create_share_instance( share_id=share_id, host='controller-0@fancystore01#pool100', status=constants.STATUS_AVAILABLE), db_utils.create_share_instance( share_id=share_id, host='controller-0@otherstore02#pool100', status=constants.STATUS_ERROR), db_utils.create_share_instance( share_id=share_id, host='controller-2@beststore07#pool200', status=constants.STATUS_DELETING), # share groups db_utils.create_share_group( share_network_id=share_network_id, host='controller-0@fancystore01#pool200', status=constants.STATUS_AVAILABLE), db_utils.create_share_group( share_network_id=share_network_id, host='controller-0@otherstore02#pool100', status=constants.STATUS_ERROR), db_utils.create_share_group( share_network_id=share_network_id, 
host='controller-2@beststore07#pool100', status=constants.STATUS_DELETING), # share servers db_utils.create_share_server( share_network_subnets=share_net_subnets, host='controller-0@fancystore01', status=constants.STATUS_ACTIVE), db_utils.create_share_server( share_network_subnets=share_net_subnets, host='controller-0@otherstore02#pool100', status=constants.STATUS_ERROR), db_utils.create_share_server( share_network_subnets=share_net_subnets, host='controller-2@beststore07', status=constants.STATUS_DELETING), ] updates = db_api.share_resources_host_update(self.context, current_host, new_host) expected_updates = {'instances': 0, 'servers': 0, 'groups': 0} self.assertDictEqual(expected_updates, updates) # validate that resources are unmodified: share_instances = db_api.share_instance_get_all( self.context, filters={'share_id': share_id}) share_groups = db_api.share_group_get_all( self.context, filters={'share_network_id': share_network_id}) with db_api.context_manager.reader.using(self.context): share_servers = db_api._share_server_get_query( self.context ).filter( models.ShareServer.share_network_subnets.any( id=share_net_subnets[0]['id'] ) ).all() self.assertEqual(3, len(share_instances)) self.assertEqual(3, len(share_groups)) self.assertEqual(3, len(share_servers)) for share_instance in share_instances: self.assertTrue(not share_instance['host'].startswith(new_host)) for share_group in share_groups: self.assertTrue(not share_group['host'].startswith(new_host)) for share_server in share_servers: self.assertTrue(not share_server['host'].startswith(new_host)) @ddt.data( {'current_host': 'controller-2', 'expected_updates': {'instances': 1, 'servers': 2, 'groups': 1}}, {'current_host': 'controller-0@fancystore01', 'expected_updates': {'instances': 2, 'servers': 1, 'groups': 2}}, {'current_host': 'controller-0@fancystore01#pool100', 'expected_updates': {'instances': 1, 'servers': 1, 'groups': 0}}) @ddt.unpack def test_share_resources_host_update_partial_matches(self, 
current_host, expected_updates): share_id = uuidutils.generate_uuid() share_network_id = uuidutils.generate_uuid() share_network_subnet_id = uuidutils.generate_uuid() share_net_subnets = [db_utils.create_share_network_subnet( id=share_network_subnet_id, share_network_id=share_network_id)] if '@' in current_host: if '#' in current_host: new_host = 'new-controller-X@backendX#poolX' else: new_host = 'new-controller-X@backendX' else: new_host = 'new-controller-X' total_updates_expected = (expected_updates['instances'] + expected_updates['groups'] + expected_updates['servers']) resources = [ # noqa # share db_utils.create_share_without_instance( id=share_id, status=constants.STATUS_AVAILABLE), # share instances db_utils.create_share_instance( share_id=share_id, host='controller-0@fancystore01#pool100', status=constants.STATUS_AVAILABLE), db_utils.create_share_instance( share_id=share_id, host='controller-0@fancystore01#pool200', status=constants.STATUS_ERROR), db_utils.create_share_instance( share_id=share_id, host='controller-2@beststore07#pool200', status=constants.STATUS_DELETING), # share groups db_utils.create_share_group( share_network_id=share_network_id, host='controller-0@fancystore01#pool101', status=constants.STATUS_ACTIVE), db_utils.create_share_group( share_network_id=share_network_id, host='controller-0@fancystore01#pool101', status=constants.STATUS_ERROR), db_utils.create_share_group( share_network_id=share_network_id, host='controller-2@beststore07#pool200', status=constants.STATUS_DELETING), # share servers db_utils.create_share_server( share_network_subnets=share_net_subnets, host='controller-0@fancystore01#pool100', status=constants.STATUS_ACTIVE), db_utils.create_share_server( share_network_subnets=share_net_subnets, host='controller-2@fancystore01', status=constants.STATUS_ERROR), db_utils.create_share_server( share_network_subnets=share_net_subnets, host='controller-2@beststore07#pool200', status=constants.STATUS_DELETING), ] actual_updates = 
db_api.share_resources_host_update( self.context, current_host, new_host) share_instances = db_api.share_instance_get_all( self.context, filters={'share_id': share_id}) share_groups = db_api.share_group_get_all( self.context, filters={'share_network_id': share_network_id}) with db_api.context_manager.reader.using(self.context): share_servers = db_api._share_server_get_query( self.context ).filter( models.ShareServer.share_network_subnets.any( id=share_net_subnets[0]['id'] ) ).all() updated_resources = [ res for res in share_instances + share_groups + share_servers if res['host'].startswith(new_host) ] self.assertEqual(expected_updates, actual_updates) self.assertEqual(total_updates_expected, len(updated_resources)) def test_share_instance_status_update(self): for i in range(1, 3): instances = [ db_utils.create_share_instance( status=constants.STATUS_SERVER_MIGRATING, share_id='fake') for __ in range(1, 3)] share_instance_ids = [instance['id'] for instance in instances] values = {'status': constants.STATUS_AVAILABLE} db_api.share_instance_status_update( self.context, share_instance_ids, values) instances = [ db_api.share_instance_get(self.context, instance_id) for instance_id in share_instance_ids] for instance in instances: self.assertEqual(constants.STATUS_AVAILABLE, instance['status']) def test_share_snapshot_instances_status_update(self): share_instance = db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake') instances = [ db_utils.create_snapshot_instance( 'fake_snapshot_id_1', status=constants.STATUS_CREATING, share_instance_id=share_instance['id']) for __ in range(1, 3)] snapshot_instance_ids = [instance['id'] for instance in instances] values = {'status': constants.STATUS_AVAILABLE} db_api.share_snapshot_instances_status_update( self.context, snapshot_instance_ids, values) instances = [ db_api.share_snapshot_instance_get(self.context, instance_id) for instance_id in snapshot_instance_ids] for instance in instances: 
self.assertEqual(constants.STATUS_AVAILABLE, instance['status']) def test_share_and_snapshot_instances_status_update(self): share_instance = db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake') share_instance_ids = [share_instance['id']] snap_instances = [ db_utils.create_snapshot_instance( 'fake_snapshot_id_1', status=constants.STATUS_CREATING, share_instance_id=share_instance['id']) for __ in range(1, 3)] snapshot_instance_ids = [instance['id'] for instance in snap_instances] values = {'status': constants.STATUS_AVAILABLE} mock_update_share_instances = self.mock_object( db_api, '_share_instance_status_update', mock.Mock(return_value=[share_instance])) mock_update_snap_instances = self.mock_object( db_api, '_share_snapshot_instances_status_update', mock.Mock(return_value=snap_instances)) updated_share_instances, updated_snap_instances = ( db_api.share_and_snapshot_instances_status_update( self.context, values, share_instance_ids=share_instance_ids, snapshot_instance_ids=snapshot_instance_ids)) mock_update_share_instances.assert_called_once_with( self.context, share_instance_ids, values) mock_update_snap_instances.assert_called_once_with( self.context, snapshot_instance_ids, values) self.assertEqual(updated_share_instances, [share_instance]) self.assertEqual(updated_snap_instances, snap_instances) @ddt.data( { 'share_instance_status': constants.STATUS_ERROR, 'snap_instance_status': constants.STATUS_AVAILABLE, 'expected_exc': exception.InvalidShareInstance }, { 'share_instance_status': constants.STATUS_AVAILABLE, 'snap_instance_status': constants.STATUS_ERROR, 'expected_exc': exception.InvalidShareSnapshotInstance } ) @ddt.unpack def test_share_and_snapshot_instances_status_update_invalid_status( self, share_instance_status, snap_instance_status, expected_exc): share_instance = db_utils.create_share_instance( status=share_instance_status, share_id='fake') share_snapshot_instance = db_utils.create_snapshot_instance( 
'fake_snapshot_id_1', status=snap_instance_status, share_instance_id=share_instance['id']) share_instance_ids = [share_instance['id']] snap_instance_ids = [share_snapshot_instance['id']] values = {'status': constants.STATUS_AVAILABLE} mock_instances_get_all = self.mock_object( db_api, '_share_instance_get_all', mock.Mock(return_value=[share_instance])) mock_snap_instances_get_all = self.mock_object( db_api, '_share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[share_snapshot_instance])) self.assertRaises(expected_exc, db_api.share_and_snapshot_instances_status_update, self.context, values, share_instance_ids=share_instance_ids, snapshot_instance_ids=snap_instance_ids, current_expected_status=constants.STATUS_AVAILABLE) mock_instances_get_all.assert_called_once_with( self.context, filters={'instance_ids': share_instance_ids}, ) if snap_instance_status == constants.STATUS_ERROR: mock_snap_instances_get_all.assert_called_once_with( self.context, {'instance_ids': snap_instance_ids}, ) @ddt.ddt class AsyncOperationDatabaseAPITestCase(test.TestCase): def setUp(self): """Run before each test.""" super(AsyncOperationDatabaseAPITestCase, self).setUp() self.user_id = uuidutils.generate_uuid() self.project_id = uuidutils.generate_uuid() self.ctxt = context.RequestContext( user_id=self.user_id, project_id=self.project_id, is_admin=False) def _get_async_operation_test_data(self): return uuidutils.generate_uuid() @ddt.data({"details": {"foo": "bar", "tee": "too"}, "valid": {"foo": "bar", "tee": "too"}}, {"details": {"foo": "bar", "tee": ["test"]}, "valid": {"foo": "bar", "tee": str(["test"])}}) @ddt.unpack def test_update(self, details, valid): entity_id = self._get_async_operation_test_data() initial_data = db_api.async_operation_data_get(self.ctxt, entity_id) db_api.async_operation_data_update(self.ctxt, entity_id, details) actual_data = db_api.async_operation_data_get(self.ctxt, entity_id) self.assertEqual({}, initial_data) self.assertEqual(valid, 
actual_data) @ddt.data({'with_deleted': True, 'append': False}, {'with_deleted': True, 'append': True}, {'with_deleted': False, 'append': False}, {'with_deleted': False, 'append': True}) @ddt.unpack def test_update_with_more_values(self, with_deleted, append): entity_id = self._get_async_operation_test_data() details = {"tee": "too"} more_details = {"foo": "bar"} result = {"tee": "too", "foo": "bar"} db_api.async_operation_data_update(self.ctxt, entity_id, details) if with_deleted: db_api.async_operation_data_delete(self.ctxt, entity_id) if append: more_details.update(details) if with_deleted and not append: result.pop("tee") db_api.async_operation_data_update(self.ctxt, entity_id, more_details) actual_result = db_api.async_operation_data_get(self.ctxt, entity_id) self.assertEqual(result, actual_result) @ddt.data(True, False) def test_update_with_duplicate(self, with_deleted): entity_id = self._get_async_operation_test_data() details = {"tee": "too"} db_api.async_operation_data_update(self.ctxt, entity_id, details) if with_deleted: db_api.async_operation_data_delete(self.ctxt, entity_id) db_api.async_operation_data_update(self.ctxt, entity_id, details) actual_result = db_api.async_operation_data_get(self.ctxt, entity_id) self.assertEqual(details, actual_result) def test_update_with_delete_existing(self): resource_id = self._get_async_operation_test_data() details = {"key1": "val1", "key2": "val2", "key3": "val3"} details_update = {"key1": "val1_upd", "key4": "new_val"} # Create new details db_api.async_operation_data_update(self.ctxt, resource_id, details) db_api.async_operation_data_update(self.ctxt, resource_id, details_update, delete_existing=True) actual_result = db_api.async_operation_data_get(self.ctxt, resource_id) self.assertEqual(details_update, actual_result) def test_get(self): resource_id = self._get_async_operation_test_data() test_key = "foo" test_keys = [test_key, "tee"] details = {test_keys[0]: "val", test_keys[1]: "val", "mee": "foo"} 
db_api.async_operation_data_update(self.ctxt, resource_id, details) actual_result_all = db_api.async_operation_data_get( self.ctxt, resource_id) actual_result_single_key = db_api.async_operation_data_get( self.ctxt, resource_id, test_key) actual_result_list = db_api.async_operation_data_get( self.ctxt, resource_id, test_keys) self.assertEqual(details, actual_result_all) self.assertEqual(details[test_key], actual_result_single_key) self.assertEqual(dict.fromkeys(test_keys, "val"), actual_result_list) def test_delete_single(self): test_id = self._get_async_operation_test_data() test_key = "foo" details = {test_key: "bar", "tee": "too"} valid_result = {"tee": "too"} db_api.async_operation_data_update(self.ctxt, test_id, details) db_api.async_operation_data_delete(self.ctxt, test_id, test_key) actual_result = db_api.async_operation_data_get( self.ctxt, test_id) self.assertEqual(valid_result, actual_result) def test_delete_all(self): test_id = self._get_async_operation_test_data() details = {"foo": "bar", "tee": "too"} db_api.async_operation_data_update(self.ctxt, test_id, details) db_api.async_operation_data_delete(self.ctxt, test_id) actual_result = db_api.async_operation_data_get( self.ctxt, test_id) self.assertEqual({}, actual_result) class TransfersTestCase(test.TestCase): """Test case for transfers.""" def setUp(self): super(TransfersTestCase, self).setUp() self.user_id = uuidutils.generate_uuid() self.project_id = uuidutils.generate_uuid() self.ctxt = context.RequestContext(user_id=self.user_id, project_id=self.project_id) @staticmethod def _create_transfer(resource_type='share', resource_id=None, source_project_id=None): """Create a transfer object.""" if resource_id and source_project_id: transfer = db_utils.create_transfer( resource_type=resource_type, resource_id=resource_id, source_project_id=source_project_id) elif resource_id: transfer = db_utils.create_transfer( resource_type=resource_type, resource_id=resource_id) elif source_project_id: transfer = 
db_utils.create_transfer( resource_type=resource_type, source_project_id=source_project_id) else: transfer = db_utils.create_transfer( resource_type=resource_type) return transfer['id'] def test_transfer_create(self): # If the resource_id is Null a KeyError exception will be raised. self.assertRaises(KeyError, self._create_transfer) share = db_utils.create_share(size=1, user_id=self.user_id, project_id=self.project_id) share_id = share['id'] self._create_transfer(resource_id=share_id) def test_transfer_get(self): share_id = db_utils.create_share(size=1, user_id=self.user_id, project_id=self.project_id)['id'] transfer_id = self._create_transfer(resource_id=share_id) transfer = db_api.transfer_get(self.ctxt, transfer_id) self.assertEqual(share_id, transfer['resource_id']) new_ctxt = context.RequestContext(user_id='new_user_id', project_id='new_project_id') self.assertRaises(exception.TransferNotFound, db_api.transfer_get, new_ctxt, transfer_id) transfer = db_api.transfer_get(new_ctxt.elevated(), transfer_id) self.assertEqual(share_id, transfer['resource_id']) def test_transfer_get_all(self): share_id1 = db_utils.create_share(size=1, user_id=self.user_id, project_id=self.project_id)['id'] share_id2 = db_utils.create_share(size=1, user_id=self.user_id, project_id=self.project_id)['id'] self._create_transfer(resource_id=share_id1, source_project_id=self.project_id) self._create_transfer(resource_id=share_id2, source_project_id=self.project_id) self.assertRaises(exception.NotAuthorized, db_api.transfer_get_all, self.ctxt) transfers = db_api.transfer_get_all(context.get_admin_context()) self.assertEqual(2, len(transfers)) transfers = db_api.transfer_get_all_by_project(self.ctxt, self.project_id) self.assertEqual(2, len(transfers)) new_ctxt = context.RequestContext(user_id='new_user_id', project_id='new_project_id') transfers = db_api.transfer_get_all_by_project(new_ctxt, 'new_project_id') self.assertEqual(0, len(transfers)) def test_transfer_destroy(self): share_id1 = 
db_utils.create_share(size=1, user_id=self.user_id, project_id=self.project_id)['id'] share_id2 = db_utils.create_share(size=1, user_id=self.user_id, project_id=self.project_id)['id'] transfer_id1 = self._create_transfer(resource_id=share_id1, source_project_id=self.project_id) transfer_id2 = self._create_transfer(resource_id=share_id2, source_project_id=self.project_id) transfers = db_api.transfer_get_all(context.get_admin_context()) self.assertEqual(2, len(transfers)) db_api.transfer_destroy(self.ctxt, transfer_id1) transfers = db_api.transfer_get_all(context.get_admin_context()) self.assertEqual(1, len(transfers)) db_api.transfer_destroy(self.ctxt, transfer_id2) transfers = db_api.transfer_get_all(context.get_admin_context()) self.assertEqual(0, len(transfers)) def test_transfer_accept_then_rollback(self): share = db_utils.create_share(size=1, user_id=self.user_id, project_id=self.project_id) transfer_id = self._create_transfer(resource_id=share['id'], source_project_id=self.project_id) new_ctxt = context.RequestContext(user_id='new_user_id', project_id='new_project_id') transfer = db_api.transfer_get(new_ctxt.elevated(), transfer_id) self.assertEqual(share['project_id'], transfer['source_project_id']) self.assertFalse(transfer['accepted']) self.assertIsNone(transfer['destination_project_id']) # accept the transfer db_api.transfer_accept(new_ctxt.elevated(), transfer_id, 'new_user_id', 'new_project_id') admin_context = new_ctxt.elevated() with db_api.context_manager.reader.using(admin_context): transfer = db_api.model_query( admin_context, models.Transfer, read_deleted='yes', ).filter_by(id=transfer_id).first() share = db_api.share_get(new_ctxt.elevated(), share['id']) self.assertEqual(share['project_id'], 'new_project_id') self.assertEqual(share['user_id'], 'new_user_id') self.assertTrue(transfer['accepted']) self.assertEqual('new_project_id', transfer['destination_project_id']) # then test rollback the transfer 
db_api.transfer_accept_rollback(new_ctxt.elevated(), transfer_id, self.user_id, self.project_id) with db_api.context_manager.reader.using(admin_context): transfer = db_api.model_query( admin_context, models.Transfer, ).filter_by(id=transfer_id).first() share = db_api.share_get(new_ctxt.elevated(), share['id']) self.assertEqual(share['project_id'], self.project_id) self.assertEqual(share['user_id'], self.user_id) self.assertFalse(transfer['accepted']) class ShareBackupDatabaseAPITestCase(BaseDatabaseAPITestCase): def setUp(self): """Run before each test.""" super(ShareBackupDatabaseAPITestCase, self).setUp() self.ctxt = context.get_admin_context() self.backup = { 'id': 'fake_backup_id', 'host': "fake_host", 'user_id': 'fake', 'project_id': 'fake', 'availability_zone': 'fake_availability_zone', 'status': constants.STATUS_CREATING, 'progress': '0', 'display_name': 'fake_name', 'display_description': 'fake_description', 'size': 1, } self.share_id = "fake_share_id" def test_create_share_backup(self): result = db_api.share_backup_create( self.ctxt, self.share_id, self.backup) self._check_fields(expected=self.backup, actual=result) def test_get(self): db_api.share_backup_create( self.ctxt, self.share_id, self.backup) result = db_api.share_backup_get( self.ctxt, self.backup['id']) self._check_fields(expected=self.backup, actual=result) def test_delete(self): db_api.share_backup_create( self.ctxt, self.share_id, self.backup) db_api.share_backup_delete(self.ctxt, self.backup['id']) self.assertRaises(exception.ShareBackupNotFound, db_api.share_backup_get, self.ctxt, self.backup['id']) def test_delete_not_found(self): self.assertRaises(exception.ShareBackupNotFound, db_api.share_backup_delete, self.ctxt, 'fake not exist id') def test_update(self): new_status = constants.STATUS_ERROR db_api.share_backup_create( self.ctxt, self.share_id, self.backup) result_update = db_api.share_backup_update( self.ctxt, self.backup['id'], {'status': constants.STATUS_ERROR}) result_get = 
db_api.share_backup_get(self.ctxt, self.backup['id']) self.assertEqual(new_status, result_update['status']) self._check_fields(expected=dict(result_update.items()), actual=dict(result_get.items())) def test_update_not_found(self): self.assertRaises(exception.ShareBackupNotFound, db_api.share_backup_update, self.ctxt, 'fake id', {}) class ResourceLocksTestCase(test.TestCase): """Test case for resource locks.""" def setUp(self): super(ResourceLocksTestCase, self).setUp() self.user_id = uuidutils.generate_uuid(dashed=False) self.project_id = uuidutils.generate_uuid(dashed=False) self.ctxt = context.RequestContext(user_id=self.user_id, project_id=self.project_id) def test_resource_lock_create(self): lock_data = { 'resource_id': uuidutils.generate_uuid(), 'resource_type': 'share', 'resource_action': 'delete', 'lock_context': 'user', 'user_id': self.user_id, 'project_id': self.project_id, 'lock_reason': 'xyzzyspoon!', } lock = db_api.resource_lock_create(self.ctxt, lock_data) self.assertTrue(uuidutils.is_uuid_like(lock['id'])) self.assertEqual(lock_data['user_id'], lock['user_id']) self.assertEqual(lock_data['project_id'], lock['project_id']) self.assertIsNone(lock['updated_at']) self.assertEqual('False', lock['deleted']) def test_resource_lock_update_invalid(self): self.assertRaises(exception.ResourceLockNotFound, db_api.resource_lock_update, self.ctxt, 'invalid-lock-id', {'lock_reason': 'yadayada'}) def test_resource_lock_update(self): lock = db_utils.create_lock(project_id=self.project_id) updated_lock = db_api.resource_lock_update( self.ctxt, lock['id'], {'lock_reason': 'new reason'}, ) self.assertEqual(lock['id'], updated_lock['id']) self.assertEqual('new reason', updated_lock['lock_reason']) self.assertEqual(lock['user_id'], updated_lock['user_id']) self.assertEqual(lock['project_id'], updated_lock['project_id']) lock_get = db_api.resource_lock_get(self.ctxt, lock['id']) self.assertEqual(lock['id'], lock_get['id']) self.assertEqual('new reason', 
lock_get['lock_reason']) self.assertEqual(lock['user_id'], lock_get['user_id']) self.assertEqual(lock['project_id'], lock_get['project_id']) def test_resource_lock_delete_invalid(self): self.assertRaises(exception.ResourceLockNotFound, db_api.resource_lock_delete, self.ctxt, 'invalid-lock-id') def test_resource_lock_delete(self): lock = db_utils.create_lock(project_id=self.project_id) lock_get = db_api.resource_lock_get(self.ctxt, lock['id']) return_value = db_api.resource_lock_delete(self.ctxt, lock['id']) self.assertIsNone(return_value) with db_api.context_manager.reader.using(self.ctxt): self.assertRaises(exception.ResourceLockNotFound, db_api._resource_lock_get, self.ctxt, lock_get['id']) def test_resource_lock_get_invalid(self): self.assertRaises(exception.ResourceLockNotFound, db_api.resource_lock_get, self.ctxt, 'invalid-lock-id') def test_resource_lock_get(self): lock = db_utils.create_lock(project_id=self.project_id) lock_get = db_api.resource_lock_get(self.ctxt, lock['id']) self.assertEqual(lock['id'], lock_get['id']) self.assertEqual('for the tests', lock_get['lock_reason']) self.assertEqual(lock['user_id'], lock_get['user_id']) self.assertEqual(lock['project_id'], lock_get['project_id']) def test_resource_lock_get_all_basic_filters(self): user_id_2 = uuidutils.generate_uuid(dashed=False) project_id_2 = uuidutils.generate_uuid(dashed=False) lk_1 = db_utils.create_lock(lock_reason='austin', user_id=self.user_id, project_id=self.project_id) lk_2 = db_utils.create_lock(lock_reason='bexar', user_id=self.user_id, project_id=self.project_id) lk_3 = db_utils.create_lock(lock_reason='cactus', user_id=self.user_id, project_id=self.project_id) lk_4 = db_utils.create_lock(lock_reason='diablo', user_id=user_id_2, project_id=project_id_2) lk_5 = db_utils.create_lock(lock_reason='essex') project_locks_limited_offset, count = db_api.resource_lock_get_all( self.ctxt, limit=2, offset=1, show_count=True) self.assertEqual(2, len(project_locks_limited_offset)) 
self.assertEqual(3, count) order_expected = [lk_2['id'], lk_1['id']] self.assertEqual(order_expected, [lock['id'] for lock in project_locks_limited_offset]) all_project_locks, count = db_api.resource_lock_get_all( self.ctxt, filters={'all_projects': True}, sort_dir='asc') self.assertEqual(5, len(all_project_locks)) order_expected = [ lk_1['id'], lk_2['id'], lk_3['id'], lk_4['id'], lk_5['id'] ] self.assertEqual(order_expected, [lock['id'] for lock in all_project_locks]) self.assertNotIn(lk_5['project_id'], [self.project_id, project_id_2]) self.assertIsNone(count) filtered_locks, count = db_api.resource_lock_get_all( self.ctxt, filters={'lock_reason~': 'xar'}) self.assertEqual(1, len(filtered_locks)) self.assertIsNone(count) self.assertEqual(lk_2['id'], filtered_locks[0]['id']) def test_resource_locks_get_all_time_filters(self): now = timeutils.utcnow() lock_1 = db_utils.create_lock( lock_reason='folsom', project_id=self.project_id, created_at=now - datetime.timedelta(seconds=1), ) lock_2 = db_utils.create_lock( lock_reason='grizly', project_id=self.project_id, created_at=now + datetime.timedelta(seconds=1), ) lock_3 = db_utils.create_lock( lock_reason='havana', project_id=self.project_id, created_at=now + datetime.timedelta(seconds=2), ) filters1 = {'created_before': now} filters2 = {'created_since': now} result1, count1 = db_api.resource_lock_get_all( self.ctxt, filters=filters1) result2, count2 = db_api.resource_lock_get_all( self.ctxt, filters=filters2) self.assertEqual(1, len(result1)) self.assertEqual(lock_1['id'], result1[0]['id']) self.assertEqual(2, len(result2)) self.assertEqual([lock_3['id'], lock_2['id']], [lock['id'] for lock in result2]) self.assertIsNone(count1) self.assertIsNone(count2) filters1.update(filters2) result3, count3 = db_api.resource_lock_get_all( self.ctxt, filters=filters1, show_count=True) self.assertEqual(0, len(result3)) self.assertEqual(0, count3) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/sqlalchemy/test_models.py0000664000175000017500000002657700000000000023125 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Testing of SQLAlchemy model classes.""" import ddt from manila.common import constants from manila import context from manila.db.sqlalchemy import api as db_api from manila import test from manila.tests import db_utils @ddt.ddt class ShareTestCase(test.TestCase): """Testing of SQLAlchemy Share model class.""" @ddt.data(constants.STATUS_MANAGE_ERROR, constants.STATUS_CREATING, constants.STATUS_EXTENDING, constants.STATUS_DELETING, constants.STATUS_EXTENDING_ERROR, constants.STATUS_ERROR_DELETING, constants.STATUS_MANAGING, constants.STATUS_MANAGE_ERROR) def test_share_instance_available(self, status): instance_list = [ db_utils.create_share_instance(status=constants.STATUS_AVAILABLE, share_id='fake_id'), db_utils.create_share_instance(status=status, share_id='fake_id') ] share1 = db_utils.create_share(instances=instance_list) share2 = db_utils.create_share(instances=list(reversed(instance_list))) self.assertEqual(constants.STATUS_AVAILABLE, share1.instance['status']) self.assertEqual(constants.STATUS_AVAILABLE, share2.instance['status']) @ddt.data([constants.STATUS_MANAGE_ERROR, constants.STATUS_CREATING], [constants.STATUS_ERROR_DELETING, constants.STATUS_DELETING], [constants.STATUS_ERROR, 
constants.STATUS_MANAGING], [constants.STATUS_UNMANAGE_ERROR, constants.STATUS_UNMANAGING], [constants.STATUS_INACTIVE, constants.STATUS_EXTENDING], [constants.STATUS_SHRINKING_ERROR, constants.STATUS_SHRINKING]) @ddt.unpack def test_share_instance_not_transitional(self, status, trans_status): instance_list = [ db_utils.create_share_instance(status=status, share_id='fake_id'), db_utils.create_share_instance(status=trans_status, share_id='fake_id') ] share1 = db_utils.create_share(instances=instance_list) share2 = db_utils.create_share(instances=list(reversed(instance_list))) self.assertEqual(status, share1.instance['status']) self.assertEqual(status, share2.instance['status']) def test_share_instance_creating(self): share = db_utils.create_share(status=constants.STATUS_CREATING) self.assertEqual(constants.STATUS_CREATING, share.instance['status']) @ddt.data(constants.STATUS_REPLICATION_CHANGE, constants.STATUS_AVAILABLE, constants.STATUS_ERROR, constants.STATUS_CREATING) def test_share_instance_reverting(self, status): instance_list = [ db_utils.create_share_instance( status=constants.STATUS_REVERTING, share_id='fake_id'), db_utils.create_share_instance( status=status, share_id='fake_id'), db_utils.create_share_instance( status=constants.STATUS_ERROR_DELETING, share_id='fake_id'), ] share1 = db_utils.create_share(instances=instance_list) share2 = db_utils.create_share(instances=list(reversed(instance_list))) self.assertEqual( constants.STATUS_REVERTING, share1.instance['status']) self.assertEqual( constants.STATUS_REVERTING, share2.instance['status']) @ddt.data(constants.STATUS_AVAILABLE, constants.STATUS_ERROR, constants.STATUS_CREATING) def test_share_instance_replication_change(self, status): instance_list = [ db_utils.create_share_instance( status=constants.STATUS_REPLICATION_CHANGE, share_id='fake_id'), db_utils.create_share_instance( status=status, share_id='fake_id'), db_utils.create_share_instance( status=constants.STATUS_ERROR_DELETING, share_id='fake_id') 
] share1 = db_utils.create_share(instances=instance_list) share2 = db_utils.create_share(instances=list(reversed(instance_list))) self.assertEqual( constants.STATUS_REPLICATION_CHANGE, share1.instance['status']) self.assertEqual( constants.STATUS_REPLICATION_CHANGE, share2.instance['status']) def test_share_instance_prefer_active_instance(self): instance_list = [ db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake_id', replica_state=constants.REPLICA_STATE_IN_SYNC), db_utils.create_share_instance( status=constants.STATUS_CREATING, share_id='fake_id', replica_state=constants.REPLICA_STATE_OUT_OF_SYNC), db_utils.create_share_instance( status=constants.STATUS_ERROR, share_id='fake_id', replica_state=constants.REPLICA_STATE_ACTIVE), db_utils.create_share_instance( status=constants.STATUS_MANAGING, share_id='fake_id', replica_state=constants.REPLICA_STATE_ACTIVE), ] share1 = db_utils.create_share(instances=instance_list) share2 = db_utils.create_share(instances=list(reversed(instance_list))) self.assertEqual( constants.STATUS_ERROR, share1.instance['status']) self.assertEqual( constants.STATUS_ERROR, share2.instance['status']) def test_access_rules_status_no_instances(self): share = db_utils.create_share(instances=[]) self.assertEqual(constants.STATUS_ACTIVE, share.access_rules_status) @ddt.data(constants.STATUS_ACTIVE, constants.SHARE_INSTANCE_RULES_SYNCING, constants.SHARE_INSTANCE_RULES_ERROR) def test_access_rules_status(self, access_status): instances = [ db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_ERROR, access_rules_status=constants.STATUS_ACTIVE), db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, access_rules_status=constants.STATUS_ACTIVE), db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, access_rules_status=access_status), ] share = db_utils.create_share(instances=instances) self.assertEqual(access_status, 
share.access_rules_status) @ddt.ddt class ShareAccessTestCase(test.TestCase): """Testing of SQLAlchemy Share Access related model classes.""" @ddt.data(constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_ACTIVE, constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_APPLYING) def test_share_access_mapping_state(self, expected_status): ctxt = context.get_admin_context() share = db_utils.create_share() share_instances = [ share.instance, db_utils.create_share_instance(share_id=share['id']), db_utils.create_share_instance(share_id=share['id']), db_utils.create_share_instance(share_id=share['id']), ] access_rule = db_utils.create_access(share_id=share['id']) # Update the access mapping states db_api.share_instance_access_update( ctxt, access_rule['id'], share_instances[0]['id'], {'state': constants.ACCESS_STATE_ACTIVE}) db_api.share_instance_access_update( ctxt, access_rule['id'], share_instances[1]['id'], {'state': expected_status}) db_api.share_instance_access_update( ctxt, access_rule['id'], share_instances[2]['id'], {'state': constants.ACCESS_STATE_ACTIVE}) db_api.share_instance_access_update( ctxt, access_rule['id'], share_instances[3]['id'], {'deleted': 'True', 'state': constants.STATUS_DELETED}) access_rule = db_api.share_access_get(ctxt, access_rule['id']) self.assertEqual(expected_status, access_rule['state']) class ShareSnapshotTestCase(test.TestCase): """Testing of SQLAlchemy ShareSnapshot model class.""" def test_instance_and_proxified_properties(self): in_sync_replica_instance = db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake_id', replica_state=constants.REPLICA_STATE_IN_SYNC) active_replica_instance = db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='fake_id', replica_state=constants.REPLICA_STATE_ACTIVE) out_of_sync_replica_instance = db_utils.create_share_instance( status=constants.STATUS_ERROR, share_id='fake_id', replica_state=constants.REPLICA_STATE_OUT_OF_SYNC) 
non_replica_instance = db_utils.create_share_instance( status=constants.STATUS_CREATING, share_id='fake_id') share_instances = [ in_sync_replica_instance, active_replica_instance, out_of_sync_replica_instance, non_replica_instance, ] share = db_utils.create_share(instances=share_instances) snapshot_instance_list = [ db_utils.create_snapshot_instance( 'fake_snapshot_id', status=constants.STATUS_CREATING, share_instance_id=out_of_sync_replica_instance['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id', status=constants.STATUS_ERROR, share_instance_id=in_sync_replica_instance['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id', status=constants.STATUS_AVAILABLE, provider_location='hogsmeade:snapshot1', progress='87%', share_instance_id=active_replica_instance['id']), db_utils.create_snapshot_instance( 'fake_snapshot_id', status=constants.STATUS_MANAGING, share_instance_id=non_replica_instance['id']), ] snapshot = db_utils.create_snapshot( id='fake_snapshot_id', share_id=share['id'], instances=snapshot_instance_list) # Proxified properties self.assertEqual(constants.STATUS_AVAILABLE, snapshot['status']) self.assertEqual(constants.STATUS_ERROR, snapshot['aggregate_status']) self.assertEqual('hogsmeade:snapshot1', snapshot['provider_location']) self.assertEqual('87%', snapshot['progress']) # Snapshot properties expected_share_name = '-'.join(['share', share['id']]) self.assertEqual(expected_share_name, snapshot['share_name']) self.assertEqual(active_replica_instance['id'], snapshot['instance']['share_instance_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/test_api.py0000664000175000017500000000411400000000000020230 0ustar00zuulzuul00000000000000# Copyright (c) Goutham Pacha Ravi. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit Tests for the interface methods in the manila/db/api.py.""" import re from manila.db import api as db_interface from manila.db.sqlalchemy import api as db_api from manila import test class DBInterfaceTestCase(test.TestCase): """Test cases for the DB Interface methods.""" def test_interface_methods(self): """Ensure that implementation methods match interfaces. manila/db/api module is merely shim layer between the database implementation and the other methods using these implementations. Bugs are introduced when the shims go out of sync with the actual implementation. So this test ensures that method names and signatures match between the interface and the implementation. """ members = dir(db_interface) # Ignore private methods for the file and any other members that # need not match. ignore_members = re.compile(r'^_|CONF|IMPL') interfaces = [i for i in members if not ignore_members.match(i)] for interface in interfaces: method = getattr(db_interface, interface) if callable(method): mock_method_call = self.mock_object(db_api, interface) # kwargs always specify defaults, ignore them in the signature. args = filter( lambda x: x != 'kwargs', method.__code__.co_varnames) method(*args) self.assertTrue(mock_method_call.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db/test_migration.py0000664000175000017500000000546300000000000021460 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import alembic from manila.db import migration from manila import test class MigrationTestCase(test.TestCase): def setUp(self): super(MigrationTestCase, self).setUp() self.config_patcher = mock.patch( 'manila.db.migrations.alembic.migration._alembic_config') self.config = self.config_patcher.start() self.config.return_value = 'fake_config' self.addCleanup(self.config_patcher.stop) @mock.patch('alembic.command.upgrade') def test_upgrade(self, upgrade): migration.upgrade('version_1') upgrade.assert_called_once_with('fake_config', 'version_1') @mock.patch('alembic.command.upgrade') def test_upgrade_none_version(self, upgrade): migration.upgrade(None) upgrade.assert_called_once_with('fake_config', 'head') @mock.patch('alembic.command.downgrade') def test_downgrade(self, downgrade): migration.downgrade('version_1') downgrade.assert_called_once_with('fake_config', 'version_1') @mock.patch('alembic.command.downgrade') def test_downgrade_none_version(self, downgrade): migration.downgrade(None) downgrade.assert_called_once_with('fake_config', 'base') @mock.patch('alembic.command.stamp') def test_stamp(self, stamp): migration.stamp('version_1') stamp.assert_called_once_with('fake_config', 'version_1') @mock.patch('alembic.command.stamp') def test_stamp_none_version(self, stamp): migration.stamp(None) stamp.assert_called_once_with('fake_config', 'head') @mock.patch('alembic.command.revision') def test_revision(self, 
revision): migration.revision('test_message', 'autogenerate_value') revision.assert_called_once_with('fake_config', 'test_message', 'autogenerate_value') @mock.patch.object(alembic.migration.MigrationContext, 'configure', mock.Mock()) def test_version(self): context = mock.Mock() context.get_current_revision = mock.Mock() alembic.migration.MigrationContext.configure.return_value = context migration.version() context.get_current_revision.assert_called_once_with() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/db_utils.py0000664000175000017500000002467400000000000017655 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from oslo_utils import uuidutils from manila.common import constants from manila import context from manila import db from manila.message import message_levels def _create_db_row(method, default_values, custom_values): override_defaults = custom_values.pop('override_defaults', None) if override_defaults: default_values = custom_values else: default_values.update(copy.deepcopy(custom_values)) return method(context.get_admin_context(), default_values) def create_share_group(**kwargs): """Create a share group object.""" share_group = { 'share_network_id': None, 'share_server_id': None, 'user_id': 'fake', 'project_id': 'fake', 'status': constants.STATUS_CREATING, 'host': 'fake_host' } return _create_db_row(db.share_group_create, share_group, kwargs) def create_share_group_snapshot(share_group_id, **kwargs): """Create a share group snapshot object.""" snapshot = { 'share_group_id': share_group_id, 'user_id': 'fake', 'project_id': 'fake', 'status': constants.STATUS_CREATING, } return _create_db_row(db.share_group_snapshot_create, snapshot, kwargs) def create_share_group_snapshot_member(share_group_snapshot_id, **kwargs): """Create a share group snapshot member object.""" member = { 'share_proto': "NFS", 'size': 0, 'share_instance_id': None, 'user_id': 'fake', 'project_id': 'fake', 'status': 'creating', 'share_group_snapshot_id': share_group_snapshot_id, } return _create_db_row( db.share_group_snapshot_member_create, member, kwargs) def create_share_access(**kwargs): share_access = { 'id': 'fake_id', 'access_type': 'ip', 'access_to': 'fake_ip_address' } return _create_db_row(db.share_access_create, share_access, kwargs) def create_share(**kwargs): """Create a share object.""" share = { 'share_proto': "NFS", 'size': 0, 'snapshot_id': None, 'share_network_id': None, 'share_server_id': None, 'user_id': 'fake', 'project_id': 'fake', 'metadata': {}, 'availability_zone': 'fake_availability_zone', 'status': constants.STATUS_CREATING, 'host': 'fake_host', 
'is_soft_deleted': False, 'mount_point_name': 'fake_mp', } return _create_db_row(db.share_create, share, kwargs) def create_share_without_instance(**kwargs): share = { 'share_proto': "NFS", 'size': 0, 'snapshot_id': None, 'share_network_id': None, 'share_server_id': None, 'user_id': 'fake', 'project_id': 'fake', 'metadata': {}, 'availability_zone': 'fake_availability_zone', 'status': constants.STATUS_CREATING, 'host': 'fake_host', 'is_soft_deleted': False, 'mount_point_name': None, } share.update(copy.deepcopy(kwargs)) return db.share_create(context.get_admin_context(), share, False) def create_share_instance(**kwargs): """Create a share instance object.""" return db.share_instance_create(context.get_admin_context(), kwargs.pop('share_id'), kwargs) def create_share_replica(**kwargs): """Create a share replica object.""" if 'share_id' not in kwargs: share = create_share() kwargs['share_id'] = share['id'] return db.share_instance_create(context.get_admin_context(), kwargs.pop('share_id'), kwargs) def create_snapshot(**kwargs): """Create a snapshot object.""" with_share = kwargs.pop('with_share', False) share = None if with_share: share = create_share(status=constants.STATUS_AVAILABLE, size=kwargs.get('size', 0)) snapshot = { 'share_proto': "NFS", 'size': 0, 'share_id': share['id'] if with_share else None, 'user_id': 'fake', 'project_id': 'fake', 'status': 'creating', 'provider_location': 'fake', } snapshot.update(kwargs) return db.share_snapshot_create( context.get_admin_context(), snapshot, create_snapshot_instance='instances' not in snapshot, ) def create_snapshot_instance(snapshot_id, **kwargs): """Create a share snapshot instance object.""" snapshot_instance = { 'provider_location': 'fake_provider_location', 'progress': '0%', 'status': constants.STATUS_CREATING, } snapshot_instance.update(kwargs) return db.share_snapshot_instance_create( context.get_admin_context(), snapshot_id, snapshot_instance) def create_snapshot_instance_export_locations(snapshot_id, 
**kwargs): """Create a snapshot instance export location object.""" export_location = { 'share_snapshot_instance_id': snapshot_id, } export_location.update(kwargs) return db.share_snapshot_instance_export_location_create( context.get_admin_context(), export_location) def create_access(**kwargs): """Create an access rule object.""" state = kwargs.pop('state', constants.ACCESS_STATE_QUEUED_TO_APPLY) access = { 'access_type': 'fake_type', 'access_to': 'fake_IP', 'share_id': kwargs.pop('share_id', None) or create_share()['id'], } access.update(kwargs) share_access_rule = _create_db_row(db.share_access_create, access, kwargs) for mapping in share_access_rule.instance_mappings: db.share_instance_access_update( context.get_admin_context(), share_access_rule['id'], mapping.share_instance_id, {'state': state}) return share_access_rule def create_snapshot_access(**kwargs): """Create a snapshot access rule object.""" access = { 'access_type': 'fake_type', 'access_to': 'fake_IP', 'share_snapshot_id': None, } return _create_db_row(db.share_snapshot_access_create, access, kwargs) def create_share_server(**kwargs): """Create a share server object.""" backend_details = kwargs.pop('backend_details', {}) srv = { 'host': 'host1', 'status': constants.STATUS_ACTIVE } share_srv = _create_db_row(db.share_server_create, srv, kwargs) if backend_details: db.share_server_backend_details_set( context.get_admin_context(), share_srv['id'], backend_details) return db.share_server_get(context.get_admin_context(), share_srv['id']) def create_share_type(**kwargs): """Create a share type object""" share_type = { 'name': 'fake_type', 'is_public': True, } return _create_db_row(db.share_type_create, share_type, kwargs) def create_share_group_type(**kwargs): """Create a share group type object""" share_group_type = { 'name': 'fake_group_type', 'is_public': True, } return _create_db_row(db.share_group_type_create, share_group_type, kwargs) def create_share_network(**kwargs): """Create a share network 
object.""" net = { 'user_id': 'fake', 'project_id': 'fake', 'status': 'active', 'name': 'whatever', 'description': 'fake description', } return _create_db_row(db.share_network_create, net, kwargs) def create_share_network_subnet(**kwargs): """Create a share network subnet object.""" subnet = { 'id': 'fake_sns_id', 'neutron_net_id': 'fake-neutron-net', 'neutron_subnet_id': 'fake-neutron-subnet', 'network_type': 'vlan', 'segmentation_id': 1000, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'availability_zone_id': 'fake_zone_id', 'share_network_id': 'fake_sn_id', 'gateway': None, 'mtu': None } return _create_db_row(db.share_network_subnet_create, subnet, kwargs) def create_security_service(**kwargs): share_network_id = kwargs.pop('share_network_id', None) service = { 'type': "FAKE", 'project_id': 'fake-project-id', } service_ref = _create_db_row(db.security_service_create, service, kwargs) if share_network_id: db.share_network_add_security_service(context.get_admin_context(), share_network_id, service_ref['id']) return service_ref def create_message(**kwargs): message_dict = { 'action': 'fake_Action', 'project_id': 'fake-project-id', 'message_level': message_levels.ERROR, } return _create_db_row(db.message_create, message_dict, kwargs) def create_transfer(**kwargs): transfer = {'display_name': 'display_name', 'salt': 'salt', 'crypt_hash': 'crypt_hash', 'resource_type': constants.SHARE_RESOURCE_TYPE} return _create_db_row(db.transfer_create, transfer, kwargs) def create_backup(share_id, **kwargs): """Create a share backup object.""" backup = { 'host': "fake_host", 'share_network_id': None, 'share_server_id': None, 'user_id': 'fake', 'project_id': 'fake', 'availability_zone': 'fake_availability_zone', 'status': constants.STATUS_CREATING, 'topic': 'fake_topic', 'description': 'fake_description', 'size': '1', } backup.update(kwargs) return db.share_backup_create( context.get_admin_context(), share_id, backup) def create_lock(**kwargs): lock = { 'resource_id': 
uuidutils.generate_uuid(), 'user_id': uuidutils.generate_uuid(dashed=False), 'project_id': uuidutils.generate_uuid(dashed=False), 'lock_context': 'user', 'lock_reason': 'for the tests', 'resource_type': 'share', 'resource_action': 'delete', } return _create_db_row(db.resource_lock_create, lock, kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/declare_conf.py0000664000175000017500000000152000000000000020435 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg CONF = cfg.CONF CONF.register_opt(cfg.IntOpt('answer', default=42, help='test conf')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_backup_driver.py0000664000175000017500000000324100000000000021641 0ustar00zuulzuul00000000000000# Copyright 2023 Cloudification GmbH. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila.data import backup_driver class FakeBackupDriver(backup_driver.BackupDriver): """Fake Backup driver.""" def __init__(self, *args, **kwargs): super(FakeBackupDriver, self).__init__(*args, **kwargs) pass def backup(self, context, backup, share): """Start a backup of a specified share.""" pass def restore(self, context, backup, share): """Restore a saved backup.""" pass def delete(self, context, backup): """Delete a saved backup.""" pass def get_backup_progress(self, context, backup, share): """Fetch the progress of a in progress backup""" return def get_restore_progress(self, context, backup, share): """Fetch the progress of a in progress restore""" return def get_backup_info(self, backup): """Get backup capabilities information of driver.""" backup_info = { 'mount': 'mount -vt fake_proto /fake-export %(path)s', 'unmount': 'umount -v %(path)s', } return backup_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_client_exception_class.py0000664000175000017500000000140600000000000023543 0ustar00zuulzuul00000000000000# Copyright 2016 SAP SE # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class Unauthorized(Exception): status_code = 401 message = "Unauthorized: bad credentials." def __init__(self, message=None): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_compute.py0000664000175000017500000000445500000000000020505 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg CONF = cfg.CONF class FakeServer(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_id') self.status = kwargs.pop('status', 'ACTIVE') self.networks = kwargs.pop('networks', {'fake_net': 'fake_net_value'}) for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) def __setitem__(self, attr, value): setattr(self, attr, value) def get(self, attr, default): return getattr(self, attr, default) def update(self, *args, **kwargs): pass class FakeKeypair(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_keypair_id') self.name = None for key, value in kwargs.items(): setattr(self, key, value) class API(object): """Fake Compute API.""" def instance_volume_attach(self, ctx, server_id, volume_id, mount_path): pass def instance_volume_detach(self, ctx, server_id, volume_id): pass def instance_volumes_list(self, ctx, server_id): pass def server_create(self, *args, **kwargs): pass def server_delete(self, *args, **kwargs): pass def server_get(self, *args, **kwargs): pass def server_get_by_name_or_id(self, *args, **kwargs): pass def server_reboot(self, *args, **kwargs): pass def keypair_list(self, *args, **kwargs): pass def keypair_import(self, *args, **kwargs): pass def keypair_delete(self, *args, **kwargs): pass def add_security_group_to_server(self, *args, **kwargs): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_driver.py0000664000175000017500000001005500000000000020315 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.common import constants from manila.share import driver from manila.tests import fake_service_instance LOG = log.getLogger(__name__) class FakeShareDriver(driver.ShareDriver): """Fake share driver. This fake driver can be also used as a test driver within a real running manila-share instance. To activate it use this in manila.conf:: enabled_share_backends = fake [fake] driver_handles_share_servers = True share_backend_name = fake share_driver = manila.tests.fake_driver.FakeShareDriver With it you basically mocked all backend driver calls but e.g. networking will still be activated. 
""" def __init__(self, *args, **kwargs): self._setup_service_instance_manager() super(FakeShareDriver, self).__init__([True, False], *args, **kwargs) def _setup_service_instance_manager(self): self.service_instance_manager = ( fake_service_instance.FakeServiceInstanceManager()) def manage_existing(self, share, driver_options, share_server=None): LOG.debug("Fake share driver: manage") LOG.debug("Fake share driver: driver options: %s", str(driver_options)) return {'size': 1} def unmanage(self, share, share_server=None): LOG.debug("Fake share driver: unmanage") @property def driver_handles_share_servers(self): if not isinstance(self.configuration.safe_get( 'driver_handles_share_servers'), bool): return True return self.configuration.driver_handles_share_servers def create_snapshot(self, context, snapshot, share_server=None): pass def delete_snapshot(self, context, snapshot, share_server=None): pass def create_share(self, context, share, share_server=None): return ['/fake/path', '/fake/path2'] def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): return { 'export_locations': ['/fake/path', '/fake/path2'], 'status': constants.STATUS_AVAILABLE, } def delete_share(self, context, share, share_server=None): pass def ensure_share(self, context, share, share_server=None): pass def allow_access(self, context, share, access, share_server=None): pass def deny_access(self, context, share, access, share_server=None): pass def get_share_stats(self, refresh=False): return None def do_setup(self, context): pass def setup_server(self, *args, **kwargs): pass def teardown_server(self, *args, **kwargs): pass def get_network_allocations_number(self): # NOTE(vponomaryov): Simulate drivers that use share servers and # do not use 'service_instance' module. 
return 2 def _verify_share_server_handling(self, driver_handles_share_servers): return super(FakeShareDriver, self)._verify_share_server_handling( driver_handles_share_servers) def create_share_group(self, context, group_id, share_server=None): pass def delete_share_group(self, context, group_id, share_server=None): pass def get_share_status(self, share, share_server=None): return { 'export_locations': ['/fake/path', '/fake/path2'], 'status': constants.STATUS_AVAILABLE, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_image.py0000664000175000017500000000155200000000000020106 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg CONF = cfg.CONF class FakeImage(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_image_id') for key, value in kwargs.items(): setattr(self, key, value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_network.py0000664000175000017500000001447500000000000020525 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import uuidutils CONF = cfg.CONF class FakeNetwork(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_net_id') self.name = kwargs.pop('name', 'net_name') self.subnets = kwargs.pop('subnets', []) for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakeSubnet(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_subnet_id') self.network_id = kwargs.pop('network_id', 'fake_net_id') self.cidr = kwargs.pop('cidr', 'fake_cidr') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakePort(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_subnet_id') self.network_id = kwargs.pop('network_id', 'fake_net_id') self.fixed_ips = kwargs.pop('fixed_ips', []) for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakeRouter(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_router_id') self.name = kwargs.pop('name', 'fake_router_name') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) def __setitem__(self, attr, value): setattr(self, attr, value) class FakeDeviceAddr(object): def __init__(self, list_of_addresses=None): self.addresses = list_of_addresses or [ dict(ip_version=4, cidr='1.0.0.0/27'), dict(ip_version=4, cidr='2.0.0.0/27'), dict(ip_version=6, cidr='3.0.0.0/27'), ] def list(self): 
return self.addresses class FakeDevice(object): def __init__(self, name=None, list_of_addresses=None): self.addr = FakeDeviceAddr(list_of_addresses) self.name = name or 'fake_device_name' class API(object): """Fake Network API.""" admin_project_id = 'fake_admin_project_id' network = { "status": "ACTIVE", "subnets": ["fake_subnet_id"], "name": "fake_network", "tenant_id": "fake_tenant_id", "shared": False, "id": "fake_id", "router:external": False, } port = { "status": "ACTIVE", "allowed_address_pairs": [], "admin_state_up": True, "network_id": "fake_network_id", "tenant_id": "fake_tenant_id", "extra_dhcp_opts": [], "device_owner": "fake", "binding:capabilities": {"port_filter": True}, "mac_address": "00:00:00:00:00:00", "fixed_ips": [ {"subnet_id": "56537094-98d7-430a-b513-81c4dc6d9903", "ip_address": "10.12.12.10"} ], "id": "fake_port_id", "security_groups": ["fake_sec_group_id"], "device_id": "fake_device_id" } def get_all_admin_project_networks(self): net1 = self.network.copy() net1['tenant_id'] = self.admin_project_id net1['id'] = uuidutils.generate_uuid() net2 = self.network.copy() net2['tenant_id'] = self.admin_project_id net2['id'] = uuidutils.generate_uuid() return [net1, net2] def create_port(self, tenant_id, network_id, subnet_id=None, fixed_ip=None, device_owner=None, device_id=None): port = self.port.copy() port['network_id'] = network_id port['admin_state_up'] = True port['tenant_id'] = tenant_id if fixed_ip: fixed_ip_dict = {'ip_address': fixed_ip} if subnet_id: fixed_ip_dict.update({'subnet_id': subnet_id}) port['fixed_ips'] = [fixed_ip_dict] if device_owner: port['device_owner'] = device_owner if device_id: port['device_id'] = device_id return port def list_ports(self, **search_opts): """List ports for the client based on search options.""" ports = [] for i in range(2): ports.append(self.port.copy()) for port in ports: port['id'] = uuidutils.generate_uuid() for key, val in search_opts.items(): port[key] = val if 'id' in search_opts: return ports 
return ports def show_port(self, port_id): """Return the port for the client given the port id.""" port = self.port.copy() port['id'] = port_id return port def delete_port(self, port_id): pass def get_subnet(self, subnet_id): pass def subnet_create(self, *args, **kwargs): pass def router_add_interface(self, *args, **kwargs): pass def show_router(self, *args, **kwargs): pass def update_port_fixed_ips(self, *args, **kwargs): pass def router_remove_interface(self, *args, **kwargs): pass def update_subnet(self, *args, **kwargs): pass def get_all_networks(self): """Get all networks for client.""" net1 = self.network.copy() net2 = self.network.copy() net1['id'] = uuidutils.generate_uuid() net2['id'] = uuidutils.generate_uuid() return [net1, net2] def get_network(self, network_uuid): """Get specific network for client.""" network = self.network.copy() network['id'] = network_uuid return network def network_create(self, tenant_id, name): network = self.network.copy() network['tenant_id'] = tenant_id network['name'] = name return network ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_notifier.py0000664000175000017500000000471000000000000020642 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import functools import oslo_messaging as messaging from oslo_serialization import jsonutils from manila import rpc NOTIFICATIONS = [] def reset(): del NOTIFICATIONS[:] FakeMessage = collections.namedtuple( 'Message', ['publisher_id', 'priority', 'event_type', 'payload'], ) class FakeNotifier(object): def __init__(self, transport, publisher_id=None, serializer=None): self.transport = transport self.publisher_id = publisher_id for priority in ['debug', 'info', 'warn', 'error', 'critical']: setattr(self, priority, functools.partial(self._notify, priority.upper())) self._serializer = serializer or messaging.serializer.NoOpSerializer() def prepare(self, publisher_id=None): if publisher_id is None: publisher_id = self.publisher_id return self.__class__(self.transport, publisher_id, self._serializer) def _notify(self, priority, ctxt, event_type, payload): payload = self._serializer.serialize_entity(ctxt, payload) # NOTE(sileht): simulate the kombu serializer # this permit to raise an exception if something have not # been serialized correctly jsonutils.to_primitive(payload) msg = dict(publisher_id=self.publisher_id, priority=priority, event_type=event_type, payload=payload) NOTIFICATIONS.append(msg) def stub_notifier(testcase): testcase.mock_object(messaging, 'Notifier', FakeNotifier) if rpc.NOTIFIER: serializer = getattr(rpc.NOTIFIER, '_serializer', None) testcase.mock_object(rpc, 'NOTIFIER', FakeNotifier(rpc.NOTIFIER.transport, rpc.NOTIFIER.publisher_id, serializer=serializer)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_service_instance.py0000664000175000017500000000361600000000000022353 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from manila.tests import fake_compute class FakeServiceInstanceManager(object): def __init__(self, *args, **kwargs): self.db = mock.Mock() self._helpers = { 'CIFS': mock.Mock(), 'NFS': mock.Mock(), } self.share_networks_locks = {} self.share_networks_servers = {} self.fake_server = fake_compute.FakeServer() self.service_instance_name_template = 'manila_fake_service_instance-%s' self._network_helper = None def get_service_instance(self, context, share_network_id, create=True): return self.fake_server @property def network_helper(self): return self._get_network_helper() def _get_network_helper(self): self._network_helper = FakeNeutronNetworkHelper() return self._network_helper def _create_service_instance(self, context, instance_name, share_network_id, old_server_ip): return self.fake_server def _delete_server(self, context, server): pass def _get_service_instance_name(self, share_network_id): return self.service_instance_name_template % share_network_id class FakeNeutronNetworkHelper(object): def setup_connectivity_with_service_instances(self): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_share.py0000664000175000017500000002601700000000000020131 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2015 Intel, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from manila.api.openstack import api_version_request as api_version from manila.common import constants from manila.db.sqlalchemy import models from manila.tests.db import fakes as db_fakes from oslo_utils import uuidutils def fake_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'fake_proto', 'share_network_id': 'fake share network id', 'share_server_id': 'fake share server id', 'export_location': 'fake_location:/fake_share', 'project_id': 'fake_project_uuid', 'availability_zone': 'fake_az', 'snapshot_support': 'True', 'replication_type': None, 'is_busy': False, 'share_group_id': None, 'instance': { 'id': 'fake_share_instance_id', 'host': 'fakehost', 'share_type_id': '1', 'share_network_id': 'fake share network id', }, 'mount_snapshot_support': False, } share.update(kwargs) return db_fakes.FakeModel(share) def fake_share_instance(base_share=None, **kwargs): if base_share is None: share = fake_share() else: share = base_share share_instance = { 'share_id': share['id'], 'id': "fakeinstanceid", 'status': "active", 'host': 'fakehost', 'share_network_id': 'fakesharenetworkid', 'share_server_id': 'fakeshareserverid', 'share_type_id': '1', 'mount_point_name': None, } for attr in models.ShareInstance._proxified_properties: share_instance[attr] = getattr(share, attr, None) share_instance.update(kwargs) return db_fakes.FakeModel(share_instance) def fake_share_type(**kwargs): share_type = { 'id': "fakesharetype", 'name': "fakesharetypename", 'is_public': False, 'extra_specs': { 'driver_handles_share_servers': 'False', }, 
'mount_point_name_support': False } extra_specs = kwargs.pop('extra_specs', {}) for key, value in extra_specs.items(): share_type['extra_specs'][key] = value share_type.update(kwargs) return db_fakes.FakeModel(share_type) def fake_snapshot(create_instance=False, **kwargs): instance_keys = ('instance_id', 'snapshot_id', 'share_instance_id', 'status', 'progress', 'provider_location') snapshot_keys = ('id', 'share_name', 'share_id', 'name', 'share_size', 'share_proto', 'instance', 'aggregate_status', 'share', 'project_id', 'size') instance_kwargs = {k: kwargs.get(k) for k in instance_keys if k in kwargs} snapshot_kwargs = {k: kwargs.get(k) for k in snapshot_keys if k in kwargs} aggregate_status = snapshot_kwargs.get( 'aggregate_status', instance_kwargs.get( 'status', constants.STATUS_CREATING)) snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_size': 1, 'share_proto': 'fake_proto', 'instance': {}, 'share': 'fake_share', 'aggregate_status': aggregate_status, 'project_id': 'fakeprojectid', 'size': 1, 'user_id': 'xyzzy', } snapshot.update(snapshot_kwargs) if create_instance: if 'instance_id' in instance_kwargs: instance_kwargs['id'] = instance_kwargs.pop('instance_id') snapshot['instance'] = fake_snapshot_instance( base_snapshot=snapshot, **instance_kwargs) snapshot['status'] = snapshot['instance']['status'] snapshot['provider_location'] = ( snapshot['instance']['provider_location'] ) snapshot['progress'] = snapshot['instance']['progress'] snapshot['instances'] = snapshot['instance'], else: snapshot['status'] = constants.STATUS_AVAILABLE snapshot['progress'] = '0%' snapshot['provider_location'] = 'fake' snapshot.update(instance_kwargs) return db_fakes.FakeModel(snapshot) def fake_snapshot_instance(base_snapshot=None, as_primitive=False, **kwargs): if base_snapshot is None: base_snapshot = fake_snapshot() snapshot_instance = { 'id': 'fakesnapshotinstanceid', 'snapshot_id': base_snapshot['id'], 'status': 
constants.STATUS_CREATING, 'progress': '0%', 'provider_location': 'i_live_here_actually', 'share_name': 'fakename', 'share_id': 'fakeshareinstanceid', 'share_instance': { 'share_id': 'fakeshareid', 'share_type_id': '1', }, 'share_instance_id': 'fakeshareinstanceid', 'deleted': False, 'updated_at': datetime.datetime(2016, 3, 21, 0, 5, 58), 'created_at': datetime.datetime(2016, 3, 21, 0, 5, 58), 'deleted_at': None, 'share': fake_share(), } snapshot_instance.update(kwargs) if as_primitive: return snapshot_instance else: return db_fakes.FakeModel(snapshot_instance) def expected_snapshot(version=None, id='fake_snapshot_id', **kwargs): api_major_version = 'v2' if version and version.startswith('2.') else 'v1' self_link = 'http://localhost/share/%s/fake/snapshots/%s' % ( api_major_version, id) bookmark_link = 'http://localhost/share/fake/snapshots/%s' % id snapshot = { 'id': id, 'share_id': 'fakeshareid', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'status': 'fakesnapstatus', 'name': 'displaysnapname', 'description': 'displaysnapdesc', 'share_size': 1, 'size': 1, 'share_proto': 'fakesnapproto', 'links': [ { 'href': self_link, 'rel': 'self', }, { 'href': bookmark_link, 'rel': 'bookmark', }, ], } if version and (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.17')): snapshot.update({ 'user_id': 'fakesnapuser', 'project_id': 'fakesnapproject', }) if version and (api_version.APIVersionRequest(version) >= api_version.APIVersionRequest('2.73')): snapshot.update({ 'metadata': {} }) snapshot.update(kwargs) return {'snapshot': snapshot} def search_opts(**kwargs): search_opts = { 'name': 'fake_name', 'status': 'fake_status', 'share_id': 'fake_share_id', 'sort_key': 'fake_sort_key', 'sort_dir': 'fake_sort_dir', 'offset': '1', 'limit': '1', } search_opts.update(kwargs) return search_opts def fake_access(**kwargs): access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.1', 'access_level': 'rw', 'state': 'active', } 
access.update(kwargs) return db_fakes.FakeModel(access) def fake_replica(id=None, as_primitive=True, for_manager=False, **kwargs): replica = { 'id': id or uuidutils.generate_uuid(), 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'deleted': False, 'host': 'openstack@BackendZ#PoolA', 'status': 'available', 'scheduled_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'launched_at': datetime.datetime(2015, 8, 10, 0, 5, 58), 'terminated_at': None, 'replica_state': None, 'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f', 'export_locations': [{'path': 'path1'}, {'path': 'path2'}], 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '53099868-65f1-11e5-9d70-feff819cdc9f', 'access_rules_status': constants.SHARE_INSTANCE_RULES_SYNCING, } if for_manager: replica.update({ 'user_id': None, 'project_id': None, 'share_type_id': None, 'size': None, 'display_name': None, 'display_description': None, 'replication_type': None, 'snapshot_id': None, 'share_proto': None, 'is_public': None, 'share_group_id': None, 'source_share_group_snapshot_member_id': None, 'availability_zone': 'fake_az', }) replica.update(kwargs) if as_primitive: return replica else: return db_fakes.FakeModel(replica) def fake_replica_request_spec(as_primitive=True, **kwargs): replica = fake_replica(id='9c0db763-a109-4862-b010-10f2bd395295') all_replica_hosts = ','.join(['fake_active_replica_host', replica['host']]) request_spec = { 'share_properties': fake_share( id='f0e4bb5e-65f0-11e5-9d70-feff819cdc9f'), 'share_instance_properties': replica, 'share_proto': 'nfs', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'snapshot_id': None, 'share_type': 'fake_share_type', 'share_group': None, 'active_replica_host': 'fake_active_replica_host', 'all_replica_hosts': all_replica_hosts, } request_spec.update(kwargs) if as_primitive: return request_spec else: return db_fakes.FakeModel(request_spec) def fake_share_server_get(): fake_share_server = { 'status': constants.STATUS_ACTIVE, 
'updated_at': None, 'host': 'fake_host', 'share_network_name': 'fake_sn_name', 'project_id': 'fake_project_id', 'id': 'fake_share_server_id', 'backend_details': { 'security_service_active_directory': '{"name": "fake_AD"}', 'security_service_ldap': '{"name": "fake_LDAP"}', 'security_service_kerberos': '{"name": "fake_kerberos"}', } } return fake_share_server def fake_backup(as_primitive=True, **kwargs): backup = { 'id': uuidutils.generate_uuid(), 'host': "fake_host", 'user_id': 'fake', 'project_id': 'fake', 'availability_zone': 'fake_availability_zone', 'status': constants.STATUS_CREATING, 'progress': '0', 'restore_progress': '0', 'topic': 'fake_topic', 'share_id': uuidutils.generate_uuid(), 'display_name': 'fake_name', 'display_description': 'fake_description', 'size': '1', } backup.update(kwargs) if as_primitive: return backup else: return db_fakes.FakeModel(backup) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_utils.py0000664000175000017500000000721400000000000020165 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""This modules stubs out functions in manila.utils.""" import re from unittest import mock from eventlet import greenthread from oslo_log import log from manila import exception from manila import utils LOG = log.getLogger(__name__) _fake_execute_repliers = [] _fake_execute_log = [] def fake_execute_get_log(): return _fake_execute_log def fake_execute_clear_log(): global _fake_execute_log _fake_execute_log = [] def fake_execute_set_repliers(repliers): """Allows the client to configure replies to commands.""" global _fake_execute_repliers _fake_execute_repliers = repliers def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs): """A reply handler for commands that haven't been added to the reply list. Returns empty strings for stdout and stderr. """ return '', '' def fake_execute(*cmd_parts, **kwargs): """This function stubs out execute. It optionally executes a preconfigued function to return expected data. """ global _fake_execute_repliers process_input = kwargs.get('process_input', None) check_exit_code = kwargs.get('check_exit_code', 0) delay_on_retry = kwargs.get('delay_on_retry', True) attempts = kwargs.get('attempts', 1) run_as_root = kwargs.get('run_as_root', False) cmd_str = ' '.join(str(part) for part in cmd_parts) LOG.debug("Faking execution of cmd (subprocess): %s", cmd_str) _fake_execute_log.append(cmd_str) reply_handler = fake_execute_default_reply_handler for fake_replier in _fake_execute_repliers: if re.match(fake_replier[0], cmd_str): reply_handler = fake_replier[1] LOG.debug('Faked command matched %s', fake_replier[0]) break if isinstance(reply_handler, str): # If the reply handler is a string, return it as stdout reply = reply_handler, '' else: try: # Alternative is a function, so call it reply = reply_handler(cmd_parts, process_input=process_input, delay_on_retry=delay_on_retry, attempts=attempts, run_as_root=run_as_root, check_exit_code=check_exit_code) except exception.ProcessExecutionError as e: LOG.debug('Faked command raised 
an exception %s', e) raise stdout = reply[0] stderr = reply[1] LOG.debug("Reply to faked command is stdout='%(stdout)s' " "stderr='%(stderr)s'.", {"stdout": stdout, "stderr": stderr}) # Replicate the sleep call in the real function greenthread.sleep(0) return reply def stub_out_utils_execute(testcase): fake_execute_set_repliers([]) fake_execute_clear_log() testcase.mock_object(utils, 'execute', fake_execute) def get_fake_lock_context(): context_manager_mock = mock.Mock() setattr(context_manager_mock, '__enter__', mock.Mock()) setattr(context_manager_mock, '__exit__', mock.Mock()) return context_manager_mock ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_volume.py0000664000175000017500000000360300000000000020332 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg CONF = cfg.CONF class FakeVolume(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_vol_id') self.status = kwargs.pop('status', 'available') self.device = kwargs.pop('device', '') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class FakeVolumeSnapshot(object): def __init__(self, **kwargs): self.id = kwargs.pop('id', 'fake_volsnap_id') self.status = kwargs.pop('status', 'available') for key, value in kwargs.items(): setattr(self, key, value) def __getitem__(self, attr): return getattr(self, attr) class API(object): """Fake Volume API.""" def get(self, *args, **kwargs): pass def create_snapshot_force(self, *args, **kwargs): pass def get_snapshot(self, *args, **kwargs): pass def delete_snapshot(self, *args, **kwargs): pass def create(self, *args, **kwargs): pass def extend(self, *args, **kwargs): pass def get_all(self, search_opts): pass def delete(self, volume_id): pass def get_all_snapshots(self, search_opts): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/fake_zfssa.py0000664000175000017500000000562700000000000020161 0ustar00zuulzuul00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fake ZFS Storage Appliance, for unit testing. 
""" class FakeResponse(object): def __init__(self, statuscode): self.status = statuscode self.data = 'data' class FakeZFSSA(object): """Fake ZFS SA.""" def __init__(self): self.user = None self.host = 'fakehost' self.url = 'fakeurl' self.rclient = None def login(self, user): self.user = user def set_host(self, host, timeout=None): self.host = host def enable_service(self, service): return True def create_project(self, pool, project, arg): pass def get_share(self, pool, project, share): pass def create_share(self, pool, project, share): pass def delete_share(self, pool, project, share): pass def create_snapshot(self, pool, project, share): pass def delete_snapshot(self, pool, project, share, snapshot): pass def clone_snapshot(self, pool, project, share, snapshot, clone, size): pass def has_clones(self, pool, project, vol, snapshot): return False def modify_share(self, pool, project, share, arg): pass def allow_access_nfs(self, pool, project, share, access): pass def deny_access_nfs(self, pool, project, share, access): pass def get_project_stats(self, pool, project): pass def create_schema(self, schema): pass class FakeRestClient(object): """Fake ZFSSA Rest Client.""" def __init__(self): self.url = None self.headers = None self.log_function = None self.local = None self.base_path = None self.timeout = 60 self.do_logout = False self.auth_str = None def _path(self, path, base_path=None): pass def _authoriza(self): pass def login(self, auth_str): pass def logout(self): pass def islogin(self): pass def request(self, path, request, body=None, **kwargs): pass def get(self, path, **kwargs): pass def post(self, path, body="", **kwargs): pass def put(self, path, body="", **kwargs): pass def delete(self, path, **kwargs): pass def head(self, path, **kwargs): pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.973671 manila-21.0.0/manila/tests/hacking/0000775000175000017500000000000000000000000017065 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/hacking/__init__.py0000664000175000017500000000000000000000000021164 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/hacking/checks.py0000664000175000017500000002263600000000000020710 0ustar00zuulzuul00000000000000# Copyright (c) 2012, Cloudscaling # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import re from hacking import core """ Guidelines for writing new hacking checks - Use only for Manila specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range M3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the M3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to manila/tests/test_hacking.py """ UNDERSCORE_IMPORT_FILES = [] translated_log = re.compile( r"(.)*LOG\." 
r"(audit|debug|error|info|warn|warning|critical|exception)" r"\(" r"(_|_LE|_LI|_LW)" r"\(") string_translation = re.compile(r"[^_]*_\(\s*('|\")") underscore_import_check = re.compile(r"(.)*import _$") underscore_import_check_multi = re.compile(r"(.)*import (.)*_, (.)*") # We need this for cases where they have created their own _ function. custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") assert_no_xrange_re = re.compile(r"\s*xrange\s*\(") assert_True = re.compile(r".*assertEqual\(True, .*\)") no_log_warn = re.compile(r"\s*LOG.warn\(.*") class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ CHECK_DESC = 'No check message specified' def __init__(self, tree, filename): """This object is created automatically by pep8. 
:param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pep8.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors for pep8.""" message = message or self.CHECK_DESC error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) def _check_call_names(self, call_node, names): if isinstance(call_node, ast.Call): if isinstance(call_node.func, ast.Name): if call_node.func.id in names: return True return False @core.flake8ext def no_translate_logs(logical_line): if translated_log.match(logical_line): yield (0, "M359 Don't translate log messages!") class CheckLoggingFormatArgs(BaseASTChecker): """Check for improper use of logging format arguments. LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.", ('volume1', 500)) The format arguments should not be a tuple as it is easy to miss. """ name = "check_logging_format_args" version = "1.0" CHECK_DESC = 'M310 Log method arguments should not be a tuple.' LOG_METHODS = [ 'debug', 'info', 'warn', 'warning', 'error', 'exception', 'critical', 'fatal', 'trace', 'log' ] def _find_name(self, node): """Return the fully qualified name or a Name or Attribute.""" if isinstance(node, ast.Name): return node.id elif (isinstance(node, ast.Attribute) and isinstance(node.value, (ast.Name, ast.Attribute))): method_name = node.attr obj_name = self._find_name(node.value) if obj_name is None: return None return obj_name + '.' 
+ method_name elif isinstance(node, str): return node else: # could be Subscript, Call or many more return None def visit_Call(self, node): """Look for the 'LOG.*' calls.""" # extract the obj_name and method_name if isinstance(node.func, ast.Attribute): obj_name = self._find_name(node.func.value) if isinstance(node.func.value, ast.Name): method_name = node.func.attr elif isinstance(node.func.value, ast.Attribute): obj_name = self._find_name(node.func.value) method_name = node.func.attr else: # could be Subscript, Call or many more return super(CheckLoggingFormatArgs, self).generic_visit(node) # obj must be a logger instance and method must be a log helper if (obj_name != 'LOG' or method_name not in self.LOG_METHODS): return super(CheckLoggingFormatArgs, self).generic_visit(node) # the call must have arguments if not len(node.args): return super(CheckLoggingFormatArgs, self).generic_visit(node) # any argument should not be a tuple for arg in node.args: if isinstance(arg, ast.Tuple): self.add_error(arg) return super(CheckLoggingFormatArgs, self).generic_visit(node) @core.flake8ext def check_explicit_underscore_import(logical_line, filename): """Check for explicit import of the _ function We need to ensure that any files that are using the _() function to translate logs are explicitly importing the _ function. We can't trust unit test to catch whether the import has been added so we need to check for it here. """ # Build a list of the files that have _ imported. No further # checking needed once it is found. if filename in UNDERSCORE_IMPORT_FILES: pass elif (underscore_import_check.match(logical_line) or underscore_import_check_multi.match(logical_line) or custom_underscore_check.match(logical_line)): UNDERSCORE_IMPORT_FILES.append(filename) elif string_translation.match(logical_line): yield (0, "M323: Found use of _() without explicit import of _ !") class CheckForTransAdd(BaseASTChecker): """Checks for the use of concatenation on a translated string. 
Translations should not be concatenated with other strings, but should instead include the string being added to the translated string to give the translators the most information. """ name = "check_for_trans_add" version = "1.0" CHECK_DESC = ('M326 Translated messages cannot be concatenated. ' 'String should be included in translated message.') TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC'] def visit_BinOp(self, node): if isinstance(node.op, ast.Add): if self._check_call_names(node.left, self.TRANS_FUNC): self.add_error(node.left) elif self._check_call_names(node.right, self.TRANS_FUNC): self.add_error(node.right) super(CheckForTransAdd, self).generic_visit(node) @core.flake8ext def check_oslo_namespace_imports(logical_line, filename, noqa): if noqa: return if re.match(oslo_namespace_imports, logical_line): msg = ("M333: '%s' must be used instead of '%s'.") % ( logical_line.replace('oslo.', 'oslo_'), logical_line) yield (0, msg) @core.flake8ext def dict_constructor_with_list_copy(logical_line): msg = ("M336: Must use a dict comprehension instead of a dict constructor" " with a sequence of key-value pairs." ) if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) @core.flake8ext def no_xrange(logical_line): if assert_no_xrange_re.match(logical_line): yield (0, "M337: Do not use xrange().") @core.flake8ext def validate_assertTrue(logical_line): if re.match(assert_True, logical_line): msg = ("M313: Unit tests should use assertTrue(value) instead" " of using assertEqual(True, value).") yield (0, msg) @core.flake8ext def check_uuid4(logical_line): """Generating UUID Use oslo_utils.uuidutils to generate UUID instead of uuid4(). M354 """ msg = ("M354: Use oslo_utils.uuidutils to generate UUID instead " "of uuid4().") if "uuid4()." 
in logical_line: return if "uuid4()" in logical_line: yield (0, msg) @core.flake8ext def no_log_warn_check(logical_line): """Disallow 'LOG.warn' Deprecated LOG.warn(), instead use LOG.warning ://bugs.launchpad.net/manila/+bug/1508442 M338 """ msg = ("M338: LOG.warn is deprecated, use LOG.warning.") if re.match(no_log_warn, logical_line): yield (0, msg) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.973671 manila-21.0.0/manila/tests/image/0000775000175000017500000000000000000000000016543 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/image/__init__.py0000664000175000017500000000000000000000000020642 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/image/test_image.py0000664000175000017500000001010300000000000021231 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from manila import context from manila.image import glance from manila import test from manila.tests import utils as test_utils class FakeGlanceClient(object): class Image(object): def list(self, *args, **kwargs): return [{'id': 'id1'}, {'id': 'id2'}] def __getattr__(self, item): return None def __init__(self): self.image = self.Image() def get_fake_auth_obj(): return type('FakeAuthObj', (object, ), {'get_client': mock.Mock()}) class GlanceClientTestCase(test.TestCase): @mock.patch('manila.image.glance.AUTH_OBJ', None) def test_no_auth_obj(self): mock_client_loader = self.mock_object( glance.client_auth, 'AuthClientLoader') fake_context = 'fake_context' data = { 'glance': { 'api_microversion': 'foo_api_microversion', 'endpoint_type': 'internal', 'region_name': 'foo_region_name' } } with test_utils.create_temp_config_with_opts(data): glance.glanceclient(fake_context) mock_client_loader.assert_called_once_with( client_class=glance.glance_client.Client, cfg_group=glance.GLANCE_GROUP ) mock_client_loader.return_value.get_client.assert_called_once_with( fake_context, version=data['glance']['api_microversion'], interface=data['glance']['endpoint_type'], region_name=data['glance']['region_name'] ) @mock.patch('manila.image.glance.AUTH_OBJ', get_fake_auth_obj()) def test_with_auth_obj(self): fake_context = 'fake_context' data = { 'glance': { 'api_microversion': 'foo_api_microversion', 'endpoint_type': 'internal', 'region_name': 'foo_region_name' } } with test_utils.create_temp_config_with_opts(data): glance.glanceclient(fake_context) glance.AUTH_OBJ.get_client.assert_called_once_with( fake_context, version=data['glance']['api_microversion'], interface=data['glance']['endpoint_type'], region_name=data['glance']['region_name'] ) class GlanceApiTestCase(test.TestCase): def setUp(self): super(GlanceApiTestCase, self).setUp() self.api = glance.API() self.glanceclient = FakeGlanceClient() self.ctx = context.get_admin_context() self.mock_object(glance, 
'glanceclient', mock.Mock(return_value=self.glanceclient)) def test_image_list_glanceclient_has_no_proxy(self): image_list = ['fake', 'image', 'list'] class FakeGlanceClient(object): def list(self): return image_list self.glanceclient.glance = FakeGlanceClient() result = self.api.image_list(self.ctx) self.assertEqual(image_list, result) def test_image_list_glanceclient_has_proxy(self): image_list1 = ['fake', 'image', 'list1'] image_list2 = ['fake', 'image', 'list2'] class FakeImagesClient(object): def list(self): return image_list1 class FakeGlanceClient(object): def list(self): return image_list2 self.glanceclient.images = FakeImagesClient() self.glanceclient.glance = FakeGlanceClient() result = self.api.image_list(self.ctx) self.assertEqual(image_list1, result) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.977671 manila-21.0.0/manila/tests/integrated/0000775000175000017500000000000000000000000017607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/integrated/__init__.py0000664000175000017500000000000000000000000021706 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.977671 manila-21.0.0/manila/tests/integrated/api/0000775000175000017500000000000000000000000020360 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/integrated/api/__init__.py0000664000175000017500000000000000000000000022457 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/integrated/api/client.py0000664000175000017500000001665100000000000022221 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Justin Santa Barbara 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from urllib import parse import requests from oslo_log import log from oslo_serialization import jsonutils LOG = log.getLogger(__name__) class OpenStackApiException(Exception): def __init__(self, message=None, response=None): self.response = response if not message: message = 'Unspecified error' if response: _status = response.status_code _body = response.text message = ('%(message)s\nStatus Code: %(_status)s\n' 'Body: %(_body)s') % { "message": message, "_status": _status, "_body": _body } super(OpenStackApiException, self).__init__(message) class OpenStackApiAuthenticationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Authentication error" super(OpenStackApiAuthenticationException, self).__init__(message, response) class OpenStackApiAuthorizationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Authorization error" super(OpenStackApiAuthorizationException, self).__init__(message, response) class OpenStackApiNotFoundException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = "Item not found" super(OpenStackApiNotFoundException, self).__init__(message, response) class TestOpenStackClient(object): """Simple OpenStack API Client. 
This is a really basic OpenStack API client that is under our control, so we can make changes / insert hooks for testing """ def __init__(self, auth_user, auth_key, endpoint): super(TestOpenStackClient, self).__init__() self.auth_result = None self.auth_user = auth_user self.auth_key = auth_key self.endpoint = endpoint # default project_id self.project_id = 'openstack' def request(self, url, method='GET', body=None, headers=None, ssl_verify=True, stream=False): _headers = {'Content-Type': 'application/json'} _headers.update(headers or {}) parsed_url = parse.urlparse(url) port = parsed_url.port or '' hostname = parsed_url.hostname if netaddr.valid_ipv6(hostname): hostname = "[%s]" % hostname scheme = parsed_url.scheme relative_url = parsed_url.path if parsed_url.query: relative_url = relative_url + "?" + parsed_url.query LOG.debug("Doing %(method)s on %(relative_url)s, body: %(body)s", {"method": method, "relative_url": relative_url, "body": body or {}}) _url = "%s://%s:%s%s" % (scheme, hostname, port, relative_url) response = requests.request(method, _url, data=body, headers=_headers, verify=ssl_verify, stream=stream) return response def _authenticate(self): if self.auth_result: return self.auth_result headers = {'X-Auth-User': self.auth_user, 'X-Auth-Key': self.auth_key, 'X-Auth-Project-Id': self.project_id} response = self.request(self.endpoint, headers=headers) http_status = response.status_code LOG.debug("%(endpoint)s => code %(http_status)s.", {"endpoint": self.endpoint, "http_status": http_status}) if http_status == 401: raise OpenStackApiAuthenticationException(response=response) self.auth_result = response.headers return self.auth_result def api_request(self, relative_uri, check_response_status=None, **kwargs): auth_result = self._authenticate() base_uri = auth_result['x-server-management-url'] full_uri = '%s/%s' % (base_uri, relative_uri) headers = kwargs.setdefault('headers', {}) headers['X-Auth-Token'] = auth_result['x-auth-token'] response = 
self.request(full_uri, **kwargs) http_status = response.status_code LOG.debug("%(relative_uri)s => code %(http_status)s.", {"relative_uri": relative_uri, "http_status": http_status}) if check_response_status: if http_status not in check_response_status: if http_status == 404: raise OpenStackApiNotFoundException(response=response) elif http_status == 401: raise OpenStackApiAuthorizationException(response=response) else: raise OpenStackApiException( message="Unexpected status code", response=response) return response def _decode_json(self, response): body = response.text LOG.debug("Decoding JSON: %s.", (body)) if body: return jsonutils.loads(body) else: return "" def api_options(self, relative_uri, **kwargs): kwargs['method'] = 'OPTIONS' kwargs.setdefault('check_response_status', [200]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_post(self, relative_uri, body, **kwargs): kwargs['method'] = 'POST' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_put(self, relative_uri, body, **kwargs): kwargs['method'] = 'PUT' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [200, 202, 204]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) def api_delete(self, relative_uri, **kwargs): kwargs['method'] = 'DELETE' kwargs.setdefault('check_response_status', [200, 202, 204]) return self.api_request(relative_uri, **kwargs) def get_shares(self, detail=True): rel_url 
= '/shares/detail' if detail else '/shares' return self.api_get(rel_url)['shares'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/integrated/integrated_helpers.py0000664000175000017500000000766100000000000024043 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Provides common functionality for integrated unit tests """ import random import string from oslo_log import log from manila import service from manila import test # For the flags from manila.tests.integrated.api import client from oslo_config import cfg from oslo_utils import uuidutils CONF = cfg.CONF LOG = log.getLogger(__name__) def generate_random_alphanumeric(length): """Creates a random alphanumeric string of specified length.""" return ''.join(random.choice(string.ascii_uppercase + string.digits) for _x in range(length)) def generate_random_numeric(length): """Creates a random numeric string of specified length.""" return ''.join(random.choice(string.digits) for _x in range(length)) def generate_new_element(items, prefix, numeric=False): """Creates a random string with prefix, that is not in 'items' list.""" while True: if numeric: candidate = prefix + generate_random_numeric(8) else: candidate = prefix + generate_random_alphanumeric(8) if candidate not in items: return candidate LOG.debug("Random collision on %s.", candidate) class 
_IntegratedTestBase(test.TestCase): def setUp(self): super(_IntegratedTestBase, self).setUp() f = self._get_flags() self.flags(**f) # set up services self.share = self.start_service('share') self.scheduler = self.start_service('scheduler') self._start_api_service() self.api = client.TestOpenStackClient('fake', 'fake', self.endpoint) def tearDown(self): self.osapi.stop() super(_IntegratedTestBase, self).tearDown() def _start_api_service(self): self.osapi = service.WSGIService("osapi_share") self.osapi.start() self.endpoint = 'http://%s:%s/v2' % (self.osapi.host, self.osapi.port) LOG.info("Manila API started at %s", self.endpoint) def _get_flags(self): """An opportunity to setup flags, before the services are started.""" f = {} # Ensure tests only listen on localhost f['osapi_share_listen'] = '127.0.0.1' # Auto-assign ports to allow concurrent tests f['osapi_share_listen_port'] = 0 return f def get_unused_server_name(self): servers = self.api.get_servers() server_names = [server['name'] for server in servers] return generate_new_element(server_names, 'server') def get_invalid_image(self): return uuidutils.generate_uuid() def _build_minimal_create_server_request(self): server = {} image = self.api.get_images()[0] LOG.debug("Image: %s.", image) if 'imageRef' in image: image_href = image['imageRef'] else: image_href = image['id'] image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId server['imageRef'] = image_href # Set a valid flavorId flavor = self.api.get_flavors()[0] LOG.debug("Using flavor: %s.", flavor) server['flavorRef'] = 'http://fake.server/%s' % flavor['id'] # Set a valid server name server_name = self.get_unused_server_name() server['name'] = server_name return server ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/integrated/test_extensions.py0000664000175000017500000000264500000000000023426 0ustar00zuulzuul00000000000000# Copyright 2011 Justin 
Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from manila.tests.integrated import integrated_helpers CONF = cfg.CONF LOG = log.getLogger(__name__) class ExtensionsTest(integrated_helpers._IntegratedTestBase): def _get_flags(self): f = super(ExtensionsTest, self)._get_flags() f['osapi_share_extension'] = CONF.osapi_share_extension[:] f['osapi_share_extension'].append( 'manila.tests.api.extensions.foxinsocks.Foxinsocks') return f def test_get_foxnsocks(self): """Simple check that fox-n-socks works.""" response = self.api.api_request('/foxnsocks') foxnsocks = response.text LOG.debug("foxnsocks: %s.", foxnsocks) self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/integrated/test_login.py0000664000175000017500000000177100000000000022336 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from manila.tests.integrated import integrated_helpers LOG = log.getLogger(__name__) class LoginTest(integrated_helpers._IntegratedTestBase): def test_login(self): """Simple check - we list shares - so we know we're logged in.""" shares = self.api.get_shares() for share in shares: LOG.debug("share: %s", share) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.977671 manila-21.0.0/manila/tests/keymgr/0000775000175000017500000000000000000000000016757 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/keymgr/__init__.py0000664000175000017500000000000000000000000021056 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/keymgr/test_barbican.py0000664000175000017500000001014100000000000022126 0ustar00zuulzuul00000000000000# Copyright 2025 Cloudification GmbH. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import cfg from oslo_utils import uuidutils from manila import exception from manila.keymgr import barbican as barbican_module from manila import test CONF = cfg.CONF class BarbicanSecretACLTestCase(test.TestCase): def setUp(self): super(BarbicanSecretACLTestCase, self).setUp() self.context = mock.Mock() self.secret_ref = 'mock-secret-id' self.barbican_acl = barbican_module.BarbicanSecretACL(CONF) def test_get_client_and_href_with_valid_secret(self): mock_href = uuidutils.generate_uuid() mock_client = mock.Mock() self.mock_object(self.barbican_acl, '_get_barbican_client', mock.Mock(return_value=(mock_client, mock.Mock()))) self.mock_object(self.barbican_acl, '_create_secret_ref', mock.Mock(return_value=mock_href)) result_client, result_href = self.barbican_acl.get_client_and_href( self.context, mock_href) self.assertEqual(mock_client, result_client) self.assertEqual(mock_href, result_href) def test_get_client_and_href_missing_backend(self): CONF.set_default('backend', 'wrong.backend', group='key_manager') self.assertRaises( exception.ManilaBarbicanACLError, self.barbican_acl.get_client_and_href, self.context, self.secret_ref) def test_get_client_and_href_missing_secret_ref(self): self.assertRaises( exception.ManilaBarbicanACLError, self.barbican_acl.get_client_and_href, self.context, None) class BarbicanUserAppCredsTestCase(test.TestCase): def setUp(self): super(BarbicanUserAppCredsTestCase, self).setUp() self.context = mock.Mock() self.app_creds = barbican_module.BarbicanUserAppCreds(CONF) @mock.patch('keystoneclient.v3.client.Client') @mock.patch('keystoneauth1.session.Session') @mock.patch('keystoneauth1.loading.load_auth_from_conf_options') def test_get_application_credentials_success(self, mock_auth, mock_session, mock_client): fake_cred = mock.Mock() mock_instance = mock.Mock() 
mock_instance.application_credentials.get.return_value = fake_cred mock_client.return_value = mock_instance result = self.app_creds.get_application_credentials( self.context, 'fake_id') self.assertEqual(fake_cred, result) def test_get_application_credentials_missing_id(self): self.assertRaises( exception.ManilaBarbicanAppCredsError, self.app_creds.get_application_credentials, self.context, None) @mock.patch('keystoneclient.v3.client.Client') @mock.patch('keystoneauth1.session.Session') @mock.patch('keystoneauth1.loading.load_auth_from_conf_options') def test_delete_application_credentials(self, mock_auth, mock_session, mock_client): mock_instance = mock.Mock() mock_client.return_value = mock_instance self.app_creds.delete_application_credentials(self.context, 'cred_id') mock_instance.application_credentials.delete.assert_called_once() def test_delete_application_credentials_missing_id(self): self.assertRaises( exception.ManilaBarbicanAppCredsError, self.app_creds.delete_application_credentials, self.context, None) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.977671 manila-21.0.0/manila/tests/lock/0000775000175000017500000000000000000000000016411 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/lock/__init__.py0000664000175000017500000000000000000000000020510 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/lock/test_api.py0000664000175000017500000005171400000000000020603 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_config import cfg from manila.common import constants from manila import context from manila import exception from manila.lock import api as lock_api from manila import policy from manila import test from manila.tests import utils as test_utils from manila import utils CONF = cfg.CONF @ddt.ddt class ResourceLockApiTest(test.TestCase): def setUp(self): super(ResourceLockApiTest, self).setUp() self.lock_api = lock_api.API() self.mock_object(self.lock_api, 'db') self.ctxt = context.RequestContext('fakeuser', 'fakeproject', is_admin=False) self.mock_object(policy, 'check_policy') @ddt.data( test_utils.annotated( 'admin_context', (context.RequestContext('fake', 'fake', is_admin=True), 'admin'), ), test_utils.annotated( 'admin_also_service_context', (context.RequestContext('fake', 'fake', service_roles=['service'], is_admin=True), 'service'), ), test_utils.annotated( 'service_context', (context.RequestContext('fake', 'fake', service_roles=['service'], is_admin=False), 'service'), ), test_utils.annotated( 'user_context', (context.RequestContext('fake', 'fake', is_admin=False), 'user') ), ) @ddt.unpack def test__get_lock_context(self, ctxt, expected_lock_context): result = self.lock_api._get_lock_context(ctxt) self.assertEqual(expected_lock_context, result['lock_context']) self.assertEqual(('fake', 'fake'), (result['user_id'], result['project_id'])) @ddt.data( test_utils.annotated( 'user_manipulating_admin_lock', (context.RequestContext('fake', 'fake', is_admin=False), 'admin'), ), test_utils.annotated( 
'user_manipulating_service_lock', (context.RequestContext('fake', 'fake', is_admin=False), 'service'), ), test_utils.annotated( 'service_manipulating_admin_lock', (context.RequestContext('fake', 'fake', is_admin=False, service_roles=['service']), 'admin'), ), ) @ddt.unpack def test__check_allow_lock_manipulation_not_allowed(self, ctxt, lock_ctxt): self.assertRaises(exception.NotAuthorized, self.lock_api._check_allow_lock_manipulation, ctxt, {'lock_context': lock_ctxt}) @ddt.data( test_utils.annotated( 'user_manipulating_user_lock', (context.RequestContext('fake', 'fake', is_admin=False), 'user'), ), test_utils.annotated( 'service_manipulating_service_lock', (context.RequestContext( 'fake', 'fake', is_admin=False, service_roles=['service']), 'service'), ), test_utils.annotated( 'service_manipulating_user_lock', (context.RequestContext( 'fake', 'fake', is_admin=False, service_roles=['service']), 'user'), ), test_utils.annotated( 'admin_manipulating_service_lock', (context.RequestContext('fake', 'fake', is_admin=True), 'service'), ), test_utils.annotated( 'admin_manipulating_user_lock', (context.RequestContext('fake', 'fake', is_admin=True), 'user'), ), ) @ddt.unpack def test__check_allow_lock_manipulation_allowed(self, ctxt, lock_ctxt): result = self.lock_api._check_allow_lock_manipulation( ctxt, {'lock_context': lock_ctxt} ) self.assertIsNone(result) @ddt.data( test_utils.annotated( 'service_manipulating_user_lock', (context.RequestContext( 'fake', 'fake', is_admin=False, service_roles=['service']), 'user', 'user_b'), ), test_utils.annotated( 'admin_manipulating_user_lock', (context.RequestContext('fake', 'fake', is_admin=True), 'admin', 'user_a'), ), test_utils.annotated( 'user_manipulating_locks_they_own', (context.RequestContext('user_a', 'fake', is_admin=False), 'user', 'user_a'), ), test_utils.annotated( 'user_manipulating_other_users_lock', (context.RequestContext('user_a', 'fake', is_admin=False), 'user', 'user_b'), ), ) @ddt.unpack def 
test_access_is_restricted(self, ctxt, lock_ctxt, lock_user): resource_lock = { 'user_id': lock_user, 'lock_context': lock_ctxt } is_restricted = ( (not ctxt.is_admin and not ctxt.is_service) and lock_user != ctxt.user_id) expected_mock_policy = {} if is_restricted: expected_mock_policy['side_effect'] = exception.NotAuthorized self.mock_object(self.lock_api, '_check_allow_lock_manipulation') self.mock_object(policy, 'check_policy', mock.Mock(**expected_mock_policy)) result = self.lock_api.access_is_restricted( ctxt, resource_lock ) self.assertEqual(is_restricted, result) def test_access_is_restricted_not_authorized(self): resource_lock = { 'user_id': 'fakeuserid', 'lock_context': 'user' } ctxt = context.RequestContext('fake', 'fake') self.mock_object(self.lock_api, '_check_allow_lock_manipulation', mock.Mock(side_effect=exception.NotAuthorized())) result = self.lock_api.access_is_restricted( ctxt, resource_lock ) self.assertTrue(result) def test_get_all_all_projects_ignored(self): self.mock_object(policy, 'check_policy', mock.Mock(return_value=False)) self.mock_object(self.lock_api.db, 'resource_lock_get_all', mock.Mock(return_value=('list of locks', None))) locks, count = self.lock_api.get_all( self.ctxt, search_opts={ 'all_projects': True, 'project_id': '5dca5323e33b49fca4a5b261c72e612c', }) self.lock_api.db.resource_lock_get_all.assert_called_once_with( utils.IsAMatcher(context.RequestContext), filters={}, limit=None, offset=None, sort_key='created_at', sort_dir='desc', show_count=False, ) self.assertEqual(('list of locks', None), (locks, count)) def test_get_all_with_filters(self): self.mock_object(self.lock_api.db, 'resource_lock_get_all', mock.Mock(return_value=('list of locks', 4))) search_opts = { 'all_projects': True, 'project_id': '5dca5323e33b49fca4a5b261c72e612c', 'resource_type': 'snapshot', } locks = self.lock_api.get_all( self.ctxt, limit=3, offset=3, search_opts=search_opts, show_count=True ) 
self.lock_api.db.resource_lock_get_all.assert_called_once_with( utils.IsAMatcher(context.RequestContext), filters=search_opts, limit=3, offset=3, sort_key='created_at', sort_dir='desc', show_count=True, ) self.assertEqual('list of locks', locks[0]) self.assertEqual(4, locks[1]) def test_create_lock_resource_not_owned_by_user(self): self.mock_object( policy, 'check_policy', mock.Mock(side_effect=exception.PolicyNotAuthorized( action="resource_lock:create")), ) self.assertRaises(exception.PolicyNotAuthorized, self.lock_api.create, self.ctxt, resource_id='19529cea-0471-4972-adaa-fee8694b7538', resource_type='share', resource_action='delete') self.lock_api.db.share_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), '19529cea-0471-4972-adaa-fee8694b7538', ) self.lock_api.db.resource_lock_create.assert_not_called() @ddt.data(constants.STATUS_DELETING, constants.STATUS_ERROR_DELETING, constants.STATUS_UNMANAGING, constants.STATUS_MANAGE_ERROR_UNMANAGING, constants.STATUS_UNMANAGE_ERROR, constants.STATUS_UNMANAGED, constants.STATUS_DELETED) def test_create_lock_invalid_resource_status(self, status): self.mock_object(self.lock_api.db, 'resource_lock_create', mock.Mock(return_value='created_obj')) self.mock_object(self.lock_api.db, 'share_get', mock.Mock(return_value={'status': status})) self.assertRaises(exception.InvalidInput, self.lock_api.create, self.ctxt, resource_id='7dab6090-1dfd-4829-bbaf-602fcd1c8248', resource_action='delete', resource_type='share') self.lock_api.db.resource_lock_create.assert_not_called() def test_create_lock_invalid_resource_soft_deleted(self): self.mock_object(self.lock_api.db, 'resource_lock_create', mock.Mock(return_value='created_obj')) self.mock_object(self.lock_api.db, 'share_get', mock.Mock(return_value={'is_soft_deleted': True})) self.assertRaises(exception.InvalidInput, self.lock_api.create, self.ctxt, resource_id='0bbf0b62-cb29-4218-920b-3f62faa99ff8', resource_action='delete', resource_type='share') 
self.lock_api.db.resource_lock_create.assert_not_called() def test_create_lock(self): self.mock_object(self.lock_api.db, 'resource_lock_create', mock.Mock(return_value='created_obj')) mock_share = { 'id': 'cacac01c-853d-47f3-afcb-da4484bd09a5', 'status': constants.STATUS_AVAILABLE, 'is_soft_deleted': False, } self.mock_object(self.lock_api.db, 'share_get', mock.Mock(return_value=mock_share)) result = self.lock_api.create( self.ctxt, resource_id='cacac01c-853d-47f3-afcb-da4484bd09a5', resource_action='delete', resource_type='share', ) self.assertEqual('created_obj', result) db_create_arg = self.lock_api.db.resource_lock_create.call_args[0][1] expected_create_arg = { 'resource_id': 'cacac01c-853d-47f3-afcb-da4484bd09a5', 'resource_action': 'delete', 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'lock_context': 'user', 'lock_reason': None, 'resource_type': constants.SHARE_RESOURCE_TYPE } self.assertEqual(expected_create_arg, db_create_arg) def test_create_access_show_lock(self): self.mock_object(self.lock_api.db, 'resource_lock_create', mock.Mock(return_value='created_obj')) mock_access = { 'id': 'cacac01c-853d-47f3-afcb-da4484bd09a5', 'state': constants.STATUS_ACTIVE, } self.mock_object(self.lock_api.db, 'access_get', mock.Mock(return_value=mock_access)) self.mock_object(self.lock_api.db, 'resource_lock_get_all', mock.Mock(return_value=['', 0])) self.mock_object(self.ctxt, 'elevated', mock.Mock(return_value=self.ctxt)) result = self.lock_api.create( self.ctxt, resource_id='cacac01c-853d-47f3-afcb-da4484bd09a5', resource_action=constants.RESOURCE_ACTION_SHOW, resource_type=constants.SHARE_ACCESS_RESOURCE_TYPE, ) self.assertEqual('created_obj', result) db_create_arg = self.lock_api.db.resource_lock_create.call_args[0][1] resource_id = 'cacac01c-853d-47f3-afcb-da4484bd09a5' expected_create_arg = { 'resource_id': resource_id, 'resource_action': constants.RESOURCE_ACTION_SHOW, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'lock_context': 'user', 'lock_reason': 
None, 'resource_type': constants.SHARE_ACCESS_RESOURCE_TYPE } self.assertEqual(expected_create_arg, db_create_arg) filters = { 'resource_id': resource_id, 'resource_action': constants.RESOURCE_ACTION_SHOW, 'all_projects': True } self.lock_api.db.resource_lock_get_all.assert_called_once_with( self.ctxt, filters=filters) def test_create_visibility_lock_lock_exists(self): self.mock_object(self.lock_api.db, 'resource_lock_create', mock.Mock(return_value='created_obj')) self.mock_object(self.lock_api.db, 'resource_lock_get_all', mock.Mock(return_value=['visibility_lock', 1])) self.mock_object(self.ctxt, 'elevated', mock.Mock(return_value=self.ctxt)) self.assertRaises( exception.ResourceVisibilityLockExists, self.lock_api.create, self.ctxt, resource_id='cacac01c-853d-47f3-afcb-da4484bd09a5', resource_action=constants.RESOURCE_ACTION_SHOW, resource_type=constants.SHARE_ACCESS_RESOURCE_TYPE, ) resource_id = 'cacac01c-853d-47f3-afcb-da4484bd09a5' filters = { 'resource_id': resource_id, 'resource_action': constants.RESOURCE_ACTION_SHOW, 'all_projects': True } self.lock_api.db.resource_lock_get_all.assert_called_once_with( self.ctxt, filters=filters) @ddt.data(True, False) def test_update_lock_resource_not_allowed_with_policy_failure( self, policy_fails): lock = {'id': 'd767d3cd-1187-404a-a91f-8b172e0e768e'} if policy_fails: self.mock_object( policy, 'check_policy', mock.Mock( side_effect=exception.PolicyNotAuthorized( action='resource_lock:update'), ), ) self.mock_object( self.lock_api, '_check_allow_lock_manipulation', mock.Mock( side_effect=exception.NotAuthorized ), ) self.assertRaises(exception.NotAuthorized, self.lock_api.update, self.ctxt, lock, {'foo': 'bar'}) @ddt.data(constants.STATUS_DELETING, constants.STATUS_ERROR_DELETING, constants.STATUS_UNMANAGING, constants.STATUS_MANAGE_ERROR_UNMANAGING, constants.STATUS_UNMANAGE_ERROR, constants.STATUS_UNMANAGED, constants.STATUS_DELETED) def test_update_invalid_resource_status(self, status): lock = { 'id': 
'd767d3cd-1187-404a-a91f-8b172e0e768e', 'resource_id': '266cf54f-f9cf-4d6c-94f3-7b67f00e0465', 'resource_action': 'something', 'resource_type': 'share', } self.mock_object(self.lock_api, '_check_allow_lock_manipulation') self.mock_object(self.lock_api.db, 'share_get', mock.Mock(return_value={'status': status})) self.assertRaises(exception.InvalidInput, self.lock_api.update, self.ctxt, lock, {'resource_action': 'delete'}) self.lock_api.db.resource_lock_update.assert_not_called() def test_update(self): self.mock_object(self.lock_api, '_check_allow_lock_manipulation') self.mock_object(self.lock_api.db, 'resource_lock_update', mock.Mock(return_value='updated_obj')) lock = {'id': 'd767d3cd-1187-404a-a91f-8b172e0e768e'} result = self.lock_api.update( self.ctxt, lock, {'foo': 'bar'}, ) self.assertEqual('updated_obj', result) self.lock_api.db.resource_lock_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 'd767d3cd-1187-404a-a91f-8b172e0e768e', {'foo': 'bar'}, ) @ddt.data(True, False) def test_delete_not_allowed_with_policy_failure(self, policy_fails): self.mock_object(self.lock_api.db, 'resource_lock_get', mock.Mock( return_value={'id': 'd767d3cd-1187-404a-a91f-8b172e0e768e'})) if policy_fails: self.mock_object( policy, 'check_policy', mock.Mock( side_effect=exception.PolicyNotAuthorized( action='resource_lock:delete'), ), ) self.mock_object( self.lock_api, '_check_allow_lock_manipulation', mock.Mock( side_effect=exception.NotAuthorized ), ) self.assertRaises(exception.NotAuthorized, self.lock_api.delete, self.ctxt, 'd767d3cd-1187-404a-a91f-8b172e0e768e') policy.check_policy.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 'resource_lock', 'delete', {'id': 'd767d3cd-1187-404a-a91f-8b172e0e768e'}, ) self.assertEqual(not policy_fails, self.lock_api._check_allow_lock_manipulation.called) self.lock_api.db.resource_lock_delete.assert_not_called() def test_delete(self): self.mock_object(self.lock_api.db, 'resource_lock_get', mock.Mock( 
return_value={'id': 'd767d3cd-1187-404a-a91f-8b172e0e768e'})) self.mock_object(self.lock_api, '_check_allow_lock_manipulation') result = self.lock_api.delete(self.ctxt, 'd767d3cd-1187-404a-a91f-8b172e0e768e') self.assertIsNone(result) self.lock_api.db.resource_lock_delete.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 'd767d3cd-1187-404a-a91f-8b172e0e768e' ) def test_ensure_context_can_delete_lock_policy_fails(self): lock = {'id': 'd767d3cd-1187-404a-a91f-8b172e0e768e'} self.mock_object( self.lock_api.db, 'resource_lock_get', mock.Mock(return_value=lock) ) self.mock_object( policy, 'check_policy', mock.Mock(side_effect=exception.PolicyNotAuthorized( action="resource_lock:delete")), ) self.assertRaises( exception.NotAuthorized, self.lock_api.ensure_context_can_delete_lock, self.ctxt, 'd767d3cd-1187-404a-a91f-8b172e0e768e') self.lock_api.db.resource_lock_get.assert_called_once_with( self.ctxt, 'd767d3cd-1187-404a-a91f-8b172e0e768e' ) policy.check_policy.assert_called_once_with( self.ctxt, 'resource_lock', 'delete', lock) def test_ensure_context_can_delete_lock(self): lock = {'id': 'd767d3cd-1187-404a-a91f-8b172e0e768e'} self.mock_object( self.lock_api.db, 'resource_lock_get', mock.Mock(return_value=lock) ) self.mock_object(policy, 'check_policy') self.mock_object(self.lock_api, '_check_allow_lock_manipulation') self.lock_api.ensure_context_can_delete_lock( self.ctxt, 'd767d3cd-1187-404a-a91f-8b172e0e768e') self.lock_api.db.resource_lock_get.assert_called_once_with( self.ctxt, 'd767d3cd-1187-404a-a91f-8b172e0e768e' ) policy.check_policy.assert_called_once_with( self.ctxt, 'resource_lock', 'delete', lock) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.977671 manila-21.0.0/manila/tests/message/0000775000175000017500000000000000000000000017105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/message/__init__.py0000664000175000017500000000000000000000000021204 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/message/test_api.py0000664000175000017500000001003500000000000021266 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock from oslo_config import cfg from oslo_utils import timeutils from manila import context from manila.message import api as message_api from manila.message.message_field import Action as MsgAction from manila.message.message_field import Detail as MsgDetail from manila.message import message_levels from manila import test CONF = cfg.CONF class MessageApiTest(test.TestCase): def setUp(self): super(MessageApiTest, self).setUp() self.message_api = message_api.API() self.mock_object(self.message_api, 'db') self.ctxt = context.RequestContext('admin', 'fakeproject', True) self.ctxt.request_id = 'fakerequestid' @mock.patch.object(timeutils, 'utcnow') def test_create(self, mock_utcnow): CONF.set_override('message_ttl', 300) now = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) mock_utcnow.return_value = now expected_expires_at = now + datetime.timedelta( seconds=300) expected_message_record = { 'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': 'fake_resource_type', 'resource_id': None, 'action_id': 
MsgAction.ALLOCATE_HOST[0], 'detail_id': MsgDetail.NO_VALID_HOST[0], 'message_level': message_levels.ERROR, 'expires_at': expected_expires_at, } self.message_api.create(self.ctxt, MsgAction.ALLOCATE_HOST, "fakeproject", detail=MsgDetail.NO_VALID_HOST, resource_type="fake_resource_type") self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) def test_create_swallows_exception(self): self.mock_object(self.message_api.db, 'message_create', mock.Mock(side_effect=Exception())) exception_log = self.mock_object(message_api.LOG, 'exception') self.message_api.create(self.ctxt, MsgAction.ALLOCATE_HOST, 'fakeproject', 'fake_resource') self.message_api.db.message_create.assert_called_once_with( self.ctxt, mock.ANY) exception_log.assert_called_once_with( 'Failed to create message record for request_id %s', self.ctxt.request_id) def test_get(self): self.message_api.get(self.ctxt, 'fake_id') self.message_api.db.message_get.assert_called_once_with(self.ctxt, 'fake_id') def test_get_all(self): self.message_api.get_all(self.ctxt) self.message_api.db.message_get_all.assert_called_once_with( self.ctxt, filters={}, limit=None, offset=None, sort_dir=None, sort_key=None) def test_delete(self): self.message_api.delete(self.ctxt, 'fake_id') self.message_api.db.message_destroy.assert_called_once_with( self.ctxt, 'fake_id') def test_cleanup_expired_messages(self): admin_context = mock.Mock() self.mock_object(self.ctxt, 'elevated', mock.Mock(return_value=admin_context)) self.message_api.cleanup_expired_messages(self.ctxt) self.message_api.db.cleanup_expired_messages.assert_called_once_with( admin_context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/message/test_message_field.py0000664000175000017500000000537400000000000023316 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from oslo_config import cfg from manila import exception from manila.message import message_field from manila import test CONF = cfg.CONF @ddt.ddt class MessageFieldTest(test.TestCase): @ddt.data(message_field.Action, message_field.Detail) def test_unique_ids(self, cls): """Assert that no action or detail id is duplicated.""" ids = [name[0] for name in cls.ALL] self.assertEqual(len(ids), len(set(ids))) @ddt.data({'id': '001', 'content': 'allocate host'}, {'id': 'invalid', 'content': None}) @ddt.unpack def test_translate_action(self, id, content): result = message_field.translate_action(id) if content is None: content = 'unknown action' self.assertEqual(content, result) @ddt.data({'id': '001', 'content': 'An unknown error occurred.'}, {'id': '002', 'content': 'No storage could be allocated for this share ' 'request. Trying again with a different size or ' 'share type may succeed.'}, {'id': 'invalid', 'content': None}) @ddt.unpack def test_translate_detail(self, id, content): result = message_field.translate_detail(id) if content is None: content = 'An unknown error occurred.' 
self.assertEqual(content, result) @ddt.data({'exception': exception.NoValidHost(reason='fake reason'), 'detail': '', 'expected': '002'}, {'exception': exception.NoValidHost( detail_data={'last_filter': 'CapacityFilter'}, reason='fake reason'), 'detail': '', 'expected': '009'}, {'exception': exception.NoValidHost( detail_data={'last_filter': 'FakeFilter'}, reason='fake reason'), 'detail': '', 'expected': '002'}, {'exception': None, 'detail': message_field.Detail.NO_VALID_HOST, 'expected': '002'}) @ddt.unpack def test_translate_detail_id(self, exception, detail, expected): result = message_field.translate_detail_id(exception, detail) self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.977671 manila-21.0.0/manila/tests/monkey_patch_example/0000775000175000017500000000000000000000000021655 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/monkey_patch_example/__init__.py0000664000175000017500000000212200000000000023763 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module for testing utils.monkey_patch().""" CALLED_FUNCTION = [] def example_decorator(name, function): """decorator for notify which is used from utils.monkey_patch(). 
:param name: name of the function :param function: - object of the function :returns: function -- decorated function """ def wrapped_func(*args, **kwarg): CALLED_FUNCTION.append(name) return function(*args, **kwarg) return wrapped_func ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/monkey_patch_example/example_a.py0000664000175000017500000000161700000000000024167 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module A for testing utils.monkey_patch().""" def example_function_a(): return 'Example function' class ExampleClassA(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/monkey_patch_example/example_b.py0000664000175000017500000000162000000000000024162 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module B for testing utils.monkey_patch().""" def example_function_b(): return 'Example function' class ExampleClassB(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315601.977671 manila-21.0.0/manila/tests/network/0000775000175000017500000000000000000000000017152 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/network/__init__.py0000664000175000017500000000000000000000000021251 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9816709 manila-21.0.0/manila/tests/network/linux/0000775000175000017500000000000000000000000020311 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/network/linux/__init__.py0000664000175000017500000000000000000000000022410 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/network/linux/test_interface.py0000664000175000017500000002505600000000000023672 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from manila.network.linux import interface from manila.network.linux import ip_lib from manila import test from manila.tests import conf_fixture from manila.tests import fake_network from manila import utils class BaseChild(interface.LinuxInterfaceDriver): def plug(self, *args): pass def unplug(self, *args): pass FakeSubnet = { 'cidr': '192.168.1.1/24', } FakeAllocation = { 'subnet': FakeSubnet, 'ip_address': '192.168.1.2', 'ip_version': 4, } FakePort = { 'id': 'abcdef01-1234-5678-90ab-ba0987654321', 'fixed_ips': [FakeAllocation], 'device_id': 'cccccccc-cccc-cccc-cccc-cccccccccccc', } class TestBase(test.TestCase): def setUp(self): super(TestBase, self).setUp() self.conf = conf_fixture.CONF self.conf.register_opts(interface.OPTS) self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice') self.ip_dev = self.ip_dev_p.start() self.ip_p = mock.patch.object(ip_lib, 'IPWrapper') self.ip = self.ip_p.start() self.device_exists_p = mock.patch.object(ip_lib, 'device_exists') self.device_exists = self.device_exists_p.start() self.addCleanup(self.ip_dev_p.stop) self.addCleanup(self.ip_p.stop) self.addCleanup(self.device_exists_p.stop) class TestABCDriver(TestBase): def test_verify_abs_class_has_abs_methods(self): class ICanNotBeInstancetiated(interface.LinuxInterfaceDriver): pass try: # pylint: disable=abstract-class-instantiated ICanNotBeInstancetiated() except TypeError: pass except Exception as e: self.fail("Unexpected 
exception thrown: '%s'" % e) else: self.fail("ExpectedException 'TypeError' not thrown.") def test_get_device_name(self): bc = BaseChild() device_name = bc.get_device_name(FakePort) self.assertEqual('tapabcdef01-12', device_name) def test_l3_init(self): addresses = [dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) bc = BaseChild() self.mock_object(bc, '_remove_outdated_interfaces') ns = '12345678-1234-5678-90ab-ba0987654321' bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns, clear_cidrs=['192.168.0.0/16']) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().route.clear_outdated_routes('192.168.0.0/16'), mock.call().addr.list(scope='global', filters=['permanent']), mock.call().addr.add(4, '192.168.1.2/24', '192.168.1.255'), mock.call().addr.delete(4, '172.16.77.240/24'), mock.call().route.pullup_route('tap0')]) bc._remove_outdated_interfaces.assert_called_with(self.ip_dev()) def test__remove_outdated_interfaces(self): device = fake_network.FakeDevice( 'foobarquuz', [dict(ip_version=4, cidr='1.0.0.0/27')]) devices = [fake_network.FakeDevice('foobar')] self.ip().get_devices = mock.Mock(return_value=devices) bc = BaseChild() self.mock_object(bc, 'unplug') bc._remove_outdated_interfaces(device) bc.unplug.assert_called_once_with('foobar') def test__get_set_of_device_cidrs(self): device = fake_network.FakeDevice('foo') expected = set(('1.0.0.0/27', '2.0.0.0/27')) bc = BaseChild() result = bc._get_set_of_device_cidrs(device) self.assertEqual(expected, result) def test__get_set_of_device_cidrs_exception(self): device = fake_network.FakeDevice('foo') self.mock_object(device.addr, 'list', mock.Mock( side_effect=Exception('foo does not exist'))) bc = BaseChild() result = bc._get_set_of_device_cidrs(device) self.assertEqual(set(), result) class TestNoopInterfaceDriver(TestBase): def test_init_l3(self): self.ip.assert_not_called() self.ip_dev.assert_not_called() def 
test_plug(self): self.ip.assert_not_called() self.ip_dev.assert_not_called() def test_unplug(self): self.ip.assert_not_called() self.ip_dev.assert_not_called() class TestOVSInterfaceDriver(TestBase): def test_get_device_name(self): br = interface.OVSInterfaceDriver() device_name = br.get_device_name(FakePort) self.assertEqual('tapabcdef01-12', device_name) def test_plug_no_ns(self): self._test_plug() def test_plug_with_ns(self): self._test_plug(namespace='01234567-1234-1234-99') def test_plug_alt_bridge(self): self._test_plug(bridge='br-foo') def _test_plug(self, additional_expectation=None, bridge=None, namespace=None): if additional_expectation is None: additional_expectation = [] if not bridge: bridge = 'br-int' def device_exists(dev, namespace=None): return dev == bridge vsctl_cmd = ['ovs-vsctl', '--', '--may-exist', 'add-port', bridge, 'tap0', '--', 'set', 'Interface', 'tap0', 'type=internal', '--', 'set', 'Interface', 'tap0', 'external-ids:iface-id=port-1234', '--', 'set', 'Interface', 'tap0', 'external-ids:iface-status=active', '--', 'set', 'Interface', 'tap0', 'external-ids:attached-mac=aa:bb:cc:dd:ee:ff'] with mock.patch.object(utils, 'execute') as execute: ovs = interface.OVSInterfaceDriver() self.device_exists.side_effect = device_exists ovs.plug('tap0', 'port-1234', 'aa:bb:cc:dd:ee:ff', bridge=bridge, namespace=namespace) execute.assert_called_once_with(*vsctl_cmd, run_as_root=True) expected = [mock.call(), mock.call().device('tap0'), mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')] expected.extend(additional_expectation) if namespace: expected.extend( [mock.call().ensure_namespace(namespace), mock.call().ensure_namespace().add_device_to_namespace( mock.ANY)]) expected.extend([mock.call().device().link.set_up()]) self.ip.assert_has_calls(expected) def test_plug_reset_mac(self): fake_mac_addr = 'aa:bb:cc:dd:ee:ff' self.device_exists.return_value = True self.ip().device().link.address = mock.Mock(return_value=fake_mac_addr) ovs = 
interface.OVSInterfaceDriver() ovs.plug('tap0', 'port-1234', 'ff:ee:dd:cc:bb:aa', bridge='br-int') expected = [mock.call(), mock.call().device('tap0'), mock.call().device().link.set_address('ff:ee:dd:cc:bb:aa'), mock.call().device().link.set_up()] self.ip.assert_has_calls(expected) def test_unplug(self, bridge=None): if not bridge: bridge = 'br-int' with mock.patch('manila.network.linux.ovs_lib.OVSBridge') as ovs_br: ovs = interface.OVSInterfaceDriver() ovs.unplug('tap0') ovs_br.assert_has_calls([mock.call(bridge), mock.call().delete_port('tap0')]) class TestBridgeInterfaceDriver(TestBase): def test_get_device_name(self): br = interface.BridgeInterfaceDriver() device_name = br.get_device_name(FakePort) self.assertEqual('ns-abcdef01-12', device_name) def test_plug_no_ns(self): self._test_plug() def test_plug_with_ns(self): self._test_plug(namespace='01234567-1234-1234-99') def _test_plug(self, namespace=None, mtu=None): def device_exists(device, root_helper=None, namespace=None): return device.startswith('brq') root_veth = mock.Mock() ns_veth = mock.Mock() self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth)) self.device_exists.side_effect = device_exists br = interface.BridgeInterfaceDriver() mac_address = 'aa:bb:cc:dd:ee:ff' br.plug('ns-0', 'port-1234', mac_address, namespace=namespace) ip_calls = [mock.call(), mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)] ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)]) self.ip.assert_has_calls(ip_calls) root_veth.assert_has_calls([mock.call.link.set_up()]) ns_veth.assert_has_calls([mock.call.link.set_up()]) def test_plug_dev_exists(self): self.device_exists.return_value = True with mock.patch('manila.network.linux.interface.LOG.warning') as log: br = interface.BridgeInterfaceDriver() br.plug('port-1234', 'tap0', 'aa:bb:cc:dd:ee:ff') self.ip_dev.assert_has_calls([]) self.assertEqual(1, log.call_count) def test_unplug_no_device(self): self.device_exists.return_value = False 
self.ip_dev().link.delete.side_effect = RuntimeError with mock.patch('manila.network.linux.interface.LOG') as log: br = interface.BridgeInterfaceDriver() br.unplug('tap0') [mock.call(), mock.call('tap0'), mock.call().link.delete()] self.assertEqual(1, log.error.call_count) def test_unplug(self): self.device_exists.return_value = True with mock.patch('manila.network.linux.interface.LOG.debug') as log: br = interface.BridgeInterfaceDriver() br.unplug('tap0') self.assertTrue(log.called) self.ip_dev.assert_has_calls([mock.call('tap0', None), mock.call().link.delete()]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/network/linux/test_ip_lib.py0000664000175000017500000007172000000000000023167 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from manila.network.linux import ip_lib from manila import test NETNS_SAMPLE = [ '12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc'] LINK_SAMPLE = [ '1: lo: mtu 16436 qdisc noqueue state UNKNOWN \\' 'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00', '2: eth0: mtu 1500 qdisc mq state UP ' 'qlen 1000\\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff' '\\ alias openvswitch', '3: br-int: mtu 1500 qdisc noop state DOWN ' '\\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff', '4: gw-ddc717df-49: mtu 1500 qdisc noop ' 'state DOWN \\ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff', '5: eth0.50@eth0: mtu 1500 qdisc ' ' noqueue master brq0b24798c-07 state UP mode DEFAULT' '\\ link/ether ab:04:49:b6:ab:a0 brd ff:ff:ff:ff:ff:ff'] ADDR_SAMPLE = (""" 2: eth0: mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) ADDR_SAMPLE2 = (""" 2: eth0: mtu 1500 qdisc mq state UP qlen 1000 link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff inet 172.16.77.240/24 scope global eth0 inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic valid_lft 14187sec preferred_lft 3387sec inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 
2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ """deprecated dynamic valid_lft 14187sec preferred_lft 0sec inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic valid_lft 14187sec preferred_lft 3387sec inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link valid_lft forever preferred_lft forever """) GATEWAY_SAMPLE1 = (""" default via 10.35.19.254 metric 100 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE2 = (""" default via 10.35.19.254 metric 100 """) GATEWAY_SAMPLE3 = (""" 10.35.16.0/22 proto kernel scope link src 10.35.17.97 """) GATEWAY_SAMPLE4 = (""" default via 10.35.19.254 """) GATEWAY_SAMPLE5 = (""" default via 172.24.47.1 dev eth0 10.0.0.0/24 dev tapc226b810-a0 proto kernel scope link src 10.0.0.3 10.254.0.0/28 dev tap6de90453-1c proto kernel scope link src 10.254.0.4 10.35.16.0/22 proto kernel scope link src 10.35.17.97 172.24.4.0/24 via 10.35.19.254 metric 100 """) DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2") SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n" "10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2") SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n" "10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1") class TestSubProcessBase(test.TestCase): def setUp(self): super(TestSubProcessBase, self).setUp() self.execute_p = mock.patch('manila.utils.execute') self.execute = self.execute_p.start() def tearDown(self): self.execute_p.stop() super(TestSubProcessBase, self).tearDown() def test_execute_wrapper(self): ip_lib.SubProcessBase._execute('o', 'link', ('list',)) self.execute.assert_called_once_with('ip', '-o', 'link', 'list', run_as_root=False) def test_execute_wrapper_int_options(self): ip_lib.SubProcessBase._execute([4], 'link', ('list',)) self.execute.assert_called_once_with('ip', '-4', 'link', 'list', run_as_root=False) def test_execute_wrapper_no_options(self): ip_lib.SubProcessBase._execute([], 'link', ('list',)) 
self.execute.assert_called_once_with('ip', 'link', 'list', run_as_root=False) def test_run_no_namespace(self): base = ip_lib.SubProcessBase() base._run([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'link', 'list', run_as_root=False) def test_run_namespace(self): base = ip_lib.SubProcessBase('ns') base._run([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list', run_as_root=True) def test_as_root_namespace(self): base = ip_lib.SubProcessBase('ns') base._as_root([], 'link', ('list',)) self.execute.assert_called_once_with('ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list', run_as_root=True) class TestIpWrapper(test.TestCase): def setUp(self): super(TestIpWrapper, self).setUp() self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute') self.execute = self.execute_p.start() def tearDown(self): self.execute_p.stop() super(TestIpWrapper, self).tearDown() def test_get_devices(self): self.execute.return_value = '\n'.join(LINK_SAMPLE) retval = ip_lib.IPWrapper().get_devices() self.assertEqual([ip_lib.IPDevice('lo'), ip_lib.IPDevice('eth0'), ip_lib.IPDevice('br-int'), ip_lib.IPDevice('gw-ddc717df-49'), ip_lib.IPDevice('eth0.50')], retval) self.execute.assert_called_once_with('o', 'link', ('list',), None) def test_get_devices_malformed_line(self): self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish']) retval = ip_lib.IPWrapper().get_devices() self.assertEqual([ip_lib.IPDevice('lo'), ip_lib.IPDevice('eth0'), ip_lib.IPDevice('br-int'), ip_lib.IPDevice('gw-ddc717df-49'), ip_lib.IPDevice('eth0.50')], retval) self.execute.assert_called_once_with('o', 'link', ('list',), None) def test_get_namespaces(self): self.execute.return_value = '\n'.join(NETNS_SAMPLE) retval = ip_lib.IPWrapper.get_namespaces() self.assertEqual(['12345678-1234-5678-abcd-1234567890ab', 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc'], retval) self.execute.assert_called_once_with('', 'netns', 
('list',)) def test_add_tuntap(self): ip_lib.IPWrapper().add_tuntap('tap0') self.execute.assert_called_once_with('', 'tuntap', ('add', 'tap0', 'mode', 'tap'), None, as_root=True) def test_add_veth(self): ip_lib.IPWrapper().add_veth('tap0', 'tap1') self.execute.assert_called_once_with('', 'link', ('add', 'tap0', 'type', 'veth', 'peer', 'name', 'tap1'), None, as_root=True) def test_add_veth_with_namespaces(self): ns2 = 'ns2' with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en: ip_lib.IPWrapper().add_veth('tap0', 'tap1', namespace2=ns2) en.assert_has_calls([mock.call(ns2)]) self.execute.assert_called_once_with('', 'link', ('add', 'tap0', 'type', 'veth', 'peer', 'name', 'tap1', 'netns', ns2), None, as_root=True) def test_get_device(self): dev = ip_lib.IPWrapper('ns').device('eth0') self.assertEqual('ns', dev.namespace) self.assertEqual('eth0', dev.name) def test_ensure_namespace(self): with mock.patch.object(ip_lib, 'IPDevice') as ip_dev: ip = ip_lib.IPWrapper() with mock.patch.object(ip.netns, 'exists') as ns_exists: ns_exists.return_value = False ip.ensure_namespace('ns') self.execute.assert_has_calls( [mock.call([], 'netns', ('add', 'ns'), None, as_root=True)]) ip_dev.assert_has_calls([mock.call('lo', 'ns'), mock.call().link.set_up()]) def test_ensure_namespace_existing(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd: ip_ns_cmd.exists.return_value = True ns = ip_lib.IPWrapper().ensure_namespace('ns') self.assertFalse(self.execute.called) self.assertEqual('ns', ns.namespace) def test_namespace_is_empty_no_devices(self): ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [] self.assertTrue(ip.namespace_is_empty()) get_devices.assert_called_once_with(exclude_loopback=True) def test_namespace_is_empty(self): ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'get_devices') as get_devices: get_devices.return_value = [mock.Mock()] self.assertFalse(ip.namespace_is_empty()) 
get_devices.assert_called_once_with(exclude_loopback=True) def test_garbage_collect_namespace_does_not_exist(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = False ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: self.assertFalse(ip.garbage_collect_namespace()) ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')]) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.return_value.mock_calls) self.assertEqual([], mock_is_empty.mock_calls) def test_garbage_collect_namespace_existing_empty_ns(self): with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = True self.assertTrue(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call().exists('ns'), mock.call().delete('ns')] ip_ns_cmd_cls.assert_has_calls(expected) def test_garbage_collect_namespace_existing_not_empty(self): lo_device = mock.Mock() lo_device.name = 'lo' tap_device = mock.Mock() tap_device.name = 'tap1' with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: ip_ns_cmd_cls.return_value.exists.return_value = True ip = ip_lib.IPWrapper('ns') with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: mock_is_empty.return_value = False self.assertFalse(ip.garbage_collect_namespace()) mock_is_empty.assert_called_once_with() expected = [mock.call(ip), mock.call().exists('ns')] self.assertEqual(expected, ip_ns_cmd_cls.mock_calls) self.assertNotIn(mock.call().delete('ns'), ip_ns_cmd_cls.mock_calls) def test_add_device_to_namespace(self): dev = mock.Mock() ip_lib.IPWrapper('ns').add_device_to_namespace(dev) dev.assert_has_calls([mock.call.link.set_netns('ns')]) def test_add_device_to_namespace_is_none(self): dev = mock.Mock() 
ip_lib.IPWrapper().add_device_to_namespace(dev) self.assertEqual([], dev.mock_calls) class TestIPDevice(test.TestCase): def test_eq_same_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap0') self.assertEqual(dev1, dev2) def test_eq_diff_name(self): dev1 = ip_lib.IPDevice('tap0') dev2 = ip_lib.IPDevice('tap1') self.assertNotEqual(dev1, dev2) def test_eq_same_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') dev2 = ip_lib.IPDevice('tap0', 'ns1') self.assertEqual(dev1, dev2) def test_eq_diff_namespace(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') dev2 = ip_lib.IPDevice('tap0', 'ns2') self.assertNotEqual(dev1, dev2) def test_eq_other_is_none(self): dev1 = ip_lib.IPDevice('tap0', 'ns1') self.assertIsNotNone(dev1) def test_str(self): self.assertEqual('tap0', str(ip_lib.IPDevice('tap0'))) class TestIPCommandBase(test.TestCase): def setUp(self): super(TestIPCommandBase, self).setUp() self.ip = mock.Mock() self.ip.namespace = 'namespace' self.ip_cmd = ip_lib.IpCommandBase(self.ip) self.ip_cmd.COMMAND = 'foo' def test_run(self): self.ip_cmd._run('link', 'show') self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))]) def test_run_with_options(self): self.ip_cmd._run('link', options='o') self.ip.assert_has_calls([mock.call._run('o', 'foo', ('link', ))]) def test_as_root(self): self.ip_cmd._as_root('link') self.ip.assert_has_calls( [mock.call._as_root([], 'foo', ('link', ), False)]) def test_as_root_with_options(self): self.ip_cmd._as_root('link', options='o') self.ip.assert_has_calls( [mock.call._as_root('o', 'foo', ('link', ), False)]) class TestIPDeviceCommandBase(test.TestCase): def setUp(self): super(TestIPDeviceCommandBase, self).setUp() self.ip_dev = mock.Mock() self.ip_dev.name = 'eth0' self.ip_dev._execute = mock.Mock(return_value='executed') self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev) self.ip_cmd.COMMAND = 'foo' def test_name_property(self): self.assertEqual('eth0', self.ip_cmd.name) class TestIPCmdBase(test.TestCase): 
def setUp(self): super(TestIPCmdBase, self).setUp() self.parent = mock.Mock() self.parent.name = 'eth0' def _assert_call(self, options, args): self.parent.assert_has_calls([ mock.call._run(options, self.command, args)]) def _assert_sudo(self, options, args, force_root_namespace=False): self.parent.assert_has_calls( [mock.call._as_root(options, self.command, args, force_root_namespace)]) class TestIpLinkCommand(TestIPCmdBase): def setUp(self): super(TestIpLinkCommand, self).setUp() self.parent._run.return_value = LINK_SAMPLE[1] self.command = 'link' self.link_cmd = ip_lib.IpLinkCommand(self.parent) def test_set_address(self): self.link_cmd.set_address('aa:bb:cc:dd:ee:ff') self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff')) def test_set_mtu(self): self.link_cmd.set_mtu(1500) self._assert_sudo([], ('set', 'eth0', 'mtu', 1500)) def test_set_up(self): self.link_cmd.set_up() self._assert_sudo([], ('set', 'eth0', 'up')) def test_set_down(self): self.link_cmd.set_down() self._assert_sudo([], ('set', 'eth0', 'down')) def test_set_netns(self): self.link_cmd.set_netns('foo') self._assert_sudo([], ('set', 'eth0', 'netns', 'foo')) self.assertEqual('foo', self.parent.namespace) def test_set_name(self): self.link_cmd.set_name('tap1') self._assert_sudo([], ('set', 'eth0', 'name', 'tap1')) self.assertEqual('tap1', self.parent.name) def test_set_alias(self): self.link_cmd.set_alias('openvswitch') self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch')) def test_delete(self): self.link_cmd.delete() self._assert_sudo([], ('delete', 'eth0')) def test_address_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual('cc:dd:ee:ff:ab:cd', self.link_cmd.address) def test_mtu_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(1500, self.link_cmd.mtu) def test_qdisc_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual('mq', self.link_cmd.qdisc) 
def test_qlen_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(1000, self.link_cmd.qlen) def test_alias_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual('openvswitch', self.link_cmd.alias) def test_state_property(self): self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual('UP', self.link_cmd.state) def test_settings_property(self): expected = {'mtu': 1500, 'qlen': 1000, 'state': 'UP', 'qdisc': 'mq', 'brd': 'ff:ff:ff:ff:ff:ff', 'link/ether': 'cc:dd:ee:ff:ab:cd', 'alias': 'openvswitch'} self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) self.assertEqual(expected, self.link_cmd.attributes) self._assert_call('o', ('show', 'eth0')) class TestIpAddrCommand(TestIPCmdBase): def setUp(self): super(TestIpAddrCommand, self).setUp() self.parent.name = 'tap0' self.command = 'addr' self.addr_cmd = ip_lib.IpAddrCommand(self.parent) def test_add_address(self): self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255') self._assert_sudo([4], ('add', '192.168.45.100/24', 'brd', '192.168.45.255', 'scope', 'global', 'dev', 'tap0')) def test_add_address_scoped(self): self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255', scope='link') self._assert_sudo([4], ('add', '192.168.45.100/24', 'brd', '192.168.45.255', 'scope', 'link', 'dev', 'tap0')) def test_del_address(self): self.addr_cmd.delete(4, '192.168.45.100/24') self._assert_sudo([4], ('del', '192.168.45.100/24', 'dev', 'tap0')) def test_flush(self): self.addr_cmd.flush() self._assert_sudo([], ('flush', 'tap0')) def test_list(self): expected = [ dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24', broadcast='172.16.77.255'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64', broadcast='::'), dict(ip_version=6, scope='global', 
dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64', broadcast='::'), dict(ip_version=6, scope='global', dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64', broadcast='::'), dict(ip_version=6, scope='link', dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64', broadcast='::')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: self.parent._run = mock.Mock(return_value=test_case) self.assertEqual(expected, self.addr_cmd.list()) self._assert_call([], ('show', 'tap0')) def test_list_filtered(self): expected = [ dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24', broadcast='172.16.77.255')] test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] for test_case in test_cases: output = '\n'.join(test_case.split('\n')[0:4]) self.parent._run.return_value = output self.assertEqual(expected, self.addr_cmd.list('global', filters=['permanent'])) self._assert_call([], ('show', 'tap0', 'permanent', 'scope', 'global')) class TestIpRouteCommand(TestIPCmdBase): def setUp(self): super(TestIpRouteCommand, self).setUp() self.parent.name = 'eth0' self.command = 'route' self.route_cmd = ip_lib.IpRouteCommand(self.parent) def test_add_gateway(self): gateway = '192.168.45.100' metric = 100 self.route_cmd.add_gateway(gateway, metric) self._assert_sudo([], ('replace', 'default', 'via', gateway, 'metric', metric, 'dev', self.parent.name)) def test_del_gateway(self): gateway = '192.168.45.100' self.route_cmd.delete_gateway(gateway) self._assert_sudo([], ('del', 'default', 'via', gateway, 'dev', self.parent.name)) def test_get_gateway(self): test_cases = [{'sample': GATEWAY_SAMPLE1, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': GATEWAY_SAMPLE2, 'expected': {'gateway': '10.35.19.254', 'metric': 100}}, {'sample': GATEWAY_SAMPLE3, 'expected': None}, {'sample': GATEWAY_SAMPLE4, 'expected': {'gateway': '10.35.19.254'}}] for test_case in test_cases: self.parent._run = mock.Mock(return_value=test_case['sample']) 
self.assertEqual(test_case['expected'], self.route_cmd.get_gateway()) def test_pullup_route(self): # interface is not the first in the list - requires # deleting and creating existing entries output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10') self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2')) self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel', 'src', '10.0.0.1', 'dev', 'qr-23380d11-d2')) def test_pullup_route_first(self): # interface is first in the list - no changes output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2] def pullup_side_effect(self, *args): result = output.pop(0) return result self.parent._run = mock.Mock(side_effect=pullup_side_effect) self.route_cmd.pullup_route('tap1d7888a7-10') # Check two calls - device get and subnet get self.assertEqual(2, len(self.parent._run.mock_calls)) def test_list(self): self.route_cmd._as_root = mock.Mock(return_value=GATEWAY_SAMPLE5) expected = [{'Destination': 'default', 'Device': 'eth0', 'Gateway': '172.24.47.1'}, {'Destination': '10.0.0.0/24', 'Device': 'tapc226b810-a0'}, {'Destination': '10.254.0.0/28', 'Device': 'tap6de90453-1c'}, {'Destination': '10.35.16.0/22'}, {'Destination': '172.24.4.0/24', 'Gateway': '10.35.19.254'}] result = self.route_cmd.list() self.assertEqual(expected, result) self.route_cmd._as_root.assert_called_once_with('list') def test_delete_net_route(self): self.route_cmd._as_root = mock.Mock() self.route_cmd.delete_net_route('10.0.0.0/24', 'br-ex') self.route_cmd._as_root.assert_called_once_with( 'delete', '10.0.0.0/24', 'dev', 'br-ex') def test_clear_outdated_routes(self): self.route_cmd.delete_net_route = mock.Mock() list_result = [{'Destination': 'default', 'Device': 'eth0', 'Gateway': '172.24.47.1'}, {'Destination': '10.0.0.0/24', 'Device': 'eth0'}, {'Destination': '10.0.0.0/24', 
'Device': 'br-ex'}] self.route_cmd.list = mock.Mock(return_value=list_result) self.route_cmd.clear_outdated_routes('10.0.0.0/24') self.route_cmd.delete_net_route.assert_called_once_with( '10.0.0.0/24', 'br-ex') class TestIpNetnsCommand(TestIPCmdBase): def setUp(self): super(TestIpNetnsCommand, self).setUp() self.command = 'netns' self.netns_cmd = ip_lib.IpNetnsCommand(self.parent) def test_add_namespace(self): ns = self.netns_cmd.add('ns') self._assert_sudo([], ('add', 'ns'), force_root_namespace=True) self.assertEqual('ns', ns.namespace) def test_delete_namespace(self): with mock.patch('manila.utils.execute'): self.netns_cmd.delete('ns') self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True) def test_namespace_exists(self): retval = '\n'.join(NETNS_SAMPLE) self.parent._as_root.return_value = retval self.assertTrue( self.netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')) self._assert_sudo('o', ('list',), force_root_namespace=True) def test_namespace_doest_not_exist(self): retval = '\n'.join(NETNS_SAMPLE) self.parent._as_root.return_value = retval self.assertFalse( self.netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb')) self._assert_sudo('o', ('list',), force_root_namespace=True) def test_execute(self): self.parent.namespace = 'ns' with mock.patch('manila.utils.execute') as execute: self.netns_cmd.execute(['ip', 'link', 'list']) execute.assert_called_once_with('ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list', run_as_root=True, check_exit_code=True) def test_execute_env_var_prepend(self): self.parent.namespace = 'ns' with mock.patch('manila.utils.execute') as execute: env = dict(FOO=1, BAR=2) self.netns_cmd.execute(['ip', 'link', 'list'], env) execute.assert_called_once_with( 'ip', 'netns', 'exec', 'ns', 'env', 'BAR=2', 'FOO=1', 'ip', 'link', 'list', run_as_root=True, check_exit_code=True) class TestDeviceExists(test.TestCase): def test_device_exists(self): with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: 
_execute.return_value = LINK_SAMPLE[1] self.assertTrue(ip_lib.device_exists('eth0')) _execute.assert_called_once_with('o', 'link', ('show', 'eth0')) def test_device_does_not_exist(self): with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: _execute.return_value = '' _execute.side_effect = RuntimeError('Device does not exist.') self.assertFalse(ip_lib.device_exists('eth0')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/network/linux/test_ovs_lib.py0000664000175000017500000000551500000000000023365 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from manila.network.linux import ovs_lib from manila import test class OVS_Lib_Test(test.TestCase): """A test suite to exercise the OVS libraries.""" def setUp(self): super(OVS_Lib_Test, self).setUp() self.BR_NAME = "br-int" self.TO = "--timeout=2" self.br = ovs_lib.OVSBridge(self.BR_NAME) self.execute_p = mock.patch('manila.utils.execute') self.execute = self.execute_p.start() def tearDown(self): self.execute_p.stop() super(OVS_Lib_Test, self).tearDown() def test_reset_bridge(self): self.br.reset_bridge() self.execute.assert_has_calls([mock.call("ovs-vsctl", self.TO, "--", "--if-exists", "del-br", self.BR_NAME, run_as_root=True), mock.call("ovs-vsctl", self.TO, "add-br", self.BR_NAME, run_as_root=True)]) def test_delete_port(self): pname = "tap5" self.br.delete_port(pname) self.execute.assert_called_once_with("ovs-vsctl", self.TO, "--", "--if-exists", "del-port", self.BR_NAME, pname, run_as_root=True) def test_port_id_regex(self): result = ('external_ids : {attached-mac="fa:16:3e:23:5b:f2",' ' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",' ' iface-status=active}\nname :' ' "dhc5c1321a7-c7"\nofport : 2\n') match = self.br.re_id.search(result) vif_mac = match.group('vif_mac') vif_id = match.group('vif_id') port_name = match.group('port_name') ofport = int(match.group('ofport')) self.assertEqual('fa:16:3e:23:5b:f2', vif_mac) self.assertEqual('5c1321a7-c73f-4a77-95e6-9f86402e5c8f', vif_id) self.assertEqual('dhc5c1321a7-c7', port_name) self.assertEqual(2, ofport) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9816709 manila-21.0.0/manila/tests/network/neutron/0000775000175000017500000000000000000000000020644 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/network/neutron/__init__.py0000664000175000017500000000000000000000000022743 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/network/neutron/test_neutron_api.py0000664000175000017500000006514100000000000024607 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2014 Mirantis Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from neutronclient.common import exceptions as neutron_client_exc from neutronclient.v2_0 import client as clientv20 from oslo_config import cfg from manila.db import base from manila import exception from manila.network.neutron import api as neutron_api from manila.network.neutron import constants as neutron_constants from manila import test from manila.tests.db import fakes from manila.tests import utils as test_utils CONF = cfg.CONF class FakeNeutronClient(object): def create_port(self, body): return body def delete_port(self, port_id): pass def show_port(self, port_id): pass def list_ports(self, **search_opts): pass def create_port_binding(self, port_id, body): return body def delete_port_binding(self, port_id, host_id): pass def activate_port_binding(self, port_id, host_id): pass def list_networks(self): pass def show_network(self, network_uuid): pass def show_subnet(self, subnet_uuid): pass def create_router(self, body): return body def list_routers(self): pass def create_network(self, body): return body def create_subnet(self, body): 
return body def update_port(self, port_id, body): return body def add_interface_router(self, router_id, subnet_id, port_id): pass def update_router(self, router_id, body): return body def show_router(self, router_id): pass def list_extensions(self): pass class NeutronclientTestCase(test.TestCase): def test_no_auth_obj(self): mock_client_loader = self.mock_object( neutron_api.client_auth, 'AuthClientLoader') fake_context = 'fake_context' data = { 'neutron': { 'url': 'http://localhost:9696', 'endpoint_type': 'internalURL', 'region_name': 'foo_region_name', } } self.client = None with test_utils.create_temp_config_with_opts(data): self.client = neutron_api.API() self.client.get_client(fake_context) mock_client_loader.assert_called_once_with( client_class=neutron_api.clientv20.Client, cfg_group=neutron_api.NEUTRON_GROUP ) mock_client_loader.return_value.get_client.assert_called_once_with( self.client, fake_context, endpoint_type=data['neutron']['endpoint_type'], region_name=data['neutron']['region_name'], endpoint_override=data['neutron']['url'], ) def test_with_auth_obj(self): fake_context = 'fake_context' data = { 'neutron': { 'url': 'http://localhost:9696', 'endpoint_type': 'internalURL', 'region_name': 'foo_region_name', } } self.client = None with test_utils.create_temp_config_with_opts(data): self.client = neutron_api.API() self.client.auth_obj = type( 'FakeAuthObj', (object, ), {'get_client': mock.Mock()}) self.client.get_client(fake_context) self.client.auth_obj.get_client.assert_called_once_with( self.client, fake_context, endpoint_type=data['neutron']['endpoint_type'], region_name=data['neutron']['region_name'], endpoint_override=data['neutron']['url'], ) class NeutronApiTest(test.TestCase): def setUp(self): super(NeutronApiTest, self).setUp() self.mock_object(base, 'Base', fakes.FakeModel) self.mock_object( clientv20, 'Client', mock.Mock(return_value=FakeNeutronClient())) self.neutron_api = neutron_api.API() def test_create_api_object(self): # instantiate 
Neutron API object neutron_api_instance = neutron_api.API() # Verify results self.assertTrue(hasattr(neutron_api_instance, 'client')) self.assertTrue(hasattr(neutron_api_instance, 'configuration')) self.assertEqual('DEFAULT', neutron_api_instance.config_group_name) def test_create_port_with_all_args(self): # Set up test data self.mock_object(self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=True)) port_args = { 'tenant_id': 'test tenant', 'network_id': 'test net', 'host_id': 'test host', 'subnet_id': 'test subnet', 'fixed_ip': 'test ip', 'device_owner': 'test owner', 'device_id': 'test device', 'mac_address': 'test mac', 'security_group_ids': 'test group', 'dhcp_opts': 'test dhcp', } # Execute method 'create_port' port = self.neutron_api.create_port(**port_args) # Verify results self.assertEqual(port_args['tenant_id'], port['tenant_id']) self.assertEqual(port_args['network_id'], port['network_id']) self.assertEqual(port_args['host_id'], port['binding:host_id']) self.assertEqual(port_args['subnet_id'], port['fixed_ips'][0]['subnet_id']) self.assertEqual(port_args['fixed_ip'], port['fixed_ips'][0]['ip_address']) self.assertEqual(port_args['device_owner'], port['device_owner']) self.assertEqual(port_args['device_id'], port['device_id']) self.assertEqual(port_args['mac_address'], port['mac_address']) self.assertEqual(port_args['security_group_ids'], port['security_groups']) self.assertEqual(port_args['dhcp_opts'], port['extra_dhcp_opts']) self.neutron_api._has_port_binding_extension.assert_called_once_with() self.assertTrue(clientv20.Client.called) def test_create_port_with_required_args(self): # Set up test data port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'} # Execute method 'create_port' port = self.neutron_api.create_port(**port_args) # Verify results self.assertEqual(port_args['tenant_id'], port['tenant_id']) self.assertEqual(port_args['network_id'], port['network_id']) self.assertTrue(clientv20.Client.called) def 
test_create_port_with_additional_kwargs(self): # Set up test data port_args = {'tenant_id': 'test tenant', 'network_id': 'test net', 'binding_arg': 'foo'} # Execute method 'create_port' port = self.neutron_api.create_port(**port_args) # Verify results self.assertEqual(port_args['tenant_id'], port['tenant_id']) self.assertEqual(port_args['network_id'], port['network_id']) self.assertEqual(port_args['binding_arg'], port['binding_arg']) self.assertTrue(clientv20.Client.called) def test_create_port_with_host_id_no_binding_ext(self): self.mock_object(self.neutron_api, '_has_port_binding_extension', mock.Mock(return_value=False)) port_args = { 'tenant_id': 'test tenant', 'network_id': 'test net', 'host_id': 'foohost' } self.assertRaises(exception.NetworkException, self.neutron_api.create_port, **port_args) @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_create_port_exception(self): self.mock_object( self.neutron_api.client, 'create_port', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'} # Execute method 'create_port' self.assertRaises(exception.NetworkException, self.neutron_api.create_port, **port_args) # Verify results self.assertTrue(neutron_api.LOG.exception.called) self.assertTrue(clientv20.Client.called) self.assertTrue(self.neutron_api.client.create_port.called) @mock.patch.object(neutron_api.LOG, 'exception', mock.Mock()) def test_create_port_exception_status_409(self): # Set up test data self.mock_object( self.neutron_api.client, 'create_port', mock.Mock(side_effect=neutron_client_exc.NeutronClientException( status_code=409))) port_args = {'tenant_id': 'test tenant', 'network_id': 'test net'} # Execute method 'create_port' self.assertRaises(exception.PortLimitExceeded, self.neutron_api.create_port, **port_args) # Verify results self.assertTrue(neutron_api.LOG.exception.called) self.assertTrue(clientv20.Client.called) 
self.assertTrue(self.neutron_api.client.create_port.called) def test_delete_port(self): # Set up test data self.mock_object(self.neutron_api.client, 'delete_port') port_id = 'test port id' # Execute method 'delete_port' self.neutron_api.delete_port(port_id) # Verify results self.neutron_api.client.delete_port.assert_called_once_with(port_id) self.assertTrue(clientv20.Client.called) def test_delete_port_NeutronClientException(self): # Set up test data self.mock_object( self.neutron_api.client, 'delete_port', mock.Mock(side_effect=neutron_client_exc.NeutronClientException())) port_id = 'test port id' self.assertRaises(exception.NetworkException, self.neutron_api.delete_port, port_id) # Verify results self.neutron_api.client.delete_port.assert_called_once_with(port_id) self.assertTrue(clientv20.Client.called) def test_delete_port_PortNotFoundClient(self): # Set up test data self.mock_object( self.neutron_api.client, 'delete_port', mock.Mock(side_effect=neutron_client_exc.PortNotFoundClient())) port_id = 'test port id' # Execute method 'delete_port' self.neutron_api.delete_port(port_id) # Verify results self.neutron_api.client.delete_port.assert_called_once_with(port_id) self.assertTrue(clientv20.Client.called) def test_list_ports(self): # Set up test data search_opts = {'test_option': 'test_value'} fake_ports = [{'fake port': 'fake port info'}] self.mock_object( self.neutron_api.client, 'list_ports', mock.Mock(return_value={'ports': fake_ports})) # Execute method 'list_ports' ports = self.neutron_api.list_ports(**search_opts) # Verify results self.assertEqual(fake_ports, ports) self.assertTrue(clientv20.Client.called) self.neutron_api.client.list_ports.assert_called_once_with( **search_opts) def test_show_port(self): # Set up test data port_id = 'test port id' fake_port = {'fake port': 'fake port info'} self.mock_object( self.neutron_api.client, 'show_port', mock.Mock(return_value={'port': fake_port})) # Execute method 'show_port' port = 
self.neutron_api.show_port(port_id) # Verify results self.assertEqual(fake_port, port) self.assertTrue(clientv20.Client.called) self.neutron_api.client.show_port.assert_called_once_with(port_id) def test_get_network(self): # Set up test data network_id = 'test network id' fake_network = {'fake network': 'fake network info'} self.mock_object( self.neutron_api.client, 'show_network', mock.Mock(return_value={'network': fake_network})) # Execute method 'get_network' network = self.neutron_api.get_network(network_id) # Verify results self.assertEqual(fake_network, network) self.assertTrue(clientv20.Client.called) self.neutron_api.client.show_network.assert_called_once_with( network_id) def test_get_subnet(self): # Set up test data subnet_id = 'fake subnet id' self.mock_object( self.neutron_api.client, 'show_subnet', mock.Mock(return_value={'subnet': {}})) # Execute method 'get_subnet' subnet = self.neutron_api.get_subnet(subnet_id) # Verify results self.assertEqual({}, subnet) self.assertTrue(clientv20.Client.called) self.neutron_api.client.show_subnet.assert_called_once_with( subnet_id) def test_get_all_network(self): # Set up test data fake_networks = [{'fake network': 'fake network info'}] self.mock_object( self.neutron_api.client, 'list_networks', mock.Mock(return_value={'networks': fake_networks})) # Execute method 'get_all_networks' networks = self.neutron_api.get_all_networks() # Verify results self.assertEqual(fake_networks, networks) self.assertTrue(clientv20.Client.called) self.neutron_api.client.list_networks.assert_called_once_with() def test_list_extensions(self): # Set up test data extensions = [ {'name': neutron_constants.PORTBINDING_EXT}, {'name': neutron_constants.PROVIDER_NW_EXT}, ] self.mock_object( self.neutron_api.client, 'list_extensions', mock.Mock(return_value={'extensions': extensions})) # Execute method 'list_extensions' result = self.neutron_api.list_extensions() # Verify results self.assertTrue(clientv20.Client.called) 
self.neutron_api.client.list_extensions.assert_called_once_with() self.assertIn(neutron_constants.PORTBINDING_EXT, result) self.assertIn(neutron_constants.PROVIDER_NW_EXT, result) self.assertEqual( extensions[0], result[neutron_constants.PORTBINDING_EXT]) self.assertEqual( extensions[1], result[neutron_constants.PROVIDER_NW_EXT]) def test_create_network(self): # Set up test data net_args = {'tenant_id': 'test tenant', 'name': 'test name'} # Execute method 'network_create' network = self.neutron_api.network_create(**net_args) # Verify results self.assertEqual(net_args['tenant_id'], network['tenant_id']) self.assertEqual(net_args['name'], network['name']) self.assertTrue(clientv20.Client.called) def test_create_subnet(self): # Set up test data subnet_args = { 'tenant_id': 'test tenant', 'name': 'test name', 'net_id': 'test net id', 'cidr': '10.0.0.0/24', } # Execute method 'subnet_create' subnet = self.neutron_api.subnet_create(**subnet_args) # Verify results self.assertEqual(subnet_args['tenant_id'], subnet['tenant_id']) self.assertEqual(subnet_args['name'], subnet['name']) self.assertTrue(clientv20.Client.called) def test_create_router(self): # Set up test data router_args = {'tenant_id': 'test tenant', 'name': 'test name'} # Execute method 'router_create' router = self.neutron_api.router_create(**router_args) # Verify results self.assertEqual(router_args['tenant_id'], router['tenant_id']) self.assertEqual(router_args['name'], router['name']) self.assertTrue(clientv20.Client.called) def test_list_routers(self): # Set up test data fake_routers = [{'fake router': 'fake router info'}] self.mock_object( self.neutron_api.client, 'list_routers', mock.Mock(return_value={'routers': fake_routers})) # Execute method 'router_list' networks = self.neutron_api.router_list() # Verify results self.assertEqual(fake_routers, networks) self.assertTrue(clientv20.Client.called) self.neutron_api.client.list_routers.assert_called_once_with() def test_create_network_exception(self): # 
Set up test data net_args = {'tenant_id': 'test tenant', 'name': 'test name'} self.mock_object( self.neutron_api.client, 'create_network', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'network_create' self.assertRaises( exception.NetworkException, self.neutron_api.network_create, **net_args) # Verify results self.neutron_api.client.create_network.assert_called_once_with( {'network': net_args}) self.assertTrue(clientv20.Client.called) def test_create_subnet_exception(self): # Set up test data subnet_args = { 'tenant_id': 'test tenant', 'name': 'test name', 'net_id': 'test net id', 'cidr': '10.0.0.0/24', } self.mock_object( self.neutron_api.client, 'create_subnet', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'subnet_create' self.assertRaises( exception.NetworkException, self.neutron_api.subnet_create, **subnet_args) # Verify results expected_data = { 'network_id': subnet_args['net_id'], 'tenant_id': subnet_args['tenant_id'], 'cidr': subnet_args['cidr'], 'name': subnet_args['name'], 'ip_version': 4, } self.neutron_api.client.create_subnet.assert_called_once_with( {'subnet': expected_data}) self.assertTrue(clientv20.Client.called) def test_create_router_exception(self): # Set up test data router_args = {'tenant_id': 'test tenant', 'name': 'test name'} self.mock_object( self.neutron_api.client, 'create_router', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'router_create' self.assertRaises( exception.NetworkException, self.neutron_api.router_create, **router_args) # Verify results self.neutron_api.client.create_router.assert_called_once_with( {'router': router_args}) self.assertTrue(clientv20.Client.called) def test_update_port_fixed_ips(self): # Set up test data port_id = 'test_port' fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]} # Execute method 'update_port_fixed_ips' port = self.neutron_api.update_port_fixed_ips(port_id, fixed_ips) # Verify 
results self.assertEqual(fixed_ips, port) self.assertTrue(clientv20.Client.called) def test_update_port_fixed_ips_exception(self): # Set up test data port_id = 'test_port' fixed_ips = {'fixed_ips': [{'subnet_id': 'test subnet'}]} self.mock_object( self.neutron_api.client, 'update_port', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'update_port_fixed_ips' self.assertRaises( exception.NetworkException, self.neutron_api.update_port_fixed_ips, port_id, fixed_ips) # Verify results self.neutron_api.client.update_port.assert_called_once_with( port_id, {'port': fixed_ips}) self.assertTrue(clientv20.Client.called) def test_bind_port_to_host(self): port_id = 'test_port' host = 'test_host' vnic_type = 'test_vnic_type' port = self.neutron_api.bind_port_to_host(port_id, host, vnic_type) self.assertEqual(host, port['host']) self.assertTrue(clientv20.Client.called) def test_delete_port_binding(self): port_id = 'test_port' host = 'test_host' self.neutron_api.delete_port_binding(port_id, host) self.assertTrue(clientv20.Client.called) def test_activate_port_binding(self): port_id = 'test_port' host = 'test_host' self.neutron_api.activate_port_binding(port_id, host) self.assertTrue(clientv20.Client.called) def test_router_update_routes(self): # Set up test data router_id = 'test_router' routes = { 'routes': [ {'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8', }, ], } # Execute method 'router_update_routes' router = self.neutron_api.router_update_routes(router_id, routes) # Verify results self.assertEqual(routes, router) self.assertTrue(clientv20.Client.called) def test_router_update_routes_exception(self): # Set up test data router_id = 'test_router' routes = { 'routes': [ {'destination': '0.0.0.0/0', 'nexthop': '8.8.8.8', }, ], } self.mock_object( self.neutron_api.client, 'update_router', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'router_update_routes' self.assertRaises( exception.NetworkException, 
self.neutron_api.router_update_routes, router_id, routes) # Verify results self.neutron_api.client.update_router.assert_called_once_with( router_id, {'router': routes}) self.assertTrue(clientv20.Client.called) def test_show_router(self): # Set up test data router_id = 'test router id' fake_router = {'fake router': 'fake router info'} self.mock_object( self.neutron_api.client, 'show_router', mock.Mock(return_value={'router': fake_router})) # Execute method 'show_router' port = self.neutron_api.show_router(router_id) # Verify results self.assertEqual(fake_router, port) self.assertTrue(clientv20.Client.called) self.neutron_api.client.show_router.assert_called_once_with(router_id) def test_router_add_interface(self): # Set up test data router_id = 'test port id' subnet_id = 'test subnet id' port_id = 'test port id' self.mock_object(self.neutron_api.client, 'add_interface_router') # Execute method 'router_add_interface' self.neutron_api.router_add_interface(router_id, subnet_id, port_id) # Verify results self.neutron_api.client.add_interface_router.assert_called_once_with( port_id, {'subnet_id': subnet_id, 'port_id': port_id}) self.assertTrue(clientv20.Client.called) def test_router_add_interface_exception(self): # Set up test data router_id = 'test port id' subnet_id = 'test subnet id' port_id = 'test port id' self.mock_object( self.neutron_api.client, 'add_interface_router', mock.Mock(side_effect=neutron_client_exc.NeutronClientException)) # Execute method 'router_add_interface' self.assertRaises( exception.NetworkException, self.neutron_api.router_add_interface, router_id, subnet_id, port_id) # Verify results self.neutron_api.client.add_interface_router.assert_called_once_with( router_id, {'subnet_id': subnet_id, 'port_id': port_id}) self.assertTrue(clientv20.Client.called) def test_admin_project_id_exist(self): fake_admin_project_id = 'fake_admin_project_id_value' self.neutron_api.client.httpclient = mock.Mock() self.neutron_api.client.httpclient.auth_token = 
mock.Mock() self.neutron_api.client.httpclient.get_project_id = mock.Mock( return_value=fake_admin_project_id) admin_project_id = self.neutron_api.admin_project_id self.assertEqual(fake_admin_project_id, admin_project_id) self.neutron_api.client.httpclient.auth_token.called def test_admin_project_id_not_exist(self): fake_admin_project_id = 'fake_admin_project_id_value' self.neutron_api.client.httpclient = mock.Mock() self.neutron_api.client.httpclient.auth_token = mock.Mock( return_value=None) self.neutron_api.client.httpclient.authenticate = mock.Mock() self.neutron_api.client.httpclient.get_project_id = mock.Mock( return_value=fake_admin_project_id) admin_project_id = self.neutron_api.admin_project_id self.assertEqual(fake_admin_project_id, admin_project_id) self.neutron_api.client.httpclient.auth_token.called self.neutron_api.client.httpclient.authenticate.called def test_admin_project_id_not_exist_with_failure(self): self.neutron_api.client.httpclient = mock.Mock() self.neutron_api.client.httpclient.auth_token = None self.neutron_api.client.httpclient.authenticate = mock.Mock( side_effect=neutron_client_exc.NeutronClientException) self.neutron_api.client.httpclient.auth_tenant_id = mock.Mock() try: self.neutron_api.admin_project_id except exception.NetworkException: pass else: raise Exception('Expected error was not raised') self.assertTrue(self.neutron_api.client.httpclient.authenticate.called) self.assertFalse( self.neutron_api.client.httpclient.auth_tenant_id.called) def test_get_all_admin_project_networks(self): fake_networks = {'networks': ['fake_net_1', 'fake_net_2']} self.mock_object( self.neutron_api.client, 'list_networks', mock.Mock(return_value=fake_networks)) self.neutron_api.client.httpclient = mock.Mock() self.neutron_api.client.httpclient.auth_token = mock.Mock() self.neutron_api.client.httpclient.auth_tenant_id = mock.Mock() networks = self.neutron_api.get_all_admin_project_networks() self.assertEqual(fake_networks['networks'], networks) 
self.neutron_api.client.httpclient.auth_token.called self.neutron_api.client.httpclient.auth_tenant_id.called self.neutron_api.client.list_networks.assert_called_once_with( tenant_id=self.neutron_api.admin_project_id, shared=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/network/neutron/test_neutron_plugin.py0000664000175000017500000026346500000000000025345 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2015 Mirantis, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import time from unittest import mock import ddt from oslo_config import cfg from manila.common import constants from manila import context from manila.db import api as db_api from manila import exception from manila.network.neutron import api as neutron_api from manila.network.neutron import constants as neutron_constants from manila.network.neutron import neutron_network_plugin as plugin from manila.share import utils as share_utils from manila import test from manila.tests import utils as test_utils CONF = cfg.CONF fake_neutron_port = { "status": "ACTIVE", "allowed_address_pairs": [], "admin_state_up": True, "network_id": "test_net_id", "tenant_id": "fake_tenant_id", "extra_dhcp_opts": [], "device_owner": "test", "binding:capabilities": {"port_filter": True}, "mac_address": "test_mac", "fixed_ips": [ {"subnet_id": "test_subnet_id", "ip_address": "203.0.113.100"}, ], "id": "test_port_id", "security_groups": ["fake_sec_group_id"], "device_id": "fake_device_id", } fake_neutron_network = { 'admin_state_up': True, 'availability_zone_hints': [], 'availability_zones': ['nova'], 'description': '', 'id': 'fake net id', 'ipv4_address_scope': None, 'ipv6_address_scope': None, 'name': 'test_neutron_network', 'port_security_enabled': True, 'provider:network_type': 'vxlan', 'provider:physical_network': None, 'provider:segmentation_id': 1234, 'router:external': False, 'shared': False, 'status': 'ACTIVE', 'subnets': ['fake subnet id', 'fake subnet id 2'], } fake_ip_version = 4 fake_neutron_subnet = { 'cidr': '10.0.0.0/24', 'ip_version': fake_ip_version, 'gateway_ip': '10.0.0.1', } fake_share_network_subnet = { 'id': 'fake nw subnet id', 'neutron_subnet_id': fake_neutron_network['subnets'][0], 'neutron_net_id': fake_neutron_network['id'], 'network_type': 'fake_network_type', 'segmentation_id': 1234, 'ip_version': 4, 'cidr': 'fake_cidr', 'gateway': 'fake_gateway', 'mtu': 1509, } fake_share_network = { 'id': 'fake nw info id', 'project_id': 'fake project id', 'status': 
'test_subnet_status', 'name': 'fake name', 'description': 'fake description', 'security_services': [], 'subnets': [fake_share_network_subnet], } fake_share_server = { 'id': 'fake nw info id', 'status': 'test_server_status', 'host': 'fake@host', 'network_allocations': [], 'shares': [], } fake_network_allocation = { 'id': fake_neutron_port['id'], 'share_server_id': fake_share_server['id'], 'ip_address': fake_neutron_port['fixed_ips'][0]['ip_address'], 'mac_address': fake_neutron_port['mac_address'], 'status': constants.STATUS_ACTIVE, 'label': 'user', 'network_type': fake_share_network_subnet['network_type'], 'segmentation_id': fake_share_network_subnet['segmentation_id'], 'ip_version': fake_share_network_subnet['ip_version'], 'cidr': fake_share_network_subnet['cidr'], 'gateway': fake_share_network_subnet['gateway'], 'mtu': 1509, 'share_network_subnet_id': fake_share_network_subnet['id'], } fake_nw_info = { 'segments': [ { 'provider:network_type': 'vlan', 'provider:physical_network': 'net1', 'provider:segmentation_id': 3926, }, { 'provider:network_type': 'vlan', 'provider:physical_network': 'net2', 'provider:segmentation_id': 1249, }, { 'provider:network_type': 'vxlan', 'provider:physical_network': None, 'provider:segmentation_id': 2000, }, ], 'mtu': 1509, } fake_neutron_network_multi = { 'admin_state_up': True, 'availability_zone_hints': [], 'availability_zones': ['nova'], 'description': '', 'id': 'fake net id', 'ipv4_address_scope': None, 'ipv6_address_scope': None, 'name': 'test_neutron_network', 'port_security_enabled': True, 'router:external': False, 'shared': False, 'status': 'ACTIVE', 'subnets': ['fake subnet id', 'fake subnet id 2'], 'segments': fake_nw_info['segments'], 'mtu': fake_nw_info['mtu'], } fake_share_network_multi = { 'id': 'fake nw info id', 'neutron_subnet_id': fake_neutron_network_multi['subnets'][0], 'neutron_net_id': fake_neutron_network_multi['id'], 'project_id': 'fake project id', 'status': 'test_subnet_status', 'name': 'fake name', 
'description': 'fake description', 'security_services': [], 'ip_version': None, 'cidr': 'fake_cidr', 'gateway': 'fake_gateway', 'mtu': fake_neutron_network_multi['mtu'] - 1, } fake_network_allocation_multi = { 'id': fake_neutron_port['id'], 'share_server_id': fake_share_server['id'], 'ip_address': fake_neutron_port['fixed_ips'][0]['ip_address'], 'mac_address': fake_neutron_port['mac_address'], 'status': constants.STATUS_ACTIVE, 'label': 'user', 'network_type': None, 'segmentation_id': None, 'ip_version': fake_neutron_subnet['ip_version'], 'cidr': fake_neutron_subnet['cidr'], 'gateway': fake_neutron_subnet['gateway_ip'], 'mtu': fake_neutron_network_multi['mtu'], 'share_network_subnet_id': fake_share_network['id'], } fake_binding_profile = { 'neutron_switch_id': 'fake switch id', 'neutron_port_id': 'fake port id', 'neutron_switch_info': 'fake switch info' } fake_network_allocation_ext = { 'id': 'fake port binding id', 'share_server_id': fake_share_server['id'], 'ip_address': fake_neutron_port['fixed_ips'][0]['ip_address'], 'mac_address': fake_neutron_port['mac_address'], 'status': constants.STATUS_ACTIVE, 'label': fake_nw_info['segments'][1]['provider:physical_network'], 'network_type': fake_share_network_subnet['network_type'], 'segmentation_id': ( fake_nw_info['segments'][1]['provider:segmentation_id'] ), 'ip_version': fake_share_network_subnet['ip_version'], 'cidr': fake_share_network_subnet['cidr'], 'gateway': fake_share_network_subnet['gateway'], 'mtu': 1509, } @ddt.ddt class NeutronNetworkPluginTest(test.TestCase): def setUp(self): super(NeutronNetworkPluginTest, self).setUp() self.plugin = self._get_neutron_network_plugin_instance() self.plugin.db = db_api self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) def _get_neutron_network_plugin_instance(self, config_data=None): if config_data is None: return plugin.NeutronNetworkPlugin() with test_utils.create_temp_config_with_opts(config_data): return 
plugin.NeutronNetworkPlugin() @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test_allocate_network_external_neutron_network(self): has_provider_nw_ext = mock.patch.object( self.plugin, '_has_provider_network_extension').start() has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data', mock.Mock(return_value=True)).start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() with mock.patch.object(self.plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, fake_share_network_subnet, allocation_info={'count': 1}) has_provider_nw_ext.assert_any_call() save_nw_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) save_subnet_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) self.plugin.neutron_api.create_port.assert_called_once_with( fake_share_network['project_id'], network_id=fake_share_network_subnet['neutron_net_id'], subnet_id=fake_share_network_subnet['neutron_subnet_id'], device_owner='manila:share', device_id=fake_share_network['id'], name=fake_share_network['id'] + '_0', admin_state_up=False, ) db_api.network_allocation_create.assert_called_once_with( self.fake_context, fake_network_allocation) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) 
def test_allocate_network_one_allocation(self): has_provider_nw_ext = mock.patch.object( self.plugin, '_has_provider_network_extension').start() has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data').start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() with mock.patch.object(self.plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, fake_share_network_subnet, allocation_info={'count': 1}) has_provider_nw_ext.assert_any_call() save_nw_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) save_subnet_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) self.plugin.neutron_api.create_port.assert_called_once_with( fake_share_network['project_id'], network_id=fake_share_network_subnet['neutron_net_id'], subnet_id=fake_share_network_subnet['neutron_subnet_id'], device_owner='manila:share', device_id=fake_share_network['id'], name=fake_share_network['id'] + '_0', admin_state_up=False, ) db_api.network_allocation_create.assert_called_once_with( self.fake_context, fake_network_allocation) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test_allocate_network_two_allocation(self): has_provider_nw_ext = mock.patch.object( self.plugin, '_has_provider_network_extension').start() has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data').start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() 
with mock.patch.object(self.plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, fake_share_network_subnet, count=2) neutron_api_calls = [ mock.call( fake_share_network['project_id'], network_id=fake_share_network_subnet['neutron_net_id'], subnet_id=fake_share_network_subnet['neutron_subnet_id'], device_owner='manila:share', device_id=fake_share_network['id'], name=fake_share_network['id'] + '_0', admin_state_up=False, ), mock.call( fake_share_network['project_id'], network_id=fake_share_network_subnet['neutron_net_id'], subnet_id=fake_share_network_subnet['neutron_subnet_id'], device_owner='manila:share', device_id=fake_share_network['id'], name=fake_share_network['id'] + '_1', admin_state_up=False, ) ] db_api_calls = [ mock.call(self.fake_context, fake_network_allocation), mock.call(self.fake_context, fake_network_allocation) ] self.plugin.neutron_api.create_port.assert_has_calls( neutron_api_calls) db_api.network_allocation_create.assert_has_calls(db_api_calls) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_allocate_network_create_port_exception(self): has_provider_nw_ext = mock.patch.object( self.plugin, '_has_provider_network_extension').start() has_provider_nw_ext.return_value = True save_nw_data = mock.patch.object(self.plugin, '_save_neutron_network_data').start() save_subnet_data = mock.patch.object( self.plugin, '_save_neutron_subnet_data').start() create_port = mock.patch.object(self.plugin.neutron_api, 'create_port').start() create_port.side_effect = exception.NetworkException self.assertRaises(exception.NetworkException, self.plugin.allocate_network, self.fake_context, fake_share_server, fake_share_network) has_provider_nw_ext.stop() save_nw_data.stop() save_subnet_data.stop() create_port.stop() def _setup_manage_network_allocations(self): 
allocations = ['192.168.0.11', '192.168.0.12', 'fd12::2000'] neutron_ports = [ copy.deepcopy(fake_neutron_port), copy.deepcopy(fake_neutron_port), copy.deepcopy(fake_neutron_port), copy.deepcopy(fake_neutron_port), ] neutron_ports[0]['fixed_ips'][0]['ip_address'] = '192.168.0.10' neutron_ports[0]['id'] = 'fake_port_id_0' neutron_ports[1]['fixed_ips'][0]['ip_address'] = '192.168.0.11' neutron_ports[1]['id'] = 'fake_port_id_1' neutron_ports[2]['fixed_ips'][0]['ip_address'] = '192.168.0.12' neutron_ports[2]['id'] = 'fake_port_id_2' neutron_ports[3]['fixed_ips'][0]['ip_address'] = '192.168.0.13' neutron_ports[3]['id'] = 'fake_port_id_3' self.mock_object(self.plugin, '_verify_share_network_subnet') self.mock_object(self.plugin, '_store_and_get_neutron_net_info') self.mock_object(self.plugin.neutron_api, 'list_ports', mock.Mock(return_value=neutron_ports)) return neutron_ports, allocations @ddt.data({}, exception.NotFound) def test_manage_network_allocations_create_update(self, side_effect): neutron_ports, allocations = self._setup_manage_network_allocations() self.mock_object(db_api, 'network_allocation_get', mock.Mock( side_effect=[exception.NotFound, side_effect, exception.NotFound, side_effect])) if side_effect: self.mock_object(db_api, 'network_allocation_create') else: self.mock_object(db_api, 'network_allocation_update') result = self.plugin.manage_network_allocations( self.fake_context, allocations, fake_share_server, share_network_subnet=fake_share_network_subnet) self.assertEqual(['fd12::2000'], result) self.plugin.neutron_api.list_ports.assert_called_once_with( network_id=fake_share_network_subnet['neutron_net_id'], device_owner='manila:share', fixed_ips='subnet_id=' + fake_share_network_subnet['neutron_subnet_id']) db_api.network_allocation_get.assert_has_calls([ mock.call(self.fake_context, 'fake_port_id_1', read_deleted=False), mock.call(self.fake_context, 'fake_port_id_1', read_deleted=True), mock.call(self.fake_context, 'fake_port_id_2', 
read_deleted=False), mock.call(self.fake_context, 'fake_port_id_2', read_deleted=True), ]) port_dict_list = [{ 'share_server_id': fake_share_server['id'], 'ip_address': x, 'gateway': fake_share_network_subnet['gateway'], 'mac_address': fake_neutron_port['mac_address'], 'status': constants.STATUS_ACTIVE, 'label': 'user', 'network_type': fake_share_network_subnet['network_type'], 'segmentation_id': fake_share_network_subnet['segmentation_id'], 'ip_version': fake_share_network_subnet['ip_version'], 'cidr': fake_share_network_subnet['cidr'], 'mtu': fake_share_network_subnet['mtu'], 'share_network_subnet_id': fake_share_network_subnet['id'], } for x in ['192.168.0.11', '192.168.0.12']] if side_effect: port_dict_list[0]['id'] = 'fake_port_id_1' port_dict_list[1]['id'] = 'fake_port_id_2' db_api.network_allocation_create.assert_has_calls([ mock.call(self.fake_context, port_dict_list[0]), mock.call(self.fake_context, port_dict_list[1]) ]) else: for x in port_dict_list: x['deleted_at'] = None x['deleted'] = 'False' db_api.network_allocation_update.assert_has_calls([ mock.call(self.fake_context, 'fake_port_id_1', port_dict_list[0], read_deleted=True), mock.call(self.fake_context, 'fake_port_id_2', port_dict_list[1], read_deleted=True) ]) self.plugin._verify_share_network_subnet.assert_called_once_with( fake_share_server['id'], fake_share_network_subnet) self.plugin._store_and_get_neutron_net_info( self.fake_context, fake_share_network_subnet) def test__get_ports_respective_to_ips_multiple_fixed_ips(self): self.mock_object(plugin.LOG, 'warning') allocations = ['192.168.0.10', '192.168.0.11', '192.168.0.12'] neutron_ports = [ copy.deepcopy(fake_neutron_port), copy.deepcopy(fake_neutron_port), ] neutron_ports[0]['fixed_ips'][0]['ip_address'] = '192.168.0.10' neutron_ports[0]['id'] = 'fake_port_id_0' neutron_ports[0]['fixed_ips'].append({'ip_address': '192.168.0.11', 'subnet_id': 'test_subnet_id'}) neutron_ports[1]['fixed_ips'][0]['ip_address'] = '192.168.0.12' 
neutron_ports[1]['id'] = 'fake_port_id_2' expected = [{'port': neutron_ports[0], 'allocation': '192.168.0.10'}, {'port': neutron_ports[1], 'allocation': '192.168.0.12'}] result = self.plugin._get_ports_respective_to_ips(allocations, neutron_ports) self.assertEqual(expected, result) self.assertIs(True, plugin.LOG.warning.called) def test_manage_network_allocations_exception(self): neutron_ports, allocations = self._setup_manage_network_allocations() fake_allocation = { 'id': 'fake_port_id', 'share_server_id': 'fake_server_id' } self.mock_object(db_api, 'network_allocation_get', mock.Mock(return_value=fake_allocation)) self.assertRaises( exception.ManageShareServerError, self.plugin.manage_network_allocations, self.fake_context, allocations, fake_share_server, fake_share_network, fake_share_network_subnet) db_api.network_allocation_get.assert_called_once_with( self.fake_context, 'fake_port_id_1', read_deleted=False) def test_unmanage_network_allocations(self): neutron_ports = [ copy.deepcopy(fake_neutron_port), copy.deepcopy(fake_neutron_port), ] neutron_ports[0]['id'] = 'fake_port_id_0' neutron_ports[1]['id'] = 'fake_port_id_1' get_mock = self.mock_object( db_api, 'network_allocations_get_for_share_server', mock.Mock(return_value=neutron_ports)) self.mock_object(db_api, 'network_allocation_delete') self.plugin.unmanage_network_allocations( self.fake_context, fake_share_server['id']) get_mock.assert_called_once_with( self.fake_context, fake_share_server['id']) db_api.network_allocation_delete.assert_has_calls([ mock.call(self.fake_context, 'fake_port_id_0'), mock.call(self.fake_context, 'fake_port_id_1') ]) @mock.patch.object(db_api, 'network_allocation_delete', mock.Mock()) @mock.patch.object(db_api, 'share_network_update', mock.Mock()) @mock.patch.object(db_api, 'network_allocations_get_for_share_server', mock.Mock(return_value=[fake_network_allocation])) def test_deallocate_network_nominal(self): share_srv = {'id': fake_share_server['id']} 
share_srv['network_allocations'] = [fake_network_allocation] with mock.patch.object(self.plugin.neutron_api, 'delete_port', mock.Mock()): self.plugin.deallocate_network(self.fake_context, share_srv) self.plugin.neutron_api.delete_port.assert_called_once_with( fake_network_allocation['id']) db_api.network_allocation_delete.assert_called_once_with( self.fake_context, fake_network_allocation['id']) @mock.patch.object(db_api, 'share_network_update', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'network_allocation_update', mock.Mock()) @mock.patch.object(db_api, 'network_allocations_get_for_share_server', mock.Mock(return_value=[fake_network_allocation])) def test_deallocate_network_neutron_api_exception(self): share_srv = {'id': fake_share_server['id']} share_srv['network_allocations'] = [fake_network_allocation] delete_port = mock.patch.object(self.plugin.neutron_api, 'delete_port').start() delete_port.side_effect = exception.NetworkException self.assertRaises(exception.NetworkException, self.plugin.deallocate_network, self.fake_context, share_srv) db_api.network_allocation_update.assert_called_once_with( self.fake_context, fake_network_allocation['id'], {'status': constants.STATUS_ERROR}) delete_port.stop() @mock.patch.object(db_api, 'share_network_subnet_update', mock.Mock()) def test_save_neutron_network_data(self): neutron_nw_info = { 'provider:network_type': 'vlan', 'provider:segmentation_id': 1000, 'mtu': 1509, 'router:external': True, } share_nw_update_dict = { 'network_type': 'vlan', 'segmentation_id': 1000, 'mtu': 1509, } with mock.patch.object(self.plugin.neutron_api, 'get_network', mock.Mock(return_value=neutron_nw_info)): is_external_network = self.plugin._save_neutron_network_data( self.fake_context, fake_share_network_subnet) self.plugin.neutron_api.get_network.assert_called_once_with( fake_share_network_subnet['neutron_net_id']) self.plugin.db.share_network_subnet_update.assert_called_once_with( self.fake_context, 
fake_share_network_subnet['id'], share_nw_update_dict) self.assertTrue(is_external_network) @mock.patch.object(db_api, 'share_network_subnet_update', mock.Mock()) def test_save_neutron_network_data_multi_segment(self): share_nw_update_dict = { 'network_type': 'vlan', 'segmentation_id': 3926, 'mtu': 1509 } config_data = { 'DEFAULT': { 'neutron_physical_net_name': 'net1', } } self.mock_object(self.plugin.neutron_api, 'get_network') self.plugin.neutron_api.get_network.return_value = fake_nw_info with test_utils.create_temp_config_with_opts(config_data): self.plugin._save_neutron_network_data(self.fake_context, fake_share_network_subnet) self.plugin.neutron_api.get_network.assert_called_once_with( fake_share_network_subnet['neutron_net_id']) self.plugin.db.share_network_subnet_update.assert_called_once_with( self.fake_context, fake_share_network_subnet['id'], share_nw_update_dict) @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_save_neutron_network_data_multi_segment_without_ident(self): config_data = { 'DEFAULT': { 'neutron_physical_net_name': 'net100', } } self.mock_object(self.plugin.neutron_api, 'get_network') self.plugin.neutron_api.get_network.return_value = fake_nw_info with test_utils.create_temp_config_with_opts(config_data): self.assertRaises(exception.NetworkBadConfigurationException, self.plugin._save_neutron_network_data, self.fake_context, fake_share_network_subnet) @mock.patch.object(db_api, 'share_network_update', mock.Mock()) def test_save_neutron_network_data_multi_segment_without_cfg(self): self.mock_object(self.plugin.neutron_api, 'get_network') self.plugin.neutron_api.get_network.return_value = fake_nw_info self.assertRaises(exception.NetworkBadConfigurationException, self.plugin._save_neutron_network_data, self.fake_context, fake_share_network_subnet) @mock.patch.object(db_api, 'share_network_subnet_update', mock.Mock()) def test_save_neutron_subnet_data(self): neutron_subnet_info = fake_neutron_subnet subnet_value = { 
'cidr': '10.0.0.0/24', 'ip_version': 4, 'gateway': '10.0.0.1', } with mock.patch.object(self.plugin.neutron_api, 'get_subnet', mock.Mock(return_value=neutron_subnet_info)): self.plugin._save_neutron_subnet_data(self.fake_context, fake_share_network_subnet) self.plugin.neutron_api.get_subnet.assert_called_once_with( fake_share_network_subnet['neutron_subnet_id']) self.plugin.db.share_network_subnet_update.assert_called_once_with( self.fake_context, fake_share_network_subnet['id'], subnet_value) def test_has_network_provider_extension_true(self): extensions = {neutron_constants.PROVIDER_NW_EXT: {}} with mock.patch.object(self.plugin.neutron_api, 'list_extensions', mock.Mock(return_value=extensions)): result = self.plugin._has_provider_network_extension() self.plugin.neutron_api.list_extensions.assert_any_call() self.assertTrue(result) def test_has_network_provider_extension_false(self): with mock.patch.object(self.plugin.neutron_api, 'list_extensions', mock.Mock(return_value={})): result = self.plugin._has_provider_network_extension() self.plugin.neutron_api.list_extensions.assert_any_call() self.assertFalse(result) @ddt.ddt class NeutronSingleNetworkPluginTest(test.TestCase): def setUp(self): super(NeutronSingleNetworkPluginTest, self).setUp() self.context = 'fake_context' def test_init_valid(self): fake_net_id = 'fake_net_id' fake_subnet_id = 'fake_subnet_id' config_data = { 'DEFAULT': { 'neutron_net_id': fake_net_id, 'neutron_subnet_id': fake_subnet_id, } } fake_net = {'subnets': ['fake1', 'fake2', fake_subnet_id]} self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): instance = plugin.NeutronSingleNetworkPlugin() self.assertEqual(fake_net_id, instance.net) self.assertEqual(fake_subnet_id, instance.subnet) neutron_api.API.get_network.assert_called_once_with(fake_net_id) @ddt.data( {'net': None, 'subnet': None}, {'net': 'fake_net_id', 'subnet': None}, {'net': None, 'subnet': 
'fake_subnet_id'}) @ddt.unpack def test_init_invalid(self, net, subnet): config_data = dict() # Simulate absence of set values if net: config_data['neutron_net_id'] = net if subnet: config_data['neutron_subnet_id'] = subnet config_data = dict(DEFAULT=config_data) with test_utils.create_temp_config_with_opts(config_data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.NeutronSingleNetworkPlugin) @ddt.data({}, {'subnets': []}, {'subnets': ['different_foo_subnet']}) def test_init_subnet_does_not_belong_to_net(self, fake_net): fake_net_id = 'fake_net_id' config_data = { 'DEFAULT': { 'neutron_net_id': fake_net_id, 'neutron_subnet_id': 'fake_subnet_id', } } self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.NeutronSingleNetworkPlugin) neutron_api.API.get_network.assert_called_once_with(fake_net_id) def _get_neutron_network_plugin_instance( self, config_data=None, label=None): if not config_data: fake_subnet_id = 'fake_subnet_id' config_data = { 'DEFAULT': { 'neutron_net_id': 'fake_net_id', 'neutron_subnet_id': fake_subnet_id, } } fake_net = {'subnets': [fake_subnet_id]} self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): instance = plugin.NeutronSingleNetworkPlugin(label=label) return instance def test___update_share_network_net_data_same_values(self): instance = self._get_neutron_network_plugin_instance() share_network = { 'neutron_net_id': instance.net, 'neutron_subnet_id': instance.subnet, } result = instance._update_share_network_net_data( self.context, share_network) self.assertEqual(share_network, result) def test___update_share_network_net_data_different_values_empty(self): instance = self._get_neutron_network_plugin_instance() share_network_input = { 'id': 'fake_share_network_id', } 
share_network_result = { 'neutron_net_id': instance.net, 'neutron_subnet_id': instance.subnet, } self.mock_object( instance.db, 'share_network_subnet_update', mock.Mock(return_value='foo')) instance._update_share_network_net_data( self.context, share_network_input) instance.db.share_network_subnet_update.assert_called_once_with( self.context, share_network_input['id'], share_network_result) @ddt.data( {'n': 'fake_net_id', 's': 'bar'}, {'n': 'foo', 's': 'fake_subnet_id'}) @ddt.unpack def test___update_share_network_net_data_different_values(self, n, s): instance = self._get_neutron_network_plugin_instance() share_network = { 'id': 'fake_share_network_id', 'neutron_net_id': n, 'neutron_subnet_id': s, } self.mock_object( instance.db, 'share_network_update', mock.Mock(return_value=share_network)) self.assertRaises( exception.NetworkBadConfigurationException, instance._update_share_network_net_data, self.context, share_network) self.assertFalse(instance.db.share_network_update.called) def test_allocate_network(self): self.mock_object(plugin.NeutronNetworkPlugin, 'allocate_network') plugin.NeutronNetworkPlugin.allocate_network.return_value = [ fake_neutron_port, fake_neutron_port] instance = self._get_neutron_network_plugin_instance() share_server = 'fake_share_server' share_network = {'id': 'fake_share_network'} share_network_subnet = {'id': 'fake_share_network_subnet'} share_network_subnet_upd = {'id': 'updated_fake_share_network_subnet'} count = 2 device_owner = 'fake_device_owner' self.mock_object( instance, '_update_share_network_net_data', mock.Mock(return_value=share_network_subnet_upd)) instance.allocate_network( self.context, share_server, share_network, share_network_subnet, count=count, device_owner=device_owner) instance._update_share_network_net_data.assert_called_once_with( self.context, share_network_subnet) plugin.NeutronNetworkPlugin.allocate_network.assert_called_once_with( self.context, share_server, share_network, share_network_subnet_upd, 
count=count, device_owner=device_owner) def test_manage_network_allocations(self): allocations = ['192.168.10.10', 'fd12::2000'] instance = self._get_neutron_network_plugin_instance() parent = self.mock_object( plugin.NeutronNetworkPlugin, 'manage_network_allocations', mock.Mock(return_value=['fd12::2000'])) self.mock_object( instance, '_update_share_network_net_data', mock.Mock(return_value=fake_share_network_subnet)) result = instance.manage_network_allocations( self.context, allocations, fake_share_server, fake_share_network, fake_share_network_subnet) self.assertEqual(['fd12::2000'], result) instance._update_share_network_net_data.assert_called_once_with( self.context, fake_share_network_subnet) parent.assert_called_once_with( self.context, allocations, fake_share_server, fake_share_network, fake_share_network_subnet) def test_manage_network_allocations_admin(self): allocations = ['192.168.10.10', 'fd12::2000'] instance = self._get_neutron_network_plugin_instance(label='admin') parent = self.mock_object( plugin.NeutronNetworkPlugin, 'manage_network_allocations', mock.Mock(return_value=['fd12::2000'])) share_network_dict = { 'project_id': instance.neutron_api.admin_project_id, 'neutron_net_id': 'fake_net_id', 'neutron_subnet_id': 'fake_subnet_id', } result = instance.manage_network_allocations( self.context, allocations, fake_share_server, share_network_subnet=share_network_dict) self.assertEqual(['fd12::2000'], result) parent.assert_called_once_with( self.context, allocations, fake_share_server, None, share_network_dict) @ddt.ddt class NeutronBindNetworkPluginTest(test.TestCase): def setUp(self): super(NeutronBindNetworkPluginTest, self).setUp() self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) self.has_binding_ext_mock = self.mock_object( neutron_api.API, '_has_port_binding_extension') self.has_binding_ext_mock.return_value = True self.bind_plugin = self._get_neutron_network_plugin_instance() 
self.bind_plugin.db = db_api self.sleep_mock = self.mock_object(time, 'sleep') self.fake_share_network_multi = dict(fake_share_network_multi) def _get_neutron_network_plugin_instance(self, config_data=None): if config_data is None: return plugin.NeutronBindNetworkPlugin() with test_utils.create_temp_config_with_opts(config_data): return plugin.NeutronBindNetworkPlugin() def test_wait_for_bind(self): self.mock_object(self.bind_plugin.neutron_api, 'show_port') self.bind_plugin.neutron_api.show_port.return_value = fake_neutron_port self.bind_plugin._wait_for_ports_bind([fake_neutron_port], fake_share_server) self.bind_plugin.neutron_api.show_port.assert_called_once_with( fake_neutron_port['id']) self.sleep_mock.assert_not_called() def test_wait_for_bind_error(self): fake_neut_port = copy.copy(fake_neutron_port) fake_neut_port['status'] = 'ERROR' self.mock_object(self.bind_plugin.neutron_api, 'show_port') self.bind_plugin.neutron_api.show_port.return_value = fake_neut_port self.assertRaises(exception.NetworkException, self.bind_plugin._wait_for_ports_bind, [fake_neut_port, fake_neut_port], fake_share_server) self.bind_plugin.neutron_api.show_port.assert_called_once_with( fake_neutron_port['id']) self.sleep_mock.assert_not_called() @ddt.data(('DOWN', 'ACTIVE'), ('DOWN', 'DOWN'), ('ACTIVE', 'DOWN')) def test_wait_for_bind_two_ports_no_bind(self, state): fake_neut_port1 = copy.copy(fake_neutron_port) fake_neut_port1['status'] = state[0] fake_neut_port2 = copy.copy(fake_neutron_port) fake_neut_port2['status'] = state[1] self.mock_object(self.bind_plugin.neutron_api, 'show_port') self.bind_plugin.neutron_api.show_port.side_effect = ( [fake_neut_port1, fake_neut_port2] * 20) self.assertRaises(exception.NetworkBindException, self.bind_plugin._wait_for_ports_bind, [fake_neut_port1, fake_neut_port2], fake_share_server) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', 
mock.Mock(return_value=fake_share_server)) def test_allocate_network_one_allocation(self): self.mock_object(self.bind_plugin, '_has_provider_network_extension') self.bind_plugin._has_provider_network_extension.return_value = True save_nw_data = self.mock_object(self.bind_plugin, '_save_neutron_network_data', mock.Mock(return_value=False)) save_subnet_data = self.mock_object(self.bind_plugin, '_save_neutron_subnet_data') self.mock_object(self.bind_plugin, '_wait_for_ports_bind') neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1] self.mock_object(neutron_host_id_opts, 'default') neutron_host_id_opts.default = 'foohost1' self.mock_object(db_api, 'network_allocation_create') db_api.network_allocation_create.return_value = fake_network_allocation self.mock_object(self.bind_plugin.neutron_api, 'get_network') self.bind_plugin.neutron_api.get_network.return_value = ( fake_neutron_network) with mock.patch.object(self.bind_plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.bind_plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, fake_share_network_subnet, allocation_info={'count': 1}) self.bind_plugin._has_provider_network_extension.assert_any_call() save_nw_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) save_subnet_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) expected_kwargs = { 'binding:vnic_type': 'baremetal', 'host_id': 'foohost1', 'network_id': fake_share_network_subnet['neutron_net_id'], 'subnet_id': fake_share_network_subnet['neutron_subnet_id'], 'device_owner': 'manila:share', 'device_id': fake_share_network['id'], 'name': fake_share_network['id'] + '_0', 'admin_state_up': True, } self.bind_plugin.neutron_api.create_port.assert_called_once_with( fake_share_network['project_id'], **expected_kwargs) db_api.network_allocation_create.assert_called_once_with( self.fake_context, fake_network_allocation) 
self.bind_plugin._wait_for_ports_bind.assert_called_once_with( [db_api.network_allocation_create( self.fake_context, fake_network_allocation)], fake_share_server) @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation_multi)) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network_multi)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test_allocate_network_multi_segment(self): network_allocation_update_data = { 'network_type': fake_nw_info['segments'][0]['provider:network_type'], 'segmentation_id': fake_nw_info['segments'][0]['provider:segmentation_id'], } network_update_data = dict(network_allocation_update_data) network_update_data['mtu'] = fake_nw_info['mtu'] fake_network_allocation_multi_updated = dict( fake_network_allocation_multi) fake_network_allocation_multi_updated.update( network_allocation_update_data) fake_share_network_multi_updated = dict(fake_share_network_multi) fake_share_network_multi_updated.update(network_update_data) fake_share_network_multi_updated.update(fake_neutron_subnet) config_data = { 'DEFAULT': { 'neutron_net_id': 'fake net id', 'neutron_subnet_id': 'fake subnet id', 'neutron_physical_net_name': 'net1', } } self.bind_plugin = self._get_neutron_network_plugin_instance( config_data) self.bind_plugin.db = db_api self.mock_object(self.bind_plugin, '_has_provider_network_extension') self.bind_plugin._has_provider_network_extension.return_value = True self.mock_object(self.bind_plugin, '_wait_for_ports_bind') neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1] self.mock_object(neutron_host_id_opts, 'default') neutron_host_id_opts.default = 'foohost1' self.mock_object(db_api, 'network_allocation_create') db_api.network_allocation_create.return_value = ( fake_network_allocation_multi) self.mock_object(db_api, 'network_allocation_update') db_api.network_allocation_update.return_value = ( 
fake_network_allocation_multi_updated) self.mock_object(self.bind_plugin.neutron_api, 'get_network') self.bind_plugin.neutron_api.get_network.return_value = ( fake_neutron_network_multi) self.mock_object(self.bind_plugin.neutron_api, 'get_subnet') self.bind_plugin.neutron_api.get_subnet.return_value = ( fake_neutron_subnet) self.mock_object(db_api, 'share_network_subnet_update') with mock.patch.object(self.bind_plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.bind_plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, self.fake_share_network_multi, allocation_info={'count': 1}) self.bind_plugin._has_provider_network_extension.assert_any_call() expected_kwargs = { 'binding:vnic_type': 'baremetal', 'host_id': 'foohost1', 'network_id': fake_share_network_multi['neutron_net_id'], 'subnet_id': fake_share_network_multi['neutron_subnet_id'], 'device_owner': 'manila:share', 'device_id': fake_share_network_multi['id'], 'name': fake_share_network['id'] + '_0', 'admin_state_up': True, } self.bind_plugin.neutron_api.create_port.assert_called_once_with( fake_share_network_multi['project_id'], **expected_kwargs) db_api.network_allocation_create.assert_called_once_with( self.fake_context, fake_network_allocation_multi) db_api.share_network_subnet_update.assert_called_with( self.fake_context, fake_share_network_multi['id'], network_update_data) network_allocation_update_data['cidr'] = ( fake_neutron_subnet['cidr']) network_allocation_update_data['ip_version'] = ( fake_neutron_subnet['ip_version']) db_api.network_allocation_update.assert_called_once_with( self.fake_context, fake_neutron_port['id'], network_allocation_update_data) def test_extend_network_allocations(self): old_network_allocation = copy.deepcopy(fake_network_allocation) fake_network = copy.deepcopy(fake_neutron_network_multi) fake_ss = copy.deepcopy(fake_share_server) fake_ss["share_network_subnet"] = fake_share_network_subnet fake_host_id = 
"fake_host_id" fake_physical_net = "net2" fake_port_id = old_network_allocation["id"] fake_vnic_type = "baremetal" config_data = { 'DEFAULT': { "neutron_host_id": fake_host_id, "neutron_vnic_type": fake_vnic_type, 'neutron_physical_net_name': fake_physical_net, } } self.bind_plugin = self._get_neutron_network_plugin_instance( config_data ) self.mock_object( self.bind_plugin.neutron_api, "get_network", mock.Mock(return_value=fake_network), ) self.mock_object(self.bind_plugin.neutron_api, "bind_port_to_host") self.mock_object( self.bind_plugin.db, "network_allocations_get_for_share_server", mock.Mock(return_value=[old_network_allocation])) # calling the extend_network_allocations method self.bind_plugin.extend_network_allocations(self.fake_context, fake_ss) # testing the calls, we expect the port to be bound to the current host # and the new network allocation to be created self.bind_plugin.neutron_api.bind_port_to_host.assert_called_once_with( fake_port_id, fake_host_id, fake_vnic_type ) def test_delete_extended_allocations(self): old_network_allocation = copy.deepcopy(fake_network_allocation) fake_ss = copy.deepcopy(fake_share_server) fake_host_id = "fake_host_id" fake_physical_net = "net2" fake_port_id = old_network_allocation["id"] fake_vnic_type = "baremetal" config_data = { "DEFAULT": { "neutron_host_id": fake_host_id, "neutron_vnic_type": fake_vnic_type, "neutron_physical_net_name": fake_physical_net, } } self.bind_plugin = self._get_neutron_network_plugin_instance( config_data) self.mock_object(self.bind_plugin.neutron_api, "delete_port_binding") self.mock_object(self.bind_plugin.db, "network_allocation_delete") self.mock_object(self.bind_plugin.db, "network_allocations_get_for_share_server", mock.Mock(return_value=[old_network_allocation])) self.bind_plugin.delete_extended_allocations(self.fake_context, fake_ss) neutron_api = self.bind_plugin.neutron_api neutron_api.delete_port_binding.assert_called_once_with( fake_port_id, fake_host_id) @ddt.unpack def 
test_cutover_network_allocation(self): fake_alloc = copy.deepcopy(fake_network_allocation) fake_network = copy.deepcopy(fake_neutron_network_multi) fake_old_ss = copy.deepcopy(fake_share_server) fake_old_ss["share_network_subnet"] = fake_share_network_subnet fake_dest_ss = copy.deepcopy(fake_share_server) fake_dest_ss["host"] = "fake_host2@backend2#pool2" fake_old_host = share_utils.extract_host(fake_old_ss["host"], "host") fake_host_id = "fake_host_id" fake_physical_net = "net2" fake_port_id = fake_alloc["id"] fake_vnic_type = "baremetal" config_data = { "DEFAULT": { "neutron_host_id": fake_host_id, "neutron_vnic_type": fake_vnic_type, "neutron_physical_net_name": fake_physical_net, } } self.bind_plugin = self._get_neutron_network_plugin_instance( config_data) self.mock_object(self.bind_plugin.neutron_api, "get_network", mock.Mock(return_value=fake_network)) self.mock_object(self.bind_plugin.neutron_api, "bind_port_to_host") self.mock_object(self.bind_plugin.db, "network_allocation_create") self.mock_object(self.bind_plugin.db, "network_allocations_get_for_share_server", mock.Mock(return_value=[fake_alloc])) neutron_api = self.bind_plugin.neutron_api db_api = self.bind_plugin.db self.mock_object(neutron_api, "activate_port_binding") self.mock_object(neutron_api, "delete_port_binding") self.mock_object(db_api, "network_allocation_update") self.mock_object(db_api, "network_allocation_delete") self.mock_object(db_api, "share_network_subnet_update") self.bind_plugin.cutover_network_allocations( self.fake_context, fake_old_ss) neutron_api.activate_port_binding.assert_called_once_with( fake_port_id, fake_host_id) neutron_api.delete_port_binding.assert_called_once_with( fake_port_id, fake_old_host) @ddt.data({ 'neutron_binding_profiles': None, 'binding_profiles': {} }, { 'neutron_binding_profiles': 'fake_profile', 'binding_profiles': {} }, { 'neutron_binding_profiles': 'fake_profile', 'binding_profiles': None }, { 'neutron_binding_profiles': 'fake_profile', 
'binding_profiles': { 'fake_profile': { 'neutron_switch_id': 'fake switch id', 'neutron_port_id': 'fake port id', 'neutron_switch_info': 'switch_ip: 127.0.0.1' } } }, { 'neutron_binding_profiles': None, 'binding_profiles': { 'fake_profile': { 'neutron_switch_id': 'fake switch id', 'neutron_port_id': 'fake port id', 'neutron_switch_info': 'switch_ip: 127.0.0.1' } } }, { 'neutron_binding_profiles': 'fake_profile_one,fake_profile_two', 'binding_profiles': { 'fake_profile_one': { 'neutron_switch_id': 'fake switch id 1', 'neutron_port_id': 'fake port id 1', 'neutron_switch_info': 'switch_ip: 127.0.0.1' }, 'fake_profile_two': { 'neutron_switch_id': 'fake switch id 2', 'neutron_port_id': 'fake port id 2', 'neutron_switch_info': 'switch_ip: 127.0.0.2' } } }, { 'neutron_binding_profiles': 'fake_profile_two', 'binding_profiles': { 'fake_profile_one': { 'neutron_switch_id': 'fake switch id 1', 'neutron_port_id': 'fake port id 1', 'neutron_switch_info': 'switch_ip: 127.0.0.1' }, 'fake_profile_two': { 'neutron_switch_id': 'fake switch id 2', 'neutron_port_id': 'fake port id 2', 'neutron_switch_info': 'switch_ip: 127.0.0.2' } } }) @ddt.unpack @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test__get_port_create_args(self, neutron_binding_profiles, binding_profiles): fake_device_owner = 'share' fake_host_id = 'fake host' neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1] self.mock_object(neutron_host_id_opts, 'default') neutron_host_id_opts.default = fake_host_id config_data = { 'DEFAULT': { 'neutron_net_id': fake_neutron_network['id'], 'neutron_subnet_id': fake_neutron_network['subnets'][0] } } # Simulate absence of set values if neutron_binding_profiles: config_data['DEFAULT'][ 'neutron_binding_profiles'] = neutron_binding_profiles if binding_profiles: for name, binding_profile in binding_profiles.items(): config_data[name] = 
binding_profile instance = self._get_neutron_network_plugin_instance(config_data) create_args = instance._get_port_create_args(fake_share_server, fake_share_network_subnet, fake_device_owner) expected_create_args = { 'binding:vnic_type': 'baremetal', 'host_id': fake_host_id, 'network_id': fake_share_network_subnet['neutron_net_id'], 'subnet_id': fake_share_network_subnet['neutron_subnet_id'], 'device_owner': 'manila:' + fake_device_owner, 'device_id': fake_share_server['id'], 'name': fake_share_server['id'] + '_0', 'admin_state_up': True, } if neutron_binding_profiles: expected_create_args['binding:profile'] = { 'local_link_information': [] } local_links = expected_create_args[ 'binding:profile']['local_link_information'] for profile in neutron_binding_profiles.split(','): if binding_profiles is None: binding_profile = {} else: binding_profile = binding_profiles.get(profile, {}) local_links.append({ 'port_id': binding_profile.get('neutron_port_id', None), 'switch_id': binding_profile.get('neutron_switch_id', None) }) switch_info = binding_profile.get('neutron_switch_info', None) if switch_info is None: local_links[-1]['switch_info'] = None else: local_links[-1]['switch_info'] = cfg.types.Dict()( switch_info) self.assertEqual(expected_create_args, create_args) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test__get_port_create_args_host_id(self): fake_device_owner = 'share' fake_host_id = 'fake host' config_data = { 'DEFAULT': { 'neutron_net_id': fake_neutron_network['id'], 'neutron_subnet_id': fake_neutron_network['subnets'][0], 'neutron_host_id': fake_host_id } } instance = self._get_neutron_network_plugin_instance(config_data) create_args = instance._get_port_create_args(fake_share_server, fake_share_network_subnet, fake_device_owner) expected_create_args = { 'binding:vnic_type': 'baremetal', 'host_id': fake_host_id, 
'network_id': fake_share_network_subnet['neutron_net_id'], 'subnet_id': fake_share_network_subnet['neutron_subnet_id'], 'device_owner': 'manila:' + fake_device_owner, 'device_id': fake_share_server['id'], 'name': fake_share_server['id'] + '_0', 'admin_state_up': True, } self.assertEqual(expected_create_args, create_args) @ddt.ddt class NeutronBindSingleNetworkPluginTest(test.TestCase): def setUp(self): super(NeutronBindSingleNetworkPluginTest, self).setUp() self.context = 'fake_context' self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) self.has_binding_ext_mock = self.mock_object( neutron_api.API, '_has_port_binding_extension') self.has_binding_ext_mock.return_value = True self.bind_plugin = plugin.NeutronBindNetworkPlugin() self.bind_plugin.db = db_api self.sleep_mock = self.mock_object(time, 'sleep') self.bind_plugin = self._get_neutron_network_plugin_instance() self.bind_plugin.db = db_api def _get_neutron_network_plugin_instance(self, config_data=None): if not config_data: fake_net_id = 'fake net id' fake_subnet_id = 'fake subnet id' config_data = { 'DEFAULT': { 'neutron_net_id': fake_net_id, 'neutron_subnet_id': fake_subnet_id, 'neutron_physical_net_name': 'net1', } } fake_net = {'subnets': ['fake1', 'fake2', fake_subnet_id]} self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): return plugin.NeutronBindSingleNetworkPlugin() def test_allocate_network(self): self.mock_object(plugin.NeutronNetworkPlugin, 'allocate_network') plugin.NeutronNetworkPlugin.allocate_network.return_value = [ 'port1', 'port2'] instance = self._get_neutron_network_plugin_instance() share_server = 'fake_share_server' share_network = {} share_network_subnet = {'neutron_net_id': {}} share_network_upd = {'neutron_net_id': {'upd': True}} count = 2 device_owner = 'fake_device_owner' self.mock_object( instance, '_update_share_network_net_data', 
mock.Mock(return_value=share_network_upd)) self.mock_object(instance, '_wait_for_ports_bind', mock.Mock()) instance.allocate_network( self.context, share_server, share_network, share_network_subnet, count=count, device_owner=device_owner) instance._update_share_network_net_data.assert_called_once_with( self.context, share_network_subnet) plugin.NeutronNetworkPlugin.allocate_network.assert_called_once_with( self.context, share_server, share_network, share_network_upd, count=count, device_owner=device_owner) instance._wait_for_ports_bind.assert_called_once_with( ['port1', 'port2'], share_server) def test_init_valid(self): fake_net_id = 'fake_net_id' fake_subnet_id = 'fake_subnet_id' config_data = { 'DEFAULT': { 'neutron_net_id': fake_net_id, 'neutron_subnet_id': fake_subnet_id, } } fake_net = {'subnets': ['fake1', 'fake2', fake_subnet_id]} self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): instance = plugin.NeutronSingleNetworkPlugin() self.assertEqual(fake_net_id, instance.net) self.assertEqual(fake_subnet_id, instance.subnet) neutron_api.API.get_network.assert_called_once_with(fake_net_id) @ddt.data( {'net': None, 'subnet': None}, {'net': 'fake_net_id', 'subnet': None}, {'net': None, 'subnet': 'fake_subnet_id'}) @ddt.unpack def test_init_invalid(self, net, subnet): config_data = dict() # Simulate absence of set values if net: config_data['neutron_net_id'] = net if subnet: config_data['neutron_subnet_id'] = subnet config_data = dict(DEFAULT=config_data) with test_utils.create_temp_config_with_opts(config_data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.NeutronSingleNetworkPlugin) @ddt.data({}, {'subnets': []}, {'subnets': ['different_foo_subnet']}) def test_init_subnet_does_not_belong_to_net(self, fake_net): fake_net_id = 'fake_net_id' config_data = { 'DEFAULT': { 'neutron_net_id': fake_net_id, 'neutron_subnet_id': 'fake_subnet_id', } } 
self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.NeutronSingleNetworkPlugin) neutron_api.API.get_network.assert_called_once_with(fake_net_id) def _get_neutron_single_network_plugin_instance(self): fake_subnet_id = 'fake_subnet_id' config_data = { 'DEFAULT': { 'neutron_net_id': 'fake_net_id', 'neutron_subnet_id': fake_subnet_id, } } fake_net = {'subnets': [fake_subnet_id]} self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) with test_utils.create_temp_config_with_opts(config_data): instance = plugin.NeutronSingleNetworkPlugin() return instance def test___update_share_network_net_data_same_values(self): instance = self._get_neutron_single_network_plugin_instance() share_network = { 'neutron_net_id': instance.net, 'neutron_subnet_id': instance.subnet, } result = instance._update_share_network_net_data( self.context, share_network) self.assertEqual(share_network, result) def test___update_share_network_net_data_different_values_empty(self): instance = self._get_neutron_single_network_plugin_instance() share_network_subnet_input = { 'id': 'fake_share_network_id', } share_network_result = { 'neutron_net_id': instance.net, 'neutron_subnet_id': instance.subnet, } self.mock_object( instance.db, 'share_network_subnet_update', mock.Mock(return_value='foo')) instance._update_share_network_net_data( self.context, share_network_subnet_input) instance.db.share_network_subnet_update.assert_called_once_with( self.context, share_network_subnet_input['id'], share_network_result) @ddt.data( {'n': 'fake_net_id', 's': 'bar'}, {'n': 'foo', 's': 'fake_subnet_id'}) @ddt.unpack def test___update_share_network_net_data_different_values(self, n, s): instance = self._get_neutron_single_network_plugin_instance() share_network = { 'id': 'fake_share_network_id', 'neutron_net_id': n, 'neutron_subnet_id': 
s, } self.mock_object( instance.db, 'share_network_update', mock.Mock(return_value=share_network)) self.assertRaises( exception.NetworkBadConfigurationException, instance._update_share_network_net_data, self.context, share_network) self.assertFalse(instance.db.share_network_update.called) def test_wait_for_bind(self): self.mock_object(self.bind_plugin.neutron_api, 'show_port') self.bind_plugin.neutron_api.show_port.return_value = fake_neutron_port self.bind_plugin._wait_for_ports_bind([fake_neutron_port], fake_share_server) self.bind_plugin.neutron_api.show_port.assert_called_once_with( fake_neutron_port['id']) self.sleep_mock.assert_not_called() def test_wait_for_bind_error(self): fake_neut_port = copy.copy(fake_neutron_port) fake_neut_port['status'] = 'ERROR' self.mock_object(self.bind_plugin.neutron_api, 'show_port') self.bind_plugin.neutron_api.show_port.return_value = fake_neut_port self.assertRaises(exception.NetworkException, self.bind_plugin._wait_for_ports_bind, [fake_neut_port, fake_neut_port], fake_share_server) self.bind_plugin.neutron_api.show_port.assert_called_once_with( fake_neutron_port['id']) self.sleep_mock.assert_not_called() @ddt.data(('DOWN', 'ACTIVE'), ('DOWN', 'DOWN'), ('ACTIVE', 'DOWN')) def test_wait_for_bind_two_ports_no_bind(self, state): fake_neut_port1 = copy.copy(fake_neutron_port) fake_neut_port1['status'] = state[0] fake_neut_port2 = copy.copy(fake_neutron_port) fake_neut_port2['status'] = state[1] self.mock_object(self.bind_plugin.neutron_api, 'show_port') self.bind_plugin.neutron_api.show_port.side_effect = ( [fake_neut_port1, fake_neut_port2] * 20) self.assertRaises(exception.NetworkBindException, self.bind_plugin._wait_for_ports_bind, [fake_neut_port1, fake_neut_port2], fake_share_server) @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', 
mock.Mock(return_value=fake_share_server)) def test_allocate_network_one_allocation(self): self.mock_object(self.bind_plugin, '_has_provider_network_extension') self.bind_plugin._has_provider_network_extension.return_value = True save_nw_data = self.mock_object(self.bind_plugin, '_save_neutron_network_data', mock.Mock(return_value=False)) save_subnet_data = self.mock_object(self.bind_plugin, '_save_neutron_subnet_data') self.mock_object(self.bind_plugin, '_wait_for_ports_bind') neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1] self.mock_object(neutron_host_id_opts, 'default') neutron_host_id_opts.default = 'foohost1' self.mock_object(db_api, 'network_allocation_create') with mock.patch.object(self.bind_plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.bind_plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, fake_share_network_subnet, allocation_info={'count': 1}) self.bind_plugin._has_provider_network_extension.assert_any_call() save_nw_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) save_subnet_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) expected_kwargs = { 'binding:vnic_type': 'baremetal', 'host_id': 'foohost1', 'network_id': fake_share_network_subnet['neutron_net_id'], 'subnet_id': fake_share_network_subnet['neutron_subnet_id'], 'device_owner': 'manila:share', 'device_id': fake_share_network['id'], 'name': fake_share_network['id'] + '_0', 'admin_state_up': True, } self.bind_plugin.neutron_api.create_port.assert_called_once_with( fake_share_network['project_id'], **expected_kwargs) db_api.network_allocation_create.assert_called_once_with( self.fake_context, fake_network_allocation) self.bind_plugin._wait_for_ports_bind.assert_called_once_with( [db_api.network_allocation_create( self.fake_context, fake_network_allocation)], fake_share_server) @ddt.data({ 'neutron_binding_profiles': None, 
'binding_profiles': {} }, { 'neutron_binding_profiles': 'fake_profile', 'binding_profiles': {} }, { 'neutron_binding_profiles': 'fake_profile', 'binding_profiles': None }, { 'neutron_binding_profiles': 'fake_profile', 'binding_profiles': { 'fake_profile': { 'neutron_switch_id': 'fake switch id', 'neutron_port_id': 'fake port id', 'neutron_switch_info': 'switch_ip: 127.0.0.1' } } }, { 'neutron_binding_profiles': None, 'binding_profiles': { 'fake_profile': { 'neutron_switch_id': 'fake switch id', 'neutron_port_id': 'fake port id', 'neutron_switch_info': 'switch_ip: 127.0.0.1' } } }, { 'neutron_binding_profiles': 'fake_profile_one,fake_profile_two', 'binding_profiles': { 'fake_profile_one': { 'neutron_switch_id': 'fake switch id 1', 'neutron_port_id': 'fake port id 1', 'neutron_switch_info': 'switch_ip: 127.0.0.1' }, 'fake_profile_two': { 'neutron_switch_id': 'fake switch id 2', 'neutron_port_id': 'fake port id 2', 'neutron_switch_info': 'switch_ip: 127.0.0.2' } } }, { 'neutron_binding_profiles': 'fake_profile_two', 'binding_profiles': { 'fake_profile_one': { 'neutron_switch_id': 'fake switch id 1', 'neutron_port_id': 'fake port id 1', 'neutron_switch_info': 'switch_ip: 127.0.0.1' }, 'fake_profile_two': { 'neutron_switch_id': 'fake switch id 2', 'neutron_port_id': 'fake port id 2', 'neutron_switch_info': 'switch_ip: 127.0.0.2' } } }) @ddt.unpack @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test__get_port_create_args(self, neutron_binding_profiles, binding_profiles): fake_device_owner = 'share' fake_host_id = 'fake host' neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1] self.mock_object(neutron_host_id_opts, 'default') neutron_host_id_opts.default = fake_host_id config_data = { 'DEFAULT': { 'neutron_net_id': fake_neutron_network['id'], 'neutron_subnet_id': fake_neutron_network['subnets'][0] } } # Simulate absence 
of set values if neutron_binding_profiles: config_data['DEFAULT'][ 'neutron_binding_profiles'] = neutron_binding_profiles if binding_profiles: for name, binding_profile in binding_profiles.items(): config_data[name] = binding_profile instance = self._get_neutron_network_plugin_instance(config_data) create_args = instance._get_port_create_args(fake_share_server, fake_share_network_subnet, fake_device_owner) expected_create_args = { 'binding:vnic_type': 'baremetal', 'host_id': fake_host_id, 'network_id': fake_share_network_subnet['neutron_net_id'], 'subnet_id': fake_share_network_subnet['neutron_subnet_id'], 'device_owner': 'manila:' + fake_device_owner, 'device_id': fake_share_server['id'], 'name': fake_share_server['id'] + '_0', 'admin_state_up': True, } if neutron_binding_profiles: expected_create_args['binding:profile'] = { 'local_link_information': [] } local_links = expected_create_args[ 'binding:profile']['local_link_information'] for profile in neutron_binding_profiles.split(','): if binding_profiles is None: binding_profile = {} else: binding_profile = binding_profiles.get(profile, {}) local_links.append({ 'port_id': binding_profile.get('neutron_port_id', None), 'switch_id': binding_profile.get('neutron_switch_id', None) }) switch_info = binding_profile.get('neutron_switch_info', None) if switch_info is None: local_links[-1]['switch_info'] = None else: local_links[-1]['switch_info'] = cfg.types.Dict()( switch_info) self.assertEqual(expected_create_args, create_args) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test__get_port_create_args_host_id(self): fake_device_owner = 'share' fake_host_id = 'fake host' config_data = { 'DEFAULT': { 'neutron_net_id': fake_neutron_network['id'], 'neutron_subnet_id': fake_neutron_network['subnets'][0], 'neutron_host_id': fake_host_id } } instance = 
self._get_neutron_network_plugin_instance(config_data) create_args = instance._get_port_create_args(fake_share_server, fake_share_network_subnet, fake_device_owner) expected_create_args = { 'binding:vnic_type': 'baremetal', 'host_id': fake_host_id, 'network_id': fake_share_network_subnet['neutron_net_id'], 'subnet_id': fake_share_network_subnet['neutron_subnet_id'], 'device_owner': 'manila:' + fake_device_owner, 'device_id': fake_share_server['id'], 'name': fake_share_server['id'] + '_0', 'admin_state_up': True, } self.assertEqual(expected_create_args, create_args) class NeutronBindNetworkPluginWithNormalTypeTest(test.TestCase): def setUp(self): super(NeutronBindNetworkPluginWithNormalTypeTest, self).setUp() config_data = { 'DEFAULT': { 'neutron_vnic_type': 'normal', } } self.plugin = plugin.NeutronNetworkPlugin() self.plugin.db = db_api self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) with test_utils.create_temp_config_with_opts(config_data): self.bind_plugin = plugin.NeutronBindNetworkPlugin() self.bind_plugin.db = db_api @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test_allocate_network_one_allocation(self): self.mock_object(self.bind_plugin, '_has_provider_network_extension') self.bind_plugin._has_provider_network_extension.return_value = True save_nw_data = self.mock_object(self.bind_plugin, '_save_neutron_network_data', mock.Mock(return_value=False)) save_subnet_data = self.mock_object(self.bind_plugin, '_save_neutron_subnet_data') self.mock_object(self.bind_plugin, '_wait_for_ports_bind') neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1] self.mock_object(neutron_host_id_opts, 'default') neutron_host_id_opts.default = 'foohost1' 
self.mock_object(db_api, 'network_allocation_create') multi_seg = self.mock_object( self.bind_plugin, '_is_neutron_multi_segment') multi_seg.return_value = False with mock.patch.object(self.bind_plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.bind_plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, fake_share_network_subnet, allocation_info={'count': 1}) self.bind_plugin._has_provider_network_extension.assert_any_call() save_nw_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) save_subnet_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) expected_kwargs = { 'binding:vnic_type': 'normal', 'host_id': 'foohost1', 'network_id': fake_share_network_subnet['neutron_net_id'], 'subnet_id': fake_share_network_subnet['neutron_subnet_id'], 'device_owner': 'manila:share', 'device_id': fake_share_server['id'], 'name': fake_share_server['id'] + '_0', 'admin_state_up': True, } self.bind_plugin.neutron_api.create_port.assert_called_once_with( fake_share_network['project_id'], **expected_kwargs) db_api.network_allocation_create.assert_called_once_with( self.fake_context, fake_network_allocation) self.bind_plugin._wait_for_ports_bind.assert_not_called() def test_update_network_allocation(self): self.mock_object(self.bind_plugin, '_wait_for_ports_bind') self.mock_object(db_api, 'network_allocations_get_for_share_server') db_api.network_allocations_get_for_share_server.return_value = [ fake_neutron_port] self.bind_plugin.update_network_allocation(self.fake_context, fake_share_server) self.bind_plugin._wait_for_ports_bind.assert_called_once_with( [fake_neutron_port], fake_share_server) @ddt.ddt class NeutronBindSingleNetworkPluginWithNormalTypeTest(test.TestCase): def setUp(self): super(NeutronBindSingleNetworkPluginWithNormalTypeTest, self).setUp() fake_net_id = 'fake net id' fake_subnet_id = 'fake subnet id' config_data = { 'DEFAULT': { 
'neutron_net_id': fake_net_id, 'neutron_subnet_id': fake_subnet_id, 'neutron_vnic_type': 'normal', } } fake_net = {'subnets': ['fake1', 'fake2', fake_subnet_id]} self.mock_object( neutron_api.API, 'get_network', mock.Mock(return_value=fake_net)) self.plugin = plugin.NeutronNetworkPlugin() self.plugin.db = db_api self.fake_context = context.RequestContext(user_id='fake user', project_id='fake project', is_admin=False) with test_utils.create_temp_config_with_opts(config_data): self.bind_plugin = plugin.NeutronBindSingleNetworkPlugin() self.bind_plugin.db = db_api @mock.patch.object(db_api, 'network_allocation_create', mock.Mock(return_values=fake_network_allocation)) @mock.patch.object(db_api, 'share_network_get', mock.Mock(return_value=fake_share_network)) @mock.patch.object(db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) def test_allocate_network_one_allocation(self): self.mock_object(self.bind_plugin, '_has_provider_network_extension') self.bind_plugin._has_provider_network_extension.return_value = True save_nw_data = self.mock_object(self.bind_plugin, '_save_neutron_network_data', mock.Mock(return_value=False)) save_subnet_data = self.mock_object(self.bind_plugin, '_save_neutron_subnet_data') self.mock_object(self.bind_plugin, '_wait_for_ports_bind') neutron_host_id_opts = plugin.neutron_bind_network_plugin_opts[1] self.mock_object(neutron_host_id_opts, 'default') neutron_host_id_opts.default = 'foohost1' self.mock_object(db_api, 'network_allocation_create') with mock.patch.object(self.bind_plugin.neutron_api, 'create_port', mock.Mock(return_value=fake_neutron_port)): self.bind_plugin.allocate_network( self.fake_context, fake_share_server, fake_share_network, fake_share_network_subnet, allocation_info={'count': 1}) self.bind_plugin._has_provider_network_extension.assert_any_call() save_nw_data.assert_called_once_with(self.fake_context, fake_share_network_subnet, save_db=True) save_subnet_data.assert_called_once_with(self.fake_context, 
fake_share_network_subnet, save_db=True) expected_kwargs = { 'binding:vnic_type': 'normal', 'host_id': 'foohost1', 'network_id': fake_share_network_subnet['neutron_net_id'], 'subnet_id': fake_share_network_subnet['neutron_subnet_id'], 'device_owner': 'manila:share', 'device_id': fake_share_network['id'], 'name': fake_share_network['id'] + '_0', 'admin_state_up': True, } self.bind_plugin.neutron_api.create_port.assert_called_once_with( fake_share_network['project_id'], **expected_kwargs) db_api.network_allocation_create.assert_called_once_with( self.fake_context, fake_network_allocation) self.bind_plugin._wait_for_ports_bind.assert_not_called() def test_update_network_allocation(self): self.mock_object(self.bind_plugin, '_wait_for_ports_bind') self.mock_object(db_api, 'network_allocations_get_for_share_server') db_api.network_allocations_get_for_share_server.return_value = [ fake_neutron_port] self.bind_plugin.update_network_allocation(self.fake_context, fake_share_server) self.bind_plugin._wait_for_ports_bind.assert_called_once_with( [fake_neutron_port], fake_share_server) @ddt.data({'fix_ips': [{'ip_address': 'test_ip'}, {'ip_address': '10.78.223.129'}], 'ip_version': 4}, {'fix_ips': [{'ip_address': 'test_ip'}, {'ip_address': 'ad80::abaa:0:c2:2'}], 'ip_version': 6}, {'fix_ips': [{'ip_address': '10.78.223.129'}, {'ip_address': 'ad80::abaa:0:c2:2'}], 'ip_version': 6}, ) @ddt.unpack def test__get_matched_ip_address(self, fix_ips, ip_version): result = self.bind_plugin._get_matched_ip_address(fix_ips, ip_version) self.assertEqual(fix_ips[1]['ip_address'], result) @ddt.data({'fix_ips': [{'ip_address': 'test_ip_1'}, {'ip_address': 'test_ip_2'}], 'ip_version': (4, 6)}, {'fix_ips': [{'ip_address': 'ad80::abaa:0:c2:1'}, {'ip_address': 'ad80::abaa:0:c2:2'}], 'ip_version': (4, )}, {'fix_ips': [{'ip_address': '192.0.0.2'}, {'ip_address': '192.0.0.3'}], 'ip_version': (6, )}, {'fix_ips': [{'ip_address': '192.0.0.2/12'}, {'ip_address': '192.0.0.330'}, {'ip_address': 
'ad80::001::ad80'}, {'ip_address': 'ad80::abaa:0:c2:2/64'}], 'ip_version': (4, 6)}, ) @ddt.unpack def test__get_matched_ip_address_illegal(self, fix_ips, ip_version): for version in ip_version: self.assertRaises(exception.NetworkBadConfigurationException, self.bind_plugin._get_matched_ip_address, fix_ips, version) def _setup_include_network_info(self): data = { 'DEFAULT': { 'neutron_net_id': 'fake net id', 'neutron_subnet_id': 'fake subnet id', 'neutron_physical_net_name': 'net1', } } with test_utils.create_temp_config_with_opts(data): instance = plugin.NeutronNetworkPlugin() return instance def test_include_network_info(self): instance = self._setup_include_network_info() self.mock_object(instance, '_store_and_get_neutron_net_info') instance.include_network_info(fake_share_network) instance._store_and_get_neutron_net_info.assert_called_once_with( None, fake_share_network, save_db=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/network/test_standalone_network_plugin.py0000664000175000017500000005572000000000000026053 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt import netaddr from oslo_config import cfg from manila.common import constants from manila import context from manila import exception from manila.network import standalone_network_plugin as plugin from manila import test from manila.tests import utils as test_utils CONF = cfg.CONF fake_context = context.RequestContext( user_id='fake user', project_id='fake project', is_admin=False) fake_share_server = dict(id='fake_share_server_id') fake_share_network = dict(id='fake_share_network_id') fake_share_network_subnet = dict(id='fake_share_network_subnet_id') @ddt.ddt class StandaloneNetworkPluginTest(test.TestCase): @ddt.data('custom_config_group_name', 'DEFAULT') def test_init_only_with_required_data_v4(self, group_name): data = { group_name: { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name=group_name) self.assertEqual('10.0.0.1', instance.gateway) self.assertEqual('24', instance.mask) self.assertIsNone(instance.segmentation_id) self.assertIsNone(instance.allowed_ip_ranges) self.assertEqual(4, instance.ip_version) self.assertEqual(netaddr.IPNetwork('10.0.0.1/24'), instance.net) self.assertEqual(['10.0.0.1/24'], instance.allowed_cidrs) self.assertEqual( ('10.0.0.0', '10.0.0.1', '10.0.0.255'), instance.reserved_addresses) @ddt.data('custom_config_group_name', 'DEFAULT') def test_init_with_all_data_v4(self, group_name): data = { group_name: { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '255.255.0.0', 'standalone_network_plugin_network_type': 'vlan', 'standalone_network_plugin_segmentation_id': 1001, 'standalone_network_plugin_allowed_ip_ranges': ( '10.0.0.3-10.0.0.7,10.0.0.69-10.0.0.157,10.0.0.213'), 'network_plugin_ipv4_enabled': True, }, } allowed_cidrs = [ '10.0.0.3/32', '10.0.0.4/30', '10.0.0.69/32', '10.0.0.70/31', '10.0.0.72/29', 
'10.0.0.80/28', '10.0.0.96/27', '10.0.0.128/28', '10.0.0.144/29', '10.0.0.152/30', '10.0.0.156/31', '10.0.0.213/32', ] with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name=group_name) self.assertEqual(4, instance.ip_version) self.assertEqual('10.0.0.1', instance.gateway) self.assertEqual('255.255.0.0', instance.mask) self.assertEqual('vlan', instance.network_type) self.assertEqual(1001, instance.segmentation_id) self.assertEqual(allowed_cidrs, instance.allowed_cidrs) self.assertEqual( ['10.0.0.3-10.0.0.7', '10.0.0.69-10.0.0.157', '10.0.0.213'], instance.allowed_ip_ranges) self.assertEqual( netaddr.IPNetwork('10.0.0.1/255.255.0.0'), instance.net) self.assertEqual( ('10.0.0.0', '10.0.0.1', '10.0.255.255'), instance.reserved_addresses) @ddt.data('custom_config_group_name', 'DEFAULT') def test_init_only_with_required_data_v6(self, group_name): data = { group_name: { 'standalone_network_plugin_gateway': ( '2001:cdba::3257:9652'), 'standalone_network_plugin_mask': '48', 'network_plugin_ipv6_enabled': True, }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name=group_name) self.assertEqual( '2001:cdba::3257:9652', instance.gateway) self.assertEqual('48', instance.mask) self.assertIsNone(instance.segmentation_id) self.assertIsNone(instance.allowed_ip_ranges) self.assertEqual(6, instance.ip_version) self.assertEqual( netaddr.IPNetwork('2001:cdba::3257:9652/48'), instance.net) self.assertEqual( ['2001:cdba::3257:9652/48'], instance.allowed_cidrs) self.assertEqual( ('2001:cdba::', '2001:cdba::3257:9652', netaddr.IPAddress('2001:cdba:0:ffff:ffff:ffff:ffff:ffff').format() ), instance.reserved_addresses) @ddt.data('custom_config_group_name', 'DEFAULT') def test_init_with_all_data_v6(self, group_name): data = { group_name: { 'standalone_network_plugin_gateway': '2001:db8::0001', 'standalone_network_plugin_mask': '88', 'standalone_network_plugin_network_type': 
'vlan', 'standalone_network_plugin_segmentation_id': 3999, 'standalone_network_plugin_allowed_ip_ranges': ( '2001:db8::-2001:db8:0000:0000:0000:007f:ffff:ffff'), 'network_plugin_ipv6_enabled': True, }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name=group_name) self.assertEqual(6, instance.ip_version) self.assertEqual('2001:db8::0001', instance.gateway) self.assertEqual('88', instance.mask) self.assertEqual('vlan', instance.network_type) self.assertEqual(3999, instance.segmentation_id) self.assertEqual(['2001:db8::/89'], instance.allowed_cidrs) self.assertEqual( ['2001:db8::-2001:db8:0000:0000:0000:007f:ffff:ffff'], instance.allowed_ip_ranges) self.assertEqual( netaddr.IPNetwork('2001:db8::0001/88'), instance.net) self.assertEqual( ('2001:db8::', '2001:db8::0001', '2001:db8::ff:ffff:ffff'), instance.reserved_addresses) @ddt.data('flat', 'vlan', 'vxlan', 'gre') def test_init_with_valid_network_types_v4(self, network_type): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '255.255.0.0', 'standalone_network_plugin_network_type': network_type, 'standalone_network_plugin_segmentation_id': 1001, 'network_plugin_ipv4_enabled': True, }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin( config_group_name='DEFAULT') self.assertEqual(instance.network_type, network_type) @ddt.data( 'foo', 'foovlan', 'vlanfoo', 'foovlanbar', 'None', 'Vlan', 'vlaN') def test_init_with_fake_network_types_v4(self, fake_network_type): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '255.255.0.0', 'standalone_network_plugin_network_type': fake_network_type, 'standalone_network_plugin_segmentation_id': 1001, 'network_plugin_ipv4_enabled': True, }, } with test_utils.create_temp_config_with_opts(data): self.assertRaises( cfg.ConfigFileValueError, plugin.StandaloneNetworkPlugin, 
config_group_name='DEFAULT', ) @ddt.data('custom_config_group_name', 'DEFAULT') def test_invalid_init_without_any_config_definitions(self, group_name): self.assertRaises( exception.NetworkBadConfigurationException, plugin.StandaloneNetworkPlugin, config_group_name=group_name) @ddt.data( {}, {'gateway': '20.0.0.1'}, {'mask': '8'}, {'gateway': '20.0.0.1', 'mask': '33'}, {'gateway': '20.0.0.256', 'mask': '16'}) def test_invalid_init_required_data_improper(self, data): group_name = 'custom_group_name' if 'gateway' in data: data['standalone_network_plugin_gateway'] = data.pop('gateway') if 'mask' in data: data['standalone_network_plugin_mask'] = data.pop('mask') data = {group_name: data} with test_utils.create_temp_config_with_opts(data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.StandaloneNetworkPlugin, config_group_name=group_name) @ddt.data( 'fake', '11.0.0.0-11.0.0.5-11.0.0.11', '11.0.0.0-11.0.0.5', '10.0.10.0-10.0.10.5', '10.0.0.0-10.0.0.5,fake', '10.0.10.0-10.0.10.5,10.0.0.0-10.0.0.5', '10.0.10.0-10.0.10.5,10.0.0.10-10.0.10.5', '10.0.0.0-10.0.0.5,10.0.10.0-10.0.10.5') def test_invalid_init_incorrect_allowed_ip_ranges_v4(self, ip_range): group_name = 'DEFAULT' data = { group_name: { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '255.255.255.0', 'standalone_network_plugin_allowed_ip_ranges': ip_range, }, } with test_utils.create_temp_config_with_opts(data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.StandaloneNetworkPlugin, config_group_name=group_name) @ddt.data( {'gateway': '2001:db8::0001', 'vers': 4}, {'gateway': '10.0.0.1', 'vers': 6}) @ddt.unpack def test_invalid_init_mismatch_of_versions(self, gateway, vers): group_name = 'DEFAULT' data = { group_name: { 'standalone_network_plugin_gateway': gateway, 'standalone_network_plugin_mask': '25', }, } if vers == 4: data[group_name]['network_plugin_ipv4_enabled'] = True if vers == 6: 
data[group_name]['network_plugin_ipv4_enabled'] = False data[group_name]['network_plugin_ipv6_enabled'] = True with test_utils.create_temp_config_with_opts(data): self.assertRaises( exception.NetworkBadConfigurationException, plugin.StandaloneNetworkPlugin, config_group_name=group_name) def test_deallocate_network(self): share_server_id = 'fake_share_server_id' data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } fake_allocations = [{'id': 'fake1'}, {'id': 'fake2'}] with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object( instance.db, 'network_allocations_get_for_share_server', mock.Mock(return_value=fake_allocations)) self.mock_object(instance.db, 'network_allocation_delete') instance.deallocate_network(fake_context, share_server_id) (instance.db.network_allocations_get_for_share_server. assert_called_once_with(fake_context, share_server_id)) (instance.db.network_allocation_delete. 
assert_has_calls([ mock.call(fake_context, 'fake1'), mock.call(fake_context, 'fake2'), ])) def test_allocate_network_zero_addresses_ipv4(self): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object(instance.db, 'share_network_subnet_update') allocations = instance.allocate_network( fake_context, fake_share_server, fake_share_network, fake_share_network_subnet, count=0) self.assertEqual([], allocations) instance.db.share_network_subnet_update.assert_called_once_with( fake_context, fake_share_network_subnet['id'], dict(network_type=None, segmentation_id=None, cidr=str(instance.net.cidr), gateway=str(instance.gateway), ip_version=4, mtu=1500)) def test_allocate_network_zero_addresses_ipv6(self): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '2001:db8::0001', 'standalone_network_plugin_mask': '64', 'network_plugin_ipv6_enabled': True, }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object(instance.db, 'share_network_subnet_update') allocations = instance.allocate_network( fake_context, fake_share_server, fake_share_network, fake_share_network_subnet, count=0) self.assertEqual([], allocations) instance.db.share_network_subnet_update.assert_called_once_with( fake_context, fake_share_network_subnet['id'], dict(network_type=None, segmentation_id=None, cidr=str(instance.net.cidr), gateway=str(instance.gateway), ip_version=6, mtu=1500)) @ddt.data('admin', 'user') def test_allocate_network_one_ip_address_ipv4_no_usages_exist(self, label): data = { 'DEFAULT': { 'standalone_network_plugin_network_type': 'vlan', 'standalone_network_plugin_segmentation_id': 1003, 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = 
plugin.StandaloneNetworkPlugin(label=label) if label != 'admin': self.mock_object(instance.db, 'share_network_subnet_update') self.mock_object(instance.db, 'network_allocation_create') self.mock_object( instance.db, 'network_allocations_get_by_ip_address', mock.Mock(return_value=[])) allocations = instance.allocate_network( fake_context, fake_share_server, fake_share_network, fake_share_network_subnet) self.assertEqual(1, len(allocations)) na_data = { 'network_type': 'vlan', 'segmentation_id': 1003, 'cidr': '10.0.0.0/24', 'gateway': '10.0.0.1', 'ip_version': 4, 'mtu': 1500, } if label != 'admin': instance.db.share_network_subnet_update.assert_called_once_with( fake_context, fake_share_network_subnet['id'], na_data) na_data['share_network_subnet_id'] = \ fake_share_network_subnet['id'] instance.db.network_allocations_get_by_ip_address.assert_has_calls( [mock.call(fake_context, '10.0.0.2')]) instance.db.network_allocation_create.assert_called_once_with( fake_context, dict(share_server_id=fake_share_server['id'], ip_address='10.0.0.2', status=constants.STATUS_ACTIVE, label=label, **na_data)) def test_allocate_network_two_ip_addresses_ipv4_two_usages_exist(self): ctxt = type('FakeCtxt', (object,), {'fake': ['10.0.0.2', '10.0.0.4']}) def fake_get_allocations_by_ip_address(context, ip_address): if ip_address not in context.fake: context.fake.append(ip_address) return [] else: return context.fake data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object(instance.db, 'share_network_subnet_update') self.mock_object(instance.db, 'network_allocation_create') self.mock_object( instance.db, 'network_allocations_get_by_ip_address', mock.Mock(side_effect=fake_get_allocations_by_ip_address)) allocations = instance.allocate_network( ctxt, fake_share_server, fake_share_network, fake_share_network_subnet, count=2) 
self.assertEqual(2, len(allocations)) na_data = { 'network_type': None, 'segmentation_id': None, 'cidr': str(instance.net.cidr), 'gateway': str(instance.gateway), 'ip_version': 4, 'mtu': 1500, } instance.db.share_network_subnet_update.assert_called_once_with( ctxt, fake_share_network_subnet['id'], dict(**na_data)) instance.db.network_allocations_get_by_ip_address.assert_has_calls( [mock.call(ctxt, '10.0.0.2'), mock.call(ctxt, '10.0.0.3'), mock.call(ctxt, '10.0.0.4'), mock.call(ctxt, '10.0.0.5')]) na_data['share_network_subnet_id'] = fake_share_network_subnet['id'] instance.db.network_allocation_create.assert_has_calls([ mock.call( ctxt, dict(share_server_id=fake_share_server['id'], ip_address='10.0.0.3', status=constants.STATUS_ACTIVE, label='user', **na_data)), mock.call( ctxt, dict(share_server_id=fake_share_server['id'], ip_address='10.0.0.5', status=constants.STATUS_ACTIVE, label='user', **na_data)), ]) def test_allocate_network_no_available_ipv4_addresses(self): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '10.0.0.1', 'standalone_network_plugin_mask': '30', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() self.mock_object(instance.db, 'share_network_subnet_update') self.mock_object(instance.db, 'network_allocation_create') self.mock_object( instance.db, 'network_allocations_get_by_ip_address', mock.Mock(return_value=['not empty list'])) self.assertRaises( exception.NetworkBadConfigurationException, instance.allocate_network, fake_context, fake_share_server, fake_share_network, fake_share_network_subnet) instance.db.share_network_subnet_update.assert_called_once_with( fake_context, fake_share_network_subnet['id'], dict(network_type=None, segmentation_id=None, cidr=str(instance.net.cidr), gateway=str(instance.gateway), ip_version=4, mtu=1500)) instance.db.network_allocations_get_by_ip_address.assert_has_calls( [mock.call(fake_context, '10.0.0.2')]) def _setup_manage_network_allocations(self, 
label=None): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '192.168.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin(label=label) return instance @ddt.data('admin', None) def test_manage_network_allocations(self, label): allocations = ['192.168.0.11', '192.168.0.12', 'fd12::2000'] instance = self._setup_manage_network_allocations(label=label) if not label: self.mock_object(instance, '_verify_share_network_subnet') self.mock_object(instance.db, 'share_network_subnet_update') self.mock_object(instance.db, 'network_allocation_create') result = instance.manage_network_allocations( fake_context, allocations, fake_share_server, fake_share_network, fake_share_network_subnet) self.assertEqual(['fd12::2000'], result) network_data = { 'network_type': instance.network_type, 'segmentation_id': instance.segmentation_id, 'cidr': str(instance.net.cidr), 'gateway': str(instance.gateway), 'ip_version': instance.ip_version, 'mtu': instance.mtu, } data_list = [{ 'share_server_id': fake_share_server['id'], 'ip_address': x, 'status': constants.STATUS_ACTIVE, 'label': instance.label, } for x in ['192.168.0.11', '192.168.0.12']] data_list[0].update(network_data) data_list[1].update(network_data) if not label: instance.db.share_network_subnet_update.assert_called_once_with( fake_context, fake_share_network_subnet['id'], network_data) data_list[0]['share_network_subnet_id'] = ( fake_share_network_subnet['id']) data_list[1]['share_network_subnet_id'] = ( fake_share_network_subnet['id']) instance._verify_share_network_subnet.assert_called_once_with( fake_share_server['id'], fake_share_network_subnet) instance.db.network_allocation_create.assert_has_calls([ mock.call(fake_context, data_list[0]), mock.call(fake_context, data_list[1]) ]) def test_unmanage_network_allocations(self): instance = self._setup_manage_network_allocations() self.mock_object(instance, 'deallocate_network') 
instance.unmanage_network_allocations('context', 'server_id') instance.deallocate_network.assert_called_once_with( 'context', 'server_id') def _setup_include_network_info(self): data = { 'DEFAULT': { 'standalone_network_plugin_gateway': '192.168.0.1', 'standalone_network_plugin_mask': '24', }, } with test_utils.create_temp_config_with_opts(data): instance = plugin.StandaloneNetworkPlugin() return instance def test_include_network_info(self): instance = self._setup_include_network_info() self.mock_object(instance, '_save_network_info') instance.include_network_info(fake_share_network) instance._save_network_info.assert_called_once_with( None, fake_share_network, save_db=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/policy.yaml0000664000175000017500000001127100000000000017646 0ustar00zuulzuul00000000000000# WARNING: Below rules are either deprecated rules # or extra rules in policy file, it is strongly # recommended to switch to new rules. 
"context_is_admin": "role:admin" "context_is_host_admin": "role:admin and project_id:%(project_id)s" "admin_api": "is_admin:True" "admin_or_owner": "is_admin:True or project_id:%(project_id)s" "default": "rule:admin_or_owner" "availability_zone:index": "rule:default" "quota_set:update": "rule:admin_api" "quota_set:show": "rule:default" "quota_set:delete": "rule:admin_api" "quota_class_set:show": "rule:default" "quota_class_set:update": "rule:admin_api" "service:index": "rule:admin_api" "service:update": "rule:admin_api" "share:create": "" "share:list_by_share_server_id": "rule:admin_api" "share:get": "" "share:get_all": "" "share:delete": "rule:default" "share:update": "rule:default" "share:snapshot_update": "" "share:create_snapshot": "" "share:delete_snapshot": "" "share:get_snapshot": "" "share:get_all_snapshots": "" "share:extend": "" "share:shrink": "" "share:manage": "rule:admin_api" "share:unmanage": "rule:admin_api" "share:force_delete": "rule:admin_api" "share:reset_status": "rule:admin_api" "share:migration_start": "rule:admin_api" "share:migration_complete": "rule:admin_api" "share:migration_cancel": "rule:admin_api" "share:migration_get_progress": "rule:admin_api" "share_export_location:index": "rule:default" "share_export_location:show": "rule:default" "share_type:index": "rule:default" "share_type:show": "rule:default" "share_type:default": "rule:default" "share_type:create": "rule:default" "share_type:delete": "rule:default" "share_type:add_project_access": "rule:admin_api" "share_type:list_project_access": "rule:admin_api" "share_type:remove_project_access": "rule:admin_api" "share_types_extra_spec:create": "rule:default" "share_types_extra_spec:update": "rule:default" "share_types_extra_spec:show": "rule:default" "share_types_extra_spec:index": "rule:default" "share_types_extra_spec:delete": "rule:default" "share_instance:index": "rule:admin_api" "share_instance:show": "rule:admin_api" "share_instance:force_delete": "rule:admin_api" 
"share_instance:reset_status": "rule:admin_api" "share_snapshot:force_delete": "rule:admin_api" "share_snapshot:reset_status": "rule:admin_api" "share_snapshot:manage_snapshot": "rule:admin_api" "share_snapshot:unmanage_snapshot": "rule:admin_api" "share_network:create": "" "share_network:index": "" "share_network:detail": "" "share_network:show": "" "share_network:update": "" "share_network:delete": "" "share_network:get_all_share_networks": "rule:admin_api" "share_server:index": "rule:admin_api" "share_server:show": "rule:admin_api" "share_server:details": "rule:admin_api" "share_server:delete": "rule:admin_api" "share:get_share_metadata": "" "share:delete_share_metadata": "" "share:update_share_metadata": "" "share_extension:availability_zones": "" "security_service:index": "" "security_service:get_all_security_services": "rule:admin_api" "scheduler_stats:pools:index": "rule:admin_api" "scheduler_stats:pools:detail": "rule:admin_api" "share_group:create": "rule:default" "share_group:delete": "rule:default" "share_group:update": "rule:default" "share_group:get": "rule:default" "share_group:get_all": "rule:default" "share_group:force_delete": "rule:admin_api" "share_group:reset_status": "rule:admin_api" "share_group_snapshot:create": "rule:default" "share_group_snapshot:delete": "rule:default" "share_group_snapshot:update": "rule:default" "share_group_snapshot:get": "rule:default" "share_group_snapshot:get_all": "rule:default" "share_group_snapshot:force_delete": "rule:admin_api" "share_group_snapshot:reset_status": "rule:admin_api" "share_replica:get_all": "rule:default" "share_replica:show": "rule:default" "share_replica:create": "rule:default" "share_replica:delete": "rule:default" "share_replica:promote": "rule:default" "share_replica:resync": "rule:admin_api" "share_replica:reset_status": "rule:admin_api" "share_replica:force_delete": "rule:admin_api" "share_replica:reset_replica_state": "rule:admin_api" "share_group_type:index": "rule:default" 
"share_group_type:show": "rule:default" "share_group_type:default": "rule:default" "share_group_type:create": "rule:admin_api" "share_group_type:delete": "rule:admin_api" "share_group_type:add_project_access": "rule:admin_api" "share_group_type:list_project_access": "rule:admin_api" "share_group_type:remove_project_access": "rule:admin_api" "share_group_types_spec:create": "rule:admin_api" "share_group_types_spec:update": "rule:admin_api" "share_group_types_spec:show": "rule:admin_api" "share_group_types_spec:index": "rule:admin_api" "share_group_types_spec:delete": "rule:admin_api" "message:delete": "rule:default" "message:get": "rule:default" "message:get_all": "rule:default" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/runtime_conf.py0000664000175000017500000000153000000000000020522 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg CONF = cfg.CONF CONF.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test flag')) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9816709 manila-21.0.0/manila/tests/scheduler/0000775000175000017500000000000000000000000017437 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/__init__.py0000664000175000017500000000000000000000000021536 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9856708 manila-21.0.0/manila/tests/scheduler/drivers/0000775000175000017500000000000000000000000021115 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/drivers/__init__.py0000664000175000017500000000000000000000000023214 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/drivers/test_base.py0000664000175000017500000000771300000000000023450 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Base Scheduler """ from unittest import mock from oslo_config import cfg from oslo_utils import timeutils from manila import context from manila import db from manila.scheduler.drivers import base from manila import test from manila import utils CONF = cfg.CONF class SchedulerTestCase(test.TestCase): """Test case for base scheduler driver class.""" # So we can subclass this test and re-use tests if we need. driver_cls = base.Scheduler def setUp(self): super(SchedulerTestCase, self).setUp() self.driver = self.driver_cls() self.context = context.RequestContext('fake_user', 'fake_project') self.topic = 'fake_topic' def test_update_service_capabilities(self): service_name = 'fake_service' host = 'fake_host' capabilities = {'fake_capability': 'fake_value'} timestamp = 1111 with mock.patch.object(self.driver.host_manager, 'update_service_capabilities', mock.Mock()): self.driver.update_service_capabilities( service_name, host, capabilities, timestamp) (self.driver.host_manager.update_service_capabilities. assert_called_once_with(service_name, host, capabilities, timestamp)) def test_hosts_up(self): service1 = {'host': 'host1'} service2 = {'host': 'host2'} services = [service1, service2] def fake_service_is_up(*args, **kwargs): if args[0]['host'] == 'host1': return False return True with mock.patch.object(db, 'service_get_all_by_topic', mock.Mock(return_value=services)): with mock.patch.object(utils, 'service_is_up', mock.Mock(side_effect=fake_service_is_up)): result = self.driver.hosts_up(self.context, self.topic) self.assertEqual(['host2'], result) db.service_get_all_by_topic.assert_called_once_with( self.context, self.topic) class SchedulerDriverBaseTestCase(SchedulerTestCase): """Test cases for base scheduler driver class methods. These can't fail if the driver is changed. 
""" def test_unimplemented_schedule(self): fake_args = (1, 2, 3) fake_kwargs = {'cat': 'meow'} self.assertRaises(NotImplementedError, self.driver.schedule, self.context, self.topic, 'schedule_something', *fake_args, **fake_kwargs) class SchedulerDriverModuleTestCase(test.TestCase): """Test case for scheduler driver module methods.""" def setUp(self): super(SchedulerDriverModuleTestCase, self).setUp() self.context = context.RequestContext('fake_user', 'fake_project') @mock.patch.object(db, 'share_update', mock.Mock()) def test_share_host_update_db(self): with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value='fake-now')): base.share_update_db(self.context, 31337, 'fake_host') db.share_update.assert_called_once_with( self.context, 31337, {'host': 'fake_host', 'scheduled_at': 'fake-now'}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/drivers/test_filter.py0000664000175000017500000007403700000000000024026 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Filter Scheduler. 
""" from unittest import mock import ddt from oslo_utils import strutils from manila.common import constants from manila import context from manila import exception from manila.message import message_field from manila.scheduler.drivers import base from manila.scheduler.drivers import filter from manila.scheduler import host_manager from manila.tests.scheduler.drivers import test_base from manila.tests.scheduler import fakes SNAPSHOT_SUPPORT = constants.ExtraSpecs.SNAPSHOT_SUPPORT REPLICATION_TYPE_SPEC = constants.ExtraSpecs.REPLICATION_TYPE_SPEC @ddt.ddt class FilterSchedulerTestCase(test_base.SchedulerTestCase): """Test case for Filter Scheduler.""" driver_cls = filter.FilterScheduler def test___format_filter_properties_active_replica_host_is_provided(self): sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') fake_type = {'name': 'NFS'} request_spec = { 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, 'share_type': fake_type, 'share_id': ['fake-id1'], 'active_replica_host': 'fake_ar_host', } hosts = [fakes.FakeHostState(host, {'replication_domain': 'xyzzy'}) for host in ('fake_ar_host', 'fake_host_2')] self.mock_object(sched.host_manager, 'get_all_host_states_share', mock.Mock(return_value=hosts)) self.mock_object(sched, 'populate_filter_properties_share') retval = sched._format_filter_properties( fake_context, {}, request_spec) self.assertDictEqual(fake_type, retval[0]['resource_type']) self.assertIn('replication_domain', retval[0]) # no "share_proto" was specified in the request_spec self.assertNotIn('storage_protocol', retval[0]) def test___format_filter_properties_no_default_share_type_provided(self): sched = fakes.FakeFilterScheduler() create_mock_message = self.mock_object(sched.message_api, 'create') fake_context = context.RequestContext('user', 'project') request_spec = { 'share_properties': {'project_id': 'string', 'size': 1}, 'share_instance_properties': {}, 'share_type': None, 
'share_id': 'fake-id1', } self.assertRaises(exception.InvalidParameterValue, sched._format_filter_properties, fake_context, {}, request_spec) create_mock_message.assert_called_once_with( fake_context, message_field.Action.CREATE, fake_context.project_id, resource_type=message_field.Resource.SHARE, resource_id='fake-id1', detail=message_field.Detail.NO_DEFAULT_SHARE_TYPE) @ddt.data(True, False) def test__format_filter_properties_backend_specified_for_replica( self, has_share_backend_name): sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') fake_type = {'name': 'NFS', 'extra_specs': {}} request_spec = { 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, 'share_type': fake_type, 'share_id': 'fake-id1', 'active_replica_host': 'fake_ar_host', } if has_share_backend_name: request_spec['share_type']['extra_specs'].update( {'share_backend_name': 'fake_backend'}) self.mock_object(sched.host_manager, 'get_all_host_states_share', mock.Mock(return_value=[])) retval = sched._format_filter_properties( fake_context, {}, request_spec) self.assertDictEqual(fake_type, retval[0]['resource_type']) self.assertNotIn('share_backend_name', retval[0]['share_type']['extra_specs']) @ddt.data(True, False) def test__format_filter_properties_storage_protocol_extra_spec_present( self, spec_present): sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') extra_specs_requested = ( {'storage_protocol': 'NFS_CIFS'} if spec_present else {} ) fake_type = { 'name': 'regalia', 'extra_specs': extra_specs_requested, } request_spec = { 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, 'share_proto': 'CEPHFS', 'share_type': fake_type, 'share_id': 'fake-id1', } retval = sched._format_filter_properties( fake_context, {}, request_spec)[0] filter_spec = retval['share_type']['extra_specs']['storage_protocol'] expected_spec = 'NFS_CIFS' if spec_present else ' CEPHFS' 
self.assertEqual(expected_spec, filter_spec) self.assertDictEqual(fake_type, retval['resource_type']) def test_create_share_no_hosts(self): # Ensure empty hosts/child_zones result in WillNotSchedule exception. sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') create_mock_message = self.mock_object(sched.message_api, 'create') request_spec = { 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, 'share_type': {'name': 'NFS'}, 'share_id': 'fake-id1', } self.assertRaises(exception.WillNotSchedule, sched.schedule_create_share, fake_context, request_spec, {}) create_mock_message.assert_called_once_with( fake_context, message_field.Action.CREATE, fake_context.project_id, resource_type=message_field.Resource.SHARE, resource_id=request_spec.get('share_id', None), detail=message_field.Detail.SHARE_BACKEND_NOT_READY_YET) @ddt.data( {'name': 'foo'}, {'name': 'foo', 'extra_specs': {}}, *[{'name': 'foo', 'extra_specs': {SNAPSHOT_SUPPORT: v}} for v in ('True', ' True', 'true', '1')] ) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_snapshot_support( self, share_type, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, } weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host) self.assertIsNotNone(weighed_host.obj) self.assertTrue(hasattr(weighed_host.obj, SNAPSHOT_SUPPORT)) expected_snapshot_support = strutils.bool_from_string( share_type.get('extra_specs', {}).get( SNAPSHOT_SUPPORT, 'True').split()[-1]) self.assertEqual( expected_snapshot_support, getattr(weighed_host.obj, SNAPSHOT_SUPPORT)) 
self.assertTrue(_mock_service_get_all_by_topic.called) @ddt.data( *[{'name': 'foo', 'extra_specs': {SNAPSHOT_SUPPORT: v}} for v in ('False', ' False', 'false', '0')] ) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_without_snapshot_support( self, share_type, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, } self.assertRaises(exception.NoValidHost, sched._schedule_share, fake_context, request_spec, {}) self.assertTrue(_mock_service_get_all_by_topic.called) @ddt.data( *[{'name': 'foo', 'extra_specs': { SNAPSHOT_SUPPORT: 'True', REPLICATION_TYPE_SPEC: v }} for v in ('writable', 'readable', 'dr')] ) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_valid_replication_spec( self, share_type, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, } weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host) self.assertIsNotNone(weighed_host.obj) self.assertTrue(hasattr(weighed_host.obj, REPLICATION_TYPE_SPEC)) expected_replication_type_support = ( share_type.get('extra_specs', {}).get(REPLICATION_TYPE_SPEC)) self.assertEqual( expected_replication_type_support, getattr(weighed_host.obj, REPLICATION_TYPE_SPEC)) self.assertTrue(_mock_service_get_all_by_topic.called) @ddt.data( 
*[{'name': 'foo', 'extra_specs': { SNAPSHOT_SUPPORT: 'True', REPLICATION_TYPE_SPEC: v }} for v in ('None', 'readwrite', 'activesync')] ) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_invalid_replication_type_spec( self, share_type, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, } self.assertRaises(exception.NoValidHost, sched._schedule_share, fake_context, request_spec, {}) self.assertTrue(_mock_service_get_all_by_topic.called) @ddt.data({'storage_protocol': 'CEPHFS'}, {'storage_protocol': ' CEPHFS'}, {'name': 'foo'}) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_storage_protocol_not_supported( self, share_type, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() requested_share_proto = ( share_type.get('storage_protocol', '').strip(' ') or 'MAPRFS' ) fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, 'share_proto': requested_share_proto, } self.assertRaises(exception.NoValidHost, sched._schedule_share, fake_context, request_spec, {}) self.assertTrue(_mock_service_get_all_by_topic.called) @ddt.data({'storage_protocol': 'GLUSTERFS'}, {'storage_protocol': ' GLUSTERFS'}, {'name': 'foo'}) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_valid_storage_protocol( self, share_type, _mock_service_get_all_by_topic): sched = 
fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, 'share_proto': 'GLUSTERFS', } weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host) self.assertIsNotNone(weighed_host.obj) self.assertEqual('GLUSTERFS', getattr(weighed_host.obj, 'storage_protocol')) self.assertEqual('host6', weighed_host.obj.host.split('#')[0]) self.assertTrue(_mock_service_get_all_by_topic.called) def _setup_dedupe_fakes(self, extra_specs): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) share_type = {'name': 'foo', 'extra_specs': extra_specs} request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, } return sched, fake_context, request_spec @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_default_dedupe_value( self, _mock_service_get_all_by_topic): sched, fake_context, request_spec = self._setup_dedupe_fakes( {'capabilities:dedupe': ' False'}) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) weighed_host = sched._schedule_share(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host) self.assertIsNotNone(weighed_host.obj) self.assertTrue(hasattr(weighed_host.obj, 'dedupe')) self.assertFalse(weighed_host.obj.dedupe) self.assertTrue(_mock_service_get_all_by_topic.called) @ddt.data('True', ' True') @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_default_dedupe_value_fail( self, capability, _mock_service_get_all_by_topic): sched, fake_context, request_spec = 
self._setup_dedupe_fakes( {'capabilities:dedupe': capability}) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) self.assertRaises(exception.NoValidHost, sched._schedule_share, fake_context, request_spec, {}) self.assertTrue(_mock_service_get_all_by_topic.called) def test_schedule_share_type_is_none(self): sched = fakes.FakeFilterScheduler() request_spec = { 'share_type': None, 'share_properties': {'project_id': 1, 'size': 1}, } self.assertRaises(exception.InvalidParameterValue, sched._schedule_share, self.context, request_spec) @mock.patch('manila.db.service_get_all_by_topic') def test_schedule_share_with_instance_properties( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) share_type = {'name': 'foo'} request_spec = { 'share_type': share_type, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'availability_zone_id': "fake_az"}, } self.assertRaises(exception.NoValidHost, sched._schedule_share, fake_context, request_spec, {}) self.assertTrue(_mock_service_get_all_by_topic.called) def test_max_attempts(self): self.flags(scheduler_max_attempts=4) sched = fakes.FakeFilterScheduler() self.assertEqual(4, sched._max_attempts()) def test_invalid_max_attempts(self): self.flags(scheduler_max_attempts=0) self.assertRaises(exception.InvalidParameterValue, fakes.FakeFilterScheduler) @mock.patch('manila.scheduler.host_manager.HostManager.' 'get_all_host_states_share') def test_retry_disabled(self, _mock_get_all_host_states): # Retry info should not get populated when re-scheduling is off. 
self.flags(scheduler_max_attempts=1) sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() request_spec = { 'share_type': {'name': 'iSCSI'}, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, } filter_properties = {} self.assertRaises(exception.NoValidHost, sched._schedule_share, self.context, request_spec, filter_properties=filter_properties) # Should not have retry info in the populated filter properties. self.assertNotIn("retry", filter_properties) @mock.patch('manila.scheduler.host_manager.HostManager.' 'get_all_host_states_share') def test_retry_attempt_one(self, _mock_get_all_host_states): # Test retry logic on initial scheduling attempt. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() request_spec = { 'share_type': {'name': 'iSCSI'}, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, } filter_properties = {} self.assertRaises(exception.NoValidHost, sched._schedule_share, self.context, request_spec, filter_properties=filter_properties) num_attempts = filter_properties['retry']['num_attempts'] self.assertEqual(1, num_attempts) @mock.patch('manila.scheduler.host_manager.HostManager.' 'get_all_host_states_share') def test_retry_attempt_two(self, _mock_get_all_host_states): # Test retry logic when re-scheduling. 
self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() request_spec = { 'share_type': {'name': 'iSCSI'}, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, } retry = dict(num_attempts=1) filter_properties = dict(retry=retry) self.assertRaises(exception.NoValidHost, sched._schedule_share, self.context, request_spec, filter_properties=filter_properties) num_attempts = filter_properties['retry']['num_attempts'] self.assertEqual(2, num_attempts) def test_retry_exceeded_max_attempts(self): # Test for necessary explosion when max retries is exceeded. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = { 'share_type': {'name': 'iSCSI'}, 'share_properties': {'project_id': 1, 'size': 1}, } retry = dict(num_attempts=2) filter_properties = dict(retry=retry) self.assertRaises(exception.NoValidHost, sched._schedule_share, self.context, request_spec, filter_properties=filter_properties) def test_add_retry_host(self): retry = dict(num_attempts=1, hosts=[]) filter_properties = dict(retry=retry) host = "fakehost" sched = fakes.FakeFilterScheduler() sched._add_retry_host(filter_properties, host) hosts = filter_properties['retry']['hosts'] self.assertEqual(1, len(hosts)) self.assertEqual(host, hosts[0]) def test_post_select_populate(self): # Test addition of certain filter props after a node is selected. retry = {'hosts': [], 'num_attempts': 1} filter_properties = {'retry': retry} sched = fakes.FakeFilterScheduler() host_state = host_manager.HostState('host') host_state.total_capacity_gb = 1024 sched._post_select_populate_filter_properties(filter_properties, host_state) self.assertEqual('host', filter_properties['retry']['hosts'][0]) self.assertEqual(1024, host_state.total_capacity_gb) def test_schedule_create_share_group(self): # Ensure empty hosts/child_zones result in NoValidHosts exception. 
sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') fake_host = 'fake_host' request_spec = {'share_types': [{'id': 'NFS'}]} self.mock_object(sched, "_get_best_host_for_share_group", mock.Mock(return_value=fake_host)) fake_updated_group = mock.Mock() self.mock_object(base, "share_group_update_db", mock.Mock( return_value=fake_updated_group)) self.mock_object(sched.share_rpcapi, "create_share_group") sched.schedule_create_share_group(fake_context, 'fake_id', request_spec, {}) sched._get_best_host_for_share_group.assert_called_once_with( fake_context, request_spec) base.share_group_update_db.assert_called_once_with( fake_context, 'fake_id', fake_host) sched.share_rpcapi.create_share_group.assert_called_once_with( fake_context, fake_updated_group, fake_host) def test_create_group_no_hosts(self): # Ensure empty hosts/child_zones result in NoValidHosts exception. sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') request_spec = {'share_types': [{'id': 'NFS'}]} self.assertRaises(exception.NoValidHost, sched.schedule_create_share_group, fake_context, 'fake_id', request_spec, {}) @mock.patch('manila.db.service_get_all_by_topic') def test_get_weighted_candidates_for_share_group( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project') fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = {'share_types': [{'name': 'NFS', 'extra_specs': { SNAPSHOT_SUPPORT: 'True', }}]} hosts = sched._get_weighted_candidates_share_group( fake_context, request_spec) self.assertTrue(hosts) @mock.patch('manila.db.service_get_all_by_topic') def test_get_weighted_candidates_for_share_group_no_hosts( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project') 
fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = {'share_types': [{'name': 'NFS', 'extra_specs': { SNAPSHOT_SUPPORT: 'False', }}]} hosts = sched._get_weighted_candidates_share_group( fake_context, request_spec) self.assertEqual([], hosts) @mock.patch('manila.db.service_get_all_by_topic') def test_get_weighted_candidates_for_share_group_many_hosts( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project') fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic) request_spec = {'share_types': [{'name': 'NFS', 'extra_specs': { SNAPSHOT_SUPPORT: 'True', }}]} hosts = sched._get_weighted_candidates_share_group( fake_context, request_spec) self.assertEqual(6, len(hosts)) def _host_passes_filters_setup(self, mock_obj): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(mock_obj) return (sched, fake_context) @mock.patch('manila.db.service_get_all_by_topic') def test_host_passes_filters_happy_day(self, _mock_service_get_topic): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) request_spec = {'share_id': 1, 'share_type': {'name': 'fake_type'}, 'share_instance_properties': {}, 'share_properties': {'project_id': 1, 'size': 1}} ret_host = sched.host_passes_filters(ctx, 'host1#_pool0', request_spec, {}) self.assertEqual('host1#_pool0', ret_host.host) self.assertTrue(_mock_service_get_topic.called) @mock.patch('manila.db.service_get_all_by_topic') def test_host_passes_filters_no_capacity(self, _mock_service_get_topic): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) request_spec = {'share_id': 1, 'share_type': {'name': 'fake_type'}, 'share_instance_properties': {}, 'share_properties': {'project_id': 1, 'size': 1024}} 
self.assertRaises(exception.NoValidHost, sched.host_passes_filters, ctx, 'host3#_pool0', request_spec, {}) self.assertTrue(_mock_service_get_topic.called) def test_schedule_create_replica_no_host(self): sched = fakes.FakeFilterScheduler() request_spec = { 'share_type': {'name': 'fake_type'}, 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {'project_id': 1, 'size': 1}, } self.mock_object(sched.host_manager, 'get_all_host_states_share', mock.Mock(return_value=[])) self.mock_object(sched.host_manager, 'get_filtered_hosts', mock.Mock(return_value=(None, 'filter'))) self.assertRaises(exception.WillNotSchedule, sched.schedule_create_replica, self.context, request_spec, {}) @mock.patch('manila.db.service_get_all_by_topic') def test__schedule_share_with_disabled_host( self, _mock_service_get_all_by_topic): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager() fake_context = context.RequestContext('user', 'project', is_admin=True) fake_type = {'name': 'NFS'} request_spec = { 'share_properties': {'project_id': 1, 'size': 1}, 'share_instance_properties': {}, 'share_type': fake_type, 'share_id': 'fake-id1', } filter_properties = { 'scheduler_hints': {'only_host': 'host7#_pool0'} } fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, disabled=True) weighed_host = sched._schedule_share( fake_context, request_spec, filter_properties=filter_properties) self.assertIsNotNone(weighed_host) self.assertIsNotNone(weighed_host.obj) self.assertEqual('host7', weighed_host.obj.host.split('#')[0]) self.assertTrue(_mock_service_get_all_by_topic.called) def test_schedule_create_replica(self): sched = fakes.FakeFilterScheduler() request_spec = fakes.fake_replica_request_spec() host = 'fake_host' replica_id = request_spec['share_instance_properties']['id'] mock_update_db_call = self.mock_object( base, 'share_replica_update_db', mock.Mock(return_value='replica')) mock_share_rpcapi_call = self.mock_object( sched.share_rpcapi, 
'create_share_replica') self.mock_object( self.driver_cls, '_schedule_share', mock.Mock(return_value=fakes.get_fake_host(host_name=host))) retval = sched.schedule_create_replica( self.context, fakes.fake_replica_request_spec(), {}) self.assertIsNone(retval) mock_update_db_call.assert_called_once_with( self.context, replica_id, host) mock_share_rpcapi_call.assert_called_once_with( self.context, 'replica', host, request_spec=request_spec, filter_properties={}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/drivers/test_simple.py0000664000175000017500000001611200000000000024020 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For Simple Scheduler """ from unittest import mock from oslo_config import cfg from manila import context from manila import db from manila import exception from manila.scheduler.drivers import base from manila.scheduler.drivers import simple from manila.share import rpcapi as share_rpcapi from manila import test from manila.tests import db_utils from manila import utils CONF = cfg.CONF class SimpleSchedulerSharesTestCase(test.TestCase): """Test case for simple scheduler create share method.""" def setUp(self): super(SimpleSchedulerSharesTestCase, self).setUp() self.mock_object(share_rpcapi, 'ShareAPI') self.driver = simple.SimpleScheduler() self.context = context.RequestContext('fake_user', 'fake_project') self.admin_context = context.RequestContext('fake_admin_user', 'fake_project') self.admin_context.is_admin = True @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True)) def test_create_share_if_two_services_up(self): share_id = 'fake' fake_share = {'id': share_id, 'size': 1} fake_service_1 = {'disabled': False, 'host': 'fake_host1'} fake_service_2 = {'disabled': False, 'host': 'fake_host2'} fake_result = [(fake_service_1, 2), (fake_service_2, 1)] fake_request_spec = { 'share_id': share_id, 'share_properties': fake_share, } self.mock_object(db, 'service_get_all_share_sorted', mock.Mock(return_value=fake_result)) self.mock_object(base, 'share_update_db', mock.Mock(return_value=db_utils.create_share())) self.driver.schedule_create_share(self.context, fake_request_spec, {}) utils.service_is_up.assert_called_once_with(utils.IsAMatcher(dict)) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) base.share_update_db.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, 'fake_host1') def test_create_share_if_services_not_available(self): share_id = 'fake' fake_share = {'id': share_id, 'size': 1} fake_result = [] fake_request_spec = { 'share_id': share_id, 'share_properties': 
fake_share, } with mock.patch.object(db, 'service_get_all_share_sorted', mock.Mock(return_value=fake_result)): self.assertRaises(exception.NoValidHost, self.driver.schedule_create_share, self.context, fake_request_spec, {}) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) def test_create_share_if_max_gigabytes_exceeded(self): share_id = 'fake' fake_share = {'id': share_id, 'size': 10001} fake_service_1 = {'disabled': False, 'host': 'fake_host1'} fake_service_2 = {'disabled': False, 'host': 'fake_host2'} fake_result = [(fake_service_1, 5), (fake_service_2, 7)] fake_request_spec = { 'share_id': share_id, 'share_properties': fake_share, } with mock.patch.object(db, 'service_get_all_share_sorted', mock.Mock(return_value=fake_result)): self.assertRaises(exception.NoValidHost, self.driver.schedule_create_share, self.context, fake_request_spec, {}) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True)) def test_create_share_availability_zone(self): share_id = 'fake' fake_share = { 'id': share_id, 'size': 1, } fake_instance = { 'availability_zone_id': 'fake', } fake_service_1 = { 'disabled': False, 'host': 'fake_host1', 'availability_zone_id': 'fake', } fake_service_2 = { 'disabled': False, 'host': 'fake_host2', 'availability_zone_id': 'super_fake', } fake_result = [(fake_service_1, 0), (fake_service_2, 1)] fake_request_spec = { 'share_id': share_id, 'share_properties': fake_share, 'share_instance_properties': fake_instance, } self.mock_object(db, 'service_get_all_share_sorted', mock.Mock(return_value=fake_result)) self.mock_object(base, 'share_update_db', mock.Mock(return_value=db_utils.create_share())) self.driver.schedule_create_share(self.context, fake_request_spec, {}) utils.service_is_up.assert_called_once_with(fake_service_1) base.share_update_db.assert_called_once_with( 
utils.IsAMatcher(context.RequestContext), share_id, fake_service_1['host']) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True)) def test_create_share_availability_zone_on_host(self): share_id = 'fake' fake_share = { 'id': share_id, 'availability_zone': 'fake:fake', 'size': 1, } fake_service = {'disabled': False, 'host': 'fake'} fake_request_spec = { 'share_id': share_id, 'share_properties': fake_share, } self.mock_object(db, 'service_get_all_share_sorted', mock.Mock(return_value=[(fake_service, 1)])) self.mock_object(base, 'share_update_db', mock.Mock(return_value=db_utils.create_share())) self.driver.schedule_create_share(self.admin_context, fake_request_spec, {}) utils.service_is_up.assert_called_once_with(fake_service) db.service_get_all_share_sorted.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) base.share_update_db.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, 'fake') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9856708 manila-21.0.0/manila/tests/scheduler/evaluator/0000775000175000017500000000000000000000000021441 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/evaluator/__init__.py0000664000175000017500000000000000000000000023540 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/evaluator/test_evaluator.py0000664000175000017500000001456400000000000025066 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila import exception from manila.scheduler.evaluator import evaluator from manila import test class EvaluatorTestCase(test.TestCase): def test_simple_integer(self): self.assertEqual(2, evaluator.evaluate("1+1")) self.assertEqual(9, evaluator.evaluate("2+3+4")) self.assertEqual(23, evaluator.evaluate("11+12")) self.assertEqual(30, evaluator.evaluate("5*6")) self.assertEqual(2, evaluator.evaluate("22/11")) self.assertEqual(38, evaluator.evaluate("109-71")) self.assertEqual( 493, evaluator.evaluate("872 - 453 + 44 / 22 * 4 + 66")) def test_simple_float(self): self.assertEqual(2.0, evaluator.evaluate("1.0 + 1.0")) self.assertEqual(2.5, evaluator.evaluate("1.5 + 1.0")) self.assertEqual(3.0, evaluator.evaluate("1.5 * 2.0")) def test_int_float_mix(self): self.assertEqual(2.5, evaluator.evaluate("1.5 + 1")) self.assertEqual(4.25, evaluator.evaluate("8.5 / 2")) self.assertEqual(5.25, evaluator.evaluate("10/4+0.75 + 2")) def test_negative_numbers(self): self.assertEqual(-2, evaluator.evaluate("-2")) self.assertEqual(-1, evaluator.evaluate("-2+1")) self.assertEqual(3, evaluator.evaluate("5+-2")) def test_exponent(self): self.assertEqual(8, evaluator.evaluate("2^3")) self.assertEqual(-8, evaluator.evaluate("-2 ^ 3")) self.assertEqual(15.625, evaluator.evaluate("2.5 ^ 3")) self.assertEqual(8, evaluator.evaluate("4 ^ 1.5")) def test_function(self): self.assertEqual(5, evaluator.evaluate("abs(-5)")) self.assertEqual(2, 
evaluator.evaluate("abs(2)")) self.assertEqual(1, evaluator.evaluate("min(1, 100)")) self.assertEqual(100, evaluator.evaluate("max(1, 100)")) self.assertEqual(100, evaluator.evaluate("max(1, 2, 100)")) def test_parentheses(self): self.assertEqual(1, evaluator.evaluate("(1)")) self.assertEqual(-1, evaluator.evaluate("(-1)")) self.assertEqual(2, evaluator.evaluate("(1+1)")) self.assertEqual(15, evaluator.evaluate("(1+2) * 5")) self.assertEqual(3, evaluator.evaluate("(1+2)*(3-1)/((1+(2-1)))")) self.assertEqual( -8.0, evaluator. evaluate("((1.0 / 0.5) * (2)) *(-2)")) def test_comparisons(self): self.assertTrue(evaluator.evaluate("1 < 2")) self.assertTrue(evaluator.evaluate("2 > 1")) self.assertTrue(evaluator.evaluate("2 != 1")) self.assertFalse(evaluator.evaluate("1 > 2")) self.assertFalse(evaluator.evaluate("2 < 1")) self.assertFalse(evaluator.evaluate("2 == 1")) self.assertTrue(evaluator.evaluate("(1 == 1) == !(1 == 2)")) def test_logic_ops(self): self.assertTrue(evaluator.evaluate("(1 == 1) AND (2 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) and (2 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) && (2 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) && (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) OR (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) or (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) || (5 == 2)")) self.assertFalse(evaluator.evaluate("(5 == 1) || (5 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) AND NOT (2 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) AND not (2 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) AND !(2 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) AND NOT (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) OR NOT (2 == 2) " "AND (5 == 5)")) def test_ternary_conditional(self): self.assertEqual(5, evaluator.evaluate("(1 < 2) ? 5 : 10")) self.assertEqual(10, evaluator.evaluate("(1 > 2) ? 
5 : 10")) def test_variables_dict(self): stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407} request = {'iops': 500, 'size': 4} self.assertEqual(1500, evaluator.evaluate("stats.iops + request.iops", stats=stats, request=request)) def test_missing_var(self): stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407} request = {'iops': 500, 'size': 4} self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "foo.bob + 5", stats=stats, request=request) self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "stats.bob + 5", stats=stats, request=request) self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "fake.var + 1", stats=stats, request=request, fake=None) def test_bad_expression(self): self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "1/*1") def test_nonnumber_comparison(self): nonnumber = {'test': 'foo'} request = {'test': 'bar'} self.assertTrue( evaluator.evaluate("nonnumber.test != request.test", nonnumber=nonnumber, request=request)) self.assertFalse( evaluator.evaluate("nonnumber.test == request.test", nonnumber=nonnumber, request=request)) def test_div_zero(self): self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "7 / 0") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/fakes.py0000664000175000017500000007145700000000000021120 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Scheduler tests. """ from oslo_utils import timeutils from manila.scheduler.drivers import filter from manila.scheduler import host_manager from manila.scheduler.weighers import base_host as base_host_weigher FAKE_AZ_1 = {'name': 'zone1', 'id': '24433438-c9b5-4cb5-a472-f78462aa5f31'} FAKE_AZ_2 = {'name': 'zone2', 'id': 'ebef050c-d20d-4c44-b272-1a0adce11cb5'} FAKE_AZ_3 = {'name': 'zone3', 'id': '18e7e6e2-39d6-466b-a706-2717bd1086e1'} FAKE_AZ_4 = {'name': 'zone4', 'id': '9ca40ee4-3c2a-4635-9a18-233cf6e0ad0b'} FAKE_AZ_5 = {'name': 'zone4', 'id': 'd76d921d-d6fa-41b4-a180-fb68952784bd'} FAKE_AZ_6 = {'name': 'zone4', 'id': 'bc09c3d6-671c-4d55-9f43-f00757aabc50'} SHARE_SERVICES_NO_POOLS = [ dict(id=1, host='host1', topic='share', disabled=False, updated_at=timeutils.utcnow(), availability_zone_id=FAKE_AZ_1['id'], availability_zone=FAKE_AZ_1), dict(id=2, host='host2@back1', topic='share', disabled=False, updated_at=timeutils.utcnow(), availability_zone_id=FAKE_AZ_2['id'], availability_zone=FAKE_AZ_2), dict(id=3, host='host2@back2', topic='share', disabled=False, updated_at=timeutils.utcnow(), availability_zone_id=FAKE_AZ_3['id'], availability_zone=FAKE_AZ_3), ] SERVICE_STATES_NO_POOLS = { 'host1': dict(share_backend_name='AAA', total_capacity_gb=512, free_capacity_gb=200, timestamp=None, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=312, max_over_subscription_ratio=1.0, thin_provisioning=False, snapshot_support=False, create_share_from_snapshot_support=False, revert_to_snapshot_support=True, 
mount_snapshot_support=True, driver_handles_share_servers=False, mount_point_name_support=False), 'host2@back1': dict(share_backend_name='BBB', total_capacity_gb=256, free_capacity_gb=100, timestamp=None, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=400, max_over_subscription_ratio=2.0, thin_provisioning=True, snapshot_support=True, create_share_from_snapshot_support=True, revert_to_snapshot_support=False, mount_snapshot_support=False, driver_handles_share_servers=False, mount_point_name_support=False), 'host2@back2': dict(share_backend_name='CCC', total_capacity_gb=10000, free_capacity_gb=700, timestamp=None, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=50000, max_over_subscription_ratio=20.0, thin_provisioning=True, snapshot_support=True, create_share_from_snapshot_support=True, revert_to_snapshot_support=False, mount_snapshot_support=False, driver_handles_share_servers=False, mount_point_name_support=False), } SHARE_SERVICES_WITH_POOLS = [ dict(id=1, host='host1@AAA', topic='share', disabled=False, updated_at=timeutils.utcnow(), availability_zone_id=FAKE_AZ_1['id'], availability_zone=FAKE_AZ_1), dict(id=2, host='host2@BBB', topic='share', disabled=False, updated_at=timeutils.utcnow(), availability_zone_id=FAKE_AZ_2['id'], availability_zone=FAKE_AZ_2), dict(id=3, host='host3@CCC', topic='share', disabled=False, updated_at=timeutils.utcnow(), availability_zone_id=FAKE_AZ_3['id'], availability_zone=FAKE_AZ_3), dict(id=4, host='host4@DDD', topic='share', disabled=False, updated_at=timeutils.utcnow(), availability_zone_id=FAKE_AZ_4['id'], availability_zone=FAKE_AZ_4), # service on host5 is disabled dict(id=5, host='host5@EEE', topic='share', disabled=True, updated_at=timeutils.utcnow(), availability_zone_id=FAKE_AZ_5['id'], availability_zone=FAKE_AZ_5), dict(id=6, host='host6@FFF', topic='share', disabled=True, 
updated_at=timeutils.utcnow(), availability_zone_id=FAKE_AZ_6['id'], availability_zone=FAKE_AZ_6), ] SHARE_SERVICE_STATES_WITH_POOLS = { 'host1@AAA': dict(share_backend_name='AAA', timestamp=None, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, driver_handles_share_servers=False, snapshot_support=True, create_share_from_snapshot_support=True, revert_to_snapshot_support=True, replication_type=None, pools=[dict(pool_name='pool1', total_capacity_gb=51, free_capacity_gb=41, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=10, max_over_subscription_ratio=1.0, thin_provisioning=False, mount_point_name_support=False, )]), 'host2@BBB': dict(share_backend_name='BBB', timestamp=None, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, driver_handles_share_servers=False, snapshot_support=True, create_share_from_snapshot_support=True, revert_to_snapshot_support=False, replication_type=None, pools=[dict(pool_name='pool2', total_capacity_gb=52, free_capacity_gb=42, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=60, max_over_subscription_ratio=2.0, thin_provisioning=True, mount_point_name_support=False, )]), 'host3@CCC': dict(share_backend_name='CCC', timestamp=None, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, driver_handles_share_servers=False, snapshot_support=True, create_share_from_snapshot_support=True, revert_to_snapshot_support=False, replication_type=None, pools=[dict(pool_name='pool3', total_capacity_gb=53, free_capacity_gb=43, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=20.0, thin_provisioning=True, mount_point_name_support=False, )]), 'host4@DDD': dict(share_backend_name='DDD', timestamp=None, 
reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, driver_handles_share_servers=False, snapshot_support=True, create_share_from_snapshot_support=True, revert_to_snapshot_support=False, replication_type=None, pools=[dict(pool_name='pool4a', total_capacity_gb=541, free_capacity_gb=441, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=800, max_over_subscription_ratio=2.0, thin_provisioning=True, mount_point_name_support=False, ), dict(pool_name='pool4b', total_capacity_gb=542, free_capacity_gb=442, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=2000, max_over_subscription_ratio=10.0, thin_provisioning=True, mount_point_name_support=False, )]), 'host5@EEE': dict(share_backend_name='EEE', timestamp=None, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, driver_handles_share_servers=False, snapshot_support=True, create_share_from_snapshot_support=True, revert_to_snapshot_support=False, replication_type=None, pools=[dict(pool_name='pool5a', total_capacity_gb=551, free_capacity_gb=451, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=1.0, thin_provisioning=False, mount_point_name_support=False, ), dict(pool_name='pool5b', total_capacity_gb=552, free_capacity_gb=452, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=1.0, thin_provisioning=False, mount_point_name_support=False, )]), 'host6@FFF': dict(share_backend_name='FFF', timestamp=None, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, driver_handles_share_servers=False, snapshot_support=True, create_share_from_snapshot_support=True, revert_to_snapshot_support=False, 
replication_type=None, pools=[dict(pool_name='pool6a', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=1.0, thin_provisioning=False, mount_point_name_support=False, ), dict(pool_name='pool6b', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, provisioned_capacity_gb=100, max_over_subscription_ratio=1.0, thin_provisioning=False, mount_point_name_support=False, )]), } FAKE_ACTIVE_IQ_WEIGHER_LIST = [ "fake_aggregate_1:fake_cluster_name1", "fake_aggregate_2:fake_cluster_name2", "fake_aggregate_3:fake_cluster_name3" ] FAKE_ACTIVE_IQ_WEIGHER_AGGREGATES_RESPONSE = { "records": [ { "name": "fake_aggregate_1", "key": "fake_key_1", "cluster": { "name": "fake_cluster_name1" } }, { "name": "fake_aggregate_2", "key": "fake_key_2", "cluster": { "name": "fake_cluster_name2" } }, { "name": "fake_aggregate_3", "key": "fake_key_3", "cluster": { "name": "fake_cluster_name3" } } ] } FAKE_ACTIVE_IQ_WEIGHER_BALANCE_RESPONSE = [ { "key": "fake_key_1", "scores": { "total_weighted_score": 10.0 } }, { "key": "fake_key_2", "scores": { "total_weighted_score": 20.0 } } ] class FakeHostManagerNetAppOnly(host_manager.HostManager): def __init__(self): super(FakeHostManagerNetAppOnly, self).__init__() self.service_states = { 'host1': { 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'thin_provisioning': False, 'reserved_percentage': 10, 'reserved_snapshot_percentage': 5, 'reserved_share_extend_percentage': 15, 'timestamp': None, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'replication_type': 'writable', 'replication_domain': 'endor', 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'NetApp', 'netapp_cluster_name': 'cluster1', }, 'host2': { 'total_capacity_gb': 2048, 'free_capacity_gb': 300, 
'allocated_capacity_gb': 1748, 'provisioned_capacity_gb': 1748, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'reserved_percentage': 10, 'reserved_snapshot_percentage': 5, 'reserved_share_extend_percentage': 15, 'timestamp': None, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'replication_type': 'readable', 'replication_domain': 'kashyyyk', 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'NetApp', 'netapp_cluster_name': 'cluster2', }, 'host3': { 'total_capacity_gb': 512, 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': [False], 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'timestamp': None, 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'NetApp', 'netapp_cluster_name': 'cluster3', }, 'host4': { 'total_capacity_gb': 2048, 'free_capacity_gb': 200, 'allocated_capacity_gb': 1848, 'provisioned_capacity_gb': 1848, 'max_over_subscription_ratio': 1.0, 'thin_provisioning': [True], 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 5, 'timestamp': None, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'replication_type': 'dr', 'replication_domain': 'naboo', 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'NetApp', 'netapp_cluster_name': 'cluster4', }, 'host5': { 'total_capacity_gb': 2048, 'free_capacity_gb': 500, 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.5, 'thin_provisioning': [True, False], 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 5, 'timestamp': None, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'replication_type': None, 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'NetApp', 'netapp_cluster_name': 'cluster5', }, 
'host6': { 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'thin_provisioning': False, 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 5, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'timestamp': None, 'storage_protocol': 'GLUSTERFS', 'vendor_name': 'NetApp', 'netapp_cluster_name': 'cluster6', }, } class FakeFilterScheduler(filter.FilterScheduler): def __init__(self, *args, **kwargs): super(FakeFilterScheduler, self).__init__(*args, **kwargs) self.host_manager = host_manager.HostManager() class FakeHostManager(host_manager.HostManager): def __init__(self): super(FakeHostManager, self).__init__() self.service_states = { 'host1': {'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'thin_provisioning': False, 'reserved_percentage': 10, 'reserved_snapshot_percentage': 5, 'reserved_share_extend_percentage': 15, 'timestamp': None, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'replication_type': 'writable', 'replication_domain': 'endor', 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'Dummy', }, 'host2': {'total_capacity_gb': 2048, 'free_capacity_gb': 300, 'allocated_capacity_gb': 1748, 'provisioned_capacity_gb': 1748, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'reserved_percentage': 10, 'reserved_snapshot_percentage': 5, 'reserved_share_extend_percentage': 15, 'timestamp': None, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'replication_type': 'readable', 'replication_domain': 'kashyyyk', 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'Dummy', }, 'host3': {'total_capacity_gb': 512, 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': [False], 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'snapshot_support': True, 
'create_share_from_snapshot_support': True, 'timestamp': None, 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'Dummy', }, 'host4': {'total_capacity_gb': 2048, 'free_capacity_gb': 200, 'allocated_capacity_gb': 1848, 'provisioned_capacity_gb': 1848, 'max_over_subscription_ratio': 1.0, 'thin_provisioning': [True], 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 5, 'timestamp': None, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'replication_type': 'dr', 'replication_domain': 'naboo', 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'Dummy', }, 'host5': {'total_capacity_gb': 2048, 'free_capacity_gb': 500, 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.5, 'thin_provisioning': [True, False], 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 5, 'timestamp': None, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'replication_type': None, 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'Dummy', }, 'host6': {'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'thin_provisioning': False, 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 5, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'timestamp': None, 'storage_protocol': 'GLUSTERFS', 'vendor_name': 'Dummy', }, 'host7': {'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'thin_provisioning': False, 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 5, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'timestamp': None, 'storage_protocol': 'NFS_CIFS', 'vendor_name': 'Dummy', }, } class FakeHostState(host_manager.HostState): def __init__(self, host, attribute_dict): super(FakeHostState, self).__init__(host) for (key, val) in 
attribute_dict.items(): setattr(self, key, val) FAKE_HOST_STRING_1 = 'openstack@BackendA#PoolX' FAKE_HOST_STRING_2 = 'openstack@BackendB#PoolY' FAKE_HOST_STRING_3 = 'openstack@BackendC#PoolZ' def mock_host_manager_db_calls(mock_obj, disabled=None): services = [ dict(id=1, host='host1', topic='share', disabled=False, availability_zone=FAKE_AZ_1, availability_zone_id=FAKE_AZ_1['id'], updated_at=timeutils.utcnow()), dict(id=2, host='host2', topic='share', disabled=False, availability_zone=FAKE_AZ_1, availability_zone_id=FAKE_AZ_1['id'], updated_at=timeutils.utcnow()), dict(id=3, host='host3', topic='share', disabled=False, availability_zone=FAKE_AZ_2, availability_zone_id=FAKE_AZ_2['id'], updated_at=timeutils.utcnow()), dict(id=4, host='host4', topic='share', disabled=False, availability_zone=FAKE_AZ_3, availability_zone_id=FAKE_AZ_3['id'], updated_at=timeutils.utcnow()), dict(id=5, host='host5', topic='share', disabled=False, availability_zone=FAKE_AZ_3, availability_zone_id=FAKE_AZ_3['id'], updated_at=timeutils.utcnow()), dict(id=6, host='host6', topic='share', disabled=False, availability_zone=FAKE_AZ_4, availability_zone_id=FAKE_AZ_4['id'], updated_at=timeutils.utcnow()), dict(id=7, host='host7', topic='share', disabled=True, availability_zone=FAKE_AZ_4, availability_zone_id=FAKE_AZ_4['id'], updated_at=timeutils.utcnow()), ] if disabled is None: mock_obj.return_value = services else: mock_obj.return_value = [service for service in services if service['disabled'] == disabled] class FakeWeigher1(base_host_weigher.BaseHostWeigher): def __init__(self): pass class FakeWeigher2(base_host_weigher.BaseHostWeigher): def __init__(self): pass class FakeClass(object): def __init__(self): pass def fake_replica_request_spec(**kwargs): request_spec = { 'share_properties': { 'id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'name': 'fakename', 'size': 1, 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'availability_zone': 'fake_az', 'replication_type': 'dr', }, 
'share_instance_properties': { 'id': '8d5566df-1e83-4373-84b8-6f8153a0ac41', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'host': 'openstack@BackendZ#PoolA', 'status': 'available', 'availability_zone_id': 'f6e146d0-65f0-11e5-9d70-feff819cdc9f', 'share_network_id': '4ccd5318-65f1-11e5-9d70-feff819cdc9f', 'share_server_id': '53099868-65f1-11e5-9d70-feff819cdc9f', }, 'share_proto': 'nfs', 'share_id': 'f0e4bb5e-65f0-11e5-9d70-feff819cdc9f', 'snapshot_id': None, 'share_type': 'fake_share_type', 'share_group': None, } request_spec.update(kwargs) return request_spec def get_fake_host(host_name=None): class FakeHost(object): def __init__(self, host_name=None): self.host = host_name or 'openstack@BackendZ#PoolA' class FakeWeightedHost(object): def __init__(self, host_name=None): self.obj = FakeHost(host_name=host_name) return FakeWeightedHost(host_name=host_name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9896708 manila-21.0.0/manila/tests/scheduler/filters/0000775000175000017500000000000000000000000021107 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/__init__.py0000664000175000017500000000000000000000000023206 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_affinity.py0000664000175000017500000001017400000000000024334 0ustar00zuulzuul00000000000000# Copyright (c) 2021 SAP. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from unittest import mock from manila import exception from manila.scheduler.filters import affinity from manila import test from manila.tests.scheduler import fakes fake_hosts = [ fakes.FakeHostState('host1', {}), fakes.FakeHostState('host2', {}), fakes.FakeHostState('host3', {}), ] fake_shares_1 = { 'abb6e0ac-7c3e-4ce0-8a69-5a166d246882': { 'instances': [ {'host': fake_hosts[0].host} ] }, '4de0cc74-450c-4468-8159-52128cf03407': { 'instances': [ {'host': fake_hosts[0].host} ] }, } fake_shares_2 = { 'c920fb61-e250-4c3c-a25d-1fdd9ca7cbc3': { 'instances': [ {'host': fake_hosts[1].host} ] }, } fake_shares_3 = { '3923bebf-9825-4a66-971e-6092a9fe2dbb': { 'instances': [ {'host': fake_hosts[2].host} ] }, } @ddt.ddt class AffinityFilterTestCase(test.TestCase): """Test case for AffinityFilter.""" def setUp(self): super(AffinityFilterTestCase, self).setUp() self.filter = affinity.AffinityFilter() self.anti_filter = affinity.AntiAffinityFilter() def _make_filter_hints(self, *hints): return { 'context': None, 'scheduler_hints': {'same_host': ','.join(list(hints))}, } def _make_anti_filter_hints(self, *hints): return { 'context': None, 'scheduler_hints': {'different_host': ','.join(list(hints))}, } def _fake_get(self, context, uuid): if uuid in fake_shares_1.keys(): return fake_shares_1[uuid] if uuid in fake_shares_2.keys(): return fake_shares_2[uuid] if uuid in fake_shares_3.keys(): return fake_shares_3[uuid] raise exception.ShareNotFound(uuid) @ddt.data('b5c207da-ac0b-43b0-8691-c6c9e860199d') @mock.patch('manila.share.api.API.get') def 
test_affinity_share_not_found(self, unknown_id, mock_share_get): mock_share_get.side_effect = self._fake_get self.assertRaises(exception.ShareNotFound, self.filter._validate, self._make_filter_hints(unknown_id)) @ddt.data( {'context': None}, {'context': None, 'scheduler_hints': None}, {'context': None, 'scheduler_hints': {}}, ) def test_affinity_scheduler_hint_not_set(self, hints): self.assertRaises(affinity.SchedulerHintsNotSet, self.filter._validate, hints) @ mock.patch('manila.share.api.API.get') def test_affinity_filter(self, mock_share_get): mock_share_get.side_effect = self._fake_get share_ids = fake_shares_1.keys() hints = self._make_filter_hints(*share_ids) valid_hosts = self.filter.filter_all(fake_hosts, hints) valid_hosts = [h.host for h in valid_hosts] self.assertIn('host1', valid_hosts) self.assertNotIn('host2', valid_hosts) self.assertNotIn('host3', valid_hosts) @ mock.patch('manila.share.api.API.get') def test_anti_affinity_filter(self, mock_share_get): mock_share_get.side_effect = self._fake_get share_ids = fake_shares_2.keys() hints = self._make_anti_filter_hints(*share_ids) valid_hosts = self.anti_filter.filter_all(fake_hosts, hints) valid_hosts = [h.host for h in valid_hosts] self.assertIn('host1', valid_hosts) self.assertIn('host3', valid_hosts) self.assertNotIn('host2', valid_hosts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_availability_zone.py0000664000175000017500000000776000000000000026237 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For AvailabilityZoneFilter. """ import ddt from oslo_context import context from manila.scheduler.filters import availability_zone from manila import test from manila.tests.scheduler import fakes @ddt.ddt class HostFiltersTestCase(test.TestCase): """Test case for AvailabilityZoneFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.filter = availability_zone.AvailabilityZoneFilter() self.az_id = 'e3ecad6f-e984-4cd1-b149-d83c962374a8' self.fake_service = { 'service': { 'availability_zone_id': self.az_id, 'availability_zone': { 'name': 'nova', 'id': self.az_id } } } @staticmethod def _make_zone_request(zone, is_admin=False): ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin) return { 'context': ctxt, 'request_spec': { 'resource_properties': { 'availability_zone_id': zone } } } def test_availability_zone_filter_same(self): request = self._make_zone_request(self.az_id) host = fakes.FakeHostState('host1', self.fake_service) self.assertTrue(self.filter.host_passes(host, request)) def test_availability_zone_filter_different(self): request = self._make_zone_request('bad') host = fakes.FakeHostState('host1', self.fake_service) self.assertFalse(self.filter.host_passes(host, request)) def test_availability_zone_filter_empty(self): request = {} host = fakes.FakeHostState('host1', self.fake_service) self.assertTrue(self.filter.host_passes(host, request)) def test_availability_zone_filter_both_request_AZ_and_type_AZs_match(self): request = self._make_zone_request( '9382098d-d40f-42a2-8f31-8eb78ee18c02') 
request['request_spec']['availability_zones'] = [ 'nova', 'super nova', 'hypernova'] service = { 'availability_zone': { 'name': 'nova', 'id': '9382098d-d40f-42a2-8f31-8eb78ee18c02', }, 'availability_zone_id': '9382098d-d40f-42a2-8f31-8eb78ee18c02', } host = fakes.FakeHostState('host1', {'service': service}) self.assertTrue(self.filter.host_passes(host, request)) @ddt.data((['zone1', 'zone2', 'zone 4', 'zone3'], 'zone2', True), (['zone1zone2zone3'], 'zone2', False), (['zone1zone2zone3'], 'nova', False), (['zone1', 'zone2', 'zone 4', 'zone3'], 'zone 4', True)) @ddt.unpack def test_availability_zone_filter_only_share_type_AZs( self, supported_azs, request_az, host_passes): service = { 'availability_zone': { 'name': request_az, 'id': '9382098d-d40f-42a2-8f31-8eb78ee18c02', }, 'availability_zone_id': '9382098d-d40f-42a2-8f31-8eb78ee18c02', } request = self._make_zone_request(None) request['request_spec']['availability_zones'] = supported_azs request['request_spec']['az_request_multiple_subnet_support_map'] = \ {'zone2': 2} host = fakes.FakeHostState('host1', {'service': service}) self.assertEqual(host_passes, self.filter.host_passes(host, request)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_base.py0000664000175000017500000001263600000000000023442 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest import mock from manila.scheduler.filters import base from manila import test class TestBaseFilter(test.TestCase): def setUp(self): super(TestBaseFilter, self).setUp() self.filter = base.BaseFilter() def test_filter_one_is_called(self): filters = [1, 2, 3, 4] filter_properties = {'x': 'y'} side_effect = lambda value, props: value in [2, 3] # noqa: E731 self.mock_object(self.filter, '_filter_one', mock.Mock(side_effect=side_effect)) result = list(self.filter.filter_all(filters, filter_properties)) self.assertEqual([2, 3], result) class FakeExtension(object): def __init__(self, plugin): self.plugin = plugin class BaseFakeFilter(base.BaseFilter): pass class FakeFilter1(BaseFakeFilter): """Derives from BaseFakeFilter and has a fake entry point defined. Entry point is returned by fake ExtensionManager. Should be included in the output of all_classes. """ class FakeFilter2(BaseFakeFilter): """Derives from BaseFakeFilter but has no entry point. Should be not included in all_classes. """ class FakeFilter3(base.BaseFilter): """Does not derive from BaseFakeFilter. Should not be included. """ class FakeFilter4(BaseFakeFilter): """Derives from BaseFakeFilter and has an entry point. Should be included. """ class FakeFilter5(BaseFakeFilter): """Derives from BaseFakeFilter but has no entry point. Should not be included. 
""" run_filter_once_per_request = True class FakeExtensionManager(list): def __init__(self, namespace): classes = [FakeFilter1, FakeFilter3, FakeFilter4] exts = map(FakeExtension, classes) super(FakeExtensionManager, self).__init__(exts) self.namespace = namespace class TestBaseFilterHandler(test.TestCase): def setUp(self): super(TestBaseFilterHandler, self).setUp() self.mock_object(base.base_handler.extension, 'ExtensionManager', FakeExtensionManager) self.handler = base.BaseFilterHandler(BaseFakeFilter, 'fake_filters') def test_get_all_classes(self): # In order for a FakeFilter to be returned by get_all_classes, it has # to comply with these rules: # * It must be derived from BaseFakeFilter # AND # * It must have a python entrypoint assigned (returned by # FakeExtensionManager) expected = [FakeFilter1, FakeFilter4] result = self.handler.get_all_classes() self.assertEqual(expected, result) def _get_filtered_objects(self, filter_classes, index=0): filter_objs_initial = [1, 2, 3, 4] filter_properties = {'x': 'y'} return self.handler.get_filtered_objects(filter_classes, filter_objs_initial, filter_properties, index) @mock.patch.object(FakeFilter4, 'filter_all') @mock.patch.object(FakeFilter3, 'filter_all', return_value=None) def test_get_filtered_objects_return_none(self, fake3_filter_all, fake4_filter_all): filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4] result, last_filter = self._get_filtered_objects(filter_classes) self.assertIsNone(result) self.assertFalse(fake4_filter_all.called) self.assertEqual('FakeFilter3', last_filter) def test_get_filtered_objects(self): filter_objs_expected = [1, 2, 3, 4] filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4] result, last_filter = self._get_filtered_objects(filter_classes) self.assertEqual(filter_objs_expected, result) self.assertEqual('FakeFilter4', last_filter) def test_get_filtered_objects_with_filter_run_once(self): filter_objs_expected = [1, 2, 3, 4] filter_classes = 
[FakeFilter5] with mock.patch.object(FakeFilter5, 'filter_all', return_value=filter_objs_expected ) as fake5_filter_all: result, last_filter = self._get_filtered_objects(filter_classes) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) result, last_filter = self._get_filtered_objects( filter_classes, index=1) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) result, last_filter = self._get_filtered_objects( filter_classes, index=2) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_base_host.py0000664000175000017500000000335400000000000024474 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Host Filters. 
""" from oslo_serialization import jsonutils from manila.scheduler.filters import base_host from manila import test class TestFilter(test.TestCase): pass class TestBogusFilter(object): """Class that doesn't inherit from BaseHostFilter.""" pass class HostFiltersTestCase(test.TestCase): """Test case for host filters.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024]]) namespace = 'manila.scheduler.filters' filter_handler = base_host.HostFilterHandler(namespace) classes = filter_handler.get_all_classes() self.class_map = {} for cls in classes: self.class_map[cls.__name__] = cls def test_all_filters(self): # Double check at least a couple of known filters exist self.assertIn('JsonFilter', self.class_map) self.assertIn('CapabilitiesFilter', self.class_map) self.assertIn('AvailabilityZoneFilter', self.class_map) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_capabilities.py0000664000175000017500000001650700000000000025162 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For CapabilitiesFilter. 
""" import ddt from manila.scheduler.filters import capabilities from manila import test from manila.tests.scheduler import fakes @ddt.ddt class HostFiltersTestCase(test.TestCase): """Test case for CapabilitiesFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.filter = capabilities.CapabilitiesFilter() def _do_test_type_filter_extra_specs(self, ecaps, especs, passes): capabilities = {'enabled': True} capabilities.update(ecaps) service = {'disabled': False} filter_properties = {'resource_type': {'name': 'fake_type', 'extra_specs': especs}} host = fakes.FakeHostState('host1', {'free_capacity_gb': 1024, 'capabilities': capabilities, 'service': service}) assertion = self.assertTrue if passes else self.assertFalse assertion(self.filter.host_passes(host, filter_properties)) def test_mount_point_name_support_pass(self): capabilities = {'mount_point_name_support': True} service = {'disabled': False} filter_properties = { 'resource_type': { 'request_spec': { 'share_properties': { 'mount_point_name': 'fake_mp', } } } } host = fakes.FakeHostState('host1', {'free_capacity_gb': 1024, 'capabilities': capabilities, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_capability_filter_passes_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2'}, passes=True) def test_capability_filter_passes_extra_specs_ignore_azs_spec(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2', 'availability_zones': 'az1,az2'}, passes=True) def test_capability_filter_fails_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '222'}, passes=False) def test_capability_filter_passes_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '<= 8'}, passes=True) 
def test_capability_filter_fails_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '>= 8'}, passes=False) def test_capability_filter_passes_extra_specs_list_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': ['1', '2'], 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2'}, passes=True) @ddt.data(' True', ' False') def test_capability_filter_passes_extra_specs_list_complex(self, opt1): self._do_test_type_filter_extra_specs( ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, especs={'opt1': opt1, 'opt2': '<= 8'}, passes=True) def test_capability_filter_fails_extra_specs_list_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': ['1', '2'], 'opt2': ['2']}, especs={'opt1': '3', 'opt2': '2'}, passes=False) def test_capability_filter_fails_extra_specs_list_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, especs={'opt1': 'fake', 'opt2': '<= 8'}, passes=False) def test_capability_filter_passes_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=True) def test_capability_filter_passes_fakescope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}, 'opt2': 5}, especs={'scope_lv1:opt1': '= 2', 'opt2': '>= 3'}, passes=True) def test_capability_filter_fails_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '<= 2'}, passes=False) def test_capability_filter_passes_multi_level_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'scope_lv1': {'scope_lv2': {'opt1': 10}}}}, especs={'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '>= 2'}, passes=True) def test_capability_filter_fails_wrong_scope_extra_specs(self): self._do_test_type_filter_extra_specs( 
ecaps={'scope_lv0': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=False) def test_capability_filter_passes_multi_level_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={ 'scope_lv0': { 'scope_lv1': { 'scope_lv2': { 'opt1': [True, False], }, }, }, }, especs={ 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': ' True', }, passes=True) def test_capability_filter_fails_multi_level_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={ 'scope_lv0': { 'scope_lv1': { 'scope_lv2': { 'opt1': [True, False], 'opt2': ['1', '2'], }, }, }, }, especs={ 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': ' True', 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt2': '3', }, passes=False) def test_capability_filter_fails_wrong_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': [True, False]}}, especs={'capabilities:scope_lv1:opt1': ' True'}, passes=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_capacity.py0000664000175000017500000004371700000000000024331 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For CapacityFilter. 
""" import ddt from manila.scheduler.filters import capacity from manila import test from manila.tests.scheduler import fakes from manila import utils @ddt.ddt class HostFiltersTestCase(test.TestCase): """Test case CapacityFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.filter = capacity.CapacityFilter() def _stub_service_is_up(self, ret_value): def fake_service_is_up(service): return ret_value self.mock_object(utils, 'service_is_up', fake_service_is_up) @ddt.data( {'size': 100, 'share_on': None, 'host': 'host1'}, {'size': 100, 'share_on': 'host1#pool1', 'host': 'host1#pools1'}) @ddt.unpack def test_capacity_filter_passes(self, size, share_on, host): self._stub_service_is_up(True) filter_properties = {'size': size, 'share_exists_on': share_on} service = {'disabled': False} host = fakes.FakeHostState(host, {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free_capacity': 120, 'total_capacity': 200, 'reserved': 20}, {'free_capacity': None, 'total_capacity': None, 'reserved': None}) @ddt.unpack def test_capacity_filter_fails(self, free_capacity, total_capacity, reserved): self._stub_service_is_up(True) filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'reserved_percentage': reserved, 'updated_at': None, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free_capacity': 120, 'total_capacity': 200, 'reserved': 20, 'reserved_snapshot': 5}) @ddt.unpack def test_capacity_filter_passes_snapshot_reserved(self, free_capacity, total_capacity, reserved, reserved_snapshot): self._stub_service_is_up(True) filter_properties = {'size': 100, 'snapshot_id': 1234} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': total_capacity, 
'free_capacity_gb': free_capacity, 'reserved_percentage': reserved, 'reserved_snapshot_percentage': reserved_snapshot, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free_capacity': 120, 'total_capacity': 200, 'reserved': 20, 'reserved_snapshot': 15}) @ddt.unpack def test_capacity_filter_fails_snapshot_reserved(self, free_capacity, total_capacity, reserved, reserved_snapshot): self._stub_service_is_up(True) filter_properties = {'size': 100, 'snapshot_id': 1234} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'reserved_percentage': reserved, 'reserved_snapshot_percentage': reserved_snapshot, 'updated_at': None, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free_capacity': 120, 'total_capacity': 200, 'reserved': 20, 'reserved_share_extend_percentage': 5}) @ddt.unpack def test_capacity_filter_passes_share_extend_reserved( self, free_capacity, total_capacity, reserved, reserved_share_extend_percentage): self._stub_service_is_up(True) filter_properties = {'size': 100, 'is_share_extend': True} service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'reserved_percentage': reserved, 'reserved_share_extend_percentage': reserved_share_extend_percentage, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free_capacity': 120, 'total_capacity': 200, 'reserved': 20, 'reserved_share_extend_percentage': 15}) @ddt.unpack def test_capacity_filter_fails_share_extend_reserved( self, free_capacity, total_capacity, reserved, reserved_share_extend_percentage): self._stub_service_is_up(True) filter_properties = {'size': 100, 'is_share_extend': True} service = {'disabled': False} host = fakes.FakeHostState('host1', 
{'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'reserved_percentage': reserved, 'reserved_share_extend_percentage': reserved_share_extend_percentage, 'updated_at': None, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_capacity_filter_passes_unknown(self): free = 'unknown' self._stub_service_is_up(True) filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': free, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free_capacity': 'unknown', 'total_capacity': 'unknown'}, {'free_capacity': 200, 'total_capacity': 'unknown'}) @ddt.unpack def test_capacity_filter_passes_total(self, free_capacity, total_capacity): self._stub_service_is_up(True) filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': free_capacity, 'total_capacity_gb': total_capacity, 'reserved_percentage': 0, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'free': 200, 'total': 'unknown', 'reserved': 5}, {'free': 50, 'total': 'unknown', 'reserved': 0}, {'free': 200, 'total': 0, 'reserved': 0}) @ddt.unpack def test_capacity_filter_fails_total(self, free, total, reserved): self._stub_service_is_up(True) filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_capacity_gb': free, 'total_capacity_gb': total, 'reserved_percentage': reserved, 'updated_at': None, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) @ddt.data( {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 500, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 3000, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 7000, 
'max_ratio': 20, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': ' False', 'total': 500, 'free': 200, 'provisioned': 300, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': False, 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 400, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 125, 'provisioned': 400, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 80, 'provisioned': 600, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 2.0, 'reserved': 0, 'thin_prov': True, 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 500, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': [True, False], 'cap_thin_key': 'thin_provisioning'}, {'size': 3000, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 7000, 'max_ratio': 20, 'reserved': 5, 'thin_prov': [True], 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': ' False', 'total': 500, 'free': 200, 'provisioned': 300, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': [False], 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': 'True', 'total': 500, 'free': 200, 'provisioned': 400, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': [False, True], 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': 'False', 'total': 500, 'free': 200, 'provisioned': 300, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': False, 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': 'true', 'total': 500, 'free': 125, 'provisioned': 400, 'max_ratio': 2.0, 
'reserved': 5, 'thin_prov': [True, ], 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': 'false', 'total': 500, 'free': 200, 'provisioned': 300, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': [False, ], 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': None, 'total': 500, 'free': 80, 'provisioned': 600, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': None},) @ddt.unpack def test_filter_thin_passes(self, size, cap_thin, total, free, provisioned, max_ratio, reserved, thin_prov, cap_thin_key): self._stub_service_is_up(True) filter_properties = { 'size': size, 'snapshot_id': None, 'share_type': { 'extra_specs': { cap_thin_key: cap_thin, } } } service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': total, 'free_capacity_gb': free, 'provisioned_capacity_gb': provisioned, 'max_over_subscription_ratio': max_ratio, 'reserved_percentage': reserved, 'thin_provisioning': thin_prov, 'updated_at': None, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data( {'size': 200, 'cap_thin': ' True', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 0.8, 'reserved': 0, 'thin_prov': True, 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 700, 'max_ratio': 1.5, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'thin_provisioning'}, {'size': 2000, 'cap_thin': ' True', 'total': 500, 'free': 30, 'provisioned': 9000, 'max_ratio': 20.0, 'reserved': 0, 'thin_prov': True, 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 100, 'provisioned': 1000, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': ' False', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': False, 'cap_thin_key': 'capabilities:thin_provisioning'}, 
{'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 0, 'provisioned': 800, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': ' True', 'total': 500, 'free': 99, 'provisioned': 1000, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 400, 'cap_thin': ' True', 'total': 500, 'free': 200, 'provisioned': 600, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': True, 'cap_thin_key': 'thin_provisioning'}, {'size': 200, 'cap_thin': ' True', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 0.8, 'reserved': 0, 'thin_prov': [False, True], 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 2000, 'cap_thin': ' True', 'total': 500, 'free': 30, 'provisioned': 9000, 'max_ratio': 20.0, 'reserved': 0, 'thin_prov': [True], 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': ' False', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': [False], 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': 'False', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': False, 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': 'True', 'total': 500, 'free': 0, 'provisioned': 800, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': [False, True], 'cap_thin_key': 'thin_provisioning'}, {'size': 100, 'cap_thin': 'true', 'total': 500, 'free': 99, 'provisioned': 1000, 'max_ratio': 2.0, 'reserved': 5, 'thin_prov': [True, ], 'cap_thin_key': 'capabilities:thin_provisioning'}, {'size': 100, 'cap_thin': 'false', 'total': 500, 'free': 100, 'provisioned': 400, 'max_ratio': 1.0, 'reserved': 5, 'thin_prov': [False, ], 'cap_thin_key': 'thin_provisioning'}, {'size': 2000, 'cap_thin': None, 'total': 500, 'free': 30, 'provisioned': 9000, 'max_ratio': 20.0, 'reserved': 0, 'thin_prov': [True], 'cap_thin_key': None},) @ddt.unpack def 
test_filter_thin_fails(self, size, cap_thin, total, free, provisioned, max_ratio, reserved, thin_prov, cap_thin_key): self._stub_service_is_up(True) filter_properties = { 'size': size, 'share_type': { 'extra_specs': { cap_thin_key: cap_thin, } } } service = {'disabled': False} host = fakes.FakeHostState('host1', {'total_capacity_gb': total, 'free_capacity_gb': free, 'provisioned_capacity_gb': provisioned, 'max_over_subscription_ratio': max_ratio, 'reserved_percentage': reserved, 'thin_provisioning': thin_prov, 'updated_at': None, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_create_from_snapshot.py0000664000175000017500000000707600000000000026737 0ustar00zuulzuul00000000000000# Copyright 2020 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for the CreateFromSnapshotFilter. 
""" import ddt from manila.scheduler.filters import create_from_snapshot from manila import test from manila.tests.scheduler import fakes @ddt.ddt class CreateFromSnapshotFilterTestCase(test.TestCase): """Test case for CreateFromSnapshotFilter.""" def setUp(self): super(CreateFromSnapshotFilterTestCase, self).setUp() self.filter = create_from_snapshot.CreateFromSnapshotFilter() @staticmethod def _create_request(snapshot_id=None, snapshot_host=None, replication_domain=None): return { 'request_spec': { 'snapshot_id': snapshot_id, 'snapshot_host': snapshot_host, }, 'replication_domain': replication_domain, } @staticmethod def _create_host_state(host=None, rep_domain=None): return fakes.FakeHostState(host, { 'replication_domain': rep_domain, }) def test_without_snapshot_id(self): request = self._create_request() host = self._create_host_state(host='fake_host') self.assertTrue(self.filter.host_passes(host, request)) def test_without_snapshot_host(self): request = self._create_request(snapshot_id='fake_snapshot_id', replication_domain="fake_domain") host = self._create_host_state(host='fake_host', rep_domain='fake_domain_2') self.assertTrue(self.filter.host_passes(host, request)) @ddt.data(('host1@AAA#pool1', 'host1@AAA#pool1'), ('host1@AAA#pool1', 'host1@AAA#pool2')) @ddt.unpack def test_same_backend(self, request_host, host_state): request = self._create_request(snapshot_id='fake_snapshot_id', snapshot_host=request_host) host = self._create_host_state(host=host_state) self.assertTrue(self.filter.host_passes(host, request)) def test_same_availability_zone(self): request = self._create_request(snapshot_id='fake_snapshot_id', snapshot_host='fake_host', replication_domain="fake_domain") host = self._create_host_state(host='fake_host_2', rep_domain='fake_domain') self.assertTrue(self.filter.host_passes(host, request)) def test_different_backend_and_availability_zone(self): request = self._create_request(snapshot_id='fake_snapshot_id', snapshot_host='fake_host', 
replication_domain="fake_domain") host = self._create_host_state(host='fake_host_2', rep_domain='fake_domain_2') self.assertFalse(self.filter.host_passes(host, request)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_driver.py0000664000175000017500000001161200000000000024014 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.scheduler.filters import driver from manila import test from manila.tests.scheduler import fakes class HostFiltersTestCase(test.TestCase): def setUp(self): super(HostFiltersTestCase, self).setUp() self.filter = driver.DriverFilter() def test_passing_function(self): host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 == 1', } }) filter_properties = {'share_type': {}} self.assertTrue(self.filter.host_passes(host1, filter_properties)) def test_failing_function(self): host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 == 2', } }) filter_properties = {'share_type': {}} self.assertFalse(self.filter.host_passes(host1, filter_properties)) def test_no_filter_function(self): host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': None, } }) filter_properties = {'share_type': {}} self.assertTrue(self.filter.host_passes(host1, filter_properties)) def test_not_implemented(self): host1 = fakes.FakeHostState( 'host1', { 'capabilities': {} }) filter_properties = {'share_type': {}} self.assertTrue(self.filter.host_passes(host1, filter_properties)) def test_no_share_extra_specs(self): host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 == 1', } }) filter_properties = {'share_type': {}} self.assertTrue(self.filter.host_passes(host1, filter_properties)) def test_function_extra_spec_replacement(self): host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': 'extra.var == 1', } }) filter_properties = { 'share_type': { 'extra_specs': { 'var': 1, } } } self.assertTrue(self.filter.host_passes(host1, filter_properties)) def test_function_stats_replacement(self): host1 = fakes.FakeHostState( 'host1', { 'total_capacity_gb': 100, 'capabilities': { 'filter_function': 'stats.total_capacity_gb < 200', } }) filter_properties = {'share_type': {}} self.assertTrue(self.filter.host_passes(host1, filter_properties)) def test_function_share_replacement(self): host1 = 
fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': 'share.size < 5', } }) filter_properties = { 'request_spec': { 'resource_properties': { 'size': 1 } } } self.assertTrue(self.filter.host_passes(host1, filter_properties)) def test_function_exception_caught(self): host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'filter_function': '1 / 0 == 0', } }) filter_properties = {} self.assertFalse(self.filter.host_passes(host1, filter_properties)) def test_capabilities(self): host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'foo': 10, 'filter_function': 'capabilities.foo == 10', }, }) filter_properties = {} self.assertTrue(self.filter.host_passes(host1, filter_properties)) def test_wrong_capabilities(self): host1 = fakes.FakeHostState( 'host1', { 'capabilities': { 'bar': 10, 'filter_function': 'capabilities.foo == 10', }, }) filter_properties = {} self.assertFalse(self.filter.host_passes(host1, filter_properties)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_extra_specs_ops.py0000664000175000017500000000522200000000000025722 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Host Filters. 
""" import ddt from manila.scheduler.filters import extra_specs_ops from manila import test @ddt.ddt class ExtraSpecsOpsTestCase(test.TestCase): def _do_extra_specs_ops_test(self, value, req, matches): assertion = self.assertTrue if matches else self.assertFalse assertion(extra_specs_ops.match(value, req)) @ddt.unpack @ddt.data( ('1', '1', True), ('', '1', False), ('3', '1', False), ('222', '2', False), ('4', '> 2', False), ('123', '= 123', True), ('124', '= 123', True), ('34', '=234', False), ('34', '=', False), ('123', 's== 123', True), ('1234', 's== 123', False), ('1234', 's!= 123', True), ('123', 's!= 123', False), ('1000', 's>= 234', False), ('1234', 's<= 1000', False), ('2', 's< 12', False), ('12', 's> 2', False), ('12311321', ' 11', True), ('12311321', ' 12311321', True), ('12311321', ' 12311321 ', True), ('12310321', ' 11', False), ('12310321', ' 11 ', False), ('abc', ' ABC', True), (True, 'True', True), (True, ' True', True), (True, ' False', False), (False, 'False', True), (False, ' False', True), (False, ' True', False), (False, 'Nonsense', False), (False, ' Nonsense', True), (True, 'False', False), (False, 'True', False), ('12', ' 11 12', True), ('13', ' 11 12', False), ('13', ' 11 12 ', False), ('abc', ' ABC def', True), ('2', '<= 10', True), ('3', '<= 2', False), ('3', '>= 1', True), ('2', '>= 3', False), ('nfs', 'NFS', True), ('NFS', 'nfs', True), ('cifs', 'nfs', False), ) def test_extra_specs_matches_simple(self, value, req, matches): self._do_extra_specs_ops_test( value, req, matches) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_host.py0000664000175000017500000000452700000000000023505 0ustar00zuulzuul00000000000000# Copyright 2021 Cloudification GmbH. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from manila import context from manila.scheduler.filters import host from manila import test from manila.tests.scheduler import fakes fake_host1 = fakes.FakeHostState('host1', {}) fake_host2 = fakes.FakeHostState('host2', {}) @ddt.ddt class OnlyHostFilterTestCase(test.TestCase): """Test case for OnlyHostFilter.""" def setUp(self): super(OnlyHostFilterTestCase, self).setUp() self.filter = host.OnlyHostFilter() self.user_context = context.RequestContext('user', 'project') self.admin_context = context.RequestContext('user', 'project', is_admin=True) def _make_filter_properties(self, hint): return { 'context': self.admin_context, 'scheduler_hints': hint, } @ddt.data((fake_host1, {'scheduler_hints': None}), (fake_host1, {'scheduler_hints': {}}), (fake_host1, {'scheduler_hints': {'only_host': fake_host2.host}})) @ddt.unpack def test_only_host_filter_user_context(self, host, filter_properties): context = {'context': self.user_context} filter_properties.update(context) self.assertTrue(self.filter.host_passes(host, filter_properties)) @ddt.data((fake_host1, None, True), (fake_host1, {}, True), (fake_host1, {'only_host': fake_host1.host}, True), (fake_host2, {'only_host': fake_host1.host}, False)) @ddt.unpack def test_only_host_filter_admin_context(self, host, hint, host_passes): filter_properties = self._make_filter_properties(hint) self.assertEqual(host_passes, self.filter.host_passes(host, filter_properties)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/scheduler/filters/test_ignore_attempted_hosts.py0000664000175000017500000000365100000000000027277 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For IgnoreAttemptedHost filter. """ from manila.scheduler.filters import ignore_attempted_hosts from manila import test from manila.tests.scheduler import fakes class HostFiltersTestCase(test.TestCase): """Test case for IgnoreAttemptedHost filter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.filter = ignore_attempted_hosts.IgnoreAttemptedHostsFilter() def test_ignore_attempted_hosts_filter_disabled(self): # Test case where re-scheduling is disabled. host = fakes.FakeHostState('host1', {}) filter_properties = {} self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_ignore_attempted_hosts_filter_pass(self): # Node not previously tried. host = fakes.FakeHostState('host1', {}) attempted = dict(num_attempts=2, hosts=['host2']) filter_properties = dict(retry=attempted) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_ignore_attempted_hosts_filter_fail(self): # Node was already tried. 
host = fakes.FakeHostState('host1', {}) attempted = dict(num_attempts=2, hosts=['host1']) filter_properties = dict(retry=attempted) self.assertFalse(self.filter.host_passes(host, filter_properties)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_json.py0000664000175000017500000003325400000000000023500 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For JsonFilter. 
""" from oslo_serialization import jsonutils from manila.scheduler.filters import json from manila import test from manila.tests.scheduler import fakes class HostFiltersTestCase(test.TestCase): """Test case for JsonFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024]]) self.filter = json.JsonFilter() def test_json_filter_passes(self): filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_json_filter_passes_with_no_query(self): filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 0, 'free_disk_mb': 0, 'capabilities': capabilities}) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_json_filter_fails_on_memory(self): filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1023, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_fails_on_disk(self): filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': (200 * 1024) - 1, 'capabilities': capabilities}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def 
test_json_filter_fails_on_caps_disabled(self): json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], '$capabilities.enabled']) filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': json_query}} capabilities = {'enabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_fails_on_service_disabled(self): json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], ['not', '$service.disabled']]) filter_properties = {'resource_type': {'memory_mb': 1024, 'local_gb': 200}, 'scheduler_hints': {'query': json_query}} capabilities = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_happy_day(self): """Test json filter more thoroughly.""" raw = ['and', '$capabilities.enabled', ['=', '$capabilities.opt1', 'match'], ['or', ['and', ['<', '$free_ram_mb', 30], ['<', '$free_disk_mb', 300]], ['and', ['>', '$free_ram_mb', 30], ['>', '$free_disk_mb', 300]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } # Passes capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 10, 'free_disk_mb': 200, 'capabilities': capabilities, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) # Passes capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertTrue(self.filter.host_passes(host, filter_properties)) # 
Fails due to capabilities being disabled capabilities = {'enabled': False, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) # Fails due to being exact memory/disk we don't want capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 30, 'free_disk_mb': 300, 'capabilities': capabilities, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) # Fails due to memory lower but disk higher capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeHostState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) # Fails due to capabilities 'opt1' not equal capabilities = {'enabled': True, 'opt1': 'no-match'} service = {'enabled': True} host = fakes.FakeHostState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_basic_operators(self): host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) # (operator, arguments, expected_result) ops_to_test = [ ['=', [1, 1], True], ['=', [1, 2], False], ['<', [1, 2], True], ['<', [1, 1], False], ['<', [2, 1], False], ['>', [2, 1], True], ['>', [2, 2], False], ['>', [2, 3], False], ['<=', [1, 2], True], ['<=', [1, 1], True], ['<=', [2, 1], False], ['>=', [2, 1], True], ['>=', [2, 2], True], ['>=', [2, 3], False], ['in', [1, 1], True], ['in', [1, 1, 2, 3], True], ['in', [4, 1, 2, 3], False], ['not', [True], False], ['not', [False], True], ['or', [True, False], True], ['or', [False, False], False], ['and', [True, True], True], ['and', [False, 
False], False], ['and', [True, False], False], # Nested ((True or False) and (2 > 1)) == Passes ['and', [['or', True, False], ['>', 2, 1]], True]] for (op, args, expected) in ops_to_test: raw = [op] + args filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertEqual(expected, self.filter.host_passes(host, filter_properties)) # This results in [False, True, False, True] and if any are True # then it passes... raw = ['not', True, False, True, False] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) # This results in [False, False, False] and if any are True # then it passes...which this doesn't raw = ['not', True, True, True] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_unknown_operator_raises(self): raw = ['!=', 1, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) self.assertRaises(KeyError, self.filter.host_passes, host, filter_properties) def test_json_filter_type_errror_passes(self): filter_properties = { 'scheduler_hints': None } host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_json_filter_empty_filters_pass(self): host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = [] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) raw = {} filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_json_filter_invalid_num_arguments_fails(self): host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) 
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(self.filter.host_passes(host, filter_properties)) raw = ['>', 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(self.filter.host_passes(host, filter_properties)) def test_json_filter_unknown_variable_ignored(self): host = fakes.FakeHostState('host1', {'capabilities': {'enabled': True}}) raw = ['=', '$........', 1, 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) raw = ['=', '$foo', 2, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(self.filter.host_passes(host, filter_properties)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_retry.py0000664000175000017500000000342500000000000023671 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For RetryFilter. 
""" from manila.scheduler.filters import retry from manila import test from manila.tests.scheduler import fakes class HostFiltersTestCase(test.TestCase): """Test case for RetryFilter.""" def setUp(self): super(HostFiltersTestCase, self).setUp() self.filter = retry.RetryFilter() def test_retry_filter_disabled(self): # Test case where retry/re-scheduling is disabled. host = fakes.FakeHostState('host1', {}) filter_properties = {} self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_retry_filter_pass(self): # Node not previously tried. host = fakes.FakeHostState('host1', {}) retry = dict(num_attempts=2, hosts=['host2']) filter_properties = dict(retry=retry) self.assertTrue(self.filter.host_passes(host, filter_properties)) def test_retry_filter_fail(self): # Node was already tried. host = fakes.FakeHostState('host1', {}) retry = dict(num_attempts=1, hosts=['host1']) filter_properties = dict(retry=retry) self.assertFalse(self.filter.host_passes(host, filter_properties)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/filters/test_share_replication.py0000664000175000017500000001166100000000000026220 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for the ShareReplicationFilter. 
""" import ddt from oslo_context import context from manila.scheduler.filters import share_replication from manila import test from manila.tests.scheduler import fakes @ddt.ddt class ShareReplicationFilterTestCase(test.TestCase): """Test case for ShareReplicationFilter.""" def setUp(self): super(ShareReplicationFilterTestCase, self).setUp() self.filter = share_replication.ShareReplicationFilter() self.debug_log = self.mock_object(share_replication.LOG, 'debug') @staticmethod def _create_replica_request(replication_domain='kashyyyk', replication_type='dr', active_replica_host=fakes.FAKE_HOST_STRING_1, all_replica_hosts=fakes.FAKE_HOST_STRING_1, is_admin=False): ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin) return { 'context': ctxt, 'request_spec': { 'active_replica_host': active_replica_host, 'all_replica_hosts': all_replica_hosts, }, 'resource_type': { 'extra_specs': { 'replication_type': replication_type, }, }, 'replication_domain': replication_domain, } @ddt.data('tatooine', '') def test_share_replication_filter_fails_incompatible_domain(self, domain): request = self._create_replica_request() host = fakes.FakeHostState('host1', { 'replication_domain': domain, }) self.assertFalse(self.filter.host_passes(host, request)) self.assertTrue(self.debug_log.called) def test_share_replication_filter_fails_no_replication_domain(self): request = self._create_replica_request() host = fakes.FakeHostState('host1', { 'replication_domain': None, }) self.assertFalse(self.filter.host_passes(host, request)) self.assertTrue(self.debug_log.called) def test_share_replication_filter_fails_host_has_replicas(self): all_replica_hosts = ','.join(['host1', fakes.FAKE_HOST_STRING_1]) request = self._create_replica_request( all_replica_hosts=all_replica_hosts) host = fakes.FakeHostState('host1', { 'replication_domain': 'kashyyyk', }) self.assertFalse(self.filter.host_passes(host, request)) self.assertTrue(self.debug_log.called) def 
test_share_replication_filter_passes_no_replication_type(self): request = self._create_replica_request(replication_type=None) host = fakes.FakeHostState('host1', { 'replication_domain': 'tatooine', }) self.assertTrue(self.filter.host_passes(host, request)) def test_share_replication_filter_passes_no_active_replica_host(self): request = self._create_replica_request(active_replica_host=None) host = fakes.FakeHostState('host1', { 'replication_domain': 'tatooine', }) self.assertTrue(self.filter.host_passes(host, request)) def test_share_replication_filter_passes_happy_day(self): all_replica_hosts = ','.join(['host1', fakes.FAKE_HOST_STRING_1]) request = self._create_replica_request( all_replica_hosts=all_replica_hosts) host = fakes.FakeHostState('host2', { 'replication_domain': 'kashyyyk', }) self.assertTrue(self.filter.host_passes(host, request)) def test_share_replication_filter_empty(self): request = {} host = fakes.FakeHostState('host1', { 'replication_domain': 'naboo', }) self.assertTrue(self.filter.host_passes(host, request)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/test_host_manager.py0000664000175000017500000016241300000000000023526 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack, LLC # Copyright (c) 2015 Rushil Chugh # Copyright (c) 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests For HostManager """ import copy from unittest import mock import ddt from oslo_config import cfg from oslo_utils import timeutils from manila import context from manila import db from manila import exception from manila.scheduler.filters import base_host from manila.scheduler import host_manager from manila.scheduler import utils as scheduler_utils from manila import test from manila.tests.scheduler import fakes from manila import utils CONF = cfg.CONF class FakeFilterClass1(base_host.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass class FakeFilterClass2(base_host.BaseHostFilter): def host_passes(self, host_state, filter_properties): pass @ddt.ddt class HostManagerTestCase(test.TestCase): """Test case for HostManager class.""" def setUp(self): super(HostManagerTestCase, self).setUp() self.host_manager = host_manager.HostManager() self.fake_hosts = [host_manager.HostState('fake_host%s' % x) for x in range(1, 5)] def test_choose_host_filters_not_found(self): self.flags(scheduler_default_filters='FakeFilterClass3') self.host_manager.filter_classes = [FakeFilterClass1, FakeFilterClass2] self.assertRaises(exception.SchedulerHostFilterNotFound, self.host_manager._choose_host_filters, None) def test_choose_host_filters(self): self.flags(scheduler_default_filters=['FakeFilterClass2']) self.host_manager.filter_classes = [FakeFilterClass1, FakeFilterClass2] # Test 'share' returns 1 correct function filter_classes = self.host_manager._choose_host_filters(None) self.assertEqual(1, len(filter_classes)) self.assertEqual('FakeFilterClass2', filter_classes[0].__name__) def _verify_result(self, info, result): for x in info['got_fprops']: self.assertEqual(info['expected_fprops'], x) self.assertEqual(set(info['expected_objs']), set(info['got_objs'])) self.assertEqual(set(info['got_objs']), set(result)) def test_get_filtered_hosts(self): fake_properties = {'moo': 1, 'cow': 2} info = { 'expected_objs': self.fake_hosts, 'expected_fprops': 
fake_properties, } with mock.patch.object(self.host_manager, '_choose_host_filters', mock.Mock(return_value=[FakeFilterClass1])): info['got_objs'] = [] info['got_fprops'] = [] def fake_filter_one(_self, obj, filter_props): info['got_objs'].append(obj) info['got_fprops'].append(filter_props) return True self.mock_object(FakeFilterClass1, '_filter_one', fake_filter_one) result, last_filter = self.host_manager.get_filtered_hosts( self.fake_hosts, fake_properties) self._verify_result(info, result) self.host_manager._choose_host_filters.assert_called_once_with( mock.ANY) def test_update_service_capabilities_for_shares(self): service_states = self.host_manager.service_states self.assertDictEqual(service_states, {}) host1_share_capabs = dict(free_capacity_gb=4321, timestamp=1) host2_share_capabs = dict(free_capacity_gb=5432, timestamp=1) host3_share_capabs = dict(free_capacity_gb=6543, timestamp=1) service_name = 'share' self.host_manager.update_service_capabilities( service_name, 'host1', host1_share_capabs, 31337) self.host_manager.update_service_capabilities( service_name, 'host2', host2_share_capabs, 31338) self.host_manager.update_service_capabilities( service_name, 'host3', host3_share_capabs, 31339) # Make sure dictionary isn't re-assigned self.assertEqual(service_states, self.host_manager.service_states) # Make sure original dictionary wasn't copied self.assertEqual(1, host1_share_capabs['timestamp']) host1_share_capabs['timestamp'] = 31337 host2_share_capabs['timestamp'] = 31338 host3_share_capabs['timestamp'] = 31339 expected = { 'host1': host1_share_capabs, 'host2': host2_share_capabs, 'host3': host3_share_capabs, } self.assertDictEqual(service_states, expected) def test_get_all_host_states_share(self): fake_context = context.RequestContext('user', 'project') topic = CONF.share_topic tmp_pools = copy.deepcopy(fakes.SHARE_SERVICES_WITH_POOLS) tmp_enable_pools = tmp_pools[:-2] self.mock_object( db, 'service_get_all_by_topic', 
mock.Mock(return_value=tmp_enable_pools)) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) with mock.patch.dict(self.host_manager.service_states, fakes.SHARE_SERVICE_STATES_WITH_POOLS): # Get service self.host_manager.get_all_host_states_share(fake_context) # Disabled one service tmp_enable_pools.pop() self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=tmp_enable_pools)) # Get service again self.host_manager.get_all_host_states_share(fake_context) host_state_map = self.host_manager.host_state_map self.assertEqual(3, len(host_state_map)) # Check that service is up for i in range(3): share_node = fakes.SHARE_SERVICES_WITH_POOLS[i] host = share_node['host'] self.assertEqual(share_node, host_state_map[host].service) db.service_get_all_by_topic.assert_called_once_with( fake_context, topic, consider_disabled=False) def test_get_pools_no_pools(self): fake_context = context.RequestContext('user', 'project') self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=fakes.SHARE_SERVICES_NO_POOLS)) host_manager.LOG.warning = mock.Mock() with mock.patch.dict(self.host_manager.service_states, fakes.SERVICE_STATES_NO_POOLS): res = self.host_manager.get_pools(context=fake_context) expected = [ { 'name': 'host1#AAA', 'host': 'host1', 'backend': None, 'pool': 'AAA', 'capabilities': { 'timestamp': None, 'share_backend_name': 'AAA', 'free_capacity_gb': 200, 'driver_version': None, 'total_capacity_gb': 512, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'provisioned_capacity_gb': 312, 'max_over_subscription_ratio': 1.0, 'thin_provisioning': False, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': False, 'create_share_from_snapshot_support': False, 'revert_to_snapshot_support': True, 'mount_snapshot_support': True, 'dedupe': False, 'compression': 
False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None }, }, { 'name': 'host2@back1#BBB', 'host': 'host2', 'backend': 'back1', 'pool': 'BBB', 'capabilities': { 'timestamp': None, 'share_backend_name': 'BBB', 'free_capacity_gb': 100, 'driver_version': None, 'total_capacity_gb': 256, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None }, }, { 'name': 'host2@back2#CCC', 'host': 'host2', 'backend': 'back2', 'pool': 'CCC', 'capabilities': { 'timestamp': None, 'share_backend_name': 'CCC', 'free_capacity_gb': 700, 'driver_version': None, 'total_capacity_gb': 10000, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'provisioned_capacity_gb': 50000, 'max_over_subscription_ratio': 20.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 
'mount_snapshot_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None }, }, ] self.assertIsInstance(res, list) self.assertEqual(len(expected), len(res)) for pool in expected: self.assertIn(pool, res) def test_get_pools(self): fake_context = context.RequestContext('user', 'project') self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=fakes.SHARE_SERVICES_WITH_POOLS)) host_manager.LOG.warning = mock.Mock() with mock.patch.dict(self.host_manager.service_states, fakes.SHARE_SERVICE_STATES_WITH_POOLS): res = self.host_manager.get_pools(fake_context) expected = [ { 'name': 'host1@AAA#pool1', 'host': 'host1', 'backend': 'AAA', 'pool': 'pool1', 'capabilities': { 'pool_name': 'pool1', 'timestamp': None, 'share_backend_name': 'AAA', 'free_capacity_gb': 41, 'driver_version': None, 'total_capacity_gb': 51, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'provisioned_capacity_gb': 10, 'max_over_subscription_ratio': 1.0, 'thin_provisioning': False, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'mount_snapshot_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 
'encryption_support': None }, }, { 'name': 'host2@BBB#pool2', 'host': 'host2', 'backend': 'BBB', 'pool': 'pool2', 'capabilities': { 'pool_name': 'pool2', 'timestamp': None, 'share_backend_name': 'BBB', 'free_capacity_gb': 42, 'driver_version': None, 'total_capacity_gb': 52, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'provisioned_capacity_gb': 60, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None }, }, { 'name': 'host3@CCC#pool3', 'host': 'host3', 'backend': 'CCC', 'pool': 'pool3', 'capabilities': { 'pool_name': 'pool3', 'timestamp': None, 'share_backend_name': 'CCC', 'free_capacity_gb': 43, 'driver_version': None, 'total_capacity_gb': 53, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'provisioned_capacity_gb': 100, 'max_over_subscription_ratio': 20.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': 
False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None }, }, { 'name': 'host4@DDD#pool4a', 'host': 'host4', 'backend': 'DDD', 'pool': 'pool4a', 'capabilities': { 'pool_name': 'pool4a', 'timestamp': None, 'share_backend_name': 'DDD', 'free_capacity_gb': 441, 'driver_version': None, 'total_capacity_gb': 541, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'provisioned_capacity_gb': 800, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None }, }, { 'name': 'host4@DDD#pool4b', 'host': 'host4', 'backend': 'DDD', 'pool': 'pool4b', 'capabilities': { 'pool_name': 'pool4b', 'timestamp': None, 'share_backend_name': 'DDD', 'free_capacity_gb': 442, 'driver_version': None, 'total_capacity_gb': 542, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'provisioned_capacity_gb': 2000, 'max_over_subscription_ratio': 10.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 
'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None, }, }, ] self.assertIsInstance(res, list) self.assertIsInstance(self.host_manager.host_state_map, dict) self.assertEqual(len(expected), len(res)) for pool in expected: self.assertIn(pool, res) def test_get_pools_host_down(self): fake_context = context.RequestContext('user', 'project') mock_service_is_up = self.mock_object(utils, 'service_is_up') self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=fakes.SHARE_SERVICES_NO_POOLS)) host_manager.LOG.warning = mock.Mock() with mock.patch.dict(self.host_manager.service_states, fakes.SERVICE_STATES_NO_POOLS): # Initialize host data with all services present mock_service_is_up.side_effect = [True, True, True] # Call once to update the host state map self.host_manager.get_pools(fake_context) self.assertEqual(len(fakes.SHARE_SERVICES_NO_POOLS), len(self.host_manager.host_state_map)) # Then mock one host as down mock_service_is_up.side_effect = [True, True, False] res = self.host_manager.get_pools(fake_context) expected = [ { 'name': 'host1#AAA', 'host': 'host1', 'backend': None, 'pool': 'AAA', 'capabilities': { 'timestamp': None, 'driver_handles_share_servers': False, 'snapshot_support': False, 'create_share_from_snapshot_support': False, 'revert_to_snapshot_support': True, 'mount_snapshot_support': True, 'share_backend_name': 'AAA', 'free_capacity_gb': 200, 'driver_version': None, 'total_capacity_gb': 512, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'provisioned_capacity_gb': 312, 'max_over_subscription_ratio': 1.0, 'thin_provisioning': False, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': 
None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None }, }, { 'name': 'host2@back1#BBB', 'host': 'host2', 'backend': 'back1', 'pool': 'BBB', 'capabilities': { 'timestamp': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'share_backend_name': 'BBB', 'free_capacity_gb': 100, 'driver_version': None, 'total_capacity_gb': 256, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None }, }, ] self.assertIsInstance(res, list) self.assertIsInstance(self.host_manager.host_state_map, dict) self.assertEqual(len(expected), len(res)) self.assertEqual(len(expected), len(self.host_manager.host_state_map)) for pool in expected: self.assertIn(pool, res) def test_get_pools_with_filters(self): fake_context = context.RequestContext('user', 'project') self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object( db, 'service_get_all_by_topic', mock.Mock(return_value=fakes.SHARE_SERVICES_WITH_POOLS)) host_manager.LOG.warning = mock.Mock() with mock.patch.dict(self.host_manager.service_states, fakes.SHARE_SERVICE_STATES_WITH_POOLS): res = self.host_manager.get_pools( context=fake_context, 
filters={'host': 'host2', 'pool': 'pool*', 'capabilities': {'dedupe': 'False'}}) expected = [ { 'name': 'host2@BBB#pool2', 'host': 'host2', 'backend': 'BBB', 'pool': 'pool2', 'capabilities': { 'pool_name': 'pool2', 'timestamp': None, 'driver_handles_share_servers': False, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'share_backend_name': 'BBB', 'free_capacity_gb': 42, 'driver_version': None, 'total_capacity_gb': 52, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'provisioned_capacity_gb': 60, 'max_over_subscription_ratio': 2.0, 'thin_provisioning': True, 'vendor_name': None, 'storage_protocol': None, 'dedupe': False, 'compression': False, 'replication_type': None, 'replication_domain': None, 'sg_consistent_snapshot_support': None, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_server_multiple_subnet_support': False, 'mount_point_name_support': False, 'share_replicas_migration_support': False, 'encryption_support': None }, }, ] self.assertIsInstance(res, list) self.assertEqual(len(expected), len(res)) for pool in expected: self.assertIn(pool, res) @ddt.data( None, {}, {'key1': 'value1'}, {'capabilities': {'dedupe': 'False'}}, {'capabilities': {'dedupe': ' False'}}, {'key1': 'value1', 'key2': 'value*'}, {'key1': '.*', 'key2': '.*'}, ) def test_passes_filters_true(self, filter): data = { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'capabilities': {'dedupe': False}, } self.assertTrue(self.host_manager._passes_filters(data, filter)) @ddt.data( {'key1': 'value$'}, {'key4': 'value'}, {'capabilities': {'dedupe': 'True'}}, {'capabilities': {'dedupe': ' True'}}, {'key1': 'value1.+', 'key2': 'value*'}, ) def test_passes_filters_false(self, filter): data = { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'capabilities': {'dedupe': False}, } 
        self.assertFalse(self.host_manager._passes_filters(data, filter))


class HostStateTestCase(test.TestCase):
    """Test case for HostState class."""

    def test_update_from_share_capability_nopool(self):
        # A capability report without a 'pools' list is folded into the
        # implicit '_pool0' pool; backend-level totals stay uninitialized.
        fake_context = context.RequestContext('user', 'project',
                                              is_admin=True)
        share_capability = {'total_capacity_gb': 0,
                            'free_capacity_gb': 100,
                            'reserved_percentage': 0,
                            'reserved_snapshot_percentage': 0,
                            'reserved_share_extend_percentage': 0,
                            'timestamp': None,
                            'ipv4_support': True,
                            'ipv6_support': False}
        fake_host = host_manager.HostState('host1', share_capability)
        self.assertIsNone(fake_host.free_capacity_gb)

        fake_host.update_from_share_capability(share_capability,
                                               context=fake_context)
        # Backend level stats remain uninitialized
        self.assertEqual(0, fake_host.total_capacity_gb)
        self.assertIsNone(fake_host.free_capacity_gb)
        self.assertTrue(fake_host.ipv4_support)
        self.assertFalse(fake_host.ipv6_support)
        # Pool stats has been updated
        self.assertEqual(0, fake_host.pools['_pool0'].total_capacity_gb)
        self.assertEqual(100, fake_host.pools['_pool0'].free_capacity_gb)
        self.assertTrue(fake_host.pools['_pool0'].ipv4_support)
        self.assertFalse(fake_host.pools['_pool0'].ipv6_support)

        # Test update for existing host state
        share_capability.update(dict(total_capacity_gb=1000))
        fake_host.update_from_share_capability(share_capability,
                                               context=fake_context)
        self.assertEqual(1000, fake_host.pools['_pool0'].total_capacity_gb)

        # Test update for existing host state with different backend name
        share_capability.update(dict(share_backend_name='magic'))
        fake_host.update_from_share_capability(share_capability,
                                               context=fake_context)
        self.assertEqual(1000, fake_host.pools['magic'].total_capacity_gb)
        self.assertEqual(100, fake_host.pools['magic'].free_capacity_gb)
        # 'pool0' becomes nonactive pool, and is deleted
        self.assertRaises(KeyError, lambda: fake_host.pools['pool0'])

    def test_update_from_share_capability_with_pools(self):
        fake_context = context.RequestContext('user', 'project',
                                              is_admin=True)
fake_host = host_manager.HostState('host1#pool1') self.assertIsNone(fake_host.free_capacity_gb) capability = { 'share_backend_name': 'Backend1', 'vendor_name': 'OpenStack', 'driver_version': '1.1', 'storage_protocol': 'NFS_CIFS', 'ipv4_support': True, 'ipv6_support': False, 'pools': [ {'pool_name': 'pool1', 'total_capacity_gb': 500, 'free_capacity_gb': 230, 'allocated_capacity_gb': 270, 'qos': 'False', 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'dying_disks': 100, 'super_hero_1': 'spider-man', 'super_hero_2': 'flash', 'super_hero_3': 'neoncat', }, {'pool_name': 'pool2', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'qos': 'False', 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'dying_disks': 200, 'super_hero_1': 'superman', 'super_hero_2': 'Hulk', } ], 'timestamp': None, } fake_host.update_from_share_capability(capability, context=fake_context) self.assertEqual('Backend1', fake_host.share_backend_name) self.assertEqual('NFS_CIFS', fake_host.storage_protocol) self.assertEqual('OpenStack', fake_host.vendor_name) self.assertEqual('1.1', fake_host.driver_version) self.assertTrue(fake_host.ipv4_support) self.assertFalse(fake_host.ipv6_support) # Backend level stats remain uninitialized self.assertEqual(0, fake_host.total_capacity_gb) self.assertIsNone(fake_host.free_capacity_gb) # Pool stats has been updated self.assertEqual(2, len(fake_host.pools)) self.assertEqual(500, fake_host.pools['pool1'].total_capacity_gb) self.assertEqual(230, fake_host.pools['pool1'].free_capacity_gb) self.assertTrue(fake_host.pools['pool1'].ipv4_support) self.assertFalse(fake_host.pools['pool1'].ipv6_support) self.assertEqual(1024, fake_host.pools['pool2'].total_capacity_gb) self.assertEqual(1024, fake_host.pools['pool2'].free_capacity_gb) self.assertTrue(fake_host.pools['pool2'].ipv4_support) self.assertFalse(fake_host.pools['pool2'].ipv6_support) 
capability = { 'share_backend_name': 'Backend1', 'vendor_name': 'OpenStack', 'driver_version': '1.0', 'storage_protocol': 'NFS_CIFS', 'pools': [ {'pool_name': 'pool3', 'total_capacity_gb': 10000, 'free_capacity_gb': 10000, 'allocated_capacity_gb': 0, 'qos': 'False', 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, }, ], 'timestamp': None, } # test update HostState Record fake_host.update_from_share_capability(capability, context=fake_context) self.assertEqual('1.0', fake_host.driver_version) # Non-active pool stats has been removed self.assertEqual(1, len(fake_host.pools)) self.assertRaises(KeyError, lambda: fake_host.pools['pool1']) self.assertRaises(KeyError, lambda: fake_host.pools['pool2']) self.assertEqual(10000, fake_host.pools['pool3'].total_capacity_gb) self.assertEqual(10000, fake_host.pools['pool3'].free_capacity_gb) def test_update_from_share_unknown_capability(self): share_capability = { 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None } fake_context = context.RequestContext('user', 'project', is_admin=True) fake_host = host_manager.HostState('host1#_pool0') self.assertIsNone(fake_host.free_capacity_gb) fake_host.update_from_share_capability(share_capability, context=fake_context) # Backend level stats remain uninitialized self.assertEqual(fake_host.total_capacity_gb, 0) self.assertIsNone(fake_host.free_capacity_gb) # Pool stats has been updated self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb, 'unknown') self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb, 'unknown') def test_consume_from_share_capability(self): fake_context = context.RequestContext('user', 'project', is_admin=True) share_size = 10 free_capacity = 100 provisioned_capacity_gb = 50 fake_share = {'id': 'foo', 'size': share_size} share_capability = { 
'total_capacity_gb': free_capacity * 2, 'free_capacity_gb': free_capacity, 'provisioned_capacity_gb': provisioned_capacity_gb, 'allocated_capacity_gb': provisioned_capacity_gb, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None } fake_host = host_manager.PoolState('host1', share_capability, '_pool0') fake_host.update_from_share_capability(share_capability, context=fake_context) fake_host.consume_from_share(fake_share) self.assertEqual(fake_host.free_capacity_gb, free_capacity - share_size) self.assertEqual(fake_host.provisioned_capacity_gb, provisioned_capacity_gb + share_size) self.assertEqual(fake_host.allocated_capacity_gb, provisioned_capacity_gb + share_size) def test_consume_from_share_unknown_capability(self): share_capability = { 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'provisioned_capacity_gb': None, 'allocated_capacity_gb': 0, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None } fake_context = context.RequestContext('user', 'project', is_admin=True) fake_host = host_manager.PoolState('host1', share_capability, '_pool0') share_size = 1000 fake_share = {'id': 'foo', 'size': share_size} fake_host.update_from_share_capability(share_capability, context=fake_context) fake_host.consume_from_share(fake_share) self.assertEqual(fake_host.total_capacity_gb, 'unknown') self.assertEqual(fake_host.free_capacity_gb, 'unknown') self.assertIsNone(fake_host.provisioned_capacity_gb) self.assertEqual(fake_host.allocated_capacity_gb, share_size) def test_consume_from_share_invalid_capacity(self): fake_host = host_manager.PoolState('host1', {}, '_pool0') fake_host.free_capacity_gb = 'invalid_foo_string' fake_host.provisioned_capacity_gb = None fake_host.allocated_capacity_gb = 0 fake_share = {'id': 'fake', 'size': 10} self.assertRaises(exception.InvalidCapacity, fake_host.consume_from_share, fake_share) def 
test_repr(self):
        # __repr__ should summarize the host plus each pool's free capacity.
        capability = {
            'share_backend_name': 'Backend1',
            'vendor_name': 'OpenStack',
            'driver_version': '1.0',
            'storage_protocol': 'NFS_CIFS',
            'total_capacity_gb': 20000,
            'free_capacity_gb': 15000,
            'allocated_capacity_gb': 5000,
            'timestamp': None,
            'reserved_percentage': 0,
            'reserved_snapshot_percentage': 0,
            'reserved_share_extend_percentage': 0,
        }
        fake_context = context.RequestContext('user', 'project',
                                              is_admin=True)
        fake_host = host_manager.HostState('host1')
        fake_host.update_from_share_capability(capability,
                                               context=fake_context)
        result = fake_host.__repr__()
        expected = ("host: 'host1', free_capacity_gb: None, "
                    "pools: {'Backend1': host: 'host1#Backend1', "
                    "free_capacity_gb: 15000, pools: None}")
        self.assertEqual(expected, result)


@ddt.ddt
class PoolStateTestCase(test.TestCase):
    """Test case for PoolState class."""

    @ddt.data(
        # Thin-provisioned pool with two instances (sizes 4 and None).
        {
            'share_capability':
                {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
                 'reserved_percentage': 0, 'timestamp': None,
                 'reserved_snapshot_percentage': 0,
                 'reserved_share_extend_percentage': 0,
                 'thin_provisioning': True, 'cap1': 'val1', 'cap2': 'val2'},
            'instances': [
                {
                    'id': 1, 'host': 'host1', 'status': 'available',
                    'share_id': 11, 'size': 4,
                    'updated_at': timeutils.utcnow()
                },
                {
                    'id': 2, 'host': 'host1', 'status': 'available',
                    'share_id': 12, 'size': None,
                    'updated_at': timeutils.utcnow()
                },
            ]
        },
        # Thick-provisioned pool with two instances.
        {
            'share_capability':
                {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
                 'reserved_percentage': 0, 'timestamp': None,
                 'reserved_snapshot_percentage': 0,
                 'reserved_share_extend_percentage': 0,
                 'thin_provisioning': False, 'cap1': 'val1', 'cap2': 'val2'},
            'instances': [
                {
                    'id': 1, 'host': 'host1', 'status': 'available',
                    'share_id': 11, 'size': 1,
                    'updated_at': timeutils.utcnow()
                },
                {
                    'id': 2, 'host': 'host1', 'status': 'available',
                    'share_id': 12, 'size': None,
                    'updated_at': timeutils.utcnow()
                },
            ]
        },
        {
            'share_capability':
                {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
                 'reserved_percentage': 0, 'timestamp': None,
'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'thin_provisioning': [False], 'cap1': 'val1', 'cap2': 'val2'}, 'instances': [ { 'id': 1, 'host': 'host1', 'status': 'available', 'share_id': 11, 'size': 1, 'updated_at': timeutils.utcnow() }, { 'id': 2, 'host': 'host1', 'status': 'available', 'share_id': 12, 'size': None, 'updated_at': timeutils.utcnow() }, ] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'reserved_percentage': 0, 'timestamp': None, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'thin_provisioning': [True, False], 'cap1': 'val1', 'cap2': 'val2'}, 'instances': [ { 'id': 1, 'host': 'host1', 'status': 'available', 'share_id': 11, 'size': 4, 'updated_at': timeutils.utcnow() }, { 'id': 2, 'host': 'host1', 'status': 'available', 'share_id': 12, 'size': None, 'updated_at': timeutils.utcnow() }, ] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'reserved_percentage': 0, 'timestamp': None, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'cap1': 'val1', 'cap2': 'val2', 'ipv4_support': True, 'ipv6_support': False}, 'instances': [] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'reserved_percentage': 0, 'timestamp': None, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'thin_provisioning': True, 'cap1': 'val1', 'cap2': 'val2', 'ipv4_support': True, 'ipv6_support': False}, 'instances': [] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'reserved_percentage': 0, 'timestamp': None, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'thin_provisioning': [False], 'cap1': 'val1', 'cap2': 'val2', 'ipv4_support': True, 'ipv6_support': False}, 'instances': [] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'reserved_percentage': 0, 'timestamp': None, 'reserved_snapshot_percentage': 0, 
'reserved_share_extend_percentage': 0, 'thin_provisioning': [True, False], 'cap1': 'val1', 'cap2': 'val2', 'ipv4_support': True, 'ipv6_support': False}, 'instances': [] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'allocated_capacity_gb': 256, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2', 'ipv4_support': False, 'ipv6_support': True }, 'instances': [ { 'id': 1, 'host': 'host1', 'status': 'available', 'share_id': 11, 'size': 4, 'updated_at': timeutils.utcnow() }, ] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'allocated_capacity_gb': 256, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2', 'ipv4_support': True, 'ipv6_support': True}, 'instances': [] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'provisioned_capacity_gb': 256, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2', 'ipv4_support': False, 'ipv6_support': False }, 'instances': [ { 'id': 1, 'host': 'host1', 'status': 'available', 'share_id': 11, 'size': 1, 'updated_at': timeutils.utcnow() }, ] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 1, 'thin_provisioning': True, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'}, 'instances': [ { 'id': 1, 'host': 'host1', 'status': 'available', 'share_id': 11, 'size': 1, 'updated_at': timeutils.utcnow() }, ] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 1, 'thin_provisioning': [False], 
'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'}, 'instances': [ { 'id': 1, 'host': 'host1', 'status': 'available', 'share_id': 11, 'size': 1, 'updated_at': timeutils.utcnow() }, ] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 1, 'thin_provisioning': [True, False], 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'}, 'instances': [ { 'id': 1, 'host': 'host1', 'status': 'available', 'share_id': 11, 'size': 1, 'updated_at': timeutils.utcnow() }, ] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, 'thin_provisioning': False, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'}, 'instances': [ { 'id': 1, 'host': 'host1', 'status': 'available', 'share_id': 11, 'size': 1, 'updated_at': timeutils.utcnow() }, ] }, { 'share_capability': {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, 'thin_provisioning': [False], 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'}, 'instances': [ { 'id': 1, 'host': 'host1', 'status': 'available', 'share_id': 11, 'size': 1, 'updated_at': timeutils.utcnow() }, ] }, ) @ddt.unpack def test_update_from_share_capability(self, share_capability, instances): fake_context = context.RequestContext('user', 'project', is_admin=True) sizes = [instance['size'] or 0 for instance in instances] self.mock_object( db, 'share_instance_sizes_sum_by_host', mock.Mock(return_value=sum(sizes))) fake_pool = 
host_manager.PoolState('host1', None, 'pool0')
        self.assertIsNone(fake_pool.free_capacity_gb)

        fake_pool.update_from_share_capability(share_capability,
                                               context=fake_context)

        # Host string gains the pool suffix and basic stats are copied over.
        self.assertEqual('host1#pool0', fake_pool.host)
        self.assertEqual('pool0', fake_pool.pool_name)
        self.assertEqual(1024, fake_pool.total_capacity_gb)
        self.assertEqual(512, fake_pool.free_capacity_gb)
        self.assertDictEqual(share_capability, dict(fake_pool.capabilities))

        # Normalize the reported 'thin_provisioning' capability (it may be a
        # bool or a list in the ddt data sets) the same way the pool does.
        if 'thin_provisioning' in share_capability:
            thin_provisioned = scheduler_utils.thin_provisioning(
                share_capability['thin_provisioning'])
        else:
            thin_provisioned = False

        if thin_provisioned:
            self.assertEqual(thin_provisioned, fake_pool.thin_provisioning)
            if 'provisioned_capacity_gb' not in share_capability or (
                    share_capability['provisioned_capacity_gb'] is None):
                # Missing provisioned capacity must be derived by summing the
                # instance sizes on the host (mocked to return sum(sizes)).
                db.share_instance_sizes_sum_by_host.assert_called_once_with(
                    fake_context, fake_pool.host)
                if len(instances) > 0:
                    self.assertEqual(4, fake_pool.provisioned_capacity_gb)
                else:
                    self.assertEqual(0, fake_pool.provisioned_capacity_gb)
            else:
                # Driver-reported value is used verbatim; no DB lookup.
                self.assertFalse(db.share_instance_sizes_sum_by_host.called)
                self.assertEqual(share_capability['provisioned_capacity_gb'],
                                 fake_pool.provisioned_capacity_gb)
        else:
            # Thick provisioning never triggers the DB fallback.
            self.assertFalse(fake_pool.thin_provisioning)
            self.assertFalse(db.share_instance_sizes_sum_by_host.called)
            if 'provisioned_capacity_gb' not in share_capability or (
                    share_capability['provisioned_capacity_gb'] is None):
                self.assertIsNone(fake_pool.provisioned_capacity_gb)
            else:
                self.assertEqual(share_capability['provisioned_capacity_gb'],
                                 fake_pool.provisioned_capacity_gb)

        if 'allocated_capacity_gb' in share_capability:
            self.assertEqual(share_capability['allocated_capacity_gb'],
                             fake_pool.allocated_capacity_gb)
        else:
            self.assertEqual(0, fake_pool.allocated_capacity_gb)

        if 'ipv4_support' in share_capability:
            self.assertEqual(share_capability['ipv4_support'],
                             fake_pool.ipv4_support)
        if 'ipv6_support' in share_capability:
            self.assertEqual(share_capability['ipv6_support'],
fake_pool.ipv6_support) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/test_manager.py0000664000175000017500000004476100000000000022476 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler Manager """ from importlib import reload from unittest import mock import ddt from oslo_config import cfg from manila.common import constants from manila import context from manila import db from manila import exception from manila.message import message_field from manila import quota from manila.scheduler.drivers import base from manila.scheduler.drivers import filter from manila.scheduler import manager from manila.share import rpcapi as share_rpcapi from manila.share import share_types from manila import test from manila.tests import db_utils from manila.tests import fake_share as fakes from manila import utils CONF = cfg.CONF @ddt.ddt class SchedulerManagerTestCase(test.TestCase): """Test case for scheduler manager.""" driver_cls = base.Scheduler driver_cls_name = 'manila.scheduler.drivers.base.Scheduler' def setUp(self): super(SchedulerManagerTestCase, self).setUp() self.periodic_tasks = [] def _periodic_task(*args, **kwargs): def decorator(f): self.periodic_tasks.append(f) return f return 
mock.Mock(side_effect=decorator) self.mock_periodic_task = self.mock_object( manager.periodic_task, 'periodic_task', mock.Mock(side_effect=_periodic_task)) reload(manager) self.flags(scheduler_driver=self.driver_cls_name) self.manager = manager.SchedulerManager() self.context = context.RequestContext('fake_user', 'fake_project') self.topic = 'fake_topic' self.fake_args = (1, 2, 3) self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} def raise_no_valid_host(self, *args, **kwargs): raise exception.NoValidHost(reason="") def test_1_correct_init(self): # Correct scheduler driver manager = self.manager self.assertIsInstance(manager.driver, self.driver_cls) @ddt.data('manila.scheduler.filter_scheduler.FilterScheduler', 'manila.scheduler.drivers.filter.FilterScheduler') def test_scheduler_driver_mapper(self, driver_class): test_manager = manager.SchedulerManager(scheduler_driver=driver_class) self.assertIsInstance(test_manager.driver, filter.FilterScheduler) def test_init_host_with_rpc(self): self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) self.mock_object(self.manager, 'request_service_capabilities') self.manager.init_host_with_rpc() self.manager.request_service_capabilities.assert_called_once_with( 'fake_admin_context') def test_get_host_list(self): self.mock_object(self.manager.driver, 'get_host_list') self.manager.get_host_list(context) self.manager.driver.get_host_list.assert_called_once_with() def test_get_service_capabilities(self): self.mock_object(self.manager.driver, 'get_service_capabilities') self.manager.get_service_capabilities(context) self.manager.driver.get_service_capabilities.assert_called_once_with() def test_update_service_capabilities(self): service_name = 'fake_service' host = 'fake_host' with mock.patch.object(self.manager.driver, 'update_service_capabilities', mock.Mock()): self.manager.update_service_capabilities( self.context, service_name=service_name, host=host) 
(self.manager.driver.update_service_capabilities. assert_called_once_with(service_name, host, {}, None)) with mock.patch.object(self.manager.driver, 'update_service_capabilities', mock.Mock()): capabilities = {'fake_capability': 'fake_value'} self.manager.update_service_capabilities( self.context, service_name=service_name, host=host, capabilities=capabilities) (self.manager.driver.update_service_capabilities. assert_called_once_with(service_name, host, capabilities, None)) @mock.patch.object(db, 'share_update', mock.Mock()) @mock.patch('manila.message.api.API.create') def test_create_share_exception_puts_share_in_error_state( self, _mock_message_create): """Test NoValidHost exception for create_share. Puts the share in 'error' state and eats the exception. """ fake_share_id = 1 request_spec = {'share_id': fake_share_id} ex = exception.NoValidHost(reason='') with mock.patch.object( self.manager.driver, 'schedule_create_share', mock.Mock(side_effect=ex)): self.mock_object(manager.LOG, 'error') self.manager.create_share_instance( self.context, request_spec=request_spec, filter_properties={}) db.share_update.assert_called_once_with( self.context, fake_share_id, {'status': 'error'}) (self.manager.driver.schedule_create_share. assert_called_once_with(self.context, request_spec, {})) manager.LOG.error.assert_called_once_with(mock.ANY, mock.ANY) _mock_message_create.assert_called_once_with( self.context, message_field.Action.ALLOCATE_HOST, self.context.project_id, resource_type='SHARE', exception=ex, resource_id=fake_share_id) @mock.patch.object(db, 'share_update', mock.Mock()) def test_create_share_other_exception_puts_share_in_error_state(self): """Test any exception except NoValidHost for create_share. Puts the share in 'error' state and re-raises the exception. 
""" fake_share_id = 1 request_spec = {'share_id': fake_share_id} with mock.patch.object(self.manager.driver, 'schedule_create_share', mock.Mock(side_effect=exception.QuotaError)): self.mock_object(manager.LOG, 'error') self.assertRaises(exception.QuotaError, self.manager.create_share_instance, self.context, request_spec=request_spec, filter_properties={}) db.share_update.assert_called_once_with( self.context, fake_share_id, {'status': 'error'}) (self.manager.driver.schedule_create_share. assert_called_once_with(self.context, request_spec, {})) manager.LOG.error.assert_called_once_with(mock.ANY, mock.ANY) @mock.patch.object(quota.QUOTAS, 'expire') def test__expire_reservations(self, mock_expire): self.manager._expire_reservations(self.context) mock_expire.assert_called_once_with(self.context) @mock.patch('manila.message.api.API.cleanup_expired_messages') def test__clean_expired_messages(self, mock_expire): self.manager._clean_expired_messages(self.context) mock_expire.assert_called_once_with(self.context) @mock.patch.object(db, 'service_get_all', mock.Mock()) @mock.patch.object(db, 'service_update', mock.Mock()) @mock.patch.object(utils, "service_is_up", mock.Mock()) def test__mark_services_as_down(self): db.service_get_all.return_value = [ {"id": 1, "state": "up"}, {"id": 2, "state": "up"}, {"id": 3, "state": "stopped"}, {"id": 4, "state": "stopped"}, {"id": 5, "state": "down"}, {"id": 6, "state": "down"}, ] utils.service_is_up.side_effect = [False, True] * 3 self.manager._mark_services_as_down(self.context) db.service_get_all.assert_called_once_with(self.context) self.assertEqual(6, utils.service_is_up.call_count) db.service_update.assert_called_once_with( self.context, 1, {"state": "down"}) def test_periodic_tasks(self): self.assertEqual(3, self.mock_periodic_task.call_count) self.assertEqual(3, len(self.periodic_tasks)) self.assertEqual( self.periodic_tasks[0].__name__, self.manager._expire_reservations.__name__) self.assertEqual( 
self.periodic_tasks[1].__name__, self.manager._clean_expired_messages.__name__) self.assertEqual( self.periodic_tasks[2].__name__, self.manager._mark_services_as_down.__name__) def test_get_pools(self): """Ensure get_pools exists and calls base_scheduler.get_pools.""" mock_get_pools = self.mock_object(self.manager.driver, 'get_pools', mock.Mock(return_value='fake_pools')) result = self.manager.get_pools(self.context, filters='fake_filters') mock_get_pools.assert_called_once_with(self.context, 'fake_filters', False) self.assertEqual('fake_pools', result) @mock.patch.object(db, 'share_group_update', mock.Mock()) def test_create_group_no_valid_host_puts_group_in_error_state(self): """Test that NoValidHost is raised for create_share_group. Puts the share in 'error' state and eats the exception. """ fake_group_id = 1 group_id = fake_group_id request_spec = {"share_group_id": group_id} with mock.patch.object( self.manager.driver, 'schedule_create_share_group', mock.Mock(side_effect=self.raise_no_valid_host)): self.manager.create_share_group(self.context, fake_group_id, request_spec=request_spec, filter_properties={}) db.share_group_update.assert_called_once_with( self.context, fake_group_id, {'status': 'error'}) (self.manager.driver.schedule_create_share_group. assert_called_once_with(self.context, group_id, request_spec, {})) @mock.patch.object(db, 'share_group_update', mock.Mock()) def test_create_group_exception_puts_group_in_error_state(self): """Test that exceptions for create_share_group. Puts the share in 'error' state and raises the exception. 
""" fake_group_id = 1 group_id = fake_group_id request_spec = {"share_group_id": group_id} with mock.patch.object(self.manager.driver, 'schedule_create_share_group', mock.Mock(side_effect=exception.NotFound)): self.assertRaises(exception.NotFound, self.manager.create_share_group, self.context, fake_group_id, request_spec=request_spec, filter_properties={}) def test_migrate_share_to_host(self): class fake_host(object): host = 'fake@backend#pool' share = db_utils.create_share() host = fake_host() self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(share_rpcapi.ShareAPI, 'migration_start', mock.Mock(side_effect=TypeError)) self.mock_object(base.Scheduler, 'host_passes_filters', mock.Mock(return_value=host)) self.mock_object( share_types, 'revert_allocated_share_type_quotas_during_migration') self.assertRaises( TypeError, self.manager.migrate_share_to_host, self.context, share['id'], 'fake@backend#pool', False, True, True, False, True, 'fake_net_id', 'fake_type_id', {}, None) db.share_get.assert_called_once_with(self.context, share['id']) base.Scheduler.host_passes_filters.assert_called_once_with( self.context, 'fake@backend#pool', {}, None) share_rpcapi.ShareAPI.migration_start.assert_called_once_with( self.context, share, host.host, False, True, True, False, True, 'fake_net_id', 'fake_type_id') (share_types.revert_allocated_share_type_quotas_during_migration. 
assert_called_once_with(self.context, share, 'fake_type_id')) @ddt.data(exception.NoValidHost(reason='fake'), TypeError) def test_migrate_share_to_host_exception(self, exc): share = db_utils.create_share(status=constants.STATUS_MIGRATING) host = 'fake@backend#pool' request_spec = {'share_id': share['id']} self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object( base.Scheduler, 'host_passes_filters', mock.Mock(side_effect=exc)) self.mock_object(db, 'share_update') self.mock_object(db, 'share_instance_update') self.mock_object( share_types, 'revert_allocated_share_type_quotas_during_migration') capture = (exception.NoValidHost if isinstance(exc, exception.NoValidHost) else TypeError) self.assertRaises( capture, self.manager.migrate_share_to_host, self.context, share['id'], host, False, True, True, False, True, 'fake_net_id', 'fake_type_id', request_spec, None) base.Scheduler.host_passes_filters.assert_called_once_with( self.context, host, request_spec, None) db.share_get.assert_called_once_with(self.context, share['id']) db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) db.share_instance_update.assert_called_once_with( self.context, share.instance['id'], {'status': constants.STATUS_AVAILABLE}) (share_types.revert_allocated_share_type_quotas_during_migration. 
assert_called_once_with(self.context, share, 'fake_type_id')) def test_manage_share(self): share = db_utils.create_share() self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(share_rpcapi.ShareAPI, 'manage_share') self.mock_object(base.Scheduler, 'host_passes_filters') self.manager.manage_share(self.context, share['id'], 'driver_options', {}, None) def test_manage_share_exception(self): share = db_utils.create_share() db_update = self.mock_object(db, 'share_update', mock.Mock()) self.mock_object( base.Scheduler, 'host_passes_filters', mock.Mock(side_effect=exception.NoValidHost('fake'))) share_id = share['id'] self.assertRaises( exception.NoValidHost, self.manager.manage_share, self.context, share['id'], 'driver_options', {'share_id': share_id}, None) db_update.assert_called_once_with( self.context, share_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 0}) def test_create_share_replica_exception_path(self): """Test 'raisable' exceptions for create_share_replica.""" db_update = self.mock_object(db, 'share_replica_update') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[{'id': '123'}])) snap_update = self.mock_object(db, 'share_snapshot_instance_update') request_spec = fakes.fake_replica_request_spec() replica_id = request_spec.get('share_instance_properties').get('id') expected_updates = { 'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR, } with mock.patch.object(self.manager.driver, 'schedule_create_replica', mock.Mock(side_effect=exception.NotFound)): self.assertRaises(exception.NotFound, self.manager.create_share_replica, self.context, request_spec=request_spec, filter_properties={}) db_update.assert_called_once_with( self.context, replica_id, expected_updates) snap_update.assert_called_once_with( self.context, '123', {'status': constants.STATUS_ERROR}) def test_create_share_replica_no_valid_host(self): """Test the NoValidHost exception for 
create_share_replica.""" db_update = self.mock_object(db, 'share_replica_update') request_spec = fakes.fake_replica_request_spec() replica_id = request_spec.get('share_instance_properties').get('id') expected_updates = { 'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR, } with mock.patch.object( self.manager.driver, 'schedule_create_replica', mock.Mock(side_effect=self.raise_no_valid_host)): retval = self.manager.create_share_replica( self.context, request_spec=request_spec, filter_properties={}) self.assertIsNone(retval) db_update.assert_called_once_with( self.context, replica_id, expected_updates) def test_create_share_replica(self): """Test happy path for create_share_replica.""" db_update = self.mock_object(db, 'share_replica_update') mock_scheduler_driver_call = self.mock_object( self.manager.driver, 'schedule_create_replica') request_spec = fakes.fake_replica_request_spec() retval = self.manager.create_share_replica( self.context, request_spec=request_spec, filter_properties={}) mock_scheduler_driver_call.assert_called_once_with( self.context, request_spec, {}) self.assertFalse(db_update.called) self.assertIsNone(retval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/test_rpcapi.py0000664000175000017500000001351000000000000022326 0ustar00zuulzuul00000000000000# Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for manila.scheduler.rpcapi """ import copy from unittest import mock from oslo_config import cfg from manila import context from manila.scheduler import rpcapi as scheduler_rpcapi from manila import test CONF = cfg.CONF class SchedulerRpcAPITestCase(test.TestCase): def tearDown(self): super(SchedulerRpcAPITestCase, self).tearDown() def _test_scheduler_api(self, method, rpc_method, fanout=False, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = scheduler_rpcapi.SchedulerAPI() expected_retval = 'foo' if method == 'call' else None target = { "fanout": fanout, "version": kwargs.pop('version', '1.0'), } expected_msg = copy.deepcopy(kwargs) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwds): for kwd in kwds: self.assertEqual(target[kwd], kwds[kwd]) return rpcapi.client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval with mock.patch.object(rpcapi.client, "prepare") as mock_prepared: mock_prepared.side_effect = _fake_prepare_method with mock.patch.object(rpcapi.client, rpc_method) as mock_method: mock_method.side_effect = _fake_rpc_method retval = getattr(rpcapi, method)(ctxt, **kwargs) self.assertEqual(expected_retval, retval) expected_args = [ctxt, method, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) def test_update_service_capabilities(self): self._test_scheduler_api('update_service_capabilities', rpc_method='cast', service_name='fake_name', host='fake_host', capabilities='fake_capabilities', fanout=True, version='1.10') def test_create_share_instance(self): self._test_scheduler_api('create_share_instance', rpc_method='cast', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.2') def test_get_pools(self): self._test_scheduler_api('get_pools', rpc_method='call', filters=None, version='1.9') def 
test_create_share_group(self): self._test_scheduler_api('create_share_group', rpc_method='cast', share_group_id='fake_share_group_id', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.8') def test_migrate_share_to_host(self): self._test_scheduler_api('migrate_share_to_host', rpc_method='cast', share_id='share_id', host='host', force_host_assisted_migration=True, preserve_metadata=True, writable=True, nondisruptive=False, preserve_snapshots=True, new_share_network_id='fake_net_id', new_share_type_id='fake_type_id', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.7') def test_create_share_replica(self): self._test_scheduler_api('create_share_replica', rpc_method='cast', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.5') def test_manage_share(self): self._test_scheduler_api('manage_share', rpc_method='cast', share_id='share_id', driver_options='fake_driver_options', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.6') def test_extend_share(self): self._test_scheduler_api('extend_share', rpc_method='cast', share_id='share_id', new_size='fake_size', reservations='fake_reservations', request_spec='fake_request_spec', filter_properties='filter_properties', version='1.11',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/test_scheduler_options.py0000664000175000017500000001172000000000000024602 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For scheduler options. """ import datetime from oslo_serialization import jsonutils from manila.scheduler import scheduler_options from manila import test class FakeSchedulerOptions(scheduler_options.SchedulerOptions): def __init__(self, last_checked, now, file_old, file_now, data, filedata): super(FakeSchedulerOptions, self).__init__() # Change internals ... self.last_modified = file_old self.last_checked = last_checked self.data = data # For overrides ... self._time_now = now self._file_now = file_now self._file_data = filedata.encode("utf-8") self.file_was_loaded = False def _get_file_timestamp(self, filename): return self._file_now def _get_file_handle(self, filename): self.file_was_loaded = True import io return io.BytesIO(self._file_data) def _get_time_now(self): return self._time_now class SchedulerOptionsTestCase(test.TestCase): def test_get_configuration_first_time_no_flag(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual({}, fake.get_configuration()) self.assertFalse(fake.file_was_loaded) def test_get_configuration_first_time_empty_file(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) jdata = "" fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual({}, 
fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_first_time_happy_day(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_second_time_no_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, data, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_too_fast(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2011, 1, 1, 1, 1, 2) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(old_data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) 
self.assertTrue(fake.file_was_loaded) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/test_utils.py0000664000175000017500000000361400000000000022214 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For utils. """ import ddt from manila.scheduler import utils from manila import test @ddt.ddt class UtilsTestCase(test.TestCase): """Test case for utils.""" @ddt.data( ({'extra_specs': {'thin_provisioning': True}}, True), ({'extra_specs': {'thin_provisioning': False}}, False), ({'extra_specs': {'foo': 'bar'}}, True), ({'foo': 'bar'}, True), ({'extra_specs': {'thin_provisioning': ' True'}}, True), ({'extra_specs': {'thin_provisioning': ' False'}}, False), ({'extra_specs': {'thin_provisioning': ' True'}}, False), ({'extra_specs': {}}, True), ({}, True), ) @ddt.unpack def test_use_thin_logic(self, properties, use_thin): use_thin_logic = utils.use_thin_logic(properties) self.assertEqual(use_thin, use_thin_logic) @ddt.data( (True, True), (False, False), (None, False), ([True, False], True), ([True], True), ([False], False), ('wrong', False), ) @ddt.unpack def test_thin_provisioning(self, thin_capabilities, thin): thin_provisioning = utils.thin_provisioning(thin_capabilities) self.assertEqual(thin, thin_provisioning) ././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1759315601.9896708 manila-21.0.0/manila/tests/scheduler/weighers/0000775000175000017500000000000000000000000021254 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/weighers/__init__.py0000664000175000017500000000000000000000000023353 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/weighers/test_base.py0000664000175000017500000000440600000000000023603 0ustar00zuulzuul00000000000000# Copyright 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler weighers. 
""" from manila.scheduler.weighers import base from manila import test from manila.tests.scheduler import fakes class TestWeightHandler(test.TestCase): def test_get_all_classes(self): namespace = "manila.tests.scheduler.fakes" handler = base.BaseWeightHandler( base.BaseWeigher, namespace) classes = handler.get_all_classes() self.assertIn(fakes.FakeWeigher1, classes) self.assertIn(fakes.FakeWeigher2, classes) self.assertNotIn(fakes.FakeClass, classes) def test_no_multiplier(self): class FakeWeigher(base.BaseWeigher): def _weigh_object(self, *args, **kwargs): pass self.assertEqual(1.0, FakeWeigher().weight_multiplier()) def test_no_weight_object(self): class FakeWeigher(base.BaseWeigher): def weight_multiplier(self, *args, **kwargs): pass self.assertRaises(TypeError, FakeWeigher) def test_normalization(self): # weight_list, expected_result, minval, maxval map_ = ( ((), (), None, None), ((0.0, 0.0), (0.0, 0.0), None, None), ((1.0, 1.0), (0.0, 0.0), None, None), ((20.0, 50.0), (0.0, 1.0), None, None), ((20.0, 50.0), (0.0, 0.375), None, 100.0), ((20.0, 50.0), (0.4, 1.0), 0.0, None), ((20.0, 50.0), (0.2, 0.5), 0.0, 100.0), ) for seq, result, minval, maxval in map_: ret = base.normalize(seq, minval=minval, maxval=maxval) self.assertEqual(result, tuple(ret)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/weighers/test_capacity.py0000664000175000017500000002657400000000000024500 0ustar00zuulzuul00000000000000# Copyright 2011-2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Capacity Weigher. """ from unittest import mock import ddt from oslo_config import cfg from manila import context from manila.scheduler.weighers import base_host from manila.scheduler.weighers import capacity from manila.share import utils from manila import test from manila.tests.scheduler import fakes CONF = cfg.CONF @ddt.ddt class CapacityWeigherTestCase(test.TestCase): def setUp(self): super(CapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = base_host.HostWeightHandler( 'manila.scheduler.weighers') def _get_weighed_host(self, hosts, weight_properties=None, index=0): if weight_properties is None: weight_properties = {'size': 1} return self.weight_handler.get_weighed_objects( [capacity.CapacityWeigher], hosts, weight_properties)[index] @mock.patch('manila.db.api.IMPL.service_get_all_by_topic') def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, disabled=disabled) host_states = self.host_manager.get_all_host_states_share(ctxt) _mock_service_get_all_by_topic.assert_called_once_with( ctxt, CONF.share_topic, consider_disabled=False) return host_states # NOTE(xyang): If thin_provisioning = True and # max_over_subscription_ratio >= 1, use the following formula: # free = math.floor(total * host_state.max_over_subscription_ratio # - host_state.provisioned_capacity_gb # - total * reserved) # Otherwise, use the following formula: # free = math.floor(free_space - total * reserved) 
@ddt.data( {'cap_thin': ' True', 'cap_thin_key': 'capabilities:thin_provisioning', 'winner': 'host2'}, {'cap_thin': ' False', 'cap_thin_key': 'thin_provisioning', 'winner': 'host1'}, {'cap_thin': 'True', 'cap_thin_key': 'capabilities:thin_provisioning', 'winner': 'host2'}, {'cap_thin': 'False', 'cap_thin_key': 'thin_provisioning', 'winner': 'host1'}, {'cap_thin': 'true', 'cap_thin_key': 'capabilities:thin_provisioning', 'winner': 'host2'}, {'cap_thin': 'false', 'cap_thin_key': 'thin_provisioning', 'winner': 'host1'}, {'cap_thin': None, 'cap_thin_key': None, 'winner': 'host2'}, ) @ddt.unpack def test_default_of_spreading_first(self, cap_thin, cap_thin_key, winner): hosts = self._get_all_hosts() # pylint: disable=no-value-for-parameter # Results for the 1st test # {'capabilities:thin_provisioning': ' True'}: # host1: thin_provisioning = False # free_capacity_gb = 1024 # free = math.floor(1024 - 1024 * 0.1) = 921.0 # weight = 0.40 # host2: thin_provisioning = True # max_over_subscription_ratio = 2.0 # free_capacity_gb = 300 # free = math.floor(2048 * 2.0 - 1748 - 2048 * 0.1)=2143.0 # weight = 1.0 # host3: thin_provisioning = [False] # free_capacity_gb = 512 # free = math.floor(256 - 512 * 0)=256.0 # weight = 0.08 # host4: thin_provisioning = [True] # max_over_subscription_ratio = 1.0 # free_capacity_gb = 200 # free = math.floor(2048 * 1.0 - 1848 - 2048 * 0.05) = 97.0 # weight = 0.0 # host5: thin_provisioning = [True, False] # max_over_subscription_ratio = 1.5 # free_capacity_gb = 500 # free = math.floor(2048 * 1.5 - 1548 - 2048 * 0.05) = 1421.0 # weight = 0.65 # host6: thin_provisioning = False # free = inf # weight = 0.0 # so, host2 should win: weight_properties = { 'size': 1, 'share_type': { 'extra_specs': { cap_thin_key: cap_thin, } } } weighed_host = self._get_weighed_host( hosts, weight_properties=weight_properties) self.assertEqual(1.0, weighed_host.weight) self.assertEqual( winner, utils.extract_host(weighed_host.obj.host)) def test_unknown_is_last(self): hosts 
= self._get_all_hosts() # pylint: disable=no-value-for-parameter last_host = self._get_weighed_host(hosts, index=-1) self.assertEqual( 'host6', utils.extract_host(last_host.obj.host)) self.assertEqual(0.0, last_host.weight) @ddt.data( {'cap_thin': ' True', 'cap_thin_key': 'capabilities:thin_provisioning', 'winner': 'host4'}, {'cap_thin': ' False', 'cap_thin_key': 'thin_provisioning', 'winner': 'host2'}, {'cap_thin': 'True', 'cap_thin_key': 'capabilities:thin_provisioning', 'winner': 'host4'}, {'cap_thin': 'False', 'cap_thin_key': 'thin_provisioning', 'winner': 'host2'}, {'cap_thin': 'true', 'cap_thin_key': 'capabilities:thin_provisioning', 'winner': 'host4'}, {'cap_thin': 'false', 'cap_thin_key': 'thin_provisioning', 'winner': 'host2'}, {'cap_thin': None, 'cap_thin_key': None, 'winner': 'host4'}, ) @ddt.unpack def test_capacity_weight_multiplier_negative_1(self, cap_thin, cap_thin_key, winner): self.flags(capacity_weight_multiplier=-1.0) hosts = self._get_all_hosts() # pylint: disable=no-value-for-parameter # Results for the 1st test # {'capabilities:thin_provisioning': ' True'}: # host1: thin_provisioning = False # free_capacity_gb = 1024 # free = math.floor(1024 - 1024 * 0.1) = 921.0 # free * (-1) = -921.0 # weight = -0.40 # host2: thin_provisioning = True # max_over_subscription_ratio = 2.0 # free_capacity_gb = 300 # free = math.floor(2048 * 2.0-1748-2048 * 0.1) = 2143.0 # free * (-1) = -2143.0 # weight = -1.0 # host3: thin_provisioning = [False] # free_capacity_gb = 512 # free = math.floor(256 - 512 * 0) = 256.0 # free * (-1) = -256.0 # weight = -0.08 # host4: thin_provisioning = [True] # max_over_subscription_ratio = 1.0 # free_capacity_gb = 200 # free = math.floor(2048 * 1.0 - 1848 - 2048 * 0.05) = 97.0 # free * (-1) = -97.0 # weight = 0.0 # host5: thin_provisioning = [True, False] # max_over_subscription_ratio = 1.5 # free_capacity_gb = 500 # free = math.floor(2048 * 1.5 - 1548 - 2048 * 0.05) = 1421.0 # free * (-1) = -1421.0 # weight = -0.65 # host6: 
thin_provisioning = False # free = inf # free * (-1) = -inf # weight = 0.0 # so, host4 should win: weight_properties = { 'size': 1, 'share_type': { 'extra_specs': { cap_thin_key: cap_thin, } } } weighed_host = self._get_weighed_host( hosts, weight_properties=weight_properties) self.assertEqual(0.0, weighed_host.weight) self.assertEqual( winner, utils.extract_host(weighed_host.obj.host)) @ddt.data( {'cap_thin': ' True', 'cap_thin_key': 'capabilities:thin_provisioning', 'winner': 'host2'}, {'cap_thin': ' False', 'cap_thin_key': 'thin_provisioning', 'winner': 'host1'}, {'cap_thin': 'True', 'cap_thin_key': 'capabilities:thin_provisioning', 'winner': 'host2'}, {'cap_thin': 'False', 'cap_thin_key': 'thin_provisioning', 'winner': 'host1'}, {'cap_thin': 'true', 'cap_thin_key': 'capabilities:thin_provisioning', 'winner': 'host2'}, {'cap_thin': 'false', 'cap_thin_key': 'thin_provisioning', 'winner': 'host1'}, {'cap_thin': None, 'cap_thin_key': None, 'winner': 'host2'}, ) @ddt.unpack def test_capacity_weight_multiplier_2(self, cap_thin, cap_thin_key, winner): self.flags(capacity_weight_multiplier=2.0) hosts = self._get_all_hosts() # pylint: disable=no-value-for-parameter # Results for the 1st test # {'capabilities:thin_provisioning': ' True'}: # host1: thin_provisioning = False # free_capacity_gb = 1024 # free = math.floor(1024-1024*0.1) = 921.0 # free * 2 = 1842.0 # weight = 0.81 # host2: thin_provisioning = True # max_over_subscription_ratio = 2.0 # free_capacity_gb = 300 # free = math.floor(2048 * 2.0 - 1748 - 2048 * 0.1) = 2143.0 # free * 2 = 4286.0 # weight = 2.0 # host3: thin_provisioning = [False] # free_capacity_gb = 512 # free = math.floor(256 - 512 * 0) = 256.0 # free * 2 = 512.0 # weight = 0.16 # host4: thin_provisioning = [True] # max_over_subscription_ratio = 1.0 # free_capacity_gb = 200 # free = math.floor(2048 * 1.0 - 1848 - 2048 * 0.05) = 97.0 # free * 2 = 194.0 # weight = 0.0 # host5: thin_provisioning = [True, False] # max_over_subscription_ratio = 1.5 # 
free_capacity_gb = 500 # free = math.floor(2048 * 1.5 - 1548 - 2048 * 0.05) = 1421.0 # free * 2 = 2842.0 # weight = 1.29 # host6: thin_provisioning = False # free = inf # weight = 0.0 # so, host2 should win: weight_properties = { 'size': 1, 'share_type': { 'extra_specs': { cap_thin_key: cap_thin, } } } weighed_host = self._get_weighed_host( hosts, weight_properties=weight_properties) self.assertEqual(2.0, weighed_host.weight) self.assertEqual( winner, utils.extract_host(weighed_host.obj.host)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/weighers/test_goodness.py0000664000175000017500000001372000000000000024511 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Goodness Weigher. 
""" from manila.scheduler.weighers import goodness from manila import test from manila.tests.scheduler import fakes class GoodnessWeigherTestCase(test.TestCase): def test_goodness_weigher_with_no_goodness_function(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeHostState('host1', { 'host': 'host.example.com', 'capabilities': { 'foo': '50' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) def test_goodness_weigher_passing_host(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeHostState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '100' } }) host_state_2 = fakes.FakeHostState('host2', { 'host': 'host2.example.com', 'capabilities': { 'goodness_function': '0' } }) host_state_3 = fakes.FakeHostState('host3', { 'host': 'host3.example.com', 'capabilities': { 'goodness_function': '100 / 2' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(100, weight) weight = weigher._weigh_object(host_state_2, weight_properties) self.assertEqual(0, weight) weight = weigher._weigh_object(host_state_3, weight_properties) self.assertEqual(50, weight) def test_goodness_weigher_capabilities_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeHostState('host1', { 'host': 'host.example.com', 'capabilities': { 'foo': 50, 'goodness_function': '10 + capabilities.foo' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(60, weight) def test_goodness_weigher_extra_specs_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeHostState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '10 + extra.foo' } }) weight_properties = { 'share_type': { 'extra_specs': { 'foo': 50 } } } weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(60, weight) def 
test_goodness_weigher_share_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeHostState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '10 + share.foo' } }) weight_properties = { 'request_spec': { 'resource_properties': { 'foo': 50 } } } weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(60, weight) def test_goodness_weigher_stats_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeHostState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': 'stats.free_capacity_gb > 20' }, 'free_capacity_gb': 50 }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(100, weight) def test_goodness_weigher_invalid_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeHostState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '10 + stats.my_val' }, 'foo': 50 }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) def test_goodness_weigher_host_rating_out_of_bounds(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeHostState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '-10' } }) host_state_2 = fakes.FakeHostState('host2', { 'host': 'host2.example.com', 'capabilities': { 'goodness_function': '200' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) weight = weigher._weigh_object(host_state_2, weight_properties) self.assertEqual(0, weight) def test_goodness_weigher_invalid_goodness_function(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeHostState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '50 / 0' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/weighers/test_host_affinity.py0000664000175000017500000001317000000000000025535 0ustar00zuulzuul00000000000000# Copyright 2020 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Host Affinity Weigher. """ from unittest import mock from manila.common import constants from manila.db import api as db_api from manila.scheduler.weighers import host_affinity from manila import test from manila.tests import db_utils from manila.tests.scheduler import fakes class HostAffinityWeigherTestCase(test.TestCase): def setUp(self): super(HostAffinityWeigherTestCase, self).setUp() self.weigher = host_affinity.HostAffinityWeigher() @staticmethod def _create_weight_properties(snapshot_id=None, snapshot_host=None, availability_zone_id=None): return { 'request_spec': { 'snapshot_id': snapshot_id, 'snapshot_host': snapshot_host, }, 'availability_zone_id': availability_zone_id, } def test_without_snapshot_id(self): host_state = fakes.FakeHostState('host1', { 'host': 'host1@AAA#pool2', }) weight_properties = self._create_weight_properties( snapshot_host='fake_snapshot_host') weight = self.weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) def test_without_snapshot_host(self): host_state = fakes.FakeHostState('host1', { 'host': 'host1@AAA#pool2', }) weight_properties = 
self._create_weight_properties( snapshot_id='fake_snapshot_id') weight = self.weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) def test_same_backend_and_pool(self): share = db_utils.create_share(host="host1@AAA#pool1", status=constants.STATUS_AVAILABLE) snapshot = db_utils.create_snapshot(share_id=share['id']) self.mock_object(db_api, 'share_snapshot_get', mock.Mock(return_value=snapshot)) host_state = fakes.FakeHostState('host1@AAA#pool1', {}) weight_properties = self._create_weight_properties( snapshot_id=snapshot['id'], snapshot_host=share['host']) weight = self.weigher._weigh_object(host_state, weight_properties) self.assertEqual(100, weight) def test_same_backend_different_pool(self): share = db_utils.create_share(host="host1@AAA#pool1", status=constants.STATUS_AVAILABLE) snapshot = db_utils.create_snapshot(share_id=share['id']) self.mock_object(db_api, 'share_snapshot_get', mock.Mock(return_value=snapshot)) host_state = fakes.FakeHostState('host1@AAA#pool2', {}) weight_properties = self._create_weight_properties( snapshot_id=snapshot['id'], snapshot_host=share['host']) weight = self.weigher._weigh_object(host_state, weight_properties) self.assertEqual(75, weight) def test_different_backend_same_availability_zone(self): share = db_utils.create_share( host="host1@AAA#pool1", status=constants.STATUS_AVAILABLE, availability_zone=fakes.FAKE_AZ_1['name']) snapshot = db_utils.create_snapshot(share_id=share['id']) self.mock_object(db_api, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(db_api, 'availability_zone_get', mock.Mock(return_value=type( 'FakeAZ', (object, ), { 'id': fakes.FAKE_AZ_1['id'], 'name': fakes.FAKE_AZ_1['name'], }))) host_state = fakes.FakeHostState('host2@BBB#pool1', {}) weight_properties = self._create_weight_properties( snapshot_id=snapshot['id'], snapshot_host=share['host'], availability_zone_id='zone1') weight = self.weigher._weigh_object(host_state, weight_properties) 
self.assertEqual(50, weight) def test_different_backend_and_availability_zone(self): share = db_utils.create_share( host="host1@AAA#pool1", status=constants.STATUS_AVAILABLE, availability_zone=fakes.FAKE_AZ_1['name']) snapshot = db_utils.create_snapshot(share_id=share['id']) self.mock_object(db_api, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(db_api, 'availability_zone_get', mock.Mock(return_value=type( 'FakeAZ', (object,), { 'id': fakes.FAKE_AZ_2['id'], 'name': fakes.FAKE_AZ_2['name'], }))) host_state = fakes.FakeHostState('host2@BBB#pool1', {}) weight_properties = self._create_weight_properties( snapshot_id=snapshot['id'], snapshot_host=share['host'], availability_zone_id='zone1' ) weight = self.weigher._weigh_object(host_state, weight_properties) self.assertEqual(25, weight) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/weighers/test_netapp_aiq.py0000664000175000017500000002713000000000000025011 0ustar00zuulzuul00000000000000# Copyright 2023 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For NetApp Active IQ Weigher. 
""" from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils import requests from manila import context from manila import exception from manila.scheduler.weighers import base_host from manila.scheduler.weighers import netapp_aiq from manila.share import utils from manila import test from manila.tests.scheduler import fakes from manila.tests import utils as test_utils CONF = cfg.CONF @ddt.ddt class NetAppAIQWeigherTestCase(test.TestCase): def setUp(self): super(NetAppAIQWeigherTestCase, self).setUp() self.weight_handler = base_host.HostWeightHandler( 'manila.scheduler.weighers') netapp_aiq.LOG.debug = mock.Mock() netapp_aiq.LOG.error = mock.Mock() self.mock_session = mock.Mock() self.mock_session.get = mock.Mock() self.mock_session.post = mock.Mock() self.mock_session.delete = mock.Mock() self.mock_session.patch = mock.Mock() self.mock_session.put = mock.Mock() data = { 'netapp_active_iq': { 'aiq_hostname': "10.10.10.10", 'aiq_transport_type': 'https', 'aiq_ssl_verify': True, 'aiq_ssl_cert_path': 'fake_cert', 'aiq_username': 'fake_user', 'aiq_password': 'fake_password', 'aiq_eval_method': 1, 'aiq_priority_order': 'ops' } } self.netapp_aiq_weigher = None with test_utils.create_temp_config_with_opts(data): self.netapp_aiq_weigher = netapp_aiq.NetAppAIQWeigher() def test__weigh_object(self): self.assertRaises(NotImplementedError, self.netapp_aiq_weigher._weigh_object, "fake", "fake") @ddt.data( {'resource_keys': ["fake_resource_key"], 'performance_level': None}, {'resource_keys': ["fake_resource_key"], 'performance_level': "fake_psl"}, {'resource_keys': [], 'performance_level': 'fake_psl'}) @ddt.unpack def test__weigh_active_iq(self, resource_keys, performance_level): weight_properties = { 'size': 1, 'share_type': { 'extra_specs': { "netapp:performance_service_level_name": "fake_name", } } } mock_get_psl_id = self.mock_object( self.netapp_aiq_weigher, '_get_performance_level_id', 
mock.Mock(return_value=performance_level)) mock_get_resource_keys = self.mock_object( self.netapp_aiq_weigher, '_get_resource_keys', mock.Mock(return_value=resource_keys)) mock_balance_aggregates = self.mock_object( self.netapp_aiq_weigher, '_balance_aggregates', mock.Mock(return_value=["1.0", "1.0"])) res = self.netapp_aiq_weigher._weigh_active_iq( fakes.FAKE_ACTIVE_IQ_WEIGHER_LIST, weight_properties) mock_get_psl_id.assert_called_once_with("fake_name") if not resource_keys or not performance_level: self.assertEqual([], res) else: self.assertEqual(["1.0", "1.0"], res) if performance_level: mock_get_resource_keys.assert_called_once_with( fakes.FAKE_ACTIVE_IQ_WEIGHER_LIST) else: mock_get_resource_keys.assert_not_called() if not resource_keys or not performance_level: mock_balance_aggregates.assert_not_called() else: mock_balance_aggregates.assert_called_once_with( resource_keys, 1, performance_level) @ddt.data(True, False) def test__get_url(self, ipv6): if ipv6: self.netapp_aiq_weigher.host = "2001:db8::" else: self.netapp_aiq_weigher.host = "1.1.1.1" self.netapp_aiq_weigher.port = "fake_port" self.netapp_aiq_weigher.protocol = "fake_protocol" res = self.netapp_aiq_weigher._get_url() if ipv6: self.assertEqual('fake_protocol://[2001:db8::]:fake_port/api/', res) else: self.assertEqual('fake_protocol://1.1.1.1:fake_port/api/', res) @ddt.data('get', 'post', 'delete', 'patch', 'put') def test__get_request_method(self, method): res = self.netapp_aiq_weigher._get_request_method( method, self.mock_session) if method == 'get': self.assertEqual(self.mock_session.get, res) elif method == 'post': self.assertEqual(self.mock_session.post, res) elif method == 'delete': self.assertEqual(self.mock_session.delete, res) elif method == 'put': self.assertEqual(self.mock_session.put, res) elif method == 'patch': self.assertEqual(self.mock_session.patch, res) def test__get_session_method(self): mock_session_builder = self.mock_object( requests, 'Session', 
mock.Mock(return_value=self.mock_session)) mock__get_request_method = self.mock_object( self.netapp_aiq_weigher, '_get_request_method', mock.Mock(return_value=self.mock_session.post)) res = self.netapp_aiq_weigher._get_session_method('post') self.assertEqual(self.mock_session.post, res) mock_session_builder.assert_called_once_with() mock__get_request_method.assert_called_once_with( 'post', self.mock_session) def test__call_active_iq(self): response = mock.Mock() response.content = "fake_response" response.status_code = "fake_code" mock_post = mock.Mock(return_value=response) mock__get_session_method = self.mock_object( self.netapp_aiq_weigher, '_get_session_method', mock.Mock(return_value=mock_post)) fake_url = "fake_url" fake_path = "/fake_path" mock__get_url = self.mock_object( self.netapp_aiq_weigher, '_get_url', mock.Mock(return_value=fake_url)) self.netapp_aiq_weigher._call_active_iq(fake_path, "post", body="fake_body") mock_post.assert_called_once_with(fake_url + fake_path, json="fake_body") self.assertTrue(netapp_aiq.LOG.debug.called) mock__get_session_method.assert_called_once_with("post") mock__get_url.assert_called_once_with() @ddt.data({}, jsonutils.dumps( fakes.FAKE_ACTIVE_IQ_WEIGHER_AGGREGATES_RESPONSE)) def test__get_resource_keys(self, api_res): mock__call_active_iq = self.mock_object( self.netapp_aiq_weigher, '_call_active_iq', mock.Mock(return_value=(200, api_res))) res = self.netapp_aiq_weigher._get_resource_keys( fakes.FAKE_ACTIVE_IQ_WEIGHER_LIST) if api_res: self.assertEqual(['fake_key_1', 'fake_key_2', 'fake_key_3'], res) else: self.assertEqual([0, 0, 0], res) mock__call_active_iq.assert_called_once_with( 'datacenter/storage/aggregates', 'get') @ddt.data(mock.Mock(side_effect=exception.NotFound), mock.Mock(return_value=(400, "fake_res"))) def test__get_resource_keys_error(self, mock_cal): self.mock_object( self.netapp_aiq_weigher, '_call_active_iq', mock_cal) res = self.netapp_aiq_weigher._get_resource_keys( fakes.FAKE_ACTIVE_IQ_WEIGHER_LIST) 
self.assertEqual([], res) self.assertTrue(netapp_aiq.LOG.error.called) @ddt.data([], jsonutils.dumps( fakes.FAKE_ACTIVE_IQ_WEIGHER_BALANCE_RESPONSE)) def test__balance_aggregates(self, api_res): mock__call_active_iq = self.mock_object( self.netapp_aiq_weigher, '_call_active_iq', mock.Mock(return_value=(200, api_res))) res = self.netapp_aiq_weigher._balance_aggregates( ['fake_key_1', 'fake_key_2', 0, 'fake_key_3'], 10, 'fake_uuid') if not api_res: self.assertEqual([0.0, 0.0, 0.0, 0.0], res) else: self.assertEqual([10.0, 20.0, 0.0, 0.0], res) fake_body = { "capacity": '10GB', "eval_method": 1, "opt_method": 0, "priority_order": ['ops'], "separate_flag": False, "resource_keys": ['fake_key_1', 'fake_key_2', 'fake_key_3'], "ssl_key": 'fake_uuid' } mock__call_active_iq.assert_called_once_with( 'storage-provider/data-placement/balance', 'post', body=fake_body) @ddt.data(mock.Mock(side_effect=exception.NotFound), mock.Mock(return_value=(400, "fake_res"))) def test__balance_aggregates_error(self, mock_cal): self.mock_object( self.netapp_aiq_weigher, '_call_active_iq', mock_cal) res = self.netapp_aiq_weigher._balance_aggregates( ['fake_key_1', 'fake_key_2', 0, 'fake_key_3'], 10, 'fake_uuid') self.assertEqual([], res) self.assertTrue(netapp_aiq.LOG.error.called) @mock.patch('manila.db.api.IMPL.service_get_all_by_topic') def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all_by_topic, disabled=disabled) host_states = self.host_manager.get_all_host_states_share(ctxt) _mock_service_get_all_by_topic.assert_called_once_with( ctxt, CONF.share_topic, consider_disabled=False) return host_states def test_weigh_objects_netapp_only(self): self.host_manager = fakes.FakeHostManagerNetAppOnly() hosts = self._get_all_hosts() # pylint: disable=no-value-for-parameter weight_properties = "fake_properties" mock_weigh_active_iq = self.mock_object( netapp_aiq.NetAppAIQWeigher, 
'_weigh_active_iq', # third host wins mock.Mock(return_value=[0.0, 0.0, 10.0, 0.0, 0.0, 0.0])) weighed_host = self.weight_handler.get_weighed_objects( [netapp_aiq.NetAppAIQWeigher], hosts, weight_properties)[0] mock_weigh_active_iq.assert_called() self.assertEqual(1.0, weighed_host.weight) self.assertEqual( 'host3', utils.extract_host(weighed_host.obj.host)) def test_weigh_objects_non_netapp_backends(self): self.host_manager = fakes.FakeHostManager() hosts = self._get_all_hosts() # pylint: disable=no-value-for-parameter weight_properties = "fake_properties" mock_weigh_active_iq = self.mock_object( netapp_aiq.NetAppAIQWeigher, '_weigh_active_iq') weighed_host = self.weight_handler.get_weighed_objects( [netapp_aiq.NetAppAIQWeigher], hosts, weight_properties)[0] mock_weigh_active_iq.assert_not_called() self.assertEqual(0.0, weighed_host.weight) self.assertEqual( 'host1', utils.extract_host(weighed_host.obj.host)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/scheduler/weighers/test_pool.py0000664000175000017500000001516600000000000023647 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Pool Weigher. 
""" from unittest import mock from oslo_config import cfg from oslo_utils import timeutils from manila import context from manila.db import api as db_api from manila.scheduler.weighers import base_host from manila.scheduler.weighers import pool from manila.share import utils from manila import test from manila.tests.scheduler import fakes CONF = cfg.CONF class PoolWeigherTestCase(test.TestCase): def setUp(self): super(PoolWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = base_host.HostWeightHandler( 'manila.scheduler.weighers') share_servers = [ {'id': 'fake_server_id0'}, {'id': 'fake_server_id1'}, {'id': 'fake_server_id2'}, {'id': 'fake_server_id3'}, {'id': 'fake_server_id4'}, ] services = [ dict(id=1, host='host1@AAA', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=2, host='host2@BBB', topic='share', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow()), dict(id=3, host='host3@CCC', topic='share', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow()), dict(id=4, host='host@DDD', topic='share', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), dict(id=5, host='host5@EEE', topic='share', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow()), ] self.host_manager.service_states = ( fakes.SHARE_SERVICE_STATES_WITH_POOLS) self.mock_object(db_api, 'share_server_get_all_by_host', mock.Mock(return_value=share_servers)) self.mock_object(db_api.IMPL, 'service_get_all_by_topic', mock.Mock(return_value=services)) def _get_weighed_host(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = { 'server_pools_mapping': { 'fake_server_id2': [{'pool_name': 'pool2'}, ], }, } return self.weight_handler.get_weighed_objects( [pool.PoolWeigher], hosts, weight_properties)[0] def _get_all_hosts(self): ctxt = context.get_admin_context() host_states = 
self.host_manager.get_all_host_states_share(ctxt) db_api.IMPL.service_get_all_by_topic.assert_called_once_with( ctxt, CONF.share_topic, consider_disabled=False) return host_states def test_no_server_pool_mapping(self): weight_properties = { 'server_pools_mapping': {}, } weighed_host = self._get_weighed_host(self._get_all_hosts(), weight_properties) self.assertEqual(0.0, weighed_host.weight) def test_choose_pool_with_existing_share_server(self): # host1: weight = 0*(1.0) # host2: weight = 1*(1.0) # host3: weight = 0*(1.0) # host4: weight = 0*(1.0) # host5: weight = 0*(1.0) # so, host2 should win: weighed_host = self._get_weighed_host(self._get_all_hosts()) self.assertEqual(1.0, weighed_host.weight) self.assertEqual( 'host2@BBB', utils.extract_host(weighed_host.obj.host)) def test_pool_weight_multiplier_positive(self): self.flags(pool_weight_multiplier=2.0) # host1: weight = 0*(2.0) # host2: weight = 1*(2.0) # host3: weight = 0*(2.0) # host4: weight = 0*(2.0) # host5: weight = 0*(2.0) # so, host2 should win: weighed_host = self._get_weighed_host(self._get_all_hosts()) self.assertEqual(2.0, weighed_host.weight) self.assertEqual( 'host2@BBB', utils.extract_host(weighed_host.obj.host)) def test_pool_weight_multiplier_negative(self): self.flags(pool_weight_multiplier=-1.0) weight_properties = { 'server_pools_mapping': { 'fake_server_id0': [{'pool_name': 'pool1'}], 'fake_server_id2': [{'pool_name': 'pool3'}], 'fake_server_id3': [ {'pool_name': 'pool4a'}, {'pool_name': 'pool4b'}, ], 'fake_server_id4': [ {'pool_name': 'pool5a'}, {'pool_name': 'pool5b'}, ], }, } # host1: weight = 1*(-1.0) # host2: weight = 0*(-1.0) # host3: weight = 1*(-1.0) # host4: weight = 1*(-1.0) # host5: weight = 1*(-1.0) # so, host2 should win: weighed_host = self._get_weighed_host(self._get_all_hosts(), weight_properties) self.assertEqual(0.0, weighed_host.weight) self.assertEqual( 'host2@BBB', utils.extract_host(weighed_host.obj.host)) def test_pool_weigher_all_pools_with_share_servers(self): 
weight_properties = { 'server_pools_mapping': { 'fake_server_id0': [{'pool_name': 'pool1'}], 'fake_server_id1': [{'pool_name': 'pool2'}], 'fake_server_id2': [{'pool_name': 'pool3'}], 'fake_server_id3': [ {'pool_name': 'pool4a'}, {'pool_name': 'pool4b'}, ], 'fake_server_id4': [ {'pool_name': 'pool5a'}, {'pool_name': 'pool5b'}, ], }, } # host1: weight = 1*(1.0) # host2: weight = 1*(1.0) # host3: weight = 1*(1.0) # host4: weight = 1*(1.0) # host5: weight = 1*(1.0) # But after normalization all weighers will be 0 weighed_host = self._get_weighed_host(self._get_all_hosts(), weight_properties) self.assertEqual(0.0, weighed_host.weight) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9896708 manila-21.0.0/manila/tests/services/0000775000175000017500000000000000000000000017304 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/services/__init__.py0000664000175000017500000000000000000000000021403 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/services/test_api.py0000664000175000017500000000356600000000000021500 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from webob import exc from manila import context from manila.services import api as service_api from manila import test class ServicesApiTest(test.TestCase): def setUp(self): super(ServicesApiTest, self).setUp() self.context = context.get_admin_context() self.share_rpcapi = mock.Mock() self.share_rpcapi.ensure_shares = mock.Mock() self.services_api = service_api.API() self.mock_object( self.services_api, 'share_rpcapi', self.share_rpcapi ) def test_ensure_shares(self): host = 'fake_host@fakebackend' fake_service = { 'id': 'fake_service_id', 'state': 'up' } self.services_api.ensure_shares(self.context, fake_service, host) self.share_rpcapi.ensure_driver_resources.assert_called_once_with( self.context, host ) def test_ensure_shares_host_down(self): host = 'fake_host@fakebackend' fake_service = { 'id': 'fake_service_id', 'state': 'down' } self.assertRaises( exc.HTTPConflict, self.services_api.ensure_shares, self.context, fake_service, host ) self.share_rpcapi.ensure_shares.assert_not_called() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9936707 manila-21.0.0/manila/tests/share/0000775000175000017500000000000000000000000016563 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/__init__.py0000664000175000017500000000000000000000000020662 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9976707 manila-21.0.0/manila/tests/share/drivers/0000775000175000017500000000000000000000000020241 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/__init__.py0000664000175000017500000000000000000000000022340 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9976707 manila-21.0.0/manila/tests/share/drivers/cephfs/0000775000175000017500000000000000000000000021511 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/cephfs/__init__.py0000664000175000017500000000000000000000000023610 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/cephfs/test_driver.py0000664000175000017500000023362600000000000024431 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import math from unittest import mock import ddt from oslo_utils import units from manila.common import constants from manila import context import manila.exception as exception from manila.share import configuration from manila.share.drivers.cephfs import driver from manila.share import share_types from manila import test from manila.tests import fake_share DEFAULT_VOLUME_MODE = '755' ALT_VOLUME_MODE = '644' class MockRadosModule(object): """Mocked up version of the rados module.""" class Rados(mock.Mock): def __init__(self, *args, **kwargs): mock.Mock.__init__(self, spec=[ "connect", "shutdown", "state" ]) self.get_mon_addrs = mock.Mock(return_value=["1.2.3.4", "5.6.7.8"]) self.get_cluster_stats = mock.Mock(return_value={ "kb": 172953600, "kb_avail": 157123584, "kb_used": 15830016, "num_objects": 26, }) class Error(mock.Mock): pass class MockAllocationCapacityCache(mock.Mock): """Mocked up version of the rados module.""" def __init__(self, *args, **kwargs): mock.Mock.__init__(self, spec=[ "update_data" ]) self.is_expired = mock.Mock(return_value=False) self.get_data = mock.Mock(return_value=20.0) class MockCephArgparseModule(object): """Mocked up version of the ceph_argparse module.""" class json_command(mock.Mock): def __init__(self, *args, **kwargs): mock.Mock.__init__(self, spec=[ "connect", "shutdown", "state" ]) @ddt.ddt class AllocationCapacityCacheTestCase(test.TestCase): """Test the Allocation capacity cache class. This is a cache with a getter and a setter for the allocated capacity cached value in the driver, also with a timeout control. 
""" def setUp(self): super(AllocationCapacityCacheTestCase, self).setUp() timeout = 10 self._allocation_capacity_cache = driver.AllocationCapacityCache( timeout ) def test_set_get_data(self): # Nothing set yet, info should be "expired" self.assertTrue( self._allocation_capacity_cache.is_expired() ) # Class value starts with None expected_allocated_capacity_gb = None cached_allocated_capacity_gb = ( self._allocation_capacity_cache.get_data() ) self.assertEqual( cached_allocated_capacity_gb, expected_allocated_capacity_gb ) # Set a new value and ensure it works properly expected_allocated_capacity_gb = 100.0 self._allocation_capacity_cache.update_data( expected_allocated_capacity_gb ) cached_allocated_capacity_gb = ( self._allocation_capacity_cache.get_data() ) self.assertEqual( cached_allocated_capacity_gb, expected_allocated_capacity_gb ) @ddt.ddt class CephFSDriverTestCase(test.TestCase): """Test the CephFS driver. This is a very simple driver that mainly calls through to the CephFSVolumeClient interface, so the tests validate that the Manila driver calls map to the appropriate CephFSVolumeClient calls. 
""" def setUp(self): super(CephFSDriverTestCase, self).setUp() self._execute = mock.Mock() self.fake_conf = configuration.Configuration(None) self._context = context.get_admin_context() self._share = fake_share.fake_share(share_proto='CEPHFS') self._snapshot = fake_share.fake_snapshot_instance() self.fake_conf.set_default('driver_handles_share_servers', False) self.fake_conf.set_default('cephfs_auth_id', 'manila') self.mock_object(driver, "rados_command") self.mock_object(driver, "rados", MockRadosModule) self.mock_object(driver, "json_command", MockCephArgparseModule) self.mock_object(driver, 'NativeProtocolHelper') self.mock_object(driver, 'NFSProtocolHelper') self.mock_object(driver, 'NFSClusterProtocolHelper') self.mock_object(driver, "AllocationCapacityCache", MockAllocationCapacityCache) driver.ceph_default_target = ('mon-mgr', ) self.fake_private_storage = mock.Mock() self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=None)) self._driver = ( driver.CephFSDriver(execute=self._execute, configuration=self.fake_conf, private_storage=self.fake_private_storage)) self._driver.protocol_helper = mock.Mock() self._driver._cached_allocated_capacity_gb = ( MockAllocationCapacityCache() ) type(self._driver).volname = mock.PropertyMock(return_value='cephfs') self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value={})) @ddt.data( ('cephfs', None), ('nfs', None), ('nfs', 'fs-manila') ) @ddt.unpack def test_do_setup(self, protocol_helper, cephfs_nfs_cluster_id): self._driver.configuration.cephfs_protocol_helper_type = ( protocol_helper) self.fake_conf.set_default('cephfs_nfs_cluster_id', cephfs_nfs_cluster_id) self.mock_object( self._driver, '_get_cephfs_filesystem_allocation', mock.Mock(return_value=10) ) self._driver.do_setup(self._context) if protocol_helper == 'cephfs': driver.NativeProtocolHelper.assert_called_once_with( self._execute, self._driver.configuration, rados_client=self._driver._rados_client, 
volname=self._driver.volname) else: if self.fake_conf.cephfs_nfs_cluster_id is None: driver.NFSProtocolHelper.assert_called_once_with( self._execute, self._driver.configuration, rados_client=self._driver._rados_client, volname=self._driver.volname) else: driver.NFSClusterProtocolHelper.assert_called_once_with( self._execute, self._driver.configuration, rados_client=self._driver._rados_client, volname=self._driver.volname) self._driver.protocol_helper.init_helper.assert_called_once_with() self.assertEqual(DEFAULT_VOLUME_MODE, self._driver._cephfs_volume_mode) def test__get_sub_name(self): sub_name = self._driver._get_subvolume_name(self._share["id"]) self.assertEqual(sub_name, self._share["id"]) def test__get_sub_name_has_other_name(self): expected_sub_name = 'user_specified_subvolume_name' self.mock_object( self._driver.private_storage, 'get', mock.Mock(return_value=expected_sub_name) ) sub_name = self._driver._get_subvolume_name(self._share["id"]) self.assertEqual(expected_sub_name, sub_name) def test__get_sub_snapshot_name(self): sub_name = self._driver._get_subvolume_snapshot_name( self._snapshot["id"] ) self.assertEqual(sub_name, self._snapshot["id"]) def test__get_sub_snapshot_name_has_other_name(self): expected_sub_snap_name = 'user_specified_subvolume_snapshot_name' self.mock_object( self._driver.private_storage, 'get', mock.Mock(return_value=expected_sub_snap_name) ) sub_name = self._driver._get_subvolume_snapshot_name( self._snapshot["id"] ) self.assertEqual(expected_sub_snap_name, sub_name) @ddt.data( ('{"version": "ceph version 16.2.4"}', 'pacific'), ('{"version": "ceph version 15.1.2"}', 'octopus'), ('{"version": "ceph version 14.3.1"}', 'nautilus'), ) @ddt.unpack def test_version_check(self, ceph_mon_version, codename): driver.ceph_default_target = None driver.rados_command.return_value = ceph_mon_version self.mock_object( self._driver, '_get_cephfs_filesystem_allocation', mock.Mock(return_value=10) ) self._driver.do_setup(self._context) if codename == 
'nautilus': self.assertEqual(('mgr', ), driver.ceph_default_target) else: self.assertEqual(('mon-mgr', ), driver.ceph_default_target) driver.rados_command.assert_called_once_with( self._driver.rados_client, "version", target=('mon', )) self.assertEqual(1, driver.rados_command.call_count) def test_version_check_not_supported(self): driver.ceph_default_target = None driver.rados_command.return_value = ( '{"version": "ceph version 13.0.1"}') self.assertRaises(exception.ShareBackendException, self._driver.do_setup, self._context) @ddt.data('cephfs', 'nfs') def test_check_for_setup_error(self, protocol_helper): self._driver.configuration.cephfs_protocol_helper_type = ( protocol_helper) self._driver.check_for_setup_error() (self._driver.protocol_helper.check_for_setup_error. assert_called_once_with()) def test_create_share(self): create_share_prefix = "fs subvolume create" get_path_prefix = "fs subvolume getpath" create_share_dict = { "vol_name": self._driver.volname, "sub_name": self._share["id"], "size": self._share["size"] * units.Gi, "namespace_isolated": True, "mode": DEFAULT_VOLUME_MODE, } get_path_dict = { "vol_name": self._driver.volname, "sub_name": self._share["id"], } self._driver.create_share(self._context, self._share) driver.rados_command.assert_has_calls([ mock.call(self._driver.rados_client, create_share_prefix, create_share_dict), mock.call(self._driver.rados_client, get_path_prefix, get_path_dict)]) self.assertEqual(2, driver.rados_command.call_count) def test_create_share_error(self): share = fake_share.fake_share(share_proto='NFS') self.assertRaises(exception.ShareBackendException, self._driver.create_share, self._context, share) def _setup_manage_subvolume_test(self): fake_els = [ {'path': 'fake/path'} ] share_with_el = fake_share.fake_share(export_locations=fake_els) expected_subvolume_info_argdict = { "vol_name": self._driver.volname, "sub_name": fake_els[0]["path"], } subvolume_info_mock_result = { 'atime': '2024-07-23 16:50:03', 'bytes_pcent': 
'0.00', 'bytes_quota': 2147483648, 'bytes_used': 0, 'created_at': '2024-07-23 16:50:03', 'ctime': '2024-07-23 17:24:49', 'data_pool': 'cephfs.cephfs.data', 'features': ['snapshot-clone', 'snapshot-autoprotect'], 'gid': 0, 'mode': 755, 'mon_addrs': ['10.0.0.1:6342'], 'mtime': '2024-07-23 16:50:03', 'path': '/volumes/_nogroup/subbvol/475a-4972-9f6b-fe025a8d383f', 'pool_namespace': 'fsvolumes_cephfs', 'state': 'complete', 'type': 'subvolume', 'uid': 0 } return ( share_with_el, expected_subvolume_info_argdict, subvolume_info_mock_result ) def test_manage_existing_no_subvolume_name(self): self.assertRaises( exception.ShareBackendException, self._driver.manage_existing, { 'id': 'fake_project_uuid_1', 'export_locations': [{'path': None}] }, {} ) def test_manage_existing_subvolume_not_found(self): driver.rados_command.side_effect = exception.ShareBackendException( msg="does not exist" ) fake_els = [ {'path': 'fake/path'} ] share_with_el = fake_share.fake_share(export_locations=fake_els) expected_info_argdict = { "vol_name": self._driver.volname, "sub_name": fake_els[0]["path"], } self.assertRaises( exception.ShareBackendException, self._driver.manage_existing, share_with_el, {} ) driver.rados_command.assert_called_once_with( self._driver.rados_client, "fs subvolume info", expected_info_argdict, json_obj=True ) def test_manage_existing_subvolume_infinite_no_provided_size(self): share_with_el, expected_info_argdict, subvolume_info = ( self._setup_manage_subvolume_test() ) subvolume_info['bytes_quota'] = "infinite" driver.rados_command.return_value = subvolume_info self.assertRaises( exception.ShareBackendException, self._driver.manage_existing, share_with_el, {} ) driver.rados_command.assert_called_once_with( self._driver.rados_client, "fs subvolume info", expected_info_argdict, json_obj=True ) @ddt.data( exception.ShareShrinkingPossibleDataLoss, exception.ShareBackendException ) def test_manage_existing_subvolume_infinite_size(self, expected_exception): share_with_el, 
expected_info_argdict, subvolume_info = ( self._setup_manage_subvolume_test() ) subvolume_info['bytes_quota'] = "infinite" driver.rados_command.return_value = subvolume_info new_size = 1 mock_resize = self.mock_object( self._driver, '_resize_share', mock.Mock(side_effect=expected_exception('fake')) ) self.assertRaises( expected_exception, self._driver.manage_existing, share_with_el, {'size': new_size} ) driver.rados_command.assert_called_once_with( self._driver.rados_client, "fs subvolume info", expected_info_argdict, json_obj=True ) mock_resize.assert_called_once_with( share_with_el, new_size, no_shrink=True ) @ddt.data(True, False) def test_manage_existing(self, current_size_is_smaller): share_with_el, expected_info_argdict, subvolume_info = ( self._setup_manage_subvolume_test() ) if current_size_is_smaller: # set this to half gb, to ensure it will turn into 1gb subvolume_info['bytes_quota'] = 536870912 subvolume_name = share_with_el["export_locations"][0]["path"] expected_share_metadata = {"subvolume_name": subvolume_name} expected_share_updates = { "size": int( math.ceil(int(subvolume_info['bytes_quota']) / units.Gi)), "export_locations": subvolume_name } driver.rados_command.return_value = subvolume_info self.mock_object( self._driver, '_get_export_locations', mock.Mock(return_value=subvolume_name)) mock_resize_share = self.mock_object(self._driver, '_resize_share') share_updates = self._driver.manage_existing(share_with_el, {}) self.assertEqual(expected_share_updates, share_updates) driver.rados_command.assert_called_once_with( self._driver.rados_client, "fs subvolume info", expected_info_argdict, json_obj=True ) self._driver.private_storage.update.assert_called_once_with( share_with_el['id'], expected_share_metadata ) self._driver._get_export_locations.assert_called_once_with( share_with_el, subvolume_name=subvolume_name ) if current_size_is_smaller: mock_resize_share.assert_called_once_with( share_with_el, 1, no_shrink=True ) else: 
mock_resize_share.assert_not_called() def test_manage_existing_snapshot_no_snapshot_name(self): self.assertRaises( exception.ShareBackendException, self._driver.manage_existing_snapshot, { 'id': 'fake_project_uuid_1', 'provider_location': None, }, {} ) def test_manage_existing_snapshot_subvolume_not_found(self): driver.rados_command.side_effect = exception.ShareBackendException( msg="does not exist" ) snapshot_instance = { 'id': 'fake_project_uuid_1', 'provider_location': 'fake/provider/location', 'share_instance_id': 'fake_share_instance_id' } expected_info_argdict = { "vol_name": self._driver.volname, "sub_name": snapshot_instance["share_instance_id"] } self.assertRaises( exception.ShareBackendException, self._driver.manage_existing_snapshot, snapshot_instance, {} ) driver.rados_command.assert_called_once_with( self._driver.rados_client, "fs subvolume info", expected_info_argdict, json_obj=True ) def test_manage_existing_snapshot_snapshot_not_found(self): _, expected_info_argdict, subvolume_info = ( self._setup_manage_subvolume_test() ) expected_snapshot_name = 'fake/provider/location' snapshot_instance = { 'id': 'fake_project_uuid_1', 'provider_location': expected_snapshot_name, 'share_instance_id': 'fake_share_instance_id' } expected_info_argdict = { "vol_name": self._driver.volname, "sub_name": snapshot_instance["share_instance_id"] } expected_snap_info_argdict = { "vol_name": self._driver.volname, "sub_name": snapshot_instance["share_instance_id"], "snap_name": expected_snapshot_name } driver.rados_command.side_effect = [ subvolume_info, exception.ShareBackendException(msg="does not exist") ] self.assertRaises( exception.ShareBackendException, self._driver.manage_existing_snapshot, snapshot_instance, {} ) driver.rados_command.assert_has_calls([ mock.call( self._driver.rados_client, "fs subvolume info", expected_info_argdict, json_obj=True ), mock.call( self._driver.rados_client, "fs subvolume snapshot info", expected_snap_info_argdict, json_obj=True ) ]) def 
test_manage_existing_snapshot(self): _, expected_info_argdict, subvolume_info = ( self._setup_manage_subvolume_test() ) expected_snapshot_name = 'fake_snapshot_name' snapshot_instance = { 'id': 'fake_project_uuid_1', 'provider_location': expected_snapshot_name, 'share_instance_id': 'fake_share_instance_id', 'snapshot_id': 'fake_snapshot_id' } expected_info_argdict = { "vol_name": self._driver.volname, "sub_name": snapshot_instance["share_instance_id"] } expected_snap_info_argdict = { "vol_name": self._driver.volname, "sub_name": snapshot_instance["share_instance_id"], "snap_name": expected_snapshot_name } driver.rados_command.side_effect = [ subvolume_info, {'name': expected_snapshot_name} ] expected_result = { 'provider_location': expected_snapshot_name } result = self._driver.manage_existing_snapshot( snapshot_instance, {} ) self.assertEqual(expected_result, result) driver.rados_command.assert_has_calls([ mock.call( self._driver.rados_client, "fs subvolume info", expected_info_argdict, json_obj=True ), mock.call( self._driver.rados_client, "fs subvolume snapshot info", expected_snap_info_argdict, json_obj=True ) ]) self.fake_private_storage.update.assert_called_once_with( snapshot_instance['snapshot_id'], {"subvolume_snapshot_name": expected_snapshot_name} ) def test_update_access(self): alice = { 'id': 'instance_mapping_id1', 'access_id': 'accessid1', 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice' } add_rules = access_rules = [alice, ] delete_rules = [] update_rules = [] self._driver.update_access( self._context, self._share, access_rules, add_rules, delete_rules, update_rules, None) self._driver.protocol_helper.update_access.assert_called_once_with( self._context, self._share, access_rules, add_rules, delete_rules, update_rules, share_server=None, sub_name=self._share['id']) def test_ensure_shares(self): self._driver.protocol_helper.reapply_rules_while_ensuring_shares = True shares = [ fake_share.fake_share(share_id='123', 
share_proto='NFS'), fake_share.fake_share(share_id='456', share_proto='NFS'), fake_share.fake_share(share_id='789', share_proto='NFS') ] export_locations = [ { 'path': '1.2.3.4,5.6.7.8:/foo/bar', 'is_admin_only': False, 'metadata': {}, }, { 'path': '1.2.3.4,5.6.7.8:/foo/quz', 'is_admin_only': False, 'metadata': {}, }, ] share_backend_info = {'metadata': {'__mount_options': 'fs=cephfs'}} metadata = share_backend_info.get('metadata') expected_updates = { shares[0]['id']: { 'status': constants.STATUS_ERROR, 'reapply_access_rules': True, 'metadata': metadata, }, shares[1]['id']: { 'export_locations': export_locations[0], 'reapply_access_rules': True, 'metadata': metadata, }, shares[2]['id']: { 'export_locations': export_locations[1], 'reapply_access_rules': True, 'metadata': metadata, } } err_message = (f"Error ENOENT: subvolume {self._share['id']} does " f"not exist") expected_exception = exception.ShareBackendException(err_message) self.mock_object( self._driver, '_get_export_locations', mock.Mock(side_effect=[expected_exception] + export_locations)) self.mock_object( self._driver, 'get_optional_share_creation_data', mock.Mock(return_value=share_backend_info)) actual_updates = self._driver.ensure_shares(self._context, shares) self.assertEqual(3, self._driver._get_export_locations.call_count) self._driver._get_export_locations.assert_has_calls([ mock.call(shares[0]), mock.call(shares[1]), mock.call(shares[2])]) self.assertTrue(self._driver.get_optional_share_creation_data.called) self.assertEqual(expected_updates, actual_updates) def test_delete_share(self): clone_status_prefix = "fs clone status" clone_status_dict = { "vol_name": self._driver.volname, "clone_name": self._share["id"], } delete_share_prefix = "fs subvolume rm" delete_share_dict = { "vol_name": self._driver.volname, "sub_name": self._share["id"], "force": True, } driver.rados_command.side_effect = [driver.rados.Error, mock.Mock()] self._driver.delete_share(self._context, self._share) 
driver.rados_command.assert_has_calls([ mock.call(self._driver.rados_client, clone_status_prefix, clone_status_dict), mock.call(self._driver.rados_client, delete_share_prefix, delete_share_dict)]) self.assertEqual(2, driver.rados_command.call_count) def test_extend_share(self): extend_share_prefix = "fs subvolume resize" new_size_gb = self._share['size'] * 2 new_size = new_size_gb * units.Gi extend_share_dict = { "vol_name": self._driver.volname, "sub_name": self._share["id"], "new_size": new_size, } self._driver.extend_share(self._share, new_size_gb, None) driver.rados_command.assert_called_once_with( self._driver.rados_client, extend_share_prefix, extend_share_dict) def test_shrink_share(self): shrink_share_prefix = "fs subvolume resize" new_size_gb = self._share['size'] * 0.5 new_size = new_size_gb * units.Gi shrink_share_dict = { "vol_name": self._driver.volname, "sub_name": self._share["id"], "new_size": new_size, "no_shrink": True, } self._driver.shrink_share(self._share, new_size_gb, None) driver.rados_command.assert_called_once_with( self._driver.rados_client, shrink_share_prefix, shrink_share_dict) def test_shrink_share_full(self): """That shrink fails when share is too full.""" shrink_share_prefix = "fs subvolume resize" new_size_gb = self._share['size'] * 0.5 new_size = new_size_gb * units.Gi msg = ("Can't resize the subvolume. 
" "The new size '{0}' would be lesser " "than the current used size '{1}'".format( new_size, self._share['size'])) driver.rados_command.side_effect = exception.ShareBackendException(msg) shrink_share_dict = { "vol_name": self._driver.volname, "sub_name": self._share["id"], "new_size": new_size, "no_shrink": True, } # Pretend to be full up self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, self._share, new_size_gb, None) driver.rados_command.assert_called_once_with( self._driver.rados_client, shrink_share_prefix, shrink_share_dict) def test_create_snapshot(self): snapshot_create_prefix = "fs subvolume snapshot create" snapshot_create_dict = { "vol_name": self._driver.volname, "sub_name": self._snapshot["share_id"], "snap_name": self._snapshot["snapshot_id"] } self._driver.create_snapshot(self._context, self._snapshot, None) driver.rados_command.assert_called_once_with( self._driver.rados_client, snapshot_create_prefix, snapshot_create_dict) def test_delete_snapshot(self): legacy_snap_name = "_".join( [self._snapshot["snapshot_id"], self._snapshot["id"]]) snapshot_remove_prefix = "fs subvolume snapshot rm" snapshot_remove_dict = { "vol_name": self._driver.volname, "sub_name": self._snapshot["share_id"], "snap_name": legacy_snap_name, "force": True } snapshot_remove_dict_2 = snapshot_remove_dict.copy() snapshot_remove_dict_2.update( {"snap_name": self._snapshot["snapshot_id"]}) self.mock_object( self._driver, '_get_subvolume_snapshot_name', mock.Mock(return_value=self._snapshot["snapshot_id"])) self._driver.delete_snapshot(self._context, self._snapshot, None) driver.rados_command.assert_has_calls([ mock.call(self._driver.rados_client, snapshot_remove_prefix, snapshot_remove_dict), mock.call(self._driver.rados_client, snapshot_remove_prefix, snapshot_remove_dict_2)]) self.assertEqual(2, driver.rados_command.call_count) def test_create_share_group(self): group_create_prefix = "fs subvolumegroup create" group_create_dict = { 
"vol_name": self._driver.volname, "group_name": "grp1", "mode": DEFAULT_VOLUME_MODE, } self._driver.create_share_group(self._context, {"id": "grp1"}, None) driver.rados_command.assert_called_once_with( self._driver.rados_client, group_create_prefix, group_create_dict) def test_delete_share_group(self): group_delete_prefix = "fs subvolumegroup rm" group_delete_dict = { "vol_name": self._driver.volname, "group_name": "grp1", "force": True, } self._driver.delete_share_group(self._context, {"id": "grp1"}, None) driver.rados_command.assert_called_once_with( self._driver.rados_client, group_delete_prefix, group_delete_dict) def test_create_share_group_snapshot(self): msg = ("Share group snapshot feature is no longer supported in " "mainline CephFS (existing group snapshots can still be " "listed and deleted).") driver.rados_command.side_effect = exception.ShareBackendException(msg) self.assertRaises(exception.ShareBackendException, self._driver.create_share_group_snapshot, self._context, {'share_group_id': 'sgid', 'id': 'snapid'}) def test_delete_share_group_snapshot(self): group_snapshot_delete_prefix = "fs subvolumegroup snapshot rm" group_snapshot_delete_dict = { "vol_name": self._driver.volname, "group_name": "sgid", "snap_name": "snapid", "force": True, } self._driver.delete_share_group_snapshot(self._context, { 'share_group_id': 'sgid', 'id': 'snapid', "force": True, }) driver.rados_command.assert_called_once_with( self._driver.rados_client, group_snapshot_delete_prefix, group_snapshot_delete_dict) def test_create_share_from_snapshot(self): parent_share = { 'id': 'fakeparentshareid', 'name': 'fakeparentshare', } create_share_from_snapshot_prefix = "fs subvolume snapshot clone" create_share_from_snapshot_dict = { "vol_name": self._driver.volname, "sub_name": parent_share["id"], "snap_name": self._snapshot["snapshot_id"], "target_sub_name": self._share["id"] } get_clone_status_prefix = "fs clone status" get_clone_status_dict = { "vol_name": self._driver.volname, 
"clone_name": self._share["id"], } driver.rados_command.return_value = { 'status': { 'state': 'in-progress', }, } self._driver.create_share_from_snapshot( self._context, self._share, self._snapshot, None, parent_share=parent_share ) driver.rados_command.assert_has_calls([ mock.call(self._driver.rados_client, create_share_from_snapshot_prefix, create_share_from_snapshot_dict), mock.call(self._driver.rados_client, get_clone_status_prefix, get_clone_status_dict, True)]) self.assertEqual(2, driver.rados_command.call_count) def test_delete_share_from_snapshot(self): clone_status_prefix = "fs clone status" clone_status_dict = { "vol_name": self._driver.volname, "clone_name": self._share["id"], } clone_cancel_prefix = "fs clone cancel" clone_cancel_dict = { "vol_name": self._driver.volname, "clone_name": self._share["id"], "force": True, } delete_share_prefix = "fs subvolume rm" delete_share_dict = { "vol_name": self._driver.volname, "sub_name": self._share["id"], "force": True, } driver.rados_command.side_effect = [ 'in-progress', mock.Mock(), mock.Mock()] self._driver.delete_share(self._context, self._share) driver.rados_command.assert_has_calls([ mock.call(self._driver.rados_client, clone_status_prefix, clone_status_dict), mock.call(self._driver.rados_client, clone_cancel_prefix, clone_cancel_dict), mock.call(self._driver.rados_client, delete_share_prefix, delete_share_dict)]) self.assertEqual(3, driver.rados_command.call_count) def test_delete_driver(self): # Create share to prompt volume_client construction self._driver.create_share(self._context, self._share) rc = self._driver._rados_client del self._driver rc.shutdown.assert_called_once_with() def test_delete_driver_no_client(self): self.assertIsNone(self._driver._rados_client) del self._driver @ddt.data( [21474836480, 293878, 97848372], [21474836480, "infinite", 97848372], ["infinite", "infinite", "infinite"], ) def test__get_cephfs_filesystem_allocation(self, share_sizes): subvolume_ls_args = {"vol_name": 
self._driver.volname} rados_returns = [] rados_subvolume_list_result = [] subvolume_info_mock_calls = [] subvolume_names = [] expected_allocated_size_gb = 0 for idx, size in enumerate(share_sizes): subvolume_name = f"subvolume{idx}" subvolume_names.append(subvolume_name) rados_returns.append({"bytes_quota": share_sizes[idx]}) rados_subvolume_list_result.append({"name": subvolume_name}) if size != "infinite": expected_allocated_size_gb += size if expected_allocated_size_gb > 0: expected_allocated_size_gb = ( round(int(expected_allocated_size_gb) / units.Gi, 2) ) # first call we make to rados is the subvolume ls rados_returns.insert(0, rados_subvolume_list_result) driver.rados_command.side_effect = rados_returns allocated_size_gb = self._driver._get_cephfs_filesystem_allocation() self.assertEqual(allocated_size_gb, expected_allocated_size_gb) for name in subvolume_names: subvolume_info_arg_dict = { "vol_name": self._driver.volname, "sub_name": name } subvolume_info_mock_calls.append( mock.call( self._driver._rados_client, "fs subvolume info", subvolume_info_arg_dict, json_obj=True ) ) driver.rados_command.assert_has_calls([ mock.call( self._driver._rados_client, "fs subvolume ls", subvolume_ls_args, json_obj=True), *subvolume_info_mock_calls ]) @ddt.data(True, False) def test_update_share_stats(self, cache_expired): allocated_capacity_gb = 20.0 self._driver.get_configured_ip_versions = mock.Mock(return_value=[4]) self._driver.configuration.local_conf.set_override( 'reserved_share_percentage', 5) self._driver.configuration.local_conf.set_override( 'reserved_share_from_snapshot_percentage', 2) self._driver.configuration.local_conf.set_override( 'reserved_share_extend_percentage', 2) self._driver._cached_allocated_capacity_gb.is_expired = mock.Mock( return_value=cache_expired ) self.mock_object( self._driver, '_get_cephfs_filesystem_allocation', mock.Mock(return_value=20.0) ) self.mock_object( self._driver, '_get_cephfs_filesystem_allocation', 
mock.Mock(return_value=allocated_capacity_gb) ) self._driver._update_share_stats() result = self._driver._stats self.assertEqual(5, result['pools'][0]['reserved_percentage']) self.assertEqual(2, result['pools'][0]['reserved_snapshot_percentage']) self.assertEqual( 2, result['pools'][0]['reserved_share_extend_percentage']) self.assertEqual(164.94, result['pools'][0]['total_capacity_gb']) self.assertEqual(149.84, result['pools'][0]['free_capacity_gb']) self.assertEqual(20.0, result['pools'][0]['allocated_capacity_gb']) self.assertTrue(result['ipv4_support']) self.assertFalse(result['ipv6_support']) self.assertEqual("CEPHFS", result['storage_protocol']) if cache_expired: self._driver._get_cephfs_filesystem_allocation.assert_called_once() (self._driver._cached_allocated_capacity_gb .update_data.assert_called_once_with(allocated_capacity_gb)) else: (self._driver._cached_allocated_capacity_gb .get_data.assert_called_once()) @ddt.data('cephfs', 'nfs') def test_get_configured_ip_versions(self, protocol_helper): self._driver.configuration.cephfs_protocol_helper_type = ( protocol_helper) self._driver.get_configured_ip_versions() (self._driver.protocol_helper.get_configured_ip_versions. 
assert_called_once_with()) @ddt.data( ([{'id': 'instance_mapping_id1', 'access_id': 'accessid1', 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice' }], 'fake_project_uuid_1'), ([{'id': 'instance_mapping_id1', 'access_id': 'accessid1', 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice' }], 'fake_project_uuid_2'), ([], 'fake_project_uuid_1'), ([], 'fake_project_uuid_2'), ) @ddt.unpack def test_transfer_accept(self, access_rules, new_project): fake_share_1 = {"project_id": "fake_project_uuid_1"} same_project = new_project == 'fake_project_uuid_1' if access_rules and not same_project: self.assertRaises(exception.DriverCannotTransferShareWithRules, self._driver.transfer_accept, self._context, fake_share_1, 'new_user', new_project, access_rules) @ddt.ddt class NativeProtocolHelperTestCase(test.TestCase): def setUp(self): super(NativeProtocolHelperTestCase, self).setUp() self.fake_conf = configuration.Configuration(None) self._context = context.get_admin_context() self._share = fake_share.fake_share_instance(share_proto='CEPHFS') self.fake_conf.set_default('driver_handles_share_servers', False) self.mock_object(driver, "rados_command") driver.ceph_default_target = ('mon-mgr', ) self._native_protocol_helper = driver.NativeProtocolHelper( None, self.fake_conf, rados_client=MockRadosModule.Rados(), volname="cephfs" ) self._rados_client = self._native_protocol_helper.rados_client self._native_protocol_helper.get_mon_addrs = mock.Mock( return_value=['1.2.3.4', '5.6.7.8']) def test_check_for_setup_error(self): expected = None result = self._native_protocol_helper.check_for_setup_error() self.assertEqual(expected, result) def test_get_export_locations(self): fake_cephfs_subvolume_path = '/foo/bar' expected_export_locations = { 'path': '1.2.3.4,5.6.7.8:/foo/bar', 'is_admin_only': False, 'metadata': {}, } export_locations = self._native_protocol_helper.get_export_locations( self._share, fake_cephfs_subvolume_path) 
self.assertEqual(expected_export_locations, export_locations) self._native_protocol_helper.get_mon_addrs.assert_called_once_with() @ddt.data(constants.ACCESS_LEVEL_RW, constants.ACCESS_LEVEL_RO) def test_allow_access_rw_ro(self, mode): access_allow_prefix = "fs subvolume authorize" access_allow_mode = "r" if mode == "ro" else "rw" access_allow_dict = { "vol_name": self._native_protocol_helper.volname, "sub_name": self._share["id"], "auth_id": "alice", "tenant_id": self._share["project_id"], "access_level": access_allow_mode, } rule = { 'access_level': mode, 'access_to': 'alice', 'access_type': 'cephx', } driver.rados_command.return_value = 'native-zorilla' auth_key = self._native_protocol_helper._allow_access( self._context, self._share, rule, sub_name=self._share['id']) self.assertEqual("native-zorilla", auth_key) driver.rados_command.assert_called_once_with( self._rados_client, access_allow_prefix, access_allow_dict) def test_allow_access_wrong_type(self): self.assertRaises( exception.InvalidShareAccessType, self._native_protocol_helper._allow_access, self._context, self._share, { 'access_level': constants.ACCESS_LEVEL_RW, 'access_type': 'RHUBARB', 'access_to': 'alice' }, self._share['id'] ) def test_allow_access_same_cephx_id_as_manila_service(self): self.assertRaises( exception.InvalidShareAccess, self._native_protocol_helper._allow_access, self._context, self._share, { 'access_level': constants.ACCESS_LEVEL_RW, 'access_type': 'cephx', 'access_to': 'manila', }, self._share['id'] ) def test_allow_access_to_preexisting_ceph_user(self): msg = ("auth ID: admin exists and not created by " "ceph manager plugin. 
Not allowed to modify") driver.rados_command.side_effect = exception.ShareBackendException(msg) self.assertRaises(exception.InvalidShareAccess, self._native_protocol_helper._allow_access, self._context, self._share, { 'access_level': constants.ACCESS_LEVEL_RW, 'access_type': 'cephx', 'access_to': 'admin' }, self._share['id'] ) def test_deny_access(self): access_deny_prefix = "fs subvolume deauthorize" access_deny_dict = { "vol_name": self._native_protocol_helper.volname, "sub_name": self._share["id"], "auth_id": "alice", } evict_prefix = "fs subvolume evict" evict_dict = access_deny_dict self._native_protocol_helper._deny_access( self._context, self._share, { 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice' }, sub_name=self._share['id'] ) driver.rados_command.assert_has_calls([ mock.call(self._native_protocol_helper.rados_client, access_deny_prefix, access_deny_dict), mock.call(self._native_protocol_helper.rados_client, evict_prefix, evict_dict)]) self.assertEqual(2, driver.rados_command.call_count) def test_deny_access_missing_access_rule(self): access_deny_prefix = "fs subvolume deauthorize" exception_msg = ( f"json_command failed - prefix=fs subvolume deauthorize, " f"argdict='vol_name': {self._native_protocol_helper.volname}, " f"'sub_name': '{self._share['id']}', 'auth_id': 'alice', " f"'format': 'json' - exception message: [errno -2] " f"auth ID: alice doesn't exist.") driver.rados_command.side_effect = exception.ShareBackendException( msg=exception_msg) access_deny_dict = { "vol_name": self._native_protocol_helper.volname, "sub_name": self._share["id"], "auth_id": "alice", } self._native_protocol_helper._deny_access( self._context, self._share, { 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice' }, sub_name=self._share['id'] ) driver.rados_command.assert_called_once_with( self._native_protocol_helper.rados_client, access_deny_prefix, access_deny_dict) self.assertEqual(1, driver.rados_command.call_count) def 
test_update_access_add_rm(self): alice = { 'id': 'instance_mapping_id1', 'access_id': 'accessid1', 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice' } bob = { 'id': 'instance_mapping_id2', 'access_id': 'accessid2', 'access_level': 'ro', 'access_type': 'cephx', 'access_to': 'bob' } manila = { 'id': 'instance_mapping_id3', 'access_id': 'accessid3', 'access_level': 'ro', 'access_type': 'cephx', 'access_to': 'manila' } admin = { 'id': 'instance_mapping_id4', 'access_id': 'accessid4', 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'admin' } dabo = { 'id': 'instance_mapping_id5', 'access_id': 'accessid5', 'access_level': 'rwx', 'access_type': 'cephx', 'access_to': 'dabo' } allow_access_side_effects = [ 'abc123', exception.InvalidShareAccess(reason='not'), exception.InvalidShareAccess(reason='allowed'), exception.InvalidShareAccessLevel(level='rwx') ] self.mock_object(self._native_protocol_helper.message_api, 'create') self.mock_object(self._native_protocol_helper, '_deny_access') self.mock_object(self._native_protocol_helper, '_allow_access', mock.Mock(side_effect=allow_access_side_effects)) access_updates = self._native_protocol_helper.update_access( self._context, self._share, access_rules=[alice, manila, admin, dabo], add_rules=[alice, manila, admin, dabo], delete_rules=[bob], update_rules=[], sub_name=self._share['id'] ) expected_access_updates = { 'accessid1': {'access_key': 'abc123'}, 'accessid3': {'state': 'error'}, 'accessid4': {'state': 'error'}, 'accessid5': {'state': 'error'} } self.assertEqual(expected_access_updates, access_updates) self._native_protocol_helper._allow_access.assert_has_calls( [mock.call(self._context, self._share, alice, sub_name=self._share['id']), mock.call(self._context, self._share, manila, sub_name=self._share['id']), mock.call(self._context, self._share, admin, sub_name=self._share['id'])]) self._native_protocol_helper._deny_access.assert_called_once_with( self._context, self._share, bob, 
sub_name=self._share['id']) self.assertEqual( 3, self._native_protocol_helper.message_api.create.call_count) def test_update_access_all(self): get_authorized_ids_prefix = "fs subvolume authorized_list" get_authorized_ids_dict = { "vol_name": self._native_protocol_helper.volname, "sub_name": self._share["id"] } access_allow_prefix = "fs subvolume authorize" access_allow_dict = { "vol_name": self._native_protocol_helper.volname, "sub_name": self._share["id"], "auth_id": "alice", "tenant_id": self._share["project_id"], "access_level": "rw", } access_deny_prefix = "fs subvolume deauthorize" access_deny_john_dict = { "vol_name": self._native_protocol_helper.volname, "sub_name": self._share["id"], "auth_id": "john", } access_deny_paul_dict = { "vol_name": self._native_protocol_helper.volname, "sub_name": self._share["id"], "auth_id": "paul", } evict_prefix = "fs subvolume evict" alice = { 'id': 'instance_mapping_id1', 'access_id': 'accessid1', 'access_level': 'rw', 'access_type': 'cephx', 'access_to': 'alice', } driver.rados_command.side_effect = [ [{"john": "rw"}, {"paul": "r"}], 'abc123', mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()] access_updates = self._native_protocol_helper.update_access( self._context, self._share, access_rules=[alice], add_rules=[], delete_rules=[], update_rules=[], sub_name=self._share['id']) self.assertEqual( {'accessid1': {'access_key': 'abc123'}}, access_updates) driver.rados_command.assert_has_calls([ mock.call(self._native_protocol_helper.rados_client, get_authorized_ids_prefix, get_authorized_ids_dict, json_obj=True), mock.call(self._native_protocol_helper.rados_client, access_allow_prefix, access_allow_dict), mock.call(self._native_protocol_helper.rados_client, access_deny_prefix, access_deny_john_dict), mock.call(self._native_protocol_helper.rados_client, evict_prefix, access_deny_john_dict), mock.call(self._native_protocol_helper.rados_client, access_deny_prefix, access_deny_paul_dict), 
mock.call(self._native_protocol_helper.rados_client, evict_prefix, access_deny_paul_dict)], any_order=True) self.assertEqual(6, driver.rados_command.call_count) def test_get_configured_ip_versions(self): expected = [4] result = self._native_protocol_helper.get_configured_ip_versions() self.assertEqual(expected, result) @ddt.ddt class NFSProtocolHelperTestCase(test.TestCase): def setUp(self): super(NFSProtocolHelperTestCase, self).setUp() self._execute = mock.Mock() self._share = fake_share.fake_share(share_proto='NFS') self._rados_client = MockRadosModule.Rados() self._volname = "cephfs" self.fake_conf = configuration.Configuration(None) self.fake_conf.set_default('cephfs_ganesha_server_ip', 'fakeip') self.mock_object(driver.ganesha_utils, 'SSHExecutor') self.mock_object(driver.ganesha_utils, 'RootExecutor') self.mock_object(driver.socket, 'gethostname') self.mock_object(driver, "rados_command") driver.ceph_default_target = ('mon-mgr', ) self._nfs_helper = driver.NFSProtocolHelper( self._execute, self.fake_conf, rados_client=self._rados_client, volname=self._volname) @ddt.data( (['fakehost', 'some.host.name', 'some.host.name.', '1.1.1.0'], False), (['fakehost', 'some.host.name', 'some.host.name.', '1.1..1.0'], True), (['fakehost', 'some.host.name', 'some.host.name', '1.1.1.256'], True), (['fakehost..', 'some.host.name', 'some.host.name', '1.1.1.0'], True), (['fakehost', 'some.host.name..', 'some.host.name', '1.1.1.0'], True), (['fakehost', 'some.host.name', 'some.host.name.', '1.1..1.0'], True), (['fakehost', 'some.host.name', '1.1.1.0/24'], True), (['fakehost', 'some.host.name', '1.1.1.0', '1001::1001'], False), (['fakehost', 'some.host.name', '1.1.1.0', '1001:1001'], True), (['fakehost', 'some.host.name', '1.1.1.0', '1001::1001:'], True), (['fakehost', 'some.host.name', '1.1.1.0', '1001::1001.'], True), (['fakehost', 'some.host.name', '1.1.1.0', '1001::1001/129.'], True), ) @ddt.unpack def test_check_for_setup_error(self, cephfs_ganesha_export_ips, raises): 
fake_conf = configuration.Configuration(None) fake_conf.set_default('cephfs_ganesha_export_ips', cephfs_ganesha_export_ips) helper = driver.NFSProtocolHelper( self._execute, fake_conf, rados_client=MockRadosModule.Rados(), volname="cephfs" ) if raises: self.assertRaises(exception.InvalidParameterValue, helper.check_for_setup_error) else: self.assertIsNone(helper.check_for_setup_error()) @ddt.data(False, True) def test_init_executor_type(self, ganesha_server_is_remote): fake_conf = configuration.Configuration(None) conf_args_list = [ ('cephfs_ganesha_server_is_remote', ganesha_server_is_remote), ('cephfs_ganesha_server_ip', 'fakeip'), ('cephfs_ganesha_server_username', 'fake_username'), ('cephfs_ganesha_server_password', 'fakepwd'), ('cephfs_ganesha_path_to_private_key', 'fakepathtokey')] for args in conf_args_list: fake_conf.set_default(*args) driver.NFSProtocolHelper( self._execute, fake_conf, rados_client=MockRadosModule.Rados(), volname="cephfs" ) if ganesha_server_is_remote: driver.ganesha_utils.SSHExecutor.assert_has_calls( [mock.call('fakeip', 22, None, 'fake_username', password='fakepwd', privatekey='fakepathtokey')]) else: driver.ganesha_utils.RootExecutor.assert_has_calls( [mock.call(self._execute)]) @ddt.data('fakeip', None) def test_init_identify_local_host(self, ganesha_server_ip): self.mock_object(driver.LOG, 'info') fake_conf = configuration.Configuration(None) conf_args_list = [ ('cephfs_ganesha_server_ip', ganesha_server_ip), ('cephfs_ganesha_server_username', 'fake_username'), ('cephfs_ganesha_server_password', 'fakepwd'), ('cephfs_ganesha_path_to_private_key', 'fakepathtokey')] for args in conf_args_list: fake_conf.set_default(*args) driver.NFSProtocolHelper( self._execute, fake_conf, rados_client=MockRadosModule.Rados(), volname="cephfs" ) driver.ganesha_utils.RootExecutor.assert_has_calls( [mock.call(self._execute)]) if ganesha_server_ip: self.assertFalse(driver.socket.gethostname.called) self.assertFalse(driver.LOG.info.called) else: 
driver.socket.gethostname.assert_called_once_with() driver.LOG.info.assert_called_once() def test_get_export_locations_no_export_ips_configured(self): cephfs_subvolume_path = "/foo/bar" fake_conf = configuration.Configuration(None) fake_conf.set_default('cephfs_ganesha_server_ip', '1.2.3.4') helper = driver.NFSProtocolHelper( self._execute, fake_conf, rados_client=MockRadosModule.Rados(), volname="cephfs" ) ret = helper.get_export_locations(self._share, cephfs_subvolume_path) self.assertEqual( [{ 'path': '1.2.3.4:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': False, }, }], ret) def test_get_export_locations_with_export_ips_configured(self): fake_conf = configuration.Configuration(None) conf_args_list = [ ('cephfs_ganesha_server_ip', '1.2.3.4'), ('cephfs_ganesha_export_ips', ['127.0.0.1', 'fd3f:c057:1192:1::1', '::1'])] for args in conf_args_list: fake_conf.set_default(*args) helper = driver.NFSProtocolHelper( self._execute, fake_conf, rados_client=MockRadosModule.Rados(), volname="cephfs" ) cephfs_subvolume_path = "/foo/bar" ret = helper.get_export_locations(self._share, cephfs_subvolume_path) self._assertEqualListsOfObjects( [ { 'path': '127.0.0.1:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': False, }, }, { 'path': '[fd3f:c057:1192:1::1]:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': False, }, }, { 'path': '[::1]:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': False, }, }, ], ret) @ddt.data(('some.host.name', None, [4, 6]), ('host.', None, [4, 6]), ('1001::1001', None, [6]), ('1.1.1.0', None, [4]), (None, ['1001::1001', '1.1.1.0'], [6, 4]), (None, ['1001::1001'], [6]), (None, ['1.1.1.0'], [4]), (None, ['1001::1001/129', '1.1.1.0'], [4, 6])) @ddt.unpack def test_get_configured_ip_versions( self, cephfs_ganesha_server_ip, cephfs_ganesha_export_ips, configured_ip_version): fake_conf = configuration.Configuration(None) conf_args_list = [ ('cephfs_ganesha_server_ip', cephfs_ganesha_server_ip), 
('cephfs_ganesha_export_ips', cephfs_ganesha_export_ips)] for args in conf_args_list: fake_conf.set_default(*args) helper = driver.NFSProtocolHelper( self._execute, fake_conf, rados_client=MockRadosModule.Rados(), volname="cephfs" ) self.assertEqual(set(configured_ip_version), set(helper.get_configured_ip_versions())) self.assertEqual(set(configured_ip_version), helper.configured_ip_versions) def test_get_configured_ip_versions_already_set(self): fake_conf = configuration.Configuration(None) helper = driver.NFSProtocolHelper( self._execute, fake_conf, rados_client=MockRadosModule.Rados(), volname="cephfs" ) ip_versions = ['foo', 'bar'] helper.configured_ip_versions = ip_versions result = helper.get_configured_ip_versions() self.assertEqual(ip_versions, result) def test_default_config_hook(self): fake_conf_dict = {'key': 'value1'} self.mock_object(driver.ganesha.GaneshaNASHelper, '_default_config_hook', mock.Mock(return_value={})) self.mock_object(driver.ganesha_utils, 'path_from', mock.Mock(return_value='/fakedir/cephfs/conf')) self.mock_object(self._nfs_helper, '_load_conf_dir', mock.Mock(return_value=fake_conf_dict)) ret = self._nfs_helper._default_config_hook() (driver.ganesha.GaneshaNASHelper._default_config_hook. 
assert_called_once_with()) driver.ganesha_utils.path_from.assert_called_once_with( driver.__file__, 'conf') self._nfs_helper._load_conf_dir.assert_called_once_with( '/fakedir/cephfs/conf') self.assertEqual(fake_conf_dict, ret) def test_fsal_hook(self): access_allow_prefix = "fs subvolume authorize" access_allow_dict = { "vol_name": self._nfs_helper.volname, "sub_name": self._share["id"], "auth_id": "ganesha-fakeid", "tenant_id": self._share["project_id"], "access_level": "rw", } expected_ret = { "Name": "Ceph", "User_Id": "ganesha-fakeid", "Secret_Access_Key": "ganesha-zorilla", "Filesystem": self._nfs_helper.volname } driver.rados_command.return_value = 'ganesha-zorilla' ret = self._nfs_helper._fsal_hook( None, self._share, None, self._share['id'] ) driver.rados_command.assert_called_once_with( self._nfs_helper.rados_client, access_allow_prefix, access_allow_dict) self.assertEqual(expected_ret, ret) def test_cleanup_fsal_hook(self): access_deny_prefix = "fs subvolume deauthorize" access_deny_dict = { "vol_name": self._nfs_helper.volname, "sub_name": self._share["id"], "auth_id": "ganesha-fakeid", } ret = self._nfs_helper._cleanup_fsal_hook( None, self._share, None, self._share['id'] ) driver.rados_command.assert_called_once_with( self._nfs_helper.rados_client, access_deny_prefix, access_deny_dict) self.assertIsNone(ret) def test_get_export_path(self): get_path_prefix = "fs subvolume getpath" get_path_dict = { "vol_name": self._nfs_helper.volname, "sub_name": self._share["id"], } driver.rados_command.return_value = '/foo/bar' ret = self._nfs_helper._get_export_path(self._share) driver.rados_command.assert_called_once_with( self._nfs_helper.rados_client, get_path_prefix, get_path_dict) self.assertEqual('/foo/bar', ret) def test_get_export_pseudo_path(self): get_path_prefix = "fs subvolume getpath" get_path_dict = { "vol_name": self._nfs_helper.volname, "sub_name": self._share["id"], } driver.rados_command.return_value = '/foo/bar' ret = 
self._nfs_helper._get_export_pseudo_path(self._share) driver.rados_command.assert_called_once_with( self._nfs_helper.rados_client, get_path_prefix, get_path_dict) self.assertEqual('/foo/bar', ret) @ddt.ddt class NFSClusterProtocolHelperTestCase(test.TestCase): def setUp(self): super(NFSClusterProtocolHelperTestCase, self).setUp() self._execute = mock.Mock() self._context = context.get_admin_context() self._share = fake_share.fake_share(share_proto='NFS') self._rados_client = MockRadosModule.Rados() self._volname = "cephfs" self.fake_conf = configuration.Configuration(None) self.mock_object(driver.NFSClusterProtocolHelper, '_get_export_path', mock.Mock(return_value="ganesha:/foo/bar")) self.mock_object(driver.NFSClusterProtocolHelper, '_get_export_pseudo_path', mock.Mock(return_value="ganesha:/foo/bar")) self.mock_object(driver, "rados_command") driver.ceph_default_target = ('mon-mgr', ) self._nfscluster_protocol_helper = driver.NFSClusterProtocolHelper( self._execute, self.fake_conf, rados_client=self._rados_client, volname=self._volname) type(self._nfscluster_protocol_helper).nfs_clusterid = ( mock.PropertyMock(return_value='fs-manila')) def test_get_export_ips_no_backends(self): fake_conf = configuration.Configuration(None) cluster_info = { "fs-manila": { "virtual_ip": None, "backend": [] } } driver.rados_command.return_value = json.dumps(cluster_info) helper = driver.NFSClusterProtocolHelper( self._execute, fake_conf, rados_client=self._rados_client, volname=self._volname ) self.assertRaises(exception.ShareBackendException, helper._get_export_ips) @ddt.data(constants.ACCESS_LEVEL_RW, constants.ACCESS_LEVEL_RO) def test_allow_access_rw_ro_when_export_does_not_exist(self, mode): export_info_prefix = "nfs export info" access_allow_prefix = "nfs export apply" nfs_clusterid = self._nfscluster_protocol_helper.nfs_clusterid volname = self._nfscluster_protocol_helper.volname driver.rados_command.return_value = {} clients = { 'access_type': mode, 'addresses': 
['10.0.0.1'], 'squash': 'none' } export_info_dict = { "cluster_id": nfs_clusterid, "pseudo_path": "ganesha:/foo/bar", } access_allow_dict = { "cluster_id": nfs_clusterid, } export = { "path": "ganesha:/foo/bar", "cluster_id": nfs_clusterid, "pseudo": "ganesha:/foo/bar", "squash": "none", "security_label": True, "fsal": { "name": "CEPH", "fs_name": volname, }, "clients": clients } inbuf = json.dumps(export).encode('utf-8') self._nfscluster_protocol_helper._allow_access( self._share, clients, sub_name=self._share['id'] ) driver.rados_command.assert_has_calls([ mock.call(self._rados_client, export_info_prefix, export_info_dict, json_obj=True), mock.call(self._rados_client, access_allow_prefix, access_allow_dict, inbuf=inbuf)]) self.assertEqual(2, driver.rados_command.call_count) @ddt.data(constants.ACCESS_LEVEL_RW, constants.ACCESS_LEVEL_RO) def test_allow_access_rw_ro_when_export_exist(self, mode): export_info_prefix = "nfs export info" access_allow_prefix = "nfs export apply" nfs_clusterid = self._nfscluster_protocol_helper.nfs_clusterid volname = self._nfscluster_protocol_helper.volname new_clients = { 'access_type': mode, 'addresses': ['10.0.0.2'], 'squash': 'none' } export_info_dict = { "cluster_id": nfs_clusterid, "pseudo_path": "ganesha:/foo/bar", } access_allow_dict = { "cluster_id": nfs_clusterid, } export = { "path": "ganesha:/foo/bar", "cluster_id": nfs_clusterid, "pseudo": "ganesha:/foo/bar", "squash": "none", "security_label": True, "fsal": { "name": "CEPH", "User_Id": "nfs.user", "fs_name": volname }, "clients": { 'access_type': "ro", 'addresses': ['10.0.0.1'], 'squash': 'none' } } driver.rados_command.return_value = export export['clients'] = new_clients inbuf = json.dumps(export).encode('utf-8') self._nfscluster_protocol_helper._allow_access( self._share, new_clients, sub_name=self._share['id'] ) driver.rados_command.assert_has_calls([ mock.call(self._rados_client, export_info_prefix, export_info_dict, json_obj=True), mock.call(self._rados_client, 
access_allow_prefix, access_allow_dict, inbuf=inbuf)]) self.assertEqual(2, driver.rados_command.call_count) def test_deny_access(self): access_deny_prefix = "nfs export rm" nfs_clusterid = self._nfscluster_protocol_helper.nfs_clusterid access_deny_dict = { "cluster_id": nfs_clusterid, "pseudo_path": "ganesha:/foo/bar" } self._nfscluster_protocol_helper._deny_access( self._share, self._share['id'] ) driver.rados_command.assert_called_once_with( self._rados_client, access_deny_prefix, access_deny_dict) def test_get_export_locations(self): cluster_info_prefix = "nfs cluster info" nfs_clusterid = self._nfscluster_protocol_helper.nfs_clusterid cluster_info_dict = { "cluster_id": nfs_clusterid, } cluster_info = {"fs-manila": { "virtual_ip": None, "backend": [ {"hostname": "fake-ceph-node-1", "ip": "10.0.0.10", "port": "1010"}, {"hostname": "fake-ceph-node-2", "ip": "10.0.0.11", "port": "1011"} ] }} driver.rados_command.return_value = json.dumps(cluster_info) fake_cephfs_subvolume_path = "/foo/bar" expected_export_locations = [{ 'path': '10.0.0.10:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': True, }, }, { 'path': '10.0.0.11:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': True, }, }] export_locations = ( self._nfscluster_protocol_helper.get_export_locations( self._share, fake_cephfs_subvolume_path)) driver.rados_command.assert_called_once_with( self._rados_client, cluster_info_prefix, cluster_info_dict) self._assertEqualListsOfObjects(expected_export_locations, export_locations) @ddt.data('cephfs_ganesha_server_ip', 'cephfs_ganesha_export_ips') def test_get_export_locations_ganesha_still_configured(self, confopt): if confopt == 'cephfs_ganesha_server_ip': val = '10.0.0.1' else: val = ['10.0.0.2', '10.0.0.3'] cluster_info_prefix = "nfs cluster info" nfs_clusterid = self._nfscluster_protocol_helper.nfs_clusterid self.fake_conf.set_default(confopt, val) cluster_info_dict = { "cluster_id": nfs_clusterid, } cluster_info = {"fs-manila": { 
"virtual_ip": None, "backend": [ {"hostname": "fake-ceph-node-1", "ip": "10.0.0.10", "port": "1010"}, {"hostname": "fake-ceph-node-2", "ip": "10.0.0.11", "port": "1011"} ] }} driver.rados_command.return_value = json.dumps(cluster_info) fake_cephfs_subvolume_path = "/foo/bar" expected_export_locations = [ { 'path': '10.0.0.10:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': True, }, }, { 'path': '10.0.0.11:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': True, }, }, ] if isinstance(val, list): for ip in val: expected_export_locations.append( { 'path': f'{ip}:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': False, }, }, ) else: expected_export_locations.append( { 'path': f'{val}:/foo/bar', 'is_admin_only': False, 'metadata': { 'preferred': False, }, } ) expected_export_locations = sorted( expected_export_locations, key=lambda d: d['path'] ) export_locations = ( self._nfscluster_protocol_helper.get_export_locations( self._share, fake_cephfs_subvolume_path) ) actual_export_locations = sorted( export_locations, key=lambda d: d['path'] ) driver.rados_command.assert_called_once_with( self._rados_client, cluster_info_prefix, cluster_info_dict) self.assertEqual(expected_export_locations, actual_export_locations) @ddt.ddt class CephFSDriverAltConfigTestCase(test.TestCase): """Test the CephFS driver with non-default config values.""" def setUp(self): super(CephFSDriverAltConfigTestCase, self).setUp() self._execute = mock.Mock() self.fake_conf = configuration.Configuration(None) self._rados_client = MockRadosModule.Rados() self._context = context.get_admin_context() self._share = fake_share.fake_share(share_proto='CEPHFS') self.fake_conf.set_default('driver_handles_share_servers', False) self.fake_conf.set_default('cephfs_auth_id', 'manila') self.mock_object(driver, "rados", MockRadosModule) self.mock_object(driver, "json_command", MockCephArgparseModule.json_command) self.mock_object(driver, "rados_command") self.mock_object(driver, 
'NativeProtocolHelper') self.mock_object(driver, 'NFSProtocolHelper') driver.ceph_default_target = ('mon-mgr', ) @ddt.data('cephfs', 'nfs') def test_do_setup_alt_volume_mode(self, protocol_helper): self.fake_conf.set_default('cephfs_volume_mode', ALT_VOLUME_MODE) self._driver = driver.CephFSDriver(execute=self._execute, configuration=self.fake_conf, rados_client=self._rados_client) self.mock_object( self._driver, '_get_cephfs_filesystem_allocation', mock.Mock(return_value=10) ) type(self._driver).volname = mock.PropertyMock(return_value='cephfs') self._driver.configuration.cephfs_protocol_helper_type = ( protocol_helper) self._driver.do_setup(self._context) if protocol_helper == 'cephfs': driver.NativeProtocolHelper.assert_called_once_with( self._execute, self._driver.configuration, rados_client=self._driver.rados_client, volname=self._driver.volname) else: driver.NFSProtocolHelper.assert_called_once_with( self._execute, self._driver.configuration, rados_client=self._driver._rados_client, volname=self._driver.volname) self._driver.protocol_helper.init_helper.assert_called_once_with() self.assertEqual(ALT_VOLUME_MODE, self._driver._cephfs_volume_mode) @ddt.data('0o759', '0x755', '12a3') def test_volume_mode_exception(self, volume_mode): # cephfs_volume_mode must be a string representing an int as octal self.fake_conf.set_default('cephfs_volume_mode', volume_mode) self.assertRaises(exception.BadConfigurationException, driver.CephFSDriver, execute=self._execute, configuration=self.fake_conf) @ddt.ddt class MiscTests(test.TestCase): @ddt.data({'import_exc': None}, {'import_exc': ImportError}) @ddt.unpack def test_rados_module_missing(self, import_exc): driver.rados = None with mock.patch.object( driver.importutils, 'import_module', side_effect=import_exc) as mock_import_module: if import_exc: self.assertRaises( exception.ShareBackendException, driver.setup_rados) else: driver.setup_rados() self.assertEqual(mock_import_module.return_value, driver.rados) 
mock_import_module.assert_called_once_with('rados') @ddt.data({'import_exc': None}, {'import_exc': ImportError}) @ddt.unpack def test_setup_json_class_missing(self, import_exc): driver.json_command = None with mock.patch.object( driver.importutils, 'import_class', side_effect=import_exc) as mock_import_class: if import_exc: self.assertRaises( exception.ShareBackendException, driver.setup_json_command) else: driver.setup_json_command() self.assertEqual(mock_import_class.return_value, driver.json_command) mock_import_class.assert_called_once_with( 'ceph_argparse.json_command') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9976707 manila-21.0.0/manila/tests/share/drivers/container/0000775000175000017500000000000000000000000022223 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/container/__init__.py0000664000175000017500000000000000000000000024322 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/container/fakes.py0000664000175000017500000001677100000000000023702 0ustar00zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Some useful fakes.""" from manila.tests.db import fakes as db_fakes FAKE_VSCTL_LIST_INTERFACES_X = ( 'fake stuff\n' 'foo not_a_veth something_fake bar\n' 'foo veth11b2c34 something_fake bar\n' 'foo veth25f6g7h manila-container="fake1" bar\n' 'foo veth3jd83j7 manila-container="my_container" bar\n' 'foo veth4i9j10k manila-container="fake2" bar\n' 'more fake stuff\n' ) FAKE_VSCTL_LIST_INTERFACES = ( 'fake stuff\n' 'foo not_a_veth something_fake bar\n' 'foo veth11b2c34 something_fake bar\n' 'foo veth25f6g7h manila-container="fake1" bar\n' 'foo veth3jd83j7 manila-container="manila_my_container" bar\n' 'foo veth4i9j10k manila-container="fake2" bar\n' 'more fake stuff\n' ) FAKE_VSCTL_LIST_INTERFACE_1 = ( 'fake stuff\n' 'foo veth11b2c34 something_fake bar\n' 'more fake stuff\n' ) FAKE_VSCTL_LIST_INTERFACE_2 = ( 'fake stuff\n' 'foo veth25f6g7h manila-container="fake1" bar\n' 'more fake stuff\n' ) FAKE_VSCTL_LIST_INTERFACE_3_X = ( 'fake stuff\n' 'foo veth3jd83j7 manila-container="my_container" bar\n' 'more fake stuff\n' ) FAKE_VSCTL_LIST_INTERFACE_3 = ( 'fake stuff\n' 'foo veth3jd83j7 manila-container="manila_my_container" bar\n' 'more fake stuff\n' ) FAKE_VSCTL_LIST_INTERFACE_4 = ( 'fake stuff\n' 'foo veth4i9j10k manila-container="fake2" bar\n' 'more fake stuff\n' ) FAKE_IP_LINK_SHOW = ( ('1: lo: mtu 65536 qdisc noqueue state UNKNOWN ' 'mode DEFAULT group default qlen 1000\\ link/loopback ' '00:00:00:00:00:00 brd 00:00:00:00:00:00\n' '13: eth0@if16: mtu 1500 qdisc noqueue ' 'state UP mode DEFAULT group default \\ link/ether 02:42:ac:15:00:02 ' 'brd ff:ff:ff:ff:ff:ff\n' '15: eth1@if14: mtu 1500 qdisc noqueue ' 'state UP mode DEFAULT group default \\ link/ether 02:42:ac:14:00:02 ' 'brd ff:ff:ff:ff:ff:ff\n', '') ) FAKE_IP_LINK_SHOW_MASTER = ( ('16: fake_veth@if14: mtu 1500 qdisc ' 'noqueue master br-a7d71c3e77c2 state UP mode DEFAULT group default\n' ' link/ether 4a:10:0c:f2:d2:2c brd ff:ff:ff:ff:ff:ff link-netnsid 0\n', '') ) FAKE_IP_ADDR_SHOW = ( [('283: eth0 inet 
192.168.144.19/24 brd 192.168.144.255 scope global ' 'eth0\\ valid_lft forever preferred_lft forever', ''), ('287: eth1 inet 10.0.0.131/8 brd 8.255.255.255 scope global eth1\\ ' ' valid_lft forever preferred_lft forever', '')] ) FAKE_DOCKER_INSPECT_NETWORKS = ( ('{"fake_docker_network_0":{"IPAMConfig":{},"Links":null,"Aliases":' '["dab16d2703dc"],"NetworkID":' '"cf8c7cb5cecda1ef8240921d5d09e2a1bf9e308a0261459f5a69114cd4e6283c",' '"EndpointID":' '"312a035f32be713c7b56093dde2beec950785ddeb29c9bd18018d43ffd4f64bd",' '"Gateway":"10.10.10.1","IPAddress":"10.10.10.10","IPPrefixLen":24,' '"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,' '"MacAddress":"10:10:10:10:10:10","DriverOpts":{}},' '"fake_docker_network_1":{"IPAMConfig":{},"Links":null,"Aliases":' '["dab16d2703dc"],"NetworkID":' '"e978d91d70c30695557018c8847a551267e99c083063391c07dc9a730bfef9dc",' '"EndpointID":' '"8e34044764cd52b9d092ac66af8fb7130cdd423b521c3bf6e57b8095f6f0a085",' '"Gateway":"20.20.20.1","IPAddress":"20.20.20.20","IPPrefixLen":24,' '"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,' '"MacAddress":"20:20:20:20:20:20","DriverOpts":{}}}', '') ) def fake_share(**kwargs): share = { 'id': 'fakeid', 'share_id': 'fakeshareid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'host': 'host@backend#vg', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } share.update(kwargs) return db_fakes.FakeModel(share) def fake_share_instances(**kwargs): share_instances = { 'id': 'fakeid', 'share_id': 'fakeshareid', 'host': 'host@backend#vg', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } share_instances.update(kwargs) return [db_fakes.FakeModel(share_instances)] def fake_access(**kwargs): access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'access_level': 'rw', 'state': 'active', } access.update(kwargs) return db_fakes.FakeModel(access) def fake_network(**kwargs): allocations = db_fakes.FakeModel({'id': 'fake_allocation_id', 'ip_address': 
'127.0.0.0.1', 'mac_address': 'fe:16:3e:61:e0:58'}) network = { 'id': 'fake_network_id', 'server_id': 'fake_server_id', 'network_allocations': [allocations], 'neutron_net_id': 'fake_net', 'neutron_subnet_id': 'fake_subnet' } network.update(kwargs) return [db_fakes.FakeModel(network)] def fake_network_with_security_services(**kwargs): allocations = db_fakes.FakeModel({'id': 'fake_allocation_id', 'ip_address': '127.0.0.0.1', 'mac_address': 'fe:16:3e:61:e0:58'}) security_services = db_fakes.FakeModel({'status': 'fake_status', 'id': 'fake_security_service_id', 'project_id': 'fake_project_id', 'type': 'fake_type', 'name': 'fake_name'}) network = { 'id': 'fake_network_id', 'server_id': 'fake_server_id', 'network_allocations': [allocations], 'neutron_net_id': 'fake_net', 'neutron_subnet_id': 'fake_subnet', 'security_services': [security_services], } network.update(kwargs) return [db_fakes.FakeModel(network)] def fake_share_server(**kwargs): share_server = { 'id': 'fake' } share_server.update(kwargs) return db_fakes.FakeModel(share_server) def fake_identifier(): return '7cf7c200-d3af-4e05-b87e-9167c95dfcad' def fake_share_no_export_location(**kwargs): share = { 'share_id': 'fakeshareid', } share.update(kwargs) return db_fakes.FakeModel(share) def fake_current_network_allocations(): current_network_allocations = { 'subnets': [ { 'network_allocations': [ { 'id': 'fake_id_current', 'ip_address': '192.168.144.100', } ] } ] } return current_network_allocations def fake_new_network_allocations(): new_network_allocations = { 'network_allocations': [ { 'id': 'fake_id_new', 'ip_address': '10.0.0.100', } ] } return new_network_allocations ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/container/test_container_helper.py0000664000175000017500000004211400000000000027157 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Container helper module.""" from unittest import mock import ddt from manila import exception from manila.share import configuration from manila.share.drivers.container import container_helper from manila import test from manila.tests.share.drivers.container import fakes @ddt.ddt class DockerExecHelperTestCase(test.TestCase): """Tests DockerExecHelper""" def setUp(self): super(DockerExecHelperTestCase, self).setUp() self.fake_conf = configuration.Configuration(None) self.fake_conf.container_image_name = "fake_image" self.fake_conf.container_volume_mount_path = "/tmp/shares" self.DockerExecHelper = container_helper.DockerExecHelper( configuration=self.fake_conf) def test_create_container(self): fake_name = 'fake_container' self.DockerExecHelper.configuration.container_image_name = 'fake_image' self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(return_value=('fake_container_id', ''))) self.mock_object(self.DockerExecHelper, 'disconnect_network') self.DockerExecHelper.create_container(fake_name) self.DockerExecHelper._inner_execute.assert_called_once_with([ 'docker', 'container', 'create', '--name=%s' % fake_name, '--privileged', '-v', '/dev:/dev', '-v', '/tmp/shares:/shares', 'fake_image']) self.DockerExecHelper.disconnect_network.assert_called_once_with( 'bridge', fake_name) def test_create_container_failure(self): self.mock_object(self.DockerExecHelper, '_inner_execute', 
mock.Mock(side_effect=OSError())) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.create_container) def test_start_container(self): fake_name = 'fake_container' self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock()) self.DockerExecHelper.start_container(fake_name) self.DockerExecHelper._inner_execute.assert_called_once_with([ 'docker', 'container', 'start', 'fake_container']) def test_start_container_impossible_failure(self): self.mock_object(self.DockerExecHelper, "_inner_execute", mock.Mock(side_effect=OSError())) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.start_container, None) def test_stop_container(self): self.mock_object(self.DockerExecHelper, "_inner_execute", mock.Mock(return_value=['fake_output', None])) expected = ['docker', 'stop', 'manila-fake-conainer'] self.DockerExecHelper.stop_container("manila-fake-conainer") self.DockerExecHelper._inner_execute.assert_called_once_with(expected) def test_stop_container_oh_noes(self): self.mock_object(self.DockerExecHelper, "_inner_execute", mock.Mock(side_effect=OSError)) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.stop_container, "manila-fake-container") def test_execute(self): self.mock_object(self.DockerExecHelper, "_inner_execute", mock.Mock(return_value='fake_output')) expected = ['docker', 'exec', '-i', 'fake_container', 'fake_script'] self.DockerExecHelper.execute("fake_container", ["fake_script"]) self.DockerExecHelper._inner_execute.assert_called_once_with( expected, ignore_errors=False) def test_execute_name_not_there(self): self.assertRaises(exception.ManilaException, self.DockerExecHelper.execute, None, ['do', 'stuff']) def test_execute_command_not_there(self): self.assertRaises(exception.ManilaException, self.DockerExecHelper.execute, 'fake-name', None) def test_execute_bad_command_format(self): self.assertRaises(exception.ManilaException, self.DockerExecHelper.execute, 'fake-name', 'do stuff') def 
test__inner_execute_ok(self): self.DockerExecHelper._execute = mock.Mock(return_value='fake') result = self.DockerExecHelper._inner_execute("fake_command") self.assertEqual(result, 'fake') def test__inner_execute_not_ok(self): self.DockerExecHelper._execute = mock.Mock(side_effect=[OSError()]) self.assertRaises(OSError, self.DockerExecHelper._inner_execute, "fake_command") def test__inner_execute_not_ok_ignore_errors(self): self.DockerExecHelper._execute = mock.Mock(side_effect=OSError()) result = self.DockerExecHelper._inner_execute("fake_command", ignore_errors=True) self.assertIsNone(result) def test_fetch_container_addresses(self): fake_name = 'fake_container' fake_addresses = ['192.168.144.19', '10.0.0.131'] fake_ip_addr_show = fakes.FAKE_IP_ADDR_SHOW fake_interfaces = ['eth0', 'eth1'] self.mock_object(self.DockerExecHelper, 'fetch_container_interfaces', mock.Mock(return_value=fake_interfaces)) self.mock_object(self.DockerExecHelper, 'execute', mock.Mock(side_effect=[fake_ip_addr_show[0], fake_ip_addr_show[1]])) self.assertEqual(fake_addresses, self.DockerExecHelper.fetch_container_addresses( fake_name, 'inet')) (self.DockerExecHelper.fetch_container_interfaces .assert_called_once_with(fake_name)) self.DockerExecHelper.execute.assert_any_call( fake_name, ['ip', '-oneline', '-family', 'inet', 'address', 'show', 'scope', 'global', 'dev', 'eth0'] ) self.DockerExecHelper.execute.assert_any_call( fake_name, ['ip', '-oneline', '-family', 'inet', 'address', 'show', 'scope', 'global', 'dev', 'eth1'] ) def test_fetch_container_interfaces(self): fake_name = 'fake_container' fake_eths = fakes.FAKE_IP_LINK_SHOW self.mock_object(self.DockerExecHelper, 'execute', mock.Mock(return_value=fake_eths)) self.assertEqual( ['eth0', 'eth1'], self.DockerExecHelper.fetch_container_interfaces(fake_name)) self.DockerExecHelper.execute.assert_called_once_with( fake_name, ['ip', '-o', 'link', 'show']) def test_rename_container(self): fake_old_name = 'old_name' fake_new_name = 'new_name' 
fake_veth_names = ['fake_veth'] self.mock_object(self.DockerExecHelper, 'get_container_veths', mock.Mock(return_value=fake_veth_names)) self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(side_effect=[None, None])) self.DockerExecHelper.rename_container(fake_old_name, fake_new_name) self.DockerExecHelper.get_container_veths.assert_called_once_with( fake_old_name) self.DockerExecHelper._inner_execute.assert_has_calls([ mock.call(['docker', 'rename', fake_old_name, fake_new_name]), mock.call(['ovs-vsctl', 'set', 'interface', fake_veth_names[0], 'external-ids:manila-container=%s' % fake_new_name])]) def test_rename_container_exception_veth(self): fake_old_name = 'old_name' fake_new_name = 'new_name' self.mock_object(self.DockerExecHelper, 'get_container_veths', mock.Mock(return_value=[])) self.assertRaises(exception.ManilaException, self.DockerExecHelper.rename_container, fake_old_name, fake_new_name) @ddt.data([['fake', ''], OSError, ['fake', '']], [['fake', ''], OSError, OSError], [OSError]) def test_rename_container_exception_cmds(self, side_effect): fake_old_name = 'old_name' fake_new_name = 'new_name' fake_veth_names = ['fake_veth'] self.mock_object(self.DockerExecHelper, 'get_container_veths', mock.Mock(return_value=fake_veth_names)) self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(side_effect=side_effect)) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.rename_container, fake_old_name, fake_new_name) if len(side_effect) > 1: self.DockerExecHelper._inner_execute.assert_has_calls([ mock.call(['docker', 'rename', fake_old_name, fake_new_name]), mock.call(['ovs-vsctl', 'set', 'interface', fake_veth_names[0], 'external-ids:manila-container=%s' % fake_new_name]) ]) else: self.DockerExecHelper._inner_execute.assert_has_calls([ mock.call(['docker', 'rename', fake_old_name, fake_new_name])]) @ddt.data((["wrong_name\nfake\nfake_container\nfake_name'"], True), (["wrong_name\nfake_container\nfake'"], False), 
("\n", False)) @ddt.unpack def test_container_exists(self, fake_return_value, expected_result): self.DockerExecHelper._execute = mock.Mock( return_value=fake_return_value) result = self.DockerExecHelper.container_exists("fake_name") self.DockerExecHelper._execute.assert_called_once_with( "docker", "ps", "--no-trunc", "--format='{{.Names}}'", run_as_root=True) self.assertEqual(expected_result, result) def test_create_network(self): fake_network_name = 'fake_network_name' self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(return_value=('fake_network_id', ''))) self.DockerExecHelper.create_network(fake_network_name) self.DockerExecHelper._inner_execute.assert_called_once_with([ 'docker', 'network', 'create', fake_network_name]) def test_create_network_failure(self): self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(side_effect=OSError())) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.create_network, None) def test_remove_network(self): fake_network_name = 'fake_network_name' self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(return_value=('fake_network_id', ''))) self.DockerExecHelper.remove_network(fake_network_name) self.DockerExecHelper._inner_execute.assert_called_once_with([ 'docker', 'network', 'remove', fake_network_name]) def test_remove_network_failure(self): self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(side_effect=OSError())) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.remove_network, None) def test_connect_network(self): fake_network_name = 'fake_network_name' fake_server_id = 'fake_server_id' self.mock_object(self.DockerExecHelper, '_inner_execute') self.DockerExecHelper.connect_network(fake_network_name, fake_server_id) self.DockerExecHelper._inner_execute.assert_called_once_with([ 'docker', 'network', 'connect', fake_network_name, fake_server_id]) def test_connect_network_failure(self): self.mock_object(self.DockerExecHelper, 
'_inner_execute', mock.Mock(side_effect=OSError())) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.connect_network, None, None) def test_disconnect_network(self): fake_network_name = 'fake_network_name' fake_server_id = 'fake_server_id' self.mock_object(self.DockerExecHelper, '_inner_execute') self.DockerExecHelper.disconnect_network(fake_network_name, fake_server_id) self.DockerExecHelper._inner_execute.assert_called_once_with([ 'docker', 'network', 'disconnect', fake_network_name, fake_server_id]) def test_disconnect_network_failure(self): self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(side_effect=OSError())) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.disconnect_network, None, None) def test_get_container_networks(self): fake_container_name = 'fake_container_name' fake_docker_inspect_networks = fakes.FAKE_DOCKER_INSPECT_NETWORKS fake_networks = ['fake_docker_network_0', 'fake_docker_network_1'] self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(return_value=fake_docker_inspect_networks)) self.assertEqual( fake_networks, self.DockerExecHelper.get_container_networks(fake_container_name)) self.DockerExecHelper._inner_execute.assert_called_once_with([ 'docker', 'container', 'inspect', '-f', '\'{{json .NetworkSettings.Networks}}\'', fake_container_name]) def test_get_container_networks_failure(self): self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(side_effect=OSError())) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.get_container_networks, None) def test_get_container_veths(self): fake_container_name = 'fake_container_name' fake_eths_iflinks = ('10\n11\n', '') fake_veths = ['fake_veth_0', 'fake_veth_1'] self.mock_object(self.DockerExecHelper, 'execute', mock.Mock(return_value=fake_eths_iflinks)) self.mock_object( self.DockerExecHelper, '_execute', mock.Mock(side_effect=[('/sys/class/net/%s/ifindex' % fake_veths[0], ''), 
('/sys/class/net/%s/ifindex' % fake_veths[1], '')])) self.assertEqual( fake_veths, self.DockerExecHelper.get_container_veths(fake_container_name)) self.DockerExecHelper.execute.assert_called_once_with( fake_container_name, ['bash', '-c', 'cat /sys/class/net/eth*/iflink']) self.DockerExecHelper._execute.assert_has_calls([ mock.call('bash', '-c', 'grep -l 10 /sys/class/net/veth*/ifindex'), mock.call('bash', '-c', 'grep -l 11 /sys/class/net/veth*/ifindex') ]) def test_get_network_bridge(self): fake_network_name = 'fake_network_name' fake_network_id = ('012345abcdef', '') fake_bridge = 'br-' + fake_network_id[0] self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(return_value=fake_network_id)) self.assertEqual( fake_bridge, self.DockerExecHelper.get_network_bridge(fake_network_name)) self.DockerExecHelper._inner_execute.assert_called_once_with([ 'docker', 'network', 'inspect', '-f', '{{.Id}}', fake_network_name ]) def test_get_network_bridge_failure(self): self.mock_object(self.DockerExecHelper, '_inner_execute', mock.Mock(side_effect=OSError())) self.assertRaises(exception.ShareBackendException, self.DockerExecHelper.get_network_bridge, None) def test_get_veth_from_bridge(self): fake_bridge = 'br-012345abcdef' fake_ip_link_show_master = fakes.FAKE_IP_LINK_SHOW_MASTER fake_veth = 'fake_veth' self.mock_object(self.DockerExecHelper, '_execute', mock.Mock(return_value=fake_ip_link_show_master)) self.assertEqual( fake_veth, self.DockerExecHelper.get_veth_from_bridge(fake_bridge)) self.DockerExecHelper._execute.assert_called_once_with('ip', 'link', 'show', 'master', fake_bridge) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/container/test_driver.py0000664000175000017500000011004500000000000025130 0ustar00zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Container driver module.""" import functools from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils from manila.common import constants as const from manila import context from manila import exception from manila.share import configuration from manila.share.drivers.container import driver from manila.share.drivers.container import protocol_helper from manila import test from manila.tests import db_utils from manila.tests import fake_utils from manila.tests.share.drivers.container import fakes as cont_fakes CONF = cfg.CONF CONF.import_opt('lvm_share_export_ips', 'manila.share.drivers.lvm') @ddt.ddt class ContainerShareDriverTestCase(test.TestCase): """Tests ContainerShareDriver""" def setUp(self): super(ContainerShareDriverTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._context = context.get_admin_context() self._db = mock.Mock() self.fake_conf = configuration.Configuration(None) CONF.set_default('driver_handles_share_servers', True) self._driver = driver.ContainerShareDriver( configuration=self.fake_conf) self.share = cont_fakes.fake_share() self.access = cont_fakes.fake_access() self.server = { 'public_address': self.fake_conf.lvm_share_export_ips, 'instance_id': 'LVM', } # Used only to test compatibility with share manager self.share_server = "fake_share_server" def fake_exec_sync(self, *args, **kwargs): 
kwargs['execute_arguments'].append(args) try: ret_val = kwargs['ret_val'] except KeyError: ret_val = None return ret_val def test__get_helper_ok(self): share = cont_fakes.fake_share(share_proto='CIFS') expected = protocol_helper.DockerCIFSHelper(None) actual = self._driver._get_helper(share) self.assertEqual(type(expected), type(actual)) def test__get_helper_existing_ok(self): share = cont_fakes.fake_share(share_proto='CIFS') expected = protocol_helper.DockerCIFSHelper self._driver._helpers = {'CIFS': expected} actual = self._driver._get_helper(share) self.assertEqual(expected, type(actual)) def test__get_helper_not_ok(self): share = cont_fakes.fake_share() self.assertRaises(exception.InvalidShare, self._driver._get_helper, share) def test_update_share_stats(self): self.mock_object(self._driver.storage, 'get_share_server_pools', mock.Mock(return_value='test-pool')) self._driver._update_share_stats() self.assertEqual('Docker', self._driver._stats['share_backend_name']) self.assertEqual('CIFS', self._driver._stats['storage_protocol']) self.assertEqual(0, self._driver._stats['reserved_percentage']) self.assertEqual( 0, self._driver._stats['reserved_snapshot_percentage']) self.assertEqual( 0, self._driver._stats['reserved_share_extend_percentage']) self.assertIsNone(self._driver._stats['consistency_group_support']) self.assertEqual(False, self._driver._stats['snapshot_support']) self.assertEqual('ContainerShareDriver', self._driver._stats['driver_name']) self.assertEqual('test-pool', self._driver._stats['pools']) self.assertTrue(self._driver._stats['ipv4_support']) self.assertFalse(self._driver._stats['ipv6_support']) self.assertFalse(self._driver. 
_stats['mount_point_name_support']) def test_create_share(self): share_server = {'id': 'fake'} fake_container_name = 'manila_fake_container' mock_provide_storage = self.mock_object(self._driver.storage, 'provide_storage') mock_get_container_name = self.mock_object( self._driver, '_get_container_name', mock.Mock(return_value=fake_container_name)) mock_create_and_mount = self.mock_object( self._driver, '_create_export_and_mount_storage', mock.Mock(return_value='export_location')) self.assertEqual('export_location', self._driver.create_share(self._context, self.share, share_server)) mock_provide_storage.assert_called_once_with( self.share.share_id, self.share.size ) mock_create_and_mount.assert_called_once_with( self.share, fake_container_name, self.share.share_id ) mock_get_container_name.assert_called_once_with( share_server['id'] ) def test__create_export_and_mount_storage(self): helper = mock.Mock() server_id = 'fake_id' share_name = 'fake_name' mock_create_share = self.mock_object( helper, 'create_share', mock.Mock(return_value='export_location')) mock__get_helper = self.mock_object( self._driver, "_get_helper", mock.Mock(return_value=helper)) self.mock_object(self._driver.storage, "_get_lv_device", mock.Mock(return_value={})) mock_execute = self.mock_object(self._driver.container, 'execute') self.assertEqual('export_location', self._driver._create_export_and_mount_storage( self.share, server_id, share_name)) mock_create_share.assert_called_once_with(server_id) mock__get_helper.assert_called_once_with(self.share) mock_execute.assert_has_calls([ mock.call(server_id, ["mkdir", "-m", "750", "/shares/%s" % share_name]), mock.call(server_id, ["mount", {}, "/shares/%s" % share_name]) ]) def test__delete_export_and_umount_storage(self): helper = mock.Mock() server_id = 'fake_id' share_name = 'fake_name' mock__get_helper = self.mock_object( self._driver, "_get_helper", mock.Mock(return_value=helper)) mock_delete_share = self.mock_object(helper, 'delete_share') 
mock_execute = self.mock_object(self._driver.container, 'execute') self._driver._delete_export_and_umount_storage( self.share, server_id, share_name) mock__get_helper.assert_called_once_with(self.share) mock_delete_share.assert_called_once_with( server_id, share_name, ignore_errors=False) mock_execute.assert_has_calls([ mock.call(server_id, ["umount", "/shares/%s" % share_name], ignore_errors=False), mock.call(server_id, ["rm", "-fR", "/shares/%s" % share_name], ignore_errors=True)] ) def test_delete_share(self): fake_server_id = "manila_container_name" fake_share_name = "fake_share_name" fake_share_server = {'id': 'fake'} mock_get_container_name = self.mock_object( self._driver, '_get_container_name', mock.Mock(return_value=fake_server_id)) mock_get_share_name = self.mock_object( self._driver, '_get_share_name', mock.Mock(return_value=fake_share_name)) self.mock_object(self._driver.storage, 'remove_storage') mock_delete_and_umount = self.mock_object( self._driver, '_delete_export_and_umount_storage') self._driver.delete_share(self._context, self.share, fake_share_server) mock_get_container_name.assert_called_once_with( fake_share_server['id'] ) mock_get_share_name.assert_called_with( self.share ) mock_delete_and_umount.assert_called_once_with( self.share, fake_server_id, fake_share_name, ignore_errors=True ) @ddt.data(True, False) def test__get_share_name(self, has_export_location): if not has_export_location: fake_share = cont_fakes.fake_share_no_export_location() expected_result = fake_share.share_id else: fake_share = cont_fakes.fake_share() expected_result = fake_share['export_location'].split('/')[-1] result = self._driver._get_share_name(fake_share) self.assertEqual(expected_result, result) def test_extend_share(self): fake_new_size = 2 fake_share_server = {'id': 'fake-server'} share = cont_fakes.fake_share() share_name = self._driver._get_share_name(share) actual_arguments = [] expected_arguments = [ ('manila_fake_server', ['umount', '/shares/%s' % 
share_name]), ('manila_fake_server', ['mount', '/dev/manila_docker_volumes/%s' % share_name, '/shares/%s' % share_name]) ] mock_extend_share = self.mock_object(self._driver.storage, "extend_share") self._driver.container.execute = functools.partial( self.fake_exec_sync, execute_arguments=actual_arguments, ret_val='') self._driver.extend_share(share, fake_new_size, fake_share_server) self.assertEqual(expected_arguments, actual_arguments) mock_extend_share.assert_called_once_with(share_name, fake_new_size, fake_share_server) def test_ensure_share(self): # Does effectively nothing by design. self.assertEqual(1, 1) def test_update_access_access_rules_ok(self): helper = mock.Mock() fake_share_name = self._driver._get_share_name(self.share) self.mock_object(self._driver, "_get_helper", mock.Mock(return_value=helper)) self._driver.update_access(self._context, self.share, [{'access_level': const.ACCESS_LEVEL_RW}], [], [], [], {"id": "fake"}) helper.update_access.assert_called_with('manila_fake', fake_share_name, [{'access_level': 'rw'}], [], []) def test_get_network_allocation_numer(self): # Does effectively nothing by design. self.assertEqual(1, self._driver.get_network_allocations_number()) def test__get_container_name(self): self.assertEqual("manila_fake_server", self._driver._get_container_name("fake-server")) def test_do_setup(self): # Does effectively nothing by design. 
self.assertEqual(1, 1) def test_check_for_setup_error_host_not_ok_class_ok(self): setattr(self._driver.configuration.local_conf, 'neutron_host_id', None) self.assertRaises(exception.ManilaException, self._driver.check_for_setup_error) def test_check_for_setup_error_host_not_ok_class_some_other(self): setattr(self._driver.configuration.local_conf, 'neutron_host_id', None) setattr(self._driver.configuration.local_conf, 'network_api_class', 'manila.share.drivers.container.driver.ContainerShareDriver') self.mock_object(driver.LOG, "warning") self._driver.check_for_setup_error() setattr(self._driver.configuration.local_conf, 'network_api_class', 'manila.network.neutron.neutron_network_plugin.' 'NeutronNetworkPlugin') self.assertTrue(driver.LOG.warning.called) def test__connect_to_network(self): network_info = cont_fakes.fake_network()[0] helper = mock.Mock() self.mock_object(self._driver, "_execute", mock.Mock(return_value=helper)) self.mock_object(self._driver.container, "execute") self._driver._connect_to_network("fake-server", network_info, "fake-veth", "fake-host-bridge", "fake0") @ddt.data({'veth': ["fake_veth"], 'exception': None}, {'veth': ["fake_veth"], 'exception': exception.ProcessExecutionError('fake')}, {'veth': ["fake_veth"], 'exception': None}) @ddt.unpack def test__teardown_server(self, veth, exception): fake_server_details = {"id": "b5afb5c1-6011-43c4-8a37-29820e6951a7"} fake_networks = ["fake_docker_network_0"] container_name = self._driver._get_container_name( fake_server_details['id']) mock_stop_container = self.mock_object( self._driver.container, "stop_container") mock_get_container_veths = self.mock_object( self._driver.container, "get_container_veths", mock.Mock(return_value=veth)) mock_get_container_networks = self.mock_object( self._driver.container, "get_container_networks", mock.Mock(return_value=fake_networks)) mock_execute = self.mock_object(self._driver, "_execute", mock.Mock(side_effect=exception)) self._driver._teardown_server( 
server_details=fake_server_details) mock_stop_container.assert_called_once_with( container_name ) mock_get_container_veths.assert_called_once_with( container_name ) mock_get_container_networks.assert_called_once_with( container_name ) if exception is None and veth is not None: mock_execute.assert_called_once_with( "ovs-vsctl", "--", "del-port", self._driver.configuration.container_ovs_bridge_name, veth[0], run_as_root=True) def test__setup_server_network(self): fake_server_id = 'fake_container_id' fake_network_info = cont_fakes.fake_network() fake_existing_interfaces = [] fake_bridge = 'br-012345abcdef' fake_veth = 'fake_veth' self.mock_object(self._driver.container, 'fetch_container_interfaces', mock.Mock(return_value=fake_existing_interfaces)) self.mock_object(driver.uuidutils, 'generate_uuid', mock.Mock(return_value='fakeuuid')) self.mock_object(self._driver.container, 'create_network') self.mock_object(self._driver.container, 'connect_network') self.mock_object(self._driver.container, 'get_network_bridge', mock.Mock(return_value=fake_bridge)) self.mock_object(self._driver.container, 'get_veth_from_bridge', mock.Mock(return_value=fake_veth)) self.mock_object(self._driver, '_connect_to_network') self._driver._setup_server_network(fake_server_id, fake_network_info) (self._driver.container.fetch_container_interfaces .assert_called_once_with(fake_server_id)) self._driver.container.create_network.assert_called_with( 'manila-docker-network-fakeuuid') self._driver.container.connect_network.assert_called_with( 'manila-docker-network-fakeuuid', fake_server_id) self._driver.container.get_network_bridge.assert_called_with( 'manila-docker-network-fakeuuid') self._driver.container.get_veth_from_bridge.assert_called_with( fake_bridge) self._driver._connect_to_network.assert_called_with( fake_server_id, fake_network_info[0], fake_veth, fake_bridge, 'eth0') def test__setup_server_network_existing_interfaces(self): fake_server_id = 'fake_container_id' fake_network_info = 
cont_fakes.fake_network() fake_existing_interfaces = cont_fakes.FAKE_IP_LINK_SHOW fake_bridge = 'br-012345abcdef' fake_veth = 'fake_veth' self.mock_object(self._driver.container, 'fetch_container_interfaces', mock.Mock(return_value=fake_existing_interfaces)) self.mock_object(driver.uuidutils, 'generate_uuid', mock.Mock(return_value='fakeuuid')) self.mock_object(self._driver.container, 'create_network') self.mock_object(self._driver.container, 'connect_network') self.mock_object(self._driver.container, 'get_network_bridge', mock.Mock(return_value=fake_bridge)) self.mock_object(self._driver.container, 'get_veth_from_bridge', mock.Mock(return_value=fake_veth)) self.mock_object(self._driver, '_connect_to_network') self._driver._setup_server_network(fake_server_id, fake_network_info) (self._driver.container.fetch_container_interfaces .assert_called_once_with(fake_server_id)) self._driver.container.create_network.assert_called_with( 'manila-docker-network-fakeuuid') self._driver.container.connect_network.assert_called_with( 'manila-docker-network-fakeuuid', fake_server_id) self._driver.container.get_network_bridge.assert_called_with( 'manila-docker-network-fakeuuid') self._driver.container.get_veth_from_bridge.assert_called_with( fake_bridge) self._driver._connect_to_network.assert_called_with( fake_server_id, fake_network_info[0], fake_veth, fake_bridge, 'eth2') def test__setup_server_container_fails(self): network_info = cont_fakes.fake_network() self.mock_object(self._driver.container, 'start_container') self._driver.container.start_container.side_effect = KeyError() self.assertRaises(exception.ManilaException, self._driver._setup_server, network_info) def test__setup_server_ok(self): fake_network_info = cont_fakes.fake_network() self.mock_object(self._driver, '_get_container_name', mock.Mock(return_value='fake_server_id')) self.mock_object(self._driver.container, 'create_container') self.mock_object(self._driver.container, 'start_container') 
self.mock_object(self._driver, '_setup_server_network') self.assertEqual(fake_network_info[0]['server_id'], self._driver._setup_server(fake_network_info)['id']) self._driver._get_container_name.assert_called_once_with( fake_network_info[0]['server_id']) self._driver.container.create_container.assert_called_once_with( 'fake_server_id') self._driver.container.start_container.assert_called_once_with( 'fake_server_id') self._driver._setup_server_network.assert_called_once_with( fake_network_info[0]['server_id'], fake_network_info) def test__setup_server_security_services(self): fake_network_info = cont_fakes.fake_network_with_security_services() self.mock_object(self._driver, '_get_container_name') self.mock_object(self._driver.container, 'create_container') self.mock_object(self._driver.container, 'start_container') self.mock_object(self._driver, '_setup_server_network') self.mock_object(self._driver, 'setup_security_services') self._driver._setup_server(fake_network_info) self._driver.setup_security_services.assert_called_once() def test_manage_existing(self): fake_container_name = "manila_fake_container" fake_export_location = 'export_location' expected_result = { 'size': 1, 'export_locations': fake_export_location } fake_share_server = cont_fakes.fake_share() fake_share_name = self._driver._get_share_name(self.share) mock_get_container_name = self.mock_object( self._driver, '_get_container_name', mock.Mock(return_value=fake_container_name)) mock_get_share_name = self.mock_object( self._driver, '_get_share_name', mock.Mock(return_value=fake_share_name)) mock_rename_storage = self.mock_object( self._driver.storage, 'rename_storage') mock_get_size = self.mock_object( self._driver.storage, 'get_size', mock.Mock(return_value=1)) mock_delete_and_umount = self.mock_object( self._driver, '_delete_export_and_umount_storage') mock_create_and_mount = self.mock_object( self._driver, '_create_export_and_mount_storage', mock.Mock(return_value=fake_export_location) ) result = 
self._driver.manage_existing_with_server( self.share, {}, fake_share_server) mock_rename_storage.assert_called_once_with( fake_share_name, self.share.share_id ) mock_get_size.assert_called_once_with( fake_share_name ) mock_delete_and_umount.assert_called_once_with( self.share, fake_container_name, fake_share_name ) mock_create_and_mount.assert_called_once_with( self.share, fake_container_name, self.share.share_id ) mock_get_container_name.assert_called_once_with( fake_share_server['id'] ) mock_get_share_name.assert_called_with( self.share ) self.assertEqual(expected_result, result) def test_manage_existing_no_share_server(self): self.assertRaises(exception.ShareBackendException, self._driver.manage_existing_with_server, self.share, {}) def test_unmanage(self): self.assertIsNone(self._driver.unmanage_with_server(self.share)) def test_get_share_server_network_info(self): fake_share_server = cont_fakes.fake_share_server() fake_id = cont_fakes.fake_identifier() expected_result = ['veth11b2c34'] self.mock_object(self._driver, '_get_correct_container_old_name', mock.Mock(return_value=fake_id)) self.mock_object(self._driver.container, 'fetch_container_addresses', mock.Mock(return_value=expected_result)) result = self._driver.get_share_server_network_info(self._context, fake_share_server, fake_id, {}) self.assertEqual(expected_result, result) def test_manage_server(self): fake_id = cont_fakes.fake_identifier() fake_share_server = cont_fakes.fake_share_server() fake_container_name = "manila_fake_container" fake_container_old_name = "fake_old_name" mock_get_container_name = self.mock_object( self._driver, '_get_container_name', mock.Mock(return_value=fake_container_name)) mock_get_correct_container_old_name = self.mock_object( self._driver, '_get_correct_container_old_name', mock.Mock(return_value=fake_container_old_name) ) mock_rename_container = self.mock_object(self._driver.container, 'rename_container') expected_result = {'id': fake_share_server['id']} new_identifier, 
new_backend_details = self._driver.manage_server( self._context, fake_share_server, fake_id, {}) self.assertEqual(expected_result, new_backend_details) self.assertEqual(fake_container_name, new_identifier) mock_rename_container.assert_called_once_with( fake_container_old_name, fake_container_name) mock_get_container_name.assert_called_with( fake_share_server['id'] ) mock_get_correct_container_old_name.assert_called_once_with( fake_id ) @ddt.data(True, False) def test__get_correct_container_old_name(self, container_exists): expected_name = 'fake-name' fake_name = 'fake-name' mock_container_exists = self.mock_object( self._driver.container, 'container_exists', mock.Mock(return_value=container_exists)) if not container_exists: expected_name = 'manila_fake_name' result = self._driver._get_correct_container_old_name(fake_name) self.assertEqual(expected_name, result) mock_container_exists.assert_called_once_with( fake_name ) def test_migration_complete(self): share_server = {'id': 'fakeid'} fake_container_name = 'manila_fake_container' new_export_location = 'new_export_location' mock_migraton_storage = self.mock_object(self._driver.storage, 'migration_complete') mock_get_container_name = self.mock_object( self._driver, '_get_container_name', mock.Mock(return_value=fake_container_name)) mock_mount = self.mock_object( self._driver, '_mount_storage', mock.Mock(return_value=new_export_location)) mock_umount = self.mock_object(self._driver, '_umount_storage') expected_location = {'export_locations': new_export_location} self.assertEqual(expected_location, self._driver.migration_complete( self._context, self.share, self.share, None, None, share_server, share_server)) mock_migraton_storage.assert_called_once_with( self._context, self.share, self.share, None, None, destination_share_server=share_server, share_server=share_server ) mock_mount.assert_called_once_with( self.share, fake_container_name, self.share.share_id ) mock_umount.assert_called_once_with( self.share, 
fake_container_name, self.share.share_id ) mock_get_container_name.assert_called_with( share_server['id'] ) def test_share_server_migration_complete(self): source_server = {'id': 'source_fake_id', 'host': 'host@back1'} dest_server = {'id': 'dest_fake_id', 'host': 'host@back2'} fake_container_name = 'manila_fake_container' new_export_location = 'new_export_location' fake_pool_name = 'fake_vg' shares_list = [self.share, self.share] mock_get_container_name = self.mock_object( self._driver, '_get_container_name', mock.Mock(return_value=fake_container_name)) mock_umount = self.mock_object(self._driver, '_umount_storage') mock_migraton_storage = self.mock_object( self._driver.storage, 'share_server_migration_complete') mock_mount = self.mock_object( self._driver, '_mount_storage', mock.Mock(return_value=new_export_location)) mock_get_pool = self.mock_object( self._driver.storage, 'get_share_pool_name', mock.Mock(return_value=fake_pool_name)) share_updates = {} for fake_share in shares_list: share_updates[fake_share['id']] = { 'export_locations': new_export_location, 'pool_name': fake_pool_name, } expected_result = { 'share_updates': share_updates, } self.assertDictEqual(expected_result, self._driver.share_server_migration_complete( self._context, source_server, dest_server, shares_list, None, None)) mock_migraton_storage.assert_called_once_with( self._context, source_server, dest_server, shares_list, None, None) # assert shares for fake_share in shares_list: mock_get_pool.assert_any_call(fake_share['share_id']) mock_umount.assert_any_call(fake_share, fake_container_name, fake_share.share_id) mock_mount.assert_any_call(fake_share, fake_container_name, fake_share.share_id) mock_get_container_name.assert_any_call(source_server['id']) mock_get_container_name.assert_any_call(dest_server['id']) def test__get_different_security_service_keys(self): sec_service_keys = ['dns_ip', 'server', 'domain', 'user', 'password', 'ou'] current_security_service = {} 
[current_security_service.update({key: key + '_1'}) for key in sec_service_keys] new_security_service = {} [new_security_service.update({key: key + '_2'}) for key in sec_service_keys] db_utils.create_security_service(**current_security_service) db_utils.create_security_service(**new_security_service) different_keys = self._driver._get_different_security_service_keys( current_security_service, new_security_service) [self.assertIn(key, different_keys) for key in sec_service_keys] @ddt.data( (['dns_ip', 'server', 'domain', 'user', 'password', 'ou'], False), (['user', 'password'], True) ) @ddt.unpack def test__check_if_all_fields_are_updatable(self, keys, expected_result): current_security_service = db_utils.create_security_service() new_security_service = db_utils.create_security_service() mock_get_keys = self.mock_object( self._driver, '_get_different_security_service_keys', mock.Mock(return_value=keys)) result = self._driver._check_if_all_fields_are_updatable( current_security_service, new_security_service) self.assertEqual(expected_result, result) mock_get_keys.assert_called_once_with( current_security_service, new_security_service ) @ddt.data(True, False) def test_update_share_server_security_service( self, with_current_service): new_security_service = db_utils.create_security_service() current_security_service = ( db_utils.create_security_service() if with_current_service else None) share_server = db_utils.create_share_server() fake_container_name = 'fake_name' network_info = {} share_instances = [] share_instance_access_rules = [] mock_check_update = self.mock_object( self._driver, 'check_update_share_server_security_service', mock.Mock(return_value=True)) mock_get_container_name = self.mock_object( self._driver, '_get_container_name', mock.Mock(return_value=fake_container_name)) mock_setup = self.mock_object(self._driver, 'setup_security_services') mock_update_sec_service = self.mock_object( self._driver.security_service_helper, 'update_security_service') 
self._driver.update_share_server_security_service( self._context, share_server, network_info, share_instances, share_instance_access_rules, new_security_service, current_security_service=current_security_service) mock_check_update.assert_called_once_with( self._context, share_server, network_info, share_instances, share_instance_access_rules, new_security_service, current_security_service=current_security_service ) mock_get_container_name.assert_called_once_with(share_server['id']) if with_current_service: mock_update_sec_service.assert_called_once_with( fake_container_name, current_security_service, new_security_service) else: mock_setup.assert_called_once_with( fake_container_name, [new_security_service]) def test_update_share_server_security_service_not_supported(self): new_security_service = db_utils.create_security_service() current_security_service = db_utils.create_security_service() share_server = db_utils.create_share_server() share_instances = [] share_instance_access_rules = [] network_info = {} mock_check_update = self.mock_object( self._driver, 'check_update_share_server_security_service', mock.Mock(return_value=False)) self.assertRaises( exception.ManilaException, self._driver.update_share_server_security_service, self._context, share_server, network_info, share_instances, share_instance_access_rules, new_security_service, current_security_service=current_security_service) mock_check_update.assert_called_once_with( self._context, share_server, network_info, share_instances, share_instance_access_rules, new_security_service, current_security_service=current_security_service) def test__form_share_server_update_return(self): fake_share_server = cont_fakes.fake_share_server() fake_current_network_allocations = ( cont_fakes.fake_current_network_allocations()) fake_new_network_allocations = ( cont_fakes.fake_new_network_allocations()) fake_share_instances = cont_fakes.fake_share_instances() fake_server_id = 'fake_container_id' fake_addresses = 
['192.168.144.100', '10.0.0.100'] fake_subnet_allocations = { 'fake_id_current': '192.168.144.100', 'fake_id_new': '10.0.0.100' } fake_share_updates = { 'fakeid': [ { 'is_admin_only': False, 'path': '//%s/fakeshareid' % fake_addresses[0], 'preferred': False }, { 'is_admin_only': False, 'path': '//%s/fakeshareid' % fake_addresses[1], 'preferred': False } ] } fake_server_details = { 'subnet_allocations': jsonutils.dumps(fake_subnet_allocations) } fake_return = { 'share_updates': fake_share_updates, 'server_details': fake_server_details } self.mock_object(self._driver, '_get_container_name', mock.Mock(return_value=fake_server_id)) self.mock_object(self._driver.container, 'fetch_container_addresses', mock.Mock(return_value=fake_addresses)) self.assertEqual( fake_return, self._driver._form_share_server_update_return( fake_share_server, fake_current_network_allocations, fake_new_network_allocations, fake_share_instances)) self._driver._get_container_name.assert_called_once_with( fake_share_server['id']) (self._driver.container.fetch_container_addresses .assert_called_once_with(fake_server_id, 'inet')) def test_check_update_share_server_network_allocations(self): fake_share_server = cont_fakes.fake_share_server() self.mock_object(driver.LOG, 'debug') self.assertTrue( self._driver.check_update_share_server_network_allocations( None, fake_share_server, None, None, None, None, None)) self.assertTrue(driver.LOG.debug.called) def test_update_share_server_network_allocations(self): fake_share_server = cont_fakes.fake_share_server() fake_server_id = 'fake_container_id' fake_return = 'fake_return' self.mock_object(self._driver, '_get_container_name', mock.Mock(return_value=fake_server_id)) self.mock_object(self._driver, '_setup_server_network') self.mock_object(self._driver, '_form_share_server_update_return', mock.Mock(return_value=fake_return)) self.assertEqual(fake_return, self._driver.update_share_server_network_allocations( None, fake_share_server, None, None, None, None, 
None)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/container/test_protocol_helper.py0000664000175000017500000003107600000000000027043 0ustar00zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Protocol helper module.""" import functools from unittest import mock import ddt from manila.common import constants as const from manila import exception from manila.share.drivers.container import protocol_helper from manila import test from manila.tests.share.drivers.container.fakes import fake_share @ddt.ddt class DockerCIFSHelperTestCase(test.TestCase): """Tests ContainerShareDriver""" def setUp(self): super(DockerCIFSHelperTestCase, self).setUp() self._helper = mock.Mock() self.fake_conf = mock.Mock() self.fake_conf.container_cifs_guest_ok = "yes" self.DockerCIFSHelper = protocol_helper.DockerCIFSHelper( self._helper, share=fake_share(), config=self.fake_conf) def fake_exec_sync(self, *args, **kwargs): kwargs["execute_arguments"].append(args) try: ret_val = kwargs["ret_val"] except KeyError: ret_val = None return [ret_val] def test_create_share_guest_ok(self): expected_arguments = [ ("fakeserver", ["net", "conf", "addshare", "fakeshareid", "/shares/fakeshareid", "writeable=y", "guest_ok=y"]), ("fakeserver", ["net", "conf", "setparm", "fakeshareid", "browseable", "yes"]), ("fakeserver", 
["net", "conf", "setparm", "fakeshareid", "hosts allow", "127.0.0.1"]), ("fakeserver", ["net", "conf", "setparm", "fakeshareid", "read only", "no"]), ("fakeserver", ["net", "conf", "setparm", "fakeshareid", "hosts deny", "0.0.0.0/0"]), ("fakeserver", ["net", "conf", "setparm", "fakeshareid", "create mask", "0755"])] actual_arguments = [] self._helper.execute = functools.partial( self.fake_exec_sync, execute_arguments=actual_arguments, ret_val=" fake 192.0.2.2/24 more fake \n" * 20) self.DockerCIFSHelper.share = fake_share() self.mock_object(self.DockerCIFSHelper.container, 'fetch_container_addresses', mock.Mock(return_value=['192.0.2.2'])) self.DockerCIFSHelper.create_share("fakeserver") self.assertEqual(expected_arguments.sort(), actual_arguments.sort()) def test_create_share_guest_not_ok(self): self.DockerCIFSHelper.conf = mock.Mock() self.DockerCIFSHelper.conf.container_cifs_guest_ok = False expected_arguments = [ ("fakeserver", ["net", "conf", "addshare", "fakeshareid", "/shares/fakeshareid", "writeable=y", "guest_ok=n"]), ("fakeserver", ["net", "conf", "setparm", "fakeshareid", "browseable", "yes"]), ("fakeserver", ["net", "conf", "setparm", "fakeshareid", "hosts allow", "192.0.2.2"]), ("fakeserver", ["net", "conf", "setparm", "fakeshareid", "read only", "no"]), ("fakeserver", ["net", "conf", "setparm", "fakeshareid", "hosts deny", "0.0.0.0/0"]), ("fakeserver", ["net", "conf", "setparm", "fakeshareid", "create mask", "0755"])] actual_arguments = [] self._helper.execute = functools.partial( self.fake_exec_sync, execute_arguments=actual_arguments, ret_val=" fake 192.0.2.2/24 more fake \n" * 20) self.DockerCIFSHelper.share = fake_share() self.mock_object(self.DockerCIFSHelper.container, 'fetch_container_addresses', mock.Mock(return_value=['192.0.2.2'])) self.DockerCIFSHelper.create_share("fakeserver") self.assertEqual(expected_arguments.sort(), actual_arguments.sort()) def test_delete_share(self): self.DockerCIFSHelper.share = fake_share() 
self.DockerCIFSHelper.delete_share("fakeserver", "fakeshareid") self.DockerCIFSHelper.container.execute.assert_called_with( "fakeserver", ["net", "conf", "delshare", "fakeshareid"], ignore_errors=False) def test__get_access_group_ro(self): result = self.DockerCIFSHelper._get_access_group(const.ACCESS_LEVEL_RO) self.assertEqual("read list", result) def test__get_access_group_rw(self): result = self.DockerCIFSHelper._get_access_group(const.ACCESS_LEVEL_RW) self.assertEqual("valid users", result) def test__get_access_group_other(self): self.assertRaises(exception.InvalidShareAccessLevel, self.DockerCIFSHelper._get_access_group, "fake_level") def test__get_existing_users(self): self.DockerCIFSHelper.container.execute = mock.Mock( return_value=("fake_user", "")) result = self.DockerCIFSHelper._get_existing_users("fake_server_id", "fake_share", "fake_access") self.assertEqual("fake_user", result) self.DockerCIFSHelper.container.execute.assert_called_once_with( "fake_server_id", ["net", "conf", "getparm", "fake_share", "fake_access"], ignore_errors=True) def test__set_users(self): self.DockerCIFSHelper.container.execute = mock.Mock() self.DockerCIFSHelper._set_users("fake_server_id", "fake_share", "fake_access", "fake_user") self.DockerCIFSHelper.container.execute.assert_called_once_with( "fake_server_id", ["net", "conf", "setparm", "fake_share", "fake_access", "fake_user"]) def test__allow_access_ok(self): self.DockerCIFSHelper._get_access_group = mock.Mock( return_value="valid users") self.DockerCIFSHelper._get_existing_users = mock.Mock( return_value="fake_user") self.DockerCIFSHelper._set_users = mock.Mock() self.DockerCIFSHelper._allow_access("fake_share", "fake_server_id", "fake_user2", "rw") self.DockerCIFSHelper._get_access_group.assert_called_once_with("rw") self.DockerCIFSHelper._get_existing_users.assert_called_once_with( "fake_server_id", "fake_share", "valid users") self.DockerCIFSHelper._set_users.assert_called_once_with( "fake_server_id", "fake_share", 
"valid users", "fake_user fake_user2") def test__allow_access_not_ok(self): self.DockerCIFSHelper._get_access_group = mock.Mock( return_value="valid users") self.DockerCIFSHelper._get_existing_users = mock.Mock() self.DockerCIFSHelper._get_existing_users.side_effect = TypeError self.DockerCIFSHelper._set_users = mock.Mock() self.DockerCIFSHelper._allow_access("fake_share", "fake_server_id", "fake_user2", "rw") self.DockerCIFSHelper._get_access_group.assert_called_once_with("rw") self.DockerCIFSHelper._get_existing_users.assert_called_once_with( "fake_server_id", "fake_share", "valid users") self.DockerCIFSHelper._set_users.assert_called_once_with( "fake_server_id", "fake_share", "valid users", "fake_user2") def test__deny_access_ok(self): self.DockerCIFSHelper._get_access_group = mock.Mock( return_value="valid users") self.DockerCIFSHelper._get_existing_users = mock.Mock( return_value="fake_user fake_user2") self.DockerCIFSHelper._set_users = mock.Mock() self.DockerCIFSHelper._deny_access("fake_share", "fake_server_id", "fake_user2", "rw") self.DockerCIFSHelper._get_access_group.assert_called_once_with("rw") self.DockerCIFSHelper._get_existing_users.assert_called_once_with( "fake_server_id", "fake_share", "valid users") self.DockerCIFSHelper._set_users.assert_called_once_with( "fake_server_id", "fake_share", "valid users", "fake_user") def test__deny_access_ok_so_many_users(self): self.DockerCIFSHelper._get_access_group = mock.Mock( return_value="valid users") self.DockerCIFSHelper._get_existing_users = mock.Mock( return_value="joost jaap huub dirk") self.DockerCIFSHelper._set_users = mock.Mock() # Sorry, Jaap. 
self.DockerCIFSHelper._deny_access("fake_share", "fake_server_id", "jaap", "rw") self.DockerCIFSHelper._get_access_group.assert_called_once_with("rw") self.DockerCIFSHelper._get_existing_users.assert_called_once_with( "fake_server_id", "fake_share", "valid users") self.DockerCIFSHelper._set_users.assert_called_once_with( "fake_server_id", "fake_share", "valid users", "dirk huub joost") def test__deny_access_not_ok(self): self.DockerCIFSHelper._get_access_group = mock.Mock( return_value="valid users") self.DockerCIFSHelper._get_existing_users = mock.Mock() self.DockerCIFSHelper._get_existing_users.side_effect = TypeError self.DockerCIFSHelper._set_users = mock.Mock() self.mock_object(protocol_helper.LOG, "warning") self.DockerCIFSHelper._deny_access("fake_share", "fake_server_id", "fake_user2", "rw") self.DockerCIFSHelper._get_access_group.assert_called_once_with("rw") self.DockerCIFSHelper._get_existing_users.assert_called_once_with( "fake_server_id", "fake_share", "valid users") self.assertFalse(self.DockerCIFSHelper._set_users.called) self.assertTrue(protocol_helper.LOG.warning.called) def test_update_access_access_rules_wrong_type(self): allow_rules = [{ "access_to": "192.0.2.2", "access_level": "ro", "access_type": "fake" }] self.mock_object(self.DockerCIFSHelper, "_allow_access") self.assertRaises(exception.InvalidShareAccess, self.DockerCIFSHelper.update_access, "fakeserver", "fakeshareid", allow_rules, [], []) def test_update_access_access_rules_ok(self): access_rules = [{ "access_to": "fakeuser", "access_level": "ro", "access_type": "user" }] self.mock_object(self.DockerCIFSHelper, "_allow_access") self.DockerCIFSHelper.container.execute = mock.Mock() self.DockerCIFSHelper.update_access("fakeserver", "fakeshareid", access_rules, [], []) self.DockerCIFSHelper._allow_access.assert_called_once_with( "fakeshareid", "fakeserver", "fakeuser", "ro") self.DockerCIFSHelper.container.execute.assert_called_once_with( "fakeserver", ["net", "conf", "setparm", 
"fakeshareid", "valid users", ""]) def test_update_access_add_rules(self): add_rules = [{ "access_to": "fakeuser", "access_level": "ro", "access_type": "user" }] self.mock_object(self.DockerCIFSHelper, "_allow_access") self.DockerCIFSHelper.update_access("fakeserver", "fakeshareid", [], add_rules, []) self.DockerCIFSHelper._allow_access.assert_called_once_with( "fakeshareid", "fakeserver", "fakeuser", "ro") def test_update_access_delete_rules(self): delete_rules = [{ "access_to": "fakeuser", "access_level": "ro", "access_type": "user" }] self.mock_object(self.DockerCIFSHelper, "_deny_access") self.DockerCIFSHelper.update_access("fakeserver", "fakeshareid", [], [], delete_rules) self.DockerCIFSHelper._deny_access.assert_called_once_with( "fakeshareid", "fakeserver", "fakeuser", "ro") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/container/test_security_service_helper.py0000664000175000017500000001442700000000000030572 0ustar00zuulzuul00000000000000# Copyright (c) 2021 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the Security Service helper module.""" from unittest import mock import ddt from manila import exception from manila.share import configuration from manila.share.drivers.container import security_service_helper from manila import test from manila.tests import db_utils INVALID_CREDENTIALS_EXIT_CODE = 49 @ddt.ddt class SecurityServiceHelperTestCase(test.TestCase): """Tests DockerExecHelper""" def setUp(self): super(SecurityServiceHelperTestCase, self).setUp() self.fake_conf = configuration.Configuration(None) self.fake_conf.container_image_name = "fake_image" self.fake_conf.container_volume_mount_path = "/tmp/shares" self.security_service_helper = ( security_service_helper.SecurityServiceHelper( configuration=self.fake_conf)) def test_setup_security_service(self): share_server = db_utils.create_share_server() security_service = db_utils.create_security_service() mock_ldap_bind = self.mock_object( self.security_service_helper, 'ldap_bind') self.security_service_helper.setup_security_service( share_server['id'], security_service) mock_ldap_bind.assert_called_once_with( share_server['id'], security_service) def test_update_security_service(self): share_server = db_utils.create_share_server() current_security_service = db_utils.create_security_service() new_security_service = db_utils.create_security_service() mock_ldap_bind = self.mock_object( self.security_service_helper, 'ldap_bind') self.security_service_helper.update_security_service( share_server['id'], current_security_service, new_security_service) mock_ldap_bind.assert_called_once_with( share_server['id'], new_security_service) def _setup_test_ldap_bind_tests(self): share_server = db_utils.create_security_service() security_service = db_utils.create_security_service() ldap_get_info = { 'ss_password': security_service['password'], 'ss_user': security_service['user'] } expected_cmd = [ "docker", "exec", "%s" % share_server['id'], "ldapwhoami", "-x", "-H", "ldap://localhost:389", "-D", 
"cn=%s,dc=example,dc=com" % ldap_get_info[ "ss_user"], "-w", "%s" % ldap_get_info["ss_password"]] return share_server, security_service, ldap_get_info, expected_cmd def test_ldap_bind(self): share_server, security_service, ldap_get_info, expected_cmd = ( self._setup_test_ldap_bind_tests()) mock_ldap_get_info = self.mock_object( self.security_service_helper, 'ldap_get_info', mock.Mock(return_value=ldap_get_info)) mock_ldap_retry_operation = self.mock_object( self.security_service_helper, 'ldap_retry_operation') self.security_service_helper.ldap_bind( share_server['id'], security_service) mock_ldap_get_info.assert_called_once_with(security_service) mock_ldap_retry_operation.assert_called_once_with(expected_cmd, run_as_root=True) def test_ldap_get_info(self): security_service = db_utils.create_security_service() expected_ldap_get_info = { 'ss_password': security_service['password'], 'ss_user': security_service['user'] } ldap_get_info = self.security_service_helper.ldap_get_info( security_service) self.assertEqual(expected_ldap_get_info, ldap_get_info) @ddt.data( {'type': 'ldap'}, {'user': 'fake_user'}, {'password': 'fake_password'}, ) def test_ldap_get_info_exception(self, sec_service_data): self.assertRaises( exception.ShareBackendException, self.security_service_helper.ldap_get_info, sec_service_data ) def test_ldap_retry_operation(self): mock_cmd = ["command", "to", "be", "executed"] mock_execute = self.mock_object(self.security_service_helper, '_execute') self.security_service_helper.ldap_retry_operation(mock_cmd, run_as_root=True) mock_execute.assert_called_once_with(*mock_cmd, run_as_root=True) def test_ldap_retry_operation_timeout(self): mock_cmd = ["command", "to", "be", "executed"] mock_execute = self.mock_object( self.security_service_helper, '_execute', mock.Mock( side_effect=exception.ProcessExecutionError(exit_code=1))) self.assertRaises( exception.ShareBackendException, self.security_service_helper.ldap_retry_operation, mock_cmd, run_as_root=False, 
timeout=10) mock_execute.assert_has_calls([ mock.call(*mock_cmd, run_as_root=False), mock.call(*mock_cmd, run_as_root=False)]) def test_ldap_retry_operation_invalid_credential(self): mock_cmd = ["command", "to", "be", "executed"] mock_execute = self.mock_object( self.security_service_helper, '_execute', mock.Mock( side_effect=exception.ProcessExecutionError( exit_code=49))) self.assertRaises( exception.ShareBackendException, self.security_service_helper.ldap_retry_operation, mock_cmd, run_as_root=False) mock_execute.assert_called_once_with(*mock_cmd, run_as_root=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/container/test_storage_helper.py0000664000175000017500000003204300000000000026641 0ustar00zuulzuul00000000000000# Copyright 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the Storage helper module.""" import functools from unittest import mock import ddt from manila import exception from manila.share import configuration from manila.share.drivers.container import storage_helper from manila import test from manila.tests import fake_share as base_fake_share from manila.tests.share.drivers.container.fakes import fake_share @ddt.ddt class LVMHelperTestCase(test.TestCase): """Tests ContainerShareDriver""" def setUp(self): super(LVMHelperTestCase, self).setUp() self.share = fake_share() self.fake_conf = configuration.Configuration(None) self.fake_conf.container_volume_mount_path = "/tmp/shares" self.LVMHelper = storage_helper.LVMHelper(configuration=self.fake_conf) self.context = mock.Mock() def fake_exec_sync(self, *args, **kwargs): kwargs['execute_arguments'].append(args) try: ret_val = kwargs['ret_val'] except KeyError: ret_val = None return ret_val def test_lvmhelper_setup_explodes_in_gore_on_no_config_supplied(self): self.assertRaises(exception.ManilaException, storage_helper.LVMHelper, None) @ddt.data("62.50g 72.50g", " 72.50g 62.50g\n", " <62.50g <72.50g\n") def test_get_share_server_pools(self, ret_vgs): expected_result = [{'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'pool_name': 'manila_docker_volumes', 'total_capacity_gb': 72.5, 'free_capacity_gb': 62.5}] self.mock_object(self.LVMHelper, "_execute", mock.Mock(return_value=(ret_vgs, 0))) result = self.LVMHelper.get_share_server_pools() self.assertEqual(expected_result, result) def test__get_lv_device(self): fake_share_name = 'fakeshareid' self.assertEqual("/dev/manila_docker_volumes/%s" % fake_share_name, self.LVMHelper._get_lv_device(fake_share_name)) def test__get_lv_folder(self): fake_share_name = 'fakeshareid' self.assertEqual("/tmp/shares/%s" % fake_share_name, self.LVMHelper._get_lv_folder(fake_share_name)) def test_provide_storage(self): actual_arguments = [] fake_share_name = 'fakeshareid' 
expected_arguments = [ ('lvcreate', '-p', 'rw', '-L', '1G', '-n', 'fakeshareid', 'manila_docker_volumes'), ('mkfs.ext4', '/dev/manila_docker_volumes/fakeshareid'), ] self.LVMHelper._execute = functools.partial( self.fake_exec_sync, execute_arguments=actual_arguments, ret_val='') self.LVMHelper.provide_storage(fake_share_name, 1) self.assertEqual(expected_arguments, actual_arguments) @ddt.data(None, exception.ProcessExecutionError) def test__try_to_unmount_device(self, side_effect): device = {} mock_warning = self.mock_object(storage_helper.LOG, 'warning') mock_execute = self.mock_object(self.LVMHelper, '_execute', mock.Mock(side_effect=side_effect)) self.LVMHelper._try_to_unmount_device(device) mock_execute.assert_called_once_with( "umount", device, run_as_root=True ) if side_effect is not None: mock_warning.assert_called_once() def test_remove_storage(self): fake_share_name = 'fakeshareid' fake_device = {} mock_get_lv_device = self.mock_object( self.LVMHelper, '_get_lv_device', mock.Mock(return_value=fake_device)) mock_try_to_umount = self.mock_object(self.LVMHelper, '_try_to_unmount_device') mock_execute = self.mock_object(self.LVMHelper, '_execute') self.LVMHelper.remove_storage(fake_share_name) mock_get_lv_device.assert_called_once_with( fake_share_name ) mock_try_to_umount.assert_called_once_with(fake_device) mock_execute.assert_called_once_with( 'lvremove', '-f', '--autobackup', 'n', fake_device, run_as_root=True ) def test_remove_storage_lvremove_failed(self): fake_share_name = 'fakeshareid' def fake_execute(*args, **kwargs): if 'lvremove' in args: raise exception.ProcessExecutionError() self.mock_object(storage_helper.LOG, "warning") self.mock_object(self.LVMHelper, "_execute", fake_execute) self.LVMHelper.remove_storage(fake_share_name) self.assertTrue(storage_helper.LOG.warning.called) @ddt.data(None, exception.ProcessExecutionError) def test_rename_storage(self, side_effect): fake_old_share_name = 'fake_old_name' fake_new_share_name = 'fake_new_name' 
fake_new_device = "/dev/new_device" fake_old_device = "/dev/old_device" mock_get_lv_device = self.mock_object( self.LVMHelper, '_get_lv_device', mock.Mock(side_effect=[fake_old_device, fake_new_device])) mock_try_to_umount = self.mock_object(self.LVMHelper, '_try_to_unmount_device') mock_execute = self.mock_object(self.LVMHelper, '_execute', mock.Mock(side_effect=side_effect)) if side_effect is None: self.LVMHelper.rename_storage(fake_old_share_name, fake_new_share_name) else: self.assertRaises(exception.ProcessExecutionError, self.LVMHelper.rename_storage, fake_old_share_name, fake_new_share_name) mock_try_to_umount.assert_called_once_with(fake_old_device) mock_execute.assert_called_once_with( "lvrename", "--autobackup", "n", fake_old_device, fake_new_device, run_as_root=True ) mock_get_lv_device.assert_has_calls([ mock.call(fake_old_share_name), mock.call(fake_new_share_name) ]) def test_extend_share(self): actual_arguments = [] expected_arguments = [ ('lvextend', '-L', 'shareG', '-n', '/dev/manila_docker_volumes/fakeshareid'), ('e2fsck', '-f', '-y', '/dev/manila_docker_volumes/fakeshareid'), ('resize2fs', '/dev/manila_docker_volumes/fakeshareid'), ] fake_share_name = 'fakeshareid' self.LVMHelper._execute = functools.partial( self.fake_exec_sync, execute_arguments=actual_arguments, ret_val='') self.LVMHelper.extend_share(fake_share_name, 'share', 3) self.assertEqual(expected_arguments, actual_arguments) def test_get_size(self): share_name = 'fakeshareid' fake_old_device = {} mock_get_lv_device = self.mock_object( self.LVMHelper, '_get_lv_device', mock.Mock(return_value=fake_old_device)) mock_execute = self.mock_object(self.LVMHelper, '_execute', mock.Mock(return_value=[1, "args"])) result = self.LVMHelper.get_size(share_name) mock_execute.assert_called_once_with( "lvs", "-o", "lv_size", "--noheadings", "--nosuffix", "--units", "g", fake_old_device, run_as_root=True ) mock_get_lv_device.assert_called_once_with(share_name) self.assertEqual(result, 1) 
@ddt.data({'source_host': 'host@back1#vg1', 'dest_host': 'host@back2#vg2', 'compatible': False}, {'source_host': 'host@back1#vg1', 'dest_host': 'host@back2#vg1', 'compatible': True}, {'source_host': 'host@back1#vg1', 'dest_host': 'host@back1#vg1', 'compatible': True}) @ddt.unpack def test_migration_check_compatibility( self, source_host, dest_host, compatible): mock_exception_log = self.mock_object(storage_helper.LOG, 'exception') source_share = base_fake_share.fake_share_instance(host=source_host) dest_share = base_fake_share.fake_share_instance(host=dest_host) migration_compatibility = self.LVMHelper.migration_check_compatibility( self.context, source_share, dest_share, share_server=None, destination_share_server=None) expected_compatibility = { 'compatible': compatible, 'writable': True, 'nondisruptive': False, 'preserve_metadata': True, 'preserve_snapshots': False, } self.assertDictEqual(expected_compatibility, migration_compatibility) if not compatible: mock_exception_log.assert_called_once() def test_migration_continue(self): end1Phase = self.LVMHelper.migration_continue( self.context, None, None, None, None, share_server=None, destination_share_server=None) self.assertTrue(end1Phase) def test_migration_get_progress(self): progress = self.LVMHelper.migration_get_progress( self.context, None, None, None, None, share_server=None, destination_share_server=None) expected_progress = { 'total_progress': 100, } self.assertDictEqual(expected_progress, progress) @ddt.data({'source_host': 'host@back1', 'dest_host': 'host@back1', 'shares_specs': {}}, {'source_host': 'host@back1', 'dest_host': 'host@back2#vg1', 'shares_specs': {'shares_req_spec': [ {'share_instance_properties': {'host': 'host@back1#vg2'}} ]}}) @ddt.unpack def test_share_server_migration_check_compatibility_false( self, source_host, dest_host, shares_specs): not_compatible = { 'compatible': False, 'writable': None, 'nondisruptive': None, 'preserve_snapshots': None, 'migration_cancel': None, 
'migration_get_progress': None, } mock_error_log = self.mock_object(storage_helper.LOG, 'error') source_server = {'id': 'fake_id', 'host': source_host} migration_compatibility = ( self.LVMHelper.share_server_migration_check_compatibility( self.context, source_server, dest_host, None, None, shares_specs)) self.assertDictEqual(not_compatible, migration_compatibility) mock_error_log.assert_called_once() @ddt.data({'source_host': 'host@back1', 'dest_host': 'host@back2#vg1', 'shares_specs': {'shares_req_spec': [ {'share_instance_properties': {'host': 'host@back1#vg1'}} ]}}, {'source_host': 'host@back1', 'dest_host': 'host@back2', 'shares_specs': {'shares_req_spec': [ {'share_instance_properties': {'host': 'host@back1#vg1'}} ]}}) @ddt.unpack def test_share_server_migration_check_compatibility_true( self, source_host, dest_host, shares_specs): compatible = { 'compatible': True, 'writable': True, 'nondisruptive': False, 'preserve_snapshots': False, 'migration_cancel': True, 'migration_get_progress': True, } source_server = {'id': 'fake_id', 'host': source_host} migration_compatibility = ( self.LVMHelper.share_server_migration_check_compatibility( self.context, source_server, dest_host, None, None, shares_specs)) self.assertDictEqual(compatible, migration_compatibility) def test_share_server_migration_continue(self): end1Phase = self.LVMHelper.share_server_migration_continue( self.context, None, None, None, None) self.assertTrue(end1Phase) def test_share_server_migration_get_progess(self): progress = self.LVMHelper.share_server_migration_get_progress( self.context, None, None, None, None) expected_progress = { 'total_progress': 100, } self.assertDictEqual(expected_progress, progress) def test_get_share_pool_name(self): fake_vg_name = 'fake_vg' self.LVMHelper.configuration.container_volume_group = fake_vg_name vg_name = self.LVMHelper.get_share_pool_name('fake_share_id') self.assertEqual(vg_name, fake_vg_name) ././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1759315601.9976707 manila-21.0.0/manila/tests/share/drivers/dell_emc/0000775000175000017500000000000000000000000022005 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/__init__.py0000664000175000017500000000000000000000000024104 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.9976707 manila-21.0.0/manila/tests/share/drivers/dell_emc/common/0000775000175000017500000000000000000000000023275 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/common/__init__.py0000664000175000017500000000000000000000000025374 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0016706 manila-21.0.0/manila/tests/share/drivers/dell_emc/common/enas/0000775000175000017500000000000000000000000024223 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/common/enas/__init__.py0000664000175000017500000000000000000000000026322 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/common/enas/fakes.py0000664000175000017500000017212400000000000025675 0ustar00zuulzuul00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_utils import units from manila.common import constants as const from manila.share import configuration as conf from manila.share.drivers.dell_emc.common.enas import utils from manila.tests import fake_share def query(func): def inner(*args, **kwargs): return ( '' '' + func(*args, **kwargs) + '' ) return inner def start_task(func): def inner(*args, **kwargs): return ( '' '' + func(*args, **kwargs) + '') return inner def response(func): def inner(*args, **kwargs): return ( '' '' + func(*args, **kwargs) + '' ).encode() return inner class FakeData(object): # Share information share_id = '7cf7c200_d3af_4e05_b87e_9167c95df4f9' host = 'HostA@BackendB#fake_pool_name' share_name = share_id share_size = 10 new_size = 20 src_share_name = '7cf7c200_d3af_4e05_b87e_9167c95df4f0' # Snapshot information snapshot_name = 'de4c9050-e2f9-4ce1-ade4-5ed0c9f26451' src_snap_name = 'de4c9050-e2f9-4ce1-ade4-5ed0c9f26452' snapshot_id = 'fake_snap_id' snapshot_size = 10 * units.Ki # Share network information share_network_id = 'c5b3a865-56d0-4d88-abe5-879965e099c9' cidr = '192.168.1.0/24' cidr_v6 = 'fdf8:f53b:82e1::/64' segmentation_id = 100 network_allocations_id1 = '132dbb10-9a36-46f2-8d89-3d909830c356' network_allocations_id2 = '7eabdeed-bad2-46ea-bd0f-a33884c869e0' network_allocations_id3 = '98c9e490-a842-4e59-b59a-a6042069d35b' network_allocations_id4 = '6319a917-ab95-4b65-a498-773ae33c5550' network_allocations_ip1 = '192.168.1.1' network_allocations_ip2 = '192.168.1.2' network_allocations_ip3 = 'fdf8:f53b:82e1::1' network_allocations_ip4 = 
'fdf8:f53b:82e1::2' network_allocations_ip_version1 = 4 network_allocations_ip_version2 = 4 network_allocations_ip_version3 = 6 network_allocations_ip_version4 = 6 domain_name = 'fake_domain' domain_user = 'administrator' domain_password = 'password' dns_ip_address = '192.168.1.200' dns_ipv6_address = 'fdf8:f53b:82e1::f' # Share server information share_server_id = '56aafd02-4d44-43d7-b784-57fc88167224' # Filesystem information filesystem_name = share_name filesystem_id = 'fake_filesystem_id' filesystem_size = 10 * units.Ki filesystem_new_size = 20 * units.Ki # Mountpoint information path = '/' + share_name # Mover information mover_name = 'server_2' mover_id = 'fake_mover_id' interface_name1 = network_allocations_id1[-12:] interface_name2 = network_allocations_id2[-12:] interface_name3 = network_allocations_id3[-12:] interface_name4 = network_allocations_id4[-12:] long_interface_name = network_allocations_id1 net_mask = '255.255.255.0' net_mask_v6 = 64 device_name = 'cge-1-0' interconnect_id = '2001' # VDM information vdm_name = share_server_id vdm_id = 'fake_vdm_id' # Pool information pool_name = 'fake_pool_name' pool_id = 'fake_pool_id' pool_used_size = 20480 pool_total_size = 511999 # NFS share access information rw_hosts = ['192.168.1.1', '192.168.1.2'] ro_hosts = ['192.168.1.3', '192.168.1.4'] nfs_host_ip = '192.168.1.5' rw_hosts_ipv6 = ['fdf8:f53b:82e1::1', 'fdf8:f53b:82e1::2'] ro_hosts_ipv6 = ['fdf8:f53b:82e1::3', 'fdf8:f53b:82e1::4'] nfs_host_ipv6 = 'fdf8:f53b:82e1::5' fake_output = '' fake_error_msg = 'fake error message' emc_share_backend = 'vnx' powermax_share_backend = 'powermax' emc_nas_server = '192.168.1.20' emc_nas_login = 'fakename' emc_nas_password = 'fakepassword' share_backend_name = 'EMC_NAS_Storage' cifs_access = """ 1478607389: SMB:11: Unix user 'Guest' UID=32769 1478607389: SMB:10: FindUserUid:Access_Password 'Guest',1=0x8001 T=0 1478607389: SHARE: 6: ALLOWED:fullcontrol:S-1-5-15-3399d125-6dcdf5f4 1478607389: SMB:11: Unix user 
'Administrator' UID=32768 1478607389: SMB:10: FindUserUid:Access_Password 'Administrator', 1478607389: SHARE: 6: ALLOWED:fullcontrol:S-1-5-15-3399d125 """ class StorageObjectTestData(object): def __init__(self): self.share_name = FakeData.share_name self.filesystem_name = FakeData.filesystem_name self.filesystem_id = FakeData.filesystem_id self.filesystem_size = 10 * units.Ki self.filesystem_new_size = 20 * units.Ki self.path = FakeData.path self.snapshot_name = FakeData.snapshot_name self.snapshot_id = FakeData.snapshot_id self.snapshot_size = 10 * units.Ki self.src_snap_name = FakeData.src_snap_name self.src_fileystems_name = FakeData.src_share_name self.mover_name = FakeData.mover_name self.mover_id = FakeData.mover_id self.vdm_name = FakeData.vdm_name self.vdm_id = FakeData.vdm_id self.pool_name = FakeData.pool_name self.pool_id = FakeData.pool_id self.pool_used_size = FakeData.pool_used_size self.pool_total_size = FakeData.pool_total_size self.interface_name1 = FakeData.interface_name1 self.interface_name2 = FakeData.interface_name2 self.interface_name3 = FakeData.interface_name3 self.interface_name4 = FakeData.interface_name4 self.long_interface_name = FakeData.long_interface_name self.ip_address1 = FakeData.network_allocations_ip1 self.ip_address2 = FakeData.network_allocations_ip2 self.ip_address3 = FakeData.network_allocations_ip3 self.ip_address4 = FakeData.network_allocations_ip4 self.net_mask = FakeData.net_mask self.net_mask_v6 = FakeData.net_mask_v6 self.vlan_id = FakeData.segmentation_id self.cifs_server_name = FakeData.vdm_name self.domain_name = FakeData.domain_name self.domain_user = FakeData.domain_user self.domain_password = FakeData.domain_password self.dns_ip_address = FakeData.dns_ip_address self.device_name = FakeData.device_name self.interconnect_id = FakeData.interconnect_id self.rw_hosts = FakeData.rw_hosts self.ro_hosts = FakeData.ro_hosts self.nfs_host_ip = FakeData.nfs_host_ip self.rw_hosts_ipv6 = FakeData.rw_hosts_ipv6 
self.ro_hosts_ipv6 = FakeData.ro_hosts_ipv6 self.nfs_host_ipv6 = FakeData.nfs_host_ipv6 self.fake_output = FakeData.fake_output @response def resp_get_error(self): return ( '' '' 'Fake description.' 'Fake action.' 'Fake diagnostics.' '' '' 'Fake description.' 'Fake action.' 'Fake diagnostics.' '' ' ' ) @response def resp_get_without_value(self): return ( '' ) @response def resp_task_succeed(self): return ( '' '' '' ) @response def resp_task_error(self): return ( '' '' '' ) @response def resp_invalid_mover_id(self): return ( '' '' 'The Mover ID supplied with the request is invalid.' '' 'Refer to the XML API v2 schema/documentation and correct ' 'your user program logic.' ' Exception tag: 14fb692e556 Exception ' 'message: com.emc.nas.ccmd.common.MessageInstanceImpl@5004000d ' '' '' ' ' ) @response def resp_need_retry(self): return ('' '' '' ' fake desp. ' 'fake action ' '') @start_task def req_fake_start_task(self): return '' class FileSystemTestData(StorageObjectTestData): def __init__(self): super(FileSystemTestData, self).__init__() @start_task def req_create_on_vdm(self): return ( '' '' '' '' % {'name': self.filesystem_name, 'id': self.vdm_id, 'pool_id': self.pool_id, 'size': self.filesystem_size} ) @start_task def req_create_on_mover(self): return ( '' '' '' '' % {'name': self.filesystem_name, 'id': self.mover_id, 'pool_id': self.pool_id, 'size': self.filesystem_size} ) @response def resp_create_but_already_exist(self): return ( ' ' '' '' '' '' '' '' ' ' ) @start_task def req_delete(self): return ( '' % {'id': self.filesystem_id} ) @response def resp_delete_but_failed(self): return ( '' '' 'The file system ID supplied with the request is ' 'invalid.' 'Refer to the XML API v2 schema/documentation and correct ' 'your user program logic.' 
' Exception tag: 14fb6b6a7b8 Exception ' 'message: com.emc.nas.ccmd.common.MessageInstanceImpl@5004000e ' '' '' ' ' ) @start_task def req_extend(self): return ( '' '' '' % {'id': self.filesystem_id, 'pool_id': self.pool_id, 'size': self.filesystem_new_size - self.filesystem_size} ) @response def resp_extend_but_error(self): return ( '' '' 'Fake description.' 'Fake action.' ' Fake diagnostics.' '' ' ' ) @query def req_get(self): return ( '' '' '' '' % {'name': self.filesystem_name} ) @response def resp_get_succeed(self): return ( '' '' '' '' '' % {'name': self.filesystem_name, 'id': self.filesystem_id, 'size': self.filesystem_size, 'pool_id': self.pool_id} ) @response def resp_get_but_miss_property(self): return ( '' '' '' '' '' % {'name': self.filesystem_name, 'id': self.filesystem_id, 'size': self.filesystem_size, 'pool_id': self.pool_id} ) @response def resp_get_but_not_found(self): return ( '' '' 'The query may be incomplete because some of the ' 'Celerra components are unavailable or do not exist. Another ' 'reason may be application error. ' 'If the entire Celerra is functioning correctly, ' 'check your client application logic. ' 'File system not found.' '' '' 'The query may be incomplete because some of the ' 'Celerra components are unavailable or do not exist. Another ' 'reason may be application error.' 'If the entire Celerra is functioning correctly, ' 'check your client application logic.' 'Migration file system not found.' 
'' ' ' ) def cmd_create_from_ckpt(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-name', self.filesystem_name, '-type', 'uxfs', '-create', 'samesize=' + self.src_fileystems_name, 'pool=' + self.pool_name, 'storage=SINGLE', 'worm=off', '-thin', 'no', '-option', 'slice=y', ] def cmd_copy_ckpt(self): session_name = self.filesystem_name + ':' + self.src_snap_name return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_copy', '-name', session_name[0:63], '-source', '-ckpt', self.src_snap_name, '-destination', '-fs', self.filesystem_name, '-interconnect', "id=" + self.interconnect_id, '-overwrite_destination', '-full_copy', ] output_copy_ckpt = "OK" error_copy_ckpt = "ERROR" def cmd_nas_fs_info(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-info', self.filesystem_name, ] def output_info(self): return ( """output = id = 515 name = %(share_name)s acl = 0 in_use = True type = uxfs worm = off volume = v993 deduplication = Off thin_storage = True tiering_policy = Auto-Tier/Optimize Pool compressed= False mirrored = False ckpts = %(ckpt)s stor_devs = FNM00124500890-004B disks = d7 disk=d7 fakeinfo""" % {'share_name': self.filesystem_name, 'ckpt': self.snapshot_name}) def cmd_delete(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_fs', '-delete', self.snapshot_name, '-Force', ] class SnapshotTestData(StorageObjectTestData): def __init__(self): super(SnapshotTestData, self).__init__() @start_task def req_create(self): return ( '' '' '' % {'fsid': self.filesystem_id, 'name': self.snapshot_name, 'pool_id': self.pool_id} ) @start_task def req_create_with_size(self): return ( '' '' '' '' % {'fsid': self.filesystem_id, 'name': self.snapshot_name, 'pool_id': self.pool_id, 'size': self.snapshot_size} ) @response def resp_create_but_already_exist(self): return ( '' '' '' '' '' '' ) @query def req_get(self): return ( '' '' % {'name': self.snapshot_name} ) @response def resp_get_succeed(self): return ( '' '' % {'name': self.snapshot_name, 'fs_id': self.filesystem_id, 
'snap_id': self.snapshot_id} ) @start_task def req_delete(self): return ( '' % {'id': self.snapshot_id} ) class MountPointTestData(StorageObjectTestData): def __init__(self): super(MountPointTestData, self).__init__() @start_task def req_create(self, mover_id, is_vdm=True): return ( '' '' '' % {'path': self.path, 'fs_id': self.filesystem_id, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) @response def resp_create_but_already_exist(self): return ( '' '' '' ' ' '' '' ' ') @start_task def req_delete(self, mover_id, is_vdm=True): return ( '' % {'path': self.path, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) @response def resp_delete_but_nonexistent(self): return ( '' ' ' ' ' '' '' ' ' ) @query def req_get(self, mover_id, is_vdm=True): return ( '' % {'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) @response def resp_get_succeed(self, mover_id, is_vdm=True): return ( '' '' '' '' % {'path': self.path, 'fsID': self.filesystem_id, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) def cmd_server_mount(self, mode): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_mount', self.vdm_name, '-option', mode, self.filesystem_name, self.path, ] def cmd_server_umount(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_umount', self.vdm_name, '-perm', self.snapshot_name, ] class VDMTestData(StorageObjectTestData): def __init__(self): super(VDMTestData, self).__init__() @start_task def req_create(self): return ( '' % {'mover_id': self.mover_id, 'vdm_name': self.vdm_name} ) @response def resp_create_but_already_exist(self): return ( '' '' '' 'Duplicate name specified' 'Specify a unqiue name' '' '' 'Duplicate name specified' 'Specify a unqiue name' '' '' ' ' ) @query def req_get(self): return '' @response def resp_get_succeed(self, name=None, interface1=None, interface2=None): if name is None: name = self.vdm_name if interface1 is None: interface1 = self.interface_name1 if interface2 is None: interface2 = 
self.interface_name2 return ( '' '' '' '
  • %(interface1)s
  • %(interface2)s
  • ' '
    ' % {'vdm_name': name, 'vdm_id': self.vdm_id, 'mover_id': self.mover_id, 'interface1': interface1, 'interface2': interface2} ) @response def resp_get_but_not_found(self): return ( '' ) @start_task def req_delete(self): return '' % {'vdmid': self.vdm_id} def cmd_attach_nfs_interface(self, interface=None): if interface is None: interface = self.interface_name2 return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', self.vdm_name, '-attach', interface, ] def cmd_detach_nfs_interface(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-vdm', self.vdm_name, '-detach', self.interface_name2, ] def cmd_get_interfaces(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_server', '-i', '-vdm', self.vdm_name, ] def output_get_interfaces_vdm(self, cifs_interface=FakeData.interface_name1, nfs_interface=FakeData.interface_name2): return ( """id = %(vdmid)s name = %(name)s acl = 0 type = vdm server = server_2 rootfs = root_fs_vdm_vdm-fakeid I18N mode = UNICODE mountedfs = member_of = status : defined = enabled actual = loaded, active Interfaces to services mapping: interface=%(nfs_if_name)s :vdm interface=%(cifs_if_name)s :cifs""" % {'vdmid': self.vdm_id, 'name': self.vdm_name, 'nfs_if_name': nfs_interface, 'cifs_if_name': cifs_interface} ) def output_get_interfaces_nfs(self, cifs_interface=FakeData.interface_name1, nfs_interface=FakeData.interface_name2): return ( """id = %(vdmid)s name = %(name)s acl = 0 type = vdm server = server_2 rootfs = root_fs_vdm_vdm-fakeid I18N mode = UNICODE mountedfs = member_of = status : defined = enabled actual = loaded, active Interfaces to services mapping: interface=%(nfs_if_name)s :nfs interface=%(cifs_if_name)s :cifs""" % {'vdmid': self.vdm_id, 'name': self.vdm_name, 'nfs_if_name': nfs_interface, 'cifs_if_name': cifs_interface} ) class PoolTestData(StorageObjectTestData): def __init__(self): super(PoolTestData, self).__init__() @query def req_get(self): return ( '' ) @response def resp_get_succeed(self, name=None, id=None): 
if not name: name = self.pool_name if not id: id = self.pool_id return ( '' '' '' '' '' '' '' % {'name': name, 'id': id, 'pool_used_size': self.pool_used_size, 'pool_total_size': self.pool_total_size} ) class MoverTestData(StorageObjectTestData): def __init__(self): super(MoverTestData, self).__init__() @query def req_get_ref(self): return ( '' '' '' ) @response def resp_get_ref_succeed(self, name=None): if not name: name = self.mover_name return ( '' '' 'The query may be incomplete because some of the ' 'Celerra components are unavailable or do not exist. Another ' 'reason may be application error.' 'If the entire Celerra is functioning correctly, ' 'check your client application logic.' 'Standby Data Mover server_2.faulted.server_3 is ' 'out of service.' '' '' '' '' % {'name': name, 'id': self.mover_id} ) @query def req_get(self): return ( '' '' '' % {'id': self.mover_id} ) @response def resp_get_succeed(self, name=None): if not name: name = self.mover_name return ( '' '' '' '' '' '' '' '' '' % {'id': self.mover_id, 'name': name, 'long_interface_name': self.long_interface_name[:31], 'interface_name1': self.interface_name1, 'interface_name2': self.interface_name2} ) @start_task def req_create_interface(self, if_name=FakeData.interface_name1, ip=FakeData.network_allocations_ip1): return ( '' % {'if_name': if_name, 'vlan': self.vlan_id, 'ip': ip, 'mover_id': self.mover_id, 'device_name': self.device_name, 'net_mask': self.net_mask} ) @start_task def req_create_interface_with_ipv6(self, if_name=FakeData.interface_name3, ip=FakeData.network_allocations_ip3): return ( '' % {'if_name': if_name, 'vlan': self.vlan_id, 'ip': ip, 'mover_id': self.mover_id, 'device_name': self.device_name, 'net_mask': self.net_mask_v6} ) @response def resp_create_interface_but_name_already_exist(self): return ( '' '' 'Duplicate name specified' 'Specify a unqiue name' '' '' % {'interface_name': self.interface_name1} ) @response def resp_create_interface_but_ip_already_exist(self): return ( '' 
'' '' '' '' % {'ip': self.ip_address1} ) @response def resp_create_interface_with_conflicted_vlan_id(self): return ( '' '' 'The operation cannot complete because other ' 'interfaces on the same subnet are in a different VLAN. ' 'The Data Mover requires all interfaces in the same subnet ' 'to be in the same VLAN.' 'Specify a VLAN to match other interfaces in the same ' 'subnet. To move multiple interfaces to a different VLAN, ' 'first set the VLAN id on each interface to 0, ' 'and then set their VLAN id\'s to the new VLAN number.' '' '' ) @start_task def req_delete_interface(self, ip=FakeData.network_allocations_ip1): return ( '' % {'ip': ip, 'mover_id': self.mover_id, } ) @response def resp_delete_interface_but_nonexistent(self): return ( '' '' '' '' '' '' ) def cmd_get_interconnect_id(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/nas_cel', '-interconnect', '-l', ] def output_get_interconnect_id(self): return ( 'id name source_server destination_system destination_server\n' '%(id)s loopback %(src_server)s nas149 %(dest_server)s\n' % {'id': self.interconnect_id, 'src_server': self.mover_name, 'dest_server': self.mover_name} ) def cmd_get_physical_devices(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_sysconfig', self.mover_name, '-pci', ] def output_get_physical_devices(self): return ( 'server_2 : PCI DEVICES:\n' 'On Board:\n' ' PMC QE8 Fibre Channel Controller\n' ' 0: fcp-0-0 IRQ: 20 addr: 5006016047a00245\n' ' 0: fcp-0-1 IRQ: 21 addr: 5006016147a00245\n' ' 0: fcp-0-2 IRQ: 22 addr: 5006016247a00245\n' ' 0: fcp-0-3 IRQ: 23 addr: 5006016347a00245\n' ' Broadcom Gigabit Ethernet Controller\n' ' 0: cge-1-0 IRQ: 24\n' ' speed=auto duplex=auto txflowctl=disable rxflowctl=disable\n' ' Link: Up\n' ' 0: cge-1-1 IRQ: 25\n' ' speed=auto duplex=auto txflowctl=disable rxflowctl=disable\n' ' Link: Down\n' ' 0: cge-1-2 IRQ: 26\n' ' speed=auto duplex=auto txflowctl=disable rxflowctl=disable\n' ' Link: Down\n' ' 0: cge-1-3 IRQ: 27\n' ' speed=auto duplex=auto 
txflowctl=disable rxflowctl=disable\n' ' Link: Up\n' 'Slot: 4\n' ' PLX PCI-Express Switch Controller\n' ' 1: PLX PEX8648 IRQ: 10\n' ) class DNSDomainTestData(StorageObjectTestData): def __init__(self): super(DNSDomainTestData, self).__init__() @start_task def req_create(self, ip_addr=None): if ip_addr is None: ip_addr = self.dns_ip_address return ( '' % {'mover_id': self.mover_id, 'domain_name': self.domain_name, 'server_ips': ip_addr} ) @start_task def req_delete(self): return ( '' % {'mover_id': self.mover_id, 'domain_name': self.domain_name} ) class CIFSServerTestData(StorageObjectTestData): def __init__(self): super(CIFSServerTestData, self).__init__() @start_task def req_create(self, mover_id, is_vdm=True, ip_addr=None): if ip_addr is None: ip_addr = self.ip_address1 return ( '' '' '
  • %(alias)s
  • ' '' '
    ' % {'ip': ip_addr, 'comp_name': self.cifs_server_name, 'name': self.cifs_server_name[-14:], 'mover_id': mover_id, 'alias': self.cifs_server_name[-12:], 'domain_user': self.domain_user, 'domain_password': self.domain_password, 'domain': self.domain_name, 'is_vdm': 'true' if is_vdm else 'false'} ) @query def req_get(self, mover_id, is_vdm=True): return ( '' '' '' % {'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false'} ) @response def resp_get_succeed(self, mover_id, is_vdm, join_domain, cifs_server_name=None, ip_addr=None): if cifs_server_name is None: cifs_server_name = self.cifs_server_name if ip_addr is None: ip_addr = self.ip_address1 return ( '' '' '
  • %(alias)s
  • ' % {'mover_id': mover_id, 'cifsserver': self.cifs_server_name[-14:], 'ip': ip_addr, 'is_vdm': 'true' if is_vdm else 'false', 'alias': self.cifs_server_name[-12:], 'domain': self.domain_name, 'join_domain': 'true' if join_domain else 'false', 'comp_name': cifs_server_name} ) @response def resp_get_without_interface(self, mover_id, is_vdm, join_domain): return ( '' '' '
  • %(alias)s
  • ' '
    ' % {'mover_id': mover_id, 'cifsserver': self.cifs_server_name[-14:], 'is_vdm': 'true' if is_vdm else 'false', 'alias': self.cifs_server_name[-12:], 'domain': self.domain_name, 'join_domain': 'true' if join_domain else 'false', 'comp_name': self.cifs_server_name} ) @start_task def req_modify(self, mover_id, is_vdm=True, join_domain=False): return ( '' '' '' % {'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'join_domain': 'true' if join_domain else 'false', 'cifsserver': self.cifs_server_name[-14:], 'username': self.domain_user, 'pw': self.domain_password} ) @response def resp_modify_but_already_join_domain(self): return ( ' ' '' 'Fake description' 'Fake action.' '' ' ' ) @response def resp_modify_but_unjoin_domain(self): return ( ' ' '' 'Fake description' 'Fake action.' '' ' ' ) @start_task def req_delete(self, mover_id, is_vdm=True): return ( '' % {'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'cifsserver': self.cifs_server_name[-14:]} ) class CIFSShareTestData(StorageObjectTestData): def __init__(self): super(CIFSShareTestData, self).__init__() @start_task def req_create(self, mover_id, is_vdm=True): return ( '' '' '
  • %(cifsserver)s
  • ' '
    ' % {'path': '/' + self.share_name, 'share_name': self.share_name, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'cifsserver': self.cifs_server_name[-14:]} ) @start_task def req_delete(self, mover_id, is_vdm=True): return ( '' '
  • %(cifsserver)s
  • ' '
    ' % {'share_name': self.share_name, 'mover_id': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'cifsserver': self.cifs_server_name[-12:]} ) @query def req_get(self): return '' % self.share_name @response def resp_get_succeed(self, mover_id, is_vdm=True): return ( '' '' '
  • %(alias)s
  • ' '
    ' '
    ' % {'path': self.path, 'fsid': self.filesystem_id, 'name': self.share_name, 'moverid': mover_id, 'is_vdm': 'true' if is_vdm else 'false', 'alias': self.cifs_server_name[-12:]} ) def cmd_disable_access(self): cmd_str = 'sharesd %s set noaccess' % self.share_name return [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', self.vdm_name, '-v', '%s' % cmd_str, ] def cmd_change_access(self, access_level=const.ACCESS_LEVEL_RW, action='grant', user=None): if user is None: user = self.domain_user account = user + '@' + self.domain_name if access_level == const.ACCESS_LEVEL_RW: str_access = 'fullcontrol' else: str_access = 'read' allow_str = ( 'sharesd %(share_name)s %(action)s %(account)s=%(access)s' % {'share_name': self.share_name, 'action': action, 'account': account, 'access': str_access} ) return [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', self.vdm_name, '-v', '%s' % allow_str, ] def cmd_get_access(self): get_str = 'sharesd %s dump' % self.share_name return [ 'env', 'NAS_DB=/nas', '/nas/bin/.server_config', self.vdm_name, '-v', '%s' % get_str, ] def output_allow_access(self): return ( "Command succeeded: :3 sharesd %(share)s grant " "%(user)s@%(domain)s=fullcontrol" % {'share': self.share_name, 'user': self.domain_user, 'domain': self.domain_name} ) def output_allow_access_but_duplicate_ace(self): return ( '%(vdm_name)s : commands processed: 1' 'output is complete' '1443422844: SMB: 6: ACE for %(domain)s\\%(user)s ' 'unchanged' '1443422844: ADMIN: 3: ' 'Command failed: :23 ' 'sharesd %(share)s grant %(user)s@%(domain)s=read' 'Error 4020: %(vdm_name)s : failed to complete command"' % {'share': self.share_name, 'user': self.domain_user, 'domain': self.domain_name, 'vdm_name': self.vdm_name} ) def output_deny_access_but_no_ace(self): return ( '%(vdm_name)s : commands processed: 1' 'output is complete' '1443515516: SMB: 6: No ACE found for %(domain)s\\%(user)s ' '1443515516: ADMIN: 3: ' 'Command failed: :26 ' 'sharesd %(share)s revoke %(user)s@%(domain)s=read' 
'Error 4020: %(vdm_name)s : failed to complete command"' % {'share': self.share_name, 'user': self.domain_user, 'domain': self.domain_name, 'vdm_name': self.vdm_name} ) def output_deny_access_but_no_user_found(self): return ( '%(vdm_name)s : commands processed: 1' 'output is complete' '1443520322: SMB: 6: Cannot get mapping for %(domain)s\\%(user)s ' '1443520322: ADMIN: 3: ' 'Command failed: :26 ' 'sharesd %(share)s revoke %(user)s@%(domain)s=read' 'Error 4020: %(vdm_name)s : failed to complete command"' % {'share': self.share_name, 'user': self.domain_user, 'domain': self.domain_name, 'vdm_name': self.vdm_name} ) class NFSShareTestData(StorageObjectTestData): def __init__(self): super(NFSShareTestData, self).__init__() def cmd_create(self): default_access = 'access=-0.0.0.0/0.0.0.0' return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', self.vdm_name, '-option', default_access, self.path, ] def output_create(self): return "%s : done" % self.vdm_name def cmd_get(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', self.vdm_name, '-P', 'nfs', '-list', self.path, ] def output_get_succeed(self, rw_hosts, ro_hosts): rw_hosts = [utils.convert_ipv6_format_if_needed(ip_addr) for ip_addr in rw_hosts] ro_hosts = [utils.convert_ipv6_format_if_needed(ip_addr) for ip_addr in ro_hosts] if rw_hosts and ro_hosts: return ( '%(mover_name)s :\nexport "%(path)s" ' 'access=%(host)s:-0.0.0.0/0.0.0.0 root=%(host)s ' 'rw=%(rw_host)s ro=%(ro_host)s\n' % {'mover_name': self.vdm_name, 'path': self.path, 'host': ":".join(rw_hosts + ro_hosts), 'rw_host': ":".join(rw_hosts), 'ro_host': ":".join(ro_hosts)} ) elif rw_hosts: return ( '%(mover_name)s :\nexport "%(path)s" ' 'access=%(host)s:-0.0.0.0/0.0.0.0 root=%(host)s ' 'rw=%(rw_host)s\n' % {'mover_name': self.vdm_name, 'host': ":".join(rw_hosts), 'path': self.path, 'rw_host': ":".join(rw_hosts)} ) elif ro_hosts: return ( '%(mover_name)s :\nexport "%(path)s" ' 'access=%(host)s:-0.0.0.0/0.0.0.0 root=%(host)s ' 'ro=%(ro_host)s\n' % 
{'mover_name': self.vdm_name, 'host': ":".join(ro_hosts), 'path': self.path, 'ro_host': ":".join(ro_hosts)} ) else: return ( '%(mover_name)s :\nexport "%(path)s" ' 'access=-0.0.0.0/0.0.0.0\n' % {'mover_name': self.vdm_name, 'path': self.path} ) def output_get_but_not_found(self): return ( '%(mover_name)s : \nError 2: %(mover_name)s : ' 'No such file or directory \n' % {'mover_name': self.vdm_name} ) def cmd_delete(self): return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', self.vdm_name, '-unexport', '-perm', self.path, ] def output_delete_succeed(self): return "%s : done" % self.vdm_name def output_delete_but_locked(self): return ("Error 2201: %s : unable to acquire lock(s), try later" % self.vdm_name) def cmd_set_access(self, rw_hosts, ro_hosts): rw_hosts = [utils.convert_ipv6_format_if_needed(ip_addr) for ip_addr in rw_hosts] ro_hosts = [utils.convert_ipv6_format_if_needed(ip_addr) for ip_addr in ro_hosts] access_str = ("access=%(access_hosts)s:-0.0.0.0/0.0.0.0," "root=%(root_hosts)s,rw=%(rw_hosts)s,ro=%(ro_hosts)s" % {'rw_hosts': ":".join(rw_hosts), 'ro_hosts': ":".join(ro_hosts), 'root_hosts': ":".join(rw_hosts + ro_hosts), 'access_hosts': ":".join(rw_hosts + ro_hosts)}) return [ 'env', 'NAS_DB=/nas', '/nas/bin/server_export', self.vdm_name, '-ignore', '-option', access_str, self.path, ] def output_set_access_success(self): return "%s : done" % self.vdm_name class FakeEMCShareDriver(object): def __init__(self, enas_type='vnx'): self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.emc_share_backend = FakeData.emc_share_backend self.configuration.vnx_server_container = FakeData.mover_name if enas_type == 'powermax': self.configuration.emc_share_backend = ( FakeData.powermax_share_backend) self.configuration.vmax_server_container = FakeData.mover_name self.configuration.emc_nas_server = FakeData.emc_nas_server self.configuration.emc_nas_login = FakeData.emc_nas_login 
self.configuration.emc_nas_password = FakeData.emc_nas_password self.configuration.share_backend_name = FakeData.share_backend_name CIFS_SHARE = fake_share.fake_share( id=FakeData.share_id, name=FakeData.share_name, size=FakeData.share_size, share_network_id=FakeData.share_network_id, share_server_id=FakeData.share_server_id, host=FakeData.host, share_proto='CIFS') NFS_SHARE = fake_share.fake_share( id=FakeData.share_id, name=FakeData.share_name, size=FakeData.share_size, share_network_id=FakeData.share_network_id, share_server_id=FakeData.share_server_id, host=FakeData.host, share_proto='NFS') CIFS_RW_ACCESS = fake_share.fake_access( access_type='user', access_to=FakeData.domain_user, access_level='rw') CIFS_RO_ACCESS = fake_share.fake_access( access_type='user', access_to=FakeData.domain_user, access_level='ro') NFS_RW_ACCESS = fake_share.fake_access( access_type='ip', access_to=FakeData.nfs_host_ip, access_level='rw') NFS_RW_ACCESS_IPV6 = fake_share.fake_access( access_type='ip', access_to=FakeData.nfs_host_ipv6, access_level='rw') NFS_RO_ACCESS = fake_share.fake_access( access_type='ip', access_to=FakeData.nfs_host_ip, access_level='ro') NFS_RO_ACCESS_IPV6 = fake_share.fake_access( access_type='ip', access_to=FakeData.nfs_host_ipv6, access_level='ro') SHARE_SERVER = { 'id': FakeData.share_server_id, 'share_network': { 'name': 'fake_share_network', 'id': FakeData.share_network_id }, 'share_network_id': FakeData.share_network_id, 'backend_details': { 'share_server_name': FakeData.vdm_name, 'cifs_if': FakeData.network_allocations_ip1, 'nfs_if': FakeData.network_allocations_ip2, } } SHARE_SERVER_IPV6 = { 'id': FakeData.share_server_id, 'share_network': { 'name': 'fake_share_network', 'id': FakeData.share_network_id }, 'share_network_id': FakeData.share_network_id, 'backend_details': { 'share_server_name': FakeData.vdm_name, 'cifs_if': FakeData.network_allocations_ip3, 'nfs_if': FakeData.network_allocations_ip4, } } SERVER_DETAIL = { 'share_server_name': 
FakeData.vdm_name, 'cifs_if': FakeData.network_allocations_ip1, 'nfs_if': FakeData.network_allocations_ip2, } SERVER_DETAIL_IPV6 = { 'share_server_name': FakeData.vdm_name, 'cifs_if': FakeData.network_allocations_ip3, 'nfs_if': FakeData.network_allocations_ip4, } SECURITY_SERVICE = [ { 'type': 'active_directory', 'domain': FakeData.domain_name, 'dns_ip': FakeData.dns_ip_address, 'user': FakeData.domain_user, 'password': FakeData.domain_password }, ] SECURITY_SERVICE_IPV6 = [ { 'type': 'active_directory', 'domain': FakeData.domain_name, 'dns_ip': FakeData.dns_ipv6_address, 'user': FakeData.domain_user, 'password': FakeData.domain_password }, ] NETWORK_INFO = { 'server_id': FakeData.share_server_id, 'cidr': FakeData.cidr, 'security_services': [ {'type': 'active_directory', 'domain': FakeData.domain_name, 'dns_ip': FakeData.dns_ip_address, 'user': FakeData.domain_user, 'password': FakeData.domain_password}, ], 'segmentation_id': FakeData.segmentation_id, 'network_type': 'vlan', 'network_allocations': [ {'id': FakeData.network_allocations_id1, 'ip_address': FakeData.network_allocations_ip1, 'ip_version': FakeData.network_allocations_ip_version1}, {'id': FakeData.network_allocations_id2, 'ip_address': FakeData.network_allocations_ip2, 'ip_version': FakeData.network_allocations_ip_version2} ] } NETWORK_INFO_IPV6 = { 'server_id': FakeData.share_server_id, 'cidr': FakeData.cidr_v6, 'security_services': [ {'type': 'active_directory', 'domain': FakeData.domain_name, 'dns_ip': FakeData.dns_ipv6_address, 'user': FakeData.domain_user, 'password': FakeData.domain_password}, ], 'segmentation_id': FakeData.segmentation_id, 'network_type': 'vlan', 'network_allocations': [ {'id': FakeData.network_allocations_id3, 'ip_address': FakeData.network_allocations_ip3, 'ip_version': FakeData.network_allocations_ip_version3}, {'id': FakeData.network_allocations_id4, 'ip_address': FakeData.network_allocations_ip4, 'ip_version': FakeData.network_allocations_ip_version4} ] } STATS = dict( 
share_backend_name='VNX', vendor_name='EMC', storage_protocol='NFS_CIFS', driver_version='2.0.0,') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/common/enas/test_connector.py0000664000175000017500000001754600000000000027643 0ustar00zuulzuul00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from urllib import error as url_error from urllib import request as url_request from eventlet import greenthread from oslo_concurrency import processutils from manila import exception from manila.share import configuration as conf from manila.share.drivers.dell_emc.common.enas import connector from manila import ssh_utils from manila import test from manila.tests.share.drivers.dell_emc.common.enas import fakes from manila.tests.share.drivers.dell_emc.common.enas import utils as enas_utils class XMLAPIConnectorTestData(object): FAKE_BODY = '' FAKE_RESP = '' FAKE_METHOD = 'fake_method' FAKE_KEY = 'key' FAKE_VALUE = 'value' @staticmethod def req_auth_url(): return 'https://' + fakes.FakeData.emc_nas_server + '/Login' @staticmethod def req_credential(): return ( 'user=' + fakes.FakeData.emc_nas_login + '&password=' + fakes.FakeData.emc_nas_password + '&Login=Login' ).encode() @staticmethod def req_url_encode(): return {'Content-Type': 'application/x-www-form-urlencoded'} @staticmethod def 
req_url(): return ( 'https://' + fakes.FakeData.emc_nas_server + '/servlets/CelerraManagementServices' ) XML_CONN_TD = XMLAPIConnectorTestData class XMLAPIConnectorTest(test.TestCase): @mock.patch.object(url_request, 'Request', mock.Mock()) def setUp(self): super(XMLAPIConnectorTest, self).setUp() emc_share_driver = fakes.FakeEMCShareDriver() self.configuration = emc_share_driver.configuration xml_socket = mock.Mock() xml_socket.read = mock.Mock(return_value=XML_CONN_TD.FAKE_RESP) opener = mock.Mock() opener.open = mock.Mock(return_value=xml_socket) with mock.patch.object(url_request, 'build_opener', mock.Mock(return_value=opener)): self.XmlConnector = connector.XMLAPIConnector( configuration=self.configuration, debug=False) expected_calls = [ mock.call(XML_CONN_TD.req_auth_url(), XML_CONN_TD.req_credential(), XML_CONN_TD.req_url_encode()), ] url_request.Request.assert_has_calls(expected_calls) def test_request_with_debug(self): self.XmlConnector.debug = True request = mock.Mock() request.headers = {XML_CONN_TD.FAKE_KEY: XML_CONN_TD.FAKE_VALUE} request.get_full_url = mock.Mock( return_value=XML_CONN_TD.FAKE_VALUE) with mock.patch.object(url_request, 'Request', mock.Mock(return_value=request)): rsp = self.XmlConnector.request(XML_CONN_TD.FAKE_BODY, XML_CONN_TD.FAKE_METHOD) self.assertEqual(XML_CONN_TD.FAKE_RESP, rsp) def test_request_with_no_authorized_exception(self): xml_socket = mock.Mock() xml_socket.read = mock.Mock(return_value=XML_CONN_TD.FAKE_RESP) hook = enas_utils.RequestSideEffect() hook.append(ex=url_error.HTTPError(XML_CONN_TD.req_url(), '403', 'fake_message', None, None)) hook.append(xml_socket) hook.append(xml_socket) self.XmlConnector.url_opener.open = mock.Mock(side_effect=hook) self.XmlConnector.request(XML_CONN_TD.FAKE_BODY) def test_request_with_general_exception(self): hook = enas_utils.RequestSideEffect() hook.append(ex=url_error.HTTPError(XML_CONN_TD.req_url(), 'error_code', 'fake_message', None, None)) self.XmlConnector.url_opener.open = 
mock.Mock(side_effect=hook) self.assertRaises(exception.ManilaException, self.XmlConnector.request, XML_CONN_TD.FAKE_BODY) class MockSSH(object): def __enter__(self): return self def __exit__(self, type, value, traceback): pass class MockSSHPool(object): def __init__(self): self.ssh = MockSSH() def item(self): try: return self.ssh finally: pass class CmdConnectorTest(test.TestCase): def setUp(self): super(CmdConnectorTest, self).setUp() self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.emc_nas_login = fakes.FakeData.emc_nas_login self.configuration.emc_nas_password = fakes.FakeData.emc_nas_password self.configuration.emc_nas_server = fakes.FakeData.emc_nas_server self.configuration.emc_ssl_cert_verify = False self.configuration.emc_ssl_cert_path = None self.sshpool = MockSSHPool() with mock.patch.object(ssh_utils, "SSHPool", mock.Mock(return_value=self.sshpool)): self.CmdHelper = connector.SSHConnector( configuration=self.configuration, debug=False) ssh_utils.SSHPool.assert_called_once_with( ip=fakes.FakeData.emc_nas_server, port=22, conn_timeout=None, login=fakes.FakeData.emc_nas_login, password=fakes.FakeData.emc_nas_password) def test_run_ssh(self): with mock.patch.object(processutils, "ssh_execute", mock.Mock(return_value=('fake_output', ''))): cmd_list = ['fake', 'cmd'] self.CmdHelper.run_ssh(cmd_list) processutils.ssh_execute.assert_called_once_with( self.sshpool.item(), 'fake cmd', check_exit_code=False) def test_run_ssh_with_debug(self): self.CmdHelper.debug = True with mock.patch.object(processutils, "ssh_execute", mock.Mock(return_value=('fake_output', ''))): cmd_list = ['fake', 'cmd'] self.CmdHelper.run_ssh(cmd_list) processutils.ssh_execute.assert_called_once_with( self.sshpool.item(), 'fake cmd', check_exit_code=False) @mock.patch.object( processutils, "ssh_execute", mock.Mock(side_effect=processutils.ProcessExecutionError)) def test_run_ssh_exception(self): cmd_list = 
['fake', 'cmd'] self.mock_object(greenthread, 'sleep', mock.Mock()) sshpool = MockSSHPool() with mock.patch.object(ssh_utils, "SSHPool", mock.Mock(return_value=sshpool)): self.CmdHelper = connector.SSHConnector(self.configuration) self.assertRaises(processutils.ProcessExecutionError, self.CmdHelper.run_ssh, cmd_list, True) ssh_utils.SSHPool.assert_called_once_with( ip=fakes.FakeData.emc_nas_server, port=22, conn_timeout=None, login=fakes.FakeData.emc_nas_login, password=fakes.FakeData.emc_nas_password) processutils.ssh_execute.assert_called_once_with( sshpool.item(), 'fake cmd', check_exit_code=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/common/enas/test_utils.py0000664000175000017500000001366300000000000027005 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ssl from unittest import mock import ddt from manila.share.drivers.dell_emc.common.enas import utils from manila import test @ddt.ddt class ENASUtilsTestCase(test.TestCase): @ddt.data({'full': ['cge-1-0', 'cge-1-1', 'cge-3-0', 'cge-3-1', 'cge-12-3'], 'matchers': ['cge-?-0', 'cge-3*', 'foo'], 'matched': set(['cge-1-0', 'cge-3-0', 'cge-3-1']), 'unmatched': set(['cge-1-1', 'cge-12-3'])}, {'full': ['cge-1-0', 'cge-1-1'], 'matchers': ['cge-1-0'], 'matched': set(['cge-1-0']), 'unmatched': set(['cge-1-1'])}, {'full': ['cge-1-0', 'cge-1-1'], 'matchers': ['foo'], 'matched': set([]), 'unmatched': set(['cge-1-0', 'cge-1-1'])}) @ddt.unpack def test_do_match_any(self, full, matchers, matched, unmatched): real_matched, real_unmatched = utils.do_match_any( full, matchers) self.assertEqual(matched, real_matched) self.assertEqual(unmatched, real_unmatched) class SslContextTestCase(test.TestCase): def test_create_ssl_context(self): configuration = mock.Mock() configuration.emc_ssl_cert_verify = True configuration.emc_ssl_cert_path = "./cert_path/" self.mock_object(ssl, 'create_default_context') context = utils.create_ssl_context(configuration) self.assertIsNotNone(context) def test_create_ssl_context_no_verify(self): configuration = mock.Mock() configuration.emc_ssl_cert_verify = False self.mock_object(ssl, 'create_default_context') context = utils.create_ssl_context(configuration) self.assertFalse(context.check_hostname) def test_no_create_default_context(self): """Test scenario of running on python 2.7.8 or earlier.""" configuration = mock.Mock() configuration.emc_ssl_cert_verify = False self.mock_object(ssl, 'create_default_context', mock.Mock(side_effect=AttributeError)) context = utils.create_ssl_context(configuration) self.assertIsNone(context) @ddt.ddt class ParseIpaddrTestCase(test.TestCase): @ddt.data({'lst_ipaddr': ['192.168.100.101', '192.168.100.102', '192.168.100.103']}, {'lst_ipaddr': ['[fdf8:f53b:82e4::57]', '[fdf8:f53b:82e4::54]', '[fdf8:f53b:82e4::55]']}, 
{'lst_ipaddr': ['[fdf8:f53b:82e4::57]', '[fdf8:f53b:82e4::54]', '192.168.100.103', '[fdf8:f53b:82e4::55]']}, {'lst_ipaddr': ['192.168.100.101', '[fdf8:f53b:82e4::57]', '[fdf8:f53b:82e4::54]', '192.168.100.101', '[fdf8:f53b:82e4::55]', '192.168.100.102']},) @ddt.unpack def test_parse_ipv4_addr(self, lst_ipaddr): self.assertEqual(lst_ipaddr, utils.parse_ipaddr(':'.join(lst_ipaddr))) @ddt.ddt class ConvertIPv6FormatTestCase(test.TestCase): @ddt.data({'ip_addr': 'fdf8:f53b:82e4::55'}, {'ip_addr': 'fdf8:f53b:82e4::55/64'}, {'ip_addr': 'fdf8:f53b:82e4::55/128'}) @ddt.unpack def test_ipv6_addr(self, ip_addr): expected_ip_addr = '[%s]' % ip_addr self.assertEqual(expected_ip_addr, utils.convert_ipv6_format_if_needed(ip_addr)) @ddt.data({'ip_addr': '192.168.1.100'}, {'ip_addr': '192.168.1.100/24'}, {'ip_addr': '192.168.1.100/32'}, {'ip_addr': '[fdf8:f53b:82e4::55]'}) @ddt.unpack def test_invalid_ipv6_addr(self, ip_addr): self.assertEqual(ip_addr, utils.convert_ipv6_format_if_needed(ip_addr)) @ddt.ddt class ExportUncPathTestCase(test.TestCase): @ddt.data({'ip_addr': 'fdf8:f53b:82e4::55'}, {'ip_addr': 'fdf8:f53b:82e4::'}, {'ip_addr': '2018::'}) @ddt.unpack def test_ipv6_addr(self, ip_addr): expected_ip_addr = '%s.ipv6-literal.net' % ip_addr.replace(':', '-') self.assertEqual(expected_ip_addr, utils.export_unc_path(ip_addr)) @ddt.data({'ip_addr': '192.168.1.100'}, {'ip_addr': '192.168.1.100/24'}, {'ip_addr': '192.168.1.100/32'}, {'ip_addr': 'fdf8:f53b:82e4::55/64'}, {'ip_addr': 'fdf8:f53b:82e4::55/128'}, {'ip_addr': '[fdf8:f53b:82e4::55]'}) @ddt.unpack def test_invalid_ipv6_addr(self, ip_addr): self.assertEqual(ip_addr, utils.export_unc_path(ip_addr)) @ddt.ddt class SizeToGbTestCase(test.TestCase): @ddt.data({'size_in_bytes': 1073741824, 'size_in_gb': 1.0}, {'size_in_bytes': 5610301030, 'size_in_gb': 5.22}) @ddt.unpack def test_bytes_to_gb(self, size_in_bytes, size_in_gb): self.assertEqual(size_in_gb, utils.bytes_to_gb(size_in_bytes)) @ddt.data({'size_in_mb': 1024, 
'size_in_gb': 1.0}, {'size_in_mb': 5346, 'size_in_gb': 5.22}) @ddt.unpack def test_mb_to_gb(self, size_in_mb, size_in_gb): self.assertEqual(size_in_gb, utils.mb_to_gb(size_in_mb)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/common/enas/utils.py0000664000175000017500000001246700000000000025747 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import doctest from unittest import mock from lxml import doctestcompare CHECKER = doctestcompare.LXMLOutputChecker() PARSE_XML = doctest.register_optionflag('PARSE_XML') class RequestSideEffect(object): def __init__(self): self.actions = [] self.started = False def append(self, resp=None, ex=None): if not self.started: self.actions.append((resp, ex)) def __call__(self, *args, **kwargs): if not self.started: self.started = True self.actions.reverse() item = self.actions.pop() if item[1]: raise item[1] else: return item[0] class SSHSideEffect(object): def __init__(self): self.actions = [] self.started = False def append(self, resp=None, err=None, ex=None): if not self.started: self.actions.append((resp, err, ex)) def __call__(self, rel_url, req_data=None, method=None, return_rest_err=True, *args, **kwargs): if not self.started: self.started = True self.actions.reverse() item = self.actions.pop() if item[2]: raise item[2] else: if return_rest_err: return item[0:2] else: return item[1] class EMCMock(mock.Mock): def _get_req_from_call(self, call): if len(call) == 3: return call[1][0] elif len(call) == 2: return call[0][0] def assert_has_calls(self, calls): if len(calls) != len(self.mock_calls): raise AssertionError( 'Mismatch error.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) iter_expect = iter(calls) iter_actual = iter(self.mock_calls) while True: try: expect = self._get_req_from_call(next(iter_expect)) actual = self._get_req_from_call(next(iter_actual)) except StopIteration: return True if not isinstance(expect, bytes): expect = expect.encode("latin-1") if not isinstance(actual, bytes): actual = actual.encode("latin-1") if not CHECKER.check_output(expect, actual, PARSE_XML): raise AssertionError( 'Mismatch error.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) class EMCNFSShareMock(mock.Mock): def assert_has_calls(self, calls): if len(calls) != len(self.mock_calls): raise AssertionError( 'Mismatch error.\nExpected: %r\n' 'Actual: %r' % 
(calls, self.mock_calls) ) iter_expect = iter(calls) iter_actual = iter(self.mock_calls) while True: try: expect = next(iter_expect)[1][0] actual = next(iter_actual)[1][0] except StopIteration: return True if not self._option_check(expect, actual): raise AssertionError( 'Mismatch error.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) def _option_parser(self, option): option_map = {} for item in option.split(','): key, value = item.split('=') option_map[key] = value return option_map @staticmethod def _opt_value_from_map(opt_map, key): value = opt_map.get(key) if value: ret = set(value.split(':')) else: ret = set() return ret def _option_check(self, expect, actual): if '-option' in actual and '-option' in expect: exp_option = expect[expect.index('-option') + 1] act_option = actual[actual.index('-option') + 1] exp_opt_map = self._option_parser(exp_option) act_opt_map = self._option_parser(act_option) for key in exp_opt_map: exp_set = self._opt_value_from_map(exp_opt_map, key) act_set = self._opt_value_from_map(act_opt_map, key) if exp_set != act_set: return False return True def patch_get_managed_ports_vnx(*arg, **kwargs): return mock.patch('manila.share.drivers.dell_emc.plugins.vnx.connection.' 'VNXStorageConnection.get_managed_ports', mock.Mock(*arg, **kwargs)) def patch_get_managed_ports_powermax(*arg, **kwargs): return mock.patch( 'manila.share.drivers.dell_emc.plugins.powermax.connection.' 
'PowerMaxStorageConnection.get_managed_ports', mock.Mock(*arg, **kwargs)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0016706 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/0000775000175000017500000000000000000000000023466 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/__init__.py0000664000175000017500000000000000000000000025565 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0016706 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/0000775000175000017500000000000000000000000025501 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/__init__.py0000664000175000017500000000000000000000000027600 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0056705 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/0000775000175000017500000000000000000000000026777 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/create_filesystem_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/create_filesystem_respons0000664000175000017500000000006500000000000034203 0ustar00zuulzuul00000000000000{ "id": "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 
path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/create_nfs_export_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/create_nfs_export_respons0000664000175000017500000000006500000000000034206 0ustar00zuulzuul00000000000000{ "id": "6433a2b2-6d60-f737-9f3b-2a50fb1ccff3" } ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/create_nfs_snapshot_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/create_nfs_snapshot_respo0000664000175000017500000000006500000000000034163 0ustar00zuulzuul00000000000000{ "id": "6433b635-6c1f-878e-6467-2a50fb1ccff3" } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_fileystem_id_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_fileystem_id_response0000664000175000017500000000010500000000000034150 0ustar00zuulzuul00000000000000[ { "id": "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" } ] ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_fsid_from_export_name_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_fsid_from_export_name0000664000175000017500000000012100000000000034124 0ustar00zuulzuul00000000000000[ { "file_system_id": "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" } ] ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_fsid_from_snapshot_name_response.json 22 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_fsid_from_snapshot_na0000664000175000017500000000010500000000000034122 0ustar00zuulzuul00000000000000[ { "id": "6433b635-6c1f-878e-6467-2a50fb1ccff3" } ] ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_nas_server_id_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_nas_server_id_respons0000664000175000017500000000010500000000000034151 0ustar00zuulzuul00000000000000[ { "id": "64132f37-d33e-9d4a-89ba-d625520a4779" } ] ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_nfs_export_id_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_nfs_export_id_respons0000664000175000017500000000010500000000000034171 0ustar00zuulzuul00000000000000[ { "id": "6433a2b2-6d60-f737-9f3b-2a50fb1ccff3" } ] ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_nfs_export_name_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_nfs_export_name_respo0000664000175000017500000000112300000000000034155 0ustar00zuulzuul00000000000000{ "id": "6433a2b2-6d60-f737-9f3b-2a50fb1ccff3", "file_system_id": "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3", "name": "Manila-UT-filesystem", "path": "/Manila-UT-filesystem", "description": null, "default_access": "NO_ACCESS", "min_security": "SYS", "nfs_owner_username": "root", "no_access_hosts": [], "read_only_hosts": [], "read_only_root_hosts": [], "read_write_hosts": [], "read_write_root_hosts": [], "anonymous_UID": -2, "anonymous_GID": -2, 
"is_no_SUID": false, "default_access_l10n": null, "min_security_l10n": null } ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_storage_pool_id_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_storage_pool_id_respo0000664000175000017500000000002200000000000034134 0ustar00zuulzuul00000000000000"28515fee00000000"././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_storage_pool_spare_percentage.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_storage_pool_spare_pe0000664000175000017500000001007000000000000034132 0ustar00zuulzuul00000000000000{ "name": "Env8-SP-SW_SSD-1", "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos", "rebalanceIoPriorityPolicy": "favorAppIos", "vtreeMigrationIoPriorityPolicy": "favorAppIos", "protectedMaintenanceModeIoPriorityPolicy": "limitNumOfConcurrentIos", "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1, "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1, "vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice": 1, "protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice": 1, "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240, "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240, "vtreeMigrationIoPriorityBwLimitPerDeviceInKbps": 10240, "protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps": 10240, "rebuildIoPriorityAppIopsPerDeviceThreshold": null, "rebalanceIoPriorityAppIopsPerDeviceThreshold": null, "vtreeMigrationIoPriorityAppIopsPerDeviceThreshold": null, "protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold": null, "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": null, "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null, "zeroPaddingEnabled": true, 
"vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps": null, "protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps": null, "rebuildIoPriorityQuietPeriodInMsec": null, "rebalanceIoPriorityQuietPeriodInMsec": null, "vtreeMigrationIoPriorityQuietPeriodInMsec": null, "protectedMaintenanceModeIoPriorityQuietPeriodInMsec": null, "useRmcache": false, "backgroundScannerMode": "DataComparison", "backgroundScannerBWLimitKBps": 3072, "fglAccpId": null, "fglMetadataSizeXx100": null, "fglNvdimmWriteCacheSizeInMb": null, "fglNvdimmMetadataAmortizationX100": null, "mediaType": "SSD", "rmcacheWriteHandlingMode": "Cached", "checksumEnabled": false, "rebalanceEnabled": true, "fragmentationEnabled": true, "numOfParallelRebuildRebalanceJobsPerDevice": 2, "bgScannerCompareErrorAction": "ReportAndFix", "bgScannerReadErrorAction": "ReportAndFix", "externalAccelerationType": "None", "compressionMethod": "Invalid", "fglExtraCapacity": null, "fglOverProvisioningFactor": null, "fglWriteAtomicitySize": null, "fglMaxCompressionRatio": null, "fglPerfProfile": null, "replicationCapacityMaxRatio": 35, "persistentChecksumEnabled": true, "persistentChecksumState": "Protected", "persistentChecksumBuilderLimitKb": 3072, "protectionDomainId": "95c5a8b100000000", "rebuildEnabled": true, "dataLayout": "MediumGranularity", "persistentChecksumValidateOnRead": false, "spClass": "Nas", "addressSpaceUsage": "Normal", "useRfcache": false, "sparePercentage": 34, "capacityAlertHighThreshold": 66, "capacityAlertCriticalThreshold": 83, "capacityUsageState": "Normal", "addressSpaceUsageType": "DeviceCapacityLimit", "capacityUsageType": "NetCapacity", "id": "28515fee00000000", "links": [ { "rel": "self", "href": "/api/instances/StoragePool::28515fee00000000" }, { "rel": "/api/StoragePool/relationship/Statistics", "href": "/api/instances/StoragePool::28515fee00000000/relationships/Statistics" }, { "rel": "/api/StoragePool/relationship/SpSds", "href": 
"/api/instances/StoragePool::28515fee00000000/relationships/SpSds" }, { "rel": "/api/StoragePool/relationship/Volume", "href": "/api/instances/StoragePool::28515fee00000000/relationships/Volume" }, { "rel": "/api/StoragePool/relationship/Device", "href": "/api/instances/StoragePool::28515fee00000000/relationships/Device" }, { "rel": "/api/StoragePool/relationship/VTree", "href": "/api/instances/StoragePool::28515fee00000000/relationships/VTree" }, { "rel": "/api/parent/relationship/protectionDomainId", "href": "/api/instances/ProtectionDomain::95c5a8b100000000" } ] } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_storage_pool_statistic.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_storage_pool_statisti0000664000175000017500000002643700000000000034216 0ustar00zuulzuul00000000000000{ "backgroundScanFixedReadErrorCount": 0, "pendingMovingOutBckRebuildJobs": 0, "degradedHealthyCapacityInKb": 0, "activeMovingOutFwdRebuildJobs": 0, "bckRebuildWriteBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "netFglUncompressedDataSizeInKb": 0, "primaryReadFromDevBwc": { "numSeconds": 5, "totalWeightInKb": 1188, "numOccured": 18 }, "BackgroundScannedInMB": 233566, "volumeIds": [ "2488c46c00000003", "2488c46d00000004", "2488c46e00000005", "2488c46f00000006", "2488c47000000007", "2488c47100000008", "2488c47200000009", "2488c4730000000a", "2488eb7e00000001" ], "maxUserDataCapacityInKb": 3185378304, "persistentChecksumBuilderProgress": 100, "rfcacheReadsSkippedAlignedSizeTooLarge": 0, "pendingMovingInRebalanceJobs": 0, "rfcacheWritesSkippedHeavyLoad": 0, "unusedCapacityInKb": 3132161024, "userDataSdcReadLatency": { "numSeconds": 5, "totalWeightInKb": 31971, "numOccured": 45 }, "totalReadBwc": { "numSeconds": 5, "totalWeightInKb": 4644, "numOccured": 45 }, "numOfDeviceAtFaultRebuilds": 0, 
"totalWriteBwc": { "numSeconds": 5, "totalWeightInKb": 76, "numOccured": 19 }, "persistentChecksumCapacityInKb": 2359296, "rmPendingAllocatedInKb": 0, "numOfVolumes": 12, "rfcacheIosOutstanding": 0, "numOfMappedToAllVolumes": 0, "capacityAvailableForVolumeAllocationInKb": 1551892480, "netThinUserDataCapacityInKb": 26608640, "backgroundScanFixedCompareErrorCount": 0, "volMigrationWriteBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "thinAndSnapshotRatio": 6.9356937, "pendingMovingInEnterProtectedMaintenanceModeJobs": 0, "fglUserDataCapacityInKb": 0, "activeMovingInNormRebuildJobs": 0, "aggregateCompressionLevel": "Uncompressed", "targetOtherLatency": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "netUserDataCapacityInKb": 26608640, "pendingMovingOutExitProtectedMaintenanceModeJobs": 0, "overallUsageRatio": 6.9356937, "volMigrationReadBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "rfcacheReadsSkippedInternalError": 0, "netCapacityInUseNoOverheadInKb": 26608640, "pendingMovingInBckRebuildJobs": 0, "activeBckRebuildCapacityInKb": 0, "rebalanceCapacityInKb": 0, "pendingMovingInExitProtectedMaintenanceModeJobs": 0, "rfcacheReadsSkippedLowResources": 0, "rplJournalCapAllowed": 1115684864, "userDataSdcTrimLatency": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "thinCapacityInUseInKb": 0, "activeMovingInEnterProtectedMaintenanceModeJobs": 0, "rfcacheWritesSkippedInternalError": 0, "netUserDataCapacityNoTrimInKb": 26608640, "rfcacheWritesSkippedCacheMiss": 0, "degradedFailedCapacityInKb": 0, "activeNormRebuildCapacityInKb": 0, "numOfMigratingVolumes": 0, "fglSparesInKb": 0, "snapCapacityInUseInKb": 0, "compressionRatio": 1, "rfcacheWriteMiss": 0, "primaryReadFromRmcacheBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "migratingVtreeIds": [], "numOfVtrees": 12, "userDataCapacityNoTrimInKb": 53217280, "rfacheReadHit": 0, "rplUsedJournalCap": 21504, "compressedDataCompressionRatio": 0, 
"pendingMovingCapacityInKb": 0, "numOfSnapshots": 0, "pendingFwdRebuildCapacityInKb": 0, "tempCapacityInKb": 0, "totalFglMigrationSizeInKb": 0, "normRebuildCapacityInKb": 0, "logWrittenBlocksInKb": 0, "numOfThickBaseVolumes": 0, "primaryWriteBwc": { "numSeconds": 5, "totalWeightInKb": 36, "numOccured": 9 }, "enterProtectedMaintenanceModeReadBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "activeRebalanceCapacityInKb": 0, "numOfReplicationJournalVolumes": 3, "rfcacheReadsSkippedLockIos": 0, "unreachableUnusedCapacityInKb": 0, "netProvisionedAddressesInKb": 26608640, "trimmedUserDataCapacityInKb": 0, "provisionedAddressesInKb": 53217280, "numOfVolumesInDeletion": 0, "maxCapacityInKb": 4826330112, "pendingMovingOutFwdRebuildJobs": 0, "rmPendingThickInKb": 0, "protectedCapacityInKb": 53217280, "secondaryWriteBwc": { "numSeconds": 5, "totalWeightInKb": 40, "numOccured": 10 }, "normRebuildReadBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "thinCapacityAllocatedInKb": 369098752, "netFglUserDataCapacityInKb": 0, "metadataOverheadInKb": 0, "thinCapacityAllocatedInKm": 369098752, "rebalanceWriteBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "primaryVacInKb": 184549376, "deviceIds": [ "fbdbf0a700000000", "fbdef0a800010000", "fbdff0a600020000" ], "secondaryVacInKb": 184549376, "netSnapshotCapacityInKb": 0, "numOfDevices": 3, "rplTotalJournalCap": 25165824, "failedCapacityInKb": 0, "netMetadataOverheadInKb": 0, "activeMovingOutBckRebuildJobs": 0, "rfcacheReadsFromCache": 0, "pendingMovingInNormRebuildJobs": 0, "enterProtectedMaintenanceModeCapacityInKb": 0, "activeMovingOutEnterProtectedMaintenanceModeJobs": 0, "primaryReadBwc": { "numSeconds": 5, "totalWeightInKb": 4644, "numOccured": 45 }, "failedVacInKb": 0, "fglCompressedDataSizeInKb": 0, "fglUncompressedDataSizeInKb": 0, "pendingRebalanceCapacityInKb": 0, "rfcacheAvgReadTime": 0, "semiProtectedCapacityInKb": 0, "pendingMovingOutEnterProtectedMaintenanceModeJobs": 0, 
"mgUserDdataCcapacityInKb": 53217280, "netMgUserDataCapacityInKb": 26608640, "snapshotCapacityInKb": 0, "fwdRebuildReadBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "rfcacheWritesReceived": 0, "netUnusedCapacityInKb": 1566080512, "thinUserDataCapacityInKb": 53217280, "protectedVacInKb": 369098752, "bckRebuildCapacityInKb": 0, "activeMovingInFwdRebuildJobs": 0, "activeMovingRebalanceJobs": 0, "netTrimmedUserDataCapacityInKb": 0, "pendingMovingRebalanceJobs": 0, "numOfMarkedVolumesForReplication": 1, "degradedHealthyVacInKb": 0, "semiProtectedVacInKb": 0, "userDataReadBwc": { "numSeconds": 5, "totalWeightInKb": 4644, "numOccured": 45 }, "pendingBckRebuildCapacityInKb": 0, "capacityLimitInKb": 4826330112, "vtreeIds": [ "3ad4906800000003", "3ad4906900000004", "3ad4906a00000005", "3ad4906b00000006", "3ad4906c00000007", "3ad4906d00000008", "3ad4906e00000009", "3ad4906f0000000a", "3ad4b77700000002", "3ad4b7780000000b", "3ad4b7790000000c", "3ad4b77a00000000" ], "activeMovingCapacityInKb": 0, "targetWriteLatency": { "numSeconds": 5, "totalWeightInKb": 11392, "numOccured": 9 }, "pendingExitProtectedMaintenanceModeCapacityInKb": 0, "rfcacheIosSkipped": 0, "userDataWriteBwc": { "numSeconds": 5, "totalWeightInKb": 40, "numOccured": 10 }, "inMaintenanceVacInKb": 0, "exitProtectedMaintenanceModeReadBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "netFglSparesInKb": 0, "rfcacheReadsSkipped": 0, "activeExitProtectedMaintenanceModeCapacityInKb": 0, "activeMovingOutExitProtectedMaintenanceModeJobs": 0, "numOfUnmappedVolumes": 4, "tempCapacityVacInKb": 0, "volumeAddressSpaceInKb": 184549376, "currentFglMigrationSizeInKb": 0, "rfcacheWritesSkippedMaxIoSize": 0, "netMaxUserDataCapacityInKb": 1592689152, "numOfMigratingVtrees": 0, "atRestCapacityInKb": 26608640, "rfacheWriteHit": 0, "bckRebuildReadBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "rfcacheSourceDeviceWrites": 0, "spareCapacityInKb": 1640951808, 
"enterProtectedMaintenanceModeWriteBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "rfcacheIoErrors": 0, "normRebuildWriteBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "inaccessibleCapacityInKb": 0, "capacityInUseInKb": 53217280, "rebalanceReadBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "rfcacheReadsSkippedMaxIoSize": 0, "activeMovingInExitProtectedMaintenanceModeJobs": 0, "secondaryReadFromDevBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "secondaryReadBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "rfcacheWritesSkippedStuckIo": 0, "secondaryReadFromRmcacheBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "inMaintenanceCapacityInKb": 0, "exposedCapacityInKb": 0, "netFglCompressedDataSizeInKb": 0, "userDataSdcWriteLatency": { "numSeconds": 5, "totalWeightInKb": 17356, "numOccured": 10 }, "inUseVacInKb": 369098752, "fwdRebuildCapacityInKb": 0, "thickCapacityInUseInKb": 0, "backgroundScanReadErrorCount": 0, "activeMovingInRebalanceJobs": 0, "migratingVolumeIds": [], "rfcacheWritesSkippedLowResources": 0, "capacityInUseNoOverheadInKb": 53217280, "exitProtectedMaintenanceModeWriteBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "rfcacheSkippedUnlinedWrite": 0, "netCapacityInUseInKb": 26608640, "numOfOutgoingMigrations": 0, "rfcacheAvgWriteTime": 0, "pendingNormRebuildCapacityInKb": 0, "pendingMovingOutNormrebuildJobs": 0, "rfcacheSourceDeviceReads": 0, "rfcacheReadsPending": 0, "volumeAllocationLimitInKb": 15745417216, "fwdRebuildWriteBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "rfcacheReadsSkippedHeavyLoad": 0, "rfcacheReadMiss": 0, "targetReadLatency": { "numSeconds": 5, "totalWeightInKb": 8488, "numOccured": 18 }, "userDataCapacityInKb": 53217280, "activeMovingInBckRebuildJobs": 0, "movingCapacityInKb": 0, "activeEnterProtectedMaintenanceModeCapacityInKb": 0, "backgroundScanCompareErrorCount": 0, 
"pendingMovingInFwdRebuildJobs": 0, "rfcacheReadsReceived": 0, "spSdsIds": [ "ebb7772100020000", "ebb7772200000000", "ebb6772300010000" ], "pendingEnterProtectedMaintenanceModeCapacityInKb": 0, "vtreeAddresSpaceInKb": 184549376, "snapCapacityInUseOccupiedInKb": 0, "activeFwdRebuildCapacityInKb": 0, "rfcacheReadsSkippedStuckIo": 0, "activeMovingOutNormRebuildJobs": 0, "rfcacheWritePending": 0, "numOfThinBaseVolumes": 12, "degradedFailedVacInKb": 0, "userDataTrimBwc": { "numSeconds": 0, "totalWeightInKb": 0, "numOccured": 0 }, "numOfIncomingVtreeMigrations": 0 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/login_response.json0000664000175000017500000000737100000000000032730 0ustar00zuulzuul00000000000000{ "scope": "openid profile email", "access_token": "eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICI5QTNCYXpXRGRvdEdQcTM3TkQyVHNDSmhiVXYwOXprb2hMWG9tNE94bXVRIn0.eyJleHAiOjE2ODExMTcwNDIsImlhdCI6MTY4MTExNjc0MiwianRpIjoiMGZiMjk4MmUtMjJmZC00MDhjLWI4MmMtYTMwNTEyNDk4NGQ4IiwiaXNzIjoiaHR0cHM6Ly9wZmxleDRlbnY4LnBpZS5sYWIuZW1jLmNvbS9hdXRoL3JlYWxtcy9wb3dlcmZsZXgiLCJhdWQiOlsiUG93ZXJmbGV4U2VydmljZXMiLCJhY2NvdW50Il0sInN1YiI6IjEzMzRhMDAxLWU2MmItNDdhYy1iZGNlLWIyNmVlZThiMjAyZiIsInR5cCI6IkJlYXJlciIsImF6cCI6InBvd2VyZmxleFJlc3QiLCJzZXNzaW9uX3N0YXRlIjoiMzU0MDhiNTQtNWE0ZS00ODNlLTkwYTUtZDA2M2ZjMDFkY2JlIiwiYWNyIjoiMSIsImFsbG93ZWQtb3JpZ2lucyI6WyIqIl0sInJlYWxtX2FjY2VzcyI6eyJyb2xlcyI6WyJTdXBlclVzZXIiXX0sInJlc291cmNlX2FjY2VzcyI6eyJQb3dlcmZsZXhTZXJ2aWNlcyI6eyJyb2xlcyI6WyJzdGFuZGFyZCIsIlJlYWRPbmx5IiwiQWRtaW5pc3RyYXRvciIsIm9wZXJhdG9yIl19LCJhY2NvdW50Ijp7InJvbGVzIjpbIm1hbmFnZS1hY2NvdW50IiwibWFuYWdlLWFjY291bnQtbGlua3MiLCJ2aWV3LXByb2ZpbGUiXX19LCJzY29wZSI6Im9wZW5pZCBwcm9maWxlIGVtYWlsIiwic2lkIjoiMzU0MDhiNTQtNWE0ZS00ODNlLTkwYTUtZDA2M2ZjMDFkY2JlIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsInBvd2VyZmxleCI6eyJwZXJtaXNzaW9ucyI6eyJTdXBlclVzZXIiOlsiR0xCOkdMQiJdfX0sIm5hbWUiOiJhZG1pbiBhZG1
pbiIsInByZWZlcnJlZF91c2VybmFtZSI6ImFkbWluIiwiZ2l2ZW5fbmFtZSI6ImFkbWluIiwiZmFtaWx5X25hbWUiOiJhZG1pbiIsImVtYWlsIjoiYWRtaW5AZXhhbXBsZS5jb20ifQ.D78oxRxnf6hE238Wd9rVlm7L7ZpA_qqsHH_igqyA_ELtX-I3k0VMvOAKdpTOci5qEcMQYTgwQQ09ADUApw12wOxhgU_WCbSGdq07Emqfnb9Yw2vD1m6_sNNMrHOfgWXlpjZq6tS7ew7MGlnymzZXuUMRdPoI4QYZ8XDyIaqprHmJ3P1W4am9PAOWcciRMgwJo9t0LhJl2yP8fQKVgRXxnTAUVja1TYk_U8huKv9oqQR3dYLVJrGuBv8-YOvnS_RXNhUcZQUf0AGJzEG9Vjfk8MpuhuvAqjbiTQYei5rxosfxje3eVCEifEezxkZzdr_BFs1XQ-Df_Ll6m_psoxL7bA", "expires_in": 300, "refresh_expires_in": 1800, "refresh_token": "eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJjZmU2NGYxNi05ZGRmLTQyYmUtYmVkMi04ZjMyZWNjM2RkYzAifQ.eyJleHAiOjE2ODExMTg1NDIsImlhdCI6MTY4MTExNjc0MiwianRpIjoiNzMxNGViZDgtNWU4Yy00N2MxLTg5OGMtYjkyZTFhYTg0ZGZlIiwiaXNzIjoiaHR0cHM6Ly9wZmxleDRlbnY4LnBpZS5sYWIuZW1jLmNvbS9hdXRoL3JlYWxtcy9wb3dlcmZsZXgiLCJhdWQiOiJodHRwczovL3BmbGV4NGVudjgucGllLmxhYi5lbWMuY29tL2F1dGgvcmVhbG1zL3Bvd2VyZmxleCIsInN1YiI6IjEzMzRhMDAxLWU2MmItNDdhYy1iZGNlLWIyNmVlZThiMjAyZiIsInR5cCI6IlJlZnJlc2giLCJhenAiOiJwb3dlcmZsZXhSZXN0Iiwic2Vzc2lvbl9zdGF0ZSI6IjM1NDA4YjU0LTVhNGUtNDgzZS05MGE1LWQwNjNmYzAxZGNiZSIsInNjb3BlIjoib3BlbmlkIHByb2ZpbGUgZW1haWwiLCJzaWQiOiIzNTQwOGI1NC01YTRlLTQ4M2UtOTBhNS1kMDYzZmMwMWRjYmUifQ.12EA6mujHEmsC49adECuqWrqhsfCnQHv5aGo_hipSsw", "token_type": "Bearer", "id_token": 
"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICI5QTNCYXpXRGRvdEdQcTM3TkQyVHNDSmhiVXYwOXprb2hMWG9tNE94bXVRIn0.eyJleHAiOjE2ODExMTcwNDIsImlhdCI6MTY4MTExNjc0MiwiYXV0aF90aW1lIjowLCJqdGkiOiI5YTg0ZDM4OC1iNTc5LTQ2ZGEtYjBkNC1mZjdlYzQ1MzA0MmMiLCJpc3MiOiJodHRwczovL3BmbGV4NGVudjgucGllLmxhYi5lbWMuY29tL2F1dGgvcmVhbG1zL3Bvd2VyZmxleCIsImF1ZCI6WyJwb3dlcmZsZXhSZXN0IiwiUG93ZXJmbGV4U2VydmljZXMiXSwic3ViIjoiMTMzNGEwMDEtZTYyYi00N2FjLWJkY2UtYjI2ZWVlOGIyMDJmIiwidHlwIjoiSUQiLCJhenAiOiJwb3dlcmZsZXhSZXN0Iiwic2Vzc2lvbl9zdGF0ZSI6IjM1NDA4YjU0LTVhNGUtNDgzZS05MGE1LWQwNjNmYzAxZGNiZSIsImF0X2hhc2giOiJielhNLVVrN0dHNl9lQlpVNTVXUVd3IiwiYWNyIjoiMSIsInNpZCI6IjM1NDA4YjU0LTVhNGUtNDgzZS05MGE1LWQwNjNmYzAxZGNiZSIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJwb3dlcmZsZXgiOnsicGVybWlzc2lvbnMiOnsiU3VwZXJVc2VyIjpbIkdMQjpHTEIiXX19LCJuYW1lIjoiYWRtaW4gYWRtaW4iLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJhZG1pbiIsImdpdmVuX25hbWUiOiJhZG1pbiIsImZhbWlseV9uYW1lIjoiYWRtaW4iLCJlbWFpbCI6ImFkbWluQGV4YW1wbGUuY29tIn0.MVOfN10vq7VD75HMV4N2SYiGpVtnGRpXGFu3WLFPBrQjZrwFkKFb6gmtijw0Onz3xBcg7Eq7asd8lKcBaQ03LY_ru0DXpoStAlCd8z1Vfs2J5boYwn41QHrzwLn0VJK4w6zyHWbRXpK33gTNKjyX0L_JM_o2ZaCJZX8Hxvhb96-LAanbOBtwl1KR-umBHWh6FQOt43YRXAwQSo4Qz425taTmrb2U-LUu1hVZz8GjUmi2dakor6tRgT1ysxM7-9lsNXrFpgZk0XynKpxPg3yDxCdSEkIyoCGB8RH617kN4P1sGicWIk_swDZekwR23LNUiG9tjedaHTriuNAkQZ5a3w", "session_state": "35408b54-5a4e-483e-90a5-d063fc01dcbe" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/test_connection.py0000664000175000017500000004655700000000000031272 0ustar00zuulzuul00000000000000# Copyright (c) 2023 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_log import log from oslo_utils import units from manila.common import constants as const from manila import exception from manila.share.drivers.dell_emc.plugins.powerflex import connection from manila import test LOG = log.getLogger(__name__) @ddt.ddt class PowerFlexTest(test.TestCase): """Integration test for the PowerFlex Manila driver.""" POWERFLEX_ADDR = "192.168.0.110" SHARE_NAME = "Manila-UT-filesystem" STORAGE_POOL_ID = "28515fee00000000" FILESYSTEM_ID = "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" NFS_EXPORT_ID = "6433a2b2-6d60-f737-9f3b-2a50fb1ccff3" NFS_EXPORT_NAME = "Manila-UT-filesystem" SNAPSHOT_NAME = "Manila-UT-filesystem-snap" SNAPSHOT_PATH = "Manila-UT-filesystem" SNAPSHOT_ID = "75758d63-2946-4c07-9118-9a6c6027d5e7" NAS_SERVER_IP = "192.168.11.23" class MockConfig(object): def safe_get(self, value): if value == "dell_nas_backend_host": return "192.168.0.110" elif value == "dell_nas_backend_port": return 443 elif value == "dell_nas_login": return "admin" elif value == "dell_nas_password": return "pwd" elif value == "powerflex_storage_pool": return "Env8-SP-SW_SSD-1" elif value == "powerflex_protection_domain": return "Env8-PD-1" elif value == "dell_nas_server": return "env8nasserver" else: return None @mock.patch( "manila.share.drivers.dell_emc.plugins.powerflex.object_manager." 
"StorageObjectManager", autospec=True, ) def setUp(self, mock_powerflex_manager): super(PowerFlexTest, self).setUp() self._mock_powerflex_manager = mock_powerflex_manager.return_value self.storage_connection = connection.PowerFlexStorageConnection(LOG) self.mock_context = mock.Mock("Context") self.mock_emc_driver = mock.Mock("EmcDriver") self._mock_config = self.MockConfig() self.mock_emc_driver.attach_mock(self._mock_config, "configuration") self.storage_connection.connect( self.mock_emc_driver, self.mock_context ) @mock.patch( "manila.share.drivers.dell_emc.plugins.powerflex.object_manager." "StorageObjectManager", autospec=True, ) def test_connect(self, mock_powerflex_manager): storage_connection = connection.PowerFlexStorageConnection(LOG) # execute method under test storage_connection.connect(self.mock_emc_driver, self.mock_context) # verify connect sets driver params appropriately mock_config = self.MockConfig() server_addr = mock_config.safe_get("dell_nas_backend_host") self.assertEqual(server_addr, storage_connection.rest_ip) expected_port = int(mock_config.safe_get("dell_nas_backend_port")) self.assertEqual(expected_port, storage_connection.rest_port) self.assertEqual( "https://{0}:{1}".format(server_addr, expected_port), storage_connection.host_url, ) expected_username = mock_config.safe_get("dell_nas_login") self.assertEqual(expected_username, storage_connection.rest_username) expected_password = mock_config.safe_get("dell_nas_password") self.assertEqual(expected_password, storage_connection.rest_password) expected_erify_certificates = mock_config.safe_get( "dell_ssl_cert_verify" ) self.assertEqual( expected_erify_certificates, storage_connection.verify_certificate ) def test_create_share_nfs(self): self._mock_powerflex_manager.get_storage_pool_id.return_value = ( self.STORAGE_POOL_ID ) self._mock_powerflex_manager.create_filesystem.return_value = ( self.FILESYSTEM_ID ) self._mock_powerflex_manager.create_nfs_export.return_value = ( self.NFS_EXPORT_ID ) 
self._mock_powerflex_manager.get_nfs_export_name.return_value = ( self.NFS_EXPORT_NAME ) self._mock_powerflex_manager.get_nas_server_interfaces.return_value = ( [self.NAS_SERVER_IP] ) self.assertFalse( self._mock_powerflex_manager.get_storage_pool_id.called ) self.assertFalse(self._mock_powerflex_manager.create_filesystem.called) self.assertFalse(self._mock_powerflex_manager.create_nfs_export.called) self.assertFalse( self._mock_powerflex_manager.get_nfs_export_name.called ) # create the share share = {"name": self.SHARE_NAME, "share_proto": "NFS", "size": 8} locations = self.storage_connection.create_share( self.mock_context, share, None ) # verify location and API call made expected_locations = [{"path": "%s:/%s" % ( self.NAS_SERVER_IP, self.SHARE_NAME, )}] self.assertEqual(expected_locations, locations) self._mock_powerflex_manager.get_storage_pool_id.assert_called_with( self._mock_config.safe_get("powerflex_protection_domain"), self._mock_config.safe_get("powerflex_storage_pool"), ) self._mock_powerflex_manager.create_filesystem.assert_called_with( self.STORAGE_POOL_ID, self._mock_config.safe_get("dell_nas_server"), self.SHARE_NAME, 8 * units.Gi, ) self._mock_powerflex_manager.create_nfs_export.assert_called_with( self.FILESYSTEM_ID, self.SHARE_NAME ) self._mock_powerflex_manager.get_nfs_export_name.assert_called_with( self.NFS_EXPORT_ID ) def test_create_share_nfs_filesystem_id_not_found(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS", "size": 8} self._mock_powerflex_manager.create_filesystem.return_value = None self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share, self.mock_context, share, share_server=None, ) def test_create_share_nfs_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS", "size": 8} self._mock_powerflex_manager.create_nfs_export.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share, self.mock_context, share, 
share_server=None, ) def test_create_snapshot(self): self._mock_powerflex_manager.get_fsid_from_export_name.return_value = ( self.FILESYSTEM_ID ) self._mock_powerflex_manager.create_snapshot.return_value = True snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SNAPSHOT_PATH, "id": self.SNAPSHOT_ID, } self.storage_connection.create_snapshot( self.mock_context, snapshot, None ) # verify the create snapshot API call is executed self._mock_powerflex_manager.get_fsid_from_export_name. \ assert_called_with( self.SNAPSHOT_PATH ) self._mock_powerflex_manager.create_snapshot.assert_called_with( self.SNAPSHOT_NAME, self.FILESYSTEM_ID ) def test_create_snapshot_failure(self): self._mock_powerflex_manager.get_fsid_from_export_name.return_value = ( self.FILESYSTEM_ID ) self._mock_powerflex_manager.create_snapshot.return_value = False snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SNAPSHOT_PATH, "id": self.SNAPSHOT_ID, } self.storage_connection.create_snapshot( self.mock_context, snapshot, None ) def test_delete_share_nfs(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS"} self._mock_powerflex_manager.get_filesystem_id.return_value = ( self.FILESYSTEM_ID ) self.assertFalse(self._mock_powerflex_manager.get_filesystem_id.called) self.assertFalse(self._mock_powerflex_manager.delete_filesystem.called) # delete the share self.storage_connection.delete_share(self.mock_context, share, None) # verify share delete self._mock_powerflex_manager.get_filesystem_id.assert_called_with( self.SHARE_NAME ) self._mock_powerflex_manager.delete_filesystem.assert_called_with( self.FILESYSTEM_ID ) def test_delete_nfs_share_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS"} self._mock_powerflex_manager.delete_filesystem.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.delete_share, self.mock_context, share, None, ) def test_delete_nfs_share_share_does_not_exist(self): 
self._mock_powerflex_manager.get_filesystem_id.return_value = None share = {"name": self.SHARE_NAME, "share_proto": "NFS"} # verify the calling delete on a non-existent share returns and does # not throw exception self.storage_connection.delete_share(self.mock_context, share, None) self.assertTrue(self._mock_powerflex_manager.get_filesystem_id.called) self.assertFalse(self._mock_powerflex_manager.delete_filesystem.called) def test_delete_snapshot(self): self._mock_powerflex_manager.get_fsid_from_snapshot_name. \ return_value = ( self.FILESYSTEM_ID ) self.assertFalse( self._mock_powerflex_manager.get_fsid_from_snapshot_name.called ) self.assertFalse(self._mock_powerflex_manager.delete_filesystem.called) # delete the created snapshot snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SNAPSHOT_PATH, "id": self.SNAPSHOT_ID, } self.storage_connection.delete_snapshot( self.mock_context, snapshot, None ) # verify the API call was made to delete the snapshot self._mock_powerflex_manager.get_fsid_from_snapshot_name. \ assert_called_with( self.SNAPSHOT_NAME ) self._mock_powerflex_manager.delete_filesystem.assert_called_with( self.FILESYSTEM_ID ) def test_delete_snapshot_backend_failure(self): self._mock_powerflex_manager.get_fsid_from_snapshot_name. \ return_value = ( self.FILESYSTEM_ID ) self._mock_powerflex_manager.delete_filesystem.return_value = False self.assertFalse( self._mock_powerflex_manager.get_fsid_from_snapshot_name.called ) self.assertFalse(self._mock_powerflex_manager.delete_filesystem.called) snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SNAPSHOT_PATH, "id": self.SNAPSHOT_ID, } # verify the API call was made to delete the snapshot self.assertRaises( exception.ShareBackendException, self.storage_connection.delete_snapshot, self.mock_context, snapshot, None, ) self._mock_powerflex_manager.get_fsid_from_snapshot_name. 
\ assert_called_with( self.SNAPSHOT_NAME ) self._mock_powerflex_manager.delete_filesystem.assert_called_with( self.FILESYSTEM_ID ) def test_extend_share(self): new_share_size = 20 share = { "name": self.SHARE_NAME, "share_proto": "NFS", "size": new_share_size, } self._mock_powerflex_manager.get_filesystem_id.return_value = ( self.FILESYSTEM_ID ) self.assertFalse(self._mock_powerflex_manager.get_filesystem_id.called) self.storage_connection.extend_share(share, new_share_size) self._mock_powerflex_manager.get_filesystem_id.assert_called_with( self.SHARE_NAME ) expected_quota_size = new_share_size * units.Gi self._mock_powerflex_manager.extend_export.assert_called_once_with( self.FILESYSTEM_ID, expected_quota_size ) def test_update_access_add_nfs(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS"} self._mock_powerflex_manager.get_nfs_export_id.return_value = ( self.NFS_EXPORT_ID ) self._mock_powerflex_manager.set_export_access.return_value = True self.assertFalse(self._mock_powerflex_manager.get_nfs_export_id.called) self.assertFalse(self._mock_powerflex_manager.set_export_access.called) nfs_rw_ip = "192.168.0.10" nfs_ro_ip = "192.168.0.11" nfs_access_rw = { "access_type": "ip", "access_to": nfs_rw_ip, "access_level": const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } nfs_access_ro = { "access_type": "ip", "access_to": nfs_ro_ip, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } access_rules = [nfs_access_rw, nfs_access_ro] self.storage_connection.update_access( self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) self._mock_powerflex_manager.get_nfs_export_id.assert_called_once_with( self.SHARE_NAME ) self._mock_powerflex_manager.set_export_access.assert_called_once_with( self.NFS_EXPORT_ID, {nfs_rw_ip}, {nfs_ro_ip} ) def test_update_access_add_nfs_invalid_acess_type(self): share = { "name": self.SHARE_NAME, "share_proto": "NFS", 
"display_name": "foo_display_name", } nfs_rw_ip = "192.168.0.10" nfs_ro_ip = "192.168.0.11" nfs_access_rw = { "access_type": "invalid_type", "access_to": nfs_rw_ip, "access_level": const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } nfs_access_ro = { "access_type": "invalid_type", "access_to": nfs_ro_ip, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd09", } access_rules = [nfs_access_rw, nfs_access_ro] self._mock_powerflex_manager.get_nfs_export_id.return_value = ( self.NFS_EXPORT_ID ) access_updates = self.storage_connection.update_access( self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) self._mock_powerflex_manager.set_export_access.assert_called_once_with( self.NFS_EXPORT_ID, set(), set() ) self.assertIsNotNone(access_updates) def test_update_access_add_nfs_backend_failure(self): share = { "name": self.SHARE_NAME, "share_proto": "NFS", "display_name": "foo_display_name", } self._mock_powerflex_manager.get_nfs_export_id.return_value = ( self.NFS_EXPORT_ID ) self._mock_powerflex_manager.set_export_access.return_value = False self.assertFalse(self._mock_powerflex_manager.get_nfs_export_id.called) self.assertFalse(self._mock_powerflex_manager.set_export_access.called) nfs_rw_ip = "192.168.0.10" nfs_ro_ip = "192.168.0.11" nfs_access_rw = { "access_type": "ip", "access_to": nfs_rw_ip, "access_level": const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } nfs_access_ro = { "access_type": "ip", "access_to": nfs_ro_ip, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } access_rules = [nfs_access_rw, nfs_access_ro] self.assertRaises( exception.ShareBackendException, self.storage_connection.update_access, self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) def test_update_share_stats(self): data = dict( share_backend_name='powerflex', vendor_name='Dell 
EMC', storage_protocol='NFS_CIFS', snapshot_support=True, create_share_from_snapshot_support=True) stats = dict( maxCapacityInKb=4826330112, capacityInUseInKb=53217280, netUnusedCapacityInKb=1566080512, primaryVacInKb=184549376) self._mock_powerflex_manager.get_storage_pool_id.return_value = ( self.STORAGE_POOL_ID ) self._mock_powerflex_manager.get_storage_pool_statistic. \ return_value = stats self.storage_connection.update_share_stats(data) self.assertEqual(data['storage_protocol'], 'NFS') self.assertEqual(data['create_share_from_snapshot_support'], False) self.assertEqual(data['driver_version'], connection.VERSION) self.assertIsNotNone(data['pools']) def test_get_default_filter_function(self): filter = self.storage_connection.get_default_filter_function() self.assertEqual(filter, "share.size >= 3") def test_create_share_from_snapshot(self): self.assertRaises( NotImplementedError, self.storage_connection.create_share_from_snapshot, self.mock_context, share=None, snapshot=None, ) def test_allow_access(self): self.assertRaises( NotImplementedError, self.storage_connection.allow_access, self.mock_context, share=None, access=None, share_server=None, ) def test_deny_access(self): self.assertRaises( NotImplementedError, self.storage_connection.deny_access, self.mock_context, share=None, access=None, share_server=None, ) def test_setup_server(self): self.assertRaises( NotImplementedError, self.storage_connection.setup_server, network_info=None, ) def test_teardown_server(self): self.assertRaises( NotImplementedError, self.storage_connection.teardown_server, server_details=None, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerflex/test_object_manager.py0000664000175000017500000004225100000000000032056 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Dell Inc. or its subsidiaries. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client import json import os import ddt import requests_mock from manila import exception from manila.share.drivers.dell_emc.plugins.powerflex import ( object_manager as manager ) from manila import test @ddt.ddt class StorageObjectManagerTestCase(test.TestCase): def setUp(self): super(StorageObjectManagerTestCase, self).setUp() self._mock_url = "https://192.168.0.110:443" self.manager = manager.StorageObjectManager( self._mock_url, username="admin", password="pwd", export_path=None ) self.mockup_file_base = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'mockup') @ddt.data(False, True) def test__get_headers(self, got_token): self.manager.got_token = got_token self.manager.rest_token = "token_str" self.assertEqual( self.manager._get_headers().get("Authorization") is not None, got_token, ) def _getJsonFile(self, filename): f = open(os.path.join(self.mockup_file_base, filename)) data = json.load(f) f.close() return data @requests_mock.mock() def test_get_nas_server_id(self, m): nas_server = "env8nasserver" self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_id_response( m, nas_server, self._getJsonFile("get_nas_server_id_response.json") ) id = self.manager.get_nas_server_id(nas_server) self.assertEqual(id, "64132f37-d33e-9d4a-89ba-d625520a4779") def _add_get_nas_server_id_response(self, m, nas_server, json_str): url = "{0}/rest/v1/nas-servers?select=id&name=eq.{1}".format( 
self._mock_url, nas_server ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_create_filesystem(self, m): nas_server = "env8nasserver" self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_id_response( m, nas_server, self._getJsonFile("get_nas_server_id_response.json") ) storage_pool_id = "8515fee00000000" self._add_create_filesystem_response( m, self._getJsonFile("create_filesystem_response.json") ) id = self.manager.create_filesystem( storage_pool_id, nas_server, name="Manila-filesystem", size=3221225472, ) self.assertEqual(id, "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3") def _add_create_filesystem_response(self, m, json_str): url = "{0}/rest/v1/file-systems".format(self._mock_url) m.post(url, status_code=201, json=json_str) @requests_mock.mock() def test_create_nfs_export(self, m): filesystem_id = "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" name = "Manila-UT-filesystem" self.assertEqual(0, len(m.request_history)) self._add_create_nfs_export_response( m, self._getJsonFile("create_nfs_export_response.json") ) id = self.manager.create_nfs_export(filesystem_id, name) self.assertEqual(id, "6433a2b2-6d60-f737-9f3b-2a50fb1ccff3") def _add_create_nfs_export_response(self, m, json_str): url = "{0}/rest/v1/nfs-exports".format(self._mock_url) m.post(url, status_code=201, json=json_str) @requests_mock.mock() def test_delete_filesystem(self, m): filesystem_id = "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" self.assertEqual(0, len(m.request_history)) self._add_delete_filesystem_response(m, filesystem_id) result = self.manager.delete_filesystem(filesystem_id) self.assertEqual(result, True) def _add_delete_filesystem_response(self, m, filesystem_id): url = "{0}/rest/v1/file-systems/{1}".format( self._mock_url, filesystem_id ) m.delete(url, status_code=204) @requests_mock.mock() def test_create_snapshot(self, m): name = "Manila-UT-filesystem-snap" filesystem_id = "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" self.assertEqual(0, len(m.request_history)) 
self._add_create_snapshot_response( m, filesystem_id, self._getJsonFile("create_nfs_snapshot_response.json"), ) result = self.manager.create_snapshot(name, filesystem_id) self.assertEqual(result, True) def _add_create_snapshot_response(self, m, filesystem_id, json_str): url = "{0}/rest/v1/file-systems/{1}/snapshot".format( self._mock_url, filesystem_id ) m.post(url, status_code=201, json=json_str) @requests_mock.mock() def test_get_nfs_export_name(self, m): export_id = "6433a2b2-6d60-f737-9f3b-2a50fb1ccff3" self.assertEqual(0, len(m.request_history)) self._add_get_nfs_export_name_response( m, export_id, self._getJsonFile("get_nfs_export_name_response.json"), ) name = self.manager.get_nfs_export_name(export_id) self.assertEqual(name, "Manila-UT-filesystem") def _add_get_nfs_export_name_response(self, m, export_id, json_str): url = "{0}/rest/v1/nfs-exports/{1}?select=*".format( self._mock_url, export_id ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_filesystem_id(self, m): name = "Manila-UT-filesystem" self.assertEqual(0, len(m.request_history)) self._add_get_filesystem_id_response( m, name, self._getJsonFile("get_fileystem_id_response.json") ) id = self.manager.get_filesystem_id(name) self.assertEqual(id, "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3") def _add_get_filesystem_id_response(self, m, name, json_str): url = "{0}/rest/v1/file-systems?select=id&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_nfs_export_id(self, m): name = "Manila-UT-filesystem" self.assertEqual(0, len(m.request_history)) self._add_get_nfs_export_id_response( m, name, self._getJsonFile("get_nfs_export_id_response.json") ) id = self.manager.get_nfs_export_id(name) self.assertEqual(id, "6433a2b2-6d60-f737-9f3b-2a50fb1ccff3") def _add_get_nfs_export_id_response(self, m, name, json_str): url = "{0}/rest/v1/nfs-exports?select=id&name=eq.{1}".format( self._mock_url, name ) m.get(url, 
status_code=200, json=json_str) @requests_mock.mock() def test_get_storage_pool_id(self, m): protection_domain_name = "Env8-PD-1" storage_pool_name = "Env8-SP-SW_SSD-1" self.assertEqual(0, len(m.request_history)) self._add_get_storage_pool_id_response( m, self._getJsonFile("get_storage_pool_id_response.json") ) id = self.manager.get_storage_pool_id( protection_domain_name, storage_pool_name ) self.assertEqual(id, "28515fee00000000") def _add_get_storage_pool_id_response(self, m, json_str): url = "{0}/api/types/StoragePool/instances/action/queryIdByKey".format( self._mock_url ) m.post(url, status_code=200, json=json_str) @requests_mock.mock() def test_set_export_access(self, m): export_id = "6433a2b2-6d60-f737-9f3b-2a50fb1ccff3" rw_hosts = "192.168.1.110" ro_hosts = "192.168.1.111" self.assertEqual(0, len(m.request_history)) self._add_set_export_access_response(m, export_id) result = self.manager.set_export_access(export_id, rw_hosts, ro_hosts) self.assertEqual(result, True) def _add_set_export_access_response(self, m, export_id): url = "{0}/rest/v1/nfs-exports/{1}".format(self._mock_url, export_id) m.patch(url, status_code=204) @requests_mock.mock() def test_extend_export(self, m): filesystem_id = "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" new_size = 6441225472 self.assertEqual(0, len(m.request_history)) self._add_extend_export_response(m, filesystem_id) result = self.manager.extend_export(filesystem_id, new_size) self.assertEqual(result, True) def _add_extend_export_response(self, m, filesystem_id): url = "{0}/rest/v1/file-systems/{1}".format( self._mock_url, filesystem_id ) m.patch(url, status_code=204) @requests_mock.mock() def test_get_fsid_from_export_name(self, m): name = "Manila-UT-filesystem" self.assertEqual(0, len(m.request_history)) self._add_get_fsid_from_export_name_response( m, name, self._getJsonFile("get_fsid_from_export_name_response.json"), ) id = self.manager.get_fsid_from_export_name(name) self.assertEqual(id, "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3") 
def _add_get_fsid_from_export_name_response(self, m, name, json_str): url = ( "{0}/rest/v1/nfs-exports?select=file_system_id&name=eq.{1}".format( self._mock_url, name ) ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_fsid_from_snapshot_name(self, m): snapshot_name = "Manila-UT-filesystem-snap" self.assertEqual(0, len(m.request_history)) self._add_get_fsid_from_snapshot_name_response( m, snapshot_name, self._getJsonFile("get_fsid_from_snapshot_name_response.json"), ) id = self.manager.get_fsid_from_snapshot_name(snapshot_name) self.assertEqual(id, "6433b635-6c1f-878e-6467-2a50fb1ccff3") def _add_get_fsid_from_snapshot_name_response( self, m, snapshot_name, json_str ): url = "{0}/rest/v1/file-systems?select=id&name=eq.{1}".format( self._mock_url, snapshot_name ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_check_response_with_login_get(self, m): nas_server = "env8nasserver" self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_id_response_list(m, nas_server) self._add_login_success_response(m) id = self.manager.get_nas_server_id(nas_server) self.assertEqual(id, "64132f37-d33e-9d4a-89ba-d625520a4779") def _add_get_nas_server_id_response_list(self, m, nas_server): url = "{0}/rest/v1/nas-servers?select=id&name=eq.{1}".format( self._mock_url, nas_server ) m.get( url, [ {"status_code": http_client.UNAUTHORIZED}, { "status_code": 200, "json": self._getJsonFile( "get_nas_server_id_response.json" ), }, ], ) def _add_login_success_response(self, m): url = "{0}/rest/auth/login".format(self._mock_url) m.post( url, status_code=200, json=self._getJsonFile("login_response.json") ) @requests_mock.mock() def test_check_response_with_login_post(self, m): filesystem_id = "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" name = "Manila-UT-filesystem" self.assertEqual(0, len(m.request_history)) self._add_create_nfs_export_response_list(m) self._add_login_success_response(m) id = 
self.manager.create_nfs_export(filesystem_id, name) self.assertEqual(id, "6433a2b2-6d60-f737-9f3b-2a50fb1ccff3") def _add_create_nfs_export_response_list(self, m): url = "{0}/rest/v1/nfs-exports".format(self._mock_url) m.post( url, [ {"status_code": http_client.UNAUTHORIZED}, { "status_code": 201, "json": self._getJsonFile( "create_nfs_export_response.json" ), }, ], ) @requests_mock.mock() def test_check_response_with_login_delete(self, m): filesystem_id = "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" self.assertEqual(0, len(m.request_history)) self._add_delete_filesystem_response_list(m, filesystem_id) self._add_login_success_response(m) result = self.manager.delete_filesystem(filesystem_id) self.assertEqual(result, True) def _add_delete_filesystem_response_list(self, m, filesystem_id): url = "{0}/rest/v1/file-systems/{1}".format( self._mock_url, filesystem_id ) m.delete( url, [{"status_code": http_client.UNAUTHORIZED}, {"status_code": 204}], ) @requests_mock.mock() def test_check_response_with_login_patch(self, m): filesystem_id = "6432b79e-1cc3-0414-3ffd-2a50fb1ccff3" new_size = 6441225472 self.assertEqual(0, len(m.request_history)) self._add_extend_export_response_list(m, filesystem_id) self._add_login_success_response(m) result = self.manager.extend_export(filesystem_id, new_size) self.assertEqual(result, True) def _add_extend_export_response_list(self, m, filesystem_id): url = "{0}/rest/v1/file-systems/{1}".format( self._mock_url, filesystem_id ) m.patch( url, [{"status_code": http_client.UNAUTHORIZED}, {"status_code": 204}], ) @requests_mock.mock() def test_check_response_with_invalid_credential(self, m): nas_server = "env8nasserver" self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_id_unauthorized_response(m, nas_server) self._add_login_fail_response(m) self.assertRaises( exception.NotAuthorized, self.manager.get_nas_server_id, nas_server ) def _add_get_nas_server_id_unauthorized_response(self, m, nas_server): url = 
"{0}/rest/v1/nas-servers?select=id&name=eq.{1}".format( self._mock_url, nas_server ) m.get(url, status_code=http_client.UNAUTHORIZED) def _add_login_fail_response(self, m): url = "{0}/rest/auth/login".format(self._mock_url) m.post(url, status_code=http_client.UNAUTHORIZED) @requests_mock.mock() def test_execute_powerflex_post_request_with_no_param(self, m): url = self._mock_url + "/fake_url" self.assertEqual(0, len(m.request_history)) m.post(url, status_code=201) res, response = self.manager.execute_powerflex_post_request(url) self.assertEqual(res.status_code, 201) @requests_mock.mock() def test_execute_powerflex_patch_request_with_no_param(self, m): url = self._mock_url + "/fake_url" self.assertEqual(0, len(m.request_history)) m.patch(url, status_code=204) res = self.manager.execute_powerflex_patch_request(url) self.assertEqual(res.status_code, 204) @requests_mock.mock() def test_get_storage_pool_spare_percentage(self, m): storage_pool_id = "28515fee00000000" self.assertEqual(0, len(m.request_history)) self._add_get_storage_pool_spare_percentage( m, storage_pool_id, self._getJsonFile("get_storage_pool_spare_percentage.json"), ) spare = self.manager.get_storage_pool_spare_percentage(storage_pool_id) self.assertEqual(spare, 34) def _add_get_storage_pool_spare_percentage(self, m, storage_pool_id, json_str): url = ( "{0}/api/instances/StoragePool::{1}".format( self._mock_url, storage_pool_id ) ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_storage_pool_statistic(self, m): storage_pool_id = "28515fee00000000" self.assertEqual(0, len(m.request_history)) self._add_get_storage_pool_statistic( m, storage_pool_id, self._getJsonFile("get_storage_pool_statistic.json"), ) statistic = self.manager.get_storage_pool_statistic(storage_pool_id) self.assertEqual(statistic['maxCapacityInKb'], 4826330112) self.assertEqual(statistic['capacityInUseInKb'], 53217280) self.assertEqual(statistic['netUnusedCapacityInKb'], 1566080512) 
self.assertEqual(statistic['primaryVacInKb'], 184549376) def _add_get_storage_pool_statistic(self, m, storage_pool_id, json_str): url = ( ("{0}/api/instances/StoragePool::{1}/relationships/" + "Statistics").format( self._mock_url, storage_pool_id ) ) m.get(url, status_code=200, json=json_str) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0056705 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powermax/0000775000175000017500000000000000000000000025330 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powermax/__init__.py0000664000175000017500000000000000000000000027427 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powermax/test_connection.py0000664000175000017500000027745000000000000031117 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock import ddt from oslo_log import log from manila import exception from manila.share.drivers.dell_emc.common.enas import connector from manila.share.drivers.dell_emc.common.enas import utils as enas_utils from manila.share.drivers.dell_emc.plugins.vnx import connection from manila.share.drivers.dell_emc.plugins.vnx import object_manager from manila import test from manila.tests import fake_share from manila.tests.share.drivers.dell_emc.common.enas import fakes from manila.tests.share.drivers.dell_emc.common.enas import utils LOG = log.getLogger(__name__) @ddt.ddt class StorageConnectionTestCase(test.TestCase): @mock.patch.object(connector.XMLAPIConnector, "_do_setup", mock.Mock()) def setUp(self): super(StorageConnectionTestCase, self).setUp() self.emc_share_driver = fakes.FakeEMCShareDriver() self.connection = connection.VNXStorageConnection(LOG) self.pool = fakes.PoolTestData() self.vdm = fakes.VDMTestData() self.mover = fakes.MoverTestData() self.fs = fakes.FileSystemTestData() self.mount = fakes.MountPointTestData() self.snap = fakes.SnapshotTestData() self.cifs_share = fakes.CIFSShareTestData() self.nfs_share = fakes.NFSShareTestData() self.cifs_server = fakes.CIFSServerTestData() self.dns = fakes.DNSDomainTestData() with mock.patch.object(connector.XMLAPIConnector, 'request', mock.Mock()): self.connection.connect(self.emc_share_driver, None) def test_check_for_setup_error(self): hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_ref_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock with mock.patch.object(connection.VNXStorageConnection, '_get_managed_storage_pools', mock.Mock()): self.connection.check_for_setup_error() expected_calls = [mock.call(self.mover.req_get_ref())] xml_req_mock.assert_has_calls(expected_calls) def test_check_for_setup_error_with_invalid_mover_name(self): hook = utils.RequestSideEffect() 
hook.append(self.mover.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.InvalidParameterValue, self.connection.check_for_setup_error) expected_calls = [mock.call(self.mover.req_get_ref())] xml_req_mock.assert_has_calls(expected_calls) @ddt.data({'pool_conf': None, 'real_pools': ['fake_pool', 'nas_pool'], 'matched_pool': set()}, {'pool_conf': [], 'real_pools': ['fake_pool', 'nas_pool'], 'matched_pool': set()}, {'pool_conf': ['*'], 'real_pools': ['fake_pool', 'nas_pool'], 'matched_pool': {'fake_pool', 'nas_pool'}}, {'pool_conf': ['fake_*'], 'real_pools': ['fake_pool', 'nas_pool', 'Perf_Pool'], 'matched_pool': {'fake_pool'}}, {'pool_conf': ['*pool'], 'real_pools': ['fake_pool', 'NAS_Pool', 'Perf_POOL'], 'matched_pool': {'fake_pool'}}, {'pool_conf': ['nas_pool'], 'real_pools': ['fake_pool', 'nas_pool', 'perf_pool'], 'matched_pool': {'nas_pool'}}) @ddt.unpack def test__get_managed_storage_pools(self, pool_conf, real_pools, matched_pool): with mock.patch.object(object_manager.StoragePool, 'get_all', mock.Mock(return_value=('ok', real_pools))): pool = self.connection._get_managed_storage_pools(pool_conf) self.assertEqual(matched_pool, pool) def test__get_managed_storage_pools_failed_to_get_pool_info(self): hook = utils.RequestSideEffect() hook.append(self.pool.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock pool_conf = fakes.FakeData.pool_name self.assertRaises(exception.EMCVnxXMLAPIError, self.connection._get_managed_storage_pools, pool_conf) expected_calls = [mock.call(self.pool.req_get())] xml_req_mock.assert_has_calls(expected_calls) @ddt.data( {'pool_conf': ['fake_*'], 'real_pools': ['nas_pool', 'Perf_Pool']}, {'pool_conf': ['*pool'], 'real_pools': ['NAS_Pool', 'Perf_POOL']}, {'pool_conf': ['nas_pool'], 'real_pools': ['fake_pool', 'perf_pool']}, ) @ddt.unpack def 
test__get_managed_storage_pools_without_matched_pool(self, pool_conf, real_pools): with mock.patch.object(object_manager.StoragePool, 'get_all', mock.Mock(return_value=('ok', real_pools))): self.assertRaises(exception.InvalidParameterValue, self.connection._get_managed_storage_pools, pool_conf) def test_create_cifs_share(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.pool.req_get()), mock.call(self.fs.req_create_on_vdm()), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': r'\\%s\%s' % ( fakes.FakeData.network_allocations_ip1, share['name'])}], 'CIFS export path is incorrect') def test_create_cifs_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) 
hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.pool.req_get()), mock.call(self.fs.req_create_on_vdm()), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual( location, [{'path': r'\\%s.ipv6-literal.net\%s' % ( fakes.FakeData.network_allocations_ip3.replace(':', '-'), share['name'])}], 'CIFS export path is incorrect') def test_create_nfs_share(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE hook = utils.RequestSideEffect() hook.append(self.pool.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': '192.168.1.2:/%s' % 
share['name']}], 'NFS export path is incorrect') def test_create_nfs_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE hook = utils.RequestSideEffect() hook.append(self.pool.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': '[%s]:/%s' % ( fakes.FakeData.network_allocations_ip4, share['name'])}], 'NFS export path is incorrect') def test_create_cifs_share_without_share_server(self): share = fakes.CIFS_SHARE self.assertRaises(exception.InvalidInput, self.connection.create_share, None, share, None) def test_create_cifs_share_without_share_server_name(self): share = fakes.CIFS_SHARE share_server = copy.deepcopy(fakes.SHARE_SERVER) share_server['backend_details']['share_server_name'] = None self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_share, None, share, share_server) def test_create_cifs_share_with_invalide_cifs_server_name(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = 
xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_share, None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_cifs_share_without_interface_in_cifs_server(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_without_interface( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_share, None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.pool.req_get()), mock.call(self.fs.req_create_on_vdm()), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_cifs_share_without_pool_name(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(host='HostA@BackendB', share_proto='CIFS') self.assertRaises(exception.InvalidHost, self.connection.create_share, None, share, share_server) def test_create_cifs_share_from_snapshot(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, share_name=fakes.FakeData.src_share_name, share_id=fakes.FakeData.src_share_name, id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) 
self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), mock.call(self.cifs_share.cmd_disable_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': r'\\192.168.1.1\%s' % share['name']}], 'CIFS export path is incorrect') def test_create_cifs_share_from_snapshot_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, share_name=fakes.FakeData.src_share_name, share_id=fakes.FakeData.src_share_name, id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, 
join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), mock.call(self.cifs_share.cmd_disable_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual( location, [{'path': r'\\%s.ipv6-literal.net\%s' % ( fakes.FakeData.network_allocations_ip3.replace(':', '-'), share['name'])}], 'CIFS export path is incorrect') def test_create_nfs_share_from_snapshot(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, share_name=fakes.FakeData.src_share_name, share_id=fakes.FakeData.src_share_name, id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) 
xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), mock.call(self.nfs_share.cmd_create(), True) ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': '192.168.1.2:/%s' % share['name']}], 'NFS export path is incorrect') def test_create_nfs_share_from_snapshot_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, share_name=fakes.FakeData.src_share_name, share_id=fakes.FakeData.src_share_name, id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) 
ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), mock.call(self.nfs_share.cmd_create(), True) ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual( location, [{'path': '[%s]:/%s' % ( fakes.FakeData.network_allocations_ip4, share['name'])}], 'NFS export path is incorrect') def test_create_share_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') self.assertRaises(exception.InvalidShare, self.connection.create_share, context=None, share=share, share_server=share_server) def test_create_share_from_snapshot_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') snapshot = fake_share.fake_snapshot() self.assertRaises(exception.InvalidShare, self.connection.create_share_from_snapshot, None, share, snapshot, share_server) def test_create_share_from_snapshot_without_pool_name(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(host='HostA@BackendB', share_proto='CIFS') snapshot = fake_share.fake_snapshot() self.assertRaises(exception.InvalidHost, self.connection.create_share_from_snapshot, None, share, snapshot, share_server) def 
test_delete_cifs_share(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_share.resp_task_succeed()) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_delete_cifs_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_share.resp_task_succeed()) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_delete_nfs_share(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE hook = utils.RequestSideEffect() 
hook.append(self.vdm.resp_get_succeed()) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) ssh_hook.append(self.nfs_share.output_delete_succeed()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_delete_nfs_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) ssh_hook.append(self.nfs_share.output_delete_succeed()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), 
mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_delete_share_without_share_server(self): share = fakes.CIFS_SHARE self.connection.delete_share(None, share) def test_delete_share_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') self.assertRaises(exception.InvalidShare, self.connection.delete_share, context=None, share=share, share_server=share_server) def test_delete_cifs_share_with_nonexistent_mount_and_filesystem(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_share.resp_task_succeed()) hook.append(self.mount.resp_task_error()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_extend_share(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE new_size = fakes.FakeData.new_size hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.extend_share(share, new_size, 
share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] xml_req_mock.assert_has_calls(expected_calls) def test_extend_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE new_size = fakes.FakeData.new_size hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.extend_share(share, new_size, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] xml_req_mock.assert_has_calls(expected_calls) def test_extend_share_without_pool_name(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(host='HostA@BackendB', share_proto='CIFS') new_size = fakes.FakeData.new_size self.assertRaises(exception.InvalidHost, self.connection.extend_share, share, new_size, share_server) def test_create_snapshot(self): share_server = fakes.SHARE_SERVER snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.snap.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.create_snapshot(None, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create()), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_snapshot_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() 
hook.append(self.fs.resp_get_succeed()) hook.append(self.snap.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.create_snapshot(None, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create()), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_snapshot_with_incorrect_share_info(self): share_server = fakes.SHARE_SERVER snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_but_not_found()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_snapshot, None, snapshot, share_server) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) def test_delete_snapshot(self): share_server = fakes.SHARE_SERVER snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.snap.resp_get_succeed()) hook.append(self.snap.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_snapshot(None, snapshot, share_server) expected_calls = [ mock.call(self.snap.req_get()), mock.call(self.snap.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_delete_snapshot_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.snap.resp_get_succeed()) 
        # NOTE(review): continuation of a delete_snapshot test whose start is
        # outside this chunk — kept verbatim.
        hook.append(self.snap.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        self.connection.delete_snapshot(None, snapshot, share_server)

        expected_calls = [
            mock.call(self.snap.req_get()),
            mock.call(self.snap.req_delete()),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

    @utils.patch_get_managed_ports_vnx(return_value=['cge-1-0'])
    def test_setup_server(self):
        """New share server creates a VDM, interfaces, DNS and CIFS server."""
        # Queue the XML API responses in the exact order the driver issues
        # its requests; EMCMock pops one response per request.
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_but_not_found())
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.vdm.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.dns.resp_task_succeed())
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append()
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.setup_server(fakes.NETWORK_INFO, None)

        if_name_1 = fakes.FakeData.interface_name1
        if_name_2 = fakes.FakeData.interface_name2

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.vdm.req_create()),
            mock.call(self.mover.req_create_interface(
                if_name=if_name_1,
                ip=fakes.FakeData.network_allocations_ip1)),
            mock.call(self.mover.req_create_interface(
                if_name=if_name_2,
                ip=fakes.FakeData.network_allocations_ip2)),
            mock.call(self.dns.req_create()),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_attach_nfs_interface(), False),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    @utils.patch_get_managed_ports_vnx(return_value=['cge-1-0'])
    def test_setup_server_with_ipv6(self):
        """Server setup over IPv6 uses the IPv6 interface/DNS/CIFS requests."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_but_not_found())
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.vdm.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.dns.resp_task_succeed())
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append()
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.setup_server(fakes.NETWORK_INFO_IPV6, None)

        if_name_1 = fakes.FakeData.interface_name3
        if_name_2 = fakes.FakeData.interface_name4
        expect_ip_1 = fakes.FakeData.network_allocations_ip3
        expect_ip_2 = fakes.FakeData.network_allocations_ip4

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.vdm.req_create()),
            mock.call(self.mover.req_create_interface_with_ipv6(
                if_name=if_name_1, ip=expect_ip_1)),
            mock.call(self.mover.req_create_interface_with_ipv6(
                if_name=if_name_2, ip=expect_ip_2)),
            mock.call(self.dns.req_create(
                ip_addr=fakes.FakeData.dns_ipv6_address)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_create(
                self.vdm.vdm_id,
                ip_addr=fakes.FakeData.network_allocations_ip3)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_attach_nfs_interface(
                interface=fakes.FakeData.interface_name4), False),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    @utils.patch_get_managed_ports_vnx(return_value=['cge-1-0'])
    def test_setup_server_with_existing_vdm(self):
        """An existing VDM is reused: no VDM-create request is issued."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.dns.resp_task_succeed())
        hook.append(self.cifs_server.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append()
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.setup_server(fakes.NETWORK_INFO, None)

        # Interface names are derived from the last 12 chars of the
        # network allocation ids in this path.
        if_name_1 = fakes.FakeData.network_allocations_id1[-12:]
        if_name_2 = fakes.FakeData.network_allocations_id2[-12:]

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface(
                if_name=if_name_1,
                ip=fakes.FakeData.network_allocations_ip1)),
            mock.call(self.mover.req_create_interface(
                if_name=if_name_2,
                ip=fakes.FakeData.network_allocations_ip2)),
            mock.call(self.dns.req_create()),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_attach_nfs_interface(), False),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_setup_server_with_invalid_security_service(self):
        """An unsupported security-service type raises EMCVnxXMLAPIError."""
        network_info = copy.deepcopy(fakes.NETWORK_INFO)
        network_info['security_services'][0]['type'] = 'fake_type'

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          self.connection.setup_server,
                          network_info, None)

    @utils.patch_get_managed_ports_vnx(
        side_effect=exception.EMCVnxXMLAPIError(
            err="Get managed ports fail."))
    def test_setup_server_without_valid_physical_device(self):
        """Port-discovery failure rolls back the freshly created VDM."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_but_not_found())
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.vdm.resp_task_succeed())
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_without_value())
        hook.append(self.vdm.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.vdm.output_get_interfaces_vdm(nfs_interface=''))
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          self.connection.setup_server,
                          fakes.NETWORK_INFO, None)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.vdm.req_create()),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.vdm.req_delete()),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_get_interfaces(), False),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    @utils.patch_get_managed_ports_vnx(return_value=['cge-1-0'])
    def test_setup_server_with_exception(self):
        """A failed interface-create triggers cleanup of interface and VDM."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_but_not_found())
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.vdm.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        # Second interface creation fails, which starts the rollback path.
        hook.append(self.mover.resp_task_error())
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_without_value())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.vdm.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.vdm.output_get_interfaces_vdm(nfs_interface=''))
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          self.connection.setup_server,
                          fakes.NETWORK_INFO, None)

        if_name_1 = fakes.FakeData.network_allocations_id1[-12:]
        if_name_2 = fakes.FakeData.network_allocations_id2[-12:]

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.vdm.req_create()),
            mock.call(self.mover.req_create_interface(
                if_name=if_name_1,
                ip=fakes.FakeData.network_allocations_ip1)),
            mock.call(self.mover.req_create_interface(
                if_name=if_name_2,
                ip=fakes.FakeData.network_allocations_ip2)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip1)),
            mock.call(self.vdm.req_delete()),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_get_interfaces(), False),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_teardown_server(self):
        """Full teardown: unjoin domain, delete CIFS server/interfaces/VDM."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        hook.append(self.cifs_server.resp_task_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False))
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.vdm.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.vdm.output_get_interfaces_vdm())
        ssh_hook.append()
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.teardown_server(fakes.SERVER_DETAIL,
                                        fakes.SECURITY_SERVICE)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)),
            mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip1)),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip2)),
            mock.call(self.vdm.req_delete()),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_get_interfaces(), False),
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_teardown_server_with_ipv6(self):
        """Teardown over IPv6 deletes the IPv6 interfaces (ip3/ip4)."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        hook.append(self.cifs_server.resp_task_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False))
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.vdm.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.vdm.output_get_interfaces_vdm())
        ssh_hook.append()
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.teardown_server(fakes.SERVER_DETAIL_IPV6,
                                        fakes.SECURITY_SERVICE_IPV6)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)),
            mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip3)),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip4)),
            mock.call(self.vdm.req_delete()),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_get_interfaces(), False),
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_teardown_server_without_server_detail(self):
        """No server detail: teardown is a silent no-op."""
        self.connection.teardown_server(None, fakes.SECURITY_SERVICE)

    def test_teardown_server_without_security_services(self):
        """Without security services the CIFS unjoin/delete steps are skipped."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.vdm.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.vdm.output_get_interfaces_vdm())
        ssh_hook.append()
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.teardown_server(fakes.SERVER_DETAIL, [])

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip1)),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip2)),
            mock.call(self.vdm.req_delete()),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_get_interfaces(), False),
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_teardown_server_without_share_server_name_in_server_detail(self):
        """Missing share_server_name in the detail dict is tolerated."""
        server_detail = {
            'cifs_if': fakes.FakeData.network_allocations_ip1,
            'nfs_if': fakes.FakeData.network_allocations_ip2,
        }
        self.connection.teardown_server(server_detail, fakes.SECURITY_SERVICE)

    def test_teardown_server_with_invalid_server_name(self):
        """A VDM-lookup error stops teardown after the first request."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_error())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        self.connection.teardown_server(fakes.SERVER_DETAIL,
                                        fakes.SECURITY_SERVICE)

        expected_calls = [mock.call(self.vdm.req_get())]
        xml_req_mock.assert_has_calls(expected_calls)

    def test_teardown_server_without_cifs_server(self):
        """A CIFS-server lookup error still allows interface/VDM cleanup."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_error())
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.cifs_server.resp_task_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False))
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.vdm.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.vdm.output_get_interfaces_vdm())
        ssh_hook.append()
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.teardown_server(fakes.SERVER_DETAIL,
                                        fakes.SECURITY_SERVICE)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip1)),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip2)),
            mock.call(self.vdm.req_delete()),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_get_interfaces(), False),
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_teardown_server_with_invalid_cifs_server_modification(self):
        """A failed domain-unjoin does not block deleting the CIFS server."""
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        hook.append(self.cifs_server.resp_task_error())
        hook.append(self.cifs_server.resp_task_succeed())
        hook.append(self.mover.resp_get_ref_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.mover.resp_task_succeed())
        hook.append(self.vdm.resp_task_succeed())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.vdm.output_get_interfaces_vdm())
        ssh_hook.append()
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.teardown_server(fakes.SERVER_DETAIL,
                                        fakes.SECURITY_SERVICE)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_modify(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip1)),
            mock.call(self.mover.req_delete_interface(
                fakes.FakeData.network_allocations_ip2)),
            mock.call(self.vdm.req_delete()),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.vdm.cmd_get_interfaces(), False),
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_update_access_add_cifs_rw(self):
        """update_access with one add-rule issues a CIFS allow command."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.update_access(None, share, [], [access], [],
                                      share_server=share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_update_access_add_cifs_rw_with_ipv6(self):
        """Same as test_update_access_add_cifs_rw but over IPv6 fakes."""
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed(
            interface1=fakes.FakeData.interface_name3,
            interface2=fakes.FakeData.interface_name4))
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True,
            ip_addr=fakes.FakeData.network_allocations_ip3))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.update_access(None, share, [], [access], [],
                                      share_server=share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_update_access_deny_nfs(self):
        """A delete-rule removes the host from the NFS rw list."""
        share_server = fakes.SHARE_SERVER
        share = fakes.NFS_SHARE
        access = fakes.NFS_RW_ACCESS
        rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
        rw_hosts.append(access['access_to'])
        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts))
        ssh_hook.append(self.nfs_share.output_set_access_success())
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=fakes.FakeData.rw_hosts,
            ro_hosts=fakes.FakeData.ro_hosts))
        ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.update_access(None, share, [], [], [access],
                                      share_server=share_server)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), True),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=self.nfs_share.rw_hosts,
                ro_hosts=self.nfs_share.ro_hosts), True),
            mock.call(self.nfs_share.cmd_get(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_update_access_deny_nfs_with_ipv6(self):
        """Same as test_update_access_deny_nfs with IPv6 host lists."""
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.NFS_SHARE
        access = fakes.NFS_RW_ACCESS
        rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts_ipv6)
        rw_hosts.append(access['access_to'])
        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts_ipv6))
        ssh_hook.append(self.nfs_share.output_set_access_success())
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=fakes.FakeData.rw_hosts_ipv6,
            ro_hosts=fakes.FakeData.ro_hosts_ipv6))
        ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.update_access(None, share, [], [], [access],
                                      share_server=share_server)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), True),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=self.nfs_share.rw_hosts_ipv6,
                ro_hosts=self.nfs_share.ro_hosts_ipv6), True),
            mock.call(self.nfs_share.cmd_get(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_update_access_recover_nfs_rule(self):
        """Recovery mode resets the NFS export to exactly the given rules."""
        share_server = fakes.SHARE_SERVER
        share = fakes.NFS_SHARE
        access = fakes.NFS_RW_ACCESS
        hosts = ['192.168.1.5']
        rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
        rw_hosts.append(access['access_to'])
        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts))
        ssh_hook.append(self.nfs_share.output_set_access_success())
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=hosts, ro_hosts=[]))
        ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.update_access(None, share, [access], [], [],
                                      share_server=share_server)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), True),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=hosts, ro_hosts=[]), True),
            mock.call(self.nfs_share.cmd_get(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_update_access_recover_nfs_rule_with_ipv6(self):
        """Recovery mode over IPv6 resets the export to the IPv6 rule set."""
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.NFS_SHARE
        access = fakes.NFS_RW_ACCESS_IPV6
        hosts = ['fdf8:f53b:82e1::5']
        rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts_ipv6)
        rw_hosts.append(access['access_to'])
        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts_ipv6))
        ssh_hook.append(self.nfs_share.output_set_access_success())
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=hosts, ro_hosts=[]))
        ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.update_access(None, share, [access], [], [],
                                      share_server=share_server)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), True),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=hosts, ro_hosts=[]), True),
            mock.call(self.nfs_share.cmd_get(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_update_access_recover_cifs_rule(self):
        """CIFS recovery re-adds the rule and revokes the stale 'guest' user."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_hook.append(fakes.FakeData.cifs_access)
        ssh_hook.append('Command succeeded')
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.update_access(None, share, [access], [], [],
                                      share_server=share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(), True),
            mock.call(self.cifs_share.cmd_get_access(), True),
            mock.call(self.cifs_share.cmd_change_access(
                action='revoke', user='guest'), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_update_access_recover_cifs_rule_with_ipv6(self):
        """Same as test_update_access_recover_cifs_rule over IPv6 fakes."""
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed(
            interface1=fakes.FakeData.interface_name3,
            interface2=fakes.FakeData.interface_name4))
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True,
            ip_addr=fakes.FakeData.network_allocations_ip3))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_hook.append(fakes.FakeData.cifs_access)
        ssh_hook.append('Command succeeded')
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.update_access(None, share, [access], [], [],
                                      share_server=share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(), True),
            mock.call(self.cifs_share.cmd_get_access(), True),
            mock.call(self.cifs_share.cmd_change_access(
                action='revoke', user='guest'), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_cifs_clear_access_server_not_found(self):
        """_cifs_clear_access raises when the named CIFS server is absent."""
        server = fakes.SHARE_SERVER
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True,
            cifs_server_name='cifs_server_name'))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          self.connection._cifs_clear_access,
                          'share_name', server, None)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

    def test_allow_cifs_rw_access(self):
        """allow_access grants CIFS rw via the default change-access command."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.allow_access(None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_allow_cifs_rw_access_with_ipv6(self):
        """Same as test_allow_cifs_rw_access over IPv6 fakes."""
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed(
            interface1=fakes.FakeData.interface_name3,
            interface2=fakes.FakeData.interface_name4))
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True,
            ip_addr=fakes.FakeData.network_allocations_ip3))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.allow_access(None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_allow_cifs_ro_access(self):
        """allow_access with ro level issues cmd_change_access('ro')."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RO_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.allow_access(None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access('ro'), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_allow_cifs_ro_access_with_ipv6(self):
        """Same as test_allow_cifs_ro_access over IPv6 fakes."""
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RO_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed(
            interface1=fakes.FakeData.interface_name3,
            interface2=fakes.FakeData.interface_name4))
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True,
            ip_addr=fakes.FakeData.network_allocations_ip3))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.allow_access(None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access('ro'), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_allow_ro_access_without_share_server_name(self):
        """Access still works when backend_details lacks share_server_name."""
        share = fakes.CIFS_SHARE
        share_server = copy.deepcopy(fakes.SHARE_SERVER)
        share_server['backend_details'].pop('share_server_name')
        access = fakes.CIFS_RO_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.allow_access(None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access('ro'), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_allow_access_with_invalid_access_level(self):
        """An unknown access level raises InvalidShareAccessLevel."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fake_share.fake_access(access_level='fake_level')
        self.assertRaises(exception.InvalidShareAccessLevel,
                          self.connection.allow_access,
                          None, share, access, share_server)

    def test_allow_access_with_invalid_share_server_name(self):
        """A CIFS-server lookup error during allow_access is propagated."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_error())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          self.connection.allow_access,
                          None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

    def test_allow_nfs_access(self):
        """allow_access appends the host to the NFS export's rw list."""
        share_server = fakes.SHARE_SERVER
        share = fakes.NFS_SHARE
        access = fakes.NFS_RW_ACCESS
        rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
        rw_hosts.append(access['access_to'])
        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=fakes.FakeData.rw_hosts,
            ro_hosts=fakes.FakeData.ro_hosts))
        ssh_hook.append(self.nfs_share.output_set_access_success())
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts))
        ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.allow_access(None, share, access, share_server)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), True),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts), True),
            mock.call(self.nfs_share.cmd_get(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_allow_nfs_access_with_ipv6(self):
        """Same as test_allow_nfs_access with IPv6 host lists."""
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.NFS_SHARE
        access = fakes.NFS_RW_ACCESS_IPV6
        rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts_ipv6)
        rw_hosts.append(access['access_to'])
        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=fakes.FakeData.rw_hosts_ipv6,
            ro_hosts=fakes.FakeData.ro_hosts_ipv6))
        ssh_hook.append(self.nfs_share.output_set_access_success())
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts_ipv6))
        ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.allow_access(None, share, access, share_server)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), True),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=rw_hosts,
                ro_hosts=self.nfs_share.ro_hosts_ipv6), True),
            mock.call(self.nfs_share.cmd_get(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_allow_cifs_access_with_incorrect_access_type(self):
        """A non-user access type on CIFS raises InvalidShareAccess."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fake_share.fake_access(access_type='fake_type')
        self.assertRaises(exception.InvalidShareAccess,
                          self.connection.allow_access,
                          None, share, access, share_server)

    def test_allow_nfs_access_with_incorrect_access_type(self):
        """A non-ip access type on NFS raises InvalidShareAccess."""
        share_server = fakes.SHARE_SERVER
        share = fakes.NFS_SHARE
        access = fake_share.fake_access(access_type='fake_type')
        self.assertRaises(exception.InvalidShareAccess,
                          self.connection.allow_access,
                          None, share, access, share_server)

    def test_allow_access_with_incorrect_proto(self):
        """An unsupported share protocol raises InvalidShare."""
        share_server = fakes.SHARE_SERVER
        share = fake_share.fake_share(share_proto='FAKE_PROTO')
        access = fake_share.fake_access()
        self.assertRaises(exception.InvalidShare,
                          self.connection.allow_access,
                          None, share, access, share_server)

    def test_deny_cifs_rw_access(self):
        """deny_access issues a revoke change-access command for CIFS rw."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.deny_access(None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_deny_cifs_rw_access_with_ipv6(self):
        """Same as test_deny_cifs_rw_access over IPv6 fakes."""
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed(
            interface1=fakes.FakeData.interface_name3,
            interface2=fakes.FakeData.interface_name4))
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True,
            ip_addr=fakes.FakeData.network_allocations_ip3))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.deny_access(None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_deny_cifs_ro_access(self):
        """deny_access on a ro rule issues cmd_change_access('ro', 'revoke')."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RO_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.deny_access(None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access('ro', 'revoke'),
                      True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_deny_cifs_ro_access_with_ipv6(self):
        """Same as test_deny_cifs_ro_access over IPv6 fakes."""
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RO_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed(
            interface1=fakes.FakeData.interface_name3,
            interface2=fakes.FakeData.interface_name4))
        hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True,
            ip_addr=fakes.FakeData.network_allocations_ip3))
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.cifs_share.output_allow_access())
        ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.deny_access(None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access('ro', 'revoke'),
                      True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    # NOTE(review): "invliad" typo is the established test name — kept as-is.
    def test_deny_cifs_access_with_invliad_share_server_name(self):
        """A CIFS-server lookup error during deny_access is propagated."""
        share_server = fakes.SHARE_SERVER
        share = fakes.CIFS_SHARE
        access = fakes.CIFS_RW_ACCESS
        hook = utils.RequestSideEffect()
        hook.append(self.vdm.resp_get_succeed())
        hook.append(self.cifs_server.resp_get_error())
        xml_req_mock = utils.EMCMock(side_effect=hook)
        self.connection.manager.connectors['XML'].request = xml_req_mock

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          self.connection.deny_access,
                          None, share, access, share_server)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        xml_req_mock.assert_has_calls(expected_calls)

    def test_deny_nfs_access(self):
        """deny_access removes the host from the NFS export's rw list."""
        share_server = fakes.SHARE_SERVER
        share = fakes.NFS_SHARE
        access = fakes.NFS_RW_ACCESS
        rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
        rw_hosts.append(access['access_to'])
        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts))
        ssh_hook.append(self.nfs_share.output_set_access_success())
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=fakes.FakeData.rw_hosts,
            ro_hosts=fakes.FakeData.ro_hosts))
        ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.deny_access(None, share, access, share_server)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), True),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=self.nfs_share.rw_hosts,
                ro_hosts=self.nfs_share.ro_hosts), True),
            mock.call(self.nfs_share.cmd_get(), True),
        ]
        ssh_cmd_mock.assert_has_calls(ssh_calls)

    def test_deny_nfs_access_with_ipv6(self):
        # NOTE(review): this method is truncated at the end of the visible
        # chunk (its final assert_has_calls continues past this range) —
        # kept verbatim.
        share_server = fakes.SHARE_SERVER_IPV6
        share = fakes.NFS_SHARE
        access = fakes.NFS_RW_ACCESS_IPV6
        rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts_ipv6)
        rw_hosts.append(access['access_to'])
        ssh_hook = utils.SSHSideEffect()
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts_ipv6))
        ssh_hook.append(self.nfs_share.output_set_access_success())
        ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=fakes.FakeData.rw_hosts_ipv6,
            ro_hosts=fakes.FakeData.ro_hosts_ipv6))
        ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
        self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock

        self.connection.deny_access(None, share, access, share_server)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), True),
            mock.call(self.nfs_share.cmd_set_access(
                rw_hosts=self.nfs_share.rw_hosts_ipv6,
                ro_hosts=self.nfs_share.ro_hosts_ipv6), True),
            mock.call(self.nfs_share.cmd_get(), True),
        ]
ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_access_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') access = fakes.CIFS_RW_ACCESS self.assertRaises(exception.InvalidShare, self.connection.deny_access, None, share, access, share_server) def test_deny_cifs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.deny_access, None, share, access, share_server) def test_deny_nfs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.deny_access, None, share, access, share_server) def test_update_share_stats(self): hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.pool.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.update_share_stats(fakes.STATS) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) for pool in fakes.STATS['pools']: if pool['pool_name'] == fakes.FakeData.pool_name: self.assertEqual( enas_utils.mb_to_gb(fakes.FakeData.pool_total_size), pool['total_capacity_gb']) free_size = (fakes.FakeData.pool_total_size - fakes.FakeData.pool_used_size) self.assertEqual(enas_utils.mb_to_gb(free_size), pool['free_capacity_gb']) def test_update_share_stats_without_matched_config_pools(self): self.connection.pools = set('fake_pool') hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.pool.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) 
self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.update_share_stats, fakes.STATS) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) def test_get_pool(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock pool_name = self.connection.get_pool(share) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) self.assertEqual(fakes.FakeData.pool_name, pool_name) def test_get_pool_failed_to_get_filesystem_info(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_pool, share) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) def test_get_pool_failed_to_get_pool_info(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_pool, share) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) def test_get_pool_failed_to_find_matched_pool_name(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed(name='unmatch_pool_name', id='unmatch_pool_id')) 
xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_pool, share) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) @ddt.data({'port_conf': None, 'managed_ports': ['cge-1-0', 'cge-1-3']}, {'port_conf': '*', 'managed_ports': ['cge-1-0', 'cge-1-3']}, {'port_conf': ['cge-1-*'], 'managed_ports': ['cge-1-0', 'cge-1-3']}, {'port_conf': ['cge-1-3'], 'managed_ports': ['cge-1-3']}) @ddt.unpack def test_get_managed_ports_one_port(self, port_conf, managed_ports): hook = utils.SSHSideEffect() hook.append(self.mover.output_get_physical_devices()) ssh_cmd_mock = mock.Mock(side_effect=hook) expected_calls = [ mock.call(self.mover.cmd_get_physical_devices(), False), ] self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.port_conf = port_conf ports = self.connection.get_managed_ports() self.assertIsInstance(ports, list) self.assertEqual(sorted(managed_ports), sorted(ports)) ssh_cmd_mock.assert_has_calls(expected_calls) def test_get_managed_ports_no_valid_port(self): hook = utils.SSHSideEffect() hook.append(self.mover.output_get_physical_devices()) ssh_cmd_mock = mock.Mock(side_effect=hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.port_conf = ['cge-2-0'] self.assertRaises(exception.BadConfigurationException, self.connection.get_managed_ports) def test_get_managed_ports_query_devices_failed(self): hook = utils.SSHSideEffect() hook.append(self.mover.fake_output) ssh_cmd_mock = mock.Mock(side_effect=hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.port_conf = ['cge-2-0'] self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_managed_ports) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powermax/test_object_manager.py0000664000175000017500000037560200000000000031716 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import ddt from lxml import builder from oslo_concurrency import processutils from manila import exception from manila.common import constants as const from manila.share.drivers.dell_emc.common.enas import connector from manila.share.drivers.dell_emc.common.enas import constants from manila.share.drivers.dell_emc.common.enas import xml_api_parser as parser from manila.share.drivers.dell_emc.plugins.powermax import ( object_manager as manager) from manila import test from manila.tests.share.drivers.dell_emc.common.enas import fakes from manila.tests.share.drivers.dell_emc.common.enas import utils class StorageObjectManagerTestCase(test.TestCase): @mock.patch.object(connector, "XMLAPIConnector", mock.Mock()) @mock.patch.object(connector, "SSHConnector", mock.Mock()) def setUp(self): super(StorageObjectManagerTestCase, self).setUp() emd_share_driver = fakes.FakeEMCShareDriver('powermax') self.manager = manager.StorageObjectManager( emd_share_driver.configuration) def test_get_storage_context(self): type_map = { 'FileSystem': manager.FileSystem, 'StoragePool': manager.StoragePool, 'MountPoint': manager.MountPoint, 'Mover': manager.Mover, 'VDM': manager.VDM, 
'Snapshot': manager.Snapshot, 'MoverInterface': manager.MoverInterface, 'DNSDomain': manager.DNSDomain, 'CIFSServer': manager.CIFSServer, 'CIFSShare': manager.CIFSShare, 'NFSShare': manager.NFSShare, } for key, value in type_map.items(): self.assertTrue( isinstance(self.manager.getStorageContext(key), value)) for key in self.manager.context.keys(): self.assertIn(key, type_map) def test_get_storage_context_invalid_type(self): fake_type = 'fake_type' self.assertRaises(exception.EMCPowerMaxXMLAPIError, self.manager.getStorageContext, fake_type) class StorageObjectTestCaseBase(test.TestCase): @mock.patch.object(connector, "XMLAPIConnector", mock.Mock()) @mock.patch.object(connector, "SSHConnector", mock.Mock()) def setUp(self): super(StorageObjectTestCaseBase, self).setUp() emd_share_driver = fakes.FakeEMCShareDriver('powermax') self.manager = manager.StorageObjectManager( emd_share_driver.configuration) self.base = fakes.StorageObjectTestData() self.pool = fakes.PoolTestData() self.vdm = fakes.VDMTestData() self.mover = fakes.MoverTestData() self.fs = fakes.FileSystemTestData() self.mount = fakes.MountPointTestData() self.snap = fakes.SnapshotTestData() self.cifs_share = fakes.CIFSShareTestData() self.nfs_share = fakes.NFSShareTestData() self.cifs_server = fakes.CIFSServerTestData() self.dns = fakes.DNSDomainTestData() class StorageObjectTestCase(StorageObjectTestCaseBase): def test_xml_api_retry(self): hook = utils.RequestSideEffect() hook.append(self.base.resp_need_retry()) hook.append(self.base.resp_task_succeed()) elt_maker = builder.ElementMaker(nsmap={None: constants.XML_NAMESPACE}) xml_parser = parser.XMLAPIParser() storage_object = manager.StorageObject(self.manager.connectors, elt_maker, xml_parser, self.manager) storage_object.conn['XML'].request = utils.EMCMock(side_effect=hook) fake_req = storage_object._build_task_package( elt_maker.StartFake(name='foo') ) resp = storage_object._send_request(fake_req) self.assertEqual('ok', resp['maxSeverity']) 
expected_calls = [ mock.call(self.base.req_fake_start_task()), mock.call(self.base.req_fake_start_task()) ] storage_object.conn['XML'].request.assert_has_calls(expected_calls) class FileSystemTestCase(StorageObjectTestCaseBase): def setUp(self): super(FileSystemTestCase, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = utils.SSHSideEffect() def test_create_file_system_on_vdm(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_file_system_on_mover(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.fs.req_create_on_mover()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_file_system_but_already_exist(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.fs.resp_create_but_already_exist()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) 
context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_file_system_invalid_mover_id(self, sleep_mock): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.fs.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.fs.req_create_on_mover()), mock.call(self.mover.req_get_ref()), mock.call(self.fs.req_create_on_mover()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_file_system_with_error(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.fs.resp_task_error()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.create, name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_file_system(self): 
self.hook.append(self.fs.resp_get_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.fs.filesystem_name, context.filesystem_map) property_map = [ 'name', 'pools_id', 'volume_id', 'size', 'id', 'type', 'dataServicePolicies', ] for prop in property_map: self.assertIn(prop, out) id = context.get_id(self.fs.filesystem_name) self.assertEqual(self.fs.filesystem_id, id) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_file_system_but_not_found(self): self.hook.append(self.fs.resp_get_but_not_found()) self.hook.append(self.fs.resp_get_without_value()) self.hook.append(self.fs.resp_get_error()) self.hook.append(self.fs.resp_get_but_not_found()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_ERROR, status) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.get_id, self.fs.filesystem_name) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.fs.req_get()), mock.call(self.fs.req_get()), mock.call(self.fs.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_file_system_but_miss_property(self): self.hook.append(self.fs.resp_get_but_miss_property()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_OK, status) 
self.assertIn(self.fs.filesystem_name, context.filesystem_map) property_map = [ 'name', 'pools_id', 'volume_id', 'size', 'id', 'type', 'dataServicePolicies', ] for prop in property_map: self.assertIn(prop, out) self.assertIsNone(out['dataServicePolicies']) id = context.get_id(self.fs.filesystem_name) self.assertEqual(self.fs.filesystem_id, id) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_file_system(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.fs.filesystem_name) self.assertNotIn(self.fs.filesystem_name, context.filesystem_map) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertNotIn(self.fs.filesystem_name, context.filesystem_map) def test_delete_file_system_but_not_found(self): self.hook.append(self.fs.resp_get_but_not_found()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.fs.filesystem_name) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_file_system_but_get_file_system_error(self): self.hook.append(self.fs.resp_get_error()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.delete, self.fs.filesystem_name) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_file_system_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.fs.resp_delete_but_failed()) context = 
self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.delete, self.fs.filesystem_name) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertIn(self.fs.filesystem_name, context.filesystem_map) def test_extend_file_system(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.extend(name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=self.fs.filesystem_new_size) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_but_not_found(self): self.hook.append(self.fs.resp_get_but_not_found()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.extend, name=self.fs.filesystem_name, pool_name=self.fs.pool_name, new_size=self.fs.filesystem_new_size) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_with_small_size(self): self.hook.append(self.fs.resp_get_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.extend, name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=1) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def 
test_extend_file_system_with_same_size(self): self.hook.append(self.fs.resp_get_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.extend(name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=self.fs.filesystem_size) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.fs.resp_extend_but_error()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.extend, name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=self.fs.filesystem_new_size) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_filesystem_from_snapshot(self): self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append(self.fs.output_copy_ckpt) self.ssh_hook.append(self.fs.output_info()) self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append() context = self.manager.getStorageContext('FileSystem') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.create_from_snapshot(self.fs.filesystem_name, self.snap.src_snap_name, self.fs.src_fileystems_name, self.pool.pool_name, self.vdm.vdm_name, self.mover.interconnect_id) ssh_calls = [ mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), ] 
context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_create_filesystem_from_snapshot_with_error(self): self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append(ex=processutils.ProcessExecutionError( stdout=self.fs.fake_output, stderr=None)) self.ssh_hook.append(self.fs.output_info()) self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append() context = self.manager.getStorageContext('FileSystem') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.create_from_snapshot( self.fs.filesystem_name, self.snap.src_snap_name, self.fs.src_fileystems_name, self.pool.pool_name, self.vdm.vdm_name, self.mover.interconnect_id, ) ssh_calls = [ mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) class MountPointTestCase(StorageObjectTestCaseBase): def setUp(self): super(MountPointTestCase, self).setUp() self.hook = utils.RequestSideEffect() def test_create_mount_point_on_vdm(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.mount.req_create(self.vdm.vdm_id, True)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mount_point_on_mover(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) 
self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mount_point_but_already_exist(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_create_but_already_exist()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.mount.req_create(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_mount_point_invalid_mover_id(self, sleep_mock): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_create(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_create(self.mover.mover_id, False)), ] 
context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_mount_point_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_error()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.create, mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.mount.req_create(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mount_point_on_vdm(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mount_point_on_mover(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mount_point_but_nonexistent(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_delete_but_nonexistent()) context = 
self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_delete_mount_point_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_delete(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_delete_mount_point_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_error()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.delete, mount_path=self.mount.path, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mount_points(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_get_succeed(self.vdm.vdm_id)) self.hook.append(self.mover.resp_get_ref_succeed()) 
self.hook.append(self.mount.resp_get_succeed(self.mover.mover_id, False)) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_OK, status) property_map = [ 'path', 'mover', 'moverIdIsVdm', 'fileSystem', ] for item in out: for prop in property_map: self.assertIn(prop, item) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_OK, status) property_map = [ 'path', 'mover', 'moverIdIsVdm', 'fileSystem', ] for item in out: for prop in property_map: self.assertIn(prop, item) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_get(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mount_points_but_not_found(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_without_value()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_get_mount_points_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_succeed(self.mover.mover_id, False)) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name, False) 
self.assertEqual(constants.STATUS_OK, status) property_map = [ 'path', 'mover', 'moverIdIsVdm', 'fileSystem', ] for item in out: for prop in property_map: self.assertIn(prop, item) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_get_mount_points_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_error()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) class VDMTestCase(StorageObjectTestCaseBase): def setUp(self): super(VDMTestCase, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = utils.SSHSideEffect() def test_create_vdm(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_task_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(self.vdm.vdm_name, self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_vdm_but_already_exist(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_create_but_already_exist()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create VDM which already exists. 
context.create(self.vdm.vdm_name, self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_vdm_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_task_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create VDM with invalid mover ID context.create(self.vdm.vdm_name, self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_vdm_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_task_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create VDM with invalid mover ID self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.create, name=self.vdm.vdm_name, mover_name=self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm(self): self.hook.append(self.vdm.resp_get_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.vdm.vdm_name, context.vdm_map) property_map = [ 'name', 'id', 'state', 'host_mover_id', 'interfaces', ] for prop in property_map: self.assertIn(prop, out) expected_calls = 
[mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm_with_error(self): self.hook.append(self.vdm.resp_get_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Get VDM with error status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm_but_not_found(self): self.hook.append(self.vdm.resp_get_without_value()) self.hook.append(self.vdm.resp_get_succeed('fake')) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Get VDM which does not exist status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.vdm.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm_id_with_error(self): self.hook.append(self.vdm.resp_get_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.get_id, self.vdm.vdm_name) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.vdm.resp_task_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.vdm.vdm_name) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.vdm.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm_but_not_found(self): 
self.hook.append(self.vdm.resp_get_but_not_found()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.vdm.vdm_name) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm_but_failed_to_get_vdm(self): self.hook.append(self.vdm.resp_get_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.delete, self.vdm.vdm_name) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.vdm.resp_task_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.delete, self.vdm.vdm_name) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.vdm.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_attach_detach_nfs_interface(self): self.ssh_hook.append() self.ssh_hook.append() context = self.manager.getStorageContext('VDM') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.attach_nfs_interface(self.vdm.vdm_name, self.mover.interface_name2) context.detach_nfs_interface(self.vdm.vdm_name, self.mover.interface_name2) ssh_calls = [ mock.call(self.vdm.cmd_attach_nfs_interface(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_detach_nfs_interface_with_error(self): self.ssh_hook.append(ex=processutils.ProcessExecutionError( stdout=self.vdm.fake_output)) self.ssh_hook.append(self.vdm.output_get_interfaces_vdm( self.mover.interface_name2)) 
self.ssh_hook.append(ex=processutils.ProcessExecutionError( stdout=self.vdm.fake_output)) self.ssh_hook.append(self.vdm.output_get_interfaces_vdm( nfs_interface=fakes.FakeData.interface_name1)) context = self.manager.getStorageContext('VDM') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.detach_nfs_interface, self.vdm.vdm_name, self.mover.interface_name2) context.detach_nfs_interface(self.vdm.vdm_name, self.mover.interface_name2) ssh_calls = [ mock.call(self.vdm.cmd_detach_nfs_interface(), True), mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), mock.call(self.vdm.cmd_get_interfaces(), False), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_cifs_nfs_interface(self): self.ssh_hook.append(self.vdm.output_get_interfaces_vdm()) context = self.manager.getStorageContext('VDM') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) interfaces = context.get_interfaces(self.vdm.vdm_name) self.assertIsNotNone(interfaces['cifs']) self.assertIsNotNone(interfaces['nfs']) ssh_calls = [mock.call(self.vdm.cmd_get_interfaces(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) class StoragePoolTestCase(StorageObjectTestCaseBase): def setUp(self): super(StoragePoolTestCase, self).setUp() self.hook = utils.RequestSideEffect() def test_get_pool(self): self.hook.append(self.pool.resp_get_succeed()) context = self.manager.getStorageContext('StoragePool') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.pool.pool_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.pool.pool_name, context.pool_map) property_map = [ 'name', 'movers_id', 'total_size', 'used_size', 'diskType', 'dataServicePolicies', 'id', ] for prop in property_map: self.assertIn(prop, out) expected_calls = [mock.call(self.pool.req_get())] 
context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_pool_with_error(self): self.hook.append(self.pool.resp_get_error()) self.hook.append(self.pool.resp_get_without_value()) self.hook.append(self.pool.resp_get_succeed(name='other')) context = self.manager.getStorageContext('StoragePool') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.pool.pool_name) self.assertEqual(constants.STATUS_ERROR, status) status, out = context.get(self.pool.pool_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.pool.pool_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.pool.req_get()), mock.call(self.pool.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_pool_id_with_error(self): self.hook.append(self.pool.resp_get_error()) context = self.manager.getStorageContext('StoragePool') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.get_id, self.pool.pool_name) expected_calls = [mock.call(self.pool.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) class MoverTestCase(StorageObjectTestCaseBase): def setUp(self): super(MoverTestCase, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = utils.SSHSideEffect() def test_get_mover(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_succeed()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.mover.mover_name, context.mover_map) property_map = [ 'name', 'id', 'Status', 'version', 'uptime', 'role', 
'interfaces', 'devices', 'dns_domain', ] for prop in property_map: self.assertIn(prop, out) status, out = context.get(self.mover.mover_name) self.assertEqual(constants.STATUS_OK, status) status, out = context.get(self.mover.mover_name, True) self.assertEqual(constants.STATUS_OK, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_ref_not_found(self): self.hook.append(self.mover.resp_get_ref_succeed(name='other')) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get_ref(self.mover.mover_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [mock.call(self.mover.req_get_ref())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_ref_with_error(self): self.hook.append(self.mover.resp_get_error()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get_ref(self.mover.mover_name) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [mock.call(self.mover.req_get_ref())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_ref_and_mover(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_succeed()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get_ref(self.mover.mover_name) self.assertEqual(constants.STATUS_OK, status) property_map = ['name', 'id'] for prop in property_map: self.assertIn(prop, out) status, out = context.get(self.mover.mover_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.mover.mover_name, context.mover_map) property_map = [ 'name', 'id', 
'Status', 'version', 'uptime', 'role', 'interfaces', 'devices', 'dns_domain', ] for prop in property_map: self.assertIn(prop, out) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_failed_to_get_mover_ref(self): self.hook.append(self.mover.resp_get_error()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.get, self.mover.mover_name) expected_calls = [mock.call(self.mover.req_get_ref())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_but_not_found(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_without_value()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(name=self.mover.mover_name, force=True) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_get_error()) context = self.manager.getStorageContext('Mover') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_interconnect_id(self): self.ssh_hook.append(self.mover.output_get_interconnect_id()) context = self.manager.getStorageContext('Mover') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) conn_id = 
context.get_interconnect_id(self.mover.mover_name, self.mover.mover_name) self.assertEqual(self.mover.interconnect_id, conn_id) ssh_calls = [mock.call(self.mover.cmd_get_interconnect_id(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_physical_devices(self): self.ssh_hook.append(self.mover.output_get_physical_devices()) context = self.manager.getStorageContext('Mover') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) devices = context.get_physical_devices(self.mover.mover_name) self.assertIn(self.mover.device_name, devices) ssh_calls = [mock.call(self.mover.cmd_get_physical_devices(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) class SnapshotTestCase(StorageObjectTestCaseBase): def setUp(self): super(SnapshotTestCase, self).setUp() self.hook = utils.RequestSideEffect() def test_create_snapshot(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.snap.resp_task_succeed()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.snap.snapshot_name, fs_name=self.fs.filesystem_name, pool_id=self.pool.pool_id) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_snapshot_but_already_exist(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.snap.resp_create_but_already_exist()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.snap.snapshot_name, fs_name=self.fs.filesystem_name, pool_id=self.pool.pool_id, ckpt_size=self.snap.snapshot_size) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create_with_size()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_snapshot_with_error(self): 
self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.snap.resp_task_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.create, name=self.snap.snapshot_name, fs_name=self.fs.filesystem_name, pool_id=self.pool.pool_id, ckpt_size=self.snap.snapshot_size) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create_with_size()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot(self): self.hook.append(self.snap.resp_get_succeed()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.snap.snapshot_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.snap.snapshot_name, context.snap_map) property_map = [ 'name', 'id', 'checkpointOf', 'state', ] for prop in property_map: self.assertIn(prop, out) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot_but_not_found(self): self.hook.append(self.snap.resp_get_without_value()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.snap.snapshot_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot_with_error(self): self.hook.append(self.snap.resp_get_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.snap.snapshot_name) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def 
test_delete_snapshot(self): self.hook.append(self.snap.resp_get_succeed()) self.hook.append(self.snap.resp_task_succeed()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.snap.snapshot_name) self.assertNotIn(self.snap.snapshot_name, context.snap_map) expected_calls = [ mock.call(self.snap.req_get()), mock.call(self.snap.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_snapshot_failed_to_get_snapshot(self): self.hook.append(self.snap.resp_get_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.delete, self.snap.snapshot_name) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_snapshot_but_not_found(self): self.hook.append(self.snap.resp_get_without_value()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.snap.snapshot_name) self.assertNotIn(self.snap.snapshot_name, context.snap_map) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_snapshot_with_error(self): self.hook.append(self.snap.resp_get_succeed()) self.hook.append(self.snap.resp_task_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.delete, self.snap.snapshot_name) expected_calls = [ mock.call(self.snap.req_get()), mock.call(self.snap.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot_id(self): self.hook.append(self.snap.resp_get_succeed()) context = self.manager.getStorageContext('Snapshot') 
context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) id = context.get_id(self.snap.snapshot_name) self.assertEqual(self.snap.snapshot_id, id) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_snapshot_id_with_error(self): self.hook.append(self.snap.resp_get_error()) context = self.manager.getStorageContext('Snapshot') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.get_id, self.snap.snapshot_name) expected_calls = [mock.call(self.snap.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) @ddt.ddt class MoverInterfaceTestCase(StorageObjectTestCaseBase): def setUp(self): super(MoverInterfaceTestCase, self).setUp() self.hook = utils.RequestSideEffect() def test_create_mover_interface(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_task_succeed()) self.hook.append(self.mover.resp_task_succeed()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } context.create(interface) interface['name'] = self.mover.long_interface_name context.create(interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), mock.call(self.mover.req_create_interface( self.mover.long_interface_name[:31])), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mover_interface_name_already_exist(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append( self.mover.resp_create_interface_but_name_already_exist()) context = self.manager.getStorageContext('MoverInterface') 
context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } context.create(interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mover_interface_ip_already_exist(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append( self.mover.resp_create_interface_but_ip_already_exist()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } context.create(interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @ddt.data(fakes.MoverTestData().resp_task_succeed(), fakes.MoverTestData().resp_task_error()) def test_create_mover_interface_with_conflict_vlan_id(self, xml_resp): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append( self.mover.resp_create_interface_with_conflicted_vlan_id()) self.hook.append(xml_resp) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.create, interface) expected_calls = [ 
mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), mock.call(self.mover.req_delete_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_mover_interface_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_task_succeed()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } context.create(interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_mover_interface_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mover.resp_task_error()) context = self.manager.getStorageContext('MoverInterface') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) interface = { 'name': self.mover.interface_name1, 'device_name': self.mover.device_name, 'ip': self.mover.ip_address1, 'mover_name': self.mover.mover_name, 'net_mask': self.mover.net_mask, 'vlan_id': self.mover.vlan_id, } self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.create, interface) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mover_interface(self): self.hook.append(self.mover.resp_get_ref_succeed()) 
        # NOTE(review): this chunk opens mid-test -- the ``def`` line of the
        # test below lies before the visible range.  The body exercises
        # MoverInterface.get(), including the retry with a long interface
        # name (two full get-ref/get request cycles are expected).
        self.hook.append(self.mover.resp_get_succeed())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.mover.interface_name1,
                                  mover_name=self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)
        # Every property the driver reports for a mover interface must be
        # present in the returned mapping.
        property_map = [
            'name',
            'device',
            'up',
            'ipVersion',
            'netMask',
            'ipAddress',
            'vlanid',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        context.get(name=self.mover.long_interface_name,
                    mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_interface_not_found(self):
        """An empty get response maps to STATUS_NOT_FOUND."""
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_without_value())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.mover.interface_name1,
                                  mover_name=self.mover.mover_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_mover_interface(self):
        """Happy-path delete issues get-ref then delete-interface."""
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(ip_addr=self.mover.ip_address1,
                       mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_mover_interface_but_nonexistent(self):
        """Deleting a missing interface is tolerated (no exception)."""
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_delete_interface_but_nonexistent())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(ip_addr=self.mover.ip_address1,
                       mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_delete_mover_interface_invalid_mover_id(self, sleep_mock):
        """Delete retries (refreshing the mover id) after an
        invalid-mover-id response; time.sleep is patched so the backoff
        does not slow the test.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(ip_addr=self.mover.ip_address1,
                       mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_delete_mover_interface_with_error(self):
        """A task error on delete surfaces as EMCPowerMaxXMLAPIError."""
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_error())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.delete,
                          ip_addr=self.mover.ip_address1,
                          mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)


class DNSDomainTestCase(StorageObjectTestCaseBase):
    """Tests for the DNSDomain storage-object context (XML API)."""

    def setUp(self):
        super(DNSDomainTestCase, self).setUp()
        # Ordered queue of canned XML responses consumed by EMCMock.
        self.hook = utils.RequestSideEffect()

    def test_create_dns_domain(self):
        """Happy-path create issues mover get-ref then DNS create."""
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(mover_name=self.mover.mover_name,
                       name=self.dns.domain_name,
                       servers=self.dns.dns_ip_address)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_dns_domain_invalid_mover_id(self, sleep_mock):
        """Create retries with a refreshed mover id after an
        invalid-mover-id response.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(mover_name=self.mover.mover_name,
                       name=self.dns.domain_name,
                       servers=self.dns.dns_ip_address)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_dns_domain_with_error(self):
        """A task error on create surfaces as EMCPowerMaxXMLAPIError."""
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_error())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.create,
                          mover_name=self.mover.mover_name,
                          name=self.mover.domain_name,
                          servers=self.dns.dns_ip_address)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_dns_domain(self):
        """Delete succeeds once and tolerates a task error on the second
        call -- the mover ref is resolved only for the first delete.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())
        self.hook.append(self.dns.resp_task_error())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(mover_name=self.mover.mover_name,
                       name=self.mover.domain_name)

        context.delete(mover_name=self.mover.mover_name,
                       name=self.mover.domain_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_delete()),
            mock.call(self.dns.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_delete_dns_domain_invalid_mover_id(self, sleep_mock):
        """Delete retries with a refreshed mover id after an
        invalid-mover-id response.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(mover_name=self.mover.mover_name,
                       name=self.mover.domain_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_delete()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)


class CIFSServerTestCase(StorageObjectTestCaseBase):
    """Tests for the CIFSServer storage-object context (XML API)."""

    def setUp(self):
        super(CIFSServerTestCase, self).setUp()
        # Ordered queue of canned XML responses consumed by EMCMock.
        self.hook = utils.RequestSideEffect()

    def test_create_cifs_server(self):
        """Create on a physical mover, then on a VDM, then again on the
        VDM where the task error is resolved by a follow-up get showing
        the server already exists.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.cifs_server.resp_task_error())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create CIFS server on mover
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.create(cifs_server_args)

        # Create CIFS server on VDM
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        context.create(cifs_server_args)

        # Create CIFS server on VDM
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        context.create(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_create(self.mover.mover_id,
                                                  False)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_cifs_server_already_exist(self):
        # NOTE(review): as visible here this test only queues responses
        # and wires up the mock -- it never invokes context.create() nor
        # asserts anything, so the queued hooks go unconsumed.  Either
        # the body was truncated in this copy or the test is a no-op;
        # confirm against the upstream source before relying on it.
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_error())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    @mock.patch('time.sleep')
    def test_create_cifs_server_invalid_mover_id(self, sleep_mock):
        """Create retries with a refreshed mover id after an
        invalid-mover-id response.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create CIFS server on mover
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.create(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_create(self.mover.mover_id,
                                                  False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_create(self.mover.mover_id,
                                                  False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_cifs_server_with_error(self):
        """A task error followed by a failed existence check raises
        EMCPowerMaxXMLAPIError.
        """
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_error())
        self.hook.append(self.cifs_server.resp_get_error())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create CIFS server on VDM
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.create,
                          cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_all_cifs_server(self):
        """get_all populates cifs_server_map; a repeated call still
        re-queries the backend (one extra req_get is expected).
        """
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_all(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.vdm.vdm_name, context.cifs_server_map)

        # Get CIFS server from the cache
        status, out = context.get_all(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.vdm.vdm_name, context.cifs_server_map)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_get_all_cifs_server_invalid_mover_id(self, sleep_mock):
        """get_all retries with a refreshed mover id after an
        invalid-mover-id response.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.mover.mover_id, is_vdm=False, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_all(self.mover.mover_name, False)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.mover.mover_name, context.cifs_server_map)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_get_cifs_server(self):
        """get returns all expected properties; a second get is served
        without another backend round trip.
        """
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.cifs_server.cifs_server_name,
                                  mover_name=self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        # Properties the driver must expose for a CIFS server.
        property_map = {
            'name',
            'compName',
            'Aliases',
            'type',
            'interfaces',
            'domain',
            'domainJoined',
            'mover',
            'moverIdIsVdm',
        }
        for prop in property_map:
            self.assertIn(prop, out)

        context.get(name=self.cifs_server.cifs_server_name,
                    mover_name=self.vdm.vdm_name)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_modify_cifs_server(self):
        """Modify joins the domain on a physical mover and un-joins it
        on a VDM (is_vdm defaults when omitted).
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': True,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.modify(cifs_server_args)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': False,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
        }
        context.modify(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.mover.mover_id, is_vdm=False,
                join_domain=True)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True,
                join_domain=False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_modify_cifs_server_but_unjoin_domain(self):
        """An 'already un-joined' response is tolerated on modify."""
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_modify_but_unjoin_domain())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': False,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
        }
        context.modify(cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True,
                join_domain=False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_modify_cifs_server_but_already_join_domain(self):
        """An 'already joined' response is tolerated on modify."""
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(
            self.cifs_server.resp_modify_but_already_join_domain())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': True,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
        }
        context.modify(cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True,
                join_domain=True)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_modify_cifs_server_invalid_mover_id(self, sleep_mock):
        """Modify retries with a refreshed mover id after an
        invalid-mover-id response.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': True,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.modify(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.mover.mover_id, is_vdm=False,
                join_domain=True)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.mover.mover_id, is_vdm=False,
                join_domain=True)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_modify_cifs_server_with_error(self):
        """A task error on modify surfaces as EMCPowerMaxXMLAPIError."""
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_error())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': False,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
        }
        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.modify,
                          cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True,
                join_domain=False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_server(self):
        """Delete works on a physical mover and on a VDM; each delete is
        preceded by a get to resolve the server.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.mover.mover_id, is_vdm=False, join_domain=True))
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False))
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(computer_name=self.cifs_server.cifs_server_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        context.delete(computer_name=self.cifs_server.cifs_server_name,
                       mover_name=self.vdm.vdm_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
            mock.call(self.cifs_server.req_delete(self.mover.mover_id,
                                                  False)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_server_but_not_found(self):
        """Deleting a server that cannot be found (missing mover, then
        missing server) completes without raising.
        """
        self.hook.append(self.mover.resp_get_without_value())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_get_without_value())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(computer_name=self.cifs_server.cifs_server_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        context.delete(computer_name=self.cifs_server.cifs_server_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_server_with_error(self):
        """A task error on delete surfaces as EMCPowerMaxXMLAPIError."""
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.mover.mover_id, is_vdm=False, join_domain=True))
        self.hook.append(self.cifs_server.resp_task_error())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.delete,
                          computer_name=self.cifs_server.cifs_server_name,
                          mover_name=self.mover.mover_name,
                          is_vdm=False)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id, False)),
            mock.call(self.cifs_server.req_delete(self.mover.mover_id,
                                                  False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)


class CIFSShareTestCase(StorageObjectTestCaseBase):
    """Tests for the CIFSShare storage-object context (XML + SSH)."""

    def setUp(self):
        super(CIFSShareTestCase, self).setUp()
        # XML responses for the control API; SSH outputs for the CLI path.
        self.hook = utils.RequestSideEffect()
        self.ssh_hook = utils.SSHSideEffect()

    def test_create_cifs_share(self):
        """Create a share on a VDM and then on a physical mover."""
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.cifs_share.share_name,
                       server_name=self.cifs_share.cifs_server_name[-14:],
                       mover_name=self.vdm.vdm_name,
                       is_vdm=True)

        context.create(name=self.cifs_share.share_name,
                       server_name=self.cifs_share.cifs_server_name[-14:],
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_share.req_create(self.vdm.vdm_id)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_create(self.mover.mover_id,
                                                 False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_cifs_share_invalid_mover_id(self, sleep_mock):
        """Create retries with a refreshed mover id after an
        invalid-mover-id response.
        """
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.cifs_share.share_name,
                       server_name=self.cifs_share.cifs_server_name[-14:],
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_create(self.mover.mover_id,
                                                 False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_create(self.mover.mover_id,
                                                 False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_cifs_share_with_error(self):
        """A task error on create surfaces as EMCPowerMaxXMLAPIError."""
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_share.resp_task_error())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(
            exception.EMCPowerMaxXMLAPIError,
            context.create,
            name=self.cifs_share.share_name,
            server_name=self.cifs_share.cifs_server_name[-14:],
            mover_name=self.vdm.vdm_name,
            is_vdm=True)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_share.req_create(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_share(self):
        """Delete a share hosted on a VDM and one on a physical mover;
        each delete is preceded by a share get to resolve its owner.
        """
        self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())
        self.hook.append(self.cifs_share.resp_get_succeed(
            self.mover.mover_id, False))
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(name=self.cifs_share.share_name,
                       mover_name=self.vdm.vdm_name,
                       is_vdm=True)

        context.delete(name=self.cifs_share.share_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.cifs_share.req_get()),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)),
            mock.call(self.cifs_share.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_delete(self.mover.mover_id,
                                                 False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_cifs_share_not_found(self):
        """A get error raises; a get with no value makes the delete a
        tolerated no-op.
        """
        self.hook.append(self.cifs_share.resp_get_error())
        self.hook.append(self.cifs_share.resp_get_without_value())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.delete,
                          name=self.cifs_share.share_name,
                          mover_name=self.vdm.vdm_name,
                          is_vdm=True)

        context.delete(name=self.cifs_share.share_name,
                       mover_name=self.vdm.vdm_name,
                       is_vdm=True)

        expected_calls = [
            mock.call(self.cifs_share.req_get()),
            mock.call(self.cifs_share.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_delete_cifs_share_invalid_mover_id(self, sleep_mock):
        """Delete retries with a refreshed mover id after an
        invalid-mover-id response.
        """
        self.hook.append(self.cifs_share.resp_get_succeed(
            self.mover.mover_id, False))
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_share.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(name=self.cifs_share.share_name,
                       mover_name=self.mover.mover_name,
                       is_vdm=False)

        expected_calls = [
            mock.call(self.cifs_share.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_delete(self.mover.mover_id,
                                                 False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_share.req_delete(self.mover.mover_id,
                                                 False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_delete_cifs_share_with_error(self):
        """A task error on delete surfaces as EMCPowerMaxXMLAPIError."""
        self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_share.resp_task_error())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.delete,
                          name=self.cifs_share.share_name,
                          mover_name=self.vdm.vdm_name,
                          is_vdm=True)

        expected_calls = [
            mock.call(self.cifs_share.req_get()),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_cifs_share(self):
        """get issues a single share query."""
        self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.get(self.cifs_share.share_name)

        expected_calls = [mock.call(self.cifs_share.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_disable_share_access(self):
        """disable_share_access issues the expected SSH command."""
        self.ssh_hook.append('Command succeeded')

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.disable_share_access(share_name=self.cifs_share.share_name,
                                     mover_name=self.vdm.vdm_name)

        ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_disable_share_access_with_error(self):
        """An SSH failure surfaces as EMCPowerMaxXMLAPIError."""
        self.ssh_hook.append(ex=processutils.ProcessExecutionError(
            stdout=self.cifs_share.fake_output))

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.disable_share_access,
                          share_name=self.cifs_share.share_name,
                          mover_name=self.vdm.vdm_name)

        ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_allow_share_access(self):
        """allow_share_access grants an ACE via the expected command."""
        self.ssh_hook.append(self.cifs_share.output_allow_access())

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.allow_share_access(mover_name=self.vdm.vdm_name,
                                   share_name=self.cifs_share.share_name,
                                   user_name=self.cifs_server.domain_user,
                                   domain=self.cifs_server.domain_name,
                                   access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_allow_share_access_duplicate_ACE(self):
        """A duplicate-ACE failure is tolerated (access already granted)."""
        expt_dup_ace = processutils.ProcessExecutionError(
            stdout=self.cifs_share.output_allow_access_but_duplicate_ace())
        self.ssh_hook.append(ex=expt_dup_ace)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.allow_share_access(mover_name=self.vdm.vdm_name,
                                   share_name=self.cifs_share.share_name,
                                   user_name=self.cifs_server.domain_user,
                                   domain=self.cifs_server.domain_name,
                                   access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_allow_share_access_with_error(self):
        """An unrecognized SSH failure raises EMCPowerMaxXMLAPIError."""
        expt_err = processutils.ProcessExecutionError(
            self.cifs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.allow_share_access,
                          mover_name=self.vdm.vdm_name,
                          share_name=self.cifs_share.share_name,
                          user_name=self.cifs_server.domain_user,
                          domain=self.cifs_server.domain_name,
                          access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_deny_share_access(self):
        """deny_share_access revokes an ACE via the expected command."""
        self.ssh_hook.append('Command succeeded')

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.deny_share_access(mover_name=self.vdm.vdm_name,
                                  share_name=self.cifs_share.share_name,
                                  user_name=self.cifs_server.domain_user,
                                  domain=self.cifs_server.domain_name,
                                  access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_deny_share_access_no_ace(self):
        """A missing-ACE failure is tolerated (access already revoked)."""
        expt_no_ace = processutils.ProcessExecutionError(
            stdout=self.cifs_share.output_deny_access_but_no_ace())
        self.ssh_hook.append(ex=expt_no_ace)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.deny_share_access(mover_name=self.vdm.vdm_name,
                                  share_name=self.cifs_share.share_name,
                                  user_name=self.cifs_server.domain_user,
                                  domain=self.cifs_server.domain_name,
                                  access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_deny_share_access_but_no_user_found(self):
        """A user-not-found failure is tolerated on deny."""
        expt_no_user = processutils.ProcessExecutionError(
            stdout=self.cifs_share.output_deny_access_but_no_user_found())
        self.ssh_hook.append(ex=expt_no_user)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.deny_share_access(mover_name=self.vdm.vdm_name,
                                  share_name=self.cifs_share.share_name,
                                  user_name=self.cifs_server.domain_user,
                                  domain=self.cifs_server.domain_name,
                                  access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_deny_share_access_with_error(self):
        """An unrecognized SSH failure raises EMCPowerMaxXMLAPIError."""
        expt_err = processutils.ProcessExecutionError(
            self.cifs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.deny_share_access,
                          mover_name=self.vdm.vdm_name,
                          share_name=self.cifs_share.share_name,
                          user_name=self.cifs_server.domain_user,
                          domain=self.cifs_server.domain_name,
                          access=constants.CIFS_ACL_FULLCONTROL)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_get_share_access(self):
        """get_share_access parses the CLI output into a user->ACL map."""
        self.ssh_hook.append(fakes.FakeData.cifs_access)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        ret = context.get_share_access(
            mover_name=self.vdm.vdm_name,
            share_name=self.cifs_share.share_name)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_get_access(), True),
        ]
        self.assertEqual(2, len(ret))
        self.assertEqual(constants.CIFS_ACL_FULLCONTROL,
                         ret['administrator'])
        self.assertEqual(constants.CIFS_ACL_FULLCONTROL, ret['guest'])
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_get_share_access_failed(self):
        """An SSH failure surfaces as EMCPowerMaxXMLAPIError."""
        expt_err = processutils.ProcessExecutionError(
            stdout=self.nfs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.get_share_access,
                          mover_name=self.vdm.vdm_name,
                          share_name=self.cifs_share.share_name)

        ssh_calls = [
            mock.call(self.cifs_share.cmd_get_access(), True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_clear_share_access_has_white_list(self):
        """clear_share_access revokes only users outside the white list
        and returns the set of removed users.
        """
        self.ssh_hook.append(fakes.FakeData.cifs_access)
        self.ssh_hook.append('Command succeeded')

        context = self.manager.getStorageContext('CIFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        to_remove = context.clear_share_access(
            mover_name=self.vdm.vdm_name,
            share_name=self.cifs_share.share_name,
            domain=self.cifs_server.domain_name,
            white_list_users=['guest'])

        ssh_calls = [
            mock.call(self.cifs_share.cmd_get_access(), True),
            mock.call(self.cifs_share.cmd_change_access(action='revoke'),
                      True),
        ]
        self.assertEqual({'administrator'}, to_remove)
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)


class NFSShareTestCase(StorageObjectTestCaseBase):
    """Tests for the NFSShare storage-object context (SSH CLI only)."""

    def setUp(self):
        super(NFSShareTestCase, self).setUp()
        # Ordered queue of canned SSH outputs/exceptions.
        self.ssh_hook = utils.SSHSideEffect()

    def test_create_nfs_share(self):
        """Happy-path create issues the expected export command."""
        self.ssh_hook.append(self.nfs_share.output_create())

        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.create(name=self.nfs_share.share_name,
                       mover_name=self.vdm.vdm_name)

        ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_create_nfs_share_with_error(self):
        """An SSH failure on create raises EMCPowerMaxXMLAPIError."""
        expt_err = processutils.ProcessExecutionError(
            stdout=self.nfs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)

        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.create,
                          name=self.nfs_share.share_name,
                          mover_name=self.vdm.vdm_name)

        ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_delete_nfs_share(self):
        """Delete looks the share up first, then removes it."""
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        self.ssh_hook.append(self.nfs_share.output_delete_succeed())

        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.delete(name=self.nfs_share.share_name,
                       mover_name=self.vdm.vdm_name)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), False),
            mock.call(self.nfs_share.cmd_delete(), True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_delete_nfs_share_not_found(self):
        """Deleting a share the lookup cannot find is a no-op."""
        expt_not_found = processutils.ProcessExecutionError(
            stdout=self.nfs_share.output_get_but_not_found())
        self.ssh_hook.append(ex=expt_not_found)

        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.delete(name=self.nfs_share.share_name,
                       mover_name=self.vdm.vdm_name)

        ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    @mock.patch('time.sleep')
    def test_delete_nfs_share_locked(self, sleep_mock):
        """A locked share makes delete retry until it succeeds."""
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        expt_locked = processutils.ProcessExecutionError(
            stdout=self.nfs_share.output_delete_but_locked())
        self.ssh_hook.append(ex=expt_locked)
        self.ssh_hook.append(self.nfs_share.output_delete_succeed())

        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.delete(name=self.nfs_share.share_name,
                       mover_name=self.vdm.vdm_name)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), False),
            mock.call(self.nfs_share.cmd_delete(), True),
            mock.call(self.nfs_share.cmd_delete(), True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)
        self.assertTrue(sleep_mock.called)

    def test_delete_nfs_share_with_error(self):
        """An unrecognized SSH failure on delete raises
        EMCPowerMaxXMLAPIError.
        """
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        expt_err = processutils.ProcessExecutionError(
            stdout=self.nfs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)

        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.delete,
                          name=self.nfs_share.share_name,
                          mover_name=self.vdm.vdm_name)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), False),
            mock.call(self.nfs_share.cmd_delete(), True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_get_nfs_share(self):
        """A second get is served from cache: only one SSH call."""
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))

        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.get(name=self.nfs_share.share_name,
                    mover_name=self.vdm.vdm_name)

        # Get NFS share from cache
        context.get(name=self.nfs_share.share_name,
                    mover_name=self.vdm.vdm_name)

        ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_get_nfs_share_not_found(self):
        """A not-found result is not cached: both gets hit the backend."""
        expt_not_found = processutils.ProcessExecutionError(
            stdout=self.nfs_share.output_get_but_not_found())
        self.ssh_hook.append(ex=expt_not_found)
        self.ssh_hook.append(self.nfs_share.output_get_but_not_found())

        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.get(name=self.nfs_share.share_name,
                    mover_name=self.vdm.vdm_name)

        context.get(name=self.nfs_share.share_name,
                    mover_name=self.vdm.vdm_name)

        ssh_calls = [
            mock.call(self.nfs_share.cmd_get(), False),
            mock.call(self.nfs_share.cmd_get(), False),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_get_nfs_share_with_error(self):
        """An unrecognized SSH failure on get raises
        EMCPowerMaxXMLAPIError.
        """
        expt_err = processutils.ProcessExecutionError(
            stdout=self.nfs_share.fake_output)
        self.ssh_hook.append(ex=expt_err)

        context = self.manager.getStorageContext('NFSShare')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCPowerMaxXMLAPIError,
                          context.get,
                          name=self.nfs_share.share_name,
                          mover_name=self.vdm.vdm_name)

        ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_allow_share_access(self):
        # NOTE(review): this test continues past the end of the visible
        # range; only the setup shown here is reproduced.
        rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts)
        rw_hosts.append(self.nfs_share.nfs_host_ip)
        ro_hosts = copy.deepcopy(self.nfs_share.ro_hosts)
        ro_hosts.append(self.nfs_share.nfs_host_ip)
        self.ssh_hook.append(self.nfs_share.output_get_succeed(
            rw_hosts=self.nfs_share.rw_hosts,
            ro_hosts=self.nfs_share.ro_hosts))
        self.ssh_hook.append(self.nfs_share.output_set_access_success())
self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RO) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)), mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = 
self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.allow_share_access, share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) ssh_calls = [mock.call(self.nfs_share.cmd_get())] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_rw_share_access(self): rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts) rw_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.deny_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts, self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_clear_share_access(self): hosts = ['192.168.1.1', '192.168.1.3'] self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=[hosts[0]], ro_hosts=[hosts[1]])) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.clear_share_access(share_name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name, white_list_hosts=hosts) 
ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=[hosts[0]], ro_hosts=[hosts[1]])), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_ro_share_access(self): ro_hosts = copy.deepcopy(self.nfs_share.ro_hosts) ro_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.deny_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) context.deny_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts, self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.deny_share_access, share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_get())] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_rw_share_with_error(self): rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts) 
rw_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.deny_share_access, share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts, self.nfs_share.ro_hosts)), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_clear_share_access_failed_to_get_share(self): self.ssh_hook.append("no output.") context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCPowerMaxXMLAPIError, context.clear_share_access, share_name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name, white_list_hosts=None) context.conn['SSH'].run_ssh.assert_called_once_with( self.nfs_share.cmd_get(), False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0056705 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerscale/0000775000175000017500000000000000000000000025632 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerscale/__init__.py0000664000175000017500000000000000000000000027731 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerscale/test_powerscale.py0000664000175000017500000012235100000000000031413 0ustar00zuulzuul00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_log import log from oslo_utils import units from manila.common import constants as const from manila import exception from manila.i18n import _ from manila.share.drivers.dell_emc.plugins.powerscale import powerscale from manila import test LOG = log.getLogger(__name__) @ddt.ddt class PowerScaleTest(test.TestCase): """Integration test for the PowerScale Manila driver.""" POWERSCALE_ADDR = '10.0.0.1' API_URL = 'https://%s:8080' % POWERSCALE_ADDR AUTH = ('admin', 'admin') ROOT_DIR = '/ifs/manila-test' SHARE_NAME = 'share-foo' SHARE_DIR = ROOT_DIR + '/' + SHARE_NAME ADMIN_HOME_DIR = '/ifs/home/admin' CLONE_DIR = ROOT_DIR + '/clone-dir' class MockConfig(object): def safe_get(self, value): if value == 'emc_nas_server': return '10.0.0.1' elif value == 'emc_nas_server_port': return '8080' elif value == 'emc_nas_login': return 'admin' elif value == 'emc_nas_password': return 'a' elif value == 'emc_nas_root_dir': return '/ifs/manila-test' elif value == 'powerscale_dir_permission': return '0777' else: return None class MockInvalidConfig(object): def safe_get(self, value): if value == 'emc_nas_server': return '10.0.0.1' elif value == 'emc_nas_server_port': return '8080' elif 
value == 'emc_nas_login': return 'admin' elif value == 'emc_nas_root_dir': return '/ifs/manila-test' else: return None @mock.patch( 'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.' 'powerscale_api.PowerScaleApi', autospec=True) def setUp(self, mock_isi_api): super(PowerScaleTest, self).setUp() self._mock_powerscale_api = mock_isi_api.return_value self.storage_connection = powerscale.PowerScaleStorageConnection(LOG) self.mock_context = mock.Mock('Context') self.mock_emc_driver = mock.Mock('EmcDriver') self.mock_emc_driver.attach_mock(self.MockConfig(), 'configuration') self.storage_connection.connect( self.mock_emc_driver, self.mock_context) def test_allow_access(self): self.assertRaises( NotImplementedError, self.storage_connection.allow_access, self.mock_context, share=None, access=None, share_server=None, ) def test_deny_access(self): self.assertRaises( NotImplementedError, self.storage_connection.deny_access, self.mock_context, share=None, access=None, share_server=None, ) def test_create_share_nfs(self): share_path = self.SHARE_DIR self.assertFalse(self._mock_powerscale_api.create_directory.called) self.assertFalse(self._mock_powerscale_api.create_nfs_export.called) # create the share share = {"name": self.SHARE_NAME, "share_proto": 'NFS', "size": 8} location = self.storage_connection.create_share(self.mock_context, share, None) # verify location and API call made path = '%s:%s' % (self.POWERSCALE_ADDR, self.SHARE_DIR) expected_location = [{'is_admin_only': False, 'metadata': {"preferred": True}, 'path': path}] self.assertEqual(expected_location, location) self._mock_powerscale_api.create_directory.assert_called_with( share_path, False) self._mock_powerscale_api.create_nfs_export.assert_called_with( share_path) # verify directory quota call made self._mock_powerscale_api.quota_create.assert_called_with( share_path, 'directory', 8 * units.Gi) def test_create_share_cifs(self): self.assertFalse(self._mock_powerscale_api.create_directory.called) 
self.assertFalse(self._mock_powerscale_api.create_smb_share.called) # create the share share = {"name": self.SHARE_NAME, "share_proto": 'CIFS', "size": 8} location = self.storage_connection.create_share(self.mock_context, share, None) path = '\\\\{0}\\{1}'.format(self.POWERSCALE_ADDR, self.SHARE_NAME) expected_location = [{'is_admin_only': False, 'metadata': {"preferred": True}, 'path': path}] self.assertEqual(expected_location, location) self._mock_powerscale_api.create_directory.assert_called_once_with( self.SHARE_DIR, False) self._mock_powerscale_api.create_smb_share.assert_called_once_with( self.SHARE_NAME, self.SHARE_DIR) # verify directory quota call made self._mock_powerscale_api.quota_create.assert_called_with( self.SHARE_DIR, 'directory', 8 * units.Gi) def test_create_share_invalid_share_protocol(self): share = {"name": self.SHARE_NAME, "share_proto": 'FOO_PROTOCOL'} self.assertRaises( exception.InvalidShare, self.storage_connection.create_share, self.mock_context, share, share_server=None) def test_create_share_nfs_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": 'NFS'} self._mock_powerscale_api.create_nfs_export.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share, self.mock_context, share, share_server=None) def test_create_share_cifs_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'} self._mock_powerscale_api.create_smb_share.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share, self.mock_context, share, share_server=None) def test_create_directory_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": 'NFS'} self._mock_powerscale_api.create_directory.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share, self.mock_context, share, share_server=None) def test_create_snapshot(self): # create snapshot snapshot_name = 
"snapshot01" snapshot_path = '/ifs/home/admin' snapshot = {'name': snapshot_name, 'share_name': snapshot_path} self.storage_connection.create_snapshot(self.mock_context, snapshot, None) # verify the create snapshot API call is executed self._mock_powerscale_api.create_snapshot.assert_called_with( snapshot_name, snapshot_path) def test_create_snapshot_backend_failure(self): snapshot_name = "snapshot01" snapshot_path = '/ifs/home/admin' snapshot = {'name': snapshot_name, 'share_name': snapshot_path} self._mock_powerscale_api.create_snapshot.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.create_snapshot, self.mock_context, snapshot, None) def test_create_share_from_snapshot_nfs(self): # assertions self.assertFalse(self._mock_powerscale_api.create_nfs_export.called) self.assertFalse(self._mock_powerscale_api.clone_snapshot.called) snapshot_name = "snapshot01" snapshot_path = '/ifs/home/admin' # execute method under test snapshot = {'name': snapshot_name, 'share_name': snapshot_path} share = {"name": self.SHARE_NAME, "share_proto": 'NFS', 'size': 5} location = self.storage_connection.create_share_from_snapshot( self.mock_context, share, snapshot, None) # verify NFS export created at expected location self._mock_powerscale_api.create_nfs_export.assert_called_with( self.SHARE_DIR) # verify clone_directory(container_path) method called self._mock_powerscale_api.clone_snapshot.assert_called_once_with( snapshot_name, self.SHARE_DIR) path = '{0}:{1}'.format( self.POWERSCALE_ADDR, self.SHARE_DIR) expected_location = {'is_admin_only': False, 'metadata': {"preferred": True}, 'path': path} self.assertEqual(expected_location, location[0]) # verify directory quota call made self._mock_powerscale_api.quota_create.assert_called_with( self.SHARE_DIR, 'directory', 5 * units.Gi) def test_create_share_from_snapshot_cifs(self): # assertions self.assertFalse(self._mock_powerscale_api.create_smb_share.called) 
self.assertFalse(self._mock_powerscale_api.clone_snapshot.called) # setup snapshot_name = "snapshot01" snapshot_path = '/ifs/home/admin' new_share_name = 'clone-dir' # execute method under test snapshot = {'name': snapshot_name, 'share_name': snapshot_path} share = {"name": new_share_name, "share_proto": 'CIFS', "size": 2} location = self.storage_connection.create_share_from_snapshot( self.mock_context, share, snapshot, None) # verify call made to create new CIFS share self._mock_powerscale_api.create_smb_share.assert_called_once_with( new_share_name, self.CLONE_DIR) self._mock_powerscale_api.clone_snapshot.assert_called_once_with( snapshot_name, self.CLONE_DIR) path = '\\\\{0}\\{1}'.format(self.POWERSCALE_ADDR, new_share_name) expected_location = {'is_admin_only': False, 'metadata': {"preferred": True}, 'path': path} self.assertEqual(expected_location, location[0]) # verify directory quota call made expected_share_path = '{0}/{1}'.format(self.ROOT_DIR, new_share_name) self._mock_powerscale_api.quota_create.assert_called_with( expected_share_path, 'directory', 2 * units.Gi) def test_delete_share_nfs(self): share = {"name": self.SHARE_NAME, "share_proto": 'NFS'} fake_share_num = 42 self._mock_powerscale_api.lookup_nfs_export.return_value = ( fake_share_num) self.assertFalse(self._mock_powerscale_api.delete_nfs_share.called) # delete the share self.storage_connection.delete_share(self.mock_context, share, None) # verify share delete self._mock_powerscale_api.delete_nfs_share.assert_called_with( fake_share_num) def test_delete_share_cifs(self): self.assertFalse(self._mock_powerscale_api.delete_smb_share.called) # delete the share share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'} self.storage_connection.delete_share(self.mock_context, share, None) # verify share deleted self._mock_powerscale_api.delete_smb_share.assert_called_with( self.SHARE_NAME) @mock.patch( 'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.LOG') def 
test_delete_share_invalid_share_proto(self, mock_log): share = {"name": self.SHARE_NAME, "share_proto": 'FOO_PROTOCOL'} self.storage_connection.delete_share(self.mock_context, share, None) mock_log.warning.assert_called_once_with( 'Unsupported share type: FOO_PROTOCOL.') def test_delete_nfs_share_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": 'NFS'} self._mock_powerscale_api.delete_nfs_share.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.delete_share, self.mock_context, share, None ) def test_delete_nfs_share_share_does_not_exist(self): self._mock_powerscale_api.lookup_nfs_export.return_value = None share = {"name": self.SHARE_NAME, "share_proto": 'NFS'} # verify the calling delete on a non-existent share returns and does # not throw exception self.storage_connection.delete_share(self.mock_context, share, None) def test_delete_cifs_share_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'} self._mock_powerscale_api.delete_smb_share.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.delete_share, self.mock_context, share, None ) def test_delete_cifs_share_share_does_not_exist(self): share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'} self._mock_powerscale_api.lookup_smb_share.return_value = None # verify the calling delete on a non-existent share returns and does # not throw exception self.storage_connection.delete_share(self.mock_context, share, None) @mock.patch( 'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.LOG' ) def test_delete_quota_success(self, mock_log): path = '/path/to/quota' quota_id = '123' quota_data = {'id': quota_id} self._mock_powerscale_api.quota_get.return_value = quota_data self._mock_powerscale_api.delete_quota.return_value = True self.storage_connection._delete_quota(path) self._mock_powerscale_api.quota_get.assert_called_once_with( path, 'directory') 
self._mock_powerscale_api.delete_quota.assert_called_once_with( quota_id) mock_log.debug.assert_called_once_with(f'Removing quota {quota_id}') mock_log.warning.assert_not_called() @mock.patch( 'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.LOG' ) def test_delete_quota_failure(self, mock_log): path = '/path/to/quota' quota_id = '123' quota_data = {'id': quota_id} self._mock_powerscale_api.quota_get.return_value = quota_data self._mock_powerscale_api.delete_quota.return_value = False self.storage_connection._delete_quota(path) self._mock_powerscale_api.quota_get.assert_called_once_with( path, 'directory') self._mock_powerscale_api.delete_quota.assert_called_once_with( quota_id) mock_log.debug.assert_called_once_with(f'Removing quota {quota_id}') mock_log.error.assert_called_once_with( _('Failed to delete quota "%(quota_id)s" for ' 'directory "%(dir)s".') % {'quota_id': quota_id, 'dir': path}) mock_log.warning.assert_not_called() @mock.patch( 'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.LOG' ) def test_delete_quota_not_found(self, mock_log): path = '/path/to/quota' self._mock_powerscale_api.quota_get.return_value = None self.storage_connection._delete_quota(path) self._mock_powerscale_api.quota_get.assert_called_once_with( path, 'directory') self._mock_powerscale_api.delete_quota.assert_not_called() mock_log.debug.assert_not_called() mock_log.warning.assert_called_once_with(f'Quota not found for {path}') @mock.patch( 'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.LOG' ) def test_delete_directory_success(self, mock_log): path = '/path/to/directory' self._mock_powerscale_api.is_path_existent.return_value = True self._mock_powerscale_api.delete_path.return_value = True self.storage_connection._delete_directory(path) self._mock_powerscale_api.delete_path.assert_called_once_with( path, recursive=True) mock_log.debug.assert_called_once_with(f'Removing directory {path}') mock_log.warning.assert_not_called() @mock.patch( 
'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.LOG' ) def test_delete_directory_failure(self, mock_log): path = '/path/to/directory' self._mock_powerscale_api.is_path_existent.return_value = True self._mock_powerscale_api.delete_path.return_value = False self.storage_connection._delete_directory(path) self._mock_powerscale_api.delete_path.assert_called_once_with( path, recursive=True) mock_log.debug.assert_called_once_with(f'Removing directory {path}') mock_log.error.assert_called_once_with( _('Failed to delete directory "%(dir)s".') % {'dir': path}) mock_log.warning.assert_not_called() @mock.patch( 'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.LOG' ) def test_delete_directory_not_found(self, mock_log): path = '/path/to/directory' self._mock_powerscale_api.is_path_existent.return_value = False self.storage_connection._delete_directory(path) self._mock_powerscale_api.delete_path.assert_not_called() mock_log.warning.assert_called_once_with( _('Directory not found for %s') % path) def test_delete_snapshot(self): # create a snapshot snapshot_name = "snapshot01" snapshot_path = '/ifs/home/admin' snapshot = {'name': snapshot_name, 'share_name': snapshot_path} self.assertFalse(self._mock_powerscale_api.delete_snapshot.called) # delete the created snapshot self.storage_connection.delete_snapshot(self.mock_context, snapshot, None) # verify the API call was made to delete the snapshot self._mock_powerscale_api.delete_snapshot.assert_called_once_with( snapshot_name) def test_delete_snapshot_failure(self): snapshot = {'name': 'test_snapshot'} self._mock_powerscale_api.delete_snapshot.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.delete_snapshot, self.mock_context, snapshot, None) self._mock_powerscale_api.delete_snapshot.assert_called_once_with( snapshot['name']) def test_ensure_share(self): share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'} self.assertRaises(NotImplementedError, 
self.storage_connection.ensure_share, self.mock_context, share, None) @mock.patch( 'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.' 'powerscale_api.PowerScaleApi', autospec=True) def test_connect(self, mock_isi_api): storage_connection = powerscale.PowerScaleStorageConnection(LOG) # execute method under test storage_connection.connect( self.mock_emc_driver, self.mock_context) # verify connect sets driver params appropriately mock_config = self.MockConfig() server_addr = mock_config.safe_get('emc_nas_server') self.assertEqual(server_addr, storage_connection._server) expected_port = mock_config.safe_get('emc_nas_server_port') self.assertEqual(expected_port, storage_connection._port) self.assertEqual('https://{0}:{1}'.format(server_addr, expected_port), storage_connection._server_url) expected_username = mock_config.safe_get('emc_nas_login') self.assertEqual(expected_username, storage_connection._username) expected_password = mock_config.safe_get('emc_nas_password') self.assertEqual(expected_password, storage_connection._password) self.assertFalse(storage_connection._verify_ssl_cert) expected_dir_permission = mock_config.safe_get( 'powerscale_dir_permission') self.assertEqual(expected_dir_permission, storage_connection._dir_permission) @mock.patch( 'manila.share.drivers.dell_emc.plugins.powerscale.powerscale.' 
'powerscale_api.PowerScaleApi', autospec=True) def test_connect_root_dir_does_not_exist(self, mock_isi_api): mock_powerscale_api = mock_isi_api.return_value mock_powerscale_api.is_path_existent.return_value = False storage_connection = powerscale.PowerScaleStorageConnection(LOG) # call method under test storage_connection.connect(self.mock_emc_driver, self.mock_context) mock_powerscale_api.create_directory.assert_called_once_with( self.ROOT_DIR, recursive=True) def test_connect_invalid_config(self): mock_emc_driver = mock.Mock('EmcDriver') mock_emc_driver.attach_mock(self.MockInvalidConfig(), 'configuration') self.assertRaises(exception.BadConfigurationException, self.storage_connection.connect, mock_emc_driver, self.mock_context ) def test_update_share_stats(self): self._mock_powerscale_api.get_space_stats.return_value = { 'total': 1000 * units.Gi, 'free': 100 * units.Gi, } self._mock_powerscale_api.get_allocated_space.return_value = 2110.0 stats_dict = {'share_backend_name': 'PowerScale_backend'} self.storage_connection.update_share_stats(stats_dict) expected_pool_stats = { 'pool_name': 'PowerScale_backend', 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'max_over_subscription_ratio': None, 'thin_provisioning': True, 'total_capacity_gb': 1000, 'free_capacity_gb': 100, 'allocated_capacity_gb': 2110.0, 'qos': False, } expected_stats = { 'share_backend_name': 'PowerScale_backend', 'driver_version': powerscale.VERSION, 'storage_protocol': 'NFS_CIFS', 'pools': [expected_pool_stats] } self.assertEqual(expected_stats, stats_dict) def test_get_network_allocations_number(self): # call method under test num = self.storage_connection.get_network_allocations_number() self.assertEqual(0, num) def test_extend_share(self): quota_id = 'abcdef' new_share_size = 8 share = { "name": self.SHARE_NAME, "share_proto": 'NFS', "size": new_share_size } self._mock_powerscale_api.quota_get.return_value = {'id': quota_id} 
self.assertFalse(self._mock_powerscale_api.quota_set.called) self.storage_connection.extend_share(share, new_share_size) share_path = '{0}/{1}'.format(self.ROOT_DIR, self.SHARE_NAME) expected_quota_size = new_share_size * units.Gi self._mock_powerscale_api.quota_set.assert_called_once_with( share_path, 'directory', expected_quota_size) def test_update_access_add_nfs(self): share = { "name": self.SHARE_NAME, "share_proto": 'NFS', } fake_export_id = 4 self._mock_powerscale_api.lookup_nfs_export.return_value = ( fake_export_id) self._mock_powerscale_api.get_nfs_export.return_value = { 'clients': [], 'read_only_clients': [] } nfs_access = { 'access_type': 'ip', 'access_to': '10.1.1.10', 'access_level': const.ACCESS_LEVEL_RW, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [nfs_access] self._mock_powerscale_api.modify_nfs_export_access.return_value = True rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], [], share_server=None) expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'active' } } self._mock_powerscale_api.modify_nfs_export_access. 
\ assert_called_once_with(fake_export_id, [], ['10.1.1.10']) self.assertEqual(expected_rule_map, rule_map) def test_update_access_add_cifs(self): share = { "name": self.SHARE_NAME, "share_proto": 'CIFS', } access = { 'access_type': 'user', 'access_to': 'foo', 'access_level': const.ACCESS_LEVEL_RW, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [access] self._mock_powerscale_api.get_user_sid.return_value = { 'id': 'SID:S-1-5-22', 'name': 'foo', 'type': 'user', } rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) expected_permissions = [ { "permission": "change", "permission_type": "allow", "trustee": { "id": "SID:S-1-5-22", "name": "foo", "type": "user" } } ] self._mock_powerscale_api.modify_smb_share_access.\ assert_called_once_with( self.SHARE_NAME, host_acl=[], permissions=expected_permissions) expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'active' } } self.assertEqual(expected_rule_map, rule_map) def test_update_access_delete_nfs(self): share = { "name": self.SHARE_NAME, "share_proto": 'NFS', } fake_export_id = 4 self._mock_powerscale_api.lookup_nfs_export.return_value = ( fake_export_id) # simulate an IP added to the whitelist ip_addr = '10.0.0.4' ip_addr_ro = '10.0.0.50' self._mock_powerscale_api.get_nfs_export.return_value = { 'clients': [ip_addr], 'read_only_clients': [ip_addr_ro]} access_rules = [] self._mock_powerscale_api.modify_nfs_export_access.return_value = True rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) self._mock_powerscale_api.modify_nfs_export_access. 
\ assert_called_once_with(fake_export_id, [], []) self.assertEqual({}, rule_map) def test_update_access_delete_cifs(self): share = { "name": self.SHARE_NAME, "share_proto": 'CIFS', } access_rules = [] self._mock_powerscale_api.lookup_smb_share.return_value = { 'permissions': [ { 'permission': 'change', 'permission_type': 'allow', 'trustee': { 'id': 'SID:S-1-5-21', 'name': 'newuser', 'type': 'user', } } ] } self._mock_powerscale_api.modify_smb_share_access.return_value = None rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) self._mock_powerscale_api.modify_smb_share_access.\ assert_called_once_with( self.SHARE_NAME, host_acl=[], permissions=[]) self.assertEqual({}, rule_map) def test_update_access_nfs_share_not_found(self): share = { "name": self.SHARE_NAME, "share_proto": 'NFS', } access = { 'access_type': 'user', 'access_to': 'foouser', 'access_level': const.ACCESS_LEVEL_RW, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [access] self._mock_powerscale_api.lookup_nfs_export.return_value = None rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'error' } } self.assertEqual(expected_rule_map, rule_map) def test_update_access_nfs_http_error_on_clear_rules(self): share = { "name": self.SHARE_NAME, "share_proto": 'NFS', } access = { 'access_type': 'user', 'access_to': 'foouser', 'access_level': const.ACCESS_LEVEL_RW, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [access] self._mock_powerscale_api.modify_nfs_export_access.return_value = False rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'error' } } self.assertEqual(expected_rule_map, rule_map) def test_update_access_cifs_http_error_on_clear_rules(self): share = { "name": self.SHARE_NAME, 
"share_proto": 'CIFS', } access = { 'access_type': 'user', 'access_to': 'foo', 'access_level': const.ACCESS_LEVEL_RW, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [access] self._mock_powerscale_api.modify_smb_share_access.return_value = False rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, None, None) expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'error' } } self.assertEqual(expected_rule_map, rule_map) def test_update_access_cifs_invalid_user_access_level(self): share = { "name": self.SHARE_NAME, "share_proto": 'CIFS', } access = { 'access_type': 'user', 'access_to': 'foo', 'access_level': 'fake', 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [access] self._mock_powerscale_api.modify_smb_share_access.return_value = True rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'error' } } self.assertEqual(expected_rule_map, rule_map) def test_update_access_cifs_user_not_found(self): share = { "name": self.SHARE_NAME, "share_proto": 'CIFS', } access = { 'access_type': 'user', 'access_to': 'foo', 'access_level': const.ACCESS_LEVEL_RW, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [access] self._mock_powerscale_api.get_user_sid.return_value = None self._mock_powerscale_api.modify_smb_share_access.return_value = True rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'error' } } self.assertEqual(expected_rule_map, rule_map) def test_update_access_cifs_invalid_access_type(self): share = { "name": self.SHARE_NAME, "share_proto": 'CIFS', } access = { 'access_type': 'foo', 'access_to': 'foo', 'access_level': const.ACCESS_LEVEL_RW, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [access] 
rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'error' } } self.assertEqual(expected_rule_map, rule_map) def test_update_access_recover_nfs(self): # verify that new ips are added and ips not in rules are removed share = { "name": self.SHARE_NAME, "share_proto": 'NFS', } fake_export_id = 4 self._mock_powerscale_api.lookup_nfs_export.return_value = ( fake_export_id) self._mock_powerscale_api.get_nfs_export.return_value = { 'clients': ['10.1.1.8'], 'read_only_clients': ['10.2.0.2'] } nfs_access_1 = { 'access_type': 'ip', 'access_to': '10.1.1.10', 'access_level': const.ACCESS_LEVEL_RW, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } nfs_access_2 = { 'access_type': 'ip', 'access_to': '10.1.1.2', 'access_level': const.ACCESS_LEVEL_RO, 'access_id': '19960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [nfs_access_1, nfs_access_2] self._mock_powerscale_api.modify_nfs_export_access.return_value = True rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'active' }, '19960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'active' } } self._mock_powerscale_api.modify_nfs_export_access. 
\ assert_called_once_with(fake_export_id, ['10.1.1.2'], ['10.1.1.10']) self.assertEqual(expected_rule_map, rule_map) def test_update_access_recover_cifs(self): share = { "name": self.SHARE_NAME, "share_proto": 'CIFS', } self._mock_powerscale_api.get_user_sid.return_value = { 'id': 'SID:S-1-5-22', 'name': 'testuser', 'type': 'user', } self._mock_powerscale_api.modify_smb_share_access.return_value = True access_1 = { 'access_type': 'ip', 'access_to': '10.1.1.10', 'access_level': const.ACCESS_LEVEL_RW, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08' } access_2 = { 'access_type': 'user', 'access_to': 'testuser', 'access_level': const.ACCESS_LEVEL_RO, 'access_id': '19960614-8574-4e03-89cf-7cf267b0bd08' } access_rules = [access_1, access_2] rule_map = self.storage_connection.update_access( self.mock_context, share, access_rules, [], []) expected_data = { 'host_acl': ['allow:10.1.1.10'], 'permissions': [ { 'permission': 'read', 'permission_type': 'allow', 'trustee': { 'id': 'SID:S-1-5-22', 'name': 'testuser', 'type': 'user', } } ] } expected_rule_map = { '09960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'active' }, '19960614-8574-4e03-89cf-7cf267b0bd08': { 'state': 'active' } } self._mock_powerscale_api.lookup_smb_share.assert_not_called() self._mock_powerscale_api.get_user_sid.assert_called_once_with( 'testuser') self._mock_powerscale_api.modify_smb_share_access.\ assert_called_once_with( self.SHARE_NAME, host_acl=expected_data['host_acl'], permissions=expected_data['permissions'] ) self.assertEqual(expected_rule_map, rule_map) def test_update_access_with_cifs_ip_readonly(self): # Note: Driver does not currently support readonly access for "ip" type share = {'name': self.SHARE_NAME, 'share_proto': 'CIFS'} access = {'access_type': 'ip', 'access_to': '10.1.1.10', 'access_level': const.ACCESS_LEVEL_RO, 'access_id': '09960614-8574-4e03-89cf-7cf267b0bd08'} rule_map = self.storage_connection.update_access( self.mock_context, share, [access], None, None) expected_rule_map 
= { '09960614-8574-4e03-89cf-7cf267b0bd08': {'state': 'error'}} self.assertEqual(expected_rule_map, rule_map) def test_delete_quota_when_quota_exists(self): path = '/path/to/quota' quota_id = '123' quota_data = {'id': quota_id} self._mock_powerscale_api.quota_get.return_value = quota_data self._mock_powerscale_api.delete_quota.return_value = True self.storage_connection._delete_quota(path) self._mock_powerscale_api.quota_get.assert_called_once_with( path, 'directory') self._mock_powerscale_api.delete_quota.assert_called_once_with( quota_id) def test_delete_quota_when_quota_does_not_exist(self): path = '/path/to/quota' self._mock_powerscale_api.quota_get.return_value = None self.storage_connection._delete_quota(path) self._mock_powerscale_api.quota_get.assert_called_once_with( path, 'directory') self._mock_powerscale_api.delete_quota.assert_not_called() def test_delete_directory_when_path_exists(self): path = '/path/to/directory' self.storage_connection._delete_directory(path) self._mock_powerscale_api.is_path_existent.assert_called_with(path) self._mock_powerscale_api.delete_path.assert_called_with( path, recursive=True) def test_delete_directory_when_path_does_not_exist(self): path = '/path/to/directory' self._mock_powerscale_api.is_path_existent.return_value = False self.storage_connection._delete_directory(path) self._mock_powerscale_api.is_path_existent.assert_called_with(path) self._mock_powerscale_api.delete_path.assert_not_called() def test_get_backend_info(self): self._mock_powerscale_api.get_cluster_version.return_value = '1.0' result = self.storage_connection.get_backend_info(None) expected_info = { 'driver_version': powerscale.VERSION, 'cluster_version': '1.0', 'rest_server': self.POWERSCALE_ADDR, 'rest_port': '8080', } self.assertEqual(expected_info, result) def test_ensure_shares_nfs_share_exists(self): share = { 'id': '123', 'share_proto': 'NFS', 'name': 'my_share', } container_path = '/ifs/my_share' location = '10.0.0.1:/ifs/my_share' 
self.storage_connection._get_container_path = mock.MagicMock( return_value=container_path) self._mock_powerscale_api.lookup_nfs_export.return_value = '123' result = self.storage_connection.ensure_shares(None, [share]) expected_result = { '123': { 'export_locations': [location], 'status': 'available', 'reapply_access_rules': True, } } self.assertEqual(result, expected_result) def test_ensure_shares_cifs_share_exists(self): share = { 'id': '123', 'share_proto': 'CIFS', 'name': 'my_share', } location = '\\\\10.0.0.1\\my_share' self._mock_powerscale_api.lookup_smb_share.return_value = share result = self.storage_connection.ensure_shares(None, [share]) expected_result = { '123': { 'export_locations': [location], 'status': 'available', 'reapply_access_rules': True, } } self.assertEqual(result, expected_result) def test_ensure_shares_nfs_share_does_not_exist(self): share = { 'id': '123', 'share_proto': 'NFS', 'name': 'my_share', } self._mock_powerscale_api.lookup_nfs_export.return_value = None result = self.storage_connection.ensure_shares(None, [share]) expected_result = { '123': { 'export_locations': [], 'status': 'error', 'reapply_access_rules': False, } } self.assertEqual(result, expected_result) def test_ensure_shares_cifs_share_does_not_exist(self): share = { 'id': '123', 'share_proto': 'CIFS', 'name': 'my_share', } self._mock_powerscale_api.lookup_smb_share.return_value = None result = self.storage_connection.ensure_shares(None, [share]) expected_result = { '123': { 'export_locations': [], 'status': 'error', 'reapply_access_rules': False, } } self.assertEqual(result, expected_result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerscale/test_powerscale_api.py0000664000175000017500000012546000000000000032250 0ustar00zuulzuul00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_serialization import jsonutils as json import requests import requests_mock from manila import exception from manila.share.drivers.dell_emc.plugins.powerscale import powerscale_api from manila import test @ddt.ddt class PowerScaleApiTest(test.TestCase): @mock.patch('manila.share.drivers.dell_emc.plugins.powerscale.' 'powerscale_api.PowerScaleApi.create_session') def setUp(self, mockup_create_session): super(PowerScaleApiTest, self).setUp() mockup_create_session.return_value = True self._mock_url = 'https://localhost:8080' self.username = 'admin' self.password = 'pwd' self.dir_permission = '0777' self.powerscale_api = powerscale_api.PowerScaleApi( self._mock_url, self.username, self.password, dir_permission=self.dir_permission ) self.powerscale_api_threshold = powerscale_api.PowerScaleApi( self._mock_url, self.username, self.password, dir_permission=self.dir_permission, threshold_limit=80 ) @mock.patch('manila.share.drivers.dell_emc.plugins.powerscale.' 
'powerscale_api.PowerScaleApi.create_session') def test__init__login_failure(self, mockup_create_session): mockup_create_session.return_value = False self.assertRaises( exception.BadConfigurationException, self.powerscale_api.__init__, self._mock_url, self.username, self.password, False, None, self.dir_permission ) def test__verify_cert(self): verify_cert = self.powerscale_api.verify_ssl_cert certificate_path = self.powerscale_api.certificate_path self.powerscale_api.verify_ssl_cert = True self.powerscale_api.certificate_path = "fake_certificate_path" self.assertEqual(self.powerscale_api._verify_cert, self.powerscale_api.certificate_path) self.powerscale_api.verify_ssl_cert = verify_cert self.powerscale_api.certificate_path = certificate_path @mock.patch('requests.Session.request') def test_create_session_success(self, mock_request): mock_response = mock.Mock() mock_response.status_code = 201 mock_response.cookies = {'isisessid': 'test_session_token', 'isicsrf': 'test_csrf_token'} mock_request.return_value = mock_response result = self.powerscale_api.create_session( self.username, self.password) mock_request.assert_called_once_with( 'POST', self._mock_url + '/session/1/session', headers={"Content-type": "application/json"}, data=json.dumps({"username": self.username, "password": self.password, "services": ["platform", "namespace"]}), verify=False ) self.assertTrue(result) self.assertEqual(self.powerscale_api.session_token, 'test_session_token') self.assertEqual(self.powerscale_api.csrf_token, 'test_csrf_token') @mock.patch('requests.Session.request') def test_create_session_failure(self, mock_request): mock_response = mock.Mock() mock_response.status_code = 401 mock_response.json.return_value = { 'message': 'Username or password is incorrect.'} mock_request.return_value = mock_response result = self.powerscale_api.create_session( self.username, self.password) self.assertFalse(result) self.assertIsNone(self.powerscale_api.session_token) 
self.assertIsNone(self.powerscale_api.csrf_token) @ddt.data(False, True) def test_create_directory(self, is_recursive): with requests_mock.Mocker() as m: path = '/ifs/test' self.assertEqual(0, len(m.request_history)) self._add_create_directory_response(m, path, is_recursive) r = self.powerscale_api.create_directory(path, recursive=is_recursive) self.assertTrue(r) self.assertEqual(1, len(m.request_history)) request = m.request_history[0] self._verify_dir_creation_request(request, path, is_recursive) def test_create_directory_no_permission(self): with requests_mock.Mocker() as m: path = '/ifs/test' self.powerscale_api.dir_permission = None self.assertEqual(0, len(m.request_history)) self._add_create_directory_response(m, path, True) r = self.powerscale_api.create_directory(path, recursive=True) self.powerscale_api.dir_permission = '0777' self.assertTrue(r) self.assertEqual(1, len(m.request_history)) request = m.request_history[0] self.assertNotIn("x-isi-ifs-access-control", request.headers) @requests_mock.mock() def test_clone_snapshot(self, m): snapshot_name = 'snapshot01' fq_target_dir = '/ifs/admin/target' self.assertEqual(0, len(m.request_history)) self._add_create_directory_response(m, fq_target_dir, False) snapshots_json = ( '{"snapshots": ' '[{"name": "snapshot01", "path": "/ifs/admin/source"}]' '}' ) self._add_get_snapshot_response(m, snapshot_name, snapshots_json) # In order to test cloning a snapshot, we build out a mock # source directory tree. After the method under test is called we # will verify the necessary calls are made to clone a snapshot. 
source_dir_listing_json = ( '{"children": [' '{"name": "dir1", "type": "container"},' '{"name": "dir2", "type": "container"},' '{"name": "file1", "type": "object"},' '{"name": "file2", "type": "object"}' ']}' ) self._add_get_directory_listing_response( m, '/ifs/.snapshot/{0}/admin/source'.format(snapshot_name), source_dir_listing_json) # Add request responses for creating directories and cloning files # to the destination tree self._add_file_clone_response(m, '/ifs/admin/target/file1', snapshot_name) self._add_file_clone_response(m, '/ifs/admin/target/file2', snapshot_name) self._add_create_directory_response(m, fq_target_dir + '/dir1', False) self._add_get_directory_listing_response( m, '/ifs/.snapshot/{0}/admin/source/dir1'.format(snapshot_name), '{"children": [' '{"name": "file11", "type": "object"}, ' '{"name": "file12", "type": "object"}' ']}') self._add_file_clone_response(m, '/ifs/admin/target/dir1/file11', snapshot_name) self._add_file_clone_response(m, '/ifs/admin/target/dir1/file12', snapshot_name) self._add_create_directory_response(m, fq_target_dir + '/dir2', False) self._add_get_directory_listing_response( m, '/ifs/.snapshot/{0}/admin/source/dir2'.format(snapshot_name), '{"children": [' '{"name": "file21", "type": "object"}, ' '{"name": "file22", "type": "object"}' ']}') self._add_file_clone_response(m, '/ifs/admin/target/dir2/file21', snapshot_name) self._add_file_clone_response(m, '/ifs/admin/target/dir2/file22', snapshot_name) # Call method under test self.powerscale_api.clone_snapshot(snapshot_name, fq_target_dir) # Verify calls needed to clone the source snapshot to the target dir expected_calls = [] clone_path_list = [ 'file1', 'file2', 'dir1/file11', 'dir1/file12', 'dir2/file21', 'dir2/file22'] for path in clone_path_list: expected_call = PowerScaleApiTest.ExpectedCall( PowerScaleApiTest.ExpectedCall.FILE_CLONE, self._mock_url + '/namespace/ifs/admin/target/' + path, ['/ifs/admin/target/' + path, '/ifs/admin/source/' + path, snapshot_name]) 
expected_calls.append(expected_call) dir_path_list = [ ('/dir1?recursive', '/dir1'), ('/dir2?recursive', '/dir2'), ('?recursive=', '')] for url, path in dir_path_list: expected_call = PowerScaleApiTest.ExpectedCall( PowerScaleApiTest.ExpectedCall.DIR_CREATION, self._mock_url + '/namespace/ifs/admin/target' + url, ['/ifs/admin/target' + path, False]) expected_calls.append(expected_call) self._verify_clone_snapshot_calls(expected_calls, m.request_history) class ExpectedCall(object): DIR_CREATION = 'dir_creation' FILE_CLONE = 'file_clone' def __init__(self, request_type, match_url, verify_args): self.request_type = request_type self.match_url = match_url self.verify_args = verify_args def _verify_clone_snapshot_calls(self, expected_calls, response_calls): actual_calls = [] for call in response_calls: actual_calls.append(call) for expected_call in expected_calls: # Match the expected call to the actual call, then verify match_found = False for call in actual_calls: if call.url.startswith(expected_call.match_url): match_found = True if expected_call.request_type == 'dir_creation': self._verify_dir_creation_request( call, *expected_call.verify_args) elif expected_call.request_type == 'file_clone': pass else: self.fail('Invalid request type') actual_calls.remove(call) self.assertTrue(match_found) @requests_mock.mock() def test_get_directory_listing(self, m): self.assertEqual(0, len(m.request_history)) fq_dir_path = 'ifs/admin/test' json_str = '{"my_json": "test123"}' self._add_get_directory_listing_response(m, fq_dir_path, json_str) actual_json = self.powerscale_api.get_directory_listing(fq_dir_path) self.assertEqual(1, len(m.request_history)) self.assertEqual(json.loads(json_str), actual_json) @ddt.data((200, True), (404, False)) def test_is_path_existent(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) path = '/ifs/home/admin' m.head('{0}/namespace{1}'.format(self._mock_url, path), 
status_code=status_code) r = self.powerscale_api.is_path_existent(path) self.assertEqual(expected_return_value, r) self.assertEqual(1, len(m.request_history)) @requests_mock.mock() def test_is_path_existent_unexpected_error(self, m): path = '/ifs/home/admin' m.head('{0}/namespace{1}'.format(self._mock_url, path), status_code=400) self.assertRaises( requests.exceptions.HTTPError, self.powerscale_api.is_path_existent, '/ifs/home/admin') @ddt.data( (200, '{"snapshots": [{"path": "/ifs/home/test"}]}', {'path': '/ifs/home/test'}), (404, '{"errors": []}', None) ) def test_get_snapshot(self, data): status_code, json_body, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) snapshot_name = 'foo1' self._add_get_snapshot_response(m, snapshot_name, json_body, status=status_code) r = self.powerscale_api.get_snapshot(snapshot_name) self.assertEqual(1, len(m.request_history)) self.assertEqual(expected_return_value, r) @requests_mock.mock() def test_get_snapshot_unexpected_error(self, m): snapshot_name = 'foo1' json_body = '{"snapshots": [{"path": "/ifs/home/test"}]}' self._add_get_snapshot_response( m, snapshot_name, json_body, status=400) self.assertRaises( requests.exceptions.HTTPError, self.powerscale_api.get_snapshot, snapshot_name) @requests_mock.mock() def test_get_snapshots(self, m): self.assertEqual(0, len(m.request_history)) snapshot_json = '{"snapshots": [{"path": "/ifs/home/test"}]}' m.get('{0}/platform/1/snapshot/snapshots'.format(self._mock_url), status_code=200, json=json.loads(snapshot_json)) r = self.powerscale_api.get_snapshots() self.assertEqual(1, len(m.request_history)) self.assertEqual(json.loads(snapshot_json), r) @requests_mock.mock() def test_get_snapshots_error_occurred(self, m): self.assertEqual(0, len(m.request_history)) m.get('{0}/platform/1/snapshot/snapshots'.format(self._mock_url), status_code=404) self.assertRaises(requests.exceptions.HTTPError, self.powerscale_api.get_snapshots) 
self.assertEqual(1, len(m.request_history)) @ddt.data( ('/ifs/home/admin', '{"exports": [{"id": 42, "paths": ["/ifs/home/admin"]}], "total": 1}', 42), ('/ifs/home/test', '{"exports": [], "total": 0}', None) ) def test_lookup_nfs_export(self, data): share_path, response_json, expected_return = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) m.get('{0}/platform/12/protocols/nfs/exports?path={1}' .format(self._mock_url, share_path.replace('/', '%2F')), json=json.loads(response_json)) r = self.powerscale_api.lookup_nfs_export(share_path) self.assertEqual(1, len(m.request_history)) self.assertEqual(expected_return, r) @requests_mock.mock() def test_get_nfs_export(self, m): self.assertEqual(0, len(m.request_history)) export_id = 42 response_json = '{"exports": [{"id": 1}]}' status_code = 200 m.get('{0}/platform/1/protocols/nfs/exports/{1}' .format(self._mock_url, export_id), json=json.loads(response_json), status_code=status_code) r = self.powerscale_api.get_nfs_export(export_id) self.assertEqual(1, len(m.request_history)) self.assertEqual(json.loads('{"id": 1}'), r) @requests_mock.mock() def test_get_nfs_export_error(self, m): self.assertEqual(0, len(m.request_history)) export_id = 3 response_json = '{}' status_code = 404 m.get('{0}/platform/1/protocols/nfs/exports/{1}' .format(self._mock_url, export_id), json=json.loads(response_json), status_code=status_code) r = self.powerscale_api.get_nfs_export(export_id) self.assertEqual(1, len(m.request_history)) self.assertIsNone(r) @requests_mock.mock() def test_lookup_smb_share(self, m): self.assertEqual(0, len(m.request_history)) share_name = 'my_smb_share' share_json = '{"id": "my_smb_share"}' response_json = '{{"shares": [{0}]}}'.format(share_json) m.get('{0}/platform/1/protocols/smb/shares/{1}' .format(self._mock_url, share_name), status_code=200, json=json.loads(response_json)) r = self.powerscale_api.lookup_smb_share(share_name) self.assertEqual(1, len(m.request_history)) 
self.assertEqual(json.loads(share_json), r) @requests_mock.mock() def test_lookup_smb_share_error(self, m): self.assertEqual(0, len(m.request_history)) share_name = 'my_smb_share' m.get('{0}/platform/1/protocols/smb/shares/{1}'.format( self._mock_url, share_name), status_code=404) r = self.powerscale_api.lookup_smb_share(share_name) self.assertEqual(1, len(m.request_history)) self.assertIsNone(r) @ddt.data((201, True), (404, False)) def test_create_nfs_export(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) export_path = '/ifs/home/test' m.post(self._mock_url + '/platform/1/protocols/nfs/exports', status_code=status_code) r = self.powerscale_api.create_nfs_export(export_path) self.assertEqual(1, len(m.request_history)) call = m.request_history[0] expected_request_body = '{"paths": ["/ifs/home/test"]}' self.assertEqual(json.loads(expected_request_body), json.loads(call.body)) self.assertEqual(expected_return_value, r) @ddt.data((201, True), (404, False)) def test_create_smb_share(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) share_name = 'my_smb_share' share_path = '/ifs/home/admin/smb_share' m.post(self._mock_url + '/platform/1/protocols/smb/shares', status_code=status_code) r = self.powerscale_api.create_smb_share(share_name, share_path) self.assertEqual(expected_return_value, r) self.assertEqual(1, len(m.request_history)) expected_request_data = { 'name': share_name, 'path': share_path, 'permissions': [] } self.assertEqual(expected_request_data, json.loads(m.request_history[0].body)) @requests_mock.mock() def test_create_snapshot(self, m): self.assertEqual(0, len(m.request_history)) snapshot_name = 'my_snapshot_01' snapshot_path = '/ifs/home/admin' m.post(self._mock_url + '/platform/1/snapshot/snapshots', status_code=201) r = self.powerscale_api.create_snapshot(snapshot_name, snapshot_path) 
self.assertEqual(1, len(m.request_history)) self.assertTrue(r) expected_request_body = json.loads( '{{"name": "{0}", "path": "{1}"}}' .format(snapshot_name, snapshot_path) ) self.assertEqual(expected_request_body, json.loads(m.request_history[0].body)) @requests_mock.mock() def test_create_snapshot_error_case(self, m): self.assertEqual(0, len(m.request_history)) snapshot_name = 'my_snapshot_01' snapshot_path = '/ifs/home/admin' m.post(self._mock_url + '/platform/1/snapshot/snapshots', status_code=404) self.assertEqual( self.powerscale_api.create_snapshot(snapshot_name, snapshot_path), False ) @ddt.data(True, False) def test_delete_path(self, is_recursive_delete): with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) fq_path = '/ifs/home/admin/test' m.delete(self._mock_url + '/namespace' + fq_path + '?recursive=' + str(is_recursive_delete), status_code=204) self.powerscale_api.delete_path( fq_path, recursive=is_recursive_delete) self.assertEqual(1, len(m.request_history)) @requests_mock.mock() def test_delete_path_error_case(self, m): fq_path = '/ifs/home/admin/test' m.delete(self._mock_url + '/namespace' + fq_path + '?recursive=False', status_code=403) self.assertEqual( self.powerscale_api.delete_path( fq_path, recursive=False), False) @ddt.data((204, True), (404, False)) def test_delete_nfs_share(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) share_number = 42 m.delete('{0}/platform/1/protocols/nfs/exports/{1}' .format(self._mock_url, share_number), status_code=status_code) r = self.powerscale_api.delete_nfs_share(share_number) self.assertEqual(1, len(m.request_history)) self.assertEqual(expected_return_value, r) @ddt.data((204, True), (404, False)) def test_delete_smb_shares(self, data): status_code, expected_return_value = data with requests_mock.mock() as m: self.assertEqual(0, len(m.request_history)) share_name = 'smb_share_42' 
m.delete('{0}/platform/1/protocols/smb/shares/{1}' .format(self._mock_url, share_name), status_code=status_code) r = self.powerscale_api.delete_smb_share(share_name) self.assertEqual(1, len(m.request_history)) self.assertEqual(expected_return_value, r) @requests_mock.mock() def test_delete_snapshot(self, m): self.assertEqual(0, len(m.request_history)) m.delete(self._mock_url + '/platform/1/snapshot/snapshots/my_snapshot', status_code=204) self.powerscale_api.delete_snapshot("my_snapshot") self.assertEqual(1, len(m.request_history)) @requests_mock.mock() def test_delete_snapshot_error_case(self, m): m.delete(self._mock_url + '/platform/1/snapshot/snapshots/my_snapshot', status_code=403) self.assertEqual( self.powerscale_api.delete_snapshot("my_snapshot"), False) @requests_mock.mock() def test_quota_create(self, m): quota_path = '/ifs/manila/test' quota_size = 256 self.assertEqual(0, len(m.request_history)) m.post(self._mock_url + '/platform/1/quota/quotas', status_code=201) self.powerscale_api.quota_create(quota_path, 'directory', quota_size) self.assertEqual(1, len(m.request_history)) expected_request_json = { 'path': quota_path, 'type': 'directory', 'include_snapshots': False, 'thresholds_include_overhead': False, 'enforced': True, 'thresholds': {'hard': quota_size}, } call_body = m.request_history[0].body self.assertEqual(expected_request_json, json.loads(call_body)) @requests_mock.mock() def test_quota_create_with_threshold(self, m): quota_path = '/ifs/manila/test' quota_size = 100 self.assertEqual(0, len(m.request_history)) m.post(self._mock_url + '/platform/1/quota/quotas', status_code=201) self.powerscale_api_threshold.quota_create( quota_path, 'directory', quota_size ) advisory_size = round( (quota_size * self.powerscale_api_threshold.threshold_limit) / 100) self.assertEqual(1, len(m.request_history)) expected_request_json = { 'path': quota_path, 'type': 'directory', 'include_snapshots': False, 'thresholds_include_overhead': False, 'enforced': True, 
'thresholds': {'hard': quota_size, 'advisory': advisory_size}, } call_body = m.request_history[0].body self.assertEqual(expected_request_json, json.loads(call_body)) @requests_mock.mock() def test_quota_create__path_does_not_exist(self, m): quota_path = '/ifs/test2' self.assertEqual(0, len(m.request_history)) m.post(self._mock_url + '/platform/1/quota/quotas', status_code=400) self.assertRaises( requests.exceptions.HTTPError, self.powerscale_api.quota_create, quota_path, 'directory', 2 ) @requests_mock.mock() def test_quota_get(self, m): self.assertEqual(0, len(m.request_history)) response_json = {'quotas': [{}]} m.get(self._mock_url + '/platform/1/quota/quotas', json=response_json, status_code=200) quota_path = "/ifs/manila/test" quota_type = "directory" self.powerscale_api.quota_get(quota_path, quota_type) self.assertEqual(1, len(m.request_history)) request_query_string = m.request_history[0].qs expected_query_string = {'path': [quota_path]} self.assertEqual(expected_query_string, request_query_string) @requests_mock.mock() def test_quota_get__path_does_not_exist(self, m): self.assertEqual(0, len(m.request_history)) m.get(self._mock_url + '/platform/1/quota/quotas', status_code=404) response = self.powerscale_api.quota_get( '/ifs/does_not_exist', 'directory') self.assertIsNone(response) @requests_mock.mock() def test_quota_modify(self, m): self.assertEqual(0, len(m.request_history)) quota_id = "ADEF1G" new_size = 1024 m.put('{0}/platform/1/quota/quotas/{1}'.format( self._mock_url, quota_id), status_code=204) self.powerscale_api.quota_modify_size(quota_id, new_size) self.assertEqual(1, len(m.request_history)) expected_request_body = {'thresholds': {'hard': new_size}} request_body = m.request_history[0].body self.assertEqual(expected_request_body, json.loads(request_body)) @requests_mock.mock() def test_quota_modify_with_threshold(self, m): self.assertEqual(0, len(m.request_history)) quota_id = "ADEF1G" new_size = 1024 advisory_size = round( (new_size * 
self.powerscale_api_threshold.threshold_limit) / 100) m.put('{0}/platform/1/quota/quotas/{1}'.format( self._mock_url, quota_id), status_code=204) self.powerscale_api_threshold.quota_modify_size(quota_id, new_size) self.assertEqual(1, len(m.request_history)) expected_request_body = {'thresholds': {'hard': new_size, 'advisory': advisory_size}} request_body = m.request_history[0].body self.assertEqual(expected_request_body, json.loads(request_body)) @requests_mock.mock() def test_quota_modify__given_id_does_not_exist(self, m): quota_id = 'ADE2F' m.put('{0}/platform/1/quota/quotas/{1}'.format( self._mock_url, quota_id), status_code=404) self.assertRaises( requests.exceptions.HTTPError, self.powerscale_api.quota_modify_size, quota_id, 1024 ) @requests_mock.mock() def test_quota_set__quota_already_exists(self, m): self.assertEqual(0, len(m.request_history)) quota_path = '/ifs/manila/test' quota_type = 'directory' quota_size = 256 quota_id = 'AFE2C' m.get('{0}/platform/1/quota/quotas'.format( self._mock_url), json={'quotas': [{'id': quota_id}]}, status_code=200) m.put( '{0}/platform/1/quota/quotas/{1}'.format(self._mock_url, quota_id), status_code=204 ) self.powerscale_api.quota_set(quota_path, quota_type, quota_size) expected_quota_modify_json = {'thresholds': {'hard': quota_size}} quota_put_json = json.loads(m.request_history[1].body) self.assertEqual(expected_quota_modify_json, quota_put_json) @requests_mock.mock() def test_quota_set__quota_does_not_already_exist(self, m): self.assertEqual(0, len(m.request_history)) m.get('{0}/platform/1/quota/quotas'.format( self._mock_url), status_code=404) m.post('{0}/platform/1/quota/quotas'.format(self._mock_url), status_code=201) quota_path = '/ifs/manila/test' quota_type = 'directory' quota_size = 256 self.powerscale_api.quota_set(quota_path, quota_type, quota_size) # verify a call is made to create a quota expected_create_json = { str('path'): quota_path, str('type'): 'directory', str('include_snapshots'): False, 
str('thresholds_include_overhead'): False, str('enforced'): True, str('thresholds'): {str('hard'): quota_size}, } create_request_json = json.loads(m.request_history[1].body) self.assertEqual(expected_create_json, create_request_json) @requests_mock.mock() def test_quota_set__path_does_not_already_exist(self, m): m.get(self._mock_url + '/platform/1/quota/quotas', status_code=400) e = self.assertRaises( requests.exceptions.HTTPError, self.powerscale_api.quota_set, '/ifs/does_not_exist', 'directory', 2048 ) self.assertEqual(400, e.response.status_code) def test_get_user_sid_success(self): sid = {"id": "SID:S-1-22-1-0", "name": "foo", "type": "user"} self.powerscale_api.auth_lookup_user = mock.MagicMock( return_value={ "mapping": [{"user": {"sid": sid}}] } ) expected_sid = self.powerscale_api.get_user_sid('foo') self.assertEqual(expected_sid, sid) def test_get_user_sid_wrong_mappings(self): self.powerscale_api.auth_lookup_user = mock.MagicMock( return_value={ "mapping": [{"user": {"sid": 'fake_sid1'}}, {"user": {"sid": 'fake_sid2'}}] } ) expected_sid = self.powerscale_api.get_user_sid('foo') self.assertIsNone(expected_sid) def test_get_user_sid_user_not_found(self): self.powerscale_api.auth_lookup_user = mock.MagicMock( return_value=None ) expected_sid = self.powerscale_api.get_user_sid('foo') self.assertIsNone(expected_sid) @requests_mock.mock() def test_auth_lookup_user(self, m): user = 'foo' auth_url = '{0}/platform/1/auth/mapping/users/lookup?user={1}'.format( self._mock_url, user) example_sid = 'SID:S-1-5-21' sid_json = { 'id': example_sid, 'name': user, 'type': 'user' } auth_json = { 'mapping': [ {'user': {'sid': sid_json}} ] } m.get(auth_url, status_code=200, json=auth_json) returned_auth_json = self.powerscale_api.auth_lookup_user(user) self.assertEqual(auth_json, returned_auth_json) @requests_mock.mock() def test_auth_lookup_user_with_nonexistent_user(self, m): user = 'nonexistent' auth_url = '{0}/platform/1/auth/mapping/users/lookup?user={1}'.format( 
self._mock_url, user) m.get(auth_url, status_code=404) self.assertIsNone(self.powerscale_api.auth_lookup_user(user)) @requests_mock.mock() def test_auth_lookup_user_with_backend_error(self, m): user = 'foo' auth_url = '{0}/platform/1/auth/mapping/users/lookup?user={1}'.format( self._mock_url, user) m.get(auth_url, status_code=400) self.assertIsNone(self.powerscale_api.auth_lookup_user(user)) def _add_create_directory_response(self, m, path, is_recursive): url = '{0}/namespace{1}?recursive={2}'.format( self._mock_url, path, str(is_recursive)) m.put(url, status_code=200) def _add_file_clone_response(self, m, fq_dest_path, snapshot_name): url = '{0}/namespace{1}?clone=true&snapshot={2}'.format( self._mock_url, fq_dest_path, snapshot_name) m.put(url) def _add_get_directory_listing_response(self, m, fq_dir_path, json_str): url = '{0}/namespace{1}?detail=default'.format( self._mock_url, fq_dir_path) m.get(url, json=json.loads(json_str), status_code=200) def _add_get_snapshot_response( self, m, snapshot_name, json_str, status=200): url = '{0}/platform/1/snapshot/snapshots/{1}'.format( self._mock_url, snapshot_name ) m.get(url, status_code=status, json=json.loads(json_str)) def _verify_dir_creation_request(self, request, path, is_recursive): self.assertEqual('PUT', request.method) expected_url = '{0}/namespace{1}?recursive={2}'.format( self._mock_url, path, str(is_recursive)) self.assertEqual(expected_url, request.url) self.assertIn("x-isi-ifs-target-type", request.headers) self.assertEqual("container", request.headers['x-isi-ifs-target-type']) self.assertIn("x-isi-ifs-access-control", request.headers) self.assertEqual(self.dir_permission, request.headers['x-isi-ifs-access-control']) def _verify_clone_file_from_snapshot( self, request, fq_file_path, fq_dest_path, snapshot_name): self.assertEqual('PUT', request.method) expected_url = '{0}/namespace{1}?clone=true&snapshot={2}'.format( self._mock_url, fq_dest_path, snapshot_name ) self.assertEqual(expected_url, 
request.request.url) self.assertIn("x-isi-ifs-copy-source", request.headers) self.assertEqual('/namespace' + fq_file_path, request.headers['x-isi-ifs-copy-source']) def test_modify_nfs_export_access_success(self): self.powerscale_api.send_put_request = mock.MagicMock() share_id = '123' ro_ips = ['10.0.0.1', '10.0.0.2'] rw_ips = ['10.0.0.3', '10.0.0.4'] self.powerscale_api.modify_nfs_export_access(share_id, ro_ips, rw_ips) expected_url = '{0}/platform/1/protocols/nfs/exports/{1}'.format( self.powerscale_api.host_url, share_id) expected_data = {'read_only_clients': ro_ips, 'clients': rw_ips} self.powerscale_api.send_put_request.assert_called_once_with( expected_url, data=expected_data) def test_modify_nfs_export_access_no_ro_ips(self): self.powerscale_api.send_put_request = mock.MagicMock() share_id = '123' rw_ips = ['10.0.0.3', '10.0.0.4'] self.powerscale_api.modify_nfs_export_access(share_id, None, rw_ips) expected_url = '{0}/platform/1/protocols/nfs/exports/{1}'.format( self.powerscale_api.host_url, share_id) expected_data = {'clients': rw_ips} self.powerscale_api.send_put_request.assert_called_once_with( expected_url, data=expected_data) def test_modify_nfs_export_access_no_rw_ips(self): self.powerscale_api.send_put_request = mock.MagicMock() share_id = '123' ro_ips = ['10.0.0.1', '10.0.0.2'] self.powerscale_api.modify_nfs_export_access(share_id, ro_ips, None) expected_url = '{0}/platform/1/protocols/nfs/exports/{1}'.format( self.powerscale_api.host_url, share_id) expected_data = {'read_only_clients': ro_ips} self.powerscale_api.send_put_request.assert_called_once_with( expected_url, data=expected_data) @mock.patch('requests.Session.request') def test_request_with_401_response(self, mock_request): """Test sending a request with a 401 Unauthorized response.""" mock_request.return_value.status_code = 401 self.powerscale_api.create_session = mock.MagicMock(return_value=True) self.powerscale_api.request('GET', 'http://example.com/api/data') 
self.assertEqual(mock_request.call_count, 2) def test_delete_quota_sends_delete_request(self): self.powerscale_api.send_delete_request = mock.MagicMock() quota_id = '123' self.powerscale_api.delete_quota(quota_id) self.powerscale_api.send_delete_request.assert_called_once_with( '{0}/platform/1/quota/quotas/{1}'.format( self.powerscale_api.host_url, quota_id) ) def test_delete_quota_raises_exception_on_error(self): quota_id = '123' self.powerscale_api.send_delete_request = mock.MagicMock( side_effect=requests.exceptions.HTTPError) self.assertRaises(requests.exceptions.HTTPError, self.powerscale_api.delete_quota, quota_id) def test_get_space_stats_success(self): self.powerscale_api.send_get_request = mock.MagicMock() self.powerscale_api.send_get_request.return_value.status_code = 200 self.powerscale_api.send_get_request.return_value.json.return_value = { 'stats': [ {'key': 'ifs.bytes.free', 'value': 1000}, {'key': 'ifs.bytes.total', 'value': 2000}, {'key': 'ifs.bytes.used', 'value': 500} ] } result = self.powerscale_api.get_space_stats() self.assertEqual(result, {'total': 2000, 'free': 1000, 'used': 500}) def test_get_space_stats_failure(self): self.powerscale_api.send_get_request = mock.MagicMock() self.powerscale_api.send_get_request.return_value.status_code = 400 self.assertRaises(exception.ShareBackendException, self.powerscale_api.get_space_stats) def test_get_allocated_space_success(self): self.powerscale_api.send_get_request = mock.MagicMock() self.powerscale_api.send_get_request.return_value.status_code = 200 self.powerscale_api.send_get_request.return_value.json.return_value = { 'quotas': [ { 'path': '/ifs/home', 'thresholds': { 'hard': None } }, { 'path': '/ifs/manila/CI-1d52ed66-a1ee-4b19-8f56-3706b', 'thresholds': { 'hard': 2147483648000 } }, { 'path': '/ifs/manila/CI-0b622133-8b58-4a9f-ad1a-b8247', 'thresholds': { 'hard': 107374182400 } }, { 'path': '/ifs/nilesh', 'thresholds': { 'hard': 10737418240 } } ] } result = 
self.powerscale_api.get_allocated_space() self.assertEqual(result, 2110.0) def test_get_allocated_space_failure(self): self.powerscale_api.send_get_request = mock.MagicMock() self.powerscale_api.send_get_request.return_value.status_code = 400 self.assertRaises(exception.ShareBackendException, self.powerscale_api.get_allocated_space) def test_get_cluster_version_success(self): self.powerscale_api.send_get_request = mock.MagicMock() self.powerscale_api.send_get_request.return_value.status_code = 200 self.powerscale_api.send_get_request.return_value.json.return_value = { 'nodes': [{'release': '1.0'}]} version = self.powerscale_api.get_cluster_version() self.assertEqual(version, '1.0') self.powerscale_api.send_get_request.assert_called_once_with( '{0}/platform/12/cluster/version'.format( self.powerscale_api.host_url) ) def test_get_cluster_version_failure(self): self.powerscale_api.send_get_request = mock.MagicMock() self.powerscale_api.send_get_request.return_value.status_code = 404 self.assertRaises(exception.ShareBackendException, self.powerscale_api.get_cluster_version) self.powerscale_api.send_get_request.assert_called_once_with( '{0}/platform/12/cluster/version'.format( self.powerscale_api.host_url) ) def test_modify_smb_share_access_with_host_acl_and_smb_permission(self): self.powerscale_api.send_put_request = mock.MagicMock() share_name = 'my_share' host_acl = 'host1,host2' smb_permission = 'read' self.powerscale_api.modify_smb_share_access( share_name, host_acl, smb_permission) expected_url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self.powerscale_api.host_url, share_name) expected_data = {'host_acl': host_acl, 'permissions': smb_permission} self.powerscale_api.send_put_request.assert_called_with( expected_url, data=expected_data) def test_modify_smb_share_access_with_host_acl_only(self): self.powerscale_api.send_put_request = mock.MagicMock() share_name = 'my_share' host_acl = 'host1,host2' self.powerscale_api.modify_smb_share_access(share_name, 
host_acl) expected_url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self.powerscale_api.host_url, share_name) expected_data = {'host_acl': host_acl} self.powerscale_api.send_put_request.assert_called_with( expected_url, data=expected_data) def test_modify_smb_share_access_with_smb_permission_only(self): self.powerscale_api.send_put_request = mock.MagicMock() share_name = 'my_share' smb_permission = 'read' self.powerscale_api.modify_smb_share_access( share_name, permissions=smb_permission) expected_url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self.powerscale_api.host_url, share_name) expected_data = {'permissions': smb_permission} self.powerscale_api.send_put_request.assert_called_with( expected_url, data=expected_data) def test_modify_smb_share_access_with_no_arguments(self): self.powerscale_api.send_put_request = mock.MagicMock() share_name = 'my_share' self.powerscale_api.modify_smb_share_access(share_name) expected_url = '{0}/platform/1/protocols/smb/shares/{1}'.format( self.powerscale_api.host_url, share_name) expected_data = {} self.powerscale_api.send_put_request.assert_called_with( expected_url, data=expected_data) def test_modify_smb_share_access_with_http_error(self): self.powerscale_api.send_put_request = mock.MagicMock( side_effect=requests.exceptions.HTTPError ) share_name = 'my_share' host_acl = 'host1,host2' smb_permission = 'read' self.assertRaises(requests.exceptions.HTTPError, self.powerscale_api.modify_smb_share_access, share_name, host_acl, smb_permission) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0056705 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/0000775000175000017500000000000000000000000025677 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/__init__.py0000664000175000017500000000000000000000000027776 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0096705 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/0000775000175000017500000000000000000000000027175 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/clone_snapshot_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/clone_snapshot_response.0000664000175000017500000000006400000000000034133 0ustar00zuulzuul00000000000000{ "id": "64560f05-e677-ec2a-7fcf-1a9efb93188b" }././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_filesystem_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_filesystem_respon0000664000175000017500000000006400000000000034215 0ustar00zuulzuul00000000000000{ "id": "6454e9a9-a698-e9bc-ca61-1a9efb93188b" }././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_nfs_export_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_nfs_export_respon0000664000175000017500000000006400000000000034220 0ustar00zuulzuul00000000000000{ "id": "6454ec18-7b8d-1532-1b8a-1a9efb93188b" }././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_smb_share_response.json 22 
mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_smb_share_respons0000664000175000017500000000006400000000000034157 0ustar00zuulzuul00000000000000{ "id": "64927ae9-3403-6930-a784-f227b9987c54" }././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_snapshot_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_snapshot_response0000664000175000017500000000006400000000000034220 0ustar00zuulzuul00000000000000{ "id": "6454ea29-09c3-030e-cfc3-1a9efb93188b" }././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_cluster_id_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_cluster_id_response.0000664000175000017500000000004100000000000034103 0ustar00zuulzuul00000000000000[ { "id": "0" } ]././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_fileystem_id_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_fileystem_id_respons0000664000175000017500000000010400000000000034200 0ustar00zuulzuul00000000000000[ { "id": "6454e9a9-a698-e9bc-ca61-1a9efb93188b" } ]././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_fsid_from_export_name_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_fsid_from_export_nam0000664000175000017500000000012000000000000034154 0ustar00zuulzuul00000000000000[ { 
"file_system_id": "6454e9a9-a698-e9bc-ca61-1a9efb93188b" } ]././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_fsid_from_share_name_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_fsid_from_share_name0000664000175000017500000000012000000000000034102 0ustar00zuulzuul00000000000000[ { "file_system_id": "6454e9a9-a698-e9bc-ca61-1a9efb93188b" } ]././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nas_server_id_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nas_server_id_respon0000664000175000017500000000010400000000000034163 0ustar00zuulzuul00000000000000[ { "id": "6423d56e-eaf3-7424-be0b-1a9efb93188b" } ]././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nas_server_interfaces_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nas_server_interface0000664000175000017500000000043600000000000034151 0ustar00zuulzuul00000000000000{ "current_preferred_IPv4_interface_id": "6423d586-4070-f752-c4da-1a9efb93188b", "current_preferred_IPv6_interface_id": null, "file_interfaces": [ { "id": "6423d586-4070-f752-c4da-1a9efb93188b", "ip_address": "192.168.11.23" } ] }././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nas_server_smb_netbios_response.json 22 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nas_server_smb_netbi0000664000175000017500000000030100000000000034142 0ustar00zuulzuul00000000000000[ { "smb_servers": [ { "is_standalone": true, "domain": null, "netbios_name": "OPENSTACK" } ] } ]././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nfs_export_id_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nfs_export_id_respon0000664000175000017500000000010400000000000034203 0ustar00zuulzuul00000000000000[ { "id": "6454ec18-7b8d-1532-1b8a-1a9efb93188b" } ]././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nfs_export_name_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nfs_export_name_resp0000664000175000017500000000004600000000000034177 0ustar00zuulzuul00000000000000{ "name": "powerstore-nfs-share" }././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_smb_share_id_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_smb_share_id_respons0000664000175000017500000000010400000000000034142 0ustar00zuulzuul00000000000000[ { "id": "64927ae9-3403-6930-a784-f227b9987c54" } ]././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/resize_filesystem_shrink_failure_response.json 22 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/resize_filesystem_shrink0000664000175000017500000000045600000000000034250 0ustar00zuulzuul00000000000000{ "messages": [ { "code": "0xE08010080449", "severity": "Error", "message_l10n": "The new size for the file system is below the file system's current size used (5222 MB).", "arguments": [ "5222" ] } ] }././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/retreive_cluster_capacity_metrics_response.json 22 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/retreive_cluster_capacit0000664000175000017500000101271400000000000034200 0ustar00zuulzuul00000000000000[ { "timestamp": "2023-06-06T04:35:00Z", "cluster_id": "0", "logical_provisioned": 91900268773376, "logical_used": 69117132800, "logical_used_volume": 47464128512, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22338211840, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368199442199, "efficiency_ratio": 12341.914, "data_reduction": 4.9288387, "data_physical_used": 7446192809, "snapshot_savings": 10.068099, "thin_savings": 460.541, "shared_logical_used": 36701081600, "system_free_space": 140331964416, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T04:40:00Z", "cluster_id": "0", "logical_provisioned": 91903489998848, "logical_used": 69118181376, "logical_used_volume": 47465177088, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22339260416, "shared_logical_used_file_system": 14362865664, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368199339818, "efficiency_ratio": 12342.247, 
"data_reduction": 4.928939, "data_physical_used": 7446252880, "snapshot_savings": 10.068111, "thin_savings": 460.5264, "shared_logical_used": 36702126080, "system_free_space": 140332295168, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T04:45:00Z", "cluster_id": "0", "logical_provisioned": 91900268773376, "logical_used": 69118181376, "logical_used_volume": 47465177088, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22339260416, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368164442322, "efficiency_ratio": 12341.806, "data_reduction": 4.928936, "data_physical_used": 7446257948, "snapshot_savings": 10.068099, "thin_savings": 460.5264, "shared_logical_used": 36702130176, "system_free_space": 140332015001, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T04:50:00Z", "cluster_id": "0", "logical_provisioned": 91900268773376, "logical_used": 69118181376, "logical_used_volume": 47465177088, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22339260416, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368166718759, "efficiency_ratio": 12341.7295, "data_reduction": 4.9289055, "data_physical_used": 7446304322, "snapshot_savings": 10.068099, "thin_savings": 460.5264, "shared_logical_used": 36702130176, "system_free_space": 140330769817, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T04:55:00Z", "cluster_id": "0", "logical_provisioned": 91900268773376, "logical_used": 69118181376, "logical_used_volume": 47465177088, "logical_used_file_system": 21653004288, 
"logical_used_vvol": 0, "shared_logical_used_volume": 22339260416, "shared_logical_used_file_system": 14362865664, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368167150394, "efficiency_ratio": 12341.69, "data_reduction": 4.9288893, "data_physical_used": 7446327815, "snapshot_savings": 10.068111, "thin_savings": 460.5264, "shared_logical_used": 36702126080, "system_free_space": 140329758105, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:00:00Z", "cluster_id": "0", "logical_provisioned": 91916374900736, "logical_used": 69255864320, "logical_used_volume": 47602860032, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22474838016, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368169691583, "efficiency_ratio": 12343.818, "data_reduction": 4.9470835, "data_physical_used": 7446348795, "snapshot_savings": 10.074033, "thin_savings": 459.1045, "shared_logical_used": 36837707776, "system_free_space": 140327617945, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:05:00Z", "cluster_id": "0", "logical_provisioned": 91903489998848, "logical_used": 69187022848, "logical_used_volume": 47534018560, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22408101888, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368174539400, "efficiency_ratio": 12342.058, "data_reduction": 4.938109, "data_physical_used": 7446367317, "snapshot_savings": 10.068099, "thin_savings": 459.66843, "shared_logical_used": 36770971648, "system_free_space": 140323372646, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", 
"entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:10:00Z", "cluster_id": "0", "logical_provisioned": 91906711224320, "logical_used": 69187022848, "logical_used_volume": 47534018560, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22408101888, "shared_logical_used_file_system": 14362865664, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368177730720, "efficiency_ratio": 12342.48, "data_reduction": 4.9381046, "data_physical_used": 7446372969, "snapshot_savings": 10.068111, "thin_savings": 459.66843, "shared_logical_used": 36770967552, "system_free_space": 140320127385, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:15:00Z", "cluster_id": "0", "logical_provisioned": 91903489998848, "logical_used": 69187022848, "logical_used_volume": 47534018560, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22408101888, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368178465182, "efficiency_ratio": 12341.962, "data_reduction": 4.9380703, "data_physical_used": 7446425323, "snapshot_savings": 10.068099, "thin_savings": 459.66843, "shared_logical_used": 36770971648, "system_free_space": 140320220774, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:20:00Z", "cluster_id": "0", "logical_provisioned": 91903489998848, "logical_used": 69187022848, "logical_used_volume": 47534018560, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22408101888, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368177839928, "efficiency_ratio": 12341.931, "data_reduction": 
4.9380584, "data_physical_used": 7446443354, "snapshot_savings": 10.068099, "thin_savings": 459.66843, "shared_logical_used": 36770971648, "system_free_space": 140320633241, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:25:00Z", "cluster_id": "0", "logical_provisioned": 91903489998848, "logical_used": 69187022848, "logical_used_volume": 47534018560, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22408101888, "shared_logical_used_file_system": 14362865664, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368180160696, "efficiency_ratio": 12341.853, "data_reduction": 4.9380264, "data_physical_used": 7446490810, "snapshot_savings": 10.068111, "thin_savings": 459.66843, "shared_logical_used": 36770967552, "system_free_space": 140319228518, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:30:00Z", "cluster_id": "0", "logical_provisioned": 91900268773376, "logical_used": 69118181376, "logical_used_volume": 47465177088, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22341365760, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368180501570, "efficiency_ratio": 12341.426, "data_reduction": 4.929067, "data_physical_used": 7446487422, "snapshot_savings": 10.062173, "thin_savings": 460.5264, "shared_logical_used": 36704235520, "system_free_space": 140318516428, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:35:00Z", "cluster_id": "0", "logical_provisioned": 92141860683776, "logical_used": 69128286208, "logical_used_volume": 47475281920, "logical_used_file_system": 21653004288, "logical_used_vvol": 
0, "shared_logical_used_volume": 22347259904, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368145905892, "efficiency_ratio": 12373.854, "data_reduction": 4.9298525, "data_physical_used": 7446496643, "snapshot_savings": 10.074033, "thin_savings": 467.67657, "shared_logical_used": 36710129664, "system_free_space": 140316932710, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:40:00Z", "cluster_id": "0", "logical_provisioned": 92145081909248, "logical_used": 69241122816, "logical_used_volume": 47588118528, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22462201856, "shared_logical_used_file_system": 14362865664, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368154732810, "efficiency_ratio": 12373.911, "data_reduction": 4.9451375, "data_physical_used": 7446722689, "snapshot_savings": 10.068111, "thin_savings": 466.08948, "shared_logical_used": 36825067520, "system_free_space": 140305671577, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:45:00Z", "cluster_id": "0", "logical_provisioned": 91922817351680, "logical_used": 69190463488, "logical_used_volume": 47537459200, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22411542528, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368155105051, "efficiency_ratio": 12344.429, "data_reduction": 4.9384813, "data_physical_used": 7446502371, "snapshot_savings": 10.068099, "thin_savings": 460.20294, "shared_logical_used": 36774412288, "system_free_space": 140307718348, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:50:00Z", "cluster_id": "0", "logical_provisioned": 91913153675264, "logical_used": 69259231232, "logical_used_volume": 47606226944, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22480310272, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368161507774, "efficiency_ratio": 12343.046, "data_reduction": 4.947682, "data_physical_used": 7446554299, "snapshot_savings": 10.068099, "thin_savings": 458.96118, "shared_logical_used": 36843180032, "system_free_space": 140301807616, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T05:55:00Z", "cluster_id": "0", "logical_provisioned": 91913153675264, "logical_used": 69259231232, "logical_used_volume": 47606226944, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22480310272, "shared_logical_used_file_system": 14362865664, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368163117317, "efficiency_ratio": 12342.907, "data_reduction": 4.9476256, "data_physical_used": 7446637765, "snapshot_savings": 10.068111, "thin_savings": 458.96118, "shared_logical_used": 36843175936, "system_free_space": 140300321177, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:00:00Z", "cluster_id": "0", "logical_provisioned": 91906711224320, "logical_used": 69255864320, "logical_used_volume": 47602860032, "logical_used_file_system": 21653004288, "logical_used_vvol": 0, "shared_logical_used_volume": 22476943360, "shared_logical_used_file_system": 14362869760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368164038746, "efficiency_ratio": 12341.99, "data_reduction": 4.9471536, 
"data_physical_used": 7446668975, "snapshot_savings": 10.068099, "thin_savings": 458.81396, "shared_logical_used": 36839813120, "system_free_space": 140299469004, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:05:00Z", "cluster_id": "0", "logical_provisioned": 91906711224320, "logical_used": 69255888896, "logical_used_volume": 47602860032, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 22476943360, "shared_logical_used_file_system": 14362894336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368166139306, "efficiency_ratio": 12342.007, "data_reduction": 4.9471636, "data_physical_used": 7446658310, "snapshot_savings": 10.068099, "thin_savings": 458.81363, "shared_logical_used": 36839837696, "system_free_space": 140298212147, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:10:00Z", "cluster_id": "0", "logical_provisioned": 91909932449792, "logical_used": 69255888896, "logical_used_volume": 47602860032, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 22476943360, "shared_logical_used_file_system": 14362890240, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368168275932, "efficiency_ratio": 12342.41, "data_reduction": 4.947151, "data_physical_used": 7446676302, "snapshot_savings": 10.068111, "thin_savings": 458.81363, "shared_logical_used": 36839833600, "system_free_space": 140295445504, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:15:00Z", "cluster_id": "0", "logical_provisioned": 91938923479040, "logical_used": 69600096256, "logical_used_volume": 47947067392, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, 
"shared_logical_used_volume": 22821150720, "shared_logical_used_file_system": 14362894336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368170856414, "efficiency_ratio": 12346.2, "data_reduction": 4.993333, "data_physical_used": 7446738579, "snapshot_savings": 10.068099, "thin_savings": 455.07318, "shared_logical_used": 37184045056, "system_free_space": 140292678860, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:20:00Z", "cluster_id": "0", "logical_provisioned": 91906711224320, "logical_used": 69255888896, "logical_used_volume": 47602860032, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 22476943360, "shared_logical_used_file_system": 14362894336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368174592503, "efficiency_ratio": 12341.916, "data_reduction": 4.9471273, "data_physical_used": 7446713195, "snapshot_savings": 10.068099, "thin_savings": 458.81363, "shared_logical_used": 36839837696, "system_free_space": 140289324646, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:25:00Z", "cluster_id": "0", "logical_provisioned": 91987241861120, "logical_used": 70013145088, "logical_used_volume": 48360116224, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 23234199552, "shared_logical_used_file_system": 14362890240, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368143919552, "efficiency_ratio": 12352.64, "data_reduction": 5.0487795, "data_physical_used": 7446768135, "snapshot_savings": 10.068111, "thin_savings": 450.96854, "shared_logical_used": 37597089792, "system_free_space": 140282861363, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:30:00Z", "cluster_id": "0", "logical_provisioned": 91987241861120, "logical_used": 70770397184, "logical_used_volume": 49117368320, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 23989346304, "shared_logical_used_file_system": 14362894336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368150315261, "efficiency_ratio": 12352.582, "data_reduction": 5.1501617, "data_physical_used": 7446802819, "snapshot_savings": 10.074033, "thin_savings": 441.14966, "shared_logical_used": 38352240640, "system_free_space": 140279946854, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:35:00Z", "cluster_id": "0", "logical_provisioned": 92067772497920, "logical_used": 69600092160, "logical_used_volume": 47947063296, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 22821146624, "shared_logical_used_file_system": 14362894336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368130093950, "efficiency_ratio": 12363.455, "data_reduction": 4.993313, "data_physical_used": 7446767645, "snapshot_savings": 10.068099, "thin_savings": 458.90695, "shared_logical_used": 37184040960, "system_free_space": 140300667494, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:40:00Z", "cluster_id": "0", "logical_provisioned": 92061330046976, "logical_used": 70357352448, "logical_used_volume": 48704323584, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 23509565440, "shared_logical_used_file_system": 14362890240, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368139217378, "efficiency_ratio": 12362.499, "data_reduction": 5.08572, 
"data_physical_used": 7446822369, "snapshot_savings": 10.087369, "thin_savings": 449.508, "shared_logical_used": 37872455680, "system_free_space": 140292585472, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:45:00Z", "cluster_id": "0", "logical_provisioned": 92061330046976, "logical_used": 71183450112, "logical_used_volume": 49530421248, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 24404504576, "shared_logical_used_file_system": 14362890240, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368146757964, "efficiency_ratio": 12362.353, "data_reduction": 5.205836, "data_physical_used": 7446910590, "snapshot_savings": 10.068111, "thin_savings": 438.07715, "shared_logical_used": 38767394816, "system_free_space": 140285954867, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:50:00Z", "cluster_id": "0", "logical_provisioned": 92063477530624, "logical_used": 72107651072, "logical_used_volume": 50454622208, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 25328705536, "shared_logical_used_file_system": 14362894336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368156353827, "efficiency_ratio": 12362.5625, "data_reduction": 5.3299084, "data_physical_used": 7446957527, "snapshot_savings": 10.068099, "thin_savings": 426.92657, "shared_logical_used": 39691599872, "system_free_space": 140276973977, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T06:55:00Z", "cluster_id": "0", "logical_provisioned": 91902416257024, "logical_used": 69147471872, "logical_used_volume": 47494443008, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, 
"shared_logical_used_volume": 22366420992, "shared_logical_used_file_system": 14362890240, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368161178952, "efficiency_ratio": 12341.031, "data_reduction": 4.932162, "data_physical_used": 7446899091, "snapshot_savings": 10.074044, "thin_savings": 460.18436, "shared_logical_used": 36729311232, "system_free_space": 140272931020, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:00:00Z", "cluster_id": "0", "logical_provisioned": 92144008167424, "logical_used": 69299822592, "logical_used_volume": 47646793728, "logical_used_file_system": 21653028864, "logical_used_vvol": 0, "shared_logical_used_volume": 22491742208, "shared_logical_used_file_system": 14362894336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368171842103, "efficiency_ratio": 12373.169, "data_reduction": 4.948869, "data_physical_used": 7447082594, "snapshot_savings": 10.070908, "thin_savings": 465.7694, "shared_logical_used": 36854636544, "system_free_space": 140266424934, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:05:00Z", "cluster_id": "0", "logical_provisioned": 92224538804224, "logical_used": 69224849408, "logical_used_volume": 47571783680, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 22441050112, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368176728774, "efficiency_ratio": 12383.941, "data_reduction": 4.942051, "data_physical_used": 7447106830, "snapshot_savings": 10.081684, "thin_savings": 468.80545, "shared_logical_used": 36803981312, "system_free_space": 140261985075, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:10:00Z", "cluster_id": "0", "logical_provisioned": 91905637482496, "logical_used": 69147508736, "logical_used_volume": 47494443008, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 22368526336, "shared_logical_used_file_system": 14362927104, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368181567370, "efficiency_ratio": 12341.275, "data_reduction": 4.932374, "data_physical_used": 7447013507, "snapshot_savings": 10.068111, "thin_savings": 460.18384, "shared_logical_used": 36731453440, "system_free_space": 140258490777, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:15:00Z", "cluster_id": "0", "logical_provisioned": 91902416257024, "logical_used": 69147508736, "logical_used_volume": 47494443008, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 22368526336, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368144613667, "efficiency_ratio": 12340.817, "data_reduction": 4.9323645, "data_physical_used": 7447028306, "snapshot_savings": 10.068099, "thin_savings": 460.18384, "shared_logical_used": 36731457536, "system_free_space": 140258187264, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:20:00Z", "cluster_id": "0", "logical_provisioned": 91902416257024, "logical_used": 69148557312, "logical_used_volume": 47495491584, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 22369574912, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368144135877, "efficiency_ratio": 12340.695, "data_reduction": 4.9324565, 
"data_physical_used": 7447102030, "snapshot_savings": 10.068099, "thin_savings": 460.16928, "shared_logical_used": 36732506112, "system_free_space": 140257346764, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:25:00Z", "cluster_id": "0", "logical_provisioned": 91902416257024, "logical_used": 69148557312, "logical_used_volume": 47495491584, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 22367469568, "shared_logical_used_file_system": 14362927104, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368144879573, "efficiency_ratio": 12340.68, "data_reduction": 4.9321666, "data_physical_used": 7447111553, "snapshot_savings": 10.074044, "thin_savings": 460.16928, "shared_logical_used": 36730396672, "system_free_space": 140256008192, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:30:00Z", "cluster_id": "0", "logical_provisioned": 91902416257024, "logical_used": 69148557312, "logical_used_volume": 47495491584, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 22369574912, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 368146587747, "efficiency_ratio": 12340.527, "data_reduction": 4.9323893, "data_physical_used": 7447203205, "snapshot_savings": 10.068099, "thin_savings": 460.16928, "shared_logical_used": 36732506112, "system_free_space": 140254163763, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:35:00Z", "cluster_id": "0", "logical_provisioned": 91797189558272, "logical_used": 67769225216, "logical_used_volume": 46116159488, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, 
"shared_logical_used_volume": 21473198080, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 367878444333, "efficiency_ratio": 12785.8955, "data_reduction": 4.9914055, "data_physical_used": 7179566371, "snapshot_savings": 9.452273, "thin_savings": 473.34198, "shared_logical_used": 35836129280, "system_free_space": 140254965350, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:40:00Z", "cluster_id": "0", "logical_provisioned": 91575998742528, "logical_used": 64727523328, "logical_used_volume": 43074457600, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 18513879040, "shared_logical_used_file_system": 14362927104, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 367203881526, "efficiency_ratio": 14070.879, "data_reduction": 5.051603, "data_physical_used": 6508193283, "snapshot_savings": 11.488635, "thin_savings": 502.19165, "shared_logical_used": 32876806144, "system_free_space": 140258525798, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:45:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 59746930688, "logical_used_volume": 38093864960, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13563117568, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365985891398, "efficiency_ratio": 17202.197, "data_reduction": 5.266672, "data_physical_used": 5302408847, "snapshot_savings": 14.749792, "thin_savings": 572.1041, "shared_logical_used": 27926048768, "system_free_space": 140270755840, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:50:00Z", "cluster_id": "0", "logical_provisioned": 91121805950976, "logical_used": 59366010880, "logical_used_volume": 37712945152, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13182197760, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365988031896, "efficiency_ratio": 17185.04, "data_reduction": 5.19485, "data_physical_used": 5302392004, "snapshot_savings": 14.749792, "thin_savings": 577.9752, "shared_logical_used": 27545128960, "system_free_space": 140269211033, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T07:55:00Z", "cluster_id": "0", "logical_provisioned": 91039127830528, "logical_used": 58794541056, "logical_used_volume": 37141475328, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12616937472, "shared_logical_used_file_system": 14362927104, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365854185689, "efficiency_ratio": 17615.115, "data_reduction": 5.2203207, "data_physical_used": 5168238892, "snapshot_savings": 16.103075, "thin_savings": 583.9509, "shared_logical_used": 26979864576, "system_free_space": 140268763545, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:00:00Z", "cluster_id": "0", "logical_provisioned": 91087446212608, "logical_used": 58801233920, "logical_used_volume": 37148168192, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12623630336, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365855733729, "efficiency_ratio": 17623.926, "data_reduction": 5.2214565, 
"data_physical_used": 5168397434, "snapshot_savings": 16.103045, "thin_savings": 585.7359, "shared_logical_used": 26986561536, "system_free_space": 140267393843, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:05:00Z", "cluster_id": "0", "logical_provisioned": 91103552339968, "logical_used": 59207589888, "logical_used_volume": 37554524160, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13029986304, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365827261848, "efficiency_ratio": 17627.115, "data_reduction": 5.3001018, "data_physical_used": 5168375931, "snapshot_savings": 16.103045, "thin_savings": 576.95996, "shared_logical_used": 27392917504, "system_free_space": 140258953830, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:10:00Z", "cluster_id": "0", "logical_provisioned": 91074561310720, "logical_used": 59069906944, "logical_used_volume": 37416841216, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12826828800, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365837607100, "efficiency_ratio": 17621.414, "data_reduction": 5.260767, "data_physical_used": 5168402626, "snapshot_savings": 16.109976, "thin_savings": 580.4267, "shared_logical_used": 27189760000, "system_free_space": 140270798643, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:15:00Z", "cluster_id": "0", "logical_provisioned": 91103552339968, "logical_used": 59207589888, "logical_used_volume": 37554524160, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, 
"shared_logical_used_volume": 13029986304, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365842908117, "efficiency_ratio": 17626.904, "data_reduction": 5.3000383, "data_physical_used": 5168437630, "snapshot_savings": 16.103045, "thin_savings": 576.95996, "shared_logical_used": 27392917504, "system_free_space": 140266136985, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:20:00Z", "cluster_id": "0", "logical_provisioned": 91071340085248, "logical_used": 59007795200, "logical_used_volume": 37354729472, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12831875072, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365853119150, "efficiency_ratio": 17620.533, "data_reduction": 5.261666, "data_physical_used": 5168478406, "snapshot_savings": 16.04214, "thin_savings": 580.42676, "shared_logical_used": 27194806272, "system_free_space": 140256817561, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:25:00Z", "cluster_id": "0", "logical_provisioned": 91103552339968, "logical_used": 59348627456, "logical_used_volume": 37695561728, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13168918528, "shared_logical_used_file_system": 14362927104, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365861479450, "efficiency_ratio": 17626.66, "data_reduction": 5.326845, "data_physical_used": 5168508770, "snapshot_savings": 16.119186, "thin_savings": 573.75977, "shared_logical_used": 27531845632, "system_free_space": 140249202483, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:30:00Z", "cluster_id": "0", "logical_provisioned": 91090667438080, "logical_used": 59142643712, "logical_used_volume": 37489577984, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12965040128, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365869715264, "efficiency_ratio": 17624.158, "data_reduction": 5.2873964, "data_physical_used": 5168511920, "snapshot_savings": 16.103045, "thin_savings": 577.9348, "shared_logical_used": 27327971328, "system_free_space": 140241486233, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:35:00Z", "cluster_id": "0", "logical_provisioned": 91090667438080, "logical_used": 58871787520, "logical_used_volume": 37218721792, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12694183936, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365877240811, "efficiency_ratio": 17624.027, "data_reduction": 5.234953, "data_physical_used": 5168549681, "snapshot_savings": 16.103045, "thin_savings": 584.2087, "shared_logical_used": 27057115136, "system_free_space": 140234147430, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:40:00Z", "cluster_id": "0", "logical_provisioned": 91093888663552, "logical_used": 59620638720, "logical_used_volume": 37967572992, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13374193664, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365885980158, "efficiency_ratio": 17624.475, "data_reduction": 5.366466, 
"data_physical_used": 5168601474, "snapshot_savings": 16.135725, "thin_savings": 568.709, "shared_logical_used": 27737124864, "system_free_space": 140225894195, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:45:00Z", "cluster_id": "0", "logical_provisioned": 91090667438080, "logical_used": 59207589888, "logical_used_volume": 37554524160, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13038411776, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365892978009, "efficiency_ratio": 17623.852, "data_reduction": 5.3015003, "data_physical_used": 5168601625, "snapshot_savings": 16.038893, "thin_savings": 576.45044, "shared_logical_used": 27401342976, "system_free_space": 140218465894, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:50:00Z", "cluster_id": "0", "logical_provisioned": 91106773565440, "logical_used": 59491373056, "logical_used_volume": 37838307328, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13313769472, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365900043663, "efficiency_ratio": 17626.658, "data_reduction": 5.354681, "data_physical_used": 5168692943, "snapshot_savings": 16.103045, "thin_savings": 570.68274, "shared_logical_used": 27676700672, "system_free_space": 140211870310, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T08:55:00Z", "cluster_id": "0", "logical_provisioned": 91087446212608, "logical_used": 59082158080, "logical_used_volume": 37429092352, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, 
"shared_logical_used_volume": 12904554496, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365871036221, "efficiency_ratio": 17622.871, "data_reduction": 5.275495, "data_physical_used": 5168706689, "snapshot_savings": 16.103045, "thin_savings": 579.1961, "shared_logical_used": 27267485696, "system_free_space": 140204212428, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:00:00Z", "cluster_id": "0", "logical_provisioned": 91093888663552, "logical_used": 58933903360, "logical_used_volume": 37280837632, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12756299776, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365878205379, "efficiency_ratio": 17624.18, "data_reduction": 5.2468305, "data_physical_used": 5168687977, "snapshot_savings": 16.103045, "thin_savings": 582.88666, "shared_logical_used": 27119230976, "system_free_space": 140196986470, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:05:00Z", "cluster_id": "0", "logical_provisioned": 91093888663552, "logical_used": 59482951680, "logical_used_volume": 37829885952, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13305348096, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365886806842, "efficiency_ratio": 17623.898, "data_reduction": 5.352971, "data_physical_used": 5168770389, "snapshot_savings": 16.103045, "thin_savings": 570.3667, "shared_logical_used": 27668279296, "system_free_space": 140188830515, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:10:00Z", "cluster_id": "0", "logical_provisioned": 91100331114496, "logical_used": 59147165696, "logical_used_volume": 37494099968, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12900720640, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365893983316, "efficiency_ratio": 17625.152, "data_reduction": 5.27469, "data_physical_used": 5168768262, "snapshot_savings": 16.135725, "thin_savings": 579.6685, "shared_logical_used": 27263651840, "system_free_space": 140183682457, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:15:00Z", "cluster_id": "0", "logical_provisioned": 91097109889024, "logical_used": 59486396416, "logical_used_volume": 37833330688, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13308792832, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365901888822, "efficiency_ratio": 17624.441, "data_reduction": 5.353613, "data_physical_used": 5168794183, "snapshot_savings": 16.078943, "thin_savings": 570.49097, "shared_logical_used": 27671724032, "system_free_space": 140176102400, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:20:00Z", "cluster_id": "0", "logical_provisioned": 91100331114496, "logical_used": 59078324224, "logical_used_volume": 37425258496, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12900720640, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365907564821, "efficiency_ratio": 17625.057, "data_reduction": 5.2746615, 
"data_physical_used": 5168796453, "snapshot_savings": 16.103045, "thin_savings": 579.7966, "shared_logical_used": 27263651840, "system_free_space": 140170880409, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:25:00Z", "cluster_id": "0", "logical_provisioned": 91097109889024, "logical_used": 59419164672, "logical_used_volume": 37766098944, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13241561088, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365916528170, "efficiency_ratio": 17624.133, "data_reduction": 5.3405123, "data_physical_used": 5168884853, "snapshot_savings": 16.066921, "thin_savings": 572.0332, "shared_logical_used": 27604492288, "system_free_space": 140169191628, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:30:00Z", "cluster_id": "0", "logical_provisioned": 91103552339968, "logical_used": 59138748416, "logical_used_volume": 37485682688, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 12961144832, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365922599563, "efficiency_ratio": 17625.32, "data_reduction": 5.2862434, "data_physical_used": 5168902307, "snapshot_savings": 16.103045, "thin_savings": 578.53503, "shared_logical_used": 27324076032, "system_free_space": 140162895667, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:35:00Z", "cluster_id": "0", "logical_provisioned": 91119658467328, "logical_used": 59491373056, "logical_used_volume": 37838307328, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, 
"shared_logical_used_volume": 13313769472, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365908223007, "efficiency_ratio": 17628.18, "data_reduction": 5.354386, "data_physical_used": 5168977215, "snapshot_savings": 16.103045, "thin_savings": 571.18665, "shared_logical_used": 27676700672, "system_free_space": 140177810636, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:40:00Z", "cluster_id": "0", "logical_provisioned": 91093888663552, "logical_used": 59345272832, "logical_used_volume": 37692207104, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13098827776, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365912124682, "efficiency_ratio": 17623.223, "data_reduction": 5.312812, "data_physical_used": 5168968687, "snapshot_savings": 16.135725, "thin_savings": 574.8853, "shared_logical_used": 27461758976, "system_free_space": 140182293299, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:45:00Z", "cluster_id": "0", "logical_provisioned": 91122879692800, "logical_used": 59697905664, "logical_used_volume": 38044839936, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13520302080, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365859135273, "efficiency_ratio": 17628.639, "data_reduction": 5.394292, "data_physical_used": 5169025746, "snapshot_savings": 16.103045, "thin_savings": 566.73505, "shared_logical_used": 27883233280, "system_free_space": 140198079897, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:50:00Z", "cluster_id": "0", "logical_provisioned": 91090667438080, "logical_used": 59414114304, "logical_used_volume": 37761048576, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13236510720, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365846719338, "efficiency_ratio": 17622.467, "data_reduction": 5.339408, "data_physical_used": 5169008273, "snapshot_savings": 16.103045, "thin_savings": 571.78046, "shared_logical_used": 27599441920, "system_free_space": 140210216550, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T09:55:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 59906461696, "logical_used_volume": 38253395968, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13728866304, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365843414730, "efficiency_ratio": 17629.068, "data_reduction": 5.4345813, "data_physical_used": 5169082589, "snapshot_savings": 16.087961, "thin_savings": 562.35583, "shared_logical_used": 28091797504, "system_free_space": 140213726412, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:00:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 59563958272, "logical_used_volume": 37910892544, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13386362880, "shared_logical_used_file_system": 14362927104, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365841702480, "efficiency_ratio": 17628.887, "data_reduction": 5.368265, 
"data_physical_used": 5169135399, "snapshot_savings": 16.087992, "thin_savings": 569.8677, "shared_logical_used": 27749289984, "system_free_space": 140216138956, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:05:00Z", "cluster_id": "0", "logical_provisioned": 91093888663552, "logical_used": 59213041664, "logical_used_volume": 37559975936, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13035446272, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365842379235, "efficiency_ratio": 17622.705, "data_reduction": 5.300394, "data_physical_used": 5169120534, "snapshot_savings": 16.087961, "thin_savings": 576.50134, "shared_logical_used": 27398377472, "system_free_space": 140216189542, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:10:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 59565662208, "logical_used_volume": 37912596480, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13388066816, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365847322153, "efficiency_ratio": 17628.69, "data_reduction": 5.3685355, "data_physical_used": 5169193303, "snapshot_savings": 16.087961, "thin_savings": 569.8298, "shared_logical_used": 27750998016, "system_free_space": 140212621312, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:15:00Z", "cluster_id": "0", "logical_provisioned": 91103552339968, "logical_used": 59417894912, "logical_used_volume": 37764829184, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, 
"shared_logical_used_volume": 13240299520, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365845000988, "efficiency_ratio": 17624.182, "data_reduction": 5.3399053, "data_physical_used": 5169236058, "snapshot_savings": 16.087961, "thin_savings": 572.2481, "shared_logical_used": 27603230720, "system_free_space": 140222730649, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:20:00Z", "cluster_id": "0", "logical_provisioned": 91135764594688, "logical_used": 59770458112, "logical_used_volume": 38117392384, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13592862720, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365844144006, "efficiency_ratio": 17630.047, "data_reduction": 5.407997, "data_physical_used": 5169343501, "snapshot_savings": 16.087961, "thin_savings": 565.6887, "shared_logical_used": 27955793920, "system_free_space": 140223563366, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:25:00Z", "cluster_id": "0", "logical_provisioned": 91135764594688, "logical_used": 60428009472, "logical_used_volume": 38461648896, "logical_used_file_system": 21966360576, "logical_used_vvol": 0, "shared_logical_used_volume": 13937119232, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365846290337, "efficiency_ratio": 17630.166, "data_reduction": 5.4746294, "data_physical_used": 5169308912, "snapshot_savings": 16.23654, "thin_savings": 558.2534, "shared_logical_used": 28300050432, "system_free_space": 140221633331, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:30:00Z", "cluster_id": "0", "logical_provisioned": 91119658467328, "logical_used": 59762102272, "logical_used_volume": 38109036544, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13586612224, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365843875470, "efficiency_ratio": 17626.805, "data_reduction": 5.4067492, "data_physical_used": 5169380108, "snapshot_savings": 16.071913, "thin_savings": 565.24835, "shared_logical_used": 27949543424, "system_free_space": 140224256000, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:35:00Z", "cluster_id": "0", "logical_provisioned": 91119658467328, "logical_used": 59419574272, "logical_used_volume": 37766508544, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13241978880, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365806698576, "efficiency_ratio": 17626.854, "data_reduction": 5.340096, "data_physical_used": 5169366233, "snapshot_savings": 16.087961, "thin_savings": 572.8421, "shared_logical_used": 27604910080, "system_free_space": 140225256038, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:40:00Z", "cluster_id": "0", "logical_provisioned": 91138985820160, "logical_used": 60079820800, "logical_used_volume": 38113460224, "logical_used_file_system": 21966360576, "logical_used_vvol": 0, "shared_logical_used_volume": 13588930560, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365805226080, "efficiency_ratio": 17630.29, "data_reduction": 5.4071198, 
"data_physical_used": 5169455220, "snapshot_savings": 16.23654, "thin_savings": 565.7748, "shared_logical_used": 27951861760, "system_free_space": 140226917580, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:45:00Z", "cluster_id": "0", "logical_provisioned": 91122879692800, "logical_used": 59489476608, "logical_used_volume": 37836410880, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13311881216, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365802435777, "efficiency_ratio": 17627.236, "data_reduction": 5.353545, "data_physical_used": 5169436429, "snapshot_savings": 16.087961, "thin_savings": 571.40186, "shared_logical_used": 27674812416, "system_free_space": 140229298995, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:50:00Z", "cluster_id": "0", "logical_provisioned": 91138985820160, "logical_used": 59840954368, "logical_used_volume": 38187888640, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13663358976, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365795460767, "efficiency_ratio": 17630.1, "data_reduction": 5.421459, "data_physical_used": 5169510603, "snapshot_savings": 16.087961, "thin_savings": 564.27435, "shared_logical_used": 28026290176, "system_free_space": 140237283737, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T10:55:00Z", "cluster_id": "0", "logical_provisioned": 91122879692800, "logical_used": 59801088000, "logical_used_volume": 37834727424, "logical_used_file_system": 21966360576, "logical_used_vvol": 0, 
"shared_logical_used_volume": 13309767680, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365792576948, "efficiency_ratio": 17626.998, "data_reduction": 5.3530636, "data_physical_used": 5169506989, "snapshot_savings": 16.239851, "thin_savings": 571.4395, "shared_logical_used": 27672698880, "system_free_space": 140240567910, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:00:00Z", "cluster_id": "0", "logical_provisioned": 91122879692800, "logical_used": 59837042688, "logical_used_volume": 38183976960, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13659443200, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365796205224, "efficiency_ratio": 17626.893, "data_reduction": 5.4206734, "data_physical_used": 5169537712, "snapshot_savings": 16.087992, "thin_savings": 563.738, "shared_logical_used": 28022374400, "system_free_space": 140252253184, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:05:00Z", "cluster_id": "0", "logical_provisioned": 91122879692800, "logical_used": 59491131392, "logical_used_volume": 37838065664, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13313536000, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365797490798, "efficiency_ratio": 17626.941, "data_reduction": 5.3537755, "data_physical_used": 5169523246, "snapshot_savings": 16.075983, "thin_savings": 571.40234, "shared_logical_used": 27676467200, "system_free_space": 140251677286, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:10:00Z", "cluster_id": "0", "logical_provisioned": 91142207045632, "logical_used": 60291387392, "logical_used_volume": 38325026816, "logical_used_file_system": 21966360576, "logical_used_vvol": 0, "shared_logical_used_volume": 13731655680, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365800048505, "efficiency_ratio": 17630.592, "data_reduction": 5.43463, "data_physical_used": 5169549140, "snapshot_savings": 16.269188, "thin_savings": 562.7913, "shared_logical_used": 28094586880, "system_free_space": 140249303654, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:15:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 59625467904, "logical_used_volume": 37972402176, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13447872512, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365802891813, "efficiency_ratio": 17627.438, "data_reduction": 5.379723, "data_physical_used": 5169560579, "snapshot_savings": 16.087961, "thin_savings": 568.5039, "shared_logical_used": 27810803712, "system_free_space": 140246684876, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:20:00Z", "cluster_id": "0", "logical_provisioned": 91142207045632, "logical_used": 60246720512, "logical_used_volume": 38593654784, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 14069125120, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365811796157, "efficiency_ratio": 17630.531, "data_reduction": 5.4998918, 
"data_physical_used": 5169566504, "snapshot_savings": 16.087961, "thin_savings": 555.6986, "shared_logical_used": 28432056320, "system_free_space": 140250611097, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:25:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 59840409600, "logical_used_volume": 38187343872, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13662814208, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365775236864, "efficiency_ratio": 17627.342, "data_reduction": 5.421272, "data_physical_used": 5169588561, "snapshot_savings": 16.087961, "thin_savings": 563.78906, "shared_logical_used": 28025745408, "system_free_space": 140254113177, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:30:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 60176179200, "logical_used_volume": 38523113472, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13998583808, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365769318138, "efficiency_ratio": 17627.068, "data_reduction": 5.486138, "data_physical_used": 5169668408, "snapshot_savings": 16.087961, "thin_savings": 556.5783, "shared_logical_used": 28361515008, "system_free_space": 140259876044, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:35:00Z", "cluster_id": "0", "logical_provisioned": 91129322143744, "logical_used": 59633868800, "logical_used_volume": 37980803072, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, 
"shared_logical_used_volume": 13456273408, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365775515428, "efficiency_ratio": 17627.639, "data_reduction": 5.3812194, "data_physical_used": 5169684213, "snapshot_savings": 16.087961, "thin_savings": 568.4434, "shared_logical_used": 27819204608, "system_free_space": 140258179481, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:40:00Z", "cluster_id": "0", "logical_provisioned": 91129322143744, "logical_used": 60179566592, "logical_used_volume": 38526500864, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 13933129728, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365785780536, "efficiency_ratio": 17627.285, "data_reduction": 5.4733505, "data_physical_used": 5169788050, "snapshot_savings": 16.096502, "thin_savings": 558.0412, "shared_logical_used": 28296060928, "system_free_space": 140248412569, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:45:00Z", "cluster_id": "0", "logical_provisioned": 91142207045632, "logical_used": 60253409280, "logical_used_volume": 38600343552, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 14075813888, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365791392198, "efficiency_ratio": 17629.59, "data_reduction": 5.5008917, "data_physical_used": 5169842584, "snapshot_savings": 16.087961, "thin_savings": 555.55743, "shared_logical_used": 28438745088, "system_free_space": 140242933760, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:50:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 60245041152, "logical_used_volume": 38591975424, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 14067445760, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365796708491, "efficiency_ratio": 17626.506, "data_reduction": 5.499283, "data_physical_used": 5169833549, "snapshot_savings": 16.087961, "thin_savings": 555.1222, "shared_logical_used": 28430376960, "system_free_space": 140238602854, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T11:55:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 60245041152, "logical_used_volume": 38591975424, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 14065340416, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365801452702, "efficiency_ratio": 17626.164, "data_reduction": 5.498769, "data_physical_used": 5169933856, "snapshot_savings": 16.10404, "thin_savings": 555.1222, "shared_logical_used": 28428271616, "system_free_space": 140233968435, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:00:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 60246724608, "logical_used_volume": 38593658880, "logical_used_file_system": 21653065728, "logical_used_vvol": 0, "shared_logical_used_volume": 14069129216, "shared_logical_used_file_system": 14362931200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365806593370, "efficiency_ratio": 17626.275, "data_reduction": 5.4995365, 
"data_physical_used": 5169900981, "snapshot_savings": 16.075924, "thin_savings": 555.1222, "shared_logical_used": 28432060416, "system_free_space": 140229302886, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:05:00Z", "cluster_id": "0", "logical_provisioned": 91126100918272, "logical_used": 60245065728, "logical_used_volume": 38591975424, "logical_used_file_system": 21653090304, "logical_used_vvol": 0, "shared_logical_used_volume": 14075850752, "shared_logical_used_file_system": 14362955776, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365815070191, "efficiency_ratio": 17625.807, "data_reduction": 5.500695, "data_physical_used": 5170038538, "snapshot_savings": 16.024088, "thin_savings": 555.12164, "shared_logical_used": 28438806528, "system_free_space": 140225866956, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:10:00Z", "cluster_id": "0", "logical_provisioned": 91132543369216, "logical_used": 59627163648, "logical_used_volume": 37974073344, "logical_used_file_system": 21653090304, "logical_used_vvol": 0, "shared_logical_used_volume": 13451227136, "shared_logical_used_file_system": 14362955776, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365814669191, "efficiency_ratio": 17627.36, "data_reduction": 5.379973, "data_physical_used": 5169948525, "snapshot_savings": 16.075127, "thin_savings": 568.5917, "shared_logical_used": 27814182912, "system_free_space": 140218512588, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:15:00Z", "cluster_id": "0", "logical_provisioned": 91145428271104, "logical_used": 60109586432, "logical_used_volume": 38454366208, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, 
"shared_logical_used_volume": 13929836544, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365785189761, "efficiency_ratio": 17629.742, "data_reduction": 5.4729686, "data_physical_used": 5169980766, "snapshot_savings": 16.086273, "thin_savings": 558.73175, "shared_logical_used": 28295143424, "system_free_space": 140210835251, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:20:00Z", "cluster_id": "0", "logical_provisioned": 91113216016384, "logical_used": 59695394816, "logical_used_volume": 38040174592, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, "shared_logical_used_volume": 13515644928, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365789983934, "efficiency_ratio": 17623.383, "data_reduction": 5.392814, "data_physical_used": 5170018952, "snapshot_savings": 16.086273, "thin_savings": 566.4614, "shared_logical_used": 27880951808, "system_free_space": 140204060672, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:25:00Z", "cluster_id": "0", "logical_provisioned": 91119658467328, "logical_used": 59973513216, "logical_used_volume": 38318292992, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, "shared_logical_used_volume": 13793763328, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365798972149, "efficiency_ratio": 17624.516, "data_reduction": 5.4465737, "data_physical_used": 5170052340, "snapshot_savings": 16.086273, "thin_savings": 560.6611, "shared_logical_used": 28159070208, "system_free_space": 140197379481, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:30:00Z", "cluster_id": "0", "logical_provisioned": 91129322143744, "logical_used": 59632656384, "logical_used_volume": 37977436160, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, "shared_logical_used_volume": 13450801152, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365804126745, "efficiency_ratio": 17626.318, "data_reduction": 5.380217, "data_physical_used": 5170071717, "snapshot_savings": 16.086334, "thin_savings": 568.51965, "shared_logical_used": 27816108032, "system_free_space": 140193017446, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:35:00Z", "cluster_id": "0", "logical_provisioned": 91113216016384, "logical_used": 60042768384, "logical_used_volume": 38387548160, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, "shared_logical_used_volume": 13863018496, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365808836675, "efficiency_ratio": 17623.18, "data_reduction": 5.459942, "data_physical_used": 5170078160, "snapshot_savings": 16.071285, "thin_savings": 558.9727, "shared_logical_used": 28228325376, "system_free_space": 140192110796, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:40:00Z", "cluster_id": "0", "logical_provisioned": 91100331114496, "logical_used": 59629719552, "logical_used_volume": 37974499328, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, "shared_logical_used_volume": 13449969664, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365798341573, "efficiency_ratio": 17620.58, "data_reduction": 5.3800163, 
"data_physical_used": 5170110149, "snapshot_savings": 16.071285, "thin_savings": 567.32837, "shared_logical_used": 27815276544, "system_free_space": 140203196825, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:45:00Z", "cluster_id": "0", "logical_provisioned": 91113216016384, "logical_used": 59973926912, "logical_used_volume": 38318706688, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, "shared_logical_used_volume": 13794177024, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365801064472, "efficiency_ratio": 17622.973, "data_reduction": 5.4465623, "data_physical_used": 5170138924, "snapshot_savings": 16.071285, "thin_savings": 560.44995, "shared_logical_used": 28159483904, "system_free_space": 140200527462, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:50:00Z", "cluster_id": "0", "logical_provisioned": 91103552339968, "logical_used": 59767402496, "logical_used_volume": 38112182272, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, "shared_logical_used_volume": 13587652608, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365806260026, "efficiency_ratio": 17621.092, "data_reduction": 5.4066133, "data_physical_used": 5170142165, "snapshot_savings": 16.071285, "thin_savings": 564.555, "shared_logical_used": 27952959488, "system_free_space": 140196301619, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T12:55:00Z", "cluster_id": "0", "logical_provisioned": 91119658467328, "logical_used": 60111609856, "logical_used_volume": 38456389632, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, 
"shared_logical_used_volume": 13929754624, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365810563302, "efficiency_ratio": 17624.174, "data_reduction": 5.472771, "data_physical_used": 5170152254, "snapshot_savings": 16.08733, "thin_savings": 557.7492, "shared_logical_used": 28295061504, "system_free_space": 140192340377, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:00:00Z", "cluster_id": "0", "logical_provisioned": 91119658467328, "logical_used": 60455809024, "logical_used_volume": 38800588800, "logical_used_file_system": 21655220224, "logical_used_vvol": 0, "shared_logical_used_volume": 14276067328, "shared_logical_used_file_system": 14365306880, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365815666359, "efficiency_ratio": 17624.115, "data_reduction": 5.5397363, "data_physical_used": 5170169204, "snapshot_savings": 16.071222, "thin_savings": 550.5131, "shared_logical_used": 28641374208, "system_free_space": 140187709849, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:05:00Z", "cluster_id": "0", "logical_provisioned": 91155091947520, "logical_used": 60182159360, "logical_used_volume": 38526910464, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14002380800, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365786250469, "efficiency_ratio": 17630.63, "data_reduction": 5.4867005, "data_physical_used": 5170268873, "snapshot_savings": 16.071285, "thin_savings": 557.6001, "shared_logical_used": 28367716352, "system_free_space": 140180865228, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:10:00Z", "cluster_id": "0", "logical_provisioned": 91142207045632, "logical_used": 59906805760, "logical_used_volume": 38251556864, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 13658185728, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365795001731, "efficiency_ratio": 17628.193, "data_reduction": 5.420146, "data_physical_used": 5170252044, "snapshot_savings": 16.103897, "thin_savings": 564.38513, "shared_logical_used": 28023521280, "system_free_space": 140179635609, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:15:00Z", "cluster_id": "0", "logical_provisioned": 91155091947520, "logical_used": 60182151168, "logical_used_volume": 38526902272, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14002372608, "shared_logical_used_file_system": 14365331456, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365805781755, "efficiency_ratio": 17630.602, "data_reduction": 5.4866896, "data_physical_used": 5170276768, "snapshot_savings": 16.071318, "thin_savings": 557.6003, "shared_logical_used": 28367704064, "system_free_space": 140168868659, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:20:00Z", "cluster_id": "0", "logical_provisioned": 91135764594688, "logical_used": 60839665664, "logical_used_volume": 38871121920, "logical_used_file_system": 21968543744, "logical_used_vvol": 0, "shared_logical_used_volume": 14346592256, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365813011467, "efficiency_ratio": 17626.729, "data_reduction": 5.553224, 
"data_physical_used": 5170316970, "snapshot_savings": 16.219702, "thin_savings": 549.6583, "shared_logical_used": 28711927808, "system_free_space": 140161907302, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:25:00Z", "cluster_id": "0", "logical_provisioned": 91145428271104, "logical_used": 60188971008, "logical_used_volume": 38533722112, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14007087104, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365820354615, "efficiency_ratio": 17628.334, "data_reduction": 5.487478, "data_physical_used": 5170393968, "snapshot_savings": 16.08733, "thin_savings": 557.0875, "shared_logical_used": 28372422656, "system_free_space": 140157720371, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:30:00Z", "cluster_id": "0", "logical_provisioned": 91161534398464, "logical_used": 60670775296, "logical_used_volume": 39015526400, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14490996736, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365829742392, "efficiency_ratio": 17631.34, "data_reduction": 5.581036, "data_physical_used": 5170425978, "snapshot_savings": 16.071285, "thin_savings": 547.6541, "shared_logical_used": 28856332288, "system_free_space": 140147377561, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:35:00Z", "cluster_id": "0", "logical_provisioned": 91116437241856, "logical_used": 60318158848, "logical_used_volume": 38662909952, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, 
"shared_logical_used_volume": 14138380288, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365833216343, "efficiency_ratio": 17622.566, "data_reduction": 5.5128207, "data_physical_used": 5170441002, "snapshot_savings": 16.071285, "thin_savings": 553.26227, "shared_logical_used": 28503715840, "system_free_space": 140143642009, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:40:00Z", "cluster_id": "0", "logical_provisioned": 91132543369216, "logical_used": 60455845888, "logical_used_volume": 38800596992, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14276067328, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365832621264, "efficiency_ratio": 17625.729, "data_reduction": 5.5394654, "data_physical_used": 5170427320, "snapshot_savings": 16.071285, "thin_savings": 550.99805, "shared_logical_used": 28641402880, "system_free_space": 140141132185, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:45:00Z", "cluster_id": "0", "logical_provisioned": 91116437241856, "logical_used": 60111638528, "logical_used_volume": 38456389632, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 13931859968, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365838627202, "efficiency_ratio": 17622.484, "data_reduction": 5.472853, "data_physical_used": 5170464575, "snapshot_savings": 16.071285, "thin_savings": 557.6256, "shared_logical_used": 28297195520, "system_free_space": 140141817036, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:50:00Z", "cluster_id": "0", "logical_provisioned": 91132543369216, "logical_used": 60593598464, "logical_used_volume": 38938349568, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14413819904, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365843718402, "efficiency_ratio": 17625.39, "data_reduction": 5.5660014, "data_physical_used": 5170526463, "snapshot_savings": 16.071285, "thin_savings": 548.1519, "shared_logical_used": 28779155456, "system_free_space": 140136820736, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T13:55:00Z", "cluster_id": "0", "logical_provisioned": 91116437241856, "logical_used": 60180480000, "logical_used_volume": 38525231104, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 13998596096, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365805057197, "efficiency_ratio": 17622.402, "data_reduction": 5.485735, "data_physical_used": 5170488827, "snapshot_savings": 16.08733, "thin_savings": 556.1635, "shared_logical_used": 28363931648, "system_free_space": 140138474496, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:00:00Z", "cluster_id": "0", "logical_provisioned": 91132543369216, "logical_used": 60731211776, "logical_used_volume": 39075962880, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14555045888, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365809841450, "efficiency_ratio": 17625.39, "data_reduction": 5.593315, 
"data_physical_used": 5170526033, "snapshot_savings": 16.043827, "thin_savings": 545.3378, "shared_logical_used": 28920381440, "system_free_space": 140133306982, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:05:00Z", "cluster_id": "0", "logical_provisioned": 91151870722048, "logical_used": 60319039488, "logical_used_volume": 38663790592, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14139260928, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365813667679, "efficiency_ratio": 17628.988, "data_reduction": 5.5128565, "data_physical_used": 5170567603, "snapshot_savings": 16.071285, "thin_savings": 554.5863, "shared_logical_used": 28504596480, "system_free_space": 140129614233, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:10:00Z", "cluster_id": "0", "logical_provisioned": 91171198074880, "logical_used": 60459220992, "logical_used_volume": 38803972096, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14279442432, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365820516227, "efficiency_ratio": 17632.64, "data_reduction": 5.539941, "data_physical_used": 5170592937, "snapshot_savings": 16.071285, "thin_savings": 552.26337, "shared_logical_used": 28644777984, "system_free_space": 140123730739, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:15:00Z", "cluster_id": "0", "logical_provisioned": 91142207045632, "logical_used": 60249321472, "logical_used_volume": 38594072576, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, 
"shared_logical_used_volume": 14069542912, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365824942648, "efficiency_ratio": 17626.727, "data_reduction": 5.499251, "data_physical_used": 5170682346, "snapshot_savings": 16.071285, "thin_savings": 555.688, "shared_logical_used": 28434878464, "system_free_space": 140119333683, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:20:00Z", "cluster_id": "0", "logical_provisioned": 91174419300352, "logical_used": 60593537024, "logical_used_volume": 38938288128, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14413758464, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365831059229, "efficiency_ratio": 17632.885, "data_reduction": 5.5657983, "data_physical_used": 5170703963, "snapshot_savings": 16.071285, "thin_savings": 549.72345, "shared_logical_used": 28779094016, "system_free_space": 140113730355, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:25:00Z", "cluster_id": "0", "logical_provisioned": 91190525427712, "logical_used": 60877287424, "logical_used_volume": 39222038528, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14697508864, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365837994239, "efficiency_ratio": 17635.893, "data_reduction": 5.6206408, "data_physical_used": 5170735342, "snapshot_savings": 16.071285, "thin_savings": 544.5335, "shared_logical_used": 29062844416, "system_free_space": 140106877952, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:30:00Z", "cluster_id": "0", "logical_provisioned": 91142207045632, "logical_used": 60455845888, "logical_used_volume": 38800596992, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14273961984, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365842456168, "efficiency_ratio": 17626.586, "data_reduction": 5.5387406, "data_physical_used": 5170724069, "snapshot_savings": 16.08733, "thin_savings": 551.3623, "shared_logical_used": 28639297536, "system_free_space": 140102333030, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:35:00Z", "cluster_id": "0", "logical_provisioned": 91158313172992, "logical_used": 60601946112, "logical_used_volume": 38946697216, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14422167552, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365845009106, "efficiency_ratio": 17629.48, "data_reduction": 5.5673337, "data_physical_used": 5170788220, "snapshot_savings": 16.071285, "thin_savings": 548.94635, "shared_logical_used": 28787503104, "system_free_space": 140099924377, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:40:00Z", "cluster_id": "0", "logical_provisioned": 91145428271104, "logical_used": 60326580224, "logical_used_volume": 38671331328, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14077960192, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365847274931, "efficiency_ratio": 17627.092, "data_reduction": 5.5007977, 
"data_physical_used": 5170758239, "snapshot_savings": 16.103897, "thin_savings": 555.5104, "shared_logical_used": 28443295744, "system_free_space": 140097710284, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:45:00Z", "cluster_id": "0", "logical_provisioned": 91158313172992, "logical_used": 60601946112, "logical_used_volume": 38946697216, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14422167552, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365816418712, "efficiency_ratio": 17629.357, "data_reduction": 5.567294, "data_physical_used": 5170824912, "snapshot_savings": 16.071285, "thin_savings": 548.94635, "shared_logical_used": 28787503104, "system_free_space": 140095414476, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:50:00Z", "cluster_id": "0", "logical_provisioned": 91158313172992, "logical_used": 60946153472, "logical_used_volume": 39290904576, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14766374912, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365820119529, "efficiency_ratio": 17629.312, "data_reduction": 5.633847, "data_physical_used": 5170837800, "snapshot_savings": 16.071285, "thin_savings": 541.95355, "shared_logical_used": 29131710464, "system_free_space": 140091499929, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T14:55:00Z", "cluster_id": "0", "logical_provisioned": 91151870722048, "logical_used": 60224057344, "logical_used_volume": 39015612416, "logical_used_file_system": 21208444928, "logical_used_vvol": 0, 
"shared_logical_used_volume": 14491082752, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365822706496, "efficiency_ratio": 17628.057, "data_reduction": 5.580605, "data_physical_used": 5170840683, "snapshot_savings": 15.859624, "thin_savings": 547.291, "shared_logical_used": 28856418304, "system_free_space": 140088931737, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:00:00Z", "cluster_id": "0", "logical_provisioned": 91167976849408, "logical_used": 60808470528, "logical_used_volume": 39153221632, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14628691968, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365825493344, "efficiency_ratio": 17631.094, "data_reduction": 5.6071925, "data_physical_used": 5170863503, "snapshot_savings": 16.071285, "thin_savings": 545.0887, "shared_logical_used": 28994027520, "system_free_space": 140086655385, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:05:00Z", "cluster_id": "0", "logical_provisioned": 91151870722048, "logical_used": 60455845888, "logical_used_volume": 38800596992, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14276067328, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365830895753, "efficiency_ratio": 17627.97, "data_reduction": 5.5389953, "data_physical_used": 5170866038, "snapshot_savings": 16.071285, "thin_savings": 551.7265, "shared_logical_used": 28641402880, "system_free_space": 140081550131, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:10:00Z", "cluster_id": "0", "logical_provisioned": 91155091947520, "logical_used": 60800053248, "logical_used_volume": 39144804352, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14620274688, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365838347443, "efficiency_ratio": 17628.574, "data_reduction": 5.6055555, "data_physical_used": 5170871960, "snapshot_savings": 16.071285, "thin_savings": 544.6601, "shared_logical_used": 28985610240, "system_free_space": 140074880614, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:15:00Z", "cluster_id": "0", "logical_provisioned": 91151870722048, "logical_used": 61006577664, "logical_used_volume": 39351328768, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14826799104, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365840133970, "efficiency_ratio": 17627.889, "data_reduction": 5.6454763, "data_physical_used": 5170889567, "snapshot_savings": 16.071285, "thin_savings": 540.5065, "shared_logical_used": 29192134656, "system_free_space": 140080553984, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:20:00Z", "cluster_id": "0", "logical_provisioned": 91135764594688, "logical_used": 60455845888, "logical_used_volume": 38800596992, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14276067328, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365842050691, "efficiency_ratio": 17624.68, "data_reduction": 5.5389404, 
"data_physical_used": 5170917527, "snapshot_savings": 16.071285, "thin_savings": 551.11945, "shared_logical_used": 28641402880, "system_free_space": 140079028633, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:25:00Z", "cluster_id": "0", "logical_provisioned": 91151870722048, "logical_used": 60800053248, "logical_used_volume": 39144804352, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14620274688, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365846162679, "efficiency_ratio": 17627.836, "data_reduction": 5.6055193, "data_physical_used": 5170905476, "snapshot_savings": 16.071285, "thin_savings": 544.6601, "shared_logical_used": 28985610240, "system_free_space": 140075098521, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:30:00Z", "cluster_id": "0", "logical_provisioned": 91151870722048, "logical_used": 61144260608, "logical_used_volume": 39489011712, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14964482048, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365850033574, "efficiency_ratio": 17627.662, "data_reduction": 5.6720295, "data_physical_used": 5170956246, "snapshot_savings": 16.071285, "thin_savings": 537.7724, "shared_logical_used": 29329817600, "system_free_space": 140071483596, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:35:00Z", "cluster_id": "0", "logical_provisioned": 91151870722048, "logical_used": 60800053248, "logical_used_volume": 39144804352, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, 
"shared_logical_used_volume": 14620274688, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365817120925, "efficiency_ratio": 17627.71, "data_reduction": 5.6054797, "data_physical_used": 5170941998, "snapshot_savings": 16.071285, "thin_savings": 544.6601, "shared_logical_used": 28985610240, "system_free_space": 140067417292, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:40:00Z", "cluster_id": "0", "logical_provisioned": 91187304202240, "logical_used": 61144260608, "logical_used_volume": 39489011712, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14964482048, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365822166104, "efficiency_ratio": 17634.387, "data_reduction": 5.6719885, "data_physical_used": 5170993910, "snapshot_savings": 16.071285, "thin_savings": 538.9558, "shared_logical_used": 29329817600, "system_free_space": 140062210867, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:45:00Z", "cluster_id": "0", "logical_provisioned": 91167976849408, "logical_used": 60531429376, "logical_used_volume": 38876180480, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14351650816, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365811973164, "efficiency_ratio": 17630.725, "data_reduction": 5.553499, "data_physical_used": 5170971795, "snapshot_savings": 16.071285, "thin_savings": 550.7645, "shared_logical_used": 28716986368, "system_free_space": 140072942796, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:50:00Z", "cluster_id": "0", "logical_provisioned": 91171198074880, "logical_used": 60805079040, "logical_used_volume": 39149830144, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14625300480, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365820156467, "efficiency_ratio": 17631.275, "data_reduction": 5.606396, "data_physical_used": 5170993298, "snapshot_savings": 16.071285, "thin_savings": 545.2773, "shared_logical_used": 28990636032, "system_free_space": 140066055372, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T15:55:00Z", "cluster_id": "0", "logical_provisioned": 91180861751296, "logical_used": 61147627520, "logical_used_volume": 39492378624, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14967848960, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365826774202, "efficiency_ratio": 17633.057, "data_reduction": 5.672612, "data_physical_used": 5171018940, "snapshot_savings": 16.071285, "thin_savings": 538.7708, "shared_logical_used": 29333184512, "system_free_space": 140060300288, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:00:00Z", "cluster_id": "0", "logical_provisioned": 91171198074880, "logical_used": 60533104640, "logical_used_volume": 38877855744, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14351220736, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365829855439, "efficiency_ratio": 17631.232, "data_reduction": 5.5533795, 
"data_physical_used": 5171005249, "snapshot_savings": 16.08733, "thin_savings": 550.8509, "shared_logical_used": 28716556288, "system_free_space": 140058051174, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:05:00Z", "cluster_id": "0", "logical_provisioned": 91187304202240, "logical_used": 60873945088, "logical_used_volume": 39218696192, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14694166528, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365836261702, "efficiency_ratio": 17634.209, "data_reduction": 5.6196566, "data_physical_used": 5171046120, "snapshot_savings": 16.071285, "thin_savings": 544.4815, "shared_logical_used": 29059502080, "system_free_space": 140052183244, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:10:00Z", "cluster_id": "0", "logical_provisioned": 91164755623936, "logical_used": 61006577664, "logical_used_volume": 39351328768, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14757957632, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365844209867, "efficiency_ratio": 17629.715, "data_reduction": 5.63195, "data_physical_used": 5171085537, "snapshot_savings": 16.103897, "thin_savings": 542.2417, "shared_logical_used": 29123293184, "system_free_space": 140045136281, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:15:00Z", "cluster_id": "0", "logical_provisioned": 91193746653184, "logical_used": 61016043520, "logical_used_volume": 39360794624, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, 
"shared_logical_used_volume": 14836264960, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365849381350, "efficiency_ratio": 17635.354, "data_reduction": 5.647104, "data_physical_used": 5171075409, "snapshot_savings": 16.071285, "thin_savings": 541.86334, "shared_logical_used": 29201600512, "system_free_space": 140043848294, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:20:00Z", "cluster_id": "0", "logical_provisioned": 91180861751296, "logical_used": 60740677632, "logical_used_volume": 39085428736, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14560899072, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365856134265, "efficiency_ratio": 17632.842, "data_reduction": 5.593846, "data_physical_used": 5171081933, "snapshot_savings": 16.071285, "thin_savings": 546.94727, "shared_logical_used": 28926234624, "system_free_space": 140037665177, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:25:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61084884992, "logical_used_volume": 39429636096, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14903013376, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365824865478, "efficiency_ratio": 17635.838, "data_reduction": 5.659967, "data_physical_used": 5171116561, "snapshot_savings": 16.087236, "thin_savings": 540.6085, "shared_logical_used": 29268348928, "system_free_space": 140032408166, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:30:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61460328448, "logical_used_volume": 39805079552, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 15251415040, "shared_logical_used_file_system": 14365331456, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365828760687, "efficiency_ratio": 17635.643, "data_reduction": 5.7272773, "data_physical_used": 5171173841, "snapshot_savings": 16.07009, "thin_savings": 533.8429, "shared_logical_used": 29616746496, "system_free_space": 140027999436, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:35:00Z", "cluster_id": "0", "logical_provisioned": 91184082976768, "logical_used": 61016035328, "logical_used_volume": 39360786432, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14836256768, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365835293177, "efficiency_ratio": 17632.908, "data_reduction": 5.646918, "data_physical_used": 5171244722, "snapshot_savings": 16.071285, "thin_savings": 541.5068, "shared_logical_used": 29201592320, "system_free_space": 140022154854, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:40:00Z", "cluster_id": "0", "logical_provisioned": 91219516456960, "logical_used": 61222555648, "logical_used_volume": 39567306752, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14973935616, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365841058980, "efficiency_ratio": 17639.676, "data_reduction": 5.673514, 
"data_physical_used": 5171269430, "snapshot_savings": 16.103897, "thin_savings": 539.9517, "shared_logical_used": 29339271168, "system_free_space": 140017248051, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:45:00Z", "cluster_id": "0", "logical_provisioned": 91184082976768, "logical_used": 60809494528, "logical_used_volume": 39154245632, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14629715968, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365846209618, "efficiency_ratio": 17632.463, "data_reduction": 5.6068354, "data_physical_used": 5171375456, "snapshot_savings": 16.071285, "thin_savings": 545.667, "shared_logical_used": 28995051520, "system_free_space": 140019485491, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:50:00Z", "cluster_id": "0", "logical_provisioned": 91200189104128, "logical_used": 61153722368, "logical_used_volume": 39498473472, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14973943808, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365852076492, "efficiency_ratio": 17635.506, "data_reduction": 5.673377, "data_physical_used": 5171396302, "snapshot_savings": 16.071285, "thin_savings": 539.36005, "shared_logical_used": 29339279360, "system_free_space": 140013341286, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T16:55:00Z", "cluster_id": "0", "logical_provisioned": 91216295231488, "logical_used": 61221441536, "logical_used_volume": 39566192640, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15039557632, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365855944905, "efficiency_ratio": 17638.328, "data_reduction": 5.6859703, "data_physical_used": 5171482250, "snapshot_savings": 16.08733, "thin_savings": 538.612, "shared_logical_used": 29404893184, "system_free_space": 140010442342, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:00:00Z", "cluster_id": "0", "logical_provisioned": 91190525427712, "logical_used": 60947202048, "logical_used_volume": 39291953152, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14767042560, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365860476414, "efficiency_ratio": 17633.352, "data_reduction": 5.6332765, "data_physical_used": 5171480221, "snapshot_savings": 16.074186, "thin_savings": 543.12463, "shared_logical_used": 29132378112, "system_free_space": 140005449932, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:05:00Z", "cluster_id": "0", "logical_provisioned": 91174419300352, "logical_used": 61291393024, "logical_used_volume": 39636144128, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 15111614464, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365864797572, "efficiency_ratio": 17630.016, "data_reduction": 5.6998343, "data_physical_used": 5171545015, "snapshot_savings": 16.071285, "thin_savings": 535.705, "shared_logical_used": 29476950016, "system_free_space": 140002329190, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:10:00Z", "cluster_id": "0", "logical_provisioned": 91193746653184, "logical_used": 61360250880, "logical_used_volume": 39705001984, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 15111630848, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365866855536, "efficiency_ratio": 17633.502, "data_reduction": 5.699756, "data_physical_used": 5171618707, "snapshot_savings": 16.103897, "thin_savings": 536.2933, "shared_logical_used": 29476966400, "system_free_space": 140000924467, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:15:00Z", "cluster_id": "0", "logical_provisioned": 91177640525824, "logical_used": 61016043520, "logical_used_volume": 39360794624, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14836264960, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365833249408, "efficiency_ratio": 17630.477, "data_reduction": 5.646539, "data_physical_used": 5171592919, "snapshot_savings": 16.071285, "thin_savings": 541.26886, "shared_logical_used": 29201600512, "system_free_space": 139997562470, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:20:00Z", "cluster_id": "0", "logical_provisioned": 91193746653184, "logical_used": 61360250880, "logical_used_volume": 39705001984, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 15180472320, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365837241496, "efficiency_ratio": 17633.54, "data_reduction": 5.71308, 
"data_physical_used": 5171607802, "snapshot_savings": 16.071285, "thin_savings": 535.06494, "shared_logical_used": 29545807872, "system_free_space": 139993192652, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:25:00Z", "cluster_id": "0", "logical_provisioned": 91193746653184, "logical_used": 61704458240, "logical_used_volume": 40049209344, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 15522574336, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365841972407, "efficiency_ratio": 17633.18, "data_reduction": 5.7791123, "data_physical_used": 5171712827, "snapshot_savings": 16.08733, "thin_savings": 528.435, "shared_logical_used": 29887909888, "system_free_space": 139988997939, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:30:00Z", "cluster_id": "0", "logical_provisioned": 91193746653184, "logical_used": 61360250880, "logical_used_volume": 39705001984, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 15182577664, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365845876914, "efficiency_ratio": 17633.143, "data_reduction": 5.7133584, "data_physical_used": 5171724203, "snapshot_savings": 16.055273, "thin_savings": 535.06494, "shared_logical_used": 29547913216, "system_free_space": 139985772134, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:35:00Z", "cluster_id": "0", "logical_provisioned": 91209852780544, "logical_used": 61699416064, "logical_used_volume": 40044167168, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15519637504, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365850815685, "efficiency_ratio": 17636.057, "data_reduction": 5.7784667, "data_physical_used": 5171782845, "snapshot_savings": 16.071285, "thin_savings": 529.11084, "shared_logical_used": 29884973056, "system_free_space": 139981180518, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:40:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61081493504, "logical_used_volume": 39426244608, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14832873472, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365856289297, "efficiency_ratio": 17633.484, "data_reduction": 5.64565, "data_physical_used": 5171806710, "snapshot_savings": 16.103897, "thin_savings": 541.9312, "shared_logical_used": 29198209024, "system_free_space": 139976503296, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:45:00Z", "cluster_id": "0", "logical_provisioned": 91209852780544, "logical_used": 61488390144, "logical_used_volume": 39833141248, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 15308611584, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365863649946, "efficiency_ratio": 17635.855, "data_reduction": 5.737598, "data_physical_used": 5171841525, "snapshot_savings": 16.071285, "thin_savings": 533.1618, "shared_logical_used": 29673947136, "system_free_space": 139969849344, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:50:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61076475904, "logical_used_volume": 39421227008, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 14896697344, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365868148185, "efficiency_ratio": 17633.201, "data_reduction": 5.6579, "data_physical_used": 5171889786, "snapshot_savings": 16.071285, "thin_savings": 540.77594, "shared_logical_used": 29262032896, "system_free_space": 139965623500, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T17:55:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61420675072, "logical_used_volume": 39765426176, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 15240896512, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365873987573, "efficiency_ratio": 17632.848, "data_reduction": 5.724337, "data_physical_used": 5171992916, "snapshot_savings": 16.071285, "thin_savings": 534.0062, "shared_logical_used": 29606232064, "system_free_space": 139964032000, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:00:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 61489516544, "logical_used_volume": 39834267648, "logical_used_file_system": 21655248896, "logical_used_vvol": 0, "shared_logical_used_volume": 15307632640, "shared_logical_used_file_system": 14365335552, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365882728336, "efficiency_ratio": 17636.074, "data_reduction": 5.737277, 
"data_physical_used": 5171960118, "snapshot_savings": 16.08733, "thin_savings": 533.2569, "shared_logical_used": 29672968192, "system_free_space": 139960413184, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:05:00Z", "cluster_id": "0", "logical_provisioned": 91200189104128, "logical_used": 61148684288, "logical_used_volume": 39493410816, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 14968881152, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365854803236, "efficiency_ratio": 17633.566, "data_reduction": 5.671778, "data_physical_used": 5171965501, "snapshot_savings": 16.071285, "thin_savings": 539.4599, "shared_logical_used": 29334241280, "system_free_space": 139958852812, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:10:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61494591488, "logical_used_volume": 39839318016, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 15314788352, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365861644442, "efficiency_ratio": 17632.844, "data_reduction": 5.738627, "data_physical_used": 5171994735, "snapshot_savings": 16.071285, "thin_savings": 532.45764, "shared_logical_used": 29680148480, "system_free_space": 139965483417, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:15:00Z", "cluster_id": "0", "logical_provisioned": 91177640525824, "logical_used": 61150384128, "logical_used_volume": 39495110656, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, 
"shared_logical_used_volume": 14970580992, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365864445797, "efficiency_ratio": 17628.898, "data_reduction": 5.672008, "data_physical_used": 5172055559, "snapshot_savings": 16.071285, "thin_savings": 538.59796, "shared_logical_used": 29335941120, "system_free_space": 139963374387, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:20:00Z", "cluster_id": "0", "logical_provisioned": 91193746653184, "logical_used": 61494591488, "logical_used_volume": 39839318016, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 15314788352, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365869028598, "efficiency_ratio": 17631.945, "data_reduction": 5.7385373, "data_physical_used": 5172075305, "snapshot_savings": 16.071285, "thin_savings": 532.45764, "shared_logical_used": 29680148480, "system_free_space": 139959335321, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:25:00Z", "cluster_id": "0", "logical_provisioned": 91193746653184, "logical_used": 61632274432, "logical_used_volume": 39977000960, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 15450378240, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365871276701, "efficiency_ratio": 17631.94, "data_reduction": 5.7647514, "data_physical_used": 5172077290, "snapshot_savings": 16.087236, "thin_savings": 529.81177, "shared_logical_used": 29815738368, "system_free_space": 139964790784, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:30:00Z", "cluster_id": "0", "logical_provisioned": 91180861751296, "logical_used": 61219225600, "logical_used_volume": 39563952128, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 15039422464, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365874459047, "efficiency_ratio": 17629.168, "data_reduction": 5.6852045, "data_physical_used": 5172158999, "snapshot_savings": 16.071285, "thin_savings": 537.3575, "shared_logical_used": 29404782592, "system_free_space": 139962074726, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:35:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61563432960, "logical_used_volume": 39908159488, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 15383629824, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365879363657, "efficiency_ratio": 17632.299, "data_reduction": 5.75176, "data_physical_used": 5172154424, "snapshot_savings": 16.071285, "thin_savings": 531.248, "shared_logical_used": 29748989952, "system_free_space": 139957444198, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:40:00Z", "cluster_id": "0", "logical_provisioned": 91206631555072, "logical_used": 61974151168, "logical_used_volume": 40318877696, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 15725506560, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365882811290, "efficiency_ratio": 17633.955, "data_reduction": 5.8177896, 
"data_physical_used": 5172216394, "snapshot_savings": 16.103897, "thin_savings": 524.98706, "shared_logical_used": 30090866688, "system_free_space": 139957269094, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:45:00Z", "cluster_id": "0", "logical_provisioned": 91219516456960, "logical_used": 61223616512, "logical_used_volume": 39568343040, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 15043813376, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365884896519, "efficiency_ratio": 17636.11, "data_reduction": 5.685882, "data_physical_used": 5172315210, "snapshot_savings": 16.071285, "thin_savings": 538.6871, "shared_logical_used": 29409173504, "system_free_space": 139955876044, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:50:00Z", "cluster_id": "0", "logical_provisioned": 91219516456960, "logical_used": 61564436480, "logical_used_volume": 39909163008, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 15384633344, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365891024605, "efficiency_ratio": 17636.19, "data_reduction": 5.7518015, "data_physical_used": 5172291677, "snapshot_savings": 16.071285, "thin_savings": 532.0445, "shared_logical_used": 29749993472, "system_free_space": 139950821376, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T18:55:00Z", "cluster_id": "0", "logical_provisioned": 91219516456960, "logical_used": 61908652032, "logical_used_volume": 40253378560, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15728848896, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365872254293, "efficiency_ratio": 17636.107, "data_reduction": 5.8183236, "data_physical_used": 5172316087, "snapshot_savings": 16.071285, "thin_savings": 525.49994, "shared_logical_used": 30094209024, "system_free_space": 139945369804, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:00:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61421748224, "logical_used_volume": 39766474752, "logical_used_file_system": 21655273472, "logical_used_vvol": 0, "shared_logical_used_volume": 15239839744, "shared_logical_used_file_system": 14365360128, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365881899958, "efficiency_ratio": 17631.791, "data_reduction": 5.723794, "data_physical_used": 5172303288, "snapshot_savings": 16.08733, "thin_savings": 533.9854, "shared_logical_used": 29605199872, "system_free_space": 139942599270, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:05:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61765988352, "logical_used_volume": 40110682112, "logical_used_file_system": 21655306240, "logical_used_vvol": 0, "shared_logical_used_volume": 15586152448, "shared_logical_used_file_system": 14365392896, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365886955214, "efficiency_ratio": 17631.648, "data_reduction": 5.7907095, "data_physical_used": 5172344723, "snapshot_savings": 16.071285, "thin_savings": 527.3828, "shared_logical_used": 29951545344, "system_free_space": 139938451251, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:10:00Z", "cluster_id": "0", "logical_provisioned": 91200189104128, "logical_used": 62110195712, "logical_used_volume": 40454889472, "logical_used_file_system": 21655306240, "logical_used_vvol": 0, "shared_logical_used_volume": 15930359808, "shared_logical_used_file_system": 14365392896, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365893660530, "efficiency_ratio": 17632.314, "data_reduction": 5.8572707, "data_physical_used": 5172332665, "snapshot_savings": 16.071285, "thin_savings": 520.9422, "shared_logical_used": 30295752704, "system_free_space": 139932832358, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:15:00Z", "cluster_id": "0", "logical_provisioned": 91180861751296, "logical_used": 61765988352, "logical_used_volume": 40110682112, "logical_used_file_system": 21655306240, "logical_used_vvol": 0, "shared_logical_used_volume": 15586152448, "shared_logical_used_file_system": 14365392896, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365895240769, "efficiency_ratio": 17628.527, "data_reduction": 5.7907066, "data_physical_used": 5172347548, "snapshot_savings": 16.071285, "thin_savings": 526.8043, "shared_logical_used": 29951545344, "system_free_space": 139931637760, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:20:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61834903552, "logical_used_volume": 40179597312, "logical_used_file_system": 21655306240, "logical_used_vvol": 0, "shared_logical_used_volume": 15655067648, "shared_logical_used_file_system": 14365392896, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365896338628, "efficiency_ratio": 17631.484, "data_reduction": 5.8039784, 
"data_physical_used": 5172393371, "snapshot_savings": 16.071285, "thin_savings": 526.0806, "shared_logical_used": 30020460544, "system_free_space": 139930855628, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:25:00Z", "cluster_id": "0", "logical_provisioned": 91180861751296, "logical_used": 61421780992, "logical_used_volume": 39766474752, "logical_used_file_system": 21655306240, "logical_used_vvol": 0, "shared_logical_used_volume": 15241945088, "shared_logical_used_file_system": 14365392896, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365881122779, "efficiency_ratio": 17628.172, "data_reduction": 5.724044, "data_physical_used": 5172451083, "snapshot_savings": 16.071285, "thin_savings": 533.399, "shared_logical_used": 29607337984, "system_free_space": 139946268672, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:30:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61765988352, "logical_used_volume": 40110682112, "logical_used_file_system": 21655306240, "logical_used_vvol": 0, "shared_logical_used_volume": 15586152448, "shared_logical_used_file_system": 14365392896, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365885558280, "efficiency_ratio": 17631.408, "data_reduction": 5.7906303, "data_physical_used": 5172415235, "snapshot_savings": 16.071285, "thin_savings": 527.3828, "shared_logical_used": 29951545344, "system_free_space": 139942490316, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:35:00Z", "cluster_id": "0", "logical_provisioned": 91229180133376, "logical_used": 62115250176, "logical_used_volume": 40459943936, "logical_used_file_system": 21655306240, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15935414272, "shared_logical_used_file_system": 14365392896, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365890206972, "efficiency_ratio": 17637.512, "data_reduction": 5.858113, "data_physical_used": 5172452028, "snapshot_savings": 16.071285, "thin_savings": 521.99146, "shared_logical_used": 30300807168, "system_free_space": 139938174976, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:40:00Z", "cluster_id": "0", "logical_provisioned": 91206631555072, "logical_used": 60833480704, "logical_used_volume": 39781625856, "logical_used_file_system": 21051854848, "logical_used_vvol": 0, "shared_logical_used_volume": 15257096192, "shared_logical_used_file_system": 14365392896, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365896331412, "efficiency_ratio": 17633.014, "data_reduction": 5.7269273, "data_physical_used": 5172492857, "snapshot_savings": 15.785416, "thin_savings": 534.04193, "shared_logical_used": 29622489088, "system_free_space": 139932135833, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:45:00Z", "cluster_id": "0", "logical_provisioned": 91206631555072, "logical_used": 61108846592, "logical_used_volume": 40056991744, "logical_used_file_system": 21051854848, "logical_used_vvol": 0, "shared_logical_used_volume": 15532462080, "shared_logical_used_file_system": 14365388800, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365898862232, "efficiency_ratio": 17632.924, "data_reduction": 5.7801337, "data_physical_used": 5172518862, "snapshot_savings": 15.785446, "thin_savings": 528.74963, "shared_logical_used": 29897850880, "system_free_space": 139930256384, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:50:00Z", "cluster_id": "0", "logical_provisioned": 91206631555072, "logical_used": 61712334848, "logical_used_volume": 40056991744, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15532462080, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365861837107, "efficiency_ratio": 17632.78, "data_reduction": 5.780094, "data_physical_used": 5172561554, "snapshot_savings": 16.071285, "thin_savings": 528.74896, "shared_logical_used": 29897891840, "system_free_space": 139929637683, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T19:55:00Z", "cluster_id": "0", "logical_provisioned": 91206631555072, "logical_used": 61712334848, "logical_used_volume": 40056991744, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15532462080, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365862320052, "efficiency_ratio": 17632.562, "data_reduction": 5.780023, "data_physical_used": 5172625150, "snapshot_savings": 16.071285, "thin_savings": 528.74896, "shared_logical_used": 29897891840, "system_free_space": 139929197977, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:00:00Z", "cluster_id": "0", "logical_provisioned": 91206631555072, "logical_used": 61743575040, "logical_used_volume": 40088231936, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15534567424, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365862962062, "efficiency_ratio": 17632.63, "data_reduction": 5.7804527, 
"data_physical_used": 5172605068, "snapshot_savings": 16.070057, "thin_savings": 528.74896, "shared_logical_used": 29899997184, "system_free_space": 139939525222, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:05:00Z", "cluster_id": "0", "logical_provisioned": 91206631555072, "logical_used": 61712334848, "logical_used_volume": 40056991744, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15532462080, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366789195819, "efficiency_ratio": 17632.414, "data_reduction": 5.7799745, "data_physical_used": 5172668641, "snapshot_savings": 16.071285, "thin_savings": 528.74896, "shared_logical_used": 29897891840, "system_free_space": 139934680678, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:10:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61697183744, "logical_used_volume": 40041840640, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15517310976, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366793044283, "efficiency_ratio": 17630.484, "data_reduction": 5.7770257, "data_physical_used": 5172686235, "snapshot_savings": 16.071285, "thin_savings": 528.5734, "shared_logical_used": 29882740736, "system_free_space": 139931505459, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:15:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 61430185984, "logical_used_volume": 39774842880, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15250313216, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366794215416, "efficiency_ratio": 17633.545, "data_reduction": 5.725391, "data_physical_used": 5172702287, "snapshot_savings": 16.071285, "thin_savings": 534.40717, "shared_logical_used": 29615742976, "system_free_space": 139930649395, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:20:00Z", "cluster_id": "0", "logical_provisioned": 91245286260736, "logical_used": 61774442496, "logical_used_volume": 40119099392, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15594569728, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366799606369, "efficiency_ratio": 17639.678, "data_reduction": 5.7919126, "data_physical_used": 5172729986, "snapshot_savings": 16.071198, "thin_savings": 528.958, "shared_logical_used": 29959999488, "system_free_space": 139925839872, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:25:00Z", "cluster_id": "0", "logical_provisioned": 91229180133376, "logical_used": 62111911936, "logical_used_volume": 40456568832, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15932039168, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366805087403, "efficiency_ratio": 17636.305, "data_reduction": 5.857066, "data_physical_used": 5172806255, "snapshot_savings": 16.071285, "thin_savings": 522.0533, "shared_logical_used": 30297468928, "system_free_space": 139920956416, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:30:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 61430231040, "logical_used_volume": 39774887936, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15248252928, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366813696034, "efficiency_ratio": 17633.291, "data_reduction": 5.7249107, "data_physical_used": 5172776489, "snapshot_savings": 16.08733, "thin_savings": 534.4063, "shared_logical_used": 29613682688, "system_free_space": 139912870502, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:35:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 61774442496, "logical_used_volume": 40119099392, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15594569728, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366776074785, "efficiency_ratio": 17633.062, "data_reduction": 5.7917852, "data_physical_used": 5172843389, "snapshot_savings": 16.071285, "thin_savings": 527.8011, "shared_logical_used": 29959999488, "system_free_space": 139914819993, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:40:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 61705601024, "logical_used_volume": 40050257920, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15525728256, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366775476885, "efficiency_ratio": 17633.105, "data_reduction": 5.778491, 
"data_physical_used": 5172831261, "snapshot_savings": 16.071285, "thin_savings": 528.99304, "shared_logical_used": 29891158016, "system_free_space": 139915096268, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:45:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 61774442496, "logical_used_volume": 40119099392, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15594573824, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366777992735, "efficiency_ratio": 17632.922, "data_reduction": 5.79174, "data_physical_used": 5172884999, "snapshot_savings": 16.071255, "thin_savings": 527.8011, "shared_logical_used": 29960003584, "system_free_space": 139912570880, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:50:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 62118649856, "logical_used_volume": 40463306752, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15938781184, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366780997938, "efficiency_ratio": 17632.617, "data_reduction": 5.858179, "data_physical_used": 5172974825, "snapshot_savings": 16.071255, "thin_savings": 521.35724, "shared_logical_used": 30304210944, "system_free_space": 139909998796, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T20:55:00Z", "cluster_id": "0", "logical_provisioned": 91203410329600, "logical_used": 61912125440, "logical_used_volume": 40256782336, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15732256768, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366784737587, "efficiency_ratio": 17630.77, "data_reduction": 5.818262, "data_physical_used": 5172968212, "snapshot_savings": 16.071255, "thin_savings": 524.85925, "shared_logical_used": 30097686528, "system_free_space": 139909971558, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:00:00Z", "cluster_id": "0", "logical_provisioned": 91219516456960, "logical_used": 62256332800, "logical_used_volume": 40600989696, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16074358784, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366787913847, "efficiency_ratio": 17633.746, "data_reduction": 5.8843493, "data_physical_used": 5173008435, "snapshot_savings": 16.0873, "thin_savings": 519.05096, "shared_logical_used": 30439788544, "system_free_space": 139907119308, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:05:00Z", "cluster_id": "0", "logical_provisioned": 91219516456960, "logical_used": 61566238720, "logical_used_volume": 39910895616, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15386370048, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366789816702, "efficiency_ratio": 17633.611, "data_reduction": 5.751309, "data_physical_used": 5173048360, "snapshot_savings": 16.071255, "thin_savings": 532.0098, "shared_logical_used": 29751799808, "system_free_space": 139905558937, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:10:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 61907075072, "logical_used_volume": 40251731968, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15727206400, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366796843137, "efficiency_ratio": 17632.227, "data_reduction": 5.8171506, "data_physical_used": 5173088582, "snapshot_savings": 16.071255, "thin_savings": 525.1842, "shared_logical_used": 30092636160, "system_free_space": 139900150169, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:15:00Z", "cluster_id": "0", "logical_provisioned": 91209852780544, "logical_used": 62251282432, "logical_used_volume": 40595939328, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16071413760, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366788910784, "efficiency_ratio": 17631.475, "data_reduction": 5.8836455, "data_physical_used": 5173126418, "snapshot_savings": 16.071255, "thin_savings": 518.8023, "shared_logical_used": 30436843520, "system_free_space": 139908539596, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:20:00Z", "cluster_id": "0", "logical_provisioned": 91196967878656, "logical_used": 61700550656, "logical_used_volume": 40045207552, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15520681984, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366791454683, "efficiency_ratio": 17628.918, "data_reduction": 5.7771635, 
"data_physical_used": 5173145953, "snapshot_savings": 16.071255, "thin_savings": 528.62537, "shared_logical_used": 29886111744, "system_free_space": 139907430604, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:25:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 61975916544, "logical_used_volume": 40320573440, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15796047872, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366758393916, "efficiency_ratio": 17631.832, "data_reduction": 5.8303275, "data_physical_used": 5173204667, "snapshot_savings": 16.071255, "thin_savings": 524.01013, "shared_logical_used": 30161477632, "system_free_space": 139903932416, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:30:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 62044758016, "logical_used_volume": 40389414912, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15862784000, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366761557589, "efficiency_ratio": 17631.768, "data_reduction": 5.843207, "data_physical_used": 5173223645, "snapshot_savings": 16.0873, "thin_savings": 522.72723, "shared_logical_used": 30228213760, "system_free_space": 139900694937, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:35:00Z", "cluster_id": "0", "logical_provisioned": 91200189104128, "logical_used": 61700550656, "logical_used_volume": 40045207552, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15520681984, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366766307344, "efficiency_ratio": 17629.176, "data_reduction": 5.7770443, "data_physical_used": 5173253370, "snapshot_savings": 16.071255, "thin_savings": 528.74133, "shared_logical_used": 29886111744, "system_free_space": 139896597504, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:40:00Z", "cluster_id": "0", "logical_provisioned": 91235622584320, "logical_used": 62113599488, "logical_used_volume": 40458256384, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15864889344, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366773907855, "efficiency_ratio": 17635.941, "data_reduction": 5.8435526, "data_physical_used": 5173277797, "snapshot_savings": 16.103867, "thin_savings": 523.4146, "shared_logical_used": 30230319104, "system_free_space": 139889702297, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:45:00Z", "cluster_id": "0", "logical_provisioned": 91225958907904, "logical_used": 61437939712, "logical_used_volume": 40386048000, "logical_used_file_system": 21051891712, "logical_used_vvol": 0, "shared_logical_used_volume": 15861522432, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366775199211, "efficiency_ratio": 17633.807, "data_reduction": 5.842813, "data_physical_used": 5173355787, "snapshot_savings": 15.785385, "thin_savings": 523.2481, "shared_logical_used": 30226952192, "system_free_space": 139888927948, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:50:00Z", "cluster_id": "0", "logical_provisioned": 91216295231488, "logical_used": 61908721664, "logical_used_volume": 40253378560, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15866535936, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366780221446, "efficiency_ratio": 17631.777, "data_reduction": 5.8437285, "data_physical_used": 5173403728, "snapshot_savings": 14.668742, "thin_savings": 526.6792, "shared_logical_used": 30231965696, "system_free_space": 139884180684, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T21:55:00Z", "cluster_id": "0", "logical_provisioned": 91248507486208, "logical_used": 62184087552, "logical_used_volume": 40528744448, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16004218880, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366784882029, "efficiency_ratio": 17638.086, "data_reduction": 5.8703694, "data_physical_used": 5173379602, "snapshot_savings": 16.071255, "thin_savings": 521.4039, "shared_logical_used": 30369648640, "system_free_space": 139879616307, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:00:00Z", "cluster_id": "0", "logical_provisioned": 91248507486208, "logical_used": 62316830720, "logical_used_volume": 40661487616, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16134856704, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366787987067, "efficiency_ratio": 17638.096, "data_reduction": 5.8956246, 
"data_physical_used": 5173376479, "snapshot_savings": 16.0873, "thin_savings": 518.96606, "shared_logical_used": 30500286464, "system_free_space": 139877277696, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:05:00Z", "cluster_id": "0", "logical_provisioned": 91216295231488, "logical_used": 61685952512, "logical_used_volume": 40179523584, "logical_used_file_system": 21506428928, "logical_used_vvol": 0, "shared_logical_used_volume": 15654998016, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366791071261, "efficiency_ratio": 17631.838, "data_reduction": 5.802859, "data_physical_used": 5173385973, "snapshot_savings": 16.00071, "thin_savings": 526.7738, "shared_logical_used": 30020427776, "system_free_space": 139873798963, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:10:00Z", "cluster_id": "0", "logical_provisioned": 91245286260736, "logical_used": 62252965888, "logical_used_volume": 40597622784, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16004255744, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366799117229, "efficiency_ratio": 17637.174, "data_reduction": 5.8702807, "data_physical_used": 5173463884, "snapshot_savings": 16.103867, "thin_savings": 521.1752, "shared_logical_used": 30369685504, "system_free_space": 139866806476, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:15:00Z", "cluster_id": "0", "logical_provisioned": 91248507486208, "logical_used": 61644460032, "logical_used_volume": 40592568320, "logical_used_file_system": 21051891712, "logical_used_vvol": 0, 
"shared_logical_used_volume": 16068042752, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366763833450, "efficiency_ratio": 17637.81, "data_reduction": 5.882615, "data_physical_used": 5173460088, "snapshot_savings": 15.785385, "thin_savings": 520.2289, "shared_logical_used": 30433472512, "system_free_space": 139872047923, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:20:00Z", "cluster_id": "0", "logical_provisioned": 91235622584320, "logical_used": 61921505280, "logical_used_volume": 40869613568, "logical_used_file_system": 21051891712, "logical_used_vvol": 0, "shared_logical_used_volume": 16345088000, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366767882698, "efficiency_ratio": 17635.162, "data_reduction": 5.936113, "data_physical_used": 5173506766, "snapshot_savings": 15.785385, "thin_savings": 514.7389, "shared_logical_used": 30710517760, "system_free_space": 139867849318, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:25:00Z", "cluster_id": "0", "logical_provisioned": 91232401358848, "logical_used": 61506777088, "logical_used_volume": 40454885376, "logical_used_file_system": 21051891712, "logical_used_vvol": 0, "shared_logical_used_volume": 15932039168, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366771474733, "efficiency_ratio": 17634.547, "data_reduction": 5.8562756, "data_physical_used": 5173504537, "snapshot_savings": 15.772837, "thin_savings": 522.1987, "shared_logical_used": 30297468928, "system_free_space": 139864413388, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:30:00Z", "cluster_id": "0", "logical_provisioned": 91264613613568, "logical_used": 62192590848, "logical_used_volume": 40537247744, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16010616832, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366775036834, "efficiency_ratio": 17640.7, "data_reduction": 5.87144, "data_physical_used": 5173525909, "snapshot_savings": 16.0873, "thin_savings": 521.8168, "shared_logical_used": 30376046592, "system_free_space": 139868362956, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:35:00Z", "cluster_id": "0", "logical_provisioned": 91238843809792, "logical_used": 61843230720, "logical_used_volume": 40187887616, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15663362048, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366780097656, "efficiency_ratio": 17635.771, "data_reduction": 5.804336, "data_physical_used": 5173510219, "snapshot_savings": 16.071255, "thin_savings": 527.42365, "shared_logical_used": 30028791808, "system_free_space": 139863440588, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:40:00Z", "cluster_id": "0", "logical_provisioned": 91251728711680, "logical_used": 62256312320, "logical_used_volume": 40600969216, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16007602176, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366786356055, "efficiency_ratio": 17638.002, "data_reduction": 5.8707886, 
"data_physical_used": 5173586460, "snapshot_savings": 16.103867, "thin_savings": 521.34143, "shared_logical_used": 30373031936, "system_free_space": 139857374208, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:45:00Z", "cluster_id": "0", "logical_provisioned": 91248507486208, "logical_used": 62184108032, "logical_used_volume": 40528764928, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16004239360, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366787833980, "efficiency_ratio": 17637.342, "data_reduction": 5.870126, "data_physical_used": 5173597610, "snapshot_savings": 16.071255, "thin_savings": 521.40344, "shared_logical_used": 30369669120, "system_free_space": 139856311910, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:50:00Z", "cluster_id": "0", "logical_provisioned": 91248507486208, "logical_used": 62527574016, "logical_used_volume": 40872230912, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16347705344, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366794178903, "efficiency_ratio": 17637.273, "data_reduction": 5.936491, "data_physical_used": 5173617521, "snapshot_savings": 16.071255, "thin_savings": 515.1423, "shared_logical_used": 30713135104, "system_free_space": 139850490675, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T22:55:00Z", "cluster_id": "0", "logical_provisioned": 91238843809792, "logical_used": 61980950528, "logical_used_volume": 40325607424, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15801081856, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366797611005, "efficiency_ratio": 17635.379, "data_reduction": 5.8308263, "data_physical_used": 5173625239, "snapshot_savings": 16.071255, "thin_savings": 524.83466, "shared_logical_used": 30166511616, "system_free_space": 139847634534, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:00:00Z", "cluster_id": "0", "logical_provisioned": 91254949937152, "logical_used": 62321795072, "logical_used_volume": 40666451968, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16139821056, "shared_logical_used_file_system": 14365425664, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366802911147, "efficiency_ratio": 17638.326, "data_reduction": 5.8962445, "data_physical_used": 5173673771, "snapshot_savings": 16.08733, "thin_savings": 519.1022, "shared_logical_used": 30505246720, "system_free_space": 139852914892, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:05:00Z", "cluster_id": "0", "logical_provisioned": 91254949937152, "logical_used": 62530015232, "logical_used_volume": 40874672128, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16350146560, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366771736246, "efficiency_ratio": 17638.125, "data_reduction": 5.93683, "data_physical_used": 5173733176, "snapshot_savings": 16.071255, "thin_savings": 515.32355, "shared_logical_used": 30715576320, "system_free_space": 139851774771, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:10:00Z", "cluster_id": "0", "logical_provisioned": 91225958907904, "logical_used": 62046433280, "logical_used_volume": 40391090176, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15866564608, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366775745003, "efficiency_ratio": 17632.436, "data_reduction": 5.8433332, "data_physical_used": 5173758609, "snapshot_savings": 16.071255, "thin_savings": 523.1543, "shared_logical_used": 30231994368, "system_free_space": 139847552819, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:15:00Z", "cluster_id": "0", "logical_provisioned": 91225958907904, "logical_used": 62043062272, "logical_used_volume": 40387719168, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15863193600, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366776910103, "efficiency_ratio": 17632.379, "data_reduction": 5.8426633, "data_physical_used": 5173775042, "snapshot_savings": 16.071255, "thin_savings": 523.21704, "shared_logical_used": 30228623360, "system_free_space": 139846513868, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:20:00Z", "cluster_id": "0", "logical_provisioned": 91222737682432, "logical_used": 62041391104, "logical_used_volume": 40386048000, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15863197696, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366779452915, "efficiency_ratio": 17631.605, "data_reduction": 5.8426137, 
"data_physical_used": 5173819546, "snapshot_savings": 16.05851, "thin_savings": 523.13354, "shared_logical_used": 30228627456, "system_free_space": 139844132454, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:25:00Z", "cluster_id": "0", "logical_provisioned": 91225958907904, "logical_used": 62043074560, "logical_used_volume": 40387731456, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15863205888, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366782657303, "efficiency_ratio": 17632.176, "data_reduction": 5.842598, "data_physical_used": 5173834907, "snapshot_savings": 16.071255, "thin_savings": 523.2168, "shared_logical_used": 30228635648, "system_free_space": 139841326899, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:30:00Z", "cluster_id": "0", "logical_provisioned": 91225958907904, "logical_used": 62073278464, "logical_used_volume": 40417935360, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15864274944, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366786538345, "efficiency_ratio": 17632.064, "data_reduction": 5.842768, "data_physical_used": 5173866837, "snapshot_savings": 16.070026, "thin_savings": 523.2361, "shared_logical_used": 30229704704, "system_free_space": 139837692518, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:35:00Z", "cluster_id": "0", "logical_provisioned": 91209852780544, "logical_used": 62049808384, "logical_used_volume": 40394465280, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15869939712, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366788382717, "efficiency_ratio": 17628.842, "data_reduction": 5.8438263, "data_physical_used": 5173899097, "snapshot_savings": 16.071255, "thin_savings": 522.51886, "shared_logical_used": 30235369472, "system_free_space": 139836147712, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:40:00Z", "cluster_id": "0", "logical_provisioned": 91213074006016, "logical_used": 62049808384, "logical_used_volume": 40394465280, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15869939712, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366792928344, "efficiency_ratio": 17629.248, "data_reduction": 5.843755, "data_physical_used": 5173962881, "snapshot_savings": 16.071255, "thin_savings": 522.51886, "shared_logical_used": 30235369472, "system_free_space": 139831882956, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:45:00Z", "cluster_id": "0", "logical_provisioned": 91209852780544, "logical_used": 62049808384, "logical_used_volume": 40394465280, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15869939712, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366793806758, "efficiency_ratio": 17628.527, "data_reduction": 5.843722, "data_physical_used": 5173991785, "snapshot_savings": 16.071255, "thin_savings": 522.51886, "shared_logical_used": 30235369472, "system_free_space": 139831178649, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:50:00Z", "cluster_id": "0", "logical_provisioned": 91225958907904, "logical_used": 62187491328, "logical_used_volume": 40532148224, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16007622656, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366795514672, "efficiency_ratio": 17631.715, "data_reduction": 5.8703575, "data_physical_used": 5173970137, "snapshot_savings": 16.071255, "thin_savings": 520.5432, "shared_logical_used": 30373052416, "system_free_space": 139829668864, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-06T23:55:00Z", "cluster_id": "0", "logical_provisioned": 91242065035264, "logical_used": 62600540160, "logical_used_volume": 40945197056, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 16420671488, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366763863484, "efficiency_ratio": 17634.465, "data_reduction": 5.950067, "data_physical_used": 5174076208, "snapshot_savings": 16.071255, "thin_savings": 513.60675, "shared_logical_used": 30786101248, "system_free_space": 139824703692, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:00:00Z", "cluster_id": "0", "logical_provisioned": 91229180133376, "logical_used": 62118649856, "logical_used_volume": 40463306752, "logical_used_file_system": 21655343104, "logical_used_vvol": 0, "shared_logical_used_volume": 15938781184, "shared_logical_used_file_system": 14365429760, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366769331290, "efficiency_ratio": 17631.885, "data_reduction": 5.856902, 
"data_physical_used": 5174102614, "snapshot_savings": 16.071255, "thin_savings": 521.9285, "shared_logical_used": 30304210944, "system_free_space": 139821446758, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:05:00Z", "cluster_id": "0", "logical_provisioned": 91245286260736, "logical_used": 62454464512, "logical_used_volume": 40799096832, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, "shared_logical_used_volume": 16274571264, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366772343468, "efficiency_ratio": 17635.03, "data_reduction": 5.9218154, "data_physical_used": 5174093296, "snapshot_savings": 16.071255, "thin_savings": 516.3495, "shared_logical_used": 30640025600, "system_free_space": 139818777395, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:10:00Z", "cluster_id": "0", "logical_provisioned": 91277498515456, "logical_used": 61929394176, "logical_used_volume": 40877477888, "logical_used_file_system": 21051916288, "logical_used_vvol": 0, "shared_logical_used_volume": 16352952320, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366778716276, "efficiency_ratio": 17641.059, "data_reduction": 5.9368978, "data_physical_used": 5174151237, "snapshot_savings": 15.785385, "thin_savings": 516.0607, "shared_logical_used": 30718406656, "system_free_space": 139813932851, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:15:00Z", "cluster_id": "0", "logical_provisioned": 91245286260736, "logical_used": 62119698432, "logical_used_volume": 40464330752, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, 
"shared_logical_used_volume": 15939805184, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366763036042, "efficiency_ratio": 17634.854, "data_reduction": 5.8570566, "data_physical_used": 5174144913, "snapshot_savings": 16.071255, "thin_savings": 522.48035, "shared_logical_used": 30305259520, "system_free_space": 139829762252, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:20:00Z", "cluster_id": "0", "logical_provisioned": 91261392388096, "logical_used": 62463905792, "logical_used_volume": 40808538112, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, "shared_logical_used_volume": 16284012544, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366770020283, "efficiency_ratio": 17637.66, "data_reduction": 5.9234786, "data_physical_used": 5174234856, "snapshot_savings": 16.071255, "thin_savings": 516.743, "shared_logical_used": 30649466880, "system_free_space": 139827544268, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:25:00Z", "cluster_id": "0", "logical_provisioned": 91261392388096, "logical_used": 62527713280, "logical_used_volume": 40872345600, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, "shared_logical_used_volume": 16347820032, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366774551360, "efficiency_ratio": 17637.531, "data_reduction": 5.9357667, "data_physical_used": 5174272607, "snapshot_savings": 16.071255, "thin_savings": 515.5902, "shared_logical_used": 30713274368, "system_free_space": 139823723110, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:30:00Z", "cluster_id": "0", "logical_provisioned": 91245286260736, "logical_used": 62118027264, "logical_used_volume": 40462659584, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, "shared_logical_used_volume": 15936028672, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366781288062, "efficiency_ratio": 17634.088, "data_reduction": 5.856072, "data_physical_used": 5174370042, "snapshot_savings": 16.0873, "thin_savings": 522.51135, "shared_logical_used": 30301483008, "system_free_space": 139817862963, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:35:00Z", "cluster_id": "0", "logical_provisioned": 91261392388096, "logical_used": 62460559360, "logical_used_volume": 40805191680, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, "shared_logical_used_volume": 16280666112, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366786600999, "efficiency_ratio": 17637.123, "data_reduction": 5.9226513, "data_physical_used": 5174392331, "snapshot_savings": 16.071255, "thin_savings": 516.8036, "shared_logical_used": 30646120448, "system_free_space": 139812963942, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:40:00Z", "cluster_id": "0", "logical_provisioned": 91245286260736, "logical_used": 62129106944, "logical_used_volume": 41077190656, "logical_used_file_system": 21051916288, "logical_used_vvol": 0, "shared_logical_used_volume": 16552665088, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366790672188, "efficiency_ratio": 17633.932, "data_reduction": 5.9751906, 
"data_physical_used": 5174415388, "snapshot_savings": 15.785385, "thin_savings": 511.253, "shared_logical_used": 30918119424, "system_free_space": 139809006592, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:45:00Z", "cluster_id": "0", "logical_provisioned": 91229180133376, "logical_used": 62463930368, "logical_used_volume": 40808562688, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, "shared_logical_used_volume": 16284037120, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366758298037, "efficiency_ratio": 17630.617, "data_reduction": 5.923208, "data_physical_used": 5174474698, "snapshot_savings": 16.071255, "thin_savings": 515.61383, "shared_logical_used": 30649491456, "system_free_space": 139804881920, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:50:00Z", "cluster_id": "0", "logical_provisioned": 91245286260736, "logical_used": 62739296256, "logical_used_volume": 41083928576, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, "shared_logical_used_volume": 16559403008, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366760859602, "efficiency_ratio": 17633.65, "data_reduction": 5.976398, "data_physical_used": 5174497900, "snapshot_savings": 16.071255, "thin_savings": 511.24524, "shared_logical_used": 30924857344, "system_free_space": 139802387660, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T00:55:00Z", "cluster_id": "0", "logical_provisioned": 91232401358848, "logical_used": 62188564480, "logical_used_volume": 40533196800, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, 
"shared_logical_used_volume": 16008671232, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366762407946, "efficiency_ratio": 17631.045, "data_reduction": 5.8699274, "data_physical_used": 5174531824, "snapshot_savings": 16.071255, "thin_savings": 520.7514, "shared_logical_used": 30374125568, "system_free_space": 139801107456, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:00:00Z", "cluster_id": "0", "logical_provisioned": 91232401358848, "logical_used": 62532771840, "logical_used_volume": 40877404160, "logical_used_file_system": 21655367680, "logical_used_vvol": 0, "shared_logical_used_volume": 16352878592, "shared_logical_used_file_system": 14365454336, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366768448721, "efficiency_ratio": 17630.883, "data_reduction": 5.936392, "data_physical_used": 5174579837, "snapshot_savings": 16.071255, "thin_savings": 514.48566, "shared_logical_used": 30718332928, "system_free_space": 139795901030, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:05:00Z", "cluster_id": "0", "logical_provisioned": 91248507486208, "logical_used": 62670491648, "logical_used_volume": 41015087104, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16490561536, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366766771681, "efficiency_ratio": 17633.662, "data_reduction": 5.9628944, "data_physical_used": 5174677046, "snapshot_savings": 16.071255, "thin_savings": 512.58105, "shared_logical_used": 30856052736, "system_free_space": 139794986598, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:10:00Z", "cluster_id": "0", "logical_provisioned": 91203410329600, "logical_used": 62180184064, "logical_used_volume": 40524779520, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16000253952, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366771575464, "efficiency_ratio": 17624.812, "data_reduction": 5.868098, "data_physical_used": 5174716708, "snapshot_savings": 16.071255, "thin_savings": 519.7658, "shared_logical_used": 30365745152, "system_free_space": 139790496153, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:15:00Z", "cluster_id": "0", "logical_provisioned": 91248507486208, "logical_used": 62197010432, "logical_used_volume": 40541605888, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16017084416, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366773201012, "efficiency_ratio": 17633.36, "data_reduction": 5.8712945, "data_physical_used": 5174765948, "snapshot_savings": 16.071222, "thin_savings": 521.1655, "shared_logical_used": 30382575616, "system_free_space": 139789138124, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:20:00Z", "cluster_id": "0", "logical_provisioned": 91232401358848, "logical_used": 62188593152, "logical_used_volume": 40533188608, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16008667136, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366775284691, "efficiency_ratio": 17630.139, "data_reduction": 5.869632, 
"data_physical_used": 5174797963, "snapshot_savings": 16.071222, "thin_savings": 520.75085, "shared_logical_used": 30374158336, "system_free_space": 139787830681, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:25:00Z", "cluster_id": "0", "logical_provisioned": 91232401358848, "logical_used": 62188552192, "logical_used_volume": 40533147648, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16008622080, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366779287755, "efficiency_ratio": 17630.098, "data_reduction": 5.86961, "data_physical_used": 5174809803, "snapshot_savings": 16.071255, "thin_savings": 520.75165, "shared_logical_used": 30374113280, "system_free_space": 139785048473, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:30:00Z", "cluster_id": "0", "logical_provisioned": 91232401358848, "logical_used": 62186901504, "logical_used_volume": 40531496960, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16006971392, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366782754235, "efficiency_ratio": 17630.105, "data_reduction": 5.869293, "data_physical_used": 5174807755, "snapshot_savings": 16.071255, "thin_savings": 520.78204, "shared_logical_used": 30372462592, "system_free_space": 139781756518, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:35:00Z", "cluster_id": "0", "logical_provisioned": 91216295231488, "logical_used": 62180184064, "logical_used_volume": 40524779520, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, 
"shared_logical_used_volume": 16000253952, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366749343981, "efficiency_ratio": 17626.766, "data_reduction": 5.8679194, "data_physical_used": 5174874247, "snapshot_savings": 16.071255, "thin_savings": 520.3358, "shared_logical_used": 30365745152, "system_free_space": 139778651340, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:40:00Z", "cluster_id": "0", "logical_provisioned": 91222737682432, "logical_used": 61805572096, "logical_used_volume": 40596971520, "logical_used_file_system": 21208600576, "logical_used_vvol": 0, "shared_logical_used_volume": 16003604480, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366753335194, "efficiency_ratio": 17627.967, "data_reduction": 5.8685517, "data_physical_used": 5174887373, "snapshot_savings": 15.892204, "thin_savings": 520.3881, "shared_logical_used": 30369095680, "system_free_space": 139774514995, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:45:00Z", "cluster_id": "0", "logical_provisioned": 91216295231488, "logical_used": 62317867008, "logical_used_volume": 40662462464, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16137936896, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366755289540, "efficiency_ratio": 17626.658, "data_reduction": 5.8944893, "data_physical_used": 5174905645, "snapshot_savings": 16.071255, "thin_savings": 517.81256, "shared_logical_used": 30503428096, "system_free_space": 139772736716, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:50:00Z", "cluster_id": "0", "logical_provisioned": 91232401358848, "logical_used": 62524391424, "logical_used_volume": 40868986880, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16344461312, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366758071316, "efficiency_ratio": 17629.678, "data_reduction": 5.934367, "data_physical_used": 5174933124, "snapshot_savings": 16.071255, "thin_savings": 514.6364, "shared_logical_used": 30709952512, "system_free_space": 139770623795, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T01:55:00Z", "cluster_id": "0", "logical_provisioned": 91232401358848, "logical_used": 62593232896, "logical_used_volume": 40937828352, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16413302784, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366683444356, "efficiency_ratio": 17629.68, "data_reduction": 5.9476705, "data_physical_used": 5174932286, "snapshot_savings": 16.071255, "thin_savings": 513.4006, "shared_logical_used": 30778793984, "system_free_space": 139845408768, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:00:00Z", "cluster_id": "0", "logical_provisioned": 91229180133376, "logical_used": 62486790144, "logical_used_volume": 40831385600, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16277737472, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366625371642, "efficiency_ratio": 17628.791, "data_reduction": 5.9213843, 
"data_physical_used": 5175010747, "snapshot_savings": 16.069933, "thin_savings": 515.7653, "shared_logical_used": 30643228672, "system_free_space": 139907391692, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:05:00Z", "cluster_id": "0", "logical_provisioned": 91216295231488, "logical_used": 62524391424, "logical_used_volume": 40868986880, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16344461312, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366536751824, "efficiency_ratio": 17626.398, "data_reduction": 5.934311, "data_physical_used": 5174982138, "snapshot_savings": 16.071255, "thin_savings": 514.07324, "shared_logical_used": 30709952512, "system_free_space": 139995994316, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:10:00Z", "cluster_id": "0", "logical_provisioned": 91235622584320, "logical_used": 62868598784, "logical_used_volume": 41213194240, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16619827200, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366495583001, "efficiency_ratio": 17630.078, "data_reduction": 5.987503, "data_physical_used": 5174998369, "snapshot_savings": 16.103867, "thin_savings": 509.7285, "shared_logical_used": 30985318400, "system_free_space": 140038225510, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:15:00Z", "cluster_id": "0", "logical_provisioned": 91264613613568, "logical_used": 62193627136, "logical_used_volume": 40538222592, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, 
"shared_logical_used_volume": 16013697024, "shared_logical_used_file_system": 14365487104, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366471999915, "efficiency_ratio": 17635.643, "data_reduction": 5.870363, "data_physical_used": 5175009333, "snapshot_savings": 16.071285, "thin_savings": 521.79767, "shared_logical_used": 30379184128, "system_free_space": 140061751705, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:20:00Z", "cluster_id": "0", "logical_provisioned": 91264613613568, "logical_used": 62532345856, "logical_used_volume": 40876941312, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16352415744, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366454756867, "efficiency_ratio": 17635.684, "data_reduction": 5.93583, "data_physical_used": 5174997447, "snapshot_savings": 16.071255, "thin_savings": 515.6193, "shared_logical_used": 30717906944, "system_free_space": 140079281561, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:25:00Z", "cluster_id": "0", "logical_provisioned": 91251728711680, "logical_used": 62526074880, "logical_used_volume": 40870670336, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16346144768, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366401043928, "efficiency_ratio": 17633.209, "data_reduction": 5.9346237, "data_physical_used": 5174992832, "snapshot_savings": 16.071255, "thin_savings": 515.28186, "shared_logical_used": 30711635968, "system_free_space": 140095939788, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:30:00Z", "cluster_id": "0", "logical_provisioned": 91280719740928, "logical_used": 62878687232, "logical_used_volume": 41223282688, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16696651776, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366381371149, "efficiency_ratio": 17638.664, "data_reduction": 6.002305, "data_physical_used": 5175035672, "snapshot_savings": 16.079655, "thin_savings": 510.02658, "shared_logical_used": 31062142976, "system_free_space": 140115617587, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:35:00Z", "cluster_id": "0", "logical_provisioned": 91251728711680, "logical_used": 62253010944, "logical_used_volume": 40597606400, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16073080832, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366366734057, "efficiency_ratio": 17633.127, "data_reduction": 5.88183, "data_physical_used": 5175017014, "snapshot_savings": 16.071255, "thin_savings": 520.24896, "shared_logical_used": 30438572032, "system_free_space": 140138322739, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:40:00Z", "cluster_id": "0", "logical_provisioned": 91254949937152, "logical_used": 62593232896, "logical_used_volume": 40937828352, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16413302784, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366343815698, "efficiency_ratio": 17633.676, "data_reduction": 5.947549, 
"data_physical_used": 5175038377, "snapshot_savings": 16.071255, "thin_savings": 514.07477, "shared_logical_used": 30778793984, "system_free_space": 140161689395, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:45:00Z", "cluster_id": "0", "logical_provisioned": 91251728711680, "logical_used": 62945857536, "logical_used_volume": 41290452992, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16765927424, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366323450293, "efficiency_ratio": 17632.615, "data_reduction": 6.015539, "data_physical_used": 5175167138, "snapshot_savings": 16.071255, "thin_savings": 507.8283, "shared_logical_used": 31131418624, "system_free_space": 140182503424, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:50:00Z", "cluster_id": "0", "logical_provisioned": 91235622584320, "logical_used": 62257442816, "logical_used_volume": 40602038272, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16077512704, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366293100641, "efficiency_ratio": 17629.314, "data_reduction": 5.8824534, "data_physical_used": 5175222250, "snapshot_savings": 16.071255, "thin_savings": 519.59906, "shared_logical_used": 30443003904, "system_free_space": 140213559091, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T02:55:00Z", "cluster_id": "0", "logical_provisioned": 91251728711680, "logical_used": 62601650176, "logical_used_volume": 40946245632, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, 
"shared_logical_used_volume": 16421720064, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366273959041, "efficiency_ratio": 17632.422, "data_reduction": 5.948962, "data_physical_used": 5175223968, "snapshot_savings": 16.071255, "thin_savings": 513.9239, "shared_logical_used": 30787211264, "system_free_space": 140233365299, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:00:00Z", "cluster_id": "0", "logical_provisioned": 91251728711680, "logical_used": 62739333120, "logical_used_volume": 41083928576, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16559403008, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366245762717, "efficiency_ratio": 17632.3, "data_reduction": 5.9755254, "data_physical_used": 5175259901, "snapshot_savings": 16.071255, "thin_savings": 511.46817, "shared_logical_used": 30924894208, "system_free_space": 140261950054, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:05:00Z", "cluster_id": "0", "logical_provisioned": 91251728711680, "logical_used": 62945857536, "logical_used_volume": 41290452992, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16765927424, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366213764316, "efficiency_ratio": 17632.125, "data_reduction": 6.015372, "data_physical_used": 5175310603, "snapshot_savings": 16.071255, "thin_savings": 507.8283, "shared_logical_used": 31131418624, "system_free_space": 140293846220, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:10:00Z", "cluster_id": "0", "logical_provisioned": 91242065035264, "logical_used": 62395125760, "logical_used_volume": 40739721216, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16215195648, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366158780035, "efficiency_ratio": 17630.223, "data_reduction": 5.908944, "data_physical_used": 5175321590, "snapshot_savings": 16.071255, "thin_savings": 517.1994, "shared_logical_used": 30580686848, "system_free_space": 140349490380, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:15:00Z", "cluster_id": "0", "logical_provisioned": 91271056064512, "logical_used": 62667120640, "logical_used_volume": 41011716096, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16487190528, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366096964095, "efficiency_ratio": 17635.639, "data_reduction": 5.9614377, "data_physical_used": 5175375776, "snapshot_savings": 16.071255, "thin_savings": 513.42566, "shared_logical_used": 30852681728, "system_free_space": 140374658662, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:20:00Z", "cluster_id": "0", "logical_provisioned": 91287162191872, "logical_used": 62678859776, "logical_used_volume": 41023455232, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16498929664, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366066974179, "efficiency_ratio": 17638.627, "data_reduction": 5.963664, 
"data_physical_used": 5175412310, "snapshot_savings": 16.071255, "thin_savings": 513.7762, "shared_logical_used": 30864420864, "system_free_space": 140405955584, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:25:00Z", "cluster_id": "0", "logical_provisioned": 91254949937152, "logical_used": 63006281728, "logical_used_volume": 41350877184, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16826351616, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366053564738, "efficiency_ratio": 17632.443, "data_reduction": 6.0269427, "data_physical_used": 5175400235, "snapshot_savings": 16.071255, "thin_savings": 506.8839, "shared_logical_used": 31191842816, "system_free_space": 140419886080, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:30:00Z", "cluster_id": "0", "logical_provisioned": 91261392388096, "logical_used": 62532816896, "logical_used_volume": 40877412352, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16352886784, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366046067158, "efficiency_ratio": 17633.525, "data_reduction": 5.935405, "data_physical_used": 5175447993, "snapshot_savings": 16.071255, "thin_savings": 515.4982, "shared_logical_used": 30718377984, "system_free_space": 140428147097, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:35:00Z", "cluster_id": "0", "logical_provisioned": 91245286260736, "logical_used": 62799757312, "logical_used_volume": 41144352768, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, 
"shared_logical_used_volume": 16619827200, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366029415895, "efficiency_ratio": 17630.205, "data_reduction": 5.9869123, "data_physical_used": 5175509236, "snapshot_savings": 16.071255, "thin_savings": 510.17474, "shared_logical_used": 30985318400, "system_free_space": 140445330636, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:40:00Z", "cluster_id": "0", "logical_provisioned": 91248507486208, "logical_used": 62937440256, "logical_used_volume": 41282035712, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16688668672, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366008738547, "efficiency_ratio": 17630.64, "data_reduction": 6.00015, "data_physical_used": 5175563565, "snapshot_savings": 16.103867, "thin_savings": 508.96127, "shared_logical_used": 31054159872, "system_free_space": 140466347008, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:45:00Z", "cluster_id": "0", "logical_provisioned": 91245286260736, "logical_used": 63143964672, "logical_used_volume": 41488560128, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16964034560, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366011296918, "efficiency_ratio": 17629.637, "data_reduction": 6.053224, "data_physical_used": 5175676093, "snapshot_savings": 16.071255, "thin_savings": 504.16464, "shared_logical_used": 31329525760, "system_free_space": 140464031744, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:50:00Z", "cluster_id": "0", "logical_provisioned": 91235622584320, "logical_used": 62730989568, "logical_used_volume": 41075585024, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16551059456, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366014005134, "efficiency_ratio": 17627.865, "data_reduction": 5.973465, "data_physical_used": 5175647505, "snapshot_savings": 16.071255, "thin_savings": 511.0572, "shared_logical_used": 30916550656, "system_free_space": 140462179532, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T03:55:00Z", "cluster_id": "0", "logical_provisioned": 91144354529280, "logical_used": 62905544704, "logical_used_volume": 41250140160, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 16757510144, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366016283611, "efficiency_ratio": 17609.996, "data_reduction": 6.0132737, "data_physical_used": 5175716714, "snapshot_savings": 16.056145, "thin_savings": 507.97565, "shared_logical_used": 31123001344, "system_free_space": 140460335104, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T04:00:00Z", "cluster_id": "0", "logical_provisioned": 91144354529280, "logical_used": 63249752064, "logical_used_volume": 41594347520, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 17101717504, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366022030619, "efficiency_ratio": 17609.904, "data_reduction": 6.079746, 
"data_physical_used": 5175743802, "snapshot_savings": 16.056145, "thin_savings": 502.01956, "shared_logical_used": 31467208704, "system_free_space": 140455202611, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T04:05:00Z", "cluster_id": "0", "logical_provisioned": 90976850804736, "logical_used": 59325788160, "logical_used_volume": 37670383616, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 13177753600, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365985099277, "efficiency_ratio": 17578.006, "data_reduction": 5.3217416, "data_physical_used": 5175607271, "snapshot_savings": 16.056145, "thin_savings": 572.89014, "shared_logical_used": 27543244800, "system_free_space": 140455735705, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T04:10:00Z", "cluster_id": "0", "logical_provisioned": 91028390412288, "logical_used": 59750621184, "logical_used_volume": 38095216640, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 13533745152, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365991373534, "efficiency_ratio": 17587.8, "data_reduction": 5.390474, "data_physical_used": 5175655236, "snapshot_savings": 16.088757, "thin_savings": 566.8554, "shared_logical_used": 27899236352, "system_free_space": 140449937817, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T04:15:00Z", "cluster_id": "0", "logical_provisioned": 91005841833984, "logical_used": 59670003712, "logical_used_volume": 38014599168, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, 
"shared_logical_used_volume": 13521969152, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365993184951, "efficiency_ratio": 17583.463, "data_reduction": 5.3882046, "data_physical_used": 5175649788, "snapshot_savings": 16.056145, "thin_savings": 566.36456, "shared_logical_used": 27887460352, "system_free_space": 140448151756, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T04:20:00Z", "cluster_id": "0", "logical_provisioned": 90989735706624, "logical_used": 59669983232, "logical_used_volume": 38014578688, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 13521948672, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365995629590, "efficiency_ratio": 17580.326, "data_reduction": 5.388193, "data_physical_used": 5175657086, "snapshot_savings": 16.056145, "thin_savings": 565.7402, "shared_logical_used": 27887439872, "system_free_space": 140446233395, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" }, { "timestamp": "2023-06-07T04:25:00Z", "cluster_id": "0", "logical_provisioned": 91005841833984, "logical_used": 59678400512, "logical_used_volume": 38022995968, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 13530365952, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 365999686685, "efficiency_ratio": 17583.098, "data_reduction": 5.389715, "data_physical_used": 5175757336, "snapshot_savings": 16.056145, "thin_savings": 566.1801, "shared_logical_used": 27895857152, "system_free_space": 140443139891, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": 
"space_metrics_by_cluster" }, { "timestamp": "2023-06-07T04:30:00Z", "cluster_id": "0", "logical_provisioned": 91021947961344, "logical_used": 59681759232, "logical_used_volume": 38026354688, "logical_used_file_system": 21655404544, "logical_used_vvol": 0, "shared_logical_used_volume": 13531619328, "shared_logical_used_file_system": 14365491200, "shared_logical_used_vvol": 0, "physical_total": 47345047046144, "physical_used": 366003363027, "efficiency_ratio": 17586.121, "data_reduction": 5.3899302, "data_physical_used": 5175783211, "snapshot_savings": 16.072174, "thin_savings": 566.73096, "shared_logical_used": 27897110528, "system_free_space": 140439984128, "repeat_count": 1, "response_definition": "space_metrics_by_cluster", "entity": "space_metrics_by_cluster" } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/test_client.py0000664000175000017500000006331200000000000030573 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import os import requests_mock from manila.share.drivers.dell_emc.plugins.powerstore import client from manila import test class TestClient(test.TestCase): REST_IP = "192.168.0.110" NAS_SERVER_NAME = "powerstore-nasserver" NAS_SERVER_ID = "6423d56e-eaf3-7424-be0b-1a9efb93188b" NAS_SERVER_IP = "192.168.11.23" NFS_EXPORT_NAME = "powerstore-nfs-share" NFS_EXPORT_SIZE = 3221225472 NFS_EXPORT_NEW_SIZE = 6221225472 FILESYSTEM_ID = "6454e9a9-a698-e9bc-ca61-1a9efb93188b" NFS_EXPORT_ID = "6454ec18-7b8d-1532-1b8a-1a9efb93188b" RW_HOSTS = "192.168.1.10" RO_HOSTS = "192.168.1.11" SMB_SHARE_NAME = "powerstore-smb-share" SMB_SHARE_ID = "64927ae9-3403-6930-a784-f227b9987c54" RW_USERS = "user1" RO_USERS = "user2" SNAPSHOT_NAME = "powerstore-nfs-share-snap" SNAPSHOT_ID = "6454ea29-09c3-030e-cfc3-1a9efb93188b" CLONE_ID = "64560f05-e677-ec2a-7fcf-1a9efb93188b" CLONE_NAME = "powerstore-nfs-share-snap-clone" CLUSTER_ID = "0" CLIENT_OPTIONS = { "rest_ip": REST_IP, "rest_username": "admin", "rest_password": "pwd", "verify_certificate": False, "certificate_path": None } def setUp(self): super(TestClient, self).setUp() self._mock_url = "https://%s/api/rest" % self.REST_IP self.client = client.PowerStoreClient(**self.CLIENT_OPTIONS) self.mockup_file_base = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'mockup') def _getJsonFile(self, filename): f = open(os.path.join(self.mockup_file_base, filename)) data = json.load(f) f.close() return data def test__verify_cert(self): verify_cert = self.client.verify_certificate certificate_path = self.client.certificate_path self.client.verify_certificate = True self.client.certificate_path = "fake_certificate_path" self.assertEqual(self.client._verify_cert, self.client.certificate_path) self.client.verify_certificate = verify_cert self.client.certificate_path = certificate_path @requests_mock.mock() def test__send_request(self, m): url = "{0}/fake_res".format(self._mock_url) m.get(url, status_code=200) 
self.client._send_get_request("/fake_res", None, None, False) @requests_mock.mock() def test_get_nas_server_id(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_id_response( m, self.NAS_SERVER_NAME, self._getJsonFile("get_nas_server_id_response.json") ) id = self.client.get_nas_server_id(self.NAS_SERVER_NAME) self.assertEqual(id, self.NAS_SERVER_ID) def _add_get_nas_server_id_response(self, m, nas_server, json_str): url = "{0}/nas_server?name=eq.{1}".format( self._mock_url, nas_server ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_nas_server_id_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_id_response_failure( m, self.NAS_SERVER_NAME ) id = self.client.get_nas_server_id(self.NAS_SERVER_NAME) self.assertIsNone(id) def _add_get_nas_server_id_response_failure(self, m, nas_server): url = "{0}/nas_server?name=eq.{1}".format( self._mock_url, nas_server ) m.get(url, status_code=400) @requests_mock.mock() def test_get_nas_server_interfaces(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_interfaces_response( m, self.NAS_SERVER_ID, self._getJsonFile("get_nas_server_interfaces_response.json") ) interfaces = self.client.get_nas_server_interfaces(self.NAS_SERVER_ID) self.assertEqual(interfaces[0]['ip'], self.NAS_SERVER_IP) self.assertEqual(interfaces[0]['preferred'], True) def _add_get_nas_server_interfaces_response(self, m, nas_server_id, json_str): url = "{0}/nas_server/{1}?select=" \ "current_preferred_IPv4_interface_id," \ "current_preferred_IPv6_interface_id," \ "file_interfaces(id,ip_address)".format( self._mock_url, nas_server_id ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_nas_server_interfaces_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_interfaces_response_failure( m, self.NAS_SERVER_ID ) interfaces = self.client.get_nas_server_interfaces(self.NAS_SERVER_ID) 
self.assertIsNone(interfaces) def _add_get_nas_server_interfaces_response_failure(self, m, nas_server_id): url = "{0}/nas_server/{1}?select=" \ "current_preferred_IPv4_interface_id," \ "current_preferred_IPv6_interface_id," \ "file_interfaces(id,ip_address)".format( self._mock_url, nas_server_id ) m.get(url, status_code=400) @requests_mock.mock() def test_create_filesystem(self, m): self.assertEqual(0, len(m.request_history)) self._add_create_filesystem_response( m, self._getJsonFile("create_filesystem_response.json") ) id = self.client.create_filesystem( self.NAS_SERVER_ID, self.NFS_EXPORT_NAME, self.NFS_EXPORT_SIZE ) self.assertEqual(id, self.FILESYSTEM_ID) def _add_create_filesystem_response(self, m, json_str): url = "{0}/file_system".format(self._mock_url) m.post(url, status_code=201, json=json_str) @requests_mock.mock() def test_create_filesystem_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_create_filesystem_response_failure(m) id = self.client.create_filesystem( self.NAS_SERVER_ID, self.NFS_EXPORT_NAME, self.NFS_EXPORT_SIZE ) self.assertIsNone(id) def _add_create_filesystem_response_failure(self, m): url = "{0}/file_system".format(self._mock_url) m.post(url, status_code=400) @requests_mock.mock() def test_create_nfs_export(self, m): self.assertEqual(0, len(m.request_history)) self._add_create_nfs_export_response( m, self._getJsonFile("create_nfs_export_response.json") ) id = self.client.create_nfs_export(self.FILESYSTEM_ID, self.NFS_EXPORT_NAME) self.assertEqual(id, self.NFS_EXPORT_ID) def _add_create_nfs_export_response(self, m, json_str): url = "{0}/nfs_export".format(self._mock_url) m.post(url, status_code=201, json=json_str) @requests_mock.mock() def test_create_nfs_export_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_create_nfs_export_response_failure(m) id = self.client.create_nfs_export(self.FILESYSTEM_ID, self.NFS_EXPORT_NAME) self.assertIsNone(id) def _add_create_nfs_export_response_failure(self, 
m): url = "{0}/nfs_export".format(self._mock_url) m.post(url, status_code=400) @requests_mock.mock() def test_delete_filesystem(self, m): self.assertEqual(0, len(m.request_history)) self._add_delete_filesystem_response(m, self.FILESYSTEM_ID) result = self.client.delete_filesystem(self.FILESYSTEM_ID) self.assertEqual(result, True) def _add_delete_filesystem_response(self, m, filesystem_id): url = "{0}/file_system/{1}".format( self._mock_url, filesystem_id ) m.delete(url, status_code=204) @requests_mock.mock() def test_get_nfs_export_name(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nfs_export_name_response( m, self.NFS_EXPORT_ID, self._getJsonFile("get_nfs_export_name_response.json"), ) name = self.client.get_nfs_export_name(self.NFS_EXPORT_ID) self.assertEqual(name, self.NFS_EXPORT_NAME) def _add_get_nfs_export_name_response(self, m, export_id, json_str): url = "{0}/nfs_export/{1}?select=name".format( self._mock_url, export_id ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_nfs_export_name_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nfs_export_name_response_failure(m, self.NFS_EXPORT_ID) name = self.client.get_nfs_export_name(self.NFS_EXPORT_ID) self.assertIsNone(name) def _add_get_nfs_export_name_response_failure(self, m, export_id): url = "{0}/nfs_export/{1}?select=name".format( self._mock_url, export_id ) m.get(url, status_code=400) @requests_mock.mock() def test_get_nfs_export_id(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nfs_export_id_response( m, self.NFS_EXPORT_NAME, self._getJsonFile("get_nfs_export_id_response.json") ) id = self.client.get_nfs_export_id(self.NFS_EXPORT_NAME) self.assertEqual(id, self.NFS_EXPORT_ID) def _add_get_nfs_export_id_response(self, m, name, json_str): url = "{0}/nfs_export?select=id&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def 
test_get_nfs_export_id_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nfs_export_id_response_failure(m, self.NFS_EXPORT_NAME) id = self.client.get_nfs_export_id(self.NFS_EXPORT_NAME) self.assertIsNone(id) def _add_get_nfs_export_id_response_failure(self, m, name): url = "{0}/nfs_export?select=id&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=400) @requests_mock.mock() def test_get_filesystem_id(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_filesystem_id_response( m, self.NFS_EXPORT_NAME, self._getJsonFile("get_fileystem_id_response.json") ) id = self.client.get_filesystem_id(self.NFS_EXPORT_NAME) self.assertEqual(id, self.FILESYSTEM_ID) def _add_get_filesystem_id_response(self, m, name, json_str): url = "{0}/file_system?name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_filesystem_id_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_filesystem_id_response_failure(m, self.NFS_EXPORT_NAME) id = self.client.get_filesystem_id(self.NFS_EXPORT_NAME) self.assertIsNone(id) def _add_get_filesystem_id_response_failure(self, m, name): url = "{0}/file_system?name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=400) @requests_mock.mock() def test_set_export_access(self, m): self.assertEqual(0, len(m.request_history)) self._add_set_export_access_response(m, self.NFS_EXPORT_ID) result = self.client.set_export_access(self.NFS_EXPORT_ID, self.RW_HOSTS, self.RO_HOSTS) self.assertEqual(result, True) def _add_set_export_access_response(self, m, export_id): url = "{0}/nfs_export/{1}".format(self._mock_url, export_id) m.patch(url, status_code=204) @requests_mock.mock() def test_resize_filesystem(self, m): self.assertEqual(0, len(m.request_history)) self._add_resize_filesystem_response(m, self.FILESYSTEM_ID) result, detail = self.client.resize_filesystem( self.FILESYSTEM_ID, self.NFS_EXPORT_NEW_SIZE) 
self.assertTrue(result) self.assertIsNone(detail) def _add_resize_filesystem_response(self, m, filesystem_id): url = "{0}/file_system/{1}".format( self._mock_url, filesystem_id ) m.patch(url, status_code=204) @requests_mock.mock() def test_resize_filesystem_shrink_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_resize_filesystem_shrink_failure_response( m, self.FILESYSTEM_ID, self._getJsonFile( "resize_filesystem_shrink_failure_response.json")) result, detail = self.client.resize_filesystem( self.FILESYSTEM_ID, self.NFS_EXPORT_NEW_SIZE) self.assertFalse(result) self.assertIsNotNone(detail) def _add_resize_filesystem_shrink_failure_response( self, m, filesystem_id, json_str): url = "{0}/file_system/{1}".format( self._mock_url, filesystem_id ) m.patch(url, status_code=422, json=json_str) @requests_mock.mock() def test_get_fsid_from_export_name(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_fsid_from_export_name_response( m, self.NFS_EXPORT_NAME, self._getJsonFile("get_fsid_from_export_name_response.json") ) id = self.client.get_fsid_from_export_name(self.NFS_EXPORT_NAME) self.assertEqual(id, self.FILESYSTEM_ID) def _add_get_fsid_from_export_name_response(self, m, name, json_str): url = "{0}/nfs_export?select=file_system_id&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_fsid_from_export_name_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_fsid_from_export_name_response_failure( m, self.NFS_EXPORT_NAME ) id = self.client.get_fsid_from_export_name(self.NFS_EXPORT_NAME) self.assertIsNone(id) def _add_get_fsid_from_export_name_response_failure(self, m, name): url = "{0}/nfs_export?select=file_system_id&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=400) @requests_mock.mock() def test_create_snapshot(self, m): self.assertEqual(0, len(m.request_history)) self._add_create_snapshot_response( m, 
self.FILESYSTEM_ID, self._getJsonFile("create_snapshot_response.json") ) id = self.client.create_snapshot(self.FILESYSTEM_ID, self.SNAPSHOT_NAME) self.assertEqual(id, self.SNAPSHOT_ID) def _add_create_snapshot_response(self, m, filesystem_id, json_str): url = "{0}/file_system/{1}/snapshot".format(self._mock_url, filesystem_id) m.post(url, status_code=201, json=json_str) @requests_mock.mock() def test_create_snapshot_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_create_snapshot_response_failure(m, self.FILESYSTEM_ID) id = self.client.create_snapshot(self.FILESYSTEM_ID, self.SNAPSHOT_NAME) self.assertIsNone(id) def _add_create_snapshot_response_failure(self, m, filesystem_id): url = "{0}/file_system/{1}/snapshot".format(self._mock_url, filesystem_id) m.post(url, status_code=400) @requests_mock.mock() def test_restore_snapshot(self, m): self.assertEqual(0, len(m.request_history)) self._add_restore_snapshot_response( m, self.SNAPSHOT_ID ) result = self.client.restore_snapshot(self.SNAPSHOT_ID) self.assertEqual(result, True) def _add_restore_snapshot_response(self, m, snapshot_id): url = "{0}/file_system/{1}/restore".format(self._mock_url, snapshot_id) m.post(url, status_code=204) @requests_mock.mock() def test_restore_snapshot_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_restore_snapshot_response_failure( m, self.SNAPSHOT_ID ) result = self.client.restore_snapshot(self.SNAPSHOT_ID) self.assertEqual(result, False) def _add_restore_snapshot_response_failure(self, m, snapshot_id): url = "{0}/file_system/{1}/restore".format(self._mock_url, snapshot_id) m.post(url, status_code=400) @requests_mock.mock() def test_clone_snapshot(self, m): self.assertEqual(0, len(m.request_history)) self._add_clone_snapshot_response( m, self.SNAPSHOT_ID, self._getJsonFile("clone_snapshot_response.json") ) id = self.client.clone_snapshot(self.SNAPSHOT_ID, self.CLONE_NAME) self.assertEqual(id, self.CLONE_ID) def 
_add_clone_snapshot_response(self, m, snapshot_id, json_str): url = "{0}/file_system/{1}/clone".format(self._mock_url, snapshot_id) m.post(url, status_code=201, json=json_str) @requests_mock.mock() def test_clone_snapshot_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_clone_snapshot_response_failure( m, self.SNAPSHOT_ID ) id = self.client.clone_snapshot(self.SNAPSHOT_ID, self.CLONE_NAME) self.assertIsNone(id) def _add_clone_snapshot_response_failure(self, m, snapshot_id): url = "{0}/file_system/{1}/clone".format(self._mock_url, snapshot_id) m.post(url, status_code=400) @requests_mock.mock() def test_get_cluster_id(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_cluster_id_response( m, self._getJsonFile("get_cluster_id_response.json") ) id = self.client.get_cluster_id() self.assertEqual(id, self.CLUSTER_ID) def _add_get_cluster_id_response(self, m, json_str): url = "{0}/cluster".format(self._mock_url) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_cluster_id_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_cluster_id_response_failure(m) id = self.client.get_cluster_id() self.assertIsNone(id) def _add_get_cluster_id_response_failure(self, m): url = "{0}/cluster".format(self._mock_url) m.get(url, status_code=400) @requests_mock.mock() def test_retreive_cluster_capacity_metrics(self, m): self.assertEqual(0, len(m.request_history)) self._add_retreive_cluster_capacity_metrics_response( m, self._getJsonFile( "retreive_cluster_capacity_metrics_response.json") ) total, used = self.client.retreive_cluster_capacity_metrics( self.CLUSTER_ID) self.assertEqual(total, 47345047046144) self.assertEqual(used, 366003363027) def _add_retreive_cluster_capacity_metrics_response(self, m, json_str): url = "{0}/metrics/generate?order=timestamp".format(self._mock_url) m.post(url, status_code=200, json=json_str) @requests_mock.mock() def test_retreive_cluster_capacity_metrics_failure(self, 
m): self.assertEqual(0, len(m.request_history)) self._add_retreive_cluster_capacity_metrics_response_failure(m) total, used = self.client.retreive_cluster_capacity_metrics( self.CLUSTER_ID) self.assertIsNone(total) self.assertIsNone(used) def _add_retreive_cluster_capacity_metrics_response_failure(self, m): url = "{0}/metrics/generate?order=timestamp".format(self._mock_url) m.post(url, status_code=400) @requests_mock.mock() def test_create_smb_share(self, m): self.assertEqual(0, len(m.request_history)) self._add_create_smb_share_response( m, self._getJsonFile("create_smb_share_response.json") ) id = self.client.create_smb_share(self.FILESYSTEM_ID, self.SMB_SHARE_NAME) self.assertEqual(id, self.SMB_SHARE_ID) def _add_create_smb_share_response(self, m, json_str): url = "{0}/smb_share".format(self._mock_url) m.post(url, status_code=201, json=json_str) @requests_mock.mock() def test_create_smb_share_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_create_smb_share_response_failure(m) id = self.client.create_smb_share(self.FILESYSTEM_ID, self.SMB_SHARE_NAME) self.assertIsNone(id) def _add_create_smb_share_response_failure(self, m): url = "{0}/smb_share".format(self._mock_url) m.post(url, status_code=400) @requests_mock.mock() def test_get_fsid_from_share_name(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_fsid_from_share_name_response( m, self.NFS_EXPORT_NAME, self._getJsonFile("get_fsid_from_share_name_response.json") ) id = self.client.get_fsid_from_share_name(self.NFS_EXPORT_NAME) self.assertEqual(id, self.FILESYSTEM_ID) def _add_get_fsid_from_share_name_response(self, m, name, json_str): url = "{0}/smb_share?select=file_system_id&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_fsid_from_share_name_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_fsid_from_share_name_response_failure( m, self.SMB_SHARE_NAME ) id = 
self.client.get_fsid_from_share_name(self.SMB_SHARE_NAME) self.assertIsNone(id) def _add_get_fsid_from_share_name_response_failure(self, m, name): url = "{0}/smb_share?select=file_system_id&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=400) @requests_mock.mock() def test_get_smb_share_id(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_smb_share_id_response( m, self.SMB_SHARE_NAME, self._getJsonFile("get_smb_share_id_response.json") ) id = self.client.get_smb_share_id(self.SMB_SHARE_NAME) self.assertEqual(id, self.SMB_SHARE_ID) def _add_get_smb_share_id_response(self, m, name, json_str): url = "{0}/smb_share?select=id&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_smb_share_id_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_smb_share_id_response_failure( m, self.SMB_SHARE_NAME ) id = self.client.get_smb_share_id(self.SMB_SHARE_NAME) self.assertIsNone(id) def _add_get_smb_share_id_response_failure(self, m, name): url = "{0}/smb_share?select=id&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=400) @requests_mock.mock() def test_get_nas_server_smb_netbios(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_smb_netbios_response( m, self.SMB_SHARE_NAME, self._getJsonFile("get_nas_server_smb_netbios_response.json") ) id = self.client.get_nas_server_smb_netbios(self.SMB_SHARE_NAME) self.assertEqual(id, "OPENSTACK") def _add_get_nas_server_smb_netbios_response(self, m, name, json_str): url = "{0}/nas_server?select=smb_servers" \ "(is_standalone,netbios_name)&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=200, json=json_str) @requests_mock.mock() def test_get_nas_server_smb_netbios_failure(self, m): self.assertEqual(0, len(m.request_history)) self._add_get_nas_server_smb_netbios_response_failure( m, self.SMB_SHARE_NAME ) id = 
self.client.get_nas_server_smb_netbios(self.SMB_SHARE_NAME) self.assertIsNone(id) def _add_get_nas_server_smb_netbios_response_failure(self, m, name): url = "{0}/nas_server?select=smb_servers" \ "(is_standalone,netbios_name)&name=eq.{1}".format( self._mock_url, name ) m.get(url, status_code=400) @requests_mock.mock() def test_set_acl(self, m): self.assertEqual(0, len(m.request_history)) self._add_set_acl_response(m, self.SMB_SHARE_ID) result = self.client.set_acl(self.SMB_SHARE_ID, self.RW_USERS, self.RO_USERS) self.assertEqual(result, True) def _add_set_acl_response(self, m, share_id): url = "{0}/smb_share/{1}/set_acl".format(self._mock_url, share_id) m.post(url, status_code=204) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/powerstore/test_connection.py0000664000175000017500000010571400000000000031457 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from oslo_log import log from oslo_utils import units from manila.common import constants as const from manila import exception from manila.share.drivers.dell_emc.plugins.powerstore import connection from manila import test LOG = log.getLogger(__name__) class PowerStoreTest(test.TestCase): """Unit test for the PowerStore Manila driver.""" REST_IP = "192.168.0.110" NAS_SERVER_NAME = "powerstore-nasserver" NAS_SERVER_ID = "6423d56e-eaf3-7424-be0b-1a9efb93188b" NAS_SERVER_IP = "192.168.11.23" SHARE_NAME = "powerstore-share" SHARE_SIZE_GB = 3 SHARE_NEW_SIZE_GB = 6 FILESYSTEM_ID = "6454e9a9-a698-e9bc-ca61-1a9efb93188b" NFS_EXPORT_ID = "6454ec18-7b8d-1532-1b8a-1a9efb93188b" SMB_SHARE_ID = "64927ae9-3403-6930-a784-f227b9987c54" RW_HOSTS = "192.168.1.10" RO_HOSTS = "192.168.1.11" RW_USERS = "user_1" RO_USERS = "user_2" SNAPSHOT_NAME = "powerstore-share-snap" SNAPSHOT_ID = "6454ea29-09c3-030e-cfc3-1a9efb93188b" CLONE_ID = "64560f05-e677-ec2a-7fcf-1a9efb93188b" CLONE_NAME = "powerstore-nfs-share-snap-clone" class MockConfig(object): def safe_get(self, value): if value == "dell_nas_backend_host": return "192.168.0.110" elif value == "dell_nas_login": return "admin" elif value == "dell_nas_password": return "pwd" elif value == "dell_nas_server": return "powerstore-nasserver" elif value == "dell_ad_domain": return "domain_name" elif value == "dell_ssl_cert_verify": return True elif value == "dell_ssl_cert_path": return "powerstore_cert_path" @mock.patch( "manila.share.drivers.dell_emc.plugins.powerstore.client." 
"PowerStoreClient", autospec=True, ) def setUp(self, mock_powerstore_client): super(PowerStoreTest, self).setUp() self._mock_powerstore_client = mock_powerstore_client.return_value self.storage_connection = connection.PowerStoreStorageConnection(LOG) self.mock_context = mock.Mock("Context") self.mock_emc_driver = mock.Mock("EmcDriver") self._mock_config = self.MockConfig() self.mock_emc_driver.attach_mock(self._mock_config, "configuration") self.storage_connection.connect( self.mock_emc_driver, self.mock_context ) def test_connect(self): storage_connection = connection.PowerStoreStorageConnection(LOG) # execute method under test storage_connection.connect(self.mock_emc_driver, self.mock_context) # verify connect sets driver params appropriately mock_config = self.MockConfig() server_addr = mock_config.safe_get("dell_nas_backend_host") self.assertEqual(server_addr, storage_connection.rest_ip) expected_username = mock_config.safe_get("dell_nas_login") self.assertEqual(expected_username, storage_connection.rest_username) expected_password = mock_config.safe_get("dell_nas_password") self.assertEqual(expected_password, storage_connection.rest_password) expected_nas_server = mock_config.safe_get("dell_nas_server") self.assertEqual(expected_nas_server, storage_connection.nas_server) expected_ad_domain = mock_config.safe_get("dell_ad_domain") self.assertEqual(expected_ad_domain, storage_connection.ad_domain) expected_verify_certificate = mock_config.safe_get( "dell_ssl_cert_verify" ) self.assertEqual( expected_verify_certificate, storage_connection.verify_certificate ) def test_create_share_nfs(self): self._mock_powerstore_client.get_nas_server_id.return_value = ( self.NAS_SERVER_ID ) self._mock_powerstore_client.create_filesystem.return_value = ( self.FILESYSTEM_ID ) self._mock_powerstore_client.create_nfs_export.return_value = ( self.NFS_EXPORT_ID ) self._mock_powerstore_client.get_nas_server_interfaces.return_value = ( [{"ip": self.NAS_SERVER_IP, "preferred": True}] ) 
self.assertFalse(self._mock_powerstore_client.get_nas_server_id.called) self.assertFalse(self._mock_powerstore_client.create_filesystem.called) self.assertFalse(self._mock_powerstore_client.create_nfs_export.called) self.assertFalse( self._mock_powerstore_client.get_nas_server_interfaces.called ) # create the share share = {"name": self.SHARE_NAME, "share_proto": "NFS", "size": self.SHARE_SIZE_GB} locations = self.storage_connection.create_share( self.mock_context, share, None ) # verify location and API call made expected_locations = [ {"path": "%s:/%s" % ( self.NAS_SERVER_IP, self.SHARE_NAME, ), "metadata": { "preferred": True}}] self.assertEqual(expected_locations, locations) self._mock_powerstore_client.get_nas_server_id.assert_called_with( self._mock_config.safe_get("dell_nas_server") ) self._mock_powerstore_client.create_filesystem.assert_called_with( self.NAS_SERVER_ID, self.SHARE_NAME, self.SHARE_SIZE_GB * units.Gi, ) self._mock_powerstore_client.create_nfs_export.assert_called_with( self.FILESYSTEM_ID, self.SHARE_NAME ) self._mock_powerstore_client.get_nas_server_interfaces. 
\ assert_called_with( self.NAS_SERVER_ID ) def test_create_share_cifs(self): self._mock_powerstore_client.get_nas_server_id.return_value = ( self.NAS_SERVER_ID ) self._mock_powerstore_client.create_filesystem.return_value = ( self.FILESYSTEM_ID ) self._mock_powerstore_client.create_smb_share.return_value = ( self.SMB_SHARE_ID ) self._mock_powerstore_client.get_nas_server_interfaces.return_value = ( [{"ip": self.NAS_SERVER_IP, "preferred": True}] ) self.assertFalse(self._mock_powerstore_client.get_nas_server_id.called) self.assertFalse(self._mock_powerstore_client.create_filesystem.called) self.assertFalse(self._mock_powerstore_client.create_smb_share.called) self.assertFalse( self._mock_powerstore_client.get_nas_server_interfaces.called ) # create the share share = {"name": self.SHARE_NAME, "share_proto": "CIFS", "size": self.SHARE_SIZE_GB} locations = self.storage_connection.create_share( self.mock_context, share, None ) # verify location and API call made expected_locations = [ {"path": "\\\\%s\\%s" % ( self.NAS_SERVER_IP, self.SHARE_NAME), "metadata": { "preferred": True}}] self.assertEqual(expected_locations, locations) self._mock_powerstore_client.get_nas_server_id.assert_called_with( self._mock_config.safe_get("dell_nas_server") ) self._mock_powerstore_client.create_filesystem.assert_called_with( self.NAS_SERVER_ID, self.SHARE_NAME, self.SHARE_SIZE_GB * units.Gi, ) self._mock_powerstore_client.create_smb_share.assert_called_with( self.FILESYSTEM_ID, self.SHARE_NAME ) self._mock_powerstore_client.get_nas_server_interfaces. 
\ assert_called_with( self.NAS_SERVER_ID ) def test_create_share_filesystem_id_not_found(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS", "size": self.SHARE_SIZE_GB} self._mock_powerstore_client.create_filesystem.return_value = None self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share, self.mock_context, share, share_server=None ) def test_create_share_nfs_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS", "size": self.SHARE_SIZE_GB} self._mock_powerstore_client.create_nfs_export.return_value = None self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share, self.mock_context, share, share_server=None ) def test_create_share_cifs_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": "CIFS", "size": self.SHARE_SIZE_GB} self._mock_powerstore_client.create_smb_share.return_value = None self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share, self.mock_context, share, share_server=None ) def test_delete_share_nfs(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS"} self._mock_powerstore_client.get_filesystem_id.return_value = ( self.FILESYSTEM_ID ) self.assertFalse(self._mock_powerstore_client.get_filesystem_id.called) self.assertFalse(self._mock_powerstore_client.delete_filesystem.called) # delete the share self.storage_connection.delete_share(self.mock_context, share, None) # verify share delete self._mock_powerstore_client.get_filesystem_id.assert_called_with( self.SHARE_NAME ) self._mock_powerstore_client.delete_filesystem.assert_called_with( self.FILESYSTEM_ID ) def test_delete_nfs_share_backend_failure(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS"} self._mock_powerstore_client.delete_filesystem.return_value = False self.assertRaises( exception.ShareBackendException, self.storage_connection.delete_share, self.mock_context, share, None, ) def 
test_delete_nfs_share_share_does_not_exist(self): self._mock_powerstore_client.get_filesystem_id.return_value = None share = {"name": self.SHARE_NAME, "share_proto": "NFS"} # verify the calling delete on a non-existent share returns and does # not throw exception self.storage_connection.delete_share(self.mock_context, share, None) self.assertTrue(self._mock_powerstore_client.get_filesystem_id.called) self.assertFalse(self._mock_powerstore_client.delete_filesystem.called) def test_extend_share(self): share = { "name": self.SHARE_NAME, "share_proto": "NFS", "size": self.SHARE_NEW_SIZE_GB, } self._mock_powerstore_client.get_filesystem_id.return_value = ( self.FILESYSTEM_ID ) self._mock_powerstore_client.resize_filesystem.return_value = ( True, None ) self.assertFalse(self._mock_powerstore_client.get_filesystem_id.called) self.storage_connection.extend_share(share, self.SHARE_NEW_SIZE_GB, self.NAS_SERVER_NAME) self._mock_powerstore_client.get_filesystem_id.assert_called_with( self.SHARE_NAME ) expected_quota_size = self.SHARE_NEW_SIZE_GB * units.Gi self._mock_powerstore_client.resize_filesystem.assert_called_once_with( self.FILESYSTEM_ID, expected_quota_size ) def test_shrink_share(self): share = { "name": self.SHARE_NAME, "share_proto": "NFS", "size": self.SHARE_SIZE_GB, } self._mock_powerstore_client.get_filesystem_id.return_value = ( self.FILESYSTEM_ID ) self._mock_powerstore_client.resize_filesystem.return_value = ( True, None ) self.assertFalse(self._mock_powerstore_client.get_filesystem_id.called) self.storage_connection.shrink_share(share, self.SHARE_NEW_SIZE_GB, self.NAS_SERVER_NAME) self._mock_powerstore_client.get_filesystem_id.assert_called_with( self.SHARE_NAME ) expected_quota_size = self.SHARE_NEW_SIZE_GB * units.Gi self._mock_powerstore_client.resize_filesystem.assert_called_once_with( self.FILESYSTEM_ID, expected_quota_size ) def test_shrink_share_failure(self): share = { "name": self.SHARE_NAME, "share_proto": "NFS", "size": self.SHARE_SIZE_GB, "id": 
self.CLONE_ID } self._mock_powerstore_client.get_filesystem_id.return_value = ( self.FILESYSTEM_ID ) self._mock_powerstore_client.resize_filesystem.return_value = ( False, "msg" ) self.assertRaises( exception.ShareShrinkingPossibleDataLoss, self.storage_connection.shrink_share, share, self.SHARE_NEW_SIZE_GB, self.NAS_SERVER_NAME ) def test_shrink_share_backend_failure(self): share = { "name": self.SHARE_NAME, "share_proto": "NFS", "size": self.SHARE_SIZE_GB, } self._mock_powerstore_client.get_filesystem_id.return_value = ( self.FILESYSTEM_ID ) self._mock_powerstore_client.resize_filesystem.return_value = ( False, None ) self.assertRaises( exception.ShareBackendException, self.storage_connection.shrink_share, share, self.SHARE_NEW_SIZE_GB, self.NAS_SERVER_NAME ) def test_update_access_add_nfs(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS"} self._mock_powerstore_client.get_nfs_export_id.return_value = ( self.NFS_EXPORT_ID ) self._mock_powerstore_client.set_export_access.return_value = True self.assertFalse(self._mock_powerstore_client.get_nfs_export_id.called) self.assertFalse(self._mock_powerstore_client.set_export_access.called) nfs_access_rw = { "access_type": "ip", "access_to": self.RW_HOSTS, "access_level": const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } nfs_access_ro = { "access_type": "ip", "access_to": self.RO_HOSTS, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } access_rules = [nfs_access_rw, nfs_access_ro] self.storage_connection.update_access( self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) self._mock_powerstore_client.get_nfs_export_id.assert_called_once_with( self.SHARE_NAME ) self._mock_powerstore_client.set_export_access.assert_called_once_with( self.NFS_EXPORT_ID, {self.RW_HOSTS}, {self.RO_HOSTS} ) def test_update_access_add_cifs(self): share = {"name": self.SHARE_NAME, "share_proto": "CIFS"} 
self._mock_powerstore_client.get_smb_share_id.return_value = ( self.SMB_SHARE_ID ) self._mock_powerstore_client.set_acl.return_value = True self.assertFalse(self._mock_powerstore_client.get_smb_share_id.called) self.assertFalse(self._mock_powerstore_client.set_acl.called) cifs_access_rw = { "access_type": "user", "access_to": self.RW_USERS, "access_level": const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } cifs_access_ro = { "access_type": "user", "access_to": self.RO_USERS, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } access_rules = [cifs_access_rw, cifs_access_ro] self.storage_connection.update_access( self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) self._mock_powerstore_client.get_smb_share_id.assert_called_once_with( self.SHARE_NAME ) self._mock_powerstore_client.set_acl.assert_called_once_with( self.SMB_SHARE_ID, {'domain_name\\user_1'}, {'domain_name\\user_2'} ) def test_update_access_invalid_prefix(self): share = {"name": self.SHARE_NAME, "share_proto": "CIFS"} self._mock_powerstore_client.get_smb_share_id.return_value = ( self.SMB_SHARE_ID ) self._mock_powerstore_client.get_nas_server_smb_netbios. 
\ return_value = None self.assertFalse(self._mock_powerstore_client.get_smb_share_id.called) self.assertFalse(self._mock_powerstore_client.set_acl.called) cifs_access_rw = { "access_type": "user", "access_to": self.RW_USERS, "access_level": const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } cifs_access_ro = { "access_type": "user", "access_to": self.RO_USERS, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } access_rules = [cifs_access_rw, cifs_access_ro] self.storage_connection.ad_domain = None access_updates = self.storage_connection.update_access( self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) self._mock_powerstore_client.set_acl.assert_called_once_with( self.SMB_SHARE_ID, set(), set() ) self.assertIsNotNone(access_updates) def test_update_access_add_nfs_invalid_acess_type(self): share = { "name": self.SHARE_NAME, "share_proto": "NFS", "display_name": "foo_display_name", } nfs_access_rw = { "access_type": "invalid_type", "access_to": self.RW_HOSTS, "access_level": const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } nfs_access_ro = { "access_type": "invalid_type", "access_to": self.RO_HOSTS, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd09", } access_rules = [nfs_access_rw, nfs_access_ro] self._mock_powerstore_client.get_nfs_export_id.return_value = ( self.NFS_EXPORT_ID ) access_updates = self.storage_connection.update_access( self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) self._mock_powerstore_client.set_export_access.assert_called_once_with( self.NFS_EXPORT_ID, set(), set() ) self.assertIsNotNone(access_updates) def test_update_access_add_cifs_invalid_acess_type(self): share = { "name": self.SHARE_NAME, "share_proto": "CIFS", "display_name": "foo_display_name", } cifs_access_rw = { "access_type": "invalid_type", "access_to": 
self.RW_USERS, "access_level": const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } cifs_access_ro = { "access_type": "invalid_type", "access_to": self.RO_USERS, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd09", } access_rules = [cifs_access_rw, cifs_access_ro] self._mock_powerstore_client.get_smb_share_id.return_value = ( self.SMB_SHARE_ID ) access_updates = self.storage_connection.update_access( self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) self._mock_powerstore_client.set_acl.assert_called_once_with( self.SMB_SHARE_ID, set(), set() ) self.assertIsNotNone(access_updates) def test_update_access_add_nfs_backend_failure(self): share = { "name": self.SHARE_NAME, "share_proto": "NFS", "display_name": "foo_display_name", } self._mock_powerstore_client.get_nfs_export_id.return_value = ( self.NFS_EXPORT_ID ) self._mock_powerstore_client.set_export_access.return_value = False self.assertFalse(self._mock_powerstore_client.get_nfs_export_id.called) self.assertFalse(self._mock_powerstore_client.set_export_access.called) nfs_access_rw = { "access_type": "ip", "access_to": self.RW_HOSTS, "access_level": const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } nfs_access_ro = { "access_type": "ip", "access_to": self.RO_HOSTS, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } access_rules = [nfs_access_rw, nfs_access_ro] self.assertRaises( exception.ShareBackendException, self.storage_connection.update_access, self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) def test_update_access_add_cifs_backend_failure(self): share = { "name": self.SHARE_NAME, "share_proto": "CIFS", "display_name": "foo_display_name", } self._mock_powerstore_client.set_acl.return_value = False cifs_access_rw = { "access_type": "user", "access_to": self.RW_USERS, "access_level": 
const.ACCESS_LEVEL_RW, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } cifs_access_ro = { "access_type": "user", "access_to": self.RO_USERS, "access_level": const.ACCESS_LEVEL_RO, "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", } access_rules = [cifs_access_rw, cifs_access_ro] self.assertRaises( exception.ShareBackendException, self.storage_connection.update_access, self.mock_context, share, access_rules, add_rules=None, delete_rules=None, share_server=None, ) def test_allow_access(self): self.assertRaises( NotImplementedError, self.storage_connection.allow_access, self.mock_context, share=None, access=None, share_server=None, ) def test_deny_access(self): self.assertRaises( NotImplementedError, self.storage_connection.deny_access, self.mock_context, share=None, access=None, share_server=None, ) def test_update_share_stats(self): data = dict( share_backend_name='powerstore', vendor_name='Dell EMC', storage_protocol='NFS_CIFS', snapshot_support=True, create_share_from_snapshot_support=True) self._mock_powerstore_client.get_cluster_id.return_value = "0" self._mock_powerstore_client. \ retreive_cluster_capacity_metrics.return_value = \ 47345047046144, 366003363027 self.storage_connection.update_share_stats(data) self.assertEqual(data['storage_protocol'], 'NFS_CIFS') self.assertEqual(data['driver_version'], connection.VERSION) self.assertEqual(data['total_capacity_gb'], 44093) self.assertEqual(data['free_capacity_gb'], 43752) def test_update_share_stats_failure(self): data = dict( share_backend_name='powerstore', vendor_name='Dell EMC', storage_protocol='NFS_CIFS', snapshot_support=True, create_share_from_snapshot_support=True) self._mock_powerstore_client. 
\ retreive_cluster_capacity_metrics.return_value = \ None, None self.storage_connection.update_share_stats(data) self.assertIsNone(data.get('total_capacity_gb')) self.assertIsNone(data.get('free_capacity_gb')) def test_create_snapshot(self): self._mock_powerstore_client.get_filesystem_id.return_value = ( self.FILESYSTEM_ID ) self._mock_powerstore_client. \ create_snapshot.return_value = self.SNAPSHOT_ID snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SHARE_NAME } self.storage_connection.create_snapshot( self.mock_context, snapshot, None ) self._mock_powerstore_client.get_filesystem_id. \ assert_called_with( self.SHARE_NAME ) self._mock_powerstore_client.create_snapshot.assert_called_with( self.FILESYSTEM_ID, self.SNAPSHOT_NAME ) def test_create_snapshot_invalid_filesystem_id(self): self._mock_powerstore_client.get_filesystem_id.return_value = ( None ) snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SHARE_NAME } self.assertRaises( exception.ShareBackendException, self.storage_connection.create_snapshot, self.mock_context, snapshot, None ) def test_create_snapshot_backend_failure(self): self._mock_powerstore_client.get_filesystem_id.return_value = ( self.FILESYSTEM_ID ) self._mock_powerstore_client. \ create_snapshot.return_value = None snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SHARE_NAME, "share": { "share_proto": "NFS" } } self.assertRaises( exception.ShareBackendException, self.storage_connection.create_snapshot, self.mock_context, snapshot, None ) def test_delete_snapshot(self): self._mock_powerstore_client.get_filesystem_id.return_value = ( self.SNAPSHOT_ID ) self._mock_powerstore_client.delete_filesystem.return_value = True snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SHARE_NAME } self.storage_connection.delete_snapshot( self.mock_context, snapshot, None ) self._mock_powerstore_client.get_filesystem_id. 
\ assert_called_with( self.SNAPSHOT_NAME ) self._mock_powerstore_client.delete_filesystem.assert_called_with( self.SNAPSHOT_ID ) def test_delete_snapshot_backend_failure(self): self._mock_powerstore_client.get_filesystem_id.return_value = ( self.SNAPSHOT_ID ) self._mock_powerstore_client.delete_filesystem.return_value = False snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SHARE_NAME } self.assertRaises( exception.ShareBackendException, self.storage_connection.delete_snapshot, self.mock_context, snapshot, None ) def test_revert_to_snapshot(self): self._mock_powerstore_client.get_filesystem_id.return_value = ( self.SNAPSHOT_ID ) self._mock_powerstore_client.restore_snapshot.return_value = True snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SHARE_NAME } self.storage_connection.revert_to_snapshot( self.mock_context, snapshot, None, None, None ) self._mock_powerstore_client.get_filesystem_id. \ assert_called_with( self.SNAPSHOT_NAME ) self._mock_powerstore_client.restore_snapshot.assert_called_with( self.SNAPSHOT_ID ) def test_revert_to_snapshot_backend_failure(self): self._mock_powerstore_client.get_filesystem_id.return_value = ( self.SNAPSHOT_ID ) self._mock_powerstore_client.restore_snapshot.return_value = False snapshot = { "name": self.SNAPSHOT_NAME, "share_name": self.SHARE_NAME } self.assertRaises( exception.ShareBackendException, self.storage_connection.revert_to_snapshot, self.mock_context, snapshot, None, None, None ) def test_create_share_from_snapshot_nfs(self): self._mock_powerstore_client.get_nas_server_id.return_value = ( self.NAS_SERVER_ID ) self._mock_powerstore_client.get_filesystem_id.return_value = ( self.SNAPSHOT_ID ) self._mock_powerstore_client.clone_snapshot.return_value = ( self.CLONE_ID ) self._mock_powerstore_client.create_nfs_export.return_value = ( self.NFS_EXPORT_ID ) self._mock_powerstore_client.get_nas_server_interfaces.return_value = ( [{"ip": self.NAS_SERVER_IP, "preferred": True}] ) 
self._mock_powerstore_client.resize_filesystem.return_value = ( True, None ) share = {"name": self.SHARE_NAME, "share_proto": "NFS", "size": self.SHARE_NEW_SIZE_GB} snapshot = {"name": self.SNAPSHOT_NAME, "size": self.SHARE_SIZE_GB} locations = self.storage_connection.create_share_from_snapshot( self.mock_context, share, snapshot ) expected_locations = [ {"path": "%s:/%s" % ( self.NAS_SERVER_IP, self.SHARE_NAME, ), "metadata": { "preferred": True}}] self.assertEqual(expected_locations, locations) self._mock_powerstore_client.get_nas_server_id.assert_called_with( self._mock_config.safe_get("dell_nas_server") ) self._mock_powerstore_client.clone_snapshot.assert_called_with( self.SNAPSHOT_ID, self.SHARE_NAME ) self._mock_powerstore_client.create_nfs_export.assert_called_with( self.CLONE_ID, self.SHARE_NAME ) self._mock_powerstore_client.get_nas_server_interfaces. \ assert_called_with( self.NAS_SERVER_ID ) def test_create_share_from_snapshot_cifs(self): self._mock_powerstore_client.get_nas_server_id.return_value = ( self.NAS_SERVER_ID ) self._mock_powerstore_client.get_filesystem_id.return_value = ( self.SNAPSHOT_ID ) self._mock_powerstore_client.clone_snapshot.return_value = ( self.CLONE_ID ) self._mock_powerstore_client.create_smb_share.return_value = ( self.NFS_EXPORT_ID ) self._mock_powerstore_client.get_nas_server_interfaces.return_value = ( [{"ip": self.NAS_SERVER_IP, "preferred": True}] ) self._mock_powerstore_client.resize_filesystem.return_value = ( True, None ) share = {"name": self.SHARE_NAME, "share_proto": "CIFS", "size": self.SHARE_NEW_SIZE_GB} snapshot = {"name": self.SNAPSHOT_NAME, "size": self.SHARE_SIZE_GB} locations = self.storage_connection.create_share_from_snapshot( self.mock_context, share, snapshot ) expected_locations = [ {"path": "\\\\%s\\%s" % ( self.NAS_SERVER_IP, self.SHARE_NAME), "metadata": { "preferred": True}}] self.assertEqual(expected_locations, locations) self._mock_powerstore_client.get_nas_server_id.assert_called_with( 
self._mock_config.safe_get("dell_nas_server") ) self._mock_powerstore_client.clone_snapshot.assert_called_with( self.SNAPSHOT_ID, self.SHARE_NAME ) self._mock_powerstore_client.create_smb_share.assert_called_with( self.CLONE_ID, self.SHARE_NAME ) self._mock_powerstore_client.get_nas_server_interfaces. \ assert_called_with( self.NAS_SERVER_ID ) def test_create_share_from_snapshot_clone_failure(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS"} snapshot = {"name": self.SNAPSHOT_NAME} self._mock_powerstore_client.clone_snapshot.return_value = None self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share_from_snapshot, self.mock_context, share, snapshot ) def test_create_share_from_snapshot_export_failure(self): share = {"name": self.SHARE_NAME, "share_proto": "NFS"} snapshot = {"name": self.SNAPSHOT_NAME} self._mock_powerstore_client.create_nfs_export.return_value = None self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share_from_snapshot, self.mock_context, share, snapshot ) def test_create_share_from_snapshot_share_failure(self): share = {"name": self.SHARE_NAME, "share_proto": "CIFS"} snapshot = {"name": self.SNAPSHOT_NAME} self._mock_powerstore_client.create_smb_share.return_value = None self.assertRaises( exception.ShareBackendException, self.storage_connection.create_share_from_snapshot, self.mock_context, share, snapshot ) def test_get_default_filter_function(self): filter = self.storage_connection.get_default_filter_function() self.assertEqual(filter, "share.size >= 3") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0136704 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/0000775000175000017500000000000000000000000024636 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/__init__.py0000664000175000017500000000136200000000000026751 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from unittest import mock sys.modules['storops'] = mock.Mock() sys.modules['storops.unity'] = mock.Mock() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/fake_exceptions.py0000664000175000017500000000347400000000000030367 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class UnityFakeException(Exception): pass class UnityException(UnityFakeException): pass class UnitySmbShareNameExistedError(UnityException): pass class UnityFileSystemNameAlreadyExisted(UnityException): pass class UnityNasServerNameUsedError(UnityException): pass class UnityNfsShareNameExistedError(UnityException): pass class UnitySnapNameInUseError(UnityException): pass class UnityIpAddressUsedError(UnityException): pass class UnityResourceNotFoundError(UnityException): pass class UnityOneDnsPerNasServerError(UnityException): pass class UnitySmbNameInUseError(UnityException): pass class UnityNfsAlreadyEnabledError(UnityException): pass class UnityHostNotFoundException(UnityException): pass class UnityNothingToModifyError(UnityException): pass class UnityShareShrinkSizeTooSmallError(UnityException): pass class UnityTenantNameInUseError(UnityException): pass class UnityVLANUsedByOtherTenantError(UnityException): pass class SystemAPINotSupported(UnityException): pass class UnityVLANAlreadyHasInterfaceError(UnityException): pass class UnityAclUserNotFoundError(UnityException): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/mocked_manila.yaml0000664000175000017500000002645000000000000030314 0ustar00zuulzuul00000000000000network_allocations: _type: 'network_allocations' _properties: &network_allocations_prop - id: '04ac4c27-9cf7-4406-809c-13edc93e4849' ip_address: 'fake_ip_addr_1' cidr: '192.168.1.0/24' segmentation_id: null gateway: '192.168.1.1' network_type: flat mtu: 1500 - id: '0cf87de7-5c65-4036-8b6a-e8176c356958' ip_address: 'fake_ip_addr_2' cidr: '192.168.1.0/24' segmentation_id: null gateway: '192.168.1.1' network_type: flat mtu: 1500 network_allocations_vlan: _type: 'network_allocations' _properties: &network_allocations_vlan_prop - id: '04ac4c27-9cf7-4406-809c-13edc93e4849' ip_address: 'fake_ip_addr_1' cidr: '192.168.1.0/24' 
segmentation_id: 160 gateway: '192.168.1.1' network_type: vlan mtu: 1500 - id: '0cf87de7-5c65-4036-8b6a-e8176c356958' ip_address: 'fake_ip_addr_2' cidr: '192.168.1.0/24' segmentation_id: 160 gateway: '192.168.1.1' network_type: vlan mtu: 1500 network_allocations_vxlan: _type: 'network_allocations' _properties: &network_allocations_vxlan_prop - id: '04ac4c27-9cf7-4406-809c-13edc93e4849' ip_address: 'fake_ip_addr_1' cidr: '192.168.1.0/24' segmentation_id: 123 gateway: '192.168.1.1' network_type: vxlan mtu: 1500 network_allocations_ipv6: _type: 'network_allocations' _properties: &network_allocations_ipv6_prop - id: '04ac4c27-9cf7-4406-809c-13edc93e9844' ip_address: '2001:db8:0:1:f816:3eff:fe76:35c4' cidr: '2001:db8:0:1:f816:3eff:fe76:35c4/64' segmentation_id: 170 gateway: '2001:db8:0:1::1' network_type: vlan mtu: 1500 active_directory: _type: 'security_service' _properties: &active_directory_prop type: 'active_directory' domain: 'fake_domain_name' dns_ip: 'fake_dns_ip' user: 'fake_user' password: 'fake_password' kerberos: _type: 'security_service' _properties: &kerberos_prop <<: *active_directory_prop type: 'kerberos' server: 'fake_server' security_services: _type: 'security_services' _properties: &security_services_prop services: [*active_directory_prop, *kerberos_prop] network_info__flat: _type: 'network_info' _properties: &network_info_flat_prop name: 'share_network' neutron_subnet_id: 'a3f3eeac-0b16-4932-8c03-0a37003644ff' network_type: 'flat' neutron_net_id: 'e6c96730-2bcf-4ce3-86fa-7cb7740086cb' ip_version: 4 id: '232d8218-2743-41d1-832b-4194626e691e' mtu: 1500 network_allocations: *network_allocations_prop server_id: '78fd845f-8e7d-487f-bfde-051d83e78103' segmentation_id: 0 security_services: [] network_info__vlan: _type: 'network_info' _properties: &network_info__vlan_prop <<: *network_info_flat_prop network_type: 'vlan' network_allocations: *network_allocations_vlan_prop segmentation_id: 160 network_info__vxlan: _type: 'network_info' _properties: 
&network_info__vxlan_prop <<: *network_info_flat_prop network_type: 'vxlan' network_allocations: *network_allocations_vxlan_prop network_info__ipv6: _type: 'network_info' _properties: &network_info__ipv6_prop <<: *network_info_flat_prop network_allocations: *network_allocations_ipv6_prop segmentation_id: 170 network_info__active_directory: _type: 'network_info' _properties: <<: *network_info__vlan_prop security_services: [*active_directory_prop] network_info__kerberos: _type: 'network_info' _properties: <<: *network_info_flat_prop security_services: [*kerberos_prop] share_server: _type: 'share_server' _properties: &share_server_prop status: 'active' share_network: *network_info_flat_prop share_network_id: '232d8218-2743-41d1-832b-4194626e691e' host: 'openstack@VNX' backend_details: share_server_name: '78fd845f-8e7d-487f-bfde-051d83e78103' network_allocations: *network_allocations_prop id: '78fd845f-8e7d-487f-bfde-051d83e78103' identifier: 'c2e48947-98ed-4eae-999b-fa0b83731dfd' share_server__no_share_server_name: _type: 'share_server' _properties: <<: *share_server_prop backend_details: share_server_name: None id: '78fd845f-8e7d-487f-bfde-051d83e78103' server_detail: _type: 'server_detail' _properties: &server_detail_prop share_server_name: '78fd845f-8e7d-487f-bfde-051d83e78103' cifs_share: _type: 'share' _properties: &cifs_share_prop share_id: '708e753c-aacb-411f-9c8a-8b8175da4e73' availability_zone_id: 'de628fb6-1c99-41f6-a06a-adb61ff693b5' share_network_id: '232d8218-2743-41d1-832b-4194626e691e' share_server_id: '78fd845f-8e7d-487f-bfde-051d83e78103' id: '716100cc-e0b4-416b-ac27-d38dd019330d' size: 1 user_id: '19bbda71b578471a93363653dcb4c61d' status: 'creating' share_type_id: '57679eab-3e67-4052-b180-62b609670e93' host: 'openstack@VNX#Pool_2' display_name: 'cifs_share' share_proto: 'CIFS' export_locations: [] is_public: False managed_cifs_share: _type: 'share' _properties: &managed_cifs_share_share_prop share_id: '708e753c-aacb-411f-9c8a-8b8175da4e73' 
availability_zone_id: 'de628fb6-1c99-41f6-a06a-adb61ff693b5' share_network_id: '232d8218-2743-41d1-832b-4194626e691e' share_server_id: '78fd845f-8e7d-487f-bfde-051d83e78103' id: '716100cc-e0b4-416b-ac27-d38dd019330d' size: 10 user_id: '19bbda71b578471a93363653dcb4c61d' status: 'creating' share_type_id: '57679eab-3e67-4052-b180-62b609670e93' host: 'openstack@VNX#Pool_2' display_name: 'cifs_share' share_proto: 'CIFS' export_locations: [path: '\\10.0.0.1\bd23121f-hg4e-432c-12cd2c5-bb93dfghe212'] is_public: False snapshot_support: False nfs_share: _type: 'share' _properties: &nfs_share_prop share_id: '12eb3777-7008-4721-8243-422507db8f9d' availability_zone_id: 'de628fb6-1c99-41f6-a06a-adb61ff693b5' share_network_id: '232d8218-2743-41d1-832b-4194626e691e' share_server_id: '78fd845f-8e7d-487f-bfde-051d83e78103' id: 'cb532599-8dc6-4c3e-bb21-74ea54be566c' size: 1 user_id: '19bbda71b578471a93363653dcb4c61d' status: 'creating' share_type_id: '57679eab-3e67-4052-b180-62b609670e93' host: 'openstack@VNX#Pool_2' display_name: 'nfs_share' share_proto: 'NFS' export_locations: null is_public: False managed_nfs_share: _type: 'share' _properties: &managed_nfs_share_prop share_id: '12eb3777-7008-4721-8243-422507db8f9d' availability_zone_id: 'de628fb6-1c99-41f6-a06a-adb61ff693b5' share_network_id: '232d8218-2743-41d1-832b-4194626e691e' share_server_id: '78fd845f-8e7d-487f-bfde-051d83e78103' id: 'cb532599-8dc6-4c3e-bb21-74ea54be566c' size: 9 user_id: '19bbda71b578471a93363653dcb4c61d' status: 'creating' share_type_id: '57679eab-3e67-4052-b180-62b609670e93' host: 'openstack@VNX#Pool_2' display_name: 'nfs_share' share_proto: 'NFS' export_locations: [path: '172.168.201.201:/ad1caddf-097e-462c-8ac6-5592ed6fe22f'] is_public: False snapshot_support: False dhss_false_cifs_share: _type: 'share' _properties: &dhss_false_cifs_share_prop share_id: '708e753c-aacb-411f-9c8a-8b8175da4e73' availability_zone_id: 'de628fb6-1c99-41f6-a06a-adb61ff693b5' share_network_id: 
'232d8218-2743-41d1-832b-4194626e691e' share_server_id: 'test-dhss-false-427f-b4de-0ad83el5j8' id: '716100cc-e0b4-416b-ac27-d38dd019330d' size: 1 user_id: '19bbda71b578471a93363653dcb4c61d' status: 'creating' share_type_id: '57679eab-3e67-4052-b180-62b609670e93' host: 'openstack@VNX#Pool_2' display_name: 'cifs_share' share_proto: 'CIFS' export_locations: [] is_public: False dhss_false_nfs_share: _type: 'share' _properties: &dhss_false_nfs_share_prop share_id: '12eb3777-7008-4721-8243-422507db8f9d' availability_zone_id: 'de628fb6-1c99-41f6-a06a-adb61ff693b5' share_network_id: '232d8218-2743-41d1-832b-4194626e691e' share_server_id: 'test-dhss-false-427f-b4de-0ad83el5j8' id: 'cb532599-8dc6-4c3e-bb21-74ea54be566c' size: 1 user_id: '19bbda71b578471a93363653dcb4c61d' status: 'creating' share_type_id: '57679eab-3e67-4052-b180-62b609670e93' host: 'openstack@VNX#Pool_2' display_name: 'nfs_share' share_proto: 'NFS' export_locations: [] is_public: False shrink_cifs_share: _type: 'share' _properties: &shrink_cifs_share_prop share_id: '708e753c-aacb-411f-9c8a-8b8175da4e73' availability_zone_id: 'de628fb6-1c99-41f6-a06a-adb61ff693b5' share_network_id: '232d8218-2743-41d1-832b-4194626e691e' share_server_id: '78fd845f-8e7d-487f-bfde-051d83e78103' id: '716100cc-e0b4-416b-ac27-d38dd019330d' size: 9 user_id: '19bbda71b578471a93363653dcb4c61d' status: 'creating' share_type_id: '57679eab-3e67-4052-b180-62b609670e93' host: 'openstack@VNX#Pool_2' display_name: 'cifs_share' share_proto: 'CIFS' export_locations: [] is_public: False shrink_nfs_share: _type: 'share' _properties: &shrink_nfs_share_prop share_id: '12eb3777-7008-4721-8243-422507db8f9d' availability_zone_id: 'de628fb6-1c99-41f6-a06a-adb61ff693b5' share_network_id: '232d8218-2743-41d1-832b-4194626e691e' share_server_id: '78fd845f-8e7d-487f-bfde-051d83e78103' id: 'cb532599-8dc6-4c3e-bb21-74ea54be566c' size: 9 user_id: '19bbda71b578471a93363653dcb4c61d' status: 'creating' share_type_id: '57679eab-3e67-4052-b180-62b609670e93' host: 
'openstack@VNX#Pool_2' display_name: 'nfs_share' share_proto: 'NFS' export_locations: [] is_public: False invalid_share: _type: 'share' _properties: &invalid_share_prop share_id: '12eb3777-7008-4721-8243-422507db8f9d' availability_zone_id: 'de628fb6-1c99-41f6-a06a-adb61ff693b5' share_network_id: '232d8218-2743-41d1-832b-4194626e691e' share_server_id: '78fd845f-8e7d-487f-bfde-051d83e78103' id: 'cb532599-8dc6-4c3e-bb21-74ea54be566c' size: 1 user_id: '19bbda71b578471a93363653dcb4c61d' status: 'creating' share_type_id: '57679eab-3e67-4052-b180-62b609670e93' host: 'openstack@VNX#Pool_2' display_name: 'nfs_share' share_proto: 'fake_proto' export_locations: [] is_public: False snapshot: _type: 'snapshot' _properties: &snapshot_prop status: 'creating' share_instance_id: '27e4625e-c336-4749-85bc-634216755fbc' share: share_proto: 'CIFS' id: 's24r-3fgw2-g039ef-j029f0-nrver' snapshot_id: '345476cc-32ab-4565-ba88-e4733b7ffa0e' progress: '0%' id: 'ab411797-b1cf-4035-bf14-8771a7bf1805' share_id: '27e4625e-c336-4749-85bc-634216755fbc' provider_location: '23047-ef2344-4563cvw-r4323cwed' cifs_rw_access: _type: 'access' _properties: access_level: 'rw' access_to: 'administrator' access_type: 'user' cifs_ro_access: _type: 'access' _properties: access_level: 'ro' access_to: 'administrator' access_type: 'user' nfs_rw_access: _type: 'access' _properties: access_level: 'rw' access_to: '192.168.1.1' access_type: 'ip' nfs_rw_access_cidr: _type: 'access' _properties: access_level: 'rw' access_to: '192.168.1.0/24' access_type: 'ip' nfs_ro_access: _type: 'access' _properties: access_level: 'ro' access_to: '192.168.1.1' access_type: 'ip' invalid_access: _type: 'access' _properties: access_level: 'fake_access_level' access_to: 'fake_access_to' access_type: 'fake_type' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/mocked_unity.yaml0000664000175000017500000010451100000000000030216 0ustar00zuulzuul00000000000000sp_a: &sp_a _properties: name: 'SPA' id: 'SPA' existed: true _methods: get_id: 'SPA' sp_b: &sp_b _properties: name: 'SPB' id: 'SPB' existed: true _methods: get_id: 'SPB' sp_c: &sp_invalid _properties: id: 'SPC' existed: false interface_1: &interface_1 _properties: ip_address: 'fake_ip_addr_1' interface_2: &interface_2 _properties: ip_address: 'fake_ip_addr_2' interface_ipv6: &interface_ipv6 _properties: ip_addr: '2001:db8:0:1:f816:3eff:fe76:35c4' gateway: '2001:db8:0:1::1' prefix_length: '64' vlan_id: '201' nas_server: &nas_server _properties: &nas_server_prop name: '78fd845f-8e7d-487f-bfde-051d83e78103' file_interface: [*interface_1, *interface_2] current_sp: *sp_a home_sp: *sp_a nas_server_ipv6: &nas_server_ipv6 _properties: &nas_server_ipv6_prop name: 'af1eef2f-be66-4df1-8f25-9720f087da05' file_interface: [*interface_ipv6] current_sp: *sp_a home_sp: *sp_a filesystem_base: &filesystem_base _properties: &filesystem_base_prop name: 'fake_filesystem_name' id: 'fake_filesystem_id' size_total: 50000000000 is_thin_enabled: true pool: null nas_server: null cifs_share: [] nfs_share: [] _methods: has_snap: False snap_base: _properties: &snap_base_prop name: 'fake_snap_name' id: 'fake_snap_id' size: 50000000000 filesystem: *filesystem_base share_base: _properties: &share_base_prop name: 'fake_share_name' id: 'fake_share_id' filesystem: null snap: null cifs_share_base: &cifs_share_base _properties: &cifs_share_base_prop <<: *share_base_prop nfs_share_base: &nfs_share_base _properties: &nfs_share_base_prop <<: *share_base_prop pool_base: _properties: &pool_base_prop name: 'fake_pool_name' pool_id: 0 state: Ready user_capacity_gbs: 1311 total_subscribed_capacity_gbs: 131 available_capacity_gbs: 132 percent_full_threshold: 70 fast_cache: True pool_1: &pool_1 _properties: &pool_1_prop <<: *pool_base_prop name: 'pool_1' 
size_total: 500000 size_used: 10000 size_subscribed: 30000 pool_2: &pool_2 _properties: &pool_2_prop <<: *pool_base_prop name: 'pool_2' size_total: 600000 size_used: 20000 size_subscribed: 40000 nas_server_pool: &nas_server_pool _properties: <<: *pool_base_prop name: 'nas_server_pool' port_base: _properties: &port_base_prop is_link_up: true id: 'fake_name' parent_storage_processor: *sp_a port_1: &port_1 _properties: <<: *port_base_prop is_link_up: true id: 'spa_eth1' parent_storage_processor: *sp_a _methods: get_id: 'spa_eth1' port_2: &port_2 _properties: <<: *port_base_prop is_link_up: true id: 'spa_eth2' parent_storage_processor: *sp_a _methods: get_id: 'spa_eth2' port_3: &port_internal_port _properties: <<: *port_base_prop is_link_up: true id: 'internal_port' parent_storage_processor: *sp_a _methods: get_id: 'internal_port' port_4: &port_4 _properties: <<: *port_base_prop is_link_up: true id: 'spb_eth1' parent_storage_processor: *sp_b _methods: get_id: 'spb_eth1' la_port: &la_port _properties: is_link_up: true id: 'spa_la_4' parent_storage_processor: *sp_a _methods: get_id: 'spa_la_4' tenant_1: &tenant_1 _properties: id: "tenant_1" name: "Tenant1" uuid: "173ca6c3-5952-427d-82a6-df88f49e3926" vlans: [2] snapshot_1: &snapshot_1 _properties: id: "snapshot_1" name: "Snapshot_1" _methods: restore: True unity_base: &unity_base _methods: &unity_base_method get_sp: *sp_a get_pool: _side_effect: [[*pool_1, *pool_2, *nas_server_pool], *nas_server_pool] get_file_port: [*port_1, *port_2] test_connect: &test_connect unity: *unity_base test_connect_with_ipv6: &test_connect_with_ipv6 unity: *unity_base test_dhss_false_connect: &test_dhss_false_connect unity: *unity_base test_connect__invalid_sp_configuration: unity: _methods: <<: *unity_base_method get_sp: *sp_invalid test_connect__invalid_pool_configuration: *test_connect test_create_nfs_share: nfs_share: &nfs_share__test_create_nfs_share _properties: <<: *nfs_share_base_prop name: 'cb532599-8dc6-4c3e-bb21-74ea54be566c' pool: 
&pool__test_create_nfs_share _properties: <<: *pool_base_prop name: 'Pool_2' _methods: create_nfs_share: None unity: _methods: <<: *unity_base_method get_pool: _side_effect: [*pool__test_create_nfs_share] get_nas_server: *nas_server test_create_cifs_share: cifs_share: &cifs_share__test_create_cifs_share _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: enable_ace: filesystem: &filesystem__test_create_cifs_share _properties: &filesystem_prop__test_create_cifs_share <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd4587340' _methods: create_cifs_share: *cifs_share__test_create_cifs_share pool: &pool__test_create_cifs_share _properties: <<: *pool_base_prop name: 'Pool_2' _methods: create_filesystem: *filesystem__test_create_cifs_share unity: _methods: <<: *unity_base_method get_pool: _side_effect: [*pool__test_create_cifs_share] get_nas_server: *nas_server test_dhss_false_create_nfs_share: nfs_share: &nfs_share__test_dhss_false_create_nfs_share _properties: <<: *nfs_share_base_prop name: 'cb532599-8dc6-4c3e-bb21-74ea54be566c' pool: &pool__test_dhss_false_create_nfs_share _properties: <<: *pool_base_prop name: 'Pool_2' _methods: create_nfs_share: None unity: _methods: <<: *unity_base_method get_pool: _side_effect: [*pool__test_dhss_false_create_nfs_share] get_nas_server: *nas_server test_dhss_false_create_cifs_share: cifs_share: &cifs_share__test_dhss_false_create_cifs_share _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: enable_ace: filesystem: &filesystem__test_dhss_false_create_cifs_share _properties: &filesystem_prop__test_dhss_false_create_cifs_share <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd4587340' _methods: create_cifs_share: *cifs_share__test_dhss_false_create_cifs_share pool: &pool__test_dhss_false_create_cifs_share _properties: <<: *pool_base_prop name: 'Pool_2' _methods: create_filesystem: *filesystem__test_dhss_false_create_cifs_share 
unity: _methods: <<: *unity_base_method get_pool: _side_effect: [*pool__test_dhss_false_create_cifs_share] get_nas_server: *nas_server test_create_share_with_invalid_share_server: pool: &pool__test_create_share_with_invalid_share_server _properties: <<: *pool_base_prop name: 'Pool_2' unity: _methods: <<: *unity_base_method get_pool: _side_effect: [*pool__test_create_share_with_invalid_share_server] get_nas_server: _raise: UnityResourceNotFoundError: 'Failed to get NAS server.' test_delete_share: filesystem: &filesystem__test_delete_share _properties: &filesystem_prop__test_delete_share <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: delete: update: has_snap: False cifs_share: &cifs_share__test_delete_share _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_delete_share _methods: delete: unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_delete_share test_delete_share__with_invalid_share: unity: _methods: <<: *unity_base_method get_cifs_share: _raise: UnityResourceNotFoundError: 'Failed to get CIFS share.' 
test_delete_share__create_from_snap: filesystem: &filesystem__test_delete_share__create_from_snap _properties: &filesystem_prop__test_delete_share__create_from_snap <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd4587340' _methods: delete: update: has_snap: False snap: &snap__test_delete_share__create_from_snap _properties: &snap_prop__test_delete_share__create_from_snap <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_delete_share__create_from_snap _methods: delete: cifs_share: &cifs_share__test_delete_share__create_from_snap _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' snap: *snap__test_delete_share__create_from_snap _methods: delete: unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_delete_share__create_from_snap get_snap: *snap__test_delete_share__create_from_snap test_delete_share__create_from_snap_but_not_isolated: filesystem: &filesystem__test_delete_share__create_from_snap_but_not_isolated _properties: &filesystem_prop__test_delete_share__create_from_snap_but_not_isolated <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd4587340' cifs_share: [*cifs_share_base] nfs_share: [*nfs_share_base] _methods: delete: update: has_snap: True snap: &snap__test_delete_share__create_from_snap_but_not_isolated _properties: &snap_prop__test_delete_share__create_from_snap_but_not_isolated <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_delete_share__create_from_snap_but_not_isolated _methods: delete: cifs_share: &cifs_share__test_delete_share__create_from_snap_but_not_isolated _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' snap: *snap__test_delete_share__create_from_snap_but_not_isolated _methods: delete: unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_delete_share__create_from_snap_but_not_isolated test_delete_share__but_not_isolated: 
filesystem: &filesystem__test_delete_share__but_not_isolated _properties: &filesystem_prop__test_delete_share__but_not_isolated <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd4587340' _methods: update: has_snap: True cifs_share: &cifs_share__test_delete_share__but_not_isolated _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_delete_share__but_not_isolated _methods: delete: unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_delete_share__but_not_isolated test_extend_cifs_share: filesystem: &filesystem__test_extend_cifs_share _properties: &filesystem_prop__test_extend_cifs_share <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: extend: cifs_share: &cifs_share__test_extend_cifs_share _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_extend_cifs_share unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_extend_cifs_share test_extend_nfs_share: filesystem: &filesystem__test_extend_nfs_share _properties: &filesystem_prop__test_extend_nfs_share <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: extend: cifs_share: &cifs_share__test_extend_nfs_share _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_extend_nfs_share unity: _methods: <<: *unity_base_method get_nfs_share: *cifs_share__test_extend_nfs_share test_shrink_cifs_share: filesystem: &filesystem__test_shrink_cifs_share _properties: &filesystem_prop__test_shrink_cifs_share <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: shrink: cifs_share: &cifs_share__test_shrink_cifs_share _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_shrink_cifs_share unity: _methods: <<: *unity_base_method get_cifs_share: 
*cifs_share__test_shrink_cifs_share test_shrink_nfs_share: filesystem: &filesystem__test_shrink_nfs_share _properties: &filesystem_prop__test_shrink_nfs_share <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: shrink: cifs_share: &cifs_share__test_shrink_nfs_share _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_shrink_nfs_share unity: _methods: <<: *unity_base_method get_nfs_share: *cifs_share__test_shrink_nfs_share test_extend_share__create_from_snap: snap: &snap__test_extend_share__create_from_snap _properties: &snap_prop__test_extend_share__create_from_snap <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' cifs_share: &cifs_share__test_extend_share__create_from_snap _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' snap: *snap__test_extend_share__create_from_snap unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_extend_share__create_from_snap test_shrink_share_create_from_snap: snap: &snap__test_shrink_share_create_from_snap _properties: &snap_prop__test_shrink_share__create_from_snap <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' cifs_share: &cifs_share__test_shrink_share__create_from_snap _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' snap: *snap__test_shrink_share_create_from_snap unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_shrink_share__create_from_snap test_create_snapshot_from_filesystem: filesystem: &filesystem__test_create_snapshot_from_filesystem _properties: &filesystem_prop__test_create_snapshot_from_filesystem <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: create_snap: cifs_share: &cifs_share__test_create_snapshot_from_filesystem _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: 
*filesystem__test_create_snapshot_from_filesystem unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_create_snapshot_from_filesystem test_create_snapshot_from_snapshot: snap: &snap__test_create_snapshot_from_snapshot _properties: &snap_prop__test_create_snapshot_from_snapshot <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: create_snap: cifs_share: &cifs_share__test_create_snapshot_from_snapshot _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' snap: *snap__test_create_snapshot_from_snapshot unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_create_snapshot_from_snapshot get_snap: *snap__test_create_snapshot_from_snapshot test_delete_snapshot: snap: &snap__test_delete_snapshot _properties: &snap_prop__test_delete_snapshot <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: delete: unity: _methods: <<: *unity_base_method get_snap: *snap__test_delete_snapshot test_ensure_share_exists: cifs_share: &cifs_share_ensure_share_exists _properties: existed: True unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share_ensure_share_exists test_ensure_share_not_exists: cifs_share: &cifs_share_ensure_share_not_exists _properties: existed: False unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share_ensure_share_not_exists test_update_share_stats: unity: _methods: <<: *unity_base_method get_pool: _side_effect: [[*pool_1, *pool_2]] test_update_share_stats__nonexistent_pools: unity: _methods: <<: *unity_base_method get_pool: _side_effect: [[]] test_get_pool: filesystem: &filesystem__test_get_pool _properties: &filesystem_prop__test_get_pool <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' pool: *pool_1 cifs_share: &cifs_share__test_get_pool _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_get_pool unity: _methods: <<: *unity_base_method 
get_cifs_share: *cifs_share__test_get_pool test_setup_server: &test_setup_server nas_server_1: &nas_server_1__test_setup_server _properties: <<: *nas_server_prop existed: false home_sp: *sp_a ip_port: &ip_port _methods: set_mtu: nas_server_2: &nas_server_2__test_setup_server _properties: <<: *nas_server_prop _methods: &nas_server_2__test_setup_server_mehtod create_file_interface: enable_nfs_service: unity: _methods: &unity_method__test_setup_server <<: *unity_base_method get_nas_server: *nas_server_1__test_setup_server create_nas_server: *nas_server_2__test_setup_server get_ip_port: *ip_port test_setup_server__vlan_network: <<: *test_setup_server nas_server: &nas_server__test_setup_server_flat_network _properties: <<: *nas_server_prop existed: true _methods: create_file_interface: create_dns_server: enable_nfs_service: unity: _methods: <<: *unity_method__test_setup_server get_nas_server: *nas_server__test_setup_server_flat_network create_tenant: *tenant_1 test_setup_server__vxlan_network: <<: *test_setup_server nas_server_2: &nas_server_2__test_setup_server__vxlan_network _properties: <<: *nas_server_prop _methods: delete: unity: _methods: <<: *unity_method__test_setup_server get_nas_server: *nas_server_2__test_setup_server__vxlan_network test_setup_server__active_directory: <<: *test_setup_server nas_server_2: &nas_server_2__test_setup_server__active_directory _properties: <<: *nas_server_prop _methods: create_file_interface: create_dns_server: enable_cifs_service: enable_nfs_service: unity: _methods: &unity_method__test_setup_server__active_directory <<: *unity_method__test_setup_server create_nas_server: *nas_server_2__test_setup_server__active_directory create_tenant: *tenant_1 test_setup_server__kerberos: *test_setup_server test_setup_server__throw_exception: <<: *test_setup_server nas_server_1: &nas_server_1__test_setup_server__throw_exception _properties: <<: *nas_server_prop existed: false nas_server_2: &nas_server_2__test_setup_server__throw_exception 
_properties: <<: *nas_server_prop tenant: _methods: create_file_interface: create_dns_server: enable_cifs_service: enable_nfs_service: _raise: UnityException: 'Failed to enable NFS service.' delete: unity: _methods: <<: *unity_method__test_setup_server get_nas_server: *nas_server_2__test_setup_server__throw_exception create_nas_server: *nas_server_2__test_setup_server__throw_exception create_tenant: *tenant_1 test_teardown_server: tenant: _properties: nas_servers: [] _methods: delete: nas_server: &nas_server__test_teardown_server _properties: <<: *nas_server_prop tenant: _methods: delete: unity: _methods: <<: *unity_base_method get_nas_server: *nas_server__test_teardown_server test__get_managed_pools: &test__get_managed_pools unity: _methods: <<: *unity_base_method get_pool: [*pool_1, *pool_2, *nas_server_pool] test__get_managed_pools__invalid_pool_configuration: *test__get_managed_pools test_validate_port_configuration: &test_validate_port_configuration unity: _methods: <<: *unity_base_method get_file_port: [*port_1, *port_2, *port_internal_port, *port_4, *la_port] test_validate_port_configuration_exception: *test_validate_port_configuration test__get_managed_pools__invalid_port_configuration: *test_validate_port_configuration test_create_cifs_share_from_snapshot: cifs_share: &cifs_share__test_create_cifs_share_from_snapshot _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: enable_ace: snapshot_1: &snapshot_1__test_create_cifs_share_from_snapshot _properties: &snapshot_1_prop__test_create_cifs_share_from_snapshot <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: create_cifs_share: *cifs_share__test_create_cifs_share_from_snapshot snapshot_2: &snapshot_2__test_create_cifs_share_from_snapshot _properties: &snapshot__prop__test_create_cifs_share_from_snapshot <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd4587340' _methods: create_snap: *snapshot_1__test_create_cifs_share_from_snapshot 
unity: _methods: <<: *unity_base_method get_nas_server: *nas_server get_snap: *snapshot_2__test_create_cifs_share_from_snapshot test_create_nfs_share_from_snapshot: nfs_share: &nfs_share__test_create_nfs_share_from_snapshot _properties: <<: *nfs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: enable_ace: snapshot_1: &snapshot_1__test_create_nfs_share_from_snapshot _properties: &snapshot_1_prop__test_create_nfs_share_from_snapshot <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: create_nfs_share: *nfs_share__test_create_nfs_share_from_snapshot snapshot_2: &snapshot_2__test_create_nfs_share_from_snapshot _properties: &snapshot__prop__test_create_nfs_share_from_snapshot <<: *snap_base_prop name: '716100cc-e0b4-416b-ac27-d38dd4587340' _methods: create_snap: *snapshot_1__test_create_nfs_share_from_snapshot unity: _methods: <<: *unity_base_method get_nas_server: *nas_server get_snap: *snapshot_2__test_create_nfs_share_from_snapshot test_create_share_from_snapshot_no_server_name: unity: _methods: <<: *unity_base_method get_nas_server: _raise: UnityResourceNotFoundError: 'NAS server is not found' test_clear_share_access_cifs: cifs_share: &cifs_share__test_clear_share_access_cifs _methods: clear_access: _raise: UnityException: 'clear cifs access invoked' unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_clear_share_access_cifs test_clear_share_access_nfs: nfs_share: &nfs_share__test_clear_share_access_nfs _methods: clear_access: _raise: UnityException: 'clear nfs access invoked' unity: _methods: <<: *unity_base_method get_nfs_share: *nfs_share__test_clear_share_access_nfs test_allow_rw_cifs_share_access: &test_allow_rw_cifs_share_access cifs_share: &cifs_share__test_allow_rw_cifs_share_access _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: add_ace: unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_allow_rw_cifs_share_access 
test_update_access_allow_rw: *test_allow_rw_cifs_share_access test_update_access_recovery: cifs_share: &cifs_share__test_update_access_recovery _methods: add_ace: clear_access: unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_update_access_recovery test_allow_ro_cifs_share_access: *test_allow_rw_cifs_share_access test_allow_rw_nfs_share_access: nfs_share: &nfs_share__test_allow_rw_nfs_share_access _properties: <<: *nfs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: allow_read_write_access: allow_root_access: unity: _methods: <<: *unity_base_method get_nfs_share: *nfs_share__test_allow_rw_nfs_share_access test_allow_ro_nfs_share_access: nfs_share: &nfs_share__test_allow_ro_nfs_share_access _properties: <<: *nfs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: allow_read_only_access: unity: _methods: <<: *unity_base_method get_nfs_share: *nfs_share__test_allow_ro_nfs_share_access test_deny_cifs_share_access: cifs_share: &cifs_share__test_deny_cifs_share_access _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: delete_ace: unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_deny_cifs_share_access test_deny_nfs_share_access: &test_deny_nfs_share_access nfs_share: &nfs_share__test_deny_nfs_share_access _properties: <<: *nfs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' _methods: delete_access: unity: _methods: <<: *unity_base_method get_nfs_share: *nfs_share__test_deny_nfs_share_access test_update_access_deny_nfs: *test_deny_nfs_share_access # The following test cases are for client.py test_create_cifs_share__existed_expt: filesystem: _methods: create_cifs_share: _raise: UnitySmbShareNameExistedError: 'CIFS share already exists.' 
cifs_share: &cifs_share__test_create_cifs_share__existed_expt _properties: name: '716100cc-e0b4-416b-ac27-d38dd019330d' unity: _methods: get_cifs_share: *cifs_share__test_create_cifs_share__existed_expt test_create_nfs_share__existed_expt: filesystem: _methods: create_nfs_share: _raise: UnityNfsShareNameExistedError: 'NFS share already exists.' nfs_share: &nfs_share__test_create_nfs_share__existed_expt _properties: name: '716100cc-e0b4-416b-ac27-d38dd019330d' unity: _methods: get_nfs_share: *nfs_share__test_create_nfs_share__existed_expt test_create_nfs_share_batch: nfs_share: &nfs_share__test_create_nfs_share_batch _properties: name: '716100cc-e0b4-416b-ac27-d38dd019330d' size: 151081080 unity: _methods: get_nfs_share: *nfs_share__test_create_nfs_share_batch pool: _methods: create_nfs_share: nas_server: _properties: <<: *nas_server_prop test_get_share_with_invalid_proto: share: _properties: <<: *share_base_prop test_create_filesystem__existed_expt: filesystem: &filesystem__test_create_filesystem__existed_expt _properties: name: '716100cc-e0b4-416b-ac27-d38dd019330d' size: 10 proto: 'CIFS' pool: _methods: create_filesystem: _raise: UnityFileSystemNameAlreadyExisted: 'Pool already exists.' nas_server: _properties: <<: *nas_server_prop unity: _methods: get_filesystem: *filesystem__test_create_filesystem__existed_expt test_delete_filesystem__nonexistent_expt: filesystem: _properties: name: already removed filsystem _methods: delete: _raise: UnityResourceNotFoundError: 'Filesystem is non-existent.' test_create_nas_server__existed_expt: sp: _properites: name: 'SP' pool: _properites: name: 'fake_pool' nas_server: &nas_server__test_create_nas_server__existed_expt _properties: <<: *nas_server_prop unity: _methods: create_nas_server: _raise: UnityNasServerNameUsedError: 'NAS Server already exists.' 
get_nas_server: *nas_server__test_create_nas_server__existed_expt test_delete_nas_server__nonexistent_expt: nas_server: &nas_server__test_delete_nas_server__nonexistent_expt _properties: <<: *nas_server_prop tenant: _methods: delete: _raise: UnityResourceNotFoundError: 'NAS server is non-existent.' unity: _methods: get_nas_server: *nas_server__test_delete_nas_server__nonexistent_expt test_create_dns_server__existed_expt: nas_server: _methods: create_dns_server: _raise: UnityOneDnsPerNasServerError: 'DNS server already exists.' test_create_interface__existed_expt: nas_server: _properties: <<: *nas_server_prop _methods: create_file_interface: _raise: UnityIpAddressUsedError: 'IP address is already used.' test_enable_cifs_service__existed_expt: nas_server: _properties: <<: *nas_server_prop _methods: enable_cifs_service: _raise: UnitySmbNameInUseError: 'CIFS server already exists.' test_enable_nfs_service__existed_expt: nas_server: _properties: <<: *nas_server_prop _methods: enable_nfs_service: _raise: UnityNfsAlreadyEnabledError: 'NFS server already exists.' test_create_snapshot__existed_expt: filesystem: _properties: <<: *filesystem_base_prop _methods: create_snap: _raise: UnitySnapNameInUseError: 'Snapshot already exists.' snapshot: _properties: <<: *snap_base_prop test_create_snap_of_snap__existed_expt: src_snapshot: _methods: create_snap: _raise: UnitySnapNameInUseError: 'Snapshot already exists.' dest_snapshot: &dest_snapshot__test_create_snap_of_snap__existed_expt _properties: <<: *snap_base_prop unity: _methods: get_snap: *dest_snapshot__test_create_snap_of_snap__existed_expt test_delete_snapshot__nonexistent_expt: snapshot: _properties: <<: *snap_base_prop _methods: delete: _raise: UnityResourceNotFoundError: 'Snapshot is non-existent.' 
test_nfs_deny_access__nonexistent_expt: nfs_share: &nfs_share__test_nfs_deny_access__nonexistent_expt _methods: delete_access: _raise: UnityHostNotFoundException: "Unity Host is non-existent" unity: _methods: get_nfs_share: *nfs_share__test_nfs_deny_access__nonexistent_expt test_get_storage_processor: unity: _methods: get_sp: *sp_a test_extend_filesystem: fs: _methods: get_id: 'svc_12' extend: _raise: UnityNothingToModifyError: test_shrink_filesystem: fs: _methods: get_id: 'svc_11' shrink: _raise: UnityNothingToModifyError: test_shrink_filesystem_size_too_small: fs: _methods: get_id: 'svc_10' shrink: _raise: UnityShareShrinkSizeTooSmallError: test_get_tenant: unity: _methods: create_tenant: *tenant_1 test_get_tenant_preexist: unity: _methods: create_tenant: _raise: UnityVLANUsedByOtherTenantError: get_tenant_use_vlan: *tenant_1 test_get_tenant_name_inuse_but_vlan_not_used: unity: _methods: create_tenant: _raise: UnityTenantNameInUseError: get_tenant_use_vlan: test_get_tenant_for_vlan_already_has_interfaces: unity: _methods: create_tenant: _raise: UnityVLANAlreadyHasInterfaceError: get_tenant_use_vlan: *tenant_1 test_get_file_ports: link_down_port: &down_port _properties: <<: *port_base_prop is_link_up: false id: 'down_port' _methods: get_id: 'down_port' unity: _methods: get_file_port: [*port_1, *port_internal_port, *down_port, *la_port] test_create_file_interface_ipv6: file_interface: *interface_ipv6 nas_server: _methods: create_file_interface: test_get_snapshot: unity: _methods: get_snap: *snapshot_1 test_get_snapshot_nonexistent_expt: unity: _methods: get_snap: _raise: UnityResourceNotFoundError: test_restore_snapshot: unity: _methods: get_snap: *snapshot_1 test_manage_cifs_share_with_server: filesystem: &filesystem__test_manage_cifs_share_with_server _properties: &filesystem_prop__test_manage_cifs_share_with_server <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' size_total: 5368709120 _methods: shrink: cifs_share: 
&cifs_share__test_manage_cifs_share_with_server _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_manage_cifs_share_with_server unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_manage_cifs_share_with_server test_manage_cifs_share: filesystem: &filesystem__test_manage_cifs_share _properties: &filesystem_prop__test_manage_cifs_share <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' size_total: 5368709120 _methods: shrink: cifs_share: &cifs_share__test_manage_cifs_share _properties: <<: *cifs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_manage_cifs_share unity: _methods: <<: *unity_base_method get_cifs_share: *cifs_share__test_manage_cifs_share test_manage_nfs_share_with_server: filesystem: &filesystem__test_manage_nfs_share_with_server _properties: &filesystem_prop__test_manage_nfs_share_with_server <<: *filesystem_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' size_total: 5368709120 _methods: extend: nfs_share: &nfs_share__test_manage_nfs_share_with_server _properties: <<: *nfs_share_base_prop name: '716100cc-e0b4-416b-ac27-d38dd019330d' filesystem: *filesystem__test_manage_nfs_share_with_server unity: _methods: <<: *unity_base_method get_nfs_share: *nfs_share__test_manage_nfs_share_with_server test_manage_nfs_share: filesystem: &filesystem__test_manage_nfs_share _properties: &filesystem_prop__test_manage_nfs_share <<: *filesystem_base_prop size_total: 5368709120 _methods: shrink: nfs_share: &nfs_share__test_manage_nfs_share _properties: <<: *nfs_share_base_prop filesystem: *filesystem__test_manage_nfs_share unity: _methods: <<: *unity_base_method get_nfs_share: *nfs_share__test_manage_nfs_share test_get_share_server_network_info: unity: _methods: <<: *unity_base_method get_nas_server: *nas_server ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/res_mock.py0000664000175000017500000003130400000000000027013 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import cfg from oslo_log import log from manila.share import configuration as conf from manila.share.drivers.dell_emc.plugins.unity import client from manila.share.drivers.dell_emc.plugins.unity import connection from manila.tests.db import fakes as db_fakes from manila.tests import fake_share from manila.tests.share.drivers.dell_emc.plugins.unity import fake_exceptions from manila.tests.share.drivers.dell_emc.plugins.unity import utils client.storops_ex = fake_exceptions connection.storops_ex = fake_exceptions LOG = log.getLogger(__name__) SYMBOL_TYPE = '_type' SYMBOL_PROPERTIES = '_properties' SYMBOL_METHODS = '_methods' SYMBOL_SIDE_EFFECT = '_side_effect' SYMBOL_RAISE = '_raise' CONF = cfg.CONF def _has_side_effect(node): return isinstance(node, dict) and SYMBOL_SIDE_EFFECT in node def _has_raise(node): return isinstance(node, dict) and SYMBOL_RAISE in node def fake_share_server(**kwargs): share_server = { 'instance_id': 'fake_instance_id', 'backend_details': {}, } share_server.update(kwargs) return db_fakes.FakeModel(share_server) def fake_network_info(**kwargs): network_info = { 'id': 'fake_net_id', 'name': 'net_name', 'subnet': [], } network_info.update(kwargs) return 
network_info def fake_server_detail(**kwargs): server_detail = { 'share_server_name': 'fake_server_name', } server_detail.update(kwargs) return server_detail def fake_security_services(**kwargs): return kwargs['services'] def fake_access(**kwargs): access = {} access.update(kwargs) return access class FakeEMCShareDriver(object): def __init__(self, dhss=None): if dhss in (True, False): CONF.set_default('driver_handles_share_servers', dhss) self.configuration = conf.Configuration(None) self.configuration.emc_share_backend = 'unity' self.configuration.emc_nas_server = '192.168.1.1' self.configuration.emc_nas_login = 'fake_user' self.configuration.emc_nas_password = 'fake_password' self.configuration.share_backend_name = 'EMC_NAS_Storage' self.configuration.vnx_server_meta_pool = 'nas_server_pool' self.configuration.unity_server_meta_pool = 'nas_server_pool' self.configuration.local_conf.max_over_subscription_ratio = 20 class FakeEMCShareDriverIPv6(object): def __init__(self, dhss=None): if dhss in (True, False): CONF.set_default('driver_handles_share_servers', dhss) self.configuration = conf.Configuration(None) self.configuration.emc_share_backend = 'unity' self.configuration.emc_nas_server = 'fa27:2a95:e734:0:0:0:0:01' self.configuration.emc_nas_login = 'fake_user' self.configuration.emc_nas_password = 'fake_password' self.configuration.share_backend_name = 'EMC_NAS_Storage' self.configuration.vnx_server_meta_pool = 'nas_server_pool' self.configuration.unity_server_meta_pool = 'nas_server_pool' self.configuration.local_conf.max_over_subscription_ratio = 20 STATS = dict( share_backend_name='Unity', vendor_name='EMC', storage_protocol='NFS_CIFS', driver_version='2.0.0,', pools=[], ) class DriverResourceMock(dict): fake_func_mapping = {} def __init__(self, yaml_file): yaml_dict = utils.load_yaml(yaml_file) if isinstance(yaml_dict, dict): for name, body in yaml_dict.items(): if isinstance(body, dict): props = body[SYMBOL_PROPERTIES] if isinstance(props, dict): for 
prop_name, prop_value in props.items(): if isinstance(prop_value, dict) and prop_value: # get the first key as the convert function func_name = list(prop_value.keys())[0] if func_name.startswith('_'): func = getattr(self, func_name) props[prop_name] = ( func(**prop_value[func_name])) if body[SYMBOL_TYPE] in self.fake_func_mapping: self[name] = ( self.fake_func_mapping[body[SYMBOL_TYPE]](**props)) class ManilaResourceMock(DriverResourceMock): fake_func_mapping = { 'share': fake_share.fake_share, 'snapshot': fake_share.fake_snapshot, 'network_info': fake_network_info, 'share_server': fake_share_server, 'server_detail': fake_server_detail, 'security_services': fake_security_services, 'access': fake_access, } def __init__(self, yaml_file): super(ManilaResourceMock, self).__init__(yaml_file) class StorageObjectMock(object): PROPS = 'props' def __init__(self, yaml_dict): self.__dict__[StorageObjectMock.PROPS] = {} props = yaml_dict.get(SYMBOL_PROPERTIES, None) if props: for k, v in props.items(): setattr(self, k, StoragePropertyMock(k, v)()) methods = yaml_dict.get(SYMBOL_METHODS, None) if methods: for k, v in methods.items(): setattr(self, k, StorageMethodMock(k, v)) def __setattr__(self, key, value): self.__dict__[StorageObjectMock.PROPS][key] = value def __getattr__(self, item): try: super(StorageObjectMock, self).__getattr__(item) except AttributeError: return self.__dict__[StorageObjectMock.PROPS][item] except KeyError: raise KeyError('No such method or property for mock object.') class StoragePropertyMock(mock.PropertyMock): def __init__(self, name, property_body): return_value = property_body side_effect = None # only support return_value and side_effect for property if _has_side_effect(property_body): side_effect = property_body[SYMBOL_SIDE_EFFECT] return_value = None if side_effect: super(StoragePropertyMock, self).__init__( name=name, side_effect=side_effect) elif return_value: super(StoragePropertyMock, self).__init__( name=name, 
return_value=_build_mock_object(return_value)) else: super(StoragePropertyMock, self).__init__( name=name, return_value=return_value) class StorageMethodMock(mock.Mock): def __init__(self, name, method_body): return_value = method_body exception = None side_effect = None # support return_value, side_effect and exception for method if _has_side_effect(method_body) or _has_raise(method_body): exception = method_body.get(SYMBOL_RAISE, None) side_effect = method_body.get(SYMBOL_SIDE_EFFECT, None) return_value = None if exception: if isinstance(exception, dict) and exception: ex_name = list(exception.keys())[0] ex = getattr(fake_exceptions, ex_name) super(StorageMethodMock, self).__init__( name=name, side_effect=ex(exception[ex_name])) elif side_effect: super(StorageMethodMock, self).__init__( name=name, side_effect=_build_mock_object(side_effect)) elif return_value is not None: super(StorageMethodMock, self).__init__( name=name, return_value=_build_mock_object(return_value)) else: super(StorageMethodMock, self).__init__( name=name, return_value=None) class StorageResourceMock(dict): def __init__(self, yaml_file): yaml_dict = utils.load_yaml(yaml_file) if isinstance(yaml_dict, dict): for section, sec_body in yaml_dict.items(): self[section] = {} if isinstance(sec_body, dict): for obj_name, obj_body in sec_body.items(): self[section][obj_name] = _build_mock_object(obj_body) def _is_mock_object(yaml_info): return (isinstance(yaml_info, dict) and (SYMBOL_PROPERTIES in yaml_info or SYMBOL_METHODS in yaml_info)) def _build_mock_object(yaml_dict): if _is_mock_object(yaml_dict): return StorageObjectMock(yaml_dict) elif isinstance(yaml_dict, dict): return {k: _build_mock_object(v) for k, v in yaml_dict.items()} elif isinstance(yaml_dict, list): return [_build_mock_object(each) for each in yaml_dict] else: return yaml_dict manila_res = ManilaResourceMock('mocked_manila.yaml') unity_res = StorageResourceMock('mocked_unity.yaml') STORAGE_RES_MAPPING = { 'TestClient': unity_res, 
'TestConnection': unity_res, 'TestConnectionDHSSFalse': unity_res, } def mock_input(resource): def inner_dec(func): def decorated(cls, *args, **kwargs): if cls._testMethodName in resource: storage_res = resource[cls._testMethodName] return func(cls, storage_res, *args, **kwargs) return decorated return inner_dec mock_client_input = mock_input(unity_res) def patch_client(func): def client_decorator(cls, *args, **kwargs): storage_res = {} if func.__name__ in STORAGE_RES_MAPPING[cls.__class__.__name__]: storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) with utils.patch_system as patched_system: if 'unity' in storage_res: patched_system.return_value = storage_res['unity'] _client = client.UnityClient(host='fake_host', username='fake_user', password='fake_passwd') return func(cls, _client, *args, **kwargs) return client_decorator def mock_driver_input(resource): def inner_dec(func): def decorated(cls, *args, **kwargs): return func(cls, resource, *args, **kwargs) return decorated return inner_dec mock_manila_input = mock_driver_input(manila_res) def patch_connection_init(func): def connection_decorator(cls, *args, **kwargs): storage_res = {} if func.__name__ in STORAGE_RES_MAPPING[cls.__class__.__name__]: storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) with utils.patch_system as patched_system: if 'unity' in storage_res: patched_system.return_value = storage_res['unity'] conn = connection.UnityStorageConnection(LOG) return func(cls, conn, *args, **kwargs) return connection_decorator def do_connection_connect(conn, res): conn.config = None conn.client = client.UnityClient(host='fake_host', username='fake_user', password='fake_passwd') conn.pool_conf = ['pool_1', 'pool_2'] conn.pool_set = set(['pool_1', 'pool_2']) conn.reserved_percentage = 0 conn.reserved_snapshot_percentage = 0 conn.reserved_share_extend_percentage = 0 conn.max_over_subscription_ratio = 20 conn.port_set = set(['spa_eth1', 'spa_eth2']) 
conn.nas_server_pool = StorageObjectMock(res['nas_server_pool']) conn.storage_processor = StorageObjectMock(res['sp_a']) conn.report_default_filter_function = False def patch_connection(func): def connection_decorator(cls, *args, **kwargs): storage_res = {} if func.__name__ in STORAGE_RES_MAPPING[cls.__class__.__name__]: storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) with utils.patch_system as patched_system: conn = connection.UnityStorageConnection(LOG) if 'unity' in storage_res: patched_system.return_value = storage_res['unity'] do_connection_connect( conn, STORAGE_RES_MAPPING[cls.__class__.__name__]) return func(cls, conn, *args, **kwargs) return connection_decorator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/test_client.py0000664000175000017500000002344400000000000027534 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_utils import units from manila import exception from manila import test from manila.tests.share.drivers.dell_emc.plugins.unity import fake_exceptions from manila.tests.share.drivers.dell_emc.plugins.unity import res_mock @ddt.ddt class TestClient(test.TestCase): @res_mock.mock_client_input @res_mock.patch_client def test_create_cifs_share__existed_expt(self, client, mocked_input): resource = mocked_input['filesystem'] share = mocked_input['cifs_share'] new_share = client.create_cifs_share(resource, share.name) self.assertEqual(share.name, new_share.name) @res_mock.mock_client_input @res_mock.patch_client def test_create_nfs_share__existed_expt(self, client, mocked_input): resource = mocked_input['filesystem'] share = mocked_input['nfs_share'] new_share = client.create_nfs_share(resource, share.name) self.assertEqual(share.name, new_share.name) @res_mock.mock_client_input @res_mock.patch_client def test_create_nfs_filesystem_and_share(self, client, mocked_input): pool = mocked_input['pool'] nas_server = mocked_input['nas_server'] share = mocked_input['nfs_share'] client.create_nfs_filesystem_and_share( pool, nas_server, share.name, share.size) @res_mock.mock_client_input @res_mock.patch_client def test_get_share_with_invalid_proto(self, client, mocked_input): share = mocked_input['share'] self.assertRaises(exception.BadConfigurationException, client.get_share, share.name, 'fake_proto') @res_mock.mock_client_input @res_mock.patch_client def test_create_filesystem__existed_expt(self, client, mocked_input): pool = mocked_input['pool'] nas_server = mocked_input['nas_server'] filesystem = mocked_input['filesystem'] new_filesystem = client.create_filesystem(pool, nas_server, filesystem.name, filesystem.size, filesystem.proto) self.assertEqual(filesystem.name, new_filesystem.name) @res_mock.mock_client_input @res_mock.patch_client def test_delete_filesystem__nonexistent_expt(self, client, mocked_input): filesystem = 
mocked_input['filesystem'] client.delete_filesystem(filesystem) @res_mock.mock_client_input @res_mock.patch_client def test_create_nas_server__existed_expt(self, client, mocked_input): sp = mocked_input['sp'] pool = mocked_input['pool'] nas_server = mocked_input['nas_server'] new_nas_server = client.create_nas_server(nas_server.name, sp, pool) self.assertEqual(nas_server.name, new_nas_server.name) @res_mock.mock_client_input @res_mock.patch_client def test_delete_nas_server__nonexistent_expt(self, client, mocked_input): nas_server = mocked_input['nas_server'] client.delete_nas_server(nas_server.name) @res_mock.mock_client_input @res_mock.patch_client def test_create_dns_server__existed_expt(self, client, mocked_input): nas_server = mocked_input['nas_server'] client.create_dns_server(nas_server, 'fake_domain', 'fake_dns_ip') @res_mock.mock_client_input @res_mock.patch_client def test_create_interface__existed_expt(self, client, mocked_input): nas_server = mocked_input['nas_server'] self.assertRaises(exception.IPAddressInUse, client.create_interface, nas_server, 'fake_ip_addr', 'fake_mask', 'fake_gateway', port_id='fake_port_id') @res_mock.mock_client_input @res_mock.patch_client def test_enable_cifs_service__existed_expt(self, client, mocked_input): nas_server = mocked_input['nas_server'] client.enable_cifs_service( nas_server, 'domain_name', 'fake_user', 'fake_passwd') @res_mock.mock_client_input @res_mock.patch_client def test_enable_nfs_service__existed_expt(self, client, mocked_input): nas_server = mocked_input['nas_server'] client.enable_nfs_service(nas_server) @res_mock.mock_client_input @res_mock.patch_client def test_create_snapshot__existed_expt(self, client, mocked_input): nas_server = mocked_input['filesystem'] exp_snap = mocked_input['snapshot'] client.create_snapshot(nas_server, exp_snap.name) @res_mock.mock_client_input @res_mock.patch_client def test_create_snap_of_snap__existed_expt(self, client, mocked_input): snapshot = mocked_input['src_snapshot'] 
dest_snap = mocked_input['dest_snapshot'] new_snap = client.create_snap_of_snap(snapshot, dest_snap.name) self.assertEqual(dest_snap.name, new_snap.name) @res_mock.mock_client_input @res_mock.patch_client def test_delete_snapshot__nonexistent_expt(self, client, mocked_input): snapshot = mocked_input['snapshot'] client.delete_snapshot(snapshot) @res_mock.patch_client def test_cifs_deny_access__nonexistentuser_expt(self, client): try: client.cifs_deny_access('fake_share_name', 'fake_username') except fake_exceptions.UnityAclUserNotFoundError: self.fail("UnityAclUserNotFoundError raised unexpectedly!") @res_mock.patch_client def test_nfs_deny_access__nonexistent_expt(self, client): client.nfs_deny_access('fake_share_name', 'fake_ip_addr') @res_mock.patch_client def test_get_storage_processor(self, client): sp = client.get_storage_processor(sp_id='SPA') self.assertEqual('SPA', sp.name) @res_mock.mock_client_input @res_mock.patch_client def test_extend_filesystem(self, client, mocked_input): fs = mocked_input['fs'] size = client.extend_filesystem(fs, 5) self.assertEqual(5 * units.Gi, size) @res_mock.mock_client_input @res_mock.patch_client def test_shrink_filesystem(self, client, mocked_input): fs = mocked_input['fs'] size = client.shrink_filesystem('fake_share_id_1', fs, 4) self.assertEqual(4 * units.Gi, size) @res_mock.mock_client_input @res_mock.patch_client def test_shrink_filesystem_size_too_small(self, client, mocked_input): fs = mocked_input['fs'] self.assertRaises(exception.ShareShrinkingPossibleDataLoss, client.shrink_filesystem, 'fake_share_id_2', fs, 4) @res_mock.patch_client def test_get_file_ports(self, client): ports = client.get_file_ports() self.assertEqual(2, len(ports)) @res_mock.patch_client def test_get_tenant(self, client): tenant = client.get_tenant('test', 5) self.assertEqual('tenant_1', tenant.id) @res_mock.patch_client def test_get_tenant_preexist(self, client): tenant = client.get_tenant('test', 6) self.assertEqual('tenant_1', tenant.id) 
@res_mock.patch_client def test_get_tenant_name_inuse_but_vlan_not_used(self, client): self.assertRaises(fake_exceptions.UnityTenantNameInUseError, client.get_tenant, 'test', 7) @res_mock.patch_client def test_get_tenant_for_vlan_0(self, client): tenant = client.get_tenant('tenant', 0) self.assertIsNone(tenant) @res_mock.patch_client def test_get_tenant_for_vlan_already_has_interfaces(self, client): tenant = client.get_tenant('tenant', 3) self.assertEqual('tenant_1', tenant.id) @res_mock.mock_client_input @res_mock.patch_client def test_create_file_interface_ipv6(self, client, mocked_input): mock_nas_server = mock.Mock() mock_nas_server.create_file_interface = mock.Mock(return_value=None) mock_file_interface = mocked_input['file_interface'] mock_port_id = mock.Mock() client.create_interface(mock_nas_server, mock_file_interface.ip_addr, netmask=None, gateway=mock_file_interface.gateway, port_id=mock_port_id, vlan_id=mock_file_interface.vlan_id, prefix_length=mock_file_interface.prefix_length ) mock_nas_server.create_file_interface.assert_called_once_with( mock_port_id, mock_file_interface.ip_addr, netmask=None, v6_prefix_length=mock_file_interface.prefix_length, gateway=mock_file_interface.gateway, vlan_id=mock_file_interface.vlan_id) @res_mock.patch_client def test_get_snapshot(self, client): snapshot = client.get_snapshot('Snapshot_1') self.assertEqual('snapshot_1', snapshot.id) @res_mock.patch_client def test_restore_snapshot(self, client): snapshot = client.get_snapshot('Snapshot_1') rst = client.restore_snapshot(snapshot.name) self.assertIs(True, rst) snapshot.restore.assert_called_once_with(delete_backup=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/test_connection.py0000664000175000017500000011110000000000000030400 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import ddt from manila import exception from manila.share.drivers.dell_emc.common.enas import utils as enas_utils from manila import test from manila.tests.share.drivers.dell_emc.plugins.unity import fake_exceptions from manila.tests.share.drivers.dell_emc.plugins.unity import res_mock from manila.tests.share.drivers.dell_emc.plugins.unity import utils from oslo_utils import units from unittest import mock @ddt.ddt class TestConnection(test.TestCase): client = None @classmethod def setUpClass(cls): cls.emc_share_driver = res_mock.FakeEMCShareDriver() @res_mock.patch_connection_init def test_connect(self, connection): connection.connect(res_mock.FakeEMCShareDriver(dhss=True), None) @res_mock.patch_connection_init def test_connect_with_ipv6(self, connection): connection.connect(res_mock.FakeEMCShareDriverIPv6( dhss=True), None) @res_mock.patch_connection def test_connect__invalid_pool_configuration(self, connection): f = connection.client.system.get_pool f.side_effect = fake_exceptions.UnityResourceNotFoundError() self.assertRaises(exception.BadConfigurationException, connection._config_pool, 'faked_pool_name') @res_mock.mock_manila_input @res_mock.patch_connection def test_create_nfs_share(self, connection, mocked_input): share = mocked_input['nfs_share'] share_server = mocked_input['share_server'] location = connection.create_share(None, share, share_server) exp_location = [ {'path': 
'fake_ip_addr_1:/cb532599-8dc6-4c3e-bb21-74ea54be566c'}, {'path': 'fake_ip_addr_2:/cb532599-8dc6-4c3e-bb21-74ea54be566c'}, ] exp_location = sorted(exp_location, key=lambda x: sorted(x['path'])) location = sorted(location, key=lambda x: sorted(x['path'])) self.assertEqual(exp_location, location) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_cifs_share(self, connection, mocked_input): share = mocked_input['cifs_share'] share_server = mocked_input['share_server'] location = connection.create_share(None, share, share_server) exp_location = [ {'path': r'\\fake_ip_addr_1\716100cc-e0b4-416b-ac27-d38dd019330d'}, {'path': r'\\fake_ip_addr_2\716100cc-e0b4-416b-ac27-d38dd019330d'}, ] exp_location = sorted(exp_location, key=lambda x: sorted(x['path'])) location = sorted(location, key=lambda x: sorted(x['path'])) self.assertEqual(exp_location, location) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_share_with_invalid_proto(self, connection, mocked_input): share = mocked_input['invalid_share'] share_server = mocked_input['share_server'] self.assertRaises(exception.InvalidShare, connection.create_share, None, share, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_share_without_share_server(self, connection, mocked_input): share = mocked_input['cifs_share'] self.assertRaises(exception.InvalidInput, connection.create_share, None, share, None) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_share__no_server_name_in_backend_details(self, connection, mocked_input): share = mocked_input['cifs_share'] share_server = { 'backend_details': {'share_server_name': None}, 'id': 'test', 'identifier': '', } self.assertRaises(exception.InvalidInput, connection.create_share, None, share, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_share_with_invalid_share_server(self, connection, mocked_input): share = mocked_input['cifs_share'] share_server = 
mocked_input['share_server'] self.assertRaises(exception.EMCUnityError, connection.create_share, None, share, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_delete_share(self, connection, mocked_input): share = mocked_input['cifs_share'] share_server = mocked_input['share_server'] connection.delete_share(None, share, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_delete_share__with_invalid_share(self, connection, mocked_input): share = mocked_input['cifs_share'] connection.delete_share(None, share, None) @res_mock.mock_manila_input @res_mock.patch_connection def test_delete_share__create_from_snap(self, connection, mocked_input): share = mocked_input['cifs_share'] share_server = mocked_input['share_server'] connection.delete_share(None, share, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_delete_share__create_from_snap_but_not_isolated(self, connection, mocked_input): share = mocked_input['cifs_share'] share_server = mocked_input['share_server'] connection.delete_share(None, share, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_delete_share__but_not_isolated(self, connection, mocked_input): share = mocked_input['cifs_share'] share_server = mocked_input['share_server'] connection.delete_share(None, share, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_shrink_cifs_share(self, connection, mocked_input): share = mocked_input['shrink_cifs_share'] new_size = 4 * units.Gi connection.shrink_share(share, new_size) @res_mock.mock_manila_input @res_mock.patch_connection def test_shrink_nfs_share(self, connection, mocked_input): share = mocked_input['shrink_nfs_share'] new_size = 4 * units.Gi connection.shrink_share(share, new_size) @res_mock.mock_manila_input @res_mock.patch_connection def test_extend_cifs_share(self, connection, mocked_input): share = mocked_input['cifs_share'] share_server = mocked_input['share_server'] 
new_size = 50 * units.Gi connection.extend_share(share, new_size, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_extend_nfs_share(self, connection, mocked_input): share = mocked_input['nfs_share'] share_server = mocked_input['share_server'] new_size = 50 * units.Gi connection.extend_share(share, new_size, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_extend_share__create_from_snap(self, connection, mocked_input): share = mocked_input['cifs_share'] share_server = mocked_input['share_server'] new_size = 50 * units.Gi self.assertRaises(exception.ShareExtendingError, connection.extend_share, share, new_size, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_shrink_share_create_from_snap(self, connection, mocked_input): share = mocked_input['shrink_cifs_share'] share_server = mocked_input['share_server'] new_size = 4 * units.Gi self.assertRaises(exception.ShareShrinkingError, connection.shrink_share, share, new_size, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_snapshot_from_filesystem(self, connection, mocked_input): snapshot = mocked_input['snapshot'] share_server = mocked_input['share_server'] result = connection.create_snapshot(None, snapshot, share_server) self.assertEqual('ab411797-b1cf-4035-bf14-8771a7bf1805', result['provider_location']) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_snapshot_from_snapshot(self, connection, mocked_input): snapshot = mocked_input['snapshot'] share_server = mocked_input['share_server'] connection.create_snapshot(None, snapshot, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_delete_snapshot(self, connection, mocked_input): snapshot = mocked_input['snapshot'] share_server = mocked_input['share_server'] connection.delete_snapshot(None, snapshot, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_ensure_share_exists(self, 
connection, mocked_input): share = mocked_input['cifs_share'] connection.ensure_share(None, share, None) @res_mock.mock_manila_input @res_mock.patch_connection def test_ensure_share_not_exists(self, connection, mocked_input): share = mocked_input['cifs_share'] self.assertRaises(exception.ShareNotFound, connection.ensure_share, None, share, None) @res_mock.patch_connection def test_update_share_stats(self, connection): stat_dict = copy.deepcopy(res_mock.STATS) connection.update_share_stats(stat_dict) self.assertEqual(5, len(stat_dict)) pool = stat_dict['pools'][0] self.assertEqual('pool_1', pool['pool_name']) self.assertEqual( enas_utils.bytes_to_gb(500000.0), pool['total_capacity_gb']) self.assertEqual(False, pool['qos']) self.assertEqual( enas_utils.bytes_to_gb(30000.0), pool['provisioned_capacity_gb']) self.assertEqual(20, pool['max_over_subscription_ratio']) self.assertEqual( enas_utils.bytes_to_gb(10000.0), pool['allocated_capacity_gb']) self.assertEqual(0, pool['reserved_percentage']) self.assertEqual(0, pool['reserved_snapshot_percentage']) self.assertEqual(0, pool['reserved_share_extend_percentage']) self.assertTrue(pool['thin_provisioning']) self.assertEqual( enas_utils.bytes_to_gb(490000.0), pool['free_capacity_gb']) @res_mock.patch_connection def test_update_share_stats__nonexistent_pools(self, connection): stat_dict = copy.deepcopy(res_mock.STATS) self.assertRaises(exception.EMCUnityError, connection.update_share_stats, stat_dict) @res_mock.mock_manila_input @res_mock.patch_connection def test_get_pool(self, connection, mocked_input): share = mocked_input['cifs_share'] connection.get_pool(share) @utils.patch_find_ports_by_mtu @res_mock.mock_manila_input @res_mock.patch_connection def test_setup_server(self, connection, mocked_input, find_ports): find_ports.return_value = {'SPA': {'spa_eth1'}} network_info = mocked_input['network_info__flat'] server_info = connection.setup_server(network_info) self.assertEqual( {'share_server_name': 
'78fd845f-8e7d-487f-bfde-051d83e78103'}, server_info) self.assertIsNone(connection.client.system.create_nas_server. call_args[1]['tenant']) @utils.patch_find_ports_by_mtu @res_mock.mock_manila_input @res_mock.patch_connection def test_setup_server__vlan_network(self, connection, mocked_input, find_ports): find_ports.return_value = {'SPA': {'spa_eth1'}} network_info = mocked_input['network_info__vlan'] connection.setup_server(network_info) self.assertEqual('tenant_1', connection.client.system.create_nas_server .call_args[1]['tenant'].id) @utils.patch_find_ports_by_mtu @res_mock.mock_manila_input @res_mock.patch_connection def test_setup_server__vxlan_network(self, connection, mocked_input, find_ports): find_ports.return_value = {'SPA': {'spa_eth1'}} network_info = mocked_input['network_info__vxlan'] self.assertRaises(exception.NetworkBadConfigurationException, connection.setup_server, network_info) @utils.patch_find_ports_by_mtu @res_mock.mock_manila_input @res_mock.patch_connection def test_setup_server__active_directory(self, connection, mocked_input, find_ports): find_ports.return_value = {'SPA': {'spa_eth1'}} network_info = mocked_input['network_info__active_directory'] connection.setup_server(network_info) @utils.patch_find_ports_by_mtu @res_mock.mock_manila_input @res_mock.patch_connection def test_setup_server__kerberos(self, connection, mocked_input, find_ports): find_ports.return_value = {'SPA': {'spa_eth1'}} network_info = mocked_input['network_info__kerberos'] connection.setup_server(network_info) @utils.patch_find_ports_by_mtu @res_mock.mock_manila_input @res_mock.patch_connection def test_setup_server__throw_exception(self, connection, mocked_input, find_ports): find_ports.return_value = {'SPA': {'spa_eth1'}} network_info = mocked_input['network_info__flat'] self.assertRaises(fake_exceptions.UnityException, connection.setup_server, network_info) @res_mock.mock_manila_input @res_mock.patch_connection def test_teardown_server(self, connection, 
mocked_input): server_detail = mocked_input['server_detail'] security_services = mocked_input['security_services'] connection.teardown_server(server_detail, security_services) @res_mock.mock_manila_input @res_mock.patch_connection def test_teardown_server__no_server_detail(self, connection, mocked_input): security_services = mocked_input['security_services'] connection.teardown_server(None, security_services) @res_mock.mock_manila_input @res_mock.patch_connection def test_teardown_server__no_share_server_name(self, connection, mocked_input): server_detail = {'share_server_name': None} security_services = mocked_input['security_services'] connection.teardown_server(server_detail, security_services) @ddt.data({'configured_pools': None, 'matched_pools': {'pool_1', 'pool_2', 'nas_server_pool'}}, {'configured_pools': ['*'], 'matched_pools': {'pool_1', 'pool_2', 'nas_server_pool'}}, {'configured_pools': ['pool_*'], 'matched_pools': {'pool_1', 'pool_2'}}, {'configured_pools': ['*pool'], 'matched_pools': {'nas_server_pool'}}, {'configured_pools': ['nas_server_pool'], 'matched_pools': {'nas_server_pool'}}, {'configured_pools': ['nas_*', 'pool_*'], 'matched_pools': {'pool_1', 'pool_2', 'nas_server_pool'}}) @res_mock.patch_connection @ddt.unpack def test__get_managed_pools(self, connection, mocked_input): configured_pools = mocked_input['configured_pools'] matched_pool = mocked_input['matched_pools'] pools = connection._get_managed_pools(configured_pools) self.assertEqual(matched_pool, pools) @res_mock.patch_connection def test__get_managed_pools__invalid_pool_configuration(self, connection): configured_pools = 'fake_pool' self.assertRaises(exception.BadConfigurationException, connection._get_managed_pools, configured_pools) @res_mock.patch_connection def test_validate_port_configuration(self, connection): sp_ports_map = connection.validate_port_configuration(['sp*']) self.assertEqual({'spa_eth1', 'spa_eth2', 'spa_la_4'}, sp_ports_map['SPA']) self.assertEqual({'spb_eth1'}, 
sp_ports_map['SPB']) @res_mock.patch_connection def test_validate_port_configuration_exception(self, connection): self.assertRaises(exception.BadConfigurationException, connection.validate_port_configuration, ['xxxx*']) @res_mock.patch_connection def test__get_pool_name_from_host__no_pool_name(self, connection): host = 'openstack@Unity' self.assertRaises(exception.InvalidHost, connection._get_pool_name_from_host, host) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_cifs_share_from_snapshot(self, connection, mocked_input): share = mocked_input['cifs_share'] snapshot = mocked_input['snapshot'] share_server = mocked_input['share_server'] connection.create_share_from_snapshot(None, share, snapshot, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_nfs_share_from_snapshot(self, connection, mocked_input): share = mocked_input['nfs_share'] snapshot = mocked_input['snapshot'] share_server = mocked_input['share_server'] connection.create_share_from_snapshot(None, share, snapshot, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_create_share_from_snapshot_no_server_name(self, connection, mocked_input): share = mocked_input['nfs_share'] snapshot = mocked_input['snapshot'] share_server = mocked_input['share_server__no_share_server_name'] self.assertRaises(exception.EMCUnityError, connection.create_share_from_snapshot, None, share, snapshot, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_clear_share_access_cifs(self, connection, mocked_input): share = mocked_input['cifs_share'] self.assertRaises(fake_exceptions.UnityException, connection.clear_access, share) @res_mock.mock_manila_input @res_mock.patch_connection def test_clear_share_access_nfs(self, connection, mocked_input): share = mocked_input['nfs_share'] self.assertRaises(fake_exceptions.UnityException, connection.clear_access, share) @res_mock.mock_manila_input @res_mock.patch_connection def 
test_allow_rw_cifs_share_access(self, connection, mocked_input): share = mocked_input['cifs_share'] rw_access = mocked_input['cifs_rw_access'] share_server = mocked_input['share_server'] connection.allow_access(None, share, rw_access, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_update_access_allow_rw(self, connection, mocked_input): share = mocked_input['cifs_share'] rw_access = mocked_input['cifs_rw_access'] share_server = mocked_input['share_server'] connection.update_access(None, share, None, [rw_access], None, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_update_access_recovery(self, connection, mocked_input): share = mocked_input['cifs_share'] rw_access = mocked_input['cifs_rw_access'] share_server = mocked_input['share_server'] connection.update_access(None, share, [rw_access], None, None, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_allow_ro_cifs_share_access(self, connection, mocked_input): share = mocked_input['cifs_share'] rw_access = mocked_input['cifs_ro_access'] share_server = mocked_input['share_server'] connection.allow_access(None, share, rw_access, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_allow_rw_nfs_share_access(self, connection, mocked_input): share = mocked_input['nfs_share'] rw_access = mocked_input['nfs_rw_access'] share_server = mocked_input['share_server'] connection.allow_access(None, share, rw_access, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_allow_rw_nfs_share_access_cidr(self, connection, mocked_input): share = mocked_input['nfs_share'] rw_access = mocked_input['nfs_rw_access_cidr'] share_server = mocked_input['share_server'] connection.allow_access(None, share, rw_access, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_allow_ro_nfs_share_access(self, connection, mocked_input): share = mocked_input['nfs_share'] ro_access = 
mocked_input['nfs_ro_access'] share_server = mocked_input['share_server'] connection.allow_access(None, share, ro_access, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_deny_cifs_share_access(self, connection, mocked_input): share = mocked_input['cifs_share'] rw_access = mocked_input['cifs_rw_access'] share_server = mocked_input['share_server'] connection.deny_access(None, share, rw_access, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_deny_nfs_share_access(self, connection, mocked_input): share = mocked_input['nfs_share'] rw_access = mocked_input['nfs_rw_access'] share_server = mocked_input['share_server'] connection.deny_access(None, share, rw_access, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_update_access_deny_nfs(self, connection, mocked_input): share = mocked_input['nfs_share'] rw_access = mocked_input['nfs_rw_access'] connection.update_access(None, share, None, None, [rw_access], None) @res_mock.mock_manila_input @res_mock.patch_connection def test__validate_cifs_share_access_type(self, connection, mocked_input): share = mocked_input['cifs_share'] rw_access = mocked_input['invalid_access'] self.assertRaises(exception.InvalidShareAccess, connection._validate_share_access_type, share, rw_access) @res_mock.mock_manila_input @res_mock.patch_connection def test__validate_nfs_share_access_type(self, connection, mocked_input): share = mocked_input['nfs_share'] rw_access = mocked_input['invalid_access'] self.assertRaises(exception.InvalidShareAccess, connection._validate_share_access_type, share, rw_access) @res_mock.patch_connection def test_get_network_allocations_number(self, connection): self.assertEqual(1, connection.get_network_allocations_number()) @res_mock.patch_connection def test_get_proto_enum(self, connection): self.assertIn('FSSupportedProtocolEnum.CIFS', str(connection._get_proto_enum('CIFS'))) self.assertIn('FSSupportedProtocolEnum.NFS', 
str(connection._get_proto_enum('nfs'))) @res_mock.mock_manila_input @res_mock.patch_connection def test_allow_access_error_access_level(self, connection, mocked_input): share = mocked_input['nfs_share'] rw_access = mocked_input['invalid_access'] self.assertRaises(exception.InvalidShareAccessLevel, connection.allow_access, None, share, rw_access) @res_mock.patch_connection def test__create_network_interface_ipv6(self, connection): connection.client.create_interface = mock.Mock(return_value=None) nas_server = mock.Mock() network = {'ip_address': '2001:db8:0:1:f816:3eff:fe76:35c4', 'cidr': '2001:db8:0:1:f816:3eff:fe76:35c4/64', 'gateway': '2001:db8:0:1::1', 'segmentation_id': '201'} port_id = mock.Mock() connection._create_network_interface(nas_server, network, port_id) expected = {'ip_addr': '2001:db8:0:1:f816:3eff:fe76:35c4', 'netmask': None, 'gateway': '2001:db8:0:1::1', 'port_id': port_id, 'vlan_id': '201', 'prefix_length': '64'} connection.client.create_interface.assert_called_once_with(nas_server, **expected) @res_mock.patch_connection def test__create_network_interface_ipv4(self, connection): connection.client.create_interface = mock.Mock(return_value=None) nas_server = mock.Mock() network = {'ip_address': '192.168.1.10', 'cidr': '192.168.1.10/24', 'gateway': '192.168.1.1', 'segmentation_id': '201'} port_id = mock.Mock() connection._create_network_interface(nas_server, network, port_id) expected = {'ip_addr': '192.168.1.10', 'netmask': '255.255.255.0', 'gateway': '192.168.1.1', 'port_id': port_id, 'vlan_id': '201'} connection.client.create_interface.assert_called_once_with(nas_server, **expected) @res_mock.mock_manila_input @res_mock.patch_connection def test_revert_to_snapshot(self, connection, mocked_input): context = mock.Mock() snapshot = mocked_input['snapshot'] share_access_rules = [mocked_input['nfs_rw_access'], ] snapshot_access_rules = [mocked_input['nfs_rw_access'], ] connection.revert_to_snapshot(context, snapshot, share_access_rules, 
snapshot_access_rules) @res_mock.patch_connection_init def test_dhss_false_connect_without_nas_server(self, connection): self.assertRaises(exception.BadConfigurationException, connection.connect, res_mock.FakeEMCShareDriver(dhss=False), None) @res_mock.mock_manila_input @res_mock.patch_connection def test_dhss_false_create_nfs_share(self, connection, mocked_input): connection.driver_handles_share_servers = False connection.unity_share_server = 'test-dhss-false-427f-b4de-0ad83el5j8' share = mocked_input['dhss_false_nfs_share'] share_server = mocked_input['share_server'] location = connection.create_share(None, share, share_server) exp_location = [ {'path': 'fake_ip_addr_1:/cb532599-8dc6-4c3e-bb21-74ea54be566c'}, {'path': 'fake_ip_addr_2:/cb532599-8dc6-4c3e-bb21-74ea54be566c'}, ] exp_location = sorted(exp_location, key=lambda x: sorted(x['path'])) location = sorted(location, key=lambda x: sorted(x['path'])) self.assertEqual(exp_location, location) @res_mock.mock_manila_input @res_mock.patch_connection def test_dhss_false_create_cifs_share(self, connection, mocked_input): connection.driver_handles_share_servers = False connection.unity_share_server = 'test-dhss-false-427f-b4de-0ad83el5j8' share = mocked_input['dhss_false_cifs_share'] share_server = mocked_input['share_server'] location = connection.create_share(None, share, share_server) exp_location = [ {'path': r'\\fake_ip_addr_1\716100cc-e0b4-416b-ac27-d38dd019330d'}, {'path': r'\\fake_ip_addr_2\716100cc-e0b4-416b-ac27-d38dd019330d'}, ] exp_location = sorted(exp_location, key=lambda x: sorted(x['path'])) location = sorted(location, key=lambda x: sorted(x['path'])) self.assertEqual(exp_location, location) @res_mock.mock_manila_input @res_mock.patch_connection def test_get_share_server_id(self, connection, mocked_input): share_server = mocked_input['share_server'] result = connection._get_server_name(share_server) expected = 'c2e48947-98ed-4eae-999b-fa0b83731dfd' self.assertEqual(expected, result) 
@res_mock.mock_manila_input @res_mock.patch_connection def test_manage_snapshot(self, connection, mocked_input): snapshot = mocked_input['snapshot'] driver_options = {'size': 8} result = connection.manage_existing_snapshot(snapshot, driver_options, None) expected = {'provider_location': '23047-ef2344-4563cvw-r4323cwed', 'size': 8} self.assertEqual(expected, result) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_snapshot_wrong_size_type(self, connection, mocked_input): snapshot = mocked_input['snapshot'] driver_options = {'size': 'str_size'} self.assertRaises(exception.ManageInvalidShareSnapshot, connection.manage_existing_snapshot, snapshot, driver_options, None) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_snapshot_with_server(self, connection, mocked_input): share_server = mocked_input['share_server'] snapshot = mocked_input['snapshot'] driver_options = {} result = connection.manage_existing_snapshot_with_server( snapshot, driver_options, share_server) expected = {'provider_location': '23047-ef2344-4563cvw-r4323cwed', 'size': 1} self.assertEqual(expected, result) @res_mock.mock_manila_input @res_mock.patch_connection def test_get_share_server_network_info(self, connection, mocked_input): share_server = mocked_input['share_server'] identifier = 'test_manage_nas_server' result = connection.get_share_server_network_info(None, share_server, identifier, None) expected = ['fake_ip_addr_1', 'fake_ip_addr_2'] self.assertEqual(expected, result) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_server(self, connection, mocked_input): share_server = mocked_input['share_server'] identifier = 'test_manage_nas_server' result = connection.manage_server(None, share_server, identifier, None) expected = (identifier, None) self.assertEqual(expected, result) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_nfs_share(self, connection, mocked_input): share = mocked_input['managed_nfs_share'] 
driver_options = {'size': 3} result = connection.manage_existing(share, driver_options) path = '172.168.201.201:/ad1caddf-097e-462c-8ac6-5592ed6fe22f' expected = {'export_locations': {'path': path}, 'size': 3} self.assertEqual(expected, result) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_nfs_share_with_server(self, connection, mocked_input): share = mocked_input['managed_nfs_share'] share_server = mocked_input['share_server'] driver_options = {'size': 8} result = connection.manage_existing_with_server(share, driver_options, share_server) path = '172.168.201.201:/ad1caddf-097e-462c-8ac6-5592ed6fe22f' expected = {'export_locations': {'path': path}, 'size': 8} self.assertEqual(expected, result) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_cifs_share(self, connection, mocked_input): share = mocked_input['managed_cifs_share'] driver_options = {'size': 3} result = connection.manage_existing(share, driver_options) path = '\\\\10.0.0.1\\bd23121f-hg4e-432c-12cd2c5-bb93dfghe212' expected = {'export_locations': {'path': path}, 'size': 3} self.assertEqual(expected, result) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_cifs_share_with_server(self, connection, mocked_input): connection.client.create_interface = mock.Mock(return_value=None) share = mocked_input['managed_cifs_share'] share_server = mocked_input['share_server'] driver_options = {'size': 3} result = connection.manage_existing_with_server(share, driver_options, share_server) path = '\\\\10.0.0.1\\bd23121f-hg4e-432c-12cd2c5-bb93dfghe212' expected = {'export_locations': {'path': path}, 'size': 3} self.assertEqual(expected, result) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_with_wrong_size_data_type(self, connection, mocked_input): connection.client.create_interface = mock.Mock(return_value=None) share = mocked_input['managed_nfs_share'] share_server = mocked_input['share_server'] driver_options = {'size': 
'str_size'} self.assertRaises(exception.ManageInvalidShare, connection.manage_existing_with_server, share, driver_options, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_without_size(self, connection, mocked_input): connection.client.create_interface = mock.Mock(return_value=None) share = mocked_input['managed_nfs_share'] share_server = mocked_input['share_server'] driver_options = {'size': 0} result = connection.manage_existing_with_server(share, driver_options, share_server) path = '172.168.201.201:/ad1caddf-097e-462c-8ac6-5592ed6fe22f' expected = {'export_locations': {'path': path}, 'size': 1} self.assertEqual(expected, result) @res_mock.mock_manila_input @res_mock.patch_connection def test_manage_without_export_locations(self, connection, mocked_input): connection.client.create_interface = mock.Mock(return_value=None) share = mocked_input['nfs_share'] share_server = mocked_input['share_server'] driver_options = {'size': 3} self.assertRaises(exception.ManageInvalidShare, connection.manage_existing_with_server, share, driver_options, share_server) @res_mock.mock_manila_input @res_mock.patch_connection def test_get_default_filter_function_disable_report(self, connection, mocked_input): expected = None actual = connection.get_default_filter_function() self.assertEqual(expected, actual) @res_mock.mock_manila_input @res_mock.patch_connection def test_get_default_filter_function_enable_report(self, connection, mocked_input): expected = "share.size >= 3" connection.report_default_filter_function = True actual = connection.get_default_filter_function() self.assertEqual(expected, actual) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/test_utils.py0000664000175000017500000001535200000000000027415 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from oslo_utils import units from manila.share.drivers.dell_emc.plugins.unity import utils from manila import test class MockSP(object): def __init__(self, sp_id): self.sp_id = sp_id def get_id(self): return self.sp_id SPA = MockSP('spa') SPB = MockSP('spb') class MockPort(object): def __init__(self, sp, port_id, mtu): self._sp = sp self.port_id = port_id self.mtu = mtu def get_id(self): return self.port_id @property def parent_storage_processor(self): return self._sp SPA_ETH0 = MockPort(SPA, 'spa_eth0', 1500) SPA_ETH1 = MockPort(SPA, 'spa_eth1', 9000) SPB_ETH0 = MockPort(SPB, 'spb_eth0', 1500) SPB_ETH1 = MockPort(SPB, 'spb_eth1', 9000) SPA_LA1 = MockPort(SPA, 'spa_la_1', 1500) SPB_LA1 = MockPort(SPB, 'spb_la_1', 1500) class MockPortV5(object): def __init__(self, sp, port_id, mtu): self._sp = sp self.port_id = port_id self.mtu = mtu def get_id(self): return self.port_id @property def storage_processor(self): return self._sp SPA_LA2 = MockPort(SPA, 'spa_la_2', 1500) SPB_LA2 = MockPort(SPB, 'spb_la_2', 1500) @ddt.ddt class TestUtils(test.TestCase): @ddt.data({'matcher': None, 'matched': {'pool_1', 'pool_2', 'nas_server_pool'}, 'not_matched': set()}, {'matcher': ['*'], 'matched': {'pool_1', 'pool_2', 'nas_server_pool'}, 'not_matched': set()}, {'matcher': ['pool_*'], 'matched': {'pool_1', 'pool_2'}, 'not_matched': {'nas_server_pool'}}, {'matcher': ['*pool'], 'matched': {'nas_server_pool'}, 'not_matched': {'pool_1', 'pool_2'}}, 
{'matcher': ['nas_server_pool'], 'matched': {'nas_server_pool'}, 'not_matched': {'pool_1', 'pool_2'}}, {'matcher': ['nas_*', 'pool_*'], 'matched': {'pool_1', 'pool_2', 'nas_server_pool'}, 'not_matched': set()}) def test_do_match(self, data): full = ['pool_1 ', ' pool_2', ' nas_server_pool '] matcher = data['matcher'] expected_matched = data['matched'] expected_not_matched = data['not_matched'] matched, not_matched = utils.do_match(full, matcher) self.assertEqual(expected_matched, matched) self.assertEqual(expected_not_matched, not_matched) @ddt.data({'ports': [SPA_ETH0, SPB_ETH0], 'ids_conf': None, 'port_map': {'spa': {'spa_eth0'}, 'spb': {'spb_eth0'}}, 'unmanaged': set()}, {'ports': [SPA_ETH0, SPB_ETH0], 'ids_conf': [' '], 'port_map': {'spa': {'spa_eth0'}, 'spb': {'spb_eth0'}}, 'unmanaged': set()}, {'ports': [SPA_ETH0, SPB_ETH0, SPA_ETH1], 'ids_conf': ['spa*'], 'port_map': {'spa': {'spa_eth0', 'spa_eth1'}}, 'unmanaged': {'spb_eth0'}}, {'ports': [SPA_LA2, SPB_LA2], 'ids_conf': None, 'port_map': {'spa': {'spa_la_2'}, 'spb': {'spb_la_2'}}, 'unmanaged': set()}, ) @ddt.unpack def test_match_ports(self, ports, ids_conf, port_map, unmanaged): sp_ports_map, unmanaged_port_ids = utils.match_ports(ports, ids_conf) self.assertEqual(port_map, sp_ports_map) self.assertEqual(unmanaged, unmanaged_port_ids) def test_find_ports_by_mtu(self): all_ports = [SPA_ETH0, SPB_ETH0, SPA_ETH1, SPB_ETH1, SPA_LA1, SPB_LA1] port_ids_conf = '*' port_map = utils.find_ports_by_mtu(all_ports, port_ids_conf, 1500) self.assertEqual({'spa': {'spa_eth0', 'spa_la_1'}, 'spb': {'spb_eth0', 'spb_la_1'}}, port_map) def test_gb_to_byte(self): self.assertEqual(3 * units.Gi, utils.gib_to_byte(3)) def test_get_snapshot_id(self): snapshot = {'provider_location': '23047-ef2344-4563cvw-r4323cwed', 'id': 'test_id'} result = utils.get_snapshot_id(snapshot) expected = '23047-ef2344-4563cvw-r4323cwed' self.assertEqual(expected, result) def test_get_snapshot_id_without_pl(self): snapshot = {'provider_location': '', 
'id': 'test_id'} result = utils.get_snapshot_id(snapshot) expected = 'test_id' self.assertEqual(expected, result) def test_get_nfs_share_id(self): nfs_share = {'export_locations': [{'path': '10.10.1.12:/addf-97e-46c-8ac6-55922f', 'share_instance_id': 'e24-457e-47-12c6-gf345'}], 'share_proto': 'NFS', 'id': 'test_nfs_id'} result = utils.get_share_backend_id(nfs_share) expected = 'addf-97e-46c-8ac6-55922f' self.assertEqual(expected, result) def test_get_nfs_share_id_without_path(self): nfs_share = {'export_locations': [{'path': '', 'share_instance_id': 'ev24-7e-4-12c6-g45245'}], 'share_proto': 'NFS', 'id': 'test_nfs_id'} result = utils.get_share_backend_id(nfs_share) expected = 'test_nfs_id' self.assertEqual(expected, result) def test_get_cifs_share_id(self): cifs_share = {'export_locations': [{'path': '\\\\17.66.5.3\\bdf-h4e-42c-122c5-b212', 'share_instance_id': 'ev4-47e-48-126-gfbh452'}], 'share_proto': 'CIFS', 'id': 'test_cifs_id'} result = utils.get_share_backend_id(cifs_share) expected = 'bdf-h4e-42c-122c5-b212' self.assertEqual(expected, result) def test_get_cifs_share_id_without_path(self): cifs_share = {'export_locations': [{'path': '', 'share_instance_id': 'ef4-47e-48-12c6-gf452'}], 'share_proto': 'CIFS', 'id': 'test_cifs_id'} result = utils.get_share_backend_id(cifs_share) expected = 'test_cifs_id' self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/unity/utils.py0000664000175000017500000000226300000000000026353 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from os import path from unittest import mock import yaml from oslo_log import log LOG = log.getLogger(__name__) patch_system = mock.patch('storops.UnitySystem') def load_yaml(file_name): yaml_file = '{}/{}'.format(path.dirname(path.abspath(__file__)), file_name) with open(yaml_file) as f: res = yaml.safe_load(f) LOG.debug('Loaded yaml mock objects from %s.', yaml_file) return res patch_find_ports_by_mtu = mock.patch('manila.share.drivers.dell_emc.plugins.' 'unity.utils.find_ports_by_mtu') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0136704 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/vnx/0000775000175000017500000000000000000000000024301 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/vnx/__init__.py0000664000175000017500000000000000000000000026400 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/vnx/test_connection.py0000664000175000017500000027747600000000000030100 0ustar00zuulzuul00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import ddt from oslo_log import log from manila import exception from manila.share.drivers.dell_emc.common.enas import connector from manila.share.drivers.dell_emc.common.enas import utils as enas_utils from manila.share.drivers.dell_emc.plugins.vnx import connection from manila.share.drivers.dell_emc.plugins.vnx import object_manager from manila import test from manila.tests import fake_share from manila.tests.share.drivers.dell_emc.common.enas import fakes from manila.tests.share.drivers.dell_emc.common.enas import utils LOG = log.getLogger(__name__) @ddt.ddt class StorageConnectionTestCase(test.TestCase): @mock.patch.object(connector.XMLAPIConnector, "_do_setup", mock.Mock()) def setUp(self): super(StorageConnectionTestCase, self).setUp() self.emc_share_driver = fakes.FakeEMCShareDriver() self.connection = connection.VNXStorageConnection(LOG) self.pool = fakes.PoolTestData() self.vdm = fakes.VDMTestData() self.mover = fakes.MoverTestData() self.fs = fakes.FileSystemTestData() self.mount = fakes.MountPointTestData() self.snap = fakes.SnapshotTestData() self.cifs_share = fakes.CIFSShareTestData() self.nfs_share = fakes.NFSShareTestData() self.cifs_server = fakes.CIFSServerTestData() self.dns = fakes.DNSDomainTestData() with mock.patch.object(connector.XMLAPIConnector, 'request', mock.Mock()): self.connection.connect(self.emc_share_driver, None) def test_check_for_setup_error(self): hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_ref_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) 
self.connection.manager.connectors['XML'].request = xml_req_mock with mock.patch.object(connection.VNXStorageConnection, '_get_managed_storage_pools', mock.Mock()): self.connection.check_for_setup_error() expected_calls = [mock.call(self.mover.req_get_ref())] xml_req_mock.assert_has_calls(expected_calls) def test_check_for_setup_error_with_invalid_mover_name(self): hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.InvalidParameterValue, self.connection.check_for_setup_error) expected_calls = [mock.call(self.mover.req_get_ref())] xml_req_mock.assert_has_calls(expected_calls) @ddt.data({'pool_conf': None, 'real_pools': ['fake_pool', 'nas_pool'], 'matched_pool': set()}, {'pool_conf': [], 'real_pools': ['fake_pool', 'nas_pool'], 'matched_pool': set()}, {'pool_conf': ['*'], 'real_pools': ['fake_pool', 'nas_pool'], 'matched_pool': {'fake_pool', 'nas_pool'}}, {'pool_conf': ['fake_*'], 'real_pools': ['fake_pool', 'nas_pool', 'Perf_Pool'], 'matched_pool': {'fake_pool'}}, {'pool_conf': ['*pool'], 'real_pools': ['fake_pool', 'NAS_Pool', 'Perf_POOL'], 'matched_pool': {'fake_pool'}}, {'pool_conf': ['nas_pool'], 'real_pools': ['fake_pool', 'nas_pool', 'perf_pool'], 'matched_pool': {'nas_pool'}}) @ddt.unpack def test__get_managed_storage_pools(self, pool_conf, real_pools, matched_pool): with mock.patch.object(object_manager.StoragePool, 'get_all', mock.Mock(return_value=('ok', real_pools))): pool = self.connection._get_managed_storage_pools(pool_conf) self.assertEqual(matched_pool, pool) def test__get_managed_storage_pools_failed_to_get_pool_info(self): hook = utils.RequestSideEffect() hook.append(self.pool.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock pool_conf = fakes.FakeData.pool_name self.assertRaises(exception.EMCVnxXMLAPIError, 
self.connection._get_managed_storage_pools, pool_conf) expected_calls = [mock.call(self.pool.req_get())] xml_req_mock.assert_has_calls(expected_calls) @ddt.data( {'pool_conf': ['fake_*'], 'real_pools': ['nas_pool', 'Perf_Pool']}, {'pool_conf': ['*pool'], 'real_pools': ['NAS_Pool', 'Perf_POOL']}, {'pool_conf': ['nas_pool'], 'real_pools': ['fake_pool', 'perf_pool']}, ) @ddt.unpack def test__get_managed_storage_pools_without_matched_pool(self, pool_conf, real_pools): with mock.patch.object(object_manager.StoragePool, 'get_all', mock.Mock(return_value=('ok', real_pools))): self.assertRaises(exception.InvalidParameterValue, self.connection._get_managed_storage_pools, pool_conf) def test_create_cifs_share(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.pool.req_get()), mock.call(self.fs.req_create_on_vdm()), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': r'\\%s\%s' % ( fakes.FakeData.network_allocations_ip1, share['name'])}], 'CIFS export path is incorrect') def 
test_create_cifs_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.pool.req_get()), mock.call(self.fs.req_create_on_vdm()), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual( location, [{'path': r'\\%s.ipv6-literal.net\%s' % ( fakes.FakeData.network_allocations_ip3.replace(':', '-'), share['name'])}], 'CIFS export path is incorrect' ) def test_create_nfs_share(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE hook = utils.RequestSideEffect() hook.append(self.pool.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = 
ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': '192.168.1.2:/%s' % share['name']}], 'NFS export path is incorrect') def test_create_nfs_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE hook = utils.RequestSideEffect() hook.append(self.pool.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share(None, share, share_server) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': '[%s]:/%s' % ( fakes.FakeData.network_allocations_ip4, share['name'])}], 'NFS export path is incorrect') def test_create_cifs_share_without_share_server(self): share = fakes.CIFS_SHARE self.assertRaises(exception.InvalidInput, self.connection.create_share, None, share, None) def test_create_cifs_share_without_share_server_name(self): share = fakes.CIFS_SHARE share_server = copy.deepcopy(fakes.SHARE_SERVER) share_server['backend_details']['share_server_name'] = None self.assertRaises(exception.EMCVnxXMLAPIError, 
self.connection.create_share, None, share, share_server) def test_create_cifs_share_with_invalide_cifs_server_name(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_share, None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_cifs_share_without_interface_in_cifs_server(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_without_interface( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_share, None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.pool.req_get()), mock.call(self.fs.req_create_on_vdm()), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_cifs_share_without_pool_name(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(host='HostA@BackendB', share_proto='CIFS') self.assertRaises(exception.InvalidHost, self.connection.create_share, None, share, share_server) def test_create_cifs_share_from_snapshot(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, share_name=fakes.FakeData.src_share_name, 
share_id=fakes.FakeData.src_share_name, id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), mock.call(self.cifs_share.cmd_disable_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': r'\\192.168.1.1\%s' % share['name']}], 'CIFS export path is incorrect') def test_create_cifs_share_from_snapshot_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, 
share_name=fakes.FakeData.src_share_name, share_id=fakes.FakeData.src_share_name, id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) hook.append(self.cifs_share.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), mock.call(self.cifs_share.cmd_disable_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual( location, [{'path': r'\\%s.ipv6-literal.net\%s' % ( fakes.FakeData.network_allocations_ip3.replace(':', '-'), share['name'])}], 
'CIFS export path is incorrect') def test_create_nfs_share_from_snapshot(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, share_name=fakes.FakeData.src_share_name, share_id=fakes.FakeData.src_share_name, id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), mock.call(self.nfs_share.cmd_create(), True) ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': '192.168.1.2:/%s' % share['name']}], 'NFS export path is incorrect') def test_create_nfs_share_from_snapshot_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE snapshot = fake_share.fake_snapshot( name=fakes.FakeData.src_snap_name, share_name=fakes.FakeData.src_share_name, share_id=fakes.FakeData.src_share_name, 
id=fakes.FakeData.src_snap_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.mover.output_get_interconnect_id()) ssh_hook.append() ssh_hook.append() ssh_hook.append(self.fs.output_copy_ckpt) ssh_hook.append(self.fs.output_info()) ssh_hook.append() ssh_hook.append() ssh_hook.append() ssh_hook.append(self.nfs_share.output_create()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock location = self.connection.create_share_from_snapshot( None, share, snapshot, share_server) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.mover.cmd_get_interconnect_id(), False), mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), mock.call(self.nfs_share.cmd_create(), True) ] ssh_cmd_mock.assert_has_calls(ssh_calls) self.assertEqual(location, [{'path': '[%s]:/%s' % ( fakes.FakeData.network_allocations_ip4, share['name'])}], 'NFS export path is incorrect') def test_create_share_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') self.assertRaises(exception.InvalidShare, self.connection.create_share, context=None, share=share, share_server=share_server) def test_create_share_from_snapshot_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') snapshot = fake_share.fake_snapshot() self.assertRaises(exception.InvalidShare, 
self.connection.create_share_from_snapshot, None, share, snapshot, share_server) def test_create_share_from_snapshot_without_pool_name(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(host='HostA@BackendB', share_proto='CIFS') snapshot = fake_share.fake_snapshot() self.assertRaises(exception.InvalidHost, self.connection.create_share_from_snapshot, None, share, snapshot, share_server) def test_delete_cifs_share(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_share.resp_task_succeed()) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_delete_cifs_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_share.resp_task_succeed()) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_share(None, share, share_server) expected_calls = [ 
mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_delete_nfs_share(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) ssh_hook.append(self.nfs_share.output_delete_succeed()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_delete_nfs_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.mount.resp_task_succeed()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() 
ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) ssh_hook.append(self.nfs_share.output_delete_succeed()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_delete_share_without_share_server(self): share = fakes.CIFS_SHARE self.connection.delete_share(None, share) def test_delete_share_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') self.assertRaises(exception.InvalidShare, self.connection.delete_share, context=None, share=share, share_server=share_server) def test_delete_cifs_share_with_nonexistent_mount_and_filesystem(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_share.resp_task_succeed()) hook.append(self.mount.resp_task_error()) hook.append(self.fs.resp_get_succeed()) hook.append(self.fs.resp_task_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_share(None, share, share_server) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.mount.req_delete(self.vdm.vdm_id)), mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) 
def test_extend_share(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE new_size = fakes.FakeData.new_size hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.extend_share(share, new_size, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] xml_req_mock.assert_has_calls(expected_calls) def test_extend_share_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE new_size = fakes.FakeData.new_size hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed()) hook.append(self.fs.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.extend_share(share, new_size, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] xml_req_mock.assert_has_calls(expected_calls) def test_extend_share_without_pool_name(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(host='HostA@BackendB', share_proto='CIFS') new_size = fakes.FakeData.new_size self.assertRaises(exception.InvalidHost, self.connection.extend_share, share, new_size, share_server) def test_create_snapshot(self): share_server = fakes.SHARE_SERVER snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.snap.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock 
self.connection.create_snapshot(None, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create()), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_snapshot_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.snap.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.create_snapshot(None, snapshot, share_server) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.snap.req_create()), ] xml_req_mock.assert_has_calls(expected_calls) def test_create_snapshot_with_incorrect_share_info(self): share_server = fakes.SHARE_SERVER snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_but_not_found()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.create_snapshot, None, snapshot, share_server) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) def test_delete_snapshot(self): share_server = fakes.SHARE_SERVER snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.snap.resp_get_succeed()) hook.append(self.snap.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_snapshot(None, snapshot, share_server) 
expected_calls = [ mock.call(self.snap.req_get()), mock.call(self.snap.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) def test_delete_snapshot_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 snapshot = fake_share.fake_snapshot( id=fakes.FakeData.snapshot_name, share_id=fakes.FakeData.filesystem_name, share_name=fakes.FakeData.share_name) hook = utils.RequestSideEffect() hook.append(self.snap.resp_get_succeed()) hook.append(self.snap.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.delete_snapshot(None, snapshot, share_server) expected_calls = [ mock.call(self.snap.req_get()), mock.call(self.snap.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) @utils.patch_get_managed_ports_vnx(return_value=['cge-1-0']) def test_setup_server(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_but_not_found()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.vdm.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.dns.resp_task_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.setup_server(fakes.NETWORK_INFO, None) if_name_1 = fakes.FakeData.interface_name1 if_name_2 = fakes.FakeData.interface_name2 expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.mover.req_create_interface( if_name=if_name_1, ip=fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_create_interface( if_name=if_name_2, 
ip=fakes.FakeData.network_allocations_ip2)), mock.call(self.dns.req_create()), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_attach_nfs_interface(), False), ] ssh_cmd_mock.assert_has_calls(ssh_calls) @utils.patch_get_managed_ports_vnx(return_value=['cge-1-0']) def test_setup_server_with_ipv6(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_but_not_found()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.vdm.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.dns.resp_task_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.setup_server(fakes.NETWORK_INFO_IPV6, None) if_name_1 = fakes.FakeData.interface_name3 if_name_2 = fakes.FakeData.interface_name4 expect_ip_1 = fakes.FakeData.network_allocations_ip3 expect_ip_2 = fakes.FakeData.network_allocations_ip4 expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.mover.req_create_interface_with_ipv6( if_name=if_name_1, ip=expect_ip_1)), mock.call(self.mover.req_create_interface_with_ipv6( if_name=if_name_2, ip=expect_ip_2)), mock.call(self.dns.req_create( ip_addr=fakes.FakeData.dns_ipv6_address)), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_create( self.vdm.vdm_id, ip_addr=fakes.FakeData.network_allocations_ip3)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_attach_nfs_interface( 
interface=fakes.FakeData.interface_name4), False), ] ssh_cmd_mock.assert_has_calls(ssh_calls) @utils.patch_get_managed_ports_vnx(return_value=['cge-1-0']) def test_setup_server_with_existing_vdm(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.dns.resp_task_succeed()) hook.append(self.cifs_server.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.setup_server(fakes.NETWORK_INFO, None) if_name_1 = fakes.FakeData.network_allocations_id1[-12:] if_name_2 = fakes.FakeData.network_allocations_id2[-12:] expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_create_interface( if_name=if_name_1, ip=fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_create_interface( if_name=if_name_2, ip=fakes.FakeData.network_allocations_ip2)), mock.call(self.dns.req_create()), mock.call(self.cifs_server.req_create(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_attach_nfs_interface(), False), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_setup_server_with_invalid_security_service(self): network_info = copy.deepcopy(fakes.NETWORK_INFO) network_info['security_services'][0]['type'] = 'fake_type' self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.setup_server, network_info, None) @utils.patch_get_managed_ports_vnx( side_effect=exception.EMCVnxXMLAPIError( err="Get managed ports fail.")) def test_setup_server_without_valid_physical_device(self): hook = utils.RequestSideEffect() 
hook.append(self.vdm.resp_get_but_not_found()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.vdm.resp_task_succeed()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_without_value()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces_vdm(nfs_interface='')) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.setup_server, fakes.NETWORK_INFO, None) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), ] ssh_cmd_mock.assert_has_calls(ssh_calls) @utils.patch_get_managed_ports_vnx(return_value=['cge-1-0']) def test_setup_server_with_exception(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_but_not_found()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.vdm.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_error()) hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_without_value()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces_vdm(nfs_interface='')) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock 
self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.setup_server, fakes.NETWORK_INFO, None) if_name_1 = fakes.FakeData.network_allocations_id1[-12:] if_name_2 = fakes.FakeData.network_allocations_id2[-12:] expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.mover.req_create_interface( if_name=if_name_1, ip=fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_create_interface( if_name=if_name_2, ip=fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_teardown_server(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.cifs_server.resp_task_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces_vdm()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.teardown_server(fakes.SERVER_DETAIL, fakes.SECURITY_SERVICE) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), 
mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)), mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_teardown_server_with_ipv6(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) hook.append(self.cifs_server.resp_task_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces_vdm()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.teardown_server(fakes.SERVER_DETAIL_IPV6, fakes.SECURITY_SERVICE_IPV6) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)), mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip3)), mock.call(self.mover.req_delete_interface( 
fakes.FakeData.network_allocations_ip4)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_teardown_server_without_server_detail(self): self.connection.teardown_server(None, fakes.SECURITY_SERVICE) def test_teardown_server_without_security_services(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces_vdm()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.teardown_server(fakes.SERVER_DETAIL, []) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_teardown_server_without_share_server_name_in_server_detail(self): server_detail = { 'cifs_if': fakes.FakeData.network_allocations_ip1, 'nfs_if': fakes.FakeData.network_allocations_ip2, } self.connection.teardown_server(server_detail, fakes.SECURITY_SERVICE) def test_teardown_server_with_invalid_server_name(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_error()) 
xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.teardown_server(fakes.SERVER_DETAIL, fakes.SECURITY_SERVICE) expected_calls = [mock.call(self.vdm.req_get())] xml_req_mock.assert_has_calls(expected_calls) def test_teardown_server_without_cifs_server(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_error()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.cifs_server.resp_task_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces_vdm()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.teardown_server(fakes.SERVER_DETAIL, fakes.SECURITY_SERVICE) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_teardown_server_with_invalid_cifs_server_modification(self): hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) 
hook.append(self.cifs_server.resp_task_error()) hook.append(self.cifs_server.resp_task_succeed()) hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.mover.resp_task_succeed()) hook.append(self.vdm.resp_task_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.vdm.output_get_interfaces_vdm()) ssh_hook.append() ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.teardown_server(fakes.SERVER_DETAIL, fakes.SECURITY_SERVICE) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_server.req_modify(self.vdm.vdm_id)), mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip1)), mock.call(self.mover.req_delete_interface( fakes.FakeData.network_allocations_ip2)), mock.call(self.vdm.req_delete()), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.vdm.cmd_get_interfaces(), False), mock.call(self.vdm.cmd_detach_nfs_interface(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_update_access_add_cifs_rw(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock 
self.connection.update_access(None, share, [], [access], [], share_server=share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_update_access_add_cifs_rw_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.update_access(None, share, [], [access], [], share_server=share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_update_access_deny_nfs(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts) rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_hook.append(self.nfs_share.output_set_access_success()) ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=fakes.FakeData.rw_hosts, 
ro_hosts=fakes.FakeData.ro_hosts)) ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.update_access(None, share, [], [], [access], share_server=share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_update_access_deny_nfs_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts_ipv6) rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts_ipv6)) ssh_hook.append(self.nfs_share.output_set_access_success()) ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=fakes.FakeData.rw_hosts_ipv6, ro_hosts=fakes.FakeData.ro_hosts_ipv6)) ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.update_access(None, share, [], [], [access], share_server=share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=self.nfs_share.rw_hosts_ipv6, ro_hosts=self.nfs_share.ro_hosts_ipv6), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_update_access_recover_nfs_rule(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS hosts = ['192.168.1.5'] rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts) rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_hook.append(self.nfs_share.output_set_access_success()) 
ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=hosts, ro_hosts=[])) ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.update_access(None, share, [access], [], [], share_server=share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=hosts, ro_hosts=[]), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_update_access_recover_nfs_rule_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS_IPV6 hosts = ['fdf8:f53b:82e1::5'] rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts_ipv6) rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts_ipv6)) ssh_hook.append(self.nfs_share.output_set_access_success()) ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=hosts, ro_hosts=[])) ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.update_access(None, share, [access], [], [], share_server=share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=hosts, ro_hosts=[]), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_update_access_recover_cifs_rule(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() 
ssh_hook.append(self.cifs_share.output_allow_access()) ssh_hook.append(fakes.FakeData.cifs_access) ssh_hook.append('Command succeeded') ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.update_access(None, share, [access], [], [], share_server=share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(), True), mock.call(self.cifs_share.cmd_get_access(), True), mock.call(self.cifs_share.cmd_change_access( action='revoke', user='guest'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_update_access_recover_cifs_rule_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_hook.append(fakes.FakeData.cifs_access) ssh_hook.append('Command succeeded') ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.update_access(None, share, [access], [], [], share_server=share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(), True), mock.call(self.cifs_share.cmd_get_access(), True), mock.call(self.cifs_share.cmd_change_access( 
action='revoke', user='guest'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_cifs_clear_access_server_not_found(self): server = fakes.SHARE_SERVER hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, cifs_server_name='cifs_server_name')) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection._cifs_clear_access, 'share_name', server, None) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) def test_allow_cifs_rw_access(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_cifs_rw_access_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) 
hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_cifs_ro_access(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RO_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access('ro'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_cifs_ro_access_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE access = fakes.CIFS_RO_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, 
interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access('ro'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_ro_access_without_share_server_name(self): share = fakes.CIFS_SHARE share_server = copy.deepcopy(fakes.SHARE_SERVER) share_server['backend_details'].pop('share_server_name') access = fakes.CIFS_RO_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access('ro'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_access_with_invalid_access_level(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE 
access = fake_share.fake_access(access_level='fake_level') self.assertRaises(exception.InvalidShareAccessLevel, self.connection.allow_access, None, share, access, share_server) def test_allow_access_with_invalid_share_server_name(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.allow_access, None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) def test_allow_nfs_access(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts) rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=fakes.FakeData.rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_hook.append(self.nfs_share.output_set_access_success()) ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_nfs_access_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS_IPV6 rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts_ipv6) 
rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=fakes.FakeData.rw_hosts_ipv6, ro_hosts=fakes.FakeData.ro_hosts_ipv6)) ssh_hook.append(self.nfs_share.output_set_access_success()) ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts_ipv6)) ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.allow_access(None, share, access, share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts_ipv6), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_allow_cifs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.allow_access, None, share, access, share_server) def test_allow_nfs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.allow_access, None, share, access, share_server) def test_allow_access_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') access = fake_share.fake_access() self.assertRaises(exception.InvalidShare, self.connection.allow_access, None, share, access, share_server) def test_deny_cifs_rw_access(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = 
utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.deny_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_cifs_rw_access_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.deny_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_cifs_ro_access(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RO_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_succeed( 
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.deny_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access('ro', 'revoke'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_cifs_ro_access_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.CIFS_SHARE access = fakes.CIFS_RO_ACCESS hook = utils.RequestSideEffect() hook.append(self.vdm.resp_get_succeed( interface1=fakes.FakeData.interface_name3, interface2=fakes.FakeData.interface_name4)) hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True, ip_addr=fakes.FakeData.network_allocations_ip3)) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.cifs_share.output_allow_access()) ssh_cmd_mock = mock.Mock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.deny_access(None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access('ro', 'revoke'), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_cifs_access_with_invliad_share_server_name(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fakes.CIFS_RW_ACCESS hook = utils.RequestSideEffect() 
hook.append(self.vdm.resp_get_succeed()) hook.append(self.cifs_server.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.deny_access, None, share, access, share_server) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), ] xml_req_mock.assert_has_calls(expected_calls) def test_deny_nfs_access(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts) rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_hook.append(self.nfs_share.output_set_access_success()) ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=fakes.FakeData.rw_hosts, ro_hosts=fakes.FakeData.ro_hosts)) ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.deny_access(None, share, access, share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_nfs_access_with_ipv6(self): share_server = fakes.SHARE_SERVER_IPV6 share = fakes.NFS_SHARE access = fakes.NFS_RW_ACCESS_IPV6 rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts_ipv6) rw_hosts.append(access['access_to']) ssh_hook = utils.SSHSideEffect() ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=fakes.FakeData.ro_hosts_ipv6)) ssh_hook.append(self.nfs_share.output_set_access_success()) ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=fakes.FakeData.rw_hosts_ipv6, ro_hosts=fakes.FakeData.ro_hosts_ipv6)) 
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.deny_access(None, share, access, share_server) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), True), mock.call(self.nfs_share.cmd_set_access( rw_hosts=self.nfs_share.rw_hosts_ipv6, ro_hosts=self.nfs_share.ro_hosts_ipv6), True), mock.call(self.nfs_share.cmd_get(), True), ] ssh_cmd_mock.assert_has_calls(ssh_calls) def test_deny_access_with_incorrect_proto(self): share_server = fakes.SHARE_SERVER share = fake_share.fake_share(share_proto='FAKE_PROTO') access = fakes.CIFS_RW_ACCESS self.assertRaises(exception.InvalidShare, self.connection.deny_access, None, share, access, share_server) def test_deny_cifs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.CIFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.deny_access, None, share, access, share_server) def test_deny_nfs_access_with_incorrect_access_type(self): share_server = fakes.SHARE_SERVER share = fakes.NFS_SHARE access = fake_share.fake_access(access_type='fake_type') self.assertRaises(exception.InvalidShareAccess, self.connection.deny_access, None, share, access, share_server) def test_update_share_stats(self): hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.pool.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.connection.update_share_stats(fakes.STATS) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) for pool in fakes.STATS['pools']: if pool['pool_name'] == fakes.FakeData.pool_name: self.assertEqual( enas_utils.mb_to_gb(fakes.FakeData.pool_total_size), pool['total_capacity_gb']) free_size = (fakes.FakeData.pool_total_size - 
fakes.FakeData.pool_used_size) self.assertEqual( enas_utils.mb_to_gb(free_size), pool['free_capacity_gb']) def test_update_share_stats_without_matched_config_pools(self): self.connection.pools = set('fake_pool') hook = utils.RequestSideEffect() hook.append(self.mover.resp_get_ref_succeed()) hook.append(self.pool.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.update_share_stats, fakes.STATS) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) def test_get_pool(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock pool_name = self.connection.get_pool(share) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) self.assertEqual(fakes.FakeData.pool_name, pool_name) def test_get_pool_failed_to_get_filesystem_info(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_pool, share) expected_calls = [mock.call(self.fs.req_get())] xml_req_mock.assert_has_calls(expected_calls) def test_get_pool_failed_to_get_pool_info(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_error()) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_pool, share) 
expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) def test_get_pool_failed_to_find_matched_pool_name(self): share = fakes.CIFS_SHARE hook = utils.RequestSideEffect() hook.append(self.fs.resp_get_succeed()) hook.append(self.pool.resp_get_succeed(name='unmatch_pool_name', id='unmatch_pool_id')) xml_req_mock = utils.EMCMock(side_effect=hook) self.connection.manager.connectors['XML'].request = xml_req_mock self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_pool, share) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), ] xml_req_mock.assert_has_calls(expected_calls) @ddt.data({'port_conf': None, 'managed_ports': ['cge-1-0', 'cge-1-3']}, {'port_conf': '*', 'managed_ports': ['cge-1-0', 'cge-1-3']}, {'port_conf': ['cge-1-*'], 'managed_ports': ['cge-1-0', 'cge-1-3']}, {'port_conf': ['cge-1-3'], 'managed_ports': ['cge-1-3']}) @ddt.unpack def test_get_managed_ports_one_port(self, port_conf, managed_ports): hook = utils.SSHSideEffect() hook.append(self.mover.output_get_physical_devices()) ssh_cmd_mock = mock.Mock(side_effect=hook) expected_calls = [ mock.call(self.mover.cmd_get_physical_devices(), False), ] self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.port_conf = port_conf ports = self.connection.get_managed_ports() self.assertIsInstance(ports, list) self.assertEqual(sorted(managed_ports), sorted(ports)) ssh_cmd_mock.assert_has_calls(expected_calls) def test_get_managed_ports_no_valid_port(self): hook = utils.SSHSideEffect() hook.append(self.mover.output_get_physical_devices()) ssh_cmd_mock = mock.Mock(side_effect=hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.port_conf = ['cge-2-0'] self.assertRaises(exception.BadConfigurationException, self.connection.get_managed_ports) def test_get_managed_ports_query_devices_failed(self): hook = utils.SSHSideEffect() 
hook.append(self.mover.fake_output) ssh_cmd_mock = mock.Mock(side_effect=hook) self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock self.connection.port_conf = ['cge-2-0'] self.assertRaises(exception.EMCVnxXMLAPIError, self.connection.get_managed_ports) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/plugins/vnx/test_object_manager.py0000664000175000017500000037546400000000000030675 0ustar00zuulzuul00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import time from unittest import mock import ddt from lxml import builder from oslo_concurrency import processutils from manila.common import constants as const from manila import exception from manila.share.drivers.dell_emc.common.enas import connector from manila.share.drivers.dell_emc.common.enas import constants from manila.share.drivers.dell_emc.common.enas import xml_api_parser as parser from manila.share.drivers.dell_emc.plugins.vnx import object_manager as manager from manila import test from manila.tests.share.drivers.dell_emc.common.enas import fakes from manila.tests.share.drivers.dell_emc.common.enas import utils class StorageObjectManagerTestCase(test.TestCase): @mock.patch.object(connector, "XMLAPIConnector", mock.Mock()) @mock.patch.object(connector, "SSHConnector", mock.Mock()) def setUp(self): super(StorageObjectManagerTestCase, self).setUp() emd_share_driver = fakes.FakeEMCShareDriver() self.manager = manager.StorageObjectManager( emd_share_driver.configuration) def test_get_storage_context(self): type_map = { 'FileSystem': manager.FileSystem, 'StoragePool': manager.StoragePool, 'MountPoint': manager.MountPoint, 'Mover': manager.Mover, 'VDM': manager.VDM, 'Snapshot': manager.Snapshot, 'MoverInterface': manager.MoverInterface, 'DNSDomain': manager.DNSDomain, 'CIFSServer': manager.CIFSServer, 'CIFSShare': manager.CIFSShare, 'NFSShare': manager.NFSShare, } for key, value in type_map.items(): self.assertTrue( isinstance(self.manager.getStorageContext(key), value)) for key in self.manager.context.keys(): self.assertIn(key, type_map) def test_get_storage_context_invalid_type(self): fake_type = 'fake_type' self.assertRaises(exception.EMCVnxXMLAPIError, self.manager.getStorageContext, fake_type) class StorageObjectTestCaseBase(test.TestCase): @mock.patch.object(connector, "XMLAPIConnector", mock.Mock()) @mock.patch.object(connector, "SSHConnector", mock.Mock()) def setUp(self): super(StorageObjectTestCaseBase, self).setUp() emd_share_driver = 
fakes.FakeEMCShareDriver() self.manager = manager.StorageObjectManager( emd_share_driver.configuration) self.base = fakes.StorageObjectTestData() self.pool = fakes.PoolTestData() self.vdm = fakes.VDMTestData() self.mover = fakes.MoverTestData() self.fs = fakes.FileSystemTestData() self.mount = fakes.MountPointTestData() self.snap = fakes.SnapshotTestData() self.cifs_share = fakes.CIFSShareTestData() self.nfs_share = fakes.NFSShareTestData() self.cifs_server = fakes.CIFSServerTestData() self.dns = fakes.DNSDomainTestData() class StorageObjectTestCase(StorageObjectTestCaseBase): def test_xml_api_retry(self): hook = utils.RequestSideEffect() hook.append(self.base.resp_need_retry()) hook.append(self.base.resp_task_succeed()) elt_maker = builder.ElementMaker(nsmap={None: constants.XML_NAMESPACE}) xml_parser = parser.XMLAPIParser() storage_object = manager.StorageObject(self.manager.connectors, elt_maker, xml_parser, self.manager) storage_object.conn['XML'].request = utils.EMCMock(side_effect=hook) fake_req = storage_object._build_task_package( elt_maker.StartFake(name='foo') ) self.mock_object(time, 'sleep') resp = storage_object._send_request(fake_req) self.assertEqual('ok', resp['maxSeverity']) expected_calls = [ mock.call(self.base.req_fake_start_task()), mock.call(self.base.req_fake_start_task()) ] storage_object.conn['XML'].request.assert_has_calls(expected_calls) class FileSystemTestCase(StorageObjectTestCaseBase): def setUp(self): super(FileSystemTestCase, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = utils.SSHSideEffect() def test_create_file_system_on_vdm(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, 
mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_file_system_on_mover(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.fs.req_create_on_mover()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_file_system_but_already_exist(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.fs.resp_create_but_already_exist()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_file_system_invalid_mover_id(self, sleep_mock): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.fs.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = 
utils.EMCMock(side_effect=self.hook) context.create(name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.fs.req_create_on_mover()), mock.call(self.mover.req_get_ref()), mock.call(self.fs.req_create_on_mover()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_file_system_with_error(self): self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.fs.resp_task_error()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.create, name=self.fs.filesystem_name, size=self.fs.filesystem_size, pool_name=self.pool.pool_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.pool.req_get()), mock.call(self.vdm.req_get()), mock.call(self.fs.req_create_on_vdm()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_file_system(self): self.hook.append(self.fs.resp_get_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.fs.filesystem_name, context.filesystem_map) property_map = [ 'name', 'pools_id', 'volume_id', 'size', 'id', 'type', 'dataServicePolicies', ] for prop in property_map: self.assertIn(prop, out) id = context.get_id(self.fs.filesystem_name) self.assertEqual(self.fs.filesystem_id, id) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_file_system_but_not_found(self): self.hook.append(self.fs.resp_get_but_not_found()) 
self.hook.append(self.fs.resp_get_without_value()) self.hook.append(self.fs.resp_get_error()) self.hook.append(self.fs.resp_get_but_not_found()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_ERROR, status) self.assertRaises(exception.EMCVnxXMLAPIError, context.get_id, self.fs.filesystem_name) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.fs.req_get()), mock.call(self.fs.req_get()), mock.call(self.fs.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_file_system_but_miss_property(self): self.hook.append(self.fs.resp_get_but_miss_property()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.fs.filesystem_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.fs.filesystem_name, context.filesystem_map) property_map = [ 'name', 'pools_id', 'volume_id', 'size', 'id', 'type', 'dataServicePolicies', ] for prop in property_map: self.assertIn(prop, out) self.assertIsNone(out['dataServicePolicies']) id = context.get_id(self.fs.filesystem_name) self.assertEqual(self.fs.filesystem_id, id) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_file_system(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.fs.filesystem_name) self.assertNotIn(self.fs.filesystem_name, context.filesystem_map) 
expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertNotIn(self.fs.filesystem_name, context.filesystem_map) def test_delete_file_system_but_not_found(self): self.hook.append(self.fs.resp_get_but_not_found()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.fs.filesystem_name) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_file_system_but_get_file_system_error(self): self.hook.append(self.fs.resp_get_error()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, self.fs.filesystem_name) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_file_system_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.fs.resp_delete_but_failed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, self.fs.filesystem_name) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.fs.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertIn(self.fs.filesystem_name, context.filesystem_map) def test_extend_file_system(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.fs.resp_task_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.extend(name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=self.fs.filesystem_new_size) expected_calls = [ 
mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_but_not_found(self): self.hook.append(self.fs.resp_get_but_not_found()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.extend, name=self.fs.filesystem_name, pool_name=self.fs.pool_name, new_size=self.fs.filesystem_new_size) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_with_small_size(self): self.hook.append(self.fs.resp_get_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.extend, name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=1) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_with_same_size(self): self.hook.append(self.fs.resp_get_succeed()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.extend(name=self.fs.filesystem_name, pool_name=self.pool.pool_name, new_size=self.fs.filesystem_size) expected_calls = [mock.call(self.fs.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_extend_file_system_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.pool.resp_get_succeed()) self.hook.append(self.fs.resp_extend_but_error()) context = self.manager.getStorageContext('FileSystem') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.extend, name=self.fs.filesystem_name, pool_name=self.pool.pool_name, 
new_size=self.fs.filesystem_new_size) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.pool.req_get()), mock.call(self.fs.req_extend()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_filesystem_from_snapshot(self): self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append(self.fs.output_copy_ckpt) self.ssh_hook.append(self.fs.output_info()) self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append() context = self.manager.getStorageContext('FileSystem') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.create_from_snapshot(self.fs.filesystem_name, self.snap.src_snap_name, self.fs.src_fileystems_name, self.pool.pool_name, self.vdm.vdm_name, self.mover.interconnect_id,) ssh_calls = [ mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_create_filesystem_from_snapshot_with_error(self): self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append(ex=processutils.ProcessExecutionError( stdout=self.fs.fake_output, stderr=None)) self.ssh_hook.append(self.fs.output_info()) self.ssh_hook.append() self.ssh_hook.append() self.ssh_hook.append() context = self.manager.getStorageContext('FileSystem') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.create_from_snapshot( self.fs.filesystem_name, self.snap.src_snap_name, self.fs.src_fileystems_name, self.pool.pool_name, self.vdm.vdm_name, self.mover.interconnect_id, ) ssh_calls = [ mock.call(self.fs.cmd_create_from_ckpt(), False), mock.call(self.mount.cmd_server_mount('ro'), False), mock.call(self.fs.cmd_copy_ckpt(), True), mock.call(self.fs.cmd_nas_fs_info(), False), 
mock.call(self.mount.cmd_server_umount(), False), mock.call(self.fs.cmd_delete(), False), mock.call(self.mount.cmd_server_mount('rw'), False), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) class MountPointTestCase(StorageObjectTestCaseBase): def setUp(self): super(MountPointTestCase, self).setUp() self.hook = utils.RequestSideEffect() def test_create_mount_point_on_vdm(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.mount.req_create(self.vdm.vdm_id, True)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mount_point_on_mover(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_mount_point_but_already_exist(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_create_but_already_exist()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, 
fs_name=self.fs.filesystem_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.mount.req_create(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_mount_point_invalid_mover_id(self, sleep_mock): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_create(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_mount_point_with_error(self): self.hook.append(self.fs.resp_get_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_error()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.create, mount_path=self.mount.path, fs_name=self.fs.filesystem_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.fs.req_get()), mock.call(self.vdm.req_get()), mock.call(self.mount.req_create(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mount_point_on_vdm(self): self.hook.append(self.vdm.resp_get_succeed()) 
self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mount_point_on_mover(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_mount_point_but_nonexistent(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_delete_but_nonexistent()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_delete_mount_point_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_task_succeed()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(mount_path=self.mount.path, mover_name=self.mover.mover_name, is_vdm=False) 
expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_delete(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_delete_mount_point_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_task_error()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, mount_path=self.mount.path, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_mount_points(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.mount.resp_get_succeed(self.vdm.vdm_id)) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_succeed(self.mover.mover_id, False)) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_OK, status) property_map = [ 'path', 'mover', 'moverIdIsVdm', 'fileSystem', ] for item in out: for prop in property_map: self.assertIn(prop, item) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_OK, status) property_map = [ 'path', 'mover', 'moverIdIsVdm', 'fileSystem', ] for item in out: for prop in property_map: self.assertIn(prop, item) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.mount.req_get(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def 
test_get_mount_points_but_not_found(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_without_value()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_get_mount_points_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_succeed(self.mover.mover_id, False)) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_OK, status) property_map = [ 'path', 'mover', 'moverIdIsVdm', 'fileSystem', ] for item in out: for prop in property_map: self.assertIn(prop, item) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_get_mount_points_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.mount.resp_get_error()) context = self.manager.getStorageContext('MountPoint') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.mover.mover_name, False) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [ mock.call(self.mover.req_get_ref()), 
mock.call(self.mount.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @ddt.ddt class VDMTestCase(StorageObjectTestCaseBase): def setUp(self): super(VDMTestCase, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = utils.SSHSideEffect() def test_create_vdm(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_task_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(self.vdm.vdm_name, self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_create_vdm_but_already_exist(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_create_but_already_exist()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create VDM which already exists. 
context.create(self.vdm.vdm_name, self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_vdm_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_task_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create VDM with invalid mover ID context.create(self.vdm.vdm_name, self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_vdm_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.vdm.resp_task_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Create VDM with invalid mover ID self.assertRaises(exception.EMCVnxXMLAPIError, context.create, name=self.vdm.vdm_name, mover_name=self.mover.mover_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.vdm.req_create()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm(self): self.hook.append(self.vdm.resp_get_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_OK, status) self.assertIn(self.vdm.vdm_name, context.vdm_map) property_map = [ 'name', 'id', 'state', 'host_mover_id', 'interfaces', ] for prop in property_map: self.assertIn(prop, out) expected_calls = 
[mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm_with_error(self): self.hook.append(self.vdm.resp_get_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Get VDM with error status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_ERROR, status) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm_but_not_found(self): self.hook.append(self.vdm.resp_get_without_value()) self.hook.append(self.vdm.resp_get_succeed('fake')) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) # Get VDM which does not exist status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) status, out = context.get(self.vdm.vdm_name) self.assertEqual(constants.STATUS_NOT_FOUND, status) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.vdm.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_vdm_id_with_error(self): self.hook.append(self.vdm.resp_get_error()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.get_id, self.vdm.vdm_name) expected_calls = [mock.call(self.vdm.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.vdm.resp_task_succeed()) context = self.manager.getStorageContext('VDM') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(self.vdm.vdm_name) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.vdm.req_delete()), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_vdm_but_not_found(self): 
        # Deleting a missing VDM is a no-op: only the query request is issued.
        self.hook.append(self.vdm.resp_get_but_not_found())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(self.vdm.vdm_name)

        expected_calls = [mock.call(self.vdm.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_vdm_but_failed_to_get_vdm(self):
        # A query failure during delete surfaces as EMCVnxXMLAPIError.
        self.hook.append(self.vdm.resp_get_error())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.vdm.vdm_name)

        expected_calls = [mock.call(self.vdm.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_vdm_with_error(self):
        # The delete task itself fails after a successful lookup.
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.vdm.resp_task_error())

        context = self.manager.getStorageContext('VDM')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.vdm.vdm_name)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.vdm.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_attach_detach_nfs_interface(self):
        # Attach then detach an NFS interface via the SSH connector.
        self.ssh_hook.append()
        self.ssh_hook.append()

        context = self.manager.getStorageContext('VDM')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        context.attach_nfs_interface(self.vdm.vdm_name,
                                     self.mover.interface_name2)
        context.detach_nfs_interface(self.vdm.vdm_name,
                                     self.mover.interface_name2)

        ssh_calls = [
            mock.call(self.vdm.cmd_attach_nfs_interface(), False),
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_detach_nfs_interface_with_error(self):
        # First detach fails and the interface is still listed -> raises.
        # Second detach fails but the interface is already gone -> no error.
        self.ssh_hook.append(ex=processutils.ProcessExecutionError(
            stdout=self.vdm.fake_output))
        self.ssh_hook.append(self.vdm.output_get_interfaces_vdm(
            self.mover.interface_name2))
        self.ssh_hook.append(ex=processutils.ProcessExecutionError(
            stdout=self.vdm.fake_output))
        self.ssh_hook.append(self.vdm.output_get_interfaces_vdm(
            nfs_interface=fakes.FakeData.interface_name1))

        context = self.manager.getStorageContext('VDM')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.detach_nfs_interface,
                          self.vdm.vdm_name,
                          self.mover.interface_name2)

        context.detach_nfs_interface(self.vdm.vdm_name,
                                     self.mover.interface_name2)

        ssh_calls = [
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
            mock.call(self.vdm.cmd_get_interfaces(), False),
            mock.call(self.vdm.cmd_detach_nfs_interface(), True),
            mock.call(self.vdm.cmd_get_interfaces(), False),
        ]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    @ddt.data(fakes.VDMTestData().output_get_interfaces_vdm(),
              fakes.VDMTestData().output_get_interfaces_nfs())
    def test_get_cifs_nfs_interface(self, fake_output):
        # Both listing formats expose 'cifs' and 'nfs' keys in the result.
        self.ssh_hook.append(fake_output)

        context = self.manager.getStorageContext('VDM')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        interfaces = context.get_interfaces(self.vdm.vdm_name)
        self.assertIsNotNone(interfaces['cifs'])
        self.assertIsNotNone(interfaces['nfs'])

        ssh_calls = [mock.call(self.vdm.cmd_get_interfaces(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)


class StoragePoolTestCase(StorageObjectTestCaseBase):
    """Tests for the StoragePool storage context."""

    def setUp(self):
        super(StoragePoolTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()

    def test_get_pool(self):
        self.hook.append(self.pool.resp_get_succeed())

        context = self.manager.getStorageContext('StoragePool')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.pool.pool_name)
        self.assertEqual(constants.STATUS_OK, status)
        # A successful lookup is cached in pool_map.
        self.assertIn(self.pool.pool_name, context.pool_map)
        property_map = [
            'name',
            'movers_id',
            'total_size',
            'used_size',
            'diskType',
            'dataServicePolicies',
            'id',
        ]
        for prop in property_map:
            self.assertIn(prop, out)
        expected_calls = [mock.call(self.pool.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_pool_with_error(self):
        # Error, empty and name-mismatch responses map to
        # STATUS_ERROR / STATUS_NOT_FOUND respectively.
        self.hook.append(self.pool.resp_get_error())
        self.hook.append(self.pool.resp_get_without_value())
        self.hook.append(self.pool.resp_get_succeed(name='other'))

        context = self.manager.getStorageContext('StoragePool')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.pool.pool_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        status, out = context.get(self.pool.pool_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        status, out = context.get(self.pool.pool_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [
            mock.call(self.pool.req_get()),
            mock.call(self.pool.req_get()),
            mock.call(self.pool.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_pool_id_with_error(self):
        self.hook.append(self.pool.resp_get_error())

        context = self.manager.getStorageContext('StoragePool')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.get_id,
                          self.pool.pool_name)

        expected_calls = [mock.call(self.pool.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)


class MoverTestCase(StorageObjectTestCaseBase):
    """Tests for the (physical) Mover storage context."""

    def setUp(self):
        super(MoverTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()
        self.ssh_hook = utils.SSHSideEffect()

    def test_get_mover(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.mover.mover_name, context.mover_map)
        property_map = [
            'name',
            'id',
            'Status',
            'version',
            'uptime',
            'role',
            'interfaces',
            'devices',
            'dns_domain',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        # Second call hits the cache (no extra request in expected_calls);
        # passing True forces a refresh from the backend.
        status, out = context.get(self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)

        status, out = context.get(self.mover.mover_name, True)
        self.assertEqual(constants.STATUS_OK, status)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_ref_not_found(self):
        # A ref response for a different mover name maps to NOT_FOUND.
        self.hook.append(self.mover.resp_get_ref_succeed(name='other'))

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_ref(self.mover.mover_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [mock.call(self.mover.req_get_ref())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_ref_with_error(self):
        self.hook.append(self.mover.resp_get_error())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_ref(self.mover.mover_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        expected_calls = [mock.call(self.mover.req_get_ref())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_ref_and_mover(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # get_ref() returns only the lightweight name/id pair.
        status, out = context.get_ref(self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)
        property_map = ['name', 'id']
        for prop in property_map:
            self.assertIn(prop, out)

        # get() returns the full mover detail and caches it.
        status, out = context.get(self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.mover.mover_name,
                      context.mover_map)
        property_map = [
            'name',
            'id',
            'Status',
            'version',
            'uptime',
            'role',
            'interfaces',
            'devices',
            'dns_domain',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_failed_to_get_mover_ref(self):
        self.hook.append(self.mover.resp_get_error())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.get,
                          self.mover.mover_name)

        expected_calls = [mock.call(self.mover.req_get_ref())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_but_not_found(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_without_value())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.mover.mover_name, force=True)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_error())

        context = self.manager.getStorageContext('Mover')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.mover.mover_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_interconnect_id(self):
        self.ssh_hook.append(self.mover.output_get_interconnect_id())

        context = self.manager.getStorageContext('Mover')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)
        conn_id = context.get_interconnect_id(self.mover.mover_name,
                                              self.mover.mover_name)
        self.assertEqual(self.mover.interconnect_id, conn_id)

        ssh_calls = [mock.call(self.mover.cmd_get_interconnect_id(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)

    def test_get_physical_devices(self):
        self.ssh_hook.append(self.mover.output_get_physical_devices())

        context = self.manager.getStorageContext('Mover')
        context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook)

        devices = context.get_physical_devices(self.mover.mover_name)
        self.assertIn(self.mover.device_name, devices)

        ssh_calls = [mock.call(self.mover.cmd_get_physical_devices(), False)]
        context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls)


class SnapshotTestCase(StorageObjectTestCaseBase):
    """Tests for the Snapshot (checkpoint) storage context."""

    def setUp(self):
        super(SnapshotTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()

    def test_create_snapshot(self):
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.snap.resp_task_succeed())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.snap.snapshot_name,
                       fs_name=self.fs.filesystem_name,
                       pool_id=self.pool.pool_id)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.snap.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_snapshot_but_already_exist(self):
        # An "already exists" task response does not make create() raise.
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.snap.resp_create_but_already_exist())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(name=self.snap.snapshot_name,
                       fs_name=self.fs.filesystem_name,
                       pool_id=self.pool.pool_id,
                       ckpt_size=self.snap.snapshot_size)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.snap.req_create_with_size()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_snapshot_with_error(self):
        self.hook.append(self.fs.resp_get_succeed())
        self.hook.append(self.snap.resp_task_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          name=self.snap.snapshot_name,
                          fs_name=self.fs.filesystem_name,
                          pool_id=self.pool.pool_id,
                          ckpt_size=self.snap.snapshot_size)

        expected_calls = [
            mock.call(self.fs.req_get()),
            mock.call(self.snap.req_create_with_size()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot(self):
        self.hook.append(self.snap.resp_get_succeed())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.snap.snapshot_name)
        self.assertEqual(constants.STATUS_OK, status)
        # Successful lookups are cached in snap_map.
        self.assertIn(self.snap.snapshot_name, context.snap_map)
        property_map = [
            'name',
            'id',
            'checkpointOf',
            'state',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot_but_not_found(self):
        self.hook.append(self.snap.resp_get_without_value())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.snap.snapshot_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot_with_error(self):
        self.hook.append(self.snap.resp_get_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(self.snap.snapshot_name)
        self.assertEqual(constants.STATUS_ERROR, status)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_snapshot(self):
        self.hook.append(self.snap.resp_get_succeed())
        self.hook.append(self.snap.resp_task_succeed())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(self.snap.snapshot_name)
        # A successful delete also evicts the cache entry.
        self.assertNotIn(self.snap.snapshot_name, context.snap_map)

        expected_calls = [
            mock.call(self.snap.req_get()),
            mock.call(self.snap.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_snapshot_failed_to_get_snapshot(self):
        self.hook.append(self.snap.resp_get_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.snap.snapshot_name)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_snapshot_but_not_found(self):
        # Deleting a missing snapshot is a no-op.
        self.hook.append(self.snap.resp_get_without_value())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(self.snap.snapshot_name)
        self.assertNotIn(self.snap.snapshot_name, context.snap_map)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_snapshot_with_error(self):
        self.hook.append(self.snap.resp_get_succeed())
        self.hook.append(self.snap.resp_task_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          self.snap.snapshot_name)

        expected_calls = [
            mock.call(self.snap.req_get()),
            mock.call(self.snap.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot_id(self):
        self.hook.append(self.snap.resp_get_succeed())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        id = context.get_id(self.snap.snapshot_name)
        self.assertEqual(self.snap.snapshot_id, id)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_snapshot_id_with_error(self):
        self.hook.append(self.snap.resp_get_error())

        context = self.manager.getStorageContext('Snapshot')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.get_id,
                          self.snap.snapshot_name)

        expected_calls = [mock.call(self.snap.req_get())]
        context.conn['XML'].request.assert_has_calls(expected_calls)


@ddt.ddt
class MoverInterfaceTestCase(StorageObjectTestCaseBase):
    """Tests for the MoverInterface storage context."""

    def setUp(self):
        super(MoverInterfaceTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()

    def test_create_mover_interface(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        context.create(interface)

        # Over-long names are truncated to 31 characters on creation.
        interface['name'] = self.mover.long_interface_name
        context.create(interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
            mock.call(self.mover.req_create_interface(
                self.mover.long_interface_name[:31])),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_mover_interface_name_already_exist(self):
        # A duplicate-name response is tolerated by create().
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(
            self.mover.resp_create_interface_but_name_already_exist())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        context.create(interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_mover_interface_ip_already_exist(self):
        # A duplicate-IP response is tolerated by create().
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(
            self.mover.resp_create_interface_but_ip_already_exist())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        context.create(interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @ddt.data(fakes.MoverTestData().resp_task_succeed(),
              fakes.MoverTestData().resp_task_error())
    def test_create_mover_interface_with_conflict_vlan_id(self, xml_resp):
        # A conflicting VLAN id triggers a rollback delete and then raises,
        # regardless of whether the rollback delete succeeds or fails.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(
            self.mover.resp_create_interface_with_conflicted_vlan_id())
        self.hook.append(xml_resp)

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_mover_interface_invalid_mover_id(self, sleep_mock):
        # An invalid-mover-id response refreshes the mover ref and retries.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        context.create(interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_mover_interface_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_error())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        interface = {
            'name': self.mover.interface_name1,
            'device_name': self.mover.device_name,
            'ip': self.mover.ip_address1,
            'mover_name': self.mover.mover_name,
            'net_mask': self.mover.net_mask,
            'vlan_id': self.mover.vlan_id,
        }
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          interface)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_create_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_interface(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.mover.interface_name1,
                                  mover_name=self.mover.mover_name)
        self.assertEqual(constants.STATUS_OK, status)
        property_map = [
            'name',
            'device',
            'up',
            'ipVersion',
            'netMask',
            'ipAddress',
            'vlanid',
        ]
        for prop in property_map:
            self.assertIn(prop, out)

        # Long names are also accepted for lookup.
        context.get(name=self.mover.long_interface_name,
                    mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_mover_interface_not_found(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_get_without_value())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.mover.interface_name1,
                                  mover_name=self.mover.mover_name)
        self.assertEqual(constants.STATUS_NOT_FOUND, status)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_get()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_mover_interface(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(ip_addr=self.mover.ip_address1,
                       mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_mover_interface_but_nonexistent(self):
        # Deleting a nonexistent interface does not raise.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_delete_interface_but_nonexistent())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(ip_addr=self.mover.ip_address1,
                       mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_delete_mover_interface_invalid_mover_id(self, sleep_mock):
        # An invalid-mover-id response refreshes the mover ref and retries.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_succeed())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(ip_addr=self.mover.ip_address1,
                       mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_delete_mover_interface_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.mover.resp_task_error())

        context = self.manager.getStorageContext('MoverInterface')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.delete,
                          ip_addr=self.mover.ip_address1,
                          mover_name=self.mover.mover_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.mover.req_delete_interface()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)


class DNSDomainTestCase(StorageObjectTestCaseBase):
    """Tests for the DNSDomain storage context."""

    def setUp(self):
        super(DNSDomainTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()
    def test_create_dns_domain(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(mover_name=self.mover.mover_name,
                       name=self.dns.domain_name,
                       servers=self.dns.dns_ip_address)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_create_dns_domain_invalid_mover_id(self, sleep_mock):
        # An invalid-mover-id response refreshes the mover ref and retries.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.create(mover_name=self.mover.mover_name,
                       name=self.dns.domain_name,
                       servers=self.dns.dns_ip_address)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_dns_domain_with_error(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_error())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # NOTE(review): 'name' uses self.mover.domain_name here while
        # test_create_dns_domain uses self.dns.domain_name -- presumably
        # both fakes carry the same value; confirm against the fakes module.
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          mover_name=self.mover.mover_name,
                          name=self.mover.domain_name,
                          servers=self.dns.dns_ip_address)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_create()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_delete_dns_domain(self):
        # The second delete gets a task error response but does not raise.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())
        self.hook.append(self.dns.resp_task_error())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(mover_name=self.mover.mover_name,
                       name=self.mover.domain_name)

        context.delete(mover_name=self.mover.mover_name,
                       name=self.mover.domain_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_delete()),
            mock.call(self.dns.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_delete_dns_domain_invalid_mover_id(self, sleep_mock):
        # An invalid-mover-id response refreshes the mover ref and retries.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.dns.resp_task_succeed())

        context = self.manager.getStorageContext('DNSDomain')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        context.delete(mover_name=self.mover.mover_name,
                       name=self.mover.domain_name)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_delete()),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.dns.req_delete()),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)


class CIFSServerTestCase(StorageObjectTestCaseBase):
    """Tests for the CIFSServer storage context."""

    def setUp(self):
        super(CIFSServerTestCase, self).setUp()
        self.hook = utils.RequestSideEffect()

    def test_create_cifs_server(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.cifs_server.resp_task_error())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create CIFS server on mover
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.create(cifs_server_args)

        # Create CIFS server on VDM
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        context.create(cifs_server_args)

        # Create CIFS server on VDM (task error followed by a successful
        # get -- the server already exists, so create() still succeeds)
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        context.create(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_create(self.mover.mover_id,
                                                  False)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_create_cifs_server_already_exist(self):
        # NOTE(review): this test prepares hooks and a context but never
        # calls context.create() nor asserts anything -- it looks truncated.
        # Confirm the intended body against the driver's test coverage.
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_error())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

    @mock.patch('time.sleep')
    def test_create_cifs_server_invalid_mover_id(self, sleep_mock):
        # An invalid-mover-id response refreshes the mover ref and retries.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create CIFS server on mover
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.create(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_create(self.mover.mover_id,
                                                  False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_create(self.mover.mover_id,
                                                  False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_create_cifs_server_with_error(self):
        # Task error plus a failed existence check -> raises.
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_error())
        self.hook.append(self.cifs_server.resp_get_error())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Create CIFS server on VDM
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name,
            'interface_ip': self.cifs_server.ip_address1,
            'domain_name': self.cifs_server.domain_name,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
            'is_vdm': True,
        }
        self.assertRaises(exception.EMCVnxXMLAPIError,
                          context.create,
                          cifs_server_args)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_get_all_cifs_server(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_all(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.vdm.vdm_name, context.cifs_server_map)

        # Get CIFS server from the cache
        status, out = context.get_all(self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.vdm.vdm_name, context.cifs_server_map)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    @mock.patch('time.sleep')
    def test_get_all_cifs_server_invalid_mover_id(self, sleep_mock):
        # An invalid-mover-id response refreshes the mover ref and retries.
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_invalid_mover_id())
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.mover.mover_id, is_vdm=False, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get_all(self.mover.mover_name, False)
        self.assertEqual(constants.STATUS_OK, status)
        self.assertIn(self.mover.mover_name, context.cifs_server_map)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id,
                                               False)),
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_get(self.mover.mover_id,
                                               False)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)
        self.assertTrue(sleep_mock.called)

    def test_get_cifs_server(self):
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_get_succeed(
            mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        status, out = context.get(name=self.cifs_server.cifs_server_name,
                                  mover_name=self.vdm.vdm_name)
        self.assertEqual(constants.STATUS_OK, status)
        property_map = {
            'name',
            'compName',
            'Aliases',
            'type',
            'interfaces',
            'domain',
            'domainJoined',
            'mover',
            'moverIdIsVdm',
        }
        for prop in property_map:
            self.assertIn(prop, out)

        # Second lookup is served from the cache (no extra request below).
        context.get(name=self.cifs_server.cifs_server_name,
                    mover_name=self.vdm.vdm_name)

        expected_calls = [
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
        ]
        context.conn['XML'].request.assert_has_calls(expected_calls)

    def test_modify_cifs_server(self):
        self.hook.append(self.mover.resp_get_ref_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())
        self.hook.append(self.vdm.resp_get_succeed())
        self.hook.append(self.cifs_server.resp_task_succeed())

        context = self.manager.getStorageContext('CIFSServer')
        context.conn['XML'].request = utils.EMCMock(side_effect=self.hook)

        # Join the domain on the physical mover ...
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': True,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.mover.mover_name,
            'is_vdm': False,
        }
        context.modify(cifs_server_args)

        # ... then unjoin on the VDM; 'is_vdm' is omitted here and the
        # expected request below shows it resolving to is_vdm=True.
        cifs_server_args = {
            'name': self.cifs_server.cifs_server_name[-14:],
            'join_domain': False,
            'user_name': self.cifs_server.domain_user,
            'password': self.cifs_server.domain_password,
            'mover_name': self.vdm.vdm_name,
        }
        context.modify(cifs_server_args)

        expected_calls = [
            mock.call(self.mover.req_get_ref()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.mover.mover_id, is_vdm=False,
                join_domain=True)),
            mock.call(self.vdm.req_get()),
            mock.call(self.cifs_server.req_modify(
                mover_id=self.vdm.vdm_id, is_vdm=True,
join_domain=False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_modify_cifs_server_but_unjoin_domain(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_modify_but_unjoin_domain()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': False, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, } context.modify(cifs_server_args) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_modify_cifs_server_but_already_join_domain(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append( self.cifs_server.resp_modify_but_already_join_domain()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': True, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, } context.modify(cifs_server_args) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_modify_cifs_server_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_task_succeed()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = 
utils.EMCMock(side_effect=self.hook) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': True, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.mover.mover_name, 'is_vdm': False, } context.modify(cifs_server_args) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_modify( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_modify( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_modify_cifs_server_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_task_error()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) cifs_server_args = { 'name': self.cifs_server.cifs_server_name[-14:], 'join_domain': False, 'user_name': self.cifs_server.domain_user, 'password': self.cifs_server.domain_password, 'mover_name': self.vdm.vdm_name, } self.assertRaises(exception.EMCVnxXMLAPIError, context.modify, cifs_server_args) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_modify( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_cifs_server(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)) self.hook.append(self.cifs_server.resp_task_succeed()) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)) self.hook.append(self.cifs_server.resp_task_succeed()) context = 
self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(computer_name=self.cifs_server.cifs_server_name, mover_name=self.mover.mover_name, is_vdm=False) context.delete(computer_name=self.cifs_server.cifs_server_name, mover_name=self.vdm.vdm_name) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_get(self.mover.mover_id, False)), mock.call(self.cifs_server.req_delete(self.mover.mover_id, False)), mock.call(self.vdm.req_get()), mock.call(self.cifs_server.req_get(self.vdm.vdm_id)), mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_cifs_server_but_not_found(self): self.hook.append(self.mover.resp_get_without_value()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_get_without_value()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(computer_name=self.cifs_server.cifs_server_name, mover_name=self.mover.mover_name, is_vdm=False) context.delete(computer_name=self.cifs_server.cifs_server_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_get(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_cifs_server_with_error(self): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_server.resp_get_succeed( mover_id=self.mover.mover_id, is_vdm=False, join_domain=True)) self.hook.append(self.cifs_server.resp_task_error()) context = self.manager.getStorageContext('CIFSServer') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, computer_name=self.cifs_server.cifs_server_name, 
mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_server.req_get(self.mover.mover_id, False)), mock.call(self.cifs_server.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) class CIFSShareTestCase(StorageObjectTestCaseBase): def setUp(self): super(CIFSShareTestCase, self).setUp() self.hook = utils.RequestSideEffect() self.ssh_hook = utils.SSHSideEffect() def test_create_cifs_share(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.cifs_share.share_name, server_name=self.cifs_share.cifs_server_name[-14:], mover_name=self.vdm.vdm_name, is_vdm=True) context.create(name=self.cifs_share.share_name, server_name=self.cifs_share.cifs_server_name[-14:], mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_create_cifs_share_invalid_mover_id(self, sleep_mock): self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.create(name=self.cifs_share.share_name, server_name=self.cifs_share.cifs_server_name[-14:], mover_name=self.mover.mover_name, is_vdm=False) expected_calls = 
[ mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_create(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_create(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_create_cifs_share_with_error(self): self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_share.resp_task_error()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.create, name=self.cifs_share.share_name, server_name=self.cifs_share.cifs_server_name[-14:], mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_create(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_delete_cifs_share(self): self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) self.hook.append(self.cifs_share.resp_get_succeed(self.mover.mover_id, False)) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name, is_vdm=True) context.delete(name=self.cifs_share.share_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), mock.call(self.cifs_share.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def 
test_delete_cifs_share_not_found(self): self.hook.append(self.cifs_share.resp_get_error()) self.hook.append(self.cifs_share.resp_get_without_value()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name, is_vdm=True) context.delete(name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.cifs_share.req_get()), ] context.conn['XML'].request.assert_has_calls(expected_calls) @mock.patch('time.sleep') def test_delete_cifs_share_invalid_mover_id(self, sleep_mock): self.hook.append(self.cifs_share.resp_get_succeed(self.mover.mover_id, False)) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_invalid_mover_id()) self.hook.append(self.mover.resp_get_ref_succeed()) self.hook.append(self.cifs_share.resp_task_succeed()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.delete(name=self.cifs_share.share_name, mover_name=self.mover.mover_name, is_vdm=False) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_delete(self.mover.mover_id, False)), mock.call(self.mover.req_get_ref()), mock.call(self.cifs_share.req_delete(self.mover.mover_id, False)), ] context.conn['XML'].request.assert_has_calls(expected_calls) self.assertTrue(sleep_mock.called) def test_delete_cifs_share_with_error(self): self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) self.hook.append(self.vdm.resp_get_succeed()) self.hook.append(self.cifs_share.resp_task_error()) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) 
self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name, is_vdm=True) expected_calls = [ mock.call(self.cifs_share.req_get()), mock.call(self.vdm.req_get()), mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)), ] context.conn['XML'].request.assert_has_calls(expected_calls) def test_get_cifs_share(self): self.hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id)) context = self.manager.getStorageContext('CIFSShare') context.conn['XML'].request = utils.EMCMock(side_effect=self.hook) context.get(self.cifs_share.share_name) expected_calls = [mock.call(self.cifs_share.req_get())] context.conn['XML'].request.assert_has_calls(expected_calls) def test_disable_share_access(self): self.ssh_hook.append('Command succeeded') context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.disable_share_access(share_name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_disable_share_access_with_error(self): self.ssh_hook.append(ex=processutils.ProcessExecutionError( stdout=self.cifs_share.fake_output)) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.disable_share_access, share_name=self.cifs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access(self): self.ssh_hook.append(self.cifs_share.output_allow_access()) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.allow_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, 
user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access_duplicate_ACE(self): expt_dup_ace = processutils.ProcessExecutionError( stdout=self.cifs_share.output_allow_access_but_duplicate_ace()) self.ssh_hook.append(ex=expt_dup_ace) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.allow_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access_with_error(self): expt_err = processutils.ProcessExecutionError( self.cifs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.allow_share_access, mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [mock.call(self.cifs_share.cmd_change_access(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_access(self): self.ssh_hook.append('Command succeeded') context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.deny_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [ 
mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_access_no_ace(self): expt_no_ace = processutils.ProcessExecutionError( stdout=self.cifs_share.output_deny_access_but_no_ace()) self.ssh_hook.append(ex=expt_no_ace) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.deny_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_access_but_no_user_found(self): expt_no_user = processutils.ProcessExecutionError( stdout=self.cifs_share.output_deny_access_but_no_user_found()) self.ssh_hook.append(ex=expt_no_user) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.deny_share_access(mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_access_with_error(self): expt_err = processutils.ProcessExecutionError( self.cifs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.deny_share_access, mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, user_name=self.cifs_server.domain_user, domain=self.cifs_server.domain_name, access=constants.CIFS_ACL_FULLCONTROL) 
ssh_calls = [ mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_share_access(self): self.ssh_hook.append(fakes.FakeData.cifs_access) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) ret = context.get_share_access( mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name) ssh_calls = [ mock.call(self.cifs_share.cmd_get_access(), True), ] self.assertEqual(2, len(ret)) self.assertEqual(constants.CIFS_ACL_FULLCONTROL, ret['administrator']) self.assertEqual(constants.CIFS_ACL_FULLCONTROL, ret['guest']) context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_share_access_failed(self): expt_err = processutils.ProcessExecutionError( stdout=self.nfs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.get_share_access, mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name) ssh_calls = [ mock.call(self.cifs_share.cmd_get_access(), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_clear_share_access_has_white_list(self): self.ssh_hook.append(fakes.FakeData.cifs_access) self.ssh_hook.append('Command succeeded') context = self.manager.getStorageContext('CIFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) to_remove = context.clear_share_access( mover_name=self.vdm.vdm_name, share_name=self.cifs_share.share_name, domain=self.cifs_server.domain_name, white_list_users=['guest']) ssh_calls = [ mock.call(self.cifs_share.cmd_get_access(), True), mock.call(self.cifs_share.cmd_change_access(action='revoke'), True), ] self.assertEqual({'administrator'}, to_remove) context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) class NFSShareTestCase(StorageObjectTestCaseBase): 
def setUp(self): super(NFSShareTestCase, self).setUp() self.ssh_hook = utils.SSHSideEffect() def test_create_nfs_share(self): self.ssh_hook.append(self.nfs_share.output_create()) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.create(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_create_nfs_share_with_error(self): expt_err = processutils.ProcessExecutionError( stdout=self.nfs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.create, name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_delete_nfs_share(self): self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_delete_succeed()) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.delete(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_delete_nfs_share_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.delete(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = 
[mock.call(self.nfs_share.cmd_get(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) @mock.patch('time.sleep') def test_delete_nfs_share_locked(self, sleep_mock): self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) expt_locked = processutils.ProcessExecutionError( stdout=self.nfs_share.output_delete_but_locked()) self.ssh_hook.append(ex=expt_locked) self.ssh_hook.append(self.nfs_share.output_delete_succeed()) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.delete(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), mock.call(self.nfs_share.cmd_delete(), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) self.assertTrue(sleep_mock.called) def test_delete_nfs_share_with_error(self): self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) expt_err = processutils.ProcessExecutionError( stdout=self.nfs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.delete, name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_delete(), True), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_nfs_share(self): self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.get(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) # Get NFS share 
from cache context.get(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_nfs_share_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) self.ssh_hook.append(self.nfs_share.output_get_but_not_found()) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) context.get(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) context.get(name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get(), False), mock.call(self.nfs_share.cmd_get(), False), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_get_nfs_share_with_error(self): expt_err = processutils.ProcessExecutionError( stdout=self.nfs_share.fake_output) self.ssh_hook.append(ex=expt_err) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.get, name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_get(), False)] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access(self): rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts) rw_hosts.append(self.nfs_share.nfs_host_ip) ro_hosts = copy.deepcopy(self.nfs_share.ro_hosts) ro_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) 
self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RO) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) context.allow_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)), mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_allow_share_access_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.allow_share_access, 
share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name, access_level=const.ACCESS_LEVEL_RW) ssh_calls = [mock.call(self.nfs_share.cmd_get())] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_rw_share_access(self): rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts) rw_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.deny_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts, self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_clear_share_access(self): hosts = ['192.168.1.1', '192.168.1.3'] self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=[hosts[0]], ro_hosts=[hosts[1]])) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.clear_share_access(share_name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name, white_list_hosts=hosts) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access( rw_hosts=[hosts[0]], ro_hosts=[hosts[1]])), mock.call(self.nfs_share.cmd_get()), ] 
context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_ro_share_access(self): ro_hosts = copy.deepcopy(self.nfs_share.ro_hosts) ro_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=ro_hosts)) self.ssh_hook.append(self.nfs_share.output_set_access_success()) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=self.nfs_share.rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) context.deny_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) context.deny_share_access(share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts, self.nfs_share.ro_hosts)), mock.call(self.nfs_share.cmd_get()), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_share_not_found(self): expt_not_found = processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.deny_share_access, share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [mock.call(self.nfs_share.cmd_get())] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_deny_rw_share_with_error(self): rw_hosts = copy.deepcopy(self.nfs_share.rw_hosts) rw_hosts.append(self.nfs_share.nfs_host_ip) self.ssh_hook.append(self.nfs_share.output_get_succeed( rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts)) expt_not_found = 
processutils.ProcessExecutionError( stdout=self.nfs_share.output_get_but_not_found()) self.ssh_hook.append(ex=expt_not_found) context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = utils.EMCNFSShareMock( side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.deny_share_access, share_name=self.nfs_share.share_name, host_ip=self.nfs_share.nfs_host_ip, mover_name=self.vdm.vdm_name) ssh_calls = [ mock.call(self.nfs_share.cmd_get()), mock.call(self.nfs_share.cmd_set_access(self.nfs_share.rw_hosts, self.nfs_share.ro_hosts)), ] context.conn['SSH'].run_ssh.assert_has_calls(ssh_calls) def test_clear_share_access_failed_to_get_share(self): self.ssh_hook.append("no output.") context = self.manager.getStorageContext('NFSShare') context.conn['SSH'].run_ssh = mock.Mock(side_effect=self.ssh_hook) self.assertRaises(exception.EMCVnxXMLAPIError, context.clear_share_access, share_name=self.nfs_share.share_name, mover_name=self.vdm.vdm_name, white_list_hosts=None) context.conn['SSH'].run_ssh.assert_called_once_with( self.nfs_share.cmd_get(), False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dell_emc/test_driver.py0000664000175000017500000003205000000000000024711 0ustar00zuulzuul00000000000000# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from stevedore import extension from manila.share import configuration as conf from manila.share.drivers.dell_emc import driver as emcdriver from manila.share.drivers.dell_emc.plugins import base from manila import test class FakeConnection(base.StorageConnection): def __init__(self, *args, **kwargs): self.ipv6_implemented = True self.dhss_mandatory_security_service_association = {} pass @property def driver_handles_share_servers(self): return True def create_share(self, context, share, share_server): """Is called to create share.""" def create_snapshot(self, context, snapshot, share_server): """Is called to create snapshot.""" def delete_share(self, context, share, share_server): """Is called to remove share.""" def extend_share(self, share, new_size, share_server): """Is called to extend share.""" def shrink_share(self, share, new_size, share_server): """Is called to shrink share.""" def delete_snapshot(self, context, snapshot, share_server): """Is called to remove snapshot.""" def ensure_share(self, context, share, share_server): """Invoked to sure that share is exported.""" def allow_access(self, context, share, access, share_server): """Allow access to the share.""" def deny_access(self, context, share, access, share_server): """Deny access to the share.""" def raise_connect_error(self): """Check for setup error.""" def connect(self, emc_share_driver, context): """Any initialization the share driver does while starting.""" def update_share_stats(self, stats_dict): """Add key/values to stats_dict.""" def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" def setup_server(self, network_info, metadata=None): """Set up and configures share server with given network parameters.""" def teardown_server(self, server_details, security_services=None): """Teardown share server.""" def get_backend_info(self, context): """Get driver and array configuration parameters.""" def ensure_shares(self, 
context, shares): """Invoked to ensure that shares are exported.""" class FakeConnection_powermax(FakeConnection): def __init__(self, *args, **kwargs): self.dhss_mandatory_security_service_association = {} self.revert_to_snap_support = False self.shrink_share_support = False self.manage_existing_support = False self.manage_existing_with_server_support = False self.manage_existing_snapshot_support = False self.manage_snapshot_with_server_support = False self.manage_server_support = False self.get_share_server_network_info_support = False pass FAKE_BACKEND = 'fake_backend' FAKE_BACKEND_POWERMAX = 'powermax' class FakeEMCExtensionManager(object): def __init__(self): self.extensions = [] self.extensions.append( extension.Extension(name=FAKE_BACKEND, plugin=FakeConnection, entry_point=None, obj=None)) self.extensions.append( extension.Extension(name=FAKE_BACKEND_POWERMAX, plugin=FakeConnection_powermax, entry_point=None, obj=None)) class EMCShareFrameworkTestCase(test.TestCase): @mock.patch('stevedore.extension.ExtensionManager', mock.Mock(return_value=FakeEMCExtensionManager())) def setUp(self): super(EMCShareFrameworkTestCase, self).setUp() self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.share_backend_name = FAKE_BACKEND self.mock_object(self.configuration, 'safe_get', self._fake_safe_get) self.driver = emcdriver.EMCShareDriver( configuration=self.configuration) self.configuration_powermax = conf.Configuration(None) self.configuration_powermax.append_config_values = \ mock.Mock(return_value=0) self.configuration_powermax.share_backend_name = FAKE_BACKEND_POWERMAX self.mock_object(self.configuration_powermax, 'safe_get', self._fake_safe_get_powermax) self.driver_powermax = emcdriver.EMCShareDriver( configuration=self.configuration_powermax) def test_driver_setup(self): FakeConnection.connect = mock.Mock() self.driver.do_setup(None) self.assertIsInstance(self.driver.plugin, 
FakeConnection, "Not an instance of FakeConnection") FakeConnection.connect.assert_called_with(self.driver, None) def test_update_share_stats(self): data = {} self.driver.plugin = mock.Mock() self.driver.plugin.get_default_filter_function.return_value = None self.driver._update_share_stats() data["share_backend_name"] = FAKE_BACKEND data["driver_handles_share_servers"] = True data["vendor_name"] = 'Dell EMC' data["driver_version"] = '1.0' data["storage_protocol"] = 'NFS_CIFS' data['total_capacity_gb'] = 'unknown' data['free_capacity_gb'] = 'unknown' data['reserved_percentage'] = 0 data['reserved_snapshot_percentage'] = 0 data['reserved_share_extend_percentage'] = 0 data['qos'] = False data['pools'] = None data['snapshot_support'] = True data['create_share_from_snapshot_support'] = True data['revert_to_snapshot_support'] = False data['share_group_stats'] = {'consistent_snapshot_support': None} data['mount_snapshot_support'] = False data['replication_domain'] = None data['filter_function'] = None data['goodness_function'] = None data['mount_point_name_support'] = False data['snapshot_support'] = True data['create_share_from_snapshot_support'] = True data['ipv4_support'] = True data['ipv6_support'] = False data['max_shares_per_share_server'] = -1 data['max_share_server_size'] = -1 data['security_service_update_support'] = False data['share_server_multiple_subnet_support'] = False data['network_allocation_update_support'] = False data['share_replicas_migration_support'] = False data['encryption_support'] = None self.assertEqual(data, self.driver._stats) def _fake_safe_get(self, value): if value in ['emc_share_backend', 'share_backend_name']: return FAKE_BACKEND elif value == 'driver_handles_share_servers': return True return None def _fake_safe_get_powermax(self, value): if value in ['emc_share_backend', 'share_backend_name']: return FAKE_BACKEND_POWERMAX elif value == 'driver_handles_share_servers': return True return None def test_support_manage(self): share = 
mock.Mock() driver_options = mock.Mock() share_server = mock.Mock() snapshot = mock.Mock() context = mock.Mock() identifier = mock.Mock() self.driver.plugin = mock.Mock() self.driver.manage_existing_support = True self.driver.manage_existing_with_server_support = True self.driver.manage_existing_snapshot_support = True self.driver.manage_snapshot_with_server_support = True self.driver.manage_server_support = True self.driver.manage_existing(share, driver_options) self.driver.manage_existing_with_server(share, driver_options, share_server) self.driver.manage_existing_snapshot(snapshot, driver_options) self.driver.manage_existing_snapshot_with_server(snapshot, driver_options, share_server) self.driver.manage_server(context, share_server, identifier, driver_options) self.driver.get_share_server_network_info_support = True self.driver.get_share_server_network_info(context, share_server, identifier, driver_options) self.driver.create_share(context, share, share_server) self.driver.create_share_from_snapshot(context, share, snapshot, share_server) self.driver.extend_share(share, 20, share_server) self.driver.shrink_share_support = True self.driver.shrink_share(share, 20, share_server) self.driver.create_snapshot(context, snapshot, share_server) self.driver.delete_share(context, share, share_server) self.driver.delete_snapshot(context, snapshot, share_server) self.driver.ensure_share(context, share, share_server) self.driver.get_backend_info(context) self.driver.ensure_shares(context, [share]) access = mock.Mock() self.driver.allow_access(context, share, access, share_server) self.driver.deny_access(context, share, access, share_server) self.driver.update_access(context, share, None, None, None, share_server) self.driver.check_for_setup_error() self.driver.get_network_allocations_number() self.driver._teardown_server(None) self.driver.revert_to_snap_support = True share_access_rules = mock.Mock() snapshot_access_rules = mock.Mock() self.driver.revert_to_snapshot(context, 
snapshot, share_access_rules, snapshot_access_rules, share_server) self.driver.ipv6_implemented = False self.driver.get_configured_ip_versions() def test_not_support_manage(self): share = mock.Mock() driver_options = {} share_server = mock.Mock() snapshot = mock.Mock() identifier = mock.Mock() self.driver.plugin = mock.Mock() result = self.driver.manage_existing(share, driver_options) self.assertIsInstance(result, NotImplementedError) result = self.driver.manage_existing_with_server( share, driver_options, share_server) self.assertIsInstance(result, NotImplementedError) result = self.driver.manage_existing_snapshot(snapshot, driver_options) self.assertIsInstance(result, NotImplementedError) result = self.driver.manage_existing_snapshot_with_server( snapshot, driver_options, share_server) self.assertIsInstance(result, NotImplementedError) result = self.driver.manage_server(None, share_server, identifier, driver_options) self.assertIsInstance(result, NotImplementedError) result = self.driver.get_share_server_network_info(None, share_server, identifier, driver_options) self.assertIsInstance(result, NotImplementedError) self.assertRaises(NotImplementedError, self.driver.shrink_share, share, 20, share_server) share_access_rules = mock.Mock() snapshot_access_rules = mock.Mock() self.assertRaises(NotImplementedError, self.driver.revert_to_snapshot, None, snapshot, share_access_rules, snapshot_access_rules, share_server) def test_unmanage_manage(self): share = {} server_details = {} share_server = mock.Mock() snapshot = mock.Mock() self.driver.plugin = mock.Mock(share) self.driver.unmanage(share) self.driver.unmanage_with_server(share, share_server) self.driver.unmanage_snapshot(snapshot) self.driver.unmanage_snapshot_with_server(snapshot, share_server) self.driver.unmanage_server(server_details) def test_get_default_filter_function(self): expected = None actual = self.driver.get_default_filter_function() self.assertEqual(expected, actual) def test_setup_server(self): 
network_info = [{}] expected = None result = self.driver._setup_server(network_info) self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/dummy.py0000664000175000017500000012702600000000000021756 0ustar00zuulzuul00000000000000# Copyright 2016 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Dummy share driver for testing Manila APIs and other interfaces. 
This driver simulates support of: - Both available driver modes: DHSS=True/False - NFS and CIFS protocols - IP access for NFS shares and USER access for CIFS shares - CIFS shares in DHSS=True driver mode - Creation and deletion of share snapshots - Share replication (readable) - Share migration - Consistency groups - Resize of a share (extend/shrink) """ import functools import time from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import timeutils from manila.common import constants from manila import exception from manila.i18n import _ from manila.keymgr import barbican as barbican_api from manila.share import configuration from manila.share import driver from manila.share.manager import share_manager_opts # noqa from manila.share import utils as share_utils LOG = log.getLogger(__name__) dummy_opts = [ cfg.FloatOpt( "dummy_driver_default_driver_method_delay", help="Defines default time delay in seconds for each dummy driver " "method. To redefine some specific method delay use other " "'dummy_driver_driver_methods_delays' config opt. Optional.", default=2.0, min=0, ), cfg.DictOpt( "dummy_driver_driver_methods_delays", help="It is dictionary-like config option, that consists of " "driver method names as keys and integer/float values that are " "time delay in seconds. 
Optional.", default={ "ensure_share": "1.05", "create_share": "3.98", "get_pool": "0.5", "do_setup": "0.05", "_get_pools_info": "0.1", "_update_share_stats": "0.3", "create_replica": "3.99", "delete_replica": "2.98", "promote_replica": "0.75", "update_replica_state": "0.85", "create_replicated_snapshot": "4.15", "delete_replicated_snapshot": "3.16", "update_replicated_snapshot": "1.17", "migration_start": 1.01, "migration_continue": 1.02, # it will be called 2 times "migration_complete": 1.03, "migration_cancel": 1.04, "migration_get_progress": 1.05, "migration_check_compatibility": 0.05, "create_backup": "1.50", "restore_backup": "1.50", "update_share_network_subnet_from_metadata": "0.5", }, ), ] CONF = cfg.CONF def slow_me_down(f): @functools.wraps(f) def wrapped_func(self, *args, **kwargs): sleep_time = self.configuration.safe_get( "dummy_driver_driver_methods_delays").get( f.__name__, self.configuration.safe_get( "dummy_driver_default_driver_method_delay") ) time.sleep(float(sleep_time)) return f(self, *args, **kwargs) return wrapped_func def get_backend_configuration(backend_name): config_stanzas = CONF.list_all_sections() if backend_name not in config_stanzas: msg = _("Could not find backend stanza %(backend_name)s in " "configuration which is required for share replication and " "migration. 
Available stanzas are %(stanzas)s") params = { "stanzas": config_stanzas, "backend_name": backend_name, } raise exception.BadConfigurationException(reason=msg % params) config = configuration.Configuration( driver.share_opts, config_group=backend_name) config.append_config_values(dummy_opts) config.append_config_values(share_manager_opts) config.append_config_values(driver.ssh_opts) return config class DummyDriver(driver.ShareDriver): """Dummy share driver that implements all share driver interfaces.""" def __init__(self, *args, **kwargs): """Do initialization.""" super(DummyDriver, self).__init__( [False, True], *args, config_opts=[dummy_opts], **kwargs) self._verify_configuration() self.private_storage = kwargs.get('private_storage') self.backend_name = self.configuration.safe_get( "share_backend_name") or "DummyDriver" self.migration_progress = {} self.security_service_update_support = True self.network_allocation_update_support = True self.share_replicas_migration_support = True self.encryption_support = ['share_server'] def _verify_configuration(self): allowed_driver_methods = [m for m in dir(self) if m[0] != '_'] allowed_driver_methods.extend([ "_setup_server", "_teardown_server", "_get_pools_info", "_update_share_stats", ]) disallowed_driver_methods = ( "get_admin_network_allocations_number", "get_network_allocations_number", "get_share_server_pools", ) for k, v in self.configuration.safe_get( "dummy_driver_driver_methods_delays").items(): if k not in allowed_driver_methods: raise exception.BadConfigurationException(reason=( "Dummy driver does not have '%s' method." % k )) elif k in disallowed_driver_methods: raise exception.BadConfigurationException(reason=( "Method '%s' does not support delaying." % k )) try: float(v) except (TypeError, ValueError): raise exception.BadConfigurationException(reason=( "Wrong value (%(v)s) for '%(k)s' dummy driver method time " "delay is set in 'dummy_driver_driver_methods_delays' " "config option." 
% {"k": k, "v": v} )) def _get_share_name(self, share): mount_point_name = share.get('mount_point_name') if mount_point_name is not None: return mount_point_name return "share_%(s_id)s_%(si_id)s" % { "s_id": share["share_id"].replace("-", "_"), "si_id": share["id"].replace("-", "_")} def _get_snapshot_name(self, snapshot): return "snapshot_%(s_id)s_%(si_id)s" % { "s_id": snapshot["snapshot_id"].replace("-", "_"), "si_id": snapshot["id"].replace("-", "_")} def _get_export(self, mountpoint, ip, is_admin_only, preferred): return { "path": "%(ip)s:%(mp)s" % {"ip": ip, "mp": mountpoint}, "metadata": { "preferred": preferred, }, "is_admin_only": is_admin_only, } def _get_subnet_allocations_from_backend_details(self, backend_details): """Reads subnet_allocations info from backend details""" # NOTE(sfernand): Ensure backward compatibility for share servers # created prior to the addition of support to multiple subnets per AZ, # by read ip information using the old format in case # subnet_allocations does not exist. 
if 'subnet_allocations' in backend_details: subnet_allocations = jsonutils.loads( backend_details['subnet_allocations']) else: subnet_allocations = [{ 'primary_public_ip': backend_details['primary_public_ip'], 'secondary_public_ip': backend_details['secondary_public_ip'] }] return subnet_allocations def _generate_export_locations(self, mountpoint, share_server=None): if share_server: backend_details = share_server['backend_details'] subnet_allocations = ( self._get_subnet_allocations_from_backend_details( backend_details)) service_ip = backend_details["service_ip"] else: subnet_allocations = [{ "primary_public_ip": "10.0.0.10", "secondary_public_ip": "10.0.0.20", }] service_ip = "11.0.0.11" export_locations = [ self._get_export(mountpoint, service_ip, True, False)] for subnet_allocation in subnet_allocations: export_locations.append( self._get_export( mountpoint, subnet_allocation["primary_public_ip"], False, True)) export_locations.append( self._get_export( mountpoint, subnet_allocation["secondary_public_ip"], False, False)) return export_locations def _create_share(self, context, share, share_server=None): share_proto = share["share_proto"] if share_proto not in ("NFS", "CIFS"): msg = _("Unsupported share protocol provided - %s.") % share_proto raise exception.InvalidShareAccess(reason=msg) encryption_key_ref = share.get('encryption_key_ref') if encryption_key_ref and context: encryption_key_href = barbican_api.get_secret_href( context, encryption_key_ref) LOG.debug("Generated encryption_key_href %s for share create " "request.", encryption_key_href) share_name = self._get_share_name(share) mountpoint = "/path/to/fake/share/%s" % share_name self.private_storage.update( share["id"], { "fake_provider_share_name": share_name, "fake_provider_location": mountpoint, } ) return self._generate_export_locations( mountpoint, share_server=share_server) @slow_me_down def create_share(self, context, share, share_server=None): """Is called to create share.""" return 
self._create_share(context, share, share_server=share_server) @slow_me_down def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create share from snapshot.""" export_locations = self._create_share( context, share, share_server=share_server) return { 'export_locations': export_locations, 'status': constants.STATUS_AVAILABLE } def _create_snapshot(self, snapshot, share_server=None): snapshot_name = self._get_snapshot_name(snapshot) mountpoint = "/path/to/fake/snapshot/%s" % snapshot_name self.private_storage.update( snapshot["id"], { "fake_provider_snapshot_name": snapshot_name, "fake_provider_location": mountpoint, } ) return { 'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2', 'fake_key3': 'fake_value3', "provider_location": mountpoint, "export_locations": self._generate_export_locations( mountpoint, share_server=share_server) } @slow_me_down def create_snapshot(self, context, snapshot, share_server=None): """Is called to create snapshot.""" return self._create_snapshot(snapshot, share_server) @slow_me_down def delete_share(self, context, share, share_server=None): """Is called to remove share.""" self.private_storage.delete(share["id"]) @slow_me_down def delete_snapshot(self, context, snapshot, share_server=None): """Is called to remove snapshot.""" LOG.debug('Deleting snapshot with following data: %s', snapshot) self.private_storage.delete(snapshot["id"]) @slow_me_down def get_pool(self, share): """Return pool name where the share resides on.""" pool_name = share_utils.extract_host(share["host"], level="pool") return pool_name @slow_me_down def ensure_share(self, context, share, share_server=None): """Invoked to ensure that share is exported.""" @slow_me_down def update_access(self, context, share, access_rules, add_rules, delete_rules, update_rules, share_server=None): """Update access rules for given share.""" for rule in add_rules + access_rules: share_proto = share["share_proto"].lower() 
access_type = rule["access_type"].lower() if not ( (share_proto == "nfs" and access_type == "ip") or (share_proto == "cifs" and access_type == "user")): msg = _("Unsupported '%(access_type)s' access type provided " "for '%(share_proto)s' share protocol.") % { "access_type": access_type, "share_proto": share_proto} raise exception.InvalidShareAccess(reason=msg) @slow_me_down def snapshot_update_access(self, context, snapshot, access_rules, add_rules, delete_rules, share_server=None): """Update access rules for given snapshot.""" self.update_access(context, snapshot['share'], access_rules, add_rules, delete_rules, share_server) @slow_me_down def do_setup(self, context): """Any initialization the share driver does while starting.""" @slow_me_down def manage_existing(self, share, driver_options): """Brings an existing share under Manila management.""" new_export = share['export_location'] old_share_id = self._get_share_id_from_export(new_export) old_export = self.private_storage.get( old_share_id, key='export_location') if old_export.split(":/")[-1] == new_export.split(":/")[-1]: result = { "size": 1, "export_locations": self._create_share(None, share) } self.private_storage.delete(old_share_id) return result else: msg = ("Invalid export specified, existing share %s" " could not be found" % old_share_id) raise exception.ShareBackendException(msg=msg) @slow_me_down def manage_existing_with_server( self, share, driver_options, share_server=None): return self.manage_existing(share, driver_options) def _get_share_id_from_export(self, export_location): values = export_location.split('share_') if len(values) > 1: return values[1][37:].replace("_", "-") else: return export_location @slow_me_down def unmanage(self, share): """Removes the specified share from Manila management.""" self.private_storage.update( share['id'], {'export_location': share['export_location']}) @slow_me_down def unmanage_with_server(self, share, share_server=None): self.unmanage(share) @slow_me_down def 
manage_existing_snapshot_with_server(self, snapshot, driver_options, share_server=None): return self.manage_existing_snapshot(snapshot, driver_options) @slow_me_down def manage_existing_snapshot(self, snapshot, driver_options): """Brings an existing snapshot under Manila management.""" old_snap_id = self._get_snap_id_from_provider_location( snapshot['provider_location']) old_provider_location = self.private_storage.get( old_snap_id, key='provider_location') if old_provider_location == snapshot['provider_location']: self._create_snapshot(snapshot) self.private_storage.delete(old_snap_id) return {"size": 1, "provider_location": snapshot["provider_location"]} else: msg = ("Invalid provider location specified, existing snapshot %s" " could not be found" % old_snap_id) raise exception.ShareBackendException(msg=msg) def _get_snap_id_from_provider_location(self, provider_location): values = provider_location.split('snapshot_') if len(values) > 1: return values[1][37:].replace("_", "-") else: return provider_location @slow_me_down def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Manila management.""" self.private_storage.update( snapshot['id'], {'provider_location': snapshot['provider_location']}) @slow_me_down def unmanage_snapshot_with_server(self, snapshot, share_server=None): self.unmanage_snapshot(snapshot) @slow_me_down def revert_to_snapshot(self, context, snapshot, share_access_rules, snapshot_access_rules, share_server=None): """Reverts a share (in place) to the specified snapshot.""" @slow_me_down def extend_share(self, share, new_size, share_server=None): """Extends size of existing share.""" @slow_me_down def shrink_share(self, share, new_size, share_server=None): """Shrinks size of existing share.""" def get_network_allocations_number(self): """Returns number of network allocations for creating VIFs.""" return 2 def get_admin_network_allocations_number(self): return 1 @slow_me_down def _setup_server(self, network_info, 
metadata=None): """Sets up and configures share server with given network parameters. Redefine it within share driver when it is going to handle share servers. """ common_net_info = network_info[0] server_details = { "service_ip": common_net_info[ "admin_network_allocations"][0]["ip_address"], "username": "fake_username", "server_id": common_net_info['server_id'], } subnet_allocations = [] for subnet_info in network_info: subnet_allocations.append({ "primary_public_ip": subnet_info[ "network_allocations"][0]["ip_address"], "secondary_public_ip": subnet_info[ "network_allocations"][1]["ip_address"] }) server_details['subnet_allocations'] = jsonutils.dumps( subnet_allocations) return server_details @slow_me_down def _teardown_server(self, server_details, security_services=None): """Tears down share server.""" @slow_me_down def _get_pools_info(self): pools = [{ "pool_name": "fake_pool_for_%s" % self.backend_name, "total_capacity_gb": 1230.0, "free_capacity_gb": 1210.0, "reserved_percentage": self.configuration.reserved_share_percentage, "reserved_snapshot_percentage": self.configuration.reserved_share_from_snapshot_percentage, "reserved_share_extend_percentage": self.configuration.reserved_share_extend_percentage }] if self.configuration.replication_domain: pools[0]["replication_type"] = "readable" return pools @slow_me_down def _update_share_stats(self, data=None): """Retrieve stats info from share group.""" data = { "share_backend_name": self.backend_name, "storage_protocol": "NFS_CIFS", "reserved_percentage": self.configuration.reserved_share_percentage, "reserved_snapshot_percentage": self.configuration.reserved_share_from_snapshot_percentage, "reserved_share_extend_percentage": self.configuration.reserved_share_extend_percentage, "snapshot_support": True, "create_share_from_snapshot_support": True, "revert_to_snapshot_support": True, "mount_snapshot_support": True, "driver_name": "Dummy", "pools": self._get_pools_info(), "share_group_stats": { 
"consistent_snapshot_support": "pool", }, 'share_server_multiple_subnet_support': True, 'mount_point_name_support': True, } if self.configuration.replication_domain: data["replication_type"] = "readable" super(DummyDriver, self)._update_share_stats(data) def get_share_server_pools(self, share_server): """Return list of pools related to a particular share server.""" return [] @slow_me_down def create_consistency_group(self, context, cg_dict, share_server=None): """Create a consistency group.""" LOG.debug( "Successfully created dummy Consistency Group with ID: %s.", cg_dict["id"]) @slow_me_down def delete_consistency_group(self, context, cg_dict, share_server=None): """Delete a consistency group.""" LOG.debug( "Successfully deleted dummy consistency group with ID %s.", cg_dict["id"]) @slow_me_down def create_cgsnapshot(self, context, snap_dict, share_server=None): """Create a consistency group snapshot.""" LOG.debug("Successfully created CG snapshot %s.", snap_dict["id"]) return None, None @slow_me_down def delete_cgsnapshot(self, context, snap_dict, share_server=None): """Delete a consistency group snapshot.""" LOG.debug("Successfully deleted CG snapshot %s.", snap_dict["id"]) return None, None @slow_me_down def create_consistency_group_from_cgsnapshot( self, context, cg_dict, cgsnapshot_dict, share_server=None): """Create a consistency group from a cgsnapshot.""" LOG.debug( ("Successfully created dummy Consistency Group (%(cg_id)s) " "from CG snapshot (%(cg_snap_id)s)."), {"cg_id": cg_dict["id"], "cg_snap_id": cgsnapshot_dict["id"]}) return None, [] @slow_me_down def create_replica(self, context, replica_list, new_replica, access_rules, replica_snapshots, share_server=None): """Replicate the active replica to a new replica on this backend.""" replica_name = self._get_share_name(new_replica) mountpoint = "/path/to/fake/share/%s" % replica_name self.private_storage.update( new_replica["id"], { "fake_provider_replica_name": replica_name, "fake_provider_location": 
mountpoint, } ) return { "export_locations": self._generate_export_locations( mountpoint, share_server=share_server), "replica_state": constants.REPLICA_STATE_IN_SYNC, "access_rules_status": constants.STATUS_ACTIVE, } @slow_me_down def delete_replica(self, context, replica_list, replica_snapshots, replica, share_server=None): """Delete a replica.""" self.private_storage.delete(replica["id"]) @slow_me_down def promote_replica(self, context, replica_list, replica, access_rules, share_server=None, quiesce_wait_time=None): """Promote a replica to 'active' replica state.""" return_replica_list = [] for r in replica_list: if r["id"] == replica["id"]: replica_state = constants.REPLICA_STATE_ACTIVE else: replica_state = constants.REPLICA_STATE_IN_SYNC return_replica_list.append( {"id": r["id"], "replica_state": replica_state}) return return_replica_list @slow_me_down def update_replica_state(self, context, replica_list, replica, access_rules, replica_snapshots, share_server=None): """Update the replica_state of a replica.""" return constants.REPLICA_STATE_IN_SYNC @slow_me_down def create_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Create a snapshot on active instance and update across the replicas. 
""" return_replica_snapshots = [] for r in replica_snapshots: return_replica_snapshots.append( {"id": r["id"], "status": constants.STATUS_AVAILABLE}) return return_replica_snapshots @slow_me_down def revert_to_replicated_snapshot(self, context, active_replica, replica_list, active_replica_snapshot, replica_snapshots, share_access_rules, snapshot_access_rules, share_server=None): """Reverts a replicated share (in place) to the specified snapshot.""" @slow_me_down def delete_replicated_snapshot(self, context, replica_list, replica_snapshots, share_server=None): """Delete a snapshot by deleting its instances across the replicas.""" return_replica_snapshots = [] for r in replica_snapshots: return_replica_snapshots.append( {"id": r["id"], "status": constants.STATUS_DELETED}) return return_replica_snapshots @slow_me_down def update_replicated_snapshot(self, context, replica_list, share_replica, replica_snapshots, replica_snapshot, share_server=None): """Update the status of a snapshot instance that lives on a replica.""" return { "id": replica_snapshot["id"], "status": constants.STATUS_AVAILABLE} @slow_me_down def migration_check_compatibility( self, context, source_share, destination_share, share_server=None, destination_share_server=None): """Is called to test compatibility with destination backend.""" backend_name = share_utils.extract_host( destination_share['host'], level='backend_name') config = get_backend_configuration(backend_name) compatible = 'Dummy' in config.share_driver return { 'compatible': compatible, 'writable': compatible, 'preserve_metadata': compatible, 'nondisruptive': False, 'preserve_snapshots': compatible, } @slow_me_down def migration_start( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Is called to perform 1st phase of driver migration of a given share. 
""" LOG.debug( "Migration of dummy share with ID '%s' has been started.", source_share["id"]) self.migration_progress[source_share['share_id']] = 0 @slow_me_down def migration_continue( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): if source_share["id"] not in self.migration_progress: self.migration_progress[source_share["id"]] = 0 self.migration_progress[source_share["id"]] += 50 LOG.debug( "Migration of dummy share with ID '%s' is continuing, %s.", source_share["id"], self.migration_progress[source_share["id"]]) return self.migration_progress[source_share["id"]] == 100 @slow_me_down def migration_complete( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Is called to perform 2nd phase of driver migration of a given share. """ snapshot_updates = {} for src_snap_ins, dest_snap_ins in snapshot_mappings.items(): snapshot_updates[dest_snap_ins['id']] = self._create_snapshot( dest_snap_ins) return { 'snapshot_updates': snapshot_updates, 'export_locations': self._do_migration( source_share, destination_share, share_server) } def _do_migration(self, source_share_ref, dest_share_ref, share_server): share_name = self._get_share_name(dest_share_ref) mountpoint = "/path/to/fake/share/%s" % share_name self.private_storage.delete(source_share_ref["id"]) self.private_storage.update( dest_share_ref["id"], { "fake_provider_share_name": share_name, "fake_provider_location": mountpoint, } ) LOG.debug( "Migration of dummy share with ID '%s' has been completed.", source_share_ref["id"]) self.migration_progress.pop(source_share_ref["id"], None) return self._generate_export_locations( mountpoint, share_server=share_server) @slow_me_down def migration_cancel( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Is called to 
cancel driver migration.""" LOG.debug( "Migration of dummy share with ID '%s' has been canceled.", source_share["id"]) self.migration_progress.pop(source_share["id"], None) @slow_me_down def migration_get_progress( self, context, source_share, destination_share, source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None): """Is called to get migration progress.""" # Simulate migration progress. if source_share["id"] not in self.migration_progress: self.migration_progress[source_share["id"]] = 0 total_progress = self.migration_progress[source_share["id"]] LOG.debug("Progress of current dummy share migration " "with ID '%(id)s' is %(progress)s.", { "id": source_share["id"], "progress": total_progress }) return {"total_progress": total_progress} def share_server_migration_check_compatibility( self, context, share_server, dest_host, old_share_network, new_share_network, shares_request_spec): """Is called to check migration compatibility for a share server.""" backend_name = share_utils.extract_host( dest_host, level='backend_name') config = get_backend_configuration(backend_name) compatible = 'Dummy' in config.share_driver return { 'compatible': compatible, 'writable': compatible, 'preserve_snapshots': compatible, 'nondisruptive': False, 'share_network_id': new_share_network['id'], 'migration_cancel': compatible, 'migration_get_progress': compatible, } @slow_me_down def share_server_migration_start(self, context, src_share_server, dest_share_server, shares, snapshots): """Is called to perform 1st phase of migration of a share server.""" LOG.debug( "Migration of dummy share server with ID '%s' has been started.", src_share_server["id"]) self.migration_progress[src_share_server['id']] = 0 @slow_me_down def share_server_migration_continue(self, context, src_share_server, dest_share_server, shares, snapshots): """Is called to continue the migration of a share server.""" if src_share_server["id"] not in self.migration_progress: 
self.migration_progress[src_share_server["id"]] = 0 self.migration_progress[src_share_server["id"]] += 50 LOG.debug( "Migration of dummy share server with ID '%s' is continuing, %s.", src_share_server["id"], self.migration_progress[src_share_server["id"]]) return self.migration_progress[src_share_server["id"]] >= 100 @slow_me_down def share_server_migration_complete(self, context, source_share_server, dest_share_server, shares, snapshots, new_network_allocations): """Is called to complete the migration of a share server.""" shares_updates = {} pools = self._get_pools_info() for instance in shares: share_name = self._get_share_name(instance) mountpoint = "/path/to/fake/share/%s" % share_name export_locations = self._generate_export_locations( mountpoint, share_server=dest_share_server) dest_pool = pools[0]['pool_name'] shares_updates.update( {instance['id']: {'export_locations': export_locations, 'pool_name': dest_pool}} ) snapshot_updates = {} for instance in snapshots: snapshot_name = self._get_snapshot_name(instance) mountpoint = "/path/to/fake/snapshot/%s" % snapshot_name snap_export_locations = self._generate_export_locations( mountpoint, share_server=dest_share_server) snapshot_updates.update( {instance['id']: { 'provider_location': mountpoint, 'export_locations': snap_export_locations}} ) LOG.debug( "Migration of dummy share server with ID '%s' has been completed.", source_share_server["id"]) self.migration_progress.pop(source_share_server["id"], None) return { 'share_updates': shares_updates, 'snapshot_updates': snapshot_updates, } @slow_me_down def share_server_migration_cancel(self, context, src_share_server, dest_share_server, shares, snapshots): """Is called to cancel a share server migration.""" LOG.debug( "Migration of dummy share server with ID '%s' has been canceled.", src_share_server["id"]) self.migration_progress.pop(src_share_server["id"], None) @slow_me_down def share_server_migration_get_progress(self, context, src_share_server, 
dest_share_server, shares, snapshots): """Is called to get share server migration progress.""" if src_share_server["id"] not in self.migration_progress: self.migration_progress[src_share_server["id"]] = 0 total_progress = self.migration_progress[src_share_server["id"]] LOG.debug("Progress of current dummy share server migration " "with ID '%(id)s' is %(progress)s.", { "id": src_share_server["id"], "progress": total_progress }) return {"total_progress": total_progress} def update_share_usage_size(self, context, shares): share_updates = [] gathered_at = timeutils.utcnow() for s in shares: share_updates.append({'id': s['id'], 'used_size': 1, 'gathered_at': gathered_at}) return share_updates @slow_me_down def get_share_server_network_info( self, context, share_server, identifier, driver_options): try: server_details = self.private_storage.get(identifier) except Exception: msg = ("Unable to find share server %s in " "private storage." % identifier) raise exception.ShareBackendException(msg=msg) ips = [server_details['service_ip']] subnet_allocations = ( self._get_subnet_allocations_from_backend_details(server_details)) for subnet_allocation in subnet_allocations: ips += list(subnet_allocation.values()) return ips @slow_me_down def manage_server(self, context, share_server, identifier, driver_options): server_details = self.private_storage.get(identifier) self.private_storage.delete(identifier) return identifier, server_details def unmanage_server(self, server_details, security_services=None): server_details = server_details or {} if not server_details or 'server_id' not in server_details: # This share server doesn't have any network details. Since it's # just being cleaned up, we'll log a warning and return without # errors. LOG.warning("Share server does not have network information. 
" "It is being unmanaged, but cannot be re-managed " "without first creating network allocations in this " "driver's private storage.") return self.private_storage.update(server_details['server_id'], server_details) def get_share_status(self, share, share_server=None): return { 'status': constants.STATUS_AVAILABLE, 'export_locations': self.private_storage.get(share['id'], key='export_location') } @slow_me_down def update_share_server_security_service(self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): if current_security_service: msg = _("Replacing security service %(cur_sec_serv_id)s by " "security service %(new_sec_serv_id)s on share server " "%(server_id)s." ) % { 'cur_sec_serv_id': current_security_service['id'], 'new_sec_serv_id': new_security_service['id'], 'server_id': share_server['id'] } else: msg = _("Adding security service %(sec_serv_id)s on share server " "%(server_id)s." ) % { 'sec_serv_id': new_security_service['id'], 'server_id': share_server['id'] } LOG.debug(msg) def check_update_share_server_security_service( self, context, share_server, network_info, share_instances, share_instance_rules, new_security_service, current_security_service=None): return True def check_update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_share_network_subnet, security_services, share_instances, share_instances_rules): LOG.debug("Share server %(server)s can be updated with allocations " "from new subnet.", {'server': share_server['id']}) return True def update_share_server_network_allocations( self, context, share_server, current_network_allocations, new_network_allocations, security_services, shares, snapshots): backend_details = share_server['backend_details'] subnet_allocations = ( self._get_subnet_allocations_from_backend_details(backend_details)) subnet_allocations.append({ 'primary_public_ip': new_network_allocations[ 
'network_allocations'][0]['ip_address'], 'secondary_public_ip': new_network_allocations[ 'network_allocations'][1]['ip_address'], }) new_server = { "backend_details": { "subnet_allocations": jsonutils.dumps(subnet_allocations), "service_ip": backend_details["service_ip"], } } shares_updates = {} for instance in shares: share_name = self._get_share_name(instance) mountpoint = "/path/to/fake/share/%s" % share_name export_locations = self._generate_export_locations( mountpoint, share_server=new_server) shares_updates.update( {instance['id']: export_locations} ) snapshot_updates = {} for instance in snapshots: snapshot_name = self._get_snapshot_name(instance) mountpoint = "/path/to/fake/snapshot/%s" % snapshot_name snap_export_locations = self._generate_export_locations( mountpoint, share_server=new_server) snapshot_updates.update( {instance['id']: { 'provider_location': mountpoint, 'export_locations': snap_export_locations}} ) LOG.debug( "Network update allocations of dummy share server with ID '%s' " "has been completed.", share_server["id"]) return { "share_updates": shares_updates, "snapshot_updates": snapshot_updates, "server_details": { "subnet_allocations": ( new_server["backend_details"]["subnet_allocations"]) }, } @slow_me_down def create_backup(self, context, share_instance, backup, share_server=None): LOG.debug("Created backup %(backup)s of share %(share)s " "using dummy driver.", {'backup': backup['id'], 'share': share_instance['share_id']}) def create_backup_continue(self, context, share_instance, backup, share_server=None): LOG.debug("Continue backup %(backup)s of share %(share)s " "using dummy driver.", {'backup': backup['id'], 'share': share_instance['share_id']}) return {'total_progress': '100'} def delete_backup(self, context, backup, share_instance, share_server=None): LOG.debug("Deleted backup '%s' using dummy driver.", backup['id']) @slow_me_down def restore_backup(self, context, backup, share_instance, share_server=None): LOG.debug("Restored 
backup %(backup)s into share %(share)s " "using dummy driver.", {'backup': backup['id'], 'share': share_instance['share_id']}) def restore_backup_continue(self, context, backup, share_instance, share_server=None): LOG.debug("Continue restore of backup %(backup)s into share " "%(share)s using dummy driver.", {'backup': backup['id'], 'share': share_instance['share_id']}) return {'total_progress': '100'} def update_share_from_metadata(self, context, share_instance, metadata, share_server=None): LOG.debug("Updated share %(share)s. Metadata %(metadata)s " "applied successfully.", {'share': share_instance['share_id'], 'metadata': metadata}) @slow_me_down def update_share_network_subnet_from_metadata(self, context, share_network, share_network_subnet, share_servers, metadata): LOG.debug("Updated share network subnet %(sn_sub)s. Metadata " "%(metadata)s applied successfully.", {'sn_sub': share_network_subnet['id'], 'metadata': metadata}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0136704 manila-21.0.0/manila/tests/share/drivers/ganesha/0000775000175000017500000000000000000000000021647 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/ganesha/__init__.py0000664000175000017500000000000000000000000023746 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/ganesha/test_manager.py0000664000175000017500000013755700000000000024714 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import io import re from unittest import mock import ddt from oslo_serialization import jsonutils from manila import exception from manila.share.drivers.ganesha import manager from manila import test from manila import utils test_export_id = 101 test_name = 'fakefile' test_path = '/fakedir0/export.d/fakefile.conf' test_tmp_path = '/fakedir0/export.d/fakefile.conf.RANDOM' test_ganesha_cnf = """EXPORT { Export_Id = 101; CLIENT { Clients = ip1; Access_Level = ro; } CLIENT { Clients = ip2; Access_Level = rw; } }""" test_dict_unicode = { u'EXPORT': { u'Export_Id': 101, u'CLIENT': [ {u'Clients': u"ip1", u'Access_Level': u'ro'}, {u'Clients': u"ip2", u'Access_Level': u'rw'}] } } test_dict_str = { 'EXPORT': { 'Export_Id': 101, 'CLIENT': [ {'Clients': 'ip1', 'Access_Level': 'ro'}, {'Clients': 'ip2', 'Access_Level': 'rw'}] } } manager_fake_kwargs = { 'ganesha_config_path': '/fakedir0/fakeconfig', 'ganesha_db_path': '/fakedir1/fake.db', 'ganesha_export_dir': '/fakedir0/export.d', 'ganesha_service_name': 'ganesha.fakeservice' } class MockRadosModule(object): """Mocked up version of Ceph's RADOS module.""" class ObjectNotFound(Exception): pass class OSError(Exception): pass class WriteOpCtx(): def __enter__(self): return self def __exit__(self, type, msg, traceback): pass def write_full(self, bytes_to_write): pass @ddt.ddt class MiscTests(test.TestCase): @ddt.data({'import_exc': None}, {'import_exc': ImportError}) @ddt.unpack def test_setup_rados(self, import_exc): manager.rados = None with mock.patch.object( manager.importutils, 'import_module', 
side_effect=import_exc) as mock_import_module: if import_exc: self.assertRaises( exception.ShareBackendException, manager.setup_rados) else: manager.setup_rados() self.assertEqual(mock_import_module.return_value, manager.rados) mock_import_module.assert_called_once_with('rados') class GaneshaConfigTests(test.TestCase): """Tests Ganesha config file format convertor functions.""" ref_ganesha_cnf = """EXPORT { CLIENT { Clients = ip1; Access_Level = "ro"; } CLIENT { Clients = ip2; Access_Level = "rw"; } Export_Id = 101; }""" @staticmethod def conf_mangle(*confs): """A "mangler" for the conf format. Its purpose is to transform conf data in a way so that semantically equivalent confs yield identical results. Besides this objective criteria, we seek a good trade-off between the following requirements: - low lossiness; - low code complexity. """ def _conf_mangle(conf): # split to expressions by the delimiter ";" # (braces are forced to be treated as expressions # by sandwiching them in ";"-s) conf = re.sub(r'[{}]', r';\g<0>;', conf).split(';') # whitespace-split expressions to tokens with # (equality is forced to be treated as token by # sandwiching in space) conf = map(lambda line: line.replace("=", " = ").split(), conf) # get rid of by-product empty lists (derived from superflouous # ";"-s that might have crept in due to "sandwiching") conf = map(lambda x: x, conf) # handle the non-deterministic order of confs conf = list(conf) conf.sort() return conf return (_conf_mangle(conf) for conf in confs) def test_conf2json(self): test_ganesha_cnf_with_comment = """EXPORT { # fake_export_block Export_Id = 101; CLIENT { Clients = ip1; } }""" result_dict_unicode = { u'EXPORT': { u'CLIENT': {u'Clients': u'ip1'}, u'Export_Id': 101 } } ret = manager._conf2json(test_ganesha_cnf_with_comment) self.assertEqual(result_dict_unicode, jsonutils.loads(ret)) def test_parseconf_ganesha_cnf_input(self): ret = manager.parseconf(test_ganesha_cnf) self.assertEqual(test_dict_unicode, ret) def 
test_parseconf_json_input(self): ret = manager.parseconf(jsonutils.dumps(test_dict_str)) self.assertEqual(test_dict_unicode, ret) def test_dump_to_conf(self): ganesha_cnf = io.StringIO() manager._dump_to_conf(test_dict_str, ganesha_cnf) self.assertEqual(*self.conf_mangle(self.ref_ganesha_cnf, ganesha_cnf.getvalue())) def test_mkconf(self): ganesha_cnf = manager.mkconf(test_dict_str) self.assertEqual(*self.conf_mangle(self.ref_ganesha_cnf, ganesha_cnf)) @ddt.ddt class GaneshaManagerTestCase(test.TestCase): """Tests GaneshaManager.""" def instantiate_ganesha_manager(self, *args, **kwargs): ganesha_rados_store_enable = kwargs.get('ganesha_rados_store_enable', False) if ganesha_rados_store_enable: with mock.patch.object( manager.GaneshaManager, '_get_rados_object') as self.mock_get_rados_object: return manager.GaneshaManager(*args, **kwargs) else: with mock.patch.object( manager.GaneshaManager, 'get_export_id', return_value=100) as self.mock_get_export_id: return manager.GaneshaManager(*args, **kwargs) def setUp(self): super(GaneshaManagerTestCase, self).setUp() self._execute = mock.Mock(return_value=('', '')) self._rados_client = mock.Mock() self._manager = self.instantiate_ganesha_manager( self._execute, 'faketag', rados_client=self._rados_client, **manager_fake_kwargs) self._setup_rados = mock.Mock() self._execute2 = mock.Mock(return_value=('', '')) self.mock_object(manager, 'rados', MockRadosModule) self.mock_object(manager, 'setup_rados', self._setup_rados) fake_kwargs = copy.copy(manager_fake_kwargs) fake_kwargs.update( ganesha_rados_store_enable=True, ganesha_rados_store_pool_name='fakepool', ganesha_rados_export_counter='fakecounter', ganesha_rados_export_index='fakeindex', rados_client=self._rados_client ) self._manager_with_rados_store = self.instantiate_ganesha_manager( self._execute2, 'faketag', **fake_kwargs) self.mock_object(utils, 'synchronized', mock.Mock(return_value=lambda f: f)) def test_init(self): self.mock_object(self._manager, 'reset_exports') 
self.mock_object(self._manager, 'restart_service') self.assertEqual('/fakedir0/fakeconfig', self._manager.ganesha_config_path) self.assertEqual('faketag', self._manager.tag) self.assertEqual('/fakedir0/export.d', self._manager.ganesha_export_dir) self.assertEqual('/fakedir1/fake.db', self._manager.ganesha_db_path) self.assertEqual('ganesha.fakeservice', self._manager.ganesha_service) self.assertEqual( [mock.call('mkdir', '-p', self._manager.ganesha_export_dir), mock.call('mkdir', '-p', '/fakedir1'), mock.call('sqlite3', self._manager.ganesha_db_path, 'create table ganesha(key varchar(20) primary key, ' 'value int); insert into ganesha values("exportid", ' '100);', run_as_root=False, check_exit_code=False)], self._execute.call_args_list) self.mock_get_export_id.assert_called_once_with(bump=False) def test_init_execute_error_log_message(self): fake_args = ('foo', 'bar') def raise_exception(*args, **kwargs): if args == fake_args: raise exception.GaneshaCommandFailure() test_execute = mock.Mock(side_effect=raise_exception) self.mock_object(manager.LOG, 'error') test_manager = self.instantiate_ganesha_manager( test_execute, 'faketag', **manager_fake_kwargs) self.assertRaises( exception.GaneshaCommandFailure, test_manager.execute, *fake_args, message='fakemsg') manager.LOG.error.assert_called_once_with( mock.ANY, {'tag': 'faketag', 'msg': 'fakemsg'}) def test_init_execute_error_no_log_message(self): fake_args = ('foo', 'bar') def raise_exception(*args, **kwargs): if args == fake_args: raise exception.GaneshaCommandFailure() test_execute = mock.Mock(side_effect=raise_exception) self.mock_object(manager.LOG, 'error') test_manager = self.instantiate_ganesha_manager( test_execute, 'faketag', **manager_fake_kwargs) self.assertRaises( exception.GaneshaCommandFailure, test_manager.execute, *fake_args, message='fakemsg', makelog=False) self.assertFalse(manager.LOG.error.called) @ddt.data(False, True) def test_init_with_rados_store_and_export_counter_exists( self, 
counter_exists): fake_execute = mock.Mock(return_value=('', '')) fake_kwargs = copy.copy(manager_fake_kwargs) fake_kwargs.update( ganesha_rados_store_enable=True, ganesha_rados_store_pool_name='fakepool', ganesha_rados_export_counter='fakecounter', ganesha_rados_export_index='fakeindex', rados_client=self._rados_client ) if counter_exists: self.mock_object( manager.GaneshaManager, '_get_rados_object', mock.Mock()) else: self.mock_object( manager.GaneshaManager, '_get_rados_object', mock.Mock(side_effect=MockRadosModule.ObjectNotFound)) self.mock_object(manager.GaneshaManager, '_put_rados_object') test_mgr = manager.GaneshaManager( fake_execute, 'faketag', **fake_kwargs) self.assertEqual('/fakedir0/fakeconfig', test_mgr.ganesha_config_path) self.assertEqual('faketag', test_mgr.tag) self.assertEqual('/fakedir0/export.d', test_mgr.ganesha_export_dir) self.assertEqual('ganesha.fakeservice', test_mgr.ganesha_service) fake_execute.assert_called_once_with( 'mkdir', '-p', '/fakedir0/export.d') self.assertTrue(test_mgr.ganesha_rados_store_enable) self.assertEqual('fakepool', test_mgr.ganesha_rados_store_pool_name) self.assertEqual('fakecounter', test_mgr.ganesha_rados_export_counter) self.assertEqual('fakeindex', test_mgr.ganesha_rados_export_index) self.assertEqual(self._rados_client, test_mgr.rados_client) self._setup_rados.assert_called_with() test_mgr._get_rados_object.assert_called_once_with('fakecounter') if counter_exists: self.assertFalse(test_mgr._put_rados_object.called) else: test_mgr._put_rados_object.assert_called_once_with( 'fakecounter', str(1000)) def test_ganesha_export_dir(self): self.assertEqual( '/fakedir0/export.d', self._manager.ganesha_export_dir) def test_getpath(self): self.assertEqual( '/fakedir0/export.d/fakefile.conf', self._manager._getpath('fakefile')) def test_get_export_rados_object_name(self): self.assertEqual( 'ganesha-export-fakeobj', self._manager._get_export_rados_object_name('fakeobj')) def test_write_tmp_conf_file(self): 
self.mock_object(manager.shlex, 'quote', mock.Mock(side_effect=['fakedata', test_tmp_path])) test_args = [ ('mktemp', '-p', '/fakedir0/export.d', '-t', 'fakefile.conf.XXXXXX'), ('sh', '-c', 'echo fakedata > %s' % test_tmp_path)] test_kwargs = { 'message': 'writing %s' % test_tmp_path } def return_tmpfile(*args, **kwargs): if args == test_args[0]: return (test_tmp_path + '\n', '') self.mock_object(self._manager, 'execute', mock.Mock(side_effect=return_tmpfile)) ret = self._manager._write_tmp_conf_file(test_path, 'fakedata') self._manager.execute.assert_has_calls([ mock.call(*test_args[0]), mock.call(*test_args[1], **test_kwargs)]) manager.shlex.quote.assert_has_calls([ mock.call('fakedata'), mock.call(test_tmp_path)]) self.assertEqual(test_tmp_path, ret) @ddt.data(True, False) def test_write_conf_file_with_mv_error(self, mv_error): test_data = 'fakedata' test_args = [ ('mv', test_tmp_path, test_path), ('rm', test_tmp_path)] self.mock_object(self._manager, '_getpath', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_write_tmp_conf_file', mock.Mock(return_value=test_tmp_path)) def mock_return(*args, **kwargs): if args == test_args[0]: if mv_error: raise exception.ProcessExecutionError() else: return ('', '') self.mock_object(self._manager, 'execute', mock.Mock(side_effect=mock_return)) if mv_error: self.assertRaises( exception.ProcessExecutionError, self._manager._write_conf_file, test_name, test_data) else: ret = self._manager._write_conf_file(test_name, test_data) self._manager._getpath.assert_called_once_with(test_name) self._manager._write_tmp_conf_file.assert_called_once_with( test_path, test_data) if mv_error: self._manager.execute.assert_has_calls([ mock.call(*test_args[0]), mock.call(*test_args[1])]) else: self._manager.execute.assert_has_calls([ mock.call(*test_args[0])]) self.assertEqual(test_path, ret) def test_mkindex(self): test_ls_output = 'INDEX.conf\nfakefile.conf\nfakefile.txt' test_index = '%include 
/fakedir0/export.d/fakefile.conf\n' self.mock_object(self._manager, 'execute', mock.Mock(return_value=(test_ls_output, ''))) self.mock_object(self._manager, '_write_conf_file') ret = self._manager._mkindex() self._manager.execute.assert_called_once_with( 'ls', '/fakedir0/export.d', run_as_root=False) self._manager._write_conf_file.assert_called_once_with( 'INDEX', test_index) self.assertIsNone(ret) def test_read_export_rados_object(self): self.mock_object(self._manager_with_rados_store, '_get_export_rados_object_name', mock.Mock(return_value='fakeobj')) self.mock_object(self._manager_with_rados_store, '_get_rados_object', mock.Mock(return_value=test_ganesha_cnf)) self.mock_object(manager, 'parseconf', mock.Mock(return_value=test_dict_unicode)) ret = self._manager_with_rados_store._read_export_rados_object( test_name) (self._manager_with_rados_store._get_export_rados_object_name. assert_called_once_with(test_name)) (self._manager_with_rados_store._get_rados_object. assert_called_once_with('fakeobj')) manager.parseconf.assert_called_once_with(test_ganesha_cnf) self.assertEqual(test_dict_unicode, ret) def test_read_export_file(self): test_args = ('cat', test_path) test_kwargs = {'message': 'reading export fakefile'} self.mock_object(self._manager, '_getpath', mock.Mock(return_value=test_path)) self.mock_object(self._manager, 'execute', mock.Mock(return_value=(test_ganesha_cnf,))) self.mock_object(manager, 'parseconf', mock.Mock(return_value=test_dict_unicode)) ret = self._manager._read_export_file(test_name) self._manager._getpath.assert_called_once_with(test_name) self._manager.execute.assert_called_once_with( *test_args, **test_kwargs) manager.parseconf.assert_called_once_with(test_ganesha_cnf) self.assertEqual(test_dict_unicode, ret) @ddt.data(False, True) def test_read_export_with_rados_store(self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable self.mock_object(self._manager, '_read_export_file', 
mock.Mock(return_value=test_dict_unicode)) self.mock_object(self._manager, '_read_export_rados_object', mock.Mock(return_value=test_dict_unicode)) ret = self._manager._read_export(test_name) if rados_store_enable: self._manager._read_export_rados_object.assert_called_once_with( test_name) self.assertFalse(self._manager._read_export_file.called) else: self._manager._read_export_file.assert_called_once_with(test_name) self.assertFalse(self._manager._read_export_rados_object.called) self.assertEqual(test_dict_unicode, ret) @ddt.data(True, False) def test_check_export_rados_object_exists(self, exists): self.mock_object( self._manager_with_rados_store, '_get_export_rados_object_name', mock.Mock(return_value='fakeobj')) if exists: self.mock_object( self._manager_with_rados_store, '_get_rados_object') else: self.mock_object( self._manager_with_rados_store, '_get_rados_object', mock.Mock(side_effect=MockRadosModule.ObjectNotFound)) ret = self._manager_with_rados_store._check_export_rados_object_exists( test_name) (self._manager_with_rados_store._get_export_rados_object_name. assert_called_once_with(test_name)) (self._manager_with_rados_store._get_rados_object. 
assert_called_once_with('fakeobj')) if exists: self.assertTrue(ret) else: self.assertFalse(ret) def test_check_file_exists(self): self.mock_object(self._manager, 'execute', mock.Mock(return_value=(test_ganesha_cnf,))) ret = self._manager._check_file_exists(test_path) self._manager.execute.assert_called_once_with( 'test', '-f', test_path, makelog=False, run_as_root=False) self.assertTrue(ret) @ddt.data(1, 4) def test_check_file_exists_error(self, exit_code): self.mock_object( self._manager, 'execute', mock.Mock(side_effect=exception.GaneshaCommandFailure( exit_code=exit_code)) ) if exit_code == 1: ret = self._manager._check_file_exists(test_path) self.assertFalse(ret) else: self.assertRaises(exception.GaneshaCommandFailure, self._manager._check_file_exists, test_path) self._manager.execute.assert_called_once_with( 'test', '-f', test_path, makelog=False, run_as_root=False) def test_check_export_file_exists(self): self.mock_object(self._manager, '_getpath', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_check_file_exists', mock.Mock(return_value=True)) ret = self._manager._check_export_file_exists(test_name) self._manager._getpath.assert_called_once_with(test_name) self._manager._check_file_exists.assert_called_once_with(test_path) self.assertTrue(ret) @ddt.data(False, True) def test_check_export_exists_with_rados_store(self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable self.mock_object(self._manager, '_check_export_file_exists', mock.Mock(return_value=True)) self.mock_object(self._manager, '_check_export_rados_object_exists', mock.Mock(return_value=True)) ret = self._manager.check_export_exists(test_name) if rados_store_enable: (self._manager._check_export_rados_object_exists. 
assert_called_once_with(test_name)) self.assertFalse(self._manager._check_export_file_exists.called) else: self._manager._check_export_file_exists.assert_called_once_with( test_name) self.assertFalse( self._manager._check_export_rados_object_exists.called) self.assertTrue(ret) def test_write_export_rados_object(self): self.mock_object(self._manager, '_get_export_rados_object_name', mock.Mock(return_value='fakeobj')) self.mock_object(self._manager, '_put_rados_object') self.mock_object(self._manager, '_getpath', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_write_tmp_conf_file', mock.Mock(return_value=test_tmp_path)) ret = self._manager._write_export_rados_object(test_name, 'fakedata') self._manager._get_export_rados_object_name.assert_called_once_with( test_name) self._manager._put_rados_object.assert_called_once_with( 'fakeobj', 'fakedata') self._manager._getpath.assert_called_once_with(test_name) self._manager._write_tmp_conf_file.assert_called_once_with( test_path, 'fakedata') self.assertEqual(test_tmp_path, ret) @ddt.data(True, False) def test_write_export_with_rados_store(self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable self.mock_object(manager, 'mkconf', mock.Mock(return_value=test_ganesha_cnf)) self.mock_object(self._manager, '_write_conf_file', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_write_export_rados_object', mock.Mock(return_value=test_path)) ret = self._manager._write_export(test_name, test_dict_str) manager.mkconf.assert_called_once_with(test_dict_str) if rados_store_enable: self._manager._write_export_rados_object.assert_called_once_with( test_name, test_ganesha_cnf) self.assertFalse(self._manager._write_conf_file.called) else: self._manager._write_conf_file.assert_called_once_with( test_name, test_ganesha_cnf) self.assertFalse(self._manager._write_export_rados_object.called) self.assertEqual(test_path, ret) def 
test_write_export_error_incomplete_export_block(self): test_errordict = { u'EXPORT': { u'Export_Id': '@config', u'CLIENT': {u'Clients': u"'ip1','ip2'"} } } self.mock_object(manager, 'mkconf', mock.Mock(return_value=test_ganesha_cnf)) self.mock_object(self._manager, '_write_conf_file', mock.Mock(return_value=test_path)) self.assertRaises(exception.InvalidParameterValue, self._manager._write_export, test_name, test_errordict) self.assertFalse(manager.mkconf.called) self.assertFalse(self._manager._write_conf_file.called) def test_rm_file(self): self.mock_object(self._manager, 'execute', mock.Mock(return_value=('', ''))) ret = self._manager._rm_export_file(test_name) self._manager.execute.assert_called_once_with('rm', '-f', test_path) self.assertIsNone(ret) def test_rm_export_file(self): self.mock_object(self._manager, '_getpath', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_rm_file') ret = self._manager._rm_export_file(test_name) self._manager._getpath.assert_called_once_with(test_name) self._manager._rm_file.assert_called_once_with(test_path) self.assertIsNone(ret) def test_rm_export_rados_object(self): self.mock_object(self._manager_with_rados_store, '_get_export_rados_object_name', mock.Mock(return_value='fakeobj')) self.mock_object(self._manager_with_rados_store, '_delete_rados_object') ret = self._manager_with_rados_store._rm_export_rados_object( test_name) (self._manager_with_rados_store._get_export_rados_object_name. assert_called_once_with(test_name)) (self._manager_with_rados_store._delete_rados_object. 
assert_called_once_with('fakeobj')) self.assertIsNone(ret) def test_dbus_send_ganesha(self): test_args = ('arg1', 'arg2') test_kwargs = {'key': 'value'} self.mock_object(self._manager, 'execute', mock.Mock(return_value=('', ''))) ret = self._manager._dbus_send_ganesha('fakemethod', *test_args, **test_kwargs) self._manager.execute.assert_called_once_with( 'dbus-send', '--print-reply', '--system', '--dest=org.ganesha.nfsd', '/org/ganesha/nfsd/ExportMgr', 'org.ganesha.nfsd.exportmgr.fakemethod', *test_args, message='dbus call exportmgr.fakemethod', **test_kwargs) self.assertIsNone(ret) def test_remove_export_dbus(self): self.mock_object(self._manager, '_dbus_send_ganesha') ret = self._manager._remove_export_dbus(test_export_id) self._manager._dbus_send_ganesha.assert_called_once_with( 'RemoveExport', 'uint16:101') self.assertIsNone(ret) @ddt.data('', '%url rados://fakepool/fakeobj2') def test_add_rados_object_url_to_index_with_index_data( self, index_data): self.mock_object( self._manager_with_rados_store, '_get_rados_object', mock.Mock(return_value=index_data)) self.mock_object( self._manager_with_rados_store, '_get_export_rados_object_name', mock.Mock(return_value='fakeobj1')) self.mock_object( self._manager_with_rados_store, '_put_rados_object') ret = (self._manager_with_rados_store. _add_rados_object_url_to_index('fakename')) (self._manager_with_rados_store._get_rados_object. assert_called_once_with('fakeindex')) (self._manager_with_rados_store._get_export_rados_object_name. assert_called_once_with('fakename')) if index_data: urls = ('%url rados://fakepool/fakeobj2\n' '%url rados://fakepool/fakeobj1') else: urls = '%url rados://fakepool/fakeobj1' (self._manager_with_rados_store._put_rados_object. 
assert_called_once_with('fakeindex', urls)) self.assertIsNone(ret) @ddt.data('', '%url rados://fakepool/fakeobj1\n' '%url rados://fakepool/fakeobj2') def test_remove_rados_object_url_from_index_with_index_data( self, index_data): self.mock_object( self._manager_with_rados_store, '_get_rados_object', mock.Mock(return_value=index_data)) self.mock_object( self._manager_with_rados_store, '_get_export_rados_object_name', mock.Mock(return_value='fakeobj1')) self.mock_object( self._manager_with_rados_store, '_put_rados_object') ret = (self._manager_with_rados_store. _remove_rados_object_url_from_index('fakename')) if index_data: (self._manager_with_rados_store._get_rados_object. assert_called_once_with('fakeindex')) (self._manager_with_rados_store._get_export_rados_object_name. assert_called_once_with('fakename')) urls = '%url rados://fakepool/fakeobj2' (self._manager_with_rados_store._put_rados_object. assert_called_once_with('fakeindex', urls)) else: (self._manager_with_rados_store._get_rados_object. assert_called_once_with('fakeindex')) self.assertFalse(self._manager_with_rados_store. _get_export_rados_object_name.called) self.assertFalse(self._manager_with_rados_store. 
_put_rados_object.called) self.assertIsNone(ret) @ddt.data(False, True) def test_add_export_with_rados_store(self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable self.mock_object(self._manager, '_write_export', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_dbus_send_ganesha') self.mock_object(self._manager, '_rm_file') self.mock_object(self._manager, '_add_rados_object_url_to_index') self.mock_object(self._manager, '_mkindex') ret = self._manager.add_export(test_name, test_dict_str) self._manager._write_export.assert_called_once_with( test_name, test_dict_str) self._manager._dbus_send_ganesha.assert_called_once_with( 'AddExport', 'string:' + test_path, 'string:EXPORT(Export_Id=101)') if rados_store_enable: self._manager._rm_file.assert_called_once_with(test_path) self._manager._add_rados_object_url_to_index(test_name) self.assertFalse(self._manager._mkindex.called) else: self._manager._mkindex.assert_called_once_with() self.assertFalse(self._manager._rm_file.called) self.assertFalse( self._manager._add_rados_object_url_to_index.called) self.assertIsNone(ret) def test_add_export_error_during_mkindex(self): self.mock_object(self._manager, '_write_export', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_dbus_send_ganesha') self.mock_object( self._manager, '_mkindex', mock.Mock(side_effect=exception.GaneshaCommandFailure)) self.mock_object(self._manager, '_rm_export_file') self.mock_object(self._manager, '_remove_export_dbus') self.assertRaises(exception.GaneshaCommandFailure, self._manager.add_export, test_name, test_dict_str) self._manager._write_export.assert_called_once_with( test_name, test_dict_str) self._manager._dbus_send_ganesha.assert_called_once_with( 'AddExport', 'string:' + test_path, 'string:EXPORT(Export_Id=101)') self._manager._mkindex.assert_called_once_with() self._manager._rm_export_file.assert_called_once_with(test_name) 
self._manager._remove_export_dbus.assert_called_once_with( test_export_id) @ddt.data(True, False) def test_add_export_error_during_write_export_with_rados_store( self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable self.mock_object( self._manager, '_write_export', mock.Mock(side_effect=exception.GaneshaCommandFailure)) self.mock_object(self._manager, '_mkindex') self.assertRaises(exception.GaneshaCommandFailure, self._manager.add_export, test_name, test_dict_str) self._manager._write_export.assert_called_once_with( test_name, test_dict_str) if rados_store_enable: self.assertFalse(self._manager._mkindex.called) else: self._manager._mkindex.assert_called_once_with() @ddt.data(True, False) def test_add_export_error_during_dbus_send_ganesha_with_rados_store( self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable self.mock_object(self._manager, '_write_export', mock.Mock(return_value=test_path)) self.mock_object( self._manager, '_dbus_send_ganesha', mock.Mock(side_effect=exception.GaneshaCommandFailure)) self.mock_object(self._manager, '_mkindex') self.mock_object(self._manager, '_rm_export_file') self.mock_object(self._manager, '_rm_export_rados_object') self.mock_object(self._manager, '_rm_file') self.mock_object(self._manager, '_remove_export_dbus') self.assertRaises(exception.GaneshaCommandFailure, self._manager.add_export, test_name, test_dict_str) self._manager._write_export.assert_called_once_with( test_name, test_dict_str) self._manager._dbus_send_ganesha.assert_called_once_with( 'AddExport', 'string:' + test_path, 'string:EXPORT(Export_Id=101)') if rados_store_enable: self._manager._rm_export_rados_object.assert_called_once_with( test_name) self._manager._rm_file.assert_called_once_with(test_path) self.assertFalse(self._manager._rm_export_file.called) self.assertFalse(self._manager._mkindex.called) else: self._manager._rm_export_file.assert_called_once_with(test_name) 
self._manager._mkindex.assert_called_once_with() self.assertFalse(self._manager._rm_export_rados_object.called) self.assertFalse(self._manager._rm_file.called) self.assertFalse(self._manager._remove_export_dbus.called) @ddt.data(True, False) def test_update_export_with_rados_store(self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable confdict = { 'EXPORT': { 'Export_Id': 101, 'CLIENT': {'Clients': 'ip1', 'Access_Level': 'ro'}, } } self.mock_object(self._manager, '_read_export', mock.Mock(return_value=test_dict_unicode)) self.mock_object(self._manager, '_write_export', mock.Mock(return_value=test_path)) self.mock_object(self._manager, '_dbus_send_ganesha') self.mock_object(self._manager, '_rm_file') self._manager.update_export(test_name, confdict) self._manager._read_export.assert_called_once_with(test_name) self._manager._write_export.assert_called_once_with(test_name, confdict) self._manager._dbus_send_ganesha.assert_called_once_with( 'UpdateExport', 'string:' + test_path, 'string:EXPORT(Export_Id=101)') if rados_store_enable: self._manager._rm_file.assert_called_once_with(test_path) else: self.assertFalse(self._manager._rm_file.called) @ddt.data(True, False) def test_update_export_error_with_rados_store(self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable confdict = { 'EXPORT': { 'Export_Id': 101, 'CLIENT': {'Clients': 'ip1', 'Access_Level': 'ro'}, } } self.mock_object(self._manager, '_read_export', mock.Mock(return_value=test_dict_unicode)) self.mock_object(self._manager, '_write_export', mock.Mock(return_value=test_path)) self.mock_object( self._manager, '_dbus_send_ganesha', mock.Mock(side_effect=exception.GaneshaCommandFailure)) self.mock_object(self._manager, '_rm_file') self.assertRaises(exception.GaneshaCommandFailure, self._manager.update_export, test_name, confdict) self._manager._read_export.assert_called_once_with(test_name) self._manager._write_export.assert_has_calls([ 
mock.call(test_name, confdict), mock.call(test_name, test_dict_unicode)]) self._manager._dbus_send_ganesha.assert_called_once_with( 'UpdateExport', 'string:' + test_path, 'string:EXPORT(Export_Id=101)') if rados_store_enable: self._manager._rm_file.assert_called_once_with(test_path) else: self.assertFalse(self._manager._rm_file.called) @ddt.data(True, False) def test_remove_export_with_rados_store(self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable self.mock_object(self._manager, '_read_export', mock.Mock(return_value=test_dict_unicode)) self.mock_object(self._manager, '_get_export_rados_object_name', mock.Mock(return_value='fakeobj')) methods = ('_remove_export_dbus', '_rm_export_file', '_mkindex', '_remove_rados_object_url_from_index', '_delete_rados_object') for method in methods: self.mock_object(self._manager, method) ret = self._manager.remove_export(test_name) self._manager._read_export.assert_called_once_with(test_name) self._manager._remove_export_dbus.assert_called_once_with( test_dict_unicode['EXPORT']['Export_Id']) if rados_store_enable: (self._manager._get_export_rados_object_name. assert_called_once_with(test_name)) self._manager._delete_rados_object.assert_called_once_with( 'fakeobj') (self._manager._remove_rados_object_url_from_index. 
assert_called_once_with(test_name)) self.assertFalse(self._manager._rm_export_file.called) self.assertFalse(self._manager._mkindex.called) else: self._manager._rm_export_file.assert_called_once_with(test_name) self._manager._mkindex.assert_called_once_with() self.assertFalse( self._manager._get_export_rados_object_name.called) self.assertFalse(self._manager._delete_rados_object.called) self.assertFalse( self._manager._remove_rados_object_url_from_index.called) self.assertIsNone(ret) @ddt.data(True, False) def test_remove_export_error_during_read_export_with_rados_store( self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable self.mock_object( self._manager, '_read_export', mock.Mock(side_effect=exception.GaneshaCommandFailure)) self.mock_object(self._manager, '_get_export_rados_object_name', mock.Mock(return_value='fakeobj')) methods = ('_remove_export_dbus', '_rm_export_file', '_mkindex', '_remove_rados_object_url_from_index', '_delete_rados_object') for method in methods: self.mock_object(self._manager, method) ret = self._manager.remove_export(test_name) self._manager._read_export.assert_called_once_with(test_name) self.assertFalse(self._manager._remove_export_dbus.called) if rados_store_enable: (self._manager._get_export_rados_object_name. assert_called_once_with(test_name)) self._manager._delete_rados_object.assert_called_once_with( 'fakeobj') (self._manager._remove_rados_object_url_from_index. 
assert_called_once_with(test_name)) self.assertFalse(self._manager._rm_export_file.called) self.assertFalse(self._manager._mkindex.called) else: self._manager._rm_export_file.assert_called_once_with(test_name) self._manager._mkindex.assert_called_once_with() self.assertFalse( self._manager._get_export_rados_object_name.called) self.assertFalse(self._manager._delete_rados_object.called) self.assertFalse( self._manager._remove_rados_object_url_from_index.called) self.assertIsNone(ret) @ddt.data(True, False) def test_remove_export_error_during_remove_export_dbus_with_rados_store( self, rados_store_enable): self._manager.ganesha_rados_store_enable = rados_store_enable self.mock_object(self._manager, '_read_export', mock.Mock(return_value=test_dict_unicode)) self.mock_object(self._manager, '_get_export_rados_object_name', mock.Mock(return_value='fakeobj')) self.mock_object( self._manager, '_remove_export_dbus', mock.Mock(side_effect=exception.GaneshaCommandFailure)) methods = ('_rm_export_file', '_mkindex', '_remove_rados_object_url_from_index', '_delete_rados_object') for method in methods: self.mock_object(self._manager, method) ret = self._manager.remove_export(test_name) self._manager._read_export.assert_called_once_with(test_name) self._manager._remove_export_dbus.assert_called_once_with( test_dict_unicode['EXPORT']['Export_Id']) if rados_store_enable: (self._manager._get_export_rados_object_name. assert_called_once_with(test_name)) self._manager._delete_rados_object.assert_called_once_with( 'fakeobj') (self._manager._remove_rados_object_url_from_index. 
assert_called_once_with(test_name)) self.assertFalse(self._manager._rm_export_file.called) self.assertFalse(self._manager._mkindex.called) else: self._manager._rm_export_file.assert_called_once_with(test_name) self._manager._mkindex.assert_called_once_with() self.assertFalse( self._manager._get_export_rados_object_name.called) self.assertFalse(self._manager._delete_rados_object.called) self.assertFalse( self._manager._remove_rados_object_url_from_index.called) self.assertIsNone(ret) def test_get_rados_object(self): fakebin = chr(246).encode('utf-8') ioctx = mock.Mock() ioctx.read.side_effect = [fakebin, fakebin] self._rados_client.open_ioctx = mock.Mock(return_value=ioctx) self._rados_client.conf_get = mock.Mock(return_value=256) max_size = 256 * 1024 * 1024 ret = self._manager_with_rados_store._get_rados_object('fakeobj') self._rados_client.open_ioctx.assert_called_once_with('fakepool') self._rados_client.conf_get.assert_called_once_with( 'osd_max_write_size') ioctx.read.assert_called_once_with('fakeobj', max_size) ioctx.close.assert_called_once() self.assertEqual(fakebin.decode('utf-8'), ret) def test_put_rados_object(self): faketext = chr(246) ioctx = mock.Mock() manager.rados.WriteOpCtx.write_full = mock.Mock() self._rados_client.open_ioctx = mock.Mock(return_value=ioctx) self._rados_client.conf_get = mock.Mock(return_value=256) ret = self._manager_with_rados_store._put_rados_object( 'fakeobj', faketext) self._rados_client.open_ioctx.assert_called_once_with('fakepool') self._rados_client.conf_get.assert_called_once_with( 'osd_max_write_size') manager.rados.WriteOpCtx.write_full.assert_called_once_with( faketext.encode('utf-8')) ioctx.operate_write_op.assert_called_once_with(mock.ANY, 'fakeobj') self.assertIsNone(ret) def test_delete_rados_object(self): ioctx = mock.Mock() self._rados_client.open_ioctx = mock.Mock(return_value=ioctx) ret = self._manager_with_rados_store._delete_rados_object('fakeobj') 
self._rados_client.open_ioctx.assert_called_once_with('fakepool') ioctx.remove_object.assert_called_once_with('fakeobj') ioctx.close.assert_called_once() self.assertIsNone(ret) def test_get_export_id(self): self.mock_object(self._manager, 'execute', mock.Mock(return_value=('exportid|101', ''))) ret = self._manager.get_export_id() self._manager.execute.assert_called_once_with( 'sqlite3', self._manager.ganesha_db_path, 'update ganesha set value = value + 1;' 'select * from ganesha where key = "exportid";', run_as_root=False) self.assertEqual(101, ret) def test_get_export_id_nobump(self): self.mock_object(self._manager, 'execute', mock.Mock(return_value=('exportid|101', ''))) ret = self._manager.get_export_id(bump=False) self._manager.execute.assert_called_once_with( 'sqlite3', self._manager.ganesha_db_path, 'select * from ganesha where key = "exportid";', run_as_root=False) self.assertEqual(101, ret) def test_get_export_id_error_invalid_export_db(self): self.mock_object(self._manager, 'execute', mock.Mock(return_value=('invalid', ''))) self.mock_object(manager.LOG, 'error') self.assertRaises(exception.InvalidSqliteDB, self._manager.get_export_id) manager.LOG.error.assert_called_once_with( mock.ANY, mock.ANY) self._manager.execute.assert_called_once_with( 'sqlite3', self._manager.ganesha_db_path, 'update ganesha set value = value + 1;' 'select * from ganesha where key = "exportid";', run_as_root=False) @ddt.data(True, False) def test_get_export_id_with_rados_store_and_bump(self, bump): self.mock_object(self._manager_with_rados_store, '_get_rados_object', mock.Mock(return_value='1000')) self.mock_object(self._manager_with_rados_store, '_put_rados_object') ret = self._manager_with_rados_store.get_export_id(bump=bump) if bump: (self._manager_with_rados_store._get_rados_object. assert_called_once_with('fakecounter')) (self._manager_with_rados_store._put_rados_object. 
assert_called_once_with('fakecounter', '1001')) self.assertEqual(1001, ret) else: (self._manager_with_rados_store._get_rados_object. assert_called_once_with('fakecounter')) self.assertFalse( self._manager_with_rados_store._put_rados_object.called) self.assertEqual(1000, ret) def test_restart_service(self): self.mock_object(self._manager, 'execute') ret = self._manager.restart_service() self._manager.execute.assert_called_once_with( 'service', 'ganesha.fakeservice', 'restart') self.assertIsNone(ret) def test_reset_exports(self): self.mock_object(self._manager, 'execute') self.mock_object(self._manager, '_mkindex') ret = self._manager.reset_exports() self._manager.execute.assert_called_once_with( 'sh', '-c', 'rm -f /fakedir0/export.d/*.conf') self._manager._mkindex.assert_called_once_with() self.assertIsNone(ret) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/ganesha/test_utils.py0000664000175000017500000001246400000000000024427 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from unittest import mock import ddt from manila import exception from manila.share.drivers.ganesha import utils as ganesha_utils from manila import test from manila.tests import fake_share patch_test_dict1 = {'a': 1, 'b': {'c': 2}, 'd': 3, 'e': 4} patch_test_dict2 = {'a': 11, 'b': {'f': 5}, 'd': {'g': 6}} patch_test_dict3 = {'b': {'c': 22, 'h': {'i': 7}}, 'e': None} patch_test_dict_result = { 'a': 11, 'b': {'c': 22, 'f': 5, 'h': {'i': 7}}, 'd': {'g': 6}, 'e': None, } walk_test_dict = {'a': {'b': {'c': {'d': {'e': 'f'}}}}} walk_test_list = [('e', 'f')] def fake_access(kwargs): fake_access_rule = fake_share.fake_access(**kwargs) fake_access_rule.to_dict = lambda: fake_access_rule.values return fake_access_rule @ddt.ddt class GaneshaUtilsTests(test.TestCase): """Tests Ganesha utility functions.""" def test_patch(self): ret = ganesha_utils.patch(patch_test_dict1, patch_test_dict2, patch_test_dict3) self.assertEqual(patch_test_dict_result, ret) def test_walk(self): ret = [elem for elem in ganesha_utils.walk(walk_test_dict)] self.assertEqual(walk_test_list, ret) def test_path_from(self): self.mock_object(os.path, 'abspath', lambda path: os.path.join('/foo/bar', path)) ret = ganesha_utils.path_from('baz.py', '../quux', 'tic/tac/toe') self.assertEqual('/foo/quux/tic/tac/toe', os.path.normpath(ret)) @ddt.data({'rule': {'access_type': 'ip', 'access_level': 'ro', 'access_to': '10.10.10.12'}, 'kwargs': {'abort': True}}, {'rule': {'access_type': 'cert', 'access_level': 'ro', 'access_to': 'some-CN'}, 'kwargs': {'abort': False}}, {'rule': {'access_type': 'ip', 'access_level': 'rw', 'access_to': '10.10.10.12'}, 'kwargs': {}}) @ddt.unpack def test_get_valid_access_rules(self, rule, kwargs): supported = ['ip', 'ro'] ret = ganesha_utils.validate_access_rule( *([[a] for a in supported] + [fake_access(rule)]), **kwargs) self.assertEqual( [rule['access_' + k] for k in ['type', 'level']] == supported, ret) @ddt.data({'rule': {'access_type': 'cert', 'access_level': 'ro', 
'access_to': 'some-CN'}, 'trouble': exception.InvalidShareAccess}, {'rule': {'access_type': 'ip', 'access_level': 'rw', 'access_to': '10.10.10.12'}, 'trouble': exception.InvalidShareAccessLevel}) @ddt.unpack def test_get_valid_access_rules_fail(self, rule, trouble): self.assertRaises(trouble, ganesha_utils.validate_access_rule, ['ip'], ['ro'], fake_access(rule), abort=True) @ddt.data({'rule': {'access_type': 'ip', 'access_level': 'rw', 'access_to': '10.10.10.12'}, 'result': {'access_type': 'ip', 'access_level': 'rw', 'access_to': '10.10.10.12'}, }, {'rule': {'access_type': 'ip', 'access_level': 'rw', 'access_to': '0.0.0.0/0'}, 'result': {'access_type': 'ip', 'access_level': 'rw', 'access_to': '0.0.0.0'}, }, ) @ddt.unpack def test_fixup_access_rules(self, rule, result): self.assertEqual(result, ganesha_utils.fixup_access_rule(rule)) @ddt.ddt class SSHExecutorTestCase(test.TestCase): """Tests SSHExecutor.""" @ddt.data({'run_as_root': True, 'expected_prefix': 'sudo '}, {'run_as_root': False, 'expected_prefix': ''}) @ddt.unpack def test_call_ssh_exec_object_with_run_as_root( self, run_as_root, expected_prefix): with mock.patch.object(ganesha_utils.ssh_utils, 'SSHPool'): self.execute = ganesha_utils.SSHExecutor() fake_ssh_object = mock.Mock() self.mock_object(self.execute.pool, 'get', mock.Mock(return_value=fake_ssh_object)) self.mock_object(ganesha_utils.processutils, 'ssh_execute', mock.Mock(return_value=('', ''))) ret = self.execute('ls', run_as_root=run_as_root) self.assertEqual(('', ''), ret) self.execute.pool.get.assert_called_once_with() ganesha_utils.processutils.ssh_execute.assert_called_once_with( fake_ssh_object, expected_prefix + 'ls') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0176704 manila-21.0.0/manila/tests/share/drivers/glusterfs/0000775000175000017500000000000000000000000022257 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/glusterfs/__init__.py0000664000175000017500000000000000000000000024356 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/glusterfs/test_common.py0000664000175000017500000010425700000000000025171 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test cases for GlusterFS common routines.""" from unittest import mock import ddt from oslo_config import cfg from manila import exception from manila.privsep import os as privsep_os from manila.share.drivers.glusterfs import common from manila import test from manila.tests import fake_utils CONF = cfg.CONF fake_gluster_manager_attrs = { 'export': '127.0.0.1:/testvol', 'host': '127.0.0.1', 'qualified': 'testuser@127.0.0.1:/testvol', 'user': 'testuser', 'volume': 'testvol', 'path_to_private_key': '/fakepath/to/privatekey', 'remote_server_password': 'fakepassword', } fake_args = ('foo', 'bar') fake_kwargs = {'key1': 'value1', 'key2': 'value2'} fake_path_to_private_key = '/fakepath/to/privatekey' fake_remote_server_password = 'fakepassword' NFS_EXPORT_DIR = 'nfs.export-dir' fakehost = 'example.com' fakevol = 'testvol' fakeexport = ':/'.join((fakehost, fakevol)) fakemnt = '/mnt/glusterfs' @ddt.ddt class GlusterManagerTestCase(test.TestCase): """Tests GlusterManager.""" def setUp(self): super(GlusterManagerTestCase, self).setUp() self.fake_execf = mock.Mock() self.fake_executor = mock.Mock(return_value=('', '')) with mock.patch.object(common.GlusterManager, 'make_gluster_call', return_value=self.fake_executor): self._gluster_manager = common.GlusterManager( 'testuser@127.0.0.1:/testvol', self.fake_execf, fake_path_to_private_key, fake_remote_server_password) fake_gluster_manager_dict = { 'host': '127.0.0.1', 'user': 'testuser', 'volume': 'testvol' } self._gluster_manager_dict = common.GlusterManager( fake_gluster_manager_dict, self.fake_execf, fake_path_to_private_key, fake_remote_server_password) self._gluster_manager_array = [self._gluster_manager, self._gluster_manager_dict] def test_check_volume_presence(self): common._check_volume_presence(mock.Mock())(self._gluster_manager) def test_check_volume_presence_error(self): gmgr = common.GlusterManager('testuser@127.0.0.1') self.assertRaises( exception.GlusterfsException, common._check_volume_presence(mock.Mock()), 
gmgr) def test_volxml_get(self): xmlout = mock.Mock() value = mock.Mock() value.text = 'foobar' xmlout.find = mock.Mock(return_value=value) ret = common.volxml_get(xmlout, 'some/path') self.assertEqual('foobar', ret) @ddt.data(None, 'some-value') def test_volxml_get_notfound_fallback(self, default): xmlout = mock.Mock() xmlout.find = mock.Mock(return_value=None) ret = common.volxml_get(xmlout, 'some/path', default=default) self.assertEqual(default, ret) def test_volxml_get_multiple(self): xmlout = mock.Mock() value = mock.Mock() value.text = 'foobar' xmlout.find = mock.Mock(side_effect=(None, value)) ret = common.volxml_get(xmlout, 'some/path', 'better/path') self.assertEqual('foobar', ret) def test_volxml_get_notfound(self): xmlout = mock.Mock() xmlout.find = mock.Mock(return_value=None) self.assertRaises(exception.InvalidShare, common.volxml_get, xmlout, 'some/path') def test_gluster_manager_common_init(self): for gmgr in self._gluster_manager_array: self.assertEqual( fake_gluster_manager_attrs['user'], gmgr.user) self.assertEqual( fake_gluster_manager_attrs['host'], gmgr.host) self.assertEqual( fake_gluster_manager_attrs['volume'], gmgr.volume) self.assertEqual( fake_gluster_manager_attrs['qualified'], gmgr.qualified) self.assertEqual( fake_gluster_manager_attrs['export'], gmgr.export) self.assertEqual( fake_gluster_manager_attrs['path_to_private_key'], gmgr.path_to_private_key) self.assertEqual( fake_gluster_manager_attrs['remote_server_password'], gmgr.remote_server_password) self.assertEqual( self.fake_executor, gmgr.gluster_call) @ddt.data({'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': None}, {'user': None, 'host': '127.0.0.1', 'volume': 'testvol', 'path': '/testpath'}, {'user': None, 'host': '127.0.0.1', 'volume': 'testvol', 'path': None}, {'user': None, 'host': '127.0.0.1', 'volume': None, 'path': None}, {'user': 'testuser', 'host': '127.0.0.1', 'volume': None, 'path': None}, {'user': 'testuser', 'host': '127.0.0.1', 'volume': 
'testvol', 'path': '/testpath'}) def test_gluster_manager_init_check(self, test_addr_dict): test_gluster_manager = common.GlusterManager( test_addr_dict, self.fake_execf) self.assertEqual(test_addr_dict, test_gluster_manager.components) @ddt.data(None, True) def test_gluster_manager_init_has_vol(self, has_volume): test_gluster_manager = common.GlusterManager( 'testuser@127.0.0.1:/testvol', self.fake_execf, requires={'volume': has_volume}) self.assertEqual('testvol', test_gluster_manager.volume) @ddt.data(None, True) def test_gluster_manager_dict_init_has_vol(self, has_volume): test_addr_dict = {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': '/testdir'} test_gluster_manager = common.GlusterManager( test_addr_dict, self.fake_execf, requires={'volume': has_volume}) self.assertEqual('testvol', test_gluster_manager.volume) @ddt.data(None, False) def test_gluster_manager_init_no_vol(self, has_volume): test_gluster_manager = common.GlusterManager( 'testuser@127.0.0.1', self.fake_execf, requires={'volume': has_volume}) self.assertIsNone(test_gluster_manager.volume) @ddt.data(None, False) def test_gluster_manager_dict_init_no_vol(self, has_volume): test_addr_dict = {'user': 'testuser', 'host': '127.0.0.1'} test_gluster_manager = common.GlusterManager( test_addr_dict, self.fake_execf, requires={'volume': has_volume}) self.assertIsNone(test_gluster_manager.volume) def test_gluster_manager_init_has_shouldnt_have_vol(self): self.assertRaises(exception.GlusterfsException, common.GlusterManager, 'testuser@127.0.0.1:/testvol', self.fake_execf, requires={'volume': False}) def test_gluster_manager_dict_init_has_shouldnt_have_vol(self): test_addr_dict = {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol'} self.assertRaises(exception.GlusterfsException, common.GlusterManager, test_addr_dict, self.fake_execf, requires={'volume': False}) def test_gluster_manager_hasnt_should_have_vol(self): self.assertRaises(exception.GlusterfsException, 
common.GlusterManager, 'testuser@127.0.0.1', self.fake_execf, requires={'volume': True}) def test_gluster_manager_dict_hasnt_should_have_vol(self): test_addr_dict = {'user': 'testuser', 'host': '127.0.0.1'} self.assertRaises(exception.GlusterfsException, common.GlusterManager, test_addr_dict, self.fake_execf, requires={'volume': True}) def test_gluster_manager_invalid(self): self.assertRaises(exception.GlusterfsException, common.GlusterManager, '127.0.0.1:vol', 'self.fake_execf') def test_gluster_manager_dict_invalid_req_host(self): test_addr_dict = {'user': 'testuser', 'volume': 'testvol'} self.assertRaises(exception.GlusterfsException, common.GlusterManager, test_addr_dict, 'self.fake_execf') @ddt.data({'user': 'testuser'}, {'host': 'johndoe@example.com'}, {'host': 'example.com/so', 'volume': 'me/path'}, {'user': 'user@error', 'host': "example.com", 'volume': 'vol'}, {'host': 'example.com', 'volume': 'vol', 'pith': '/path'}, {'host': 'example.com', 'path': '/path'}, {'user': 'user@error', 'host': "example.com", 'path': '/path'}) def test_gluster_manager_dict_invalid_input(self, test_addr_dict): self.assertRaises(exception.GlusterfsException, common.GlusterManager, test_addr_dict, 'self.fake_execf') def test_gluster_manager_getattr(self): self.assertEqual('testvol', self._gluster_manager.volume) def test_gluster_manager_getattr_called(self): class FakeGlusterManager(common.GlusterManager): pass _gluster_manager = FakeGlusterManager('127.0.0.1:/testvol', self.fake_execf) FakeGlusterManager.__getattr__ = mock.Mock() _gluster_manager.volume _gluster_manager.__getattr__.assert_called_once_with('volume') def test_gluster_manager_getattr_noattr(self): self.assertRaises(AttributeError, getattr, self._gluster_manager, 'fakeprop') @ddt.data({'mockargs': {}, 'kwargs': {}}, {'mockargs': {'side_effect': exception.ProcessExecutionError}, 'kwargs': {'error_policy': 'suppress'}}, {'mockargs': { 'side_effect': exception.ProcessExecutionError(exit_code=2)}, 'kwargs': 
{'error_policy': (2,)}}) @ddt.unpack def test_gluster_manager_make_gluster_call_local(self, mockargs, kwargs): fake_obj = mock.Mock(**mockargs) fake_execute = mock.Mock() kwargs.update(fake_kwargs) with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) gluster_manager.make_gluster_call(fake_execute)(*fake_args, **kwargs) common.ganesha_utils.RootExecutor.assert_called_with( fake_execute) fake_obj.assert_called_once_with( *(('gluster',) + fake_args), **fake_kwargs) def test_gluster_manager_make_gluster_call_remote(self): fake_obj = mock.Mock() fake_execute = mock.Mock() with mock.patch.object(common.ganesha_utils, 'SSHExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( 'testuser@127.0.0.1:/testvol', self.fake_execf, fake_path_to_private_key, fake_remote_server_password) gluster_manager.make_gluster_call(fake_execute)(*fake_args, **fake_kwargs) common.ganesha_utils.SSHExecutor.assert_called_with( gluster_manager.host, 22, None, gluster_manager.user, password=gluster_manager.remote_server_password, privatekey=gluster_manager.path_to_private_key) fake_obj.assert_called_once_with( *(('gluster',) + fake_args), **fake_kwargs) @ddt.data({'trouble': exception.ProcessExecutionError, '_exception': exception.GlusterfsException, 'xkw': {}}, {'trouble': exception.ProcessExecutionError(exit_code=2), '_exception': exception.GlusterfsException, 'xkw': {'error_policy': (1,)}}, {'trouble': exception.ProcessExecutionError, '_exception': exception.GlusterfsException, 'xkw': {'error_policy': 'coerce'}}, {'trouble': exception.ProcessExecutionError, '_exception': exception.ProcessExecutionError, 'xkw': {'error_policy': 'raw'}}, {'trouble': RuntimeError, '_exception': RuntimeError, 'xkw': {}}) @ddt.unpack def test_gluster_manager_make_gluster_call_error(self, trouble, _exception, xkw): fake_obj = mock.Mock(side_effect=trouble) 
fake_execute = mock.Mock() kwargs = fake_kwargs.copy() kwargs.update(xkw) with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) self.assertRaises(_exception, gluster_manager.make_gluster_call(fake_execute), *fake_args, **kwargs) common.ganesha_utils.RootExecutor.assert_called_with( fake_execute) fake_obj.assert_called_once_with( *(('gluster',) + fake_args), **fake_kwargs) def test_gluster_manager_make_gluster_call_bad_policy(self): fake_obj = mock.Mock() fake_execute = mock.Mock() with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) self.assertRaises(TypeError, gluster_manager.make_gluster_call(fake_execute), *fake_args, error_policy='foobar') @ddt.data({}, {'opErrstr': None}, {'opErrstr': 'error'}) def test_xml_response_check(self, xdict): fdict = {'opRet': '0', 'opErrno': '0', 'some/count': '1'} fdict.update(xdict) def vxget(x, e, *a): if a: return fdict.get(e, a[0]) else: return fdict[e] xtree = mock.Mock() command = ['volume', 'command', 'fake'] with mock.patch.object(common, 'volxml_get', side_effect=vxget): self._gluster_manager.xml_response_check(xtree, command, 'some/count') self.assertTrue(common.volxml_get.called) @ddt.data('1', '2') def test_xml_response_check_failure(self, count): fdict = {'opRet': '-1', 'opErrno': '0', 'some/count': count} def vxget(x, e, *a): if a: return fdict.get(e, a[0]) else: return fdict[e] xtree = mock.Mock() command = ['volume', 'command', 'fake'] with mock.patch.object(common, 'volxml_get', side_effect=vxget): self.assertRaises(exception.GlusterfsException, self._gluster_manager.xml_response_check, xtree, command, 'some/count') self.assertTrue(common.volxml_get.called) @ddt.data({'opRet': '-2', 'opErrno': '0', 'some/count': '1'}, {'opRet': '0', 'opErrno': '1', 'some/count': '1'}, 
{'opRet': '0', 'opErrno': '0', 'some/count': '0'}, {'opRet': '0', 'opErrno': '0', 'some/count': '2'}) def test_xml_response_check_invalid(self, fdict): def vxget(x, *e, **kw): if kw: return fdict.get(e[0], kw['default']) else: return fdict[e[0]] xtree = mock.Mock() command = ['volume', 'command', 'fake'] with mock.patch.object(common, 'volxml_get', side_effect=vxget): self.assertRaises(exception.InvalidShare, self._gluster_manager.xml_response_check, xtree, command, 'some/count') self.assertTrue(common.volxml_get.called) @ddt.data({'opRet': '0', 'opErrno': '0'}, {'opRet': '0', 'opErrno': '0', 'some/count': '2'}) def test_xml_response_check_count_ignored(self, fdict): def vxget(x, e, *a): if a: return fdict.get(e, a[0]) else: return fdict[e] xtree = mock.Mock() command = ['volume', 'command', 'fake'] with mock.patch.object(common, 'volxml_get', side_effect=vxget): self._gluster_manager.xml_response_check(xtree, command) self.assertTrue(common.volxml_get.called) def test_get_vol_option_via_info_empty_volinfo(self): args = ('--xml', 'volume', 'info', self._gluster_manager.volume) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=('', {}))) self.assertRaises(exception.GlusterfsException, self._gluster_manager._get_vol_option_via_info, 'foobar') self._gluster_manager.gluster_call.assert_called_once_with( *args, log=mock.ANY) def test_get_vol_option_via_info_ambiguous_volinfo(self): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 0 """, '' args = ('--xml', 'volume', 'info', self._gluster_manager.volume) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) self.assertRaises(exception.InvalidShare, self._gluster_manager._get_vol_option_via_info, 'foobar') self._gluster_manager.gluster_call.assert_called_once_with( *args, log=mock.ANY) def test_get_vol_option_via_info_trivial_volinfo(self): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 1 """, '' args = ('--xml', 'volume', 
'info', self._gluster_manager.volume) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) ret = self._gluster_manager._get_vol_option_via_info('foobar') self.assertIsNone(ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, log=mock.ANY) def test_get_vol_option_via_info(self): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 1 """, '' args = ('--xml', 'volume', 'info', self._gluster_manager.volume) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) ret = self._gluster_manager._get_vol_option_via_info('foobar') self.assertEqual('FIRE MONKEY!', ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, log=mock.ANY) def test_get_vol_user_option(self): self.mock_object(self._gluster_manager, '_get_vol_option_via_info', mock.Mock(return_value='VALUE')) ret = self._gluster_manager._get_vol_user_option('OPT') self.assertEqual(ret, 'VALUE') (self._gluster_manager._get_vol_option_via_info. 
assert_called_once_with('user.OPT')) def test_get_vol_regular_option_empty_reponse(self): args = ('--xml', 'volume', 'get', self._gluster_manager.volume, NFS_EXPORT_DIR) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=('', {}))) ret = self._gluster_manager._get_vol_regular_option(NFS_EXPORT_DIR) self.assertIsNone(ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, check_exit_code=False) @ddt.data(0, 2) def test_get_vol_regular_option_ambiguous_volinfo(self, count): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 %d """ % count, '' args = ('--xml', 'volume', 'get', self._gluster_manager.volume, NFS_EXPORT_DIR) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) self.assertRaises(exception.InvalidShare, self._gluster_manager._get_vol_regular_option, NFS_EXPORT_DIR) self._gluster_manager.gluster_call.assert_called_once_with( *args, check_exit_code=False) @ddt.data({'start': "", 'end': ""}, {'start': "", 'end': ""}) def test_get_vol_regular_option(self, extratag): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 1 %(start)s /foo(10.0.0.1|10.0.0.2),/bar(10.0.0.1) %(end)s """ % extratag, '' args = ('--xml', 'volume', 'get', self._gluster_manager.volume, NFS_EXPORT_DIR) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) ret = self._gluster_manager._get_vol_regular_option(NFS_EXPORT_DIR) self.assertEqual('/foo(10.0.0.1|10.0.0.2),/bar(10.0.0.1)', ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, check_exit_code=False) def test_get_vol_regular_option_not_suppored(self): args = ('--xml', 'volume', 'get', self._gluster_manager.volume, NFS_EXPORT_DIR) self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=( """Ceci n'est pas un XML.""", ''))) self.mock_object(self._gluster_manager, '_get_vol_option_via_info', mock.Mock(return_value="VALUE")) ret = 
self._gluster_manager._get_vol_regular_option(NFS_EXPORT_DIR) self.assertEqual("VALUE", ret) self._gluster_manager.gluster_call.assert_called_once_with( *args, check_exit_code=False) (self._gluster_manager._get_vol_option_via_info. assert_called_once_with(NFS_EXPORT_DIR)) @ddt.data({'opt': 'some.option', 'opttype': 'regular', 'lowopt': 'some.option'}, {'opt': 'user.param', 'opttype': 'user', 'lowopt': 'param'}) @ddt.unpack def test_get_vol_option(self, opt, opttype, lowopt): for t in ('user', 'regular'): self.mock_object(self._gluster_manager, '_get_vol_%s_option' % t, mock.Mock(return_value='value-%s' % t)) ret = self._gluster_manager.get_vol_option(opt) self.assertEqual('value-%s' % opttype, ret) for t in ('user', 'regular'): func = getattr(self._gluster_manager, '_get_vol_%s_option' % t) if opttype == t: func.assert_called_once_with(lowopt) else: self.assertFalse(func.called) def test_get_vol_option_unset(self): self.mock_object(self._gluster_manager, '_get_vol_regular_option', mock.Mock(return_value=None)) ret = self._gluster_manager.get_vol_option('some.option') self.assertIsNone(ret) @ddt.data({'value': '0', 'boolval': False}, {'value': 'Off', 'boolval': False}, {'value': 'no', 'boolval': False}, {'value': '1', 'boolval': True}, {'value': 'true', 'boolval': True}, {'value': 'enAble', 'boolval': True}, {'value': None, 'boolval': None}) @ddt.unpack def test_get_vol_option_boolean(self, value, boolval): self.mock_object(self._gluster_manager, '_get_vol_regular_option', mock.Mock(return_value=value)) ret = self._gluster_manager.get_vol_option('some.option', boolean=True) self.assertEqual(boolval, ret) def test_get_vol_option_boolean_bad(self): self.mock_object(self._gluster_manager, '_get_vol_regular_option', mock.Mock(return_value='jabberwocky')) self.assertRaises(exception.GlusterfsException, self._gluster_manager.get_vol_option, 'some.option', boolean=True) @ddt.data({'setting': 'some_value', 'args': ('set', 'some_value')}, {'setting': None, 'args': 
('reset',)}, {'setting': True, 'args': ('set', 'ON')}, {'setting': False, 'args': ('set', 'OFF')}) @ddt.unpack def test_set_vol_option(self, setting, args): self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock()) self._gluster_manager.set_vol_option('an_option', setting) self._gluster_manager.gluster_call.assert_called_once_with( 'volume', args[0], 'testvol', 'an_option', *args[1:], error_policy=mock.ANY) @mock.patch('tenacity.nap.sleep', mock.Mock()) @ddt.data({}, {'ignore_failure': False}) def test_set_vol_option_error(self, kwargs): fake_obj = mock.Mock( side_effect=exception.ProcessExecutionError(exit_code=1)) with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) self.assertRaises(exception.GlusterfsException, gluster_manager.set_vol_option, 'an_option', "some_value", **kwargs) self.assertTrue(fake_obj.called) def test_set_vol_option_error_relaxed(self): fake_obj = mock.Mock( side_effect=exception.ProcessExecutionError(exit_code=1)) with mock.patch.object(common.ganesha_utils, 'RootExecutor', mock.Mock(return_value=fake_obj)): gluster_manager = common.GlusterManager( '127.0.0.1:/testvol', self.fake_execf) gluster_manager.set_vol_option('an_option', "some_value", ignore_failure=True) self.assertTrue(fake_obj.called) def test_get_gluster_version(self): self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=('glusterfs 3.6.2beta3', ''))) ret = self._gluster_manager.get_gluster_version() self.assertEqual(['3', '6', '2beta3'], ret) self._gluster_manager.gluster_call.assert_called_once_with( '--version', log=mock.ANY) @ddt.data("foo 1.1.1", "glusterfs 3-6", "glusterfs 3.6beta3") def test_get_gluster_version_exception(self, versinfo): self.mock_object(self._gluster_manager, 'gluster_call', mock.Mock(return_value=(versinfo, ''))) self.assertRaises(exception.GlusterfsException, 
self._gluster_manager.get_gluster_version) self._gluster_manager.gluster_call.assert_called_once_with( '--version', log=mock.ANY) def test_check_gluster_version(self): self.mock_object(self._gluster_manager, 'get_gluster_version', mock.Mock(return_value=('3', '6'))) ret = self._gluster_manager.check_gluster_version((3, 5, 2)) self.assertIsNone(ret) self._gluster_manager.get_gluster_version.assert_called_once_with() def test_check_gluster_version_unmet(self): self.mock_object(self._gluster_manager, 'get_gluster_version', mock.Mock(return_value=('3', '5', '2'))) self.assertRaises(exception.GlusterfsException, self._gluster_manager.check_gluster_version, (3, 6)) self._gluster_manager.get_gluster_version.assert_called_once_with() @ddt.data(('3', '6'), ('3', '6', '2beta'), ('3', '6', '2beta', '4')) def test_numreduct(self, vers): ret = common.numreduct(vers) self.assertEqual((3, 6), ret) @ddt.ddt class GlusterFSCommonTestCase(test.TestCase): """Tests common GlusterFS utility functions.""" def setUp(self): super(GlusterFSCommonTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) self.mock_object(common.GlusterManager, 'make_gluster_call') @staticmethod def _mount_exec(vol, mnt): return ['mkdir -p %s' % mnt] def test_mount_gluster_vol(self): expected_exec = self._mount_exec(fakeexport, fakemnt) self.mock_object(privsep_os, 'mount') ret = common._mount_gluster_vol(self._execute, fakeexport, fakemnt, False) privsep_os.mount.assert_called_once_with( fakeexport, fakemnt, mount_type='glusterfs') self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertIsNone(ret) def test_mount_gluster_vol_mounted_noensure(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='already mounted') expected_exec = self._mount_exec(fakeexport, fakemnt) self.mock_object( 
privsep_os, 'mount', mock.Mock(side_effect=exec_runner)) self.assertRaises(exception.GlusterfsException, common._mount_gluster_vol, self._execute, fakeexport, fakemnt, False) privsep_os.mount.assert_called_once_with( fakeexport, fakemnt, mount_type='glusterfs') self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_mount_gluster_vol_mounted_ensure(self): def exec_runner(*ignore_args, **ignore_kwargs): raise exception.ProcessExecutionError(stderr='already mounted') expected_exec = self._mount_exec(fakeexport, fakemnt) common.LOG.warning = mock.Mock() self.mock_object( privsep_os, 'mount', mock.Mock(side_effect=exec_runner)) ret = common._mount_gluster_vol(self._execute, fakeexport, fakemnt, True) self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) self.assertIsNone(ret) self.mock_object( privsep_os, 'mount', mock.Mock(side_effect=exec_runner)) common.LOG.warning.assert_called_with( "%s is already mounted.", fakeexport) @ddt.data(True, False) def test_mount_gluster_vol_fail(self, ensure): def exec_runner(*ignore_args, **ignore_kwargs): raise RuntimeError('fake error') expected_exec = self._mount_exec(fakeexport, fakemnt) self.mock_object( privsep_os, 'mount', mock.Mock(side_effect=exec_runner)) self.assertRaises( RuntimeError, common._mount_gluster_vol, self._execute, fakeexport, fakemnt, ensure) privsep_os.mount.assert_called_once_with( fakeexport, fakemnt, mount_type='glusterfs') self.assertEqual(fake_utils.fake_execute_get_log(), expected_exec) def test_umount_gluster_vol(self): self.mock_object(privsep_os, 'umount') ret = common._umount_gluster_vol(fakemnt) privsep_os.umount.assert_called_once_with(fakemnt) self.assertIsNone(ret) @ddt.data({'in_exc': exception.ProcessExecutionError, 'out_exc': exception.GlusterfsException}, {'in_exc': RuntimeError, 'out_exc': RuntimeError}) @ddt.unpack def test_umount_gluster_vol_fail(self, in_exc, out_exc): def exec_runner(*ignore_args, **ignore_kwargs): raise in_exc('fake error') 
self.mock_object(privsep_os, 'umount', mock.Mock(side_effect=exec_runner)) self.assertRaises(out_exc, common._umount_gluster_vol, fakemnt) privsep_os.umount.assert_called_once_with(fakemnt) def test_restart_gluster_vol(self): gmgr = common.GlusterManager(fakeexport, self._execute, None, None) test_args = [(('volume', 'stop', fakevol, '--mode=script'), {'log': mock.ANY}), (('volume', 'start', fakevol), {'log': mock.ANY})] common._restart_gluster_vol(gmgr) self.assertEqual( [mock.call(*arg[0], **arg[1]) for arg in test_args], gmgr.gluster_call.call_args_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/glusterfs/test_glusterfs_native.py0000664000175000017500000002626400000000000027266 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GlusterFS native protocol (glusterfs) driver for shares. Test cases for GlusterFS native protocol driver. 
""" from unittest import mock import ddt from oslo_config import cfg from manila.common import constants from manila import context from manila import exception from manila.share import configuration as config from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import glusterfs_native from manila import test from manila.tests import fake_utils CONF = cfg.CONF def new_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'glusterfs', } share.update(kwargs) return share @ddt.ddt class GlusterfsNativeShareDriverTestCase(test.TestCase): """Tests GlusterfsNativeShareDriver.""" def setUp(self): super(GlusterfsNativeShareDriverTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() self.glusterfs_target1 = 'root@host1:/gv1' self.glusterfs_target2 = 'root@host2:/gv2' self.glusterfs_server1 = 'root@host1' self.glusterfs_server2 = 'root@host2' self.glusterfs_server1_volumes = 'manila-share-1-1G\nshare1' self.glusterfs_server2_volumes = 'manila-share-2-2G\nshare2' self.share1 = new_share( export_location=self.glusterfs_target1, status=constants.STATUS_AVAILABLE) self.share2 = new_share( export_location=self.glusterfs_target2, status=constants.STATUS_AVAILABLE) self.gmgr1 = common.GlusterManager(self.glusterfs_server1, self._execute, None, None, requires={'volume': False}) self.gmgr2 = common.GlusterManager(self.glusterfs_server2, self._execute, None, None, requires={'volume': False}) self.glusterfs_volumes_dict = ( {'root@host1:/manila-share-1-1G': {'size': 1}, 'root@host2:/manila-share-2-2G': {'size': 2}}) self.glusterfs_used_vols = set([ 'root@host1:/manila-share-1-1G', 'root@host2:/manila-share-2-2G']) CONF.set_default('glusterfs_volume_pattern', r'manila-share-\d+-#{size}G$') CONF.set_default('driver_handles_share_servers', False) self.fake_conf = config.Configuration(None) self.mock_object(common.GlusterManager, 
'make_gluster_call') self._driver = glusterfs_native.GlusterfsNativeShareDriver( execute=self._execute, configuration=self.fake_conf) self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) def test_supported_protocols(self): self.assertEqual(('GLUSTERFS', ), self._driver.supported_protocols) def test_setup_via_manager(self): gmgr = mock.Mock() gmgr.gluster_call = mock.Mock() gmgr.set_vol_option = mock.Mock() gmgr.volume = 'fakevol' gmgr.export = 'fakehost:/fakevol' gmgr.get_vol_option = mock.Mock( return_value='glusterfs-server-name,some-other-name') share = mock.Mock() settings = ( ('nfs.export-volumes', False, {}), ('client.ssl', True, {}), ('server.ssl', True, {}), ('server.dynamic-auth', True, {'ignore_failure': True}), ) call_args = ( ('volume', 'stop', 'fakevol', '--mode=script', {'log': mock.ANY}), ('volume', 'start', 'fakevol', {'log': mock.ANY}), ) ret = self._driver._setup_via_manager({'manager': gmgr, 'share': share}) gmgr.get_vol_option.assert_called_once_with('auth.ssl-allow') gmgr.set_vol_option.assert_has_calls( [mock.call(*a[:-1], **a[-1]) for a in settings]) gmgr.gluster_call.assert_has_calls( [mock.call(*a[:-1], **a[-1]) for a in call_args]) self.assertEqual(ret, gmgr.export) def test_setup_via_manager_with_parent(self): gmgr = mock.Mock() gmgr.set_vol_option = mock.Mock() gmgr.volume = 'fakevol' gmgr.export = 'fakehost:/fakevol' gmgr_parent = mock.Mock() gmgr_parent.get_vol_option = mock.Mock( return_value=( 'glusterfs-server-name,some-other-name,manila-host.com')) share = mock.Mock() share_parent = mock.Mock() settings = ( ('auth.ssl-allow', 'glusterfs-server-name,manila-host.com', {}), ('nfs.export-volumes', False, {}), ('client.ssl', True, {}), ('server.ssl', True, {}), ('server.dynamic-auth', True, {'ignore_failure': True}), ) ret = self._driver._setup_via_manager( {'manager': gmgr, 'share': share}, {'manager': gmgr_parent, 'share': share_parent}) 
gmgr_parent.get_vol_option.assert_called_once_with( 'auth.ssl-allow') gmgr.set_vol_option.assert_has_calls( [mock.call(*a[:-1], **a[-1]) for a in settings]) self.assertEqual(ret, gmgr.export) @ddt.data(True, False) def test_setup_via_manager_no_option_data(self, has_parent): share = mock.Mock() gmgr = mock.Mock() if has_parent: share_parent = mock.Mock() gmgr_parent = mock.Mock() share_mgr_parent = {'share': share_parent, 'manager': gmgr_parent} gmgr_queried = gmgr_parent else: share_mgr_parent = None gmgr_queried = gmgr gmgr_queried.get_vol_option = mock.Mock(return_value='') self.assertRaises(exception.GlusterfsException, self._driver._setup_via_manager, {'share': share, 'manager': gmgr}, share_mgr_parent=share_mgr_parent) gmgr_queried.get_vol_option.assert_called_once_with( 'auth.ssl-allow') def test_snapshots_are_supported(self): self.assertTrue(self._driver.snapshots_are_supported) @ddt.data({'delta': (["oldCN"], []), 'expected': "glusterCN,oldCN"}, {'delta': (["newCN"], []), 'expected': "glusterCN,newCN,oldCN"}, {'delta': ([], ["newCN"]), 'expected': "glusterCN,oldCN"}, {'delta': ([], ["oldCN"]), 'expected': "glusterCN"}) @ddt.unpack def test_update_access_via_manager(self, delta, expected): gluster_mgr = common.GlusterManager(self.glusterfs_target1, self._execute, None, None) self.mock_object( gluster_mgr, 'get_vol_option', mock.Mock( side_effect=lambda a, *x, **kw: { 'auth.ssl-allow': "glusterCN,oldCN", 'server.dynamic-auth': True}[a])) self.mock_object(gluster_mgr, 'set_vol_option') add_rules, delete_rules = ( map(lambda a: {'access_to': a}, r) for r in delta) self._driver._update_access_via_manager( gluster_mgr, self._context, self.share1, add_rules, delete_rules) argseq = [('auth.ssl-allow', {})] if delete_rules: argseq.append(('server.dynamic-auth', {'boolean': True})) self.assertEqual([mock.call(a[0], **a[1]) for a in argseq], gluster_mgr.get_vol_option.call_args_list) gluster_mgr.set_vol_option.assert_called_once_with('auth.ssl-allow', expected) def 
test_update_access_via_manager_restart(self): gluster_mgr = common.GlusterManager(self.glusterfs_target1, self._execute, None, None) self.mock_object( gluster_mgr, 'get_vol_option', mock.Mock( side_effect=lambda a, *x, **kw: { 'auth.ssl-allow': "glusterCN,oldCN", 'server.dynamic-auth': False}[a])) self.mock_object(gluster_mgr, 'set_vol_option') self.mock_object(common, '_restart_gluster_vol') self._driver._update_access_via_manager( gluster_mgr, self._context, self.share1, [], [{'access_to': "oldCN"}]) common._restart_gluster_vol.assert_called_once_with(gluster_mgr) @ddt.data('common name with space', 'comma,nama') def test_update_access_via_manager_badcn(self, common_name): gluster_mgr = common.GlusterManager(self.glusterfs_target1, self._execute, None, None) self.mock_object(gluster_mgr, 'get_vol_option', mock.Mock( return_value="glusterCN,oldCN")) self.assertRaises(exception.GlusterfsException, self._driver._update_access_via_manager, gluster_mgr, self._context, self.share1, [{'access_to': common_name}], []) def test_update_share_stats(self): self._driver._update_share_stats() test_data = { 'share_backend_name': 'GlusterFS-Native', 'driver_handles_share_servers': False, 'vendor_name': 'Red Hat', 'driver_version': '1.1', 'storage_protocol': 'glusterfs', 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'qos': False, 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'pools': None, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'share_group_stats': { 'consistent_snapshot_support': None, }, 'replication_domain': None, 'filter_function': None, 'goodness_function': None, 'mount_point_name_support': False, 'ipv4_support': True, 'ipv6_support': False, 'security_service_update_support': False, 'share_server_multiple_subnet_support': False, 'network_allocation_update_support': False, 'share_replicas_migration_support': 
False, 'encryption_support': None, } self.assertEqual(test_data, self._driver._stats) def test_get_network_allocations_number(self): self.assertEqual(0, self._driver.get_network_allocations_number()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/glusterfs/test_layout.py0000664000175000017500000003130600000000000025210 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import errno import os from unittest import mock import ddt from oslo_config import cfg from oslo_utils import importutils from manila import exception from manila.share import configuration as config from manila.share import driver from manila.share.drivers.glusterfs import layout from manila import test from manila.tests import fake_share from manila.tests import fake_utils CONF = cfg.CONF fake_local_share_path = '/mnt/nfs/testvol/fakename' fake_path_to_private_key = '/fakepath/to/privatekey' fake_remote_server_password = 'fakepassword' def fake_access(kwargs): fake_access_rule = fake_share.fake_access(**kwargs) fake_access_rule.to_dict = lambda: fake_access_rule.values return fake_access_rule class GlusterfsFakeShareDriver(layout.GlusterfsShareDriverBase): supported_layouts = ('layout_fake.FakeLayout', 'layout_something.SomeLayout') supported_protocols = ('NFS,') _supported_access_types = ('ip',) _supported_access_levels = ('rw',) @ddt.ddt class GlusterfsShareDriverBaseTestCase(test.TestCase): """Tests GlusterfsShareDriverBase.""" def setUp(self): super(GlusterfsShareDriverBaseTestCase, self).setUp() CONF.set_default('driver_handles_share_servers', False) fake_conf, __ = self._setup() self._driver = GlusterfsFakeShareDriver(False, configuration=fake_conf) self.fake_share = mock.Mock(name='fake_share') self.fake_context = mock.Mock(name='fake_context') self.fake_access = mock.Mock(name='fake_access') def _setup(self): fake_conf = config.Configuration(None) fake_layout = mock.Mock() self.mock_object(importutils, "import_object", mock.Mock(return_value=fake_layout)) return fake_conf, fake_layout def test_init(self): self.assertRaises(IndexError, layout.GlusterfsShareDriverBase, False, configuration=config.Configuration(None)) @ddt.data({'has_snap': None, 'layout_name': None}, {'has_snap': False, 'layout_name': 'layout_fake.FakeLayout'}, {'has_snap': True, 'layout_name': 'layout_something.SomeLayout'}) @ddt.unpack def test_init_subclass(self, has_snap, layout_name): 
conf, _layout = self._setup() if layout_name is not None: conf.glusterfs_share_layout = layout_name if has_snap is None: del _layout._snapshots_are_supported else: _layout._snapshots_are_supported = has_snap _driver = GlusterfsFakeShareDriver(False, configuration=conf) snap_result = {None: False}.get(has_snap, has_snap) layout_result = {None: 'layout_fake.FakeLayout'}.get(layout_name, layout_name) importutils.import_object.assert_called_once_with( 'manila.share.drivers.glusterfs.%s' % layout_result, _driver, configuration=conf) self.assertEqual(_layout, _driver.layout) self.assertEqual(snap_result, _driver.snapshots_are_supported) def test_init_nosupp_layout(self): conf = config.Configuration(None) conf.glusterfs_share_layout = 'nonsense_layout' self.assertRaises(exception.GlusterfsException, GlusterfsFakeShareDriver, False, configuration=conf) def test_setup_via_manager(self): self.assertIsNone(self._driver._setup_via_manager(mock.Mock())) def test_supported_access_types(self): self.assertEqual(('ip',), self._driver.supported_access_types) def test_supported_access_levels(self): self.assertEqual(('rw',), self._driver.supported_access_levels) def test_access_rule_validator(self): rule = mock.Mock() abort = mock.Mock() valid = mock.Mock() self.mock_object(layout.ganesha_utils, 'validate_access_rule', mock.Mock(return_value=valid)) ret = self._driver._access_rule_validator(abort)(rule) self.assertEqual(valid, ret) layout.ganesha_utils.validate_access_rule.assert_called_once_with( ('ip',), ('rw',), rule, abort) @ddt.data({'inset': ([], ['ADD'], []), 'outset': (['ADD'], []), 'recovery': False}, {'inset': ([], [], ['DELETE']), 'outset': ([], ['DELETE']), 'recovery': False}, {'inset': (['EXISTING'], ['ADD'], ['DELETE']), 'outset': (['ADD'], ['DELETE']), 'recovery': False}, {'inset': (['EXISTING'], [], []), 'outset': (['EXISTING'], []), 'recovery': True}) @ddt.unpack def test_update_access(self, inset, outset, recovery): conf, _layout = self._setup() gluster_mgr = 
mock.Mock(name='gluster_mgr') self.mock_object(_layout, '_share_manager', mock.Mock(return_value=gluster_mgr)) _driver = GlusterfsFakeShareDriver(False, configuration=conf) self.mock_object(_driver, '_update_access_via_manager', mock.Mock()) rulemap = {t: fake_access({'access_type': "ip", 'access_level': "rw", 'access_to': t}) for t in ( 'EXISTING', 'ADD', 'DELETE')} in_rules, out_rules = ( [ [ rulemap[t] for t in r ] for r in rs ] for rs in (inset, outset)) _driver.update_access( self.fake_context, self.fake_share, *in_rules, []) _layout._share_manager.assert_called_once_with(self.fake_share) _driver._update_access_via_manager.assert_called_once_with( gluster_mgr, self.fake_context, self.fake_share, *out_rules, recovery=recovery) def test_update_access_via_manager(self): self.assertRaises(NotImplementedError, self._driver._update_access_via_manager, mock.Mock(), self.fake_context, self.fake_share, [self.fake_access], [self.fake_access]) @ddt.data('NFS', 'PROTATO') def test_check_proto_baseclass(self, proto): self.assertRaises(exception.ShareBackendException, layout.GlusterfsShareDriverBase._check_proto, {'share_proto': proto}) def test_check_proto(self): GlusterfsFakeShareDriver._check_proto({'share_proto': 'NFS'}) def test_check_proto_notsupported(self): self.assertRaises(exception.ShareBackendException, GlusterfsFakeShareDriver._check_proto, {'share_proto': 'PROTATO'}) @ddt.data('', '_from_snapshot') def test_create_share(self, variant): conf, _layout = self._setup() _driver = GlusterfsFakeShareDriver(False, configuration=conf) self.mock_object(_driver, '_check_proto', mock.Mock()) getattr(_driver, 'create_share%s' % variant)(self.fake_context, self.fake_share) _driver._check_proto.assert_called_once_with(self.fake_share) getattr(_layout, 'create_share%s' % variant).assert_called_once_with( self.fake_context, self.fake_share) @ddt.data(True, False) def test_update_share_stats(self, internal_exception): data = mock.Mock() conf, _layout = self._setup() def 
raise_exception(*args, **kwargs): raise NotImplementedError layoutstats = mock.Mock() mock_kw = ({'side_effect': raise_exception} if internal_exception else {'return_value': layoutstats}) self.mock_object(_layout, '_update_share_stats', mock.Mock(**mock_kw)) self.mock_object(driver.ShareDriver, '_update_share_stats', mock.Mock()) _driver = GlusterfsFakeShareDriver(False, configuration=conf) _driver._update_share_stats(data) if internal_exception: self.assertFalse(data.update.called) else: data.update.assert_called_once_with(layoutstats) driver.ShareDriver._update_share_stats.assert_called_once_with( data) @ddt.data('do_setup', 'create_snapshot', 'delete_share', 'delete_snapshot', 'ensure_share', 'manage_existing', 'unmanage', 'extend_share', 'shrink_share') def test_delegated_methods(self, method): conf, _layout = self._setup() _driver = GlusterfsFakeShareDriver(False, configuration=conf) fake_args = (mock.Mock(), mock.Mock(), mock.Mock()) getattr(_driver, method)(*fake_args) getattr(_layout, method).assert_called_once_with(*fake_args) @ddt.ddt class GlusterfsShareLayoutBaseTestCase(test.TestCase): """Tests GlusterfsShareLayoutBaseTestCase.""" def setUp(self): super(GlusterfsShareLayoutBaseTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) self.fake_driver = mock.Mock() self.mock_object(self.fake_driver, '_execute', self._execute) class FakeLayout(layout.GlusterfsShareLayoutBase): def _share_manager(self, share): """Return GlusterManager object representing share's backend.""" def do_setup(self, context): """Any initialization the share driver does while starting.""" def create_share(self, context, share, share_server=None): """Is called to create share.""" def create_share_from_snapshot(self, context, share, snapshot, share_server=None, parent_share=None): """Is called to create share from snapshot.""" 
        # --- Stub implementations of the remaining GlusterfsShareLayoutBase
        # abstract methods; bodies are intentionally empty so the abstract
        # base class can be instantiated by the tests below.

        def create_snapshot(self, context, snapshot, share_server=None):
            """Is called to create snapshot."""

        def delete_share(self, context, share, share_server=None):
            """Is called to remove share."""

        def delete_snapshot(self, context, snapshot, share_server=None):
            """Is called to remove snapshot."""

        def ensure_share(self, context, share, share_server=None):
            """Invoked to ensure that share is exported."""

        def manage_existing(self, share, driver_options):
            """Brings an existing share under Manila management."""

        def unmanage(self, share):
            """Removes the specified share from Manila management."""

        def extend_share(self, share, new_size, share_server=None):
            """Extends size of existing share."""

        def shrink_share(self, share, new_size, share_server=None):
            """Shrinks size of existing share."""

    def test_init_invalid(self):
        # The layout base class is abstract: instantiating it directly
        # must raise TypeError.
        self.assertRaises(TypeError, layout.GlusterfsShareLayoutBase,
                          mock.Mock())

    def test_subclass(self):
        # A concrete subclass keeps the configuration it was given, and
        # _update_share_stats remains unimplemented on the base class.
        fake_conf = mock.Mock()
        _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)
        self.assertEqual(fake_conf, _layout.configuration)
        self.assertRaises(NotImplementedError, _layout._update_share_stats)

    def test_check_mount_glusterfs(self):
        # _check_mount_glusterfs probes for the mount.glusterfs helper
        # through the driver's _execute, tolerating a nonzero exit code.
        fake_conf = mock.Mock()
        _driver = mock.Mock()
        _driver._execute = mock.Mock()
        _layout = self.FakeLayout(_driver, configuration=fake_conf)
        _layout._check_mount_glusterfs()
        _driver._execute.assert_called_once_with(
            'mount.glusterfs',
            check_exit_code=False)

    @ddt.data({'_errno': errno.ENOENT,
               '_exception': exception.GlusterfsException},
              {'_errno': errno.EACCES, '_exception': OSError})
    @ddt.unpack
    def test_check_mount_glusterfs_not_installed(self, _errno, _exception):
        # ENOENT (helper binary missing) is translated into
        # GlusterfsException; any other OSError propagates unchanged.
        fake_conf = mock.Mock()
        _layout = self.FakeLayout(self.fake_driver, configuration=fake_conf)

        def exec_runner(*ignore_args, **ignore_kwargs):
            raise OSError(_errno, os.strerror(_errno))

        expected_exec = ['mount.glusterfs']
        fake_utils.fake_execute_set_repliers([(expected_exec[0],
                                               exec_runner)])

        self.assertRaises(_exception, _layout._check_mount_glusterfs)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/glusterfs/test_layout_directory.py0000664000175000017500000005031300000000000027273 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest import mock import ddt from oslo_config import cfg from manila import context from manila import exception from manila.privsep import os as privsep_os from manila.share import configuration as config from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import layout_directory from manila import test from manila.tests import fake_share from manila.tests import fake_utils CONF = cfg.CONF fake_gluster_manager_attrs = { 'export': '127.0.0.1:/testvol', 'host': '127.0.0.1', 'qualified': 'testuser@127.0.0.1:/testvol', 'user': 'testuser', 'volume': 'testvol', 'path_to_private_key': '/fakepath/to/privatekey', 'remote_server_password': 'fakepassword', 'components': {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': None} } fake_local_share_path = '/mnt/nfs/testvol/fakename' fake_path_to_private_key = '/fakepath/to/privatekey' fake_remote_server_password = 'fakepassword' @ddt.ddt class GlusterfsDirectoryMappedLayoutTestCase(test.TestCase): """Tests GlusterfsDirectoryMappedLayout.""" def setUp(self): super(GlusterfsDirectoryMappedLayoutTestCase, 
self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) CONF.set_default('glusterfs_target', '127.0.0.1:/testvol') CONF.set_default('glusterfs_mount_point_base', '/mnt/nfs') CONF.set_default('glusterfs_server_password', fake_remote_server_password) CONF.set_default('glusterfs_path_to_private_key', fake_path_to_private_key) self.fake_driver = mock.Mock() self.mock_object(self.fake_driver, '_execute', self._execute) self.fake_driver.GLUSTERFS_VERSION_MIN = (3, 6) self.fake_conf = config.Configuration(None) self.mock_object(common.GlusterManager, 'make_gluster_call') self._layout = layout_directory.GlusterfsDirectoryMappedLayout( self.fake_driver, configuration=self.fake_conf) self._layout.gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self.share = fake_share.fake_share(share_proto='NFS') def test_do_setup(self): fake_gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self.mock_object(fake_gluster_manager, 'get_gluster_version', mock.Mock(return_value=('3', '5'))) methods = ('_check_mount_glusterfs', '_ensure_gluster_vol_mounted') for method in methods: self.mock_object(self._layout, method) self.mock_object(common, 'GlusterManager', mock.Mock(return_value=fake_gluster_manager)) self._layout.do_setup(self._context) self.assertEqual(fake_gluster_manager, self._layout.gluster_manager) common.GlusterManager.assert_called_once_with( self._layout.configuration.glusterfs_target, self._execute, self._layout.configuration.glusterfs_path_to_private_key, self._layout.configuration.glusterfs_server_password, requires={'volume': True}) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'enable') self._layout._check_mount_glusterfs.assert_called_once_with() self._layout._ensure_gluster_vol_mounted.assert_called_once_with() def 
test_do_setup_glusterfs_target_not_set(self): self._layout.configuration.glusterfs_target = None self.assertRaises(exception.GlusterfsException, self._layout.do_setup, self._context) def test_do_setup_error_enabling_creation_share_specific_size(self): attrs = {'volume': 'testvol', 'gluster_call.side_effect': exception.GlusterfsException, 'get_vol_option.return_value': 'off'} fake_gluster_manager = mock.Mock(**attrs) self.mock_object(layout_directory.LOG, 'exception') methods = ('_check_mount_glusterfs', '_ensure_gluster_vol_mounted') for method in methods: self.mock_object(self._layout, method) self.mock_object(common, 'GlusterManager', mock.Mock(return_value=fake_gluster_manager)) self.assertRaises(exception.GlusterfsException, self._layout.do_setup, self._context) self.assertEqual(fake_gluster_manager, self._layout.gluster_manager) common.GlusterManager.assert_called_once_with( self._layout.configuration.glusterfs_target, self._execute, self._layout.configuration.glusterfs_path_to_private_key, self._layout.configuration.glusterfs_server_password, requires={'volume': True}) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'enable') (self._layout.gluster_manager.get_vol_option. 
assert_called_once_with('features.quota')) layout_directory.LOG.exception.assert_called_once_with(mock.ANY) self._layout._check_mount_glusterfs.assert_called_once_with() self.assertFalse(self._layout._ensure_gluster_vol_mounted.called) def test_do_setup_error_already_enabled_creation_share_specific_size(self): attrs = {'volume': 'testvol', 'gluster_call.side_effect': exception.GlusterfsException, 'get_vol_option.return_value': 'on'} fake_gluster_manager = mock.Mock(**attrs) self.mock_object(layout_directory.LOG, 'error') methods = ('_check_mount_glusterfs', '_ensure_gluster_vol_mounted') for method in methods: self.mock_object(self._layout, method) self.mock_object(common, 'GlusterManager', mock.Mock(return_value=fake_gluster_manager)) self._layout.do_setup(self._context) self.assertEqual(fake_gluster_manager, self._layout.gluster_manager) common.GlusterManager.assert_called_once_with( self._layout.configuration.glusterfs_target, self._execute, self._layout.configuration.glusterfs_path_to_private_key, self._layout.configuration.glusterfs_server_password, requires={'volume': True}) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'enable') (self._layout.gluster_manager.get_vol_option. 
assert_called_once_with('features.quota')) self.assertFalse(layout_directory.LOG.error.called) self._layout._check_mount_glusterfs.assert_called_once_with() self._layout._ensure_gluster_vol_mounted.assert_called_once_with() def test_share_manager(self): self._layout._glustermanager = mock.Mock() self._layout._share_manager(self.share) self._layout._glustermanager.assert_called_once_with( {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': '/fakename'}) def test_ensure_gluster_vol_mounted(self): common._mount_gluster_vol = mock.Mock() self._layout._ensure_gluster_vol_mounted() self.assertTrue(common._mount_gluster_vol.called) def test_ensure_gluster_vol_mounted_error(self): common._mount_gluster_vol = ( mock.Mock(side_effect=exception.GlusterfsException)) self.assertRaises(exception.GlusterfsException, self._layout._ensure_gluster_vol_mounted) def test_get_local_share_path(self): with mock.patch.object(os, 'access', return_value=True): ret = self._layout._get_local_share_path(self.share) self.assertEqual('/mnt/nfs/testvol/fakename', ret) def test_local_share_path_not_exists(self): with mock.patch.object(os, 'access', return_value=False): self.assertRaises(exception.GlusterfsException, self._layout._get_local_share_path, self.share) def test_update_share_stats(self): test_statvfs = mock.Mock(f_frsize=4096, f_blocks=524288, f_bavail=524288) self._layout._get_mount_point_for_gluster_vol = ( mock.Mock(return_value='/mnt/nfs/testvol')) some_no = 42 not_some_no = some_no + 1 os_stat = (lambda path: mock.Mock(st_dev=some_no) if path == '/mnt/nfs' else mock.Mock(st_dev=not_some_no)) with mock.patch.object(os, 'statvfs', return_value=test_statvfs): with mock.patch.object(os, 'stat', os_stat): ret = self._layout._update_share_stats() test_data = { 'total_capacity_gb': 2, 'free_capacity_gb': 2, } self.assertEqual(test_data, ret) def test_update_share_stats_gluster_mnt_unavailable(self): self._layout._get_mount_point_for_gluster_vol = ( 
mock.Mock(return_value='/mnt/nfs/testvol')) some_no = 42 with mock.patch.object(os, 'stat', return_value=mock.Mock(st_dev=some_no)): self.assertRaises(exception.GlusterfsException, self._layout._update_share_stats) @ddt.data((), (None,)) def test_create_share(self, extra_args): expected_ret = 'testuser@127.0.0.1:/testvol/fakename' self.mock_object( self._layout, '_get_local_share_path', mock.Mock(return_value=fake_local_share_path)) gmgr = mock.Mock() self.mock_object( self._layout, '_glustermanager', mock.Mock(return_value=gmgr)) self.mock_object( self._layout.driver, '_setup_via_manager', mock.Mock(return_value=expected_ret)) self.mock_object(privsep_os, 'mkdir') ret = self._layout.create_share(self._context, self.share, *extra_args) self._layout._get_local_share_path.assert_called_once_with(self.share) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'limit-usage', '/fakename', '1GB') privsep_os.mkdir.assert_called_once_with(fake_local_share_path) self._layout._glustermanager.assert_called_once_with( {'user': 'testuser', 'host': '127.0.0.1', 'volume': 'testvol', 'path': '/fakename'}) self._layout.driver._setup_via_manager.assert_called_once_with( {'share': self.share, 'manager': gmgr}) self.assertEqual(expected_ret, ret) @ddt.data(exception.ProcessExecutionError, exception.GlusterfsException) def test_create_share_unable_to_create_share(self, trouble): self.mock_object( self._layout, '_get_local_share_path', mock.Mock(return_value=fake_local_share_path)) self.mock_object(privsep_os, 'mkdir', mock.Mock(side_effect=trouble)) self.mock_object(self._layout, '_cleanup_create_share') self.mock_object(layout_directory.LOG, 'error') self.assertRaises( exception.GlusterfsException, self._layout.create_share, self._context, self.share) self._layout._get_local_share_path.assert_called_once_with(self.share) privsep_os.mkdir.assert_called_once_with(fake_local_share_path) 
self._layout._cleanup_create_share.assert_called_once_with( fake_local_share_path, self.share['name']) layout_directory.LOG.error.assert_called_once_with( mock.ANY, mock.ANY) def test_create_share_unable_to_create_share_weird(self): def exec_runner(*ignore_args, **ignore_kw): raise RuntimeError self.mock_object( self._layout, '_get_local_share_path', mock.Mock(return_value=fake_local_share_path)) self.mock_object(self._layout, '_cleanup_create_share') self.mock_object( privsep_os, 'mkdir', mock.Mock(side_effect=exec_runner)) self.mock_object(layout_directory.LOG, 'error') expected_exec = ['mkdir %s' % fake_local_share_path] fake_utils.fake_execute_set_repliers([(expected_exec[0], exec_runner)]) self.assertRaises( RuntimeError, self._layout.create_share, self._context, self.share) self._layout._get_local_share_path.assert_called_once_with(self.share) privsep_os.mkdir.assert_called_once_with(fake_local_share_path) self.assertFalse(self._layout._cleanup_create_share.called) def test_cleanup_create_share_local_share_path_exists(self): self.mock_object(privsep_os, 'recursive_forced_rm') self.mock_object(os.path, 'exists', mock.Mock(return_value=True)) ret = self._layout._cleanup_create_share(fake_local_share_path, self.share['name']) os.path.exists.assert_called_once_with(fake_local_share_path) privsep_os.recursive_forced_rm.assert_called_once_with( fake_local_share_path) self.assertIsNone(ret) def test_cleanup_create_share_cannot_cleanup_unusable_share(self): def exec_runner(*ignore_args, **ignore_kw): raise exception.ProcessExecutionError self.mock_object(privsep_os, 'recursive_forced_rm', mock.Mock(side_effect=exec_runner)) self.mock_object(layout_directory.LOG, 'error') self.mock_object(os.path, 'exists', mock.Mock(return_value=True)) self.assertRaises(exception.GlusterfsException, self._layout._cleanup_create_share, fake_local_share_path, self.share['name']) os.path.exists.assert_called_once_with(fake_local_share_path) 
layout_directory.LOG.error.assert_called_once_with(mock.ANY, mock.ANY) def test_cleanup_create_share_local_share_path_does_not_exist(self): self.mock_object(os.path, 'exists', mock.Mock(return_value=False)) ret = self._layout._cleanup_create_share(fake_local_share_path, self.share['name']) os.path.exists.assert_called_once_with(fake_local_share_path) self.assertIsNone(ret) def test_delete_share(self): local_share_path = '/mnt/nfs/testvol/fakename' self._layout._get_local_share_path = ( mock.Mock(return_value=local_share_path)) mock_force_rm = self.mock_object(privsep_os, 'recursive_forced_rm') self._layout.delete_share(self._context, self.share) mock_force_rm.assert_called_once_with( local_share_path) def test_cannot_delete_share(self): local_share_path = '/mnt/nfs/testvol/fakename' self._layout._get_local_share_path = ( mock.Mock(return_value=local_share_path)) self.mock_object( privsep_os, 'recursive_forced_rm', mock.Mock( side_effect=exception.ProcessExecutionError)) self.assertRaises(exception.ProcessExecutionError, self._layout.delete_share, self._context, self.share) privsep_os.mkdir.assert_called_once_with = local_share_path def test_delete_share_can_be_called_with_extra_arg_share_server(self): local_share_path = '/mnt/nfs/testvol/fakename' self._layout._get_local_share_path = mock.Mock( return_value=local_share_path) mock_force_rm = self.mock_object(privsep_os, 'recursive_forced_rm') share_server = None ret = self._layout.delete_share(self._context, self.share, share_server) self.assertIsNone(ret) self._layout._get_local_share_path.assert_called_once_with(self.share) mock_force_rm.assert_called_once_with(local_share_path) def test_ensure_share(self): self.assertIsNone(self._layout.ensure_share(self._context, self.share)) def test_extend_share(self): self._layout.extend_share(self.share, 3) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'limit-usage', '/fakename', '3GB') def test_shrink_share(self): 
self.mock_object(self._layout, '_get_directory_usage', mock.Mock(return_value=10.0)) self._layout.shrink_share(self.share, 11) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'limit-usage', '/fakename', '11GB') def test_shrink_share_data_loss(self): self.mock_object(self._layout, '_get_directory_usage', mock.Mock(return_value=10.0)) shrink_on_gluster = self.mock_object(self._layout, '_set_directory_quota') self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self._layout.shrink_share, self.share, 9) shrink_on_gluster.assert_not_called() def test_set_directory_quota(self): self._layout._set_directory_quota(self.share, 3) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'limit-usage', '/fakename', '3GB') def test_set_directory_quota_unable_to_set(self): self.mock_object(self._layout.gluster_manager, 'gluster_call', mock.Mock(side_effect=exception.GlusterfsException)) self.assertRaises(exception.GlusterfsException, self._layout._set_directory_quota, self.share, 3) self._layout.gluster_manager.gluster_call.assert_called_once_with( 'volume', 'quota', 'testvol', 'limit-usage', '/fakename', '3GB') def test_get_directory_usage(self): def xml_output(*ignore_args, **ignore_kwargs): return """ 0 0 10737418240 """, '' self.mock_object(self._layout.gluster_manager, 'gluster_call', mock.Mock(side_effect=xml_output)) ret = self._layout._get_directory_usage(self.share) self.assertEqual(10.0, ret) share_dir = '/' + self.share['name'] self._layout.gluster_manager.gluster_call.assert_called_once_with( '--xml', 'volume', 'quota', self._layout.gluster_manager.volume, 'list', share_dir) def test_get_directory_usage_unable_to_get(self): self.mock_object(self._layout.gluster_manager, 'gluster_call', mock.Mock(side_effect=exception.GlusterfsException)) self.assertRaises(exception.GlusterfsException, self._layout._get_directory_usage, self.share) share_dir = '/' + self.share['name'] 
self._layout.gluster_manager.gluster_call.assert_called_once_with( '--xml', 'volume', 'quota', self._layout.gluster_manager.volume, 'list', share_dir) @ddt.data( ('create_share_from_snapshot', ('context', 'share', 'snapshot'), {'share_server': None}), ('create_snapshot', ('context', 'snapshot'), {'share_server': None}), ('delete_snapshot', ('context', 'snapshot'), {'share_server': None}), ('manage_existing', ('share', 'driver_options'), {}), ('unmanage', ('share',), {})) def test_nonimplemented_methods(self, method_invocation): method, args, kwargs = method_invocation self.assertRaises(NotImplementedError, getattr(self._layout, method), *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/glusterfs/test_layout_volume.py0000664000175000017500000012555300000000000026607 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GlusterFS volume mapped share layout testcases. 
""" import re import shutil import tempfile from unittest import mock import ddt from oslo_config import cfg from manila.common import constants from manila import context from manila import exception from manila.privsep import os as privsep_os from manila.share import configuration as config from manila.share.drivers.glusterfs import common from manila.share.drivers.glusterfs import layout_volume from manila import test from manila.tests import fake_utils CONF = cfg.CONF def new_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'glusterfs', } share.update(kwargs) return share def glusterXMLOut(**kwargs): template = """ %(ret)d %(errno)d fake error """ return template % kwargs, '' FAKE_UUID1 = '11111111-1111-1111-1111-111111111111' FAKE_UUID2 = '22222222-2222-2222-2222-222222222222' @ddt.ddt class GlusterfsVolumeMappedLayoutTestCase(test.TestCase): """Tests GlusterfsVolumeMappedLayout.""" def setUp(self): super(GlusterfsVolumeMappedLayoutTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() self.glusterfs_target1 = 'root@host1:/gv1' self.glusterfs_target2 = 'root@host2:/gv2' self.glusterfs_server1 = 'root@host1' self.glusterfs_server2 = 'root@host2' self.glusterfs_server1_volumes = 'manila-share-1-1G\nshare1' self.glusterfs_server2_volumes = 'manila-share-2-2G\nshare2' self.share1 = new_share( export_location=self.glusterfs_target1, status=constants.STATUS_AVAILABLE) self.share2 = new_share( export_location=self.glusterfs_target2, status=constants.STATUS_AVAILABLE) gmgr = common.GlusterManager self.gmgr1 = gmgr(self.glusterfs_server1, self._execute, None, None, requires={'volume': False}) self.gmgr2 = gmgr(self.glusterfs_server2, self._execute, None, None, requires={'volume': False}) self.glusterfs_volumes_dict = ( {'root@host1:/manila-share-1-1G': {'size': 1}, 'root@host2:/manila-share-2-2G': {'size': 2}}) self.glusterfs_used_vols = 
set([ 'root@host1:/manila-share-1-1G', 'root@host2:/manila-share-2-2G']) CONF.set_default('glusterfs_servers', [self.glusterfs_server1, self.glusterfs_server2]) CONF.set_default('glusterfs_server_password', 'fake_password') CONF.set_default('glusterfs_path_to_private_key', '/fakepath/to/privatekey') CONF.set_default('glusterfs_volume_pattern', r'manila-share-\d+-#{size}G$') CONF.set_default('driver_handles_share_servers', False) self.fake_driver = mock.Mock() self.mock_object(self.fake_driver, '_execute', self._execute) self.fake_driver.GLUSTERFS_VERSION_MIN = (3, 6) self.fake_conf = config.Configuration(None) self.mock_object(tempfile, 'mkdtemp', mock.Mock(return_value='/tmp/tmpKGHKJ')) self.mock_object(common.GlusterManager, 'make_gluster_call') self.fake_private_storage = mock.Mock() with mock.patch.object(layout_volume.GlusterfsVolumeMappedLayout, '_glustermanager', side_effect=[self.gmgr1, self.gmgr2]): self._layout = layout_volume.GlusterfsVolumeMappedLayout( self.fake_driver, configuration=self.fake_conf, private_storage=self.fake_private_storage) self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6'), self.glusterfs_server2: ('3', '7')} self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) @ddt.data({"test_kwargs": {}, "requires": {"volume": True}}, {"test_kwargs": {'req_volume': False}, "requires": {"volume": False}}) @ddt.unpack def test_glustermanager(self, test_kwargs, requires): fake_obj = mock.Mock() self.mock_object(common, 'GlusterManager', mock.Mock(return_value=fake_obj)) ret = self._layout._glustermanager(self.glusterfs_target1, **test_kwargs) common.GlusterManager.assert_called_once_with( self.glusterfs_target1, self._execute, self._layout.configuration.glusterfs_path_to_private_key, self._layout.configuration.glusterfs_server_password, requires=requires) self.assertEqual(fake_obj, ret) def test_compile_volume_pattern(self): volume_pattern = r'manila-share-\d+-(?P\d+)G$' ret = 
self._layout._compile_volume_pattern() self.assertEqual(re.compile(volume_pattern), ret) @ddt.data({'root@host1:/manila-share-1-1G': 'NONE', 'root@host2:/manila-share-2-2G': None}, {'root@host1:/manila-share-1-1G': FAKE_UUID1, 'root@host2:/manila-share-2-2G': None}, {'root@host1:/manila-share-1-1G': 'foobarbaz', 'root@host2:/manila-share-2-2G': FAKE_UUID2}, {'root@host1:/manila-share-1-1G': FAKE_UUID1, 'root@host2:/manila-share-2-2G': FAKE_UUID2}) def test_fetch_gluster_volumes(self, sharemark): vol1_qualified = 'root@host1:/manila-share-1-1G' gmgr_vol1 = common.GlusterManager(vol1_qualified) gmgr_vol1.get_vol_option = mock.Mock( return_value=sharemark[vol1_qualified]) vol2_qualified = 'root@host2:/manila-share-2-2G' gmgr_vol2 = common.GlusterManager(vol2_qualified) gmgr_vol2.get_vol_option = mock.Mock( return_value=sharemark[vol2_qualified]) self.mock_object( self.gmgr1, 'gluster_call', mock.Mock(return_value=(self.glusterfs_server1_volumes, ''))) self.mock_object( self.gmgr2, 'gluster_call', mock.Mock(return_value=(self.glusterfs_server2_volumes, ''))) _glustermanager_calls = (self.gmgr1, gmgr_vol1, self.gmgr2, gmgr_vol2) self.mock_object(self._layout, '_glustermanager', mock.Mock(side_effect=_glustermanager_calls)) expected_output = {} for q, d in self.glusterfs_volumes_dict.items(): if sharemark[q] not in (FAKE_UUID1, FAKE_UUID2): expected_output[q] = d ret = self._layout._fetch_gluster_volumes() test_args = ('volume', 'list') self.gmgr1.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) self.gmgr2.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) gmgr_vol1.get_vol_option.assert_called_once_with( 'user.manila-share') gmgr_vol2.get_vol_option.assert_called_once_with( 'user.manila-share') self.assertEqual(expected_output, ret) def test_fetch_gluster_volumes_no_filter_used(self): vol1_qualified = 'root@host1:/manila-share-1-1G' gmgr_vol1 = common.GlusterManager(vol1_qualified) gmgr_vol1.get_vol_option = mock.Mock() vol2_qualified = 
'root@host2:/manila-share-2-2G' gmgr_vol2 = common.GlusterManager(vol2_qualified) gmgr_vol2.get_vol_option = mock.Mock() self.mock_object( self.gmgr1, 'gluster_call', mock.Mock(return_value=(self.glusterfs_server1_volumes, ''))) self.mock_object( self.gmgr2, 'gluster_call', mock.Mock(return_value=(self.glusterfs_server2_volumes, ''))) _glustermanager_calls = (self.gmgr1, gmgr_vol1, self.gmgr2, gmgr_vol2) self.mock_object(self._layout, '_glustermanager', mock.Mock(side_effect=_glustermanager_calls)) expected_output = self.glusterfs_volumes_dict ret = self._layout._fetch_gluster_volumes(filter_used=False) test_args = ('volume', 'list') self.gmgr1.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) self.gmgr2.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) self.assertFalse(gmgr_vol1.get_vol_option.called) self.assertFalse(gmgr_vol2.get_vol_option.called) self.assertEqual(expected_output, ret) def test_fetch_gluster_volumes_no_keymatch(self): vol1_qualified = 'root@host1:/manila-share-1' gmgr_vol1 = common.GlusterManager(vol1_qualified) gmgr_vol1.get_vol_option = mock.Mock(return_value=None) self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object( self.gmgr1, 'gluster_call', mock.Mock(return_value=('manila-share-1', ''))) _glustermanager_calls = (self.gmgr1, gmgr_vol1) self.mock_object(self._layout, '_glustermanager', mock.Mock(side_effect=_glustermanager_calls)) self.mock_object(self._layout, 'volume_pattern', re.compile(r'manila-share-\d+(-(?P\d+)G)?$')) expected_output = {'root@host1:/manila-share-1': {'size': None}} ret = self._layout._fetch_gluster_volumes() test_args = ('volume', 'list') self.gmgr1.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) self.assertEqual(expected_output, ret) def test_fetch_gluster_volumes_error(self): test_args = ('volume', 'list') def raise_exception(*args, **kwargs): if args == test_args: raise exception.GlusterfsException() 
self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object(self.gmgr1, 'gluster_call', mock.Mock(side_effect=raise_exception)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=self.gmgr1)) self.mock_object(layout_volume.LOG, 'error') self.assertRaises(exception.GlusterfsException, self._layout._fetch_gluster_volumes) self.gmgr1.gluster_call.assert_called_once_with(*test_args, log=mock.ANY) def test_do_setup(self): self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object(self.gmgr1, 'get_gluster_version', mock.Mock(return_value=('3', '6'))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=self.gmgr1)) self.mock_object(self._layout, '_fetch_gluster_volumes', mock.Mock(return_value=self.glusterfs_volumes_dict)) self.mock_object(self._layout, '_check_mount_glusterfs') self._layout.gluster_used_vols = self.glusterfs_used_vols self.mock_object(layout_volume.LOG, 'warning') self._layout.do_setup(self._context) self._layout._fetch_gluster_volumes.assert_called_once_with( filter_used=False) self._layout._check_mount_glusterfs.assert_called_once_with() self.gmgr1.get_gluster_version.assert_called_once_with() def test_do_setup_unsupported_glusterfs_version(self): self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object(self.gmgr1, 'get_gluster_version', mock.Mock(return_value=('3', '5'))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=self.gmgr1)) self.assertRaises(exception.GlusterfsException, self._layout.do_setup, self._context) self.gmgr1.get_gluster_version.assert_called_once_with() @ddt.data(exception.GlusterfsException, RuntimeError) def test_do_setup_get_gluster_version_fails(self, exc): def raise_exception(*args, **kwargs): raise exc self._layout.configuration.glusterfs_servers = [self.glusterfs_server1] self.mock_object(self.gmgr1, 'get_gluster_version', mock.Mock(side_effect=raise_exception)) 
        # Tail of test_do_setup_get_gluster_version_fails: the failure
        # raised by get_gluster_version must propagate out of do_setup.
        self.mock_object(self._layout, '_glustermanager',
                         mock.Mock(return_value=self.gmgr1))

        self.assertRaises(exc, self._layout.do_setup, self._context)

        self.gmgr1.get_gluster_version.assert_called_once_with()

    def test_do_setup_glusterfs_no_volumes_provided_by_backend(self):
        # do_setup must fail when the backend reports no usable volumes.
        self._layout.configuration.glusterfs_servers = [self.glusterfs_server1]
        self.mock_object(self.gmgr1, 'get_gluster_version',
                         mock.Mock(return_value=('3', '6')))
        self.mock_object(self._layout, '_glustermanager',
                         mock.Mock(return_value=self.gmgr1))
        self.mock_object(self._layout, '_fetch_gluster_volumes',
                         mock.Mock(return_value={}))

        self.assertRaises(exception.GlusterfsException,
                          self._layout.do_setup, self._context)

        self._layout._fetch_gluster_volumes.assert_called_once_with(
            filter_used=False)

    def test_share_manager(self):
        # _share_manager resolves a share to a GlusterManager using the
        # volume name recorded in private storage under the 'volume' key.
        self.mock_object(self._layout, '_glustermanager',
                         mock.Mock(return_value=self.gmgr1))
        self.mock_object(self._layout.private_storage, 'get',
                         mock.Mock(return_value='host1:/gv1'))

        ret = self._layout._share_manager(self.share1)

        self._layout.private_storage.get.assert_called_once_with(
            self.share1['id'], 'volume')
        self._layout._glustermanager.assert_called_once_with('host1:/gv1')
        self.assertEqual(self.gmgr1, ret)

    def test_share_manager_no_privdata(self):
        # Without a private-storage record, _share_manager returns None.
        self.mock_object(self._layout.private_storage, 'get',
                         mock.Mock(return_value=None))

        ret = self._layout._share_manager(self.share1)

        self._layout.private_storage.get.assert_called_once_with(
            self.share1['id'], 'volume')
        self.assertIsNone(ret)

    def test_ensure_share(self):
        # ensure_share marks the share's volume as used and tags it with
        # the share id via the user.manila-share volume option.
        share = self.share1
        gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
                                      None, None)
        gmgr1.set_vol_option = mock.Mock()
        self.mock_object(self._layout, '_share_manager',
                         mock.Mock(return_value=gmgr1))

        self._layout.ensure_share(self._context, share)

        self._layout._share_manager.assert_called_once_with(share)
        self.assertIn(self.glusterfs_target1, self._layout.gluster_used_vols)
        gmgr1.set_vol_option.assert_called_once_with(
            'user.manila-share', share['id'])
@ddt.data({"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(), "size": 1, "expected": "host:/share2G"}, {"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(), "size": 2, "expected": "host:/share2G"}, {"voldict": {"host:/share2G": {"size": 2}}, "used_vols": set(), "size": None, "expected": "host:/share2G"}, {"voldict": {"host:/share2G": {"size": 2}, "host:/share": {"size": None}}, "used_vols": set(["host:/share2G"]), "size": 1, "expected": "host:/share"}, {"voldict": {"host:/share2G": {"size": 2}, "host:/share": {"size": None}}, "used_vols": set(["host:/share2G"]), "size": 2, "expected": "host:/share"}, {"voldict": {"host:/share2G": {"size": 2}, "host:/share": {"size": None}}, "used_vols": set(["host:/share2G"]), "size": 3, "expected": "host:/share"}, {"voldict": {"host:/share2G": {"size": 2}, "host:/share": {"size": None}}, "used_vols": set(["host:/share2G"]), "size": None, "expected": "host:/share"}, {"voldict": {"host:/share": {}}, "used_vols": set(), "size": 1, "expected": "host:/share"}, {"voldict": {"host:/share": {}}, "used_vols": set(), "size": None, "expected": "host:/share"}) @ddt.unpack def test_pop_gluster_vol(self, voldict, used_vols, size, expected): gmgr = common.GlusterManager gmgr1 = gmgr(expected, self._execute, None, None) self._layout._fetch_gluster_volumes = mock.Mock(return_value=voldict) self._layout.gluster_used_vols = used_vols self._layout._glustermanager = mock.Mock(return_value=gmgr1) self._layout.volume_pattern_keys = list(voldict.values())[0].keys() result = self._layout._pop_gluster_vol(size=size) self.assertEqual(expected, result) self.assertIn(result, used_vols) self._layout._fetch_gluster_volumes.assert_called_once_with() self._layout._glustermanager.assert_called_once_with(result) @ddt.data({"voldict": {"share2G": {"size": 2}}, "used_vols": set(), "size": 3}, {"voldict": {"share2G": {"size": 2}}, "used_vols": set(["share2G"]), "size": None}) @ddt.unpack def test_pop_gluster_vol_excp(self, voldict, used_vols, 
size): self._layout._fetch_gluster_volumes = mock.Mock(return_value=voldict) self._layout.gluster_used_vols = used_vols self._layout.volume_pattern_keys = list(voldict.values())[0].keys() self.assertRaises(exception.GlusterfsException, self._layout._pop_gluster_vol, size=size) self._layout._fetch_gluster_volumes.assert_called_once_with() self.assertFalse( self.fake_driver._setup_via_manager.called) def test_push_gluster_vol(self): self._layout.gluster_used_vols = set([ self.glusterfs_target1, self.glusterfs_target2]) self._layout._push_gluster_vol(self.glusterfs_target2) self.assertEqual(1, len(self._layout.gluster_used_vols)) self.assertNotIn(self.glusterfs_target2, self._layout.gluster_used_vols) def test_push_gluster_vol_excp(self): self._layout.gluster_used_vols = set([self.glusterfs_target1]) self._layout.gluster_unused_vols_dict = {} self.assertRaises(exception.GlusterfsException, self._layout._push_gluster_vol, self.glusterfs_target2) @ddt.data({'vers_minor': '6', 'dirs_to_ignore': []}, {'vers_minor': '7', 'dirs_to_ignore': ['/tmp/tmpKGHKJ/.trashcan', '/tmp/tmpKGHKJ/.trashcan/internal_op']}) @ddt.unpack def test_wipe_gluster_vol(self, vers_minor, dirs_to_ignore): tmpdir = '/tmp/tmpKGHKJ' gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', vers_minor)} self.mock_object(tempfile, 'mkdtemp', mock.Mock(return_value=tmpdir)) self.mock_object(self.fake_driver, '_execute', mock.Mock()) self.mock_object(privsep_os, 'find') self.mock_object(common, '_mount_gluster_vol', mock.Mock()) self.mock_object(common, '_umount_gluster_vol', mock.Mock()) self.mock_object(shutil, 'rmtree', mock.Mock()) self._layout._wipe_gluster_vol(gmgr1) tempfile.mkdtemp.assert_called_once_with() common._mount_gluster_vol.assert_called_once_with( self.fake_driver._execute, gmgr1.export, tmpdir) privsep_os.find.assert_called_once_with( tmpdir, dirs_to_ignore=dirs_to_ignore, delete=True) 
common._umount_gluster_vol.assert_called_once_with( tmpdir) kwargs = {'ignore_errors': True} shutil.rmtree.assert_called_once_with(tmpdir, **kwargs) def test_wipe_gluster_vol_mount_fail(self): tmpdir = '/tmp/tmpKGHKJ' gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', '6')} self.mock_object(tempfile, 'mkdtemp', mock.Mock(return_value=tmpdir)) self.mock_object(self.fake_driver, '_execute', mock.Mock()) self.mock_object(common, '_mount_gluster_vol', mock.Mock(side_effect=exception.GlusterfsException)) self.mock_object(common, '_umount_gluster_vol', mock.Mock()) self.mock_object(shutil, 'rmtree', mock.Mock()) self.assertRaises(exception.GlusterfsException, self._layout._wipe_gluster_vol, gmgr1) tempfile.mkdtemp.assert_called_once_with() common._mount_gluster_vol.assert_called_once_with( self.fake_driver._execute, gmgr1.export, tmpdir) self.assertFalse(self.fake_driver._execute.called) self.assertFalse(common._umount_gluster_vol.called) kwargs = {'ignore_errors': True} shutil.rmtree.assert_called_once_with(tmpdir, **kwargs) def test_wipe_gluster_vol_error_wiping_gluster_vol(self): tmpdir = '/tmp/tmpKGHKJ' gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', '6')} self.mock_object(tempfile, 'mkdtemp', mock.Mock(return_value=tmpdir)) self.mock_object( privsep_os, 'find', mock.Mock(side_effect=exception.ProcessExecutionError)) self.mock_object(common, '_mount_gluster_vol', mock.Mock()) self.mock_object(common, '_umount_gluster_vol', mock.Mock()) self.mock_object(shutil, 'rmtree', mock.Mock()) self.assertRaises(exception.GlusterfsException, self._layout._wipe_gluster_vol, gmgr1) tempfile.mkdtemp.assert_called_once_with() common._mount_gluster_vol.assert_called_once_with( self.fake_driver._execute, gmgr1.export, tmpdir) 
common._umount_gluster_vol.assert_called_once_with(tmpdir) kwargs = {'ignore_errors': True} shutil.rmtree.assert_called_once_with(tmpdir, **kwargs) def test_create_share(self): self._layout._pop_gluster_vol = mock.Mock( return_value=self.glusterfs_target1) gmgr1 = common.GlusterManager(self.glusterfs_target1) gmgr1.set_vol_option = mock.Mock() self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) self.mock_object(self.fake_driver, '_setup_via_manager', mock.Mock(return_value='host1:/gv1')) share = new_share() exp_locn = self._layout.create_share(self._context, share) self._layout._pop_gluster_vol.assert_called_once_with(share['size']) self.fake_driver._setup_via_manager.assert_called_once_with( {'manager': gmgr1, 'share': share}) self._layout.private_storage.update.assert_called_once_with( share['id'], {'volume': self.glusterfs_target1}) gmgr1.set_vol_option.assert_called_once_with( 'user.manila-share', share['id']) self.assertEqual('host1:/gv1', exp_locn) def test_create_share_error(self): self._layout._pop_gluster_vol = mock.Mock( side_effect=exception.GlusterfsException) share = new_share() self.assertRaises(exception.GlusterfsException, self._layout.create_share, self._context, share) self._layout._pop_gluster_vol.assert_called_once_with( share['size']) @ddt.data(None, '', 'Eeyore') def test_delete_share(self, clone_of): self._layout._push_gluster_vol = mock.Mock() self._layout._wipe_gluster_vol = mock.Mock() gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) gmgr1.set_vol_option = mock.Mock() gmgr1.get_vol_option = mock.Mock(return_value=clone_of) new_vol_addr = self.glusterfs_target1 self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self._layout.delete_share(self._context, self.share1) gmgr1.get_vol_option.assert_called_once_with( 'user.manila-cloned-from') 
self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1) self.assertIn(new_vol_addr, self._layout.gluster_used_vols) self._layout._push_gluster_vol.assert_called_once_with( self.glusterfs_target1) self._layout.private_storage.delete.assert_called_once_with( self.share1['id']) gmgr1.set_vol_option.assert_has_calls([ mock.call('user.manila-share', 'NONE'), mock.call('nfs.disable', 'on') ]) def test_delete_share_clone(self): self._layout._push_gluster_vol = mock.Mock() self._layout._wipe_gluster_vol = mock.Mock() gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) gmgr1.gluster_call = mock.Mock() gmgr1.get_vol_option = mock.Mock(return_value=FAKE_UUID1) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self._layout.delete_share(self._context, self.share1) gmgr1.get_vol_option.assert_called_once_with( 'user.manila-cloned-from') self.assertFalse(self._layout._wipe_gluster_vol.called) self._layout._push_gluster_vol.assert_called_once_with( self.glusterfs_target1) self._layout.private_storage.delete.assert_called_once_with( self.share1['id']) gmgr1.gluster_call.assert_called_once_with( 'volume', 'delete', 'gv1') def test_delete_share_error(self): self._layout._wipe_gluster_vol = mock.Mock() self._layout._wipe_gluster_vol.side_effect = ( exception.GlusterfsException) self._layout._push_gluster_vol = mock.Mock() gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) gmgr1.get_vol_option = mock.Mock(return_value=None) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.assertRaises(exception.GlusterfsException, self._layout.delete_share, self._context, self.share1) self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1) self.assertFalse(self._layout._push_gluster_vol.called) def 
test_delete_share_missing_record(self): self.mock_object(self._layout, '_share_manager', mock.Mock(return_value=None)) self._layout.delete_share(self._context, self.share1) self._layout._share_manager.assert_called_once_with(self.share1) def test_create_snapshot(self): self._layout.gluster_nosnap_vols_dict = {} self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6')} gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(gmgr1, 'gluster_call', mock.Mock( side_effect=(glusterXMLOut(ret=0, errno=0),))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } ret = self._layout.create_snapshot(self._context, snapshot) self.assertIsNone(ret) args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id', gmgr1.volume) gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) @ddt.data({'side_effect': (glusterXMLOut(ret=-1, errno=2),), '_exception': exception.GlusterfsException}, {'side_effect': (('', ''),), '_exception': exception.GlusterfsException}) @ddt.unpack def test_create_snapshot_error(self, side_effect, _exception): self._layout.gluster_nosnap_vols_dict = {} self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6')} gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(gmgr1, 'gluster_call', mock.Mock(side_effect=side_effect)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(_exception, self._layout.create_snapshot, self._context, snapshot) args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id', gmgr1.volume) 
gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) @ddt.data({"vers_minor": '6', "exctype": exception.GlusterfsException}, {"vers_minor": '7', "exctype": exception.ShareSnapshotNotSupported}) @ddt.unpack def test_create_snapshot_no_snap(self, vers_minor, exctype): self._layout.gluster_nosnap_vols_dict = {} self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', vers_minor)} gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(gmgr1, 'gluster_call', mock.Mock( side_effect=(glusterXMLOut(ret=-1, errno=0),))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(exctype, self._layout.create_snapshot, self._context, snapshot) args = ('--xml', 'snapshot', 'create', 'manila-fake_snap_id', gmgr1.volume) gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) @ddt.data({"vers_minor": '6', "exctype": exception.GlusterfsException}, {"vers_minor": '7', "exctype": exception.ShareSnapshotNotSupported}) @ddt.unpack def test_create_snapshot_no_snap_cached(self, vers_minor, exctype): self._layout.gluster_nosnap_vols_dict = { self.glusterfs_target1: 'fake error'} self._layout.glusterfs_versions = { self.glusterfs_server1: ('3', vers_minor)} self._layout.gluster_used_vols = set([self.glusterfs_target1]) gmgr = common.GlusterManager gmgr1 = gmgr(self.glusterfs_target1, self._execute, None, None) self.mock_object(self._layout, '_share_manager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(exctype, self._layout.create_snapshot, self._context, snapshot) def test_find_actual_backend_snapshot_name(self): gmgr = common.GlusterManager gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None) 
self.mock_object(gmgr1, 'gluster_call', mock.Mock(return_value=('fake_snap_id_xyz', ''))) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } ret = self._layout._find_actual_backend_snapshot_name(gmgr1, snapshot) args = ('snapshot', 'list', gmgr1.volume, '--mode=script') gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) self.assertEqual('fake_snap_id_xyz', ret) @ddt.data('this is too bad', 'fake_snap_id_xyx\nfake_snap_id_pqr') def test_find_actual_backend_snapshot_name_bad_snap_list(self, snaplist): gmgr = common.GlusterManager gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None) self.mock_object(gmgr1, 'gluster_call', mock.Mock(return_value=(snaplist, ''))) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(exception.GlusterfsException, self._layout._find_actual_backend_snapshot_name, gmgr1, snapshot) args = ('snapshot', 'list', gmgr1.volume, '--mode=script') gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) @ddt.data({'glusterfs_target': 'root@host1:/gv1', 'glusterfs_server': 'root@host1'}, {'glusterfs_target': 'host1:/gv1', 'glusterfs_server': 'host1'}) @ddt.unpack def test_create_share_from_snapshot(self, glusterfs_target, glusterfs_server): share = new_share() snapshot = { 'id': 'fake_snap_id', 'share_instance': new_share(export_location=glusterfs_target), 'share_id': 'fake_share_id', } volume = ''.join(['manila-', share['id']]) new_vol_addr = ':/'.join([glusterfs_server, volume]) gmgr = common.GlusterManager old_gmgr = gmgr(glusterfs_target, self._execute, None, None) new_gmgr = gmgr(new_vol_addr, self._execute, None, None) self._layout.gluster_used_vols = set([glusterfs_target]) self._layout.glusterfs_versions = {glusterfs_server: ('3', '7')} self.mock_object(old_gmgr, 'gluster_call', mock.Mock(side_effect=[ ('', ''), ('', ''), ('', ''), ('', '')])) self.mock_object(new_gmgr, 'gluster_call', mock.Mock(side_effect=[('', 
''), ('', '')])) self.mock_object(new_gmgr, 'get_vol_option', mock.Mock()) new_gmgr.get_vol_option.return_value = ( 'glusterfs-server-1,client') self.mock_object(self._layout, '_find_actual_backend_snapshot_name', mock.Mock(return_value='fake_snap_id_xyz')) self.mock_object(self._layout, '_share_manager', mock.Mock(return_value=old_gmgr)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=new_gmgr)) self.mock_object(self.fake_driver, '_setup_via_manager', mock.Mock(return_value='host1:/gv1')) ret = self._layout.create_share_from_snapshot( self._context, share, snapshot, None) (self._layout._find_actual_backend_snapshot_name. assert_called_once_with(old_gmgr, snapshot)) args = (('snapshot', 'activate', 'fake_snap_id_xyz', 'force', '--mode=script'), ('snapshot', 'clone', volume, 'fake_snap_id_xyz'), ('volume', 'start', volume)) old_gmgr.gluster_call.assert_has_calls( [mock.call(*a, log=mock.ANY) for a in args]) args = (('volume', 'set', volume, 'user.manila-share', share['id']), ('volume', 'set', volume, 'user.manila-cloned-from', snapshot['share_id'])) new_gmgr.gluster_call.assert_has_calls( [mock.call(*a, log=mock.ANY) for a in args], any_order=True) self._layout._share_manager.assert_called_once_with( snapshot['share_instance']) self._layout._glustermanager.assert_called_once_with( gmgr.parse(new_vol_addr)) self._layout.driver._setup_via_manager.assert_called_once_with( {'manager': new_gmgr, 'share': share}, {'manager': old_gmgr, 'share': snapshot['share_instance']}) self._layout.private_storage.update.assert_called_once_with( share['id'], {'volume': new_vol_addr}) self.assertIn( new_vol_addr, self._layout.gluster_used_vols) self.assertEqual(['host1:/gv1'], ret) def test_create_share_from_snapshot_error_unsupported_gluster_version( self): glusterfs_target = 'root@host1:/gv1' glusterfs_server = 'root@host1' share = new_share() volume = ''.join(['manila-', share['id']]) new_vol_addr = ':/'.join([glusterfs_server, volume]) gmgr = 
common.GlusterManager old_gmgr = gmgr(glusterfs_target, self._execute, None, None) new_gmgr = gmgr(new_vol_addr, self._execute, None, None) self._layout.gluster_used_vols_dict = {glusterfs_target: old_gmgr} self._layout.glusterfs_versions = {glusterfs_server: ('3', '6')} self.mock_object( old_gmgr, 'gluster_call', mock.Mock(side_effect=[('', ''), ('', '')])) self.mock_object(new_gmgr, 'get_vol_option', mock.Mock()) new_gmgr.get_vol_option.return_value = ( 'glusterfs-server-1,client') self.mock_object(self._layout, '_find_actual_backend_snapshot_name', mock.Mock(return_value='fake_snap_id_xyz')) self.mock_object(self._layout, '_share_manager', mock.Mock(return_value=old_gmgr)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=new_gmgr)) snapshot = { 'id': 'fake_snap_id', 'share_instance': new_share(export_location=glusterfs_target) } self.assertRaises(exception.GlusterfsException, self._layout.create_share_from_snapshot, self._context, share, snapshot) self.assertFalse( self._layout._find_actual_backend_snapshot_name.called) self.assertFalse(old_gmgr.gluster_call.called) self._layout._share_manager.assert_called_once_with( snapshot['share_instance']) self.assertFalse(self._layout._glustermanager.called) self.assertFalse(new_gmgr.get_vol_option.called) self.assertFalse(new_gmgr.gluster_call.called) self.assertNotIn(new_vol_addr, self._layout.glusterfs_versions.keys()) def test_delete_snapshot(self): self._layout.gluster_nosnap_vols_dict = {} gmgr = common.GlusterManager gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(self._layout, '_find_actual_backend_snapshot_name', mock.Mock(return_value='fake_snap_id_xyz')) self.mock_object( gmgr1, 'gluster_call', mock.Mock(return_value=glusterXMLOut(ret=0, errno=0))) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': 
self.share1['id'], 'share': self.share1 } ret = self._layout.delete_snapshot(self._context, snapshot) self.assertIsNone(ret) args = ('--xml', 'snapshot', 'delete', 'fake_snap_id_xyz', '--mode=script') gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) (self._layout._find_actual_backend_snapshot_name. assert_called_once_with(gmgr1, snapshot)) @ddt.data({'side_effect': (glusterXMLOut(ret=-1, errno=0),), '_exception': exception.GlusterfsException}, {'side_effect': (('', ''),), '_exception': exception.GlusterfsException}) @ddt.unpack def test_delete_snapshot_error(self, side_effect, _exception): self._layout.gluster_nosnap_vols_dict = {} gmgr = common.GlusterManager gmgr1 = gmgr(self.share1['export_location'], self._execute, None, None) self._layout.gluster_used_vols = set([self.glusterfs_target1]) self.mock_object(self._layout, '_find_actual_backend_snapshot_name', mock.Mock(return_value='fake_snap_id_xyz')) args = ('--xml', 'snapshot', 'delete', 'fake_snap_id_xyz', '--mode=script') self.mock_object( gmgr1, 'gluster_call', mock.Mock(side_effect=side_effect)) self.mock_object(self._layout, '_glustermanager', mock.Mock(return_value=gmgr1)) snapshot = { 'id': 'fake_snap_id', 'share_id': self.share1['id'], 'share': self.share1 } self.assertRaises(_exception, self._layout.delete_snapshot, self._context, snapshot) gmgr1.gluster_call.assert_called_once_with(*args, log=mock.ANY) (self._layout._find_actual_backend_snapshot_name. 
assert_called_once_with(gmgr1, snapshot)) @ddt.data( ('manage_existing', ('share', 'driver_options'), {}), ('unmanage', ('share',), {}), ('extend_share', ('share', 'new_size'), {'share_server': None}), ('shrink_share', ('share', 'new_size'), {'share_server': None})) def test_nonimplemented_methods(self, method_invocation): method, args, kwargs = method_invocation self.assertRaises(NotImplementedError, getattr(self._layout, method), *args, **kwargs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0176704 manila-21.0.0/manila/tests/share/drivers/hdfs/0000775000175000017500000000000000000000000021165 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hdfs/__init__.py0000664000175000017500000000000000000000000023264 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hdfs/test_hdfs_native.py0000664000175000017500000005463400000000000025104 0ustar00zuulzuul00000000000000# Copyright 2015 Intel, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for HDFS native protocol driver module.""" import socket from unittest import mock from oslo_concurrency import processutils from oslo_config import cfg from manila import context from manila import exception import manila.share.configuration as config import manila.share.drivers.hdfs.hdfs_native as hdfs_native from manila import ssh_utils from manila import test from manila.tests import fake_share from manila import utils CONF = cfg.CONF class HDFSNativeShareDriverTestCase(test.TestCase): """Tests HDFSNativeShareDriver.""" def setUp(self): super(HDFSNativeShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._hdfs_execute = mock.Mock(return_value=('', '')) self.local_ip = '192.168.1.1' CONF.set_default('driver_handles_share_servers', False) CONF.set_default('hdfs_namenode_ip', self.local_ip) CONF.set_default('hdfs_ssh_name', 'fake_sshname') CONF.set_default('hdfs_ssh_pw', 'fake_sshpw') CONF.set_default('hdfs_ssh_private_key', 'fake_sshkey') self.fake_conf = config.Configuration(None) self._driver = hdfs_native.HDFSNativeShareDriver( execute=self._hdfs_execute, configuration=self.fake_conf) self.hdfs_bin = 'hdfs' self._driver._hdfs_bin = 'fake_hdfs_bin' self.share = fake_share.fake_share(share_proto='HDFS') self.snapshot = fake_share.fake_snapshot(share_proto='HDFS') self.access = fake_share.fake_access(access_type='user') self.fakesharepath = 'hdfs://1.2.3.4:5/share-0' self.fakesnapshotpath = '/share-0/.snapshot/snapshot-0' socket.gethostname = mock.Mock(return_value='testserver') socket.gethostbyname_ex = mock.Mock(return_value=( 'localhost', ['localhost.localdomain', 'testserver'], ['127.0.0.1', self.local_ip])) def test_do_setup(self): self._driver.do_setup(self._context) self.assertEqual(self._driver._hdfs_bin, self.hdfs_bin) def test_create_share(self): self._driver._create_share = mock.Mock() self._driver._get_share_path = mock.Mock( return_value=self.fakesharepath) result = self._driver.create_share(self._context, 
self.share, share_server=None) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_share_path.assert_called_once_with(self.share) self.assertEqual(self.fakesharepath, result) def test_create_share_unsupported_proto(self): self._driver._get_share_path = mock.Mock() self.assertRaises(exception.HDFSException, self._driver.create_share, self._context, fake_share.fake_share(), share_server=None) self.assertFalse(self._driver._get_share_path.called) def test__set_share_size(self): share_dir = '/' + self.share['name'] sizestr = str(self.share['size']) + 'g' self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver._set_share_size(self.share) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir) def test__set_share_size_exception(self): share_dir = '/' + self.share['name'] sizestr = str(self.share['size']) + 'g' self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver._set_share_size, self.share) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir) def test__set_share_size_with_new_size(self): share_dir = '/' + self.share['name'] new_size = 'fake_size' sizestr = new_size + 'g' self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver._set_share_size(self.share, new_size) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-setSpaceQuota', sizestr, share_dir) def test__create_share(self): share_dir = '/' + self.share['name'] self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver._set_share_size = mock.Mock() self._driver._create_share(self.share) self._driver._hdfs_execute.assert_any_call( 'fake_hdfs_bin', 'dfs', '-mkdir', share_dir) self._driver._set_share_size.assert_called_once_with(self.share) self._driver._hdfs_execute.assert_any_call( 'fake_hdfs_bin', 
'dfsadmin', '-allowSnapshot', share_dir) def test__create_share_exception(self): share_dir = '/' + self.share['name'] self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver._create_share, self.share) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-mkdir', share_dir) def test_create_share_from_empty_snapshot(self): return_hdfs_execute = (None, None) self._driver._hdfs_execute = mock.Mock( return_value=return_hdfs_execute) self._driver._create_share = mock.Mock(return_value=True) self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._driver._get_snapshot_path = mock.Mock(return_value=self. fakesnapshotpath) result = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot, share_server=None) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with( self.snapshot) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath) self._driver._get_share_path.assert_called_once_with(self.share) self.assertEqual(self.fakesharepath, result) def test_create_share_from_snapshot(self): return_hdfs_execute = ("fake_content", None) self._driver._hdfs_execute = mock.Mock( return_value=return_hdfs_execute) self._driver._create_share = mock.Mock(return_value=True) self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._driver._get_snapshot_path = mock.Mock(return_value=self. 
fakesnapshotpath) result = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot, share_server=None) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with( self.snapshot) calls = [mock.call('fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath), mock.call('fake_hdfs_bin', 'dfs', '-cp', self.fakesnapshotpath + '/*', '/' + self.share['name'])] self._driver._hdfs_execute.assert_has_calls(calls) self._driver._get_share_path.assert_called_once_with(self.share) self.assertEqual(self.fakesharepath, result) def test_create_share_from_snapshot_exception(self): self._driver._create_share = mock.Mock(return_value=True) self._driver._get_snapshot_path = mock.Mock(return_value=self. fakesnapshotpath) self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver.create_share_from_snapshot, self._context, self.share, self.snapshot, share_server=None) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with(self.snapshot) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-ls', self.fakesnapshotpath) self.assertFalse(self._driver._get_share_path.called) def test_create_snapshot(self): self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver.create_snapshot(self._context, self.snapshot, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-createSnapshot', '/' + self.snapshot['share_name'], self.snapshot['name']) def test_create_snapshot_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver.create_snapshot, self._context, self.snapshot, share_server=None) 
self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-createSnapshot', '/' + self.snapshot['share_name'], self.snapshot['name']) def test_delete_share(self): self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver.delete_share(self._context, self.share, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-rm', '-r', '/' + self.share['name']) def test_delete_share_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver.delete_share, self._context, self.share, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-rm', '-r', '/' + self.share['name']) def test_delete_snapshot(self): self._driver._hdfs_execute = mock.Mock(return_value=True) self._driver.delete_snapshot(self._context, self.snapshot, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-deleteSnapshot', '/' + self.snapshot['share_name'], self.snapshot['name']) def test_delete_snapshot_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver.delete_snapshot, self._context, self.snapshot, share_server=None) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfs', '-deleteSnapshot', '/' + self.snapshot['share_name'], self.snapshot['name']) def test_allow_access(self): self._driver._hdfs_execute = mock.Mock( return_value=['', '']) share_dir = '/' + self.share['name'] user_access = ':'.join([self.access['access_type'], self.access['access_to'], 'rwx']) cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-m', '-R', user_access, share_dir] self._driver.allow_access(self._context, self.share, self.access, share_server=None) self._driver._hdfs_execute.assert_called_once_with( *cmd, check_exit_code=True) def 
test_allow_access_invalid_access_type(self): self.assertRaises(exception.InvalidShareAccess, self._driver.allow_access, self._context, self.share, fake_share.fake_access( access_type='invalid_access_type'), share_server=None) def test_allow_access_invalid_access_level(self): self.assertRaises(exception.InvalidShareAccess, self._driver.allow_access, self._context, self.share, fake_share.fake_access( access_level='invalid_access_level'), share_server=None) def test_allow_access_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) share_dir = '/' + self.share['name'] user_access = ':'.join([self.access['access_type'], self.access['access_to'], 'rwx']) cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-m', '-R', user_access, share_dir] self.assertRaises(exception.HDFSException, self._driver.allow_access, self._context, self.share, self.access, share_server=None) self._driver._hdfs_execute.assert_called_once_with( *cmd, check_exit_code=True) def test_deny_access(self): self._driver._hdfs_execute = mock.Mock(return_value=['', '']) share_dir = '/' + self.share['name'] access_name = ':'.join([self.access['access_type'], self.access['access_to']]) cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-x', '-R', access_name, share_dir] self._driver.deny_access(self._context, self.share, self.access, share_server=None) self._driver._hdfs_execute.assert_called_once_with( *cmd, check_exit_code=True) def test_deny_access_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) share_dir = '/' + self.share['name'] access_name = ':'.join([self.access['access_type'], self.access['access_to']]) cmd = ['fake_hdfs_bin', 'dfs', '-setfacl', '-x', '-R', access_name, share_dir] self.assertRaises(exception.HDFSException, self._driver.deny_access, self._context, self.share, self.access, share_server=None) self._driver._hdfs_execute.assert_called_once_with( *cmd, check_exit_code=True) def test_extend_share(self): 
new_size = "fake_size" self._driver._set_share_size = mock.Mock() self._driver.extend_share(self.share, new_size) self._driver._set_share_size.assert_called_once_with( self.share, new_size) def test__check_hdfs_state_healthy(self): fake_out = "fakeinfo\n...Status: HEALTHY" self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, '')) result = self._driver._check_hdfs_state() self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'fsck', '/') self.assertTrue(result) def test__check_hdfs_state_down(self): fake_out = "fakeinfo\n...Status: DOWN" self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, '')) result = self._driver._check_hdfs_state() self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'fsck', '/') self.assertFalse(result) def test__check_hdfs_state_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver._check_hdfs_state) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'fsck', '/') def test__get_available_capacity(self): fake_out = ('Configured Capacity: 2.4\n' + 'Total Capacity: 2\n' + 'DFS free: 1') self._driver._hdfs_execute = mock.Mock(return_value=(fake_out, '')) total, free = self._driver._get_available_capacity() self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-report') self.assertEqual(2, total) self.assertEqual(1, free) def test__get_available_capacity_exception(self): self._driver._hdfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.HDFSException, self._driver._get_available_capacity) self._driver._hdfs_execute.assert_called_once_with( 'fake_hdfs_bin', 'dfsadmin', '-report') def test_get_share_stats_refresh_false(self): self._driver._stats = {'fake_key': 'fake_value'} result = self._driver.get_share_stats(False) self.assertEqual(self._driver._stats, result) def test_get_share_stats_refresh_true(self): 
self._driver._get_available_capacity = mock.Mock( return_value=(11111.0, 12345.0)) result = self._driver.get_share_stats(True) expected_keys = [ 'qos', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'vendor_name', 'storage_protocol', 'ipv4_support', 'ipv6_support' ] for key in expected_keys: self.assertIn(key, result) self.assertTrue(result['ipv4_support']) self.assertFalse(result['ipv6_support']) self.assertEqual('HDFS', result['storage_protocol']) self._driver._get_available_capacity.assert_called_once_with() def test__hdfs_local_execute(self): cmd = 'testcmd' self.mock_object(utils, 'execute', mock.Mock(return_value=True)) self._driver._hdfs_local_execute(cmd) utils.execute.assert_called_once_with(cmd, run_as_root=False) def test__hdfs_remote_execute(self): self._driver._run_ssh = mock.Mock(return_value=True) cmd = 'testcmd' self._driver._hdfs_remote_execute(cmd, check_exit_code=True) self._driver._run_ssh.assert_called_once_with( self.local_ip, tuple([cmd]), True) def test__run_ssh(self): ssh_output = 'fake_ssh_output' cmd_list = ['fake', 'cmd'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(return_value=True) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(return_value=ssh) self.mock_object(ssh_utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) result = self._driver._run_ssh(self.local_ip, cmd_list) ssh_utils.SSHPool.assert_called_once_with( self._driver.configuration.hdfs_namenode_ip, self._driver.configuration.hdfs_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.hdfs_ssh_name, password=self._driver.configuration.hdfs_ssh_pw, privatekey=self._driver.configuration.hdfs_ssh_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) 
ssh_pool.create.assert_called_once_with() ssh.get_transport().is_active.assert_called_once_with() processutils.ssh_execute.assert_called_once_with( ssh, 'fake cmd', check_exit_code=False) self.assertEqual(ssh_output, result) def test__run_ssh_exception(self): cmd_list = ['fake', 'cmd'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(return_value=True) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(return_value=ssh) self.mock_object(ssh_utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) self.mock_object(processutils, 'ssh_execute', mock.Mock(side_effect=Exception)) self.assertRaises(exception.HDFSException, self._driver._run_ssh, self.local_ip, cmd_list) ssh_utils.SSHPool.assert_called_once_with( self._driver.configuration.hdfs_namenode_ip, self._driver.configuration.hdfs_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.hdfs_ssh_name, password=self._driver.configuration.hdfs_ssh_pw, privatekey=self._driver.configuration.hdfs_ssh_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) ssh_pool.create.assert_called_once_with() ssh.get_transport().is_active.assert_called_once_with() processutils.ssh_execute.assert_called_once_with( ssh, 'fake cmd', check_exit_code=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0176704 manila-21.0.0/manila/tests/share/drivers/hitachi/0000775000175000017500000000000000000000000021652 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hitachi/__init__.py0000664000175000017500000000000000000000000023751 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0176704 
manila-21.0.0/manila/tests/share/drivers/hitachi/hnas/0000775000175000017500000000000000000000000022603 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hitachi/hnas/__init__.py0000664000175000017500000000000000000000000024702 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hitachi/hnas/test_driver.py0000664000175000017500000015171300000000000025517 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_config import cfg from manila import exception import manila.share.configuration import manila.share.driver from manila.share.drivers.hitachi.hnas import driver from manila.share.drivers.hitachi.hnas import ssh from manila import test CONF = cfg.CONF share_nfs = { 'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'size': 50, 'host': 'hnas', 'share_proto': 'NFS', 'share_type_id': 1, 'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d', 'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d', 'export_locations': [{'path': '172.24.44.10:/shares/' 'aa4a7710-f326-41fb-ad18-b4ad587fc87a'}], } share_cifs = { 'id': 'f5cadaf2-afbe-4cc4-9021-85491b6b76f7', 'name': 'f5cadaf2-afbe-4cc4-9021-85491b6b76f7', 'size': 50, 'host': 'hnas', 'share_proto': 'CIFS', 'share_type_id': 1, 'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d', 'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d', 'export_locations': [{'path': '\\\\172.24.44.10\\' 'f5cadaf2-afbe-4cc4-9021-85491b6b76f7'}], } share_invalid_host = { 'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'size': 50, 'host': 'invalid', 'share_proto': 'NFS', 'share_type_id': 1, 'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d', 'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d', 'export_locations': [{'path': '172.24.44.10:/shares/' 'aa4a7710-f326-41fb-ad18-b4ad587fc87a'}], } share_mount_support_nfs = { 'id': '62125744-fcdd-4f55-a8c1-d1498102f634', 'name': '62125744-fcdd-4f55-a8c1-d1498102f634', 'size': 50, 'host': 'hnas', 'share_proto': 'NFS', 'share_type_id': 1, 'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d', 'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d', 'export_locations': [{'path': '172.24.44.10:/shares/' '62125744-fcdd-4f55-a8c1-d1498102f634'}], 'mount_snapshot_support': True, } share_mount_support_cifs = { 'id': 
'd6e7dc6b-f65f-49d9-968d-936f75474f29', 'name': 'd6e7dc6b-f65f-49d9-968d-936f75474f29', 'size': 50, 'host': 'hnas', 'share_proto': 'CIFS', 'share_type_id': 1, 'share_network_id': 'bb329e24-3bdb-491d-acfd-dfe70c09b98d', 'share_server_id': 'cc345a53-491d-acfd-3bdb-dfe70c09b98d', 'export_locations': [{'path': '172.24.44.10:/shares/' 'd6e7dc6b-f65f-49d9-968d-936f75474f29'}], 'mount_snapshot_support': True, } access_nfs_rw = { 'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0', 'access_type': 'ip', 'access_to': '172.24.44.200', 'access_level': 'rw', 'state': 'active', } access_cifs_rw = { 'id': '43167594-40e9-b899-1f4f-b9c2176b7564', 'access_type': 'user', 'access_to': 'fake_user', 'access_level': 'rw', 'state': 'active', } access_cifs_ro = { 'id': '32407088-1f4f-40e9-b899-b9a4176b574d', 'access_type': 'user', 'access_to': 'fake_user', 'access_level': 'ro', 'state': 'active', } snapshot_nfs = { 'id': 'abba6d9b-f29c-4bf7-aac1-618cda7aaf0f', 'share_id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'share': share_nfs, 'provider_location': '/snapshots/aa4a7710-f326-41fb-ad18-b4ad587fc87a/' 'abba6d9b-f29c-4bf7-aac1-618cda7aaf0f', 'size': 2, } snapshot_cifs = { 'id': '91bc6e1b-1ba5-f29c-abc1-da7618cabf0a', 'share_id': 'f5cadaf2-afbe-4cc4-9021-85491b6b76f7', 'share': share_cifs, 'provider_location': '/snapshots/f5cadaf2-afbe-4cc4-9021-85491b6b76f7/' '91bc6e1b-1ba5-f29c-abc1-da7618cabf0a', 'size': 2, } manage_snapshot = { 'id': 'bc168eb-fa71-beef-153a-3d451aa1351f', 'share_id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'share': share_nfs, 'provider_location': '/snapshots/aa4a7710-f326-41fb-ad18-b4ad587fc87a' '/snapshot18-05-2106', } snapshot_mount_support_nfs = { 'id': '3377b015-a695-4a5a-8aa5-9b931b023380', 'share_id': '62125744-fcdd-4f55-a8c1-d1498102f634', 'share': share_mount_support_nfs, 'provider_location': '/snapshots/62125744-fcdd-4f55-a8c1-d1498102f634' '/3377b015-a695-4a5a-8aa5-9b931b023380', } snapshot_mount_support_cifs = { 'id': 'f9916515-5cb8-4612-afa6-7f2baa74223a', 
'share_id': 'd6e7dc6b-f65f-49d9-968d-936f75474f29', 'share': share_mount_support_cifs, 'provider_location': '/snapshots/d6e7dc6b-f65f-49d9-968d-936f75474f29' '/f9916515-5cb8-4612-afa6-7f2baa74223a', } invalid_share = { 'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'size': 100, 'host': 'hnas', 'share_proto': 'HDFS', } invalid_snapshot = { 'id': '24dcdcb5-a582-4bcc-b462-641da143afee', 'share_id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'share': invalid_share, } invalid_access_type = { 'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0', 'access_type': 'cert', 'access_to': 'manila_user', 'access_level': 'rw', 'state': 'active', } invalid_access_level = { 'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0', 'access_type': 'ip', 'access_to': 'manila_user', 'access_level': '777', 'state': 'active', } invalid_protocol_msg = ("Share backend error: Only NFS or CIFS protocol are " "currently supported. Share provided %(id)s with " "protocol %(proto)s." % {'id': invalid_share['id'], 'proto': invalid_share['share_proto']}) @ddt.ddt class HitachiHNASTestCase(test.TestCase): def setUp(self): super(HitachiHNASTestCase, self).setUp() CONF.set_default('driver_handles_share_servers', False) CONF.hitachi_hnas_evs_id = '2' CONF.hitachi_hnas_evs_ip = '172.24.44.10' CONF.hitachi_hnas_admin_network_ip = '10.20.30.40' CONF.hitachi_hnas_ip = '172.24.44.1' CONF.hitachi_hnas_ip_port = 'hitachi_hnas_ip_port' CONF.hitachi_hnas_user = 'hitachi_hnas_user' CONF.hitachi_hnas_password = 'hitachi_hnas_password' CONF.hitachi_hnas_file_system_name = 'file_system' CONF.hitachi_hnas_ssh_private_key = 'private_key' CONF.hitachi_hnas_cluster_admin_ip0 = None CONF.hitachi_hnas_stalled_job_timeout = 10 CONF.hitachi_hnas_driver_helper = ('manila.share.drivers.hitachi.hnas.' 
'ssh.HNASSSHBackend') self.fake_conf = manila.share.configuration.Configuration(None) self.fake_private_storage = mock.Mock() self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=None)) self.mock_object(self.fake_private_storage, 'delete', mock.Mock(return_value=None)) self._driver = driver.HitachiHNASDriver( private_storage=self.fake_private_storage, configuration=self.fake_conf) self._driver.backend_name = "hnas" self.mock_log = self.mock_object(driver, 'LOG') # mocking common backend calls self.mock_object(ssh.HNASSSHBackend, "check_fs_mounted", mock.Mock( return_value=True)) self.mock_object(ssh.HNASSSHBackend, "check_vvol") self.mock_object(ssh.HNASSSHBackend, "check_quota") self.mock_object(ssh.HNASSSHBackend, "check_cifs") self.mock_object(ssh.HNASSSHBackend, "check_export") self.mock_object(ssh.HNASSSHBackend, 'check_directory') @ddt.data('hitachi_hnas_driver_helper', 'hitachi_hnas_evs_id', 'hitachi_hnas_evs_ip', 'hitachi_hnas_ip', 'hitachi_hnas_user') def test_init_invalid_conf_parameters(self, attr_name): self.mock_object(manila.share.driver.ShareDriver, '__init__') setattr(CONF, attr_name, None) self.assertRaises(exception.InvalidParameterValue, self._driver.__init__) def test_init_invalid_credentials(self): self.mock_object(manila.share.driver.ShareDriver, '__init__') CONF.hitachi_hnas_password = None CONF.hitachi_hnas_ssh_private_key = None self.assertRaises(exception.InvalidParameterValue, self._driver.__init__) @ddt.data(True, False) def test_update_access_nfs(self, empty_rules): if not empty_rules: access1 = { 'access_type': 'ip', 'access_to': '172.24.10.10', 'access_level': 'rw' } access2 = { 'access_type': 'ip', 'access_to': '188.100.20.10', 'access_level': 'ro' } access_list = [access1, access2] access_list_updated = ( [access1['access_to'] + '(' + access1['access_level'] + ',norootsquash)', access2['access_to'] + '(' + access2['access_level'] + ')', ]) else: access_list = [] access_list_updated = [] 
self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule", mock.Mock()) self._driver.update_access('context', share_nfs, access_list, [], [], []) ssh.HNASSSHBackend.update_nfs_access_rule.assert_called_once_with( access_list_updated, share_id=share_nfs['id']) self.assertTrue(self.mock_log.debug.called) def test_update_access_ip_exception(self): access1 = { 'access_type': 'ip', 'access_to': '188.100.20.10', 'access_level': 'ro' } access2 = { 'access_type': 'something', 'access_to': '172.24.10.10', 'access_level': 'rw' } access_list = [access1, access2] self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, 'context', share_nfs, access_list, [], [], []) def test_update_access_not_found_exception(self): access1 = { 'access_type': 'ip', 'access_to': '188.100.20.10', 'access_level': 'ro' } access2 = { 'access_type': 'something', 'access_to': '172.24.10.10', 'access_level': 'rw' } access_list = [access1, access2] self.mock_object(self._driver, '_ensure_share', mock.Mock( side_effect=exception.HNASItemNotFoundException(msg='fake'))) self.assertRaises(exception.ShareResourceNotFound, self._driver.update_access, 'context', share_nfs, access_list, add_rules=[], delete_rules=[], update_rules=[]) @ddt.data([access_cifs_rw, 'acr'], [access_cifs_ro, 'ar']) @ddt.unpack def test_allow_access_cifs(self, access_cifs, permission): access_list_allow = [access_cifs] self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access') self._driver.update_access('context', share_cifs, [], access_list_allow, [], []) ssh.HNASSSHBackend.cifs_allow_access.assert_called_once_with( share_cifs['id'], 'fake_user', permission, is_snapshot=False) self.assertTrue(self.mock_log.debug.called) def test_allow_access_cifs_invalid_type(self): access_cifs_type_ip = { 'id': '43167594-40e9-b899-1f4f-b9c2176b7564', 'access_type': 'ip', 'access_to': 'fake_user', 'access_level': 'rw', 'state': 'active', } access_list_allow = [access_cifs_type_ip] self.assertRaises(exception.InvalidShareAccess, 
self._driver.update_access, 'context', share_cifs, [], access_list_allow, [], []) def test_deny_access_cifs(self): access_list_deny = [access_cifs_rw] self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access') self._driver.update_access('context', share_cifs, [], [], access_list_deny, []) ssh.HNASSSHBackend.cifs_deny_access.assert_called_once_with( share_cifs['id'], 'fake_user', is_snapshot=False) self.assertTrue(self.mock_log.debug.called) def test_deny_access_cifs_unsupported_type(self): access_cifs_type_ip = { 'id': '43167594-40e9-b899-1f4f-b9c2176b7564', 'access_type': 'ip', 'access_to': 'fake_user', 'access_level': 'rw', 'state': 'active', } access_list_deny = [access_cifs_type_ip] self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access') self._driver.update_access('context', share_cifs, [], [], access_list_deny, []) self.assertTrue(self.mock_log.warning.called) def test_update_access_invalid_share_protocol(self): self.mock_object(self._driver, '_ensure_share') ex = self.assertRaises(exception.ShareBackendException, self._driver.update_access, 'context', invalid_share, [], [], [], []) self.assertEqual(invalid_protocol_msg, ex.msg) def test_update_access_cifs_recovery_mode(self): access_list = [access_cifs_rw, access_cifs_ro] permission_list = [('fake_user1', 'acr'), ('fake_user2', 'ar')] self.mock_object(ssh.HNASSSHBackend, 'list_cifs_permissions', mock.Mock(return_value=permission_list)) self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access') self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access') self._driver.update_access('context', share_cifs, access_list, [], [], []) ssh.HNASSSHBackend.list_cifs_permissions.assert_called_once_with( share_cifs['id']) self.assertTrue(self.mock_log.debug.called) def _get_export(self, id, share_proto, ip, is_admin_only, is_snapshot=False): if share_proto.lower() == 'nfs': if is_snapshot: path = '/snapshots/' + id else: path = '/shares/' + id export = ':'.join((ip, path)) else: export = r'\\%s\%s' % (ip, id) return { "path": 
export, "is_admin_only": is_admin_only, "metadata": {}, } @ddt.data(share_nfs, share_cifs) def test_create_share(self, share): self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create") self.mock_object(ssh.HNASSSHBackend, "quota_add") self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock( return_value='/shares/' + share['id'])) self.mock_object(ssh.HNASSSHBackend, "cifs_share_add") result = self._driver.create_share('context', share) self.assertTrue(self.mock_log.debug.called) ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share['id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with(share['id'], share['size']) expected = [ self._get_export( share['id'], share['share_proto'], self._driver.hnas_evs_ip, False), self._get_export( share['id'], share['share_proto'], self._driver.hnas_admin_network_ip, True)] if share['share_proto'].lower() == 'nfs': ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with( share_nfs['id'], snapshot_id=None) self.assertFalse(ssh.HNASSSHBackend.cifs_share_add.called) else: ssh.HNASSSHBackend.cifs_share_add.assert_called_once_with( share_cifs['id'], snapshot_id=None) self.assertFalse(ssh.HNASSSHBackend.nfs_export_add.called) self.assertEqual(expected, result) def test_create_share_export_error(self): self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create") self.mock_object(ssh.HNASSSHBackend, "quota_add") self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock( side_effect=exception.HNASBackendException('msg'))) self.mock_object(ssh.HNASSSHBackend, "vvol_delete") self.assertRaises(exception.HNASBackendException, self._driver.create_share, 'context', share_nfs) self.assertTrue(self.mock_log.debug.called) ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share_nfs['id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with(share_nfs['id'], share_nfs['size']) 
ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with( share_nfs['id'], snapshot_id=None) ssh.HNASSSHBackend.vvol_delete.assert_called_once_with(share_nfs['id']) def test_create_share_invalid_share_protocol(self): self.mock_object(driver.HitachiHNASDriver, "_create_share", mock.Mock(return_value="path")) ex = self.assertRaises(exception.ShareBackendException, self._driver.create_share, 'context', invalid_share) self.assertEqual(invalid_protocol_msg, ex.msg) @ddt.data(share_nfs, share_cifs) def test_delete_share(self, share): self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "nfs_export_del") self.mock_object(ssh.HNASSSHBackend, "cifs_share_del") self.mock_object(ssh.HNASSSHBackend, "vvol_delete") self._driver.delete_share('context', share) self.assertTrue(self.mock_log.debug.called) ssh.HNASSSHBackend.vvol_delete.assert_called_once_with(share['id']) if share['share_proto'].lower() == 'nfs': ssh.HNASSSHBackend.nfs_export_del.assert_called_once_with( share['id']) self.assertFalse(ssh.HNASSSHBackend.cifs_share_del.called) else: ssh.HNASSSHBackend.cifs_share_del.assert_called_once_with( share['id']) self.assertFalse(ssh.HNASSSHBackend.nfs_export_del.called) @ddt.data(snapshot_nfs, snapshot_cifs, snapshot_mount_support_nfs, snapshot_mount_support_cifs) def test_create_snapshot(self, snapshot): hnas_id = snapshot['share_id'] access_list = ['172.24.44.200(rw,norootsquash)', '172.24.49.180(all_squash,read_write,secure)', '172.24.49.110(ro, secure)', '172.24.49.112(secure,readwrite,norootsquash)', '172.24.49.142(read_only, secure)', '172.24.49.201(rw,read_write,readwrite)', '172.24.49.218(rw)'] ro_list = ['172.24.44.200(ro,norootsquash)', '172.24.49.180(all_squash,ro,secure)', '172.24.49.110(ro, secure)', '172.24.49.112(secure,ro,norootsquash)', '172.24.49.142(read_only, secure)', '172.24.49.201(ro,ro,ro)', '172.24.49.218(ro)'] export_locations = [ self._get_export( snapshot['id'], 
snapshot['share']['share_proto'], self._driver.hnas_evs_ip, False, is_snapshot=True), self._get_export( snapshot['id'], snapshot['share']['share_proto'], self._driver.hnas_admin_network_ip, True, is_snapshot=True)] expected = {'provider_location': '/snapshots/' + hnas_id + '/' + snapshot['id']} if snapshot['share'].get('mount_snapshot_support'): expected['export_locations'] = export_locations self.mock_object(ssh.HNASSSHBackend, "get_nfs_host_list", mock.Mock( return_value=access_list)) self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "is_cifs_in_use", mock.Mock( return_value=False)) self.mock_object(ssh.HNASSSHBackend, "tree_clone") self.mock_object(ssh.HNASSSHBackend, "nfs_export_add") self.mock_object(ssh.HNASSSHBackend, "cifs_share_add") out = self._driver.create_snapshot('context', snapshot) ssh.HNASSSHBackend.tree_clone.assert_called_once_with( '/shares/' + hnas_id, '/snapshots/' + hnas_id + '/' + snapshot['id']) self.assertEqual(expected, out) if snapshot['share']['share_proto'].lower() == 'nfs': ssh.HNASSSHBackend.get_nfs_host_list.assert_called_once_with( hnas_id) ssh.HNASSSHBackend.update_nfs_access_rule.assert_any_call( ro_list, share_id=hnas_id) ssh.HNASSSHBackend.update_nfs_access_rule.assert_any_call( access_list, share_id=hnas_id) else: ssh.HNASSSHBackend.is_cifs_in_use.assert_called_once_with( hnas_id) def test_create_snapshot_invalid_protocol(self): self.mock_object(self._driver, '_ensure_share') ex = self.assertRaises(exception.ShareBackendException, self._driver.create_snapshot, 'context', invalid_snapshot) self.assertEqual(invalid_protocol_msg, ex.msg) def test_create_snapshot_cifs_exception(self): cifs_excep_msg = ("Share backend error: CIFS snapshot when share is " "mounted is disabled. 
Set " "hitachi_hnas_allow_cifs_snapshot_while_mounted to " "True or unmount the share to take a snapshot.") self.mock_object(ssh.HNASSSHBackend, "is_cifs_in_use", mock.Mock( return_value=True)) ex = self.assertRaises(exception.ShareBackendException, self._driver.create_snapshot, 'context', snapshot_cifs) self.assertEqual(cifs_excep_msg, ex.msg) def test_create_snapshot_first_snapshot(self): hnas_id = snapshot_nfs['share_id'] self.mock_object(ssh.HNASSSHBackend, "get_nfs_host_list", mock.Mock( return_value=['172.24.44.200(rw)'])) self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "tree_clone", mock.Mock( side_effect=exception.HNASNothingToCloneException('msg'))) self.mock_object(ssh.HNASSSHBackend, "create_directory") self.mock_object(ssh.HNASSSHBackend, "nfs_export_add") self.mock_object(ssh.HNASSSHBackend, "cifs_share_add") self._driver.create_snapshot('context', snapshot_nfs) self.assertTrue(self.mock_log.warning.called) ssh.HNASSSHBackend.get_nfs_host_list.assert_called_once_with( hnas_id) ssh.HNASSSHBackend.update_nfs_access_rule.assert_any_call( ['172.24.44.200(ro)'], share_id=hnas_id) ssh.HNASSSHBackend.update_nfs_access_rule.assert_any_call( ['172.24.44.200(rw)'], share_id=hnas_id) ssh.HNASSSHBackend.create_directory.assert_called_once_with( '/snapshots/' + hnas_id + '/' + snapshot_nfs['id']) @ddt.data(snapshot_nfs, snapshot_cifs, snapshot_mount_support_nfs, snapshot_mount_support_cifs) def test_delete_snapshot(self, snapshot): hnas_share_id = snapshot['share_id'] hnas_snapshot_id = snapshot['id'] self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted") self.mock_object(ssh.HNASSSHBackend, "tree_delete") self.mock_object(ssh.HNASSSHBackend, "delete_directory") self.mock_object(ssh.HNASSSHBackend, "nfs_export_del") self.mock_object(ssh.HNASSSHBackend, "cifs_share_del") self._driver.delete_snapshot('context', snapshot) self.assertTrue(self.mock_log.debug.called) 
self.assertTrue(self.mock_log.info.called) driver.HitachiHNASDriver._check_fs_mounted.assert_called_once_with() ssh.HNASSSHBackend.tree_delete.assert_called_once_with( '/snapshots/' + hnas_share_id + '/' + snapshot['id']) ssh.HNASSSHBackend.delete_directory.assert_called_once_with( '/snapshots/' + hnas_share_id) if snapshot['share']['share_proto'].lower() == 'nfs': if snapshot['share'].get('mount_snapshot_support'): ssh.HNASSSHBackend.nfs_export_del.assert_called_once_with( snapshot_id=hnas_snapshot_id) else: ssh.HNASSSHBackend.nfs_export_del.assert_not_called() else: if snapshot['share'].get('mount_snapshot_support'): ssh.HNASSSHBackend.cifs_share_del.assert_called_once_with( hnas_snapshot_id) else: ssh.HNASSSHBackend.cifs_share_del.assert_not_called() def test_delete_managed_snapshot(self): hnas_id = manage_snapshot['share_id'] self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted") self.mock_object(ssh.HNASSSHBackend, "tree_delete") self.mock_object(ssh.HNASSSHBackend, "delete_directory") self.mock_object(ssh.HNASSSHBackend, "nfs_export_del") self.mock_object(ssh.HNASSSHBackend, "cifs_share_del") self._driver.delete_snapshot('context', manage_snapshot) self.assertTrue(self.mock_log.debug.called) self.assertTrue(self.mock_log.info.called) driver.HitachiHNASDriver._check_fs_mounted.assert_called_once_with() ssh.HNASSSHBackend.tree_delete.assert_called_once_with( manage_snapshot['provider_location']) ssh.HNASSSHBackend.delete_directory.assert_called_once_with( '/snapshots/' + hnas_id) @ddt.data(share_nfs, share_cifs) def test_ensure_share(self, share): result = self._driver.ensure_share('context', share) ssh.HNASSSHBackend.check_vvol.assert_called_once_with(share['id']) ssh.HNASSSHBackend.check_quota.assert_called_once_with(share['id']) expected = [ self._get_export( share['id'], share['share_proto'], self._driver.hnas_evs_ip, False), self._get_export( share['id'], share['share_proto'], self._driver.hnas_admin_network_ip, True)] if 
share['share_proto'].lower() == 'nfs': ssh.HNASSSHBackend.check_export.assert_called_once_with( share['id']) self.assertFalse(ssh.HNASSSHBackend.check_cifs.called) else: ssh.HNASSSHBackend.check_cifs.assert_called_once_with(share['id']) self.assertFalse(ssh.HNASSSHBackend.check_export.called) self.assertEqual(expected, result) def test_ensure_share_invalid_protocol(self): ex = self.assertRaises(exception.ShareBackendException, self._driver.ensure_share, 'context', invalid_share) self.assertEqual(invalid_protocol_msg, ex.msg) def test_shrink_share(self): self.mock_object(ssh.HNASSSHBackend, "get_share_usage", mock.Mock( return_value=10)) self.mock_object(ssh.HNASSSHBackend, "modify_quota") self._driver.shrink_share(share_nfs, 11) ssh.HNASSSHBackend.get_share_usage.assert_called_once_with( share_nfs['id']) ssh.HNASSSHBackend.modify_quota.assert_called_once_with( share_nfs['id'], 11) def test_shrink_share_new_size_lower_than_usage(self): self.mock_object(ssh.HNASSSHBackend, "get_share_usage", mock.Mock( return_value=10)) self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, share_nfs, 9) ssh.HNASSSHBackend.get_share_usage.assert_called_once_with( share_nfs['id']) def test_extend_share(self): self.mock_object(ssh.HNASSSHBackend, "get_stats", mock.Mock( return_value=(500, 200, True))) self.mock_object(ssh.HNASSSHBackend, "modify_quota") self._driver.extend_share(share_nfs, 150) ssh.HNASSSHBackend.get_stats.assert_called_once_with() ssh.HNASSSHBackend.modify_quota.assert_called_once_with( share_nfs['id'], 150) def test_extend_share_with_no_available_space_in_fs(self): self.mock_object(ssh.HNASSSHBackend, "get_stats", mock.Mock( return_value=(500, 200, False))) self.mock_object(ssh.HNASSSHBackend, "modify_quota") self.assertRaises(exception.HNASBackendException, self._driver.extend_share, share_nfs, 1000) ssh.HNASSSHBackend.get_stats.assert_called_once_with() @ddt.data(share_nfs, share_cifs) def test_manage_existing(self, share): 
expected_exports = [ self._get_export( share['id'], share['share_proto'], self._driver.hnas_evs_ip, False), self._get_export( share['id'], share['share_proto'], self._driver.hnas_admin_network_ip, True)] expected_out = {'size': share['size'], 'export_locations': expected_exports} self.mock_object(ssh.HNASSSHBackend, "get_share_quota", mock.Mock( return_value=share['size'])) out = self._driver.manage_existing(share, 'option') self.assertEqual(expected_out, out) ssh.HNASSSHBackend.get_share_quota.assert_called_once_with( share['id']) def test_manage_existing_no_quota(self): self.mock_object(ssh.HNASSSHBackend, "get_share_quota", mock.Mock( return_value=None)) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share_nfs, 'option') ssh.HNASSSHBackend.get_share_quota.assert_called_once_with( share_nfs['id']) def test_manage_existing_wrong_share_id(self): self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value='Wrong_share_id')) self.assertRaises(exception.HNASBackendException, self._driver.manage_existing, share_nfs, 'option') @ddt.data(':/', '1.1.1.1:/share_id', '1.1.1.1:/shares', '1.1.1.1:shares/share_id', ':/share_id') def test_manage_existing_wrong_path_format_nfs(self, wrong_location): expected_exception = ("Share backend error: Incorrect path. It " "should have the following format: " "IP:/shares/share_id.") self._test_manage_existing_wrong_path( share_nfs.copy(), expected_exception, wrong_location) @ddt.data('\\\\1.1.1.1', '1.1.1.1\\share_id', '1.1.1.1\\shares\\share_id', '\\\\1.1.1.1\\shares\\share_id', '\\\\share_id') def test_manage_existing_wrong_path_format_cifs(self, wrong_location): expected_exception = ("Share backend error: Incorrect path. 
It should " "have the following format: \\\\IP\\share_id.") self._test_manage_existing_wrong_path( share_cifs.copy(), expected_exception, wrong_location) def _test_manage_existing_wrong_path( self, share, expected_exception, wrong_location): share['export_locations'] = [{'path': wrong_location}] ex = self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, share, 'option') self.assertEqual(expected_exception, ex.msg) def test_manage_existing_wrong_evs_ip(self): share_nfs['export_locations'] = [{'path': '172.24.44.189:/shares/' 'aa4a7710-f326-41fb-ad18-'}] self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, share_nfs, 'option') def test_manage_existing_invalid_host(self): self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, share_invalid_host, 'option') def test_manage_existing_invalid_protocol(self): self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, invalid_share, 'option') @ddt.data(True, False) def test_unmanage(self, has_export_locations): share_copy = share_nfs.copy() if not has_export_locations: share_copy['export_locations'] = [] self._driver.unmanage(share_copy) self.assertTrue(self.fake_private_storage.delete.called) self.assertTrue(self.mock_log.info.called) def test_get_network_allocations_number(self): result = self._driver.get_network_allocations_number() self.assertEqual(0, result) @ddt.data([share_nfs, snapshot_nfs], [share_cifs, snapshot_cifs]) @ddt.unpack def test_create_share_from_snapshot(self, share, snapshot): self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create") self.mock_object(ssh.HNASSSHBackend, "quota_add") self.mock_object(ssh.HNASSSHBackend, "tree_clone") self.mock_object(ssh.HNASSSHBackend, "cifs_share_add") self.mock_object(ssh.HNASSSHBackend, "nfs_export_add") result = self._driver.create_share_from_snapshot('context', share, snapshot) 
ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share['id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with(share['id'], share['size']) ssh.HNASSSHBackend.tree_clone.assert_called_once_with( '/snapshots/' + share['id'] + '/' + snapshot['id'], '/shares/' + share['id']) expected = [ self._get_export( share['id'], share['share_proto'], self._driver.hnas_evs_ip, False), self._get_export( share['id'], share['share_proto'], self._driver.hnas_admin_network_ip, True)] if share['share_proto'].lower() == 'nfs': ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with( share['id']) self.assertFalse(ssh.HNASSSHBackend.cifs_share_add.called) else: ssh.HNASSSHBackend.cifs_share_add.assert_called_once_with( share['id']) self.assertFalse(ssh.HNASSSHBackend.nfs_export_add.called) self.assertEqual(expected, result) def test_create_share_from_snapshot_empty_snapshot(self): self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create") self.mock_object(ssh.HNASSSHBackend, "quota_add") self.mock_object(ssh.HNASSSHBackend, "tree_clone", mock.Mock( side_effect=exception.HNASNothingToCloneException('msg'))) self.mock_object(ssh.HNASSSHBackend, "nfs_export_add") result = self._driver.create_share_from_snapshot('context', share_nfs, snapshot_nfs) expected = [ self._get_export( share_nfs['id'], share_nfs['share_proto'], self._driver.hnas_evs_ip, False), self._get_export( share_nfs['id'], share_nfs['share_proto'], self._driver.hnas_admin_network_ip, True)] self.assertEqual(expected, result) self.assertTrue(self.mock_log.warning.called) ssh.HNASSSHBackend.vvol_create.assert_called_once_with(share_nfs['id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with(share_nfs['id'], share_nfs['size']) ssh.HNASSSHBackend.tree_clone.assert_called_once_with( '/snapshots/' + share_nfs['id'] + '/' + snapshot_nfs['id'], '/shares/' + share_nfs['id']) ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with( share_nfs['id']) def 
test_create_share_from_snapshot_invalid_protocol(self): self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create") self.mock_object(ssh.HNASSSHBackend, "quota_add") self.mock_object(ssh.HNASSSHBackend, "tree_clone") ex = self.assertRaises(exception.ShareBackendException, self._driver.create_share_from_snapshot, 'context', invalid_share, snapshot_nfs) self.assertEqual(invalid_protocol_msg, ex.msg) def test_create_share_from_snapshot_cleanup(self): dest_path = '/snapshots/' + share_nfs['id'] + '/' + snapshot_nfs['id'] src_path = '/shares/' + share_nfs['id'] self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(ssh.HNASSSHBackend, "vvol_create") self.mock_object(ssh.HNASSSHBackend, "quota_add") self.mock_object(ssh.HNASSSHBackend, "tree_clone") self.mock_object(ssh.HNASSSHBackend, "vvol_delete") self.mock_object(ssh.HNASSSHBackend, "nfs_export_add", mock.Mock( side_effect=exception.HNASBackendException( msg='Error adding nfs export.'))) self.assertRaises(exception.HNASBackendException, self._driver.create_share_from_snapshot, 'context', share_nfs, snapshot_nfs) ssh.HNASSSHBackend.vvol_create.assert_called_once_with( share_nfs['id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with( share_nfs['id'], share_nfs['size']) ssh.HNASSSHBackend.tree_clone.assert_called_once_with( dest_path, src_path) ssh.HNASSSHBackend.nfs_export_add.assert_called_once_with( share_nfs['id']) ssh.HNASSSHBackend.vvol_delete.assert_called_once_with( share_nfs['id']) def test__check_fs_mounted(self): self._driver._check_fs_mounted() ssh.HNASSSHBackend.check_fs_mounted.assert_called_once_with() def test__check_fs_mounted_not_mounted(self): self.mock_object(ssh.HNASSSHBackend, 'check_fs_mounted', mock.Mock( return_value=False)) self.assertRaises(exception.HNASBackendException, self._driver._check_fs_mounted) ssh.HNASSSHBackend.check_fs_mounted.assert_called_once_with() def 
test__update_share_stats(self): fake_data = { 'share_backend_name': self._driver.backend_name, 'driver_handles_share_servers': self._driver.driver_handles_share_servers, 'vendor_name': 'Hitachi', 'driver_version': '4.0.0', 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 1000, 'free_capacity_gb': 200, 'reserved_percentage': driver.CONF.reserved_share_percentage, 'reserved_snapshot_percentage': driver.CONF.reserved_share_from_snapshot_percentage, 'reserved_share_extend_percentage': driver.CONF.reserved_share_extend_percentage, 'qos': False, 'thin_provisioning': True, 'dedupe': True, 'revert_to_snapshot_support': True, 'mount_snapshot_support': True, } self.mock_object(ssh.HNASSSHBackend, 'get_stats', mock.Mock( return_value=(1000, 200, True))) self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted", mock.Mock()) self.mock_object(manila.share.driver.ShareDriver, '_update_share_stats') self._driver._update_share_stats() self.assertTrue(self._driver.hnas.get_stats.called) (manila.share.driver.ShareDriver._update_share_stats. 
assert_called_once_with(fake_data)) self.assertTrue(self.mock_log.info.called) @ddt.data(snapshot_nfs, snapshot_cifs, snapshot_mount_support_nfs, snapshot_mount_support_cifs) def test_ensure_snapshot(self, snapshot): result = self._driver.ensure_snapshot('context', snapshot) if snapshot['share'].get('mount_snapshot_support'): expected = [ self._get_export( snapshot['id'], snapshot['share']['share_proto'], self._driver.hnas_evs_ip, False, is_snapshot=True), self._get_export( snapshot['id'], snapshot['share']['share_proto'], self._driver.hnas_admin_network_ip, True, is_snapshot=True)] if snapshot['share']['share_proto'].lower() == 'nfs': ssh.HNASSSHBackend.check_export.assert_called_once_with( snapshot['id'], is_snapshot=True) self.assertFalse(ssh.HNASSSHBackend.check_cifs.called) else: ssh.HNASSSHBackend.check_cifs.assert_called_once_with( snapshot['id']) self.assertFalse(ssh.HNASSSHBackend.check_export.called) else: expected = None ssh.HNASSSHBackend.check_directory.assert_called_once_with( snapshot['provider_location']) self.assertEqual(expected, result) def test_manage_existing_snapshot(self): self.mock_object(ssh.HNASSSHBackend, 'check_directory', mock.Mock(return_value=True)) self.mock_object(self._driver, '_ensure_snapshot', mock.Mock(return_value=[])) path_info = manage_snapshot['provider_location'].split('/') hnas_snapshot_id = path_info[3] out = self._driver.manage_existing_snapshot(manage_snapshot, {'size': 20}) ssh.HNASSSHBackend.check_directory.assert_called_with( '/snapshots/aa4a7710-f326-41fb-ad18-b4ad587fc87a' '/snapshot18-05-2106') self._driver._ensure_snapshot.assert_called_with( manage_snapshot, hnas_snapshot_id) self.assertEqual(20, out['size']) self.assertTrue(self.mock_log.debug.called) self.assertTrue(self.mock_log.info.called) @ddt.data(None, exception.HNASItemNotFoundException('Fake error.')) def test_manage_existing_snapshot_with_mount_support(self, exc): export_locations = [{ 'path': '172.24.44.10:/snapshots/' 
'3377b015-a695-4a5a-8aa5-9b931b023380'}] self.mock_object(ssh.HNASSSHBackend, 'check_directory', mock.Mock(return_value=True)) self.mock_object(self._driver, '_ensure_snapshot', mock.Mock(return_value=[], side_effect=exc)) self.mock_object(self._driver, '_get_export_locations', mock.Mock(return_value=export_locations)) if exc: self.mock_object(self._driver, '_create_export') path_info = snapshot_mount_support_nfs['provider_location'].split('/') hnas_snapshot_id = path_info[3] out = self._driver.manage_existing_snapshot( snapshot_mount_support_nfs, {'size': 20, 'export_locations': export_locations}) ssh.HNASSSHBackend.check_directory.assert_called_with( '/snapshots/62125744-fcdd-4f55-a8c1-d1498102f634' '/3377b015-a695-4a5a-8aa5-9b931b023380') self._driver._ensure_snapshot.assert_called_with( snapshot_mount_support_nfs, hnas_snapshot_id) self._driver._get_export_locations.assert_called_with( snapshot_mount_support_nfs['share']['share_proto'], hnas_snapshot_id, is_snapshot=True) if exc: self._driver._create_export.assert_called_with( snapshot_mount_support_nfs['share_id'], snapshot_mount_support_nfs['share']['share_proto'], snapshot_id=hnas_snapshot_id) self.assertEqual(20, out['size']) self.assertEqual(export_locations, out['export_locations']) self.assertTrue(self.mock_log.debug.called) self.assertTrue(self.mock_log.info.called) @ddt.data('fake_size', '128GB', '512 GB', {'size': 128}) def test_manage_snapshot_invalid_size_exception(self, size): self.assertRaises(exception.ManageInvalidShareSnapshot, self._driver.manage_existing_snapshot, manage_snapshot, {'size': size}) def test_manage_snapshot_size_not_provided_exception(self): self.assertRaises(exception.ManageInvalidShareSnapshot, self._driver.manage_existing_snapshot, manage_snapshot, {}) @ddt.data('/root/snapshot_id', '/snapshots/share1/snapshot_id', '/directory1', 'snapshots/share1/snapshot_id') def test_manage_snapshot_invalid_path_exception(self, path): snap_copy = manage_snapshot.copy() 
snap_copy['provider_location'] = path self.assertRaises(exception.ManageInvalidShareSnapshot, self._driver.manage_existing_snapshot, snap_copy, {'size': 20}) self.assertTrue(self.mock_log.debug.called) def test_manage_inexistent_snapshot_exception(self): self.mock_object(ssh.HNASSSHBackend, 'check_directory', mock.Mock(return_value=False)) self.assertRaises(exception.ManageInvalidShareSnapshot, self._driver.manage_existing_snapshot, manage_snapshot, {'size': 20}) self.assertTrue(self.mock_log.debug.called) def test_unmanage_snapshot(self): self._driver.unmanage_snapshot(snapshot_nfs) self.assertTrue(self.mock_log.info.called) @ddt.data({'snap': snapshot_nfs, 'exc': None}, {'snap': snapshot_cifs, 'exc': None}, {'snap': snapshot_nfs, 'exc': exception.HNASNothingToCloneException('fake')}, {'snap': snapshot_cifs, 'exc': exception.HNASNothingToCloneException('fake')}) @ddt.unpack def test_revert_to_snapshot(self, exc, snap): self.mock_object(driver.HitachiHNASDriver, "_check_fs_mounted") self.mock_object(ssh.HNASSSHBackend, 'tree_delete') self.mock_object(ssh.HNASSSHBackend, 'vvol_create') self.mock_object(ssh.HNASSSHBackend, 'quota_add') self.mock_object(ssh.HNASSSHBackend, 'tree_clone', mock.Mock(side_effect=exc)) self._driver.revert_to_snapshot('context', snap, None, None) driver.HitachiHNASDriver._check_fs_mounted.assert_called_once_with() ssh.HNASSSHBackend.tree_delete.assert_called_once_with( '/'.join(('/shares', snap['share_id']))) ssh.HNASSSHBackend.vvol_create.assert_called_once_with( snap['share_id']) ssh.HNASSSHBackend.quota_add.assert_called_once_with( snap['share_id'], 2) ssh.HNASSSHBackend.tree_clone.assert_called_once_with( '/'.join(('/snapshots', snap['share_id'], snap['id'])), '/'.join(('/shares', snap['share_id']))) ssh.HNASSSHBackend.check_directory.assert_called_once_with( snap['provider_location']) if exc: self.assertTrue(self.mock_log.warning.called) self.assertTrue(self.mock_log.info.called) def test_nfs_snapshot_update_access_allow(self): access1 
= { 'access_type': 'ip', 'access_to': '172.24.10.10', } access2 = { 'access_type': 'ip', 'access_to': '172.31.20.20', } access_list = [access1, access2] self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule") self._driver.snapshot_update_access('ctxt', snapshot_nfs, access_list, access_list, []) ssh.HNASSSHBackend.update_nfs_access_rule.assert_called_once_with( [access1['access_to'] + '(ro)', access2['access_to'] + '(ro)'], snapshot_id=snapshot_nfs['id']) ssh.HNASSSHBackend.check_directory.assert_called_once_with( snapshot_nfs['provider_location']) self.assertTrue(self.mock_log.debug.called) def test_nfs_snapshot_update_access_deny(self): access1 = { 'access_type': 'ip', 'access_to': '172.24.10.10', } self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule") self._driver.snapshot_update_access('ctxt', snapshot_nfs, [], [], [access1]) ssh.HNASSSHBackend.update_nfs_access_rule.assert_called_once_with( [], snapshot_id=snapshot_nfs['id']) ssh.HNASSSHBackend.check_directory.assert_called_once_with( snapshot_nfs['provider_location']) self.assertTrue(self.mock_log.debug.called) def test_nfs_snapshot_update_access_invalid_access_type(self): access1 = { 'access_type': 'user', 'access_to': 'user1', } self.assertRaises(exception.InvalidSnapshotAccess, self._driver.snapshot_update_access, 'ctxt', snapshot_nfs, [access1], [], []) ssh.HNASSSHBackend.check_directory.assert_called_once_with( snapshot_nfs['provider_location']) def test_cifs_snapshot_update_access_allow(self): access1 = { 'access_type': 'user', 'access_to': 'fake_user1', } self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access') self._driver.snapshot_update_access('ctxt', snapshot_cifs, [access1], [access1], []) ssh.HNASSSHBackend.cifs_allow_access.assert_called_with( snapshot_cifs['id'], access1['access_to'], 'ar', is_snapshot=True) ssh.HNASSSHBackend.check_directory.assert_called_once_with( snapshot_cifs['provider_location']) self.assertTrue(self.mock_log.debug.called) def 
test_cifs_snapshot_update_access_deny(self): access1 = { 'access_type': 'user', 'access_to': 'fake_user1', } self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access') self._driver.snapshot_update_access('ctxt', snapshot_cifs, [], [], [access1]) ssh.HNASSSHBackend.cifs_deny_access.assert_called_with( snapshot_cifs['id'], access1['access_to'], is_snapshot=True) ssh.HNASSSHBackend.check_directory.assert_called_once_with( snapshot_cifs['provider_location']) self.assertTrue(self.mock_log.debug.called) def test_cifs_snapshot_update_access_recovery_mode(self): access1 = { 'access_type': 'user', 'access_to': 'fake_user1', } access2 = { 'access_type': 'user', 'access_to': 'HDS\\fake_user2', } access_list = [access1, access2] permission_list = [('fake_user1', 'ar'), ('HDS\\fake_user2', 'ar')] formatted_user = r'"\{1}{0}\{1}"'.format(access2['access_to'], '"') self.mock_object(ssh.HNASSSHBackend, 'list_cifs_permissions', mock.Mock(return_value=permission_list)) self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access') self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access') self._driver.snapshot_update_access('ctxt', snapshot_cifs, access_list, [], []) ssh.HNASSSHBackend.list_cifs_permissions.assert_called_once_with( snapshot_cifs['id']) ssh.HNASSSHBackend.cifs_deny_access.assert_called_with( snapshot_cifs['id'], formatted_user, is_snapshot=True) ssh.HNASSSHBackend.cifs_allow_access.assert_called_with( snapshot_cifs['id'], access2['access_to'].replace('\\', '\\\\'), 'ar', is_snapshot=True) ssh.HNASSSHBackend.check_directory.assert_called_once_with( snapshot_cifs['provider_location']) self.assertTrue(self.mock_log.debug.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hitachi/hnas/test_ssh.py0000664000175000017500000020007000000000000025010 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from unittest import mock import ddt from oslo_concurrency import processutils as putils from oslo_config import cfg import paramiko from manila import exception from manila.share.drivers.hitachi.hnas import ssh from manila import ssh_utils from manila import test CONF = cfg.CONF HNAS_RESULT_empty = "" HNAS_RESULT_limits = """ Filesystem Ensure on span fake_fs: Current capacity 50GiB Thin provision: disabled Filesystem is confined to: 100GiB (Run 'filesystem-confine') Free space on span allows expansion to: 143GiB (Run 'span-expand') Chunk size allows growth to: 1069GiB (This is a conservative estimate) Largest filesystem that can be checked: 262144GiB (This is a hard limit) This server model allows growth to: 262144GiB (Upgrade the server) """ HNAS_RESULT_expdel = """Deleting the export '/dir1' on fs 'fake_fs'... NFS Export Delete: Export successfully deleted""" HNAS_RESULT_vvoldel = """ Warning: Clearing dangling space trackers from empty vivol""" HNAS_RESULT_selectfs = "Current selected file system: fake_fs, number(1)" HNAS_RESULT_expadd = "NFS Export Add: Export added successfully" HNAS_RESULT_vvol = """vvol_test email : root : /vvol_test tag : 39 usage bytes : 0 B files: 1 last modified: 2015-06-23 22:36:12.830698800+00:00""" HNAS_RESULT_vvol_error = "The virtual volume does not exist." HNAS_RESULT_mount = """ \ Request to mount file system fake_fs submitted successfully. 
File system fake_fs successfully mounted.""" HNAS_RESULT_quota = """Type : Explicit Target : ViVol: vvol_test Usage : 1 GB Limit : 5 GB (Hard) Warning : Unset Critical : Unset Reset : 5% (51.2 MB) File Count : 1 Limit : Unset Warning : Unset Critical : Unset Reset : 5% (0) Generate Events : Disabled Global id : 28a3c9f8-ae05-11d0-9025-836896aada5d Last modified : 2015-06-23 22:37:17.363660800+00:00 """ HNAS_RESULT_quota_tb = """Type : Explicit Target : ViVol: vvol_test Usage : 1 TB Limit : 1 TB (Hard) Warning : Unset Critical : Unset Reset : 5% (51.2 MB) File Count : 1 Limit : Unset Warning : Unset Critical : Unset Reset : 5% (0) Generate Events : Disabled Global id : 28a3c9f8-ae05-11d0-9025-836896aada5d Last modified : 2015-06-23 22:37:17.363660800+00:00 """ HNAS_RESULT_quota_mb = """Type : Explicit Target : ViVol: vvol_test Usage : 20 MB Limit : 500 MB (Hard) Warning : Unset Critical : Unset Reset : 5% (51.2 MB) File Count : 1 Limit : Unset Warning : Unset Critical : Unset Reset : 5% (0) Generate Events : Disabled Global id : 28a3c9f8-ae05-11d0-9025-836896aada5d Last modified : 2015-06-23 22:37:17.363660800+00:00 """ HNAS_RESULT_quota_unset = """Type : Explicit Target : ViVol: vvol_test Usage : 0 B Limit : Unset Warning : Unset Critical : Unset Reset : 5% (51.2 MB) File Count : 1 Limit : Unset Warning : Unset Critical : Unset Reset : 5% (0) Generate Events : Disabled Global id : 28a3c9f8-ae05-11d0-9025-836896aada5d Last modified : 2015-06-23 22:37:17.363660800+00:00 """ HNAS_RESULT_quota_err = """No quotas matching specified filter criteria. 
""" HNAS_RESULT_export = """Export name: vvol_test Export path: /vvol_test File system label: file_system File system size: 3.969 GB File system free space: 1.848 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Access snapshots: No Display snapshots: No Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting = Use file system default \n Export configuration:\n 127.0.0.2 """ HNAS_RESULT_wrong_export = """Export name: wrong_name Export path: /vvol_test File system label: file_system File system size: 3.969 GB File system free space: 1.848 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Access snapshots: No Display snapshots: No Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting = Use file system default Export configuration: 127.0.0.1""" HNAS_RESULT_exp_no_fs = """ Export name: no_fs Export path: /export_without_fs File system info: *** not available *** Access snapshots: Yes Display snapshots: Yes Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting = Use file system default Export configuration: """ HNAS_RESULT_export_ip = """ Export name: vvol_test Export path: /vvol_test File system label: fake_fs File system size: 3.969 GB File system free space: 1.848 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Access snapshots: No Display snapshots: No Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting = Use file system default Export configuration: 127.0.0.1(rw) """ HNAS_RESULT_export_ip2 = """ Export name: vvol_test Export path: /vvol_test File system label: fake_fs File system size: 3.969 GB File system free space: 1.848 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Access snapshots: No Display snapshots: No Read Caching: Disabled Disaster recovery setting: Recovered = No Transfer setting = Use file 
system default Export configuration: 127.0.0.1(ro) """ HNAS_RESULT_expmod = """Modifying the export '/fake_export' on fs 'fake_fs'... NFS Export Modify: changing configuration options to: 127.0.0.2 NFS Export Modify: Export modified successfully""" HNAS_RESULT_expnotmod = "Export not modified." HNAS_RESULT_job = """tree-operation-job-submit: Request submitted successfully. tree-operation-job-submit: Job id = d933100a-b5f6-11d0-91d9-836896aada5d""" HNAS_RESULT_vvol_list = """vol1 email : root : /shares/vol1 tag : 10 usage bytes : 0 B files: 1 last modified: 2015-07-27 22:25:02.746426000+00:00 vol2 email : root : /shares/vol2 tag : 13 usage bytes : 0 B files: 1 last modified: 2015-07-28 01:30:21.125671700+00:00 vol3 email : root : /shares/vol3 tag : 14 usage bytes : 5 GB (5368709120 B) files: 2 last modified: 2015-07-28 20:23:05.672404600+00:00""" HNAS_RESULT_tree_job_status_fail = """JOB ID : d933100a-b5f6-11d0-91d9-836896aada5d Job request Physical node : 1 EVS : 1 Volume number : 1 File system id : 2ea361c20ed0f80d0000000000000000 File system name : fs1 Source path : "/foo" Creation time : 2013-09-05 23:16:48-07:00 Destination path : "/clone/bar" Ensure destination path exists : true Job state : Job failed Job info Started : 2013-09-05 23:16:48-07:00 Ended : 2013-09-05 23:17:02-07:00 Status : Success Error details : Directories processed : 220 Files processed : 910 Data bytes processed : 34.5 MB (36174754 B) Source directories missing : 0 Source files missing : 0 Source files skipped : 801 Skipping details : 104 symlinks, 452 hard links, 47 block special devices, 25 character devices""" # noqa HNAS_RESULT_job_completed = """JOB ID : ab4211b8-aac8-11ce-91af-39e0822ea368 Job request Physical node : 1 EVS : 1 Volume number : 1 File system id : 2ea361c20ed0f80d0000000000000000 File system name : fs1 Source path : "/foo" Creation time : 2013-09-05 23:16:48-07:00 Destination path : "/clone/bar" Ensure destination path exists : true Job state : Job was completed Job info 
Started : 2013-09-05 23:16:48-07:00 Ended : 2013-09-05 23:17:02-07:00 Status : Success Error details : Directories processed : 220 Files processed : 910 Data bytes processed : 34.5 MB (36174754 B) Source directories missing : 0 Source files missing : 0 Source files skipped : 801 Skipping details : 104 symlinks, 452 hard links, 47 \ block special devices, 25 character devices """ HNAS_RESULT_job_running = """JOB ID : ab4211b8-aac8-11ce-91af-39e0822ea368 Job request Physical node : 1 EVS : 1 Volume number : 1 File system id : 2ea361c20ed0f80d0000000000000000 File system name : fs1 Source path : "/foo" Creation time : 2013-09-05 23:16:48-07:00 Destination path : "/clone/bar" Ensure destination path exists : true Job state : Job is running Job info Started : 2013-09-05 23:16:48-07:00 Ended : 2013-09-05 23:17:02-07:00 Status : Success Error details : Directories processed : 220 Files processed : 910 Data bytes processed : 34.5 MB (36174754 B) Source directories missing : 0 Source files missing : 0 Source files skipped : 801 Skipping details : 104 symlinks, 452 hard links, 47 \ block special devices, 25 character devices """ HNAS_RESULT_df = """ ID Label EVS Size Used Snapshots Deduped \ Avail Thin ThinSize ThinAvail FS Type ---- ------------- --- -------- -------------- --------- ------- \ ------------- ---- -------- --------- ------------------- 1051 FS-ManilaDev1 3 70.00 GB 10.00 GB (75%) 0 B (0%) NA \ 18.3 GB (25%) No 4 KB,WFS-2,128 DSBs """ HNAS_RESULT_df_tb = """ ID Label EVS Size Used Snapshots Deduped \ Avail Thin ThinSize ThinAvail FS Type ---- ------------- --- -------- -------------- --------- ------- \ ------------- ---- -------- --------- ------------------- 1051 FS-ManilaDev1 3.00 7.00 TB 2 TB (75%) 0 B (0%) NA \ 18.3 GB (25%) No 4 KB,WFS-2,128 DSBs """ HNAS_RESULT_df_dedupe_on = """ ID Label EVS Size Used Snapshots Deduped \ Avail Thin ThinSize ThinAvail FS Type ---- ------------- --- -------- -------------- --------- ------- \ ------------- ---- -------- 
--------- ------------------- 1051 FS-ManilaDev1 3.00 7.00 TB 2 TB (75%) NA 0 B (0%) \ 18.3 GB (25%) No 4 KB,WFS-2,128 DSBs,dedupe enabled """ HNAS_RESULT_df_unmounted = """ ID Label EVS Size Used Snapshots Deduped \ Avail Thin ThinSize ThinAvail FS Type ---- ------------- --- -------- -------------- --------- ------- \ ------------- ---- -------- --------- ------------------- 1051 FS-ManilaDev1 3 70.00 GB Not mounted 0 B (0%) NA \ 18.3 GB (25%) No 4 KB,WFS-2,128 DSBs """ HNAS_RESULT_df_error = """File system file_system not found""" HNAS_RESULT_mounted_filesystem = """ file_system 1055 fake_span Mount 2 4 5 1 """ HNAS_RESULT_unmounted_filesystem = """ file_system 1055 fake_span Umount 2 4 5 1 """ HNAS_RESULT_cifs_list = """ Share name: vvol_test Share path: \\\\shares\\vvol_test Share users: 2 Share online: Yes Share comment: Cache options: Manual local caching for documents ABE enabled: Yes Continuous Availability: No Access snapshots: No Display snapshots: No ShadowCopy enabled: Yes Lower case on create: No Follow symlinks: Yes Follow global symlinks: No Scan for viruses: Yes File system label: file_system File system size: 9.938 GB File system free space: 6.763 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Disaster recovery setting: Recovered = No Transfer setting = Use file system default Home directories: Off Mount point options: """ HNAS_RESULT_different_fs_cifs_list = """ Share name: vvol_test Share path: \\\\shares\\vvol_test Share users: 0 Share online: Yes Share comment: Cache options: Manual local caching for documents ABE enabled: Yes Continuous Availability: No Access snapshots: No Display snapshots: No ShadowCopy enabled: Yes Lower case on create: No Follow symlinks: Yes Follow global symlinks: No Scan for viruses: Yes File system label: different_filesystem File system size: 9.938 GB File system free space: 6.763 GB File system state: formatted = Yes mounted = Yes failed = No thin provisioned = No Disaster 
recovery setting: Recovered = No Transfer setting = Use file system default Home directories: Off Mount point options: """ HNAS_RESULT_list_cifs_permissions = """ \ Displaying the details of the share 'vvol_test' on file system 'filesystem' ... Maximum user count is unlimited Type Permission User/Group U Deny Read NFSv4 user\\user1@domain.com G Deny Change & Read Unix user\\1087 U Allow Full Control Unix user\\1088 U Allow Read Unix user\\1089 ? Deny Full Control NFSv4 user\\user2@company.com X Allow Change & Read Unix user\\1090 """ HNAS_RESULT_check_snap_error = """ \ path-to-object-number/FS-TestCG: Unable to locate component: share1 path-to-object-number/FS-TestCG: Failed to resolve object number""" @ddt.ddt class HNASSSHTestCase(test.TestCase): def setUp(self): super(HNASSSHTestCase, self).setUp() self.ip = '192.168.1.1' self.port = 22 self.user = 'hnas_user' self.password = 'hnas_password' self.default_commands = ['ssc', '127.0.0.1'] self.fs_name = 'file_system' self.evs_ip = '172.24.44.1' self.evs_id = 2 self.ssh_private_key = 'private_key' self.cluster_admin_ip0 = 'fake' self.job_timeout = 30 self.mock_log = self.mock_object(ssh, 'LOG') self._driver_ssh = ssh.HNASSSHBackend(self.ip, self.user, self.password, self.ssh_private_key, self.cluster_admin_ip0, self.evs_id, self.evs_ip, self.fs_name, self.job_timeout) self.vvol = { 'id': 'vvol_test', 'share_proto': 'nfs', 'size': 4, 'host': '127.0.0.1', } self.snapshot = { 'id': 'snapshot_test', 'share_proto': 'nfs', 'size': 4, 'share_id': 'vvol_test', 'host': 'ubuntu@hitachi2#HITACHI2', } self.mock_log.debug.reset_mock() def test_get_stats(self): fake_list_command = ['df', '-a', '-f', self.fs_name] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(return_value=(HNAS_RESULT_df_tb, ""))) total, free, dedupe = self._driver_ssh.get_stats() ssh.HNASSSHBackend._execute.assert_called_with(fake_list_command) self.assertEqual(7168.0, total) self.assertEqual(5120.0, free) self.assertFalse(dedupe) def 
test_get_stats_dedupe_on(self): fake_list_command = ['df', '-a', '-f', self.fs_name] self.mock_object( ssh.HNASSSHBackend, '_execute', mock.Mock(return_value=(HNAS_RESULT_df_dedupe_on, ""))) total, free, dedupe = self._driver_ssh.get_stats() ssh.HNASSSHBackend._execute.assert_called_with(fake_list_command) self.assertEqual(7168.0, total) self.assertEqual(5120.0, free) self.assertTrue(dedupe) def test_get_stats_error(self): fake_list_command = ['df', '-a', '-f', self.fs_name] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=putils.ProcessExecutionError)) self.assertRaises(exception.HNASBackendException, self._driver_ssh.get_stats) ssh.HNASSSHBackend._execute.assert_called_with(fake_list_command) @ddt.data(True, False) def test_nfs_export_add(self, is_snapshot): if is_snapshot: name = '/snapshots/fake_snap' path = '/snapshots/fake_share/fake_snap' else: name = path = '/shares/fake_share' fake_nfs_command = ['nfs-export', 'add', '-S', 'disable', '-c', '127.0.0.1', name, self.fs_name, path] self.mock_object(ssh.HNASSSHBackend, '_execute') if is_snapshot: self._driver_ssh.nfs_export_add('fake_share', snapshot_id='fake_snap') else: self._driver_ssh.nfs_export_add('fake_share') self._driver_ssh._execute.assert_called_with(fake_nfs_command) def test_nfs_export_add_error(self): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr='')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.nfs_export_add, 'vvol_test') self.assertTrue(self.mock_log.exception.called) @ddt.data(True, False) def test_nfs_export_del(self, is_snapshot): if is_snapshot: name = '/snapshots/vvol_test' args = {'snapshot_id': 'vvol_test'} else: name = '/shares/vvol_test' args = {'share_id': 'vvol_test'} fake_nfs_command = ['nfs-export', 'del', name] self.mock_object(ssh.HNASSSHBackend, '_execute') self._driver_ssh.nfs_export_del(**args) self._driver_ssh._execute.assert_called_with(fake_nfs_command) def 
test_nfs_export_del_inexistent_export(self): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='does not exist')])) self._driver_ssh.nfs_export_del('vvol_test') self.assertTrue(self.mock_log.warning.called) def test_nfs_export_del_exception(self): self.assertRaises(exception.HNASBackendException, self._driver_ssh.nfs_export_del) def test_nfs_export_del_execute_error(self): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr='')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.nfs_export_del, 'vvol_test') self.assertTrue(self.mock_log.exception.called) @ddt.data(True, False) def test_cifs_share_add(self, is_snapshot): if is_snapshot: name = 'fake_snap' path = r'\\snapshots\\fake_share\\fake_snap' else: name = 'fake_share' path = r'\\shares\\fake_share' fake_cifs_add_command = ['cifs-share', 'add', '-S', 'disable', '--enable-abe', '--nodefaultsaa', name, self.fs_name, path] self.mock_object(ssh.HNASSSHBackend, '_execute') if is_snapshot: self._driver_ssh.cifs_share_add('fake_share', snapshot_id='fake_snap') else: self._driver_ssh.cifs_share_add('fake_share') self._driver_ssh._execute.assert_called_with(fake_cifs_add_command) def test_cifs_share_add_error(self): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr='')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.cifs_share_add, 'vvol_test') self.assertTrue(self.mock_log.exception.called) def test_cifs_share_del(self): fake_cifs_del_command = ['cifs-share', 'del', '--target-label', self.fs_name, 'vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute') self._driver_ssh.cifs_share_del('vvol_test') self._driver_ssh._execute.assert_called_with(fake_cifs_del_command) def test_cifs_share_del_inexistent_share(self): fake_cifs_del_command = ['cifs-share', 'del', '--target-label', self.fs_name, 'vvol_test'] 
self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=putils.ProcessExecutionError( exit_code=1))) self._driver_ssh.cifs_share_del('vvol_test') self._driver_ssh._execute.assert_called_with(fake_cifs_del_command) self.assertTrue(self.mock_log.warning.called) def test_cifs_share_del_exception(self): fake_cifs_del_command = ['cifs-share', 'del', '--target-label', self.fs_name, 'vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=putils.ProcessExecutionError)) self.assertRaises(exception.HNASBackendException, self._driver_ssh.cifs_share_del, 'vvol_test') self._driver_ssh._execute.assert_called_with(fake_cifs_del_command) def test_get_nfs_host_list(self): self.mock_object(ssh.HNASSSHBackend, "_get_export", mock.Mock( return_value=[ssh.Export(HNAS_RESULT_export)])) host_list = self._driver_ssh.get_nfs_host_list('fake_id') self.assertEqual(['127.0.0.2'], host_list) def test_update_nfs_access_rule_empty_host_list(self): fake_export_command = ['nfs-export', 'mod', '-c', '127.0.0.1', '/snapshots/fake_id'] self.mock_object(ssh.HNASSSHBackend, "_execute") self._driver_ssh.update_nfs_access_rule([], snapshot_id="fake_id") self._driver_ssh._execute.assert_called_with(fake_export_command) def test_update_nfs_access_rule(self): fake_export_command = ['nfs-export', 'mod', '-c', u'"127.0.0.1,127.0.0.2"', '/shares/fake_id'] self.mock_object(ssh.HNASSSHBackend, "_execute") self._driver_ssh.update_nfs_access_rule(['127.0.0.1', '127.0.0.2'], share_id="fake_id") self._driver_ssh._execute.assert_called_with(fake_export_command) def test_update_nfs_access_rule_exception_no_share_provided(self): self.assertRaises(exception.HNASBackendException, self._driver_ssh.update_nfs_access_rule, ['127.0.0.1']) def test_update_nfs_access_rule_exception_error(self): fake_export_command = ['nfs-export', 'mod', '-c', u'"127.0.0.1,127.0.0.2"', '/shares/fake_id'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( 
side_effect=putils.ProcessExecutionError)) self.assertRaises(exception.HNASBackendException, self._driver_ssh.update_nfs_access_rule, ['127.0.0.1', '127.0.0.2'], share_id="fake_id") self._driver_ssh._execute.assert_called_with(fake_export_command) def test_cifs_allow_access(self): fake_cifs_allow_command = ['cifs-saa', 'add', '--target-label', self.fs_name, 'vvol_test', 'fake_user', 'ar'] self.mock_object(ssh.HNASSSHBackend, '_execute') self._driver_ssh.cifs_allow_access('vvol_test', 'fake_user', 'ar') self._driver_ssh._execute.assert_called_with(fake_cifs_allow_command) @ddt.data(True, False) def test_cifs_allow_access_already_allowed_user(self, is_snapshot): fake_cifs_allow_command = ['cifs-saa', 'add', '--target-label', self.fs_name, 'vvol_test', 'fake_user', 'acr'] if not is_snapshot: fake_cifs_allow_command2 = ['cifs-saa', 'change', '--target-label', 'file_system', 'vvol_test', 'fake_user', 'acr'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=[putils.ProcessExecutionError( stderr='already listed as a user'), "Rule modified."])) self._driver_ssh.cifs_allow_access('vvol_test', 'fake_user', 'acr', is_snapshot=is_snapshot) _execute_calls = [mock.call(fake_cifs_allow_command)] if not is_snapshot: _execute_calls.append(mock.call(fake_cifs_allow_command2)) self._driver_ssh._execute.assert_has_calls(_execute_calls) self.assertTrue(self.mock_log.debug.called) @ddt.data(True, False) def test_cifs_allow_access_exception(self, is_snapshot): fake_cifs_allow_command = ['cifs-saa', 'add', '--target-label', self.fs_name, 'vvol_test', 'fake_user', 'acr'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=[putils.ProcessExecutionError( stderr='Could not add user/group fake_user to ' 'share \'vvol_test\'')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.cifs_allow_access, 'vvol_test', 'fake_user', 'acr', is_snapshot=is_snapshot) self._driver_ssh._execute.assert_called_with(fake_cifs_allow_command) def 
test_cifs_update_access_level_exception(self): fake_cifs_allow_command = ['cifs-saa', 'add', '--target-label', self.fs_name, 'vvol_test', 'fake_user', 'acr'] fake_cifs_allow_command2 = ['cifs-saa', 'change', '--target-label', 'file_system', 'vvol_test', 'fake_user', 'acr'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=[putils.ProcessExecutionError( stderr='already listed as a user'), putils.ProcessExecutionError( stderr='Error when trying to modify rule.')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.cifs_allow_access, 'vvol_test', 'fake_user', 'acr') self._driver_ssh._execute.assert_has_calls( [mock.call(fake_cifs_allow_command), mock.call(fake_cifs_allow_command2)]) self.assertTrue(self.mock_log.debug.called) def test_cifs_deny_access(self): fake_cifs_deny_command = ['cifs-saa', 'delete', '--target-label', self.fs_name, 'vvol_test', 'fake_user'] self.mock_object(ssh.HNASSSHBackend, '_execute') self._driver_ssh.cifs_deny_access('vvol_test', 'fake_user') self._driver_ssh._execute.assert_called_with(fake_cifs_deny_command) @ddt.data(True, False) def test_cifs_deny_access_already_deleted_user(self, is_snapshot): fake_cifs_deny_command = ['cifs-saa', 'delete', '--target-label', self.fs_name, 'vvol_test', 'fake_user'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='not listed as a user')])) self._driver_ssh.cifs_deny_access('vvol_test', 'fake_user', is_snapshot=is_snapshot) self._driver_ssh._execute.assert_called_with(fake_cifs_deny_command) self.assertTrue(self.mock_log.warning.called) def test_cifs_deny_access_backend_exception(self): fake_cifs_deny_command = ['cifs-saa', 'delete', '--target-label', self.fs_name, 'vvol_test', 'fake_user'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=[putils.ProcessExecutionError( stderr='Unexpected error')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.cifs_deny_access, 
'vvol_test', 'fake_user') self._driver_ssh._execute.assert_called_with(fake_cifs_deny_command) def test_list_cifs_permission(self): fake_cifs_list_command = ['cifs-saa', 'list', '--target-label', self.fs_name, 'vvol_test'] expected_out = ssh.CIFSPermissions(HNAS_RESULT_list_cifs_permissions) self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( return_value=(HNAS_RESULT_list_cifs_permissions, ''))) out = self._driver_ssh.list_cifs_permissions('vvol_test') for i in range(len(expected_out.permission_list)): self.assertEqual(expected_out.permission_list[i], out[i]) self._driver_ssh._execute.assert_called_with(fake_cifs_list_command) def test_list_cifs_no_permissions_added(self): fake_cifs_list_command = ['cifs-saa', 'list', '--target-label', self.fs_name, 'vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='No entries for this share')])) out = self._driver_ssh.list_cifs_permissions('vvol_test') self.assertEqual([], out) self._driver_ssh._execute.assert_called_with(fake_cifs_list_command) self.assertTrue(self.mock_log.debug.called) def test_list_cifs_exception(self): fake_cifs_list_command = ['cifs-saa', 'list', '--target-label', self.fs_name, 'vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='Error.')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.list_cifs_permissions, "vvol_test") self._driver_ssh._execute.assert_called_with(fake_cifs_list_command) self.assertTrue(self.mock_log.exception.called) def test_tree_clone_nothing_to_clone(self): fake_tree_clone_command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='Cannot find any clonable files in the source directory' )])) self.assertRaises(exception.HNASNothingToCloneException, self._driver_ssh.tree_clone, "/src", 
"/dst") self._driver_ssh._execute.assert_called_with(fake_tree_clone_command) def test_tree_clone_error_cloning(self): fake_tree_clone_command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError(stderr='')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.tree_clone, "/src", "/dst") self._driver_ssh._execute.assert_called_with(fake_tree_clone_command) self.assertTrue(self.mock_log.exception.called) def test_tree_clone(self): fake_tree_clone_command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[(HNAS_RESULT_job, ''), (HNAS_RESULT_job_completed, '')])) self._driver_ssh.tree_clone("/src", "/dst") self._driver_ssh._execute.assert_any_call(fake_tree_clone_command) self.assertTrue(self.mock_log.debug.called) def test_tree_clone_job_failed(self): fake_tree_clone_command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[(HNAS_RESULT_job, ''), (HNAS_RESULT_tree_job_status_fail, '')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.tree_clone, "/src", "/dst") self._driver_ssh._execute.assert_any_call(fake_tree_clone_command) self.assertTrue(self.mock_log.error.called) def test_tree_clone_job_timeout(self): fake_tree_clone_command = ['tree-clone-job-submit', '-e', '-f', self.fs_name, '/src', '/dst'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[(HNAS_RESULT_job, ''), (HNAS_RESULT_job_running, ''), (HNAS_RESULT_job_running, ''), (HNAS_RESULT_job_running, ''), (HNAS_RESULT_empty, '')])) self.mock_object(time, "time", mock.Mock(side_effect=[0, 0, 200, 200])) self.mock_object(time, "sleep") self.assertRaises(exception.HNASBackendException, self._driver_ssh.tree_clone, "/src", "/dst") 
self._driver_ssh._execute.assert_any_call(fake_tree_clone_command) self.assertTrue(self.mock_log.error.called) def test_tree_delete_path_does_not_exist(self): fake_tree_delete_command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, '/path'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='Source path: Cannot access')] )) self._driver_ssh.tree_delete("/path") self.assertTrue(self.mock_log.warning.called) self._driver_ssh._execute.assert_called_with(fake_tree_delete_command) def test_tree_delete_error(self): fake_tree_delete_command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, '/path'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='')] )) self.assertRaises(exception.HNASBackendException, self._driver_ssh.tree_delete, "/path") self.assertTrue(self.mock_log.exception.called) self._driver_ssh._execute.assert_called_with(fake_tree_delete_command) def test_create_directory(self): locked_selectfs_args = ['create', '/path'] self.mock_object(ssh.HNASSSHBackend, "_locked_selectfs") self.mock_object(ssh.HNASSSHBackend, "check_directory", mock.Mock(return_value=True)) self._driver_ssh.create_directory("/path") self._driver_ssh._locked_selectfs.assert_called_with( *locked_selectfs_args) ssh.HNASSSHBackend.check_directory.assert_called_once_with('/path') self.assertFalse(self.mock_log.warning.called) def test_create_directory_context_change_fail(self): locked_selectfs_args = ['create', '/path'] self.mock_object(time, 'sleep') self.mock_object(ssh.HNASSSHBackend, "_locked_selectfs") self.mock_object(ssh.HNASSSHBackend, "check_directory", mock.Mock(return_value=False)) self.assertRaises(exception.HNASSSCContextChange, self._driver_ssh.create_directory, "/path") self._driver_ssh._locked_selectfs.assert_called_with( *locked_selectfs_args) ssh.HNASSSHBackend.check_directory.assert_called_with('/path') 
self.assertTrue(self.mock_log.warning.called) def test_create_directory_context_change_success(self): locked_selectfs_args = ['create', '/path'] self.mock_object(time, 'sleep') self.mock_object(ssh.HNASSSHBackend, "_locked_selectfs") self.mock_object(ssh.HNASSSHBackend, "check_directory", mock.Mock(side_effect=[False, False, True])) self._driver_ssh.create_directory("/path") self._driver_ssh._locked_selectfs.assert_called_with( *locked_selectfs_args) ssh.HNASSSHBackend.check_directory.assert_called_with('/path') self.assertTrue(self.mock_log.warning.called) def test_delete_directory(self): locked_selectfs_args = ['delete', '/path'] self.mock_object(ssh.HNASSSHBackend, "_locked_selectfs") self.mock_object(ssh.HNASSSHBackend, "check_directory", mock.Mock(return_value=False)) self._driver_ssh.delete_directory("/path") self._driver_ssh._locked_selectfs.assert_called_with( *locked_selectfs_args) ssh.HNASSSHBackend.check_directory.assert_called_once_with('/path') self.assertFalse(self.mock_log.debug.called) def test_delete_directory_directory_not_empty(self): locked_selectfs_args = ['delete', '/path'] self.mock_object(ssh.HNASSSHBackend, "_locked_selectfs", mock.Mock( side_effect=exception.HNASDirectoryNotEmpty(msg='fake'))) self.mock_object(ssh.HNASSSHBackend, "check_directory") self._driver_ssh.delete_directory("/path") self._driver_ssh._locked_selectfs.assert_called_with( *locked_selectfs_args) ssh.HNASSSHBackend.check_directory.assert_not_called() self.assertFalse(self.mock_log.debug.called) def test_delete_directory_context_change_fail(self): locked_selectfs_args = ['delete', '/path'] self.mock_object(time, 'sleep') self.mock_object(ssh.HNASSSHBackend, "_locked_selectfs") self.mock_object(ssh.HNASSSHBackend, "check_directory", mock.Mock(return_value=True)) self.assertRaises(exception.HNASSSCContextChange, self._driver_ssh.delete_directory, "/path") self._driver_ssh._locked_selectfs.assert_called_with( *locked_selectfs_args) 
ssh.HNASSSHBackend.check_directory.assert_called_with('/path') self.assertTrue(self.mock_log.debug.called) def test_delete_directory_context_change_success(self): locked_selectfs_args = ['delete', '/path'] self.mock_object(time, 'sleep') self.mock_object(ssh.HNASSSHBackend, "_locked_selectfs") self.mock_object(ssh.HNASSSHBackend, "check_directory", mock.Mock(side_effect=[True, True, False])) self._driver_ssh.delete_directory("/path") self._driver_ssh._locked_selectfs.assert_called_with( *locked_selectfs_args) ssh.HNASSSHBackend.check_directory.assert_called_with('/path') self.assertTrue(self.mock_log.debug.called) def test_check_directory(self): path = ("/snapshots/" + self.snapshot['share_id'] + "/" + self.snapshot['id']) check_snap_args = ['path-to-object-number', '-f', self.fs_name, path] self.mock_object(ssh.HNASSSHBackend, '_execute') out = self._driver_ssh.check_directory(path) self.assertTrue(out) self._driver_ssh._execute.assert_called_with(check_snap_args) def test_check_directory_retry(self): error_msg = ("Unable to run path-to-object-number as " "path-to-object-number is currently running on volume " "39.") path = ("/snapshots/" + self.snapshot['share_id'] + "/" + self.snapshot['id']) check_snap_args = ['path-to-object-number', '-f', self.fs_name, path] self.mock_object(time, "sleep") self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=[putils.ProcessExecutionError( stdout=error_msg), putils.ProcessExecutionError( stdout=error_msg), 'Object number: 0x45a4'])) out = self._driver_ssh.check_directory(path) self.assertIs(True, out) self._driver_ssh._execute.assert_called_with(check_snap_args) def test_check_inexistent_snapshot(self): path = "/path/snap1/snapshot07-08-2016" check_snap_args = ['path-to-object-number', '-f', self.fs_name, path] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=putils.ProcessExecutionError( stdout=HNAS_RESULT_check_snap_error))) out = self._driver_ssh.check_directory(path) 
self.assertFalse(out) self._driver_ssh._execute.assert_called_with(check_snap_args) def test_check_directory_error(self): path = "/path/snap1/snapshot07-08-2016" check_snap_args = ['path-to-object-number', '-f', self.fs_name, path] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=putils.ProcessExecutionError( stdout="Internal Server Error."))) self.assertRaises(exception.HNASBackendException, self._driver_ssh.check_directory, path) self._driver_ssh._execute.assert_called_with(check_snap_args) def test_check_fs_mounted_true(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock(return_value=(HNAS_RESULT_df, ''))) self.assertTrue(self._driver_ssh.check_fs_mounted()) def test_check_fs_mounted_false(self): self.mock_object( ssh.HNASSSHBackend, "_execute", mock.Mock(return_value=(HNAS_RESULT_df_unmounted, ''))) self.assertFalse(self._driver_ssh.check_fs_mounted()) def test_check_fs_mounted_error(self): self.mock_object( ssh.HNASSSHBackend, "_execute", mock.Mock(return_value=(HNAS_RESULT_df_error, ''))) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_fs_mounted) def test_mount_already_mounted(self): fake_mount_command = ['mount', self.fs_name] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=putils.ProcessExecutionError(stderr=''))) self.assertRaises( exception.HNASBackendException, self._driver_ssh.mount) self._driver_ssh._execute.assert_called_with(fake_mount_command) def test_vvol_create(self): fake_vvol_create_command = ['virtual-volume', 'add', '--ensure', self.fs_name, 'vvol', '/shares/vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute") self._driver_ssh.vvol_create("vvol") self._driver_ssh._execute.assert_called_with(fake_vvol_create_command) def test_vvol_create_error(self): fake_vvol_create_command = ['virtual-volume', 'add', '--ensure', self.fs_name, 'vvol', '/shares/vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock(side_effect=putils.ProcessExecutionError)) 
self.assertRaises(exception.HNASBackendException, self._driver_ssh.vvol_create, "vvol") self._driver_ssh._execute.assert_called_with(fake_vvol_create_command) def test_vvol_delete_vvol_does_not_exist(self): fake_vvol_delete_command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, '/shares/vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='Source path: Cannot access')] )) self._driver_ssh.vvol_delete("vvol") self.assertTrue(self.mock_log.warning.called) self._driver_ssh._execute.assert_called_with(fake_vvol_delete_command) def test_vvol_delete_error(self): fake_vvol_delete_command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name, '/shares/vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='')] )) self.assertRaises(exception.HNASBackendException, self._driver_ssh.vvol_delete, "vvol") self.assertTrue(self.mock_log.exception.called) self._driver_ssh._execute.assert_called_with(fake_vvol_delete_command) def test_quota_add(self): fake_add_quota_command = ['quota', 'add', '--usage-limit', '1G', '--usage-hard-limit', 'yes', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute") self._driver_ssh.quota_add('vvol', 1) self._driver_ssh._execute.assert_called_with(fake_add_quota_command) def test_modify_quota(self): fake_modify_quota_command = ['quota', 'mod', '--usage-limit', '1G', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute") self._driver_ssh.modify_quota('vvol', 1) self._driver_ssh._execute.assert_called_with(fake_modify_quota_command) def test_quota_add_error(self): fake_add_quota_command = ['quota', 'add', '--usage-limit', '1G', '--usage-hard-limit', 'yes', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock(side_effect=putils.ProcessExecutionError)) self.assertRaises(exception.HNASBackendException, self._driver_ssh.quota_add, 'vvol', 1) 
self._driver_ssh._execute.assert_called_with(fake_add_quota_command) def test_modify_quota_error(self): fake_modify_quota_command = ['quota', 'mod', '--usage-limit', '1G', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock(side_effect=putils.ProcessExecutionError)) self.assertRaises(exception.HNASBackendException, self._driver_ssh.modify_quota, 'vvol', 1) self._driver_ssh._execute.assert_called_with(fake_modify_quota_command) def test_check_vvol(self): fake_check_vvol_command = ['virtual-volume', 'list', '--verbose', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=putils.ProcessExecutionError(stderr=''))) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_vvol, 'vvol') self._driver_ssh._execute.assert_called_with(fake_check_vvol_command) def test_check_quota(self): fake_check_quota_command = ['quota', 'list', '--verbose', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=('No quotas matching specified filter criteria', ''))) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_quota, 'vvol') self._driver_ssh._execute.assert_called_with(fake_check_quota_command) def test_check_quota_error(self): fake_check_quota_command = ['quota', 'list', '--verbose', self.fs_name, 'vvol'] self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=putils.ProcessExecutionError)) self.assertRaises(exception.HNASBackendException, self._driver_ssh.check_quota, 'vvol') self._driver_ssh._execute.assert_called_with(fake_check_quota_command) @ddt.data(True, False) def test_check_export(self, is_snapshot): self.mock_object(ssh.HNASSSHBackend, "_get_export", mock.Mock( return_value=[ssh.Export(HNAS_RESULT_export)])) self._driver_ssh.check_export("vvol_test", is_snapshot) def test_check_export_error(self): self.mock_object(ssh.HNASSSHBackend, "_get_export", mock.Mock( 
return_value=[ssh.Export(HNAS_RESULT_wrong_export)])) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_export, "vvol_test") def test_check_cifs(self): check_cifs_share_command = ['cifs-share', 'list', 'vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( return_value=[HNAS_RESULT_cifs_list, ''])) self._driver_ssh.check_cifs('vvol_test') self._driver_ssh._execute.assert_called_with(check_cifs_share_command) def test_check_cifs_inexistent_share(self): check_cifs_share_command = ['cifs-share', 'list', 'wrong_vvol'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError( stderr='Export wrong_vvol does not exist on backend ' 'anymore.')])) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_cifs, 'wrong_vvol') self._driver_ssh._execute.assert_called_with(check_cifs_share_command) def test_check_cifs_exception(self): check_cifs_share_command = ['cifs-share', 'list', 'wrong_vvol'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr='Error.')])) self.assertRaises(exception.HNASBackendException, self._driver_ssh.check_cifs, 'wrong_vvol') self._driver_ssh._execute.assert_called_with(check_cifs_share_command) def test_check_cifs_different_fs_exception(self): check_cifs_share_command = ['cifs-share', 'list', 'vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( return_value=[HNAS_RESULT_different_fs_cifs_list, ''])) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.check_cifs, 'vvol_test') self._driver_ssh._execute.assert_called_with(check_cifs_share_command) def test_is_cifs_in_use(self): check_cifs_share_command = ['cifs-share', 'list', 'vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( return_value=[HNAS_RESULT_cifs_list, ''])) out = self._driver_ssh.is_cifs_in_use('vvol_test') self.assertTrue(out) 
self._driver_ssh._execute.assert_called_with(check_cifs_share_command) def test_is_cifs_without_use(self): check_cifs_share_command = ['cifs-share', 'list', 'vvol_test'] self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( return_value=[HNAS_RESULT_different_fs_cifs_list, ''])) out = self._driver_ssh.is_cifs_in_use('vvol_test') self.assertFalse(out) self._driver_ssh._execute.assert_called_with(check_cifs_share_command) def test_get_share_quota(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota, ''))) result = self._driver_ssh.get_share_quota("vvol_test") self.assertEqual(5, result) @ddt.data(HNAS_RESULT_quota_unset, HNAS_RESULT_quota_err) def test_get_share_quota_errors(self, hnas_output): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(hnas_output, ''))) result = self._driver_ssh.get_share_quota("vvol_test") self.assertIsNone(result) def test_get_share_quota_tb(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_tb, ''))) result = self._driver_ssh.get_share_quota("vvol_test") self.assertEqual(1024, result) def test_get_share_quota_mb(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_mb, ''))) self.assertRaises(exception.HNASBackendException, self._driver_ssh.get_share_quota, "vvol_test") def test_get_share_usage(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota, ''))) self.assertEqual(1, self._driver_ssh.get_share_usage("vvol_test")) def test_get_share_usage_error(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_err, ''))) self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh.get_share_usage, "vvol_test") def test_get_share_usage_mb(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_mb, ''))) self.assertEqual(0.01953125, 
self._driver_ssh.get_share_usage( "vvol_test")) def test_get_share_usage_tb(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( return_value=(HNAS_RESULT_quota_tb, ''))) self.assertEqual(1024, self._driver_ssh.get_share_usage("vvol_test")) @ddt.data(True, False) def test__get_share_export(self, is_snapshot): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( return_value=[HNAS_RESULT_export_ip, ''])) export_list = self._driver_ssh._get_export( name='fake_name', is_snapshot=is_snapshot) path = '/shares/fake_name' if is_snapshot: path = '/snapshots/fake_name' command = ['nfs-export', 'list ', path] self._driver_ssh._execute.assert_called_with(command) self.assertEqual('vvol_test', export_list[0].export_name) self.assertEqual('/vvol_test', export_list[0].export_path) self.assertEqual('fake_fs', export_list[0].file_system_label) self.assertEqual('Yes', export_list[0].mounted) self.assertIn('rw', export_list[0].export_configuration[0]) def test__get_share_export_fs_not_available(self): self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( return_value=[HNAS_RESULT_exp_no_fs, ''])) export_list = self._driver_ssh._get_export(name='fake_name') path = '/shares/fake_name' command = ['nfs-export', 'list ', path] self._driver_ssh._execute.assert_called_with(command) self.assertEqual('no_fs', export_list[0].export_name) self.assertEqual('/export_without_fs', export_list[0].export_path) self.assertEqual('*** not available ***', export_list[0].file_system_info) self.assertEqual([], export_list[0].export_configuration) not_in_keys = ['file_system_label', 'file_system_size', 'formatted', 'file_system_free_space', 'file_system_state', 'failed', 'mounted', 'thin_provisioned'] for key in not_in_keys: self.assertNotIn(key, export_list[0].__dict__) def test__get_share_export_exception_not_found(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=putils.ProcessExecutionError( stderr="NFS Export List: Export 'id' does not exist."))) 
self.assertRaises(exception.HNASItemNotFoundException, self._driver_ssh._get_export, 'fake_id') def test__get_share_export_exception_error(self): self.mock_object(ssh.HNASSSHBackend, "_execute", mock.Mock( side_effect=putils.ProcessExecutionError(stderr="Some error.") )) self.assertRaises(exception.HNASBackendException, self._driver_ssh._get_export, 'fake_id') def test__execute(self): key = self.ssh_private_key commands = ['tree-clone-job-submit', '-e', '/src', '/dst'] concat_command = ('ssc --smuauth fake console-context --evs 2 ' 'tree-clone-job-submit -e /src /dst') self.mock_object(paramiko.SSHClient, 'connect') self.mock_object(putils, 'ssh_execute', mock.Mock(return_value=[HNAS_RESULT_job, ''])) output, err = self._driver_ssh._execute(commands) putils.ssh_execute.assert_called_once_with(mock.ANY, concat_command, check_exit_code=True) paramiko.SSHClient.connect.assert_called_with(self.ip, username=self.user, key_filename=key, look_for_keys=False, timeout=None, password=self.password, port=self.port, banner_timeout=None) self.assertIn('Request submitted successfully.', output) def test__execute_ssh_exception(self): commands = ['tree-clone-job-submit', '-e', '/src', '/dst'] concat_command = ('ssc --smuauth fake console-context --evs 2 ' 'tree-clone-job-submit -e /src /dst') msg = 'Failed to establish SSC connection' self.mock_object(time, "sleep") self.mock_object(paramiko.SSHClient, 'connect') self.mock_object(putils, 'ssh_execute', mock.Mock(side_effect=[ putils.ProcessExecutionError(stderr=msg), putils.ProcessExecutionError(stderr='Invalid!')])) self.mock_object(ssh_utils.SSHPool, "item", mock.Mock(return_value=paramiko.SSHClient())) self.mock_object(paramiko.SSHClient, "set_missing_host_key_policy") self.assertRaises(putils.ProcessExecutionError, self._driver_ssh._execute, commands) putils.ssh_execute.assert_called_with(mock.ANY, concat_command, check_exit_code=True) self.assertTrue(self.mock_log.debug.called) def 
test__locked_selectfs_create_operation(self): exec_command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', str(self.evs_id), 'mkdir', '-p', '/path'] self.mock_object(ssh.HNASSSHBackend, '_execute') self._driver_ssh._locked_selectfs('create', '/path') self._driver_ssh._execute.assert_called_with(exec_command) def test__locked_selectfs_create_operation_error(self): exec_command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', str(self.evs_id), 'mkdir', '-p', '/path'] self.mock_object( ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=putils.ProcessExecutionError( stderr="some error"))) self.assertRaises(exception.HNASBackendException, self._driver_ssh._locked_selectfs, 'create', '/path') self._driver_ssh._execute.assert_called_with(exec_command) def test__locked_selectfs_create_operation_context_change(self): exec_command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', str(self.evs_id), 'mkdir', '-p', '/path'] self.mock_object( ssh.HNASSSHBackend, '_execute', mock.Mock(side_effect=putils.ProcessExecutionError( stderr="Current file system invalid: VolumeNotFound"))) self.assertRaises(exception.HNASSSCContextChange, self._driver_ssh._locked_selectfs, 'create', '/path') self._driver_ssh._execute.assert_called_with(exec_command) self.assertTrue(self.mock_log.debug.called) def test__locked_selectfs_delete_operation_successful(self): exec_command = ['selectfs', self.fs_name, '\n', 'ssc', '127.0.0.1', 'console-context', '--evs', str(self.evs_id), 'rmdir', '/path'] self.mock_object(ssh.HNASSSHBackend, '_execute') self._driver_ssh._locked_selectfs('delete', '/path') self._driver_ssh._execute.assert_called_with(exec_command) def test__locked_selectfs_deleting_not_empty_directory(self): msg = 'This path has more snapshot. 
Currenty DirectoryNotEmpty' self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr=msg)])) self.assertRaises(exception.HNASDirectoryNotEmpty, self._driver_ssh._locked_selectfs, 'delete', '/path') self.assertTrue(self.mock_log.debug.called) def test__locked_selectfs_delete_exception(self): msg = "rmdir: cannot remove '/path'" self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr=msg)])) self.assertRaises(exception.HNASBackendException, self._driver_ssh._locked_selectfs, 'delete', 'path') self.assertTrue(self.mock_log.exception.called) def test__locked_selectfs_delete_not_found(self): msg = "rmdir: cannot remove '/path': NotFound" self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr=msg)])) self._driver_ssh._locked_selectfs('delete', 'path') self.assertTrue(self.mock_log.warning.called) def test__locked_selectfs_delete_context_change(self): msg = "Current file system invalid: VolumeNotFound" self.mock_object(ssh.HNASSSHBackend, '_execute', mock.Mock( side_effect=[putils.ProcessExecutionError(stderr=msg)])) self.assertRaises(exception.HNASSSCContextChange, self._driver_ssh._locked_selectfs, 'delete', 'path') self.assertTrue(self.mock_log.debug.called) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0176704 manila-21.0.0/manila/tests/share/drivers/hitachi/hsp/0000775000175000017500000000000000000000000022444 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hitachi/hsp/__init__.py0000664000175000017500000000000000000000000024543 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/hitachi/hsp/fakes.py0000664000175000017500000000505500000000000024114 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. file_system = { 'id': '33689245-1806-45d0-8507-0700b5f89750', 'properties': { 'cluster-id': '85d5b9e2-27f3-11e6-8b50-005056a75f66', 'quota': 107374182400, 'name': '07c966f9-fea2-4e12-ab72-97cb3c529bb5', 'used-capacity': 53687091200, 'free-capacity': 53687091200 }, } share = { 'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'properties': { 'file-system-id': '33689245-1806-45d0-8507-0700b5f89750', 'file-system-name': 'fake_name', }, } invalid_share = { 'id': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'name': 'aa4a7710-f326-41fb-ad18-b4ad587fc87a', 'size': 100, 'host': 'hsp', 'share_proto': 'CIFS', } access_rule = { 'id': 'acdc7172b-fe07-46c4-b78f-df3e0324ccd0', 'access_type': 'ip', 'access_to': '172.24.44.200', 'access_level': 'rw', } hsp_rules = [{ 'name': 'qa_access', 'host-specification': '172.24.44.200', 'read-write': 'true', }] hsp_cluster = { 'id': '835e7c00-9d04-11e5-a935-f4521480e990', 'properties': { 'total-storage-capacity': 107374182400, 'total-storage-used': 53687091200, 'total-storage-available': 53687091200, 'total-file-system-capacity': 107374182400, 'total-file-system-space-used': 53687091200, 'total-file-system-space-available': 53687091200 }, } stats_data = { 
'share_backend_name': 'HSP', 'vendor_name': 'Hitachi', 'driver_version': '1.0.0', 'storage_protocol': 'NFS', 'pools': [{ 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'pool_name': 'HSP', 'thin_provisioning': True, 'total_capacity_gb': 100, 'free_capacity_gb': 50, 'max_over_subscription_ratio': 20, 'qos': False, 'dedupe': False, 'compression': False, }], } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hitachi/hsp/test_driver.py0000664000175000017500000005462400000000000025363 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_config import cfg from manila import exception import manila.share.configuration import manila.share.driver from manila.share.drivers.hitachi.hsp import driver from manila.share.drivers.hitachi.hsp import rest from manila import test from manila.tests import fake_share from manila.tests.share.drivers.hitachi.hsp import fakes from manila.common import constants from oslo_utils import units CONF = cfg.CONF @ddt.ddt class HitachiHSPTestCase(test.TestCase): def setUp(self): super(HitachiHSPTestCase, self).setUp() CONF.set_default('driver_handles_share_servers', False) CONF.hitachi_hsp_host = '172.24.47.190' CONF.hitachi_hsp_username = 'hsp_user' CONF.hitachi_hsp_password = 'hsp_password' CONF.hitachi_hsp_job_timeout = 300 self.fake_el = [{ "path": CONF.hitachi_hsp_host + ":/fakeinstanceid", "metadata": {}, "is_admin_only": False, }] self.fake_share = fake_share.fake_share(share_proto='nfs') self.fake_share_instance = fake_share.fake_share_instance( base_share=self.fake_share, export_locations=self.fake_el) self.fake_conf = manila.share.configuration.Configuration(None) self.fake_private_storage = mock.Mock() self.mock_object(rest.HSPRestBackend, "get_cluster", mock.Mock(return_value=fakes.hsp_cluster)) self._driver = driver.HitachiHSPDriver( configuration=self.fake_conf, private_storage=self.fake_private_storage) self._driver.backend_name = "HSP" self.mock_log = self.mock_object(driver, 'LOG') @ddt.data(None, exception.HSPBackendException( message="Duplicate NFS access rule exists.")) def test_update_access_add(self, add_rule): access = { 'access_type': 'ip', 'access_to': '172.24.10.10', 'access_level': 'rw', } access_list = [access] self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock(return_value=fakes.share)) self.mock_object(rest.HSPRestBackend, "add_access_rule", mock.Mock( side_effect=add_rule)) 
self._driver.update_access('context', self.fake_share_instance, [], access_list, [], []) self.assertTrue(self.mock_log.debug.called) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.get_share.assert_called_once_with( fakes.file_system['id']) rest.HSPRestBackend.add_access_rule.assert_called_once_with( fakes.share['id'], access['access_to'], (access['access_level'] == constants.ACCESS_LEVEL_RW)) def test_update_access_add_exception(self): access = { 'access_type': 'ip', 'access_to': '172.24.10.10', 'access_level': 'rw', } access_list = [access] self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock(return_value=fakes.share)) self.mock_object(rest.HSPRestBackend, "add_access_rule", mock.Mock(side_effect=exception.HSPBackendException( message="HSP Backend Exception: error adding " "rule."))) self.assertRaises(exception.HSPBackendException, self._driver.update_access, 'context', self.fake_share_instance, [], access_list, [], []) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.get_share.assert_called_once_with( fakes.file_system['id']) rest.HSPRestBackend.add_access_rule.assert_called_once_with( fakes.share['id'], access['access_to'], (access['access_level'] == constants.ACCESS_LEVEL_RW)) def test_update_access_recovery(self): access1 = { 'access_type': 'ip', 'access_to': '172.24.10.10', 'access_level': 'rw', } access2 = { 'access_type': 'ip', 'access_to': '188.100.20.10', 'access_level': 'ro', } access_list = [access1, access2] self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock(return_value=fakes.share)) self.mock_object(rest.HSPRestBackend, "get_access_rules", mock.Mock(side_effect=[fakes.hsp_rules, []])) 
self.mock_object(rest.HSPRestBackend, "delete_access_rule") self.mock_object(rest.HSPRestBackend, "add_access_rule") self._driver.update_access('context', self.fake_share_instance, access_list, [], [], []) self.assertTrue(self.mock_log.debug.called) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.get_share.assert_called_once_with( fakes.file_system['id']) rest.HSPRestBackend.get_access_rules.assert_has_calls([ mock.call(fakes.share['id'])]) rest.HSPRestBackend.delete_access_rule.assert_called_once_with( fakes.share['id'], fakes.share['id'] + fakes.hsp_rules[0]['host-specification']) rest.HSPRestBackend.add_access_rule.assert_has_calls([ mock.call(fakes.share['id'], access1['access_to'], True), mock.call(fakes.share['id'], access2['access_to'], False) ], any_order=True) @ddt.data(None, exception.HSPBackendException( message="No matching access rule found.")) def test_update_access_delete(self, delete_rule): access1 = { 'access_type': 'ip', 'access_to': '172.24.44.200', 'access_level': 'rw', } access2 = { 'access_type': 'something', 'access_to': '188.100.20.10', 'access_level': 'ro', } delete_rules = [access1, access2] self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock(return_value=fakes.share)) self.mock_object(rest.HSPRestBackend, "delete_access_rule", mock.Mock(side_effect=delete_rule)) self.mock_object(rest.HSPRestBackend, "get_access_rules", mock.Mock(return_value=fakes.hsp_rules)) self._driver.update_access('context', self.fake_share_instance, [], [], delete_rules, []) self.assertTrue(self.mock_log.debug.called) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.get_share.assert_called_once_with( fakes.file_system['id']) rest.HSPRestBackend.delete_access_rule.assert_called_once_with( fakes.share['id'], 
fakes.hsp_rules[0]['name']) rest.HSPRestBackend.get_access_rules.assert_called_once_with( fakes.share['id']) def test_update_access_delete_exception(self): access1 = { 'access_type': 'ip', 'access_to': '172.24.10.10', 'access_level': 'rw', } access2 = { 'access_type': 'something', 'access_to': '188.100.20.10', 'access_level': 'ro', } delete_rules = [access1, access2] self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock(return_value=fakes.share)) self.mock_object(rest.HSPRestBackend, "delete_access_rule", mock.Mock(side_effect=exception.HSPBackendException( message="HSP Backend Exception: error deleting " "rule."))) self.mock_object(rest.HSPRestBackend, 'get_access_rules', mock.Mock(return_value=[])) self.assertRaises(exception.HSPBackendException, self._driver.update_access, 'context', self.fake_share_instance, [], [], delete_rules, []) self.assertTrue(self.mock_log.debug.called) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.get_share.assert_called_once_with( fakes.file_system['id']) rest.HSPRestBackend.delete_access_rule.assert_called_once_with( fakes.share['id'], fakes.share['id'] + access1['access_to']) rest.HSPRestBackend.get_access_rules.assert_called_once_with( fakes.share['id']) @ddt.data(True, False) def test_update_access_ip_exception(self, is_recovery): access = { 'access_type': 'something', 'access_to': '172.24.10.10', 'access_level': 'rw', } access_list = [access] self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock(return_value=fakes.share)) self.mock_object(rest.HSPRestBackend, "get_access_rules", mock.Mock(return_value=fakes.hsp_rules)) if is_recovery: access_args = [access_list, [], [], []] else: access_args = [[], access_list, [], []] 
self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, 'context', self.fake_share_instance, *access_args) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.get_share.assert_called_once_with( fakes.file_system['id']) if is_recovery: rest.HSPRestBackend.get_access_rules.assert_called_once_with( fakes.share['id']) def test_update_access_not_found_exception(self): access_list = [] self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock( side_effect=exception.HSPItemNotFoundException(msg='fake'))) self.assertRaises(exception.ShareResourceNotFound, self._driver.update_access, 'context', self.fake_share_instance, access_list, [], [], []) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) def test_create_share(self): self.mock_object(rest.HSPRestBackend, "add_file_system", mock.Mock()) self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "add_share", mock.Mock()) result = self._driver.create_share('context', self.fake_share_instance) self.assertEqual(self.fake_el, result) self.assertTrue(self.mock_log.debug.called) rest.HSPRestBackend.add_file_system.assert_called_once_with( self.fake_share_instance['id'], self.fake_share_instance['size'] * units.Gi) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.add_share.assert_called_once_with( self.fake_share_instance['id'], fakes.file_system['id']) def test_create_share_export_error(self): self.mock_object(rest.HSPRestBackend, "add_file_system", mock.Mock()) self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "add_share", mock.Mock( side_effect=exception.HSPBackendException(msg='fake'))) self.mock_object(rest.HSPRestBackend, "delete_file_system", mock.Mock()) 
self.assertRaises(exception.HSPBackendException, self._driver.create_share, 'context', self.fake_share_instance) self.assertTrue(self.mock_log.debug.called) self.assertTrue(self.mock_log.exception.called) rest.HSPRestBackend.add_file_system.assert_called_once_with( self.fake_share_instance['id'], self.fake_share_instance['size'] * units.Gi) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.add_share.assert_called_once_with( self.fake_share_instance['id'], fakes.file_system['id']) rest.HSPRestBackend.delete_file_system.assert_called_once_with( fakes.file_system['id']) def test_create_share_invalid_share_protocol(self): self.assertRaises(exception.InvalidShare, self._driver.create_share, 'context', fakes.invalid_share) @ddt.data(None, exception.HSPBackendException( message="No matching access rule found.")) def test_delete_share(self, delete_rule): self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock(return_value=fakes.share)) self.mock_object(rest.HSPRestBackend, "delete_share") self.mock_object(rest.HSPRestBackend, "delete_file_system") self.mock_object(rest.HSPRestBackend, "get_access_rules", mock.Mock(return_value=[fakes.hsp_rules[0]])) self.mock_object(rest.HSPRestBackend, "delete_access_rule", mock.Mock( side_effect=[exception.HSPBackendException( message="No matching access rule found."), delete_rule])) self._driver.delete_share('context', self.fake_share_instance) self.assertTrue(self.mock_log.debug.called) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.get_share.assert_called_once_with( fakes.file_system['id']) rest.HSPRestBackend.delete_share.assert_called_once_with( fakes.share['id']) rest.HSPRestBackend.delete_file_system.assert_called_once_with( fakes.file_system['id']) 
rest.HSPRestBackend.get_access_rules.assert_called_once_with( fakes.share['id']) rest.HSPRestBackend.delete_access_rule.assert_called_once_with( fakes.share['id'], fakes.hsp_rules[0]['name']) def test_delete_share_rule_exception(self): self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock(return_value=fakes.share)) self.mock_object(rest.HSPRestBackend, "get_access_rules", mock.Mock(return_value=[fakes.hsp_rules[0]])) self.mock_object(rest.HSPRestBackend, "delete_access_rule", mock.Mock(side_effect=exception.HSPBackendException( message="Internal Server Error."))) self.assertRaises(exception.HSPBackendException, self._driver.delete_share, 'context', self.fake_share_instance) self.assertTrue(self.mock_log.debug.called) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.get_share.assert_called_once_with( fakes.file_system['id']) rest.HSPRestBackend.get_access_rules.assert_called_once_with( fakes.share['id']) rest.HSPRestBackend.delete_access_rule.assert_called_once_with( fakes.share['id'], fakes.hsp_rules[0]['name']) def test_delete_share_already_deleted(self): self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock( side_effect=exception.HSPItemNotFoundException(msg='fake'))) self.mock_object(driver.LOG, "info") self._driver.delete_share('context', self.fake_share_instance) self.assertTrue(self.mock_log.info.called) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) def test_extend_share(self): new_size = 2 self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "resize_file_system", mock.Mock()) self._driver.extend_share(self.fake_share_instance, new_size) self.assertTrue(self.mock_log.info.called) rest.HSPRestBackend.get_cluster.assert_called_once_with() 
rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.resize_file_system.assert_called_once_with( fakes.file_system['id'], new_size * units.Gi) def test_extend_share_with_no_available_space_in_fs(self): new_size = 150 self.assertRaises(exception.HSPBackendException, self._driver.extend_share, self.fake_share_instance, new_size) rest.HSPRestBackend.get_cluster.assert_called_once_with() def test_shrink_share(self): new_size = 70 self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.mock_object(rest.HSPRestBackend, "resize_file_system", mock.Mock()) self._driver.shrink_share(self.fake_share_instance, new_size) self.assertTrue(self.mock_log.info.called) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) rest.HSPRestBackend.resize_file_system.assert_called_once_with( fakes.file_system['id'], new_size * units.Gi) def test_shrink_share_new_size_lower_than_usage(self): new_size = 20 self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, self.fake_share_instance, new_size) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) def test_manage_existing(self): self.mock_object(self.fake_private_storage, "update") self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock(return_value=fakes.share)) self.mock_object(rest.HSPRestBackend, "rename_file_system", mock.Mock()) self.mock_object(rest.HSPRestBackend, "get_file_system", mock.Mock(return_value=fakes.file_system)) result = self._driver.manage_existing(self.fake_share_instance, 'option') expected = { 'size': fakes.file_system['properties']['quota'] / units.Gi, 'export_locations': self.fake_el, } self.assertTrue(self.mock_log.info.called) self.assertEqual(expected, result) 
rest.HSPRestBackend.get_share.assert_called_once_with( name=self.fake_share_instance['id']) rest.HSPRestBackend.rename_file_system.assert_called_once_with( fakes.file_system['id'], self.fake_share_instance['id']) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) def test_manage_existing_wrong_share_id(self): self.mock_object(rest.HSPRestBackend, "get_share", mock.Mock( side_effect=exception.HSPItemNotFoundException(msg='fake'))) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, self.fake_share_instance, 'option') rest.HSPRestBackend.get_share.assert_called_once_with( name=self.fake_share_instance['id']) def test_unmanage(self): self.mock_object(self.fake_private_storage, "get", mock.Mock( return_value='original_name')) self.mock_object(self.fake_private_storage, "delete") self._driver.unmanage(self.fake_share_instance) self.assertTrue(self.mock_log.info.called) def test__update_share_stats(self): mock__update_share_stats = self.mock_object( manila.share.driver.ShareDriver, '_update_share_stats') self.mock_object(self.fake_private_storage, 'get', mock.Mock( return_value={'provisioned': 0} )) self._driver._update_share_stats() rest.HSPRestBackend.get_cluster.assert_called_once_with() mock__update_share_stats.assert_called_once_with(fakes.stats_data) self.assertTrue(self.mock_log.info.called) def test_get_default_filter_function(self): expected = "share.size >= 128" actual = self._driver.get_default_filter_function() self.assertEqual(expected, actual) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hitachi/hsp/test_rest.py0000664000175000017500000003110300000000000025030 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt import json import requests import time from unittest import mock from manila import exception from manila.share.drivers.hitachi.hsp import rest from manila import test from manila.tests.share.drivers.hitachi.hsp import fakes class FakeRequests(object): status_code = 0 headers = {} content = "" def __init__(self, status_code, content='null'): self.status_code = status_code self.headers = {'location': 'fake_location'} self.content = content def json(self): return {'messages': [{'message': 'fake_msg'}]} @ddt.ddt class HitachiHSPRestTestCase(test.TestCase): def setUp(self): super(HitachiHSPRestTestCase, self).setUp() self.hitachi_hsp_host = '172.24.47.190' self.hitachi_hsp_username = 'hds_hnas_user' self.hitachi_hsp_password = 'hds_hnas_password' self._driver = rest.HSPRestBackend(self.hitachi_hsp_host, self.hitachi_hsp_username, self.hitachi_hsp_password) @ddt.data(202, 500) def test__send_post(self, code): self.mock_object(requests, "post", mock.Mock( return_value=FakeRequests(code))) if code == 202: self.mock_object(rest.HSPRestBackend, "_wait_job_status", mock.Mock()) self._driver._send_post('fake_url') rest.HSPRestBackend._wait_job_status.assert_called_once_with( 'fake_location', 'COMPLETE') else: self.assertRaises(exception.HSPBackendException, self._driver._send_post, 'fake_url') @ddt.data({'code': 200, 'content': 'null'}, {'code': 200, 'content': 'fake_content'}, {'code': 500, 'content': 'null'}) @ddt.unpack def 
test__send_get(self, code, content): self.mock_object(requests, "get", mock.Mock( return_value=FakeRequests(code, content))) if code == 200: result = self._driver._send_get('fake_url') if content == 'null': self.assertIsNone(result) else: self.assertEqual(FakeRequests(code, content).json(), result) else: self.assertRaises(exception.HSPBackendException, self._driver._send_get, 'fake_url') @ddt.data(202, 500) def test__send_delete(self, code): self.mock_object(requests, "delete", mock.Mock( return_value=FakeRequests(code))) if code == 202: self.mock_object(rest.HSPRestBackend, "_wait_job_status", mock.Mock()) self._driver._send_delete('fake_url') rest.HSPRestBackend._wait_job_status.assert_called_once_with( 'fake_location', 'COMPLETE') else: self.assertRaises(exception.HSPBackendException, self._driver._send_delete, 'fake_url') def test_add_file_system(self): url = "https://172.24.47.190/hspapi/file-systems/" payload = { 'quota': fakes.file_system['properties']['quota'], 'auto-access': False, 'enabled': True, 'description': '', 'record-access-time': True, 'tags': '', 'space-hwm': 90, 'space-lwm': 70, 'name': fakes.file_system['properties']['name'], } self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock()) self._driver.add_file_system(fakes.file_system['properties']['name'], fakes.file_system['properties']['quota']) rest.HSPRestBackend._send_post.assert_called_once_with( url, payload=json.dumps(payload)) def test_get_file_system(self): url = ("https://172.24.47.190/hspapi/file-systems/list?name=%s" % fakes.file_system['properties']['name']) self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock( return_value={'list': [fakes.file_system]})) result = self._driver.get_file_system( fakes.file_system['properties']['name']) self.assertEqual(fakes.file_system, result) rest.HSPRestBackend._send_get.assert_called_once_with(url) def test_get_file_system_exception(self): url = ("https://172.24.47.190/hspapi/file-systems/list?name=%s" % 
fakes.file_system['properties']['name']) self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock(return_value=None)) self.assertRaises(exception.HSPItemNotFoundException, self._driver.get_file_system, fakes.file_system['properties']['name']) rest.HSPRestBackend._send_get.assert_called_once_with(url) def test_delete_file_system(self): url = ("https://172.24.47.190/hspapi/file-systems/%s" % fakes.file_system['id']) self.mock_object(rest.HSPRestBackend, "_send_delete", mock.Mock()) self._driver.delete_file_system(fakes.file_system['id']) rest.HSPRestBackend._send_delete.assert_called_once_with(url) def test_resize_file_system(self): url = ("https://172.24.47.190/hspapi/file-systems/%s" % fakes.file_system['id']) new_size = 53687091200 payload = {'quota': new_size} self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock()) self._driver.resize_file_system(fakes.file_system['id'], new_size) rest.HSPRestBackend._send_post.assert_called_once_with( url, payload=json.dumps(payload)) def test_rename_file_system(self): url = ("https://172.24.47.190/hspapi/file-systems/%s" % fakes.file_system['id']) new_name = "fs_rename" payload = {'name': new_name} self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock()) self._driver.rename_file_system(fakes.file_system['id'], new_name) rest.HSPRestBackend._send_post.assert_called_once_with( url, payload=json.dumps(payload)) def test_add_share(self): url = "https://172.24.47.190/hspapi/shares/" payload = { 'description': '', 'type': 'NFS', 'enabled': True, 'tags': '', 'name': fakes.share['name'], 'file-system-id': fakes.share['properties']['file-system-id'], } self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock()) self._driver.add_share(fakes.share['name'], fakes.share['properties']['file-system-id']) rest.HSPRestBackend._send_post.assert_called_once_with( url, payload=json.dumps(payload)) @ddt.data({'fs_id': None, 'name': fakes.share['name'], 'url': 'https://172.24.47.190/hspapi/shares/list?' 
'name=aa4a7710-f326-41fb-ad18-b4ad587fc87a'}, {'fs_id': fakes.share['properties']['file-system-id'], 'name': None, 'url': 'https://172.24.47.190/hspapi/shares/list?' 'file-system-id=33689245-1806-45d0-8507-0700b5f89750'}) @ddt.unpack def test_get_share(self, fs_id, name, url): self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock(return_value={'list': [fakes.share]})) result = self._driver.get_share(fs_id, name) self.assertEqual(fakes.share, result) rest.HSPRestBackend._send_get.assert_called_once_with(url) def test_get_share_exception(self): url = ("https://172.24.47.190/hspapi/shares/list?" "name=aa4a7710-f326-41fb-ad18-b4ad587fc87a") self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock( return_value=None)) self.assertRaises(exception.HSPItemNotFoundException, self._driver.get_share, None, fakes.share['name']) rest.HSPRestBackend._send_get.assert_called_once_with(url) def test_delete_share(self): url = "https://172.24.47.190/hspapi/shares/%s" % fakes.share['id'] self.mock_object(rest.HSPRestBackend, "_send_delete") self._driver.delete_share(fakes.share['id']) rest.HSPRestBackend._send_delete.assert_called_once_with(url) def test_add_access_rule(self): url = "https://172.24.47.190/hspapi/shares/%s/" % fakes.share['id'] payload = { "action": "add-access-rule", "name": fakes.share['id'] + fakes.access_rule['access_to'], "host-specification": fakes.access_rule['access_to'], "read-write": fakes.access_rule['access_level'], } self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock()) self._driver.add_access_rule(fakes.share['id'], fakes.access_rule['access_to'], fakes.access_rule['access_level']) rest.HSPRestBackend._send_post.assert_called_once_with( url, payload=json.dumps(payload)) def test_delete_access_rule(self): url = "https://172.24.47.190/hspapi/shares/%s/" % fakes.share['id'] payload = { "action": "delete-access-rule", "name": fakes.hsp_rules[0]['name'], } self.mock_object(rest.HSPRestBackend, "_send_post", mock.Mock()) 
self._driver.delete_access_rule(fakes.share['id'], fakes.hsp_rules[0]['name']) rest.HSPRestBackend._send_post.assert_called_once_with( url, payload=json.dumps(payload)) @ddt.data({'value': {'list': fakes.hsp_rules}, 'res': fakes.hsp_rules}, {'value': None, 'res': []}) @ddt.unpack def test_get_access_rules(self, value, res): url = ("https://172.24.47.190/hspapi/shares/%s/access-rules" % fakes.share['id']) self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock( return_value=value)) result = self._driver.get_access_rules(fakes.share['id']) self.assertEqual(res, result) rest.HSPRestBackend._send_get.assert_called_once_with(url) @ddt.data({'list': [fakes.hsp_cluster]}, None) def test_get_clusters(self, value): url = "https://172.24.47.190/hspapi/clusters/list" self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock( return_value=value)) if value: result = self._driver.get_cluster() self.assertEqual(fakes.hsp_cluster, result) else: self.assertRaises(exception.HSPBackendException, self._driver.get_cluster) rest.HSPRestBackend._send_get.assert_called_once_with(url) @ddt.data('COMPLETE', 'ERROR', 'RUNNING') def test__wait_job_status(self, stat): url = "fake_job_url" json = { 'id': 'fake_id', 'properties': { 'completion-details': 'Duplicate NFS access rule exists', 'completion-status': stat, }, 'messages': [{ 'id': 'fake_id', 'message': 'fake_msg', }] } self.mock_object(rest.HSPRestBackend, "_send_get", mock.Mock( return_value=json)) self.mock_object(time, "sleep") if stat == 'COMPLETE': self._driver._wait_job_status(url, 'COMPLETE') rest.HSPRestBackend._send_get.assert_called_once_with(url) elif stat == 'ERROR': self.assertRaises(exception.HSPBackendException, self._driver._wait_job_status, url, 'COMPLETE') rest.HSPRestBackend._send_get.assert_called_once_with(url) else: self.assertRaises(exception.HSPTimeoutException, self._driver._wait_job_status, url, 'COMPLETE') rest.HSPRestBackend._send_get.assert_has_calls([ mock.call(url), mock.call(url), mock.call(url), 
mock.call(url), mock.call(url), ]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0216703 manila-21.0.0/manila/tests/share/drivers/hpe/0000775000175000017500000000000000000000000021015 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hpe/__init__.py0000664000175000017500000000000000000000000023114 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hpe/test_hpe_3par_constants.py0000664000175000017500000001654700000000000026240 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
CIFS = 'CIFS' SMB_LOWER = 'smb' NFS = 'NFS' NFS_LOWER = 'nfs' IP = 'ip' USER = 'user' USERNAME = 'USERNAME_0' ADD_USERNAME = '+USERNAME_0:fullcontrol' DROP_USERNAME = '-USERNAME_0:fullcontrol' PASSWORD = 'PASSWORD_0' READ_WRITE = 'rw' READ_ONLY = 'ro' SAN_LOGIN = 'testlogin4san' SAN_PASSWORD = 'testpassword4san' API_URL = 'https://1.2.3.4:8080/api/v1' TIMEOUT = 60 PORT = 22 SHARE_TYPE_ID = 123456789 CIDR_PREFIX = '24' # Constants to use with Mock and expect in results EXPECTED_IP_10203040 = '10.20.30.40' EXPECTED_IP_10203041 = '10.20.30.41' EXPECTED_IP_1234 = '1.2.3.4' EXPECTED_MY_IP = '9.8.7.6' EXPECTED_IP_127 = '127.0.0.1' EXPECTED_IP_127_2 = '127.0.0.2' EXPECTED_ACCESS_LEVEL = 'foo_access' EXPECTED_SUBNET = '255.255.255.0' # based on CIDR_PREFIX above EXPECTED_VLAN_TYPE = 'vlan' EXPECTED_VXLAN_TYPE = 'vxlan' EXPECTED_VLAN_TAG = '101' EXPECTED_SERVER_ID = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' EXPECTED_PROJECT_ID = 'osf-nfs-project-id' SHARE_ID = 'share-id' EXPECTED_SHARE_ID = 'osf-share-id' EXPECTED_SHARE_ID_RO = 'osf-ro-share-id' EXPECTED_SHARE_NAME = 'share-name' EXPECTED_NET_NAME = 'testnet' EXPECTED_FPG = 'pool' EXPECTED_HOST = 'hostname@backend#' + EXPECTED_FPG UNEXPECTED_FPG = 'not_a_pool' UNEXPECTED_HOST = 'hostname@backend#' + UNEXPECTED_FPG HOST_WITHOUT_POOL_1 = 'hostname@backend' HOST_WITHOUT_POOL_2 = 'hostname@backend#' EXPECTED_SHARE_PATH = '/anyfpg/anyvfs/anyfstore' EXPECTED_SIZE_1 = 1 EXPECTED_SIZE_2 = 2 EXPECTED_SNAP_NAME = 'osf-snap-name' EXPECTED_SNAP_ID = 'osf-snap-id' EXPECTED_STATS = {'test': 'stats'} EXPECTED_FPG_CONF = [{EXPECTED_FPG: [EXPECTED_IP_10203040]}] EXPECTED_FSTORE = EXPECTED_PROJECT_ID EXPECTED_VFS = 'test_vfs' EXPECTED_GET_VFS = {'vfsname': EXPECTED_VFS, 'vfsip': {'address': [EXPECTED_IP_10203040]}} EXPECTED_GET_VFS_MULTIPLES = { 'vfsname': EXPECTED_VFS, 'vfsip': {'address': [EXPECTED_IP_10203041, EXPECTED_IP_10203040]}} EXPECTED_CLIENT_GET_VFS_MEMBERS_MULTI = { 'fspname': EXPECTED_VFS, 'vfsip': [ {'networkName': 
EXPECTED_NET_NAME, 'fspool': EXPECTED_VFS, 'address': EXPECTED_IP_10203040, 'prefixLen': EXPECTED_SUBNET, 'vfs': EXPECTED_VFS, 'vlanTag': EXPECTED_VLAN_TAG, }, {'networkName': EXPECTED_NET_NAME, 'fspool': EXPECTED_VFS, 'address': EXPECTED_IP_10203041, 'prefixLen': EXPECTED_SUBNET, 'vfs': EXPECTED_VFS, 'vlanTag': EXPECTED_VLAN_TAG, }, ], 'vfsname': EXPECTED_VFS, } EXPECTED_MEDIATOR_GET_VFS_RET_VAL_MULTI = { 'fspname': EXPECTED_VFS, 'vfsip': { 'networkName': EXPECTED_NET_NAME, 'fspool': EXPECTED_VFS, 'address': [ EXPECTED_IP_10203040, EXPECTED_IP_10203041, ], 'prefixLen': EXPECTED_SUBNET, 'vfs': EXPECTED_VFS, 'vlanTag': EXPECTED_VLAN_TAG }, 'vfsname': EXPECTED_VFS, } EXPECTED_CLIENT_GET_VFS_MEMBERS = { 'fspname': EXPECTED_VFS, 'vfsip': { 'networkName': EXPECTED_NET_NAME, 'fspool': EXPECTED_VFS, 'address': EXPECTED_IP_10203040, 'prefixLen': EXPECTED_SUBNET, 'vfs': EXPECTED_VFS, 'vlanTag': EXPECTED_VLAN_TAG, }, 'vfsname': EXPECTED_VFS, } EXPECTED_MEDIATOR_GET_VFS_RET_VAL = { 'fspname': EXPECTED_VFS, 'vfsip': { 'networkName': EXPECTED_NET_NAME, 'fspool': EXPECTED_VFS, 'address': [EXPECTED_IP_10203040], 'prefixLen': EXPECTED_SUBNET, 'vfs': EXPECTED_VFS, 'vlanTag': EXPECTED_VLAN_TAG, }, 'vfsname': EXPECTED_VFS, } EXPECTED_CLIENT_GET_VFS_RETURN_VALUE = { 'total': 1, 'members': [EXPECTED_CLIENT_GET_VFS_MEMBERS], } EXPECTED_CLIENT_GET_VFS_RETURN_VALUE_MULTI = { 'total': 1, 'members': [EXPECTED_CLIENT_GET_VFS_MEMBERS_MULTI], } EXPECTED_FPG_MAP = {EXPECTED_FPG: {EXPECTED_VFS: [EXPECTED_IP_10203040]}} EXPECTED_FPG_MAP_MULTI_VFS = {EXPECTED_FPG: { EXPECTED_VFS: [EXPECTED_IP_10203041, EXPECTED_IP_10203040]}} EXPECTED_SHARE_IP = '10.50.3.8' EXPECTED_HPE_DEBUG = True EXPECTED_COMMENT = "OpenStack Manila - foo-comment" EXPECTED_EXTRA_SPECS = {} EXPECTED_LOCATION = ':'.join((EXPECTED_IP_1234, EXPECTED_SHARE_PATH)) EXPECTED_SUPER_SHARE = 'OPENSTACK_SUPER_SHARE' EXPECTED_SUPER_SHARE_COMMENT = ('OpenStack super share used to delete nested ' 'shares.') EXPECTED_CIFS_DOMAIN = 
'LOCAL_CLUSTER' EXPECTED_MOUNT_PATH = '/mnt/' SHARE_SERVER = { 'backend_details': { 'ip': EXPECTED_IP_10203040, 'fpg': EXPECTED_FPG, 'vfs': EXPECTED_VFS, }, } # Access rules. Allow for overwrites. ACCESS_RULE_NFS = { 'access_type': IP, 'access_to': EXPECTED_IP_1234, 'access_level': READ_WRITE, } ACCESS_RULE_CIFS = { 'access_type': USER, 'access_to': USERNAME, 'access_level': READ_WRITE, } ADD_RULE_BAD_TYPE = { 'access_type': 'unsupported_other_type', 'access_to': USERNAME, 'access_level': READ_WRITE, } ADD_RULE_IP = { 'access_type': IP, 'access_to': EXPECTED_IP_1234, 'access_level': READ_WRITE, } ADD_RULE_IP_RO = { 'access_type': IP, 'access_to': EXPECTED_IP_1234, 'access_level': READ_ONLY, } ADD_RULE_USER = { 'access_type': USER, 'access_to': USERNAME, 'access_level': READ_WRITE, } DELETE_RULE_IP = { 'access_type': IP, 'access_to': EXPECTED_IP_1234, 'access_level': READ_WRITE, } DELETE_RULE_USER = { 'access_type': USER, 'access_to': USERNAME, 'access_level': READ_WRITE, } DELETE_RULE_IP_RO = { 'access_type': IP, 'access_to': EXPECTED_IP_1234, 'access_level': READ_ONLY, } GET_FSQUOTA = {'message': None, 'total': 1, 'members': [{'hardBlock': '1024', 'softBlock': '1024'}]} EXPECTED_FSIP = { 'fspool': EXPECTED_FPG, 'vfs': EXPECTED_VFS, 'address': EXPECTED_IP_1234, 'prefixLen': EXPECTED_SUBNET, 'vlanTag': EXPECTED_VLAN_TAG, } OTHER_FSIP = { 'fspool': EXPECTED_FPG, 'vfs': EXPECTED_VFS, 'address': '9.9.9.9', 'prefixLen': EXPECTED_SUBNET, 'vlanTag': EXPECTED_VLAN_TAG, } NFS_SHARE_INFO = { 'project_id': EXPECTED_PROJECT_ID, 'id': EXPECTED_SHARE_ID, 'share_proto': NFS, 'export_location': EXPECTED_LOCATION, 'size': 1234, 'host': EXPECTED_HOST, } SNAPSHOT_INFO = { 'name': EXPECTED_SNAP_NAME, 'id': EXPECTED_SNAP_ID, 'share': { 'project_id': EXPECTED_PROJECT_ID, 'id': EXPECTED_SHARE_ID, 'share_proto': NFS, 'export_location': EXPECTED_LOCATION, 'host': EXPECTED_HOST, }, } SNAPSHOT_INSTANCE = { 'name': EXPECTED_SNAP_NAME, 'id': EXPECTED_SNAP_ID, 'share_id': EXPECTED_SHARE_ID, 
'share_proto': NFS, } class FakeException(Exception): pass FAKE_EXCEPTION = FakeException("Fake exception for testing.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hpe/test_hpe_3par_driver.py0000664000175000017500000012432000000000000025504 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy import sys from unittest import mock import ddt if 'hpe3parclient' not in sys.modules: sys.modules['hpe3parclient'] = mock.Mock() from manila import exception from manila.share.drivers.hpe import hpe_3par_driver as hpe3pardriver from manila.share.drivers.hpe import hpe_3par_mediator as hpe3parmediator from manila import test from manila.tests.share.drivers.hpe import test_hpe_3par_constants as constants @ddt.ddt class HPE3ParDriverFPGTestCase(test.TestCase): @ddt.data((-1, 4), (0, 5), (0, -1)) @ddt.unpack def test_FPG_init_args_failure(self, min_ip, max_ip): self.assertRaises(exception.HPE3ParInvalid, hpe3pardriver.FPG, min_ip, max_ip) @ddt.data(('invalid_ip_fpg, 10.256.0.1', 0, 4), (None, 0, 4), (' ', 0, 4), ('', 0, 4), ('max_ip_fpg, 10.0.0.1, 10.0.0.2, 10.0.0.3, 10.0.0.4, 10.0.0.5', 0, 4), ('min_1_ip_fpg', 1, 4)) @ddt.unpack def test_FPG_type_failures(self, value, min_ip, max_ip): fpg_type_obj = hpe3pardriver.FPG(min_ip=min_ip, max_ip=max_ip) 
self.assertRaises(exception.HPE3ParInvalid, fpg_type_obj, value) @ddt.data(('samplefpg, 10.0.0.1', {'samplefpg': ['10.0.0.1']}), ('samplefpg', {'samplefpg': []}), ('samplefpg, 10.0.0.1, 10.0.0.2', {'samplefpg': ['10.0.0.1', '10.0.0.2']})) @ddt.unpack def test_FPG_type_success(self, value, expected_fpg): fpg_type_obj = hpe3pardriver.FPG() fpg = fpg_type_obj(value) self.assertEqual(expected_fpg, fpg) @ddt.ddt class HPE3ParDriverTestCase(test.TestCase): def setUp(self): super(HPE3ParDriverTestCase, self).setUp() # Create a mock configuration with attributes and a safe_get() self.conf = mock.Mock() self.conf.driver_handles_share_servers = True self.conf.hpe3par_debug = constants.EXPECTED_HPE_DEBUG self.conf.hpe3par_username = constants.USERNAME self.conf.hpe3par_password = constants.PASSWORD self.conf.hpe3par_api_url = constants.API_URL self.conf.hpe3par_san_login = constants.SAN_LOGIN self.conf.hpe3par_san_password = constants.SAN_PASSWORD self.conf.hpe3par_san_ip = constants.EXPECTED_IP_1234 self.conf.hpe3par_fpg = constants.EXPECTED_FPG_CONF self.conf.hpe3par_san_ssh_port = constants.PORT self.conf.ssh_conn_timeout = constants.TIMEOUT self.conf.hpe3par_fstore_per_share = False self.conf.hpe3par_require_cifs_ip = False self.conf.hpe3par_cifs_admin_access_username = constants.USERNAME, self.conf.hpe3par_cifs_admin_access_password = constants.PASSWORD, self.conf.hpe3par_cifs_admin_access_domain = ( constants.EXPECTED_CIFS_DOMAIN), self.conf.hpe3par_share_mount_path = constants.EXPECTED_MOUNT_PATH, self.conf.my_ip = constants.EXPECTED_IP_1234 self.conf.network_config_group = 'test_network_config_group' self.conf.admin_network_config_group = ( 'test_admin_network_config_group') self.conf.filter_function = None self.conf.goodness_function = None def safe_get(attr): try: return self.conf.__getattribute__(attr) except AttributeError: return None self.conf.safe_get = safe_get self.real_hpe_3par_mediator = hpe3parmediator.HPE3ParMediator self.mock_object(hpe3parmediator, 
'HPE3ParMediator') self.mock_mediator_constructor = hpe3parmediator.HPE3ParMediator self.mock_mediator = self.mock_mediator_constructor() # restore needed static methods self.mock_mediator.ensure_supported_protocol = ( self.real_hpe_3par_mediator.ensure_supported_protocol) self.mock_mediator.build_export_locations = ( self.real_hpe_3par_mediator.build_export_locations) self.driver = hpe3pardriver.HPE3ParShareDriver( configuration=self.conf) def test_driver_setup_success(self, get_vfs_ret_val=constants.EXPECTED_GET_VFS): """Driver do_setup without any errors.""" self.mock_mediator.get_vfs.return_value = get_vfs_ret_val self.driver.do_setup(None) conf = self.conf self.mock_mediator_constructor.assert_has_calls([ mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port, hpe3par_san_password=conf.hpe3par_san_password, hpe3par_username=conf.hpe3par_username, hpe3par_san_login=conf.hpe3par_san_login, hpe3par_debug=conf.hpe3par_debug, hpe3par_api_url=conf.hpe3par_api_url, hpe3par_password=conf.hpe3par_password, hpe3par_san_ip=conf.hpe3par_san_ip, hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share, hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip, hpe3par_cifs_admin_access_username=( conf.hpe3par_cifs_admin_access_username), hpe3par_cifs_admin_access_password=( conf.hpe3par_cifs_admin_access_password), hpe3par_cifs_admin_access_domain=( conf.hpe3par_cifs_admin_access_domain), hpe3par_share_mount_path=conf.hpe3par_share_mount_path, my_ip=self.conf.my_ip, ssh_conn_timeout=conf.ssh_conn_timeout)]) self.mock_mediator.assert_has_calls([ mock.call.do_setup(), mock.call.get_vfs(constants.EXPECTED_FPG)]) def test_driver_setup_dhss_success(self): """Driver do_setup without any errors with dhss=True.""" self.test_driver_setup_success() self.assertEqual(constants.EXPECTED_FPG_MAP, self.driver.fpgs) def test_driver_setup_no_dhss_success(self): """Driver do_setup without any errors with dhss=False.""" self.conf.driver_handles_share_servers = False 
self.test_driver_setup_success() self.assertEqual(constants.EXPECTED_FPG_MAP, self.driver.fpgs) def test_driver_setup_no_dhss_multi_getvfs_success(self): """Driver do_setup when dhss=False, getvfs returns multiple IPs.""" self.conf.driver_handles_share_servers = False self.test_driver_setup_success( get_vfs_ret_val=constants.EXPECTED_GET_VFS_MULTIPLES) self.assertEqual(constants.EXPECTED_FPG_MAP, self.driver.fpgs) def test_driver_setup_success_no_dhss_no_conf_ss_ip(self): """test driver's do_setup() Driver do_setup with dhss=False, share server ip not set in config file but discoverable at 3par array """ self.conf.driver_handles_share_servers = False # ss ip not provided in conf original_fpg = deepcopy(self.conf.hpe3par_fpg) self.conf.hpe3par_fpg[0][constants.EXPECTED_FPG] = [] self.test_driver_setup_success() self.assertEqual(constants.EXPECTED_FPG_MAP, self.driver.fpgs) constants.EXPECTED_FPG_CONF = original_fpg def test_driver_setup_failure_no_dhss_no_conf_ss_ip(self): """Configured IP address is required for dhss=False.""" self.conf.driver_handles_share_servers = False # ss ip not provided in conf fpg_without_ss_ip = deepcopy(self.conf.hpe3par_fpg) self.conf.hpe3par_fpg[0][constants.EXPECTED_FPG] = [] # ss ip not configured on array vfs_without_ss_ip = deepcopy(constants.EXPECTED_GET_VFS) vfs_without_ss_ip['vfsip']['address'] = [] self.mock_mediator.get_vfs.return_value = vfs_without_ss_ip self.assertRaises(exception.HPE3ParInvalid, self.driver.do_setup, None) constants.EXPECTED_FPG_CONF = fpg_without_ss_ip def test_driver_setup_mediator_error(self): """Driver do_setup when the mediator setup fails.""" self.mock_mediator.do_setup.side_effect = ( exception.ShareBackendException('fail')) self.assertRaises(exception.ShareBackendException, self.driver.do_setup, None) conf = self.conf self.mock_mediator_constructor.assert_has_calls([ mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port, hpe3par_san_password=conf.hpe3par_san_password, 
hpe3par_username=conf.hpe3par_username, hpe3par_san_login=conf.hpe3par_san_login, hpe3par_debug=conf.hpe3par_debug, hpe3par_api_url=conf.hpe3par_api_url, hpe3par_password=conf.hpe3par_password, hpe3par_san_ip=conf.hpe3par_san_ip, hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share, hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip, hpe3par_cifs_admin_access_username=( conf.hpe3par_cifs_admin_access_username), hpe3par_cifs_admin_access_password=( conf.hpe3par_cifs_admin_access_password), hpe3par_cifs_admin_access_domain=( conf.hpe3par_cifs_admin_access_domain), hpe3par_share_mount_path=conf.hpe3par_share_mount_path, my_ip=self.conf.my_ip, ssh_conn_timeout=conf.ssh_conn_timeout)]) self.mock_mediator.assert_has_calls([mock.call.do_setup()]) def test_driver_setup_with_vfs_error(self): """Driver do_setup when the get_vfs fails.""" self.mock_mediator.get_vfs.side_effect = ( exception.ShareBackendException('fail')) self.assertRaises(exception.ShareBackendException, self.driver.do_setup, None) conf = self.conf self.mock_mediator_constructor.assert_has_calls([ mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port, hpe3par_san_password=conf.hpe3par_san_password, hpe3par_username=conf.hpe3par_username, hpe3par_san_login=conf.hpe3par_san_login, hpe3par_debug=conf.hpe3par_debug, hpe3par_api_url=conf.hpe3par_api_url, hpe3par_password=conf.hpe3par_password, hpe3par_san_ip=conf.hpe3par_san_ip, hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share, hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip, hpe3par_cifs_admin_access_username=( conf.hpe3par_cifs_admin_access_username), hpe3par_cifs_admin_access_password=( conf.hpe3par_cifs_admin_access_password), hpe3par_cifs_admin_access_domain=( conf.hpe3par_cifs_admin_access_domain), hpe3par_share_mount_path=conf.hpe3par_share_mount_path, my_ip=self.conf.my_ip, ssh_conn_timeout=conf.ssh_conn_timeout)]) self.mock_mediator.assert_has_calls([ mock.call.do_setup(), mock.call.get_vfs(constants.EXPECTED_FPG)]) def 
test_driver_setup_conf_ips_validation_fails(self): """Driver do_setup when the _validate_pool_ips fails.""" self.conf.driver_handles_share_servers = False vfs_with_ss_ip = deepcopy(constants.EXPECTED_GET_VFS) vfs_with_ss_ip['vfsip']['address'] = ['10.100.100.100'] self.mock_mediator.get_vfs.return_value = vfs_with_ss_ip self.assertRaises(exception.HPE3ParInvalid, self.driver.do_setup, None) conf = self.conf self.mock_mediator_constructor.assert_has_calls([ mock.call(hpe3par_san_ssh_port=conf.hpe3par_san_ssh_port, hpe3par_san_password=conf.hpe3par_san_password, hpe3par_username=conf.hpe3par_username, hpe3par_san_login=conf.hpe3par_san_login, hpe3par_debug=conf.hpe3par_debug, hpe3par_api_url=conf.hpe3par_api_url, hpe3par_password=conf.hpe3par_password, hpe3par_san_ip=conf.hpe3par_san_ip, hpe3par_fstore_per_share=conf.hpe3par_fstore_per_share, hpe3par_require_cifs_ip=conf.hpe3par_require_cifs_ip, hpe3par_cifs_admin_access_username=( conf.hpe3par_cifs_admin_access_username), hpe3par_cifs_admin_access_password=( conf.hpe3par_cifs_admin_access_password), hpe3par_cifs_admin_access_domain=( conf.hpe3par_cifs_admin_access_domain), hpe3par_share_mount_path=conf.hpe3par_share_mount_path, my_ip=self.conf.my_ip, ssh_conn_timeout=conf.ssh_conn_timeout)]) self.mock_mediator.assert_has_calls([ mock.call.do_setup(), mock.call.get_vfs(constants.EXPECTED_FPG)]) def init_driver(self): """Simple driver setup for re-use with tests that need one.""" self.driver._hpe3par = self.mock_mediator self.driver.fpgs = constants.EXPECTED_FPG_MAP self.mock_object(hpe3pardriver, 'share_types') get_extra_specs = hpe3pardriver.share_types.get_extra_specs_from_share get_extra_specs.return_value = constants.EXPECTED_EXTRA_SPECS def test_driver_check_for_setup_error_success(self): """check_for_setup_error when things go well.""" # Generally this is always mocked, but here we reference the class. 
hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator self.mock_object(hpe3pardriver, 'LOG') self.init_driver() self.driver.check_for_setup_error() expected_calls = [ mock.call.debug('HPE3ParShareDriver SHA1: %s', mock.ANY), mock.call.debug('HPE3ParMediator SHA1: %s', mock.ANY) ] hpe3pardriver.LOG.assert_has_calls(expected_calls) def test_driver_check_for_setup_error_exception(self): """check_for_setup_error catch and log any exceptions.""" # Since HPE3ParMediator is mocked, we'll hit the except/log. self.mock_object(hpe3pardriver, 'LOG') self.init_driver() self.driver.check_for_setup_error() expected_calls = [ mock.call.debug('HPE3ParShareDriver SHA1: %s', mock.ANY), mock.call.debug('Source code SHA1 not logged due to: %s', mock.ANY) ] hpe3pardriver.LOG.assert_has_calls(expected_calls) @ddt.data(([constants.SHARE_SERVER], constants.SHARE_SERVER), ([], None),) @ddt.unpack def test_choose_share_server_compatible_with_share(self, share_servers, expected_share_sever): context = None share_server = self.driver.choose_share_server_compatible_with_share( context, share_servers, constants.NFS_SHARE_INFO, None, None) self.assertEqual(expected_share_sever, share_server) def test_choose_share_server_compatible_with_share_with_cg(self): context = None cg_ref = {'id': 'dummy'} self.assertRaises( exception.InvalidRequest, self.driver.choose_share_server_compatible_with_share, context, [constants.SHARE_SERVER], constants.NFS_SHARE_INFO, None, cg_ref) def do_create_share(self, protocol, share_type_id, expected_project_id, expected_share_id, expected_size): """Re-usable code for create share.""" context = None share = { 'display_name': constants.EXPECTED_SHARE_NAME, 'host': constants.EXPECTED_HOST, 'project_id': expected_project_id, 'id': expected_share_id, 'share_proto': protocol, 'share_type_id': share_type_id, 'size': expected_size, } location = self.driver.create_share(context, share, constants.SHARE_SERVER) return location def do_create_share_from_snapshot(self, 
protocol, share_type_id, snapshot_instance, expected_share_id, expected_size): """Re-usable code for create share from snapshot.""" context = None share = { 'project_id': constants.EXPECTED_PROJECT_ID, 'display_name': constants.EXPECTED_SHARE_NAME, 'host': constants.EXPECTED_HOST, 'id': expected_share_id, 'share_proto': protocol, 'share_type_id': share_type_id, 'size': expected_size, } location = self.driver.create_share_from_snapshot( context, share, snapshot_instance, constants.SHARE_SERVER) return location @ddt.data((constants.UNEXPECTED_HOST, exception.InvalidHost), (constants.HOST_WITHOUT_POOL_1, exception.InvalidHost), (constants.HOST_WITHOUT_POOL_2, exception.InvalidHost)) @ddt.unpack def test_driver_create_share_fails_get_pool_location(self, host, expected_exception): """get_pool_location fails to extract pool name from host""" self.init_driver() context = None share_server = None share = { 'display_name': constants.EXPECTED_SHARE_NAME, 'host': host, 'project_id': constants.EXPECTED_PROJECT_ID, 'id': constants.EXPECTED_SHARE_ID, 'share_proto': constants.CIFS, 'share_type_id': constants.SHARE_TYPE_ID, 'size': constants.EXPECTED_SIZE_2, } self.assertRaises(expected_exception, self.driver.create_share, context, share, share_server) def test_driver_create_cifs_share(self): self.init_driver() expected_location = '\\\\%s\\%s' % (constants.EXPECTED_IP_10203040, constants.EXPECTED_SHARE_NAME) self.mock_mediator.create_share.return_value = ( constants.EXPECTED_SHARE_NAME) hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator location = self.do_create_share(constants.CIFS, constants.SHARE_TYPE_ID, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_2) self.assertIn(expected_location, location) expected_calls = [mock.call.create_share( constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, comment=mock.ANY, 
size=constants.EXPECTED_SIZE_2)] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_create_nfs_share(self): self.init_driver() expected_location = ':'.join((constants.EXPECTED_IP_10203040, constants.EXPECTED_SHARE_PATH)) self.mock_mediator.create_share.return_value = ( constants.EXPECTED_SHARE_PATH) hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator location = self.do_create_share(constants.NFS, constants.SHARE_TYPE_ID, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_1) self.assertIn(expected_location, location) expected_calls = [ mock.call.create_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, comment=mock.ANY, size=constants.EXPECTED_SIZE_1)] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_create_cifs_share_from_snapshot(self): self.init_driver() expected_location = '\\\\%s\\%s' % (constants.EXPECTED_IP_10203040, constants.EXPECTED_SHARE_NAME) self.mock_mediator.create_share_from_snapshot.return_value = ( constants.EXPECTED_SHARE_NAME) hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator snapshot_instance = constants.SNAPSHOT_INSTANCE.copy() snapshot_instance['protocol'] = constants.CIFS location = self.do_create_share_from_snapshot( constants.CIFS, constants.SHARE_TYPE_ID, snapshot_instance, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_2) self.assertIn(expected_location, location) expected_calls = [ mock.call.create_share_from_snapshot( constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_FSTORE, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS, [constants.EXPECTED_IP_10203040], comment=mock.ANY, size=constants.EXPECTED_SIZE_2), ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_create_nfs_share_from_snapshot(self): self.init_driver() 
expected_location = ':'.join((constants.EXPECTED_IP_10203040, constants.EXPECTED_SHARE_PATH)) self.mock_mediator.create_share_from_snapshot.return_value = ( constants.EXPECTED_SHARE_PATH) hpe3parmediator.HPE3ParMediator = self.real_hpe_3par_mediator location = self.do_create_share_from_snapshot( constants.NFS, constants.SHARE_TYPE_ID, constants.SNAPSHOT_INSTANCE, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_1) self.assertIn(expected_location, location) expected_calls = [ mock.call.create_share_from_snapshot( constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS, [constants.EXPECTED_IP_10203040], comment=mock.ANY, size=constants.EXPECTED_SIZE_1), ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_delete_share(self): self.init_driver() context = None share_server = None share = { 'project_id': constants.EXPECTED_PROJECT_ID, 'id': constants.EXPECTED_SHARE_ID, 'share_proto': constants.CIFS, 'size': constants.EXPECTED_SIZE_1, 'host': constants.EXPECTED_HOST } self.driver.delete_share(context, share, share_server) expected_calls = [ mock.call.delete_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_1, constants.CIFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_IP_10203040)] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_create_snapshot(self): self.init_driver() context = None share_server = None self.driver.create_snapshot(context, constants.SNAPSHOT_INFO, share_server) expected_calls = [ mock.call.create_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS)] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_delete_snapshot(self): self.init_driver() context = None share_server = None 
self.driver.delete_snapshot(context, constants.SNAPSHOT_INFO, share_server) expected_calls = [ mock.call.delete_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_update_access_add_rule(self): self.init_driver() context = None self.driver.update_access(context, constants.NFS_SHARE_INFO, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_IP], [], constants.SHARE_SERVER) expected_calls = [ mock.call.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_IP], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_update_access_delete_rule(self): self.init_driver() context = None self.driver.update_access(context, constants.NFS_SHARE_INFO, [constants.ACCESS_RULE_NFS], [], [constants.DELETE_RULE_IP], constants.SHARE_SERVER) expected_calls = [ mock.call.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [], [constants.DELETE_RULE_IP], constants.EXPECTED_FPG, constants.EXPECTED_VFS) ] self.mock_mediator.assert_has_calls(expected_calls) def test_driver_extend_share(self): self.init_driver() old_size = constants.NFS_SHARE_INFO['size'] new_size = old_size * 2 share_server = None self.driver.extend_share(constants.NFS_SHARE_INFO, new_size, share_server) self.mock_mediator.resize_share.assert_called_once_with( constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, new_size, old_size, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_driver_shrink_share(self): self.init_driver() old_size = constants.NFS_SHARE_INFO['size'] new_size = old_size / 2 share_server = None 
        self.driver.shrink_share(constants.NFS_SHARE_INFO,
                                 new_size,
                                 share_server)
        self.mock_mediator.resize_share.assert_called_once_with(
            constants.EXPECTED_PROJECT_ID,
            constants.EXPECTED_SHARE_ID,
            constants.NFS,
            new_size,
            old_size,
            constants.EXPECTED_FPG,
            constants.EXPECTED_VFS)

    def test_driver_get_share_stats_not_ready(self):
        """Protect against stats update before driver is ready."""
        self.mock_object(hpe3pardriver, 'LOG')
        # Before init_driver(), refresh must return only zeroed defaults.
        expected_result = {
            'driver_handles_share_servers': True,
            'qos': False,
            'driver_version': self.driver.VERSION,
            'free_capacity_gb': 0,
            'max_over_subscription_ratio': None,
            'reserved_percentage': 0,
            'reserved_snapshot_percentage': 0,
            'reserved_share_extend_percentage': 0,
            'provisioned_capacity_gb': 0,
            'share_backend_name': 'HPE_3PAR',
            'snapshot_support': True,
            'create_share_from_snapshot_support': True,
            'revert_to_snapshot_support': False,
            'mount_snapshot_support': False,
            'share_group_stats': {
                'consistent_snapshot_support': None,
            },
            'storage_protocol': 'NFS_CIFS',
            'thin_provisioning': True,
            'total_capacity_gb': 0,
            'vendor_name': 'HPE',
            'pools': None,
            'replication_domain': None,
            'filter_function': None,
            'goodness_function': None,
            'mount_point_name_support': False,
            'ipv4_support': True,
            'ipv6_support': False,
            'max_share_server_size': -1,
            'max_shares_per_share_server': -1,
            'security_service_update_support': False,
            'share_server_multiple_subnet_support': False,
            'network_allocation_update_support': False,
            'share_replicas_migration_support': False,
            'encryption_support': None,
        }
        result = self.driver.get_share_stats(refresh=True)
        self.assertEqual(expected_result, result)
        # The driver logs (rather than raises) when setup is incomplete.
        expected_calls = [
            mock.call.info('Skipping capacity and capabilities update. '
                           'Setup has not completed.')
        ]
        hpe3pardriver.LOG.assert_has_calls(expected_calls)

    def test_driver_get_share_stats_no_refresh(self):
        """Driver does not call mediator when refresh=False."""
        self.init_driver()
        self.driver._stats = constants.EXPECTED_STATS
        result = self.driver.get_share_stats(refresh=False)
        self.assertEqual(constants.EXPECTED_STATS, result)
        self.assertEqual([], self.mock_mediator.mock_calls)

    def test_driver_get_share_stats_with_refresh(self):
        """Driver adds stats from mediator to expected structure."""
        self.init_driver()
        expected_free = constants.EXPECTED_SIZE_1
        expected_capacity = constants.EXPECTED_SIZE_2
        expected_version = self.driver.VERSION
        self.mock_mediator.get_fpg_status.return_value = {
            'pool_name': constants.EXPECTED_FPG,
            'total_capacity_gb': expected_capacity,
            'free_capacity_gb': expected_free,
            'thin_provisioning': True,
            'dedupe': False,
            'hpe3par_flash_cache': False,
            'hp3par_flash_cache': False,
            'reserved_percentage': 0,
            'reserved_snapshot_percentage': 0,
            'reserved_share_extend_percentage': 0,
            'provisioned_capacity_gb': expected_capacity
        }
        # Per-pool data comes straight from get_fpg_status(); the totals at
        # the top level stay 0 because pools carry the capacity.
        expected_result = {
            'share_backend_name': 'HPE_3PAR',
            'vendor_name': 'HPE',
            'driver_version': expected_version,
            'storage_protocol': 'NFS_CIFS',
            'driver_handles_share_servers': True,
            'total_capacity_gb': 0,
            'free_capacity_gb': 0,
            'provisioned_capacity_gb': 0,
            'reserved_percentage': 0,
            'reserved_snapshot_percentage': 0,
            'reserved_share_extend_percentage': 0,
            'max_over_subscription_ratio': None,
            'max_share_server_size': -1,
            'max_shares_per_share_server': -1,
            'qos': False,
            'thin_provisioning': True,
            'pools': [{
                'pool_name': constants.EXPECTED_FPG,
                'total_capacity_gb': expected_capacity,
                'free_capacity_gb': expected_free,
                'thin_provisioning': True,
                'dedupe': False,
                'hpe3par_flash_cache': False,
                'hp3par_flash_cache': False,
                'reserved_percentage': 0,
                'reserved_snapshot_percentage': 0,
                'reserved_share_extend_percentage': 0,
                'provisioned_capacity_gb': expected_capacity}],
            'snapshot_support': True,
            'create_share_from_snapshot_support': True,
            'revert_to_snapshot_support': False,
            'security_service_update_support': False,
            'share_server_multiple_subnet_support': False,
            'network_allocation_update_support': False,
            'mount_snapshot_support': False,
            'share_replicas_migration_support': False,
            'encryption_support': None,
            'share_group_stats': {
                'consistent_snapshot_support': None,
            },
            'replication_domain': None,
            'filter_function': None,
            'goodness_function': None,
            'mount_point_name_support': False,
            'ipv4_support': True,
            'ipv6_support': False,
        }
        result = self.driver.get_share_stats(refresh=True)
        self.assertEqual(expected_result, result)
        expected_calls = [
            mock.call.get_fpg_status(constants.EXPECTED_FPG)
        ]
        self.mock_mediator.assert_has_calls(expected_calls)
        self.assertTrue(self.mock_mediator.get_fpg_status.called)

    def test_driver_get_share_stats_premature(self):
        """Driver init stats before init_driver completed."""
        expected_version = self.driver.VERSION
        self.mock_mediator.get_fpg_status.return_value = {'not_called': 1}
        expected_result = {
            'qos': False,
            'driver_handles_share_servers': True,
            'driver_version': expected_version,
            'free_capacity_gb': 0,
            'max_over_subscription_ratio': None,
            'max_share_server_size': -1,
            'max_shares_per_share_server': -1,
            'pools': None,
            'provisioned_capacity_gb': 0,
            'reserved_percentage': 0,
            'reserved_snapshot_percentage': 0,
            'reserved_share_extend_percentage': 0,
            'share_backend_name': 'HPE_3PAR',
            'storage_protocol': 'NFS_CIFS',
            'thin_provisioning': True,
            'total_capacity_gb': 0,
            'vendor_name': 'HPE',
            'snapshot_support': True,
            'create_share_from_snapshot_support': True,
            'revert_to_snapshot_support': False,
            'security_service_update_support': False,
            'share_server_multiple_subnet_support': False,
            'network_allocation_update_support': False,
            'share_replicas_migration_support': False,
            'encryption_support': None,
            'mount_snapshot_support': False,
            'share_group_stats': {
                'consistent_snapshot_support': None,
            },
            'replication_domain': None,
            'filter_function': None,
            'goodness_function': None,
            'mount_point_name_support': False,
            'ipv4_support': True,
            'ipv6_support': False,
        }
        result = self.driver.get_share_stats(refresh=True)
        self.assertEqual(expected_result, result)
        # The mediator must not be consulted before setup completes.
        self.assertFalse(self.mock_mediator.get_fpg_status.called)

    @ddt.data(('test"dquote', 'test_dquote'),
              ("test'squote", "test_squote"),
              ('test-:;,.punc', 'test-:_punc'),
              ('test with spaces ', 'test with spaces '),
              ('x' * 300, 'x' * 300))
    @ddt.unpack
    def test_build_comment(self, display_name, clean_name):
        """build_share_comment() sanitizes the name and bounds the length."""
        host = 'test-stack1@backend#pool'
        share = {
            'host': host,
            'display_name': display_name
        }
        comment = self.driver.build_share_comment(share)
        cleaned = {
            'host': host,
            'clean_name': clean_name
        }
        expected = ("OpenStack Manila - host=%(host)s "
                    "orig_name=%(clean_name)s created=" % cleaned)[:254]
        self.assertLess(len(comment), 255)
        self.assertTrue(comment.startswith(expected))
        # Test for some chars that are not allowed.
        # Don't test with same regex as the code uses.
        for c in "'\".,;":
            self.assertNotIn(c, comment)

    def test_get_network_allocations_number(self):
        """The driver requires exactly one network allocation per server."""
        self.assertEqual(1, self.driver.get_network_allocations_number())

    def test_setup_server(self):
        """Setup server by creating a new FSIP."""
        self.init_driver()
        network_info = [{
            'network_allocations': [
                {'ip_address': constants.EXPECTED_IP_1234}],
            'cidr': '/'.join((constants.EXPECTED_IP_1234,
                              constants.CIDR_PREFIX)),
            'network_type': constants.EXPECTED_VLAN_TYPE,
            'segmentation_id': constants.EXPECTED_VLAN_TAG,
            'server_id': constants.EXPECTED_SERVER_ID,
        }]
        expected_result = {
            'share_server_name': constants.EXPECTED_SERVER_ID,
            'share_server_id': constants.EXPECTED_SERVER_ID,
            'ip': constants.EXPECTED_IP_1234,
            'subnet': constants.EXPECTED_SUBNET,
            'vlantag': constants.EXPECTED_VLAN_TAG,
            'fpg': constants.EXPECTED_FPG,
            'vfs': constants.EXPECTED_VFS,
        }
        metadata = {'request_host': constants.EXPECTED_HOST}
        result = self.driver._setup_server(network_info, metadata)
        expected_calls = [
            mock.call.create_fsip(constants.EXPECTED_IP_1234,
                                  constants.EXPECTED_SUBNET,
                                  constants.EXPECTED_VLAN_TAG,
                                  constants.EXPECTED_FPG,
                                  constants.EXPECTED_VFS)
        ]
        self.mock_mediator.assert_has_calls(expected_calls)
        self.assertEqual(expected_result, result)

    def test_setup_server_fails_for_unsupported_network_type(self):
        """Setup server fails for unsupported network type"""
        self.init_driver()
        # VXLAN is not supported by this driver; only VLAN/flat networks are.
        network_info = [{
            'network_allocations': [
                {'ip_address': constants.EXPECTED_IP_1234}],
            'cidr': '/'.join((constants.EXPECTED_IP_1234,
                              constants.CIDR_PREFIX)),
            'network_type': constants.EXPECTED_VXLAN_TYPE,
            'segmentation_id': constants.EXPECTED_VLAN_TAG,
            'server_id': constants.EXPECTED_SERVER_ID,
        }]
        metadata = {'request_host': constants.EXPECTED_HOST}
        self.assertRaises(exception.NetworkBadConfigurationException,
                          self.driver._setup_server,
                          network_info, metadata)

    def test_setup_server_fails_for_exceed_pool_max_supported_ips(self):
        """Setup server fails when the VFS has reached max supported IPs"""
        self.init_driver()
        network_info = [{
            'network_allocations': [
                {'ip_address': constants.EXPECTED_IP_1234}],
            'cidr': '/'.join((constants.EXPECTED_IP_1234,
                              constants.CIDR_PREFIX)),
            'network_type': constants.EXPECTED_VLAN_TYPE,
            'segmentation_id': constants.EXPECTED_VLAN_TAG,
            'server_id': constants.EXPECTED_SERVER_ID,
        }]
        metadata = {'request_host': constants.EXPECTED_HOST}
        # Temporarily fill the VFS IP list to its maximum (4 entries) so the
        # driver rejects the request, then restore the original list.
        expected_vfs = self.driver.fpgs[
            constants.EXPECTED_FPG][constants.EXPECTED_VFS]
        self.driver.fpgs[constants.EXPECTED_FPG][constants.EXPECTED_VFS] = [
            '10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4']
        self.assertRaises(exception.Invalid,
                          self.driver._setup_server,
                          network_info, metadata)
        self.driver.fpgs[constants.EXPECTED_FPG][constants.EXPECTED_VFS
                                                 ] = expected_vfs

    def test_teardown_server(self):
        """Test tear down server"""
        self.init_driver()
        server_details = {
            'ip': constants.EXPECTED_IP_10203040,
            'fpg': constants.EXPECTED_FPG,
            'vfs': constants.EXPECTED_VFS,
        }
        self.driver._teardown_server(server_details)
        expected_calls = [
mock.call.remove_fsip(constants.EXPECTED_IP_10203040, constants.EXPECTED_FPG, constants.EXPECTED_VFS) ] self.mock_mediator.assert_has_calls(expected_calls) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/hpe/test_hpe_3par_mediator.py0000664000175000017500000037537500000000000026037 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from unittest import mock import ddt if 'hpe3parclient' not in sys.modules: sys.modules['hpe3parclient'] = mock.Mock() from oslo_utils import units from manila.data import utils as data_utils from manila import exception from manila.share.drivers.hpe import hpe_3par_mediator as hpe3parmediator from manila import test from manila.tests.share.drivers.hpe import test_hpe_3par_constants as constants from manila import utils CLIENT_VERSION_MIN_OK = hpe3parmediator.MIN_CLIENT_VERSION TEST_WSAPI_VERSION_STR = '30201292' @ddt.ddt class HPE3ParMediatorTestCase(test.TestCase): def setUp(self): super(HPE3ParMediatorTestCase, self).setUp() # Fake utils.execute self.mock_object(utils, 'execute', mock.Mock(return_value={})) # Fake data_utils.Copy class FakeCopy(object): def run(self): pass def get_progress(self): return {'total_progress': 100} self.mock_copy = self.mock_object( data_utils, 'Copy', mock.Mock(return_value=FakeCopy())) # This is the fake client to use. 
self.mock_client = mock.Mock() # Take over the hpe3parclient module and stub the constructor. hpe3parclient = sys.modules['hpe3parclient'] hpe3parclient.version_tuple = CLIENT_VERSION_MIN_OK # Need a fake constructor to return the fake client. # This is also be used for constructor error tests. self.mock_object(hpe3parclient.file_client, 'HPE3ParFilePersonaClient') self.mock_client_constructor = ( hpe3parclient.file_client.HPE3ParFilePersonaClient ) self.mock_client = self.mock_client_constructor() # Set the mediator to use in tests. self.mediator = hpe3parmediator.HPE3ParMediator( hpe3par_username=constants.USERNAME, hpe3par_password=constants.PASSWORD, hpe3par_api_url=constants.API_URL, hpe3par_debug=constants.EXPECTED_HPE_DEBUG, hpe3par_san_ip=constants.EXPECTED_IP_1234, hpe3par_san_login=constants.SAN_LOGIN, hpe3par_san_password=constants.SAN_PASSWORD, hpe3par_san_ssh_port=constants.PORT, hpe3par_cifs_admin_access_username=constants.USERNAME, hpe3par_cifs_admin_access_password=constants.PASSWORD, hpe3par_cifs_admin_access_domain=constants.EXPECTED_CIFS_DOMAIN, hpe3par_share_mount_path=constants.EXPECTED_MOUNT_PATH, ssh_conn_timeout=constants.TIMEOUT, my_ip=constants.EXPECTED_MY_IP) def test_mediator_no_client(self): """Test missing hpe3parclient error.""" mock_log = self.mock_object(hpe3parmediator, 'LOG') self.mock_object(hpe3parmediator.HPE3ParMediator, 'no_client', None) self.assertRaises(exception.HPE3ParInvalidClient, self.mediator.do_setup) mock_log.error.assert_called_once_with(mock.ANY) def test_mediator_setup_client_init_error(self): """Any client init exceptions should result in a ManilaException.""" self.mock_client_constructor.side_effect = ( Exception('Any exception. E.g., bad version or some other ' 'non-Manila Exception.')) self.assertRaises(exception.ManilaException, self.mediator.do_setup) def test_mediator_setup_client_ssh_error(self): # This could be anything the client comes up with, but the # mediator should turn it into a ManilaException. 
        non_manila_exception = Exception('non-manila-except')
        self.mock_client.setSSHOptions.side_effect = non_manila_exception
        self.assertRaises(exception.ManilaException, self.mediator.do_setup)
        self.mock_client.assert_has_calls(
            [mock.call.setSSHOptions(constants.EXPECTED_IP_1234,
                                     constants.SAN_LOGIN,
                                     constants.SAN_PASSWORD,
                                     port=constants.PORT,
                                     conn_timeout=constants.TIMEOUT)])

    def test_mediator_vfs_exception(self):
        """Backend exception during get_vfs."""
        self.init_mediator()
        self.mock_client.getvfs.side_effect = Exception('non-manila-except')
        self.assertRaises(exception.ManilaException,
                          self.mediator.get_vfs,
                          fpg=constants.EXPECTED_FPG)
        expected_calls = [
            mock.call.getvfs(fpg=constants.EXPECTED_FPG, vfs=None),
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_vfs_not_found(self):
        """VFS not found."""
        self.init_mediator()
        self.mock_client.getvfs.return_value = {'total': 0}
        self.assertRaises(exception.ManilaException,
                          self.mediator.get_vfs,
                          fpg=constants.EXPECTED_FPG)
        expected_calls = [
            mock.call.getvfs(fpg=constants.EXPECTED_FPG, vfs=None),
        ]
        self.mock_client.assert_has_calls(expected_calls)

    @ddt.data((constants.EXPECTED_CLIENT_GET_VFS_RETURN_VALUE,
               constants.EXPECTED_MEDIATOR_GET_VFS_RET_VAL),
              (constants.EXPECTED_CLIENT_GET_VFS_RETURN_VALUE_MULTI,
               constants.EXPECTED_MEDIATOR_GET_VFS_RET_VAL_MULTI))
    @ddt.unpack
    def test_mediator_get_vfs(self, get_vfs_val, exp_vfs_val):
        """get_vfs returns mediator-formatted data from the client reply."""
        self.init_mediator()
        self.mock_client.getvfs.return_value = get_vfs_val
        ret_val = self.mediator.get_vfs(constants.EXPECTED_FPG)
        self.assertEqual(exp_vfs_val, ret_val)
        expected_calls = [
            mock.call.getvfs(fpg=constants.EXPECTED_FPG, vfs=None),
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def init_mediator(self):
        """Basic mediator setup for re-use with tests that need one."""
        self.mock_client.getWsApiVersion.return_value = {
            'build': TEST_WSAPI_VERSION_STR,
        }
        self.mock_client.getvfs.return_value = {
            'total': 1,
            'members': [{'vfsname': constants.EXPECTED_VFS}]
        }
        self.mock_client.getfshare.return_value = {
            'total': 1,
            'members': [
                {'fstoreName': constants.EXPECTED_FSTORE,
                 'shareName': constants.EXPECTED_SHARE_ID,
                 'shareDir': constants.EXPECTED_SHARE_PATH,
                 'share_proto': constants.NFS,
                 'sharePath': constants.EXPECTED_SHARE_PATH,
                 'comment': constants.EXPECTED_COMMENT,
                 }]
        }
        self.mock_client.setfshare.return_value = []
        self.mock_client.setfsquota.return_value = []
        self.mock_client.getfsquota.return_value = constants.GET_FSQUOTA
        self.mediator.do_setup()

    def test_mediator_setup_success(self):
        """Do a mediator setup without errors."""
        self.init_mediator()
        self.assertIsNotNone(self.mediator._client)
        expected_calls = [
            mock.call.setSSHOptions(constants.EXPECTED_IP_1234,
                                    constants.SAN_LOGIN,
                                    constants.SAN_PASSWORD,
                                    port=constants.PORT,
                                    conn_timeout=constants.TIMEOUT),
            mock.call.getWsApiVersion(),
            mock.call.debug_rest(constants.EXPECTED_HPE_DEBUG)
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_client_login_error(self):
        """Test exception during login."""
        self.init_mediator()
        self.mock_client.login.side_effect = constants.FAKE_EXCEPTION
        self.assertRaises(exception.ShareBackendException,
                          self.mediator._wsapi_login)
        expected_calls = [mock.call.login(constants.USERNAME,
                                          constants.PASSWORD)]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_client_logout_error(self):
        """Test exception during logout."""
        self.init_mediator()
        mock_log = self.mock_object(hpe3parmediator, 'LOG')
        fake_exception = constants.FAKE_EXCEPTION
        self.mock_client.http.unauthenticate.side_effect = fake_exception
        self.mediator._wsapi_logout()
        # Warning is logged (no exception thrown).
        self.assertTrue(mock_log.warning.called)
        expected_calls = [mock.call.http.unauthenticate()]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_client_version_unsupported(self):
        """Try a client with version less than minimum."""
        self.hpe3parclient = sys.modules['hpe3parclient']
        self.hpe3parclient.version_tuple = (CLIENT_VERSION_MIN_OK[0],
                                            CLIENT_VERSION_MIN_OK[1],
                                            CLIENT_VERSION_MIN_OK[2] - 1)
        mock_log = self.mock_object(hpe3parmediator, 'LOG')
        self.assertRaises(exception.HPE3ParInvalidClient,
                          self.init_mediator)
        mock_log.error.assert_called_once_with(mock.ANY)

    def test_mediator_client_version_supported(self):
        """Try a client with a version greater than the minimum."""
        # The setup success already tests the min version. Try version > min.
        self.hpe3parclient = sys.modules['hpe3parclient']
        self.hpe3parclient.version_tuple = (CLIENT_VERSION_MIN_OK[0],
                                            CLIENT_VERSION_MIN_OK[1],
                                            CLIENT_VERSION_MIN_OK[2] + 1)
        self.init_mediator()
        expected_calls = [
            mock.call.setSSHOptions(constants.EXPECTED_IP_1234,
                                    constants.SAN_LOGIN,
                                    constants.SAN_PASSWORD,
                                    port=constants.PORT,
                                    conn_timeout=constants.TIMEOUT),
            mock.call.getWsApiVersion(),
            mock.call.debug_rest(constants.EXPECTED_HPE_DEBUG)
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_client_version_exception(self):
        """Test the getWsApiVersion exception handling."""
        self.mock_client.getWsApiVersion.side_effect = constants.FAKE_EXCEPTION
        self.assertRaises(exception.ShareBackendException,
                          self.init_mediator)

    def test_mediator_client_version_bad_return_value(self):
        """Test the getWsApiVersion exception handling with bad value."""
        # Expecting a dict with 'build' in it. This would fail badly.
        self.mock_client.getWsApiVersion.return_value = 'bogus'
        self.assertRaises(exception.ShareBackendException,
                          self.mediator.do_setup)

    def get_expected_calls_for_create_share(self,
                                            client_version,
                                            expected_fpg,
                                            expected_vfsname,
                                            expected_protocol,
                                            extra_specs,
                                            expected_project_id,
                                            expected_share_id):
        """Build the expected client call list for a create_share() test."""
        expected_sharedir = expected_share_id

        createfshare_kwargs = dict(comment=mock.ANY,
                                   fpg=expected_fpg,
                                   sharedir=expected_sharedir,
                                   fstore=expected_project_id)

        if expected_protocol == constants.NFS_LOWER:
            createfshare_kwargs['clientip'] = '127.0.0.1'

            # Options from extra-specs.
            opt_string = extra_specs.get('hpe3par:nfs_options', [])
            opt_list = opt_string.split(',')
            # Options that the mediator adds.
            nfs_options = ['rw', 'no_root_squash', 'insecure']
            nfs_options += opt_list
            expected_options = ','.join(nfs_options)

            createfshare_kwargs['options'] = OptionMatcher(
                self.assertListEqual, expected_options)

            expected_calls = [
                mock.call.createfstore(expected_vfsname, expected_project_id,
                                       comment=mock.ANY,
                                       fpg=expected_fpg),
                mock.call.getfsquota(fpg=expected_fpg,
                                     vfs=expected_vfsname,
                                     fstore=expected_project_id),
                mock.call.setfsquota(expected_vfsname,
                                     fpg=expected_fpg,
                                     hcapacity='2048',
                                     scapacity='2048',
                                     fstore=expected_project_id),
                mock.call.createfshare(expected_protocol, expected_vfsname,
                                       expected_share_id,
                                       **createfshare_kwargs),
                mock.call.getfshare(expected_protocol, expected_share_id,
                                    fpg=expected_fpg,
                                    vfs=expected_vfsname,
                                    fstore=expected_project_id)]
        else:
            # SMB: extra-specs map onto their corresponding client kwargs.
            smb_opts = (hpe3parmediator.ACCESS_BASED_ENUM,
                        hpe3parmediator.CONTINUOUS_AVAIL,
                        hpe3parmediator.CACHE)

            for smb_opt in smb_opts:
                opt_value = extra_specs.get('hpe3par:smb_%s' % smb_opt)
                if opt_value:
                    opt_key = hpe3parmediator.SMB_EXTRA_SPECS_MAP[smb_opt]
                    createfshare_kwargs[opt_key] = opt_value

            expected_calls = [
                mock.call.createfstore(expected_vfsname, expected_project_id,
                                       comment=mock.ANY,
                                       fpg=expected_fpg),
                mock.call.getfsquota(fpg=expected_fpg,
                                     vfs=expected_vfsname,
                                     fstore=expected_project_id),
                mock.call.setfsquota(expected_vfsname,
                                     fpg=expected_fpg,
                                     hcapacity='2048',
                                     scapacity='2048',
                                     fstore=expected_project_id),
                mock.call.createfshare(expected_protocol, expected_vfsname,
                                       expected_share_id,
                                       **createfshare_kwargs),
                mock.call.getfshare(expected_protocol, expected_share_id,
                                    fpg=expected_fpg,
                                    vfs=expected_vfsname,
                                    fstore=expected_project_id)]
        return expected_calls

    @staticmethod
    def _build_smb_extra_specs(**kwargs):
        """Build an extra-specs dict with 'hpe3par:smb_*' prefixed keys."""
        extra_specs = {'driver_handles_share_servers': False}

        for k, v in kwargs.items():
            extra_specs['hpe3par:smb_%s' % k] = v

        return extra_specs

    @ddt.data(((4, 0, 0), None, None, None),
              ((4, 0, 0), 'true', None, None),
              ((4, 0, 0), None, 'false', None),
              ((4, 0, 0), None, 'false', None),
              ((4, 0, 0), None, None, 'optimized'),
              ((4, 0, 0), 'true', 'false', 'optimized'))
    @ddt.unpack
    def test_mediator_create_cifs_share(self, client_version, abe, ca, cache):
        """CIFS create_share() passes SMB extra-specs through to the client."""
        self.hpe3parclient = sys.modules['hpe3parclient']
        self.hpe3parclient.version_tuple = client_version
        self.init_mediator()

        self.mock_client.getfshare.return_value = {
            'message': None,
            'total': 1,
            'members': [{'shareName': constants.EXPECTED_SHARE_NAME}]
        }

        extra_specs = self._build_smb_extra_specs(access_based_enum=abe,
                                                  continuous_avail=ca,
                                                  cache=cache)

        location = self.mediator.create_share(constants.EXPECTED_PROJECT_ID,
                                              constants.EXPECTED_SHARE_ID,
                                              constants.CIFS,
                                              extra_specs,
                                              constants.EXPECTED_FPG,
                                              constants.EXPECTED_VFS,
                                              size=constants.EXPECTED_SIZE_1)

        self.assertEqual(constants.EXPECTED_SHARE_NAME, location)

        expected_calls = self.get_expected_calls_for_create_share(
            client_version,
            constants.EXPECTED_FPG,
            constants.EXPECTED_VFS,
            constants.SMB_LOWER,
            extra_specs,
            constants.EXPECTED_PROJECT_ID,
            constants.EXPECTED_SHARE_ID)

        self.mock_client.assert_has_calls(expected_calls)

    @ddt.data('ro', 'rw', 'no_root_squash', 'root_squash', 'secure',
              'insecure',
              'hide,insecure,no_wdelay,ro,bogus,root_squash,test')
    def test_mediator_create_nfs_share_bad_options(self, nfs_options):
        """NFS options the mediator reserves for itself are rejected."""
        self.init_mediator()
        extra_specs =
            {'hpe3par:nfs_options': nfs_options}
        self.assertRaises(exception.InvalidInput,
                          self.mediator.create_share,
                          constants.EXPECTED_PROJECT_ID,
                          constants.EXPECTED_SHARE_ID,
                          constants.NFS.lower(),
                          extra_specs,
                          constants.EXPECTED_FPG,
                          constants.EXPECTED_VFS,
                          size=constants.EXPECTED_SIZE_1)
        # The share must never reach the backend when options are invalid.
        self.assertFalse(self.mock_client.createfshare.called)

    @ddt.data('sync', 'no_wdelay,sec=sys,hide,sync')
    def test_mediator_create_nfs_share(self, nfs_options):
        """NFS create_share() returns the share path from the client."""
        self.init_mediator()

        self.mock_client.getfshare.return_value = {
            'message': None,
            'total': 1,
            'members': [{'sharePath': constants.EXPECTED_SHARE_PATH}]
        }

        extra_specs = {'hpe3par:nfs_options': nfs_options}

        location = self.mediator.create_share(constants.EXPECTED_PROJECT_ID,
                                              constants.EXPECTED_SHARE_ID,
                                              constants.NFS.lower(),
                                              extra_specs,
                                              constants.EXPECTED_FPG,
                                              constants.EXPECTED_VFS,
                                              size=constants.EXPECTED_SIZE_1)

        self.assertEqual(constants.EXPECTED_SHARE_PATH, location)

        expected_calls = self.get_expected_calls_for_create_share(
            hpe3parmediator.MIN_CLIENT_VERSION,
            constants.EXPECTED_FPG,
            constants.EXPECTED_VFS,
            constants.NFS.lower(),
            extra_specs,
            constants.EXPECTED_PROJECT_ID,
            constants.EXPECTED_SHARE_ID)

        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_create_nfs_share_get_exception(self):
        """A client error in getfshare becomes ShareBackendException."""
        self.init_mediator()

        self.mock_client.getfshare.side_effect = constants.FAKE_EXCEPTION

        self.assertRaises(exception.ShareBackendException,
                          self.mediator.create_share,
                          constants.EXPECTED_PROJECT_ID,
                          constants.EXPECTED_SHARE_ID,
                          constants.NFS.lower(),
                          constants.EXPECTED_EXTRA_SPECS,
                          constants.EXPECTED_FPG,
                          constants.EXPECTED_VFS,
                          size=constants.EXPECTED_SIZE_1)

    @ddt.data(0, 2)
    def test_mediator_create_nfs_share_get_fail(self, count):
        """getfshare must find exactly one share; 0 or 2 results fail."""
        self.init_mediator()

        self.mock_client.getfshare.return_value = {'total': count}

        self.assertRaises(exception.ShareBackendException,
                          self.mediator.create_share,
                          constants.EXPECTED_PROJECT_ID,
                          constants.EXPECTED_SHARE_ID,
                          constants.NFS.lower(),
                          constants.EXPECTED_EXTRA_SPECS,
                          constants.EXPECTED_FPG,
                          constants.EXPECTED_VFS,
                          size=constants.EXPECTED_SIZE_1)

    @ddt.data(True, False)
    def test_mediator_create_cifs_share_from_snapshot(self, require_cifs_ip):
        """CIFS create-from-snapshot makes RO and RW shares then copies."""
        self.init_mediator()
        self.mediator.hpe3par_require_cifs_ip = require_cifs_ip

        self.mock_client.getfsnap.return_value = {
            'message': None,
            'total': 1,
            'members': [{'snapName': constants.EXPECTED_SNAP_ID,
                         'fstoreName': constants.EXPECTED_FSTORE}]
        }

        location = self.mediator.create_share_from_snapshot(
            constants.EXPECTED_SHARE_ID,
            constants.CIFS,
            constants.EXPECTED_EXTRA_SPECS,
            constants.EXPECTED_PROJECT_ID,
            constants.EXPECTED_SHARE_ID,
            constants.EXPECTED_SNAP_ID,
            constants.EXPECTED_FPG,
            constants.EXPECTED_VFS,
            [constants.EXPECTED_IP_10203040])

        self.assertEqual(constants.EXPECTED_SHARE_ID, location)

        expected_kwargs_ro = {
            'comment': mock.ANY,
            'fpg': constants.EXPECTED_FPG,
            'fstore': constants.EXPECTED_FSTORE,
        }
        expected_kwargs_rw = expected_kwargs_ro.copy()
        # RO share exports the snapshot directory; RW exports the new share.
        expected_kwargs_ro['sharedir'] = '.snapshot/%s/%s' % (
            constants.EXPECTED_SNAP_ID, constants.EXPECTED_SHARE_ID)
        expected_kwargs_rw['sharedir'] = constants.EXPECTED_SHARE_ID

        if require_cifs_ip:
            expected_kwargs_ro['allowip'] = constants.EXPECTED_MY_IP
            expected_kwargs_rw['allowip'] = (
                ','.join((constants.EXPECTED_MY_IP,
                          constants.EXPECTED_IP_127)))

        expected_calls = [
            mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_ID,
                               vfs=constants.EXPECTED_VFS,
                               fpg=constants.EXPECTED_FPG,
                               pat=True,
                               fstore=constants.EXPECTED_FSTORE),
            mock.call.createfshare(constants.SMB_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   **expected_kwargs_ro),
            mock.call.getfshare(constants.SMB_LOWER,
                                constants.EXPECTED_SHARE_ID,
                                fpg=constants.EXPECTED_FPG,
                                vfs=constants.EXPECTED_VFS,
                                fstore=constants.EXPECTED_FSTORE),
            mock.call.createfshare(constants.SMB_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   **expected_kwargs_rw),
            mock.call.getfshare(constants.SMB_LOWER,
                                constants.EXPECTED_SHARE_ID,
                                fpg=constants.EXPECTED_FPG,
                                vfs=constants.EXPECTED_VFS,
                                fstore=constants.EXPECTED_FSTORE),
            mock.call.setfshare(constants.SMB_LOWER,
                                constants.EXPECTED_VFS,
                                constants.EXPECTED_SHARE_ID,
                                allowperm=constants.ADD_USERNAME,
                                comment=mock.ANY,
                                fpg=constants.EXPECTED_FPG,
                                fstore=constants.EXPECTED_FSTORE),
            mock.call.setfshare(constants.SMB_LOWER,
                                constants.EXPECTED_VFS,
                                constants.EXPECTED_SHARE_ID,
                                allowperm=constants.ADD_USERNAME,
                                comment=mock.ANY,
                                fpg=constants.EXPECTED_FPG,
                                fstore=constants.EXPECTED_FSTORE),
            mock.call.setfshare(constants.SMB_LOWER,
                                constants.EXPECTED_VFS,
                                constants.EXPECTED_SUPER_SHARE,
                                allowperm=constants.DROP_USERNAME,
                                comment=mock.ANY,
                                fpg=constants.EXPECTED_FPG,
                                fstore=constants.EXPECTED_FSTORE),
            mock.call.removefshare(constants.SMB_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   fpg=constants.EXPECTED_FPG,
                                   fstore=constants.EXPECTED_FSTORE),
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_create_cifs_share_from_snapshot_ro(self):
        """Only the read-only snapshot share is created in RO mode."""
        self.init_mediator()
        # RO because CIFS admin access username is not configured
        self.mediator.hpe3par_cifs_admin_access_username = None

        self.mock_client.getfsnap.return_value = {
            'message': None,
            'total': 1,
            'members': [{'snapName': constants.EXPECTED_SNAP_ID,
                         'fstoreName': constants.EXPECTED_FSTORE}]
        }

        location = self.mediator.create_share_from_snapshot(
            constants.EXPECTED_SHARE_ID,
            constants.CIFS,
            constants.EXPECTED_EXTRA_SPECS,
            constants.EXPECTED_PROJECT_ID,
            constants.EXPECTED_SHARE_ID,
            constants.EXPECTED_SNAP_ID,
            constants.EXPECTED_FPG,
            constants.EXPECTED_VFS,
            [constants.EXPECTED_IP_10203040],
            comment=constants.EXPECTED_COMMENT)

        self.assertEqual(constants.EXPECTED_SHARE_ID, location)

        share_dir = '.snapshot/%s/%s' % (
            constants.EXPECTED_SNAP_ID, constants.EXPECTED_SHARE_ID)
        expected_kwargs_ro = {
            'comment': constants.EXPECTED_COMMENT,
            'fpg': constants.EXPECTED_FPG,
            'fstore': constants.EXPECTED_FSTORE,
            'sharedir': share_dir,
        }
        self.mock_client.createfshare.assert_called_once_with(
            constants.SMB_LOWER,
            constants.EXPECTED_VFS,
            constants.EXPECTED_SHARE_ID,
            **expected_kwargs_ro
        )

    def test_mediator_create_nfs_share_from_snapshot(self):
        """NFS create-from-snapshot: RO export, RW export, copy, cleanup."""
        self.init_mediator()

        self.mock_client.getfsnap.return_value = {
            'message': None,
            'total': 1,
            'members': [{'snapName': constants.EXPECTED_SNAP_ID,
                         'fstoreName': constants.EXPECTED_FSTORE}]
        }

        location = self.mediator.create_share_from_snapshot(
            constants.EXPECTED_SHARE_ID,
            constants.NFS,
            constants.EXPECTED_EXTRA_SPECS,
            constants.EXPECTED_PROJECT_ID,
            constants.EXPECTED_SHARE_ID,
            constants.EXPECTED_SNAP_ID,
            constants.EXPECTED_FPG,
            constants.EXPECTED_VFS,
            [constants.EXPECTED_IP_10203040])

        self.assertEqual(constants.EXPECTED_SHARE_PATH, location)

        expected_calls = [
            mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_ID,
                               vfs=constants.EXPECTED_VFS,
                               fpg=constants.EXPECTED_FPG,
                               pat=True,
                               fstore=constants.EXPECTED_FSTORE),
            mock.call.createfshare(constants.NFS_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   comment=mock.ANY,
                                   fpg=constants.EXPECTED_FPG,
                                   sharedir='.snapshot/%s/%s' % (
                                       constants.EXPECTED_SNAP_ID,
                                       constants.EXPECTED_SHARE_ID),
                                   fstore=constants.EXPECTED_FSTORE,
                                   clientip=constants.EXPECTED_MY_IP,
                                   options='ro,no_root_squash,insecure'),
            mock.call.getfshare(constants.NFS_LOWER,
                                constants.EXPECTED_SHARE_ID,
                                fpg=constants.EXPECTED_FPG,
                                vfs=constants.EXPECTED_VFS,
                                fstore=constants.EXPECTED_FSTORE),
            mock.call.createfshare(constants.NFS_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   comment=mock.ANY,
                                   fpg=constants.EXPECTED_FPG,
                                   sharedir=constants.EXPECTED_SHARE_ID,
                                   fstore=constants.EXPECTED_FSTORE,
                                   clientip=','.join((
                                       constants.EXPECTED_MY_IP,
                                       constants.EXPECTED_IP_127)),
                                   options='rw,no_root_squash,insecure'),
            mock.call.getfshare(constants.NFS_LOWER,
                                constants.EXPECTED_SHARE_ID,
                                fpg=constants.EXPECTED_FPG,
                                vfs=constants.EXPECTED_VFS,
                                fstore=constants.EXPECTED_FSTORE),
            mock.call.getfshare(constants.NFS_LOWER,
                                constants.EXPECTED_SHARE_ID,
                                fpg=constants.EXPECTED_FPG,
                                vfs=constants.EXPECTED_VFS,
                                fstore=constants.EXPECTED_FSTORE),
            mock.call.setfshare(constants.NFS_LOWER,
                                constants.EXPECTED_VFS,
                                constants.EXPECTED_SHARE_ID,
                                clientip=''.join(('-',
                                                  constants.EXPECTED_MY_IP)),
                                comment=mock.ANY,
                                fpg=constants.EXPECTED_FPG,
                                fstore=constants.EXPECTED_FSTORE),
            mock.call.removefshare(constants.NFS_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   fpg=constants.EXPECTED_FPG,
                                   fstore=constants.EXPECTED_FSTORE),
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_create_share_from_snap_copy_incomplete(self):
        """A data copy that does not reach 100% raises a backend error."""
        self.init_mediator()

        self.mock_client.getfsnap.return_value = {
            'message': None,
            'total': 1,
            'members': [{'snapName': constants.EXPECTED_SNAP_ID,
                         'fstoreName': constants.EXPECTED_FSTORE}]
        }

        mock_bad_copy = mock.Mock()
        mock_bad_copy.get_progress.return_value = {'total_progress': 99}
        self.mock_object(
            data_utils, 'Copy', mock.Mock(return_value=mock_bad_copy))

        self.assertRaises(exception.ShareBackendException,
                          self.mediator.create_share_from_snapshot,
                          constants.EXPECTED_SHARE_ID,
                          constants.NFS,
                          constants.EXPECTED_EXTRA_SPECS,
                          constants.EXPECTED_PROJECT_ID,
                          constants.EXPECTED_SHARE_ID,
                          constants.EXPECTED_SNAP_ID,
                          constants.EXPECTED_FPG,
                          constants.EXPECTED_VFS,
                          [constants.EXPECTED_IP_10203040])
        self.assertTrue(mock_bad_copy.run.called)
        self.assertTrue(mock_bad_copy.get_progress.called)

    def test_mediator_create_share_from_snap_copy_exception(self):
        """A data copy that raises is surfaced as a backend error."""
        self.init_mediator()

        self.mock_client.getfsnap.return_value = {
            'message': None,
            'total': 1,
            'members': [{'snapName': constants.EXPECTED_SNAP_ID,
                         'fstoreName': constants.EXPECTED_FSTORE}]
        }

        mock_bad_copy = mock.Mock()
        mock_bad_copy.run.side_effect = Exception('run exception')
        self.mock_object(
            data_utils, 'Copy', mock.Mock(return_value=mock_bad_copy))

        self.assertRaises(exception.ShareBackendException,
                          self.mediator.create_share_from_snapshot,
                          constants.EXPECTED_SHARE_ID,
                          constants.NFS,
                          constants.EXPECTED_EXTRA_SPECS,
                          constants.EXPECTED_PROJECT_ID,
                          constants.EXPECTED_SHARE_ID,
                          constants.EXPECTED_SNAP_ID,
                          constants.EXPECTED_FPG,
                          constants.EXPECTED_VFS,
                          [constants.EXPECTED_IP_10203040])
        self.assertTrue(mock_bad_copy.run.called)

    def test_mediator_create_share_from_snap_not_found(self):
        """A missing source snapshot raises a backend error."""
        self.init_mediator()

        self.mock_client.getfsnap.return_value = {
            'message': None,
            'total': 0,
            'members': []
        }

        self.assertRaises(exception.ShareBackendException,
                          self.mediator.create_share_from_snapshot,
                          constants.EXPECTED_SHARE_ID,
                          constants.NFS,
                          constants.EXPECTED_EXTRA_SPECS,
                          constants.EXPECTED_PROJECT_ID,
                          constants.EXPECTED_SHARE_ID,
                          constants.EXPECTED_SNAP_ID,
                          constants.EXPECTED_FPG,
                          constants.EXPECTED_VFS,
                          [constants.EXPECTED_IP_10203040])

    def test_mediator_delete_nfs_share(self):
        """Deleting an NFS share removes both exports and the fstore."""
        self.init_mediator()

        share_id = 'foo'
        osf_share_id = '-'.join(('osf', share_id))
        osf_ro_share_id = '-ro-'.join(('osf', share_id))
        fstore = osf_share_id
        self.mock_object(self.mediator, '_find_fstore',
                         mock.Mock(return_value=fstore))
        self.mock_object(self.mediator, '_delete_file_tree')
        self.mock_object(self.mediator, '_update_capacity_quotas')

        self.mediator.delete_share(constants.EXPECTED_PROJECT_ID,
                                   share_id,
                                   constants.EXPECTED_SIZE_1,
                                   constants.NFS,
                                   constants.EXPECTED_FPG,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_IP)

        expected_calls = [
            mock.call.removefshare(constants.NFS_LOWER,
                                   constants.EXPECTED_VFS,
                                   osf_share_id,
                                   fpg=constants.EXPECTED_FPG,
                                   fstore=fstore),
            mock.call.removefshare(constants.NFS_LOWER,
                                   constants.EXPECTED_VFS,
                                   osf_ro_share_id,
                                   fpg=constants.EXPECTED_FPG,
                                   fstore=fstore),
            mock.call.removefstore(constants.EXPECTED_VFS,
                                   fstore,
                                   fpg=constants.EXPECTED_FPG),
        ]
        self.mock_client.assert_has_calls(expected_calls)
        # Fstore was removed entirely, so no file-tree or quota work is done.
        self.assertFalse(self.mediator._delete_file_tree.called)
        self.assertFalse(self.mediator._update_capacity_quotas.called)

    def test_mediator_delete_share_not_found(self):
        """Deleting a share with no matching fstore is a quiet no-op."""
        self.init_mediator()

        self.mock_object(self.mediator, '_find_fstore',
                         mock.Mock(return_value=None))
        self.mock_object(self.mediator, '_delete_file_tree')
        self.mock_object(self.mediator, '_update_capacity_quotas')

        self.mediator.delete_share(constants.EXPECTED_PROJECT_ID,
                                   constants.EXPECTED_SHARE_ID,
                                   constants.EXPECTED_SIZE_1,
                                   constants.CIFS,
                                   constants.EXPECTED_FPG,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_IP_10203040)

        self.assertFalse(self.mock_client.removefshare.called)
        self.assertFalse(self.mediator._delete_file_tree.called)
        self.assertFalse(self.mediator._update_capacity_quotas.called)

    def test_mediator_delete_nfs_share_only_readonly(self):
        """Only the read-only export exists; just that one is removed."""
        self.init_mediator()

        # First lookup (rw share) misses; second (ro share) hits.
        fstores = (None, constants.EXPECTED_FSTORE)
        self.mock_object(self.mediator, '_find_fstore',
                         mock.Mock(side_effect=fstores))
        self.mock_object(self.mediator, '_delete_file_tree')
        self.mock_object(self.mediator, '_update_capacity_quotas')

        self.mediator.delete_share(constants.EXPECTED_PROJECT_ID,
                                   constants.EXPECTED_SHARE_ID,
                                   constants.EXPECTED_SIZE_1,
                                   constants.NFS,
                                   constants.EXPECTED_FPG,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_IP_10203040)

        self.mock_client.removefshare.assert_called_once_with(
            constants.NFS_LOWER,
            constants.EXPECTED_VFS,
            constants.EXPECTED_SHARE_ID,
            fpg=constants.EXPECTED_FPG,
            fstore=constants.EXPECTED_FSTORE
        )
        self.assertFalse(self.mediator._delete_file_tree.called)
        self.assertFalse(self.mediator._update_capacity_quotas.called)

    def test_mediator_delete_share_exception(self):
        """A removefshare failure is surfaced as a backend error."""
        self.init_mediator()

        self.mock_client.removefshare.side_effect = Exception(
            'removeshare fail.')

        self.assertRaises(exception.ShareBackendException,
                          self.mediator.delete_share,
                          constants.EXPECTED_PROJECT_ID,
                          constants.EXPECTED_SHARE_ID,
                          constants.EXPECTED_SIZE_1,
                          constants.CIFS,
                          constants.EXPECTED_FPG,
                          constants.EXPECTED_VFS,
                          constants.EXPECTED_IP_10203040)

        expected_calls = [
            mock.call.removefshare(constants.SMB_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   fpg=constants.EXPECTED_FPG,
                                   fstore=constants.EXPECTED_FSTORE),
        ]
        self.mock_client.assert_has_calls(expected_calls)

    def test_mediator_delete_fstore_exception(self):
        """A removefstore failure is surfaced as a backend error."""
        self.init_mediator()

        self.mock_object(self.mediator, '_find_fstore',
                         mock.Mock(return_value=constants.EXPECTED_SHARE_ID))
        self.mock_object(self.mediator, '_delete_file_tree')
        self.mock_object(self.mediator, '_update_capacity_quotas')
        self.mock_client.removefstore.side_effect = Exception(
            'removefstore fail.')

        self.assertRaises(exception.ShareBackendException,
                          self.mediator.delete_share,
                          constants.EXPECTED_PROJECT_ID,
                          constants.SHARE_ID,
                          constants.EXPECTED_SIZE_1,
                          constants.CIFS,
                          constants.EXPECTED_FPG,
                          constants.EXPECTED_VFS,
                          constants.EXPECTED_IP_10203040)

        expected_calls = [
            mock.call.removefshare(constants.SMB_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   fpg=constants.EXPECTED_FPG,
                                   fstore=constants.EXPECTED_SHARE_ID),
            mock.call.removefshare(constants.SMB_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID_RO,
                                   fpg=constants.EXPECTED_FPG,
                                   fstore=constants.EXPECTED_SHARE_ID),
            mock.call.removefstore(constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   fpg=constants.EXPECTED_FPG),
        ]
        self.mock_client.assert_has_calls(expected_calls)
        self.assertFalse(self.mediator._delete_file_tree.called)
        self.assertFalse(self.mediator._update_capacity_quotas.called)

    def test_mediator_delete_file_tree_exception(self):
        """(head only; continues past this chunk) file-tree delete failure."""
        self.init_mediator()
        mock_log = self.mock_object(hpe3parmediator, 'LOG')

        self.mock_object(self.mediator, '_find_fstore',
                         mock.Mock(return_value=constants.EXPECTED_FSTORE))
        self.mock_object(self.mediator, '_delete_file_tree',
                         mock.Mock(side_effect=Exception('test')))
        self.mock_object(self.mediator, '_update_capacity_quotas')

        self.mediator.delete_share(constants.EXPECTED_PROJECT_ID,
                                   constants.SHARE_ID,
                                   constants.EXPECTED_SIZE_1,
                                   constants.CIFS,
                                   constants.EXPECTED_FPG,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_IP_10203040)

        expected_calls = [
            mock.call.removefshare(constants.SMB_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID,
                                   fpg=constants.EXPECTED_FPG,
                                   fstore=constants.EXPECTED_FSTORE),
            mock.call.removefshare(constants.SMB_LOWER,
                                   constants.EXPECTED_VFS,
                                   constants.EXPECTED_SHARE_ID_RO,
fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE), ] self.mock_client.assert_has_calls(expected_calls) self.assertTrue(self.mediator._delete_file_tree.called) self.assertFalse(self.mediator._update_capacity_quotas.called) mock_log.warning.assert_called_once_with(mock.ANY, mock.ANY) def test_mediator_delete_cifs_share(self): self.init_mediator() self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=constants.EXPECTED_FSTORE)) self.mock_object(self.mediator, '_create_mount_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_mount_super_share', mock.Mock(return_value={})) self.mock_object(self.mediator, '_delete_share_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_unmount_share', mock.Mock(return_value={})) self.mock_object(self.mediator, '_update_capacity_quotas', mock.Mock(return_value={})) self.mediator.delete_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_1, constants.CIFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_IP_10203040) expected_calls = [ mock.call.removefshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE), mock.call.createfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SUPER_SHARE, allowip=constants.EXPECTED_MY_IP, comment=( constants.EXPECTED_SUPER_SHARE_COMMENT), fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, sharedir=''), mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SUPER_SHARE, comment=( constants.EXPECTED_SUPER_SHARE_COMMENT), allowperm=( '+' + constants.USERNAME + ':fullcontrol'), fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE), ] self.mock_client.assert_has_calls(expected_calls) expected_mount_path = constants.EXPECTED_MOUNT_PATH + ( constants.EXPECTED_SHARE_ID) expected_share_path = '/'.join((expected_mount_path, 
constants.EXPECTED_SHARE_ID)) self.mediator._create_mount_directory.assert_called_once_with( expected_mount_path) self.mediator._mount_super_share.assert_called_once_with( constants.SMB_LOWER, expected_mount_path, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_FSTORE, constants.EXPECTED_IP_10203040) self.mediator._delete_share_directory.assert_has_calls([ mock.call(expected_share_path), mock.call(expected_mount_path), ]) self.mediator._unmount_share.assert_called_once_with( expected_mount_path) self.mediator._update_capacity_quotas.assert_called_once_with( constants.EXPECTED_FSTORE, 0, constants.EXPECTED_SIZE_1, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_delete_cifs_share_and_fstore(self): self.init_mediator() self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=constants.EXPECTED_SHARE_ID)) self.mock_object(self.mediator, '_delete_file_tree') self.mock_object(self.mediator, '_update_capacity_quotas') self.mediator.delete_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_1, constants.CIFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_IP_10203040) expected_calls = [ mock.call.removefshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_SHARE_ID), mock.call.removefstore(constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) self.assertFalse(self.mediator._delete_file_tree.called) self.assertFalse(self.mediator._update_capacity_quotas.called) def test_mediator_delete_share_with_fstore_per_share_false(self): self.init_mediator() self.mediator.hpe3par_fstore_per_share = False share_size = int(constants.EXPECTED_SIZE_1) fstore_init_size = int( constants.GET_FSQUOTA['members'][0]['hardBlock']) expected_capacity = (0 - share_size) * units.Ki + fstore_init_size self.mock_object(self.mediator, 
'_find_fstore', mock.Mock(return_value=constants.EXPECTED_FSTORE)) self.mock_object(self.mediator, '_create_mount_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_mount_super_share', mock.Mock(return_value={})) self.mock_object(self.mediator, '_delete_share_directory', mock.Mock(return_value={})) self.mock_object(self.mediator, '_unmount_share', mock.Mock(return_value={})) self.mediator.delete_share(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.EXPECTED_SIZE_1, constants.CIFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_IP_10203040) expected_calls = [ mock.call.removefshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE), mock.call.createfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SUPER_SHARE, allowip=constants.EXPECTED_MY_IP, comment=( constants.EXPECTED_SUPER_SHARE_COMMENT), fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, sharedir=''), mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SUPER_SHARE, comment=( constants.EXPECTED_SUPER_SHARE_COMMENT), allowperm=( '+' + constants.USERNAME + ':fullcontrol'), fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE), mock.call.getfsquota(fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, vfs=constants.EXPECTED_VFS), mock.call.setfsquota(constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, scapacity=str(expected_capacity), hcapacity=str(expected_capacity))] self.mock_client.assert_has_calls(expected_calls) expected_mount_path = constants.EXPECTED_MOUNT_PATH + ( constants.EXPECTED_SHARE_ID) self.mediator._create_mount_directory.assert_called_with( expected_mount_path) self.mediator._mount_super_share.assert_called_with( constants.SMB_LOWER, expected_mount_path, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_FSTORE, 
constants.EXPECTED_IP_10203040) self.mediator._delete_share_directory.assert_called_with( expected_mount_path) self.mediator._unmount_share.assert_called_with( expected_mount_path) def test_mediator_create_snapshot(self): self.init_mediator() self.mediator.create_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.createfsnap(constants.EXPECTED_VFS, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SNAP_NAME, fpg=constants.EXPECTED_FPG) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_create_snapshot_not_allowed(self): self.init_mediator() self.mock_client.getfshare.return_value['members'][0]['shareDir'] = ( None) self.mock_client.getfshare.return_value['members'][0]['sharePath'] = ( 'foo/.snapshot/foo') self.assertRaises(exception.ShareBackendException, self.mediator.create_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_create_snapshot_share_not_found(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) self.assertRaises(exception.ShareBackendException, self.mediator.create_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) mock_find_fshare.assert_called_once_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_create_snapshot_backend_exception(self): self.init_mediator() # createfsnap exception self.mock_client.createfsnap.side_effect = Exception( 'createfsnap fail.') self.assertRaises(exception.ShareBackendException, self.mediator.create_snapshot, constants.EXPECTED_PROJECT_ID, 
constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_delete_snapshot(self): self.init_mediator() expected_name_from_array = 'name-from-array' self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [ { 'snapName': expected_name_from_array, 'fstoreName': constants.EXPECTED_PROJECT_ID, } ], 'message': None } self.mock_client.getfshare.side_effect = [ # some typical independent NFS share (path) and SMB share (dir) { 'total': 1, 'members': [{'sharePath': '/anyfpg/anyvfs/anyfstore'}] }, { 'total': 1, 'members': [{'shareDir': []}], } ] self.mediator.delete_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_NAME, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_PROJECT_ID), mock.call.getfshare(constants.NFS_LOWER, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID), mock.call.getfshare(constants.SMB_LOWER, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID), mock.call.removefsnap(constants.EXPECTED_VFS, constants.EXPECTED_PROJECT_ID, fpg=constants.EXPECTED_FPG, snapname=expected_name_from_array), mock.call.startfsnapclean(constants.EXPECTED_FPG, reclaimStrategy='maxspeed') ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_delete_snapshot_not_found(self): self.init_mediator() self.mock_client.getfsnap.return_value = { 'total': 0, 'members': [], } self.mediator.delete_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_NAME, vfs=constants.EXPECTED_VFS, 
fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_SHARE_ID), ] # Code coverage for early exit when nothing to delete. self.mock_client.assert_has_calls(expected_calls) self.assertFalse(self.mock_client.getfshare.called) self.assertFalse(self.mock_client.removefsnap.called) self.assertFalse(self.mock_client.startfsnapclean.called) def test_mediator_delete_snapshot_shared_nfs(self): self.init_mediator() # Mock a share under this snapshot for NFS snapshot_dir = '.snapshot/DT_%s' % constants.EXPECTED_SNAP_NAME snapshot_path = '%s/%s' % (constants.EXPECTED_SHARE_PATH, snapshot_dir) self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [{'snapName': constants.EXPECTED_SNAP_NAME}] } self.mock_client.getfshare.side_effect = [ # some typical independent NFS share (path) and SMB share (dir) { 'total': 1, 'members': [{'sharePath': snapshot_path}], }, { 'total': 0, 'members': [], } ] self.assertRaises(exception.Invalid, self.mediator.delete_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_delete_snapshot_shared_smb(self): self.init_mediator() # Mock a share under this snapshot for SMB snapshot_dir = '.snapshot/DT_%s' % constants.EXPECTED_SNAP_NAME self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [{'snapName': constants.EXPECTED_SNAP_NAME}] } self.mock_client.getfshare.side_effect = [ # some typical independent NFS share (path) and SMB share (dir) { 'total': 1, 'members': [{'sharePath': constants.EXPECTED_SHARE_PATH}], }, { 'total': 1, 'members': [{'shareDir': snapshot_dir}], } ] self.assertRaises(exception.Invalid, self.mediator.delete_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def _assert_delete_snapshot_raises(self): self.assertRaises(exception.ShareBackendException, 
self.mediator.delete_snapshot, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_delete_snapshot_backend_exceptions(self): self.init_mediator() # getfsnap exception self.mock_client.getfsnap.side_effect = Exception('getfsnap fail.') self._assert_delete_snapshot_raises() # getfsnap OK self.mock_client.getfsnap.side_effect = None self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [{'snapName': constants.EXPECTED_SNAP_NAME, 'fstoreName': constants.EXPECTED_FSTORE}] } # getfshare exception self.mock_client.getfshare.side_effect = Exception('getfshare fail.') self._assert_delete_snapshot_raises() # getfshare OK def mock_fshare(*args, **kwargs): if args[0] == constants.NFS_LOWER: return { 'total': 1, 'members': [{'sharePath': '/anyfpg/anyvfs/anyfstore', 'fstoreName': constants.EXPECTED_FSTORE}] } else: return { 'total': 1, 'members': [{'shareDir': [], 'fstoreName': constants.EXPECTED_FSTORE}] } self.mock_client.getfshare.side_effect = mock_fshare # removefsnap exception self.mock_client.removefsnap.side_effect = Exception( 'removefsnap fail.') self._assert_delete_snapshot_raises() # removefsnap OK self.mock_client.removefsnap.side_effect = None self.mock_client.removefsnap.return_value = [] # startfsnapclean exception (logged, not raised) self.mock_client.startfsnapclean.side_effect = Exception( 'startfsnapclean fail.') mock_log = self.mock_object(hpe3parmediator, 'LOG') self.mediator.delete_snapshot(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_NAME, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.getfsnap('*_%s' % constants.EXPECTED_SNAP_NAME, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_FSTORE), mock.call.getfshare(constants.NFS_LOWER, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, 
fstore=constants.EXPECTED_FSTORE), mock.call.getfshare(constants.SMB_LOWER, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_FSTORE), mock.call.removefsnap(constants.EXPECTED_VFS, constants.EXPECTED_FSTORE, fpg=constants.EXPECTED_FPG, snapname=constants.EXPECTED_SNAP_NAME), mock.call.startfsnapclean(constants.EXPECTED_FPG, reclaimStrategy='maxspeed'), ] self.mock_client.assert_has_calls(expected_calls) self.assertTrue(mock_log.debug.called) self.assertTrue(mock_log.exception.called) @ddt.data('volname.1', ['volname.2', 'volname.3']) def test_mediator_get_fpg_status(self, volume_name_or_list): """Mediator converts client stats to capacity result.""" expected_capacity = constants.EXPECTED_SIZE_2 expected_free = constants.EXPECTED_SIZE_1 self.init_mediator() self.mock_client.getfpg.return_value = { 'total': 1, 'members': [ { 'capacityKiB': str(expected_capacity * units.Mi), 'availCapacityKiB': str(expected_free * units.Mi), 'vvs': volume_name_or_list, } ], 'message': None, } self.mock_client.getfsquota.return_value = { 'total': 3, 'members': [ {'hardBlock': 1 * units.Ki}, {'hardBlock': 2 * units.Ki}, {'hardBlock': 3 * units.Ki}, ], 'message': None, } self.mock_client.getVolume.return_value = { 'provisioningType': hpe3parmediator.DEDUPE} expected_result = { 'pool_name': constants.EXPECTED_FPG, 'free_capacity_gb': expected_free, 'hpe3par_flash_cache': False, 'hp3par_flash_cache': False, 'dedupe': True, 'thin_provisioning': True, 'total_capacity_gb': expected_capacity, 'provisioned_capacity_gb': 6, } result = self.mediator.get_fpg_status(constants.EXPECTED_FPG) self.assertEqual(expected_result, result) expected_calls = [ mock.call.getfpg(constants.EXPECTED_FPG) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_fpg_status_exception(self): """Exception during get_fpg_status call to getfpg.""" self.init_mediator() self.mock_client.getfpg.side_effect = constants.FAKE_EXCEPTION 
self.assertRaises(exception.ShareBackendException, self.mediator.get_fpg_status, constants.EXPECTED_FPG) expected_calls = [mock.call.getfpg(constants.EXPECTED_FPG)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_fpg_status_error(self): """Unexpected result from getfpg during get_fpg_status.""" self.init_mediator() self.mock_client.getfpg.return_value = {'total': 0} self.assertRaises(exception.ShareBackendException, self.mediator.get_fpg_status, constants.EXPECTED_FPG) expected_calls = [mock.call.getfpg(constants.EXPECTED_FPG)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_fpg_status_bad_prov_type(self): """Test get_fpg_status handling of unexpected provisioning type.""" self.init_mediator() self.mock_client.getfpg.return_value = { 'total': 1, 'members': [ { 'capacityKiB': '1', 'availCapacityKiB': '1', 'vvs': 'foo', } ], 'message': None, } self.mock_client.getVolume.return_value = { 'provisioningType': 'BOGUS'} self.assertRaises(exception.ShareBackendException, self.mediator.get_fpg_status, constants.EXPECTED_FPG) expected_calls = [mock.call.getfpg(constants.EXPECTED_FPG)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_provisioned_error(self): """Test error during get provisioned GB.""" self.init_mediator() error_return = {'message': 'Some error happened.'} self.mock_client.getfsquota.return_value = error_return self.assertRaises(exception.ShareBackendException, self.mediator.get_provisioned_gb, constants.EXPECTED_FPG) expected_calls = [mock.call.getfsquota(fpg=constants.EXPECTED_FPG)] self.mock_client.assert_has_calls(expected_calls) def test_mediator_get_provisioned_exception(self): """Test exception during get provisioned GB.""" self.init_mediator() self.mock_client.getfsquota.side_effect = constants.FAKE_EXCEPTION self.assertRaises(exception.ShareBackendException, self.mediator.get_provisioned_gb, constants.EXPECTED_FPG) expected_calls = [mock.call.getfsquota(fpg=constants.EXPECTED_FPG)] 
self.mock_client.assert_has_calls(expected_calls) def test_update_access_resync_rules_nfs(self): self.init_mediator() getfshare_result = { 'shareName': constants.EXPECTED_SHARE_NAME, 'fstoreName': constants.EXPECTED_FSTORE, 'clients': [constants.EXPECTED_IP_127], 'comment': constants.EXPECTED_COMMENT, } self.mock_client.getfshare.return_value = { 'total': 1, 'members': [getfshare_result], 'message': None, } self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], None, None, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare( constants.NFS_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_NAME, clientip='+' + constants.EXPECTED_IP_1234, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, comment=constants.EXPECTED_COMMENT), ] self.mock_client.assert_has_calls(expected_calls) def test_update_access_resync_rules_cifs(self): self.init_mediator() getfshare_result = { 'shareName': constants.EXPECTED_SHARE_NAME, 'fstoreName': constants.EXPECTED_FSTORE, 'allowPerm': [['foo_user', 'fullcontrol']], 'allowIP': '', 'comment': constants.EXPECTED_COMMENT, } self.mock_client.getfshare.return_value = { 'total': 1, 'members': [getfshare_result], 'message': None, } self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_CIFS], None, None, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare( constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_NAME, allowperm='+' + constants.USERNAME + ':fullcontrol', fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, comment=constants.EXPECTED_COMMENT), ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_allow_ip_ro_access_cifs_error(self): self.init_mediator() self.assertRaises(exception.InvalidShareAccess, 
self.mediator.update_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_IP_RO], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) @ddt.data(constants.CIFS, constants.NFS) def test_mediator_allow_rw_snapshot_error(self, proto): self.init_mediator() getfshare_result = { 'shareName': 'foo_ro_name', 'fstoreName': 'foo_fstore', 'comment': 'foo_comment', } path = 'foo/.snapshot/foo' if proto == constants.NFS: getfshare_result['sharePath'] = path else: getfshare_result['shareDir'] = path self.mock_client.getfshare.return_value = { 'total': 1, 'members': [getfshare_result], 'message': None, } self.assertRaises(exception.InvalidShareAccess, self.mediator.update_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_IP], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) @ddt.data((constants.READ_WRITE, True), (constants.READ_WRITE, False), (constants.READ_ONLY, True), (constants.READ_ONLY, False)) @ddt.unpack def test_mediator_allow_user_access_cifs(self, access_level, use_other): """"Allow user access to cifs share.""" self.init_mediator() if use_other: # Don't find share until second attempt. 
findings = (None, self.mock_client.getfshare.return_value['members'][0]) mock_find_fshare = self.mock_object( self.mediator, '_find_fshare', mock.Mock(side_effect=findings)) if access_level == constants.READ_ONLY: expected_allowperm = '+%s:read' % constants.USERNAME else: expected_allowperm = '+%s:fullcontrol' % constants.USERNAME constants.ADD_RULE_USER['access_level'] = access_level self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_CIFS], [constants.ADD_RULE_USER], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, allowperm=expected_allowperm, comment=constants.EXPECTED_COMMENT, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE) ] self.mock_client.assert_has_calls(expected_calls) if use_other: readonly = access_level == constants.READ_ONLY expected_find_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=readonly), mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=not readonly), ] mock_find_fshare.assert_has_calls(expected_find_calls) @ddt.data(constants.CIFS, constants.NFS) def test_mediator_deny_rw_snapshot_error(self, proto): self.init_mediator() getfshare_result = { 'shareName': 'foo_ro_name', 'fstoreName': 'foo_fstore', 'comment': 'foo_comment', } path = 'foo/.snapshot/foo' if proto == constants.NFS: getfshare_result['sharePath'] = path else: getfshare_result['shareDir'] = path self.mock_client.getfshare.return_value = { 'total': 1, 'members': [getfshare_result], 'message': None, } mock_log = self.mock_object(hpe3parmediator, 'LOG') self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, proto, 
constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [], [constants.DELETE_RULE_IP], constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.assertFalse(self.mock_client.setfshare.called) self.assertTrue(mock_log.error.called) def test_mediator_deny_user_access_cifs(self): """"Deny user access to cifs share.""" self.init_mediator() expected_denyperm = '-%s:fullcontrol' % constants.USERNAME self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_CIFS], [], [constants.DELETE_RULE_USER], constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, allowperm=expected_denyperm, comment=constants.EXPECTED_COMMENT, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_allow_ip_access_cifs(self): """"Allow ip access to cifs share.""" self.init_mediator() expected_allowip = '+%s' % constants.EXPECTED_IP_1234 self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_IP], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, allowip=expected_allowip, comment=constants.EXPECTED_COMMENT, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_deny_ip_access_cifs(self): """"Deny ip access to cifs share.""" self.init_mediator() expected_denyip = '-%s' % constants.EXPECTED_IP_1234 self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [], [constants.DELETE_RULE_IP], 
constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.SMB_LOWER, constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, allowip=expected_denyip, comment=constants.EXPECTED_COMMENT, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE) ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_allow_ip_access_nfs(self): """"Allow ip access to nfs share.""" self.init_mediator() already_exists = (hpe3parmediator.IP_ALREADY_EXISTS % constants.EXPECTED_IP_1234) self.mock_client.setfshare.side_effect = ([], [already_exists]) expected_clientip = '+%s' % constants.EXPECTED_IP_1234 for _ in range(2): # Test 2nd allow w/ already exists message. self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_IP], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = 2 * [ mock.call.setfshare(constants.NFS.lower(), constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, clientip=expected_clientip, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, comment=constants.EXPECTED_COMMENT), ] self.mock_client.assert_has_calls(expected_calls, any_order=True) def test_mediator_deny_ip_access_nfs(self): """"Deny ip access to nfs share.""" self.init_mediator() expected_clientip = '-%s' % constants.EXPECTED_IP_1234 self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [], [constants.DELETE_RULE_IP], constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.NFS.lower(), constants.EXPECTED_VFS, constants.EXPECTED_SHARE_ID, clientip=expected_clientip, fpg=constants.EXPECTED_FPG, fstore=constants.EXPECTED_FSTORE, comment=constants.EXPECTED_COMMENT) ] self.mock_client.assert_has_calls(expected_calls) def 
test_mediator_deny_ip_ro_access_nfs_legacy(self): self.init_mediator() # Fail to find share with new naming. Succeed finding legacy naming. legacy = { 'shareName': 'foo_name', 'fstoreName': 'foo_fstore', 'comment': 'foo_comment', 'sharePath': 'foo/.snapshot/foo', } fshares = (None, legacy) mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(side_effect=fshares)) expected_clientip = '-%s' % constants.EXPECTED_IP_1234 self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [], [constants.DELETE_RULE_IP_RO], constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.setfshare(constants.NFS.lower(), constants.EXPECTED_VFS, legacy['shareName'], clientip=expected_clientip, fpg=constants.EXPECTED_FPG, fstore=legacy['fstoreName'], comment=legacy['comment']) ] self.mock_client.assert_has_calls(expected_calls) expected_find_fshare_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=True), mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), ] mock_find_fshare.assert_has_calls(expected_find_fshare_calls) def test_mediator_allow_user_access_nfs(self): """"Allow user access to nfs share is not supported.""" self.init_mediator() self.assertRaises(exception.HPE3ParInvalid, self.mediator.update_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_USER], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_allow_access_bad_proto(self): """"Allow user access to unsupported protocol.""" self.init_mediator() self.assertRaises(exception.InvalidShareAccess, self.mediator.update_access, 
constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, 'unsupported_other_protocol', constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_IP], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_allow_access_bad_type(self): """"Allow user access to unsupported access type.""" self.init_mediator() self.assertRaises(exception.InvalidInput, self.mediator.update_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.CIFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_BAD_TYPE], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) def test_mediator_allow_access_missing_nfs_share(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) self.assertRaises(exception.HPE3ParInvalid, self.mediator.update_access, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_IP], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=True), ] mock_find_fshare.assert_has_calls(expected_calls) def test_mediator_allow_nfs_ro_access(self): self.init_mediator() getfshare_result = { 'shareName': 'foo_ro_name', 'fstoreName': 'foo_fstore', 'shareDir': 'foo_dir', 'comment': 'foo_comment', } findings = (None, getfshare_result) mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(side_effect=findings)) self.mock_client.getfshare.return_value = { 'total': 1, 'members': [getfshare_result], 'message': None, } share_id = 'foo' self.mediator.update_access(constants.EXPECTED_PROJECT_ID, 
share_id, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [constants.ADD_RULE_IP_RO], [], constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, share_id, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=True), mock.call(constants.EXPECTED_PROJECT_ID, share_id, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), ] mock_find_fshare.assert_has_calls(expected_calls) ro_share = 'osf-ro-%s' % share_id expected_calls = [ mock.call.createfshare(constants.NFS_LOWER, constants.EXPECTED_VFS, ro_share, clientip=constants.EXPECTED_IP_127_2, comment=getfshare_result['comment'], fpg=constants.EXPECTED_FPG, fstore=getfshare_result['fstoreName'], options='ro,no_root_squash,insecure', sharedir=getfshare_result['shareDir']), mock.call.getfshare(constants.NFS_LOWER, ro_share, fstore=getfshare_result['fstoreName'], fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS), mock.call.setfshare(constants.NFS_LOWER, constants.EXPECTED_VFS, getfshare_result['shareName'], clientip='+%s' % constants.EXPECTED_IP_1234, comment=getfshare_result['comment'], fpg=constants.EXPECTED_FPG, fstore=getfshare_result['fstoreName']), ] self.mock_client.assert_has_calls(expected_calls) def test_mediator_deny_access_missing_nfs_share(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) self.mediator.update_access(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_EXTRA_SPECS, [constants.ACCESS_RULE_NFS], [], [constants.DELETE_RULE_IP], constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), ] mock_find_fshare.assert_has_calls(expected_calls) @ddt.data((hpe3parmediator.ALLOW, 'ip', True, 
['IP address foo already exists']), (hpe3parmediator.ALLOW, 'ip', False, ['Another share already exists for this path and client']), (hpe3parmediator.ALLOW, 'user', True, ['"allow" permission already exists for "foo"']), (hpe3parmediator.DENY, 'ip', True, ['foo does not exist, cannot be removed']), (hpe3parmediator.DENY, 'user', True, ['foo:fullcontrol" does not exist, cannot delete it.']), (hpe3parmediator.DENY, 'user', False, ['SMB share osf-foo does not exist']), (hpe3parmediator.ALLOW, 'ip', True, ['\r']), (hpe3parmediator.ALLOW, 'user', True, ['\r']), (hpe3parmediator.DENY, 'ip', True, ['\r']), (hpe3parmediator.DENY, 'user', True, ['\r']), (hpe3parmediator.ALLOW, 'ip', True, []), (hpe3parmediator.ALLOW, 'user', True, []), (hpe3parmediator.DENY, 'ip', True, []), (hpe3parmediator.DENY, 'user', True, [])) @ddt.unpack def test_ignore_benign_access_results(self, access, access_type, expect_false, results): returned = self.mediator.ignore_benign_access_results( access, access_type, 'foo', results) if expect_false: self.assertFalse(returned) else: self.assertEqual(results, returned) @ddt.data((2, 1, True), (2, 1, False), (1, 2, True), (1, 2, False), (1024, 2048, True), (1024, 2048, False), (2048, 1024, True), (2048, 1024, False), (99999999, 1, True), (99999999, 1, False), (1, 99999999, True), (1, 99999999, False), ) @ddt.unpack def test_mediator_resize_share(self, new_size, old_size, fstore_per_share): self.init_mediator() fstore = 'foo_fstore' mock_find_fstore = self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=fstore)) fstore_init_size = int( constants.GET_FSQUOTA['members'][0]['hardBlock']) self.mediator.hpe3par_fstore_per_share = fstore_per_share if fstore_per_share: expected_capacity = new_size * units.Ki else: expected_capacity = ( (new_size - old_size) * units.Ki + fstore_init_size) self.mediator.resize_share( constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, new_size, old_size, constants.EXPECTED_FPG, 
constants.EXPECTED_VFS) mock_find_fstore.assert_called_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=False) self.mock_client.setfsquota.assert_called_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, fstore=fstore, scapacity=str(expected_capacity), hcapacity=str(expected_capacity)) @ddt.data(['This is a fake setfsquota returned error'], Exception('boom')) def test_mediator_resize_share_setfsquota_side_effects(self, side_effect): self.init_mediator() fstore_init_size = int( constants.GET_FSQUOTA['members'][0]['hardBlock']) fstore = 'foo_fstore' new_size = 2 old_size = 1 expected_capacity = (new_size - old_size) * units.Ki + fstore_init_size mock_find_fstore = self.mock_object(self.mediator, '_find_fstore', mock.Mock(return_value=fstore)) self.mock_client.setfsquota.side_effect = side_effect self.assertRaises(exception.ShareBackendException, self.mediator.resize_share, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, new_size, old_size, constants.EXPECTED_FPG, constants.EXPECTED_VFS) mock_find_fstore.assert_called_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=False) self.mock_client.setfsquota.assert_called_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, fstore=fstore, scapacity=str(expected_capacity), hcapacity=str(expected_capacity)) def test_mediator_resize_share_not_found(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) self.assertRaises(exception.InvalidShare, self.mediator.resize_share, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, 999, 99, constants.EXPECTED_FPG, constants.EXPECTED_VFS) mock_find_fshare.assert_called_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, 
constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=False) @ddt.data((('nfs', 'NFS', 'nFs'), 'smb'), (('smb', 'SMB', 'SmB', 'CIFS', 'cifs', 'CiFs'), 'nfs')) @ddt.unpack def test_other_protocol(self, protocols, expected_other): for protocol in protocols: self.assertEqual(expected_other, hpe3parmediator.HPE3ParMediator().other_protocol( protocol)) @ddt.data('', 'bogus') def test_other_protocol_exception(self, protocol): self.assertRaises(exception.InvalidShareAccess, hpe3parmediator.HPE3ParMediator().other_protocol, protocol) @ddt.data(('osf-uid', None, None, 'osf-uid'), ('uid', None, True, 'osf-ro-uid'), ('uid', None, False, 'osf-uid'), ('uid', 'smb', True, 'osf-smb-ro-uid'), ('uid', 'smb', False, 'osf-smb-uid'), ('uid', 'nfs', True, 'osf-nfs-ro-uid'), ('uid', 'nfs', False, 'osf-nfs-uid')) @ddt.unpack def test_ensure_prefix(self, uid, protocol, readonly, expected): self.assertEqual(expected, hpe3parmediator.HPE3ParMediator().ensure_prefix( uid, protocol=protocol, readonly=readonly)) def test_find_fstore_search(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare', mock.Mock(return_value=None)) result = self.mediator._find_fstore(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) mock_find_fshare.assert_called_once_with(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=False) self.assertIsNone(result) def test_find_fstore_search_xproto(self): self.init_mediator() mock_find_fshare = self.mock_object(self.mediator, '_find_fshare_with_proto', mock.Mock(return_value=None)) result = self.mediator._find_fstore(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, allow_cross_protocol=True) expected_calls = [ mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, 
constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), mock.call(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, readonly=False), ] mock_find_fshare.assert_has_calls(expected_calls) self.assertIsNone(result) def test_find_fshare_search(self): self.init_mediator() self.mock_client.getfshare.return_value = {} result = self.mediator._find_fshare(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.getfshare(constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID), mock.call.getfshare(constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_SHARE_ID), mock.call.getfshare(constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG), mock.call.getfshare(constants.NFS_LOWER, constants.EXPECTED_SHARE_ID), ] self.mock_client.assert_has_calls(expected_calls) self.assertIsNone(result) def test_find_fshare_exception(self): self.init_mediator() self.mock_client.getfshare.side_effect = Exception('test unexpected') self.assertRaises(exception.ShareBackendException, self.mediator._find_fshare, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.getfshare.assert_called_once_with( constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID) def test_find_fshare_hit(self): self.init_mediator() expected_result = {'shareName': 'hit'} self.mock_client.getfshare.return_value = { 'total': 1, 'members': [expected_result] } result = self.mediator._find_fshare(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, 
constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.getfshare.assert_called_once_with( constants.NFS_LOWER, constants.EXPECTED_SHARE_ID, fpg=constants.EXPECTED_FPG, vfs=constants.EXPECTED_VFS, fstore=constants.EXPECTED_PROJECT_ID), self.assertEqual(expected_result, result) def test_find_fsnap_search(self): self.init_mediator() self.mock_client.getfsnap.return_value = {} result = self.mediator._find_fsnap(constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_snap_pattern = '*_%s' % constants.EXPECTED_SNAP_ID expected_calls = [ mock.call.getfsnap(expected_snap_pattern, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_PROJECT_ID), mock.call.getfsnap(expected_snap_pattern, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_SHARE_ID), mock.call.getfsnap(expected_snap_pattern, fpg=constants.EXPECTED_FPG, pat=True), mock.call.getfsnap(expected_snap_pattern, pat=True), ] self.mock_client.assert_has_calls(expected_calls) self.assertIsNone(result) def test_find_fsnap_exception(self): self.init_mediator() self.mock_client.getfsnap.side_effect = Exception('test unexpected') self.assertRaises(exception.ShareBackendException, self.mediator._find_fsnap, constants.EXPECTED_PROJECT_ID, constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_snap_pattern = '*_%s' % constants.EXPECTED_SNAP_ID self.mock_client.getfsnap.assert_called_once_with( expected_snap_pattern, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_PROJECT_ID) def test_find_fsnap_hit(self): self.init_mediator() expected_result = {'snapName': 'hit'} self.mock_client.getfsnap.return_value = { 'total': 1, 'members': [expected_result] } result = self.mediator._find_fsnap(constants.EXPECTED_PROJECT_ID, 
constants.EXPECTED_SHARE_ID, constants.NFS, constants.EXPECTED_SNAP_ID, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_snap_pattern = '*_%s' % constants.EXPECTED_SNAP_ID self.mock_client.getfsnap.assert_called_once_with( expected_snap_pattern, vfs=constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, pat=True, fstore=constants.EXPECTED_PROJECT_ID) self.assertEqual(expected_result, result) def test_fsip_exists(self): self.init_mediator() # Make the result member a superset of the fsip items. fsip_plus = constants.EXPECTED_FSIP.copy() fsip_plus.update({'k': 'v', 'k2': 'v2'}) self.mock_client.getfsip.return_value = { 'total': 3, 'members': [{'bogus1': 1}, fsip_plus, {'bogus2': '2'}] } self.assertTrue(self.mediator.fsip_exists(constants.EXPECTED_FSIP)) self.mock_client.getfsip.assert_called_once_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG) def test_fsip_does_not_exist(self): self.init_mediator() self.mock_client.getfsip.return_value = { 'total': 3, 'members': [{'bogus1': 1}, constants.OTHER_FSIP, {'bogus2': '2'}] } self.assertFalse(self.mediator.fsip_exists(constants.EXPECTED_FSIP)) self.mock_client.getfsip.assert_called_once_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG) def test_fsip_exists_exception(self): self.init_mediator() class FakeException(Exception): pass self.mock_client.getfsip.side_effect = FakeException() self.assertRaises(exception.ShareBackendException, self.mediator.fsip_exists, constants.EXPECTED_FSIP) self.mock_client.getfsip.assert_called_once_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG) def test_create_fsip_success(self): self.init_mediator() # Make the result member a superset of the fsip items. 
fsip_plus = constants.EXPECTED_FSIP.copy() fsip_plus.update({'k': 'v', 'k2': 'v2'}) self.mock_client.getfsip.return_value = { 'total': 3, 'members': [{'bogus1': 1}, fsip_plus, {'bogus2': '2'}] } self.mediator.create_fsip(constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VLAN_TAG, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.getfsip.assert_called_once_with( constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG) expected_calls = [ mock.call.createfsip(constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, vlantag=constants.EXPECTED_VLAN_TAG), mock.call.getfsip(constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) def test_create_fsip_exception(self): self.init_mediator() class FakeException(Exception): pass self.mock_client.createfsip.side_effect = FakeException() self.assertRaises(exception.ShareBackendException, self.mediator.create_fsip, constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VLAN_TAG, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.createfsip.assert_called_once_with( constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, vlantag=constants.EXPECTED_VLAN_TAG) def test_create_fsip_get_none(self): self.init_mediator() self.mock_client.getfsip.return_value = {'members': []} self.assertRaises(exception.ShareBackendException, self.mediator.create_fsip, constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VLAN_TAG, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.createfsip(constants.EXPECTED_IP_1234, constants.EXPECTED_SUBNET, constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG, vlantag=constants.EXPECTED_VLAN_TAG), mock.call.getfsip(constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) def test_remove_fsip_success(self): 
self.init_mediator() self.mock_client.getfsip.return_value = { 'members': [constants.OTHER_FSIP] } self.mediator.remove_fsip(constants.EXPECTED_IP_1234, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.removefsip(constants.EXPECTED_VFS, constants.EXPECTED_IP_1234, fpg=constants.EXPECTED_FPG), mock.call.getfsip(constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) @ddt.data(('ip', None), ('ip', ''), (None, 'vfs'), ('', 'vfs'), (None, None), ('', '')) @ddt.unpack def test_remove_fsip_without_ip_or_vfs(self, ip, vfs): self.init_mediator() self.mediator.remove_fsip(ip, constants.EXPECTED_FPG, vfs) self.assertFalse(self.mock_client.removefsip.called) def test_remove_fsip_not_gone(self): self.init_mediator() self.mock_client.getfsip.return_value = { 'members': [constants.EXPECTED_FSIP] } self.assertRaises(exception.ShareBackendException, self.mediator.remove_fsip, constants.EXPECTED_IP_1234, constants.EXPECTED_FPG, constants.EXPECTED_VFS) expected_calls = [ mock.call.removefsip(constants.EXPECTED_VFS, constants.EXPECTED_IP_1234, fpg=constants.EXPECTED_FPG), mock.call.getfsip(constants.EXPECTED_VFS, fpg=constants.EXPECTED_FPG), ] self.mock_client.assert_has_calls(expected_calls) def test_remove_fsip_exception(self): self.init_mediator() class FakeException(Exception): pass self.mock_client.removefsip.side_effect = FakeException() self.assertRaises(exception.ShareBackendException, self.mediator.remove_fsip, constants.EXPECTED_IP_1234, constants.EXPECTED_FPG, constants.EXPECTED_VFS) self.mock_client.removefsip.assert_called_once_with( constants.EXPECTED_VFS, constants.EXPECTED_IP_1234, fpg=constants.EXPECTED_FPG) def test__create_mount_directory(self): self.init_mediator() mount_location = '/mnt/foo' self.mediator._create_mount_directory(mount_location) utils.execute.assert_called_with('mkdir', mount_location, run_as_root=True) def test__create_mount_directory_error(self): self.init_mediator() 
self.mock_object(utils, 'execute', mock.Mock(side_effect=Exception('mkdir error.'))) mock_log = self.mock_object(hpe3parmediator, 'LOG') mount_location = '/mnt/foo' self.mediator._create_mount_directory(mount_location) utils.execute.assert_called_with('mkdir', mount_location, run_as_root=True) # Warning is logged (no exception thrown). self.assertTrue(mock_log.warning.called) def test__mount_super_share(self): self.init_mediator() # Test mounting NFS share. protocol = 'nfs' mount_location = '/mnt/foo' fpg = 'foo-fpg' vfs = 'bar-vfs' fstore = 'fstore' mount_path = '%s:/%s/%s/%s/' % (constants.EXPECTED_IP_10203040, fpg, vfs, fstore) self.mediator._mount_super_share(protocol, mount_location, fpg, vfs, fstore, constants.EXPECTED_IP_10203040) utils.execute.assert_called_with('mount', '-t', protocol, mount_path, mount_location, run_as_root=True) # Test mounting CIFS share. protocol = 'smb' mount_path = '//%s/%s/' % (constants.EXPECTED_IP_10203040, constants.EXPECTED_SUPER_SHARE) user = 'username=%s,password=%s,domain=%s' % ( constants.USERNAME, constants.PASSWORD, constants.EXPECTED_CIFS_DOMAIN) self.mediator._mount_super_share(protocol, mount_location, fpg, vfs, fstore, constants.EXPECTED_IP_10203040) utils.execute.assert_called_with('mount', '-t', 'cifs', mount_path, mount_location, '-o', user, run_as_root=True) def test__mount_super_share_error(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(side_effect=Exception('mount error.'))) mock_log = self.mock_object(hpe3parmediator, 'LOG') protocol = 'nfs' mount_location = '/mnt/foo' fpg = 'foo-fpg' vfs = 'bar-vfs' fstore = 'fstore' self.mediator._mount_super_share(protocol, mount_location, fpg, vfs, fstore, constants.EXPECTED_IP_10203040) # Warning is logged (no exception thrown). 
self.assertTrue(mock_log.warning.called) def test__delete_share_directory(self): self.init_mediator() mount_location = '/mnt/foo' self.mediator._delete_share_directory(mount_location) utils.execute.assert_called_with('rm', '-rf', mount_location, run_as_root=True) def test__delete_share_directory_error(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(side_effect=Exception('rm error.'))) mock_log = self.mock_object(hpe3parmediator, 'LOG') mount_location = '/mnt/foo' self.mediator._delete_share_directory(mount_location) # Warning is logged (no exception thrown). self.assertTrue(mock_log.warning.called) def test__unmount_share(self): self.init_mediator() mount_dir = '/mnt/foo' self.mediator._unmount_share(mount_dir) utils.execute.assert_called_with('umount', mount_dir, run_as_root=True) def test__unmount_share_error(self): self.init_mediator() self.mock_object(utils, 'execute', mock.Mock(side_effect=Exception('umount error.'))) mock_log = self.mock_object(hpe3parmediator, 'LOG') mount_dir = '/mnt/foo' self.mediator._unmount_share(mount_dir) # Warning is logged (no exception thrown). self.assertTrue(mock_log.warning.called) def test__delete_file_tree_no_config_options(self): self.init_mediator() mock_log = self.mock_object(hpe3parmediator, 'LOG') self.mediator.hpe3par_cifs_admin_access_username = None self.mediator._delete_file_tree( constants.EXPECTED_SHARE_ID, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_FSTORE, constants.EXPECTED_SHARE_IP) # Warning is logged (no exception thrown). 
self.assertTrue(mock_log.warning.called) def test__create_super_share_createfshare_exception(self): self.init_mediator() self.mock_client.createfshare.side_effect = ( Exception("createfshare error.")) self.assertRaises( exception.ShareBackendException, self.mediator._create_super_share, constants.NFS_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_FSTORE) def test__create_super_share_setfshare_exception(self): self.init_mediator() self.mock_client.setfshare.side_effect = ( Exception("setfshare error.")) self.assertRaises( exception.ShareBackendException, self.mediator._create_super_share, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_FSTORE) def test__revoke_admin_smb_access_error(self): self.init_mediator() self.mock_client.setfshare.side_effect = ( Exception("setfshare error")) self.assertRaises( exception.ShareBackendException, self.mediator._revoke_admin_smb_access, constants.SMB_LOWER, constants.EXPECTED_FPG, constants.EXPECTED_VFS, constants.EXPECTED_FSTORE, constants.EXPECTED_COMMENT) def test_build_export_locations_bad_protocol(self): self.assertRaises(exception.InvalidShareAccess, self.mediator.build_export_locations, "BOGUS", [constants.EXPECTED_IP_1234], constants.EXPECTED_SHARE_PATH) def test_build_export_locations_bad_ip(self): self.assertRaises(exception.InvalidInput, self.mediator.build_export_locations, constants.NFS, None, None) def test_build_export_locations_bad_path(self): self.assertRaises(exception.InvalidInput, self.mediator.build_export_locations, constants.NFS, [constants.EXPECTED_IP_1234], None) class OptionMatcher(object): """Options string order can vary. 
Compare as lists.""" def __init__(self, assert_func, expected_string): self.assert_func = assert_func self.expected = expected_string.split(',') def __eq__(self, actual_string): actual = actual_string.split(',') self.assert_func(sorted(self.expected), sorted(actual)) return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0216703 manila-21.0.0/manila/tests/share/drivers/huawei/0000775000175000017500000000000000000000000021523 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/huawei/__init__.py0000664000175000017500000000000000000000000023622 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/huawei/test_huawei_nas.py0000664000175000017500000057710100000000000025272 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the Huawei nas driver module.""" import os import requests import shutil import tempfile import time from unittest import mock import xml.dom.minidom import ddt from defusedxml import ElementTree as ET from oslo_serialization import jsonutils from manila.common import constants as common_constants from manila import context from manila.data import utils as data_utils from manila import db from manila import exception from manila import rpc from manila.share import configuration as conf from manila.share.drivers.huawei import constants from manila.share.drivers.huawei import huawei_nas from manila.share.drivers.huawei.v3 import connection from manila.share.drivers.huawei.v3 import helper from manila.share.drivers.huawei.v3 import replication from manila.share.drivers.huawei.v3 import rpcapi from manila.share.drivers.huawei.v3 import smartx from manila import test from manila import utils def fake_sleep(time): pass def data_session(url): if url == "/xx/sessions": data = """{"error":{"code":0}, "data":{"username":"admin", "iBaseToken":"2001031430", "deviceid":"210235G7J20000000000"}}""" if url == "sessions": data = '{"error":{"code":0},"data":{"ID":11}}' return data def filesystem(method, data, fs_status_flag): extend_share_flag = False shrink_share_flag = False if method == "PUT": if data == """{"CAPACITY": 10485760}""": data = """{"error":{"code":0}, "data":{"ID":"4", "CAPACITY":"8388608"}}""" extend_share_flag = True elif data == """{"CAPACITY": 2097152}""": data = """{"error":{"code":0}, "data":{"ID":"4", "CAPACITY":"2097152"}}""" shrink_share_flag = True elif data == """{"NAME": "share_fake_manage_uuid"}""": data = """{"error":{"code":0}, "data":{"ID":"4", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": True, "ENABLECOMPRESSION": True}): data = """{"error":{"code":0}, "data":{"ID":"4", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": False, "ENABLECOMPRESSION": False}): data = """{"error":{"code":0}, 
"data":{"ID":"4", "CAPACITY":"8388608"}}""" elif data == """{"IOPRIORITY": "3"}""": data = """{"error":{"code":0}}""" elif method == "DELETE": data = """{"error":{"code":0}}""" elif method == "GET": if fs_status_flag: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"1", "RUNNINGSTATUS":"27", "ALLOCTYPE":"1", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"", "SMARTCACHEPARTITIONID":"", "IOCLASSID":"11"}}""" else: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"0", "RUNNINGSTATUS":"27", "ALLOCTYPE":"0", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"", "SMARTCACHEPARTITIONID":"", "IOCLASSID":"11"}}""" else: data = '{"error":{"code":31755596}}' return (data, extend_share_flag, shrink_share_flag) def filesystem_thick(method, data, fs_status_flag): extend_share_flag = False shrink_share_flag = False if method == "PUT": if data == """{"CAPACITY": 10485760}""": data = """{"error":{"code":0}, "data":{"ID":"5", "CAPACITY":"8388608"}}""" extend_share_flag = True elif data == """{"CAPACITY": 2097152}""": data = """{"error":{"code":0}, "data":{"ID":"5", "CAPACITY":"2097152"}}""" shrink_share_flag = True elif data == """{"NAME": "share_fake_uuid_thickfs"}""": data = """{"error":{"code":0}, "data":{"ID":"5", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": False, "ENABLECOMPRESSION": False}): data = """{"error":{"code":0}, "data":{"ID":"5", "CAPACITY":"8388608"}}""" elif method == "DELETE": data = """{"error":{"code":0}}""" elif method == "GET": if fs_status_flag: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"1", "RUNNINGSTATUS":"27", "ALLOCTYPE":"0", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool_Thick", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"", "SMARTCACHEPARTITIONID":"", "IOCLASSID":"11"}}""" else: data = """{"error":{"code":0}, 
"data":{"HEALTHSTATUS":"0", "RUNNINGSTATUS":"27", "ALLOCTYPE":"0", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool_Thick", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"", "SMARTCACHEPARTITIONID":"", "IOCLASSID":"11"}}""" else: data = '{"error":{"code":31755596}}' return (data, extend_share_flag, shrink_share_flag) def filesystem_inpartition(method, data, fs_status_flag): extend_share_flag = False shrink_share_flag = False if method == "PUT": if data == """{"CAPACITY": 10485760}""": data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" extend_share_flag = True elif data == """{"CAPACITY": 2097152}""": data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"2097152"}}""" shrink_share_flag = True elif data == """{"NAME": "share_fake_manage_uuid"}""": data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" elif data == """{"NAME": "share_fake_uuid_inpartition"}""": data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": True, "ENABLECOMPRESSION": True}): data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" elif data == jsonutils.dumps({"ENABLEDEDUP": False, "ENABLECOMPRESSION": False}): data = """{"error":{"code":0}, "data":{"ID":"6", "CAPACITY":"8388608"}}""" elif method == "DELETE": data = """{"error":{"code":0}}""" elif method == "GET": if fs_status_flag: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"1", "RUNNINGSTATUS":"27", "ALLOCTYPE":"1", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"1", "SMARTCACHEPARTITIONID":"1", "IOCLASSID":"11"}}""" else: data = """{"error":{"code":0}, "data":{"HEALTHSTATUS":"0", "RUNNINGSTATUS":"27", "ALLOCTYPE":"0", "CAPACITY":"8388608", "PARENTNAME":"OpenStack_Pool", "ENABLECOMPRESSION":"false", "ENABLEDEDUP":"false", "CACHEPARTITIONID":"1", "SMARTCACHEPARTITIONID":"1", "IOCLASSID":"11"}}""" 
else: data = '{"error":{"code":31755596}}' return (data, extend_share_flag, shrink_share_flag) def allow_access(type, method, data): allow_ro_flag = False allow_rw_flag = False request_data = jsonutils.loads(data) success_data = """{"error":{"code":0}}""" fail_data = """{"error":{"code":1077939723}}""" ret = None if type == "NFS": if request_data['ACCESSVAL'] == '0': allow_ro_flag = True ret = success_data elif request_data['ACCESSVAL'] == '1': allow_rw_flag = True ret = success_data elif type == "CIFS": if request_data['PERMISSION'] == '0': allow_ro_flag = True ret = success_data elif request_data['PERMISSION'] == '1': allow_rw_flag = True ret = success_data # Group name should start with '@'. if ('group' in request_data['NAME'] and not request_data['NAME'].startswith('@')): ret = fail_data if ret is None: ret = fail_data return (ret, allow_ro_flag, allow_rw_flag) def dec_driver_handles_share_servers(func): def wrapper(*args, **kw): self = args[0] self.configuration.driver_handles_share_servers = True self.recreate_fake_conf_file(logical_port='CTE0.A.H0') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() return func(*args, **kw) return wrapper def QoS_response(method): if method == "GET": data = """{"error":{"code":0}, "data":{"NAME": "OpenStack_Fake_QoS", "MAXIOPS": "100", "FSLIST": "4", "LUNLIST": "", "RUNNINGSTATUS": "2"}}""" elif method == "PUT": data = """{"error":{"code":0}}""" else: data = """{"error":{"code":0}, "data":{"ID": "11"}}""" return data class FakeHuaweiNasHelper(helper.RestHelper): def __init__(self, *args, **kwargs): helper.RestHelper.__init__(self, *args, **kwargs) self.test_normal = True self.deviceid = None self.delete_flag = False self.allow_flag = False self.deny_flag = False self.create_snapflag = False self.setupserver_flag = False self.fs_status_flag = True self.create_share_flag = False self.snapshot_flag = True self.service_status_flag = True self.share_exist = True 
self.service_nfs_status_flag = True self.create_share_data_flag = False self.allow_ro_flag = False self.allow_rw_flag = False self.extend_share_flag = False self.shrink_share_flag = False self.add_fs_to_partition_flag = False self.add_fs_to_cache_flag = False self.test_multi_url_flag = 0 self.cache_exist = True self.partition_exist = True self.alloc_type = None self.custom_results = {} def _change_file_mode(self, filepath): pass def do_call(self, url, data, method, calltimeout=4): url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '') url = url.replace('/210235G7J20000000000/', '') if self.custom_results and self.custom_results.get(url): result = self.custom_results[url] if isinstance(result, str): return jsonutils.loads(result) if isinstance(result, dict) and result.get(method): return jsonutils.loads(result[method]) if self.test_normal: if self.test_multi_url_flag == 1: data = '{"error":{"code":-403}}' res_json = jsonutils.loads(data) return res_json elif self.test_multi_url_flag == 2: if ('http://100.115.10.70:8082/deviceManager/rest/xx/' 'sessions' == url): self.url = url data = data_session("/xx/sessions") res_json = jsonutils.loads(data) return res_json elif (('/xx/sessions' == url) or (self.url is not None and 'http://100.115.10.69:8082/deviceManager/rest' in self.url)): data = '{"error":{"code":-403}}' res_json = jsonutils.loads(data) return res_json if url == "/xx/sessions" or url == "/sessions": data = data_session(url) if url == "/storagepool": data = """{"error":{"code":0}, "data":[{"USERFREECAPACITY":"2097152", "ID":"1", "NAME":"OpenStack_Pool", "USERTOTALCAPACITY":"4194304", "USAGETYPE":"2", "USERCONSUMEDCAPACITY":"2097152", "TIER0CAPACITY":"100", "TIER1CAPACITY":"0", "TIER2CAPACITY":"0"}, {"USERFREECAPACITY":"2097152", "ID":"2", "NAME":"OpenStack_Pool_Thick", "USERTOTALCAPACITY":"4194304", "USAGETYPE":"2", "USERCONSUMEDCAPACITY":"2097152", "TIER0CAPACITY":"100", "TIER1CAPACITY":"0", "TIER2CAPACITY":"0"}]}""" if url == "/filesystem": 
request_data = jsonutils.loads(data) self.alloc_type = request_data.get('ALLOCTYPE') data = """{"error":{"code":0},"data":{ "ID":"4"}}""" if url == "/system/": data = """{"error":{"code":0}, "data":{"PRODUCTVERSION": "V300R003C10", "wwn": "fake_wwn"}}""" if url == "/remote_device": data = """{"error":{"code":0}, "data":[{"ID": "0", "NAME": "fake_name", "WWN": "fake_wwn"}]}""" if url == "/ioclass" or url == "/ioclass/11": data = QoS_response(method) if url == "/ioclass/active/11": data = """{"error":{"code":0}, "data":[{"ID": "11", "MAXIOPS": "100", "FSLIST": ""}]}""" if url == "/NFSHARE" or url == "/CIFSHARE": if self.create_share_flag: data = '{"error":{"code":31755596}}' elif self.create_share_data_flag: data = '{"error":{"code":0}}' else: data = """{"error":{"code":0},"data":{ "ID":"10"}}""" if url == "/NFSHARE?range=[100-200]": if self.share_exist: data = """{"error":{"code":0}, "data":[{"ID":"1", "FSID":"4", "NAME":"test", "SHAREPATH":"/share_fake_uuid/"}, {"ID":"2", "FSID":"5", "NAME":"test", "SHAREPATH":"/share_fake_uuid_thickfs/"}, {"ID":"3", "FSID":"6", "NAME":"test", "SHAREPATH":"/share_fake_uuid_inpartition/"}]}""" else: data = """{"error":{"code":0}, "data":[{"ID":"1", "FSID":"4", "NAME":"test", "SHAREPATH":"/share_fake_uuid_fail/"}]}""" if url == "/CIFSHARE?range=[100-200]": data = """{"error":{"code":0}, "data":[{"ID":"2", "FSID":"4", "NAME":"test", "SHAREPATH":"/share_fake_uuid/"}]}""" if url == "/NFSHARE?range=[0-100]": data = """{"error":{"code":0}, "data":[{"ID":"1", "FSID":"4", "NAME":"test_fail", "SHAREPATH":"/share_fake_uuid_fail/"}]}""" if url == "/CIFSHARE?range=[0-100]": data = """{"error":{"code":0}, "data":[{"ID":"2", "FSID":"4", "NAME":"test_fail", "SHAREPATH":"/share_fake_uuid_fail/"}]}""" if url == "/NFSHARE/1" or url == "/CIFSHARE/2": data = """{"error":{"code":0}}""" self.delete_flag = True if url == "/FSSNAPSHOT": data = """{"error":{"code":0},"data":{ "ID":"3"}}""" self.create_snapflag = True if url == 
"/FSSNAPSHOT/4@share_snapshot_fake_snapshot_uuid": if self.snapshot_flag: data = """{"error":{"code":0}, "data":{"ID":"4@share_snapshot_fake_snapshot_uuid"}}""" else: data = '{"error":{"code":1073754118}}' self.delete_flag = True if url == "/FSSNAPSHOT/4@fake_storage_snapshot_name": if self.snapshot_flag: data = """{"error":{"code":0}, "data":{"ID":"4@share_snapshot_fake_snapshot_uuid", "NAME":"share_snapshot_fake_snapshot_uuid", "HEALTHSTATUS":"1"}}""" else: data = '{"error":{"code":1073754118}}' if url == "/FSSNAPSHOT/3": data = """{"error":{"code":0}}""" self.delete_flag = True if url == "/NFS_SHARE_AUTH_CLIENT": data, self.allow_ro_flag, self.allow_rw_flag = ( allow_access('NFS', method, data)) self.allow_flag = True if url == "/CIFS_SHARE_AUTH_CLIENT": data, self.allow_ro_flag, self.allow_rw_flag = ( allow_access('CIFS', method, data)) self.allow_flag = True if url == ("/FSSNAPSHOT?TYPE=48&PARENTID=4" "&&sortby=TIMESTAMP,d&range=[0-2000]"): data = """{"error":{"code":0}, "data":[{"ID":"3", "NAME":"share_snapshot_fake_snapshot_uuid"}]}""" self.delete_flag = True if url == ("/NFS_SHARE_AUTH_CLIENT?" "filter=PARENTID::1&range=[0-100]"): data = """{"error":{"code":0}, "data":[{"ID":"0", "NAME":"100.112.0.1_fail"}]}""" if url == ("/CIFS_SHARE_AUTH_CLIENT?" "filter=PARENTID::2&range=[0-100]"): data = """{"error":{"code":0}, "data":[{"ID":"0", "NAME":"user_name_fail"}]}""" if url == ("/NFS_SHARE_AUTH_CLIENT?" "filter=PARENTID::1&range=[100-200]"): data = """{"error":{"code":0}, "data":[{"ID":"5", "NAME":"100.112.0.2"}]}""" if url == ("/CIFS_SHARE_AUTH_CLIENT?" 
"filter=PARENTID::2&range=[100-200]"): data = """{"error":{"code":0}, "data":[{"ID":"6", "NAME":"user_exist"}]}""" if url in ("/NFS_SHARE_AUTH_CLIENT/0", "/NFS_SHARE_AUTH_CLIENT/5", "/CIFS_SHARE_AUTH_CLIENT/0", "/CIFS_SHARE_AUTH_CLIENT/6"): if method == "DELETE": data = """{"error":{"code":0}}""" self.deny_flag = True elif method == "GET": if 'CIFS' in url: data = """{"error":{"code":0}, "data":{"'PERMISSION'":"0"}}""" else: data = """{"error":{"code":0}, "data":{"ACCESSVAL":"0"}}""" else: data = """{"error":{"code":0}}""" self.allow_rw_flagg = True if url == "/NFSHARE/count" or url == "/CIFSHARE/count": data = """{"error":{"code":0},"data":{ "COUNT":"196"}}""" if (url == "/NFS_SHARE_AUTH_CLIENT/count?filter=PARENTID::1" or url == ("/CIFS_SHARE_AUTH_CLIENT/count?filter=" "PARENTID::2")): data = """{"error":{"code":0},"data":{ "COUNT":"196"}}""" if url == "/CIFSSERVICE": if self.service_status_flag: data = """{"error":{"code":0},"data":{ "RUNNINGSTATUS":"2"}}""" else: data = """{"error":{"code":0},"data":{ "RUNNINGSTATUS":"1"}}""" if url == "/NFSSERVICE": if self.service_nfs_status_flag: data = """{"error":{"code":0}, "data":{"RUNNINGSTATUS":"2", "SUPPORTV3":"true", "SUPPORTV4":"true"}}""" else: data = """{"error":{"code":0}, "data":{"RUNNINGSTATUS":"1", "SUPPORTV3":"true", "SUPPORTV4":"true"}}""" self.setupserver_flag = True if "/FILESYSTEM?filter=NAME::" in url: data = """{"error":{"code":0}, "data":[{"ID":"4", "NAME":"share_fake_uuid"}, {"ID":"8", "NAME":"share_fake_new_uuid"}]}""" if url == "/filesystem/4": data, self.extend_share_flag, self.shrink_share_flag = ( filesystem(method, data, self.fs_status_flag)) self.delete_flag = True if url == "/filesystem/5": data, self.extend_share_flag, self.shrink_share_flag = ( filesystem_thick(method, data, self.fs_status_flag)) self.delete_flag = True if url == "/filesystem/6": data, self.extend_share_flag, self.shrink_share_flag = ( filesystem_inpartition(method, data, self.fs_status_flag)) self.delete_flag = True if url 
== "/cachepartition": if self.partition_exist: data = """{"error":{"code":0}, "data":[{"ID":"7", "NAME":"test_partition_name"}]}""" else: data = """{"error":{"code":0}, "data":[{"ID":"7", "NAME":"test_partition_name_fail"}]}""" if url == "/cachepartition/1": if self.partition_exist: data = """{"error":{"code":0}, "data":{"ID":"7", "NAME":"test_partition_name"}}""" else: data = """{"error":{"code":0}, "data":{"ID":"7", "NAME":"test_partition_name_fail"}}""" if url == "/SMARTCACHEPARTITION": if self.cache_exist: data = """{"error":{"code":0}, "data":[{"ID":"8", "NAME":"test_cache_name"}]}""" else: data = """{"error":{"code":0}, "data":[{"ID":"8", "NAME":"test_cache_name_fail"}]}""" if url == "/SMARTCACHEPARTITION/1": if self.cache_exist: data = """{"error":{"code":0}, "data":{"ID":"8", "NAME":"test_cache_name"}}""" else: data = """{"error":{"code":0}, "data":{"ID":"8", "NAME":"test_cache_name_fail"}}""" if url == "/filesystem/associate/cachepartition": data = """{"error":{"code":0}}""" self.add_fs_to_partition_flag = True if url == "/SMARTCACHEPARTITION/CREATE_ASSOCIATE": data = """{"error":{"code":0}}""" self.add_fs_to_cache_flag = True if url == "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE": data = """{"error":{"code":0}}""" if url == "/smartPartition/removeFs": data = """{"error":{"code":0}}""" if url == "/ETH_PORT": data = """{"error":{"code":0}, "data":[{"ID": "4", "LOCATION":"CTE0.A.H0", "IPV4ADDR":"", "BONDNAME":"", "BONDID":"", "RUNNINGSTATUS":"10"}, {"ID": "6", "LOCATION":"CTE0.A.H1", "IPV4ADDR":"", "BONDNAME":"fake_bond", "BONDID":"5", "RUNNINGSTATUS":"10"}]}""" if url == "/ETH_PORT/6": data = """{"error":{"code":0}, "data":{"ID": "6", "LOCATION":"CTE0.A.H1", "IPV4ADDR":"", "BONDNAME":"fake_bond", "BONDID":"5", "RUNNINGSTATUS":"10"}}""" if url == "/BOND_PORT": data = "{\"error\":{\"code\":0},\ \"data\":[{\"ID\": \"5\",\ \"NAME\":\"fake_bond\",\ \"PORTIDLIST\": \"[\\\"6\\\"]\",\ \"RUNNINGSTATUS\":\"10\"}]}" if url == "/vlan": if method == "GET": data = 
"""{"error":{"code":0}}""" else: data = """{"error":{"code":0},"data":{ "ID":"4"}}""" if url == "/LIF": if method == "GET": data = """{"error":{"code":0}}""" else: data = """{"error":{"code":0},"data":{ "ID":"4"}}""" if url == "/DNS_Server": if method == "GET": data = "{\"error\":{\"code\":0},\"data\":{\ \"ADDRESS\":\"[\\\"\\\"]\"}}" else: data = """{"error":{"code":0}}""" if url == "/AD_CONFIG": if method == "GET": data = """{"error":{"code":0},"data":{ "DOMAINSTATUS":"1", "FULLDOMAINNAME":"huawei.com"}}""" else: data = """{"error":{"code":0}}""" if url == "/LDAP_CONFIG": if method == "GET": data = """{"error":{"code":0},"data":{ "BASEDN":"dc=huawei,dc=com", "LDAPSERVER": "100.97.5.87"}}""" else: data = """{"error":{"code":0}}""" if url == "/REPLICATIONPAIR": data = """{"error":{"code":0},"data":{ "ID":"fake_pair_id"}}""" if url == "/REPLICATIONPAIR/sync": data = """{"error":{"code":0}}""" if url == "/REPLICATIONPAIR/switch": data = """{"error":{"code":0}}""" if url == "/REPLICATIONPAIR/split": data = """{"error":{"code":0}}""" if url == "/REPLICATIONPAIR/CANCEL_SECODARY_WRITE_LOCK": data = """{"error":{"code":0}}""" if url == "/REPLICATIONPAIR/SET_SECODARY_WRITE_LOCK": data = """{"error":{"code":0}}""" if url == "/REPLICATIONPAIR/fake_pair_id": data = """{"error":{"code":0},"data":{ "ID": "fake_pair_id", "HEALTHSTATUS": "1", "SECRESDATASTATUS": "1", "ISPRIMARY": "false", "SECRESACCESS": "1", "RUNNINGSTATUS": "1"}}""" else: data = '{"error":{"code":31755596}}' res_json = jsonutils.loads(data) return res_json class FakeRpcClient(rpcapi.HuaweiV3API): def __init__(self, helper): super(FakeRpcClient, self).__init__() self.replica_mgr = replication.ReplicaPairManager(helper) class fake_call_context(object): def __init__(self, replica_mgr): self.replica_mgr = replica_mgr def call(self, context, func_name, **kwargs): if func_name == 'create_replica_pair': return self.replica_mgr.create_replica_pair( context, **kwargs) def create_replica_pair(self, context, host, 
local_share_info, remote_device_wwn, remote_fs_id): self.client.prepare = mock.Mock( return_value=self.fake_call_context(self.replica_mgr)) return super(FakeRpcClient, self).create_replica_pair( context, host, local_share_info, remote_device_wwn, remote_fs_id) class FakeRpcServer(object): def start(self): pass class FakePrivateStorage(object): def __init__(self): self.map = {} def get(self, entity_id, key=None, default=None): if self.map.get(entity_id): return self.map[entity_id].get(key, default) return default def update(self, entity_id, details, delete_existing=False): self.map[entity_id] = details def delete(self, entity_id, key=None): self.map.pop(entity_id) class FakeHuaweiNasDriver(huawei_nas.HuaweiNasDriver): """Fake HuaweiNasDriver.""" def __init__(self, *args, **kwargs): huawei_nas.HuaweiNasDriver.__init__(self, *args, **kwargs) self.plugin = connection.V3StorageConnection(self.configuration) self.plugin.helper = FakeHuaweiNasHelper(self.configuration) self.plugin.replica_mgr = replication.ReplicaPairManager( self.plugin.helper) self.plugin.rpc_client = FakeRpcClient(self.plugin.helper) self.plugin.private_storage = FakePrivateStorage() class FakeConfigParseTree(object): class FakeNode(object): def __init__(self, text): self._text = text @property def text(self): return self._text @text.setter def text(self, text): self._text = text class FakeRoot(object): def __init__(self): self._node_map = {} def findtext(self, path, default=None): if path in self._node_map: return self._node_map[path].text return default def find(self, path): if path in self._node_map: return self._node_map[path] return None def __init__(self, path_value): self.root = self.FakeRoot() for k in path_value: self.root._node_map[k] = self.FakeNode(path_value[k]) def getroot(self): return self.root def write(self, filename, format): pass @ddt.ddt class HuaweiShareDriverTestCase(test.TestCase): """Tests GenericShareDriver.""" def setUp(self): super(HuaweiShareDriverTestCase, self).setUp() 
self._context = context.get_admin_context() def _safe_get(opt): return getattr(self.configuration, opt) self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.safe_get = mock.Mock(side_effect=_safe_get) self.configuration.network_config_group = 'fake_network_config_group' self.configuration.admin_network_config_group = ( 'fake_admin_network_config_group') self.configuration.config_group = 'fake_share_backend_name' self.configuration.share_backend_name = 'fake_share_backend_name' self.configuration.huawei_share_backend = 'V3' self.configuration.max_over_subscription_ratio = 1 self.configuration.driver_handles_share_servers = False self.configuration.replication_domain = None self.configuration.filter_function = None self.configuration.goodness_function = None self.tmp_dir = tempfile.mkdtemp() self.fake_conf_file = self.tmp_dir + '/manila_huawei_conf.xml' self.addCleanup(shutil.rmtree, self.tmp_dir) self.create_fake_conf_file(self.fake_conf_file) self.addCleanup(os.remove, self.fake_conf_file) self.configuration.manila_huawei_conf_file = self.fake_conf_file self._helper_fake = mock.Mock() self.mock_object(huawei_nas.importutils, 'import_object', mock.Mock(return_value=self._helper_fake)) self.mock_object(time, 'sleep', fake_sleep) self.driver = FakeHuaweiNasDriver(configuration=self.configuration) self.driver.plugin.helper.test_normal = True self.share_nfs = { 'id': 'fake_uuid', 'share_id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id', } self.share_nfs_thick = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 
'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool_Thick', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid'}, ], 'share_type_id': 'fake_id', } self.share_nfs_thickfs = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid-thickfs', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid_thickfs'}, ], 'share_type_id': 'fake_id', } self.share_nfs_thick_thickfs = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid-thickfs', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool_Thick', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid_thickfs'}, ], 'share_type_id': 'fake_id', } self.share_nfs_inpartition = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid-inpartition', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid_inpartition'}, ], 'share_type_id': 'fake_id', } self.share_manage_nfs = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-manage-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id', } self.share_pool_name_not_match = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-manage-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 
'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': '100.115.10.68:/share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool_not_match', 'share_type_id': 'fake_id', } self.share_proto_fail = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'proto_fail', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool', } self.share_cifs = { 'id': 'fake_uuid', 'share_id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'CIFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': 'share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id', } self.share_manage_cifs = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-manage-uuid', 'size': 1, 'share_proto': 'CIFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': '\\\\100.115.10.68\\share_fake_uuid'}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id', } self.nfs_snapshot = { 'id': 'fake_snapshot_uuid', 'snapshot_id': 'fake_snapshot_uuid', 'display_name': 'snapshot', 'name': 'fake_snapshot_name', 'size': 1, 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share': { 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share_size': 1, 'share_proto': 'NFS', }, } self.cifs_snapshot = { 'id': 'fake_snapshot_uuid', 'snapshot_id': 'fake_snapshot_uuid', 'display_name': 'snapshot', 'name': 'fake_snapshot_name', 'size': 1, 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share': { 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share_size': 1, 'share_proto': 'CIFS', }, } self.storage_nfs_snapshot = { 'id': 
'fake_snapshot_uuid', 'snapshot_id': 'fake_snapshot_uuid', 'display_name': 'snapshot', 'name': 'fake_snapshot_name', 'provider_location': 'fake_storage_snapshot_name', 'size': 1, 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share': { 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share_size': 1, 'share_proto': 'NFS', }, } self.storage_cifs_snapshot = { 'id': 'fake_snapshot_uuid', 'snapshot_id': 'fake_snapshot_uuid', 'display_name': 'snapshot', 'name': 'fake_snapshot_name', 'provider_location': 'fake_storage_snapshot_name', 'size': 1, 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share': { 'share_name': 'share_fake_uuid', 'share_id': 'fake_uuid', 'share_size': 1, 'share_proto': 'CIFS', }, } self.security_service = { 'id': 'fake_id', 'domain': 'FAKE', 'server': 'fake_server', 'user': 'fake_user', 'password': 'fake_password', } self.access_ip = { 'access_type': 'ip', 'access_to': '100.112.0.1', 'access_level': 'rw', } self.access_ip_exist = { 'access_type': 'ip', 'access_to': '100.112.0.2', 'access_level': 'rw', } self.access_user = { 'access_type': 'user', 'access_to': 'user_name', 'access_level': 'rw', } self.access_user_exist = { 'access_type': 'user', 'access_to': 'user_exist', 'access_level': 'rw', } self.access_group = { 'access_type': 'user', 'access_to': 'group_name', 'access_level': 'rw', } self.access_cert = { 'access_type': 'cert', 'access_to': 'fake_cert', 'access_level': 'rw', } self.driver_options = { 'volume_id': 'fake', } self.share_server = None self.driver._licenses = ['fake'] self.fake_network_allocations = [{ 'id': 'fake_network_allocation_id', 'ip_address': '111.111.111.109', }] self.fake_network_info = [{ 'server_id': '0', 'segmentation_id': '2', 'cidr': '111.111.111.0/24', 'neutron_net_id': 'fake_neutron_net_id', 'neutron_subnet_id': 'fake_neutron_subnet_id', 'security_services': '', 'network_allocations': self.fake_network_allocations, 'network_type': 'vlan', }] self.fake_active_directory = { 'type': 
'active_directory', 'dns_ip': '100.97.5.5', 'user': 'ad_user', 'password': 'ad_password', 'domain': 'huawei.com' } self.fake_ldap = { 'type': 'ldap', 'server': '100.97.5.87', 'domain': 'dc=huawei,dc=com' } fake_share_type_id_not_extra = 'fake_id' self.fake_type_not_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': {}, 'required_extra_specs': {}, 'id': fake_share_type_id_not_extra, 'name': 'test_with_extra', 'updated_at': None } } fake_extra_specs = { 'capabilities:dedupe': ' True', 'capabilities:compression': ' True', 'capabilities:huawei_smartcache': ' True', 'huawei_smartcache:cachename': 'test_cache_name', 'capabilities:huawei_smartpartition': ' True', 'huawei_smartpartition:partitionname': 'test_partition_name', 'capabilities:thin_provisioning': ' True', 'test:test:test': 'test', } fake_share_type_id = 'fooid-2' self.fake_type_w_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } fake_extra_specs = { 'capabilities:dedupe': ' True', 'capabilities:compression': ' True', 'capabilities:huawei_smartcache': ' False', 'huawei_smartcache:cachename': None, 'capabilities:huawei_smartpartition': ' False', 'huawei_smartpartition:partitionname': None, 'capabilities:thin_provisioning': ' True', 'test:test:test': 'test', } fake_share_type_id = 'fooid-3' self.fake_type_fake_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } fake_extra_specs = { 'capabilities:dedupe': ' True', 'capabilities:compression': ' True', 'capabilities:huawei_smartcache': ' False', 'huawei_smartcache:cachename': None, 'capabilities:huawei_smartpartition': ' False', 
'huawei_smartpartition:partitionname': None, 'capabilities:thin_provisioning': ' False', 'test:test:test': 'test', } fake_share_type_id = 'fooid-4' self.fake_type_thin_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } self.share_nfs_host_not_exist = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#', } self.share_nfs_storagepool_fail = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-uuid', 'size': 1, 'share_proto': 'NFS', 'share_network_id': 'fake_net_id', 'share_server_id': 'fake-share-srv-id', 'host': 'fake_host@fake_backend#OpenStack_Pool2', } fake_extra_specs = { 'driver_handles_share_servers': 'False', } fake_share_type_id = 'fake_id' self.fake_type_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } self.active_replica = { 'id': 'fake_active_replica_id', 'share_id': 'fake_share_id', 'name': 'share_fake_uuid', 'host': 'hostname1@backend_name1#OpenStack_Pool', 'size': 5, 'share_proto': 'NFS', 'replica_state': common_constants.REPLICA_STATE_ACTIVE, } self.new_replica = { 'id': 'fake_new_replica_id', 'share_id': 'fake_share_id', 'name': 'share_fake_new_uuid', 'host': 'hostname2@backend_name2#OpenStack_Pool', 'size': 5, 'share_proto': 'NFS', 'replica_state': common_constants.REPLICA_STATE_OUT_OF_SYNC, 'share_type_id': 'fake_id', } def _get_share_by_proto(self, share_proto): if share_proto == "NFS": share = self.share_nfs elif share_proto == "CIFS": share = self.share_cifs 
else: share = None return share def mock_share_type(self, share_type): self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) def test_no_configuration(self): self.mock_object(huawei_nas.HuaweiNasDriver, 'driver_handles_share_servers', True) self.assertRaises(exception.InvalidInput, huawei_nas.HuaweiNasDriver) def test_conf_product_fail(self): self.recreate_fake_conf_file(product_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_conf_file) def test_conf_pool_node_fail(self): self.recreate_fake_conf_file(pool_node_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_conf_file) def test_conf_username_fail(self): self.recreate_fake_conf_file(username_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_conf_file) def test_conf_timeout_fail(self): self.recreate_fake_conf_file(timeout_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) timeout = self.driver.plugin._get_timeout() self.assertEqual(60, timeout) def test_conf_wait_interval_fail(self): self.recreate_fake_conf_file(wait_interval_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) wait_interval = self.driver.plugin._get_wait_interval() self.assertEqual(3, wait_interval) def test_conf_logical_ip_fail(self): self.configuration.driver_handles_share_servers = True self.recreate_fake_conf_file(logical_port="fake_port") self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.configuration.driver_handles_share_servers = False self.assertRaises(exception.InvalidInput, self.driver.plugin.check_conf_file) def test_conf_snapshot_replication_conflict(self): 
self.recreate_fake_conf_file(snapshot_support=True, replication_support=True) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin._setup_conf() self.assertRaises(exception.BadConfigurationException, self.driver.plugin.check_conf_file) def test_get_backend_driver_fail(self): test_fake_conf_file = None self.driver.plugin.configuration.manila_huawei_conf_file = ( test_fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.get_backend_driver) def test_get_backend_driver_fail_driver_none(self): self.recreate_fake_conf_file(product_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.get_backend_driver) def test_create_share_storagepool_not_exist(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidHost, self.driver.create_share, self._context, self.share_nfs_host_not_exist, self.share_server) def test_create_share_nfs_storagepool_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidHost, self.driver.create_share, self._context, self.share_nfs_storagepool_fail, self.share_server) def test_create_share_nfs_no_data_fail(self): self.driver.plugin.helper.create_share_data_flag = True self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_read_xml_fail(self): test_fake_conf_file = None self.driver.plugin.configuration.manila_huawei_conf_file = ( test_fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver.plugin.helper._read_xml) def test_connect_success(self): FakeRpcServer.start = mock.Mock() rpc.get_server = mock.Mock(return_value=FakeRpcServer()) self.driver.plugin.connect() FakeRpcServer.start.assert_called_once() def test_connect_fail(self): self.driver.plugin.helper.test_multi_url_flag = 1 self.assertRaises(exception.InvalidShare, 
self.driver.plugin.connect) def test_login_success(self): deviceid = self.driver.plugin.helper.login() self.assertEqual("210235G7J20000000000", deviceid) def test_check_for_setup_success(self): self.driver.plugin.helper.login() self.driver.check_for_setup_error() def test_check_for_setup_service_down(self): self.driver.plugin.helper.service_status_flag = False self.driver.plugin.helper.login() self.driver.check_for_setup_error() def test_check_for_setup_nfs_down(self): self.driver.plugin.helper.service_nfs_status_flag = False self.driver.plugin.helper.login() self.driver.check_for_setup_error() def test_check_for_setup_service_false(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.check_for_setup_error) def test_create_share_no_extra(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertEqual(constants.ALLOC_TYPE_THIN_FLAG, self.driver.plugin.helper.alloc_type) def test_create_share_with_extra_thin(self): share_type = { 'extra_specs': { 'capabilities:thin_provisioning': ' True' }, } self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertEqual(constants.ALLOC_TYPE_THIN_FLAG, self.driver.plugin.helper.alloc_type) def test_create_share_with_extra_thick(self): share_type = { 'extra_specs': { 'capabilities:thin_provisioning': ' False' }, } self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) 
self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertEqual(constants.ALLOC_TYPE_THICK_FLAG, self.driver.plugin.helper.alloc_type) @ddt.data(*constants.VALID_SECTOR_SIZES) def test_create_share_with_sectorsize_in_type(self, sectorsize): share_type = { 'extra_specs': { 'capabilities:huawei_sectorsize': " true", 'huawei_sectorsize:sectorsize': sectorsize, }, } self.mock_share_type(share_type) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertTrue(db.share_type_get.called) @ddt.data('128', 'xx', 'None', ' ') def test_create_share_with_illegal_sectorsize_in_type(self, sectorsize): share_type = { 'extra_specs': { 'capabilities:huawei_sectorsize': " true", 'huawei_sectorsize:sectorsize': sectorsize, }, } self.mock_share_type(share_type) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) @ddt.data({'extra_specs': {'capabilities:huawei_sectorsize': " false", 'huawei_sectorsize:sectorsize': '0'}, 'xmlvalue': '4'}, {'extra_specs': {'capabilities:huawei_sectorsize': " False", 'huawei_sectorsize:sectorsize': '128'}, 'xmlvalue': '8'}, {'extra_specs': {'capabilities:huawei_sectorsize': "false", 'huawei_sectorsize:sectorsize': 'a'}, 'xmlvalue': '16'}, {'extra_specs': {'capabilities:huawei_sectorsize': "False", 'huawei_sectorsize:sectorsize': 'xx'}, 'xmlvalue': '32'}, {'extra_specs': {'capabilities:huawei_sectorsize': "true", 'huawei_sectorsize:sectorsize': 'None'}, 'xmlvalue': '64'}, {'extra_specs': {'capabilities:huawei_sectorsize': "True", 'huawei_sectorsize:sectorsize': ' '}, 'xmlvalue': ' '}, {'extra_specs': {'capabilities:huawei_sectorsize': "True", 'huawei_sectorsize:sectorsize': ''}, 'xmlvalue': ''}) @ddt.unpack def test_create_share_with_invalid_type_valid_xml(self, extra_specs, xmlvalue): fake_share_type = {} 
fake_share_type['extra_specs'] = extra_specs self.mock_share_type(fake_share_type) self.recreate_fake_conf_file(sectorsize_value=xmlvalue) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertTrue(db.share_type_get.called) @ddt.data({'extra_specs': {'capabilities:huawei_sectorsize': " false", 'huawei_sectorsize:sectorsize': '4'}, 'xmlvalue': '0'}, {'extra_specs': {'capabilities:huawei_sectorsize': " False", 'huawei_sectorsize:sectorsize': '8'}, 'xmlvalue': '128'}, {'extra_specs': {'capabilities:huawei_sectorsize': "false", 'huawei_sectorsize:sectorsize': '16'}, 'xmlvalue': 'a'}, {'extra_specs': {'capabilities:huawei_sectorsize': "False", 'huawei_sectorsize:sectorsize': '32'}, 'xmlvalue': 'xx'}, {'extra_specs': {'capabilities:huawei_sectorsize': "true", 'huawei_sectorsize:sectorsize': '64'}, 'xmlvalue': 'None'}) @ddt.unpack def test_create_share_with_invalid_type_illegal_xml(self, extra_specs, xmlvalue): fake_share_type = {} fake_share_type['extra_specs'] = extra_specs self.mock_share_type(fake_share_type) self.recreate_fake_conf_file(sectorsize_value=xmlvalue) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_shrink_share_success(self): self.driver.plugin.helper.shrink_share_flag = False self.driver.plugin.helper.login() self.driver.shrink_share(self.share_nfs, 1, self.share_server) self.assertTrue(self.driver.plugin.helper.shrink_share_flag) def test_shrink_share_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.shrink_share, self.share_nfs, 1, self.share_server) 
def test_shrink_share_size_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.shrink_share, self.share_nfs, 5, self.share_server) def test_shrink_share_alloctype_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.fs_status_flag = False self.assertRaises(exception.InvalidShare, self.driver.shrink_share, self.share_nfs, 1, self.share_server) def test_shrink_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.assertRaises(exception.InvalidShare, self.driver.shrink_share, self.share_nfs, 1, self.share_server) def test_extend_share_success(self): self.driver.plugin.helper.extend_share_flag = False self.driver.plugin.helper.login() self.driver.extend_share(self.share_nfs, 5, self.share_server) self.assertTrue(self.driver.plugin.helper.extend_share_flag) def test_extend_share_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidInput, self.driver.extend_share, self.share_nfs, 3, self.share_server) def test_extend_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.assertRaises(exception.InvalidShareAccess, self.driver.extend_share, self.share_nfs, 4, self.share_server) def test_create_share_nfs_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_create_share_cifs_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_cifs, self.share_server) self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", 
location) def test_create_share_with_extra(self): self.driver.plugin.helper.add_fs_to_partition_flag = False self.driver.plugin.helper.add_fs_to_cache_flag = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) self.assertTrue(self.driver.plugin.helper.add_fs_to_partition_flag) self.assertTrue(self.driver.plugin.helper.add_fs_to_cache_flag) @ddt.data({'capabilities:dedupe': ' True', 'capabilities:thin_provisioning': ' False'}, {'capabilities:dedupe': ' True', 'capabilities:compression': ' True', 'capabilities:thin_provisioning': ' False'}, {'capabilities:huawei_smartcache': ' True', 'huawei_smartcache:cachename': None}, {'capabilities:huawei_smartpartition': ' True', 'huawei_smartpartition:partitionname': None}, {'capabilities:huawei_smartcache': ' True'}, {'capabilities:huawei_smartpartition': ' True'}) def test_create_share_with_extra_error(self, fake_extra_specs): fake_share_type_id = 'fooid-2' fake_type_error_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } share_type = fake_type_error_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs_thick, self.share_server) @ddt.data({"fake_extra_specs_qos": {"qos:maxIOPS": "100", "qos:maxBandWidth": "50", "qos:IOType": "0"}, "fake_qos_info": {"MAXIOPS": "100", "MAXBANDWIDTH": "50", "IOTYPE": "0", "LATENCY": "0", "NAME": "OpenStack_fake_qos"}}, {"fake_extra_specs_qos": {"qos:maxIOPS": "100", "qos:IOType": "1"}, "fake_qos_info": {"NAME": 
"fake_qos", "MAXIOPS": "100", "IOTYPE": "1", "LATENCY": "0"}}, {"fake_extra_specs_qos": {"qos:minIOPS": "100", "qos:minBandWidth": "50", 'qos:latency': "50", "qos:IOType": "0"}, "fake_qos_info": {"MINIOPS": "100", "MINBANDWIDTH": "50", "IOTYPE": "0", "LATENCY": "50", "NAME": "OpenStack_fake_qos"}}) @ddt.unpack def test_create_share_with_qos(self, fake_extra_specs_qos, fake_qos_info): fake_share_type_id = 'fooid-2' fake_extra_specs = {"capabilities:qos": " True"} fake_extra_specs.update(fake_extra_specs_qos) fake_type_error_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } fake_qos_info_respons = { "error": { "code": 0 }, "data": [{ "ID": "11", "FSLIST": u'["1", "2", "3", "4"]', "LUNLIST": '[""]', "RUNNINGSTATUS": "2", }] } fake_qos_info_respons["data"][0].update(fake_qos_info) share_type = fake_type_error_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(helper.RestHelper, 'get_qos', mock.Mock(return_value=fake_qos_info_respons)) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) @ddt.data({'capabilities:qos': ' True', 'qos:maxIOPS': -1}, {'capabilities:qos': ' True', 'qos:IOTYPE': 4}, {'capabilities:qos': ' True', 'qos:IOTYPE': 100}, {'capabilities:qos': ' True', 'qos:maxIOPS': 0}, {'capabilities:qos': ' True', 'qos:minIOPS': 0}, {'capabilities:qos': ' True', 'qos:minBandWidth': 0}, {'capabilities:qos': ' True', 'qos:maxBandWidth': 0}, {'capabilities:qos': ' True', 'qos:latency': 0}, {'capabilities:qos': ' True', 'qos:maxIOPS': 100}, {'capabilities:qos': ' True', 'qos:maxIOPS': 100, 'qos:minBandWidth': 100, 
'qos:IOType': '0'}) def test_create_share_with_invalid_qos(self, fake_extra_specs): fake_share_type_id = 'fooid-2' fake_type_error_extra = { 'test_with_extra': { 'created_at': 'fake_time', 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': 'test_with_extra', 'updated_at': None } } share_type = fake_type_error_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_create_share_cache_not_exist(self): self.driver.plugin.helper.cache_exist = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_add_share_to_cache_fail(self): opts = dict( huawei_smartcache='true', cachename=None, ) fsid = 4 smartcache = smartx.SmartCache(self.driver.plugin.helper) self.assertRaises(exception.InvalidInput, smartcache.add, opts, fsid) def test_create_share_partition_not_exist(self): self.driver.plugin.helper.partition_exist = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_add_share_to_partition_fail(self): opts = dict( huawei_smartpartition='true', partitionname=None, ) fsid = 4 smartpartition = smartx.SmartPartition(self.driver.plugin.helper) self.assertRaises(exception.InvalidInput, smartpartition.add, opts, fsid) def test_login_fail(self): 
self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.plugin.helper.login) def test_create_share_nfs_fs_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_create_share_nfs_status_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.fs_status_flag = False self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) def test_create_share_cifs_fs_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_cifs, self.share_server) def test_create_share_cifs_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.create_share_flag = True self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_cifs, self.share_server) def test_create_share_nfs_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.create_share_flag = True self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) @ddt.data({"share_proto": "NFS", "fake_qos_info_respons": {"ID": "11", "MAXIOPS": "100", "IOType": "2", "FSLIST": u'["0", "1", "4"]'}}, {"share_proto": "CIFS", "fake_qos_info_respons": {"ID": "11", "MAXIOPS": "100", "IOType": "2", "FSLIST": u'["4"]', "RUNNINGSTATUS": "2"}}) @ddt.unpack def test_delete_share_success(self, share_proto, fake_qos_info_respons): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False if share_proto == 'NFS': share = self.share_nfs else: share = self.share_cifs with mock.patch.object(helper.RestHelper, 'get_qos_info', return_value=fake_qos_info_respons): self.driver.delete_share(self._context, share, 
self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_delete_share_withoutqos_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False self.driver.plugin.qos_support = True self.driver.delete_share(self._context, self.share_nfs, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_check_snapshot_id_exist_fail(self): snapshot_id = "4@share_snapshot_not_exist" self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False snapshot_info = self.driver.plugin.helper._get_snapshot_by_id( snapshot_id) self.assertRaises(exception.InvalidShareSnapshot, self.driver.plugin.helper._check_snapshot_id_exist, snapshot_info) def test_delete_share_nfs_fail_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False self.driver.plugin.helper.share_exist = False self.driver.delete_share(self._context, self.share_nfs, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_delete_share_cifs_success(self): self.driver.plugin.helper.delete_flag = False fake_qos_info_respons = { "ID": "11", "FSLIST": u'["1", "2", "3", "4"]', "LUNLIST": '[""]', "RUNNINGSTATUS": "2", } self.mock_object(helper.RestHelper, 'get_qos_info', mock.Mock(return_value=fake_qos_info_respons)) self.driver.plugin.helper.login() self.driver.delete_share(self._context, self.share_cifs, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_get_network_allocations_number_dhss_true(self): self.configuration.driver_handles_share_servers = True number = self.driver.get_network_allocations_number() self.assertEqual(1, number) def test_get_network_allocations_number_dhss_false(self): self.configuration.driver_handles_share_servers = False number = self.driver.get_network_allocations_number() self.assertEqual(0, number) def test_create_nfsshare_from_nfssnapshot_success(self): share_type = 
self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'copy_snapshot_data', mock.Mock(return_value=True)) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin. copy_snapshot_data.called) self.assertEqual(2, self.driver.plugin. umount_share_from_host.call_count) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_create_cifsshare_from_cifssnapshot_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'copy_snapshot_data', mock.Mock(return_value=True)) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_cifs, self.cifs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin. copy_snapshot_data.called) self.assertEqual(2, self.driver.plugin. 
umount_share_from_host.call_count) self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location) def test_create_nfsshare_from_cifssnapshot_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, '_get_access_id', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'copy_snapshot_data', mock.Mock(return_value=True)) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.access_id = None self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_nfs, self.cifs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertTrue(self.driver.plugin. _get_access_id.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin. copy_snapshot_data.called) self.assertEqual(2, self.driver.plugin. umount_share_from_host.call_count) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_create_cifsshare_from_nfssnapshot_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, '_get_access_id', mock.Mock(return_value={})) self.mock_object(utils, 'execute', mock.Mock(return_value=("", ""))) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_cifs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertTrue(self.driver.plugin. 
_get_access_id.called) self.assertEqual(7, utils.execute.call_count) self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location) def test_create_share_from_snapshot_nonefs(self): self.driver.plugin.helper.login() self.mock_object(self.driver.plugin.helper, 'get_fsid_by_name', mock.Mock(return_value={})) self.assertRaises(exception.StorageResourceNotFound, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.helper. get_fsid_by_name.called) def test_create_share_from_notexistingsnapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = False self.assertRaises(exception.ShareSnapshotNotFound, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) def test_create_share_from_share_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.mock_object(self.driver.plugin, 'check_fs_status', mock.Mock(return_value={})) self.assertRaises(exception.StorageResourceException, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.check_fs_status.called) def test_create_share_from_snapshot_share_error(self): self.mock_object(self.driver.plugin, '_get_share_proto', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.ShareResourceNotFound, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin. 
_get_share_proto.called) def test_create_share_from_snapshot_allow_oldaccess_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, '_get_share_proto', mock.Mock(return_value='NFS')) self.mock_object(self.driver.plugin, '_get_access_id', mock.Mock(return_value={})) self.mock_object(self.driver.plugin.helper, '_get_share_by_name', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.ShareResourceNotFound, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertTrue(self.driver.plugin._get_share_proto.called) self.assertTrue(self.driver.plugin._get_access_id.called) self.assertTrue(self.driver.plugin.helper._get_share_by_name.called) def test_create_share_from_snapshot_mountshare_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(side_effect=exception. ShareMountException('err'))) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.ShareMountException, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(1, self.driver.plugin. 
mount_share_to_host.call_count) def test_create_share_from_snapshot_allow_newaccess_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, '_get_share_proto', mock.Mock(return_value='NFS')) self.mock_object(self.driver.plugin, '_get_access_id', mock.Mock(return_value='5')) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin.helper, '_get_share_by_name', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.ShareResourceNotFound, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertTrue(self.driver.plugin._get_share_proto.called) self.assertTrue(self.driver.plugin._get_access_id.called) self.assertEqual(1, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin.helper. _get_share_by_name.called) self.assertEqual(1, self.driver.plugin. 
umount_share_from_host.call_count) def test_create_nfsshare_from_nfssnapshot_copydata_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(data_utils, 'Copy', mock.Mock(side_effect=Exception('err'))) self.mock_object(utils, 'execute', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True self.assertRaises(exception.ShareCopyDataException, self.driver.create_share_from_snapshot, self._context, self.share_nfs, self.nfs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(data_utils.Copy.called) self.assertEqual(2, utils.execute.call_count) def test_create_nfsshare_from_nfssnapshot_umountshare_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) self.mock_object(self.driver.plugin, 'copy_snapshot_data', mock.Mock(return_value=True)) self.mock_object(self.driver.plugin, 'umount_share_from_host', mock.Mock(side_effect=exception. ShareUmountException('err'))) self.mock_object(os, 'rmdir', mock.Mock(side_effect=Exception('err'))) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True location = self.driver.create_share_from_snapshot(self._context, self.share_nfs, self.cifs_snapshot, self.share_server) self.assertTrue(db.share_type_get.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) self.assertTrue(self.driver.plugin.copy_snapshot_data.called) self.assertEqual(2, self.driver.plugin. 
umount_share_from_host.call_count) self.assertTrue(os.rmdir.called) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_get_share_stats_refresh_pool_not_exist(self): self.recreate_fake_conf_file(pool_node_flag=False) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.assertRaises(exception.InvalidInput, self.driver._update_share_stats) @ddt.data({"snapshot_support": True, "replication_support": False}, {"snapshot_support": False, "replication_support": True}) @ddt.unpack def test_get_share_stats_refresh(self, snapshot_support, replication_support): self.recreate_fake_conf_file(snapshot_support=snapshot_support, replication_support=replication_support) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin._setup_conf() self.driver._update_share_stats() expected = { "share_backend_name": "fake_share_backend_name", "driver_handles_share_servers": False, "vendor_name": "Huawei", "driver_version": "1.3", "storage_protocol": "NFS_CIFS", "reserved_percentage": 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, "total_capacity_gb": 0.0, "free_capacity_gb": 0.0, "qos": True, "snapshot_support": snapshot_support, "create_share_from_snapshot_support": snapshot_support, "revert_to_snapshot_support": snapshot_support, "mount_snapshot_support": False, "replication_domain": None, "filter_function": None, "goodness_function": None, 'mount_point_name_support': False, "pools": [], "share_group_stats": {"consistent_snapshot_support": None}, "ipv4_support": True, "ipv6_support": False, "security_service_update_support": False, "share_server_multiple_subnet_support": False, "network_allocation_update_support": False, "share_replicas_migration_support": False, 'encryption_support': None, } if replication_support: expected['replication_type'] = 'dr' pool = dict( pool_name='OpenStack_Pool', total_capacity_gb=2.0, free_capacity_gb=1.0, allocated_capacity_gb=1.0, 
qos=True, reserved_percentage=0, reserved_snapshot_percentage=0, reserved_share_extend_percentage=0, compression=[True, False], dedupe=[True, False], max_over_subscription_ratio=1, provisioned_capacity_gb=1.0, thin_provisioning=[True, False], huawei_smartcache=[True, False], huawei_smartpartition=[True, False], huawei_sectorsize=[True, False], huawei_disk_type='ssd', ) expected["pools"].append(pool) self.assertEqual(expected, self.driver._stats) @ddt.data({'TIER0CAPACITY': '100', 'TIER1CAPACITY': '0', 'TIER2CAPACITY': '0', 'disktype': 'ssd'}, {'TIER0CAPACITY': '0', 'TIER1CAPACITY': '100', 'TIER2CAPACITY': '0', 'disktype': 'sas'}, {'TIER0CAPACITY': '0', 'TIER1CAPACITY': '0', 'TIER2CAPACITY': '100', 'disktype': 'nl_sas'}, {'TIER0CAPACITY': '100', 'TIER1CAPACITY': '100', 'TIER2CAPACITY': '100', 'disktype': 'mix'}, {'TIER0CAPACITY': '0', 'TIER1CAPACITY': '0', 'TIER2CAPACITY': '0', 'disktype': ''}) def test_get_share_stats_disk_type(self, disk_type_value): self.driver.plugin.helper.login() storage_pool_info = {"error": {"code": 0}, "data": [{"USERFREECAPACITY": "2097152", "ID": "1", "NAME": "OpenStack_Pool", "USERTOTALCAPACITY": "4194304", "USAGETYPE": "2", "USERCONSUMEDCAPACITY": "2097152"}]} storage_pool_info['data'][0]['TIER0CAPACITY'] = ( disk_type_value['TIER0CAPACITY']) storage_pool_info['data'][0]['TIER1CAPACITY'] = ( disk_type_value['TIER1CAPACITY']) storage_pool_info['data'][0]['TIER2CAPACITY'] = ( disk_type_value['TIER2CAPACITY']) self.mock_object(self.driver.plugin.helper, '_find_all_pool_info', mock.Mock(return_value=storage_pool_info)) self.driver._update_share_stats() if disk_type_value['disktype']: self.assertEqual( disk_type_value['disktype'], self.driver._stats['pools'][0]['huawei_disk_type']) else: self.assertIsNone( self.driver._stats['pools'][0].get('huawei_disk_type')) def test_get_disk_type_pool_info_none(self): self.driver.plugin.helper.login() self.mock_object(self.driver.plugin.helper, '_find_pool_info', mock.Mock(return_value=None)) 
self.assertRaises(exception.InvalidInput, self.driver._update_share_stats) def test_allow_access_proto_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidInput, self.driver.allow_access, self._context, self.share_proto_fail, self.access_ip, self.share_server) def test_allow_access_ip_rw_success(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False self.driver.allow_access(self._context, self.share_nfs, self.access_ip, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) def test_allow_access_ip_ro_success(self): access_ro = { 'access_type': 'ip', 'access_to': '1.2.3.4', 'access_level': 'ro', } self.driver.plugin.helper.login() self.allow_flag = False self.allow_ro_flag = False self.driver.allow_access(self._context, self.share_nfs, access_ro, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_ro_flag) def test_allow_access_nfs_user_success(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False self.driver.allow_access(self._context, self.share_nfs, self.access_user, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) @ddt.data( { 'access_type': 'user', 'access_to': 'user_name', 'access_level': 'rw', }, { 'access_type': 'user', 'access_to': 'group_name', 'access_level': 'rw', }, { 'access_type': 'user', 'access_to': 'domain\\user_name', 'access_level': 'rw', }, { 'access_type': 'user', 'access_to': 'domain\\group_name', 'access_level': 'rw', }, ) def test_allow_access_cifs_rw_success(self, access_user): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False self.driver.allow_access(self._context, self.share_cifs, access_user, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) 
self.assertTrue(self.driver.plugin.helper.allow_rw_flag) def test_allow_access_cifs_user_ro_success(self): access_ro = { 'access_type': 'user', 'access_to': 'user_name', 'access_level': 'ro', } self.driver.plugin.helper.login() self.allow_flag = False self.allow_ro_flag = False self.driver.allow_access(self._context, self.share_cifs, access_ro, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_ro_flag) def test_allow_access_level_fail(self): access_fail = { 'access_type': 'user', 'access_to': 'user_name', 'access_level': 'fail', } self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShareAccess, self.driver.allow_access, self._context, self.share_cifs, access_fail, self.share_server) def test_update_access_add_delete(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False self.deny_flag = False add_rules = [self.access_ip] delete_rules = [self.access_ip_exist] self.driver.update_access(self._context, self.share_nfs, None, add_rules, delete_rules, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) self.assertTrue(self.driver.plugin.helper.deny_flag) def test_update_access_nfs(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False rules = [self.access_ip, self.access_ip_exist] self.driver.update_access(self._context, self.share_nfs, rules, None, None, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) self.assertTrue(self.driver.plugin.helper.allow_rw_flag) def test_update_access_cifs(self): self.driver.plugin.helper.login() self.allow_flag = False self.allow_rw_flag = False rules = [self.access_user, self.access_user_exist] self.driver.update_access(self._context, self.share_cifs, rules, None, None, self.share_server) self.assertTrue(self.driver.plugin.helper.allow_flag) 
self.assertTrue(self.driver.plugin.helper.allow_rw_flag) def test_update_access_rules_share_not_exist(self): self.driver.plugin.helper.login() rules = [self.access_ip] self.driver.plugin.helper.share_exist = False self.assertRaises(exception.ShareResourceNotFound, self.driver.update_access, self._context, self.share_nfs, rules, None, None, self.share_server) @ddt.data(True, False) def test_nfs_access_for_all_ip_addresses(self, is_allow): access_all = { 'access_type': 'ip', 'access_to': '0.0.0.0/0', 'access_level': 'rw', } self.driver.plugin.helper.login() method = (self.driver.allow_access if is_allow else self.driver.deny_access) with mock.patch.object(self.driver.plugin.helper, '_get_access_from_share') as mock_call: mock_call.return_value = None method(self._context, self.share_nfs, access_all, self.share_server) mock_call.assert_called_with('1', '*', 'NFS') def test_get_share_client_type_fail(self): share_proto = 'fake_proto' self.assertRaises(exception.InvalidInput, self.driver.plugin.helper._get_share_client_type, share_proto) @ddt.data("NFS", "CIFS") def test_get_share_url_type(self, share_proto): share_url_type = self.driver.plugin.helper._get_share_url_type( share_proto) self.assertEqual(share_proto + 'HARE', share_url_type) def test_get_location_path_fail(self): share_name = 'share-fake-uuid' share_proto = 'fake_proto' self.assertRaises(exception.InvalidShareAccess, self.driver.plugin._get_location_path, share_name, share_proto) def test_allow_access_nfs_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShareAccess, self.driver.allow_access, self._context, self.share_nfs, self.access_cert, self.share_server) def test_allow_access_cifs_fail(self): self.driver.plugin.helper.login() self.assertRaises(exception.InvalidShareAccess, self.driver.allow_access, self._context, self.share_cifs, self.access_ip, self.share_server) def test_deny_access_nfs_fail(self): self.driver.plugin.helper.login() result = 
self.driver.deny_access(self._context, self.share_nfs, self.access_cert, self.share_server) self.assertIsNone(result) def test_deny_access_not_exist_fail(self): self.driver.plugin.helper.login() access_ip_not_exist = { 'access_type': 'ip', 'access_to': '100.112.0.99', 'access_level': 'rw', } result = self.driver.deny_access(self._context, self.share_nfs, access_ip_not_exist, self.share_server) self.assertIsNone(result) def test_deny_access_cifs_fail(self): self.driver.plugin.helper.login() result = self.driver.deny_access(self._context, self.share_cifs, self.access_ip, self.share_server) self.assertIsNone(result) def test_allow_access_ip_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.assertRaises(exception.ShareResourceNotFound, self.driver.allow_access, self._context, self.share_nfs, self.access_ip, self.share_server) def test_deny_access_ip_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.driver.deny_access(self._context, self.share_nfs, self.access_ip, self.share_server) def test_allow_access_ip_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.allow_access, self._context, self.share_nfs, self.access_ip, self.share_server) def test_allow_access_user_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.allow_access, self._context, self.share_cifs, self.access_user, self.share_server) def test_deny_access_ip_success(self): self.driver.plugin.helper.login() self.deny_flag = False self.driver.deny_access(self._context, self.share_nfs, self.access_ip_exist, self.share_server) self.assertTrue(self.driver.plugin.helper.deny_flag) def test_deny_access_user_success(self): self.driver.plugin.helper.login() self.deny_flag = False self.driver.deny_access(self._context, 
self.share_cifs, self.access_user_exist, self.share_server) self.assertTrue(self.driver.plugin.helper.deny_flag) def test_deny_access_ip_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.deny_access, self._context, self.share_nfs, self.access_ip, self.share_server) def test_deny_access_user_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.deny_access, self._context, self.share_cifs, self.access_user, self.share_server) def test_create_nfs_snapshot_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.create_snapflag = False self.driver.create_snapshot(self._context, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.helper.create_snapflag) def test_create_nfs_snapshot_share_not_exist(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False self.assertRaises(exception.InvalidInput, self.driver.create_snapshot, self._context, self.nfs_snapshot, self.share_server) def test_create_cifs_snapshot_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.create_snapflag = False self.driver.create_snapshot(self._context, self.cifs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.helper.create_snapflag) def test_delete_snapshot_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False self.driver.plugin.helper.snapshot_flag = True self.driver.delete_snapshot(self._context, self.nfs_snapshot, self.share_server) self.assertTrue(self.driver.plugin.helper.delete_flag) def test_delete_snapshot_not_exist_success(self): self.driver.plugin.helper.login() self.driver.plugin.helper.delete_flag = False self.driver.plugin.helper.snapshot_flag = False self.driver.delete_snapshot(self._context, self.nfs_snapshot, self.share_server) 
self.assertTrue(self.driver.plugin.helper.delete_flag) def test_create_nfs_snapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.create_snapshot, self._context, self.nfs_snapshot, self.share_server) def test_create_cifs_snapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.create_snapshot, self._context, self.cifs_snapshot, self.share_server) def test_delete_nfs_snapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.delete_snapshot, self._context, self.nfs_snapshot, self.share_server) def test_delete_cifs_snapshot_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.test_normal = False self.assertRaises(exception.InvalidShare, self.driver.delete_snapshot, self._context, self.cifs_snapshot, self.share_server) @ddt.data({"share_proto": "NFS", "path": ["100.115.10.68:/share_fake_manage_uuid"]}, {"share_proto": "CIFS", "path": ["\\\\100.115.10.68\\share_fake_manage_uuid"]}) @ddt.unpack def test_manage_share_nfs_success(self, share_proto, path): if share_proto == "NFS": share = self.share_manage_nfs elif share_proto == "CIFS": share = self.share_manage_cifs share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() share_info = self.driver.manage_existing(share, self.driver_options) self.assertEqual(4, share_info["size"]) self.assertEqual(path, share_info["export_locations"]) @ddt.data({"fs_alloctype": "THIN", "path": ["100.115.10.68:/share_fake_manage_uuid"]}, {"fs_alloctype": "THICK", "path": ["100.115.10.68:/share_fake_uuid_thickfs"]}) @ddt.unpack def test_manage_share_with_default_type(self, fs_alloctype, path): if fs_alloctype == "THIN": share = 
self.share_manage_nfs elif fs_alloctype == "THICK": share = self.share_nfs_thick_thickfs share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() share_info = self.driver.manage_existing(share, self.driver_options) self.assertEqual(4, share_info["size"]) self.assertEqual(path, share_info["export_locations"]) @ddt.data({"path": ["100.115.10.68:/share_fake_uuid_inpartition"]}) @ddt.unpack def test_manage_share_remove_from_partition(self, path): share = self.share_nfs_inpartition share_type = self.fake_type_fake_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() share_info = self.driver.manage_existing(share, self.driver_options) self.assertEqual(4, share_info["size"]) self.assertEqual(path, share_info["export_locations"]) @ddt.data({"flag": "share_not_exist", "exc": exception.InvalidShare}, {"flag": "fs_status_error", "exc": exception.InvalidShare}, {"flag": "poolname_not_match", "exc": exception.InvalidHost}) @ddt.unpack def test_manage_share_fail(self, flag, exc): share = None if flag == "share_not_exist": self.driver.plugin.helper.share_exist = False share = self.share_nfs elif flag == "fs_status_error": self.driver.plugin.helper.fs_status_flag = False share = self.share_nfs elif flag == "poolname_not_match": share = self.share_pool_name_not_match self.driver.plugin.helper.login() share_type = self.fake_type_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.assertRaises(exc, self.driver.manage_existing, share, self.driver_options) def test_manage_share_thickfs_set_dedupe_fail(self): share = self.share_nfs_thick_thickfs self.driver.plugin.helper.login() share_type = self.fake_type_thin_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) 
self.assertRaises(exception.InvalidInput, self.driver.manage_existing, share, self.driver_options) def test_manage_share_thickfs_not_match_thinpool_fail(self): share = self.share_nfs_thickfs self.driver.plugin.helper.login() share_type = self.fake_type_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.assertRaises(exception.InvalidHost, self.driver.manage_existing, share, self.driver_options) @ddt.data({"flag": "old_cache_id", "exc": exception.InvalidInput}, {"flag": "not_old_cache_id", "exc": exception.InvalidInput}) @ddt.unpack def test_manage_share_cache_not_exist(self, flag, exc): share = None if flag == "old_cache_id": share = self.share_nfs_inpartition elif flag == "not_old_cache_id": share = self.share_nfs self.driver.plugin.helper.cache_exist = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exc, self.driver.manage_existing, share, self.share_server) def test_manage_add_share_to_cache_fail(self): opts = dict( huawei_smartcache='true', huawei_smartpartition='true', cachename='test_cache_name_fake', partitionname='test_partition_name_fake', ) fs = dict( SMARTCACHEID='6', SMARTPARTITIONID=None, ) poolinfo = dict( type='Thin', ) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_retype_change_opts, opts, poolinfo, fs) def test_manage_notsetcache_fail(self): opts = dict( huawei_smartcache='true', huawei_smartpartition='true', cachename=None, partitionname='test_partition_name_fake', ) fs = dict( SMARTCACHEID='6', SMARTPARTITIONID='6', ) poolinfo = dict( type='Thin', ) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_retype_change_opts, opts, poolinfo, fs) @ddt.data({"flag": "old_partition_id", "exc": exception.InvalidInput}, {"flag": "not_old_partition_id", "exc": exception.InvalidInput}) @ddt.unpack def 
test_manage_share_partition_not_exist(self, flag, exc): share = None if flag == "old_partition_id": share = self.share_nfs_inpartition elif flag == "not_old_partition_id": share = self.share_nfs self.driver.plugin.helper.partition_exist = False share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exc, self.driver.manage_existing, share, self.share_server) def test_manage_add_share_to_partition_fail(self): opts = dict( huawei_smartcache='true', huawei_smartpartition='true', cachename='test_cache_name_fake', partitionname='test_partition_name_fake', ) fs = dict( SMARTCACHEID=None, SMARTPARTITIONID='6', ) poolinfo = dict( type='Thin', ) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_retype_change_opts, opts, poolinfo, fs) def test_manage_notset_partition_fail(self): opts = dict( huawei_smartcache='true', huawei_smartpartition='true', cachename='test_cache_name_fake', partitionname=None, ) fs = dict( SMARTCACHEID=None, SMARTPARTITIONID='6', ) poolinfo = dict( type='Thin', ) self.assertRaises(exception.InvalidInput, self.driver.plugin.check_retype_change_opts, opts, poolinfo, fs) @ddt.data({"share_proto": "NFS", "export_path": "fake_ip:/share_fake_uuid"}, {"share_proto": "NFS", "export_path": "fake_ip:/"}, {"share_proto": "NFS", "export_path": "100.112.0.1://share_fake_uuid"}, {"share_proto": "NFS", "export_path": None}, {"share_proto": "NFS", "export_path": "\\share_fake_uuid"}, {"share_proto": "CIFS", "export_path": "\\\\fake_ip\\share_fake_uuid"}, {"share_proto": "CIFS", "export_path": "\\dd\\100.115.10.68\\share_fake_uuid"}) @ddt.unpack def test_manage_export_path_fail(self, share_proto, export_path): share_manage_nfs_export_path_fail = { 'id': 'fake_uuid', 'project_id': 'fake_tenant_id', 'display_name': 'fake', 'name': 'share-fake-manage-uuid', 'size': 1, 'share_proto': share_proto, 'share_network_id': 'fake_net_id', 
'share_server_id': 'fake-share-srv-id', 'export_locations': [ {'path': export_path}, ], 'host': 'fake_host@fake_backend#OpenStack_Pool', 'share_type_id': 'fake_id' } share_type = self.fake_type_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.login() self.assertRaises(exception.InvalidInput, self.driver.manage_existing, share_manage_nfs_export_path_fail, self.driver_options) def test_manage_logical_port_ip_fail(self): self.recreate_fake_conf_file(logical_port="") self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.login() share_type = self.fake_type_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.assertRaises(exception.InvalidInput, self.driver.manage_existing, self.share_nfs, self.driver_options) @ddt.data({"share_proto": "NFS", "provider_location": "share_snapshot_fake_snapshot_uuid"}, {"share_proto": "CIFS", "provider_location": "share_snapshot_fake_snapshot_uuid"}) @ddt.unpack def test_manage_existing_snapshot_success(self, share_proto, provider_location): if share_proto == "NFS": snapshot = self.storage_nfs_snapshot elif share_proto == "CIFS": snapshot = self.storage_cifs_snapshot self.driver.plugin.helper.login() snapshot_info = self.driver.manage_existing_snapshot( snapshot, self.driver_options) self.assertEqual(provider_location, snapshot_info['provider_location']) def test_manage_existing_snapshot_share_not_exist(self): self.driver.plugin.helper.login() self.mock_object(self.driver.plugin.helper, '_get_share_by_name', mock.Mock(return_value={})) self.assertRaises(exception.InvalidShare, self.driver.manage_existing_snapshot, self.storage_nfs_snapshot, self.driver_options) def test_manage_existing_snapshot_sharesnapshot_not_exist(self): self.driver.plugin.helper.login() self.mock_object(self.driver.plugin.helper, '_check_snapshot_id_exist', mock.Mock(return_value={})) 
self.assertRaises(exception.ManageInvalidShareSnapshot, self.driver.manage_existing_snapshot, self.storage_nfs_snapshot, self.driver_options) def test_manage_existing_snapshot_sharesnapshot_not_normal(self): snapshot_info = {"error": {"code": 0}, "data": {"ID": "4@share_snapshot_fake_snapshot_uuid", "NAME": "share_snapshot_fake_snapshot_uuid", "HEALTHSTATUS": "2"}} self.driver.plugin.helper.login() self.mock_object(self.driver.plugin.helper, '_get_snapshot_by_id', mock.Mock(return_value=snapshot_info)) self.assertRaises(exception.ManageInvalidShareSnapshot, self.driver.manage_existing_snapshot, self.storage_nfs_snapshot, self.driver_options) def test_get_pool_success(self): self.driver.plugin.helper.login() pool_name = self.driver.get_pool(self.share_nfs_host_not_exist) self.assertEqual('OpenStack_Pool', pool_name) def test_get_pool_fail(self): self.driver.plugin.helper.login() self.driver.plugin.helper.share_exist = False pool_name = self.driver.get_pool(self.share_nfs_host_not_exist) self.assertIsNone(pool_name) def test_multi_resturls_success(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.recreate_fake_conf_file(multi_url=True) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.test_multi_url_flag = 2 location = self.driver.create_share(self._context, self.share_nfs, self.share_server) self.assertEqual("100.115.10.68:/share_fake_uuid", location) def test_multi_resturls_fail(self): self.recreate_fake_conf_file(multi_url=True) self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.driver.plugin.helper.test_multi_url_flag = 1 self.assertRaises(exception.InvalidShare, self.driver.create_share, self._context, self.share_nfs, self.share_server) @dec_driver_handles_share_servers def test_setup_server_success(self): backend_details = self.driver.setup_server(self.fake_network_info) 
fake_share_server = { 'backend_details': backend_details } share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) location = self.driver.create_share(self._context, self.share_nfs, fake_share_server) self.assertTrue(db.share_type_get.called) self.assertEqual((self.fake_network_allocations[0]['ip_address'] + ":/share_fake_uuid"), location) @dec_driver_handles_share_servers def test_setup_server_with_bond_port_success(self): self.recreate_fake_conf_file(logical_port='fake_bond') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) backend_details = self.driver.setup_server(self.fake_network_info) fake_share_server = { 'backend_details': backend_details } share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) location = self.driver.create_share(self._context, self.share_nfs, fake_share_server) self.assertTrue(db.share_type_get.called) self.assertEqual((self.fake_network_allocations[0]['ip_address'] + ":/share_fake_uuid"), location) @dec_driver_handles_share_servers def test_setup_server_logical_port_exist(self): def call_logical_port_exist(*args, **kwargs): url = args[0] method = args[2] if url == "/LIF" and method == "GET": data = """{"error":{"code":0},"data":[{ "ID":"4", "HOMEPORTID":"4", "IPV4ADDR":"111.111.111.109", "IPV4MASK":"255.255.255.0", "OPERATIONALSTATUS":"false"}]}""" elif url == "/LIF/4" and method == "PUT": data = """{"error":{"code":0}}""" else: return self.driver.plugin.helper.do_call(*args, **kwargs) res_json = jsonutils.loads(data) return res_json self.mock_object(self.driver.plugin.helper, "create_logical_port") with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = call_logical_port_exist backend_details = self.driver.setup_server(self.fake_network_info) self.assertEqual(backend_details['ip'], 
self.fake_network_allocations[0]['ip_address']) self.assertEqual( 0, self.driver.plugin.helper.create_logical_port.call_count) @dec_driver_handles_share_servers def test_setup_server_vlan_exist(self): def call_vlan_exist(*args, **kwargs): url = args[0] method = args[2] if url == "/vlan" and method == "GET": data = """{"error":{"code":0},"data":[{ "ID":"4", "NAME":"fake_vlan", "PORTID":"4", "TAG":"2"}]}""" else: return self.driver.plugin.helper.do_call(*args, **kwargs) res_json = jsonutils.loads(data) return res_json self.mock_object(self.driver.plugin.helper, "create_vlan") with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = call_vlan_exist backend_details = self.driver.setup_server(self.fake_network_info) self.assertEqual(backend_details['ip'], self.fake_network_allocations[0]['ip_address']) self.assertEqual( 0, self.driver.plugin.helper.create_vlan.call_count) def test_setup_server_invalid_ipv4(self): netwot_info_invali_ipv4 = self.fake_network_info netwot_info_invali_ipv4[0]['network_allocations'][0]['ip_address'] = ( "::1/128") self.assertRaises(exception.InvalidInput, self.driver._setup_server, netwot_info_invali_ipv4) @dec_driver_handles_share_servers def test_setup_server_network_type_error(self): vxlan_netwotk_info = self.fake_network_info vxlan_netwotk_info[0]['network_type'] = 'vxlan' self.assertRaises(exception.NetworkBadConfigurationException, self.driver.setup_server, vxlan_netwotk_info) @dec_driver_handles_share_servers def test_setup_server_port_conf_miss(self): self.recreate_fake_conf_file(logical_port='') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) backend_details = self.driver.setup_server(self.fake_network_info) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) @dec_driver_handles_share_servers def test_setup_server_port_offline_error(self): self.mock_object(self.driver.plugin, '_get_online_port', mock.Mock(return_value=(None, 
None))) self.assertRaises(exception.InvalidInput, self.driver.setup_server, self.fake_network_info) self.assertTrue(self.driver.plugin._get_online_port.called) @dec_driver_handles_share_servers def test_setup_server_port_not_exist(self): self.mock_object(self.driver.plugin.helper, 'get_port_id', mock.Mock(return_value=None)) self.assertRaises(exception.InvalidInput, self.driver.setup_server, self.fake_network_info) self.assertTrue(self.driver.plugin.helper.get_port_id.called) @dec_driver_handles_share_servers def test_setup_server_port_type_not_exist(self): self.mock_object(self.driver.plugin, '_get_optimal_port', mock.Mock(return_value=('CTE0.A.H2', '8'))) self.assertRaises(exception.InvalidInput, self.driver.setup_server, self.fake_network_info) self.assertTrue(self.driver.plugin._get_optimal_port.called) @dec_driver_handles_share_servers def test_setup_server_choose_eth_port(self): self.recreate_fake_conf_file(logical_port='CTE0.A.H0;fake_bond') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.mock_object(self.driver.plugin.helper, 'get_all_vlan', mock.Mock(return_value=[{'NAME': 'fake_bond.10'}])) fake_network_info = self.fake_network_info backend_details = self.driver.setup_server(fake_network_info) self.assertTrue(self.driver.plugin.helper.get_all_vlan.called) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) @dec_driver_handles_share_servers def test_setup_server_choose_bond_port(self): self.recreate_fake_conf_file(logical_port='CTE0.A.H0;fake_bond') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) self.mock_object(self.driver.plugin.helper, 'get_all_vlan', mock.Mock(return_value=[{'NAME': 'CTE0.A.H0.10'}])) fake_network_info = self.fake_network_info backend_details = self.driver.setup_server(fake_network_info) self.assertTrue(self.driver.plugin.helper.get_all_vlan.called) self.assertEqual(self.fake_network_allocations[0]['ip_address'], 
backend_details['ip']) @dec_driver_handles_share_servers def test_setup_server_choose_least_logic_port(self): self.recreate_fake_conf_file( logical_port='CTE0.A.H0;CTE0.A.H2;CTE0.B.H0;BOND0') self.driver.plugin.configuration.manila_huawei_conf_file = ( self.fake_conf_file) fake_network_info = [{ 'server_id': '0', 'segmentation_id': None, 'cidr': '111.111.111.0/24', 'network_allocations': self.fake_network_allocations, 'network_type': None, }] self.mock_object(self.driver.plugin, '_get_online_port', mock.Mock(return_value=(['CTE0.A.H0', 'CTE0.A.H2', 'CTE0.B.H0'], ['BOND0']))) self.mock_object(self.driver.plugin.helper, 'get_all_logical_port', mock.Mock(return_value=[ {'HOMEPORTTYPE': constants.PORT_TYPE_ETH, 'HOMEPORTNAME': 'CTE0.A.H0'}, {'HOMEPORTTYPE': constants.PORT_TYPE_VLAN, 'HOMEPORTNAME': 'CTE0.B.H0.10'}, {'HOMEPORTTYPE': constants.PORT_TYPE_BOND, 'HOMEPORTNAME': 'BOND0'}])) self.mock_object(self.driver.plugin.helper, 'get_port_id', mock.Mock(return_value=4)) backend_details = self.driver.setup_server(fake_network_info) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) self.driver.plugin._get_online_port.assert_called_once_with( ['CTE0.A.H0', 'CTE0.A.H2', 'CTE0.B.H0', 'BOND0']) self.assertTrue(self.driver.plugin.helper.get_all_logical_port.called) self.driver.plugin.helper.get_port_id.assert_called_once_with( 'CTE0.A.H2', constants.PORT_TYPE_ETH) @dec_driver_handles_share_servers def test_setup_server_create_vlan_fail(self): def call_create_vlan_fail(*args, **kwargs): url = args[0] method = args[2] if url == "/vlan" and method == "POST": data = """{"error":{"code":1}}""" res_json = jsonutils.loads(data) return res_json else: return self.driver.plugin.helper.do_call(*args, **kwargs) with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = call_create_vlan_fail self.assertRaises(exception.InvalidShare, self.driver.setup_server, self.fake_network_info) @dec_driver_handles_share_servers 
def test_setup_server_create_logical_port_fail(self): def call_create_logical_port_fail(*args, **kwargs): url = args[0] method = args[2] if url == "/LIF" and method == "POST": data = """{"error":{"code":1}}""" res_json = jsonutils.loads(data) return res_json else: return self.driver.plugin.helper.do_call(*args, **kwargs) fake_network_info = self.fake_network_info fake_network_info[0]['security_services'] = [ self.fake_active_directory, self.fake_ldap] self.mock_object(self.driver.plugin.helper, "delete_vlan") self.mock_object(self.driver.plugin.helper, "delete_AD_config") self.mock_object(self.driver.plugin.helper, "delete_LDAP_config") self.mock_object(self.driver.plugin.helper, "get_AD_config", mock.Mock(side_effect=[None, {'DOMAINSTATUS': '1'}, {'DOMAINSTATUS': '0'}])) self.mock_object( self.driver.plugin.helper, "get_LDAP_config", mock.Mock( side_effect=[None, {'BASEDN': 'dc=huawei,dc=com'}])) with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = call_create_logical_port_fail self.assertRaises(exception.InvalidShare, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called) self.assertEqual( 1, self.driver.plugin.helper.delete_vlan.call_count) self.assertEqual( 1, self.driver.plugin.helper.delete_AD_config.call_count) self.assertEqual( 1, self.driver.plugin.helper.delete_LDAP_config.call_count) @dec_driver_handles_share_servers def test_setup_server_with_ad_domain_success(self): fake_network_info = self.fake_network_info fake_network_info[0]['security_services'] = ( [self.fake_active_directory]) self.mock_object(self.driver.plugin.helper, "get_AD_config", mock.Mock( side_effect=[None, {'DOMAINSTATUS': '0', 'FULLDOMAINNAME': 'huawei.com'}, {'DOMAINSTATUS': '1', 'FULLDOMAINNAME': 'huawei.com'}])) backend_details = self.driver.setup_server(fake_network_info) 
self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) @ddt.data( "100.97.5.87", "100.97.5.87,100.97.5.88", "100.97.5.87,100.97.5.88,100.97.5.89" ) @dec_driver_handles_share_servers def test_setup_server_with_ldap_domain_success(self, server_ips): fake_network_info = self.fake_network_info fake_network_info[0]['security_services'] = [self.fake_ldap] fake_network_info[0]['security_services'][0]['server'] = server_ips self.mock_object( self.driver.plugin.helper, "get_LDAP_config", mock.Mock( side_effect=[None, {'BASEDN': 'dc=huawei,dc=com'}])) backend_details = self.driver.setup_server(fake_network_info) self.assertEqual(self.fake_network_allocations[0]['ip_address'], backend_details['ip']) self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called) @dec_driver_handles_share_servers def test_setup_server_with_ldap_domain_fail(self): server_ips = "100.97.5.87,100.97.5.88,100.97.5.89,100.97.5.86" fake_network_info = self.fake_network_info fake_network_info[0]['security_services'] = [self.fake_ldap] fake_network_info[0]['security_services'][0]['server'] = server_ips self.mock_object( self.driver.plugin.helper, "get_LDAP_config", mock.Mock( side_effect=[None, {'BASEDN': 'dc=huawei,dc=com'}])) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called) @ddt.data( {'type': 'fake_unsupport'}, {'type': 'active_directory', 'dns_ip': '', 'user': '', 'password': '', 'domain': ''}, {'type': 'ldap', 'server': '', 'domain': ''}, ) @dec_driver_handles_share_servers def test_setup_server_with_security_service_invalid(self, data): fake_network_info = self.fake_network_info fake_network_info[0]['security_services'] = [data] self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) @dec_driver_handles_share_servers def 
test_setup_server_with_security_service_number_invalid(self): fake_network_info = self.fake_network_info ss = [ {'type': 'fake_unsupport'}, {'type': 'active_directory', 'dns_ip': '', 'user': '', 'password': '', 'domain': ''}, {'type': 'ldap', 'server': '', 'domain': ''}, ] fake_network_info[0]['security_services'] = ss self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) @dec_driver_handles_share_servers def test_setup_server_dns_exist_error(self): fake_network_info = self.fake_network_info fake_network_info[0]['security_services'] = ( [self.fake_active_directory]) self.mock_object(self.driver.plugin.helper, "get_DNS_ip_address", mock.Mock(return_value=['100.97.5.85'])) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_DNS_ip_address.called) @dec_driver_handles_share_servers def test_setup_server_ad_exist_error(self): fake_network_info = self.fake_network_info fake_network_info[0]['security_services'] = ( [self.fake_active_directory]) self.mock_object(self.driver.plugin.helper, "get_AD_config", mock.Mock( return_value={'DOMAINSTATUS': '1', 'FULLDOMAINNAME': 'huawei.com'})) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) @dec_driver_handles_share_servers def test_setup_server_ldap_exist_error(self): fake_network_info = self.fake_network_info fake_network_info[0]['security_services'] = [self.fake_ldap] self.mock_object(self.driver.plugin.helper, "get_LDAP_config", mock.Mock( return_value={'LDAPSERVER': '100.97.5.87'})) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_LDAP_config.called) @dec_driver_handles_share_servers def test_setup_server_with_dns_fail(self): fake_network_info = self.fake_network_info fake_active_directory = self.fake_active_directory ip_list = 
"100.97.5.5,100.97.5.6,100.97.5.7,100.97.5.8" fake_active_directory['dns_ip'] = ip_list fake_network_info[0]['security_services'] = [fake_active_directory] self.mock_object( self.driver.plugin.helper, "get_AD_config", mock.Mock(side_effect=[None, {'DOMAINSTATUS': '1'}])) self.assertRaises(exception.InvalidInput, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) @dec_driver_handles_share_servers def test_setup_server_with_ad_domain_fail(self): fake_network_info = self.fake_network_info fake_network_info[0]['security_services'] = ( [self.fake_active_directory]) self.mock_object(self.driver.plugin, '_get_wait_interval', mock.Mock(return_value=1)) self.mock_object(self.driver.plugin, '_get_timeout', mock.Mock(return_value=1)) self.mock_object( self.driver.plugin.helper, "get_AD_config", mock.Mock(side_effect=[None, {'DOMAINSTATUS': '0', 'FULLDOMAINNAME': 'huawei.com'}])) self.mock_object(self.driver.plugin.helper, "set_DNS_ip_address") self.assertRaises(exception.InvalidShare, self.driver.setup_server, fake_network_info) self.assertTrue(self.driver.plugin.helper.get_AD_config.called) self.assertTrue(self.driver.plugin._get_wait_interval.called) self.assertTrue(self.driver.plugin._get_timeout.called) self.assertEqual( 2, self.driver.plugin.helper.set_DNS_ip_address.call_count) def test_teardown_server_success(self): server_details = { "logical_port_id": "1", "vlan_id": "2", "ad_created": "1", "ldap_created": "1", } security_services = [ self.fake_ldap, self.fake_active_directory ] self.logical_port_deleted = False self.vlan_deleted = False self.ad_deleted = False self.ldap_deleted = False self.dns_deleted = False def fake_teardown_call(*args, **kwargs): url = args[0] method = args[2] if url.startswith("/LIF"): if method == "GET": data = """{"error":{"code":0},"data":[{ "ID":"1"}]}""" elif method == "DELETE": data = """{"error":{"code":0}}""" self.logical_port_deleted = True elif url.startswith("/vlan"): if 
method == "GET": data = """{"error":{"code":0},"data":[{ "ID":"2"}]}""" elif method == "DELETE": data = """{"error":{"code":1073813505}}""" self.vlan_deleted = True elif url == "/AD_CONFIG": if method == "PUT": data = """{"error":{"code":0}}""" self.ad_deleted = True elif method == "GET": if self.ad_deleted: data = """{"error":{"code":0},"data":{ "DOMAINSTATUS":"0"}}""" else: data = """{"error":{"code":0},"data":{ "DOMAINSTATUS":"1", "FULLDOMAINNAME":"huawei.com"}}""" else: data = """{"error":{"code":0}}""" elif url == "/LDAP_CONFIG": if method == "DELETE": data = """{"error":{"code":0}}""" self.ldap_deleted = True elif method == "GET": if self.ldap_deleted: data = """{"error":{"code":0}}""" else: data = """{"error":{"code":0},"data":{ "LDAPSERVER":"100.97.5.87", "BASEDN":"dc=huawei,dc=com"}}""" else: data = """{"error":{"code":0}}""" elif url == "/DNS_Server": if method == "GET": data = "{\"error\":{\"code\":0},\"data\":{\ \"ADDRESS\":\"[\\\"100.97.5.5\\\",\\\"\\\"]\"}}" elif method == "PUT": data = """{"error":{"code":0}}""" self.dns_deleted = True else: data = """{"error":{"code":0}}""" else: return self.driver.plugin.helper.do_call(*args, **kwargs) res_json = jsonutils.loads(data) return res_json with mock.patch.object(self.driver.plugin.helper, 'call') as mock_call: mock_call.side_effect = fake_teardown_call self.driver._teardown_server(server_details, security_services) self.assertTrue(self.logical_port_deleted) self.assertTrue(self.vlan_deleted) self.assertTrue(self.ad_deleted) self.assertTrue(self.ldap_deleted) self.assertTrue(self.dns_deleted) def test_teardown_server_with_already_deleted(self): server_details = { "logical_port_id": "1", "vlan_id": "2", "ad_created": "1", "ldap_created": "1", } security_services = [ self.fake_ldap, self.fake_active_directory ] self.mock_object(self.driver.plugin.helper, "check_logical_port_exists_by_id", mock.Mock(return_value=False)) self.mock_object(self.driver.plugin.helper, "check_vlan_exists_by_id", 
mock.Mock(return_value=False)) self.mock_object(self.driver.plugin.helper, "get_DNS_ip_address", mock.Mock(return_value=None)) self.mock_object(self.driver.plugin.helper, "get_AD_domain_name", mock.Mock(return_value=(False, None))) self.mock_object(self.driver.plugin.helper, "get_LDAP_domain_server", mock.Mock(return_value=(False, None))) self.driver._teardown_server(server_details, security_services) self.assertEqual(1, (self.driver.plugin.helper. check_logical_port_exists_by_id.call_count)) self.assertEqual(1, (self.driver.plugin.helper. check_vlan_exists_by_id.call_count)) self.assertEqual(1, (self.driver.plugin.helper. get_DNS_ip_address.call_count)) self.assertEqual(1, (self.driver.plugin.helper. get_AD_domain_name.call_count)) self.assertEqual(1, (self.driver.plugin.helper. get_LDAP_domain_server.call_count)) def test_teardown_server_with_vlan_logical_port_deleted(self): server_details = { "logical_port_id": "1", "vlan_id": "2", } self.mock_object(self.driver.plugin.helper, 'get_all_logical_port', mock.Mock(return_value=[{'ID': '4'}])) self.mock_object(self.driver.plugin.helper, 'get_all_vlan', mock.Mock(return_value=[{'ID': '4'}])) self.driver._teardown_server(server_details, None) self.assertEqual(1, (self.driver.plugin.helper. get_all_logical_port.call_count)) self.assertEqual(1, (self.driver.plugin.helper. 
get_all_vlan.call_count)) def test_teardown_server_with_empty_detail(self): server_details = {} with mock.patch.object(connection.LOG, 'debug') as mock_debug: self.driver._teardown_server(server_details, None) mock_debug.assert_called_with('Server details are empty.') @ddt.data({"share_proto": "NFS", "path": ["100.115.10.68:/share_fake_uuid"]}, {"share_proto": "CIFS", "path": ["\\\\100.115.10.68\\share_fake_uuid"]}) @ddt.unpack def test_ensure_share_sucess(self, share_proto, path): share = self._get_share_by_proto(share_proto) self.driver.plugin.helper.login() location = self.driver.ensure_share(self._context, share, self.share_server) self.assertEqual(path, location) @ddt.data({"share_proto": "NFS", "path": ["111.111.111.109:/share_fake_uuid"]}, {"share_proto": "CIFS", "path": ["\\\\111.111.111.109\\share_fake_uuid"]}) @ddt.unpack @dec_driver_handles_share_servers def test_ensure_share_with_share_server_sucess(self, share_proto, path): share = self._get_share_by_proto(share_proto) backend_details = self.driver.setup_server(self.fake_network_info) fake_share_server = {'backend_details': backend_details} location = self.driver.ensure_share(self._context, share, fake_share_server) self.assertEqual(path, location) @ddt.data({"share_proto": "NFS"}, {"share_proto": "CIFS"}) @ddt.unpack def test_ensure_share_get_share_fail(self, share_proto): share = self._get_share_by_proto(share_proto) self.mock_object(self.driver.plugin.helper, '_get_share_by_name', mock.Mock(return_value={})) self.driver.plugin.helper.login() self.assertRaises(exception.ShareResourceNotFound, self.driver.ensure_share, self._context, share, self.share_server) def test_ensure_share_get_filesystem_status_fail(self): self.driver.plugin.helper.fs_status_flag = False share = self.share_nfs_thickfs self.driver.plugin.helper.login() self.assertRaises(exception.StorageResourceException, self.driver.ensure_share, self._context, share, self.share_server) def _add_conf_file_element(self, doc, parent_element, 
name, value=None): new_element = doc.createElement(name) if value: new_text = doc.createTextNode(value) new_element.appendChild(new_text) parent_element.appendChild(new_element) def create_fake_conf_file(self, fake_conf_file, product_flag=True, username_flag=True, pool_node_flag=True, timeout_flag=True, wait_interval_flag=True, sectorsize_value='4', multi_url=False, logical_port='100.115.10.68', snapshot_support=True, replication_support=False): doc = xml.dom.minidom.Document() config = doc.createElement('Config') doc.appendChild(config) storage = doc.createElement('Storage') config.appendChild(storage) if self.configuration.driver_handles_share_servers: port0 = doc.createElement('Port') port0_text = doc.createTextNode(logical_port) port0.appendChild(port0_text) storage.appendChild(port0) else: controllerip0 = doc.createElement('LogicalPortIP') controllerip0_text = doc.createTextNode(logical_port) controllerip0.appendChild(controllerip0_text) storage.appendChild(controllerip0) if product_flag: product_text = doc.createTextNode('V3') else: product_text = doc.createTextNode('V3_fail') product = doc.createElement('Product') product.appendChild(product_text) storage.appendChild(product) if username_flag: username_text = doc.createTextNode('admin') else: username_text = doc.createTextNode('') username = doc.createElement('UserName') username.appendChild(username_text) storage.appendChild(username) userpassword = doc.createElement('UserPassword') userpassword_text = doc.createTextNode('Admin@storage') userpassword.appendChild(userpassword_text) storage.appendChild(userpassword) url = doc.createElement('RestURL') if multi_url: url_text = doc.createTextNode('http://100.115.10.69:8082/' 'deviceManager/rest/;' 'http://100.115.10.70:8082/' 'deviceManager/rest/') else: url_text = doc.createTextNode('http://100.115.10.69:8082/' 'deviceManager/rest/') url.appendChild(url_text) storage.appendChild(url) if snapshot_support: self._add_conf_file_element( doc, storage, 
'SnapshotSupport', 'True') if replication_support: self._add_conf_file_element( doc, storage, 'ReplicationSupport', 'True') lun = doc.createElement('Filesystem') config.appendChild(lun) storagepool = doc.createElement('StoragePool') if pool_node_flag: pool_text = doc.createTextNode('OpenStack_Pool;OpenStack_Pool2; ;') else: pool_text = doc.createTextNode('') storagepool.appendChild(pool_text) timeout = doc.createElement('Timeout') if timeout_flag: timeout_text = doc.createTextNode('60') else: timeout_text = doc.createTextNode('') timeout.appendChild(timeout_text) waitinterval = doc.createElement('WaitInterval') if wait_interval_flag: waitinterval_text = doc.createTextNode('3') else: waitinterval_text = doc.createTextNode('') waitinterval.appendChild(waitinterval_text) NFSClient = doc.createElement('NFSClient') virtualip = doc.createElement('IP') virtualip_text = doc.createTextNode('100.112.0.1') virtualip.appendChild(virtualip_text) NFSClient.appendChild(virtualip) CIFSClient = doc.createElement('CIFSClient') username = doc.createElement('UserName') username_text = doc.createTextNode('user_name') username.appendChild(username_text) CIFSClient.appendChild(username) userpassword = doc.createElement('UserPassword') userpassword_text = doc.createTextNode('user_password') userpassword.appendChild(userpassword_text) CIFSClient.appendChild(userpassword) lun.appendChild(NFSClient) lun.appendChild(CIFSClient) lun.appendChild(timeout) lun.appendChild(waitinterval) lun.appendChild(storagepool) if sectorsize_value: sectorsize = doc.createElement('SectorSize') sectorsize_text = doc.createTextNode(sectorsize_value) sectorsize.appendChild(sectorsize_text) lun.appendChild(sectorsize) prefetch = doc.createElement('Prefetch') prefetch.setAttribute('Type', '0') prefetch.setAttribute('Value', '0') lun.appendChild(prefetch) fakefile = open(fake_conf_file, 'w') fakefile.write(doc.toprettyxml(indent='')) fakefile.close() def recreate_fake_conf_file(self, product_flag=True, 
username_flag=True, pool_node_flag=True, timeout_flag=True, wait_interval_flag=True, sectorsize_value='4', multi_url=False, logical_port='100.115.10.68', snapshot_support=True, replication_support=False): self.tmp_dir = tempfile.mkdtemp() self.fake_conf_file = self.tmp_dir + '/manila_huawei_conf.xml' self.addCleanup(shutil.rmtree, self.tmp_dir) self.create_fake_conf_file(self.fake_conf_file, product_flag, username_flag, pool_node_flag, timeout_flag, wait_interval_flag, sectorsize_value, multi_url, logical_port, snapshot_support, replication_support) self.addCleanup(os.remove, self.fake_conf_file) @ddt.data(common_constants.STATUS_ERROR, common_constants.REPLICA_STATE_IN_SYNC, common_constants.REPLICA_STATE_OUT_OF_SYNC) def test_create_replica_success(self, replica_state): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) if replica_state == common_constants.STATUS_ERROR: self.driver.plugin.helper.custom_results[ '/REPLICATIONPAIR/fake_pair_id'] = { "GET": """{"error":{"code":0}, "data":{"HEALTHSTATUS": "2"}}"""} elif replica_state == common_constants.REPLICA_STATE_OUT_OF_SYNC: self.driver.plugin.helper.custom_results[ '/REPLICATIONPAIR/fake_pair_id'] = { "GET": """{"error":{"code":0}, "data":{"HEALTHSTATUS": "1", "RUNNINGSTATUS": "1", "SECRESDATASTATUS": "5"}}"""} result = self.driver.create_replica( self._context, [self.active_replica, self.new_replica], self.new_replica, [], [], None) expected = { 'export_locations': ['100.115.10.68:/share_fake_new_uuid'], 'replica_state': replica_state, 'access_rules_status': common_constants.STATUS_ACTIVE, } self.assertEqual(expected, result) self.assertEqual('fake_pair_id', self.driver.plugin.private_storage.get( 'fake_share_id', 'replica_pair_id')) @ddt.data({'url': '/FILESYSTEM?filter=NAME::share_fake_uuid' '&range=[0-8191]', 'url_result': '{"error":{"code":0}}', 'expected_exception': exception.ReplicationException}, {'url': '/NFSHARE', 
'url_result': '{"error":{"code":-403}}', 'expected_exception': exception.InvalidShare}, {'url': '/REPLICATIONPAIR', 'url_result': '{"error":{"code":-403}}', 'expected_exception': exception.InvalidShare},) @ddt.unpack def test_create_replica_fail(self, url, url_result, expected_exception): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.custom_results[url] = url_result self.assertRaises(expected_exception, self.driver.create_replica, self._context, [self.active_replica, self.new_replica], self.new_replica, [], [], None) self.assertIsNone(self.driver.plugin.private_storage.get( 'fake_share_id', 'replica_pair_id')) def test_create_replica_with_get_state_fail(self): share_type = self.fake_type_not_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) self.driver.plugin.helper.custom_results[ '/REPLICATIONPAIR/fake_pair_id'] = { "GET": """{"error":{"code":-403}}"""} result = self.driver.create_replica( self._context, [self.active_replica, self.new_replica], self.new_replica, [], [], None) expected = { 'export_locations': ['100.115.10.68:/share_fake_new_uuid'], 'replica_state': common_constants.STATUS_ERROR, 'access_rules_status': common_constants.STATUS_ACTIVE, } self.assertEqual(expected, result) self.assertEqual('fake_pair_id', self.driver.plugin.private_storage.get( 'fake_share_id', 'replica_pair_id')) def test_create_replica_with_already_exists(self): self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) self.assertRaises(exception.ReplicationException, self.driver.create_replica, self._context, [self.active_replica, self.new_replica], self.new_replica, [], [], None) @ddt.data({'pair_info': """{"HEALTHSTATUS": "2", "SECRESDATASTATUS": "2", "ISPRIMARY": "false", "SECRESACCESS": "1", "RUNNINGSTATUS": "1"}""", 'assert_method': 'get_replication_pair_by_id'}, {'pair_info': 
"""{"HEALTHSTATUS": "1", "SECRESDATASTATUS": "2", "ISPRIMARY": "true", "SECRESACCESS": "1", "RUNNINGSTATUS": "1"}""", 'assert_method': 'switch_replication_pair'}, {'pair_info': """{"HEALTHSTATUS": "1", "SECRESDATASTATUS": "2", "ISPRIMARY": "false", "SECRESACCESS": "3", "RUNNINGSTATUS": "1"}""", 'assert_method': 'set_pair_secondary_write_lock'}, {'pair_info': """{"HEALTHSTATUS": "1", "SECRESDATASTATUS": "2", "ISPRIMARY": "false", "SECRESACCESS": "1", "RUNNINGSTATUS": "33"}""", 'assert_method': 'sync_replication_pair'},) @ddt.unpack def test_update_replica_state_success(self, pair_info, assert_method): self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) helper_method = getattr(self.driver.plugin.helper, assert_method) mocker = self.mock_object(self.driver.plugin.helper, assert_method, mock.Mock(wraps=helper_method)) self.driver.plugin.helper.custom_results[ '/REPLICATIONPAIR/fake_pair_id'] = { "GET": """{"error":{"code":0}, "data":%s}""" % pair_info} self.driver.update_replica_state( self._context, [self.active_replica, self.new_replica], self.new_replica, [], [], None) mocker.assert_called_with('fake_pair_id') @ddt.data({'pair_info': """{"HEALTHSTATUS": "1", "SECRESDATASTATUS": "2", "ISPRIMARY": "true", "SECRESACCESS": "1", "RUNNINGSTATUS": "1"}""", 'assert_method': 'switch_replication_pair', 'error_url': '/REPLICATIONPAIR/switch'}, {'pair_info': """{"HEALTHSTATUS": "1", "SECRESDATASTATUS": "2", "ISPRIMARY": "false", "SECRESACCESS": "3", "RUNNINGSTATUS": "1"}""", 'assert_method': 'set_pair_secondary_write_lock', 'error_url': '/REPLICATIONPAIR/SET_SECODARY_WRITE_LOCK'}, {'pair_info': """{"HEALTHSTATUS": "1", "SECRESDATASTATUS": "2", "ISPRIMARY": "false", "SECRESACCESS": "1", "RUNNINGSTATUS": "26"}""", 'assert_method': 'sync_replication_pair', 'error_url': '/REPLICATIONPAIR/sync'},) @ddt.unpack def test_update_replica_state_with_exception_ignore( self, pair_info, assert_method, error_url): 
self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) helper_method = getattr(self.driver.plugin.helper, assert_method) mocker = self.mock_object(self.driver.plugin.helper, assert_method, mock.Mock(wraps=helper_method)) self.driver.plugin.helper.custom_results[ error_url] = """{"error":{"code":-403}}""" self.driver.plugin.helper.custom_results[ '/REPLICATIONPAIR/fake_pair_id'] = { "GET": """{"error":{"code":0}, "data":%s}""" % pair_info} self.driver.update_replica_state( self._context, [self.active_replica, self.new_replica], self.new_replica, [], [], None) mocker.assert_called_once_with('fake_pair_id') def test_update_replica_state_with_replication_abnormal(self): self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) self.driver.plugin.helper.custom_results[ '/REPLICATIONPAIR/fake_pair_id'] = { "GET": """{"error":{"code":0}, "data":{"HEALTHSTATUS": "2"}}"""} result = self.driver.update_replica_state( self._context, [self.active_replica, self.new_replica], self.new_replica, [], [], None) self.assertEqual(common_constants.STATUS_ERROR, result) def test_update_replica_state_with_no_pair_id(self): result = self.driver.update_replica_state( self._context, [self.active_replica, self.new_replica], self.new_replica, [], [], None) self.assertEqual(common_constants.STATUS_ERROR, result) @ddt.data('true', 'false') def test_promote_replica_success(self, is_primary): self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) self.driver.plugin.helper.custom_results[ '/REPLICATIONPAIR/fake_pair_id'] = { "GET": """{"error": {"code": 0}, "data": {"HEALTHSTATUS": "1", "RUNNINGSTATUS": "1", "SECRESDATASTATUS": "2", "ISPRIMARY": "%s"}}""" % is_primary} result = self.driver.promote_replica( self._context, [self.active_replica, self.new_replica], self.new_replica, [], None) expected = [ {'id': self.new_replica['id'], 'replica_state': 
common_constants.REPLICA_STATE_ACTIVE, 'access_rules_status': common_constants.STATUS_ACTIVE}, {'id': self.active_replica['id'], 'replica_state': common_constants.REPLICA_STATE_IN_SYNC, 'access_rules_status': common_constants.SHARE_INSTANCE_RULES_SYNCING}, ] self.assertEqual(expected, result) @ddt.data({'mock_method': 'update_access', 'new_access_status': common_constants.SHARE_INSTANCE_RULES_SYNCING, 'old_access_status': common_constants.SHARE_INSTANCE_RULES_SYNCING}, {'mock_method': 'clear_access', 'new_access_status': common_constants.SHARE_INSTANCE_RULES_SYNCING, 'old_access_status': common_constants.STATUS_ACTIVE},) @ddt.unpack def test_promote_replica_with_access_update_error( self, mock_method, new_access_status, old_access_status): self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) self.driver.plugin.helper.custom_results[ '/REPLICATIONPAIR/fake_pair_id'] = { "GET": """{"error": {"code": 0}, "data": {"HEALTHSTATUS": "1", "RUNNINGSTATUS": "1", "SECRESDATASTATUS": "2", "ISPRIMARY": "false"}}"""} mocker = self.mock_object(self.driver.plugin, mock_method, mock.Mock(side_effect=Exception('err'))) result = self.driver.promote_replica( self._context, [self.active_replica, self.new_replica], self.new_replica, [], None) expected = [ {'id': self.new_replica['id'], 'replica_state': common_constants.REPLICA_STATE_ACTIVE, 'access_rules_status': new_access_status}, {'id': self.active_replica['id'], 'replica_state': common_constants.REPLICA_STATE_IN_SYNC, 'access_rules_status': old_access_status}, ] self.assertEqual(expected, result) mocker.assert_called() @ddt.data({'error_url': '/REPLICATIONPAIR/split', 'assert_method': 'split_replication_pair'}, {'error_url': '/REPLICATIONPAIR/switch', 'assert_method': 'switch_replication_pair'}, {'error_url': '/REPLICATIONPAIR/SET_SECODARY_WRITE_LOCK', 'assert_method': 'set_pair_secondary_write_lock'}, {'error_url': '/REPLICATIONPAIR/sync', 'assert_method': 'sync_replication_pair'},) 
@ddt.unpack def test_promote_replica_with_error_ignore(self, error_url, assert_method): self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) helper_method = getattr(self.driver.plugin.helper, assert_method) mocker = self.mock_object(self.driver.plugin.helper, assert_method, mock.Mock(wraps=helper_method)) self.driver.plugin.helper.custom_results[ error_url] = '{"error":{"code":-403}}' fake_pair_infos = [{'ISPRIMARY': 'False', 'HEALTHSTATUS': '1', 'RUNNINGSTATUS': '1', 'SECRESDATASTATUS': '1'}, {'HEALTHSTATUS': '2'}] self.mock_object(self.driver.plugin.replica_mgr, '_get_replication_pair_info', mock.Mock(side_effect=fake_pair_infos)) result = self.driver.promote_replica( self._context, [self.active_replica, self.new_replica], self.new_replica, [], None) expected = [ {'id': self.new_replica['id'], 'replica_state': common_constants.REPLICA_STATE_ACTIVE, 'access_rules_status': common_constants.STATUS_ACTIVE}, {'id': self.active_replica['id'], 'replica_state': common_constants.STATUS_ERROR, 'access_rules_status': common_constants.SHARE_INSTANCE_RULES_SYNCING}, ] self.assertEqual(expected, result) mocker.assert_called_once_with('fake_pair_id') @ddt.data({'error_url': '/REPLICATIONPAIR/fake_pair_id', 'url_result': """{"error":{"code":0}, "data":{"HEALTHSTATUS": "1", "ISPRIMARY": "false", "RUNNINGSTATUS": "1", "SECRESDATASTATUS": "5"}}""", 'expected_exception': exception.ReplicationException}, {'error_url': '/REPLICATIONPAIR/CANCEL_SECODARY_WRITE_LOCK', 'url_result': """{"error":{"code":-403}}""", 'expected_exception': exception.InvalidShare},) @ddt.unpack def test_promote_replica_fail(self, error_url, url_result, expected_exception): self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) self.driver.plugin.helper.custom_results[error_url] = url_result self.assertRaises(expected_exception, self.driver.promote_replica, self._context, [self.active_replica, self.new_replica], 
self.new_replica, [], None) def test_promote_replica_with_no_pair_id(self): self.assertRaises(exception.ReplicationException, self.driver.promote_replica, self._context, [self.active_replica, self.new_replica], self.new_replica, [], None) @ddt.data({'url': '/REPLICATIONPAIR/split', 'url_result': '{"error":{"code":-403}}'}, {'url': '/REPLICATIONPAIR/fake_pair_id', 'url_result': '{"error":{"code":1077937923}}'}, {'url': '/REPLICATIONPAIR/fake_pair_id', 'url_result': '{"error":{"code":0}}'},) @ddt.unpack def test_delete_replica_success(self, url, url_result): self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) self.driver.plugin.helper.custom_results['/filesystem/8'] = { "DELETE": '{"error":{"code":0}}'} self.driver.plugin.helper.custom_results[url] = url_result self.driver.delete_replica(self._context, [self.active_replica, self.new_replica], [], self.new_replica, None) self.assertIsNone(self.driver.plugin.private_storage.get( 'fake_share_id', 'replica_pair_id')) @ddt.data({'url': '/REPLICATIONPAIR/fake_pair_id', 'expected': 'fake_pair_id'}, {'url': '/filesystem/8', 'expected': None},) @ddt.unpack def test_delete_replica_fail(self, url, expected): self.driver.plugin.private_storage.update( 'fake_share_id', {'replica_pair_id': 'fake_pair_id'}) self.driver.plugin.helper.custom_results[url] = { "DELETE": '{"error":{"code":-403}}'} self.assertRaises(exception.InvalidShare, self.driver.delete_replica, self._context, [self.active_replica, self.new_replica], [], self.new_replica, None) self.assertEqual(expected, self.driver.plugin.private_storage.get( 'fake_share_id', 'replica_pair_id')) def test_delete_replica_with_no_pair_id(self): self.driver.plugin.helper.custom_results['/filesystem/8'] = { "DELETE": '{"error":{"code":0}}'} self.driver.delete_replica(self._context, [self.active_replica, self.new_replica], [], self.new_replica, None) @ddt.data({'pair_info': """{"HEALTHSTATUS": "2"}""", 'expected_state': 
common_constants.STATUS_ERROR}, {'pair_info': """{"HEALTHSTATUS": "1", "RUNNINGSTATUS": "26"}""", 'expected_state': common_constants.REPLICA_STATE_OUT_OF_SYNC}, {'pair_info': """{"HEALTHSTATUS": "1", "RUNNINGSTATUS": "33"}""", 'expected_state': common_constants.REPLICA_STATE_OUT_OF_SYNC}, {'pair_info': """{"HEALTHSTATUS": "1", "RUNNINGSTATUS": "34"}""", 'expected_state': common_constants.STATUS_ERROR}, {'pair_info': """{"HEALTHSTATUS": "1", "RUNNINGSTATUS": "35"}""", 'expected_state': common_constants.STATUS_ERROR}, {'pair_info': """{"HEALTHSTATUS": "1", "SECRESDATASTATUS": "1", "RUNNINGSTATUS": "1"}""", 'expected_state': common_constants.REPLICA_STATE_IN_SYNC}, {'pair_info': """{"HEALTHSTATUS": "1", "SECRESDATASTATUS": "2", "RUNNINGSTATUS": "1"}""", 'expected_state': common_constants.REPLICA_STATE_IN_SYNC}, {'pair_info': """{"HEALTHSTATUS": "1", "SECRESDATASTATUS": "5", "RUNNINGSTATUS": "1"}""", 'expected_state': common_constants.REPLICA_STATE_OUT_OF_SYNC}) @ddt.unpack def test_get_replica_state(self, pair_info, expected_state): self.driver.plugin.helper.custom_results[ '/REPLICATIONPAIR/fake_pair_id'] = { "GET": """{"error":{"code":0}, "data":%s}""" % pair_info} result_state = self.driver.plugin.replica_mgr.get_replica_state( 'fake_pair_id') self.assertEqual(expected_state, result_state) @ddt.data(*constants.QOS_STATUSES) def test_delete_qos(self, qos_status): self.driver.plugin.helper.custom_results['/ioclass/11'] = { "GET": """{"error":{"code":0}, "data":{"RUNNINGSTATUS": "%s"}}""" % qos_status } activate_deactivate_qos_mock = self.mock_object( self.driver.plugin.helper, 'activate_deactivate_qos') delete_qos_mock = self.mock_object( self.driver.plugin.helper, 'delete_qos_policy') qos = smartx.SmartQos(self.driver.plugin.helper) qos.delete_qos('11') if qos_status == constants.STATUS_QOS_INACTIVATED: activate_deactivate_qos_mock.assert_not_called() else: activate_deactivate_qos_mock.assert_called_once_with('11', False) 
delete_qos_mock.assert_called_once_with('11') def test_username_password_encode_decode(self): for i in (1, 2): # First loop will encode the username/password and # write back to configuration. # Second loop will get the encoded username/password and # decode them. logininfo = self.driver.plugin.helper._get_login_info() self.assertEqual('admin', logininfo['UserName']) self.assertEqual('Admin@storage', logininfo['UserPassword']) @ddt.data({ 'username': 'abc', 'password': '123456', 'expect_username': 'abc', 'expect_password': '123456', }, { 'username': '!$$$YWJj', 'password': '!$$$MTIzNDU2', 'expect_username': 'abc', 'expect_password': '123456', }, { 'username': 'ab!$$$c', 'password': '123!$$$456', 'expect_username': 'ab!$$$c', 'expect_password': '123!$$$456', }) @ddt.unpack def test__get_login_info(self, username, password, expect_username, expect_password): configs = { 'Storage/RestURL': 'https://123456', 'Storage/UserName': username, 'Storage/UserPassword': password, } self.mock_object( ET, 'parse', mock.Mock(return_value=FakeConfigParseTree(configs))) result = self.driver.plugin.helper._get_login_info() self.assertEqual(expect_username, result['UserName']) self.assertEqual(expect_password, result['UserPassword']) ET.parse.assert_called_once_with(self.fake_conf_file) def test_revert_to_snapshot_success(self): snapshot = {'id': 'fake-fs-id', 'share_name': 'share_fake_uuid'} with mock.patch.object( self.driver.plugin.helper, 'call') as mock_call: mock_call.return_value = { "error": {"code": 0}, "data": [{"ID": "4", "NAME": "share_fake_uuid"}] } self.driver.revert_to_snapshot(None, snapshot, None, None) expect_snapshot_id = "4@share_snapshot_fake_fs_id" mock_call.assert_called_with( "/FSSNAPSHOT/ROLLBACK_FSSNAPSHOT", jsonutils.dumps({"ID": expect_snapshot_id}), 'PUT') def test_revert_to_snapshot_exception(self): snapshot = {'id': 'fake-snap-id', 'share_name': 'not_exist_share_name', 'share_id': 'fake_share_id'} self.assertRaises(exception.ShareResourceNotFound, 
self.driver.revert_to_snapshot, None, snapshot, None, None) @ddt.data({'name': 'fake_name', 'share_proto': 'NFS', 'mount_path': 'fake_nfs_mount_path', 'mount_src': '/mnt/test'}, {'name': 'fake_name', 'share_proto': 'CIFS', 'mount_path': 'fake_cifs_mount_path', 'mount_src': '/mnt/test'}, ) def test_mount_share_to_host(self, share): access = {'access_to': 'cifs_user', 'access_password': 'cifs_password'} mocker = self.mock_object(utils, 'execute') self.driver.plugin.mount_share_to_host(share, access) if share['share_proto'] == 'NFS': mocker.assert_called_once_with( 'mount', '-t', 'nfs', 'fake_nfs_mount_path', '/mnt/test', run_as_root=True) else: mocker.assert_called_once_with( 'mount', '-t', 'cifs', 'fake_cifs_mount_path', '/mnt/test', '-o', 'username=cifs_user,password=cifs_password', run_as_root=True) @ddt.ddt class HuaweiDriverHelperTestCase(test.TestCase): def setUp(self): super(HuaweiDriverHelperTestCase, self).setUp() self.helper = helper.RestHelper(None) def test_init_http_head(self): self.helper.init_http_head() self.assertIsNone(self.helper.url) self.assertFalse(self.helper.session.verify) self.assertEqual("keep-alive", self.helper.session.headers["Connection"]) self.assertEqual("application/json", self.helper.session.headers["Content-Type"]) @ddt.data(('fake_data', 'POST'), (None, 'POST'), (None, 'PUT'), (None, 'GET'), ('fake_data', 'PUT'), (None, 'DELETE'), ) @ddt.unpack def test_do_call_with_valid_method(self, data, method): self.helper.init_http_head() mocker = self.mock_object(self.helper.session, method.lower()) self.helper.do_call("fake-rest-url", data, method) kwargs = {'timeout': constants.SOCKET_TIMEOUT} if data: kwargs['data'] = data mocker.assert_called_once_with("fake-rest-url", **kwargs) def test_do_call_with_invalid_method(self): self.assertRaises(exception.ShareBackendException, self.helper.do_call, "fake-rest-url", None, 'fake-method') def test_do_call_with_http_error(self): self.helper.init_http_head() fake_res = requests.Response() 
fake_res.reason = 'something wrong' fake_res.status_code = 500 fake_res.url = "fake-rest-url" self.mock_object(self.helper.session, 'post', mock.Mock(return_value=fake_res)) res = self.helper.do_call("fake-rest-url", None, 'POST') expected = { "error": { "code": 500, "description": '500 Server Error: something wrong for ' 'url: fake-rest-url'} } self.assertDictEqual(expected, res) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0216703 manila-21.0.0/manila/tests/share/drivers/ibm/0000775000175000017500000000000000000000000021010 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/ibm/__init__.py0000664000175000017500000000000000000000000023107 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/ibm/test_gpfs.py0000664000175000017500000022523600000000000023372 0ustar00zuulzuul00000000000000# Copyright (c) 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the IBM GPFS driver module.""" import re import socket from unittest import mock import ddt from oslo_config import cfg from manila import context from manila import exception import manila.share.configuration as config import manila.share.drivers.ibm.gpfs as gpfs from manila.share import share_types from manila import ssh_utils from manila import test from manila.tests import fake_share from manila import utils CONF = cfg.CONF @ddt.ddt class GPFSShareDriverTestCase(test.TestCase): """Tests GPFSShareDriver.""" def setUp(self): super(GPFSShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._gpfs_execute = mock.Mock(return_value=('', '')) self.GPFS_PATH = '/usr/lpp/mmfs/bin/' self._helper_fake = mock.Mock() CONF.set_default('driver_handles_share_servers', False) CONF.set_default('share_backend_name', 'GPFS') self.fake_conf = config.Configuration(None) self._driver = gpfs.GPFSShareDriver(execute=self._gpfs_execute, configuration=self.fake_conf) self._knfs_helper = gpfs.KNFSHelper(self._gpfs_execute, self.fake_conf) self._ces_helper = gpfs.CESHelper(self._gpfs_execute, self.fake_conf) self.fakedev = "/dev/gpfs0" self.fakefspath = "/gpfs0" self.fakesharepath = "/gpfs0/share-fakeid" self.fakeexistingshare = "existingshare" self.fakesnapshotpath = "/gpfs0/.snapshots/snapshot-fakesnapshotid" self.fake_ces_exports = """ mmcesnfslsexport:nfsexports:HEADER:version:reserved:reserved:Path:Delegations:Clients:Access_Type:Protocols:Transports:Squash:Anonymous_uid:Anonymous_gid:SecType:PrivilegedPort:DefaultDelegations:Manage_Gids:NFS_Commit: mmcesnfslsexport:nfsexports:0:1:::/gpfs0/share-fakeid:none:44.3.2.11:RW:3,4:TCP:ROOT_SQUASH:-2:-2:SYS:FALSE:none:FALSE:FALSE: mmcesnfslsexport:nfsexports:0:1:::/gpfs0/share-fakeid:none:1:2:3:4:5:6:7:8:RW:3,4:TCP:ROOT_SQUASH:-2:-2:SYS:FALSE:none:FALSE:FALSE: mmcesnfslsexport:nfsexports:0:1:::/gpfs0/share-fakeid:none:10.0.0.1:RW:3,4:TCP:ROOT_SQUASH:-2:-2:SYS:FALSE:none:FALSE:FALSE: """ 
self.fake_ces_exports_not_found = """ mmcesnfslsexport:nfsexports:HEADER:version:reserved:reserved:Path:Delegations:Clients:Access_Type:Protocols:Transports:Squash:Anonymous_uid:Anonymous_gid:SecType:PrivilegedPort:DefaultDelegations:Manage_Gids:NFS_Commit: """ self.mock_object(gpfs.os.path, 'exists', mock.Mock(return_value=True)) self._driver._helpers = { 'CES': self._helper_fake } self.share = fake_share.fake_share(share_proto='NFS', host='fakehost@fakehost#GPFS') self.server = { 'backend_details': { 'ip': '1.2.3.4', 'instance_id': 'fake' } } self.access = fake_share.fake_access() self.snapshot = fake_share.fake_snapshot() self.local_ip = "192.11.22.1" self.remote_ip = "192.11.22.2" self.remote_ip2 = "2.2.2.2" gpfs_nfs_server_list = [self.remote_ip, self.local_ip, self.remote_ip2, "fake_location"] self._knfs_helper.configuration.gpfs_nfs_server_list = ( gpfs_nfs_server_list) self._ces_helper.configuration.gpfs_nfs_server_list = ( gpfs_nfs_server_list) self._ces_helper.configuration.ganesha_config_path = ( "fake_ganesha_config_path") self.sshlogin = "fake_login" self.sshkey = "fake_sshkey" self.gservice = "fake_ganesha_service" self._ces_helper.configuration.gpfs_ssh_login = self.sshlogin self._ces_helper.configuration.gpfs_ssh_private_key = self.sshkey self._ces_helper.configuration.ganesha_service_name = self.gservice self.mock_object(socket, 'gethostname', mock.Mock(return_value="testserver")) self.mock_object(socket, 'gethostbyname_ex', mock.Mock( return_value=('localhost', ['localhost.localdomain', 'testserver'], ['127.0.0.1', self.local_ip]) )) def test__run_ssh(self): cmd_list = ['fake', 'cmd'] expected_cmd = 'fake cmd' ssh_pool = mock.Mock() ssh = mock.Mock() self.mock_object(ssh_utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) ssh_pool.item = mock.Mock(return_value=ssh) setattr(ssh, '__enter__', mock.Mock()) setattr(ssh, '__exit__', mock.Mock()) self.mock_object(self._driver, '_gpfs_ssh_execute') self._driver._run_ssh(self.local_ip, cmd_list) 
self._driver._gpfs_ssh_execute.assert_called_once_with( mock.ANY, expected_cmd, check_exit_code=True, ignore_exit_code=None) def test__run_ssh_exception(self): cmd_list = ['fake', 'cmd'] ssh_pool = mock.Mock() ssh = mock.Mock() self.mock_object(ssh_utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) ssh_pool.item = mock.Mock(return_value=ssh) self.mock_object(self._driver, '_gpfs_ssh_execute') self.assertRaises(exception.GPFSException, self._driver._run_ssh, self.local_ip, cmd_list) def test__gpfs_ssh_execute(self): cmd = 'fake cmd' expected_out = 'cmd successful' expected_err = 'cmd error' ssh = mock.Mock() stdin_stream = mock.Mock() stdout_stream = mock.Mock() stderr_stream = mock.Mock() ssh.exec_command = mock.Mock(return_value=(stdin_stream, stdout_stream, stderr_stream)) stdout_stream.channel.recv_exit_status = mock.Mock(return_value=-1) stdout_stream.read = mock.Mock(return_value=expected_out) stderr_stream.read = mock.Mock(return_value=expected_err) stdin_stream.close = mock.Mock() actual_out, actual_err = self._driver._gpfs_ssh_execute(ssh, cmd) self.assertEqual(actual_out, expected_out) self.assertEqual(actual_err, expected_err) def test__gpfs_ssh_execute_exception(self): cmd = 'fake cmd' ssh = mock.Mock() stdin_stream = mock.Mock() stdout_stream = mock.Mock() stderr_stream = mock.Mock() ssh.exec_command = mock.Mock(return_value=(stdin_stream, stdout_stream, stderr_stream)) stdout_stream.channel.recv_exit_status = mock.Mock(return_value=1) stdout_stream.read = mock.Mock() stderr_stream.read = mock.Mock() stdin_stream.close = mock.Mock() self.assertRaises(exception.ProcessExecutionError, self._driver._gpfs_ssh_execute, ssh, cmd) def test_get_share_stats_refresh_false(self): self._driver._stats = {'fake_key': 'fake_value'} result = self._driver.get_share_stats(False) self.assertEqual(self._driver._stats, result) def test_get_share_stats_refresh_true(self): self.mock_object( self._driver, '_get_available_capacity', mock.Mock(return_value=(11111.0, 12345.0))) 
result = self._driver.get_share_stats(True) expected_keys = [ 'qos', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'vendor_name', 'storage_protocol', ] for key in expected_keys: self.assertIn(key, result) self.assertFalse(result['driver_handles_share_servers']) self.assertEqual('IBM', result['vendor_name']) self._driver._get_available_capacity.assert_called_once_with( self._driver.configuration.gpfs_mount_point_base) def test_do_setup(self): self.mock_object(self._driver, '_setup_helpers') self._driver.do_setup(self._context) self.assertEqual(self._driver._gpfs_execute, self._driver._gpfs_remote_execute) self._driver._setup_helpers.assert_called_once_with() def test_do_setup_gpfs_local_execute(self): self.mock_object(self._driver, '_setup_helpers') self._driver.configuration.is_gpfs_node = True self._driver.do_setup(self._context) self.assertEqual(self._driver._gpfs_execute, self._driver._gpfs_local_execute) self._driver._setup_helpers.assert_called_once_with() def test_setup_helpers(self): self._driver._helpers = {} CONF.set_default('gpfs_share_helpers', ['CES=fakenfs']) self.mock_object(gpfs.importutils, 'import_class', mock.Mock(return_value=self._helper_fake)) self._driver._setup_helpers() gpfs.importutils.import_class.assert_has_calls( [mock.call('fakenfs')] ) self.assertEqual(len(self._driver._helpers), 1) @ddt.data(fake_share.fake_share(), fake_share.fake_share(share_proto='NFSBOGUS')) def test__get_helper_with_wrong_proto(self, share): self.assertRaises(exception.InvalidShare, self._driver._get_helper, share) def test__local_path(self): sharename = 'fakesharename' self._driver.configuration.gpfs_mount_point_base = ( self.fakefspath) local_path = self._driver._local_path(sharename) self.assertEqual(self.fakefspath + '/' + sharename, local_path) def test__get_share_path(self): self._driver.configuration.gpfs_mount_point_base = ( self.fakefspath) share_path = 
self._driver._get_share_path(self.share) self.assertEqual(self.fakefspath + '/' + self.share['name'], share_path) def test__get_snapshot_path(self): self._driver.configuration.gpfs_mount_point_base = ( self.fakefspath) snapshot_path = self._driver._get_snapshot_path(self.snapshot) self.assertEqual(self.fakefspath + '/' + self.snapshot['share_name'] + '/.snapshots/' + self.snapshot['name'], snapshot_path) def test_check_for_setup_error_for_gpfs_state(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=False)) self.assertRaises(exception.GPFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_for_export_ip(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = None self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_check_for_setup_error_for_gpfs_mount_point_base(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = 'test' self.assertRaises(exception.GPFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_for_directory_check(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = self.fakefspath self.mock_object(self._driver, '_is_dir', mock.Mock(return_value=False)) self.assertRaises(exception.GPFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_for_gpfs_path_check(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = self.fakefspath self.mock_object(self._driver, '_is_dir', 
mock.Mock(return_value=True)) self.mock_object(self._driver, '_is_gpfs_path', mock.Mock(return_value=False)) self.assertRaises(exception.GPFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_for_nfs_server_type(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = self.fakefspath self.mock_object(self._driver, '_is_dir', mock.Mock(return_value=True)) self.mock_object(self._driver, '_is_gpfs_path', mock.Mock(return_value=True)) self._driver.configuration.gpfs_nfs_server_type = 'test' self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_check_for_setup_error_for_nfs_server_list(self): self.mock_object(self._driver, '_check_gpfs_state', mock.Mock(return_value=True)) self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver.configuration.gpfs_mount_point_base = self.fakefspath self.mock_object(self._driver, '_is_dir', mock.Mock(return_value=True)) self.mock_object(self._driver, '_is_gpfs_path', mock.Mock(return_value=True)) self._driver.configuration.gpfs_nfs_server_type = 'KNFS' self._driver.configuration.gpfs_nfs_server_list = None self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test__get_available_capacity(self): path = self.fakefspath mock_out = "Filesystem 1-blocks Used Available Capacity Mounted on\n\ /dev/gpfs0 100 30 70 30% /gpfs0" self.mock_object(self._driver, '_gpfs_execute', mock.Mock(return_value=(mock_out, ''))) available, size = self._driver._get_available_capacity(path) self.assertEqual(70, available) self.assertEqual(100, size) def test_create_share(self): self._helper_fake.create_export.return_value = 'fakelocation' methods = ('_create_share', '_get_share_path') for method in methods: self.mock_object(self._driver, method) result = self._driver.create_share(self._context, 
self.share, share_server=self.server) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_share_path.assert_called_once_with(self.share) self.assertEqual(result, 'fakelocation') def test_create_share_from_snapshot(self): self._helper_fake.create_export.return_value = 'fakelocation' self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._driver._create_share_from_snapshot = mock.Mock() result = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot, share_server=None) self._driver._get_share_path.assert_called_once_with(self.share) self._driver._create_share_from_snapshot.assert_called_once_with( self.share, self.snapshot, self.fakesharepath ) self.assertEqual(result, 'fakelocation') def test_create_snapshot(self): self._driver._create_share_snapshot = mock.Mock() self._driver.create_snapshot(self._context, self.snapshot, share_server=None) self._driver._create_share_snapshot.assert_called_once_with( self.snapshot ) def test_delete_share(self): self._driver._get_share_path = mock.Mock( return_value=self.fakesharepath ) self._driver._delete_share = mock.Mock() self._driver.delete_share(self._context, self.share, share_server=None) self._driver._get_share_path.assert_called_once_with(self.share) self._driver._delete_share.assert_called_once_with(self.share) self._helper_fake.remove_export.assert_called_once_with( self.fakesharepath, self.share ) def test_delete_snapshot(self): self._driver._delete_share_snapshot = mock.Mock() self._driver.delete_snapshot(self._context, self.snapshot, share_server=None) self._driver._delete_share_snapshot.assert_called_once_with( self.snapshot ) def test__delete_share_snapshot(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock(return_value=0) self._driver._delete_share_snapshot(self.snapshot) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmdelsnapshot', self.fakedev, 
self.snapshot['name'], '-j', self.snapshot['share_name'] ) self._driver._get_gpfs_device.assert_called_once_with() def test__delete_share_snapshot_exception(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._delete_share_snapshot, self.snapshot) self._driver._get_gpfs_device.assert_called_once_with() self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmdelsnapshot', self.fakedev, self.snapshot['name'], '-j', self.snapshot['share_name'] ) def test_extend_share(self): self._driver._extend_share = mock.Mock() self._driver.extend_share(self.share, 10) self._driver._extend_share.assert_called_once_with(self.share, 10) def test__extend_share(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._extend_share(self.share, 10) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' + self.share['name'], '--block', '0:10G') self._driver._get_gpfs_device.assert_called_once_with() def test__extend_share_exception(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._extend_share, self.share, 10) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' + self.share['name'], '--block', '0:10G') self._driver._get_gpfs_device.assert_called_once_with() def test_update_access_allow(self): """Test allow_access functionality via update_access.""" self._driver._get_share_path = mock.Mock( return_value=self.fakesharepath ) self._helper_fake.allow_access = mock.Mock() self._driver.update_access(self._context, self.share, ["ignored"], [self.access], [], [], 
share_server=None) self._helper_fake.allow_access.assert_called_once_with( self.fakesharepath, self.share, self.access) self.assertFalse(self._helper_fake.resync_access.called) self._driver._get_share_path.assert_called_once_with(self.share) def test_update_access_deny(self): """Test deny_access functionality via update_access.""" self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._helper_fake.deny_access = mock.Mock() self._driver.update_access(self._context, self.share, ["ignored"], [], [self.access], [], share_server=None) self._helper_fake.deny_access.assert_called_once_with( self.fakesharepath, self.share, self.access) self.assertFalse(self._helper_fake.resync_access.called) self._driver._get_share_path.assert_called_once_with(self.share) def test_update_access_both(self): """Test update_access with allow and deny lists.""" self._driver._get_share_path = mock.Mock(return_value=self. fakesharepath) self._helper_fake.deny_access = mock.Mock() self._helper_fake.allow_access = mock.Mock() self._helper_fake.resync_access = mock.Mock() access_1 = fake_share.fake_access(access_to="1.1.1.1") access_2 = fake_share.fake_access(access_to="2.2.2.2") self._driver.update_access(self._context, self.share, ["ignore"], [access_1], [access_2], [], share_server=None) self.assertFalse(self._helper_fake.resync_access.called) self._helper_fake.allow_access.assert_called_once_with( self.fakesharepath, self.share, access_1) self._helper_fake.deny_access.assert_called_once_with( self.fakesharepath, self.share, access_2) self._driver._get_share_path.assert_called_once_with(self.share) def test_update_access_resync(self): """Test recovery mode update_access.""" self._driver._get_share_path = mock.Mock(return_value=self. 
fakesharepath) self._helper_fake.deny_access = mock.Mock() self._helper_fake.allow_access = mock.Mock() self._helper_fake.resync_access = mock.Mock() access_1 = fake_share.fake_access(access_to="1.1.1.1") access_2 = fake_share.fake_access(access_to="2.2.2.2") self._driver.update_access(self._context, self.share, [access_1, access_2], [], [], [], share_server=None) self._helper_fake.resync_access.assert_called_once_with( self.fakesharepath, self.share, [access_1, access_2]) self.assertFalse(self._helper_fake.allow_access.called) self.assertFalse(self._helper_fake.allow_access.called) self._driver._get_share_path.assert_called_once_with(self.share) def test__check_gpfs_state_active(self): fakeout = "mmgetstate::state:\nmmgetstate::active:" self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) result = self._driver._check_gpfs_state() self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmgetstate', '-Y') self.assertEqual(result, True) def test__check_gpfs_state_down(self): fakeout = "mmgetstate::state:\nmmgetstate::down:" self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) result = self._driver._check_gpfs_state() self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmgetstate', '-Y') self.assertEqual(result, False) def test__check_gpfs_state_wrong_output_exception(self): fakeout = "mmgetstate fake out" self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) self.assertRaises(exception.GPFSException, self._driver._check_gpfs_state) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmgetstate', '-Y') def test__check_gpfs_state_exception(self): self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._check_gpfs_state) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmgetstate', '-Y') def test__is_dir_success(self): fakeoutput = "directory" self._driver._gpfs_execute = 
mock.Mock(return_value=(fakeoutput, '')) result = self._driver._is_dir(self.fakefspath) self._driver._gpfs_execute.assert_called_once_with( 'stat', '--format=%F', self.fakefspath, run_as_root=False ) self.assertEqual(result, True) def test__is_dir_failure(self): fakeoutput = "regular file" self._driver._gpfs_execute = mock.Mock(return_value=(fakeoutput, '')) result = self._driver._is_dir(self.fakefspath) self._driver._gpfs_execute.assert_called_once_with( 'stat', '--format=%F', self.fakefspath, run_as_root=False ) self.assertEqual(result, False) def test__is_dir_exception(self): self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._is_dir, self.fakefspath) self._driver._gpfs_execute.assert_called_once_with( 'stat', '--format=%F', self.fakefspath, run_as_root=False ) def test__is_gpfs_path_ok(self): self._driver._gpfs_execute = mock.Mock(return_value=0) result = self._driver._is_gpfs_path(self.fakefspath) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmlsattr', self.fakefspath) self.assertEqual(result, True) def test__is_gpfs_path_exception(self): self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._is_gpfs_path, self.fakefspath) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmlsattr', self.fakefspath) def test__get_gpfs_device(self): fakeout = "Filesystem\n" + self.fakedev orig_val = self._driver.configuration.gpfs_mount_point_base self._driver.configuration.gpfs_mount_point_base = self.fakefspath self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) result = self._driver._get_gpfs_device() self._driver._gpfs_execute.assert_called_once_with('df', self.fakefspath) self.assertEqual(result, self.fakedev) self._driver.configuration.gpfs_mount_point_base = orig_val def test__get_gpfs_device_exception(self): self._driver._gpfs_execute 
= mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.GPFSException, self._driver._get_gpfs_device) def test__create_share(self): sizestr = '%sG' % self.share['size'] self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._create_share(self.share) self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmcrfileset', self.fakedev, self.share['name'], '--inode-space', 'new') self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'], '-J', self.fakesharepath) self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' + self.share['name'], '--block', '0:' + sizestr) self._driver._gpfs_execute.assert_any_call( 'chmod', '777', self.fakesharepath) self._driver._local_path.assert_called_once_with(self.share['name']) self._driver._get_gpfs_device.assert_called_once_with() def test__create_share_exception(self): self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._create_share, self.share) self._driver._get_gpfs_device.assert_called_once_with() self._driver._local_path.assert_called_once_with(self.share['name']) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmcrfileset', self.fakedev, self.share['name'], '--inode-space', 'new') def test__delete_share(self): self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._delete_share(self.share) self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.share['name'], '-f', 
ignore_exit_code=[2]) self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmdelfileset', self.fakedev, self.share['name'], '-f', ignore_exit_code=[2]) self._driver._get_gpfs_device.assert_called_once_with() def test__delete_share_exception(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._delete_share, self.share) self._driver._get_gpfs_device.assert_called_once_with() self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.share['name'], '-f', ignore_exit_code=[2]) def test__create_share_snapshot(self): self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._create_share_snapshot(self.snapshot) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmcrsnapshot', self.fakedev, self.snapshot['name'], '-j', self.snapshot['share_name'] ) self._driver._get_gpfs_device.assert_called_once_with() def test__create_share_snapshot_exception(self): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._create_share_snapshot, self.snapshot) self._driver._get_gpfs_device.assert_called_once_with() self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmcrsnapshot', self.fakedev, self.snapshot['name'], '-j', self.snapshot['share_name'] ) def test__create_share_from_snapshot(self): self._driver._gpfs_execute = mock.Mock(return_value=True) self._driver._create_share = mock.Mock(return_value=True) self._driver._get_snapshot_path = mock.Mock(return_value=self. 
fakesnapshotpath) self._driver._create_share_from_snapshot(self.share, self.snapshot, self.fakesharepath) self._driver._gpfs_execute.assert_called_once_with( 'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath ) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with(self.snapshot) def test__create_share_from_snapshot_exception(self): self._driver._create_share = mock.Mock(return_value=True) self._driver._get_snapshot_path = mock.Mock(return_value=self. fakesnapshotpath) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError ) self.assertRaises(exception.GPFSException, self._driver._create_share_from_snapshot, self.share, self.snapshot, self.fakesharepath) self._driver._create_share.assert_called_once_with(self.share) self._driver._get_snapshot_path.assert_called_once_with(self.snapshot) self._driver._gpfs_execute.assert_called_once_with( 'rsync', '-rp', self.fakesnapshotpath + '/', self.fakesharepath ) @ddt.data("mmlsfileset::allocInodes:\nmmlsfileset::100096:", "mmlsfileset::allocInodes:\nmmlsfileset::0:") def test__is_share_valid_with_quota(self, fakeout): self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) result = self._driver._is_share_valid(self.fakedev, self.fakesharepath) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y') if fakeout == "mmlsfileset::allocInodes:\nmmlsfileset::100096:": self.assertTrue(result) else: self.assertFalse(result) def test__is_share_valid_exception(self): self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.ManageInvalidShare, self._driver._is_share_valid, self.fakedev, self.fakesharepath) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y') def 
test__is_share_valid_no_share_exist_exception(self): fakeout = "mmlsfileset::allocInodes:" self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) self.assertRaises(exception.GPFSException, self._driver._is_share_valid, self.fakedev, self.fakesharepath) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y') def test__get_share_name(self): fakeout = "mmlsfileset::filesetName:\nmmlsfileset::existingshare:" self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) result = self._driver._get_share_name(self.fakedev, self.fakesharepath) self.assertEqual('existingshare', result) def test__get_share_name_exception(self): self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.ManageInvalidShare, self._driver._get_share_name, self.fakedev, self.fakesharepath) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y') def test__get_share_name_no_share_exist_exception(self): fakeout = "mmlsfileset::filesetName:" self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) self.assertRaises(exception.GPFSException, self._driver._get_share_name, self.fakedev, self.fakesharepath) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmlsfileset', self.fakedev, '-J', self.fakesharepath, '-L', '-Y') @ddt.data("mmlsquota::blockLimit:\nmmlsquota::1048577", "mmlsquota::blockLimit:\nmmlsquota::1048576", "mmlsquota::blockLimit:\nmmlsquota::0") def test__manage_existing(self, fakeout): self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) self._helper_fake.create_export.return_value = 'fakelocation' self._driver._local_path = mock.Mock(return_value=self.fakesharepath) actual_size, actual_path = self._driver._manage_existing( self.fakedev, self.share, self.fakeexistingshare) self._driver._gpfs_execute.assert_any_call( 
self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f') self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmchfileset', self.fakedev, self.fakeexistingshare, '-j', self.share['name']) self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'], '-J', self.fakesharepath) self._driver._gpfs_execute.assert_any_call( 'chmod', '777', self.fakesharepath) if fakeout == "mmlsquota::blockLimit:\nmmlsquota::1048577": self._driver._gpfs_execute.assert_called_with( self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' + self.share['name'], '--block', '0:2G') self.assertEqual(2, actual_size) self.assertEqual('fakelocation', actual_path) elif fakeout == "mmlsquota::blockLimit:\nmmlsquota::0": self._driver._gpfs_execute.assert_called_with( self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' + self.share['name'], '--block', '0:1G') self.assertEqual(1, actual_size) self.assertEqual('fakelocation', actual_path) else: self.assertEqual(1, actual_size) self.assertEqual('fakelocation', actual_path) def test__manage_existing_fileset_unlink_exception(self): self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self._driver._gpfs_execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.GPFSException, self._driver._manage_existing, self.fakedev, self.share, self.fakeexistingshare) self._driver._local_path.assert_called_once_with(self.share['name']) self._driver._gpfs_execute.assert_called_once_with( self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f') def test__manage_existing_fileset_creation_exception(self): self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self.mock_object(self._driver, '_gpfs_execute', mock.Mock( side_effect=['', exception.ProcessExecutionError])) self.assertRaises(exception.GPFSException, self._driver._manage_existing, self.fakedev, self.share, self.fakeexistingshare) 
self._driver._local_path.assert_any_call(self.share['name']) self._driver._gpfs_execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f'), mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev, self.fakeexistingshare, '-j', self.share['name'])]) def test__manage_existing_fileset_relink_exception(self): self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self.mock_object(self._driver, '_gpfs_execute', mock.Mock( side_effect=['', '', exception.ProcessExecutionError])) self.assertRaises(exception.GPFSException, self._driver._manage_existing, self.fakedev, self.share, self.fakeexistingshare) self._driver._local_path.assert_any_call(self.share['name']) self._driver._gpfs_execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f'), mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev, self.fakeexistingshare, '-j', self.share['name']), mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'], '-J', self.fakesharepath)]) def test__manage_existing_permission_change_exception(self): self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self.mock_object(self._driver, '_gpfs_execute', mock.Mock( side_effect=['', '', '', exception.ProcessExecutionError])) self.assertRaises(exception.GPFSException, self._driver._manage_existing, self.fakedev, self.share, self.fakeexistingshare) self._driver._local_path.assert_any_call(self.share['name']) self._driver._gpfs_execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f'), mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev, self.fakeexistingshare, '-j', self.share['name']), mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'], '-J', self.fakesharepath), mock.call('chmod', '777', self.fakesharepath)]) def test__manage_existing_checking_quota_of_fileset_exception(self): 
self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self.mock_object(self._driver, '_gpfs_execute', mock.Mock( side_effect=['', '', '', '', exception.ProcessExecutionError])) self.assertRaises(exception.GPFSException, self._driver._manage_existing, self.fakedev, self.share, self.fakeexistingshare) self._driver._local_path.assert_any_call(self.share['name']) self._driver._gpfs_execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f'), mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev, self.fakeexistingshare, '-j', self.share['name']), mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'], '-J', self.fakesharepath), mock.call('chmod', '777', self.fakesharepath), mock.call(self.GPFS_PATH + 'mmlsquota', '-j', self.share['name'], '-Y', self.fakedev)]) def test__manage_existing_unable_to_get_quota_of_fileset_exception(self): fakeout = "mmlsquota::blockLimit:" self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self._driver._gpfs_execute = mock.Mock(return_value=(fakeout, '')) self.assertRaises(exception.GPFSException, self._driver._manage_existing, self.fakedev, self.share, self.fakeexistingshare) self._driver._local_path.assert_any_call(self.share['name']) self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f') self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmchfileset', self.fakedev, self.fakeexistingshare, '-j', self.share['name']) self._driver._gpfs_execute.assert_any_call( self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'], '-J', self.fakesharepath) self._driver._gpfs_execute.assert_any_call( 'chmod', '777', self.fakesharepath) self._driver._gpfs_execute.assert_called_with( self.GPFS_PATH + 'mmlsquota', '-j', self.share['name'], '-Y', self.fakedev) def test__manage_existing_set_quota_of_fileset_less_than_1G_exception( self): sizestr = '1G' mock_out 
= "mmlsquota::blockLimit:\nmmlsquota::0:", None self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self.mock_object(self._driver, '_gpfs_execute', mock.Mock( side_effect=['', '', '', '', mock_out, exception.ProcessExecutionError])) self.assertRaises(exception.GPFSException, self._driver._manage_existing, self.fakedev, self.share, self.fakeexistingshare) self._driver._local_path.assert_any_call(self.share['name']) self._driver._gpfs_execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f'), mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev, self.fakeexistingshare, '-j', self.share['name']), mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'], '-J', self.fakesharepath), mock.call('chmod', '777', self.fakesharepath), mock.call(self.GPFS_PATH + 'mmlsquota', '-j', self.share['name'], '-Y', self.fakedev), mock.call(self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' + self.share['name'], '--block', '0:' + sizestr)]) def test__manage_existing_set_quota_of_fileset_grater_than_1G_exception( self): sizestr = '2G' mock_out = "mmlsquota::blockLimit:\nmmlsquota::1048577:", None self._driver._local_path = mock.Mock(return_value=self.fakesharepath) self.mock_object(self._driver, '_gpfs_execute', mock.Mock( side_effect=['', '', '', '', mock_out, exception.ProcessExecutionError])) self.assertRaises(exception.GPFSException, self._driver._manage_existing, self.fakedev, self.share, self.fakeexistingshare) self._driver._local_path.assert_any_call(self.share['name']) self._driver._gpfs_execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmunlinkfileset', self.fakedev, self.fakeexistingshare, '-f'), mock.call(self.GPFS_PATH + 'mmchfileset', self.fakedev, self.fakeexistingshare, '-j', self.share['name']), mock.call(self.GPFS_PATH + 'mmlinkfileset', self.fakedev, self.share['name'], '-J', self.fakesharepath), mock.call('chmod', '777', self.fakesharepath), mock.call(self.GPFS_PATH + 
'mmlsquota', '-j', self.share['name'], '-Y', self.fakedev), mock.call(self.GPFS_PATH + 'mmsetquota', self.fakedev + ':' + self.share['name'], '--block', '0:' + sizestr)]) def test_manage_existing(self): self._driver._manage_existing = mock.Mock(return_value=('1', 'fakelocation')) self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._is_share_valid = mock.Mock(return_value=True) self._driver._get_share_name = mock.Mock(return_value=self. fakeexistingshare) self._helper_fake._has_client_access = mock.Mock(return_value=[]) result = self._driver.manage_existing(self.share, {}) self.assertEqual('1', result['size']) self.assertEqual('fakelocation', result['export_locations']) def test_manage_existing_incorrect_path_exception(self): share = fake_share.fake_share(export_location="wrong_ip::wrong_path") self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, share, {}) def test_manage_existing_incorrect_ip_exception(self): share = fake_share.fake_share(export_location="wrong_ip:wrong_path") self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, share, {}) def test__manage_existing_invalid_export_exception(self): share = fake_share.fake_share(export_location="wrong_ip/wrong_path") self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, share, {}) @ddt.data(True, False) def test_manage_existing_invalid_share_exception(self, valid_share): self._driver._get_gpfs_device = mock.Mock(return_value=self.fakedev) self._driver._is_share_valid = mock.Mock(return_value=valid_share) if valid_share: self._driver._get_share_name = mock.Mock(return_value=self. 
fakeexistingshare) self._helper_fake._has_client_access = mock.Mock() else: self.assertFalse(self._helper_fake._has_client_access.called) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, self.share, {}) def test__gpfs_local_execute(self): self.mock_object(utils, 'execute', mock.Mock(return_value=True)) cmd = "testcmd" self._driver._gpfs_local_execute(cmd, ignore_exit_code=[2]) utils.execute.assert_called_once_with(cmd, run_as_root=True, check_exit_code=[2, 0]) def test__gpfs_remote_execute(self): self._driver._run_ssh = mock.Mock(return_value=True) cmd = "testcmd" orig_value = self._driver.configuration.gpfs_share_export_ip self._driver.configuration.gpfs_share_export_ip = self.local_ip self._driver._gpfs_remote_execute(cmd, check_exit_code=True) self._driver._run_ssh.assert_called_once_with( self.local_ip, tuple([cmd]), None, True ) self._driver.configuration.gpfs_share_export_ip = orig_value def test_knfs_resync_access(self): self._knfs_helper.allow_access = mock.Mock() path = self.fakesharepath to_remove = '3.3.3.3' fake_exportfs_before = ('%(path)s\n\t\t%(ip)s\n' '/other/path\n\t\t4.4.4.4\n' % {'path': path, 'ip': to_remove}) fake_exportfs_after = '/other/path\n\t\t4.4.4.4\n' self._knfs_helper._execute = mock.Mock( return_value=(fake_exportfs_before, '')) self._knfs_helper._publish_access = mock.Mock( side_effect=[[(fake_exportfs_before, '')], [(fake_exportfs_after, '')]]) access_1 = fake_share.fake_access(access_to="1.1.1.1") access_2 = fake_share.fake_access(access_to="2.2.2.2") self._knfs_helper.resync_access(path, self.share, [access_1, access_2]) self._knfs_helper.allow_access.assert_has_calls([ mock.call(path, self.share, access_1, error_on_exists=False), mock.call(path, self.share, access_2, error_on_exists=False)]) self._knfs_helper._execute.assert_called_once_with( 'exportfs', run_as_root=True) self._knfs_helper._publish_access.assert_has_calls([ mock.call('exportfs', '-u', '%(ip)s:%(path)s' % {'ip': to_remove, 'path': 
path}, check_exit_code=[0, 1]), mock.call('exportfs')]) @ddt.data('rw', 'ro') def test_knfs_get_export_options(self, access_level): mock_out = {"knfs:export_options": "no_root_squash"} self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=mock_out)) access = fake_share.fake_access(access_level=access_level) out = self._knfs_helper.get_export_options(self.share, access, 'KNFS') self.assertEqual("no_root_squash,%s" % access_level, out) def test_knfs_get_export_options_default(self): self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) access = self.access out = self._knfs_helper.get_export_options(self.share, access, 'KNFS') self.assertEqual("rw", out) def test_knfs_get_export_options_invalid_option_ro(self): mock_out = {"knfs:export_options": "ro"} self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=mock_out)) access = self.access share = fake_share.fake_share(share_type="fake_share_type") self.assertRaises(exception.InvalidInput, self._knfs_helper.get_export_options, share, access, 'KNFS') def test_knfs_get_export_options_invalid_option_rw(self): mock_out = {"knfs:export_options": "rw"} self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=mock_out)) access = self.access share = fake_share.fake_share(share_type="fake_share_type") self.assertRaises(exception.InvalidInput, self._knfs_helper.get_export_options, share, access, 'KNFS') @ddt.data(("/gpfs0/share-fakeid\t10.0.0.1", None), ("", None), ("/gpfs0/share-fakeid\t10.0.0.1", "10.0.0.1"), ("/gpfs0/share-fakeid\t10.0.0.1", "10.0.0.2")) @ddt.unpack def test_knfs__has_client_access(self, mock_out, access_to): self._knfs_helper._execute = mock.Mock(return_value=[mock_out, 0]) result = self._knfs_helper._has_client_access(self.fakesharepath, access_to) self._ces_helper._execute.assert_called_once_with('exportfs', check_exit_code=True, run_as_root=True) if mock_out == 
"/gpfs0/share-fakeid\t10.0.0.1": if access_to in (None, "10.0.0.1"): self.assertTrue(result) else: self.assertFalse(result) else: self.assertFalse(result) def test_knfs_allow_access(self): self._knfs_helper._execute = mock.Mock( return_value=['/fs0 ', 0] ) self.mock_object(re, 'search', mock.Mock(return_value=None)) export_opts = None self._knfs_helper.get_export_options = mock.Mock( return_value=export_opts ) self._knfs_helper._publish_access = mock.Mock() access = self.access local_path = self.fakesharepath self._knfs_helper.allow_access(local_path, self.share, access) self._knfs_helper._execute.assert_called_once_with('exportfs', run_as_root=True) self.assertTrue(re.search.called) self._knfs_helper.get_export_options.assert_any_call( self.share, access, 'KNFS') cmd = ['exportfs', '-o', export_opts, ':'.join([access['access_to'], local_path])] self._knfs_helper._publish_access.assert_called_once_with(*cmd) def test_knfs_allow_access_access_exists(self): out = ['/fs0 ', 0] self._knfs_helper._execute = mock.Mock(return_value=out) self.mock_object(re, 'search', mock.Mock(return_value="fake")) self._knfs_helper.get_export_options = mock.Mock() access = self.access local_path = self.fakesharepath self.assertRaises(exception.ShareAccessExists, self._knfs_helper.allow_access, local_path, self.share, access) self._knfs_helper._execute.assert_any_call('exportfs', run_as_root=True) self.assertTrue(re.search.called) self.assertFalse(self._knfs_helper.get_export_options.called) def test_knfs_allow_access_publish_exception(self): self._knfs_helper.get_export_options = mock.Mock() self._knfs_helper._publish_access = mock.Mock( side_effect=exception.ProcessExecutionError('boom')) self.assertRaises(exception.GPFSException, self._knfs_helper.allow_access, self.fakesharepath, self.share, self.access, error_on_exists=False) self.assertTrue(self._knfs_helper.get_export_options.called) self.assertTrue(self._knfs_helper._publish_access.called) def 
test_knfs_allow_access_invalid_access(self): access = fake_share.fake_access(access_type='test') self.assertRaises(exception.InvalidShareAccess, self._knfs_helper.allow_access, self.fakesharepath, self.share, access) def test_knfs_allow_access_exception(self): self._knfs_helper._execute = mock.Mock( side_effect=exception.ProcessExecutionError ) access = self.access local_path = self.fakesharepath self.assertRaises(exception.GPFSException, self._knfs_helper.allow_access, local_path, self.share, access) self._knfs_helper._execute.assert_called_once_with('exportfs', run_as_root=True) def test_knfs__verify_denied_access_pass(self): local_path = self.fakesharepath ip = self.access['access_to'] fake_exportfs = ('/shares/share-1\n\t\t1.1.1.1\n' '/shares/share-2\n\t\t2.2.2.2\n') self._knfs_helper._publish_access = mock.Mock( return_value=[(fake_exportfs, '')]) self._knfs_helper._verify_denied_access(local_path, self.share, ip) self._knfs_helper._publish_access.assert_called_once_with('exportfs') def test_knfs__verify_denied_access_fail(self): local_path = self.fakesharepath ip = self.access['access_to'] data = {'path': local_path, 'ip': ip} fake_exportfs = ('/shares/share-1\n\t\t1.1.1.1\n' '%(path)s\n\t\t%(ip)s\n' '/shares/share-2\n\t\t2.2.2.2\n') % data self._knfs_helper._publish_access = mock.Mock( return_value=[(fake_exportfs, '')]) self.assertRaises(exception.GPFSException, self._knfs_helper._verify_denied_access, local_path, self.share, ip) self._knfs_helper._publish_access.assert_called_once_with('exportfs') def test_knfs__verify_denied_access_exception(self): self._knfs_helper._publish_access = mock.Mock( side_effect=exception.ProcessExecutionError ) ip = self.access['access_to'] local_path = self.fakesharepath self.assertRaises(exception.GPFSException, self._knfs_helper._verify_denied_access, local_path, self.share, ip) self._knfs_helper._publish_access.assert_called_once_with('exportfs') @ddt.data((None, False), ('', False), (' ', False), ('Some error to log', 
True)) @ddt.unpack def test_knfs__verify_denied_access_stderr(self, stderr, is_logged): """Stderr debug logging should only happen when not empty.""" outputs = [('', stderr)] self._knfs_helper._publish_access = mock.Mock(return_value=outputs) gpfs.LOG.debug = mock.Mock() self._knfs_helper._verify_denied_access( self.fakesharepath, self.share, self.remote_ip) self._knfs_helper._publish_access.assert_called_once_with('exportfs') self.assertEqual(is_logged, gpfs.LOG.debug.called) def test_knfs_deny_access(self): self._knfs_helper._publish_access = mock.Mock(return_value=[('', '')]) access = self.access local_path = self.fakesharepath self._knfs_helper.deny_access(local_path, self.share, access) deny = ['exportfs', '-u', ':'.join([access['access_to'], local_path])] self._knfs_helper._publish_access.assert_has_calls([ mock.call(*deny, check_exit_code=[0, 1]), mock.call('exportfs')]) def test_knfs_deny_access_exception(self): self._knfs_helper._publish_access = mock.Mock( side_effect=exception.ProcessExecutionError ) access = self.access local_path = self.fakesharepath cmd = ['exportfs', '-u', ':'.join([access['access_to'], local_path])] self.assertRaises(exception.GPFSException, self._knfs_helper.deny_access, local_path, self.share, access) self._knfs_helper._publish_access.assert_called_once_with( *cmd, check_exit_code=[0, 1]) def test_knfs__publish_access(self): self.mock_object(utils, 'execute') fake_command = 'fakecmd' cmd = [fake_command] self._knfs_helper._publish_access(*cmd) utils.execute.assert_any_call(*cmd, run_as_root=True, check_exit_code=True) remote_login = self.sshlogin + '@' + self.remote_ip remote_login2 = self.sshlogin + '@' + self.remote_ip2 utils.execute.assert_has_calls([ mock.call('ssh', remote_login, fake_command, check_exit_code=True, run_as_root=False), mock.call(fake_command, check_exit_code=True, run_as_root=True), mock.call('ssh', remote_login2, fake_command, check_exit_code=True, run_as_root=False)]) 
self.assertTrue(socket.gethostbyname_ex.called) self.assertTrue(socket.gethostname.called) def test_knfs__publish_access_exception(self): self.mock_object( utils, 'execute', mock.Mock(side_effect=(0, exception.ProcessExecutionError))) fake_command = 'fakecmd' cmd = [fake_command] self.assertRaises(exception.ProcessExecutionError, self._knfs_helper._publish_access, *cmd) self.assertTrue(socket.gethostbyname_ex.called) self.assertTrue(socket.gethostname.called) remote_login = self.sshlogin + '@' + self.remote_ip utils.execute.assert_has_calls([ mock.call('ssh', remote_login, fake_command, check_exit_code=True, run_as_root=False), mock.call(fake_command, check_exit_code=True, run_as_root=True)]) @ddt.data('rw', 'ro') def test_ces_get_export_options(self, access_level): mock_out = {"ces:export_options": "squash=no_root_squash"} self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=mock_out)) access = fake_share.fake_access(access_level=access_level) out = self._ces_helper.get_export_options(self.share, access, 'CES') self.assertEqual("squash=no_root_squash,access_type=%s" % access_level, out) def test_ces_get_export_options_default(self): self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) access = self.access out = self._ces_helper.get_export_options(self.share, access, 'CES') self.assertEqual("access_type=rw", out) def test_ces_get_export_options_invalid_option_ro(self): mock_out = {"ces:export_options": "access_type=ro"} self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=mock_out)) access = self.access share = fake_share.fake_share(share_type="fake_share_type") self.assertRaises(exception.InvalidInput, self._ces_helper.get_export_options, share, access, 'CES') def test_ces_get_export_options_invalid_option_rw(self): mock_out = {"ces:export_options": "access_type=rw"} self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=mock_out)) access = 
self.access share = fake_share.fake_share(share_type="fake_share_type") self.assertRaises(exception.InvalidInput, self._ces_helper.get_export_options, share, access, 'CES') def test__get_nfs_client_exports_exception(self): self._ces_helper._execute = mock.Mock(return_value=('junk', '')) local_path = self.fakesharepath self.assertRaises(exception.GPFSException, self._ces_helper._get_nfs_client_exports, local_path) self._ces_helper._execute.assert_called_once_with( self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y') @ddt.data('44.3.2.11', '1:2:3:4:5:6:7:8') def test__fix_export_data(self, ip): data = None for line in self.fake_ces_exports.splitlines(): if "HEADER" in line: headers = line.split(':') if ip in line: data = line.split(':') break self.assertIsNotNone( data, "Test data did not contain a line with the test IP.") result_data = self._ces_helper._fix_export_data(data, headers) self.assertEqual(ip, result_data[headers.index('Clients')]) @ddt.data((None, True), ('44.3.2.11', True), ('44.3.2.1', False), ('4.3.2.1', False), ('4.3.2.11', False), ('1.2.3.4', False), ('', False), ('*', False), ('.', False), ('1:2:3:4:5:6:7:8', True)) @ddt.unpack def test_ces__has_client_access(self, ip, has_access): mock_out = self.fake_ces_exports self._ces_helper._execute = mock.Mock( return_value=(mock_out, '')) local_path = self.fakesharepath self.assertEqual(has_access, self._ces_helper._has_client_access(local_path, ip)) self._ces_helper._execute.assert_called_once_with( self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y') def test_ces_remove_export_no_exports(self): mock_out = self.fake_ces_exports_not_found self._ces_helper._execute = mock.Mock( return_value=(mock_out, '')) local_path = self.fakesharepath self._ces_helper.remove_export(local_path, self.share) self._ces_helper._execute.assert_called_once_with( self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y') def test_ces_remove_export_existing_exports(self): mock_out = 
self.fake_ces_exports self._ces_helper._execute = mock.Mock( return_value=(mock_out, '')) local_path = self.fakesharepath self._ces_helper.remove_export(local_path, self.share) self._ces_helper._execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y'), mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'remove', local_path), ]) def test_ces_remove_export_exception(self): local_path = self.fakesharepath self._ces_helper._execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.GPFSException, self._ces_helper.remove_export, local_path, self.share) def test_ces_allow_access(self): mock_out = self.fake_ces_exports_not_found self._ces_helper._execute = mock.Mock( return_value=(mock_out, '')) export_opts = "access_type=rw" self._ces_helper.get_export_options = mock.Mock( return_value=export_opts) access = self.access local_path = self.fakesharepath self._ces_helper.allow_access(local_path, self.share, access) self._ces_helper._execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y'), mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'add', local_path, '-c', access['access_to'] + '(' + export_opts + ')')]) def test_ces_allow_access_existing_exports(self): mock_out = self.fake_ces_exports self._ces_helper._execute = mock.Mock( return_value=(mock_out, '')) export_opts = "access_type=rw" self._ces_helper.get_export_options = mock.Mock( return_value=export_opts) access = self.access local_path = self.fakesharepath self._ces_helper.allow_access(self.fakesharepath, self.share, self.access) self._ces_helper._execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y'), mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'change', local_path, '--nfsadd', access['access_to'] + '(' + export_opts + ')')]) def test_ces_allow_access_invalid_access_type(self): access = fake_share.fake_access(access_type='test') 
self.assertRaises(exception.InvalidShareAccess, self._ces_helper.allow_access, self.fakesharepath, self.share, access) def test_ces_allow_access_exception(self): access = self.access local_path = self.fakesharepath self._ces_helper._execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.GPFSException, self._ces_helper.allow_access, local_path, self.share, access) def test_ces_deny_access(self): mock_out = self.fake_ces_exports self._ces_helper._execute = mock.Mock( return_value=(mock_out, '')) access = self.access local_path = self.fakesharepath self._ces_helper.deny_access(local_path, self.share, access) self._ces_helper._execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y'), mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'change', local_path, '--nfsremove', access['access_to'])]) def test_ces_deny_access_exception(self): access = self.access local_path = self.fakesharepath self._ces_helper._execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.GPFSException, self._ces_helper.deny_access, local_path, self.share, access) def test_ces_resync_access_add(self): mock_out = self.fake_ces_exports_not_found self._ces_helper._execute = mock.Mock(return_value=(mock_out, '')) self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) access_rules = [self.access] local_path = self.fakesharepath self._ces_helper.resync_access(local_path, self.share, access_rules) self._ces_helper._execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y'), mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'add', local_path, '-c', self.access['access_to'] + '(' + "access_type=rw" + ')') ]) share_types.get_extra_specs_from_share.assert_called_once_with( self.share) def test_ces_resync_access_change(self): class SortedMatch(object): def __init__(self, f, expected): self.assertEqual = f self.expected 
= expected def __eq__(self, actual): expected_list = self.expected.split(',') actual_list = actual.split(',') self.assertEqual(sorted(expected_list), sorted(actual_list)) return True mock_out = self.fake_ces_exports self._ces_helper._execute = mock.Mock( return_value=(mock_out, '')) self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) access_rules = [fake_share.fake_access(access_to='1.1.1.1'), fake_share.fake_access( access_to='10.0.0.1', access_level='ro')] local_path = self.fakesharepath self._ces_helper.resync_access(local_path, self.share, access_rules) share_types.get_extra_specs_from_share.assert_called_once_with( self.share) to_remove = '1:2:3:4:5:6:7:8,44.3.2.11' to_add = access_rules[0]['access_to'] + '(' + "access_type=rw" + ')' to_change = access_rules[1]['access_to'] + '(' + "access_type=ro" + ')' self._ces_helper._execute.assert_has_calls([ mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y'), mock.call(self.GPFS_PATH + 'mmnfs', 'export', 'change', local_path, '--nfsremove', SortedMatch(self.assertEqual, to_remove), '--nfsadd', to_add, '--nfschange', to_change) ]) def test_ces_resync_nothing(self): """Test that hits the add-no-rules case.""" mock_out = self.fake_ces_exports_not_found self._ces_helper._execute = mock.Mock(return_value=(mock_out, '')) local_path = self.fakesharepath self._ces_helper.resync_access(local_path, None, []) self._ces_helper._execute.assert_called_once_with( self.GPFS_PATH + 'mmnfs', 'export', 'list', '-n', local_path, '-Y') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0216703 manila-21.0.0/manila/tests/share/drivers/infinidat/0000775000175000017500000000000000000000000022206 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/infinidat/__init__.py0000664000175000017500000000000000000000000024305 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/infinidat/test_infinidat.py0000664000175000017500000011601600000000000025571 0ustar00zuulzuul00000000000000# Copyright 2022 Infinidat Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for INFINIDAT InfiniBox share driver.""" import copy import functools import itertools from unittest import mock import ddt from oslo_utils import units from manila.common import constants from manila import exception from manila.share import configuration from manila.share.drivers.infinidat import infinibox from manila import test from manila import version _MOCK_SHARE_ID = 1 _MOCK_SNAPSHOT_ID = 2 _MOCK_CLONE_ID = 3 _MOCK_SHARE_SIZE = 4 _MOCK_NETWORK_SPACE_IP_1 = '1.2.3.4' _MOCK_NETWORK_SPACE_IP_2 = '1.2.3.5' def _create_mock__getitem__(mock): def mock__getitem__(self, key, default=None): return getattr(mock, key, default) return mock__getitem__ test_share = mock.Mock(id=_MOCK_SHARE_ID, size=_MOCK_SHARE_SIZE, share_proto='NFS') test_share.__getitem__ = _create_mock__getitem__(test_share) test_snapshot = mock.Mock(id=_MOCK_SNAPSHOT_ID, size=test_share.size, share=test_share, share_proto='NFS') test_snapshot.__getitem__ = _create_mock__getitem__(test_snapshot) 
original_test_clone = mock.Mock(id=_MOCK_CLONE_ID, size=test_share.size, share=test_snapshot, share_proto='NFS') original_test_clone.__getitem__ = _create_mock__getitem__(original_test_clone) def skip_driver_setup(func): @functools.wraps(func) def f(*args, **kwargs): return func(*args, **kwargs) f.__skip_driver_setup = True return f class FakeInfinisdkException(Exception): pass class FakeInfinisdkPermission(object): def __init__(self, permission): self._permission = permission def __getattr__(self, attr): return self._permission[attr] def __getitem__(self, key): return self._permission[key] class InfiniboxDriverTestCaseBase(test.TestCase): def _test_skips_driver_setup(self): test_method_name = self.id().split('.')[-1] test_method = getattr(self, test_method_name) return getattr(test_method, '__skip_driver_setup', False) def setUp(self): super(InfiniboxDriverTestCaseBase, self).setUp() self.configuration = configuration.Configuration(None) self.configuration.append_config_values( infinibox.infinidat_connection_opts) self.configuration.append_config_values( infinibox.infinidat_auth_opts) self.configuration.append_config_values( infinibox.infinidat_general_opts) self.override_config('infinibox_hostname', 'mockbox') self.override_config('infinidat_pool_name', 'mockpool') self.override_config('infinidat_nas_network_space_name', 'mockspace') self.override_config('infinidat_thin_provision', True) self.override_config('infinibox_login', 'user') self.override_config('infinibox_password', 'pass') self.override_config('infinidat_use_ssl', False) self.override_config('infinidat_suppress_ssl_warnings', False) self.override_config('network_config_group', 'test_network_config_group') self.override_config('admin_network_config_group', 'test_admin_network_config_group') self.override_config('reserved_share_percentage', 0) self.override_config('reserved_share_from_snapshot_percentage', 0) self.override_config('reserved_share_extend_percentage', 0) 
self.override_config('filter_function', None) self.override_config('goodness_function', None) self.override_config('driver_handles_share_servers', False) self.override_config('max_over_subscription_ratio', 2) self.override_config('infinidat_snapdir_accessible', True) self.override_config('infinidat_snapdir_visible', False) self.driver = infinibox.InfiniboxShareDriver( configuration=self.configuration) # mock external library dependencies infinisdk = self._patch( "manila.share.drivers.infinidat.infinibox.infinisdk") self._capacity_module = ( self._patch("manila.share.drivers.infinidat.infinibox.capacity")) self._capacity_module.byte = 1 self._capacity_module.GiB = units.Gi self._system = self._infinibox_mock() infinisdk.core.exceptions.InfiniSDKException = FakeInfinisdkException infinisdk.InfiniBox.return_value = self._system if not self._test_skips_driver_setup(): self.driver.do_setup(None) def _infinibox_mock(self): result = mock.Mock() self._mock_export_permissions = [] self._mock_export = mock.Mock() self._mock_export.get_export_path.return_value = '/mock_export' self._mock_export.get_permissions = self._fake_get_permissions self._mock_export.update_permissions = self._fake_update_permissions self._mock_filesystem = mock.Mock() self._mock_filesystem.has_children.return_value = False self._mock_filesystem.create_snapshot.return_value = ( self._mock_filesystem) self._mock_filesystem.get_exports.return_value = [self._mock_export, ] self._mock_filesystem.size = 4 * self._capacity_module.GiB self._mock_filesystem.get_size.return_value = ( self._mock_filesystem.size) self._mock_pool = mock.Mock() self._mock_pool.get_free_physical_capacity.return_value = units.Gi self._mock_pool.get_physical_capacity.return_value = units.Gi self._mock_pool.get_virtual_capacity.return_value = units.Gi self._mock_pool.get_free_virtual_capacity.return_value = units.Gi self._mock_network_space = mock.Mock() self._mock_network_space.get_ips.return_value = ( 
[mock.Mock(ip_address=_MOCK_NETWORK_SPACE_IP_1, enabled=True), mock.Mock(ip_address=_MOCK_NETWORK_SPACE_IP_2, enabled=True)]) result.network_spaces.safe_get.return_value = self._mock_network_space result.pools.safe_get.return_value = self._mock_pool result.filesystems.safe_get.return_value = self._mock_filesystem result.filesystems.create.return_value = self._mock_filesystem result.components.nodes.get_all.return_value = [] return result def _raise_infinisdk(self, *args, **kwargs): raise FakeInfinisdkException() def _fake_get_permissions(self): return self._mock_export_permissions def _fake_update_permissions(self, new_export_permissions): self._mock_export_permissions = [ FakeInfinisdkPermission(permission) for permission in new_export_permissions] def _patch(self, path, *args, **kwargs): patcher = mock.patch(path, *args, **kwargs) result = patcher.start() self.addCleanup(patcher.stop) return result @ddt.ddt class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase): def _generate_mock_metadata(self, share): return {"system": "openstack", "openstack_version": version.version_info.release_string(), "manila_id": share['id'], "manila_name": share['name'], "host.created_by": infinibox._INFINIDAT_MANILA_IDENTIFIER} def _validate_metadata(self, share): self._mock_filesystem.set_metadata_from_dict.assert_called_once_with( self._generate_mock_metadata(share)) @mock.patch("manila.share.drivers.infinidat.infinibox.infinisdk", None) def test_no_infinisdk_module(self): self.assertRaises(exception.ManilaException, self.driver.do_setup, None) @mock.patch("manila.share.drivers.infinidat.infinibox.capacity", None) def test_no_capacity_module(self): self.assertRaises(exception.ManilaException, self.driver.do_setup, None) def test_no_auth_parameters(self): self.override_config('infinibox_login', None) self.override_config('infinibox_password', None) self.assertRaises(exception.BadConfigurationException, self.driver.do_setup, None) def test_empty_auth_parameters(self): 
self.override_config('infinibox_login', '') self.override_config('infinibox_password', '') self.assertRaises(exception.BadConfigurationException, self.driver.do_setup, None) @skip_driver_setup def test__setup_and_get_system_object(self): # This test should skip the driver setup, as it generates more calls to # the add_auto_retry, set_source_identifier and login methods: auth = (self.configuration.infinibox_login, self.configuration.infinibox_password) self.driver._setup_and_get_system_object( self.configuration.infinibox_hostname, auth, self.configuration.infinidat_use_ssl) self._system.api.add_auto_retry.assert_called_once() self._system.api.set_source_identifier.assert_called_once_with( infinibox._INFINIDAT_MANILA_IDENTIFIER) self._system.login.assert_called_once() @skip_driver_setup @mock.patch('manila.share.drivers.infinidat.infinibox.' 'infinisdk.InfiniBox') @mock.patch('requests.packages.urllib3') def test_do_setup_ssl_enabled(self, urllib3, infinibox): self.override_config('infinidat_use_ssl', True) self.override_config('infinidat_suppress_ssl_warnings', True) auth = (self.configuration.infinibox_login, self.configuration.infinibox_password) self.driver.do_setup(None) expected = [ mock.call(urllib3.exceptions.InsecureRequestWarning), mock.call(urllib3.exceptions.InsecurePlatformWarning)] urllib3.disable_warnings.assert_has_calls(expected) infinibox.assert_called_once_with( self.configuration.infinibox_hostname, auth=auth, use_ssl=self.configuration.infinidat_use_ssl) def test_get_share_stats_refreshes(self): self.driver._update_share_stats() result = self.driver.get_share_stats() self.assertEqual(1, result["free_capacity_gb"]) # change the "free space" in the pool self._mock_pool.get_free_physical_capacity.return_value = 0 # no refresh - free capacity should stay the same result = self.driver.get_share_stats(refresh=False) self.assertEqual(1, result["free_capacity_gb"]) # refresh - free capacity should change to 0 result = 
self.driver.get_share_stats(refresh=True) self.assertEqual(0, result["free_capacity_gb"]) def test_get_share_stats_pool_not_found(self): self._system.pools.safe_get.return_value = None self.assertRaises(exception.ManilaException, self.driver._update_share_stats) def test__verify_share_protocol(self): # test_share is NFS by default: self.driver._verify_share_protocol(test_share) def test__verify_share_protocol_fails_for_non_nfs(self): # set test_share protocol for non-NFS (CIFS, for that matter) and see # that we fail: cifs_share = copy.deepcopy(test_share) cifs_share.share_proto = 'CIFS' # also need to re-define getitem, otherwise we'll get attributes from # test_share: cifs_share.__getitem__ = _create_mock__getitem__(cifs_share) self.assertRaises(exception.InvalidShare, self.driver._verify_share_protocol, cifs_share) def test__verify_access_type_ip(self): self.assertTrue(self.driver._verify_access_type({'access_type': 'ip'})) def test__verify_access_type_fails_for_type_user(self): self.assertRaises( exception.InvalidShareAccess, self.driver._verify_access_type, {'access_type': 'user'}) def test__verify_access_type_fails_for_type_cert(self): self.assertRaises( exception.InvalidShareAccess, self.driver._verify_access_type, {'access_type': 'cert'}) def test__get_ip_address_range_single_ip(self): ip_address = self.driver._get_ip_address_range('1.2.3.4') self.assertEqual('1.2.3.4', ip_address) def test__get_ip_address_range_ip_range(self): ip_address_range = self.driver._get_ip_address_range('5.6.7.8/28') self.assertEqual('5.6.7.1-5.6.7.14', ip_address_range) def test__get_ip_address_range_invalid_address(self): self.assertRaises(ValueError, self.driver._get_ip_address_range, 'invalid') def test__get_infinidat_pool(self): self.driver._get_infinidat_pool() self._system.pools.safe_get.assert_called_once() def test__get_infinidat_pool_no_pools(self): self._system.pools.safe_get.return_value = None self.assertRaises(exception.ShareBackendException, 
self.driver._get_infinidat_pool) def test__get_infinidat_pool_api_error(self): self._system.pools.safe_get.side_effect = ( self._raise_infinisdk) self.assertRaises(exception.ShareBackendException, self.driver._get_infinidat_pool) def test__get_infinidat_nas_network_space_ips(self): network_space_ips = self.driver._get_infinidat_nas_network_space_ips() self._system.network_spaces.safe_get.assert_called_once() self._mock_network_space.get_ips.assert_called_once() for network_space_ip in \ (_MOCK_NETWORK_SPACE_IP_1, _MOCK_NETWORK_SPACE_IP_2): self.assertIn(network_space_ip, network_space_ips) def test__get_infinidat_nas_network_space_ips_only_one_ip_enabled(self): self._mock_network_space.get_ips.return_value = ( [mock.Mock(ip_address=_MOCK_NETWORK_SPACE_IP_1, enabled=True), mock.Mock(ip_address=_MOCK_NETWORK_SPACE_IP_2, enabled=False)]) self.assertEqual([_MOCK_NETWORK_SPACE_IP_1], self.driver._get_infinidat_nas_network_space_ips()) def test__get_infinidat_nas_network_space_ips_no_network_space(self): self._system.network_spaces.safe_get.return_value = None self.assertRaises(exception.ShareBackendException, self.driver._get_infinidat_nas_network_space_ips) def test__get_infinidat_nas_network_space_ips_no_ips(self): self._mock_network_space.get_ips.return_value = [] self.assertRaises(exception.ShareBackendException, self.driver._get_infinidat_nas_network_space_ips) def test__get_infinidat_nas_network_space_ips_no_ips_enabled(self): self._mock_network_space.get_ips.return_value = ( [mock.Mock(ip_address=_MOCK_NETWORK_SPACE_IP_1, enabled=False), mock.Mock(ip_address=_MOCK_NETWORK_SPACE_IP_2, enabled=False)]) self.assertRaises(exception.ShareBackendException, self.driver._get_infinidat_nas_network_space_ips) def test__get_infinidat_nas_network_space_api_error(self): self._system.network_spaces.safe_get.side_effect = ( self._raise_infinisdk) self.assertRaises(exception.ShareBackendException, self.driver._get_infinidat_nas_network_space_ips) def 
test__get_full_nfs_export_paths(self): export_paths = self.driver._get_full_nfs_export_paths( self._mock_export.get_export_path()) for network_space_ip in \ (_MOCK_NETWORK_SPACE_IP_1, _MOCK_NETWORK_SPACE_IP_2): self.assertIn( "{network_space_ip}:{export_path}".format( network_space_ip=network_space_ip, export_path=self._mock_export.get_export_path()), export_paths) def test__get_export(self): # The default return value of get_exports is [mock_export, ]: export = self.driver._get_export(self._mock_filesystem) self._mock_filesystem.get_exports.assert_called_once() self.assertEqual(self._mock_export, export) def test__get_export_no_filesystem_exports(self): self._mock_filesystem.get_exports.return_value = [] self.assertRaises(exception.ShareBackendException, self.driver._get_export, self._mock_filesystem) def test__get_export_too_many_filesystem_exports(self): self._mock_filesystem.get_exports.return_value = [ self._mock_export, self._mock_export] self.assertRaises(exception.ShareBackendException, self.driver._get_export, self._mock_filesystem) def test__get_export_api_error(self): self._mock_filesystem.get_exports.side_effect = self._raise_infinisdk self.assertRaises(exception.ShareBackendException, self.driver._get_export, self._mock_filesystem) def test__get_infinidat_access_level_rw(self): access_level = ( self.driver._get_infinidat_access_level( {'access_level': constants.ACCESS_LEVEL_RW})) self.assertEqual('RW', access_level) def test__get_infinidat_access_level_ro(self): access_level = ( self.driver._get_infinidat_access_level( {'access_level': constants.ACCESS_LEVEL_RO})) self.assertEqual('RO', access_level) def test__get_infinidat_access_level_fails_for_invalid_level(self): self.assertRaises(exception.InvalidShareAccessLevel, self.driver._get_infinidat_access_level, {'access_level': 'invalid'}) @ddt.data(*itertools.product((True, False), (True, False), (True, False))) @ddt.unpack def test_create_share(self, thin_provision, snapdir_accessible, 
snapdir_visible): self.override_config('infinidat_thin_provision', thin_provision) self.override_config('infinidat_snapdir_accessible', snapdir_accessible) self.override_config('infinidat_snapdir_visible', snapdir_visible) if thin_provision: provtype = 'THIN' else: provtype = 'THICK' self.driver.do_setup(None) self.driver.create_share(None, test_share) share_name = self.driver._make_share_name(test_share) share_size = test_share.size * self._capacity_module.GiB self._system.filesystems.create.assert_called_once_with( pool=self._mock_pool, name=share_name, size=share_size, provtype=provtype, snapdir_accessible=snapdir_accessible) self._validate_metadata(test_share) self._mock_filesystem.add_export.assert_called_once_with( permissions=[], snapdir_visible=snapdir_visible) def test_create_share_pool_not_found(self): self._system.pools.safe_get.return_value = None self.assertRaises(exception.ManilaException, self.driver.create_share, None, test_share) def test_create_share_fails_non_nfs(self): # set test_share protocol for non-NFS (CIFS, for that matter) and see # that we fail: cifs_share = copy.deepcopy(test_share) cifs_share.share_proto = 'CIFS' # also need to re-define getitem, otherwise we'll get attributes from # test_share: cifs_share.__getitem__ = _create_mock__getitem__(cifs_share) self.assertRaises(exception.InvalidShare, self.driver.create_share, None, cifs_share) def test_create_share_pools_api_fail(self): # will fail when trying to get pool for share creation: self._system.pools.safe_get.side_effect = self._raise_infinisdk self.assertRaises(exception.ShareBackendException, self.driver.create_share, None, test_share) def test_create_share_network_spaces_api_fail(self): # will fail when trying to get full export path to the new share: self._system.network_spaces.safe_get.side_effect = ( self._raise_infinisdk) self.assertRaises(exception.ShareBackendException, self.driver.create_share, None, test_share) def test_delete_share(self): 
self.driver.delete_share(None, test_share) self._mock_filesystem.safe_delete.assert_called_once() self._mock_export.safe_delete.assert_called_once() def test_delete_share_doesnt_exist(self): self._system.shares.safe_get.return_value = None # should not raise an exception self.driver.delete_share(None, test_share) def test_delete_share_export_doesnt_exist(self): self._mock_filesystem.get_exports.return_value = [] # should not raise an exception self.driver.delete_share(None, test_share) def test_delete_share_with_snapshots(self): # deleting a share with snapshots should succeed: self._mock_filesystem.has_children.return_value = True self.driver.delete_share(None, test_share) self._mock_filesystem.safe_delete.assert_called_once() self._mock_export.safe_delete.assert_called_once() def test_delete_share_wrong_share_protocol(self): # set test_share protocol for non-NFS (CIFS, for that matter) and see # that delete_share doesn't fail: cifs_share = copy.deepcopy(test_share) cifs_share.share_proto = 'CIFS' # also need to re-define getitem, otherwise we'll get attributes from # test_share: cifs_share.__getitem__ = _create_mock__getitem__(cifs_share) self.driver.delete_share(None, cifs_share) def test_extend_share(self): self.driver.extend_share(test_share, _MOCK_SHARE_SIZE * 2) self._mock_filesystem.resize.assert_called_once() def test_extend_share_api_fail(self): self._mock_filesystem.resize.side_effect = self._raise_infinisdk self.assertRaises(exception.ShareBackendException, self.driver.extend_share, test_share, 8) @ddt.data(*itertools.product((True, False), (True, False))) @ddt.unpack def test_create_snapshot(self, snapdir_accessible, snapdir_visible): self.override_config('infinidat_snapdir_accessible', snapdir_accessible) self.override_config('infinidat_snapdir_visible', snapdir_visible) snapshot_name = self.driver._make_snapshot_name(test_snapshot) self.driver.create_snapshot(None, test_snapshot) self._mock_filesystem.create_snapshot.assert_called_once_with( 
name=snapshot_name, snapdir_accessible=snapdir_accessible) self._validate_metadata(test_snapshot) self._mock_filesystem.add_export.assert_called_once_with( permissions=[], snapdir_visible=snapdir_visible) def test_create_snapshot_metadata(self): self._mock_filesystem.create_snapshot.return_value = ( self._mock_filesystem) self.driver.create_snapshot(None, test_snapshot) self._validate_metadata(test_snapshot) def test_create_snapshot_share_doesnt_exist(self): self._system.filesystems.safe_get.return_value = None self.assertRaises(exception.ShareResourceNotFound, self.driver.create_snapshot, None, test_snapshot) def test_create_snapshot_create_snapshot_api_fail(self): # will fail when trying to create a child to the original share: self._mock_filesystem.create_snapshot.side_effect = ( self._raise_infinisdk) self.assertRaises(exception.ShareBackendException, self.driver.create_snapshot, None, test_snapshot) def test_create_snapshot_network_spaces_api_fail(self): # will fail when trying to get full export path to the new snapshot: self._system.network_spaces.safe_get.side_effect = ( self._raise_infinisdk) self.assertRaises(exception.ShareBackendException, self.driver.create_snapshot, None, test_snapshot) @ddt.data(*itertools.product((True, False), (True, False))) @ddt.unpack def test_create_share_from_snapshot(self, snapdir_accessible, snapdir_visible): self.override_config('infinidat_snapdir_accessible', snapdir_accessible) self.override_config('infinidat_snapdir_visible', snapdir_visible) share_name = self.driver._make_share_name(original_test_clone) self.driver.create_share_from_snapshot(None, original_test_clone, test_snapshot) self._mock_filesystem.create_snapshot.assert_called_once_with( name=share_name, write_protected=False, snapdir_accessible=snapdir_accessible) self._mock_filesystem.add_export.assert_called_once_with( permissions=[], snapdir_visible=snapdir_visible) def test_create_share_from_snapshot_bigger_size(self): test_clone = 
copy.copy(original_test_clone) test_clone.size = test_share.size * 2 # also need to re-define getitem, otherwise we'll get attributes from # original_get_clone: test_clone.__getitem__ = _create_mock__getitem__(test_clone) self.driver.create_share_from_snapshot(None, test_clone, test_snapshot) def test_create_share_from_snapshot_doesnt_exist(self): self._system.filesystems.safe_get.return_value = None self.assertRaises(exception.ShareSnapshotNotFound, self.driver.create_share_from_snapshot, None, original_test_clone, test_snapshot) def test_create_share_from_snapshot_create_fails(self): self._mock_filesystem.create_snapshot.side_effect = ( self._raise_infinisdk) self.assertRaises(exception.ShareBackendException, self.driver.create_share_from_snapshot, None, original_test_clone, test_snapshot) def test_delete_snapshot(self): self.driver.delete_snapshot(None, test_snapshot) self._mock_filesystem.safe_delete.assert_called_once() self._mock_export.safe_delete.assert_called_once() def test_delete_snapshot_with_snapshots(self): # deleting a snapshot with snapshots should succeed: self._mock_filesystem.has_children.return_value = True self.driver.delete_snapshot(None, test_snapshot) self._mock_filesystem.safe_delete.assert_called_once() self._mock_export.safe_delete.assert_called_once() def test_delete_snapshot_doesnt_exist(self): self._system.filesystems.safe_get.return_value = None # should not raise an exception self.driver.delete_snapshot(None, test_snapshot) def test_delete_snapshot_api_fail(self): self._mock_filesystem.safe_delete.side_effect = self._raise_infinisdk self.assertRaises(exception.ShareBackendException, self.driver.delete_snapshot, None, test_snapshot) @ddt.data(*itertools.product((True, False), (True, False), (True, False), (True, False))) @ddt.unpack def test_ensure_share(self, snapdir_accessible_expected, snapdir_accessible_actual, snapdir_visible_expected, snapdir_visible_actual): self.override_config('infinidat_snapdir_accessible', 
snapdir_accessible_expected) self.override_config('infinidat_snapdir_visible', snapdir_visible_expected) self._mock_filesystem.is_snapdir_accessible.return_value = ( snapdir_accessible_actual) self._mock_export.is_snapdir_visible.return_value = ( snapdir_visible_actual) self.driver.ensure_share(None, test_share) self._mock_filesystem.get_exports.assert_called_once() self._mock_export.get_export_path.assert_called_once() if snapdir_accessible_actual is not snapdir_accessible_expected: self._mock_filesystem.update_field.assert_called_once_with( 'snapdir_accessible', snapdir_accessible_expected) else: self._mock_filesystem.update_field.assert_not_called() if snapdir_visible_actual is not snapdir_visible_expected: self._mock_export.update_snapdir_visible.assert_called_once_with( snapdir_visible_expected) else: self._mock_export.update_snapdir_visible.assert_not_called() @ddt.data(True, False) def test_ensure_share_export_missing(self, snapdir_visible): self.override_config('infinidat_snapdir_visible', snapdir_visible) self._mock_filesystem.get_exports.return_value = [] self.driver.ensure_share(None, test_share) self._mock_filesystem.get_exports.assert_called_once() self._mock_filesystem.add_export.assert_called_once_with( permissions=[], snapdir_visible=snapdir_visible) def test_ensure_share_share_doesnt_exist(self): self._system.filesystems.safe_get.return_value = None self.assertRaises(exception.ShareResourceNotFound, self.driver.ensure_share, None, test_share) def test_ensure_share_get_exports_api_fail(self): self._mock_filesystem.get_exports.side_effect = self._raise_infinisdk self._mock_filesystem.add_export.side_effect = self._raise_infinisdk self.assertRaises(exception.ShareBackendException, self.driver.ensure_share, None, test_share) def test_ensure_share_network_spaces_api_fail(self): self._system.network_spaces.safe_get.side_effect = ( self._raise_infinisdk) self.assertRaises(exception.ShareBackendException, self.driver.ensure_share, None, test_share) def 
test_ensure_shares(self): test_shares = [test_share] test_updates = self.driver.ensure_shares(None, test_shares) self.assertEqual(len(test_shares), len(test_updates)) @ddt.data(*itertools.product((True, False), (True, False))) @ddt.unpack def test_get_backend_info(self, snapdir_accessible, snapdir_visible): self.override_config('infinidat_snapdir_accessible', snapdir_accessible) self.override_config('infinidat_snapdir_visible', snapdir_visible) expected = { 'snapdir_accessible': snapdir_accessible, 'snapdir_visible': snapdir_visible } result = self.driver.get_backend_info(None) self.assertEqual(expected, result) def test_get_network_allocations_number(self): # Mostly to increase test coverage. The return value should always be 0 # for our driver (see method documentation in base class code): self.assertEqual(0, self.driver.get_network_allocations_number()) def test_revert_to_snapshot(self): self.driver.revert_to_snapshot(None, test_snapshot, [], []) self._mock_filesystem.restore.assert_called_once() def test_revert_to_snapshot_snapshot_doesnt_exist(self): self._system.filesystems.safe_get.return_value = None self.assertRaises(exception.ShareSnapshotNotFound, self.driver.revert_to_snapshot, None, test_snapshot, [], []) def test_revert_to_snapshot_api_fail(self): self._mock_filesystem.restore.side_effect = self._raise_infinisdk self.assertRaises(exception.ShareBackendException, self.driver.revert_to_snapshot, None, test_snapshot, [], []) def test_update_access(self): access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '1.2.3.4', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RW, 'access_to': '1.2.3.5', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '5.6.7.8/28', 'access_type': 'ip'}] self.driver.update_access(None, test_share, access_rules, [], [], []) permissions = self._mock_filesystem.get_exports()[0].get_permissions() # now we are supposed to have three permissions: # 1. for 1.2.3.4 # 2. 
for 1.2.3.5 # 3. for 5.6.7.1-5.6.7.14 self.assertEqual(3, len(permissions)) # sorting according to clients, to avoid mismatch errors: permissions = sorted(permissions, key=lambda permission: permission.client) self.assertEqual('RO', permissions[0].access) self.assertEqual('1.2.3.4', permissions[0].client) self.assertTrue(permissions[0].no_root_squash) self.assertEqual('RW', permissions[1].access) self.assertEqual('1.2.3.5', permissions[1].client) self.assertTrue(permissions[1].no_root_squash) self.assertEqual('RO', permissions[2].access) self.assertEqual('5.6.7.1-5.6.7.14', permissions[2].client) self.assertTrue(permissions[2].no_root_squash) def test_update_access_share_doesnt_exist(self): self._system.filesystems.safe_get.return_value = None access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '1.2.3.4', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RW, 'access_to': '1.2.3.5', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '5.6.7.8/28', 'access_type': 'ip'}] self.assertRaises(exception.ShareResourceNotFound, self.driver.update_access, None, test_share, access_rules, [], [], []) def test_update_access_api_fail(self): self._mock_filesystem.get_exports.side_effect = self._raise_infinisdk access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '1.2.3.4', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RW, 'access_to': '1.2.3.5', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '5.6.7.8/28', 'access_type': 'ip'}] self.assertRaises(exception.ShareBackendException, self.driver.update_access, None, test_share, access_rules, [], [], []) def test_update_access_fails_non_ip_access_type(self): access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '1.2.3.4', 'access_type': 'user'}] self.assertRaises(exception.InvalidShareAccess, self.driver.update_access, None, test_share, access_rules, [], [], []) def 
test_update_access_fails_invalid_ip(self): access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': 'invalid', 'access_type': 'ip'}] self.assertRaises(ValueError, self.driver.update_access, None, test_share, access_rules, [], [], []) def test_snapshot_update_access(self): access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '1.2.3.4', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RW, 'access_to': '1.2.3.5', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '5.6.7.8/28', 'access_type': 'ip'}] self.driver.snapshot_update_access(None, test_snapshot, access_rules, [], []) permissions = self._mock_filesystem.get_exports()[0].get_permissions() # now we are supposed to have three permissions: # 1. for 1.2.3.4 # 2. for 1.2.3.5 # 3. for 5.6.7.1-5.6.7.14 self.assertEqual(3, len(permissions)) # sorting according to clients, to avoid mismatch errors: permissions = sorted(permissions, key=lambda permission: permission.client) self.assertEqual('RO', permissions[0].access) self.assertEqual('1.2.3.4', permissions[0].client) self.assertTrue(permissions[0].no_root_squash) # despite sending a RW rule, all rules are converted to RO: self.assertEqual('RO', permissions[1].access) self.assertEqual('1.2.3.5', permissions[1].client) self.assertTrue(permissions[1].no_root_squash) self.assertEqual('RO', permissions[2].access) self.assertEqual('5.6.7.1-5.6.7.14', permissions[2].client) self.assertTrue(permissions[2].no_root_squash) def test_snapshot_update_access_snapshot_doesnt_exist(self): self._system.filesystems.safe_get.return_value = None access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '1.2.3.4', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RW, 'access_to': '1.2.3.5', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '5.6.7.8/28', 'access_type': 'ip'}] self.assertRaises(exception.ShareSnapshotNotFound, 
self.driver.snapshot_update_access, None, test_snapshot, access_rules, [], []) def test_snapshot_update_access_api_fail(self): self._mock_filesystem.get_exports.side_effect = self._raise_infinisdk access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '1.2.3.4', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RW, 'access_to': '1.2.3.5', 'access_type': 'ip'}, {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '5.6.7.8/28', 'access_type': 'ip'}] self.assertRaises(exception.ShareBackendException, self.driver.snapshot_update_access, None, test_snapshot, access_rules, [], []) def test_snapshot_update_access_fails_non_ip_access_type(self): access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '1.2.3.4', 'access_type': 'user'}] self.assertRaises(exception.InvalidSnapshotAccess, self.driver.snapshot_update_access, None, test_share, access_rules, [], []) def test_snapshot_update_access_fails_invalid_ip(self): access_rules = [ {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': 'invalid', 'access_type': 'ip'}] self.assertRaises(ValueError, self.driver.snapshot_update_access, None, test_share, access_rules, [], []) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0216703 manila-21.0.0/manila/tests/share/drivers/infortrend/0000775000175000017500000000000000000000000022413 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/infortrend/__init__.py0000664000175000017500000000000000000000000024512 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/infortrend/fake_infortrend_manila_data.py0000664000175000017500000003650700000000000030452 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Infortrend Technology, 
Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class InfortrendManilaTestData(object): fake_share_id = ['4d6984fd-8572-4467-964f-24936a8c4ea2', # NFS 'a7b933e6-bb77-4823-a86f-f2c3ab41a8a5'] # CIFS fake_id = ['iftt8862-2226-0126-7610-chengweichou', '987c8763-3333-4444-5555-666666666666'] fake_share_nfs = { 'share_id': fake_share_id[0], 'availability_zone': 'nova', 'terminated_at': 'datetime.datetime(2017, 5, 8, 8, 27, 25)', 'availability_zone_id': 'fd32d76d-b5a8-4c5c-93d7-8f09fc2a8ad3', 'updated_at': 'datetime.datetime(2017, 5, 8, 8, 27, 25)', 'share_network_id': None, 'export_locations': [], 'share_server_id': None, 'snapshot_id': None, 'deleted_at': None, 'id': '5a0aa06e-1c57-4996-be46-b81e360e8866', 'size': 30, 'replica_state': None, 'user_id': '4944594433f0405588928a4212964658', 'export_location': '172.27.112.223:/share-pool-01/LV-1/' + fake_share_id[0], 'display_description': None, 'consistency_group_id': None, 'project_id': '0e63326c50a246ac81fa1a0c8e003d5b', 'launched_at': 'datetime.datetime(2017, 5, 8, 8, 23, 33)', 'scheduled_at': 'datetime.datetime(2017, 5, 8, 8, 23, 29)', 'status': 'deleting', 'share_type_id': '23d8c637-0192-47fa-b921-958f22ed772f', 'deleted': 'False', 'host': 'compute@ift-manila#share-pool-01', 'access_rules_status': 'active', 'display_name': 'nfs-01', 'name': 'share-5a0aa06e-1c57-4996-be46-b81e360e8866', 'created_at': 'datetime.datetime(2017, 5, 8, 8, 23, 29)', 'share_proto': 'NFS', 'is_public': False, 
'source_cgsnapshot_member_id': None } fake_share_cifs = { 'share_id': fake_share_id[1], 'availability_zone': 'nova', 'terminated_at': None, 'availability_zone_id': 'fd32d76d-b5a8-4c5c-93d7-8f09fc2a8ad3', 'updated_at': 'datetime.datetime(2017, 5, 9, 2, 28, 35)', 'share_network_id': None, 'export_locations': [], 'share_server_id': None, 'snapshot_id': None, 'deleted_at': None, 'id': 'aac4fe64-7a9c-472a-b156-9adbb50b4d29', 'size': 50, 'replica_state': None, 'user_id': '4944594433f0405588928a4212964658', 'export_location': None, 'display_description': None, 'consistency_group_id': None, 'project_id': '0e63326c50a246ac81fa1a0c8e003d5b', 'launched_at': None, 'scheduled_at': 'datetime.datetime(2017, 5, 9, 2, 28, 35)', 'status': 'creating', 'share_type_id': '23d8c637-0192-47fa-b921-958f22ed772f', 'deleted': 'False', 'host': 'compute@ift-manila#share-pool-01', 'access_rules_status': 'active', 'display_name': 'cifs-01', 'name': 'share-aac4fe64-7a9c-472a-b156-9adbb50b4d29', 'created_at': 'datetime.datetime(2017, 5, 9, 2, 28, 35)', 'share_proto': 'CIFS', 'is_public': False, 'source_cgsnapshot_member_id': None } fake_share_cifs_no_host = { 'share_id': fake_share_id[1], 'availability_zone': 'nova', 'terminated_at': None, 'availability_zone_id': 'fd32d76d-b5a8-4c5c-93d7-8f09fc2a8ad3', 'updated_at': 'datetime.datetime(2017, 5, 9, 2, 28, 35)', 'share_network_id': None, 'export_locations': [], 'share_server_id': None, 'snapshot_id': None, 'deleted_at': None, 'id': 'aac4fe64-7a9c-472a-b156-9adbb50b4d29', 'size': 50, 'replica_state': None, 'user_id': '4944594433f0405588928a4212964658', 'export_location': None, 'display_description': None, 'consistency_group_id': None, 'project_id': '0e63326c50a246ac81fa1a0c8e003d5b', 'launched_at': None, 'scheduled_at': 'datetime.datetime(2017, 5, 9, 2, 28, 35)', 'status': 'creating', 'share_type_id': '23d8c637-0192-47fa-b921-958f22ed772f', 'deleted': 'False', 'host': '', 'access_rules_status': 'active', 'display_name': 'cifs-01', 'name': 
'share-aac4fe64-7a9c-472a-b156-9adbb50b4d29', 'created_at': 'datetime.datetime(2017, 5, 9, 2, 28, 35)', 'share_proto': 'CIFS', 'is_public': False, 'source_cgsnapshot_member_id': None } fake_non_exist_share = { 'share_id': fake_id[0], 'availability_zone': 'nova', 'terminated_at': 'datetime.datetime(2017, 5, 8, 8, 27, 25)', 'availability_zone_id': 'fd32d76d-b5a8-4c5c-93d7-8f09fc2a8ad3', 'updated_at': 'datetime.datetime(2017, 5, 8, 8, 27, 25)', 'share_network_id': None, 'export_locations': [], 'share_server_id': None, 'snapshot_id': None, 'deleted_at': None, 'id': fake_id[1], 'size': 30, 'replica_state': None, 'user_id': '4944594433f0405588928a4212964658', 'export_location': '172.27.112.223:/share-pool-01/LV-1/' + fake_id[0], 'display_description': None, 'consistency_group_id': None, 'project_id': '0e63326c50a246ac81fa1a0c8e003d5b', 'launched_at': 'datetime.datetime(2017, 5, 8, 8, 23, 33)', 'scheduled_at': 'datetime.datetime(2017, 5, 8, 8, 23, 29)', 'status': 'available', 'share_type_id': '23d8c637-0192-47fa-b921-958f22ed772f', 'deleted': 'False', 'host': 'compute@ift-manila#share-pool-01', 'access_rules_status': 'active', 'display_name': 'nfs-01', 'name': 'share-5a0aa06e-1c57-4996-be46-b81e360e8866', 'created_at': 'datetime.datetime(2017, 5, 8, 8, 23, 29)', 'share_proto': 'NFS', 'is_public': False, 'source_cgsnapshot_member_id': None } fake_access_rules_nfs = [{ 'share_id': fake_share_id[0], 'deleted': 'False', 'created_at': 'datetime.datetime(2017, 5, 9, 8, 41, 21)', 'updated_at': None, 'access_type': 'ip', 'access_to': '172.27.1.1', 'access_level': 'rw', 'instance_mappings': [], 'deleted_at': None, 'id': 'fa60b50f-1428-44a2-9931-7e31f0c5b033'}, { 'share_id': fake_share_id[0], 'deleted': 'False', 'created_at': 'datetime.datetime(2017, 5, 9, 8, 45, 37)', 'updated_at': None, 'access_type': 'ip', 'access_to': '172.27.1.2', 'access_level': 'rw', 'instance_mappings': [], 'deleted_at': None, 'id': '9bcdd5e6-11c7-4f8f-939c-84fa2f3334bc' }] fake_rule_ip_1 = [{ 'share_id': 
fake_share_id[0], 'deleted': 'False', 'created_at': 'datetime.datetime(2017, 5, 9, 8, 41, 21)', 'updated_at': None, 'access_type': 'ip', 'access_to': '172.27.1.1', 'access_level': 'rw', 'instance_mappings': [], 'deleted_at': None, 'id': 'fa60b50f-1428-44a2-9931-7e31f0c5b033' }] fake_rule_ip_2 = [{ 'share_id': fake_share_id[0], 'deleted': 'False', 'created_at': 'datetime.datetime(2017, 5, 9, 8, 45, 37)', 'updated_at': None, 'access_type': 'ip', 'access_to': '172.27.1.2', 'access_level': 'rw', 'instance_mappings': [], 'deleted_at': None, 'id': '9bcdd5e6-11c7-4f8f-939c-84fa2f3334bc' }] fake_access_rules_cifs = [{ 'share_id': fake_share_id[1], 'deleted': 'False', 'created_at': 'datetime.datetime(2017, 5, 9, 9, 39, 18)', 'updated_at': None, 'access_type': 'user', 'access_to': 'user02', 'access_level': 'ro', 'instance_mappings': [], 'deleted_at': None, 'id': '6e8bc969-51c9-4bbb-8e8b-020dc5fec81e'}, { 'share_id': fake_share_id[1], 'deleted': 'False', 'created_at': 'datetime.datetime(2017, 5, 9, 9, 38, 59)', 'updated_at': None, 'access_type': 'user', 'access_to': 'user01', 'access_level': 'rw', 'instance_mappings': [], 'deleted_at': None, 'id': '0cd9926d-fac4-4122-a523-538e98752e78' }] fake_rule_user01 = [{ 'share_id': fake_share_id[1], 'deleted': 'False', 'created_at': 'datetime.datetime(2017, 5, 9, 9, 38, 59)', 'updated_at': None, 'access_type': 'user', 'access_to': 'user01', 'access_level': 'rw', 'instance_mappings': [], 'deleted_at': None, 'id': '0cd9926d-fac4-4122-a523-538e98752e78' }] fake_rule_user02 = [{ 'share_id': fake_share_id[1], 'deleted': 'False', 'created_at': 'datetime.datetime(2017, 5, 9, 9, 39, 18)', 'updated_at': None, 'access_type': 'user', 'access_to': 'user02', 'access_level': 'ro', 'instance_mappings': [], 'deleted_at': None, 'id': '6e8bc969-51c9-4bbb-8e8b-020dc5fec81e' }] fake_rule_user03 = [{ 'share_id': fake_id[0], 'deleted': 'False', 'created_at': 'datetime.datetime(2017, 5, 9, 9, 39, 18)', 'updated_at': None, 'access_type': 'user', 'access_to': 
'user03', 'access_level': 'rw', 'instance_mappings': [], 'deleted_at': None, 'id': fake_id[1] }] fake_share_for_manage_nfs = { 'share_id': '419ab73c-c0fc-4e73-b56a-70756e0b6d27', 'availability_zone': None, 'terminated_at': None, 'availability_zone_id': None, 'updated_at': None, 'share_network_id': None, 'export_locations': [{ 'uuid': '0ebd59e4-e65e-4fda-9457-320375efd0be', 'deleted': 0, 'created_at': 'datetime.datetime(2017, 5, 10, 10, 0, 3)', 'updated_at': 'datetime.datetime(2017, 5, 10, 10, 0, 3)', 'is_admin_only': False, 'share_instance_id': 'd3cfe195-85cf-41e6-be4f-a96f7e7db192', 'path': '172.27.112.223:/share-pool-01/LV-1/test-folder', 'el_metadata': {}, 'deleted_at': None, 'id': 83 }], 'share_server_id': None, 'snapshot_id': None, 'deleted_at': None, 'id': '615ac1ed-e808-40b5-8d7b-87018c6f66eb', 'size': None, 'replica_state': None, 'user_id': '4944594433f0405588928a4212964658', 'export_location': '172.27.112.223:/share-pool-01/LV-1/test-folder', 'display_description': '', 'consistency_group_id': None, 'project_id': '0e63326c50a246ac81fa1a0c8e003d5b', 'launched_at': None, 'scheduled_at': 'datetime.datetime(2017, 5, 10, 9, 22, 5)', 'status': 'manage_starting', 'share_type_id': '23d8c637-0192-47fa-b921-958f22ed772f', 'deleted': 'False', 'host': 'compute@ift-manila#share-pool-01', 'access_rules_status': 'active', 'display_name': 'test-manage', 'name': 'share-615ac1ed-e808-40b5-8d7b-87018c6f66eb', 'created_at': 'datetime.datetime(2017, 5, 10, 9, 22, 5)', 'share_proto': 'NFS', 'is_public': False, 'source_cgsnapshot_member_id': None } def _get_fake_share_for_manage(self, location=''): return { 'share_id': '419ab73c-c0fc-4e73-b56a-70756e0b6d27', 'availability_zone': None, 'terminated_at': None, 'availability_zone_id': None, 'updated_at': None, 'share_network_id': None, 'export_locations': [{ 'uuid': '0ebd59e4-e65e-4fda-9457-320375efd0be', 'deleted': 0, 'created_at': 'datetime.datetime(2017, 5, 10, 10, 0, 3)', 'updated_at': 'datetime.datetime(2017, 5, 10, 10, 0, 3)', 
'is_admin_only': False, 'share_instance_id': 'd3cfe195-85cf-41e6-be4f-a96f7e7db192', 'path': location, 'el_metadata': {}, 'deleted_at': None, 'id': 83 }], 'share_server_id': None, 'snapshot_id': None, 'deleted_at': None, 'id': '615ac1ed-e808-40b5-8d7b-87018c6f66eb', 'size': None, 'replica_state': None, 'user_id': '4944594433f0405588928a4212964658', 'export_location': location, 'display_description': '', 'consistency_group_id': None, 'project_id': '0e63326c50a246ac81fa1a0c8e003d5b', 'launched_at': None, 'scheduled_at': 'datetime.datetime(2017, 5, 10, 9, 22, 5)', 'status': 'manage_starting', 'share_type_id': '23d8c637-0192-47fa-b921-958f22ed772f', 'deleted': 'False', 'host': 'compute@ift-manila#share-pool-01', 'access_rules_status': 'active', 'display_name': 'test-manage', 'name': 'share-615ac1ed-e808-40b5-8d7b-87018c6f66eb', 'created_at': 'datetime.datetime(2017, 5, 10, 9, 22, 5)', 'share_proto': 'NFS', 'is_public': False, 'source_cgsnapshot_member_id': None } fake_share_for_manage_cifs = { 'share_id': '3a1222d3-c981-490a-9390-4d560ced68eb', 'availability_zone': None, 'terminated_at': None, 'availability_zone_id': None, 'updated_at': None, 'share_network_id': None, 'export_locations': [{ 'uuid': '0ebd59e4-e65e-4fda-9457-320375efd0de', 'deleted': 0, 'created_at': 'datetime.datetime(2017, 5, 11, 10, 10, 3)', 'updated_at': 'datetime.datetime(2017, 5, 11, 10, 10, 3)', 'is_admin_only': False, 'share_instance_id': 'd3cfe195-85cf-41e6-be4f-a96f7e7db192', 'path': '\\\\172.27.113.209\\test-folder-02', 'el_metadata': {}, 'deleted_at': None, 'id': 87 }], 'share_server_id': None, 'snapshot_id': None, 'deleted_at': None, 'id': 'd156baf7-5422-4c9b-8c78-ee7943d000ec', 'size': None, 'replica_state': None, 'user_id': '4944594433f0405588928a4212964658', 'export_location': '\\\\172.27.113.209\\test-folder-02', 'display_description': '', 'consistency_group_id': None, 'project_id': '0e63326c50a246ac81fa1a0c8e003d5b', 'launched_at': None, 'scheduled_at': 'datetime.datetime(2017, 5, 11, 
3, 7, 59)', 'status': 'manage_starting', 'share_type_id': '23d8c637-0192-47fa-b921-958f22ed772f', 'deleted': 'False', 'host': 'compute@ift-manila#share-pool-01', 'access_rules_status': 'active', 'display_name': 'test-manage-02', 'name': 'share-d156baf7-5422-4c9b-8c78-ee7943d000ec', 'created_at': 'datetime.datetime(2017, 5, 11, 3, 7, 59)', 'share_proto': 'CIFS', 'is_public': False, 'source_cgsnapshot_member_id': None } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/infortrend/fake_infortrend_nas_data.py0000664000175000017500000003263200000000000027765 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class InfortrendNASTestData(object): fake_share_id = ['5a0aa06e-1c57-4996-be46-b81e360e8866', # NFS 'aac4fe64-7a9c-472a-b156-9adbb50b4d29'] # CIFS fake_share_name = [fake_share_id[0].replace('-', ''), fake_share_id[1].replace('-', '')] fake_channel_ip = ['172.27.112.223', '172.27.113.209'] fake_service_status_data = ('(64175, 1234, 272, 0)\n\n' '{"cliCode": ' '[{"Return": "0x0000", "CLI": "Successful"}], ' '"returnCode": [], ' '"data": ' '[{"A": ' '{"NFS": ' '{"displayName": "NFS", ' '"state_time": "2017-05-04 14:19:53", ' '"enabled": true, ' '"cpu_rate": "0.0", ' '"mem_rate": "0.0", ' '"state": "exited", ' '"type": "share"}}}]}\n\n') fake_folder_status_data = ('(64175, 1234, 1017, 0)\n\n' '{"cliCode": ' '[{"Return": "0x0000", "CLI": "Successful"}], ' '"returnCode": [], ' '"data": ' '[{"utility": "1.00", ' '"used": "33886208", ' '"subshare": true, ' '"share": false, ' '"worm": "", ' '"free": "321931374592", ' '"fsType": "xfs", ' '"owner": "A", ' '"readOnly": false, ' '"modifyTime": "2017-04-27 16:16", ' '"directory": "/share-pool-01/LV-1", ' '"volumeId": "6541BAFB2E6C57B6", ' '"mounted": true, ' '"size": "321965260800"}, ' '{"utility": "1.00", ' '"used": "33779712", ' '"subshare": false, ' '"share": false, ' '"worm": "", ' '"free": "107287973888", ' '"fsType": "xfs", ' '"owner": "A", ' '"readOnly": false, ' '"modifyTime": "2017-04-27 15:45", ' '"directory": "/share-pool-02/LV-1", ' '"volumeId": "147A8FB67DA39914", ' '"mounted": true, ' '"size": "107321753600"}]}\n\n') fake_nfs_status_off = [{ 'A': { 'NFS': { 'displayName': 'NFS', 'state_time': '2017-05-04 14:19:53', 'enabled': False, 'cpu_rate': '0.0', 'mem_rate': '0.0', 'state': 'exited', 'type': 'share', } } }] fake_folder_status = [{ 'utility': '1.00', 'used': '33886208', 'subshare': True, 'share': False, 'worm': '', 'free': '321931374592', 'fsType': 'xfs', 'owner': 'A', 'readOnly': False, 'modifyTime': '2017-04-27 16:16', 'directory': '/share-pool-01/LV-1', 'volumeId': '6541BAFB2E6C57B6', 'mounted': True, 
'size': '321965260800'}, { 'utility': '1.00', 'used': '33779712', 'subshare': False, 'share': False, 'worm': '', 'free': '107287973888', 'fsType': 'xfs', 'owner': 'A', 'readOnly': False, 'modifyTime': '2017-04-27 15:45', 'directory': '/share-pool-02/LV-1', 'volumeId': '147A8FB67DA39914', 'mounted': True, 'size': '107321753600', }] def fake_get_channel_status(self, ch1_status='UP'): return [{ 'datalink': 'mgmt0', 'status': 'UP', 'typeConfig': 'DHCP', 'IP': '172.27.112.125', 'MAC': '00:d0:23:00:15:a6', 'netmask': '255.255.240.0', 'type': 'dhcp', 'gateway': '172.27.127.254'}, { 'datalink': 'CH0', 'status': 'UP', 'typeConfig': 'DHCP', 'IP': self.fake_channel_ip[0], 'MAC': '00:d0:23:80:15:a6', 'netmask': '255.255.240.0', 'type': 'dhcp', 'gateway': '172.27.127.254'}, { 'datalink': 'CH1', 'status': ch1_status, 'typeConfig': 'DHCP', 'IP': self.fake_channel_ip[1], 'MAC': '00:d0:23:40:15:a6', 'netmask': '255.255.240.0', 'type': 'dhcp', 'gateway': '172.27.127.254'}, { 'datalink': 'CH2', 'status': 'DOWN', 'typeConfig': 'DHCP', 'IP': '', 'MAC': '00:d0:23:c0:15:a6', 'netmask': '', 'type': '', 'gateway': ''}, { 'datalink': 'CH3', 'status': 'DOWN', 'typeConfig': 'DHCP', 'IP': '', 'MAC': '00:d0:23:20:15:a6', 'netmask': '', 'type': '', 'gateway': '', }] fake_fquota_status = [{ 'quota': '21474836480', 'used': '0', 'name': 'test-folder', 'type': 'subfolder', 'id': '537178178'}, { 'quota': '32212254720', 'used': '0', 'name': fake_share_name[0], 'type': 'subfolder', 'id': '805306752'}, { 'quota': '53687091200', 'used': '21474836480', 'name': fake_share_name[1], 'type': 'subfolder', 'id': '69'}, { 'quota': '94091997184', 'used': '0', 'type': 'subfolder', 'id': '70', "name": 'test-folder-02' }] fake_fquota_status_with_no_settings = [] def fake_get_share_status_nfs(self, status=False): fake_share_status_nfs = [{ 'ftp': False, 'cifs': False, 'oss': False, 'sftp': False, 'nfs': status, 'directory': '/LV-1/share-pool-01/' + self.fake_share_name[0], 'exist': True, 'afp': False, 'webdav': False 
}] if status: fake_share_status_nfs[0]['nfs_detail'] = { 'hostList': [{ 'uid': '65534', 'insecure': 'insecure', 'squash': 'all', 'access': 'ro', 'host': '*', 'gid': '65534', 'mode': 'async', 'no_subtree_check': 'no_subtree_check', }] } return fake_share_status_nfs def fake_get_share_status_cifs(self, status=False): fake_share_status_cifs = [{ 'ftp': False, 'cifs': status, 'oss': False, 'sftp': False, 'nfs': False, 'directory': '/share-pool-01/LV-1/' + self.fake_share_name[1], 'exist': True, 'afp': False, 'webdav': False }] if status: fake_share_status_cifs[0]['cifs_detail'] = { 'available': True, 'encrypt': False, 'description': '', 'sharename': 'cifs-01', 'failover': '', 'AIO': True, 'priv': 'None', 'recycle_bin': False, 'ABE': True, } return fake_share_status_cifs fake_subfolder_data = [{ 'size': '6', 'index': '34', 'description': '', 'encryption': '', 'isEnd': False, 'share': False, 'volumeId': '6541BAFB2E6C57B6', 'quota': '', 'modifyTime': '2017-04-06 11:35', 'owner': 'A', 'path': '/share-pool-01/LV-1/UserHome', 'subshare': True, 'type': 'subfolder', 'empty': False, 'name': 'UserHome'}, { 'size': '6', 'index': '39', 'description': '', 'encryption': '', 'isEnd': False, 'share': False, 'volumeId': '6541BAFB2E6C57B6', 'quota': '21474836480', 'modifyTime': '2017-04-27 15:44', 'owner': 'A', 'path': '/share-pool-01/LV-1/test-folder', 'subshare': False, 'type': 'subfolder', 'empty': True, 'name': 'test-folder'}, { 'size': '6', 'index': '45', 'description': '', 'encryption': '', 'isEnd': False, 'share': True, 'volumeId': '6541BAFB2E6C57B6', 'quota': '32212254720', 'modifyTime': '2017-04-27 16:15', 'owner': 'A', 'path': '/share-pool-01/LV-1/' + fake_share_name[0], 'subshare': False, 'type': 'subfolder', 'empty': True, 'name': fake_share_name[0]}, { 'size': '6', 'index': '512', 'description': '', 'encryption': '', 'isEnd': True, 'share': True, 'volumeId': '6541BAFB2E6C57B6', 'quota': '53687091200', 'modifyTime': '2017-04-27 16:16', 'owner': 'A', 'path': 
'/share-pool-01/LV-1/' + fake_share_name[1], 'subshare': False, 'type': 'subfolder', 'empty': True, 'name': fake_share_name[1]}, { 'size': '6', 'index': '777', 'description': '', 'encryption': '', 'isEnd': False, 'share': False, 'volumeId': '6541BAFB2E6C57B6', 'quota': '94091997184', 'modifyTime': '2017-04-28 15:44', 'owner': 'A', 'path': '/share-pool-01/LV-1/test-folder-02', 'subshare': False, 'type': 'subfolder', 'empty': True, 'name': 'test-folder-02' }] fake_cifs_user_list = [{ 'Superuser': 'No', 'Group': 'users', 'Description': '', 'Quota': 'none', 'PWD Expiry Date': '2291-01-19', 'Home Directory': '/share-pool-01/LV-1/UserHome/user01', 'UID': '100001', 'Type': 'Local', 'Name': 'user01'}, { 'Superuser': 'No', 'Group': 'users', 'Description': '', 'Quota': 'none', 'PWD Expiry Date': '2017-08-07', 'Home Directory': '/share-pool-01/LV-1/UserHome/user02', 'UID': '100002', 'Type': 'Local', 'Name': 'user02' }] fake_share_status_nfs_with_rules = [{ 'ftp': False, 'cifs': False, 'oss': False, 'sftp': False, 'nfs': True, 'directory': '/share-pool-01/LV-1/' + fake_share_name[0], 'exist': True, 'nfs_detail': { 'hostList': [{ 'uid': '65534', 'insecure': 'insecure', 'squash': 'all', 'access': 'ro', 'host': '*', 'gid': '65534', 'mode': 'async', 'no_subtree_check': 'no_subtree_check'}, { 'uid': '65534', 'insecure': 'insecure', 'squash': 'all', 'access': 'rw', 'host': '172.27.1.1', 'gid': '65534', 'mode': 'async', 'no_subtree_check': 'no_subtree_check'}, { 'uid': '65534', 'insecure': 'insecure', 'squash': 'all', 'access': 'rw', 'host': '172.27.1.2', 'gid': '65534', 'mode': 'async', 'no_subtree_check': 'no_subtree_check'}] }, 'afp': False, 'webdav': False, }] fake_share_status_cifs_with_rules = [ { 'permission': { 'Read': True, 'Write': True, 'Execute': True}, 'type': 'user', 'id': '100001', 'name': 'user01' }, { 'permission': { 'Read': True, 'Write': False, 'Execute': True}, 'type': 'user', 'id': '100002', 'name': 'user02' }, { 'permission': { 'Read': True, 'Write': False, 
'Execute': True}, 'type': 'group@', 'id': '100', 'name': 'users' }, { 'permission': { 'Read': True, 'Write': False, 'Execute': True}, 'type': 'other@', 'id': '', 'name': '' } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/infortrend/test_infortrend_nas.py0000664000175000017500000005322100000000000027042 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_config import cfg from manila import context from manila import exception from manila.share import configuration from manila.share.drivers.infortrend import driver from manila.share.drivers.infortrend import infortrend_nas from manila import test from manila.tests.share.drivers.infortrend import fake_infortrend_manila_data from manila.tests.share.drivers.infortrend import fake_infortrend_nas_data CONF = cfg.CONF SUCCEED = (0, []) @ddt.ddt class InfortrendNASDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(InfortrendNASDriverTestCase, self).__init__(*args, **kwargs) self._ctxt = context.get_admin_context() self.nas_data = fake_infortrend_nas_data.InfortrendNASTestData() self.m_data = fake_infortrend_manila_data.InfortrendManilaTestData() def setUp(self): CONF.set_default('driver_handles_share_servers', False) CONF.set_default('infortrend_nas_ip', '172.27.1.1') CONF.set_default('infortrend_nas_user', 'fake_user') CONF.set_default('infortrend_nas_password', 'fake_password') CONF.set_default('infortrend_nas_ssh_key', 'fake_sshkey') CONF.set_default('infortrend_share_pools', 'share-pool-01') CONF.set_default('infortrend_share_channels', '0,1') self.fake_conf = configuration.Configuration(None) super(InfortrendNASDriverTestCase, self).setUp() def _get_driver(self, fake_conf, init_dict=False): self._driver = driver.InfortrendNASDriver( configuration=fake_conf) self._iftnas = self._driver.ift_nas self.pool_id = ['6541BAFB2E6C57B6'] self.pool_path = ['/share-pool-01/LV-1/'] if init_dict: self._iftnas.pool_dict = { 'share-pool-01': { 'id': self.pool_id[0], 'path': self.pool_path[0], } } self._iftnas.channel_dict = { '0': self.nas_data.fake_channel_ip[0], '1': self.nas_data.fake_channel_ip[1], } def test_no_login_ssh_key_and_pass(self): self.fake_conf.set_default('infortrend_nas_password', None) self.fake_conf.set_default('infortrend_nas_ssh_key', None) self.assertRaises( exception.InvalidParameterValue, 
self._get_driver, self.fake_conf) def test_parser_with_service_status(self): self._get_driver(self.fake_conf) expect_service_status = [{ 'A': { 'NFS': { 'displayName': 'NFS', 'state_time': '2017-05-04 14:19:53', 'enabled': True, 'cpu_rate': '0.0', 'mem_rate': '0.0', 'state': 'exited', 'type': 'share', } } }] rc, service_status = self._iftnas._parser( self.nas_data.fake_service_status_data) self.assertEqual(0, rc) self.assertDictListMatch(expect_service_status, service_status) def test_parser_with_folder_status(self): self._get_driver(self.fake_conf) expect_folder_status = [{ 'utility': '1.00', 'used': '33886208', 'subshare': True, 'share': False, 'worm': '', 'free': '321931374592', 'fsType': 'xfs', 'owner': 'A', 'readOnly': False, 'modifyTime': '2017-04-27 16:16', 'directory': self.pool_path[0][:-1], 'volumeId': self.pool_id[0], 'mounted': True, 'size': '321965260800'}, { 'utility': '1.00', 'used': '33779712', 'subshare': False, 'share': False, 'worm': '', 'free': '107287973888', 'fsType': 'xfs', 'owner': 'A', 'readOnly': False, 'modifyTime': '2017-04-27 15:45', 'directory': '/share-pool-02/LV-1', 'volumeId': '147A8FB67DA39914', 'mounted': True, 'size': '107321753600' }] rc, folder_status = self._iftnas._parser( self.nas_data.fake_folder_status_data) self.assertEqual(0, rc) self.assertDictListMatch(expect_folder_status, folder_status) def test_ensure_service_on(self): self._get_driver(self.fake_conf) mock_execute = mock.Mock( side_effect=[(0, self.nas_data.fake_nfs_status_off), SUCCEED]) self._iftnas._execute = mock_execute self._iftnas._ensure_service_on('nfs') mock_execute.assert_called_with(['service', 'restart', 'nfs']) def test_check_channels_status(self): self._get_driver(self.fake_conf) expect_channel_dict = { '0': self.nas_data.fake_channel_ip[0], '1': self.nas_data.fake_channel_ip[1], } self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_get_channel_status())) self._iftnas._check_channels_status() 
self.assertDictEqual(expect_channel_dict, self._iftnas.channel_dict) @mock.patch.object(infortrend_nas.LOG, 'warning') def test_channel_status_down(self, log_warning): self._get_driver(self.fake_conf) self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_get_channel_status('DOWN'))) self._iftnas._check_channels_status() self.assertEqual(1, log_warning.call_count) @mock.patch.object(infortrend_nas.LOG, 'error') def test_invalid_channel(self, log_error): self.fake_conf.set_default('infortrend_share_channels', '0, 6') self._get_driver(self.fake_conf) self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_get_channel_status())) self.assertRaises( exception.InfortrendNASException, self._iftnas._check_channels_status) def test_check_pools_setup(self): self._get_driver(self.fake_conf) expect_pool_dict = { 'share-pool-01': { 'id': self.pool_id[0], 'path': self.pool_path[0], } } self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_folder_status)) self._iftnas._check_pools_setup() self.assertDictEqual(expect_pool_dict, self._iftnas.pool_dict) def test_unknow_pools_setup(self): self.fake_conf.set_default( 'infortrend_share_pools', 'chengwei, share-pool-01') self._get_driver(self.fake_conf) self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_folder_status)) self.assertRaises( exception.InfortrendNASException, self._iftnas._check_pools_setup) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_get_pool_quota_used(self, mock_execute): self._get_driver(self.fake_conf, True) mock_execute.return_value = (0, self.nas_data.fake_fquota_status) pool_quota = self._iftnas._get_pool_quota_used('share-pool-01') mock_execute.assert_called_with( ['fquota', 'status', self.pool_id[0], 'LV-1', '-t', 'folder']) self.assertEqual(201466179584, pool_quota) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_create_share_nfs(self, mock_execute): self._get_driver(self.fake_conf, True) 
fake_share_id = self.m_data.fake_share_nfs['id'] fake_share_name = fake_share_id.replace('-', '') expect_locations = [ self.nas_data.fake_channel_ip[0] + ':/share-pool-01/LV-1/' + fake_share_name, self.nas_data.fake_channel_ip[1] + ':/share-pool-01/LV-1/' + fake_share_name, ] mock_execute.side_effect = [ SUCCEED, # create folder SUCCEED, # set size (0, self.nas_data.fake_get_share_status_nfs()), # check proto SUCCEED, # enable proto (0, self.nas_data.fake_get_channel_status()) # update channel ] locations = self._driver.create_share( self._ctxt, self.m_data.fake_share_nfs) self.assertEqual(expect_locations, locations) mock_execute.assert_any_call( ['share', self.pool_path[0] + fake_share_name, 'nfs', 'on']) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_create_share_cifs(self, mock_execute): self._get_driver(self.fake_conf, True) fake_share_id = self.m_data.fake_share_cifs['id'] fake_share_name = fake_share_id.replace('-', '') expect_locations = [ '\\\\' + self.nas_data.fake_channel_ip[0] + '\\' + fake_share_name, '\\\\' + self.nas_data.fake_channel_ip[1] + '\\' + fake_share_name, ] mock_execute.side_effect = [ SUCCEED, # create folder SUCCEED, # set size (0, self.nas_data.fake_get_share_status_cifs()), # check proto SUCCEED, # enable proto (0, self.nas_data.fake_get_channel_status()) # update channel ] locations = self._driver.create_share( self._ctxt, self.m_data.fake_share_cifs) self.assertEqual(expect_locations, locations) mock_execute.assert_any_call( ['share', self.pool_path[0] + fake_share_name, 'cifs', 'on', '-n', fake_share_name]) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_delete_share_nfs(self, mock_execute): self._get_driver(self.fake_conf, True) fake_share_id = self.m_data.fake_share_nfs['id'] fake_share_name = fake_share_id.replace('-', '') mock_execute.side_effect = [ (0, self.nas_data.fake_subfolder_data), # pagelist folder SUCCEED, # delete folder ] self._driver.delete_share( self._ctxt, 
self.m_data.fake_share_nfs) mock_execute.assert_any_call( ['folder', 'options', self.pool_id[0], 'LV-1', '-d', fake_share_name]) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_delete_share_cifs(self, mock_execute): self._get_driver(self.fake_conf, True) fake_share_id = self.m_data.fake_share_cifs['id'] fake_share_name = fake_share_id.replace('-', '') mock_execute.side_effect = [ (0, self.nas_data.fake_subfolder_data), # pagelist folder SUCCEED, # delete folder ] self._driver.delete_share( self._ctxt, self.m_data.fake_share_cifs) mock_execute.assert_any_call( ['folder', 'options', self.pool_id[0], 'LV-1', '-d', fake_share_name]) @mock.patch.object(infortrend_nas.LOG, 'warning') @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_delete_non_exist_share(self, mock_execute, log_warning): self._get_driver(self.fake_conf, True) mock_execute.side_effect = [ (0, self.nas_data.fake_subfolder_data), # pagelist folder ] self._driver.delete_share( self._ctxt, self.m_data.fake_non_exist_share) self.assertEqual(1, log_warning.call_count) def test_get_pool(self): self._get_driver(self.fake_conf, True) pool = self._driver.get_pool(self.m_data.fake_share_nfs) self.assertEqual('share-pool-01', pool) def test_get_pool_without_host(self): self._get_driver(self.fake_conf, True) self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_subfolder_data)) pool = self._driver.get_pool(self.m_data.fake_share_cifs_no_host) self.assertEqual('share-pool-01', pool) def test_ensure_share_nfs(self): self._get_driver(self.fake_conf, True) share_id = self.m_data.fake_share_nfs['id'] share_name = share_id.replace('-', '') share_path = self.pool_path[0] + share_name expect_locations = [ self.nas_data.fake_channel_ip[0] + ':' + share_path, self.nas_data.fake_channel_ip[1] + ':' + share_path, ] self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_get_channel_status())) locations = self._driver.ensure_share( self._ctxt, 
self.m_data.fake_share_nfs) self.assertEqual(expect_locations, locations) def test_ensure_share_cifs(self): self._get_driver(self.fake_conf, True) share_id = self.m_data.fake_share_cifs['id'] share_name = share_id.replace('-', '') expect_locations = [ '\\\\' + self.nas_data.fake_channel_ip[0] + '\\' + share_name, '\\\\' + self.nas_data.fake_channel_ip[1] + '\\' + share_name, ] self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_get_channel_status())) locations = self._driver.ensure_share( self._ctxt, self.m_data.fake_share_cifs) self.assertEqual(expect_locations, locations) def test_extend_share(self): self._get_driver(self.fake_conf, True) share_id = self.m_data.fake_share_nfs['id'] share_name = share_id.replace('-', '') self._iftnas._execute = mock.Mock(return_value=SUCCEED) self._driver.extend_share(self.m_data.fake_share_nfs, 100) self._iftnas._execute.assert_called_once_with( ['fquota', 'create', self.pool_id[0], 'LV-1', share_name, '100G', '-t', 'folder']) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_shrink_share(self, mock_execute): self._get_driver(self.fake_conf, True) share_id = self.m_data.fake_share_nfs['id'] share_name = share_id.replace('-', '') mock_execute.side_effect = [ (0, self.nas_data.fake_fquota_status), # check used SUCCEED, ] self._driver.shrink_share(self.m_data.fake_share_nfs, 10) mock_execute.assert_has_calls([ mock.call(['fquota', 'status', self.pool_id[0], 'LV-1', '-t', 'folder']), mock.call(['fquota', 'create', self.pool_id[0], 'LV-1', share_name, '10G', '-t', 'folder'])]) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_shrink_share_smaller_than_used_size(self, mock_execute): self._get_driver(self.fake_conf, True) mock_execute.side_effect = [ (0, self.nas_data.fake_fquota_status), # check used ] self.assertRaises( exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, self.m_data.fake_share_cifs, 10) def test_get_share_size(self): 
self._get_driver(self.fake_conf, True) self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_fquota_status)) size = self._iftnas._get_share_size('', '', 'test-folder-02') self.assertEqual(87.63, size) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_manage_existing_nfs(self, mock_execute): self._get_driver(self.fake_conf, True) share_id = self.m_data.fake_share_for_manage_nfs['id'] share_name = share_id.replace('-', '') origin_share_path = self.pool_path[0] + 'test-folder' export_share_path = self.pool_path[0] + share_name expect_result = { 'size': 20.0, 'export_locations': [ self.nas_data.fake_channel_ip[0] + ':' + export_share_path, self.nas_data.fake_channel_ip[1] + ':' + export_share_path, ] } mock_execute.side_effect = [ (0, self.nas_data.fake_subfolder_data), # pagelist folder (0, self.nas_data.fake_get_share_status_nfs()), # check proto SUCCEED, # enable nfs (0, self.nas_data.fake_fquota_status), # get share size SUCCEED, # rename share (0, self.nas_data.fake_get_channel_status()) # update channel ] result = self._driver.manage_existing( self.m_data.fake_share_for_manage_nfs, {} ) self.assertEqual(expect_result, result) mock_execute.assert_has_calls([ mock.call(['pagelist', 'folder', self.pool_path[0]]), mock.call(['share', 'status', '-f', origin_share_path]), mock.call(['share', origin_share_path, 'nfs', 'on']), mock.call(['fquota', 'status', self.pool_id[0], origin_share_path.split('/')[3], '-t', 'folder']), mock.call(['folder', 'options', self.pool_id[0], 'LV-1', '-k', 'test-folder', share_name]), mock.call(['ifconfig', 'inet', 'show']), ]) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_manage_existing_cifs(self, mock_execute): self._get_driver(self.fake_conf, True) share_id = self.m_data.fake_share_for_manage_cifs['id'] share_name = share_id.replace('-', '') origin_share_path = self.pool_path[0] + 'test-folder-02' expect_result = { 'size': 87.63, 'export_locations': [ '\\\\' + 
self.nas_data.fake_channel_ip[0] + '\\' + share_name, '\\\\' + self.nas_data.fake_channel_ip[1] + '\\' + share_name, ] } mock_execute.side_effect = [ (0, self.nas_data.fake_subfolder_data), # pagelist folder (0, self.nas_data.fake_get_share_status_cifs()), # check proto SUCCEED, # enable cifs (0, self.nas_data.fake_fquota_status), # get share size SUCCEED, # rename share (0, self.nas_data.fake_get_channel_status()) # update channel ] result = self._driver.manage_existing( self.m_data.fake_share_for_manage_cifs, {} ) self.assertEqual(expect_result, result) mock_execute.assert_has_calls([ mock.call(['pagelist', 'folder', self.pool_path[0]]), mock.call(['share', 'status', '-f', origin_share_path]), mock.call(['share', origin_share_path, 'cifs', 'on', '-n', share_name]), mock.call(['fquota', 'status', self.pool_id[0], origin_share_path.split('/')[3], '-t', 'folder']), mock.call(['folder', 'options', self.pool_id[0], 'LV-1', '-k', 'test-folder-02', share_name]), mock.call(['ifconfig', 'inet', 'show']), ]) def test_manage_existing_with_no_location(self): self._get_driver(self.fake_conf, True) fake_share = self.m_data._get_fake_share_for_manage('') self.assertRaises( exception.InfortrendNASException, self._driver.manage_existing, fake_share, {}) @ddt.data('172.27.1.1:/share-pool-01/LV-1/test-folder', '172.27.112.223:/share-pool-01/LV-1/some-folder') def test_manage_existing_wrong_ip_or_name(self, fake_share_path): self._get_driver(self.fake_conf, True) fake_share = self.m_data._get_fake_share_for_manage(fake_share_path) self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_subfolder_data)) self.assertRaises( exception.InfortrendNASException, self._driver.manage_existing, fake_share, {}) @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_manage_existing_with_no_size_setting(self, mock_execute): self._get_driver(self.fake_conf, True) mock_execute.side_effect = [ (0, self.nas_data.fake_subfolder_data), # pagelist folder (0, 
self.nas_data.fake_get_share_status_nfs()), # check proto SUCCEED, # enable nfs (0, self.nas_data.fake_fquota_status_with_no_settings), ] self.assertRaises( exception.InfortrendNASException, self._driver.manage_existing, self.m_data.fake_share_for_manage_nfs, {}) @ddt.data('NFS', 'CIFS') @mock.patch.object(infortrend_nas.InfortrendNAS, '_execute') def test_unmanage(self, protocol, mock_execute): share_to_unmanage = (self.m_data.fake_share_nfs if protocol == 'NFS' else self.m_data.fake_share_cifs) self._get_driver(self.fake_conf, True) mock_execute.side_effect = [ (0, self.nas_data.fake_subfolder_data), # pagelist folder ] self._driver.unmanage(share_to_unmanage) mock_execute.assert_called_once_with( ['pagelist', 'folder', self.pool_path[0]], ) @mock.patch.object(infortrend_nas.LOG, 'warning') def test_unmanage_share_not_exist(self, log_warning): self._get_driver(self.fake_conf, True) self._iftnas._execute = mock.Mock( return_value=(0, self.nas_data.fake_subfolder_data)) self._driver.unmanage( self.m_data.fake_share_for_manage_nfs, ) self.assertEqual(1, log_warning.call_count) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0216703 manila-21.0.0/manila/tests/share/drivers/inspur/0000775000175000017500000000000000000000000021561 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/inspur/__init__.py0000664000175000017500000000000000000000000023660 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0216703 manila-21.0.0/manila/tests/share/drivers/inspur/as13000/0000775000175000017500000000000000000000000022550 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/inspur/as13000/__init__.py0000664000175000017500000000000000000000000024647 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/inspur/as13000/test_as13000_nas.py0000664000175000017500000014400500000000000026015 0ustar00zuulzuul00000000000000# Copyright 2018 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Share driver test for Inspur AS13000 """ import json import time from unittest import mock import ddt from oslo_config import cfg import requests from manila import context from manila import exception from manila.share import driver from manila.share.drivers.inspur.as13000 import as13000_nas from manila import test from manila.tests import fake_share CONF = cfg.CONF class FakeConfig(object): def __init__(self, *args, **kwargs): self.driver_handles_share_servers = False self.share_driver = 'fake_share_driver_name' self.share_backend_name = 'fake_as13000' self.as13000_nas_ip = kwargs.get( 'as13000_nas_ip', 'some_ip') self.as13000_nas_port = kwargs.get( 'as13000_nas_port', 'some_port') self.as13000_nas_login = kwargs.get( 'as13000_nas_login', 'username') self.as13000_nas_password = kwargs.get( 'as13000_nas_password', 'password') self.as13000_share_pools = kwargs.get( 'as13000_share_pools', ['fakepool']) self.as13000_token_available_time = kwargs.get( 'as13000_token_available_time', 3600) self.network_config_group = kwargs.get( "network_config_group", "fake_network_config_group") self.admin_network_config_group = kwargs.get( "admin_network_config_group", "fake_admin_network_config_group") self.config_group = kwargs.get("config_group", "fake_config_group") self.reserved_share_percentage = kwargs.get( "reserved_share_percentage", 0) self.reserved_share_from_snapshot_percentage = kwargs.get( "reserved_share_from_snapshot_percentage", 0) self.reserved_share_extend_percentage = kwargs.get( "reserved_share_extend_percentage", 0) self.max_over_subscription_ratio = kwargs.get( "max_over_subscription_ratio", 20.0) self.filter_function = kwargs.get("filter_function", None) self.goodness_function = kwargs.get("goodness_function", None) def safe_get(self, key): return getattr(self, key) def append_config_values(self, *args, **kwargs): pass test_config = FakeConfig() class FakeResponse(object): def __init__(self, status, output): self.status_code = status self.text = 'return 
message' self._json = output def json(self): return self._json def close(self): pass @ddt.ddt class RestAPIExecutorTestCase(test.TestCase): def setUp(self): self.rest_api = as13000_nas.RestAPIExecutor( test_config.as13000_nas_ip, test_config.as13000_nas_port, test_config.as13000_nas_login, test_config.as13000_nas_password) super(RestAPIExecutorTestCase, self).setUp() def test_logins(self): mock_login = self.mock_object(self.rest_api, 'login', mock.Mock(return_value='fake_token')) self.rest_api.logins() mock_login.assert_called_once() def test_login(self): fake_response = { 'token': 'fake_token', 'expireTime': '7200', 'type': 0} mock_sra = self.mock_object(self.rest_api, 'send_rest_api', mock.Mock(return_value=fake_response)) result = self.rest_api.login() self.assertEqual('fake_token', result) login_params = {'name': test_config.as13000_nas_login, 'password': test_config.as13000_nas_password} mock_sra.assert_called_once_with(method='security/token', params=login_params, request_type='post') def test_logout(self): mock_sra = self.mock_object(self.rest_api, 'send_rest_api', mock.Mock(return_value=None)) self.rest_api.logout() mock_sra.assert_called_once_with( method='security/token', request_type='delete') @ddt.data(True, False) def test_refresh_token(self, force): mock_login = self.mock_object(self.rest_api, 'login', mock.Mock(return_value='fake_token')) mock_logout = self.mock_object(self.rest_api, 'logout', mock.Mock()) self.rest_api.refresh_token(force) if force is not True: mock_logout.assert_called_once_with() mock_login.assert_called_once_with() def test_send_rest_api(self): expected = {'value': 'abc'} mock_sa = self.mock_object(self.rest_api, 'send_api', mock.Mock(return_value=expected)) result = self.rest_api.send_rest_api( method='fake_method', params='fake_params', request_type='fake_type') self.assertEqual(expected, result) mock_sa.assert_called_once_with( 'fake_method', 'fake_params', 'fake_type') def test_send_rest_api_retry(self): expected = {'value': 
'abc'} mock_sa = self.mock_object( self.rest_api, 'send_api', mock.Mock( side_effect=( exception.NetworkException, expected))) # mock.Mock(side_effect=exception.NetworkException)) mock_rt = self.mock_object(self.rest_api, 'refresh_token', mock.Mock()) result = self.rest_api.send_rest_api( method='fake_method', params='fake_params', request_type='fake_type' ) self.assertEqual(expected, result) mock_sa.assert_called_with( 'fake_method', 'fake_params', 'fake_type') mock_rt.assert_called_with(force=True) def test_send_rest_api_3times_fail(self): mock_sa = self.mock_object( self.rest_api, 'send_api', mock.Mock( side_effect=(exception.NetworkException))) mock_rt = self.mock_object(self.rest_api, 'refresh_token', mock.Mock()) self.assertRaises( exception.ShareBackendException, self.rest_api.send_rest_api, method='fake_method', params='fake_params', request_type='fake_type') mock_sa.assert_called_with('fake_method', 'fake_params', 'fake_type') mock_rt.assert_called_with(force=True) def test_send_rest_api_backend_error_fail(self): mock_sa = self.mock_object(self.rest_api, 'send_api', mock.Mock( side_effect=(exception.ShareBackendException( 'fake_error_message')))) mock_rt = self.mock_object(self.rest_api, 'refresh_token') self.assertRaises( exception.ShareBackendException, self.rest_api.send_rest_api, method='fake_method', params='fake_params', request_type='fake_type') mock_sa.assert_called_with('fake_method', 'fake_params', 'fake_type') mock_rt.assert_not_called() @ddt.data( {'method': 'fake_method', 'request_type': 'post', 'params': {'fake_param': 'fake_value'}}, {'method': 'fake_method', 'request_type': 'get', 'params': {'fake_param': 'fake_value'}}, {'method': 'fake_method', 'request_type': 'delete', 'params': {'fake_param': 'fake_value'}}, {'method': 'fake_method', 'request_type': 'put', 'params': {'fake_param': 'fake_value'}}, ) @ddt.unpack def test_send_api(self, method, params, request_type): self.rest_api._token_pool = ['fake_token'] if request_type in ('post', 
'delete', 'put'): fake_output = {'code': 0, 'message': 'success'} elif request_type == 'get': fake_output = {'code': 0, 'data': 'fake_date'} fake_response = FakeResponse(200, fake_output) mock_request = self.mock_object(requests, request_type, mock.Mock(return_value=fake_response)) self.rest_api.send_api(method, params=params, request_type=request_type) url = 'http://%s:%s/rest/%s' % (test_config.as13000_nas_ip, test_config.as13000_nas_port, method) headers = {'X-Auth-Token': 'fake_token'} mock_request.assert_called_once_with(url, data=json.dumps(params), headers=headers) @ddt.data({'method': r'security/token', 'params': {'name': test_config.as13000_nas_login, 'password': test_config.as13000_nas_password}, 'request_type': 'post'}, {'method': r'security/token', 'params': None, 'request_type': 'delete'}) @ddt.unpack def test_send_api_access_success(self, method, params, request_type): if request_type == 'post': fake_value = {'code': 0, 'data': { 'token': 'fake_token', 'expireTime': '7200', 'type': 0}} mock_requests = self.mock_object( requests, 'post', mock.Mock( return_value=FakeResponse( 200, fake_value))) result = self.rest_api.send_api(method, params, request_type) self.assertEqual(fake_value['data'], result) mock_requests.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.as13000_nas_ip, test_config.as13000_nas_port, method), data=json.dumps(params), headers=None) if request_type == 'delete': fake_value = {'code': 0, 'message': 'Success!'} self.rest_api._token_pool = ['fake_token'] mock_requests = self.mock_object( requests, 'delete', mock.Mock( return_value=FakeResponse( 200, fake_value))) self.rest_api.send_api(method, params, request_type) mock_requests.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.as13000_nas_ip, test_config.as13000_nas_port, method), data=None, headers={'X-Auth-Token': 'fake_token'}) def test_send_api_wrong_access_fail(self): req_params = {'method': r'security/token', 'params': {'name': 
test_config.as13000_nas_login, 'password': 'fake_password'}, 'request_type': 'post'} fake_value = {'message': ' User name or password error.', 'code': 400} mock_request = self.mock_object( requests, 'post', mock.Mock( return_value=FakeResponse( 200, fake_value))) self.assertRaises( exception.ShareBackendException, self.rest_api.send_api, method=req_params['method'], params=req_params['params'], request_type=req_params['request_type']) mock_request.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.as13000_nas_ip, test_config.as13000_nas_port, req_params['method']), data=json.dumps( req_params['params']), headers=None) def test_send_api_token_overtime_fail(self): self.rest_api._token_pool = ['fake_token'] fake_value = {'method': 'fake_url', 'params': 'fake_params', 'reuest_type': 'post'} fake_out_put = {'message': 'Unauthorized access!', 'code': 301} mock_requests = self.mock_object( requests, 'post', mock.Mock( return_value=FakeResponse( 200, fake_out_put))) self.assertRaises(exception.NetworkException, self.rest_api.send_api, method='fake_url', params='fake_params', request_type='post') mock_requests.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.as13000_nas_ip, test_config.as13000_nas_port, fake_value['method']), data=json.dumps('fake_params'), headers={ 'X-Auth-Token': 'fake_token'}) def test_send_api_fail(self): self.rest_api._token_pool = ['fake_token'] fake_output = {'code': 100, 'message': 'fake_message'} mock_request = self.mock_object( requests, 'post', mock.Mock( return_value=FakeResponse( 200, fake_output))) self.assertRaises( exception.ShareBackendException, self.rest_api.send_api, method='fake_method', params='fake_params', request_type='post') mock_request.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.as13000_nas_ip, test_config.as13000_nas_port, 'fake_method'), data=json.dumps('fake_params'), headers={'X-Auth-Token': 'fake_token'} ) @ddt.ddt class AS13000ShareDriverTestCase(test.TestCase): def 
__init__(self, *args, **kwds): super(AS13000ShareDriverTestCase, self).__init__(*args, **kwds) self._ctxt = context.get_admin_context() self.configuration = FakeConfig() def setUp(self): self.mock_object(as13000_nas.CONF, '_check_required_opts') self.driver = as13000_nas.AS13000ShareDriver( configuration=self.configuration) super(AS13000ShareDriverTestCase, self).setUp() def test_do_setup(self): mock_login = self.mock_object( as13000_nas.RestAPIExecutor, 'logins', mock.Mock()) mock_vpe = self.mock_object( self.driver, '_validate_pools_exist', mock.Mock()) mock_gdd = self.mock_object( self.driver, '_get_directory_detail', mock.Mock( return_value='{}')) mock_gni = self.mock_object( self.driver, '_get_nodes_ips', mock.Mock( return_value=['fake_ips'])) self.driver.do_setup(self._ctxt) mock_login.assert_called_once() mock_vpe.assert_called_once() mock_gdd.assert_called_once_with( test_config.as13000_share_pools[0]) mock_gni.assert_called_once() def test_do_setup_login_fail(self): mock_login = self.mock_object( as13000_nas.RestAPIExecutor, 'logins', mock.Mock( side_effect=exception.ShareBackendException('fake_exception'))) self.assertRaises( exception.ShareBackendException, self.driver.do_setup, self._ctxt) mock_login.assert_called_once() def test_do_setup_vpe_failed(self): mock_login = self.mock_object(as13000_nas.RestAPIExecutor, 'logins', mock.Mock()) side_effect = exception.InvalidInput(reason='fake_exception') mock_vpe = self.mock_object(self.driver, '_validate_pools_exist', mock.Mock(side_effect=side_effect)) self.assertRaises(exception.InvalidInput, self.driver.do_setup, self._ctxt) mock_login.assert_called_once() mock_vpe.assert_called_once() def test_check_for_setup_error_base_dir_detail_failed(self): self.driver.base_dir_detail = None self.driver.ips = ['fake_ip'] self.assertRaises( exception.ShareBackendException, self.driver.check_for_setup_error) def test_check_for_setup_error_node_status_fail(self): self.driver.base_dir_detail = 'fakepool' self.driver.ips = 
[] self.assertRaises(exception.ShareBackendException, self.driver.check_for_setup_error) @ddt.data('nfs', 'cifs') def test_create_share(self, share_proto): share = fake_share.fake_share(share_proto=share_proto) share_instance = fake_share.fake_share_instance(share, host="H@B#P") mock_cd = self.mock_object(self.driver, '_create_directory', mock.Mock(return_value='/fake/path')) mock_cns = self.mock_object(self.driver, '_create_nfs_share') mock_ccs = self.mock_object(self.driver, '_create_cifs_share') mock_sdq = self.mock_object(self.driver, '_set_directory_quota') self.driver.ips = ['127.0.0.1'] locations = self.driver.create_share(self._ctxt, share_instance) if share_proto == 'nfs': expect_locations = [{'path': r'127.0.0.1:/fake/path'}] self.assertEqual(locations, expect_locations) else: expect_locations = [{'path': r'\\127.0.0.1\share_fakeinstanceid'}] self.assertEqual(locations, expect_locations) mock_cd.assert_called_once_with(share_name='share_fakeinstanceid', pool_name='P') if share_proto == 'nfs': mock_cns.assert_called_once_with(share_path='/fake/path') elif share['share_proto'] == 'cifs': mock_ccs.assert_called_once_with(share_path='/fake/path', share_name='share_fakeinstanceid') mock_sdq.assert_called_once_with('/fake/path', share['size']) @ddt.data('nfs', 'cifs') def test_create_share_from_snapshot(self, share_proto): share = fake_share.fake_share(share_proto=share_proto) share_instance = fake_share.fake_share_instance(share, host="H@B#P") mock_cd = self.mock_object(self.driver, '_create_directory', mock.Mock(return_value='/fake/path')) mock_cns = self.mock_object(self.driver, '_create_nfs_share') mock_ccs = self.mock_object(self.driver, '_create_cifs_share') mock_sdq = self.mock_object(self.driver, '_set_directory_quota') mock_cdtd = self.mock_object(self.driver, '_clone_directory_to_dest') self.driver.ips = ['127.0.0.1'] locations = self.driver.create_share_from_snapshot( self._ctxt, share_instance, None) if share_proto == 'nfs': expect_locations = 
[{'path': r'127.0.0.1:/fake/path'}] self.assertEqual(locations, expect_locations) else: expect_locations = [{'path': r'\\127.0.0.1\share_fakeinstanceid'}] self.assertEqual(locations, expect_locations) mock_cd.assert_called_once_with(share_name='share_fakeinstanceid', pool_name='P') if share_proto == 'nfs': mock_cns.assert_called_once_with(share_path='/fake/path') elif share['share_proto'] == 'cifs': mock_ccs.assert_called_once_with(share_path='/fake/path', share_name='share_fakeinstanceid') mock_sdq.assert_called_once_with('/fake/path', share['size']) mock_cdtd.assert_called_once_with(snapshot=None, dest_path='/fake/path') @ddt.data('nfs', 'cifs') def test_delete_share(self, share_proto): share = fake_share.fake_share(share_proto=share_proto) share_instance = fake_share.fake_share_instance(share, host="H@B#P") expect_share_path = r'/P/share_fakeinstanceid' mock_gns = self.mock_object(self.driver, '_get_nfs_share', mock.Mock(return_value=['fake_share'])) mock_dns = self.mock_object(self.driver, '_delete_nfs_share') mock_gcs = self.mock_object(self.driver, '_get_cifs_share', mock.Mock(return_value=['fake_share'])) mock_dcs = self.mock_object(self.driver, '_delete_cifs_share') mock_dd = self.mock_object(self.driver, '_delete_directory') self.driver.delete_share(self._ctxt, share_instance) if share_proto == 'nfs': mock_gns.assert_called_once_with(expect_share_path) mock_dns.assert_called_once_with(expect_share_path) else: mock_gcs.assert_called_once_with('share_fakeinstanceid') mock_dcs.assert_called_once_with('share_fakeinstanceid') mock_dd.assert_called_once_with(expect_share_path) @ddt.data('nfs', 'cifs') def test_delete_share_not_exist(self, share_proto): share = fake_share.fake_share(share_proto=share_proto) share_instance = fake_share.fake_share_instance(share, host="H@B#P") expect_share_path = r'/P/share_fakeinstanceid' mock_gns = self.mock_object(self.driver, '_get_nfs_share', mock.Mock(return_value=[])) mock_gcs = self.mock_object(self.driver, 
'_get_cifs_share', mock.Mock(return_value=[])) self.driver.delete_share(self._ctxt, share_instance) if share_proto == 'nfs': mock_gns.assert_called_once_with(expect_share_path) elif share_proto == 'cifs': mock_gcs.assert_called_once_with('share_fakeinstanceid') def test_extend_share(self): share = fake_share.fake_share() share_instance = fake_share.fake_share_instance(share, host="H@B#P") expect_share_path = r'/P/share_fakeinstanceid' mock_sdq = self.mock_object(self.driver, '_set_directory_quota') self.driver.extend_share(share_instance, 2) mock_sdq.assert_called_once_with(expect_share_path, 2) @ddt.data('nfs', 'cifs') def test_ensure_share(self, share_proto): share = fake_share.fake_share(share_proto=share_proto) share_instance = fake_share.fake_share_instance(share, host="H@B#P") mock_gns = self.mock_object(self.driver, '_get_nfs_share', mock.Mock(return_value=['fake_share'])) mock_gcs = self.mock_object(self.driver, '_get_cifs_share', mock.Mock(return_value=['fake_share'])) self.driver.ips = ['127.0.0.1'] locations = self.driver.ensure_share(self._ctxt, share_instance) if share_proto == 'nfs': expect_locations = [{'path': r'127.0.0.1:/P/share_fakeinstanceid'}] self.assertEqual(locations, expect_locations) mock_gns.assert_called_once_with(r'/P/share_fakeinstanceid') else: expect_locations = [{'path': r'\\127.0.0.1\share_fakeinstanceid'}] self.assertEqual(locations, expect_locations) mock_gcs.assert_called_once_with(r'share_fakeinstanceid') def test_ensure_share_fail_1(self): share = fake_share.fake_share() share_instance = fake_share.fake_share_instance(share, host="H@B#P") self.assertRaises(exception.InvalidInput, self.driver.ensure_share, self._ctxt, share_instance) @ddt.data('nfs', 'cifs') def test_ensure_share_None_share_fail(self, share_proto): share = fake_share.fake_share(share_proto=share_proto) share_instance = fake_share.fake_share_instance(share, host="H@B#P") mock_gns = self.mock_object(self.driver, '_get_nfs_share', mock.Mock(return_value=[])) 
mock_gcs = self.mock_object(self.driver, '_get_cifs_share', mock.Mock(return_value=[])) self.assertRaises(exception.ShareResourceNotFound, self.driver.ensure_share, self._ctxt, share_instance) if share_proto == 'nfs': mock_gns.assert_called_once_with(r'/P/share_fakeinstanceid') elif share['share_proto'] == 'cifs': mock_gcs.assert_called_once_with(r'share_fakeinstanceid') def test_create_snapshot(self): share = fake_share.fake_share() share_instance = fake_share.fake_share_instance(share, host="H@B#P") snapshot_instance_pseudo = { 'share': share_instance, 'id': 'fakesnapid' } mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver.create_snapshot(self._ctxt, snapshot_instance_pseudo) method = 'snapshot/directory' request_type = 'post' params = {'path': r'/P/share_fakeinstanceid', 'snapName': 'snap_fakesnapid'} mock_rest.assert_called_once_with(method=method, request_type=request_type, params=params) def test_delete_snapshot_normal(self): share = fake_share.fake_share() share_instance = fake_share.fake_share_instance(share, host="H@B#P") snapshot_instance_pseudo = { 'share': share_instance, 'id': 'fakesnapid' } mock_gsfs = self.mock_object(self.driver, '_get_snapshots_from_share', mock.Mock(return_value=['fakesnapshot'])) mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver.delete_snapshot(self._ctxt, snapshot_instance_pseudo) mock_gsfs.assert_called_once_with('/P/share_fakeinstanceid') method = ('snapshot/directory?' 
'path=/P/share_fakeinstanceid&snapName=snap_fakesnapid') request_type = 'delete' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test_delete_snapshot_not_exist(self): share = fake_share.fake_share() share_instance = fake_share.fake_share_instance(share, host="H@B#P") snapshot_instance_pseudo = { 'share': share_instance, 'snapshot_id': 'fakesnapid' } mock_gsfs = self.mock_object(self.driver, '_get_snapshots_from_share', mock.Mock(return_value=[])) mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver.delete_snapshot(self._ctxt, snapshot_instance_pseudo) mock_gsfs.assert_called_once_with('/P/share_fakeinstanceid') mock_rest.assert_not_called() @ddt.data('nfs', 'icfs', 'cifs') def test_transfer_rule_to_client(self, proto): rule = {'access_to': '1.1.1.1', 'access_level': 'rw'} result = self.driver.transfer_rule_to_client(proto, rule) client = {'name': '1.1.1.1', 'authority': 'rwx' if proto == 'cifs' else 'rw'} if proto == 'nfs': client.update({'type': 0}) else: client.update({'type': 1}) self.assertEqual(client, result) @ddt.data({'share_proto': 'nfs', 'use_access': True}, {'share_proto': 'nfs', 'use_access': False}, {'share_proto': 'cifs', 'use_access': True}, {'share_proto': 'cifs', 'use_access': False}) @ddt.unpack def test_update_access(self, share_proto, use_access): share = fake_share.fake_share(share_proto=share_proto) share_instance = fake_share.fake_share_instance(share, host="H@B#P") access_rules = [{'access_to': 'fakename1', 'access_level': 'fakelevel1'}, {'access_to': 'fakename2', 'access_level': 'fakelevel2'}] add_rules = [{'access_to': 'fakename1', 'access_level': 'fakelevel1'}] del_rules = [{'access_to': 'fakename2', 'access_level': 'fakelevel2'}] mock_ca = self.mock_object(self.driver, '_clear_access') fake_share_backend = {'pathAuthority': 'fakepathAuthority'} mock_gns = self.mock_object(self.driver, '_get_nfs_share', mock.Mock(return_value=fake_share_backend)) mock_rest = 
self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') if use_access: self.driver.update_access(self._ctxt, share_instance, access_rules, [], [], []) else: self.driver.update_access(self._ctxt, share_instance, [], add_rules, del_rules, []) access_clients = [{'name': rule['access_to'], 'type': 0 if share_proto == 'nfs' else 1, 'authority': rule['access_level'] } for rule in access_rules] add_clients = [{'name': rule['access_to'], 'type': 0 if share_proto == 'nfs' else 1, 'authority': rule['access_level'] } for rule in add_rules] del_clients = [{'name': rule['access_to'], 'type': 0 if share_proto == 'nfs' else 1, 'authority': rule['access_level'] } for rule in del_rules] params = { 'path': r'/P/share_fakeinstanceid', 'addedClientList': [], 'deletedClientList': [], 'editedClientList': [] } if share_proto == 'nfs': mock_gns.assert_called_once_with(r'/P/share_fakeinstanceid') params['pathAuthority'] = fake_share_backend['pathAuthority'] else: params['name'] = 'share_fakeinstanceid' if use_access: mock_ca.assert_called_once_with(share_instance) params['addedClientList'] = access_clients else: params['addedClientList'] = add_clients params['deletedClientList'] = del_clients mock_rest.assert_called_once_with( method=('file/share/%s' % share_proto), params=params, request_type='put') def test__update_share_stats(self): mock_sg = self.mock_object(FakeConfig, 'safe_get', mock.Mock(return_value='fake_as13000')) self.driver.pools = ['fake_pool'] mock_gps = self.mock_object(self.driver, '_get_pool_stats', mock.Mock(return_value='fake_pool')) self.driver._token_time = time.time() mock_rt = self.mock_object(as13000_nas.RestAPIExecutor, 'refresh_token') mock_uss = self.mock_object(driver.ShareDriver, '_update_share_stats') self.driver._update_share_stats() data = {} data['vendor_name'] = self.driver.VENDOR data['driver_version'] = self.driver.VERSION data['storage_protocol'] = self.driver.PROTOCOL data['share_backend_name'] = 'fake_as13000' data['snapshot_support'] = True 
data['create_share_from_snapshot_support'] = True data['pools'] = ['fake_pool'] mock_sg.assert_called_once_with('share_backend_name') mock_gps.assert_called_once_with('fake_pool') mock_rt.assert_not_called() mock_uss.assert_called_once_with(data) def test__update_share_stats_refresh_token(self): mock_sg = self.mock_object(FakeConfig, 'safe_get', mock.Mock(return_value='fake_as13000')) self.driver.pools = ['fake_pool'] mock_gps = self.mock_object(self.driver, '_get_pool_stats', mock.Mock(return_value='fake_pool')) self.driver._token_time = ( time.time() - self.driver.token_available_time - 1) mock_rt = self.mock_object(as13000_nas.RestAPIExecutor, 'refresh_token') mock_uss = self.mock_object(driver.ShareDriver, '_update_share_stats') self.driver._update_share_stats() data = {} data['vendor_name'] = self.driver.VENDOR data['driver_version'] = self.driver.VERSION data['storage_protocol'] = self.driver.PROTOCOL data['share_backend_name'] = 'fake_as13000' data['snapshot_support'] = True data['create_share_from_snapshot_support'] = True data['pools'] = ['fake_pool'] mock_sg.assert_called_once_with('share_backend_name') mock_gps.assert_called_once_with('fake_pool') mock_rt.assert_called_once() mock_uss.assert_called_once_with(data) @ddt.data('nfs', 'cifs') def test__clear_access(self, share_proto): share = fake_share.fake_share(share_proto=share_proto) share_instance = fake_share.fake_share_instance(share, host="H@B#P") fake_share_backend = {'pathAuthority': 'fakepathAuthority', 'clientList': ['fakeclient'], 'userList': ['fakeuser']} mock_gns = self.mock_object(self.driver, '_get_nfs_share', mock.Mock(return_value=fake_share_backend)) mock_gcs = self.mock_object(self.driver, '_get_cifs_share', mock.Mock(return_value=fake_share_backend)) mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver._clear_access(share_instance) method = 'file/share/%s' % share_proto request_type = 'put' params = { 'path': r'/P/share_fakeinstanceid', 
'addedClientList': [], 'deletedClientList': [], 'editedClientList': [] } if share_proto == 'nfs': mock_gns.assert_called_once_with(r'/P/share_fakeinstanceid') params['deletedClientList'] = fake_share_backend['clientList'] params['pathAuthority'] = fake_share_backend['pathAuthority'] else: mock_gcs.assert_called_once_with('share_fakeinstanceid') params['deletedClientList'] = fake_share_backend['userList'] params['name'] = 'share_fakeinstanceid' mock_rest.assert_called_once_with(method=method, request_type=request_type, params=params) def test__validate_pools_exist(self): self.driver.pools = ['fakepool'] mock_gdl = self.mock_object(self.driver, '_get_directory_list', mock.Mock(return_value=['fakepool'])) self.driver._validate_pools_exist() mock_gdl.assert_called_once_with('/') def test__validate_pools_exist_fail(self): self.driver.pools = ['fakepool_fail'] mock_gdl = self.mock_object(self.driver, '_get_directory_list', mock.Mock(return_value=['fakepool'])) self.assertRaises(exception.InvalidInput, self.driver._validate_pools_exist) mock_gdl.assert_called_once_with('/') @ddt.data(0, 1) def test__get_directory_quota(self, hardunit): fake_data = {'hardthreshold': 200, 'hardunit': hardunit, 'capacity': '50GB'} mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=fake_data)) total, used = (self.driver._get_directory_quota('fakepath')) if hardunit == 0: self.assertEqual((200, 50), (total, used)) else: self.assertEqual((200 * 1024, 50), (total, used)) method = 'file/quota/directory?path=/fakepath' request_type = 'get' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__get_directory_quota_fail(self): fake_data = {'hardthreshold': None, 'hardunit': 0, 'capacity': '50GB'} mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=fake_data)) self.assertRaises(exception.ShareBackendException, self.driver._get_directory_quota, 'fakepath') method = 
'file/quota/directory?path=/fakepath' request_type = 'get' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__get_pool_stats(self): mock_gdq = self.mock_object(self.driver, '_get_directory_quota', mock.Mock(return_value=(200, 50))) pool = dict() pool['pool_name'] = 'fakepath' pool['reserved_percentage'] = 0 pool['reserved_snapshot_percentage'] = 0 pool['reserved_share_extend_percentage'] = 0 pool['max_over_subscription_ratio'] = 20.0 pool['dedupe'] = False pool['compression'] = False pool['qos'] = False pool['thin_provisioning'] = True pool['total_capacity_gb'] = 200 pool['free_capacity_gb'] = 150 pool['allocated_capacity_gb'] = 50 pool['snapshot_support'] = True pool['create_share_from_snapshot_support'] = True result = self.driver._get_pool_stats('fakepath') self.assertEqual(pool, result) mock_gdq.assert_called_once_with('fakepath') def test__get_directory_list(self): fake_dir_list = [{'name': 'fakedirectory1', 'size': 20}, {'name': 'fakedirectory2', 'size': 30}] mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=fake_dir_list)) expected = ['fakedirectory1', 'fakedirectory2'] result = self.driver._get_directory_list('/fakepath') self.assertEqual(expected, result) method = 'file/directory?path=/fakepath' mock_rest.assert_called_once_with(method=method, request_type='get') def test__create_directory(self): base_dir_detail = { 'path': '/fakepath', 'authorityInfo': {'user': 'root', 'group': 'root', 'authority': 'rwxrwxrwx' }, 'dataProtection': {'type': 0, 'dc': 2, 'cc': 1, 'rn': 0, 'st': 4}, 'poolName': 'storage_pool' } mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver.base_dir_detail = base_dir_detail result = self.driver._create_directory('fakename', 'fakepool') self.assertEqual('/fakepool/fakename', result) method = 'file/directory' request_type = 'post' params = {'name': 'fakename', 'parentPath': base_dir_detail['path'], 'authorityInfo': 
base_dir_detail['authorityInfo'], 'dataProtection': base_dir_detail['dataProtection'], 'poolName': base_dir_detail['poolName']} mock_rest.assert_called_once_with(method=method, request_type=request_type, params=params) def test__delete_directory(self): mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver._delete_directory('/fakepath') method = 'file/directory?path=/fakepath' request_type = 'delete' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__set_directory_quota(self): mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver._set_directory_quota('fakepath', 200) method = 'file/quota/directory' request_type = 'put' params = {'path': 'fakepath', 'hardthreshold': 200, 'hardunit': 2} mock_rest.assert_called_once_with(method=method, request_type=request_type, params=params) def test__create_nfs_share(self): mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver._create_nfs_share('fakepath') method = 'file/share/nfs' request_type = 'post' params = {'path': 'fakepath', 'pathAuthority': 'rw', 'client': []} mock_rest.assert_called_once_with(method=method, request_type=request_type, params=params) def test__delete_nfs_share(self): mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver._delete_nfs_share('/fakepath') method = 'file/share/nfs?path=/fakepath' request_type = 'delete' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__get_nfs_share(self): mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value='fakebackend')) result = self.driver._get_nfs_share('/fakepath') self.assertEqual('fakebackend', result) method = 'file/share/nfs?path=/fakepath' request_type = 'get' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__create_cifs_share(self): mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 
'send_rest_api') self.driver._create_cifs_share('fakename', 'fakepath') method = 'file/share/cifs' request_type = 'post' params = {'path': 'fakepath', 'name': 'fakename', 'userlist': []} mock_rest.assert_called_once_with(method=method, request_type=request_type, params=params) def test__delete_cifs_share(self): mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver._delete_cifs_share('fakename') method = 'file/share/cifs?name=fakename' request_type = 'delete' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__get_cifs_share(self): mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value='fakebackend')) result = self.driver._get_cifs_share('fakename') self.assertEqual('fakebackend', result) method = 'file/share/cifs?name=fakename' request_type = 'get' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__clone_directory_to_dest(self): share = fake_share.fake_share() share_instance = fake_share.fake_share_instance(share, host="H@B#P") snapshot_instance_pseudo = { 'id': 'fakesnapid', 'share_instance': share_instance } mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api') self.driver._clone_directory_to_dest(snapshot_instance_pseudo, 'fakepath') method = 'snapshot/directory/clone' request_type = 'post' params = {'path': '/P/share_fakeinstanceid', 'snapName': 'snap_fakesnapid', 'destPath': 'fakepath'} mock_rest.assert_called_once_with(method=method, request_type=request_type, params=params) def test__get_snapshots_from_share(self): mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=['fakesnap'])) result = self.driver._get_snapshots_from_share('/fakepath') self.assertEqual(['fakesnap'], result) method = 'snapshot/directory?path=/fakepath' request_type = 'get' mock_rest.assert_called_once_with(method=method, request_type=request_type) @ddt.data('nfs', 'cifs') def 
test__get_location_path(self, proto): self.driver.ips = ['ip1', 'ip2'] result = self.driver._get_location_path('fake_name', '/fake/path', proto) if proto == 'nfs': expect = [{'path': 'ip1:/fake/path'}, {'path': 'ip2:/fake/path'}] else: expect = [{'path': r'\\ip1\fake_name'}, {'path': r'\\ip2\fake_name'}] self.assertEqual(expect, result) def test__get_nodes_virtual_ips(self): ctdb_set = { 'virtualIpList': [{'ip': 'fakeip1/24'}, {'ip': 'fakeip2/24'}] } mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=ctdb_set)) result = self.driver._get_nodes_virtual_ips() self.assertEqual(result, ['fakeip1', 'fakeip2']) mock_rest.assert_called_once_with(method='ctdb/set', request_type='get') def test__get_nodes_physical_ips(self): nodes = [{'nodeIp': 'fakeip1', 'runningStatus': 1, 'healthStatus': 1}, {'nodeIp': 'fakeip2', 'runningStatus': 1, 'healthStatus': 0}, {'nodeIp': 'fakeip3', 'runningStatus': 0, 'healthStatus': 1}, {'nodeIp': 'fakeip4', 'runningStatus': 0, 'healthStatus': 0}] mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=nodes)) result = self.driver._get_nodes_physical_ips() expect = ['fakeip1'] self.assertEqual(expect, result) mock_rest.assert_called_once_with(method='cluster/node/cache', request_type='get') def test__get_nodes_ips(self): mock_virtual = self.mock_object(self.driver, '_get_nodes_virtual_ips', mock.Mock(return_value=['ip1'])) mock_physical = self.mock_object(self.driver, '_get_nodes_physical_ips', mock.Mock(return_value=['ip2'])) result = self.driver._get_nodes_ips() self.assertEqual(['ip1', 'ip2'], result) mock_virtual.assert_called_once() mock_physical.assert_called_once() @ddt.data('nfs', 'cifs') def test__get_share_instance_pnsp(self, share_proto): share = fake_share.fake_share(share_proto=share_proto) share_instance = fake_share.fake_share_instance(share, host="H@B#P") result = self.driver._get_share_instance_pnsp(share_instance) self.assertEqual(('P', 
'share_fakeinstanceid', 1, share_proto), result) @ddt.data('5000000000', '5000000k', '5000mb', '50G', '5TB') def test__unit_convert(self, capacity): trans = {'5000000000': '%.0f' % (float(5000000000) / 1024 ** 3), '5000000k': '%.0f' % (float(5000000) / 1024 ** 2), '5000mb': '%.0f' % (float(5000) / 1024), '50G': '%.0f' % float(50), '5TB': '%.0f' % (float(5) * 1024)} expect = float(trans[capacity]) result = self.driver._unit_convert(capacity) self.assertEqual(expect, result) def test__format_name(self): a = 'atest-1234567890-1234567890-1234567890' expect = 'atest_1234567890_1234567890_1234' result = self.driver._format_name(a) self.assertEqual(expect, result) def test__generate_share_name(self): share = fake_share.fake_share() share_instance = fake_share.fake_share_instance(share, host="H@B#P") result = self.driver._generate_share_name(share_instance) self.assertEqual('share_fakeinstanceid', result) def test__generate_snapshot_name(self): snapshot_instance_pesudo = {'id': 'fakesnapinstanceid'} result = self.driver._generate_snapshot_name(snapshot_instance_pesudo) self.assertEqual('snap_fakesnapinstanceid', result) def test__generate_share_path(self): result = self.driver._generate_share_path('fakepool', 'fakename') self.assertEqual('/fakepool/fakename', result) def test__get_directory_detail(self): details = [{'poolName': 'fakepool1'}, {'poolName': 'fakepool2'}] mock_rest = self.mock_object(as13000_nas.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=details)) result = self.driver._get_directory_detail('fakepath') self.assertEqual(details[0], result) method = 'file/directory/detail?path=/fakepath' request_type = 'get' mock_rest.assert_called_once_with(method=method, request_type=request_type) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0256703 manila-21.0.0/manila/tests/share/drivers/inspur/instorage/0000775000175000017500000000000000000000000023554 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/inspur/instorage/__init__.py0000664000175000017500000000000000000000000025653 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/inspur/instorage/test_instorage.py0000664000175000017500000014570000000000000027167 0ustar00zuulzuul00000000000000# Copyright 2019 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Share driver test for Inspur InStorage """ from unittest import mock import ddt from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg import paramiko from manila import context from manila import exception from manila.share import driver from manila.share.drivers.inspur.instorage import cli_helper from manila.share.drivers.inspur.instorage import instorage from manila import ssh_utils from manila import test from manila.tests import fake_share from manila import utils as manila_utils CONF = cfg.CONF class FakeConfig(object): def __init__(self, *args, **kwargs): self.driver_handles_share_servers = False self.share_driver = 'fake_share_driver_name' self.share_backend_name = 'fake_instorage' self.instorage_nas_ip = kwargs.get( 'instorage_nas_ip', 'some_ip') self.instorage_nas_port = kwargs.get( 'instorage_nas_port', 'some_port') self.instorage_nas_login = kwargs.get( 'instorage_nas_login', 'username') self.instorage_nas_password = kwargs.get( 'instorage_nas_password', 'password') self.instorage_nas_pools = kwargs.get( 'instorage_nas_pools', ['fakepool']) self.network_config_group = kwargs.get( "network_config_group", "fake_network_config_group") self.admin_network_config_group = kwargs.get( "admin_network_config_group", "fake_admin_network_config_group") self.config_group = kwargs.get("config_group", "fake_config_group") self.reserved_share_percentage = kwargs.get( "reserved_share_percentage", 0) self.reserved_share_from_snapshot_percentage = kwargs.get( "reserved_share_from_snapshot_percentage", 0) self.reserved_share_extend_percentage = kwargs.get( "reserved_share_extend_percentage", 0) self.max_over_subscription_ratio = kwargs.get( "max_over_subscription_ratio", 0) self.filter_function = kwargs.get("filter_function", None) self.goodness_function = kwargs.get("goodness_function", None) def safe_get(self, key): return getattr(self, key) def append_config_values(self, *args, **kwargs): pass @ddt.ddt class 
InStorageShareDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(InStorageShareDriverTestCase, self).__init__(*args, **kwargs) self._ctxt = context.get_admin_context() self.configuration = FakeConfig() self.share = fake_share.fake_share() self.share_instance = fake_share.fake_share_instance( self.share, host='H@B#P' ) def setUp(self): self.mock_object(instorage.CONF, '_check_required_opts') self.driver = instorage.InStorageShareDriver( configuration=self.configuration ) super(InStorageShareDriverTestCase, self).setUp() def test_check_for_setup_error_failed_no_nodes(self): mock_gni = mock.Mock(return_value={}) self.mock_object( instorage.InStorageAssistant, 'get_nodes_info', mock_gni ) self.assertRaises( exception.ShareBackendException, self.driver.check_for_setup_error ) def test_check_for_setup_error_failed_pool_invalid(self): mock_gni = mock.Mock(return_value={'node1': {}}) self.mock_object( instorage.InStorageAssistant, 'get_nodes_info', mock_gni ) mock_gap = mock.Mock(return_value=['pool0']) self.mock_object( instorage.InStorageAssistant, 'get_available_pools', mock_gap ) self.assertRaises( exception.InvalidParameterValue, self.driver.check_for_setup_error ) def test_check_for_setup_error_success(self): mock_gni = mock.Mock(return_value={'node1': {}}) self.mock_object( instorage.InStorageAssistant, 'get_nodes_info', mock_gni ) mock_gap = mock.Mock(return_value=['fakepool', 'pool0']) self.mock_object( instorage.InStorageAssistant, 'get_available_pools', mock_gap ) self.driver.check_for_setup_error() mock_gni.assert_called_once() mock_gap.assert_called_once() def test__update_share_stats(self): pool_attr = { 'pool0': { 'pool_name': 'pool0', 'total_capacity_gb': 110, 'free_capacity_gb': 100, 'allocated_capacity_gb': 10, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'qos': False, 'dedupe': False, 'compression': False, 'thin_provisioning': False, 'max_over_subscription_ratio': 0 } } 
mock_gpa = mock.Mock(return_value=pool_attr) self.mock_object( instorage.InStorageAssistant, 'get_pools_attr', mock_gpa ) mock_uss = mock.Mock() self.mock_object(driver.ShareDriver, '_update_share_stats', mock_uss) self.driver._update_share_stats() mock_gpa.assert_called_once_with(['fakepool']) stats = { 'share_backend_name': 'fake_instorage', 'vendor_name': 'INSPUR', 'driver_version': '1.0.0', 'storage_protocol': 'NFS_CIFS', 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'max_over_subscription_ratio': 0, 'snapshot_support': False, 'create_share_from_snapshot_support': False, 'revert_to_snapshot_support': False, 'qos': False, 'total_capacity_gb': 110, 'free_capacity_gb': 100, 'pools': [pool_attr['pool0']] } mock_uss.assert_called_once_with(stats) @ddt.data( {'id': 'abc-123', 'real': 'abc123'}, {'id': '123-abc', 'real': 'B23abc'}) @ddt.unpack def test_generate_share_name(self, id, real): ret = self.driver.generate_share_name({'id': id}) self.assertEqual(real, ret) def test_get_network_allocations_number(self): ret = self.driver.get_network_allocations_number() self.assertEqual(0, ret) def test_create_share(self): mock_cs = self.mock_object( instorage.InStorageAssistant, 'create_share' ) mock_gel = self.mock_object( instorage.InStorageAssistant, 'get_export_locations', mock.Mock(return_value=['fake_export_location']) ) ret = self.driver.create_share(self._ctxt, self.share_instance) self.assertEqual(['fake_export_location'], ret) mock_cs.assert_called_once_with('fakeinstanceid', 'P', 1, 'fake_proto') mock_gel.assert_called_once_with('fakeinstanceid', 'fake_proto') def test_delete_share(self): mock_ds = self.mock_object( instorage.InStorageAssistant, 'delete_share' ) self.driver.delete_share(self._ctxt, self.share_instance) mock_ds.assert_called_once_with('fakeinstanceid', 'fake_proto') def test_extend_share(self): mock_es = self.mock_object( instorage.InStorageAssistant, 'extend_share' ) 
self.driver.extend_share(self.share_instance, 3) mock_es.assert_called_once_with('fakeinstanceid', 3) def test_ensure_share(self): mock_gel = self.mock_object( instorage.InStorageAssistant, 'get_export_locations', mock.Mock(return_value=['fake_export_location']) ) ret = self.driver.ensure_share(self._ctxt, self.share_instance) self.assertEqual(['fake_export_location'], ret) mock_gel.assert_called_once_with('fakeinstanceid', 'fake_proto') def test_update_access(self): mock_ua = self.mock_object( instorage.InStorageAssistant, 'update_access' ) self.driver.update_access( self._ctxt, self.share_instance, [], [], [], []) mock_ua.assert_called_once_with( 'fakeinstanceid', 'fake_proto', [], [], [] ) class FakeSSH(object): def __enter__(self): return self def __exit__(self, exec_type, exec_val, exec_tb): if exec_val: raise class FakeSSHPool(object): def __init__(self, ssh): self.fakessh = ssh def item(self): return self.fakessh class SSHRunnerTestCase(test.TestCase): def setUp(self): self.fakessh = FakeSSH() self.fakePool = FakeSSHPool(self.fakessh) super(SSHRunnerTestCase, self).setUp() def test___call___success(self): mock_csi = self.mock_object(manila_utils, 'check_ssh_injection') mock_sshpool = mock.Mock(return_value=self.fakePool) self.mock_object(ssh_utils, 'SSHPool', mock_sshpool) mock_se = mock.Mock(return_value='fake_value') self.mock_object(cli_helper.SSHRunner, '_ssh_execute', mock_se) runner = cli_helper.SSHRunner( '127.0.0.1', '22', 'fakeuser', 'fakepassword' ) ret = runner(['mcsinq', 'lsvdisk']) mock_csi.assert_called_once_with(['mcsinq', 'lsvdisk']) mock_sshpool.assert_called_once_with( '127.0.0.1', '22', 60, 'fakeuser', password='fakepassword', privatekey=None, min_size=1, max_size=10 ) mock_se.assert_called_once_with( self.fakePool, 'mcsinq lsvdisk', True, 1 ) self.assertEqual('fake_value', ret) def test___call___ssh_pool_failed(self): mock_csi = self.mock_object(manila_utils, 'check_ssh_injection') mock_sshpool = 
mock.Mock(side_effect=paramiko.SSHException()) self.mock_object(ssh_utils, 'SSHPool', mock_sshpool) runner = cli_helper.SSHRunner( '127.0.0.1', '22', 'fakeuser', 'fakepassword' ) self.assertRaises(paramiko.SSHException, runner, ['mcsinq', 'lsvdisk']) mock_csi.assert_called_once_with(['mcsinq', 'lsvdisk']) def test___call___ssh_exec_failed(self): mock_csi = self.mock_object(manila_utils, 'check_ssh_injection') mock_sshpool = mock.Mock(return_value=self.fakePool) self.mock_object(ssh_utils, 'SSHPool', mock_sshpool) exception = processutils.ProcessExecutionError() mock_se = mock.Mock(side_effect=exception) self.mock_object(cli_helper.SSHRunner, '_ssh_execute', mock_se) runner = cli_helper.SSHRunner( '127.0.0.1', '22', 'fakeuser', 'fakepassword' ) self.assertRaises( processutils.ProcessExecutionError, runner, ['mcsinq', 'lsvdisk'] ) mock_csi.assert_called_once_with(['mcsinq', 'lsvdisk']) mock_sshpool.assert_called_once_with( '127.0.0.1', '22', 60, 'fakeuser', password='fakepassword', privatekey=None, min_size=1, max_size=10 ) def test__ssh_execute_success(self): mock_se = mock.Mock(return_value='fake_value') self.mock_object(processutils, 'ssh_execute', mock_se) runner = cli_helper.SSHRunner( '127.0.0.1', '22', 'fakeuser', 'fakepassword' ) ret = runner._ssh_execute(self.fakePool, 'mcsinq lsvdisk') mock_se.assert_called_once_with( self.fakessh, 'mcsinq lsvdisk', check_exit_code=True ) self.assertEqual('fake_value', ret) def test__ssh_execute_success_run_again(self): mock_se = mock.Mock(side_effect=[Exception(), 'fake_value']) self.mock_object(processutils, 'ssh_execute', mock_se) mock_sleep = self.mock_object(greenthread, 'sleep') runner = cli_helper.SSHRunner( '127.0.0.1', '22', 'fakeuser', 'fakepassword' ) ret = runner._ssh_execute( self.fakePool, 'mcsinq lsvdisk', check_exit_code=True, attempts=2 ) call = mock.call(self.fakessh, 'mcsinq lsvdisk', check_exit_code=True) mock_se.assert_has_calls([call, call]) mock_sleep.assert_called_once() 
self.assertEqual('fake_value', ret) def test__ssh_execute_failed_exec_failed(self): exception = Exception() exception.exit_code = '1' exception.stdout = 'fake_stdout' exception.stderr = 'fake_stderr' exception.cmd = 'fake_cmd_list' mock_se = mock.Mock(side_effect=exception) self.mock_object(processutils, 'ssh_execute', mock_se) mock_sleep = self.mock_object(greenthread, 'sleep') runner = cli_helper.SSHRunner( '127.0.0.1', '22', 'fakeuser', 'fakepassword' ) self.assertRaises( processutils.ProcessExecutionError, runner._ssh_execute, self.fakePool, 'mcsinq lsvdisk', check_exit_code=True, attempts=1 ) mock_se.assert_called_once_with( self.fakessh, 'mcsinq lsvdisk', check_exit_code=True ) mock_sleep.assert_called_once() def test__ssh_execute_failed_exec_failed_exception_error(self): mock_se = mock.Mock(side_effect=Exception()) self.mock_object(processutils, 'ssh_execute', mock_se) mock_sleep = self.mock_object(greenthread, 'sleep') runner = cli_helper.SSHRunner( '127.0.0.1', '22', 'fakeuser', 'fakepassword' ) self.assertRaises( processutils.ProcessExecutionError, runner._ssh_execute, self.fakePool, 'mcsinq lsvdisk', check_exit_code=True, attempts=1 ) mock_se.assert_called_once_with( self.fakessh, 'mcsinq lsvdisk', check_exit_code=True ) mock_sleep.assert_called_once() class CLIParserTestCase(test.TestCase): def test_cliparser_with_header(self): cmdlist = ['mcsinq', 'lsnasportip', '-delim', '!'] response = [ 'head1!head2', 'r1c1!r1c2', 'r2c1!r2c2' ] response = '\n'.join(response) ret = cli_helper.CLIParser( response, cmdlist, delim='!', with_header=True ) self.assertEqual(2, len(ret)) self.assertEqual('r1c1', ret[0]['head1']) self.assertEqual('r1c2', ret[0]['head2']) self.assertEqual('r2c1', ret[1]['head1']) self.assertEqual('r2c2', ret[1]['head2']) value = [(v['head1'], v['head2']) for v in ret] self.assertEqual([('r1c1', 'r1c2'), ('r2c1', 'r2c2')], value) def test_cliparser_without_header(self): cmdlist = ['mcsinq', 'lsnasportip', '-delim', '!'] response = [ 
'head1!p1v1', 'head2!p1v2', '', 'head1!p2v1', 'head2!p2v2' ] response = '\n'.join(response) ret = cli_helper.CLIParser( response, cmdlist, delim='!', with_header=False ) self.assertEqual(2, len(ret)) self.assertEqual('p1v1', ret[0]['head1']) self.assertEqual('p1v2', ret[0]['head2']) self.assertEqual('p2v1', ret[1]['head1']) self.assertEqual('p2v2', ret[1]['head2']) @ddt.ddt class InStorageSSHTestCase(test.TestCase): def setUp(self): self.sshMock = mock.Mock() self.ssh = cli_helper.InStorageSSH(self.sshMock) super(InStorageSSHTestCase, self).setUp() def tearDown(self): super(InStorageSSHTestCase, self).tearDown() @ddt.data(None, 'node1') def test_lsnode(self, node_id): if node_id: cmd = ['mcsinq', 'lsnode', '-delim', '!', node_id] response = [ 'id!1', 'name!node1' ] else: cmd = ['mcsinq', 'lsnode', '-delim', '!'] response = [ 'id!name', '1!node1', '2!node2' ] response = '\n'.join(response) self.sshMock.return_value = (response, '') ret = self.ssh.lsnode(node_id) if node_id: self.sshMock.assert_called_once_with(cmd) self.assertEqual('node1', ret[0]['name']) else: self.sshMock.assert_called_once_with(cmd) self.assertEqual('node1', ret[0]['name']) self.assertEqual('node2', ret[1]['name']) @ddt.data(None, 'Pool0') def test_lsnaspool(self, pool_id): response = [ 'pool_name!available_capacity', 'Pool0!2GB' ] if pool_id is None: response.append('Pool1!3GB') response = '\n'.join(response) self.sshMock.return_value = (response, '') ret = self.ssh.lsnaspool(pool_id) if pool_id is None: cmd = ['mcsinq', 'lsnaspool', '-delim', '!'] self.sshMock.assert_called_once_with(cmd) self.assertEqual('Pool0', ret[0]['pool_name']) self.assertEqual('2GB', ret[0]['available_capacity']) self.assertEqual('Pool1', ret[1]['pool_name']) self.assertEqual('3GB', ret[1]['available_capacity']) else: cmd = ['mcsinq', 'lsnaspool', '-delim', '!', pool_id] self.sshMock.assert_called_once_with(cmd) self.assertEqual('Pool0', ret[0]['pool_name']) self.assertEqual('2GB', ret[0]['available_capacity']) 
@ddt.data({'node_name': 'node1', 'fsname': 'fs1'}, {'node_name': 'node1', 'fsname': None}, {'node_name': None, 'fsname': 'fs1'}, {'node_name': None, 'fsname': None}) @ddt.unpack def test_lsfs(self, node_name, fsname): response = [ 'pool_name!fs_name!total_capacity!used_capacity', 'pool0!fs0!10GB!1GB', 'pool1!fs1!8GB!3GB' ] response = '\n'.join(response) self.sshMock.return_value = (response, '') if fsname and not node_name: self.assertRaises(exception.InvalidParameterValue, self.ssh.lsfs, node_name=node_name, fsname=fsname) else: ret = self.ssh.lsfs(node_name, fsname) cmdlist = [] if node_name and not fsname: cmdlist = ['mcsinq', 'lsfs', '-delim', '!', '-node', '"node1"'] elif node_name and fsname: cmdlist = ['mcsinq', 'lsfs', '-delim', '!', '-node', '"node1"', '-name', '"fs1"'] else: cmdlist = ['mcsinq', 'lsfs', '-delim', '!', '-all'] self.sshMock.assert_called_once_with(cmdlist) self.assertEqual('pool0', ret[0]['pool_name']) self.assertEqual('fs0', ret[0]['fs_name']) self.assertEqual('10GB', ret[0]['total_capacity']) self.assertEqual('1GB', ret[0]['used_capacity']) self.assertEqual('pool1', ret[1]['pool_name']) self.assertEqual('fs1', ret[1]['fs_name']) self.assertEqual('8GB', ret[1]['total_capacity']) self.assertEqual('3GB', ret[1]['used_capacity']) def test_addfs(self): self.sshMock.return_value = ('', '') self.ssh.addfs('fsname', 'fake_pool', 1, 'node1') cmdlist = ['mcsop', 'addfs', '-name', '"fsname"', '-pool', '"fake_pool"', '-size', '1g', '-node', '"node1"'] self.sshMock.assert_called_once_with(cmdlist) def test_rmfs(self): self.sshMock.return_value = ('', '') self.ssh.rmfs('fsname') cmdlist = ['mcsop', 'rmfs', '-name', '"fsname"'] self.sshMock.assert_called_once_with(cmdlist) def test_expandfs(self): self.sshMock.return_value = ('', '') self.ssh.expandfs('fsname', 2) cmdlist = ['mcsop', 'expandfs', '-name', '"fsname"', '-size', '2g'] self.sshMock.assert_called_once_with(cmdlist) def test_lsnasdir(self): response = [ 'parent_dir!name', 
'/fs/test_01!share_01' ] response = '\n'.join(response) self.sshMock.return_value = (response, '') ret = self.ssh.lsnasdir('/fs/test_01') cmdlist = ['mcsinq', 'lsnasdir', '-delim', '!', '"/fs/test_01"'] self.sshMock.assert_called_once_with(cmdlist) self.assertEqual('/fs/test_01', ret[0]['parent_dir']) self.assertEqual('share_01', ret[0]['name']) def test_addnasdir(self): self.sshMock.return_value = ('', '') self.ssh.addnasdir('/fs/test_01/share_01') cmdlist = ['mcsop', 'addnasdir', '"/fs/test_01/share_01"'] self.sshMock.assert_called_once_with(cmdlist) def test_chnasdir(self): self.sshMock.return_value = ('', '') self.ssh.chnasdir('/fs/test_01/share_01', '/fs/test_01/share_02') cmdlist = ['mcsop', 'chnasdir', '-oldpath', '"/fs/test_01/share_01"', '-newpath', '"/fs/test_01/share_02"'] self.sshMock.assert_called_once_with(cmdlist) def test_rmnasdir(self): self.sshMock.return_value = ('', '') self.ssh.rmnasdir('/fs/test_01/share_01') cmdlist = ['mcsop', 'rmnasdir', '"/fs/test_01/share_01"'] self.sshMock.assert_called_once_with(cmdlist) def test_rmnfs(self): self.sshMock.return_value = ('', '') self.ssh.rmnfs('/fs/test_01/share_01') cmdlist = ['mcsop', 'rmnfs', '"/fs/test_01/share_01"'] self.sshMock.assert_called_once_with(cmdlist) @ddt.data(None, '/fs/test_01') def test_lsnfslist(self, prefix): cmdlist = ['mcsinq', 'lsnfslist', '-delim', '!'] if prefix: cmdlist.append('"/fs/test_01"') response = '\n'.join([ 'path', '/fs/test_01/share_01', '/fs/test_01/share_02' ]) self.sshMock.return_value = (response, '') ret = self.ssh.lsnfslist(prefix) self.sshMock.assert_called_once_with(cmdlist) self.assertEqual('/fs/test_01/share_01', ret[0]['path']) self.assertEqual('/fs/test_01/share_02', ret[1]['path']) def test_lsnfsinfo(self): cmdlist = [ 'mcsinq', 'lsnfsinfo', '-delim', '!', '"/fs/test_01/share_01"' ] response = '\n'.join([ 'ip!mask!rights!root_squash!all_squash', '192.168.1.0!255.255.255.0!rw!root_squash!all_squash' ]) self.sshMock.return_value = (response, '') ret = 
self.ssh.lsnfsinfo('/fs/test_01/share_01') self.sshMock.assert_called_once_with(cmdlist) self.assertEqual('192.168.1.0', ret[0]['ip']) self.assertEqual('255.255.255.0', ret[0]['mask']) self.assertEqual('rw', ret[0]['rights']) def test_addnfsclient(self): self.sshMock.return_value = ('', '') cmdlist = [ 'mcsop', 'addnfsclient', '-path', '"/fs/test_01/share_01"', '-client', '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' ] self.ssh.addnfsclient( '/fs/test_01/share_01', '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' ) self.sshMock.assert_called_once_with(cmdlist) def test_chnfsclient(self): self.sshMock.return_value = ('', '') cmdlist = [ 'mcsop', 'chnfsclient', '-path', '"/fs/test_01/share_01"', '-client', '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' ] self.ssh.chnfsclient( '/fs/test_01/share_01', '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' ) self.sshMock.assert_called_once_with(cmdlist) def test_rmnfsclient(self): self.sshMock.return_value = ('', '') cmdlist = [ 'mcsop', 'rmnfsclient', '-path', '"/fs/test_01/share_01"', '-client', '192.168.1.0/255.255.255.0' ] self.ssh.rmnfsclient( '/fs/test_01/share_01', '192.168.1.0/255.255.255.0:rw:ALL_SQUASH:ROOT_SQUASH' ) self.sshMock.assert_called_once_with(cmdlist) @ddt.data(None, 'cifs') def test_lscifslist(self, filter): cmdlist = ['mcsinq', 'lscifslist', '-delim', '!'] if filter: cmdlist.append('"%s"' % filter) response = '\n'.join([ 'name!path', 'cifs!/fs/test_01/share_01' ]) self.sshMock.return_value = (response, '') ret = self.ssh.lscifslist(filter) self.sshMock.assert_called_once_with(cmdlist) self.assertEqual('cifs', ret[0]['name']) self.assertEqual('/fs/test_01/share_01', ret[0]['path']) def test_lscifsinfo(self): cmdlist = ['mcsinq', 'lscifsinfo', '-delim', '!', '"cifs"'] response = '\n'.join([ 'path!oplocks!type!name!rights', '/fs/test_01/share_01!on!LU!user1!rw' ]) self.sshMock.return_value = (response, '') ret = self.ssh.lscifsinfo('cifs') 
self.sshMock.assert_called_once_with(cmdlist) self.assertEqual('/fs/test_01/share_01', ret[0]['path']) self.assertEqual('on', ret[0]['oplocks']) self.assertEqual('LU', ret[0]['type']) self.assertEqual('user1', ret[0]['name']) self.assertEqual('rw', ret[0]['rights']) def test_addcifs(self): self.sshMock.return_value = ('', '') cmdlist = [ 'mcsop', 'addcifs', '-name', 'cifs', '-path', '/fs/test_01/share_01', '-oplocks', 'off' ] self.ssh.addcifs('cifs', '/fs/test_01/share_01', 'off') self.sshMock.assert_called_once_with(cmdlist) def test_rmcifs(self): self.sshMock.return_value = ('', '') cmdlist = ['mcsop', 'rmcifs', 'cifs'] self.ssh.rmcifs('cifs') self.sshMock.assert_called_once_with(cmdlist) def test_chcifs(self): self.sshMock.return_value = ('', '') cmdlist = ['mcsop', 'chcifs', '-name', 'cifs', '-oplocks', 'off'] self.ssh.chcifs('cifs', 'off') self.sshMock.assert_called_once_with(cmdlist) def test_addcifsuser(self): self.sshMock.return_value = ('', '') cmdlist = [ 'mcsop', 'addcifsuser', '-name', 'cifs', '-rights', 'LU:user1:rw' ] self.ssh.addcifsuser('cifs', 'LU:user1:rw') self.sshMock.assert_called_once_with(cmdlist) def test_chcifsuser(self): self.sshMock.return_value = ('', '') cmdlist = [ 'mcsop', 'chcifsuser', '-name', 'cifs', '-rights', 'LU:user1:rw' ] self.ssh.chcifsuser('cifs', 'LU:user1:rw') self.sshMock.assert_called_once_with(cmdlist) def test_rmcifsuser(self): self.sshMock.return_value = ('', '') cmdlist = [ 'mcsop', 'rmcifsuser', '-name', 'cifs', '-rights', 'LU:user1' ] self.ssh.rmcifsuser('cifs', 'LU:user1:rw') self.sshMock.assert_called_once_with(cmdlist) def test_lsnasportip(self): cmdlist = ['mcsinq', 'lsnasportip', '-delim', '!'] response = '\n'.join([ 'node_name!id!ip!mask!gw!link_state', 'node1!1!192.168.10.1!255.255.255.0!192.168.10.254!active', 'node2!1!192.168.10.2!255.255.255.0!192.168.10.254!inactive' ]) self.sshMock.return_value = (response, '') ret = self.ssh.lsnasportip() self.sshMock.assert_called_once_with(cmdlist) 
self.assertEqual('node1', ret[0]['node_name']) self.assertEqual('1', ret[0]['id']) self.assertEqual('192.168.10.1', ret[0]['ip']) self.assertEqual('255.255.255.0', ret[0]['mask']) self.assertEqual('192.168.10.254', ret[0]['gw']) self.assertEqual('active', ret[0]['link_state']) self.assertEqual('node2', ret[1]['node_name']) self.assertEqual('1', ret[1]['id']) self.assertEqual('192.168.10.2', ret[1]['ip']) self.assertEqual('255.255.255.0', ret[1]['mask']) self.assertEqual('192.168.10.254', ret[1]['gw']) self.assertEqual('inactive', ret[1]['link_state']) @ddt.ddt class InStorageAssistantTestCase(test.TestCase): def setUp(self): self.sshMock = mock.Mock() self.assistant = instorage.InStorageAssistant(self.sshMock) super(InStorageAssistantTestCase, self).setUp() def tearDown(self): super(InStorageAssistantTestCase, self).tearDown() @ddt.data( {'size': '1000MB', 'gb_size': 1}, {'size': '3GB', 'gb_size': 3}, {'size': '4TB', 'gb_size': 4096}, {'size': '5PB', 'gb_size': 5242880}) @ddt.unpack def test_size_to_gb(self, size, gb_size): ret = self.assistant.size_to_gb(size) self.assertEqual(gb_size, ret) def test_get_available_pools(self): response_for_lsnaspool = ('\n'.join([ 'pool_name!available_capacity', 'pool0!100GB', 'pool1!150GB' ]), '') cmdlist = ['mcsinq', 'lsnaspool', '-delim', '!'] self.sshMock.return_value = response_for_lsnaspool ret = self.assistant.get_available_pools() pools = ['pool0', 'pool1'] self.assertEqual(pools, ret) self.sshMock.assert_called_once_with(cmdlist) def test_get_pools_attr(self): response_for_lsfs = ('\n'.join([ 'pool_name!fs_name!total_capacity!used_capacity', 'pool0!fs0!10GB!1GB', 'pool1!fs1!8GB!3GB' ]), '') call_for_lsfs = mock.call(['mcsinq', 'lsfs', '-delim', '!', '-all']) response_for_lsnaspool = ('\n'.join([ 'pool_name!available_capacity', 'pool0!100GB', 'pool1!150GB' ]), '') call_for_lsnaspool = mock.call(['mcsinq', 'lsnaspool', '-delim', '!']) self.sshMock.side_effect = [ response_for_lsfs, response_for_lsnaspool ] ret = 
self.assistant.get_pools_attr(['pool0']) pools = { 'pool0': { 'pool_name': 'pool0', 'total_capacity_gb': 110, 'free_capacity_gb': 100, 'allocated_capacity_gb': 10, 'qos': False, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'dedupe': False, 'compression': False, 'thin_provisioning': False, 'max_over_subscription_ratio': 0 } } self.assertEqual(pools, ret) self.sshMock.assert_has_calls([call_for_lsfs, call_for_lsnaspool]) def test_get_nodes_info(self): response_for_lsnasportip = ('\n'.join([ 'node_name!id!ip!mask!gw!link_state', 'node1!1!192.168.10.1!255.255.255.0!192.168.10.254!active', 'node2!1!192.168.10.2!255.255.255.0!192.168.10.254!inactive', 'node1!2!!!!inactive', 'node2!2!!!!inactive' ]), '') call_for_lsnasportip = mock.call([ 'mcsinq', 'lsnasportip', '-delim', '!' ]) self.sshMock.side_effect = [response_for_lsnasportip] ret = self.assistant.get_nodes_info() nodes = { 'node1': { '1': { 'node_name': 'node1', 'id': '1', 'ip': '192.168.10.1', 'mask': '255.255.255.0', 'gw': '192.168.10.254', 'link_state': 'active' } }, 'node2': { '1': { 'node_name': 'node2', 'id': '1', 'ip': '192.168.10.2', 'mask': '255.255.255.0', 'gw': '192.168.10.254', 'link_state': 'inactive' } } } self.assertEqual(nodes, ret) self.sshMock.assert_has_calls([call_for_lsnasportip]) @ddt.data( {'name': '1' * 30, 'fsname': '1' * 30}, {'name': '1' * 40, 'fsname': '1' * 32}) @ddt.unpack def test_get_fsname_by_name(self, name, fsname): ret = self.assistant.get_fsname_by_name(name) self.assertEqual(fsname, ret) @ddt.data( {'name': '1' * 30, 'dirname': '1' * 30}, {'name': '1' * 40, 'dirname': '1' * 32}) @ddt.unpack def test_get_dirsname_by_name(self, name, dirname): ret = self.assistant.get_dirname_by_name(name) self.assertEqual(dirname, ret) @ddt.data( {'name': '1' * 30, 'dirpath': '/fs/' + '1' * 30 + '/' + '1' * 30}, {'name': '1' * 40, 'dirpath': '/fs/' + '1' * 32 + '/' + '1' * 32}) @ddt.unpack def test_get_dirpath_by_name(self, name, dirpath): 
ret = self.assistant.get_dirpath_by_name(name) self.assertEqual(dirpath, ret) @ddt.data('CIFS', 'NFS') def test_create_share(self, proto): response_for_lsnasportip = ('\n'.join([ 'node_name!id!ip!mask!gw!link_state', 'node1!1!192.168.10.1!255.255.255.0!192.168.10.254!active' ]), '') call_for_lsnasportip = mock.call([ 'mcsinq', 'lsnasportip', '-delim', '!' ]) response_for_addfs = ('', '') call_for_addfs = mock.call([ 'mcsop', 'addfs', '-name', '"fakename"', '-pool', '"fakepool"', '-size', '10g', '-node', '"node1"' ]) response_for_addnasdir = ('', '') call_for_addnasdir = mock.call([ 'mcsop', 'addnasdir', '"/fs/fakename/fakename"' ]) response_for_addcifs = ('', '') call_for_addcifs = mock.call([ 'mcsop', 'addcifs', '-name', 'fakename', '-path', '/fs/fakename/fakename', '-oplocks', 'off' ]) side_effect = [ response_for_lsnasportip, response_for_addfs, response_for_addnasdir ] calls = [call_for_lsnasportip, call_for_addfs, call_for_addnasdir] if proto == 'CIFS': side_effect.append(response_for_addcifs) calls.append(call_for_addcifs) self.sshMock.side_effect = side_effect self.assistant.create_share('fakename', 'fakepool', 10, proto) self.sshMock.assert_has_calls(calls) @ddt.data(True, False) def test_check_share_exist(self, exist): response_for_lsfs = ('\n'.join([ 'pool_name!fs_name!total_capacity!used_capacity', 'pool0!fs0!10GB!1GB', 'pool1!fs1!8GB!3GB' ]), '') call_for_lsfs = mock.call([ 'mcsinq', 'lsfs', '-delim', '!', '-all' ]) self.sshMock.side_effect = [ response_for_lsfs ] share_name = 'fs0' if exist else 'fs2' ret = self.assistant.check_share_exist(share_name) self.assertEqual(exist, ret) self.sshMock.assert_has_calls([call_for_lsfs]) @ddt.data({'proto': 'CIFS', 'share_exist': False}, {'proto': 'CIFS', 'share_exist': True}, {'proto': 'NFS', 'share_exist': False}, {'proto': 'NFS', 'share_exist': True}) @ddt.unpack def test_delete_share(self, proto, share_exist): mock_cse = self.mock_object( instorage.InStorageAssistant, 'check_share_exist', 
mock.Mock(return_value=share_exist) ) response_for_rmcifs = ('', '') call_for_rmcifs = mock.call([ 'mcsop', 'rmcifs', 'fakename' ]) response_for_rmnasdir = ('', '') call_for_rmnasdir = mock.call([ 'mcsop', 'rmnasdir', '"/fs/fakename/fakename"' ]) response_for_rmfs = ('', '') call_for_rmfs = mock.call([ 'mcsop', 'rmfs', '-name', '"fakename"' ]) side_effect = [response_for_rmnasdir, response_for_rmfs] calls = [call_for_rmnasdir, call_for_rmfs] if proto == 'CIFS': side_effect.insert(0, response_for_rmcifs) calls.insert(0, call_for_rmcifs) self.sshMock.side_effect = side_effect self.assistant.delete_share('fakename', proto) mock_cse.assert_called_once_with('fakename') if share_exist: self.sshMock.assert_has_calls(calls) else: self.sshMock.assert_not_called() def test_extend_share(self): response_for_lsfs = ('\n'.join([ 'pool_name!fs_name!total_capacity!used_capacity', 'pool0!fs0!10GB!1GB', 'pool1!fs1!8GB!3GB' ]), '') call_for_lsfs = mock.call([ 'mcsinq', 'lsfs', '-delim', '!', '-all' ]) response_for_expandfs = ('', '') call_for_expandfs = mock.call([ 'mcsop', 'expandfs', '-name', '"fs0"', '-size', '2g' ]) self.sshMock.side_effect = [response_for_lsfs, response_for_expandfs] self.assistant.extend_share('fs0', 12) self.sshMock.assert_has_calls([call_for_lsfs, call_for_expandfs]) @ddt.data('CIFS', 'NFS') def test_get_export_locations(self, proto): response_for_lsnode = ('\n'.join([ 'id!name', '1!node1', '2!node2' ]), '') call_for_lsnode = mock.call([ 'mcsinq', 'lsnode', '-delim', '!' 
]) response_for_lsfs_node1 = ('\n'.join([ 'pool_name!fs_name!total_capacity!used_capacity', 'pool0!fs0!10GB!1GB' ]), '') call_for_lsfs_node1 = mock.call([ 'mcsinq', 'lsfs', '-delim', '!', '-node', '"node1"' ]) response_for_lsfs_node2 = ('\n'.join([ 'pool_name!fs_name!total_capacity!used_capacity', 'pool1!fs1!10GB!1GB' ]), '') call_for_lsfs_node2 = mock.call([ 'mcsinq', 'lsfs', '-delim', '!', '-node', '"node2"' ]) response_for_lsnasportip = ('\n'.join([ 'node_name!id!ip!mask!gw!link_state', 'node1!1!192.168.10.1!255.255.255.0!192.168.10.254!active', 'node1!2!192.168.10.2!255.255.255.0!192.168.10.254!active', 'node1!3!!!!inactive', 'node2!1!192.168.10.3!255.255.255.0!192.168.10.254!active', 'node2!2!192.168.10.4!255.255.255.0!192.168.10.254!active', 'node2!3!!!!inactive' ]), '') call_for_lsnasportip = mock.call([ 'mcsinq', 'lsnasportip', '-delim', '!' ]) self.sshMock.side_effect = [ response_for_lsnode, response_for_lsfs_node1, response_for_lsfs_node2, response_for_lsnasportip ] calls = [ call_for_lsnode, call_for_lsfs_node1, call_for_lsfs_node2, call_for_lsnasportip ] ret = self.assistant.get_export_locations('fs1', proto) if proto == 'CIFS': locations = [ { 'path': '\\\\192.168.10.3\\fs1', 'is_admin_only': False, 'metadata': {} }, { 'path': '\\\\192.168.10.4\\fs1', 'is_admin_only': False, 'metadata': {} } ] else: locations = [ { 'path': '192.168.10.3:/fs/fs1/fs1', 'is_admin_only': False, 'metadata': {} }, { 'path': '192.168.10.4:/fs/fs1/fs1', 'is_admin_only': False, 'metadata': {} } ] self.assertEqual(locations, ret) self.sshMock.assert_has_calls(calls) def test_classify_nfs_client_spec_has_nfsinfo(self): response_for_lsnfslist = ('\n'.join([ 'path', '/fs/fs01/fs01' ]), '') call_for_lsnfslist = mock.call([ 'mcsinq', 'lsnfslist', '-delim', '!', '"/fs/fs01/fs01"' ]) response_for_lsnfsinfo = ('\n'.join([ 'ip!mask!rights!all_squash!root_squash', '192.168.1.0!255.255.255.0!rw!all_squash!root_squash', '192.168.2.0!255.255.255.0!rw!all_squash!root_squash' ]), '') 
call_for_lsnfsinfo = mock.call([ 'mcsinq', 'lsnfsinfo', '-delim', '!', '"/fs/fs01/fs01"' ]) self.sshMock.side_effect = [ response_for_lsnfslist, response_for_lsnfsinfo ] calls = [call_for_lsnfslist, call_for_lsnfsinfo] client_spec = [ '192.168.2.0/255.255.255.0:rw:all_squash:root_squash', '192.168.3.0/255.255.255.0:rw:all_squash:root_squash' ] add_spec, del_spec = self.assistant.classify_nfs_client_spec( client_spec, '/fs/fs01/fs01' ) self.assertEqual( add_spec, ['192.168.3.0/255.255.255.0:rw:all_squash:root_squash'] ) self.assertEqual( del_spec, ['192.168.1.0/255.255.255.0:rw:all_squash:root_squash'] ) self.sshMock.assert_has_calls(calls) def test_classify_nfs_client_spec_has_no_nfsinfo(self): cmdlist = [ 'mcsinq', 'lsnfslist', '-delim', '!', '"/fs/fs01/fs01"' ] self.sshMock.return_value = ('', '') client_spec = [ '192.168.2.0/255.255.255.0:rw:all_squash:root_squash', ] add_spec, del_spec = self.assistant.classify_nfs_client_spec( client_spec, '/fs/fs01/fs01' ) self.assertEqual(client_spec, add_spec) self.assertEqual([], del_spec) self.sshMock.assert_called_once_with(cmdlist) def test_access_rule_to_client_spec(self): rule = { 'access_type': 'ip', 'access_to': '192.168.10.0/24', 'access_level': 'rw' } ret = self.assistant.access_rule_to_client_spec(rule) spec = '192.168.10.0/255.255.255.0:rw:all_squash:root_squash' self.assertEqual(spec, ret) def test_access_rule_to_client_spec_type_failed(self): rule = { 'access_type': 'user', 'access_to': 'test01', 'access_level': 'rw' } self.assertRaises( exception.ShareBackendException, self.assistant.access_rule_to_client_spec, rule ) def test_access_rule_to_client_spec_ipversion_failed(self): rule = { 'access_type': 'ip', 'access_to': '2001:db8::/64', 'access_level': 'rw' } self.assertRaises( exception.ShareBackendException, self.assistant.access_rule_to_client_spec, rule ) @ddt.data(True, False) def test_update_nfs_access(self, check_del_add): response_for_rmnfsclient = ('', '') call_for_rmnfsclient = mock.call( ['mcsop', 
'rmnfsclient', '-path', '"/fs/fs01/fs01"', '-client', '192.168.1.0/255.255.255.0'] ) response_for_addnfsclient = ('', '') call_for_addnfsclient = mock.call( ['mcsop', 'addnfsclient', '-path', '"/fs/fs01/fs01"', '-client', '192.168.3.0/255.255.255.0:rw:all_squash:root_squash'] ) access_rules = [ { 'access_type': 'ip', 'access_to': '192.168.2.0/24', 'access_level': 'rw' }, { 'access_type': 'ip', 'access_to': '192.168.3.0/24', 'access_level': 'rw' } ] add_rules = [ { 'access_type': 'ip', 'access_to': '192.168.3.0/24', 'access_level': 'rw' } ] del_rules = [ { 'access_type': 'ip', 'access_to': '192.168.1.0/24', 'access_level': 'rw' }, { 'access_type': 'ip', 'access_to': '192.168.4.0/24', 'access_level': 'rw' } ] cncs_mock = mock.Mock(return_value=( ['192.168.3.0/255.255.255.0:rw:all_squash:root_squash'], ['192.168.1.0/255.255.255.0:rw:all_squash:root_squash'] )) self.mock_object(self.assistant, 'classify_nfs_client_spec', cncs_mock) self.sshMock.side_effect = [ response_for_rmnfsclient, response_for_addnfsclient ] if check_del_add: self.assistant.update_nfs_access('fs01', [], add_rules, del_rules) else: self.assistant.update_nfs_access('fs01', access_rules, [], []) if check_del_add: cncs_mock.assert_called_once_with( [], '/fs/fs01/fs01' ) else: cncs_mock.assert_called_once_with( [ '192.168.2.0/255.255.255.0:rw:all_squash:root_squash', '192.168.3.0/255.255.255.0:rw:all_squash:root_squash' ], '/fs/fs01/fs01' ) self.sshMock.assert_has_calls( [call_for_rmnfsclient, call_for_addnfsclient] ) def test_classify_cifs_rights(self): cmdlist = ['mcsinq', 'lscifsinfo', '-delim', '!', '"fs01"'] response_for_lscifsinfo = '\n'.join([ 'path!oplocks!type!name!rights', '/fs/fs01/fs01!on!LU!user1!rw', '/fs/fs01/fs01!on!LU!user2!rw' ]) self.sshMock.return_value = (response_for_lscifsinfo, '') access_rights = [ 'LU:user2:rw', 'LU:user3:rw' ] add_rights, del_rights = self.assistant.classify_cifs_rights( access_rights, 'fs01' ) self.sshMock.assert_called_once_with(cmdlist) 
self.assertEqual(['LU:user3:rw'], add_rights) self.assertEqual(['LU:user1:rw'], del_rights) def test_access_rule_to_rights(self): rule = { 'access_type': 'user', 'access_to': 'test01', 'access_level': 'rw' } ret = self.assistant.access_rule_to_rights(rule) self.assertEqual('LU:test01:rw', ret) def test_access_rule_to_rights_fail_type(self): rule = { 'access_type': 'ip', 'access_to': '192.168.1.0/24', 'access_level': 'rw' } self.assertRaises( exception.ShareBackendException, self.assistant.access_rule_to_rights, rule ) @ddt.data(True, False) def test_update_cifs_access(self, check_del_add): response_for_rmcifsuser = ('', None) call_for_rmcifsuser = mock.call( ['mcsop', 'rmcifsuser', '-name', 'fs01', '-rights', 'LU:user1'] ) response_for_addcifsuser = ('', None) call_for_addcifsuser = mock.call( ['mcsop', 'addcifsuser', '-name', 'fs01', '-rights', 'LU:user3:rw'] ) access_rules = [ { 'access_type': 'user', 'access_to': 'user2', 'access_level': 'rw' }, { 'access_type': 'user', 'access_to': 'user3', 'access_level': 'rw' } ] add_rules = [ { 'access_type': 'user', 'access_to': 'user3', 'access_level': 'rw' } ] del_rules = [ { 'access_type': 'user', 'access_to': 'user1', 'access_level': 'rw' } ] ccr_mock = mock.Mock(return_value=(['LU:user3:rw'], ['LU:user1:rw'])) self.mock_object(self.assistant, 'classify_cifs_rights', ccr_mock) self.sshMock.side_effect = [ response_for_rmcifsuser, response_for_addcifsuser ] if check_del_add: self.assistant.update_cifs_access('fs01', [], add_rules, del_rules) else: self.assistant.update_cifs_access('fs01', access_rules, [], []) if not check_del_add: ccr_mock.assert_called_once_with( ['LU:user2:rw', 'LU:user3:rw'], 'fs01' ) self.sshMock.assert_has_calls( [call_for_rmcifsuser, call_for_addcifsuser] ) def test_check_access_type(self): rules1 = { 'access_type': 'ip', 'access_to': '192.168.1.0/24', 'access_level': 'rw' } rules2 = { 'access_type': 'ip', 'access_to': '192.168.2.0/24', 'access_level': 'rw' } rules3 = { 'access_type': 'user', 
'access_to': 'user1', 'access_level': 'rw' } rules4 = { 'access_type': 'user', 'access_to': 'user2', 'access_level': 'rw' } ret = self.assistant.check_access_type('ip', [rules1], [rules2]) self.assertTrue(ret) ret = self.assistant.check_access_type('user', [rules3], [rules4]) self.assertTrue(ret) ret = self.assistant.check_access_type('ip', [rules1], [rules3]) self.assertFalse(ret) ret = self.assistant.check_access_type('user', [rules3], [rules1]) self.assertFalse(ret) @ddt.data( {'proto': 'CIFS', 'ret': True}, {'proto': 'CIFS', 'ret': False}, {'proto': 'NFS', 'ret': True}, {'proto': 'NFS', 'ret': False}, {'proto': 'unknown', 'ret': True}) @ddt.unpack def test_update_access(self, proto, ret): uca_mock = self.mock_object( self.assistant, 'update_cifs_access', mock.Mock() ) una_mock = self.mock_object( self.assistant, 'update_nfs_access', mock.Mock() ) cat_mock = self.mock_object( self.assistant, 'check_access_type', mock.Mock(return_value=ret) ) if proto == 'unknown': self.assertRaises( exception.ShareBackendException, self.assistant.update_access, 'fs01', proto, [], [], [] ) cat_mock.assert_not_called() elif ret is False: self.assertRaises( exception.InvalidShareAccess, self.assistant.update_access, 'fs01', proto, [], [], [] ) cat_mock.assert_called_once() else: self.assistant.update_access( 'fs01', proto, [], [], [] ) if proto == 'CIFS': uca_mock.assert_called_once_with('fs01', [], [], []) una_mock.assert_not_called() else: una_mock.assert_called_once_with('fs01', [], [], []) uca_mock.assert_not_called() cat_mock.assert_called_once() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0256703 manila-21.0.0/manila/tests/share/drivers/macrosan/0000775000175000017500000000000000000000000022044 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/macrosan/__init__.py0000664000175000017500000000000000000000000024143 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/macrosan/test_macrosan_nas.py0000664000175000017500000027522600000000000026137 0ustar00zuulzuul00000000000000# Copyright (c) 2022 MacroSAN Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Share driver test for Macrosan Storage Array. 
""" import ddt import requests from oslo_config import cfg from unittest import mock from manila import context from manila import exception from manila.share import configuration from manila.share import driver from manila.share.drivers.macrosan import macrosan_constants as constants from manila.share.drivers.macrosan import macrosan_helper from manila.share.drivers.macrosan import macrosan_nas from manila.share.drivers.macrosan import rest_helper from manila import test from manila.tests import fake_share CONF = cfg.CONF class FakeResponse(object): def __init__(self, status, result): self.status_code = status self.text = 'return message' self.response = result def json(self): return self.response def close(self): pass @ddt.ddt class MacrosanShareDriverTestCase(test.TestCase): def setUp(self): self.mock_object(macrosan_nas.CONF, '_check_required_opts') super(MacrosanShareDriverTestCase, self).setUp() def _safe_get(opt): return getattr(self.configuration, opt) self._context = context.get_admin_context() self.configuration = mock.Mock(spec=configuration.Configuration) self.configuration.safe_get = mock.Mock(side_effect=_safe_get) self.configuration.driver_handles_share_servers = False self.configuration.share_backend_name = 'fake_share_backend_name' self.configuration.macrosan_nas_http_protocol = 'https' self.configuration.macrosan_nas_ip = 'fake_ip' self.configuration.macrosan_nas_port = 'fake_port' self.configuration.macrosan_nas_username = 'fake_username' self.configuration.macrosan_nas_password = 'fake_password' self.configuration.macrosan_nas_prefix = 'nas' self.configuration.macrosan_share_pools = ['fake_pool'] self.configuration.macrosan_timeout = 60 self.configuration.macrosan_ssl_cert_verify = False self.configuration.network_config_group = 'fake_network_config_group' self.configuration.admin_network_config_group = ( 'fake_admin_network_config_group') self.configuration.config_group = 'fake_config_group' self.configuration.reserved_share_percentage = 0 
self.configuration.reserved_share_from_snapshot_percentage = 0 self.configuration.reserved_share_extend_percentage = 0 self.configuration.filter_function = None self.configuration.goodness_function = None self.driver = macrosan_nas.MacrosanNasDriver( configuration=self.configuration) self.result_success_storage_pools = { 'code': 0, 'message': 'success', 'data': [{ 'name': 'fake_pool', 'size': '1000.0G', 'allocated': '100G', 'free': '900G', 'health': 'ONLINE', 'rwStatus': 'off' }] } def test_do_setup(self): mock_login = self.mock_object(rest_helper.RestHelper, 'login') self.driver.do_setup(self._context) mock_login.assert_called_once() def test_do_setup_login_fail(self): mock_login = self.mock_object( rest_helper.RestHelper, 'login', mock.Mock( side_effect=exception.ShareBackendException( msg='fake_exception'))) self.assertRaises(exception.ShareBackendException, self.driver.do_setup, self._context) mock_login.assert_called_once() @ddt.data({'nfs_status': constants.NFS_NON_CONFIG, 'cifs_status': constants.CIFS_NON_CONFIG}, {'nfs_status': constants.NFS_DISABLED, 'cifs_status': constants.CIFS_DISABLED}, {'nfs_status': constants.NFS_ENABLED, 'cifs_status': constants.CIFS_ENABLED}, {'nfs_status': constants.NFS_ENABLED, 'cifs_status': constants.CIFS_SHARE_MODE}) @ddt.unpack def test_check_for_setup_error_non_config(self, nfs_status, cifs_status): mock_gnss = self.mock_object( rest_helper.RestHelper, '_get_nfs_service_status', mock.Mock(return_value={ "serviceStatus": nfs_status, "nfs3Status": constants.NFS_NON_SUPPORTED, "nfs4Status": constants.NFS_NON_SUPPORTED })) mock_cns = self.mock_object(rest_helper.RestHelper, '_config_nfs_service') mock_sns = self.mock_object(rest_helper.RestHelper, '_start_nfs_service') if cifs_status == constants.CIFS_DISABLED: mock_gcss = self.mock_object( rest_helper.RestHelper, '_get_cifs_service_status', mock.Mock(side_effect=[cifs_status, constants.CIFS_SHARE_MODE])) else: mock_gcss = self.mock_object( rest_helper.RestHelper, 
'_get_cifs_service_status', mock.Mock(return_value=cifs_status)) mock_ccs = self.mock_object(rest_helper.RestHelper, '_config_cifs_service') mock_scs = self.mock_object(rest_helper.RestHelper, '_start_cifs_service') self.driver.check_for_setup_error() if (nfs_status == constants.NFS_NON_CONFIG or nfs_status == constants.NFS_DISABLED): mock_cns.assert_called_once() mock_sns.assert_called_once() else: mock_cns.assert_called_once() mock_gnss.assert_called_once() if cifs_status == constants.CIFS_NON_CONFIG: mock_gcss.assert_called_once() mock_ccs.assert_called_once() mock_scs.assert_called_once() elif cifs_status == constants.CIFS_DISABLED: mock_gcss.assert_called() mock_ccs.assert_called_once() mock_scs.assert_called_once() elif cifs_status == constants.CIFS_SHARE_MODE: mock_gcss.assert_called_once() mock_ccs.assert_called_once() else: mock_gcss.assert_called_once() def test_check_for_setup_error_nfs_service_error(self): mock_gnss = self.mock_object( rest_helper.RestHelper, '_get_nfs_service_status', mock.Mock(return_value={ "serviceStatus": constants.NFS_EXCEPTION, "nfs3Status": constants.NFS_NON_SUPPORTED, "nfs4Status": constants.NFS_NON_SUPPORTED })) self.assertRaises(exception.MacrosanBackendExeption, self.driver.check_for_setup_error) mock_gnss.assert_called_once() def test_check_for_setup_error_cifs_service_error(self): mock_gnss = self.mock_object( rest_helper.RestHelper, '_get_nfs_service_status', mock.Mock(return_value={ "serviceStatus": constants.NFS_ENABLED, "nfs3Status": constants.NFS_SUPPORTED, "nfs4Status": constants.NFS_SUPPORTED })) mock_gcss = self.mock_object( rest_helper.RestHelper, '_get_cifs_service_status', mock.Mock(return_value=constants.CIFS_EXCEPTION)) self.assertRaises(exception.MacrosanBackendExeption, self.driver.check_for_setup_error) mock_gnss.assert_called_once() mock_gcss.assert_called_once() @ddt.data('nfs', 'cifs') def test_create_share(self, share_proto): share = fake_share.fake_share( share_proto=share_proto, 
host="fake_host@fake_backend#fake_pool") mock_cf = self.mock_object(rest_helper.RestHelper, '_create_filesystem') mock_cfd = self.mock_object(rest_helper.RestHelper, '_create_filesystem_dir') mock_cns = self.mock_object(rest_helper.RestHelper, '_create_nfs_share') self.mock_object(macrosan_helper.MacrosanHelper, '_ensure_user', mock.Mock(return_value=True)) mock_ccs = self.mock_object(rest_helper.RestHelper, '_create_cifs_share') self.driver.helper.configuration.macrosan_nas_ip = "172.0.0.1" location = self.driver.create_share(self._context, share) if share_proto == 'nfs': expect_location = r'172.0.0.1:/manila_fakeid/manila_fakeid' print('test location:', location) self.assertEqual(location, expect_location) else: expect_location = r'\\172.0.0.1\manila_fakeid' self.assertEqual(location, expect_location) mock_cf.assert_called_once_with(fs_name='manila_fakeid', pool_name='fake_pool', filesystem_quota='1GB') mock_cf.assert_called() share_path = self.driver.helper._generate_share_path('manila_fakeid') mock_cfd.assert_called_once_with(share_path) if share_proto == 'nfs': mock_cns.assert_called_once_with(share_path=share_path) else: mock_ccs.assert_called_once() def test_create_share_user_error(self): share = fake_share.fake_share( share_proto='cifs', host="fake_host@fake_backend#fake_pool") mock_cf = self.mock_object(rest_helper.RestHelper, '_create_filesystem') mock_cfd = self.mock_object(rest_helper.RestHelper, '_create_filesystem_dir') self.mock_object(macrosan_helper.MacrosanHelper, '_ensure_user', mock.Mock(return_value=False)) mock_df = self.mock_object(rest_helper.RestHelper, '_delete_filesystem') self.assertRaises(exception.MacrosanBackendExeption, self.driver.create_share, self._context, share) mock_cf.assert_called_once() share_path = self.driver.helper._generate_share_path('manila_fakeid') mock_cfd.assert_called_once_with(share_path) mock_df.assert_called_once_with('manila_fakeid') @ddt.data('nfs', 'cifs') def test_delete_share(self, share_proto): share = 
fake_share.fake_share( share_proto=share_proto, host="fake_host@fake_backend#fake_pool") expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value={ "path": "fake_path", "clients": ["client"], "protocol": "fake_protocol" })) mock_dns = self.mock_object( rest_helper.RestHelper, '_delete_nfs_share') mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value={ "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "fake_protocol", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"] })) mock_dcs = self.mock_object(rest_helper.RestHelper, '_delete_cifs_share') mock_df = self.mock_object(rest_helper.RestHelper, '_delete_filesystem') self.driver.delete_share(self._context, share) if share_proto == "nfs": mock_gns.assert_called_once_with(expect_share_path) mock_dns.assert_called_once_with(expect_share_path) else: mock_gcs.assert_called_once_with(expect_share_path) mock_dcs.assert_called_once_with('manila_fakeid', expect_share_path) mock_df.assert_called_once_with('manila_fakeid') @ddt.data('nfs', 'cifs') def test_delete_share_not_exist(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, host="fake_host@fake_backend#fake_pool") expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value=None)) mock_gf = self.mock_object(rest_helper.RestHelper, '_get_filesystem', mock.Mock(return_value={ "name": "fake_name", "poolName": "fake_pool", "quotaStatus": "1GB" })) mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value=None)) mock_df = self.mock_object(rest_helper.RestHelper, '_delete_filesystem') self.driver.delete_share(self._context, share) if share_proto == 'nfs': 
mock_gns.assert_called_once_with(expect_share_path) else: mock_gcs.assert_called_once_with(expect_share_path) mock_gf.assert_called_once_with('manila_fakeid') mock_df.assert_called_once_with('manila_fakeid') @ddt.data('nfs', 'cifs') def test_extend_share(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, host="fake_host@fake_backend#fake_pool") expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value={ "path": "fake_path", "clients": ["client"], "protocol": "fake_protocol" })) mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value={ "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "fake_protocol", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"] })) mock_uss = self.mock_object(rest_helper.RestHelper, '_update_share_size') self.driver.extend_share(share, 2) if share_proto == 'nfs': mock_gns.assert_called_once_with(expect_share_path) else: mock_gcs.assert_called_once_with(expect_share_path) mock_uss.assert_called_once_with('manila_fakeid', '2GB') def test_extend_share_not_exist(self): share = fake_share.fake_share(share_proto='nfs', size=1, host="fake_host@fake_backend#fake_pool") expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value=None)) self.assertRaises(exception.ShareResourceNotFound, self.driver.extend_share, share, 2) mock_gns.assert_called_once_with(expect_share_path) @ddt.data('nfs', 'cifs') def test_shrink_share(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, size=5, host="fake_host@fake_backend#fake_pool") expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value={ 
"path": "fake_path", "clients": ["client"], "protocol": "fake_protocol" })) mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value={ "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "fake_protocol", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"] })) mock_gf = self.mock_object(rest_helper.RestHelper, '_get_filesystem', mock.Mock(return_value={ "name": "fake_name", "poolName": "fake_pool", "quotaStatus": "5GB", "usedCapacity": '1GB' })) mock_uss = self.mock_object(rest_helper.RestHelper, '_update_share_size') self.driver.shrink_share(share, 3) if share_proto == 'nfs': mock_gns.assert_called_once_with(expect_share_path) else: mock_gcs.assert_called_once_with(expect_share_path) mock_gf.assert_called_once_with('manila_fakeid') mock_uss.assert_called_once_with('manila_fakeid', '3GB') @ddt.data('nfs', 'cifs') def test_shrink_share_not_exist(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, size=3, host="fake_host@fake_backend#fake_pool") expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value=None)) mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value=None)) self.assertRaises(exception.ShareResourceNotFound, self.driver.shrink_share, share, 1) if share_proto == 'nfs': mock_gns.assert_called_once_with(expect_share_path) elif share_proto == 'cifs': mock_gcs.assert_called_once_with(expect_share_path) def test_shrink_share_size_fail(self): share = fake_share.fake_share(share_proto='nfs', size=3, host="fake_host@fake_backend#fake_pool") expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value={ "path": "fake_path", "clients": ["client"], "protocol": "fake_protocol" })) mock_gf = 
self.mock_object(rest_helper.RestHelper, '_get_filesystem', mock.Mock(return_value={ "name": "fake_name", "poolName": "fake_pool", "quotaStatus": "3GB", "usedCapacity": '2GB' })) self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self.driver.shrink_share, share, 1) mock_gf.assert_called_once_with('manila_fakeid') mock_gns.assert_called_once_with(expect_share_path) @ddt.data('nfs', 'cifs') def test_ensure_share(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, host="fake_host@fake_backend#fake_pool") mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value={ "path": "fake_path", "clients": ["client"], "protocol": "fake_protocol" })) mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value={ "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "fake_protocol", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"], })) self.driver.helper.configuration.macrosan_nas_ip = "172.0.0.1" locations = self.driver.ensure_share(self._context, share) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') if share_proto == 'nfs': expect_locations = [r'172.0.0.1:/manila_fakeid/manila_fakeid'] self.assertEqual(locations, expect_locations) mock_gns.assert_called_once_with(expect_share_path) else: expect_locations = [r'\\172.0.0.1\manila_fakeid'] self.assertEqual(locations, expect_locations) mock_gcs.assert_called_once_with(expect_share_path) def test_ensure_share_proto_fail(self): share = fake_share.fake_share(host="fake_host@fake_backend#fake_pool") self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value={ "path": "fake_path", "clients": ["client"], "protocol": "fake_protocol" })) self.assertRaises(exception.MacrosanBackendExeption, self.driver.ensure_share, self._context, share) def test_ensure_share_not_exist(self): share = fake_share.fake_share(share_proto='nfs', 
host="fake_host@fake_backend#fake_pool") mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value=None)) self.assertRaises(exception.ShareResourceNotFound, self.driver.ensure_share, self._context, share) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gns.assert_called_once_with(expect_share_path) @ddt.data('nfs', 'cifs') def test_allow_access_success(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, host="fake_host@fake_backend#fake_pool") if share_proto == 'nfs': access = { 'access_type': 'ip', 'access_to': '0.0.0.0/0', 'access_level': 'rw', } else: access = { 'access_type': 'user', 'access_to': 'fake_user', 'access_level': 'rw', } mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value={ "path": "fake_path", "clients": ["client"], "protocol": "fake_protocol" })) mock_gafns = self.mock_object(rest_helper.RestHelper, '_get_access_from_nfs_share', mock.Mock(return_value=None)) mock_anar = self.mock_object(rest_helper.RestHelper, '_allow_nfs_access_rest') mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value={ "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "fake_protocol", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"], })) mock_gafcs = self.mock_object(rest_helper.RestHelper, '_get_access_from_cifs_share', mock.Mock(return_value=None)) mock_acar = self.mock_object(rest_helper.RestHelper, '_allow_cifs_access_rest') self.driver.helper._allow_access(share, access) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') if access['access_to'] == '0.0.0.0/0': access['access_to'] = '*' if share_proto == 'nfs': mock_gns.assert_called_once_with(expect_share_path) mock_gafns.assert_called_once_with(expect_share_path, access['access_to']) mock_anar.assert_called_once_with(expect_share_path, access['access_to'], 
access['access_level']) else: mock_gcs.assert_called_once_with(expect_share_path) mock_gafcs.assert_called_once_with(expect_share_path, access['access_to']) mock_acar.assert_called_once_with(expect_share_path, access['access_to'], access['access_level']) def test_allow_access_nfs_change(self): share = fake_share.fake_share(share_proto='nfs', host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'ip', 'access_to': '172.0.0.1', 'access_level': 'rw', } mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value={ "path": "/manila_fakeid", "clients": ["client"], "protocol": "fake_protocol" })) mock_gafns = self.mock_object(rest_helper.RestHelper, '_get_access_from_nfs_share', mock.Mock(return_value={ "path": "/manila_fakeid", "clientName": "fake_client_name", "accessRight": "ro", })) mock_cnar = self.mock_object(rest_helper.RestHelper, '_change_nfs_access_rest') self.driver.helper._allow_access(share, access) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gns.assert_called_once_with(expect_share_path) mock_gafns.assert_called_once_with(expect_share_path, access['access_to']) mock_cnar.assert_called_once_with(expect_share_path, access['access_to'], access['access_level']) def test_allow_access_cifs_change(self): share = fake_share.fake_share(share_proto='cifs', host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'user', 'access_to': 'fake_user', 'access_level': 'rw', } mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value={ "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "fake_protocol", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"], })) mock_gafcs = self.mock_object(rest_helper.RestHelper, '_get_access_from_cifs_share', mock.Mock(return_value={ "path": "fake_path", "ugName": "fake_user", "ugType": "0", "accessRight": "ro", })) mock_ccar = 
self.mock_object(rest_helper.RestHelper, '_change_cifs_access_rest') self.driver.helper._allow_access(share, access) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gcs.assert_called_once_with(expect_share_path) mock_gafcs.assert_called_once_with(expect_share_path, access['access_to']) mock_ccar.assert_called_once_with(expect_share_path, access['access_to'], access['access_level'], '0') @ddt.data( { 'access_type': 'user', 'access_to': 'user_name', 'access_level': 'rw', }, { 'access_type': 'user', 'access_to': 'group_name', 'access_level': 'rw', }, { 'access_type': 'user', 'access_to': '/domain_user', 'access_level': 'rw', }, { 'access_type': 'user', 'access_to': '/domain_group', 'access_level': 'rw', }, ) def test_allow_access_cifs(self, access): share = fake_share.fake_share(share_proto='cifs', host="fake_host@fake_backend#fake_pool") mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value={ "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "fake_protocol", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"], })) mock_gafcs = self.mock_object(rest_helper.RestHelper, '_get_access_from_cifs_share', mock.Mock(return_value=None)) mock_acar = self.mock_object(rest_helper.RestHelper, '_allow_cifs_access_rest') self.driver.helper._allow_access(share, access) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gcs.assert_called_once_with(expect_share_path) mock_gafcs.assert_called_once_with(expect_share_path, access['access_to']) mock_acar.assert_called_once_with(expect_share_path, access['access_to'], access['access_level']) @ddt.data('nfs', 'cifs') def test_allow_access_share_not_exist(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, host="fake_host@fake_backend#fake_pool") access = {} if share_proto == 'nfs': access = { 'access_type': 'ip', 'access_to': '172.0.0.1', 'access_level': 
'rw', } else: access = { 'access_type': 'user', 'access_to': 'fake_user', 'access_level': 'rw', } mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value=None)) mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value=None)) self.assertRaises(exception.ShareResourceNotFound, self.driver.helper._allow_access, share, access) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') if share_proto == 'nfs': mock_gns.assert_called_once_with(expect_share_path) else: mock_gcs.assert_called_once_with(expect_share_path) def test_allow_access_proto_fail(self): share = fake_share.fake_share(host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'user', 'access_to': 'fake_user', 'access_level': 'rw', } self.assertRaises(exception.MacrosanBackendExeption, self.driver.helper._allow_access, share, access) def test_allow_access_nfs_user_fail(self): share = fake_share.fake_share(share_proto='nfs', host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'user', 'access_to': 'fake_user', 'access_level': 'rw', } self.assertRaises(exception.InvalidShareAccess, self.driver.helper._allow_access, share, access) def test_allow_access_cifs_ip_fail(self): share = fake_share.fake_share(share_proto='cifs', host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'ip', 'access_to': '172.0.0.1', 'access_level': 'rw', } self.assertRaises(exception.InvalidShareAccess, self.driver.helper._allow_access, share, access) def test_allow_access_nfs_level_fail(self): share = fake_share.fake_share(share_proto='nfs', host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'ip', 'access_to': '172.0.0.1', 'access_level': 'r', } self.assertRaises(exception.InvalidShareAccess, self.driver.helper._allow_access, share, access) @ddt.data('nfs', 'cifs') def test_deny_access(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, 
host="fake_host@fake_backend#fake_pool") if share_proto == 'nfs': access = { 'access_type': 'ip', 'access_to': '0.0.0.0/0', 'access_level': 'rw', } else: access = { 'access_type': 'user', 'access_to': 'fake_user', 'access_level': 'rw', } mock_gafns = self.mock_object(rest_helper.RestHelper, '_get_access_from_nfs_share', mock.Mock(return_value={ "path": "fake_path", "clientName": "fake_client_name", "accessRight": "rw", })) mock_dnar = self.mock_object(rest_helper.RestHelper, '_delete_nfs_access_rest') mock_gafcs = self.mock_object(rest_helper.RestHelper, '_get_access_from_cifs_share', mock.Mock(return_value={ "path": "fake_path", "ugName": "fake_user", "ugType": "0", "accessRight": "rw", })) mock_dcar = self.mock_object(rest_helper.RestHelper, '_delete_cifs_access_rest') expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') self.driver.helper._deny_access(share, access) if access['access_to'] == '0.0.0.0/0': access['access_to'] = '*' if share_proto == 'nfs': mock_gafns.assert_called_once_with(expect_share_path, access['access_to']) mock_dnar.assert_called_once_with(expect_share_path, access['access_to']) else: mock_gafcs.assert_called_once_with(expect_share_path, access['access_to']) mock_dcar.assert_called_once_with(expect_share_path, "fake_user", "0") def test_deny_access_nfs_type_fail(self): share = fake_share.fake_share(share_proto='nfs', host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'fake_type', 'access_to': '172.0.0.1', 'access_level': 'rw', } result = self.driver.helper._deny_access(share, access) self.assertIsNone(result) def test_deny_access_nfs_share_not_exist(self): share = fake_share.fake_share(share_proto='nfs', host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'ip', 'access_to': '172.0.0.1', 'access_level': 'rw', } mock_gafns = self.mock_object(rest_helper.RestHelper, '_get_access_from_nfs_share', mock.Mock(return_value=None)) result = self.driver.helper._deny_access(share, access) 
self.assertIsNone(result) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gafns.assert_called_once_with(expect_share_path, access['access_to']) def test_deny_access_cifs_type_fail(self): share = fake_share.fake_share(share_proto='cifs', host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'fake_type', 'access_to': 'fake_user', 'access_level': 'rw', } result = self.driver.helper._deny_access(share, access) self.assertIsNone(result) def test_deny_access_cifs_share_not_exist(self): share = fake_share.fake_share(share_proto='cifs', host="fake_host@fake_backend#fake_pool") access = { 'access_type': 'user', 'access_to': 'fake_user', 'access_level': 'rw', } mock_gafcs = self.mock_object(rest_helper.RestHelper, '_get_access_from_cifs_share', mock.Mock(return_value=None)) result = self.driver.helper._deny_access(share, access) self.assertIsNone(result) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') mock_gafcs.assert_called_once_with(expect_share_path, access['access_to']) def test_update_access_add_delete(self): share = fake_share.fake_share(share_proto='nfs', host="fake_host@fake_backend#fake_pool") add_rules = [{'access_type': 'ip', 'access_to': '172.0.2.1', 'access_level': 'rw', }] delete_rules = [{'access_type': 'ip', 'access_to': '172.0.2.2', 'access_level': 'rw', }] self.mock_object(macrosan_helper.MacrosanHelper, '_allow_access') self.mock_object(macrosan_helper.MacrosanHelper, '_deny_access') self.driver.update_access(self._context, share, None, add_rules, delete_rules, None) @ddt.data('nfs', 'cifs') def test_update_access_nfs(self, proto): share = fake_share.fake_share(share_proto=proto, host="fake_host@fake_backend#fake_pool") if proto == 'nfs': access_rules = [{'access_type': 'ip', 'access_to': '172.0.3.1', 'access_level': 'rw', }, {'access_type': 'ip', 'access_to': '172.0.3.2', 'access_level': 'rw', }] else: access_rules = [{'access_type': 'user', 'access_to': 'user_l', 
'access_level': 'rw', }, {'access_type': 'user', 'access_to': 'user_a', 'access_level': 'rw', }] mock_ca = self.mock_object(macrosan_helper.MacrosanHelper, '_clear_access') self.mock_object(macrosan_helper.MacrosanHelper, '_allow_access') self.driver.update_access(self._context, share, access_rules, {}, {}, {}) mock_ca.assert_called_once_with(share, None) def test_update_access_fail(self): share = fake_share.fake_share(share_proto='nfs', host="fake_host@fake_backend#fake_pool") access_rules = [{'access_id': 'fakeid', 'access_type': 'ip', 'access_to': '172.0.3.1', 'access_level': 'rw', }] mock_ca = self.mock_object(macrosan_helper.MacrosanHelper, '_clear_access') self.mock_object(macrosan_helper.MacrosanHelper, '_allow_access', mock.Mock(side_effect=exception.InvalidShareAccess( reason='fake_exception'))) result = self.driver.update_access(self._context, share, access_rules, None, None, None) expect = { 'fakeid': { 'state': 'error', } } self.assertEqual(result, expect) mock_ca.assert_called_once_with(share, None) def test_update_access_add_fail(self): share = fake_share.fake_share(share_proto='nfs', host="fake_host@fake_backend#fake_pool") add_rules = [{'access_id': 'fakeid', 'access_type': 'ip', 'access_to': '172.0.2.1', 'access_level': 'rw', }] delete_rules = [] self.mock_object(macrosan_helper.MacrosanHelper, '_allow_access', mock.Mock(side_effect=exception.InvalidShareAccess( reason='fake_exception'))) self.mock_object(macrosan_helper.MacrosanHelper, '_deny_access') result = self.driver.update_access(self._context, share, None, add_rules, delete_rules, None) expect = { 'fakeid': { 'state': 'error' } } self.assertEqual(result, expect) @ddt.data('nfs', 'cifs') def test__clear_access(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, host="fake_host@fake_backend#fake_pool") fake_nfs_share_backend = [ { 'share_path': 'fake_path', 'access_to': '172.0.0.1', 'access_level': 'rw' }, { 'share_path': 'default_path', 'access_to': '172.0.0.2', 
'access_level': 'rw' }] fake_cifs_share_backend = [ { 'share_path': 'fake_path', 'access_to': 'user_name', 'ugType': '0', 'access_level': 'rw' }, { 'share_path': 'default_path', 'access_to': 'manilanobody', 'ugType': '0', 'access_level': 'rw' }] mock_ganar = self.mock_object( rest_helper.RestHelper, '_get_all_nfs_access_rest', mock.Mock(return_value=fake_nfs_share_backend)) mock_gacar = self.mock_object( rest_helper.RestHelper, '_get_all_cifs_access_rest', mock.Mock(return_value=fake_cifs_share_backend)) self.mock_object(rest_helper.RestHelper, '_delete_nfs_access_rest') self.mock_object(rest_helper.RestHelper, '_delete_cifs_access_rest') self.driver.helper._clear_access(share) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') if share_proto == 'nfs': mock_ganar.assert_called_once_with(expect_share_path) else: mock_gacar.assert_called_once_with(expect_share_path) @ddt.data('nfs', 'cifs') def test__clear_access_no_access_list(self, share_proto): share = fake_share.fake_share(share_proto=share_proto, host="fake_host@fake_backend#fake_pool") mock_ganar = self.mock_object( rest_helper.RestHelper, '_get_all_nfs_access_rest', mock.Mock(return_value=[])) mock_gacar = self.mock_object( rest_helper.RestHelper, '_get_all_cifs_access_rest', mock.Mock(return_value=[])) self.driver.helper._clear_access(share) expect_share_path = self.driver.helper._generate_share_path( 'manila_fakeid') if share_proto == 'nfs': mock_ganar.assert_called_once_with(expect_share_path) else: mock_gacar.assert_called_once_with(expect_share_path) @ddt.data(constants.USER_NOT_EXIST, constants.USER_EXIST, constants.USER_FORMAT_ERROR) def test__ensure_user(self, query_result): mock_qu = self.mock_object(rest_helper.RestHelper, '_query_user', mock.Mock(return_value=query_result)) mock_qg = self.mock_object( rest_helper.RestHelper, '_query_group', mock.Mock(return_value=constants.GROUP_NOT_EXIST)) mock_alg = self.mock_object(rest_helper.RestHelper, '_add_localgroup') mock_alu = 
self.mock_object(rest_helper.RestHelper, '_add_localuser') result = self.driver.helper._ensure_user('fake_user', 'fake_passwd', 'fake_group') if query_result == constants.USER_NOT_EXIST: mock_qg.assert_called_once_with('fake_group') mock_alg.assert_called_once_with('fake_group') mock_alu.assert_called_once_with('fake_user', 'fake_passwd', 'fake_group') self.assertTrue(result) elif query_result == constants.USER_EXIST: self.assertTrue(result) else: self.assertFalse(result) mock_qu.assert_called_once_with('fake_user') def test__ensure_user_fail(self): mock_qu = self.mock_object( rest_helper.RestHelper, '_query_user', mock.Mock(return_value=constants.USER_NOT_EXIST)) mock_qg = self.mock_object( rest_helper.RestHelper, '_query_group', mock.Mock(return_value=constants.GROUP_FORMAT_ERROR)) self.assertRaises(exception.InvalidInput, self.driver.helper._ensure_user, 'fake_user', 'fake_passwd', 'fake_group') mock_qu.assert_called_once_with('fake_user') mock_qg.assert_called_once_with('fake_group') def test__update_share_stats(self): self.driver.helper.pools = ['fake_pool'] mock_gap = self.mock_object(rest_helper.RestHelper, '_get_all_pool', mock.Mock(return_value='fake_result')) mock_gpc = self.mock_object(macrosan_helper.MacrosanHelper, '_get_pool_capacity', mock.Mock(return_value={ "totalcapacity": 10, "freecapacity": 9, "allocatedcapacity": 1, })) mock_uss = self.mock_object(driver.ShareDriver, '_update_share_stats') self.driver._update_share_stats() data = {} data['vendor_name'] = self.driver.VENDOR data['driver_version'] = self.driver.VERSION data['storage_protocol'] = self.driver.PROTOCOL data['share_backend_name'] = 'fake_share_backend_name' data['pools'] = [{ 'pool_name': 'fake_pool', 'total_capacity_gb': 10, 'free_capacity_gb': 9, 'allocated_capacity_gb': 1, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'dedupe': False, 'compression': False, 'qos': False, 'thin_provisioning': False, 'snapshot_support': False, 
'create_share_from_snapshot_support': False, }] mock_gap.assert_called_once() mock_gpc.assert_called_once_with('fake_pool', 'fake_result') mock_uss.assert_called_once_with(data) def test__update_share_stats_pool_not_exist(self): self.driver.helper.pools = ['fake_pool'] self.mock_object(rest_helper.RestHelper, '_get_all_pool', mock.Mock(return_value='fake_result')) self.mock_object(macrosan_helper.MacrosanHelper, '_get_pool_capacity', mock.Mock(return_value={})) self.assertRaises(exception.InvalidInput, self.driver._update_share_stats ) def test__get_pool_capacity(self): self.mock_object(macrosan_helper.MacrosanHelper, '_find_pool_info', mock.Mock(return_value={ "name": "fake_pool", "totalcapacity": "100.0G", "allocatedcapacity": "22G", "freecapacity": "78G", "health": "ONLINE", "rw": "off", })) res = self.driver.helper._get_pool_capacity("fake_pool", "fake_result") self.assertEqual(100, res['totalcapacity']) self.assertEqual(78, res['freecapacity']) self.assertEqual(22, res['allocatedcapacity']) def test__generate_share_name(self): share = fake_share.fake_share(host="fake_host@fake_backend#fake_pool") result = self.driver.helper._generate_share_name(share) self.assertEqual("manila_fakeid", result) def test__format_name(self): a = 'fake-1234567890-1234567890-1234567890' expect = 'fake_1234567890_1234567890_1234' result = self.driver.helper._format_name(a) self.assertEqual(expect, result) def test__generate_share_path(self): share_name = 'manila_fakeid' result = self.driver.helper._generate_share_path(share_name) self.assertEqual(r'/manila_fakeid/manila_fakeid', result) @ddt.data('nfs', 'cifs') def test__get_location_path(self, share_proto): self.driver.helper.configuration.macrosan_nas_ip = "172.0.0.1" result = self.driver.helper._get_location_path('fake_path', 'fake_name', share_proto) if share_proto == 'nfs': expect = r'172.0.0.1:fake_path' elif share_proto == 'cifs': expect = r'\\172.0.0.1\fake_name' self.assertEqual(expect, result) def 
test__get_share_instance_pnp_pool_error(self): share = fake_share.fake_share( share_proto="nfs", host="fake_host@fake_backend") self.assertRaises(exception.InvalidHost, self.driver.helper._get_share_instance_pnp, share) def test__get_share_instance_pnp_proto_error(self): share = fake_share.fake_share( share_proto="CephFS", host="fake_host@fake_backend#fake_pool") self.assertRaises(exception.MacrosanBackendExeption, self.driver.helper._get_share_instance_pnp, share) @ddt.data('2000000000', '2000000KB', '2000MB', '20GB', '2TB') def test__unit_convert_toGB(self, capacity): convert = {'2000000000': '%.0f' % (float(2000000000) / 1024 ** 3), '2000000KB': '%.0f' % (float(2000000) / 1024 ** 2), '2000MB': '%.0f' % (float(2000) / 1024), '20GB': '%.0f' % float(20), '2TB': '%.0f' % (float(2) * 1024)} expect = float(convert[capacity]) result = self.driver.helper._unit_convert_toGB(capacity) self.assertEqual(expect, result) @ddt.data('nfs', 'cifs') def test__get_share(self, proto): proto = proto.upper() mock_gns = self.mock_object(rest_helper.RestHelper, '_get_nfs_share', mock.Mock(return_value={ "path": "/manila_fakeid", "clients": ["client"], "protocol": "NFS" })) mock_gcs = self.mock_object(rest_helper.RestHelper, '_get_cifs_share', mock.Mock(return_value={ "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "CIFS", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"], })) expect_nfs = { "path": "/manila_fakeid", "clients": ["client"], "protocol": "NFS"} expect_cifs = { "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "CIFS", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"]} result = self.driver.helper._get_share('fake_path', proto) if proto == 'NFS': mock_gns.assert_called_once_with('fake_path') self.assertEqual(expect_nfs, result) elif proto == 'CIFS': mock_gcs.assert_called_once_with('fake_path') self.assertEqual(expect_cifs, result) def 
test__find_pool_info(self): pool_info = self.driver.helper._find_pool_info( 'fake_pool', self.result_success_storage_pools) self.assertIsNotNone(pool_info) def test__find_pool_info_fail(self): pool_info = self.driver.helper._find_pool_info( 'error_pool', self.result_success_storage_pools) expect = {} self.assertEqual(expect, pool_info) @ddt.ddt class RestHelperTestCase(test.TestCase): def setUp(self): self.mock_object(CONF, '_check_required_opts') super(RestHelperTestCase, self).setUp() def _safe_get(opt): return getattr(self.configuration, opt) self.configuration = mock.Mock(spec=configuration.Configuration) self.configuration.safe_get = mock.Mock(side_effect=_safe_get) self.configuration.macrosan_nas_http_protocol = 'https' self.configuration.macrosan_nas_ip = 'fake_ip' self.configuration.macrosan_nas_port = 'fake_port' self.configuration.macrosan_nas_prefix = 'nas' self.configuration.macrosan_nas_username = 'fake_username' self.configuration.macrosan_nas_password = 'fake_password' self.configuration.macrosan_timeout = 60 self.configuration.macrosan_ssl_cert_verify = False self.resthelper = rest_helper.RestHelper( configuration=self.configuration) self.post = 'POST' self.get = 'GET' self.delete = 'DELETE' self.put = 'PUT' self.fake_message = 'fake_message' self.result_success = { 'code': 0, 'message': 'success', 'data': 'fake_data' } self.result_success_return_0 = { 'code': 0, 'message': 'success', 'data': '0' } self.result_success_return_1 = { 'code': 0, 'message': 'success', 'data': '1' } self.result_failed = { 'code': 1, 'message': 'failed', 'data': 'fake_data' } self.result_failed_not_exist = { 'code': constants.CODE_SOURCE_NOT_EXIST, 'message': 'failed', 'data': '', } self.result_success_storage_pools = { 'code': 0, 'message': 'success', 'data': [{ 'name': 'fake_pool', 'size': '1000.0G', 'allocated': '100G', 'free': '900G', 'health': 'ONLINE', 'rwStatus': 'off' }] } @ddt.data( {'url': 'fake_url', 'data': {'fake_data': 'fake_value'}, 'method': 'POST'}, 
{'url': 'fake_url', 'data': None, 'method': 'GET'}, {'url': 'fake_url', 'data': {'fake_data': 'fake_value'}, 'method': 'DELETE'}, {'url': 'fake_url', 'data': {'fake_data': 'fake_value'}, 'method': 'PUT'}, ) @ddt.unpack def test_call(self, url, data, method): self.resthelper._token = 'fake_token' request_method = method.lower() fake_response = FakeResponse(200, self.result_success) mock_request = self.mock_object(requests, request_method, mock.Mock(return_value=fake_response)) self.resthelper.call(url, data, method) expected_url = ('https://%(ip)s:%(port)s/%(rest)s/%(url)s' % {'ip': 'fake_ip', 'port': 'fake_port', 'rest': 'nas', 'url': 'fake_url'}) header = {'Authorization': 'fake_token'} mock_request.assert_called_once_with( expected_url, data=data, headers=header, timeout=self.configuration.macrosan_timeout, verify=False) def test_call_method_fail(self): self.resthelper._token = 'fake_token' self.assertRaises(exception.ShareBackendException, self.resthelper.call, 'fake_url', 'fake_data', 'error_method') def test_call_token_fail(self): self.resthelper._token = 'fake_token' fake_result_fail = { 'code': 302, 'message': 'fake_message', 'data': 'fake_data' } self.mock_object(self.resthelper, 'do_request', mock.Mock(return_value=fake_result_fail)) self.assertRaises(exception.MacrosanBackendExeption, self.resthelper.call, 'fake_url', 'fake_data', self.post) def test_call_token_none(self): self.resthelper._token = None self.mock_object(self.resthelper, 'do_request', mock.Mock(return_value=self.result_success)) mock_l = self.mock_object(self.resthelper, 'login', mock.Mock(return_value='fake_token')) self.resthelper.call('fake_url', 'fake_data', self.post) mock_l.assert_called_once() def test_call_token_expired(self): self.resthelper._token = 'fake_token' fake_result = { 'code': 301, 'message': 'token expired', 'data': 'fake_data' } self.mock_object( self.resthelper, 'do_request', mock.Mock(side_effect=[fake_result, self.result_success])) mock_l = 
self.mock_object(self.resthelper, 'login', mock.Mock(return_value='fake_token')) self.resthelper.call('fake_url', 'fake_data', self.post) mock_l.assert_called_once() def test_call_fail(self): self.resthelper._token = 'fake_token' fake_response = FakeResponse(302, self.result_success) self.mock_object(requests, 'post', mock.Mock(return_value=fake_response)) self.assertRaises(exception.NetworkException, self.resthelper.call, 'fake_url', 'fake_data', self.post) def test_login(self): fake_result = { 'code': 0, 'message': 'Login success', 'data': 'fake_token' } mock_rd = self.mock_object(self.resthelper, 'do_request', mock.Mock(return_value=fake_result)) self.resthelper.login() login_data = {'userName': self.configuration.macrosan_nas_username, 'userPasswd': self.configuration.macrosan_nas_password} mock_rd.assert_called_once_with('rest/token', login_data, self.post) self.assertEqual('fake_token', self.resthelper._token) def test_login_fail(self): mock_rd = self.mock_object(self.resthelper, 'do_request', mock.Mock(return_value=self.result_failed)) self.assertRaises(exception.ShareBackendException, self.resthelper.login) login_data = {'userName': self.configuration.macrosan_nas_username, 'userPasswd': self.configuration.macrosan_nas_password} mock_rd.assert_called_once_with('rest/token', login_data, self.post) def test__assert_result_code(self): self.resthelper._assert_result_code(self.result_success, self.fake_message) def test__assert_result_code_fail(self): self.assertRaises(exception.ShareBackendException, self.resthelper._assert_result_code, self.result_failed, self.fake_message) def test__assert_result_data(self): self.resthelper._assert_result_data(self.result_success, self.fake_message) def test__assert_result_data_fail(self): fake_result = { 'code': 0, 'message': 'fake_message' } self.assertRaises(exception.ShareBackendException, self.resthelper._assert_result_data, fake_result, self.fake_message) def test__create_nfs_share(self): mock_call = 
self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._create_nfs_share('fake_path') url = 'rest/nfsShare' data = { 'path': 'fake_path', 'authority': 'ro', 'accessClient': '192.0.2.0', } mock_call.assert_called_once_with(url, data, self.post) def test__get_nfs_share(self): fake_result = { 'code': 0, 'message': 'success', 'data': { "path": "fake_path", "clients": ["client"], "protocol": "fake_protocol" } } mock_call = self.mock_object(self.resthelper, 'call', mock.Mock(return_value=fake_result)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_nfs_share('fake_path') expect = { "path": "fake_path", "clients": ["client"], "protocol": "fake_protocol" } self.assertEqual(expect, result) url = 'rest/nfsShare?path=fake_path' mock_call.assert_called_once_with(url, None, self.get) def test__delete_nfs_share(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._delete_nfs_share('fake_path') url = 'rest/nfsShare?path=fake_path' mock_call.assert_called_once_with(url, None, self.delete) def test__create_cifs_share(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._create_cifs_share('fake_name', 'fake_path', ['fake_user'], ['0']) url = 'rest/cifsShare' data = { 'path': 'fake_path', 'cifsName': 'fake_name', 'cifsDescription': '', 'RoList': [], 'RoListType': [], 'RwList': ['fake_user'], 'RwListType': ['0'], 'allowList': [], 'denyList': [], } mock_call.assert_called_once_with(url, data, self.post) def test__get_cifs_share(self): fake_result = { 'code': 0, 'message': 'success', 'data': { "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "fake_protocol", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"] } } mock_call = self.mock_object(self.resthelper, 'call', 
mock.Mock(return_value=fake_result)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_cifs_share('fake_path') expect = { "path": "fake_path", "cifsname": "fake_cifsname", "protocol": "fake_protocol", "roList": ["fake_ro"], "rwList": ["fake_rw"], "allowList": ["fake_allow"], "denyList": ["fake_deny"] } self.assertEqual(expect, result) url = 'rest/cifsShare?path=fake_path' mock_call.assert_called_once_with(url, None, self.get) def test__delete_cifs_share(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._delete_cifs_share('fake_name', 'fake_path') url = 'rest/cifsShare?path=fake_path&cifsName=fake_name' mock_call.assert_called_once_with(url, None, self.delete) def test__update_share_size(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._update_share_size('fake_filesystem', '2GB') url = 'rest/filesystem/fake_filesystem' data = { 'capacity': '2GB', } mock_call.assert_called_once_with(url, data, self.put) def test___create_filesystem(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._create_filesystem('fake_filesystem', 'fake_pool', '1GB') url = 'rest/filesystem' data = { 'fsName': 'fake_filesystem', 'poolName': 'fake_pool', 'createType': '0', 'fileSystemQuota': '1GB', 'fileSystemReserve': '1GB', 'wormStatus': 0, 'defaultTimeStatus': 0, 'defaultTimeNum': 0, 'defaultTimeUnit': 'year', 'isAutoLock': 0, 'isAutoDelete': 0, 'lockTime': 0 } mock_call.assert_called_once_with(url, data, self.post) def test__delete_filesystem(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._delete_filesystem('fake_filesystem') url = 'rest/filesystem/fake_filesystem' mock_call.assert_called_once_with(url, None, self.delete) 
def test__get_filesystem(self): fake_result = { 'code': 0, 'message': 'success', 'data': { 'name': 'fake_filesystem', 'poolName': 'fake_pool', } } mock_call = self.mock_object(self.resthelper, 'call', mock.Mock(return_value=fake_result)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_filesystem('fake_filesystem') expect = { 'name': 'fake_filesystem', 'poolName': 'fake_pool', } self.assertEqual(expect, result) url = 'rest/filesystem/fake_filesystem' mock_call.assert_called_once_with(url, None, self.get) def test__create_filesystem_dir(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._create_filesystem_dir('/fake_path/fake_dir') url = 'rest/fileDir' data = { 'path': '/fake_path', 'dirName': 'fake_dir', } mock_call.assert_called_once_with(url, data, self.post) def test__delete_filesystem_dir(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._delete_filesystem_dir('/fake_path/fake_dir') url = 'rest/fileDir?path=/fake_path&dirName=fake_dir' mock_call.assert_called_once_with(url, None, self.delete) @ddt.data('nfs', 'cifs') def test__allow_access_rest(self, share_proto): share_proto = share_proto.upper() mock_anar = self.mock_object(self.resthelper, '_allow_nfs_access_rest') mock_acar = self.mock_object(self.resthelper, '_allow_cifs_access_rest') self.resthelper._allow_access_rest('fake_path', 'fake_access', 'rw', share_proto) if share_proto == 'NFS': mock_anar.assert_called_once_with('fake_path', 'fake_access', 'rw') elif share_proto == 'CIFS': mock_acar.assert_called_once_with('fake_path', 'fake_access', 'rw') def test__allow_access_rest_proto_error(self): self.assertRaises(exception.InvalidInput, self.resthelper._allow_access_rest, 'fake_path', 'fake_access', 'rw', 'error_proto') def test__allow_nfs_access_rest(self): mock_call = self.mock_object( self.resthelper, 
'call', mock.Mock(return_value=self.result_success)) self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._allow_nfs_access_rest('fake_path', '172.0.0.1', 'rw') url = 'rest/nfsShareClient' data = { 'path': 'fake_path', 'client': '172.0.0.1', 'authority': 'rw', } mock_call.assert_called_once_with(url, data, self.post) @ddt.data( {'access_to': 'fake_user', 'group': False}, {'access_to': 'fake_group', 'group': True}, {'access_to': '/fake_user', 'group': False}, {'access_to': '/fake_group', 'group': True} ) @ddt.unpack def test__allow_cifs_access_rest(self, access_to, group): ug_type_list = { 'localUser': '0', 'localGroup': '1', 'adUser': '2', 'adGroup': '3', } if not group: mock_call = self.mock_object( self.resthelper, 'call', mock.Mock(return_value=self.result_success)) else: mock_call = self.mock_object( self.resthelper, 'call', mock.Mock(side_effect=[self.result_failed_not_exist, self.result_success])) self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._allow_cifs_access_rest('fake_path', access_to, 'rw') url = 'rest/cifsShareClient' actual_type = ug_type_list["localUser"] if '/' not in access_to: if not group: actual_type = ug_type_list["localUser"] access_to = access_to else: if not group: actual_type = ug_type_list["adUser"] access_to = access_to[access_to.index('/') + 1:] data = { 'path': 'fake_path', 'right': 'rw', 'ugName': access_to, 'ugType': actual_type, } if not group: mock_call.assert_called_once_with(url, data, self.post) else: mock_call.assert_called() def test__allow_cifs_access_rest_fail(self): mock_call = self.mock_object( self.resthelper, 'call', mock.Mock(side_effect=[self.result_failed_not_exist, self.result_failed_not_exist])) self.assertRaises(exception.InvalidShare, self.resthelper._allow_cifs_access_rest, 'fake_path', 'fake_user', 'rw') mock_call.assert_called() def test__get_access_from_nfs_share(self): fake_result = { 'code': 0, 'message': 'success', 'data': { "path": "fake_path", "clientName": 
"fake_client", "accessRight": "rw", } } mock_call = self.mock_object(self.resthelper, 'call', mock.Mock(return_value=fake_result)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_access_from_nfs_share('fake_path', 'fake_client') expect = { "path": "fake_path", "clientName": "fake_client", "accessRight": "rw", } self.assertEqual(expect, result) url = 'rest/nfsShareClient?path=fake_path&client=fake_client' mock_call.assert_called_once_with(url, None, self.get) @ddt.data({'access_to': 'fake_user', 'ug_type': '0', 'code': 0, 'group': False}, {'access_to': 'fake_user', 'ug_type': None, 'code': 0, 'group': False}, {'access_to': 'fake_group', 'ug_type': None, 'code': 0, 'group': True}, {'access_to': 'fake_user', 'ug_type': None, 'code': 4, 'group': False}, {'access_to': '/fake_user', 'ug_type': None, 'code': 0, 'group': False}, {'access_to': '/fake_group', 'ug_type': None, 'code': 0, 'group': True}, {'access_to': '/fake_user', 'ug_type': None, 'code': 4, 'group': False}) @ddt.unpack def test__get_access_from_cifs_share(self, access_to, ug_type, code, group): fake_result_failed = { 'code': code, 'message': 'failed', 'data': {} } fake_result = { 'code': code, 'message': 'success', 'data': { 'path': 'fake_path', 'ugName': 'fake_user', 'ugType': '0', 'accessRight': 'rw' } } fake_result_group = { 'code': code, 'message': 'success', 'data': { 'path': 'fake_path', 'ugName': 'fake_group', 'ugType': '1', 'accessRight': 'rw' } } if code == 4: fake_result = fake_result_failed ug_type_list = { 'localUser': '0', 'localGroup': '1', 'adUser': '2', 'adGroup': '3', } expect = { 'path': 'fake_path', 'ugName': 'fake_user', 'ugType': '0', 'accessRight': 'rw' } expect_group = { 'path': 'fake_path', 'ugName': 'fake_group', 'ugType': '1', 'accessRight': 'rw' } if '/' in access_to: expect['ugType'] = '2' expect_group['ugType'] = '3' fake_result['data']['ugType'] = '2' fake_result_group['data']['ugType'] = '3' if ug_type is not None: mock_call = 
self.mock_object(self.resthelper, 'call', mock.Mock(return_value=fake_result)) else: if not group: mock_call = self.mock_object( self.resthelper, 'call', mock.Mock(return_value=fake_result)) else: mock_call = self.mock_object( self.resthelper, 'call', mock.Mock(side_effect=[fake_result_failed, fake_result_group])) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_access_from_cifs_share('fake_path', access_to, ug_type) if ug_type: self.assertEqual(expect, result) url = f'rest/cifsShareClient?path=fake_path&' \ f'ugName={access_to}&ugType={ug_type}' mock_call.assert_called_once_with(url, None, self.get) else: if '/' not in access_to: if not group: actual_type = ug_type_list["localUser"] actual_access = access_to else: if not group: actual_type = ug_type_list["adUser"] actual_access = access_to[access_to.index('/') + 1:] if code == 4: self.assertIsNone(result) else: if not group: self.assertEqual(expect, result) url = f'rest/cifsShareClient?path=fake_path&' \ f'ugName={actual_access}&' \ f'ugType={actual_type}' mock_call.assert_called_once_with(url, None, self.get) else: self.assertEqual(expect_group, result) mock_call.assert_called() def test__get_all_nfs_access_rest(self): fake_result = { 'code': 0, 'message': 'success', 'data': [ { 'path': 'fake_path', 'clientName': '172.0.0.1', 'accessRight': 'rw' }, { 'path': 'default_path', 'clientName': '172.0.0.2', 'accessRight': 'rw' }] } mock_call = self.mock_object(self.resthelper, 'call', mock.Mock(return_value=fake_result)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_all_nfs_access_rest( '/manila_fakeid/manila_fakeid') expect = [ { 'share_path': 'fake_path', 'access_to': '172.0.0.1', 'access_level': 'rw' }, { 'share_path': 'default_path', 'access_to': '172.0.0.2', 'access_level': 'rw' }] self.assertEqual(expect, result) url = 'rest/allNfsShareClient?path=/manila_fakeid/manila_fakeid' mock_call.assert_called_once_with(url, None, self.get) def 
test__get_all_cifs_access_rest(self): fake_result = { 'code': 0, 'message': 'success', 'data': [ { 'path': 'fake_path', 'ugName': 'user_name', 'ugType': '0', 'accessRight': 'rw' }, { 'path': 'default_path', 'ugName': 'manilanobody', 'ugType': '0', 'accessRight': 'rw' }] } mock_call = self.mock_object(self.resthelper, 'call', mock.Mock(return_value=fake_result)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_all_cifs_access_rest( '/manila_fakeid/manila_fakeid') expect = [ { 'share_path': 'fake_path', 'access_to': 'user_name', 'ugType': '0', 'access_level': 'rw' }, { 'share_path': 'default_path', 'access_to': 'manilanobody', 'ugType': '0', 'access_level': 'rw' }] self.assertEqual(expect, result) url = 'rest/allCifsShareClient?path=/manila_fakeid/manila_fakeid' mock_call.assert_called_once_with(url, None, self.get) def test__change_nfs_access_rest(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._change_nfs_access_rest( '/manila_fakeid/manila_fakeid', '172.0.0.1', 'rw') url = 'rest/nfsShareClient' data = { 'path': '/manila_fakeid/manila_fakeid', 'oldNfsClientName': '172.0.0.1', 'clientName': '', 'accessRight': 'rw', 'allSquash': '', 'rootSquash': '', 'secure': '', 'anonuid': '', 'anongid': '', } mock_call.assert_called_once_with(url, data, self.put) def test__change_cifs_access_rest(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._change_cifs_access_rest( '/manila_fakeid/manila_fakeid', '/fake_user', 'rw', '0') url = 'rest/cifsShareClient' data = { 'path': '/manila_fakeid/manila_fakeid', 'right': 'rw', 'ugName': 'fake_user', 'ugType': '0', } mock_call.assert_called_once_with(url, data, self.put) def test__delete_nfs_access_rest(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') 
self.resthelper._delete_nfs_access_rest( '/manila_fakeid/manila_fakeid', '*') url = 'rest/nfsShareClient?path=/manila_fakeid/manila_fakeid&client=*' mock_call.assert_called_once_with(url, None, self.delete) def test__delete_cifs_access_rest(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._delete_cifs_access_rest( '/manila_fakeid/manila_fakeid', 'fake_user', '0') url = 'rest/cifsShareClient?path=/manila_fakeid/manila_fakeid' \ '&ugName=fake_user&ugType=0' mock_call.assert_called_once_with(url, None, self.delete) def test__get_nfs_service_status(self): fake_result = { 'code': 0, 'message': 'success', 'data': { 'serviceStatus': constants.NFS_ENABLED, 'nfs3Status': constants.NFS_SUPPORTED, 'nfs4Status': constants.NFS_SUPPORTED } } mock_call = self.mock_object(self.resthelper, 'call', mock.Mock(return_value=fake_result)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_nfs_service_status() expect = { 'serviceStatus': constants.NFS_ENABLED, 'nfs3Status': constants.NFS_SUPPORTED, 'nfs4Status': constants.NFS_SUPPORTED } self.assertEqual(expect, result) url = 'rest/nfsService' mock_call.assert_called_once_with(url, None, self.get) def test__start_nfs_service(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._start_nfs_service() url = 'rest/nfsService' data = { "openStatus": "1", } mock_call.assert_called_once_with(url, data, self.put) def test__config_nfs_service(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._config_nfs_service() url = 'rest/nfsConfig' data = { 'configNfs3': "yes", 'configNfs4': "yes", } mock_call.assert_called_once_with(url, data, self.put) def test__get_cifs_service_status(self): mock_call = self.mock_object( self.resthelper, 'call', 
mock.Mock(return_value=self.result_success_return_1)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_cifs_service_status() self.assertEqual('1', result) url = 'rest/cifsService' mock_call.assert_called_once_with(url, None, self.get) def test__start_cifs_service(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._start_cifs_service() url = 'rest/cifsService' data = { 'openStatus': '1', } mock_call.assert_called_once_with(url, data, self.put) def test__config_cifs_service(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._config_cifs_service() url = 'rest/cifsConfig' data = { 'workName': 'manila', 'description': '', 'access_way': 'user', 'isCache': 'no', 'adsName': '', 'adsIP': '', 'adsUSER': '', 'adsPASSWD': '', 'allowList': [], 'denyList': [], } mock_call.assert_called_once_with(url, data, self.put) def test__get_all_pool(self): mock_call = self.mock_object( self.resthelper, 'call', mock.Mock(return_value=self.result_success_storage_pools)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._get_all_pool() self.assertEqual(self.result_success_storage_pools, result) url = 'rest/storagepool' mock_call.assert_called_once_with(url, None, self.get) def test__query_user(self): mock_call = self.mock_object( self.resthelper, 'call', mock.Mock(return_value=self.result_success_return_0)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._query_user('fake_user') self.assertEqual('0', result) url = 'rest/user/fake_user' mock_call.assert_called_once_with(url, None, self.get) def test__add_localuser(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._add_localuser('fake_user', 'fake_passwd', 'fake_group') url = 'rest/localUser' data 
= { 'userName': 'fake_user', 'mgGroup': 'fake_group', 'userPasswd': 'fake_passwd', 'unusedGroup': []} mock_call.assert_called_once_with(url, data, self.post) def test__query_group(self): mock_call = self.mock_object( self.resthelper, 'call', mock.Mock(return_value=self.result_success_return_0)) self.mock_object(self.resthelper, '_assert_result_code') result = self.resthelper._query_group('fake_group') self.assertEqual('0', result) url = 'rest/group/fake_group' mock_call.assert_called_once_with(url, None, self.get) def test__add_localgroup(self): mock_call = self.mock_object(self.resthelper, 'call') self.mock_object(self.resthelper, '_assert_result_code') self.resthelper._add_localgroup('fake_group') url = 'rest/localGroup' data = {'groupName': 'fake_group'} mock_call.assert_called_once_with(url, data, self.post) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0256703 manila-21.0.0/manila/tests/share/drivers/maprfs/0000775000175000017500000000000000000000000021531 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/maprfs/__init__.py0000664000175000017500000000000000000000000023630 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/maprfs/test_maprfs.py0000664000175000017500000011367600000000000024450 0ustar00zuulzuul00000000000000# Copyright (c) 2016, MapR Technologies # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for MapRFS native protocol driver module.""" from unittest import mock from oslo_concurrency import processutils from oslo_config import cfg from manila import context from manila import exception import manila.share.configuration as config from manila.share.drivers.maprfs import driver_util as mapru import manila.share.drivers.maprfs.maprfs_native as maprfs from manila import ssh_utils from manila import test from manila.tests import fake_share from manila import utils CONF = cfg.CONF class MapRFSNativeShareDriverTestCase(test.TestCase): """Tests MapRFSNativeShareDriver.""" def setUp(self): super(MapRFSNativeShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._hdfs_execute = mock.Mock(return_value=('', '')) self.local_ip = '192.168.1.1' CONF.set_default('driver_handles_share_servers', False) CONF.set_default('maprfs_clinode_ip', [self.local_ip]) CONF.set_default('maprfs_ssh_name', 'fake_sshname') CONF.set_default('maprfs_ssh_pw', 'fake_sshpw') CONF.set_default('maprfs_ssh_private_key', 'fake_sshkey') CONF.set_default('maprfs_rename_managed_volume', True) self.fake_conf = config.Configuration(None) self.cluster_name = 'fake' export_locations = {0: {'path': '/share-0'}} export_locations[0]['el_metadata'] = { 'volume-name': 'share-0'} self.share = fake_share.fake_share(share_proto='MAPRFS', name='share-0', size=2, share_id=1, export_locations=export_locations, export_location='/share-0') self.snapshot = fake_share.fake_snapshot(share_proto='MAPRFS', name='fake', share_name=self.share['name'], share_id=self.share['id'], 
share=self.share, share_instance=self.share, provider_location='fake') self.access = fake_share.fake_access(access_type='user', access_to='fake', access_level='rw') self.snapshot = self.snapshot.values self.snapshot.update(share_instance=self.share) self.export_path = 'maprfs:///share-0 -C -Z -N fake' self.fakesnapshot_path = '/share-0/.snapshot/snapshot-0' self.hadoop_bin = '/usr/bin/hadoop' self.maprcli_bin = '/usr/bin/maprcli' self.mock_object(utils, 'execute') self.mock_object( mapru.socket, 'gethostname', mock.Mock(return_value='testserver')) self.mock_object( mapru.socket, 'gethostbyname_ex', mock.Mock(return_value=( 'localhost', ['localhost.localdomain', mapru.socket.gethostname.return_value], ['127.0.0.1', self.local_ip]))) self._driver = maprfs.MapRFSNativeShareDriver( configuration=self.fake_conf) self._driver.do_setup(self._context) self._driver.api.get_share_metadata = mock.Mock(return_value={}) self._driver.api.update_share_metadata = mock.Mock() def test_do_setup(self): self._driver.do_setup(self._context) self.assertIsNotNone(self._driver._maprfs_util) self.assertEqual([self.local_ip], self._driver._maprfs_util.hosts) def test_check_for_setup_error(self): self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver._maprfs_util.check_state = mock.Mock(return_value=True) self._driver._maprfs_util.maprfs_ls = mock.Mock() self._driver.check_for_setup_error() def test_check_for_setup_error_exception_config(self): self._driver.configuration.maprfs_clinode_ip = None self.assertRaises(exception.MapRFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_exception_no_dir(self): self._driver._maprfs_util.check_state = mock.Mock(return_value=True) self._driver._maprfs_util.maprfs_ls = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.MapRFSException, self._driver.check_for_setup_error) def test_check_for_setup_error_exception_cldb_state(self): self._driver._check_maprfs_state = 
mock.Mock(return_value=False) self.assertRaises(exception.MapRFSException, self._driver.check_for_setup_error) def test__check_maprfs_state_healthy(self): fake_out = """Found 8 items drwxr-xr-x - mapr mapr 0 2016-07-29 05:38 /apps""" self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_out, '')) result = self._driver._check_maprfs_state() self._driver._maprfs_util._execute.assert_called_once_with( self.hadoop_bin, 'fs', '-ls', '/', check_exit_code=False) self.assertTrue(result) def test__check_maprfs_state_down(self): fake_out = "No CLDB" self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_out, '')) result = self._driver._check_maprfs_state() self._driver._maprfs_util._execute.assert_called_once_with( self.hadoop_bin, 'fs', '-ls', '/', check_exit_code=False) self.assertFalse(result) def test__check_maprfs_state_exception(self): self._driver._maprfs_util._execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.MapRFSException, self._driver._check_maprfs_state) self._driver._maprfs_util._execute.assert_called_once_with( self.hadoop_bin, 'fs', '-ls', '/', check_exit_code=False) def test_create_share_unsupported_proto(self): self._driver.api.get_share_metadata = mock.Mock(return_value={}) self._driver._get_share_path = mock.Mock() self.assertRaises(exception.MapRFSException, self._driver.create_share, self._context, fake_share.fake_share(share_id=1), share_server=None) self.assertFalse(self._driver._get_share_path.called) def test_manage_existing(self): self._driver._maprfs_util.get_volume_info_by_path = mock.Mock( return_value={'quota': 1024, 'totalused': 966, 'volumename': 'fake'}) self._driver._maprfs_util._execute = mock.Mock() self._driver._maprfs_util.get_cluster_name = mock.Mock( return_value="fake") def test_manage_existing_no_rename(self): self._driver._maprfs_util.get_volume_info_by_path = mock.Mock( return_value={'quota': 1024, 'totalused': 966, 'volumename': 'fake'}) 
self._driver._maprfs_util._execute = mock.Mock() self._driver._maprfs_util.get_cluster_name = mock.Mock( return_value="fake") result = self._driver.manage_existing(self.share, {'rename': 'no'}) self.assertEqual(1, result['size']) def test_manage_existing_exception(self): self._driver._maprfs_util.get_volume_info_by_path = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.MapRFSException, self._driver.manage_existing, self.share, {}) def test_manage_existing_invalid_share(self): def fake_execute(self, *cmd, **kwargs): check_exit_code = kwargs.get('check_exit_code', True) if check_exit_code: raise exception.ProcessExecutionError else: return 'No such volume', 0 self._driver._maprfs_util._execute = fake_execute mock_execute = self._driver.manage_existing self.assertRaises(exception.ManageInvalidShare, mock_execute, self.share, {}) def test_manage_existing_snapshot(self): self._driver._maprfs_util.get_snapshot_list = mock.Mock( return_value=[self.snapshot['provider_location']]) self._driver._maprfs_util.maprfs_du = mock.Mock(return_value=11) update = self._driver.manage_existing_snapshot(self.snapshot, {}) self.assertEqual(1, update['size']) def test_manage_existing_snapshot_invalid(self): self._driver._maprfs_util.get_snapshot_list = mock.Mock( return_value=[]) mock_execute = self._driver.manage_existing_snapshot self.assertRaises(exception.ManageInvalidShareSnapshot, mock_execute, self.snapshot, {}) def test_manage_existing_snapshot_exception(self): self._driver._maprfs_util.get_snapshot_list = mock.Mock( side_effect=exception.ProcessExecutionError) mock_execute = self._driver.manage_existing_snapshot self.assertRaises(exception.MapRFSException, mock_execute, self.snapshot, {}) def test_manage_existing_with_no_quota(self): self._driver._maprfs_util.get_volume_info_by_path = mock.Mock( return_value={'quota': 0, 'totalused': 1999, 'volumename': 'fake'}) self._driver._maprfs_util.rename_volume = mock.Mock() 
self._driver._maprfs_util.get_cluster_name = mock.Mock( return_value="fake") result = self._driver.manage_existing(self.share, {}) self.assertEqual(2, result['size']) def test__set_volume_size(self): volume = self._driver._volume_name(self.share['name']) sizestr = str(self.share['size']) + 'G' self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver._maprfs_util.set_volume_size(volume, self.share['size']) self._driver._maprfs_util._execute.assert_called_once_with( self.maprcli_bin, 'volume', 'modify', '-name', volume, '-quota', sizestr) def test_extend_share(self): volume = self._driver._volume_name(self.share['name']) self._driver._maprfs_util.set_volume_size = mock.Mock() self._driver.extend_share(self.share, self.share['size']) self._driver._maprfs_util.set_volume_size.assert_called_once_with( volume, self.share['size']) def test_extend_exception(self): self._driver._maprfs_util.set_volume_size = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.MapRFSException, self._driver.extend_share, self.share, self.share['size']) def test_shrink_share(self): volume = self._driver._volume_name(self.share['name']) self._driver._maprfs_util.set_volume_size = mock.Mock() self._driver._maprfs_util.get_volume_info = mock.Mock( return_value={'total_user': 0}) self._driver.shrink_share(self.share, self.share['size']) self._driver._maprfs_util.set_volume_size.assert_called_once_with( volume, self.share['size']) def test_update_access_add(self): aces = { 'volumeAces': { 'readAce': 'u:fake|fake:fake', 'writeAce': 'u:fake', } } volume = self._driver._volume_name(self.share['name']) self._driver._maprfs_util.get_volume_info = mock.Mock( return_value=aces) self._driver._maprfs_util.group_exists = mock.Mock(return_value=True) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.update_access(self._context, self.share, [self.access], [self.access], [], []) 
self._driver._maprfs_util._execute.assert_any_call( self.maprcli_bin, 'volume', 'modify', '-name', volume, '-readAce', 'g:' + self.access['access_to'], '-writeAce', 'g:' + self.access['access_to']) def test_update_access_add_no_user_no_group_exists(self): aces = { 'volumeAces': { 'readAce': 'u:fake|fake:fake', 'writeAce': 'u:fake', } } volume = self._driver._volume_name(self.share['name']) self._driver._maprfs_util.get_volume_info = mock.Mock( return_value=aces) self._driver._maprfs_util.group_exists = mock.Mock(return_value=False) self._driver._maprfs_util.user_exists = mock.Mock(return_value=False) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.update_access(self._context, self.share, [self.access], [self.access], [], []) self._driver._maprfs_util._execute.assert_any_call( self.maprcli_bin, 'volume', 'modify', '-name', volume, '-readAce', 'g:' + self.access['access_to'], '-writeAce', 'g:' + self.access['access_to']) def test_update_access_delete(self): aces = { 'volumeAces': { 'readAce': 'p', 'writeAce': 'p', } } volume = self._driver._volume_name(self.share['name']) self._driver._maprfs_util.get_volume_info = mock.Mock( return_value=aces) self._driver._maprfs_util.group_exists = mock.Mock(return_value=True) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.update_access(self._context, self.share, [], [], [self.access], []) self._driver._maprfs_util._execute.assert_any_call( self.maprcli_bin, 'volume', 'modify', '-name', volume, '-readAce', '', '-writeAce', '') def test_update_access_recover(self): aces = { 'volumeAces': { 'readAce': 'u:fake', 'writeAce': 'u:fake', } } volume = self._driver._volume_name(self.share['name']) self._driver._maprfs_util.get_volume_info = mock.Mock( return_value=aces) self._driver._maprfs_util.group_exists = mock.Mock(return_value=False) self._driver._maprfs_util.user_exists = mock.Mock(return_value=True) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 
0)) self._driver.update_access(self._context, self.share, [self.access], [], [], []) self._driver._maprfs_util._execute.assert_any_call( self.maprcli_bin, 'volume', 'modify', '-name', volume, '-readAce', 'u:' + self.access['access_to'], '-writeAce', 'u:' + self.access['access_to']) def test_update_access_share_not_exists(self): self._driver._maprfs_util.volume_exists = mock.Mock( return_value=False) self._driver._maprfs_util.group_exists = mock.Mock(return_value=True) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.update_access(self._context, self.share, [self.access], [], [], []) self._driver._maprfs_util._execute.assert_not_called() def test_update_access_exception(self): aces = { 'volumeAces': { 'readAce': 'p', 'writeAce': 'p', } } self._driver._maprfs_util.get_volume_info = mock.Mock( return_value=aces) self._driver._maprfs_util.group_exists = mock.Mock(return_value=True) utils.execute = mock.Mock( side_effect=exception.ProcessExecutionError(stdout='ERROR')) self.assertRaises(exception.MapRFSException, self._driver.update_access, self._context, self.share, [self.access], [], [], []) def test_update_access_invalid_access(self): access = fake_share.fake_access(access_type='ip', access_to='fake', access_level='rw') self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, self._context, self.share, [access], [], [], []) def test_ensure_share(self): self._driver._maprfs_util.volume_exists = mock.Mock( return_value=True) self._driver._maprfs_util.get_volume_info = mock.Mock( return_value={'mountdir': self.share['export_location']}) self._driver._maprfs_util.get_cluster_name = mock.Mock( return_value=self.cluster_name) result = self._driver.ensure_share(self._context, self.share) self.assertEqual(self.export_path, result[0]['path']) def test_create_share(self): size_str = str(self.share['size']) + 'G' path = self._driver._share_dir(self.share['name']) self._driver.api.get_share_metadata = mock.Mock( 
return_value={'_fake': 'fake'}) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver._maprfs_util.set_volume_size = mock.Mock() self._driver._maprfs_util.maprfs_chmod = mock.Mock() self._driver._maprfs_util.get_cluster_name = mock.Mock( return_value=self.cluster_name) self._driver.create_share(self._context, self.share) self._driver._maprfs_util._execute.assert_called_once_with( self.maprcli_bin, 'volume', 'create', '-name', self.share['name'], '-path', path, '-quota', size_str, '-readAce', '', '-writeAce', '', '-fake', 'fake') self._driver._maprfs_util.maprfs_chmod.assert_called_once_with(path, '777') def test_create_share_with_custom_name(self): size_str = str(self.share['size']) + 'G' self._driver.api.get_share_metadata = mock.Mock( return_value={'_name': 'fake', '_path': 'fake'}) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver._maprfs_util.set_volume_size = mock.Mock() self._driver._maprfs_util.maprfs_chmod = mock.Mock() self._driver._maprfs_util.get_cluster_name = mock.Mock( return_value=self.cluster_name) self._driver.create_share(self._context, self.share) self._driver._maprfs_util._execute.assert_called_once_with( self.maprcli_bin, 'volume', 'create', '-name', 'fake', '-path', 'fake', '-quota', size_str, '-readAce', '', '-writeAce', '') self._driver._maprfs_util.maprfs_chmod.assert_called_once_with('fake', '777') def test_create_share_exception(self): self._driver.api.get_share_metadata = mock.Mock(return_value={}) self._driver._maprfs_util._execute = mock.Mock( side_effect=exception.ProcessExecutionError) self._driver._maprfs_util.set_volume_size = mock.Mock() self._driver._maprfs_util.maprfs_chmod = mock.Mock() self._driver._maprfs_util.get_cluster_name = mock.Mock( return_value=self.cluster_name) self.assertRaises(exception.MapRFSException, self._driver.create_share, self._context, self.share) def test_create_share_from_snapshot(self): fake_snapshot = dict(self.snapshot) 
fake_snapshot.update(share_instance={'share_id': 1}) size_str = str(self.share['size']) + 'G' path = self._driver._share_dir(self.share['name']) snapthot_path = self._driver._get_snapshot_path(self.snapshot) + '/*' self._driver._maprfs_util._execute = mock.Mock( return_value=('Found', 0)) self._driver._maprfs_util.set_volume_size = mock.Mock() self._driver._maprfs_util.get_cluster_name = mock.Mock( return_value=self.cluster_name) self._driver.api.get_share_metadata = mock.Mock( return_value={'_fake': 'fake', 'fake2': 'fake2'}) mock_execute = self._driver._maprfs_util._execute self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot) mock_execute.assert_any_call(self.hadoop_bin, 'fs', '-cp', '-p', snapthot_path, path) mock_execute.assert_any_call(self.maprcli_bin, 'volume', 'create', '-name', self.share['name'], '-path', path, '-quota', size_str, '-readAce', '', '-writeAce', '', '-fake', 'fake') def test_create_share_from_snapshot_wrong_tenant(self): fake_snapshot = dict(self.snapshot) fake_snapshot.update(share_instance={'share_id': 10}) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver._maprfs_util.set_volume_size = mock.Mock() self._driver._maprfs_util.get_cluster_name = mock.Mock( return_value=self.cluster_name) def fake_meta(context, share): return {'_tenantuser': 'fake'} if share['id'] == 10 else {} self._driver.api.get_share_metadata = fake_meta self.assertRaises(exception.MapRFSException, self._driver.create_share_from_snapshot, self._context, self.share, fake_snapshot) def test_create_share_from_snapshot_exception(self): fake_snapshot = dict(self.snapshot) fake_snapshot.update(share_instance={'share_id': 10}) self._driver._maprfs_util._execute = mock.Mock( return_value=('Found 0', 0)) self._driver._maprfs_util.maprfs_cp = mock.Mock( side_effect=exception.ProcessExecutionError) self._driver.api.get_share_metadata = mock.Mock( return_value={'_tenantuser': 'fake'}) 
self.assertRaises(exception.MapRFSException, self._driver.create_share_from_snapshot, self._context, self.share, self.snapshot) def test_delete_share(self): self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.delete_share(self._context, self.share) self._driver._maprfs_util._execute.assert_called_once_with( self.maprcli_bin, 'volume', 'remove', '-name', self.share['name'], '-force', 'true', check_exit_code=False) def test_delete_share_skip(self): self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.api.get_share_metadata = mock.Mock( return_value={'_name': 'error'}) self._driver.delete_share(self._context, self.share) self._driver._maprfs_util._execute.assert_not_called() def test_delete_share_exception(self): self._driver._maprfs_util._execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.MapRFSException, self._driver.delete_share, self._context, self.share) def test_delete_share_not_exist(self): self._driver._maprfs_util._execute = mock.Mock( return_value=('No such volume', 0)) self._driver.delete_share(self._context, self.share) def test_create_snapshot(self): volume = self._driver._volume_name(self.share['name']) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.create_snapshot(self._context, self.snapshot) self._driver._maprfs_util._execute.assert_called_once_with( self.maprcli_bin, 'volume', 'snapshot', 'create', '-snapshotname', self.snapshot['name'], '-volume', volume) def test_create_snapshot_exception(self): self._driver._maprfs_util._execute = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.MapRFSException, self._driver.create_snapshot, self._context, self.snapshot) def test_delete_snapshot(self): volume = self._driver._volume_name(self.share['name']) self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.delete_snapshot(self._context, self.snapshot) 
self._driver._maprfs_util._execute.assert_called_once_with( self.maprcli_bin, 'volume', 'snapshot', 'remove', '-snapshotname', self.snapshot['name'], '-volume', volume, check_exit_code=False) def test_delete_snapshot_exception(self): self._driver._maprfs_util._execute = mock.Mock( return_value=('ERROR (fake)', None)) self.assertRaises(exception.MapRFSException, self._driver.delete_snapshot, self._context, self.snapshot) def test__execute(self): first_host_skip = 'first' available_host = 'available' hosts = [first_host_skip, self.local_ip, available_host, 'extra'] test_config = mock.Mock() test_config.maprfs_clinode_ip = hosts test_config.maprfs_ssh_name = 'fake_maprfs_ssh_name' test_maprfs_util = mapru.get_version_handler(test_config) # mutable container done = [False] skips = [] def fake_ssh_run(host, cmd, check_exit_code): if host == available_host: done[0] = True return '', 0 else: skips.append(host) raise Exception() test_maprfs_util._run_ssh = fake_ssh_run test_maprfs_util._execute('fake', 'cmd') self.assertTrue(done[0]) self.assertEqual(available_host, test_maprfs_util.hosts[0]) self.assertEqual(first_host_skip, test_maprfs_util.hosts[2]) self.assertEqual([first_host_skip], skips) utils.execute.assert_called_once_with( 'sudo', 'su', '-', 'fake_maprfs_ssh_name', '-c', 'fake cmd', check_exit_code=True) def test__execute_exeption(self): utils.execute = mock.Mock(side_effect=Exception) self.assertRaises(exception.ProcessExecutionError, self._driver._maprfs_util._execute, "fake", "cmd") def test__execute_native_exeption(self): utils.execute = mock.Mock( side_effect=exception.ProcessExecutionError(stdout='fake')) self.assertRaises(exception.ProcessExecutionError, self._driver._maprfs_util._execute, "fake", "cmd") def test__execute_local(self): self.mock_object(utils, 'execute', mock.Mock(return_value=("fake", 0))) self._driver._maprfs_util._execute("fake", "cmd") utils.execute.assert_called_once_with('sudo', 'su', '-', 'fake_sshname', '-c', 'fake cmd', 
check_exit_code=True) def test_share_shrink_error(self): fake_info = { 'totalused': 1024, 'quota': 2024 } self._driver._maprfs_util._execute = mock.Mock() self._driver._maprfs_util.get_volume_info = mock.Mock( return_value=fake_info) self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, self.share, 1) def test__get_volume_info(self): fake_out = """ {"data": [{"mounted":1,"quota":"1024","used":"0","totalused":"0"}]} """ self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_out, 0)) result = self._driver._maprfs_util.get_volume_info('fake_name') self.assertEqual('0', result['used']) def test__get_volume_info_by_path(self): fake_out = """ {"data": [{"mounted":1,"quota":"1024","used":"0","totalused":"0"}]} """ self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_out, 0)) result = self._driver._maprfs_util.get_volume_info_by_path('fake_path') self.assertEqual('0', result['used']) def test__get_volume_info_by_path_not_exist(self): fake_out = "No such volume" self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_out, 0)) result = self._driver._maprfs_util.get_volume_info_by_path( 'fake_path', check_if_exists=True) self.assertIsNone(result) def test_get_share_stats_refresh_false(self): self._driver._stats = {'fake_key': 'fake_value'} result = self._driver.get_share_stats(False) self.assertEqual(self._driver._stats, result) def test_get_share_stats_refresh_true(self): self._driver._maprfs_util.fs_capacity = mock.Mock( return_value=(1143554.0, 124111.0)) result = self._driver.get_share_stats(True) expected_keys = [ 'qos', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'reserved_snapshot_percentage', 'reserved_share_extend_percentage', 'vendor_name', 'storage_protocol', ] for key in expected_keys: self.assertIn(key, result) self.assertEqual('MAPRFS', result['storage_protocol']) 
self._driver._maprfs_util.fs_capacity.assert_called_once_with() def test_get_share_stats_refresh_exception(self): self._driver._maprfs_util.fs_capacity = mock.Mock( side_effect=exception.ProcessExecutionError) self.assertRaises(exception.MapRFSException, self._driver.get_share_stats, True) def test__get_available_capacity(self): fake_out = """Filesystem Size Used Available Use% maprfs:/// 26367492096 1231028224 25136463872 5% """ self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_out, '')) total, free = self._driver._maprfs_util.fs_capacity() self._driver._maprfs_util._execute.assert_called_once_with( self.hadoop_bin, 'fs', '-df') self.assertEqual(26367492096, total) self.assertEqual(25136463872, free) def test__get_available_capacity_exception(self): fake_out = 'fake' self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_out, '')) self.assertRaises(exception.ProcessExecutionError, self._driver._maprfs_util.fs_capacity) def test__get_snapshot_list(self): fake_out = """{"data":[{"snapshotname":"fake-snapshot"}]}""" self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_out, None)) snapshot_list = self._driver._maprfs_util.get_snapshot_list( volume_name='fake', volume_path='fake') self.assertEqual(['fake-snapshot'], snapshot_list) def test__cluster_name(self): fake_info = """{ "data":[ { "version":"fake", "cluster":{ "name":"fake", "secure":false, "ip":"10.10.10.10", "id":"7133813101868836065", "nodesUsed":1, "totalNodesAllowed":-1 } } ] } """ self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_info, 0)) name = self._driver._maprfs_util.get_cluster_name() self.assertEqual('fake', name) def test__cluster_name_exception(self): fake_info = 'fake' self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_info, 0)) self.assertRaises(exception.ProcessExecutionError, self._driver._maprfs_util.get_cluster_name) def test__run_ssh(self): ssh_output = 'fake_ssh_output' cmd_list = ['fake', 'cmd'] ssh = mock.Mock() 
ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(return_value=False) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(return_value=ssh) self.mock_object(ssh_utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) result = self._driver._maprfs_util._run_ssh( self.local_ip, cmd_list, check_exit_code=False) ssh_utils.SSHPool.assert_called_once_with( self._driver.configuration.maprfs_clinode_ip[0], self._driver.configuration.maprfs_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.maprfs_ssh_name, password=self._driver.configuration.maprfs_ssh_pw, privatekey=self._driver.configuration.maprfs_ssh_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) ssh_pool.create.assert_called() ssh.get_transport().is_active.assert_called_once_with() processutils.ssh_execute.assert_called_once_with( ssh, 'fake cmd', check_exit_code=False) self.assertEqual(ssh_output, result) def test__run_ssh_exception(self): cmd_list = ['fake', 'cmd'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(return_value=True) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(return_value=ssh) self.mock_object(ssh_utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) self.mock_object(processutils, 'ssh_execute', mock.Mock( side_effect=exception.ProcessExecutionError)) self.assertRaises(exception.ProcessExecutionError, self._driver._maprfs_util._run_ssh, self.local_ip, cmd_list) ssh_utils.SSHPool.assert_called_once_with( self._driver.configuration.maprfs_clinode_ip[0], self._driver.configuration.maprfs_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.maprfs_ssh_name, password=self._driver.configuration.maprfs_ssh_pw, privatekey=self._driver.configuration.maprfs_ssh_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, 
max_size=self._driver.configuration.ssh_max_pool_conn) ssh_pool.create.assert_called_once_with() ssh.get_transport().is_active.assert_called_once_with() processutils.ssh_execute.assert_called_once_with( ssh, 'fake cmd', check_exit_code=False) def test__share_dir(self): self._driver._base_volume_dir = '/volumes' share_dir = '/volumes/' + self.share['name'] actual_dir = self._driver._share_dir(self.share['name']) self.assertEqual(share_dir, actual_dir) def test__get_volume_name(self): volume_name = self._driver._get_volume_name("fake", self.share) self.assertEqual('share-0', volume_name) def test__maprfs_du(self): self._driver._maprfs_util._execute = mock.Mock( return_value=('1024 /', 0)) size = self._driver._maprfs_util.maprfs_du('/') self._driver._maprfs_util._execute.assert_called() self.assertEqual(1024, size) def test__maprfs_ls(self): self._driver._maprfs_util._execute = mock.Mock( return_value=('fake', 0)) self._driver._maprfs_util.maprfs_ls('/') self._driver._maprfs_util._execute.assert_called_with(self.hadoop_bin, 'fs', '-ls', '/') def test_rename_volume(self): self._driver._maprfs_util._execute = mock.Mock( return_value=('fake', 0)) self._driver._maprfs_util.rename_volume('fake', 'newfake') self._driver._maprfs_util._execute.assert_called_with(self.maprcli_bin, 'volume', 'rename', '-name', 'fake', '-newname', 'newfake') def test__run_as_user(self): cmd = ['fake', 'cmd'] u_cmd = self._driver._maprfs_util._as_user(cmd, 'user') self.assertEqual(['sudo', 'su', '-', 'user', '-c', 'fake cmd'], u_cmd) def test__add_params(self): params = {'p1': 1, 'p2': 2, 'p3': '3'} cmd = ['fake', 'cmd'] cmd_with_params = self._driver._maprfs_util._add_params(cmd, **params) self.assertEqual(cmd[:2], cmd_with_params[:2]) def test_get_network_allocations_number(self): number = self._driver.get_admin_network_allocations_number() self.assertEqual(0, number) def test__user_exists(self): fake_out = 'user:x:1000:1000::/opt/user:/bin/bash' self._driver._maprfs_util._execute = mock.Mock( 
return_value=(fake_out, 0)) result = self._driver._maprfs_util.user_exists('user') self.assertTrue(result) def test__group_exists(self): fake_out = 'user:x:1000:' self._driver._maprfs_util._execute = mock.Mock( return_value=(fake_out, 0)) result = self._driver._maprfs_util.group_exists('user') self.assertTrue(result) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0256703 manila-21.0.0/manila/tests/share/drivers/netapp/0000775000175000017500000000000000000000000021530 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/__init__.py0000664000175000017500000000000000000000000023627 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0256703 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/0000775000175000017500000000000000000000000023503 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/__init__.py0000664000175000017500000000000000000000000025602 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0296702 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/client/0000775000175000017500000000000000000000000024761 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/client/__init__.py0000664000175000017500000000000000000000000027060 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/client/fakes.py0000664000175000017500000045565600000000000026451 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from lxml import etree import requests from manila.share.drivers.netapp.dataontap.client import api CONNECTION_INFO = { 'hostname': 'hostname', 'transport_type': 'https', 'ssl_cert_path': '/etc/ssl/certs/', 'port': 443, 'username': 'admin', 'password': 'passw0rd', 'api_trace_pattern': '(.*)', 'client_api': 'rest', 'async_rest_timeout': 60, 'private_key_file': '/fake_private_key.pem', 'certificate_file': '/fake_cert.pem', 'ca_certificate_file': '/fake_ca_cert.crt', 'certificate_host_validation': False } NO_SNAPRESTORE_LICENSE = '"SnapRestore" is not licensed in the cluster.' 
FAKE_UUID = 'b32bab78-82be-11ec-a8a3-0242ac120002' CLUSTER_NAME = 'fake_cluster' REMOTE_CLUSTER_NAME = 'fake_cluster_2' CLUSTER_ADDRESS_1 = 'fake_cluster_address' CLUSTER_ADDRESS_2 = 'fake_cluster_address_2' VERSION = 'NetApp Release 8.2.1 Cluster-Mode: Fri Mar 21 14:25:07 PDT 2014' VERSION_NO_DARE = 'NetApp Release 9.1.0: Tue May 10 19:30:23 2016 <1no-DARE>' VERSION_TUPLE = (9, 1, 0) NODE_NAME = 'fake_node1' NODE_NAME2 = 'fake_node2' NODE_NAMES = (NODE_NAME, NODE_NAME2) VSERVER_NAME = 'fake_vserver' VSERVER_NAME_2 = 'fake_vserver_2' VSERVER_PEER_NAME = 'fake_vserver_peer' VSERVER_PEER_STATE = 'peered' FAKE_CONFIG_NAME = "fake_config_name" FAKE_CONFIG_UUID = "fake_config_uuid" FAKE_KEY_ID = "fake_key_id" FAKE_KEYSTONE_URL = "fake_keystone_url" FAKE_APPLICATION_CRED_ID = "fake_application_cred_id" FAKE_APPLICATION_CRED_SECRET = "fake_application_cred_secret" ADMIN_VSERVER_NAME = 'fake_admin_vserver' NODE_VSERVER_NAME = 'fake_node_vserver' NFS_VERSIONS = ['nfs3', 'nfs4.0'] SECURITY_CERT_DEFAULT_EXPIRE_DAYS = 365 SECURITY_CERT_LARGE_EXPIRE_DAYS = 3652 DELETE_RETENTION_HOURS = 12 ROOT_AGGREGATE_NAMES = ('root_aggr1', 'root_aggr2') ROOT_VOLUME_AGGREGATE_NAME = 'fake_root_aggr' ROOT_VOLUME_NAME = 'fake_root_volume' VOLUME_NAMES = ('volume1', 'volume2') SHARE_AGGREGATE_NAME = 'fake_aggr1' SHARE_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2') SHARE_AGGREGATE_NAMES_LIST = ['fake_aggr1', 'fake_aggr2'] SHARE_AGGREGATE_RAID_TYPES = ('raid4', 'raid_dp') SHARE_AGGREGATE_DISK_TYPE = 'FCAL' SHARE_AGGREGATE_DISK_TYPES = ['SATA', 'SSD'] EFFECTIVE_TYPE = 'fake_effective_type1' SHARE_NAME = 'fake_share' SHARE_SIZE = '1000000000' SHARE_USED_SIZE = '3456796' SHARE_NAME_2 = 'fake_share_2' FLEXGROUP_STYLE_EXTENDED = 'flexgroup' FLEXVOL_STYLE_EXTENDED = 'flexvol' SNAPSHOT_NAME = 'fake_snapshot' CG_SNAPSHOT_ID = 'fake_cg_id' PARENT_SHARE_NAME = 'fake_parent_share' PARENT_SNAPSHOT_NAME = 'fake_parent_snapshot' MAX_FILES = 5000 LANGUAGE = 'fake_language' SNAPSHOT_POLICY_NAME = 
'fake_snapshot_policy' EXPORT_POLICY_NAME = 'fake_export_policy' VOLUME_EFFICIENCY_POLICY_NAME = 'fake_volume_efficiency_policy' SHARE_MOUNT_POINT = 'fake_mount_point' DELETED_EXPORT_POLICIES = { VSERVER_NAME: [ 'deleted_manila_fake_policy_1', 'deleted_manila_fake_policy_2', ], VSERVER_NAME_2: [ 'deleted_manila_fake_policy_3', ], } QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' QOS_MAX_THROUGHPUT = '5000B/s' QOS_MAX_THROUGHPUT_IOPS = '5000iops' QOS_MAX_THROUGHPUT_NO_UNIT = 5000 QOS_MAX_THROUGHPUT_IOPS_NO_UNIT = 5000 ADAPTIVE_QOS_POLICY_GROUP_NAME = 'fake_adaptive_qos_policy_group_name' VSERVER_TYPE_DEFAULT = 'default' VSERVER_TYPE_DP_DEST = 'dp_destination' VSERVER_OP_STATE_RUNNING = 'running' VSERVER_STATE = 'running' VSERVER_INFO = { 'name': VSERVER_NAME, 'subtype': VSERVER_TYPE_DEFAULT, 'operational_state': VSERVER_OP_STATE_RUNNING, 'state': VSERVER_STATE, } SNAPMIRROR_POLICY_NAME = 'fake_snapmirror_policy' SNAPMIRROR_POLICY_TYPE = 'async_mirror' USER_NAME = 'fake_user' PORT = 'e0a' VLAN = '1001' VLAN_PORT = PORT + '-' + VLAN IP_ADDRESS = '10.10.10.10' NETMASK = '255.255.255.0' GATEWAY = '10.10.10.1' SUBNET = '10.10.10.0/24' NET_ALLOCATION_ID = 'fake_allocation_id' LIF_NAME_TEMPLATE = 'os_%(net_allocation_id)s' LIF_NAME = LIF_NAME_TEMPLATE % {'net_allocation_id': NET_ALLOCATION_ID} IPSPACE_NAME = 'ipspace_fake' BROADCAST_DOMAIN = 'domain_fake' MTU = 9000 SM_SOURCE_VSERVER = 'fake_source_vserver' SM_SOURCE_VOLUME = 'fake_source_volume' SM_DEST_VSERVER = 'fake_destination_vserver' SM_DEST_VOLUME = 'fake_destination_volume' SM_SOURCE_PATH = SM_SOURCE_VSERVER + ':' + SM_SOURCE_VOLUME SM_DEST_PATH = SM_DEST_VSERVER + ':' + SM_DEST_VOLUME FPOLICY_POLICY_NAME = 'fake_fpolicy_name' FPOLICY_EVENT_NAME = 'fake_fpolicy_event_name' FPOLICY_PROTOCOL = 'cifs' FPOLICY_FILE_OPERATIONS = 'create,write,rename' FPOLICY_FILE_OPERATIONS_LIST = ['create', 'write', 'rename'] FPOLICY_ENGINE = 'native' FPOLICY_EXT_TO_INCLUDE = 'avi' FPOLICY_EXT_TO_INCLUDE_LIST = ['avi'] 
FPOLICY_EXT_TO_EXCLUDE = 'jpg,mp3' FPOLICY_EXT_TO_EXCLUDE_LIST = ['jpg', 'mp3'] JOB_ID = 123 JOB_STATE = 'success' CDOT_CLONE_CHILD_1 = 'fake_child_1' CDOT_CLONE_CHILD_2 = 'fake_child_2' CDOT_CLONE_CHILDREN = [ {'name': CDOT_CLONE_CHILD_1}, {'name': CDOT_CLONE_CHILD_2}, ] NETWORK_INTERFACES = [{ 'interface-name': 'fake_interface', 'administrative-status': 'up', 'address': IP_ADDRESS, 'vserver': VSERVER_NAME, 'netmask': NETMASK, 'role': 'data', 'home-node': NODE_NAME, 'home-port': VLAN_PORT }] NETWORK_INTERFACES_MULTIPLE = [ { 'interface-name': 'fake_interface', 'administrative-status': 'up', 'address': IP_ADDRESS, 'vserver': VSERVER_NAME, 'netmask': NETMASK, 'role': 'data', 'home-node': NODE_NAME, 'home-port': VLAN_PORT, }, { 'interface-name': 'fake_interface_2', 'administrative-status': 'up', 'address': '10.10.12.10', 'vserver': VSERVER_NAME, 'netmask': NETMASK, 'role': 'data', 'home-node': NODE_NAME2, 'home-port': VLAN_PORT, } ] NETWORK_QUALIFIED_PORTS = [NODE_NAME + ':' + VLAN_PORT] NETWORK_QUALIFIED_PORTS2 = [NODE_NAME2 + ':' + VLAN_PORT] NETWORK_QUALIFIED_PORTS_ALL = (NETWORK_QUALIFIED_PORTS + NETWORK_QUALIFIED_PORTS2) IPSPACES = [{ 'uuid': 'fake_uuid', 'ipspace': IPSPACE_NAME, 'id': 'fake_id', 'broadcast-domains': [BROADCAST_DOMAIN], 'ports': NETWORK_QUALIFIED_PORTS2, 'vservers': [ IPSPACE_NAME, VSERVER_NAME, ] }] EMS_MESSAGE = { 'computer-name': 'fake_host', 'event-id': '0', 'event-source': 'fake driver', 'app-version': 'fake app version', 'category': 'fake category', 'event-description': 'fake description', 'log-level': '6', 'auto-support': 'false', } QOS_POLICY_GROUP = { 'policy-group': QOS_POLICY_GROUP_NAME, 'vserver': VSERVER_NAME, 'max-throughput': QOS_MAX_THROUGHPUT, 'num-workloads': 1, } VOLUME_AUTOSIZE_ATTRS = { 'mode': 'off', 'grow-threshold-percent': '85', 'shrink-threshold-percent': '50', 'maximum-size': '1258288', 'minimum-size': '1048576', } NO_RECORDS_RESPONSE = etree.XML(""" 0 """) PASSED_RESPONSE = etree.XML(""" """) 
PASSED_FAILED_ITER_RESPONSE = etree.XML(""" 0 1 """) INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES = etree.XML(""" 1 fake_tag """) INVALID_GET_ITER_RESPONSE_NO_RECORDS = etree.XML(""" fake_tag """) VSERVER_GET_ITER_RESPONSE = etree.XML(""" %(fake_vserver)s 1 """ % {'fake_vserver': VSERVER_NAME}) VSERVER_GET_ITER_RESPONSE_INFO = etree.XML(""" %(operational_state)s %(state)s %(name)s %(subtype)s 1 """ % VSERVER_INFO) VSERVER_GET_ROOT_VOLUME_NAME_RESPONSE = etree.XML(""" %(root_volume)s %(fake_vserver)s 1 """ % {'root_volume': ROOT_VOLUME_NAME, 'fake_vserver': VSERVER_NAME}) VSERVER_GET_IPSPACE_NAME_RESPONSE = etree.XML(""" %(ipspace)s %(fake_vserver)s 1 """ % {'ipspace': IPSPACE_NAME, 'fake_vserver': VSERVER_NAME}) VSERVER_GET_RESPONSE = etree.XML(""" %(aggr1)s %(aggr2)s 45678592 %(aggr1)s 6448431104 %(aggr2)s %(vserver)s """ % { 'vserver': VSERVER_NAME, 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) VSERVER_SHOW_AGGR_GET_RESPONSE = etree.XML(""" fake_aggr ssd 3393178406912 false compliance os_vs 1 """) SECURITY_CERT_GET_RESPONSE = etree.XML(""" %(vserver)s 12345 1 """ % {'vserver': VSERVER_NAME}) VSERVER_DATA_LIST_RESPONSE = etree.XML(""" %(vserver)s data 1 """ % {'vserver': VSERVER_NAME}) VSERVER_AGGREGATES = { SHARE_AGGREGATE_NAMES[0]: { 'available': 45678592, }, SHARE_AGGREGATE_NAMES[1]: { 'available': 6448431104, }, } VSERVER_GET_RESPONSE_NO_AGGREGATES = etree.XML(""" %(vserver)s """ % {'vserver': VSERVER_NAME}) ONTAPI_VERSION_RESPONSE = etree.XML(""" 1 19 """) SYSTEM_GET_VERSION_RESPONSE = etree.XML(""" 1395426307 true %(version)s 8 2 1 """ % {'version': VERSION}) LICENSE_V2_LIST_INFO_RESPONSE = etree.XML(""" none Cluster Base License false cluster3 base 1-80-000008 license none NFS License false cluster3-01 nfs 1-81-0000000000000004082368507 license none CIFS License false cluster3-01 cifs 1-81-0000000000000004082368507 license none iSCSI License false cluster3-01 iscsi 1-81-0000000000000004082368507 license none FCP License false 
cluster3-01 fcp 1-81-0000000000000004082368507 license none SnapRestore License false cluster3-01 snaprestore 1-81-0000000000000004082368507 license none SnapMirror License false cluster3-01 snapmirror 1-81-0000000000000004082368507 license none FlexClone License false cluster3-01 flexclone 1-81-0000000000000004082368507 license none SnapVault License false cluster3-01 snapvault 1-81-0000000000000004082368507 license """) LICENSES = ( 'base', 'cifs', 'fcp', 'flexclone', 'iscsi', 'nfs', 'snapmirror', 'snaprestore', 'snapvault' ) VOLUME_COUNT_RESPONSE = etree.XML(""" vol0 cluster3-01 %(root_volume)s %(fake_vserver)s 2 """ % {'root_volume': ROOT_VOLUME_NAME, 'fake_vserver': VSERVER_NAME}) CIFS_SECURITY_SERVICE = { 'type': 'active_directory', 'password': 'fake_password', 'user': 'fake_user', 'ou': 'fake_ou', 'domain': 'fake_domain', 'dns_ip': 'fake_dns_ip', 'server': 'fake_server', 'default_ad_site': None, } CIFS_SECURITY_SERVICE_2 = { 'type': 'active_directory', 'password': 'fake_password_2', 'user': 'fake_user_2', 'ou': 'fake_ou_2', 'domain': 'fake_domain_2', 'dns_ip': 'fake_dns_ip_2', 'server': 'fake_server_2', 'default_ad_site': None, } CIFS_SECURITY_SERVICE_3 = { 'type': 'active_directory', 'password': 'fake_password_3', 'user': 'fake_user_3', 'ou': 'fake_ou_3', 'domain': 'fake_domain_3', 'dns_ip': 'fake_dns_ip_3', 'default_ad_site': 'fake_default_ad_site_3', 'server': None, } CIFS_SECURITY_SERVICE_4 = { 'type': 'active_directory', 'password': 'fake_password_4', 'user': 'fake_user_4', 'ou': 'fake_ou_4', 'domain': 'fake_domain_4', 'dns_ip': 'fake_dns_ip_4', 'default_ad_site': 'fake_default_ad_site_4', 'server': None, } LDAP_LINUX_SECURITY_SERVICE = { 'id': 'fake_id', 'type': 'ldap', 'user': 'fake_user', 'password': 'fake_password', 'server': 'fake_server', 'ou': 'fake_ou', 'dns_ip': None, 'domain': None } LDAP_AD_SECURITY_SERVICE = { 'id': 'fake_id', 'type': 'ldap', 'user': 'fake_user', 'password': 'fake_password', 'domain': 'fake_domain', 'ou': 'fake_ou', 
'dns_ip': 'fake_dns_ip', 'server': None, } LDAP_AD_SECURITY_SERVICE_WITH_SERVER = { 'id': 'fake_id', 'type': 'ldap', 'user': 'fake_user', 'password': 'fake_password', 'domain': None, 'ou': 'fake_ou', 'dns_ip': 'fake_dns_ip', 'server': '10.10.10.1', } KERBEROS_SECURITY_SERVICE = { 'type': 'kerberos', 'password': 'fake_password', 'user': 'fake_user', 'server': 'fake_server', 'id': 'fake_id', 'domain': 'fake_domain', 'dns_ip': 'fake_dns_ip', } KERBEROS_SERVICE_PRINCIPAL_NAME = 'nfs/fake-vserver.fake_domain@FAKE_DOMAIN' INVALID_SECURITY_SERVICE = { 'type': 'fake', } SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML(""" %s 1 """ % NODE_NAME) SECUTITY_KEY_MANAGER_NVE_SUPPORT_RESPONSE_TRUE = etree.XML(""" true """) SECUTITY_KEY_MANAGER_NVE_SUPPORT_RESPONSE_FALSE = etree.XML(""" false """) NET_PORT_GET_RESPONSE_NO_VLAN = etree.XML(""" auto full autof %(domain)s healthy false %(ipspace)s true true true up 00:0c:29:fc:04:f7 1500 1500 %(node_name)s full receive 1000 %(port)s physical data """ % {'domain': BROADCAST_DOMAIN, 'ipspace': IPSPACE_NAME, 'node_name': NODE_NAME, 'port': PORT}) NET_PORT_GET_RESPONSE = etree.XML(""" auto full auto healthy false %(ipspace)s true true true up 00:0c:29:fc:04:f7 1500 1500 %(node_name)s full receive 1000 %(port)s-%(vlan)s vlan data %(vlan)s %(node_name)s %(port)s """ % {'ipspace': IPSPACE_NAME, 'node_name': NODE_NAME, 'port': PORT, 'vlan': VLAN}) NET_PORT_GET_ITER_RESPONSE = etree.XML(""" full full auto true true true up 00:0c:29:fc:04:d9 1500 %(node_name)s full none 10 e0a physical data full full auto true true true up 00:0c:29:fc:04:e3 1500 %(node_name)s full none 100 e0b physical data full full auto true true true up 00:0c:29:fc:04:ed 1500 %(node_name)s full none 1000 e0c physical data full full auto true true true up 00:0c:29:fc:04:f7 1500 %(node_name)s full none 10000 e0d physical data 4 """ % {'node_name': NODE_NAME}) SPEED_SORTED_PORTS = ( {'node': NODE_NAME, 'port': 'e0d', 'speed': '10000'}, {'node': NODE_NAME, 'port': 'e0c', 'speed': 
'1000'}, {'node': NODE_NAME, 'port': 'e0b', 'speed': '100'}, {'node': NODE_NAME, 'port': 'e0a', 'speed': '10'}, ) PORT_NAMES = ('e0a', 'e0b', 'e0c', 'e0d') SPEED_SORTED_PORT_NAMES = ('e0d', 'e0c', 'e0b', 'e0a') UNSORTED_PORTS_ALL_SPEEDS = ( {'node': NODE_NAME, 'port': 'port6', 'speed': 'undef'}, {'node': NODE_NAME, 'port': 'port3', 'speed': '100'}, {'node': NODE_NAME, 'port': 'port1', 'speed': '10000'}, {'node': NODE_NAME, 'port': 'port4', 'speed': '10'}, {'node': NODE_NAME, 'port': 'port7'}, {'node': NODE_NAME, 'port': 'port2', 'speed': '1000'}, {'node': NODE_NAME, 'port': 'port5', 'speed': 'auto'}, ) SORTED_PORTS_ALL_SPEEDS = ( {'node': NODE_NAME, 'port': 'port1', 'speed': '10000'}, {'node': NODE_NAME, 'port': 'port2', 'speed': '1000'}, {'node': NODE_NAME, 'port': 'port3', 'speed': '100'}, {'node': NODE_NAME, 'port': 'port4', 'speed': '10'}, {'node': NODE_NAME, 'port': 'port5', 'speed': 'auto'}, {'node': NODE_NAME, 'port': 'port6', 'speed': 'undef'}, {'node': NODE_NAME, 'port': 'port7'}, ) NET_PORT_GET_ITER_BROADCAST_DOMAIN_RESPONSE = etree.XML(""" %(ipspace)s %(domain)s %(node)s %(port)s 1 """ % { 'domain': BROADCAST_DOMAIN, 'node': NODE_NAME, 'port': PORT, 'ipspace': IPSPACE_NAME, }) NET_PORT_GET_ITER_BROADCAST_DOMAIN_MISSING_RESPONSE = etree.XML(""" %(ipspace)s %(node)s %(port)s 1 """ % {'node': NODE_NAME, 'port': PORT, 'ipspace': IPSPACE_NAME}) NET_PORT_BROADCAST_DOMAIN_GET_ITER_RESPONSE = etree.XML(""" %(domain)s %(ipspace)s 1 """ % {'domain': BROADCAST_DOMAIN, 'ipspace': IPSPACE_NAME}) NET_IPSPACES_GET_ITER_RESPONSE = etree.XML(""" %(domain)s fake_id %(ipspace)s %(node)s:%(port)s fake_uuid %(ipspace)s %(vserver)s 1 """ % { 'domain': BROADCAST_DOMAIN, 'ipspace': IPSPACE_NAME, 'node': NODE_NAME2, 'port': VLAN_PORT, 'vserver': VSERVER_NAME }) NET_INTERFACE_GET_ITER_RESPONSE = etree.XML("""
    192.168.228.42
    ipv4 up %(node)s e0c none none system-defined disabled mgmt %(node)s e0c cluster_mgmt true true d3230112-7524-11e4-8608-123478563412 false %(netmask)s 24 up cluster_mgmt c192.168.228.0/24 system_defined cluster3
    192.168.228.43
    ipv4 up %(node)s e0d none system-defined nextavail mgmt %(node)s e0d mgmt1 true true 0ccc57cc-7525-11e4-8608-123478563412 false %(netmask)s 24 up node_mgmt n192.168.228.0/24 system_defined cluster3-01
    %(address)s
    ipv4 up %(node)s %(vlan)s nfs cifs none system-defined nextavail data %(node)s %(vlan)s %(lif)s false true db4d91b6-95d9-11e4-8608-123478563412 false %(netmask)s 24 up data d10.0.0.0/24 system_defined %(vserver)s
    3
    """ % { 'lif': LIF_NAME, 'vserver': VSERVER_NAME, 'node': NODE_NAME, 'address': IP_ADDRESS, 'netmask': NETMASK, 'vlan': VLAN_PORT, }) LIF_NAMES = ('cluster_mgmt', 'mgmt1', LIF_NAME) NET_INTERFACE_GET_ITER_RESPONSE_NFS = etree.XML("""
    %(address)s
    ipv4 up %(node)s %(vlan)s nfs cifs none system-defined nextavail data %(node)s %(vlan)s %(lif)s false true db4d91b6-95d9-11e4-8608-123478563412 false %(netmask)s 24 up data d10.0.0.0/24 system_defined %(vserver)s
    1
    """ % { 'lif': LIF_NAME, 'vserver': VSERVER_NAME, 'node': NODE_NAME, 'address': IP_ADDRESS, 'netmask': NETMASK, 'vlan': VLAN_PORT, }) LIFS = ( {'address': '192.168.228.42', 'administrative-status': 'up', 'home-node': NODE_NAME, 'home-port': 'e0c', 'interface-name': 'cluster_mgmt', 'netmask': NETMASK, 'role': 'cluster_mgmt', 'vserver': 'cluster3' }, {'address': '192.168.228.43', 'administrative-status': 'up', 'home-node': NODE_NAME, 'home-port': 'e0d', 'interface-name': 'mgmt1', 'netmask': NETMASK, 'role': 'node_mgmt', 'vserver': 'cluster3-01' }, {'address': IP_ADDRESS, 'administrative-status': 'up', 'home-node': NODE_NAME, 'home-port': VLAN_PORT, 'interface-name': LIF_NAME, 'netmask': NETMASK, 'role': 'data', 'vserver': VSERVER_NAME, }, ) NFS_LIFS = [ {'address': IP_ADDRESS, 'administrative-status': 'up', 'home-node': NODE_NAME, 'home-port': VLAN_PORT, 'interface-name': LIF_NAME, 'netmask': NETMASK, 'role': 'data', 'vserver': VSERVER_NAME, }, ] NFS_LIFS_REST = [ { 'uuid': 'fake_uuid_1', 'enabled': 'true', 'address': IP_ADDRESS, 'home-node': NODE_NAME, 'home-port': VLAN_PORT, 'interface-name': LIF_NAME, 'netmask': NETMASK, 'role': 'data', 'vserver': VSERVER_NAME, }, { 'uuid': 'fake_uuid_2', 'enabled': 'true', 'address': IP_ADDRESS, 'home-node': NODE_NAME, 'home-port': VLAN_PORT, 'interface-name': LIF_NAME, 'netmask': NETMASK, 'role': 'data', 'vserver': VSERVER_NAME, }, { 'uuid': 'fake_uuid_3', 'enabled': 'true', 'address': IP_ADDRESS, 'home-node': NODE_NAME, 'home-port': VLAN_PORT, 'interface-name': LIF_NAME, 'netmask': NETMASK, 'role': 'data', 'vserver': VSERVER_NAME, }, ] NET_INTERFACE_GET_ONE_RESPONSE = etree.XML(""" %(lif)s %(vserver)s 1 """ % {'lif': LIF_NAME, 'vserver': VSERVER_NAME}) AGGR_GET_NAMES_RESPONSE = etree.XML(""" %(root1)s %(root2)s %(aggr1)s %(aggr2)s 2 """ % { 'root1': ROOT_AGGREGATE_NAMES[0], 'root2': ROOT_AGGREGATE_NAMES[1], 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) AGGR_GET_SPACE_RESPONSE = etree.XML(""" 
/%(aggr1)s/plex0 /%(aggr1)s/plex0/rg0 45670400 943718400 898048000 %(aggr1)s /%(aggr2)s/plex0 /%(aggr2)s/plex0/rg0 /%(aggr2)s/plex0/rg1 4267659264 7549747200 3282087936 %(aggr2)s 2 """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) AGGR_GET_NODE_RESPONSE = etree.XML(""" %(node)s %(aggr)s 1 """ % { 'aggr': SHARE_AGGREGATE_NAME, 'node': NODE_NAME }) AGGR_GET_ITER_RESPONSE = etree.XML(""" false 64_bit 1758646411 aggr 512 30384 96 30384 30384 30384 243191 96 0 4082368507 cluster3-01 4082368507 cluster3-01 off 0 active block 3 cfo true false true false false false unmirrored online 1 true false /%(aggr1)s/plex0 normal,active block false false false /%(aggr1)s/plex0/rg0 0 0 0 on 16 raid_dp, normal raid_dp online true enterprise 0 0 true true 0 0 0 0 0 0 0 0 0 245760 0 95 45670400 943718400 898048000 0 898048000 897802240 1 0 0 %(aggr1)s 15863632-ea49-49a8-9c88-2bd2d57c6d7a cluster3-01 unknown false 64_bit 706602229 aggr 528 31142 96 31142 31142 31142 1945584 96 0 4082368507 cluster3-01 4082368507 cluster3-01 off 0 active block 10 sfo false false true false false false unmirrored online 1 true false /%(aggr2)s/plex0 normal,active block false false false /%(aggr2)s/plex0/rg0 0 0 block false false false /%(aggr2)s/plex0/rg1 0 0 0 on 8 raid4, normal raid4 online true compliance 0 0 true true 0 0 0 0 0 0 0 0 0 425984 0 15 6448431104 7549747200 1101316096 0 1101316096 1100890112 2 0 0 %(aggr2)s 2a741934-1aaf-42dd-93ca-aaf231be108a cluster3-01 not_striped 2 """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) AGGR_GET_ITER_SSC_RESPONSE = etree.XML(""" false 64_bit 1758646411 aggr 512 30384 96 30384 30384 30384 243191 96 0 4082368507 cluster3-01 4082368507 cluster3-01 off 0 active block 3 cfo true false true false false false unmirrored online 1 true false /%(aggr1)s/plex0 normal,active block false false false /%(aggr1)s/plex0/rg0 0 0 0 on 16 raid_dp, normal raid_dp online true compliance 0 0 true true 0 0 0 0 0 0 0 0 0 
245760 0 95 45670400 943718400 898048000 0 898048000 897802240 1 0 0 %(aggr1)s 15863632-ea49-49a8-9c88-2bd2d57c6d7a cluster3-01 unknown 1 """ % {'aggr1': SHARE_AGGREGATE_NAMES[0]}) AGGR_GET_ITER_ROOT_AGGR_RESPONSE = etree.XML(""" true false %(root1)s true false %(root2)s false false %(aggr1)s false false %(aggr2)s 6 """ % { 'root1': ROOT_AGGREGATE_NAMES[0], 'root2': ROOT_AGGREGATE_NAMES[1], 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) AGGR_GET_ITER_NON_ROOT_AGGR_RESPONSE = etree.XML(""" false false %(aggr1)s false false %(aggr2)s 6 """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], }) VOLUME_GET_NAME_RESPONSE = etree.XML(""" %(volume)s %(vserver)s 1 """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) VOLUME_GET_VOLUME_PATH_RESPONSE = etree.XML(""" /%(volume)s """ % {'volume': SHARE_NAME}) VOLUME_GET_VOLUME_PATH_CIFS_RESPONSE = etree.XML(""" \\%(volume)s """ % {'volume': SHARE_NAME}) VOLUME_JUNCTION_PATH = '/' + SHARE_NAME VOLUME_JUNCTION_PATH_CIFS = '\\' + SHARE_NAME VOLUME_MODIFY_ITER_RESPONSE = etree.XML(""" 0 1 %(volume)s %(vserver)s """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) VOLUME_MODIFY_ITER_ERROR_RESPONSE = etree.XML(""" 160 Unable to set volume attribute "size" %(volume)s %(vserver)s 1 0 """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_ACCESS_TIME = '1466640058' SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE = etree.XML(""" %(access_time)s false %(snap)s %(volume)s %(vserver)s 1 """ % { 'access_time': SNAPSHOT_ACCESS_TIME, 'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME, }) SNAPSHOT_GET_ITER_BUSY_RESPONSE = etree.XML(""" %(access_time)s true %(snap)s %(volume)s %(vserver)s volume clone 1 """ % { 'access_time': SNAPSHOT_ACCESS_TIME, 'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME, }) SNAPSHOT_GET_ITER_NOT_UNIQUE_RESPONSE = etree.XML(""" false %(snap)s %(volume)s %(vserver)s false %(snap)s %(root_volume)s %(admin_vserver)s 1 """ % { 'snap': 
SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME, 'root_volume': ROOT_VOLUME_NAME, 'admin_vserver': ADMIN_VSERVER_NAME, }) SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE = etree.XML(""" 0 13023 %(volume)s Unable to get information for Snapshot copies of volume \ "%(volume)s" on Vserver "%(vserver)s". Reason: Volume not online. %(vserver)s """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE = etree.XML(""" 0 99999 %(volume)s Unable to get information for Snapshot copies of volume \ "%(volume)s" on Vserver "%(vserver)s". %(vserver)s """ % {'volume': SHARE_NAME, 'vserver': VSERVER_NAME}) SNAPSHOT_MULTIDELETE_ERROR_RESPONSE = etree.XML(""" 13021 %(volume)s No such snapshot. """ % {'volume': SHARE_NAME}) SNAPSHOT_GET_ITER_DELETED_RESPONSE = etree.XML(""" deleted_manila_%(snap)s %(volume)s %(vserver)s 1 """ % { 'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME, }) SNAPSHOT_GET_ITER_SNAPMIRROR_RESPONSE = etree.XML(""" %(snap)s %(volume)s %(vserver)s 1 """ % { 'snap': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'vserver': VSERVER_NAME, }) CIFS_SHARE_ACCESS_CONTROL_GET_ITER = etree.XML(""" full_control %(volume)s Administrator manila_svm_cifs change %(volume)s Administrators manila_svm_cifs read %(volume)s Power Users manila_svm_cifs no_access %(volume)s Users manila_svm_cifs 4 """ % {'volume': SHARE_NAME}) CIFS_SHARE_GET_ITER_RESPONSE = etree.XML(""" %(share_name)s fake_vserver 1 """ % {'share_name': SHARE_NAME}) NFS_EXPORT_RULES = ('10.10.10.10', '10.10.10.20') NFS_EXPORTFS_LIST_RULES_2_NO_RULES_RESPONSE = etree.XML(""" """) NFS_EXPORTFS_LIST_RULES_2_RESPONSE = etree.XML(""" %(path)s 65534 false %(host1)s %(host2)s %(host1)s %(host2)s %(host1)s %(host2)s sys """ % { 'path': VOLUME_JUNCTION_PATH, 'host1': NFS_EXPORT_RULES[0], 'host2': NFS_EXPORT_RULES[1], }) AGGR_GET_RAID_TYPE_RESPONSE = etree.XML(""" /%(aggr1)s/plex0 /%(aggr1)s/plex0/rg0 %(raid_type1)s %(aggr1)s /%(aggr2)s/plex0 /%(aggr2)s/plex0/rg0 
/%(aggr2)s/plex0/rg1 %(raid_type2)s %(aggr2)s 2 """ % { 'aggr1': SHARE_AGGREGATE_NAMES[0], 'aggr2': SHARE_AGGREGATE_NAMES[1], 'raid_type1': SHARE_AGGREGATE_RAID_TYPES[0], 'raid_type2': SHARE_AGGREGATE_RAID_TYPES[1] }) STORAGE_DISK_GET_ITER_RESPONSE = etree.XML(""" cluster3-01:v5.19 %(type0)s cluster3-01:v5.20 %(type0)s cluster3-01:v5.20 %(type1)s cluster3-01:v5.20 %(type1)s 4 """ % { 'type0': SHARE_AGGREGATE_DISK_TYPES[0], 'type1': SHARE_AGGREGATE_DISK_TYPES[1], }) STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML(""" cluster3-01:v4.16 cluster3-01:v4.17 cluster3-01:v4.18 cluster3-01:v4.19 cluster3-01:v4.20 cluster3-01:v4.21 cluster3-01:v4.22 cluster3-01:v4.24 cluster3-01:v4.25 cluster3-01:v4.26 next_tag_1 10 """) STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2 = etree.XML(""" cluster3-01:v4.27 cluster3-01:v4.28 cluster3-01:v4.29 cluster3-01:v4.32 cluster3-01:v5.16 cluster3-01:v5.17 cluster3-01:v5.18 cluster3-01:v5.19 cluster3-01:v5.20 cluster3-01:v5.21 next_tag_2 10 """) STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3 = etree.XML(""" cluster3-01:v5.22 cluster3-01:v5.24 cluster3-01:v5.25 cluster3-01:v5.26 cluster3-01:v5.27 cluster3-01:v5.28 cluster3-01:v5.29 cluster3-01:v5.32 8 """) GET_AGGREGATE_FOR_VOLUME_RESPONSE = etree.XML(""" %(aggr)s %(share)s os_aa666789-5576-4835-87b7-868069856459 1 """ % { 'aggr': SHARE_AGGREGATE_NAME, 'share': SHARE_NAME }) GET_AGGREGATE_FOR_FLEXGROUP_VOL_RESPONSE = etree.XML(""" %(aggr)s %(share)s os_aa666789-5576-4835-87b7-868069856459 1 """ % { 'aggr': SHARE_AGGREGATE_NAME, 'share': SHARE_NAME }) VOLUME_AUTOSIZE_GET_RESPONSE = etree.XML(""" %(grow_percent)s false %(max_size)s %(min_size)s %(mode)s %(shrink_percent)s """ % {'grow_percent': VOLUME_AUTOSIZE_ATTRS.get('grow-threshold-percent'), 'max_size': VOLUME_AUTOSIZE_ATTRS.get('maximum-size'), 'min_size': VOLUME_AUTOSIZE_ATTRS.get('minimum-size'), 'mode': VOLUME_AUTOSIZE_ATTRS.get('mode'), 'shrink_percent': VOLUME_AUTOSIZE_ATTRS.get( 'shrink-threshold-percent')}) GET_VOLUME_FOR_ENCRYPTED_RESPONSE = 
etree.XML(""" true %(volume)s manila_svm 1 """ % {'volume': SHARE_NAME}) GET_VOLUME_FOR_ENCRYPTED_OLD_SYS_VERSION_RESPONSE = etree.XML(""" %(volume)s manila_svm 1 """ % {'volume': SHARE_NAME}) EXPORT_RULE_GET_ITER_RESPONSE = etree.XML(""" %(rule)s %(policy)s 3 manila_svm %(rule)s %(policy)s 1 manila_svm 2 """ % {'policy': EXPORT_POLICY_NAME, 'rule': IP_ADDRESS}) VOLUME_GET_EXPORT_POLICY_RESPONSE = etree.XML(""" %(policy)s %(volume)s manila_svm 1 """ % {'policy': EXPORT_POLICY_NAME, 'volume': SHARE_NAME}) DELETED_EXPORT_POLICY_GET_ITER_RESPONSE = etree.XML(""" %(policy1)s %(vserver)s %(policy2)s %(vserver)s %(policy3)s %(vserver2)s 2 """ % { 'vserver': VSERVER_NAME, 'vserver2': VSERVER_NAME_2, 'policy1': DELETED_EXPORT_POLICIES[VSERVER_NAME][0], 'policy2': DELETED_EXPORT_POLICIES[VSERVER_NAME][1], 'policy3': DELETED_EXPORT_POLICIES[VSERVER_NAME_2][0], }) LUN_GET_ITER_RESPONSE = etree.XML(""" /vol/%(volume)s/fakelun %(volume)s %(vserver)s 1 """ % { 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, }) VOLUME_GET_ITER_NOT_UNIQUE_RESPONSE = etree.XML(""" %(volume1)s %(volume2)s 2 """ % { 'volume1': SHARE_NAME, 'volume2': SHARE_NAME_2, }) VOLUME_GET_ITER_SNAPSHOT_ATTRIBUTES_RESPONSE = etree.XML(""" %(snapshot_policy)s %(snapdir_access_enabled)s 1 """ % { 'snapshot_policy': 'daily', 'snapdir_access_enabled': 'false', }) VOLUME_GET_ITER_JUNCTIONED_VOLUMES_RESPONSE = etree.XML(""" fake_volume test 1 """) VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE = etree.XML(""" %(aggr)s /%(volume)s %(volume)s %(vserver)s rw %(style-extended)s %(size)s %(size-used)s %(qos-policy-group-name)s compliance 1 """ % { 'aggr': SHARE_AGGREGATE_NAME, 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, 'size': SHARE_SIZE, 'size-used': SHARE_USED_SIZE, 'qos-policy-group-name': QOS_POLICY_GROUP_NAME, 'style-extended': FLEXVOL_STYLE_EXTENDED, }) VOLUME_GET_ITER_FLEXGROUP_VOLUME_TO_MANAGE_RESPONSE = etree.XML(""" %(aggr)s /%(volume)s %(volume)s %(vserver)s rw %(style-extended)s %(size)s %(size-used)s 
%(qos-policy-group-name)s compliance 1 """ % { 'aggr': SHARE_AGGREGATE_NAME, 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, 'size': SHARE_SIZE, 'size-used': SHARE_USED_SIZE, 'qos-policy-group-name': QOS_POLICY_GROUP_NAME, 'style-extended': FLEXGROUP_STYLE_EXTENDED, }) VOLUME_GET_ITER_NO_QOS_RESPONSE = etree.XML(""" %(aggr)s /%(volume)s %(volume)s %(vserver)s rw %(style-extended)s %(size)s %(size-used)s compliance 1 """ % { 'aggr': SHARE_AGGREGATE_NAME, 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, 'size': SHARE_SIZE, 'size-used': SHARE_USED_SIZE, 'style-extended': FLEXVOL_STYLE_EXTENDED, }) CLONE_CHILD_1 = 'fake_child_1' CLONE_CHILD_2 = 'fake_child_2' VOLUME_GET_ITER_CLONE_CHILDREN_RESPONSE = etree.XML(""" %(clone1)s %(vserver)s %(clone2)s %(vserver)s 2 """ % { 'vserver': VSERVER_NAME, 'clone1': CLONE_CHILD_1, 'clone2': CLONE_CHILD_2, }) VOLUME_GET_ITER_PARENT_SNAP_EMPTY_RESPONSE = etree.XML(""" %(name)s %(vserver)s 1 """ % { 'vserver': VSERVER_NAME, 'name': SHARE_NAME, }) VOLUME_GET_ITER_PARENT_SNAP_RESPONSE = etree.XML(""" %(snapshot_name)s %(name)s %(vserver)s 1 """ % { 'snapshot_name': SNAPSHOT_NAME, 'vserver': VSERVER_NAME, 'name': SHARE_NAME, }) SIS_GET_ITER_RESPONSE = etree.XML(""" true /vol/%(volume)s enabled %(vserver)s """ % { 'vserver': VSERVER_NAME, 'volume': SHARE_NAME, }) CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML(""" %(addr1)s %(addr2)s available %(cluster)s fake_uuid %(addr1)s %(remote_cluster)s fake_serial_number 60 1 """ % { 'addr1': CLUSTER_ADDRESS_1, 'addr2': CLUSTER_ADDRESS_2, 'cluster': CLUSTER_NAME, 'remote_cluster': REMOTE_CLUSTER_NAME, }) CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML(""" false 8 """) CLUSTER_GET_CLUSTER_NAME = etree.XML(""" - %(cluster_name)s 1-80-000000 fake_uuid fake_rdb """ % { 'cluster_name': CLUSTER_NAME, }) VSERVER_PEER_GET_ITER_RESPONSE = etree.XML(""" snapmirror %(cluster)s peered %(vserver2)s %(vserver1)s 2 """ % { 'cluster': CLUSTER_NAME, 'vserver1': VSERVER_NAME, 'vserver2': VSERVER_NAME_2 }) 
SNAPMIRROR_GET_ITER_RESPONSE = etree.XML(""" fake_destination_volume fake_destination_node fake_destination_vserver fake_snapshot 1442701782 false true 2187 109 1442701890 test:manila 1171456 initialize 0 snapmirrored fake_snapshot 1442701782 DPDefault v2 ea8bfcc6-5f1d-11e5-8446-123478563412 idle data_protection daily fake_source_volume fake_source_vserver fake_destination_vserver 1 """) SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML(""" fake_destination_vserver fake_destination_volume true snapmirrored idle daily fake_source_vserver fake_source_volume 1 """) SNAPMIRROR_GET_ITER_FILTERED_RESPONSE_2 = etree.XML(""" fake_source_vserver fake_destination_vserver snapmirrored idle 1 """) SNAPMIRROR_GET_DESTINATIONS_ITER_FILTERED_RESPONSE = etree.XML(""" fake_destination_vserver: fake_destination_vserver fake_relationship_id fake_source_vserver: fake_source_vserver 1 """) SNAPMIRROR_INITIALIZE_RESULT = etree.XML(""" succeeded """) VOLUME_MOVE_GET_ITER_RESULT = etree.XML(""" retry_on_failure
    Cutover Completed::Volume move job finishing move
    1481919246 82 finishing healthy %(volume)s %(vserver)s
    1
    """ % { 'volume': SHARE_NAME, 'vserver': VSERVER_NAME, }) NFS_INFO_STR = """ false true false use_export_policy des des3 aes_128 aes_256 65536 32768 false false """ NFS_INFO_DEFAULT_TREE = etree.XML(NFS_INFO_STR) NFS_CONFIG_DEFAULT_RESULT = etree.XML(""" %s """ % NFS_INFO_STR) NFS_CONFIG_SERVER_RESULT = etree.XML(""" %s """ % NFS_INFO_STR) PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS = [ 'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD', 'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1', 'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', 'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE', 'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO', 'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM', 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT', 'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH', ] PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE = etree.XML(""" No. of times 8.3 names are accessed per second. 
access_8_3_names diag rate per_sec Array of counts of different types of CPs wafl_timer generated CP snapshot generated CP wafl_avail_bufs generated CP dirty_blk_cnt generated CP full NV-log generated CP,back-to-back CP flush generated CP,sync generated CP deferred back-to-back CP low mbufs generated CP low datavecs generated CP nvlog replay takeover time limit CP cp_count diag delta array none total_cp_msecs Array of percentage time spent in different phases of CP %(labels)s cp_phase_times diag percent array percent """ % {'labels': ','.join(PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS)}) PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE = etree.XML(""" avg_processor_busy 5674745133134 system %(node1)s:kernel:system avg_processor_busy 4077649009234 system %(node2)s:kernel:system 1453412013 """ % {'node1': NODE_NAMES[0], 'node2': NODE_NAMES[1]}) PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE = etree.XML(""" 1454146292 system avg_processor_busy 13215732322 """) PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE = etree.XML(""" system %(node)s:kernel:system 1 """ % {'node': NODE_NAME}) PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE = etree.XML(""" processor0 processor1 """) NET_ROUTES_CREATE_RESPONSE = etree.XML(""" ipv4 %(subnet)s %(gateway)s 20 %(vserver)s """ % { 'gateway': GATEWAY, 'vserver': VSERVER_NAME, 'subnet': SUBNET, }) QOS_POLICY_GROUP_GET_ITER_RESPONSE = etree.XML(""" %(max_throughput)s 1 %(qos_policy_group_name)s %(vserver)s 1 """ % { 'qos_policy_group_name': QOS_POLICY_GROUP_NAME, 'vserver': VSERVER_NAME, 'max_throughput': QOS_MAX_THROUGHPUT, }) SNAPMIRROR_POLICY_GET_ITER_RESPONSE = etree.XML(""" %(policy_name)s %(policy_type)s %(vserver_name)s 1 """ % { 'policy_name': SNAPMIRROR_POLICY_NAME, 'policy_type': SNAPMIRROR_POLICY_TYPE, 'vserver_name': VSERVER_NAME, }) KERBEROS_CONFIG_GET_RESPONSE = etree.XML(""" %(lif_name)s true %(vserver_name)s """ % { 'lif_name': LIF_NAME, 'vserver_name': VSERVER_NAME, }) DNS_CONFIG_GET_RESPONSE = etree.XML(""" 1 enabled fake_domain.com 
true fake_dns_1 fake_dns_2 true true 2 %(vserver_name)s """ % { 'vserver_name': VSERVER_NAME, }) FPOLICY_EVENT_GET_ITER_RESPONSE = etree.XML(""" %(event_name)s create write rename %(protocol)s false %(vserver_name)s 1 """ % { 'event_name': FPOLICY_EVENT_NAME, 'protocol': FPOLICY_PROTOCOL, 'vserver_name': VSERVER_NAME, }) FPOLICY_POLICY_GET_ITER_RESPONSE = etree.XML(""" false %(engine)s %(event_name)s true false %(policy_name)s %(vserver_name)s 1 """ % { 'engine': FPOLICY_ENGINE, 'event_name': FPOLICY_EVENT_NAME, 'policy_name': FPOLICY_POLICY_NAME, 'vserver_name': VSERVER_NAME, }) FPOLICY_SCOPE_GET_ITER_RESPONSE = etree.XML(""" true jpg mp3 avi false %(policy_name)s %(share_name)s %(vserver_name)s 1 """ % { 'policy_name': FPOLICY_POLICY_NAME, 'share_name': SHARE_NAME, 'vserver_name': VSERVER_NAME, }) FPOLICY_POLICY_STATUS_GET_ITER_RESPONSE = etree.XML(""" %(policy_name)s 1 true %(vserver_name)s 1 """ % { 'policy_name': FPOLICY_POLICY_NAME, 'vserver_name': VSERVER_NAME, }) FAKE_VOL_XML = """ open123 online 0 0 0 false false """ SNAPLOCK_CLOCK_CONFIG_1 = etree.XML(""" %(clock_info)s 1723063070 """ % { 'clock_info': 'Wed Aug 07 16:37:50' }) SNAPLOCK_CLOCK_CONFIG_2 = etree.XML(""" %(clock_info)s 1723063070 """ % { 'clock_info': 'ComplianceClock is not configured' }) FAKE_XML1 = """\ abc\ abc\ """ FAKE_XML2 = """somecontent""" FAKE_NA_ELEMENT = api.NaElement(etree.XML(FAKE_VOL_XML)) FAKE_INVOKE_DATA = 'somecontent' FAKE_XML_STR = 'abc' FAKE_REST_CALL_STR = 'def' FAKE_API_NAME = 'volume-get-iter' FAKE_API_NAME_ELEMENT = api.NaElement(FAKE_API_NAME) FAKE_NA_SERVER_STR = '127.0.0.1' FAKE_NA_SERVER = api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5 = api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5.set_vfiler('filer') FAKE_NA_SERVER_API_1_5.set_api_version(1, 5) FAKE_NA_SERVER_API_1_14 = api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_14.set_vserver('server') FAKE_NA_SERVER_API_1_14.set_api_version(1, 14) FAKE_NA_SERVER_API_1_20 = 
api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_20.set_vfiler('filer') FAKE_NA_SERVER_API_1_20.set_vserver('server') FAKE_NA_SERVER_API_1_20.set_api_version(1, 20) FAKE_QUERY = {'volume-attributes': None} FAKE_DES_ATTR = {'volume-attributes': ['volume-id-attributes', 'volume-space-attributes', 'volume-state-attributes', 'volume-qos-attributes']} FAKE_CALL_ARGS_LIST = [mock.call(80), mock.call(8088), mock.call(443), mock.call(8488)] FAKE_RESULT_API_ERR_REASON = api.NaElement('result') FAKE_RESULT_API_ERR_REASON.add_attr('errno', '000') FAKE_RESULT_API_ERR_REASON.add_attr('reason', 'fake_reason') FAKE_RESULT_API_ERRNO_INVALID = api.NaElement('result') FAKE_RESULT_API_ERRNO_INVALID.add_attr('errno', '000') FAKE_RESULT_API_ERRNO_VALID = api.NaElement('result') FAKE_RESULT_API_ERRNO_VALID.add_attr('errno', '14956') FAKE_RESULT_SUCCESS = api.NaElement('result') FAKE_RESULT_SUCCESS.add_attr('status', 'passed') FAKE_HTTP_SESSION = requests.Session() FAKE_MANAGE_VOLUME = { 'aggregate': SHARE_AGGREGATE_NAME, 'name': SHARE_NAME, 'owning-vserver-name': VSERVER_NAME, 'junction_path': VOLUME_JUNCTION_PATH, 'style': 'fake_style', 'size': SHARE_SIZE, } FAKE_KEY_MANAGER_ERROR = "The onboard key manager is not enabled. To enable \ it, run \"security key-manager setup\"." 
FAKE_ACTION_URL = '/endpoint' FAKE_BASE_URL = '10.0.0.3/api' FAKE_HTTP_BODY = {'fake_key': 'fake_value'} FAKE_HTTP_QUERY = {'type': 'fake_type'} FAKE_FORMATTED_HTTP_QUERY = "?type=fake_type" FAKE_HTTP_HEADER = {"fake_header_key": "fake_header_value"} FAKE_URL_PARAMS = {"fake_url_key": "fake_url_value_to_be_concatenated"} FAKE_MIGRATION_RESPONSE_WITH_JOB = { "_links": { "self": { "href": "/api/resourcelink" } }, "job": { "start_time": "2021-08-27T19:23:41.691Z", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", "description": "Fake Job", "state": "success", "message": "Complete: Successful", "end_time": "2021-08-27T19:23:41.691Z", "code": "0" } } FAKE_JOB_ID = FAKE_MIGRATION_RESPONSE_WITH_JOB['job']['uuid'] FAKE_MIGRATION_POST_ID = 'fake_migration_id' FAKE_JOB_SUCCESS_STATE = { "_links": { "self": { "href": "/api/resourcelink" } }, "start_time": "2021-08-27T19:23:41.691Z", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", "description": "POST migrations/%s" % FAKE_MIGRATION_POST_ID, "state": "success", "message": "Complete: Successful", "end_time": "2021-08-27T19:23:41.691Z", "code": "0" } FAKE_MIGRATION_JOB_SUCCESS = { "auto_cutover": True, "auto_source_cleanup": True, "current_operation": "none", "cutover_complete_time": "2020-12-02T18:36:19-08:00", "cutover_start_time": "2020-12-02T18:36:19-08:00", "cutover_trigger_time": "2020-12-02T18:36:19-08:00", "destination": { "ipspace": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "exchange", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" }, "volume_placement": { "aggregates": [ { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "aggr1", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" } ], "volumes": [ { "aggregate": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "aggr1", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" }, "volume": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "this_volume", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" } } ] } }, 
"end_time": "2020-12-02T18:36:19-08:00", "last_failed_state": "precheck_started", "last_operation": "none", "last_pause_time": "2020-12-02T18:36:19-08:00", "last_resume_time": "2020-12-02T18:36:19-08:00", "messages": [ { "code": 852126, "message": "SVM migrate cannot start since a volume move is " "running.""Retry the command once volume move has " "finished." } ], "point_of_no_return": True, "restart_count": 0, "source": { "cluster": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "cluster1", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412" }, "svm": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" } }, "start_time": "2020-12-02T18:36:19-08:00", "state": "migrate_complete", "uuid": "4ea7a442-86d1-11e0-ae1c-123478563412" } VOLUME_GET_ITER_STATE_RESPONSE = etree.XML(""" 1 online """) ASYNC_OPERATION_RESPONSE = etree.XML(""" in_progress 123 """) VOLUME_GET_ITER_STYLE_FLEXGROUP_RESPONSE = etree.XML(""" 1 %(style)s """ % { 'style': FLEXGROUP_STYLE_EXTENDED, }) VOLUME_GET_ITER_STYLE_FLEXVOL_RESPONSE = etree.XML(""" 1 flexvol """) JOB_GET_STATE_RESPONSE = etree.XML(""" 1 %(state)s """ % { 'state': JOB_STATE, }) JOB_GET_STATE_NOT_UNIQUE_RESPONSE = etree.XML(""" 1 %(state)s %(state)s """ % { 'state': JOB_STATE, }) NO_RECORDS_RESPONSE_REST = { "records": [], "num_records": 0, "_links": { "self": { "href": "/api/cluster/nodes" } } } ERROR_RESPONSE_REST = { "error": { "code": 1100, "message": "fake error", } } GET_VERSION_RESPONSE_REST = { "records": [ { "version": { "generation": "9", "minor": "11", "major": "1", "full": "NetApp Release 9.11.1: Sun Nov 05 18:20:57 UTC 2017" } } ], "_links": { "next": { "href": "/api/resourcelink" }, "self": { "href": "/api/resourcelink" } }, "num_records": 0 } VOLUME_GET_ITER_RESPONSE_LIST_REST = [ { "uuid": "2407b637-119c-11ec-a4fb-00a0b89c9a78", "name": VOLUME_NAMES[0], "state": "online", "style": "flexvol", "is_svm_root": False, "type": "rw", 
"error_state": { "is_inconsistent": False }, "_links": { "self": { "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb" } } }, { "uuid": "2c190609-d51c-11eb-b83a", "name": VOLUME_NAMES[1], "state": "online", "style": "flexvol", "is_svm_root": False, "type": "rw", "error_state": { "is_inconsistent": False }, "_links": { "self": { "href": "/api/storage/volumes/2c190609-d51c-11eb-b83a" } } } ] VOLUME_GET_ITER_RESPONSE_REST_PAGE = { "records": [ VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], ], "num_records": 10, "_links": { "self": { "href": "/api/storage/volumes?fields=name&max_records=2" }, "next": { "href": "/api/storage/volumes?" f"start.uuid={VOLUME_GET_ITER_RESPONSE_LIST_REST[0]['uuid']}" "&fields=name&max_records=2" } } } VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE = { "records": [ VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], ], "num_records": 8, } INVALID_GET_ITER_RESPONSE_NO_RECORDS_REST = { "num_records": 1, } INVALID_GET_ITER_RESPONSE_NO_NUM_RECORDS_REST = { "records": [], } JOB_RESPONSE_REST = { "job": { "uuid": "uuid-12345", "_links": { "self": { "href": "/api/cluster/jobs/uuid-12345" } } } } JOB_SUCCESSFUL_REST = { "uuid": FAKE_UUID, "description": "Fake description", "state": "success", "message": "success", "code": 0, "start_time": "2022-02-18T20:08:03+00:00", "end_time": "2022-02-18T20:08:04+00:00", } JOB_RUNNING_REST = { "uuid": FAKE_UUID, "description": 
"Fake description", "state": "running", "message": "running", "code": 0, } JOB_ERROR_REST = { "uuid": FAKE_UUID, "description": "Fake description", "state": "failure", "message": "failure", "code": 4, "error": { "target": "uuid", "arguments": [ { "message": "string", "code": "string" } ], "message": "entry doesn't exist", "code": "4" }, "start_time": "2022-02-18T20:08:03+00:00", "end_time": "2022-02-18T20:08:04+00:00", } FAKE_GET_ONTAP_VERSION_REST = { "version": { "full": "NetApp Release 9.10.1RC1: Wed Oct 27 02:46:19 UTC 2021", "generation": 9, "major": 10, "minor": 1 }, } FAKE_GET_CLUSTER_NODE_VERSION_REST = { "records": [ { "uuid": "fake_uuid", "name": CLUSTER_NAME, "version": FAKE_GET_ONTAP_VERSION_REST["version"], } ], } FAKE_GET_LICENSES_REST = { "records": [ { "name": "base", }, { "name": "nfs", }, { "name": "cifs", } ], "num_records": 3, } VOLUME_ITEM_SIMPLE_RESPONSE_REST = { "uuid": "fake_uuid", "name": VOLUME_NAMES[0], "style": 'flexvol', "svm": { "name": VSERVER_NAME, "uuid": "fake_uuid", }, "efficiency": { "state": "enabled", "compression": "true" }, "snapshot_directory_access_enabled": "false", "snapshot_policy": { "name": "daily", }, "state": "online", } VOLUME_LIST_SIMPLE_RESPONSE_REST = { "records": [ VOLUME_ITEM_SIMPLE_RESPONSE_REST ], "num_records": 1, } SVMS_LIST_SIMPLE_RESPONSE_REST = { "records": [ { "uuid": "fake_uuid", "name": VSERVER_NAME, "subtype": VSERVER_TYPE_DEFAULT, "state": VSERVER_STATE, }, { "uuid": "fake_uuid_2", "name": VSERVER_NAME_2, "subtype": VSERVER_TYPE_DEFAULT, "state": VSERVER_STATE, }, ], "num_records": 2, } KEYSTORE_SIMPLE_RESPONSE_REST = { "records": [ { "type": "fake_type_barbican", "configuration": { "name": FAKE_CONFIG_NAME, "uuid": FAKE_CONFIG_UUID }, "uuid": FAKE_CONFIG_UUID } ], "num_records": 1 } AGGR_GET_ITER_RESPONSE_REST = { "records": [ { "uuid": "fake_uuid_1", "name": "fake_aggr1", "home_node": { "name": "fake_home_node_name" }, "snaplock_type": "enterprise", "is_snaplock": True, "space": { "footprint": 
702764609536, "footprint_percent": 55, "block_storage": { "size": 1271819509760, "available": 568692293632, "used": 703127216128, }, "snapshot": { "used_percent": 0, "available": 0, "total": 0, "used": 0, "reserve_percent": 0 }, "cloud_storage": { "used": 0 }, "efficiency": { "savings": 70597836800, "ratio": 11.00085294873662, "logical_used": 77657018368 }, "efficiency_without_snapshots": { "savings": 4288385024, "ratio": 1.614324692241794, "logical_used": 11269033984 }, "efficiency_without_snapshots_flexclones": { "savings": 4288385024, "ratio": 1.614324692241794, "logical_used": 11269033984 } }, "block_storage": { "storage_type": "vmdisk", "primary": { "raid_type": "raid0" } }, }, { "uuid": "fake_uuid_2", "name": "fake_aggr2", "home_node": { "name": "fake_home_node_name" }, "space": { "footprint": 699261227008, "footprint_percent": 49, "block_storage": { "size": 1426876227584, "available": 727211110400, "used": 699665117184, }, "snapshot": { "used_percent": 0, "available": 0, "total": 0, "used": 0, "reserve_percent": 0 }, "cloud_storage": { "used": 0 }, "efficiency": { "savings": 4173848576, "ratio": 1.447821420943505, "logical_used": 13494190080 }, "efficiency_without_snapshots": { "savings": 0, "ratio": 1, "logical_used": 8565026816 }, "efficiency_without_snapshots_flexclones": { "savings": 0, "ratio": 1, "logical_used": 8565026816 } }, "block_storage": { "storage_type": "vmdisk", "primary": { "raid_type": "raid0" } }, } ], "num_records": 2, } EFFECTIVE_TYPE = 'fake_effective_type1' DISK_LIST_SIMPLE_RESPONSE_REST = { "records": [ { "name": "NET-1.2", "effective_type": EFFECTIVE_TYPE, } ], "num_records": 1, } GENERIC_JOB_POST_RESPONSE = { "job": { "_links": { "self": { "href": "/api/resourcelink" } }, "uuid": "fake_uuid" } } GENERIC_NETWORK_INTERFACES_GET_REPONSE = { "records": [ { "uuid": "fake_uuid", "name": LIF_NAME, "enabled": "true", "ip": { "address": IP_ADDRESS, "netmask": NETMASK }, "svm": { "name": VSERVER_NAME }, "services": [ "data_nfs", "data_cifs", 
], "location": { "home_node": { "name": CLUSTER_NAME }, "home_port": { "name": PORT }, } } ], "num_records": 1, } GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES = { "records": [ { "svm": { "uuid": "fake_uuid", "name": VSERVER_NAME, }, "efficiency": { "volume_path": VOLUME_JUNCTION_PATH }, "id": "fake-policy-uuid", "name": EXPORT_POLICY_NAME, "style": "flexvol", "type": "rw", "aggregates": [ { "name": SHARE_AGGREGATE_NAME } ], "nas": { "path": VOLUME_JUNCTION_PATH }, "space": { "size": 21474836480, 'used': SHARE_USED_SIZE, }, "snaplock": { "type": "compliance" } } ], "num_records": 1, } GENERIC_FPOLICY_RESPONSE = { "records": [ { "name": FPOLICY_POLICY_NAME, "enabled": "true", "priority": 1, "events": [ { "name": FPOLICY_EVENT_NAME, } ], "engine": { "name": FPOLICY_ENGINE, }, "scope": { "include_shares": [ VOLUME_NAMES[0] ], "include_extension": FPOLICY_EXT_TO_INCLUDE_LIST, "exclude_extension": FPOLICY_EXT_TO_EXCLUDE_LIST }, } ], "num_records": 1, } GENERIC_FPOLICY_EVENTS_RESPONSE = { "records": [ { "name": FPOLICY_EVENT_NAME, "protocol": FPOLICY_PROTOCOL, "file_operations": { "create": 'true', "write": 'true', "rename": 'true' }, } ], "num_records": 1, } EXPORT_POLICY_REST = { "records": [ { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "string", "id": 0, "svm": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" }, "rules": [{ "rw_rule": [ "any" ], "_links": { "self": { "href": "/api/resourcelink" } }, "ro_rule": [ "any" ], "allow_suid": True, "chown_mode": "restricted", "index": 0, "superuser": [ "any" ], "protocols": [ "any" ], "anonymous_user": "string", "clients": [ {"match": "0.0.0.0/0"} ], "ntfs_unix_security": "fail", "allow_device_creation": True }] }], "_links": { "next": { "href": "/api/resourcelink" }, "self": { "href": "/api/resourcelink" } }, "num_records": 1 } QOS_POLICY_GROUP_REST = { "records": [ { "policy_class": "undefined", "object_count": '0', "fixed": { 
"max_throughput_iops": 0, "capacity_shared": True, "max_throughput_mbps": 0, "min_throughput_iops": 0, "min_throughput_mbps": 0 }, "_links": { "self": { "href": "/api/resourcelink" } }, "name": "extreme", "adaptive": { "expected_iops_allocation": "allocated_space", "expected_iops": 0, "peak_iops_allocation": "used_space", "block_size": "any", "peak_iops": 0, "absolute_min_iops": 0 }, "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", "svm": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" }, "pgid": 0, "scope": "cluster" } ], "_links": { "next": { "href": "/api/resourcelink" }, "self": { "href": "/api/resourcelink" } }, "error": { "target": "uuid", "arguments": [ { "message": "string", "code": "string" } ], "message": "entry doesn't exist", "code": "4" }, "num_records": 1 } FAKE_SNAPSHOT_UUID = "fake_uuid" FAKE_VOLUME_UUID = "fake_volume_uuid" SNAPSHOT_REST = { "name": SNAPSHOT_NAME, "uuid": FAKE_SNAPSHOT_UUID, "volume": { "name": VOLUME_NAMES[0], "uuid": FAKE_VOLUME_UUID, }, "create_time": "2019-02-04T19:00:00Z", "owners": ["volume_clone"], "svm": { "name": VSERVER_NAME, } } SNAPSHOTS_REST_RESPONSE = { "records": [ SNAPSHOT_REST, ], "num_records": 1, } SNAPSHOTS_MULTIPLE_REST_RESPONSE = { "records": [ SNAPSHOT_REST, SNAPSHOT_REST, ], "num_records": 2, } SNAPMIRROR_GET_ITER_RESPONSE_REST = { "records": [ { "uuid": FAKE_UUID, "source": { "path": SM_SOURCE_VSERVER + ':' + SM_SOURCE_VOLUME, "svm": { "name": SM_SOURCE_VSERVER } }, "destination": { "path": SM_DEST_VSERVER + ':' + SM_DEST_VOLUME, "svm": { "name": SM_DEST_VSERVER } }, "policy": { "type": "async" }, "state": "snapmirrored", "healthy": True, "transfer_schedule": { "name": "hourly", }, "transfer": { "state": "success", "bytes_transferred": "3352", }, "last_transfer_type": "update", } ], "num_records": 1, } REST_GET_SNAPMIRRORS_RESPONSE = [{ 'destination-volume': SM_DEST_VOLUME, 'destination-vserver': SM_DEST_VSERVER, 
'last-transfer-end-timestamp': 0, 'mirror-state': 'snapmirrored', 'relationship-status': 'idle', 'source-volume': SM_SOURCE_VOLUME, 'source-vserver': SM_SOURCE_VSERVER, 'uuid': FAKE_UUID, 'policy-type': 'async', 'schedule': 'hourly', 'transferring-state': 'success', 'is-healthy': 'true', 'last-transfer-size': '3352', 'last-transfer-type': 'update', }] FAKE_CIFS_RECORDS = { "records": [ { "svm": { "uuid": "000c5cd2-ebdf-11e8-a96e-0050568ea3cb", "name": "vs1" }, "user_or_group": "Everyone", "permission": "full_control" }, { "svm": { "uuid": "000c5cd2-ebdf-11e8-a96e-0050568ea3cb", "name": "vs1" }, "user_or_group": "root", "permission": "no_access" } ], "num_records": 2 } FAKE_VOL_MOVE_STATUS = { "records": [ { "svm": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "fake_svm", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" }, "uuid": "fake_uuid", "name": "fake_name", "movement": { "state": "success", "percent_complete": 100 }, } ], "num_records": 1, } REST_SIMPLE_RESPONSE = { "records": [ { 'uuid': FAKE_UUID } ] } FAKE_GET_VOLUME_CLONE_REST = [ { "uuid": FAKE_UUID, "name": VOLUME_NAMES[0], "clone": { "parent_volume": { "name": VOLUME_NAMES[1] } }, "num_records": 1, } ] VSERVER_DATA_LIST_RESPONSE_REST = { 'records': [ { 'name': VSERVER_NAME }, { 'name': VSERVER_NAME_2 } ], 'num_records': 2, } PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST = { 'name': 'wafl', 'counter_schemas': [ { 'name': 'cp_phase_times', 'description': 'Array of percentage time spent in different phases' + ' of Consistency Point (CP).', 'type': 'percent', 'unit': 'percent', 'denominator': { 'name': 'total_cp_msecs' } } ], } PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST = [ 'cp_setup', 'cp_pre_p0', 'cp_p0_snap_del', 'cp_p1_clean', 'cp_p1_quota', 'cp_ipu_disk_add', 'cp_p2v_inofile', 'cp_p2v_ino_pub', 'cp_p2v_ino_pri', 'cp_p2v_fsinfo', 'cp_p2v_dlog1', 'cp_p2v_dlog2', 'cp_p2v_refcount', 'cp_p2v_topaa', 'cp_p2v_df_scores_sub', 'cp_p2v_bm', 'cp_p2v_snap', 'cp_p2v_df_scores', 'cp_p2v_volinfo', 
'cp_p2v_cont', 'cp_p2a_inofile', 'cp_p2a_ino', 'cp_p2a_dlog1', 'cp_p2a_hya', 'cp_p2a_dlog2', 'cp_p2a_fsinfo', 'cp_p2a_ipu_bitmap_grow', 'cp_p2a_refcount', 'cp_p2a_topaa', 'cp_p2a_hyabc', 'cp_p2a_bm', 'cp_p2a_snap', 'cp_p2a_volinfo', 'cp_p2_flush', 'cp_p2_finish', 'cp_p3_wait', 'cp_p3v_volinfo', 'cp_p3a_volinfo', 'cp_p3_finish', 'cp_p4_finish', 'cp_p5_finish', ] PERF_COUNTER_TOTAL_CP_MSECS_LABELS_RESULT = [ label[3:] for label in PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST ] PERF_COUNTER_TOTAL_CP_MSECS_VALUES_REST = [ 0, 3112, 3, 0, 0, 3, 757, 0, 99, 0, 26, 0, 22, 1, 0, 194, 4, 224, 359, 222, 0, 0, 0, 0, 0, 0, 82, 0, 0, 0, 0, 0, 0, 62, 0, 133, 16, 35, 334219, 43, 2218, 20, 0, ] PERF_COUNTER_TABLE_ROWS_WAFL = { 'records': [ { 'id': NODE_NAME + ':wafl', 'counters': [ { 'name': 'cp_phase_times', 'values': PERF_COUNTER_TOTAL_CP_MSECS_VALUES_REST, 'labels': PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST } ], } ], 'num_records': 1, } PERF_COUNTER_DOMAIN_BUSY_LABELS = [ 'exempt', 'ha', 'host_os', 'idle', 'kahuna', 'kahuna_legacy', 'none', 'nwk_exempt', 'network', 'protocol', 'raid', 'raid_exempt', 'sm_exempt', 'ssan_exempt', 'storage', 'target', 'unclassified', 'wafl_exempt', 'wafl_mpcleaner', 'xor_exempt', 'ssan_exempt2', 'exempt_ise', 'zombie', ] PERF_COUNTER_DOMAIN_BUSY_VALUES_1 = [ 83071627197, 1334877, 19459898, 588539096, 11516887, 14878622, 18, 647698, 20, 229232646, 4310322, 441035, 12946782, 57837913, 38765442, 1111004351701, 1497335, 949657, 109890, 768027, 21, 14, 13 ] PERF_COUNTER_DOMAIN_BUSY_VALUES_2 = [ 1191129018056, 135991, 22842513, 591213798, 9449562, 15345460, 0, 751656, 0, 162605694, 3927323, 511160, 7644403, 29696759, 21787992, 3585552592, 1058902, 957296, 87811, 499766, 0, 0, 0 ] PERF_COUNTER_ELAPSED_TIME_1 = 1199265469753 PERF_COUNTER_ELAPSED_TIME_2 = 1199265469755 PERF_GET_INSTANCES_PROCESSOR_RESPONSE_REST = { 'records': [ { 'counter_table': { 'name': 'processor' }, 'id': NODE_NAME + ':processor0', 'counters': [ { 'name': 'domain_busy_percent', 'values': 
PERF_COUNTER_DOMAIN_BUSY_VALUES_1, 'labels': PERF_COUNTER_DOMAIN_BUSY_LABELS }, { 'name': 'elapsed_time', 'value': PERF_COUNTER_ELAPSED_TIME_1, } ], }, { 'counter_table': { 'name': 'processor' }, 'id': NODE_NAME + ':processor1', 'counters': [ { 'name': 'domain_busy_percent', 'values': PERF_COUNTER_DOMAIN_BUSY_VALUES_2, 'labels': PERF_COUNTER_DOMAIN_BUSY_LABELS }, { 'name': 'elapsed_time', 'value': PERF_COUNTER_ELAPSED_TIME_2, } ], } ], 'num_records': 2, } PERF_COUNTERS_PROCESSOR_EXPECTED = [ { 'instance-name': 'processor', 'instance-uuid': NODE_NAME + ':processor0', 'node-name': NODE_NAME, 'timestamp': mock.ANY, 'domain_busy': ','.join([str(v) for v in PERF_COUNTER_DOMAIN_BUSY_VALUES_1]) }, { 'instance-name': 'processor', 'instance-uuid': NODE_NAME + ':processor0', 'node-name': NODE_NAME, 'timestamp': mock.ANY, 'processor_elapsed_time': PERF_COUNTER_ELAPSED_TIME_1 }, { 'instance-name': 'processor', 'instance-uuid': NODE_NAME + ':processor1', 'node-name': NODE_NAME, 'timestamp': mock.ANY, 'domain_busy': ','.join([str(v) for v in PERF_COUNTER_DOMAIN_BUSY_VALUES_2]) }, { 'instance-name': 'processor', 'instance-uuid': NODE_NAME + ':processor1', 'node-name': NODE_NAME, 'timestamp': mock.ANY, 'processor_elapsed_time': PERF_COUNTER_ELAPSED_TIME_2 }, ] DELETED_EXPORT_POLICY_GET_ITER_RESPONSE_REST = { 'records': [ { 'name': DELETED_EXPORT_POLICIES[VSERVER_NAME][0], 'svm': {'name': VSERVER_NAME}, }, { 'name': DELETED_EXPORT_POLICIES[VSERVER_NAME][1], 'svm': {'name': VSERVER_NAME}, }, { 'name': DELETED_EXPORT_POLICIES[VSERVER_NAME_2][0], 'svm': {'name': VSERVER_NAME_2}, } ], 'num_records': 3 } SECUTITY_KEY_MANAGER_SUPPORT_RESPONSE_TRUE_REST = { 'records': [ { 'volume_encryption': { 'supported': True, 'message': '', 'code': 0 } } ], 'num_records': 1 } SECUTITY_KEY_MANAGER_SUPPORT_RESPONSE_FALSE_REST = { 'records': [ { 'volume_encryption': { 'supported': False, 'message': 'No platform support for volume encryption ' 'in following nodes - node1, node2.', 'code': 346758 } } ], 
'num_records': 1 } NFS_CONFIG_DEFAULT_RESULT_REST = { 'records': [ { 'svm': { 'uuid': FAKE_UUID, 'name': VSERVER_NAME, }, 'transport': { 'udp_enabled': True, 'tcp_enabled': True, 'tcp_max_transfer_size': 65536 }, } ], 'num_records': 1, } DNS_REST_RESPONSE = { "domains": [ "example.com", "example2.example3.com" ], "dynamic_dns": { "fqdn": "example.com", "time_to_live": "P2D", "use_secure": "true", "enabled": "true" }, "servers": [ "10.224.65.20", "2001:db08:a0b:12f0::1" ], } SVM_ITEM_SIMPLE_RESPONSE_REST = { "uuid": "fake_uuid", "name": VSERVER_NAME, } LOCAL_USERS_CIFS_RESPONSE = { "sid": "fake_SID", "svm": { "name": "svm1", "uuid": "02c9e252-41be-11e9-81d5-00a0986138f7" } } PREFERRED_DC_REST = { "fqdn": "test.com", "server_ip": "4.4.4.4" } FAKE_VSERVER_PEERS = [{ 'uuid': 'fake_uuid' }] FAKE_PEER_GET_RESPONSE = { 'records': [ { 'uuid': FAKE_UUID, 'svm': { 'name': VSERVER_NAME, }, 'peer': { 'svm': { 'name': VSERVER_NAME_2 }, 'cluster': { 'name': CLUSTER_NAME } }, 'state': VSERVER_PEER_STATE, } ], 'num_records': 1 } REST_SPEED_SORTED_PORTS = [ {'node': NODE_NAME, 'port': 'e0d', 'speed': 10000}, {'node': NODE_NAME, 'port': 'e0c', 'speed': 1000}, {'node': NODE_NAME, 'port': 'e0b', 'speed': 100}, ] REST_SPEED_NOT_SORTED_PORTS = [ {'node': NODE_NAME, 'port': 'e0b', 'speed': 100}, {'node': NODE_NAME, 'port': 'e0c', 'speed': 1000}, {'node': NODE_NAME, 'port': 'e0d', 'speed': 10000}, ] REST_ETHERNET_PORTS = { "records": [ { "uuid": "fake_uuid1", "name": "e0a", "type": "physical", "node": { "name": NODE_NAME }, "broadcast_domain": { "name": "fake_domain_1", "ipspace": { "name": "Default" } }, "state": "up", "speed": 10, }, { "uuid": "fake_uuid2", "name": "e0b", "type": "physical", "node": { "name": NODE_NAME }, "broadcast_domain": { "name": "fake_domain_2", "ipspace": { "name": "Default" } }, "state": "up", "speed": 100, }, { "uuid": "fake_uuid3", "name": "e0c", "type": "physical", "node": { "name": NODE_NAME }, "broadcast_domain": { "name": "fake_domain_3", "ipspace": { 
"name": "Default" } }, "state": "up", "speed": 1000, }, { "uuid": "fake_uuid4", "name": "e0d", "type": "physical", "node": { "name": NODE_NAME }, "broadcast_domain": { "name": "fake_domain_4", "ipspace": { "name": "Default" } }, "state": "up", "speed": 10000, } ], } SVM_ITEM_SIMPLE_RESPONSE_REST = { "uuid": "fake_uuid", "name": VSERVER_NAME, } FAKE_GET_BROADCAST_DOMAIN = { 'records': [ { 'ports': [ { 'name': PORT, 'node': {'name': NODE_NAME} } ], 'name': BROADCAST_DOMAIN, 'ipspace': {'name': IPSPACE_NAME} } ] } NFS_CONFIG_RESULT_REST = { 'records': [ { 'svm': { 'uuid': FAKE_UUID, 'name': VSERVER_NAME, }, 'transport': { 'udp_enabled': True, 'tcp_enabled': True, 'tcp_max_transfer_size': 65536 }, } ], 'num_records': 1, } SERVICE_POLICIES_REST = { 'records': [ { 'uuid': 'fake_policy_uuid', 'name': 'default-data-files', 'svm': { 'name': VSERVER_NAME }, 'services': [ 'data_core', 'data_flexcache', 'data_fpolicy_client', 'management_dns_client', 'management_ad_client', 'management_ldap_client', 'management_nis_client', 'data_dns_server' ], }, ], 'num_records': 1, } SECURITY_CERT_GET_RESPONSE_REST = { 'records': [ { 'uuid': 'fake_cert_uuid', 'serial_number': 'fake_serial_number', 'key_size': 0, 'hash_function': "sha256", 'common_name': "fake_common_name", 'name': "cert1", 'ca': 'fake_ca', 'expiry_time': 'fake_expiry_time', 'svm': { 'name': VSERVER_NAME, 'uuid': 'fake_uuid', }, }, ], 'num_records': 1, } SECURITY_CERT_POST_RESPONSE_REST = { 'records': [ { 'uuid': 'fake_cert_uuid', 'serial_number': 'fake_serial_number', 'key_size': 0, 'hash_function': "sha256", 'common_name': "fake_common_name", 'name': "cert1", 'ca': 'fake_ca', 'expiry_time': 'fake_expiry_time', 'svm': { 'name': VSERVER_NAME, 'uuid': 'fake_uuid', }, }, ], 'num_records': 1, } GET_SNAPMIRROR_POLICIES_REST = { "records": [ { "uuid": FAKE_UUID, "name": SNAPMIRROR_POLICY_NAME }], 'num_records': 1, } REST_VSERVER_GET_IPSPACE_NAME_RESPONSE = { "records": [ { "uuid": FAKE_UUID, "ipspace": {'name': IPSPACE_NAME} } ], 
'num_records': 1, } BROADCAST_DOMAIN_LIST_SIMPLE_RESPONSE_REST = { "records": [ { "ports": [ { "_links": { "self": { "href": FAKE_BASE_URL } }, "name": "fake_port_name", "uuid": FAKE_UUID, "node": { "name": "fake_node_name" } } ], "_links": { "self": { "href": FAKE_BASE_URL } }, "name": "fake_broadcast_name", "ipspace": { "_links": { "self": { "href": FAKE_BASE_URL } }, "name": IPSPACE_NAME, "uuid": FAKE_UUID }, "uuid": FAKE_UUID, "mtu": MTU } ], "_links": { "next": { "href": FAKE_BASE_URL }, "self": { "href": FAKE_BASE_URL } }, "num_records": 1 } GET_IPSPACES_RESPONSE = { 'ipspace': IPSPACE_NAME, 'uuid': FAKE_UUID, 'broadcast-domains': [BROADCAST_DOMAIN], 'ports': [PORT], 'vservers': [VSERVER_NAME, VSERVER_NAME_2] } IPSPACE_INFO = { 'records': [ { 'name': IPSPACE_NAME, 'uuid': FAKE_UUID } ] } REST_SINGLE_PORT = { "records": [ { "uuid": "fake_uuid1", "name": "e0a", "type": "physical", "node": { "name": NODE_NAME }, "broadcast_domain": { "name": "fake_domain_1", "ipspace": { "name": "Default" } }, "state": "up", "speed": 10, } ] } VOLUME = { "name": "fake_volume_name", "uuid": "028baa66-41bd-11e9-81d5-00a0986138f7", "max_dir_size": 0, } SECUTITY_KEY_MANAGER_SUPPORT_RESPONSE_TRUE_REST = { 'records': [ { 'volume_encryption': { 'supported': True, 'message': '', 'code': 0 } } ], 'num_records': 1 } SECUTITY_KEY_MANAGER_SUPPORT_RESPONSE_FALSE_REST = { 'records': [ { 'volume_encryption': { 'supported': False, 'message': 'No platform support for volume encryption ' 'in following nodes - node1, node2.', 'code': 346758 } } ], 'num_records': 1 } FAKE_DISK_TYPE_RESPONSE = { "records": [ { "effective_type": "fakedisk" } ] } FAKE_SVM_AGGREGATES = { "records": [ { "name": VSERVER_NAME, "aggregates": [ { "name": SHARE_AGGREGATE_NAMES_LIST[0], "available_size": 568692293632 }, { "name": SHARE_AGGREGATE_NAMES_LIST[1], "available_size": 727211110400 }, ] } ] } FAKE_AGGREGATES_RESPONSE = { "records": [ { "aggregates": [ { "name": SHARE_AGGREGATE_NAME } ], "name": VSERVER_NAME, } ] } 
FAKE_SVM_AGGR_EMPTY = { "records": [ { "name": VSERVER_NAME, "aggregates": [] } ] } FAKE_AGGR_LIST = { "records": [ { "name": SHARE_AGGREGATE_NAMES_LIST[0] } ] } REST_DATA_INTERFACES = { "records": [ { "uuid": "fake-uuid-1", "name": "data-1", "location": { "port": { "name": "e0b" } }, "service_policy": { "name": "default-management" }, }, { "uuid": "fake-uuid-2", "name": "data-2", "location": { "port": { "name": "e0c" } }, "service_policy": { "name": "default-management" }, }, { "uuid": "fake-uuid-3", "name": "data-3", "location": { "port": { "name": "e0d" } }, "service_policy": { "name": "default-management" }, } ], "num_records": 3, } FAKE_CIFS_LOCAL_USER = { 'records': [ { 'sid': 'S-1-5-21-256008430-3394229847-3930036330-1001' } ] } FAKE_SERVER_SWITCH_NAME = 'fake_ss_name' FAKE_SUBTYPE = 'fake_subtype' FAKE_DNS_CONFIG = { 'dns-state': 'true', 'domains': ['fake_domain'], 'dns-ips': ['fake_ip'] } FAKE_VOLUME_MANAGE = { 'records': [ { 'name': VOLUME_NAMES[0], 'aggregates': [ { 'name': SHARE_AGGREGATE_NAME } ], 'nas': { 'path': VOLUME_JUNCTION_PATH }, 'style': 'flex', 'type': 'fake_type', 'svm': { 'name': VSERVER_NAME }, 'qos': { 'policy': { 'name': QOS_POLICY_GROUP_NAME } }, 'space': { 'size': SHARE_SIZE, 'used': SHARE_USED_SIZE, }, 'snaplock': { 'type': "compliance" } } ], 'num_records': 1 } FAKE_PORTS = [ {'speed': ''}, {'speed': '4'}, {'speed': 'auto'}, {'speed': 'undef'}, {'speed': 'fake_speed'} ] FAKE_ROOT_AGGREGATES_RESPONSE = { "records": [ { "aggregate": SHARE_AGGREGATE_NAME } ] } FAKE_GET_VOLUME = { "records": [ { "uuid": FAKE_UUID, "name": "share_6cb5b3f4_35d0_40b8_a106_d35262ac17c7", "size": 1024**3, } ], } STORAGE_FAIL_OVER_PARTNER = etree.XML(""" fake_partner_node """) DATA_LIF_CAPACITY_DETAILS = etree.XML(""" 512 44 512 fake_node 1 """) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/client/test_api.py0000664000175000017500000005303600000000000027152 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for NetApp API layer """ from oslo_serialization import jsonutils from unittest import mock import ddt import requests from manila import exception from manila.share.drivers.netapp.dataontap.client import api from manila.share.drivers.netapp.dataontap.client import rest_endpoints from manila import test from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake class NetAppApiElementTransTests(test.TestCase): """Test case for NetApp API element translations.""" def test_get_set_system_version(self): napi = api.NaServer('localhost') # Testing calls before version is set version = napi.get_system_version() self.assertIsNone(version) napi.set_system_version(fake.VERSION_TUPLE) version = napi.get_system_version() self.assertEqual(fake.VERSION_TUPLE, version) def test_translate_struct_dict_unique_key(self): """Tests if dict gets properly converted to NaElements.""" root = api.NaElement('root') child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'} root.translate_struct(child) 
self.assertEqual(3, len(root.get_children())) for key, value in child.items(): self.assertEqual(value, root.get_child_content(key)) def test_translate_struct_dict_nonunique_key(self): """Tests if list/dict gets properly converted to NaElements.""" root = api.NaElement('root') child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}] root.translate_struct(child) children = root.get_children() self.assertEqual(3, len(children)) for c in children: if c.get_name() == 'e1': self.assertIn(c.get_content(), ['v1', 'v3']) else: self.assertEqual('v2', c.get_content()) def test_translate_struct_list(self): """Tests if list gets properly converted to NaElements.""" root = api.NaElement('root') child = ['e1', 'e2'] root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) self.assertIsNone(root.get_child_content('e2')) def test_translate_struct_tuple(self): """Tests if tuple gets properly converted to NaElements.""" root = api.NaElement('root') child = ('e1', 'e2') root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) self.assertIsNone(root.get_child_content('e2')) def test_translate_invalid_struct(self): """Tests if invalid data structure raises exception.""" root = api.NaElement('root') child = 'random child element' self.assertRaises(ValueError, root.translate_struct, child) def test_setter_builtin_types(self): """Tests str, int, float get converted to NaElement.""" update = dict(e1='v1', e2='1', e3='2.0', e4='8') root = api.NaElement('root') for key, value in update.items(): root[key] = value for key, value in update.items(): self.assertEqual(value, root.get_child_content(key)) def test_setter_na_element(self): """Tests na_element gets appended as child.""" root = api.NaElement('root') root['e1'] = api.NaElement('nested') self.assertEqual(1, len(root.get_children())) e1 = root.get_child_by_name('e1') self.assertIsInstance(e1, api.NaElement) 
self.assertIsInstance(e1.get_child_by_name('nested'), api.NaElement) def test_setter_child_dict(self): """Tests dict is appended as child to root.""" root = api.NaElement('root') root['d'] = {'e1': 'v1', 'e2': 'v2'} e1 = root.get_child_by_name('d') self.assertIsInstance(e1, api.NaElement) sub_ch = e1.get_children() self.assertEqual(2, len(sub_ch)) for c in sub_ch: self.assertIn(c.get_name(), ['e1', 'e2']) if c.get_name() == 'e1': self.assertEqual('v1', c.get_content()) else: self.assertEqual('v2', c.get_content()) def test_setter_child_list_tuple(self): """Tests list/tuple are appended as child to root.""" root = api.NaElement('root') root['l'] = ['l1', 'l2'] root['t'] = ('t1', 't2') li = root.get_child_by_name('l') self.assertIsInstance(li, api.NaElement) t = root.get_child_by_name('t') self.assertIsInstance(t, api.NaElement) self.assertEqual(2, len(li.get_children())) for le in li.get_children(): self.assertIn(le.get_name(), ['l1', 'l2']) self.assertEqual(2, len(t.get_children())) for te in t.get_children(): self.assertIn(te.get_name(), ['t1', 't2']) def test_setter_no_value(self): """Tests key with None value.""" root = api.NaElement('root') root['k'] = None self.assertIsNone(root.get_child_content('k')) def test_setter_invalid_value(self): """Tests invalid value raises exception.""" self.assertRaises(TypeError, api.NaElement('root').__setitem__, 'k', api.NaServer('localhost')) def test_setter_invalid_key(self): """Tests invalid value raises exception.""" self.assertRaises(KeyError, api.NaElement('root').__setitem__, None, 'value') def test__build_session_with_basic_auth(self): """Tests whether build session works with """ """default(basic auth) parameters""" napi = api.ZapiClient('localhost') fake_session = mock.Mock() mock_requests_session = self.mock_object( requests, 'Session', mock.Mock(return_value=fake_session)) mock_auth = self.mock_object(napi, '_create_basic_auth_handler', mock.Mock(return_value='fake_auth')) napi._ssl_verify = 'fake_ssl' fake_headers 
= {'Content-Type': 'text/xml'} napi._build_session() self.assertEqual(fake_session, napi._session) self.assertEqual('fake_auth', napi._session.auth) self.assertEqual('fake_ssl', napi._session.verify) self.assertEqual(fake_headers, napi._session.headers) mock_requests_session.assert_called_once_with() mock_auth.assert_called_once_with() def test__build_session_with_certificate_auth(self): """Tests whether build session works with """ """valid certificate parameters""" napi = api.ZapiClient('localhost') napi._private_key_file = 'fake_key.pem' napi._certificate_file = 'fake_cert.pem' napi._certificate_host_validation = False cert = napi._certificate_file, napi._private_key_file fake_headers = {'Content-Type': 'text/xml'} fake_session = mock.Mock() napi._session = mock.Mock() mock_requests_session = self.mock_object( requests, 'Session', mock.Mock(return_value=fake_session)) res = napi._create_certificate_auth_handler() napi._build_session() self.assertEqual(fake_session, napi._session) self.assertEqual(res, (cert, napi._certificate_host_validation)) self.assertEqual(fake_headers, napi._session.headers) mock_requests_session.assert_called_once_with() def test__create_certificate_auth_handler_default(self): """Test whether create certificate auth handler """ """works with default params""" napi = api.ZapiClient('localhost') napi._private_key_file = 'fake_key.pem' napi._certificate_file = 'fake_cert.pem' napi._certificate_host_validation = False cert = napi._certificate_file, napi._private_key_file napi._session = mock.Mock() if not napi._certificate_host_validation: self.assertFalse(napi._certificate_host_validation) res = napi._create_certificate_auth_handler() self.assertEqual(res, (cert, napi._certificate_host_validation)) def test__create_certificate_auth_handler_with_host_validation(self): """Test whether create certificate auth handler """ """works with host validation enabled""" napi = api.ZapiClient('localhost') napi._private_key_file = 'fake_key.pem' 
napi._certificate_file = 'fake_cert.pem' napi._ca_certificate_file = 'fake_ca_cert.crt' napi._certificate_host_validation = True cert = napi._certificate_file, napi._private_key_file napi._session = mock.Mock() if napi._certificate_host_validation: self.assertTrue(napi._certificate_host_validation) res = napi._create_certificate_auth_handler() self.assertEqual(res, (cert, napi._ca_certificate_file)) @ddt.ddt class NetAppApiServerZapiClientTests(test.TestCase): """Test case for NetApp API server methods""" def setUp(self): self.root = api.NaServer('127.0.0.1').zapi_client super(NetAppApiServerZapiClientTests, self).setUp() @ddt.data(None, fake.FAKE_XML_STR) def test_invoke_elem_value_error(self, na_element): """Tests whether invalid NaElement parameter causes error""" self.assertRaises(ValueError, self.root.invoke_elem, na_element) def test_invoke_elem_http_error(self): """Tests handling of HTTPError""" na_element = fake.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', mock.Mock( return_value=fake.FAKE_NA_ELEMENT)) self.mock_object(api, 'LOG') self.root._session = fake.FAKE_HTTP_SESSION self.mock_object(self.root, '_build_session') self.mock_object(self.root._session, 'post', mock.Mock( side_effect=requests.HTTPError())) self.assertRaises(api.NaApiError, self.root.invoke_elem, na_element) def test_invoke_elem_urlerror(self): """Tests handling of URLError""" na_element = fake.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', mock.Mock( return_value=fake.FAKE_NA_ELEMENT)) self.mock_object(api, 'LOG') self.root._session = fake.FAKE_HTTP_SESSION self.mock_object(self.root, '_build_session') self.mock_object(self.root._session, 'post', mock.Mock( side_effect=requests.URLRequired())) self.assertRaises(exception.StorageCommunicationException, self.root.invoke_elem, na_element) def test_invoke_elem_unknown_exception(self): """Tests handling of Unknown Exception""" na_element = fake.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', 
mock.Mock( return_value=fake.FAKE_NA_ELEMENT)) self.mock_object(api, 'LOG') self.root._session = fake.FAKE_HTTP_SESSION self.mock_object(self.root, '_build_session') self.mock_object(self.root._session, 'post', mock.Mock( side_effect=Exception)) exception = self.assertRaises(api.NaApiError, self.root.invoke_elem, na_element) self.assertEqual('unknown', exception.code) @ddt.data({'trace_enabled': False, 'trace_pattern': '(.*)', 'log': False}, {'trace_enabled': True, 'trace_pattern': '(?!(volume)).*', 'log': False}, {'trace_enabled': True, 'trace_pattern': '(.*)', 'log': True}, {'trace_enabled': True, 'trace_pattern': '^volume-(info|get-iter)$', 'log': True}) @ddt.unpack def test_invoke_elem_valid(self, trace_enabled, trace_pattern, log): """Tests the method invoke_elem with valid parameters""" na_element = fake.FAKE_NA_ELEMENT self.root._trace = trace_enabled self.root._api_trace_pattern = trace_pattern self.mock_object(self.root, '_create_request', mock.Mock( return_value=fake.FAKE_NA_ELEMENT)) self.mock_object(api, 'LOG') self.root._session = fake.FAKE_HTTP_SESSION self.mock_object(self.root, '_build_session') self.mock_object(self.root, '_get_result', mock.Mock( return_value=fake.FAKE_NA_ELEMENT)) response = mock.Mock() response.text = 'res1' self.mock_object( self.root._session, 'post', mock.Mock( return_value=response)) self.root.invoke_elem(na_element) expected_log_count = 2 if log else 0 self.assertEqual(expected_log_count, api.LOG.debug.call_count) @ddt.data('1234', 5678) def test_custom_port(self, port): root = api.NaServer('127.0.0.1', port=port).zapi_client self.assertEqual(str(port), root.get_port()) @ddt.ddt class NetAppApiServerRestClientTests(test.TestCase): """Test case for NetApp API Rest server methods""" def setUp(self): self.root = api.NaServer('127.0.0.1').rest_client super(NetAppApiServerRestClientTests, self).setUp() def test_invoke_elem_value_error(self): """Tests whether invalid NaElement parameter causes error""" na_element = 
fake.FAKE_REST_CALL_STR self.assertRaises(ValueError, self.root.invoke_elem, na_element) def _setup_mocks_for_invoke_element(self, mock_post_action): self.mock_object(api, 'LOG') self.root._session = fake.FAKE_HTTP_SESSION self.root._session.post = mock_post_action self.mock_object(self.root, '_build_session') self.mock_object( self.root, '_get_request_info', mock.Mock( return_value=(self.root._session.post, fake.FAKE_ACTION_URL))) self.mock_object( self.root, '_get_base_url', mock.Mock(return_value=fake.FAKE_BASE_URL)) return fake.FAKE_BASE_URL def test_invoke_elem_http_error(self): """Tests handling of HTTPError""" na_element = fake.FAKE_NA_ELEMENT element_name = fake.FAKE_NA_ELEMENT.get_name() self._setup_mocks_for_invoke_element( mock_post_action=mock.Mock(side_effect=requests.HTTPError())) self.assertRaises(api.NaApiError, self.root.invoke_elem, na_element) self.assertTrue(self.root._get_base_url.called) self.root._get_request_info.assert_called_once_with( element_name, self.root._session) def test_invoke_elem_urlerror(self): """Tests handling of URLError""" na_element = fake.FAKE_NA_ELEMENT element_name = fake.FAKE_NA_ELEMENT.get_name() self._setup_mocks_for_invoke_element( mock_post_action=mock.Mock(side_effect=requests.URLRequired())) self.assertRaises(exception.StorageCommunicationException, self.root.invoke_elem, na_element) self.assertTrue(self.root._get_base_url.called) self.root._get_request_info.assert_called_once_with( element_name, self.root._session) def test_invoke_elem_unknown_exception(self): """Tests handling of Unknown Exception""" na_element = fake.FAKE_NA_ELEMENT element_name = fake.FAKE_NA_ELEMENT.get_name() self._setup_mocks_for_invoke_element( mock_post_action=mock.Mock(side_effect=Exception)) exception = self.assertRaises(api.NaApiError, self.root.invoke_elem, na_element) self.assertEqual('unknown', exception.code) self.assertTrue(self.root._get_base_url.called) self.root._get_request_info.assert_called_once_with( element_name, 
self.root._session) @ddt.data( {'trace_enabled': False, 'trace_pattern': '(.*)', 'log': False, 'query': None, 'body': fake.FAKE_HTTP_BODY }, {'trace_enabled': True, 'trace_pattern': '(?!(volume)).*', 'log': False, 'query': None, 'body': fake.FAKE_HTTP_BODY }, {'trace_enabled': True, 'trace_pattern': '(.*)', 'log': True, 'query': fake.FAKE_HTTP_QUERY, 'body': fake.FAKE_HTTP_BODY }, {'trace_enabled': True, 'trace_pattern': '^volume-(info|get-iter)$', 'log': True, 'query': fake.FAKE_HTTP_QUERY, 'body': fake.FAKE_HTTP_BODY } ) @ddt.unpack def test_invoke_elem_valid(self, trace_enabled, trace_pattern, log, query, body): """Tests the method invoke_elem with valid parameters""" self.root._session = fake.FAKE_HTTP_SESSION response = mock.Mock() response.content = 'fake_response' self.root._session.post = mock.Mock(return_value=response) na_element = fake.FAKE_NA_ELEMENT element_name = fake.FAKE_NA_ELEMENT.get_name() self.root._trace = trace_enabled self.root._api_trace_pattern = trace_pattern expected_url = fake.FAKE_BASE_URL + fake.FAKE_ACTION_URL api_args = { "body": body, "query": query } self.mock_object(api, 'LOG') mock_build_session = self.mock_object(self.root, '_build_session') mock_get_req_info = self.mock_object( self.root, '_get_request_info', mock.Mock( return_value=(self.root._session.post, fake.FAKE_ACTION_URL))) mock_add_query_params = self.mock_object( self.root, '_add_query_params_to_url', mock.Mock( return_value=fake.FAKE_ACTION_URL)) mock_get_base_url = self.mock_object( self.root, '_get_base_url', mock.Mock(return_value=fake.FAKE_BASE_URL)) mock_json_loads = self.mock_object( jsonutils, 'loads', mock.Mock(return_value='fake_response')) mock_json_dumps = self.mock_object( jsonutils, 'dumps', mock.Mock(return_value=body)) result = self.root.invoke_elem(na_element, api_args=api_args) self.assertEqual('fake_response', result) expected_log_count = 2 if log else 0 self.assertEqual(expected_log_count, api.LOG.debug.call_count) 
self.assertTrue(mock_build_session.called) mock_get_req_info.assert_called_once_with( element_name, self.root._session) if query: mock_add_query_params.assert_called_once_with( fake.FAKE_ACTION_URL, query) self.assertTrue(mock_get_base_url.called) self.root._session.post.assert_called_once_with( expected_url, data=body) mock_json_loads.assert_called_once_with('fake_response') mock_json_dumps.assert_called_once_with(body) @ddt.data( ('svm-migration-start', rest_endpoints.ENDPOINT_MIGRATIONS, 'post'), ('svm-migration-complete', rest_endpoints.ENDPOINT_MIGRATION_ACTIONS, 'patch') ) @ddt.unpack def test__get_request_info(self, api_name, expected_url, expected_method): self.root._session = fake.FAKE_HTTP_SESSION for http_method in ['post', 'get', 'put', 'delete', 'patch']: setattr(self.root._session, http_method, mock.Mock()) method, url = self.root._get_request_info(api_name, self.root._session) self.assertEqual(method, getattr(self.root._session, expected_method)) self.assertEqual(expected_url, url) @ddt.data( {'is_ipv6': False, 'protocol': 'http', 'port': '80'}, {'is_ipv6': False, 'protocol': 'https', 'port': '443'}, {'is_ipv6': True, 'protocol': 'http', 'port': '80'}, {'is_ipv6': True, 'protocol': 'https', 'port': '443'}) @ddt.unpack def test__get_base_url(self, is_ipv6, protocol, port): self.root._host = '10.0.0.3' if not is_ipv6 else 'FF01::1' self.root._protocol = protocol self.root._port = port host_formated_for_url = ( '[%s]' % self.root._host if is_ipv6 else self.root._host) # example of the expected format: http://10.0.0.3:80/api/ expected_result = ( protocol + '://' + host_formated_for_url + ':' + port + '/api/') base_url = self.root._get_base_url() self.assertEqual(expected_result, base_url) def test__add_query_params_to_url(self): url = 'endpoint/to/get/data' filters = "?" 
for k, v in fake.FAKE_HTTP_QUERY.items(): filters += "%(key)s=%(value)s&" % {"key": k, "value": v} expected_formated_url = url + filters formatted_url = self.root._add_query_params_to_url( url, fake.FAKE_HTTP_QUERY) self.assertEqual(expected_formated_url, formatted_url) @ddt.data('1234', 5678) def test_custom_port(self, port): root = api.NaServer('127.0.0.1', port=port).rest_client self.assertEqual(str(port), root.get_port()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py0000664000175000017500000001645700000000000030657 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_log import log from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_base from manila import test from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake @ddt.ddt class NetAppBaseClientTestCase(test.TestCase): def setUp(self): super(NetAppBaseClientTestCase, self).setUp() # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(client_base.LOG, 'error', mock.Mock(side_effect=mock_logger.error)) self.mock_object(client_base.LOG, 'exception', mock.Mock(side_effect=mock_logger.error)) self.client = client_base.NetAppBaseClient(**fake.CONNECTION_INFO) self.client.connection = mock.MagicMock() self.connection = self.client.connection self.connection.zapi_client = mock.Mock() self.connection.rest_client = mock.Mock() def test_get_ontapi_version(self): version_response = netapp_api.NaElement(fake.ONTAPI_VERSION_RESPONSE) self.connection.invoke_successfully.return_value = version_response major, minor = self.client.get_ontapi_version(cached=False) self.assertEqual('1', major) self.assertEqual('19', minor) def test_get_ontapi_version_cached(self): self.connection.get_api_version.return_value = (1, 20) major, minor = self.client.get_ontapi_version() self.assertEqual(1, self.connection.get_api_version.call_count) self.assertEqual(1, major) self.assertEqual(20, minor) def test_get_system_version(self): version_response = netapp_api.NaElement( fake.SYSTEM_GET_VERSION_RESPONSE) self.connection.invoke_successfully.return_value = version_response result = self.client.get_system_version(cached=False) self.assertEqual(fake.VERSION, result['version']) self.assertEqual((8, 2, 1), result['version-tuple']) def test_get_system_version_cached(self): self.connection.get_system_version.return_value = { 'version': fake.VERSION, 'version-tuple': (8, 2, 1) } result = 
self.client.get_system_version() self.assertEqual(fake.VERSION, result['version']) self.assertEqual((8, 2, 1), result['version-tuple']) def test_init_features(self): self.client._init_features() self.assertSetEqual(set(), self.client.features.defined_features) @ddt.data('tag_name', '{http://www.netapp.com/filer/admin}tag_name') def test_strip_xml_namespace(self, element): result = self.client._strip_xml_namespace(element) self.assertEqual('tag_name', result) @ddt.data(True, False) def test_send_request(self, use_zapi): element = netapp_api.NaElement('fake-api') self.client.send_request('fake-api', use_zapi=use_zapi) self.assertEqual( element.to_string(), self.connection.invoke_successfully.call_args[0][0].to_string()) self.assertTrue( self.connection.invoke_successfully.call_args[1][ 'enable_tunneling']) self.assertEqual( use_zapi, self.connection.invoke_successfully.call_args[1][ 'use_zapi']) def test_send_request_no_tunneling(self): element = netapp_api.NaElement('fake-api') self.client.send_request('fake-api', enable_tunneling=False) self.assertEqual( element.to_string(), self.connection.invoke_successfully.call_args[0][0].to_string()) self.assertFalse( self.connection.invoke_successfully.call_args[1][ 'enable_tunneling']) @ddt.data(True, False) def test_send_request_with_args(self, use_zapi): element = netapp_api.NaElement('fake-api') api_args = {'arg1': 'data1', 'arg2': 'data2'} self.client.send_request('fake-api', api_args=api_args, use_zapi=use_zapi) self.assertEqual( element.to_string(), self.connection.invoke_successfully.call_args[0][0].to_string()) self.assertEqual( api_args, self.connection.invoke_successfully.call_args[1][ 'api_args']) self.assertTrue( self.connection.invoke_successfully.call_args[1][ 'enable_tunneling']) self.assertEqual( use_zapi, self.connection.invoke_successfully.call_args[1][ 'use_zapi']) def test_get_licenses(self): api_response = netapp_api.NaElement(fake.LICENSE_V2_LIST_INFO_RESPONSE) self.mock_object( self.client, 
'send_request', mock.Mock(return_value=api_response)) response = self.client.get_licenses() self.assertSequenceEqual(fake.LICENSES, response) def test_get_licenses_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=netapp_api.NaApiError)) self.assertRaises(netapp_api.NaApiError, self.client.get_licenses) self.assertEqual(1, client_base.LOG.exception.call_count) def test_send_ems_log_message(self): self.assertRaises(NotImplementedError, self.client.send_ems_log_message, {}) @ddt.ddt class FeaturesTestCase(test.TestCase): def setUp(self): super(FeaturesTestCase, self).setUp() self.features = client_base.Features() def test_init(self): self.assertSetEqual(set(), self.features.defined_features) def test_add_feature_default(self): self.features.add_feature('FEATURE_1') self.assertTrue(self.features.FEATURE_1) self.assertIn('FEATURE_1', self.features.defined_features) @ddt.data(True, False) def test_add_feature(self, value): self.features.add_feature('FEATURE_2', value) self.assertEqual(value, self.features.FEATURE_2) self.assertIn('FEATURE_2', self.features.defined_features) @ddt.data('True', 'False', 0, 1, 1.0, None, [], {}, (True,)) def test_add_feature_type_error(self, value): self.assertRaises(TypeError, self.features.add_feature, 'FEATURE_3', value) self.assertNotIn('FEATURE_3', self.features.defined_features) def test_get_attr_missing(self): self.assertRaises(AttributeError, getattr, self.features, 'FEATURE_4') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py0000664000175000017500000134061400000000000031030 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import hashlib import time from unittest import mock import ddt from oslo_log import log from manila import exception from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_base from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp import utils as na_utils from manila import test from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake @ddt.ddt class NetAppClientCmodeTestCase(test.TestCase): def setUp(self): super(NetAppClientCmodeTestCase, self).setUp() # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(client_cmode.LOG, 'error', mock.Mock(side_effect=mock_logger.error)) self.mock_object(client_cmode.LOG, 'warning', mock.Mock(side_effect=mock_logger.warning)) self.mock_object(client_cmode.LOG, 'debug', mock.Mock(side_effect=mock_logger.debug)) self.mock_object(client_base.NetAppBaseClient, 'get_ontapi_version', mock.Mock(return_value=(1, 20))) self.mock_object(client_base.NetAppBaseClient, 'get_system_version', mock.Mock(return_value={ 'version-tuple': (8, 3, 0), 'version': fake.VERSION, })) self.client = client_cmode.NetAppCmodeClient(**fake.CONNECTION_INFO) self.client.connection = mock.MagicMock() self.vserver_client = client_cmode.NetAppCmodeClient( **fake.CONNECTION_INFO) 
self.vserver_client.set_vserver(fake.VSERVER_NAME) self.vserver_client.connection = mock.MagicMock() def _mock_api_error(self, code='fake', message='fake'): return mock.Mock(side_effect=netapp_api.NaApiError(code=code, message=message)) def test_init_features_ontapi_1_21(self): self.mock_object(client_base.NetAppBaseClient, 'get_ontapi_version', mock.Mock(return_value=(1, 21))) self.client._init_features() self.assertFalse(self.client.features.BROADCAST_DOMAINS) self.assertFalse(self.client.features.IPSPACES) self.assertFalse(self.client.features.SUBNETS) self.assertFalse(self.client.features.FLEXVOL_ENCRYPTION) @ddt.data((1, 30), (1, 40), (2, 0)) def test_init_features_ontapi_1_30(self, ontapi_version): self.mock_object(client_base.NetAppBaseClient, 'get_ontapi_version', mock.Mock(return_value=ontapi_version)) self.client._init_features() self.assertTrue(self.client.features.BROADCAST_DOMAINS) self.assertTrue(self.client.features.IPSPACES) self.assertTrue(self.client.features.SUBNETS) @ddt.data((1, 110), (2, 0)) def test_init_features_ontap_1_110(self, ontapi_version): self.mock_object(client_base.NetAppBaseClient, 'get_ontapi_version', mock.Mock(return_value=ontapi_version)) self.client._init_features() self.assertTrue(self.client.features.BROADCAST_DOMAINS) self.assertTrue(self.client.features.IPSPACES) self.assertTrue(self.client.features.SUBNETS) self.assertTrue(self.client.features.FLEXVOL_ENCRYPTION) @ddt.data(((9, 1, 0), fake.VERSION_NO_DARE), ((8, 3, 2), fake.VERSION)) @ddt.unpack def test_is_nve_supported_unsupported_release_or_platform(self, gen, ver): system_version = {'version-tuple': gen, 'version': ver} self.mock_object(client_base.NetAppBaseClient, 'get_system_version', mock.Mock(return_value=system_version)) self.mock_object(self.client, 'get_security_key_manager_nve_support', mock.Mock(return_value=True)) self.mock_object(self.client, 'list_cluster_nodes', mock.Mock(return_value=fake.NODE_NAMES)) result = self.client.is_nve_supported() 
self.assertFalse(result) def test_is_nve_supported_valid_platform_and_supported_release(self): system_version = { 'version-tuple': (9, 1, 0), 'version': fake.VERSION, } self.mock_object(client_base.NetAppBaseClient, 'get_system_version', mock.Mock(return_value=system_version)) self.mock_object(self.client, 'get_security_key_manager_nve_support', mock.Mock(return_value=True)) self.mock_object(self.client, 'list_cluster_nodes', mock.Mock(return_value=fake.NODE_NAMES)) result = self.client.is_nve_supported() self.assertTrue(result) def test_is_nve_supported_key_manager_not_enabled(self): system_version = { 'version-tuple': (9, 1, 0), 'version': fake.VERSION, } self.mock_object(client_base.NetAppBaseClient, 'get_system_version', mock.Mock(return_value=system_version)) self.mock_object(self.client, 'get_security_key_manager_nve_support', mock.Mock(return_value=False)) self.mock_object(self.client, 'list_cluster_nodes', mock.Mock(return_value=fake.NODE_NAMES)) result = self.client.is_nve_supported() self.assertFalse(result) def test_get_security_key_manager_nve_support_enabled(self): api_response = netapp_api.NaElement( fake.SECUTITY_KEY_MANAGER_NVE_SUPPORT_RESPONSE_TRUE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_security_key_manager_nve_support( fake.NODE_NAME) self.assertTrue(result) api_args = {'node': fake.NODE_NAME} self.client.send_request.assert_has_calls([ mock.call('security-key-manager-volume-encryption-supported', api_args)]) def test_get_security_key_manager_nve_support_disabled(self): api_response = netapp_api.NaElement( fake.SECUTITY_KEY_MANAGER_NVE_SUPPORT_RESPONSE_FALSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_security_key_manager_nve_support( fake.NODE_NAME) self.assertFalse(result) api_args = {'node': fake.NODE_NAME} self.client.send_request.assert_has_calls([ 
mock.call('security-key-manager-volume-encryption-supported', api_args)]) def test_get_security_key_manager_nve_support_disabled_no_license(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) result = self.client.get_security_key_manager_nve_support( fake.NODE_NAME) self.assertFalse(result) api_args = {'node': fake.NODE_NAME} self.client.send_request.assert_has_calls([ mock.call('security-key-manager-volume-encryption-supported', api_args)]) @ddt.data((True, True, True), (False, None, False)) @ddt.unpack def test_send_volume_move_request_success(self, validation_only, encrypt_dst, fv_encryption): self.mock_object(self.client, 'features', mock.Mock(FLEXVOL_ENCRYPTION=fv_encryption)) self.client._send_volume_move_request(fake.ROOT_VOLUME_NAME, fake.NODE_VSERVER_NAME, fake.SHARE_AGGREGATE_NAME, validation_only=validation_only, encrypt_destination=encrypt_dst) @ddt.data((True, True, False)) @ddt.unpack def test_send_volume_move_request_failure(self, validation_only, encrypt_dst, fv_encrypt): self.mock_object(self.client, 'features', mock.Mock(FLEXVOL_ENCRYPTION=fv_encrypt)) self.assertRaises(exception.NetAppException, self.client._send_volume_move_request, fake.ROOT_VOLUME_NAME, fake.NODE_VSERVER_NAME, fake.SHARE_AGGREGATE_NAME, validation_only=validation_only, encrypt_destination=encrypt_dst) def test_invoke_vserver_api(self): self.client._invoke_vserver_api('fake-api', 'fake_vserver') self.client.connection.set_vserver.assert_has_calls( [mock.call('fake_vserver')]) self.client.connection.invoke_successfully.assert_has_calls( [mock.call('fake-api', True)]) def test_has_records(self): self.assertTrue(self.client._has_records( netapp_api.NaElement(fake.VSERVER_GET_ITER_RESPONSE))) def test_has_records_not_found(self): self.assertFalse(self.client._has_records( netapp_api.NaElement(fake.NO_RECORDS_RESPONSE))) @ddt.data((fake.VSERVER_GET_ITER_RESPONSE, 1), (fake.NO_RECORDS_RESPONSE, 0)) @ddt.unpack def test_get_record_count(self, response, 
expected): api_response = netapp_api.NaElement(response) result = self.client._get_record_count(api_response) self.assertEqual(expected, result) def test_get_records_count_invalid(self): api_response = netapp_api.NaElement( fake.INVALID_GET_ITER_RESPONSE_NO_RECORDS) self.assertRaises(exception.NetAppException, self.client._get_record_count, api_response) def test_send_iter_request(self): api_responses = [ netapp_api.NaElement(fake.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1), netapp_api.NaElement(fake.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2), netapp_api.NaElement(fake.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3), ] mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(side_effect=api_responses)) storage_disk_get_iter_args = { 'desired-attributes': { 'storage-disk-info': { 'disk-name': None, } } } result = self.client.send_iter_request( 'storage-disk-get-iter', api_args=storage_disk_get_iter_args, max_page_length=10) num_records = result.get_child_content('num-records') self.assertEqual('28', num_records) next_tag = result.get_child_content('next-tag') self.assertEqual('', next_tag) args1 = copy.deepcopy(storage_disk_get_iter_args) args1['max-records'] = 10 args2 = copy.deepcopy(storage_disk_get_iter_args) args2['max-records'] = 10 args2['tag'] = 'next_tag_1' args3 = copy.deepcopy(storage_disk_get_iter_args) args3['max-records'] = 10 args3['tag'] = 'next_tag_2' mock_send_request.assert_has_calls([ mock.call('storage-disk-get-iter', args1, enable_tunneling=True), mock.call('storage-disk-get-iter', args2, enable_tunneling=True), mock.call('storage-disk-get-iter', args3, enable_tunneling=True), ]) def test_send_iter_request_single_page(self): api_response = netapp_api.NaElement( fake.STORAGE_DISK_GET_ITER_RESPONSE) mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) storage_disk_get_iter_args = { 'desired-attributes': { 'storage-disk-info': { 'disk-name': None, } } } result = self.client.send_iter_request( 
'storage-disk-get-iter', api_args=storage_disk_get_iter_args, max_page_length=10) num_records = result.get_child_content('num-records') self.assertEqual('4', num_records) args = copy.deepcopy(storage_disk_get_iter_args) args['max-records'] = 10 mock_send_request.assert_has_calls([ mock.call('storage-disk-get-iter', args, enable_tunneling=True), ]) def test_send_iter_request_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.send_iter_request('storage-disk-get-iter') num_records = result.get_child_content('num-records') self.assertEqual('0', num_records) args = {'max-records': client_cmode.DEFAULT_MAX_PAGE_LENGTH} mock_send_request.assert_has_calls([ mock.call('storage-disk-get-iter', args, enable_tunneling=True), ]) @ddt.data(fake.INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES, fake.INVALID_GET_ITER_RESPONSE_NO_RECORDS) def test_send_iter_request_invalid(self, fake_response): api_response = netapp_api.NaElement(fake_response) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.send_iter_request, 'storage-disk-get-iter') def test_set_vserver(self): self.client.set_vserver(fake.VSERVER_NAME) self.client.connection.set_vserver.assert_has_calls( [mock.call('fake_vserver')]) def test_vserver_exists(self): api_response = netapp_api.NaElement(fake.VSERVER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) vserver_get_args = { 'query': {'vserver-info': {'vserver-name': fake.VSERVER_NAME}}, 'desired-attributes': {'vserver-info': {'vserver-name': None}} } result = self.client.vserver_exists(fake.VSERVER_NAME) self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_args, enable_tunneling=False)]) self.assertTrue(result) def 
test_vserver_exists_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.vserver_exists(fake.VSERVER_NAME) self.assertFalse(result) def test_create_vserver_no_ipspace(self): self.client.features.add_feature('DELETE_RETENTION_HOURS') self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_modify_security_cert', mock.Mock()) vserver_create_args = { 'vserver-name': fake.VSERVER_NAME, 'root-volume-security-style': 'unix', 'root-volume-aggregate': fake.ROOT_VOLUME_AGGREGATE_NAME, 'root-volume': fake.ROOT_VOLUME_NAME, 'name-server-switch': {'nsswitch': 'file'}, 'is-space-reporting-logical': 'false', 'is-space-enforcement-logical': 'false' } vserver_modify_args = { 'aggr-list': [{'aggr-name': aggr_name} for aggr_name in fake.SHARE_AGGREGATE_NAMES], 'vserver-name': fake.VSERVER_NAME, 'volume-delete-retention-hours': 16, } self.client.create_vserver(fake.VSERVER_NAME, fake.ROOT_VOLUME_AGGREGATE_NAME, fake.ROOT_VOLUME_NAME, fake.SHARE_AGGREGATE_NAMES, None, fake.SECURITY_CERT_LARGE_EXPIRE_DAYS, 16, False) self.client.send_request.assert_has_calls([ mock.call('vserver-create', vserver_create_args), mock.call('vserver-modify', vserver_modify_args)]) self.client._modify_security_cert.assert_called_with( fake.VSERVER_NAME, fake.SECURITY_CERT_LARGE_EXPIRE_DAYS) def test_create_vserver_with_ipspace(self): self.client.features.add_feature('IPSPACES') self.client.features.add_feature('DELETE_RETENTION_HOURS') self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_modify_security_cert', mock.Mock()) vserver_create_args = { 'vserver-name': fake.VSERVER_NAME, 'root-volume-security-style': 'unix', 'root-volume-aggregate': fake.ROOT_VOLUME_AGGREGATE_NAME, 'root-volume': fake.ROOT_VOLUME_NAME, 'name-server-switch': {'nsswitch': 'file'}, 'ipspace': fake.IPSPACE_NAME, 'is-space-reporting-logical': 'false', 
'is-space-enforcement-logical': 'false' } vserver_modify_args = { 'aggr-list': [{'aggr-name': aggr_name} for aggr_name in fake.SHARE_AGGREGATE_NAMES], 'volume-delete-retention-hours': 24, 'vserver-name': fake.VSERVER_NAME } self.client.create_vserver(fake.VSERVER_NAME, fake.ROOT_VOLUME_AGGREGATE_NAME, fake.ROOT_VOLUME_NAME, fake.SHARE_AGGREGATE_NAMES, fake.IPSPACE_NAME, fake.SECURITY_CERT_LARGE_EXPIRE_DAYS, 24, False) self.client.send_request.assert_has_calls([ mock.call('vserver-create', vserver_create_args), mock.call('vserver-modify', vserver_modify_args)]) self.client._modify_security_cert.assert_called_with( fake.VSERVER_NAME, fake.SECURITY_CERT_LARGE_EXPIRE_DAYS) def test__modify_security_cert(self): certificate_create_args = { 'vserver': fake.VSERVER_NAME, 'common-name': fake.VSERVER_NAME, 'type': 'server', 'expire-days': fake.SECURITY_CERT_LARGE_EXPIRE_DAYS, } self.mock_object(self.client, 'send_request') api_response = netapp_api.NaElement(fake.SECURITY_CERT_GET_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) certificate_get_args = { 'query': { 'certificate-info': { 'vserver': fake.VSERVER_NAME, 'common-name': fake.VSERVER_NAME, 'certificate-authority': fake.VSERVER_NAME, 'type': 'server', }, }, 'desired-attributes': { 'certificate-info': { 'serial-number': None, }, }, } certificate_delete_args = { 'certificate-authority': fake.VSERVER_NAME, 'common-name': fake.VSERVER_NAME, 'serial-number': '12345', 'type': 'server', 'vserver': fake.VSERVER_NAME, } self.client._modify_security_cert( fake.VSERVER_NAME, fake.SECURITY_CERT_LARGE_EXPIRE_DAYS) self.client.send_request.assert_has_calls([ mock.call( 'security-certificate-create', certificate_create_args), mock.call( 'security-certificate-delete', certificate_delete_args)]) self.client.send_iter_request.assert_has_calls([ mock.call('security-certificate-get-iter', certificate_get_args)]) def test_create_vserver_dp_destination(self): 
self.client.features.add_feature('IPSPACES') self.client.features.add_feature('DELETE_RETENTION_HOURS') self.mock_object(self.client, 'send_request') vserver_create_args = { 'vserver-name': fake.VSERVER_NAME, 'ipspace': fake.IPSPACE_NAME, 'vserver-subtype': fake.VSERVER_TYPE_DP_DEST, 'is-space-reporting-logical': 'false', 'is-space-enforcement-logical': 'false' } vserver_modify_args = { 'aggr-list': [{'aggr-name': aggr_name} for aggr_name in fake.SHARE_AGGREGATE_NAMES], 'volume-delete-retention-hours': 18, 'vserver-name': fake.VSERVER_NAME } self.client.create_vserver_dp_destination( fake.VSERVER_NAME, fake.SHARE_AGGREGATE_NAMES, fake.IPSPACE_NAME, 18, False) self.client.send_request.assert_has_calls([ mock.call('vserver-create', vserver_create_args), mock.call('vserver-modify', vserver_modify_args)]) def test_create_vserver_ipspaces_not_supported(self): self.assertRaises(exception.NetAppException, self.client.create_vserver, fake.VSERVER_NAME, fake.ROOT_VOLUME_AGGREGATE_NAME, fake.ROOT_VOLUME_NAME, fake.SHARE_AGGREGATE_NAMES, fake.IPSPACE_NAME, fake.SECURITY_CERT_LARGE_EXPIRE_DAYS, 10, False) def test_get_vserver_root_volume_name(self): api_response = netapp_api.NaElement( fake.VSERVER_GET_ROOT_VOLUME_NAME_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) vserver_get_args = { 'query': {'vserver-info': {'vserver-name': fake.VSERVER_NAME}}, 'desired-attributes': {'vserver-info': {'root-volume': None}} } result = self.client.get_vserver_root_volume_name(fake.VSERVER_NAME) self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_args)]) self.assertEqual(fake.ROOT_VOLUME_NAME, result) def test_get_vserver_root_volume_name_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_vserver_root_volume_name, 
fake.VSERVER_NAME) def test_get_vserver_ipspace(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement( fake.VSERVER_GET_IPSPACE_NAME_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_ipspace(fake.VSERVER_NAME) vserver_get_iter_args = { 'query': { 'vserver-info': { 'vserver-name': fake.VSERVER_NAME, }, }, 'desired-attributes': { 'vserver-info': { 'ipspace': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args)]) self.assertEqual(fake.IPSPACE_NAME, result) def test_get_vserver_ipspace_not_supported(self): result = self.client.get_vserver_ipspace(fake.IPSPACE_NAME) self.assertIsNone(result) def test_get_vserver_ipspace_not_found(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_vserver_ipspace, fake.IPSPACE_NAME) def test_ipspace_has_data_vservers(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(fake.VSERVER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.ipspace_has_data_vservers(fake.IPSPACE_NAME) vserver_get_iter_args = { 'query': { 'vserver-info': { 'ipspace': fake.IPSPACE_NAME, 'vserver-type': 'data' }, }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args)]) self.assertTrue(result) def test_ipspace_has_data_vservers_not_supported(self): result = self.client.ipspace_has_data_vservers(fake.IPSPACE_NAME) self.assertFalse(result) def test_ipspace_has_data_vservers_not_found(self): self.client.features.add_feature('IPSPACES') api_response = 
netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.ipspace_has_data_vservers(fake.IPSPACE_NAME) self.assertFalse(result) def test_list_vservers(self): api_response = netapp_api.NaElement( fake.VSERVER_DATA_LIST_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.list_vservers() vserver_get_iter_args = { 'query': { 'vserver-info': { 'vserver-type': 'data' } }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None } } } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args)]) self.assertListEqual([fake.VSERVER_NAME], result) def test_list_vservers_node_type(self): api_response = netapp_api.NaElement( fake.VSERVER_DATA_LIST_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.list_vservers(vserver_type='node') vserver_get_iter_args = { 'query': { 'vserver-info': { 'vserver-type': 'node' } }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None } } } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args)]) self.assertListEqual([fake.VSERVER_NAME], result) def test_list_vservers_not_found(self): api_response = netapp_api.NaElement( fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.list_vservers(vserver_type='data') self.assertListEqual([], result) def test_get_vserver_volume_count(self): api_response = netapp_api.NaElement(fake.VOLUME_COUNT_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_volume_count() self.assertEqual(2, result) def test_delete_vserver_no_volumes(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) 
self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=0)) self.mock_object(self.client, '_terminate_vserver_services') self.mock_object(self.client, 'send_request') self.client.delete_vserver( fake.VSERVER_NAME, self.vserver_client, security_services=[fake.CIFS_SECURITY_SERVICE]) self.client._terminate_vserver_services.assert_called_with( fake.VSERVER_NAME, self.vserver_client, [fake.CIFS_SECURITY_SERVICE]) vserver_destroy_args = {'vserver-name': fake.VSERVER_NAME} self.client.send_request.assert_has_calls([ mock.call('vserver-destroy', vserver_destroy_args)]) def test_delete_vserver_one_volume(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'offline_volume') self.mock_object(self.vserver_client, 'delete_volume') self.client.delete_vserver(fake.VSERVER_NAME, self.vserver_client) self.vserver_client.offline_volume.assert_called_with( fake.ROOT_VOLUME_NAME) self.vserver_client.delete_volume.assert_called_with( fake.ROOT_VOLUME_NAME) vserver_destroy_args = {'vserver-name': fake.VSERVER_NAME} self.client.send_request.assert_has_calls([ mock.call('vserver-destroy', vserver_destroy_args)]) def test_delete_vserver_one_volume_already_offline(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'send_request') 
self.mock_object(self.vserver_client, 'offline_volume', self._mock_api_error(code=netapp_api.EVOLUMEOFFLINE)) self.mock_object(self.vserver_client, 'delete_volume') self.client.delete_vserver(fake.VSERVER_NAME, self.vserver_client) self.vserver_client.offline_volume.assert_called_with( fake.ROOT_VOLUME_NAME) self.vserver_client.delete_volume.assert_called_with( fake.ROOT_VOLUME_NAME) vserver_destroy_args = {'vserver-name': fake.VSERVER_NAME} self.client.send_request.assert_has_calls([ mock.call('vserver-destroy', vserver_destroy_args)]) self.assertEqual(1, client_cmode.LOG.error.call_count) def test_delete_vserver_one_volume_api_error(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'offline_volume', self._mock_api_error()) self.mock_object(self.vserver_client, 'delete_volume') self.assertRaises(netapp_api.NaApiError, self.client.delete_vserver, fake.VSERVER_NAME, self.vserver_client) def test_delete_vserver_multiple_volumes(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.vserver_client, 'get_vserver_volume_count', mock.Mock(return_value=2)) self.assertRaises(exception.NetAppException, self.client.delete_vserver, fake.VSERVER_NAME, self.vserver_client) def test_delete_vserver_not_found(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=None)) self.client.delete_vserver(fake.VSERVER_NAME, self.vserver_client) self.assertEqual(1, client_cmode.LOG.error.call_count) def test_terminate_vserver_services(self): 
self.mock_object(self.vserver_client, 'send_request') self.client._terminate_vserver_services(fake.VSERVER_NAME, self.vserver_client, [fake.CIFS_SECURITY_SERVICE]) cifs_server_delete_args = { 'admin-password': fake.CIFS_SECURITY_SERVICE['password'], 'admin-username': fake.CIFS_SECURITY_SERVICE['user'], } self.vserver_client.send_request.assert_has_calls([ mock.call('cifs-server-delete', cifs_server_delete_args)]) def test_terminate_vserver_services_cifs_not_found(self): self.mock_object(self.vserver_client, 'send_request', self._mock_api_error( code=netapp_api.EOBJECTNOTFOUND)) self.client._terminate_vserver_services(fake.VSERVER_NAME, self.vserver_client, [fake.CIFS_SECURITY_SERVICE]) cifs_server_delete_args = { 'admin-password': fake.CIFS_SECURITY_SERVICE['password'], 'admin-username': fake.CIFS_SECURITY_SERVICE['user'], } self.vserver_client.send_request.assert_has_calls([ mock.call('cifs-server-delete', cifs_server_delete_args)]) self.assertEqual(1, client_cmode.LOG.error.call_count) def test_terminate_vserver_services_api_error(self): side_effects = [netapp_api.NaApiError(code='fake'), None] self.mock_object(self.vserver_client, 'send_request', mock.Mock(side_effect=side_effects)) self.client._terminate_vserver_services(fake.VSERVER_NAME, self.vserver_client, [fake.CIFS_SECURITY_SERVICE]) cifs_server_delete_args = { 'admin-password': fake.CIFS_SECURITY_SERVICE['password'], 'admin-username': fake.CIFS_SECURITY_SERVICE['user'], } cifs_server_delete_force_args = { 'force-account-delete': 'true', } self.vserver_client.send_request.assert_has_calls([ mock.call('cifs-server-delete', cifs_server_delete_args), mock.call('cifs-server-delete', cifs_server_delete_force_args)]) self.assertEqual(0, client_cmode.LOG.error.call_count) def test_list_cluster_nodes(self): api_response = netapp_api.NaElement( fake.SYSTEM_NODE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.list_cluster_nodes() 
self.assertListEqual([fake.NODE_NAME], result) def test_list_cluster_nodes_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.list_cluster_nodes() self.assertListEqual([], result) def test_list_node_data_ports(self): self.mock_object(self.client, 'get_node_data_ports', mock.Mock(return_value=fake.SPEED_SORTED_PORTS)) result = self.client.list_node_data_ports(fake.NODE_NAME) self.assertSequenceEqual(fake.SPEED_SORTED_PORT_NAMES, result) def test_get_node_data_ports(self): api_response = netapp_api.NaElement(fake.NET_PORT_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_node_data_ports(fake.NODE_NAME) net_port_get_iter_args = { 'query': { 'net-port-info': { 'node': fake.NODE_NAME, 'link-status': 'up', 'port-type': 'physical|if_group', 'role': 'data', }, }, 'desired-attributes': { 'net-port-info': { 'port': None, 'node': None, 'operational-speed': None, 'ifgrp-port': None, }, }, } self.assertSequenceEqual(fake.SPEED_SORTED_PORTS, result) self.client.send_iter_request.assert_has_calls([ mock.call('net-port-get-iter', net_port_get_iter_args)]) def test_get_node_data_ports_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_node_data_ports(fake.NODE_NAME) self.assertSequenceEqual([], result) def test_sort_data_ports_by_speed(self): result = self.client._sort_data_ports_by_speed( fake.UNSORTED_PORTS_ALL_SPEEDS) self.assertSequenceEqual(fake.SORTED_PORTS_ALL_SPEEDS, result) def test_list_root_aggregates(self): api_response = netapp_api.NaElement( fake.AGGR_GET_ITER_ROOT_AGGR_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.list_root_aggregates() 
aggr_get_iter_args = { 'desired-attributes': { 'aggr-attributes': { 'aggregate-name': None, 'aggr-raid-attributes': { 'has-local-root': None, 'has-partner-root': None, }, }, } } self.assertSequenceEqual(fake.ROOT_AGGREGATE_NAMES, result) self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', aggr_get_iter_args)]) def test_list_non_root_aggregates(self): api_response = netapp_api.NaElement( fake.AGGR_GET_ITER_NON_ROOT_AGGR_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.list_non_root_aggregates() aggr_get_iter_args = { 'query': { 'aggr-attributes': { 'aggr-raid-attributes': { 'has-local-root': 'false', 'has-partner-root': 'false', } }, }, 'desired-attributes': { 'aggr-attributes': { 'aggregate-name': None, }, }, } self.assertSequenceEqual(fake.SHARE_AGGREGATE_NAMES, result) self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', aggr_get_iter_args)]) def test_list_aggregates(self): api_response = netapp_api.NaElement(fake.AGGR_GET_NAMES_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._list_aggregates() aggr_get_iter_args = { 'desired-attributes': { 'aggr-attributes': { 'aggregate-name': None, }, }, } self.assertSequenceEqual( fake.ROOT_AGGREGATE_NAMES + fake.SHARE_AGGREGATE_NAMES, result) self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', aggr_get_iter_args)]) def test_list_aggregates_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client._list_aggregates) def test_list_vserver_aggregates(self): self.mock_object(self.vserver_client, 'get_vserver_aggregate_capacities', mock.Mock(return_value=fake.VSERVER_AGGREGATES)) result = self.vserver_client.list_vserver_aggregates() 
self.assertListEqual(list(fake.VSERVER_AGGREGATES.keys()), result) def test_list_vserver_aggregates_none_found(self): self.mock_object(self.vserver_client, 'get_vserver_aggregate_capacities', mock.Mock(return_value={})) result = self.vserver_client.list_vserver_aggregates() self.assertListEqual([], result) def test_create_network_interface(self): self.mock_object(self.client, 'send_request') lif_create_args = { 'address': fake.IP_ADDRESS, 'administrative-status': 'up', 'data-protocols': [ {'data-protocol': 'nfs'}, {'data-protocol': 'cifs'} ], 'home-node': fake.NODE_NAME, 'home-port': fake.VLAN_PORT, 'netmask': fake.NETMASK, 'interface-name': fake.LIF_NAME, 'role': 'data', 'vserver': fake.VSERVER_NAME, } self.client.create_network_interface(fake.IP_ADDRESS, fake.NETMASK, fake.NODE_NAME, fake.VLAN_PORT, fake.VSERVER_NAME, fake.LIF_NAME) self.client.send_request.assert_called_once_with( 'net-interface-create', lif_create_args) @ddt.data((None, True), (fake.VLAN, True), (None, False), (fake.VLAN, False)) @ddt.unpack def test_create_port_and_broadcast_domain(self, fake_vlan, broadcast_domains_supported): self.client.features.add_feature( 'BROADCAST_DOMAINS', broadcast_domains_supported) mock_create_vlan = self.mock_object( self.client, '_create_vlan') mock_ensure_broadcast = self.mock_object( self.client, '_ensure_broadcast_domain_for_port') result = self.client.create_port_and_broadcast_domain( fake.NODE_NAME, fake.PORT, fake_vlan, fake.MTU, fake.IPSPACE_NAME) if fake_vlan: mock_create_vlan.assert_called_once_with( fake.NODE_NAME, fake.PORT, fake_vlan) fake_home_port_name = ( f'{fake.PORT}-{fake_vlan}' if fake_vlan else fake.PORT) if broadcast_domains_supported: mock_ensure_broadcast.assert_called_once_with( fake.NODE_NAME, fake_home_port_name, fake.MTU, ipspace=fake.IPSPACE_NAME) self.assertEqual(fake_home_port_name, result) def test_create_vlan(self): self.mock_object(self.client, 'send_request') vlan_create_args = { 'vlan-info': { 'parent-interface': fake.PORT, 
'node': fake.NODE_NAME, 'vlanid': fake.VLAN } } self.client._create_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_has_calls([ mock.call('net-vlan-create', vlan_create_args)]) def test_create_vlan_already_present(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EDUPLICATEENTRY)) vlan_create_args = { 'vlan-info': { 'parent-interface': fake.PORT, 'node': fake.NODE_NAME, 'vlanid': fake.VLAN } } self.client._create_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_has_calls([ mock.call('net-vlan-create', vlan_create_args)]) self.assertEqual(1, client_cmode.LOG.debug.call_count) def test_create_vlan_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client._create_vlan, fake.NODE_NAME, fake.PORT, fake.VLAN) def test_delete_vlan(self): self.mock_object(self.client, 'send_request') vlan_delete_args = { 'vlan-info': { 'parent-interface': fake.PORT, 'node': fake.NODE_NAME, 'vlanid': fake.VLAN } } self.client.delete_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_has_calls([ mock.call('net-vlan-delete', vlan_delete_args)]) def test_delete_vlan_still_used(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EAPIERROR, message='Port already has a ' 'lif bound. 
')) vlan_delete_args = { 'vlan-info': { 'parent-interface': fake.PORT, 'node': fake.NODE_NAME, 'vlanid': fake.VLAN } } self.client.delete_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_has_calls([ mock.call('net-vlan-delete', vlan_delete_args)]) self.assertEqual(1, client_cmode.LOG.debug.call_count) def test_delete_vlan_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client.delete_vlan, fake.NODE_NAME, fake.PORT, fake.VLAN) @ddt.data(('10.10.10.0/24', '10.10.10.1', False), ('fc00::/7', 'fe80::1', False), ('0.0.0.0/0', '10.10.10.1', True), ('::/0', 'fe80::1', True)) @ddt.unpack def test_create_route(self, subnet, gateway, omit_destination): api_response = netapp_api.NaElement( fake.NET_ROUTES_CREATE_RESPONSE) expected_api_args = { 'destination': subnet, 'gateway': gateway, 'return-record': 'true', } self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) destination = None if omit_destination else subnet self.client.create_route(gateway, destination=destination) self.client.send_request.assert_called_once_with( 'net-routes-create', expected_api_args) def test_create_route_duplicate(self): self.mock_object(client_cmode.LOG, 'debug') expected_api_args = { 'destination': fake.SUBNET, 'gateway': fake.GATEWAY, 'return-record': 'true', } self.mock_object( self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error( code=netapp_api.EAPIERROR, message='Duplicate route exists.'))) self.client.create_route(fake.GATEWAY, destination=fake.SUBNET) self.client.send_request.assert_called_once_with( 'net-routes-create', expected_api_args) self.assertEqual(1, client_cmode.LOG.debug.call_count) def test_create_route_api_error(self): expected_api_args = { 'destination': fake.SUBNET, 'gateway': fake.GATEWAY, 'return-record': 'true', } self.mock_object( self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) 
self.assertRaises(exception.NetAppException, self.client.create_route, fake.GATEWAY, destination=fake.SUBNET) self.client.send_request.assert_called_once_with( 'net-routes-create', expected_api_args) def test_create_route_without_gateway(self): self.mock_object(self.client, 'send_request') self.client.create_route(None, destination=fake.SUBNET) self.assertFalse(self.client.send_request.called) def test_ensure_broadcast_domain_for_port_domain_match(self): port_info = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, } self.mock_object(self.client, '_get_broadcast_domain_for_port', mock.Mock(return_value=port_info)) self.mock_object(self.client, '_broadcast_domain_exists', mock.Mock(return_value=True)) self.mock_object(self.client, '_create_broadcast_domain') self.mock_object(self.client, '_modify_broadcast_domain') self.mock_object(self.client, '_add_port_to_broadcast_domain') self.client._ensure_broadcast_domain_for_port( fake.NODE_NAME, fake.PORT, fake.MTU, ipspace=fake.IPSPACE_NAME) self.client._get_broadcast_domain_for_port.assert_called_once_with( fake.NODE_NAME, fake.PORT) self.client._modify_broadcast_domain.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) self.assertFalse(self.client._broadcast_domain_exists.called) self.assertFalse(self.client._create_broadcast_domain.called) self.assertFalse(self.client._add_port_to_broadcast_domain.called) @ddt.data(fake.IPSPACE_NAME, client_cmode.DEFAULT_IPSPACE) def test_ensure_broadcast_domain_for_port_other_domain(self, ipspace): port_info = { 'ipspace': ipspace, 'broadcast-domain': 'other_domain', } self.mock_object(self.client, '_get_broadcast_domain_for_port', mock.Mock(return_value=port_info)) self.mock_object(self.client, '_broadcast_domain_exists', mock.Mock(return_value=True)) self.mock_object(self.client, '_create_broadcast_domain') self.mock_object(self.client, '_modify_broadcast_domain') self.mock_object(self.client, '_remove_port_from_broadcast_domain') 
self.mock_object(self.client, '_add_port_to_broadcast_domain') self.client._ensure_broadcast_domain_for_port( fake.NODE_NAME, fake.PORT, ipspace=fake.IPSPACE_NAME, mtu=fake.MTU) self.client._get_broadcast_domain_for_port.assert_called_once_with( fake.NODE_NAME, fake.PORT) self.client._remove_port_from_broadcast_domain.assert_called_once_with( fake.NODE_NAME, fake.PORT, 'other_domain', ipspace) self.client._broadcast_domain_exists.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.assertFalse(self.client._create_broadcast_domain.called) self.client._modify_broadcast_domain.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) self.client._add_port_to_broadcast_domain.assert_called_once_with( fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) def test_ensure_broadcast_domain_for_port_no_domain(self): port_info = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': None, } self.mock_object(self.client, '_get_broadcast_domain_for_port', mock.Mock(return_value=port_info)) self.mock_object(self.client, '_broadcast_domain_exists', mock.Mock(return_value=False)) self.mock_object(self.client, '_create_broadcast_domain') self.mock_object(self.client, '_modify_broadcast_domain') self.mock_object(self.client, '_remove_port_from_broadcast_domain') self.mock_object(self.client, '_add_port_to_broadcast_domain') self.client._ensure_broadcast_domain_for_port( fake.NODE_NAME, fake.PORT, ipspace=fake.IPSPACE_NAME, mtu=fake.MTU) self.client._get_broadcast_domain_for_port.assert_called_once_with( fake.NODE_NAME, fake.PORT) self.assertFalse(self.client._remove_port_from_broadcast_domain.called) self.client._broadcast_domain_exists.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.client._create_broadcast_domain.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) self.assertFalse(self.client._modify_broadcast_domain.called) 
self.client._add_port_to_broadcast_domain.assert_called_once_with( fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) def test_get_broadcast_domain_for_port(self): api_response = netapp_api.NaElement( fake.NET_PORT_GET_ITER_BROADCAST_DOMAIN_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) net_port_get_iter_args = { 'query': { 'net-port-info': { 'node': fake.NODE_NAME, 'port': fake.PORT, }, }, 'desired-attributes': { 'net-port-info': { 'broadcast-domain': None, 'ipspace': None, }, }, } result = self.client._get_broadcast_domain_for_port(fake.NODE_NAME, fake.PORT) expected = { 'broadcast-domain': fake.BROADCAST_DOMAIN, 'ipspace': fake.IPSPACE_NAME, } self.client.send_iter_request.assert_has_calls([ mock.call('net-port-get-iter', net_port_get_iter_args)]) self.assertEqual(expected, result) def test_get_broadcast_domain_for_port_port_not_found(self): api_response = netapp_api.NaElement( fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client._get_broadcast_domain_for_port, fake.NODE_NAME, fake.PORT) def test_get_broadcast_domain_for_port_domain_not_found(self): api_response = netapp_api.NaElement( fake.NET_PORT_GET_ITER_BROADCAST_DOMAIN_MISSING_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_broadcast_domain_for_port(fake.NODE_NAME, fake.PORT) expected = { 'broadcast-domain': None, 'ipspace': fake.IPSPACE_NAME, } self.assertEqual(expected, result) def test_broadcast_domain_exists(self): api_response = netapp_api.NaElement( fake.NET_PORT_BROADCAST_DOMAIN_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._broadcast_domain_exists(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) net_port_broadcast_domain_get_iter_args = { 'query': { 
'net-port-broadcast-domain-info': { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, }, }, 'desired-attributes': { 'net-port-broadcast-domain-info': None, }, } self.client.send_iter_request.assert_has_calls([ mock.call('net-port-broadcast-domain-get-iter', net_port_broadcast_domain_get_iter_args)]) self.assertTrue(result) def test_broadcast_domain_exists_not_found(self): api_response = netapp_api.NaElement( fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._broadcast_domain_exists(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.assertFalse(result) def test_create_broadcast_domain(self): self.mock_object(self.client, 'send_request') result = self.client._create_broadcast_domain(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) net_port_broadcast_domain_create_args = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, 'mtu': fake.MTU, } self.assertIsNone(result) self.client.send_request.assert_has_calls([ mock.call('net-port-broadcast-domain-create', net_port_broadcast_domain_create_args)]) def test_modify_broadcast_domain(self): self.mock_object(self.client, 'send_request') result = self.client._modify_broadcast_domain(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) net_port_broadcast_domain_modify_args = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, 'mtu': fake.MTU, } self.assertIsNone(result) self.client.send_request.assert_called_once_with( 'net-port-broadcast-domain-modify', net_port_broadcast_domain_modify_args) def test_delete_broadcast_domain(self): self.mock_object(self.client, 'send_request') result = self.client._delete_broadcast_domain(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) net_port_broadcast_domain_delete_args = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, } self.assertIsNone(result) self.client.send_request.assert_has_calls([ 
mock.call('net-port-broadcast-domain-destroy', net_port_broadcast_domain_delete_args)]) def test_delete_broadcast_domains_for_ipspace_not_found(self): self.mock_object(self.client, 'get_ipspaces', mock.Mock(return_value=[])) self.mock_object(self.client, '_delete_broadcast_domain') self.client._delete_broadcast_domains_for_ipspace(fake.IPSPACE_NAME) self.client.get_ipspaces.assert_called_once_with( ipspace_name=fake.IPSPACE_NAME) self.assertFalse(self.client._delete_broadcast_domain.called) def test_delete_broadcast_domains_for_ipspace(self): self.mock_object(self.client, 'get_ipspaces', mock.Mock(return_value=fake.IPSPACES)) self.mock_object(self.client, '_delete_broadcast_domain') self.client._delete_broadcast_domains_for_ipspace(fake.IPSPACE_NAME) self.client.get_ipspaces.assert_called_once_with( ipspace_name=fake.IPSPACE_NAME) self.client._delete_broadcast_domain.assert_called_once_with( fake.IPSPACES[0]['broadcast-domains'][0], fake.IPSPACE_NAME) def test_add_port_to_broadcast_domain(self): self.mock_object(self.client, 'send_request') add_port_to_broadcast_domain_args = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, 'ports': { 'net-qualified-port-name': ':'.join([fake.NODE_NAME, fake.VLAN_PORT]) } } result = self.client._add_port_to_broadcast_domain( fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.assertIsNone(result) self.client.send_request.assert_has_calls([ mock.call('net-port-broadcast-domain-add-ports', add_port_to_broadcast_domain_args)]) def test_add_port_to_broadcast_domain_already_present(self): self.mock_object(self.client, 'send_request', self._mock_api_error( code=netapp_api. 
E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN)) result = self.client._add_port_to_broadcast_domain( fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.assertIsNone(result) def test_add_port_to_broadcast_domain_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client._add_port_to_broadcast_domain, fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) def test_remove_port_from_broadcast_domain(self): self.mock_object(self.client, 'send_request') result = self.client._remove_port_from_broadcast_domain( fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) net_port_broadcast_domain_remove_ports_args = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, 'ports': { 'net-qualified-port-name': ':'.join([fake.NODE_NAME, fake.VLAN_PORT]) } } self.assertIsNone(result) self.client.send_request.assert_has_calls([ mock.call('net-port-broadcast-domain-remove-ports', net_port_broadcast_domain_remove_ports_args)]) def test_network_interface_exists(self): api_response = netapp_api.NaElement( fake.NET_INTERFACE_GET_ONE_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) net_interface_get_args = { 'query': { 'net-interface-info': { 'address': fake.IP_ADDRESS, 'home-node': fake.NODE_NAME, 'home-port': fake.VLAN_PORT, 'netmask': fake.NETMASK, 'vserver': fake.VSERVER_NAME} }, 'desired-attributes': { 'net-interface-info': { 'interface-name': None, } } } result = self.client.network_interface_exists( fake.VSERVER_NAME, fake.NODE_NAME, fake.PORT, fake.IP_ADDRESS, fake.NETMASK, fake.VLAN) self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', net_interface_get_args)]) self.assertTrue(result) def test_network_interface_exists_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 
'send_iter_request', mock.Mock(return_value=api_response)) net_interface_get_args = { 'query': { 'net-interface-info': { 'address': fake.IP_ADDRESS, 'home-node': fake.NODE_NAME, 'home-port': fake.PORT, 'netmask': fake.NETMASK, 'vserver': fake.VSERVER_NAME} }, 'desired-attributes': { 'net-interface-info': { 'interface-name': None, } } } result = self.client.network_interface_exists( fake.VSERVER_NAME, fake.NODE_NAME, fake.PORT, fake.IP_ADDRESS, fake.NETMASK, None) self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', net_interface_get_args)]) self.assertFalse(result) def test_list_network_interfaces(self): api_response = netapp_api.NaElement( fake.NET_INTERFACE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) net_interface_get_args = { 'desired-attributes': { 'net-interface-info': { 'interface-name': None, } } } result = self.client.list_network_interfaces() self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', net_interface_get_args)]) self.assertSequenceEqual(fake.LIF_NAMES, result) def test_list_network_interfaces_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.list_network_interfaces() self.assertListEqual([], result) def test_get_network_interfaces(self): api_response = netapp_api.NaElement( fake.NET_INTERFACE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_network_interfaces() self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', None)]) self.assertSequenceEqual(fake.LIFS, result) def test_get_network_interfaces_filtered_by_protocol(self): api_response = netapp_api.NaElement( fake.NET_INTERFACE_GET_ITER_RESPONSE_NFS) self.mock_object(self.client, 'send_iter_request', 
mock.Mock(return_value=api_response)) result = self.client.get_network_interfaces(protocols=['NFS']) net_interface_get_args = { 'query': { 'net-interface-info': { 'data-protocols': { 'data-protocol': 'nfs', } } } } self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', net_interface_get_args)]) self.assertListEqual(fake.NFS_LIFS, result) def test_get_network_interfaces_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_network_interfaces() self.client.send_iter_request.assert_has_calls([ mock.call('net-interface-get-iter', None)]) self.assertListEqual([], result) def test_disable_network_interface(self): interface_name = fake.NETWORK_INTERFACES[0]['interface-name'] vserver_name = fake.VSERVER_NAME expected_api_args = { 'administrative-status': 'down', 'interface-name': interface_name, 'vserver': vserver_name, } self.mock_object(self.client, 'send_request') self.client.disable_network_interface(vserver_name, interface_name) self.client.send_request.assert_called_once_with( 'net-interface-modify', expected_api_args) def test_delete_network_interface(self): interface_name = fake.NETWORK_INTERFACES[0]['interface-name'] vserver_name = fake.VSERVER_NAME expected_api_args = { 'interface-name': interface_name, 'vserver': vserver_name, } self.mock_object(self.client, 'disable_network_interface') self.mock_object(self.client, 'send_request') self.client.delete_network_interface(vserver_name, interface_name) self.client.disable_network_interface.assert_called_once_with( vserver_name, interface_name) self.client.send_request.assert_called_once_with( 'net-interface-delete', expected_api_args) def test_get_ipspaces(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement( fake.NET_IPSPACES_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', 
mock.Mock(return_value=api_response)) result = self.client.get_ipspaces(ipspace_name=fake.IPSPACE_NAME) net_ipspaces_get_iter_args = { 'query': { 'net-ipspaces-info': { 'ipspace': fake.IPSPACE_NAME, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('net-ipspaces-get-iter', net_ipspaces_get_iter_args)]) self.assertEqual(fake.IPSPACES, result) def test_get_ipspaces_not_found(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_ipspaces() net_ipspaces_get_iter_args = {} self.client.send_iter_request.assert_has_calls([ mock.call('net-ipspaces-get-iter', net_ipspaces_get_iter_args)]) self.assertEqual([], result) def test_get_ipspaces_not_supported(self): self.mock_object(self.client, 'send_iter_request') result = self.client.get_ipspaces() self.assertFalse(self.client.send_iter_request.called) self.assertEqual([], result) @ddt.data((fake.NET_IPSPACES_GET_ITER_RESPONSE, True), (fake.NO_RECORDS_RESPONSE, False)) @ddt.unpack def test_ipspace_exists(self, api_response, expected): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(api_response) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.ipspace_exists(fake.IPSPACE_NAME) net_ipspaces_get_iter_args = { 'query': { 'net-ipspaces-info': { 'ipspace': fake.IPSPACE_NAME, }, }, 'desired-attributes': { 'net-ipspaces-info': { 'ipspace': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('net-ipspaces-get-iter', net_ipspaces_get_iter_args)]) self.assertEqual(expected, result) def test_ipspace_exists_not_supported(self): result = self.client.ipspace_exists(fake.IPSPACE_NAME) self.assertFalse(result) def test_create_ipspace(self): self.mock_object(self.client, 'send_request') self.client.create_ipspace(fake.IPSPACE_NAME) 
net_ipspaces_create_args = {'ipspace': fake.IPSPACE_NAME} self.client.send_request.assert_has_calls([ mock.call('net-ipspaces-create', net_ipspaces_create_args)]) def test_delete_ipspace(self): self.client.features.add_feature('IPSPACES') mock_ipspace_has_data_vservers = self.mock_object( self.client, 'ipspace_has_data_vservers', mock.Mock(return_value=False)) mock_delete_broadcast_domains_for_ipspace = self.mock_object( self.client, '_delete_broadcast_domains_for_ipspace') self.mock_object(self.client, 'send_request') self.client.delete_ipspace(fake.IPSPACE_NAME) net_ipspaces_destroy_args = {'ipspace': fake.IPSPACE_NAME} mock_ipspace_has_data_vservers.assert_called_once_with( fake.IPSPACE_NAME) mock_delete_broadcast_domains_for_ipspace.assert_called_once_with( fake.IPSPACE_NAME) self.client.send_request.assert_has_calls([ mock.call('net-ipspaces-destroy', net_ipspaces_destroy_args)]) def test_get_ipspace_name_for_vlan_port(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(fake.NET_PORT_GET_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) ipspace = self.client.get_ipspace_name_for_vlan_port( fake.NODE_NAME, fake.PORT, fake.VLAN) port = '%(port)s-%(id)s' % {'port': fake.PORT, 'id': fake.VLAN} self.client.send_request.assert_called_once_with( 'net-port-get', {'node': fake.NODE_NAME, 'port': port}) self.assertEqual(fake.IPSPACE_NAME, ipspace) def test_get_ipspace_name_for_vlan_port_no_ipspace_feature(self): self.mock_object(self.client, 'send_request') ipspace = self.client.get_ipspace_name_for_vlan_port( fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_not_called() self.assertIsNone(ipspace) def test_get_ipspace_name_for_vlan_port_no_ipspace_found(self): self.client.features.add_feature('IPSPACES') self.mock_object( self.client, 'send_request', self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND)) ipspace = self.client.get_ipspace_name_for_vlan_port( 
fake.NODE_NAME, fake.PORT, fake.VLAN) self.assertIsNone(ipspace) def test_get_ipspace_name_for_vlan_port_no_vlan(self): self.client.features.add_feature('IPSPACES') api_response = netapp_api.NaElement(fake.NET_PORT_GET_RESPONSE_NO_VLAN) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) ipspace = self.client.get_ipspace_name_for_vlan_port( fake.NODE_NAME, fake.PORT, None) self.client.send_request.assert_called_once_with( 'net-port-get', {'node': fake.NODE_NAME, 'port': fake.PORT}) self.assertEqual(fake.IPSPACE_NAME, ipspace) def test_get_ipspace_name_for_vlan_port_raises_api_error(self): self.client.features.add_feature('IPSPACES') self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(netapp_api.NaApiError, self.client.get_ipspace_name_for_vlan_port, fake.NODE_NAME, fake.VLAN_PORT, None) def test_add_vserver_to_ipspace(self): self.mock_object(self.client, 'send_request') self.client.add_vserver_to_ipspace(fake.IPSPACE_NAME, fake.VSERVER_NAME) net_ipspaces_assign_vserver_args = { 'ipspace': fake.IPSPACE_NAME, 'vserver': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('net-ipspaces-assign-vserver', net_ipspaces_assign_vserver_args)]) def test_get_node_for_aggregate(self): api_response = netapp_api.NaElement( fake.AGGR_GET_NODE_RESPONSE).get_child_by_name( 'attributes-list').get_children() self.mock_object(self.client, '_get_aggregates', mock.Mock(return_value=api_response)) result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-ownership-attributes': { 'home-name': None, }, }, } self.client._get_aggregates.assert_has_calls([ mock.call( aggregate_names=[fake.SHARE_AGGREGATE_NAME], desired_attributes=desired_attributes)]) self.assertEqual(fake.NODE_NAME, result) def test_get_node_for_aggregate_none_requested(self): result = self.client.get_node_for_aggregate(None) 
self.assertIsNone(result) def test_get_node_for_aggregate_api_not_found(self): self.mock_object(self.client, 'send_iter_request', mock.Mock(side_effect=self._mock_api_error( netapp_api.EAPINOTFOUND))) result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME) self.assertIsNone(result) def test_get_node_for_aggregate_api_error(self): self.mock_object(self.client, 'send_iter_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.get_node_for_aggregate, fake.SHARE_AGGREGATE_NAME) def test_get_node_for_aggregate_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME) self.assertIsNone(result) def test_get_cluster_aggregate_capacities(self): api_response = netapp_api.NaElement( fake.AGGR_GET_SPACE_RESPONSE).get_child_by_name( 'attributes-list').get_children() self.mock_object(self.client, '_get_aggregates', mock.Mock(return_value=api_response)) result = self.client.get_cluster_aggregate_capacities( fake.SHARE_AGGREGATE_NAMES) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-space-attributes': { 'size-available': None, 'size-total': None, 'size-used': None, } } } self.client._get_aggregates.assert_has_calls([ mock.call( aggregate_names=fake.SHARE_AGGREGATE_NAMES, desired_attributes=desired_attributes)]) expected = { fake.SHARE_AGGREGATE_NAMES[0]: { 'available': 45670400, 'total': 943718400, 'used': 898048000, }, fake.SHARE_AGGREGATE_NAMES[1]: { 'available': 4267659264, 'total': 7549747200, 'used': 3282087936, }, } self.assertDictEqual(expected, result) def test_get_cluster_aggregate_capacities_not_found(self): api_response = netapp_api.NaElement('none').get_children() self.mock_object(self.client, '_get_aggregates', mock.Mock(return_value=api_response)) result = self.client.get_cluster_aggregate_capacities( 
fake.SHARE_AGGREGATE_NAMES) self.assertEqual({}, result) def test_get_cluster_aggregate_capacities_none_requested(self): result = self.client.get_cluster_aggregate_capacities([]) self.assertEqual({}, result) def test_get_vserver_aggregate_capacities(self): api_response = netapp_api.NaElement(fake.VSERVER_GET_RESPONSE) self.mock_object(self.vserver_client, 'send_request', mock.Mock(return_value=api_response)) result = self.vserver_client.get_vserver_aggregate_capacities() vserver_args = { 'desired-attributes': { 'vserver-info': { 'vserver-name': None, 'vserver-aggr-info-list': { 'vserver-aggr-info': { 'aggr-name': None, 'aggr-availsize': None } } } } } self.vserver_client.send_request.assert_has_calls([ mock.call('vserver-get', vserver_args)]) self.assertDictEqual(fake.VSERVER_AGGREGATES, result) def test_get_vserver_aggregate_capacities_partial_request(self): api_response = netapp_api.NaElement(fake.VSERVER_GET_RESPONSE) self.mock_object(self.vserver_client, 'send_request', mock.Mock(return_value=api_response)) result = self.vserver_client.get_vserver_aggregate_capacities( fake.SHARE_AGGREGATE_NAMES[0]) expected = {fake.SHARE_AGGREGATE_NAMES[0]: fake.VSERVER_AGGREGATES[fake.SHARE_AGGREGATE_NAMES[0]]} self.assertDictEqual(expected, result) def test_get_vserver_aggregate_capacities_aggregate_not_found(self): api_response = netapp_api.NaElement( fake.VSERVER_GET_RESPONSE_NO_AGGREGATES) self.mock_object(self.vserver_client, 'send_request', mock.Mock(return_value=api_response)) result = self.vserver_client.get_vserver_aggregate_capacities() self.assertDictEqual({}, result) self.assertEqual(1, client_cmode.LOG.warning.call_count) def test_get_vserver_aggregate_capacities_vserver_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.vserver_client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.vserver_client.get_vserver_aggregate_capacities) def 
test_get_vserver_aggregate_capacities_none_requested(self): result = self.client.get_vserver_aggregate_capacities([]) self.assertEqual({}, result) def test_get_aggregates(self): api_response = netapp_api.NaElement(fake.AGGR_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_aggregates() self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', {})]) self.assertListEqual( [aggr.to_string() for aggr in api_response.get_child_by_name( 'attributes-list').get_children()], [aggr.to_string() for aggr in result]) def test_get_aggregates_with_filters(self): api_response = netapp_api.NaElement(fake.AGGR_GET_SPACE_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-space-attributes': { 'size-total': None, 'size-available': None, } } } result = self.client._get_aggregates( aggregate_names=fake.SHARE_AGGREGATE_NAMES, desired_attributes=desired_attributes) aggr_get_iter_args = { 'query': { 'aggr-attributes': { 'aggregate-name': '|'.join(fake.SHARE_AGGREGATE_NAMES), } }, 'desired-attributes': desired_attributes } self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', aggr_get_iter_args)]) self.assertListEqual( [aggr.to_string() for aggr in api_response.get_child_by_name( 'attributes-list').get_children()], [aggr.to_string() for aggr in result]) def test_get_aggregates_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_aggregates() self.client.send_iter_request.assert_has_calls([ mock.call('aggr-get-iter', {})]) self.assertListEqual([], result) def test_get_performance_instance_uuids(self): api_response = netapp_api.NaElement( fake.PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE) 
self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_performance_instance_uuids( 'system', fake.NODE_NAME) expected = [fake.NODE_NAME + ':kernel:system'] self.assertEqual(expected, result) perf_object_instance_list_info_iter_args = { 'objectname': 'system', 'query': { 'instance-info': { 'uuid': fake.NODE_NAME + ':*', } } } self.client.send_request.assert_called_once_with( 'perf-object-instance-list-info-iter', perf_object_instance_list_info_iter_args) def test_get_performance_counter_info(self): api_response = netapp_api.NaElement( fake.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_performance_counter_info('wafl', 'cp_phase_times') expected = { 'name': 'cp_phase_times', 'base-counter': 'total_cp_msecs', 'labels': fake.PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS, } self.assertEqual(expected, result) perf_object_counter_list_info_args = {'objectname': 'wafl'} self.client.send_request.assert_called_once_with( 'perf-object-counter-list-info', perf_object_counter_list_info_args) def test_get_performance_counter_info_not_found(self): api_response = netapp_api.NaElement( fake.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NotFound, self.client.get_performance_counter_info, 'wafl', 'invalid') def test_get_performance_counters(self): api_response = netapp_api.NaElement( fake.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) instance_uuids = [ fake.NODE_NAMES[0] + ':kernel:system', fake.NODE_NAMES[1] + ':kernel:system', ] counter_names = ['avg_processor_busy'] result = self.client.get_performance_counters('system', instance_uuids, counter_names) expected = [ { 'avg_processor_busy': '5674745133134', 'instance-name': 'system', 
'instance-uuid': instance_uuids[0], 'node-name': fake.NODE_NAMES[0], 'timestamp': '1453412013', }, { 'avg_processor_busy': '4077649009234', 'instance-name': 'system', 'instance-uuid': instance_uuids[1], 'node-name': fake.NODE_NAMES[1], 'timestamp': '1453412013' }, ] self.assertEqual(expected, result) perf_object_get_instances_args = { 'objectname': 'system', 'instance-uuids': [ {'instance-uuid': instance_uuid} for instance_uuid in instance_uuids ], 'counters': [ {'counter': counter} for counter in counter_names ], } self.client.send_request.assert_called_once_with( 'perf-object-get-instances', perf_object_get_instances_args) def test_setup_security_services_ldap(self): self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'configure_ldap') self.client.setup_security_services([fake.LDAP_LINUX_SECURITY_SERVICE], self.vserver_client, fake.VSERVER_NAME, False) vserver_modify_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'}, ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'}, ], 'vserver-name': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('vserver-modify', vserver_modify_args)]) self.vserver_client.configure_ldap.assert_has_calls([ mock.call(fake.LDAP_LINUX_SECURITY_SERVICE, timeout=30)]) def test_setup_security_services_active_directory(self): self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'configure_active_directory') self.mock_object(self.vserver_client, 'configure_cifs_options') self.client.setup_security_services([fake.CIFS_SECURITY_SERVICE], self.vserver_client, fake.VSERVER_NAME, False) vserver_modify_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'}, ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'}, ], 'vserver-name': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('vserver-modify', vserver_modify_args)]) 
self.vserver_client.configure_active_directory.assert_has_calls([ mock.call(fake.CIFS_SECURITY_SERVICE, fake.VSERVER_NAME, False)]) self.vserver_client.configure_cifs_options.assert_has_calls([ mock.call(fake.CIFS_SECURITY_SERVICE)]) def test_setup_security_services_kerberos(self): self.mock_object(self.client, 'send_request') self.mock_object(self.vserver_client, 'create_kerberos_realm') self.mock_object(self.vserver_client, 'configure_kerberos') self.client.setup_security_services([fake.KERBEROS_SECURITY_SERVICE], self.vserver_client, fake.VSERVER_NAME, False) vserver_modify_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'}, ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'}, ], 'vserver-name': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('vserver-modify', vserver_modify_args)]) self.vserver_client.create_kerberos_realm.assert_has_calls([ mock.call(fake.KERBEROS_SECURITY_SERVICE)]) self.vserver_client.configure_kerberos.assert_has_calls([ mock.call(fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME)]) def test_setup_security_services_invalid(self): self.mock_object(self.client, 'send_request') self.assertRaises(exception.NetAppException, self.client.setup_security_services, [fake.INVALID_SECURITY_SERVICE], self.vserver_client, fake.VSERVER_NAME, False) vserver_modify_args = { 'name-mapping-switch': [ {'nmswitch': 'ldap'}, {'nmswitch': 'file'}, ], 'name-server-switch': [ {'nsswitch': 'ldap'}, {'nsswitch': 'file'}, ], 'vserver-name': fake.VSERVER_NAME } self.client.send_request.assert_has_calls([ mock.call('vserver-modify', vserver_modify_args)]) def test_update_showmount(self): self.mock_object(self.client, 'send_request') fake_showmount = 'true' self.client.update_showmount(fake_showmount) nfs_service_modify_args = { 'showmount': fake_showmount, } self.client.send_request.assert_called_once_with( 'nfs-service-modify', nfs_service_modify_args) @ddt.data({'tcp-max-xfer-size': 10000}, {}, 
              None)
    def test_enable_nfs(self, nfs_config):
        """enable_nfs issues nfs-enable, enables the requested protocol
        versions and creates default export rules; _configure_nfs runs
        only when a truthy config dict is supplied (ddt drives
        dict / {} / None through the same test).
        """
        self.mock_object(self.client, 'send_request')
        self.mock_object(self.client, '_enable_nfs_protocols')
        self.mock_object(self.client, '_create_default_nfs_export_rules')
        self.mock_object(self.client, '_configure_nfs')

        self.client.enable_nfs(fake.NFS_VERSIONS, nfs_config)

        self.client.send_request.assert_called_once_with('nfs-enable')
        self.client._enable_nfs_protocols.assert_called_once_with(
            fake.NFS_VERSIONS)
        self.client._create_default_nfs_export_rules.assert_called_once_with()
        # An empty dict is falsy, so both {} and None skip _configure_nfs.
        if nfs_config:
            self.client._configure_nfs.assert_called_once_with(nfs_config)
        else:
            self.client._configure_nfs.assert_not_called()

    @ddt.data((True, True, True), (True, False, False), (False, True, True))
    @ddt.unpack
    def test_enable_nfs_protocols(self, v3, v40, v41):
        """_enable_nfs_protocols maps each requested NFS version onto the
        matching is-nfsvXX-enabled flag of a single nfs-service-modify
        call; the remaining flags are fixed values.
        """
        self.mock_object(self.client, 'send_request')

        versions = []
        if v3:
            versions.append('nfs3')
        if v40:
            versions.append('nfs4.0')
        if v41:
            versions.append('nfs4.1')

        self.client._enable_nfs_protocols(versions)

        nfs_service_modify_args = {
            'is-nfsv3-enabled': 'true' if v3 else 'false',
            'is-nfsv40-enabled': 'true' if v40 else 'false',
            'is-nfsv41-enabled': 'true' if v41 else 'false',
            'showmount': 'true',
            'is-v3-ms-dos-client-enabled': 'true',
            'is-nfsv3-connection-drop-enabled': 'false',
            'enable-ejukebox': 'false',
        }
        self.client.send_request.assert_called_once_with(
            'nfs-service-modify', nfs_service_modify_args)

    def test_configure_nfs(self):
        """_configure_nfs passes the NFS config dict through unchanged as
        the nfs-service-modify arguments.
        """
        fake_nfs = {
            'tcp-max-xfer-size': 10000,
        }
        self.mock_object(self.client, 'send_request')

        self.client._configure_nfs(fake_nfs)

        self.client.send_request.assert_called_once_with(
            'nfs-service-modify', fake_nfs)

    def test_create_default_nfs_export_rules(self):
        """Both default export rules (IPv4 0.0.0.0/0 and IPv6 ::/0) are
        created on the 'default' policy.
        """

        class CopyingMock(mock.Mock):

            def __call__(self, *args, **kwargs):
                # Deep-copy the call arguments so that the production
                # code reusing/mutating the same args dict between the
                # two export-rule-create calls does not retroactively
                # change the recorded first call.
                args = copy.deepcopy(args)
                kwargs = copy.deepcopy(kwargs)
                return super(CopyingMock, self).__call__(*args, **kwargs)

        self.mock_object(self.client, 'send_request', CopyingMock())

        self.client._create_default_nfs_export_rules()
export_rule_create_args = { 'client-match': '0.0.0.0/0', 'policy-name': 'default', 'ro-rule': { 'security-flavor': 'any' }, 'rw-rule': { 'security-flavor': 'never' } } export_rule_create_args2 = export_rule_create_args.copy() export_rule_create_args2['client-match'] = '::/0' self.client.send_request.assert_has_calls([ mock.call('export-rule-create', export_rule_create_args), mock.call('export-rule-create', export_rule_create_args2)]) @ddt.data(fake.LDAP_LINUX_SECURITY_SERVICE, fake.LDAP_AD_SECURITY_SERVICE) def test_configure_ldap(self, sec_service): self.client.features.add_feature('LDAP_LDAP_SERVERS') self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'configure_dns') self.client.configure_ldap(sec_service) config_name = hashlib.md5( sec_service['id'].encode("latin-1")).hexdigest() ldap_client_create_args = { 'ldap-client-config': config_name, 'tcp-port': '389', 'bind-password': sec_service['password'], } if sec_service.get('domain'): ldap_client_create_args['schema'] = 'MS-AD-BIS' ldap_client_create_args['bind-dn'] = ( sec_service['user'] + '@' + sec_service['domain']) ldap_client_create_args['ad-domain'] = sec_service['domain'] else: ldap_client_create_args['schema'] = 'RFC-2307' ldap_client_create_args['bind-dn'] = sec_service['user'] ldap_client_create_args['ldap-servers'] = [{ 'string': sec_service['server'] }] if sec_service.get('ou'): ldap_client_create_args['base-dn'] = sec_service['ou'] ldap_config_create_args = { 'client-config': config_name, 'client-enabled': 'true' } self.client.send_request.assert_has_calls([ mock.call('ldap-client-create', ldap_client_create_args), mock.call('ldap-config-create', ldap_config_create_args)]) @ddt.data({'server': None, 'domain': None}, {'server': 'fake_server', 'domain': 'fake_domain'}) @ddt.unpack def test_configure_ldap_invalid_parameters(self, server, domain): fake_ldap_sec_service = copy.deepcopy(fake.LDAP_AD_SECURITY_SERVICE) fake_ldap_sec_service['server'] = server 
fake_ldap_sec_service['domain'] = domain self.assertRaises(exception.NetAppException, self.client.configure_ldap, fake_ldap_sec_service) def test__enable_ldap_client_timeout(self): mock_warning_log = self.mock_object(client_cmode.LOG, 'warning') na_api_error = netapp_api.NaApiError(code=netapp_api.EAPIERROR) mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(side_effect=na_api_error)) self.assertRaises(exception.NetAppException, self.client._enable_ldap_client, 'fake_config_name', timeout=6) self.assertEqual(2, mock_send_request.call_count) self.assertEqual(2, mock_warning_log.call_count) def test_configure_active_directory(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'configure_cifs_aes_encryption') self.mock_object(self.client, 'set_preferred_dc') self.client.configure_active_directory(fake.CIFS_SECURITY_SERVICE, fake.VSERVER_NAME, False) cifs_server = (fake.VSERVER_NAME[0:8] + '-' + fake.VSERVER_NAME[-6:]).replace('_', '-').upper() cifs_server_create_args = { 'admin-username': fake.CIFS_SECURITY_SERVICE['user'], 'admin-password': fake.CIFS_SECURITY_SERVICE['password'], 'force-account-overwrite': 'true', 'cifs-server': cifs_server, 'organizational-unit': fake.CIFS_SECURITY_SERVICE['ou'], 'domain': fake.CIFS_SECURITY_SERVICE['domain'], } self.client.configure_dns.assert_called_with( fake.CIFS_SECURITY_SERVICE) self.client.configure_cifs_aes_encryption.assert_called_with(False) self.client.set_preferred_dc.assert_called_with( fake.CIFS_SECURITY_SERVICE) self.client.send_request.assert_has_calls([ mock.call('cifs-server-create', cifs_server_create_args)]) def test_configure_active_directory_with_ad_site(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'configure_cifs_aes_encryption') self.mock_object(self.client, 'set_preferred_dc') 
self.client.configure_active_directory(fake.CIFS_SECURITY_SERVICE_3, fake.VSERVER_NAME, False) cifs_server = (fake.VSERVER_NAME[0:8] + '-' + fake.VSERVER_NAME[-6:]).replace('_', '-').upper() cifs_server_create_args = { 'admin-username': fake.CIFS_SECURITY_SERVICE_3['user'], 'admin-password': fake.CIFS_SECURITY_SERVICE_3['password'], 'force-account-overwrite': 'true', 'cifs-server': cifs_server, 'organizational-unit': fake.CIFS_SECURITY_SERVICE_3['ou'], 'domain': fake.CIFS_SECURITY_SERVICE_3['domain'], 'default-site': fake.CIFS_SECURITY_SERVICE_3['default_ad_site'], } self.client.configure_dns.assert_called_with( fake.CIFS_SECURITY_SERVICE_3) self.client.configure_cifs_aes_encryption.assert_called_with(False) self.client.set_preferred_dc.assert_called_with( fake.CIFS_SECURITY_SERVICE_3) self.client.send_request.assert_has_calls([ mock.call('cifs-server-create', cifs_server_create_args)]) def test_configure_active_directory_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.mock_object(self.client, 'configure_dns') self.assertRaises(exception.NetAppException, self.client.configure_active_directory, fake.CIFS_SECURITY_SERVICE, fake.VSERVER_NAME, False) def test_create_kerberos_realm(self): self.client.features.add_feature('KERBEROS_VSERVER') self.mock_object(self.client, 'send_request') self.client.create_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE) kerberos_realm_create_args = { 'admin-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'admin-server-port': '749', 'clock-skew': '5', 'comment': '', 'kdc-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'kdc-port': '88', 'kdc-vendor': 'other', 'password-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'password-server-port': '464', 'realm': fake.KERBEROS_SECURITY_SERVICE['domain'].upper() } self.client.send_request.assert_has_calls([ mock.call('kerberos-realm-create', kerberos_realm_create_args)]) def test_create_kerberos_realm_already_present(self): 
self.client.features.add_feature('KERBEROS_VSERVER') self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EDUPLICATEENTRY)) self.client.create_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE) kerberos_realm_create_args = { 'admin-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'admin-server-port': '749', 'clock-skew': '5', 'comment': '', 'kdc-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'kdc-port': '88', 'kdc-vendor': 'other', 'password-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'password-server-port': '464', 'realm': fake.KERBEROS_SECURITY_SERVICE['domain'].upper() } self.client.send_request.assert_has_calls([ mock.call('kerberos-realm-create', kerberos_realm_create_args)]) self.assertEqual(1, client_cmode.LOG.debug.call_count) def test_create_kerberos_realm_api_error(self): self.client.features.add_feature('KERBEROS_VSERVER') self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client.create_kerberos_realm, fake.KERBEROS_SECURITY_SERVICE) def test_update_kerberos_realm(self): self.client.features.add_feature('KERBEROS_VSERVER') self.mock_object(self.client, 'send_request') self.client.update_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE) kerberos_realm_create_args = { 'admin-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'kdc-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'password-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'], 'realm': fake.KERBEROS_SECURITY_SERVICE['domain'].upper(), } self.client.send_request.assert_has_calls([ mock.call('kerberos-realm-modify', kerberos_realm_create_args)]) def test_update_kerberos_realm_failure(self): self.client.features.add_feature('KERBEROS_VSERVER') self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client.update_kerberos_realm, fake.KERBEROS_SECURITY_SERVICE) kerberos_realm_create_args = { 'admin-server-ip': 
                fake.KERBEROS_SECURITY_SERVICE['server'],
            'kdc-ip': fake.KERBEROS_SECURITY_SERVICE['server'],
            'password-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'],
            'realm': fake.KERBEROS_SECURITY_SERVICE['domain'].upper(),
        }
        self.client.send_request.assert_has_calls([
            mock.call('kerberos-realm-modify', kerberos_realm_create_args)])

    def test_configure_kerberos(self):
        """Kerberos is enabled on every network interface of the vserver,
        one kerberos-config-modify call per LIF, all sharing the same
        service principal name.
        """
        self.client.features.add_feature('KERBEROS_VSERVER')
        self.mock_object(self.client, 'send_request')
        self.mock_object(self.client, 'configure_dns')
        self.mock_object(self.client,
                         'list_network_interfaces',
                         mock.Mock(return_value=['lif1', 'lif2']))

        self.client.configure_kerberos(
            fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME)

        # The SPN is derived from the security service and vserver name;
        # it must be identical for every LIF.
        spn = self.client._get_kerberos_service_principal_name(
            fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME)
        kerberos_config_modify_args1 = {
            'admin-password': fake.KERBEROS_SECURITY_SERVICE['password'],
            'admin-user-name': fake.KERBEROS_SECURITY_SERVICE['user'],
            'interface-name': 'lif1',
            'is-kerberos-enabled': 'true',
            'service-principal-name': spn
        }
        kerberos_config_modify_args2 = {
            'admin-password': fake.KERBEROS_SECURITY_SERVICE['password'],
            'admin-user-name': fake.KERBEROS_SECURITY_SERVICE['user'],
            'interface-name': 'lif2',
            'is-kerberos-enabled': 'true',
            'service-principal-name': spn
        }
        self.client.configure_dns.assert_called_with(
            fake.KERBEROS_SECURITY_SERVICE)
        self.client.send_request.assert_has_calls([
            mock.call('kerberos-config-modify',
                      kerberos_config_modify_args1),
            mock.call('kerberos-config-modify',
                      kerberos_config_modify_args2)])

    def test_configure_kerberos_no_network_interfaces(self):
        """With no network interfaces there is nothing to enable Kerberos
        on, so configure_kerberos raises NetAppException.
        """
        self.client.features.add_feature('KERBEROS_VSERVER')
        self.mock_object(self.client, 'send_request')
        self.mock_object(self.client, 'configure_dns')
        self.mock_object(self.client,
                         'list_network_interfaces',
                         mock.Mock(return_value=[]))

        self.assertRaises(exception.NetAppException,
                          self.client.configure_kerberos,
                          fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME)
self.client.configure_dns.assert_called_with( fake.KERBEROS_SECURITY_SERVICE) def test_disable_kerberos(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'list_network_interfaces', mock.Mock(return_value=['lif1', 'lif2'])) self.client.disable_kerberos(fake.KERBEROS_SECURITY_SERVICE) kerberos_config_modify_args1 = { 'admin-password': fake.KERBEROS_SECURITY_SERVICE['password'], 'admin-user-name': fake.KERBEROS_SECURITY_SERVICE['user'], 'interface-name': 'lif1', 'is-kerberos-enabled': 'false', } kerberos_config_modify_args2 = { 'admin-password': fake.KERBEROS_SECURITY_SERVICE['password'], 'admin-user-name': fake.KERBEROS_SECURITY_SERVICE['user'], 'interface-name': 'lif2', 'is-kerberos-enabled': 'false', } self.client.send_request.assert_has_calls([ mock.call('kerberos-config-modify', kerberos_config_modify_args1), mock.call('kerberos-config-modify', kerberos_config_modify_args2)]) self.client.list_network_interfaces.assert_called_once() def test_disable_kerberos_already_disabled(self): self.mock_object(self.client, 'send_request', self._mock_api_error( code=netapp_api.EAPIERROR, message='Kerberos is already disabled')) self.mock_object(self.client, 'list_network_interfaces', mock.Mock(return_value=['lif1'])) self.client.disable_kerberos(fake.KERBEROS_SECURITY_SERVICE) kerberos_config_modify_args = { 'admin-password': fake.KERBEROS_SECURITY_SERVICE['password'], 'admin-user-name': fake.KERBEROS_SECURITY_SERVICE['user'], 'interface-name': 'lif1', 'is-kerberos-enabled': 'false', } self.client.send_request.assert_called_once_with( 'kerberos-config-modify', kerberos_config_modify_args) self.client.list_network_interfaces.assert_called_once() def test_is_kerberos_enabled(self): self.client.features.add_feature('KERBEROS_VSERVER') api_response = netapp_api.NaElement( fake.KERBEROS_CONFIG_GET_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.mock_object(self.client, 'get_network_interfaces', 
mock.Mock(return_value=[{'interface-name': 'lif1'}])) result = self.client.is_kerberos_enabled() kerberos_config_get_args = { 'interface-name': 'lif1', 'desired-attributes': { 'kerberos-config-info': { 'is-kerberos-enabled': None, } } } self.assertTrue(result) self.client.send_request.assert_called_once_with( 'kerberos-config-get', kerberos_config_get_args) self.client.get_network_interfaces.assert_called_once_with( protocols=['NFS', 'CIFS']) def test_is_kerberos_enabled_exception_raise(self): self.client.features.add_feature('KERBEROS_VSERVER') api_response = netapp_api.NaElement( fake.KERBEROS_CONFIG_GET_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=[api_response, netapp_api.NaApiError('foobar')])) self.mock_object(self.client, 'get_network_interfaces', mock.Mock(return_value=[{'interface-name': 'lif1'}, {'interface-name': 'lif2'}, {'interface-name': 'lif3'}])) self.assertRaises(netapp_api.NaApiError, self.client.is_kerberos_enabled) kerberos_config_get_args_lif1 = { 'interface-name': 'lif1', 'desired-attributes': { 'kerberos-config-info': { 'is-kerberos-enabled': None, } } } kerberos_config_get_args_lif2 = { 'interface-name': 'lif2', 'desired-attributes': { 'kerberos-config-info': { 'is-kerberos-enabled': None, } } } self.client.send_request.assert_has_calls([ mock.call('kerberos-config-get', kerberos_config_get_args_lif1), mock.call('kerberos-config-get', kerberos_config_get_args_lif2), ]) self.client.get_network_interfaces.assert_called_once_with( protocols=['NFS', 'CIFS']) def test_is_kerberos_enabled_exception_return_false(self): self.client.features.add_feature('KERBEROS_VSERVER') api_response = netapp_api.NaElement( fake.KERBEROS_CONFIG_GET_RESPONSE) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=[api_response, netapp_api.NaApiError( message="entry doesn't exist")])) self.mock_object(self.client, 'get_network_interfaces', mock.Mock(return_value=[{'interface-name': 'lif1'}, {'interface-name': 'lif2'}, 
{'interface-name': 'lif3'}])) result = self.client.is_kerberos_enabled() kerberos_config_get_args_lif1 = { 'interface-name': 'lif1', 'desired-attributes': { 'kerberos-config-info': { 'is-kerberos-enabled': None, } } } kerberos_config_get_args_lif2 = { 'interface-name': 'lif2', 'desired-attributes': { 'kerberos-config-info': { 'is-kerberos-enabled': None, } } } self.assertFalse(result) self.client.send_request.assert_has_calls([ mock.call('kerberos-config-get', kerberos_config_get_args_lif1), mock.call('kerberos-config-get', kerberos_config_get_args_lif2), ]) self.client.get_network_interfaces.assert_called_once_with( protocols=['NFS', 'CIFS']) def test_get_kerberos_service_principal_name(self): spn = self.client._get_kerberos_service_principal_name( fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME ) self.assertEqual(fake.KERBEROS_SERVICE_PRINCIPAL_NAME, spn) def test_configure_dns_for_active_directory(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value={})) self.client.configure_dns(fake.CIFS_SECURITY_SERVICE) net_dns_create_args = { 'domains': [{'string': fake.CIFS_SECURITY_SERVICE['domain']}], 'name-servers': [{ 'ip-address': fake.CIFS_SECURITY_SERVICE['dns_ip'] }], 'dns-state': 'enabled' } self.client.send_request.assert_has_calls([ mock.call('net-dns-create', net_dns_create_args)]) def test_configure_dns_multiple_dns_ip(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value={})) mock_dns_ips = ['10.0.0.1', '10.0.0.2', '10.0.0.3'] security_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) security_service['dns_ip'] = ', '.join(mock_dns_ips) self.client.configure_dns(security_service) self.client.send_request.assert_called_once() def test_configure_dns_for_kerberos(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value={})) 
        self.client.configure_dns(fake.KERBEROS_SECURITY_SERVICE)

        net_dns_create_args = {
            'domains': [{'string': fake.KERBEROS_SECURITY_SERVICE['domain']}],
            'name-servers': [{
                'ip-address': fake.KERBEROS_SECURITY_SERVICE['dns_ip']
            }],
            'dns-state': 'enabled'
        }
        self.client.send_request.assert_has_calls([
            mock.call('net-dns-create', net_dns_create_args)])

    def test_configure_dns_already_present(self):
        """When get_dns_config reports an existing configuration,
        configure_dns must issue net-dns-modify rather than
        net-dns-create.
        """
        dns_config = {
            'dns-state': 'enabled',
            'domains': [fake.KERBEROS_SECURITY_SERVICE['domain']],
            'dns-ips': [fake.KERBEROS_SECURITY_SERVICE['dns_ip']],
        }
        self.mock_object(self.client, 'get_dns_config',
                         mock.Mock(return_value=dns_config))
        self.mock_object(self.client, 'send_request')

        self.client.configure_dns(fake.KERBEROS_SECURITY_SERVICE)

        # NOTE: despite its name, this dict holds the net-dns-modify args.
        net_dns_create_args = {
            'domains': [{'string': fake.KERBEROS_SECURITY_SERVICE['domain']}],
            'name-servers': [{
                'ip-address': fake.KERBEROS_SECURITY_SERVICE['dns_ip']
            }],
            'dns-state': 'enabled'
        }
        self.client.send_request.assert_has_calls([
            mock.call('net-dns-modify', net_dns_create_args)])

    def test_update_dns_configuration(self):
        """Existing domains and name servers are merged (set union) with
        the security service's entries before net-dns-modify is sent, so
        nothing previously configured is dropped.
        """
        fake_configured_dns = {
            'dns-state': 'enabled',
            'domains': ['fake_domain_2'],
            'dns-ips': ['fake_dns_ip_2']
        }
        self.mock_object(self.client, 'get_dns_config',
                         mock.Mock(return_value=fake_configured_dns))
        self.mock_object(self.client, 'send_request')

        self.client.configure_dns(fake.KERBEROS_SECURITY_SERVICE)

        # Expected arguments are built from sets, matching the merge the
        # client performs (order of set iteration is irrelevant to the
        # equality assertion on the resulting dicts).
        domains = set()
        domains.add(fake_configured_dns['domains'][0])
        domains.add(fake.KERBEROS_SECURITY_SERVICE['domain'])
        dns_ips = set()
        dns_ips.add(fake_configured_dns['dns-ips'][0])
        dns_ips.add(fake.KERBEROS_SECURITY_SERVICE['dns_ip'])
        net_dns_create_args = {
            'domains': [{'string': domain} for domain in domains],
            'dns-state': 'enabled',
            'name-servers': [{'ip-address': dns_ip} for dns_ip in dns_ips]
        }
        self.client.send_request.assert_has_calls([
            mock.call('net-dns-modify', net_dns_create_args)])

    def test_configure_dns_api_error(self):
        self.mock_object(self.client, 'send_request', self._mock_api_error())
self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value={})) self.assertRaises(exception.NetAppException, self.client.configure_dns, fake.KERBEROS_SECURITY_SERVICE) def test_get_dns_configuration(self): api_response = netapp_api.NaElement( fake.DNS_CONFIG_GET_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_dns_config() expected_result = { 'dns-state': 'enabled', 'domains': ['fake_domain.com'], 'dns-ips': ['fake_dns_1', 'fake_dns_2'] } self.assertEqual(expected_result, result) self.client.send_request.assert_called_once_with('net-dns-get', {}) @ddt.data(True, False) def test_configure_cifs_aes_encryption_enable(self, specify_types): self.client.features.add_feature( 'AES_ENCRYPTION_TYPES', supported=specify_types) self.mock_object(self.client, 'send_request') self.client.configure_cifs_aes_encryption(True) if specify_types: configure_cifs_aes_encryption_args = { 'advertised-enc-types': [{'cifskrbenctypes': 'aes_128'}, {'cifskrbenctypes': 'aes_256'}] } else: configure_cifs_aes_encryption_args = { 'is-aes-encryption-enabled': 'true', } self.client.send_request.assert_called_with( 'cifs-security-modify', configure_cifs_aes_encryption_args) @ddt.data(True, False) def test_configure_cifs_aes_encryption_disable(self, specify_types): self.client.features.add_feature( 'AES_ENCRYPTION_TYPES', supported=specify_types) self.mock_object(self.client, 'send_request') self.client.configure_cifs_aes_encryption(False) if specify_types: configure_cifs_aes_encryption_args = { 'advertised-enc-types': [{'cifskrbenctypes': 'des'}, {'cifskrbenctypes': 'rc4'}] } else: configure_cifs_aes_encryption_args = { 'is-aes-encryption-enabled': 'false', } self.client.send_request.assert_called_with( 'cifs-security-modify', configure_cifs_aes_encryption_args) @ddt.data( { 'server': '', 'check_feature': False }, { 'server': ['10.0.0.2', '10.0.0.3'], 'check_feature': False }, { 'server': '10.0.0.1', 'check_feature': 
False }, { 'server': '10.0.0.1', 'check_feature': True } ) @ddt.unpack def test_set_preferred_dc(self, server, check_feature): if check_feature: self.client.features.add_feature('CIFS_DC_ADD_SKIP_CHECK') self.mock_object(self.client, 'send_request') security_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) security_service['server'] = ', '.join(server) self.client.set_preferred_dc(security_service) if server == '': self.client.send_request.assert_not_called() else: preferred_dc_add_args = { 'domain': fake.CIFS_SECURITY_SERVICE['domain'], 'preferred-dc': [{'string': dc_ip} for dc_ip in server] } if check_feature: preferred_dc_add_args['skip-config-validation'] = 'false' self.client.send_request.assert_has_calls([ mock.call('cifs-domain-preferred-dc-add', preferred_dc_add_args)]) def test_set_preferred_dc_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) security_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) security_service['server'] = 'fake_server' self.assertRaises(exception.NetAppException, self.client.set_preferred_dc, security_service) def test_remove_preferred_dcs(self): self.mock_object(self.client, 'send_request') security_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) self.client.remove_preferred_dcs(security_service) preferred_dc_add_args = { 'domain': security_service['domain'], } self.client.send_request.assert_has_calls([ mock.call('cifs-domain-preferred-dc-remove', preferred_dc_add_args)]) def test_remove_preferred_dcs_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) security_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) self.assertRaises(exception.NetAppException, self.client.remove_preferred_dcs, security_service) preferred_dc_add_args = { 'domain': security_service['domain'], } self.client.send_request.assert_has_calls([ mock.call('cifs-domain-preferred-dc-remove', preferred_dc_add_args)]) @ddt.data(True, False) def test_create_volume(self, set_max_files): 
self.client.features.add_feature('ADAPTIVE_QOS') self.mock_object(self.client, 'set_volume_max_files') self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'update_volume_efficiency_attributes') self.mock_object( self.client, '_get_create_volume_api_args', mock.Mock(return_value={})) options = {'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} self.client.create_volume( fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100, max_files=fake.MAX_FILES if set_max_files else None, **options ) volume_create_args = { 'containing-aggr-name': fake.SHARE_AGGREGATE_NAME, 'size': '100g', 'volume': fake.SHARE_NAME, } self.client._get_create_volume_api_args.assert_called_once_with( fake.SHARE_NAME, False, None, None, None, 'rw', None, False, None, None, None) self.client.send_request.assert_called_with('volume-create', volume_create_args) ( self.client.update_volume_efficiency_attributes. assert_called_once_with (fake.SHARE_NAME, False, False, efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME) ) if set_max_files: self.client.set_volume_max_files.assert_called_once_with( fake.SHARE_NAME, fake.MAX_FILES) else: self.client.set_volume_max_files.assert_not_called() @ddt.data(True, False) def test_create_volume_thin_provisioned(self, thin_provisioned): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'update_volume_efficiency_attributes') self.client.create_volume( fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100, thin_provisioned=thin_provisioned) volume_create_args = { 'containing-aggr-name': fake.SHARE_AGGREGATE_NAME, 'size': '100g', 'volume': fake.SHARE_NAME, 'volume-type': 'rw', 'junction-path': '/%s' % fake.SHARE_NAME, 'space-reserve': ('none' if thin_provisioned else 'volume'), 'encrypt': 'false' } self.client.send_request.assert_called_once_with('volume-create', volume_create_args) @ddt.data("compliance", "enterprise") def test_create_volume_snaplock_type(self, snaplock_type): self.mock_object(self.client, 'send_request') 
self.mock_object(self.client, 'update_volume_efficiency_attributes') self.mock_object(self.client, 'set_snaplock_attributes') self.client.create_volume( fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100, snaplock_type=snaplock_type) volume_create_args = { 'containing-aggr-name': fake.SHARE_AGGREGATE_NAME, 'size': '100g', 'volume': fake.SHARE_NAME, 'volume-type': 'rw', 'junction-path': '/%s' % fake.SHARE_NAME, 'space-reserve': 'volume', 'encrypt': 'false', 'snaplock-type': snaplock_type, } self.client.send_request.assert_called_once_with('volume-create', volume_create_args) def test_create_volume_adaptive_not_supported(self): self.client.features.add_feature('ADAPTIVE_QOS', supported=False) self.mock_object(self.client, 'send_request') self.assertRaises(exception.NetAppException, self.client.create_volume, fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100, adaptive_qos_policy_group='fake') self.client.send_request.assert_not_called() @ddt.data(True, False) def test_create_volume_async(self, auto_provisioned): api_response = netapp_api.NaElement(fake.ASYNC_OPERATION_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.mock_object( self.client, '_get_create_volume_api_args', mock.Mock(return_value={})) result = self.client.create_volume_async( [fake.SHARE_AGGREGATE_NAME], fake.SHARE_NAME, 1, auto_provisioned=auto_provisioned) volume_create_args = { 'size': 1073741824, 'volume-name': fake.SHARE_NAME, } if auto_provisioned: volume_create_args['auto-provision-as'] = 'flexgroup' else: volume_create_args['aggr-list'] = [ {'aggr-name': fake.SHARE_AGGREGATE_NAME}] expected_result = { 'jobid': '123', 'error-code': None, 'error-message': None, } self.client._get_create_volume_api_args.assert_called_once_with( fake.SHARE_NAME, False, None, None, None, 'rw', None, False, None, None, None) self.client.send_request.assert_called_with('volume-create-async', volume_create_args) self.assertEqual(expected_result, result) def 
test_create_volume_async_adaptive_not_supported(self): self.client.features.add_feature('ADAPTIVE_QOS', supported=False) self.mock_object(self.client, 'send_request') self.assertRaises(exception.NetAppException, self.client.create_volume_async, [fake.SHARE_AGGREGATE_NAME], fake.SHARE_NAME, 100, adaptive_qos_policy_group='fake') self.client.send_request.assert_not_called() def test_get_create_volume_api_args_with_mount_point_name(self): self.client.features.add_feature('FLEXVOL_ENCRYPTION') volume_type = 'rw' thin_provisioned = False snapshot_policy = 'default' language = 'en-US' reserve = 15 qos_name = 'fake_qos' encrypt = True qos_adaptive_name = 'fake_adaptive_qos' mount_point_name = 'fake_mp' result_api_args = self.client._get_create_volume_api_args( fake.SHARE_NAME, thin_provisioned, snapshot_policy, language, reserve, volume_type, qos_name, encrypt, qos_adaptive_name, mount_point_name) expected_api_args = { 'volume-type': volume_type, 'junction-path': '/fake_mp', 'space-reserve': 'volume', 'snapshot-policy': snapshot_policy, 'language-code': language, 'percentage-snapshot-reserve': str(reserve), 'qos-policy-group-name': qos_name, 'qos-adaptive-policy-group-name': qos_adaptive_name, 'encrypt': 'true', } self.assertEqual(expected_api_args, result_api_args) def test_get_create_volume_api_args_with_extra_specs(self): self.client.features.add_feature('FLEXVOL_ENCRYPTION') volume_type = 'rw' thin_provisioned = False snapshot_policy = 'default' language = 'en-US' reserve = 15 qos_name = 'fake_qos' encrypt = True qos_adaptive_name = 'fake_adaptive_qos' result_api_args = self.client._get_create_volume_api_args( fake.SHARE_NAME, thin_provisioned, snapshot_policy, language, reserve, volume_type, qos_name, encrypt, qos_adaptive_name) expected_api_args = { 'volume-type': volume_type, 'junction-path': '/fake_share', 'space-reserve': 'volume', 'snapshot-policy': snapshot_policy, 'language-code': language, 'percentage-snapshot-reserve': str(reserve), 'qos-policy-group-name': 
qos_name, 'qos-adaptive-policy-group-name': qos_adaptive_name, 'encrypt': 'true', } self.assertEqual(expected_api_args, result_api_args) def test_get_create_volume_api_args_no_extra_specs(self): self.client.features.add_feature('FLEXVOL_ENCRYPTION') volume_type = 'dp' thin_provisioned = False snapshot_policy = None language = None reserve = None qos_name = None encrypt = False qos_adaptive_name = None result_api_args = self.client._get_create_volume_api_args( fake.SHARE_NAME, thin_provisioned, snapshot_policy, language, reserve, volume_type, qos_name, encrypt, qos_adaptive_name) expected_api_args = { 'volume-type': volume_type, 'space-reserve': 'volume', 'encrypt': 'false' } self.assertEqual(expected_api_args, result_api_args) def test_get_create_volume_api_args_encrypted_not_supported(self): encrypt = True self.assertRaises(exception.NetAppException, self.client._get_create_volume_api_args, fake.SHARE_NAME, True, 'default', 'en-US', 15, 'rw', 'fake_qos', encrypt, 'fake_qos_adaptive') def test_is_flexvol_encrypted_unsupported(self): self.client.features.add_feature('FLEXVOL_ENCRYPTION', supported=False) result = self.client.is_flexvol_encrypted(fake.SHARE_NAME, fake.VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_encrypted_no_records_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.is_flexvol_encrypted(fake.SHARE_NAME, fake.VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_encrypted(self): self.client.features.add_feature('FLEXVOL_ENCRYPTION', supported=True) api_response = netapp_api.NaElement( fake.GET_VOLUME_FOR_ENCRYPTED_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.is_flexvol_encrypted(fake.SHARE_NAME, fake.VSERVER_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'encrypt': 'true', 'volume-id-attributes': { 'name': 
fake.SHARE_NAME, 'owning-vserver-name': fake.VSERVER_NAME, } } }, 'desired-attributes': { 'volume-attributes': { 'encrypt': None, } } } self.client.send_iter_request.assert_called_once_with( 'volume-get-iter', volume_get_iter_args) self.assertTrue(result) def test_is_flexvol_encrypted_8_x_system_version_response(self): self.client.features.add_feature('FLEXVOL_ENCRYPTION', supported=True) api_response = netapp_api.NaElement( fake.GET_VOLUME_FOR_ENCRYPTED_OLD_SYS_VERSION_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.is_flexvol_encrypted(fake.SHARE_NAME, fake.VSERVER_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'encrypt': 'true', 'volume-id-attributes': { 'name': fake.SHARE_NAME, 'owning-vserver-name': fake.VSERVER_NAME, } } }, 'desired-attributes': { 'volume-attributes': { 'encrypt': None, } } } self.client.send_iter_request.assert_called_once_with( 'volume-get-iter', volume_get_iter_args) self.assertFalse(result) def test_update_volume_snapshot_policy(self): self.mock_object(self.client, 'send_request') self.client.update_volume_snapshot_policy(fake.SHARE_NAME, fake.SNAPSHOT_POLICY_NAME) volume_modify_iter_api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-snapshot-attributes': { 'snapshot-policy': fake.SNAPSHOT_POLICY_NAME, }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_api_args) def test_enable_dedup(self): self.mock_object(self.client, 'send_request') self.client.enable_dedup(fake.SHARE_NAME) sis_enable_args = {'path': '/vol/%s' % fake.SHARE_NAME} self.client.send_request.assert_called_once_with('sis-enable', sis_enable_args) def test_enable_dedup_already_enabled(self): side_effect = netapp_api.NaApiError( code=netapp_api.OPERATION_ALREADY_ENABLED, message='It has already been enabled') 
self.mock_object(self.client, 'send_request', mock.Mock(side_effect=side_effect)) self.client.enable_dedup(fake.SHARE_NAME) sis_enable_args = {'path': '/vol/%s' % fake.SHARE_NAME} self.client.send_request.assert_called_once_with('sis-enable', sis_enable_args) def test_enable_dedup_currently_active(self): side_effect = netapp_api.NaApiError( code=netapp_api.OPERATION_ALREADY_ENABLED, message='The sis operation is currently active') self.mock_object(self.client, 'send_request', mock.Mock(side_effect=side_effect)) self.assertRaises(exception.NetAppException, self.client.enable_dedup, fake.SHARE_NAME) def test_disable_dedup(self): self.mock_object(self.client, 'send_request') self.client.disable_dedup(fake.SHARE_NAME) sis_disable_args = {'path': '/vol/%s' % fake.SHARE_NAME} self.client.send_request.assert_called_once_with('sis-disable', sis_disable_args) def test_disable_dedup_currently_active(self): side_effect = netapp_api.NaApiError( code=netapp_api.OPERATION_ALREADY_ENABLED, message='The sis operation is currently active') self.mock_object(self.client, 'send_request', mock.Mock(side_effect=side_effect)) self.assertRaises(exception.NetAppException, self.client.disable_dedup, fake.SHARE_NAME) def test_enable_compression(self): self.mock_object(self.client, 'send_request') self.client.enable_compression(fake.SHARE_NAME) sis_set_config_args = { 'path': '/vol/%s' % fake.SHARE_NAME, 'enable-compression': 'true' } self.client.send_request.assert_called_once_with('sis-set-config', sis_set_config_args) def test_disable_compression(self): self.mock_object(self.client, 'send_request') self.client.disable_compression(fake.SHARE_NAME) sis_set_config_args = { 'path': '/vol/%s' % fake.SHARE_NAME, 'enable-compression': 'false' } self.client.send_request.assert_called_once_with('sis-set-config', sis_set_config_args) def test_enable_dedupe_async(self): self.mock_object(self.client, 'send_request') self.client.enable_dedupe_async(fake.SHARE_NAME) sis_enable_args = {'volume-name': 
fake.SHARE_NAME} self.client.send_request.assert_called_once_with( 'sis-enable-async', sis_enable_args) def test_disable_dedupe_async(self): self.mock_object(self.client, 'send_request') self.client.disable_dedupe_async(fake.SHARE_NAME) sis_enable_args = {'volume-name': fake.SHARE_NAME} self.client.send_request.assert_called_once_with( 'sis-disable-async', sis_enable_args) def test_enable_compression_async(self): self.mock_object(self.client, 'send_request') self.client.enable_compression_async(fake.SHARE_NAME) sis_set_config_args = { 'volume-name': fake.SHARE_NAME, 'enable-compression': 'true' } self.client.send_request.assert_called_once_with( 'sis-set-config-async', sis_set_config_args) def test_disable_compression_async(self): self.mock_object(self.client, 'send_request') self.client.disable_compression_async(fake.SHARE_NAME) sis_set_config_args = { 'volume-name': fake.SHARE_NAME, 'enable-compression': 'false' } self.client.send_request.assert_called_once_with( 'sis-set-config-async', sis_set_config_args) def test_apply_volume_efficiency_policy_with_policy(self): self.mock_object(self.client, 'send_request') self.client.apply_volume_efficiency_policy( fake.SHARE_NAME, fake.VOLUME_EFFICIENCY_POLICY_NAME ) volume_efficiency_config_args = { 'path': '/vol/%s' % fake.SHARE_NAME, 'policy-name': fake.VOLUME_EFFICIENCY_POLICY_NAME } self.client.send_request.assert_called_once_with( 'sis-set-config', volume_efficiency_config_args) def test_apply_volume_efficiency_policy_without_policy(self): self.mock_object(self.client, 'send_request') self.client.apply_volume_efficiency_policy( fake.SHARE_NAME, None ) self.client.send_request.assert_not_called() def test_apply_volume_efficiency_policy_async_with_policy(self): self.mock_object(self.client.connection, 'send_request') self.client.apply_volume_efficiency_policy_async( fake.SHARE_NAME, fake.VOLUME_EFFICIENCY_POLICY_NAME ) volume_efficiency_config_args = { 'path': '/vol/%s' % fake.SHARE_NAME, 'policy-name': 
fake.VOLUME_EFFICIENCY_POLICY_NAME } self.client.connection.send_request.assert_called_once_with( 'sis-set-config-async', volume_efficiency_config_args) def test_apply_volume_efficiency_policy_async_without_policy(self): self.mock_object(self.client.connection, 'send_request') self.client.apply_volume_efficiency_policy_async( fake.SHARE_NAME ) self.client.connection.send_request.assert_not_called() def test_get_volume_efficiency_status(self): api_response = netapp_api.NaElement(fake.SIS_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_efficiency_status(fake.SHARE_NAME) sis_get_iter_args = { 'query': { 'sis-status-info': { 'path': '/vol/%s' % fake.SHARE_NAME, }, }, 'desired-attributes': { 'sis-status-info': { 'state': None, 'is-compression-enabled': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('sis-get-iter', sis_get_iter_args)]) expected = {'dedupe': True, 'compression': True} self.assertDictEqual(expected, result) def test_get_volume_efficiency_status_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_efficiency_status(fake.SHARE_NAME) expected = {'dedupe': False, 'compression': False} self.assertDictEqual(expected, result) def test_set_volume_max_files(self): api_response = netapp_api.NaElement(fake.VOLUME_MODIFY_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.set_volume_max_files(fake.SHARE_NAME, fake.MAX_FILES) volume_modify_iter_api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': { 'files-total': fake.MAX_FILES, }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', 
volume_modify_iter_api_args) def test_set_volume_name(self): self.mock_object(self.client, 'send_request') self.client.set_volume_name(fake.SHARE_NAME, 'new_name') volume_rename_api_args = { 'volume': fake.SHARE_NAME, 'new-volume-name': 'new_name', } self.client.send_request.assert_called_once_with( 'volume-rename', volume_rename_api_args) def test_rename_vserver(self): vserver_api_args = { 'vserver-name': fake.VSERVER_NAME, 'new-name': fake.VSERVER_NAME_2, } self.mock_object(self.client, 'send_request') self.client.rename_vserver(fake.VSERVER_NAME, fake.VSERVER_NAME_2) self.client.send_request.assert_called_once_with( 'vserver-rename', vserver_api_args ) @ddt.data(True, False) def test_modify_volume_no_optional_args(self, is_flexgroup): self.mock_object(self.client, 'send_request') mock_update_volume_efficiency_attributes = self.mock_object( self.client, 'update_volume_efficiency_attributes') self.mock_object(self.client, '_is_snaplock_enabled_volume', mock.Mock(return_value=True)) aggr = fake.SHARE_AGGREGATE_NAME if is_flexgroup: aggr = list(fake.SHARE_AGGREGATE_NAMES) self.client.modify_volume(aggr, fake.SHARE_NAME) volume_modify_iter_api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': {}, 'volume-language-attributes': {}, 'volume-snapshot-attributes': {}, 'volume-space-attributes': { 'space-guarantee': 'volume', }, 'volume-autosize-attributes': {}, }, }, } if is_flexgroup: volume_modify_iter_api_args['query']['volume-attributes'][ 'volume-id-attributes']['aggr-list'] = [ {'aggr-name': aggr[0]}, {'aggr-name': aggr[1]}] else: volume_modify_iter_api_args['query']['volume-attributes'][ 'volume-id-attributes'][ 'containing-aggregate-name'] = aggr self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_api_args) mock_update_volume_efficiency_attributes.assert_called_once_with( fake.SHARE_NAME, False, False, 
is_flexgroup=is_flexgroup, efficiency_policy=None ) @ddt.data((fake.QOS_POLICY_GROUP_NAME, None), (None, fake.ADAPTIVE_QOS_POLICY_GROUP_NAME)) @ddt.unpack def test_modify_volume_all_optional_args(self, qos_group, adaptive_qos_group): self.client.features.add_feature('ADAPTIVE_QOS') self.mock_object(self.client, 'send_request') mock_update_volume_efficiency_attributes = self.mock_object( self.client, 'update_volume_efficiency_attributes') options = {'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} self.mock_object(self.client, '_is_snaplock_enabled_volume', mock.Mock(return_value=True)) self.client.modify_volume( fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, thin_provisioned=True, snapshot_policy=fake.SNAPSHOT_POLICY_NAME, language=fake.LANGUAGE, dedup_enabled=True, compression_enabled=False, max_files=fake.MAX_FILES, qos_policy_group=qos_group, adaptive_qos_policy_group=adaptive_qos_group, autosize_attributes=fake.VOLUME_AUTOSIZE_ATTRS, hide_snapdir=True, **options ) volume_modify_iter_api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'containing-aggregate-name': fake.SHARE_AGGREGATE_NAME, 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-inode-attributes': { 'files-total': fake.MAX_FILES, }, 'volume-language-attributes': { 'language': fake.LANGUAGE, }, 'volume-snapshot-attributes': { 'snapshot-policy': fake.SNAPSHOT_POLICY_NAME, 'snapdir-access-enabled': 'false' }, 'volume-space-attributes': { 'space-guarantee': 'none', }, 'volume-autosize-attributes': fake.VOLUME_AUTOSIZE_ATTRS, }, }, } if qos_group: qos_update = { 'volume-qos-attributes': { 'policy-group-name': qos_group, }, } volume_modify_iter_api_args[ 'attributes']['volume-attributes'].update(qos_update) if adaptive_qos_group: qos_update = { 'volume-qos-attributes': { 'adaptive-policy-group-name': adaptive_qos_group, }, } volume_modify_iter_api_args[ 'attributes']['volume-attributes'].update(qos_update) 
self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_api_args) mock_update_volume_efficiency_attributes.assert_called_once_with( fake.SHARE_NAME, True, False, is_flexgroup=False, efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME ) @ddt.data( {'existing': (True, True), 'desired': (True, True), 'fg': False, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, True), 'desired': (False, False), 'fg': False, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, True), 'desired': (True, False), 'fg': False, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, False), 'desired': (True, False), 'fg': False, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, False), 'desired': (False, False), 'fg': False, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, False), 'desired': (True, True), 'fg': False, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (False, False), 'desired': (False, False), 'fg': False, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (False, False), 'desired': (True, False), 'fg': False, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (False, False), 'desired': (True, True), 'fg': False, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, True), 'desired': (True, True), 'fg': True, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, True), 'desired': (False, False), 'fg': True, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, True), 'desired': (True, False), 'fg': True, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, False), 'desired': (True, False), 'fg': True, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, False), 'desired': (False, False), 'fg': True, 'efficiency_policy': 
fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (True, False), 'desired': (True, True), 'fg': True, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (False, False), 'desired': (False, False), 'fg': True, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (False, False), 'desired': (True, False), 'fg': True, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, {'existing': (False, False), 'desired': (True, True), 'fg': True, 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, ) @ddt.unpack def test_update_volume_efficiency_attributes(self, existing, desired, fg, efficiency_policy): existing_dedupe = existing[0] existing_compression = existing[1] desired_dedupe = desired[0] desired_compression = desired[1] self.mock_object( self.client, 'get_volume_efficiency_status', mock.Mock(return_value={'dedupe': existing_dedupe, 'compression': existing_compression})) mock_enable_compression = self.mock_object(self.client, 'enable_compression') mock_enable_compression_async = self.mock_object( self.client, 'enable_compression_async') mock_disable_compression = self.mock_object(self.client, 'disable_compression') mock_disable_compression_async = self.mock_object( self.client, 'disable_compression_async') mock_enable_dedup = self.mock_object(self.client, 'enable_dedup') mock_enable_dedup_async = self.mock_object(self.client, 'enable_dedupe_async') mock_disable_dedup = self.mock_object(self.client, 'disable_dedup') mock_disable_dedup_async = self.mock_object(self.client, 'disable_dedupe_async') mock_apply_volume_efficiency_policy = ( self.mock_object(self.client, 'apply_volume_efficiency_policy')) mock_apply_volume_efficiency_policy_async = ( self.mock_object(self.client, 'apply_volume_efficiency_policy_async' ) ) self.client.update_volume_efficiency_attributes( fake.SHARE_NAME, desired_dedupe, desired_compression, is_flexgroup=fg, efficiency_policy=efficiency_policy) if existing_dedupe == desired_dedupe: if fg: 
self.assertFalse(mock_enable_dedup_async.called) self.assertFalse(mock_disable_dedup_async.called) else: self.assertFalse(mock_enable_dedup.called) self.assertFalse(mock_disable_dedup.called) elif existing_dedupe and not desired_dedupe: if fg: self.assertFalse(mock_enable_dedup_async.called) self.assertTrue(mock_disable_dedup_async.called) else: self.assertFalse(mock_enable_dedup.called) self.assertTrue(mock_disable_dedup.called) elif not existing_dedupe and desired_dedupe: if fg: self.assertTrue(mock_enable_dedup_async.called) self.assertFalse(mock_disable_dedup_async.called) else: self.assertTrue(mock_enable_dedup.called) self.assertFalse(mock_disable_dedup.called) if existing_compression == desired_compression: if fg: self.assertFalse(mock_enable_compression_async.called) self.assertFalse(mock_disable_compression_async.called) else: self.assertFalse(mock_enable_compression.called) self.assertFalse(mock_disable_compression.called) elif existing_compression and not desired_compression: if fg: self.assertFalse(mock_enable_compression_async.called) self.assertTrue(mock_disable_compression_async.called) else: self.assertFalse(mock_enable_compression.called) self.assertTrue(mock_disable_compression.called) elif not existing_compression and desired_compression: if fg: self.assertTrue(mock_enable_compression_async.called) self.assertFalse(mock_disable_compression_async.called) else: self.assertTrue(mock_enable_compression.called) self.assertFalse(mock_disable_compression.called) if fg: self.assertTrue(mock_apply_volume_efficiency_policy_async.called) else: self.assertTrue(mock_apply_volume_efficiency_policy.called) def test_set_volume_size(self): api_response = netapp_api.NaElement(fake.VOLUME_MODIFY_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.set_volume_size(fake.SHARE_NAME, 10) volume_modify_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 
'attributes': { 'volume-attributes': { 'volume-space-attributes': { 'size': 10737418240, }, }, }, } self.client.send_request.assert_has_calls([ mock.call('volume-modify-iter', volume_modify_iter_args)]) @ddt.data(True, False) def test_set_volume_snapdir_access(self, hide_snapdir): api_response = netapp_api.NaElement( fake.VOLUME_MODIFY_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.set_volume_snapdir_access(fake.SHARE_NAME, hide_snapdir) api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 'attributes': { 'volume-attributes': { 'volume-snapshot-attributes': { 'snapdir-access-enabled': str( not hide_snapdir).lower(), }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', api_args) def test_set_volume_snapdir_access_api_error(self): api_response = netapp_api.NaElement( fake.VOLUME_MODIFY_ITER_ERROR_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(netapp_api.NaApiError, self.client.set_volume_size, fake.SHARE_NAME, 10) @ddt.data(True, False) def test_set_volume_filesys_size_fixed(self, filesys_size_fixed): api_response = netapp_api.NaElement( fake.VOLUME_MODIFY_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.set_volume_filesys_size_fixed(fake.SHARE_NAME, filesys_size_fixed) api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 'attributes': { 'volume-attributes': { 'volume-space-attributes': { 'is-filesys-size-fixed': str( filesys_size_fixed).lower(), }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', api_args) def test_set_volume_size_api_error(self): api_response = netapp_api.NaElement( fake.VOLUME_MODIFY_ITER_ERROR_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) 
self.assertRaises(netapp_api.NaApiError, self.client.set_volume_size, fake.SHARE_NAME, 10) @ddt.data(None, 'ntfs') def test_set_volume_security_style(self, security_style): api_response = netapp_api.NaElement(fake.VOLUME_MODIFY_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) kwargs = {'security_style': security_style} if security_style else {} self.client.set_volume_security_style(fake.SHARE_NAME, **kwargs) volume_modify_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 'attributes': { 'volume-attributes': { 'volume-security-attributes': { 'style': security_style or 'unix', }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_args) def test_set_volume_security_style_api_error(self): api_response = netapp_api.NaElement( fake.VOLUME_MODIFY_ITER_ERROR_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(netapp_api.NaApiError, self.client.set_volume_security_style, fake.SHARE_NAME, 'ntfs') def test_volume_exists(self): api_response = netapp_api.NaElement(fake.VOLUME_GET_NAME_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.volume_exists(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None } } } } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertTrue(result) def test_volume_exists_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertFalse(self.client.volume_exists(fake.SHARE_NAME)) def test_snapshot_exists(self): api_response = 
netapp_api.NaElement(fake.VOLUME_GET_NAME_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.snapshot_exists(fake.SNAPSHOT_NAME, fake.SHARE_NAME) snapshot_get_iter_args = { 'query': { 'snapshot-info': { 'name': fake.SNAPSHOT_NAME, 'volume': fake.SHARE_NAME, } }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'volume': None, 'busy': None, 'snapshot-owners-list': { 'snapshot-owner': None, } } } } self.client.send_request.assert_has_calls([ mock.call('snapshot-get-iter', snapshot_get_iter_args)]) self.assertTrue(result) def test_snapshot_exists_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertFalse(self.client.snapshot_exists(fake.SNAPSHOT_NAME, fake.SHARE_NAME)) @ddt.data({ 'api_response_xml': fake.SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE, 'raised_exception': exception.SnapshotUnavailable, }, { 'api_response_xml': fake.SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE, 'raised_exception': exception.NetAppException, }) @ddt.unpack def test_snapshot_exists_error(self, api_response_xml, raised_exception): api_response = netapp_api.NaElement(api_response_xml) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(raised_exception, self.client.snapshot_exists, fake.SNAPSHOT_NAME, fake.SHARE_NAME) @ddt.data(True, False) def test_get_aggregate_for_volume(self, is_flexgroup): api_response = netapp_api.NaElement( fake.GET_AGGREGATE_FOR_FLEXGROUP_VOL_RESPONSE if is_flexgroup else fake.GET_AGGREGATE_FOR_VOLUME_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_aggregate_for_volume(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 'desired-attributes': { 'volume-attributes': { 
'volume-id-attributes': { 'aggr-list': { 'aggr-name': None, }, 'containing-aggregate-name': None, 'name': None } } } } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) if is_flexgroup: self.assertEqual([fake.SHARE_AGGREGATE_NAME], result) else: self.assertEqual(fake.SHARE_AGGREGATE_NAME, result) def test_get_aggregate_for_volume_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_aggregate_for_volume, fake.SHARE_NAME) def test_volume_has_luns(self): api_response = netapp_api.NaElement(fake.LUN_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.volume_has_luns(fake.SHARE_NAME) lun_get_iter_args = { 'query': { 'lun-info': { 'volume': fake.SHARE_NAME, }, }, 'desired-attributes': { 'lun-info': { 'path': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('lun-get-iter', lun_get_iter_args)]) self.assertTrue(result) def test_volume_has_luns_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.volume_has_luns(fake.SHARE_NAME) self.assertFalse(result) def test_volume_has_junctioned_volumes(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_JUNCTIONED_VOLUMES_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) fake_junction_path = '/%s' % fake.SHARE_NAME result = self.client.volume_has_junctioned_volumes(fake_junction_path) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'junction-path': fake_junction_path + '/*', }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } 
self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertTrue(result) def test_volume_has_junctioned_volumes_no_junction_path(self): result = self.client.volume_has_junctioned_volumes(None) self.assertFalse(result) def test_volume_has_junctioned_volumes_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) fake_junction_path = '/%s' % fake.SHARE_NAME result = self.client.volume_has_junctioned_volumes(fake_junction_path) self.assertFalse(result) def test_get_volume_snapshot_attributes(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_SNAPSHOT_ATTRIBUTES_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_snapshot_attributes(fake.SHARE_NAME) desired_snapshot_attributes = { 'snapshot-policy': None, 'snapdir-access-enabled': None, } snap_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-snapshot-attributes': desired_snapshot_attributes, }, }, } self.client.send_request.assert_has_calls([ mock.call('volume-get-iter', snap_get_iter_args)]) expected = { 'snapshot-policy': 'daily', 'snapdir-access-enabled': 'false'} self.assertDictEqual(expected, result) @ddt.data(True, False) def test_get_volume(self, is_flexgroup): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_FLEXGROUP_VOLUME_TO_MANAGE_RESPONSE if is_flexgroup else fake.VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_volume(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'aggr-list': 
{ 'aggr-name': None, }, 'containing-aggregate-name': None, 'junction-path': None, 'name': None, 'owning-vserver-name': None, 'type': None, 'style': None, 'style-extended': None, }, 'volume-space-attributes': { 'size': None, 'size-used': None, }, 'volume-qos-attributes': { 'policy-group-name': None, }, 'volume-snaplock-attributes': { 'snaplock-type': None, }, }, }, } expected = { 'aggregate': '' if is_flexgroup else fake.SHARE_AGGREGATE_NAME, 'aggr-list': [fake.SHARE_AGGREGATE_NAME] if is_flexgroup else [], 'junction-path': '/%s' % fake.SHARE_NAME, 'name': fake.SHARE_NAME, 'type': 'rw', 'style': 'flex', 'size': fake.SHARE_SIZE, 'size-used': fake.SHARE_USED_SIZE, 'owning-vserver-name': fake.VSERVER_NAME, 'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME, 'style-extended': (fake.FLEXGROUP_STYLE_EXTENDED if is_flexgroup else fake.FLEXVOL_STYLE_EXTENDED), 'snaplock-type': 'compliance', } self.client.send_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertDictEqual(expected, result) def test_get_volume_no_qos(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_NO_QOS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_volume(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'aggr-list': { 'aggr-name': None, }, 'containing-aggregate-name': None, 'junction-path': None, 'name': None, 'owning-vserver-name': None, 'type': None, 'style': None, 'style-extended': None, }, 'volume-space-attributes': { 'size': None, 'size-used': None, }, 'volume-qos-attributes': { 'policy-group-name': None, }, 'volume-snaplock-attributes': { 'snaplock-type': None, }, }, }, } expected = { 'aggregate': fake.SHARE_AGGREGATE_NAME, 'aggr-list': [], 'junction-path': '/%s' % fake.SHARE_NAME, 'name': fake.SHARE_NAME, 'type': 
'rw', 'style': 'flex', 'size': fake.SHARE_SIZE, 'size-used': fake.SHARE_USED_SIZE, 'owning-vserver-name': fake.VSERVER_NAME, 'qos-policy-group-name': None, 'style-extended': fake.FLEXVOL_STYLE_EXTENDED, 'snaplock-type': "compliance", } self.client.send_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertDictEqual(expected, result) def test_get_volume_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.StorageResourceNotFound, self.client.get_volume, fake.SHARE_NAME) def test_get_volume_not_unique(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_NOT_UNIQUE_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_volume, fake.SHARE_NAME) def test_get_volume_at_junction_path(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) fake_junction_path = '/%s' % fake.SHARE_NAME result = self.client.get_volume_at_junction_path(fake_junction_path) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'junction-path': fake_junction_path, 'style-extended': 'flexgroup|flexvol', }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } expected = { 'name': fake.SHARE_NAME, } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertDictEqual(expected, result) def test_get_volume_at_junction_path_not_specified(self): result = self.client.get_volume_at_junction_path(None) self.assertIsNone(result) def test_get_volume_at_junction_path_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) 
self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) fake_junction_path = '/%s' % fake.SHARE_NAME result = self.client.get_volume_at_junction_path(fake_junction_path) self.assertIsNone(result) @ddt.data(True, False) def test_get_volume_to_manage(self, is_flexgroup): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_FLEXGROUP_VOLUME_TO_MANAGE_RESPONSE if is_flexgroup else fake.VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) aggr = fake.SHARE_AGGREGATE_NAME result = self.client.get_volume_to_manage( [aggr] if is_flexgroup else aggr, fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'aggr-list': { 'aggr-name': None, }, 'containing-aggregate-name': None, 'junction-path': None, 'name': None, 'type': None, 'style': None, 'owning-vserver-name': None, }, 'volume-space-attributes': { 'size': None, }, 'volume-qos-attributes': { 'policy-group-name': None, }, }, }, } if is_flexgroup: volume_get_iter_args['query']['volume-attributes'][ 'volume-id-attributes']['aggr-list'] = [{'aggr-name': aggr}] else: volume_get_iter_args['query']['volume-attributes'][ 'volume-id-attributes']['containing-aggregate-name'] = aggr expected = { 'aggregate': '' if is_flexgroup else aggr, 'aggr-list': [aggr] if is_flexgroup else [], 'junction-path': '/%s' % fake.SHARE_NAME, 'name': fake.SHARE_NAME, 'type': 'rw', 'style': 'flex', 'size': fake.SHARE_SIZE, 'owning-vserver-name': fake.VSERVER_NAME, 'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME, } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertDictEqual(expected, result) def test_get_volume_to_manage_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) 
self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_to_manage(fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME) self.assertIsNone(result) @ddt.data({'qos_policy_group_name': None, 'adaptive_qos_policy_group_name': None}, {'qos_policy_group_name': fake.QOS_POLICY_GROUP_NAME, 'adaptive_qos_policy_group_name': None}, {'qos_policy_group_name': None, 'adaptive_qos_policy_group_name': fake.ADAPTIVE_QOS_POLICY_GROUP_NAME}, {'mount_point_name': None}, ) @ddt.unpack def test_create_volume_clone(self, qos_policy_group_name=None, adaptive_qos_policy_group_name=None, mount_point_name=None): self.client.features.add_feature('ADAPTIVE_QOS') self.mock_object(self.client, 'send_request') set_qos_adapt_mock = self.mock_object( self.client, 'set_qos_adaptive_policy_group_for_volume') self.client.create_volume_clone( fake.SHARE_NAME, fake.PARENT_SHARE_NAME, fake.PARENT_SNAPSHOT_NAME, mount_point_name=mount_point_name, qos_policy_group=qos_policy_group_name, adaptive_qos_policy_group=adaptive_qos_policy_group_name,) volume_clone_create_args = { 'volume': fake.SHARE_NAME, 'parent-volume': fake.PARENT_SHARE_NAME, 'parent-snapshot': fake.PARENT_SNAPSHOT_NAME, 'junction-path': '/%s' % (mount_point_name or fake.SHARE_NAME) } if qos_policy_group_name: volume_clone_create_args.update( {'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME}) if adaptive_qos_policy_group_name: set_qos_adapt_mock.assert_called_once_with( fake.SHARE_NAME, fake.ADAPTIVE_QOS_POLICY_GROUP_NAME ) @ddt.data(None, mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EVOL_CLONE_BEING_SPLIT))) def test_volume_clone_split_start(self, side_effect): self.mock_object( self.client, 'send_request', mock.Mock(side_effect=side_effect)) self.client.volume_clone_split_start(fake.SHARE_NAME) volume_clone_split_args = {'volume': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('volume-clone-split-start', volume_clone_split_args)]) 
@ddt.data(None, mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EVOLOPNOTUNDERWAY))) def test_volume_clone_split_stop(self, side_effect): self.mock_object( self.client, 'send_request', mock.Mock(side_effect=side_effect)) self.client.volume_clone_split_stop(fake.SHARE_NAME) volume_clone_split_args = {'volume': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('volume-clone-split-stop', volume_clone_split_args)]) def test_volume_clone_split_start_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(netapp_api.NaApiError, self.client.volume_clone_split_start, fake.SHARE_NAME) def test_get_clone_children_for_snapshot(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_CLONE_CHILDREN_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_clone_children_for_snapshot( fake.SHARE_NAME, fake.SNAPSHOT_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-clone-attributes': { 'volume-clone-parent-attributes': { 'name': fake.SHARE_NAME, 'snapshot-name': fake.SNAPSHOT_NAME, }, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) expected = [ {'name': fake.CLONE_CHILD_1}, {'name': fake.CLONE_CHILD_2}, ] self.assertEqual(expected, result) def test_get_clone_children_for_snapshot_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_clone_children_for_snapshot( fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertEqual([], result) def test_get_volume_junction_path(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_VOLUME_PATH_RESPONSE) self.mock_object(self.client, 'send_request', 
mock.Mock(return_value=api_response)) result = self.client.get_volume_junction_path(fake.SHARE_NAME) volume_get_volume_path_args = { 'volume': fake.SHARE_NAME, 'is-style-cifs': 'false' } self.client.send_request.assert_has_calls([ mock.call('volume-get-volume-path', volume_get_volume_path_args)]) self.assertEqual(fake.VOLUME_JUNCTION_PATH, result) def test_get_volume_junction_path_cifs(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_VOLUME_PATH_CIFS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_junction_path(fake.SHARE_NAME, is_style_cifs=True) volume_get_volume_path_args = { 'volume': fake.SHARE_NAME, 'is-style-cifs': 'true' } self.client.send_request.assert_has_calls([ mock.call('volume-get-volume-path', volume_get_volume_path_args)]) self.assertEqual(fake.VOLUME_JUNCTION_PATH_CIFS, result) def test_mount_volume_default_junction_path(self): self.mock_object(self.client, 'send_request') self.client.mount_volume(fake.SHARE_NAME) volume_mount_args = { 'volume-name': fake.SHARE_NAME, 'junction-path': '/%s' % fake.SHARE_NAME, } self.client.send_request.assert_has_calls([ mock.call('volume-mount', volume_mount_args)]) def test_mount_volume(self): self.mock_object(self.client, 'send_request') fake_path = '/fake_path' self.client.mount_volume(fake.SHARE_NAME, junction_path=fake_path) volume_mount_args = { 'volume-name': fake.SHARE_NAME, 'junction-path': fake_path, } self.client.send_request.assert_has_calls([ mock.call('volume-mount', volume_mount_args)]) def test_offline_volume(self): self.mock_object(self.client, 'send_request') self.client.offline_volume(fake.SHARE_NAME) volume_offline_args = {'name': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('volume-offline', volume_offline_args)]) def test_offline_volume_already_offline(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error( netapp_api.EVOLUMEOFFLINE))) 
self.client.offline_volume(fake.SHARE_NAME) volume_offline_args = {'name': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('volume-offline', volume_offline_args)]) def test_offline_volume_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(netapp_api.NaApiError, self.client.offline_volume, fake.SHARE_NAME) def test__unmount_volume(self): self.mock_object(self.client, 'send_request') self.client._unmount_volume(fake.SHARE_NAME) volume_unmount_args = { 'volume-name': fake.SHARE_NAME, 'force': 'false' } self.client.send_request.assert_has_calls([ mock.call('volume-unmount', volume_unmount_args)]) def test__unmount_volume_force(self): self.mock_object(self.client, 'send_request') self.client._unmount_volume(fake.SHARE_NAME, force=True) volume_unmount_args = {'volume-name': fake.SHARE_NAME, 'force': 'true'} self.client.send_request.assert_has_calls([ mock.call('volume-unmount', volume_unmount_args)]) def test__unmount_volume_already_unmounted(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error( netapp_api.EVOL_NOT_MOUNTED))) self.client._unmount_volume(fake.SHARE_NAME, force=True) volume_unmount_args = {'volume-name': fake.SHARE_NAME, 'force': 'true'} self.client.send_request.assert_has_calls([ mock.call('volume-unmount', volume_unmount_args)]) def test__unmount_volume_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(netapp_api.NaApiError, self.client._unmount_volume, fake.SHARE_NAME, force=True) def test_unmount_volume(self): self.mock_object(self.client, '_unmount_volume') self.client.unmount_volume(fake.SHARE_NAME) self.client._unmount_volume.assert_called_once_with(fake.SHARE_NAME, force=False) self.assertEqual(1, client_cmode.LOG.debug.call_count) self.assertEqual(0, client_cmode.LOG.warning.call_count) def test_unmount_volume_api_error(self): 
self.mock_object(self.client, '_unmount_volume', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.unmount_volume, fake.SHARE_NAME) self.assertEqual(1, self.client._unmount_volume.call_count) self.assertEqual(0, client_cmode.LOG.debug.call_count) self.assertEqual(0, client_cmode.LOG.warning.call_count) def test_unmount_volume_with_retries(self): side_effect = [netapp_api.NaApiError(code=netapp_api.EAPIERROR, message='...job ID...')] * 5 side_effect.append(None) self.mock_object(self.client, '_unmount_volume', mock.Mock(side_effect=side_effect)) self.mock_object(time, 'sleep') self.client.unmount_volume(fake.SHARE_NAME) self.assertEqual(6, self.client._unmount_volume.call_count) self.assertEqual(1, client_cmode.LOG.debug.call_count) self.assertEqual(5, client_cmode.LOG.warning.call_count) def test_unmount_volume_with_max_retries(self): side_effect = [netapp_api.NaApiError(code=netapp_api.EAPIERROR, message='...job ID...')] * 30 self.mock_object(self.client, '_unmount_volume', mock.Mock(side_effect=side_effect)) self.mock_object(time, 'sleep') self.assertRaises(exception.NetAppException, self.client.unmount_volume, fake.SHARE_NAME) self.assertEqual(10, self.client._unmount_volume.call_count) self.assertEqual(0, client_cmode.LOG.debug.call_count) self.assertEqual(10, client_cmode.LOG.warning.call_count) def test_delete_volume(self): self.mock_object(self.client, 'send_request') self.client.delete_volume(fake.SHARE_NAME) volume_destroy_args = {'name': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('volume-destroy', volume_destroy_args)]) def test_create_snapshot(self): self.mock_object(self.client, 'send_request') self.client.create_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) snapshot_create_args = { 'volume': fake.SHARE_NAME, 'snapshot': fake.SNAPSHOT_NAME } self.client.send_request.assert_has_calls([ mock.call('snapshot-create', snapshot_create_args)]) @ddt.data({ 'mock_return': 
fake.SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE, 'expected': { 'access-time': fake.SNAPSHOT_ACCESS_TIME, 'name': fake.SNAPSHOT_NAME, 'volume': fake.SHARE_NAME, 'busy': False, 'owners': set(), 'locked_by_clone': False, } }, { 'mock_return': fake.SNAPSHOT_GET_ITER_BUSY_RESPONSE, 'expected': { 'access-time': fake.SNAPSHOT_ACCESS_TIME, 'name': fake.SNAPSHOT_NAME, 'volume': fake.SHARE_NAME, 'busy': True, 'owners': {'volume clone'}, 'locked_by_clone': True, } }) @ddt.unpack def test_get_snapshot(self, mock_return, expected): api_response = netapp_api.NaElement(mock_return) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) snapshot_get_iter_args = { 'query': { 'snapshot-info': { 'name': fake.SNAPSHOT_NAME, 'volume': fake.SHARE_NAME, }, }, 'desired-attributes': { 'snapshot-info': { 'access-time': None, 'name': None, 'volume': None, 'busy': None, 'snapshot-owners-list': { 'snapshot-owner': None, } }, }, } self.client.send_request.assert_has_calls([ mock.call('snapshot-get-iter', snapshot_get_iter_args)]) self.assertDictEqual(expected, result) @ddt.data({ 'api_response_xml': fake.NO_RECORDS_RESPONSE, 'raised_exception': exception.SnapshotResourceNotFound, }, { 'api_response_xml': fake.SNAPSHOT_GET_ITER_NOT_UNIQUE_RESPONSE, 'raised_exception': exception.NetAppException, }, { 'api_response_xml': fake.SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE, 'raised_exception': exception.SnapshotUnavailable, }, { 'api_response_xml': fake.SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE, 'raised_exception': exception.NetAppException, }) @ddt.unpack def test_get_snapshot_error(self, api_response_xml, raised_exception): api_response = netapp_api.NaElement(api_response_xml) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(raised_exception, self.client.get_snapshot, fake.SHARE_NAME, fake.SNAPSHOT_NAME) def test_rename_snapshot(self): 
self.mock_object(self.client, 'send_request') self.client.rename_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME, 'new_snapshot_name') snapshot_rename_args = { 'volume': fake.SHARE_NAME, 'current-name': fake.SNAPSHOT_NAME, 'new-name': 'new_snapshot_name' } self.client.send_request.assert_has_calls([ mock.call('snapshot-rename', snapshot_rename_args)]) def test_restore_snapshot(self): self.mock_object(self.client, 'send_request') self.client.restore_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) snapshot_restore_args = { 'volume': fake.SHARE_NAME, 'snapshot': fake.SNAPSHOT_NAME, } self.client.send_request.assert_has_calls([ mock.call('snapshot-restore-volume', snapshot_restore_args)]) @ddt.data(True, False) def test_delete_snapshot(self, ignore_owners): self.mock_object(self.client, 'send_request') self.client.delete_snapshot( fake.SHARE_NAME, fake.SNAPSHOT_NAME, ignore_owners=ignore_owners) snapshot_delete_args = { 'volume': fake.SHARE_NAME, 'snapshot': fake.SNAPSHOT_NAME, 'ignore-owners': 'true' if ignore_owners else 'false', } self.client.send_request.assert_has_calls([ mock.call('snapshot-delete', snapshot_delete_args)]) def test_soft_delete_snapshot(self): mock_delete_snapshot = self.mock_object(self.client, 'delete_snapshot') mock_rename_snapshot = self.mock_object(self.client, 'rename_snapshot') self.client.soft_delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) mock_delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertFalse(mock_rename_snapshot.called) def test_soft_delete_snapshot_api_error(self): mock_delete_snapshot = self.mock_object( self.client, 'delete_snapshot', self._mock_api_error()) mock_rename_snapshot = self.mock_object(self.client, 'rename_snapshot') mock_get_clone_children_for_snapshot = self.mock_object( self.client, 'get_clone_children_for_snapshot', mock.Mock(return_value=fake.CDOT_CLONE_CHILDREN)) mock_volume_clone_split_start = self.mock_object( self.client, 'volume_clone_split_start') 
self.client.soft_delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) mock_delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) mock_rename_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME, 'deleted_manila_' + fake.SNAPSHOT_NAME) mock_get_clone_children_for_snapshot.assert_called_once_with( fake.SHARE_NAME, 'deleted_manila_' + fake.SNAPSHOT_NAME) mock_volume_clone_split_start.assert_has_calls([ mock.call(fake.CDOT_CLONE_CHILD_1), mock.call(fake.CDOT_CLONE_CHILD_2), ]) def test_prune_deleted_snapshots(self): deleted_snapshots_map = { 'vserver1': [{ 'name': 'deleted_snap_1', 'volume': 'fake_volume_1', 'vserver': 'vserver1', }], 'vserver2': [{ 'name': 'deleted_snap_2', 'volume': 'fake_volume_2', 'vserver': 'vserver2', }], } mock_get_deleted_snapshots = self.mock_object( self.client, '_get_deleted_snapshots', mock.Mock(return_value=deleted_snapshots_map)) mock_delete_snapshot = self.mock_object( self.client, 'delete_snapshot', mock.Mock(side_effect=[None, netapp_api.NaApiError])) self.mock_object( copy, 'deepcopy', mock.Mock(return_value=self.client)) self.client.prune_deleted_snapshots() mock_get_deleted_snapshots.assert_called_once_with() mock_delete_snapshot.assert_has_calls([ mock.call('fake_volume_1', 'deleted_snap_1'), mock.call('fake_volume_2', 'deleted_snap_2'), ], any_order=True) def test_get_deleted_snapshots(self): api_response = netapp_api.NaElement( fake.SNAPSHOT_GET_ITER_DELETED_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_deleted_snapshots() snapshot_get_iter_args = { 'query': { 'snapshot-info': { 'name': 'deleted_manila_*', 'busy': 'false', }, }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'vserver': None, 'volume': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('snapshot-get-iter', snapshot_get_iter_args)]) expected = { fake.VSERVER_NAME: [{ 'name': 'deleted_manila_' + fake.SNAPSHOT_NAME, 
'volume': fake.SHARE_NAME, 'vserver': fake.VSERVER_NAME, }], } self.assertDictEqual(expected, result) def test_create_cg_snapshot(self): mock_start_cg_snapshot = self.mock_object( self.client, '_start_cg_snapshot', mock.Mock(return_value=fake.CG_SNAPSHOT_ID)) mock_commit_cg_snapshot = self.mock_object( self.client, '_commit_cg_snapshot') self.client.create_cg_snapshot([fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) mock_start_cg_snapshot.assert_called_once_with( [fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) mock_commit_cg_snapshot.assert_called_once_with(fake.CG_SNAPSHOT_ID) def test_create_cg_snapshot_no_id(self): mock_start_cg_snapshot = self.mock_object( self.client, '_start_cg_snapshot', mock.Mock(return_value=None)) mock_commit_cg_snapshot = self.mock_object( self.client, '_commit_cg_snapshot') self.assertRaises(exception.NetAppException, self.client.create_cg_snapshot, [fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) mock_start_cg_snapshot.assert_called_once_with( [fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) self.assertFalse(mock_commit_cg_snapshot.called) def test_start_cg_snapshot(self): self.mock_object(self.client, 'send_request') self.client._start_cg_snapshot([fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME) cg_start_args = { 'snapshot': fake.SNAPSHOT_NAME, 'timeout': 'relaxed', 'volumes': [ {'volume-name': fake.SHARE_NAME}, {'volume-name': fake.SHARE_NAME_2}, ], } self.client.send_request.assert_has_calls([ mock.call('cg-start', cg_start_args)]) def test_commit_cg_snapshot(self): self.mock_object(self.client, 'send_request') self.client._commit_cg_snapshot(fake.CG_SNAPSHOT_ID) cg_commit_args = {'cg-id': fake.CG_SNAPSHOT_ID} self.client.send_request.assert_has_calls([ mock.call('cg-commit', cg_commit_args)]) def test_create_cifs_share(self): self.mock_object(self.client, 'send_request') self.client.create_cifs_share( fake.SHARE_NAME, fake.VOLUME_JUNCTION_PATH) cifs_share_create_args = { 'path': 
fake.VOLUME_JUNCTION_PATH, 'share-name': fake.SHARE_NAME } self.client.send_request.assert_has_calls([ mock.call('cifs-share-create', cifs_share_create_args)]) def test_get_cifs_share_access(self): api_response = netapp_api.NaElement( fake.CIFS_SHARE_ACCESS_CONTROL_GET_ITER) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_cifs_share_access(fake.SHARE_NAME) cifs_share_access_control_get_iter_args = { 'query': { 'cifs-share-access-control': { 'share': fake.SHARE_NAME, }, }, 'desired-attributes': { 'cifs-share-access-control': { 'user-or-group': None, 'permission': None, }, }, } self.client.send_iter_request.assert_has_calls([ mock.call('cifs-share-access-control-get-iter', cifs_share_access_control_get_iter_args)]) expected = { 'Administrator': 'full_control', 'Administrators': 'change', 'Power Users': 'read', 'Users': 'no_access', } self.assertDictEqual(expected, result) def test_get_cifs_share_access_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_cifs_share_access(fake.SHARE_NAME) self.assertEqual({}, result) @ddt.data({'readonly': False, 'exception': True}, {'readonly': True, 'exception': False}, {'readonly': False, 'exception': False}, {'readonly': True, 'exception': True}) @ddt.unpack def test_add_cifs_share_access(self, readonly, exception): mock_exception = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EDUPLICATEENTRY)) self.mock_object( self.client, 'send_request', mock_exception if exception else mock.Mock(return_value=None)) self.client.add_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME, readonly) cifs_share_access_control_create_args = { 'permission': 'read' if readonly else 'full_control', 'share': fake.SHARE_NAME, 'user-or-group': fake.USER_NAME } self.client.send_request.assert_has_calls([ mock.call( 
'cifs-share-access-control-create', cifs_share_access_control_create_args)]) @ddt.data(True, False) def test_modify_cifs_share_access(self, readonly): self.mock_object(self.client, 'send_request') self.client.modify_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME, readonly) cifs_share_access_control_modify_args = { 'permission': 'read' if readonly else 'full_control', 'share': fake.SHARE_NAME, 'user-or-group': fake.USER_NAME } self.client.send_request.assert_has_calls([ mock.call( 'cifs-share-access-control-modify', cifs_share_access_control_modify_args)]) def test_remove_cifs_share_access(self): self.mock_object(self.client, 'send_request') self.client.remove_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME) cifs_share_access_control_delete_args = { 'user-or-group': fake.USER_NAME, 'share': fake.SHARE_NAME } self.client.send_request.assert_has_calls([ mock.call( 'cifs-share-access-control-delete', cifs_share_access_control_delete_args)]) def test_remove_cifs_share(self): self.mock_object(self.client, 'send_request') self.client.remove_cifs_share(fake.SHARE_NAME) cifs_share_delete_args = {'share-name': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('cifs-share-delete', cifs_share_delete_args)]) def test_remove_cifs_share_not_found(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND)) self.client.remove_cifs_share(fake.SHARE_NAME) cifs_share_args = {'share-name': fake.SHARE_NAME} self.client.send_request.assert_has_calls([ mock.call('cifs-share-delete', cifs_share_args)]) def test_add_nfs_export_rule(self): mock_get_nfs_export_rule_indices = self.mock_object( self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=[])) mock_add_nfs_export_rule = self.mock_object( self.client, '_add_nfs_export_rule') mock_update_nfs_export_rule = self.mock_object( self.client, '_update_nfs_export_rule') auth_methods = ['sys'] self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME, 
fake.IP_ADDRESS, False, auth_methods) mock_get_nfs_export_rule_indices.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) mock_add_nfs_export_rule.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, auth_methods) self.assertFalse(mock_update_nfs_export_rule.called) def test_add_nfs_export_rule_single_existing(self): mock_get_nfs_export_rule_indices = self.mock_object( self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=['1'])) mock_add_nfs_export_rule = self.mock_object( self.client, '_add_nfs_export_rule') mock_update_nfs_export_rule = self.mock_object( self.client, '_update_nfs_export_rule') mock_remove_nfs_export_rules = self.mock_object( self.client, '_remove_nfs_export_rules') auth_methods = ['sys'] self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, auth_methods) mock_get_nfs_export_rule_indices.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) self.assertFalse(mock_add_nfs_export_rule.called) mock_update_nfs_export_rule.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, '1', auth_methods) mock_remove_nfs_export_rules.assert_called_once_with( fake.EXPORT_POLICY_NAME, []) def test_add_nfs_export_rule_multiple_existing(self): mock_get_nfs_export_rule_indices = self.mock_object( self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=['2', '4', '6'])) mock_add_nfs_export_rule = self.mock_object( self.client, '_add_nfs_export_rule') mock_update_nfs_export_rule = self.mock_object( self.client, '_update_nfs_export_rule') mock_remove_nfs_export_rules = self.mock_object( self.client, '_remove_nfs_export_rules') auth_methods = ['sys'] self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, auth_methods) mock_get_nfs_export_rule_indices.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) self.assertFalse(mock_add_nfs_export_rule.called) mock_update_nfs_export_rule.assert_called_once_with( 
fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, '2', auth_methods) mock_remove_nfs_export_rules.assert_called_once_with( fake.EXPORT_POLICY_NAME, ['4', '6']) @ddt.data({'readonly': False, 'auth_method': 'sys'}, {'readonly': True, 'auth_method': 'sys'}) @ddt.unpack def test__add_nfs_export_rule(self, readonly, auth_method): self.mock_object(self.client, 'send_request') self.client._add_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, readonly, [auth_method]) export_rule_create_args = { 'policy-name': fake.EXPORT_POLICY_NAME, 'client-match': fake.IP_ADDRESS, 'ro-rule': [ {'security-flavor': auth_method}, ], 'rw-rule': [ {'security-flavor': auth_method}, ], 'super-user-security': [ {'security-flavor': auth_method}, ], } if readonly: export_rule_create_args['rw-rule'] = [ {'security-flavor': 'never'} ] self.client.send_request.assert_has_calls( [mock.call('export-rule-create', export_rule_create_args)]) @ddt.data({'readonly': False, 'auth_method': 'sys', 'index': '2'}, {'readonly': True, 'auth_method': 'krb5', 'index': '4'}) @ddt.unpack def test_update_nfs_export_rule(self, readonly, auth_method, index): self.mock_object(self.client, 'send_request') self.client._update_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, readonly, index, [auth_method]) export_rule_modify_args = { 'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': index, 'client-match': fake.IP_ADDRESS, 'ro-rule': [ {'security-flavor': auth_method}, ], 'rw-rule': [ {'security-flavor': auth_method}, ], 'super-user-security': [ {'security-flavor': auth_method}, ], } if readonly: export_rule_modify_args['rw-rule'] = [ {'security-flavor': 'never'} ] self.client.send_request.assert_has_calls( [mock.call('export-rule-modify', export_rule_modify_args)]) def test_get_nfs_export_rule_indices(self): api_response = netapp_api.NaElement(fake.EXPORT_RULE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = 
self.client._get_nfs_export_rule_indices( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) export_rule_get_iter_args = { 'query': { 'export-rule-info': { 'policy-name': fake.EXPORT_POLICY_NAME, 'client-match': fake.IP_ADDRESS, }, }, 'desired-attributes': { 'export-rule-info': { 'vserver-name': None, 'policy-name': None, 'client-match': None, 'rule-index': None, }, }, } self.assertListEqual(['1', '3'], result) self.client.send_iter_request.assert_has_calls([ mock.call('export-rule-get-iter', export_rule_get_iter_args)]) def test_remove_nfs_export_rule(self): fake_indices = ['1', '3', '4'] mock_get_nfs_export_rule_indices = self.mock_object( self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=fake_indices)) mock_remove_nfs_export_rules = self.mock_object( self.client, '_remove_nfs_export_rules') self.client.remove_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) mock_get_nfs_export_rule_indices.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) mock_remove_nfs_export_rules.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake_indices) def test_remove_nfs_export_rules(self): fake_indices = ['1', '3'] self.mock_object(self.client, 'send_request') self.client._remove_nfs_export_rules(fake.EXPORT_POLICY_NAME, fake_indices) self.client.send_request.assert_has_calls([ mock.call( 'export-rule-destroy', {'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': '1'}), mock.call( 'export-rule-destroy', {'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': '3'})]) def test_remove_nfs_export_rules_not_found(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND)) self.client._remove_nfs_export_rules(fake.EXPORT_POLICY_NAME, ['1']) self.client.send_request.assert_has_calls([ mock.call( 'export-rule-destroy', {'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': '1'})]) def test_remove_nfs_export_rules_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) 
self.assertRaises(netapp_api.NaApiError, self.client._remove_nfs_export_rules, fake.EXPORT_POLICY_NAME, ['1']) def test_clear_nfs_export_policy_for_volume(self): mock_set_nfs_export_policy_for_volume = self.mock_object( self.client, 'set_nfs_export_policy_for_volume') self.client.clear_nfs_export_policy_for_volume(fake.SHARE_NAME) mock_set_nfs_export_policy_for_volume.assert_called_once_with( fake.SHARE_NAME, 'default') def test_set_nfs_export_policy_for_volume(self): self.mock_object(self.client, 'send_request') self.client.set_nfs_export_policy_for_volume(fake.SHARE_NAME, fake.EXPORT_POLICY_NAME) volume_modify_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-export-attributes': { 'policy': fake.EXPORT_POLICY_NAME, }, }, }, } self.client.send_request.assert_has_calls([ mock.call('volume-modify-iter', volume_modify_iter_args)]) def test_set_qos_policy_group_for_volume(self): self.mock_object(self.client, 'send_request') self.client.set_qos_policy_group_for_volume(fake.SHARE_NAME, fake.QOS_POLICY_GROUP_NAME) volume_modify_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'attributes': { 'volume-attributes': { 'volume-qos-attributes': { 'policy-group-name': fake.QOS_POLICY_GROUP_NAME, }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_args) def test_get_nfs_export_policy_for_volume(self): api_response = netapp_api.NaElement( fake.VOLUME_GET_EXPORT_POLICY_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_nfs_export_policy_for_volume(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-export-attributes': { 'policy': None, }, }, }, } 
self.assertEqual(fake.EXPORT_POLICY_NAME, result) self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) def test_get_nfs_export_policy_for_volume_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_nfs_export_policy_for_volume, fake.SHARE_NAME) def test_create_nfs_export_policy(self): self.mock_object(self.client, 'send_request') self.client.create_nfs_export_policy(fake.EXPORT_POLICY_NAME) export_policy_create_args = {'policy-name': fake.EXPORT_POLICY_NAME} self.client.send_request.assert_has_calls([ mock.call('export-policy-create', export_policy_create_args)]) def test_create_nfs_export_policy_already_present(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EDUPLICATEENTRY)) self.client.create_nfs_export_policy(fake.EXPORT_POLICY_NAME) export_policy_create_args = {'policy-name': fake.EXPORT_POLICY_NAME} self.client.send_request.assert_has_calls([ mock.call('export-policy-create', export_policy_create_args)]) def test_create_nfs_export_policy_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.create_nfs_export_policy, fake.EXPORT_POLICY_NAME) def test_soft_delete_nfs_export_policy(self): self.mock_object(self.client, 'delete_nfs_export_policy') self.mock_object(self.client, 'rename_nfs_export_policy') self.client.soft_delete_nfs_export_policy(fake.EXPORT_POLICY_NAME) self.client.delete_nfs_export_policy.assert_has_calls([ mock.call(fake.EXPORT_POLICY_NAME)]) self.assertFalse(self.client.rename_nfs_export_policy.called) def test_soft_delete_nfs_export_policy_api_error(self): self.mock_object(self.client, 'delete_nfs_export_policy', self._mock_api_error()) self.mock_object(self.client, 'rename_nfs_export_policy') 
self.client.soft_delete_nfs_export_policy(fake.EXPORT_POLICY_NAME) self.client.delete_nfs_export_policy.assert_has_calls([ mock.call(fake.EXPORT_POLICY_NAME)]) self.assertTrue(self.client.rename_nfs_export_policy.called) def test_delete_nfs_export_policy(self): self.mock_object(self.client, 'send_request') self.client.delete_nfs_export_policy(fake.EXPORT_POLICY_NAME) export_policy_destroy_args = {'policy-name': fake.EXPORT_POLICY_NAME} self.client.send_request.assert_has_calls([ mock.call('export-policy-destroy', export_policy_destroy_args)]) def test_delete_nfs_export_policy_not_found(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND)) self.client.delete_nfs_export_policy(fake.EXPORT_POLICY_NAME) export_policy_destroy_args = {'policy-name': fake.EXPORT_POLICY_NAME} self.client.send_request.assert_has_calls([ mock.call('export-policy-destroy', export_policy_destroy_args)]) def test_delete_nfs_export_policy_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.delete_nfs_export_policy, fake.EXPORT_POLICY_NAME) def test_rename_nfs_export_policy(self): self.mock_object(self.client, 'send_request') self.client.rename_nfs_export_policy(fake.EXPORT_POLICY_NAME, 'new_policy_name') export_policy_rename_args = { 'policy-name': fake.EXPORT_POLICY_NAME, 'new-policy-name': 'new_policy_name' } self.client.send_request.assert_has_calls([ mock.call('export-policy-rename', export_policy_rename_args)]) def test_prune_deleted_nfs_export_policies(self): # Mock client lest we not be able to see calls on its copy. 
self.mock_object(copy, 'deepcopy', mock.Mock(return_value=self.client)) self.mock_object(self.client, '_get_deleted_nfs_export_policies', mock.Mock(return_value=fake.DELETED_EXPORT_POLICIES)) self.mock_object(self.client, 'delete_nfs_export_policy') self.client.prune_deleted_nfs_export_policies() self.assertTrue(self.client.delete_nfs_export_policy.called) self.client.delete_nfs_export_policy.assert_has_calls( [mock.call(policy) for policy in fake.DELETED_EXPORT_POLICIES[fake.VSERVER_NAME]]) def test_prune_deleted_nfs_export_policies_api_error(self): self.mock_object(copy, 'deepcopy', mock.Mock(return_value=self.client)) self.mock_object(self.client, '_get_deleted_nfs_export_policies', mock.Mock(return_value=fake.DELETED_EXPORT_POLICIES)) self.mock_object(self.client, 'delete_nfs_export_policy', self._mock_api_error()) self.client.prune_deleted_nfs_export_policies() self.assertTrue(self.client.delete_nfs_export_policy.called) self.client.delete_nfs_export_policy.assert_has_calls( [mock.call(policy) for policy in fake.DELETED_EXPORT_POLICIES[fake.VSERVER_NAME]]) def test_get_deleted_nfs_export_policies(self): api_response = netapp_api.NaElement( fake.DELETED_EXPORT_POLICY_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_deleted_nfs_export_policies() export_policy_get_iter_args = { 'query': { 'export-policy-info': { 'policy-name': 'deleted_manila_*', }, }, 'desired-attributes': { 'export-policy-info': { 'policy-name': None, 'vserver': None, }, }, } self.assertSequenceEqual(fake.DELETED_EXPORT_POLICIES, result) self.client.send_iter_request.assert_has_calls([ mock.call('export-policy-get-iter', export_policy_get_iter_args)]) def test_get_ems_log_destination_vserver(self): self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(1, 21))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(return_value=[fake.ADMIN_VSERVER_NAME])) result = 
self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with(vserver_type='admin') self.assertEqual(fake.ADMIN_VSERVER_NAME, result) def test_get_ems_log_destination_vserver_future(self): self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(2, 0))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(return_value=[fake.ADMIN_VSERVER_NAME])) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with(vserver_type='admin') self.assertEqual(fake.ADMIN_VSERVER_NAME, result) def test_get_ems_log_destination_vserver_legacy(self): self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(1, 15))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(return_value=[fake.NODE_VSERVER_NAME])) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with(vserver_type='node') self.assertEqual(fake.NODE_VSERVER_NAME, result) def test_get_ems_log_destination_no_cluster_creds(self): self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(1, 21))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(side_effect=[[], [fake.VSERVER_NAME]])) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_has_calls([ mock.call(vserver_type='admin'), mock.call(vserver_type='data')]) self.assertEqual(fake.VSERVER_NAME, result) def test_get_ems_log_destination_vserver_not_found(self): self.mock_object(self.client, 'get_ontapi_version', mock.Mock(return_value=(1, 21))) mock_list_vservers = self.mock_object( self.client, 'list_vservers', mock.Mock(return_value=[])) self.assertRaises(exception.NotFound, self.client._get_ems_log_destination_vserver) mock_list_vservers.assert_has_calls([ mock.call(vserver_type='admin'), mock.call(vserver_type='data'), mock.call(vserver_type='node')]) def test_send_ems_log_message(self): # Mock client 
lest we not be able to see calls on its copy. self.mock_object( copy, 'copy', mock.Mock(side_effect=[self.client, self.client.connection])) self.mock_object(self.client, '_get_ems_log_destination_vserver', mock.Mock(return_value=fake.ADMIN_VSERVER_NAME)) self.mock_object(self.client, 'send_request') self.client.send_ems_log_message(fake.EMS_MESSAGE) self.client.send_request.assert_has_calls([ mock.call('ems-autosupport-log', fake.EMS_MESSAGE)]) self.assertEqual(1, client_cmode.LOG.debug.call_count) def test_send_ems_log_message_api_error(self): # Mock client lest we not be able to see calls on its copy. self.mock_object( copy, 'copy', mock.Mock(side_effect=[self.client, self.client.connection])) self.mock_object(self.client, '_get_ems_log_destination_vserver', mock.Mock(return_value=fake.ADMIN_VSERVER_NAME)) self.mock_object(self.client, 'send_request', self._mock_api_error()) self.client.send_ems_log_message(fake.EMS_MESSAGE) self.client.send_request.assert_has_calls([ mock.call('ems-autosupport-log', fake.EMS_MESSAGE)]) self.assertEqual(1, client_cmode.LOG.warning.call_count) def test_get_aggregate_none_specified(self): result = self.client.get_aggregate('') self.assertEqual({}, result) def test_get_aggregate(self): self.client.features.SNAPLOCK = True api_response = netapp_api.NaElement( fake.AGGR_GET_ITER_SSC_RESPONSE).get_child_by_name( 'attributes-list').get_children() self.mock_object(self.client, '_get_aggregates', mock.Mock(return_value=api_response)) result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-raid-attributes': { 'raid-type': None, 'is-hybrid': None, }, 'aggr-ownership-attributes': { 'home-id': None, 'owner-id': None, }, }, } if self.client.features.SNAPLOCK: desired_attributes['aggr-attributes']['aggr-snaplock-attributes']\ = {'is-snaplock': None, 'snaplock-type': None} self.client._get_aggregates.assert_has_calls([ mock.call( 
aggregate_names=[fake.SHARE_AGGREGATE_NAME], desired_attributes=desired_attributes)]) expected = { 'name': fake.SHARE_AGGREGATE_NAME, 'raid-type': 'raid_dp', 'is-hybrid': False, 'is-home': True, 'snaplock-type': 'compliance', 'is-snaplock': 'true' } self.assertEqual(expected, result) def test_get_aggregate_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME) self.assertEqual({}, result) def test_get_aggregate_api_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME) self.assertEqual({}, result) @ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']}, {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},) @ddt.unpack def test_get_aggregate_disk_types(self, types, expected): mock_get_aggregate_disk_types = self.mock_object( self.client, '_get_aggregate_disk_types', mock.Mock(return_value=types)) result = self.client.get_aggregate_disk_types( fake.SHARE_AGGREGATE_NAME) self.assertEqual(sorted(expected), sorted(result)) mock_get_aggregate_disk_types.assert_called_once_with( fake.SHARE_AGGREGATE_NAME) def test_get_aggregate_disk_types_not_found(self): mock_get_aggregate_disk_types = self.mock_object( self.client, '_get_aggregate_disk_types', mock.Mock(return_value=set())) result = self.client.get_aggregate_disk_types( fake.SHARE_AGGREGATE_NAME) self.assertIsNone(result) mock_get_aggregate_disk_types.assert_called_once_with( fake.SHARE_AGGREGATE_NAME) def test_get_aggregate_disk_types_shared(self): self.client.features.add_feature('ADVANCED_DISK_PARTITIONING') mock_get_aggregate_disk_types = self.mock_object( self.client, '_get_aggregate_disk_types', mock.Mock(side_effect=[set(['SSD']), set(['SATA'])])) result = self.client.get_aggregate_disk_types( fake.SHARE_AGGREGATE_NAME) 
self.assertIsInstance(result, list) self.assertEqual(sorted(['SATA', 'SSD']), sorted(result)) mock_get_aggregate_disk_types.assert_has_calls([ mock.call(fake.SHARE_AGGREGATE_NAME), mock.call(fake.SHARE_AGGREGATE_NAME, shared=True), ]) @ddt.data({ 'shared': False, 'query_disk_raid_info': { 'disk-aggregate-info': { 'aggregate-name': fake.SHARE_AGGREGATE_NAME, }, }, }, { 'shared': True, 'query_disk_raid_info': { 'disk-shared-info': { 'aggregate-list': { 'shared-aggregate-info': { 'aggregate-name': fake.SHARE_AGGREGATE_NAME, }, }, }, }, }) @ddt.unpack def test__get_aggregate_disk_types_ddt(self, shared, query_disk_raid_info): api_response = netapp_api.NaElement( fake.STORAGE_DISK_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_aggregate_disk_types( fake.SHARE_AGGREGATE_NAME, shared=shared) storage_disk_get_iter_args = { 'query': { 'storage-disk-info': { 'disk-raid-info': query_disk_raid_info, }, }, 'desired-attributes': { 'storage-disk-info': { 'disk-raid-info': { 'effective-disk-type': None, }, }, }, } self.client.send_iter_request.assert_called_once_with( 'storage-disk-get-iter', storage_disk_get_iter_args) expected = set(fake.SHARE_AGGREGATE_DISK_TYPES) self.assertEqual(expected, result) def test__get_aggregate_disk_types_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_aggregate_disk_types( fake.SHARE_AGGREGATE_NAME) self.assertEqual(set(), result) def test__get_aggregate_disk_types_api_error(self): self.mock_object(self.client, 'send_iter_request', mock.Mock(side_effect=self._mock_api_error())) result = self.client._get_aggregate_disk_types( fake.SHARE_AGGREGATE_NAME) self.assertEqual(set([]), result) def test_check_for_cluster_credentials(self): api_response = netapp_api.NaElement(fake.SYSTEM_NODE_GET_ITER_RESPONSE) 
self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.check_for_cluster_credentials() self.assertTrue(result) def test_check_for_cluster_credentials_not_cluster(self): self.mock_object(self.client, 'send_iter_request', mock.Mock(side_effect=self._mock_api_error( netapp_api.EAPINOTFOUND))) result = self.client.check_for_cluster_credentials() self.assertFalse(result) def test_check_for_cluster_credentials_api_error(self): self.mock_object(self.client, 'send_iter_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.check_for_cluster_credentials) def test_create_cluster_peer(self): self.mock_object(self.client, 'send_request') self.client.create_cluster_peer(['fake_address_1', 'fake_address_2'], 'fake_user', 'fake_password', 'fake_passphrase') cluster_peer_create_args = { 'peer-addresses': [ {'remote-inet-address': 'fake_address_1'}, {'remote-inet-address': 'fake_address_2'}, ], 'user-name': 'fake_user', 'password': 'fake_password', 'passphrase': 'fake_passphrase', } self.client.send_request.assert_has_calls([ mock.call('cluster-peer-create', cluster_peer_create_args, enable_tunneling=False)]) def test_get_cluster_peers(self): api_response = netapp_api.NaElement( fake.CLUSTER_PEER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_cluster_peers() cluster_peer_get_iter_args = {} self.client.send_iter_request.assert_has_calls([ mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) expected = [{ 'active-addresses': [ fake.CLUSTER_ADDRESS_1, fake.CLUSTER_ADDRESS_2 ], 'availability': 'available', 'cluster-name': fake.CLUSTER_NAME, 'cluster-uuid': 'fake_uuid', 'peer-addresses': [fake.CLUSTER_ADDRESS_1], 'remote-cluster-name': fake.REMOTE_CLUSTER_NAME, 'serial-number': 'fake_serial_number', 'timeout': '60', }] self.assertEqual(expected, result) def test_get_cluster_peers_single(self): 
api_response = netapp_api.NaElement( fake.CLUSTER_PEER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.client.get_cluster_peers(remote_cluster_name=fake.CLUSTER_NAME) cluster_peer_get_iter_args = { 'query': { 'cluster-peer-info': { 'remote-cluster-name': fake.CLUSTER_NAME, } }, } self.client.send_iter_request.assert_has_calls([ mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) def test_get_cluster_peers_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_cluster_peers( remote_cluster_name=fake.CLUSTER_NAME) self.assertEqual([], result) self.assertTrue(self.client.send_iter_request.called) def test_delete_cluster_peer(self): self.mock_object(self.client, 'send_request') self.client.delete_cluster_peer(fake.CLUSTER_NAME) cluster_peer_delete_args = {'cluster-name': fake.CLUSTER_NAME} self.client.send_request.assert_has_calls([ mock.call('cluster-peer-delete', cluster_peer_delete_args, enable_tunneling=False)]) def test_get_cluster_peer_policy(self): self.client.features.add_feature('CLUSTER_PEER_POLICY') api_response = netapp_api.NaElement( fake.CLUSTER_PEER_POLICY_GET_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_cluster_peer_policy() expected = { 'is-unauthenticated-access-permitted': False, 'passphrase-minimum-length': 8 } self.assertEqual(expected, result) self.assertTrue(self.client.send_request.called) def test_get_cluster_peer_policy_not_supported(self): result = self.client.get_cluster_peer_policy() self.assertEqual({}, result) def test_set_cluster_peer_policy_not_supported(self): self.mock_object(self.client, 'send_request') self.client.set_cluster_peer_policy() self.assertFalse(self.client.send_request.called) def test_set_cluster_peer_policy_no_arguments(self): 
self.client.features.add_feature('CLUSTER_PEER_POLICY') self.mock_object(self.client, 'send_request') self.client.set_cluster_peer_policy() self.assertFalse(self.client.send_request.called) def test_set_cluster_peer_policy(self): self.client.features.add_feature('CLUSTER_PEER_POLICY') self.mock_object(self.client, 'send_request') self.client.set_cluster_peer_policy( is_unauthenticated_access_permitted=True, passphrase_minimum_length=12) cluster_peer_policy_modify_args = { 'is-unauthenticated-access-permitted': 'true', 'passphrase-minlength': '12', } self.client.send_request.assert_has_calls([ mock.call('cluster-peer-policy-modify', cluster_peer_policy_modify_args)]) @ddt.data(None, 'cluster_name') def test_create_vserver_peer(self, cluster_name): self.mock_object(self.client, 'send_request') self.client.create_vserver_peer(fake.VSERVER_NAME, fake.VSERVER_PEER_NAME, peer_cluster_name=cluster_name) vserver_peer_create_args = { 'vserver': fake.VSERVER_NAME, 'peer-vserver': fake.VSERVER_PEER_NAME, 'applications': [ {'vserver-peer-application': 'snapmirror'}, ], } if cluster_name: vserver_peer_create_args['peer-cluster'] = cluster_name self.client.send_request.assert_has_calls([ mock.call('vserver-peer-create', vserver_peer_create_args, enable_tunneling=False)]) def test_delete_vserver_peer(self): self.mock_object(self.client, 'send_request') self.client.delete_vserver_peer('fake_vserver', 'fake_vserver_peer') vserver_peer_delete_args = { 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_peer', } self.client.send_request.assert_has_calls([ mock.call('vserver-peer-delete', vserver_peer_delete_args, enable_tunneling=False)]) def test_accept_vserver_peer(self): self.mock_object(self.client, 'send_request') self.client.accept_vserver_peer('fake_vserver', 'fake_vserver_peer') vserver_peer_accept_args = { 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_peer', } self.client.send_request.assert_has_calls([ mock.call('vserver-peer-accept', 
vserver_peer_accept_args, enable_tunneling=False)]) def test_get_vserver_peers(self): api_response = netapp_api.NaElement( fake.VSERVER_PEER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_peers( vserver_name=fake.VSERVER_NAME, peer_vserver_name=fake.VSERVER_NAME_2) vserver_peer_get_iter_args = { 'query': { 'vserver-peer-info': { 'vserver': fake.VSERVER_NAME, 'peer-vserver': fake.VSERVER_NAME_2, } }, } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-peer-get-iter', vserver_peer_get_iter_args)]) expected = [{ 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_2', 'peer-state': 'peered', 'peer-cluster': 'fake_cluster' }] self.assertEqual(expected, result) def test_get_vserver_peers_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_peers( vserver_name=fake.VSERVER_NAME, peer_vserver_name=fake.VSERVER_NAME_2) self.assertEqual([], result) self.assertTrue(self.client.send_iter_request.called) def test_ensure_snapmirror_v2(self): self.assertIsNone(self.client._ensure_snapmirror_v2()) def test_ensure_snapmirror_v2_not_supported(self): self.client.features.add_feature('SNAPMIRROR_V2', supported=False) self.assertRaises(exception.NetAppException, self.client._ensure_snapmirror_v2) @ddt.data({'schedule': 'fake_schedule', 'policy': 'fake_policy'}, {'schedule': None, 'policy': None}) @ddt.unpack def test_create_snapmirror(self, schedule, policy): self.mock_object(self.client, 'send_request') self.client.create_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, na_utils.DATA_PROTECTION_TYPE, schedule=schedule, policy=policy) snapmirror_create_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': 
fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'relationship-type': na_utils.DATA_PROTECTION_TYPE, } if schedule: snapmirror_create_args['schedule'] = schedule if policy: snapmirror_create_args['policy'] = policy self.client.send_request.assert_has_calls([ mock.call('snapmirror-create', snapmirror_create_args)]) def test_create_snapmirror_already_exists(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.ERELATION_EXISTS)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.create_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, na_utils.DATA_PROTECTION_TYPE) snapmirror_create_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'relationship-type': na_utils.DATA_PROTECTION_TYPE, 'policy': na_utils.MIRROR_ALL_SNAP_POLICY, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-create', snapmirror_create_args)]) def test_create_snapmirror_error(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=0)) self.mock_object(self.client, 'send_request', mock_send_req) self.assertRaises(netapp_api.NaApiError, self.client.create_snapmirror_vol, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, na_utils.DATA_PROTECTION_TYPE) self.assertTrue(self.client.send_request.called) def test_create_snapmirror_svm(self): self.mock_object(self.client, 'send_request') self.client.create_snapmirror_svm(fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER, max_transfer_rate='fake_xfer_rate') snapmirror_create_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'destination-vserver': fake.SM_DEST_VSERVER, 'relationship-type': na_utils.DATA_PROTECTION_TYPE, 'identity-preserve': 'true', 'max-transfer-rate': 'fake_xfer_rate' } self.client.send_request.assert_has_calls([ 
mock.call('snapmirror-create', snapmirror_create_args)]) @ddt.data( { 'source_snapshot': 'fake_snapshot', 'transfer_priority': 'fake_priority' }, { 'source_snapshot': None, 'transfer_priority': None } ) @ddt.unpack def test_initialize_snapmirror(self, source_snapshot, transfer_priority): api_response = netapp_api.NaElement(fake.SNAPMIRROR_INITIALIZE_RESULT) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.initialize_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, source_snapshot=source_snapshot, transfer_priority=transfer_priority) snapmirror_initialize_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } if source_snapshot: snapmirror_initialize_args['source-snapshot'] = source_snapshot if transfer_priority: snapmirror_initialize_args['transfer-priority'] = transfer_priority self.client.send_request.assert_has_calls([ mock.call('snapmirror-initialize', snapmirror_initialize_args)]) expected = { 'operation-id': None, 'status': 'succeeded', 'jobid': None, 'error-code': None, 'error-message': None } self.assertEqual(expected, result) def test_initialize_snapmirror_svm(self): api_response = netapp_api.NaElement(fake.SNAPMIRROR_INITIALIZE_RESULT) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.initialize_snapmirror_svm(fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER) snapmirror_initialize_args = { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-initialize', snapmirror_initialize_args)]) expected = { 'operation-id': None, 'status': 'succeeded', 'jobid': None, 'error-code': None, 'error-message': None } self.assertEqual(expected, result) 
@ddt.data({'snapmirror_destinations_list': [], 'relationship_info_only': True}, {'snapmirror_destinations_list': [], 'relationship_info_only': False}, {'snapmirror_destinations_list': [{'relationship-id': 'fake_relationship_id'}], 'relationship_info_only': True}, {'snapmirror_destinations_list': [{'relationship-id': 'fake_relationship_id'}], 'relationship_info_only': False}) @ddt.unpack def test_release_snapmirror_vol(self, relationship_info_only, snapmirror_destinations_list): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'get_snapmirror_destinations', mock.Mock(return_value=snapmirror_destinations_list)) self.mock_object(self.client, '_ensure_snapmirror_v2') self.client.release_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, relationship_info_only=relationship_info_only) snapmirror_release_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'relationship-info-only': ('true' if relationship_info_only else 'false'), } if len(snapmirror_destinations_list) == 1: snapmirror_release_args['relationship-id'] = 'fake_relationship_id' self.client.send_request.assert_called_once_with( 'snapmirror-release', snapmirror_release_args, enable_tunneling=True) def test_release_snapmirror_vol_error_not_unique_relationship(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'get_snapmirror_destinations', mock.Mock(return_value=[{'relationship-id': 'fake'}, {'relationship-id': 'fake'}])) self.assertRaises(exception.NetAppException, self.client.release_snapmirror_vol, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) def test_release_snapmirror_svm(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_ensure_snapmirror_v2') self.client.release_snapmirror_svm( 
fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER) snapmirror_release_args = { 'query': { 'snapmirror-destination-info': { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', }, }, 'relationship-info-only': 'false', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-release-iter', snapmirror_release_args, enable_tunneling=False)]) def test_quiesce_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.quiesce_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_quiesce_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-quiesce', snapmirror_quiesce_args)]) def test_quiesce_snapmirror_svm(self): self.mock_object(self.client, 'send_request') self.client.quiesce_snapmirror_svm( fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER) snapmirror_quiesce_args = { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-quiesce', snapmirror_quiesce_args)]) @ddt.data(True, False) def test_abort_snapmirror(self, clear_checkpoint): self.mock_object(self.client, 'send_request') self.client.abort_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, clear_checkpoint=clear_checkpoint) snapmirror_abort_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'clear-checkpoint': 'true' if clear_checkpoint else 'false', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-abort', snapmirror_abort_args)]) def test_abort_snapmirror_svm(self): 
self.mock_object(self.client, 'send_request') self.client.abort_snapmirror_svm( fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER) snapmirror_abort_args = { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', 'clear-checkpoint': 'false' } self.client.send_request.assert_has_calls([ mock.call('snapmirror-abort', snapmirror_abort_args)]) def test_abort_snapmirror_no_transfer_in_progress(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.ENOTRANSFER_IN_PROGRESS)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.abort_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_abort_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'clear-checkpoint': 'false', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-abort', snapmirror_abort_args)]) def test_abort_snapmirror_error(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0)) self.mock_object(self.client, 'send_request', mock_send_req) self.assertRaises(netapp_api.NaApiError, self.client.abort_snapmirror_vol, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) def test_break_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.break_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_break_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-break', snapmirror_break_args)]) def test_break_snapmirror_svm(self): self.mock_object(self.client, 'send_request') 
self.client.break_snapmirror_svm( fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER) snapmirror_break_args = { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-break', snapmirror_break_args)]) @ddt.data( { 'schedule': 'fake_schedule', 'policy': 'fake_policy', 'tries': 5, 'max_transfer_rate': 1024, }, { 'schedule': None, 'policy': None, 'tries': None, 'max_transfer_rate': None, } ) @ddt.unpack def test_modify_snapmirror(self, schedule, policy, tries, max_transfer_rate): self.mock_object(self.client, 'send_request') self.client.modify_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, schedule=schedule, policy=policy, tries=tries, max_transfer_rate=max_transfer_rate) snapmirror_modify_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } if schedule: snapmirror_modify_args['schedule'] = schedule if policy: snapmirror_modify_args['policy'] = policy if tries: snapmirror_modify_args['tries'] = tries if max_transfer_rate: snapmirror_modify_args['max-transfer-rate'] = max_transfer_rate self.client.send_request.assert_has_calls([ mock.call('snapmirror-modify', snapmirror_modify_args)]) def test_update_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.update_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_update_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def test_update_snapmirror_svm(self): self.mock_object(self.client, 'send_request') 
self.client.update_snapmirror_svm( fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER) snapmirror_update_args = { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def test_update_snapmirror_already_transferring(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.ETRANSFER_IN_PROGRESS)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.update_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_update_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def test_update_snapmirror_already_transferring_two(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EANOTHER_OP_ACTIVE)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.update_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_update_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def test_update_snapmirror_error(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0)) self.mock_object(self.client, 'send_request', mock_send_req) self.assertRaises(netapp_api.NaApiError, self.client.update_snapmirror_vol, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) def test_delete_snapmirror(self): self.mock_object(self.client, 
'send_request') self.client.delete_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_delete_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } } } self.client.send_request.assert_has_calls([ mock.call('snapmirror-destroy-iter', snapmirror_delete_args)]) def test_delete_snapmirror_svm(self): self.mock_object(self.client, 'send_request') self.client.delete_snapmirror_svm( fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER) snapmirror_delete_args = { 'query': { 'snapmirror-info': { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', } } } self.client.send_request.assert_has_calls([ mock.call('snapmirror-destroy-iter', snapmirror_delete_args)]) def test__get_snapmirrors(self): api_response = netapp_api.NaElement(fake.SNAPMIRROR_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) desired_attributes = { 'snapmirror-info': { 'source-vserver': None, 'source-volume': None, 'destination-vserver': None, 'destination-volume': None, 'is-healthy': None, } } result = self.client._get_snapmirrors( source_vserver=fake.SM_SOURCE_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_vserver=fake.SM_DEST_VSERVER, dest_volume=fake.SM_DEST_VOLUME, desired_attributes=desired_attributes) snapmirror_get_iter_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, }, }, 'desired-attributes': { 'snapmirror-info': { 'source-vserver': None, 'source-volume': None, 'destination-vserver': None, 'destination-volume': None, 'is-healthy': None, }, }, } self.client.send_iter_request.assert_has_calls([ 
mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) self.assertEqual(1, len(result)) def test__get_snapmirrors_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client._get_snapmirrors() self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-iter', {})]) self.assertEqual([], result) def test_get_snapmirrors(self): api_response = netapp_api.NaElement( fake.SNAPMIRROR_GET_ITER_FILTERED_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) desired_attributes = ['source-vserver', 'source-volume', 'destination-vserver', 'destination-volume', 'is-healthy', 'mirror-state', 'schedule', 'relationship-status'] result = self.client.get_snapmirrors( source_vserver=fake.SM_SOURCE_VSERVER, dest_vserver=fake.SM_DEST_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_volume=fake.SM_DEST_VOLUME, desired_attributes=desired_attributes) snapmirror_get_iter_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, }, }, 'desired-attributes': { 'snapmirror-info': { 'source-vserver': None, 'source-volume': None, 'destination-vserver': None, 'destination-volume': None, 'is-healthy': None, 'mirror-state': None, 'schedule': None, 'relationship-status': None, }, }, } expected = [{ 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'is-healthy': 'true', 'mirror-state': 'snapmirrored', 'schedule': 'daily', 'relationship-status': 'idle' }] self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) self.assertEqual(expected, result) def test_get_snapmirrors_svm(self): 
api_response = netapp_api.NaElement( fake.SNAPMIRROR_GET_ITER_FILTERED_RESPONSE_2) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) desired_attributes = ['source-vserver', 'destination-vserver', 'relationship-status', 'mirror-state'] result = self.client.get_snapmirrors_svm( source_vserver=fake.SM_SOURCE_VSERVER, dest_vserver=fake.SM_DEST_VSERVER, desired_attributes=desired_attributes) snapmirror_get_iter_args = { 'query': { 'snapmirror-info': { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', }, }, 'desired-attributes': { 'snapmirror-info': { 'source-vserver': None, 'destination-vserver': None, 'relationship-status': None, 'mirror-state': None, }, }, } expected = [{ 'source-vserver': fake.SM_SOURCE_VSERVER, 'destination-vserver': fake.SM_DEST_VSERVER, 'relationship-status': 'idle', 'mirror-state': 'snapmirrored', }] self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) self.assertEqual(expected, result) @ddt.data(fake.SNAPMIRROR_GET_DESTINATIONS_ITER_FILTERED_RESPONSE, fake.NO_RECORDS_RESPONSE) def test_get_snapmirror_destinations_svm(self, api_response): self.mock_object( self.client, 'send_iter_request', mock.Mock(return_value=netapp_api.NaElement(api_response))) result = self.client.get_snapmirror_destinations_svm( source_vserver=fake.SM_SOURCE_VSERVER, dest_vserver=fake.SM_DEST_VSERVER) snapmirror_get_iter_args = { 'query': { 'snapmirror-destination-info': { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', }, }, } if api_response == fake.NO_RECORDS_RESPONSE: expected = [] else: expected = [{ 'source-vserver': fake.SM_SOURCE_VSERVER, 'destination-vserver': fake.SM_DEST_VSERVER, 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', 'relationship-id': 'fake_relationship_id', }] 
self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-destination-iter', snapmirror_get_iter_args)]) self.assertEqual(expected, result) def test_resume_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.resume_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_resume_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-resume', snapmirror_resume_args)]) def test_resume_snapmirror_svm(self): self.mock_object(self.client, 'send_request') self.client.resume_snapmirror_svm( fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER) snapmirror_resume_args = { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-resume', snapmirror_resume_args)]) def test_resume_snapmirror_not_quiesed(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.ERELATION_NOT_QUIESCED)) self.mock_object(self.client, 'send_request', mock_send_req) self.client.resume_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_resume_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-resume', snapmirror_resume_args)]) def test_resume_snapmirror_error(self): mock_send_req = mock.Mock(side_effect=netapp_api.NaApiError(code=0)) self.mock_object(self.client, 'send_request', mock_send_req) self.assertRaises(netapp_api.NaApiError, self.client.resume_snapmirror_vol, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, 
fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) def test_resync_snapmirror(self): self.mock_object(self.client, 'send_request') self.client.resync_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) snapmirror_resync_args = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-resync', snapmirror_resync_args)]) def test_resync_snapmirror_svm(self): self.mock_object(self.client, 'send_request') self.client.resync_snapmirror_svm( fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER) snapmirror_resync_args = { 'source-location': fake.SM_SOURCE_VSERVER + ':', 'destination-location': fake.SM_DEST_VSERVER + ':', } self.client.send_request.assert_has_calls([ mock.call('snapmirror-resync', snapmirror_resync_args)]) @ddt.data('source', 'destination', None) def test_volume_has_snapmirror_relationships(self, snapmirror_rel_type): """Snapmirror relationships can be both ways.""" vol = fake.FAKE_MANAGE_VOLUME snapmirror = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'is-healthy': 'true', 'mirror-state': 'snapmirrored', 'schedule': 'daily', } expected_get_snapmirrors_call_count = 2 expected_get_snapmirrors_calls = [ mock.call(source_vserver=vol['owning-vserver-name'], source_volume=vol['name']), mock.call(dest_vserver=vol['owning-vserver-name'], dest_volume=vol['name']), ] if snapmirror_rel_type is None: side_effect = ([], []) elif snapmirror_rel_type == 'source': snapmirror['source-vserver'] = vol['owning-vserver-name'] snapmirror['source-volume'] = vol['name'] side_effect = ([snapmirror], None) expected_get_snapmirrors_call_count = 1 expected_get_snapmirrors_calls.pop() else: snapmirror['destination-vserver'] = 
vol['owning-vserver-name'] snapmirror['destination-volume'] = vol['name'] side_effect = (None, [snapmirror]) mock_get_snapmirrors_call = self.mock_object( self.client, 'get_snapmirrors', mock.Mock(side_effect=side_effect)) mock_exc_log = self.mock_object(client_cmode.LOG, 'exception') expected_retval = True if snapmirror_rel_type else False retval = self.client.volume_has_snapmirror_relationships(vol) self.assertEqual(expected_retval, retval) self.assertEqual(expected_get_snapmirrors_call_count, mock_get_snapmirrors_call.call_count) mock_get_snapmirrors_call.assert_has_calls( expected_get_snapmirrors_calls) self.assertFalse(mock_exc_log.called) def test_volume_has_snapmirror_relationships_api_error(self): vol = fake.FAKE_MANAGE_VOLUME expected_get_snapmirrors_calls = [ mock.call(source_vserver=vol['owning-vserver-name'], source_volume=vol['name']), ] mock_get_snapmirrors_call = self.mock_object( self.client, 'get_snapmirrors', mock.Mock( side_effect=self._mock_api_error(netapp_api.EINTERNALERROR))) mock_exc_log = self.mock_object(client_cmode.LOG, 'exception') retval = self.client.volume_has_snapmirror_relationships(vol) self.assertFalse(retval) self.assertEqual(1, mock_get_snapmirrors_call.call_count) mock_get_snapmirrors_call.assert_has_calls( expected_get_snapmirrors_calls) self.assertTrue(mock_exc_log.called) @ddt.data(None, '12345') def test_list_snapmirror_snapshots(self, newer_than): api_response = netapp_api.NaElement( fake.SNAPSHOT_GET_ITER_SNAPMIRROR_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.list_snapmirror_snapshots(fake.SHARE_NAME, newer_than=newer_than) snapshot_get_iter_args = { 'query': { 'snapshot-info': { 'dependency': 'snapmirror', 'volume': fake.SHARE_NAME, }, }, } if newer_than: snapshot_get_iter_args['query']['snapshot-info']['access-time'] = ( '>' + newer_than) self.client.send_iter_request.assert_has_calls([ mock.call('snapshot-get-iter', snapshot_get_iter_args)]) 
expected = [fake.SNAPSHOT_NAME] self.assertEqual(expected, result) @ddt.data( {'method_name': 'start_volume_move', 'ontapi_version': (1, 20)}, {'method_name': 'start_volume_move', 'ontapi_version': (1, 110)}, {'method_name': 'check_volume_move', 'ontapi_version': (1, 20)}, {'method_name': 'check_volume_move', 'ontapi_version': (1, 110)} ) @ddt.unpack def test_volume_move_method(self, method_name, ontapi_version): self.mock_object(client_base.NetAppBaseClient, 'get_ontapi_version', mock.Mock(return_value=ontapi_version)) self.client._init_features() method = getattr(self.client, method_name) self.mock_object(self.client, 'send_request') retval = method(fake.SHARE_NAME, fake.VSERVER_NAME, fake.SHARE_AGGREGATE_NAME) expected_api_args = { 'source-volume': fake.SHARE_NAME, 'vserver': fake.VSERVER_NAME, 'dest-aggr': fake.SHARE_AGGREGATE_NAME, 'cutover-action': 'wait', } if ontapi_version >= (1, 110): expected_api_args['encrypt-destination'] = 'false' self.assertTrue(self.client.features.FLEXVOL_ENCRYPTION) else: self.assertFalse(self.client.features.FLEXVOL_ENCRYPTION) if method_name.startswith('check'): expected_api_args['perform-validation-only'] = 'true' self.assertIsNone(retval) self.client.send_request.assert_called_once_with( 'volume-move-start', expected_api_args) def test_abort_volume_move(self): self.mock_object(self.client, 'send_request') retval = self.client.abort_volume_move( fake.SHARE_NAME, fake.VSERVER_NAME) expected_api_args = { 'source-volume': fake.SHARE_NAME, 'vserver': fake.VSERVER_NAME, } self.assertIsNone(retval) self.client.send_request.assert_called_once_with( 'volume-move-trigger-abort', expected_api_args) @ddt.data(True, False) def test_trigger_volume_move_cutover_force(self, forced): self.mock_object(self.client, 'send_request') retval = self.client.trigger_volume_move_cutover( fake.SHARE_NAME, fake.VSERVER_NAME, force=forced) expected_api_args = { 'source-volume': fake.SHARE_NAME, 'vserver': fake.VSERVER_NAME, 'force': 'true' if forced else 
'false', } self.assertIsNone(retval) self.client.send_request.assert_called_once_with( 'volume-move-trigger-cutover', expected_api_args) def test_get_volume_move_status_no_records(self): self.mock_object(self.client, 'send_iter_request') self.mock_object(self.client, '_has_records', mock.Mock(return_value=False)) self.assertRaises(exception.NetAppException, self.client.get_volume_move_status, fake.SHARE_NAME, fake.VSERVER_NAME) expected_api_args = { 'query': { 'volume-move-info': { 'volume': fake.SHARE_NAME, 'vserver': fake.VSERVER_NAME, }, }, 'desired-attributes': { 'volume-move-info': { 'percent-complete': None, 'estimated-completion-time': None, 'state': None, 'details': None, 'cutover-action': None, 'phase': None, }, }, } self.client.send_iter_request.assert_called_once_with( 'volume-move-get-iter', expected_api_args) def test_get_volume_move_status(self): move_status = netapp_api.NaElement(fake.VOLUME_MOVE_GET_ITER_RESULT) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=move_status)) actual_status_info = self.client.get_volume_move_status( fake.SHARE_NAME, fake.VSERVER_NAME) expected_api_args = { 'query': { 'volume-move-info': { 'volume': fake.SHARE_NAME, 'vserver': fake.VSERVER_NAME, }, }, 'desired-attributes': { 'volume-move-info': { 'percent-complete': None, 'estimated-completion-time': None, 'state': None, 'details': None, 'cutover-action': None, 'phase': None, }, }, } expected_status_info = { 'percent-complete': '82', 'estimated-completion-time': '1481919246', 'state': 'healthy', 'details': 'Cutover Completed::Volume move job finishing move', 'cutover-action': 'retry_on_failure', 'phase': 'finishing', } self.assertDictEqual(expected_status_info, actual_status_info) self.client.send_iter_request.assert_called_once_with( 'volume-move-get-iter', expected_api_args) def test_qos_policy_group_exists_no_records(self): self.mock_object(self.client, 'qos_policy_group_get', mock.Mock( side_effect=exception.NetAppException)) policy_exists = 
self.client.qos_policy_group_exists( 'i-dont-exist-but-i-am') self.assertIs(False, policy_exists) def test_qos_policy_group_exists(self): self.mock_object(self.client, 'qos_policy_group_get', mock.Mock(return_value=fake.QOS_POLICY_GROUP)) policy_exists = self.client.qos_policy_group_exists( fake.QOS_POLICY_GROUP_NAME) self.assertIs(True, policy_exists) def test_qos_policy_group_get_no_permissions_to_execute_zapi(self): naapi_error = self._mock_api_error(code=netapp_api.EAPINOTFOUND, message='13005:Unable to find API') self.mock_object(self.client, 'send_request', naapi_error) self.assertRaises(exception.NetAppException, self.client.qos_policy_group_get, 'possibly-valid-qos-policy') def test_qos_policy_group_get_other_zapi_errors(self): naapi_error = self._mock_api_error(code=netapp_api.EINTERNALERROR, message='13114:Internal error') self.mock_object(self.client, 'send_request', naapi_error) self.assertRaises(netapp_api.NaApiError, self.client.qos_policy_group_get, 'possibly-valid-qos-policy') def test_qos_policy_group_get_none_found(self): no_records_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=no_records_response)) self.assertRaises(exception.NetAppException, self.client.qos_policy_group_get, 'non-existent-qos-policy') qos_policy_group_get_iter_args = { 'query': { 'qos-policy-group-info': { 'policy-group': 'non-existent-qos-policy', }, }, 'desired-attributes': { 'qos-policy-group-info': { 'policy-group': None, 'vserver': None, 'max-throughput': None, 'num-workloads': None }, }, } self.client.send_request.assert_called_once_with( 'qos-policy-group-get-iter', qos_policy_group_get_iter_args, False) def test_qos_policy_group_get(self): api_response = netapp_api.NaElement( fake.QOS_POLICY_GROUP_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) qos_info = self.client.qos_policy_group_get(fake.QOS_POLICY_GROUP_NAME) 
qos_policy_group_get_iter_args = { 'query': { 'qos-policy-group-info': { 'policy-group': fake.QOS_POLICY_GROUP_NAME, }, }, 'desired-attributes': { 'qos-policy-group-info': { 'policy-group': None, 'vserver': None, 'max-throughput': None, 'num-workloads': None }, }, } self.client.send_request.assert_called_once_with( 'qos-policy-group-get-iter', qos_policy_group_get_iter_args, False) self.assertDictEqual(fake.QOS_POLICY_GROUP, qos_info) @ddt.data(None, fake.QOS_MAX_THROUGHPUT) def test_qos_policy_group_create(self, max_throughput): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.PASSED_RESPONSE)) self.client.qos_policy_group_create( fake.QOS_POLICY_GROUP_NAME, fake.VSERVER_NAME, max_throughput=max_throughput) qos_policy_group_create_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME, 'vserver': fake.VSERVER_NAME, } if max_throughput: qos_policy_group_create_args.update( {'max-throughput': max_throughput}) self.client.send_request.assert_called_once_with( 'qos-policy-group-create', qos_policy_group_create_args, False) def test_qos_policy_group_modify(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.PASSED_RESPONSE)) self.client.qos_policy_group_modify(fake.QOS_POLICY_GROUP_NAME, '3000iops') qos_policy_group_modify_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME, 'max-throughput': '3000iops', } self.client.send_request.assert_called_once_with( 'qos-policy-group-modify', qos_policy_group_modify_args, False) def test_qos_policy_group_delete(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.PASSED_RESPONSE)) self.client.qos_policy_group_delete(fake.QOS_POLICY_GROUP_NAME) qos_policy_group_delete_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME, } self.client.send_request.assert_called_once_with( 'qos-policy-group-delete', qos_policy_group_delete_args, False) def test_qos_policy_group_rename(self): self.mock_object(self.client, 'send_request', 
mock.Mock(return_value=fake.PASSED_RESPONSE)) self.client.qos_policy_group_rename( fake.QOS_POLICY_GROUP_NAME, 'new_' + fake.QOS_POLICY_GROUP_NAME) qos_policy_group_rename_args = { 'policy-group-name': fake.QOS_POLICY_GROUP_NAME, 'new-name': 'new_' + fake.QOS_POLICY_GROUP_NAME, } self.client.send_request.assert_called_once_with( 'qos-policy-group-rename', qos_policy_group_rename_args, False) def test_qos_policy_group_rename_noop(self): self.mock_object(self.client, 'send_request') # rename to same name = no-op self.client.qos_policy_group_rename( fake.QOS_POLICY_GROUP_NAME, fake.QOS_POLICY_GROUP_NAME) self.assertFalse(self.client.send_request.called) def test_mark_qos_policy_group_for_deletion_rename_failure(self): self.mock_object(self.client, 'qos_policy_group_exists', mock.Mock(return_value=True)) self.mock_object(self.client, 'qos_policy_group_rename', mock.Mock(side_effect=netapp_api.NaApiError)) self.mock_object(client_cmode.LOG, 'warning') self.mock_object(self.client, 'remove_unused_qos_policy_groups') retval = self.client.mark_qos_policy_group_for_deletion( fake.QOS_POLICY_GROUP_NAME) self.assertIsNone(retval) client_cmode.LOG.warning.assert_called_once() self.client.qos_policy_group_exists.assert_called_once_with( fake.QOS_POLICY_GROUP_NAME) self.client.qos_policy_group_rename.assert_called_once_with( fake.QOS_POLICY_GROUP_NAME, client_cmode.DELETED_PREFIX + fake.QOS_POLICY_GROUP_NAME) self.client.remove_unused_qos_policy_groups.assert_called_once_with() @ddt.data(True, False) def test_mark_qos_policy_group_for_deletion_policy_exists(self, exists): self.mock_object(self.client, 'qos_policy_group_exists', mock.Mock(return_value=exists)) self.mock_object(self.client, 'qos_policy_group_rename') mock_remove_unused_policies = self.mock_object( self.client, 'remove_unused_qos_policy_groups') self.mock_object(client_cmode.LOG, 'warning') retval = self.client.mark_qos_policy_group_for_deletion( fake.QOS_POLICY_GROUP_NAME) self.assertIsNone(retval) if exists: 
self.client.qos_policy_group_rename.assert_called_once_with( fake.QOS_POLICY_GROUP_NAME, client_cmode.DELETED_PREFIX + fake.QOS_POLICY_GROUP_NAME) mock_remove_unused_policies.assert_called_once_with() else: self.assertFalse(self.client.qos_policy_group_rename.called) self.assertFalse( self.client.remove_unused_qos_policy_groups.called) self.assertFalse(client_cmode.LOG.warning.called) @ddt.data(True, False) def test_remove_unused_qos_policy_groups_with_failure(self, failed): if failed: args = mock.Mock(side_effect=netapp_api.NaApiError) else: args = mock.Mock(return_value=fake.PASSED_FAILED_ITER_RESPONSE) self.mock_object(self.client, 'send_request', args) self.mock_object(client_cmode.LOG, 'debug') retval = self.client.remove_unused_qos_policy_groups() qos_policy_group_delete_iter_args = { 'query': { 'qos-policy-group-info': { 'policy-group': '%s*' % client_cmode.DELETED_PREFIX, } }, 'max-records': 3500, 'continue-on-failure': 'true', 'return-success-list': 'false', 'return-failure-list': 'false', } self.assertIsNone(retval) self.client.send_request.assert_called_once_with( 'qos-policy-group-delete-iter', qos_policy_group_delete_iter_args, False) self.assertIs(failed, client_cmode.LOG.debug.called) def test_get_cluster_name(self): api_response = netapp_api.NaElement( fake.CLUSTER_GET_CLUSTER_NAME) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) api_args = { 'desired-attributes': { 'cluster-identity-info': { 'cluster-name': None, } } } result = self.client.get_cluster_name() self.assertEqual(fake.CLUSTER_NAME, result) self.client.send_request.assert_called_once_with( 'cluster-identity-get', api_args, enable_tunneling=False) @ddt.data('fake_snapshot_name', None) def test_check_volume_clone_split_completed(self, get_clone_parent): volume_name = fake.SHARE_NAME mock_get_vol_clone_parent = self.mock_object( self.client, 'get_volume_clone_parent_snaphot', mock.Mock(return_value=get_clone_parent)) result = 
self.client.check_volume_clone_split_completed(volume_name) mock_get_vol_clone_parent.assert_called_once_with(volume_name) expected_result = get_clone_parent is None self.assertEqual(expected_result, result) def test_rehost_volume(self): volume_name = fake.SHARE_NAME vserver = fake.VSERVER_NAME dest_vserver = fake.VSERVER_NAME_2 api_args = { 'volume': volume_name, 'vserver': vserver, 'destination-vserver': dest_vserver, } self.mock_object(self.client, 'send_request') self.client.rehost_volume(volume_name, vserver, dest_vserver) self.client.send_request.assert_called_once_with('volume-rehost', api_args) @ddt.data( {'fake_api_response': fake.VOLUME_GET_ITER_PARENT_SNAP_EMPTY_RESPONSE, 'expected_snapshot_name': None}, {'fake_api_response': fake.VOLUME_GET_ITER_PARENT_SNAP_RESPONSE, 'expected_snapshot_name': fake.SNAPSHOT_NAME}, {'fake_api_response': fake.NO_RECORDS_RESPONSE, 'expected_snapshot_name': None}) @ddt.unpack def test_get_volume_clone_parent_snaphot(self, fake_api_response, expected_snapshot_name): api_response = netapp_api.NaElement(fake_api_response) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_clone_parent_snaphot(fake.SHARE_NAME) expected_api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME } } }, 'desired-attributes': { 'volume-attributes': { 'volume-clone-attributes': { 'volume-clone-parent-attributes': { 'snapshot-name': '' } } } } } self.client.send_iter_request.assert_called_once_with( 'volume-get-iter', expected_api_args) self.assertEqual(expected_snapshot_name, result) def test_set_qos_adaptive_policy_group_for_volume(self): self.client.features.add_feature('ADAPTIVE_QOS') self.mock_object(self.client, 'send_request') self.client.set_qos_adaptive_policy_group_for_volume( fake.SHARE_NAME, fake.QOS_POLICY_GROUP_NAME) volume_modify_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, 
}, }, 'attributes': { 'volume-attributes': { 'volume-qos-attributes': { 'adaptive-policy-group-name': fake.QOS_POLICY_GROUP_NAME, }, }, }, } self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_args) def test_get_nfs_config(self): api_args = { 'query': { 'nfs-info': { 'vserver': 'vserver', }, }, 'desired-attributes': { 'nfs-info': { 'field': None, }, }, } api_response = netapp_api.NaElement( fake.NFS_CONFIG_SERVER_RESULT) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.mock_object(self.client, 'parse_nfs_config', mock.Mock(return_value=None)) self.client.get_nfs_config(['field'], 'vserver') self.client.send_request.assert_called_once_with( 'nfs-service-get-iter', api_args) def test_get_nfs_config_default(self): api_response = netapp_api.NaElement( fake.NFS_CONFIG_DEFAULT_RESULT) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.mock_object(self.client, 'parse_nfs_config', mock.Mock(return_value=None)) self.client.get_nfs_config_default(['field']) self.client.send_request.assert_called_once_with( 'nfs-service-get-create-defaults', None) @ddt.data( {'nfs_info': fake.NFS_CONFIG_SERVER_RESULT, 'desired_args': ['tcp-max-xfer-size'], 'expected_nfs': { 'tcp-max-xfer-size': '65536', }}, {'nfs_info': fake.NFS_CONFIG_SERVER_RESULT, 'desired_args': ['udp-max-xfer-size'], 'expected_nfs': { 'udp-max-xfer-size': '32768', }}, {'nfs_info': fake.NFS_CONFIG_SERVER_RESULT, 'desired_args': ['tcp-max-xfer-size', 'udp-max-xfer-size'], 'expected_nfs': { 'tcp-max-xfer-size': '65536', 'udp-max-xfer-size': '32768', }}, {'nfs_info': fake.NFS_CONFIG_SERVER_RESULT, 'desired_args': [], 'expected_nfs': {}}) @ddt.unpack def test_parse_nfs_config(self, nfs_info, desired_args, expected_nfs): parent_elem = netapp_api.NaElement(nfs_info).get_child_by_name( 'attributes-list') nfs_config = self.client.parse_nfs_config(parent_elem, desired_args) self.assertDictEqual(nfs_config, 
expected_nfs) @ddt.data(fake.NO_RECORDS_RESPONSE, fake.VSERVER_GET_ITER_RESPONSE_INFO) def test_get_vserver_info(self, api_response): self.mock_object(self.client, 'send_iter_request', mock.Mock( return_value=netapp_api.NaElement( api_response))) result = self.client.get_vserver_info(fake.VSERVER_NAME) expected_api_args = { 'query': { 'vserver-info': { 'vserver-name': fake.VSERVER_NAME, }, }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None, 'vserver-subtype': None, 'state': None, 'operational-state': None, }, }, } self.client.send_iter_request.assert_called_once_with( 'vserver-get-iter', expected_api_args) if api_response == fake.NO_RECORDS_RESPONSE: self.assertIsNone(result) else: self.assertDictEqual(fake.VSERVER_INFO, result) @ddt.data({'discard_network': True, 'preserve_snapshots': False}, {'discard_network': False, 'preserve_snapshots': True}) @ddt.unpack def test_create_snapmirror_policy(self, discard_network, preserve_snapshots): api_response = netapp_api.NaElement(fake.PASSED_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.create_snapmirror_policy( fake.SNAPMIRROR_POLICY_NAME, discard_network_info=discard_network, snapmirror_label="backup", preserve_snapshots=preserve_snapshots) expected_create_api_args = { 'policy-name': fake.SNAPMIRROR_POLICY_NAME, 'type': 'async_mirror', } if discard_network: expected_create_api_args['discard-configs'] = { 'svmdr-config-obj': 'network' } expected_calls = [ mock.call('snapmirror-policy-create', expected_create_api_args) ] if preserve_snapshots: expected_add_rules = { 'policy-name': fake.SNAPMIRROR_POLICY_NAME, 'snapmirror-label': 'backup', 'keep': 1, 'preserve': 'false' } expected_calls.append(mock.call('snapmirror-policy-add-rule', expected_add_rules)) self.client.send_request.assert_has_calls(expected_calls) def test_delete_snapmirror_policy(self): api_response = netapp_api.NaElement(fake.PASSED_RESPONSE) self.mock_object(self.client, 
'send_request', mock.Mock(return_value=api_response)) self.client.delete_snapmirror_policy(fake.SNAPMIRROR_POLICY_NAME) expected_api_args = { 'policy-name': fake.SNAPMIRROR_POLICY_NAME, } self.client.send_request.assert_called_once_with( 'snapmirror-policy-delete', expected_api_args) def test_delete_snapmirror_policy_not_found(self): self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND)) self.client.delete_snapmirror_policy(fake.SNAPMIRROR_POLICY_NAME) expected_api_args = { 'policy-name': fake.SNAPMIRROR_POLICY_NAME, } self.client.send_request.assert_called_once_with( 'snapmirror-policy-delete', expected_api_args) def test_get_snapmirror_policies(self): api_response = netapp_api.NaElement( fake.SNAPMIRROR_POLICY_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result_elem = [fake.SNAPMIRROR_POLICY_NAME] result = self.client.get_snapmirror_policies( fake.VSERVER_NAME) expected_api_args = { 'query': { 'snapmirror-policy-info': { 'vserver-name': fake.VSERVER_NAME, }, }, 'desired-attributes': { 'snapmirror-policy-info': { 'policy-name': None, }, }, } self.client.send_iter_request.assert_called_once_with( 'snapmirror-policy-get-iter', expected_api_args) self.assertEqual(result_elem, result) @ddt.data(True, False, None) def test_start_vserver(self, force): api_response = netapp_api.NaElement(fake.PASSED_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.start_vserver(fake.VSERVER_NAME, force=force) expected_api_args = { 'vserver-name': fake.VSERVER_NAME, } if force is not None: expected_api_args['force'] = 'true' if force is True else 'false' self.client.send_request.assert_called_once_with( 'vserver-start', expected_api_args, enable_tunneling=False) def test_start_vserver_already_started(self): self.mock_object(self.client, 'send_request', self._mock_api_error( code=netapp_api.EVSERVERALREADYSTARTED)) 
self.client.start_vserver(fake.VSERVER_NAME) expected_api_args = { 'vserver-name': fake.VSERVER_NAME, } self.client.send_request.assert_called_once_with( 'vserver-start', expected_api_args, enable_tunneling=False) def test_stop_vserver(self): api_response = netapp_api.NaElement(fake.PASSED_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.stop_vserver(fake.VSERVER_NAME) expected_api_args = { 'vserver-name': fake.VSERVER_NAME, } self.client.send_request.assert_called_once_with( 'vserver-stop', expected_api_args, enable_tunneling=False) def test_is_svm_dr_supported(self): self.client.features.add_feature('SVM_DR') result = self.client.is_svm_dr_supported() self.assertTrue(result) @ddt.data({'get_iter_response': fake.CIFS_SHARE_GET_ITER_RESPONSE, 'expected_result': True}, {'get_iter_response': fake.NO_RECORDS_RESPONSE, 'expected_result': False}) @ddt.unpack def test_cifs_share_exists(self, get_iter_response, expected_result): api_response = netapp_api.NaElement(get_iter_response) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) fake_share_path = '/%s' % fake.SHARE_NAME result = self.client.cifs_share_exists(fake.SHARE_NAME) cifs_share_get_iter_args = { 'query': { 'cifs-share': { 'share-name': fake.SHARE_NAME, 'path': fake_share_path, }, }, 'desired-attributes': { 'cifs-share': { 'share-name': None } }, } self.assertEqual(expected_result, result) self.client.send_iter_request.assert_called_once_with( 'cifs-share-get-iter', cifs_share_get_iter_args) def test_get_volume_autosize_attributes(self): api_response = netapp_api.NaElement(fake.VOLUME_AUTOSIZE_GET_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_volume_autosize_attributes(fake.SHARE_NAME) expected_result = {} expected_keys = ['mode', 'grow-threshold-percent', 'minimum-size', 'shrink-threshold-percent', 'maximum-size'] for key in expected_keys: 
expected_result[key] = fake.VOLUME_AUTOSIZE_ATTRS[key] self.assertEqual(expected_result, result) self.client.send_request.assert_called_once_with( 'volume-autosize-get', {'volume': fake.SHARE_NAME}) @ddt.data('server_to_server', 'server_to_default_ad_site', 'default_ad_site_to_default_ad_site', 'default_ad_site_to_server') def test_modify_active_directory_security_service(self, modify_ad_direction): if modify_ad_direction == 'server_to_server': curr_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) new_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_2) if modify_ad_direction == 'server_to_default_ad_site': curr_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) new_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_3) if modify_ad_direction == 'default_ad_site_to_default_ad_site': curr_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_3) new_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_4) if modify_ad_direction == 'default_ad_site_to_server': curr_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_4) new_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_2) # we don't support domain change, but this validation isn't made in # within this method new_sec_service['domain'] = curr_sec_service['domain'] api_responses = [fake.PASSED_RESPONSE, fake.PASSED_RESPONSE, fake.PASSED_RESPONSE] self.mock_object(self.client, 'send_request', mock.Mock(side_effect=api_responses)) self.mock_object(self.client, 'remove_preferred_dcs') self.mock_object(self.client, 'set_preferred_dc') self.mock_object(self.client, 'configure_cifs_options') differing_keys = {'password', 'user', 'server', 'default_ad_site'} self.client.modify_active_directory_security_service( fake.VSERVER_NAME, differing_keys, new_sec_service, curr_sec_service) cifs_server = self.client._get_cifs_server_name(fake.VSERVER_NAME) current_cifs_username = cifs_server + '\\' + curr_sec_service['user'] set_pass_api_args = { 'user-name': current_cifs_username, 'user-password': 
new_sec_service['password'] } user_rename_api_args = { 'user-name': current_cifs_username, 'new-user-name': new_sec_service['user'] } self.client.send_request.assert_has_calls([ mock.call('cifs-local-user-set-password', set_pass_api_args), mock.call('cifs-local-user-rename', user_rename_api_args)]) if modify_ad_direction in ('default_ad_site_to_default_ad_site', 'server_to_default_ad_site'): cifs_server_modify_args = { 'admin-username': new_sec_service['user'], 'admin-password': new_sec_service['password'], 'force-account-overwrite': 'true', 'cifs-server': cifs_server, 'default-site': new_sec_service['default_ad_site'], } self.client.send_request.assert_has_calls([ mock.call('cifs-server-modify', cifs_server_modify_args)]) self.client.configure_cifs_options.assert_has_calls([ mock.call(new_sec_service)]) if modify_ad_direction in ('server_to_server', 'server_to_default_ad_site'): self.client.remove_preferred_dcs.assert_called_once_with( curr_sec_service) if modify_ad_direction in ('server_to_server', 'default_ad_site_to_server'): self.client.set_preferred_dc.assert_called_once_with( new_sec_service) self.client.configure_cifs_options.assert_has_calls([ mock.call(new_sec_service)]) @ddt.data(True, False) def test_modify_active_directory_security_service_error( self, cifs_set_password_failure): curr_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) new_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_2) # we don't support domain change, but this validation isn't made in # within this method new_sec_service['domain'] = curr_sec_service['domain'] if cifs_set_password_failure: api_responses = [netapp_api.NaApiError(code='fake'), fake.PASSED_RESPONSE] else: api_responses = [fake.PASSED_RESPONSE, netapp_api.NaApiError(code='fake')] self.mock_object(self.client, 'send_request', mock.Mock(side_effect=api_responses)) differing_keys = {'password', 'user', 'server'} self.assertRaises( exception.NetAppException, self.client.modify_active_directory_security_service, 
fake.VSERVER_NAME, differing_keys, new_sec_service, curr_sec_service) cifs_server = self.client._get_cifs_server_name(fake.VSERVER_NAME) current_cifs_username = cifs_server + '\\' + curr_sec_service['user'] set_pass_api_args = { 'user-name': current_cifs_username, 'user-password': new_sec_service['password'] } user_rename_api_args = { 'user-name': current_cifs_username, 'new-user-name': new_sec_service['user'] } if cifs_set_password_failure: send_request_calls = [ mock.call('cifs-local-user-set-password', set_pass_api_args)] else: send_request_calls = [ mock.call('cifs-local-user-set-password', set_pass_api_args), mock.call('cifs-local-user-rename', user_rename_api_args) ] self.client.send_request.assert_has_calls(send_request_calls) @ddt.data(False, True) def test_modify_ldap(self, api_not_found): current_ldap_service = fake.LDAP_AD_SECURITY_SERVICE new_ldap_service = fake.LDAP_LINUX_SECURITY_SERVICE config_name = hashlib.md5( new_ldap_service['id'].encode("latin-1")).hexdigest() api_result = (self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND) if api_not_found else mock.Mock()) mock_create_client = self.mock_object( self.client, '_create_ldap_client') mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_result)) mock_delete_client = self.mock_object( self.client, '_delete_ldap_client', mock.Mock(return_value=api_result)) self.client.modify_ldap(new_ldap_service, current_ldap_service) api_args = {'client-config': config_name, 'client-enabled': 'true'} mock_create_client.assert_called_once_with(new_ldap_service) mock_send_request.assert_has_calls([ mock.call('ldap-config-delete'), mock.call('ldap-config-create', api_args)]) mock_delete_client.assert_called_once_with(current_ldap_service) def test_modify_ldap_config_delete_failure(self): current_ldap_service = fake.LDAP_AD_SECURITY_SERVICE new_ldap_service = fake.LDAP_LINUX_SECURITY_SERVICE mock_create_client = self.mock_object( self.client, '_create_ldap_client') 
mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock( side_effect=netapp_api.NaApiError(code=netapp_api.EAPIERROR))) mock_delete_client = self.mock_object( self.client, '_delete_ldap_client') self.assertRaises(exception.NetAppException, self.client.modify_ldap, new_ldap_service, current_ldap_service) mock_create_client.assert_called_once_with(new_ldap_service) mock_send_request.assert_called_once_with('ldap-config-delete') mock_delete_client.assert_called_once_with(new_ldap_service) def test_modify_ldap_current_config_delete_error(self): current_ldap_service = fake.LDAP_AD_SECURITY_SERVICE new_ldap_service = fake.LDAP_LINUX_SECURITY_SERVICE config_name = hashlib.md5( new_ldap_service['id'].encode("latin-1")).hexdigest() mock_create_client = self.mock_object( self.client, '_create_ldap_client') mock_send_request = self.mock_object( self.client, 'send_request') mock_delete_client = self.mock_object( self.client, '_delete_ldap_client', mock.Mock( side_effect=netapp_api.NaApiError(code=netapp_api.EAPIERROR))) self.client.modify_ldap(new_ldap_service, current_ldap_service) api_args = {'client-config': config_name, 'client-enabled': 'true'} mock_create_client.assert_called_once_with(new_ldap_service) mock_send_request.assert_has_calls([ mock.call('ldap-config-delete'), mock.call('ldap-config-create', api_args)]) mock_delete_client.assert_called_once_with(current_ldap_service) def test_create_fpolicy_event(self): self.mock_object(self.client, 'send_request') self.client.create_fpolicy_event(fake.SHARE_NAME, fake.FPOLICY_EVENT_NAME, fake.FPOLICY_PROTOCOL, fake.FPOLICY_FILE_OPERATIONS_LIST) expected_args = { 'event-name': fake.FPOLICY_EVENT_NAME, 'protocol': fake.FPOLICY_PROTOCOL, 'file-operations': [], } for file_op in fake.FPOLICY_FILE_OPERATIONS_LIST: expected_args['file-operations'].append( {'fpolicy-operation': file_op}) self.client.send_request.assert_called_once_with( 'fpolicy-policy-event-create', expected_args) @ddt.data(None, 
netapp_api.EEVENTNOTFOUND) def test_delete_fpolicy_event(self, send_request_error): if send_request_error: send_request_mock = mock.Mock( side_effect=self._mock_api_error(code=send_request_error)) else: send_request_mock = mock.Mock() self.mock_object(self.client, 'send_request', send_request_mock) self.client.delete_fpolicy_event(fake.SHARE_NAME, fake.FPOLICY_EVENT_NAME) self.client.send_request.assert_called_once_with( 'fpolicy-policy-event-delete', {'event-name': fake.FPOLICY_EVENT_NAME}) def test_delete_fpolicy_event_error(self): eapi_error = self._mock_api_error(code=netapp_api.EAPIERROR) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=eapi_error)) self.assertRaises(exception.NetAppException, self.client.delete_fpolicy_event, fake.SHARE_NAME, fake.FPOLICY_EVENT_NAME) self.client.send_request.assert_called_once_with( 'fpolicy-policy-event-delete', {'event-name': fake.FPOLICY_EVENT_NAME}) def test_get_fpolicy_events(self): api_response = netapp_api.NaElement( fake.FPOLICY_EVENT_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_fpolicy_events( event_name=fake.FPOLICY_EVENT_NAME, protocol=fake.FPOLICY_PROTOCOL, file_operations=fake.FPOLICY_FILE_OPERATIONS_LIST) expected_options = { 'event-name': fake.FPOLICY_EVENT_NAME, 'protocol': fake.FPOLICY_PROTOCOL, 'file-operations': [] } for file_op in fake.FPOLICY_FILE_OPERATIONS_LIST: expected_options['file-operations'].append( {'fpolicy-operation': file_op}) expected_args = { 'query': { 'fpolicy-event-options-config': expected_options, }, } expected = [{ 'event-name': fake.FPOLICY_EVENT_NAME, 'protocol': fake.FPOLICY_PROTOCOL, 'file-operations': fake.FPOLICY_FILE_OPERATIONS_LIST }] self.assertEqual(expected, result) self.client.send_iter_request.assert_called_once_with( 'fpolicy-policy-event-get-iter', expected_args) def test_create_fpolicy_policy(self): self.mock_object(self.client, 'send_request') 
self.client.create_fpolicy_policy(fake.FPOLICY_POLICY_NAME, fake.SHARE_NAME, [fake.FPOLICY_EVENT_NAME], engine=fake.FPOLICY_ENGINE) expected_args = { 'policy-name': fake.FPOLICY_POLICY_NAME, 'events': [], 'engine-name': fake.FPOLICY_ENGINE } for event in [fake.FPOLICY_EVENT_NAME]: expected_args['events'].append( {'event-name': event}) self.client.send_request.assert_called_once_with( 'fpolicy-policy-create', expected_args) @ddt.data(None, netapp_api.EPOLICYNOTFOUND) def test_delete_fpolicy_policy(self, send_request_error): if send_request_error: send_request_mock = mock.Mock( side_effect=self._mock_api_error(code=send_request_error)) else: send_request_mock = mock.Mock() self.mock_object(self.client, 'send_request', send_request_mock) self.client.delete_fpolicy_policy( fake.SHARE_NAME, fake.FPOLICY_POLICY_NAME) self.client.send_request.assert_called_once_with( 'fpolicy-policy-delete', {'policy-name': fake.FPOLICY_POLICY_NAME}) def test_delete_fpolicy_policy_error(self): eapi_error = self._mock_api_error(code=netapp_api.EAPIERROR) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=eapi_error)) self.assertRaises(exception.NetAppException, self.client.delete_fpolicy_policy, fake.SHARE_NAME, fake.FPOLICY_POLICY_NAME) self.client.send_request.assert_called_once_with( 'fpolicy-policy-delete', {'policy-name': fake.FPOLICY_POLICY_NAME}) def test_get_fpolicy_policies(self): api_response = netapp_api.NaElement( fake.FPOLICY_POLICY_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_fpolicy_policies( share_name=fake.SHARE_NAME, policy_name=fake.FPOLICY_POLICY_NAME, engine_name=fake.FPOLICY_ENGINE, event_names=[fake.FPOLICY_EVENT_NAME]) expected_options = { 'policy-name': fake.FPOLICY_POLICY_NAME, 'engine-name': fake.FPOLICY_ENGINE, 'events': [] } for policy in [fake.FPOLICY_EVENT_NAME]: expected_options['events'].append( {'event-name': policy}) expected_args = { 'query': { 
'fpolicy-policy-info': expected_options, }, } expected = [{ 'policy-name': fake.FPOLICY_POLICY_NAME, 'engine-name': fake.FPOLICY_ENGINE, 'events': [fake.FPOLICY_EVENT_NAME] }] self.assertEqual(expected, result) self.client.send_iter_request.assert_called_once_with( 'fpolicy-policy-get-iter', expected_args) def test_create_fpolicy_scope(self): self.mock_object(self.client, 'send_request') self.client.create_fpolicy_scope( fake.FPOLICY_POLICY_NAME, fake.SHARE_NAME, extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE) expected_args = { 'policy-name': fake.FPOLICY_POLICY_NAME, 'shares-to-include': { 'string': fake.SHARE_NAME, }, 'file-extensions-to-include': [], 'file-extensions-to-exclude': [], } for file_ext in fake.FPOLICY_EXT_TO_INCLUDE_LIST: expected_args['file-extensions-to-include'].append( {'string': file_ext}) for file_ext in fake.FPOLICY_EXT_TO_EXCLUDE_LIST: expected_args['file-extensions-to-exclude'].append( {'string': file_ext}) self.client.send_request.assert_called_once_with( 'fpolicy-policy-scope-create', expected_args) def test_modify_fpolicy_scope(self): self.mock_object(self.client, 'send_request') self.client.modify_fpolicy_scope( fake.SHARE_NAME, fake.FPOLICY_POLICY_NAME, shares_to_include=[fake.SHARE_NAME], extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE) expected_args = { 'policy-name': fake.FPOLICY_POLICY_NAME, 'file-extensions-to-include': [], 'file-extensions-to-exclude': [], 'shares-to-include': [{ 'string': fake.SHARE_NAME, }], } for file_ext in fake.FPOLICY_EXT_TO_INCLUDE_LIST: expected_args['file-extensions-to-include'].append( {'string': file_ext}) for file_ext in fake.FPOLICY_EXT_TO_EXCLUDE_LIST: expected_args['file-extensions-to-exclude'].append( {'string': file_ext}) self.client.send_request.assert_called_once_with( 'fpolicy-policy-scope-modify', expected_args) @ddt.data(None, netapp_api.ESCOPENOTFOUND) def 
test_delete_fpolicy_scope(self, send_request_error): if send_request_error: send_request_mock = mock.Mock( side_effect=self._mock_api_error(code=send_request_error)) else: send_request_mock = mock.Mock() self.mock_object(self.client, 'send_request', send_request_mock) self.client.delete_fpolicy_scope(fake.FPOLICY_POLICY_NAME) self.client.send_request.assert_called_once_with( 'fpolicy-policy-scope-delete', {'policy-name': fake.FPOLICY_POLICY_NAME}) def test_delete_fpolicy_scope_error(self): eapi_error = self._mock_api_error(code=netapp_api.EAPIERROR) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=eapi_error)) self.assertRaises(exception.NetAppException, self.client.delete_fpolicy_scope, fake.FPOLICY_POLICY_NAME) self.client.send_request.assert_called_once_with( 'fpolicy-policy-scope-delete', {'policy-name': fake.FPOLICY_POLICY_NAME}) def test_get_fpolicy_scopes(self): api_response = netapp_api.NaElement( fake.FPOLICY_SCOPE_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_fpolicy_scopes( share_name=fake.SHARE_NAME, policy_name=fake.FPOLICY_POLICY_NAME, extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, shares_to_include=[fake.SHARE_NAME]) expected_options = { 'policy-name': fake.FPOLICY_POLICY_NAME, 'shares-to-include': [{ 'string': fake.SHARE_NAME, }], 'file-extensions-to-include': [], 'file-extensions-to-exclude': [], } for file_ext in fake.FPOLICY_EXT_TO_INCLUDE_LIST: expected_options['file-extensions-to-include'].append( {'string': file_ext}) for file_ext in fake.FPOLICY_EXT_TO_EXCLUDE_LIST: expected_options['file-extensions-to-exclude'].append( {'string': file_ext}) expected_args = { 'query': { 'fpolicy-scope-config': expected_options, }, } expected = [{ 'policy-name': fake.FPOLICY_POLICY_NAME, 'file-extensions-to-include': fake.FPOLICY_EXT_TO_INCLUDE_LIST, 'file-extensions-to-exclude': 
fake.FPOLICY_EXT_TO_EXCLUDE_LIST, 'shares-to-include': [fake.SHARE_NAME], }] self.assertEqual(expected, result) self.client.send_iter_request.assert_called_once_with( 'fpolicy-policy-scope-get-iter', expected_args) def test_enable_fpolicy_policy(self): self.mock_object(self.client, 'send_request') self.client.enable_fpolicy_policy( fake.SHARE_NAME, fake.FPOLICY_POLICY_NAME, 10) expected_args = { 'policy-name': fake.FPOLICY_POLICY_NAME, 'sequence-number': 10, } self.client.send_request.assert_called_once_with( 'fpolicy-enable-policy', expected_args) @ddt.data(None, netapp_api.EPOLICYNOTFOUND) def test_disable_fpolicy_policy(self, send_request_error): if send_request_error: send_request_mock = mock.Mock( side_effect=self._mock_api_error(code=send_request_error)) else: send_request_mock = mock.Mock() self.mock_object(self.client, 'send_request', send_request_mock) self.client.disable_fpolicy_policy(fake.FPOLICY_POLICY_NAME) expected_args = { 'policy-name': fake.FPOLICY_POLICY_NAME, } self.client.send_request.assert_called_once_with( 'fpolicy-disable-policy', expected_args) def test_disable_fpolicy_policy_error(self): eapi_error = self._mock_api_error(code=netapp_api.EAPIERROR) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=eapi_error)) self.assertRaises(exception.NetAppException, self.client.disable_fpolicy_policy, fake.FPOLICY_POLICY_NAME) self.client.send_request.assert_called_once_with( 'fpolicy-disable-policy', {'policy-name': fake.FPOLICY_POLICY_NAME}) def test_get_fpolicy_status(self): api_response = netapp_api.NaElement( fake.FPOLICY_POLICY_STATUS_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.get_fpolicy_policies_status( share_name=fake.SHARE_NAME, policy_name=fake.FPOLICY_POLICY_NAME) expected_args = { 'query': { 'fpolicy-policy-status-info': { 'policy-name': fake.FPOLICY_POLICY_NAME, 'status': 'true' }, }, } expected = [{ 'policy-name': 
fake.FPOLICY_POLICY_NAME, 'status': True, 'sequence-number': '1' }] self.assertEqual(expected, result) self.client.send_iter_request.assert_called_once_with( 'fpolicy-policy-status-get-iter', expected_args) def test_is_svm_migrate_supported(self): self.client.features.add_feature('SVM_MIGRATE') result = self.client.is_svm_migrate_supported() self.assertTrue(result) @ddt.data( {"body": fake.FAKE_HTTP_BODY, "headers": fake.FAKE_HTTP_HEADER, "query": {}, "url_params": fake.FAKE_URL_PARAMS }, {"body": {}, "headers": fake.FAKE_HTTP_HEADER, "query": fake.FAKE_HTTP_QUERY, "url_params": fake.FAKE_URL_PARAMS }, ) @ddt.unpack def test__format_request(self, body, headers, query, url_params): expected_result = { "body": body, "headers": headers, "query": query, "url_params": url_params } result = self.client._format_request( body, headers=headers, query=query, url_params=url_params) for k, v in expected_result.items(): self.assertIn(k, result) self.assertEqual(result.get(k), v) @ddt.data( {"dest_ipspace": None, "check_only": True}, {"dest_ipspace": "fake_dest_ipspace", "check_only": False}, ) @ddt.unpack def test_svm_migration_start(self, dest_ipspace, check_only): api_args = { "auto_cutover": False, "auto_source_cleanup": True, "check_only": check_only, "source": { "cluster": {"name": fake.CLUSTER_NAME}, "svm": {"name": fake.VSERVER_NAME}, }, "destination": { "volume_placement": { "aggregates": [fake.SHARE_AGGREGATE_NAME], }, }, } if dest_ipspace: ipspace_data = { "ipspace": {"name": dest_ipspace} } api_args['destination'].update(ipspace_data) self.mock_object(self.client, '_format_request', mock.Mock(return_value=api_args)) self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.FAKE_MIGRATION_RESPONSE_WITH_JOB)) result = self.client.svm_migration_start( fake.CLUSTER_NAME, fake.VSERVER_NAME, [fake.SHARE_AGGREGATE_NAME], dest_ipspace=dest_ipspace, check_only=check_only) self.client._format_request.assert_called_once_with(api_args) 
self.client.send_request.assert_called_once_with( 'svm-migration-start', api_args=api_args, use_zapi=False) self.assertEqual(result, fake.FAKE_MIGRATION_RESPONSE_WITH_JOB) @ddt.data({"check_only": False}, {"check_only": True}) def test_share_server_migration_start_failed(self, check_only): api_args = {} self.mock_object(self.client, '_format_request', mock.Mock(return_value=api_args)) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=netapp_api.NaApiError(message='fake'))) self.assertRaises( netapp_api.NaApiError, self.client.svm_migration_start, fake.CLUSTER_NAME, fake.VSERVER_NAME, [fake.SHARE_AGGREGATE_NAME], check_only=check_only ) def test_svm_migrate_complete(self): migration_id = 'ongoing_migration_id' request = { 'action': 'cutover' } expected_url_params = { 'svm_migration_id': migration_id } self.mock_object(self.client, '_format_request', mock.Mock(return_value=request)) self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.FAKE_MIGRATION_RESPONSE_WITH_JOB)) self.client.svm_migrate_complete(migration_id) self.client._format_request.assert_called_once_with( request, url_params=expected_url_params) self.client.send_request.assert_called_once_with( 'svm-migration-complete', api_args=request, use_zapi=False) def test_get_job(self): request = {} job_uuid = 'fake_job_uuid' url_params = { 'job_uuid': job_uuid } self.mock_object(self.client, '_format_request', mock.Mock(return_value=request)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.FAKE_JOB_SUCCESS_STATE)) result = self.client.get_job(job_uuid) self.assertEqual(fake.FAKE_JOB_SUCCESS_STATE, result) self.client._format_request.assert_called_once_with( request, url_params=url_params) self.client.send_request.assert_called_once_with( 'get-job', api_args=request, use_zapi=False) def test_svm_migrate_cancel(self): request = {} migration_id = 'fake_migration_uuid' url_params = { "svm_migration_id": migration_id } self.mock_object(self.client, 
'_format_request', mock.Mock(return_value=request)) self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.FAKE_MIGRATION_RESPONSE_WITH_JOB)) result = self.client.svm_migrate_cancel(migration_id) self.assertEqual(fake.FAKE_MIGRATION_RESPONSE_WITH_JOB, result) self.client._format_request.assert_called_once_with( request, url_params=url_params) self.client.send_request.assert_called_once_with( 'svm-migration-cancel', api_args=request, use_zapi=False) def test_svm_migration_get(self): request = {} migration_id = 'fake_migration_uuid' url_params = { "svm_migration_id": migration_id } self.mock_object(self.client, '_format_request', mock.Mock(return_value=request)) self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.FAKE_MIGRATION_JOB_SUCCESS)) result = self.client.svm_migration_get(migration_id) self.assertEqual(fake.FAKE_MIGRATION_JOB_SUCCESS, result) self.client._format_request.assert_called_once_with( request, url_params=url_params) self.client.send_request.assert_called_once_with( 'svm-migration-get', api_args=request, use_zapi=False) def test_svm_migrate_pause(self): request = { "action": "pause" } migration_id = 'fake_migration_uuid' url_params = { "svm_migration_id": migration_id } self.mock_object(self.client, '_format_request', mock.Mock(return_value=request)) self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.FAKE_MIGRATION_RESPONSE_WITH_JOB)) result = self.client.svm_migrate_pause(migration_id) self.assertEqual(fake.FAKE_MIGRATION_RESPONSE_WITH_JOB, result) self.client._format_request.assert_called_once_with( request, url_params=url_params) self.client.send_request.assert_called_once_with( 'svm-migration-pause', api_args=request, use_zapi=False) def test_migration_check_job_state(self): self.mock_object(self.client, 'get_job', mock.Mock(return_value=fake.FAKE_JOB_SUCCESS_STATE)) result = self.client.get_migration_check_job_state( fake.FAKE_JOB_ID ) self.assertEqual(result, 
fake.FAKE_JOB_SUCCESS_STATE) self.client.get_job.assert_called_once_with(fake.FAKE_JOB_ID) @ddt.data(netapp_api.ENFS_V4_0_ENABLED_MIGRATION_FAILURE, netapp_api.EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER) def test_migration_check_job_state_failed(self, error_code): self.mock_object( self.client, 'get_job', mock.Mock(side_effect=netapp_api.NaApiError(code=error_code))) self.assertRaises( exception.NetAppException, self.client.get_migration_check_job_state, fake.FAKE_JOB_ID ) self.client.get_job.assert_called_once_with(fake.FAKE_JOB_ID) @ddt.data(True, False) def test_get_volume_state(self, has_record): api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_STATE_RESPONSE) mock_send_iter_request = self.mock_object( self.client, 'send_iter_request', mock.Mock(return_value=api_response)) mock_has_record = self.mock_object(self.client, '_has_records', mock.Mock(return_value=has_record)) state = self.client.get_volume_state(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-state-attributes': { 'state': None } } }, } mock_send_iter_request.assert_called_once_with( 'volume-get-iter', volume_get_iter_args) mock_has_record.assert_called_once_with(api_response) if has_record: self.assertEqual('online', state) else: self.assertEqual('', state) @ddt.data(True, False) def test_is_flexgroup_volume(self, is_flexgroup): self.client.features.add_feature('FLEXGROUP', supported=True) api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_STYLE_FLEXGROUP_RESPONSE if is_flexgroup else fake.VOLUME_GET_ITER_STYLE_FLEXVOL_RESPONSE) mock_send_iter_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) mock_has_record = self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) mock_is_style_extended_flexgroup = self.mock_object( na_utils, 'is_style_extended_flexgroup', 
mock.Mock(return_value=is_flexgroup)) is_flexgroup_res = self.client.is_flexgroup_volume(fake.SHARE_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake.SHARE_NAME, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'style-extended': None, }, }, }, } mock_send_iter_request.assert_called_once_with( 'volume-get-iter', volume_get_iter_args) mock_has_record.assert_called_once_with(api_response) mock_is_style_extended_flexgroup.assert_called_once_with( fake.FLEXGROUP_STYLE_EXTENDED if is_flexgroup else fake.FLEXVOL_STYLE_EXTENDED) self.assertEqual(is_flexgroup, is_flexgroup_res) def test_is_flexgroup_volume_not_found(self): self.client.features.add_feature('FLEXGROUP', supported=True) api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.StorageResourceNotFound, self.client.is_flexgroup_volume, fake.SHARE_NAME) def test_is_flexgroup_volume_not_unique(self): self.client.features.add_feature('FLEXGROUP', supported=True) api_response = netapp_api.NaElement( fake.VOLUME_GET_ITER_NOT_UNIQUE_RESPONSE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.is_flexgroup_volume, fake.SHARE_NAME) def test_is_flexgroup_volume_unsupported(self): self.client.features.add_feature('FLEXGROUP', supported=False) result = self.client.is_flexgroup_volume(fake.SHARE_NAME) self.assertFalse(result) def test_is_flexgroup_supported(self): self.client.features.add_feature('FLEXGROUP') result = self.client.is_flexgroup_supported() self.assertTrue(result) def test_is_flexgroup_fan_out_supported(self): self.client.features.add_feature('FLEXGROUP_FAN_OUT') result = self.client.is_flexgroup_fan_out_supported() self.assertTrue(result) def test_get_job_state(self): api_response = 
netapp_api.NaElement(fake.JOB_GET_STATE_RESPONSE) mock_send_iter_request = self.mock_object( self.client, 'send_iter_request', mock.Mock(return_value=api_response)) mock_has_record = self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) job_state_res = self.client.get_job_state(fake.JOB_ID) job_get_iter_args = { 'query': { 'job-info': { 'job-id': fake.JOB_ID, }, }, 'desired-attributes': { 'job-info': { 'job-state': None, }, }, } mock_send_iter_request.assert_called_once_with( 'job-get-iter', job_get_iter_args, enable_tunneling=False) mock_has_record.assert_called_once_with(api_response) self.assertEqual(fake.JOB_STATE, job_state_res) def test_get_job_state_not_found(self): api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE) self.mock_object(self.client, '_has_records', mock.Mock(return_value=False)) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_job_state, fake.JOB_ID) def test_get_job_state_not_unique(self): api_response = netapp_api.NaElement( fake.JOB_GET_STATE_NOT_UNIQUE_RESPONSE) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) self.assertRaises(exception.NetAppException, self.client.get_job_state, fake.JOB_ID) def test_check_snaprestore_license_svm_scoped_notfound(self): self.mock_object(self.client, 'restore_snapshot', mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EAPIERROR, message=fake.NO_SNAPRESTORE_LICENSE))) result = self.client.check_snaprestore_license() self.assertIs(False, result) def test_check_snaprestore_license_svm_scoped_found(self): self.mock_object(self.client, 'restore_snapshot', mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EAPIERROR, message='Other error'))) result = self.client.check_snaprestore_license() self.assertIs(True, result) def 
test_check_snaprestore_license_svm_scoped_found_exception(self): self.mock_object(client_cmode.LOG, 'exception') self.mock_object(self.client, 'restore_snapshot', mock.Mock(return_value=None)) self.assertRaises( exception.NetAppException, self.client.check_snaprestore_license) client_cmode.LOG.exception.assert_called_once() def test_get_svm_volumes_total_size(self): expected = 1 request = {} api_args = { 'svm.name': fake.VSERVER_NAME, 'fields': 'size' } self.mock_object(self.client, '_format_request', mock.Mock(return_value=api_args)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.FAKE_GET_VOLUME)) result = self.client.get_svm_volumes_total_size(fake.VSERVER_NAME) self.client._format_request.assert_called_once_with(request, query=api_args) self.client.send_request.assert_called_once_with( 'svm-migration-get-progress', api_args=api_args, use_zapi=False) self.assertEqual(expected, result) def test_configure_active_directory_credential_error(self): msg = "could not authenticate" self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EAPIERROR, message=msg)) self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'configure_cifs_aes_encryption') self.mock_object(self.client, 'set_preferred_dc') self.mock_object(self.client, '_get_cifs_server_name') self.assertRaises(exception.SecurityServiceFailedAuth, self.client.configure_active_directory, fake.CIFS_SECURITY_SERVICE, fake.VSERVER_NAME, False) def test_configure_active_directory_user_privilege_error(self): msg = "insufficient access" self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.EAPIERROR, message=msg)) self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'configure_cifs_aes_encryption') self.mock_object(self.client, 'set_preferred_dc') self.mock_object(self.client, '_get_cifs_server_name') self.assertRaises(exception.SecurityServiceFailedAuth, self.client.configure_active_directory, 
fake.CIFS_SECURITY_SERVICE, fake.VSERVER_NAME, False) def test_snapmirror_restore_vol(self): self.mock_object(self.client, 'send_request') self.client.snapmirror_restore_vol(source_path=fake.SM_SOURCE_PATH, dest_path=fake.SM_DEST_PATH, source_snapshot=fake.SNAPSHOT_NAME, ) snapmirror_restore_args = { 'source-location': fake.SM_SOURCE_PATH, 'destination-location': fake.SM_DEST_PATH, 'source-snapshot': fake.SNAPSHOT_NAME, } self.client.send_request.assert_has_calls([ mock.call('snapmirror-restore', snapmirror_restore_args)]) @ddt.data({'snapmirror_label': None, 'newer_than': '2345'}, {'snapmirror_label': "fake_backup", 'newer_than': None}) @ddt.unpack def test_list_volume_snapshots(self, snapmirror_label, newer_than): api_response = netapp_api.NaElement( fake.SNAPSHOT_GET_ITER_SNAPMIRROR_RESPONSE) self.mock_object(self.client, 'send_iter_request', mock.Mock(return_value=api_response)) result = self.client.list_volume_snapshots( fake.SHARE_NAME, snapmirror_label=snapmirror_label, newer_than=newer_than) snapshot_get_iter_args = { 'query': { 'snapshot-info': { 'volume': fake.SHARE_NAME, }, }, } if newer_than: snapshot_get_iter_args['query']['snapshot-info'][ 'access-time'] = '>' + newer_than if snapmirror_label: snapshot_get_iter_args['query']['snapshot-info'][ 'snapmirror-label'] = snapmirror_label self.client.send_iter_request.assert_has_calls([ mock.call('snapshot-get-iter', snapshot_get_iter_args)]) expected = [fake.SNAPSHOT_NAME] self.assertEqual(expected, result) def test_is_snaplock_compliance_clock_configured(self): api_response = netapp_api.NaElement(fake.SNAPLOCK_CLOCK_CONFIG_1) self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.is_snaplock_compliance_clock_configured( "fake_node", ) self.assertIs(True, result) def test_is_snaplock_compliance_clock_configured_negative(self): api_response = netapp_api.NaElement(fake.SNAPLOCK_CLOCK_CONFIG_2) self.mock_object(self.client, 'send_request', 
        # (continuation of a test whose opening lines precede this chunk:
        # asserts the compliance clock is reported as NOT configured)
                         mock.Mock(return_value=api_response))
        result = self.client.is_snaplock_compliance_clock_configured(
            "fake_node"
        )
        self.assertIs(False, result)

    def test_is_snaplock_compliance_clock_configured_none(self):
        # A response missing the clock-config element must raise
        # NetAppException rather than returning a boolean.
        api_response = netapp_api.NaElement(fake.SNAPLOCK_CLOCK_CONFIG_1)
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=api_response))
        self.mock_object(api_response, 'get_child_by_name',
                         mock.Mock(return_value=None))
        self.assertRaises(
            exception.NetAppException,
            self.client.is_snaplock_compliance_clock_configured,
            "node1"
        )

    @ddt.data({'options': {'snaplock_autocommit_period': "4hours",
                           'snaplock_min_retention_period': "6days",
                           'snaplock_max_retention_period': "8months",
                           'snaplock_default_retention_period': "8days"},
               },
              {'options': {'snaplock_autocommit_period': "4hours",
                           'snaplock_min_retention_period': "6days",
                           'snaplock_max_retention_period': "8months",
                           'snaplock_default_retention_period': "min"},
               },
              {'options': {'snaplock_autocommit_period': "4hours",
                           'snaplock_min_retention_period': "6days",
                           'snaplock_max_retention_period': "8months",
                           'snaplock_default_retention_period': "max"},
               },
              )
    @ddt.unpack
    def test_set_snaplock_attributes(self, options):
        api_args = {
            'volume': fake.SHARE_NAME,
            'autocommit-period': options.get('snaplock_autocommit_period'),
            'minimum-retention-period': options.get(
                'snaplock_min_retention_period'),
            'maximum-retention-period': options.get(
                'snaplock_max_retention_period'),
            'default-retention-period': options.get(
                'snaplock_default_retention_period'),
        }
        # "min"/"max" sentinels resolve to the corresponding explicit period.
        if options.get('snaplock_default_retention_period') == "min":
            api_args['default-retention-period'] = options.get(
                'snaplock_min_retention_period')
        elif options.get('snaplock_default_retention_period') == 'max':
            api_args['default-retention-period'] = options.get(
                'snaplock_max_retention_period')
        self.mock_object(self.client, 'send_request')
        self.client.set_snaplock_attributes(fake.SHARE_NAME, **options)
        self.client.send_request.assert_has_calls([
            mock.call('volume-set-snaplock-attrs', api_args)])

    def test_set_snaplock_attributes_all_none(self):
        # When every snaplock option is None, no ZAPI call may be issued.
        self.mock_object(self.client, 'send_request')
        options = {'snaplock_autocommit_period': None,
                   'snaplock_min_retention_period': None,
                   'snaplock_max_retention_period': None,
                   'snaplock_default_retention_period': None,
                   }
        self.client.set_snaplock_attributes(fake.SHARE_NAME, **options)
        self.client.send_request.assert_not_called()

    def test_get_vserver_aggr_snaplock_type(self):
        self.client.features.SNAPLOCK = True
        api_response = netapp_api.NaElement(
            fake.VSERVER_SHOW_AGGR_GET_RESPONSE,
        )
        self.mock_object(self.client, 'send_iter_request',
                         mock.Mock(return_value=api_response))
        result = self.client.get_vserver_aggr_snaplock_type(
            fake.SHARE_AGGREGATE_NAMES
        )
        self.assertEqual("compliance", result)

    def test_get_vserver_aggr_snaplock_type_negative(self):
        # Without the SNAPLOCK feature flag the snaplock type is None.
        self.client.features.SNAPLOCK = False
        api_response = netapp_api.NaElement(
            fake.VSERVER_SHOW_AGGR_GET_RESPONSE,
        )
        self.mock_object(self.client, 'send_iter_request',
                         mock.Mock(return_value=api_response))
        result = self.client.get_vserver_aggr_snaplock_type(
            fake.SHARE_AGGREGATE_NAMES
        )
        self.assertIsNone(result)

    @ddt.data("compliance", "enterprise")
    def test__is_snaplock_enabled_volume_true(self, snaplock_type):
        # Both snaplock flavors count as "enabled".
        vol_attr = {'snaplock-type': snaplock_type}
        self.mock_object(self.client, 'get_volume',
                         mock.Mock(return_value=vol_attr))
        result = self.client._is_snaplock_enabled_volume(
            fake.SHARE_AGGREGATE_NAMES
        )
        self.assertIs(True, result)

    def test__is_snaplock_enabled_volume_false(self):
        vol_attr = {'snaplock-type': 'non-snaplock'}
        self.mock_object(self.client, 'get_volume',
                         mock.Mock(return_value=vol_attr))
        result = self.client._is_snaplock_enabled_volume(
            fake.SHARE_AGGREGATE_NAMES
        )
        self.assertIs(False, result)

    def test_get_storage_failover_partner(self):
        api_response = netapp_api.NaElement(fake.STORAGE_FAIL_OVER_PARTNER)
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=api_response))
        result = self.client.get_storage_failover_partner("fake_node")
        self.assertEqual("fake_partner_node", result)

    def test_get_migratable_data_lif_for_node(self):
        api_response = netapp_api.NaElement(
            fake.NET_INTERFACE_GET_ITER_RESPONSE)
        self.mock_object(self.client, 'send_iter_request',
                         mock.Mock(return_value=api_response))
        failover_policy = ['system-defined', 'sfo-partner-only']
        protocols = ['nfs', 'cifs']
        api_args = {
            'query': {
                'net-interface-info': {
                    'failover-policy': '|'.join(failover_policy),
                    'home-node': "fake_node",
                    'data-protocols': {
                        'data-protocol': '|'.join(protocols),
                    }
                }
            }
        }
        result = self.client.get_migratable_data_lif_for_node("fake_node")
        self.client.send_iter_request.assert_has_calls([
            mock.call('net-interface-get-iter', api_args)])
        self.assertEqual(list(fake.LIF_NAMES), result)

    def test_get_data_lif_details_for_nodes(self):
        api_response = netapp_api.NaElement(
            fake.DATA_LIF_CAPACITY_DETAILS)
        self.mock_object(self.client, 'send_iter_request',
                         mock.Mock(return_value=api_response))
        api_args = {
            'desired-attributes': {
                'data-lif-capacity-details-info': {
                    'limit-for-node': None,
                    'count-for-node': None,
                    'node': None
                },
            },
        }
        expected_result = [{'limit-for-node': '512',
                            'count-for-node': '44',
                            'node': 'fake_node',
                            }]
        result = self.client.get_data_lif_details_for_nodes()
        self.client.send_iter_request.assert_has_calls([
            mock.call('data-lif-capacity-details', api_args)])
        self.assertEqual(expected_result, result)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py0000664000175000017500000112203100000000000032054 0ustar00zuulzuul00000000000000# Copyright (c) 2023 NetApp, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import math
import time
from unittest import mock

import ddt
from oslo_log import log
from oslo_utils import units

from manila import exception
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.client import client_cmode_rest
from manila.share.drivers.netapp.dataontap.client import rest_api as netapp_api
from manila.share.drivers.netapp import utils as netapp_utils
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake
from manila import utils


@ddt.ddt
class NetAppRestCmodeClientTestCase(test.TestCase):
    # Unit tests for the ONTAP REST client (client_cmode_rest).

    def setUp(self):
        super(NetAppRestCmodeClientTestCase, self).setUp()

        # Mock loggers as themselves to allow logger arg validation
        mock_logger = log.getLogger('mock_logger')
        self.mock_object(client_cmode_rest.LOG, 'error',
                         mock.Mock(side_effect=mock_logger.error))
        self.mock_object(client_cmode_rest.LOG, 'warning',
                         mock.Mock(side_effect=mock_logger.warning))
        self.mock_object(client_cmode_rest.LOG, 'debug',
                         mock.Mock(side_effect=mock_logger.debug))
        self.mock_object(client_cmode.NetAppCmodeClient,
                         'get_ontapi_version',
                         mock.Mock(return_value=(1, 20)))
        # store the original reference so we can call it later in
        # test_get_ontap_version
        self.original_get_ontap_version = (
            client_cmode_rest.NetAppRestClient.get_ontap_version)
        self.mock_object(client_cmode_rest.NetAppRestClient,
                         'get_ontap_version',
                         mock.Mock(return_value={
                             'version-tuple': (9, 12, 1),
                             'version': fake.VERSION,
                         }))
        # Keep the original so the credential-check tests can invoke it
        # directly while the class attribute stays mocked.
        self.original_check_for_cluster_credentials = (
            client_cmode_rest.NetAppRestClient._check_for_cluster_credentials)
        self.mock_object(client_cmode_rest.NetAppRestClient,
                         '_check_for_cluster_credentials',
                         mock.Mock(return_value=True))
        self.mock_object(client_cmode.NetAppCmodeClient,
                         'get_system_version',
                         mock.Mock(return_value={
                             'version-tuple': (9, 10, 1),
                             'version': fake.VERSION,
                         }))

        self.client = client_cmode_rest.NetAppRestClient(
            **fake.CONNECTION_INFO)
        self.client.connection = mock.MagicMock()

    def _mock_api_error(self, code='fake', message='fake'):
        # Helper: a mock whose call raises NaApiError with the given code.
        return mock.Mock(
            side_effect=netapp_api.api.NaApiError(code=code, message=message))

    def test_send_request(self):
        expected = 'fake_response'
        mock_get_records = self.mock_object(
            self.client, 'get_records',
            mock.Mock(return_value=expected))
        res = self.client.send_request(
            fake.FAKE_ACTION_URL, 'get',
            body=fake.FAKE_HTTP_BODY,
            query=fake.FAKE_HTTP_QUERY, enable_tunneling=False)
        self.assertEqual(expected, res)
        mock_get_records.assert_called_once_with(
            fake.FAKE_ACTION_URL, fake.FAKE_HTTP_QUERY, False, 10000)

    def test_send_request_post(self):
        # 201 responses return the payload directly (no job polling).
        expected = (201, 'fake_response')
        mock_invoke = self.mock_object(
            self.client.connection, 'invoke_successfully',
            mock.Mock(return_value=expected))
        res = self.client.send_request(
            fake.FAKE_ACTION_URL, 'post',
            body=fake.FAKE_HTTP_BODY,
            query=fake.FAKE_HTTP_QUERY, enable_tunneling=False)
        self.assertEqual(expected[1], res)
        mock_invoke.assert_called_once_with(
            fake.FAKE_ACTION_URL, 'post',
            body=fake.FAKE_HTTP_BODY,
            query=fake.FAKE_HTTP_QUERY, enable_tunneling=False)

    def test_send_request_wait(self):
        # 202 responses are async jobs: the client must poll the job link.
        expected = (202, fake.JOB_RESPONSE_REST)
        mock_invoke = self.mock_object(
            self.client.connection, 'invoke_successfully',
            mock.Mock(return_value=expected))
        mock_wait = self.mock_object(
            self.client, '_wait_job_result',
            mock.Mock(return_value=expected[1]))
        res = self.client.send_request(
            fake.FAKE_ACTION_URL, 'post',
            body=fake.FAKE_HTTP_BODY,
            query=fake.FAKE_HTTP_QUERY, enable_tunneling=False)
        self.assertEqual(expected[1], res)
        mock_invoke.assert_called_once_with(
            fake.FAKE_ACTION_URL, 'post',
            body=fake.FAKE_HTTP_BODY,
            query=fake.FAKE_HTTP_QUERY, enable_tunneling=False)
        # [4:] strips the '/api' prefix from the job's self link.
        mock_wait.assert_called_once_with(
            expected[1]['job']['_links']['self']['href'][4:])

    @ddt.data(True, False)
    def test_get_records(self, enable_tunneling):
        # Three pages: 10 + 10 + 8 records, chained via '_links.next'.
        api_responses = [
            (200, fake.VOLUME_GET_ITER_RESPONSE_REST_PAGE),
            (200, fake.VOLUME_GET_ITER_RESPONSE_REST_PAGE),
            (200, fake.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE),
        ]
        mock_invoke = self.mock_object(
            self.client.connection, 'invoke_successfully',
            mock.Mock(side_effect=copy.deepcopy(api_responses)))
        query = {
            'fields': 'name'
        }
        result = self.client.get_records(
            '/storage/volumes/', query=query,
            enable_tunneling=enable_tunneling, max_page_length=10)
        num_records = result['num_records']
        self.assertEqual(28, num_records)
        self.assertEqual(28, len(result['records']))
        expected_records = []
        expected_records.extend(api_responses[0][1]['records'])
        expected_records.extend(api_responses[1][1]['records'])
        expected_records.extend(api_responses[2][1]['records'])
        self.assertEqual(expected_records, result['records'])
        next_tag = result.get('next')
        self.assertIsNone(next_tag)
        expected_query = copy.deepcopy(query)
        expected_query['max_records'] = 10
        next_url_1 = api_responses[0][1]['_links']['next']['href'][4:]
        next_url_2 = api_responses[1][1]['_links']['next']['href'][4:]
        mock_invoke.assert_has_calls([
            mock.call('/storage/volumes/', 'get', query=expected_query,
                      enable_tunneling=enable_tunneling),
            mock.call(next_url_1, 'get', query=None,
                      enable_tunneling=enable_tunneling),
            mock.call(next_url_2, 'get', query=None,
                      enable_tunneling=enable_tunneling),
        ])

    def test_get_records_single_page(self):
        api_response = (
            200, fake.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE)
        mock_invoke = self.mock_object(self.client.connection,
                                       'invoke_successfully',
                                       mock.Mock(return_value=api_response))
        query = {
            'fields': 'name'
        }
        result = self.client.get_records(
            '/storage/volumes/', query=query, max_page_length=10)
        num_records = result['num_records']
        self.assertEqual(8, num_records)
        self.assertEqual(8, len(result['records']))
        next_tag = result.get('next')
        self.assertIsNone(next_tag)
        args = copy.deepcopy(query)
        args['max_records'] = 10
        mock_invoke.assert_has_calls([
            mock.call('/storage/volumes/', 'get', query=args,
                      enable_tunneling=True),
        ])

    def test_get_records_not_found(self):
        api_response = (200, fake.NO_RECORDS_RESPONSE_REST)
        mock_invoke = self.mock_object(self.client.connection,
                                       'invoke_successfully',
                                       mock.Mock(return_value=api_response))
        result = self.client.get_records('/storage/volumes/')
        num_records = result['num_records']
        self.assertEqual(0, num_records)
        self.assertEqual(0, len(result['records']))
        args = {
            'max_records': client_cmode_rest.DEFAULT_MAX_PAGE_LENGTH
        }
        mock_invoke.assert_has_calls([
            mock.call('/storage/volumes/', 'get', query=args,
                      enable_tunneling=True),
        ])

    def test_get_records_timeout(self):
        # To simulate timeout, max_records is 30, but the API returns less
        # records and fill the 'next url' pointing to the next page.
        max_records = 30
        api_responses = [
            (200, fake.VOLUME_GET_ITER_RESPONSE_REST_PAGE),
            (200, fake.VOLUME_GET_ITER_RESPONSE_REST_PAGE),
            (200, fake.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE),
        ]
        mock_invoke = self.mock_object(
            self.client.connection, 'invoke_successfully',
            mock.Mock(side_effect=copy.deepcopy(api_responses)))
        query = {
            'fields': 'name'
        }
        result = self.client.get_records(
            '/storage/volumes/', query=query, max_page_length=max_records)
        num_records = result['num_records']
        self.assertEqual(28, num_records)
        self.assertEqual(28, len(result['records']))
        expected_records = []
        expected_records.extend(api_responses[0][1]['records'])
        expected_records.extend(api_responses[1][1]['records'])
        expected_records.extend(api_responses[2][1]['records'])
        self.assertEqual(expected_records, result['records'])
        next_tag = result.get('next', None)
        self.assertIsNone(next_tag)
        args1 = copy.deepcopy(query)
        args1['max_records'] = max_records
        next_url_1 = api_responses[0][1]['_links']['next']['href'][4:]
        next_url_2 = api_responses[1][1]['_links']['next']['href'][4:]
        mock_invoke.assert_has_calls([
            mock.call('/storage/volumes/', 'get', query=args1,
                      enable_tunneling=True),
            mock.call(next_url_1, 'get', query=None,
                      enable_tunneling=True),
            mock.call(next_url_2, 'get', query=None,
                      enable_tunneling=True),
        ])

    def test__getattr__(self):
        # NOTE(nahimsouza): get_ontapi_version is implemented only in ZAPI
        # client, therefore, it will call __getattr__
        self.client.get_ontapi_version()

    @ddt.data(True, False)
    def test_get_ontap_version(self, cached):
        # Restore the real implementation (setUp mocked the class attr).
        self.client.get_ontap_version = self.original_get_ontap_version
        api_response = {
            'records': [
                {
                    'version': {
                        'generation': 9,
                        'major': 11,
                        'minor': 1,
                        'full': 'NetApp Release 9.11.1'
                    }
                }]
        }
        return_mock = {
            'version': 'NetApp Release 9.11.1',
            'version-tuple': (9, 11, 1)
        }
        mock_connect = self.mock_object(self.client.connection,
                                        'get_ontap_version',
                                        mock.Mock(return_value=return_mock))
        mock_send_request = self.mock_object(
            self.client, 'send_request',
            mock.Mock(return_value=api_response))
        result = self.client.get_ontap_version(self=self.client,
                                               cached=cached)
        if cached:
            mock_connect.assert_called_once()
        else:
            mock_send_request.assert_called_once_with(
                '/cluster/nodes', 'get',
                query={'fields': 'version'},
                enable_tunneling=False)
        self.assertEqual(return_mock, result)

    def test__wait_job_result(self):
        response = fake.JOB_SUCCESSFUL_REST
        self.mock_object(self.client,
                         'send_request',
                         mock.Mock(return_value=response))
        result = self.client._wait_job_result(
            f'/cluster/jobs/{fake.FAKE_UUID}')
        self.assertEqual(response, result)

    def test__wait_job_result_failure(self):
        response = fake.JOB_ERROR_REST
        self.mock_object(self.client,
                         'send_request',
                         mock.Mock(return_value=response))
        self.assertRaises(netapp_utils.NetAppDriverException,
                          self.client._wait_job_result,
                          f'/cluster/jobs/{fake.FAKE_UUID}')

    def test__wait_job_result_timeout(self):
        # A job that never leaves 'running' must time out and raise.
        response = fake.JOB_RUNNING_REST
        self.client.async_rest_timeout = 2
        self.mock_object(self.client,
                         'send_request',
                         mock.Mock(return_value=response))
        self.assertRaises(netapp_utils.NetAppDriverException,
                          self.client._wait_job_result,
                          f'/cluster/jobs/{fake.FAKE_UUID}')

    def test_list_cluster_nodes(self):
        """Get all available cluster nodes."""
        return_value = fake.FAKE_GET_CLUSTER_NODE_VERSION_REST
        self.mock_object(self.client,
                         'send_request',
                         mock.Mock(return_value=return_value))
        test_result = self.client.list_cluster_nodes()
        self.client.send_request.assert_called_once_with(
            '/cluster/nodes', 'get'
        )
        nodes = return_value.get('records', [])
        expected_result = [node['name'] for node in nodes]
        self.assertEqual(expected_result, test_result)

    @ddt.data(True, False)
    def test_check_for_cluster_credentials(self, cluster_creds):
        self.client._have_cluster_creds = cluster_creds
        result = self.client.check_for_cluster_credentials()
        self.assertEqual(cluster_creds, result)

    def test__check_for_cluster_credentials(self):
        # Use the original (unmocked) implementation for this test.
        self.client._check_for_cluster_credentials = (
            self.original_check_for_cluster_credentials)
        # (continuation of test__check_for_cluster_credentials from the
        # previous chunk)
        api_response = fake.FAKE_GET_CLUSTER_NODE_VERSION_REST
        self.mock_object(self.client,
                         'list_cluster_nodes',
                         mock.Mock(return_value=api_response))
        result = self.client._check_for_cluster_credentials(self=self.client)
        self.assertTrue(result)

    def test__check_for_cluster_credentials_not_cluster(self):
        # A not-authorized REST error means the creds are vserver-scoped.
        self.client._check_for_cluster_credentials = (
            self.original_check_for_cluster_credentials)
        self.mock_object(self.client,
                         'list_cluster_nodes',
                         self._mock_api_error(
                             netapp_api.EREST_NOT_AUTHORIZED))
        result = self.client._check_for_cluster_credentials(self=self.client)
        self.assertFalse(result)

    def test__check_for_cluster_credentials_api_error(self):
        # Any other API error is propagated, not swallowed.
        self.client._check_for_cluster_credentials = (
            self.original_check_for_cluster_credentials)
        self.mock_object(self.client,
                         'list_cluster_nodes',
                         self._mock_api_error())
        self.assertRaises(netapp_api.api.NaApiError,
                          self.client._check_for_cluster_credentials,
                          self.client)

    def test_get_licenses(self):
        return_value = fake.FAKE_GET_LICENSES_REST
        self.mock_object(self.client,
                         'send_request',
                         mock.Mock(return_value=return_value))
        test_result = self.client.get_licenses()
        expected_result = sorted(
            [license['name'] for license in return_value.get('records', [])])
        self.assertEqual(test_result, expected_result)

    @ddt.data(((9, 1, 0), fake.VERSION_NO_DARE), ((8, 3, 2), fake.VERSION))
    @ddt.unpack
    def test_is_nve_supported_unsupported_release_or_platform(self, gen, ver):
        system_version = {'version-tuple': gen, 'version': ver}
        self.mock_object(self.client,
                         'get_ontap_version',
                         mock.Mock(return_value=system_version))
        self.mock_object(self.client,
                         '_get_security_key_manager_nve_support',
                         mock.Mock(return_value=False))
        self.mock_object(self.client,
                         'list_cluster_nodes',
                         mock.Mock(return_value=fake.NODE_NAMES))
        result = self.client.is_nve_supported()
        self.assertFalse(result)

    def test_is_nve_supported_valid_platform_and_supported_release(self):
        system_version = {
            'version-tuple': (9, 1, 0),
            'version': fake.VERSION,
        }
        self.mock_object(self.client,
                         'get_ontap_version',
                         mock.Mock(return_value=system_version))
        self.mock_object(self.client,
                         '_get_security_key_manager_nve_support',
                         mock.Mock(return_value=True))
        self.mock_object(self.client,
                         'list_cluster_nodes',
                         mock.Mock(return_value=fake.NODE_NAMES))
        result = self.client.is_nve_supported()
        self.assertTrue(result)

    def test_is_nve_supported_key_manager_not_enabled(self):
        system_version = {
            'version-tuple': (9, 1, 0),
            'version': fake.VERSION,
        }
        self.mock_object(self.client,
                         'get_ontap_version',
                         mock.Mock(return_value=system_version))
        self.mock_object(self.client,
                         '_get_security_key_manager_nve_support',
                         mock.Mock(return_value=False))
        self.mock_object(self.client,
                         'list_cluster_nodes',
                         mock.Mock(return_value=fake.NODE_NAMES))
        result = self.client.is_nve_supported()
        self.assertFalse(result)

    def test__get_volume_by_args(self):
        response = fake.VOLUME_LIST_SIMPLE_RESPONSE_REST
        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=response))
        result = self.client._get_volume_by_args(
            vol_name=fake.VOLUME_NAMES[0],
            aggregate_name=fake.SHARE_AGGREGATE_NAME,
            vol_path=fake.VOLUME_JUNCTION_PATH,
            vserver=fake.VSERVER_NAME,
            fields='name,style,svm.name,svm.uuid')
        query = {
            'name': fake.VOLUME_NAMES[0],
            'aggregates.name': fake.SHARE_AGGREGATE_NAME,
            'nas.path': fake.VOLUME_JUNCTION_PATH,
            'svm.name': fake.VSERVER_NAME,
            'style': 'flex*',  # Match both 'flexvol' and 'flexgroup'
            'error_state.is_inconsistent': 'false',
            'fields': 'name,style,svm.name,svm.uuid'
        }
        self.client.send_request.assert_called_once_with(
            '/storage/volumes/', 'get', query=query)
        self.assertEqual(volume, result)

    def test_restore_snapshot(self):
        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        uuid = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST["uuid"]
        body = {
            'restore_to.snapshot.name': fake.SNAPSHOT_NAME
        }
        self.mock_object(self.client, '_get_volume_by_args',
                         mock.Mock(return_value=volume))
        self.mock_object(self.client, 'send_request')
        self.client.restore_snapshot(fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME)
        self.client._get_volume_by_args.assert_called_once_with(
            vol_name=fake.VOLUME_NAMES[0])
        self.client.send_request.assert_called_once_with(
            f'/storage/volumes/{uuid}', 'patch', body=body)

    @ddt.data(0, 10)
    def test__has_records(self, num_records):
        result = self.client._has_records({'num_records': num_records})
        if not num_records or num_records == 0:
            self.assertFalse(result)
        else:
            self.assertTrue(result)

    def test_vserver_exists(self):
        query = {
            'name': fake.VSERVER_NAME
        }
        return_value = fake.SVMS_LIST_SIMPLE_RESPONSE_REST
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        self.mock_object(self.client, '_has_records',
                         mock.Mock(return_value=True))
        result = self.client.vserver_exists(fake.VSERVER_NAME)
        self.client.send_request.assert_called_once_with(
            '/svm/svms', 'get', query=query, enable_tunneling=False)
        self.client._has_records.assert_called_once_with(
            fake.SVMS_LIST_SIMPLE_RESPONSE_REST)
        self.assertEqual(result, True)

    def test_get_aggregate(self):
        response = fake.AGGR_GET_ITER_RESPONSE_REST['records']
        self.mock_object(self.client, '_get_aggregates',
                         mock.Mock(return_value=response))
        result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME)
        fields = ('name,block_storage.primary.raid_type,'
                  'block_storage.storage_type,snaplock_type')
        self.client._get_aggregates.assert_has_calls([
            mock.call(
                aggregate_names=[fake.SHARE_AGGREGATE_NAME],
                fields=fields)])
        expected = {
            'name': fake.SHARE_AGGREGATE_NAME,
            'raid-type': response[0]['block_storage']['primary']['raid_type'],
            'is-hybrid':
                response[0]['block_storage']['storage_type'] == 'hybrid',
            'snaplock-type': response[0]['snaplock_type'],
            'is-snaplock': response[0]['is_snaplock']
        }
        self.assertEqual(expected, result)

    def test_get_cluster_aggregate_capacities(self):
        response = fake.AGGR_GET_ITER_RESPONSE_REST['records']
        self.mock_object(self.client, '_get_aggregates',
                         mock.Mock(return_value=response))
        result = self.client.get_cluster_aggregate_capacities(response)
        fields = 'name,space'
        self.client._get_aggregates.assert_has_calls([
            mock.call(
                aggregate_names=response,
                fields=fields)])
        expected = {
            response[0]['name']: {
                'available': 568692293632,
                'total': 1271819509760,
                'used': 703127216128,
            },
            response[1]['name']: {
                'available': 727211110400,
                'total': 1426876227584,
                'used': 699665117184,
            }
        }
        self.assertDictEqual(expected, result)

    def test_list_non_root_aggregates(self):
        return_value = fake.FAKE_AGGR_LIST
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        result = self.client.list_non_root_aggregates()
        expected = [fake.SHARE_AGGREGATE_NAMES_LIST[0]]
        self.assertEqual(expected, result)

    def test__get_aggregates(self):
        api_response = fake.AGGR_GET_ITER_RESPONSE_REST
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=api_response))
        result = self.client._get_aggregates(
            aggregate_names=fake.SHARE_AGGREGATE_NAMES)
        expected = fake.AGGR_GET_ITER_RESPONSE_REST['records']
        self.assertEqual(expected, result)

    def test_get_node_for_aggregate(self):
        response = fake.AGGR_GET_ITER_RESPONSE_REST['records']
        self.mock_object(self.client, '_get_aggregates',
                         mock.Mock(return_value=response))
        result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME)
        expected = 'fake_home_node_name'
        self.assertEqual(expected, result)

    @ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']},
              {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},
              )
    @ddt.unpack
    def test_get_aggregate_disk_types(self, types, expected):
        mock_get_aggregate_disk_types = self.mock_object(
            self.client, '_get_aggregate_disk_types',
            mock.Mock(return_value=types))
        result = self.client.get_aggregate_disk_types(
            fake.SHARE_AGGREGATE_NAME)
        self.assertEqual(sorted(expected), sorted(result))
        mock_get_aggregate_disk_types.assert_called_once_with(
            fake.SHARE_AGGREGATE_NAME)

    def test_volume_exists(self):
        query = {
            'name': fake.VOLUME_NAMES[0]
        }
        return_value = fake.VOLUME_LIST_SIMPLE_RESPONSE_REST
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        self.mock_object(self.client, '_has_records',
                         mock.Mock(return_value=True))
        result = self.client.volume_exists(fake.VOLUME_NAMES[0])
        self.client.send_request.assert_called_once_with(
            '/storage/volumes', 'get', query=query)
        self.client._has_records.assert_called_once_with(
            fake.VOLUME_LIST_SIMPLE_RESPONSE_REST)
        self.assertEqual(result, True)

    def test_list_vserver_aggregates(self):
        self.mock_object(self.client,
                         'get_vserver_aggregate_capacities',
                         mock.Mock(return_value=fake.VSERVER_AGGREGATES))
        result = self.client.list_vserver_aggregates()
        self.assertListEqual(list(fake.VSERVER_AGGREGATES.keys()), result)

    def test_list_vserver_aggregates_none_found(self):
        self.mock_object(self.client,
                         'get_vserver_aggregate_capacities',
                         mock.Mock(return_value={}))
        result = self.client.list_vserver_aggregates()
        self.assertListEqual([], result)

    def test_get_vserver_aggregate_capacities(self):
        response = fake.FAKE_SVM_AGGREGATES
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=response))
        result = self.client.get_vserver_aggregate_capacities(
            fake.SHARE_AGGREGATE_NAMES_LIST)
        query = {
            'fields': 'name,aggregates.name,aggregates.available_size'
        }
        self.client.send_request.assert_has_calls([
            mock.call('/svm/svms', 'get', query=query)])
        expected = {
            response['records'][0].get('aggregates')[0].get('name'): {
                'available': 568692293632,
            },
            response['records'][0].get('aggregates')[1].get('name'): {
                'available': 727211110400,
            }
        }
        self.assertDictEqual(expected, result)

    def test_get_vserver_aggregate_capacities_partial_request(self):
        response = fake.FAKE_SVM_AGGREGATES
        size = response['records'][0].get('aggregates')[0].get(
            'available_size')
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=response))
        result = self.client.get_vserver_aggregate_capacities(
            [fake.SHARE_AGGREGATE_NAMES[0]])
        expected = {
            fake.SHARE_AGGREGATE_NAMES[0]: {
                'available': size
            }
        }
        self.assertDictEqual(expected, result)

    def test_get_vserver_aggregate_capacities_aggregate_not_found(self):
        # Unknown aggregate yields an empty dict plus one warning log.
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=fake.FAKE_SVM_AGGR_EMPTY))
        result = self.client.get_vserver_aggregate_capacities(
            ['other-aggr'])
        self.assertDictEqual({}, result)
        self.assertEqual(1, client_cmode_rest.LOG.warning.call_count)

    def test_get_vserver_aggregate_capacities_none_requested(self):
        result = self.client.get_vserver_aggregate_capacities([])
        self.assertEqual({}, result)

    @ddt.data(None, fake.QOS_MAX_THROUGHPUT, fake.QOS_MAX_THROUGHPUT_IOPS)
    def test_qos_policy_group_create(self, max_throughput):
        return_value = fake.GENERIC_JOB_POST_RESPONSE
        body = {
            'name': fake.QOS_POLICY_GROUP_NAME,
            'svm.name': fake.VSERVER_NAME,
        }
        # IOPS limits are passed through; byte limits are converted to MB/s.
        if max_throughput:
            if 'iops' in max_throughput:
                qos = fake.QOS_MAX_THROUGHPUT_IOPS_NO_UNIT
                body['fixed.max_throughput_iops'] = qos
            else:
                qos = math.ceil(fake.QOS_MAX_THROUGHPUT_NO_UNIT / units.Mi)
                body['fixed.max_throughput_mbps'] = qos
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        if max_throughput:
            result = self.client.qos_policy_group_create(
                fake.QOS_POLICY_GROUP_NAME, fake.VSERVER_NAME,
                max_throughput)
        else:
            result = self.client.qos_policy_group_create(
                fake.QOS_POLICY_GROUP_NAME, fake.VSERVER_NAME)
        self.client.send_request.assert_called_once_with(
            '/storage/qos/policies', 'post', body=body)
        self.assertEqual(result, return_value)

    @ddt.data(None, ['CIFS', 'NFS'])
    def test_get_network_interfaces(self, protocols):
        return_value = fake.GENERIC_NETWORK_INTERFACES_GET_REPONSE
        lif_info = return_value.get('records', [])[0]
        fake_lif = [{
            'uuid': lif_info['uuid'],
            'administrative-status':
                'up' if lif_info['enabled'] else 'down',
            'address': lif_info['ip']['address'],
            'home-node': lif_info['location']['home_node']['name'],
            'home-port': lif_info['location']['home_port']['name'],
            'interface-name': lif_info['name'],
            'netmask': lif_info['ip']['netmask'],
            # (continuation of test_get_network_interfaces from the
            # previous chunk)
            'role': lif_info['services'],
            'vserver': lif_info['svm']['name'],
        }]
        # Protocol filter adds a 'services' query term; field list is common.
        if protocols:
            query = {
                'services': 'data_cifs,data_nfs',
                'fields': 'ip.address,location.home_node.name,'
                          'location.home_port.name,ip.netmask,'
                          'services,svm.name,enabled'
            }
        else:
            query = {
                'fields': 'ip.address,location.home_node.name,'
                          'location.home_port.name,ip.netmask,'
                          'services,svm.name,enabled'
            }
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        result = self.client.get_network_interfaces(protocols)
        self.client.send_request.assert_called_once_with(
            '/network/ip/interfaces', 'get', query=query)
        self.assertEqual(result, fake_lif)

    def test_clear_nfs_export_policy_for_volume(self):
        # Clearing simply re-points the volume at the 'default' policy.
        mock_set_nfs_export_policy_for_volume = self.mock_object(
            self.client, 'set_nfs_export_policy_for_volume')
        self.client.clear_nfs_export_policy_for_volume(fake.SHARE_NAME)
        mock_set_nfs_export_policy_for_volume.assert_called_once_with(
            fake.SHARE_NAME, 'default')

    def test_set_nfs_export_policy_for_volume(self):
        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        query = {'name': volume['name']}
        body = {
            'nas.export_policy.name': fake.EXPORT_POLICY_NAME
        }
        self.mock_object(self.client, 'send_request')
        self.client.set_nfs_export_policy_for_volume(
            fake.VOLUME_NAMES[0], fake.EXPORT_POLICY_NAME)
        self.client.send_request.assert_called_once_with(
            '/storage/volumes/', 'patch', query=query, body=body)

    def test_create_nfs_export_policy(self):
        body = {'name': fake.EXPORT_POLICY_NAME}
        self.mock_object(self.client, 'send_request')
        self.client.create_nfs_export_policy(fake.EXPORT_POLICY_NAME)
        self.client.send_request.assert_called_once_with(
            '/protocols/nfs/export-policies', 'post', body=body)

    def test_soft_delete_nfs_export_policy(self):
        # When hard delete fails, the policy is renamed out of the way.
        self.mock_object(self.client, 'delete_nfs_export_policy',
                         mock.Mock(side_effect=self._mock_api_error()))
        self.mock_object(self.client, 'rename_nfs_export_policy')
        self.client.soft_delete_nfs_export_policy(fake.EXPORT_POLICY_NAME)
        self.client.rename_nfs_export_policy.assert_has_calls([
            mock.call(
                fake.EXPORT_POLICY_NAME,
                'deleted_manila_' + fake.EXPORT_POLICY_NAME)])

    def test_rename_nfs_export_policy(self):
        return_uuid = fake.GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES
        uuid = "fake-policy-uuid"
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_uuid))
        self.mock_object(self.client, '_has_records',
                         mock.Mock(return_value=True))
        body = {
            'name': 'fake_new_policy_name'
        }
        self.client.rename_nfs_export_policy(fake.EXPORT_POLICY_NAME,
                                             'fake_new_policy_name')
        self.client._has_records.assert_called_once_with(return_uuid)
        self.client.send_request.assert_has_calls([
            mock.call('/protocols/nfs/export-policies', 'get',
                      query={'name': fake.EXPORT_POLICY_NAME}),
            mock.call(f'/protocols/nfs/export-policies/{uuid}', 'patch',
                      body=body)])

    def test_get_volume_junction_path(self):
        return_value = fake.GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES
        query = {
            'name': fake.SHARE_NAME,
            'fields': 'nas.path'
        }
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        result = self.client.get_volume_junction_path(fake.SHARE_NAME)
        expected = fake.VOLUME_JUNCTION_PATH
        self.client.send_request.assert_called_once_with('/storage/volumes/',
                                                         'get', query=query)
        self.assertEqual(result, expected)

    def test_get_volume_snapshot_attributes(self):
        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        self.mock_object(self.client, '_get_volume_by_args',
                         mock.Mock(return_value=volume))
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        expected = {
            'snapdir-access-enabled': 'false',
            'snapshot-policy': 'daily',
        }
        result = self.client.get_volume_snapshot_attributes(fake.SHARE_NAME)
        self.assertEqual(result, expected)

    def test_get_volume(self):
        return_value = fake.GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES
        fake_volume = return_value.get('records', [])[0]
        expected = {
            'aggregate': fake.SHARE_AGGREGATE_NAME,
            'aggr-list': [fake.SHARE_AGGREGATE_NAME],
            'junction-path': fake_volume.get('nas', {}).get('path', ''),
            'name': fake_volume.get('name', ''),
            'owning-vserver-name': fake_volume.get('svm', {}).get('name', ''),
            'type': fake_volume.get('type', ''),
            'style': fake_volume.get('style', ''),
            'size': fake_volume.get('space', {}).get('size', ''),
            'size-used': fake_volume.get('space', {}).get('used', ''),
            'qos-policy-group-name': fake_volume.get('qos', {})
                                                .get('policy', {})
                                                .get('name'),
            'style-extended': fake_volume.get('style', ''),
            'snaplock-type': fake_volume.get('snaplock', {}).get('type', '')
        }
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        self.mock_object(self.client, '_has_records',
                         mock.Mock(return_value=True))
        result = self.client.get_volume(fake.SHARE_NAME)
        self.client._has_records.assert_called_once_with(return_value)
        self.assertEqual(result, expected)

    def test_cifs_share_exists(self):
        return_value = fake.VOLUME_LIST_SIMPLE_RESPONSE_REST
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        self.mock_object(self.client, '_has_records',
                         mock.Mock(return_value=True))
        result = self.client.cifs_share_exists(fake.SHARE_NAME)
        query = {
            'name': fake.SHARE_NAME,
            'path': fake.VOLUME_JUNCTION_PATH
        }
        self.client._has_records.assert_called_once_with(return_value)
        self.client.send_request.assert_called_once_with(
            '/protocols/cifs/shares', 'get', query=query)
        self.assertTrue(result)

    def test_create_cifs_share(self):
        body = {
            'name': fake.SHARE_NAME,
            'path': fake.VOLUME_JUNCTION_PATH,
            'svm.name': self.client.vserver,
        }
        self.mock_object(self.client, 'send_request')
        self.client.create_cifs_share(fake.SHARE_NAME, f'/{fake.SHARE_NAME}')
        self.client.send_request.assert_called_once_with(
            '/protocols/cifs/shares', 'post', body=body)

    @ddt.data(None, 'fake_security_style')
    def test_set_volume_security_style(self, security_style):
        # Omitted style defaults to 'unix'.
        self.mock_object(self.client, 'send_request')
        if security_style:
            self.client.set_volume_security_style(fake.VOLUME_NAMES[0],
                                                  security_style)
        else:
            self.client.set_volume_security_style(fake.VOLUME_NAMES[0])
        query = {
            'name': fake.VOLUME_NAMES[0],
        }
        body = {
            'nas.security_style': security_style if security_style else 'unix'
        }
        self.client.send_request.assert_called_once_with(
            '/storage/volumes', 'patch', body=body, query=query)

    def test_remove_cifs_share_access(self):
        return_uuid = fake.GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_uuid))
        self.client.remove_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME)
        fake_uuid = "fake_uuid"
        self.client.send_request.assert_has_calls([
            mock.call('/protocols/cifs/shares', 'get',
                      query={'name': fake.SHARE_NAME, 'fields': 'svm.uuid'}),
            mock.call(f'/protocols/cifs/shares/{fake_uuid}/{fake.SHARE_NAME}/'
                      f'acls/{fake.USER_NAME}/windows', 'delete')])

    def test_create_volume(self):
        mock_create_volume_async = self.mock_object(self.client,
                                                    'create_volume_async')
        mock_update = self.mock_object(
            self.client, 'update_volume_efficiency_attributes')
        mock_max_files = self.mock_object(self.client, 'set_volume_max_files')
        options = {'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}
        self.client.create_volume(fake.SHARE_AGGREGATE_NAME,
                                  fake.VOLUME_NAMES[0],
                                  fake.SHARE_SIZE, max_files=1,
                                  snaplock_type="enterprise", **options)
        mock_create_volume_async.assert_called_once_with(
            [fake.SHARE_AGGREGATE_NAME], fake.VOLUME_NAMES[0],
            fake.SHARE_SIZE, is_flexgroup=False, thin_provisioned=False,
            snapshot_policy=None, language=None, max_files=1,
            snapshot_reserve=None, volume_type='rw', qos_policy_group=None,
            encrypt=False, adaptive_qos_policy_group=None,
            mount_point_name=None,
            efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME,
            snaplock_type="enterprise",
        )
        mock_update.assert_called_once_with(
            fake.VOLUME_NAMES[0], False, False,
            efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME)
        mock_max_files.assert_called_once_with(fake.VOLUME_NAMES[0], 1)

    def test_create_volume_async(self):
        body = {
            'size': 1073741824,
            'name': fake.VOLUME_NAMES[0],
            'style': 'flexvol',
            'aggregates': [{'name': fake.SHARE_AGGREGATE_NAME}]
        }
        return_value = fake.GENERIC_JOB_POST_RESPONSE
        expected_result = {
            'jobid': fake.GENERIC_JOB_POST_RESPONSE['job']['uuid'],
            'error-code': '',
            'error-message': '',
        }
        self.mock_object(self.client, '_get_create_volume_body',
                         mock.Mock(return_value={}))
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        result = self.client.create_volume_async([
            fake.SHARE_AGGREGATE_NAME], fake.VOLUME_NAMES[0], 1,
            is_flexgroup=False)
        self.client._get_create_volume_body.assert_called_once_with(
            fake.VOLUME_NAMES[0], False, None, None, None, 'rw', None,
            False, None, None, None)
        self.client.send_request.assert_called_once_with(
            '/storage/volumes', 'post', body=body, wait_on_accepted=True)
        self.assertEqual(expected_result, result)

    def test_get_volume_efficiency_status(self):
        return_value = fake.VOLUME_LIST_SIMPLE_RESPONSE_REST
        query = {
            'efficiency.volume_path': '/vol/%s' % fake.VOLUME_NAMES[0],
            'fields': 'efficiency.state,efficiency.compression'
        }
        expected_result = {
            'dedupe': True,
            'compression': True
        }
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        result = self.client.get_volume_efficiency_status(
            fake.VOLUME_NAMES[0])
        self.client.send_request.assert_called_once_with(
            '/storage/volumes', 'get', query=query)
        self.assertEqual(expected_result, result)

    def test_enable_dedupe_async(self):
        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        uuid = volume["uuid"]
        return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        self.mock_object(self.client, '_get_volume_by_args',
                         mock.Mock(return_value=volume))
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        body = {
            'efficiency': {'dedupe': 'background'}
        }
        self.client.enable_dedupe_async(fake.VOLUME_NAMES[0])
        self.client.send_request.assert_called_once_with(
            f'/storage/volumes/{uuid}', 'patch', body=body)
        self.client._get_volume_by_args.assert_called_once_with(
            vol_name=fake.VOLUME_NAMES[0])

    def test_disable_dedupe_async(self):
        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        uuid = volume["uuid"]
        return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        self.mock_object(self.client, '_get_volume_by_args',
                         mock.Mock(return_value=volume))
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        body = {
            'efficiency': {'dedupe': 'none'}
        }
        self.client.disable_dedupe_async(fake.VOLUME_NAMES[0])
        self.client.send_request.assert_called_once_with(
            f'/storage/volumes/{uuid}', 'patch', body=body)
        self.client._get_volume_by_args.assert_called_once_with(
            vol_name=fake.VOLUME_NAMES[0])

    def test_enable_compression_async(self):
        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        uuid = volume["uuid"]
        return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        self.mock_object(self.client, '_get_volume_by_args',
                         mock.Mock(return_value=volume))
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        body = {
            'efficiency': {'compression': 'background'}
        }
        self.client.enable_compression_async(fake.VOLUME_NAMES[0])
        self.client.send_request.assert_called_once_with(
            f'/storage/volumes/{uuid}', 'patch', body=body)
        self.client._get_volume_by_args.assert_called_once_with(
            vol_name=fake.VOLUME_NAMES[0])

    def test_disable_compression_async(self):
        volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        uuid = volume["uuid"]
        return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST
        self.mock_object(self.client, '_get_volume_by_args',
                         mock.Mock(return_value=volume))
        self.mock_object(self.client, 'send_request',
                         mock.Mock(return_value=return_value))
        body = {
            'efficiency': {'compression': 'none'}
        }
        self.client.disable_compression_async(fake.VOLUME_NAMES[0])
        self.client.send_request.assert_called_once_with(
            f'/storage/volumes/{uuid}', 'patch', body=body)
        self.client._get_volume_by_args.assert_called_once_with(
            vol_name=fake.VOLUME_NAMES[0])

    def  # NOTE(review): chunk truncated mid-token; method continues beyond
test_apply_volume_efficiency_policy(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) body = { 'efficiency': {'policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} } self.client.apply_volume_efficiency_policy( fake.VOLUME_NAMES[0], efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME ) self.client.send_request.assert_called_once_with( f'/storage/volumes/{uuid}', 'patch', body=body) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) def test_apply_volume_efficiency_none_policy(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) self.client.apply_volume_efficiency_policy( fake.VOLUME_NAMES[0], efficiency_policy=None ) self.client._get_volume_by_args.assert_not_called() def test_set_volume_max_files(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST fake_max_files = '40000' self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) body = { 'files.maximum': int(fake_max_files) } self.client.set_volume_max_files(fake.VOLUME_NAMES[0], fake_max_files) self.client.send_request.assert_called_once_with( f'/storage/volumes/{uuid}', 'patch', body=body) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) def test_set_volume_max_files_retry_allocated(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] fake_max_files = '40000' fake_used_files = 
'30000' alloc_files = {'maximum': fake_max_files, 'used': fake_used_files} side_effect = [ netapp_api.api.NaApiError( code=netapp_api.EREST_CANNOT_MODITY_SPECIFIED_FIELD), None] self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(side_effect=side_effect)) self.mock_object(self.client, 'get_volume_allocated_files', mock.Mock(return_value=alloc_files)) body_before = { 'files.maximum': int(fake_max_files) } body_retry = { 'files.maximum': int(fake_used_files) } self.client.set_volume_max_files( fake.VOLUME_NAMES[0], fake_max_files, retry_allocated=True) mock_sr.assert_has_calls([ mock.call(f'/storage/volumes/{uuid}', 'patch', body=body_before), mock.call(f'/storage/volumes/{uuid}', 'patch', body=body_retry), ]) self.client._get_volume_by_args.assert_called() def test_set_volume_snapdir_access(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST fake_hide_snapdir = 'fake-snapdir' self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) body = { 'snapshot_directory_access_enabled': str( not fake_hide_snapdir).lower() } self.client.set_volume_snapdir_access(fake.VOLUME_NAMES[0], fake_hide_snapdir) self.client.send_request.assert_called_once_with( f'/storage/volumes/{uuid}', 'patch', body=body) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) def test_get_fpolicy_scopes(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] return_value = fake.GENERIC_FPOLICY_RESPONSE query = { 'name': fake.FPOLICY_POLICY_NAME, 'scope.include_shares': fake.VOLUME_NAMES[0], 'scope.include_extension': fake.FPOLICY_EXT_TO_INCLUDE, 'scope.exclude_extension': fake.FPOLICY_EXT_TO_EXCLUDE } expected_result = [ { 'policy-name': fake.FPOLICY_POLICY_NAME, 
'file-extensions-to-include': fake.FPOLICY_EXT_TO_INCLUDE_LIST, 'file-extensions-to-exclude': fake.FPOLICY_EXT_TO_EXCLUDE_LIST, 'shares-to-include': [fake.VOLUME_NAMES[0]], } ] self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) result = self.client.get_fpolicy_scopes( fake.VOLUME_NAMES[0], fake.FPOLICY_POLICY_NAME, fake.FPOLICY_EXT_TO_INCLUDE_LIST, fake.FPOLICY_EXT_TO_EXCLUDE_LIST, [fake.VOLUME_NAMES[0]]) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) self.client.send_request.assert_called_once_with( f'/protocols/fpolicy/{uuid}/policies', 'get', query=query) self.assertEqual(expected_result, result) def test_get_fpolicy_policies_status(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] return_value = fake.GENERIC_FPOLICY_RESPONSE query = { 'name': fake.FPOLICY_POLICY_NAME, 'enabled': 'true' } expected_result = [ { 'policy-name': fake.FPOLICY_POLICY_NAME, 'status': True, 'sequence-number': 1 } ] self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) result = self.client.get_fpolicy_policies_status( fake.VOLUME_NAMES[0], fake.FPOLICY_POLICY_NAME, 'true') self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) self.client.send_request.assert_called_once_with( f'/protocols/fpolicy/{uuid}/policies', 'get', query=query) self.assertEqual(expected_result, result) def test_get_fpolicy_policies(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] return_value = fake.GENERIC_FPOLICY_RESPONSE query = { 'name': fake.FPOLICY_POLICY_NAME, 'engine.name': 'native', 'events': 
fake.FPOLICY_EVENT_NAME } expected_result = [ { 'policy-name': fake.FPOLICY_POLICY_NAME, 'engine-name': 'native', 'events': [fake.FPOLICY_EVENT_NAME] } ] self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) result = self.client.get_fpolicy_policies( fake.VOLUME_NAMES[0], fake.FPOLICY_POLICY_NAME, 'native', [fake.FPOLICY_EVENT_NAME]) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) self.client.send_request.assert_called_once_with( f'/protocols/fpolicy/{uuid}/policies', 'get', query=query) self.assertEqual(expected_result, result) def test_get_fpolicy_events(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] return_value = fake.GENERIC_FPOLICY_EVENTS_RESPONSE query = { 'name': fake.FPOLICY_EVENT_NAME, 'protocol': fake.FPOLICY_PROTOCOL, 'fields': 'file_operations.create,file_operations.write,' 'file_operations.rename' } expected_result = [ { 'event-name': fake.FPOLICY_EVENT_NAME, 'protocol': fake.FPOLICY_PROTOCOL, 'file-operations': fake.FPOLICY_FILE_OPERATIONS_LIST } ] self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) result = self.client.get_fpolicy_events( fake.VOLUME_NAMES[0], fake.FPOLICY_EVENT_NAME, fake.FPOLICY_PROTOCOL, fake.FPOLICY_FILE_OPERATIONS_LIST) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) self.client.send_request.assert_called_once_with( f'/protocols/fpolicy/{uuid}/events', 'get', query=query) self.assertEqual(expected_result, result) def test_create_fpolicy_event(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] body = { 'name': 
fake.FPOLICY_EVENT_NAME, 'protocol': fake.FPOLICY_PROTOCOL, 'file_operations.create': 'true', 'file_operations.write': 'true', 'file_operations.rename': 'true' } self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request') self.client.create_fpolicy_event( fake.VOLUME_NAMES[0], fake.FPOLICY_EVENT_NAME, fake.FPOLICY_PROTOCOL, fake.FPOLICY_FILE_OPERATIONS_LIST) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) self.client.send_request.assert_called_once_with( f'/protocols/fpolicy/{uuid}/events', 'post', body=body) def test_delete_fpolicy_policy(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request') self.client.delete_fpolicy_policy( fake.VOLUME_NAMES[0], fake.FPOLICY_POLICY_NAME) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) self.client.send_request.assert_called_once_with( f'/protocols/fpolicy/{uuid}/policies/{fake.FPOLICY_POLICY_NAME}', 'delete') def test_delete_fpolicy_event(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request') self.client.delete_fpolicy_event( fake.VOLUME_NAMES[0], fake.FPOLICY_EVENT_NAME) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) self.client.send_request.assert_called_once_with( f'/protocols/fpolicy/{uuid}/events/{fake.FPOLICY_EVENT_NAME}', 'delete') def test_enable_fpolicy_policy(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] body = { 'priority': 1, } self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request') self.client.enable_fpolicy_policy( 
fake.VOLUME_NAMES[0], fake.FPOLICY_POLICY_NAME, 1) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) self.client.send_request.assert_called_once_with( f'/protocols/fpolicy/{uuid}/policies/{fake.FPOLICY_POLICY_NAME}', 'patch', body=body) def test_create_fpolicy_policy_with_scope(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] body = { 'name': fake.FPOLICY_POLICY_NAME, 'events.name': fake.FPOLICY_EVENT_NAME, 'engine.name': fake.FPOLICY_ENGINE, 'scope.include_shares': [fake.VOLUME_NAMES[0]], 'scope.include_extension': fake.FPOLICY_EXT_TO_INCLUDE_LIST, 'scope.exclude_extension': fake.FPOLICY_EXT_TO_EXCLUDE_LIST } self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request') self.client.create_fpolicy_policy_with_scope( fake.FPOLICY_POLICY_NAME, fake.VOLUME_NAMES[0], fake.FPOLICY_EVENT_NAME, fake.FPOLICY_ENGINE, extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) self.client.send_request.assert_called_once_with( f'/protocols/fpolicy/{uuid}/policies', 'post', body=body) def test_delete_nfs_export_policy(self): policy_name = 'fake_policy_name' query = { 'name': policy_name, } api_response = fake.EXPORT_POLICY_REST mock_sr = self.mock_object(self.client, 'send_request', mock.Mock( return_value=api_response)) if not api_response.get('records'): return id = api_response.get('records')[0]['id'] self.client.delete_nfs_export_policy(policy_name) mock_sr.assert_has_calls([ mock.call('/protocols/nfs/export-policies', 'get', query=query), mock.call(f'/protocols/nfs/export-policies/{id}', 'delete'), ]) def test_delete_volume(self): """Deletes a volume.""" volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_sr = 
self.mock_object(self.client, 'send_request') # Get volume UUID. uuid = volume['uuid'] self.client.delete_volume('fake_volume_name') mock_sr.assert_called_once_with(f'/storage/volumes/{uuid}', 'delete') def test__unmount_volume(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_send_request = self.mock_object(self.client, 'send_request') uuid = volume['uuid'] # Unmount volume async operation. body = {"nas": {"path": ""}} self.client._unmount_volume('fake_volume_name') mock_send_request.assert_called_once_with( f'/storage/volumes/{uuid}', 'patch', body=body) def test_offline_volume(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_send_request = self.mock_object(self.client, 'send_request') uuid = volume['uuid'] body = {'state': 'offline'} self.client.offline_volume('fake_volume_name') mock_send_request.assert_called_once_with(f'/storage/volumes/{uuid}', 'patch', body=body) def test_qos_policy_group_rename(self): """Renames a QoS policy group.""" qos_policy_group_name = 'extreme' new_name = 'new_name' res = fake.QOS_POLICY_GROUP_REST mock_send_request = self.mock_object(self.client, 'send_request', mock.Mock(return_value=res)) query = { 'name': qos_policy_group_name, 'fields': 'uuid', } uuid = res.get('records')[0]['uuid'] body = {"name": new_name} self.client.qos_policy_group_rename(qos_policy_group_name, new_name) mock_send_request.assert_has_calls([ mock.call('/storage/qos/policies', 'get', query=query), mock.call(f'/storage/qos/policies/{uuid}', 'patch', body=body), ]) def test_qos_policy_group_get(self): qos_policy_group_name = 'extreme' qos_policy_group = fake.QOS_POLICY_GROUP_REST qos_policy = qos_policy_group.get('records')[0] max_throughput = qos_policy.get('fixed', {}).get('max_throughput_iops') expected = { 'policy-group': qos_policy.get('name'), 'vserver': 
qos_policy.get('svm', {}).get('name'), 'max-throughput': max_throughput if max_throughput else None, 'num-workloads': int(qos_policy.get('object_count')), } query = { 'name': qos_policy_group_name, 'fields': 'name,object_count,fixed.max_throughput_iops,' + 'fixed.max_throughput_mbps,svm.name' } mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=qos_policy_group)) result = self.client.qos_policy_group_get(qos_policy_group_name) mock_sr.assert_called_once_with('/storage/qos/policies', 'get', query=query) self.assertEqual(expected, result) def test_remove_unused_qos_policy_groups(self): result = fake.QOS_POLICY_GROUP_REST query = { 'name': '%s*' % client_cmode_rest.DELETED_PREFIX, 'fields': 'uuid,name', } mock_send_request = self.mock_object(self.client, 'send_request', mock.Mock(return_value=result)) res = result.get('records') for record in res: uuid = record['uuid'] self.client.remove_unused_qos_policy_groups() mock_send_request.assert_has_calls([ mock.call('/storage/qos/policies', 'get', query=query), mock.call(f'/storage/qos/policies/{uuid}', 'delete')]) def test_unmount_volume(self): self.mock_object(self.client, '_unmount_volume') self.client.unmount_volume(fake.SHARE_NAME) self.client._unmount_volume.assert_called_once_with(fake.SHARE_NAME) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) self.assertEqual(0, client_cmode_rest.LOG.warning.call_count) def test_unmount_volume_api_error(self): self.mock_object(self.client, '_unmount_volume', self._mock_api_error()) self.assertRaises(netapp_api.api.NaApiError, self.client.unmount_volume, fake.SHARE_NAME) self.assertEqual(1, self.client._unmount_volume.call_count) self.assertEqual(0, client_cmode_rest.LOG.debug.call_count) self.assertEqual(0, client_cmode_rest.LOG.warning.call_count) def test_unmount_volume_with_retries(self): return_code = netapp_api.EREST_UNMOUNT_FAILED_LOCK side_effect = [netapp_api.api.NaApiError(code=return_code, message='...job ID...')] * 5 
side_effect.append(None) self.mock_object(self.client, '_unmount_volume', mock.Mock(side_effect=side_effect)) self.mock_object(time, 'sleep') self.client.unmount_volume(fake.SHARE_NAME) self.assertEqual(6, self.client._unmount_volume.call_count) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) self.assertEqual(5, client_cmode_rest.LOG.warning.call_count) def test_unmount_volume_with_max_retries(self): return_code = netapp_api.EREST_UNMOUNT_FAILED_LOCK side_effect = [netapp_api.api.NaApiError(code=return_code, message='...job ID...')] * 30 self.mock_object(self.client, '_unmount_volume', mock.Mock(side_effect=side_effect)) self.mock_object(time, 'sleep') self.assertRaises(exception.NetAppException, self.client.unmount_volume, fake.SHARE_NAME) self.assertEqual(10, self.client._unmount_volume.call_count) self.assertEqual(0, client_cmode_rest.LOG.debug.call_count) self.assertEqual(10, client_cmode_rest.LOG.warning.call_count) def test_qos_policy_group_exists(self): mock = self.mock_object(self.client, 'qos_policy_group_get') response = self.client.qos_policy_group_exists('extreme') mock.assert_called_once_with('extreme') self.assertTrue(response) def test_mark_qos_policy_group_for_deletion_rename_failure(self): self.mock_object(self.client, 'qos_policy_group_exists', mock.Mock(return_value=True)) self.mock_object(self.client, 'qos_policy_group_rename', mock.Mock(side_effect=netapp_api.api.NaApiError)) self.mock_object(client_cmode_rest.LOG, 'warning') self.mock_object(self.client, 'remove_unused_qos_policy_groups') retval = self.client.mark_qos_policy_group_for_deletion( fake.QOS_POLICY_GROUP_NAME) self.assertIsNone(retval) client_cmode_rest.LOG.warning.assert_called_once() self.client.qos_policy_group_exists.assert_called_once_with( fake.QOS_POLICY_GROUP_NAME) self.client.qos_policy_group_rename.assert_called_once_with( fake.QOS_POLICY_GROUP_NAME, client_cmode_rest.DELETED_PREFIX + fake.QOS_POLICY_GROUP_NAME) 
self.client.remove_unused_qos_policy_groups.assert_called_once_with() @ddt.data(True, False) def test_mark_qos_policy_group_for_deletion_policy_exists(self, exists): self.mock_object(self.client, 'qos_policy_group_exists', mock.Mock(return_value=exists)) self.mock_object(self.client, 'qos_policy_group_rename') mock_remove_unused_policies = self.mock_object( self.client, 'remove_unused_qos_policy_groups') self.mock_object(client_cmode_rest.LOG, 'warning') retval = self.client.mark_qos_policy_group_for_deletion( fake.QOS_POLICY_GROUP_NAME) self.assertIsNone(retval) if exists: self.client.qos_policy_group_rename.assert_called_once_with( fake.QOS_POLICY_GROUP_NAME, client_cmode_rest.DELETED_PREFIX + fake.QOS_POLICY_GROUP_NAME) mock_remove_unused_policies.assert_called_once_with() else: self.assertFalse(self.client.qos_policy_group_rename.called) self.assertFalse( self.client.remove_unused_qos_policy_groups.called) self.assertFalse(client_cmode_rest.LOG.warning.called) def test_set_volume_size(self): unique_volume_return = {'uuid': 'fake_uuid'} self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=unique_volume_return)) mock_sr = self.mock_object(self.client, 'send_request') self.client.set_volume_size('fake_name', 1) body = { 'space.size': 1 * units.Gi } mock_sr.assert_called_once_with( '/storage/volumes/fake_uuid', 'patch', body=body) def test_qos_policy_group_modify(self): return_request = { 'records': [{'uuid': 'fake_uuid'}] } mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_request)) self.client.qos_policy_group_modify('qos_fake_name', '1000iops') query = { 'name': 'qos_fake_name', } body = { 'fixed.max_throughput_iops': 1000, 'fixed.max_throughput_mbps': 0 } mock_sr.assert_has_calls([ mock.call('/storage/qos/policies', 'get', query=query), mock.call('/storage/qos/policies/fake_uuid', 'patch', body=body), ]) @ddt.data(True, False) def test_set_volume_filesys_size_fixed(self, filesys_size_fixed): volume = 
fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_send_request = self.mock_object(self.client, 'send_request') fake_uuid = volume['uuid'] self.client.set_volume_filesys_size_fixed(fake.SHARE_NAME, filesys_size_fixed) body = { 'space.filesystem_size_fixed': filesys_size_fixed} mock_send_request.assert_called_once_with( f'/storage/volumes/{fake_uuid}', 'patch', body=body) def test_create_snapshot(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST mock_get_volume = self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_send_request = self.mock_object(self.client, 'send_request') self.client.create_snapshot(fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME) mock_get_volume.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) body = { 'name': fake.SNAPSHOT_NAME, } uuid = volume['uuid'] mock_send_request.assert_called_once_with( f'/storage/volumes/{uuid}/snapshots', 'post', body=body) def test_is_flexgroup_supported(self): flexgroup_supported = self.client.is_flexgroup_supported() self.assertTrue(flexgroup_supported) @ddt.data(True, False) def test_is_flexgroup_volume(self, is_flexgroup): response = copy.deepcopy(fake.VOLUME_LIST_SIMPLE_RESPONSE_REST) expected_style = 'flexgroup' if is_flexgroup else 'flexvol' response['records'][0]['style'] = expected_style mock_send_request = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) mock_has_records = self.mock_object( self.client, '_has_records', mock.Mock(return_value=True)) mock_na_utils_is_flexgroup = self.mock_object( netapp_utils, 'is_style_extended_flexgroup', mock.Mock(return_value=is_flexgroup)) result = self.client.is_flexgroup_volume(fake.VOLUME_NAMES[0]) self.assertEqual(is_flexgroup, result) query = { 'name': fake.VOLUME_NAMES[0], 'fields': 'style' } mock_send_request.assert_called_once_with('/storage/volumes/', 'get', query=query) 
mock_has_records.assert_called_once_with(response) mock_na_utils_is_flexgroup.assert_called_once_with(expected_style) def test_is_flexgroup_volume_raise_no_records(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.NO_RECORDS_RESPONSE_REST)) self.mock_object( self.client, '_has_records', mock.Mock(return_value=False)) self.assertRaises( exception.StorageResourceNotFound, self.client.is_flexgroup_volume, fake.VOLUME_NAMES[0]) def test_is_flexgroup_volume_raise_more_than_one_volume(self): self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.VOLUME_GET_ITER_RESPONSE_REST_PAGE)) self.mock_object( self.client, '_has_records', mock.Mock(return_value=True)) self.assertRaises( exception.NetAppException, self.client.is_flexgroup_volume, fake.VOLUME_NAMES[0]) @ddt.data( {'is_busy': True, 'owners': ['volume_clone']}, {'is_busy': False, 'owners': ['snap_restore_dependent']}) @ddt.unpack def test__is_busy_snapshot(self, is_busy, owners): result = self.client._is_busy_snapshot(owners) self.assertEqual(is_busy, result) @ddt.data(True, False) def test_get_snapshot(self, locked): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST mock_get_volume = self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) response = copy.deepcopy(fake.SNAPSHOTS_REST_RESPONSE) owners = ['volume_clone'] if locked else [] response['records'][0]['owners'] = owners mock_send_request = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) mock_has_records = self.mock_object( self.client, '_has_records', mock.Mock(return_value=True)) mock_is_busy = self.mock_object(self.client, '_is_busy_snapshot', mock.Mock(return_value=True)) result = self.client.get_snapshot(fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME) expected_snapshot = { 'access-time': fake.SNAPSHOT_REST['create_time'], 'name': fake.SNAPSHOT_REST['name'], 'volume': fake.SNAPSHOT_REST['volume']['name'], 'owners': set(owners), 'busy': True, 
'locked_by_clone': locked, } self.assertEqual(expected_snapshot, result) mock_get_volume.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) uuid = volume['uuid'] query = { 'name': fake.SNAPSHOT_NAME, 'fields': 'name,volume,create_time,owners' } mock_send_request.assert_called_once_with( f'/storage/volumes/{uuid}/snapshots', 'get', query=query) mock_has_records.assert_called_once_with(response) mock_is_busy.assert_called_once_with(set(owners)) def test_get_snapshot_raise_not_found(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.NO_RECORDS_RESPONSE_REST)) self.mock_object( self.client, '_has_records', mock.Mock(return_value=False)) self.assertRaises( exception.SnapshotResourceNotFound, self.client.get_snapshot, fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME) def test_get_snapshot_raise_more_than_one(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.SNAPSHOTS_MULTIPLE_REST_RESPONSE)) self.mock_object( self.client, '_has_records', mock.Mock(return_value=True)) self.assertRaises( exception.NetAppException, self.client.get_snapshot, fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME) def test_get_clone_children_for_snapshot(self): mock_get_records = self.mock_object( self.client, 'get_records', mock.Mock(return_value=fake.VOLUME_LIST_SIMPLE_RESPONSE_REST)) result = self.client.get_clone_children_for_snapshot( fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME) expected_children = [{'name': fake.VOLUME_NAMES[0]}] self.assertEqual(expected_children, result) query = { 'clone.parent_snapshot.name': fake.SNAPSHOT_NAME, 'clone.parent_volume.name': fake.VOLUME_NAMES[0], 'fields': 'name' } mock_get_records.assert_called_once_with( '/storage/volumes', query=query) def 
test_volume_clone_split_start(self): fake_resp_vol = fake.REST_SIMPLE_RESPONSE["records"][0] fake_uuid = fake_resp_vol['uuid'] mock_get_unique_volume = self.mock_object( self.client, "_get_volume_by_args", mock.Mock(return_value=fake_resp_vol) ) mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.VOLUME_LIST_SIMPLE_RESPONSE_REST)) self.client.volume_clone_split_start(fake.VOLUME_NAMES[0]) mock_get_unique_volume.assert_called_once() body = { 'clone.split_initiated': 'true', } mock_send_request.assert_called_once_with( f'/storage/volumes/{fake_uuid}', 'patch', body=body, wait_on_accepted=False) def test_volume_clone_split_stop(self): fake_resp_vol = fake.REST_SIMPLE_RESPONSE["records"][0] fake_uuid = fake_resp_vol['uuid'] mock_get_unique_volume = self.mock_object( self.client, "_get_volume_by_args", mock.Mock(return_value=fake_resp_vol) ) mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.VOLUME_LIST_SIMPLE_RESPONSE_REST)) self.client.volume_clone_split_stop(fake.VOLUME_NAMES[0]) mock_get_unique_volume.assert_called_once() body = { 'clone.split_initiated': 'false', } mock_send_request.assert_called_once_with( f'/storage/volumes/{fake_uuid}', 'patch', body=body, wait_on_accepted=False) def test_rename_snapshot(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST mock_get_volume = self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_send_request = self.mock_object(self.client, 'send_request') self.client.rename_snapshot( fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME, 'new_' + fake.SNAPSHOT_NAME) mock_get_volume.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) query = { 'name': fake.SNAPSHOT_NAME, } body = { 'name': 'new_' + fake.SNAPSHOT_NAME, } uuid = volume['uuid'] mock_send_request.assert_called_once_with( f'/storage/volumes/{uuid}/snapshots', 'patch', query=query, body=body) def test__get_soft_deleted_snapshots(self): mock_get_records = 
self.mock_object( self.client, 'get_records', mock.Mock(return_value=fake.SNAPSHOTS_MULTIPLE_REST_RESPONSE)) self.mock_object( self.client, '_is_busy_snapshot', mock.Mock(side_effect=[True, False])) snapshots_map = self.client._get_soft_deleted_snapshots() expected_snapshots = { fake.VSERVER_NAME: [{ "uuid": fake.FAKE_SNAPSHOT_UUID, "volume_uuid": fake.FAKE_VOLUME_UUID, }] } self.assertEqual(expected_snapshots, snapshots_map) query = { 'name': 'deleted_manila_*', 'fields': 'uuid,volume,owners,svm.name' } mock_get_records.assert_called_once_with( '/storage/volumes/*/snapshots', query=query) @ddt.data(True, False) def test_prune_deleted_snapshots(self, fail_deleting): soft_deleted_snapshots = { fake.VSERVER_NAME: [{ "uuid": fake.FAKE_SNAPSHOT_UUID, "volume_uuid": fake.FAKE_VOLUME_UUID, }] } mock_get_snaps = self.mock_object( self.client, '_get_soft_deleted_snapshots', mock.Mock(return_value=soft_deleted_snapshots) ) if fail_deleting: mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(side_effect=netapp_api.api.NaApiError)) else: mock_send_request = self.mock_object(self.client, 'send_request') self.client.prune_deleted_snapshots() mock_get_snaps.assert_called_once_with() vol_uuid = fake.FAKE_VOLUME_UUID snap_uuid = fake.FAKE_SNAPSHOT_UUID mock_send_request.assert_called_once_with( f'/storage/volumes/{vol_uuid}/snapshots/{snap_uuid}', 'delete') @ddt.data(True, False) def test_snapshot_exists(self, exists): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST vol_uuid = volume['uuid'] mock_get_vol = self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=fake.SNAPSHOTS_REST_RESPONSE)) mock_has_records = self.mock_object(self.client, '_has_records', mock.Mock(return_value=exists)) res = self.client.snapshot_exists(fake.SNAPSHOT_NAME, fake.VOLUME_NAMES[0]) self.assertEqual(exists, res) mock_get_vol.assert_called_once_with( 
vol_name=fake.VOLUME_NAMES[0], fields='uuid,state') query = { 'name': fake.SNAPSHOT_NAME } mock_send_request.assert_called_once_with( f'/storage/volumes/{vol_uuid}/snapshots/', 'get', query=query) mock_has_records.assert_called_once_with(fake.SNAPSHOTS_REST_RESPONSE) def test_snapshot_exists_error(self): volume = {'state': 'offline'} self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.assertRaises( exception.SnapshotUnavailable, self.client.snapshot_exists, fake.SNAPSHOT_NAME, fake.VOLUME_NAMES[0]) @ddt.data('source', 'destination', None) def test_volume_has_snapmirror_relationships(self, snapmirror_rel_type): """Snapmirror relationships can be both ways.""" vol = fake.FAKE_MANAGE_VOLUME snapmirror = { 'source-vserver': fake.SM_SOURCE_VSERVER, 'source-volume': fake.SM_SOURCE_VOLUME, 'destination-vserver': fake.SM_DEST_VSERVER, 'destination-volume': fake.SM_DEST_VOLUME, 'mirror-state': 'snapmirrored', 'schedule': 'daily', } expected_get_snapmirrors_call_count = 2 expected_get_snapmirrors_calls = [ mock.call(source_vserver=vol['owning-vserver-name'], source_volume=vol['name']), mock.call(dest_vserver=vol['owning-vserver-name'], dest_volume=vol['name']), ] if snapmirror_rel_type is None: side_effect = ([], []) elif snapmirror_rel_type == 'source': snapmirror['source-vserver'] = vol['owning-vserver-name'] snapmirror['source-volume'] = vol['name'] side_effect = ([snapmirror], None) expected_get_snapmirrors_call_count = 1 expected_get_snapmirrors_calls.pop() else: snapmirror['destination-vserver'] = vol['owning-vserver-name'] snapmirror['destination-volume'] = vol['name'] side_effect = (None, [snapmirror]) mock_get_snapmirrors_call = self.mock_object( self.client, 'get_snapmirrors', mock.Mock(side_effect=side_effect)) mock_exc_log = self.mock_object(client_cmode.LOG, 'exception') expected_retval = True if snapmirror_rel_type else False retval = self.client.volume_has_snapmirror_relationships(vol) self.assertEqual(expected_retval, 
retval) self.assertEqual(expected_get_snapmirrors_call_count, mock_get_snapmirrors_call.call_count) mock_get_snapmirrors_call.assert_has_calls( expected_get_snapmirrors_calls) self.assertFalse(mock_exc_log.called) def test_volume_has_snapmirror_relationships_api_error(self): vol = fake.FAKE_MANAGE_VOLUME expected_get_snapmirrors_calls = [ mock.call(source_vserver=vol['owning-vserver-name'], source_volume=vol['name']), ] mock_get_snapmirrors_call = self.mock_object( self.client, 'get_snapmirrors', mock.Mock( side_effect=self._mock_api_error())) mock_exc_log = self.mock_object(client_cmode_rest.LOG, 'exception') retval = self.client.volume_has_snapmirror_relationships(vol) self.assertFalse(retval) self.assertEqual(1, mock_get_snapmirrors_call.call_count) mock_get_snapmirrors_call.assert_has_calls( expected_get_snapmirrors_calls) self.assertTrue(mock_exc_log.called) def test_get_snapmirrors_svm(self): return_get_snp = fake.REST_GET_SNAPMIRRORS_RESPONSE mock_get_snap = self.mock_object( self.client, 'get_snapmirrors', mock.Mock(return_value=return_get_snp)) res = self.client.get_snapmirrors_svm(fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER, None) mock_get_snap.assert_called_once_with( source_path=fake.SM_SOURCE_VSERVER + ':*', dest_path=fake.SM_DEST_VSERVER + ':*', desired_attributes=None) self.assertEqual(return_get_snp, res) def test_get_snapmirrors(self): api_response = fake.SNAPMIRROR_GET_ITER_RESPONSE_REST mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_snapmirrors( fake.SM_SOURCE_PATH, fake.SM_DEST_PATH, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, enable_tunneling=True) expected = fake.REST_GET_SNAPMIRRORS_RESPONSE query = { 'source.path': (fake.SM_SOURCE_VSERVER + ':' + fake.SM_SOURCE_VOLUME), 'destination.path': (fake.SM_DEST_VSERVER + ':' + fake.SM_DEST_VOLUME), 'fields': 'state,source.svm.name,source.path,destination.svm.name,' 
'destination.path,transfer.end_time,uuid,policy.type,' 'transfer_schedule.name,transfer.state,' 'last_transfer_type,transfer.bytes_transferred,healthy' } mock_send_request.assert_called_once_with('/snapmirror/relationships', 'get', query=query, enable_tunneling=True) self.assertEqual(expected, result) @ddt.data( {'source_path': fake.SM_SOURCE_PATH, 'dest_path': fake.SM_DEST_PATH}, {'source_path': None, 'dest_path': None}) @ddt.unpack def test__get_snapmirrors(self, source_path, dest_path): api_response = fake.SNAPMIRROR_GET_ITER_RESPONSE_REST mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._get_snapmirrors( source_path, dest_path, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) query = { 'source.path': (fake.SM_SOURCE_VSERVER + ':' + fake.SM_SOURCE_VOLUME), 'destination.path': (fake.SM_DEST_VSERVER + ':' + fake.SM_DEST_VOLUME), 'fields': 'state,source.svm.name,source.path,destination.svm.name,' 'destination.path,transfer.end_time,uuid,policy.type,' 'transfer_schedule.name,transfer.state,' 'last_transfer_type,transfer.bytes_transferred,healthy' } mock_send_request.assert_called_once_with('/snapmirror/relationships', 'get', query=query, enable_tunneling=True) self.assertEqual(1, len(result)) def test__get_snapmirrors_not_found(self): api_response = fake.NO_RECORDS_RESPONSE_REST mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._get_snapmirrors( fake.SM_SOURCE_PATH, fake.SM_DEST_PATH, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) query = { 'source.path': (fake.SM_SOURCE_VSERVER + ':' + fake.SM_SOURCE_VOLUME), 'destination.path': (fake.SM_DEST_VSERVER + ':' + fake.SM_DEST_VOLUME), 'fields': 'state,source.svm.name,source.path,destination.svm.name,' 'destination.path,transfer.end_time,uuid,policy.type,' 
'transfer_schedule.name,transfer.state,' 'last_transfer_type,transfer.bytes_transferred,healthy' } mock_send_request.assert_called_once_with('/snapmirror/relationships', 'get', query=query, enable_tunneling=True) self.assertEqual([], result) @ddt.data(True, False) def test_modify_volume_no_optional_args(self, is_flexgroup): self.mock_object(self.client, 'send_request') mock_update_volume_efficiency_attributes = self.mock_object( self.client, 'update_volume_efficiency_attributes') volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, '_is_snaplock_enabled_volume', mock.Mock(return_value=True)) aggr = fake.SHARE_AGGREGATE_NAME if is_flexgroup: aggr = list(fake.SHARE_AGGREGATE_NAMES) self.client.modify_volume(aggr, fake.SHARE_NAME) # default body for call with no optional params body = {'guarantee': {'type': 'volume'}} self.client.send_request.assert_called_once_with( '/storage/volumes/' + volume['uuid'], 'patch', body=body) mock_update_volume_efficiency_attributes.assert_called_once_with( fake.SHARE_NAME, False, False, is_flexgroup=is_flexgroup, efficiency_policy=None ) @ddt.data((fake.QOS_POLICY_GROUP_NAME, None), (None, fake.ADAPTIVE_QOS_POLICY_GROUP_NAME)) @ddt.unpack def test_modify_volume_all_optional_args(self, qos_group, adaptive_qos_group): self.client.features.add_feature('ADAPTIVE_QOS') self.mock_object(self.client, 'send_request') mock_update_volume_efficiency_attributes = self.mock_object( self.client, 'update_volume_efficiency_attributes') volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) options = {'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} self.mock_object(self.client, '_is_snaplock_enabled_volume', mock.Mock(return_value=True)) self.client.modify_volume( fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, thin_provisioned=True, 
snapshot_policy=fake.SNAPSHOT_POLICY_NAME, language=fake.LANGUAGE, dedup_enabled=True, compression_enabled=False, max_files=fake.MAX_FILES, qos_policy_group=qos_group, adaptive_qos_policy_group=adaptive_qos_group, autosize_attributes=fake.VOLUME_AUTOSIZE_ATTRS, hide_snapdir=True, **options ) qos_policy_name = qos_group or adaptive_qos_group body = { 'guarantee': {'type': 'none'}, 'autosize': { 'mode': 'off', 'grow_threshold': '85', 'shrink_threshold': '50', 'maximum': '1258288', 'minimum': '1048576' }, 'files': {'maximum': 5000}, 'snapshot_policy': {'name': 'fake_snapshot_policy'}, 'qos': {'policy': {'name': qos_policy_name}}, 'snapshot_directory_access_enabled': 'false', 'language': 'fake_language' } self.client.send_request.assert_called_once_with( '/storage/volumes/' + volume['uuid'], 'patch', body=body) mock_update_volume_efficiency_attributes.assert_called_once_with( fake.SHARE_NAME, True, False, is_flexgroup=False, efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME ) def test__parse_timestamp(self): test_time_str = '2022-11-25T14:41:20+00:00' res = self.client._parse_timestamp(test_time_str) self.assertEqual(1669387280.0, res) def test__parse_timestamp_exception(self): test_time_str = None self.assertRaises(TypeError, self.client._parse_timestamp, test_time_str) def test_start_volume_move(self): mock__send_volume_move_request = self.mock_object( self.client, '_send_volume_move_request') self.client.start_volume_move(fake.VOLUME_NAMES[0], fake.VSERVER_NAME, fake.SHARE_AGGREGATE_NAME, 'fake_cutover', False) mock__send_volume_move_request.assert_called_once_with( fake.VOLUME_NAMES[0], fake.VSERVER_NAME, fake.SHARE_AGGREGATE_NAME, cutover_action='fake_cutover', encrypt_destination=False) def test_check_volume_move(self): mock__send_volume_move_request = self.mock_object( self.client, '_send_volume_move_request') self.client.check_volume_move(fake.VOLUME_NAMES[0], fake.VSERVER_NAME, fake.SHARE_AGGREGATE_NAME, False) 
mock__send_volume_move_request.assert_called_once_with( fake.VOLUME_NAMES[0], fake.VSERVER_NAME, fake.SHARE_AGGREGATE_NAME, validation_only=True, encrypt_destination=False) def test__send_volume_move_request(self): mock_sr = self.mock_object(self.client, 'send_request') self.client._send_volume_move_request('volume_name', 'vserver', 'destination_aggregate', cutover_action='wait', validation_only=True, encrypt_destination=False) query = {'name': 'volume_name'} body = { 'movement.destination_aggregate.name': 'destination_aggregate', 'encryption.enabled': 'false', 'validate_only': 'true', 'movement.state': 'wait', } mock_sr.assert_called_once_with( '/storage/volumes/', 'patch', query=query, body=body, wait_on_accepted=False) def test_get_nfs_export_policy_for_volume(self): fake_query = { 'name': 'fake_volume_name', 'fields': 'nas.export_policy.name' } ret = { 'records': [ { 'nas': { 'export_policy': { 'name': 'fake_name' } } } ] } mock_records = self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=ret)) res = self.client.get_nfs_export_policy_for_volume('fake_volume_name') mock_records.assert_called_once_with(ret) mock_sr.assert_called_once_with('/storage/volumes/', 'get', query=fake_query) expected = 'fake_name' self.assertEqual(expected, res) def test_get_unique_export_policy_id(self): mock_records = self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) expected = 'fake_uuid' ret = { 'records': [ { 'id': 'fake_uuid' } ] } mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=ret)) res = self.client.get_unique_export_policy_id('fake_policy_name') mock_records.assert_called_once_with(ret) mock_sr.assert_called_once_with( '/protocols/nfs/export-policies', 'get', query={'name': 'fake_policy_name'}) self.assertEqual(expected, res) def test__get_nfs_export_rule_indices(self): mockpid = self.mock_object(self.client, 
'get_unique_export_policy_id', mock.Mock(return_value='fake_policy_id')) fake_uuid = 'fake_policy_id' fake_query = { 'clients.match': 'fakecl', 'fields': 'clients.match,index' } ret = { 'records': [ { 'index': '0' } ] } mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=ret)) res = self.client._get_nfs_export_rule_indices('fake_policy', 'fakecl') mockpid.assert_called_once_with('fake_policy') mock_sr.assert_called_once_with( f'/protocols/nfs/export-policies/{fake_uuid}/rules', 'get', query=fake_query) expected = ['0'] self.assertEqual(expected, res) def test__add_nfs_export_rule(self): mockpid = self.mock_object(self.client, 'get_unique_export_policy_id', mock.Mock(return_value='fake_policy_id')) mock_sr = self.mock_object(self.client, 'send_request') self.client._add_nfs_export_rule('fake_policy', 'fakecl', False, ['rw']) mockpid.assert_called_once_with('fake_policy') body = { 'clients': [{'match': 'fakecl'}], 'ro_rule': ['rw'], 'rw_rule': ['rw'], 'superuser': ['rw'] } mock_sr.assert_called_once_with( '/protocols/nfs/export-policies/fake_policy_id/rules', 'post', body=body) def test__update_nfs_export_rule(self): fake_body = { 'client_match': 'fake_cli', 'ro_rule': ['rw'], 'rw_rule': ['rw'], 'superuser': ['rw'] } mockpid = self.mock_object(self.client, 'get_unique_export_policy_id', mock.Mock(return_value='fake_policy_id')) mock_sr = self.mock_object(self.client, 'send_request') self.client._update_nfs_export_rule('fake_policy', 'fake_cli', False, '0', ['rw']) mockpid.assert_called_once_with('fake_policy') mock_sr.assert_called_once_with( '/protocols/nfs/export-policies/fake_policy_id/rules/0', 'patch', body=fake_body) def test__remove_nfs_export_rules(self): fake_body = { 'index': 0 } mockpid = self.mock_object(self.client, 'get_unique_export_policy_id', mock.Mock(return_value='fake_policy_id')) mock_sr = self.mock_object(self.client, 'send_request') self.client._remove_nfs_export_rules('fake_policy', [0]) 
mockpid.assert_called_once_with('fake_policy') mock_sr.assert_called_once_with( '/protocols/nfs/export-policies/fake_policy_id/rules/0', 'delete', body=fake_body) def test_modify_cifs_share_access(self): return_uuid = fake.GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_uuid)) self.client.modify_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME, 'read') fake_user = 'fake_user' FAKE_CIFS_USER_GROUP_TYPE = 'windows' fake_uuid = 'fake_uuid' fake_share = fake.SHARE_NAME query = {'name': 'fake_share'} body = {'permission': 'read'} self.client.send_request.assert_has_calls([ mock.call('/protocols/cifs/shares', 'get', query=query), mock.call(f'/protocols/cifs/shares/{fake_uuid}/{fake_share}' f'/acls/{fake_user}/{FAKE_CIFS_USER_GROUP_TYPE}', 'patch', body=body)]) def test_add_cifs_share_access(self): return_uuid = fake.GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_uuid)) self.client.add_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME, 'read') fake_uuid = "fake_uuid" query = {'name': 'fake_share'} body = {'permission': 'read', 'user_or_group': 'fake_user'} self.client.send_request.assert_has_calls([ mock.call('/protocols/cifs/shares', 'get', query=query), mock.call(f'/protocols/cifs/shares/{fake_uuid}/{fake.SHARE_NAME}' '/acls', 'post', body=body)]) def test_get_cifs_share_access_rules_empty(self): return_uuid = fake.GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_uuid)) def test_get_cifs_share_access_rules_not_empty(self): return_uuid = fake.GENERIC_EXPORT_POLICY_RESPONSE_AND_VOLUMES self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_uuid)) rules = {} fake_results = fake.FAKE_CIFS_RECORDS for record in fake_results['records']: user_or_group = record['user_or_group'] permission = record['permission'] rules[user_or_group] = permission 
def test_mount_volume_with_junction_path(self): volume_name = fake.SHARE_NAME junction_path = '/fake_path' volume = fake.VOLUME self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request') self.client.mount_volume(volume_name, junction_path=junction_path) uuid = volume['uuid'] body = { 'nas.path': junction_path } self.client.send_request.assert_called_once_with( f'/storage/volumes/{uuid}', 'patch', body=body) def test_mount_volume_with_volume_name(self): volume_name = fake.SHARE_NAME volume = fake.VOLUME self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request') self.client.mount_volume(volume_name) uuid = volume['uuid'] body = { 'nas.path': '/%s' % volume_name } self.client.send_request.assert_called_once_with( f'/storage/volumes/{uuid}', 'patch', body=body) def test_set_volume_name(self): volume_name = fake.SHARE_NAME new_volume_name = 'fake_name' volume = fake.VOLUME self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request') self.client.set_volume_name(volume_name, new_volume_name) uuid = volume['uuid'] body = { 'name': new_volume_name } self.client.send_request.assert_called_once_with( f'/storage/volumes/{uuid}', 'patch', body=body) def test_get_job(self): mock_sr = self.mock_object(self.client, 'send_request') self.client.get_job('fake_job_uuid') mock_sr.assert_called_once_with('/cluster/jobs/fake_job_uuid', 'get', enable_tunneling=False) @ddt.data(netapp_api.EREST_VSERVER_NOT_FOUND, 'fake') def test_vserver_exists_exception(self, er): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error(code=er))) if er == netapp_api.EREST_VSERVER_NOT_FOUND: result = self.client.vserver_exists(fake.VSERVER_NAME) self.assertFalse(result) else: self.assertRaises(netapp_api.api.NaApiError, self.client.vserver_exists, 
fake.VSERVER_NAME) def test__get_aggregate_disk_types(self): response = fake.FAKE_DISK_TYPE_RESPONSE aggr = fake.SHARE_AGGREGATE_NAME mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) query = { 'aggregates.name': aggr, 'fields': 'effective_type' } expected = {'fakedisk'} result = self.client._get_aggregate_disk_types(aggr) mock_sr.assert_called_once_with('/storage/disks', 'get', query=query) self.assertEqual(expected, result) def test__get_aggregate_disk_types_exception(self): aggr = fake.SHARE_AGGREGATE_NAME self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) result = self.client._get_aggregate_disk_types(aggr) self.assertEqual(set(), result) def test_create_nfs_export_policy_exception(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(netapp_api.api.NaApiError, self.client.create_nfs_export_policy, fake.EXPORT_POLICY_NAME) @ddt.data(True, False) def test__get_create_volume_body(self, thin_provisioned): expected = { 'type': 'fake_type', 'guarantee.type': ('none' if thin_provisioned else 'volume'), 'nas.path': '/%s' % fake.SHARE_MOUNT_POINT, 'snapshot_policy.name': fake.SNAPSHOT_POLICY_NAME, 'language': 'fake_language', 'space.snapshot.reserve_percent': 'fake_percent', 'qos.policy.name': fake.QOS_POLICY_GROUP_NAME, 'svm.name': 'fake_vserver', 'encryption.enabled': 'true', 'snaplock.type': 'compliance', } self.mock_object(self.client.connection, 'get_vserver', mock.Mock(return_value='fake_vserver')) res = self.client._get_create_volume_body(fake.VOLUME_NAMES[0], thin_provisioned, fake.SNAPSHOT_POLICY_NAME, 'fake_language', 'fake_percent', 'fake_type', fake.QOS_POLICY_GROUP_NAME, True, fake.QOS_POLICY_GROUP_NAME, fake.SHARE_MOUNT_POINT, "compliance") self.assertEqual(expected, res) def test_get_job_state(self): expected = 'success' query = { 'uuid': 'fake_uuid', 'fields': 'state' } response = { 'records': 
[fake.JOB_SUCCESSFUL_REST] } mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) result = self.client.get_job_state('fake_uuid') mock_sr.assert_called_once_with('/cluster/jobs/', 'get', query=query, enable_tunneling=False) self.assertEqual(expected, result) def test_get_job_state_not_found(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_has_records', mock.Mock(return_value=False)) self.assertRaises(exception.NetAppException, self.client.get_job_state, 'fake_uuid') def test_update_volume_snapshot_policy(self): return_uuid = { 'uuid': 'fake_uuid' } mock_get_vol = self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=return_uuid)) mock_sr = self.mock_object(self.client, 'send_request') self.client.update_volume_snapshot_policy('fake_volume_name', fake.SNAPSHOT_POLICY_NAME) body = { 'snapshot_policy.name': fake.SNAPSHOT_POLICY_NAME } mock_sr.assert_called_once_with('/storage/volumes/fake_uuid', 'patch', body=body) mock_get_vol.assert_called_once_with(vol_name='fake_volume_name') @ddt.data(True, False) def test_update_volume_efficiency_attributes(self, status): response = { 'dedupe': not status, 'compression': not status } self.mock_object(self.client, 'get_volume_efficiency_status', mock.Mock(return_value=response)) en_dedupe = self.mock_object(self.client, 'enable_dedupe_async') dis_dedupe = self.mock_object(self.client, 'disable_dedupe_async') en_comp = self.mock_object(self.client, 'enable_compression_async') dis_comp = self.mock_object(self.client, 'disable_compression_async') apply_efficiency_policy = self.mock_object( self.client, 'apply_volume_efficiency_policy' ) self.client.update_volume_efficiency_attributes( fake.VOLUME_NAMES[0], status, status, efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME) if status: en_dedupe.assert_called_once_with(fake.VOLUME_NAMES[0]) 
en_comp.assert_called_once_with(fake.VOLUME_NAMES[0]) apply_efficiency_policy.assert_called_once_with( fake.VOLUME_NAMES[0], efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME ) else: dis_dedupe.assert_called_once_with(fake.VOLUME_NAMES[0]) dis_comp.assert_called_once_with(fake.VOLUME_NAMES[0]) apply_efficiency_policy.assert_called_once_with( fake.VOLUME_NAMES[0], efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME ) def test_trigger_volume_move_cutover(self): query = { 'name': fake.VOLUME_NAMES[0] } body = { 'movement.state': 'cutover' } self.mock_object(self.client, 'send_request') self.client.trigger_volume_move_cutover( fake.VOLUME_NAMES[0], fake.VSERVER_NAME) self.client.send_request.assert_called_once_with( '/storage/volumes/', 'patch', query=query, body=body) def test_abort_volume_move(self): return_uuid = { 'uuid': 'fake_uuid' } mock_get_vol = self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=return_uuid)) mock_sr = self.mock_object(self.client, 'send_request') self.client.abort_volume_move('fake_volume_name', 'fake_vserver') mock_sr.assert_called_once_with('/storage/volumes/fake_uuid', 'patch') mock_get_vol.assert_called_once_with(vol_name='fake_volume_name') def test_get_volume_move_status(self): """Gets the current state of a volume move operation.""" return_sr = fake.FAKE_VOL_MOVE_STATUS fields = 'movement.percent_complete,movement.state' query = { 'name': 'fake_name', 'svm.name': 'fake_svm', 'fields': fields } mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_sr)) result = self.client.get_volume_move_status('fake_name', 'fake_svm') mock_sr.assert_called_once_with('/storage/volumes/', 'get', query=query) volume_move_info = return_sr.get('records')[0] volume_movement = volume_move_info['movement'] expected = { 'percent-complete': volume_movement['percent_complete'], 'estimated-completion-time': '', 'state': volume_movement['state'], 'details': '', 'cutover-action': '', 'phase': 
volume_movement['state'], } self.assertEqual(expected, result) def test_list_snapmirror_snapshots(self): fake_response = fake.SNAPSHOTS_REST_RESPONSE api_response = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST mock_volume = self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=api_response)) mock_request = self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake_response)) self.client.list_snapmirror_snapshots(fake.VOLUME_NAMES[0]) query = { 'owners': 'snapmirror_dependent', } mock_request.assert_called_once_with( '/storage/volumes/fake_uuid/snapshots/', 'get', query=query) mock_volume.assert_called_once_with(vol_name=fake.VOLUME_NAMES[0]) @ddt.data({'policy': 'fake_policy'}, {'policy': None}) @ddt.unpack def test_create_snapmirror_vol(self, policy): api_responses = [ { "job": { "uuid": fake.FAKE_UUID, }, }, ] self.mock_object(self.client, 'send_request', mock.Mock(side_effect=copy.deepcopy(api_responses))) self.client.create_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, relationship_type=netapp_utils.EXTENDED_DATA_PROTECTION_TYPE, policy=policy) body = { 'source': { 'path': (fake.SM_SOURCE_VSERVER + ':' + fake.SM_SOURCE_VOLUME), }, 'destination': { 'path': (fake.SM_DEST_VSERVER + ':' + fake.SM_DEST_VOLUME) } } if policy: body['policy.name'] = policy self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/', 'post', body=body)]) def test_create_snapmirror_vol_already_exists(self): api_responses = netapp_api.api.NaApiError( code=netapp_api.EREST_ERELATION_EXISTS) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=api_responses)) response = self.client.create_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, schedule=None, policy=None, relationship_type='data_protection') self.assertIsNone(response) self.assertTrue(self.client.send_request.called) def 
test_create_snapmirror_vol_error(self): self.mock_object( self.client, 'send_request', mock.Mock(side_effect=netapp_api.api.NaApiError(code=123))) self.assertRaises(netapp_api.api.NaApiError, self.client.create_snapmirror_vol, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, schedule=None, policy=None, relationship_type='data_protection') self.assertTrue(self.client.send_request.called) def test__set_snapmirror_state(self): api_responses = [ fake.SNAPMIRROR_GET_ITER_RESPONSE_REST, { "job": { "uuid": fake.FAKE_UUID }, "num_records": 1 } ] expected_body = {'state': 'snapmirrored'} self.mock_object(self.client, 'send_request', mock.Mock(side_effect=copy.deepcopy(api_responses))) result = self.client._set_snapmirror_state( 'snapmirrored', None, None, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/' + fake.FAKE_UUID, 'patch', body=expected_body, wait_on_accepted=True)]) expected = { 'operation-id': None, 'status': None, 'jobid': fake.FAKE_UUID, 'error-code': None, 'error-message': None, 'relationship-uuid': fake.FAKE_UUID } self.assertEqual(expected, result) def test_initialize_snapmirror_vol(self): expected_job = { 'operation-id': None, 'status': None, 'jobid': fake.FAKE_UUID, 'error-code': None, 'error-message': None, } mock_set_snapmirror_state = self.mock_object( self.client, '_set_snapmirror_state', mock.Mock(return_value=expected_job)) result = self.client.initialize_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) mock_set_snapmirror_state.assert_called_once_with( 'snapmirrored', None, None, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, wait_result=False) self.assertEqual(expected_job, result) def test_modify_snapmirror_vol(self): expected_job = { 'operation-id': None, 'status': None, 'jobid': 
fake.FAKE_UUID, 'error-code': None, 'error-message': None, } mock_set_snapmirror_state = self.mock_object( self.client, '_set_snapmirror_state', mock.Mock(return_value=expected_job)) result = self.client.modify_snapmirror_vol( fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, None) mock_set_snapmirror_state.assert_called_once_with( None, None, None, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME, wait_result=False, schedule=None) self.assertEqual(expected_job, result) def test__abort_snapmirror(self): return_snp = fake.REST_GET_SNAPMIRRORS_RESPONSE mock_get_snap = self.mock_object(self.client, '_get_snapmirrors', mock.Mock(return_value=return_snp)) return_sr = fake.REST_SIMPLE_RESPONSE mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_sr)) self.client._abort_snapmirror(fake.SM_SOURCE_PATH, fake.SM_DEST_PATH) mock_get_snap.assert_called_once_with( source_path=fake.SM_SOURCE_PATH, dest_path=fake.SM_DEST_PATH, source_vserver=None, source_volume=None, dest_vserver=None, dest_volume=None, enable_tunneling=None, list_destinations_only=None) mock_sr.assert_has_calls([ mock.call(f'/snapmirror/relationships/{return_snp[0]["uuid"]}' '/transfers/', 'get', query={'state': 'transferring'}), mock.call(f'/snapmirror/relationships/{return_snp[0]["uuid"]}' f'/transfers/{return_sr["records"][0]["uuid"]}', 'patch', body={'state': 'aborted'}), ]) def test_abort_snapmirror_vol(self): mock_abort = self.mock_object(self.client, '_abort_snapmirror') self.client.abort_snapmirror_vol(fake.VSERVER_NAME, fake.VOLUME_NAMES[0], fake.VSERVER_NAME_2, fake.VOLUME_NAMES[1]) mock_abort.assert_called_once_with(source_vserver=fake.VSERVER_NAME, source_volume=fake.VOLUME_NAMES[0], dest_vserver=fake.VSERVER_NAME_2, dest_volume=fake.VOLUME_NAMES[1], clear_checkpoint=False) def test_release_snapmirror_vol(self): mock_sr = self.mock_object(self.client, 'send_request') return_snp = 
fake.REST_GET_SNAPMIRRORS_RESPONSE mock_sd = self.mock_object(self.client, 'get_snapmirror_destinations', mock.Mock(return_value=return_snp)) self.client.release_snapmirror_vol(fake.VSERVER_NAME, fake.VOLUME_NAMES[0], fake.VSERVER_NAME_2, fake.VOLUME_NAMES[1]) mock_sd.assert_called_once_with(source_vserver=fake.VSERVER_NAME, source_volume=fake.VOLUME_NAMES[0], dest_vserver=fake.VSERVER_NAME_2, dest_volume=fake.VOLUME_NAMES[1], desired_attributes=['relationship-id']) uuid = return_snp[0].get("uuid") query = {"source_only": 'true'} mock_sr.assert_called_once_with(f'/snapmirror/relationships/{uuid}', 'delete', query=query) def test_delete_snapmirror_no_records(self): query_uuid = {} query_uuid['source.path'] = (fake.SM_SOURCE_VSERVER + ':' + fake.SM_SOURCE_VOLUME) query_uuid['destination.path'] = (fake.SM_DEST_VSERVER + ':' + fake.SM_DEST_VOLUME) query_uuid['fields'] = 'uuid' self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.NO_RECORDS_RESPONSE_REST)) self.client._delete_snapmirror(fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) self.client.send_request.assert_called_once_with( '/snapmirror/relationships/', 'get', query=query_uuid) def test_delete_snapmirror(self): query_uuid = {} query_uuid['source.path'] = (fake.SM_SOURCE_VSERVER + ':' + fake.SM_SOURCE_VOLUME) query_uuid['destination.path'] = (fake.SM_DEST_VSERVER + ':' + fake.SM_DEST_VOLUME) query_uuid['fields'] = 'uuid' fake_cluster = fake.FAKE_GET_CLUSTER_NODE_VERSION_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake_cluster)) self.client._delete_snapmirror(fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) query_delete = {"destination_only": "true"} snapmirror_uuid = fake_cluster.get('records')[0].get('uuid') self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/', 'get', query=query_uuid), mock.call('/snapmirror/relationships/' + snapmirror_uuid, 
'delete', query=query_delete) ]) def test_get_snapmirror_destinations(self): mock_get_sm = self.mock_object(self.client, '_get_snapmirrors') self.client.get_snapmirror_destinations(fake.SM_SOURCE_PATH, fake.SM_DEST_PATH, fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) mock_get_sm.assert_called_once_with( source_path=fake.SM_SOURCE_PATH, dest_path=fake.SM_DEST_PATH, source_vserver=fake.SM_SOURCE_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_vserver=fake.SM_DEST_VSERVER, dest_volume=fake.SM_DEST_VOLUME, enable_tunneling=False, list_destinations_only=True) def test_delete_snapmirror_vol(self): mock_delete = self.mock_object(self.client, '_delete_snapmirror') self.client.delete_snapmirror_vol(fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) mock_delete.assert_called_once_with( source_vserver=fake.SM_SOURCE_VSERVER, dest_vserver=fake.SM_DEST_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_volume=fake.SM_DEST_VOLUME) def test_disable_fpolicy_policy(self): query = { 'name': fake.VSERVER_NAME, 'fields': 'uuid' } response_svm = fake.SVMS_LIST_SIMPLE_RESPONSE_REST self.client.vserver = fake.VSERVER_NAME self.mock_object(self.client, 'send_request', mock.Mock(side_effect=[response_svm, None])) self.client.disable_fpolicy_policy(fake.FPOLICY_POLICY_NAME) svm_id = response_svm.get('records')[0]['uuid'] self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query=query, enable_tunneling=False), mock.call(f'/protocols/fpolicy/{svm_id}/policies' f'/{fake.FPOLICY_POLICY_NAME}', 'patch') ]) @ddt.data([fake.NO_RECORDS_RESPONSE_REST, None], [fake.SVMS_LIST_SIMPLE_RESPONSE_REST, netapp_api.api.NaApiError(code="1000", message="")]) def test_disable_fpolicy_policy_failure(self, side_effect): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=side_effect)) self.assertRaises(exception.NetAppException, self.client.disable_fpolicy_policy, fake.FPOLICY_POLICY_NAME) 
@ddt.data({'qos_policy_group_name': None, 'adaptive_qos_policy_group_name': None}, {'qos_policy_group_name': fake.QOS_POLICY_GROUP_NAME, 'adaptive_qos_policy_group_name': None}, {'qos_policy_group_name': None, 'adaptive_qos_policy_group_name': fake.ADAPTIVE_QOS_POLICY_GROUP_NAME}, {'mount_point_name': None}, ) @ddt.unpack def test_create_volume_clone(self, qos_policy_group_name=None, adaptive_qos_policy_group_name=None, mount_point_name=None): self.mock_object(self.client, 'send_request') if qos_policy_group_name: volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'volume_clone_split_start') self.mock_object( self.client.connection, 'get_vserver', mock.Mock(return_value='fake_svm')) set_qos_adapt_mock = self.mock_object( self.client, 'set_qos_adaptive_policy_group_for_volume') self.client.create_volume_clone( fake.SHARE_NAME, fake.PARENT_SHARE_NAME, fake.PARENT_SNAPSHOT_NAME, mount_point_name=mount_point_name, qos_policy_group=qos_policy_group_name, adaptive_qos_policy_group=adaptive_qos_policy_group_name) body = { 'name': fake.SHARE_NAME, 'clone.parent_volume.name': fake.PARENT_SHARE_NAME, 'clone.parent_snapshot.name': fake.PARENT_SNAPSHOT_NAME, 'nas.path': '/%s' % (mount_point_name or fake.SHARE_NAME), 'clone.is_flexclone': 'true', 'svm.name': 'fake_svm', } if adaptive_qos_policy_group_name is not None: set_qos_adapt_mock.assert_called_once_with( fake.SHARE_NAME, fake.ADAPTIVE_QOS_POLICY_GROUP_NAME ) if qos_policy_group_name: self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.SHARE_NAME) self.client.send_request.assert_has_calls([ mock.call('/storage/volumes', 'post', body=body), mock.call(f'/storage/volumes/{uuid}', 'patch', body={'qos.policy.name': qos_policy_group_name}) ]) else: self.client.send_request.assert_called_once_with( '/storage/volumes', 'post', body=body) 
self.assertFalse(self.client.volume_clone_split_start.called) @ddt.data(True, False) def test_create_volume_split(self, split): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'volume_clone_split_start') self.mock_object( self.client.connection, 'get_vserver', mock.Mock(return_value='fake_svm')) body = { 'name': fake.SHARE_NAME, 'clone.parent_volume.name': fake.PARENT_SHARE_NAME, 'clone.parent_snapshot.name': fake.PARENT_SNAPSHOT_NAME, 'nas.path': '/%s' % fake.SHARE_NAME, 'clone.is_flexclone': 'true', 'svm.name': 'fake_svm', } self.client.create_volume_clone( fake.SHARE_NAME, fake.PARENT_SHARE_NAME, fake.PARENT_SNAPSHOT_NAME, split=split) self.assertFalse(self.client.volume_clone_split_start.called) self.client.send_request.assert_called_once_with( '/storage/volumes', 'post', body=body) def test_quiesce_snapmirror_vol(self): mock__quiesce_snapmirror = self.mock_object( self.client, '_quiesce_snapmirror') self.client.quiesce_snapmirror_vol(fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) mock__quiesce_snapmirror.assert_called_once_with( source_vserver=fake.SM_SOURCE_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_vserver=fake.SM_DEST_VSERVER, dest_volume=fake.SM_DEST_VOLUME) def test__quiesce_snapmirror(self): fake_snapmirror = fake.REST_GET_SNAPMIRRORS_RESPONSE fake_uuid = fake_snapmirror[0]['uuid'] fake_body = {'state': 'paused'} self.mock_object(self.client, 'send_request') mock_get_snap = self.mock_object( self.client, '_get_snapmirrors', mock.Mock(return_value=fake_snapmirror)) self.client._quiesce_snapmirror() mock_get_snap.assert_called_once() self.client.send_request.assert_called_once_with( f'/snapmirror/relationships/{fake_uuid}', 'patch', body=fake_body) def test_break_snapmirror_vol(self): self.mock_object(self.client, '_break_snapmirror') self.client.break_snapmirror_vol(source_vserver=fake.SM_SOURCE_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_vserver=fake.SM_DEST_VSERVER, 
dest_volume=fake.SM_DEST_VOLUME) self.client._break_snapmirror.assert_called_once_with( source_vserver=fake.SM_SOURCE_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_vserver=fake.SM_DEST_VSERVER, dest_volume=fake.SM_DEST_VOLUME) def test__break_snapmirror(self): fake_snapmirror = fake.REST_GET_SNAPMIRRORS_RESPONSE fake_uuid = fake_snapmirror[0]['uuid'] fake_body = {'state': 'broken_off'} self.mock_object(self.client, 'send_request') mock_get_snap = self.mock_object( self.client, '_get_snapmirrors', mock.Mock(return_value=fake_snapmirror)) self.client._break_snapmirror() mock_get_snap.assert_called_once() self.client.send_request.assert_called_once_with( f'/snapmirror/relationships/{fake_uuid}', 'patch', body=fake_body) def test_resume_snapmirror_vol(self): mock = self.mock_object(self.client, '_resume_snapmirror') self.client.resume_snapmirror_vol(fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) mock.assert_called_once_with( source_vserver=fake.SM_SOURCE_VSERVER, dest_vserver=fake.SM_DEST_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_volume=fake.SM_DEST_VOLUME) def test_resync_snapmirror_vol(self): mock = self.mock_object(self.client, '_resync_snapmirror') self.client.resync_snapmirror_vol(fake.SM_SOURCE_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VSERVER, fake.SM_DEST_VOLUME) mock.assert_called_once_with( source_vserver=fake.SM_SOURCE_VSERVER, dest_vserver=fake.SM_DEST_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_volume=fake.SM_DEST_VOLUME) @ddt.data('async', 'sync') def test__resume_snapmirror(self, snapmirror_policy): api_response = copy.deepcopy(fake.REST_GET_SNAPMIRRORS_RESPONSE) api_response[0]['policy-type'] = snapmirror_policy mock_snapmirror = self.mock_object( self.client, '_get_snapmirrors', mock.Mock(return_value=api_response)) mock_request = self.mock_object(self.client, 'send_request') snapmirror_uuid = fake.FAKE_UUID body_resync = {} if snapmirror_policy == 'async': body_resync['state'] = 
'snapmirrored' elif snapmirror_policy == 'sync': body_resync['state'] = 'in_sync' self.client._resume_snapmirror(fake.SM_SOURCE_PATH, fake.SM_DEST_PATH) mock_request.assert_called_once_with('/snapmirror/relationships/' + snapmirror_uuid, 'patch', body=body_resync, wait_on_accepted=False) mock_snapmirror.assert_called_once_with( source_path=fake.SM_SOURCE_PATH, dest_path=fake.SM_DEST_PATH, source_vserver=None, source_volume=None, dest_vserver=None, dest_volume=None, enable_tunneling=None, list_destinations_only=None) def test__resync_snapmirror(self): mock = self.mock_object(self.client, '_resume_snapmirror') self.client._resume_snapmirror(fake.SM_SOURCE_PATH, fake.SM_DEST_PATH) mock.assert_called_once_with(fake.SM_SOURCE_PATH, fake.SM_DEST_PATH) def test_add_nfs_export_rule(self): mock_get_nfs_export_rule_indices = self.mock_object( self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=[])) mock_add_nfs_export_rule = self.mock_object( self.client, '_add_nfs_export_rule') mock_update_nfs_export_rule = self.mock_object( self.client, '_update_nfs_export_rule') auth_methods = ['sys'] self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, auth_methods) mock_get_nfs_export_rule_indices.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS) mock_add_nfs_export_rule.assert_called_once_with( fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, auth_methods) self.assertFalse(mock_update_nfs_export_rule.called) def test_set_qos_policy_group_for_volume(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST mock_get_volume = self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_send_request = self.mock_object( self.client, 'send_request') self.client.set_qos_policy_group_for_volume( volume['name'], fake.QOS_POLICY_GROUP_NAME) mock_get_volume.assert_called_once_with(vol_name=volume['name']) body = {'qos.policy.name': fake.QOS_POLICY_GROUP_NAME} mock_send_request.assert_called_once_with( 
f'/storage/volumes/{volume["uuid"]}', 'patch', body=body) def test__update_snapmirror(self): api_response = copy.deepcopy(fake.REST_GET_SNAPMIRRORS_RESPONSE) mock_snapmirror = self.mock_object( self.client, '_get_snapmirrors', mock.Mock(return_value=api_response)) mock_sr = self.mock_object(self.client, 'send_request') self.client._update_snapmirror(fake.SM_SOURCE_PATH, fake.SM_DEST_PATH, fake.SM_SOURCE_VSERVER, fake.SM_DEST_VSERVER, fake.SM_SOURCE_VOLUME, fake.SM_DEST_VOLUME) mock_sr.assert_called_once() mock_snapmirror.assert_called_once_with( source_path=fake.SM_SOURCE_PATH, dest_path=fake.SM_DEST_PATH, source_vserver=fake.SM_SOURCE_VSERVER, source_volume=fake.SM_SOURCE_VOLUME, dest_vserver=fake.SM_DEST_VSERVER, dest_volume=fake.SM_DEST_VOLUME, enable_tunneling=None, list_destinations_only=None) def test_get_cluster_name(self): """Get all available cluster nodes.""" return_value = fake.FAKE_GET_CLUSTER_NODE_VERSION_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) test_result = self.client.get_cluster_name() self.client.send_request.assert_called_once_with( '/cluster', 'get', enable_tunneling=False ) expected_result = return_value.get('name') self.assertEqual(test_result, expected_result) @ddt.data(True, False) def test_check_volume_clone_split_completed(self, clone): mock__get_volume_by_args = self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value={'clone': {'is_flexclone': clone}})) res = self.client.check_volume_clone_split_completed( fake.VOLUME_NAMES[0]) mock__get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0], fields='clone.is_flexclone') self.assertEqual(not clone, res) def test_rehost_volume(self): self.mock_object(self.client, 'send_request') self.client.rehost_volume("fake_vol", "fake_svm", "fake_svm_2") body = { "vserver": "fake_svm", "volume": "fake_vol", "destination_vserver": "fake_svm_2" } self.client.send_request.assert_called_once_with( 
"/private/cli/volume/rehost", 'post', body=body) def test_get_net_options(self): res = self.client.get_net_options() self.assertTrue(res['ipv6-enabled']) def test_set_qos_adaptive_policy_group_for_volume(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST mock_get_volume = self.mock_object( self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_send_request = self.mock_object( self.client, 'send_request') self.client.set_qos_adaptive_policy_group_for_volume( volume['name'], fake.QOS_POLICY_GROUP_NAME) mock_get_volume.assert_called_once_with(vol_name=volume['name']) body = {'qos.policy.name': fake.QOS_POLICY_GROUP_NAME} mock_send_request.assert_called_once_with( f'/storage/volumes/{volume["uuid"]}', 'patch', body=body) def test__list_vservers(self): api_response = fake.VSERVER_DATA_LIST_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._list_vservers() query = { 'fields': 'name', } self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query=query, enable_tunneling=False)]) self.assertListEqual( [fake.VSERVER_NAME, fake.VSERVER_NAME_2], result) def test_list_vservers_not_found(self): api_response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._list_vservers() self.assertListEqual([], result) def test_get_ems_log_destination_vserver(self): mock_list_vservers = self.mock_object( self.client, '_list_vservers', mock.Mock(return_value=[fake.VSERVER_NAME])) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with() self.assertEqual(fake.VSERVER_NAME, result) def test_get_ems_log_destination_vserver_not_found(self): mock_list_vservers = self.mock_object( self.client, '_list_vservers', mock.Mock(return_value=[])) self.assertRaises(exception.NotFound, self.client._get_ems_log_destination_vserver) 
mock_list_vservers.assert_called_once_with() def test_send_ems_log_message(self): message_dict = { 'computer-name': '25-dev-vm', 'event-source': 'Cinder driver NetApp_iSCSI_Cluster_direct', 'app-version': '20.1.0.dev|vendor|Linux-5.4.0-120-generic-x86_64', 'category': 'provisioning', 'log-level': '5', 'auto-support': 'false', 'event-id': '1', 'event-description': '{"pools": {"vserver": "vserver_name",' + '"aggregates": [], "flexvols": ["flexvol_01"]}}' } body = { 'computer_name': message_dict['computer-name'], 'event_source': message_dict['event-source'], 'app_version': message_dict['app-version'], 'category': message_dict['category'], 'severity': 'notice', 'autosupport_required': message_dict['auto-support'] == 'true', 'event_id': message_dict['event-id'], 'event_description': message_dict['event-description'], } self.mock_object(self.client, '_get_ems_log_destination_vserver', mock.Mock(return_value='vserver_name')) self.mock_object(self.client, 'send_request') self.client.send_ems_log_message(message_dict) self.client.send_request.assert_called_once_with( '/support/ems/application-logs', 'post', body=body) @ddt.data('cp_phase_times', 'domain_busy') def test_get_performance_counter_info(self, counter_name): response1 = fake.PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST response2 = fake.PERF_COUNTER_TABLE_ROWS_WAFL object_name = 'wafl' mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(side_effect=[response1, response2])) result = self.client.get_performance_counter_info(object_name, counter_name) expected = { 'name': 'cp_phase_times', 'base-counter': 'total_cp_msecs', 'labels': fake.PERF_COUNTER_TOTAL_CP_MSECS_LABELS_RESULT, } query1 = { 'counter_schemas.name': counter_name, 'fields': 'counter_schemas.*' } query2 = { 'counters.name': counter_name, 'fields': 'counters.*' } if counter_name == 'domain_busy': expected['name'] = 'domain_busy' expected['labels'] = ( fake.PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST) query1['counter_schemas.name'] = 
'domain_busy_percent' query2['counters.name'] = 'domain_busy_percent' self.assertEqual(expected, result) mock_send_request.assert_has_calls([ mock.call(f'/cluster/counter/tables/{object_name}', 'get', query=query1), mock.call(f'/cluster/counter/tables/{object_name}/rows', 'get', query=query2, enable_tunneling=False), ]) def test_get_performance_counter_info_not_found_rows(self): response1 = fake.PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST response2 = fake.NO_RECORDS_RESPONSE_REST object_name = 'wafl' counter_name = 'cp_phase_times' self.mock_object( self.client, 'send_request', mock.Mock(side_effect=[response1, response2])) result = self.client.get_performance_counter_info(object_name, counter_name) expected = { 'name': 'cp_phase_times', 'base-counter': 'total_cp_msecs', 'labels': [], } self.assertEqual(expected, result) def test_get_performance_instance_uuids(self): response = fake.PERF_COUNTER_TABLE_ROWS_WAFL mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=response)) object_name = 'wafl' result = self.client.get_performance_instance_uuids( object_name, fake.NODE_NAME) expected = [fake.NODE_NAME + ':wafl'] self.assertEqual(expected, result) query = { 'id': fake.NODE_NAME + ':*', } mock_send_request.assert_called_once_with( f'/cluster/counter/tables/{object_name}/rows', 'get', query=query, enable_tunneling=False) def test_get_performance_counters(self): response = fake.PERF_GET_INSTANCES_PROCESSOR_RESPONSE_REST mock_send_request = self.mock_object( self.client, 'send_request', mock.Mock(return_value=response)) instance_uuids = [ fake.NODE_NAME + ':processor0', fake.NODE_NAME + ':processor1', ] object_name = 'processor' counter_names = ['domain_busy', 'processor_elapsed_time'] rest_counter_names = ['domain_busy_percent', 'elapsed_time'] result = self.client.get_performance_counters(object_name, instance_uuids, counter_names) expected = fake.PERF_COUNTERS_PROCESSOR_EXPECTED self.assertEqual(expected, result) query = { 'id': 
'|'.join(instance_uuids), 'counters.name': '|'.join(rest_counter_names), 'fields': 'id,counter_table.name,counters.*', } mock_send_request.assert_called_once_with( f'/cluster/counter/tables/{object_name}/rows', 'get', query=query) def test__get_deleted_nfs_export_policies(self): api_response = fake.DELETED_EXPORT_POLICY_GET_ITER_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._get_deleted_nfs_export_policies() query = { 'name': 'deleted_manila_*', 'fields': 'name,svm.name', } self.assertSequenceEqual(fake.DELETED_EXPORT_POLICIES, result) self.client.send_request.assert_has_calls([ mock.call('/protocols/nfs/export-policies', 'get', query=query)]) def test_prune_deleted_nfs_export_policies(self): self.mock_object(self.client, '_get_deleted_nfs_export_policies', mock.Mock(return_value=fake.DELETED_EXPORT_POLICIES)) self.mock_object(self.client, 'delete_nfs_export_policy') self.client.prune_deleted_nfs_export_policies() self.assertTrue(self.client.delete_nfs_export_policy.called) self.client.delete_nfs_export_policy.assert_has_calls([ mock.call(fake.DELETED_EXPORT_POLICIES[fake.VSERVER_NAME][0]), mock.call(fake.DELETED_EXPORT_POLICIES[fake.VSERVER_NAME][1]), mock.call(fake.DELETED_EXPORT_POLICIES[fake.VSERVER_NAME_2][0]), ]) def test_prune_deleted_nfs_export_policies_api_error(self): self.mock_object(self.client, '_get_deleted_nfs_export_policies', mock.Mock(return_value=fake.DELETED_EXPORT_POLICIES)) self.mock_object(self.client, 'delete_nfs_export_policy', self._mock_api_error()) self.client.prune_deleted_nfs_export_policies() def test__get_security_key_manager_nve_support_enabled(self): api_response = fake.SECUTITY_KEY_MANAGER_SUPPORT_RESPONSE_TRUE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._get_security_key_manager_nve_support() self.assertTrue(result) query = {'fields': 'volume_encryption.*'} 
self.client.send_request.assert_has_calls([ mock.call('/security/key-managers', 'get', query=query)]) def test__get_security_key_manager_nve_support_disabled(self): api_response = fake.SECUTITY_KEY_MANAGER_SUPPORT_RESPONSE_FALSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client._get_security_key_manager_nve_support() self.assertFalse(result) query = {'fields': 'volume_encryption.*'} self.client.send_request.assert_has_calls([ mock.call('/security/key-managers', 'get', query=query)]) def test__get_security_key_manager_nve_support_no_records(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.NO_RECORDS_RESPONSE_REST)) result = self.client._get_security_key_manager_nve_support() self.assertFalse(result) query = {'fields': 'volume_encryption.*'} self.client.send_request.assert_has_calls([ mock.call('/security/key-managers', 'get', query=query)]) def test__get_security_key_manager_nve_support_no_license(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) result = self.client._get_security_key_manager_nve_support() self.assertFalse(result) query = {'fields': 'volume_encryption.*'} self.client.send_request.assert_has_calls([ mock.call('/security/key-managers', 'get', query=query)]) def test_get_nfs_config_default(self): api_response = fake.NFS_CONFIG_DEFAULT_RESULT_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_nfs_config_default(['tcp-max-xfer-size', 'udp-max-xfer-size']) expected = { 'tcp-max-xfer-size': '65536', 'udp-max-xfer-size': '32768', } self.assertEqual(expected, result) query = {'fields': 'transport.*'} self.client.send_request.assert_called_once_with( '/protocols/nfs/services/', 'get', query=query) def test_get_kerberos_service_principal_name(self): spn = self.client._get_kerberos_service_principal_name( fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME ) 
self.assertEqual(fake.KERBEROS_SERVICE_PRINCIPAL_NAME, spn) def test_get_cifs_server_name(self): expected_return = 'FAKE-VSE-SERVER' cifs_server = self.client._get_cifs_server_name(fake.VSERVER_NAME) self.assertEqual(expected_return, cifs_server) def test_list_network_interfaces(self): api_response = fake.GENERIC_NETWORK_INTERFACES_GET_REPONSE expected_result = [fake.LIF_NAME] self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) fake_query = { 'fields': 'name' } result = self.client.list_network_interfaces() self.client.send_request.assert_has_calls([ mock.call('/network/ip/interfaces', 'get', query=fake_query)]) self.assertEqual(expected_result, result) def test_create_kerberos_realm(self): fake_security = fake.KERBEROS_SECURITY_SERVICE fake_body = { 'comment': '', 'kdc.ip': fake_security['server'], 'kdc.port': '88', 'kdc.vendor': 'other', 'name': fake_security['domain'].upper(), } self.mock_object(self.client, 'send_request') self.client.create_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE) self.client.send_request.assert_called_once_with( '/protocols/nfs/kerberos/realms', 'post', body=fake_body) def test_configure_kerberos(self): fake_api_response = fake.NFS_LIFS_REST fake_security = fake.KERBEROS_SECURITY_SERVICE fake_keberos_name = fake.KERBEROS_SERVICE_PRINCIPAL_NAME fake_body = { 'password': fake_security['password'], 'user': fake_security['user'], 'interface.name': fake.LIF_NAME, 'enabled': True, 'spn': fake_keberos_name } self.mock_object(self.client, 'configure_dns') self_get_kerberos = self.mock_object( self.client, '_get_kerberos_service_principal_name', mock.Mock(return_value=fake_keberos_name)) self.mock_object(self.client, 'get_network_interfaces', mock.Mock(return_value=fake_api_response)) self.mock_object(self.client, 'send_request') self.client.configure_kerberos(fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME) 
self.client.configure_dns.assert_called_once_with( fake.KERBEROS_SECURITY_SERVICE, vserver_name=fake.VSERVER_NAME) self_get_kerberos.assert_called_once_with( fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME) self.client.get_network_interfaces.assert_called_once_with() self.client.send_request.assert_has_calls([ mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_1', 'patch', body=fake_body), mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_2', 'patch', body=fake_body), mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_3', 'patch', body=fake_body) ]) @ddt.data(fake.CIFS_SECURITY_SERVICE, fake.CIFS_SECURITY_SERVICE_3) def test_configure_active_directory(self, security_service): fake_security = copy.deepcopy(security_service) fake_body1 = { 'ad_domain.user': fake_security['user'], 'ad_domain.password': fake_security['password'], 'force': 'true', 'name': 'FAKE-VSE-SERVER', 'ad_domain.fqdn': fake_security['domain'], } self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'set_preferred_dc') self.mock_object(self.client, 'configure_cifs_aes_encryption') self.mock_object(self.client, '_get_cifs_server_name', mock.Mock(return_value='FAKE-VSE-SERVER')) self.mock_object(self.client, 'send_request') self.client.configure_active_directory(fake_security, fake.VSERVER_NAME, False) self.client.configure_dns.assert_called_once_with( fake_security, vserver_name=fake.VSERVER_NAME) self.client.set_preferred_dc.assert_called_once_with( fake_security, fake.VSERVER_NAME) self.client.configure_cifs_aes_encryption.assert_called_once_with( fake.VSERVER_NAME, False) self.client._get_cifs_server_name.assert_called_once_with( fake.VSERVER_NAME) if fake_security['ou'] is not None: fake_body1['ad_domain.organizational_unit'] = fake_security['ou'] fake_body2 = fake_body1 self.client.send_request.assert_called_once_with( '/protocols/cifs/services', 'post', body=fake_body2) else: self.client.send_request.assert_called_once_with( '/protocols/cifs/services', 
'post', body=fake_body1) def test__create_ldap_client_ad(self): mock_dns = self.mock_object(self.client, 'configure_dns') mock_sr = self.mock_object(self.client, 'send_request') security_service = { 'domain': 'fake_domain', 'user': 'fake_user', 'ou': 'fake_ou', 'dns_ip': 'fake_ip', 'password': 'fake_password' } ad_domain = security_service.get('domain') body = { 'port': '389', 'schema': 'MS-AD-BIS', 'bind_dn': (security_service.get('user') + '@' + ad_domain), 'bind_password': security_service.get('password'), 'svm.name': fake.VSERVER_NAME, 'base_dn': security_service.get('ou'), 'ad_domain': security_service.get('domain'), } self.client._create_ldap_client(security_service, vserver_name=fake.VSERVER_NAME) mock_dns.assert_called_once_with(security_service) mock_sr.assert_called_once_with('/name-services/ldap', 'post', body=body) def test__create_ldap_client_linux(self): mock_dns = self.mock_object(self.client, 'configure_dns') mock_sr = self.mock_object(self.client, 'send_request') security_service = { 'server': 'fake_server', 'user': 'fake_user', 'ou': 'fake_ou', 'dns_ip': 'fake_ip' } body = { 'port': '389', 'schema': 'RFC-2307', 'bind_dn': security_service.get('user'), 'bind_password': security_service.get('password'), 'svm.name': fake.VSERVER_NAME, 'base_dn': security_service.get('ou'), 'servers': [security_service.get('server')] } self.client._create_ldap_client(security_service, vserver_name=fake.VSERVER_NAME) mock_dns.assert_called_once_with(security_service) mock_sr.assert_called_once_with('/name-services/ldap', 'post', body=body) def test_configure_dns_already_present(self): dns_config = { 'domains': [fake.KERBEROS_SECURITY_SERVICE['domain']], 'dns-ips': [fake.KERBEROS_SECURITY_SERVICE['dns_ip']], } self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value=dns_config)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.FAKE_VOL_MOVE_STATUS)) security_service = copy.deepcopy(fake.KERBEROS_SECURITY_SERVICE) 
self.client.configure_dns(security_service) net_dns_create_args = { 'domains': [security_service['domain']], 'servers': [security_service['dns_ip']], } uuid = fake.FAKE_VOL_MOVE_STATUS['records'][0]['uuid'] self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query={'name': None, 'fields': 'uuid'}), mock.call(f'/name-services/dns/{uuid}', 'patch', body=net_dns_create_args)]) def test_configure_dns_for_active_directory(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.FAKE_VOL_MOVE_STATUS)) self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value={})) security_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) self.client.configure_dns(security_service) net_dns_create_args = { 'domains': [security_service['domain']], 'servers': [security_service['dns_ip']], } self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query={'name': None, 'fields': 'uuid'}), mock.call('/name-services/dns', 'post', body=net_dns_create_args)]) def test_configure_dns_multiple_dns_ip(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.FAKE_VOL_MOVE_STATUS)) self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value={})) mock_dns_ips = '10.0.0.5, 10.0.0.6, 10.0.0.7' security_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) security_service['dns_ip'] = mock_dns_ips args_dns = {'domains': [security_service['domain']], 'servers': ['10.0.0.5', '10.0.0.6', '10.0.0.7']} self.client.configure_dns(security_service) self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query={'name': None, 'fields': 'uuid'}), mock.call('/name-services/dns', 'post', body=args_dns)]) def test_configure_dns_for_kerberos(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.FAKE_VOL_MOVE_STATUS)) self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value={})) security_service = copy.deepcopy(fake.KERBEROS_SECURITY_SERVICE) 
self.client.configure_dns(security_service) net_dns_create_args = { 'domains': [security_service['domain']], 'servers': [security_service['dns_ip']], } self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query={'name': None, 'fields': 'uuid'}), mock.call('/name-services/dns', 'post', body=net_dns_create_args)]) def test_configure_dns_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value={})) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value={})) self.assertRaises(exception.NetAppException, self.client.configure_dns, copy.deepcopy(fake.KERBEROS_SECURITY_SERVICE)) def test_get_dns_config_no_response(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=netapp_api.api.NaApiError)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value={})) self.assertRaises(exception.NetAppException, self.client.get_dns_config) def test_get_dns_config(self): api_response = fake.DNS_REST_RESPONSE self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) fake_uuid = fake.FAKE_VOL_MOVE_STATUS['records'][0]['svm']['uuid'] self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake_uuid)) result = self.client.get_dns_config() expected_result = { 'dns-state': 'true', 'domains': ['example.com', 'example2.example3.com'], 'dns-ips': ['10.224.65.20', '2001:db08:a0b:12f0::1'] } self.assertEqual(expected_result, result) self.client.send_request.assert_called_once_with( f'/name-services/dns/{fake_uuid}', 'get') @ddt.data(fake.LDAP_AD_SECURITY_SERVICE, fake.CIFS_SECURITY_SERVICE_3, fake.KERBEROS_SECURITY_SERVICE) def test_setup_security_services(self, security_service): fake_response = fake.FAKE_GET_CLUSTER_NODE_VERSION_REST mock_request = self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake_response)) 
self.mock_object(self.client, 'configure_ldap') self.mock_object(self.client, 'configure_active_directory') self.mock_object(self.client, 'configure_cifs_options') self.mock_object(self.client, 'create_kerberos_realm') self.mock_object(self.client, 'configure_kerberos') ss_copy = copy.deepcopy(security_service) self.client.setup_security_services([ss_copy], self.client, 'fake_vservername', False) uuid = fake_response.get('records')[0].get('uuid') body = { 'nsswitch.namemap': ['ldap', 'files'], 'nsswitch.group': ['ldap', 'files'], 'nsswitch.netgroup': ['ldap', 'files'], 'nsswitch.passwd': ['ldap', 'files'], } mock_request.assert_has_calls([ mock.call('/svm/svms', 'get', query={'name': 'fake_vservername', 'fields': 'uuid'}), mock.call(f'/svm/svms/{uuid}', 'patch', body=body)]) def test_modify_ldap_ad(self): fake_svm_uuid = fake.FAKE_UUID mock_svm_uuid = self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake_svm_uuid)) mock_sr = self.mock_object(self.client, 'send_request') security_service = { 'domain': 'fake_domain', 'user': 'fake_user', 'ou': 'fake_ou', 'dns_ip': 'fake_ip', 'password': 'fake_password' } ad_domain = security_service.get('domain') body = { 'port': '389', 'schema': 'MS-AD-BIS', 'bind_dn': (security_service.get('user') + '@' + ad_domain), 'bind_password': security_service.get('password'), 'base_dn': security_service.get('ou'), 'ad_domain': security_service.get('domain'), } self.client.modify_ldap(security_service, None) mock_svm_uuid.assert_called_once_with(None) mock_sr.assert_called_once_with(f'/name-services/ldap/{fake_svm_uuid}', 'patch', body=body) def test_modify_ldap_linux(self): fake_svm_uuid = fake.FAKE_UUID mock_svm_uuid = self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake_svm_uuid)) mock_sr = self.mock_object(self.client, 'send_request') security_service = { 'server': 'fake_server', 'user': 'fake_user', 'ou': 'fake_ou', 'dns_ip': 'fake_ip' } body = { 'port': '389', 'schema': 
'RFC-2307', 'bind_dn': security_service.get('user'), 'bind_password': security_service.get('password'), 'base_dn': security_service.get('ou'), 'servers': [security_service.get('server')] } self.client.modify_ldap(security_service, None) mock_svm_uuid.assert_called_once_with(None) mock_sr.assert_called_once_with(f'/name-services/ldap/{fake_svm_uuid}', 'patch', body=body) def test_update_kerberos_realm(self): self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) fake_uuid = fake.FAKE_UUID self.mock_object(self.client, 'send_request') self.client.update_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE) fake_domain = fake.KERBEROS_SECURITY_SERVICE['domain'] body = { 'kdc-ip': fake.KERBEROS_SECURITY_SERVICE['server'], } self.client.send_request.assert_has_calls([ mock.call( f'/protocols/nfs/kerberos/realms/{fake_uuid}/{fake_domain}', 'patch', body=body)]) def test__get_unique_svm_by_name(self): response = fake.SVMS_LIST_SIMPLE_RESPONSE_REST svm = fake.SVM_ITEM_SIMPLE_RESPONSE_REST['uuid'] fake_query = { 'name': fake.VSERVER_NAME, 'fields': 'uuid' } self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) result = self.client._get_unique_svm_by_name( fake.VSERVER_NAME) self.client.send_request.assert_called_once_with( '/svm/svms', 'get', query=fake_query) self.assertEqual(svm, result) def test_update_dns_configuration(self): dns_config = { 'domains': [fake.KERBEROS_SECURITY_SERVICE['domain']], 'dns-ips': [fake.KERBEROS_SECURITY_SERVICE['dns_ip']], } body = { 'domains': [fake.KERBEROS_SECURITY_SERVICE['domain']], 'servers': [fake.KERBEROS_SECURITY_SERVICE['dns_ip']] } fake_uuid = 'fake_uuid' self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value=dns_config)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake_uuid)) self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.FAKE_VOL_MOVE_STATUS)) 
self.client.configure_dns(fake.KERBEROS_SECURITY_SERVICE) body = { 'domains': [fake.KERBEROS_SECURITY_SERVICE['domain']], 'servers': [fake.KERBEROS_SECURITY_SERVICE['dns_ip']] } self.client.send_request.assert_called_once_with( f'/name-services/dns/{fake_uuid}', 'patch', body=body) def test_remove_preferred_dcs(self): svm_uuid = copy.deepcopy(fake.FAKE_UUID) fqdn = copy.deepcopy(fake.PREFERRED_DC_REST.get('fqdn')) server_ip = copy.deepcopy(fake.PREFERRED_DC_REST.get('server_ip')) fake_response = copy.deepcopy(fake.PREFERRED_DC_REST) fake_ss = copy.deepcopy(fake.LDAP_AD_SECURITY_SERVICE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake_response)) self.client.remove_preferred_dcs(fake_ss, svm_uuid) query = { 'fqdn': fake.LDAP_AD_SECURITY_SERVICE.get('domain'), } self.client.send_request.assert_has_calls([ mock.call(f'/protocols/cifs/domains/{svm_uuid}/' f'preferred-domain-controllers/', 'get'), mock.call(f'/protocols/cifs/domains/{svm_uuid}/' f'preferred-domain-controllers/{fqdn}/{server_ip}', 'delete', query=query) ]) def test_remove_preferred_dcs_api_error(self): fake_response = copy.deepcopy(fake.PREFERRED_DC_REST) fake_ss = copy.deepcopy(fake.LDAP_AD_SECURITY_SERVICE) self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake_response)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=netapp_api.api.NaApiError)) self.assertRaises(netapp_api.api.NaApiError, self.client.remove_preferred_dcs, fake_ss, fake.FAKE_UUID) def test_configure_cifs_aes_encryption_enable(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.client.configure_cifs_aes_encryption(fake.VSERVER_NAME, True) self.client._get_unique_svm_by_name.assert_called_once_with( fake.VSERVER_NAME) body = { 'security.advertised_kdc_encryptions': ['aes-128', 'aes-256'], } self.client.send_request.assert_called_once_with( 
f'/protocols/cifs/services/{fake.FAKE_UUID}', 'patch', body=body) def test_configure_cifs_aes_encryption_disable(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.client.configure_cifs_aes_encryption(fake.VSERVER_NAME, False) self.client._get_unique_svm_by_name.assert_called_once_with( fake.VSERVER_NAME) body = { 'security.advertised_kdc_encryptions': ['des', 'rc4'], } self.client.send_request.assert_called_once_with( f'/protocols/cifs/services/{fake.FAKE_UUID}', 'patch', body=body) def test_set_preferred_dc(self): fake_ss = copy.deepcopy(fake.LDAP_AD_SECURITY_SERVICE_WITH_SERVER) self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.client.set_preferred_dc(fake_ss, fake.VSERVER_NAME) self.client._get_unique_svm_by_name.assert_called_once_with( fake.VSERVER_NAME) query = { 'fqdn': fake_ss['domain'], 'skip_config_validation': 'false', 'server_ip': ['10.10.10.1'] } self.client.send_request.assert_called_once_with( f'/protocols/cifs/domains/{fake.FAKE_UUID}' '/preferred-domain-controllers', 'post', query=query) @ddt.data(None, 'cluster_name') def test_create_vserver_peer(self, cluster_name): self.mock_object(self.client, 'send_request') self.client.create_vserver_peer(fake.VSERVER_NAME, fake.VSERVER_PEER_NAME, peer_cluster_name=cluster_name) body = { 'svm.name': fake.VSERVER_NAME, 'peer.svm.name': fake.VSERVER_PEER_NAME, 'applications': ['snapmirror'], } if cluster_name: body['peer.cluster.name'] = cluster_name self.client.send_request.assert_has_calls([ mock.call('/svm/peers', 'post', body=body, enable_tunneling=False)]) def test__get_svm_peer_uuid(self): response = { "records": [{ "uuid": "fake-vserver-uuid", "name": fake.VSERVER_NAME, "svm": { "name": fake.VSERVER_NAME, }, "peer": { "svm": { "name": fake.VSERVER_PEER_NAME, } } }], } expected_result = "fake-vserver-uuid" 
return_value = response['records'][0]['uuid'] self.mock_object(self.client, '_get_svm_peer_uuid', mock.Mock(return_value=return_value)) result = self.client._get_svm_peer_uuid( fake.VSERVER_NAME, fake.VSERVER_PEER_NAME) self.client._get_svm_peer_uuid.assert_called_once_with( fake.VSERVER_NAME, fake.VSERVER_PEER_NAME) self.assertEqual(expected_result, result) def test_accept_vserver_peer(self): fake_resp = { 'records': [{'uuid': 'fake-vserver-uuid'}], 'num_records': 1, } self.mock_object( self.client, 'send_request', mock.Mock(side_effect=[fake_resp, None])) self.client.accept_vserver_peer( fake.VSERVER_NAME, fake.VSERVER_PEER_NAME) body = {'state': 'peered'} uuid = "fake-vserver-uuid" self.client.send_request.assert_has_calls([ mock.call(f'/svm/peers/{uuid}', 'patch', body=body, enable_tunneling=False)]) def test_get_vserver_peers(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.FAKE_PEER_GET_RESPONSE)) result = self.client.get_vserver_peers( vserver_name=fake.VSERVER_NAME, peer_vserver_name=fake.VSERVER_NAME_2) query = { 'name': fake.VSERVER_NAME_2, 'svm.name': fake.VSERVER_NAME } query['fields'] = 'uuid,svm.name,peer.svm.name,state,peer.cluster.name' self.client.send_request.assert_has_calls([ mock.call('/svm/peers', 'get', query=query)]) expected = [{ 'uuid': fake.FAKE_UUID, 'vserver': fake.VSERVER_NAME, 'peer-vserver': fake.VSERVER_NAME_2, 'peer-state': fake.VSERVER_PEER_STATE, 'peer-cluster': fake.CLUSTER_NAME }] self.assertEqual(expected, result) def test_get_vserver_peers_not_found(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake.NO_RECORDS_RESPONSE_REST)) result = self.client.get_vserver_peers( vserver_name=fake.VSERVER_NAME, peer_vserver_name=fake.VSERVER_NAME_2) self.assertEqual([], result) self.assertTrue(self.client.send_request.called) def test_delete_vserver_peer(self): self.mock_object(self.client, 'get_vserver_peers', mock.Mock(return_value=fake.FAKE_VSERVER_PEERS)) 
self.mock_object(self.client, 'send_request') self.client.delete_vserver_peer(fake.VSERVER_NAME, fake.VSERVER_PEER_NAME) self.client.get_vserver_peers.assert_called_once_with( fake.VSERVER_NAME, fake.VSERVER_PEER_NAME) self.client.send_request.assert_called_once_with( '/svm/peers/fake_uuid', 'delete', enable_tunneling=False) def test_update_showmount(self): query = { 'name': fake.VSERVER_NAME, 'fields': 'uuid' } response_svm = fake.SVMS_LIST_SIMPLE_RESPONSE_REST self.client.vserver = fake.VSERVER_NAME self.mock_object(self.client, 'send_request', mock.Mock(side_effect=[response_svm, None])) fake_showmount = 'true' self.client.update_showmount(fake_showmount) svm_id = response_svm.get('records')[0]['uuid'] body = { 'showmount_enabled': fake_showmount, } self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query=query), mock.call(f'/protocols/nfs/services/{svm_id}', 'patch', body=body) ]) @ddt.data({'tcp-max-xfer-size': 10000}, {}, None) def test_enable_nfs(self, nfs_config): self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_enable_nfs_protocols') self.mock_object(self.client, '_configure_nfs') self.mock_object(self.client, '_create_default_nfs_export_rules') self.mock_object(self.client, '_enable_nfs_protocols') self.client.enable_nfs(fake.NFS_VERSIONS, nfs_config) body = { 'svm.uuid': fake.FAKE_UUID, 'enabled': 'true' } self.client.send_request.assert_called_once_with( '/protocols/nfs/services/', 'post', body=body) self.client._get_unique_svm_by_name.assert_called_once_with() self.client._enable_nfs_protocols.assert_called_once_with( fake.NFS_VERSIONS, fake.FAKE_UUID) if nfs_config: self.client._configure_nfs.assert_called_once_with(nfs_config, fake.FAKE_UUID) else: self.client._configure_nfs.assert_not_called() self.client._create_default_nfs_export_rules.assert_called_once_with() @ddt.data((True, True, True), (True, 
False, False), (False, True, True)) @ddt.unpack def test_enable_nfs_protocols(self, v3, v40, v41): self.mock_object(self.client, 'send_request') versions = [] if v3: versions.append('nfs3') if v40: versions.append('nfs4.0') if v41: versions.append('nfs4.1') self.client._enable_nfs_protocols(versions, fake.FAKE_UUID) body = { 'protocol.v3_enabled': 'true' if v3 else 'false', 'protocol.v40_enabled': 'true' if v40 else 'false', 'protocol.v41_enabled': 'true' if v41 else 'false', 'showmount_enabled': 'true', 'windows.v3_ms_dos_client_enabled': 'true', 'protocol.v3_features.connection_drop': 'false', 'protocol.v3_features.ejukebox_enabled': 'false', } self.client.send_request.assert_called_once_with( f'/protocols/nfs/services/{fake.FAKE_UUID}', 'patch', body=body) def test_configure_nfs(self): self.mock_object(self.client, 'send_request') fake_nfs = { 'tcp-max-xfer-size': 10000, } self.client._configure_nfs(fake_nfs, fake.FAKE_UUID) body = { 'transport.tcp_max_transfer_size': 10000 } self.client.send_request.assert_called_once_with( f'/protocols/nfs/services/{fake.FAKE_UUID}', 'patch', body=body) def test__create_default_nfs_export_rules(self): class CopyingMock(mock.Mock): def __call__(self, *args, **kwargs): args = copy.deepcopy(args) kwargs = copy.deepcopy(kwargs) return super(CopyingMock, self).__call__(*args, **kwargs) self.mock_object(self.client, 'send_request', CopyingMock()) fake_uuid = fake.FAKE_UUID mock_id = self.mock_object(self.client, 'get_unique_export_policy_id', mock.Mock(return_value=fake_uuid)) self.client._create_default_nfs_export_rules() body = { 'clients': [{ 'match': '0.0.0.0/0' }], 'ro_rule': [ 'any', ], 'rw_rule': [ 'never' ], } body2 = body.copy() body2['clients'] = [{ 'match': '::/0' }] mock_id.assert_called_once_with('default') self.client.send_request.assert_has_calls([ mock.call(f'/protocols/nfs/export-policies/{fake_uuid}/rules', "post", body=body), mock.call(f'/protocols/nfs/export-policies/{fake_uuid}/rules', "post", body=body2)]) def 
test_get_node_data_ports(self): self.mock_object( self.client, 'send_request', mock.Mock( side_effect=[fake.REST_ETHERNET_PORTS, fake.REST_DATA_INTERFACES])) self.mock_object( self.client, '_sort_data_ports_by_speed', mock.Mock( return_value=fake.REST_SPEED_SORTED_PORTS)) test_result = self.client.get_node_data_ports(fake.NODE_NAME) fake_query = { 'node.name': fake.NODE_NAME, 'state': 'up', 'type': 'physical', 'broadcast_domain.name': 'Default', 'fields': 'node.name,speed,name' } query_interfaces = { 'service_policy.name': '!default-management', 'services': 'data_*', 'fields': 'location.port.name' } self.client.send_request.assert_has_calls([ mock.call('/network/ethernet/ports', 'get', query=fake_query), mock.call('/network/ip/interfaces', 'get', query=query_interfaces, enable_tunneling=False), ]) self.client._sort_data_ports_by_speed.assert_called_once_with( fake.REST_SPEED_NOT_SORTED_PORTS) self.assertEqual(fake.REST_SPEED_SORTED_PORTS, test_result) def test_list_node_data_ports(self): expected_resulted = ['e0d', 'e0c', 'e0b'] mock_ports = ( self.mock_object(self.client, 'get_node_data_ports', mock.Mock( return_value=fake.REST_SPEED_SORTED_PORTS))) test_result = self.client.list_node_data_ports(fake.NODE_NAME) mock_ports.assert_called_once_with(fake.NODE_NAME) self.assertEqual(test_result, expected_resulted) def test_create_ipspace(self): fake_body = {'name': fake.IPSPACE_NAME} self.mock_object(self.client, 'send_request') self.client.create_ipspace(fake.IPSPACE_NAME) self.client.send_request.assert_called_once_with( '/network/ipspaces', 'post', body=fake_body) def test_get_ipspace_name_for_vlan_port(self): fake_query = { 'node.name': fake.NODE_NAME, 'name': fake.VLAN_PORT, 'fields': 'broadcast_domain.ipspace.name', } expected_result = "Default" self.mock_object( self.client, 'send_request', mock.Mock( return_value=fake.REST_ETHERNET_PORTS)) test_result = self.client.get_ipspace_name_for_vlan_port( fake.NODE_NAME, fake.PORT, fake.VLAN) 
self.client.send_request.assert_called_once_with( '/network/ethernet/ports/', 'get', query=fake_query) self.assertEqual(test_result, expected_result) def test__create_broadcast_domain(self): fake_body = { 'ipspace.name': fake.IPSPACE_NAME, 'name': fake.BROADCAST_DOMAIN, 'mtu': fake.MTU, } self.mock_object(self.client, 'send_request') self.client._create_broadcast_domain(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) self.client.send_request.assert_called_once_with( '/network/ethernet/broadcast-domains', 'post', body=fake_body) def test_ensure_broadcast_domain_for_port_domain_match(self): port_info = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': fake.BROADCAST_DOMAIN, } self.mock_object(self.client, '_get_broadcast_domain_for_port', mock.Mock(return_value=port_info)) self.mock_object(self.client, '_broadcast_domain_exists', mock.Mock(return_value=True)) self.mock_object(self.client, '_create_broadcast_domain') self.mock_object(self.client, '_modify_broadcast_domain') self.mock_object(self.client, '_add_port_to_broadcast_domain') self.client._ensure_broadcast_domain_for_port( fake.NODE_NAME, fake.PORT, fake.MTU, ipspace=fake.IPSPACE_NAME) self.client._get_broadcast_domain_for_port.assert_called_once_with( fake.NODE_NAME, fake.PORT) self.client._modify_broadcast_domain.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) self.assertFalse(self.client._broadcast_domain_exists.called) self.assertFalse(self.client._create_broadcast_domain.called) self.assertFalse(self.client._add_port_to_broadcast_domain.called) @ddt.data(fake.IPSPACE_NAME, client_cmode.DEFAULT_IPSPACE) def test_ensure_broadcast_domain_for_port_other_domain(self, ipspace): port_info = { 'ipspace': ipspace, 'broadcast-domain': 'other_domain', } self.mock_object(self.client, '_get_broadcast_domain_for_port', mock.Mock(return_value=port_info)) self.mock_object(self.client, '_broadcast_domain_exists', mock.Mock(return_value=True)) self.mock_object(self.client, 
'_create_broadcast_domain') self.mock_object(self.client, '_modify_broadcast_domain') self.mock_object(self.client, '_add_port_to_broadcast_domain') self.client._ensure_broadcast_domain_for_port( fake.NODE_NAME, fake.PORT, ipspace=fake.IPSPACE_NAME, mtu=fake.MTU) self.client._get_broadcast_domain_for_port.assert_called_once_with( fake.NODE_NAME, fake.PORT) self.client._broadcast_domain_exists.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.assertFalse(self.client._create_broadcast_domain.called) self.client._modify_broadcast_domain.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) self.client._add_port_to_broadcast_domain.assert_called_once_with( fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) def test_ensure_broadcast_domain_for_port_no_domain(self): port_info = { 'ipspace': fake.IPSPACE_NAME, 'broadcast-domain': None, } self.mock_object(self.client, '_get_broadcast_domain_for_port', mock.Mock(return_value=port_info)) self.mock_object(self.client, '_broadcast_domain_exists', mock.Mock(return_value=False)) self.mock_object(self.client, '_create_broadcast_domain') self.mock_object(self.client, '_modify_broadcast_domain') self.mock_object(self.client, '_add_port_to_broadcast_domain') self.client._ensure_broadcast_domain_for_port( fake.NODE_NAME, fake.PORT, ipspace=fake.IPSPACE_NAME, mtu=fake.MTU) self.client._get_broadcast_domain_for_port.assert_called_once_with( fake.NODE_NAME, fake.PORT) self.client._broadcast_domain_exists.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.client._create_broadcast_domain.assert_called_once_with( fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) self.assertFalse(self.client._modify_broadcast_domain.called) self.client._add_port_to_broadcast_domain.assert_called_once_with( fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) def test__add_port_to_broadcast_domain(self): query = { 'name': fake.PORT, 'node.name': 
fake.NODE_NAME, } body = { 'broadcast_domain.ipspace.name': fake.IPSPACE_NAME, 'broadcast_domain.name': fake.BROADCAST_DOMAIN, } self.mock_object(self.client, 'send_request') self.client._add_port_to_broadcast_domain(fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.client.send_request.assert_called_once_with( '/network/ethernet/ports/', 'patch', query=query, body=body) def test__add_port_to_broadcast_domain_exists(self): query = { 'name': fake.PORT, 'node.name': fake.NODE_NAME, } body = { 'broadcast_domain.ipspace.name': fake.IPSPACE_NAME, 'broadcast_domain.name': fake.BROADCAST_DOMAIN, } self.mock_object( self.client, 'send_request', self._mock_api_error( code=netapp_api.EREST_FAIL_ADD_PORT_BROADCAST)) self.client._add_port_to_broadcast_domain(fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.client.send_request.assert_called_once_with( '/network/ethernet/ports/', 'patch', query=query, body=body) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) def test__add_port_to_broadcast_domain_exception(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises( exception.NetAppException, self.client._add_port_to_broadcast_domain, fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) def test_rename_vserver(self): svm_uuid = fake.SVM_ITEM_SIMPLE_RESPONSE_REST["uuid"] body = { 'name': fake.VSERVER_NAME_2 } self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=svm_uuid)) self.mock_object(self.client, 'send_request') self.client.rename_vserver(fake.VSERVER_NAME, fake.VSERVER_NAME_2) self.client._get_unique_svm_by_name.assert_called_once_with( fake.VSERVER_NAME) self.client.send_request.assert_called_once_with( f'/svm/svms/{svm_uuid}', 'patch', body=body) def test_create_network_interface(self): api_response = copy.deepcopy(fake.SERVICE_POLICIES_REST) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=[api_response, None, 
None])) self.client.create_network_interface(fake.IP_ADDRESS, fake.NETMASK, fake.NODE_NAME, fake.VLAN_PORT, fake.VSERVER_NAME, fake.LIF_NAME) query = { 'name': 'default-data-files', 'svm.name': fake.VSERVER_NAME, 'fields': 'uuid,name,services,svm.name' } policy = copy.deepcopy(fake.SERVICE_POLICIES_REST['records'][0]) uuid = policy['uuid'] policy['services'].append('data_nfs') policy['services'].append('data_cifs') body1 = {'services': policy['services']} body2 = { 'ip.address': fake.IP_ADDRESS, 'ip.netmask': fake.NETMASK, 'enabled': 'true', 'service_policy.name': 'default-data-files', 'location.home_node.name': fake.NODE_NAME, 'location.home_port.name': fake.VLAN_PORT, 'name': fake.LIF_NAME, 'svm.name': fake.VSERVER_NAME, } self.client.send_request.assert_has_calls([ mock.call('/network/ip/service-policies/', 'get', query=query), mock.call(f'/network/ip/service-policies/{uuid}', 'patch', body=body1), mock.call('/network/ip/interfaces', 'post', body=body2) ]) def test_create_vserver(self): mock = self.mock_object(self.client, '_create_vserver') self.mock_object(self.client, '_modify_security_cert', mock.Mock(return_value=[])) self.client.create_vserver(fake.VSERVER_NAME, None, None, [fake.SHARE_AGGREGATE_NAME], fake.IPSPACE_NAME, fake.SECURITY_CERT_DEFAULT_EXPIRE_DAYS, fake.DELETE_RETENTION_HOURS, False) mock.assert_called_once_with(fake.VSERVER_NAME, [fake.SHARE_AGGREGATE_NAME], fake.IPSPACE_NAME, fake.DELETE_RETENTION_HOURS, name_server_switch=['files'], logical_space_reporting=False) self.client._modify_security_cert.assert_called_once_with( fake.VSERVER_NAME, fake.SECURITY_CERT_DEFAULT_EXPIRE_DAYS) def test__modify_security_cert(self): api_response = copy.deepcopy(fake.SECURITY_CERT_GET_RESPONSE_REST) api_response2 = copy.deepcopy(fake.SECURITY_CERT_POST_RESPONSE_REST) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=[api_response, api_response2, None, None])) query = { 'common-name': fake.VSERVER_NAME, 'ca': fake.VSERVER_NAME, 'type': 
'server', 'svm.name': fake.VSERVER_NAME, } old_cert_info = copy.deepcopy( fake.SECURITY_CERT_GET_RESPONSE_REST['records'][0]) old_cert_uuid = old_cert_info['uuid'] body1 = { 'common-name': fake.VSERVER_NAME, 'type': 'server', 'svm.name': fake.VSERVER_NAME, 'expiry_time': 'P' + str( fake.SECURITY_CERT_LARGE_EXPIRE_DAYS) + 'DT', } query1 = { 'return_records': 'true' } new_cert_info = copy.deepcopy( fake.SECURITY_CERT_POST_RESPONSE_REST['records'][0]) new_cert_uuid = new_cert_info['uuid'] new_svm_uuid = new_cert_info['svm']['uuid'] body2 = { 'certificate': { 'uuid': new_cert_uuid, }, 'client_enabled': 'false', } self.client._modify_security_cert( fake.VSERVER_NAME, fake.SECURITY_CERT_LARGE_EXPIRE_DAYS) self.client.send_request.assert_has_calls([ mock.call('/security/certificates', 'get', query=query), mock.call('/security/certificates', 'post', body=body1, query=query1), mock.call(f'/svm/svms/{new_svm_uuid}', 'patch', body=body2), mock.call(f'/security/certificates/{old_cert_uuid}', 'delete'), ]) def test__broadcast_domain_exists(self): response = fake.FAKE_GET_BROADCAST_DOMAIN self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) query = { 'ipspace.name': fake.IPSPACE_NAME, 'name': fake.BROADCAST_DOMAIN, } result = self.client._broadcast_domain_exists(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.client.send_request.assert_called_once_with( '/network/ethernet/broadcast-domains', 'get', query=query) self.assertTrue(result) def test___delete_port_by_ipspace_and_broadcast_domain(self): self.mock_object(self.client, 'send_request') query = { 'broadcast_domain.ipspace.name': fake.IPSPACE_NAME, 'broadcast_domain.name': fake.BROADCAST_DOMAIN, 'name': fake.PORT } self.client._delete_port_by_ipspace_and_broadcast_domain( fake.PORT, fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME) self.client.send_request.assert_called_once_with( '/network/ethernet/ports/', 'delete', query=query) 
def test_get_broadcast_domain_for_port(self): self.mock_object(self.client, 'send_request', mock.Mock( return_value=fake.REST_ETHERNET_PORTS)) query = { 'node.name': fake.NODE_NAME, 'name': fake.PORT, 'fields': 'broadcast_domain.name,broadcast_domain.ipspace.name' } result = self.client._get_broadcast_domain_for_port(fake.NODE_NAME, fake.PORT) expected = { 'broadcast-domain': "fake_domain_1", 'ipspace': "Default", } self.client.send_request.assert_has_calls([ mock.call('/network/ethernet/ports', 'get', query=query)]) self.assertEqual(expected, result) def test_modify_broadcast_domain(self): self.mock_object(self.client, 'send_request') result = self.client._modify_broadcast_domain(fake.BROADCAST_DOMAIN, fake.IPSPACE_NAME, fake.MTU) query = { 'name': fake.BROADCAST_DOMAIN } body = { 'ipspace.name': fake.IPSPACE_NAME, 'mtu': fake.MTU, } self.assertIsNone(result) self.client.send_request.assert_called_once_with( '/network/ethernet/broadcast-domains', 'patch', body=body, query=query) @ddt.data(fake.NO_RECORDS_RESPONSE, fake.SVMS_LIST_SIMPLE_RESPONSE_REST) def test_get_vserver_info(self, api_response): self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_info(fake.VSERVER_NAME) query = { 'name': fake.VSERVER_NAME, 'fields': 'state,subtype' } self.client.send_request.assert_called_once_with( '/svm/svms', 'get', query=query) if api_response == fake.NO_RECORDS_RESPONSE: self.assertIsNone(result) else: self.assertDictEqual(fake.VSERVER_INFO, result) def test_get_nfs_config(self): api_response = fake.NFS_CONFIG_RESULT_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_nfs_config(['tcp-max-xfer-size', 'udp-max-xfer-size'], fake.VSERVER_NAME) expected = { 'tcp-max-xfer-size': '65536', 'udp-max-xfer-size': '32768', } self.assertEqual(expected, result) query = {'fields': 'transport.*', 'svm.name': 'fake_vserver'} 
self.client.send_request.assert_called_once_with( '/protocols/nfs/services/', 'get', query=query) def test_get_vserver_ipspace(self): self.client.features.add_feature('IPSPACES') api_response = fake.REST_VSERVER_GET_IPSPACE_NAME_RESPONSE self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_ipspace(fake.VSERVER_NAME) query = { 'name': fake.VSERVER_NAME, 'fields': 'ipspace.name' } expected = fake.IPSPACE_NAME self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query=query)]) self.assertEqual(expected, result) def test_get_vserver_ipspace_not_found(self): api_response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_vserver_ipspace(fake.VSERVER_NAME) self.assertIsNone(result) def test_get_vserver_ipspace_exception(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client.get_vserver_ipspace, fake.VSERVER_NAME) def test_get_snapmirror_policies(self): api_response = fake.GET_SNAPMIRROR_POLICIES_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result_elem = [fake.SNAPMIRROR_POLICY_NAME] result = self.client.get_snapmirror_policies( fake.VSERVER_NAME) query = { 'svm.name': fake.VSERVER_NAME, 'fields': 'name' } self.client.send_request.assert_called_once_with( '/snapmirror/policies', 'get', query=query) self.assertEqual(result_elem, result) def test_delete_snapmirror_policy(self): api_response = fake.GET_SNAPMIRROR_POLICIES_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.delete_snapmirror_policy('fake_policy') query = {} query['name'] = 'fake_policy' query['fields'] = 'uuid,name' uuid = fake.FAKE_UUID self.client.send_request.assert_has_calls([ mock.call('/snapmirror/policies', 'get', query=query), 
mock.call(f'/snapmirror/policies/{uuid}', 'delete') ]) def test_delete_snapmirror_policy_exception(self): api_response = fake.GET_SNAPMIRROR_POLICIES_REST api_error = netapp_api.api.NaApiError() self.mock_object(self.client, 'send_request', mock.Mock(side_effect=[api_response, api_error])) self.assertRaises(netapp_api.api.NaApiError, self.client.delete_snapmirror_policy, 'fake_policy') def test_delete_snapmirror_policy_no_records(self): api_response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) self.client.delete_snapmirror_policy('fake_policy') query = {} query['name'] = 'fake_policy' query['fields'] = 'uuid,name' self.client.send_request.assert_called_once_with( '/snapmirror/policies', 'get', query=query) def test_delete_vserver_one_volume(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'offline_volume') self.mock_object(self.client, 'delete_volume') self.mock_object(self.client, '_terminate_vserver_services') self.client.delete_vserver(fake.VSERVER_NAME, self.client, fake.CIFS_SECURITY_SERVICE) self.client.offline_volume.assert_called_with(fake.ROOT_VOLUME_NAME) self.client.delete_volume.assert_called_with(fake.ROOT_VOLUME_NAME) self.client._terminate_vserver_services( fake.VSERVER_NAME, self.client, fake.CIFS_SECURITY_SERVICE) svm_uuid = fake.FAKE_UUID self.client.send_request.assert_has_calls([ mock.call(f'/svm/svms/{svm_uuid}', 'delete')]) def test_delete_vserver_one_volume_already_offline(self): self.mock_object(self.client, 'get_vserver_info', 
mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'offline_volume', self._mock_api_error( code=netapp_api.EREST_ENTRY_NOT_FOUND)) self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'delete_volume') self.client.delete_vserver(fake.VSERVER_NAME, self.client) self.client.offline_volume.assert_called_with( fake.ROOT_VOLUME_NAME) self.client.delete_volume.assert_called_with( fake.ROOT_VOLUME_NAME) svm_uuid = fake.FAKE_UUID self.client.send_request.assert_has_calls([ mock.call(f'/svm/svms/{svm_uuid}', 'delete')]) self.assertEqual(1, client_cmode_rest.LOG.error.call_count) def test_delete_vserver_one_volume_api_error(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'get_vserver_volume_count', mock.Mock(return_value=1)) self.mock_object(self.client, 'offline_volume', self._mock_api_error()) self.mock_object(self.client, 'delete_volume') self.assertRaises(netapp_api.api.NaApiError, self.client.delete_vserver, fake.VSERVER_NAME, self.client) def test_delete_vserver_multiple_volumes(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.mock_object(self.client, 'get_vserver_root_volume_name', mock.Mock(return_value=fake.ROOT_VOLUME_NAME)) self.mock_object(self.client, 
'get_vserver_volume_count', mock.Mock(return_value=2)) self.assertRaises(exception.NetAppException, self.client.delete_vserver, fake.VSERVER_NAME, self.client) def test_delete_vserver_not_found(self): self.mock_object(self.client, 'get_vserver_info', mock.Mock(return_value=None)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.client.delete_vserver(fake.VSERVER_NAME, self.client) self.assertEqual(1, client_cmode_rest.LOG.error.call_count) def test_get_vserver_volume_count(self): fake_response = fake.VOLUME_GET_ITER_RESPONSE_REST_PAGE mock_request = self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake_response)) response = self.client.get_vserver_volume_count() self.assertEqual(response, 10) query = {'return_records': 'false'} mock_request.assert_called_once_with( '/storage/volumes', 'get', query=query) def test__terminate_vserver_services(self): fake_uuid = fake.FAKE_UUID self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'disable_kerberos') self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake_uuid)) security_services = [ copy.deepcopy(fake.CIFS_SECURITY_SERVICE), copy.deepcopy(fake.KERBEROS_SECURITY_SERVICE) ] self.client._terminate_vserver_services( fake.VSERVER_NAME, self.client, security_services) cifs_server_delete_body = { 'ad_domain.password': security_services[0]['password'], 'ad_domain.user': security_services[0]['user'], } self.client.send_request.assert_called_once_with( f'/protocols/cifs/services/{fake_uuid}', 'delete', body=cifs_server_delete_body) self.client.disable_kerberos.assert_called_once_with( security_services[1]) def test_terminate_vserver_services_cifs_not_found(self): fake_uuid = fake.FAKE_UUID self.mock_object( self.client, 'send_request', self._mock_api_error(code=netapp_api.EREST_ENTRY_NOT_FOUND)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake_uuid)) security_service = 
copy.deepcopy(fake.CIFS_SECURITY_SERVICE) self.client._terminate_vserver_services(fake.VSERVER_NAME, self.client, [security_service]) cifs_server_delete_body = { 'ad_domain.password': security_service['password'], 'ad_domain.user': security_service['user'], } self.client.send_request.assert_called_once_with( f'/protocols/cifs/services/{fake_uuid}', 'delete', body=cifs_server_delete_body) self.assertEqual(1, client_cmode_rest.LOG.error.call_count) def test_terminate_vserver_services_api_error(self): fake_uuid = fake.FAKE_UUID side_effects = [netapp_api.api.NaApiError(code='fake'), None] self.mock_object(self.client, 'send_request', mock.Mock(side_effect=side_effects)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake_uuid)) security_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) self.client._terminate_vserver_services(fake.VSERVER_NAME, self.client, [security_service]) cifs_server_delete_body = { 'ad_domain.password': security_service['password'], 'ad_domain.user': security_service['user'], } cifs_server_delete_force_body = { 'ad_domain.password': security_service['password'], 'ad_domain.user': security_service['user'], 'force': True } self.client.send_request.assert_has_calls([ mock.call(f'/protocols/cifs/services/{fake_uuid}', 'delete', body=cifs_server_delete_body), mock.call(f'/protocols/cifs/services/{fake_uuid}', 'delete', body=cifs_server_delete_force_body)]) self.assertEqual(0, client_cmode_rest.LOG.error.call_count) def test_disable_kerberos(self): fake_api_response = fake.NFS_LIFS_REST api_error = self._mock_api_error( code=netapp_api.EREST_KERBEROS_IS_ENABLED_DISABLED) self.mock_object(self.client, 'get_network_interfaces', mock.Mock(return_value=fake_api_response)) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=[None, api_error, None])) self.client.disable_kerberos(fake.KERBEROS_SECURITY_SERVICE) kerberos_config_modify_body = { 'password': fake.KERBEROS_SECURITY_SERVICE['password'], 'user': 
fake.KERBEROS_SECURITY_SERVICE['user'], 'interface.name': fake.LIF_NAME, 'enabled': False, } self.client.send_request.assert_has_calls([ mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_1', 'patch', body=kerberos_config_modify_body), mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_2', 'patch', body=kerberos_config_modify_body), mock.call('/protocols/nfs/kerberos/interfaces/fake_uuid_3', 'patch', body=kerberos_config_modify_body) ]) self.client.get_network_interfaces.assert_called_once() def test_get_vserver_root_volume_name(self): response = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=response)) self.client.get_vserver_root_volume_name(fake.VSERVER_NAME) self.client._get_volume_by_args.assert_called_once_with( vserver=fake.VSERVER_NAME, is_root=True) def test_ipspace_has_data_vservers(self): api_response = fake.REST_VSERVER_GET_IPSPACE_NAME_RESPONSE self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.ipspace_has_data_vservers(fake.IPSPACE_NAME) query = {'ipspace.name': fake.IPSPACE_NAME} self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query=query)]) self.assertTrue(result) def test_ipspace_has_data_vservers_not_supported(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value='fake_response')) self.mock_object(self.client, '_has_records', mock.Mock(return_value=False)) result = self.client.ipspace_has_data_vservers(fake.IPSPACE_NAME) self.assertFalse(result) query = {'ipspace.name': fake.IPSPACE_NAME} self.client.send_request.assert_called_once_with( '/svm/svms', 'get', query=query) self.client._has_records.assert_called_once_with('fake_response') def test_ipspace_has_data_vservers_not_found(self): api_response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = 
self.client.ipspace_has_data_vservers(fake.IPSPACE_NAME) self.assertFalse(result) def test_delete_vlan(self): self.mock_object(self.client, 'send_request') query = { 'vlan.base_port.name': fake.PORT, 'node.name': fake.NODE_NAME, 'vlan.tag': fake.VLAN } self.client.delete_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_has_calls([ mock.call('/network/ethernet/ports/', 'delete', query=query)]) def test_delete_vlan_not_found(self): self.mock_object( self.client, 'send_request', self._mock_api_error(code=netapp_api.EREST_ENTRY_NOT_FOUND)) query = { 'vlan.base_port.name': fake.PORT, 'node.name': fake.NODE_NAME, 'vlan.tag': fake.VLAN } self.client.delete_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_has_calls([ mock.call('/network/ethernet/ports/', 'delete', query=query)]) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) def test_delete_vlan_still_used(self): self.mock_object( self.client, 'send_request', self._mock_api_error(code=netapp_api.EREST_PORT_IN_USE)) query = { 'vlan.base_port.name': fake.PORT, 'node.name': fake.NODE_NAME, 'vlan.tag': fake.VLAN } self.client.delete_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) self.client.send_request.assert_has_calls([ mock.call('/network/ethernet/ports/', 'delete', query=query)]) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) def test_delete_vlan_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(exception.NetAppException, self.client.delete_vlan, fake.NODE_NAME, fake.PORT, fake.VLAN) @ddt.data(None, fake.IPSPACE_NAME) def test_svm_migration_start(self, dest_ipspace): check_only = True self.mock_object(self.client, 'send_request', mock.Mock(return_value='fake_migration')) res = self.client.svm_migration_start( fake.CLUSTER_NAME, fake.VSERVER_NAME, fake.SHARE_AGGREGATE_NAMES, dest_ipspace=dest_ipspace, check_only=check_only) self.assertEqual('fake_migration', res) expected_body = { "auto_cutover": 
False, "auto_source_cleanup": True, "check_only": True, "source": { "cluster": {"name": fake.CLUSTER_NAME}, "svm": {"name": fake.VSERVER_NAME}, }, "destination": { "volume_placement": { "aggregates": fake.SHARE_AGGREGATE_NAMES, }, }, } if dest_ipspace is not None: ipspace_data = { "ipspace": { "name": dest_ipspace, } } expected_body["destination"].update(ipspace_data) self.client.send_request.assert_called_once_with( '/svm/migrations', 'post', body=expected_body, wait_on_accepted=False) def test_get_migration_check_job_state(self): self.mock_object(self.client, 'get_job', mock.Mock(return_value='fake_job')) res = self.client.get_migration_check_job_state(fake.JOB_ID) self.assertEqual('fake_job', res) self.client.get_job.assert_called_once_with(fake.JOB_ID) @ddt.data(netapp_api.api.ENFS_V4_0_ENABLED_MIGRATION_FAILURE, netapp_api.api.EVSERVER_MIGRATION_TO_NON_AFF_CLUSTER, 'none') def test_get_migration_check_job_state_raise_error(self, error_code): e = netapp_api.api.NaApiError(code=error_code) self.mock_object(self.client, 'get_job', mock.Mock(side_effect=e)) self.assertRaises( exception.NetAppException, self.client.get_migration_check_job_state, fake.JOB_ID) def test_svm_migrate_complete(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value='fake_migration')) res = self.client.svm_migrate_complete(fake.FAKE_MIGRATION_POST_ID) self.assertEqual('fake_migration', res) expected_body = { "action": "cutover" } self.client.send_request.assert_called_once_with( f'/svm/migrations/{fake.FAKE_MIGRATION_POST_ID}', 'patch', body=expected_body, wait_on_accepted=False) def test_svm_migrate_cancel(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value='fake_migration')) res = self.client.svm_migrate_cancel(fake.FAKE_MIGRATION_POST_ID) self.assertEqual('fake_migration', res) self.client.send_request.assert_called_once_with( f'/svm/migrations/{fake.FAKE_MIGRATION_POST_ID}', 'delete', wait_on_accepted=False) def test_svm_migration_get(self): 
self.mock_object(self.client, 'send_request', mock.Mock(return_value='fake_migration')) res = self.client.svm_migration_get(fake.FAKE_MIGRATION_POST_ID) self.assertEqual('fake_migration', res) self.client.send_request.assert_called_once_with( f'/svm/migrations/{fake.FAKE_MIGRATION_POST_ID}', 'get') def test_svm_migrate_pause(self): self.mock_object(self.client, 'send_request', mock.Mock(return_value='fake_migration')) res = self.client.svm_migrate_pause(fake.FAKE_MIGRATION_POST_ID) self.assertEqual('fake_migration', res) expected_body = { "action": "pause" } self.client.send_request.assert_called_once_with( f'/svm/migrations/{fake.FAKE_MIGRATION_POST_ID}', 'patch', body=expected_body, wait_on_accepted=False) def test_delete_network_interface(self): self.mock_object(self.client, 'disable_network_interface') self.mock_object(self.client, 'send_request') self.client.delete_network_interface(fake.VSERVER_NAME, fake.LIF_NAME) self.client.disable_network_interface.assert_called_once_with( fake.VSERVER_NAME, fake.LIF_NAME) expected_query = { 'svm.name': fake.VSERVER_NAME, 'name': fake.LIF_NAME } self.client.send_request.assert_called_once_with( '/network/ip/interfaces', 'delete', query=expected_query) def test_disable_network_interface(self): self.mock_object(self.client, 'send_request') self.client.disable_network_interface(fake.VSERVER_NAME, fake.LIF_NAME) expected_body = { 'enabled': 'false' } expected_query = { 'svm.name': fake.VSERVER_NAME, 'name': fake.LIF_NAME } self.client.send_request.assert_called_once_with( '/network/ip/interfaces', 'patch', body=expected_body, query=expected_query) def test__delete_port_and_broadcast_domain(self): domain = copy.deepcopy(fake.BROADCAST_DOMAIN) ipspace = copy.deepcopy(fake.GET_IPSPACES_RESPONSE) query = {'name': domain, 'ipspace.name': ipspace['ipspace']} response_broadcast = copy.deepcopy( fake.BROADCAST_DOMAIN_LIST_SIMPLE_RESPONSE_REST) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=[response_broadcast, 
None])) self.mock_object(self.client, '_delete_port_by_ipspace_and_broadcast_domain') self.client._delete_port_and_broadcast_domain(domain, ipspace) self.client.send_request.assert_has_calls([ mock.call('/network/ethernet/broadcast-domains', 'delete', query=query)]) def test_delete_ipspace(self): ipspace = copy.deepcopy(fake.IPSPACES[0]) mock_del_brcst = self.mock_object( self.client, '_delete_port_and_broadcast_domains_for_ipspace') self.mock_object(self.client, 'ipspace_has_data_vservers', mock.Mock(return_value=[])) mock_send_request = self.mock_object(self.client, 'send_request') query = {'name': fake.IPSPACE_NAME} self.client.delete_ipspace(ipspace['ipspace']) mock_del_brcst.assert_called_once_with(fake.IPSPACE_NAME) mock_send_request.assert_called_once_with( '/network/ipspaces', 'delete', query=query) def test_get_ipspaces(self): expected = copy.deepcopy(fake.GET_IPSPACES_RESPONSE) sr_responses = [fake.IPSPACE_INFO, fake.REST_SINGLE_PORT, fake.SVMS_LIST_SIMPLE_RESPONSE_REST, fake.FAKE_GET_BROADCAST_DOMAIN] self.mock_object(self.client, 'send_request', mock.Mock(side_effect=sr_responses)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) result = self.client.get_ipspaces(fake.IPSPACE_NAME) self.assertEqual(expected, result) def test_get_ipspaces_no_records(self): api_response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.get_ipspaces(fake.IPSPACE_NAME) self.assertEqual([], result) def test_delete_port_and_broadcast_domains_for_ipspace_not_found(self): self.mock_object(self.client, 'get_ipspaces', mock.Mock(return_value=[])) self.mock_object(self.client, '_delete_port_and_broadcast_domain') self.client._delete_port_and_broadcast_domains_for_ipspace( fake.IPSPACE_NAME) self.client.get_ipspaces.assert_called_once_with( fake.IPSPACE_NAME) self.assertFalse(self.client._delete_port_and_broadcast_domain.called) def 
test_delete_port_and_broadcast_domains_for_ipspace(self): self.mock_object(self.client, 'get_ipspaces', mock.Mock(return_value=fake.IPSPACES[0])) self.mock_object(self.client, '_delete_port_and_broadcast_domain') self.client._delete_port_and_broadcast_domains_for_ipspace( fake.IPSPACE_NAME) self.client.get_ipspaces.assert_called_once_with( fake.IPSPACE_NAME) self.client._delete_port_and_broadcast_domain.assert_called_once_with( fake.IPSPACES[0]['broadcast-domains'][0], fake.IPSPACES[0]) @ddt.data(('10.10.10.0/24', '10.10.10.1', False), ('fc00::/7', 'fe80::1', False), ('0.0.0.0/0', '10.10.10.1', True), ('::/0', 'fe80::1', True)) @ddt.unpack def test_create_route(self, subnet, gateway, omit_destination): address = None netmask = None destination = None if omit_destination else subnet if not destination: if ':' in gateway: destination = '::/0' else: destination = '0.0.0.0/0' if '/' in destination: address, netmask = destination.split('/') else: address = destination body = { 'destination.address': address, 'gateway': gateway, } if netmask: body['destination.netmask'] = netmask self.mock_object(self.client, 'send_request') self.client.create_route(gateway, destination=destination) self.client.send_request.assert_called_once_with( '/network/ip/routes', 'post', body=body) def test_create_route_duplicate(self): self.mock_object(client_cmode_rest.LOG, 'debug') self.mock_object( self.client, 'send_request', self._mock_api_error(code=netapp_api.EREST_DUPLICATE_ROUTE)) self.client.create_route(fake.GATEWAY, destination=fake.SUBNET) body = { 'destination.address': fake.SUBNET[:-3], 'gateway': fake.GATEWAY, 'destination.netmask': fake.SUBNET[-2:], } self.client.send_request.assert_called_once_with( '/network/ip/routes', 'post', body=body) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) def test_create_route_api_error(self): self.mock_object(client_cmode_rest.LOG, 'debug') self.mock_object(self.client, 'send_request', self._mock_api_error()) body = { 
'destination.address': fake.SUBNET[:-3], 'gateway': fake.GATEWAY, 'destination.netmask': fake.SUBNET[-2:], } self.assertRaises(exception.NetAppException, self.client.create_route, fake.GATEWAY, destination=fake.SUBNET) self.client.send_request.assert_called_once_with( '/network/ip/routes', 'post', body=body) def test_create_route_without_gateway(self): self.mock_object(self.client, 'send_request') self.client.create_route(None, destination=fake.SUBNET) self.assertFalse(self.client.send_request.called) def test_network_interface_exists(self): api_response = fake.GENERIC_NETWORK_INTERFACES_GET_REPONSE self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.network_interface_exists( fake.VSERVER_NAME, fake.NODE_NAME, fake.PORT, fake.IP_ADDRESS, fake.NETMASK, fake.VLAN) query = { 'ip.address': fake.IP_ADDRESS, 'location.home_node.name': fake.NODE_NAME, 'location.home_port.name': f'{fake.PORT}-{fake.VLAN}', 'ip.netmask': fake.NETMASK, 'svm.name': fake.VSERVER_NAME, 'fields': 'name', } self.client.send_request.assert_called_once_with( '/network/ip/interfaces', 'get', query=query) self.assertTrue(result) def test_modify_active_directory_security_service(self): svm_uuid = fake.FAKE_UUID user_records = fake.FAKE_CIFS_LOCAL_USER.get('records')[0] sid = user_records.get('sid') self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=svm_uuid)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=[user_records, None, None])) self.mock_object(self.client, 'remove_preferred_dcs') self.mock_object(self.client, 'set_preferred_dc') new_security_service = { 'user': 'new_user', 'password': 'new_password', 'server': 'fake_server' } current_security_service = { 'server': 'fake_current_server' } keys = {'user', 'password', 'server'} self.client.modify_active_directory_security_service( fake.VSERVER_NAME, keys, new_security_service, current_security_service) self.client.send_request.assert_has_calls([ 
mock.call(f'/protocols/cifs/local-users/{svm_uuid}', 'get'), mock.call(f'/protocols/cifs/local-users/{svm_uuid}/{sid}', 'patch', query={'password': new_security_service['password']}), mock.call(f'/protocols/cifs/local-users/{svm_uuid}/{sid}', 'patch', query={'name': new_security_service['user']}) ]) @ddt.data(True, False) def test__create_vserver(self, logical_space_reporting): mock_sr = self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) body_post = { 'name': fake.VSERVER_NAME, 'nsswitch.namemap': fake.FAKE_SERVER_SWITCH_NAME, 'subtype': fake.FAKE_SUBTYPE, 'ipspace.name': fake.IPSPACE_NAME, 'aggregates': [{ 'name': fake.SHARE_AGGREGATE_NAME }], } if logical_space_reporting: body_post.update({ 'is_space_reporting_logical': 'true', 'is_space_enforcement_logical': 'true', }) else: body_post.update({ 'is_space_reporting_logical': 'false', 'is_space_enforcement_logical': 'false', }) body_patch = { 'retention_period': fake.DELETE_RETENTION_HOURS, } self.client._create_vserver( fake.VSERVER_NAME, [fake.SHARE_AGGREGATE_NAME], fake.IPSPACE_NAME, fake.DELETE_RETENTION_HOURS, fake.FAKE_SERVER_SWITCH_NAME, fake.FAKE_SUBTYPE, logical_space_reporting=logical_space_reporting) mock_sr.assert_has_calls([ mock.call('/svm/svms', 'post', body=body_post), mock.call(f'/svm/svms/{fake.FAKE_UUID}', 'patch', body=body_patch) ]) def test_create_barbican_kms_config_for_specified_vserver(self): mock_sr = self.mock_object(self.client, 'send_request') body = { 'svm.name': fake.VSERVER_NAME, 'configuration.name': fake.FAKE_CONFIG_NAME, 'key_id': fake.FAKE_KEY_ID, 'keystone_url': fake.FAKE_KEYSTONE_URL, 'application_cred_id': fake.FAKE_APPLICATION_CRED_ID, 'application_cred_secret': fake.FAKE_APPLICATION_CRED_SECRET, } self.client.create_barbican_kms_config_for_specified_vserver( fake.VSERVER_NAME, fake.FAKE_CONFIG_NAME, fake.FAKE_KEY_ID, fake.FAKE_KEYSTONE_URL, fake.FAKE_APPLICATION_CRED_ID, 
fake.FAKE_APPLICATION_CRED_SECRET) mock_sr.assert_called_once_with('/security/barbican-kms', 'post', body=body) def test_get_key_store_config_uuid(self): fake_query = { 'configuration.name': fake.FAKE_CONFIG_NAME } self.mock_object( self.client, 'send_request', mock.Mock( return_value=fake.KEYSTORE_SIMPLE_RESPONSE_REST)) actual_result = self.client.get_key_store_config_uuid( fake.FAKE_CONFIG_NAME) self.client.send_request.assert_called_once_with( '/security/key-stores', 'get', query=fake_query) expected_result = ( fake.KEYSTORE_SIMPLE_RESPONSE_REST[ 'records'][0]['configuration']['uuid']) self.assertEqual(expected_result, actual_result) def test_get_key_store_config_uuid_no_response(self): self.mock_object( self.client, 'send_request', mock.Mock( return_value={})) actual_result = self.client.get_key_store_config_uuid( fake.FAKE_CONFIG_NAME) self.assertIsNone(actual_result) def test_enable_key_store_config(self): config_uuid = fake.FAKE_CONFIG_UUID mock_sr = self.mock_object(self.client, 'send_request') self.client.enable_key_store_config(config_uuid) body = { 'enabled': True, } mock_sr.assert_called_once_with( f'/security/key-stores/{config_uuid}', 'patch', body=body) @ddt.data((f'/name-services/dns/{fake.FAKE_UUID}', 'patch', ['fake_domain'], ['fake_ip']), (f'/name-services/dns/{fake.FAKE_UUID}', 'delete', [], []), ('/name-services/dns', 'post', ['fake_domain'], ['fake_ip'])) @ddt.unpack def test_update_dns_configuration_all_operations(self, endpoint, operation, domains, ips): return_value = fake.FAKE_DNS_CONFIG if operation != 'post' else {} self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value=return_value)) self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) mock_sr = self.mock_object(self.client, 'send_request') body = { 'domains': domains, 'servers': ips } empty_dns_config = (not body['domains'] and not body['servers']) if empty_dns_config: body = {} self.client.update_dns_configuration(ips, 
domains) mock_sr.assert_called_once_with(endpoint, operation, body) @ddt.data(True, False) def test_delete_snapshot(self, ignore_owners): volume_id = fake.VOLUME.get('uuid') self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=fake.VOLUME)) response = fake.SNAPSHOTS_REST_RESPONSE snapshot_id = response.get('records')[0].get('uuid') mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) query = { 'name': fake.SNAPSHOT_NAME, 'fields': 'uuid' } calls = [mock.call(f'/storage/volumes/{volume_id}/snapshots', 'get', query=query)] if ignore_owners: query_cli = { 'vserver': self.client.vserver, 'volume': fake.VOLUME_NAMES[0], 'snapshot': fake.SNAPSHOT_NAME, 'ignore-owners': 'true' } calls.append(mock.call('/private/cli/snapshot', 'delete', query=query_cli)) else: calls.append(mock.call(f'/storage/volumes/{volume_id}/' f'snapshots/{snapshot_id}', 'delete')) self.client.delete_snapshot(fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME, ignore_owners) mock_sr.assert_has_calls(calls) def test_soft_delete_snapshot(self): mock_delete_snapshot = self.mock_object(self.client, 'delete_snapshot') mock_rename_snapshot = self.mock_object(self.client, 'rename_snapshot') self.client.soft_delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME) mock_delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertFalse(mock_rename_snapshot.called) def test_volume_has_luns(self): mock_sr = self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) result = self.client.volume_has_luns(fake.VOLUME_NAMES[0]) query = { 'location.volume.name': fake.VOLUME_NAMES[0], } mock_sr.assert_called_once_with('/storage/luns/', 'get', query=query) self.assertTrue(result) @ddt.data(fake.VOLUME_JUNCTION_PATH, '') def test_volume_has_junctioned_volumes(self, junction_path): mock_sr = 
self.mock_object(self.client, 'send_request') return_records = True if junction_path else False self.mock_object(self.client, '_has_records', mock.Mock(return_value=return_records)) result = self.client.volume_has_junctioned_volumes(junction_path) if junction_path: query = { 'nas.path': junction_path + '/*', } mock_sr.assert_called_once_with('/storage/volumes/', 'get', query=query) self.assertTrue(result) else: self.assertFalse(result) @ddt.data(fake.VOLUME_JUNCTION_PATH, '') def test_get_volume_at_junction_path(self, junction_path): response = fake.VOLUME_LIST_SIMPLE_RESPONSE_REST return_records = True if junction_path else False mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=return_records)) query = { 'nas.path': junction_path, 'fields': 'name' } result = self.client.get_volume_at_junction_path(junction_path) expected = { 'name': response.get('records')[0].get('name') } if junction_path: mock_sr.assert_called_once_with('/storage/volumes/', 'get', query=query) self.assertEqual(expected, result) else: self.assertIsNone(result) def test_get_aggregate_for_volume(self): response = fake.FAKE_SVM_AGGREGATES.get('records')[0] mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) result = self.client.get_aggregate_for_volume(fake.VOLUME_NAMES[0]) expected = fake.SHARE_AGGREGATE_NAMES_LIST query = { 'name': fake.VOLUME_NAMES[0], 'fields': 'aggregates' } mock_sr.assert_called_once_with('/storage/volumes/', 'get', query=query) self.assertEqual(expected, result) def test_get_volume_to_manage(self): response = fake.FAKE_VOLUME_MANAGE mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) expected = { 'aggregate': fake.SHARE_AGGREGATE_NAME, 'aggr-list': [], 'junction-path': fake.VOLUME_JUNCTION_PATH, 'name': 
fake.VOLUME_NAMES[0], 'type': 'fake_type', 'style': 'flex', 'owning-vserver-name': fake.VSERVER_NAME, 'size': fake.SHARE_SIZE, 'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME } result = self.client.get_volume_to_manage(fake.SHARE_AGGREGATE_NAME, fake.VOLUME_NAMES[0]) query = { 'name': fake.VOLUME_NAMES[0], 'fields': 'name,aggregates.name,nas.path,name,type,style,' 'svm.name,qos.policy.name,space.size', 'aggregates.name': fake.SHARE_AGGREGATE_NAME } mock_sr.assert_called_once_with('/storage/volumes', 'get', query=query) self.assertEqual(expected, result) def test_get_cifs_share_access(self): response = fake.FAKE_CIFS_RECORDS mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) query = { 'name': fake.SHARE_NAME } query_acls = { 'fields': 'user_or_group,permission' } expected = { 'Everyone': 'full_control', 'root': 'no_access' } result = self.client.get_cifs_share_access(fake.SHARE_NAME) svm_uuid = response.get('records')[0].get('svm').get('uuid') mock_sr.assert_has_calls([ mock.call('/protocols/cifs/shares', 'get', query=query), mock.call(f'/protocols/cifs/shares/{svm_uuid}/{fake.SHARE_NAME}/' 'acls', 'get', query=query_acls) ]) self.assertEqual(expected, result) @ddt.data((netapp_api.EREST_LICENSE_NOT_INSTALLED, False), (netapp_api.EREST_SNAPSHOT_NOT_SPECIFIED, True)) @ddt.unpack def test_check_snaprestore_license(self, code, expected): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error(code))) result = self.client.check_snaprestore_license() self.assertEqual(expected, result) body = { 'restore_to.snapshot.name': '' } query = { 'name': '*' } self.client.send_request.assert_called_once_with('/storage/volumes', 'patch', body=body, query=query) def test_check_snaprestore_license_error(self): self.mock_object(self.client, 'send_request') self.assertRaises(exception.NetAppException, self.client.check_snaprestore_license) def test__sort_data_ports_by_speed(self): ports = fake.FAKE_PORTS result 
= self.client._sort_data_ports_by_speed(ports) expected = [{'speed': '4'}, {'speed': 'auto'}, {'speed': 'undef'}, {'speed': 'fake_speed'}, {'speed': ''}] self.assertEqual(expected, result) def test_create_port_and_broadcast_domain(self): self.mock_object(self.client, '_create_vlan') self.mock_object(self.client, '_ensure_broadcast_domain_for_port') res = self.client.create_port_and_broadcast_domain(fake.NODE_NAME, fake.PORT, fake.VLAN, fake.MTU, fake.IPSPACE_NAME) expected = f'{fake.PORT}-{fake.VLAN}' self.assertEqual(expected, res) @ddt.data(netapp_api.EREST_DUPLICATE_ENTRY, None) def test__create_vlan(self, code): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error(code))) if not code: self.assertRaises(exception.NetAppException, self.client._create_vlan, fake.NODE_NAME, fake.PORT, fake.VLAN) else: self.client._create_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN) body = { 'vlan.base_port.name': fake.PORT, 'node.name': fake.NODE_NAME, 'vlan.tag': fake.VLAN, 'type': 'vlan' } self.client.send_request.assert_called_once_with( '/network/ethernet/ports', 'post', body=body) @ddt.data(netapp_api.EREST_ENTRY_NOT_FOUND, None) def test_delete_fpolicy_event_error_not_found(self, code): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error(code))) if not code: self.assertRaises(exception.NetAppException, self.client.delete_fpolicy_event, fake.SHARE_NAME, 'fake_event') else: self.client.delete_fpolicy_event(fake.SHARE_NAME, 'fake_event') self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) @ddt.data(netapp_api.EREST_ENTRY_NOT_FOUND, None) def test_delete_fpolicy_policy_request_error(self, code): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request', 
mock.Mock(side_effect=self._mock_api_error(code))) if not code: self.assertRaises(exception.NetAppException, self.client.delete_fpolicy_policy, fake.SHARE_NAME, 'fake_policy') else: self.client.delete_fpolicy_policy(fake.SHARE_NAME, 'fake_policy') self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) def test_modify_fpolicy_scope(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST svm_uuid = volume['svm']['uuid'] self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) mock_sr = self.mock_object(self.client, 'send_request') body = { 'name': fake.FPOLICY_POLICY_NAME, 'scope.include_shares': fake.SHARE_NAME, 'scope.include_extension': 'fake_extension', 'scope.exclude_extension': 'fake_extension' } self.client.modify_fpolicy_scope(fake.SHARE_NAME, fake.FPOLICY_POLICY_NAME, [fake.SHARE_NAME], ['fake_extension'], ['fake_extension']) mock_sr.assert_called_once_with(f'/protocols/fpolicy/{svm_uuid}/' 'policies/', 'patch', body=body) def test_remove_cifs_share(self): response = fake.SVMS_LIST_SIMPLE_RESPONSE_REST svm_id = response.get('records')[0]['uuid'] mock_sr = self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.client.remove_cifs_share(fake.SHARE_NAME) query = { 'name': self.client.vserver, 'fields': 'uuid' } mock_sr.assert_has_calls([ mock.call('/svm/svms', 'get', query=query), mock.call(f'/protocols/cifs/shares/{svm_id}' f'/{fake.SHARE_NAME}', 'delete')]) def test_qos_policy_group_get_error(self): code = netapp_api.EREST_NOT_AUTHORIZED self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error(code))) self.assertRaises(exception.NetAppException, self.client.qos_policy_group_get, fake.QOS_POLICY_GROUP_NAME) def test_qos_policy_group_get_not_found(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.assertRaises(exception.NetAppException, self.client.qos_policy_group_get, 
fake.QOS_POLICY_GROUP_NAME) def test_remove_unused_qos_policy_groups_error(self): res_list = [fake.QOS_POLICY_GROUP_REST, netapp_api.api.NaApiError] self.mock_object(self.client, 'send_request', mock.Mock(side_effect=res_list)) self.client.remove_unused_qos_policy_groups() self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) def test_mount_volume_error(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST code = netapp_api.EREST_SNAPMIRROR_INITIALIZING self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error(code))) self.assertRaises(netapp_api.api.NaApiError, self.client.mount_volume, fake.VOLUME_NAMES[0]) def test_get_aggregate_for_volume_empty(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.assertRaises(exception.NetAppException, self.client.get_aggregate_for_volume, fake.VOLUME_NAMES[0]) def test_get_nfs_export_policy_for_volume_empty(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=False)) self.assertRaises(exception.NetAppException, self.client.get_nfs_export_policy_for_volume, fake.VOLUME_NAMES[0]) def test_get_unique_export_policy_id_empty(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=False)) self.assertRaises(exception.NetAppException, self.client.get_unique_export_policy_id, fake.FPOLICY_POLICY_NAME) def test__remove_nfs_export_rules_error(self): self.mock_object(self.client, 'get_unique_export_policy_id', mock.Mock(return_value=fake.FAKE_UUID)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) 
self.assertRaises(netapp_api.api.NaApiError, self.client._remove_nfs_export_rules, fake.FPOLICY_POLICY_NAME, [1]) def test_get_volume_move_status_error(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=False)) self.assertRaises(exception.NetAppException, self.client.get_volume_move_status, fake.VOLUME_NAMES[0], fake.VSERVER_NAME) def test__set_snapmirror_state_error(self): self.mock_object(self.client, 'get_snapmirrors', mock.Mock(return_value=[])) self.assertRaises(netapp_utils.NetAppDriverException, self.client._set_snapmirror_state, 'fake_state', 'fake_source_path', 'fake_dest_path', 'fake_source_vserver', 'fake_source_volume', 'fake_dest_vserver', 'fake_dest_volume') def test__break_snapmirror_error(self): fake_snapmirror = fake.REST_GET_SNAPMIRRORS_RESPONSE self.mock_object(self.client, '_get_snapmirrors', mock.Mock(return_value=fake_snapmirror)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(netapp_api.api.NaApiError, self.client._break_snapmirror) def test__resync_snapmirror_no_parameter(self): mock_snap = self.mock_object(self.client, '_resume_snapmirror') self.client._resync_snapmirror() mock_snap.assert_called_once_with(None, None, None, None, None, None) def test_add_nfs_export_rule_with_rule_created(self): self.mock_object(self.client, '_get_nfs_export_rule_indices', mock.Mock(return_value=[1])) update = self.mock_object(self.client, '_update_nfs_export_rule') remove = self.mock_object(self.client, '_remove_nfs_export_rules') self.client.add_nfs_export_rule(fake.FPOLICY_POLICY_NAME, 'fake_client', True, 'fake_auth') update.assert_called_once_with(fake.FPOLICY_POLICY_NAME, 'fake_client', True, 1, 'fake_auth') remove.assert_called_once_with(fake.FPOLICY_POLICY_NAME, []) def test__update_snapmirror_no_snapmirrors(self): self.mock_object(self.client, 
'_get_snapmirrors', mock.Mock(return_value=[])) self.assertRaises(netapp_utils.NetAppDriverException, self.client._update_snapmirror) @ddt.data((netapp_api.EREST_SNAPMIRROR_NOT_INITIALIZED, 'Another transfer is in progress'), (None, 'fake')) @ddt.unpack def test__update_snapmirror_error(self, code, message): snapmirrors = fake.REST_GET_SNAPMIRRORS_RESPONSE self.mock_object(self.client, '_get_snapmirrors', mock.Mock(return_value=snapmirrors)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error(code, message))) self.assertRaises(netapp_api.api.NaApiError, self.client._update_snapmirror) @ddt.data(netapp_api.EREST_DUPLICATE_ENTRY, None) def test_create_kerberos_realm_error(self, code): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error(code))) if code: self.client.create_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) else: self.assertRaises(exception.NetAppException, self.client.create_kerberos_realm, fake.KERBEROS_SECURITY_SERVICE) def test_configure_kerberos_error(self): self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, '_get_kerberos_service_principal_name') self.mock_object(self.client, 'get_network_interfaces', mock.Mock(return_value=[])) self.assertRaises(exception.NetAppException, self.client.configure_kerberos, fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME) def test_configure_ldap(self): mock_ldap = self.mock_object(self.client, '_create_ldap_client') self.client.configure_ldap(fake.LDAP_AD_SECURITY_SERVICE, 30, fake.VSERVER_NAME) mock_ldap.assert_called_once_with(fake.LDAP_AD_SECURITY_SERVICE, vserver_name=fake.VSERVER_NAME) def test_configure_active_directory_error(self): self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'configure_cifs_aes_encryption') self.mock_object(self.client, 'set_preferred_dc') self.mock_object(self.client, '_get_cifs_server_name') 
self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(exception.NetAppException, self.client.configure_active_directory, fake.LDAP_AD_SECURITY_SERVICE, fake.VSERVER_NAME, False) def test__get_unique_svm_by_name_error(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.assertRaises(exception.NetAppException, self.client._get_unique_svm_by_name, fake.VSERVER_NAME) def test_get_ontap_version_scoped(self): self.client.get_ontap_version = self.original_get_ontap_version e = netapp_api.api.NaApiError(code=netapp_api.EREST_NOT_AUTHORIZED) res_list = [e, fake.GET_VERSION_RESPONSE_REST] version = fake.GET_VERSION_RESPONSE_REST['records'][0]['version'] expected = { 'version': version['full'], 'version-tuple': (9, 11, 1) } self.mock_object(self.client, 'send_request', mock.Mock(side_effect=res_list)) result = self.client.get_ontap_version(self=self.client, cached=False) self.assertEqual(expected, result) def test_get_licenses_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(netapp_api.api.NaApiError, self.client.get_licenses) def test__get_volume_by_args_error(self): res = fake.VOLUME_GET_ITER_RESPONSE_REST_PAGE self.mock_object(self.client, 'send_request', mock.Mock(return_value=res)) self.assertRaises(exception.NetAppException, self.client._get_volume_by_args, is_root=True) def test_get_aggregate_no_name(self): expected = {} result = self.client.get_aggregate('') self.assertEqual(expected, result) def test_get_aggregate_error(self): self.mock_object(self.client, '_get_aggregates', mock.Mock(side_effect=self._mock_api_error())) result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME) expected = {} self.assertEqual(expected, result) def test_get_node_for_aggregate_no_name(self): result = self.client.get_node_for_aggregate('') self.assertIsNone(result) 
@ddt.data(netapp_api.EREST_NOT_AUTHORIZED, None) def test_get_node_for_aggregate_error(self, code): self.mock_object(self.client, '_get_aggregates', mock.Mock(side_effect=self._mock_api_error(code))) if code: r = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME) self.assertIsNone(r) else: self.assertRaises(netapp_api.api.NaApiError, self.client.get_node_for_aggregate, fake.SHARE_AGGREGATE_NAME) def test_get_vserver_aggregate_capabilities_no_response(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.assertRaises(exception.NetAppException, self.client.get_vserver_aggregate_capacities, fake.SHARE_AGGREGATE_NAME) def test_get_vserver_aggregate_capacities_no_aggregate(self): response = fake.FAKE_AGGREGATES_RESPONSE share_name = fake.SHARE_AGGREGATE_NAME self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) res = self.client.get_vserver_aggregate_capacities(share_name) expected = {} self.assertEqual(expected, res) def test_rename_nfs_export_policy_error(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_has_records', mock.Mock(return_value=False)) self.assertRaises(exception.NetAppException, self.client.rename_nfs_export_policy, 'fake_policy_name', 'fake_new_policy_name') @ddt.data((False, exception.StorageResourceNotFound), (True, exception.NetAppException)) @ddt.unpack def test_get_volume_error(self, records, exception): res = copy.deepcopy(fake.FAKE_VOLUME_MANAGE) res['num_records'] = 2 self.mock_object(self.client, 'send_request', mock.Mock(return_value=res)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=records)) self.assertRaises(exception, self.client.get_volume, fake.VOLUME_NAMES[0]) def test_get_volume_no_aggregate(self): res = copy.deepcopy(fake.FAKE_VOLUME_MANAGE) res.get('records')[0]['aggregates'] = [] self.mock_object(self.client, 'send_request', mock.Mock(return_value=res)) 
fake_volume = res.get('records', [])[0] expected = { 'aggregate': '', 'aggr-list': [], 'junction-path': fake_volume.get('nas', {}).get('path', ''), 'name': fake_volume.get('name', ''), 'owning-vserver-name': fake_volume.get('svm', {}).get('name', ''), 'type': fake_volume.get('type', ''), 'style': fake_volume.get('style', ''), 'size': fake_volume.get('space', {}).get('size', ''), 'size-used': fake_volume.get('space', {}).get('used', ''), 'qos-policy-group-name': fake_volume.get('qos', {}) .get('policy', {}) .get('name', ''), 'style-extended': fake_volume.get('style', ''), 'snaplock-type': fake_volume.get('snaplock', {}).get('type', '') } result = self.client.get_volume(fake.VOLUME_NAMES[0]) self.assertEqual(expected, result) def test_get_job_state_error(self): response = { 'records': [fake.JOB_SUCCESSFUL_REST, fake.JOB_SUCCESSFUL_REST] } self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) self.assertRaises(exception.NetAppException, self.client.get_job_state, fake.JOB_ID) def test_get_volume_efficiency_status_error(self): self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.client.get_volume_efficiency_status(fake.VOLUME_NAMES[0]) self.assertEqual(1, client_cmode_rest.LOG.error.call_count) def test_get_fpolicy_scopes_not_found(self): self.mock_object(self.client, '_get_volume_by_args', mock.Mock(side_effect=exception.NetAppException)) result = self.client.get_fpolicy_scopes(fake.SHARE_NAME) expected = [] self.assertEqual(expected, result) def test_delete_fpolicy_policy_error(self): self.mock_object(self.client, '_get_volume_by_args', mock.Mock(side_effect=exception.NetAppException)) self.mock_object(self.client, 'send_request') res = self.client.delete_fpolicy_policy(fake.SHARE_NAME, fake.FPOLICY_POLICY_NAME) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) self.assertIsNone(res) def 
test_delete_fpolicy_event_error(self): self.mock_object(self.client, '_get_volume_by_args', mock.Mock(side_effect=exception.NetAppException)) self.mock_object(self.client, 'send_request') res = self.client.delete_fpolicy_event(fake.SHARE_NAME, fake.FPOLICY_EVENT_NAME) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) self.assertIsNone(res) def test_delete_nfs_export_policy_no_records(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) res = self.client.delete_nfs_export_policy(fake.FPOLICY_POLICY_NAME) self.assertIsNone(res) def test_remove_cifs_share_not_found(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.assertRaises(exception.NetAppException, self.client.remove_cifs_share, fake.SHARE_NAME) @ddt.data(netapp_api.EREST_ENTRY_NOT_FOUND, None) def test_remove_cifs_share_error(self, code): responses = [fake.SVMS_LIST_SIMPLE_RESPONSE_REST, netapp_api.api.NaApiError(code=code)] self.mock_object(self.client, 'send_request', mock.Mock(side_effect=responses)) if not code: self.assertRaises(netapp_api.api.NaApiError, self.client.remove_cifs_share, fake.SHARE_NAME) else: result = self.client.remove_cifs_share(fake.SHARE_NAME) self.assertIsNone(result) def test_qos_policy_group_does_not_exists(self): self.mock_object(self.client, 'qos_policy_group_get', mock.Mock(side_effect=exception.NetAppException)) result = self.client.qos_policy_group_exists(fake.QOS_POLICY_GROUP) self.assertFalse(result) def test_qos_policy_group_rename_error(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.assertRaises(exception.NetAppException, self.client.qos_policy_group_rename, fake.QOS_POLICY_GROUP_NAME, 'fake_new_qos_policy_group_name') def test_qos_policy_group_rename_same_name(self): res = 
self.client.qos_policy_group_rename(fake.QOS_POLICY_GROUP_NAME, fake.QOS_POLICY_GROUP_NAME) self.assertIsNone(res) def test_qos_policy_group_modify_error(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.assertRaises(exception.NetAppException, self.client.qos_policy_group_modify, fake.QOS_POLICY_GROUP_NAME, fake.QOS_MAX_THROUGHPUT) def test_update_kerberos_realm_error(self): self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(exception.NetAppException, self.client.update_kerberos_realm, fake.KERBEROS_SECURITY_SERVICE) @ddt.data(('fake_domain', 'fake_server'), (None, None)) @ddt.unpack def test_modify_ldap_error(self, domain, server): security_service = { 'domain': domain, 'server': server, 'user': 'fake_user', 'ou': 'fake_ou', 'dns_ip': 'fake_ip', 'password': 'fake_password' } self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.mock_object(self.client, 'send_request') self.assertRaises(exception.NetAppException, self.client.modify_ldap, security_service, fake.LDAP_AD_SECURITY_SERVICE) def test_update_dns_configuration_error(self): self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) dns_config = { 'domains': [fake.KERBEROS_SECURITY_SERVICE['domain']], 'dns-ips': [fake.KERBEROS_SECURITY_SERVICE['dns_ip']], } self.mock_object(self.client, 'get_dns_config', mock.Mock(return_value=dns_config)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(exception.NetAppException, self.client.update_dns_configuration, ['fake_ips'], ['fake_domain']) def test_remove_preferred_dcs_error(self): fake_response = [fake.PREFERRED_DC_REST, netapp_api.api.NaApiError] self.mock_object(self.client, 
'send_request', mock.Mock(side_effect=fake_response)) self.assertRaises(exception.NetAppException, self.client.remove_preferred_dcs, fake.LDAP_AD_SECURITY_SERVICE, fake.FAKE_UUID) def test_set_preferred_dc_error(self): security = copy.deepcopy(fake.LDAP_AD_SECURITY_SERVICE) security['server'] = 'fake_server' self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=fake.FAKE_UUID)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error())) self.assertRaises(exception.NetAppException, self.client.set_preferred_dc, security, fake.VSERVER_NAME) def test_set_preferred_dc_no_server(self): result = self.client.set_preferred_dc(fake.LDAP_AD_SECURITY_SERVICE, fake.VSERVER_NAME) self.assertIsNone(result) def test__get_svm_peer_uuid_error(self): response = fake.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) self.assertRaises(exception.NetAppException, self.client._get_svm_peer_uuid, fake.VSERVER_NAME, fake.VSERVER_PEER_NAME) def test_create_vserver_dp_destination(self): mock_vserver = self.mock_object(self.client, '_create_vserver') self.client.create_vserver_dp_destination(fake.VSERVER_NAME, fake.FAKE_AGGR_LIST, fake.IPSPACE_NAME, fake.DELETE_RETENTION_HOURS) mock_vserver.assert_called_once_with(fake.VSERVER_NAME, fake.FAKE_AGGR_LIST, fake.IPSPACE_NAME, fake.DELETE_RETENTION_HOURS, subtype='dp_destination') @ddt.data(':', '.') def test_create_route_no_destination(self, gateway): mock_sr = self.mock_object(self.client, 'send_request') body = { 'gateway': gateway, 'destination.address': '::' if ":" in gateway else '0.0.0.0', 'destination.netmask': '0' } self.client.create_route(gateway) mock_sr.assert_called_once_with('/network/ip/routes', 'post', body=body) def test_list_root_aggregates(self): return_value = fake.FAKE_ROOT_AGGREGATES_RESPONSE self.mock_object(self.client, 'send_request', mock.Mock(return_value=return_value)) result = 
self.client.list_root_aggregates() expected = [fake.SHARE_AGGREGATE_NAME] self.assertEqual(expected, result) @ddt.data(("fake_server", "fake_domain"), (None, None)) @ddt.unpack def test__create_ldap_client_error(self, server, domain): security_service = { 'server': server, 'domain': domain, 'user': 'fake_user', 'ou': 'fake_ou', 'dns_ip': 'fake_ip', 'password': 'fake_password' } self.assertRaises(exception.NetAppException, self.client._create_ldap_client, security_service) @ddt.data(["password"], ["user"]) def test__modify_active_directory_security_service_error(self, keys): svm_uuid = fake.FAKE_UUID user_records = fake.FAKE_CIFS_LOCAL_USER.get('records')[0] self.mock_object(self.client, '_get_unique_svm_by_name', mock.Mock(return_value=svm_uuid)) self.mock_object(self.client, 'send_request', mock.Mock(side_effect=[user_records, netapp_api.api.NaApiError])) self.mock_object(self.client, 'remove_preferred_dcs') self.mock_object(self.client, 'set_preferred_dc') new_security_service = { 'user': 'new_user', 'password': 'new_password', 'server': 'fake_server' } current_security_service = { 'server': 'fake_current_server' } self.assertRaises( exception.NetAppException, self.client.modify_active_directory_security_service, fake.VSERVER_NAME, keys, new_security_service, current_security_service) def test_disable_kerberos_error(self): fake_api_response = fake.NFS_LIFS_REST api_error = self._mock_api_error() self.mock_object(self.client, 'get_network_interfaces', mock.Mock(return_value=fake_api_response)) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=api_error)) self.assertRaises(exception.NetAppException, self.client.disable_kerberos, fake.LDAP_AD_SECURITY_SERVICE) def test_set_volume_snapdir_access_exception(self): fake_hide_snapdir = 'fake-snapdir' self.mock_object(self.client, '_get_volume_by_args', mock.Mock(side_effect=exception.NetAppException)) self.assertRaises(exception.SnapshotResourceNotFound, self.client.set_volume_snapdir_access, 
fake.VOLUME_NAMES[0], fake_hide_snapdir) def test__get_broadcast_domain_for_port_exception(self): fake_response_empty = { "records": [{}] } self.mock_object(self.client, 'send_request', mock.Mock( return_value=fake_response_empty)) self.assertRaises(exception.NetAppException, self.client._get_broadcast_domain_for_port, fake.NODE_NAME, fake.PORT) def test__configure_nfs_exception(self): fake_nfs = { 'udp-max-xfer-size': 10000, 'tcp-max-xfer-size': 10000, } self.assertRaises(exception.NetAppException, self.client._configure_nfs, fake_nfs, fake.FAKE_UUID) def test_get_snapshot_exception(self): self.mock_object(self.client, '_get_volume_by_args', mock.Mock(side_effect=exception.NetAppException)) self.assertRaises(exception.SnapshotResourceNotFound, self.client.get_snapshot, fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME) def test_delete_snapshot_exception(self): self.mock_object(self.client, '_get_volume_by_args', mock.Mock(side_effect=exception.NetAppException)) self.client.delete_snapshot(fake.VOLUME_NAMES[0], fake.SNAPSHOT_NAME, True) self.assertEqual(1, client_cmode_rest.LOG.warning.call_count) def test_set_nfs_export_policy_for_volume_exception(self): return_code = netapp_api.EREST_CANNOT_MODITY_OFFLINE_VOLUME self.mock_object(self.client, 'send_request', mock.Mock(side_effect=self._mock_api_error( code=return_code))) self.client.set_nfs_export_policy_for_volume( fake.VOLUME_NAMES[0], fake.EXPORT_POLICY_NAME) self.assertEqual(1, client_cmode_rest.LOG.debug.call_count) def test__break_snapmirror_exception(self): fake_snapmirror = copy.deepcopy(fake.REST_GET_SNAPMIRRORS_RESPONSE) fake_snapmirror[0]['transferring-state'] = 'error' self.mock_object( self.client, '_get_snapmirrors', mock.Mock(return_value=fake_snapmirror)) self.assertRaises(netapp_utils.NetAppDriverException, self.client._break_snapmirror) def test_get_svm_volumes_total_size(self): expected = 1 fake_query = { 'svm.name': fake.VSERVER_NAME, 'fields': 'size' } self.mock_object(self.client, 'send_request', 
mock.Mock(return_value=fake.FAKE_GET_VOLUME)) result = self.client.get_svm_volumes_total_size(fake.VSERVER_NAME) self.client.send_request.assert_called_once_with( '/storage/volumes/', 'get', query=fake_query) self.assertEqual(expected, result) @ddt.data(fake.CIFS_SECURITY_SERVICE, fake.CIFS_SECURITY_SERVICE_3) def test_configure_active_directory_credential_error(self, security_service): msg = "could not authenticate" fake_security = copy.deepcopy(security_service) self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'set_preferred_dc') self.mock_object(self.client, 'configure_cifs_aes_encryption') self.mock_object(self.client, '_get_cifs_server_name') self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.api.EAPIERROR, message=msg)) self.assertRaises(exception.SecurityServiceFailedAuth, self.client.configure_active_directory, fake_security, fake.VSERVER_NAME, False) @ddt.data(fake.CIFS_SECURITY_SERVICE, fake.CIFS_SECURITY_SERVICE_3) def test_configure_active_directory_user_privilege_error(self, security_service): msg = "insufficient access" fake_security = copy.deepcopy(security_service) self.mock_object(self.client, 'configure_dns') self.mock_object(self.client, 'set_preferred_dc') self.mock_object(self.client, 'configure_cifs_aes_encryption') self.mock_object(self.client, '_get_cifs_server_name') self.mock_object(self.client, 'send_request', self._mock_api_error(code=netapp_api.api.EAPIERROR, message=msg)) self.assertRaises(exception.SecurityServiceFailedAuth, self.client.configure_active_directory, fake_security, fake.VSERVER_NAME, False) def test_snapmirror_restore_vol(self): uuid = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST["uuid"] body = { "destination": {"path": fake.SM_DEST_PATH, "cluster": {"name": fake.CLUSTER_NAME}}, "source_snapshot": fake.SNAPSHOT_NAME } snapmirror_info = [{'destination-vserver': "fake_des_vserver", 'destination-volume': "fake_des_vol", 'relationship-status': "idle", 'uuid': uuid}] 
self.mock_object(self.client, 'get_snapmirrors', mock.Mock(return_value=snapmirror_info)) self.mock_object(self.client, 'send_request') self.client.snapmirror_restore_vol(source_path=fake.SM_SOURCE_PATH, dest_path=fake.SM_DEST_PATH, source_snapshot=fake.SNAPSHOT_NAME, des_cluster=fake.CLUSTER_NAME) self.client.send_request.assert_called_once_with( f'/snapmirror/relationships/{uuid}/restore', 'post', body=body) @ddt.data({'snapmirror_label': None, 'newer_than': '2345'}, {'snapmirror_label': "fake_backup", 'newer_than': None}) @ddt.unpack def test_list_volume_snapshots(self, snapmirror_label, newer_than): fake_response = fake.SNAPSHOTS_REST_RESPONSE api_response = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=api_response)) mock_request = self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake_response)) self.client.list_volume_snapshots(fake.SHARE_NAME, snapmirror_label=snapmirror_label, newer_than=newer_than) uuid = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST["uuid"] query = {} if snapmirror_label: query = { 'snapmirror_label': snapmirror_label, } if newer_than: query['create_time'] = '>' + newer_than mock_request.assert_called_once_with( f'/storage/volumes/{uuid}/snapshots/', 'get', query=query) @ddt.data(('vault', False, True), (None, False, False)) @ddt.unpack def test_create_snapmirror_policy_rest(self, policy_type, discard_network_info, preserve_snapshots): fake_response = fake.SNAPSHOTS_REST_RESPONSE self.mock_object(self.client, 'send_request', mock.Mock(return_value=fake_response)) policy_name = fake.SNAPMIRROR_POLICY_NAME self.client.create_snapmirror_policy( policy_name, policy_type=policy_type, discard_network_info=discard_network_info, preserve_snapshots=preserve_snapshots, snapmirror_label='backup', keep=30) if policy_type == "vault": body = {"name": policy_name, "type": "async", "create_snapshot_on_source": False} else: body = {"name": policy_name, "type": policy_type} if 
discard_network_info: body["exclude_network_config"] = {'svmdr-config-obj': 'network'} if preserve_snapshots: body["retention"] = [{"label": 'backup', "count": 30}] self.client.send_request.assert_called_once_with( '/snapmirror/policies/', 'post', body=body) def test_is_snaplock_compliance_clock_configured(self): self.mock_object(self.client, '_get_cluster_node_uuid', mock.Mock(return_value="uuid")) api_response = {'time': 'Thu Aug 08 00:51:30 EDT 2024 -04:00'} self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.is_snaplock_compliance_clock_configured( "test_node" ) self.assertIs(True, result) def test_is_snaplock_compliance_clock_configured_negative(self): self.mock_object(self.client, '_get_cluster_node_uuid', mock.Mock(return_value="uuid")) api_response = {'time': None} self.mock_object(self.client, 'send_request', mock.Mock(return_value=api_response)) result = self.client.is_snaplock_compliance_clock_configured( "test_node" ) self.assertIs(False, result) @ddt.data({'options': {'snaplock_autocommit_period': "4hours", 'snaplock_min_retention_period': "6days", 'snaplock_max_retention_period': "8months", 'snaplock_default_retention_period': "8days"}, }, {'options': {'snaplock_autocommit_period': "4hours", 'snaplock_min_retention_period': "6days", 'snaplock_max_retention_period': "8months", 'snaplock_default_retention_period': "min"}, }, {'options': {'snaplock_autocommit_period': "4hours", 'snaplock_min_retention_period': "6days", 'snaplock_max_retention_period': "8months", 'snaplock_default_retention_period': "max"}, }, ) @ddt.unpack def test_set_snaplock_attributes(self, options): self.mock_object(self.client, 'send_request') body = { 'snaplock.autocommit_period': utils.convert_time_duration_to_iso_format( options.get('snaplock_autocommit_period')), 'snaplock.retention.minimum': utils.convert_time_duration_to_iso_format( options.get('snaplock_min_retention_period')), 'snaplock.retention.maximum': 
utils.convert_time_duration_to_iso_format( options.get('snaplock_max_retention_period')), } if options.get('snaplock_default_retention_period') == "min": body['snaplock.retention.default'] = ( utils.convert_time_duration_to_iso_format( options.get('snaplock_min_retention_period')) ) elif options.get('snaplock_default_retention_period') == 'max': body['snaplock.retention.default'] = ( utils.convert_time_duration_to_iso_format( options.get('snaplock_max_retention_period')) ) else: body['snaplock.retention.default'] = ( utils.convert_time_duration_to_iso_format( options.get('snaplock_default_retention_period')) ) self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value={'uuid': fake.FAKE_UUID})) self.client.set_snaplock_attributes(fake.SHARE_NAME, **options) vol_uid = fake.FAKE_UUID self.client.send_request.assert_called_once_with( f'/storage/volumes/{vol_uid}', 'patch', body=body) def test_set_snaplock_attributes_none(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value={'uuid': fake.FAKE_UUID})) options = {'snaplock_autocommit_period': None, 'snaplock_min_retention_period': None, 'snaplock_max_retention_period': None, 'snaplock_default_retention_period': None, } self.client.set_snaplock_attributes(fake.SHARE_NAME, **options) self.client.send_request.assert_not_called() def test__get_cluster_node_uuid(self): response = {'records': [{'uuid': fake.FAKE_UUID}]} self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) result = self.client._get_cluster_node_uuid("fake_node") self.assertEqual(result, fake.FAKE_UUID) @ddt.data("compliance", "enterprise") def test__is_snaplock_enabled_volume_true(self, snaplock_type): vol_attr = {'snaplock-type': snaplock_type} self.mock_object(self.client, 'get_volume', mock.Mock(return_value=vol_attr)) result = self.client._is_snaplock_enabled_volume( fake.SHARE_AGGREGATE_NAMES ) self.assertIs(True, result) def 
test__is_snaplock_enabled_volume_false(self): vol_attr = {'snaplock-type': "non-snaplock"} self.mock_object(self.client, 'get_volume', mock.Mock(return_value=vol_attr)) result = self.client._is_snaplock_enabled_volume( fake.SHARE_AGGREGATE_NAMES ) self.assertIs(False, result) def test_get_storage_failover_partner(self): self.mock_object(self.client, '_get_cluster_node_uuid', mock.Mock(return_value=fake.FAKE_UUID)) response = {'ha': {'partners': [{'name': 'partner_node'}]}} self.mock_object(self.client, 'send_request', mock.Mock(return_value=response)) result = self.client.get_storage_failover_partner("fake_node") self.assertEqual(result, "partner_node") def test_get_migratable_data_lif_for_node(self): api_response = fake.GENERIC_NETWORK_INTERFACES_GET_REPONSE expected_result = [fake.LIF_NAME] self.mock_object(self.client, '_has_records', mock.Mock(return_value=True)) self.mock_object( self.client, 'send_request', mock.Mock(side_effect=self._send_request_side_effect) ) uuid = api_response['records'][0]['uuid'] result = self.client.get_migratable_data_lif_for_node("fake_node") self.client.send_request.assert_any_call( '/network/ip/interfaces', 'get', query={ 'services': 'data_nfs|data_cifs', 'location.home_node.name': 'fake_node', 'fields': 'name', } ) self.client.send_request.assert_any_call( f'/network/ip/interfaces/{uuid}', 'get' ) self.assertEqual(expected_result, result) def _send_request_side_effect(self, endpoint, method, query=None): if (endpoint == '/network/ip/interfaces' and method == 'get' and query is not None): return {"records": [{"uuid": "fake_uuid", "name": fake.LIF_NAME}]} elif (endpoint.startswith('/network/ip/interfaces/') and method == 'get'): return {'location': {'failover': 'sfo_partners_only'}} return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/client/test_rest_api.py0000664000175000017500000003750600000000000030213 
0ustar00zuulzuul00000000000000# Copyright 2022 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for NetApp REST API layer """ from unittest import mock import ddt from oslo_serialization import jsonutils import requests from requests import auth from manila.share.drivers.netapp.dataontap.client import api as legacy_api from manila.share.drivers.netapp.dataontap.client import rest_api as netapp_api from manila import test from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake @ddt.ddt class NetAppRestApiServerTests(test.TestCase): """Test case for NetApp REST API server methods.""" def setUp(self): self.rest_client = netapp_api.RestNaServer('127.0.0.1') super(NetAppRestApiServerTests, self).setUp() @ddt.data(None, 'my_cert') def test__init__ssl_verify(self, ssl_cert_path): client = netapp_api.RestNaServer('127.0.0.1', ssl_cert_path=ssl_cert_path) if ssl_cert_path: self.assertEqual(ssl_cert_path, client._ssl_verify) else: self.assertTrue(client._ssl_verify) @ddt.data(None, 'ftp') def test_set_transport_type_value_error(self, transport_type): self.assertRaises(ValueError, self.rest_client.set_transport_type, transport_type) @ddt.data('!&', '80na', '') def test_set_port__value_error(self, port): self.assertRaises(ValueError, self.rest_client.set_port, port) @ddt.data( {'port': None, 'protocol': 'http', 'expected_port': '80'}, {'port': None, 'protocol': 'https', 'expected_port': '443'}, {'port': '111', 'protocol': None, 
'expected_port': '111'} ) @ddt.unpack def test_set_port(self, port, protocol, expected_port): self.rest_client._protocol = protocol self.rest_client.set_port(port=port) self.assertEqual(expected_port, self.rest_client._port) @ddt.data('!&', '80na', '') def test_set_timeout_value_error(self, timeout): self.assertRaises(ValueError, self.rest_client.set_timeout, timeout) @ddt.data({'params': {'major': 1, 'minor': '20a'}}, {'params': {'major': '20a', 'minor': 1}}, {'params': {'major': '!*', 'minor': '20a'}}) @ddt.unpack def test_set_api_version_value_error(self, params): self.assertRaises(ValueError, self.rest_client.set_api_version, **params) def test_set_api_version_valid(self): args = {'major': '20', 'minor': 1} self.rest_client.set_api_version(**args) self.assertEqual(self.rest_client._api_major_version, 20) self.assertEqual(self.rest_client._api_minor_version, 1) self.assertEqual(self.rest_client._api_version, "20.1") def test_invoke_successfully_naapi_error(self): self.mock_object(self.rest_client, '_build_headers') self.mock_object(self.rest_client, '_get_base_url', mock.Mock(return_value='')) self.mock_object( self.rest_client, 'send_http_request', mock.Mock(return_value=(10, fake.ERROR_RESPONSE_REST))) self.assertRaises(legacy_api.NaApiError, self.rest_client.invoke_successfully, fake.FAKE_ACTION_URL, 'get') @ddt.data(None, {'fields': 'fake_fields'}) def test_invoke_successfully(self, query): mock_build_header = self.mock_object( self.rest_client, '_build_headers', mock.Mock(return_value=fake.FAKE_HTTP_HEADER)) mock_base = self.mock_object( self.rest_client, '_get_base_url', mock.Mock(return_value=fake.FAKE_BASE_URL)) mock_add_query = self.mock_object( self.rest_client, '_add_query_params_to_url', mock.Mock(return_value=fake.FAKE_ACTION_URL)) http_code = 200 mock_send_http = self.mock_object( self.rest_client, 'send_http_request', mock.Mock(return_value=(http_code, fake.NO_RECORDS_RESPONSE_REST))) code, response = self.rest_client.invoke_successfully( 
fake.FAKE_ACTION_URL, 'get', body=fake.FAKE_HTTP_BODY, query=query, enable_tunneling=True) self.assertEqual(response, fake.NO_RECORDS_RESPONSE_REST) self.assertEqual(code, http_code) mock_build_header.assert_called_once_with(True) mock_base.assert_called_once_with() self.assertEqual(bool(query), mock_add_query.called) mock_send_http.assert_called_once_with( 'get', fake.FAKE_BASE_URL + fake.FAKE_ACTION_URL, fake.FAKE_HTTP_BODY, fake.FAKE_HTTP_HEADER) @ddt.data( {'error': requests.HTTPError(), 'raised': legacy_api.NaApiError}, {'error': Exception, 'raised': legacy_api.NaApiError}) @ddt.unpack def test_send_http_request_http_error(self, error, raised): self.mock_object(netapp_api, 'LOG') self.mock_object(self.rest_client, '_build_session') self.rest_client._session = mock.Mock() self.mock_object( self.rest_client, '_get_request_method', mock.Mock( return_value=mock.Mock(side_effect=error))) self.assertRaises(raised, self.rest_client.send_http_request, 'get', fake.FAKE_ACTION_URL, fake.FAKE_HTTP_BODY, fake.FAKE_HTTP_HEADER) @ddt.data( { 'resp_content': fake.NO_RECORDS_RESPONSE_REST, 'body': fake.FAKE_HTTP_BODY, 'timeout': 10, }, { 'resp_content': fake.NO_RECORDS_RESPONSE_REST, 'body': fake.FAKE_HTTP_BODY, 'timeout': None, }, { 'resp_content': fake.NO_RECORDS_RESPONSE_REST, 'body': None, 'timeout': None, }, { 'resp_content': None, 'body': None, 'timeout': None, } ) @ddt.unpack def test_send_http_request(self, resp_content, body, timeout): if timeout: self.rest_client._timeout = timeout self.mock_object(netapp_api, 'LOG') mock_json_dumps = self.mock_object( jsonutils, 'dumps', mock.Mock(return_value='fake_dump_body')) mock_build_session = self.mock_object( self.rest_client, '_build_session') _mock_session = mock.Mock() self.rest_client._session = _mock_session response = mock.Mock() response.content = resp_content response.status_code = 10 mock_post = mock.Mock(return_value=response) mock_get_request_method = self.mock_object( self.rest_client, '_get_request_method', 
mock.Mock( return_value=mock_post)) mock_json_loads = self.mock_object( jsonutils, 'loads', mock.Mock(return_value='fake_loads_response')) code, res = self.rest_client.send_http_request( 'post', fake.FAKE_ACTION_URL, body, fake.FAKE_HTTP_HEADER) expected_res = 'fake_loads_response' if resp_content else {} self.assertEqual(expected_res, res) self.assertEqual(10, code) self.assertEqual(bool(body), mock_json_dumps.called) self.assertEqual(bool(resp_content), mock_json_loads.called) mock_build_session.assert_called_once_with(fake.FAKE_HTTP_HEADER) mock_get_request_method.assert_called_once_with('post', _mock_session) expected_data = 'fake_dump_body' if body else {} if timeout: mock_post.assert_called_once_with( fake.FAKE_ACTION_URL, data=expected_data, timeout=timeout) else: mock_post.assert_called_once_with(fake.FAKE_ACTION_URL, data=expected_data) @ddt.data( {'host': '192.168.1.0', 'port': '80', 'protocol': 'http'}, {'host': '0.0.0.0', 'port': '443', 'protocol': 'https'}, {'host': '::ffff:8', 'port': '80', 'protocol': 'http'}, {'host': 'fdf8:f53b:82e4::53', 'port': '443', 'protocol': 'https'}) @ddt.unpack def test__get_base_url(self, host, port, protocol): client = netapp_api.RestNaServer(host, port=port, transport_type=protocol) expected_host = f'[{host}]' if ':' in host else host expected_url = '%s://%s:%s/api' % (protocol, expected_host, port) url = client._get_base_url() self.assertEqual(expected_url, url) def test__add_query_params_to_url(self): formatted_url = self.rest_client._add_query_params_to_url( fake.FAKE_ACTION_URL, fake.FAKE_HTTP_QUERY) expected_formatted_url = fake.FAKE_ACTION_URL expected_formatted_url += fake.FAKE_FORMATTED_HTTP_QUERY self.assertEqual(expected_formatted_url, formatted_url) @ddt.data('post', 'get', 'put', 'delete', 'patch') def test_get_request_method(self, method): _mock_session = mock.Mock() _mock_session.post = mock.Mock() _mock_session.get = mock.Mock() _mock_session.put = mock.Mock() _mock_session.delete = mock.Mock() 
_mock_session.patch = mock.Mock() res = self.rest_client._get_request_method(method, _mock_session) expected_method = getattr(_mock_session, method) self.assertEqual(expected_method, res) def test__str__(self): fake_host = 'fake_host' client = netapp_api.RestNaServer(fake_host) expected_str = "server: %s" % fake_host self.assertEqual(expected_str, str(client)) def test_get_transport_type(self): expected_protocol = 'fake_protocol' self.rest_client._protocol = expected_protocol res = self.rest_client.get_transport_type() self.assertEqual(expected_protocol, res) @ddt.data(None, ('1', '0')) def test_get_api_version(self, api_version): if api_version: self.rest_client._api_version = str(api_version) (self.rest_client._api_major_version, _) = api_version (_, self.rest_client._api_minor_version) = api_version res = self.rest_client.get_api_version() self.assertEqual(api_version, res) @ddt.data(None, '9.10') def test_get_ontap_version(self, ontap_version): if ontap_version: self.rest_client._ontap_version = ontap_version res = self.rest_client.get_ontap_version() self.assertEqual(ontap_version, res) def test_set_vserver(self): expected_vserver = 'fake_vserver' self.rest_client.set_vserver(expected_vserver) self.assertEqual(expected_vserver, self.rest_client._vserver) def test_get_vserver(self): expected_vserver = 'fake_vserver' self.rest_client._vserver = expected_vserver res = self.rest_client.get_vserver() self.assertEqual(expected_vserver, res) def test__build_session_with_basic_auth(self): """Tests whether build session works with """ """default(basic auth) parameters""" fake_session = mock.Mock() mock_requests_session = self.mock_object( requests, 'Session', mock.Mock(return_value=fake_session)) mock_auth = self.mock_object( self.rest_client, '_create_basic_auth_handler', mock.Mock(return_value='fake_auth')) self.rest_client._ssl_verify = 'fake_ssl' self.rest_client._build_session(fake.FAKE_HTTP_HEADER) self.assertEqual(fake_session, self.rest_client._session) 
self.assertEqual('fake_auth', self.rest_client._session.auth) self.assertEqual('fake_ssl', self.rest_client._session.verify) self.assertEqual(fake.FAKE_HTTP_HEADER, self.rest_client._session.headers) mock_requests_session.assert_called_once_with() mock_auth.assert_called_once_with() def test__build_session__certificate_auth(self): """Tests whether build session works with """ """valid certificate parameters""" self.rest_client._private_key_file = 'fake_key.pem' self.rest_client._certificate_file = 'fake_cert.pem' self.rest_client._certificate_host_validation = False fake_session = mock.Mock() mock_requests_session = self.mock_object( requests, 'Session', mock.Mock(return_value=fake_session)) mock_cert = self.mock_object( self.rest_client, '_create_certificate_auth_handler', mock.Mock(return_value=('fake_cert', 'fake_verify'))) self.rest_client._build_session(fake.FAKE_HTTP_HEADER) self.assertEqual(fake_session, self.rest_client._session) self.assertEqual(('fake_cert', 'fake_verify'), (self.rest_client._session.cert, self.rest_client._session.verify)) self.assertEqual(fake.FAKE_HTTP_HEADER, self.rest_client._session.headers) mock_requests_session.assert_called_once_with() mock_cert.assert_called_once_with() @ddt.data(True, False) def test__build_headers(self, enable_tunneling): self.rest_client._vserver = fake.VSERVER_NAME res = self.rest_client._build_headers(enable_tunneling) expected = { "Accept": "application/json", "Content-Type": "application/json" } if enable_tunneling: expected["X-Dot-SVM-Name"] = fake.VSERVER_NAME self.assertEqual(expected, res) def test__create_basic_auth_handler(self): username = 'fake_username' password = 'fake_password' client = netapp_api.RestNaServer('10.1.1.1', username=username, password=password) res = client._create_basic_auth_handler() expected = auth.HTTPBasicAuth(username, password) self.assertEqual(expected.__dict__, res.__dict__) def test__create_certificate_auth_handler_default(self): """Test whether create certificate auth 
handler """ """works with default params""" self.rest_client._private_key_file = 'fake_key.pem' self.rest_client._certificate_file = 'fake_cert.pem' self.rest_client._certificate_host_validation = False cert = self.rest_client._certificate_file, \ self.rest_client._private_key_file self.rest_client._session = mock.Mock() if not self.rest_client._certificate_host_validation: self.assertFalse(self.rest_client._certificate_host_validation) res = self.rest_client._create_certificate_auth_handler() self.assertEqual(res, (cert, self.rest_client._certificate_host_validation)) def test__create_certificate_auth_handler_with_host_validation(self): """Test whether create certificate auth handler """ """works with host validation enabled""" self.rest_client._private_key_file = 'fake_key.pem' self.rest_client._certificate_file = 'fake_cert.pem' self.rest_client._ca_certificate_file = 'fake_ca_cert.crt' self.rest_client._certificate_host_validation = True cert = self.rest_client._certificate_file, \ self.rest_client._private_key_file self.rest_client._session = mock.Mock() if self.rest_client._certificate_host_validation: self.assertTrue(self.rest_client._certificate_host_validation) res = self.rest_client._create_certificate_auth_handler() self.assertEqual(res, (cert, self.rest_client._ca_certificate_file)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0336702 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/0000775000175000017500000000000000000000000026170 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/__init__.py0000664000175000017500000000000000000000000030267 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py0000664000175000017500000017461100000000000032111 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Alex Meade. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import time from unittest import mock import ddt from oslo_config import cfg from manila import exception from manila.share import configuration from manila.share import driver from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp import options as na_opts from manila.share.drivers.netapp import utils as na_utils from manila.share import utils as share_utils from manila import test from manila.tests.share.drivers.netapp.dataontap import fakes as fake from manila.tests.share.drivers.netapp import fakes as na_fakes CONF = cfg.CONF @ddt.ddt class NetAppCDOTDataMotionTestCase(test.TestCase): def setUp(self): super(NetAppCDOTDataMotionTestCase, self).setUp() self.backend = 'backend1' self.force_rest_client = False self.mock_cmode_client = self.mock_object(client_cmode, "NetAppCmodeClient", mock.Mock()) self.config = configuration.Configuration(driver.share_opts, config_group=self.backend) self.config.append_config_values(na_opts.netapp_cluster_opts) 
self.config.append_config_values(na_opts.netapp_connection_opts) self.config.append_config_values(na_opts.netapp_basicauth_opts) self.config.append_config_values(na_opts.netapp_certificateauth_opts) self.config.append_config_values(na_opts.netapp_transport_opts) self.config.append_config_values(na_opts.netapp_support_opts) self.config.append_config_values(na_opts.netapp_provisioning_opts) self.config.append_config_values(na_opts.netapp_data_motion_opts) CONF.set_override("share_backend_name", self.backend, group=self.backend) CONF.set_override("netapp_transport_type", "https", group=self.backend) CONF.set_override("netapp_login", "fake_user", group=self.backend) CONF.set_override("netapp_password", "fake_password", group=self.backend) CONF.set_override("netapp_server_hostname", "fake.hostname", group=self.backend) CONF.set_override("netapp_server_port", 8866, group=self.backend) CONF.set_override("netapp_ssl_cert_path", "/etc/ssl/certs", group=self.backend) CONF.set_override("netapp_private_key_file", "/fake_private_key.pem", group=self.backend) CONF.set_override("netapp_certificate_file", "/fake_cert.pem", group=self.backend) CONF.set_override("netapp_ca_certificate_file", "/fake_ca_cert.crt", group=self.backend) CONF.set_override("netapp_certificate_host_validation", False, group=self.backend) def test_get_client_for_backend(self): self.mock_object(data_motion, "get_backend_configuration", mock.Mock(return_value=self.config)) data_motion.get_client_for_backend(self.backend, self.force_rest_client) self.mock_cmode_client.assert_called_once_with( hostname='fake.hostname', password='fake_password', username='fake_user', transport_type='https', port=8866, ssl_cert_path='/etc/ssl/certs', trace=mock.ANY, vserver=None, private_key_file='/fake_private_key.pem', certificate_file='/fake_cert.pem', ca_certificate_file='/fake_ca_cert.crt', certificate_host_validation=False) def test_get_client_for_backend_with_vserver(self): self.mock_object(data_motion, 
"get_backend_configuration", mock.Mock(return_value=self.config)) CONF.set_override("netapp_vserver", 'fake_vserver', group=self.backend) data_motion.get_client_for_backend(self.backend) self.mock_cmode_client.assert_called_once_with( hostname='fake.hostname', password='fake_password', username='fake_user', transport_type='https', port=8866, ssl_cert_path='/etc/ssl/certs', trace=mock.ANY, vserver='fake_vserver', private_key_file='/fake_private_key.pem', certificate_file='/fake_cert.pem', ca_certificate_file='/fake_ca_cert.crt', certificate_host_validation=False) def test_get_client_for_host(self): mock_extract_host = self.mock_object( share_utils, 'extract_host', mock.Mock(return_value=fake.BACKEND_NAME)) mock_get_client = self.mock_object( data_motion, 'get_client_for_backend', mock.Mock(return_value=self.mock_cmode_client)) returned_client = data_motion.get_client_for_host( fake.HOST_NAME) mock_extract_host.assert_called_once_with( fake.HOST_NAME, level='backend_name') mock_get_client.assert_called_once_with(fake.BACKEND_NAME) self.assertEqual(returned_client, self.mock_cmode_client) def test_get_config_for_backend(self): self.mock_object(data_motion, "CONF") CONF.set_override("netapp_vserver", 'fake_vserver', group=self.backend) CONF.set_override("driver_handles_share_servers", False, group=self.backend) config = data_motion.get_backend_configuration(self.backend) self.assertEqual('fake_vserver', config.netapp_vserver) def test_get_config_for_backend_different_backend_name(self): self.mock_object(data_motion, "CONF") CONF.set_override("netapp_vserver", 'fake_vserver', group=self.backend) CONF.set_override("share_backend_name", "fake_backend_name", group=self.backend) CONF.set_override("driver_handles_share_servers", False, group=self.backend) config = data_motion.get_backend_configuration(self.backend) self.assertEqual('fake_vserver', config.netapp_vserver) self.assertEqual('fake_backend_name', config.share_backend_name) @ddt.data([], ['fake_backend1', 
'fake_backend2']) def test_get_config_for_backend_not_configured(self, conf_sections): self.mock_object(data_motion, "CONF") self.assertRaises(exception.BadConfigurationException, data_motion.get_backend_configuration, self.backend) @ddt.ddt class NetAppCDOTDataMotionSessionTestCase(test.TestCase): def setUp(self): super(NetAppCDOTDataMotionSessionTestCase, self).setUp() self.source_backend = 'backend1' self.dest_backend = 'backend2' config = configuration.Configuration(driver.share_opts, config_group=self.source_backend) config.append_config_values(na_opts.netapp_cluster_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_certificateauth_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_support_opts) config.append_config_values(na_opts.netapp_provisioning_opts) config.append_config_values(na_opts.netapp_data_motion_opts) self.mock_object(data_motion, "get_backend_configuration", mock.Mock(return_value=config)) self.mock_cmode_client = self.mock_object(client_cmode, "NetAppCmodeClient", mock.Mock()) self.dm_session = data_motion.DataMotionSession() self.fake_src_share = copy.deepcopy(fake.SHARE) self.fake_src_share_server = copy.deepcopy(fake.SHARE_SERVER) self.source_vserver = 'source_vserver' self.source_backend_name = ( self.fake_src_share_server['host'].split('@')[1]) self.fake_src_share_server['backend_details']['vserver_name'] = ( self.source_vserver ) self.fake_src_share['share_server'] = self.fake_src_share_server self.fake_src_share['id'] = 'c02d497a-236c-4852-812a-0d39373e312a' self.fake_src_vol_name = 'share_c02d497a_236c_4852_812a_0d39373e312a' self.fake_dest_share = copy.deepcopy(fake.SHARE) self.fake_dest_share_server = copy.deepcopy(fake.SHARE_SERVER_2) self.dest_vserver = 'dest_vserver' self.dest_backend_name = ( self.fake_dest_share_server['host'].split('@')[1]) 
self.fake_dest_share_server['backend_details']['vserver_name'] = ( self.dest_vserver ) self.fake_dest_share['share_server'] = self.fake_dest_share_server self.fake_dest_share['id'] = '34fbaf57-745d-460f-8270-3378c2945e30' self.fake_dest_vol_name = 'share_34fbaf57_745d_460f_8270_3378c2945e30' self.mock_src_client = mock.Mock() self.mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[self.mock_dest_client, self.mock_src_client])) self.mock_object(self.dm_session, 'get_client_and_vserver_name', mock.Mock(side_effect=[ (self.mock_src_client, self.source_vserver), (self.mock_dest_client, self.dest_vserver)])) def test_get_client_and_vserver_name(self): dm_session = data_motion.DataMotionSession() client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=client)) result = dm_session.get_client_and_vserver_name(fake.SHARE_SERVER) expected = (client, fake.SHARE_SERVER['backend_details']['vserver_name']) self.assertEqual(expected, result) data_motion.get_client_for_backend.assert_called_once_with( fake.BACKEND_NAME, vserver_name=fake.VSERVER1 ) @ddt.data(True, False) def test_create_snapmirror_mount(self, mount): mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=mock_dest_client)) self.mock_object(self.dm_session, 'wait_for_mount_replica') mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_mount_replica_timeout = 30 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.dm_session.create_snapmirror(self.fake_src_share, self.fake_dest_share, 'data_protection', mount=mount) mock_dest_client.create_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, 'data_protection', schedule='hourly' ) mock_dest_client.initialize_snapmirror_vol.assert_called_once_with( mock.ANY, 
self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) if mount: self.dm_session.wait_for_mount_replica.assert_called_once_with( mock_dest_client, self.fake_dest_vol_name, timeout=30) else: self.dm_session.wait_for_mount_replica.assert_not_called() def test_create_snapmirror_svm(self): mock_dest_client = mock.Mock() self.mock_object(self.dm_session, 'get_client_and_vserver_name', mock.Mock(return_value=(mock_dest_client, self.dest_vserver))) self.mock_object(self.dm_session, 'get_vserver_from_share_server', mock.Mock(return_value=self.source_vserver)) policy_name = 'policy_' + self.dest_vserver get_snapmirro_policy_name = self.mock_object( self.dm_session, '_get_backend_snapmirror_policy_name_svm', mock.Mock(return_value=policy_name)) self.dm_session.create_snapmirror_svm(self.fake_src_share_server, self.fake_dest_share_server) self.dm_session.get_client_and_vserver_name.assert_called_once_with( self.fake_dest_share_server ) self.dm_session.get_vserver_from_share_server.assert_called_once_with( self.fake_src_share_server ) get_snapmirro_policy_name.assert_called_once_with( self.fake_dest_share_server['id'], self.dest_backend_name ) mock_dest_client.create_snapmirror_policy.assert_called_once_with( policy_name ) mock_dest_client.create_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver, policy=policy_name, schedule='hourly' ) mock_dest_client.initialize_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver ) def test_delete_snapmirror(self): mock_src_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, mock_src_client])) mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_snapmirror_release_timeout = 30 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) mock_wait_for_snapmirror_release_vol = self.mock_object( self.dm_session, 
'wait_for_snapmirror_release_vol') self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.abort_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) mock_wait_for_snapmirror_release_vol.assert_called_once_with( self.source_vserver, self.dest_vserver, self.fake_src_vol_name, self.fake_dest_vol_name, False, mock_src_client, timeout=30 ) @ddt.data(True, False) def test_delete_snapmirror_svm(self, call_release): self.mock_object(self.dm_session, 'wait_for_snapmirror_release_svm') mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_snapmirror_release_timeout = 30 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.dm_session.delete_snapmirror_svm(self.fake_src_share_server, self.fake_dest_share_server, release=call_release) self.mock_dest_client.abort_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver ) self.mock_dest_client.delete_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver ) if call_release: release_mock = self.dm_session.wait_for_snapmirror_release_svm release_mock.assert_called_once_with( self.source_vserver, self.dest_vserver, self.mock_src_client, timeout=mock_backend_config.netapp_snapmirror_release_timeout ) def test_delete_snapmirror_does_not_exist(self): """Ensure delete succeeds when the snapmirror does not exist.""" mock_src_client = mock.Mock() mock_dest_client = mock.Mock() mock_dest_client.abort_snapmirror_vol.side_effect = ( netapp_api.NaApiError(code=netapp_api.EAPIERROR)) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, mock_src_client])) mock_backend_config = na_fakes.create_configuration() 
mock_backend_config.netapp_snapmirror_release_timeout = 30 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) mock_wait_for_snapmirror_release_vol = self.mock_object( self.dm_session, 'wait_for_snapmirror_release_vol') self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.abort_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) mock_wait_for_snapmirror_release_vol.assert_called_once_with( self.source_vserver, self.dest_vserver, self.fake_src_vol_name, self.fake_dest_vol_name, False, mock_src_client, timeout=30 ) def test_delete_snapmirror_svm_does_not_exist(self): """Ensure delete succeeds when the snapmirror does not exist.""" self.mock_dest_client.abort_snapmirror_svm.side_effect = ( netapp_api.NaApiError(code=netapp_api.EAPIERROR)) self.mock_object(self.dm_session, 'wait_for_snapmirror_release_svm') mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_snapmirror_release_timeout = 30 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.dm_session.delete_snapmirror_svm(self.fake_src_share_server, self.fake_dest_share_server) self.mock_dest_client.abort_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver ) self.mock_dest_client.delete_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver ) release_mock = self.dm_session.wait_for_snapmirror_release_svm release_mock.assert_called_once_with( self.source_vserver, self.dest_vserver, self.mock_src_client, timeout=mock_backend_config.netapp_snapmirror_release_timeout ) def test_delete_snapmirror_error_deleting(self): """Ensure delete succeeds when the snapmirror does not exist.""" 
mock_src_client = mock.Mock() mock_dest_client = mock.Mock() mock_dest_client.delete_snapmirror_vol.side_effect = ( netapp_api.NaApiError(code=netapp_api.ESOURCE_IS_DIFFERENT)) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, mock_src_client])) mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_snapmirror_release_timeout = 30 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) mock_wait_for_snapmirror_release_vol = self.mock_object( self.dm_session, 'wait_for_snapmirror_release_vol') self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.abort_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) mock_wait_for_snapmirror_release_vol.assert_called_once_with( self.source_vserver, self.dest_vserver, self.fake_src_vol_name, self.fake_dest_vol_name, False, mock_src_client, timeout=30 ) def test_delete_snapmirror_svm_error_deleting(self): """Ensure delete succeeds when the snapmirror does not exist.""" self.mock_dest_client.delete_snapmirror_svm.side_effect = ( netapp_api.NaApiError(code=netapp_api.ESOURCE_IS_DIFFERENT)) self.mock_object(self.dm_session, 'wait_for_snapmirror_release_svm') mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_snapmirror_release_timeout = 30 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.dm_session.delete_snapmirror_svm(self.fake_src_share_server, self.fake_dest_share_server) self.mock_dest_client.abort_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver ) self.mock_dest_client.delete_snapmirror_svm.assert_called_once_with( self.source_vserver, 
self.dest_vserver ) release_mock = self.dm_session.wait_for_snapmirror_release_svm release_mock.assert_called_once_with( self.source_vserver, self.dest_vserver, self.mock_src_client, timeout=mock_backend_config.netapp_snapmirror_release_timeout ) def test_delete_snapmirror_without_release(self): mock_src_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, mock_src_client])) mock_wait_for_snapmirror_release_vol = self.mock_object( self.dm_session, 'wait_for_snapmirror_release_vol') self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share, release=False) mock_dest_client.abort_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) self.assertFalse(mock_wait_for_snapmirror_release_vol.called) def test_delete_snapmirror_source_unreachable(self): mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[mock_dest_client, Exception])) mock_wait_for_snapmirror_release_vol = self.mock_object( self.dm_session, 'wait_for_snapmirror_release_vol') self.dm_session.delete_snapmirror(self.fake_src_share, self.fake_dest_share) mock_dest_client.abort_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name, clear_checkpoint=False ) mock_dest_client.delete_snapmirror_vol.assert_called_once_with( mock.ANY, self.fake_src_vol_name, mock.ANY, self.fake_dest_vol_name ) self.assertFalse(mock_wait_for_snapmirror_release_vol.called) def test_break_snapmirror(self): self.mock_object(self.dm_session, 'quiesce_then_abort') self.dm_session.break_snapmirror(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.break_snapmirror_vol.assert_called_once_with( 
self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) self.dm_session.quiesce_then_abort.assert_called_once_with( self.fake_src_share, self.fake_dest_share, quiesce_wait_time=None) self.mock_dest_client.mount_volume.assert_called_once_with( self.fake_dest_vol_name) def test_break_snapmirror_no_mount(self): self.mock_object(self.dm_session, 'quiesce_then_abort') self.dm_session.break_snapmirror(self.fake_src_share, self.fake_dest_share, mount=False) self.mock_dest_client.break_snapmirror_vol.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) self.dm_session.quiesce_then_abort.assert_called_once_with( self.fake_src_share, self.fake_dest_share, quiesce_wait_time=None) self.assertFalse(self.mock_dest_client.mount_volume.called) def test_break_snapmirror_wait_for_quiesced(self): self.mock_object(self.dm_session, 'quiesce_then_abort') self.dm_session.break_snapmirror(self.fake_src_share, self.fake_dest_share) self.dm_session.quiesce_then_abort.assert_called_once_with( self.fake_src_share, self.fake_dest_share, quiesce_wait_time=None) self.mock_dest_client.break_snapmirror_vol.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) self.mock_dest_client.mount_volume.assert_called_once_with( self.fake_dest_vol_name) @ddt.data(None, 2, 30) def test_quiesce_then_abort_wait_time(self, wait_time): self.mock_object(time, 'sleep') mock_get_snapmirrors = mock.Mock( return_value=[{'relationship-status': "transferring"}]) self.mock_object(self.mock_dest_client, 'get_snapmirrors', mock_get_snapmirrors) mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_snapmirror_quiesce_timeout = 10 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.dm_session.quiesce_then_abort(self.fake_src_share, self.fake_dest_share, quiesce_wait_time=wait_time) 
self.mock_dest_client.get_snapmirrors.assert_called_with( source_vserver=self.source_vserver, dest_vserver=self.dest_vserver, source_volume=self.fake_src_vol_name, dest_volume=self.fake_dest_vol_name, desired_attributes=['relationship-status', 'mirror-state'] ) call_count = self.mock_dest_client.get_snapmirrors.call_count if wait_time: if wait_time > 5: self.assertEqual(wait_time / 5, call_count) else: self.assertEqual(1, call_count) else: self.assertEqual(2, call_count) def test_quiesce_then_abort_timeout(self): self.mock_object(time, 'sleep') mock_get_snapmirrors = mock.Mock( return_value=[{'relationship-status': "transferring"}]) self.mock_object(self.mock_dest_client, 'get_snapmirrors', mock_get_snapmirrors) mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_snapmirror_quiesce_timeout = 10 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.dm_session.quiesce_then_abort(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.get_snapmirrors.assert_called_with( source_vserver=self.source_vserver, dest_vserver=self.dest_vserver, source_volume=self.fake_src_vol_name, dest_volume=self.fake_dest_vol_name, desired_attributes=['relationship-status', 'mirror-state'] ) self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) self.mock_dest_client.quiesce_snapmirror_vol.assert_called_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) self.mock_dest_client.abort_snapmirror_vol.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name, clear_checkpoint=False ) def test_quiesce_then_abort_svm_timeout(self): self.mock_object(time, 'sleep') mock_get_snapmirrors = mock.Mock( return_value=[{'relationship-status': "transferring"}]) self.mock_object(self.mock_dest_client, 'get_snapmirrors_svm', mock_get_snapmirrors) mock_backend_config = na_fakes.create_configuration() 
mock_backend_config.netapp_snapmirror_quiesce_timeout = 10 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.dm_session.quiesce_then_abort_svm(self.fake_src_share_server, self.fake_dest_share_server) self.mock_dest_client.get_snapmirrors_svm.assert_called_with( source_vserver=self.source_vserver, dest_vserver=self.dest_vserver, desired_attributes=['relationship-status', 'mirror-state'] ) self.assertEqual(2, self.mock_dest_client.get_snapmirrors_svm.call_count) self.mock_dest_client.quiesce_snapmirror_svm.assert_called_with( self.source_vserver, self.dest_vserver) self.mock_dest_client.abort_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver, clear_checkpoint=False ) def test_quiesce_then_abort_wait_for_quiesced(self): self.mock_object(time, 'sleep') self.mock_object(self.mock_dest_client, 'get_snapmirrors', mock.Mock(side_effect=[ [{'relationship-status': "transferring"}], [{'relationship-status': "quiesced"}]])) self.dm_session.quiesce_then_abort(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.get_snapmirrors.assert_called_with( source_vserver=self.source_vserver, dest_vserver=self.dest_vserver, source_volume=self.fake_src_vol_name, dest_volume=self.fake_dest_vol_name, desired_attributes=['relationship-status', 'mirror-state'] ) self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) self.mock_dest_client.quiesce_snapmirror_vol.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) def test_quiesce_then_abort_svm_wait_for_quiesced(self): self.mock_object(time, 'sleep') self.mock_object(self.mock_dest_client, 'get_snapmirrors_svm', mock.Mock(side_effect=[ [{'relationship-status': "transferring"}], [{'relationship-status': "quiesced"}]])) self.dm_session.quiesce_then_abort_svm(self.fake_src_share_server, self.fake_dest_share_server) 
self.mock_dest_client.get_snapmirrors_svm.assert_called_with( source_vserver=self.source_vserver, dest_vserver=self.dest_vserver, desired_attributes=['relationship-status', 'mirror-state'] ) self.assertEqual(2, self.mock_dest_client.get_snapmirrors_svm.call_count) self.mock_dest_client.quiesce_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver) def test_resync_snapmirror(self): self.dm_session.resync_snapmirror(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.resync_snapmirror_vol.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) def test_change_snapmirror_source(self): fake_new_src_share = copy.deepcopy(fake.SHARE) fake_new_src_share['id'] = 'd02d497a-236c-4852-812a-0d39373e312a' fake_new_src_share_name = 'share_d02d497a_236c_4852_812a_0d39373e312a' mock_new_src_client = mock.Mock() self.mock_object(self.dm_session, 'delete_snapmirror') self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[self.mock_dest_client, self.mock_src_client, self.mock_dest_client, mock_new_src_client])) self.mock_object(na_utils, 'get_relationship_type', mock.Mock(return_value=na_utils.DATA_PROTECTION_TYPE)) self.dm_session.change_snapmirror_source( self.fake_dest_share, self.fake_src_share, fake_new_src_share, [self.fake_dest_share, self.fake_src_share, fake_new_src_share]) self.assertFalse(self.mock_src_client.release_snapmirror_vol.called) self.assertEqual(4, self.dm_session.delete_snapmirror.call_count) self.dm_session.delete_snapmirror.assert_called_with( mock.ANY, mock.ANY, release=False, relationship_info_only=False ) na_utils.get_relationship_type.assert_called_once_with(False) self.mock_dest_client.create_snapmirror_vol.assert_called_once_with( mock.ANY, fake_new_src_share_name, mock.ANY, self.fake_dest_vol_name, na_utils.DATA_PROTECTION_TYPE, schedule='hourly' ) self.mock_dest_client.resync_snapmirror_vol.assert_called_once_with( mock.ANY, 
fake_new_src_share_name, mock.ANY, self.fake_dest_vol_name ) def test_change_snapmirror_source_dhss_true(self): fake_new_src_share = copy.deepcopy(self.fake_src_share) fake_new_src_share['id'] = 'd02d497a-236c-4852-812a-0d39373e312a' fake_new_src_share_name = 'share_d02d497a_236c_4852_812a_0d39373e312a' fake_new_src_share_server = fake_new_src_share['share_server'] fake_new_src_ss_name = ( fake_new_src_share_server['backend_details']['vserver_name']) self.mock_object(self.dm_session, 'delete_snapmirror') self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[self.mock_dest_client, self.mock_src_client])) mock_backend_config = na_fakes.create_configuration() mock_backend_config.driver_handles_share_servers = True self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.mock_object(self.mock_dest_client, 'get_vserver_peers', mock.Mock(return_value=[])) peer_cluster_name = 'new_src_cluster_name' self.mock_object(self.mock_src_client, 'get_cluster_name', mock.Mock(return_value=peer_cluster_name)) self.mock_object(na_utils, 'get_relationship_type', mock.Mock(return_value=na_utils.DATA_PROTECTION_TYPE)) self.dm_session.change_snapmirror_source( self.fake_dest_share, self.fake_src_share, fake_new_src_share, [self.fake_dest_share, self.fake_src_share, fake_new_src_share]) self.assertEqual(4, self.dm_session.delete_snapmirror.call_count) self.mock_dest_client.get_vserver_peers.assert_called_once_with( self.dest_vserver, fake_new_src_ss_name ) self.assertTrue(self.mock_src_client.get_cluster_name.called) self.mock_dest_client.create_vserver_peer.assert_called_once_with( self.dest_vserver, fake_new_src_ss_name, peer_cluster_name=peer_cluster_name ) self.mock_src_client.accept_vserver_peer.assert_called_once_with( fake_new_src_ss_name, self.dest_vserver ) na_utils.get_relationship_type.assert_called_once_with(False) self.dm_session.delete_snapmirror.assert_called_with( mock.ANY, mock.ANY, 
release=False, relationship_info_only=False ) self.mock_dest_client.create_snapmirror_vol.assert_called_once_with( mock.ANY, fake_new_src_share_name, mock.ANY, self.fake_dest_vol_name, na_utils.DATA_PROTECTION_TYPE, schedule='hourly' ) self.mock_dest_client.resync_snapmirror_vol.assert_called_once_with( mock.ANY, fake_new_src_share_name, mock.ANY, self.fake_dest_vol_name ) def test_get_snapmirrors(self): self.mock_object(self.mock_dest_client, 'get_snapmirrors') self.dm_session.get_snapmirrors(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.get_snapmirrors.assert_called_with( source_vserver=self.source_vserver, dest_vserver=self.dest_vserver, source_volume=self.fake_src_vol_name, dest_volume=self.fake_dest_vol_name, desired_attributes=['relationship-status', 'mirror-state', 'schedule', 'source-vserver', 'source-volume', 'last-transfer-end-timestamp', 'last-transfer-size', 'last-transfer-error'] ) self.assertEqual(1, self.mock_dest_client.get_snapmirrors.call_count) def test_get_snapmirrors_svm(self): mock_dest_client = mock.Mock() self.mock_object(self.dm_session, 'get_client_and_vserver_name', mock.Mock(return_value=(mock_dest_client, self.dest_vserver))) self.mock_object(mock_dest_client, 'get_snapmirrors_svm') self.dm_session.get_snapmirrors_svm(self.fake_src_share_server, self.fake_dest_share_server) mock_dest_client.get_snapmirrors_svm.assert_called_with( source_vserver=self.source_vserver, dest_vserver=self.dest_vserver, desired_attributes=['relationship-status', 'mirror-state', 'last-transfer-end-timestamp'] ) self.assertEqual(1, mock_dest_client.get_snapmirrors_svm.call_count) def test_get_snapmirror_destinations_svm(self): mock_dest_client = mock.Mock() self.mock_object(self.dm_session, 'get_client_and_vserver_name', mock.Mock(return_value=(mock_dest_client, self.dest_vserver))) self.mock_object(mock_dest_client, 'get_snapmirror_destinations_svm') self.dm_session.get_snapmirror_destinations_svm( self.fake_src_share_server, 
self.fake_dest_share_server) mock_dest_client.get_snapmirror_destinations_svm.assert_called_with( source_vserver=self.source_vserver, dest_vserver=self.dest_vserver, ) self.assertEqual(1, mock_dest_client.get_snapmirror_destinations_svm .call_count) def test_update_snapmirror(self): self.mock_object(self.mock_dest_client, 'get_snapmirrors') self.dm_session.update_snapmirror(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.update_snapmirror_vol.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) def test_update_snapmirror_svm(self): mock_dest_client = mock.Mock() self.mock_object(self.dm_session, 'get_client_and_vserver_name', mock.Mock(return_value=(mock_dest_client, self.dest_vserver))) self.dm_session.update_snapmirror_svm(self.fake_src_share_server, self.fake_dest_share_server) mock_dest_client.update_snapmirror_svm.assert_called_once_with( self.source_vserver, self.dest_vserver) def test_abort_and_break_snapmirror_svm(self): mock_dest_client = mock.Mock() self.mock_object(self.dm_session, 'get_client_and_vserver_name', mock.Mock(return_value=(mock_dest_client, self.dest_vserver))) self.mock_object(self.dm_session, 'quiesce_then_abort_svm') self.dm_session.quiesce_and_break_snapmirror_svm( self.fake_src_share_server, self.fake_dest_share_server ) self.dm_session.get_client_and_vserver_name.assert_called_once_with( self.fake_dest_share_server ) self.dm_session.quiesce_then_abort_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) mock_dest_client.break_snapmirror_svm(self.source_vserver, self.dest_vserver) @ddt.data({'snapmirrors': ['fake_snapmirror'], 'vserver_subtype': 'default'}, {'snapmirrors': [], 'vserver_subtype': 'default'}, {'snapmirrors': [], 'vserver_subtype': 'dp_destination'}) @ddt.unpack def test_cancel_snapmirror_svm(self, snapmirrors, vserver_subtype): mock_dest_client = mock.Mock() self.mock_object(self.dm_session, 
'get_client_and_vserver_name', mock.Mock(return_value=(mock_dest_client, self.dest_vserver))) mock_backend_config = na_fakes.create_configuration() mock_backend_config.netapp_server_migration_state_change_timeout = 30 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.mock_object(self.dm_session, 'get_snapmirrors_svm', mock.Mock(return_value=snapmirrors)) self.mock_object(self.dm_session, 'quiesce_and_break_snapmirror_svm') self.mock_object(self.dm_session, 'wait_for_vserver_state') self.mock_object(self.dm_session, 'delete_snapmirror_svm') vserver_info = copy.deepcopy(fake.VSERVER_INFO) vserver_info['subtype'] = vserver_subtype self.mock_object(mock_dest_client, 'get_vserver_info', mock.Mock(return_value=vserver_info)) self.mock_object(self.dm_session, 'convert_svm_to_default_subtype') self.dm_session.cancel_snapmirror_svm(self.fake_src_share_server, self.fake_dest_share_server) data_motion.get_backend_configuration.assert_called_once_with( self.dest_backend_name ) self.dm_session.get_client_and_vserver_name.assert_called_once_with( self.fake_dest_share_server ) self.dm_session.get_snapmirrors_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) if snapmirrors: quiesce_mock = self.dm_session.quiesce_and_break_snapmirror_svm quiesce_mock.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) self.dm_session.wait_for_vserver_state.assert_called_once_with( self.dest_vserver, mock_dest_client, subtype='default', state='running', operational_state='stopped', timeout=(mock_backend_config .netapp_server_migration_state_change_timeout) ) self.dm_session.delete_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) else: mock_dest_client.get_vserver_info.assert_called_once_with( self.dest_vserver ) convert_svm = self.dm_session.convert_svm_to_default_subtype if vserver_subtype == 'dp_destination': 
convert_svm.assert_called_once_with( self.dest_vserver, mock_dest_client, timeout=(mock_backend_config .netapp_server_migration_state_change_timeout) ) else: self.assertFalse(convert_svm.called) def test_resume_snapmirror(self): self.mock_object(self.mock_dest_client, 'get_snapmirrors') self.dm_session.resume_snapmirror(self.fake_src_share, self.fake_dest_share) self.mock_dest_client.resume_snapmirror_vol.assert_called_once_with( self.source_vserver, self.fake_src_vol_name, self.dest_vserver, self.fake_dest_vol_name) @ddt.data((None, exception.StorageCommunicationException), (exception.StorageCommunicationException, None)) @ddt.unpack def test_remove_qos_on_old_active_replica_unreachable_backend(self, side_eff_1, side_eff_2): mock_source_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=mock_source_client)) self.mock_object( mock_source_client, 'set_qos_policy_group_for_volume', mock.Mock(side_effect=side_eff_1)) self.mock_object( mock_source_client, 'mark_qos_policy_group_for_deletion', mock.Mock(side_effect=side_eff_2)) self.mock_object(data_motion.LOG, 'exception') retval = self.dm_session.remove_qos_on_old_active_replica( self.fake_src_share) self.assertIsNone(retval) (mock_source_client.set_qos_policy_group_for_volume .assert_called_once_with(self.fake_src_vol_name, 'none')) data_motion.LOG.exception.assert_called_once() def test_remove_qos_on_old_active_replica(self): mock_source_client = mock.Mock() self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=mock_source_client)) self.mock_object(data_motion.LOG, 'exception') retval = self.dm_session.remove_qos_on_old_active_replica( self.fake_src_share) self.assertIsNone(retval) (mock_source_client.set_qos_policy_group_for_volume .assert_called_once_with(self.fake_src_vol_name, 'none')) data_motion.LOG.exception.assert_not_called() @ddt.data(True, False) def test_convert_svm_to_default_subtype(self, is_dest): mock_client = mock.Mock() 
vserver_info_default = copy.deepcopy(fake.VSERVER_INFO) vserver_info_default['subtype'] = 'default' vserver_info_dp = copy.deepcopy(fake.VSERVER_INFO) vserver_info_dp['subtype'] = 'dp_destination' self.mock_object(mock_client, 'get_vserver_info', mock.Mock(side_effect=[vserver_info_dp, vserver_info_default])) self.mock_object(mock_client, 'break_snapmirror_svm') self.dm_session.convert_svm_to_default_subtype(fake.VSERVER1, mock_client, is_dest_path=is_dest, timeout=20) mock_client.get_vserver_info.assert_has_calls([ mock.call(fake.VSERVER1), mock.call(fake.VSERVER1)]) if is_dest: mock_client.break_snapmirror_svm.assert_called_once_with( dest_vserver=fake.VSERVER1 ) else: mock_client.break_snapmirror_svm.assert_called_once_with( source_vserver=fake.VSERVER1 ) def test_convert_svm_to_default_subtype_timeout(self): mock_client = mock.Mock() vserver_info_dp = copy.deepcopy(fake.VSERVER_INFO) vserver_info_dp['subtype'] = 'dp_destination' self.mock_object(mock_client, 'get_vserver_info', mock.Mock(side_effect=[vserver_info_dp])) self.mock_object(mock_client, 'break_snapmirror_svm') self.assertRaises( exception.NetAppException, self.dm_session.convert_svm_to_default_subtype, fake.VSERVER1, mock_client, is_dest_path=True, timeout=10) mock_client.get_vserver_info.assert_called_once_with(fake.VSERVER1) mock_client.break_snapmirror_svm.assert_called_once_with( dest_vserver=fake.VSERVER1) def test_wait_for_vserver_state(self,): mock_client = mock.Mock() vserver_info_default = copy.deepcopy(fake.VSERVER_INFO) vserver_info_default['subtype'] = 'default' vserver_info_dp = copy.deepcopy(fake.VSERVER_INFO) vserver_info_dp['subtype'] = 'dp_destination' self.mock_object(mock_client, 'get_vserver_info', mock.Mock(side_effect=[vserver_info_dp, vserver_info_default])) self.dm_session.wait_for_vserver_state(fake.VSERVER1, mock_client, state='running', operational_state='running', subtype='default', timeout=20) mock_client.get_vserver_info.assert_has_calls([ mock.call(fake.VSERVER1), 
mock.call(fake.VSERVER1)]) def test_wait_for_vserver_state_timeout(self): mock_client = mock.Mock() vserver_info_dp = copy.deepcopy(fake.VSERVER_INFO) vserver_info_dp['subtype'] = 'dp_destination' self.mock_object(mock_client, 'get_vserver_info', mock.Mock(side_effect=[vserver_info_dp])) self.assertRaises( exception.NetAppException, self.dm_session.wait_for_vserver_state, fake.VSERVER1, mock_client, state='running', operational_state='running', subtype='default', timeout=10) mock_client.get_vserver_info.assert_called_once_with(fake.VSERVER1) @ddt.data(mock.Mock(), mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EOBJECTNOTFOUND))) def test_wait_for_snapmirror_release_svm(self, release_snapmirror_ret): src_mock_client = mock.Mock() get_snapmirrors_mock = self.mock_object( src_mock_client, 'get_snapmirror_destinations_svm', mock.Mock(side_effect=[['fake_snapmirror'], []])) self.mock_object(src_mock_client, 'release_snapmirror_svm', release_snapmirror_ret) self.dm_session.wait_for_snapmirror_release_svm(fake.VSERVER1, fake.VSERVER2, src_mock_client, timeout=20) get_snapmirrors_mock.assert_has_calls([ mock.call(source_vserver=fake.VSERVER1, dest_vserver=fake.VSERVER2), mock.call(source_vserver=fake.VSERVER1, dest_vserver=fake.VSERVER2)]) src_mock_client.release_snapmirror_svm.assert_called_once_with( fake.VSERVER1, fake.VSERVER2) def test_wait_for_snapmirror_release_svm_timeout(self): src_mock_client = mock.Mock() get_snapmirrors_mock = self.mock_object( src_mock_client, 'get_snapmirror_destinations_svm', mock.Mock(side_effect=[['fake_snapmirror']])) self.mock_object(src_mock_client, 'release_snapmirror_svm') self.assertRaises(exception.NetAppException, self.dm_session.wait_for_snapmirror_release_svm, fake.VSERVER1, fake.VSERVER2, src_mock_client, timeout=10) get_snapmirrors_mock.assert_called_once_with( source_vserver=fake.VSERVER1, dest_vserver=fake.VSERVER2) src_mock_client.release_snapmirror_svm.assert_called_once_with( fake.VSERVER1, fake.VSERVER2 ) 
def test_wait_for_mount_replica(self): mock_client = mock.Mock() self.mock_object(time, 'sleep') mock_warning_log = self.mock_object(data_motion.LOG, 'warning') self.dm_session.wait_for_mount_replica( mock_client, fake.SHARE_NAME) mock_client.mount_volume.ssert_called_once_with(fake.SHARE_NAME) self.assertEqual(0, mock_warning_log.call_count) def test_wait_for_mount_replica_timeout(self): mock_client = mock.Mock() self.mock_object(time, 'sleep') mock_warning_log = self.mock_object(data_motion.LOG, 'warning') undergoing_snapmirror = ( 'The volume is undergoing a snapmirror initialize.') na_api_error = netapp_api.NaApiError(code=netapp_api.EAPIERROR, message=undergoing_snapmirror) mock_client.mount_volume.side_effect = na_api_error self.assertRaises(exception.NetAppException, self.dm_session.wait_for_mount_replica, mock_client, fake.SHARE_NAME, timeout=30) self.assertEqual(3, mock_client.mount_volume.call_count) self.assertEqual(3, mock_warning_log.call_count) def test_wait_for_mount_replica_api_not_found(self): mock_client = mock.Mock() self.mock_object(time, 'sleep') mock_warning_log = self.mock_object(data_motion.LOG, 'warning') na_api_error = netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) mock_client.mount_volume.side_effect = na_api_error self.assertRaises(exception.NetAppException, self.dm_session.wait_for_mount_replica, mock_client, fake.SHARE_NAME, timeout=30) mock_client.mount_volume.assert_called_once_with(fake.SHARE_NAME) mock_warning_log.assert_not_called() @ddt.data(mock.Mock(), mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EOBJECTNOTFOUND))) def test_wait_for_snapmirror_release_vol(self, release_snapmirror_ret): src_mock_client = mock.Mock() get_snapmirrors_mock = self.mock_object( src_mock_client, 'get_snapmirror_destinations', mock.Mock(side_effect=[['fake_snapmirror'], []])) self.mock_object(src_mock_client, 'release_snapmirror_vol', release_snapmirror_ret) self.dm_session.wait_for_snapmirror_release_vol(fake.VSERVER1, 
fake.VSERVER2, fake.SHARE_NAME, fake.SHARE_NAME2, False, src_mock_client, timeout=20) get_snapmirrors_mock.assert_has_calls([ mock.call(source_vserver=fake.VSERVER1, dest_vserver=fake.VSERVER2, source_volume=fake.SHARE_NAME, dest_volume=fake.SHARE_NAME2), mock.call(source_vserver=fake.VSERVER1, dest_vserver=fake.VSERVER2, source_volume=fake.SHARE_NAME, dest_volume=fake.SHARE_NAME2)]) src_mock_client.release_snapmirror_vol.assert_called_once_with( fake.VSERVER1, fake.SHARE_NAME, fake.VSERVER2, fake.SHARE_NAME2, relationship_info_only=False) def test_wait_for_snapmirror_release_vol_timeout(self): src_mock_client = mock.Mock() get_snapmirrors_mock = self.mock_object( src_mock_client, 'get_snapmirror_destinations', mock.Mock(side_effect=[['fake_snapmirror']])) self.mock_object(src_mock_client, 'release_snapmirror_vol') self.assertRaises(exception.NetAppException, self.dm_session.wait_for_snapmirror_release_vol, fake.VSERVER1, fake.VSERVER2, fake.SHARE_NAME, fake.SHARE_NAME2, False, src_mock_client, timeout=10) get_snapmirrors_mock.assert_has_calls([ mock.call(source_vserver=fake.VSERVER1, dest_vserver=fake.VSERVER2, source_volume=fake.SHARE_NAME, dest_volume=fake.SHARE_NAME2)]) src_mock_client.release_snapmirror_vol.assert_called_once_with( fake.VSERVER1, fake.SHARE_NAME, fake.VSERVER2, fake.SHARE_NAME2, relationship_info_only=False) @ddt.data([{'id': 'src_share'}, {'id': 'dst_share'}], [{'id': 'dst_share'}]) def test_cleanup_previous_snapmirror_relationships(self, replica_list): mock_src_client = mock.Mock() src_backend_info = ('src_share', 'src_vserver', 'src_backend') dst_backend_info = ('dst_share', 'dst_vserver', 'dst_backend') self.mock_object(self.dm_session, 'get_backend_info_for_share', mock.Mock(side_effect=[src_backend_info, dst_backend_info])) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=mock_src_client)) self.mock_object(mock_src_client, 'release_snapmirror_vol') result = 
self.dm_session.cleanup_previous_snapmirror_relationships( {'id': 'src_share'}, replica_list) data_motion.get_client_for_backend.assert_called_once_with( 'src_backend', vserver_name='src_vserver') self.dm_session.get_backend_info_for_share.assert_has_calls([ mock.call({'id': 'src_share'}), mock.call({'id': 'dst_share'}) ]) mock_src_client.release_snapmirror_vol.assert_called_once_with( 'src_vserver', 'src_share', 'dst_vserver', 'dst_share') self.assertIsNone(result) @ddt.data(netapp_api.NaApiError(), netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND), netapp_api.NaApiError(code=netapp_api.ESOURCE_IS_DIFFERENT), netapp_api.NaApiError(code='some_random_code', message="(entry doesn't exist)"), netapp_api.NaApiError(code='some_random_code', message='(actually, entry does exist!)')) def test_cleanup_previous_snapmirror_relationships_does_not_exist( self, release_exception): mock_src_client = mock.Mock() self.mock_object(self.dm_session, 'get_backend_info_for_share', mock.Mock(return_value=( mock.Mock(), mock.Mock(), mock.Mock()))) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=mock_src_client)) self.mock_object(mock_src_client, 'release_snapmirror_vol', mock.Mock(side_effect=release_exception)) replica = {'id': 'src_share'} replica_list = [replica, {'id': 'dst_share'}] result = self.dm_session.cleanup_previous_snapmirror_relationships( replica, replica_list) mock_src_client.release_snapmirror_vol.assert_called() self.assertIsNone(result) def test_get_most_available_aggr_of_vserver(self): vserver_client = mock.Mock() aggr_space_attr = {fake.AGGREGATE: {'available': 5678}, 'aggr2': {'available': 2024}} self.mock_object(vserver_client, 'get_vserver_aggregate_capacities', mock.Mock(return_value=aggr_space_attr)) result = self.dm_session.get_most_available_aggr_of_vserver( vserver_client) self.assertEqual(result, fake.AGGREGATE) def test_initialize_and_wait_snapmirror_vol(self): vserver_client = mock.Mock() snapmirror_info = 
[{'source-vserver': fake.VSERVER1, 'source-volume': "fake_source_vol", 'destination-vserver': fake.VSERVER2, 'destination-volume': "fake_des_vol", 'relationship-status': "idle"}] self.mock_object(vserver_client, 'get_snapmirrors', mock.Mock(return_value=snapmirror_info)) (self.dm_session. initialize_and_wait_snapmirror_vol(vserver_client, fake.VSERVER1, fake.FLEXVOL_NAME, fake.VSERVER2, fake.FLEXVOL_NAME_1, source_snapshot=None, transfer_priority=None, timeout=300)) (vserver_client.initialize_snapmirror_vol. assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, mock.ANY, source_snapshot=mock.ANY, transfer_priority=mock.ANY, )) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_driver_interfaces.py0000664000175000017500000000511600000000000033302 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Mock unit tests for the NetApp file share driver interfaces """ from unittest import mock from manila.share.drivers.netapp.dataontap.cluster_mode import drv_multi_svm from manila.share.drivers.netapp.dataontap.cluster_mode import drv_single_svm from manila import test class NetAppFileStorageDriverInterfaceTestCase(test.TestCase): def setUp(self): super(NetAppFileStorageDriverInterfaceTestCase, self).setUp() self.mock_object(drv_multi_svm.NetAppCmodeMultiSvmShareDriver, '__init__', mock.Mock(return_value=None)) self.mock_object(drv_single_svm.NetAppCmodeSingleSvmShareDriver, '__init__', mock.Mock(return_value=None)) self.drv_multi_svm = drv_multi_svm.NetAppCmodeMultiSvmShareDriver() self.drv_single_svm = drv_single_svm.NetAppCmodeSingleSvmShareDriver() def test_driver_interfaces_match(self): """Ensure the NetApp file storage driver interfaces match. The two file share Manila drivers from NetApp (cDOT multi-SVM, cDOT single-SVM) are merely passthrough shim layers atop a common file storage library. Bugs are easily introduced when a Manila method is exposed via a subset of those driver shims. This test ensures they remain in sync and the library features are uniformly available in the drivers. 
""" # Get local functions of each driver interface multi_svm_methods = self._get_local_functions(self.drv_multi_svm) single_svm_methods = self._get_local_functions(self.drv_single_svm) # Ensure NetApp file share driver shims are identical self.assertSetEqual(multi_svm_methods, single_svm_methods) def _get_local_functions(self, obj): """Get function names of an object without superclass functions.""" return set([key for key, value in type(obj).__dict__.items() if callable(value)]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py0000664000175000017500000151160600000000000031353 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the NetApp Data ONTAP cDOT base storage driver library. 
""" import copy import json import math import socket import time from unittest import mock import ddt from oslo_config import cfg from oslo_log import log from oslo_service import loopingcall from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils from manila.common import constants from manila import exception from manila.share import configuration from manila.share import driver from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.client import client_cmode from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp.dataontap.cluster_mode import performance from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode from manila.share.drivers.netapp import options as na_opts from manila.share.drivers.netapp import utils as na_utils from manila.share import share_types from manila.share import utils as share_utils from manila import test from manila.tests import fake_share from manila.tests.share.drivers.netapp.dataontap import fakes as fake from manila.tests.share.drivers.netapp import fakes as na_fakes from manila.tests import utils CONF = cfg.CONF def fake_replica(**kwargs): return fake_share.fake_replica(for_manager=True, **kwargs) def _get_config(): backup_config = 'backup_config' config = configuration.Configuration(driver.share_opts, config_group=backup_config) config.append_config_values(na_opts.netapp_backup_opts) config.append_config_values(na_opts.netapp_proxy_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_certificateauth_opts) config.append_config_values(na_opts.netapp_provisioning_opts) 
config.append_config_values(na_opts.netapp_support_opts) config.append_config_values(na_opts.netapp_data_motion_opts) config.append_config_values(na_opts.netapp_cluster_opts) CONF.set_override("netapp_enabled_backup_types", [fake.BACKUP_TYPE, "backup2"], group=backup_config) CONF.set_override("netapp_backup_backend_section_name", fake.BACKEND_NAME, group=backup_config) CONF.set_override("netapp_backup_vserver", "fake_backup_share", group=backup_config) CONF.set_override("netapp_backup_volume", "fake_share_server", group=backup_config) return config @ddt.ddt class NetAppFileStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppFileStorageLibraryTestCase, self).setUp() self.mock_object(na_utils, 'validate_driver_instantiation') self.mock_object(na_utils, 'setup_tracing') # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(lib_base.LOG, 'info', mock.Mock(side_effect=mock_logger.info)) self.mock_object(lib_base.LOG, 'warning', mock.Mock(side_effect=mock_logger.warning)) self.mock_object(lib_base.LOG, 'error', mock.Mock(side_effect=mock_logger.error)) self.mock_object(lib_base.LOG, 'debug', mock.Mock(side_effect=mock_logger.debug)) kwargs = { 'configuration': fake.get_config_cmode(), 'private_storage': mock.Mock(), 'app_version': fake.APP_VERSION } self.library = lib_base.NetAppCmodeFileStorageLibrary(fake.DRIVER_NAME, **kwargs) self.library._client = mock.Mock() self.library._perf_library = mock.Mock() self.client = self.library._client self.context = mock.Mock() self.fake_replica = copy.deepcopy(fake.SHARE) self.fake_replica_2 = copy.deepcopy(fake.SHARE) self.fake_replica_2['id'] = fake.SHARE_ID2 self.fake_replica_2['replica_state'] = ( constants.REPLICA_STATE_OUT_OF_SYNC) self.mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=self.mock_dm_session)) self.mock_object(data_motion, 'get_client_for_backend') def _mock_api_error(self, 
code='fake', message='fake'): return mock.Mock(side_effect=netapp_api.NaApiError(code=code, message=message)) def test_init(self): self.assertEqual(fake.DRIVER_NAME, self.library.driver_name) self.assertEqual(1, na_utils.validate_driver_instantiation.call_count) self.assertEqual(1, na_utils.setup_tracing.call_count) self.assertListEqual([], self.library._licenses) self.assertDictEqual({}, self.library._clients) self.assertDictEqual({}, self.library._ssc_stats) self.assertIsNotNone(self.library._app_version) def test_do_setup(self): mock_get_api_client = self.mock_object(self.library, '_get_api_client') self.mock_object( performance, 'PerformanceLibrary', mock.Mock(return_value='fake_perf_library')) self.mock_object( self.library._client, 'check_for_cluster_credentials', mock.Mock(return_value=True)) self.mock_object( self.library._client, 'get_nfs_config_default', mock.Mock(return_value=fake.NFS_CONFIG_DEFAULT)) self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=['node1', 'node2'])) self.mock_object( self.library, '_check_snaprestore_license', mock.Mock(return_value=True)) self.mock_object( self.library, '_get_licenses', mock.Mock(return_value=fake.LICENSES)) mock_get_api_client.features.TRANSFER_LIMIT_NFS_CONFIG = True self.library.do_setup(self.context) self.assertEqual(fake.LICENSES, self.library._licenses) mock_get_api_client.assert_called_once_with() (self.library._client.check_for_cluster_credentials. assert_called_once_with()) (self.library._client.get_nfs_config_default. 
assert_called_once_with( list(self.library.NFS_CONFIG_EXTRA_SPECS_MAP.values()))) self.assertEqual('fake_perf_library', self.library._perf_library) self.mock_object(self.library._client, 'check_for_cluster_credentials', mock.Mock(return_value=True)) self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=['node1', 'node2'])) self.mock_object( self.library, '_check_snaprestore_license', mock.Mock(return_value=True)) mock_set_cluster_info = self.mock_object( self.library, '_set_cluster_info') self.library.do_setup(self.context) mock_set_cluster_info.assert_called_once() def test_set_cluster_info(self): self.library._client.is_nve_supported.return_value = True self.library._client.features.FLEXVOL_ENCRYPTION = True self.library._set_cluster_info() self.assertTrue(self.library._cluster_info['nve_support']) def test_check_for_setup_error(self): mock_start_periodic_tasks = self.mock_object(self.library, '_start_periodic_tasks') self.library.check_for_setup_error() mock_start_periodic_tasks.assert_called_once_with() def test_get_vserver(self): self.assertRaises(NotImplementedError, self.library._get_vserver) def test_get_api_client(self): client_kwargs = fake.CLIENT_KWARGS.copy() # First call should proceed normally. mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client1 = self.library._get_api_client() self.assertIsNotNone(client1) mock_client_constructor.assert_called_once_with(**client_kwargs) # Second call should yield the same object. mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client2 = self.library._get_api_client() self.assertEqual(client1, client2) self.assertFalse(mock_client_constructor.called) def test_get_api_client_with_vserver(self): client_kwargs = fake.CLIENT_KWARGS.copy() client_kwargs['vserver'] = fake.VSERVER1 # First call should proceed normally. 
mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client1 = self.library._get_api_client(vserver=fake.VSERVER1) self.assertIsNotNone(client1) mock_client_constructor.assert_called_once_with(**client_kwargs) # Second call should yield the same object. mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client2 = self.library._get_api_client(vserver=fake.VSERVER1) self.assertEqual(client1, client2) self.assertFalse(mock_client_constructor.called) # A different vserver should work normally without caching. mock_client_constructor = self.mock_object(client_cmode, 'NetAppCmodeClient') client3 = self.library._get_api_client(vserver=fake.VSERVER2) self.assertNotEqual(client1, client3) client_kwargs['vserver'] = fake.VSERVER2 mock_client_constructor.assert_called_once_with(**client_kwargs) def test_get_licenses_both_protocols(self): self.library._have_cluster_creds = True self.mock_object(self.client, 'get_licenses', mock.Mock(return_value=fake.LICENSES)) result = self.library._get_licenses() self.assertSequenceEqual(fake.LICENSES, result) self.assertEqual(0, lib_base.LOG.error.call_count) self.assertEqual(1, lib_base.LOG.info.call_count) def test_get_licenses_one_protocol(self): self.library._have_cluster_creds = True licenses = list(fake.LICENSES) licenses.remove('nfs') self.mock_object(self.client, 'get_licenses', mock.Mock(return_value=licenses)) result = self.library._get_licenses() self.assertListEqual(licenses, result) self.assertEqual(0, lib_base.LOG.error.call_count) self.assertEqual(1, lib_base.LOG.info.call_count) def test_get_licenses_no_protocols(self): self.library._have_cluster_creds = True licenses = list(fake.LICENSES) licenses.remove('nfs') licenses.remove('cifs') self.mock_object(self.client, 'get_licenses', mock.Mock(return_value=licenses)) result = self.library._get_licenses() self.assertListEqual(licenses, result) self.assertEqual(1, lib_base.LOG.error.call_count) self.assertEqual(1, 
lib_base.LOG.info.call_count) def test_get_licenses_no_cluster_creds(self): self.library._have_cluster_creds = False result = self.library._get_licenses() self.assertListEqual([], result) self.assertEqual(1, lib_base.LOG.debug.call_count) def test_start_periodic_tasks(self): mock_update_ssc_info = self.mock_object(self.library, '_update_ssc_info') mock_handle_ems_logging = self.mock_object(self.library, '_handle_ems_logging') mock_handle_housekeeping_tasks = self.mock_object( self.library, '_handle_housekeeping_tasks') mock_ssc_periodic_task = mock.Mock() mock_ems_periodic_task = mock.Mock() mock_housekeeping_periodic_task = mock.Mock() mock_loopingcall = self.mock_object( loopingcall, 'FixedIntervalLoopingCall', mock.Mock(side_effect=[mock_ssc_periodic_task, mock_ems_periodic_task, mock_housekeeping_periodic_task])) self.library._start_periodic_tasks() self.assertTrue(mock_update_ssc_info.called) self.assertFalse(mock_handle_ems_logging.called) self.assertFalse(mock_housekeeping_periodic_task.called) mock_loopingcall.assert_has_calls( [mock.call(mock_update_ssc_info), mock.call(mock_handle_ems_logging), mock.call(mock_handle_housekeeping_tasks)]) self.assertTrue(mock_ssc_periodic_task.start.called) self.assertTrue(mock_ems_periodic_task.start.called) self.assertTrue(mock_housekeeping_periodic_task.start.called) def test_get_backend_share_name(self): result = self.library._get_backend_share_name(fake.SHARE_ID) expected = (fake.VOLUME_NAME_TEMPLATE % {'share_id': fake.SHARE_ID.replace('-', '_')}) self.assertEqual(expected, result) def test_get_backend_snapshot_name(self): result = self.library._get_backend_snapshot_name(fake.SNAPSHOT_ID) expected = 'share_snapshot_' + fake.SNAPSHOT_ID.replace('-', '_') self.assertEqual(expected, result) def test_get_backend_cg_snapshot_name(self): result = self.library._get_backend_cg_snapshot_name(fake.SNAPSHOT_ID) expected = 'share_cg_snapshot_' + fake.SNAPSHOT_ID.replace('-', '_') self.assertEqual(expected, result) def 
test__get_backend_snapmirror_policy_name_svm(self): result = self.library._get_backend_snapmirror_policy_name_svm( fake.SERVER_ID) expected = 'snapmirror_policy_' + fake.SERVER_ID.replace('-', '_') self.assertEqual(expected, result) def test_get_aggregate_space_cluster_creds(self): self.library._have_cluster_creds = True self.mock_object(self.library._client, 'get_cluster_aggregate_capacities', mock.Mock(return_value=fake.AGGREGATE_CAPACITIES)) result = self.library._get_aggregate_space(fake.AGGREGATES) (self.library._client.get_cluster_aggregate_capacities. assert_called_once_with(fake.AGGREGATES)) self.assertDictEqual(fake.AGGREGATE_CAPACITIES, result) def test_get_aggregate_space_no_cluster_creds(self): self.library._have_cluster_creds = False self.mock_object(self.library._client, 'get_vserver_aggregate_capacities', mock.Mock(return_value=fake.AGGREGATE_CAPACITIES)) result = self.library._get_aggregate_space(fake.AGGREGATES) (self.library._client.get_vserver_aggregate_capacities. assert_called_once_with(fake.AGGREGATES)) self.assertDictEqual(fake.AGGREGATE_CAPACITIES, result) def test_check_snaprestore_license_admin_notfound(self): self.library._have_cluster_creds = True licenses = list(fake.LICENSES) licenses.remove('snaprestore') self.mock_object(self.client, 'get_licenses', mock.Mock(return_value=licenses)) result = self.library._check_snaprestore_license() self.assertIs(False, result) def test_check_snaprestore_license_admin_found(self): self.library._have_cluster_creds = True self.library._licenses = fake.LICENSES result = self.library._check_snaprestore_license() self.assertIs(True, result) def test_check_snaprestore_license_svm_scoped(self): self.library._have_cluster_creds = False self.mock_object(self.library._client, 'check_snaprestore_license', mock.Mock(return_value=True)) result = self.library._check_snaprestore_license() self.assertIs(True, result) def test_get_aggregate_node_cluster_creds(self): self.library._have_cluster_creds = True 
self.mock_object(self.library._client, 'get_node_for_aggregate', mock.Mock(return_value=fake.CLUSTER_NODE)) result = self.library._get_aggregate_node(fake.AGGREGATE) (self.library._client.get_node_for_aggregate. assert_called_once_with(fake.AGGREGATE)) self.assertEqual(fake.CLUSTER_NODE, result) def test_get_aggregate_node_no_cluster_creds(self): self.library._have_cluster_creds = False self.mock_object(self.library._client, 'get_node_for_aggregate') result = self.library._get_aggregate_node(fake.AGGREGATE) self.assertFalse(self.library._client.get_node_for_aggregate.called) self.assertIsNone(result) def test_get_default_filter_function(self): result = self.library.get_default_filter_function() self.assertEqual(self.library.DEFAULT_FILTER_FUNCTION, result) def test_get_default_filter_function_flexgroup(self): mock_is_flexgroup = self.mock_object( self.library, '_is_flexgroup_pool', mock.Mock(return_value=True)) mock_get_min = self.mock_object( self.library, '_get_minimum_flexgroup_size', mock.Mock(return_value=self.library.FLEXGROUP_MIN_SIZE_PER_AGGR)) result = self.library.get_default_filter_function(pool=fake.POOL_NAME) expected_filer = (self.library.DEFAULT_FLEXGROUP_FILTER_FUNCTION % self.library.FLEXGROUP_MIN_SIZE_PER_AGGR) self.assertEqual(expected_filer, result) mock_is_flexgroup.assert_called_once_with(fake.POOL_NAME) mock_get_min.assert_called_once_with(fake.POOL_NAME) def test_get_default_goodness_function(self): result = self.library.get_default_goodness_function() self.assertEqual(self.library.DEFAULT_GOODNESS_FUNCTION, result) @ddt.data( {'replication': True, 'flexgroup': False}, {'replication': True, 'flexgroup': True}, {'replication': False, 'flexgroup': False}, {'replication': False, 'flexgroup': True}, ) @ddt.unpack def test_get_share_stats(self, replication, flexgroup): if replication: self.library.configuration.replication_domain = "fake_domain" if flexgroup: self.library._flexgroup_pools = {'pool': ['aggr']} mock_get_pools = self.mock_object( 
self.library, '_get_pools', mock.Mock(return_value=fake.POOLS)) result = self.library.get_share_stats(get_filter_function='filter', goodness_function='goodness') expected = { 'share_backend_name': fake.BACKEND_NAME, 'driver_name': fake.DRIVER_NAME, 'vendor_name': 'NetApp', 'driver_version': '1.0', 'netapp_storage_family': 'ontap_cluster', 'storage_protocol': 'NFS_CIFS', 'pools': fake.POOLS, 'share_group_stats': {'consistent_snapshot_support': 'host'}, } if flexgroup: expected['share_group_stats']['consistent_snapshot_support'] = None if replication: expected['replication_type'] = ['dr', 'readable'] expected['replication_domain'] = 'fake_domain' self.assertDictEqual(expected, result) mock_get_pools.assert_called_once_with(get_filter_function='filter', goodness_function='goodness') def test_get_share_server_pools(self): self.mock_object(self.library, '_get_pools', mock.Mock(return_value=fake.POOLS)) self.library._cache_pool_status = na_utils.DataCache(60) result = self.library.get_share_server_pools(fake.SHARE_SERVER) self.assertListEqual(fake.POOLS, result) def test_get_pools(self): fake_total = 1.0 fake_free = 1.0 fake_used = 1.0 fake_pool = copy.deepcopy(fake.POOLS) fake_pool.append(fake.FLEXGROUP_POOL) self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT mock_find_aggr = self.mock_object( self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_get_flexgroup_aggr = self.mock_object( self.library, '_get_flexgroup_aggr_set', mock.Mock(return_value=fake.FLEXGROUP_AGGR_SET)) mock_get_aggregate_space = self.mock_object( self.library, '_get_aggregate_space', mock.Mock(return_value=fake.AGGREGATE_CAPACITIES)) mock_get_flexvol_space = self.mock_object( self.library, '_get_flexvol_pool_space', mock.Mock(return_value=(fake_total, fake_free, fake_used))) mock_get_pool = self.mock_object( self.library, '_get_pool', mock.Mock(side_effect=fake_pool)) mock_get_flexgroup_space = self.mock_object( self.library, '_get_flexgroup_pool_space', 
mock.Mock(return_value=(fake_total, fake_free, fake_used))) mock_get_cluster_name = self.mock_object( self.library._client, 'get_cluster_name', mock.Mock(return_value='fake_cluster_name')) self.library._cache_pool_status = na_utils.DataCache(60) self.library._have_cluster_creds = True result = self.library._get_pools( get_filter_function=fake.fake_get_filter_function, goodness_function='goodness') self.assertListEqual(fake_pool, result) mock_find_aggr.assert_called_once_with() mock_get_flexgroup_aggr.assert_called_once_with() mock_get_aggregate_space.assert_called_once_with(set(fake.AGGREGATES)) mock_get_flexvol_space.assert_has_calls([ mock.call(fake.AGGREGATE_CAPACITIES, fake.AGGREGATES[0]), mock.call(fake.AGGREGATE_CAPACITIES, fake.AGGREGATES[1])]) mock_get_flexgroup_space.assert_has_calls([ mock.call(fake.AGGREGATE_CAPACITIES, fake.FLEXGROUP_POOL_OPT[fake.FLEXGROUP_POOL_NAME])]) mock_get_cluster_name.assert_called_once_with() mock_get_pool.assert_has_calls([ mock.call(fake.AGGREGATES[0], fake_total, fake_free, fake_used), mock.call(fake.AGGREGATES[1], fake_total, fake_free, fake_used), mock.call(fake.FLEXGROUP_POOL_NAME, fake_total, fake_free, fake_used)]) def test_get_pool_vserver_creds(self): fake_pool = fake.POOLS_VSERVER_CREDS[0] self.library._have_cluster_creds = False self.library._revert_to_snapshot_support = True self.library._cluster_info = fake.CLUSTER_INFO self.library._ssc_stats = fake.SSC_INFO_VSERVER_CREDS self.library._perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=50.0)) self.mock_object(self.library, '_get_aggregate_snaplock_type', mock.Mock(return_value="compliance")) result = self.library._get_pool( fake_pool['pool_name'], fake_pool['total_capacity_gb'], fake_pool['free_capacity_gb'], fake_pool['allocated_capacity_gb']) self.assertEqual(fake_pool, result) def test_get_pool_cluster_creds(self): fake_pool = copy.deepcopy(fake.POOLS[0]) fake_pool['filter_function'] = None fake_pool['goodness_function'] = None 
fake_pool['netapp_cluster_name'] = '' self.library._have_cluster_creds = True self.library._revert_to_snapshot_support = True self.library._cluster_info = fake.CLUSTER_INFO self.library._ssc_stats = fake.SSC_INFO self.library._perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=30.0)) self.mock_object(self.library, '_get_aggregate_snaplock_type', mock.Mock(return_value="compliance")) result = self.library._get_pool( fake_pool['pool_name'], fake_pool['total_capacity_gb'], fake_pool['free_capacity_gb'], fake_pool['allocated_capacity_gb']) self.assertEqual(fake_pool, result) def test_get_flexvol_pool_space(self): total_gb, free_gb, used_gb = self.library._get_flexvol_pool_space( fake.AGGREGATE_CAPACITIES, fake.AGGREGATES[0]) self.mock_object(self.library, '_get_aggregate_snaplock_type', mock.Mock(return_value="compliance")) self.assertEqual(total_gb, fake.POOLS[0]['total_capacity_gb']) self.assertEqual(free_gb, fake.POOLS[0]['free_capacity_gb']) self.assertEqual(used_gb, fake.POOLS[0]['allocated_capacity_gb']) def test_get_flexgroup_pool_space(self): total_gb, free_gb, used_gb = self.library._get_flexgroup_pool_space( fake.AGGREGATE_CAPACITIES, fake.FLEXGROUP_POOL_AGGR) self.assertEqual(total_gb, fake.FLEXGROUP_POOL['total_capacity_gb']) self.assertEqual(free_gb, fake.FLEXGROUP_POOL['free_capacity_gb']) self.assertEqual(used_gb, fake.FLEXGROUP_POOL['allocated_capacity_gb']) @ddt.data( {'aggr_space': fake.AGGREGATE_CAPACITIES, 'aggr_pool': []}, {'aggr_space': fake.AGGREGATE_CAPACITIES, 'aggr_pool': ['fake']}, {'aggr_space': {fake.AGGREGATES[0]: {}}, 'aggr_pool': [fake.AGGREGATES[0]]}) @ddt.unpack def test_get_flexgroup_pool_space_zero(self, aggr_space, aggr_pool): total_gb, free_gb, used_gb = self.library._get_flexgroup_pool_space( aggr_space, aggr_pool) self.assertEqual(total_gb, 0.0) self.assertEqual(free_gb, 0.0) self.assertEqual(used_gb, 0.0) def test_get_flexgroup_aggr_set(self): self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT result = 
self.library._get_flexgroup_aggr_set() self.assertSetEqual(result, set(fake.FLEXGROUP_POOL_AGGR)) def test_handle_ems_logging(self): self.mock_object(self.library, '_build_ems_log_message_0', mock.Mock(return_value=fake.EMS_MESSAGE_0)) self.mock_object(self.library, '_build_ems_log_message_1', mock.Mock(return_value=fake.EMS_MESSAGE_1)) self.library._handle_ems_logging() self.library._client.send_ems_log_message.assert_has_calls([ mock.call(fake.EMS_MESSAGE_0), mock.call(fake.EMS_MESSAGE_1), ]) def test_build_ems_log_message_0(self): self.mock_object(socket, 'gethostname', mock.Mock(return_value=fake.HOST_NAME)) result = self.library._build_ems_log_message_0() self.assertDictEqual(fake.EMS_MESSAGE_0, result) def test_build_ems_log_message_1(self): pool_info = { 'pools': { 'vserver': 'fake_vserver', 'aggregates': ['aggr1', 'aggr2'], }, } self.mock_object(socket, 'gethostname', mock.Mock(return_value=fake.HOST_NAME)) self.mock_object(self.library, '_get_ems_pool_info', mock.Mock(return_value=pool_info)) result = self.library._build_ems_log_message_1() self.assertDictEqual(pool_info, json.loads(result['event-description'])) result['event-description'] = '' self.assertDictEqual(fake.EMS_MESSAGE_1, result) def test_get_ems_pool_info(self): self.assertRaises(NotImplementedError, self.library._get_ems_pool_info) def test_find_matching_aggregates(self): self.assertRaises(NotImplementedError, self.library._find_matching_aggregates) @ddt.data(('NFS', nfs_cmode.NetAppCmodeNFSHelper), ('nfs', nfs_cmode.NetAppCmodeNFSHelper), ('CIFS', cifs_cmode.NetAppCmodeCIFSHelper), ('cifs', cifs_cmode.NetAppCmodeCIFSHelper)) @ddt.unpack def test_get_helper(self, protocol, helper_type): fake_share = fake.SHARE.copy() fake_share['share_proto'] = protocol mock_check_license_for_protocol = self.mock_object( self.library, '_check_license_for_protocol') result = self.library._get_helper(fake_share) mock_check_license_for_protocol.assert_called_once_with( protocol.lower()) 
self.assertEqual(helper_type, type(result)) def test_get_helper_invalid_protocol(self): fake_share = fake.SHARE.copy() fake_share['share_proto'] = 'iSCSI' self.mock_object(self.library, '_check_license_for_protocol') self.assertRaises(exception.NetAppException, self.library._get_helper, fake_share) def test_check_license_for_protocol_no_cluster_creds(self): self.library._have_cluster_creds = False result = self.library._check_license_for_protocol('fake_protocol') self.assertIsNone(result) def test_check_license_for_protocol_have_license(self): self.library._have_cluster_creds = True self.library._licenses = ['base', 'fake_protocol'] result = self.library._check_license_for_protocol('FAKE_PROTOCOL') self.assertIsNone(result) def test_check_license_for_protocol_newly_licensed_protocol(self): self.library._have_cluster_creds = True self.mock_object(self.library, '_get_licenses', mock.Mock(return_value=['base', 'nfs'])) self.library._licenses = ['base'] result = self.library._check_license_for_protocol('NFS') self.assertIsNone(result) self.assertTrue(self.library._get_licenses.called) def test_check_license_for_protocol_unlicensed_protocol(self): self.library._have_cluster_creds = True self.mock_object(self.library, '_get_licenses', mock.Mock(return_value=['base'])) self.library._licenses = ['base'] self.assertRaises(exception.NetAppException, self.library._check_license_for_protocol, 'NFS') def test_get_pool_has_pool(self): result = self.library.get_pool(fake.SHARE) self.assertEqual(fake.POOL_NAME, result) self.assertFalse(self.client.get_aggregate_for_volume.called) @ddt.data(True, False) def test_get_pool_no_pool(self, is_flexgroup): fake_share = copy.deepcopy(fake.SHARE) fake_share['host'] = '%(host)s@%(backend)s' % { 'host': fake.HOST_NAME, 'backend': fake.BACKEND_NAME} self.mock_object(self.library, '_get_flexgroup_pool_name', mock.Mock(return_value=fake.POOL_NAME)) if is_flexgroup: self.client.get_aggregate_for_volume.return_value = [ fake.POOL_NAME] else: 
self.client.get_aggregate_for_volume.return_value = fake.POOL_NAME result = self.library.get_pool(fake_share) self.assertEqual(fake.POOL_NAME, result) self.assertTrue(self.client.get_aggregate_for_volume.called) self.assertEqual(is_flexgroup, self.library._get_flexgroup_pool_name.called) @ddt.data(True, False) def test_get_pool_raises(self, is_flexgroup): fake_share = copy.deepcopy(fake.SHARE) fake_share['host'] = '%(host)s@%(backend)s' % { 'host': fake.HOST_NAME, 'backend': fake.BACKEND_NAME} self.mock_object(self.library, '_get_flexgroup_pool_name', mock.Mock(return_value=None)) if is_flexgroup: self.client.get_aggregate_for_volume.return_value = [] else: self.client.get_aggregate_for_volume.return_value = None self.assertRaises(exception.NetAppException, self.library.get_pool, fake_share) def test_create_share(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_allocate_container = self.mock_object(self.library, '_allocate_container') mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(return_value='fake_export_location')) result = self.library.create_share(self.context, fake.SHARE, share_server=fake.SHARE_SERVER) mock_allocate_container.assert_called_once_with(fake.SHARE, fake.VSERVER1, vserver_client) mock_create_export.assert_called_once_with(fake.SHARE, fake.SHARE_SERVER, fake.VSERVER1, vserver_client) self.assertEqual('fake_export_location', result) @ddt.data(None, fake.CG_SNAPSHOT_MEMBER_ID1) def test_create_share_from_snapshot(self, share_group_id): share = copy.deepcopy(fake.SHARE) share['source_share_group_snapshot_member_id'] = share_group_id vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_allocate_container_from_snapshot = self.mock_object( self.library, '_allocate_container_from_snapshot') mock_create_export = self.mock_object( self.library, 
'_create_export', mock.Mock(return_value='fake_export_location')) result = self.library.create_share_from_snapshot( self.context, share, fake.SNAPSHOT, share_server=fake.SHARE_SERVER, parent_share=share) mock_allocate_container_from_snapshot.assert_called_once_with( share, fake.SNAPSHOT, fake.VSERVER1, vserver_client) mock_create_export.assert_called_once_with(share, fake.SHARE_SERVER, fake.VSERVER1, vserver_client) self.assertEqual('fake_export_location', result) def _setup_mocks_for_create_share_from_snapshot( self, allocate_attr=None, dest_cluster=fake.CLUSTER_NAME, is_flexgroup=False, flexgroup_error=False): class FakeDBObj(dict): def to_dict(self): return self if allocate_attr is None: allocate_attr = mock.Mock() self.src_vserver_client = mock.Mock() self.mock_dm_session = mock.Mock() self.fake_share = FakeDBObj(fake.SHARE) self.fake_share_server = FakeDBObj(fake.SHARE_SERVER) self.mock_dm_constr = self.mock_object( data_motion, "DataMotionSession", mock.Mock(return_value=self.mock_dm_session)) self.mock_dm_backend = self.mock_object( self.mock_dm_session, 'get_backend_info_for_share', mock.Mock(return_value=(None, fake.VSERVER1, fake.BACKEND_NAME))) self.mock_dm_get_src_client = self.mock_object( data_motion, 'get_client_for_backend', mock.Mock(return_value=self.src_vserver_client)) self.mock_get_src_cluster = self.mock_object( self.src_vserver_client, 'get_cluster_name', mock.Mock(return_value=fake.CLUSTER_NAME)) self.dest_vserver_client = mock.Mock() self.mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER2, self.dest_vserver_client))) self.mock_get_dest_cluster = self.mock_object( self.dest_vserver_client, 'get_cluster_name', mock.Mock(return_value=dest_cluster)) self.mock_extract_host = self.mock_object( share_utils, 'extract_host', mock.Mock(return_value=fake.POOL_NAME)) self.mock_is_flexgroup_share = self.mock_object( self.library, '_is_flexgroup_share', mock.Mock(return_value=is_flexgroup)) 
self.mock_is_flexgroup_pool = self.mock_object( self.library, '_is_flexgroup_pool', mock.Mock(return_value=(not is_flexgroup if flexgroup_error else is_flexgroup))) self.mock_get_aggregate_for_volume = self.mock_object( self.src_vserver_client, 'get_aggregate_for_volume', mock.Mock(return_value=[fake.POOL_NAME])) self.mock_get_flexgroup_aggregate_list = self.mock_object( self.library, '_get_flexgroup_aggregate_list', mock.Mock(return_value=[fake.POOL_NAME])) self.mock_allocate_container_from_snapshot = self.mock_object( self.library, '_allocate_container_from_snapshot', allocate_attr) self.mock_allocate_container = self.mock_object( self.library, '_allocate_container') self.mock_get_relationship_type = self.mock_object( na_utils, 'get_relationship_type', mock.Mock(return_value=(na_utils.EXTENDED_DATA_PROTECTION_TYPE if is_flexgroup else na_utils.DATA_PROTECTION_TYPE))) self.mock_dm_create_snapmirror = self.mock_object( self.mock_dm_session, 'create_snapmirror') self.mock_storage_update = self.mock_object( self.library.private_storage, 'update') self.mock_generate_uuid = self.mock_object( uuidutils, 'generate_uuid', mock.Mock(return_value=fake.SHARE_ID5)) # Parent share on MANILA_HOST_2 self.parent_share = copy.copy(fake.SHARE) self.parent_share['share_server'] = fake.SHARE_SERVER_2 self.parent_share['host'] = fake.MANILA_HOST_NAME_2 self.parent_share_server = {} ss_keys = ['id', 'identifier', 'backend_details', 'host'] for key in ss_keys: self.parent_share_server[key] = ( self.parent_share['share_server'].get(key, None)) self.temp_src_share = { 'id': self.fake_share['id'], 'host': self.parent_share['host'], 'share_server': self.parent_share_server or None } @ddt.data({'dest_cluster': fake.CLUSTER_NAME, 'is_flexgroup': False, 'have_cluster_creds': False}, {'dest_cluster': fake.CLUSTER_NAME, 'is_flexgroup': False, 'have_cluster_creds': True}, {'dest_cluster': fake.CLUSTER_NAME, 'is_flexgroup': True, 'have_cluster_creds': False}, {'dest_cluster': fake.CLUSTER_NAME_2, 
'is_flexgroup': False, 'have_cluster_creds': False}, {'dest_cluster': fake.CLUSTER_NAME_2, 'is_flexgroup': False, 'have_cluster_creds': True}, ) @ddt.unpack def test_create_share_from_snapshot_another_host(self, dest_cluster, is_flexgroup, have_cluster_creds): self.library._have_cluster_creds = have_cluster_creds self._setup_mocks_for_create_share_from_snapshot( dest_cluster=dest_cluster, is_flexgroup=is_flexgroup) mock_get_backend_shr_name = self.mock_object( self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) result = self.library.create_share_from_snapshot( self.context, self.fake_share, fake.SNAPSHOT, share_server=self.fake_share_server, parent_share=self.parent_share) self.fake_share['share_server'] = self.fake_share_server self.mock_dm_constr.assert_called_once() self.mock_dm_backend.assert_called_once_with(self.parent_share) self.mock_dm_get_src_client.assert_called_once_with( fake.BACKEND_NAME, vserver_name=fake.VSERVER1) self.mock_get_vserver.assert_called_once_with(self.fake_share_server) if have_cluster_creds: self.mock_get_dest_cluster.assert_called_once() self.mock_get_src_cluster.assert_called_once() else: self.mock_get_dest_cluster.assert_not_called() self.mock_get_src_cluster.assert_not_called() mock_get_backend_shr_name.assert_called_once() self.mock_is_flexgroup_share.assert_called_once() self.mock_is_flexgroup_pool.assert_called_once() self.assertEqual(is_flexgroup, self.mock_get_aggregate_for_volume.called) self.assertEqual(is_flexgroup, self.mock_get_flexgroup_aggregate_list.called) if (dest_cluster != fake.CLUSTER_NAME or is_flexgroup or not have_cluster_creds): temp_share = copy.deepcopy(self.fake_share) temp_share["id"] = fake.SHARE_ID5 self.mock_allocate_container_from_snapshot.assert_called_once_with( temp_share, fake.SNAPSHOT, fake.VSERVER1, self.src_vserver_client, split=False, create_fpolicy=False) self.mock_allocate_container.assert_called_once_with( self.fake_share, fake.VSERVER2, 
self.dest_vserver_client, replica=True, set_qos=False) self.mock_dm_create_snapmirror.assert_called_once() self.temp_src_share['replica_state'] = ( constants.REPLICA_STATE_ACTIVE) state = self.library.STATE_SNAPMIRROR_DATA_COPYING else: self.mock_allocate_container_from_snapshot.assert_called_once_with( self.fake_share, fake.SNAPSHOT, fake.VSERVER1, self.src_vserver_client, split=True) state = self.library.STATE_SPLITTING_VOLUME_CLONE self.temp_src_share['aggregate'] = ([fake.POOL_NAME] if is_flexgroup else fake.POOL_NAME) self.temp_src_share['internal_state'] = state self.temp_src_share['status'] = constants.STATUS_ACTIVE str_temp_src_share = json.dumps(self.temp_src_share) self.mock_storage_update.assert_called_once_with( self.fake_share['id'], { 'source_share': str_temp_src_share }) expected_return = {'status': constants.STATUS_CREATING_FROM_SNAPSHOT} self.assertEqual(expected_return, result) @ddt.data(True, False) def test_create_share_from_snapshot_another_host_driver_error( self, have_cluster_creds): self.library._have_cluster_creds = have_cluster_creds self._setup_mocks_for_create_share_from_snapshot( allocate_attr=mock.Mock(side_effect=exception.NetAppException)) mock_delete_snapmirror = self.mock_object( self.mock_dm_session, 'delete_snapmirror') mock_delete_share = self.mock_object( self.library, '_delete_share') self.assertRaises(exception.NetAppException, self.library.create_share_from_snapshot, self.context, self.fake_share, fake.SNAPSHOT, share_server=self.fake_share_server, parent_share=self.parent_share) self.fake_share['share_server'] = self.fake_share_server self.mock_dm_constr.assert_called_once() self.mock_dm_backend.assert_called_once_with(self.parent_share) self.mock_dm_get_src_client.assert_called_once_with( fake.BACKEND_NAME, vserver_name=fake.VSERVER1) self.mock_get_vserver.assert_called_once_with(self.fake_share_server) if have_cluster_creds: self.mock_get_dest_cluster.assert_called_once() self.mock_get_src_cluster.assert_called_once() 
else: self.mock_get_dest_cluster.assert_not_called() self.mock_get_src_cluster.assert_not_called() if have_cluster_creds: self.mock_allocate_container_from_snapshot.assert_called_once_with( self.fake_share, fake.SNAPSHOT, fake.VSERVER1, self.src_vserver_client, split=True) else: self.mock_generate_uuid.assert_called_once() temp_share = copy.deepcopy(self.fake_share) temp_share["id"] = fake.SHARE_ID5 self.mock_allocate_container_from_snapshot.assert_called_once_with( temp_share, fake.SNAPSHOT, fake.VSERVER1, self.src_vserver_client, split=False, create_fpolicy=False) mock_delete_snapmirror.assert_called_once_with( self.temp_src_share, self.fake_share) mock_delete_share.assert_called_once_with( self.temp_src_share, fake.VSERVER1, self.src_vserver_client, remove_export=False) def test_create_share_from_snapshot_different_pool_types(self): self._setup_mocks_for_create_share_from_snapshot( dest_cluster=fake.CLUSTER_NAME_2, is_flexgroup=True, flexgroup_error=True) self.assertRaises(exception.NetAppException, self.library.create_share_from_snapshot, self.context, self.fake_share, fake.SNAPSHOT, share_server=self.fake_share_server, parent_share=self.parent_share) def test_create_share_from_snapshot_mismatch_flexgroup_pools_len(self): self._setup_mocks_for_create_share_from_snapshot( dest_cluster=fake.CLUSTER_NAME_2, is_flexgroup=True) self.mock_object( self.library, '_get_flexgroup_aggregate_list', mock.Mock(return_value=[])) self.library._is_flexgroup_auto = False self.assertRaises(exception.NetAppException, self.library.create_share_from_snapshot, self.context, self.fake_share, fake.SNAPSHOT, share_server=self.fake_share_server, parent_share=self.parent_share) def test__update_create_from_snapshot_status(self): fake_result = mock.Mock() mock_pvt_storage_get = self.mock_object( self.library.private_storage, 'get', mock.Mock(return_value=fake.SHARE)) mock__create_continue = self.mock_object( self.library, '_create_from_snapshot_continue', 
mock.Mock(return_value=fake_result)) result = self.library._update_create_from_snapshot_status( fake.SHARE, fake.SHARE_SERVER) mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'], 'source_share') mock__create_continue.assert_called_once_with(fake.SHARE, fake.SHARE_SERVER) self.assertEqual(fake_result, result) def test__update_create_from_snapshot_status_missing_source_share(self): mock_pvt_storage_get = self.mock_object( self.library.private_storage, 'get', mock.Mock(return_value=None)) expected_result = {'status': constants.STATUS_ERROR} result = self.library._update_create_from_snapshot_status( fake.SHARE, fake.SHARE_SERVER) mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'], 'source_share') self.assertEqual(expected_result, result) def test__update_create_from_snapshot_status_driver_error(self): fake_src_share = { 'id': fake.SHARE['id'], 'host': fake.SHARE['host'], 'internal_state': 'fake_internal_state', } copy_fake_src_share = copy.deepcopy(fake_src_share) src_vserver_client = mock.Mock() mock_dm_session = mock.Mock() mock_pvt_storage_get = self.mock_object( self.library.private_storage, 'get', mock.Mock(return_value=json.dumps(copy_fake_src_share))) mock__create_continue = self.mock_object( self.library, '_create_from_snapshot_continue', mock.Mock(side_effect=exception.NetAppException)) mock_dm_constr = self.mock_object( data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) mock_delete_snapmirror = self.mock_object( mock_dm_session, 'delete_snapmirror') mock_dm_backend = self.mock_object( mock_dm_session, 'get_backend_info_for_share', mock.Mock(return_value=(None, fake.VSERVER1, fake.BACKEND_NAME))) mock_dm_get_src_client = self.mock_object( data_motion, 'get_client_for_backend', mock.Mock(return_value=src_vserver_client)) mock_get_backend_shr_name = self.mock_object( self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) mock_share_exits = self.mock_object( self.library, '_share_exists', 
mock.Mock(return_value=True)) mock_deallocate_container = self.mock_object( self.library, '_deallocate_container') mock_pvt_storage_delete = self.mock_object( self.library.private_storage, 'delete') mock_delete_policy = self.mock_object(self.library, '_delete_fpolicy_for_share') result = self.library._update_create_from_snapshot_status( fake.SHARE, fake.SHARE_SERVER) expected_result = {'status': constants.STATUS_ERROR} mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'], 'source_share') mock__create_continue.assert_called_once_with(fake.SHARE, fake.SHARE_SERVER) mock_dm_constr.assert_called_once() mock_delete_snapmirror.assert_called_once_with(fake_src_share, fake.SHARE) mock_dm_backend.assert_called_once_with(fake_src_share) mock_dm_get_src_client.assert_called_once_with( fake.BACKEND_NAME, vserver_name=fake.VSERVER1) mock_get_backend_shr_name.assert_called_once_with(fake_src_share['id']) mock_share_exits.assert_called_once_with(fake.SHARE_NAME, src_vserver_client) mock_deallocate_container.assert_called_once_with(fake.SHARE_NAME, src_vserver_client) mock_pvt_storage_delete.assert_called_once_with(fake.SHARE['id']) mock_delete_policy.assert_called_once_with(fake_src_share, fake.VSERVER1, src_vserver_client) self.assertEqual(expected_result, result) def _setup_mocks_for_create_from_snapshot_continue( self, src_host=fake.MANILA_HOST_NAME, dest_host=fake.MANILA_HOST_NAME, split_completed_result=True, move_completed_result=True, share_internal_state='fake_state', replica_state='in_sync', is_flexgroup=False): self.fake_export_location = 'fake_export_location' self.fake_src_share = { 'id': fake.SHARE['id'], 'host': src_host, 'aggregate': src_host.split('#')[1], 'internal_state': share_internal_state, } self.copy_fake_src_share = copy.deepcopy(self.fake_src_share) dest_pool = dest_host.split('#')[1] self.src_vserver_client = mock.Mock() self.dest_vserver_client = mock.Mock() self.mock_dm_session = mock.Mock() self.mock_dm_constr = self.mock_object( data_motion, 
"DataMotionSession", mock.Mock(return_value=self.mock_dm_session)) self.mock_pvt_storage_get = self.mock_object( self.library.private_storage, 'get', mock.Mock(return_value=json.dumps(self.copy_fake_src_share))) self.mock_dm_backend = self.mock_object( self.mock_dm_session, 'get_backend_info_for_share', mock.Mock(return_value=(None, fake.VSERVER1, fake.BACKEND_NAME))) self.mock_extract_host = self.mock_object( share_utils, 'extract_host', mock.Mock(return_value=dest_pool)) self.mock_is_flexgroup_pool = self.mock_object( self.library, '_is_flexgroup_pool', mock.Mock(return_value=is_flexgroup)) self.mock_get_flexgroup_aggregate_list = self.mock_object( self.library, '_get_flexgroup_aggregate_list', mock.Mock(return_value=dest_pool)) self.mock_dm_get_src_client = self.mock_object( data_motion, 'get_client_for_backend', mock.Mock(return_value=self.src_vserver_client)) self.mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER2, self.dest_vserver_client))) self.mock_split_completed = self.mock_object( self.library, '_check_volume_clone_split_completed', mock.Mock(return_value=split_completed_result)) self.mock_rehost_vol = self.mock_object( self.library, '_rehost_and_mount_volume') self.mock_move_vol = self.mock_object(self.library, '_move_volume_after_splitting') self.mock_move_completed = self.mock_object( self.library, '_check_volume_move_completed', mock.Mock(return_value=move_completed_result)) self.mock_update_rep_state = self.mock_object( self.library, 'update_replica_state', mock.Mock(return_value=replica_state) ) self.mock_update_snapmirror = self.mock_object( self.mock_dm_session, 'update_snapmirror') self.mock_break_snapmirror = self.mock_object( self.mock_dm_session, 'break_snapmirror') self.mock_delete_snapmirror = self.mock_object( self.mock_dm_session, 'delete_snapmirror') self.mock_get_backend_shr_name = self.mock_object( self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) 
        # (tail of _setup_mocks_for_create_from_snapshot_continue)
        # Stub out deletion/resize/export/private-storage helpers so the
        # continue-from-snapshot flow can be asserted without touching ONTAP.
        self.mock__delete_share = self.mock_object(self.library,
                                                   '_delete_share')
        self.mock_set_vol_size_fixes = self.mock_object(
            self.dest_vserver_client, 'set_volume_filesys_size_fixed')
        self.mock_create_export = self.mock_object(
            self.library, '_create_export',
            mock.Mock(return_value=self.fake_export_location))
        self.mock_pvt_storage_update = self.mock_object(
            self.library.private_storage, 'update')
        self.mock_pvt_storage_delete = self.mock_object(
            self.library.private_storage, 'delete')
        self.mock_get_extra_specs_qos = self.mock_object(
            share_types, 'get_extra_specs_from_share',
            mock.Mock(return_value=fake.EXTRA_SPEC_WITH_QOS))
        self.mock__get_provisioning_opts = self.mock_object(
            self.library, '_get_provisioning_options',
            mock.Mock(return_value=copy.deepcopy(fake.PROVISIONING_OPTIONS))
        )
        self.mock_modify_create_qos = self.mock_object(
            self.library, '_modify_or_create_qos_for_existing_share',
            mock.Mock(return_value=fake.QOS_POLICY_GROUP_NAME))
        self.mock_modify_vol = self.mock_object(self.dest_vserver_client,
                                                'modify_volume')
        self.mock_get_backend_qos_name = self.mock_object(
            self.library, '_get_backend_qos_policy_group_name',
            mock.Mock(return_value=fake.QOS_POLICY_GROUP_NAME))
        self.mock_mark_qos_deletion = self.mock_object(
            self.src_vserver_client, 'mark_qos_policy_group_for_deletion')

    @ddt.data(fake.MANILA_HOST_NAME, fake.MANILA_HOST_NAME_2)
    def test__create_from_snapshot_continue_state_splitting(self, src_host):
        """Continue flow while the clone split is in progress.

        When the destination host differs from the source, a volume move
        must be started; otherwise the share is finalized in place.
        """
        self._setup_mocks_for_create_from_snapshot_continue(
            src_host=src_host,
            share_internal_state=self.library.STATE_SPLITTING_VOLUME_CLONE)

        result = self.library._create_from_snapshot_continue(
            fake.SHARE, fake.SHARE_SERVER)

        fake.SHARE['share_server'] = fake.SHARE_SERVER
        self.mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'],
                                                          'source_share')
        self.mock_dm_backend.assert_called_once_with(self.fake_src_share)
        self.mock_extract_host.assert_has_calls([
            mock.call(fake.SHARE['host'], level='pool')])
        self.mock_dm_get_src_client.assert_called_once_with(
            fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
        self.mock_get_vserver.assert_called_once_with(fake.SHARE_SERVER)
        self.mock_split_completed.assert_called_once_with(
            self.fake_src_share, self.src_vserver_client)
        self.mock_get_backend_qos_name.assert_called_once_with(fake.SHARE_ID)
        self.mock_mark_qos_deletion.assert_called_once_with(
            fake.QOS_POLICY_GROUP_NAME)
        self.mock_rehost_vol.assert_called_once_with(
            fake.SHARE, fake.VSERVER1, self.src_vserver_client,
            fake.VSERVER2, self.dest_vserver_client)
        if src_host != fake.MANILA_HOST_NAME:
            # Different destination host: a volume move is kicked off and
            # the share stays in "creating from snapshot" state.
            expected_result = {
                'status': constants.STATUS_CREATING_FROM_SNAPSHOT
            }
            self.mock_move_vol.assert_called_once_with(
                self.fake_src_share, fake.SHARE, fake.SHARE_SERVER,
                cutover_action='defer')
            self.fake_src_share['internal_state'] = (
                self.library.STATE_MOVING_VOLUME)
            self.mock_pvt_storage_update.assert_called_once_with(
                fake.SHARE['id'],
                {'source_share': json.dumps(self.fake_src_share)}
            )
            self.assertEqual(expected_result, result)
        else:
            # Same host: QoS is (re)applied and the share becomes available.
            self.mock_get_extra_specs_qos.assert_called_once_with(fake.SHARE)
            self.mock__get_provisioning_opts.assert_called_once_with(
                fake.EXTRA_SPEC_WITH_QOS)
            self.mock_modify_create_qos.assert_called_once_with(
                fake.SHARE, fake.EXTRA_SPEC_WITH_QOS, fake.VSERVER2,
                self.dest_vserver_client)
            self.mock_get_backend_shr_name.assert_called_once_with(
                fake.SHARE_ID)
            self.mock_modify_vol.assert_called_once_with(
                fake.POOL_NAME, fake.SHARE_NAME,
                **fake.PROVISIONING_OPTIONS_WITH_QOS)
            self.mock_pvt_storage_delete.assert_called_once_with(
                fake.SHARE['id'])
            self.mock_create_export.assert_called_once_with(
                fake.SHARE, fake.SHARE_SERVER, fake.VSERVER2,
                self.dest_vserver_client, clear_current_export_policy=False)
            expected_result = {
                'status': constants.STATUS_AVAILABLE,
                'export_locations': self.fake_export_location,
            }
            self.assertEqual(expected_result, result)

    @ddt.data(True, False)
    def test__create_from_snapshot_continue_state_moving(self,
                                                         move_completed):
        """Continue flow while a cross-pool volume move is in progress."""
        self._setup_mocks_for_create_from_snapshot_continue(
            share_internal_state=self.library.STATE_MOVING_VOLUME,
            move_completed_result=move_completed)

        result = self.library._create_from_snapshot_continue(
            fake.SHARE, fake.SHARE_SERVER)

        expect_result = {
            'status': constants.STATUS_CREATING_FROM_SNAPSHOT
        }
        fake.SHARE['share_server'] = fake.SHARE_SERVER
        self.mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'],
                                                          'source_share')
        self.mock_dm_backend.assert_called_once_with(self.fake_src_share)
        self.mock_extract_host.assert_has_calls([
            mock.call(fake.SHARE['host'], level='pool'),
        ])
        self.mock_dm_get_src_client.assert_called_once_with(
            fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
        self.mock_get_vserver.assert_called_once_with(fake.SHARE_SERVER)
        self.mock_move_completed.assert_called_once_with(
            fake.SHARE, fake.SHARE_SERVER)
        if move_completed:
            # Move finished: private storage entry is dropped, the export
            # is created and the share transitions to available.
            expect_result['status'] = constants.STATUS_AVAILABLE
            self.mock_pvt_storage_delete.assert_called_once_with(
                fake.SHARE['id'])
            self.mock_create_export.assert_called_once_with(
                fake.SHARE, fake.SHARE_SERVER, fake.VSERVER2,
                self.dest_vserver_client, clear_current_export_policy=False)
            expect_result['export_locations'] = self.fake_export_location
            self.assertEqual(expect_result, result)
        else:
            # Move still running: state is persisted and re-polled later.
            self.mock_pvt_storage_update.assert_called_once_with(
                fake.SHARE['id'],
                {'source_share': json.dumps(self.fake_src_share)}
            )
            self.assertEqual(expect_result, result)

    @ddt.data({'replica_state': 'in_sync', 'is_flexgroup': False},
              {'replica_state': 'out_of_sync', 'is_flexgroup': False},
              {'replica_state': 'out_of_sync', 'is_flexgroup': True})
    @ddt.unpack
    def test__create_from_snapshot_continue_state_snapmirror(self,
                                                             replica_state,
                                                             is_flexgroup):
        """Continue flow while SnapMirror data copy is in progress."""
        self._setup_mocks_for_create_from_snapshot_continue(
            share_internal_state=self.library.STATE_SNAPMIRROR_DATA_COPYING,
            replica_state=replica_state, is_flexgroup=is_flexgroup)

        result = self.library._create_from_snapshot_continue(
            fake.SHARE, fake.SHARE_SERVER)

        expect_result = {
            'status': constants.STATUS_CREATING_FROM_SNAPSHOT
        }
        fake.SHARE['share_server'] = fake.SHARE_SERVER
        self.mock_pvt_storage_get.assert_called_once_with(fake.SHARE['id'],
                                                          'source_share')
        self.mock_dm_backend.assert_called_once_with(self.fake_src_share)
        self.mock_extract_host.assert_has_calls([
            mock.call(fake.SHARE['host'], level='pool')])
        self.mock_dm_get_src_client.assert_called_once_with(
            fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
        self.mock_get_vserver.assert_called_once_with(fake.SHARE_SERVER)
        self.mock_update_rep_state.assert_called_once_with(
            None, [self.fake_src_share], fake.SHARE, [], [],
            fake.SHARE_SERVER, replication=False
        )
        # FlexGroup pools resolve their aggregate list; FlexVols do not.
        self.assertEqual(is_flexgroup,
                         self.mock_get_flexgroup_aggregate_list.called)
        if replica_state == constants.REPLICA_STATE_IN_SYNC:
            # Replica in sync: the mirror is finalized (update/break/delete),
            # the temporary source share is removed and QoS is applied.
            self.mock_update_snapmirror.assert_called_once_with(
                self.fake_src_share, fake.SHARE)
            self.mock_break_snapmirror.assert_called_once_with(
                self.fake_src_share, fake.SHARE)
            self.mock_delete_snapmirror.assert_called_once_with(
                self.fake_src_share, fake.SHARE)
            self.mock_get_backend_shr_name.assert_has_calls(
                [mock.call(self.fake_src_share['id']),
                 mock.call(fake.SHARE_ID)])
            self.mock__delete_share.assert_called_once_with(
                self.fake_src_share, fake.VSERVER1, self.src_vserver_client,
                remove_export=False)
            self.mock_set_vol_size_fixes.assert_called_once_with(
                fake.SHARE_NAME, filesys_size_fixed=False)
            self.mock_get_extra_specs_qos.assert_called_once_with(fake.SHARE)
            self.mock__get_provisioning_opts.assert_called_once_with(
                fake.EXTRA_SPEC_WITH_QOS)
            self.mock_modify_create_qos.assert_called_once_with(
                fake.SHARE, fake.EXTRA_SPEC_WITH_QOS, fake.VSERVER2,
                self.dest_vserver_client)
            self.mock_modify_vol.assert_called_once_with(
                fake.POOL_NAME, fake.SHARE_NAME,
                **fake.PROVISIONING_OPTIONS_WITH_QOS)
            expect_result['status'] = constants.STATUS_AVAILABLE
            self.mock_pvt_storage_delete.assert_called_once_with(
                fake.SHARE['id'])
            self.mock_create_export.assert_called_once_with(
                fake.SHARE, fake.SHARE_SERVER, fake.VSERVER2,
                self.dest_vserver_client, clear_current_export_policy=False)
expect_result['export_locations'] = self.fake_export_location self.assertEqual(expect_result, result) elif replica_state not in [constants.STATUS_ERROR, None]: self.mock_pvt_storage_update.assert_called_once_with( fake.SHARE['id'], {'source_share': json.dumps(self.fake_src_share)} ) self.assertEqual(expect_result, result) def test__create_from_snapshot_continue_state_unknown(self): self._setup_mocks_for_create_from_snapshot_continue( share_internal_state='unknown_state') self.assertRaises(exception.NetAppException, self.library._create_from_snapshot_continue, fake.SHARE, fake.SHARE_SERVER) self.mock_pvt_storage_delete.assert_called_once_with(fake.SHARE_ID) @ddt.data({'hide_snapdir': False, 'create_fpolicy': True, 'is_fg': True, 'with_encryption': True}, {'hide_snapdir': True, 'create_fpolicy': False, 'is_fg': True, 'with_encryption': False}, {'hide_snapdir': False, 'create_fpolicy': True, 'is_fg': False, 'with_encryption': True}, {'hide_snapdir': True, 'create_fpolicy': False, 'is_fg': False, 'with_encryption': False}) @ddt.unpack def test_allocate_container(self, hide_snapdir, create_fpolicy, is_fg, with_encryption): provisioning_options = copy.deepcopy( fake.PROVISIONING_OPTIONS_WITH_FPOLICY) provisioning_options['hide_snapdir'] = hide_snapdir provisioning_options['snaplock_type'] = "compliance" self.mock_object(self.library, '_get_backend_share_name', mock.Mock( return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock( return_value=fake.POOL_NAME)) mock_get_provisioning_opts = self.mock_object( self.library, '_get_provisioning_options_for_share', mock.Mock(return_value=provisioning_options)) mock_create_fpolicy = self.mock_object( self.library, '_create_fpolicy_for_share') self.mock_object( self.library, '_is_flexgroup_pool', mock.Mock(return_value=is_fg)) mock_create_flexgroup = self.mock_object(self.library, '_create_flexgroup_share') mock_get_aggr_flexgroup = self.mock_object( self.library, '_get_flexgroup_aggregate_list', 
mock.Mock(return_value=[fake.AGGREGATE])) vserver_client = mock.Mock() fake_share = ( fake.SHARE_INSTANCE_WITH_ENCRYPTION if with_encryption else fake.SHARE_INSTANCE) self.library._allocate_container(fake_share, fake.VSERVER1, vserver_client, create_fpolicy=create_fpolicy) mock_get_provisioning_opts.assert_called_once_with( fake_share, fake.VSERVER1, vserver_client=vserver_client, set_qos=True) if is_fg: mock_get_aggr_flexgroup.assert_called_once_with(fake.POOL_NAME) mock_create_flexgroup.assert_called_once_with( vserver_client, [fake.AGGREGATE], fake.SHARE_NAME, fake.SHARE['size'], 8, mount_point_name=fake.MOUNT_POINT_NAME, **provisioning_options) else: mock_get_aggr_flexgroup.assert_not_called() vserver_client.create_volume.assert_called_once_with( fake.POOL_NAME, fake.SHARE_NAME, fake.SHARE['size'], snapshot_reserve=8, mount_point_name=fake.MOUNT_POINT_NAME, **provisioning_options) if hide_snapdir: vserver_client.set_volume_snapdir_access.assert_called_once_with( fake.SHARE_NAME, hide_snapdir) else: vserver_client.set_volume_snapdir_access.assert_not_called() if create_fpolicy: mock_create_fpolicy.assert_called_once_with( fake_share, fake.VSERVER1, vserver_client, **provisioning_options) else: mock_create_fpolicy.assert_not_called() def test_remap_standard_boolean_extra_specs(self): extra_specs = copy.deepcopy(fake.OVERLAPPING_EXTRA_SPEC) result = self.library._remap_standard_boolean_extra_specs(extra_specs) self.assertDictEqual(fake.REMAPPED_OVERLAPPING_EXTRA_SPEC, result) def test_allocate_container_as_replica(self): self.mock_object(self.library, '_get_backend_share_name', mock.Mock( return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock( return_value=fake.POOL_NAME)) mock_get_provisioning_opts = self.mock_object( self.library, '_get_provisioning_options_for_share', mock.Mock(return_value=copy.deepcopy(fake.PROVISIONING_OPTIONS))) vserver_client = mock.Mock() self.library._allocate_container(fake.SHARE_INSTANCE, fake.VSERVER1, 
vserver_client, replica=True) mock_get_provisioning_opts.assert_called_once_with( fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client=vserver_client, set_qos=True) vserver_client.create_volume.assert_called_once_with( fake.POOL_NAME, fake.SHARE_NAME, fake.SHARE['size'], thin_provisioned=True, snapshot_policy='default', language='en-US', dedup_enabled=True, split=True, compression_enabled=False, max_files=5000, encrypt=False, snapshot_reserve=8, mount_point_name=fake.MOUNT_POINT_NAME, volume_type='dp', adaptive_qos_policy_group=None) def test_allocate_container_no_pool_name(self): self.mock_object(self.library, '_get_backend_share_name', mock.Mock( return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock( return_value=None)) self.mock_object(self.library, '_check_extra_specs_validity') self.mock_object(self.library, '_get_provisioning_options') vserver_client = mock.Mock() self.assertRaises(exception.InvalidHost, self.library._allocate_container, fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client) self.library._get_backend_share_name.assert_called_once_with( fake.SHARE_INSTANCE['id']) share_utils.extract_host.assert_called_once_with( fake.SHARE_INSTANCE['host'], level='pool') self.assertEqual(0, self.library._check_extra_specs_validity.call_count) self.assertEqual(0, self.library._get_provisioning_options.call_count) @ddt.data(None, 1000) def test_create_flexgroup_share(self, max_files): self.library.configuration.netapp_flexgroup_volume_online_timeout = 2 vserver_client = mock.Mock() vserver_client.get_job_state.return_value = "success" mock_wait_for_start = self.mock_object( self.library, 'wait_for_start_create_flexgroup', mock.Mock(return_value={'jobid': fake.JOB_ID, 'error-code': None})) mock_wait_for_flexgroup_deployment = self.mock_object( self.library, 'wait_for_flexgroup_deployment') aggr_list = [fake.AGGREGATE] options = {'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} 
self.library._create_flexgroup_share(vserver_client, aggr_list, fake.SHARE_NAME, 100, 10, max_files=max_files, snaplock_type="compliance", **options) start_timeout = (self.library.configuration. netapp_flexgroup_aggregate_not_busy_timeout) mock_wait_for_start.assert_called_once_with( start_timeout, vserver_client, aggr_list, fake.SHARE_NAME, 100, 10, None, "compliance", efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME) mock_wait_for_flexgroup_deployment.assert_called_once_with( vserver_client, fake.JOB_ID, 2) vserver_client.update_volume_efficiency_attributes.assert_called_once_with( # noqa fake.SHARE_NAME, False, False, is_flexgroup=True, efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME ) if max_files: vserver_client.set_volume_max_files.assert_called_once_with( fake.SHARE_NAME, max_files) else: self.assertFalse(vserver_client.set_volume_max_files.called) @ddt.data( {'jobid': fake.JOB_ID, 'error-code': 'fake', 'error-message': 'fake'}, {'jobid': None, 'error-code': None, 'error-message': 'fake'}) def test_create_flexgroup_share_raise_error_job(self, job): vserver_client = mock.Mock() self.mock_object(self.library, 'wait_for_start_create_flexgroup', mock.Mock(return_value=job)) aggr_list = [fake.AGGREGATE] self.assertRaises( exception.NetAppException, self.library._create_flexgroup_share, vserver_client, aggr_list, fake.SHARE_NAME, 100, 10) def test_wait_for_start_create_flexgroup(self): vserver_client = mock.Mock() job = {'jobid': fake.JOB_ID, 'error-code': None} vserver_client.create_volume_async.return_value = job aggr_list = [fake.AGGREGATE] result = self.library.wait_for_start_create_flexgroup( 20, vserver_client, aggr_list, fake.SHARE_NAME, 1, 10, fake.MOUNT_POINT_NAME, "compliance") self.assertEqual(job, result) vserver_client.create_volume_async.assert_called_once_with( aggr_list, fake.SHARE_NAME, 1, is_flexgroup=True, snapshot_reserve=10, auto_provisioned=self.library._is_flexgroup_auto, mount_point_name=fake.MOUNT_POINT_NAME, 
snaplock_type="compliance") def test_wait_for_start_create_flexgroup_timeout(self): vserver_client = mock.Mock() vserver_client.create_volume_async.side_effect = ( netapp_api.NaApiError(code=netapp_api.EAPIERROR, message="try the command again")) aggr_list = [fake.AGGREGATE] self.assertRaises( exception.NetAppException, self.library.wait_for_start_create_flexgroup, 10, vserver_client, aggr_list, fake.SHARE_NAME, 1, 10, fake.MOUNT_POINT_NAME, "compliance") def test_wait_for_flexgroup_deployment(self): vserver_client = mock.Mock() vserver_client.get_job_state.return_value = 'success' result = self.library.wait_for_flexgroup_deployment( vserver_client, fake.JOB_ID, 20) self.assertIsNone(result) vserver_client.get_job_state.assert_called_once_with(fake.JOB_ID) def test_wait_for_flexgroup_deployment_timeout(self): vserver_client = mock.Mock() vserver_client.get_job_state.return_value = 'queued' self.assertRaises( exception.NetAppException, self.library.wait_for_flexgroup_deployment, vserver_client, fake.JOB_ID, 10) @ddt.data('failure', 'error') def test_wai_for_flexgroup_deployment_job_error(self, error_state): vserver_client = mock.Mock() vserver_client.get_job_state.return_value = error_state self.assertRaises( exception.NetAppException, self.library.wait_for_flexgroup_deployment, vserver_client, fake.JOB_ID, 10) def test_check_extra_specs_validity(self): boolean_extra_spec_keys = list( self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP) mock_bool_check = self.mock_object( self.library, '_check_boolean_extra_specs_validity') mock_string_check = self.mock_object( self.library, '_check_string_extra_specs_validity') self.library._check_extra_specs_validity( fake.SHARE_INSTANCE, fake.EXTRA_SPEC) mock_bool_check.assert_called_once_with( fake.SHARE_INSTANCE, fake.EXTRA_SPEC, boolean_extra_spec_keys) mock_string_check.assert_called_once_with( fake.SHARE_INSTANCE, fake.EXTRA_SPEC) def test_check_extra_specs_validity_empty_spec(self): result = 
self.library._check_extra_specs_validity( fake.SHARE_INSTANCE, fake.EMPTY_EXTRA_SPEC) self.assertIsNone(result) def test_check_extra_specs_validity_invalid_value(self): self.assertRaises( exception.Invalid, self.library._check_extra_specs_validity, fake.SHARE_INSTANCE, fake.INVALID_EXTRA_SPEC) def test_check_string_extra_specs_validity(self): result = self.library._check_string_extra_specs_validity( fake.SHARE_INSTANCE, fake.EXTRA_SPEC_WITH_FPOLICY) self.assertIsNone(result) def test_check_string_extra_specs_validity_empty_spec(self): result = self.library._check_string_extra_specs_validity( fake.SHARE_INSTANCE, fake.EMPTY_EXTRA_SPEC) self.assertIsNone(result) def test_check_string_extra_specs_validity_invalid_value(self): self.assertRaises( exception.NetAppException, self.library._check_string_extra_specs_validity, fake.SHARE_INSTANCE, fake.INVALID_MAX_FILE_EXTRA_SPEC) def test_check_boolean_extra_specs_validity_invalid_value(self): self.assertRaises( exception.Invalid, self.library._check_boolean_extra_specs_validity, fake.SHARE_INSTANCE, fake.INVALID_EXTRA_SPEC, list(self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)) def test_check_extra_specs_validity_invalid_combination(self): self.assertRaises( exception.Invalid, self.library._check_boolean_extra_specs_validity, fake.SHARE_INSTANCE, fake.INVALID_EXTRA_SPEC_COMBO, list(self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)) @ddt.data({'extra_specs': fake.EXTRA_SPEC, 'set_qos': True}, {'extra_specs': fake.EXTRA_SPEC_WITH_QOS, 'set_qos': False}, {'extra_specs': fake.EXTRA_SPEC, 'set_qos': True}, {'extra_specs': fake.EXTRA_SPEC_WITH_QOS, 'set_qos': False}) @ddt.unpack def test_get_provisioning_options_for_share(self, extra_specs, set_qos): qos = True if fake.QOS_EXTRA_SPEC in extra_specs else False vserver_client = mock.Mock() self.library._have_cluster_creds = True mock_get_extra_specs_from_share = self.mock_object( share_types, 'get_extra_specs_from_share', mock.Mock(return_value=extra_specs)) 
mock_remap_standard_boolean_extra_specs = self.mock_object( self.library, '_remap_standard_boolean_extra_specs', mock.Mock(return_value=extra_specs)) mock_check_extra_specs_validity = self.mock_object( self.library, '_check_extra_specs_validity') mock_get_provisioning_options = self.mock_object( self.library, '_get_provisioning_options', mock.Mock(return_value=fake.PROVISIONING_OPTIONS)) mock_get_normalized_qos_specs = self.mock_object( self.library, '_get_normalized_qos_specs', mock.Mock(return_value={fake.QOS_NORMALIZED_SPEC: 3000})) mock_create_qos_policy_group = self.mock_object( self.library, '_create_qos_policy_group', mock.Mock( return_value=fake.QOS_POLICY_GROUP_NAME)) result = self.library._get_provisioning_options_for_share( fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client=vserver_client, set_qos=set_qos) if qos and not set_qos: expected_provisioning_opts = fake.PROVISIONING_OPTIONS self.assertFalse(mock_create_qos_policy_group.called) else: expected_provisioning_opts = fake.PROVISIONING_OPTIONS_WITH_QOS mock_create_qos_policy_group.assert_called_once_with( fake.SHARE_INSTANCE, fake.VSERVER1, {fake.QOS_NORMALIZED_SPEC: 3000}, vserver_client) self.assertEqual(expected_provisioning_opts, result) mock_get_extra_specs_from_share.assert_called_once_with( fake.SHARE_INSTANCE) mock_remap_standard_boolean_extra_specs.assert_called_once_with( extra_specs) mock_check_extra_specs_validity.assert_called_once_with( fake.SHARE_INSTANCE, extra_specs) mock_get_provisioning_options.assert_called_once_with(extra_specs) mock_get_normalized_qos_specs.assert_called_once_with(extra_specs) def test_get_provisioning_options_for_share_qos_conflict(self): vserver_client = mock.Mock() extra_specs = fake.EXTRA_SPEC_WITH_QOS mock_get_extra_specs_from_share = self.mock_object( share_types, 'get_extra_specs_from_share', mock.Mock(return_value=extra_specs)) mock_remap_standard_boolean_extra_specs = self.mock_object( self.library, '_remap_standard_boolean_extra_specs', 
mock.Mock(return_value=extra_specs)) mock_check_extra_specs_validity = self.mock_object( self.library, '_check_extra_specs_validity') mock_get_provisioning_options = self.mock_object( self.library, '_get_provisioning_options', mock.Mock(return_value=fake.PROVISIONING_OPTS_WITH_ADAPT_QOS)) mock_get_normalized_qos_specs = self.mock_object( self.library, '_get_normalized_qos_specs', mock.Mock(return_value={fake.QOS_NORMALIZED_SPEC: 3000})) self.assertRaises(exception.NetAppException, self.library._get_provisioning_options_for_share, fake.SHARE_INSTANCE, fake.VSERVER1, vserver_client=vserver_client, set_qos=True) mock_get_extra_specs_from_share.assert_called_once_with( fake.SHARE_INSTANCE) mock_remap_standard_boolean_extra_specs.assert_called_once_with( extra_specs) mock_check_extra_specs_validity.assert_called_once_with( fake.SHARE_INSTANCE, extra_specs) mock_get_provisioning_options.assert_called_once_with(extra_specs) mock_get_normalized_qos_specs.assert_called_once_with(extra_specs) def test_get_provisioning_options_implicit_false(self): result = self.library._get_provisioning_options( fake.EMPTY_EXTRA_SPEC) expected = { 'adaptive_qos_policy_group': None, 'language': None, 'max_files': None, 'max_files_multiplier': None, 'snapshot_policy': None, 'thin_provisioned': False, 'compression_enabled': False, 'dedup_enabled': False, 'split': False, 'encrypt': False, 'hide_snapdir': False, 'fpolicy_extensions_to_exclude': None, 'fpolicy_extensions_to_include': None, 'fpolicy_file_operations': None, 'efficiency_policy': None, 'snaplock_type': None, 'snaplock_autocommit_period': None, 'snaplock_min_retention_period': None, 'snaplock_max_retention_period': None, 'snaplock_default_retention_period': None, } self.assertEqual(expected, result) def test_get_boolean_provisioning_options(self): result = self.library._get_boolean_provisioning_options( fake.SHORT_BOOLEAN_EXTRA_SPEC, self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP) self.assertEqual(fake.PROVISIONING_OPTIONS_BOOLEAN, 
                         result)

    def test_get_boolean_provisioning_options_missing_spec(self):
        """Absent boolean specs fall back to their defaults."""
        result = self.library._get_boolean_provisioning_options(
            fake.SHORT_BOOLEAN_EXTRA_SPEC,
            self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)

        self.assertEqual(fake.PROVISIONING_OPTIONS_BOOLEAN, result)

    def test_get_boolean_provisioning_options_implicit_false(self):
        """Empty specs yield all-False boolean options."""
        expected = {
            'thin_provisioned': False,
            'dedup_enabled': False,
            'compression_enabled': False,
            'split': False,
            'hide_snapdir': False,
        }

        result = self.library._get_boolean_provisioning_options(
            fake.EMPTY_EXTRA_SPEC,
            self.library.BOOLEAN_QUALIFIED_EXTRA_SPECS_MAP)

        self.assertEqual(expected, result)

    def test_get_string_provisioning_options(self):
        result = self.library.get_string_provisioning_options(
            fake.STRING_EXTRA_SPEC,
            self.library.STRING_QUALIFIED_EXTRA_SPECS_MAP)

        self.assertEqual(fake.PROVISIONING_OPTIONS_STRING, result)

    def test_get_string_provisioning_options_missing_spec(self):
        result = self.library.get_string_provisioning_options(
            fake.SHORT_STRING_EXTRA_SPEC,
            self.library.STRING_QUALIFIED_EXTRA_SPECS_MAP)

        self.assertEqual(fake.PROVISIONING_OPTIONS_STRING_MISSING_SPECS,
                         result)

    def test_get_string_provisioning_options_implicit_false(self):
        result = self.library.get_string_provisioning_options(
            fake.EMPTY_EXTRA_SPEC,
            self.library.STRING_QUALIFIED_EXTRA_SPECS_MAP)

        self.assertEqual(fake.PROVISIONING_OPTIONS_STRING_DEFAULT, result)

    @ddt.data({}, {'foo': 'bar'}, {'netapp:maxiops': '3000'},
              {'qos': True, 'netapp:absiops': '3000'},
              {'qos': True, 'netapp:maxiops:': '3000'})
    def test_get_normalized_qos_specs_no_qos_specs(self, extra_specs):
        """qos=True with unknown/malformed spec keys raises; else empty."""
        if 'qos' in extra_specs:
            self.assertRaises(exception.NetAppException,
                              self.library._get_normalized_qos_specs,
                              extra_specs)
        else:
            self.assertDictEqual(
                {}, self.library._get_normalized_qos_specs(extra_specs))

    @ddt.data({'qos': True, 'netapp:maxiops': '3000',
               'netapp:maxbps': '9000'},
              {'qos': True, 'netapp:maxiopspergib': '1000',
               'netapp:maxiops': '1000'})
    def test_get_normalized_qos_specs_multiple_qos_specs(self, extra_specs):
        """Only one QoS limit spec may be supplied at a time."""
        self.assertRaises(exception.NetAppException,
                          self.library._get_normalized_qos_specs,
                          extra_specs)

    @ddt.data({'qos': True, 'netapp:maxIOPS': '3000'},
              {'qos': True, 'netapp:MAxBPs': '3000', 'clem': 'son'},
              {'qos': True, 'netapp:maxbps': '3000', 'tig': 'ers'},
              {'qos': True, 'netapp:MAXiopSPerGib': '3000', 'kin': 'gsof'},
              {'qos': True, 'netapp:maxiopspergib': '3000', 'coll': 'ege'},
              {'qos': True, 'netapp:maxBPSperGiB': '3000', 'foot': 'ball'})
    def test_get_normalized_qos_specs(self, extra_specs):
        """QoS spec keys are lower-cased and stripped of the netapp: prefix."""
        expected_normalized_spec = {
            key.lower().split('netapp:')[1]: value
            for key, value in extra_specs.items() if 'netapp:' in key
        }

        qos_specs = self.library._get_normalized_qos_specs(extra_specs)

        self.assertDictEqual(expected_normalized_spec, qos_specs)
        self.assertEqual(1, len(qos_specs))

    @ddt.data({'qos': {'maxiops': '3000'}, 'expected': '3000iops'},
              {'qos': {'maxbps': '3000'}, 'expected': '3000B/s'},
              {'qos': {'maxbpspergib': '3000'}, 'expected': '12000B/s'},
              {'qos': {'maxiopspergib': '3000'}, 'expected': '12000iops'})
    @ddt.unpack
    def test_get_max_throughput(self, qos, expected):
        """Per-GiB limits are scaled by the share size (4 GiB here)."""
        throughput = self.library._get_max_throughput(4, qos)

        self.assertEqual(expected, throughput)

    def test_create_qos_policy_group(self):
        """The policy group name is derived from the share id."""
        mock_qos_policy_create = self.mock_object(
            self.library._client, 'qos_policy_group_create')

        self.library._create_qos_policy_group(
            fake.SHARE, fake.VSERVER1, {'maxiops': '3000'})

        expected_policy_name = 'qos_share_' + fake.SHARE['id'].replace(
            '-', '_')
        mock_qos_policy_create.assert_called_once_with(
            expected_policy_name, fake.VSERVER1, max_throughput='3000iops')

    def test_check_if_max_files_is_valid_with_negative_integer(self):
        self.assertRaises(exception.NetAppException,
                          self.library._check_if_max_files_is_valid,
                          fake.SHARE, -1)

    def test_check_if_max_files_is_valid_with_string(self):
        self.assertRaises(ValueError,
                          self.library._check_if_max_files_is_valid,
                          fake.SHARE, 'abc')

    def test__check_fpolicy_file_operations(self):
        """Known FPolicy file operations validate cleanly."""
        result = self.library._check_fpolicy_file_operations(
            fake.SHARE, fake.FPOLICY_FILE_OPERATIONS)

        self.assertIsNone(result)

    def test__check_fpolicy_file_operations_invalid_operation(self):
        invalid_ops = copy.deepcopy(fake.FPOLICY_FILE_OPERATIONS)
        invalid_ops += ',fake_op'

        self.assertRaises(exception.NetAppException,
                          self.library._check_fpolicy_file_operations,
                          fake.SHARE, invalid_ops)

    @ddt.data('15minutes', '4hours', "8days", "5months", "2years")
    def test__check_snaplock_attributes_autocommit_period(self, duration):
        result = self.library._check_snaplock_attributes(
            fake.SHARE, "netapp:snaplock_autocommit_period", duration)
        self.assertIsNone(result)

    @ddt.data('15minutes', '4hours', "8days", "5months", "2years")
    def test__check_snaplock_attributes_min_retention_period(self, duration):
        result = self.library._check_snaplock_attributes(
            fake.SHARE, "netapp:snaplock_min_retention_period", duration)
        self.assertIsNone(result)

    @ddt.data('15minutes', '4hours', "8days", "5months", "2years",
              "infinite")
    def test__check_snaplock_attributes_max_retention_period(self, duration):
        result = self.library._check_snaplock_attributes(
            fake.SHARE, "netapp:snaplock_max_retention_period", duration)
        self.assertIsNone(result)

    @ddt.data('15minutes', '4hours', "8days", "5months", "2years",
              "infinite", "min", "max")
    def test__check_snaplock_attributes_default_retention_period(self,
                                                                 duration):
        result = self.library._check_snaplock_attributes(
            fake.SHARE, "netapp:snaplock_default_retention_period", duration)
        self.assertIsNone(result)

    def test__check_snaplock_attributes_autocommit_period_negative(self):
        self.assertRaises(exception.NetAppException,
                          self.library._check_snaplock_attributes,
                          fake.SHARE,
                          "netapp:snaplock_autocommit_period",
                          "invalid_period",
                          )

    def test__check_snaplock_attributes_min_retention_period_negative(self):
        self.assertRaises(exception.NetAppException,
                          self.library._check_snaplock_attributes,
                          fake.SHARE,
                          "netapp:snaplock_min_retention_period",
                          "invalid_period",
                          )

    def test__check_snaplock_attributes_max_retention_period_negative(self):
        self.assertRaises(exception.NetAppException,
                          self.library._check_snaplock_attributes,
                          fake.SHARE,
                          "netapp:snaplock_max_retention_period",
                          "invalid_period",
                          )

    def test__check_snaplock_attributes_default_retention_period_neg(self):
        self.assertRaises(exception.NetAppException,
                          self.library._check_snaplock_attributes,
                          fake.SHARE,
                          "netapp:snaplock_default_retention_period",
                          "invalid_period",
                          )

    def test__check_snaplock_compatibility_true(self):
        """Cluster creds + compliance configured: compatibility passes."""
        self.library._have_cluster_creds = True
        self.library._is_snaplock_compliance_configured = True
        self.mock_object(self.client, 'list_cluster_nodes',
                         mock.Mock(return_value=(["node1", "node2"])))

        result = self.library._check_snaplock_compatibility()

        self.assertIsNone(result)

    def test__check_snaplock_compatibility_false(self):
        """Compliance not configured on any node raises NetAppException."""
        self.library._have_cluster_creds = True
        self.library._is_snaplock_compliance_configured = False
        self.mock_object(self.client, 'list_cluster_nodes',
                         mock.Mock(return_value=(["node1", "node2"])))

        self.assertRaises(exception.NetAppException,
                          self.library._check_snaplock_compatibility)

    def test__check_snaplock_compatibility_not_cluster_scope(self):
        """Without cluster credentials the check is a no-op."""
        self.library._have_cluster_creds = False
        self.library._check_snaplock_compatibility()

    def test_allocate_container_no_pool(self):
        """A host string without a '#pool' suffix raises InvalidHost."""
        vserver_client = mock.Mock()
        fake_share_inst = copy.deepcopy(fake.SHARE_INSTANCE)
        fake_share_inst['host'] = fake_share_inst['host'].split('#')[0]

        self.assertRaises(exception.InvalidHost,
                          self.library._allocate_container,
                          fake_share_inst, fake.VSERVER1, vserver_client)

    def test_check_aggregate_extra_specs_validity(self):
        self.library._have_cluster_creds = True
        self.library._ssc_stats = fake.SSC_INFO

        result = self.library._check_aggregate_extra_specs_validity(
            fake.AGGREGATES[0], fake.EXTRA_SPEC)

        self.assertIsNone(result)

    def test_check_aggregate_extra_specs_validity_no_match(self):
        self.library._have_cluster_creds = True
        self.library._ssc_stats = fake.SSC_INFO

        self.assertRaises(exception.NetAppException,
                          self.library._check_aggregate_extra_specs_validity,
                          fake.AGGREGATES[1], fake.EXTRA_SPEC)

    @ddt.data({'provider_location': None, 'size': 50, 'hide_snapdir': True,
               'split': None, 'create_fpolicy': False},
              {'provider_location': 'fake_location', 'size': 30,
               'hide_snapdir': False, 'split': True, 'create_fpolicy': True},
              {'provider_location': 'fake_location', 'size': 20,
               'hide_snapdir': True, 'split': False, 'create_fpolicy': True})
    @ddt.unpack
    def test_allocate_container_from_snapshot(
            self, provider_location, size, hide_snapdir, split,
            create_fpolicy):
        """Cloning from a snapshot resizes, hides snapdir and splits as asked.

        provider_location overrides the computed parent snapshot name.
        """
        provisioning_options = copy.deepcopy(
            fake.PROVISIONING_OPTIONS_WITH_FPOLICY)
        provisioning_options['hide_snapdir'] = hide_snapdir
        mock_get_provisioning_opts = self.mock_object(
            self.library, '_get_provisioning_options_for_share',
            mock.Mock(return_value=provisioning_options))
        mock_create_fpolicy = self.mock_object(
            self.library, '_create_fpolicy_for_share')
        vserver = fake.VSERVER1
        vserver_client = mock.Mock()
        original_snapshot_size = 20

        fake_share_inst = copy.deepcopy(fake.SHARE_INSTANCE)
        fake_share_inst['size'] = size
        fake_snapshot = copy.deepcopy(fake.SNAPSHOT)
        fake_snapshot['provider_location'] = provider_location
        fake_snapshot['size'] = original_snapshot_size

        self.library._allocate_container_from_snapshot(
            fake_share_inst, fake_snapshot, vserver, vserver_client,
            split=split, create_fpolicy=create_fpolicy)

        share_name = self.library._get_backend_share_name(
            fake_share_inst['id'])
        parent_share_name = self.library._get_backend_share_name(
            fake_snapshot['share_id'])
        parent_snapshot_name = self.library._get_backend_snapshot_name(
            fake_snapshot['id']) if not provider_location else 'fake_location'
        mock_get_provisioning_opts.assert_called_once_with(
            fake_share_inst, fake.VSERVER1, vserver_client=vserver_client)
        vserver_client.create_volume_clone.assert_called_once_with(
            share_name, parent_share_name, parent_snapshot_name,
            mount_point_name=fake_share_inst["mount_point_name"],
            **provisioning_options)
        if size > original_snapshot_size:
            # New share is larger than the snapshot's source: grow the clone.
            vserver_client.set_volume_size.assert_called_once_with(
                share_name, size)
        else:
            vserver_client.set_volume_size.assert_not_called()

        if hide_snapdir:
            vserver_client.set_volume_snapdir_access.assert_called_once_with(
                fake.SHARE_INSTANCE_NAME, hide_snapdir)
        else:
            vserver_client.set_volume_snapdir_access.assert_not_called()

        if create_fpolicy:
            mock_create_fpolicy.assert_called_once_with(
                fake_share_inst, vserver, vserver_client,
                **provisioning_options)
        else:
            mock_create_fpolicy.assert_not_called()

        # split=None defaults to starting the clone split; split=False skips.
        if split is None:
            vserver_client.volume_clone_split_start.assert_called_once_with(
                fake.SHARE_INSTANCE_NAME)
        if split:
            vserver_client.volume_clone_split_start.assert_called_once_with(
                fake.SHARE_INSTANCE_NAME)
        if split is False:
            vserver_client.volume_clone_split_start.assert_not_called()

    def test_share_exists(self):
        vserver_client = mock.Mock()
        vserver_client.volume_exists.return_value = True

        result = self.library._share_exists(fake.SHARE_NAME, vserver_client)

        self.assertTrue(result)

    def test_share_exists_not_found(self):
        vserver_client = mock.Mock()
        vserver_client.volume_exists.return_value = False

        result = self.library._share_exists(fake.SHARE_NAME, vserver_client)

        self.assertFalse(result)

    def test_delete_share(self):
        """delete_share removes export, container, fpolicy and QoS group."""
        vserver_client = mock.Mock()
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        mock_share_exists = self.mock_object(self.library,
                                             '_share_exists',
                                             mock.Mock(return_value=True))
        mock_remove_export = self.mock_object(self.library, '_remove_export')
        mock_deallocate_container = self.mock_object(self.library,
                                                     '_deallocate_container')
        mock_delete_policy = self.mock_object(self.library,
                                              '_delete_fpolicy_for_share')

        self.library.delete_share(self.context,
                                  fake.SHARE,
                                  share_server=fake.SHARE_SERVER)

        share_name = self.library._get_backend_share_name(fake.SHARE['id'])
        qos_policy_name = self.library._get_backend_qos_policy_group_name(
            fake.SHARE['id'])
        mock_share_exists.assert_called_once_with(share_name, vserver_client)
        mock_remove_export.assert_called_once_with(fake.SHARE, vserver_client)
        mock_deallocate_container.assert_called_once_with(share_name,
                                                          vserver_client)
        mock_delete_policy.assert_called_once_with(fake.SHARE, fake.VSERVER1,
                                                   vserver_client)
        (vserver_client.mark_qos_policy_group_for_deletion
         .assert_called_once_with(qos_policy_name))
        self.assertEqual(0, lib_base.LOG.info.call_count)

    def test__delete_share_no_remove_qos_and_export(self):
        """_delete_share honors remove_export/remove_qos=False flags."""
        vserver_client = mock.Mock()
        mock_share_exists = self.mock_object(self.library,
                                             '_share_exists',
                                             mock.Mock(return_value=True))
        mock_remove_export = self.mock_object(self.library, '_remove_export')
        mock_deallocate_container = self.mock_object(self.library,
                                                     '_deallocate_container')
        mock_delete_policy = self.mock_object(self.library,
                                              '_delete_fpolicy_for_share')
        mock_get_backend_qos = self.mock_object(
            self.library, '_get_backend_qos_policy_group_name')
        mock_get_share_name = self.mock_object(
            self.library, '_get_backend_share_name',
            mock.Mock(return_value=fake.SHARE_NAME))

        self.library._delete_share(fake.SHARE, fake.VSERVER1, vserver_client,
                                   remove_export=False, remove_qos=False)

        mock_get_share_name.assert_called_once_with(fake.SHARE_ID)
        mock_delete_policy.assert_called_once_with(fake.SHARE, fake.VSERVER1,
                                                   vserver_client)
        mock_share_exists.assert_called_once_with(fake.SHARE_NAME,
                                                  vserver_client)
        mock_deallocate_container.assert_called_once_with(fake.SHARE_NAME,
                                                          vserver_client)
        mock_remove_export.assert_not_called()
        mock_get_backend_qos.assert_not_called()
        vserver_client.mark_qos_policy_group_for_deletion.assert_not_called()

    @ddt.data(exception.InvalidInput(reason='fake_reason'),
              exception.VserverNotSpecified(),
              exception.VserverNotFound(vserver='fake_vserver'))
    def test_delete_share_no_share_server(self, get_vserver_exception):
        """Vserver lookup failures make delete_share a logged no-op."""
        self.mock_object(self.library, '_get_vserver',
mock.Mock(side_effect=get_vserver_exception)) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=False)) mock_remove_export = self.mock_object(self.library, '_remove_export') mock_deallocate_container = self.mock_object(self.library, '_deallocate_container') self.library.delete_share(self.context, fake.SHARE, share_server=fake.SHARE_SERVER) self.assertFalse(mock_share_exists.called) self.assertFalse(mock_remove_export.called) self.assertFalse(mock_deallocate_container.called) self.assertFalse( self.library._client.mark_qos_policy_group_for_deletion.called) self.assertEqual(1, lib_base.LOG.warning.call_count) def test_delete_share_not_found(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=False)) mock_remove_export = self.mock_object(self.library, '_remove_export') mock_deallocate_container = self.mock_object(self.library, '_deallocate_container') mock_delete_fpolicy = self.mock_object(self.library, '_delete_fpolicy_for_share') self.library.delete_share(self.context, fake.SHARE, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name(fake.SHARE['id']) mock_share_exists.assert_called_once_with(share_name, vserver_client) mock_delete_fpolicy.assert_called_once_with(fake.SHARE, fake.VSERVER1, vserver_client) self.assertFalse(mock_remove_export.called) self.assertFalse(mock_deallocate_container.called) self.assertFalse( self.library._client.mark_qos_policy_group_for_deletion.called) self.assertEqual(1, lib_base.LOG.info.call_count) def test_deallocate_container(self): vserver_client = mock.Mock() self.library._deallocate_container(fake.SHARE_NAME, vserver_client) vserver_client.unmount_volume.assert_called_with(fake.SHARE_NAME, force=True) vserver_client.offline_volume.assert_called_with(fake.SHARE_NAME) 
vserver_client.delete_volume.assert_called_with(fake.SHARE_NAME) @ddt.data(None, fake.MANILA_HOST_NAME_2) def test_create_export(self, share_host): protocol_helper = mock.Mock() callback = (lambda export_address, export_path='fake_export_path': ':'.join([export_address, export_path])) protocol_helper.create_share.return_value = callback expected_host = share_host if share_host else fake.SHARE['host'] self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) vserver_client = mock.Mock() cluster_client = mock.Mock() vserver_client.get_network_interfaces.return_value = fake.LIFS fake_interface_addresses_with_metadata = copy.deepcopy( fake.INTERFACE_ADDRESSES_WITH_METADATA) mock_get_export_addresses_with_metadata = self.mock_object( self.library, '_get_export_addresses_with_metadata', mock.Mock(return_value=fake_interface_addresses_with_metadata)) result = self.library._create_export(fake.SHARE, fake.SHARE_SERVER, fake.VSERVER1, vserver_client, cluster_client=cluster_client, share_host=share_host) self.assertEqual(fake.NFS_EXPORTS, result) mock_get_export_addresses_with_metadata.assert_called_once_with( fake.SHARE, fake.SHARE_SERVER, fake.LIFS, expected_host, cluster_client) protocol_helper.create_share.assert_called_once_with( fake.SHARE, fake.SHARE_NAME, clear_current_export_policy=True, ensure_share_already_exists=False, replica=False, is_flexgroup=False) def test_create_export_lifs_not_found(self): self.mock_object(self.library, '_get_helper') vserver_client = mock.Mock() vserver_client.get_network_interfaces.return_value = [] self.assertRaises(exception.NetAppException, self.library._create_export, fake.SHARE, fake.SHARE_SERVER, fake.VSERVER1, vserver_client) @ddt.data(True, False) def test_get_export_addresses_with_metadata(self, is_flexgroup): self.mock_object( self.library, '_is_flexgroup_pool', mock.Mock(return_value=is_flexgroup)) 
mock_get_aggr_flexgroup = self.mock_object( self.library, '_get_flexgroup_aggregate_list', mock.Mock(return_value=[fake.AGGREGATE])) mock_get_aggregate_node = self.mock_object( self.library, '_get_aggregate_node', mock.Mock(return_value=fake.CLUSTER_NODES[0])) mock_get_admin_addresses_for_share_server = self.mock_object( self.library, '_get_admin_addresses_for_share_server', mock.Mock(return_value=[fake.LIF_ADDRESSES[1]])) result = self.library._get_export_addresses_with_metadata( fake.SHARE, fake.SHARE_SERVER, fake.LIFS, fake.SHARE['host']) self.assertEqual(fake.INTERFACE_ADDRESSES_WITH_METADATA, result) mock_get_admin_addresses_for_share_server.assert_called_once_with( fake.SHARE_SERVER) if is_flexgroup: mock_get_aggr_flexgroup.assert_called_once_with(fake.POOL_NAME) mock_get_aggregate_node.assert_called_once_with( fake.AGGREGATE, None) else: mock_get_aggregate_node.assert_called_once_with( fake.POOL_NAME, None) def test_get_export_addresses_with_metadata_node_unknown(self): self.mock_object( self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) mock_get_aggregate_node = self.mock_object( self.library, '_get_aggregate_node', mock.Mock(return_value=None)) mock_get_admin_addresses_for_share_server = self.mock_object( self.library, '_get_admin_addresses_for_share_server', mock.Mock(return_value=[fake.LIF_ADDRESSES[1]])) result = self.library._get_export_addresses_with_metadata( fake.SHARE, fake.SHARE_SERVER, fake.LIFS, fake.SHARE['host']) expected = copy.deepcopy(fake.INTERFACE_ADDRESSES_WITH_METADATA) for key, value in expected.items(): value['preferred'] = False self.assertEqual(expected, result) mock_get_aggregate_node.assert_called_once_with(fake.POOL_NAME, None) mock_get_admin_addresses_for_share_server.assert_called_once_with( fake.SHARE_SERVER) def test_get_admin_addresses_for_share_server(self): result = self.library._get_admin_addresses_for_share_server( fake.SHARE_SERVER) self.assertEqual([fake.ADMIN_NETWORK_ALLOCATIONS[0]['ip_address']], 
result) def test_get_admin_addresses_for_share_server_no_share_server(self): result = self.library._get_admin_addresses_for_share_server(None) self.assertEqual([], result) @ddt.data(True, False) def test_sort_export_locations_by_preferred_paths(self, reverse): export_locations = copy.copy(fake.NFS_EXPORTS) if reverse: export_locations.reverse() result = self.library._sort_export_locations_by_preferred_paths( export_locations) self.assertEqual(fake.NFS_EXPORTS, result) def test_remove_export(self): protocol_helper = mock.Mock() protocol_helper.get_target.return_value = 'fake_target' self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) vserver_client = mock.Mock() self.library._remove_export(fake.SHARE, vserver_client) protocol_helper.set_client.assert_called_once_with(vserver_client) protocol_helper.get_target.assert_called_once_with(fake.SHARE) protocol_helper.delete_share.assert_called_once_with(fake.SHARE, fake.SHARE_NAME) def test_remove_export_target_not_found(self): protocol_helper = mock.Mock() protocol_helper.get_target.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) vserver_client = mock.Mock() self.library._remove_export(fake.SHARE, vserver_client) protocol_helper.set_client.assert_called_once_with(vserver_client) protocol_helper.get_target.assert_called_once_with(fake.SHARE) self.assertFalse(protocol_helper.delete_share.called) def test_create_snapshot(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) model_update = self.library.create_snapshot( self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake.SNAPSHOT['id']) vserver_client.create_snapshot.assert_called_once_with(share_name, snapshot_name) self.assertEqual(snapshot_name, 
model_update['provider_location']) @ddt.data(True, False) def test_revert_to_snapshot(self, use_snap_provider_location): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) vserver_client.get_volume.return_value = fake.FLEXVOL_TO_MANAGE fake_snapshot = copy.deepcopy(fake.SNAPSHOT) if use_snap_provider_location: fake_snapshot['provider_location'] = 'fake-provider-location' else: del fake_snapshot['provider_location'] result = self.library.revert_to_snapshot( self.context, fake_snapshot, share_server=fake.SHARE_SERVER) self.assertIsNotNone(result) share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = (self.library._get_backend_snapshot_name( fake_snapshot['id']) if not use_snap_provider_location else 'fake-provider-location') vserver_client.restore_snapshot.assert_called_once_with(share_name, snapshot_name) def test_delete_snapshot(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_is_flexgroup_share = self.mock_object( self.library, '_is_flexgroup_share', mock.Mock(return_value=False)) mock_delete_snapshot = self.mock_object(self.library, '_delete_snapshot') self.library.delete_snapshot(self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake.SNAPSHOT['id']) mock_is_flexgroup_share.assert_called_once_with(vserver_client, share_name) mock_delete_snapshot.assert_called_once_with( vserver_client, share_name, snapshot_name, is_flexgroup=False) def test_delete_snapshot_with_provider_location(self): vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) fake_snapshot = 
copy.deepcopy(fake.SNAPSHOT) fake_snapshot['provider_location'] = 'fake_provider_location' self.library.delete_snapshot(self.context, fake_snapshot, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) vserver_client.delete_snapshot.assert_called_once_with( share_name, fake_snapshot['provider_location']) @ddt.data(exception.InvalidInput(reason='fake_reason'), exception.VserverNotSpecified(), exception.VserverNotFound(vserver='fake_vserver')) def test_delete_snapshot_no_share_server(self, get_vserver_exception): self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_exception)) mock_is_flexgroup_share = self.mock_object( self.library, '_is_flexgroup_share', mock.Mock(return_value=False)) mock_delete_snapshot = self.mock_object(self.library, '_delete_snapshot') self.library.delete_snapshot(self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) self.assertFalse(mock_is_flexgroup_share.called) self.assertFalse(mock_delete_snapshot.called) def test_delete_snapshot_not_found(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_is_flexgroup_share = self.mock_object( self.library, '_is_flexgroup_share', mock.Mock(return_value=False)) mock_delete_snapshot = self.mock_object( self.library, '_delete_snapshot', mock.Mock(side_effect=exception.SnapshotResourceNotFound( name=fake.SNAPSHOT_NAME))) self.library.delete_snapshot(self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake.SNAPSHOT['id']) mock_is_flexgroup_share.assert_called_once_with( vserver_client, share_name) mock_delete_snapshot.assert_called_once_with( vserver_client, share_name, snapshot_name, is_flexgroup=False) def test_delete_snapshot_not_unique(self): vserver_client = mock.Mock() 
self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_is_flexgroup_share = self.mock_object( self.library, '_is_flexgroup_share', mock.Mock(return_value=False)) mock_delete_snapshot = self.mock_object( self.library, '_delete_snapshot', mock.Mock(side_effect=exception.NetAppException())) self.assertRaises(exception.NetAppException, self.library.delete_snapshot, self.context, fake.SNAPSHOT, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake.SNAPSHOT['id']) mock_is_flexgroup_share.assert_called_once_with( vserver_client, share_name) mock_delete_snapshot.assert_called_once_with( vserver_client, share_name, snapshot_name, is_flexgroup=False) def test__delete_snapshot(self): vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.library._delete_snapshot(vserver_client, fake.SHARE_NAME, fake.SNAPSHOT_NAME) vserver_client.delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertFalse(vserver_client.get_clone_children_for_snapshot.called) self.assertFalse(vserver_client.volume_clone_split_start.called) self.assertFalse(vserver_client.soft_delete_snapshot.called) @ddt.data(True, False) def test__delete_snapshot_busy_volume_clone(self, is_flexgroup): vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = ( fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE) vserver_client.get_clone_children_for_snapshot.return_value = ( fake.CDOT_CLONE_CHILDREN) mock_is_flexgroup_share = self.mock_object( self.library, '_delete_busy_snapshot') self.library._delete_snapshot(vserver_client, fake.SHARE_NAME, fake.SNAPSHOT_NAME, is_flexgroup=is_flexgroup) self.assertFalse(vserver_client.delete_snapshot.called) if is_flexgroup: (vserver_client. 
get_clone_children_for_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME)) vserver_client.volume_clone_split_start.assert_has_calls([ mock.call(fake.CDOT_CLONE_CHILD_1), mock.call(fake.CDOT_CLONE_CHILD_2), ]) mock_is_flexgroup_share.assert_called_once_with( vserver_client, fake.SHARE_NAME, fake.SNAPSHOT_NAME) vserver_client.soft_delete_snapshot.assert_not_called() else: mock_is_flexgroup_share.assert_not_called() vserver_client.soft_delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) def test__delete_snapshot_busy_snapmirror(self): vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = ( fake.CDOT_SNAPSHOT_BUSY_SNAPMIRROR) mock_is_flexgroup_share = self.mock_object( self.library, '_delete_busy_snapshot') self.assertRaises(exception.ShareSnapshotIsBusy, self.library._delete_snapshot, vserver_client, fake.SHARE_NAME, fake.SNAPSHOT_NAME) self.assertFalse(vserver_client.delete_snapshot.called) self.assertFalse(vserver_client.get_clone_children_for_snapshot.called) self.assertFalse(vserver_client.volume_clone_split_start.called) self.assertFalse(mock_is_flexgroup_share.called) self.assertFalse(vserver_client.soft_delete_snapshot.called) def test_delete_busy_snapshot(self): (self.library.configuration. netapp_delete_busy_flexgroup_snapshot_timeout) = 2 vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.library._delete_busy_snapshot(vserver_client, fake.SHARE_NAME, fake.SNAPSHOT_NAME) vserver_client.get_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) vserver_client.delete_snapshot.assert_called_once_with( fake.SHARE_NAME, fake.SNAPSHOT_NAME) def test_delete_busy_snapshot_raise_timeout(self): (self.library.configuration. 
netapp_delete_busy_flexgroup_snapshot_timeout) = 2 vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = ( fake.CDOT_SNAPSHOT_BUSY_VOLUME_CLONE) self.assertRaises( exception.NetAppException, self.library._delete_busy_snapshot, vserver_client, fake.SHARE_NAME, fake.SNAPSHOT_NAME) @ddt.data(None, fake.VSERVER1) def test_manage_existing(self, fake_vserver): vserver_client = mock.Mock() mock__get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_manage_container = self.mock_object( self.library, '_manage_container', mock.Mock(return_value=fake.SHARE_SIZE)) mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(return_value=fake.NFS_EXPORTS)) result = self.library.manage_existing(fake.SHARE, {}, share_server=fake_vserver) expected = { 'size': fake.SHARE_SIZE, 'export_locations': fake.NFS_EXPORTS } mock__get_vserver.assert_called_once_with(share_server=fake_vserver) mock_manage_container.assert_called_once_with(fake.SHARE, fake.VSERVER1, vserver_client) mock_create_export.assert_called_once_with(fake.SHARE, fake_vserver, fake.VSERVER1, vserver_client) self.assertDictEqual(expected, result) @ddt.data(None, fake.VSERVER1) def test_unmanage(self, fake_vserver): result = self.library.unmanage(fake.SHARE, share_server=fake_vserver) self.assertIsNone(result) @ddt.data({'qos': True, 'fpolicy': False, 'is_flexgroup': False}, {'qos': False, 'fpolicy': True, 'is_flexgroup': False}, {'qos': True, 'fpolicy': False, 'is_flexgroup': True}, {'qos': False, 'fpolicy': True, 'is_flexgroup': True}) @ddt.unpack def test_manage_container(self, qos, fpolicy, is_flexgroup): vserver_client = mock.Mock() self.library._have_cluster_creds = True qos_policy_group_name = fake.QOS_POLICY_GROUP_NAME if qos else None if qos: extra_specs = copy.deepcopy(fake.EXTRA_SPEC_WITH_QOS) elif fpolicy: extra_specs = copy.deepcopy(fake.EXTRA_SPEC_WITH_FPOLICY) else: extra_specs = 
copy.deepcopy(fake.EXTRA_SPEC) provisioning_opts = self.library._get_provisioning_options(extra_specs) if qos: provisioning_opts['qos_policy_group'] = fake.QOS_POLICY_GROUP_NAME share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = mock.Mock() mock_helper.get_share_name_for_share.return_value = fake.FLEXVOL_NAME self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) fake_aggr = [fake.POOL_NAME] if is_flexgroup else fake.POOL_NAME mock_is_flexgroup_pool = self.mock_object( self.library, '_is_flexgroup_pool', mock.Mock(return_value=is_flexgroup)) mock_get_flexgroup_aggregate_list = self.mock_object( self.library, '_get_flexgroup_aggregate_list', mock.Mock(return_value=fake_aggr)) mock_is_flexgroup_share = self.mock_object( self.library, '_is_flexgroup_share', mock.Mock(return_value=is_flexgroup)) mock_get_volume_to_manage = self.mock_object( vserver_client, 'get_volume_to_manage', mock.Mock(return_value=fake.FLEXVOL_TO_MANAGE)) mock_validate_volume_for_manage = self.mock_object( self.library, '_validate_volume_for_manage') self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=extra_specs)) mock_check_extra_specs_validity = self.mock_object( self.library, '_check_extra_specs_validity') mock_check_aggregate_extra_specs_validity = self.mock_object( self.library, '_check_aggregate_extra_specs_validity') mock_modify_or_create_qos_policy = self.mock_object( self.library, '_modify_or_create_qos_for_existing_share', mock.Mock(return_value=qos_policy_group_name)) mock_get_volume_snapshot_attributes = self.mock_object( vserver_client, 'get_volume_snapshot_attributes', mock.Mock(return_value={'snapshot-policy': 'fake_policy'})) fake_fpolicy_scope = { 'policy-name': fake.FPOLICY_POLICY_NAME, 'shares-to-include': [fake.FLEXVOL_NAME] } mock_find_scope = self.mock_object( self.library, '_find_reusable_fpolicy_scope', mock.Mock(return_value=fake_fpolicy_scope)) 
mock_modify_fpolicy = self.mock_object( vserver_client, 'modify_fpolicy_scope') result = self.library._manage_container(share_to_manage, fake.VSERVER1, vserver_client) mock_is_flexgroup_pool.assert_called_once_with(fake.POOL_NAME) if is_flexgroup: mock_get_flexgroup_aggregate_list.assert_called_once_with( fake.POOL_NAME) else: mock_get_flexgroup_aggregate_list.assert_not_called() mock_is_flexgroup_share.assert_called_once_with(vserver_client, fake.FLEXVOL_NAME) mock_get_volume_to_manage.assert_called_once_with( fake_aggr, fake.FLEXVOL_NAME) mock_check_extra_specs_validity.assert_called_once_with( share_to_manage, extra_specs) mock_check_aggregate_extra_specs_validity.assert_called_once_with( fake.POOL_NAME, extra_specs) vserver_client.unmount_volume.assert_called_once_with( fake.FLEXVOL_NAME) vserver_client.set_volume_name.assert_called_once_with( fake.FLEXVOL_NAME, fake.SHARE_NAME) vserver_client.mount_volume.assert_called_once_with( fake.SHARE_NAME, fake.MOUNT_POINT_NAME) vserver_client.modify_volume.assert_called_once_with( fake_aggr, fake.SHARE_NAME, **provisioning_opts) mock_modify_or_create_qos_policy.assert_called_once_with( share_to_manage, extra_specs, fake.VSERVER1, vserver_client) mock_get_volume_snapshot_attributes.assert_called_once_with( fake.SHARE_NAME) mock_validate_volume_for_manage.assert_called() if fpolicy: mock_find_scope.assert_called_once_with( share_to_manage, vserver_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS, shares_to_include=[fake.FLEXVOL_NAME]) mock_modify_fpolicy.assert_called_once_with( fake.SHARE_NAME, fake.FPOLICY_POLICY_NAME, shares_to_include=[fake.SHARE_NAME]) else: mock_find_scope.assert_not_called() mock_modify_fpolicy.assert_not_called() original_data = { 'original_name': fake.FLEXVOL_TO_MANAGE['name'], 'original_junction_path': fake.FLEXVOL_TO_MANAGE['junction-path'], } 
self.library.private_storage.update.assert_called_once_with( fake.SHARE['id'], original_data) expected_size = int( math.ceil(float(fake.FLEXVOL_TO_MANAGE['size']) / units.Gi)) self.assertEqual(expected_size, result) def test_manage_container_invalid_export_location(self): vserver_client = mock.Mock() share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = mock.Mock() mock_helper.get_share_name_for_share.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) self.assertRaises(exception.ManageInvalidShare, self.library._manage_container, share_to_manage, fake.VSERVER1, vserver_client) def test_manage_container_not_found(self): vserver_client = mock.Mock() share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = mock.Mock() mock_helper.get_share_name_for_share.return_value = fake.FLEXVOL_NAME self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_share', mock.Mock(return_value=False)) self.mock_object(vserver_client, 'get_volume_to_manage', mock.Mock(return_value=None)) self.assertRaises(exception.ManageInvalidShare, self.library._manage_container, share_to_manage, fake.VSERVER1, vserver_client) def test_manage_container_invalid_extra_specs(self): vserver_client = mock.Mock() share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = mock.Mock() mock_helper.get_share_name_for_share.return_value = fake.FLEXVOL_NAME self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_share', mock.Mock(return_value=False)) self.mock_object(vserver_client, 
'get_volume_to_manage', mock.Mock(return_value=fake.FLEXVOL_TO_MANAGE)) self.mock_object(self.library, '_validate_volume_for_manage') self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=fake.EXTRA_SPEC)) self.mock_object(self.library, '_check_extra_specs_validity', mock.Mock(side_effect=exception.NetAppException)) self.assertRaises(exception.ManageExistingShareTypeMismatch, self.library._manage_container, share_to_manage, fake.VSERVER1, vserver_client) def test_manage_container_invalid_fpolicy(self): vserver_client = mock.Mock() extra_spec = copy.deepcopy(fake.EXTRA_SPEC_WITH_FPOLICY) share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = mock.Mock() mock_helper.get_share_name_for_share.return_value = fake.FLEXVOL_NAME self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_share', mock.Mock(return_value=False)) self.mock_object(vserver_client, 'get_volume_to_manage', mock.Mock(return_value=fake.FLEXVOL_TO_MANAGE)) self.mock_object(self.library, '_validate_volume_for_manage') self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=extra_spec)) self.mock_object(self.library, '_check_extra_specs_validity') self.mock_object(self.library, '_find_reusable_fpolicy_scope', mock.Mock(return_value=None)) self.assertRaises(exception.ManageExistingShareTypeMismatch, self.library._manage_container, share_to_manage, fake.VSERVER1, vserver_client) def test_manage_container_wrong_pool_style(self): vserver_client = mock.Mock() share_to_manage = copy.deepcopy(fake.SHARE) share_to_manage['export_location'] = fake.EXPORT_LOCATION mock_helper = mock.Mock() mock_helper.get_share_name_for_share.return_value = fake.FLEXVOL_NAME self.mock_object(self.library, '_get_helper', mock.Mock(return_value=mock_helper)) 
self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_share', mock.Mock(return_value=True)) self.assertRaises(exception.ManageInvalidShare, self.library._manage_container, share_to_manage, fake.VSERVER1, vserver_client) def test_validate_volume_for_manage(self): vserver_client = mock.Mock() vserver_client.volume_has_luns = mock.Mock(return_value=False) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=False) vserver_client.volume_has_snapmirror_relationships = mock.Mock( return_value=False) result = self.library._validate_volume_for_manage( fake.FLEXVOL_TO_MANAGE, vserver_client) self.assertIsNone(result) @ddt.data({ 'attribute': 'type', 'value': 'dp', }, { 'attribute': 'style', 'value': 'infinitevol', }) @ddt.unpack def test_validate_volume_for_manage_invalid_volume(self, attribute, value): flexvol_to_manage = copy.deepcopy(fake.FLEXVOL_TO_MANAGE) flexvol_to_manage[attribute] = value vserver_client = mock.Mock() vserver_client.volume_has_luns = mock.Mock(return_value=False) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=False) vserver_client.volume_has_snapmirror_relationships = mock.Mock( return_value=False) self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, flexvol_to_manage, vserver_client) def test_validate_volume_for_manage_luns_present(self): vserver_client = mock.Mock() vserver_client.volume_has_luns = mock.Mock(return_value=True) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=False) vserver_client.volume_has_snapmirror_relationships = mock.Mock( return_value=False) self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, fake.FLEXVOL_TO_MANAGE, vserver_client) def test_validate_volume_for_manage_junctioned_volumes_present(self): vserver_client = mock.Mock() vserver_client.volume_has_luns = mock.Mock(return_value=False) 
vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=True) vserver_client.volume_has_snapmirror_relationships = mock.Mock( return_value=False) self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, fake.FLEXVOL_TO_MANAGE, vserver_client) @ddt.data( {'fake_vserver': None, 'is_flexgroup': False}, {'fake_vserver': fake.VSERVER1, 'is_flexgroup': False}, {'fake_vserver': None, 'is_flexgroup': True}, {'fake_vserver': fake.VSERVER1, 'is_flexgroup': True}) @ddt.unpack def test_manage_existing_snapshot(self, fake_vserver, is_flexgroup): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) vserver_client.get_volume.return_value = fake.FLEXVOL_TO_MANAGE vserver_client.snapshot_exists.return_value = True vserver_client.volume_has_snapmirror_relationships.return_value = False result = self.library.manage_existing_snapshot( fake.SNAPSHOT_TO_MANAGE, {}, share_server=fake_vserver) share_name = self.library._get_backend_share_name( fake.SNAPSHOT['share_id']) mock_get_vserver.assert_called_once_with(share_server=fake_vserver) vserver_client.snapshot_exists.assert_called_once_with( fake.SNAPSHOT_NAME, share_name) (vserver_client.volume_has_snapmirror_relationships. 
assert_called_once_with(fake.FLEXVOL_TO_MANAGE)) expected_result = {'size': 2} self.assertEqual(expected_result, result) def test_manage_existing_snapshot_no_snapshot_name(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) vserver_client.get_volume.return_value = fake.FLEXVOL_TO_MANAGE vserver_client.snapshot_exists.return_value = True vserver_client.volume_has_snapmirror_relationships.return_value = False fake_snapshot = copy.deepcopy(fake.SNAPSHOT_TO_MANAGE) fake_snapshot['provider_location'] = '' self.assertRaises(exception.ManageInvalidShareSnapshot, self.library.manage_existing_snapshot, fake_snapshot, {}) @ddt.data(netapp_api.NaApiError, exception.NetAppException) def test_manage_existing_snapshot_get_volume_error(self, exception_type): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) vserver_client.get_volume.side_effect = exception_type self.mock_object(self.client, 'volume_has_snapmirror_relationships', mock.Mock(return_value=False)) self.assertRaises(exception.ShareNotFound, self.library.manage_existing_snapshot, fake.SNAPSHOT_TO_MANAGE, {}) def test_manage_existing_snapshot_not_from_share(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) vserver_client.get_volume.return_value = fake.FLEXVOL_TO_MANAGE vserver_client.snapshot_exists.return_value = False self.assertRaises(exception.ManageInvalidShareSnapshot, self.library.manage_existing_snapshot, fake.SNAPSHOT_TO_MANAGE, {}) def test_manage_existing_snapshot_mirrors_present(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) vserver_client.get_volume.return_value = fake.FLEXVOL_TO_MANAGE vserver_client.snapshot_exists.return_value = True 
vserver_client.volume_has_snapmirror_relationships.return_value = True self.assertRaises(exception.ManageInvalidShareSnapshot, self.library.manage_existing_snapshot, fake.SNAPSHOT_TO_MANAGE, {}) @ddt.data(None, fake.VSERVER1) def test_unmanage_snapshot(self, fake_vserver): result = self.library.unmanage_snapshot(fake.SNAPSHOT, fake_vserver) self.assertIsNone(result) def test_validate_volume_for_manage_snapmirror_relationships_present(self): vserver_client = mock.Mock() vserver_client.volume_has_luns.return_value = False vserver_client.volume_has_junctioned_volumes.return_value = False vserver_client.volume_has_snapmirror_relationships.return_value = True self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, fake.FLEXVOL_TO_MANAGE, vserver_client) def test_create_consistency_group_from_cgsnapshot(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_allocate_container_from_snapshot = self.mock_object( self.library, '_allocate_container_from_snapshot') mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(side_effect=[['loc3'], ['loc4']])) result = self.library.create_consistency_group_from_cgsnapshot( self.context, fake.CONSISTENCY_GROUP_DEST, fake.CG_SNAPSHOT, share_server=fake.SHARE_SERVER) share_update_list = [ {'id': fake.SHARE_ID3, 'export_locations': ['loc3']}, {'id': fake.SHARE_ID4, 'export_locations': ['loc4']} ] expected = (None, share_update_list) self.assertEqual(expected, result) mock_allocate_container_from_snapshot.assert_has_calls([ mock.call(fake.COLLATED_CGSNAPSHOT_INFO[0]['share'], fake.COLLATED_CGSNAPSHOT_INFO[0]['snapshot'], fake.VSERVER1, vserver_client, mock.ANY), mock.call(fake.COLLATED_CGSNAPSHOT_INFO[1]['share'], fake.COLLATED_CGSNAPSHOT_INFO[1]['snapshot'], fake.VSERVER1, vserver_client, mock.ANY), ]) mock_create_export.assert_has_calls([ 
mock.call(fake.COLLATED_CGSNAPSHOT_INFO[0]['share'], fake.SHARE_SERVER, fake.VSERVER1, vserver_client), mock.call(fake.COLLATED_CGSNAPSHOT_INFO[1]['share'], fake.SHARE_SERVER, fake.VSERVER1, vserver_client), ]) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_create_consistency_group_from_cgsnapshot_no_members(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_allocate_container_from_snapshot = self.mock_object( self.library, '_allocate_container_from_snapshot') mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(side_effect=[['loc3'], ['loc4']])) fake_cg_snapshot = copy.deepcopy(fake.CG_SNAPSHOT) fake_cg_snapshot['share_group_snapshot_members'] = [] result = self.library.create_consistency_group_from_cgsnapshot( self.context, fake.CONSISTENCY_GROUP_DEST, fake_cg_snapshot, share_server=fake.SHARE_SERVER) self.assertEqual((None, None), result) self.assertFalse(mock_allocate_container_from_snapshot.called) self.assertFalse(mock_create_export.called) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_collate_cg_snapshot_info(self): result = self.library._collate_cg_snapshot_info( fake.CONSISTENCY_GROUP_DEST, fake.CG_SNAPSHOT) self.assertEqual(fake.COLLATED_CGSNAPSHOT_INFO, result) def test_collate_cg_snapshot_info_invalid(self): fake_cg_snapshot = copy.deepcopy(fake.CG_SNAPSHOT) fake_cg_snapshot['share_group_snapshot_members'] = [] self.assertRaises(exception.InvalidShareGroup, self.library._collate_cg_snapshot_info, fake.CONSISTENCY_GROUP_DEST, fake_cg_snapshot) def test_create_cgsnapshot(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) result = self.library.create_cgsnapshot( self.context, fake.CG_SNAPSHOT, share_server=fake.SHARE_SERVER) share_names = 
[ self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_1['share_id']), self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_2['share_id']) ] snapshot_name = self.library._get_backend_cg_snapshot_name( fake.CG_SNAPSHOT['id']) vserver_client.create_cg_snapshot.assert_called_once_with( share_names, snapshot_name) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_create_cgsnapshot_no_members(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) fake_cg_snapshot = copy.deepcopy(fake.CG_SNAPSHOT) fake_cg_snapshot['share_group_snapshot_members'] = [] result = self.library.create_cgsnapshot( self.context, fake_cg_snapshot, share_server=fake.SHARE_SERVER) self.assertFalse(vserver_client.create_cg_snapshot.called) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_delete_cgsnapshot(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_delete_snapshot = self.mock_object(self.library, '_delete_snapshot') result = self.library.delete_cgsnapshot( self.context, fake.CG_SNAPSHOT, share_server=fake.SHARE_SERVER) share_names = [ self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_1['share_id']), self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_2['share_id']) ] snapshot_name = self.library._get_backend_cg_snapshot_name( fake.CG_SNAPSHOT['id']) mock_delete_snapshot.assert_has_calls([ mock.call(vserver_client, share_names[0], snapshot_name), mock.call(vserver_client, share_names[1], snapshot_name) ]) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_delete_cgsnapshot_no_members(self): vserver_client = mock.Mock() 
mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_delete_snapshot = self.mock_object(self.library, '_delete_snapshot') fake_cg_snapshot = copy.deepcopy(fake.CG_SNAPSHOT) fake_cg_snapshot['share_group_snapshot_members'] = [] result = self.library.delete_cgsnapshot( self.context, fake_cg_snapshot, share_server=fake.SHARE_SERVER) self.assertFalse(mock_delete_snapshot.called) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_delete_cgsnapshot_snapshots_not_found(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_delete_snapshot = self.mock_object( self.library, '_delete_snapshot', mock.Mock(side_effect=exception.SnapshotResourceNotFound( name='fake'))) result = self.library.delete_cgsnapshot( self.context, fake.CG_SNAPSHOT, share_server=fake.SHARE_SERVER) share_names = [ self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_1['share_id']), self.library._get_backend_share_name( fake.CG_SNAPSHOT_MEMBER_2['share_id']) ] snapshot_name = self.library._get_backend_cg_snapshot_name( fake.CG_SNAPSHOT['id']) mock_delete_snapshot.assert_has_calls([ mock.call(vserver_client, share_names[0], snapshot_name), mock.call(vserver_client, share_names[1], snapshot_name) ]) self.assertEqual((None, None), result) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) @ddt.data(exception.InvalidInput(reason='fake_reason'), exception.VserverNotSpecified(), exception.VserverNotFound(vserver='fake_vserver')) def test_delete_cgsnapshot_no_share_server(self, get_vserver_exception): mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_exception)) result = self.library.delete_cgsnapshot( self.context, fake.EMPTY_CONSISTENCY_GROUP, share_server=fake.SHARE_SERVER) 
self.assertEqual((None, None), result) self.assertEqual(1, lib_base.LOG.warning.call_count) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_adjust_qos_policy_with_volume_resize_no_cluster_creds(self): self.library._have_cluster_creds = False self.mock_object(share_types, 'get_extra_specs_from_share') retval = self.library._adjust_qos_policy_with_volume_resize( fake.SHARE, 10, mock.Mock()) self.assertIsNone(retval) share_types.get_extra_specs_from_share.assert_not_called() def test_adjust_qos_policy_with_volume_resize_no_qos_on_share(self): self.library._have_cluster_creds = True self.mock_object(share_types, 'get_extra_specs_from_share') vserver_client = mock.Mock() self.mock_object(vserver_client, 'get_volume', mock.Mock(return_value=fake.FLEXVOL_WITHOUT_QOS)) retval = self.library._adjust_qos_policy_with_volume_resize( fake.SHARE, 10, vserver_client) self.assertIsNone(retval) share_types.get_extra_specs_from_share.assert_not_called() def test_adjust_qos_policy_with_volume_resize_no_size_dependent_qos(self): self.library._have_cluster_creds = True self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=fake.EXTRA_SPEC_WITH_QOS)) vserver_client = mock.Mock() self.mock_object(vserver_client, 'get_volume', mock.Mock(return_value=fake.FLEXVOL_WITH_QOS)) self.mock_object(self.library, '_get_max_throughput') self.mock_object(self.library._client, 'qos_policy_group_modify') retval = self.library._adjust_qos_policy_with_volume_resize( fake.SHARE, 10, vserver_client) self.assertIsNone(retval) share_types.get_extra_specs_from_share.assert_called_once_with( fake.SHARE) self.library._get_max_throughput.assert_not_called() self.library._client.qos_policy_group_modify.assert_not_called() def test_adjust_qos_policy_with_volume_resize(self): self.library._have_cluster_creds = True self.mock_object( share_types, 'get_extra_specs_from_share', mock.Mock(return_value=fake.EXTRA_SPEC_WITH_SIZE_DEPENDENT_QOS)) vserver_client 
= mock.Mock() self.mock_object(vserver_client, 'get_volume', mock.Mock(return_value=fake.FLEXVOL_WITH_QOS)) self.mock_object(self.library._client, 'qos_policy_group_modify') retval = self.library._adjust_qos_policy_with_volume_resize( fake.SHARE, 10, vserver_client) expected_max_throughput = '10000B/s' self.assertIsNone(retval) share_types.get_extra_specs_from_share.assert_called_once_with( fake.SHARE) self.library._client.qos_policy_group_modify.assert_called_once_with( fake.QOS_POLICY_GROUP_NAME, expected_max_throughput) def test_extend_share(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object( share_types, 'get_extra_specs_from_share', mock.Mock(return_value=fake.EXTRA_SPEC)) mock_adjust_qos_policy = self.mock_object( self.library, '_adjust_qos_policy_with_volume_resize') mock_set_volume_size = self.mock_object(vserver_client, 'set_volume_size') new_size = fake.SHARE['size'] * 2 self.library.extend_share(fake.SHARE, new_size) mock_set_volume_size.assert_called_once_with(fake.SHARE_NAME, new_size) mock_adjust_qos_policy.assert_called_once_with( fake.SHARE, new_size, vserver_client) def test_shrink_share(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(share_types, 'get_extra_specs_from_share') self.mock_object(self.library, '_get_provisioning_options', mock.Mock(return_value={})) mock_adjust_qos_policy = self.mock_object( self.library, '_adjust_qos_policy_with_volume_resize') mock_set_volume_size = self.mock_object(vserver_client, 'set_volume_size') new_size = fake.SHARE['size'] - 1 self.library.shrink_share(fake.SHARE, new_size) mock_set_volume_size.assert_called_once_with(fake.SHARE_NAME, new_size) mock_adjust_qos_policy.assert_called_once_with( fake.SHARE, new_size, vserver_client) def test_shrinking_possible_data_loss(self): naapi_error = 
self._mock_api_error(code=netapp_api.EVOLOPNOTSUPP, message='Possible data loss') vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) mock_set_volume_size = self.mock_object( vserver_client, 'set_volume_size', naapi_error) self.mock_object(share_types, 'get_extra_specs_from_share') self.mock_object(self.library, '_get_provisioning_options', mock.Mock(return_value={})) new_size = fake.SHARE['size'] - 1 self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self.library.shrink_share, fake.SHARE, new_size) self.library._get_vserver.assert_called_once_with(share_server=None) mock_set_volume_size.assert_called_once_with(fake.SHARE_NAME, new_size) def test_update_access(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) protocol_helper = mock.Mock() protocol_helper.update_access.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) self.mock_object(self.library, '_is_readable_replica', mock.Mock(return_value=True)) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=True)) self.library.update_access(self.context, fake.SHARE, [fake.SHARE_ACCESS], [], [], [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name(fake.SHARE['id']) mock_share_exists.assert_called_once_with(share_name, vserver_client) protocol_helper.set_client.assert_called_once_with(vserver_client) protocol_helper.update_access.assert_called_once_with( fake.SHARE, fake.SHARE_NAME, [fake.SHARE_ACCESS]) @ddt.data(exception.InvalidInput(reason='fake_reason'), exception.VserverNotSpecified(), exception.VserverNotFound(vserver='fake_vserver')) def test_update_access_no_share_server(self, get_vserver_exception): mock_get_vserver = 
self.mock_object( self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_exception)) protocol_helper = mock.Mock() protocol_helper.update_access.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) self.mock_object(self.library, '_is_readable_replica', mock.Mock(return_value=True)) mock_share_exists = self.mock_object(self.library, '_share_exists') self.library.update_access(self.context, fake.SHARE, [fake.SHARE_ACCESS], [], [], [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) self.assertFalse(mock_share_exists.called) self.assertFalse(protocol_helper.set_client.called) self.assertFalse(protocol_helper.update_access.called) def test_update_access_share_not_found(self): vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) protocol_helper = mock.Mock() protocol_helper.update_access.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) self.mock_object(self.library, '_is_readable_replica', mock.Mock(return_value=True)) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=False)) self.assertRaises(exception.ShareResourceNotFound, self.library.update_access, self.context, fake.SHARE, [fake.SHARE_ACCESS], [], [], [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name(fake.SHARE['id']) mock_share_exists.assert_called_once_with(share_name, vserver_client) self.assertFalse(protocol_helper.set_client.called) self.assertFalse(protocol_helper.update_access.called) def test_update_access_to_active_replica(self): fake_share_copy = copy.deepcopy(fake.SHARE) fake_share_copy['replica_state'] = constants.REPLICA_STATE_ACTIVE vserver_client = mock.Mock() mock_get_vserver 
= self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(self.library, '_is_readable_replica', mock.Mock(return_value=True)) protocol_helper = mock.Mock() protocol_helper.update_access.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) mock_share_exists = self.mock_object(self.library, '_share_exists', mock.Mock(return_value=True)) self.library.update_access(self.context, fake_share_copy, [fake.SHARE_ACCESS], [], [], [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name(fake.SHARE['id']) mock_share_exists.assert_called_once_with(share_name, vserver_client) protocol_helper.set_client.assert_called_once_with(vserver_client) protocol_helper.update_access.assert_called_once_with( fake.SHARE, fake.SHARE_NAME, [fake.SHARE_ACCESS]) @ddt.data(True, False) def test_update_access_to_in_sync_replica(self, is_readable): fake_share_copy = copy.deepcopy(fake.SHARE) self.mock_object(self.library, '_is_readable_replica', mock.Mock(return_value=is_readable)) fake_share_copy['replica_state'] = constants.REPLICA_STATE_IN_SYNC vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) protocol_helper = mock.Mock() protocol_helper.update_access.return_value = None self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) self.mock_object(self.library, '_share_exists', mock.Mock(return_value=True)) self.library.update_access(self.context, fake_share_copy, [fake.SHARE_ACCESS], [], [], [], share_server=fake.SHARE_SERVER) if is_readable: mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) else: mock_get_vserver.assert_not_called() def test_setup_server(self): self.assertRaises(NotImplementedError, 
self.library.setup_server, fake.NETWORK_INFO) def test_teardown_server(self): self.assertRaises(NotImplementedError, self.library.teardown_server, fake.SHARE_SERVER['backend_details']) def test_get_network_allocations_number(self): self.assertRaises(NotImplementedError, self.library.get_network_allocations_number) def test_update_ssc_info(self): self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT self.library._have_cluster_creds = True self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, '_get_flexgroup_aggr_set', mock.Mock(return_value=fake.FLEXGROUP_AGGR_SET)) self.mock_object(self.library, '_get_aggregate_info', mock.Mock(return_value=fake.SSC_INFO_MAP)) self.library._update_ssc_info() expected = { fake.AGGREGATES[0]: { 'netapp_aggregate': fake.AGGREGATES[0], 'netapp_flexgroup': False, 'netapp_raid_type': 'raid4', 'netapp_disk_type': ['FCAL'], 'netapp_hybrid_aggregate': 'false', 'netapp_snaplock_type': ['compliance', 'enterprise'], }, fake.AGGREGATES[1]: { 'netapp_aggregate': fake.AGGREGATES[1], 'netapp_flexgroup': False, 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SATA', 'SSD'], 'netapp_hybrid_aggregate': 'true', 'netapp_snaplock_type': ['compliance', 'enterprise'], }, fake.FLEXGROUP_POOL_NAME: { 'netapp_aggregate': fake.FLEXGROUP_POOL['netapp_aggregate'], 'netapp_flexgroup': True, 'netapp_raid_type': 'raid4 raid_dp', 'netapp_disk_type': ['FCAL', 'SATA', 'SSD'], 'netapp_hybrid_aggregate': 'false true', 'netapp_snaplock_type': fake.FLEXGROUP_POOL[ 'netapp_snaplock_type'] }, } self.assertEqual(expected, self.library._ssc_stats) def test_update_ssc_info_non_unified_aggr(self): self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT self.library._client.features.UNIFIED_AGGR = False self.library._have_cluster_creds = True self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, 
'_get_flexgroup_aggr_set', mock.Mock(return_value=fake.FLEXGROUP_AGGR_SET)) self.mock_object(self.library, '_get_aggregate_info', mock.Mock(return_value=fake.SSC_INFO_MAP)) self.library._update_ssc_info() expected = { fake.AGGREGATES[0]: { 'netapp_aggregate': fake.AGGREGATES[0], 'netapp_flexgroup': False, 'netapp_raid_type': 'raid4', 'netapp_disk_type': ['FCAL'], 'netapp_hybrid_aggregate': 'false', 'netapp_snaplock_type': 'compliance', }, fake.AGGREGATES[1]: { 'netapp_aggregate': fake.AGGREGATES[1], 'netapp_flexgroup': False, 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SATA', 'SSD'], 'netapp_hybrid_aggregate': 'true', 'netapp_snaplock_type': 'enterprise', }, fake.FLEXGROUP_POOL_NAME: { 'netapp_aggregate': fake.FLEXGROUP_POOL['netapp_aggregate'], 'netapp_flexgroup': True, 'netapp_raid_type': 'raid4 raid_dp', 'netapp_disk_type': ['FCAL', 'SATA', 'SSD'], 'netapp_hybrid_aggregate': 'false true', 'netapp_snaplock_type': 'compliance enterprise', }, } self.assertEqual(expected, self.library._ssc_stats) def test_update_ssc_info_no_aggregates(self): self.library._flexgroup_pools = {} self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=[])) self.library._update_ssc_info() self.assertDictEqual({}, self.library._ssc_stats) def test_update_ssc_info_no_cluster_creds(self): self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT self.library._have_cluster_creds = False self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, '_get_flexgroup_aggr_set', mock.Mock(return_value=fake.FLEXGROUP_AGGR_SET)) self.mock_object(self.library, '_get_aggregate_info', mock.Mock(return_value=fake.SSC_INFO_MAP)) self.mock_object(self.client, 'get_vserver_aggr_snaplock_type', mock.Mock(return_value='compliance')) self.library._update_ssc_info() expected = { fake.AGGREGATES[0]: { 'netapp_aggregate': fake.AGGREGATES[0], 'netapp_flexgroup': False, 'netapp_snaplock_type': ['compliance', 
'enterprise'], }, fake.AGGREGATES[1]: { 'netapp_aggregate': fake.AGGREGATES[1], 'netapp_flexgroup': False, 'netapp_snaplock_type': ['compliance', 'enterprise'], }, fake.FLEXGROUP_POOL_NAME: { 'netapp_aggregate': fake.FLEXGROUP_POOL['netapp_aggregate'], 'netapp_flexgroup': True, }, } self.assertDictEqual(self.library._ssc_stats, expected) def test_get_aggregate_info(self): mock_get_aggregate = self.mock_object( self.client, 'get_aggregate', mock.Mock(side_effect=fake.SSC_AGGREGATES)) mock_get_aggregate_disk_types = self.mock_object( self.client, 'get_aggregate_disk_types', mock.Mock(side_effect=fake.SSC_DISK_TYPES)) result = self.library._get_aggregate_info(fake.AGGREGATES) expected = { fake.AGGREGATES[0]: { 'netapp_raid_type': 'raid4', 'netapp_disk_type': 'FCAL', 'netapp_hybrid_aggregate': 'false', 'netapp_is_home': False, 'netapp_snaplock_type': 'compliance', }, fake.AGGREGATES[1]: { 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SATA', 'SSD'], 'netapp_hybrid_aggregate': 'true', 'netapp_is_home': True, 'netapp_snaplock_type': 'enterprise', }, } self.assertDictEqual(result, expected) mock_get_aggregate.assert_has_calls([ mock.call(fake.AGGREGATES[0]), mock.call(fake.AGGREGATES[1]), ]) mock_get_aggregate_disk_types.assert_has_calls([ mock.call(fake.AGGREGATES[0]), mock.call(fake.AGGREGATES[1]), ]) @ddt.data( {'is_readable': True, 'rules_status': constants.STATUS_ACTIVE}, {'is_readable': True, 'rules_status': ( constants.SHARE_INSTANCE_RULES_ERROR)}, {'is_readable': False, 'rules_status': constants.STATUS_ACTIVE}) @ddt.unpack def test_create_replica(self, is_readable, rules_status): vserver_client = mock.Mock() self.mock_object(self.library, '_allocate_container') mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=vserver_client)) self.mock_object(mock_dm_session, 'get_vserver_from_share', 
mock.Mock(return_value=fake.VSERVER1)) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) mock_is_readable = self.mock_object( self.library, '_is_readable_replica', mock.Mock(return_value=is_readable)) mock_create_export = self.mock_object( self.library, '_create_export', mock.Mock(return_value=[])) protocol_helper = mock.Mock() if rules_status == constants.STATUS_ACTIVE: protocol_helper.update_access.return_value = None else: protocol_helper.update_access.side_effect = ( netapp_api.NaApiError(code=0)) mock_get_helper = self.mock_object( self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) self.mock_object(mock_dm_session, 'get_backend_info_for_share', mock.Mock(return_value=(fake.SHARE_NAME, fake.VSERVER1, fake.BACKEND_NAME))) self.mock_object(self.library, '_is_flexgroup_share', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(na_utils, 'get_relationship_type', mock.Mock(return_value=na_utils.DATA_PROTECTION_TYPE)) expected_model_update = { 'export_locations': [], 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, 'access_rules_status': rules_status, } model_update = self.library.create_replica( None, [fake.SHARE], fake.SHARE, [fake.SHARE_ACCESS], [], share_server=None) self.assertDictEqual(expected_model_update, model_update) mock_dm_session.create_snapmirror.assert_called_once_with( fake.SHARE, fake.SHARE, na_utils.DATA_PROTECTION_TYPE, mount=is_readable) mock_is_readable.assert_called_once_with(fake.SHARE) if is_readable: mock_create_export.assert_called_once_with( fake.SHARE, None, fake.VSERVER1, vserver_client, replica=True) mock_get_helper.assert_called_once_with(fake.SHARE) protocol_helper.update_access.assert_called_once_with( fake.SHARE, fake.SHARE_NAME, [fake.SHARE_ACCESS]) else: mock_create_export.assert_not_called() mock_get_helper.assert_not_called() protocol_helper.update_access.assert_not_called() 
data_motion.get_client_for_backend.assert_has_calls( [mock.call(fake.BACKEND_NAME, vserver_name=fake.VSERVER1), mock.call(fake.BACKEND_NAME, vserver_name=fake.VSERVER1)]) self.library._is_flexgroup_pool.assert_called_once_with(fake.POOL_NAME) na_utils.get_relationship_type.assert_called_once_with(False) def test_create_replica_with_share_server(self): self.mock_object(self.library, '_allocate_container', mock.Mock()) mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) self.mock_object(mock_dm_session, 'get_backend_info_for_share', mock.Mock(return_value=(fake.SHARE_NAME, fake.VSERVER1, fake.BACKEND_NAME))) self.mock_object(self.library, '_is_flexgroup_share', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(data_motion, 'get_client_for_backend') self.mock_object(mock_dm_session, 'get_vserver_from_share', mock.Mock(return_value=fake.VSERVER1)) self.mock_object(self.library, '_is_readable_replica', mock.Mock(return_value=False)) expected_model_update = { 'export_locations': [], 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, 'access_rules_status': constants.STATUS_ACTIVE, } model_update = self.library.create_replica( None, [fake.SHARE], fake.SHARE, [], [], share_server=fake.SHARE_SERVER) self.assertDictEqual(expected_model_update, model_update) mock_dm_session.create_snapmirror.assert_called_once_with( fake.SHARE, fake.SHARE, na_utils.DATA_PROTECTION_TYPE, mount=False) data_motion.get_client_for_backend.assert_has_calls( [mock.call(fake.BACKEND_NAME, vserver_name=fake.VSERVER1), mock.call(fake.BACKEND_NAME, vserver_name=fake.VSERVER1)]) self.library._is_flexgroup_pool.assert_called_once_with(fake.POOL_NAME) def test_create_replica_raise_different_type(self): mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) self.mock_object(mock_dm_session, 
'get_backend_info_for_share', mock.Mock(return_value=(fake.SHARE_NAME, fake.VSERVER1, fake.BACKEND_NAME))) self.mock_object(self.library, '_is_flexgroup_share', mock.Mock(return_value=True)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(data_motion, 'get_client_for_backend') self.assertRaises(exception.NetAppException, self.library.create_replica, None, [fake.SHARE], fake.SHARE, [], [], share_server=None) def test_create_replica_raise_flexgroup_no_fan_out_limit(self): mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) self.mock_object(mock_dm_session, 'get_backend_info_for_share', mock.Mock(return_value=(fake.SHARE_NAME, fake.VSERVER1, fake.BACKEND_NAME))) self.mock_object(self.library, '_is_flexgroup_share', mock.Mock(return_value=True)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=True)) mock_src_client = mock.Mock() self.mock_object(mock_src_client, 'is_flexgroup_fan_out_supported', mock.Mock(return_value=False)) self.mock_object(self.library._client, 'is_flexgroup_fan_out_supported', mock.Mock(return_value=False)) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=mock_src_client)) self.assertRaises(exception.NetAppException, self.library.create_replica, None, [fake.SHARE, fake.SHARE, fake.SHARE], fake.SHARE, [], [], share_server=None) def test_delete_replica(self): active_replica = fake_replica( replica_state=constants.REPLICA_STATE_ACTIVE) replica_1 = fake_replica( replica_state=constants.REPLICA_STATE_IN_SYNC, host=fake.MANILA_HOST_NAME) replica_2 = fake_replica( replica_state=constants.REPLICA_STATE_OUT_OF_SYNC) replica_list = [active_replica, replica_1, replica_2] self.mock_object(self.library, '_delete_share', mock.Mock()) mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) self.mock_object(data_motion, 
'get_client_for_backend') self.mock_object(mock_dm_session, 'get_vserver_from_share', mock.Mock(return_value=fake.VSERVER1)) self.mock_object(self.library, '_is_readable_replica', mock.Mock(return_value=False)) result = self.library.delete_replica(None, replica_list, replica_1, [], share_server=None) self.assertIsNone(result) mock_dm_session.delete_snapmirror.assert_has_calls([ mock.call(active_replica, replica_1), mock.call(replica_2, replica_1), mock.call(replica_1, replica_2), mock.call(replica_1, active_replica)], any_order=True) self.assertEqual(4, mock_dm_session.delete_snapmirror.call_count) data_motion.get_client_for_backend.assert_called_with( fake.BACKEND_NAME, vserver_name=mock.ANY) self.assertEqual(1, data_motion.get_client_for_backend.call_count) def test_delete_replica_with_share_server(self): active_replica = fake_replica( replica_state=constants.REPLICA_STATE_ACTIVE) replica = fake_replica(replica_state=constants.REPLICA_STATE_IN_SYNC, host=fake.MANILA_HOST_NAME) replica_list = [active_replica, replica] self.mock_object(self.library, '_is_readable_replica', mock.Mock(return_value=False)) self.mock_object(self.library, '_delete_share', mock.Mock()) mock_dm_session = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) self.mock_object(data_motion, 'get_client_for_backend') self.mock_object(mock_dm_session, 'get_vserver_from_share', mock.Mock(return_value=fake.VSERVER1)) result = self.library.delete_replica(None, replica_list, replica, [], share_server=fake.SHARE_SERVER) self.assertIsNone(result) mock_dm_session.delete_snapmirror.assert_has_calls([ mock.call(active_replica, replica), mock.call(replica, active_replica)], any_order=True) data_motion.get_client_for_backend.assert_called_once_with( fake.BACKEND_NAME, vserver_name=fake.VSERVER1) @ddt.data({'seconds': 3600, 'schedule': 'hourly'}, {'seconds': (5 * 3600), 'schedule': '5hourly'}, {'seconds': (30 * 60), 'schedule': '30minute'}, {'seconds': (2 * 
                  24 * 3600), 'schedule': '2DAY'},
              {'seconds': 3600, 'schedule': 'fake_shedule'},
              {'seconds': 3600, 'schedule': 'fake2'},
              {'seconds': 3600, 'schedule': '10fake'})
    @ddt.unpack
    def test__convert_schedule_to_seconds(self, seconds, schedule):
        # Each schedule string must convert to the expected second count.
        expected_return = seconds
        actual_return = self.library._convert_schedule_to_seconds(schedule)
        self.assertEqual(expected_return, actual_return)

    def test_update_replica_state_no_snapmirror_share_creating(self):
        # While the replica is still being created, no snapmirror may be
        # established yet; the state reported is "out of sync".
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        replica = copy.deepcopy(fake.SHARE)
        replica['status'] = constants.STATUS_CREATING

        result = self.library.update_replica_state(
            None, [replica], replica, None, [], share_server=None)

        self.assertFalse(self.mock_dm_session.create_snapmirror.called)
        self.assertEqual(constants.STATUS_OUT_OF_SYNC, result)

    def test_update_replica_state_share_reverting_to_snapshot(self):
        # A replica mid-revert is left alone entirely: no snapmirror query,
        # no snapmirror creation, and no state change (None returned).
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
        replica = copy.deepcopy(fake.SHARE)
        replica['status'] = constants.STATUS_REVERTING

        result = self.library.update_replica_state(
            None, [replica], replica, None, [], share_server=None)

        self.assertFalse(self.mock_dm_session.get_snapmirrors.called)
        self.assertFalse(self.mock_dm_session.create_snapmirror.called)
        self.assertIsNone(result)

    def test_update_replica_state_no_snapmirror_create_failed(self):
        # If re-creating the missing snapmirror raises NaApiError, the
        # replica state is reported as "error".
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        self.mock_object(share_utils, 'extract_host',
                         mock.Mock(return_value=fake.POOL_NAME))
        self.mock_object(self.library, '_is_flexgroup_pool',
                         mock.Mock(return_value=False))
        self.mock_object(na_utils, 'get_relationship_type',
                         mock.Mock(return_value=na_utils.DATA_PROTECTION_TYPE))
        self.mock_dm_session.create_snapmirror.side_effect = (
            netapp_api.NaApiError(code=0))
        replica = copy.deepcopy(fake.SHARE)
        replica['status'] = constants.REPLICA_STATE_OUT_OF_SYNC

        result = self.library.update_replica_state(
            None, [replica], replica, None, [], share_server=None)

        self.assertTrue(self.mock_dm_session.create_snapmirror.called)
        self.assertEqual(constants.STATUS_ERROR, result)

    # When no snapmirror exists for an error/available replica, the driver
    # creates one and reports the replica as "out of sync".
    @ddt.data(constants.STATUS_ERROR, constants.STATUS_AVAILABLE)
    def test_update_replica_state_no_snapmirror(self, status):
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(return_value=[])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        self.mock_object(share_utils, 'extract_host',
                         mock.Mock(return_value=fake.POOL_NAME))
        self.mock_object(self.library, '_is_flexgroup_pool',
                         mock.Mock(return_value=False))
        self.mock_object(na_utils, 'get_relationship_type',
                         mock.Mock(return_value=na_utils.DATA_PROTECTION_TYPE))
        replica = copy.deepcopy(fake.SHARE)
        replica['status'] = status

        result = self.library.update_replica_state(
            None, [replica], replica, None, [], share_server=None)

        self.assertEqual(1,
                         self.mock_dm_session.create_snapmirror.call_count)
        self.assertEqual(constants.STATUS_OUT_OF_SYNC, result)

    def test_update_replica_state_broken_snapmirror(self):
        # A broken-off snapmirror triggers a resync back toward the replica
        # and leaves the replica reported as "out of sync".
        fake_snapmirror = {
            'mirror-state': 'broken-off',
            'relationship-status': 'idle',
            'source-vserver': fake.VSERVER2,
            'source-volume': 'fake_volume',
            'last-transfer-end-timestamp': '%s' % float(time.time() - 10000)
        }
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(
            return_value=[fake_snapmirror])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))

        result = self.library.update_replica_state(None,
                                                   [fake.SHARE],
                                                   fake.SHARE,
                                                   None,
                                                   [],
                                                   share_server=None)

        vserver_client.resync_snapmirror_vol.assert_called_once_with(
            fake.VSERVER2, 'fake_volume', fake.VSERVER1, fake.SHARE['name']
        )
        self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result)

    def test_update_replica_state_snapmirror_still_initializing(self):
        # An uninitialized, still-transferring snapmirror is simply reported
        # as "out of sync" — no corrective action is asserted here.
        fake_snapmirror = {
            'mirror-state': 'uninitialized',
            'relationship-status': 'transferring',
            'source-vserver': fake.VSERVER2,
            'source-volume': 'fake_volume',
            'last-transfer-end-timestamp': '%s' % float(time.time() - 10000)
        }
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(
            return_value=[fake_snapmirror])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))

        result = self.library.update_replica_state(None,
                                                   [fake.SHARE],
                                                   fake.SHARE,
                                                   None,
                                                   [],
                                                   share_server=None)

        self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result)

    def test_update_replica_state_fail_to_get_snapmirrors(self):
        # Failure to even list snapmirrors (NaApiError) maps to "error".
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors.side_effect = (
            netapp_api.NaApiError(code=0))

        result = self.library.update_replica_state(None,
                                                   [fake.SHARE],
                                                   fake.SHARE,
                                                   None,
                                                   [],
                                                   share_server=None)
        self.assertTrue(self.mock_dm_session.get_snapmirrors.called)
        self.assertEqual(constants.STATUS_ERROR, result)

    def test_update_replica_state_broken_snapmirror_resync_error(self):
        # If the resync attempt itself raises, the replica goes to "error".
        fake_snapmirror = {
            'mirror-state': 'broken-off',
            'relationship-status': 'idle',
            'source-vserver': fake.VSERVER2,
            'source-volume': 'fake_volume',
            'last-transfer-end-timestamp': '%s' % float(time.time() - 10000)
        }
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(
            return_value=[fake_snapmirror])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        vserver_client.resync_snapmirror_vol.side_effect = (
            netapp_api.NaApiError)

        result = self.library.update_replica_state(None,
                                                   [fake.SHARE],
                                                   fake.SHARE,
                                                   None,
                                                   [],
                                                   share_server=None)

        vserver_client.resync_snapmirror_vol.assert_called_once_with(
            fake.VSERVER2, 'fake_volume', fake.VSERVER1, fake.SHARE['name']
        )
        self.assertEqual(constants.STATUS_ERROR, result)

    def test_update_replica_state_stale_snapmirror(self):
        # Snapmirrored but with a last transfer far in the past (stale):
        # reported as "out of sync".
        fake_snapmirror = {
            'mirror-state': 'snapmirrored',
            'schedule':
                self.library.configuration.netapp_snapmirror_schedule,
            'last-transfer-end-timestamp': '%s' % float(
                timeutils.utcnow_ts() - 10000)
        }
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(
            return_value=[fake_snapmirror])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        mock_backend_config = fake.get_config_cmode()
        self.mock_object(data_motion, 'get_backend_configuration',
                         mock.Mock(return_value=mock_backend_config))

        result = self.library.update_replica_state(None,
                                                   [fake.SHARE],
                                                   fake.SHARE,
                                                   None,
                                                   [],
                                                   share_server=None)

        self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result)

    def test_update_replica_state_in_sync(self):
        # Healthy, idle, recently-transferred snapmirror: replica reported
        # "in sync" and no snapmirror relationship cleanup is performed.
        fake_snapmirror = {
            'mirror-state': 'snapmirrored',
            'schedule':
                self.library.configuration.netapp_snapmirror_schedule,
            'relationship-status': 'idle',
            'last-transfer-end-timestamp': '%s' % float(time.time())
        }
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(
            return_value=[fake_snapmirror])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        mock_backend_config = fake.get_config_cmode()
        self.mock_object(data_motion, 'get_backend_configuration',
                         mock.Mock(return_value=mock_backend_config))

        result = self.library.update_replica_state(None,
                                                   [fake.SHARE],
                                                   fake.SHARE,
                                                   None,
                                                   [],
                                                   share_server=None)

        (self.mock_dm_session.cleanup_previous_snapmirror_relationships
         .assert_not_called())
        self.assertEqual(constants.REPLICA_STATE_IN_SYNC, result)

    def test_update_replica_state_replica_change_to_in_sycn(self):
        # Transition out-of-sync -> in-sync must trigger a cleanup of any
        # previous snapmirror relationships left behind.
        fake_snapmirror = {
            'mirror-state': 'snapmirrored',
            'relationship-status': 'idle',
            'last-transfer-end-timestamp': '%s' % float(time.time())
        }
        # fake SHARE has replica_state set to active already
        active_replica = fake.SHARE
        out_of_sync_replica = copy.deepcopy(fake.SHARE)
        out_of_sync_replica['replica_state'] = (
            constants.REPLICA_STATE_OUT_OF_SYNC)
        replica_list = [out_of_sync_replica, active_replica]

        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(
            return_value=[fake_snapmirror])
        mock_config = mock.Mock()
        mock_config.safe_get = mock.Mock(return_value=0)
        self.mock_object(data_motion, 'get_backend_configuration',
                         mock.Mock(return_value=mock_config))
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))

        result = self.library.update_replica_state(
            None, replica_list, out_of_sync_replica, None, [],
            share_server=None)

        # Expect a snapmirror cleanup as replica was in out of sync state
        (self.mock_dm_session.cleanup_previous_snapmirror_relationships
         .assert_called_once_with(out_of_sync_replica, replica_list))
        self.assertEqual(constants.REPLICA_STATE_IN_SYNC, result)

    def test_update_replica_state_backend_volume_absent(self):
        # If the backing volume no longer exists, the driver raises
        # ShareResourceNotFound rather than returning a state.
        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'volume_exists',
                         mock.Mock(return_value=False))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))

        self.assertRaises(exception.ShareResourceNotFound,
                          self.library.update_replica_state,
                          None,
                          [fake.SHARE],
                          fake.SHARE,
                          None,
                          [],
                          share_server=None)

    def test_update_replica_state_in_sync_with_snapshots(self):
        # In-sync snapmirror plus all expected snapshots present on the
        # destination: replica is "in sync", no relationship cleanup.
        fake_snapmirror = {
            'mirror-state': 'snapmirrored',
            'schedule':
                self.library.configuration.netapp_snapmirror_schedule,
            'relationship-status': 'idle',
            'last-transfer-end-timestamp': '%s' % float(time.time())
        }
        fake_snapshot = copy.deepcopy(fake.SNAPSHOT)
        fake_snapshot['share_id'] = fake.SHARE['id']
        fake_snapshot['provider_location'] = 'fake'
        snapshots = [{'share_replica_snapshot': fake_snapshot}]

        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'snapshot_exists', mock.Mock(
            return_value=True))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(
            return_value=[fake_snapmirror])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        mock_backend_config = fake.get_config_cmode()
        self.mock_object(data_motion, 'get_backend_configuration',
                         mock.Mock(return_value=mock_backend_config))

        result = self.library.update_replica_state(None,
                                                   [fake.SHARE],
                                                   fake.SHARE,
                                                   None,
                                                   snapshots,
                                                   share_server=None)

        (self.mock_dm_session.cleanup_previous_snapmirror_relationships
         .assert_not_called())
        self.assertEqual(constants.REPLICA_STATE_IN_SYNC, result)

    def test_update_replica_state_missing_snapshot(self):
        # A snapshot expected on the replica but absent on the backend keeps
        # the replica "out of sync" even with a healthy snapmirror.
        fake_snapmirror = {
            'mirror-state': 'snapmirrored',
            'relationship-status': 'idle',
            'last-transfer-end-timestamp': '%s' % float(time.time())
        }
        fake_snapshot = copy.deepcopy(fake.SNAPSHOT)
        fake_snapshot['share_id'] = fake.SHARE['id']
        snapshots = [{'share_replica_snapshot': fake_snapshot}]

        vserver_client = mock.Mock()
        self.mock_object(vserver_client, 'snapshot_exists', mock.Mock(
            return_value=False))
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1,
                                                 vserver_client)))
        self.mock_dm_session.get_snapmirrors = mock.Mock(
            return_value=[fake_snapmirror])
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        mock_backend_config = fake.get_config_cmode()
        self.mock_object(data_motion, 'get_backend_configuration',
                         mock.Mock(return_value=mock_backend_config))

        result = self.library.update_replica_state(None,
                                                   [fake.SHARE],
                                                   fake.SHARE,
                                                   None,
                                                   snapshots,
                                                   share_server=None)

        self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC, result)

    # Promotion is exercised for both readable (is_readable=True) and DR
    # (is_readable=False) replication styles.
    @ddt.data(True, False)
    def test_promote_replica(self, is_readable):
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        protocol_helper = mock.Mock()
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=protocol_helper))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))
        self.mock_object(self.library, '_unmount_orig_active_replica')
        self.mock_object(self.library, '_handle_qos_on_replication_change')
        self.mock_object(self.library, '_is_flexgroup_pool',
                         mock.Mock(return_value=False))
        self.mock_object(share_types, 'get_extra_specs_from_share')
        self.mock_object(self.library, '_get_provisioning_options',
                         mock.Mock(return_value={}))
        mock_dm_session = mock.Mock()
        self.mock_object(data_motion, "DataMotionSession",
                         mock.Mock(return_value=mock_dm_session))
        self.mock_object(mock_dm_session, 'get_vserver_from_share',
                         mock.Mock(return_value=fake.VSERVER1))
        self.mock_object(mock_dm_session, 'get_backend_info_for_share',
                         mock.Mock(return_value=(fake.SHARE_NAME,
                                                 fake.VSERVER1,
                                                 fake.BACKEND_NAME)))
        mock_client = mock.Mock()
        self.mock_object(data_motion, "get_client_for_backend",
                         mock.Mock(return_value=mock_client))
        mock_backend_config = fake.get_config_cmode()
        self.mock_object(data_motion, 'get_backend_configuration',
                         mock.Mock(return_value=mock_backend_config))
        self.mock_object(self.library, '_get_api_client_for_backend',
                         mock.Mock(return_value=mock_client))
        self.mock_object(self.client, 'cleanup_demoted_replica')
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=is_readable))
        self.mock_object(self.library,
                         '_update_autosize_attributes_after_promote_replica')

        replicas = self.library.promote_replica(
            None, [self.fake_replica, self.fake_replica_2],
            self.fake_replica_2, [], share_server=None)

        mock_dm_session.change_snapmirror_source.assert_called_once_with(
            self.fake_replica, self.fake_replica, self.fake_replica_2,
            mock.ANY, is_flexgroup=False
        )
        self.assertEqual(2, len(replicas))
        # The demoted (previously active) replica becomes out-of-sync.
        actual_replica_1 = list(filter(
            lambda x: x['id'] == self.fake_replica['id'], replicas))[0]
        self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
                         actual_replica_1['replica_state'])
        # The promoted replica becomes active with a fresh export location.
        actual_replica_2 = list(filter(
            lambda x: x['id'] == self.fake_replica_2['id'], replicas))[0]
        self.assertEqual(constants.REPLICA_STATE_ACTIVE,
                         actual_replica_2['replica_state'])
        self.assertEqual('fake_export_location',
                         actual_replica_2['export_locations'])
        self.assertEqual(constants.STATUS_ACTIVE,
                         actual_replica_2['access_rules_status'])
        if is_readable:
            # Readable replicas stay mounted/exported after demotion.
            self.library._unmount_orig_active_replica.assert_not_called()
            protocol_helper.cleanup_demoted_replica.assert_not_called()
            self.assertEqual('fake_export_location',
                             actual_replica_1['export_locations'])
        else:
            # DR replicas are unmounted and cleaned up on demotion.
            self.library._unmount_orig_active_replica.assert_called_once_with(
                self.fake_replica, fake.VSERVER1)
            protocol_helper.cleanup_demoted_replica.assert_called_once_with(
                self.fake_replica, fake.SHARE['name'])
            self.assertEqual([], actual_replica_1['export_locations'])

        self.library._handle_qos_on_replication_change.assert_called_once()

    def test_promote_replica_cleanup_demoted_storage_error(self):
        # A StorageCommunicationException during demoted-replica cleanup is
        # logged (LOG.exception) but does not abort the promotion.
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        protocol_helper = mock.Mock()
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=protocol_helper))
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))
        self.mock_object(self.library, '_unmount_orig_active_replica')
        self.mock_object(self.library, '_handle_qos_on_replication_change')
        self.mock_object(self.library, '_is_flexgroup_pool',
                         mock.Mock(return_value=False))
        mock_dm_session = mock.Mock()
        self.mock_object(data_motion, "DataMotionSession",
                         mock.Mock(return_value=mock_dm_session))
        self.mock_object(mock_dm_session, 'get_vserver_from_share',
                         mock.Mock(return_value=fake.VSERVER1))
        self.mock_object(
            protocol_helper, 'cleanup_demoted_replica',
            mock.Mock(side_effect=exception.StorageCommunicationException))
        self.mock_object(self.library,
                         '_update_autosize_attributes_after_promote_replica')
        self.mock_object(share_types, 'get_extra_specs_from_share')
        self.mock_object(self.library, '_get_provisioning_options',
                         mock.Mock(return_value={}))

        mock_log = self.mock_object(lib_base.LOG, 'exception')
        self.library.promote_replica(
            None, [self.fake_replica, self.fake_replica_2],
            self.fake_replica_2, [], share_server=None)
        mock_dm_session.change_snapmirror_source.assert_called_once_with(
            self.fake_replica, self.fake_replica, self.fake_replica_2,
            mock.ANY, is_flexgroup=False
        )
        protocol_helper.cleanup_demoted_replica.assert_called_once_with(
            self.fake_replica, fake.SHARE['name'])
        mock_log.assert_called_once()

    def test_promote_replica_destination_unreachable(self):
        # If converting the destination replica raises a storage
        # communication error, only that replica is returned, marked error,
        # and neither unmount nor QoS handling is attempted.
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=mock.Mock()))
        self.mock_object(self.library, '_unmount_orig_active_replica')
        self.mock_object(self.library, '_handle_qos_on_replication_change')
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))
        self.mock_object(
            self.library, '_convert_destination_replica_to_independent',
            mock.Mock(side_effect=exception.StorageCommunicationException))
        self.mock_object(self.library,
                         '_update_autosize_attributes_after_promote_replica')
        self.mock_object(share_types, 'get_extra_specs_from_share')
        self.mock_object(self.library, '_get_provisioning_options',
                         mock.Mock(return_value={}))

        replicas = self.library.promote_replica(
            None, [self.fake_replica, self.fake_replica_2],
            self.fake_replica_2, [], share_server=None)

        self.assertEqual(1, len(replicas))
        actual_replica = replicas[0]
        self.assertEqual(constants.STATUS_ERROR,
                         actual_replica['replica_state'])
        self.assertEqual(constants.STATUS_ERROR,
                         actual_replica['status'])
        self.assertFalse(
            self.library._unmount_orig_active_replica.called)
        self.assertFalse(
            self.library._handle_qos_on_replication_change.called)

    def test_promote_replica_more_than_two_replicas(self):
        # With a third (out-of-sync) replica present, promotion must repoint
        # snapmirror sources for every non-promoted replica.
        fake_replica_3 = copy.deepcopy(self.fake_replica_2)
        fake_replica_3['id'] = fake.SHARE_ID3
        fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library, '_unmount_orig_active_replica')
        self.mock_object(self.library, '_handle_qos_on_replication_change')
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=mock.Mock()))
        self.mock_object(self.library, '_is_flexgroup_pool',
                         mock.Mock(return_value=False))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))
        mock_dm_session = mock.Mock()
        self.mock_object(data_motion, "DataMotionSession",
                         mock.Mock(return_value=mock_dm_session))
        self.mock_object(mock_dm_session, 'get_vserver_from_share',
                         mock.Mock(return_value=fake.VSERVER1))
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        self.mock_object(self.library,
                         '_update_autosize_attributes_after_promote_replica')
        self.mock_object(share_types, 'get_extra_specs_from_share')
        self.mock_object(self.library, '_get_provisioning_options',
                         mock.Mock(return_value={}))

        replicas = self.library.promote_replica(
            None, [self.fake_replica, self.fake_replica_2, fake_replica_3],
            self.fake_replica_2, [], share_server=None)

        # Both remaining replicas are re-sourced from the new active one.
        mock_dm_session.change_snapmirror_source.assert_has_calls([
            mock.call(fake_replica_3, self.fake_replica,
                      self.fake_replica_2, mock.ANY, is_flexgroup=False),
            mock.call(self.fake_replica, self.fake_replica,
                      self.fake_replica_2, mock.ANY, is_flexgroup=False)
        ], any_order=True)

        self.assertEqual(3, len(replicas))
        actual_replica_1 = list(filter(
            lambda x: x['id'] == self.fake_replica['id'], replicas))[0]
        self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
                         actual_replica_1['replica_state'])
        actual_replica_2 = list(filter(
            lambda x: x['id'] == self.fake_replica_2['id'], replicas))[0]
        self.assertEqual(constants.REPLICA_STATE_ACTIVE,
                         actual_replica_2['replica_state'])
        self.assertEqual('fake_export_location',
                         actual_replica_2['export_locations'])
        actual_replica_3 = list(filter(
            lambda x: x['id'] == fake_replica_3['id'], replicas))[0]
        self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
                         actual_replica_3['replica_state'])
        self.library._unmount_orig_active_replica.assert_called_once_with(
            self.fake_replica, fake.VSERVER1)
        self.library._handle_qos_on_replication_change.assert_called_once()

    def test_promote_replica_with_access_rules(self):
        # Supplied access rules must be re-applied on the newly promoted
        # replica via the protocol helper's update_access.
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library, '_unmount_orig_active_replica')
        self.mock_object(self.library, '_handle_qos_on_replication_change')
        mock_helper = mock.Mock()
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=mock_helper))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))
        self.mock_object(self.library, '_is_flexgroup_pool',
                         mock.Mock(return_value=False))
        mock_dm_session = mock.Mock()
        self.mock_object(data_motion, "DataMotionSession",
                         mock.Mock(return_value=mock_dm_session))
        self.mock_object(mock_dm_session, 'get_vserver_from_share',
                         mock.Mock(return_value=fake.VSERVER1))
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        self.mock_object(self.library,
                         '_update_autosize_attributes_after_promote_replica')
        self.mock_object(share_types, 'get_extra_specs_from_share')
        self.mock_object(self.library, '_get_provisioning_options',
                         mock.Mock(return_value={}))

        replicas = self.library.promote_replica(
            None, [self.fake_replica, self.fake_replica_2],
            self.fake_replica_2, [fake.SHARE_ACCESS], share_server=None)

        mock_dm_session.change_snapmirror_source.assert_has_calls([
            mock.call(self.fake_replica, self.fake_replica,
                      self.fake_replica_2, mock.ANY, is_flexgroup=False)
        ], any_order=True)

        self.assertEqual(2, len(replicas))
        share_name = self.library._get_backend_share_name(
            self.fake_replica_2['id'])
        mock_helper.update_access.assert_called_once_with(self.fake_replica_2,
                                                          share_name,
                                                          [fake.SHARE_ACCESS])
        self.library._unmount_orig_active_replica.assert_called_once_with(
            self.fake_replica, fake.VSERVER1)
        self.library._handle_qos_on_replication_change.assert_called_once()

    def test_unmount_orig_active_replica(self):
        # Unmounting the demoted replica completes without error and
        # returns None.
        self.mock_object(share_utils, 'extract_host', mock.Mock(
            return_value=fake.MANILA_HOST_NAME))
        self.mock_object(data_motion, 'get_client_for_backend')
        self.mock_object(self.library, '_get_backend_share_name', mock.Mock(
            return_value=fake.SHARE_NAME))

        result = self.library._unmount_orig_active_replica(fake.SHARE)
        self.assertIsNone(result)

    # No QoS work is needed when the share type has no qos spec, or when the
    # driver lacks cluster credentials.
    @ddt.data({'extra_specs': {'netapp:snapshot_policy': 'none'},
               'have_cluster_creds': True},
              {'extra_specs': {'netapp:snapshot_policy': 'none'},
               'have_cluster_creds': True},
              # Test Case 2 isn't possible input
              {'extra_specs': {'qos': True, 'netapp:maxiops': '3000'},
               'have_cluster_creds': False})
    @ddt.unpack
    def test_handle_qos_on_replication_change_nothing_to_handle(
            self, extra_specs, have_cluster_creds):
        self.library._have_cluster_creds = have_cluster_creds
        self.mock_object(lib_base.LOG, 'exception')
        self.mock_object(lib_base.LOG, 'info')
        self.mock_object(share_types, 'get_extra_specs_from_share',
                         mock.Mock(return_value=extra_specs))

        retval = self.library._handle_qos_on_replication_change(
            self.mock_dm_session, self.fake_replica_2, self.fake_replica,
            True, share_server=fake.SHARE_SERVER)

        self.assertIsNone(retval)
        lib_base.LOG.exception.assert_not_called()
        lib_base.LOG.info.assert_not_called()
        # Extra specs are only consulted when cluster creds are available.
        if have_cluster_creds:
            share_types.get_extra_specs_from_share.assert_called_once_with(
                self.fake_replica)
        else:
            share_types.get_extra_specs_from_share.assert_not_called()

    def test_handle_qos_on_replication_change_exception(self):
        # A NaApiError while modifying the QoS policy group is logged and
        # swallowed; the volume's QoS group is never set.
        self.library._have_cluster_creds = True
        extra_specs = {'qos': True, fake.QOS_EXTRA_SPEC: '3000'}
        vserver_client = mock.Mock()
        self.mock_object(lib_base.LOG, 'exception')
        self.mock_object(lib_base.LOG, 'info')
        self.mock_object(share_types, 'get_extra_specs_from_share',
                         mock.Mock(return_value=extra_specs))
        self.mock_object(self.library, '_get_vserver', mock.Mock(
            return_value=(fake.VSERVER1, vserver_client)))
        self.mock_object(self.library._client, 'qos_policy_group_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library._client, 'qos_policy_group_modify',
                         mock.Mock(side_effect=netapp_api.NaApiError))

        retval = self.library._handle_qos_on_replication_change(
            self.mock_dm_session, self.fake_replica_2, self.fake_replica,
            True, share_server=fake.SHARE_SERVER)

        self.assertIsNone(retval)
        (self.mock_dm_session.remove_qos_on_old_active_replica
         .assert_called_once_with(self.fake_replica))
        lib_base.LOG.exception.assert_called_once()
        lib_base.LOG.info.assert_not_called()
        vserver_client.set_qos_policy_group_for_volume.assert_not_called()

    # Exercised for both DR (is_dr=True) and readable (is_dr=False)
    # replication: only DR removes QoS from the old active replica.
    @ddt.data(True, False)
    def test_handle_qos_on_replication_change_modify_existing_policy(self,
                                                                     is_dr):
        self.library._have_cluster_creds = True
        extra_specs = {'qos': True, fake.QOS_EXTRA_SPEC: '3000'}
        vserver_client = mock.Mock()
        volume_name_on_backend = self.library._get_backend_share_name(
            self.fake_replica_2['id'])
        self.mock_object(lib_base.LOG, 'exception')
        self.mock_object(lib_base.LOG, 'info')
        self.mock_object(share_types, 'get_extra_specs_from_share',
                         mock.Mock(return_value=extra_specs))
        self.mock_object(self.library, '_get_vserver', mock.Mock(
            return_value=(fake.VSERVER1, vserver_client)))
        self.mock_object(self.library._client, 'qos_policy_group_exists',
                         mock.Mock(return_value=True))
        self.mock_object(self.library._client, 'qos_policy_group_modify')
        self.mock_object(self.library, '_create_qos_policy_group')

        retval = self.library._handle_qos_on_replication_change(
            self.mock_dm_session, self.fake_replica_2, self.fake_replica,
            is_dr, share_server=fake.SHARE_SERVER)

        self.assertIsNone(retval)
        if is_dr:
            (self.mock_dm_session.remove_qos_on_old_active_replica.
             assert_called_once_with(self.fake_replica))
        else:
            (self.mock_dm_session.remove_qos_on_old_active_replica.
             assert_not_called())
        # The pre-existing policy group is modified in place; no new group
        # is created.
        self.library._client.qos_policy_group_modify.assert_called_once_with(
            'qos_' + volume_name_on_backend, '3000iops')
        vserver_client.set_qos_policy_group_for_volume.assert_called_once_with(
            volume_name_on_backend, 'qos_' + volume_name_on_backend)
        self.library._create_qos_policy_group.assert_not_called()

        lib_base.LOG.exception.assert_not_called()
        lib_base.LOG.info.assert_called_once()

    def test_handle_qos_on_replication_change_create_new_policy(self):
        # When no policy group exists for the promoted replica, a new one is
        # created from the share's qos extra spec instead of modifying one.
        self.library._have_cluster_creds = True
        extra_specs = {'qos': True, fake.QOS_EXTRA_SPEC: '3000'}
        vserver_client = mock.Mock()
        self.mock_object(lib_base.LOG, 'exception')
        self.mock_object(lib_base.LOG, 'info')
        self.mock_object(share_types, 'get_extra_specs_from_share',
                         mock.Mock(return_value=extra_specs))
        self.mock_object(self.library, '_get_vserver', mock.Mock(
            return_value=(fake.VSERVER1, vserver_client)))
        self.mock_object(self.library._client, 'qos_policy_group_exists',
                         mock.Mock(return_value=False))
        self.mock_object(self.library._client, 'qos_policy_group_modify')
        self.mock_object(self.library, '_create_qos_policy_group')

        retval = self.library._handle_qos_on_replication_change(
            self.mock_dm_session, self.fake_replica_2, self.fake_replica,
            True, share_server=fake.SHARE_SERVER)

        self.assertIsNone(retval)
        self.library._create_qos_policy_group.assert_called_once_with(
            self.fake_replica_2, fake.VSERVER1, {'maxiops': '3000'})
        self.library._client.qos_policy_group_modify.assert_not_called()
        lib_base.LOG.exception.assert_not_called()
        lib_base.LOG.info.assert_called_once()

    def test_convert_destination_replica_to_independent(self):
        # Happy path: snapmirror is updated then broken, and the converted
        # replica becomes active with a fresh export location.
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=mock.Mock()))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))

        replica = self.library._convert_destination_replica_to_independent(
            None, self.mock_dm_session,
            self.fake_replica, self.fake_replica_2, [], share_server=None)

        self.mock_dm_session.update_snapmirror.assert_called_once_with(
            self.fake_replica, self.fake_replica_2)
        self.mock_dm_session.break_snapmirror.assert_called_once_with(
            self.fake_replica, self.fake_replica_2, quiesce_wait_time=None)
        self.assertEqual('fake_export_location', replica['export_locations'])
        self.assertEqual(constants.REPLICA_STATE_ACTIVE,
                         replica['replica_state'])

    def test_convert_destination_replica_to_independent_update_failed(self):
        # A failed final snapmirror update (storage unreachable) does not
        # abort the conversion: the break still happens and the replica is
        # still promoted to active.
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=mock.Mock()))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))
        self.mock_object(
            self.mock_dm_session, 'update_snapmirror',
            mock.Mock(side_effect=exception.StorageCommunicationException))

        replica = self.library._convert_destination_replica_to_independent(
            None, self.mock_dm_session,
            self.fake_replica, self.fake_replica_2, [], share_server=None)

        self.mock_dm_session.update_snapmirror.assert_called_once_with(
            self.fake_replica, self.fake_replica_2)
        self.mock_dm_session.break_snapmirror.assert_called_once_with(
            self.fake_replica, self.fake_replica_2, quiesce_wait_time=None)
        self.assertEqual('fake_export_location', replica['export_locations'])
        self.assertEqual(constants.REPLICA_STATE_ACTIVE,
                         replica['replica_state'])

    def test_promote_replica_fail_to_set_access_rules(self):
        # If applying access rules raises, promotion still completes but the
        # new active replica reports its rules as "syncing".
        fake_helper = mock.Mock()
        fake_helper.update_access.side_effect = Exception
        fake_access_rules = [
            {'access_to': "0.0.0.0",
             'access_level': constants.ACCESS_LEVEL_RO},
            {'access_to': "10.10.10.10",
             'access_level': constants.ACCESS_LEVEL_RW},
        ]
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library, '_handle_qos_on_replication_change')
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=fake_helper))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))
        self.mock_object(self.library, '_is_flexgroup_pool',
                         mock.Mock(return_value=False))
        self.mock_object(self.library, '_is_readable_replica',
                         mock.Mock(return_value=False))
        self.mock_object(self.library,
                         '_update_autosize_attributes_after_promote_replica')
        self.mock_object(share_types, 'get_extra_specs_from_share')
        self.mock_object(self.library, '_get_provisioning_options',
                         mock.Mock(return_value={}))

        replicas = self.library.promote_replica(
            None, [self.fake_replica, self.fake_replica_2],
            self.fake_replica_2, fake_access_rules, share_server=None)

        self.mock_dm_session.change_snapmirror_source.assert_called_once_with(
            self.fake_replica, self.fake_replica, self.fake_replica_2,
            mock.ANY, is_flexgroup=False
        )

        self.assertEqual(2, len(replicas))
        actual_replica_1 = list(filter(
            lambda x: x['id'] == self.fake_replica['id'], replicas))[0]
        self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
                         actual_replica_1['replica_state'])
        actual_replica_2 = list(filter(
            lambda x: x['id'] == self.fake_replica_2['id'], replicas))[0]
        self.assertEqual(constants.REPLICA_STATE_ACTIVE,
                         actual_replica_2['replica_state'])
        self.assertEqual('fake_export_location',
                         actual_replica_2['export_locations'])
        self.assertEqual(constants.SHARE_INSTANCE_RULES_SYNCING,
                         actual_replica_2['access_rules_status'])

        self.library._handle_qos_on_replication_change.assert_called_once()

    def test_convert_destination_replica_to_independent_with_access_rules(
            self):
        # Failing update_access during conversion leaves the replica active
        # but with access_rules_status "syncing" (asserted in the block that
        # follows this setup).
        fake_helper = mock.Mock()
        fake_helper.update_access.side_effect = Exception
        fake_access_rules = [
            {'access_to': "0.0.0.0",
             'access_level': constants.ACCESS_LEVEL_RO},
            {'access_to': "10.10.10.10",
             'access_level': constants.ACCESS_LEVEL_RW},
        ]
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=fake_helper))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))

        replica = self.library._convert_destination_replica_to_independent(
            None, self.mock_dm_session,
            self.fake_replica, self.fake_replica_2, fake_access_rules,
            share_server=None)

        self.mock_dm_session.update_snapmirror.assert_called_once_with(
            self.fake_replica, self.fake_replica_2)
        self.mock_dm_session.break_snapmirror.assert_called_once_with(
            self.fake_replica, self.fake_replica_2, quiesce_wait_time=None)
        self.assertEqual('fake_export_location', replica['export_locations'])
        self.assertEqual(constants.REPLICA_STATE_ACTIVE,
                         replica['replica_state'])
        self.assertEqual(constants.SHARE_INSTANCE_RULES_SYNCING,
                         replica['access_rules_status'])

    def test_convert_destination_replica_to_independent_failed_access_rules(
            self):
        # When update_access succeeds, the helper is driven through
        # set_client/update_access and the rules end up "active".
        fake_helper = mock.Mock()
        fake_access_rules = [
            {'access_to': "0.0.0.0",
             'access_level': constants.ACCESS_LEVEL_RO},
            {'access_to': "10.10.10.10",
             'access_level': constants.ACCESS_LEVEL_RW},
        ]
        self.mock_object(self.library,
                         '_get_vserver',
                         mock.Mock(return_value=(fake.VSERVER1, mock.Mock())))
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=fake_helper))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))

        replica = self.library._convert_destination_replica_to_independent(
            None, self.mock_dm_session,
            self.fake_replica, self.fake_replica_2, fake_access_rules,
            share_server=None)

        self.mock_dm_session.update_snapmirror.assert_called_once_with(
            self.fake_replica, self.fake_replica_2)
        self.mock_dm_session.break_snapmirror.assert_called_once_with(
            self.fake_replica, self.fake_replica_2, quiesce_wait_time=None)
        fake_helper.assert_has_calls([
            mock.call.set_client(mock.ANY),
            mock.call.update_access(mock.ANY, mock.ANY, fake_access_rules),
        ])
        self.assertEqual('fake_export_location', replica['export_locations'])
        self.assertEqual(constants.REPLICA_STATE_ACTIVE,
                         replica['replica_state'])
        self.assertEqual(constants.STATUS_ACTIVE,
                         replica['access_rules_status'])

    # Exercised for DR (is_dr=True: no export, no remount) and readable
    # (is_dr=False: export created and replica remounted with a timeout).
    @ddt.data(True, False)
    def test_safe_change_replica_source(self, is_dr):
        fake_replica_3 = copy.deepcopy(self.fake_replica_2)
        fake_replica_3['id'] = fake.SHARE_ID3
        fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC
        protocol_helper = mock.Mock()
        self.mock_object(self.library,
                         '_get_helper',
                         mock.Mock(return_value=protocol_helper))
        self.mock_object(self.library, '_create_export',
                         mock.Mock(return_value='fake_export_location'))
        self.mock_object(self.library, '_unmount_orig_active_replica')
        self.mock_object(self.library, '_handle_qos_on_replication_change')
        mock_dm_session = mock.Mock()
        mock_dm_session.wait_for_mount_replica.return_value = None
        self.mock_object(mock_dm_session, 'get_backend_info_for_share',
                         mock.Mock(return_value=(fake.SHARE_NAME,
                                                 fake.VSERVER1,
                                                 fake.BACKEND_NAME)))
        mock_client = mock.Mock()
        self.mock_object(data_motion, "get_client_for_backend",
                         mock.Mock(return_value=mock_client))
        mock_backend_config = fake.get_config_cmode()
        mock_backend_config.netapp_mount_replica_timeout = 30
        self.mock_object(data_motion, 'get_backend_configuration',
                         mock.Mock(return_value=mock_backend_config))
        self.mock_object(self.library, '_get_api_client_for_backend',
                         mock.Mock(return_value=mock_client))

        replica = self.library._safe_change_replica_source(
            mock_dm_session, self.fake_replica, self.fake_replica_2,
            fake_replica_3, [self.fake_replica, self.fake_replica_2,
                             fake_replica_3], is_dr, [fake.SHARE_ACCESS]
        )

        self.assertEqual(constants.REPLICA_STATE_OUT_OF_SYNC,
                         replica['replica_state'])
        if is_dr:
            self.assertEqual([], replica['export_locations'])
            mock_dm_session.wait_for_mount_replica.assert_not_called()
        else:
            self.assertEqual('fake_export_location',
                             replica['export_locations'])
            mock_dm_session.wait_for_mount_replica.assert_called_once_with(
                mock_client, fake.SHARE_NAME, timeout=30)

    # Readable-replica recovery failure modes: export creation fails, or the
    # remount fails; either way the replica must end up in error state.
    @ddt.data({'fail_create_export': False, 'fail_mount': True},
              {'fail_create_export': True,
'fail_mount': False}) @ddt.unpack def test_safe_change_replica_source_fail_recover_readable( self, fail_create_export, fail_mount): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC protocol_helper = mock.Mock() self.mock_object(self.library, '_get_helper', mock.Mock(return_value=protocol_helper)) if fail_create_export: self.mock_object(self.library, '_create_export', mock.Mock(side_effect=netapp_api.NaApiError())) else: self.mock_object(self.library, '_create_export', mock.Mock(return_value='fake_export_location')) self.mock_object(self.library, '_unmount_orig_active_replica') self.mock_object(self.library, '_handle_qos_on_replication_change') mock_dm_session = mock.Mock() if fail_mount: mock_dm_session.wait_for_mount_replica.side_effect = ( netapp_api.NaApiError()) else: mock_dm_session.wait_for_mount_replica.return_value = None self.mock_object(mock_dm_session, 'get_backend_info_for_share', mock.Mock(return_value=(fake.SHARE_NAME, fake.VSERVER1, fake.BACKEND_NAME))) mock_client = mock.Mock() self.mock_object(data_motion, "get_client_for_backend", mock.Mock(return_value=mock_client)) mock_backend_config = fake.get_config_cmode() mock_backend_config.netapp_mount_replica_timeout = 30 self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) replica = self.library._safe_change_replica_source( mock_dm_session, self.fake_replica, self.fake_replica_2, fake_replica_3, [self.fake_replica, self.fake_replica_2, fake_replica_3], False, [fake.SHARE_ACCESS] ) self.assertEqual(constants.STATUS_ERROR, replica['replica_state']) self.assertEqual(constants.STATUS_ERROR, replica['status']) def test_safe_change_replica_source_destination_unreachable(self): self.mock_dm_session.change_snapmirror_source.side_effect = ( exception.StorageCommunicationException ) fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] 
= fake.SHARE_ID3 fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC replica = self.library._safe_change_replica_source( self.mock_dm_session, self.fake_replica, self.fake_replica_2, fake_replica_3, [self.fake_replica, self.fake_replica_2, fake_replica_3], True, [], ) self.assertEqual([], replica['export_locations']) self.assertEqual(constants.STATUS_ERROR, replica['replica_state']) self.assertEqual(constants.STATUS_ERROR, replica['status']) def test_safe_change_replica_source_error(self): self.mock_dm_session.change_snapmirror_source.side_effect = ( netapp_api.NaApiError(code=0) ) fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 fake_replica_3['replica_state'] = constants.REPLICA_STATE_OUT_OF_SYNC replica = self.library._safe_change_replica_source( self.mock_dm_session, self.fake_replica, self.fake_replica_2, fake_replica_3, [self.fake_replica, self.fake_replica_2, fake_replica_3], True, [] ) self.assertEqual([], replica['export_locations']) self.assertEqual(constants.STATUS_ERROR, replica['replica_state']) def test_create_replicated_snapshot(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = uuidutils.generate_uuid() fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = uuidutils.generate_uuid() fake_snapshot_3['share_id'] = fake_replica_3['id'] snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) model_list = self.library.create_replicated_snapshot( self.context, replica_list, snapshot_list, 
share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client.create_snapshot.assert_called_once_with(share_name, snapshot_name) self.assertEqual(3, len(model_list)) for snapshot in model_list: self.assertEqual(snapshot['provider_location'], snapshot_name) actual_active_snapshot = list(filter( lambda x: x['id'] == fake_snapshot['id'], model_list))[0] self.assertEqual(constants.STATUS_AVAILABLE, actual_active_snapshot['status']) actual_non_active_snapshot_list = list(filter( lambda x: x['id'] != fake_snapshot['id'], model_list)) for snapshot in actual_non_active_snapshot_list: self.assertEqual(constants.STATUS_CREATING, snapshot['status']) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True ) def test_create_replicated_snapshot_with_creating_replica(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 fake_replica_3['host'] = None replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = uuidutils.generate_uuid() fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = uuidutils.generate_uuid() fake_snapshot_3['share_id'] = fake_replica_3['id'] snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) model_list = self.library.create_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) share_name = self.library._get_backend_share_name( 
fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client.create_snapshot.assert_called_once_with(share_name, snapshot_name) self.assertEqual(3, len(model_list)) for snapshot in model_list: self.assertEqual(snapshot['provider_location'], snapshot_name) actual_active_snapshot = list(filter( lambda x: x['id'] == fake_snapshot['id'], model_list))[0] self.assertEqual(constants.STATUS_AVAILABLE, actual_active_snapshot['status']) actual_non_active_snapshot_list = list(filter( lambda x: x['id'] != fake_snapshot['id'], model_list)) for snapshot in actual_non_active_snapshot_list: self.assertEqual(constants.STATUS_CREATING, snapshot['status']) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2)], any_order=True ) @ddt.data( netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND), netapp_api.NaApiError(message='not initialized')) def test_create_replicated_snapshot_no_snapmirror(self, api_exception): self.mock_dm_session.update_snapmirror.side_effect = [ None, api_exception ] fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = uuidutils.generate_uuid() fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = uuidutils.generate_uuid() fake_snapshot_3['share_id'] = fake_replica_3['id'] snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) model_list = self.library.create_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) share_name = 
self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client.create_snapshot.assert_called_once_with(share_name, snapshot_name) self.assertEqual(3, len(model_list)) for snapshot in model_list: self.assertEqual(snapshot['provider_location'], snapshot_name) actual_active_snapshot = list(filter( lambda x: x['id'] == fake_snapshot['id'], model_list))[0] self.assertEqual(constants.STATUS_AVAILABLE, actual_active_snapshot['status']) actual_non_active_snapshot_list = list(filter( lambda x: x['id'] != fake_snapshot['id'], model_list)) for snapshot in actual_non_active_snapshot_list: self.assertEqual(constants.STATUS_CREATING, snapshot['status']) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True ) def test_create_replicated_snapshot_update_error(self): self.mock_dm_session.update_snapmirror.side_effect = [ None, netapp_api.NaApiError() ] fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = uuidutils.generate_uuid() fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = uuidutils.generate_uuid() fake_snapshot_3['share_id'] = fake_replica_3['id'] snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.assertRaises(netapp_api.NaApiError, self.library.create_replicated_snapshot, self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) def 
test_delete_replicated_snapshot(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = uuidutils.generate_uuid() fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_2['provider_location'] = snapshot_name fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = uuidutils.generate_uuid() fake_snapshot_3['share_id'] = fake_replica_3['id'] fake_snapshot_3['provider_location'] = snapshot_name snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.library.delete_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.delete_snapshot.assert_called_once_with(share_name, snapshot_name) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True ) def test_delete_replicated_snapshot_replica_still_creating(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 fake_replica_3['host'] = None replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = 
self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = uuidutils.generate_uuid() fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_2['provider_location'] = snapshot_name fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = uuidutils.generate_uuid() fake_snapshot_3['share_id'] = fake_replica_3['id'] fake_snapshot_3['provider_location'] = snapshot_name snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.library.delete_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.delete_snapshot.assert_called_once_with(share_name, snapshot_name) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2)], any_order=True ) @ddt.data( netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND), netapp_api.NaApiError(message='not initialized')) def test_delete_replicated_snapshot_missing_snapmirror(self, api_exception): self.mock_dm_session.update_snapmirror.side_effect = [ None, api_exception ] fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot['busy'] = False fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = uuidutils.generate_uuid() fake_snapshot_2['share_id'] = 
self.fake_replica_2['id'] fake_snapshot_2['provider_location'] = snapshot_name fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = uuidutils.generate_uuid() fake_snapshot_3['share_id'] = fake_replica_3['id'] fake_snapshot_3['provider_location'] = snapshot_name snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake_snapshot self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.library.delete_replicated_snapshot( self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.delete_snapshot.assert_called_once_with(share_name, snapshot_name) self.mock_dm_session.update_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True ) def test_delete_replicated_snapshot_update_error(self): self.mock_dm_session.update_snapmirror.side_effect = [ None, netapp_api.NaApiError() ] fake_replica_3 = copy.deepcopy(self.fake_replica_2) fake_replica_3['id'] = fake.SHARE_ID3 replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot['busy'] = False fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = uuidutils.generate_uuid() fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_2['provider_location'] = snapshot_name fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = uuidutils.generate_uuid() fake_snapshot_3['share_id'] = fake_replica_3['id'] fake_snapshot_3['provider_location'] = snapshot_name snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] vserver_client = mock.Mock() 
vserver_client.get_snapshot.return_value = fake_snapshot self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.assertRaises(netapp_api.NaApiError, self.library.delete_replicated_snapshot, self.context, replica_list, snapshot_list, share_server=fake.SHARE_SERVER) def test_update_replicated_snapshot_still_creating(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica_2['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica_2, [fake_snapshot], fake_snapshot) self.assertIsNone(model_update) self.mock_dm_session.update_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2 ) def test_update_replicated_snapshot_still_creating_no_host(self): self.fake_replica_2['host'] = None vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica_2['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica_2, [fake_snapshot], fake_snapshot) self.assertIsNone(model_update) self.mock_dm_session.update_snapmirror.assert_called_once_with( 
self.fake_replica, self.fake_replica_2 ) @ddt.data( netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND), netapp_api.NaApiError(message='not initialized')) def test_update_replicated_snapshot_no_snapmirror(self, api_exception): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_dm_session.update_snapmirror.side_effect = api_exception self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica_2['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica_2, [fake_snapshot], fake_snapshot) self.assertIsNone(model_update) self.mock_dm_session.update_snapmirror.assert_called_once_with( self.fake_replica, self.fake_replica_2 ) def test_update_replicated_snapshot_update_error(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_dm_session.update_snapmirror.side_effect = ( netapp_api.NaApiError() ) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica_2['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name self.assertRaises(netapp_api.NaApiError, self.library.update_replicated_snapshot, replica_list, self.fake_replica_2, [fake_snapshot], fake_snapshot) def test_update_replicated_snapshot_still_deleting(self): vserver_client = mock.Mock() 
vserver_client.snapshot_exists.return_value = True vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_DELETING fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica, [fake_snapshot], fake_snapshot) self.assertIsNone(model_update) def test_update_replicated_snapshot_created(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = True self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica, [fake_snapshot], fake_snapshot) self.assertEqual(constants.STATUS_AVAILABLE, model_update['status']) self.assertEqual(snapshot_name, model_update['provider_location']) def test_update_replicated_snapshot_created_no_provider_location(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = True self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica, self.fake_replica_2] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_ACTIVE fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = 
self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['status'] = constants.STATUS_CREATING fake_snapshot_2['share_id'] = self.fake_replica_2['id'] model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica_2, [fake_snapshot, fake_snapshot_2], fake_snapshot_2) self.assertEqual(constants.STATUS_AVAILABLE, model_update['status']) self.assertEqual(snapshot_name, model_update['provider_location']) def test_update_replicated_snapshot_deleted(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = False self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_DELETING fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name self.assertRaises(exception.SnapshotResourceNotFound, self.library.update_replicated_snapshot, replica_list, self.fake_replica, [fake_snapshot], fake_snapshot) def test_update_replicated_snapshot_no_provider_locations(self): vserver_client = mock.Mock() vserver_client.snapshot_exists.return_value = True self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) replica_list = [self.fake_replica] fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['status'] = constants.STATUS_CREATING fake_snapshot['share_id'] = self.fake_replica['id'] fake_snapshot['provider_location'] = None model_update = self.library.update_replicated_snapshot( replica_list, self.fake_replica, [fake_snapshot], fake_snapshot) self.assertIsNone(model_update) def _get_fake_replicas_and_snapshots(self): fake_replica_3 = copy.deepcopy(self.fake_replica_2) 
fake_replica_3['id'] = fake.SHARE_ID3 fake_snapshot = copy.deepcopy(fake.SNAPSHOT) fake_snapshot['share_id'] = self.fake_replica['id'] snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) fake_snapshot['provider_location'] = snapshot_name fake_snapshot_2 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_2['id'] = uuidutils.generate_uuid() fake_snapshot_2['share_id'] = self.fake_replica_2['id'] fake_snapshot_2['provider_location'] = snapshot_name fake_snapshot_3 = copy.deepcopy(fake.SNAPSHOT) fake_snapshot_3['id'] = uuidutils.generate_uuid() fake_snapshot_3['share_id'] = fake_replica_3['id'] fake_snapshot_3['provider_location'] = snapshot_name replica_list = [self.fake_replica, self.fake_replica_2, fake_replica_3] snapshot_list = [fake_snapshot, fake_snapshot_2, fake_snapshot_3] return replica_list, snapshot_list @ddt.data(True, False) def test_revert_to_replicated_snapshot(self, use_snap_provider_location): replica_list, snapshot_list = self._get_fake_replicas_and_snapshots() fake_replica, fake_replica_2, fake_replica_3 = replica_list fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list if not use_snap_provider_location: del fake_snapshot['provider_location'] del fake_snapshot_2['provider_location'] del fake_snapshot_3['provider_location'] share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap'] self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.library.revert_to_replicated_snapshot( self.context, self.fake_replica, replica_list, fake_snapshot, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.get_snapshot.assert_called_once_with( share_name, snapshot_name) 
vserver_client.list_snapmirror_snapshots.assert_called_once_with( share_name) vserver_client.delete_snapshot.assert_called_once_with( share_name, 'sm_snap', ignore_owners=True) vserver_client.restore_snapshot.assert_called_once_with( share_name, snapshot_name) self.mock_dm_session.break_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2, mount=False), mock.call(self.fake_replica, fake_replica_3, mount=False)], any_order=True) self.mock_dm_session.resync_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True) def test_revert_to_replicated_snapshot_not_found(self): replica_list, snapshot_list = self._get_fake_replicas_and_snapshots() fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client = mock.Mock() vserver_client.get_snapshot.side_effect = netapp_api.NaApiError vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap'] self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.assertRaises( netapp_api.NaApiError, self.library.revert_to_replicated_snapshot, self.context, self.fake_replica, replica_list, fake_snapshot, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.get_snapshot.assert_called_once_with( share_name, snapshot_name) self.assertFalse(vserver_client.list_snapmirror_snapshots.called) self.assertFalse(vserver_client.delete_snapshot.called) self.assertFalse(vserver_client.restore_snapshot.called) self.assertFalse(self.mock_dm_session.break_snapmirror.called) self.assertFalse(self.mock_dm_session.resync_snapmirror.called) def test_revert_to_replicated_snapshot_snapmirror_break_error(self): replica_list, snapshot_list = self._get_fake_replicas_and_snapshots() fake_snapshot, fake_snapshot_2, 
fake_snapshot_3 = snapshot_list vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap'] self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.break_snapmirror.side_effect = ( netapp_api.NaApiError) self.assertRaises( netapp_api.NaApiError, self.library.revert_to_replicated_snapshot, self.context, self.fake_replica, replica_list, fake_snapshot, snapshot_list, share_server=fake.SHARE_SERVER) def test_revert_to_replicated_snapshot_snapmirror_break_not_found(self): replica_list, snapshot_list = self._get_fake_replicas_and_snapshots() fake_replica, fake_replica_2, fake_replica_3 = replica_list fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap'] self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.break_snapmirror.side_effect = ( netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)) self.library.revert_to_replicated_snapshot( self.context, self.fake_replica, replica_list, fake_snapshot, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.get_snapshot.assert_called_once_with( share_name, snapshot_name) vserver_client.list_snapmirror_snapshots.assert_called_once_with( share_name) vserver_client.delete_snapshot.assert_called_once_with( share_name, 'sm_snap', ignore_owners=True) vserver_client.restore_snapshot.assert_called_once_with( share_name, snapshot_name) self.mock_dm_session.break_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2, mount=False), 
mock.call(self.fake_replica, fake_replica_3, mount=False)], any_order=True) self.mock_dm_session.resync_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True) def test_revert_to_replicated_snapshot_snapmirror_resync_error(self): replica_list, snapshot_list = self._get_fake_replicas_and_snapshots() fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap'] self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.resync_snapmirror.side_effect = ( netapp_api.NaApiError) self.assertRaises( netapp_api.NaApiError, self.library.revert_to_replicated_snapshot, self.context, self.fake_replica, replica_list, fake_snapshot, snapshot_list, share_server=fake.SHARE_SERVER) def test_revert_to_replicated_snapshot_snapmirror_resync_not_found(self): replica_list, snapshot_list = self._get_fake_replicas_and_snapshots() fake_replica, fake_replica_2, fake_replica_3 = replica_list fake_snapshot, fake_snapshot_2, fake_snapshot_3 = snapshot_list share_name = self.library._get_backend_share_name( fake_snapshot['share_id']) snapshot_name = self.library._get_backend_snapshot_name( fake_snapshot['id']) vserver_client = mock.Mock() vserver_client.get_snapshot.return_value = fake.CDOT_SNAPSHOT vserver_client.list_snapmirror_snapshots.return_value = ['sm_snap'] self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_dm_session.resync_snapmirror.side_effect = ( netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)) self.library.revert_to_replicated_snapshot( self.context, self.fake_replica, replica_list, fake_snapshot, snapshot_list, share_server=fake.SHARE_SERVER) vserver_client.get_snapshot.assert_called_once_with( 
share_name, snapshot_name) vserver_client.list_snapmirror_snapshots.assert_called_once_with( share_name) vserver_client.delete_snapshot.assert_called_once_with( share_name, 'sm_snap', ignore_owners=True) vserver_client.restore_snapshot.assert_called_once_with( share_name, snapshot_name) self.mock_dm_session.break_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2, mount=False), mock.call(self.fake_replica, fake_replica_3, mount=False)], any_order=True) self.mock_dm_session.resync_snapmirror.assert_has_calls( [mock.call(self.fake_replica, self.fake_replica_2), mock.call(self.fake_replica, fake_replica_3)], any_order=True) @ddt.data( {'replication_type': constants.REPLICATION_TYPE_READABLE, 'is_readable': True}, {'replication_type': constants.REPLICATION_TYPE_DR, 'is_readable': False}, {'replication_type': constants.REPLICATION_TYPE_WRITABLE, 'is_readable': False}, {'replication_type': None, 'is_readable': False}) @ddt.unpack def test__is_readable_replica(self, replication_type, is_readable): extra_specs = {} if replication_type: extra_specs['replication_type'] = replication_type mock_get_extra_spec = self.mock_object( share_types, 'get_extra_specs_from_share', mock.Mock(return_value=extra_specs)) result = self.library._is_readable_replica(fake.SHARE) self.assertEqual(is_readable, result) mock_get_extra_spec.assert_called_once_with(fake.SHARE) def test_migration_check_compatibility_no_cluster_credentials(self): self.library._have_cluster_creds = False self.mock_object(data_motion, 'get_backend_configuration') mock_warning_log = self.mock_object(lib_base.LOG, 'warning') migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=None, destination_share_server=fake.SHARE_SERVER) expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_metadata': False, 'preserve_snapshots': False, } 
self.assertDictEqual(expected_compatibility, migration_compatibility) mock_warning_log.assert_called_once() self.assertFalse(data_motion.get_backend_configuration.called) @ddt.data( {'src_flexgroup': True, 'dest_flexgroup': False}, {'src_flexgroup': False, 'dest_flexgroup': True}) @ddt.unpack def test_migration_check_compatibility_flexgroup(self, src_flexgroup, dest_flexgroup): self.library._have_cluster_creds = True mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=dest_flexgroup)) mock_exception_log = self.mock_object(lib_base.LOG, 'exception') self.mock_object(share_utils, 'extract_host', mock.Mock(return_value=fake.POOL_NAME)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=src_flexgroup)) migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=None, destination_share_server=fake.SHARE_SERVER) expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_metadata': False, 'preserve_snapshots': False, } self.assertDictEqual(expected_compatibility, migration_compatibility) mock_exception_log.assert_called_once() self.library._is_flexgroup_pool.assert_called_once_with(fake.POOL_NAME) if src_flexgroup: self.library.is_flexgroup_destination_host.assert_not_called() @ddt.data((None, exception.NetAppException), (exception.Invalid, None)) @ddt.unpack def test_migration_check_compatibility_extra_specs_invalid( self, side_effect_1, side_effect_2): self.library._have_cluster_creds = True mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', 
mock.Mock(return_value=False)) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock( side_effect=[ 'destination_backend', 'destination_pool', 'source_pool'])) mock_exception_log = self.mock_object(lib_base.LOG, 'exception') self.mock_object(share_types, 'get_extra_specs_from_share') self.mock_object(self.library, '_check_extra_specs_validity', mock.Mock(side_effect=side_effect_1)) self.mock_object(self.library, '_check_aggregate_extra_specs_validity', mock.Mock(side_effect=side_effect_2)) self.mock_object(data_motion, 'get_backend_configuration') migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, destination_share_server=None) expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_metadata': False, 'preserve_snapshots': False, } self.assertDictEqual(expected_compatibility, migration_compatibility) mock_exception_log.assert_called_once() self.assertFalse(data_motion.get_backend_configuration.called) def test_migration_check_compatibility_invalid_qos_configuration(self): self.library._have_cluster_creds = True mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock( side_effect=[ 'destination_backend', 'destination_pool', 'source_pool'])) mock_exception_log = self.mock_object(lib_base.LOG, 'exception') self.mock_object(share_types, 'get_extra_specs_from_share') self.mock_object(self.library, 
'_check_extra_specs_validity') self.mock_object( self.library, '_get_provisioning_options', mock.Mock(return_value=fake.PROVISIONING_OPTS_WITH_ADAPT_QOS)) self.mock_object(self.library, '_get_normalized_qos_specs', mock.Mock(return_value=fake.QOS_NORMALIZED_SPEC)) migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, destination_share_server=None) expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_metadata': False, 'preserve_snapshots': False, } self.assertDictEqual(expected_compatibility, migration_compatibility) mock_exception_log.assert_called_once() def test_migration_check_compatibility_destination_not_configured(self): self.library._have_cluster_creds = True mock_dm = mock.Mock() mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object( data_motion, 'get_backend_configuration', mock.Mock(side_effect=exception.BadConfigurationException( reason='fake_reason'))) self.mock_object(self.library, '_get_vserver') mock_exception_log = self.mock_object(lib_base.LOG, 'exception') self.mock_object(share_utils, 'extract_host', mock.Mock( return_value='destination_backend')) self.mock_object(share_types, 'get_extra_specs_from_share') self.mock_object(self.library, '_check_extra_specs_validity') self.mock_object(self.library, '_check_aggregate_extra_specs_validity') self.mock_object(self.library, '_get_provisioning_options', mock.Mock(return_value={})) self.mock_object(self.library, '_get_normalized_qos_specs') self.mock_object(self.library, 
'validate_provisioning_options_for_share') mock_vserver_compatibility_check = self.mock_object( self.library, '_check_destination_vserver_for_vol_move') self.mock_object(self.library, '_get_dest_flexvol_encryption_value', mock.Mock(return_value=False)) migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, destination_share_server=None) expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_metadata': False, 'preserve_snapshots': False, } self.assertDictEqual(expected_compatibility, migration_compatibility) mock_exception_log.assert_called_once() data_motion.get_backend_configuration.assert_called_once_with( 'destination_backend') self.assertFalse(mock_vserver_compatibility_check.called) self.assertFalse(self.library._get_vserver.called) @ddt.data( utils.annotated( 'dest_share_server_not_expected', (('src_vserver', None), exception.InvalidParameterValue( err='fake_err'))), utils.annotated( 'src_share_server_not_expected', (exception.InvalidParameterValue(err='fake_err'), ('dest_vserver', None)))) def test_migration_check_compatibility_errors(self, side_effects): self.library._have_cluster_creds = True mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(share_types, 'get_extra_specs_from_share') self.mock_object(self.library, '_check_extra_specs_validity') self.mock_object(self.library, '_check_aggregate_extra_specs_validity') self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(self.library, '_get_provisioning_options', mock.Mock(return_value={})) self.mock_object(self.library, 
'_get_normalized_qos_specs') self.mock_object(self.library, 'validate_provisioning_options_for_share') self.mock_object(data_motion, 'get_backend_configuration') self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=side_effects)) mock_exception_log = self.mock_object(lib_base.LOG, 'exception') self.mock_object(share_utils, 'extract_host', mock.Mock( return_value='destination_backend')) mock_compatibility_check = self.mock_object( self.client, 'check_volume_move') migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, destination_share_server=None) expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_metadata': False, 'preserve_snapshots': False, } self.assertDictEqual(expected_compatibility, migration_compatibility) mock_exception_log.assert_called_once() data_motion.get_backend_configuration.assert_called_once_with( 'destination_backend') self.assertFalse(mock_compatibility_check.called) def test_migration_check_compatibility_incompatible_vservers(self): self.library._have_cluster_creds = True mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(share_types, 'get_extra_specs_from_share') self.mock_object(self.library, '_check_extra_specs_validity') self.mock_object(self.library, '_check_aggregate_extra_specs_validity') self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(data_motion, 'get_backend_configuration') self.mock_object(self.library, '_get_provisioning_options', mock.Mock(return_value={})) self.mock_object(self.library, '_get_normalized_qos_specs') 
self.mock_object(self.library, 'validate_provisioning_options_for_share') mock_exception_log = self.mock_object(lib_base.LOG, 'exception') get_vserver_returns = [ (fake.VSERVER1, mock.Mock()), (fake.VSERVER2, mock.Mock()), ] self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_returns)) self.mock_object(share_utils, 'extract_host', mock.Mock( side_effect=[ 'destination_backend', 'destination_pool', 'source_pool'])) mock_move_check = self.mock_object(self.client, 'check_volume_move') migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_metadata': False, 'preserve_snapshots': False, } self.assertDictEqual(expected_compatibility, migration_compatibility) mock_exception_log.assert_called_once() data_motion.get_backend_configuration.assert_called_once_with( 'destination_backend') self.assertFalse(mock_move_check.called) self.library._get_vserver.assert_has_calls( [mock.call(share_server=fake.SHARE_SERVER), mock.call(share_server='dst_srv')]) def test_migration_check_compatibility_client_error(self): self.library._have_cluster_creds = True mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(share_types, 'get_extra_specs_from_share') self.mock_object(self.library, '_check_extra_specs_validity') self.mock_object(self.library, '_check_aggregate_extra_specs_validity') self.mock_object(self.library, '_get_provisioning_options', mock.Mock(return_value={})) self.mock_object(self.library, '_get_normalized_qos_specs') 
self.mock_object(self.library, 'validate_provisioning_options_for_share') self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) mock_exception_log = self.mock_object(lib_base.LOG, 'exception') self.mock_object(data_motion, 'get_backend_configuration') self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(share_utils, 'extract_host', mock.Mock( side_effect=[ 'destination_backend', 'destination_pool', 'source_pool'])) mock_move_check = self.mock_object( self.client, 'check_volume_move', mock.Mock(side_effect=netapp_api.NaApiError)) self.mock_object(self.library, '_get_dest_flexvol_encryption_value', mock.Mock(return_value=False)) migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_metadata': False, 'preserve_snapshots': False, } self.assertDictEqual(expected_compatibility, migration_compatibility) mock_exception_log.assert_called_once() data_motion.get_backend_configuration.assert_called_once_with( 'destination_backend') mock_move_check.assert_called_once_with( fake.SHARE_NAME, fake.VSERVER1, 'destination_pool', encrypt_destination=False) self.library._get_vserver.assert_has_calls( [mock.call(share_server=fake.SHARE_SERVER), mock.call(share_server='dst_srv')]) @ddt.data(False, True) def test_migration_check_compatibility(self, fpolicy): self.library._have_cluster_creds = True mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) mock_dest_client = mock.Mock() if 
fpolicy: provisioning_options = copy.deepcopy( fake.PROVISIONING_OPTIONS_WITH_FPOLICY) get_vserver_side_effect = [(mock.Mock(), mock_dest_client), (fake.VSERVER1, mock.Mock())] else: get_vserver_side_effect = [(fake.VSERVER1, mock.Mock())] provisioning_options = {} self.mock_object(share_types, 'get_extra_specs_from_share') self.mock_object(self.library, '_check_extra_specs_validity') self.mock_object(self.library, '_check_aggregate_extra_specs_validity') self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(data_motion, 'get_backend_configuration') self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=get_vserver_side_effect)) self.mock_object(share_utils, 'extract_host', mock.Mock( side_effect=[ 'destination_backend', 'destination_pool', 'source_pool'])) mock_move_check = self.mock_object(self.client, 'check_volume_move') self.mock_object(self.library, '_get_dest_flexvol_encryption_value', mock.Mock(return_value=False)) self.mock_object(self.library, '_get_provisioning_options', mock.Mock(return_value=provisioning_options)) self.mock_object(self.library, '_get_normalized_qos_specs') self.mock_object(self.library, 'validate_provisioning_options_for_share') self.mock_object(self.library, '_check_destination_vserver_for_vol_move') fpolicies = [ x for x in range(1, self.library.FPOLICY_MAX_VSERVER_POLICIES + 1)] mock_fpolicy_status = self.mock_object( mock_dest_client, 'get_fpolicy_policies_status', mock.Mock(return_value=fpolicies)) mock_reusable_fpolicy = self.mock_object( self.library, '_find_reusable_fpolicy_scope', mock.Mock(return_value={'fake'})) src_instance = fake_share.fake_share_instance() dst_instance = fake_share.fake_share_instance() migration_compatibility = self.library.migration_check_compatibility( self.context, src_instance, dst_instance, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') expected_compatibility = { 'compatible': True, 'writable': True, 
'nondisruptive': True, 'preserve_metadata': True, 'preserve_snapshots': True, } self.assertDictEqual(expected_compatibility, migration_compatibility) data_motion.get_backend_configuration.assert_called_once_with( 'destination_backend') mock_move_check.assert_called_once_with( fake.SHARE_NAME, fake.VSERVER1, 'destination_pool', encrypt_destination=False) if fpolicy: self.library._get_vserver.assert_has_calls( [mock.call(share_server='dst_srv'), mock.call(share_server=fake.SHARE_SERVER)]) mock_fpolicy_status.assert_called_once() mock_reusable_fpolicy.assert_called_once_with( dst_instance, mock_dest_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS ) else: self.library._get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) def test_migration_check_compatibility_destination_type_is_encrypted(self): self.library._have_cluster_creds = True mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(data_motion, 'get_backend_configuration') self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(share_utils, 'extract_host', mock.Mock( side_effect=[ 'destination_backend', 'destination_pool', 'source_pool'])) mock_move_check = self.mock_object(self.client, 'check_volume_move') self.mock_object(self.library, '_get_dest_flexvol_encryption_value', mock.Mock(return_value=True)) self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value={'spec1': 'spec-data'})) self.mock_object(self.library, 
'_check_extra_specs_validity') self.mock_object(self.library, '_check_aggregate_extra_specs_validity') self.mock_object(self.library, '_get_provisioning_options', mock.Mock(return_value={})) self.mock_object(self.library, '_get_normalized_qos_specs') self.mock_object(self.library, 'validate_provisioning_options_for_share') migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') expected_compatibility = { 'compatible': True, 'writable': True, 'nondisruptive': True, 'preserve_metadata': True, 'preserve_snapshots': True, } self.assertDictEqual(expected_compatibility, migration_compatibility) data_motion.get_backend_configuration.assert_called_once_with( 'destination_backend') mock_move_check.assert_called_once_with( fake.SHARE_NAME, fake.VSERVER1, 'destination_pool', encrypt_destination=True) self.library._get_vserver.assert_has_calls( [mock.call(share_server=fake.SHARE_SERVER), mock.call(share_server='dst_srv')]) def test_migration_check_compatibility_snaplock_not_compatible(self): self.library._have_cluster_creds = True self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(data_motion, 'get_backend_configuration') self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(share_utils, 'extract_host', mock.Mock( side_effect=[ 'destination_backend', 'destination_pool', 'source_pool'])) mock_dm = mock.Mock() self.mock_object(data_motion, 'DataMotionSession', mock.Mock(return_value=mock_dm)) self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=False)) self.mock_object(self.library, '_is_snaplock_compatible_for_migration', mock.Mock(return_value=False) ) 
migration_compatibility = self.library.migration_check_compatibility( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_metadata': False, 'preserve_snapshots': False, } self.assertDictEqual(expected_compatibility, migration_compatibility) def test_migration_start(self): mock_info_log = self.mock_object(lib_base.LOG, 'info') source_snapshots = mock.Mock() snapshot_mappings = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock(return_value='destination_pool')) mock_move = self.mock_object(self.client, 'start_volume_move') self.mock_object(self.library, '_get_dest_flexvol_encryption_value', mock.Mock(return_value=False)) retval = self.library.migration_start( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), source_snapshots, snapshot_mappings, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') self.assertIsNone(retval) self.assertTrue(mock_info_log.called) mock_move.assert_called_once_with( fake.SHARE_NAME, fake.VSERVER1, 'destination_pool', cutover_action='wait', encrypt_destination=False) def test_migration_start_encrypted_destination(self): mock_info_log = self.mock_object(lib_base.LOG, 'info') source_snapshots = mock.Mock() snapshot_mappings = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock(return_value='destination_pool')) mock_move = self.mock_object(self.client, 'start_volume_move') 
self.mock_object(self.library, '_get_dest_flexvol_encryption_value', mock.Mock(return_value=True)) retval = self.library.migration_start( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), source_snapshots, snapshot_mappings, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') self.assertIsNone(retval) self.assertTrue(mock_info_log.called) mock_move.assert_called_once_with( fake.SHARE_NAME, fake.VSERVER1, 'destination_pool', cutover_action='wait', encrypt_destination=True) def test_migration_continue_volume_move_failed(self): source_snapshots = mock.Mock() snapshot_mappings = mock.Mock() mock_exception_log = self.mock_object(lib_base.LOG, 'exception') self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) mock_status_check = self.mock_object( self.client, 'get_volume_move_status', mock.Mock(return_value={'phase': 'failed', 'details': 'unknown'})) self.assertRaises(exception.NetAppException, self.library.migration_continue, self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), source_snapshots, snapshot_mappings, share_server=None, destination_share_server=None) mock_status_check.assert_called_once_with( fake.SHARE_NAME, fake.VSERVER1) mock_exception_log.assert_called_once() @ddt.data({'phase': 'Queued', 'completed': False}, {'phase': 'Finishing', 'completed': False}, {'phase': 'cutover_hard_deferred', 'completed': True}, {'phase': 'cutover_soft_deferred', 'completed': True}, {'phase': 'completed', 'completed': True}) @ddt.unpack def test_migration_continue(self, phase, completed): source_snapshots = mock.Mock() snapshot_mappings = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) 
self.mock_object(self.client, 'get_volume_move_status', mock.Mock(return_value={'phase': phase})) migration_completed = self.library.migration_continue( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), source_snapshots, snapshot_mappings, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') self.assertEqual(completed, migration_completed) @ddt.data('cutover_hard_deferred', 'cutover_soft_deferred', 'Queued', 'Replicating') def test_migration_get_progress_at_phase(self, phase): source_snapshots = mock.Mock() snapshot_mappings = mock.Mock() mock_info_log = self.mock_object(lib_base.LOG, 'info') status = { 'state': 'healthy', 'details': '%s:: Volume move job in progress' % phase, 'phase': phase, 'estimated-completion-time': '1481919246', 'percent-complete': 80, } self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(self.client, 'get_volume_move_status', mock.Mock(return_value=status)) migration_progress = self.library.migration_get_progress( self.context, fake_share.fake_share_instance(), source_snapshots, snapshot_mappings, fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') expected_progress = { 'total_progress': 100 if phase.startswith('cutover') else 80, 'state': 'healthy', 'estimated_completion_time': '1481919246', 'details': '%s:: Volume move job in progress' % phase, 'phase': phase, } self.assertDictEqual(expected_progress, migration_progress) mock_info_log.assert_called_once() @ddt.data({'state': 'failed'}, {'state': 'healthy'}) @ddt.unpack def test_migration_cancel(self, state): source_snapshots = mock.Mock() snapshot_mappings = mock.Mock() self.library.configuration.netapp_migration_cancel_timeout = 15 mock_info_log = self.mock_object(lib_base.LOG, 'info') self.mock_object(self.library, '_get_vserver', 
mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(self.client, 'abort_volume_move') self.mock_object(self.client, 'get_volume_move_status', mock.Mock(return_value={'state': state})) if state == 'failed': retval = self.library.migration_cancel( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), source_snapshots, snapshot_mappings, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') self.assertIsNone(retval) mock_info_log.assert_called_once() else: self.assertRaises( (exception.NetAppException), self.library.migration_cancel, self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance, source_snapshots, snapshot_mappings, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') @ddt.data({'already_canceled': True, 'effect': exception.NetAppException}, {'already_canceled': False, 'effect': (None, exception.NetAppException)}) @ddt.unpack def test_migration_cancel_exception_volume_status(self, already_canceled, effect): source_snapshots = mock.Mock() snapshot_mappings = mock.Mock() self.library.configuration.netapp_migration_cancel_timeout = 1 mock_exception_log = self.mock_object(lib_base.LOG, 'exception') mock_info_log = self.mock_object(lib_base.LOG, 'info') self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(self.client, 'abort_volume_move') self.mock_object(self.client, 'get_volume_move_status', mock.Mock(side_effect=effect)) self.library.migration_cancel( self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance(), source_snapshots, snapshot_mappings, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') mock_exception_log.assert_called_once() if not already_canceled: 
mock_info_log.assert_called_once() self.assertEqual(not already_canceled, self.client.abort_volume_move.called) def test_migration_complete_invalid_phase(self): source_snapshots = mock.Mock() snapshot_mappings = mock.Mock() status = { 'state': 'healthy', 'phase': 'Replicating', 'details': 'Replicating:: Volume move operation is in progress.', } mock_exception_log = self.mock_object(lib_base.LOG, 'exception') vserver_client = mock.Mock() self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object( self.library, '_get_backend_share_name', mock.Mock(side_effect=[fake.SHARE_NAME, 'new_share_name'])) self.mock_object(self.library, '_get_volume_move_status', mock.Mock(return_value=status)) self.mock_object(self.library, '_create_export') self.assertRaises( exception.NetAppException, self.library.migration_complete, self.context, fake_share.fake_share_instance(), fake_share.fake_share_instance, source_snapshots, snapshot_mappings, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') self.assertFalse(vserver_client.set_volume_name.called) self.assertFalse(self.library._create_export.called) mock_exception_log.assert_called_once() def test_migration_complete_timeout(self): source_snapshots = mock.Mock() snapshot_mappings = mock.Mock() self.library.configuration.netapp_volume_move_cutover_timeout = 15 vol_move_side_effects = [ {'phase': 'cutover_hard_deferred'}, {'phase': 'Cutover'}, {'phase': 'Finishing'}, {'phase': 'Finishing'}, ] self.mock_object(time, 'sleep') mock_warning_log = self.mock_object(lib_base.LOG, 'warning') vserver_client = mock.Mock() self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object( self.library, '_get_backend_share_name', mock.Mock(side_effect=[fake.SHARE_NAME, 'new_share_name'])) self.mock_object(self.library, '_get_volume_move_status', mock.Mock( side_effect=vol_move_side_effects)) 
self.mock_object(self.library, '_create_export') src_share = fake_share.fake_share_instance(id='source-share-instance') dest_share = fake_share.fake_share_instance(id='dest-share-instance') self.assertRaises( exception.NetAppException, self.library.migration_complete, self.context, src_share, dest_share, source_snapshots, snapshot_mappings, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') self.assertFalse(vserver_client.set_volume_name.called) self.assertFalse(self.library._create_export.called) self.assertEqual(3, mock_warning_log.call_count) @ddt.data({'phase': 'cutover_hard_deferred', 'provisioning_options': fake.PROVISIONING_OPTIONS_WITH_QOS, 'policy_group_name': fake.QOS_POLICY_GROUP_NAME}, {'phase': 'cutover_soft_deferred', 'provisioning_options': fake.PROVISIONING_OPTIONS_WITH_QOS, 'policy_group_name': fake.QOS_POLICY_GROUP_NAME}, {'phase': 'completed', 'provisioning_options': fake.PROVISIONING_OPTIONS, 'policy_group_name': False}, {'phase': 'completed', 'provisioning_options': fake.PROVISIONING_OPTIONS_WITH_FPOLICY, 'policy_group_name': False}) @ddt.unpack def test_migration_complete(self, phase, provisioning_options, policy_group_name): snap = fake_share.fake_snapshot_instance( id='src-snapshot', provider_location='test-src-provider-location') dest_snap = fake_share.fake_snapshot_instance(id='dest-snapshot', as_primitive=True) extra_specs = copy.deepcopy(fake.EXTRA_SPEC) source_snapshots = [snap] snapshot_mappings = {snap['id']: dest_snap} self.library.configuration.netapp_volume_move_cutover_timeout = 15 vol_move_side_effects = [ {'phase': phase}, {'phase': 'Cutover'}, {'phase': 'Finishing'}, {'phase': 'completed'}, ] self.mock_object(time, 'sleep') mock_debug_log = self.mock_object(lib_base.LOG, 'debug') mock_info_log = self.mock_object(lib_base.LOG, 'info') mock_warning_log = self.mock_object(lib_base.LOG, 'warning') vserver_client = mock.Mock() self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, 
vserver_client))) self.mock_object( self.library, '_get_backend_share_name', mock.Mock(side_effect=[fake.SHARE_NAME, 'new_share_name'])) self.mock_object(self.library, '_create_export', mock.Mock( return_value=fake.NFS_EXPORTS)) mock_move_status_check = self.mock_object( self.library, '_get_volume_move_status', mock.Mock(side_effect=vol_move_side_effects)) self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value=extra_specs)) self.mock_object(self.library, '_check_fpolicy_file_operations') self.mock_object( self.library, '_get_provisioning_options', mock.Mock(return_value=provisioning_options)) self.mock_object( self.library, '_modify_or_create_qos_for_existing_share', mock.Mock(return_value=policy_group_name)) mock_get_volume_snapshot_attributes = self.mock_object( vserver_client, 'get_volume_snapshot_attributes', mock.Mock(return_value={'snapshot-policy': 'fake_policy'})) self.mock_object(vserver_client, 'modify_volume') mock_create_new_fpolicy = self.mock_object( self.library, '_create_fpolicy_for_share') mock_delete_policy = self.mock_object(self.library, '_delete_fpolicy_for_share') src_share = fake_share.fake_share_instance(id='source-share-instance') dest_share = fake_share.fake_share_instance(id='dest-share-instance') dest_aggr = share_utils.extract_host(dest_share['host'], level='pool') data_updates = self.library.migration_complete( self.context, src_share, dest_share, source_snapshots, snapshot_mappings, share_server=fake.SHARE_SERVER, destination_share_server='dst_srv') self.assertEqual(fake.NFS_EXPORTS, data_updates['export_locations']) expected_dest_snap_updates = { 'provider_location': snap['provider_location'], } self.assertIn(dest_snap['id'], data_updates['snapshot_updates']) self.assertEqual(expected_dest_snap_updates, data_updates['snapshot_updates'][dest_snap['id']]) vserver_client.set_volume_name.assert_called_once_with( fake.SHARE_NAME, 'new_share_name') self.library._create_export.assert_called_once_with( dest_share, 
fake.SHARE_SERVER, fake.VSERVER1, vserver_client, clear_current_export_policy=False) mock_get_volume_snapshot_attributes.assert_called_once_with( 'new_share_name') vserver_client.modify_volume.assert_called_once_with( dest_aggr, 'new_share_name', **provisioning_options) mock_info_log.assert_called_once() mock_delete_policy.assert_called_once_with(src_share, fake.VSERVER1, vserver_client) if phase != 'completed': self.assertEqual(2, mock_warning_log.call_count) self.assertFalse(mock_debug_log.called) self.assertEqual(4, mock_move_status_check.call_count) else: self.assertFalse(mock_warning_log.called) mock_debug_log.assert_called_once() mock_move_status_check.assert_called_once() if provisioning_options.get( 'fpolicy_extensions_to_include') is not None: mock_create_new_fpolicy.assert_called_once_with( dest_share, fake.VSERVER1, vserver_client, **provisioning_options) else: mock_create_new_fpolicy.assert_not_called() def test_modify_or_create_qos_for_existing_share_no_qos_extra_specs(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_backend_qos_policy_group_name') self.mock_object(vserver_client, 'get_volume') self.mock_object(self.library, '_create_qos_policy_group') retval = self.library._modify_or_create_qos_for_existing_share( fake.SHARE, fake.EXTRA_SPEC, fake.VSERVER1, vserver_client) self.assertIsNone(retval) self.library._get_backend_qos_policy_group_name.assert_not_called() vserver_client.get_volume.assert_not_called() self.library._create_qos_policy_group.assert_not_called() def test_modify_or_create_qos_for_existing_share_no_existing_qos(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_backend_qos_policy_group_name') self.mock_object(vserver_client, 'get_volume', mock.Mock(return_value=fake.FLEXVOL_WITHOUT_QOS)) self.mock_object(self.library, '_create_qos_policy_group') self.mock_object(self.library._client, 'qos_policy_group_modify') qos_policy_name = self.library._get_backend_qos_policy_group_name( 
fake.SHARE['id']) retval = self.library._modify_or_create_qos_for_existing_share( fake.SHARE, fake.EXTRA_SPEC_WITH_QOS, fake.VSERVER1, vserver_client) share_obj = { 'size': 2, 'id': fake.SHARE['id'], } self.assertEqual(qos_policy_name, retval) self.library._client.qos_policy_group_modify.assert_not_called() self.library._create_qos_policy_group.assert_called_once_with( share_obj, fake.VSERVER1, {'maxiops': '3000'}, vserver_client=vserver_client) @ddt.data(utils.annotated('volume_has_shared_qos_policy', (2, False, )), utils.annotated('volume_has_shared_qos_policy_iops_change', (2, True, )), utils.annotated('volume_has_nonshared_qos_policy', (1, False, )), utils.annotated('volume_has_nonshared_qos_policy_iops_change', (1, True, ))) @ddt.unpack def test_modify_or_create_qos_for_existing_share(self, num_workloads, qos_iops_change): vserver_client = mock.Mock() qos_policy = copy.deepcopy(fake.QOS_POLICY_GROUP) qos_policy['num-workloads'] = num_workloads extra_specs = copy.deepcopy(fake.EXTRA_SPEC_WITH_QOS) expected_iops = '3000' if qos_iops_change: expected_iops = '4000' extra_specs[fake.QOS_EXTRA_SPEC] = expected_iops self.mock_object(vserver_client, 'get_volume', mock.Mock(return_value=fake.FLEXVOL_WITH_QOS)) self.mock_object(self.library._client, 'qos_policy_group_get', mock.Mock(return_value=qos_policy)) mock_qos_policy_modify = self.mock_object( self.library._client, 'qos_policy_group_modify') mock_qos_policy_rename = self.mock_object( self.library._client, 'qos_policy_group_rename') mock_create_qos_policy = self.mock_object( self.library, '_create_qos_policy_group') new_qos_policy_name = self.library._get_backend_qos_policy_group_name( fake.SHARE['id']) retval = self.library._modify_or_create_qos_for_existing_share( fake.SHARE, extra_specs, fake.VSERVER1, vserver_client) self.assertEqual(new_qos_policy_name, retval) if num_workloads == 1: mock_create_qos_policy.assert_not_called() if qos_iops_change: mock_qos_policy_modify.assert_called_once_with( 
fake.QOS_POLICY_GROUP_NAME, expected_iops + 'iops') else: mock_qos_policy_modify.assert_not_called() mock_qos_policy_rename.assert_called_once_with( fake.QOS_POLICY_GROUP_NAME, new_qos_policy_name) else: share_obj = { 'size': 2, 'id': fake.SHARE['id'], } mock_create_qos_policy.assert_called_once_with( share_obj, fake.VSERVER1, {'maxiops': expected_iops}, vserver_client=vserver_client) self.library._client.qos_policy_group_modify.assert_not_called() self.library._client.qos_policy_group_rename.assert_not_called() @ddt.data(('host', True), ('pool', False), (None, False), ('fake', False)) @ddt.unpack def test__is_group_cg(self, css, is_cg): share_group = mock.Mock() share_group.consistent_snapshot_support = css self.assertEqual(is_cg, self.library._is_group_cg(self.context, share_group)) def test_create_group_snapshot_cg(self): share_group = mock.Mock() share_group.consistent_snapshot_support = 'host' snap_dict = {'share_group': share_group} fallback_create = mock.Mock() mock_create_cgsnapshot = self.mock_object(self.library, 'create_cgsnapshot') self.library.create_group_snapshot(self.context, snap_dict, fallback_create, share_server=fake.SHARE_SERVER) mock_create_cgsnapshot.assert_called_once_with( self.context, snap_dict, share_server=fake.SHARE_SERVER) fallback_create.assert_not_called() @ddt.data('pool', None, 'fake') def test_create_group_snapshot_fallback(self, css): share_group = mock.Mock() share_group.consistent_snapshot_support = css snap_dict = {'share_group': share_group} fallback_create = mock.Mock() mock_create_cgsnapshot = self.mock_object(self.library, 'create_cgsnapshot') self.library.create_group_snapshot(self.context, snap_dict, fallback_create, share_server=fake.SHARE_SERVER) mock_create_cgsnapshot.assert_not_called() fallback_create.assert_called_once_with(self.context, snap_dict, share_server=fake.SHARE_SERVER) def test_delete_group_snapshot_cg(self): share_group = mock.Mock() share_group.consistent_snapshot_support = 'host' snap_dict = 
{'share_group': share_group} fallback_delete = mock.Mock() mock_delete_cgsnapshot = self.mock_object(self.library, 'delete_cgsnapshot') self.library.delete_group_snapshot(self.context, snap_dict, fallback_delete, share_server=fake.SHARE_SERVER) mock_delete_cgsnapshot.assert_called_once_with( self.context, snap_dict, share_server=fake.SHARE_SERVER) fallback_delete.assert_not_called() @ddt.data('pool', None, 'fake') def test_delete_group_snapshot_fallback(self, css): share_group = mock.Mock() share_group.consistent_snapshot_support = css snap_dict = {'share_group': share_group} fallback_delete = mock.Mock() mock_delete_cgsnapshot = self.mock_object(self.library, 'delete_cgsnapshot') self.library.delete_group_snapshot(self.context, snap_dict, fallback_delete, share_server=fake.SHARE_SERVER) mock_delete_cgsnapshot.assert_not_called() fallback_delete.assert_called_once_with(self.context, snap_dict, share_server=fake.SHARE_SERVER) def test_create_group_from_snapshot_cg(self): share_group = mock.Mock() share_group.consistent_snapshot_support = 'host' snap_dict = {'share_group': share_group} fallback_create = mock.Mock() mock_create_cg_from_snapshot = self.mock_object( self.library, 'create_consistency_group_from_cgsnapshot') self.library.create_group_from_snapshot(self.context, share_group, snap_dict, fallback_create, share_server=fake.SHARE_SERVER) mock_create_cg_from_snapshot.assert_called_once_with( self.context, share_group, snap_dict, share_server=fake.SHARE_SERVER) fallback_create.assert_not_called() @ddt.data('pool', None, 'fake') def test_create_group_from_snapshot_fallback(self, css): share_group = mock.Mock() share_group.consistent_snapshot_support = css snap_dict = {'share_group': share_group} fallback_create = mock.Mock() mock_create_cg_from_snapshot = self.mock_object( self.library, 'create_consistency_group_from_cgsnapshot') self.library.create_group_from_snapshot(self.context, share_group, snap_dict, fallback_create, share_server=fake.SHARE_SERVER) 
mock_create_cg_from_snapshot.assert_not_called() fallback_create.assert_called_once_with(self.context, share_group, snap_dict, share_server=fake.SHARE_SERVER) @ddt.data('default', 'hidden', 'visible') def test_get_backend_info(self, snapdir): self.library.configuration.netapp_reset_snapdir_visibility = snapdir expected = {'snapdir_visibility': snapdir} result = self.library.get_backend_info(self.context) self.assertEqual(expected, result) @ddt.data('default', 'hidden') def test_ensure_shares(self, snapdir_cfg): shares = [ fake_share.fake_share_instance(id='s-1', share_server='fake_server_1'), fake_share.fake_share_instance(id='s-2', share_server='fake_server_2'), fake_share.fake_share_instance(id='s-3', share_server='fake_server_2') ] vserver_client = mock.Mock() self.mock_object( self.library, '_get_vserver', mock.Mock(side_effect=[ (fake.VSERVER1, vserver_client), (fake.VSERVER2, vserver_client), (fake.VSERVER2, vserver_client) ])) (self.library.configuration. netapp_reset_snapdir_visibility) = snapdir_cfg self.library.ensure_shares(self.context, shares) if snapdir_cfg == 'default': self.library._get_vserver.assert_not_called() vserver_client.set_volume_snapdir_access.assert_not_called() else: self.library._get_vserver.assert_has_calls([ mock.call(share_server='fake_server_1'), mock.call(share_server='fake_server_2'), mock.call(share_server='fake_server_2'), ]) vserver_client.set_volume_snapdir_access.assert_has_calls([ mock.call('share_s_1', True), mock.call('share_s_2', True), mock.call('share_s_3', True), ]) def test__check_volume_clone_split_completed(self): vserver_client = mock.Mock() mock_share_name = self.mock_object( self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) vserver_client.check_volume_clone_split_completed.return_value = ( fake.CDOT_SNAPSHOT_BUSY_SNAPMIRROR) self.library._check_volume_clone_split_completed(fake.SHARE, vserver_client) mock_share_name.assert_called_once_with(fake.SHARE_ID) check_call = 
vserver_client.check_volume_clone_split_completed check_call.assert_called_once_with(fake.SHARE_NAME) @ddt.data(constants.STATUS_ACTIVE, constants.STATUS_CREATING_FROM_SNAPSHOT) def test_get_share_status(self, status): mock_update_from_snap = self.mock_object( self.library, '_update_create_from_snapshot_status') fake.SHARE['status'] = status self.library.get_share_status(fake.SHARE, fake.SHARE_SERVER) if status == constants.STATUS_CREATING_FROM_SNAPSHOT: mock_update_from_snap.assert_called_once_with(fake.SHARE, fake.SHARE_SERVER) else: mock_update_from_snap.assert_not_called() def test_volume_rehost(self): mock_share_name = self.mock_object( self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) mock_rehost = self.mock_object(self.client, 'rehost_volume') self.library.volume_rehost(fake.SHARE, fake.VSERVER1, fake.VSERVER2) mock_share_name.assert_called_once_with(fake.SHARE_ID) mock_rehost.assert_called_once_with(fake.SHARE_NAME, fake.VSERVER1, fake.VSERVER2) def test__rehost_and_mount_volume(self): mock_share_name = self.mock_object( self.library, '_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) mock_rehost = self.mock_object(self.library, 'volume_rehost', mock.Mock()) src_vserver_client = mock.Mock() mock_unmount = self.mock_object(src_vserver_client, 'unmount_volume') dst_vserver_client = mock.Mock() mock_mount = self.mock_object(dst_vserver_client, 'mount_volume') self.library._rehost_and_mount_volume( fake.SHARE, fake.VSERVER1, src_vserver_client, fake.VSERVER2, dst_vserver_client) mock_share_name.assert_called_once_with(fake.SHARE_ID) mock_unmount.assert_called_once_with(fake.SHARE_NAME) mock_rehost.assert_called_once_with(fake.SHARE, fake.VSERVER1, fake.VSERVER2) mock_mount.assert_called_once_with(fake.SHARE_NAME, fake.MOUNT_POINT_NAME) def test__move_volume_after_splitting(self): src_share = fake_share.fake_share_instance(id='source-share-instance') dest_share = 
fake_share.fake_share_instance(id='dest-share-instance') cutover_action = 'defer' self.library.configuration.netapp_start_volume_move_timeout = 15 self.mock_object(time, 'sleep') mock_warning_log = self.mock_object(lib_base.LOG, 'warning') mock_vol_move = self.mock_object(self.library, '_move_volume') self.library._move_volume_after_splitting( src_share, dest_share, share_server=fake.SHARE_SERVER, cutover_action=cutover_action) mock_vol_move.assert_called_once_with(src_share, dest_share, fake.SHARE_SERVER, cutover_action) self.assertEqual(0, mock_warning_log.call_count) def test__move_volume_after_splitting_timeout(self): src_share = fake_share.fake_share_instance(id='source-share-instance') dest_share = fake_share.fake_share_instance(id='dest-share-instance') self.library.configuration.netapp_start_volume_move_timeout = 15 cutover_action = 'defer' self.mock_object(time, 'sleep') mock_warning_log = self.mock_object(lib_base.LOG, 'warning') undergoing_split_op_msg = ( 'The volume is undergoing a clone split operation.') na_api_error = netapp_api.NaApiError(code=netapp_api.EAPIERROR, message=undergoing_split_op_msg) mock_move_vol = self.mock_object( self.library, '_move_volume', mock.Mock(side_effect=na_api_error)) self.assertRaises(exception.NetAppException, self.library._move_volume_after_splitting, src_share, dest_share, share_server=fake.SHARE_SERVER, cutover_action=cutover_action) self.assertEqual(3, mock_move_vol.call_count) self.assertEqual(3, mock_warning_log.call_count) def test__move_volume_after_splitting_api_not_found(self): src_share = fake_share.fake_share_instance(id='source-share-instance') dest_share = fake_share.fake_share_instance(id='dest-share-instance') self.library.configuration.netapp_start_volume_move_timeout = 15 cutover_action = 'defer' self.mock_object(time, 'sleep') mock_warning_log = self.mock_object(lib_base.LOG, 'warning') na_api_error = netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) mock_move_vol = self.mock_object( 
self.library, '_move_volume', mock.Mock(side_effect=na_api_error)) self.assertRaises(exception.NetAppException, self.library._move_volume_after_splitting, src_share, dest_share, share_server=fake.SHARE_SERVER, cutover_action=cutover_action) mock_move_vol.assert_called_once_with(src_share, dest_share, fake.SHARE_SERVER, cutover_action) mock_warning_log.assert_not_called() @ddt.data({'total': 20, 'free': 5, 'reserved': 10, 'thin': False, 'over_sub': 0, 'size': 3, 'compatible': True, 'nb_pools': 1}, {'total': 20, 'free': 5, 'reserved': 10, 'thin': False, 'over_sub': 0, 'size': 4, 'compatible': False, 'nb_pools': 1}, {'total': 20, 'free': 5, 'reserved': 20, 'thin': False, 'over_sub': 1.1, 'size': 3, 'compatible': False, 'nb_pools': 1}, {'total': 20, 'free': 5, 'reserved': 10, 'thin': True, 'over_sub': 2.0, 'size': 6, 'compatible': True, 'nb_pools': 1}, {'total': 20, 'free': 5, 'reserved': 10, 'thin': True, 'over_sub': 1.0, 'size': 4, 'compatible': False, 'nb_pools': 1}, {'total': 'unknown', 'free': 5, 'reserved': 0, 'thin': False, 'over_sub': 3.0, 'size': 1, 'compatible': False, 'nb_pools': 1}, {'total': 20, 'free': 5, 'reserved': 10, 'thin': True, 'over_sub': 1.0, 'size': 6, 'compatible': True, 'nb_pools': 2}, {'total': 20, 'free': 5, 'reserved': 10, 'thin': True, 'over_sub': 1.0, 'size': 7, 'compatible': False, 'nb_pools': 2}, ) @ddt.unpack def test__check_capacity_compatibility(self, total, free, reserved, thin, over_sub, size, compatible, nb_pools): pools = [] for p in range(nb_pools): pool = copy.deepcopy(fake.POOLS[0]) pool['total_capacity_gb'] = total pool['free_capacity_gb'] = free pool['reserved_percentage'] = reserved pool['max_over_subscription_ratio'] = over_sub pools.append(pool) result = self.library._check_capacity_compatibility(pools, thin, size) self.assertEqual(compatible, result) @ddt.data({'provisioning_opts': fake.PROVISIONING_OPTS_WITH_ADAPT_QOS, 'qos_specs': {fake.QOS_NORMALIZED_SPEC: 3000}, 'extra_specs': None, 'cluster_credentials': True}, 
{'provisioning_opts': fake.PROVISIONING_OPTS_WITH_ADAPT_QOS, 'qos_specs': None, 'extra_specs': fake.EXTRA_SPEC_WITH_REPLICATION, 'cluster_credentials': True}, {'provisioning_opts': fake.PROVISIONING_OPTIONS, 'qos_specs': {fake.QOS_NORMALIZED_SPEC: 3000}, 'extra_specs': None, 'cluster_credentials': False}, {'provisioning_opts': fake.PROVISIONING_OPTS_WITH_ADAPT_QOS, 'qos_specs': None, 'extra_specs': None, 'cluster_credentials': False}, {'provisioning_opts': fake.PROVISIONING_OPTIONS_INVALID_FPOLICY, 'qos_specs': None, 'extra_specs': None, 'cluster_credentials': False}, {'provisioning_opts': fake.PROVISIONING_OPTIONS_WITH_FPOLICY, 'qos_specs': None, 'extra_specs': {'replication_type': 'dr'}, 'cluster_credentials': False} ) @ddt.unpack def test_validate_provisioning_options_for_share_invalid_params( self, provisioning_opts, qos_specs, extra_specs, cluster_credentials): self.library._have_cluster_creds = cluster_credentials self.assertRaises(exception.NetAppException, self.library.validate_provisioning_options_for_share, provisioning_opts, extra_specs=extra_specs, qos_specs=qos_specs) def test__get_backend_fpolicy_policy_name(self): result = self.library._get_backend_fpolicy_policy_name( fake.SHARE_ID) expected = 'fpolicy_policy_' + fake.SHARE_ID.replace('-', '_') self.assertEqual(expected, result) def test__get_backend_fpolicy_event_name(self): result = self.library._get_backend_fpolicy_event_name( fake.SHARE_ID, 'NFS') expected = 'fpolicy_event_nfs_' + fake.SHARE_ID.replace('-', '_') self.assertEqual(expected, result) @ddt.data({}, {'policy-name': fake.FPOLICY_POLICY_NAME, 'shares-to-include': [fake.SHARE_NAME]}) def test__create_fpolicy_for_share(self, reusable_scope): vserver_client = mock.Mock() vserver_name = fake.VSERVER1 new_fake_share = copy.deepcopy(fake.SHARE) new_fake_share['id'] = 'new_fake_id' new_fake_share['share_proto'] = 'CIFS' event_name = 'fpolicy_event_cifs_new_fake_id' events = [event_name] policy_name = 'fpolicy_policy_new_fake_id' 
shares_to_include = [] if reusable_scope: shares_to_include = copy.deepcopy( reusable_scope.get('shares-to-include')) shares_to_include.append('share_new_fake_id') mock_reusable_scope = self.mock_object( self.library, '_find_reusable_fpolicy_scope', mock.Mock(return_value=reusable_scope)) mock_modify_policy = self.mock_object( vserver_client, 'modify_fpolicy_scope') mock_get_policies = self.mock_object( vserver_client, 'get_fpolicy_policies_status', mock.Mock(return_value=[])) mock_create_event = self.mock_object( vserver_client, 'create_fpolicy_event') mock_enable_fpolicy = self.mock_object( vserver_client, 'enable_fpolicy_policy') mock_create_fpolicy_policy_with_scope = self.mock_object( vserver_client, 'create_fpolicy_policy_with_scope') self.library._create_fpolicy_for_share( new_fake_share, vserver_name, vserver_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS) mock_reusable_scope.assert_called_once_with( new_fake_share, vserver_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS) if reusable_scope: mock_modify_policy.assert_called_once_with( 'share_new_fake_id', fake.FPOLICY_POLICY_NAME, shares_to_include=shares_to_include) mock_get_policies.assert_not_called() mock_create_event.assert_not_called() mock_create_fpolicy_policy_with_scope.assert_not_called() mock_enable_fpolicy.assert_not_called() else: mock_modify_policy.assert_not_called() mock_get_policies.assert_called_once() mock_create_event.assert_called_once_with( 'share_new_fake_id', event_name, new_fake_share['share_proto'].lower(), fake.FPOLICY_FILE_OPERATIONS_LIST) mock_create_fpolicy_policy_with_scope.assert_called_once_with( policy_name, 'share_new_fake_id', events, extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, 
extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE ) mock_enable_fpolicy.assert_called_once_with( 'share_new_fake_id', policy_name, 1) def test__create_fpolicy_for_share_max_policies_error(self): fake_client = mock.Mock() vserver_name = fake.VSERVER1 mock_reusable_scope = self.mock_object( self.library, '_find_reusable_fpolicy_scope', mock.Mock(return_value=None)) policies = [ x for x in range(1, self.library.FPOLICY_MAX_VSERVER_POLICIES + 1)] mock_get_policies = self.mock_object( fake_client, 'get_fpolicy_policies_status', mock.Mock(return_value=policies)) self.assertRaises( exception.NetAppException, self.library._create_fpolicy_for_share, fake.SHARE, vserver_name, fake_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS) mock_reusable_scope.assert_called_once_with( fake.SHARE, fake_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS) mock_get_policies.assert_called_once() def test__create_fpolicy_for_share_client_error(self): fake_client = mock.Mock() vserver_name = fake.VSERVER1 new_fake_share = copy.deepcopy(fake.SHARE) new_fake_share['id'] = 'new_fake_id' new_fake_share['share_proto'] = 'CIFS' event_name = 'fpolicy_event_cifs_new_fake_id' events = [event_name] policy_name = 'fpolicy_policy_new_fake_id' mock_reusable_scope = self.mock_object( self.library, '_find_reusable_fpolicy_scope', mock.Mock(return_value=None)) mock_get_policies = self.mock_object( fake_client, 'get_fpolicy_policies_status', mock.Mock(return_value=[])) mock_create_event = self.mock_object( fake_client, 'create_fpolicy_event') mock_create_fpolicy_policy_with_scope = self.mock_object( fake_client, 'create_fpolicy_policy_with_scope', mock.Mock(side_effect=self._mock_api_error())) mock_delete_fpolicy = self.mock_object( fake_client, 
'delete_fpolicy_policy') mock_delete_event = self.mock_object( fake_client, 'delete_fpolicy_event') self.assertRaises( exception.NetAppException, self.library._create_fpolicy_for_share, new_fake_share, vserver_name, fake_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS) mock_reusable_scope.assert_called_once_with( new_fake_share, fake_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS) mock_get_policies.assert_called_once() mock_create_event.assert_called_once_with( 'share_new_fake_id', event_name, new_fake_share['share_proto'].lower(), fake.FPOLICY_FILE_OPERATIONS_LIST) mock_create_fpolicy_policy_with_scope.assert_called_once_with( policy_name, 'share_new_fake_id', events, extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE) mock_delete_fpolicy.assert_called_once_with( 'share_new_fake_id', policy_name) mock_delete_event.assert_called_once_with( 'share_new_fake_id', event_name) def test__find_reusable_fpolicy_scope(self): vserver_client = mock.Mock() new_fake_share = copy.deepcopy(fake.SHARE) new_fake_share['share_proto'] = 'CIFS' reusable_scopes = [{ 'policy-name': fake.FPOLICY_POLICY_NAME, 'file-extensions-to-include': fake.FPOLICY_EXT_TO_INCLUDE_LIST, 'file-extensions-to-exclude': fake.FPOLICY_EXT_TO_EXCLUDE_LIST, 'shares-to-include': ['any_other_fake_share'], }] reusable_policies = [{ 'policy-name': fake.FPOLICY_POLICY_NAME, 'engine-name': fake.FPOLICY_ENGINE, 'events': [fake.FPOLICY_EVENT_NAME] }] reusable_events = [{ 'event-name': fake.FPOLICY_EVENT_NAME, 'protocol': new_fake_share['share_proto'].lower(), 'file-operations': fake.FPOLICY_FILE_OPERATIONS_LIST }] mock_get_scopes = self.mock_object( vserver_client, 'get_fpolicy_scopes', 
mock.Mock(return_value=reusable_scopes)) mock_get_policies = self.mock_object( vserver_client, 'get_fpolicy_policies', mock.Mock(return_value=reusable_policies)) mocke_get_events = self.mock_object( vserver_client, 'get_fpolicy_events', mock.Mock(return_value=reusable_events) ) result = self.library._find_reusable_fpolicy_scope( new_fake_share, vserver_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS) self.assertEqual(reusable_scopes[0], result) mock_get_scopes.assert_called_once_with( share_name=fake.SHARE_NAME, extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, shares_to_include=None) mock_get_policies.assert_called_once_with( share_name=fake.SHARE_NAME, policy_name=fake.FPOLICY_POLICY_NAME) mocke_get_events.assert_called_once_with( share_name=fake.SHARE_NAME, event_name=fake.FPOLICY_EVENT_NAME) @ddt.data(False, True) def test__delete_fpolicy_for_share(self, last_share): fake_vserver_client = mock.Mock() fake_vserver_name = fake.VSERVER1 fake_share = copy.deepcopy(fake.SHARE) share_name = self.library._get_backend_share_name(fake.SHARE_ID) existing_shares = [share_name] if not last_share: existing_shares.append('any_other_share') scopes = [{ 'policy-name': fake.FPOLICY_POLICY_NAME, 'file-extensions-to-include': fake.FPOLICY_EXT_TO_INCLUDE_LIST, 'file-extensions-to-exclude': fake.FPOLICY_EXT_TO_EXCLUDE_LIST, 'shares-to-include': existing_shares, }] shares_to_include = copy.copy(scopes[0].get('shares-to-include')) shares_to_include.remove(share_name) policies = [{ 'policy-name': fake.FPOLICY_POLICY_NAME, 'engine-name': fake.FPOLICY_ENGINE, 'events': [fake.FPOLICY_EVENT_NAME] }] mock_get_scopes = self.mock_object( fake_vserver_client, 'get_fpolicy_scopes', mock.Mock(return_value=scopes)) mock_modify_scope = self.mock_object( fake_vserver_client, 'modify_fpolicy_scope') mock_disable_policy 
= self.mock_object( fake_vserver_client, 'disable_fpolicy_policy') mock_get_policies = self.mock_object( fake_vserver_client, 'get_fpolicy_policies', mock.Mock(return_value=policies)) mock_delete_scope = self.mock_object( fake_vserver_client, 'delete_fpolicy_scope') mock_delete_policy = self.mock_object( fake_vserver_client, 'delete_fpolicy_policy') mock_delete_event = self.mock_object( fake_vserver_client, 'delete_fpolicy_event') self.library._delete_fpolicy_for_share(fake_share, fake_vserver_name, fake_vserver_client) mock_get_scopes.assert_called_once_with( share_name=fake.SHARE_NAME, shares_to_include=[share_name]) if shares_to_include: mock_modify_scope.assert_called_once_with( fake.SHARE_NAME, fake.FPOLICY_POLICY_NAME, shares_to_include=shares_to_include) else: mock_disable_policy.assert_called_once_with( fake.FPOLICY_POLICY_NAME) mock_get_policies.assert_called_once_with( share_name=fake.SHARE_NAME, policy_name=fake.FPOLICY_POLICY_NAME) mock_delete_scope.assert_called_once_with( fake.FPOLICY_POLICY_NAME) mock_delete_policy.assert_called_once_with( fake.SHARE_NAME, fake.FPOLICY_POLICY_NAME) mock_delete_event.assert_called_once_with( fake.FPOLICY_EVENT_NAME) @ddt.data(True, False) def test_initialize_flexgroup_pools(self, auto_provision): self.library.configuration.netapp_enable_flexgroup = True pool = None if auto_provision else [fake.FLEXGROUP_POOL_OPT_RAW] mock_safe_get = self.mock_object( self.library.configuration, 'safe_get', mock.Mock(return_value=pool)) mock_is_flex_support = self.mock_object( self.library._client, 'is_flexgroup_supported', mock.Mock(return_value=True)) mock_parse = self.mock_object( na_utils, 'parse_flexgroup_pool_config', mock.Mock(return_value=fake.FLEXGROUP_POOL_OPT)) aggr_set = set(fake.FLEXGROUP_POOL_AGGR) self.library._initialize_flexgroup_pools(aggr_set) mock_safe_get.assert_called_once_with('netapp_flexgroup_pools') mock_is_flex_support.assert_called_once_with() if auto_provision: 
self.assertEqual(self.library._flexgroup_pools, {na_utils.FLEXGROUP_DEFAULT_POOL_NAME: sorted( aggr_set)}) self.assertTrue(self.library._is_flexgroup_auto) mock_parse.assert_not_called() else: self.assertEqual(self.library._flexgroup_pools, fake.FLEXGROUP_POOL_OPT) self.assertFalse(self.library._is_flexgroup_auto) mock_parse.assert_called_once_with( [fake.FLEXGROUP_POOL_OPT_RAW], cluster_aggr_set=set(fake.FLEXGROUP_POOL_AGGR), check=True) def test_initialize_flexgroup_pools_no_opt(self): self.library.configuration.netapp_enable_flexgroup = False self.mock_object(self.library.configuration, 'safe_get', mock.Mock(return_value=None)) self.library._initialize_flexgroup_pools(set(fake.FLEXGROUP_POOL_AGGR)) self.assertEqual(self.library._flexgroup_pools, {}) def test_initialize_flexgroup_pools_raise_version(self): self.library.configuration.netapp_enable_flexgroup = True self.mock_object(self.library.configuration, 'safe_get', mock.Mock(return_value=[fake.FLEXGROUP_POOL_OPT_RAW])) self.mock_object(self.library._client, 'is_flexgroup_supported', mock.Mock(return_value=False)) self.assertRaises(exception.NetAppException, self.library._initialize_flexgroup_pools, set(fake.FLEXGROUP_POOL_AGGR)) def test_initialize_flexgroup_pools_raise_no_enable_with_pool(self): self.library.configuration.netapp_enable_flexgroup = False self.mock_object(self.library.configuration, 'safe_get', mock.Mock(return_value=[fake.FLEXGROUP_POOL_OPT_RAW])) self.assertRaises(exception.NetAppException, self.library._initialize_flexgroup_pools, set(fake.FLEXGROUP_POOL_AGGR)) @ddt.data(True, False) def test_get_flexgroup_pool_name(self, auto_provisioned): self.library._is_flexgroup_auto = auto_provisioned self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT result = self.library._get_flexgroup_pool_name( fake.FLEXGROUP_POOL_AGGR) if auto_provisioned: self.assertEqual(na_utils.FLEXGROUP_DEFAULT_POOL_NAME, result) else: self.assertEqual(fake.FLEXGROUP_POOL_NAME, result) def 
test_get_flexgroup_pool_name_not_found(self): self.library._is_flexgroup_auto = False self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT result = self.library._get_flexgroup_pool_name([]) self.assertEqual('', result) def test_is_flexgroup_pool(self): self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT result = self.library._is_flexgroup_pool(fake.FLEXGROUP_POOL_NAME) self.assertTrue(result) @ddt.data({'pool_name': fake.FLEXGROUP_POOL_NAME, 'aggr_list': fake.FLEXGROUP_POOL_AGGR}, {'pool_name': '', 'aggr_list': []}) @ddt.unpack def test_get_flexgroup_aggregate_list(self, pool_name, aggr_list): self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT result = self.library._get_flexgroup_aggregate_list(pool_name) self.assertEqual(aggr_list, result) def test_is_flexgroup_share(self): vserver_client = mock.Mock() vserver_client.is_flexgroup_volume.return_value = True result = self.library._is_flexgroup_share(vserver_client, fake.SHARE_NAME) vserver_client.is_flexgroup_volume.assert_called_once_with( fake.SHARE_NAME) self.assertTrue(result) def test_is_flexgroup_share_raise(self): vserver_client = mock.Mock() vserver_client.is_flexgroup_volume.side_effect = ( exception.NetAppException) self.assertRaises(exception.ShareNotFound, self.library._is_flexgroup_share, vserver_client, fake.SHARE_NAME) vserver_client.is_flexgroup_volume.assert_called_once_with( fake.SHARE_NAME) @ddt.data( {'enabled': True, 'flexgroup_only': False, 'is_flexvol': True}, {'enabled': False, 'flexgroup_only': False, 'is_flexvol': True}, {'enabled': True, 'flexgroup_only': True, 'is_flexvol': False}, {'enabled': False, 'flexgroup_only': True, 'is_flexvol': True}) @ddt.unpack def test_is_flexvol_pool_configured(self, enabled, flexgroup_only, is_flexvol): self.library.configuration.netapp_enable_flexgroup = enabled self.library.configuration.netapp_flexgroup_pool_only = flexgroup_only result = self.library.is_flexvol_pool_configured() self.assertEqual(is_flexvol, result) def 
test_get_minimum_flexgroup_size(self): self.mock_object(self.library, '_get_flexgroup_aggregate_list', mock.Mock(return_value=fake.AGGREGATES)) result = self.library._get_minimum_flexgroup_size(fake.POOL_NAME) expected = (len(fake.AGGREGATES) * self.library.FLEXGROUP_MIN_SIZE_PER_AGGR) self.assertEqual(expected, result) self.library._get_flexgroup_aggregate_list.assert_called_once_with( fake.POOL_NAME) def test_is_flexgroup_destination_host_not_enabled(self): mock_config = mock.Mock() dm_session = mock.Mock() mock_get_backend = self.mock_object( dm_session, 'get_backend_name_and_config_obj', mock.Mock(return_value=('fake', mock_config))) mock_safe_get = self.mock_object( mock_config, 'safe_get', mock.Mock(return_value=False)) result = self.library.is_flexgroup_destination_host(fake.HOST_NAME, dm_session) self.assertFalse(result) mock_get_backend.assert_called_once_with(fake.HOST_NAME) mock_safe_get.assert_called_once_with('netapp_enable_flexgroup') @ddt.data(None, [{'fg1': fake.AGGREGATE}]) def test_is_flexgroup_destination_host_false(self, flexgroup_pools): mock_config = mock.Mock() dm_session = mock.Mock() mock_get_backend = self.mock_object( dm_session, 'get_backend_name_and_config_obj', mock.Mock(return_value=('fake', mock_config))) mock_safe_get = self.mock_object( mock_config, 'safe_get', mock.Mock(side_effect=[True, flexgroup_pools])) mock_extract = self.mock_object( share_utils, 'extract_host', mock.Mock(return_value=fake.POOL_NAME)) mock_parse = self.mock_object( na_utils, 'parse_flexgroup_pool_config', mock.Mock(return_value={})) result = self.library.is_flexgroup_destination_host(fake.HOST_NAME, dm_session) self.assertFalse(result) mock_get_backend.assert_called_once_with(fake.HOST_NAME) mock_safe_get.assert_has_calls([ mock.call('netapp_enable_flexgroup'), mock.call('netapp_flexgroup_pools'), ]) mock_extract.assert_called_once_with(fake.HOST_NAME, level='pool') if flexgroup_pools: mock_parse.assert_called_once_with(flexgroup_pools) else: 
mock_parse.assert_not_called() def test_is_flexgroup_destination_host_true(self): flexgroup_pools = [{fake.POOL_NAME: fake.AGGREGATE}] mock_config = mock.Mock() dm_session = mock.Mock() mock_get_backend = self.mock_object( dm_session, 'get_backend_name_and_config_obj', mock.Mock(return_value=('fake', mock_config))) mock_safe_get = self.mock_object( mock_config, 'safe_get', mock.Mock(side_effect=[True, flexgroup_pools])) mock_extract = self.mock_object( share_utils, 'extract_host', mock.Mock(return_value=fake.POOL_NAME)) mock_parse = self.mock_object( na_utils, 'parse_flexgroup_pool_config', mock.Mock(return_value=flexgroup_pools[0])) result = self.library.is_flexgroup_destination_host(fake.HOST_NAME, dm_session) self.assertTrue(result) mock_get_backend.assert_called_once_with(fake.HOST_NAME) mock_safe_get.assert_has_calls([ mock.call('netapp_enable_flexgroup'), mock.call('netapp_flexgroup_pools'), ]) mock_extract.assert_called_once_with(fake.HOST_NAME, level='pool') mock_parse.assert_called_once_with(flexgroup_pools) def test_create_backup_first_backup(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self._backup_mock_common_method(mock_dest_client) self.mock_object(vserver_client, 'get_snapmirror_destinations', mock.Mock(return_value=[])) vserver_peer_info = [{'vserver': fake.VSERVER1, 'peer-vserver': fake.VSERVER2}] self.mock_object(vserver_client, 'get_vserver_peers', mock.Mock(return_value=vserver_peer_info)) snap_list = ["snap1", "snap2", "snap3"] self.mock_object(mock_dest_client, 'list_volume_snapshots', mock.Mock(return_value=snap_list)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup(self.context, share_instance, backup) (mock_dest_client.create_snapmirror_policy. 
assert_called_once_with(mock.ANY, policy_type='vault', discard_network_info=False, snapmirror_label=mock.ANY, keep=mock.ANY)) (mock_dest_client.create_snapmirror_vol. assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, mock.ANY, 'extended_data_protection', policy=mock.ANY )) dm_session = data_motion.DataMotionSession() (dm_session.initialize_and_wait_snapmirror_vol. assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY, timeout=mock.ANY, )) (mock_dest_client.update_snapmirror_vol. assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, mock.ANY, )) def test_create_backup_second_backup(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) snapmirror_info = [fake.SNAP_MIRROR_INFO] self.mock_object(vserver_client, 'get_snapmirror_destinations', mock.Mock(return_value=snapmirror_info)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup(self.context, share_instance, backup) mock_dest_client.create_snapmirror_policy.assert_not_called() mock_dest_client.create_snapmirror_vol.assert_not_called() (data_motion.DataMotionSession(). initialize_and_wait_snapmirror_vol.assert_not_called()) (mock_dest_client.update_snapmirror_vol. 
assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, mock.ANY, )) def test_create_backup_continue(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) snapmirror_info = [fake.SNAP_MIRROR_INFO] self.mock_object(mock_dest_client, 'get_snapmirrors', mock.Mock(return_value=snapmirror_info)) snap_list = ["snap1", "snap2", "snap3"] self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=snap_list)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup_continue(self.context, share_instance, backup) def test_restore_backup(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.restore_backup(self.context, backup, share_instance) vserver_client.snapmirror_restore_vol.assert_called_once_with( source_path=mock.ANY, dest_path=mock.ANY, source_snapshot=mock.ANY, des_cluster=mock.ANY, ) def test_restore_backup_continue(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(vserver_client, 'get_snapmirrors', mock.Mock(return_value=[])) snap_list = ["restored_snap1", "snap2", "snap3"] self.mock_object(vserver_client, 'list_volume_snapshots', mock.Mock(return_value=snap_list)) self.mock_object(self.library, '_get_backup_snapshot_name', mock.Mock(return_value="restored_snap1")) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.restore_backup_continue(self.context, backup, share_instance) def test_delete_backup(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() 
self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(mock_dest_client, 'get_snapmirrors', mock.Mock(return_value=[])) snap_list = ["snap1", "snap2", "snap3"] self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=snap_list)) self.mock_object( self.library, '_is_snapshot_deleted', mock.Mock(return_value=True)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.delete_backup(self.context, share_instance, backup) def test_delete_backup_with_resource_cleanup(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) snapmirror_info = [fake.SNAP_MIRROR_INFO] self.mock_object(mock_dest_client, 'get_snapmirrors', mock.Mock(return_value=snapmirror_info)) snap_list = ["snap1", "snap2", "snap3"] self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=snap_list)) self.mock_object( self.library, '_is_snapshot_deleted', mock.Mock(return_value=True)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.delete_backup(self.context, share_instance, backup) def test__get_backup_snapshot_name(self): backup = fake.SHARE_BACKUP actual_result = self.library._get_backup_snapshot_name(backup, fake.SHARE_ID) backup_id = backup.get('id', "") expected_result = f"backup_{fake.SHARE_ID}_{backup_id}" self.assertEqual(actual_result, expected_result) def test__get_backend(self): backup = fake.SHARE_BACKUP self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=_get_config())) actual_result = self.library._get_backend(backup) self.assertEqual(actual_result, fake.BACKEND_NAME) def test__get_des_volume_backup_snapshots(self): mock_dest_client = mock.Mock() share_id = fake.SHARE_ID 
snap_list = [f"backup_{share_id}_snap1", f"backup_{share_id}_snap2", "snap3"] self.mock_object(mock_dest_client, 'list_volume_snapshots', mock.Mock(return_value=snap_list)) expected_snap_list = [f"backup_{share_id}_snap1", f"backup_{share_id}_snap2"] actual_result = self.library._get_des_volume_backup_snapshots( mock_dest_client, fake.FLEXVOL_NAME, share_id) self.assertEqual(expected_snap_list, actual_result) def test__get_volume_for_backup(self): mock_dest_client = mock.Mock() mock_src_client = mock.Mock() self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=_get_config())) self.library._get_volume_for_backup(fake.SHARE_BACKUP, fake.SHARE_INSTANCE, mock_src_client, mock_dest_client) def test__get_volume_for_backup_create_new_vol(self): mock_dest_client = mock.Mock() mock_src_client = mock.Mock() _get_config() backup_config = 'backup_config' fake_config = configuration.Configuration(driver.share_opts, config_group=backup_config) CONF.set_override("netapp_backup_volume", "", group=backup_config) CONF.set_override("netapp_backup_vserver", "", group=backup_config) self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=fake_config)) vol_attr = {'name': 'fake_vol', 'size': 12345} self.mock_object(mock_src_client, 'get_volume', mock.Mock(return_value=vol_attr)) self.library._get_volume_for_backup(fake.SHARE_BACKUP, fake.SHARE_INSTANCE, mock_src_client, mock_dest_client) def test__get_volume_for_backup_aggr_not_found_negative(self): mock_dest_client = mock.Mock() mock_src_client = mock.Mock() _get_config() backup_config = 'backup_config' fake_config = configuration.Configuration(driver.share_opts, config_group=backup_config) CONF.set_override("netapp_backup_volume", "", group=backup_config) CONF.set_override("netapp_backup_vserver", "", group=backup_config) self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=fake_config)) self.mock_object(self.mock_dm_session, 
'get_most_available_aggr_of_vserver', mock.Mock(return_value=None)) self.assertRaises( exception.NetAppException, self.library._get_volume_for_backup, fake.SHARE_BACKUP, fake.SHARE_INSTANCE, mock_src_client, mock_dest_client, ) def test__get_vserver_for_backup(self): mock_dest_client = mock.Mock() self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=_get_config())) mock_backend_config = na_fakes.create_configuration() self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.mock_object(self.library, '_get_api_client_for_backend', mock.Mock(return_value=mock_dest_client)) self.library._get_vserver_for_backup(fake.SHARE_INSTANCE, fake.SHARE_BACKUP) def test__get_destination_vserver_and_vol(self): mock_dest_client = mock.Mock() snapmirror_info = [fake.SNAP_MIRROR_INFO] source_path = f"{fake.VSERVER1}:{fake.FLEXVOL_NAME}" self.mock_object(mock_dest_client, 'get_snapmirror_destinations', mock.Mock(return_value=snapmirror_info)) actual_result = self.library._get_destination_vserver_and_vol( mock_dest_client, source_path, validate_relation=True) expected_result = (fake.VSERVER2, fake.FLEXVOL_NAME_1) self.assertEqual(actual_result, expected_result) def test__get_destination_vserver_and_vol_negative(self): mock_dest_client = mock.Mock() snapmirror_info = [{'source-vserver': fake.VSERVER1, 'source-volume': fake.FLEXVOL_NAME, 'destination-vserver': fake.VSERVER2, 'destination-volume': fake.FLEXVOL_NAME_1 }, {'source-vserver': 'fake_vs_1', 'source-volume': 'fake_vol_1', 'destination-vserver': 'fake_vs_2', 'destination-volume': 'fake_vol_2' } ] source_path = f"{fake.VSERVER1}:{fake.FLEXVOL_NAME}" self.mock_object(mock_dest_client, 'get_snapmirror_destinations', mock.Mock(return_value=snapmirror_info)) self.assertRaises( exception.NetAppException, self.library._get_destination_vserver_and_vol, mock_dest_client, source_path, validate_relation=True ) def 
test_verify_and_wait_for_snapshot_to_transfer(self): vserver_client = mock.Mock() self.mock_object(vserver_client, 'get_snapshot', mock.Mock(return_value=fake.SNAPSHOT_NAME)) result = self.library._verify_and_wait_for_snapshot_to_transfer( vserver_client, fake.FLEXVOL_NAME, fake.SNAPSHOT_NAME, ) self.assertIsNone(result) def test_verify_and_wait_for_snapshot_to_transfer_negative(self): vserver_client = mock.Mock() self.mock_object(vserver_client, 'get_snapshot', mock.Mock(side_effect=netapp_api.NaApiError)) self.assertRaises( exception.NetAppException, self.library._verify_and_wait_for_snapshot_to_transfer, vserver_client, fake.FLEXVOL_NAME, fake.SNAPSHOT_NAME, timeout=10, ) def test__resource_cleanup_for_backup(self): src_vserver_client = mock.Mock() des_vserver_client = mock.Mock() share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=_get_config())) mock_backend_config = na_fakes.create_configuration() self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=mock_backend_config)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, src_vserver_client))) self.mock_object(self.library, '_get_api_client_for_backend', mock.Mock(return_value=des_vserver_client)) self.mock_object(self.library, '_get_backend_share_name', mock.Mock(return_value=fake.FLEXVOL_NAME)) self.library._resource_cleanup_for_backup(backup, share_instance, fake.VSERVER2, fake.FLEXVOL_NAME_1, ) (des_vserver_client.abort_snapmirror_vol. assert_called_once_with(fake.VSERVER1, fake.FLEXVOL_NAME, fake.VSERVER2, fake.FLEXVOL_NAME_1, clear_checkpoint=False )) (des_vserver_client.delete_snapmirror_vol. assert_called_once_with(fake.VSERVER1, fake.FLEXVOL_NAME, fake.VSERVER2, fake.FLEXVOL_NAME_1, )) db_session = data_motion.DataMotionSession() (db_session.wait_for_snapmirror_release_vol. 
assert_called_once_with(fake.VSERVER1, fake.VSERVER2, fake.FLEXVOL_NAME, fake.FLEXVOL_NAME_1, False, src_vserver_client, timeout=mock.ANY )) def test__resource_cleanup_for_backup_with_exception(self): mock_src_vserver_client = mock.Mock() mock_des_vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_src_vserver_client))) self._backup_mock_common_method(mock_des_vserver_client) self.mock_object(mock_des_vserver_client, 'abort_snapmirror_vol', mock.Mock(side_effect=netapp_api.NaApiError)) self.mock_object(mock_des_vserver_client, 'delete_snapmirror_vol', mock.Mock(side_effect=netapp_api.NaApiError( code=netapp_api.EOBJECTNOTFOUND))) self.mock_object(mock_des_vserver_client, 'delete_snapmirror_policy', mock.Mock(side_effect=netapp_api.NaApiError)) self.mock_object(mock_src_vserver_client, 'delete_vserver_peer', mock.Mock(side_effect=netapp_api.NaApiError)) self.library._resource_cleanup_for_backup(fake.SHARE_BACKUP, fake.SHARE_INSTANCE, fake.VSERVER2, fake.FLEXVOL_NAME_1, ) def test__resource_cleanup_for_backup_vserver_volume_none(self): mock_src_vserver_client = mock.Mock() mock_des_vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_src_vserver_client))) self.mock_object(self.library, '_delete_backup_vserver', mock.Mock(return_value=None)) self.mock_object(mock_des_vserver_client, 'delete_volume', mock.Mock(side_effect=netapp_api.NaApiError)) self._backup_mock_common_method(mock_des_vserver_client) backup_config = 'backup_config' CONF.set_override("netapp_backup_volume", "", group=backup_config) CONF.set_override("netapp_backup_vserver", "", group=backup_config) self.library._resource_cleanup_for_backup( fake.SHARE_BACKUP, fake.SHARE_INSTANCE, fake.VSERVER2, fake.FLEXVOL_NAME_1, share_server=fake.SHARE_SERVER, ) def test_create_backup_with_backup_type_none_negative(self): vserver_client = mock.Mock() self.mock_object(self.library, 
'_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) backup = {'id': '242ff47e-518d-4b07-b3c3-0a51e6744149', 'backup_options': {'backend': 'fake_ontap', 'backup_type': None }, } self.assertRaises( exception.BackupException, self.library.create_backup, self.context, fake.SHARE_INSTANCE, backup, ) def test_create_backup_with_non_netapp_backend_negative(self): self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=_get_config())) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) fake_config = configuration.Configuration( driver.share_opts, config_group='backup_config') CONF.set_override("netapp_storage_family", None, group='backup_config') self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=fake_config)) self.assertRaises( exception.BackupException, self.library.create_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_when_enabled_backup_types_none_negative(self): vserver_src_client = mock.Mock() vserver_dest_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_src_client))) self._backup_mock_common_method(vserver_dest_client) fake_config = configuration.Configuration(driver.share_opts, config_group='backup_config') CONF.set_override("netapp_enabled_backup_types", None, group='backup_config') self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=fake_config)) self.assertRaises( exception.BackupException, self.library.create_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_source_has_2_more_relationships_negative(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) snapmirror_info = [{'source-vserver': fake.VSERVER1, 
'source-volume': fake.FLEXVOL_NAME, 'destination-vserver': fake.VSERVER2, 'destination-volume': fake.FLEXVOL_NAME_1 }, {'source-vserver': 'fake_vs_1', 'source-volume': 'fake_vol_1', 'destination-vserver': 'fake_vs_2', 'destination-volume': 'fake_vol_2' } ] self.mock_object(vserver_client, 'get_snapmirror_destinations', mock.Mock(return_value=snapmirror_info)) self.assertRaises( exception.NetAppException, self.library.create_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_bad_backup_config_negative(self): mock_src_client = mock.Mock() mock_des_client = mock.Mock() self._backup_mock_common_method_for_negative(mock_src_client, mock_des_client) fake_config = configuration.Configuration( driver.share_opts, config_group='backup_config') CONF.set_override("netapp_backup_vserver", None, group='backup_config') self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=fake_config)) self.assertRaises( exception.BadConfigurationException, self.library.create_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_when_cluster_are_not_peered_negative(self): mock_src_client = mock.Mock() mock_des_client = mock.Mock() self._backup_mock_common_method_for_negative(mock_src_client, mock_des_client) self.mock_object(self.client, 'get_cluster_peers', mock.Mock(return_value=[])) self.mock_object(mock_src_client, 'get_cluster_name', mock.Mock(return_value='fake_src_cluster')) self.assertRaises( exception.NetAppException, self.library.create_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_when_des_vol_creation_fail_negative(self): mock_src_client = mock.Mock() mock_des_client = mock.Mock() self._backup_mock_common_method_for_negative(mock_src_client, mock_des_client) self.mock_object(self.library, '_get_volume_for_backup', mock.Mock(side_effect=exception.NetAppException)) self.mock_object(self.library, '_delete_backup_vserver', mock.Mock(return_value=[])) 
self.assertRaises( exception.NetAppException, self.library.create_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_when_vserver_not_peered(self): mock_src_client = mock.Mock() mock_des_client = mock.Mock() mock_cluster_client = mock.Mock() self._backup_mock_common_method_for_negative(mock_src_client, mock_des_client) self.mock_object(mock_cluster_client, 'get_cluster_name', mock.Mock(return_value='fake_src_cluster')) self.mock_object(mock_src_client, 'get_vserver_peers', mock.Mock(return_value=[])) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup(self.context, share_instance, backup) def test_create_backup_when_policy_creation_failed_negative(self): mock_src_client = mock.Mock() mock_des_client = mock.Mock() self._backup_mock_common_method_for_negative(mock_src_client, mock_des_client) self.mock_object(mock_des_client, 'create_snapmirror_policy', mock.Mock(side_effect=netapp_api.NaApiError)) self.assertRaises( netapp_api.NaApiError, self.library.create_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_when_duplicate_policy_created(self): mock_src_client = mock.Mock() mock_des_client = mock.Mock() self._backup_mock_common_method_for_negative(mock_src_client, mock_des_client) msg = 'policy with this name already exists' self.mock_object(mock_des_client, 'create_snapmirror_policy', mock.Mock(side_effect=netapp_api.NaApiError( message=msg))) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup(self.context, share_instance, backup) def test_create_backup_when_snapmirror_creation_failed_negative(self): mock_src_client = mock.Mock() mock_des_client = mock.Mock() self._backup_mock_common_method_for_negative(mock_src_client, mock_des_client) self.mock_object(mock_des_client, 'create_snapmirror_vol', mock.Mock(side_effect=netapp_api.NaApiError)) self.assertRaises( exception.NetAppException, self.library.create_backup, 
self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_when_invalid_backup_type_negative(self): mock_src_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_src_client))) self.mock_object(self.mock_dm_session, 'get_backup_configuration', mock.Mock( side_effect=exception.BadConfigurationException, )) self.assertRaises( exception.BadConfigurationException, self.library.create_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_when_invalid_backend_negative(self): mock_src_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_src_client))) self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=_get_config())) self.mock_object(self.mock_dm_session, 'get_backend_configuration', mock.Mock( side_effect=exception.BadConfigurationException, )) self.assertRaises( exception.BadConfigurationException, self.library.create_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_continue_with_status_inprogress(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) snapmirror_info = [{'source-vserver': fake.VSERVER1, 'source-volume': fake.FLEXVOL_NAME, 'destination-vserver': fake.VSERVER2, 'destination-volume': fake.FLEXVOL_NAME_1, 'relationship-status': "inprogress", 'last-transfer-type': "update", }] self.mock_object(mock_dest_client, 'get_snapmirrors', mock.Mock(return_value=snapmirror_info)) snap_list = ["snap1", "snap2", "snap3"] self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=snap_list)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup_continue(self.context, share_instance, backup) def 
test_create_backup_continue_with_state_not_update(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) snapmirror_info = [{'source-vserver': fake.VSERVER1, 'source-volume': fake.FLEXVOL_NAME, 'destination-vserver': fake.VSERVER2, 'destination-volume': fake.FLEXVOL_NAME_1, 'relationship-status': "idle", 'last-transfer-type': "initialize", }] self.mock_object(mock_dest_client, 'get_snapmirrors', mock.Mock(return_value=snapmirror_info)) snap_list = ["snap1", "snap2", "snap3"] self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=snap_list)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup_continue(self.context, share_instance, backup) def test_create_backup_continue_snapmirror_none(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(vserver_client, 'get_snapmirror_destinations', mock.Mock(return_value=None)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup_continue(self.context, share_instance, backup) def test_create_backup_continue_snapmirror_none_from_destination(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(mock_dest_client, 'get_snapmirrors', mock.Mock(return_value=None)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup_continue(self.context, share_instance, backup) def test_create_backup_continue_des_vserver_vol_none_negative(self): vserver_client = mock.Mock() 
mock_des_vserver = mock.Mock() self._backup_mock_common_method(mock_des_vserver) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) snapmirror_info = [fake.SNAP_MIRROR_INFO] self.mock_object(vserver_client, 'get_snapmirror_destinations', mock.Mock(return_value=snapmirror_info)) self.mock_object(self.library, '_get_destination_vserver_and_vol', mock.Mock(return_value=(None, None))) self.assertRaises( exception.NetAppException, self.library.create_backup_continue, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_create_backup_continue_snapshot_left_from_old_relationship(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) snapmirror_info = [fake.SNAP_MIRROR_INFO] self.mock_object(mock_dest_client, 'get_snapmirrors', mock.Mock(return_value=snapmirror_info)) snap_list = ["snap1", "snap2"] self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=snap_list)) self.library._set_volume_has_backup_before(True) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.create_backup_continue(self.context, share_instance, backup) self.library._set_volume_has_backup_before(False) def test_create_backup_continue_relationship_not_healthy_negative(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self._backup_mock_common_method(mock_dest_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) snapmirror_info = [ { 'source-vserver': fake.VSERVER1, 'source-volume': fake.FLEXVOL_NAME, 'destination-vserver': fake.VSERVER2, 'destination-volume': fake.FLEXVOL_NAME_1, 'relationship-status': "idle", 'last-transfer-type': "update", 'is-healthy': "false", } ] self.mock_object(mock_dest_client, 'get_snapmirrors', 
mock.Mock(return_value=snapmirror_info)) self.assertRaises( exception.NetAppException, self.library.create_backup_continue, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_restore_backup_with_vserver_volume_none(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(self.library, '_get_destination_vserver_and_vol', mock.Mock(return_value=(None, None))) self.assertRaises( exception.NetAppException, self.library.restore_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_restore_backup_continue_with_rst_relationship(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(vserver_client, 'get_snapmirrors', mock.Mock(return_value=fake.SNAP_MIRROR_INFO)) snap_list = ["restored_snap1", "snap2", "snap3"] self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=snap_list)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.restore_backup_continue(self.context, backup, share_instance) def test_restore_backup_continue_restore_failed_negative(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(vserver_client, 'get_snapmirrors', mock.Mock(return_value=[])) snap_list = ["restored_snap1", "snap2", "snap3"] self.mock_object(vserver_client, 'list_volume_snapshots', mock.Mock(return_value=snap_list)) self.mock_object(self.library, '_get_backup_snapshot_name', mock.Mock(return_value="restored_snap_test1")) self.assertRaises( exception.NetAppException, self.library.restore_backup_continue, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_delete_backup_vserver_vol_none_negative(self): vserver_client = mock.Mock() self.mock_object(self.library, '_get_vserver', 
mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(self.library, '_get_destination_vserver_and_vol', mock.Mock(return_value=(None, None))) self.mock_object(self.library, '_get_backend', mock.Mock(return_value=fake.BACKEND_NAME)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.delete_backup(self.context, backup, share_instance) def test_delete_backup_snapshot_not_found_negative(self): vserver_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(self.library, '_get_destination_vserver_and_vol', mock.Mock(return_value=(fake.VSERVER2, fake.FLEXVOL_NAME_1))) self.mock_object(self.library, '_get_backend', mock.Mock(return_value=fake.BACKEND_NAME)) self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(side_effect=netapp_api.NaApiError)) self.mock_object(self.library, '_get_api_client_for_backend', mock.Mock(return_value=mock_dest_client)) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.delete_backup(self.context, backup, share_instance) def test_delete_backup_cleanup_resource(self): vserver_client = mock.Mock() mock_des_client = mock.Mock() self._backup_mock_common_method(mock_des_client) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(mock_des_client, 'get_snapmirrors', mock.Mock(return_value=fake.SNAP_MIRROR_INFO)) self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=['fake_snapshot'])) share_instance = fake.SHARE_INSTANCE backup = fake.SHARE_BACKUP self.library.delete_backup(self.context, backup, share_instance) def test_delete_backup_snapshot_delete_fail_negative(self): vserver_client = mock.Mock() mock_des_client = mock.Mock() self._backup_mock_common_method(mock_des_client) self.mock_object(self.library, '_get_vserver', 
mock.Mock(return_value=(fake.VSERVER1, vserver_client))) self.mock_object(mock_des_client, 'get_snapmirrors', mock.Mock(return_value=fake.SNAP_MIRROR_INFO)) self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=['fake_snapshot1', 'fake_snapshot2'])) self.mock_object(mock_des_client, 'get_snapshot', mock.Mock(side_effect=netapp_api.NaApiError)) self.mock_object(self.library, '_is_snapshot_deleted', mock.Mock(return_value=False)) self.assertRaises( exception.NetAppException, self.library.delete_backup, self.context, fake.SHARE_INSTANCE, fake.SHARE_BACKUP, ) def test_delete_backup_snapshot_not_exist(self): vserver_client = mock.Mock() mock_des_client = mock.Mock() self._backup_mock_common_method(mock_des_client) self._backup_mock_common_method_for_negative(vserver_client, mock_des_client) self.mock_object(self.library, '_get_des_volume_backup_snapshots', mock.Mock(return_value=['fake_snapshot1', 'fake_snapshot2'])) self.mock_object(self.library, '_is_snapshot_deleted', mock.Mock(return_value=True)) msg = "entry doesn't exist" self.mock_object(mock_des_client, 'delete_snapshot', mock.Mock(side_effect=netapp_api.NaApiError( message=msg))) self.library.delete_backup(self.context, fake.SHARE_BACKUP, fake.SHARE_INSTANCE) def test__get_backup_progress_status(self): mock_dest_client = mock.Mock() vol_attr = {'name': 'fake_vol', 'size-used': '123454'} self.mock_object(mock_dest_client, 'get_volume', mock.Mock(return_value=vol_attr)) snapmirror_info = {'source-vserver': fake.VSERVER1, 'source-volume': fake.FLEXVOL_NAME, 'destination-vserver': fake.VSERVER2, 'destination-volume': fake.FLEXVOL_NAME_1, 'relationship-status': "idle", 'last-transfer-size': '3456', } self.library._get_backup_progress_status(mock_dest_client, [snapmirror_info]) def _backup_mock_common_method(self, mock_dest_client): self.mock_object(mock_dest_client, 'get_cluster_name', mock.Mock(return_value=fake.CLUSTER_NAME)) self.mock_object(self.library, 
'_get_backend_share_name', mock.Mock(return_value=fake.SHARE_NAME)) self.mock_object(self.library, '_get_api_client_for_backend', mock.Mock(return_value=mock_dest_client)) self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=_get_config())) self.mock_object(data_motion, 'get_backup_configuration', mock.Mock(return_value=_get_config())) self.mock_object(self.library, '_get_destination_vserver_and_vol', mock.Mock(return_value=(fake.VSERVER2, fake.FLEXVOL_NAME))) self.mock_object(self.library, '_get_backend', mock.Mock(return_value=fake.BACKEND_NAME)) def _backup_mock_common_method_for_negative(self, mock_src_client, mock_des_client): self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_src_client))) self._backup_mock_common_method(mock_des_client) self.mock_object(mock_src_client, 'get_snapmirror_destinations', mock.Mock(return_value=[])) vserver_peer_info = [{'vserver': fake.VSERVER1, 'peer-vserver': fake.VSERVER2}] self.mock_object(mock_src_client, 'get_vserver_peers', mock.Mock(return_value=vserver_peer_info)) snap_list = ["snap1", "snap2", "snap3"] self.mock_object(mock_des_client, 'list_volume_snapshots', mock.Mock(return_value=snap_list)) def test_update_share_from_metadata(self): metadata = { "snapshot_policy": "daily", } share_instance = fake.SHARE_INSTANCE mock_update_volume_snapshot_policy = self.mock_object( self.library, 'update_volume_snapshot_policy') self.library.update_share_from_metadata(self.context, share_instance, metadata) mock_update_volume_snapshot_policy.assert_called_once_with( share_instance, "daily", share_server=None) def test_update_share_network_subnet_from_metadata(self): metadata = { "showmount": "true", } mock_update_showmount = self.mock_object( self.library, 'update_showmount') self.library.update_share_network_subnet_from_metadata( self.context, 'fake_share_network', 'fake_share_network_subnet', fake.SHARE_SERVER, metadata) 
mock_update_showmount.assert_called_once_with( "true", share_server=fake.SHARE_SERVER) def test__get_aggregate_snaplock_type_cluster_scope(self): self.library._have_cluster_creds = True self.mock_object(self.client, 'get_aggregate', mock.Mock(return_value={ 'snaplock-type': 'compliance' })) result = self.library._get_aggregate_snaplock_type(fake.AGGREGATE) self.assertEqual(result, "compliance") def test__get_aggregate_snaplock_type_vserver_scope(self): self.library._have_cluster_creds = False self.mock_object(self.client, 'get_vserver_aggr_snaplock_type', mock.Mock(return_value='enterprise')) result = self.library._get_aggregate_snaplock_type(fake.AGGREGATE) self.assertEqual(result, "enterprise") def test__is_snaplock_compatible_for_migration_for_unified_aggr(self): self.library._client.features.UNIFIED_AGGR = True result = self.library._is_snaplock_compatible_for_migration( fake.AGGREGATE, fake.AGGR_POOL_NAME ) self.assertTrue(result) def test__is_snaplock_compatible_for_migration_for_non_snaplock(self): self.library._client.features.UNIFIED_AGGR = False self.library._client.features.SNAPLOCK = False result = self.library._is_snaplock_compatible_for_migration( fake.AGGREGATE, fake.AGGR_POOL_NAME ) self.assertTrue(result) def test__is_snaplock_compatible_for_migration_non_unified_aggr(self): self.library._client.features.UNIFIED_AGGR = False self.library._client.features.SNAPLOCK = True result = self.library._is_snaplock_compatible_for_migration( fake.AGGREGATE, fake.AGGR_POOL_NAME ) self.assertTrue(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py0000664000175000017500000057644300000000000032471 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the NetApp Data ONTAP cDOT multi-SVM storage driver library. """ import copy from unittest import mock import ddt from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import units from manila.common import constants from manila import context from manila import exception from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp.dataontap.cluster_mode import lib_multi_svm from manila.share.drivers.netapp import utils as na_utils from manila.share import share_types from manila.share import utils as share_utils from manila import test from manila.tests.share.drivers.netapp.dataontap.client import fakes as c_fake from manila.tests.share.drivers.netapp.dataontap.cluster_mode.test_lib_base\ import _get_config from manila.tests.share.drivers.netapp.dataontap import fakes as fake @ddt.ddt class NetAppFileStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppFileStorageLibraryTestCase, self).setUp() self.mock_object(na_utils, 'validate_driver_instantiation') # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(lib_multi_svm.LOG, 'warning', mock.Mock(side_effect=mock_logger.warning)) self.mock_object(lib_multi_svm.LOG, 
'error', mock.Mock(side_effect=mock_logger.error)) kwargs = { 'configuration': fake.get_config_cmode(), 'private_storage': mock.Mock(), 'app_version': fake.APP_VERSION } self.library = lib_multi_svm.NetAppCmodeMultiSVMFileStorageLibrary( fake.DRIVER_NAME, **kwargs) self.library._client = mock.Mock() self.library._client.get_ontapi_version.return_value = (1, 21) self.client = self.library._client self.fake_new_replica = copy.deepcopy(fake.SHARE) self.fake_new_ss = copy.deepcopy(fake.SHARE_SERVER) self.fake_new_vserver_name = 'fake_new_vserver' self.fake_new_ss['backend_details']['vserver_name'] = ( self.fake_new_vserver_name ) self.fake_new_replica['share_server'] = self.fake_new_ss self.fake_new_replica_host = 'fake_new_host' self.fake_replica = copy.deepcopy(fake.SHARE) self.fake_replica['id'] = fake.SHARE_ID2 fake_ss = copy.deepcopy(fake.SHARE_SERVER) self.fake_vserver = 'fake_vserver' fake_ss['backend_details']['vserver_name'] = ( self.fake_vserver) self.fake_replica['share_server'] = fake_ss self.fake_replica_host = 'fake_host' self.fake_new_client = mock.Mock() self.fake_client = mock.Mock() self.library._default_nfs_config = fake.NFS_CONFIG_DEFAULT # Server migration self.dm_session = data_motion.DataMotionSession() self.fake_src_share = copy.deepcopy(fake.SHARE) self.fake_src_share_server = copy.deepcopy(fake.SHARE_SERVER) self.fake_src_vserver = 'source_vserver' self.fake_src_backend_name = ( self.fake_src_share_server['host'].split('@')[1]) self.fake_src_share_server['backend_details']['vserver_name'] = ( self.fake_src_vserver ) self.fake_src_share['share_server'] = self.fake_src_share_server self.fake_src_share['id'] = 'fb9be037-8a75-4c2a-bb7d-f63dffe13015' self.fake_src_vol_name = 'share_fb9be037_8a75_4c2a_bb7d_f63dffe13015' self.fake_dest_share = copy.deepcopy(fake.SHARE) self.fake_dest_share_server = copy.deepcopy(fake.SHARE_SERVER_2) self.fake_dest_vserver = 'dest_vserver' self.fake_dest_backend_name = ( 
self.fake_dest_share_server['host'].split('@')[1]) self.fake_dest_share_server['backend_details']['vserver_name'] = ( self.fake_dest_vserver ) self.fake_dest_share['share_server'] = self.fake_dest_share_server self.fake_dest_share['id'] = 'aa6a3941-f87f-4874-92ca-425d3df85457' self.fake_dest_vol_name = 'share_aa6a3941_f87f_4874_92ca_425d3df85457' self.mock_src_client = mock.Mock() self.mock_dest_client = mock.Mock() def test_check_for_setup_error_cluster_creds_no_vserver(self): self.library._have_cluster_creds = True mock_list_non_root_aggregates = self.mock_object( self.client, 'list_non_root_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_init_flexgroup = self.mock_object(self.library, '_initialize_flexgroup_pools') self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=True)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, 'check_for_setup_error') self.library.check_for_setup_error() mock_list_non_root_aggregates.assert_called_once_with() mock_init_flexgroup.assert_called_once_with(set(fake.AGGREGATES)) self.assertTrue(self.library.is_flexvol_pool_configured.called) self.assertTrue(self.library._find_matching_aggregates.called) mock_super.assert_called_once_with() def test_check_for_setup_error_cluster_creds_with_vserver(self): self.library._have_cluster_creds = True self.library.configuration.netapp_vserver = fake.VSERVER1 mock_list_non_root_aggregates = self.mock_object( self.client, 'list_non_root_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_init_flexgroup = self.mock_object(self.library, '_initialize_flexgroup_pools') self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=True)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, 
'check_for_setup_error') self.library.check_for_setup_error() mock_super.assert_called_once_with() mock_list_non_root_aggregates.assert_called_once_with() mock_init_flexgroup.assert_called_once_with(set(fake.AGGREGATES)) self.assertTrue(self.library.is_flexvol_pool_configured.called) self.assertTrue(self.library._find_matching_aggregates.called) self.assertTrue(lib_multi_svm.LOG.warning.called) def test_check_for_setup_error_no_aggregates_no_flexvol_pool(self): self.library._have_cluster_creds = True mock_list_non_root_aggregates = self.mock_object( self.client, 'list_non_root_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_init_flexgroup = self.mock_object(self.library, '_initialize_flexgroup_pools') self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=False)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=[])) self.library.check_for_setup_error() mock_list_non_root_aggregates.assert_called_once_with() mock_init_flexgroup.assert_called_once_with(set(fake.AGGREGATES)) self.assertTrue(self.library.is_flexvol_pool_configured.called) self.assertTrue(self.library._find_matching_aggregates.called) def test_check_for_setup_error_vserver_creds(self): self.library._have_cluster_creds = False self.assertRaises(exception.InvalidInput, self.library.check_for_setup_error) def test_check_for_setup_error_no_aggregates(self): self.library._have_cluster_creds = True mock_list_non_root_aggregates = self.mock_object( self.client, 'list_non_root_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_init_flexgroup = self.mock_object(self.library, '_initialize_flexgroup_pools') self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=True)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=[])) self.assertRaises(exception.NetAppException, self.library.check_for_setup_error) mock_list_non_root_aggregates.assert_called_once_with() 
mock_init_flexgroup.assert_called_once_with(set(fake.AGGREGATES)) self.assertTrue(self.library.is_flexvol_pool_configured.called) self.assertTrue(self.library._find_matching_aggregates.called) def test_get_vserver_no_share_server(self): self.assertRaises(exception.InvalidInput, self.library._get_vserver) def test_get_vserver_no_share_server_with_vserver_name(self): fake_vserver_client = mock.Mock() mock_vserver_exists = self.mock_object( fake_vserver_client, 'vserver_exists', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=fake_vserver_client)) result_vserver, result_vserver_client = self.library._get_vserver( share_server=None, vserver_name=fake.VSERVER1) mock_vserver_exists.assert_called_once_with( fake.VSERVER1 ) self.assertEqual(fake.VSERVER1, result_vserver) self.assertEqual(fake_vserver_client, result_vserver_client) def test_get_vserver_no_backend_details(self): fake_share_server = copy.deepcopy(fake.SHARE_SERVER) fake_share_server.pop('backend_details') kwargs = {'share_server': fake_share_server} self.assertRaises(exception.VserverNotSpecified, self.library._get_vserver, **kwargs) def test_get_vserver_none_backend_details(self): fake_share_server = copy.deepcopy(fake.SHARE_SERVER) fake_share_server['backend_details'] = None kwargs = {'share_server': fake_share_server} self.assertRaises(exception.VserverNotSpecified, self.library._get_vserver, **kwargs) def test_get_vserver_no_vserver(self): fake_share_server = copy.deepcopy(fake.SHARE_SERVER) fake_share_server['backend_details'].pop('vserver_name') kwargs = {'share_server': fake_share_server} self.assertRaises(exception.VserverNotSpecified, self.library._get_vserver, **kwargs) def test_get_vserver_none_vserver(self): fake_share_server = copy.deepcopy(fake.SHARE_SERVER) fake_share_server['backend_details']['vserver_name'] = None kwargs = {'share_server': fake_share_server} self.assertRaises(exception.VserverNotSpecified, self.library._get_vserver, 
**kwargs) def test_get_vserver_not_found(self): mock_client = mock.Mock() mock_client.vserver_exists.return_value = False self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=mock_client)) kwargs = {'share_server': fake.SHARE_SERVER} self.assertRaises(exception.VserverNotFound, self.library._get_vserver, **kwargs) def test_get_vserver(self): mock_client = mock.Mock() mock_client.vserver_exists.return_value = True self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=mock_client)) result = self.library._get_vserver(share_server=fake.SHARE_SERVER) self.assertTupleEqual((fake.VSERVER1, mock_client), result) def test_get_ems_pool_info(self): self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=['aggr1', 'aggr2'])) self.library._flexgroup_pools = {'fg': ['aggr1', 'aggr2']} result = self.library._get_ems_pool_info() expected = { 'pools': { 'vserver': None, 'aggregates': ['aggr1', 'aggr2'], 'flexgroup_aggregates': {'fg': ['aggr1', 'aggr2']}, }, } self.assertEqual(expected, result) @ddt.data({'fake_vserver_name': fake, 'nfs_config_support': False}, {'fake_vserver_name': fake.IDENTIFIER, 'nfs_config_support': True}) @ddt.unpack def test_manage_server(self, fake_vserver_name, nfs_config_support): self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) mock_get_vserver_name = self.mock_object( self.library, '_get_vserver_name', mock.Mock(return_value=fake_vserver_name)) self.library.is_nfs_config_supported = nfs_config_support mock_get_nfs_config = self.mock_object( self.library._client, 'get_nfs_config', mock.Mock(return_value=fake.NFS_CONFIG_DEFAULT)) new_identifier, new_details = self.library.manage_server( context, fake.SHARE_SERVER, fake.IDENTIFIER, {}) mock_get_vserver_name.assert_called_once_with(fake.SHARE_SERVER['id']) self.assertEqual(fake_vserver_name, new_details['vserver_name']) self.assertEqual(fake_vserver_name, new_identifier) if nfs_config_support: 
mock_get_nfs_config.assert_called_once_with( list(self.library.NFS_CONFIG_EXTRA_SPECS_MAP.values()), fake_vserver_name) self.assertEqual(jsonutils.dumps(fake.NFS_CONFIG_DEFAULT), new_details['nfs_config']) else: mock_get_nfs_config.assert_not_called() def test_get_share_server_network_info(self): fake_vserver_client = mock.Mock() self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=['fake', fake_vserver_client])) net_interfaces = copy.deepcopy(c_fake.NETWORK_INTERFACES_MULTIPLE) self.mock_object(fake_vserver_client, 'get_network_interfaces', mock.Mock(return_value=net_interfaces)) result = self.library.get_share_server_network_info(context, fake.SHARE_SERVER, fake.IDENTIFIER, {}) mock_get_vserver.assert_called_once_with( vserver_name=fake.IDENTIFIER ) reference_allocations = [] for lif in net_interfaces: reference_allocations.append(lif['address']) self.assertEqual(reference_allocations, result) @ddt.data((True, fake.IDENTIFIER), (False, fake.IDENTIFIER)) @ddt.unpack def test__verify_share_server_name(self, vserver_exists, identifier): mock_exists = self.mock_object(self.client, 'vserver_exists', mock.Mock(return_value=vserver_exists)) expected_result = identifier if not vserver_exists: expected_result = self.library._get_vserver_name(identifier) result = self.library._get_correct_vserver_old_name(identifier) self.assertEqual(result, expected_result) mock_exists.assert_called_once_with(identifier) def test_handle_housekeeping_tasks(self): self.mock_object(self.client, 'prune_deleted_nfs_export_policies') self.mock_object(self.client, 'prune_deleted_snapshots') mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, '_handle_housekeeping_tasks') self.library._handle_housekeeping_tasks() self.assertTrue(self.client.prune_deleted_nfs_export_policies.called) self.assertTrue(self.client.prune_deleted_snapshots.called) 
self.assertTrue(mock_super.called) def test_find_matching_aggregates(self): mock_is_flexvol_pool_configured = self.mock_object( self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=True)) mock_list_non_root_aggregates = self.mock_object( self.client, 'list_non_root_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.library.configuration.netapp_aggregate_name_search_pattern = ( '.*_aggr_1') result = self.library._find_matching_aggregates() self.assertListEqual([fake.AGGREGATES[0]], result) mock_is_flexvol_pool_configured.assert_called_once_with() mock_list_non_root_aggregates.assert_called_once_with() mock_list_non_root_aggregates.assert_called_once_with() def test_find_matching_aggregates_no_flexvol_pool(self): self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=False)) result = self.library._find_matching_aggregates() self.assertListEqual([], result) def test__set_network_with_metadata(self): net_info_1 = copy.deepcopy(fake.NETWORK_INFO) net_info_2 = copy.deepcopy(fake.NETWORK_INFO) net_info_2['subnet_metadata'] = {'fake_key': 'fake_value'} net_info_3 = copy.deepcopy(fake.NETWORK_INFO) metadata_vlan = 1 net_info_3['subnet_metadata'] = { 'set_vlan': metadata_vlan, 'set_mtu': '1' } net_info_4 = copy.deepcopy(fake.NETWORK_INFO) metadata_vlan = 1 net_info_4['subnet_metadata'] = { 'set_vlan': metadata_vlan } net_list = [net_info_1, net_info_2, net_info_3, net_info_4] self.library._set_network_with_metadata(net_list) net_info = copy.deepcopy(fake.NETWORK_INFO) self.assertEqual(net_info, net_list[0]) net_info['subnet_metadata'] = {'fake_key': 'fake_value'} self.assertEqual(net_info, net_list[1]) self.assertEqual(metadata_vlan, net_list[2]['segmentation_id']) for allocation in net_list[2]['network_allocations']: self.assertEqual(metadata_vlan, allocation['segmentation_id']) self.assertEqual(1, allocation['mtu']) self.assertEqual(metadata_vlan, net_list[3]['segmentation_id']) for allocation in 
net_list[3]['network_allocations']: self.assertEqual(metadata_vlan, allocation['segmentation_id']) self.assertEqual(fake.MTU, allocation['mtu']) @ddt.data({'set_vlan': '0', 'set_mtu': '1500'}, {'set_vlan': '1000', 'set_mtu': '1bla'}) def test__set_network_with_metadata_exception(self, metadata): net_info = copy.deepcopy(fake.NETWORK_INFO) net_info['subnet_metadata'] = metadata self.assertRaises( exception.NetworkBadConfigurationException, self.library._set_network_with_metadata, [net_info]) @ddt.data({'nfs_config_support': False, 'with_encryption': True}, {'nfs_config_support': True, 'nfs_config': fake.NFS_CONFIG_UDP_MAX, 'with_encryption': False}, {'nfs_config_support': True, 'nfs_config': fake.NFS_CONFIG_DEFAULT, 'with_encryption': True}) @ddt.unpack def test_setup_server(self, nfs_config_support, nfs_config=None, with_encryption=False): mock_get_vserver_name = self.mock_object( self.library, '_get_vserver_name', mock.Mock(return_value=fake.VSERVER1)) mock_create_vserver = self.mock_object(self.library, '_create_vserver') mock_validate_network_type = self.mock_object( self.library, '_validate_network_type') mock_validate_share_network_subnets = self.mock_object( self.library, '_validate_share_network_subnets') self.library.is_nfs_config_supported = nfs_config_support mock_get_extra_spec = self.mock_object( share_types, "get_share_type_extra_specs", mock.Mock(return_value=fake.EXTRA_SPEC)) mock_check_extra_spec = self.mock_object( self.library, '_check_nfs_config_extra_specs_validity', mock.Mock()) self.library.configuration.netapp_restrict_lif_creation_per_ha_pair = ( True ) check_lif_limit = self.mock_object( self.library, '_check_data_lif_count_limit_reached_for_ha_pair', ) mock_get_nfs_config = self.mock_object( self.library, "_get_nfs_config_provisioning_options", mock.Mock(return_value=nfs_config)) mock_set_with_meta = self.mock_object( self.library, '_set_network_with_metadata') mock_barbican_kms_config = self.mock_object( self.library, 
'_create_barbican_kms_config_for_specified_vserver') fake_server_metadata = ( fake.SERVER_METADATA_WITH_ENCRYPTION if with_encryption else fake.SERVER_METADATA) result = self.library.setup_server(fake.NETWORK_INFO_LIST, fake_server_metadata) ports = {} for network_allocation in fake.NETWORK_INFO['network_allocations']: ports[network_allocation['id']] = network_allocation['ip_address'] mock_set_with_meta.assert_called_once_with(fake.NETWORK_INFO_LIST) self.assertTrue(mock_validate_network_type.called) self.assertTrue(mock_validate_share_network_subnets.called) self.assertTrue(mock_get_vserver_name.called) self.assertTrue(mock_create_vserver.called) self.assertTrue(check_lif_limit.called) if nfs_config_support: mock_get_extra_spec.assert_called_once_with( fake_server_metadata['share_type_id']) mock_check_extra_spec.assert_called_once_with( fake.EXTRA_SPEC) mock_get_nfs_config.assert_called_once_with( fake.EXTRA_SPEC) else: mock_get_extra_spec.assert_not_called() mock_check_extra_spec.assert_not_called() mock_get_nfs_config.assert_not_called() expected = { 'vserver_name': fake.VSERVER1, 'ports': jsonutils.dumps(ports), } if nfs_config_support: expected.update({'nfs_config': jsonutils.dumps(nfs_config)}) self.assertDictEqual(expected, result) if with_encryption: mock_barbican_kms_config.assert_called_once_with( fake.VSERVER1, fake_server_metadata) else: mock_barbican_kms_config.assert_not_called() def test_setup_server_with_error(self): self.library.is_nfs_config_supported = False mock_get_vserver_name = self.mock_object( self.library, '_get_vserver_name', mock.Mock(return_value=fake.VSERVER1)) fake_exception = exception.ManilaException("fake") mock_create_vserver = self.mock_object( self.library, '_create_vserver', mock.Mock(side_effect=fake_exception)) mock_validate_network_type = self.mock_object( self.library, '_validate_network_type') mock_validate_share_network_subnets = self.mock_object( self.library, '_validate_share_network_subnets') 
self.mock_object(self.library, '_set_network_with_metadata') self.assertRaises( exception.ManilaException, self.library.setup_server, fake.NETWORK_INFO_LIST, fake.SERVER_METADATA) ports = {} for network_allocation in fake.NETWORK_INFO['network_allocations']: ports[network_allocation['id']] = network_allocation['ip_address'] self.assertTrue(mock_validate_network_type.called) self.assertTrue(mock_validate_share_network_subnets.called) self.assertTrue(mock_get_vserver_name.called) self.assertTrue(mock_create_vserver.called) self.assertDictEqual( {'server_details': { 'vserver_name': fake.VSERVER1, 'ports': jsonutils.dumps(ports), }}, fake_exception.detail_data) def test_setup_server_invalid_subnet(self): invalid_subnet_exception = exception.NetworkBadConfigurationException( reason='This is a fake message') self.mock_object(self.library, '_get_vserver_name', mock.Mock(return_value=fake.VSERVER1)) self.mock_object(self.library, '_validate_network_type') self.mock_object(self.library, '_validate_share_network_subnets', mock.Mock(side_effect=invalid_subnet_exception)) self.mock_object(self.library, '_set_network_with_metadata') self.assertRaises( exception.NetworkBadConfigurationException, self.library.setup_server, fake.NETWORK_INFO_LIST) self.library._validate_share_network_subnets.assert_called_once_with( fake.NETWORK_INFO_LIST) def test_validate_share_network_subnets(self): fake_vlan = fake.NETWORK_INFO['segmentation_id'] network_info_different_seg_id = copy.deepcopy(fake.NETWORK_INFO) network_info_different_seg_id['segmentation_id'] = fake_vlan allocations = network_info_different_seg_id['network_allocations'] allocations[0]['segmentation_id'] = fake_vlan fake.NETWORK_INFO_LIST.append(network_info_different_seg_id) result = self.library._validate_share_network_subnets( fake.NETWORK_INFO_LIST) self.assertIsNone(result) def test_validate_share_network_subnets_invalid_vlan_config(self): network_info_different_seg_id = copy.deepcopy(fake.NETWORK_INFO) 
network_info_different_seg_id['segmentation_id'] = 4004 allocations = network_info_different_seg_id['network_allocations'] allocations[0]['segmentation_id'] = 4004 fake.NETWORK_INFO_LIST.append(network_info_different_seg_id) self.assertRaises( exception.NetworkBadConfigurationException, self.library._validate_share_network_subnets, fake.NETWORK_INFO_LIST) @ddt.data( {'network_info': [{'network_type': 'vlan', 'segmentation_id': 1000}]}, {'network_info': [{'network_type': None, 'segmentation_id': None}]}, {'network_info': [{'network_type': 'flat', 'segmentation_id': None}]}) @ddt.unpack def test_validate_network_type_with_valid_network_types(self, network_info): result = self.library._validate_network_type(network_info) self.assertIsNone(result) @ddt.data( {'network_info': [{'network_type': 'vxlan', 'segmentation_id': 1000}]}, {'network_info': [{'network_type': 'gre', 'segmentation_id': 100}]}) @ddt.unpack def test_validate_network_type_with_invalid_network_types(self, network_info): self.assertRaises(exception.NetworkBadConfigurationException, self.library._validate_network_type, network_info) def test_get_vserver_name(self): vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id actual_result = self.library._get_vserver_name(vserver_id) self.assertEqual(vserver_name, actual_result) @ddt.data({'existing_ipspace': None, 'nfs_config': fake.NFS_CONFIG_TCP_UDP_MAX}, {'existing_ipspace': fake.IPSPACE, 'nfs_config': None}) @ddt.unpack def test_create_vserver(self, existing_ipspace, nfs_config): versions = ['fake_v1', 'fake_v2'] self.library.configuration.netapp_enabled_share_protocols = versions self.library.configuration.netapp_cifs_aes_encryption = False vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id fake_lif_home_ports = {fake.CLUSTER_NODES[0]: 'fake_port', fake.CLUSTER_NODES[1]: 'another_fake_port'} vserver_client = mock.Mock() self.mock_object(self.library._client, 
'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value='fake_port')) self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) self.mock_object(self.library._client, 'vserver_exists', mock.Mock(return_value=False)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, '_get_flexgroup_aggr_set', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, '_create_ipspace', mock.Mock(return_value=fake.IPSPACE)) self.mock_object(self.library, '_create_vserver_lifs') self.mock_object(self.library, '_create_vserver_routes') self.mock_object(self.library, '_create_vserver_admin_lif') self.mock_object(self.library._client, 'create_port_and_broadcast_domain', mock.Mock(side_effect=['fake_port', 'another_fake_port'])) get_ipspace_name_for_vlan_port = self.mock_object( self.library._client, 'get_ipspace_name_for_vlan_port', mock.Mock(return_value=existing_ipspace)) self.library._create_vserver(vserver_name, fake.NETWORK_INFO_LIST, fake.NFS_CONFIG_TCP_UDP_MAX, nfs_config=nfs_config) get_ipspace_name_for_vlan_port.assert_called_once_with( fake.CLUSTER_NODES[0], 'fake_port', fake.NETWORK_INFO['segmentation_id']) if not existing_ipspace: self.library._create_ipspace.assert_called_once_with( fake.NETWORK_INFO) self.library._client.create_vserver.assert_called_once_with( vserver_name, fake.ROOT_VOLUME_AGGREGATE, fake.ROOT_VOLUME, set(fake.AGGREGATES), fake.IPSPACE, fake.SECURITY_CERT_DEFAULT_EXPIRE_DAYS, fake.DELETE_RETENTION_HOURS, False) self.library._get_api_client.assert_called_once_with( vserver=vserver_name) self.library._create_vserver_lifs.assert_called_once_with( vserver_name, vserver_client, fake.NETWORK_INFO, fake.IPSPACE, lif_home_ports=fake_lif_home_ports) 
self.library._create_vserver_routes.assert_called_once_with( vserver_client, fake.NETWORK_INFO) self.library._create_vserver_admin_lif.assert_called_once_with( vserver_name, vserver_client, fake.NETWORK_INFO, fake.IPSPACE, lif_home_ports=fake_lif_home_ports) vserver_client.enable_nfs.assert_called_once_with( versions, nfs_config=nfs_config) self.library._client.setup_security_services.assert_called_once_with( fake.NETWORK_INFO['security_services'], vserver_client, vserver_name, False) self.library._get_flexgroup_aggr_set.assert_called_once_with() @ddt.data(None, fake.IPSPACE) def test_create_vserver_dp_destination(self, existing_ipspace): versions = ['fake_v1', 'fake_v2'] self.library.configuration.netapp_enabled_share_protocols = versions vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id self.mock_object(self.library._client, 'vserver_exists', mock.Mock(return_value=False)) self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value='fake_port')) self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, '_get_flexgroup_aggr_set', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, '_create_ipspace', mock.Mock(return_value=fake.IPSPACE)) get_ipspace_name_for_vlan_port = self.mock_object( self.library._client, 'get_ipspace_name_for_vlan_port', mock.Mock(return_value=existing_ipspace)) self.mock_object(self.library, '_create_port_and_broadcast_domain') self.library._create_vserver(vserver_name, fake.NETWORK_INFO_LIST, metadata={'migration_destination': True}) get_ipspace_name_for_vlan_port.assert_called_once_with( fake.CLUSTER_NODES[0], 'fake_port', fake.NETWORK_INFO['segmentation_id']) if not existing_ipspace: 
self.library._create_ipspace.assert_called_once_with( fake.NETWORK_INFO) create_server_mock = self.library._client.create_vserver_dp_destination create_server_mock.assert_called_once_with( vserver_name, fake.AGGREGATES, fake.IPSPACE, fake.DELETE_RETENTION_HOURS, False) self.library._create_port_and_broadcast_domain.assert_called_once_with( fake.IPSPACE, fake.NETWORK_INFO) self.library._get_flexgroup_aggr_set.assert_not_called() def test_create_vserver_already_present(self): vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) self.mock_object(self.library._client, 'vserver_exists', mock.Mock(return_value=True)) self.assertRaises(exception.NetAppException, self.library._create_vserver, vserver_name, fake.NETWORK_INFO, fake.NFS_CONFIG_TCP_UDP_MAX) @ddt.data( {'network_exception': netapp_api.NaApiError, 'existing_ipspace': fake.IPSPACE}, {'network_exception': netapp_api.NaApiError, 'existing_ipspace': None}, {'network_exception': exception.NetAppException, 'existing_ipspace': None}, {'network_exception': exception.NetAppException, 'existing_ipspace': fake.IPSPACE}) @ddt.unpack def test_create_vserver_lif_creation_failure(self, network_exception, existing_ipspace): vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id vserver_client = mock.Mock() security_service = fake.NETWORK_INFO['security_services'] self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value='fake_port')) self.mock_object(context, 'get_admin_context', mock.Mock(return_value='fake_admin_context')) self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) self.mock_object(self.library._client, 'vserver_exists', mock.Mock(return_value=False)) 
self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library, '_get_flexgroup_aggr_set', mock.Mock(return_value=fake.AGGREGATES)) self.mock_object(self.library._client, 'get_ipspace_name_for_vlan_port', mock.Mock(return_value=existing_ipspace)) self.mock_object(self.library, '_create_ipspace', mock.Mock(return_value=fake.IPSPACE)) self.mock_object(self.library, '_setup_network_for_vserver', mock.Mock(side_effect=network_exception)) self.mock_object(self.library, '_delete_vserver') self.assertRaises(network_exception, self.library._create_vserver, vserver_name, fake.NETWORK_INFO_LIST, fake.NFS_CONFIG_TCP_UDP_MAX) self.library._get_api_client.assert_called_with(vserver=vserver_name) self.assertTrue(self.library._client.create_vserver.called) self.library._setup_network_for_vserver.assert_called_with( vserver_name, vserver_client, fake.NETWORK_INFO_LIST, fake.IPSPACE, security_services=security_service, nfs_config=None) self.library._delete_vserver.assert_called_once_with( vserver_name, needs_lock=False, security_services=security_service) self.assertFalse(vserver_client.enable_nfs.called) self.assertEqual(1, lib_multi_svm.LOG.error.call_count) def test__create_barbican_kms_config_for_specified_vserver(self): vserver_id = fake.NETWORK_INFO['server_id'] vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id fake_config_uuid = "fake_uuid" self.mock_object( share_utils, 'extract_host', mock.Mock( return_value=fake.SERVER_METADATA_WITH_ENCRYPTION[ 'request_host'])) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=self.fake_client)) self.mock_object(self.fake_client, 'create_barbican_kms_config_for_specified_vserver') self.mock_object(self.fake_client, 'get_key_store_config_uuid', mock.Mock(return_value=fake_config_uuid)) self.mock_object(self.fake_client, 'enable_key_store_config') self.library._create_barbican_kms_config_for_specified_vserver( vserver_name, 
fake.SERVER_METADATA_WITH_ENCRYPTION) share_utils.extract_host.assert_called_once_with( fake.SERVER_METADATA_WITH_ENCRYPTION['request_host'], level='backend_name') data_motion.get_client_for_backend.assert_called_once_with( fake.SERVER_METADATA_WITH_ENCRYPTION['request_host'], vserver_name=None, force_rest_client=True) self.fake_client.create_barbican_kms_config_for_specified_vserver.\ assert_called_once_with( vserver_name, mock.ANY, fake.SERVER_METADATA_WITH_ENCRYPTION['encryption_key_ref'], fake.SERVER_METADATA_WITH_ENCRYPTION['keystone_url'], fake.SERVER_METADATA_WITH_ENCRYPTION ['application_credential_id'], fake.SERVER_METADATA_WITH_ENCRYPTION ['application_credential_secret']) self.fake_client.enable_key_store_config(fake_config_uuid) def test_get_valid_ipspace_name(self): result = self.library._get_valid_ipspace_name(fake.IPSPACE_ID) expected = 'ipspace_' + fake.IPSPACE_ID.replace('-', '_') self.assertEqual(expected, result) def test_create_ipspace_not_supported(self): self.library._client.features.IPSPACES = False result = self.library._create_ipspace(fake.NETWORK_INFO) self.assertIsNone(result) @ddt.data(None, 'flat') def test_create_ipspace_not_vlan(self, network_type): self.library._client.features.IPSPACES = True network_info = copy.deepcopy(fake.NETWORK_INFO) network_info['network_allocations'][0]['segmentation_id'] = None network_info['network_allocations'][0]['network_type'] = network_type result = self.library._create_ipspace(network_info) self.assertEqual('Default', result) def test_create_ipspace(self): self.library._client.features.IPSPACES = True self.mock_object(self.library._client, 'create_ipspace', mock.Mock(return_value=False)) result = self.library._create_ipspace(fake.NETWORK_INFO) expected = self.library._get_valid_ipspace_name( fake.NETWORK_INFO['neutron_net_id']) self.assertEqual(expected, result) self.library._client.create_ipspace.assert_called_once_with(expected) def test_create_vserver_lifs(self): 
self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_lif_name', mock.Mock(side_effect=['fake_lif1', 'fake_lif2'])) self.mock_object(self.library, '_create_lif') self.library._create_vserver_lifs(fake.VSERVER1, 'fake_vserver_client', fake.NETWORK_INFO, fake.IPSPACE) self.library._create_lif.assert_has_calls([ mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE, fake.CLUSTER_NODES[0], 'fake_lif1', fake.NETWORK_INFO['network_allocations'][0], lif_home_port=None), mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE, fake.CLUSTER_NODES[1], 'fake_lif2', fake.NETWORK_INFO['network_allocations'][1], lif_home_port=None)]) def test_create_vserver_lifs_pre_configured_home_ports(self): self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_lif_name', mock.Mock(side_effect=['fake_lif1', 'fake_lif2'])) self.mock_object(self.library, '_create_lif') lif_home_ports = { fake.CLUSTER_NODES[0]: 'fake_port1', fake.CLUSTER_NODES[1]: 'fake_port2' } self.library._create_vserver_lifs(fake.VSERVER1, 'fake_vserver_client', fake.NETWORK_INFO, fake.IPSPACE, lif_home_ports=lif_home_ports) self.library._create_lif.assert_has_calls([ mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE, fake.CLUSTER_NODES[0], 'fake_lif1', fake.NETWORK_INFO['network_allocations'][0], lif_home_port=lif_home_ports[fake.CLUSTER_NODES[0]]), mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE, fake.CLUSTER_NODES[1], 'fake_lif2', fake.NETWORK_INFO['network_allocations'][1], lif_home_port=lif_home_ports[fake.CLUSTER_NODES[1]])]) def test_create_vserver_admin_lif(self): self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_lif_name', mock.Mock(return_value='fake_admin_lif')) self.mock_object(self.library, '_create_lif') 
self.library._create_vserver_admin_lif(fake.VSERVER1, 'fake_vserver_client', fake.NETWORK_INFO, fake.IPSPACE) self.library._create_lif.assert_has_calls([ mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE, fake.CLUSTER_NODES[0], 'fake_admin_lif', fake.NETWORK_INFO['admin_network_allocations'][0], lif_home_port=None)]) def test_create_vserver_admin_lif_no_admin_network(self): fake_network_info = copy.deepcopy(fake.NETWORK_INFO) fake_network_info['admin_network_allocations'] = [] self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_lif_name', mock.Mock(return_value='fake_admin_lif')) self.mock_object(self.library, '_create_lif') self.library._create_vserver_admin_lif(fake.VSERVER1, 'fake_vserver_client', fake_network_info, fake.IPSPACE) self.assertFalse(self.library._create_lif.called) @ddt.data( fake.get_network_info(fake.USER_NETWORK_ALLOCATIONS, fake.ADMIN_NETWORK_ALLOCATIONS), fake.get_network_info(fake.USER_NETWORK_ALLOCATIONS_IPV6, fake.ADMIN_NETWORK_ALLOCATIONS)) def test_create_vserver_routes(self, network_info): expected_gateway = network_info['network_allocations'][0]['gateway'] vserver_client = mock.Mock() self.mock_object(vserver_client, 'create_route') retval = self.library._create_vserver_routes( vserver_client, network_info) self.assertIsNone(retval) vserver_client.create_route.assert_called_once_with(expected_gateway) def test_get_node_data_port(self): self.mock_object(self.client, 'list_node_data_ports', mock.Mock(return_value=fake.NODE_DATA_PORTS)) self.library.configuration.netapp_port_name_search_pattern = 'e0c' result = self.library._get_node_data_port(fake.CLUSTER_NODE) self.assertEqual('e0c', result) self.library._client.list_node_data_ports.assert_has_calls([ mock.call(fake.CLUSTER_NODE)]) def test_get_node_data_port_no_match(self): self.mock_object(self.client, 'list_node_data_ports', mock.Mock(return_value=fake.NODE_DATA_PORTS)) 
self.library.configuration.netapp_port_name_search_pattern = 'ifgroup1' self.assertRaises(exception.NetAppException, self.library._get_node_data_port, fake.CLUSTER_NODE) def test_get_lif_name(self): result = self.library._get_lif_name( 'fake_node', fake.NETWORK_INFO['network_allocations'][0]) self.assertEqual('os_132dbb10-9a36-46f2-8d89-3d909830c356', result) @ddt.data(fake.MTU, None, 'not-present') def test_create_lif(self, mtu): """Tests cases where MTU is a valid value, None or not present.""" expected_mtu = (mtu if mtu not in (None, 'not-present') else fake.DEFAULT_MTU) network_allocations = copy.deepcopy( fake.NETWORK_INFO['network_allocations'][0]) network_allocations['mtu'] = mtu if mtu == 'not-present': network_allocations.pop('mtu') vserver_client = mock.Mock() self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value='fake_port')) vserver_client.network_interface_exists = mock.Mock( return_value=False) self.library._client.create_port_and_broadcast_domain = ( mock.Mock(return_value='fake_port')) self.library._create_lif(vserver_client, 'fake_vserver', 'fake_ipspace', 'fake_node', 'fake_lif', network_allocations, lif_home_port=None) self.library._get_node_data_port.assert_called_once_with( 'fake_node') (vserver_client.network_interface_exists .assert_called_once_with('fake_vserver', 'fake_node', 'fake_port', '10.10.10.10', '255.255.255.0', '1000', home_port=None)) (self.library._client.create_port_and_broadcast_domain .assert_called_once_with('fake_node', 'fake_port', '1000', expected_mtu, 'fake_ipspace')) (self.library._client.create_network_interface .assert_called_once_with('10.10.10.10', '255.255.255.0', 'fake_node', 'fake_port', 'fake_vserver', 'fake_lif')) def test_create_lif_existent_home_port(self): """Tests case where a existent port is passed to the function""" network_allocations = copy.deepcopy( fake.NETWORK_INFO['network_allocations'][0]) vserver_client = mock.Mock() mock_get_node_data_port = self.mock_object( self.library, 
'_get_node_data_port') vserver_client.network_interface_exists = mock.Mock( return_value=False) mock_create_port_and_broadcast_domain = ( self.library._client.create_port_and_broadcast_domain) self.library._create_lif(vserver_client, 'fake_vserver', 'fake_ipspace', 'fake_node', 'fake_lif', network_allocations, lif_home_port='fake_port_from_param') (vserver_client.network_interface_exists .assert_called_once_with('fake_vserver', 'fake_node', 'fake_port_from_param', '10.10.10.10', '255.255.255.0', '1000', home_port='fake_port_from_param')) mock_get_node_data_port.assert_not_called() mock_create_port_and_broadcast_domain.assert_not_called() (self.library._client.create_network_interface .assert_called_once_with('10.10.10.10', '255.255.255.0', 'fake_node', 'fake_port_from_param', 'fake_vserver', 'fake_lif')) def test_create_lif_if_nonexistent_already_present(self): vserver_client = mock.Mock() vserver_client.network_interface_exists = mock.Mock( return_value=True) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value='fake_port')) self.library._create_lif(vserver_client, 'fake_vserver', fake.IPSPACE, 'fake_node', 'fake_lif', fake.NETWORK_INFO['network_allocations'][0], lif_home_port=None) self.assertFalse(self.library._client.create_network_interface.called) def test_get_network_allocations_number(self): self.library._client.list_cluster_nodes.return_value = ( fake.CLUSTER_NODES) result = self.library.get_network_allocations_number() self.assertEqual(len(fake.CLUSTER_NODES), result) def test_get_admin_network_allocations_number(self): result = self.library.get_admin_network_allocations_number( 'fake_admin_network_api') self.assertEqual(1, result) def test_get_admin_network_allocations_number_no_admin_network(self): result = self.library.get_admin_network_allocations_number(None) self.assertEqual(0, result) def test_teardown_server(self): self.library._client.vserver_exists.return_value = True mock_delete_vserver = self.mock_object(self.library, 
'_delete_vserver') self.library.teardown_server( fake.SHARE_SERVER['backend_details'], security_services=fake.NETWORK_INFO['security_services']) self.library._client.vserver_exists.assert_called_once_with( fake.VSERVER1) mock_delete_vserver.assert_called_once_with( fake.VSERVER1, security_services=fake.NETWORK_INFO['security_services']) @ddt.data(None, {}, {'vserver_name': None}) def test_teardown_server_no_share_server(self, server_details): mock_delete_vserver = self.mock_object(self.library, '_delete_vserver') self.library.teardown_server(server_details) self.assertFalse(mock_delete_vserver.called) self.assertTrue(lib_multi_svm.LOG.warning.called) def test_teardown_server_no_vserver(self): self.library._client.vserver_exists.return_value = False mock_delete_vserver = self.mock_object(self.library, '_delete_vserver') self.library.teardown_server( fake.SHARE_SERVER['backend_details'], security_services=fake.NETWORK_INFO['security_services']) self.library._client.vserver_exists.assert_called_once_with( fake.VSERVER1) self.assertFalse(mock_delete_vserver.called) self.assertTrue(lib_multi_svm.LOG.warning.called) @ddt.data(True, False) def test_delete_vserver_no_ipspace(self, lock): self.mock_object(self.library._client, 'get_ipspaces', mock.Mock(return_value=[])) vserver_client = mock.Mock() self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) self.mock_object(self.library._client, 'get_snapmirror_policies', mock.Mock(return_value=[])) net_interfaces = copy.deepcopy(c_fake.NETWORK_INTERFACES_MULTIPLE) self.mock_object(vserver_client, 'get_network_interfaces', mock.Mock(return_value=net_interfaces)) security_services = fake.NETWORK_INFO['security_services'] self.mock_object(self.library, '_delete_vserver_peers') self.mock_object(self.library, '_delete_port_vlans') self.library._delete_vserver(fake.VSERVER1, security_services=security_services, needs_lock=lock) self.library._client.get_ipspaces.assert_called_once_with( 
vserver_name=fake.VSERVER1) self.library._delete_vserver_peers.assert_called_once_with( fake.VSERVER1) self.library._client.delete_vserver.assert_called_once_with( fake.VSERVER1, vserver_client, security_services=security_services) self.library._client.delete_ipspace.assert_not_called() self.library._delete_port_vlans.assert_not_called() self.library._client.delete_vlan.assert_not_called() @ddt.data(True, False) def test_delete_vserver_ipspace_has_data_vservers(self, lock): self.library._client.features.IPSPACES = True self.mock_object(self.library._client, 'get_ipspaces', mock.Mock(return_value=c_fake.IPSPACES)) vserver_client = mock.Mock() self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) self.mock_object(self.library._client, 'ipspace_has_data_vservers', mock.Mock(return_value=True)) self.mock_object(self.library._client, 'get_snapmirror_policies', mock.Mock(return_value=[])) self.mock_object(self.library, '_delete_vserver_peers') self.mock_object( vserver_client, 'get_network_interfaces', mock.Mock(return_value=c_fake.NETWORK_INTERFACES)) self.mock_object(self.library._client, 'delete_ipspace', mock.Mock(return_value=False)) self.mock_object(self.library._client, 'get_degraded_ports', mock.Mock(return_value=[])) self.mock_object(self.library, '_delete_port_vlans') security_services = fake.NETWORK_INFO['security_services'] self.library._delete_vserver(fake.VSERVER1, security_services=security_services, needs_lock=lock) self.library._client.get_ipspaces.assert_called_once_with( vserver_name=fake.VSERVER1) self.library._client.delete_vserver.assert_called_once_with( fake.VSERVER1, vserver_client, security_services=security_services) self.library._delete_vserver_peers.assert_called_once_with( fake.VSERVER1) self.library._client.delete_ipspace.assert_called_once_with( c_fake.IPSPACE_NAME ) self.library._client.get_degraded_ports.assert_called_once_with( [c_fake.BROADCAST_DOMAIN], c_fake.IPSPACE_NAME ) # not 
NETWORK_QUALIFIED_PORTS2, which would be given by get_ipspaces() self.library._delete_port_vlans.assert_called_once_with( self.library._client, set(c_fake.NETWORK_QUALIFIED_PORTS)) @ddt.data([], c_fake.NETWORK_INTERFACES) def test_delete_vserver_with_ipspace(self, interfaces): self.mock_object(self.library._client, 'get_ipspaces', mock.Mock(return_value=c_fake.IPSPACES)) vserver_client = mock.Mock() self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=vserver_client)) self.mock_object(self.library._client, 'ipspace_has_data_vservers', mock.Mock(return_value=False)) self.mock_object(self.library, '_delete_vserver_peers') self.mock_object(vserver_client, 'get_network_interfaces', mock.Mock(return_value=interfaces)) self.mock_object(self.library._client, 'get_snapmirror_policies', mock.Mock(return_value=['fake_policy'])) security_services = fake.NETWORK_INFO['security_services'] self.library._delete_vserver(fake.VSERVER1, security_services=security_services) vserver_client.delete_snapmirror_policy.assert_called_once_with( 'fake_policy') self.library._delete_vserver_peers.assert_called_once_with( fake.VSERVER1 ) self.library._client.get_ipspaces.assert_called_once_with( vserver_name=fake.VSERVER1) self.library._client.delete_vserver.assert_called_once_with( fake.VSERVER1, vserver_client, security_services=security_services) self.library._client.delete_ipspace.assert_called_once_with( c_fake.IPSPACE_NAME) self.library._client.get_degraded_ports.assert_not_called() self.library._client.delete_vlan.assert_called_once_with( c_fake.NODE_NAME2, c_fake.PORT, c_fake.VLAN) def test__delete_vserver_peers(self): self.mock_object(self.library, '_get_vserver_peers', mock.Mock(return_value=fake.VSERVER_PEER)) self.mock_object(self.library, '_delete_vserver_peer') self.library._delete_vserver_peers(fake.VSERVER1) self.library._get_vserver_peers.assert_called_once_with( vserver=fake.VSERVER1 ) self.library._delete_vserver_peer.assert_called_once_with( 
fake.VSERVER_PEER[0]['vserver'], fake.VSERVER_PEER[0]['peer-vserver'] ) def test_delete_port_vlans(self): self.library._delete_port_vlans(self.library._client, c_fake.NETWORK_QUALIFIED_PORTS) for port_name in c_fake.NETWORK_QUALIFIED_PORTS: node, port = port_name.split(':') port, vlan = port.split('-') self.library._client.delete_vlan.assert_called_once_with( node, port, vlan) def test_delete_port_vlans_client_error(self): mock_exception_log = self.mock_object(lib_multi_svm.LOG, 'exception') self.mock_object( self.library._client, 'delete_vlan', mock.Mock(side_effect=exception.NetAppException("fake error"))) self.library._delete_port_vlans(self.library._client, c_fake.NETWORK_QUALIFIED_PORTS) for port_name in c_fake.NETWORK_QUALIFIED_PORTS: node, port = port_name.split(':') port, vlan = port.split('-') self.library._client.delete_vlan.assert_called_once_with( node, port, vlan) self.assertEqual(1, mock_exception_log.call_count) @ddt.data([], [{'vserver': c_fake.VSERVER_NAME, 'peer-vserver': c_fake.VSERVER_PEER_NAME, 'applications': [ {'vserver-peer-application': 'snapmirror'}] }]) def test_create_replica(self, vserver_peers): fake_cluster_name = 'fake_cluster' self.mock_object(self.library, '_get_vservers_from_replicas', mock.Mock(return_value=(self.fake_vserver, self.fake_new_vserver_name))) self.mock_object(self.library, 'find_active_replica', mock.Mock(return_value=self.fake_replica)) self.mock_object(share_utils, 'extract_host', mock.Mock(side_effect=[self.fake_new_replica_host, self.fake_replica_host])) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(side_effect=[self.fake_new_client, self.fake_client])) self.mock_object(self.library, '_get_vserver_peers', mock.Mock(return_value=vserver_peers)) self.mock_object(self.fake_new_client, 'get_cluster_name', mock.Mock(return_value=fake_cluster_name)) self.mock_object(self.fake_client, 'create_vserver_peer') self.mock_object(self.fake_new_client, 'accept_vserver_peer') lib_base_model_update = { 
'export_locations': [], 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, 'access_rules_status': constants.STATUS_ACTIVE, } self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, 'create_replica', mock.Mock(return_value=lib_base_model_update)) model_update = self.library.create_replica( None, [self.fake_replica], self.fake_new_replica, [], [], share_server=None) self.assertDictEqual(lib_base_model_update, model_update) self.library._get_vservers_from_replicas.assert_called_once_with( None, [self.fake_replica], self.fake_new_replica ) self.library.find_active_replica.assert_called_once_with( [self.fake_replica] ) self.assertEqual(2, share_utils.extract_host.call_count) self.assertEqual(2, data_motion.get_client_for_backend.call_count) self.library._get_vserver_peers.assert_called_once_with( self.fake_new_vserver_name, self.fake_vserver ) self.fake_new_client.get_cluster_name.assert_called_once_with() if not vserver_peers: self.fake_client.create_vserver_peer.assert_called_once_with( self.fake_new_vserver_name, self.fake_vserver, peer_cluster_name=fake_cluster_name ) self.fake_new_client.accept_vserver_peer.assert_called_once_with( self.fake_vserver, self.fake_new_vserver_name ) base_class = lib_base.NetAppCmodeFileStorageLibrary base_class.create_replica.assert_called_once_with( None, [self.fake_replica], self.fake_new_replica, [], [] ) def test_delete_replica(self): base_class = lib_base.NetAppCmodeFileStorageLibrary vserver_peers = copy.deepcopy(fake.VSERVER_PEER) vserver_peers[0]['vserver'] = self.fake_vserver vserver_peers[0]['peer-vserver'] = self.fake_new_vserver_name self.mock_object(self.library, '_get_vservers_from_replicas', mock.Mock(return_value=(self.fake_vserver, self.fake_new_vserver_name))) self.mock_object(base_class, 'delete_replica') self.mock_object(self.library, '_get_snapmirrors_destinations', mock.Mock(return_value=[])) self.mock_object(self.library, '_get_snapmirrors', mock.Mock(return_value=[])) self.mock_object(self.library, 
'_get_vserver_peers', mock.Mock(return_value=vserver_peers)) self.mock_object(self.library, '_delete_vserver_peer') self.library.delete_replica(None, [self.fake_replica], self.fake_new_replica, [], share_server=None) self.library._get_vservers_from_replicas.assert_called_once_with( None, [self.fake_replica], self.fake_new_replica ) self.library._get_snapmirrors_destinations.assert_has_calls( [mock.call(self.fake_vserver, self.fake_new_vserver_name), mock.call(self.fake_new_vserver_name, self.fake_vserver)] ) base_class.delete_replica.assert_called_once_with( None, [self.fake_replica], self.fake_new_replica, [] ) self.library._get_snapmirrors.assert_has_calls( [mock.call(self.fake_vserver, self.fake_new_vserver_name), mock.call(self.fake_new_vserver_name, self.fake_vserver)] ) self.library._get_vserver_peers.assert_called_once_with( self.fake_new_vserver_name, self.fake_vserver ) self.library._delete_vserver_peer.assert_called_once_with( self.fake_new_vserver_name, self.fake_vserver ) def test_get_vservers_from_replicas(self): self.mock_object(self.library, 'find_active_replica', mock.Mock(return_value=self.fake_replica)) vserver, peer_vserver = self.library._get_vservers_from_replicas( None, [self.fake_replica], self.fake_new_replica) self.library.find_active_replica.assert_called_once_with( [self.fake_replica] ) self.assertEqual(self.fake_vserver, vserver) self.assertEqual(self.fake_new_vserver_name, peer_vserver) def test_get_vserver_peers(self): self.mock_object(self.library._client, 'get_vserver_peers') self.library._get_vserver_peers( vserver=self.fake_vserver, peer_vserver=self.fake_new_vserver_name) self.library._client.get_vserver_peers.assert_called_once_with( self.fake_vserver, self.fake_new_vserver_name ) def test_create_vserver_peer(self): self.mock_object(self.library._client, 'create_vserver_peer') self.library._create_vserver_peer( None, vserver=self.fake_vserver, peer_vserver=self.fake_new_vserver_name) 
self.library._client.create_vserver_peer.assert_called_once_with( self.fake_vserver, self.fake_new_vserver_name ) def test_delete_vserver_peer(self): self.mock_object(self.library._client, 'delete_vserver_peer') self.library._delete_vserver_peer( vserver=self.fake_vserver, peer_vserver=self.fake_new_vserver_name) self.library._client.delete_vserver_peer.assert_called_once_with( self.fake_vserver, self.fake_new_vserver_name ) def test_create_share_from_snapshot(self): fake_parent_share = copy.deepcopy(fake.SHARE) fake_parent_share['id'] = fake.SHARE_ID2 mock_create_from_snap = self.mock_object( lib_base.NetAppCmodeFileStorageLibrary, 'create_share_from_snapshot') self.library.create_share_from_snapshot( None, fake.SHARE, fake.SNAPSHOT, share_server=fake.SHARE_SERVER, parent_share=fake_parent_share) mock_create_from_snap.assert_called_once_with( None, fake.SHARE, fake.SNAPSHOT, share_server=fake.SHARE_SERVER, parent_share=fake_parent_share ) def test_create_share_from_snapshot_group(self): share = copy.deepcopy(fake.SHARE) share['source_share_group_snapshot_member_id'] = ( fake.CG_SNAPSHOT_MEMBER_ID1) fake_parent_share = copy.deepcopy(fake.SHARE) fake_parent_share['id'] = fake.SHARE_ID2 fake_parent_share['host'] = fake.MANILA_HOST_NAME_2 mock_create_from_snap = self.mock_object( lib_base.NetAppCmodeFileStorageLibrary, 'create_share_from_snapshot') mock_data_session = self.mock_object(data_motion, 'DataMotionSession') self.library.create_share_from_snapshot( None, share, fake.SNAPSHOT, share_server=fake.SHARE_SERVER, parent_share=fake_parent_share) mock_create_from_snap.assert_called_once_with( None, share, fake.SNAPSHOT, share_server=fake.SHARE_SERVER, parent_share=fake_parent_share ) mock_data_session.assert_not_called() @ddt.data( {'src_cluster_name': fake.CLUSTER_NAME, 'dest_cluster_name': fake.CLUSTER_NAME, 'has_vserver_peers': None}, {'src_cluster_name': fake.CLUSTER_NAME, 'dest_cluster_name': fake.CLUSTER_NAME_2, 'has_vserver_peers': False}, 
{'src_cluster_name': fake.CLUSTER_NAME, 'dest_cluster_name': fake.CLUSTER_NAME_2, 'has_vserver_peers': True} ) @ddt.unpack def test_create_share_from_snaphot_different_hosts(self, src_cluster_name, dest_cluster_name, has_vserver_peers): class FakeDBObj(dict): def to_dict(self): return self fake_parent_share = copy.deepcopy(fake.SHARE) fake_parent_share['id'] = fake.SHARE_ID2 fake_parent_share['host'] = fake.MANILA_HOST_NAME_2 fake_share = FakeDBObj(fake.SHARE) fake_share_server = FakeDBObj(fake.SHARE_SERVER) src_vserver = fake.VSERVER2 dest_vserver = fake.VSERVER1 src_backend = fake.BACKEND_NAME dest_backend = fake.BACKEND_NAME_2 mock_dm_session = mock.Mock() mock_dm_constr = self.mock_object( data_motion, "DataMotionSession", mock.Mock(return_value=mock_dm_session)) mock_get_vserver = self.mock_object( mock_dm_session, 'get_vserver_from_share', mock.Mock(return_value=src_vserver)) mock_get_vserver_from_share_server = self.mock_object( mock_dm_session, 'get_vserver_from_share_server', mock.Mock(return_value=dest_vserver)) src_vserver_client = mock.Mock() dest_vserver_client = mock.Mock() mock_extract_host = self.mock_object( share_utils, 'extract_host', mock.Mock(side_effect=[src_backend, dest_backend])) mock_dm_get_client = self.mock_object( data_motion, 'get_client_for_backend', mock.Mock(side_effect=[src_vserver_client, dest_vserver_client])) mock_get_src_cluster_name = self.mock_object( src_vserver_client, 'get_cluster_name', mock.Mock(return_value=src_cluster_name)) mock_get_dest_cluster_name = self.mock_object( dest_vserver_client, 'get_cluster_name', mock.Mock(return_value=dest_cluster_name)) mock_get_vserver_peers = self.mock_object( self.library, '_get_vserver_peers', mock.Mock(return_value=has_vserver_peers)) mock_create_vserver_peer = self.mock_object(dest_vserver_client, 'create_vserver_peer') mock_accept_peer = self.mock_object(src_vserver_client, 'accept_vserver_peer') mock_create_from_snap = self.mock_object( lib_base.NetAppCmodeFileStorageLibrary, 
'create_share_from_snapshot') self.library.create_share_from_snapshot( None, fake_share, fake.SNAPSHOT, share_server=fake_share_server, parent_share=fake_parent_share) internal_share = copy.deepcopy(fake.SHARE) internal_share['share_server'] = copy.deepcopy(fake.SHARE_SERVER) mock_dm_constr.assert_called_once() mock_get_vserver.assert_called_once_with(fake_parent_share) mock_get_vserver_from_share_server.assert_called_once_with( fake_share_server) mock_extract_host.assert_has_calls([ mock.call(fake_parent_share['host'], level='backend_name'), mock.call(internal_share['host'], level='backend_name')]) mock_dm_get_client.assert_has_calls([ mock.call(src_backend, vserver_name=src_vserver), mock.call(dest_backend, vserver_name=dest_vserver) ]) mock_get_src_cluster_name.assert_called_once() mock_get_dest_cluster_name.assert_called_once() if src_cluster_name != dest_cluster_name: mock_get_vserver_peers.assert_called_once_with(dest_vserver, src_vserver) if not has_vserver_peers: mock_create_vserver_peer.assert_called_once_with( dest_vserver, src_vserver, peer_cluster_name=src_cluster_name) mock_accept_peer.assert_called_once_with(src_vserver, dest_vserver) mock_create_from_snap.assert_called_once_with( None, fake.SHARE, fake.SNAPSHOT, share_server=fake.SHARE_SERVER, parent_share=fake_parent_share) def test_check_if_extra_spec_is_positive_with_negative_integer(self): self.assertRaises(exception.NetAppException, self.library._check_if_max_files_is_valid, fake.SHARE, -1) def test_check_if_extra_spec_is_positive_with_string(self): self.assertRaises(ValueError, self.library._check_if_max_files_is_valid, fake.SHARE, 'abc') def test_check_nfs_config_extra_specs_validity(self): result = self.library._check_nfs_config_extra_specs_validity( fake.EXTRA_SPEC) self.assertIsNone(result) def test_check_nfs_config_extra_specs_validity_empty_spec(self): result = self.library._check_nfs_config_extra_specs_validity({}) self.assertIsNone(result) 
@ddt.data(fake.INVALID_TCP_MAX_XFER_SIZE_EXTRA_SPEC, fake.INVALID_UDP_MAX_XFER_SIZE_EXTRA_SPEC) def test_check_nfs_config_extra_specs_validity_invalid_value(self, extra_specs): self.assertRaises( exception.NetAppException, self.library._check_nfs_config_extra_specs_validity, extra_specs) @ddt.data({}, fake.STRING_EXTRA_SPEC) def test_get_nfs_config_provisioning_options_empty(self, extra_specs): result = self.library._get_nfs_config_provisioning_options( extra_specs) self.assertDictEqual(result, fake.NFS_CONFIG_DEFAULT) @ddt.data( {'extra_specs': fake.NFS_CONFIG_TCP_MAX_DDT['extra_specs'], 'expected': fake.NFS_CONFIG_TCP_MAX_DDT['expected']}, {'extra_specs': fake.NFS_CONFIG_UDP_MAX_DDT['extra_specs'], 'expected': fake.NFS_CONFIG_UDP_MAX_DDT['expected']}, {'extra_specs': fake.NFS_CONFIG_TCP_UDP_MAX_DDT['extra_specs'], 'expected': fake.NFS_CONFIG_TCP_UDP_MAX_DDT['expected']}, ) @ddt.unpack def test_get_nfs_config_provisioning_options_valid(self, extra_specs, expected): result = self.library._get_nfs_config_provisioning_options( extra_specs) self.assertDictEqual(expected, result) @ddt.data({'fake_share_server': fake.SHARE_SERVER_NFS_TCP, 'expected_nfs_config': fake.NFS_CONFIG_TCP_MAX}, {'fake_share_server': fake.SHARE_SERVER_NFS_UDP, 'expected_nfs_config': fake.NFS_CONFIG_UDP_MAX}, {'fake_share_server': fake.SHARE_SERVER_NFS_TCP_UDP, 'expected_nfs_config': fake.NFS_CONFIG_TCP_UDP_MAX}, {'fake_share_server': fake.SHARE_SERVER_NO_DETAILS, 'expected_nfs_config': fake.NFS_CONFIG_DEFAULT}, {'fake_share_server': fake.SHARE_SERVER_NFS_DEFAULT, 'expected_nfs_config': None}, {'fake_share_server': fake.SHARE_SERVER_NO_NFS_NONE, 'expected_nfs_config': fake.NFS_CONFIG_DEFAULT}) @ddt.unpack def test_is_share_server_compatible_true(self, fake_share_server, expected_nfs_config): is_same = self.library._is_share_server_compatible( fake_share_server, expected_nfs_config) self.assertTrue(is_same) @ddt.data({'fake_share_server': fake.SHARE_SERVER_NFS_TCP, 'expected_nfs_config': 
fake.NFS_CONFIG_UDP_MAX}, {'fake_share_server': fake.SHARE_SERVER_NFS_UDP, 'expected_nfs_config': fake.NFS_CONFIG_TCP_MAX}, {'fake_share_server': fake.SHARE_SERVER_NFS_TCP_UDP, 'expected_nfs_config': fake.NFS_CONFIG_TCP_MAX}, {'fake_share_server': fake.SHARE_SERVER_NFS_TCP_UDP, 'expected_nfs_config': fake.NFS_CONFIG_UDP_MAX}, {'fake_share_server': fake.SHARE_SERVER_NFS_TCP_UDP, 'expected_nfs_config': None}, {'fake_share_server': fake.SHARE_SERVER_NFS_TCP_UDP, 'expected_nfs_config': {}}, {'fake_share_server': fake.SHARE_SERVER_NFS_TCP_UDP, 'expected_nfs_config': fake.NFS_CONFIG_DEFAULT}, {'fake_share_server': fake.SHARE_SERVER_NO_DETAILS, 'expected_nfs_config': fake.NFS_CONFIG_UDP_MAX}, {'fake_share_server': fake.SHARE_SERVER_NFS_DEFAULT, 'expected_nfs_config': fake.NFS_CONFIG_UDP_MAX}, {'fake_share_server': fake.SHARE_SERVER_NO_NFS_NONE, 'expected_nfs_config': fake.NFS_CONFIG_TCP_MAX}) @ddt.unpack def test_is_share_server_compatible_false(self, fake_share_server, expected_nfs_config): is_same = self.library._is_share_server_compatible( fake_share_server, expected_nfs_config) self.assertFalse(is_same) @ddt.data( {'expected_server': fake.SHARE_SERVER_NFS_TCP, 'share_group': {'share_server_id': fake.SHARE_SERVER_NFS_TCP['id']}, 'nfs_config': fake.NFS_CONFIG_TCP_MAX}, {'expected_server': fake.SHARE_SERVER_NFS_UDP, 'share_group': {'share_server_id': fake.SHARE_SERVER_NFS_UDP['id']}, 'nfs_config': fake.NFS_CONFIG_UDP_MAX}, {'expected_server': fake.SHARE_SERVER_NFS_TCP_UDP, 'share_group': { 'share_server_id': fake.SHARE_SERVER_NFS_TCP_UDP['id']}, 'nfs_config': fake.NFS_CONFIG_TCP_UDP_MAX}, {'expected_server': fake.SHARE_SERVER_NFS_DEFAULT, 'share_group': { 'share_server_id': fake.SHARE_SERVER_NFS_DEFAULT['id']}, 'nfs_config': fake.NFS_CONFIG_DEFAULT}, {'expected_server': None, 'share_group': {'share_server_id': 'invalid_id'}, 'nfs_config': fake.NFS_CONFIG_TCP_MAX}) @ddt.unpack def test_choose_share_server_compatible_with_share_group_and_nfs_config( self, expected_server, 
share_group, nfs_config): self.library.is_nfs_config_supported = True mock_get_extra_spec = self.mock_object( share_types, "get_extra_specs_from_share", mock.Mock(return_value=fake.EXTRA_SPEC)) mock_get_nfs_config = self.mock_object( self.library, "_get_nfs_config_provisioning_options", mock.Mock(return_value=nfs_config)) mock_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=('fake_name', mock_client))) self.mock_object(mock_client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(mock_client, 'list_vserver_aggregates', mock.Mock(return_value=fake.AGGREGATES)) server = self.library.choose_share_server_compatible_with_share( None, fake.SHARE_SERVERS, fake.SHARE_2, None, share_group) mock_get_extra_spec.assert_called_once_with(fake.SHARE_2) mock_get_nfs_config.assert_called_once_with(fake.EXTRA_SPEC) self.assertEqual(expected_server, server) @ddt.data( {'expected_server': fake.SHARE_SERVER_NO_NFS_NONE, 'share_group': { 'share_server_id': fake.SHARE_SERVER_NO_NFS_NONE['id']}}, {'expected_server': fake.SHARE_SERVER_NO_DETAILS, 'share_group': { 'share_server_id': fake.SHARE_SERVER_NO_DETAILS['id']}}, {'expected_server': fake.SHARE_SERVER_NO_DETAILS, 'share_group': { 'share_server_id': fake.SHARE_SERVER_NO_DETAILS['id']}, 'nfs_config_support': False}, {'expected_server': None, 'share_group': {'share_server_id': 'invalid_id'}}) @ddt.unpack def test_choose_share_server_compatible_with_share_group_only( self, expected_server, share_group, nfs_config_support=True): self.library.is_nfs_config_supported = nfs_config_support mock_get_extra_spec = self.mock_object( share_types, "get_extra_specs_from_share", mock.Mock(return_value=fake.EMPTY_EXTRA_SPEC)) mock_get_nfs_config = self.mock_object( self.library, "_get_nfs_config_provisioning_options", mock.Mock(return_value=fake.NFS_CONFIG_DEFAULT)) mock_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=('fake_name', 
mock_client))) self.mock_object(mock_client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(mock_client, 'list_vserver_aggregates', mock.Mock(return_value=fake.AGGREGATES)) server = self.library.choose_share_server_compatible_with_share( None, fake.SHARE_SERVERS, fake.SHARE_2, None, share_group) self.assertEqual(expected_server, server) if nfs_config_support: mock_get_extra_spec.assert_called_once_with(fake.SHARE_2) mock_get_nfs_config.assert_called_once_with(fake.EMPTY_EXTRA_SPEC) @ddt.data( {'expected_server': fake.SHARE_SERVER_NFS_TCP, 'nfs_config': fake.NFS_CONFIG_TCP_MAX}, {'expected_server': fake.SHARE_SERVER_NFS_UDP, 'nfs_config': fake.NFS_CONFIG_UDP_MAX}, {'expected_server': fake.SHARE_SERVER_NFS_TCP_UDP, 'nfs_config': fake.NFS_CONFIG_TCP_UDP_MAX}, {'expected_server': fake.SHARE_SERVER_NFS_DEFAULT, 'nfs_config': fake.NFS_CONFIG_DEFAULT}, {'expected_server': None, 'nfs_config': {'invalid': 'invalid'}}, {'expected_server': fake.SHARE_SERVER_NFS_TCP, 'nfs_config': None, 'nfs_config_support': False}, ) @ddt.unpack def test_choose_share_server_compatible_with_share_nfs_config_only( self, expected_server, nfs_config, nfs_config_support=True): self.library.is_nfs_config_supported = nfs_config_support mock_get_extra_spec = self.mock_object( share_types, "get_extra_specs_from_share", mock.Mock(return_value=fake.EXTRA_SPEC)) mock_get_nfs_config = self.mock_object( self.library, "_get_nfs_config_provisioning_options", mock.Mock(return_value=nfs_config)) mock_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=('fake_name', mock_client))) self.mock_object(mock_client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(mock_client, 'list_vserver_aggregates', mock.Mock(return_value=fake.AGGREGATES)) server = self.library.choose_share_server_compatible_with_share( None, fake.SHARE_SERVERS, fake.SHARE_2) self.assertEqual(expected_server, server) if nfs_config_support: 
mock_get_extra_spec.assert_called_once_with(fake.SHARE_2) mock_get_nfs_config.assert_called_once_with(fake.EXTRA_SPEC) @ddt.data( {'expected_server': fake.SHARE_SERVER_NO_DETAILS, 'share_servers': [ fake.SHARE_SERVER_NFS_TCP, fake.SHARE_SERVER_NO_DETAILS]}, {'expected_server': fake.SHARE_SERVER_NO_NFS_NONE, 'share_servers': [ fake.SHARE_SERVER_NFS_UDP, fake.SHARE_SERVER_NO_NFS_NONE]}, {'expected_server': fake.SHARE_SERVER_NFS_DEFAULT, 'share_servers': [ fake.SHARE_SERVER_NFS_UDP, fake.SHARE_SERVER_NFS_DEFAULT]}, {'expected_server': None, 'share_servers': [ fake.SHARE_SERVER_NFS_TCP, fake.SHARE_SERVER_NFS_UDP]}, {'expected_server': fake.SHARE_SERVER_NO_DETAILS, 'share_servers': [fake.SHARE_SERVER_NO_DETAILS], 'nfs_config_support': False} ) @ddt.unpack def test_choose_share_server_compatible_with_share_no_specification( self, expected_server, share_servers, nfs_config_support=True): self.library.is_nfs_config_supported = nfs_config_support mock_get_extra_spec = self.mock_object( share_types, "get_extra_specs_from_share", mock.Mock(return_value=fake.EMPTY_EXTRA_SPEC)) mock_get_nfs_config = self.mock_object( self.library, "_get_nfs_config_provisioning_options", mock.Mock(return_value=fake.NFS_CONFIG_DEFAULT)) mock_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=('fake_name', mock_client))) self.mock_object(mock_client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(mock_client, 'list_vserver_aggregates', mock.Mock(return_value=fake.AGGREGATES)) server = self.library.choose_share_server_compatible_with_share( None, share_servers, fake.SHARE_2) self.assertEqual(expected_server, server) if nfs_config_support: mock_get_extra_spec.assert_called_once_with(fake.SHARE_2) mock_get_nfs_config.assert_called_once_with(fake.EMPTY_EXTRA_SPEC) def test_manage_existing_error(self): fake_server = {'id': 'id'} fake_nfs_config = 'fake_nfs_config' self.library.is_nfs_config_supported = True mock_get_extra_spec = 
self.mock_object( share_types, "get_extra_specs_from_share", mock.Mock(return_value=fake.EXTRA_SPEC)) mock_get_nfs_config = self.mock_object( self.library, "_get_nfs_config_provisioning_options", mock.Mock(return_value=fake_nfs_config)) mock_is_compatible = self.mock_object( self.library, "_is_share_server_compatible", mock.Mock(return_value=False)) self.assertRaises(exception.NetAppException, self.library.manage_existing, fake.SHARE, 'opts', fake_server) mock_get_extra_spec.assert_called_once_with(fake.SHARE) mock_get_nfs_config.assert_called_once_with(fake.EXTRA_SPEC) mock_is_compatible.assert_called_once_with(fake_server, fake_nfs_config) def test_choose_share_server_compatible_with_share_group_no_share_server( self): server = self.library.choose_share_server_compatible_with_share_group( None, [], fake.SHARE_GROUP_REF) self.assertIsNone(server) @ddt.data( [fake.NFS_CONFIG_DEFAULT, fake.NFS_CONFIG_TCP_MAX], [fake.NFS_CONFIG_TCP_MAX, fake.NFS_CONFIG_UDP_MAX], [fake.NFS_CONFIG_TCP_UDP_MAX, fake.NFS_CONFIG_TCP_MAX], [fake.NFS_CONFIG_DEFAULT, fake.NFS_CONFIG_TCP_UDP_MAX]) def test_choose_share_server_compatible_with_share_group_nfs_conflict( self, nfs_config_list): self.library.is_nfs_config_supported = True self.mock_object( share_types, "get_share_type_extra_specs", mock.Mock(return_value=fake.EXTRA_SPEC)) mock_get_nfs_config = self.mock_object( self.library, "_get_nfs_config_provisioning_options", mock.Mock(side_effect=nfs_config_list)) mock_check_extra_spec = self.mock_object( self.library, '_check_nfs_config_extra_specs_validity', mock.Mock()) self.assertRaises(exception.InvalidInput, self.library. 
choose_share_server_compatible_with_share_group, None, fake.SHARE_SERVERS, fake.SHARE_GROUP_REF) mock_get_nfs_config.assert_called_with(fake.EXTRA_SPEC) mock_check_extra_spec.assert_called_once_with(fake.EXTRA_SPEC) @ddt.data( {'expected_server': fake.SHARE_SERVER_NO_DETAILS, 'nfs_config': fake.NFS_CONFIG_DEFAULT, 'share_servers': [ fake.SHARE_SERVER_NFS_TCP, fake.SHARE_SERVER_NO_DETAILS]}, {'expected_server': fake.SHARE_SERVER_NO_NFS_NONE, 'nfs_config': fake.NFS_CONFIG_DEFAULT, 'share_servers': [ fake.SHARE_SERVER_NFS_UDP, fake.SHARE_SERVER_NO_NFS_NONE]}, {'expected_server': fake.SHARE_SERVER_NFS_DEFAULT, 'nfs_config': fake.NFS_CONFIG_DEFAULT, 'share_servers': [ fake.SHARE_SERVER_NFS_UDP, fake.SHARE_SERVER_NFS_DEFAULT]}, {'expected_server': None, 'nfs_config': fake.NFS_CONFIG_DEFAULT, 'share_servers': [ fake.SHARE_SERVER_NFS_TCP, fake.SHARE_SERVER_NFS_UDP, fake.SHARE_SERVER_NFS_TCP_UDP]}, {'expected_server': fake.SHARE_SERVER_NFS_TCP_UDP, 'nfs_config': fake.NFS_CONFIG_TCP_UDP_MAX, 'share_servers': [ fake.SHARE_SERVER_NFS_TCP, fake.SHARE_SERVER_NFS_UDP, fake.SHARE_SERVER_NFS_DEFAULT, fake.SHARE_SERVER_NFS_TCP_UDP]}, {'expected_server': fake.SHARE_SERVER_NO_DETAILS, 'nfs_config': None, 'share_servers': [fake.SHARE_SERVER_NO_DETAILS], 'nfs_config_supported': False} ) @ddt.unpack def test_choose_share_server_compatible_with_share_group_nfs( self, expected_server, nfs_config, share_servers, nfs_config_supported=True): self.library.is_nfs_config_supported = nfs_config_supported mock_client = mock.Mock() self.mock_object( share_types, "get_share_type_extra_specs", mock.Mock(return_value=fake.EXTRA_SPEC)) mock_get_nfs_config = self.mock_object( self.library, "_get_nfs_config_provisioning_options", mock.Mock(return_value=nfs_config)) mock_check_extra_spec = self.mock_object( self.library, '_check_nfs_config_extra_specs_validity', mock.Mock()) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=('fake_name', mock_client))) self.mock_object(mock_client, 
'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) server = self.library.choose_share_server_compatible_with_share_group( None, share_servers, fake.SHARE_GROUP_REF) if nfs_config_supported: mock_get_nfs_config.assert_called_with(fake.EXTRA_SPEC) mock_check_extra_spec.assert_called_once_with(fake.EXTRA_SPEC) else: mock_get_nfs_config.assert_not_called() mock_check_extra_spec.assert_not_called() self.assertEqual(expected_server, server) def test_share_server_migration_check_compatibility_same_backend( self): not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE self.library._have_cluster_creds = True self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(None, None))) result = self.library.share_server_migration_check_compatibility( None, self.fake_src_share_server, self.fake_src_share_server['host'], None, None, None) self.assertEqual(not_compatible, result) def _init_mocks_for_svm_dr_check_compatibility( self, src_svm_dr_supported=True, dest_svm_dr_supported=True, check_capacity_result=True, flexgroup_support=False, is_flexgroup_destination_host=False): self.mock_object(self.mock_src_client, 'is_svm_dr_supported', mock.Mock(return_value=src_svm_dr_supported)) self.mock_object(self.mock_dest_client, 'is_svm_dr_supported', mock.Mock(return_value=dest_svm_dr_supported)) self.mock_object(self.library, '_check_capacity_compatibility', mock.Mock(return_value=check_capacity_result)) self.mock_object(self.mock_src_client, 'is_flexgroup_supported', mock.Mock(return_value=flexgroup_support)) self.mock_object(data_motion, 'DataMotionSession') self.mock_object(self.library, 'is_flexgroup_destination_host', mock.Mock(return_value=is_flexgroup_destination_host)) def _configure_mocks_share_server_migration_check_compatibility( self, have_cluster_creds=True, src_cluster_name=fake.CLUSTER_NAME, dest_cluster_name=fake.CLUSTER_NAME_2, pools=fake.POOLS, is_svm_dr=True, failure_scenario=False): migration_method = 'svm_dr' if is_svm_dr else 
'svm_migrate' self.library._have_cluster_creds = have_cluster_creds self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(self.fake_src_vserver, self.mock_src_client))) self.mock_object(self.mock_src_client, 'get_cluster_name', mock.Mock(return_value=src_cluster_name)) self.mock_object(self.client, 'get_cluster_name', mock.Mock(return_value=dest_cluster_name)) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=self.mock_dest_client)) self.mock_object(self.library, '_check_for_migration_support', mock.Mock(return_value=( migration_method, not failure_scenario))) self.mock_object(self.library, '_get_pools', mock.Mock(return_value=pools)) def test_share_server_migration_check_compatibility_dest_with_pool( self): not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE self.library._have_cluster_creds = True result = self.library.share_server_migration_check_compatibility( None, self.fake_src_share_server, fake.MANILA_HOST_NAME, None, None, None) self.assertEqual(not_compatible, result) def test_share_server_migration_check_compatibility_same_cluster( self): not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE self._configure_mocks_share_server_migration_check_compatibility( src_cluster_name=fake.CLUSTER_NAME, dest_cluster_name=fake.CLUSTER_NAME, ) result = self.library.share_server_migration_check_compatibility( None, self.fake_src_share_server, self.fake_dest_share_server['host'], None, None, None) self.assertEqual(not_compatible, result) self.library._get_vserver.assert_called_once_with( self.fake_src_share_server, backend_name=self.fake_src_backend_name ) self.assertTrue(self.mock_src_client.get_cluster_name.called) self.assertTrue(self.client.get_cluster_name.called) @ddt.data( {'src_svm_dr_supported': False, 'dest_svm_dr_supported': False, 'check_capacity_result': False, 'is_flexgroup_destination_host': False, }, {'src_svm_dr_supported': True, 'dest_svm_dr_supported': True, 'check_capacity_result': False, 
'is_flexgroup_destination_host': False, }, {'src_svm_dr_supported': True, 'dest_svm_dr_supported': True, 'check_capacity_result': True, 'is_flexgroup_destination_host': True, }, ) @ddt.unpack def test__check_compatibility_svm_dr_not_compatible( self, src_svm_dr_supported, dest_svm_dr_supported, check_capacity_result, is_flexgroup_destination_host): server_total_size = (fake.SHARE_REQ_SPEC.get('shares_size', 0) + fake.SHARE_REQ_SPEC.get('snapshots_size', 0)) self._init_mocks_for_svm_dr_check_compatibility( src_svm_dr_supported=src_svm_dr_supported, dest_svm_dr_supported=dest_svm_dr_supported, check_capacity_result=check_capacity_result, flexgroup_support=is_flexgroup_destination_host, is_flexgroup_destination_host=is_flexgroup_destination_host) method, result = self.library._check_compatibility_using_svm_dr( self.mock_src_client, self.mock_dest_client, fake.SERVER_MIGRATION_REQUEST_SPEC, fake.POOLS) self.assertEqual(method, 'svm_dr') self.assertEqual(result, False) self.assertTrue(self.mock_src_client.is_svm_dr_supported.called) if (src_svm_dr_supported and dest_svm_dr_supported and is_flexgroup_destination_host): self.assertTrue(self.mock_src_client.is_flexgroup_supported.called) self.assertTrue(self.library.is_flexgroup_destination_host.called) if (check_capacity_result and not is_flexgroup_destination_host and not src_svm_dr_supported): self.assertFalse(self.mock_dest_client.is_svm_dr_supported.called) self.library._check_capacity_compatibility.assert_called_once_with( fake.POOLS, True, server_total_size) def test_share_server_migration_check_compatibility_different_sec_service( self): not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE self._configure_mocks_share_server_migration_check_compatibility() new_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) new_sec_service['id'] = 'new_sec_serv_id' new_share_network = copy.deepcopy(fake.SHARE_NETWORK) new_share_network['id'] = 'fake_share_network_id_2' new_share_network['security_services'] = 
[new_sec_service] result = self.library.share_server_migration_check_compatibility( None, self.fake_src_share_server, self.fake_dest_share_server['host'], fake.SHARE_NETWORK, new_share_network, None) self.assertEqual(not_compatible, result) self.library._get_vserver.assert_called_once_with( self.fake_src_share_server, backend_name=self.fake_src_backend_name ) self.assertTrue(self.mock_src_client.get_cluster_name.called) self.assertTrue(self.client.get_cluster_name.called) data_motion.get_client_for_backend.assert_called_once_with( self.fake_dest_backend_name, vserver_name=None ) @ddt.data('netapp_flexvol_encryption', 'revert_to_snapshot_support') def test_share_server_migration_check_compatibility_invalid_capabilities( self, capability): not_compatible = fake.SERVER_MIGRATION_CHECK_NOT_COMPATIBLE pools_without_capability = copy.deepcopy(fake.POOLS) for pool in pools_without_capability: pool[capability] = False self._configure_mocks_share_server_migration_check_compatibility( pools=pools_without_capability ) result = self.library.share_server_migration_check_compatibility( None, self.fake_src_share_server, self.fake_dest_share_server['host'], fake.SHARE_NETWORK, fake.SHARE_NETWORK, fake.SERVER_MIGRATION_REQUEST_SPEC) self.assertEqual(not_compatible, result) self.library._get_vserver.assert_called_once_with( self.fake_src_share_server, backend_name=self.fake_src_backend_name ) self.assertTrue(self.mock_src_client.get_cluster_name.called) self.assertTrue(self.client.get_cluster_name.called) data_motion.get_client_for_backend.assert_called_once_with( self.fake_dest_backend_name, vserver_name=None ) @ddt.data((True, "svm_migrate"), (False, "svm_dr")) @ddt.unpack def test__check_for_migration_support( self, svm_migrate_supported, expected_migration_method): mock_dest_is_svm_migrate_supported = self.mock_object( self.mock_dest_client, 'is_svm_migrate_supported', mock.Mock(return_value=svm_migrate_supported)) mock_src_is_svm_migrate_supported = self.mock_object( 
self.mock_src_client, 'is_svm_migrate_supported', mock.Mock(return_value=svm_migrate_supported)) mock_find_matching_aggregates = self.mock_object( self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_get_vserver_name = self.mock_object( self.library, '_get_vserver_name', mock.Mock(return_value=fake.VSERVER1)) mock_svm_migration_check_svm_mig = self.mock_object( self.library, '_check_compatibility_for_svm_migrate', mock.Mock(return_value=True)) mock_svm_migration_check_svm_dr = self.mock_object( self.library, '_check_compatibility_using_svm_dr', mock.Mock(side_effect=[('svm_dr', True)])) migration_method, result = self.library._check_for_migration_support( self.mock_src_client, self.mock_dest_client, fake.SHARE_SERVER, fake.SHARE_REQ_SPEC, fake.CLUSTER_NAME, fake.POOLS) self.assertIs(True, result) self.assertEqual(migration_method, expected_migration_method) mock_dest_is_svm_migrate_supported.assert_called_once() if svm_migrate_supported: mock_src_is_svm_migrate_supported.assert_called_once() mock_find_matching_aggregates.assert_called_once() mock_get_vserver_name.assert_not_called() mock_svm_migration_check_svm_mig.assert_called_once_with( fake.CLUSTER_NAME, fake.VSERVER1, fake.SHARE_SERVER, fake.AGGREGATES, self.mock_dest_client) else: mock_svm_migration_check_svm_dr.assert_called_once_with( self.mock_src_client, self.mock_dest_client, fake.SHARE_REQ_SPEC, fake.POOLS) def test__check_for_migration_support_svm_migrate_exception(self): svm_migrate_supported = True expected_migration_method = 'svm_migrate' mock_dest_is_svm_migrate_supported = self.mock_object( self.mock_dest_client, 'is_svm_migrate_supported', mock.Mock(return_value=svm_migrate_supported)) mock_src_is_svm_migrate_supported = self.mock_object( self.mock_src_client, 'is_svm_migrate_supported', mock.Mock(return_value=svm_migrate_supported)) mock_find_matching_aggregates = self.mock_object( self.library, '_find_matching_aggregates', 
mock.Mock(return_value=fake.AGGREGATES)) mock_get_vserver_name = self.mock_object( self.library, '_get_vserver_name', mock.Mock(return_value=fake.VSERVER1)) mock_svm_migration_check_svm_mig = self.mock_object( self.library, '_check_compatibility_for_svm_migrate', mock.Mock(side_effect=exception.NetAppException())) migration_method, result = self.library._check_for_migration_support( self.mock_src_client, self.mock_dest_client, fake.SHARE_SERVER, fake.SHARE_REQ_SPEC, fake.CLUSTER_NAME, fake.POOLS) self.assertIs(False, result) self.assertEqual(migration_method, expected_migration_method) mock_dest_is_svm_migrate_supported.assert_called_once() mock_src_is_svm_migrate_supported.assert_called_once() mock_find_matching_aggregates.assert_called_once() mock_get_vserver_name.assert_not_called() mock_svm_migration_check_svm_mig.assert_called_once_with( fake.CLUSTER_NAME, fake.VSERVER1, fake.SHARE_SERVER, fake.AGGREGATES, self.mock_dest_client) @ddt.data( (mock.Mock, True), (exception.NetAppException, False) ) @ddt.unpack def test__check_compatibility_for_svm_migrate(self, expected_exception, expected_compatibility): network_info = { 'network_allocations': self.fake_src_share_server['network_allocations'], 'neutron_subnet_id': self.fake_src_share_server['share_network_subnets'][0].get( 'neutron_subnet_id') } self.library.configuration.netapp_restrict_lif_creation_per_ha_pair = ( True ) check_lif_limit = self.mock_object( self.library, '_check_data_lif_count_limit_reached_for_ha_pair', ) self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value=fake.NODE_DATA_PORT)) self.mock_object( self.library._client, 'get_ipspace_name_for_vlan_port', mock.Mock(return_value=fake.IPSPACE)) self.mock_object(self.library, '_create_port_and_broadcast_domain') self.mock_object(self.mock_dest_client, 'get_ipspaces', mock.Mock(return_value=[{'uuid': fake.IPSPACE_ID}])) 
self.mock_object( self.mock_dest_client, 'svm_migration_start', mock.Mock(return_value=c_fake.FAKE_MIGRATION_RESPONSE_WITH_JOB)) self.mock_object(self.library, '_get_job_uuid', mock.Mock(return_value=c_fake.FAKE_JOB_ID)) self.mock_object(self.library, '_wait_for_operation_status', mock.Mock(side_effect=expected_exception)) self.mock_object(self.mock_dest_client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) compatibility = self.library._check_compatibility_for_svm_migrate( fake.CLUSTER_NAME, fake.VSERVER1, self.fake_src_share_server, fake.AGGREGATES, self.mock_dest_client) self.assertIs(expected_compatibility, compatibility) self.mock_dest_client.svm_migration_start.assert_called_once_with( fake.CLUSTER_NAME, fake.VSERVER1, fake.AGGREGATES, check_only=True, dest_ipspace=fake.IPSPACE) self.library._get_job_uuid.assert_called_once_with( c_fake.FAKE_MIGRATION_RESPONSE_WITH_JOB) self.library._client.list_cluster_nodes.assert_called_once() self.library._get_node_data_port.assert_called_with( fake.CLUSTER_NODES[0]) (self.library._client.get_ipspace_name_for_vlan_port .assert_called_once_with( fake.CLUSTER_NODES[0], fake.NODE_DATA_PORT, self.fake_src_share_server['network_allocations'][0][ 'segmentation_id'])) self.library._create_port_and_broadcast_domain.assert_called_once_with( fake.IPSPACE, network_info) self.assertTrue(check_lif_limit.called) def test__check_compatibility_for_svm_migrate_check_failure(self): network_info = { 'network_allocations': self.fake_src_share_server['network_allocations'], 'neutron_subnet_id': self.fake_src_share_server['share_network_subnets'][0].get( 'neutron_subnet_id') } self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value=fake.NODE_DATA_PORT)) self.mock_object( self.library._client, 'get_ipspace_name_for_vlan_port', mock.Mock(return_value=fake.IPSPACE)) self.mock_object(self.library, 
'_create_port_and_broadcast_domain') self.mock_object(self.mock_dest_client, 'get_ipspaces', mock.Mock(return_value=[{'uuid': fake.IPSPACE_ID}])) self.mock_object( self.mock_dest_client, 'svm_migration_start', mock.Mock(side_effect=exception.NetAppException())) self.mock_object(self.mock_dest_client, 'delete_ipspace') self.mock_object(self.mock_dest_client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.assertRaises( exception.NetAppException, self.library._check_compatibility_for_svm_migrate, fake.CLUSTER_NAME, fake.VSERVER1, self.fake_src_share_server, fake.AGGREGATES, self.mock_dest_client) self.library._client.list_cluster_nodes.assert_called_once() self.library._get_node_data_port.assert_called_with( fake.CLUSTER_NODES[0]) (self.library._client.get_ipspace_name_for_vlan_port .assert_called_once_with( fake.CLUSTER_NODES[0], fake.NODE_DATA_PORT, self.fake_src_share_server['network_allocations'][0][ 'segmentation_id'])) self.library._create_port_and_broadcast_domain.assert_called_once_with( fake.IPSPACE, network_info) self.mock_dest_client.delete_ipspace.assert_called_once_with( fake.IPSPACE) def test_share_server_migration_check_compatibility_compatible(self): compatible = { 'compatible': True, 'writable': True, 'nondisruptive': False, 'preserve_snapshots': True, 'migration_cancel': True, 'migration_get_progress': False, 'share_network_id': fake.SHARE_NETWORK['id'], } self._configure_mocks_share_server_migration_check_compatibility( is_svm_dr=True) result = self.library.share_server_migration_check_compatibility( None, self.fake_src_share_server, self.fake_dest_share_server['host'], fake.SHARE_NETWORK, fake.SHARE_NETWORK, fake.SERVER_MIGRATION_REQUEST_SPEC) self.assertEqual(compatible, result) self.library._get_vserver.assert_called_once_with( self.fake_src_share_server, backend_name=self.fake_src_backend_name ) self.assertTrue(self.mock_src_client.get_cluster_name.called) self.assertTrue(self.client.get_cluster_name.called) 
data_motion.get_client_for_backend.assert_called_once_with( self.fake_dest_backend_name, vserver_name=None ) def test__get_job_uuid(self): self.assertEqual( self.library._get_job_uuid( c_fake.FAKE_MIGRATION_RESPONSE_WITH_JOB), c_fake.FAKE_JOB_ID ) def test__wait_for_operation_status(self): job_starting_state = copy.copy(c_fake.FAKE_JOB_SUCCESS_STATE) job_starting_state['state'] = 'starting' returned_jobs = [ job_starting_state, c_fake.FAKE_JOB_SUCCESS_STATE, ] self.mock_object(self.mock_dest_client, 'get_job', mock.Mock(side_effect=returned_jobs)) self.library._wait_for_operation_status( c_fake.FAKE_JOB_ID, self.mock_dest_client.get_job ) self.assertEqual( self.mock_dest_client.get_job.call_count, len(returned_jobs)) def test__wait_for_operation_status_error(self): starting_job = copy.copy(c_fake.FAKE_JOB_SUCCESS_STATE) starting_job['state'] = 'starting' errored_job = copy.copy(c_fake.FAKE_JOB_SUCCESS_STATE) errored_job['state'] = constants.STATUS_ERROR returned_jobs = [starting_job, errored_job] self.mock_object(self.mock_dest_client, 'get_job', mock.Mock(side_effect=returned_jobs)) self.assertRaises( exception.NetAppException, self.library._wait_for_operation_status, c_fake.FAKE_JOB_ID, self.mock_dest_client.get_job ) @ddt.data( {'src_supports_svm_migrate': True, 'dest_supports_svm_migrate': True}, {'src_supports_svm_migrate': True, 'dest_supports_svm_migrate': False}, {'src_supports_svm_migrate': False, 'dest_supports_svm_migrate': True}, {'src_supports_svm_migrate': False, 'dest_supports_svm_migrate': False} ) @ddt.unpack def test_share_server_migration_start(self, src_supports_svm_migrate, dest_supports_svm_migrate): fake_migration_data = {'fake_migration_key': 'fake_migration_value'} self.mock_object( self.library, '_get_vserver', mock.Mock( side_effect=[(self.fake_src_vserver, self.mock_src_client)])) self.mock_object(data_motion, 'get_client_for_backend', mock.Mock(return_value=self.mock_dest_client)) mock_start_using_svm_migrate = self.mock_object( 
self.library, '_migration_start_using_svm_migrate', mock.Mock(return_value=fake_migration_data)) mock_start_using_svm_dr = self.mock_object( self.library, '_migration_start_using_svm_dr', mock.Mock(return_value=fake_migration_data)) self.mock_src_client.is_svm_migrate_supported.return_value = ( src_supports_svm_migrate) self.mock_dest_client.is_svm_migrate_supported.return_value = ( dest_supports_svm_migrate) src_and_dest_support_svm_migrate = all( [src_supports_svm_migrate, dest_supports_svm_migrate]) result = self.library.share_server_migration_start( None, self.fake_src_share_server, self.fake_dest_share_server, [fake.SHARE_INSTANCE], []) self.library._get_vserver.assert_called_once_with( share_server=self.fake_src_share_server, backend_name=self.fake_src_backend_name) if src_and_dest_support_svm_migrate: mock_start_using_svm_migrate.assert_called_once_with( None, self.fake_src_share_server, self.fake_dest_share_server, self.mock_src_client, self.mock_dest_client) else: mock_start_using_svm_dr.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) self.assertEqual(result, fake_migration_data) @ddt.data({'vserver_peered': True, 'src_cluster': fake.CLUSTER_NAME}, {'vserver_peered': False, 'src_cluster': fake.CLUSTER_NAME}, {'vserver_peered': False, 'src_cluster': fake.CLUSTER_NAME_2}) @ddt.unpack def test__migration_start_using_svm_dr(self, vserver_peered, src_cluster): dest_cluster = fake.CLUSTER_NAME dm_session_mock = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=[ (self.fake_src_vserver, self.mock_src_client), (self.fake_dest_vserver, self.mock_dest_client)])) self.mock_object(self.mock_src_client, 'get_cluster_name', mock.Mock(return_value=src_cluster)) self.mock_object(self.mock_dest_client, 'get_cluster_name', mock.Mock(return_value=dest_cluster)) self.mock_object(self.library, '_get_vserver_peers', mock.Mock(return_value=vserver_peered)) self.mock_object(data_motion, "DataMotionSession", 
mock.Mock(return_value=dm_session_mock)) self.library._migration_start_using_svm_dr( self.fake_src_share_server, self.fake_dest_share_server) self.library._get_vserver.assert_has_calls([ mock.call(share_server=self.fake_src_share_server, backend_name=self.fake_src_backend_name), mock.call(share_server=self.fake_dest_share_server, backend_name=self.fake_dest_backend_name)]) self.assertTrue(self.mock_src_client.get_cluster_name.called) self.assertTrue(self.mock_dest_client.get_cluster_name.called) self.library._get_vserver_peers.assert_called_once_with( self.fake_dest_vserver, self.fake_src_vserver ) mock_vserver_peer = self.mock_dest_client.create_vserver_peer if vserver_peered: self.assertFalse(mock_vserver_peer.called) else: mock_vserver_peer.assert_called_once_with( self.fake_dest_vserver, self.fake_src_vserver, peer_cluster_name=src_cluster ) accept_peer_mock = self.mock_src_client.accept_vserver_peer if src_cluster != dest_cluster: accept_peer_mock.assert_called_once_with( self.fake_src_vserver, self.fake_dest_vserver ) else: self.assertFalse(accept_peer_mock.called) dm_session_mock.create_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) def test_share_server_migration_start_snapmirror_start_failure(self): self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=[ (self.fake_src_vserver, self.mock_src_client), (self.fake_dest_vserver, self.mock_dest_client)])) self.mock_object(self.mock_src_client, 'get_cluster_name') self.mock_object(self.mock_dest_client, 'get_cluster_name') self.mock_object(self.library, '_get_vserver_peers', mock.Mock(return_value=True)) dm_session_mock = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=dm_session_mock)) create_snapmirror_mock = self.mock_object( dm_session_mock, 'create_snapmirror_svm', mock.Mock( side_effect=exception.NetAppException(message='fake'))) self.assertRaises(exception.NetAppException, 
self.library._migration_start_using_svm_dr, self.fake_src_share_server, self.fake_dest_share_server) self.library._get_vserver.assert_has_calls([ mock.call(share_server=self.fake_src_share_server, backend_name=self.fake_src_backend_name), mock.call(share_server=self.fake_dest_share_server, backend_name=self.fake_dest_backend_name)]) self.assertTrue(self.mock_src_client.get_cluster_name.called) self.assertTrue(self.mock_dest_client.get_cluster_name.called) self.library._get_vserver_peers.assert_called_once_with( self.fake_dest_vserver, self.fake_src_vserver ) self.assertFalse(self.mock_dest_client.create_vserver_peer.called) create_snapmirror_mock.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) dm_session_mock.cancel_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) @ddt.data( {'network_change_during_migration': True}, {'network_change_during_migration': False}) @ddt.unpack def test__migration_start_using_svm_migrate( self, network_change_during_migration): self.fake_src_share_server['share_network_subnet_id'] = 'fake_sns_id' self.fake_dest_share_server['share_network_subnet_id'] = 'fake_sns_id' node_name = fake.CLUSTER_NODES[0] expected_server_info = { 'backend_details': { 'migration_operation_id': c_fake.FAKE_MIGRATION_POST_ID } } if not network_change_during_migration: self.fake_dest_share_server['network_allocations'] = None server_to_get_network_info = ( self.fake_dest_share_server if network_change_during_migration else self.fake_src_share_server) if network_change_during_migration: self.fake_dest_share_server['share_network_subnet_id'] = ( 'different_sns_id') segmentation_id = ( server_to_get_network_info['network_allocations'][0][ 'segmentation_id']) network_info = { 'network_allocations': server_to_get_network_info['network_allocations'], 'neutron_subnet_id': server_to_get_network_info['share_network_subnets'][0].get( 'neutron_subnet_id') } mock_list_cluster_nodes = 
self.mock_object( self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) mock_get_data_port = self.mock_object( self.library, '_get_node_data_port', mock.Mock(return_value=fake.NODE_DATA_PORT)) mock_get_ipspace = self.mock_object( self.library._client, 'get_ipspace_name_for_vlan_port', mock.Mock(return_value=fake.IPSPACE)) mock_create_port = self.mock_object( self.library, '_create_port_and_broadcast_domain') mock_get_cluster_name = self.mock_object( self.mock_src_client, 'get_cluster_name', mock.Mock(return_value=fake.CLUSTER_NAME)) mock_get_aggregates = self.mock_object( self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_svm_migration_start = self.mock_object( self.mock_dest_client, 'svm_migration_start', mock.Mock(return_value=c_fake.FAKE_MIGRATION_RESPONSE_WITH_JOB)) mock_get_job = self.mock_object( self.mock_dest_client, 'get_job', mock.Mock(return_value=c_fake.FAKE_JOB_SUCCESS_STATE)) server_info = self.library._migration_start_using_svm_migrate( None, self.fake_src_share_server, self.fake_dest_share_server, self.mock_src_client, self.mock_dest_client) self.assertTrue(mock_list_cluster_nodes.called) mock_get_data_port.assert_called_once_with(node_name) mock_get_ipspace.assert_called_once_with( node_name, fake.NODE_DATA_PORT, segmentation_id) mock_create_port.assert_called_once_with( fake.IPSPACE, network_info) self.assertTrue(mock_get_cluster_name.called) mock_svm_migration_start.assert_called_once_with( fake.CLUSTER_NAME, self.fake_src_vserver, fake.AGGREGATES, dest_ipspace=fake.IPSPACE) self.assertTrue(mock_get_aggregates.called) self.assertEqual(expected_server_info, server_info) mock_get_job.assert_called_once_with(c_fake.FAKE_JOB_ID) def test__migration_start_using_svm_migrate_exception(self): self.fake_src_share_server['share_network_subnet_id'] = 'fake_sns_id' self.fake_dest_share_server['share_network_subnet_id'] = 'fake_sns_id' node_name = fake.CLUSTER_NODES[0] 
server_to_get_network_info = self.fake_dest_share_server segmentation_id = ( server_to_get_network_info['network_allocations'][0][ 'segmentation_id']) network_info = { 'network_allocations': server_to_get_network_info['network_allocations'], 'neutron_subnet_id': server_to_get_network_info['share_network_subnets'][0].get( 'neutron_subnet_id') } mock_list_cluster_nodes = self.mock_object( self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) mock_get_data_port = self.mock_object( self.library, '_get_node_data_port', mock.Mock(return_value=fake.NODE_DATA_PORT)) mock_get_ipspace = self.mock_object( self.library._client, 'get_ipspace_name_for_vlan_port', mock.Mock(return_value=fake.IPSPACE)) mock_create_port = self.mock_object( self.library, '_create_port_and_broadcast_domain') mock_get_vserver_name = self.mock_object( self.library, '_get_vserver_name', mock.Mock(return_value=fake.VSERVER1)) mock_get_cluster_name = self.mock_object( self.mock_src_client, 'get_cluster_name', mock.Mock(return_value=fake.CLUSTER_NAME)) mock_get_aggregates = self.mock_object( self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_svm_migration_start = self.mock_object( self.mock_dest_client, 'svm_migration_start', mock.Mock(side_effect=exception.NetAppException())) mock_delete_ipspace = self.mock_object( self.mock_dest_client, 'delete_ipspace') self.assertRaises( exception.NetAppException, self.library._migration_start_using_svm_migrate, None, self.fake_src_share_server, self.fake_dest_share_server, self.mock_src_client, self.mock_dest_client) self.assertTrue(mock_list_cluster_nodes.called) mock_get_data_port.assert_called_once_with(node_name) mock_get_ipspace.assert_called_once_with( node_name, fake.NODE_DATA_PORT, segmentation_id) mock_create_port.assert_called_once_with( fake.IPSPACE, network_info) mock_get_vserver_name.assert_not_called() self.assertTrue(mock_get_cluster_name.called) 
mock_svm_migration_start.assert_called_once_with( fake.CLUSTER_NAME, self.fake_src_vserver, fake.AGGREGATES, dest_ipspace=fake.IPSPACE) self.assertTrue(mock_get_aggregates.called) mock_delete_ipspace.assert_called_once_with(fake.IPSPACE) def test__get_snapmirror_svm(self): dm_session_mock = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=dm_session_mock)) fake_snapmirrors = ['mirror1'] self.mock_object(dm_session_mock, 'get_snapmirrors_svm', mock.Mock(return_value=fake_snapmirrors)) result = self.library._get_snapmirror_svm( self.fake_src_share_server, self.fake_dest_share_server) dm_session_mock.get_snapmirrors_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) self.assertEqual(fake_snapmirrors, result) def test__get_snapmirror_svm_fail_to_get_snapmirrors(self): dm_session_mock = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=dm_session_mock)) self.mock_object(dm_session_mock, 'get_snapmirrors_svm', mock.Mock( side_effect=netapp_api.NaApiError(code=0))) self.assertRaises(exception.NetAppException, self.library._get_snapmirror_svm, self.fake_src_share_server, self.fake_dest_share_server) dm_session_mock.get_snapmirrors_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) def test_share_server_migration_continue_svm_dr_no_snapmirror(self): self.mock_object(self.library, '_get_snapmirror_svm', mock.Mock(return_value=[])) self.assertRaises(exception.NetAppException, self.library._share_server_migration_continue_svm_dr, self.fake_src_share_server, self.fake_dest_share_server) self.library._get_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) @ddt.data({'mirror_state': 'snapmirrored', 'status': 'idle'}, {'mirror_state': 'uninitialized', 'status': 'transferring'}, {'mirror_state': 'snapmirrored', 'status': 'quiescing'}, ) @ddt.unpack def 
test_share_server_migration_continue_svm_dr(self, mirror_state, status): fake_snapmirror = { 'mirror-state': mirror_state, 'relationship-status': status, } self.mock_object(self.library, '_get_snapmirror_svm', mock.Mock(return_value=[fake_snapmirror])) expected = mirror_state == 'snapmirrored' and status == 'idle' result = self.library._share_server_migration_continue_svm_dr( self.fake_src_share_server, self.fake_dest_share_server ) self.assertEqual(expected, result) self.library._get_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) @ddt.data( ('ready_for_cutover', True), ('transferring', False) ) @ddt.unpack def test_share_server_migration_continue_svm_migrate( self, job_state, first_phase_completed): c_fake.FAKE_MIGRATION_JOB_SUCCESS.update({"state": job_state}) self.mock_object(data_motion, 'get_client_for_host', mock.Mock(return_value=self.mock_dest_client)) self.mock_object( self.mock_dest_client, 'svm_migration_get', mock.Mock(return_value=c_fake.FAKE_MIGRATION_JOB_SUCCESS)) result = self.library._share_server_migration_continue_svm_migrate( self.fake_dest_share_server, c_fake.FAKE_MIGRATION_POST_ID) self.assertEqual(first_phase_completed, result) data_motion.get_client_for_host.assert_called_once_with( self.fake_dest_share_server['host']) self.mock_dest_client.svm_migration_get.assert_called_once_with( c_fake.FAKE_MIGRATION_POST_ID) def test_share_server_migration_continue_svm_migrate_exception(self): self.mock_object(data_motion, 'get_client_for_host', mock.Mock(return_value=self.mock_dest_client)) self.mock_object(self.mock_dest_client, 'svm_migration_get', mock.Mock(side_effect=netapp_api.NaApiError())) self.assertRaises( exception.NetAppException, self.library._share_server_migration_continue_svm_migrate, self.fake_dest_share_server, c_fake.FAKE_MIGRATION_POST_ID) data_motion.get_client_for_host.assert_called_once_with( self.fake_dest_share_server['host']) 
self.mock_dest_client.svm_migration_get.assert_called_once_with( c_fake.FAKE_MIGRATION_POST_ID) @ddt.data(None, 'fake_migration_id') def test_share_server_migration_continue(self, migration_id): expected_result = True self.mock_object( self.library, '_get_share_server_migration_id', mock.Mock(return_value=migration_id)) self.mock_object( self.library, '_share_server_migration_continue_svm_migrate', mock.Mock(return_value=expected_result)) self.mock_object( self.library, '_share_server_migration_continue_svm_dr', mock.Mock(return_value=expected_result)) result = self.library.share_server_migration_continue( None, self.fake_src_share_server, self.fake_dest_share_server, [], [] ) self.assertEqual(expected_result, result) def test__setup_networking_for_destination_vserver(self): self.mock_object(self.mock_dest_client, 'get_vserver_ipspace', mock.Mock(return_value=fake.IPSPACE)) self.mock_object(self.library, '_setup_network_for_vserver') self.library._setup_networking_for_destination_vserver( self.mock_dest_client, self.fake_vserver, fake.NETWORK_INFO_LIST) self.mock_dest_client.get_vserver_ipspace.assert_called_once_with( self.fake_vserver) self.library._setup_network_for_vserver.assert_called_once_with( self.fake_vserver, self.mock_dest_client, fake.NETWORK_INFO_LIST, fake.IPSPACE, enable_nfs=False, security_services=None) def test__migration_complete_svm_dr(self): dm_session_mock = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(self.fake_dest_vserver, self.mock_dest_client))) self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=dm_session_mock)) self.mock_object( self.library, '_setup_networking_for_destination_vserver') self.library._share_server_migration_complete_svm_dr( self.fake_src_share_server, self.fake_dest_share_server, self.fake_src_vserver, self.mock_src_client, [fake.SHARE_INSTANCE], fake.NETWORK_INFO_LIST ) self.library._get_vserver.assert_called_once_with( 
share_server=self.fake_dest_share_server, backend_name=self.fake_dest_backend_name ) dm_session_mock.update_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) quiesce_break_mock = dm_session_mock.quiesce_and_break_snapmirror_svm quiesce_break_mock.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) dm_session_mock.wait_for_vserver_state.assert_called_once_with( self.fake_dest_vserver, self.mock_dest_client, subtype='default', state='running', operational_state='stopped', timeout=(self.library.configuration. netapp_server_migration_state_change_timeout) ) self.mock_src_client.stop_vserver.assert_called_once_with( self.fake_src_vserver ) (self.library._setup_networking_for_destination_vserver .assert_called_once_with( self.mock_dest_client, self.fake_dest_vserver, fake.NETWORK_INFO_LIST)) self.mock_dest_client.start_vserver.assert_called_once_with( self.fake_dest_vserver ) dm_session_mock.delete_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) @ddt.data( {'is_svm_dr': True, 'network_change': True}, {'is_svm_dr': False, 'network_change': True}, {'is_svm_dr': False, 'network_change': False}, ) @ddt.unpack def test_share_server_migration_complete(self, is_svm_dr, network_change): current_interfaces = ['interface_1', 'interface_2'] self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=[ (self.fake_src_vserver, self.mock_src_client), (self.fake_dest_vserver, self.mock_dest_client)])) mock_complete_svm_migrate = self.mock_object( self.library, '_share_server_migration_complete_svm_migrate') mock_complete_svm_dr = self.mock_object( self.library, '_share_server_migration_complete_svm_dr') fake_share_name = self.library._get_backend_share_name( fake.SHARE_INSTANCE['id']) fake_volume = copy.deepcopy(fake.CLIENT_GET_VOLUME_RESPONSE) self.mock_object(self.mock_dest_client, 'get_volume', mock.Mock(return_value=fake_volume)) 
self.mock_object(self.library, '_create_export', mock.Mock(return_value=fake.NFS_EXPORTS)) self.mock_object(self.library, '_delete_share') mock_update_share_attrs = self.mock_object( self.library, '_update_share_attributes_after_server_migration') self.mock_object(data_motion, 'get_client_for_host', mock.Mock(return_value=self.mock_dest_client)) self.mock_object(self.mock_dest_client, 'list_network_interfaces', mock.Mock(return_value=current_interfaces)) self.mock_object(self.mock_dest_client, 'delete_network_interface') self.mock_object(self.library, '_setup_networking_for_destination_vserver') sns_id = 'fake_sns_id' new_sns_id = 'fake_sns_id_2' self.fake_src_share_server['share_network_subnet_id'] = sns_id self.fake_dest_share_server['share_network_subnet_id'] = ( sns_id if not network_change else new_sns_id) share_instances = [fake.SHARE_INSTANCE] migration_id = 'fake_migration_id' share_host = fake.SHARE_INSTANCE['host'] self.fake_src_share_server['backend_details']['ports'] = [] if not is_svm_dr: self.fake_dest_share_server['backend_details'][ 'migration_operation_id'] = ( migration_id) share_host = share_host.replace( share_host.split('#')[1], fake_volume['aggregate']) should_recreate_export = is_svm_dr or network_change share_server_to_get_vserver_name = ( self.fake_dest_share_server if is_svm_dr else self.fake_src_share_server) result = self.library.share_server_migration_complete( None, self.fake_src_share_server, self.fake_dest_share_server, share_instances, [], fake.NETWORK_INFO_LIST ) expected_share_updates = { fake.SHARE_INSTANCE['id']: { 'pool_name': fake_volume['aggregate'] } } expected_share_updates[fake.SHARE_INSTANCE['id']].update( {'export_locations': fake.NFS_EXPORTS}) expected_backend_details = ( {} if is_svm_dr else self.fake_src_share_server['backend_details']) expected_result = { 'share_updates': expected_share_updates, 'server_backend_details': expected_backend_details } self.assertEqual(expected_result, result) 
self.library._get_vserver.assert_has_calls([ mock.call(share_server=self.fake_src_share_server, backend_name=self.fake_src_backend_name), mock.call(share_server=share_server_to_get_vserver_name, backend_name=self.fake_dest_backend_name)]) if is_svm_dr: mock_complete_svm_dr.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server, self.fake_src_vserver, self.mock_src_client, share_instances, fake.NETWORK_INFO_LIST ) self.library._delete_share.assert_called_once_with( fake.SHARE_INSTANCE, self.fake_src_vserver, self.mock_src_client, remove_export=True) mock_update_share_attrs.assert_called_once_with( fake.SHARE_INSTANCE, self.mock_src_client, fake_volume['aggregate'], self.mock_dest_client) else: mock_complete_svm_migrate.assert_called_once_with( migration_id, self.fake_dest_share_server) self.mock_dest_client.list_network_interfaces.assert_called_once() data_motion.get_client_for_host.assert_has_calls([ mock.call(self.fake_dest_share_server['host']), mock.call(self.fake_src_share_server['host']), ]) self.mock_dest_client.delete_network_interface.assert_has_calls( [mock.call(self.fake_src_vserver, interface_name) for interface_name in current_interfaces]) (self.library._setup_networking_for_destination_vserver. 
assert_called_once_with(self.mock_dest_client, self.fake_src_vserver, fake.NETWORK_INFO_LIST)) if should_recreate_export: create_export_calls = [ mock.call( instance, self.fake_dest_share_server, self.fake_dest_vserver, self.mock_dest_client, clear_current_export_policy=False, ensure_share_already_exists=True, share_host=share_host) for instance in share_instances ] self.library._create_export.assert_has_calls(create_export_calls) self.mock_dest_client.get_volume.assert_called_once_with( fake_share_name) def test_share_server_migration_complete_failure_breaking(self): dm_session_mock = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=dm_session_mock)) self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=(self.fake_dest_vserver, self.mock_dest_client))) self.mock_object(dm_session_mock, 'quiesce_and_break_snapmirror_svm', mock.Mock(side_effect=exception.NetAppException)) self.mock_object(self.library, '_delete_share') self.assertRaises(exception.NetAppException, self.library._share_server_migration_complete_svm_dr, self.fake_src_share_server, self.fake_dest_share_server, self.fake_src_vserver, self.mock_src_client, [fake.SHARE_INSTANCE], [fake.NETWORK_INFO]) dm_session_mock.update_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) self.library._get_vserver.assert_called_once_with( share_server=self.fake_dest_share_server, backend_name=self.fake_dest_backend_name) quiesce_break_mock = dm_session_mock.quiesce_and_break_snapmirror_svm quiesce_break_mock.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) self.mock_src_client.start_vserver.assert_called_once_with( self.fake_src_vserver ) dm_session_mock.cancel_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) self.library._delete_share.assert_called_once_with( fake.SHARE_INSTANCE, self.fake_dest_vserver, self.mock_dest_client, 
remove_export=False) def test_share_server_migration_complete_failure_get_new_volume(self): dm_session_mock = mock.Mock() fake_share_name = self.library._get_backend_share_name( fake.SHARE_INSTANCE['id']) self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=dm_session_mock)) self.mock_object(self.library, '_get_vserver', mock.Mock(side_effect=[ (self.fake_src_vserver, self.mock_src_client), (self.fake_dest_vserver, self.mock_dest_client)])) self.mock_object(self.library, '_share_server_migration_complete_svm_dr') self.mock_object(self.library, '_get_share_server_migration_id', mock.Mock(return_value=None)) self.mock_object(self.mock_dest_client, 'get_volume', mock.Mock(side_effect=exception.NetAppException)) self.assertRaises(exception.NetAppException, self.library.share_server_migration_complete, None, self.fake_src_share_server, self.fake_dest_share_server, [fake.SHARE_INSTANCE], [], fake.NETWORK_INFO_LIST) self.library._get_vserver.assert_has_calls([ mock.call(share_server=self.fake_src_share_server, backend_name=self.fake_src_backend_name), mock.call(share_server=self.fake_dest_share_server, backend_name=self.fake_dest_backend_name)]) self.mock_dest_client.get_volume.assert_called_once_with( fake_share_name) def test__share_server_migration_complete_svm_migrate(self): completion_status = na_utils.MIGRATION_STATE_MIGRATE_COMPLETE migration_id = 'fake_migration_id' fake_complete_job_uuid = 'fake_uuid' fake_complete_job = { 'job': { 'state': 'cutover_triggered', 'uuid': fake_complete_job_uuid } } self.mock_object(data_motion, 'get_client_for_host', mock.Mock(return_value=self.mock_dest_client)) self.mock_object(self.mock_dest_client, 'svm_migrate_complete', mock.Mock(return_value=fake_complete_job)) self.mock_object(self.library, '_get_job_uuid', mock.Mock(return_value=fake_complete_job_uuid)) self.mock_object(self.library, '_wait_for_operation_status') self.library._share_server_migration_complete_svm_migrate( migration_id, 
self.fake_dest_share_server) data_motion.get_client_for_host.assert_called_once_with( self.fake_dest_share_server['host']) self.mock_dest_client.svm_migrate_complete.assert_called_once_with( migration_id) self.library._get_job_uuid.assert_called_once_with(fake_complete_job) self.library._wait_for_operation_status.assert_has_calls( [mock.call(fake_complete_job_uuid, self.mock_dest_client.get_job), mock.call(migration_id, self.mock_dest_client.svm_migration_get, desired_status=completion_status) ] ) def test__share_server_migration_complete_svm_migrate_failed_to_complete( self): migration_id = 'fake_migration_id' self.mock_object(data_motion, 'get_client_for_host', mock.Mock(return_value=self.mock_dest_client)) self.mock_object(self.mock_dest_client, 'svm_migrate_complete', mock.Mock(side_effect=exception.NetAppException())) self.assertRaises( exception.NetAppException, self.library._share_server_migration_complete_svm_migrate, migration_id, self.fake_dest_share_server) data_motion.get_client_for_host.assert_called_once_with( self.fake_dest_share_server['host']) self.mock_dest_client.svm_migrate_complete.assert_called_once_with( migration_id) @ddt.data([], ['fake_snapmirror']) def test_share_server_migration_cancel_svm_dr(self, snapmirrors): dm_session_mock = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=dm_session_mock)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(self.fake_dest_vserver, self.mock_dest_client))) self.mock_object(self.library, '_get_snapmirror_svm', mock.Mock(return_value=snapmirrors)) self.mock_object(self.library, '_delete_share') self.library._migration_cancel_using_svm_dr( self.fake_src_share_server, self.fake_dest_share_server, [fake.SHARE_INSTANCE] ) self.library._get_vserver.assert_called_once_with( share_server=self.fake_dest_share_server, backend_name=self.fake_dest_backend_name) self.library._get_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, 
self.fake_dest_share_server ) if snapmirrors: dm_session_mock.cancel_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) self.library._delete_share.assert_called_once_with( fake.SHARE_INSTANCE, self.fake_dest_vserver, self.mock_dest_client, remove_export=False) @ddt.data(True, False) def test__migration_cancel_using_svm_migrate(self, has_ipspace): pause_job_uuid = 'fake_pause_job_id' cancel_job_uuid = 'fake_cancel_job_id' ipspace_name = 'fake_ipspace_name' migration_id = 'fake_migration_id' pause_job = { 'uuid': pause_job_uuid } cancel_job = { 'uuid': cancel_job_uuid } migration_information = { "destination": { "ipspace": { "name": ipspace_name } } } if has_ipspace: migration_information["destination"]["ipspace"]["name"] = ( ipspace_name) self.mock_object(self.library, '_get_job_uuid', mock.Mock( side_effect=[pause_job_uuid, cancel_job_uuid])) self.mock_object(data_motion, 'get_client_for_host', mock.Mock(return_value=self.mock_dest_client)) self.mock_object(self.mock_dest_client, 'svm_migration_get', mock.Mock(return_value=migration_information)) self.mock_object(self.mock_dest_client, 'svm_migrate_pause', mock.Mock(return_value=pause_job)) self.mock_object(self.library, '_wait_for_operation_status') self.mock_object(self.mock_dest_client, 'svm_migrate_cancel', mock.Mock(return_value=cancel_job)) self.mock_object(self.mock_dest_client, 'ipspace_has_data_vservers', mock.Mock(return_value=False)) self.mock_object(self.mock_dest_client, 'delete_ipspace') self.mock_object(self.mock_dest_client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value='fake_port')) self.library._migration_cancel_using_svm_migrate( migration_id, self.fake_dest_share_server) self.library._get_job_uuid.assert_has_calls( [mock.call(pause_job), mock.call(cancel_job)] ) data_motion.get_client_for_host.assert_called_once_with( self.fake_dest_share_server['host']) 
self.mock_dest_client.svm_migration_get.assert_called_once_with( migration_id) self.mock_dest_client.svm_migrate_pause.assert_called_once_with( migration_id) self.library._wait_for_operation_status.assert_has_calls( [mock.call(pause_job_uuid, self.mock_dest_client.get_job), mock.call(migration_id, self.mock_dest_client.svm_migration_get, desired_status=na_utils.MIGRATION_STATE_MIGRATE_PAUSED), mock.call(cancel_job_uuid, self.mock_dest_client.get_job)] ) self.mock_dest_client.svm_migrate_cancel.assert_called_once_with( migration_id) if has_ipspace: self.mock_dest_client.delete_ipspace.assert_called_once_with( ipspace_name) @ddt.data( (mock.Mock(side_effect=exception.NetAppException()), mock.Mock()), (mock.Mock(), mock.Mock(side_effect=exception.NetAppException())) ) @ddt.unpack def test__migration_cancel_using_svm_migrate_error( self, mock_pause, mock_cancel): pause_job_uuid = 'fake_pause_job_id' cancel_job_uuid = 'fake_cancel_job_id' migration_id = 'fake_migration_id' migration_information = { "destination": { "ipspace": { "name": "ipspace_name" } } } self.mock_object(self.library, '_get_job_uuid', mock.Mock( side_effect=[pause_job_uuid, cancel_job_uuid])) self.mock_object(data_motion, 'get_client_for_host', mock.Mock(return_value=self.mock_dest_client)) self.mock_object(self.mock_dest_client, 'svm_migration_get', mock.Mock(return_value=migration_information)) self.mock_object(self.mock_dest_client, 'svm_migrate_pause', mock_pause) self.mock_object(self.library, '_wait_for_operation_status') self.mock_object(self.mock_dest_client, 'svm_migrate_cancel', mock_cancel) self.assertRaises( exception.NetAppException, self.library._migration_cancel_using_svm_migrate, migration_id, self.fake_dest_share_server ) def test_share_server_migration_cancel_svm_dr_snapmirror_failure(self): dm_session_mock = mock.Mock() self.mock_object(data_motion, "DataMotionSession", mock.Mock(return_value=dm_session_mock)) self.mock_object(self.library, '_get_vserver', 
mock.Mock(return_value=(self.fake_dest_vserver, self.mock_dest_client))) self.mock_object(self.library, '_get_snapmirror_svm', mock.Mock(return_value=['fake_snapmirror'])) self.mock_object(dm_session_mock, 'cancel_snapmirror_svm', mock.Mock(side_effect=exception.NetAppException)) self.assertRaises(exception.NetAppException, self.library._migration_cancel_using_svm_dr, self.fake_src_share_server, self.fake_dest_share_server, [fake.SHARE_INSTANCE]) self.library._get_vserver.assert_called_once_with( share_server=self.fake_dest_share_server, backend_name=self.fake_dest_backend_name) self.library._get_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) dm_session_mock.cancel_snapmirror_svm.assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server ) @ddt.data(None, 'fake_migration_id') def test_share_server_migration_cancel(self, migration_id): self.mock_object(self.library, '_get_share_server_migration_id', mock.Mock(return_value=migration_id)) self.mock_object(self.library, '_migration_cancel_using_svm_migrate') self.mock_object(self.library, '_migration_cancel_using_svm_dr') self.library.share_server_migration_cancel( None, self.fake_src_share_server, self.fake_dest_share_server, [], []) if migration_id: (self.library._migration_cancel_using_svm_migrate .assert_called_once_with( migration_id, self.fake_dest_share_server)) else: (self.library._migration_cancel_using_svm_dr .assert_called_once_with( self.fake_src_share_server, self.fake_dest_share_server, [])) def test_share_server_migration_get_progress(self): fake_vserver_name = fake.VSERVER1 expected_result = {'total_progress': 50} self.mock_object(self.library._client, 'get_svm_volumes_total_size', mock.Mock(return_value=5)) self.mock_object(self.library, '_get_vserver_name', mock.Mock(return_value=fake_vserver_name)) result = self.library.share_server_migration_get_progress( None, self.fake_src_share_server, self.fake_dest_share_server, 
[self.fake_src_share], None ) self.library._client.get_svm_volumes_total_size.assert_called_once_with (fake_vserver_name) self.library._get_vserver_name.assert_called_once_with (self.fake_dest_share_server['source_share_server_id']) self.assertEqual(expected_result, result) @ddt.data({'subtype': 'default', 'share_group': None, 'compatible': True}, {'subtype': 'default', 'share_group': {'share_server_id': fake.SHARE_SERVER['id']}, 'compatible': True}, {'subtype': 'dp_destination', 'share_group': None, 'compatible': False}, {'subtype': 'default', 'share_group': {'share_server_id': 'another_fake_id'}, 'compatible': False}) @ddt.unpack def test_choose_share_server_compatible_with_share_vserver_info( self, subtype, share_group, compatible): self.library.is_nfs_config_supported = False mock_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_client))) fake_vserver_info = { 'operational_state': 'running', 'state': 'running', 'subtype': subtype } self.mock_object(mock_client, 'get_vserver_info', mock.Mock(return_value=fake_vserver_info)) mock_get_extra_spec = self.mock_object( share_types, 'get_extra_specs_from_share', mock.Mock(return_value='fake_extra_specs')) mock_get_provisioning_opts = self.mock_object( self.library, '_get_provisioning_options', mock.Mock(return_value={})) self.mock_object(mock_client, 'list_vserver_aggregates', mock.Mock(return_value=fake.AGGREGATES)) result = self.library.choose_share_server_compatible_with_share( None, [fake.SHARE_SERVER], fake.SHARE_2, None, share_group ) expected_result = fake.SHARE_SERVER if compatible else None self.assertEqual(expected_result, result) mock_get_extra_spec.assert_called_once_with(fake.SHARE_2) mock_get_provisioning_opts.assert_called_once_with('fake_extra_specs') if (share_group and share_group['share_server_id'] != fake.SHARE_SERVER['id']): mock_client.get_vserver_info.assert_not_called() self.library._get_vserver.assert_not_called() else: 
mock_client.get_vserver_info.assert_called_once_with( fake.VSERVER1, ) self.library._get_vserver.assert_called_once_with( fake.SHARE_SERVER, backend_name=fake.BACKEND_NAME ) @ddt.data( {'policies': [], 'reusable_scope': None, 'compatible': True}, {'policies': "0123456789", 'reusable_scope': {'scope'}, 'compatible': True}, {'policies': "0123456789", 'reusable_scope': None, 'compatible': False}) @ddt.unpack def test_choose_share_server_compatible_with_share_fpolicy( self, policies, reusable_scope, compatible): self.library.is_nfs_config_supported = False mock_client = mock.Mock() fake_extra_spec = copy.deepcopy(fake.EXTRA_SPEC_WITH_FPOLICY) mock_get_extra_spec = self.mock_object( share_types, 'get_extra_specs_from_share', mock.Mock(return_value=fake_extra_spec)) self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_client))) self.mock_object(mock_client, 'get_vserver_info', mock.Mock(return_value=fake.VSERVER_INFO)) self.mock_object(mock_client, 'list_vserver_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_get_policies = self.mock_object( mock_client, 'get_fpolicy_policies_status', mock.Mock(return_value=policies)) mock_reusable_scope = self.mock_object( self.library, '_find_reusable_fpolicy_scope', mock.Mock(return_value=reusable_scope)) result = self.library.choose_share_server_compatible_with_share( None, [fake.SHARE_SERVER], fake.SHARE_2, None, None ) expected_result = fake.SHARE_SERVER if compatible else None self.assertEqual(expected_result, result) mock_get_extra_spec.assert_called_once_with(fake.SHARE_2) mock_client.get_vserver_info.assert_called_once_with( fake.VSERVER1, ) self.library._get_vserver.assert_called_once_with( fake.SHARE_SERVER, backend_name=fake.BACKEND_NAME ) mock_get_policies.assert_called_once() if len(policies) >= self.library.FPOLICY_MAX_VSERVER_POLICIES: mock_reusable_scope.assert_called_once_with( fake.SHARE_2, mock_client, fpolicy_extensions_to_include=fake.FPOLICY_EXT_TO_INCLUDE, 
fpolicy_extensions_to_exclude=fake.FPOLICY_EXT_TO_EXCLUDE, fpolicy_file_operations=fake.FPOLICY_FILE_OPERATIONS) @ddt.data({'subtype': 'default', 'compatible': True}, {'subtype': 'dp_destination', 'compatible': False}) @ddt.unpack def test_choose_share_server_compatible_with_share_group_vserver_info( self, subtype, compatible): self.library.is_nfs_config_supported = False mock_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_client))) fake_vserver_info = { 'operational_state': 'running', 'state': 'running', 'subtype': subtype } self.mock_object(mock_client, 'get_vserver_info', mock.Mock(return_value=fake_vserver_info)) result = self.library.choose_share_server_compatible_with_share_group( None, [fake.SHARE_SERVER], None ) expected_result = fake.SHARE_SERVER if compatible else None self.assertEqual(expected_result, result) self.library._get_vserver.assert_called_once_with( fake.SHARE_SERVER, backend_name=fake.BACKEND_NAME ) mock_client.get_vserver_info.assert_called_once_with( fake.VSERVER1, ) def test_choose_share_server_compatible_with_different_aggrs(self): self.library.is_nfs_config_supported = False mock_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_client))) fake_vserver_info = { 'operational_state': 'running', 'state': 'running', 'subtype': 'default' } self.mock_object(mock_client, 'get_vserver_info', mock.Mock(return_value=fake_vserver_info)) mock_get_extra_spec = self.mock_object( share_types, 'get_extra_specs_from_share', mock.Mock(return_value='fake_extra_specs')) mock_get_provisioning_opts = self.mock_object( self.library, '_get_provisioning_options', mock.Mock(return_value={})) self.mock_object(mock_client, 'list_vserver_aggregates', mock.Mock(return_value=fake.AGGREGATES)) result = self.library.choose_share_server_compatible_with_share( None, [fake.SHARE_SERVER], fake.SHARE_INSTANCE, None) self.assertIsNone(result) 
mock_get_extra_spec.assert_called_once_with(fake.SHARE_INSTANCE) mock_get_provisioning_opts.assert_called_once_with('fake_extra_specs') def test_choose_share_server_compatible_with_flexgroups(self): self.library.is_nfs_config_supported = False mock_client = mock.Mock() self.mock_object(self.library, '_get_vserver', mock.Mock(return_value=(fake.VSERVER1, mock_client))) fake_vserver_info = { 'operational_state': 'running', 'state': 'running', 'subtype': 'default' } self.mock_object(mock_client, 'get_vserver_info', mock.Mock(return_value=fake_vserver_info)) mock_get_extra_spec = self.mock_object( share_types, 'get_extra_specs_from_share', mock.Mock(return_value='fake_extra_specs')) mock_get_provisioning_opts = self.mock_object( self.library, '_get_provisioning_options', mock.Mock(return_value={})) self.mock_object(mock_client, 'list_vserver_aggregates', mock.Mock(return_value=fake.FLEXGROUP_POOL_AGGR)) self.mock_object(self.library, '_is_flexgroup_pool', mock.Mock(return_value=True)) self.mock_object(self.library, '_get_flexgroup_aggregate_list', mock.Mock(return_value=fake.FLEXGROUP_POOL_AGGR)) result = self.library.choose_share_server_compatible_with_share( None, [fake.SHARE_SERVER], fake.SHARE_FLEXGROUP, None) expected_result = fake.SHARE_SERVER self.assertEqual(expected_result, result) self.library._get_vserver.assert_called_once_with( fake.SHARE_SERVER, backend_name=fake.BACKEND_NAME ) mock_client.get_vserver_info.assert_called_once_with( fake.VSERVER1, ) mock_get_extra_spec.assert_called_once_with(fake.SHARE_FLEXGROUP) mock_get_provisioning_opts.assert_called_once_with('fake_extra_specs') def test__create_port_and_broadcast_domain(self): self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_node_data_port', mock.Mock(return_value='fake_port')) self.library._create_port_and_broadcast_domain(fake.IPSPACE, fake.NETWORK_INFO) node_network_info = zip(fake.CLUSTER_NODES, 
fake.NETWORK_INFO['network_allocations']) get_node_port_calls = [] create_port_calls = [] for node, alloc in node_network_info: get_node_port_calls.append(mock.call(node)) create_port_calls.append(mock.call( node, 'fake_port', alloc['segmentation_id'], alloc['mtu'], fake.IPSPACE )) self.library._get_node_data_port.assert_has_calls(get_node_port_calls) self.library._client.create_port_and_broadcast_domain.assert_has_calls( create_port_calls) def test___update_share_attributes_after_server_migration(self): fake_aggregate = 'fake_aggr_0' mock_get_extra_spec = self.mock_object( share_types, "get_extra_specs_from_share", mock.Mock(return_value=fake.EXTRA_SPEC)) mock__get_provisioning_opts = self.mock_object( self.library, '_get_provisioning_options', mock.Mock(return_value=copy.deepcopy(fake.PROVISIONING_OPTIONS))) fake_share_name = self.library._get_backend_share_name( fake.SHARE_INSTANCE['id']) mock_get_vol_autosize_attrs = self.mock_object( self.mock_src_client, 'get_volume_autosize_attributes', mock.Mock(return_value=fake.VOLUME_AUTOSIZE_ATTRS) ) fake_provisioning_opts = copy.copy(fake.PROVISIONING_OPTIONS) fake_autosize_attrs = copy.copy(fake.VOLUME_AUTOSIZE_ATTRS) for key in ('minimum-size', 'maximum-size'): fake_autosize_attrs[key] = int(fake_autosize_attrs[key]) * units.Ki fake_provisioning_opts['autosize_attributes'] = fake_autosize_attrs mock_modify_volume = self.mock_object(self.mock_dest_client, 'modify_volume') fake_provisioning_opts.pop('snapshot_policy', None) self.library._update_share_attributes_after_server_migration( fake.SHARE_INSTANCE, self.mock_src_client, fake_aggregate, self.mock_dest_client) mock_get_extra_spec.assert_called_once_with(fake.SHARE_INSTANCE) mock__get_provisioning_opts.assert_called_once_with(fake.EXTRA_SPEC) mock_get_vol_autosize_attrs.assert_called_once_with(fake_share_name) mock_modify_volume.assert_called_once_with( fake_aggregate, fake_share_name, **fake_provisioning_opts) def 
test_validate_provisioning_options_for_share(self): mock_create_from_snap = self.mock_object( lib_base.NetAppCmodeFileStorageLibrary, 'validate_provisioning_options_for_share') self.library.validate_provisioning_options_for_share( fake.PROVISIONING_OPTIONS, extra_specs=fake.EXTRA_SPEC, qos_specs=fake.QOS_NORMALIZED_SPEC) mock_create_from_snap.assert_called_once_with( fake.PROVISIONING_OPTIONS, extra_specs=fake.EXTRA_SPEC, qos_specs=fake.QOS_NORMALIZED_SPEC) def test_validate_provisioning_options_for_share_aqos_not_supported(self): self.assertRaises( exception.NetAppException, self.library.validate_provisioning_options_for_share, fake.PROVISIONING_OPTS_WITH_ADAPT_QOS, qos_specs=None) def test__get_different_keys_for_equal_ss_type(self): curr_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) new_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_2) new_sec_service2 = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_3) expected_keys = ['password', 'user', 'ou', 'domain', 'dns_ip', 'server'] result = self.library._get_different_keys_for_equal_ss_type( curr_sec_service, new_sec_service) self.assertEqual(expected_keys, result) expected_keys = ['password', 'user', 'ou', 'domain', 'dns_ip', 'server', 'default_ad_site'] result = self.library._get_different_keys_for_equal_ss_type( curr_sec_service, new_sec_service2) self.assertEqual(expected_keys, result) @ddt.data( {'current': None, 'new': fake.CIFS_SECURITY_SERVICE, 'existing': []}, {'current': fake.CIFS_SECURITY_SERVICE, 'new': fake.CIFS_SECURITY_SERVICE_2, 'existing': [fake.CIFS_SECURITY_SERVICE, fake.KERBEROS_SECURITY_SERVICE]}, {'current': fake.KERBEROS_SECURITY_SERVICE, 'new': fake.KERBEROS_SECURITY_SERVICE_2, 'existing': [fake.CIFS_SECURITY_SERVICE, fake.KERBEROS_SECURITY_SERVICE]}, {'current': fake.CIFS_SECURITY_SERVICE, 'new': fake.CIFS_SECURITY_SERVICE, 'existing': [fake.CIFS_SECURITY_SERVICE]}, ) @ddt.unpack def test_update_share_server_security_service(self, current, new, existing): fake_context = mock.Mock() 
fake_net_info = copy.deepcopy(fake.NETWORK_INFO_LIST) new_sec_service = copy.deepcopy(new) curr_sec_service = copy.deepcopy(current) if current else None new_type = new_sec_service['type'].lower() fake_net_info[0]['security_services'] = existing if curr_sec_service: # domain modification aren't support new_sec_service['domain'] = curr_sec_service['domain'] different_keys = [] if curr_sec_service != new_sec_service: different_keys = ['dns_ip', 'server', 'domain', 'user', 'password'] if new_sec_service.get('ou') is not None: different_keys.append('ou') fake_vserver_client = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=[fake.VSERVER1, fake_vserver_client])) mock_check_update = self.mock_object( self.library, 'check_update_share_server_security_service', mock.Mock(return_value=True)) mock_setup_sec_serv = self.mock_object( self.library._client, 'setup_security_services') mock_diff_keys = self.mock_object( self.library, '_get_different_keys_for_equal_ss_type', mock.Mock(return_value=different_keys)) mock_dns_update = self.mock_object( fake_vserver_client, 'update_dns_configuration') mock_update_krealm = self.mock_object( fake_vserver_client, 'update_kerberos_realm') mock_modify_ad = self.mock_object( fake_vserver_client, 'modify_active_directory_security_service') self.library.update_share_server_security_service( fake_context, fake.SHARE_SERVER, fake_net_info, new_sec_service, current_security_service=curr_sec_service) dns_ips = set() domains = set() # we don't need to split and strip since we know that fake have only # on dns-ip and domain configured for ss in existing: if ss['type'] != new_sec_service['type']: dns_ips.add(ss['dns_ip']) domains.add(ss['domain']) dns_ips.add(new_sec_service['dns_ip']) domains.add(new_sec_service['domain']) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) mock_check_update.assert_called_once_with( fake_context, fake.SHARE_SERVER, fake_net_info, 
new_sec_service, current_security_service=curr_sec_service) if curr_sec_service is None: mock_setup_sec_serv.assert_called_once_with( [new_sec_service], fake_vserver_client, fake.VSERVER1, False) else: mock_diff_keys.assert_called_once_with(curr_sec_service, new_sec_service) if different_keys: mock_dns_update.assert_called_once_with(dns_ips, domains) if new_type == 'kerberos': mock_update_krealm.assert_called_once_with(new_sec_service) elif new_type == 'active_directory': mock_modify_ad.assert_called_once_with( fake.VSERVER1, different_keys, new_sec_service, curr_sec_service) def test_update_share_server_security_service_check_error(self): curr_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE) new_sec_service = copy.deepcopy(fake.CIFS_SECURITY_SERVICE_2) fake_vserver_client = mock.Mock() fake_context = mock.Mock() fake_net_info = mock.Mock() mock_get_vserver = self.mock_object( self.library, '_get_vserver', mock.Mock(return_value=[fake.VSERVER1, fake_vserver_client])) mock_check_update = self.mock_object( self.library, 'check_update_share_server_security_service', mock.Mock(return_value=False)) self.assertRaises( exception.NetAppException, self.library.update_share_server_security_service, fake_context, fake.SHARE_SERVER, fake_net_info, new_sec_service, current_security_service=curr_sec_service) mock_get_vserver.assert_called_once_with( share_server=fake.SHARE_SERVER) mock_check_update.assert_called_once_with( fake_context, fake.SHARE_SERVER, fake_net_info, new_sec_service, current_security_service=curr_sec_service) @ddt.data( {'new': fake.LDAP_AD_SECURITY_SERVICE, 'current': fake.LDAP_LINUX_SECURITY_SERVICE, 'expected': True}, {'new': fake.CIFS_SECURITY_SERVICE, 'current': fake.KERBEROS_SECURITY_SERVICE, 'expected': False}, {'new': fake.CIFS_SECURITY_SERVICE, 'current': fake.CIFS_SECURITY_SERVICE, 'expected': True}, {'new': fake.KERBEROS_SECURITY_SERVICE, 'current': fake.KERBEROS_SECURITY_SERVICE, 'expected': True}, {'new': fake.CIFS_SECURITY_SERVICE, 
'current': None, 'expected': True}, ) @ddt.unpack def test_check_update_share_server_security_service(self, new, current, expected): result = self.library.check_update_share_server_security_service( None, None, None, new, current_security_service=current) self.assertEqual(expected, result) def test_check_update_share_server_network_allocations(self): net_alloc_seg_id = fake.USER_NETWORK_ALLOCATIONS[0]['segmentation_id'] network_segments = [ {'segmentation_id': net_alloc_seg_id}, {'segmentation_id': fake.SHARE_NETWORK_SUBNET['segmentation_id']} ] mock__validate_network_type = self.mock_object( self.library, '_validate_network_type') mock__validate_share_network_subnets = self.mock_object( self.library, '_validate_share_network_subnets') result = self.library.check_update_share_server_network_allocations( None, fake.SHARE_SERVER, fake.CURRENT_NETWORK_ALLOCATIONS, fake.SHARE_NETWORK_SUBNET, None, None, None) self.assertTrue(result) mock__validate_network_type.assert_called_once_with( [fake.SHARE_NETWORK_SUBNET]) mock__validate_share_network_subnets.assert_called_once_with( network_segments) def test_check_update_share_server_network_allocations_fail_on_type(self): network_exception = exception.NetworkBadConfigurationException( reason='fake exception message') mock_validate_network_type = self.mock_object( self.library, '_validate_network_type', mock.Mock(side_effect=network_exception)) mock_validate_share_network_subnets = self.mock_object( self.library, '_validate_share_network_subnets') result = self.library.check_update_share_server_network_allocations( None, fake.SHARE_SERVER, fake.CURRENT_NETWORK_ALLOCATIONS, fake.SHARE_NETWORK_SUBNET, None, None, None) self.assertFalse(result) mock_validate_network_type.assert_called_once_with( [fake.SHARE_NETWORK_SUBNET]) mock_validate_share_network_subnets.assert_not_called() def test_check_update_share_server_network_allocations_subnets_error(self): net_alloc_seg_id = fake.USER_NETWORK_ALLOCATIONS[0]['segmentation_id'] 
network_segments = [ {'segmentation_id': net_alloc_seg_id}, {'segmentation_id': fake.SHARE_NETWORK_SUBNET['segmentation_id']} ] network_exception = exception.NetworkBadConfigurationException( reason='fake exception message') mock__validate_network_type = self.mock_object( self.library, '_validate_network_type') mock__validate_share_network_subnets = self.mock_object( self.library, '_validate_share_network_subnets', mock.Mock(side_effect=network_exception)) result = self.library.check_update_share_server_network_allocations( None, fake.SHARE_SERVER, fake.CURRENT_NETWORK_ALLOCATIONS, fake.SHARE_NETWORK_SUBNET, None, None, None) self.assertFalse(result) mock__validate_network_type.assert_called_once_with( [fake.SHARE_NETWORK_SUBNET]) mock__validate_share_network_subnets.assert_called_once_with( network_segments) @ddt.data(True, False) def test_build_model_update(self, has_export_locations): server_model_update = copy.deepcopy(fake.SERVER_MODEL_UPDATE) export_locations = server_model_update['share_updates'] if not has_export_locations: export_locations = None del server_model_update['share_updates'] result = self.library._build_model_update( fake.CURRENT_NETWORK_ALLOCATIONS, fake.NEW_NETWORK_ALLOCATIONS, export_locations=export_locations) self.assertEqual(server_model_update, result) @ddt.data('active', 'dr') def test_update_share_server_network_allocations(self, replica_state): fake_context = mock.Mock() fake_share_server = fake.SHARE_SERVER fake_current_network_allocations = fake.USER_NETWORK_ALLOCATIONS fake_new_network_allocations = fake.USER_NETWORK_ALLOCATIONS fake_share_instances = [copy.deepcopy(fake.SHARE_INSTANCE)] fake_share_instances[0]['replica_state'] = replica_state fake_vserver_name = fake.VSERVER1 fake_vserver_client = mock.Mock() fake_ipspace_name = fake.IPSPACE fake_export_locations = fake.NFS_EXPORTS[0] fake_updates = fake.SERVER_MODEL_UPDATE fake_updated_export_locations = { fake_share_instances[0]['id']: fake_export_locations, } 
self.mock_object(self.library, '_get_vserver_name', mock.Mock(return_value=fake_vserver_name)) self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=fake_vserver_client)) self.mock_object(self.library._client, 'get_vserver_ipspace', mock.Mock(return_value=fake_ipspace_name)) self.mock_object(self.library, '_setup_network_for_vserver') self.mock_object(self.library, '_create_export', mock.Mock(return_value=fake_export_locations)) self.mock_object(self.library, '_build_model_update', mock.Mock(return_value=fake_updates)) self.assertEqual( fake_updates, self.library.update_share_server_network_allocations( fake_context, fake_share_server, fake_current_network_allocations, fake_new_network_allocations, None, fake_share_instances, None)) self.library._get_vserver_name.assert_called_once_with( fake_share_server['id']) self.library._get_api_client.assert_called_once_with( vserver=fake_vserver_name) self.library._client.get_vserver_ipspace.assert_called_once_with( fake_vserver_name) self.library._setup_network_for_vserver.assert_called_once_with( fake_vserver_name, fake_vserver_client, [fake_new_network_allocations], fake_ipspace_name, enable_nfs=False, security_services=None, nfs_config=None) if replica_state == 'active': self.library._create_export.assert_called_once_with( fake_share_instances[0], fake_share_server, fake_vserver_name, fake_vserver_client, clear_current_export_policy=False, ensure_share_already_exists=True, share_host=fake_share_instances[0]['host']) else: self.library._create_export.assert_not_called() fake_updated_export_locations = {} self.library._build_model_update.assert_called_once_with( fake_current_network_allocations, fake_new_network_allocations, fake_updated_export_locations) def test_update_share_server_network_allocations_setup_network_fail(self): fake_context = mock.Mock() fake_share_server = fake.SHARE_SERVER fake_current_network_allocations = fake.USER_NETWORK_ALLOCATIONS fake_new_network_allocations = 
fake.USER_NETWORK_ALLOCATIONS fake_share_instances = [fake.SHARE_INSTANCE] fake_updates = fake.SERVER_MODEL_UPDATE self.mock_object(self.library, '_get_vserver_name') self.mock_object(self.library, '_get_api_client') self.mock_object(self.library._client, 'get_vserver_ipspace') self.mock_object(self.library, '_setup_network_for_vserver', mock.Mock(side_effect=netapp_api.NaApiError)) self.mock_object(self.library, '_build_model_update', mock.Mock(return_value=fake_updates)) self.assertRaises(netapp_api.NaApiError, self.library.update_share_server_network_allocations, fake_context, fake_share_server, fake_current_network_allocations, fake_new_network_allocations, None, fake_share_instances, None) self.library._build_model_update.assert_called_once_with( fake_current_network_allocations, fake_new_network_allocations, export_locations=None) def test__get_backup_vserver(self): mock_dest_client = mock.Mock() self.mock_object(self.library, '_get_backend', mock.Mock(return_value=fake.BACKEND_NAME)) self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=_get_config())) self.mock_object(self.library, '_get_api_client_for_backend', mock.Mock(return_value=mock_dest_client)) self.mock_object(mock_dest_client, 'list_non_root_aggregates', mock.Mock(return_value=['aggr1', 'aggr2'])) self.mock_object(mock_dest_client, 'create_vserver', mock.Mock(side_effect=netapp_api.NaApiError( message='Vserver name is already used by another' ' Vserver'))) self.library._get_backup_vserver(fake.SHARE_BACKUP, fake.SHARE_SERVER) def test__delete_backup_vserver(self): mock_api_client = mock.Mock() self.mock_object(self.library, '_get_backend', mock.Mock(return_value=fake.BACKEND_NAME)) self.mock_object(self.library, '_get_api_client_for_backend', mock.Mock(return_value=mock_api_client)) des_vserver = fake.VSERVER2 msg = (f"Cannot delete Vserver. 
Vserver {des_vserver} " f"has shares.") self.mock_object(mock_api_client, 'delete_vserver', mock.Mock( side_effect=exception.NetAppException( message=msg))) self.library._delete_backup_vserver(fake.SHARE_BACKUP, des_vserver) def test__check_data_lif_count_limit_reached_for_ha_pair_false(self): nodes = ["node1", "node2"] lif_detail = [{'node': "node1", 'count-for-node': '44', 'limit-for-node': '512'}, {'node': "node2", 'count-for-node': '50', 'limit-for-node': '512'}] self.mock_object(self.client, 'get_storage_failover_partner', mock.Mock(return_value="node2")) self.mock_object(self.client, 'list_cluster_nodes', mock.Mock(return_value=nodes)) self.mock_object(self.client, 'get_data_lif_details_for_nodes', mock.Mock(return_value=lif_detail)) self.mock_object(self.client, 'get_migratable_data_lif_for_node', mock.Mock(return_value=["data_lif_1", "data_lif_2"])) self.library._check_data_lif_count_limit_reached_for_ha_pair( self.client) def test__check_data_lif_count_limit_reached_for_ha_pair_true(self): nodes = ["node1", "node2"] lif_detail = [{'node': "node1", 'count-for-node': '511', 'limit-for-node': '512'}, {'node': "node2", 'count-for-node': '250', 'limit-for-node': '512'}] self.mock_object(self.client, 'get_storage_failover_partner', mock.Mock(return_value="node2")) self.mock_object(self.client, 'list_cluster_nodes', mock.Mock(return_value=nodes)) self.mock_object(self.client, 'get_data_lif_details_for_nodes', mock.Mock(return_value=lif_detail)) self.mock_object(self.client, 'get_migratable_data_lif_for_node', mock.Mock(return_value=["data_lif_1", "data_lif_2"])) self.assertRaises( exception.NetAppException, self.library._check_data_lif_count_limit_reached_for_ha_pair, self.client, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_single_svm.py0000664000175000017500000003253000000000000032600 0ustar00zuulzuul00000000000000# 
Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for the NetApp Data ONTAP cDOT single-SVM storage driver library. """ from unittest import mock import ddt from oslo_log import log from manila import exception from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base from manila.share.drivers.netapp.dataontap.cluster_mode import lib_single_svm from manila.share.drivers.netapp import utils as na_utils from manila import test from manila.tests.share.drivers.netapp.dataontap.cluster_mode.test_lib_base\ import _get_config import manila.tests.share.drivers.netapp.dataontap.fakes as fake @ddt.ddt class NetAppFileStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppFileStorageLibraryTestCase, self).setUp() self.mock_object(na_utils, 'validate_driver_instantiation') # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(lib_single_svm.LOG, 'info', mock.Mock(side_effect=mock_logger.info)) config = fake.get_config_cmode() config.netapp_vserver = fake.VSERVER1 kwargs = { 'configuration': config, 'private_storage': mock.Mock(), 'app_version': fake.APP_VERSION } self.library = lib_single_svm.NetAppCmodeSingleSVMFileStorageLibrary( fake.DRIVER_NAME, **kwargs) self.library._client = mock.Mock() self.client = self.library._client def test_init(self): 
self.assertEqual(fake.VSERVER1, self.library._vserver) def test_check_for_setup_error(self): self.library._client.vserver_exists.return_value = True self.library._have_cluster_creds = True mock_client = mock.Mock() mock_client.list_vserver_aggregates.return_value = fake.AGGREGATES self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=mock_client)) mock_init_flexgroup = self.mock_object(self.library, '_initialize_flexgroup_pools') self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=True)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, 'check_for_setup_error') self.library.check_for_setup_error() self.assertTrue(lib_single_svm.LOG.info.called) mock_super.assert_called_once_with() mock_client.list_vserver_aggregates.assert_called_once_with() self.assertTrue(self.library._get_api_client.called) mock_init_flexgroup.assert_called_once_with(set(fake.AGGREGATES)) self.assertTrue(self.library.is_flexvol_pool_configured.called) self.assertTrue(self.library._find_matching_aggregates.called) def test_check_for_setup_error_no_vserver(self): self.library._vserver = None self.assertRaises(exception.InvalidInput, self.library.check_for_setup_error) def test_check_for_setup_error_vserver_not_found(self): self.library._client.vserver_exists.return_value = False self.assertRaises(exception.VserverNotFound, self.library.check_for_setup_error) def test_check_for_setup_error_cluster_creds_vserver_match(self): self.library._client.vserver_exists.return_value = True self.library._have_cluster_creds = False self.library._client.list_vservers.return_value = [fake.VSERVER1] mock_client = mock.Mock() mock_client.list_vserver_aggregates.return_value = fake.AGGREGATES self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=mock_client)) mock_init_flexgroup = self.mock_object(self.library, 
'_initialize_flexgroup_pools') self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=True)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=fake.AGGREGATES)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, 'check_for_setup_error') self.library.check_for_setup_error() mock_super.assert_called_once_with() mock_client.list_vserver_aggregates.assert_called_once_with() self.assertTrue(self.library._get_api_client.called) mock_init_flexgroup.assert_called_once_with(set(fake.AGGREGATES)) self.assertTrue(self.library.is_flexvol_pool_configured.called) self.assertTrue(self.library._find_matching_aggregates.called) def test_check_for_setup_error_no_aggregates_no_flexvol_pool(self): self.library._client.vserver_exists.return_value = True self.library._have_cluster_creds = True mock_client = mock.Mock() mock_client.list_vserver_aggregates.return_value = fake.AGGREGATES self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=mock_client)) self.mock_object(self.library, '_initialize_flexgroup_pools') self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=False)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=[])) self.library.check_for_setup_error() self.assertTrue(self.library.is_flexvol_pool_configured.called) self.assertTrue(self.library._find_matching_aggregates.called) def test_check_for_setup_error_cluster_creds_vserver_mismatch(self): self.library._client.vserver_exists.return_value = True self.library._have_cluster_creds = False self.library._client.list_vservers.return_value = [fake.VSERVER2] self.assertRaises(exception.InvalidInput, self.library.check_for_setup_error) def test_check_for_setup_error_no_aggregates(self): self.library._client.vserver_exists.return_value = True self.library._have_cluster_creds = True mock_client = mock.Mock() mock_client.list_vserver_aggregates.return_value = 
fake.AGGREGATES self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=mock_client)) self.mock_object(self.library, '_initialize_flexgroup_pools') self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=True)) self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=[])) self.assertRaises(exception.NetAppException, self.library.check_for_setup_error) self.assertTrue(self.library.is_flexvol_pool_configured.called) self.assertTrue(self.library._find_matching_aggregates.called) def test_get_vserver(self): self.library._client.vserver_exists.return_value = True self.mock_object(self.library, '_get_api_client', mock.Mock(return_value='fake_client')) result_vserver, result_vserver_client = self.library._get_vserver() self.assertEqual(fake.VSERVER1, result_vserver) self.assertEqual('fake_client', result_vserver_client) def test_get_vserver_share_server_specified(self): self.assertRaises(exception.InvalidParameterValue, self.library._get_vserver, share_server=fake.SHARE_SERVER) def test_get_vserver_no_vserver(self): self.library._vserver = None self.assertRaises(exception.InvalidInput, self.library._get_vserver) def test_get_vserver_vserver_not_found(self): self.library._client.vserver_exists.return_value = False self.assertRaises(exception.VserverNotFound, self.library._get_vserver) def test_get_ems_pool_info(self): self.mock_object(self.library, '_find_matching_aggregates', mock.Mock(return_value=['aggr1', 'aggr2'])) self.library._flexgroup_pools = {'fg': ['aggr1', 'aggr2']} result = self.library._get_ems_pool_info() expected = { 'pools': { 'vserver': fake.VSERVER1, 'aggregates': ['aggr1', 'aggr2'], 'flexgroup_aggregates': {'fg': ['aggr1', 'aggr2']}, }, } self.assertEqual(expected, result) @ddt.data(True, False) def test_handle_housekeeping_tasks_with_cluster_creds(self, have_creds): self.library._have_cluster_creds = have_creds mock_vserver_client = mock.Mock() self.mock_object(self.library, 
'_get_api_client', mock.Mock(return_value=mock_vserver_client)) mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary, '_handle_housekeeping_tasks') self.library._handle_housekeeping_tasks() self.assertTrue( mock_vserver_client.prune_deleted_nfs_export_policies.called) self.assertTrue(mock_vserver_client.prune_deleted_snapshots.called) self.assertIs( have_creds, mock_vserver_client.remove_unused_qos_policy_groups.called) self.assertTrue(mock_super.called) @ddt.data(True, False) def test_find_matching_aggregates(self, have_cluster_creds): self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=True)) self.library._have_cluster_creds = have_cluster_creds aggregates = fake.AGGREGATES + fake.ROOT_AGGREGATES mock_vserver_client = mock.Mock() mock_vserver_client.list_vserver_aggregates.return_value = aggregates self.mock_object(self.library, '_get_api_client', mock.Mock(return_value=mock_vserver_client)) mock_client = mock.Mock() mock_client.list_root_aggregates.return_value = fake.ROOT_AGGREGATES self.library._client = mock_client self.library.configuration.netapp_aggregate_name_search_pattern = ( '.*_aggr_1') result = self.library._find_matching_aggregates() if have_cluster_creds: self.assertListEqual([fake.AGGREGATES[0]], result) mock_client.list_root_aggregates.assert_called_once_with() else: self.assertListEqual([fake.AGGREGATES[0], fake.ROOT_AGGREGATES[0]], result) self.assertFalse(mock_client.list_root_aggregates.called) def test_find_matching_aggregates_no_flexvol_pool(self): self.mock_object(self.library, 'is_flexvol_pool_configured', mock.Mock(return_value=False)) result = self.library._find_matching_aggregates() self.assertListEqual([], result) def test_get_network_allocations_number(self): self.assertEqual(0, self.library.get_network_allocations_number()) def test_get_admin_network_allocations_number(self): result = self.library.get_admin_network_allocations_number() self.assertEqual(0, result) def 
test__get_backup_vserver(self): self.mock_object(self.library, '_get_backend', mock.Mock(return_value=fake.BACKEND_NAME)) self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=_get_config())) self.library._get_backup_vserver(fake.SHARE_BACKUP) def test__get_backup_vserver_with_share_server_negative(self): self.mock_object(self.library, '_get_backend', mock.Mock(return_value=fake.BACKEND_NAME)) self.mock_object(data_motion, 'get_backend_configuration', mock.Mock(return_value=_get_config())) self.assertRaises( exception.InvalidParameterValue, self.library._get_backup_vserver, fake.SHARE_BACKUP, fake.SHARE_SERVER, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_performance.py0000664000175000017500000010351500000000000032107 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from manila import exception from manila.share.drivers.netapp.dataontap.client import api as netapp_api from manila.share.drivers.netapp.dataontap.cluster_mode import performance from manila import test from manila.tests.share.drivers.netapp.dataontap import fakes as fake @ddt.ddt class PerformanceLibraryTestCase(test.TestCase): def setUp(self): super(PerformanceLibraryTestCase, self).setUp() with mock.patch.object(performance.PerformanceLibrary, '_init_counter_info'): self.zapi_client = mock.Mock() self.perf_library = performance.PerformanceLibrary( self.zapi_client) self.perf_library.system_object_name = 'system' self.perf_library.avg_processor_busy_base_counter_name = ( 'cpu_elapsed_time1') self._set_up_fake_pools() def _set_up_fake_pools(self): self.fake_volumes = { 'pool1': { 'netapp_aggregate': 'aggr1', }, 'pool2': { 'netapp_aggregate': 'aggr2', }, 'pool3': { 'netapp_aggregate': 'aggr2', }, } self.fake_aggregates = { 'pool4': { 'netapp_aggregate': 'aggr3', }, 'flexgroup_pool': { 'netapp_aggregate': 'aggr1 aggr2', 'netapp_flexgroup': True, }, } self.fake_aggr_names = ['aggr1', 'aggr2', 'aggr3'] self.fake_nodes = ['node1', 'node2'] self.fake_aggr_node_map = { 'aggr1': 'node1', 'aggr2': 'node2', 'aggr3': 'node2', } def _get_fake_counters(self): return { 'node1': list(range(11, 21)), 'node2': list(range(21, 31)), } def test_init(self): mock_zapi_client = mock.Mock() mock_init_counter_info = self.mock_object( performance.PerformanceLibrary, '_init_counter_info') library = performance.PerformanceLibrary(mock_zapi_client) self.assertEqual(mock_zapi_client, library.zapi_client) mock_init_counter_info.assert_called_once_with() def test_init_counter_info_not_supported(self): self.zapi_client.features.SYSTEM_METRICS = False self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False mock_get_base_counter_name = self.mock_object( self.perf_library, '_get_base_counter_name') self.perf_library._init_counter_info() 
self.assertIsNone(self.perf_library.system_object_name) self.assertIsNone( self.perf_library.avg_processor_busy_base_counter_name) self.assertFalse(mock_get_base_counter_name.called) @ddt.data({ 'system_constituent': False, 'base_counter': 'cpu_elapsed_time1', }, { 'system_constituent': True, 'base_counter': 'cpu_elapsed_time', }) @ddt.unpack def test_init_counter_info_api_error(self, system_constituent, base_counter): self.zapi_client.features.SYSTEM_METRICS = True self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = ( system_constituent) self.mock_object(self.perf_library, '_get_base_counter_name', mock.Mock(side_effect=netapp_api.NaApiError)) self.perf_library._init_counter_info() self.assertEqual( base_counter, self.perf_library.avg_processor_busy_base_counter_name) def test_init_counter_info_system(self): self.zapi_client.features.SYSTEM_METRICS = True self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False mock_get_base_counter_name = self.mock_object( self.perf_library, '_get_base_counter_name', mock.Mock(return_value='cpu_elapsed_time1')) self.perf_library._init_counter_info() self.assertEqual('system', self.perf_library.system_object_name) self.assertEqual( 'cpu_elapsed_time1', self.perf_library.avg_processor_busy_base_counter_name) mock_get_base_counter_name.assert_called_once_with( 'system', 'avg_processor_busy') def test_init_counter_info_system_constituent(self): self.zapi_client.features.SYSTEM_METRICS = False self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = True mock_get_base_counter_name = self.mock_object( self.perf_library, '_get_base_counter_name', mock.Mock(return_value='cpu_elapsed_time')) self.perf_library._init_counter_info() self.assertEqual('system:constituent', self.perf_library.system_object_name) self.assertEqual( 'cpu_elapsed_time', self.perf_library.avg_processor_busy_base_counter_name) mock_get_base_counter_name.assert_called_once_with( 'system:constituent', 'avg_processor_busy') def test_update_performance_cache(self): 
self.perf_library.performance_counters = self._get_fake_counters() mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', mock.Mock(return_value=self.fake_aggr_names)) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', mock.Mock(return_value=(self.fake_nodes, self.fake_aggr_node_map))) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', mock.Mock(side_effect=[21, 31])) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', mock.Mock(side_effect=[25, 75])) self.perf_library.update_performance_cache(self.fake_volumes, self.fake_aggregates) expected_performance_counters = { 'node1': list(range(12, 22)), 'node2': list(range(22, 32)), } self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': 25, 'pool2': 75, 'pool3': 75, 'pool4': 75, 'flexgroup_pool': performance.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes, self.fake_aggregates) mock_get_nodes_for_aggregates.assert_called_once_with( self.fake_aggr_names) mock_get_node_utilization_counters.assert_has_calls([ mock.call('node1'), mock.call('node2')]) mock_get_node_utilization.assert_has_calls([ mock.call(12, 21, 'node1'), mock.call(22, 31, 'node2')]) def test_update_performance_cache_first_pass(self): mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', mock.Mock(return_value=self.fake_aggr_names)) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', mock.Mock(return_value=(self.fake_nodes, self.fake_aggr_node_map))) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', mock.Mock(side_effect=[11, 21])) 
mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', mock.Mock(side_effect=[25, 75])) self.perf_library.update_performance_cache(self.fake_volumes, self.fake_aggregates) expected_performance_counters = {'node1': [11], 'node2': [21]} self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': performance.DEFAULT_UTILIZATION, 'pool2': performance.DEFAULT_UTILIZATION, 'pool3': performance.DEFAULT_UTILIZATION, 'pool4': performance.DEFAULT_UTILIZATION, 'flexgroup_pool': performance.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes, self.fake_aggregates) mock_get_nodes_for_aggregates.assert_called_once_with( self.fake_aggr_names) mock_get_node_utilization_counters.assert_has_calls([ mock.call('node1'), mock.call('node2')]) self.assertFalse(mock_get_node_utilization.called) def test_update_performance_cache_unknown_nodes(self): self.perf_library.performance_counters = self._get_fake_counters() mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', mock.Mock(return_value=self.fake_aggr_names)) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', mock.Mock(return_value=([], {}))) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', mock.Mock(side_effect=[11, 21])) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', mock.Mock(side_effect=[25, 75])) self.perf_library.update_performance_cache(self.fake_volumes, self.fake_aggregates) self.assertEqual(self._get_fake_counters(), self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': performance.DEFAULT_UTILIZATION, 'pool2': performance.DEFAULT_UTILIZATION, 'pool3': performance.DEFAULT_UTILIZATION, 
'pool4': performance.DEFAULT_UTILIZATION, 'flexgroup_pool': performance.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes, self.fake_aggregates) mock_get_nodes_for_aggregates.assert_called_once_with( self.fake_aggr_names) self.assertFalse(mock_get_node_utilization_counters.called) self.assertFalse(mock_get_node_utilization.called) def test_update_performance_cache_counters_unavailable(self): self.perf_library.performance_counters = self._get_fake_counters() mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', mock.Mock(return_value=self.fake_aggr_names)) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', mock.Mock(return_value=(self.fake_nodes, self.fake_aggr_node_map))) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', mock.Mock(side_effect=[None, None])) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', mock.Mock(side_effect=[25, 75])) self.perf_library.update_performance_cache(self.fake_volumes, self.fake_aggregates) self.assertEqual(self._get_fake_counters(), self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': performance.DEFAULT_UTILIZATION, 'pool2': performance.DEFAULT_UTILIZATION, 'pool3': performance.DEFAULT_UTILIZATION, 'pool4': performance.DEFAULT_UTILIZATION, 'flexgroup_pool': performance.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes, self.fake_aggregates) mock_get_nodes_for_aggregates.assert_called_once_with( self.fake_aggr_names) mock_get_node_utilization_counters.assert_has_calls([ mock.call('node1'), mock.call('node2')]) self.assertFalse(mock_get_node_utilization.called) def 
test_update_performance_cache_not_supported(self): self.zapi_client.features.SYSTEM_METRICS = False self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools') self.perf_library.update_performance_cache(self.fake_volumes, self.fake_aggregates) expected_performance_counters = {} self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = {} self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) self.assertFalse(mock_get_aggregates_for_pools.called) @ddt.data({'pool': 'pool1', 'expected': 10.0}, {'pool': 'pool3', 'expected': performance.DEFAULT_UTILIZATION}) @ddt.unpack def test_get_node_utilization_for_pool(self, pool, expected): self.perf_library.pool_utilization = {'pool1': 10.0, 'pool2': 15.0} result = self.perf_library.get_node_utilization_for_pool(pool) self.assertAlmostEqual(expected, result) def test__update_for_failover(self): self.mock_object(self.perf_library, 'update_performance_cache') mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT') self.perf_library.update_for_failover(mock_client, self.fake_volumes, self.fake_aggregates) self.assertEqual(mock_client, self.perf_library.zapi_client) self.perf_library.update_performance_cache.assert_called_once_with( self.fake_volumes, self.fake_aggregates) def test_get_aggregates_for_pools(self): result = self.perf_library._get_aggregates_for_pools( self.fake_volumes, self.fake_aggregates) expected_aggregate_names = ['aggr1', 'aggr2', 'aggr3'] self.assertEqual(sorted(expected_aggregate_names), sorted(result)) def test_get_nodes_for_aggregates(self): aggregate_names = ['aggr1', 'aggr2', 'aggr3'] aggregate_nodes = ['node1', 'node2', 'node2'] mock_get_node_for_aggregate = self.mock_object( self.zapi_client, 'get_node_for_aggregate', mock.Mock(side_effect=aggregate_nodes)) result = self.perf_library._get_nodes_for_aggregates(aggregate_names) 
self.assertEqual(2, len(result)) result_node_names, result_aggr_node_map = result expected_node_names = ['node1', 'node2'] expected_aggr_node_map = dict(zip(aggregate_names, aggregate_nodes)) self.assertEqual(sorted(expected_node_names), sorted(result_node_names)) self.assertEqual(expected_aggr_node_map, result_aggr_node_map) mock_get_node_for_aggregate.assert_has_calls([ mock.call('aggr1'), mock.call('aggr2'), mock.call('aggr3')]) def test_get_node_utilization_kahuna_overutilized(self): mock_get_kahuna_utilization = self.mock_object( self.perf_library, '_get_kahuna_utilization', mock.Mock(return_value=61.0)) mock_get_average_cpu_utilization = self.mock_object( self.perf_library, '_get_average_cpu_utilization', mock.Mock(return_value=25.0)) result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') self.assertAlmostEqual(100.0, result) mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') self.assertFalse(mock_get_average_cpu_utilization.called) @ddt.data({'cpu': -0.01, 'cp_time': 10000, 'poll_time': 0}, {'cpu': 1.01, 'cp_time': 0, 'poll_time': 1000}, {'cpu': 0.50, 'cp_time': 0, 'poll_time': 0}) @ddt.unpack def test_get_node_utilization_zero_time(self, cpu, cp_time, poll_time): mock_get_kahuna_utilization = self.mock_object( self.perf_library, '_get_kahuna_utilization', mock.Mock(return_value=59.0)) mock_get_average_cpu_utilization = self.mock_object( self.perf_library, '_get_average_cpu_utilization', mock.Mock(return_value=cpu)) mock_get_total_consistency_point_time = self.mock_object( self.perf_library, '_get_total_consistency_point_time', mock.Mock(return_value=cp_time)) mock_get_consistency_point_p2_flush_time = self.mock_object( self.perf_library, '_get_consistency_point_p2_flush_time', mock.Mock(return_value=cp_time)) mock_get_total_time = self.mock_object( self.perf_library, '_get_total_time', mock.Mock(return_value=poll_time)) mock_get_adjusted_consistency_point_time = self.mock_object( self.perf_library, 
'_get_adjusted_consistency_point_time') result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') expected = max(min(100.0, 100.0 * cpu), 0) self.assertEqual(expected, result) mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') mock_get_average_cpu_utilization.assert_called_once_with('fake1', 'fake2') mock_get_total_consistency_point_time.assert_called_once_with('fake1', 'fake2') mock_get_consistency_point_p2_flush_time.assert_called_once_with( 'fake1', 'fake2') mock_get_total_time.assert_called_once_with('fake1', 'fake2', 'total_cp_msecs') self.assertFalse(mock_get_adjusted_consistency_point_time.called) @ddt.data({'cpu': 0.75, 'adjusted_cp_time': 8000, 'expected': 80}, {'cpu': 0.80, 'adjusted_cp_time': 7500, 'expected': 80}, {'cpu': 0.50, 'adjusted_cp_time': 11000, 'expected': 100}) @ddt.unpack def test_get_node_utilization(self, cpu, adjusted_cp_time, expected): mock_get_kahuna_utilization = self.mock_object( self.perf_library, '_get_kahuna_utilization', mock.Mock(return_value=59.0)) mock_get_average_cpu_utilization = self.mock_object( self.perf_library, '_get_average_cpu_utilization', mock.Mock(return_value=cpu)) mock_get_total_consistency_point_time = self.mock_object( self.perf_library, '_get_total_consistency_point_time', mock.Mock(return_value=90.0)) mock_get_consistency_point_p2_flush_time = self.mock_object( self.perf_library, '_get_consistency_point_p2_flush_time', mock.Mock(return_value=50.0)) mock_get_total_time = self.mock_object( self.perf_library, '_get_total_time', mock.Mock(return_value=10000)) mock_get_adjusted_consistency_point_time = self.mock_object( self.perf_library, '_get_adjusted_consistency_point_time', mock.Mock(return_value=adjusted_cp_time)) result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') self.assertEqual(expected, result) mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') mock_get_average_cpu_utilization.assert_called_once_with('fake1', 'fake2') 
mock_get_total_consistency_point_time.assert_called_once_with('fake1', 'fake2') mock_get_consistency_point_p2_flush_time.assert_called_once_with( 'fake1', 'fake2') mock_get_total_time.assert_called_once_with('fake1', 'fake2', 'total_cp_msecs') mock_get_adjusted_consistency_point_time.assert_called_once_with( 90.0, 50.0) def test_get_node_utilization_calculation_error(self): self.mock_object(self.perf_library, '_get_kahuna_utilization', mock.Mock(return_value=59.0)) self.mock_object(self.perf_library, '_get_average_cpu_utilization', mock.Mock(return_value=25.0)) self.mock_object(self.perf_library, '_get_total_consistency_point_time', mock.Mock(return_value=90.0)) self.mock_object(self.perf_library, '_get_consistency_point_p2_flush_time', mock.Mock(return_value=50.0)) self.mock_object(self.perf_library, '_get_total_time', mock.Mock(return_value=10000)) self.mock_object(self.perf_library, '_get_adjusted_consistency_point_time', mock.Mock(side_effect=ZeroDivisionError)) result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') self.assertEqual(performance.DEFAULT_UTILIZATION, result) (self.perf_library._get_adjusted_consistency_point_time. 
assert_called_once_with(mock.ANY, mock.ANY)) def test_get_kahuna_utilization(self): mock_get_performance_counter = self.mock_object( self.perf_library, '_get_performance_counter_average_multi_instance', mock.Mock(return_value=[0.2, 0.3])) result = self.perf_library._get_kahuna_utilization('fake_t1', 'fake_t2') self.assertAlmostEqual(50.0, result) mock_get_performance_counter.assert_called_once_with( 'fake_t1', 'fake_t2', 'domain_busy:kahuna', 'processor_elapsed_time') def test_get_average_cpu_utilization(self): mock_get_performance_counter_average = self.mock_object( self.perf_library, '_get_performance_counter_average', mock.Mock(return_value=0.45)) result = self.perf_library._get_average_cpu_utilization('fake_t1', 'fake_t2') self.assertAlmostEqual(0.45, result) mock_get_performance_counter_average.assert_called_once_with( 'fake_t1', 'fake_t2', 'avg_processor_busy', 'cpu_elapsed_time1') def test_get_total_consistency_point_time(self): mock_get_performance_counter_delta = self.mock_object( self.perf_library, '_get_performance_counter_delta', mock.Mock(return_value=500)) result = self.perf_library._get_total_consistency_point_time( 'fake_t1', 'fake_t2') self.assertEqual(500, result) mock_get_performance_counter_delta.assert_called_once_with( 'fake_t1', 'fake_t2', 'total_cp_msecs') def test_get_consistency_point_p2_flush_time(self): mock_get_performance_counter_delta = self.mock_object( self.perf_library, '_get_performance_counter_delta', mock.Mock(return_value=500)) result = self.perf_library._get_consistency_point_p2_flush_time( 'fake_t1', 'fake_t2') self.assertEqual(500, result) mock_get_performance_counter_delta.assert_called_once_with( 'fake_t1', 'fake_t2', 'cp_phase_times:p2_flush') def test_get_total_time(self): mock_find_performance_counter_timestamp = self.mock_object( self.perf_library, '_find_performance_counter_timestamp', mock.Mock(side_effect=[100, 105])) result = self.perf_library._get_total_time('fake_t1', 'fake_t2', 'fake_counter') 
self.assertEqual(5000, result) mock_find_performance_counter_timestamp.assert_has_calls([ mock.call('fake_t1', 'fake_counter'), mock.call('fake_t2', 'fake_counter')]) def test_get_adjusted_consistency_point_time(self): result = self.perf_library._get_adjusted_consistency_point_time( 500, 200) self.assertAlmostEqual(360.0, result) def test_get_performance_counter_delta(self): result = self.perf_library._get_performance_counter_delta( fake.COUNTERS_T1, fake.COUNTERS_T2, 'total_cp_msecs') self.assertEqual(1482, result) def test_get_performance_counter_average(self): result = self.perf_library._get_performance_counter_average( fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna', 'processor_elapsed_time', 'processor0') self.assertAlmostEqual(0.00281954360981, result) def test_get_performance_counter_average_multi_instance(self): result = ( self.perf_library._get_performance_counter_average_multi_instance( fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna', 'processor_elapsed_time')) expected = [0.002819543609809441, 0.0033421611147606135] self.assertAlmostEqual(expected, result) def test_find_performance_counter_value(self): result = self.perf_library._find_performance_counter_value( fake.COUNTERS_T1, 'domain_busy:kahuna', instance_name='processor0') self.assertEqual('2712467226', result) def test_find_performance_counter_value_not_found(self): self.assertRaises( exception.NotFound, self.perf_library._find_performance_counter_value, fake.COUNTERS_T1, 'invalid', instance_name='processor0') def test_find_performance_counter_timestamp(self): result = self.perf_library._find_performance_counter_timestamp( fake.COUNTERS_T1, 'domain_busy') self.assertEqual('1453573777', result) def test_find_performance_counter_timestamp_not_found(self): self.assertRaises( exception.NotFound, self.perf_library._find_performance_counter_timestamp, fake.COUNTERS_T1, 'invalid', instance_name='processor0') def test_expand_performance_array(self): counter_info = { 'labels': ['idle', 
'kahuna', 'storage', 'exempt'], 'name': 'domain_busy', } self.zapi_client.get_performance_counter_info = mock.Mock( return_value=counter_info) counter = { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy': '969142314286,2567571412,2131582146,5383861579', 'instance-name': 'processor0', 'timestamp': '1453512244', } self.perf_library._expand_performance_array('wafl', 'domain_busy', counter) modified_counter = { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy': '969142314286,2567571412,2131582146,5383861579', 'instance-name': 'processor0', 'timestamp': '1453512244', 'domain_busy:idle': '969142314286', 'domain_busy:kahuna': '2567571412', 'domain_busy:storage': '2131582146', 'domain_busy:exempt': '5383861579', } self.assertEqual(modified_counter, counter) def test_get_base_counter_name(self): counter_info = { 'base-counter': 'cpu_elapsed_time', 'labels': [], 'name': 'avg_processor_busy', } self.zapi_client.get_performance_counter_info = mock.Mock( return_value=counter_info) result = self.perf_library._get_base_counter_name( 'system:constituent', 'avg_processor_busy') self.assertEqual('cpu_elapsed_time', result) def test_get_node_utilization_counters(self): mock_get_node_utilization_system_counters = self.mock_object( self.perf_library, '_get_node_utilization_system_counters', mock.Mock(return_value=['A', 'B', 'C'])) mock_get_node_utilization_wafl_counters = self.mock_object( self.perf_library, '_get_node_utilization_wafl_counters', mock.Mock(return_value=['D', 'E', 'F'])) mock_get_node_utilization_processor_counters = self.mock_object( self.perf_library, '_get_node_utilization_processor_counters', mock.Mock(return_value=['G', 'H', 'I'])) result = self.perf_library._get_node_utilization_counters(fake.NODE) expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] self.assertEqual(expected, result) mock_get_node_utilization_system_counters.assert_called_once_with( fake.NODE) 
mock_get_node_utilization_wafl_counters.assert_called_once_with( fake.NODE) mock_get_node_utilization_processor_counters.assert_called_once_with( fake.NODE) def test_get_node_utilization_counters_api_error(self): self.mock_object(self.perf_library, '_get_node_utilization_system_counters', mock.Mock(side_effect=netapp_api.NaApiError)) result = self.perf_library._get_node_utilization_counters(fake.NODE) self.assertIsNone(result) def test_get_node_utilization_system_counters(self): mock_get_performance_instance_uuids = self.mock_object( self.zapi_client, 'get_performance_instance_uuids', mock.Mock(return_value=fake.SYSTEM_INSTANCE_UUIDS)) mock_get_performance_counters = self.mock_object( self.zapi_client, 'get_performance_counters', mock.Mock(return_value=fake.SYSTEM_COUNTERS)) result = self.perf_library._get_node_utilization_system_counters( fake.NODE) self.assertEqual(fake.SYSTEM_COUNTERS, result) mock_get_performance_instance_uuids.assert_called_once_with( 'system', fake.NODE) mock_get_performance_counters.assert_called_once_with( 'system', fake.SYSTEM_INSTANCE_UUIDS, ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time']) def test_get_node_utilization_wafl_counters(self): mock_get_performance_instance_uuids = self.mock_object( self.zapi_client, 'get_performance_instance_uuids', mock.Mock(return_value=fake.WAFL_INSTANCE_UUIDS)) mock_get_performance_counters = self.mock_object( self.zapi_client, 'get_performance_counters', mock.Mock(return_value=fake.WAFL_COUNTERS)) mock_get_performance_counter_info = self.mock_object( self.zapi_client, 'get_performance_counter_info', mock.Mock(return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO)) result = self.perf_library._get_node_utilization_wafl_counters( fake.NODE) self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result) mock_get_performance_instance_uuids.assert_called_once_with( 'wafl', fake.NODE) mock_get_performance_counters.assert_called_once_with( 'wafl', fake.WAFL_INSTANCE_UUIDS, ['total_cp_msecs', 
'cp_phase_times']) mock_get_performance_counter_info.assert_called_once_with( 'wafl', 'cp_phase_times') def test_get_node_utilization_processor_counters(self): mock_get_performance_instance_uuids = self.mock_object( self.zapi_client, 'get_performance_instance_uuids', mock.Mock(return_value=fake.PROCESSOR_INSTANCE_UUIDS)) mock_get_performance_counters = self.mock_object( self.zapi_client, 'get_performance_counters', mock.Mock(return_value=fake.PROCESSOR_COUNTERS)) self.mock_object( self.zapi_client, 'get_performance_counter_info', mock.Mock(return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO)) result = self.perf_library._get_node_utilization_processor_counters( fake.NODE) self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result) mock_get_performance_instance_uuids.assert_called_once_with( 'processor', fake.NODE) mock_get_performance_counters.assert_called_once_with( 'processor', fake.PROCESSOR_INSTANCE_UUIDS, ['domain_busy', 'processor_elapsed_time']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/fakes.py0000664000175000017500000020127700000000000025157 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight All rights reserved. # Copyright (c) 2015 Tom Barron All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from oslo_serialization import jsonutils from manila.common import constants import manila.tests.share.drivers.netapp.fakes as na_fakes CLUSTER_NAME = 'fake_cluster' CLUSTER_NAME_2 = 'fake_cluster_2' BACKEND_NAME = 'fake_backend_name' BACKEND_NAME_2 = 'fake_backend_name_2' DRIVER_NAME = 'fake_driver_name' APP_VERSION = 'fake_app_vsersion' HOST_NAME = 'fake_host' POOL_NAME = 'fake_pool' FLEXGROUP_STYLE_EXTENDED = 'flexgroup' POOL_NAME_2 = 'fake_pool_2' VSERVER1 = 'fake_vserver_1' VSERVER2 = 'fake_vserver_2' LICENSES = ('base', 'cifs', 'fcp', 'flexclone', 'iscsi', 'nfs', 'snapmirror', 'snaprestore', 'snapvault') VOLUME_NAME_TEMPLATE = 'share_%(share_id)s' VSERVER_NAME_TEMPLATE = 'os_%s' AGGREGATE_NAME_SEARCH_PATTERN = '(.*)' SHARE_NAME = 'share_7cf7c200_d3af_4e05_b87e_9167c95dfcad' SHARE_NAME2 = 'share_d24e7257_124e_4fb6_b05b_d384f660bc85' SHARE_INSTANCE_NAME = 'share_d24e7257_124e_4fb6_b05b_d384f660bc85' FLEXVOL_NAME = 'fake_volume' FLEXVOL_NAME_1 = 'fake_volume_1' JUNCTION_PATH = '/%s' % FLEXVOL_NAME EXPORT_LOCATION = '%s:%s' % (HOST_NAME, JUNCTION_PATH) SNAPSHOT_NAME = 'fake_snapshot' SNAPSHOT_ACCESS_TIME = '1466455782' CONSISTENCY_GROUP_NAME = 'fake_consistency_group' SHARE_SIZE = 10 TENANT_ID = '24cb2448-13d8-4f41-afd9-eff5c4fd2a57' SHARE_ID = '7cf7c200-d3af-4e05-b87e-9167c95dfcad' SHARE_ID2 = 'b51c5a31-aa5b-4254-9ee8-7d39fa4c8c38' SHARE_ID3 = '1379991d-037b-4897-bf3a-81b4aac72eff' SHARE_ID4 = '1cb41aad-fd9b-4964-8059-646f69de925e' SHARE_ID5 = '2cb41aad-fd9b-4964-8059-646f69de925f' SHARE_INSTANCE_ID = 'd24e7257-124e-4fb6-b05b-d384f660bc85' PARENT_SHARE_ID = '585c3935-2aa9-437c-8bad-5abae1076555' SNAPSHOT_ID = 'de4c9050-e2f9-4ce1-ade4-5ed0c9f26451' CONSISTENCY_GROUP_ID = '65bfa2c9-dc6c-4513-951a-b8d15b453ad8' CONSISTENCY_GROUP_ID2 = '35f5c1ea-45fb-40c4-98ae-2a2a17554159' CG_SNAPSHOT_ID = '6ddd8a6b-5df7-417b-a2ae-3f6e449f4eea' CG_SNAPSHOT_MEMBER_ID1 = '629f79ef-b27e-4596-9737-30f084e5ba29' CG_SNAPSHOT_MEMBER_ID2 = 'e876aa9c-a322-4391-bd88-9266178262be' 
SERVER_ID = 'd5e90724-6f28-4944-858a-553138bdbd29' FREE_CAPACITY = 10000000000 TOTAL_CAPACITY = 20000000000 AGGREGATE = 'manila_aggr_1' AGGREGATES = ('manila_aggr_1', 'manila_aggr_2') AGGR_POOL_NAME = 'manila_aggr_1' FLEXGROUP_POOL_NAME = 'flexgroup_pool' ROOT_AGGREGATES = ('root_aggr_1', 'root_aggr_2') ROOT_VOLUME_AGGREGATE = 'manila1' SECURITY_CERT_DEFAULT_EXPIRE_DAYS = 365 SECURITY_CERT_LARGE_EXPIRE_DAYS = 3652 DELETE_RETENTION_HOURS = 12 ROOT_VOLUME = 'root' CLUSTER_NODE = 'cluster1_01' CLUSTER_NODES = ('cluster1_01', 'cluster1_02') NODE_DATA_PORT = 'e0c' NODE_DATA_PORTS = ('e0c', 'e0d') LIF_NAME_TEMPLATE = 'os_%(net_allocation_id)s' SHARE_TYPE_ID = '26e89a5b-960b-46bb-a8cf-0778e653098f' SHARE_TYPE_ID_2 = '2a06887e-25b5-486e-804a-d84c2d806feb' SHARE_TYPE_NAME = 'fake_share_type' IPSPACE = 'fake_ipspace' IPSPACE_ID = '27d38c27-3e8b-4d7d-9d91-fcf295e3ac8f' MTU = 1234 DEFAULT_MTU = 1500 MANILA_HOST_NAME = '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME} MANILA_HOST_NAME_2 = '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME_2} MANILA_HOST_NAME_3 = '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME_2, 'pool': POOL_NAME_2} MANILA_HOST_NAME_AGGR = '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': AGGR_POOL_NAME} MANILA_HOST_NAME_FLEXG_AGGR = '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': FLEXGROUP_POOL_NAME} SERVER_HOST = '%(host)s@%(backend)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME} SERVER_HOST_2 = '%(host)s@%(backend)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME_2} QOS_EXTRA_SPEC = 'netapp:maxiops' QOS_SIZE_DEPENDENT_EXTRA_SPEC = 'netapp:maxbpspergib' QOS_NORMALIZED_SPEC = 'maxiops' QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' FPOLICY_POLICY_NAME = 'fake_fpolicy_name' FPOLICY_EVENT_NAME = 'fake_fpolicy_event_name' FPOLICY_PROTOCOL = 'cifs' 
FPOLICY_FILE_OPERATIONS = 'create,write,rename' FPOLICY_FILE_OPERATIONS_LIST = ['create', 'write', 'rename'] FPOLICY_ENGINE = 'native' FPOLICY_EXT_TO_INCLUDE = 'avi' FPOLICY_EXT_TO_INCLUDE_LIST = ['avi'] FPOLICY_EXT_TO_EXCLUDE = 'jpg,mp3' FPOLICY_EXT_TO_EXCLUDE_LIST = ['jpg', 'mp3'] BACKUP_TYPE = "fake_backup_type" MOUNT_POINT_NAME = 'fake_mp' VOLUME_EFFICIENCY_POLICY_NAME = 'fake_volume_efficiency_policy' JOB_ID = '123' JOB_STATE = 'success' CLIENT_KWARGS = { 'username': 'admin', 'trace': False, 'hostname': '127.0.0.1', 'vserver': None, 'transport_type': 'https', 'ssl_cert_path': '/etc/ssl/certs/', 'password': 'pass', 'private_key_file': '/fake_private_key.pem', 'certificate_file': '/fake_certificate.pem', 'ca_certificate_file': '/fake_ca_certificate.crt', 'certificate_host_validation': False, 'port': '443', 'api_trace_pattern': '(.*)', } SHARE = { 'id': SHARE_ID, 'host': MANILA_HOST_NAME, 'project_id': TENANT_ID, 'name': SHARE_NAME, 'mount_point_name': MOUNT_POINT_NAME, 'size': SHARE_SIZE, 'share_proto': 'fake', 'share_type_id': 'fake_share_type_id', 'share_network_id': '5dfe0898-e2a1-4740-9177-81c7d26713b0', 'share_server_id': '7e6a2cc8-871f-4b1d-8364-5aad0f98da86', 'network_info': { 'network_allocations': [{'ip_address': 'ip'}] }, 'replica_state': constants.REPLICA_STATE_ACTIVE, 'status': constants.STATUS_AVAILABLE, 'share_server': None, 'encrypt': False, 'share_id': SHARE_ID, } SHARE_2 = { 'id': SHARE_ID, 'host': MANILA_HOST_NAME_AGGR, 'project_id': TENANT_ID, 'name': SHARE_NAME, 'size': SHARE_SIZE, 'share_proto': 'fake', 'share_type_id': 'fake_share_type_id', 'share_network_id': '5dfe0898-e2a1-4740-9177-81c7d26713b0', 'share_server_id': '7e6a2cc8-871f-4b1d-8364-5aad0f98da86', 'network_info': { 'network_allocations': [{'ip_address': 'ip'}] }, 'replica_state': constants.REPLICA_STATE_ACTIVE, 'status': constants.STATUS_AVAILABLE, 'share_server': None, 'encrypt': False, 'share_id': SHARE_ID, } SHARE_FLEXGROUP = { 'id': SHARE_ID, 'host': 
MANILA_HOST_NAME_FLEXG_AGGR, 'project_id': TENANT_ID, 'name': SHARE_NAME, 'size': SHARE_SIZE, 'share_proto': 'fake', 'share_type_id': 'fake_share_type_id', 'share_network_id': '5dfe0898-e2a1-4740-9177-81c7d26713b0', 'share_server_id': '7e6a2cc8-871f-4b1d-8364-5aad0f98da86', 'network_info': { 'network_allocations': [{'ip_address': 'ip'}] }, 'replica_state': constants.REPLICA_STATE_ACTIVE, 'status': constants.STATUS_AVAILABLE, 'share_server': None, 'encrypt': False, 'share_id': SHARE_ID, } SHARE_INSTANCE = { 'id': SHARE_INSTANCE_ID, 'share_id': SHARE_ID, 'host': MANILA_HOST_NAME, 'project_id': TENANT_ID, 'mount_point_name': MOUNT_POINT_NAME, 'name': SHARE_INSTANCE_NAME, 'size': SHARE_SIZE, 'share_proto': 'fake', 'share_type_id': SHARE_TYPE_ID, 'share_network_id': '5dfe0898-e2a1-4740-9177-81c7d26713b0', 'share_server_id': '7e6a2cc8-871f-4b1d-8364-5aad0f98da86', 'replica_state': constants.REPLICA_STATE_ACTIVE, 'status': constants.STATUS_AVAILABLE } SHARE_INSTANCE_WITH_ENCRYPTION = { 'id': SHARE_INSTANCE_ID, 'share_id': SHARE_ID, 'host': MANILA_HOST_NAME, 'project_id': TENANT_ID, 'mount_point_name': MOUNT_POINT_NAME, 'name': SHARE_INSTANCE_NAME, 'size': SHARE_SIZE, 'share_proto': 'fake', 'share_type_id': SHARE_TYPE_ID, 'share_network_id': '5dfe0898-e2a1-4740-9177-81c7d26713b0', 'share_server_id': '7e6a2cc8-871f-4b1d-8364-5aad0f98da86', 'replica_state': constants.REPLICA_STATE_ACTIVE, 'status': constants.STATUS_AVAILABLE, 'encryption_key_ref': 'fake_key_ref' } FLEXVOL_TO_MANAGE = { 'aggregate': POOL_NAME, 'junction-path': '/%s' % FLEXVOL_NAME, 'name': FLEXVOL_NAME, 'type': 'rw', 'style': 'flex', 'size': '1610612736', # rounds up to 2 GB 'style-extended': FLEXGROUP_STYLE_EXTENDED, } FLEXVOL_WITHOUT_QOS = copy.deepcopy(FLEXVOL_TO_MANAGE) FLEXVOL_WITHOUT_QOS.update({'qos-policy-group-name': None}) FLEXVOL_WITH_QOS = copy.deepcopy(FLEXVOL_TO_MANAGE) FLEXVOL_WITH_QOS.update({'qos-policy-group-name': QOS_POLICY_GROUP_NAME}) QOS_POLICY_GROUP = { 'policy-group': 
QOS_POLICY_GROUP_NAME, 'vserver': VSERVER1, 'max-throughput': '3000iops', 'num-workloads': 1, } FLEXVOL = { 'aggregate': POOL_NAME, 'junction-path': '/%s' % FLEXVOL_NAME, 'name': FLEXVOL_NAME, 'type': 'rw', 'style': 'flex', 'size': '1610612736', # rounds down to 1 GB, 'owning-vserver-name': VSERVER1, } SHARE_TYPE_EXTRA_SPEC = { 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'mount_snapshot_support': False, 'driver_handles_share_servers': True, 'availability_zones': [], } EXTRA_SPEC = { 'netapp:thin_provisioned': 'true', 'netapp:snapshot_policy': 'default', 'netapp:language': 'en-US', 'netapp:dedup': 'True', 'netapp:compression': 'false', 'netapp:max_files': 5000, 'netapp:split_clone_on_create': 'true', 'netapp_disk_type': 'FCAL', 'netapp_raid_type': 'raid4', 'netapp_flexvol_encryption': 'true', 'netapp:tcp_max_xfer_size': 100, 'netapp:udp_max_xfer_size': 100, 'netapp:adaptive_qos_policy_group': None, 'netapp:snaplock_type': "compliance", 'netapp:snaplock_autocommit_period': '4months', 'netapp:snaplock_min_retention_period': '30minutes', 'netapp:snaplock_max_retention_period': '2years', 'netapp:snaplock_default_retention_period': '2months' } EXTRA_SPEC_WITH_REPLICATION = copy.copy(EXTRA_SPEC) EXTRA_SPEC_WITH_REPLICATION.update({ 'replication_type': 'dr' }) EXTRA_SPEC_WITH_FPOLICY = copy.copy(EXTRA_SPEC) EXTRA_SPEC_WITH_FPOLICY.update( {'fpolicy_extensions_to_include': FPOLICY_EXT_TO_INCLUDE, 'fpolicy_extensions_to_exclude': FPOLICY_EXT_TO_EXCLUDE, 'fpolicy_file_operations': FPOLICY_FILE_OPERATIONS}) NFS_CONFIG_DEFAULT = { 'tcp-max-xfer-size': 65536, 'udp-max-xfer-size': 32768, } NFS_CONFIG_TCP_MAX_DDT = { 'extra_specs': { 'netapp:tcp_max_xfer_size': 100, }, 'expected': { 'tcp-max-xfer-size': 100, 'udp-max-xfer-size': NFS_CONFIG_DEFAULT['udp-max-xfer-size'], } } NFS_CONFIG_UDP_MAX_DDT = { 'extra_specs': { 'netapp:udp_max_xfer_size': 100, }, 'expected': { 'tcp-max-xfer-size': 
NFS_CONFIG_DEFAULT['tcp-max-xfer-size'], 'udp-max-xfer-size': 100, } } NFS_CONFIG_TCP_UDP_MAX = { 'tcp-max-xfer-size': 100, 'udp-max-xfer-size': 100, } NFS_CONFIG_TCP_MAX = { 'tcp-max-xfer-size': 100, 'udp-max-xfer-size': NFS_CONFIG_DEFAULT['udp-max-xfer-size'], } NFS_CONFIG_UDP_MAX = { 'tcp-max-xfer-size': NFS_CONFIG_DEFAULT['tcp-max-xfer-size'], 'udp-max-xfer-size': 100, } NFS_CONFIG_TCP_UDP_MAX_DDT = { 'extra_specs': { 'netapp:tcp_max_xfer_size': 100, 'netapp:udp_max_xfer_size': 100, }, 'expected': NFS_CONFIG_TCP_UDP_MAX, } SHARE_GROUP_REF = { 'share_types': [ {'share_type_id': 'id1'}, {'share_type_id': 'id2'}, {'share_type_id': 'id3'}, ], } EXTRA_SPEC_WITH_QOS = copy.deepcopy(EXTRA_SPEC) EXTRA_SPEC_WITH_QOS.update({ 'qos': True, QOS_EXTRA_SPEC: '3000', }) EXTRA_SPEC_WITH_FPOLICY = copy.deepcopy(EXTRA_SPEC) EXTRA_SPEC_WITH_FPOLICY.update( {'netapp:fpolicy_extensions_to_include': FPOLICY_EXT_TO_INCLUDE, 'netapp:fpolicy_extensions_to_exclude': FPOLICY_EXT_TO_EXCLUDE, 'netapp:fpolicy_file_operations': FPOLICY_FILE_OPERATIONS}) EXTRA_SPEC_WITH_SIZE_DEPENDENT_QOS = copy.deepcopy(EXTRA_SPEC) EXTRA_SPEC_WITH_SIZE_DEPENDENT_QOS.update({ 'qos': True, QOS_SIZE_DEPENDENT_EXTRA_SPEC: '1000', }) PROVISIONING_OPTIONS = { 'thin_provisioned': True, 'snapshot_policy': 'default', 'language': 'en-US', 'dedup_enabled': True, 'compression_enabled': False, 'max_files': 5000, 'split': True, 'encrypt': False, 'hide_snapdir': False, 'adaptive_qos_policy_group': None, } PROVISIONING_OPTIONS_WITH_QOS = copy.deepcopy(PROVISIONING_OPTIONS) PROVISIONING_OPTIONS_WITH_QOS.update( {'qos_policy_group': QOS_POLICY_GROUP_NAME}) PROVISIONING_OPTS_WITH_ADAPT_QOS = copy.deepcopy(PROVISIONING_OPTIONS) PROVISIONING_OPTS_WITH_ADAPT_QOS.update( {'adaptive_qos_policy_group': QOS_POLICY_GROUP_NAME}) PROVISIONING_OPTIONS_WITH_FPOLICY = copy.deepcopy(PROVISIONING_OPTIONS) PROVISIONING_OPTIONS_WITH_FPOLICY.update( {'fpolicy_extensions_to_include': FPOLICY_EXT_TO_INCLUDE, 'fpolicy_extensions_to_exclude': 
FPOLICY_EXT_TO_EXCLUDE, 'fpolicy_file_operations': FPOLICY_FILE_OPERATIONS}) PROVISIONING_OPTIONS_INVALID_FPOLICY = copy.deepcopy(PROVISIONING_OPTIONS) PROVISIONING_OPTIONS_INVALID_FPOLICY.update( {'fpolicy_file_operations': FPOLICY_FILE_OPERATIONS}) PROVISIONING_OPTIONS_BOOLEAN = { 'thin_provisioned': True, 'dedup_enabled': False, 'compression_enabled': False, 'split': False, 'hide_snapdir': False, } PROVISIONING_OPTIONS_BOOLEAN_THIN_PROVISIONED_TRUE = { 'thin_provisioned': True, 'snapshot_policy': None, 'language': None, 'dedup_enabled': False, 'compression_enabled': False, 'max_files': None, 'split': False, 'encrypt': False, } PROVISIONING_OPTIONS_STRING = { 'snapshot_policy': 'default', 'language': 'en-US', 'max_files': 5000, 'max_files_multiplier': 4.2, 'adaptive_qos_policy_group': None, 'fpolicy_extensions_to_exclude': None, 'fpolicy_extensions_to_include': None, 'fpolicy_file_operations': None, 'efficiency_policy': None, 'snaplock_type': None, 'snaplock_autocommit_period': None, 'snaplock_min_retention_period': None, 'snaplock_max_retention_period': None, 'snaplock_default_retention_period': None, } PROVISIONING_OPTIONS_STRING_MISSING_SPECS = { 'snapshot_policy': 'default', 'language': 'en-US', 'max_files': None, 'max_files_multiplier': None, 'adaptive_qos_policy_group': None, 'fpolicy_extensions_to_exclude': None, 'fpolicy_extensions_to_include': None, 'fpolicy_file_operations': None, 'efficiency_policy': None, 'snaplock_type': None, 'snaplock_autocommit_period': None, 'snaplock_min_retention_period': None, 'snaplock_max_retention_period': None, 'snaplock_default_retention_period': None, } PROVISIONING_OPTIONS_STRING_DEFAULT = { 'snapshot_policy': None, 'language': None, 'max_files': None, 'max_files_multiplier': None, 'adaptive_qos_policy_group': None, 'fpolicy_extensions_to_exclude': None, 'fpolicy_extensions_to_include': None, 'fpolicy_file_operations': None, 'efficiency_policy': None, 'snaplock_type': None, 'snaplock_autocommit_period': None, 
'snaplock_min_retention_period': None, 'snaplock_max_retention_period': None, 'snaplock_default_retention_period': None, } SHORT_BOOLEAN_EXTRA_SPEC = { 'netapp:thin_provisioned': 'true', } STRING_EXTRA_SPEC = { 'netapp:snapshot_policy': 'default', 'netapp:language': 'en-US', 'netapp:max_files': 5000, 'netapp:max_files_multiplier': 4.2, 'netapp:adaptive_qos_policy_group': None, 'netapp:efficiency_policy': None, } SHORT_STRING_EXTRA_SPEC = { 'netapp:snapshot_policy': 'default', 'netapp:language': 'en-US', 'netapp:adaptive_qos_policy_group': None, } INVALID_EXTRA_SPEC = { 'netapp:thin_provisioned': 'ture', 'netapp:snapshot_policy': 'wrong_default', 'netapp:language': 'abc', } INVALID_EXTRA_SPEC_COMBO = { 'netapp:dedup': 'false', 'netapp:compression': 'true' } INVALID_MAX_FILE_EXTRA_SPEC = { 'netapp:max_files': -1, } INVALID_TCP_MAX_XFER_SIZE_EXTRA_SPEC = { 'netapp:tcp_max_xfer_size': -1, } INVALID_UDP_MAX_XFER_SIZE_EXTRA_SPEC = { 'netapp:udp_max_xfer_size': -1, } EMPTY_EXTRA_SPEC = {} SHARE_TYPE = { 'id': SHARE_TYPE_ID, 'name': SHARE_TYPE_NAME, 'extra_specs': EXTRA_SPEC } OVERLAPPING_EXTRA_SPEC = { 'compression': ' True', 'netapp:compression': 'true', 'dedupe': ' True', 'netapp:dedup': 'false', 'thin_provisioning': ' False', 'netapp:thin_provisioned': 'true', } REMAPPED_OVERLAPPING_EXTRA_SPEC = { 'netapp:compression': 'true', 'netapp:dedup': 'true', 'netapp:thin_provisioned': 'false', } VOLUME_AUTOSIZE_ATTRS = { 'mode': 'off', 'grow-threshold-percent': '85', 'shrink-threshold-percent': '50', 'maximum-size': '1258288', 'minimum-size': '1048576', } USER_NETWORK_ALLOCATIONS = [ { 'id': '132dbb10-9a36-46f2-8d89-3d909830c356', 'ip_address': '10.10.10.10', 'cidr': '10.10.10.0/24', 'segmentation_id': '1000', 'network_type': 'vlan', 'label': 'user', 'mtu': MTU, 'gateway': '10.10.10.1', }, { 'id': '7eabdeed-bad2-46ea-bd0f-a33884c869e0', 'ip_address': '10.10.10.20', 'cidr': '10.10.10.0/24', 'segmentation_id': '1000', 'network_type': 'vlan', 'label': 'user', 'mtu': MTU, 
'gateway': '10.10.10.1', } ] USER_NETWORK_ALLOCATIONS_IPV6 = [ { 'id': '234dbb10-9a36-46f2-8d89-3d909830c356', 'ip_address': 'fd68:1a09:66ab:8d51:0:10:0:1', 'cidr': 'fd68:1a09:66ab:8d51::/64', 'segmentation_id': '2000', 'network_type': 'vlan', 'label': 'user', 'mtu': MTU, 'gateway': 'fd68:1a09:66ab:8d51:0:0:0:1', }, { 'id': '6677deed-bad2-46ea-bd0f-a33884c869e0', 'ip_address': 'fd68:1a09:66ab:8d51:0:10:0:2', 'cidr': 'fd68:1a09:66ab:8d51::/64', 'segmentation_id': '2000', 'network_type': 'vlan', 'label': 'user', 'mtu': MTU, 'gateway': 'fd68:1a09:66ab:8d51:0:0:0:1', } ] ADMIN_NETWORK_ALLOCATIONS = [ { 'id': '132dbb10-9a36-46f2-8d89-3d909830c356', 'ip_address': '10.10.20.10', 'cidr': '10.10.20.0/24', 'segmentation_id': None, 'network_type': 'flat', 'label': 'admin', 'mtu': MTU, 'gateway': '10.10.20.1' }, ] NETWORK_INFO = { 'server_id': '56aafd02-4d44-43d7-b784-57fc88167224', 'security_services': ['fake_ldap', 'fake_kerberos', 'fake_ad', ], 'network_allocations': USER_NETWORK_ALLOCATIONS, 'admin_network_allocations': ADMIN_NETWORK_ALLOCATIONS, 'neutron_net_id': '4eff22ca-5ad2-454d-a000-aadfd7b40b39', 'neutron_subnet_id': '62bf1c2c-18eb-421b-8983-48a6d39aafe0', 'segmentation_id': '1000', 'network_type': 'vlan' } NETWORK_INFO_LIST = [NETWORK_INFO] NETWORK_INFO_NETMASK = '255.255.255.0' SHARE_SERVER = { 'id': 'fake_id', 'share_network_id': 'c5b3a865-56d0-4d88-abe5-879965e099c9', 'identifier': 'fake_id', 'backend_details': { 'vserver_name': VSERVER1 }, 'network_allocations': (USER_NETWORK_ALLOCATIONS + ADMIN_NETWORK_ALLOCATIONS), 'host': SERVER_HOST, 'share_network_subnets': [{ 'neutron_net_id': 'fake_neutron_net_id', 'neutron_subnet_id': 'fake_neutron_subnet_id' }] } SHARE_SERVER_2 = { 'id': 'fake_id_2', 'source_share_server_id': 'fake_src_share_server_id_2', 'share_network_id': 'c5b3a865-56d0-4d88-abe5-879965e099c9', 'backend_details': { 'vserver_name': VSERVER2 }, 'network_allocations': (USER_NETWORK_ALLOCATIONS + ADMIN_NETWORK_ALLOCATIONS), 'host': SERVER_HOST_2, 
'share_network_subnets': [{ 'neutron_net_id': 'fake_neutron_net_id_2', 'neutron_subnet_id': 'fake_neutron_subnet_id_2' }] } VSERVER_INFO = { 'name': 'fake_vserver_name', 'subtype': 'default', 'operational_state': 'running', 'state': 'running', } SHARE_SERVER_NFS_TCP = { 'id': 'fake_nfs_id_tcp', 'backend_details': { 'vserver_name': VSERVER2, 'nfs_config': jsonutils.dumps(NFS_CONFIG_TCP_MAX), }, 'host': 'fake_host@fake_backend', } SHARE_SERVER_NFS_UDP = { 'id': 'fake_nfs_id_udp', 'backend_details': { 'vserver_name': VSERVER2, 'nfs_config': jsonutils.dumps(NFS_CONFIG_UDP_MAX), }, 'host': 'fake_host@fake_backend', } SHARE_SERVER_NFS_TCP_UDP = { 'id': 'fake_nfs_id_tcp_udp', 'backend_details': { 'vserver_name': VSERVER2, 'nfs_config': jsonutils.dumps(NFS_CONFIG_TCP_UDP_MAX), }, 'host': 'fake_host@fake_backend', } SHARE_SERVER_NO_NFS_NONE = { 'id': 'fake_no_nfs_id_none', 'backend_details': { 'vserver_name': VSERVER2, }, 'host': 'fake_host@fake_backend', } SHARE_SERVER_NO_DETAILS = { 'id': 'id_no_datails', 'host': 'fake_host@fake_backend', } SHARE_SERVER_NFS_DEFAULT = { 'id': 'fake_id_nfs_default', 'backend_details': { 'vserver_name': VSERVER2, 'nfs_config': jsonutils.dumps(NFS_CONFIG_DEFAULT), }, 'host': 'fake_host@fake_backend', } SHARE_SERVERS = [ SHARE_SERVER_NFS_TCP, SHARE_SERVER_NFS_UDP, SHARE_SERVER_NFS_TCP_UDP, SHARE_SERVER_NFS_DEFAULT, SHARE_SERVER_NO_NFS_NONE, SHARE_SERVER_NO_DETAILS, ] VSERVER_PEER = [{ 'vserver': VSERVER1, 'peer-vserver': VSERVER2, 'peer-state': 'peered', 'peer-cluster': 'fake_cluster' }] SNAPSHOT = { 'id': SNAPSHOT_ID, 'project_id': TENANT_ID, 'share_id': PARENT_SHARE_ID, 'status': constants.STATUS_CREATING, 'provider_location': None, } SNAPSHOT_TO_MANAGE = { 'id': SNAPSHOT_ID, 'project_id': TENANT_ID, 'share_id': PARENT_SHARE_ID, 'status': constants.STATUS_CREATING, 'provider_location': SNAPSHOT_NAME, } CDOT_SNAPSHOT = { 'name': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'busy': False, 'owners': set(), 'access-time': SNAPSHOT_ACCESS_TIME, 
'locked_by_clone': False, } CDOT_SNAPSHOT_BUSY_VOLUME_CLONE = { 'name': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'busy': True, 'owners': {'volume clone'}, 'access-time': SNAPSHOT_ACCESS_TIME, 'locked_by_clone': True, } CDOT_SNAPSHOT_BUSY_SNAPMIRROR = { 'name': SNAPSHOT_NAME, 'volume': SHARE_NAME, 'busy': True, 'owners': {'snapmirror'}, 'access-time': SNAPSHOT_ACCESS_TIME, 'locked_by_clone': False, } CDOT_CLONE_CHILD_1 = 'fake_child_1' CDOT_CLONE_CHILD_2 = 'fake_child_2' CDOT_CLONE_CHILDREN = [ {'name': CDOT_CLONE_CHILD_1}, {'name': CDOT_CLONE_CHILD_2}, ] SHARE_FOR_CG1 = { 'id': SHARE_ID, 'host': '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}, 'name': 'share_1', 'share_proto': 'NFS', 'source_share_group_snapshot_member_id': None, } SHARE_FOR_CG2 = { 'id': SHARE_ID2, 'host': '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}, 'name': 'share_2', 'share_proto': 'NFS', 'source_share_group_snapshot_member_id': None, } # Clone dest of SHARE_FOR_CG1 SHARE_FOR_CG3 = { 'id': SHARE_ID3, 'host': '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}, 'name': 'share3', 'share_proto': 'NFS', 'source_share_group_snapshot_member_id': CG_SNAPSHOT_MEMBER_ID1, } # Clone dest of SHARE_FOR_CG2 SHARE_FOR_CG4 = { 'id': SHARE_ID4, 'host': '%(host)s@%(backend)s#%(pool)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}, 'name': 'share4', 'share_proto': 'NFS', 'source_share_group_snapshot_member_id': CG_SNAPSHOT_MEMBER_ID2, } EMPTY_CONSISTENCY_GROUP = { 'cgsnapshots': [], 'description': 'fake description', 'host': '%(host)s@%(backend)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME}, 'id': CONSISTENCY_GROUP_ID, 'name': CONSISTENCY_GROUP_NAME, 'shares': [], } CONSISTENCY_GROUP = { 'cgsnapshots': [], 'description': 'fake description', 'host': '%(host)s@%(backend)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME}, 'id': 
CONSISTENCY_GROUP_ID, 'name': CONSISTENCY_GROUP_NAME, 'shares': [SHARE_FOR_CG1, SHARE_FOR_CG2], } CONSISTENCY_GROUP_DEST = { 'cgsnapshots': [], 'description': 'fake description', 'host': '%(host)s@%(backend)s' % { 'host': HOST_NAME, 'backend': BACKEND_NAME}, 'id': CONSISTENCY_GROUP_ID, 'name': CONSISTENCY_GROUP_NAME, 'shares': [SHARE_FOR_CG3, SHARE_FOR_CG4], } CG_SNAPSHOT_MEMBER_1 = { 'cgsnapshot_id': CG_SNAPSHOT_ID, 'id': CG_SNAPSHOT_MEMBER_ID1, 'share_id': SHARE_ID, 'share_proto': 'NFS', 'size': SHARE_SIZE, } CG_SNAPSHOT_MEMBER_2 = { 'cgsnapshot_id': CG_SNAPSHOT_ID, 'id': CG_SNAPSHOT_MEMBER_ID2, 'share_id': SHARE_ID2, 'share_proto': 'NFS', 'size': SHARE_SIZE, } CG_SNAPSHOT = { 'share_group_snapshot_members': [CG_SNAPSHOT_MEMBER_1, CG_SNAPSHOT_MEMBER_2], 'share_group': CONSISTENCY_GROUP, 'share_group_id': CONSISTENCY_GROUP_ID, 'id': CG_SNAPSHOT_ID, 'project_id': TENANT_ID, } COLLATED_CGSNAPSHOT_INFO = [ { 'share': SHARE_FOR_CG3, 'snapshot': { 'share_id': SHARE_ID, 'id': CG_SNAPSHOT_ID, 'size': SHARE_SIZE, } }, { 'share': SHARE_FOR_CG4, 'snapshot': { 'share_id': SHARE_ID2, 'id': CG_SNAPSHOT_ID, 'size': SHARE_SIZE, } }, ] IDENTIFIER = 'c5b3a865-56d0-4d88-dke5-853465e099c9' LIF_NAMES = [] LIF_ADDRESSES = ['10.10.10.10', '10.10.10.20'] LIFS = ( {'address': LIF_ADDRESSES[0], 'administrative-status': 'up', 'home-node': CLUSTER_NODES[0], 'home-port': 'e0c', 'interface-name': 'os_132dbb10-9a36-46f2-8d89-3d909830c356', 'netmask': NETWORK_INFO_NETMASK, 'role': 'data', 'vserver': VSERVER1 }, {'address': LIF_ADDRESSES[1], 'administrative-status': 'up', 'home-node': CLUSTER_NODES[1], 'home-port': 'e0c', 'interface-name': 'os_7eabdeed-bad2-46ea-bd0f-a33884c869e0', 'netmask': NETWORK_INFO_NETMASK, 'role': 'data', 'vserver': VSERVER1 }, ) INTERFACE_ADDRESSES_WITH_METADATA = { LIF_ADDRESSES[0]: { 'is_admin_only': False, 'preferred': True, }, LIF_ADDRESSES[1]: { 'is_admin_only': True, 'preferred': False, }, } NFS_EXPORTS = [ { 'path': ':'.join([LIF_ADDRESSES[0], 
'fake_export_path']), 'is_admin_only': False, 'metadata': { 'preferred': True, }, }, { 'path': ':'.join([LIF_ADDRESSES[1], 'fake_export_path']), 'is_admin_only': True, 'metadata': { 'preferred': False, }, }, ] SHARE_ACCESS = { 'access_type': 'user', 'access_to': [LIF_ADDRESSES[0]] } EMS_MESSAGE_0 = { 'computer-name': HOST_NAME, 'event-id': '0', 'event-source': 'Manila driver %s' % DRIVER_NAME, 'app-version': APP_VERSION, 'category': 'provisioning', 'event-description': 'OpenStack Manila connected to cluster node', 'log-level': '5', 'auto-support': 'false' } EMS_MESSAGE_1 = { 'computer-name': HOST_NAME, 'event-id': '1', 'event-source': 'Manila driver %s' % DRIVER_NAME, 'app-version': APP_VERSION, 'category': 'provisioning', 'event-description': '', 'log-level': '5', 'auto-support': 'false' } AGGREGATE_CAPACITIES = { AGGREGATES[0]: { 'available': 1181116007, # 1.1 GB 'total': 3543348020, # 3.3 GB 'used': 2362232013, # 2.2 GB }, AGGREGATES[1]: { 'available': 2147483648, # 2.0 GB 'total': 6442450944, # 6.0 GB 'used': 4294967296, # 4.0 GB } } FLEXGROUP_POOL_AGGR = [AGGREGATES[0], AGGREGATES[1]] FLEXGROUP_POOL_OPT = { FLEXGROUP_POOL_NAME: FLEXGROUP_POOL_AGGR, } FLEXGROUP_POOL_OPT_RAW = { FLEXGROUP_POOL_NAME: '%s %s' % (AGGREGATES[0], AGGREGATES[1]), } FLEXGROUP_POOL = { 'pool_name': FLEXGROUP_POOL_NAME, 'netapp_aggregate': '%s %s' % (AGGREGATES[0], AGGREGATES[1]), 'total_capacity_gb': 6.6, 'free_capacity_gb': 2.2, 'allocated_capacity_gb': 4.39, 'reserved_percentage': 5, 'max_over_subscription_ratio': 2.0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], 'netapp_flexvol_encryption': True, 'netapp_raid_type': 'raid4 raid_dp', 'netapp_disk_type': ['FCAL', 'SATA', 'SSD'], 'netapp_hybrid_aggregate': 'false true', 'utilization': 50.0, 'filter_function': FLEXGROUP_POOL_NAME, 'goodness_function': 'goodness', 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'qos': True, 
'security_service_update_support': True, 'netapp_flexgroup': True, 'netapp_cluster_name': 'fake_cluster_name', 'netapp_snaplock_type': ['compliance', 'enterprise'], } FLEXGROUP_AGGR_SET = set(FLEXGROUP_POOL_OPT[FLEXGROUP_POOL_NAME]) AGGREGATE_CAPACITIES_VSERVER_CREDS = { AGGREGATES[0]: { 'available': 1181116007, # 1.1 GB }, AGGREGATES[1]: { 'available': 2147483648, # 2.0 GB } } SSC_INFO = { AGGREGATES[0]: { 'netapp_raid_type': 'raid4', 'netapp_disk_type': 'FCAL', 'netapp_hybrid_aggregate': 'false', 'netapp_aggregate': AGGREGATES[0], 'netapp_flexgroup': False, 'netapp_snaplock_type': 'compliance', }, AGGREGATES[1]: { 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SATA', 'SSD'], 'netapp_hybrid_aggregate': 'true', 'netapp_aggregate': AGGREGATES[1], 'netapp_flexgroup': False, 'netapp_snaplock_type': 'enterprise', } } SSC_INFO_MAP = { AGGREGATES[0]: { 'netapp_raid_type': 'raid4', 'netapp_disk_type': ['FCAL'], 'netapp_hybrid_aggregate': 'false', 'netapp_snaplock_type': 'compliance', }, AGGREGATES[1]: { 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SATA', 'SSD'], 'netapp_hybrid_aggregate': 'true', 'netapp_snaplock_type': 'enterprise', } } SSC_INFO_VSERVER_CREDS = { AGGREGATES[0]: { 'netapp_aggregate': AGGREGATES[0], 'netapp_flexgroup': False, }, AGGREGATES[1]: { 'netapp_aggregate': AGGREGATES[1], 'netapp_flexgroup': False, } } POOLS = [ { 'pool_name': AGGREGATES[0], 'netapp_aggregate': AGGREGATES[0], 'total_capacity_gb': 3.3, 'free_capacity_gb': 1.1, 'allocated_capacity_gb': 2.2, 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 2, 'max_over_subscription_ratio': 2.0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], 'netapp_flexvol_encryption': True, 'netapp_raid_type': 'raid4', 'netapp_disk_type': 'FCAL', 'netapp_hybrid_aggregate': 'false', 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', 'mount_point_name_support': True, 
'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'qos': True, 'security_service_update_support': True, 'share_server_multiple_subnet_support': True, 'netapp_flexgroup': False, 'netapp_cluster_name': 'fake_cluster_name', 'netapp_snaplock_type': 'compliance', 'share_replicas_migration_support': True, 'encryption_support': ['share_server'], }, { 'pool_name': AGGREGATES[1], 'netapp_aggregate': AGGREGATES[1], 'total_capacity_gb': 6.0, 'free_capacity_gb': 2.0, 'allocated_capacity_gb': 4.0, 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 2, 'max_over_subscription_ratio': 2.0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], 'netapp_flexvol_encryption': True, 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SATA', 'SSD'], 'netapp_hybrid_aggregate': 'true', 'utilization': 42.0, 'filter_function': 'filter', 'goodness_function': 'goodness', 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'qos': True, 'security_service_update_support': True, 'share_server_multiple_subnet_support': True, 'netapp_flexgroup': False, 'netapp_cluster_name': 'fake_cluster_name', 'netapp_snaplock_type': 'compliance', 'share_replicas_migration_support': True, 'encryption_support': ['share_server'], }, ] POOLS_VSERVER_CREDS = [ { 'pool_name': AGGREGATES[0], 'filter_function': None, 'goodness_function': None, 'mount_point_name_support': True, 'netapp_cluster_name': '', 'netapp_aggregate': AGGREGATES[0], 'total_capacity_gb': 'unknown', 'free_capacity_gb': 1.1, 'allocated_capacity_gb': 0.0, 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 2, 'max_over_subscription_ratio': 2.0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], 'netapp_flexvol_encryption': True, 'utilization': 50.0, 'snapshot_support': True, 
'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'qos': False, 'security_service_update_support': True, 'share_server_multiple_subnet_support': True, 'netapp_flexgroup': False, 'share_replicas_migration_support': True, 'encryption_support': ['share_server'], }, { 'pool_name': AGGREGATES[1], 'netapp_aggregate': AGGREGATES[1], 'total_capacity_gb': 'unknown', 'free_capacity_gb': 2.0, 'allocated_capacity_gb': 0.0, 'reserved_percentage': 5, 'reserved_snapshot_percentage': 2, 'reserved_share_extend_percentage': 2, 'max_over_subscription_ratio': 2.0, 'dedupe': [True, False], 'compression': [True, False], 'thin_provisioning': [True, False], 'netapp_flexvol_encryption': True, 'utilization': 50.0, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'qos': False, 'security_service_update_support': True, 'share_server_multiple_subnet_support': True, 'netapp_flexgroup': False, 'share_replicas_migration_support': True, 'encryption_support': ['share_server'], }, ] SSC_AGGREGATES = [ { 'name': AGGREGATES[0], 'raid-type': 'raid4', 'is-hybrid': False, 'is-home': False, 'snaplock-type': 'compliance', }, { 'name': AGGREGATES[1], 'raid-type': 'raid_dp', 'is-hybrid': True, 'is-home': True, 'snaplock-type': 'enterprise', }, ] CLUSTER_INFO = { 'nodes': CLUSTER_NODES, 'nve_support': True, } SSC_DISK_TYPES = ['FCAL', ['SATA', 'SSD']] NODE = 'cluster1-01' COUNTERS_T1 = [ { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'avg_processor_busy': '29078861388', 'instance-name': 'system', 'timestamp': '1453573776', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time': '1063283283681', 'instance-name': 'system', 'timestamp': '1453573776', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time1': '1063283283681', 'instance-name': 'system', 'timestamp': '1453573776', }, { 'cp_phase_times:p2a_snap': '714', 
'cp_phase_times:p4_finish': '14897', 'cp_phase_times:setup': '581', 'cp_phase_times:p2a_dlog1': '6019', 'cp_phase_times:p2a_dlog2': '2328', 'cp_phase_times:p2v_cont': '2479', 'cp_phase_times:p2v_volinfo': '1138', 'cp_phase_times:p2v_bm': '3484', 'cp_phase_times:p2v_fsinfo': '2031', 'cp_phase_times:p2a_inofile': '356', 'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,' '427,1058,354,3484,5135,1460,1138,2479,356,1373' ',6019,9,2328,2257,229,493,1275,0,6059,714,530215,' '21603833,0,0,3286,11075940,22001,14897,36', 'cp_phase_times:p2v_dlog2': '377', 'instance-name': 'wafl', 'cp_phase_times:p3_wait': '0', 'cp_phase_times:p2a_bm': '6059', 'cp_phase_times:p1_quota': '498', 'cp_phase_times:p2v_inofile': '839', 'cp_phase_times:p2a_refcount': '493', 'cp_phase_times:p2a_fsinfo': '2257', 'cp_phase_times:p2a_hyabc': '0', 'cp_phase_times:p2a_volinfo': '530215', 'cp_phase_times:pre_p0': '5007', 'cp_phase_times:p2a_hya': '9', 'cp_phase_times:p0_snap_del': '1840', 'cp_phase_times:p2a_ino': '1373', 'cp_phase_times:p2v_df_scores_sub': '354', 'cp_phase_times:p2v_ino_pub': '799', 'cp_phase_times:p2a_ipu_bitmap_grow': '229', 'cp_phase_times:p2v_refcount': '427', 'timestamp': '1453573776', 'cp_phase_times:p2v_dlog1': '0', 'cp_phase_times:p2_finish': '0', 'cp_phase_times:p1_clean': '9832', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'cp_phase_times:p3a_volinfo': '11075940', 'cp_phase_times:p2a_topaa': '1275', 'cp_phase_times:p2_flush': '21603833', 'cp_phase_times:p2v_df_scores': '1460', 'cp_phase_times:ipu_disk_add': '0', 'cp_phase_times:p2v_snap': '5135', 'cp_phase_times:p5_finish': '36', 'cp_phase_times:p2v_ino_pri': '1336', 'cp_phase_times:p3v_volinfo': '3286', 'cp_phase_times:p2v_topaa': '1058', 'cp_phase_times:p3_finish': '22001', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'total_cp_msecs': '33309624', 'instance-name': 'wafl', 'timestamp': '1453573776', }, { 'domain_busy:kahuna': '2712467226', 
'timestamp': '1453573777', 'domain_busy:cifs': '434036', 'domain_busy:raid_exempt': '28', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy:target': '6460782', 'domain_busy:nwk_exempt': '20', 'domain_busy:raid': '722094140', 'domain_busy:storage': '2253156562', 'instance-name': 'processor0', 'domain_busy:cluster': '34', 'domain_busy:wafl_xcleaner': '51275254', 'domain_busy:wafl_exempt': '1243553699', 'domain_busy:protocol': '54', 'domain_busy': '1028851855595,2712467226,2253156562,5688808118,' '722094140,28,6460782,59,434036,1243553699,51275254,' '61237441,34,54,11,20,5254181873,13656398235,452215', 'domain_busy:nwk_legacy': '5254181873', 'domain_busy:dnscache': '59', 'domain_busy:exempt': '5688808118', 'domain_busy:hostos': '13656398235', 'domain_busy:sm_exempt': '61237441', 'domain_busy:nwk_exclusive': '11', 'domain_busy:idle': '1028851855595', 'domain_busy:ssan_exempt': '452215', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'processor_elapsed_time': '1063283843318', 'instance-name': 'processor0', 'timestamp': '1453573777', }, { 'domain_busy:kahuna': '1978024846', 'timestamp': '1453573777', 'domain_busy:cifs': '318584', 'domain_busy:raid_exempt': '0', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'domain_busy:target': '3330956', 'domain_busy:nwk_exempt': '0', 'domain_busy:raid': '722235930', 'domain_busy:storage': '1498890708', 'instance-name': 'processor1', 'domain_busy:cluster': '0', 'domain_busy:wafl_xcleaner': '50122685', 'domain_busy:wafl_exempt': '1265921369', 'domain_busy:protocol': '0', 'domain_busy': '1039557880852,1978024846,1498890708,3734060289,' '722235930,0,3330956,0,318584,1265921369,50122685,' '36417362,0,0,0,0,2815252976,10274810484,393451', 'domain_busy:nwk_legacy': '2815252976', 'domain_busy:dnscache': '0', 'domain_busy:exempt': '3734060289', 'domain_busy:hostos': '10274810484', 'domain_busy:sm_exempt': '36417362', 
'domain_busy:nwk_exclusive': '0', 'domain_busy:idle': '1039557880852', 'domain_busy:ssan_exempt': '393451', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'processor_elapsed_time': '1063283843321', 'instance-name': 'processor1', 'timestamp': '1453573777', } ] COUNTERS_T2 = [ { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'avg_processor_busy': '29081228905', 'instance-name': 'system', 'timestamp': '1453573834', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time': '1063340792148', 'instance-name': 'system', 'timestamp': '1453573834', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time1': '1063340792148', 'instance-name': 'system', 'timestamp': '1453573834', }, { 'cp_phase_times:p2a_snap': '714', 'cp_phase_times:p4_finish': '14897', 'cp_phase_times:setup': '581', 'cp_phase_times:p2a_dlog1': '6019', 'cp_phase_times:p2a_dlog2': '2328', 'cp_phase_times:p2v_cont': '2479', 'cp_phase_times:p2v_volinfo': '1138', 'cp_phase_times:p2v_bm': '3484', 'cp_phase_times:p2v_fsinfo': '2031', 'cp_phase_times:p2a_inofile': '356', 'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,' '427,1058,354,3484,5135,1460,1138,2479,356,1373,' '6019,9,2328,2257,229,493,1275,0,6059,714,530215,' '21604863,0,0,3286,11076392,22001,14897,36', 'cp_phase_times:p2v_dlog2': '377', 'instance-name': 'wafl', 'cp_phase_times:p3_wait': '0', 'cp_phase_times:p2a_bm': '6059', 'cp_phase_times:p1_quota': '498', 'cp_phase_times:p2v_inofile': '839', 'cp_phase_times:p2a_refcount': '493', 'cp_phase_times:p2a_fsinfo': '2257', 'cp_phase_times:p2a_hyabc': '0', 'cp_phase_times:p2a_volinfo': '530215', 'cp_phase_times:pre_p0': '5007', 'cp_phase_times:p2a_hya': '9', 'cp_phase_times:p0_snap_del': '1840', 'cp_phase_times:p2a_ino': '1373', 'cp_phase_times:p2v_df_scores_sub': '354', 'cp_phase_times:p2v_ino_pub': '799', 'cp_phase_times:p2a_ipu_bitmap_grow': '229', 
'cp_phase_times:p2v_refcount': '427', 'timestamp': '1453573834', 'cp_phase_times:p2v_dlog1': '0', 'cp_phase_times:p2_finish': '0', 'cp_phase_times:p1_clean': '9832', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'cp_phase_times:p3a_volinfo': '11076392', 'cp_phase_times:p2a_topaa': '1275', 'cp_phase_times:p2_flush': '21604863', 'cp_phase_times:p2v_df_scores': '1460', 'cp_phase_times:ipu_disk_add': '0', 'cp_phase_times:p2v_snap': '5135', 'cp_phase_times:p5_finish': '36', 'cp_phase_times:p2v_ino_pri': '1336', 'cp_phase_times:p3v_volinfo': '3286', 'cp_phase_times:p2v_topaa': '1058', 'cp_phase_times:p3_finish': '22001', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'total_cp_msecs': '33311106', 'instance-name': 'wafl', 'timestamp': '1453573834', }, { 'domain_busy:kahuna': '2712629374', 'timestamp': '1453573834', 'domain_busy:cifs': '434036', 'domain_busy:raid_exempt': '28', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy:target': '6461082', 'domain_busy:nwk_exempt': '20', 'domain_busy:raid': '722136824', 'domain_busy:storage': '2253260824', 'instance-name': 'processor0', 'domain_busy:cluster': '34', 'domain_busy:wafl_xcleaner': '51277506', 'domain_busy:wafl_exempt': '1243637154', 'domain_busy:protocol': '54', 'domain_busy': '1028906640232,2712629374,2253260824,5689093500,' '722136824,28,6461082,59,434036,1243637154,51277506,' '61240335,34,54,11,20,5254491236,13657992139,452215', 'domain_busy:nwk_legacy': '5254491236', 'domain_busy:dnscache': '59', 'domain_busy:exempt': '5689093500', 'domain_busy:hostos': '13657992139', 'domain_busy:sm_exempt': '61240335', 'domain_busy:nwk_exclusive': '11', 'domain_busy:idle': '1028906640232', 'domain_busy:ssan_exempt': '452215', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'processor_elapsed_time': '1063341351916', 'instance-name': 'processor0', 'timestamp': '1453573834', }, { 'domain_busy:kahuna': 
'1978217049', 'timestamp': '1453573834', 'domain_busy:cifs': '318584', 'domain_busy:raid_exempt': '0', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'domain_busy:target': '3331147', 'domain_busy:nwk_exempt': '0', 'domain_busy:raid': '722276805', 'domain_busy:storage': '1498984059', 'instance-name': 'processor1', 'domain_busy:cluster': '0', 'domain_busy:wafl_xcleaner': '50126176', 'domain_busy:wafl_exempt': '1266039846', 'domain_busy:protocol': '0', 'domain_busy': '1039613222253,1978217049,1498984059,3734279672,' '722276805,0,3331147,0,318584,1266039846,50126176,' '36419297,0,0,0,0,2815435865,10276068104,393451', 'domain_busy:nwk_legacy': '2815435865', 'domain_busy:dnscache': '0', 'domain_busy:exempt': '3734279672', 'domain_busy:hostos': '10276068104', 'domain_busy:sm_exempt': '36419297', 'domain_busy:nwk_exclusive': '0', 'domain_busy:idle': '1039613222253', 'domain_busy:ssan_exempt': '393451', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'processor_elapsed_time': '1063341351919', 'instance-name': 'processor1', 'timestamp': '1453573834', }, ] SYSTEM_INSTANCE_UUIDS = ['cluster1-01:kernel:system'] SYSTEM_INSTANCE_NAMES = ['system'] SYSTEM_COUNTERS = [ { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'avg_processor_busy': '27877641199', 'instance-name': 'system', 'timestamp': '1453524928', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time': '1014438541279', 'instance-name': 'system', 'timestamp': '1453524928', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time1': '1014438541279', 'instance-name': 'system', 'timestamp': '1453524928', }, ] WAFL_INSTANCE_UUIDS = ['cluster1-01:kernel:wafl'] WAFL_INSTANCE_NAMES = ['wafl'] WAFL_COUNTERS = [ { 'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,' '418,1048,344,3344,4867,1397,1101,2380,356,1318,' 
'5954,9,2236,2190,228,476,1221,0,5838,696,515588,' '20542954,0,0,3122,10567367,20696,13982,36', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'instance-name': 'wafl', 'timestamp': '1453523339', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'total_cp_msecs': '31721222', 'instance-name': 'wafl', 'timestamp': '1453523339', }, ] WAFL_CP_PHASE_TIMES_COUNTER_INFO = { 'labels': [ 'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD', 'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1', 'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', 'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE', 'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO', 'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM', 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT', 'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH', ], 'name': 'cp_phase_times', } EXPANDED_WAFL_COUNTERS = [ { 'cp_phase_times:p2a_snap': '696', 'cp_phase_times:p4_finish': '13982', 'cp_phase_times:setup': '563', 'cp_phase_times:p2a_dlog1': '5954', 'cp_phase_times:p2a_dlog2': '2236', 'cp_phase_times:p2v_cont': '2380', 'cp_phase_times:p2v_volinfo': '1101', 'cp_phase_times:p2v_bm': '3344', 'cp_phase_times:p2v_fsinfo': '1937', 'cp_phase_times:p2a_inofile': '356', 'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,' '418,1048,344,3344,4867,1397,1101,2380,356,1318,' '5954,9,2236,2190,228,476,1221,0,5838,696,515588,' '20542954,0,0,3122,10567367,20696,13982,36', 'cp_phase_times:p2v_dlog2': '359', 'instance-name': 'wafl', 'cp_phase_times:p3_wait': '0', 'cp_phase_times:p2a_bm': '5838', 'cp_phase_times:p1_quota': '469', 'cp_phase_times:p2v_inofile': '821', 'cp_phase_times:p2a_refcount': '476', 'cp_phase_times:p2a_fsinfo': '2190', 'cp_phase_times:p2a_hyabc': '0', 'cp_phase_times:p2a_volinfo': '515588', 'cp_phase_times:pre_p0': '4844', 
'cp_phase_times:p2a_hya': '9', 'cp_phase_times:p0_snap_del': '1731', 'cp_phase_times:p2a_ino': '1318', 'cp_phase_times:p2v_df_scores_sub': '344', 'cp_phase_times:p2v_ino_pub': '763', 'cp_phase_times:p2a_ipu_bitmap_grow': '228', 'cp_phase_times:p2v_refcount': '418', 'timestamp': '1453523339', 'cp_phase_times:p2v_dlog1': '0', 'cp_phase_times:p2_finish': '0', 'cp_phase_times:p1_clean': '9676', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'cp_phase_times:p3a_volinfo': '10567367', 'cp_phase_times:p2a_topaa': '1221', 'cp_phase_times:p2_flush': '20542954', 'cp_phase_times:p2v_df_scores': '1397', 'cp_phase_times:ipu_disk_add': '0', 'cp_phase_times:p2v_snap': '4867', 'cp_phase_times:p5_finish': '36', 'cp_phase_times:p2v_ino_pri': '1282', 'cp_phase_times:p3v_volinfo': '3122', 'cp_phase_times:p2v_topaa': '1048', 'cp_phase_times:p3_finish': '20696', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'total_cp_msecs': '31721222', 'instance-name': 'wafl', 'timestamp': '1453523339', }, ] PROCESSOR_INSTANCE_UUIDS = [ 'cluster1-01:kernel:processor0', 'cluster1-01:kernel:processor1', ] PROCESSOR_INSTANCE_NAMES = ['processor0', 'processor1'] SERVER_METADATA = { 'share_type_id': 'fake_id', 'host': 'fake_host', } SERVER_METADATA_WITH_ENCRYPTION = { 'share_type_id': 'fake_id', 'host': 'fake_host', 'encryption_key_ref': 'fake_key', 'keystone_url': 'fake_keystone_url', 'application_credential_id': 'fake_app_cred_id', 'application_credential_secret': 'fake_app_cred_secret', 'request_host': 'fake_host@pool' } PROCESSOR_COUNTERS = [ { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy': '980648687811,2597164534,2155400686,5443901498,' '690280568,28,6180773,59,413895,1190100947,48989575,' '58549809,34,54,11,20,5024141791,13136260754,452215', 'instance-name': 'processor0', 'timestamp': '1453524150', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 
'processor_elapsed_time': '1013660714257', 'instance-name': 'processor0', 'timestamp': '1453524150', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'domain_busy': '990957980543,1891766637,1433411516,3572427934,' '691372324,0,3188648,0,305947,1211235777,47954620,' '34832715,0,0,0,0,2692084482,9834648927,393451', 'instance-name': 'processor1', 'timestamp': '1453524150', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'processor_elapsed_time': '1013660714261', 'instance-name': 'processor1', 'timestamp': '1453524150', }, ] PROCESSOR_DOMAIN_BUSY_COUNTER_INFO = { 'labels': [ 'idle', 'kahuna', 'storage', 'exempt', 'raid', 'raid_exempt', 'target', 'dnscache', 'cifs', 'wafl_exempt', 'wafl_xcleaner', 'sm_exempt', 'cluster', 'protocol', 'nwk_exclusive', 'nwk_exempt', 'nwk_legacy', 'hostOS', 'ssan_exempt', ], 'name': 'domain_busy', } EXPANDED_PROCESSOR_COUNTERS = [ { 'domain_busy:kahuna': '2597164534', 'timestamp': '1453524150', 'domain_busy:cifs': '413895', 'domain_busy:raid_exempt': '28', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy:target': '6180773', 'domain_busy:nwk_exempt': '20', 'domain_busy:raid': '690280568', 'domain_busy:storage': '2155400686', 'instance-name': 'processor0', 'domain_busy:cluster': '34', 'domain_busy:wafl_xcleaner': '48989575', 'domain_busy:wafl_exempt': '1190100947', 'domain_busy:protocol': '54', 'domain_busy': '980648687811,2597164534,2155400686,5443901498,' '690280568,28,6180773,59,413895,1190100947,48989575,' '58549809,34,54,11,20,5024141791,13136260754,452215', 'domain_busy:nwk_legacy': '5024141791', 'domain_busy:dnscache': '59', 'domain_busy:exempt': '5443901498', 'domain_busy:hostos': '13136260754', 'domain_busy:sm_exempt': '58549809', 'domain_busy:nwk_exclusive': '11', 'domain_busy:idle': '980648687811', 'domain_busy:ssan_exempt': '452215', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 
'processor_elapsed_time': '1013660714257', 'instance-name': 'processor0', 'timestamp': '1453524150', }, { 'domain_busy:kahuna': '1891766637', 'timestamp': '1453524150', 'domain_busy:cifs': '305947', 'domain_busy:raid_exempt': '0', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'domain_busy:target': '3188648', 'domain_busy:nwk_exempt': '0', 'domain_busy:raid': '691372324', 'domain_busy:storage': '1433411516', 'instance-name': 'processor1', 'domain_busy:cluster': '0', 'domain_busy:wafl_xcleaner': '47954620', 'domain_busy:wafl_exempt': '1211235777', 'domain_busy:protocol': '0', 'domain_busy': '990957980543,1891766637,1433411516,3572427934,' '691372324,0,3188648,0,305947,1211235777,47954620,' '34832715,0,0,0,0,2692084482,9834648927,393451', 'domain_busy:nwk_legacy': '2692084482', 'domain_busy:dnscache': '0', 'domain_busy:exempt': '3572427934', 'domain_busy:hostos': '9834648927', 'domain_busy:sm_exempt': '34832715', 'domain_busy:nwk_exclusive': '0', 'domain_busy:idle': '990957980543', 'domain_busy:ssan_exempt': '393451', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'processor_elapsed_time': '1013660714261', 'instance-name': 'processor1', 'timestamp': '1453524150', }, ] SERVER_MIGRATION_CHECK_NOT_COMPATIBLE = { 'compatible': False, 'writable': None, 'nondisruptive': None, 'preserve_snapshots': None, 'migration_cancel': None, 'migration_get_progress': None, 'share_network_id': None, } CIFS_SECURITY_SERVICE = { 'id': 'fake_id', 'type': 'active_directory', 'password': 'fake_password', 'user': 'fake_user', 'ou': 'fake_ou', 'domain': 'fake_domain', 'dns_ip': 'fake_dns_ip', 'server': 'fake_server', 'default_ad_site': None, } CIFS_SECURITY_SERVICE_2 = { 'id': 'fake_id_2', 'type': 'active_directory', 'password': 'fake_password_2', 'user': 'fake_user_2', 'ou': 'fake_ou_2', 'domain': 'fake_domain_2', 'dns_ip': 'fake_dns_ip_2', 'server': 'fake_server_2', 'default_ad_site': None, } CIFS_SECURITY_SERVICE_3 = { 'id': 
'fake_id_3', 'type': 'active_directory', 'password': 'fake_password_3', 'user': 'fake_user_3', 'ou': 'fake_ou_3', 'domain': 'fake_domain_3', 'dns_ip': 'fake_dns_ip_3', 'default_ad_site': 'fake_default_ad_site', 'server': None } LDAP_LINUX_SECURITY_SERVICE = { 'id': 'fake_id', 'type': 'ldap', 'user': 'fake_user', 'password': 'fake_password', 'server': 'fake_server', 'ou': 'fake_ou', 'dns_ip': None, 'domain': None } LDAP_AD_SECURITY_SERVICE = { 'id': 'fake_id', 'type': 'ldap', 'user': 'fake_user', 'password': 'fake_password', 'domain': 'fake_domain', 'ou': 'fake_ou', 'dns_ip': 'fake_dns_ip', 'server': None, } KERBEROS_SECURITY_SERVICE = { 'id': 'fake_id_3', 'type': 'kerberos', 'password': 'fake_password_3', 'user': 'fake_user_3', 'domain': 'fake_realm', 'dns_ip': 'fake_dns_ip_3', 'server': 'fake_server_3', } KERBEROS_SECURITY_SERVICE_2 = { 'id': 'fake_id_4', 'type': 'kerberos', 'password': 'fake_password_4', 'user': 'fake_user_4', 'domain': 'fake_realm_2', 'dns_ip': 'fake_dns_ip_4', 'server': 'fake_server_4', } SHARE_NETWORK_SUBNET = { 'id': 'fake_share_net_subnet_d', 'neutron_subnet_id': '34950f50-a142-4328-8026-418ad4410b09', 'neutron_net_id': 'fa202676-531a-4446-bc0c-bcec15a72e82', 'network_type': 'fake_network_type', 'segmentation_id': 1234, 'ip_version': 4, 'cidr': 'fake_cidr', 'gateway': 'fake_gateway', 'mtu': 1509, } SHARE_NETWORK = { 'id': 'fake_share_net_id', 'project_id': 'fake_project_id', 'status': 'fake_status', 'name': 'fake_name', 'description': 'fake_description', 'security_services': [CIFS_SECURITY_SERVICE], 'share_network_subnets': [SHARE_NETWORK_SUBNET], } SHARE_TYPE_2 = copy.deepcopy(SHARE_TYPE) SHARE_TYPE_2['id'] = SHARE_TYPE_ID_2 SHARE_TYPE_2['extra_specs'].update(SHARE_TYPE_EXTRA_SPEC) SHARE_REQ_SPEC = { 'share_properties': { 'size': SHARE['size'], 'project_id': SHARE['project_id'], 'snapshot_support': SHARE_TYPE_EXTRA_SPEC['snapshot_support'], 'create_share_from_snapshot_support': SHARE_TYPE_EXTRA_SPEC['create_share_from_snapshot_support'], 
'revert_to_snapshot_support': SHARE_TYPE_EXTRA_SPEC['revert_to_snapshot_support'], 'mount_snapshot_support': SHARE_TYPE_EXTRA_SPEC['mount_snapshot_support'], 'share_proto': SHARE['share_proto'], 'share_type_id': SHARE_TYPE_2['id'], 'is_public': True, 'share_group_id': None, 'source_share_group_snapshot_member_id': None, 'snapshot_id': None, }, 'share_instance_properties': { 'availability_zone_id': 'fake_az_1', 'share_network_id': SHARE_NETWORK['id'], 'share_server_id': SHARE_SERVER['id'], 'share_id': SHARE_ID, 'host': SHARE_INSTANCE['host'], 'status': SHARE_INSTANCE['status'], }, 'share_type': SHARE_TYPE_2, 'share_id': SHARE_ID, } SERVER_MIGRATION_REQUEST_SPEC = { 'shares_size': 10, 'snapshots_size': 10, 'shares_req_spec': [SHARE_REQ_SPEC], } CLIENT_GET_VOLUME_RESPONSE = { 'aggregate': AGGREGATE, 'junction-path': '/%s' % SHARE_NAME, 'name': SHARE_NAME, 'type': 'rw', 'style': 'flex', 'size': SHARE_SIZE, 'owning-vserver-name': VSERVER1, 'qos-policy-group-name': QOS_POLICY_GROUP_NAME, } SHARE_INSTANCE_LIST = [SHARE_INSTANCE] CURRENT_NETWORK_ALLOCATIONS = { 'admin_network_allocations': ADMIN_NETWORK_ALLOCATIONS, 'subnets': [ { 'share_network_subnet_id': '0bdeaa8c6db3-3bc10d67', 'neutron_net_id': '2598-4122-bb62-0bdeaa8c6db3', 'neutron_subnet_id': '3bc10d67-2598-4122-bb62', 'network_allocations': USER_NETWORK_ALLOCATIONS } ] } NEW_NETWORK_ALLOCATIONS = { 'share_network_subnet_id': '0bdeaa8c6db3-3bc10d67', 'neutron_net_id': '2598-4122-bb62-0bdeaa8c6db3', 'neutron_subnet_id': '3bc10d67-2598-4122-bb62', 'network_allocations': USER_NETWORK_ALLOCATIONS } SHARE_BACKUP = { 'id': '242ff47e-518d-4b07-b3c3-0a51e6744149', 'share_id': 'd0a424c3-fee9-4781-9d4a-2c48a63386aa', 'size': SHARE_SIZE, 'host': MANILA_HOST_NAME, 'display_name': 'fake_backup', 'backup_options': {'backend': BACKEND_NAME, 'backup_type': BACKUP_TYPE}, } SNAP_MIRROR_INFO = {'source-vserver': VSERVER1, 'source-volume': FLEXVOL_NAME, 'destination-vserver': VSERVER2, 'destination-volume': FLEXVOL_NAME_1, 
'relationship-status': "idle", 'last-transfer-type': "update", } SERVER_MODEL_UPDATE = { 'server_details': { 'ports': '{"%s": "%s", "%s": "%s"}' % ( USER_NETWORK_ALLOCATIONS[0]['id'], USER_NETWORK_ALLOCATIONS[0]['ip_address'], USER_NETWORK_ALLOCATIONS[1]['id'], USER_NETWORK_ALLOCATIONS[1]['ip_address']) }, 'share_updates': {SHARE_INSTANCE['id']: NFS_EXPORTS[0]}, } def get_config_cmode(): config = na_fakes.create_configuration_cmode() config.local_conf.set_override('share_backend_name', BACKEND_NAME) config.reserved_share_percentage = 5 config.reserved_share_from_snapshot_percentage = 2 config.reserved_share_extend_percentage = 2 config.max_over_subscription_ratio = 2.0 config.netapp_login = CLIENT_KWARGS['username'] config.netapp_password = CLIENT_KWARGS['password'] config.netapp_server_hostname = CLIENT_KWARGS['hostname'] config.netapp_transport_type = CLIENT_KWARGS['transport_type'] config.netapp_ssl_cert_path = CLIENT_KWARGS['ssl_cert_path'] config.netapp_server_port = CLIENT_KWARGS['port'] config.netapp_private_key_file = CLIENT_KWARGS['private_key_file'] config.netapp_certificate_file = CLIENT_KWARGS['certificate_file'] config.netapp_ca_certificate_file = CLIENT_KWARGS['ca_certificate_file'] config.netapp_certificate_host_validation = \ CLIENT_KWARGS['certificate_host_validation'] config.netapp_volume_name_template = VOLUME_NAME_TEMPLATE config.netapp_aggregate_name_search_pattern = AGGREGATE_NAME_SEARCH_PATTERN config.netapp_vserver_name_template = VSERVER_NAME_TEMPLATE config.netapp_root_volume_aggregate = ROOT_VOLUME_AGGREGATE config.netapp_root_volume = ROOT_VOLUME config.netapp_lif_name_template = LIF_NAME_TEMPLATE config.netapp_volume_snapshot_reserve_percent = 8 config.netapp_vserver = VSERVER1 return config def get_network_info(user_network_allocation, admin_network_allocation): net_info = copy.deepcopy(NETWORK_INFO) net_info['network_allocations'] = user_network_allocation net_info['admin_network_allocations'] = admin_network_allocation return 
net_info def fake_get_filter_function(pool=None): return pool if pool else 'filter' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0336702 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/0000775000175000017500000000000000000000000025527 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/__init__.py0000664000175000017500000000000000000000000027626 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/fakes.py0000664000175000017500000000427100000000000027176 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.common import constants SHARE_NAME = 'fake_share' SHARE_ID = '9dba208c-9aa7-11e4-89d3-123b93f75cba' EXPORT_POLICY_NAME = 'policy_9dba208c_9aa7_11e4_89d3_123b93f75cba' SHARE_ADDRESS_1 = '10.10.10.10' SHARE_ADDRESS_2 = '10.10.10.20' CLIENT_ADDRESS_1 = '20.20.20.10' CLIENT_ADDRESS_2 = '20.20.20.20' CIFS_SHARE_PATH = '/%s' % SHARE_NAME CIFS_SHARE_PATH_PARSED = '\\%s' % SHARE_NAME CIFS_SHARE = { 'export_location': r'\\%s%s' % (SHARE_ADDRESS_1, CIFS_SHARE_PATH_PARSED), 'id': SHARE_ID } NFS_SHARE_PATH = '/%s' % SHARE_NAME NFS_SHARE = { 'export_location': '%s:%s' % (SHARE_ADDRESS_1, NFS_SHARE_PATH), 'id': SHARE_ID } IP_ACCESS = { 'access_type': 'ip', 'access_to': CLIENT_ADDRESS_1, 'access_level': constants.ACCESS_LEVEL_RW, } USER_ACCESS = { 'access_type': 'user', 'access_to': 'fake_user', 'access_level': constants.ACCESS_LEVEL_RW, } VOLUME = { 'name': SHARE_NAME, } NEW_NFS_RULES = { '10.10.10.0/30': constants.ACCESS_LEVEL_RW, '10.10.10.0/24': constants.ACCESS_LEVEL_RO, '10.10.10.10': constants.ACCESS_LEVEL_RW, '10.10.20.0/24': constants.ACCESS_LEVEL_RW, '10.10.20.10': constants.ACCESS_LEVEL_RW, } EXISTING_CIFS_RULES = { 'user1': constants.ACCESS_LEVEL_RW, 'user2': constants.ACCESS_LEVEL_RO, 'user3': constants.ACCESS_LEVEL_RW, 'user4': constants.ACCESS_LEVEL_RO, } NEW_CIFS_RULES = { 'user1': constants.ACCESS_LEVEL_RW, 'user2': constants.ACCESS_LEVEL_RW, 'user3': constants.ACCESS_LEVEL_RO, 'user5': constants.ACCESS_LEVEL_RW, 'user6': constants.ACCESS_LEVEL_RO, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/test_base.py0000664000175000017500000000436000000000000030055 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp driver protocols base class module. """ import ddt from manila.common import constants from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode from manila import test @ddt.ddt class NetAppNASHelperBaseTestCase(test.TestCase): def test_set_client(self): # The base class is abstract, so we'll use a subclass to test # base class functionality. helper = nfs_cmode.NetAppCmodeNFSHelper() self.assertIsNone(helper._client) helper.set_client('fake_client') self.assertEqual('fake_client', helper._client) @ddt.data( {'level': constants.ACCESS_LEVEL_RW, 'readonly': False}, {'level': constants.ACCESS_LEVEL_RO, 'readonly': True}) @ddt.unpack def test_is_readonly(self, level, readonly): helper = nfs_cmode.NetAppCmodeNFSHelper() result = helper._is_readonly(level) self.assertEqual(readonly, result) @ddt.data( {'share': {'export_location': 'fake_export'}, 'expected_export': 'fake_export'}, {'share': {'export_locations': [{'path': 'fake_export'}]}, 'expected_export': 'fake_export'}, {'share': {'export_locations': 'error_type'}, 'expected_export': None}, {'share': {'export_locations': []}, 'expected_export': None}, {'share': {}, 'expected_export': None}) @ddt.unpack def test__get_share_export_location(self, share, expected_export): helper = nfs_cmode.NetAppCmodeNFSHelper() result = helper._get_share_export_location(share) self.assertEqual(expected_export, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 
manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/test_cifs_cmode.py0000664000175000017500000002415700000000000031244 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp driver protocols CIFS class module. """ import copy from unittest import mock import ddt from manila.common import constants from manila import exception from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode from manila import test from manila.tests.share.drivers.netapp.dataontap.protocols \ import fakes as fake @ddt.ddt class NetAppClusteredCIFSHelperTestCase(test.TestCase): def setUp(self): super(NetAppClusteredCIFSHelperTestCase, self).setUp() self.mock_context = mock.Mock() self.mock_client = mock.Mock() self.helper = cifs_cmode.NetAppCmodeCIFSHelper() self.helper.set_client(self.mock_client) @ddt.data({'replica': True, 'cifs_exist': False}, {'replica': False, 'cifs_exist': True}) @ddt.unpack def test_create_share(self, replica, cifs_exist): self.mock_client.cifs_share_exists.return_value = cifs_exist self.mock_client.get_volume_junction_path.return_value = ( fake.CIFS_SHARE_PATH) result = self.helper.create_share( fake.CIFS_SHARE, fake.SHARE_NAME, replica=replica) export_addresses = [fake.SHARE_ADDRESS_1, fake.SHARE_ADDRESS_2] export_paths = [result(address) for address in export_addresses] expected_paths = [ r'\\%s%s' % (fake.SHARE_ADDRESS_1, 
fake.CIFS_SHARE_PATH_PARSED), r'\\%s%s' % (fake.SHARE_ADDRESS_2, fake.CIFS_SHARE_PATH_PARSED), ] self.assertEqual(expected_paths, export_paths) self.mock_client.cifs_share_exists.assert_called_once_with( fake.SHARE_NAME) if cifs_exist: self.mock_client.create_cifs_share.assert_not_called() self.mock_client.remove_cifs_share.assert_not_called() else: self.mock_client.create_cifs_share.assert_called_once_with( fake.SHARE_NAME, fake.CIFS_SHARE_PATH) self.mock_client.remove_cifs_share_access.assert_called_once_with( fake.SHARE_NAME, 'Everyone') if replica: self.mock_client.set_volume_security_style.assert_not_called() else: self.mock_client.set_volume_security_style.assert_called_once_with( fake.SHARE_NAME, security_style='ntfs') def test_create_share_ensure_not_exist_error(self): self.mock_client.cifs_share_exists.return_value = False self.assertRaises(exception.NetAppException, self.helper.create_share, fake.CIFS_SHARE, fake.SHARE_NAME, ensure_share_already_exists=True) def test_delete_share(self): self.helper.delete_share(fake.CIFS_SHARE, fake.SHARE_NAME) self.mock_client.remove_cifs_share.assert_called_once_with( fake.SHARE_NAME) def test_update_access(self): mock_validate_access_rule = self.mock_object(self.helper, '_validate_access_rule') mock_get_access_rules = self.mock_object( self.helper, '_get_access_rules', mock.Mock(return_value=fake.EXISTING_CIFS_RULES)) mock_handle_added_rules = self.mock_object(self.helper, '_handle_added_rules') mock_handle_ro_to_rw_rules = self.mock_object(self.helper, '_handle_ro_to_rw_rules') mock_handle_rw_to_ro_rules = self.mock_object(self.helper, '_handle_rw_to_ro_rules') mock_handle_deleted_rules = self.mock_object(self.helper, '_handle_deleted_rules') self.helper.update_access(fake.CIFS_SHARE, fake.SHARE_NAME, [fake.USER_ACCESS]) new_rules = {'fake_user': constants.ACCESS_LEVEL_RW} mock_validate_access_rule.assert_called_once_with(fake.USER_ACCESS) mock_get_access_rules.assert_called_once_with(fake.CIFS_SHARE, fake.SHARE_NAME) 
mock_handle_added_rules.assert_called_once_with( fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, new_rules) mock_handle_ro_to_rw_rules.assert_called_once_with( fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, new_rules) mock_handle_rw_to_ro_rules.assert_called_once_with( fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, new_rules) mock_handle_deleted_rules.assert_called_once_with( fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, new_rules) def test_validate_access_rule(self): result = self.helper._validate_access_rule(fake.USER_ACCESS) self.assertIsNone(result) def test_validate_access_rule_invalid_type(self): rule = copy.copy(fake.USER_ACCESS) rule['access_type'] = 'ip' self.assertRaises(exception.InvalidShareAccess, self.helper._validate_access_rule, rule) def test_validate_access_rule_invalid_level(self): rule = copy.copy(fake.USER_ACCESS) rule['access_level'] = 'none' self.assertRaises(exception.InvalidShareAccessLevel, self.helper._validate_access_rule, rule) def test_handle_added_rules(self): self.helper._handle_added_rules(fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, fake.NEW_CIFS_RULES) self.mock_client.add_cifs_share_access.assert_has_calls([ mock.call(fake.SHARE_NAME, 'user5', False), mock.call(fake.SHARE_NAME, 'user6', True), ], any_order=True) def test_handle_ro_to_rw_rules(self): self.helper._handle_ro_to_rw_rules(fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, fake.NEW_CIFS_RULES) self.mock_client.modify_cifs_share_access.assert_has_calls([ mock.call(fake.SHARE_NAME, 'user2', False) ]) def test_handle_rw_to_ro_rules(self): self.helper._handle_rw_to_ro_rules(fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, fake.NEW_CIFS_RULES) self.mock_client.modify_cifs_share_access.assert_has_calls([ mock.call(fake.SHARE_NAME, 'user3', True) ]) def test_handle_deleted_rules(self): self.helper._handle_deleted_rules(fake.SHARE_NAME, fake.EXISTING_CIFS_RULES, fake.NEW_CIFS_RULES) self.mock_client.remove_cifs_share_access.assert_has_calls([ mock.call(fake.SHARE_NAME, 'user4') ]) def 
test_get_access_rules(self): self.mock_client.get_cifs_share_access = ( mock.Mock(return_value='fake_rules')) result = self.helper._get_access_rules(fake.CIFS_SHARE, fake.SHARE_NAME) self.assertEqual('fake_rules', result) self.mock_client.get_cifs_share_access.assert_called_once_with( fake.SHARE_NAME) def test_get_target(self): target = self.helper.get_target(fake.CIFS_SHARE) self.assertEqual(fake.SHARE_ADDRESS_1, target) def test_get_target_missing_location(self): target = self.helper.get_target({'export_location': ''}) self.assertEqual('', target) def test_get_share_name_for_share(self): self.mock_client.get_volume_at_junction_path.return_value = ( fake.VOLUME) share_name = self.helper.get_share_name_for_share(fake.CIFS_SHARE) self.assertEqual(fake.SHARE_NAME, share_name) self.mock_client.get_volume_at_junction_path.assert_called_once_with( fake.CIFS_SHARE_PATH) def test_get_share_name_for_share_not_found(self): self.mock_client.get_volume_at_junction_path.return_value = None share_name = self.helper.get_share_name_for_share(fake.CIFS_SHARE) self.assertIsNone(share_name) self.mock_client.get_volume_at_junction_path.assert_called_once_with( fake.CIFS_SHARE_PATH) @ddt.data( { 'location': r'\\%s\%s' % (fake.SHARE_ADDRESS_1, fake.SHARE_NAME), 'ip': fake.SHARE_ADDRESS_1, 'share_name': fake.SHARE_NAME, }, { 'location': r'//%s/%s' % (fake.SHARE_ADDRESS_1, fake.SHARE_NAME), 'ip': fake.SHARE_ADDRESS_1, 'share_name': fake.SHARE_NAME, }, {'location': '', 'ip': '', 'share_name': ''}, {'location': 'invalid', 'ip': '', 'share_name': ''}, ) @ddt.unpack def test_get_export_location(self, location, ip, share_name): share = fake.CIFS_SHARE.copy() share['export_location'] = location self.mock_object(self.helper, '_get_share_export_location', mock.Mock(return_value=location)) result_ip, result_share_name = self.helper._get_export_location(share) self.assertEqual(ip, result_ip) self.assertEqual(share_name, result_share_name) 
self.helper._get_share_export_location.assert_called_once_with(share) def test_cleanup_demoted_replica(self): self.helper.cleanup_demoted_replica(fake.CIFS_SHARE, fake.SHARE_NAME) self.mock_client.remove_cifs_share.assert_called_once_with( fake.SHARE_NAME) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/dataontap/protocols/test_nfs_cmode.py0000664000175000017500000002332700000000000031104 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp driver protocols NFS class module. 
""" import copy from unittest import mock import uuid import ddt from manila import exception from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode from manila import test from manila.tests.share.drivers.netapp.dataontap.protocols \ import fakes as fake @ddt.ddt class NetAppClusteredNFSHelperTestCase(test.TestCase): def setUp(self): super(NetAppClusteredNFSHelperTestCase, self).setUp() self.mock_context = mock.Mock() self.mock_client = mock.Mock() self.helper = nfs_cmode.NetAppCmodeNFSHelper() self.helper.set_client(self.mock_client) @ddt.data(('1.2.3.4', '1.2.3.4'), ('fc00::1', '[fc00::1]')) @ddt.unpack def test__escaped_address(self, raw, escaped): self.assertEqual(escaped, self.helper._escaped_address(raw)) @ddt.data(True, False) def test_create_share(self, is_flexgroup): mock_ensure_export_policy = self.mock_object(self.helper, '_ensure_export_policy') self.mock_client.get_volume_junction_path.return_value = ( fake.NFS_SHARE_PATH) self.mock_client.get_volume.return_value = { 'junction-path': fake.NFS_SHARE_PATH, } result = self.helper.create_share(fake.NFS_SHARE, fake.SHARE_NAME, is_flexgroup=is_flexgroup) export_addresses = [fake.SHARE_ADDRESS_1, fake.SHARE_ADDRESS_2] export_paths = [result(address) for address in export_addresses] expected_paths = [ fake.SHARE_ADDRESS_1 + ":" + fake.NFS_SHARE_PATH, fake.SHARE_ADDRESS_2 + ":" + fake.NFS_SHARE_PATH, ] self.assertEqual(expected_paths, export_paths) (self.mock_client.clear_nfs_export_policy_for_volume. assert_called_once_with(fake.SHARE_NAME)) self.assertTrue(mock_ensure_export_policy.called) if is_flexgroup: self.assertTrue(self.mock_client.get_volume.called) else: self.assertTrue(self.mock_client.get_volume_junction_path.called) def test_delete_share(self): self.helper.delete_share(fake.NFS_SHARE, fake.SHARE_NAME) (self.mock_client.clear_nfs_export_policy_for_volume. 
assert_called_once_with(fake.SHARE_NAME)) self.mock_client.soft_delete_nfs_export_policy.assert_called_once_with( fake.EXPORT_POLICY_NAME) def test_update_access(self): self.mock_object(self.helper, '_ensure_export_policy') self.mock_object(self.helper, '_get_export_policy_name', mock.Mock(return_value='fake_export_policy')) self.mock_object(self.helper, '_get_temp_export_policy_name', mock.Mock(side_effect=['fake_new_export_policy', 'fake_old_export_policy'])) fake_auth_method = 'fake_auth_method' self.mock_object(self.helper, '_get_auth_methods', mock.Mock(return_value=fake_auth_method)) self.helper.update_access(fake.CIFS_SHARE, fake.SHARE_NAME, [fake.IP_ACCESS]) self.mock_client.create_nfs_export_policy.assert_called_once_with( 'fake_new_export_policy') self.mock_client.add_nfs_export_rule.assert_called_once_with( 'fake_new_export_policy', fake.CLIENT_ADDRESS_1, False, fake_auth_method) (self.mock_client.set_nfs_export_policy_for_volume. assert_called_once_with(fake.SHARE_NAME, 'fake_new_export_policy')) (self.mock_client.soft_delete_nfs_export_policy. 
assert_called_once_with('fake_old_export_policy')) self.mock_client.rename_nfs_export_policy.assert_has_calls([ mock.call('fake_export_policy', 'fake_old_export_policy'), mock.call('fake_new_export_policy', 'fake_export_policy'), ]) def test_validate_access_rule(self): result = self.helper._validate_access_rule(fake.IP_ACCESS) self.assertIsNone(result) def test_validate_access_rule_invalid_type(self): rule = copy.copy(fake.IP_ACCESS) rule['access_type'] = 'user' self.assertRaises(exception.InvalidShareAccess, self.helper._validate_access_rule, rule) def test_validate_access_rule_invalid_level(self): rule = copy.copy(fake.IP_ACCESS) rule['access_level'] = 'none' self.assertRaises(exception.InvalidShareAccessLevel, self.helper._validate_access_rule, rule) def test_get_target(self): target = self.helper.get_target(fake.NFS_SHARE) self.assertEqual(fake.SHARE_ADDRESS_1, target) def test_get_share_name_for_share(self): self.mock_client.get_volume_at_junction_path.return_value = ( fake.VOLUME) share_name = self.helper.get_share_name_for_share(fake.NFS_SHARE) self.assertEqual(fake.SHARE_NAME, share_name) self.mock_client.get_volume_at_junction_path.assert_called_once_with( fake.NFS_SHARE_PATH) def test_get_share_name_for_share_not_found(self): self.mock_client.get_volume_at_junction_path.return_value = None share_name = self.helper.get_share_name_for_share(fake.NFS_SHARE) self.assertIsNone(share_name) self.mock_client.get_volume_at_junction_path.assert_called_once_with( fake.NFS_SHARE_PATH) def test_get_target_missing_location(self): target = self.helper.get_target({'export_location': ''}) self.assertEqual('', target) def test_get_export_location(self): export = fake.NFS_SHARE['export_location'] self.mock_object(self.helper, '_get_share_export_location', mock.Mock(return_value=export)) host_ip, export_path = self.helper._get_export_location( fake.NFS_SHARE) self.assertEqual(fake.SHARE_ADDRESS_1, host_ip) self.assertEqual('/' + fake.SHARE_NAME, export_path) @ddt.data('', 
'invalid') def test_get_export_location_missing_location_invalid(self, export): fake_share = fake.NFS_SHARE.copy() fake_share['export_location'] = export self.mock_object(self.helper, '_get_share_export_location', mock.Mock(return_value=export)) host_ip, export_path = self.helper._get_export_location(fake_share) self.assertEqual('', host_ip) self.assertEqual('', export_path) self.helper._get_share_export_location.assert_called_once_with( fake_share) def test_get_temp_export_policy_name(self): self.mock_object(uuid, 'uuid1', mock.Mock(return_value='fake-uuid')) result = self.helper._get_temp_export_policy_name() self.assertEqual('temp_fake_uuid', result) def test_get_export_policy_name(self): result = self.helper._get_export_policy_name(fake.NFS_SHARE) self.assertEqual(fake.EXPORT_POLICY_NAME, result) def test_ensure_export_policy_equal(self): self.mock_client.get_nfs_export_policy_for_volume.return_value = ( fake.EXPORT_POLICY_NAME) self.helper._ensure_export_policy(fake.NFS_SHARE, fake.SHARE_NAME) self.assertFalse(self.mock_client.create_nfs_export_policy.called) self.assertFalse(self.mock_client.rename_nfs_export_policy.called) def test_ensure_export_policy_default(self): self.mock_client.get_nfs_export_policy_for_volume.return_value = ( 'default') self.helper._ensure_export_policy(fake.NFS_SHARE, fake.SHARE_NAME) self.mock_client.create_nfs_export_policy.assert_called_once_with( fake.EXPORT_POLICY_NAME) (self.mock_client.set_nfs_export_policy_for_volume. 
assert_called_once_with(fake.SHARE_NAME, fake.EXPORT_POLICY_NAME)) self.assertFalse(self.mock_client.rename_nfs_export_policy.called) def test_ensure_export_policy_rename(self): self.mock_client.get_nfs_export_policy_for_volume.return_value = 'fake' self.helper._ensure_export_policy(fake.NFS_SHARE, fake.SHARE_NAME) self.assertFalse(self.mock_client.create_nfs_export_policy.called) self.mock_client.rename_nfs_export_policy.assert_called_once_with( 'fake', fake.EXPORT_POLICY_NAME) @ddt.data((False, ['sys']), (True, ['krb5', 'krb5i', 'krb5p'])) @ddt.unpack def test__get_security_flavors(self, kerberos_enabled, security_flavors): self.mock_client.is_kerberos_enabled.return_value = kerberos_enabled result = self.helper._get_auth_methods() self.assertEqual(security_flavors, result) def test_cleanup_demoted_replica(self): self.mock_object(self.helper, 'delete_share') self.helper.cleanup_demoted_replica(fake.NFS_SHARE, fake.SHARE_NAME) self.helper.delete_share.assert_called_once_with(fake.NFS_SHARE, fake.SHARE_NAME) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/fakes.py0000664000175000017500000000267200000000000023202 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila.share import configuration as conf from manila.share import driver as manila_opts from manila.share.drivers.netapp import options as na_opts def create_configuration(): config = conf.Configuration(None) config.append_config_values(manila_opts.share_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_certificateauth_opts) config.append_config_values(na_opts.netapp_provisioning_opts) config.append_config_values(na_opts.netapp_backup_opts) return config def create_configuration_cmode(): config = create_configuration() config.append_config_values(na_opts.netapp_support_opts) return config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/test_common.py0000664000175000017500000001436200000000000024437 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from manila import exception from manila.share.drivers.netapp import common as na_common from manila.share.drivers.netapp.dataontap.cluster_mode import drv_multi_svm from manila.share.drivers.netapp import utils as na_utils from manila import test from manila.tests.share.drivers.netapp import fakes as na_fakes class NetAppDriverFactoryTestCase(test.TestCase): def test_new(self): self.mock_object(na_utils.OpenStackInfo, 'info', mock.Mock(return_value='fake_info')) mock_get_driver_mode = self.mock_object( na_common.NetAppDriver, '_get_driver_mode', mock.Mock(return_value='fake_mode')) mock_create_driver = self.mock_object(na_common.NetAppDriver, '_create_driver') config = na_fakes.create_configuration() config.netapp_storage_family = 'fake_family' config.driver_handles_share_servers = True kwargs = {'configuration': config} na_common.NetAppDriver(**kwargs) kwargs['app_version'] = 'fake_info' mock_get_driver_mode.assert_called_once_with('fake_family', True) mock_create_driver.assert_called_once_with('fake_family', 'fake_mode', *(), **kwargs) def test_new_missing_config(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, '_create_driver') self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **{}) def test_new_missing_family(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, '_create_driver') config = na_fakes.create_configuration() config.driver_handles_share_servers = True config.netapp_storage_family = None kwargs = {'configuration': config} self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **kwargs) def test_new_missing_mode(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, '_create_driver') config = na_fakes.create_configuration() config.driver_handles_share_servers = None config.netapp_storage_family = 'fake_family' kwargs = {'configuration': config} 
self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **kwargs) def test_get_driver_mode_missing_mode_good_default(self): result = na_common.NetAppDriver._get_driver_mode('ONTAP_CLUSTER', None) self.assertEqual(na_common.MULTI_SVM, result) def test_create_driver_missing_mode_no_default(self): self.assertRaises(exception.InvalidInput, na_common.NetAppDriver._get_driver_mode, 'fake_family', None) def test_get_driver_mode_multi_svm(self): result = na_common.NetAppDriver._get_driver_mode('ONTAP_CLUSTER', True) self.assertEqual(na_common.MULTI_SVM, result) def test_get_driver_mode_single_svm(self): result = na_common.NetAppDriver._get_driver_mode('ONTAP_CLUSTER', False) self.assertEqual(na_common.SINGLE_SVM, result) def test_create_driver(self): def get_full_class_name(obj): return obj.__module__ + '.' + obj.__class__.__name__ registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY for family in iter(registry.keys()): for mode, full_class_name in registry[family].items(): config = na_fakes.create_configuration() config.local_conf.set_override('driver_handles_share_servers', mode == na_common.MULTI_SVM) kwargs = { 'configuration': config, 'private_storage': mock.Mock(), 'app_version': 'fake_info' } driver = na_common.NetAppDriver._create_driver( family, mode, **kwargs) self.assertEqual(full_class_name, get_full_class_name(driver)) def test_create_driver_case_insensitive(self): config = na_fakes.create_configuration() config.local_conf.set_override('driver_handles_share_servers', True) kwargs = { 'configuration': config, 'private_storage': mock.Mock(), 'app_version': 'fake_info' } driver = na_common.NetAppDriver._create_driver('ONTAP_CLUSTER', na_common.MULTI_SVM, **kwargs) self.assertIsInstance(driver, drv_multi_svm.NetAppCmodeMultiSvmShareDriver) def test_create_driver_invalid_family(self): kwargs = { 'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info', } self.assertRaises(exception.InvalidInput, na_common.NetAppDriver._create_driver, 
'fake_family', na_common.MULTI_SVM, **kwargs) def test_create_driver_invalid_mode(self): kwargs = { 'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info', } self.assertRaises(exception.InvalidInput, na_common.NetAppDriver._create_driver, 'ontap_cluster', 'fake_mode', **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/netapp/test_utils.py0000664000175000017500000005213200000000000024304 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Mock unit tests for the NetApp driver utility module """ import platform from unittest import mock import ddt from oslo_concurrency import processutils as putils from oslo_log import log from manila import exception from manila.share.drivers.netapp import utils as na_utils from manila import test from manila.tests.share.drivers.netapp.dataontap import fakes as fake from manila import version @ddt.ddt class NetAppDriverUtilsTestCase(test.TestCase): def setUp(self): super(NetAppDriverUtilsTestCase, self).setUp() # Mock loggers as themselves to allow logger arg validation mock_logger = log.getLogger('mock_logger') self.mock_object(na_utils.LOG, 'warning', mock.Mock(side_effect=mock_logger.warning)) self.mock_object(na_utils.LOG, 'debug', mock.Mock(side_effect=mock_logger.debug)) na_utils.setup_tracing(None) def test_round_down(self): self.assertAlmostEqual(na_utils.round_down(5.567), 5.56) self.assertAlmostEqual(na_utils.round_down(5.567, '0.00'), 5.56) self.assertAlmostEqual(na_utils.round_down(5.567, '0.0'), 5.5) self.assertAlmostEqual(na_utils.round_down(5.567, '0'), 5) self.assertAlmostEqual(na_utils.round_down(0, '0.00'), 0) self.assertAlmostEqual(na_utils.round_down(-5.567), -5.56) self.assertAlmostEqual(na_utils.round_down(-5.567, '0.00'), -5.56) self.assertAlmostEqual(na_utils.round_down(-5.567, '0.0'), -5.5) self.assertAlmostEqual(na_utils.round_down(-5.567, '0'), -5) def test_setup_tracing(self): na_utils.setup_tracing(None, api_trace_pattern='(.*)') self.assertFalse(na_utils.TRACE_API) self.assertFalse(na_utils.TRACE_METHOD) self.assertEqual('(.*)', na_utils.API_TRACE_PATTERN) self.assertEqual(0, na_utils.LOG.warning.call_count) na_utils.setup_tracing('method') self.assertFalse(na_utils.TRACE_API) self.assertTrue(na_utils.TRACE_METHOD) self.assertEqual('(.*)', na_utils.API_TRACE_PATTERN) self.assertEqual(0, na_utils.LOG.warning.call_count) na_utils.setup_tracing('method,api', api_trace_pattern='(^fancy-api$)') self.assertTrue(na_utils.TRACE_API) 
self.assertTrue(na_utils.TRACE_METHOD) self.assertEqual('(^fancy-api$)', na_utils.API_TRACE_PATTERN) self.assertEqual(0, na_utils.LOG.warning.call_count) def test_setup_tracing_invalid_key(self): na_utils.setup_tracing('method,fake') self.assertFalse(na_utils.TRACE_API) self.assertTrue(na_utils.TRACE_METHOD) self.assertEqual(1, na_utils.LOG.warning.call_count) @ddt.data('?!(bad', '(reg]+', 'eX?!)') def test_setup_tracing_invalid_regex(self, regex): self.assertRaises(exception.BadConfigurationException, na_utils.setup_tracing, 'method,api', api_trace_pattern=regex) @na_utils.trace def _trace_test_method(self, *args, **kwargs): return 'OK' def test_trace_no_tracing(self): result = self._trace_test_method() self.assertEqual('OK', result) self.assertEqual(0, na_utils.LOG.debug.call_count) na_utils.setup_tracing('method') def test_trace_method_tracing(self): na_utils.setup_tracing('method') result = self._trace_test_method() self.assertEqual('OK', result) self.assertEqual(2, na_utils.LOG.debug.call_count) def test_validate_driver_instantiation_proxy(self): kwargs = {'netapp_mode': 'proxy'} na_utils.validate_driver_instantiation(**kwargs) self.assertEqual(0, na_utils.LOG.warning.call_count) def test_validate_driver_instantiation_no_proxy(self): kwargs = {'netapp_mode': 'asdf'} na_utils.validate_driver_instantiation(**kwargs) self.assertEqual(1, na_utils.LOG.warning.call_count) def test_check_flags(self): configuration = type('Fake', (object,), {'flag1': 'value1', 'flag2': 'value2'}) self.assertIsNone(na_utils.check_flags(['flag1', 'flag2'], configuration)) def test_check_flags_missing_flag(self): configuration = type('Fake', (object,), {'flag1': 'value1', 'flag3': 'value3'}) self.assertRaises(exception.InvalidInput, na_utils.check_flags, ['flag1', 'flag2'], configuration) def test_convert_to_list(self): self.assertListEqual([], na_utils.convert_to_list(None)) self.assertListEqual(['test'], na_utils.convert_to_list('test')) self.assertListEqual(['a'], 
na_utils.convert_to_list(['a'])) self.assertListEqual(['a', 'b'], na_utils.convert_to_list(['a', 'b'])) self.assertListEqual([1, 2, 3], na_utils.convert_to_list((1, 2, 3))) self.assertListEqual([5], na_utils.convert_to_list(5)) self.assertListEqual( sorted(['key1', 'key2']), sorted(na_utils.convert_to_list({'key1': 'value1', 'key2': 'value2'}))) @ddt.data({'is_fg': True, 'type': na_utils.EXTENDED_DATA_PROTECTION_TYPE}, {'is_fg': False, 'type': na_utils.DATA_PROTECTION_TYPE}) @ddt.unpack def test_get_relationship_type(self, is_fg, type): relationship_type = na_utils.get_relationship_type(is_fg) self.assertEqual(type, relationship_type) @ddt.data({'is_style': True, 'style': na_utils.FLEXGROUP_STYLE_EXTENDED}, {'is_style': False, 'style': na_utils.FLEXVOL_STYLE_EXTENDED}) @ddt.unpack def test_is_style_extended_flexgroup(self, is_style, style): res = na_utils.is_style_extended_flexgroup(style) self.assertEqual(is_style, res) @ddt.data(True, False) def test_parse_flexgroup_pool_config(self, check): result = na_utils.parse_flexgroup_pool_config( [fake.FLEXGROUP_POOL_OPT_RAW], cluster_aggr_set=set(fake.FLEXGROUP_POOL_AGGR), check=check) self.assertEqual(fake.FLEXGROUP_POOL_OPT, result) def test_parse_flexgroup_pool_config_raise_invalid_aggr(self): self.assertRaises(exception.NetAppException, na_utils.parse_flexgroup_pool_config, [fake.FLEXGROUP_POOL_OPT_RAW], cluster_aggr_set=set(), check=True) def test_parse_flexgroup_pool_config_raise_duplicated_pool(self): fake_pool = { 'flexgroup1': fake.FLEXGROUP_POOL_AGGR[0], 'flexgroup2': fake.FLEXGROUP_POOL_AGGR[0], } self.assertRaises(exception.NetAppException, na_utils.parse_flexgroup_pool_config, [fake_pool], cluster_aggr_set=set(fake.FLEXGROUP_POOL_AGGR), check=True) def test_parse_flexgroup_pool_config_raise_repeated_aggr(self): aggr_pool = '%s %s' % (fake.FLEXGROUP_POOL_AGGR[0], fake.FLEXGROUP_POOL_AGGR[0]) self.assertRaises(exception.NetAppException, na_utils.parse_flexgroup_pool_config, [{'flexgroup1': aggr_pool}], 
cluster_aggr_set=set(fake.FLEXGROUP_POOL_AGGR), check=True) def test_parse_flexgroup_pool_config_raise_invalid_pool_name(self): aggr_pool = '%s %s' % (fake.FLEXGROUP_POOL_AGGR[0], fake.FLEXGROUP_POOL_AGGR[0]) self.assertRaises(exception.NetAppException, na_utils.parse_flexgroup_pool_config, [{fake.FLEXGROUP_POOL_AGGR[0]: aggr_pool}], cluster_aggr_set=set(fake.FLEXGROUP_POOL_AGGR), check=True) class OpenstackInfoTestCase(test.TestCase): UNKNOWN_VERSION = 'unknown version' UNKNOWN_RELEASE = 'unknown release' UNKNOWN_VENDOR = 'unknown vendor' UNKNOWN_PLATFORM = 'unknown platform' VERSION_STRING_RET_VAL = 'fake_version_1' RELEASE_STRING_RET_VAL = 'fake_release_1' PLATFORM_RET_VAL = 'fake_platform_1' VERSION_INFO_VERSION = 'fake_version_2' VERSION_INFO_RELEASE = 'fake_release_2' RPM_INFO_VERSION = 'fake_version_3' RPM_INFO_RELEASE = 'fake_release_3' RPM_INFO_VENDOR = 'fake vendor 3' PUTILS_RPM_RET_VAL = ('fake_version_3 fake_release_3 fake vendor 3', '') NO_PKG_FOUND = ('', 'whatever') PUTILS_DPKG_RET_VAL = ('epoch:upstream_version-debian_revision', '') DEB_RLS = 'upstream_version-debian_revision' DEB_VENDOR = 'debian_revision' def test_openstack_info_init(self): info = na_utils.OpenStackInfo() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(return_value=VERSION_STRING_RET_VAL)) def test_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.VERSION_STRING_RET_VAL, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(side_effect=Exception)) def 
test_exception_in_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(return_value=RELEASE_STRING_RET_VAL)) def test_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.RELEASE_STRING_RET_VAL, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(side_effect=Exception)) def test_exception_in_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(return_value=PLATFORM_RET_VAL)) def test_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.PLATFORM_RET_VAL, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(side_effect=Exception)) def test_exception_in_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) 
self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=VERSION_INFO_RELEASE)) def test_update_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.VERSION_INFO_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value='')) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=None)) def test_no_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(side_effect=Exception)) def test_exception_in_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_RPM_RET_VAL)) def test_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.RPM_INFO_VERSION, info._version) 
self.assertEqual(self.RPM_INFO_RELEASE, info._release) self.assertEqual(self.RPM_INFO_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_rpm_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_exception_in_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_DPKG_RET_VAL)) def test_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.DEB_RLS, info._release) self.assertEqual(self.DEB_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_dpkg_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) 
@mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_exception_in_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=True)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertFalse(mock_updt_from_dpkg.called) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=False)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_not_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertTrue(mock_updt_from_dpkg.called) @ddt.ddt class DataCacheTestCase(test.TestCase): def setUp(self): super(DataCacheTestCase, self).setUp() self.cache = na_utils.DataCache(60) 
self.cache._stop_watch = mock.Mock() @ddt.data(True, False) def test_is_expired(self, is_expired): not_expired = not is_expired self.mock_object( self.cache._stop_watch, 'has_started', mock.Mock(return_value=not_expired)) self.mock_object( self.cache._stop_watch, 'expired', mock.Mock(return_value=is_expired)) self.assertEqual(is_expired, self.cache.is_expired()) def test_get_data(self): fake_data = 10 self.cache._cached_data = fake_data self.assertEqual(fake_data, self.cache.get_data()) @ddt.data(True, False) def test_update_data(self, started): self.mock_object( self.cache._stop_watch, 'has_started', mock.Mock(return_value=started)) mock_start = self.mock_object(self.cache._stop_watch, 'start', mock.Mock()) mock_restart = self.mock_object(self.cache._stop_watch, 'restart', mock.Mock()) fake_data = 10 self.cache.update_data(fake_data) self.assertEqual(self.cache._cached_data, fake_data) if not started: mock_start.assert_called_once() else: mock_restart.assert_called_once() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0336702 manila-21.0.0/manila/tests/share/drivers/nexenta/0000775000175000017500000000000000000000000021703 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/nexenta/__init__.py0000664000175000017500000000000000000000000024002 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0336702 manila-21.0.0/manila/tests/share/drivers/nexenta/ns4/0000775000175000017500000000000000000000000022407 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/nexenta/ns4/__init__.py0000664000175000017500000000000000000000000024506 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/nexenta/ns4/test_jsonrpc.py0000664000175000017500000000251600000000000025502 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_serialization import jsonutils import requests from manila import exception from manila.share.drivers.nexenta.ns4 import jsonrpc from manila import test class TestNexentaJSONProxy(test.TestCase): @mock.patch('requests.post') def test_call(self, post): nms_post = jsonrpc.NexentaJSONProxy( 'http', '1.1.1.1', '8080', 'user', 'pass', 'obj', auto=False, method='get') data = {'error': {'message': 'some_error'}} post.return_value = requests.Response() post.return_value.__setstate__({ 'status_code': 500, '_content': jsonutils.dumps(data)}) self.assertRaises(exception.NexentaException, nms_post) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/nexenta/ns4/test_nexenta_nas.py0000664000175000017500000005421200000000000026327 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import json from unittest import mock from oslo_serialization import jsonutils from oslo_utils import units from manila import context from manila import exception from manila.share import configuration as conf from manila.share.drivers.nexenta.ns4 import nexenta_nas from manila import test PATH_TO_RPC = 'requests.post' CODE = mock.PropertyMock(return_value=200) class FakeResponse(object): def __init__(self, response={}): self.content = json.dumps(response) super(FakeResponse, self).__init__() def close(self): pass class RequestParams(object): def __init__(self, scheme, host, port, path, user, password): self.scheme = scheme.lower() self.host = host self.port = port self.path = path self.user = user self.password = password @property def url(self): return '%s://%s:%s%s' % (self.scheme, self.host, self.port, self.path) @property def headers(self): auth = base64.b64encode( ('%s:%s' % (self.user, self.password)).encode('utf-8')) headers = { 'Content-Type': 'application/json', 'Authorization': 'Basic %s' % auth, } return headers def build_post_args(self, obj, method, *args): data = jsonutils.dumps({ 'object': obj, 'method': method, 'params': args, }) return data class TestNexentaNasDriver(test.TestCase): def _get_share_path(self, share_name): return '%s/%s/%s' % (self.volume, self.share, share_name) def setUp(self): def _safe_get(opt): return getattr(self.cfg, opt) self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.nexenta_nas_host = '1.1.1.1' super(TestNexentaNasDriver, self).setUp() self.ctx = context.get_admin_context() self.cfg.safe_get = 
mock.Mock(side_effect=_safe_get) self.cfg.nexenta_rest_port = 1000 self.cfg.reserved_share_percentage = 0 self.cfg.reserved_share_from_snapshot_percentage = 0 self.cfg.reserved_share_extend_percentage = 0 self.cfg.max_over_subscription_ratio = 0 self.cfg.nexenta_rest_protocol = 'auto' self.cfg.nexenta_volume = 'volume' self.cfg.nexenta_nfs_share = 'nfs_share' self.cfg.nexenta_user = 'user' self.cfg.nexenta_password = 'password' self.cfg.nexenta_thin_provisioning = False self.cfg.enabled_share_protocols = 'NFS' self.cfg.nexenta_mount_point_base = '$state_path/mnt' self.cfg.share_backend_name = 'NexentaStor' self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_smb = 'on' self.cfg.nexenta_nfs = 'on' self.cfg.nexenta_dataset_dedupe = 'on' self.cfg.network_config_group = 'DEFAULT' self.cfg.admin_network_config_group = ( 'fake_admin_network_config_group') self.cfg.driver_handles_share_servers = False self.request_params = RequestParams( 'http', self.cfg.nexenta_nas_host, self.cfg.nexenta_rest_port, '/rest/nms/', self.cfg.nexenta_user, self.cfg.nexenta_password) self.drv = nexenta_nas.NexentaNasDriver(configuration=self.cfg) self.drv.do_setup(self.ctx) self.volume = self.cfg.nexenta_volume self.share = self.cfg.nexenta_nfs_share @mock.patch(PATH_TO_RPC) def test_check_for_setup_error__volume_doesnt_exist(self, post): post.return_value = FakeResponse() self.assertRaises( exception.NexentaException, self.drv.check_for_setup_error) @mock.patch(PATH_TO_RPC) def test_check_for_setup_error__folder_doesnt_exist(self, post): folder = '%s/%s' % (self.volume, self.share) create_folder_props = { 'recordsize': '4K', 'quota': '1G', 'compression': self.cfg.nexenta_dataset_compression, 'sharesmb': self.cfg.nexenta_smb, 'sharenfs': self.cfg.nexenta_nfs, } share_opts = { 'read_write': '*', 'read_only': '', 'root': 'nobody', 'extra_options': 'anon=0', 'recursive': 'true', 'anonymous_rw': 'true', } def my_side_effect(*args, **kwargs): if kwargs['data'] == 
self.request_params.build_post_args( 'volume', 'object_exists', self.volume): return FakeResponse({'result': 'OK'}) elif kwargs['data'] == self.request_params.build_post_args( 'folder', 'object_exists', folder): return FakeResponse() elif kwargs['data'] == self.request_params.build_post_args( 'folder', 'create_with_props', self.volume, self.share, create_folder_props): return FakeResponse() elif kwargs['data'] == self.request_params.build_post_args( 'netstorsvc', 'share_folder', 'svc:/network/nfs/server:default', folder, share_opts): return FakeResponse() else: raise exception.ManilaException('Unexpected request') post.side_effect = my_side_effect self.assertRaises( exception.ManilaException, self.drv.check_for_setup_error) post.assert_any_call( self.request_params.url, data=self.request_params.build_post_args( 'volume', 'object_exists', self.volume), headers=self.request_params.headers, timeout=60) post.assert_any_call( self.request_params.url, data=self.request_params.build_post_args( 'folder', 'object_exists', folder), headers=self.request_params.headers, timeout=60) @mock.patch(PATH_TO_RPC) def test_create_share(self, post): share = { 'name': 'share', 'size': 1, 'share_proto': self.cfg.enabled_share_protocols } self.cfg.nexenta_thin_provisioning = False path = '%s/%s/%s' % (self.volume, self.share, share['name']) location = {'path': '%s:/volumes/%s' % ( self.cfg.nexenta_nas_host, path)} post.return_value = FakeResponse() self.assertEqual([location], self.drv.create_share(self.ctx, share)) @mock.patch(PATH_TO_RPC) def test_create_share__wrong_proto(self, post): share = { 'name': 'share', 'size': 1, 'share_proto': 'A_VERY_WRONG_PROTO' } post.return_value = FakeResponse() self.assertRaises(exception.InvalidShare, self.drv.create_share, self.ctx, share) @mock.patch(PATH_TO_RPC) def test_create_share__thin_provisioning(self, post): share = {'name': 'share', 'size': 1, 'share_proto': self.cfg.enabled_share_protocols} create_folder_props = { 'recordsize': '4K', 
'quota': '1G', 'compression': self.cfg.nexenta_dataset_compression, } parent_path = '%s/%s' % (self.volume, self.share) post.return_value = FakeResponse() self.cfg.nexenta_thin_provisioning = True self.drv.create_share(self.ctx, share) post.assert_called_with( self.request_params.url, data=self.request_params.build_post_args( 'folder', 'create_with_props', parent_path, share['name'], create_folder_props), headers=self.request_params.headers, timeout=60) @mock.patch(PATH_TO_RPC) def test_create_share__thick_provisioning(self, post): share = { 'name': 'share', 'size': 1, 'share_proto': self.cfg.enabled_share_protocols } quota = '%sG' % share['size'] create_folder_props = { 'recordsize': '4K', 'quota': quota, 'compression': self.cfg.nexenta_dataset_compression, 'reservation': quota, } parent_path = '%s/%s' % (self.volume, self.share) post.return_value = FakeResponse() self.cfg.nexenta_thin_provisioning = False self.drv.create_share(self.ctx, share) post.assert_called_with( self.request_params.url, data=self.request_params.build_post_args( 'folder', 'create_with_props', parent_path, share['name'], create_folder_props), headers=self.request_params.headers, timeout=60) @mock.patch(PATH_TO_RPC) def test_create_share_from_snapshot(self, post): share = { 'name': 'share', 'size': 1, 'share_proto': self.cfg.enabled_share_protocols } snapshot = {'name': 'sn1', 'share_name': share['name']} post.return_value = FakeResponse() path = '%s/%s/%s' % (self.volume, self.share, share['name']) location = {'path': '%s:/volumes/%s' % ( self.cfg.nexenta_nas_host, path)} snapshot_name = '%s/%s/%s@%s' % ( self.volume, self.share, snapshot['share_name'], snapshot['name']) self.assertEqual([location], self.drv.create_share_from_snapshot( self.ctx, share, snapshot)) post.assert_any_call( self.request_params.url, data=self.request_params.build_post_args( 'folder', 'clone', snapshot_name, '%s/%s/%s' % (self.volume, self.share, share['name'])), headers=self.request_params.headers, timeout=60) 
@mock.patch(PATH_TO_RPC) def test_delete_share(self, post): share = { 'name': 'share', 'size': 1, 'share_proto': self.cfg.enabled_share_protocols } post.return_value = FakeResponse() folder = '%s/%s/%s' % (self.volume, self.share, share['name']) self.drv.delete_share(self.ctx, share) post.assert_any_call( self.request_params.url, data=self.request_params.build_post_args( 'folder', 'destroy', folder.strip(), '-r'), headers=self.request_params.headers, timeout=60) @mock.patch(PATH_TO_RPC) def test_delete_share__exists_error(self, post): share = { 'name': 'share', 'size': 1, 'share_proto': self.cfg.enabled_share_protocols } post.return_value = FakeResponse() post.side_effect = exception.NexentaException('does not exist') self.drv.delete_share(self.ctx, share) @mock.patch(PATH_TO_RPC) def test_delete_share__some_error(self, post): share = { 'name': 'share', 'size': 1, 'share_proto': self.cfg.enabled_share_protocols } post.return_value = FakeResponse() post.side_effect = exception.ManilaException('Some error') self.assertRaises( exception.ManilaException, self.drv.delete_share, self.ctx, share) @mock.patch(PATH_TO_RPC) def test_extend_share__thin_provisoning(self, post): share = { 'name': 'share', 'size': 1, 'share_proto': self.cfg.enabled_share_protocols } new_size = 5 quota = '%sG' % new_size post.return_value = FakeResponse() self.cfg.nexenta_thin_provisioning = True self.drv.extend_share(share, new_size) post.assert_called_with( self.request_params.url, data=self.request_params.build_post_args( 'folder', 'set_child_prop', '%s/%s/%s' % (self.volume, self.share, share['name']), 'quota', quota), headers=self.request_params.headers, timeout=60) @mock.patch(PATH_TO_RPC) def test_extend_share__thick_provisoning(self, post): share = { 'name': 'share', 'size': 1, 'share_proto': self.cfg.enabled_share_protocols } new_size = 5 post.return_value = FakeResponse() self.cfg.nexenta_thin_provisioning = False self.drv.extend_share(share, new_size) post.assert_not_called() 
@mock.patch(PATH_TO_RPC) def test_create_snapshot(self, post): snapshot = {'share_name': 'share', 'name': 'share@first'} post.return_value = FakeResponse() folder = '%s/%s/%s' % (self.volume, self.share, snapshot['share_name']) self.drv.create_snapshot(self.ctx, snapshot) post.assert_called_with( self.request_params.url, data=self.request_params.build_post_args( 'folder', 'create_snapshot', folder, snapshot['name'], '-r'), headers=self.request_params.headers, timeout=60) @mock.patch(PATH_TO_RPC) def test_delete_snapshot(self, post): snapshot = {'share_name': 'share', 'name': 'share@first'} post.return_value = FakeResponse() self.drv.delete_snapshot(self.ctx, snapshot) post.assert_called_with( self.request_params.url, data=self.request_params.build_post_args( 'snapshot', 'destroy', '%s@%s' % ( self._get_share_path(snapshot['share_name']), snapshot['name']), ''), headers=self.request_params.headers, timeout=60) @mock.patch(PATH_TO_RPC) def test_delete_snapshot__nexenta_error_1(self, post): snapshot = {'share_name': 'share', 'name': 'share@first'} post.return_value = FakeResponse() post.side_effect = exception.NexentaException('does not exist') self.drv.delete_snapshot(self.ctx, snapshot) @mock.patch(PATH_TO_RPC) def test_delete_snapshot__nexenta_error_2(self, post): snapshot = {'share_name': 'share', 'name': 'share@first'} post.return_value = FakeResponse() post.side_effect = exception.NexentaException('has dependent clones') self.drv.delete_snapshot(self.ctx, snapshot) @mock.patch(PATH_TO_RPC) def test_delete_snapshot__some_error(self, post): snapshot = {'share_name': 'share', 'name': 'share@first'} post.return_value = FakeResponse() post.side_effect = exception.ManilaException('Some error') self.assertRaises(exception.ManilaException, self.drv.delete_snapshot, self.ctx, snapshot) @mock.patch(PATH_TO_RPC) def test_update_access__unsupported_access_type(self, post): share = { 'name': 'share', 'share_proto': self.cfg.enabled_share_protocols } access = { 'access_type': 
'group', 'access_to': 'ordinary_users', 'access_level': 'rw' } self.assertRaises(exception.InvalidShareAccess, self.drv.update_access, self.ctx, share, [access], None, None, None) @mock.patch(PATH_TO_RPC) def test_update_access__cidr(self, post): share = { 'name': 'share', 'share_proto': self.cfg.enabled_share_protocols } access1 = { 'access_type': 'ip', 'access_to': '1.1.1.1/24', 'access_level': 'rw' } access2 = { 'access_type': 'ip', 'access_to': '1.2.3.4', 'access_level': 'rw' } access_rules = [access1, access2] share_opts = { 'auth_type': 'none', 'read_write': '%s:%s' % ( access1['access_to'], access2['access_to']), 'read_only': '', 'recursive': 'true', 'anonymous_rw': 'true', 'anonymous': 'true', 'extra_options': 'anon=0', } def my_side_effect(*args, **kwargs): if kwargs['data'] == self.request_params.build_post_args( 'netstorsvc', 'share_folder', 'svc:/network/nfs/server:default', self._get_share_path(share['name']), share_opts): return FakeResponse() else: raise exception.ManilaException('Unexpected request') post.return_value = FakeResponse() post.side_effect = my_side_effect self.drv.update_access(self.ctx, share, access_rules, None, None, None) post.assert_called_with( self.request_params.url, data=self.request_params.build_post_args( 'netstorsvc', 'share_folder', 'svc:/network/nfs/server:default', self._get_share_path(share['name']), share_opts), headers=self.request_params.headers, timeout=60) self.assertRaises(exception.ManilaException, self.drv.update_access, self.ctx, share, [access1, {'access_type': 'ip', 'access_to': '2.2.2.2', 'access_level': 'rw'}], None, None, None) @mock.patch(PATH_TO_RPC) def test_update_access__add_one_ip_to_empty_access_list(self, post): share = {'name': 'share', 'share_proto': self.cfg.enabled_share_protocols} access = { 'access_type': 'ip', 'access_to': '1.1.1.1', 'access_level': 'rw' } rw_list = None share_opts = { 'auth_type': 'none', 'read_write': access['access_to'], 'read_only': '', 'recursive': 'true', 
'anonymous_rw': 'true', 'anonymous': 'true', 'extra_options': 'anon=0', } def my_side_effect(*args, **kwargs): if kwargs['data'] == self.request_params.build_post_args( 'netstorsvc', 'get_shareopts', 'svc:/network/nfs/server:default', self._get_share_path(share['name'])): return FakeResponse({'result': {'read_write': rw_list}}) elif kwargs['data'] == self.request_params.build_post_args( 'netstorsvc', 'share_folder', 'svc:/network/nfs/server:default', self._get_share_path(share['name']), share_opts): return FakeResponse() else: raise exception.ManilaException('Unexpected request') post.return_value = FakeResponse() self.drv.update_access(self.ctx, share, [access], None, None, None) post.assert_called_with( self.request_params.url, data=self.request_params.build_post_args( 'netstorsvc', 'share_folder', 'svc:/network/nfs/server:default', self._get_share_path(share['name']), share_opts), headers=self.request_params.headers, timeout=60) post.side_effect = my_side_effect self.assertRaises(exception.ManilaException, self.drv.update_access, self.ctx, share, [{'access_type': 'ip', 'access_to': '1111', 'access_level': 'rw'}], None, None, None) @mock.patch(PATH_TO_RPC) def test_deny_access__unsupported_access_type(self, post): share = {'name': 'share', 'share_proto': self.cfg.enabled_share_protocols} access = { 'access_type': 'group', 'access_to': 'ordinary_users', 'access_level': 'rw' } self.assertRaises(exception.InvalidShareAccess, self.drv.update_access, self.ctx, share, [access], None, None, None) def test_share_backend_name(self): self.assertEqual('NexentaStor', self.drv.share_backend_name) @mock.patch(PATH_TO_RPC) def test_get_capacity_info(self, post): post.return_value = FakeResponse({'result': { 'available': 9 * units.Gi, 'used': 1 * units.Gi}}) self.assertEqual( (10, 9, 1), self.drv.helper._get_capacity_info()) @mock.patch('manila.share.drivers.nexenta.ns4.nexenta_nfs_helper.' 
'NFSHelper._get_capacity_info') @mock.patch('manila.share.driver.ShareDriver._update_share_stats') def test_update_share_stats(self, super_stats, info): info.return_value = (100, 90, 10) stats = { 'vendor_name': 'Nexenta', 'storage_protocol': 'NFS', 'nfs_mount_point_base': self.cfg.nexenta_mount_point_base, 'driver_version': '1.0', 'share_backend_name': self.cfg.share_backend_name, 'pools': [{ 'total_capacity_gb': 100, 'free_capacity_gb': 90, 'pool_name': 'volume', 'reserved_percentage': ( self.cfg.reserved_share_percentage), 'reserved_snapshot_percentage': ( self.cfg.reserved_share_from_snapshot_percentage), 'reserved_share_extend_percentage': ( self.cfg.reserved_share_extend_percentage), 'compression': True, 'dedupe': True, 'thin_provisioning': self.cfg.nexenta_thin_provisioning, 'max_over_subscription_ratio': ( self.cfg.safe_get( 'max_over_subscription_ratio')), }], } self.drv._update_share_stats() self.assertEqual(stats, self.drv._stats) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0336702 manila-21.0.0/manila/tests/share/drivers/nexenta/ns5/0000775000175000017500000000000000000000000022410 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/nexenta/ns5/__init__.py0000664000175000017500000000000000000000000024507 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/nexenta/ns5/test_jsonrpc.py0000664000175000017500000012440000000000000025500 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta by DDN, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for NexentaStor 5 REST API helper """ import copy import hashlib import json import posixpath from unittest import mock from urllib import parse as urlparse import uuid import requests from manila.share import configuration as conf from manila.share.drivers.nexenta.ns5 import jsonrpc from manila import test class FakeNefProxy(object): def __init__(self): self.scheme = 'https' self.port = 8443 self.hosts = ['1.1.1.1', '2.2.2.2'] self.host = self.hosts[0] self.root = 'pool/share' self.username = 'username' self.password = 'password' self.retries = 3 self.timeout = 5 self.session = mock.Mock() self.session.headers = {} def __getattr__(self, name): pass def delay(self, interval): pass def delete_bearer(self): pass def update_lock(self): pass def update_token(self, token): pass def update_host(self, host): pass def url(self, path): return '%s://%s:%s/%s' % (self.scheme, self.host, self.port, path) class TestNefException(test.TestCase): def test_message(self): message = 'test message 1' result = jsonrpc.NefException(message) self.assertIn(message, result.msg) def test_message_kwargs(self): code = 'EAGAIN' message = 'test message 2' result = jsonrpc.NefException(message, code=code) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_no_message_kwargs(self): code = 'ESRCH' message = 'test message 3' result = jsonrpc.NefException(None, code=code, message=message) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_message_plus_kwargs(self): code = 'ENODEV' message1 = 'test message 4' message2 = 'test 
message 5' result = jsonrpc.NefException(message1, code=code, message=message2) self.assertEqual(code, result.code) self.assertIn(message2, result.msg) def test_dict(self): code = 'ENOENT' message = 'test message 4' result = jsonrpc.NefException({'code': code, 'message': message}) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_kwargs(self): code = 'EPERM' message = 'test message 5' result = jsonrpc.NefException(code=code, message=message) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_dict_kwargs(self): code = 'EINVAL' message = 'test message 6' result = jsonrpc.NefException({'code': code}, message=message) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_defaults(self): code = 'EBADMSG' message = 'NexentaError' result = jsonrpc.NefException() self.assertEqual(code, result.code) self.assertIn(message, result.msg) class TestNefRequest(test.TestCase): def setUp(self): super(TestNefRequest, self).setUp() self.proxy = FakeNefProxy() def fake_response(self, method, path, payload, code, content): request = requests.PreparedRequest() request.method = method request.url = self.proxy.url(path) request.headers = {'Content-Type': 'application/json'} request.body = None if method in ['get', 'delete']: request.params = payload elif method in ['put', 'post']: request.data = json.dumps(payload) response = requests.Response() response.request = request response.status_code = code response._content = json.dumps(content) if content else '' return response def test___call___invalid_method(self): method = 'unsupported' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' self.assertRaises(jsonrpc.NefException, instance, path) def test___call___none_path(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) self.assertRaises(jsonrpc.NefException, instance, None) def test___call___empty_path(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, 
method) self.assertRaises(jsonrpc.NefException, instance, '') @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___get(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {} content = {'name': 'snapshot'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) request.assert_called_with(method, path) self.assertEqual(content, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___get_payload(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'name': 'snapshot'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) params = {'params': payload} request.assert_called_with(method, path, **params) self.assertEqual(content, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___get_data_payload(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} data = [ { 'name': 'fs1', 'path': 'pool/fs1' }, { 'name': 'fs2', 'path': 'pool/fs2' } ] content = {'data': data} response = self.fake_response(method, path, payload, 200, content) request.return_value = response instance.data = data result = instance(path, payload) params = {'params': payload} request.assert_called_with(method, path, **params) self.assertEqual(data, result) def test___call___get_invalid_payload(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = 'bad data' self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('manila.share.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test___call___delete(self, request): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {} content = {'name': 'snapshot'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) request.assert_called_with(method, path) self.assertEqual(content, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___delete_payload(self, request): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'name': 'snapshot'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) params = {'params': payload} request.assert_called_with(method, path, **params) self.assertEqual(content, result) def test___call___delete_invalid_payload(self): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = 'bad data' self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___post(self, request): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {} content = None response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) request.assert_called_with(method, path) self.assertEqual(content, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test___call___post_payload(self, request): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = None response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) params = {'data': json.dumps(payload)} request.assert_called_with(method, path, **params) self.assertEqual(content, result) def test___call___post_invalid_payload(self): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = 'bad data' self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___put(self, request): method = 'put' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {} content = None response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) request.assert_called_with(method, path) self.assertEqual(content, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___put_payload(self, request): method = 'put' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = None response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) params = {'data': json.dumps(payload)} request.assert_called_with(method, path, **params) self.assertEqual(content, result) def test___call___put_invalid_payload(self): method = 'put' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = 'bad data' self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('manila.share.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test___call___non_ok_response(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT', 'message': 'error'} response = self.fake_response(method, path, payload, 500, content) request.return_value = response self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.failover') @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___request_after_failover(self, request, failover): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = None response = self.fake_response(method, path, payload, 200, content) request.side_effect = [requests.exceptions.Timeout, response] failover.return_value = True result = instance(path, payload) params = {'data': json.dumps(payload)} request.assert_called_with(method, path, **params) self.assertEqual(content, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.failover') @mock.patch('manila.share.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test___call___request_failover_error(self, request, failover): method = 'put' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} request.side_effect = requests.exceptions.Timeout failover.return_value = False self.assertRaises(requests.exceptions.Timeout, instance, path, payload) def test_hook_default(self): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'name': 'dataset'} response = self.fake_response(method, path, payload, 303, content) result = instance.hook(response) self.assertEqual(response, result) def test_hook_200_empty(self): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'storage/filesystems' payload = {'force': True} content = None response = self.fake_response(method, path, payload, 200, content) result = instance.hook(response) self.assertEqual(response, result) def test_hook_201_empty(self): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'storage/snapshots' payload = {'path': 'parent/child@name'} content = None response = self.fake_response(method, path, payload, 201, content) result = instance.hook(response) self.assertEqual(response, result) def test_hook_500_empty(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'storage/pools' payload = {'poolName': 'tank'} content = None response = self.fake_response(method, path, payload, 500, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) def test_hook_200_bad_content(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'storage/volumes' payload = {'name': 'test'} content = None response = self.fake_response(method, path, payload, 200, content) response._content = 'bad_content' self.assertRaises(jsonrpc.NefException, instance.hook, response) @mock.patch('manila.share.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.auth') def test_hook_401(self, auth, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EAUTH'} response = self.fake_response(method, path, payload, 401, content) auth.return_value = True content2 = {'name': 'test'} response2 = self.fake_response(method, path, payload, 200, content2) request.return_value = response2 self.proxy.session.send.return_value = content2 result = instance.hook(response) self.assertEqual(content2, result) def test_hook_401_max_retries(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) instance.stat[401] = self.proxy.retries path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EAUTH'} response = self.fake_response(method, path, payload, 401, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) def test_hook_404_nested(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) instance.lock = True path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT'} response = self.fake_response(method, path, payload, 404, content) result = instance.hook(response) self.assertEqual(response, result) def test_hook_404_max_retries(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) instance.stat[404] = self.proxy.retries path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT'} response = self.fake_response(method, path, payload, 404, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) @mock.patch('manila.share.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.failover') def test_hook_404_failover_error(self, failover): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT'} response = self.fake_response(method, path, payload, 404, content) failover.return_value = False result = instance.hook(response) self.assertEqual(response, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.failover') def test_hook_404_failover_ok(self, failover, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT'} response = self.fake_response(method, path, payload, 404, content) failover.return_value = True content2 = {'name': 'test'} response2 = self.fake_response(method, path, payload, 200, content2) request.return_value = response2 result = instance.hook(response) self.assertEqual(response2, result) def test_hook_500_permanent(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EINVAL'} response = self.fake_response(method, path, payload, 500, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) def test_hook_500_busy_max_retries(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) instance.stat[500] = self.proxy.retries path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EBUSY'} response = self.fake_response(method, path, payload, 500, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) @mock.patch('manila.share.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test_hook_500_busy_ok(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EBUSY'} response = self.fake_response(method, path, payload, 500, content) content2 = {'name': 'test'} response2 = self.fake_response(method, path, payload, 200, content2) request.return_value = response2 result = instance.hook(response) self.assertEqual(response2, result) def test_hook_201_no_monitor(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'monitor': 'unknown'} response = self.fake_response(method, path, payload, 202, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_hook_201_ok(self, request): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = { 'links': [{ 'rel': 'monitor', 'href': '/jobStatus/jobID' }] } response = self.fake_response(method, path, payload, 202, content) content2 = None response2 = self.fake_response(method, path, payload, 201, content2) request.return_value = response2 result = instance.hook(response) self.assertEqual(response2, result) def test_200_no_data(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'name': 'test'} response = self.fake_response(method, path, payload, 200, content) result = instance.hook(response) self.assertEqual(response, result) def test_200_pagination_end(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'data': 'value'} response = self.fake_response(method, path, payload, 200, content) result = instance.hook(response) self.assertEqual(response, result) 
@mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_200_pagination_next(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = { 'data': [{ 'name': 'test' }], 'links': [{ 'rel': 'next', 'href': path }] } response = self.fake_response(method, path, payload, 200, content) response2 = self.fake_response(method, path, payload, 200, content) request.return_value = response2 result = instance.hook(response) self.assertEqual(response2, result) def test_request(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} expected = {'name': 'dataset'} url = self.proxy.url(path) kwargs = payload.copy() kwargs['timeout'] = self.proxy.timeout kwargs['hooks'] = {'response': instance.hook} self.proxy.session.request.return_value = expected result = instance.request(method, path, **payload) self.proxy.session.request.assert_called_with(method, url, **kwargs) self.assertEqual(expected, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_auth(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) method = 'post' path = 'auth/login' payload = { 'data': json.dumps({ 'username': self.proxy.username, 'password': self.proxy.password }) } content = {'token': 'test'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response instance.auth() request.assert_called_once_with(method, path, **payload) @mock.patch('manila.share.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test_auth_error(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) method = 'post' path = 'auth/login' payload = { 'data': json.dumps({ 'username': self.proxy.username, 'password': self.proxy.password }) } content = {'data': 'noauth'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response self.assertRaises(jsonrpc.NefException, instance.auth) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_failover(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = self.proxy.root payload = {} content = {'path': path} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance.failover() request.assert_called_once_with(method, path) expected = True self.assertEqual(expected, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_failover_timeout(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = self.proxy.root payload = {} content = {'path': path} response = self.fake_response(method, path, payload, 200, content) request.side_effect = [requests.exceptions.Timeout, response] result = instance.failover() request.assert_called_once_with(method, path) expected = False self.assertEqual(expected, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_failover_404(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = self.proxy.root payload = {} content = {} response = self.fake_response(method, path, payload, 404, content) request.side_effect = [response, response] result = instance.failover() request.assert_called_once_with(method, path) expected = False self.assertEqual(expected, result) @mock.patch('manila.share.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test_failover_error(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = self.proxy.root request.side_effect = [ requests.exceptions.Timeout, requests.exceptions.ConnectionError ] result = instance.failover() request.assert_called_with(method, path) expected = False self.assertEqual(expected, result) def test_getpath(self): method = 'get' rel = 'monitor' href = 'jobStatus/jobID' content = { 'links': [ [1, 2], 'bad link', { 'rel': 'next', 'href': href }, { 'rel': rel, 'href': href } ] } instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) expected = href self.assertEqual(expected, result) def test_getpath_no_content(self): method = 'get' rel = 'next' content = None instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) self.assertIsNone(result) def test_getpath_no_links(self): method = 'get' rel = 'next' content = {'a': 'b'} instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) self.assertIsNone(result) def test_getpath_no_rel(self): method = 'get' rel = 'next' content = { 'links': [ { 'rel': 'monitor', 'href': '/jobs/jobID' } ] } instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) self.assertIsNone(result) def test_getpath_no_href(self): method = 'get' rel = 'next' content = { 'links': [ { 'rel': rel } ] } instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) self.assertIsNone(result) class TestNefCollections(test.TestCase): def setUp(self): super(TestNefCollections, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefCollections(self.proxy) def test_path(self): path = 'path/to/item name + - & # $ = 0' result = self.instance.path(path) quoted_path = urlparse.quote_plus(path) expected = posixpath.join(self.instance.root, quoted_path) self.assertEqual(expected, result) def test_get(self): name = 
'parent/child' payload = {'key': 'value'} expected = {'name': 'dataset'} path = self.instance.path(name) self.proxy.get.return_value = expected result = self.instance.get(name, payload) self.proxy.get.assert_called_with(path, payload) self.assertEqual(expected, result) def test_set(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) self.proxy.put.return_value = expected result = self.instance.set(name, payload) self.proxy.put.assert_called_with(path, payload) self.assertIsNone(result) def test_list(self): payload = {'key': 'value'} expected = [{'name': 'dataset'}] self.proxy.get.return_value = expected result = self.instance.list(payload) self.proxy.get.assert_called_with(self.instance.root, payload) self.assertEqual(expected, result) def test_create(self): payload = {'key': 'value'} expected = None self.proxy.post.return_value = expected result = self.instance.create(payload) self.proxy.post.assert_called_with(self.instance.root, payload) self.assertIsNone(result) def test_create_exist(self): payload = {'key': 'value'} self.proxy.post.side_effect = jsonrpc.NefException(code='EEXIST') result = self.instance.create(payload) self.proxy.post.assert_called_with(self.instance.root, payload) self.assertIsNone(result) def test_create_error(self): payload = {'key': 'value'} self.proxy.post.side_effect = jsonrpc.NefException(code='EBUSY') self.assertRaises(jsonrpc.NefException, self.instance.create, payload) self.proxy.post.assert_called_with(self.instance.root, payload) def test_delete(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) self.proxy.delete.return_value = expected result = self.instance.delete(name, payload) self.proxy.delete.assert_called_with(path, payload) self.assertIsNone(result) def test_delete_not_found(self): name = 'parent/child' payload = {'key': 'value'} path = self.instance.path(name) self.proxy.delete.side_effect = 
jsonrpc.NefException(code='ENOENT') result = self.instance.delete(name, payload) self.proxy.delete.assert_called_with(path, payload) self.assertIsNone(result) def test_delete_error(self): name = 'parent/child' payload = {'key': 'value'} path = self.instance.path(name) self.proxy.delete.side_effect = jsonrpc.NefException(code='EINVAL') self.assertRaises(jsonrpc.NefException, self.instance.delete, name, payload) self.proxy.delete.assert_called_with(path, payload) class TestNefSettings(test.TestCase): def setUp(self): super(TestNefSettings, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefSettings(self.proxy) def test_create(self): payload = {'key': 'value'} result = self.instance.create(payload) expected = NotImplemented self.assertEqual(expected, result) def test_delete(self): name = 'parent/child' payload = {'key': 'value'} result = self.instance.delete(name, payload) expected = NotImplemented self.assertEqual(expected, result) class TestNefDatasets(test.TestCase): def setUp(self): super(TestNefDatasets, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefDatasets(self.proxy) def test_rename(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'rename') self.proxy.post.return_value = expected result = self.instance.rename(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertIsNone(result) class TestNefSnapshots(test.TestCase): def setUp(self): super(TestNefSnapshots, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefSnapshots(self.proxy) def test_clone(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'clone') self.proxy.post.return_value = expected result = self.instance.clone(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertIsNone(result) class TestNefFilesystems(test.TestCase): def setUp(self): 
super(TestNefFilesystems, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefFilesystems(self.proxy) def test_mount(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'mount') self.proxy.post.return_value = expected result = self.instance.mount(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertIsNone(result) def test_unmount(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'unmount') self.proxy.post.return_value = expected result = self.instance.unmount(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertIsNone(result) def test_acl(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'acl') self.proxy.post.return_value = expected result = self.instance.acl(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertIsNone(result) def test_promote(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'promote') self.proxy.post.return_value = expected result = self.instance.promote(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertIsNone(result) def test_rollback(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'rollback') self.proxy.post.return_value = expected result = self.instance.rollback(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertIsNone(result) class TestNefHpr(test.TestCase): def setUp(self): super(TestNefHpr, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefHpr(self.proxy) def test_activate(self): payload = {'key': 'value'} expected = None path = posixpath.join(self.instance.root, 'activate') 
self.proxy.post.return_value = expected result = self.instance.activate(payload) self.proxy.post.assert_called_with(path, payload) self.assertIsNone(result) def test_start(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = posixpath.join(self.instance.path(name), 'start') self.proxy.post.return_value = expected result = self.instance.start(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertIsNone(result) class TestNefProxy(test.TestCase): def setUp(self): super(TestNefProxy, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.nexenta_use_https = True self.cfg.nexenta_ssl_cert_verify = True self.cfg.nexenta_user = 'user' self.cfg.nexenta_password = 'pass' self.cfg.nexenta_rest_addresses = ['1.1.1.1', '2.2.2.2'] self.cfg.nexenta_rest_port = 8443 self.cfg.nexenta_rest_backoff_factor = 1 self.cfg.nexenta_rest_retry_count = 3 self.cfg.nexenta_rest_connect_timeout = 1 self.cfg.nexenta_rest_read_timeout = 1 self.cfg.nexenta_nas_host = '3.3.3.3' self.cfg.nexenta_folder = 'pool/path/to/share' self.nef_mock = mock.Mock() self.mock_object(jsonrpc, 'NefRequest') self.proto = 'nfs' self.proxy = jsonrpc.NefProxy(self.proto, self.cfg.nexenta_folder, self.cfg) def test___init___http(self): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.nexenta_use_https = False result = jsonrpc.NefProxy(proto, cfg.nexenta_folder, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___no_rest_port_http(self): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.nexenta_rest_port = 0 cfg.nexenta_use_https = False result = jsonrpc.NefProxy(proto, cfg.nexenta_folder, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___no_rest_port_https(self): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.nexenta_rest_port = 0 cfg.nexenta_use_https = True result = jsonrpc.NefProxy(proto, cfg.nexenta_folder, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___iscsi(self): proto = 'iscsi' cfg = 
copy.copy(self.cfg) result = jsonrpc.NefProxy(proto, cfg.nexenta_folder, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___nfs_no_rest_address(self): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.nexenta_rest_addresses = '' result = jsonrpc.NefProxy(proto, cfg.nexenta_folder, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___iscsi_no_rest_address(self): proto = 'iscsi' cfg = copy.copy(self.cfg) cfg.nexenta_rest_addresses = '' cfg.nexenta_nas_host = '4.4.4.4' result = jsonrpc.NefProxy(proto, cfg.nexenta_folder, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) @mock.patch('requests.packages.urllib3.disable_warnings') def test___init___no_ssl_cert_verify(self, disable_warnings): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.nexenta_ssl_cert_verify = False disable_warnings.return_value = None result = jsonrpc.NefProxy(proto, cfg.nexenta_folder, cfg) disable_warnings.assert_called() self.assertIsInstance(result, jsonrpc.NefProxy) def test_delete_bearer(self): self.assertIsNone(self.proxy.delete_bearer()) self.assertNotIn('Authorization', self.proxy.session.headers) self.proxy.session.headers['Authorization'] = 'Bearer token' self.assertIsNone(self.proxy.delete_bearer()) self.assertNotIn('Authorization', self.proxy.session.headers) def test_update_bearer(self): token = 'token' bearer = 'Bearer %s' % token self.assertNotIn('Authorization', self.proxy.session.headers) self.assertIsNone(self.proxy.update_bearer(token)) self.assertIn('Authorization', self.proxy.session.headers) self.assertEqual(self.proxy.session.headers['Authorization'], bearer) def test_update_token(self): token = 'token' bearer = 'Bearer %s' % token self.assertIsNone(self.proxy.update_token(token)) self.assertEqual(self.proxy.tokens[self.proxy.host], token) self.assertEqual(self.proxy.session.headers['Authorization'], bearer) def test_update_host(self): token = 'token' bearer = 'Bearer %s' % token host = self.cfg.nexenta_rest_addresses[0] self.proxy.tokens[host] = 
token self.assertIsNone(self.proxy.update_host(host)) self.assertEqual(self.proxy.session.headers['Authorization'], bearer) def test_skip_update_host(self): host = 'nonexistent' self.assertIsNone(self.proxy.update_host(host)) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefSettings.get') def test_update_lock(self, get_settings): guid = uuid.uuid4().hex settings = {'value': guid} get_settings.return_value = settings self.assertIsNone(self.proxy.update_lock()) path = '%s:%s' % (guid, self.proxy.path) if isinstance(path, str): path = path.encode('utf-8') expected = hashlib.md5(path).hexdigest() self.assertEqual(expected, self.proxy.lock) def test_url(self): path = '/path/to/api' result = self.proxy.url(path) expected = '%s://%s:%s%s' % (self.proxy.scheme, self.proxy.host, self.proxy.port, path) self.assertEqual(expected, result) @mock.patch('eventlet.greenthread.sleep') def test_delay(self, sleep): sleep.return_value = None for attempt in range(0, 10): expected = int(self.proxy.backoff_factor * (2 ** (attempt - 1))) self.assertIsNone(self.proxy.delay(attempt)) sleep.assert_called_with(expected) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/nexenta/ns5/test_nexenta_nas.py0000664000175000017500000005372500000000000026340 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta by DDN, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_utils import units from manila import context from manila.share.drivers.nexenta.ns5 import jsonrpc from manila.share.drivers.nexenta.ns5 import nexenta_nas from manila import test RPC_PATH = 'manila.share.drivers.nexenta.ns5.jsonrpc' DRV_PATH = 'manila.share.drivers.nexenta.ns5.nexenta_nas.NexentaNasDriver' DRIVER_VERSION = '1.1' SHARE = {'share_id': 'uuid', 'size': 1, 'share_proto': 'NFS'} SHARE_PATH = 'pool1/nfs_share/share-uuid' SHARE2 = {'share_id': 'uuid2', 'size': 2, 'share_proto': 'NFS'} SHARE2_PATH = 'pool1/nfs_share/share-uuid2' SNAPSHOT = { 'snapshot_id': 'snap_id', 'share': SHARE, 'snapshot_path': '%s@%s' % (SHARE_PATH, 'snapshot-snap_id')} @ddt.ddt class TestNexentaNasDriver(test.TestCase): def setUp(self): def _safe_get(opt): return getattr(self.cfg, opt) self.cfg = mock.Mock() self.mock_object( self.cfg, 'safe_get', mock.Mock(side_effect=_safe_get)) super(TestNexentaNasDriver, self).setUp() self.cfg.nexenta_nas_host = '1.1.1.1' self.cfg.nexenta_rest_addresses = ['2.2.2.2'] self.ctx = context.get_admin_context() self.cfg.nexenta_rest_port = 8080 self.cfg.nexenta_rest_protocol = 'auto' self.cfg.nexenta_pool = 'pool1' self.cfg.nexenta_dataset_record_size = 131072 self.cfg.reserved_share_percentage = 0 self.cfg.reserved_share_from_snapshot_percentage = 0 self.cfg.reserved_share_extend_percentage = 0 self.cfg.nexenta_folder = 'nfs_share' self.cfg.nexenta_user = 'user' self.cfg.share_backend_name = 'NexentaStor5' self.cfg.nexenta_password = 'password' self.cfg.nexenta_thin_provisioning = False self.cfg.nexenta_mount_point_base = 'mnt' self.cfg.nexenta_rest_retry_count = 3 self.cfg.nexenta_share_name_prefix = 'share-' self.cfg.max_over_subscription_ratio = 20.0 self.cfg.enabled_share_protocols = 'NFS' self.cfg.nexenta_mount_point_base = '$state_path/mnt' self.cfg.nexenta_dataset_compression = 'on' self.cfg.network_config_group = 'DEFAULT' self.cfg.admin_network_config_group = ( 'fake_admin_network_config_group') 
self.cfg.driver_handles_share_servers = False self.cfg.safe_get = self.fake_safe_get self.nef_mock = mock.Mock() self.mock_object(jsonrpc, 'NefRequest') self.drv = nexenta_nas.NexentaNasDriver(configuration=self.cfg) self.drv.do_setup(self.ctx) def fake_safe_get(self, key): try: value = getattr(self.cfg, key) except AttributeError: value = None return value def test_backend_name(self): self.assertEqual('NexentaStor5', self.drv.share_backend_name) @mock.patch('%s._get_provisioned_capacity' % DRV_PATH) @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefServices.get') @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.set') @mock.patch('manila.share.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.get') def test_check_for_setup_error(self, get_filesystem, set_filesystem, get_service, prov_capacity): prov_capacity.return_value = 1 get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': False, 'smartCompression': False, 'isMounted': True } get_service.return_value = { 'state': 'online' } self.assertIsNone(self.drv.check_for_setup_error()) get_filesystem.assert_called_with(self.drv.root_path) set_filesystem.assert_not_called() get_service.assert_called_with('nfs') get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': True, 'smartCompression': True, 'isMounted': True } set_filesystem.return_value = {} payload = { 'nonBlockingMandatoryMode': False, 'smartCompression': False } self.assertIsNone(self.drv.check_for_setup_error()) get_filesystem.assert_called_with(self.drv.root_path) set_filesystem.assert_called_with(self.drv.root_path, payload) get_service.assert_called_with('nfs') get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': False, 'smartCompression': True, 'isMounted': True } payload = { 'smartCompression': False } set_filesystem.return_value = {} self.assertIsNone(self.drv.check_for_setup_error()) 
get_filesystem.assert_called_with(self.drv.root_path) set_filesystem.assert_called_with(self.drv.root_path, payload) get_service.assert_called_with('nfs') get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': True, 'smartCompression': False, 'isMounted': True } payload = { 'nonBlockingMandatoryMode': False } set_filesystem.return_value = {} self.assertIsNone(self.drv.check_for_setup_error()) get_filesystem.assert_called_with(self.drv.root_path) set_filesystem.assert_called_with(self.drv.root_path, payload) get_service.assert_called_with('nfs') get_filesystem.return_value = { 'mountPoint': 'none', 'nonBlockingMandatoryMode': False, 'smartCompression': False, 'isMounted': False } self.assertRaises(jsonrpc.NefException, self.drv.check_for_setup_error) get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': False, 'smartCompression': False, 'isMounted': False } self.assertRaises(jsonrpc.NefException, self.drv.check_for_setup_error) get_service.return_value = { 'state': 'online' } self.assertRaises(jsonrpc.NefException, self.drv.check_for_setup_error) @mock.patch('%s.NefFilesystems.get' % RPC_PATH) def test__get_provisioned_capacity(self, fs_get): fs_get.return_value = { 'path': 'pool1/nfs_share/123', 'referencedQuotaSize': 1 * units.Gi } self.drv._get_provisioned_capacity() self.assertEqual(1 * units.Gi, self.drv.provisioned_capacity) @mock.patch('%s._mount_filesystem' % DRV_PATH) @mock.patch('%s.NefFilesystems.create' % RPC_PATH) @mock.patch('%s.NefFilesystems.delete' % RPC_PATH) def test_create_share(self, delete_fs, create_fs, mount_fs): mount_path = '%s:/%s' % (self.cfg.nexenta_nas_host, SHARE_PATH) mount_fs.return_value = mount_path size = int(1 * units.Gi * 1.1) self.assertEqual( [{ 'path': mount_path, 'id': 'share-uuid' }], self.drv.create_share(self.ctx, SHARE)) payload = { 'recordSize': 131072, 'compressionMode': self.cfg.nexenta_dataset_compression, 'path': SHARE_PATH, 
'referencedQuotaSize': size, 'nonBlockingMandatoryMode': False, 'referencedReservationSize': size } self.drv.nef.filesystems.create.assert_called_with(payload) mount_fs.side_effect = jsonrpc.NefException('some error') self.assertRaises(jsonrpc.NefException, self.drv.create_share, self.ctx, SHARE) delete_payload = {'force': True} self.drv.nef.filesystems.delete.assert_called_with( SHARE_PATH, delete_payload) @mock.patch('%s.NefFilesystems.promote' % RPC_PATH) @mock.patch('%s.NefSnapshots.get' % RPC_PATH) @mock.patch('%s.NefSnapshots.list' % RPC_PATH) @mock.patch('%s.NefFilesystems.delete' % RPC_PATH) def test_delete_share(self, fs_delete, snap_list, snap_get, fs_promote): delete_payload = {'force': True, 'snapshots': True} snapshots_payload = {'parent': SHARE_PATH, 'fields': 'path'} clones_payload = {'fields': 'clones,creationTxg'} clone_path = '%s:/%s' % (self.cfg.nexenta_nas_host, 'path_to_fs') fs_delete.side_effect = [ jsonrpc.NefException({ 'message': 'some_error', 'code': 'EEXIST'}), None] snap_list.return_value = [{'path': '%s@snap1' % SHARE_PATH}] snap_get.return_value = {'clones': [clone_path], 'creationTxg': 1} self.assertIsNone(self.drv.delete_share(self.ctx, SHARE)) fs_delete.assert_called_with(SHARE_PATH, delete_payload) fs_promote.assert_called_with(clone_path) snap_get.assert_called_with('%s@snap1' % SHARE_PATH, clones_payload) snap_list.assert_called_with(snapshots_payload) @mock.patch('%s.NefFilesystems.mount' % RPC_PATH) @mock.patch('%s.NefFilesystems.get' % RPC_PATH) def test_mount_filesystem(self, fs_get, fs_mount): mount_path = '%s:/%s' % (self.cfg.nexenta_nas_host, SHARE_PATH) fs_get.return_value = { 'mountPoint': '/%s' % SHARE_PATH, 'isMounted': False} self.assertEqual(mount_path, self.drv._mount_filesystem(SHARE)) self.drv.nef.filesystems.mount.assert_called_with(SHARE_PATH) @mock.patch('%s.NefHpr.activate' % RPC_PATH) @mock.patch('%s.NefFilesystems.mount' % RPC_PATH) @mock.patch('%s.NefFilesystems.get' % RPC_PATH) def 
test_mount_filesystem_with_activate( self, fs_get, fs_mount, hpr_activate): mount_path = '%s:/%s' % (self.cfg.nexenta_nas_host, SHARE_PATH) fs_get.side_effect = [ {'mountPoint': 'none', 'isMounted': False}, {'mountPoint': '/%s' % SHARE_PATH, 'isMounted': False}] self.assertEqual(mount_path, self.drv._mount_filesystem(SHARE)) payload = {'datasetName': SHARE_PATH} self.drv.nef.hpr.activate.assert_called_once_with(payload) @mock.patch('%s.NefFilesystems.mount' % RPC_PATH) @mock.patch('%s.NefFilesystems.unmount' % RPC_PATH) def test_remount_filesystem(self, fs_unmount, fs_mount): self.drv._remount_filesystem(SHARE_PATH) fs_unmount.assert_called_once_with(SHARE_PATH) fs_mount.assert_called_once_with(SHARE_PATH) def parse_fqdn(self, fqdn): address_mask = fqdn.strip().split('/', 1) address = address_mask[0] ls = {"allow": True, "etype": "fqdn", "entity": address} if len(address_mask) == 2: ls['mask'] = address_mask[1] ls['etype'] = 'network' return ls @ddt.data({'key': 'value'}, {}) @mock.patch('%s.NefNfs.list' % RPC_PATH) @mock.patch('%s.NefNfs.set' % RPC_PATH) @mock.patch('%s.NefFilesystems.acl' % RPC_PATH) def test_update_nfs_access(self, acl, nfs_set, nfs_list, list_data): security_contexts = {'securityModes': ['sys']} nfs_list.return_value = list_data rw_list = ['1.1.1.1/24', '2.2.2.2'] ro_list = ['3.3.3.3', '4.4.4.4/30'] security_contexts['readWriteList'] = [] security_contexts['readOnlyList'] = [] for fqdn in rw_list: ls = self.parse_fqdn(fqdn) if ls.get('mask'): ls['mask'] = int(ls['mask']) security_contexts['readWriteList'].append(ls) for fqdn in ro_list: ls = self.parse_fqdn(fqdn) if ls.get('mask'): ls['mask'] = int(ls['mask']) security_contexts['readOnlyList'].append(ls) self.assertIsNone(self.drv._update_nfs_access(SHARE, rw_list, ro_list)) payload = { 'flags': ['file_inherit', 'dir_inherit'], 'permissions': ['full_set'], 'principal': 'everyone@', 'type': 'allow' } self.drv.nef.filesystems.acl.assert_called_with(SHARE_PATH, payload) payload = 
{'securityContexts': [security_contexts]} if list_data: self.drv.nef.nfs.set.assert_called_with(SHARE_PATH, payload) else: payload['filesystem'] = SHARE_PATH self.drv.nef.nfs.create.assert_called_with(payload) def test_update_nfs_access_bad_mask(self): security_contexts = {'securityModes': ['sys']} rw_list = ['1.1.1.1/24', '2.2.2.2/1a'] ro_list = ['3.3.3.3', '4.4.4.4/30'] security_contexts['readWriteList'] = [] security_contexts['readOnlyList'] = [] for fqdn in rw_list: security_contexts['readWriteList'].append(self.parse_fqdn(fqdn)) for fqdn in ro_list: security_contexts['readOnlyList'].append(self.parse_fqdn(fqdn)) self.assertRaises(ValueError, self.drv._update_nfs_access, SHARE, rw_list, ro_list) @mock.patch('%s._update_nfs_access' % DRV_PATH) def test_update_access__ip_rw(self, update_nfs_access): access = { 'access_type': 'ip', 'access_to': '1.1.1.1', 'access_level': 'rw', 'access_id': 'fake_id' } self.assertEqual( {'fake_id': {'state': 'active'}}, self.drv.update_access( self.ctx, SHARE, [access], None, None, None)) self.drv._update_nfs_access.assert_called_with(SHARE, ['1.1.1.1'], []) @mock.patch('%s._update_nfs_access' % DRV_PATH) def test_update_access__ip_ro(self, update_nfs_access): access = { 'access_type': 'ip', 'access_to': '1.1.1.1', 'access_level': 'ro', 'access_id': 'fake_id' } expected = {'fake_id': {'state': 'active'}} self.assertEqual( expected, self.drv.update_access( self.ctx, SHARE, [access], None, None, None)) self.drv._update_nfs_access.assert_called_with(SHARE, [], ['1.1.1.1']) @ddt.data('rw', 'ro') def test_update_access__not_ip(self, access_level): access = { 'access_type': 'username', 'access_to': 'some_user', 'access_level': access_level, 'access_id': 'fake_id' } expected = {'fake_id': {'state': 'error'}} self.assertEqual(expected, self.drv.update_access( self.ctx, SHARE, [access], None, None, None)) @mock.patch('%s._get_capacity_info' % DRV_PATH) @mock.patch('manila.share.driver.ShareDriver._update_share_stats') def 
test_update_share_stats(self, super_stats, info): info.return_value = (100, 90, 10) stats = { 'vendor_name': 'Nexenta', 'storage_protocol': 'NFS', 'nfs_mount_point_base': self.cfg.nexenta_mount_point_base, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': True, 'snapshot_support': True, 'driver_version': DRIVER_VERSION, 'share_backend_name': self.cfg.share_backend_name, 'pools': [{ 'compression': True, 'pool_name': 'pool1', 'total_capacity_gb': 100, 'free_capacity_gb': 90, 'provisioned_capacity_gb': 0, 'max_over_subscription_ratio': 20.0, 'reserved_percentage': ( self.cfg.reserved_share_percentage), 'reserved_snapshot_percentage': ( self.cfg.reserved_share_from_snapshot_percentage), 'reserved_share_extend_percentage': ( self.cfg.reserved_share_extend_percentage), 'thin_provisioning': self.cfg.nexenta_thin_provisioning, }], } self.drv._update_share_stats() self.assertEqual(stats, self.drv._stats) def test_get_capacity_info(self): self.drv.nef.get.return_value = { 'bytesAvailable': 9 * units.Gi, 'bytesUsed': 1 * units.Gi} self.assertEqual((10, 9, 1), self.drv._get_capacity_info()) @mock.patch('%s._set_reservation' % DRV_PATH) @mock.patch('%s._set_quota' % DRV_PATH) @mock.patch('%s.NefFilesystems.rename' % RPC_PATH) @mock.patch('%s.NefFilesystems.get' % RPC_PATH) def test_manage_existing(self, fs_get, fs_rename, set_res, set_quota): fs_get.return_value = {'referencedQuotaSize': 1073741824} old_path = '%s:/%s' % (self.cfg.nexenta_nas_host, 'path_to_fs') new_path = '%s:/%s' % (self.cfg.nexenta_nas_host, SHARE_PATH) SHARE['export_locations'] = [{'path': old_path}] expected = {'size': 2, 'export_locations': [{ 'path': new_path }]} self.assertEqual(expected, self.drv.manage_existing(SHARE, None)) fs_rename.assert_called_with('path_to_fs', {'newPath': SHARE_PATH}) set_res.assert_called_with(SHARE, 2) set_quota.assert_called_with(SHARE, 2) @mock.patch('%s.NefSnapshots.create' % RPC_PATH) def test_create_snapshot(self, snap_create): 
self.assertIsNone(self.drv.create_snapshot(self.ctx, SNAPSHOT)) snap_create.assert_called_once_with({ 'path': SNAPSHOT['snapshot_path']}) @mock.patch('%s.NefSnapshots.delete' % RPC_PATH) def test_delete_snapshot(self, snap_delete): self.assertIsNone(self.drv.delete_snapshot(self.ctx, SNAPSHOT)) payload = {'defer': True} snap_delete.assert_called_once_with( SNAPSHOT['snapshot_path'], payload) @mock.patch('%s._mount_filesystem' % DRV_PATH) @mock.patch('%s._remount_filesystem' % DRV_PATH) @mock.patch('%s.NefFilesystems.delete' % RPC_PATH) @mock.patch('%s.NefSnapshots.clone' % RPC_PATH) def test_create_share_from_snapshot( self, snap_clone, fs_delete, remount_fs, mount_fs): mount_fs.return_value = 'mount_path' location = { 'path': 'mount_path', 'id': 'share-uuid2' } self.assertEqual([location], self.drv.create_share_from_snapshot( self.ctx, SHARE2, SNAPSHOT)) size = int(SHARE2['size'] * units.Gi * 1.1) payload = { 'targetPath': SHARE2_PATH, 'referencedQuotaSize': size, 'recordSize': self.cfg.nexenta_dataset_record_size, 'compressionMode': self.cfg.nexenta_dataset_compression, 'nonBlockingMandatoryMode': False, 'referencedReservationSize': size } snap_clone.assert_called_once_with(SNAPSHOT['snapshot_path'], payload) @mock.patch('%s._mount_filesystem' % DRV_PATH) @mock.patch('%s._remount_filesystem' % DRV_PATH) @mock.patch('%s.NefFilesystems.delete' % RPC_PATH) @mock.patch('%s.NefSnapshots.clone' % RPC_PATH) def test_create_share_from_snapshot_error( self, snap_clone, fs_delete, remount_fs, mount_fs): fs_delete.side_effect = jsonrpc.NefException('delete error') mount_fs.side_effect = jsonrpc.NefException('create error') self.assertRaises( jsonrpc.NefException, self.drv.create_share_from_snapshot, self.ctx, SHARE2, SNAPSHOT) size = int(SHARE2['size'] * units.Gi * 1.1) payload = { 'targetPath': SHARE2_PATH, 'referencedQuotaSize': size, 'recordSize': self.cfg.nexenta_dataset_record_size, 'compressionMode': self.cfg.nexenta_dataset_compression, 'nonBlockingMandatoryMode': 
False, 'referencedReservationSize': size } snap_clone.assert_called_once_with(SNAPSHOT['snapshot_path'], payload) payload = {'force': True} fs_delete.assert_called_once_with(SHARE2_PATH, payload) @mock.patch('%s.NefFilesystems.rollback' % RPC_PATH) def test_revert_to_snapshot(self, fs_rollback): self.assertIsNone(self.drv.revert_to_snapshot( self.ctx, SNAPSHOT, [], [])) payload = {'snapshot': 'snapshot-snap_id'} fs_rollback.assert_called_once_with( SHARE_PATH, payload) @mock.patch('%s._set_reservation' % DRV_PATH) @mock.patch('%s._set_quota' % DRV_PATH) def test_extend_share(self, set_quota, set_reservation): self.assertIsNone(self.drv.extend_share( SHARE, 2)) set_quota.assert_called_once_with( SHARE, 2) set_reservation.assert_called_once_with( SHARE, 2) @mock.patch('%s.NefFilesystems.get' % RPC_PATH) @mock.patch('%s._set_reservation' % DRV_PATH) @mock.patch('%s._set_quota' % DRV_PATH) def test_shrink_share(self, set_quota, set_reservation, fs_get): fs_get.return_value = { 'bytesUsedBySelf': 0.5 * units.Gi } self.assertIsNone(self.drv.shrink_share( SHARE2, 1)) set_quota.assert_called_once_with( SHARE2, 1) set_reservation.assert_called_once_with( SHARE2, 1) @mock.patch('%s.NefFilesystems.set' % RPC_PATH) def test_set_quota(self, fs_set): quota = int(2 * units.Gi * 1.1) payload = {'referencedQuotaSize': quota} self.assertIsNone(self.drv._set_quota( SHARE, 2)) fs_set.assert_called_once_with(SHARE_PATH, payload) @mock.patch('%s.NefFilesystems.set' % RPC_PATH) def test_set_reservation(self, fs_set): reservation = int(2 * units.Gi * 1.1) payload = {'referencedReservationSize': reservation} self.assertIsNone(self.drv._set_reservation( SHARE, 2)) fs_set.assert_called_once_with(SHARE_PATH, payload) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/nexenta/test_utils.py0000664000175000017500000000257500000000000024465 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta 
Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from oslo_utils import units from manila.share.drivers.nexenta import utils from manila import test @ddt.ddt class TestNexentaUtils(test.TestCase): @ddt.data( # Test empty value (None, 0), ('', 0), ('0', 0), ('12', 12), # Test int values (10, 10), # Test bytes string ('1b', 1), ('1B', 1), ('1023b', 1023), ('0B', 0), # Test other units ('1M', units.Mi), ('1.0M', units.Mi), ) @ddt.unpack def test_str2size(self, value, result): self.assertEqual(result, utils.str2size(value)) def test_str2size_input_error(self): # Invalid format value self.assertRaises(ValueError, utils.str2size, 'A') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0336702 manila-21.0.0/manila/tests/share/drivers/purestorage/0000775000175000017500000000000000000000000022601 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/purestorage/__init__.py0000664000175000017500000000000000000000000024700 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/purestorage/test_flashblade.py0000664000175000017500000003255600000000000026312 0ustar00zuulzuul00000000000000# Copyright 2021 Pure Storage Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for Pure Storage FlashBlade driver.""" import sys from unittest import mock sys.modules["purity_fb"] = mock.Mock() from manila.common import constants from manila import exception from manila.share.drivers.purestorage import flashblade from manila import test _MOCK_SHARE_ID = 1 _MOCK_SNAPSHOT_ID = "snap" _MOCK_SHARE_SIZE = 4294967296 _SINGLE_VIP_LOCATION = [ { "path": 'mockfb2:/share-1-manila', "is_admin_only": False, "metadata": { "preferred": True, } } ] _DUAL_VIP_LOCATION = [ { "path": 'mockfb2:/share-1-manila', "is_admin_only": False, "metadata": { "preferred": True, } }, { "path": 'mockfb3:/share-1-manila', "is_admin_only": False, "metadata": { "preferred": False, } } ] def _create_mock__getitem__(mock): def mock__getitem__(self, key, default=None): return getattr(mock, key, default) return mock__getitem__ test_nfs_share = mock.Mock( id=_MOCK_SHARE_ID, size=_MOCK_SHARE_SIZE, share_proto="NFS" ) test_nfs_share.__getitem__ = _create_mock__getitem__(test_nfs_share) test_snapshot = mock.Mock(id=_MOCK_SNAPSHOT_ID, share=test_nfs_share) test_snapshot.__getitem__ = _create_mock__getitem__(test_snapshot) class FakePurityFBException(Exception): def __init__(self, message=None, error_code=None, *args): self.message = message self.error_code = error_code super(FakePurityFBException, self).__init__(message, error_code, *args) class FlashBladeDriverTestCaseBase(test.TestCase): def setUp(self): 
super(FlashBladeDriverTestCaseBase, self).setUp() self.configuration = mock.Mock() self.configuration.flashblade_mgmt_vip = "mockfb1" self.configuration.flashblade_data_vip = ["mockfb2"] self.configuration.flashblade_api = "api" self.configuration.flashblade_eradicate = True self.configuration.driver_handles_share_servers = False self._mock_filesystem = mock.Mock() self.mock_object(self.configuration, "safe_get", self._fake_safe_get) self.purity_fb = self._patch( "manila.share.drivers.purestorage.flashblade.purity_fb" ) self.driver = flashblade.FlashBladeShareDriver( configuration=self.configuration ) self._sys = self._flashblade_mock() self._sys.api_version = mock.Mock() self._sys.arrays.list_arrays_space = mock.Mock() self.purity_fb.rest.ApiException = FakePurityFBException self.purity_fb.PurityFb.return_value = self._sys self.driver.do_setup(None) self.mock_object( self.driver, "_resize_share", mock.Mock(return_value="fake_dataset"), ) self.mock_object( self.driver, "_make_source_name", mock.Mock(return_value="fake_dataset"), ) self.mock_object( self.driver, "_get_flashblade_filesystem_by_name", mock.Mock(return_value="fake_dataset"), ) self.mock_object( self.driver, "_get_flashblade_snapshot_by_name", mock.Mock(return_value="fake_snapshot.snap"), ) def _flashblade_mock(self): result = mock.Mock() self._mock_filesystem = mock.Mock() result.file_systems.create_file_systems.return_value = ( self._mock_filesystem ) result.file_systems.update_file_systems.return_value = ( self._mock_filesystem ) result.file_systems.delete_file_systems.return_value = ( self._mock_filesystem ) result.file_system_snapshots.create_file_system_snapshots\ .return_value = (self._mock_filesystem) return result def _raise_purity_fb(self, *args, **kwargs): raise FakePurityFBException() def _fake_safe_get(self, value): return getattr(self.configuration, value, None) def _patch(self, path, *args, **kwargs): patcher = mock.patch(path, *args, **kwargs) result = patcher.start() 
self.addCleanup(patcher.stop) return result class FlashBladeDriverTestCase(FlashBladeDriverTestCaseBase): @mock.patch("manila.share.drivers.purestorage.flashblade.purity_fb", None) def test_no_purity_fb_module(self): self.assertRaises(exception.ManilaException, self.driver.do_setup, None) def test_no_auth_parameters(self): self.configuration.flashblade_api = None self.assertRaises( exception.BadConfigurationException, self.driver.do_setup, None ) def test_empty_auth_parameters(self): self.configuration.flashblade_api = "" self.assertRaises( exception.BadConfigurationException, self.driver.do_setup, None ) def test_create_share_incorrect_protocol(self): test_nfs_share.share_proto = "CIFS" self.assertRaises( exception.InvalidShare, self.driver.create_share, None, test_nfs_share, ) def test_create_nfs_share(self): location = self.driver.create_share(None, test_nfs_share) self._sys.file_systems.create_file_systems.assert_called_once_with( self.purity_fb.FileSystem( name="share-%s-manila" % test_nfs_share["id"], provisioned=test_nfs_share["size"], hard_limit_enabled=True, fast_remove_directory_enabled=True, snapshot_directory_enabled=True, nfs=self.purity_fb.NfsRule( v3_enabled=True, rules="", v4_1_enabled=True ), ) ) self.assertEqual(_SINGLE_VIP_LOCATION, location) def test_create_nfs_share_multiple_vips(self): self.configuration.flashblade_data_vip.append("mockfb3") location = self.driver.create_share(None, test_nfs_share) self.assertEqual(_DUAL_VIP_LOCATION, location) def test_delete_share(self): self.mock_object(self.driver, "_get_flashblade_filesystem_by_name") self.driver.delete_share(None, test_nfs_share) share_name = "share-%s-manila" % test_nfs_share["id"] self.driver._get_flashblade_filesystem_by_name.assert_called_once_with( share_name ) self._sys.file_systems.update_file_systems.assert_called_once_with( name=share_name, attributes=self.purity_fb.FileSystem( nfs=self.purity_fb.NfsRule( v3_enabled=False, v4_1_enabled=False ), 
smb=self.purity_fb.ProtocolRule(enabled=False), destroyed=True, ), ) self._sys.file_systems.delete_file_systems.assert_called_once_with( name=share_name ) def test_delete_share_no_eradicate(self): self.configuration.flashblade_eradicate = False self.mock_object(self.driver, "_get_flashblade_filesystem_by_name") self.driver.delete_share(None, test_nfs_share) share_name = "share-%s-manila" % test_nfs_share["id"] self.driver._get_flashblade_filesystem_by_name.assert_called_once_with( share_name ) self._sys.file_systems.update_file_systems.assert_called_once_with( name=share_name, attributes=self.purity_fb.FileSystem( nfs=self.purity_fb.NfsRule( v3_enabled=False, v4_1_enabled=False ), smb=self.purity_fb.ProtocolRule(enabled=False), destroyed=True, ), ) assert not self._sys.file_systems.delete_file_systems.called def test_delete_share_not_found(self): self.mock_object( self.driver, "_get_flashblade_filesystem_by_name", mock.Mock(side_effect=self.purity_fb.rest.ApiException), ) mock_result = self.driver.delete_share(None, test_nfs_share) self.assertIsNone(mock_result) def test_extend_share(self): self.driver.extend_share(test_nfs_share, _MOCK_SHARE_SIZE * 2) self.driver._resize_share.assert_called_once_with( test_nfs_share, _MOCK_SHARE_SIZE * 2, ) def test_shrink_share(self): self.driver.shrink_share(test_nfs_share, _MOCK_SHARE_SIZE / 2) self.driver._resize_share.assert_called_once_with( test_nfs_share, _MOCK_SHARE_SIZE / 2, ) def test_shrink_share_over_consumed(self): self.mock_object( self.driver, "_resize_share", mock.Mock( side_effect=exception.ShareShrinkingPossibleDataLoss( share_id=test_nfs_share["id"] ) ), ) self.assertRaises( exception.ShareShrinkingPossibleDataLoss, self.driver.shrink_share, test_nfs_share, _MOCK_SHARE_SIZE / 2, ) def test_create_snapshot(self): self.mock_object(self.driver, "_get_flashblade_filesystem_by_name") self.mock_object(self.driver, "_get_flashblade_snapshot_by_name") self.mock_object(self.driver, "_make_source_name") 
self.driver.create_snapshot(None, test_snapshot) self._sys.file_system_snapshots.create_file_system_snapshots\ .assert_called_once_with( suffix=self.purity_fb.SnapshotSuffix(test_snapshot["id"]), sources=[mock.ANY], ) def test_delete_snapshot_no_eradicate(self): self.configuration.flashblade_eradicate = False self.mock_object(self.driver, "_get_flashblade_snapshot_by_name") self.driver.delete_snapshot(None, test_snapshot) self._sys.file_system_snapshots.update_file_system_snapshots\ .assert_called_once_with( name=mock.ANY, attributes=self.purity_fb.FileSystemSnapshot(destroyed=True), ) assert not self._sys.file_system_snapshots\ .delete_file_system_snapshots.called def test_delete_snapshot(self): self.mock_object(self.driver, "_get_flashblade_snapshot_by_name") self.driver.delete_snapshot(None, test_snapshot) self._sys.file_system_snapshots.update_file_system_snapshots\ .assert_called_once_with( name=mock.ANY, attributes=self.purity_fb.FileSystemSnapshot(destroyed=True), ) self._sys.file_system_snapshots.delete_file_system_snapshots\ .assert_called_once_with( name=mock.ANY ) def test_delete_snapshot_not_found(self): self.mock_object( self.driver, "_get_flashblade_snapshot_by_name", mock.Mock( side_effect=exception.ShareResourceNotFound( share_id=test_nfs_share["id"] ) ), ) mock_result = self.driver.delete_snapshot(None, test_snapshot) self.assertIsNone(mock_result) def test_update_access_share(self): access_rules = [ { "access_level": constants.ACCESS_LEVEL_RO, "access_to": "1.2.3.4", "access_type": "ip", "access_id": "09960614-8574-4e03-89cf-7cf267b0bd09", }, { "access_level": constants.ACCESS_LEVEL_RW, "access_to": "1.2.3.5", "access_type": "user", "access_id": "09960614-8574-4e03-89cf-7cf267b0bd08", }, ] expected_rule_map = { "09960614-8574-4e03-89cf-7cf267b0bd08": {"state": "error"}, "09960614-8574-4e03-89cf-7cf267b0bd09": {"state": "active"}, } rule_map = self.driver.update_access( None, test_nfs_share, access_rules, [], [], [] ) 
self.assertEqual(expected_rule_map, rule_map) def test_revert_to_snapshot_bad_snapshot(self): self.mock_object( self.driver, "_get_flashblade_filesystem_by_name", mock.Mock(side_effect=self.purity_fb.rest.ApiException), ) mock_result = self.driver.revert_to_snapshot( None, test_snapshot, None, None ) self.assertIsNone(mock_result) def test_revert_to_snapshot(self): self.mock_object(self.driver, "_get_flashblade_snapshot_by_name") self.driver.revert_to_snapshot(None, test_snapshot, [], []) self._sys.file_systems.create_file_systems.assert_called_once_with( overwrite=True, discard_non_snapshotted_data=True, file_system=self.purity_fb.FileSystem( name=test_nfs_share, source=self.purity_fb.Reference(name=mock.ANY), ), ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0376701 manila-21.0.0/manila/tests/share/drivers/qnap/0000775000175000017500000000000000000000000021200 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/qnap/__init__.py0000664000175000017500000000000000000000000023277 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/qnap/fakes.py0000664000175000017500000005474700000000000022664 0ustar00zuulzuul00000000000000# Copyright (c) 2016 QNAP Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. FAKE_RES_DETAIL_DATA_LOGIN = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_1_1_1 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_1_1_3 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_2_0_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_2_1_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_2_2_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS_4_0_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS_4_3_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_TS_4_0_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_TS_4_3_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_1_1_1 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_1_1_3 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_2_0_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_2_1_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_2_2_0 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ERROR = """ """ FAKE_RES_DETAIL_DATA_SHARE_INFO = """ """ FAKE_RES_DETAIL_DATA_VOLUME_INFO = """ fakeMountPath """ FAKE_RES_DETAIL_DATA_SNAPSHOT = """ 10 """ FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO = """ """ FAKE_RES_DETAIL_DATA_GET_HOST_LIST = """ """ FAKE_RES_DETAIL_DATA_CREATE_SHARE = """ """ FAKE_RES_DETAIL_DATA_ES_RET_CODE_NEGATIVE = """ """ FAKE_RES_DETAIL_DATA_RESULT_NEGATIVE = """ """ FAKE_RES_DETAIL_DATA_AUTHPASS_FAIL = """ """ FAKE_RES_DETAIL_DATA_DELETE_SHARE = """ 0 """ FAKE_RES_DETAIL_DATA_DELETE_SNAPSHOT = """ 0 """ FAKE_RES_DETAIL_DATA_DELETE_SNAPSHOT_SNAPSHOT_NOT_EXIST = """ -206021 """ FAKE_RES_DETAIL_DATA_DELETE_SNAPSHOT_SHARE_NOT_EXIST = """ -200005 """ FAKE_RES_DETAIL_DATA_GET_HOST_LIST_API = """ """ FAKE_RES_DETAIL_DATA_GET_NO_HOST_LIST_API = """ """ FAKE_RES_DETAIL_DATA_CREATE_SNAPSHOT = """ """ class SnapshotClass(object): """Snapshot Class.""" size = 0 provider_location = 'fakeShareName@fakeSnapshotName' def __init__(self, size, provider_location=None): """Init.""" self.size = size 
self.provider_location = provider_location def get(self, provider_location): """Get function.""" return self.provider_location def __getitem__(self, arg): """Getitem.""" return { 'display_name': 'fakeSnapshotDisplayName', 'id': 'fakeSnapshotId', 'share': {'share_id': 'fakeShareId', 'id': 'fakeId'}, 'share_instance': {'share_id': 'fakeShareId', 'id': 'fakeId'}, 'size': self.size, 'share_instance_id': 'fakeShareId' }[arg] def __setitem__(self, key, value): """Setitem.""" if key == 'provider_location': self.provider_location = value class ShareNfsClass(object): """Share Class.""" share_proto = 'NFS' id = '' size = 0 def __init__(self, share_id, size): """Init.""" self.id = share_id self.size = size def __getitem__(self, arg): """Getitem.""" return { 'share_proto': self.share_proto, 'id': self.id, 'display_name': 'fakeDisplayName', 'export_locations': [{'path': '1.2.3.4:/share/fakeShareName'}], 'host': 'QnapShareDriver', 'size': self.size }[arg] def __setitem__(self, key, value): """Setitem.""" if key == 'share_proto': self.share_proto = value class ShareCifsClass(object): """Share Class.""" share_proto = 'CIFS' id = '' size = 0 def __init__(self, share_id, size): """Init.""" self.id = share_id self.size = size def __getitem__(self, arg): """Getitem.""" return { 'share_proto': self.share_proto, 'id': self.id, 'display_name': 'fakeDisplayName', 'export_locations': [{'path': '\\\\1.2.3.4\\fakeShareName'}], 'host': 'QnapShareDriver', 'size': self.size }[arg] def __setitem__(self, key, value): """Setitem.""" if key == 'share_proto': self.share_proto = value class AccessClass(object): """Access Class.""" access_type = 'fakeAccessType' access_level = 'ro' access_to = 'fakeIp' def __init__(self, access_type, access_level, access_to): """Init.""" self.access_type = access_type self.access_level = access_level self.access_to = access_to def __getitem__(self, arg): """Getitem.""" return { 'access_type': self.access_type, 'access_level': self.access_level, 'access_to': 
self.access_to, }[arg] class FakeGetBasicInfoResponseEs_1_1_1(object): """Fake GetBasicInfo response from ES nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_1_1_1 class FakeGetBasicInfoResponseEs_1_1_3(object): """Fake GetBasicInfo response from ES nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_1_1_3 class FakeGetBasicInfoResponseEs_2_0_0(object): """Fake GetBasicInfo response from ES nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_2_0_0 class FakeGetBasicInfoResponseEs_2_1_0(object): """Fake GetBasicInfo response from ES nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_2_1_0 class FakeGetBasicInfoResponseEs_2_2_0(object): """Fake GetBasicInfo response from ES nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ES_2_2_0 class FakeGetBasicInfoResponseTs_4_0_0(object): """Fake GetBasicInfoTS response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS_4_0_0 class FakeGetBasicInfoResponseTs_4_3_0(object): """Fake GetBasicInfoTS response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS_4_3_0 class FakeGetBasicInfoResponseTesTs_4_0_0(object): """Fake GetBasicInfoTS response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_TS_4_0_0 class FakeGetBasicInfoResponseTesTs_4_3_0(object): """Fake GetBasicInfoTS response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_TS_4_3_0 class FakeGetBasicInfoResponseTesEs_1_1_1(object): """Fake GetBasicInfoTS 
response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_1_1_1 class FakeGetBasicInfoResponseTesEs_1_1_3(object): """Fake GetBasicInfoTS response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_1_1_3 class FakeGetBasicInfoResponseTesEs_2_0_0(object): """Fake GetBasicInfoTS response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_2_0_0 class FakeGetBasicInfoResponseTesEs_2_1_0(object): """Fake GetBasicInfoTS response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_2_1_0 class FakeGetBasicInfoResponseTesEs_2_2_0(object): """Fake GetBasicInfoTS response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_ES_2_2_0 class FakeGetBasicInfoResponseError(object): """Fake GetBasicInfoTS response from TS nas.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_ERROR class FakeCreateShareResponse(object): """Fake login response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_CREATE_SHARE class FakeDeleteShareResponse(object): """Fake login response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_DELETE_SHARE class FakeDeleteSnapshotResponse(object): """Fake delete snapshot response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_DELETE_SNAPSHOT class FakeDeleteSnapshotResponseSnapshotNotExist(object): """Fake delete snapshot response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_DELETE_SNAPSHOT_SNAPSHOT_NOT_EXIST class 
FakeDeleteSnapshotResponseShareNotExist(object): """Fake delete snapshot response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_DELETE_SNAPSHOT_SHARE_NOT_EXIST class FakeGetHostListResponse(object): """Fake host info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GET_HOST_LIST_API class FakeGetNoHostListResponse(object): """Fake host info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GET_NO_HOST_LIST_API class FakeAuthPassFailResponse(object): """Fake pool info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_AUTHPASS_FAIL class FakeEsResCodeNegativeResponse(object): """Fake pool info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_ES_RET_CODE_NEGATIVE class FakeResultNegativeResponse(object): """Fake pool info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_RESULT_NEGATIVE class FakeLoginResponse(object): """Fake login response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_LOGIN class FakeSpecificPoolInfoResponse(object): """Fake pool info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO class FakeShareInfoResponse(object): """Fake pool info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_SHARE_INFO class FakeSnapshotInfoResponse(object): """Fake pool info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_SNAPSHOT class FakeSpecificVolInfoResponse(object): """Fake pool info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_VOLUME_INFO class 
FakeCreateSnapshotResponse(object): """Fake pool info response.""" status = 'fackStatus' def read(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_CREATE_SNAPSHOT ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/qnap/test_api.py0000664000175000017500000010760300000000000023371 0ustar00zuulzuul00000000000000# Copyright (c) 2016 QNAP Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import ddt from http import client as http_client import time from unittest import mock from urllib import parse as urlparse from manila import exception from manila.share.drivers.qnap import qnap from manila import test from manila.tests import fake_share from manila.tests.share.drivers.qnap import fakes def create_configuration(management_url, qnap_share_ip, qnap_nas_login, qnap_nas_password, qnap_poolname): """Create configuration.""" configuration = mock.Mock() configuration.qnap_management_url = management_url configuration.qnap_share_ip = qnap_share_ip configuration.qnap_nas_login = qnap_nas_login configuration.qnap_nas_password = qnap_nas_password configuration.qnap_poolname = qnap_poolname configuration.safe_get.return_value = False return configuration class QnapShareDriverBaseTestCase(test.TestCase): """Base Class for the QnapShareDriver Tests.""" def setUp(self): """Setup the Qnap Driver Base TestCase.""" super(QnapShareDriverBaseTestCase, self).setUp() self.driver = None self.share_api = None def _do_setup(self, management_url, share_ip, nas_login, nas_password, poolname, **kwargs): """Config do setup configurations.""" self.driver = qnap.QnapShareDriver( configuration=create_configuration( management_url, share_ip, nas_login, nas_password, poolname), private_storage=kwargs.get('private_storage')) self.driver.do_setup('context') @ddt.ddt class QnapAPITestCase(QnapShareDriverBaseTestCase): """Tests QNAP api functions.""" login_url = ('/cgi-bin/authLogin.cgi?') get_basic_info_url = ('/cgi-bin/authLogin.cgi') fake_password = 'qnapadmin' def setUp(self): """Setup the Qnap API TestCase.""" super(QnapAPITestCase, self).setUp() fake_parms = {} fake_parms['user'] = 'admin' fake_parms['pwd'] = base64.b64encode( self.fake_password.encode("utf-8")) fake_parms['serviceKey'] = 1 sanitized_params = self._sanitize_params(fake_parms) self.login_url = ('/cgi-bin/authLogin.cgi?%s' % sanitized_params) self.mock_object(http_client, 'HTTPConnection') 
self.share = fake_share.fake_share( share_proto='NFS', id='shareId', display_name='fakeDisplayName', export_locations=[{'path': '1.2.3.4:/share/fakeShareName'}], host='QnapShareDriver', size=10) def _sanitize_params(self, params, doseq=False): sanitized_params = {} for key in params: value = params[key] if value is not None: if isinstance(value, list): sanitized_params[key] = [str(v) for v in value] else: sanitized_params[key] = str(value) sanitized_params = urlparse.urlencode(sanitized_params, doseq) return sanitized_params @ddt.data('fake_share_name', 'fakeLabel') def test_create_share_api(self, fake_name): """Test create share api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeCreateShareResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.create_share( self.share, 'Storage Pool 1', fake_name, 'NFS', qnap_deduplication=False, qnap_compression=True, qnap_thin_provision=True, qnap_ssd_cache=False) fake_params = { 'wiz_func': 'share_create', 'action': 'add_share', 'vol_name': fake_name, 'vol_size': '10' + 'GB', 'threshold': '80', 'dedup': 'off', 'compression': '1', 'thin_pro': '1', 'cache': '0', 'cifs_enable': '0', 'nfs_enable': '1', 'afp_enable': '0', 'ftp_enable': '0', 'encryption': '0', 'hidden': '0', 'oplocks': '1', 'sync': 'always', 'userrw0': 'admin', 'userrd_len': '0', 'userrw_len': '1', 'userno_len': '0', 'access_r': 'setup_users', 'path_type': 'auto', 'recycle_bin': '1', 'recycle_bin_administrators_only': '0', 'pool_name': 'Storage Pool 1', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ('/cgi-bin/wizReq.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), 
mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_api_delete_share(self): """Test delete share api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeDeleteShareResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.delete_share( 'fakeId') fake_params = { 'func': 'volume_mgmt', 'vol_remove': '1', 'volumeID': 'fakeId', 'stop_service': 'no', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( '/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_get_specific_poolinfo(self): """Test get specific poolinfo api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeSpecificPoolInfoResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.get_specific_poolinfo( 'fakePoolId') fake_params = { 'store': 'poolInfo', 'func': 'extra_get', 'poolID': 'fakePoolId', 'Pool_Info': '1', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( '/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, 
mock_http_connection.return_value.request.call_args_list) @ddt.data({'pool_id': "Storage Pool 1"}, {'pool_id': "Storage Pool 1", 'vol_no': 'fakeNo'}, {'pool_id': "Storage Pool 1", 'vol_label': 'fakeShareName'}) def test_get_share_info(self, dict_parm): """Test get share info api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeShareInfoResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.get_share_info(**dict_parm) fake_params = { 'store': 'poolVolumeList', 'poolID': 'Storage Pool 1', 'func': 'extra_get', 'Pool_Vol_Info': '1', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( '/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_get_specific_volinfo(self): """Test get specific volume info api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeSpecificVolInfoResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.get_specific_volinfo( 'fakeNo') fake_params = { 'store': 'volumeInfo', 'volumeID': 'fakeNo', 'func': 'extra_get', 'Volume_Info': '1', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( '/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), 
mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_get_snapshot_info_es(self): """Test get snapsho info api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeSnapshotInfoResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.get_snapshot_info( volID='volId', snapshot_name='fakeSnapshotName') fake_params = { 'func': 'extra_get', 'volumeID': 'volId', 'snapshot_list': '1', 'snap_start': '0', 'snap_count': '100', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( '/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_create_snapshot_api(self): """Test create snapshot api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeCreateSnapshotResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.create_snapshot_api( 'fakeVolumeId', 'fakeSnapshotName') fake_params = { 'func': 'create_snapshot', 'volumeID': 'fakeVolumeId', 'snapshot_name': 'fakeSnapshotName', 'expire_min': '0', 'vital': '1', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( '/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), 
mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) @ddt.data(fakes.FakeDeleteSnapshotResponse(), fakes.FakeDeleteSnapshotResponseSnapshotNotExist(), fakes.FakeDeleteSnapshotResponseShareNotExist()) def test_delete_snapshot_api(self, fakeDeleteSnapshotResponse): """Test delete snapshot api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakeDeleteSnapshotResponse] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.delete_snapshot_api( 'fakeSnapshotId') fake_params = { 'func': 'del_snapshots', 'snapshotID': 'fakeSnapshotId', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( '/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_clone_snapshot_api(self): """Test clone snapshot api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeDeleteSnapshotResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.clone_snapshot( 'fakeSnapshotId', 'fakeNewShareName', 'fakeCloneSize') fake_params = { 'func': 'clone_qsnapshot', 'by_vol': '1', 'snapshotID': 'fakeSnapshotId', 'new_name': 'fakeNewShareName', 'clone_size': '{}g'.format('fakeCloneSize'), 'sid': 'fakeSid', } 
sanitized_params = self._sanitize_params(fake_params) fake_url = ( '/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_edit_share_api(self): """Test edit share api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseTs_4_3_0(), fakes.FakeLoginResponse(), fakes.FakeCreateSnapshotResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') expect_share_dict = { "sharename": 'fakeVolId', "old_sharename": 'fakeVolId', "new_size": 100, "deduplication": False, "compression": True, "thin_provision": True, "ssd_cache": False, "share_proto": "NFS" } self.driver.api_executor.edit_share( expect_share_dict) fake_params = { 'wiz_func': 'share_property', 'action': 'share_property', 'sharename': 'fakeVolId', 'old_sharename': 'fakeVolId', 'dedup': 'off', 'compression': '1', 'thin_pro': '1', 'cache': '0', 'cifs_enable': '0', 'nfs_enable': '1', 'afp_enable': '0', 'ftp_enable': '0', 'hidden': '0', 'oplocks': '1', 'sync': 'always', 'recycle_bin': '1', 'recycle_bin_administrators_only': '0', 'sid': 'fakeSid', } if expect_share_dict.get('new_size'): fake_params['vol_size'] = '100GB' sanitized_params = self._sanitize_params(fake_params) fake_url = ( '/cgi-bin/priv/privWizard.cgi?%s' % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) @ddt.data(fakes.FakeGetHostListResponse(), fakes.FakeGetNoHostListResponse()) def test_get_host_list(self, 
fakeGetHostListResponse): """Test get host list api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakeGetHostListResponse] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.get_host_list() fake_params = { 'module': 'hosts', 'func': 'get_hostlist', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( ('/cgi-bin/accessrights/accessrightsRequest.cgi?%s') % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_add_host(self): """Test add host api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeGetHostListResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.add_host( 'fakeHostName', 'fakeIpV4') fake_params = { 'module': 'hosts', 'func': 'apply_addhost', 'name': 'fakeHostName', 'ipaddr_v4': 'fakeIpV4', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( ('/cgi-bin/accessrights/accessrightsRequest.cgi?%s') % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_edit_host(self): """Test edit host api.""" mock_http_connection = http_client.HTTPConnection 
mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeGetHostListResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.edit_host( 'fakeHostName', ['fakeIpV4']) fake_params = { 'module': 'hosts', 'func': 'apply_sethost', 'name': 'fakeHostName', 'ipaddr_v4': ['fakeIpV4'], 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params, doseq=True) fake_url = ( ('/cgi-bin/accessrights/accessrightsRequest.cgi?%s') % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_delete_host(self): """Test delete host api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakes.FakeGetHostListResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.delete_host('fakeHostName') fake_params = { 'module': 'hosts', 'func': 'apply_delhost', 'host_name': 'fakeHostName', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( ('/cgi-bin/accessrights/accessrightsRequest.cgi?%s') % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) @ddt.data(fakes.FakeGetHostListResponse()) def test_set_nfs_access(self, fakeGetHostListResponse): """Test get host list api.""" mock_http_connection = 
http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fakeGetHostListResponse] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.set_nfs_access( 'fakeShareName', 'fakeAccess', 'fakeHostName') fake_params = { 'wiz_func': 'share_nfs_control', 'action': 'share_nfs_control', 'sharename': 'fakeShareName', 'access': 'fakeAccess', 'host_name': 'fakeHostName', 'sid': 'fakeSid', } sanitized_params = self._sanitize_params(fake_params) fake_url = ( ('/cgi-bin/priv/privWizard.cgi?%s') % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) def test_get_snapshot_info_ts_api(self): """Test get snapshot info api.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseTs_4_3_0(), fakes.FakeLoginResponse(), fakes.FakeSnapshotInfoResponse()] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.driver.api_executor.get_snapshot_info( snapshot_name='fakeSnapshotName', lun_index='fakeLunIndex') fake_params = { 'func': 'extra_get', 'LUNIndex': 'fakeLunIndex', 'smb_snapshot_list': '1', 'smb_snapshot': '1', 'snapshot_list': '1', 'sid': 'fakeSid'} sanitized_params = self._sanitize_params(fake_params) fake_url = ( ('/cgi-bin/disk/snapshot.cgi?%s') % sanitized_params) expected_call_list = [ mock.call('GET', self.login_url), mock.call('GET', self.get_basic_info_url), mock.call('GET', self.login_url), mock.call('GET', fake_url)] self.assertEqual( expected_call_list, mock_http_connection.return_value.request.call_args_list) 
@ddt.data(fakes.FakeAuthPassFailResponse(), fakes.FakeEsResCodeNegativeResponse()) def test_api_create_share_with_fail_response(self, fake_fail_response): """Test create share api with fail response.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3(), fakes.FakeLoginResponse(), fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response] self.mock_object(time, 'sleep') self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.assertRaises( exception.ShareBackendException, self.driver.api_executor.create_share, share=self.share, pool_name='Storage Pool 1', create_share_name='fake_share_name', share_proto='NFS', qnap_deduplication=False, qnap_compression=True, qnap_thin_provision=True, qnap_ssd_cache=False) @ddt.unpack @ddt.data(['self.driver.api_executor.get_share_info', {'pool_id': 'fakeId'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.get_specific_volinfo', {'vol_id': 'fakeId'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.create_snapshot_api', {'volumeID': 'fakeVolumeId', 'snapshot_name': 'fakeSnapshotName'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.create_snapshot_api', {'volumeID': 'fakeVolumeId', 'snapshot_name': 'fakeSnapshotName'}, fakes.FakeEsResCodeNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.get_snapshot_info', {'volID': 'volId'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.get_snapshot_info', {'volID': 'volId'}, fakes.FakeResultNegativeResponse(), 
fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.get_specific_poolinfo', {'pool_id': 'Storage Pool 1'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.get_specific_poolinfo', {'pool_id': 'Storage Pool 1'}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.delete_share', {'vol_id': 'fakeId'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.delete_share', {'vol_id': 'fakeId'}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.delete_snapshot_api', {'snapshot_id': 'fakeSnapshotId'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.delete_snapshot_api', {'snapshot_id': 'fakeSnapshotId'}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.clone_snapshot', {'snapshot_id': 'fakeSnapshotId', 'new_sharename': 'fakeNewShareName', 'clone_size': 'fakeCloneSize'}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.clone_snapshot', {'snapshot_id': 'fakeSnapshotId', 'new_sharename': 'fakeNewShareName', 'clone_size': 'fakeCloneSize'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.edit_share', {'share_dict': {"sharename": 'fakeVolId', "old_sharename": 'fakeVolId', "new_size": 100, "deduplication": False, "compression": True, "thin_provision": False, "ssd_cache": False, "share_proto": "NFS"}}, fakes.FakeEsResCodeNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.edit_share', {'share_dict': {"sharename": 'fakeVolId', "old_sharename": 'fakeVolId', "new_size": 100, "deduplication": False, "compression": True, "thin_provision": False, "ssd_cache": False, "share_proto": "NFS"}}, 
fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.add_host', {'hostname': 'fakeHostName', 'ipv4': 'fakeIpV4'}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.add_host', {'hostname': 'fakeHostName', 'ipv4': 'fakeIpV4'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.edit_host', {'hostname': 'fakeHostName', 'ipv4_list': 'fakeIpV4List'}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.edit_host', {'hostname': 'fakeHostName', 'ipv4_list': 'fakeIpV4List'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.delete_host', {'hostname': 'fakeHostName'}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.delete_host', {'hostname': 'fakeHostName'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.get_host_list', {}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.get_host_list', {}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.set_nfs_access', {'sharename': 'fakeShareName', 'access': 'fakeAccess', 'host_name': 'fakeHostName'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.set_nfs_access', {'sharename': 'fakeShareName', 'access': 'fakeAccess', 'host_name': 'fakeHostName'}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseEs_1_1_3()], ['self.driver.api_executor.get_snapshot_info', {'snapshot_name': 'fakeSnapshoName', 'lun_index': 'fakeLunIndex'}, fakes.FakeAuthPassFailResponse(), fakes.FakeGetBasicInfoResponseTs_4_3_0()], ['self.driver.api_executor.get_snapshot_info', {'snapshot_name': 'fakeSnapshoName', 
'lun_index': 'fakeLunIndex'}, fakes.FakeResultNegativeResponse(), fakes.FakeGetBasicInfoResponseTs_4_3_0()]) def test_get_snapshot_info_ts_with_fail_response( self, api, dict_parm, fake_fail_response, fake_basic_info): """Test get snapshot info api with fail response.""" mock_http_connection = http_client.HTTPConnection mock_http_connection.return_value.getresponse.side_effect = [ fakes.FakeLoginResponse(), fake_basic_info, fakes.FakeLoginResponse(), fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response, fake_fail_response] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1') self.mock_object(time, 'sleep') self.assertRaises( exception.ShareBackendException, eval(api), **dict_parm) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/qnap/test_qnap.py0000664000175000017500000017666700000000000023577 0ustar00zuulzuul00000000000000# Copyright (c) 2016 QNAP Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client
import time
from unittest import mock

import ddt
from defusedxml import ElementTree as etree
from eventlet import greenthread
from oslo_config import cfg

from manila import exception
from manila.share.drivers.qnap import api
from manila.share.drivers.qnap import qnap
from manila.share import share_types
from manila import test
from manila.tests import fake_share
from manila.tests.share.drivers.qnap import fakes

CONF = cfg.CONF


def create_configuration(management_url, qnap_share_ip, qnap_nas_login,
                         qnap_nas_password, qnap_poolname):
    """Build a mock driver configuration for the QNAP tests.

    Returns a ``mock.Mock`` whose attributes mirror the qnap_* config
    options read by the driver; ``safe_get`` always returns False so
    optional features stay disabled in tests.
    """
    configuration = mock.Mock()
    configuration.qnap_management_url = management_url
    configuration.qnap_share_ip = qnap_share_ip
    configuration.qnap_nas_login = qnap_nas_login
    configuration.qnap_nas_password = qnap_nas_password
    configuration.qnap_poolname = qnap_poolname
    configuration.safe_get.return_value = False
    return configuration


class QnapShareDriverBaseTestCase(test.TestCase):
    """Base class for the QnapShareDriver tests."""

    def setUp(self):
        """Set up the Qnap Driver base TestCase."""
        super(QnapShareDriverBaseTestCase, self).setUp()
        self.driver = None
        self.share_api = None

    def _do_setup(self, management_url, share_ip, nas_login,
                  nas_password, poolname, **kwargs):
        """Instantiate the driver with a mock config and run do_setup.

        Accepts an optional ``private_storage`` kwarg which is passed
        straight through to the driver constructor.
        """
        self.driver = qnap.QnapShareDriver(
            configuration=create_configuration(
                management_url,
                share_ip,
                nas_login,
                nas_password,
                poolname),
            private_storage=kwargs.get('private_storage'))
        self.driver.do_setup('context')


@ddt.ddt
class QnapShareDriverLoginTestCase(QnapShareDriverBaseTestCase):
    """Tests do_setup api."""

    def setUp(self):
        """Set up the Qnap Share Driver login TestCase."""
        super(QnapShareDriverLoginTestCase, self).setUp()
        # Both connection classes are mocked; individual tests install
        # getresponse side effects on whichever one they exercise.
        self.mock_object(http_client, 'HTTPConnection')
        self.mock_object(http_client, 'HTTPSConnection')

    @ddt.unpack
    @ddt.data({'mng_url': 'http://1.2.3.4:8080', 'port': '8080',
               'ssl': False},
              {'mng_url': 'https://1.2.3.4:443', 'port': '443',
               'ssl': True})
    def test_do_setup_positive(self, mng_url, port, ssl):
        """Test do_setup over both http and https management URLs."""
        fake_login_response = fakes.FakeLoginResponse()
        fake_get_basic_info_response_es = (
            fakes.FakeGetBasicInfoResponseEs_1_1_3())
        if ssl:
            mock_connection = http_client.HTTPSConnection
        else:
            mock_connection = http_client.HTTPConnection
        # do_setup performs: login, get basic info, login (executor).
        mock_connection.return_value.getresponse.side_effect = [
            fake_login_response,
            fake_get_basic_info_response_es,
            fake_login_response]

        self._do_setup(mng_url, '1.2.3.4', 'admin', 'qnapadmin',
                       'Storage Pool 1')

        self.assertEqual(
            mng_url, self.driver.configuration.qnap_management_url)
        self.assertEqual(
            '1.2.3.4', self.driver.configuration.qnap_share_ip)
        self.assertEqual(
            'admin', self.driver.configuration.qnap_nas_login)
        self.assertEqual(
            'qnapadmin', self.driver.configuration.qnap_nas_password)
        self.assertEqual(
            'Storage Pool 1', self.driver.configuration.qnap_poolname)
        self.assertEqual('fakeSid', self.driver.api_executor.sid)
        self.assertEqual('admin', self.driver.api_executor.username)
        self.assertEqual('qnapadmin', self.driver.api_executor.password)
        self.assertEqual('1.2.3.4', self.driver.api_executor.ip)
        self.assertEqual(port, self.driver.api_executor.port)
        self.assertEqual(ssl, self.driver.api_executor.ssl)

    @ddt.data(fakes.FakeGetBasicInfoResponseTs_4_3_0(),
              fakes.FakeGetBasicInfoResponseTesTs_4_3_0(),
              fakes.FakeGetBasicInfoResponseTesEs_1_1_3())
    def test_do_setup_positive_with_diff_nas(self, fake_basic_info):
        """Test do_setup with different NAS model."""
        fake_login_response = fakes.FakeLoginResponse()
        mock_connection = http_client.HTTPSConnection
        # do_setup performs: login, get basic info, login (executor).
        mock_connection.return_value.getresponse.side_effect = [
            fake_login_response,
            fake_basic_info,
            fake_login_response]

        self._do_setup('https://1.2.3.4:443', '1.2.3.4', 'admin',
                       'qnapadmin', 'Storage Pool 1')

        self.assertEqual('fakeSid', self.driver.api_executor.sid)
        self.assertEqual('admin', self.driver.api_executor.username)
        self.assertEqual('qnapadmin', self.driver.api_executor.password)
        self.assertEqual('1.2.3.4', self.driver.api_executor.ip)
        self.assertEqual('443', self.driver.api_executor.port)
        self.assertTrue(self.driver.api_executor.ssl)

    @ddt.data({
        'fake_basic_info': fakes.FakeGetBasicInfoResponseTs_4_3_0(),
        'expect_result': api.QnapAPIExecutorTS
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseTesTs_4_3_0(),
        'expect_result': api.QnapAPIExecutorTS
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseTesEs_1_1_3(),
        'expect_result': api.QnapAPIExecutor
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseTesEs_2_0_0(),
        'expect_result': api.QnapAPIExecutor
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseTesEs_2_1_0(),
        'expect_result': api.QnapAPIExecutor
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseEs_1_1_3(),
        'expect_result': api.QnapAPIExecutor
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseEs_2_0_0(),
        'expect_result': api.QnapAPIExecutor
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseEs_2_1_0(),
        'expect_result': api.QnapAPIExecutor
    })
    @ddt.unpack
    def test_create_api_executor(self, fake_basic_info, expect_result):
        """Test that do_setup picks the right executor class per NAS model."""
        fake_login_response = fakes.FakeLoginResponse()
        mock_connection = http_client.HTTPSConnection
        mock_connection.return_value.getresponse.side_effect = [
            fake_login_response,
            fake_basic_info,
            fake_login_response]
        self._do_setup('https://1.2.3.4:443', '1.2.3.4', 'admin',
                       'qnapadmin', 'Storage Pool 1')
        self.assertIsInstance(self.driver.api_executor, expect_result)

    @ddt.data({
        'fake_basic_info': fakes.FakeGetBasicInfoResponseTs_4_0_0(),
        'expect_result': exception.ShareBackendException
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseTesTs_4_0_0(),
        'expect_result': exception.ShareBackendException
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseTesEs_1_1_1(),
        'expect_result': exception.ShareBackendException
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseTesEs_2_2_0(),
        'expect_result': exception.ShareBackendException
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseEs_1_1_1(),
        'expect_result': exception.ShareBackendException
    }, {
        'fake_basic_info': fakes.FakeGetBasicInfoResponseEs_2_2_0(),
        'expect_result': exception.ShareBackendException
    })
    @ddt.unpack
    def test_create_api_executor_negative(self,
                                          fake_basic_info, expect_result):
        """Test that do_setup rejects unsupported NAS models/firmware."""
        fake_login_response = fakes.FakeLoginResponse()
        mock_connection = http_client.HTTPSConnection
        mock_connection.return_value.getresponse.side_effect = [
            fake_login_response,
            fake_basic_info,
            fake_login_response]
        self.assertRaises(
            exception.ShareBackendException,
            self._do_setup,
            'https://1.2.3.4:443',
            '1.2.3.4',
            'admin',
            'qnapadmin',
            'Storage Pool 1')

    def test_do_setup_with_exception(self):
        """Test do_setup with exception."""
        fake_login_response = fakes.FakeLoginResponse()
        fake_get_basic_info_response_error = (
            fakes.FakeGetBasicInfoResponseError())
        mock_connection = http_client.HTTPSConnection
        mock_connection.return_value.getresponse.side_effect = [
            fake_login_response,
            fake_get_basic_info_response_error,
            fake_login_response]
        self.driver = qnap.QnapShareDriver(
            configuration=create_configuration(
                'https://1.2.3.4:443',
                '1.2.3.4',
                'admin',
                'qnapadmin',
                'Pool1'))
        self.assertRaises(
            exception.ShareBackendException,
            self.driver.do_setup,
            context='context')

    def test_check_for_setup_error(self):
        """Test check_for_setup_error raises when do_setup never ran."""
        self.driver = qnap.QnapShareDriver(
            configuration=create_configuration(
                'https://1.2.3.4:443',
                '1.2.3.4',
                'admin',
                'qnapadmin',
                'Pool1'))
        self.assertRaises(
            exception.ShareBackendException,
            self.driver.check_for_setup_error)


@ddt.ddt
class QnapShareDriverTestCase(QnapShareDriverBaseTestCase):
    """Tests share driver functions."""

    def setUp(self):
        """Set up the Qnap Driver TestCase with a fake share."""
        super(QnapShareDriverTestCase, self).setUp()
        # The executor factory is mocked so tests drive the executor's
        # mock return values instead of real HTTP traffic.
        self.mock_object(qnap.QnapShareDriver, '_create_api_executor')
        self.share = fake_share.fake_share(
            share_proto='NFS',
            id='shareId',
            display_name='fakeDisplayName',
            export_locations=[{'path': '1.2.3.4:/share/fakeShareName'}],
            host='QnapShareDriver',
            size=10)

    def get_share_info_return_value(self):
        """Return the first share row, as get_share_info would."""
        root = etree.fromstring(fakes.FAKE_RES_DETAIL_DATA_SHARE_INFO)
        share_list = root.find('Volume_Info')
        share_info_tree = share_list.findall('row')
        for share in share_info_tree:
            return share

    def get_snapshot_info_return_value(self):
        """Return the first snapshot row, as get_snapshot_info would."""
        root = etree.fromstring(fakes.FAKE_RES_DETAIL_DATA_SNAPSHOT)
        snapshot_list = root.find('SnapshotList')
        snapshot_info_tree = snapshot_list.findall('row')
        for snapshot in snapshot_info_tree:
            return snapshot

    def get_specific_volinfo_return_value(self):
        """Return the first volume row, as get_specific_volinfo would."""
        root = etree.fromstring(fakes.FAKE_RES_DETAIL_DATA_VOLUME_INFO)
        volume_list = root.find('Volume_Info')
        volume_info_tree = volume_list.findall('row')
        for volume in volume_info_tree:
            return volume

    def get_specific_poolinfo_return_value(self):
        """Return the first pool row, as get_specific_poolinfo would."""
        root = etree.fromstring(fakes.FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO)
        pool_list = root.find('Pool_Index')
        pool_info_tree = pool_list.findall('row')
        for pool in pool_info_tree:
            return pool

    def get_host_list_return_value(self):
        """Return all host elements, as get_host_list would."""
        root = etree.fromstring(fakes.FAKE_RES_DETAIL_DATA_GET_HOST_LIST)
        hosts = []
        host_list = root.find('host_list')
        host_tree = host_list.findall('host')
        for host in host_tree:
            hosts.append(host)
        return hosts

    @ddt.data({
        'fake_extra_spec': {},
        'expect_extra_spec': {
            'qnap_thin_provision': True,
            'qnap_compression': True,
            'qnap_deduplication': False,
            'qnap_ssd_cache': False
        }
    }, {
        'fake_extra_spec': {
            'thin_provisioning': u'true',
            'compression': u'true',
            'qnap_ssd_cache': u'true'
        },
        'expect_extra_spec': {
            'qnap_thin_provision': True,
            'qnap_compression': True,
            'qnap_deduplication': False,
            'qnap_ssd_cache': True
        }
    }, {
        'fake_extra_spec': {
            'thin_provisioning': u' False',
            'compression': u' True',
            'qnap_ssd_cache': u' True'
        },
        'expect_extra_spec': {
            'qnap_thin_provision': False,
            'qnap_compression': True,
            'qnap_deduplication': False,
            'qnap_ssd_cache': True
        }
    }, {
        'fake_extra_spec': {
            'thin_provisioning': u'true',
            'dedupe': u' True',
            'qnap_ssd_cache': u'False'
        },
        'expect_extra_spec': {
            'qnap_thin_provision': True,
            'qnap_compression': True,
            'qnap_deduplication': True,
            'qnap_ssd_cache': False
        }
    }, {
        'fake_extra_spec': {
            'thin_provisioning': u' False',
            'compression': u'false',
            'dedupe': u' False',
            'qnap_ssd_cache': u' False'
        },
        'expect_extra_spec': {
            'qnap_thin_provision': False,
            'qnap_compression': False,
            'qnap_deduplication': False,
            'qnap_ssd_cache': False
        }
    })
    @ddt.unpack
    @mock.patch.object(qnap.QnapShareDriver, '_get_location_path')
    @mock.patch.object(qnap.QnapShareDriver, '_gen_random_name')
    def test_create_share_positive(
            self, mock_gen_random_name, mock_get_location_path,
            fake_extra_spec, expect_extra_spec):
        """Test create share maps share-type extra specs to qnap_* flags."""
        mock_api_executor = qnap.QnapShareDriver._create_api_executor
        # First lookup finds no existing share; second (after create)
        # returns the created share's info.
        mock_api_executor.return_value.get_share_info.side_effect = [
            None, self.get_share_info_return_value()]
        mock_gen_random_name.return_value = 'fakeShareName'
        mock_api_executor.return_value.create_share.return_value = (
            'fakeCreateShareId')
        mock_get_location_path.return_value = None
        mock_private_storage = mock.Mock()
        self.mock_object(greenthread, 'sleep')

        self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin',
                       'qnapadmin', 'Storage Pool 1',
                       private_storage=mock_private_storage)
        self.mock_object(share_types, 'get_extra_specs_from_share',
                         mock.Mock(return_value=fake_extra_spec))
        self.driver.create_share('context', self.share)

        mock_api_return = mock_api_executor.return_value
        expected_call_list = [
            mock.call('Storage Pool 1', vol_label='fakeShareName'),
            mock.call('Storage Pool 1', vol_label='fakeShareName')]
        self.assertEqual(
            expected_call_list,
            mock_api_return.get_share_info.call_args_list)
        mock_api_executor.return_value.create_share.assert_called_once_with(
            self.share,
            self.driver.configuration.qnap_poolname,
            'fakeShareName',
            'NFS',
            **expect_extra_spec)
        mock_get_location_path.assert_called_once_with(
            'fakeShareName', 'NFS', '1.2.3.4', 'fakeNo')

    @mock.patch.object(qnap.QnapShareDriver, '_get_location_path')
    @mock.patch.object(qnap.QnapShareDriver, '_gen_random_name')
    def test_create_share_negative_share_exist(
            self, mock_gen_random_name, mock_get_location_path):
        """Test create share fails when the generated name already exists."""
        mock_api_executor = qnap.QnapShareDriver._create_api_executor
        # get_share_info always finds a share, so every name collides.
        mock_api_executor.return_value.get_share_info.return_value = (
            self.get_share_info_return_value())
        mock_gen_random_name.return_value = 'fakeShareName'
        mock_get_location_path.return_value = None
        mock_private_storage = mock.Mock()
        self.mock_object(time, 'sleep')

        self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin',
                       'qnapadmin', 'Storage Pool 1',
                       private_storage=mock_private_storage)
        self.mock_object(share_types, 'get_extra_specs_from_share',
                         mock.Mock(return_value={}))
        self.assertRaises(
            exception.ShareBackendException,
            self.driver.create_share,
            context='context',
            share=self.share)

    @mock.patch.object(qnap.QnapShareDriver, '_get_location_path')
    @mock.patch.object(qnap.QnapShareDriver, '_gen_random_name')
    def test_create_share_negative_create_fail(
            self, mock_gen_random_name, mock_get_location_path):
        """Test create share fails when the created share never appears."""
        mock_api_executor = qnap.QnapShareDriver._create_api_executor
        # get_share_info never finds the share, even after create.
        mock_api_executor.return_value.get_share_info.return_value = None
        mock_gen_random_name.return_value = 'fakeShareName'
        mock_get_location_path.return_value = None
        mock_private_storage = mock.Mock()
        self.mock_object(time, 'sleep')
        self.mock_object(greenthread, 'sleep')

        self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin',
                       'qnapadmin', 'Storage Pool 1',
                       private_storage=mock_private_storage)
        self.mock_object(share_types, 'get_extra_specs_from_share',
                         mock.Mock(return_value={}))
        self.assertRaises(
            exception.ShareBackendException,
            self.driver.create_share,
            context='context',
            share=self.share)
@mock.patch.object(qnap.QnapShareDriver, '_get_location_path') @mock.patch.object(qnap.QnapShareDriver, '_gen_random_name') def test_create_share_negative_configutarion( self, mock_gen_random_name, mock_get_location_path): """Test create share.""" mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.side_effect = [ None, self.get_share_info_return_value()] mock_gen_random_name.return_value = 'fakeShareName' mock_get_location_path.return_value = None mock_private_storage = mock.Mock() self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value={ 'dedupe': 'true', 'thin_provisioning': 'false'})) self.assertRaises( exception.InvalidExtraSpec, self.driver.create_share, context='context', share=self.share) def test_delete_share_positive(self): """Test delete share with fake_share.""" mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_api_executor.return_value.delete_share.return_value = ( 'fakeCreateShareId') mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeVolNo' self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.delete_share('context', self.share, share_server=None) mock_api_executor.return_value.get_share_info.assert_called_once_with( 'Storage Pool 1', vol_no='fakeVolNo') mock_api_executor.return_value.delete_share.assert_called_once_with( 'fakeNo') def test_delete_share_no_volid(self): """Test delete share with fake_share and no volID.""" mock_private_storage = mock.Mock() mock_private_storage.get.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', 
private_storage=mock_private_storage) self.driver.delete_share('context', self.share, share_server=None) mock_private_storage.get.assert_called_once_with( 'shareId', 'volID') def test_delete_share_no_delete_share(self): """Test delete share with fake_share.""" mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.return_value = None mock_api_executor.return_value.delete_share.return_value = ( 'fakeCreateShareId') mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeVolNo' self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.delete_share('context', self.share, share_server=None) mock_api_executor.return_value.get_share_info.assert_called_once_with( 'Storage Pool 1', vol_no='fakeVolNo') def test_extend_share(self): """Test extend share with fake_share.""" mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_api_executor.return_value.edit_share.return_value = None mock_private_storage = mock.Mock() mock_private_storage.get.side_effect = [ 'fakeVolName', 'True', 'True', 'False', 'False'] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.extend_share(self.share, 100, share_server=None) expect_share_dict = { 'sharename': 'fakeVolName', 'old_sharename': 'fakeVolName', 'new_size': 100, 'thin_provision': True, 'compression': True, 'deduplication': False, 'ssd_cache': False, 'share_proto': 'NFS' } mock_api_executor.return_value.edit_share.assert_called_once_with( expect_share_dict) def test_extend_share_without_share_name(self): """Test extend share without share name.""" mock_private_storage = mock.Mock() mock_private_storage.get.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 
'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.ShareResourceNotFound, self.driver.extend_share, share=self.share, new_size=100, share_server=None) @mock.patch.object(qnap.QnapShareDriver, '_gen_random_name') def test_create_snapshot( self, mock_gen_random_name): """Test create snapshot with fake_snapshot.""" fake_snapshot = fakes.SnapshotClass( 10, 'fakeShareName@fakeSnapshotName') mock_gen_random_name.return_value = 'fakeSnapshotName' mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_snapshot_info.side_effect = [ None, self.get_snapshot_info_return_value()] mock_api_executor.return_value.create_snapshot_api.return_value = ( 'fakeCreateShareId') mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeVolId' self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.create_snapshot( 'context', fake_snapshot, share_server=None) mock_api_return = mock_api_executor.return_value expected_call_list = [ mock.call(volID='fakeVolId', snapshot_name='fakeSnapshotName'), mock.call(volID='fakeVolId', snapshot_name='fakeSnapshotName')] self.assertEqual( expected_call_list, mock_api_return.get_snapshot_info.call_args_list) mock_api_return.create_snapshot_api.assert_called_once_with( 'fakeVolId', 'fakeSnapshotName') def test_create_snapshot_without_volid(self): """Test create snapshot with fake_snapshot.""" fake_snapshot = fakes.SnapshotClass(10, None) mock_private_storage = mock.Mock() mock_private_storage.get.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.ShareResourceNotFound, self.driver.create_snapshot, context='context', snapshot=fake_snapshot, share_server=None) def test_delete_snapshot(self): """Test delete snapshot with fakeSnapshot.""" 
fake_snapshot = fakes.SnapshotClass( 10, 'fakeShareName@fakeSnapshotName') mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.delete_snapshot_api.return_value = ( 'fakeCreateShareId') mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeSnapshotId' self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.delete_snapshot( 'context', fake_snapshot, share_server=None) mock_api_return = mock_api_executor.return_value mock_api_return.delete_snapshot_api.assert_called_once_with( 'fakeShareName@fakeSnapshotName') def test_delete_snapshot_without_snapshot_id(self): """Test delete snapshot with fakeSnapshot and no snapshot id.""" fake_snapshot = fakes.SnapshotClass(10, None) mock_private_storage = mock.Mock() mock_private_storage.get.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.delete_snapshot( 'context', fake_snapshot, share_server=None) mock_private_storage.get.assert_called_once_with( 'fakeSnapshotId', 'snapshot_id') @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') @mock.patch('manila.share.API') @mock.patch.object(qnap.QnapShareDriver, '_gen_random_name') def test_create_share_from_snapshot( self, mock_gen_random_name, mock_share_api, mock_get_location_path): """Test create share from snapshot.""" fake_snapshot = fakes.SnapshotClass( 10, 'fakeShareName@fakeSnapshotName') mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_gen_random_name.return_value = 'fakeShareName' mock_api_executor.return_value.get_share_info.side_effect = [ None, self.get_share_info_return_value()] mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeSnapshotId' mock_share_api.return_value.get.return_value = {'size': 10} self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 
'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.create_share_from_snapshot( 'context', self.share, fake_snapshot, share_server=None) mock_gen_random_name.assert_called_once_with( 'share') mock_api_return = mock_api_executor.return_value expected_call_list = [ mock.call('Storage Pool 1', vol_label='fakeShareName'), mock.call('Storage Pool 1', vol_label='fakeShareName')] self.assertEqual( expected_call_list, mock_api_return.get_share_info.call_args_list) mock_api_return.clone_snapshot.assert_called_once_with( 'fakeShareName@fakeSnapshotName', 'fakeShareName', 10) @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') @mock.patch.object(qnap.QnapShareDriver, '_gen_random_name') def test_create_share_from_snapshot_diff_size( self, mock_gen_random_name, mock_get_location_path): """Test create share from snapshot.""" fake_snapshot = fakes.SnapshotClass( 10, 'fakeShareName@fakeSnapshotName') mock_gen_random_name.return_value = 'fakeShareName' mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.side_effect = [ None, self.get_share_info_return_value()] mock_private_storage = mock.Mock() mock_private_storage.get.side_effect = [ 'True', 'True', 'False', 'False', 'fakeVolName'] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.create_share_from_snapshot( 'context', self.share, fake_snapshot, share_server=None) mock_gen_random_name.assert_called_once_with( 'share') mock_api_return = mock_api_executor.return_value expected_call_list = [ mock.call('Storage Pool 1', vol_label='fakeShareName'), mock.call('Storage Pool 1', vol_label='fakeShareName')] self.assertEqual( expected_call_list, mock_api_return.get_share_info.call_args_list) mock_api_return.clone_snapshot.assert_called_once_with( 'fakeShareName@fakeSnapshotName', 'fakeShareName', 10) def 
test_create_share_from_snapshot_without_snapshot_id(self): """Test create share from snapshot.""" fake_snapshot = fakes.SnapshotClass(10, None) mock_private_storage = mock.Mock() mock_private_storage.get.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.SnapshotResourceNotFound, self.driver.create_share_from_snapshot, context='context', share=self.share, snapshot=fake_snapshot, share_server=None) @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') @mock.patch('manila.share.API') @mock.patch.object(qnap.QnapShareDriver, '_gen_random_name') def test_create_share_from_snapshot_negative_name_exist( self, mock_gen_random_name, mock_share_api, mock_get_location_path): """Test create share from snapshot.""" fake_snapshot = fakes.SnapshotClass( 10, 'fakeShareName@fakeSnapshotName') mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_gen_random_name.return_value = 'fakeShareName' mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeSnapshotId' mock_share_api.return_value.get.return_value = {'size': 10} self.mock_object(time, 'sleep') self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.ShareBackendException, self.driver.create_share_from_snapshot, context='context', share=self.share, snapshot=fake_snapshot, share_server=None) @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') @mock.patch('manila.share.API') @mock.patch.object(qnap.QnapShareDriver, '_gen_random_name') def test_create_share_from_snapshot_negative_clone_fail( self, mock_gen_random_name, mock_share_api, mock_get_location_path): """Test create share from snapshot.""" fake_snapshot = fakes.SnapshotClass( 10, 
'fakeShareName@fakeSnapshotName') mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_gen_random_name.return_value = 'fakeShareName' mock_api_executor.return_value.get_share_info.return_value = None mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeSnapshotId' mock_share_api.return_value.get.return_value = {'size': 10} self.mock_object(time, 'sleep') self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.ShareBackendException, self.driver.create_share_from_snapshot, context='context', share=self.share, snapshot=fake_snapshot, share_server=None) @mock.patch.object(qnap.QnapShareDriver, '_get_timestamp_from_vol_name') @mock.patch.object(qnap.QnapShareDriver, '_allow_access') @ddt.data('fakeHostName', 'fakeHostNameNotMatch') def test_update_access_allow_access( self, fakeHostName, mock_allow_access, mock_get_timestamp_from_vol_name): """Test update access with allow access rules.""" mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeVolName' mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_host_list.return_value = ( self.get_host_list_return_value()) mock_api_executor.return_value.set_nfs_access.return_value = None mock_api_executor.return_value.delete_host.return_value = None mock_allow_access.return_value = None mock_get_timestamp_from_vol_name.return_value = fakeHostName self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.update_access( 'context', self.share, 'access_rules', None, None, None, share_server=None) mock_api_executor.return_value.set_nfs_access.assert_called_once_with( 'fakeVolName', 2, 'all') @mock.patch.object(qnap.QnapShareDriver, '_allow_access') @mock.patch.object(qnap.QnapShareDriver, '_deny_access') def 
test_update_access_deny_and_allow_access( self, mock_deny_access, mock_allow_access): """Test update access with deny and allow access rules.""" mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeVolName' mock_deny_access.return_value = None mock_allow_access.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) delete_rules = [] delete_rules.append('access1') add_rules = [] add_rules.append('access1') update_rules = [] self.driver.update_access( 'context', self.share, None, add_rules, delete_rules, update_rules, share_server=None) mock_deny_access.assert_called_once_with( 'context', self.share, 'access1', None) mock_allow_access.assert_called_once_with( 'context', self.share, 'access1', None) def test_update_access_without_volname(self): """Test update access without volName.""" mock_private_storage = mock.Mock() mock_private_storage.get.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.ShareResourceNotFound, self.driver.update_access, context='context', share=self.share, access_rules='access_rules', add_rules=None, delete_rules=None, update_rules=None, share_server=None) @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') def test_manage_existing_nfs( self, mock_get_location_path): """Test manage existing.""" mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_private_storage = mock.Mock() mock_private_storage.update.return_value = None mock_private_storage.get.side_effect = [ 'fakeVolId', 'fakeVolName'] mock_api_executor.return_value.get_specific_volinfo.return_value = ( self.get_specific_volinfo_return_value()) mock_api_executor.return_value.get_share_info.return_value = ( 
self.get_share_info_return_value()) mock_get_location_path.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) self.driver.manage_existing(self.share, 'driver_options') mock_api_return = mock_api_executor.return_value mock_api_return.get_share_info.assert_called_once_with( 'Storage Pool 1', vol_label='fakeShareName') mock_api_return.get_specific_volinfo.assert_called_once_with( 'fakeNo') mock_get_location_path.assert_called_once_with( 'fakeShareName', 'NFS', '1.2.3.4', 'fakeNo') @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') def test_manage_existing_nfs_negative_configutarion( self, mock_get_location_path): """Test manage existing.""" mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_private_storage = mock.Mock() mock_private_storage.update.return_value = None mock_private_storage.get.side_effect = [ 'fakeVolId', 'fakeVolName'] mock_api_executor.return_value.get_specific_volinfo.return_value = ( self.get_specific_volinfo_return_value()) mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_get_location_path.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.mock_object(share_types, 'get_extra_specs_from_share', mock.Mock(return_value={ 'dedupe': 'true', 'thin_provisioning': 'false'})) self.assertRaises( exception.InvalidExtraSpec, self.driver.manage_existing, share=self.share, driver_options='driver_options') def test_manage_invalid_protocol(self): """Test manage existing.""" share = fake_share.fake_share( share_proto='fakeProtocol', id='fakeId', display_name='fakeDisplayName', export_locations=[{'path': 
''}], host='QnapShareDriver', size=10) mock_private_storage = mock.Mock() self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.InvalidInput, self.driver.manage_existing, share=share, driver_options='driver_options') def test_manage_existing_nfs_without_export_locations(self): share = fake_share.fake_share( share_proto='NFS', id='fakeId', display_name='fakeDisplayName', export_locations=[{'path': ''}], host='QnapShareDriver', size=10) mock_private_storage = mock.Mock() self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.ShareBackendException, self.driver.manage_existing, share=share, driver_options='driver_options') @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') def test_manage_existing_nfs_ip_not_equel_share_ip( self, mock_get_location_path): """Test manage existing with nfs ip not equel to share ip.""" mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_private_storage = mock.Mock() mock_private_storage.update.return_value = None mock_private_storage.get.side_effect = [ 'fakeVolId', 'fakeVolName'] mock_api_executor.return_value.get_specific_volinfo.return_value = ( self.get_specific_volinfo_return_value()) mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_get_location_path.return_value = None self._do_setup('http://1.2.3.4:8080', '1.1.1.1', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.ShareBackendException, self.driver.manage_existing, share=self.share, driver_options='driver_options') @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') def test_manage_existing_nfs_without_existing_share( self, 
mock_get_location_path): """Test manage existing nfs without existing share.""" mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_private_storage = mock.Mock() mock_private_storage.update.return_value = None mock_private_storage.get.side_effect = [ 'fakeVolId', 'fakeVolName'] mock_api_executor.return_value.get_specific_volinfo.return_value = ( self.get_specific_volinfo_return_value()) mock_api_executor.return_value.get_share_info.return_value = ( None) mock_get_location_path.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.ManageInvalidShare, self.driver.manage_existing, share=self.share, driver_options='driver_options') def test_unmanage(self): """Test unmanage.""" mock_private_storage = mock.Mock() mock_private_storage.delete.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.unmanage(self.share) mock_private_storage.delete.assert_called_once_with( 'shareId') @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') def test_manage_existing_snapshot( self, mock_get_location_path): """Test manage existing snapshot snapshot.""" fake_snapshot = fakes.SnapshotClass( 10, 'fakeShareName@fakeSnapshotName') mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_api_executor.return_value.get_snapshot_info.return_value = ( self.get_snapshot_info_return_value()) mock_private_storage = mock.Mock() mock_private_storage.update.return_value = None mock_private_storage.get.side_effect = [ 'fakeVolId', 'fakeVolName'] self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', 
private_storage=mock_private_storage) self.driver.manage_existing_snapshot(fake_snapshot, 'driver_options') mock_api_return = mock_api_executor.return_value mock_api_return.get_share_info.assert_called_once_with( 'Storage Pool 1', vol_no='fakeVolId') fake_metadata = { 'snapshot_id': 'fakeShareName@fakeSnapshotName'} mock_private_storage.update.assert_called_once_with( 'fakeSnapshotId', fake_metadata) def test_unmanage_snapshot(self): """Test unmanage snapshot.""" fake_snapshot = fakes.SnapshotClass( 10, 'fakeShareName@fakeSnapshotName') mock_private_storage = mock.Mock() mock_private_storage.delete.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver.unmanage_snapshot(fake_snapshot) mock_private_storage.delete.assert_called_once_with( 'fakeSnapshotId') @ddt.data( {'expect_result': 'manila-shr-fake_time', 'test_string': 'share'}, {'expect_result': 'manila-snp-fake_time', 'test_string': 'snapshot'}, {'expect_result': 'manila-hst-fake_time', 'test_string': 'host'}, {'expect_result': 'manila-fake_time', 'test_string': ''}) @ddt.unpack @mock.patch('oslo_utils.timeutils.utcnow') def test_gen_random_name( self, mock_utcnow, expect_result, test_string): """Test gen random name.""" mock_private_storage = mock.Mock() mock_utcnow.return_value.strftime.return_value = 'fake_time' self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertEqual( expect_result, self.driver._gen_random_name(test_string)) def test_get_location_path(self): """Test get location path name.""" mock_private_storage = mock.Mock() mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_share_info.return_value = ( self.get_share_info_return_value()) mock_api_executor.return_value.get_specific_volinfo.return_value = ( self.get_specific_volinfo_return_value()) 
self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) location = 'fakeIp:fakeMountPath' expect_result = { 'path': location, 'is_admin_only': False, } self.assertEqual( expect_result, self.driver._get_location_path( 'fakeShareName', 'NFS', 'fakeIp', 'fakeVolId')) self.assertRaises( exception.InvalidInput, self.driver._get_location_path, share_name='fakeShareName', share_proto='fakeProto', ip='fakeIp', vol_id='fakeVolId') def test_update_share_stats(self): """Test update share stats.""" mock_private_storage = mock.Mock() mock_api_return = ( qnap.QnapShareDriver._create_api_executor.return_value) mock_api_return.get_specific_poolinfo.return_value = ( self.get_specific_poolinfo_return_value()) mock_api_return.get_share_info.return_value = ( self.get_share_info_return_value()) mock_api_return.get_specific_volinfo.return_value = ( self.get_specific_volinfo_return_value()) self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver._update_share_stats() mock_api_return.get_specific_poolinfo.assert_called_once_with( self.driver.configuration.qnap_poolname) def test_get_vol_host(self): """Test get manila host IPV4s.""" mock_private_storage = mock.Mock() self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) expect_host_dict_ips = [] host_list = self.get_host_list_return_value() for host in host_list: host_dict = { 'index': host.find('index').text, 'hostid': host.find('hostid').text, 'name': host.find('name').text, 'ipv4': [host.find('netaddrs').find('ipv4').text] } expect_host_dict_ips.append(host_dict) self.assertEqual( expect_host_dict_ips, self.driver._get_vol_host( host_list, 'fakeHostName')) @mock.patch.object(qnap.QnapShareDriver, '_gen_host_name') @mock.patch.object(qnap.QnapShareDriver, '_get_timestamp_from_vol_name') 
@mock.patch.object(qnap.QnapShareDriver, '_check_share_access') def test_allow_access_ro( self, mock_check_share_access, mock_get_timestamp_from_vol_name, mock_gen_host_name): """Test allow_access with access type ro.""" fake_access = fakes.AccessClass('fakeAccessType', 'ro', 'fakeIp') mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeVolName' mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_host_list.return_value = [] mock_get_timestamp_from_vol_name.return_value = 'fakeHostName' mock_gen_host_name.return_value = 'manila-fakeHostName-ro' mock_api_executor.return_value.add_host.return_value = None mock_api_executor.return_value.set_nfs_access.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver._allow_access( 'context', self.share, fake_access, share_server=None) mock_check_share_access.assert_called_once_with( 'NFS', 'fakeAccessType') mock_api_executor.return_value.add_host.assert_called_once_with( 'manila-fakeHostName-ro', 'fakeIp') @mock.patch.object(qnap.QnapShareDriver, '_gen_host_name') @mock.patch.object(qnap.QnapShareDriver, '_get_timestamp_from_vol_name') @mock.patch.object(qnap.QnapShareDriver, '_check_share_access') def test_allow_access_ro_with_hostlist( self, mock_check_share_access, mock_get_timestamp_from_vol_name, mock_gen_host_name): """Test allow_access_ro_with_hostlist.""" host_dict_ips = [] for host in self.get_host_list_return_value(): if host.find('netaddrs/ipv4').text is not None: host_dict = { 'index': host.find('index').text, 'hostid': host.find('hostid').text, 'name': host.find('name').text, 'ipv4': [host.find('netaddrs').find('ipv4').text]} host_dict_ips.append(host_dict) for host in host_dict_ips: fake_access_to = host['ipv4'] fake_access = fakes.AccessClass( 'fakeAccessType', 'ro', fake_access_to) mock_private_storage = mock.Mock() 
mock_private_storage.get.return_value = 'fakeVolName' mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_host_list.return_value = ( self.get_host_list_return_value()) mock_get_timestamp_from_vol_name.return_value = 'fakeHostName' mock_gen_host_name.return_value = 'manila-fakeHostName' mock_api_executor.return_value.set_nfs_access.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver._allow_access( 'context', self.share, fake_access, share_server=None) mock_check_share_access.assert_called_once_with( 'NFS', 'fakeAccessType') @mock.patch.object(qnap.QnapShareDriver, '_gen_host_name') @mock.patch.object(qnap.QnapShareDriver, '_get_timestamp_from_vol_name') @mock.patch.object(qnap.QnapShareDriver, '_check_share_access') def test_allow_access_rw_with_hostlist_invalid_access( self, mock_check_share_access, mock_get_timestamp_from_vol_name, mock_gen_host_name): """Test allow_access_rw_invalid_access.""" host_dict_ips = [] for host in self.get_host_list_return_value(): if host.find('netaddrs/ipv4').text is not None: host_dict = { 'index': host.find('index').text, 'hostid': host.find('hostid').text, 'name': host.find('name').text, 'ipv4': [host.find('netaddrs').find('ipv4').text]} host_dict_ips.append(host_dict) for host in host_dict_ips: fake_access_to = host['ipv4'] fake_access = fakes.AccessClass( 'fakeAccessType', 'rw', fake_access_to) mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeVolName' mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_host_list.return_value = ( self.get_host_list_return_value()) mock_get_timestamp_from_vol_name.return_value = 'fakeHostName' mock_gen_host_name.return_value = 'manila-fakeHostName-rw' self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', 
private_storage=mock_private_storage) self.assertRaises( exception.InvalidShareAccess, self.driver._allow_access, context='context', share=self.share, access=fake_access, share_server=None) @mock.patch.object(qnap.QnapShareDriver, '_get_timestamp_from_vol_name') @mock.patch.object(qnap.QnapShareDriver, '_check_share_access') def test_allow_access_rw( self, mock_check_share_access, mock_get_timestamp_from_vol_name): """Test allow_access with access type rw.""" fake_access = fakes.AccessClass('fakeAccessType', 'rw', 'fakeIp') mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeVolName' mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_host_list.return_value = [] mock_get_timestamp_from_vol_name.return_value = 'fakeHostName' mock_api_executor.return_value.add_host.return_value = None mock_api_executor.return_value.set_nfs_access.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver._allow_access( 'context', self.share, fake_access, share_server=None) mock_check_share_access.assert_called_once_with( 'NFS', 'fakeAccessType') mock_api_executor.return_value.add_host.assert_called_once_with( 'manila-fakeHostName-rw', 'fakeIp') @mock.patch.object(qnap.QnapShareDriver, '_gen_host_name') @mock.patch.object(qnap.QnapShareDriver, '_check_share_access') def test_allow_access_ro_without_hostlist( self, mock_check_share_access, mock_gen_host_name): """Test allow access without host list.""" fake_access = fakes.AccessClass('fakeAccessType', 'ro', 'fakeIp') mock_private_storage = mock.Mock() mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_host_list.return_value = None mock_gen_host_name.return_value = 'fakeHostName' mock_api_executor.return_value.add_host.return_value = None mock_api_executor.return_value.set_nfs_access.return_value = None 
self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) share_name = self.driver._gen_random_name('share') mock_private_storage.get.return_value = share_name self.driver._allow_access( 'context', self.share, fake_access, share_server=None) mock_check_share_access.assert_called_once_with( 'NFS', 'fakeAccessType') mock_api_executor.return_value.add_host.assert_called_once_with( 'fakeHostName', 'fakeIp') @mock.patch.object(qnap.QnapShareDriver, '_get_vol_host') @mock.patch.object(qnap.QnapShareDriver, '_gen_host_name') @mock.patch.object(qnap.QnapShareDriver, '_get_timestamp_from_vol_name') @mock.patch.object(qnap.QnapShareDriver, '_check_share_access') def test_deny_access_with_hostlist( self, mock_check_share_access, mock_get_timestamp_from_vol_name, mock_gen_host_name, mock_get_vol_host): """Test deny access.""" host_dict_ips = [] for host in self.get_host_list_return_value(): if host.find('netaddrs/ipv4').text is not None: host_dict = { 'index': host.find('index').text, 'hostid': host.find('hostid').text, 'name': host.find('name').text, 'ipv4': [host.find('netaddrs').find('ipv4').text]} host_dict_ips.append(host_dict) for host in host_dict_ips: fake_access_to = host['ipv4'][0] fake_access = fakes.AccessClass('fakeAccessType', 'ro', fake_access_to) mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'vol_name' mock_api_return = ( qnap.QnapShareDriver._create_api_executor.return_value) mock_api_return.get_host_list.return_value = ( self.get_host_list_return_value()) mock_get_timestamp_from_vol_name.return_value = 'fakeTimeStamp' mock_gen_host_name.return_value = 'manila-fakeHostName' mock_get_vol_host.return_value = host_dict_ips mock_api_return.add_host.return_value = None mock_api_return.set_nfs_access.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver._deny_access( 
'context', self.share, fake_access, share_server=None) mock_check_share_access.assert_called_once_with( 'NFS', 'fakeAccessType') @mock.patch.object(qnap.QnapShareDriver, '_get_timestamp_from_vol_name') @mock.patch.object(qnap.QnapShareDriver, '_check_share_access') def test_deny_access_with_hostlist_not_equel_access_to( self, mock_check_share_access, mock_get_timestamp_from_vol_name): """Test deny access.""" fake_access = fakes.AccessClass('fakeAccessType', 'ro', 'fakeIp') mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'vol_name' mock_api_return = ( qnap.QnapShareDriver._create_api_executor.return_value) mock_api_return.get_host_list.return_value = ( self.get_host_list_return_value()) mock_api_return.add_host.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver._deny_access( 'context', self.share, fake_access, share_server=None) mock_check_share_access.assert_called_once_with( 'NFS', 'fakeAccessType') @mock.patch.object(qnap.QnapShareDriver, '_get_timestamp_from_vol_name') @mock.patch.object(qnap.QnapShareDriver, '_check_share_access') def test_deny_access_without_hostlist( self, mock_check_share_access, mock_get_timestamp_from_vol_name): """Test deny access without hostlist.""" fake_access = fakes.AccessClass('fakeAccessType', 'ro', 'fakeIp') mock_private_storage = mock.Mock() mock_private_storage.get.return_value = 'fakeVolName' mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_host_list.return_value = None mock_get_timestamp_from_vol_name.return_value = 'fakeHostName' mock_api_executor.return_value.add_host.return_value = None mock_api_executor.return_value.set_nfs_access.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.driver._deny_access( 'context', self.share, fake_access, 
share_server=None) mock_check_share_access.assert_called_once_with( 'NFS', 'fakeAccessType') @ddt.data('NFS', 'CIFS', 'proto') def test_check_share_access(self, test_proto): """Test check_share_access.""" mock_private_storage = mock.Mock() mock_api_executor = qnap.QnapShareDriver._create_api_executor mock_api_executor.return_value.get_host_list.return_value = None mock_api_executor.return_value.add_host.return_value = None mock_api_executor.return_value.set_nfs_access.return_value = None self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', 'Storage Pool 1', private_storage=mock_private_storage) self.assertRaises( exception.InvalidShareAccess, self.driver._check_share_access, share_proto=test_proto, access_type='notser') def test_get_ts_model_pool_id(self): """Test get ts model pool id.""" mock_private_storage = mock.Mock() self._do_setup('http://1.2.3.4:8080', '1.2.3.4', 'admin', 'qnapadmin', '1', private_storage=mock_private_storage) self.assertEqual('1', self.driver._get_ts_model_pool_id('1')) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0376701 manila-21.0.0/manila/tests/share/drivers/quobyte/0000775000175000017500000000000000000000000021731 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/quobyte/__init__.py0000664000175000017500000000000000000000000024030 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/quobyte/test_jsonrpc.py0000664000175000017500000002233300000000000025023 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Quobyte, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import tempfile import time from unittest import mock import requests from requests import auth from requests import exceptions from manila import exception from manila.share.drivers.quobyte import jsonrpc from manila import test class FakeResponse(object): def __init__(self, status, body): self.status_code = status self.reason = "HTTP reason" self.body = body self.text = str(body) def json(self): return self.body class QuobyteJsonRpcTestCase(test.TestCase): def setUp(self): super(QuobyteJsonRpcTestCase, self).setUp() self.rpc = jsonrpc.JsonRpc(url="http://test", user_credentials=("me", "team")) self.mock_object(time, 'sleep') @mock.patch.object(requests, 'post', return_value=FakeResponse(200, {"result": "yes"})) def test_request_generation_and_basic_auth(self, req_get_mock): self.rpc.call('method', {'param': 'value'}) req_get_mock.assert_called_once_with( url='http://test', auth=auth.HTTPBasicAuth("me", "team"), json=mock.ANY, timeout=60) def test_jsonrpc_init_with_ca(self): foofile = tempfile.TemporaryFile() fake_url = "https://foo.bar/" fake_credentials = ('fakeuser', 'fakepwd') fake_cert_file = tempfile.TemporaryFile() fake_key_file = tempfile.TemporaryFile() self.rpc = jsonrpc.JsonRpc(url=fake_url, user_credentials=fake_credentials, ca_file=foofile, key_file=fake_key_file, cert_file=fake_cert_file) self.assertEqual("https", self.rpc._url_scheme) self.assertEqual(fake_url, self.rpc._url) self.assertEqual(foofile, self.rpc._ca_file) self.assertEqual(fake_cert_file, self.rpc._cert_file) self.assertEqual(fake_key_file, self.rpc._key_file) 
@mock.patch.object(jsonrpc.LOG, "warning") def test_jsonrpc_init_without_ca(self, mock_warning): self.rpc = jsonrpc.JsonRpc("https://foo.bar/", ('fakeuser', 'fakepwd'), None) mock_warning.assert_called_once_with( "Will not verify the server certificate of the API service" " because the CA certificate is not available.") def test_jsonrpc_init_no_ssl(self): self.rpc = jsonrpc.JsonRpc("http://foo.bar/", ('fakeuser', 'fakepwd')) self.assertEqual("http", self.rpc._url_scheme) @mock.patch.object(requests, "post", return_value=FakeResponse( 200, {"result": "Sweet gorilla of Manila"})) def test_successful_call(self, mock_req_get): result = self.rpc.call('method', {'param': 'value'}) mock_req_get.assert_called_once_with( url=self.rpc._url, json=mock.ANY, # not checking here as of undefined order in dict auth=self.rpc._credentials, timeout=60) self.assertEqual("Sweet gorilla of Manila", result) @mock.patch.object(requests, "post", return_value=FakeResponse( 200, {"result": "Sweet gorilla of Manila"})) def test_https_call_with_cert(self, mock_req_get): fake_cert_file = tempfile.TemporaryFile() fake_key_file = tempfile.TemporaryFile() self.rpc = jsonrpc.JsonRpc(url="https://test", user_credentials=("me", "team"), cert_file=fake_cert_file, key_file=fake_key_file) result = self.rpc.call('method', {'param': 'value'}) mock_req_get.assert_called_once_with( url=self.rpc._url, json=mock.ANY, # not checking here as of undefined order in dict auth=self.rpc._credentials, verify=False, cert=(fake_cert_file, fake_key_file), timeout=60) self.assertEqual("Sweet gorilla of Manila", result) @mock.patch.object(requests, "post", return_value=FakeResponse( 200, {"result": "Sweet gorilla of Manila"})) def test_https_call_verify(self, mock_req_get): fake_ca_file = tempfile.TemporaryFile() self.rpc = jsonrpc.JsonRpc(url="https://test", user_credentials=("me", "team"), ca_file=fake_ca_file) result = self.rpc.call('method', {'param': 'value'}) mock_req_get.assert_called_once_with( url=self.rpc._url, 
json=mock.ANY, # not checking here as of undefined order in dict auth=self.rpc._credentials, verify=fake_ca_file, timeout=60) self.assertEqual("Sweet gorilla of Manila", result) @mock.patch.object(jsonrpc.JsonRpc, "_checked_for_application_error", return_value="Sweet gorilla of Manila") @mock.patch.object(requests, "post", return_value=FakeResponse( 200, {"result": "Sweet gorilla of Manila"})) def test_https_call_verify_expected_error(self, mock_req_get, mock_check): fake_ca_file = tempfile.TemporaryFile() self.rpc = jsonrpc.JsonRpc(url="https://test", user_credentials=("me", "team"), ca_file=fake_ca_file) result = self.rpc.call('method', {'param': 'value'}, expected_errors=[42]) mock_req_get.assert_called_once_with( url=self.rpc._url, json=mock.ANY, # not checking here as of undefined order in dict auth=self.rpc._credentials, verify=fake_ca_file, timeout=60) mock_check.assert_called_once_with( {'result': 'Sweet gorilla of Manila'}, [42]) self.assertEqual("Sweet gorilla of Manila", result) @mock.patch.object(requests, "post", side_effect=exceptions.HTTPError) def test_jsonrpc_call_http_exception(self, req_get_mock): self.assertRaises(exceptions.HTTPError, self.rpc.call, 'method', {'param': 'value'}) req_get_mock.assert_called_once_with( url=self.rpc._url, json=mock.ANY, # not checking here as of undefined order in dict auth=self.rpc._credentials, timeout=60) @mock.patch.object(requests, "post", return_value=FakeResponse( 200, {"error": {"code": 28, "message": "text"}})) def test_application_error(self, req_get_mock): self.assertRaises(exception.QBRpcException, self.rpc.call, 'method', {'param': 'value'}) req_get_mock.assert_called_once_with( url=self.rpc._url, json=mock.ANY, # not checking here as of undefined order in dict auth=self.rpc._credentials, timeout=60) def test_checked_for_application_error(self): resultdict = {"result": "Sweet gorilla of Manila"} self.assertEqual("Sweet gorilla of Manila", (self.rpc._checked_for_application_error( result=resultdict))) 
def test_checked_for_application_error_enf(self): resultdict = {"result": "Sweet gorilla of Manila", "error": {"message": "No Gorilla", "code": jsonrpc.ERROR_ENTITY_NOT_FOUND}} self.assertIsNone( self.rpc._checked_for_application_error( result=resultdict, expected_errors=[jsonrpc.ERROR_ENTITY_NOT_FOUND])) def test_checked_for_application_error_no_entry(self): resultdict = {"result": "Sweet gorilla of Manila", "error": {"message": "No Gorilla", "code": jsonrpc.ERROR_ENOENT}} self.assertIsNone( self.rpc._checked_for_application_error( result=resultdict, expected_errors=[jsonrpc.ERROR_ENOENT])) def test_checked_for_application_error_exception(self): self.assertRaises(exception.QBRpcException, self.rpc._checked_for_application_error, {"result": "Sweet gorilla of Manila", "error": {"message": "No Gorilla", "code": 666 } } ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/quobyte/test_quobyte.py0000664000175000017500000007365300000000000025050 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Quobyte, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from oslo_config import cfg from oslo_utils import units from manila import context from manila import exception from manila.share import configuration as config from manila.share import driver from manila.share.drivers.quobyte import jsonrpc from manila.share.drivers.quobyte import quobyte from manila import test from manila.tests import fake_share CONF = cfg.CONF def fake_rpc_handler(name, *args, **kwargs): if name == 'resolveVolumeName': return None elif name == 'createVolume': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {'nfs_server_ip': 'fake_location', 'nfs_export_path': '/fake_share'} elif name == 'getConfiguration': return { "tenant_configuration": [{ "domain_name": "fake_domain_name", "volume_access": [ {"volume_uuid": "fake_id_1", "restrict_to_network": "10.0.0.1", "read_only": False}, {"volume_uuid": "fake_id_1", "restrict_to_network": "10.0.0.2", "read_only": False}, {"volume_uuid": "fake_id_2", "restrict_to_network": "10.0.0.3", "read_only": False} ]}, {"domain_name": "fake_domain_name_2", "volume_access": [ {"volume_uuid": "fake_id_3", "restrict_to_network": "10.0.0.4", "read_only": False}, {"volume_uuid": "fake_id_3", "restrict_to_network": "10.0.0.5", "read_only": True}, {"volume_uuid": "fake_id_4", "restrict_to_network": "10.0.0.6", "read_only": False} ]} ] } else: return "Unknown fake rpc handler call" def create_fake_access(access_adr, access_id='fake_access_id', access_type='ip', access_level='rw'): return { 'access_id': access_id, 'access_type': access_type, 'access_to': access_adr, 'access_level': access_level } class QuobyteShareDriverTestCase(test.TestCase): """Tests QuobyteShareDriver.""" def setUp(self): super(QuobyteShareDriverTestCase, self).setUp() self._context = context.get_admin_context() CONF.set_default('driver_handles_share_servers', False) self.fake_conf = config.Configuration(None) self._driver = quobyte.QuobyteShareDriver(configuration=self.fake_conf) self._driver.rpc = 
mock.Mock() self.share = fake_share.fake_share( share_proto='NFS', export_location='fake_location:/quobyte/fake_share') self.access = fake_share.fake_access() @mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc', mock.Mock()) def test_do_setup_success(self): self._driver.rpc.call = mock.Mock(return_value=None) self._driver.do_setup(self._context) self._driver.rpc.call.assert_called_with('getInformation', {}) @mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc.__init__', mock.Mock(return_value=None)) @mock.patch.object(jsonrpc.JsonRpc, 'call', side_effect=exception.QBRpcException( result='fake_result', qbcode=666)) def test_do_setup_failure(self, mock_call): self.assertRaises(exception.QBException, self._driver.do_setup, self._context) @mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share") def test_create_share_new_volume(self, qb_resize_mock): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) result = self._driver.create_share(self._context, self.share) self.assertEqual(self.share['export_location'], result) self._driver.rpc.call.assert_has_calls([ mock.call('createVolume', dict( name=self.share['name'], tenant_domain=self.share['project_id'], root_user_id=self.fake_conf.quobyte_default_volume_user, root_group_id=self.fake_conf.quobyte_default_volume_group, configuration_name=self.fake_conf.quobyte_volume_configuration )), mock.call('exportVolume', dict(protocol='NFS', volume_uuid='voluuid'))]) qb_resize_mock.assert_called_once_with(self.share, self.share['size']) @mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share") def test_create_share_existing_volume(self, qb_resize_mock): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) result = self._driver.create_share(self._context, self.share) self.assertEqual(self.share['export_location'], result) resolv_params = {'tenant_domain': 'fake_project_uuid', 'volume_name': 'fakename'} sett_params = {'tenant': {'tenant_id': 'fake_project_uuid'}} create_params = dict( 
name='fakename', tenant_domain='fake_project_uuid', root_user_id='root', root_group_id='root', configuration_name='BASE') self._driver.rpc.call.assert_has_calls([ mock.call('resolveVolumeName', resolv_params, [jsonrpc.ERROR_ENOENT, jsonrpc.ERROR_ENTITY_NOT_FOUND]), mock.call('setTenant', sett_params, expected_errors=[jsonrpc.ERROR_GARBAGE_ARGS]), mock.call('createVolume', create_params), mock.call('exportVolume', dict(protocol='NFS', volume_uuid='voluuid'))]) qb_resize_mock.assert_called_once_with(self.share, self.share['size']) def test_create_share_wrong_protocol(self): share = {'share_proto': 'WRONG_PROTOCOL'} self.assertRaises(exception.QBException, self._driver.create_share, context=None, share=share) def test_delete_share_existing_volume(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {} self._driver.configuration.quobyte_delete_shares = True self._driver.rpc.call = mock.Mock(wraps=rpc_handler) self._driver.delete_share(self._context, self.share) resolv_params = {'volume_name': 'fakename', 'tenant_domain': 'fake_project_uuid'} self._driver.rpc.call.assert_has_calls([ mock.call('resolveVolumeName', resolv_params, [jsonrpc.ERROR_ENOENT, jsonrpc.ERROR_ENTITY_NOT_FOUND]), mock.call('deleteVolume', {'volume_uuid': 'voluuid'})]) def test_delete_share_existing_volume_disabled(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {} CONF.set_default('quobyte_delete_shares', False) self._driver.rpc.call = mock.Mock(wraps=rpc_handler) self._driver.delete_share(self._context, self.share) self._driver.rpc.call.assert_called_with( 'exportVolume', {'volume_uuid': 'voluuid', 'remove_export': True}) @mock.patch.object(quobyte.LOG, 'warning') def test_delete_share_nonexisting_volume(self, mock_warning): def rpc_handler(name, *args): if name == 'resolveVolumeName': return None self._driver.rpc.call = 
mock.Mock(wraps=rpc_handler) self._driver.delete_share(self._context, self.share) mock_warning.assert_called_with( 'No volume found for share %(project_id)s/%(name)s', {'project_id': 'fake_project_uuid', 'name': 'fakename'}) def test_allow_access(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {'nfs_server_ip': '10.10.1.1', 'nfs_export_path': '/voluuid'} self._driver.rpc.call = mock.Mock(wraps=rpc_handler) self._driver._allow_access(self._context, self.share, self.access) exp_params = {'volume_uuid': 'voluuid', 'read_only': False, 'add_allow_ip': '10.0.0.1'} self._driver.rpc.call.assert_called_with('exportVolume', exp_params) def test_allow_ro_access(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {'nfs_server_ip': '10.10.1.1', 'nfs_export_path': '/voluuid'} self._driver.rpc.call = mock.Mock(wraps=rpc_handler) ro_access = fake_share.fake_access(access_level='ro') self._driver._allow_access(self._context, self.share, ro_access) exp_params = {'volume_uuid': 'voluuid', 'read_only': True, 'add_allow_ip': '10.0.0.1'} self._driver.rpc.call.assert_called_with('exportVolume', exp_params) def test_allow_access_nonip(self): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) self.access = fake_share.fake_access(**{"access_type": "non_existant_access_type"}) self.assertRaises(exception.InvalidShareAccess, self._driver._allow_access, self._context, self.share, self.access) def test_deny_access(self): def rpc_handler(name, *args): if name == 'resolveVolumeName': return {'volume_uuid': 'voluuid'} elif name == 'exportVolume': return {'nfs_server_ip': '10.10.1.1', 'nfs_export_path': '/voluuid'} self._driver.rpc.call = mock.Mock(wraps=rpc_handler) self._driver._deny_access(self._context, self.share, self.access) self._driver.rpc.call.assert_called_with( 'exportVolume', {'volume_uuid': 'voluuid', 
'remove_allow_ip': '10.0.0.1'}) @mock.patch.object(quobyte.LOG, 'debug') def test_deny_access_nonip(self, mock_debug): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) self.access = fake_share.fake_access( access_type="non_existant_access_type") self._driver._deny_access(self._context, self.share, self.access) mock_debug.assert_called_with( 'Quobyte driver only supports ip access control. ' 'Ignoring deny access call for %s , %s', 'fakename', 'fake_project_uuid') def test_resolve_volume_name(self): self._driver.rpc.call = mock.Mock( return_value={'volume_uuid': 'fake_uuid'}) self._driver._resolve_volume_name('fake_vol_name', 'fake_domain_name') exp_params = {'volume_name': 'fake_vol_name', 'tenant_domain': 'fake_domain_name'} self._driver.rpc.call.assert_called_with( 'resolveVolumeName', exp_params, [jsonrpc.ERROR_ENOENT, jsonrpc.ERROR_ENTITY_NOT_FOUND]) def test_resolve_volume_name_NOENT(self): self._driver.rpc.call = mock.Mock( return_value=None) self.assertIsNone( self._driver._resolve_volume_name('fake_vol_name', 'fake_domain_name')) self._driver.rpc.call.assert_called_once_with( 'resolveVolumeName', dict(volume_name='fake_vol_name', tenant_domain='fake_domain_name'), [jsonrpc.ERROR_ENOENT, jsonrpc.ERROR_ENTITY_NOT_FOUND] ) def test_resolve_volume_name_other_error(self): self._driver.rpc.call = mock.Mock( side_effect=exception.QBRpcException( result='fubar', qbcode=666)) self.assertRaises(exception.QBRpcException, self._driver._resolve_volume_name, volume_name='fake_vol_name', tenant_domain='fake_domain_name') @mock.patch.object(driver.ShareDriver, '_update_share_stats') def test_update_share_stats(self, mock_uss): self._driver._get_capacities = mock.Mock(return_value=[42, 23]) self._driver._update_share_stats() mock_uss.assert_called_once_with( dict(storage_protocol='NFS', vendor_name='Quobyte', share_backend_name=self._driver.backend_name, driver_version=self._driver.DRIVER_VERSION, total_capacity_gb=42, free_capacity_gb=23, reserved_percentage=0, 
reserved_snapshot_percentage=0, reserved_share_extend_percentage=0)) def test_get_capacities_gb(self): capval = 42115548133 useval = 19695128917 replfact = 3 self._driver._get_qb_replication_factor = mock.Mock( return_value=replfact) self._driver.rpc.call = mock.Mock( return_value={'total_physical_capacity': str(capval), 'total_physical_usage': str(useval)}) self.assertEqual((39.223160718, 6.960214182), self._driver._get_capacities()) def test_get_capacities_gb_full(self): capval = 1024 * 1024 * 1024 * 3 useval = 1024 * 1024 * 1024 * 3 + 1 replfact = 1 self._driver._get_qb_replication_factor = mock.Mock( return_value=replfact) self._driver.rpc.call = mock.Mock( return_value={'total_physical_capacity': str(capval), 'total_physical_usage': str(useval)}) self.assertEqual((3.0, 0), self._driver._get_capacities()) def test_get_replication(self): fakerepl = 42 self._driver.configuration.quobyte_volume_configuration = 'fakeVolConf' self._driver.rpc.call = mock.Mock( return_value={'configuration': {'volume_metadata_configuration': {'replication_factor': str(fakerepl)}}}) self.assertEqual(fakerepl, self._driver._get_qb_replication_factor()) @mock.patch.object(quobyte.QuobyteShareDriver, "_resolve_volume_name", return_value="fake_uuid") def test_ensure_share(self, mock_qb_resolve_volname): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) result = self._driver.ensure_share(self._context, self.share, None) self.assertEqual(self.share["export_location"], result) (mock_qb_resolve_volname. 
assert_called_once_with(self.share['name'], self.share['project_id'])) self._driver.rpc.call.assert_has_calls([ mock.call('exportVolume', dict( volume_uuid="fake_uuid", protocol='NFS' ))]) @mock.patch.object(quobyte.QuobyteShareDriver, "_resolve_volume_name", return_value=None) def test_ensure_deleted_share(self, mock_qb_resolve_volname): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) self.assertRaises(exception.ShareResourceNotFound, self._driver.ensure_share, self._context, self.share, None) (mock_qb_resolve_volname. assert_called_once_with(self.share['name'], self.share['project_id'])) @mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share") def test_extend_share(self, mock_qsd_resize_share): self._driver.extend_share(ext_share=self.share, ext_size=2, share_server=None) mock_qsd_resize_share.assert_called_once_with(share=self.share, new_size=2) @mock.patch.object(quobyte.QuobyteShareDriver, "_resolve_volume_name", return_value="fake_volume_uuid") def test_resize_share(self, mock_qb_resolv): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) manila_size = 7 newsize_bytes = manila_size * units.Gi self._driver._resize_share(share=self.share, new_size=manila_size) exp_params = { "quotas": [{ "consumer": [{ "type": "VOLUME", "identifier": "fake_volume_uuid", "tenant_id": self.share["project_id"] }], "limits": [{ "type": "LOGICAL_DISK_SPACE", "value": newsize_bytes, }], }]} self._driver.rpc.call.assert_has_calls([ mock.call('setQuota', exp_params)]) mock_qb_resolv.assert_called_once_with(self.share['name'], self.share['project_id']) @mock.patch.object(quobyte.QuobyteShareDriver, "_resolve_volume_name", return_value="fake_id_3") def test_fetch_existing_access(self, mock_qb_resolve_volname): self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler) old_access_1 = create_fake_access(access_id="old_1", access_adr="10.0.0.4") old_access_2 = create_fake_access(access_id="old_2", access_adr="10.0.0.5") exist_list = 
self._driver._fetch_existing_access(context=self._context, share=self.share) # assert expected result here self.assertEqual([old_access_1['access_to'], old_access_2['access_to']], [e.get('access_to') for e in exist_list]) (mock_qb_resolve_volname. assert_called_once_with(self.share['name'], self.share['project_id'])) @mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share") def test_shrink_share(self, mock_qsd_resize_share): self._driver.shrink_share(shrink_share=self.share, shrink_size=3, share_server=None) mock_qsd_resize_share.assert_called_once_with(share=self.share, new_size=3) def test_subtract_access_lists(self): access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5", access_type="rw",) access_2 = create_fake_access(access_id="old_1", access_adr="10.0.0.1", access_type="rw") access_3 = create_fake_access(access_id="old_2", access_adr="10.0.0.3", access_type="ro") access_4 = create_fake_access(access_id="new_2", access_adr="10.0.0.6", access_type="rw") access_5 = create_fake_access(access_id="old_3", access_adr="10.0.0.4", access_type="rw") min_list = [access_1, access_2, access_3, access_4] sub_list = [access_5, access_3, access_2] self.assertEqual([access_1, access_4], self._driver._subtract_access_lists(min_list, sub_list)) def test_subtract_access_lists_level(self): access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5", access_level="rw") access_2 = create_fake_access(access_id="old_1", access_adr="10.0.0.1", access_level="rw") access_3 = create_fake_access(access_id="old_2", access_adr="10.0.0.3", access_level="rw") access_4 = create_fake_access(access_id="new_2", access_adr="10.0.0.6", access_level="rw") access_5 = create_fake_access(access_id="old_2_ro", access_adr="10.0.0.3", access_level="ro") min_list = [access_1, access_2, access_3, access_4] sub_list = [access_5, access_2] self.assertEqual([access_1, access_3, access_4], self._driver._subtract_access_lists(min_list, sub_list)) def 
test_subtract_access_lists_type(self): access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5", access_type="ip") access_2 = create_fake_access(access_id="old_1", access_adr="10.0.0.1", access_type="ip") access_3 = create_fake_access(access_id="old_2", access_adr="10.0.0.3", access_type="ip") access_4 = create_fake_access(access_id="new_2", access_adr="10.0.0.6", access_type="ip") access_5 = create_fake_access(access_id="old_2_ro", access_adr="10.0.0.3", access_type="other") min_list = [access_1, access_2, access_3, access_4] sub_list = [access_5, access_2] self.assertEqual([access_1, access_3, access_4], self._driver._subtract_access_lists(min_list, sub_list)) @mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access") @mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access") def test_update_access_add_delete(self, qb_deny_mock, qb_allow_mock): access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5", access_level="rw") access_2 = create_fake_access(access_id="old_1", access_adr="10.0.0.1", access_level="rw") access_3 = create_fake_access(access_id="old_2", access_adr="10.0.0.3", access_level="rw") self._driver.update_access(self._context, self.share, access_rules=None, add_rules=[access_1], delete_rules=[access_2, access_3], update_rules=[]) qb_allow_mock.assert_called_once_with(self._context, self.share, access_1) deny_calls = [mock.call(self._context, self.share, access_2), mock.call(self._context, self.share, access_3)] qb_deny_mock.assert_has_calls(deny_calls) @mock.patch.object(quobyte.LOG, "warning") def test_update_access_no_rules(self, qb_log_mock): self._driver.update_access(context=None, share=None, access_rules=[], add_rules=[], delete_rules=[], update_rules=[]) qb_log_mock.assert_has_calls([mock.ANY]) @mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists") @mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access") @mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access") def 
test_update_access_recovery_additionals(self, qb_allow_mock, qb_exist_mock, qb_subtr_mock): new_access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.2") old_access = create_fake_access(access_id="fake_access_id", access_adr="10.0.0.1") new_access_2 = create_fake_access(access_id="new_2", access_adr="10.0.0.3") add_access_rules = [new_access_1, old_access, new_access_2] qb_exist_mock.return_value = [old_access] qb_subtr_mock.side_effect = [[new_access_1, new_access_2], []] self._driver.update_access(self._context, self.share, access_rules=add_access_rules, add_rules=[], delete_rules=[], update_rules=[]) assert_calls = [mock.call(self._context, self.share, new_access_1), mock.call(self._context, self.share, new_access_2)] qb_allow_mock.assert_has_calls(assert_calls, any_order=True) qb_exist_mock.assert_called_once_with(self._context, self.share) @mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists") @mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access") @mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access") def test_update_access_recovery_superfluous(self, qb_deny_mock, qb_exist_mock, qb_subtr_mock): old_access_1 = create_fake_access(access_id="old_1", access_adr="10.0.0.1") missing_access_1 = create_fake_access(access_id="mis_1", access_adr="10.0.0.2") old_access_2 = create_fake_access(access_id="old_2", access_adr="10.0.0.3") qb_exist_mock.side_effect = [[old_access_1, old_access_2]] qb_subtr_mock.side_effect = [[], [missing_access_1]] old_access_rules = [old_access_1, old_access_2] self._driver.update_access(self._context, self.share, access_rules=old_access_rules, add_rules=[], delete_rules=[], update_rules=[]) qb_deny_mock.assert_called_once_with(self._context, self.share, (missing_access_1)) qb_exist_mock.assert_called_once_with(self._context, self.share) @mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists") @mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access") 
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access") @mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access") def test_update_access_recovery_add_superfluous(self, qb_allow_mock, qb_deny_mock, qb_exist_mock, qb_subtr_mock): new_access_1 = create_fake_access(access_id="new_1", access_adr="10.0.0.5") old_access_1 = create_fake_access(access_id="old_1", access_adr="10.0.0.1") old_access_2 = create_fake_access(access_id="old_2", access_adr="10.0.0.3") old_access_3 = create_fake_access(access_id="old_3", access_adr="10.0.0.4") miss_access_1 = create_fake_access(access_id="old_3", access_adr="10.0.0.4") new_access_2 = create_fake_access(access_id="new_2", access_adr="10.0.0.3", access_level="ro") new_access_rules = [new_access_1, old_access_1, old_access_2, old_access_3, new_access_2] qb_exist_mock.return_value = [old_access_1, old_access_2, old_access_3, miss_access_1] qb_subtr_mock.side_effect = [[new_access_1, new_access_2], [miss_access_1, old_access_2]] self._driver.update_access(self._context, self.share, new_access_rules, add_rules=[], delete_rules=[], update_rules=[]) a_calls = [mock.call(self._context, self.share, new_access_1), mock.call(self._context, self.share, new_access_2)] qb_allow_mock.assert_has_calls(a_calls) b_calls = [mock.call(self._context, self.share, miss_access_1), mock.call(self._context, self.share, old_access_2)] qb_deny_mock.assert_has_calls(b_calls) qb_exist_mock.assert_called_once_with(self._context, self.share) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0376701 manila-21.0.0/manila/tests/share/drivers/tegile/0000775000175000017500000000000000000000000021512 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/tegile/__init__.py0000664000175000017500000000000000000000000023611 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/tegile/test_tegile.py0000664000175000017500000010115400000000000024376 0ustar00zuulzuul00000000000000# Copyright (c) 2016 by Tegile Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Share driver Test for Tegile storage. """ from unittest import mock import ddt from oslo_config import cfg import requests from manila.common import constants as const from manila import context from manila import exception from manila.share import configuration from manila.share.drivers.tegile import tegile from manila import test CONF = cfg.CONF test_config = configuration.Configuration(None) test_config.tegile_nas_server = 'some-ip' test_config.tegile_nas_login = 'some-user' test_config.tegile_nas_password = 'some-password' test_config.reserved_share_percentage = 10 test_config.reserved_share_from_snapshot_percentage = 5 test_config.reserved_share_extend_percentage = 5 test_config.max_over_subscription_ratio = 30.0 test_share = { 'host': 'node#fake_pool', 'name': 'testshare', 'id': 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e', 'share_proto': 'NFS', 'size': 10, } test_share_cifs = { 'host': 'node#fake_pool', 'name': 'testshare', 'id': 'a24c2ee8-525a-4406-8ccd-8d38688f8e9e', 'share_proto': 'CIFS', 'size': 10, } test_share_fail = { 'host': 'node#fake_pool', 'name': 'testshare', 'id': 
'a24c2ee8-525a-4406-8ccd-8d38688f8e9e', 'share_proto': 'OTHER', 'size': 10, } test_snapshot = { 'name': 'testSnap', 'id': '07ae9978-5445-405e-8881-28f2adfee732', 'share': test_share, 'share_name': 'snapshotted', 'display_name': 'disp', 'display_description': 'disp-desc', } array_stats = { 'total_capacity_gb': 4569.199686084874, 'free_capacity_gb': 4565.381390112452, 'pools': [ { 'total_capacity_gb': 913.5, 'QoS_support': False, 'free_capacity_gb': 911.812650680542, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'pool_name': 'pyramid', }, { 'total_capacity_gb': 2742.1996604874, 'QoS_support': False, 'free_capacity_gb': 2740.148867149747, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'pool_name': 'cobalt', }, { 'total_capacity_gb': 913.5, 'QoS_support': False, 'free_capacity_gb': 913.4198722839355, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'pool_name': 'test', }, ], } fake_tegile_backend_fail = mock.Mock( side_effect=exception.TegileAPIException(response="Fake Exception")) class FakeResponse(object): def __init__(self, status, json_output): self.status_code = status self.text = 'Random text' self._json = json_output def json(self): return self._json def close(self): pass @ddt.ddt class TegileShareDriverTestCase(test.TestCase): def __init__(self, *args, **kwds): super(TegileShareDriverTestCase, self).__init__(*args, **kwds) self._ctxt = context.get_admin_context() self.configuration = test_config def setUp(self): CONF.set_default('driver_handles_share_servers', False) self._driver = tegile.TegileShareDriver( configuration=self.configuration) self._driver._default_project = 'fake_project' super(TegileShareDriverTestCase, self).setUp() def test_create_share(self): api_return_value = (test_config.tegile_nas_server + " " + test_share['name']) mock_api = self.mock_object(self._driver, '_api', mock.Mock( 
return_value=api_return_value)) result = self._driver.create_share(self._ctxt, test_share) expected = { 'is_admin_only': False, 'metadata': { 'preferred': True, }, 'path': 'some-ip:testshare', } self.assertEqual(expected, result) create_params = ( 'fake_pool', 'fake_project', test_share['name'], test_share['share_proto'], ) mock_api.assert_called_once_with('createShare', create_params) def test_create_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=( exception.TegileAPIException( response="Fake Exception")))) self.assertRaises(exception.TegileAPIException, self._driver.create_share, self._ctxt, test_share) create_params = ( 'fake_pool', 'fake_project', test_share['name'], test_share['share_proto'], ) mock_api.assert_called_once_with('createShare', create_params) def test_delete_share(self): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') self._driver.delete_share(self._ctxt, test_share) delete_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) delete_params = (delete_path, True, False) mock_api.assert_called_once_with('deleteShare', delete_params) mock_params.assert_called_once_with(test_share) def test_delete_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=( exception.TegileAPIException( response="Fake Exception")))) self.assertRaises(exception.TegileAPIException, self._driver.delete_share, self._ctxt, test_share) delete_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) delete_params = (delete_path, True, False) mock_api.assert_called_once_with('deleteShare', delete_params) def test_create_snapshot(self): mock_api = self.mock_object(self._driver, '_api') fake_share_info = ('fake_pool', 'fake_project', test_share['name']) 
mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) self._driver.create_snapshot(self._ctxt, test_snapshot) share = { 'poolName': 'fake_pool', 'projectName': 'fake_project', 'name': test_share['name'], 'availableSize': 0, 'totalSize': 0, 'datasetPath': '%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', ), 'mountpoint': test_share['name'], 'local': 'true', } create_params = (share, test_snapshot['name'], False) mock_api.assert_called_once_with('createShareSnapshot', create_params) mock_params.assert_called_once_with(test_share) def test_create_snapshot_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=( exception.TegileAPIException( response="Fake Exception")))) self.assertRaises(exception.TegileAPIException, self._driver.create_snapshot, self._ctxt, test_snapshot) share = { 'poolName': 'fake_pool', 'projectName': 'fake_project', 'name': test_share['name'], 'availableSize': 0, 'totalSize': 0, 'datasetPath': '%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', ), 'mountpoint': test_share['name'], 'local': 'true', } create_params = (share, test_snapshot['name'], False) mock_api.assert_called_once_with('createShareSnapshot', create_params) def test_delete_snapshot(self): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') self._driver.delete_snapshot(self._ctxt, test_snapshot) delete_snap_path = ('%s/%s/%s/%s@%s%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], 'Manual-S-', test_snapshot['name'], )) delete_params = (delete_snap_path, False) mock_api.assert_called_once_with('deleteShareSnapshot', delete_params) mock_params.assert_called_once_with(test_share) def test_delete_snapshot_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=( 
exception.TegileAPIException( response="Fake Exception")))) self.assertRaises(exception.TegileAPIException, self._driver.delete_snapshot, self._ctxt, test_snapshot) delete_snap_path = ('%s/%s/%s/%s@%s%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], 'Manual-S-', test_snapshot['name'], )) delete_params = (delete_snap_path, False) mock_api.assert_called_once_with('deleteShareSnapshot', delete_params) def test_create_share_from_snapshot(self): api_return_value = (test_config.tegile_nas_server + " " + test_share['name']) mock_api = self.mock_object(self._driver, '_api', mock.Mock( return_value=api_return_value)) fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) result = self._driver.create_share_from_snapshot(self._ctxt, test_share, test_snapshot) expected = { 'is_admin_only': False, 'metadata': { 'preferred': True, }, 'path': 'some-ip:testshare', } self.assertEqual(expected, result) create_params = ( '%s/%s/%s/%s@%s%s' % ( 'fake_pool', 'Local', 'fake_project', test_snapshot['share_name'], 'Manual-S-', test_snapshot['name'], ), test_share['name'], True, ) mock_api.assert_called_once_with('cloneShareSnapshot', create_params) mock_params.assert_called_once_with(test_share) def test_create_share_from_snapshot_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=( exception.TegileAPIException( response="Fake Exception")))) self.assertRaises(exception.TegileAPIException, self._driver.create_share_from_snapshot, self._ctxt, test_share, test_snapshot) create_params = ( '%s/%s/%s/%s@%s%s' % ( 'fake_pool', 'Local', 'fake_project', test_snapshot['share_name'], 'Manual-S-', test_snapshot['name'], ), test_share['name'], True, ) mock_api.assert_called_once_with('cloneShareSnapshot', create_params) def test_ensure_share(self): api_return_value = (test_config.tegile_nas_server + " " + 
test_share['name']) mock_api = self.mock_object(self._driver, '_api', mock.Mock( return_value=api_return_value)) fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) result = self._driver.ensure_share(self._ctxt, test_share) expected = [ { 'is_admin_only': False, 'metadata': { 'preferred': True, }, 'path': 'some-ip:testshare', }, ] self.assertEqual(expected, result) ensure_params = [ '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'])] mock_api.assert_called_once_with('getShareIPAndMountPoint', ensure_params) mock_params.assert_called_once_with(test_share) def test_ensure_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=( exception.TegileAPIException( response="Fake Exception")))) self.assertRaises(exception.TegileAPIException, self._driver.ensure_share, self._ctxt, test_share) ensure_params = [ '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'])] mock_api.assert_called_once_with('getShareIPAndMountPoint', ensure_params) def test_get_share_stats(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( return_value=array_stats)) result_dict = self._driver.get_share_stats(True) expected_dict = { 'driver_handles_share_servers': False, 'driver_version': '1.0.0', 'free_capacity_gb': 4565.381390112452, 'pools': [ { 'allocated_capacity_gb': 0.0, 'compression': True, 'dedupe': True, 'free_capacity_gb': 911.812650680542, 'pool_name': 'pyramid', 'qos': False, 'reserved_percentage': 10, 'reserved_snapshot_percentage': 5, 'reserved_share_extend_percentage': 5, 'thin_provisioning': True, 'max_over_subscription_ratio': 30.0, 'total_capacity_gb': 913.5}, { 'allocated_capacity_gb': 0.0, 'compression': True, 'dedupe': True, 'free_capacity_gb': 2740.148867149747, 'pool_name': 'cobalt', 'qos': False, 'reserved_percentage': 10, 
'reserved_snapshot_percentage': 5, 'reserved_share_extend_percentage': 5, 'thin_provisioning': True, 'max_over_subscription_ratio': 30.0, 'total_capacity_gb': 2742.1996604874 }, { 'allocated_capacity_gb': 0.0, 'compression': True, 'dedupe': True, 'free_capacity_gb': 913.4198722839355, 'pool_name': 'test', 'qos': False, 'reserved_percentage': 10, 'reserved_snapshot_percentage': 5, 'reserved_share_extend_percentage': 5, 'thin_provisioning': True, 'max_over_subscription_ratio': 30.0, 'total_capacity_gb': 913.5}, ], 'qos': False, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'replication_domain': None, 'share_backend_name': 'Tegile', 'snapshot_support': True, 'storage_protocol': 'NFS_CIFS', 'total_capacity_gb': 4569.199686084874, 'vendor_name': 'Tegile Systems Inc.', } self.assertSubDictMatch(expected_dict, result_dict) mock_api.assert_called_once_with(fine_logging=False, method='getArrayStats', request_type='get') def test_get_share_stats_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=( exception.TegileAPIException( response="Fake Exception")))) self.assertRaises(exception.TegileAPIException, self._driver.get_share_stats, True) mock_api.assert_called_once_with(fine_logging=False, method='getArrayStats', request_type='get') def test_get_pool(self): result = self._driver.get_pool(test_share) expected = 'fake_pool' self.assertEqual(expected, result) def test_extend_share(self): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') self._driver.extend_share(test_share, 12) extend_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) extend_params = (extend_path, str(12), 'GB') mock_api.assert_called_once_with('resizeShare', extend_params) 
mock_params.assert_called_once_with(test_share) def test_extend_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=( exception.TegileAPIException( response="Fake Exception")))) self.assertRaises(exception.TegileAPIException, self._driver.extend_share, test_share, 30) extend_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) extend_params = (extend_path, str(30), 'GB') mock_api.assert_called_once_with('resizeShare', extend_params) def test_shrink_share(self): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') self._driver.shrink_share(test_share, 15) shrink_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) shrink_params = (shrink_path, str(15), 'GB') mock_api.assert_called_once_with('resizeShare', shrink_params) mock_params.assert_called_once_with(test_share) def test_shrink_share_fail(self): mock_api = self.mock_object(self._driver, '_api', mock.Mock( side_effect=( exception.TegileAPIException( response="Fake Exception")))) self.assertRaises(exception.TegileAPIException, self._driver.shrink_share, test_share, 30) shrink_path = '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name']) shrink_params = (shrink_path, str(30), 'GB') mock_api.assert_called_once_with('resizeShare', shrink_params) @ddt.data('ip', 'user') def test_allow_access(self, access_type): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') access = { 'access_type': access_type, 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip', } self._driver._allow_access(self._ctxt, test_share, access) allow_params = ( 
'%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], ), test_share['share_proto'], access_type, access['access_to'], access['access_level'], ) mock_api.assert_called_once_with('shareAllowAccess', allow_params) mock_params.assert_called_once_with(test_share) @ddt.data({'access_type': 'other', 'to': 'some-ip', 'share': test_share, 'exception_type': exception.InvalidShareAccess}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share, 'exception_type': exception.TegileAPIException}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share_cifs, 'exception_type': exception.InvalidShareAccess}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share_fail, 'exception_type': exception.InvalidShareAccess}) @ddt.unpack def test_allow_access_fail(self, access_type, to, share, exception_type): self.mock_object(self._driver, '_api', mock.Mock( side_effect=exception.TegileAPIException( response="Fake Exception"))) access = { 'access_type': access_type, 'access_level': const.ACCESS_LEVEL_RW, 'access_to': to, } self.assertRaises(exception_type, self._driver._allow_access, self._ctxt, share, access) @ddt.data('ip', 'user') def test_deny_access(self, access_type): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') access = { 'access_type': access_type, 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip', } self._driver._deny_access(self._ctxt, test_share, access) deny_params = ( '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], ), test_share['share_proto'], access_type, access['access_to'], access['access_level'], ) mock_api.assert_called_once_with('shareDenyAccess', deny_params) mock_params.assert_called_once_with(test_share) @ddt.data({'access_type': 'other', 'to': 'some-ip', 'share': test_share, 'exception_type': 
exception.InvalidShareAccess}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share, 'exception_type': exception.TegileAPIException}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share_cifs, 'exception_type': exception.InvalidShareAccess}, {'access_type': 'ip', 'to': 'some-ip', 'share': test_share_fail, 'exception_type': exception.InvalidShareAccess}) @ddt.unpack def test_deny_access_fail(self, access_type, to, share, exception_type): self.mock_object(self._driver, '_api', mock.Mock( side_effect=exception.TegileAPIException( response="Fake Exception"))) access = { 'access_type': access_type, 'access_level': const.ACCESS_LEVEL_RW, 'access_to': to, } self.assertRaises(exception_type, self._driver._deny_access, self._ctxt, share, access) @ddt.data({'access_rules': [{'access_type': 'ip', 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip', }, ], 'add_rules': None, 'delete_rules': None, 'call_name': 'shareAllowAccess'}, {'access_rules': [], 'add_rules': [{'access_type': 'ip', 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip'}, ], 'delete_rules': [], 'call_name': 'shareAllowAccess'}, {'access_rules': [], 'add_rules': [], 'delete_rules': [{'access_type': 'ip', 'access_level': const.ACCESS_LEVEL_RW, 'access_to': 'some-ip', }, ], 'call_name': 'shareDenyAccess'}) @ddt.unpack def test_update_access(self, access_rules, add_rules, delete_rules, call_name): fake_share_info = ('fake_pool', 'fake_project', test_share['name']) mock_params = self.mock_object(self._driver, '_get_pool_project_share_name', mock.Mock(return_value=fake_share_info)) mock_api = self.mock_object(self._driver, '_api') self._driver.update_access(self._ctxt, test_share, access_rules=access_rules, add_rules=add_rules, delete_rules=delete_rules, update_rules=None) allow_params = ( '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], ), test_share['share_proto'], 'ip', 'some-ip', const.ACCESS_LEVEL_RW, ) if not (add_rules or delete_rules): clear_params = 
( '%s/%s/%s/%s' % ( 'fake_pool', 'Local', 'fake_project', test_share['name'], ), test_share['share_proto'], ) mock_api.assert_has_calls([mock.call('clearAccessRules', clear_params), mock.call(call_name, allow_params)]) mock_params.assert_called_with(test_share) else: mock_api.assert_called_once_with(call_name, allow_params) mock_params.assert_called_once_with(test_share) @ddt.data({'path': r'\\some-ip\shareName', 'share_proto': 'CIFS', 'host': 'some-ip'}, {'path': 'some-ip:shareName', 'share_proto': 'NFS', 'host': 'some-ip'}, {'path': 'some-ip:shareName', 'share_proto': 'NFS', 'host': None}) @ddt.unpack def test_get_location_path(self, path, share_proto, host): self._driver._hostname = 'some-ip' result = self._driver._get_location_path('shareName', share_proto, host) expected = { 'is_admin_only': False, 'metadata': { 'preferred': True, }, 'path': path, } self.assertEqual(expected, result) def test_get_location_path_fail(self): self.assertRaises(exception.InvalidInput, self._driver._get_location_path, 'shareName', 'SOME', 'some-ip') def test_get_network_allocations_number(self): result = self._driver.get_network_allocations_number() expected = 0 self.assertEqual(expected, result) class TegileAPIExecutorTestCase(test.TestCase): def setUp(self): self._api = tegile.TegileAPIExecutor("TestCase", test_config.tegile_nas_server, test_config.tegile_nas_login, test_config.tegile_nas_password) super(TegileAPIExecutorTestCase, self).setUp() def test_send_api_post(self): json_output = {'value': 'abc'} self.mock_object(requests, 'post', mock.Mock(return_value=FakeResponse(200, json_output))) result = self._api(method="Test", request_type='post', params='[]', fine_logging=True) self.assertEqual(json_output, result) def test_send_api_get(self): json_output = {'value': 'abc'} self.mock_object(requests, 'get', mock.Mock(return_value=FakeResponse(200, json_output))) result = self._api(method="Test", request_type='get', fine_logging=False) self.assertEqual(json_output, result) def 
test_send_api_get_fail(self): self.mock_object(requests, 'get', mock.Mock(return_value=FakeResponse(404, []))) self.assertRaises(exception.TegileAPIException, self._api, method="Test", request_type='get', fine_logging=False) def test_send_api_value_error_fail(self): json_output = {'value': 'abc'} self.mock_object(requests, 'post', mock.Mock(return_value=FakeResponse(200, json_output))) self.mock_object(FakeResponse, 'json', mock.Mock(side_effect=ValueError)) result = self._api(method="Test", request_type='post', fine_logging=False) expected = '' self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/test_ganesha.py0000664000175000017500000006524400000000000023273 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import errno import os from unittest import mock import ddt from oslo_config import cfg from manila import context from manila import exception from manila.share import configuration as config from manila.share.drivers import ganesha from manila import test from manila.tests import fake_share CONF = cfg.CONF fake_basepath = '/fakepath' fake_export_name = 'fakename--fakeaccid' fake_output_template = { 'EXPORT': { 'Export_Id': 101, 'Path': '/fakepath/fakename', 'Pseudo': '/fakepath/fakename--fakeaccid', 'Tag': 'fakeaccid', 'CLIENT': { 'Clients': '10.0.0.1' }, 'FSAL': 'fakefsal' } } @ddt.ddt class GaneshaNASHelperTestCase(test.TestCase): """Tests GaneshaNASHElper.""" def setUp(self): super(GaneshaNASHelperTestCase, self).setUp() CONF.set_default('ganesha_config_path', '/fakedir0/fakeconfig') CONF.set_default('ganesha_db_path', '/fakedir1/fake.db') CONF.set_default('ganesha_export_dir', '/fakedir0/export.d') CONF.set_default('ganesha_export_template_dir', '/fakedir2/faketempl.d') CONF.set_default('ganesha_service_name', 'ganesha.fakeservice') self._context = context.get_admin_context() self._execute = mock.Mock(return_value=('', '')) self.fake_conf = config.Configuration(None) self.fake_conf_dir_path = '/fakedir0/exports.d' self._helper = ganesha.GaneshaNASHelper( self._execute, self.fake_conf, tag='faketag') self._helper.ganesha = mock.Mock() self._helper.export_template = {'key': 'value'} self.share = fake_share.fake_share() self.access = fake_share.fake_access() def test_load_conf_dir(self): fake_template1 = {'key': 'value1'} fake_template2 = {'key': 'value2'} fake_ls_dir = ['fakefile0.conf', 'fakefile1.json', 'fakefile2.txt'] mock_ganesha_utils_patch = mock.Mock() def fake_patch_run(tmpl1, tmpl2): mock_ganesha_utils_patch( copy.deepcopy(tmpl1), copy.deepcopy(tmpl2)) tmpl1.update(tmpl2) self.mock_object(ganesha.os, 'listdir', mock.Mock(return_value=fake_ls_dir)) self.mock_object(ganesha.LOG, 'info') self.mock_object(ganesha.ganesha_manager, 'parseconf', 
mock.Mock(side_effect=[fake_template1, fake_template2])) self.mock_object(ganesha.ganesha_utils, 'patch', mock.Mock(side_effect=fake_patch_run)) with mock.patch('builtins.open', mock.mock_open()) as mockopen: mockopen().read.side_effect = ['fakeconf0', 'fakeconf1'] ret = self._helper._load_conf_dir(self.fake_conf_dir_path) ganesha.os.listdir.assert_called_once_with( self.fake_conf_dir_path) ganesha.LOG.info.assert_called_once_with( mock.ANY, self.fake_conf_dir_path) mockopen.assert_has_calls([ mock.call('/fakedir0/exports.d/fakefile0.conf'), mock.call('/fakedir0/exports.d/fakefile1.json')], any_order=True) ganesha.ganesha_manager.parseconf.assert_has_calls([ mock.call('fakeconf0'), mock.call('fakeconf1')]) mock_ganesha_utils_patch.assert_has_calls([ mock.call({}, fake_template1), mock.call(fake_template1, fake_template2)]) self.assertEqual(fake_template2, ret) def test_load_conf_dir_no_conf_dir_must_exist_false(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=OSError(errno.ENOENT, os.strerror(errno.ENOENT)))) self.mock_object(ganesha.LOG, 'info') self.mock_object(ganesha.ganesha_manager, 'parseconf') self.mock_object(ganesha.ganesha_utils, 'patch') with mock.patch('builtins.open', mock.mock_open(read_data='fakeconf')) as mockopen: ret = self._helper._load_conf_dir(self.fake_conf_dir_path, must_exist=False) ganesha.os.listdir.assert_called_once_with( self.fake_conf_dir_path) ganesha.LOG.info.assert_called_once_with( mock.ANY, self.fake_conf_dir_path) self.assertFalse(mockopen.called) self.assertFalse(ganesha.ganesha_manager.parseconf.called) self.assertFalse(ganesha.ganesha_utils.patch.called) self.assertEqual({}, ret) def test_load_conf_dir_error_no_conf_dir_must_exist_true(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=OSError(errno.ENOENT, os.strerror(errno.ENOENT)))) self.assertRaises(OSError, self._helper._load_conf_dir, self.fake_conf_dir_path) ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path) def 
test_load_conf_dir_error_conf_dir_present_must_exist_false(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))) self.assertRaises(OSError, self._helper._load_conf_dir, self.fake_conf_dir_path, must_exist=False) ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path) def test_load_conf_dir_error(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=RuntimeError('fake error'))) self.assertRaises(RuntimeError, self._helper._load_conf_dir, self.fake_conf_dir_path) ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path) def test_init_helper(self): mock_template = mock.Mock() mock_ganesha_manager = mock.Mock() self.mock_object(ganesha.ganesha_manager, 'GaneshaManager', mock.Mock(return_value=mock_ganesha_manager)) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value=mock_template)) self.mock_object(self._helper, '_default_config_hook') ret = self._helper.init_helper() ganesha.ganesha_manager.GaneshaManager.assert_called_once_with( self._execute, 'faketag', ganesha_config_path='/fakedir0/fakeconfig', ganesha_export_dir='/fakedir0/export.d', ganesha_db_path='/fakedir1/fake.db', ganesha_service_name='ganesha.fakeservice') self._helper._load_conf_dir.assert_called_once_with( '/fakedir2/faketempl.d', must_exist=False) self.assertFalse(self._helper._default_config_hook.called) self.assertEqual(mock_ganesha_manager, self._helper.ganesha) self.assertEqual(mock_template, self._helper.export_template) self.assertIsNone(ret) def test_init_helper_conf_dir_empty(self): mock_template = mock.Mock() mock_ganesha_manager = mock.Mock() self.mock_object(ganesha.ganesha_manager, 'GaneshaManager', mock.Mock(return_value=mock_ganesha_manager)) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value={})) self.mock_object(self._helper, '_default_config_hook', mock.Mock(return_value=mock_template)) ret = self._helper.init_helper() 
ganesha.ganesha_manager.GaneshaManager.assert_called_once_with( self._execute, 'faketag', ganesha_config_path='/fakedir0/fakeconfig', ganesha_export_dir='/fakedir0/export.d', ganesha_db_path='/fakedir1/fake.db', ganesha_service_name='ganesha.fakeservice') self._helper._load_conf_dir.assert_called_once_with( '/fakedir2/faketempl.d', must_exist=False) self._helper._default_config_hook.assert_called_once_with() self.assertEqual(mock_ganesha_manager, self._helper.ganesha) self.assertEqual(mock_template, self._helper.export_template) self.assertIsNone(ret) def test_default_config_hook(self): fake_template = {'key': 'value'} self.mock_object(ganesha.ganesha_utils, 'path_from', mock.Mock(return_value='/fakedir3/fakeconfdir')) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value=fake_template)) ret = self._helper._default_config_hook() ganesha.ganesha_utils.path_from.assert_called_once_with( ganesha.__file__, 'conf') self._helper._load_conf_dir.assert_called_once_with( '/fakedir3/fakeconfdir') self.assertEqual(fake_template, ret) def test_fsal_hook(self): ret = self._helper._fsal_hook('/fakepath', self.share, self.access) self.assertEqual({}, ret) def test_cleanup_fsal_hook(self): ret = self._helper._cleanup_fsal_hook('/fakepath', self.share, self.access) self.assertIsNone(ret) def test_allow_access(self): mock_ganesha_utils_patch = mock.Mock() def fake_patch_run(tmpl1, tmpl2, tmpl3): mock_ganesha_utils_patch(copy.deepcopy(tmpl1), tmpl2, tmpl3) tmpl1.update(tmpl3) self.mock_object(self._helper.ganesha, 'get_export_id', mock.Mock(return_value=101)) self.mock_object(self._helper, '_fsal_hook', mock.Mock(return_value='fakefsal')) self.mock_object(ganesha.ganesha_utils, 'patch', mock.Mock(side_effect=fake_patch_run)) self.mock_object(ganesha.ganesha_utils, 'validate_access_rule', mock.Mock(return_value=True)) ret = self._helper._allow_access(fake_basepath, self.share, self.access) self._helper.ganesha.get_export_id.assert_called_once_with() 
self._helper._fsal_hook.assert_called_once_with( fake_basepath, self.share, self.access, sub_name=None) mock_ganesha_utils_patch.assert_called_once_with( {}, self._helper.export_template, fake_output_template) self._helper._fsal_hook.assert_called_once_with( fake_basepath, self.share, self.access, sub_name=None) self._helper.ganesha.add_export.assert_called_once_with( fake_export_name, fake_output_template) self.assertIsNone(ret) def test_allow_access_error_invalid_share(self): access = fake_share.fake_access(access_type='notip') self.assertRaises(exception.InvalidShareAccess, self._helper._allow_access, '/fakepath', self.share, access) def test_deny_access(self): ret = self._helper._deny_access('/fakepath', self.share, self.access) self._helper.ganesha.remove_export.assert_called_once_with( 'fakename--fakeaccid') self.assertIsNone(ret) def test_update_access_for_allow(self): self.mock_object(self._helper, '_allow_access') self.mock_object(self._helper, '_deny_access') self._helper.update_access( self._context, self.share, access_rules=[self.access], add_rules=[self.access], delete_rules=[], update_rules=[]) self._helper._allow_access.assert_called_once_with( '/', self.share, self.access) self.assertFalse(self._helper._deny_access.called) self.assertFalse(self._helper.ganesha.reset_exports.called) self.assertFalse(self._helper.ganesha.restart_service.called) def test_update_access_for_deny(self): self.mock_object(self._helper, '_allow_access') self.mock_object(self._helper, '_deny_access') self._helper.update_access( self._context, self.share, access_rules=[], add_rules=[], delete_rules=[self.access], update_rules=[]) self._helper._deny_access.assert_called_once_with( '/', self.share, self.access) self.assertFalse(self._helper._allow_access.called) self.assertFalse(self._helper.ganesha.reset_exports.called) self.assertFalse(self._helper.ganesha.restart_service.called) def test_update_access_recovery(self): self.mock_object(self._helper, '_allow_access') 
self.mock_object(self._helper, '_deny_access') self._helper.update_access( self._context, self.share, access_rules=[self.access], add_rules=[], delete_rules=[], update_rules=[]) self._helper._allow_access.assert_called_once_with( '/', self.share, self.access) self.assertFalse(self._helper._deny_access.called) self.assertTrue(self._helper.ganesha.reset_exports.called) self.assertTrue(self._helper.ganesha.restart_service.called) def test_update_access_invalid_share_access_type(self): bad_rule = fake_share.fake_access(access_type='notip', id='fakeid') expected = {'fakeid': {'state': 'error'}} result = self._helper.update_access(self._context, self.share, access_rules=[bad_rule], add_rules=[], delete_rules=[], update_rules=[]) self.assertEqual(expected, result) def test_update_access_invalid_share_access_level(self): bad_rule = fake_share.fake_access(access_level='RO', id='fakeid') expected = {'fakeid': {'state': 'error'}} result = self._helper.update_access(self._context, self.share, access_rules=[bad_rule], add_rules=[], delete_rules=[], update_rules=[]) self.assertEqual(expected, result) @ddt.ddt class GaneshaNASHelper2TestCase(test.TestCase): """Tests GaneshaNASHelper2.""" def setUp(self): super(GaneshaNASHelper2TestCase, self).setUp() CONF.set_default('ganesha_config_path', '/fakedir0/fakeconfig') CONF.set_default('ganesha_db_path', '/fakedir1/fake.db') CONF.set_default('ganesha_export_dir', '/fakedir0/export.d') CONF.set_default('ganesha_export_template_dir', '/fakedir2/faketempl.d') CONF.set_default('ganesha_service_name', 'ganesha.fakeservice') CONF.set_default('ganesha_rados_store_enable', True) CONF.set_default('ganesha_rados_store_pool_name', 'ceph_pool') CONF.set_default('ganesha_rados_export_index', 'fake_index') CONF.set_default('ganesha_rados_export_counter', 'fake_counter') self._context = context.get_admin_context() self._execute = mock.Mock(return_value=('', '')) self.rados_client = mock.Mock() self.fake_conf = config.Configuration(None) 
self.fake_conf_dir_path = '/fakedir0/exports.d' self._helper = ganesha.GaneshaNASHelper2( self._execute, self.fake_conf, tag='faketag', rados_client=self.rados_client) self._helper.ganesha = mock.Mock() self._helper.export_template = {} self.share = fake_share.fake_share() self.rule1 = fake_share.fake_access(access_level='ro') self.rule2 = fake_share.fake_access(access_level='rw', access_to='10.0.0.2') @ddt.data(False, True) def test_init_helper_with_rados_store(self, rados_store_enable): CONF.set_default('ganesha_rados_store_enable', rados_store_enable) mock_template = mock.Mock() mock_ganesha_manager = mock.Mock() self.mock_object(ganesha.ganesha_manager, 'GaneshaManager', mock.Mock(return_value=mock_ganesha_manager)) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value={})) self.mock_object(self._helper, '_default_config_hook', mock.Mock(return_value=mock_template)) ret = self._helper.init_helper() if rados_store_enable: kwargs = { 'ganesha_config_path': '/fakedir0/fakeconfig', 'ganesha_export_dir': '/fakedir0/export.d', 'ganesha_service_name': 'ganesha.fakeservice', 'ganesha_rados_store_enable': True, 'ganesha_rados_store_pool_name': 'ceph_pool', 'ganesha_rados_export_index': 'fake_index', 'ganesha_rados_export_counter': 'fake_counter', 'rados_client': self.rados_client } else: kwargs = { 'ganesha_config_path': '/fakedir0/fakeconfig', 'ganesha_export_dir': '/fakedir0/export.d', 'ganesha_service_name': 'ganesha.fakeservice', 'ganesha_db_path': '/fakedir1/fake.db' } ganesha.ganesha_manager.GaneshaManager.assert_called_once_with( self._execute, '', **kwargs) self._helper._load_conf_dir.assert_called_once_with( '/fakedir2/faketempl.d', must_exist=False) self.assertEqual(mock_ganesha_manager, self._helper.ganesha) self._helper._default_config_hook.assert_called_once_with() self.assertEqual(mock_template, self._helper.export_template) self.assertIsNone(ret) @ddt.data(False, True) def test_init_helper_conf_dir_empty(self, conf_dir_empty): 
mock_template = mock.Mock() mock_ganesha_manager = mock.Mock() self.mock_object(ganesha.ganesha_manager, 'GaneshaManager', mock.Mock(return_value=mock_ganesha_manager)) if conf_dir_empty: self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value={})) else: self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value=mock_template)) self.mock_object(self._helper, '_default_config_hook', mock.Mock(return_value=mock_template)) ret = self._helper.init_helper() ganesha.ganesha_manager.GaneshaManager.assert_called_once_with( self._execute, '', ganesha_config_path='/fakedir0/fakeconfig', ganesha_export_dir='/fakedir0/export.d', ganesha_service_name='ganesha.fakeservice', ganesha_rados_store_enable=True, ganesha_rados_store_pool_name='ceph_pool', ganesha_rados_export_index='fake_index', ganesha_rados_export_counter='fake_counter', rados_client=self.rados_client) self._helper._load_conf_dir.assert_called_once_with( '/fakedir2/faketempl.d', must_exist=False) self.assertEqual(mock_ganesha_manager, self._helper.ganesha) if conf_dir_empty: self._helper._default_config_hook.assert_called_once_with() else: self.assertFalse(self._helper._default_config_hook.called) self.assertEqual(mock_template, self._helper.export_template) self.assertIsNone(ret) def test_init_helper_with_rados_store_pool_name_not_set(self): self.mock_object(ganesha.ganesha_manager, 'GaneshaManager') self.mock_object(self._helper, '_load_conf_dir') self.mock_object(self._helper, '_default_config_hook') self._helper.configuration.ganesha_rados_store_pool_name = None self.assertRaises( exception.GaneshaException, self._helper.init_helper) self.assertFalse(ganesha.ganesha_manager.GaneshaManager.called) self.assertFalse(self._helper._load_conf_dir.called) self.assertFalse(self._helper._default_config_hook.called) def test_update_access_add_export(self): mock_gh = self._helper.ganesha self.mock_object(mock_gh, 'check_export_exists', mock.Mock(return_value=False)) self.mock_object(mock_gh, 
'get_export_id', mock.Mock(return_value=100)) self.mock_object(self._helper, '_get_export_path', mock.Mock(return_value='/fakepath')) self.mock_object(self._helper, '_get_export_pseudo_path', mock.Mock(return_value='/fakepath')) self.mock_object(self._helper, '_fsal_hook', mock.Mock(return_value={'Name': 'fake'})) self.mock_object(ganesha.ganesha_utils, 'validate_access_rule', mock.Mock(return_value=True)) result_confdict = { 'EXPORT': { 'Export_Id': 100, 'Path': '/fakepath', 'Pseudo': '/fakepath', 'Tag': 'fakename', 'CLIENT': [{ 'Access_Type': 'ro', 'Clients': '10.0.0.1'}], 'FSAL': {'Name': 'fake'} } } self._helper.update_access( self._context, self.share, access_rules=[self.rule1], add_rules=[], delete_rules=[], update_rules=[]) mock_gh.check_export_exists.assert_called_once_with('fakename') mock_gh.get_export_id.assert_called_once_with() self._helper._get_export_path.assert_called_once_with( self.share, sub_name=None) (self._helper._get_export_pseudo_path.assert_called_once_with( self.share, sub_name=None)) self._helper._fsal_hook.assert_called_once_with( None, self.share, None, sub_name=None) mock_gh.add_export.assert_called_once_with( 'fakename', result_confdict) self.assertFalse(mock_gh.update_export.called) self.assertFalse(mock_gh.remove_export.called) @ddt.data({'Access_Type': 'ro', 'Clients': '10.0.0.1'}, [{'Access_Type': 'ro', 'Clients': '10.0.0.1'}]) def test_update_access_update_export(self, client): mock_gh = self._helper.ganesha self.mock_object(mock_gh, 'check_export_exists', mock.Mock(return_value=True)) self.mock_object( mock_gh, '_read_export', mock.Mock(return_value={'EXPORT': {'CLIENT': client}}) ) self.mock_object(ganesha.ganesha_utils, 'validate_access_rule', mock.Mock(return_value=True)) result_confdict = { 'EXPORT': { 'CLIENT': [ {'Access_Type': 'ro', 'Clients': '10.0.0.1'}, {'Access_Type': 'rw', 'Clients': '10.0.0.2'}] } } self._helper.update_access( self._context, self.share, access_rules=[self.rule1, self.rule2], add_rules=[self.rule2], 
delete_rules=[], update_rules=[]) mock_gh.check_export_exists.assert_called_once_with('fakename') mock_gh.update_export.assert_called_once_with('fakename', result_confdict) self.assertFalse(mock_gh.add_export.called) self.assertFalse(mock_gh.remove_export.called) def test_update_access_remove_export(self): mock_gh = self._helper.ganesha self.mock_object(mock_gh, 'check_export_exists', mock.Mock(return_value=True)) self.mock_object(self._helper, '_cleanup_fsal_hook') client = {'Access_Type': 'ro', 'Clients': '10.0.0.1'} self.mock_object( mock_gh, '_read_export', mock.Mock(return_value={'EXPORT': {'CLIENT': client}}) ) self._helper.update_access( self._context, self.share, access_rules=[], add_rules=[], delete_rules=[self.rule1], update_rules=[]) mock_gh.check_export_exists.assert_called_once_with('fakename') mock_gh.remove_export.assert_called_once_with('fakename') self._helper._cleanup_fsal_hook.assert_called_once_with( None, self.share, None, sub_name=None) self.assertFalse(mock_gh.add_export.called) self.assertFalse(mock_gh.update_export.called) def test_update_access_export_file_already_removed(self): mock_gh = self._helper.ganesha self.mock_object(mock_gh, 'check_export_exists', mock.Mock(return_value=False)) self.mock_object(ganesha.LOG, 'warning') self.mock_object(self._helper, '_cleanup_fsal_hook') self._helper.update_access( self._context, self.share, access_rules=[], add_rules=[], delete_rules=[self.rule1], update_rules=[]) mock_gh.check_export_exists.assert_called_once_with('fakename') ganesha.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) self.assertFalse(mock_gh.add_export.called) self.assertFalse(mock_gh.update_export.called) self.assertFalse(mock_gh.remove_export.called) self.assertFalse(self._helper._cleanup_fsal_hook.called) def test_update_access_invalid_share_access_type(self): mock_gh = self._helper.ganesha self.mock_object(mock_gh, 'check_export_exists', mock.Mock(return_value=False)) bad_rule = 
fake_share.fake_access(access_type='notip', id='fakeid') expected = {'fakeid': {'state': 'error'}} result = self._helper.update_access(self._context, self.share, access_rules=[bad_rule], add_rules=[], delete_rules=[], update_rules=[]) self.assertEqual(expected, result) def test_update_access_invalid_share_access_level(self): bad_rule = fake_share.fake_access(access_level='NOT_RO_OR_RW', id='fakeid') expected = {'fakeid': {'state': 'error'}} mock_gh = self._helper.ganesha self.mock_object(mock_gh, 'check_export_exists', mock.Mock(return_value=False)) result = self._helper.update_access(self._context, self.share, access_rules=[bad_rule], add_rules=[], delete_rules=[], update_rules=[]) self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/test_generic.py0000664000175000017500000024064600000000000023302 0ustar00zuulzuul00000000000000# Copyright (c) 2014 NetApp, Inc. # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the Generic driver module.""" import os import time from unittest import mock import ddt from oslo_concurrency import processutils from oslo_config import cfg from manila.common import constants as const from manila import compute from manila import context from manila import exception import manila.share.configuration from manila.share.drivers import generic from manila.share import share_types from manila import ssh_utils from manila import test from manila.tests import fake_compute from manila.tests import fake_service_instance from manila.tests import fake_share from manila.tests import fake_volume from manila import utils from manila import volume CONF = cfg.CONF def get_fake_manage_share(): return { 'id': 'fake', 'share_proto': 'NFS', 'share_type_id': 'fake', 'export_locations': [ {'path': '10.0.0.1:/foo/fake/path'}, {'path': '11.0.0.1:/bar/fake/path'}, ], } def get_fake_snap_dict(): snap_dict = { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'created_at': '2015-08-10 00:05:58', 'updated_at': '2015-08-10 00:05:58', 'consistency_group_id': None, 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None, } return snap_dict def get_fake_access_rule(access_to, access_level, access_type='ip'): return { 'access_type': access_type, 'access_to': access_to, 'access_level': access_level, } @ddt.ddt class GenericShareDriverTestCase(test.TestCase): """Tests GenericShareDriver.""" def setUp(self): super(GenericShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self._execute = mock.Mock(return_value=('', '')) self._helper_cifs = mock.Mock() self._helper_nfs = mock.Mock() CONF.set_default('driver_handles_share_servers', True) self.fake_conf = manila.share.configuration.Configuration(None) self.fake_private_storage = mock.Mock() self.mock_object(self.fake_private_storage, 'get', 
mock.Mock(return_value=None)) with mock.patch.object( generic.service_instance, 'ServiceInstanceManager', fake_service_instance.FakeServiceInstanceManager): self._driver = generic.GenericShareDriver( private_storage=self.fake_private_storage, execute=self._execute, configuration=self.fake_conf) self._driver.service_tenant_id = 'service tenant id' self._driver.service_network_id = 'service network id' self._driver.compute_api = fake_compute.API() self._driver.volume_api = fake_volume.API() self._driver.share_networks_locks = {} self._driver.get_service_instance = mock.Mock() self._driver.share_networks_servers = {} self._driver.admin_context = self._context self.fake_sn = {"id": "fake_sn_id"} self.fake_net_info = { "id": "fake_srv_id", "share_network_id": "fake_sn_id" } fsim = fake_service_instance.FakeServiceInstanceManager() sim = mock.Mock(return_value=fsim) self._driver.instance_manager = sim self._driver.service_instance_manager = sim self.fake_server = sim._create_service_instance( context="fake", instance_name="fake", share_network_id=self.fake_sn["id"], old_server_ip="fake") self.mock_object(utils, 'synchronized', mock.Mock(return_value=lambda f: f)) self.mock_object(generic.os.path, 'exists', mock.Mock(return_value=True)) self._driver._helpers = { 'CIFS': self._helper_cifs, 'NFS': self._helper_nfs, } self.share = fake_share.fake_share(share_proto='NFS') self.server = { 'instance_id': 'fake_instance_id', 'ip': 'fake_ip', 'username': 'fake_username', 'password': 'fake_password', 'pk_path': 'fake_pk_path', 'backend_details': { 'ip': '1.2.3.4', 'public_address': 'fake_public_address', 'instance_id': 'fake', 'service_ip': 'fake_ip', }, 'availability_zone': 'fake_az', } self.access = fake_share.fake_access() self.snapshot = fake_share.fake_snapshot() self.mock_object(time, 'sleep') self.mock_debug_log = self.mock_object(generic.LOG, 'debug') self.mock_warning_log = self.mock_object(generic.LOG, 'warning') self.mock_error_log = self.mock_object(generic.LOG, 
'error') self.mock_exception_log = self.mock_object(generic.LOG, 'exception') @ddt.data(True, False) def test_do_setup_with_dhss(self, dhss): CONF.set_default('driver_handles_share_servers', dhss) fake_server = {'id': 'fake_server_id'} self.mock_object(volume, 'API') self.mock_object(compute, 'API') self.mock_object(self._driver, '_setup_helpers') self.mock_object( self._driver, '_is_share_server_active', mock.Mock(return_value=True)) self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=fake_server)) self._driver.do_setup(self._context) volume.API.assert_called_once_with() compute.API.assert_called_once_with() self._driver._setup_helpers.assert_called_once_with() if not dhss: (self._driver.service_instance_manager.get_common_server. assert_called_once_with()) self._driver._is_share_server_active.assert_called_once_with( self._context, fake_server) else: self.assertFalse( self._driver.service_instance_manager.get_common_server.called) self.assertFalse(self._driver._is_share_server_active.called) @mock.patch('time.sleep') def test_do_setup_dhss_false_server_avail_after_retry(self, mock_sleep): # This tests the scenario in which the common share server cannot be # retrieved during the first attempt, is not active during the second, # becoming active during the third attempt. 
CONF.set_default('driver_handles_share_servers', False) fake_server = {'id': 'fake_server_id'} self.mock_object(volume, 'API') self.mock_object(compute, 'API') self.mock_object(self._driver, '_setup_helpers') self.mock_object( self._driver, '_is_share_server_active', mock.Mock(side_effect=[False, True])) self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(side_effect=[exception.ManilaException, fake_server, fake_server])) self._driver.do_setup(self._context) volume.API.assert_called_once_with() compute.API.assert_called_once_with() self._driver._setup_helpers.assert_called_once_with() (self._driver.service_instance_manager.get_common_server. assert_has_calls([mock.call()] * 3)) self._driver._is_share_server_active.assert_has_calls( [mock.call(self._context, fake_server)] * 2) mock_sleep.assert_has_calls([mock.call(5)] * 2) def test_setup_helpers(self): self._driver._helpers = {} CONF.set_default('share_helpers', ['NFS=fakenfs']) self.mock_object(generic.importutils, 'import_class', mock.Mock(return_value=self._helper_nfs)) self._driver._setup_helpers() generic.importutils.import_class.assert_has_calls([ mock.call('fakenfs') ]) self._helper_nfs.assert_called_once_with( self._execute, self._driver._ssh_exec, self.fake_conf ) self.assertEqual(1, len(self._driver._helpers)) def test_setup_helpers_no_helpers(self): self._driver._helpers = {} CONF.set_default('share_helpers', []) self.assertRaises(exception.ManilaException, self._driver._setup_helpers) def test_create_share(self): volume = 'fake_volume' volume2 = 'fake_volume2' self.mock_object(self._driver, '_allocate_container', mock.Mock(return_value=volume)) self.mock_object(self._driver, '_attach_volume', mock.Mock(return_value=volume2)) self.mock_object(self._driver, '_format_device') self.mock_object(self._driver, '_mount_device') result = self._driver.create_share( self._context, self.share, share_server=self.server) 
self.assertEqual(self._helper_nfs.create_exports.return_value, result) self._driver._allocate_container.assert_called_once_with( self._driver.admin_context, self.share, snapshot=None) self._driver._attach_volume.assert_called_once_with( self._driver.admin_context, self.share, self.server['backend_details']['instance_id'], volume) self._driver._format_device.assert_called_once_with( self.server['backend_details'], volume2) self._driver._mount_device.assert_called_once_with( self.share, self.server['backend_details'], volume2) def test_create_share_exception(self): share = fake_share.fake_share(share_network_id=None) self.assertRaises(exception.ManilaException, self._driver.create_share, self._context, share) def test_create_share_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.create_share, self._context, self.share, share_server=self.server) def test_is_device_file_available(self): volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=None)) self._driver._is_device_file_available(self.server, volume) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'test', '-b', volume['mountpoint']]) def test_format_device(self): volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=('', ''))) self.mock_object(self._driver, '_is_device_file_available') self._driver._format_device(self.server, volume) self._driver._is_device_file_available.assert_called_once_with( self.server, volume) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mkfs.%s' % self.fake_conf.share_volume_fstype, volume['mountpoint']]) def test_mount_device_not_present(self): server = {'instance_id': 'fake_server_id'} mount_path = self._driver._get_mount_path(self.share) volume = {'mountpoint': 'fake_mount_point'} device_path = volume['mountpoint'] self.mock_object(self._driver, 
'_is_device_mounted', mock.Mock(return_value=False)) self.mock_object(self._driver, '_add_mount_permanently') self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=('', ''))) self._driver._mount_device(self.share, server, volume) self._driver._is_device_mounted.assert_called_once_with( mount_path, server, volume) self._driver._add_mount_permanently.assert_called_once_with( self.share.id, device_path, server) self._driver._ssh_exec.assert_called_once_with( server, ( 'sudo', 'mkdir', '-p', mount_path, '&&', 'sudo', 'mount', volume['mountpoint'], mount_path, '&&', 'sudo', 'chmod', '777', mount_path, '&&', 'sudo', 'umount', mount_path, '&&', 'sudo', 'e2fsck', '-y', '-f', volume['mountpoint'], '&&', 'sudo', 'tune2fs', '-U', 'random', volume['mountpoint'], '&&', 'sudo', 'mount', volume['mountpoint'], mount_path, ), ) def test_mount_device_present(self): mount_path = '/fake/mount/path' volume = {'mountpoint': 'fake_mount_point'} self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=True)) self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(generic.LOG, 'warning') self._driver._mount_device(self.share, self.server, volume) self._driver._get_mount_path.assert_called_once_with(self.share) self._driver._is_device_mounted.assert_called_once_with( mount_path, self.server, volume) generic.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) def test_mount_device_exception_raised(self): volume = {'mountpoint': 'fake_mount_point'} self.mock_object( self._driver, '_is_device_mounted', mock.Mock(side_effect=exception.ProcessExecutionError)) self.assertRaises( exception.ShareBackendException, self._driver._mount_device, self.share, self.server, volume, ) self._driver._is_device_mounted.assert_called_once_with( self._driver._get_mount_path(self.share), self.server, volume) def test_unmount_device_present(self): mount_path = '/fake/mount/path' self.mock_object(self._driver, '_is_device_mounted', 
mock.Mock(return_value=True)) self.mock_object(self._driver, '_remove_mount_permanently') self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=('', ''))) self._driver._unmount_device(self.share, self.server) self._driver._get_mount_path.assert_called_once_with(self.share) self._driver._is_device_mounted.assert_called_once_with( mount_path, self.server) self._driver._remove_mount_permanently.assert_called_once_with( self.share.id, self.server) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'umount', mount_path, '&&', 'sudo', 'rmdir', mount_path], ) def test_unmount_device_retry_once(self): self.counter = 0 def _side_effect(*args): self.counter += 1 if self.counter < 2: raise exception.ProcessExecutionError mount_path = '/fake/mount/path' self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=True)) self.mock_object(self._driver, '_remove_mount_permanently') self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) self.mock_object(self._driver, '_ssh_exec', mock.Mock(side_effect=_side_effect)) self._driver._unmount_device(self.share, self.server) self.assertEqual(1, time.sleep.call_count) self.assertEqual([mock.call(self.share) for i in range(2)], self._driver._get_mount_path.mock_calls) self.assertEqual([mock.call(mount_path, self.server) for i in range(2)], self._driver._is_device_mounted.mock_calls) self._driver._remove_mount_permanently.assert_called_once_with( self.share.id, self.server) self.assertEqual( [mock.call(self.server, ['sudo', 'umount', mount_path, '&&', 'sudo', 'rmdir', mount_path]) for i in range(2)], self._driver._ssh_exec.mock_calls, ) def test_unmount_device_not_present(self): mount_path = '/fake/mount/path' self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=False)) self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) 
self.mock_object(generic.LOG, 'warning') self._driver._unmount_device(self.share, self.server) self._driver._get_mount_path.assert_called_once_with(self.share) self._driver._is_device_mounted.assert_called_once_with( mount_path, self.server) generic.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) def test_is_device_mounted_true(self): volume = {'mountpoint': 'fake_mount_point', 'id': 'fake_id'} mount_path = '/fake/mount/path' mounts = "%(dev)s on %(path)s" % {'dev': volume['mountpoint'], 'path': mount_path} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(mounts, ''))) result = self._driver._is_device_mounted( mount_path, self.server, volume) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mount']) self.assertTrue(result) def test_is_device_mounted_true_no_volume_provided(self): mount_path = '/fake/mount/path' mounts = "/fake/dev/path on %(path)s type fake" % {'path': mount_path} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(mounts, ''))) result = self._driver._is_device_mounted(mount_path, self.server) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mount']) self.assertTrue(result) def test_is_device_mounted_false(self): mount_path = '/fake/mount/path' volume = {'mountpoint': 'fake_mount_point', 'id': 'fake_id'} mounts = "%(dev)s on %(path)s" % {'dev': '/fake', 'path': mount_path} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(mounts, ''))) result = self._driver._is_device_mounted( mount_path, self.server, volume) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mount']) self.assertFalse(result) def test_is_device_mounted_false_no_volume_provided(self): mount_path = '/fake/mount/path' mounts = "%(path)s" % {'path': 'fake'} self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(mounts, ''))) self.mock_object(self._driver, '_get_mount_path', mock.Mock(return_value=mount_path)) result = 
self._driver._is_device_mounted(mount_path, self.server) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'mount']) self.assertFalse(result) def test_add_mount_permanently(self): device_path = '/fake/mount/path' device_uuid = 'fake_disk_uuid' formated_device_uuid = f"UUID={device_uuid}" self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(device_uuid, ''))) self._driver._add_mount_permanently(self.share.id, device_path, self.server) self._driver._ssh_exec.assert_has_calls([ mock.call( self.server, ['grep', self.share.id, const.MOUNT_FILE_TEMP, '|', 'sudo', 'tee', '-a', const.MOUNT_FILE]), mock.call(self.server, ['lsblk', '-o', 'uuid', '-n', device_path]), mock.call( self.server, ['sudo', 'sed', '-i', "s@{}@{}@".format(device_path, formated_device_uuid), const.MOUNT_FILE]), mock.call(self.server, ['sudo', 'mount', '-a']) ]) def test_add_mount_permanently_raise_error_on_add(self): device_path = 'fake_device_path' self.mock_object( self._driver, '_ssh_exec', mock.Mock(side_effect=exception.ProcessExecutionError)) self.assertRaises( exception.ShareBackendException, self._driver._add_mount_permanently, self.share.id, device_path, self.server ) self._driver._ssh_exec.assert_called_once_with( self.server, ['grep', self.share.id, const.MOUNT_FILE_TEMP, '|', 'sudo', 'tee', '-a', const.MOUNT_FILE], ) def test_remove_mount_permanently(self): self.mock_object(self._driver, '_ssh_exec') self._driver._remove_mount_permanently(self.share.id, self.server) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'sed', '-i', '\'/%s/d\'' % self.share.id, const.MOUNT_FILE], ) def test_remove_mount_permanently_raise_error_on_remove(self): self.mock_object( self._driver, '_ssh_exec', mock.Mock(side_effect=exception.ProcessExecutionError)) self.assertRaises( exception.ShareBackendException, self._driver._remove_mount_permanently, self.share.id, self.server ) self._driver._ssh_exec.assert_called_once_with( self.server, ['sudo', 'sed', 
'-i', '\'/%s/d\'' % self.share.id, const.MOUNT_FILE], ) def test_get_mount_path(self): result = self._driver._get_mount_path(self.share) self.assertEqual(os.path.join(CONF.share_mount_path, self.share['name']), result) def test_attach_volume_not_attached(self): available_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver.compute_api, 'instance_volume_attach') self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=attached_volume)) result = self._driver._attach_volume(self._context, self.share, 'fake_inst_id', available_volume) (self._driver.compute_api.instance_volume_attach. assert_called_once_with(self._context, 'fake_inst_id', available_volume['id'])) self._driver.volume_api.get.assert_called_once_with( self._context, attached_volume['id']) self.assertEqual(attached_volume, result) def test_attach_volume_attached_correct(self): fake_server = fake_compute.FakeServer() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[attached_volume.id])) result = self._driver._attach_volume(self._context, self.share, fake_server, attached_volume) self.assertEqual(attached_volume, result) def test_attach_volume_attached_incorrect(self): fake_server = fake_compute.FakeServer() attached_volume = fake_volume.FakeVolume(status='in-use') anoter_volume = fake_volume.FakeVolume(id='fake_id2', status='in-use') self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[anoter_volume])) self.assertRaises(exception.ManilaException, self._driver._attach_volume, self._context, self.share, fake_server, attached_volume) @ddt.data(exception.ManilaException, exception.Invalid) def test_attach_volume_failed_attach(self, side_effect): fake_server = fake_compute.FakeServer() available_volume = fake_volume.FakeVolume() self.mock_object(self._driver.compute_api, 
'instance_volume_attach', mock.Mock(side_effect=side_effect)) self.assertRaises(exception.ManilaException, self._driver._attach_volume, self._context, self.share, fake_server, available_volume) self.assertEqual( 3, self._driver.compute_api.instance_volume_attach.call_count) def test_attach_volume_attached_retry_correct(self): fake_server = fake_compute.FakeServer() attached_volume = fake_volume.FakeVolume(status='available') in_use_volume = fake_volume.FakeVolume(status='in-use') side_effect = [exception.Invalid("Fake"), attached_volume] attach_mock = mock.Mock(side_effect=side_effect) self.mock_object(self._driver.compute_api, 'instance_volume_attach', attach_mock) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[attached_volume])) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=in_use_volume)) result = self._driver._attach_volume(self._context, self.share, fake_server, attached_volume) self.assertEqual(in_use_volume, result) self.assertEqual( 2, self._driver.compute_api.instance_volume_attach.call_count) def test_attach_volume_error(self): fake_server = fake_compute.FakeServer() available_volume = fake_volume.FakeVolume() error_volume = fake_volume.FakeVolume(status='error') self.mock_object(self._driver.compute_api, 'instance_volume_attach') self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=error_volume)) self.assertRaises(exception.ManilaException, self._driver._attach_volume, self._context, self.share, fake_server, available_volume) def test_get_volume(self): volume = fake_volume.FakeVolume( name=CONF.volume_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all', mock.Mock(return_value=[volume])) result = self._driver._get_volume(self._context, self.share['id']) self.assertEqual(volume, result) self._driver.volume_api.get_all.assert_called_once_with( self._context, {'all_tenants': True, 'name': volume['name']}) def 
test_get_volume_with_private_data(self): volume = fake_volume.FakeVolume() self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=volume)) self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=volume['id'])) result = self._driver._get_volume(self._context, self.share['id']) self.assertEqual(volume, result) self._driver.volume_api.get.assert_called_once_with( self._context, volume['id']) self.fake_private_storage.get.assert_called_once_with( self.share['id'], 'volume_id' ) def test_get_volume_none(self): vol_name = ( self._driver.configuration.volume_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all', mock.Mock(return_value=[])) result = self._driver._get_volume(self._context, self.share['id']) self.assertIsNone(result) self._driver.volume_api.get_all.assert_called_once_with( self._context, {'all_tenants': True, 'name': vol_name}) def test_get_volume_error(self): volume = fake_volume.FakeVolume( name=CONF.volume_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all', mock.Mock(return_value=[volume, volume])) self.assertRaises(exception.ManilaException, self._driver._get_volume, self._context, self.share['id']) self._driver.volume_api.get_all.assert_called_once_with( self._context, {'all_tenants': True, 'name': volume['name']}) def test_get_volume_snapshot(self): volume_snapshot = fake_volume.FakeVolumeSnapshot( name=self._driver.configuration.volume_snapshot_name_template % self.snapshot['id']) self.mock_object(self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[volume_snapshot])) result = self._driver._get_volume_snapshot(self._context, self.snapshot['id']) self.assertEqual(volume_snapshot, result) self._driver.volume_api.get_all_snapshots.assert_called_once_with( self._context, {'name': volume_snapshot['name']}) def test_get_volume_snapshot_with_private_data(self): volume_snapshot = fake_volume.FakeVolumeSnapshot() 
self.mock_object(self._driver.volume_api, 'get_snapshot', mock.Mock(return_value=volume_snapshot)) self.mock_object(self.fake_private_storage, 'get', mock.Mock(return_value=volume_snapshot['id'])) result = self._driver._get_volume_snapshot(self._context, self.snapshot['id']) self.assertEqual(volume_snapshot, result) self._driver.volume_api.get_snapshot.assert_called_once_with( self._context, volume_snapshot['id']) self.fake_private_storage.get.assert_called_once_with( self.snapshot['id'], 'volume_snapshot_id' ) def test_get_volume_snapshot_none(self): snap_name = ( self._driver.configuration.volume_snapshot_name_template % self.share['id']) self.mock_object(self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[])) result = self._driver._get_volume_snapshot(self._context, self.share['id']) self.assertIsNone(result) self._driver.volume_api.get_all_snapshots.assert_called_once_with( self._context, {'name': snap_name}) def test_get_volume_snapshot_error(self): volume_snapshot = fake_volume.FakeVolumeSnapshot( name=self._driver.configuration.volume_snapshot_name_template % self.snapshot['id']) self.mock_object( self._driver.volume_api, 'get_all_snapshots', mock.Mock(return_value=[volume_snapshot, volume_snapshot])) self.assertRaises( exception.ManilaException, self._driver._get_volume_snapshot, self._context, self.snapshot['id']) self._driver.volume_api.get_all_snapshots.assert_called_once_with( self._context, {'name': volume_snapshot['name']}) def test_detach_volume(self): available_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=attached_volume)) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[attached_volume.id])) self.mock_object(self._driver.compute_api, 'instance_volume_detach') self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=available_volume)) 
self._driver._detach_volume(self._context, self.share, self.server['backend_details']) (self._driver.compute_api.instance_volume_detach. assert_called_once_with( self._context, self.server['backend_details']['instance_id'], available_volume['id'])) self._driver.volume_api.get.assert_called_once_with( self._context, available_volume['id']) def test_detach_volume_detached(self): available_volume = fake_volume.FakeVolume() attached_volume = fake_volume.FakeVolume(status='in-use') self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=attached_volume)) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[])) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=available_volume)) self.mock_object(self._driver.compute_api, 'instance_volume_detach') self._driver._detach_volume(self._context, self.share, self.server['backend_details']) self.assertFalse(self._driver.volume_api.get.called) self.assertFalse( self._driver.compute_api.instance_volume_detach.called) def test_allocate_container(self): fake_vol = fake_volume.FakeVolume() self.fake_conf.cinder_volume_type = 'fake_volume_type' self.mock_object(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) result = self._driver._allocate_container(self._context, self.share) self.assertEqual(fake_vol, result) self._driver.volume_api.create.assert_called_once_with( self._context, self.share['size'], CONF.volume_name_template % self.share['id'], '', snapshot=None, volume_type='fake_volume_type', availability_zone=self.share['availability_zone']) def test_allocate_container_with_snaphot(self): fake_vol = fake_volume.FakeVolume() fake_vol_snap = fake_volume.FakeVolumeSnapshot() self.mock_object(self._driver, '_get_volume_snapshot', mock.Mock(return_value=fake_vol_snap)) self.mock_object(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) result = self._driver._allocate_container(self._context, self.share, self.snapshot) 
self.assertEqual(fake_vol, result) self._driver.volume_api.create.assert_called_once_with( self._context, self.share['size'], CONF.volume_name_template % self.share['id'], '', snapshot=fake_vol_snap, volume_type=None, availability_zone=self.share['availability_zone']) def test_allocate_container_error(self): fake_vol = fake_volume.FakeVolume(status='error') self.mock_object(self._driver.volume_api, 'create', mock.Mock(return_value=fake_vol)) self.assertRaises(exception.ManilaException, self._driver._allocate_container, self._context, self.share) def test_wait_for_available_volume(self): fake_volume = {'status': 'creating', 'id': 'fake'} fake_available_volume = {'status': 'available', 'id': 'fake'} self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=fake_available_volume)) actual_result = self._driver._wait_for_available_volume( fake_volume, 5, "error", "timeout") self.assertEqual(fake_available_volume, actual_result) self._driver.volume_api.get.assert_called_once_with( mock.ANY, fake_volume['id']) @mock.patch('time.sleep') def test_wait_for_available_volume_error_extending(self, mock_sleep): fake_volume = {'status': const.STATUS_EXTENDING_ERROR, 'id': 'fake'} self.assertRaises(exception.ManilaException, self._driver._wait_for_available_volume, fake_volume, 5, 'error', 'timeout') self.assertFalse(mock_sleep.called) @mock.patch('time.sleep') def test_wait_for_extending_volume(self, mock_sleep): initial_size = 1 expected_size = 2 mock_volume = fake_volume.FakeVolume(status='available', size=initial_size) mock_extending_vol = fake_volume.FakeVolume(status='extending', size=initial_size) mock_extended_vol = fake_volume.FakeVolume(status='available', size=expected_size) self.mock_object(self._driver.volume_api, 'get', mock.Mock(side_effect=[mock_extending_vol, mock_extended_vol])) result = self._driver._wait_for_available_volume( mock_volume, 5, "error", "timeout", expected_size=expected_size) expected_get_count = 2 self.assertEqual(mock_extended_vol, 
result) self._driver.volume_api.get.assert_has_calls( [mock.call(self._driver.admin_context, mock_volume['id'])] * expected_get_count) mock_sleep.assert_has_calls([mock.call(1)] * expected_get_count) @ddt.data(mock.Mock(return_value={'status': 'creating', 'id': 'fake'}), mock.Mock(return_value={'status': 'error', 'id': 'fake'})) def test_wait_for_available_volume_invalid(self, volume_get_mock): fake_volume = {'status': 'creating', 'id': 'fake'} self.mock_object(self._driver.volume_api, 'get', volume_get_mock) self.mock_object(time, 'time', mock.Mock(side_effect=[1.0, 1.33, 1.67, 2.0])) self.assertRaises( exception.ManilaException, self._driver._wait_for_available_volume, fake_volume, 1, "error", "timeout" ) def test_deallocate_container(self): fake_vol = fake_volume.FakeVolume() self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=fake_vol)) self.mock_object(self._driver.volume_api, 'delete') self.mock_object(self._driver.volume_api, 'get', mock.Mock( side_effect=exception.VolumeNotFound(volume_id=fake_vol['id']))) self._driver._deallocate_container(self._context, self.share) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self._driver.volume_api.delete.assert_called_once_with( self._context, fake_vol['id']) self._driver.volume_api.get.assert_called_once_with( self._context, fake_vol['id']) def test_deallocate_container_with_volume_not_found(self): fake_vol = fake_volume.FakeVolume() self.mock_object(self._driver, '_get_volume', mock.Mock(side_effect=exception.VolumeNotFound( volume_id=fake_vol['id']))) self.mock_object(self._driver.volume_api, 'delete') self._driver._deallocate_container(self._context, self.share) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self.assertFalse(self._driver.volume_api.delete.called) def test_create_share_from_snapshot(self): vol1 = 'fake_vol1' vol2 = 'fake_vol2' self.mock_object(self._driver, '_allocate_container', mock.Mock(return_value=vol1)) 
self.mock_object(self._driver, '_attach_volume', mock.Mock(return_value=vol2)) self.mock_object(self._driver, '_mount_device') result = self._driver.create_share_from_snapshot( self._context, self.share, self.snapshot, share_server=self.server) self.assertEqual(self._helper_nfs.create_exports.return_value, result) self._driver._allocate_container.assert_called_once_with( self._driver.admin_context, self.share, snapshot=self.snapshot) self._driver._attach_volume.assert_called_once_with( self._driver.admin_context, self.share, self.server['backend_details']['instance_id'], vol1) self._driver._mount_device.assert_called_once_with( self.share, self.server['backend_details'], vol2) self._helper_nfs.create_exports.assert_called_once_with( self.server['backend_details'], self.share['name']) def test_create_share_from_snapshot_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.create_share_from_snapshot, self._context, self.share, self.snapshot, share_server=self.server) def test_delete_share_no_share_servers_handling(self): self.mock_object(self._driver, '_deallocate_container') self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server)) self.mock_object( self._driver.service_instance_manager, 'ensure_service_instance', mock.Mock(return_value=False)) CONF.set_default('driver_handles_share_servers', False) self._driver.delete_share(self._context, self.share) (self._driver.service_instance_manager.get_common_server. assert_called_once_with()) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) (self._driver.service_instance_manager.ensure_service_instance. 
assert_called_once_with( self._context, self.server['backend_details'])) def test_delete_share(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') self._driver.delete_share( self._context, self.share, share_server=self.server) self._helper_nfs.remove_exports.assert_called_once_with( self.server['backend_details'], self.share['name']) self._driver._unmount_device.assert_called_once_with( self.share, self.server['backend_details']) self._driver._detach_volume.assert_called_once_with( self._driver.admin_context, self.share, self.server['backend_details']) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) (self._driver.service_instance_manager.ensure_service_instance. assert_called_once_with( self._context, self.server['backend_details'])) def test_detach_volume_with_volume_not_found(self): fake_vol = fake_volume.FakeVolume() fake_server_details = mock.MagicMock() self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[])) self.mock_object(self._driver, '_get_volume', mock.Mock(side_effect=exception.VolumeNotFound( volume_id=fake_vol['id']))) self._driver._detach_volume(self._context, self.share, fake_server_details) (self._driver.compute_api.instance_volumes_list. assert_called_once_with(self._driver.admin_context, fake_server_details['instance_id'])) (self._driver._get_volume. 
assert_called_once_with(self._driver.admin_context, self.share['id'])) self.assertEqual(1, self.mock_warning_log.call_count) def test_delete_share_without_share_server(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') self._driver.delete_share( self._context, self.share, share_server=None) self.assertFalse(self._helper_nfs.remove_export.called) self.assertFalse(self._driver._unmount_device.called) self.assertFalse(self._driver._detach_volume.called) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) def test_delete_share_without_server_backend_details(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') fake_share_server = { 'instance_id': 'fake_instance_id', 'ip': 'fake_ip', 'username': 'fake_username', 'password': 'fake_password', 'pk_path': 'fake_pk_path', 'backend_details': {} } self._driver.delete_share( self._context, self.share, share_server=fake_share_server) self.assertFalse(self._helper_nfs.remove_export.called) self.assertFalse(self._driver._unmount_device.called) self.assertFalse(self._driver._detach_volume.called) self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) def test_delete_share_without_server_availability(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_deallocate_container') self.mock_object( self._driver.service_instance_manager, 'ensure_service_instance', mock.Mock(return_value=False)) self._driver.delete_share( self._context, self.share, share_server=self.server) self.assertFalse(self._helper_nfs.remove_export.called) self.assertFalse(self._driver._unmount_device.called) self.assertFalse(self._driver._detach_volume.called) 
self._driver._deallocate_container.assert_called_once_with( self._driver.admin_context, self.share) (self._driver.service_instance_manager.ensure_service_instance. assert_called_once_with( self._context, self.server['backend_details'])) def test_delete_share_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.delete_share, self._context, self.share, share_server=self.server) def test_create_snapshot(self): fake_vol = fake_volume.FakeVolume() fake_vol_snap = fake_volume.FakeVolumeSnapshot( share_instance_id=fake_vol['id']) self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=fake_vol)) self.mock_object(self._driver.volume_api, 'create_snapshot_force', mock.Mock(return_value=fake_vol_snap)) self._driver.create_snapshot(self._context, fake_vol_snap, share_server=self.server) self._driver._get_volume.assert_called_once_with( self._driver.admin_context, fake_vol_snap['share_instance_id']) self._driver.volume_api.create_snapshot_force.assert_called_once_with( self._context, fake_vol['id'], CONF.volume_snapshot_name_template % fake_vol_snap['id'], '' ) def test_delete_snapshot(self): fake_vol_snap = fake_volume.FakeVolumeSnapshot() fake_vol_snap2 = {'id': 'fake_vol_snap2'} self.mock_object(self._driver, '_get_volume_snapshot', mock.Mock(return_value=fake_vol_snap2)) self.mock_object(self._driver.volume_api, 'delete_snapshot') self.mock_object( self._driver.volume_api, 'get_snapshot', mock.Mock(side_effect=exception.VolumeSnapshotNotFound( snapshot_id=fake_vol_snap['id']))) self._driver.delete_snapshot(self._context, fake_vol_snap, share_server=self.server) self._driver._get_volume_snapshot.assert_called_once_with( self._driver.admin_context, fake_vol_snap['id']) self._driver.volume_api.delete_snapshot.assert_called_once_with( self._driver.admin_context, fake_vol_snap2['id']) self._driver.volume_api.get_snapshot.assert_called_once_with( self._driver.admin_context, 
fake_vol_snap2['id']) def test_ensure_share(self): vol1 = 'fake_vol1' vol2 = 'fake_vol2' self._helper_nfs.create_export.return_value = 'fakelocation' self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=vol1)) self.mock_object(self._driver, '_attach_volume', mock.Mock(return_value=vol2)) self.mock_object(self._driver, '_mount_device') self._driver.ensure_share( self._context, self.share, share_server=self.server) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self._driver._attach_volume.assert_called_once_with( self._context, self.share, self.server['backend_details']['instance_id'], vol1) self._driver._mount_device.assert_called_once_with( self.share, self.server['backend_details'], vol2) self._helper_nfs.create_exports.assert_called_once_with( self.server['backend_details'], self.share['name'], recreate=True) def test_ensure_share_volume_is_absent(self): self.mock_object( self._driver, '_get_volume', mock.Mock(return_value=None)) self.mock_object(self._driver, '_attach_volume') self._driver.ensure_share( self._context, self.share, share_server=self.server) self._driver._get_volume.assert_called_once_with( self._context, self.share['id']) self.assertFalse(self._driver._attach_volume.called) def test_ensure_share_invalid_helper(self): self._driver._helpers = {'CIFS': self._helper_cifs} self.assertRaises(exception.InvalidShare, self._driver.ensure_share, self._context, self.share, share_server=self.server) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_update_access(self, access_level): # fakes access_rules = [get_fake_access_rule('1.1.1.1', access_level), get_fake_access_rule('2.2.2.2', access_level)] add_rules = [get_fake_access_rule('2.2.2.2', access_level), ] delete_rules = [get_fake_access_rule('3.3.3.3', access_level), ] # run self._driver.update_access(self._context, self.share, access_rules, add_rules=add_rules, delete_rules=delete_rules, update_rules=None, share_server=self.server) # 
asserts (self._driver._helpers[self.share['share_proto']]. update_access.assert_called_once_with( self.server['backend_details'], self.share['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules)) @ddt.data(fake_share.fake_share(), fake_share.fake_share(share_proto='NFSBOGUS'), fake_share.fake_share(share_proto='CIFSBOGUS')) def test__get_helper_with_wrong_proto(self, share): self.assertRaises(exception.InvalidShare, self._driver._get_helper, share) def test__setup_server(self): sim = self._driver.instance_manager net_info = [{ 'server_id': 'fake', 'neutron_net_id': 'fake-net-id', 'neutron_subnet_id': 'fake-subnet-id', }] self._driver.setup_server(net_info) sim.set_up_service_instance.assert_called_once_with( self._context, net_info[0]) def test__setup_server_revert(self): def raise_exception(*args, **kwargs): raise exception.ServiceInstanceException net_info = [{'server_id': 'fake', 'neutron_net_id': 'fake-net-id', 'neutron_subnet_id': 'fake-subnet-id'}] self.mock_object(self._driver.service_instance_manager, 'set_up_service_instance', mock.Mock(side_effect=raise_exception)) self.assertRaises(exception.ServiceInstanceException, self._driver.setup_server, net_info) def test__teardown_server(self): server_details = { 'instance_id': 'fake_instance_id', 'subnet_id': 'fake_subnet_id', 'router_id': 'fake_router_id', } self._driver.teardown_server(server_details) (self._driver.service_instance_manager.delete_service_instance. 
assert_called_once_with( self._driver.admin_context, server_details)) def test_ssh_exec_connection_not_exist(self): ssh_conn_timeout = 30 CONF.set_default('ssh_conn_timeout', ssh_conn_timeout) ssh_output = 'fake_ssh_output' cmd = ['fake', 'command'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(return_value=True) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(return_value=ssh) self.mock_object(ssh_utils, 'SSHPool', mock.Mock(return_value=ssh_pool)) self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = {} result = self._driver._ssh_exec(self.server, cmd) ssh_utils.SSHPool.assert_called_once_with( self.server['ip'], 22, ssh_conn_timeout, self.server['username'], self.server['password'], self.server['pk_path'], max_size=1) ssh_pool.create.assert_called_once_with() processutils.ssh_execute.assert_called_once_with( ssh, 'fake command', check_exit_code=True) ssh.get_transport().is_active.assert_called_once_with() self.assertEqual( self._driver.ssh_connections, {self.server['instance_id']: (ssh_pool, ssh)} ) self.assertEqual(ssh_output, result) def test_ssh_exec_connection_exist(self): ssh_output = 'fake_ssh_output' cmd = ['fake', 'command'] ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(side_effect=lambda: True) ssh_pool = mock.Mock() self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = { self.server['instance_id']: (ssh_pool, ssh) } result = self._driver._ssh_exec(self.server, cmd) processutils.ssh_execute.assert_called_once_with( ssh, 'fake command', check_exit_code=True) ssh.get_transport().is_active.assert_called_once_with() self.assertEqual( self._driver.ssh_connections, {self.server['instance_id']: (ssh_pool, ssh)} ) self.assertEqual(ssh_output, result) def test_ssh_exec_connection_recreation(self): ssh_output = 'fake_ssh_output' cmd = ['fake', 'command'] 
ssh = mock.Mock() ssh.get_transport = mock.Mock() ssh.get_transport().is_active = mock.Mock(side_effect=lambda: False) ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(side_effect=lambda: ssh) ssh_pool.remove = mock.Mock() self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = { self.server['instance_id']: (ssh_pool, ssh) } result = self._driver._ssh_exec(self.server, cmd) processutils.ssh_execute.assert_called_once_with( ssh, 'fake command', check_exit_code=True) ssh.get_transport().is_active.assert_called_once_with() ssh_pool.create.assert_called_once_with() ssh_pool.remove.assert_called_once_with(ssh) self.assertEqual( self._driver.ssh_connections, {self.server['instance_id']: (ssh_pool, ssh)} ) self.assertEqual(ssh_output, result) def test__ssh_exec_check_list_comprehensions_still_work(self): ssh_output = 'fake_ssh_output' cmd = ['fake', 'command spaced'] ssh = mock.Mock() ssh_pool = mock.Mock() ssh_pool.create = mock.Mock(side_effect=lambda: ssh) ssh_pool.remove = mock.Mock() self.mock_object(processutils, 'ssh_execute', mock.Mock(return_value=ssh_output)) self._driver.ssh_connections = { self.server['instance_id']: (ssh_pool, ssh) } self._driver._ssh_exec(self.server, cmd) processutils.ssh_execute.assert_called_once_with( ssh, 'fake "command spaced"', check_exit_code=True) def test_get_share_stats_refresh_false(self): self._driver._stats = {'fake_key': 'fake_value'} result = self._driver.get_share_stats(False) self.assertEqual(self._driver._stats, result) def test_get_share_stats_refresh_true(self): fake_stats = {'fake_key': 'fake_value'} self._driver._stats = fake_stats expected_keys = [ 'qos', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'reserved_snapshot_percentage', 'reserved_share_extend_percentage', 'vendor_name', 'storage_protocol', ] result = self._driver.get_share_stats(True) 
self.assertNotEqual(fake_stats, result) for key in expected_keys: self.assertIn(key, result) self.assertTrue(result['driver_handles_share_servers']) self.assertEqual('Open Source', result['vendor_name']) def _setup_manage_mocks(self, get_share_type_extra_specs='False', is_device_mounted=True, server_details=None): CONF.set_default('driver_handles_share_servers', False) self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value=get_share_type_extra_specs)) self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=is_device_mounted)) self.mock_object(self._driver, 'service_instance_manager') server = {'backend_details': server_details} self.mock_object(self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=server)) def test_manage_invalid_protocol(self): share = {'share_proto': 'fake_proto'} self._setup_manage_mocks() self.assertRaises(exception.InvalidShare, self._driver.manage_existing, share, {}) def test_manage_not_mounted_share(self): share = get_fake_manage_share() fake_path = '/foo/bar' self._setup_manage_mocks(is_device_mounted=False) self.mock_object( self._driver._helpers[share['share_proto']], 'get_share_path_by_export_location', mock.Mock(return_value=fake_path)) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, {}) self.assertEqual( 1, self._driver.service_instance_manager.get_common_server.call_count) self._driver._is_device_mounted.assert_called_once_with( fake_path, None) (self._driver._helpers[share['share_proto']]. 
get_share_path_by_export_location.assert_called_once_with( None, share['export_locations'][0]['path'])) def test_manage_share_not_attached_to_cinder_volume_invalid_size(self): share = get_fake_manage_share() server_details = {} fake_path = '/foo/bar' self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=None)) error = exception.ManageInvalidShare(reason="fake") self.mock_object( self._driver, '_get_mounted_share_size', mock.Mock(side_effect=error)) self.mock_object( self._driver._helpers[share['share_proto']], 'get_share_path_by_export_location', mock.Mock(return_value=fake_path)) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, {}) self._driver._get_mounted_share_size.assert_called_once_with( fake_path, server_details) (self._driver._helpers[share['share_proto']]. get_share_path_by_export_location.assert_called_once_with( server_details, share['export_locations'][0]['path'])) def test_manage_share_not_attached_to_cinder_volume(self): share = get_fake_manage_share() share_size = "fake" fake_path = '/foo/bar' fake_exports = ['foo', 'bar'] server_details = {} self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver, '_get_volume') self.mock_object(self._driver, '_get_mounted_share_size', mock.Mock(return_value=share_size)) self.mock_object( self._driver._helpers[share['share_proto']], 'get_share_path_by_export_location', mock.Mock(return_value=fake_path)) self.mock_object( self._driver._helpers[share['share_proto']], 'get_exports_for_share', mock.Mock(return_value=fake_exports)) result = self._driver.manage_existing(share, {}) self.assertEqual( {'size': share_size, 'export_locations': fake_exports}, result) (self._driver._helpers[share['share_proto']].get_exports_for_share. assert_called_once_with( server_details, share['export_locations'][0]['path'])) (self._driver._helpers[share['share_proto']]. 
get_share_path_by_export_location.assert_called_once_with( server_details, share['export_locations'][0]['path'])) self._driver._get_mounted_share_size.assert_called_once_with( fake_path, server_details) self.assertFalse(self._driver._get_volume.called) def test_manage_share_attached_to_cinder_volume_not_found(self): share = get_fake_manage_share() server_details = {} driver_options = {'volume_id': 'fake'} self._setup_manage_mocks(server_details=server_details) self.mock_object( self._driver.volume_api, 'get', mock.Mock(side_effect=exception.VolumeNotFound(volume_id="fake")) ) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, driver_options) self._driver.volume_api.get.assert_called_once_with( mock.ANY, driver_options['volume_id']) def test_manage_share_attached_to_cinder_volume_not_mounted_to_srv(self): share = get_fake_manage_share() server_details = {'instance_id': 'fake'} driver_options = {'volume_id': 'fake'} volume = {'id': 'fake'} self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=volume)) self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=[])) self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, share, driver_options) self._driver.volume_api.get.assert_called_once_with( mock.ANY, driver_options['volume_id']) self._driver.compute_api.instance_volumes_list.assert_called_once_with( mock.ANY, server_details['instance_id']) def test_manage_share_attached_to_cinder_volume(self): share = get_fake_manage_share() fake_size = 'foobar' fake_exports = ['foo', 'bar'] server_details = {'instance_id': 'fake'} driver_options = {'volume_id': 'fake'} volume = {'id': 'fake', 'name': 'fake_volume_1', 'size': fake_size} self._setup_manage_mocks(server_details=server_details) self.mock_object(self._driver.volume_api, 'get', mock.Mock(return_value=volume)) self._driver.volume_api.update = mock.Mock() 
self.mock_object(self._driver.compute_api, 'instance_volumes_list', mock.Mock(return_value=['fake'])) self.mock_object( self._driver._helpers[share['share_proto']], 'get_exports_for_share', mock.Mock(return_value=fake_exports)) result = self._driver.manage_existing(share, driver_options) self.assertEqual( {'size': fake_size, 'export_locations': fake_exports}, result) (self._driver._helpers[share['share_proto']].get_exports_for_share. assert_called_once_with( server_details, share['export_locations'][0]['path'])) expected_volume_update = { 'name': self._driver._get_volume_name(share['id']) } self._driver.volume_api.update.assert_called_once_with( mock.ANY, volume['id'], expected_volume_update) self.fake_private_storage.update.assert_called_once_with( share['id'], {'volume_id': volume['id']} ) def test_get_mounted_share_size(self): output = ("Filesystem blocks Used Available Capacity Mounted on\n" "/dev/fake 1G 1G 1G 4% /shares/share-fake") self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(output, ''))) actual_result = self._driver._get_mounted_share_size('/fake/path', {}) self.assertEqual(1, actual_result) @ddt.data("fake\nfake\n", "fake", "fake\n") def test_get_mounted_share_size_invalid_output(self, output): self.mock_object(self._driver, '_ssh_exec', mock.Mock(return_value=(output, ''))) self.assertRaises(exception.ManageInvalidShare, self._driver._get_mounted_share_size, '/fake/path', {}) def test_get_consumed_space(self): mount_path = "fake_path" server_details = {} index = 2 valid_result = 1 self.mock_object(self._driver, '_get_mount_stats_by_index', mock.Mock(return_value=valid_result * 1024)) actual_result = self._driver._get_consumed_space( mount_path, server_details) self.assertEqual(valid_result, actual_result) self._driver._get_mount_stats_by_index.assert_called_once_with( mount_path, server_details, index, block_size='M' ) def test_get_consumed_space_invalid(self): self.mock_object( self._driver, '_get_mount_stats_by_index', 
mock.Mock(side_effect=exception.ManilaException("fake")) ) self.assertRaises( exception.InvalidShare, self._driver._get_consumed_space, "fake", "fake" ) @ddt.data(100, 130, 123) def test_extend_share(self, volume_size): fake_volume = { "name": "fake", "size": volume_size, } fake_share = { 'id': 'fake', 'share_proto': 'NFS', 'name': 'test_share', } new_size = 123 srv_details = self.server['backend_details'] self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server) ) self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_detach_volume') self.mock_object(self._driver, '_extend_volume') self.mock_object(self._driver, '_attach_volume') self.mock_object(self._driver, '_mount_device') self.mock_object(self._driver, '_resize_filesystem') self.mock_object( self._driver, '_get_volume', mock.Mock(return_value=fake_volume) ) CONF.set_default('driver_handles_share_servers', False) self._driver.extend_share(fake_share, new_size) self.assertTrue( self._driver.service_instance_manager.get_common_server.called) self._driver._unmount_device.assert_called_once_with( fake_share, srv_details) self._driver._get_volume.assert_called_once_with( mock.ANY, fake_share['id']) if new_size > volume_size: self._driver._detach_volume.assert_called_once_with( mock.ANY, fake_share, srv_details) self._driver._extend_volume.assert_called_once_with( mock.ANY, fake_volume, new_size) self._driver._attach_volume.assert_called_once_with( mock.ANY, fake_share, srv_details['instance_id'], mock.ANY) else: self.assertFalse(self._driver._detach_volume.called) self.assertFalse(self._driver._extend_volume.called) self.assertFalse(self._driver._attach_volume.called) (self._helper_nfs.disable_access_for_maintenance. assert_called_once_with(srv_details, 'test_share')) (self._helper_nfs.restore_access_after_maintenance. 
assert_called_once_with(srv_details, 'test_share')) self.assertTrue(self._driver._resize_filesystem.called) def test_extend_volume(self): fake_volume = {'id': 'fake'} new_size = 123 self.mock_object(self._driver.volume_api, 'extend') self.mock_object(self._driver, '_wait_for_available_volume') self._driver._extend_volume(self._context, fake_volume, new_size) self._driver.volume_api.extend.assert_called_once_with( self._context, fake_volume['id'], new_size ) self._driver._wait_for_available_volume.assert_called_once_with( fake_volume, mock.ANY, msg_timeout=mock.ANY, msg_error=mock.ANY, expected_size=new_size ) def test_resize_filesystem(self): fake_server_details = {'fake': 'fake'} fake_volume = {'mountpoint': '/dev/fake'} self.mock_object(self._driver, '_ssh_exec') self._driver._resize_filesystem( fake_server_details, fake_volume, new_size=123) self._driver._ssh_exec.assert_any_call( fake_server_details, ['sudo', 'fsck', '-pf', '/dev/fake']) self._driver._ssh_exec.assert_any_call( fake_server_details, ['sudo', 'resize2fs', '/dev/fake', "%sG" % 123] ) self.assertEqual(2, self._driver._ssh_exec.call_count) @ddt.data( { 'source': processutils.ProcessExecutionError( stderr="resize2fs: New size smaller than minimum (123456)"), 'target': exception.Invalid }, { 'source': processutils.ProcessExecutionError(stderr="fake_error"), 'target': exception.ManilaException } ) @ddt.unpack def test_resize_filesystem_invalid_new_size(self, source, target): fake_server_details = {'fake': 'fake'} fake_volume = {'mountpoint': '/dev/fake'} ssh_mock = mock.Mock(side_effect=["fake", source]) self.mock_object(self._driver, '_ssh_exec', ssh_mock) self.assertRaises( target, self._driver._resize_filesystem, fake_server_details, fake_volume, new_size=123 ) def test_shrink_share_invalid_size(self): fake_share = {'id': 'fake', 'export_locations': [{'path': 'test'}]} new_size = 123 self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server) ) 
self.mock_object(self._driver, '_get_helper') self.mock_object(self._driver, '_get_consumed_space', mock.Mock(return_value=200)) CONF.set_default('driver_handles_share_servers', False) self.assertRaises( exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, fake_share, new_size ) self._driver._get_helper.assert_called_once_with(fake_share) self._driver._get_consumed_space.assert_called_once_with( mock.ANY, self.server['backend_details']) def _setup_shrink_mocks(self): share = {'id': 'fake', 'export_locations': [{'path': 'test'}], 'name': 'fake'} volume = {'id': 'fake'} new_size = 123 server_details = self.server['backend_details'] self.mock_object( self._driver.service_instance_manager, 'get_common_server', mock.Mock(return_value=self.server) ) helper = mock.Mock() self.mock_object(self._driver, '_get_helper', mock.Mock(return_value=helper)) self.mock_object(self._driver, '_get_consumed_space', mock.Mock(return_value=100)) self.mock_object(self._driver, '_get_volume', mock.Mock(return_value=volume)) self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_mount_device') CONF.set_default('driver_handles_share_servers', False) return share, volume, new_size, server_details, helper @ddt.data({'source': exception.Invalid("fake"), 'target': exception.ShareShrinkingPossibleDataLoss}, {'source': exception.ManilaException("fake"), 'target': exception.Invalid}) @ddt.unpack def test_shrink_share_error_on_resize_fs(self, source, target): share, vol, size, server_details, _ = self._setup_shrink_mocks() resize_mock = mock.Mock(side_effect=source) self.mock_object(self._driver, '_resize_filesystem', resize_mock) self.assertRaises(target, self._driver.shrink_share, share, size) resize_mock.assert_called_once_with(server_details, vol, new_size=size) def test_shrink_share(self): share, vol, size, server_details, helper = self._setup_shrink_mocks() self.mock_object(self._driver, '_resize_filesystem') self._driver.shrink_share(share, size) 
self._driver._get_helper.assert_called_once_with(share) self._driver._get_consumed_space.assert_called_once_with( mock.ANY, server_details) self._driver._get_volume.assert_called_once_with(mock.ANY, share['id']) self._driver._unmount_device.assert_called_once_with(share, server_details) self._driver._resize_filesystem( server_details, vol, new_size=size) self._driver._mount_device(share, server_details, vol) self.assertTrue(helper.disable_access_for_maintenance.called) self.assertTrue(helper.restore_access_after_maintenance.called) @ddt.data({'share_servers': [], 'result': None}, {'share_servers': None, 'result': None}, {'share_servers': ['fake'], 'result': 'fake'}, {'share_servers': ['fake', 'test'], 'result': 'fake'}) @ddt.unpack def tests_choose_share_server_compatible_with_share(self, share_servers, result): fake_share = "fake" actual_result = self._driver.choose_share_server_compatible_with_share( self._context, share_servers, fake_share ) self.assertEqual(result, actual_result) def test_manage_snapshot_not_found(self): snapshot_instance = {'id': 'snap_instance_id', 'provider_location': 'vol_snap_id'} driver_options = {} self.mock_object( self._driver.volume_api, 'get_snapshot', mock.Mock(side_effect=exception.VolumeSnapshotNotFound( snapshot_id='vol_snap_id'))) self.assertRaises(exception.ManageInvalidShareSnapshot, self._driver.manage_existing_snapshot, snapshot_instance, driver_options) self._driver.volume_api.get_snapshot.assert_called_once_with( self._context, 'vol_snap_id') def test_manage_snapshot_valid(self): snapshot_instance = {'id': 'snap_instance_id', 'provider_location': 'vol_snap_id'} volume_snapshot = {'id': 'vol_snap_id', 'size': 1} self.mock_object(self._driver.volume_api, 'get_snapshot', mock.Mock(return_value=volume_snapshot)) ret_manage = self._driver.manage_existing_snapshot( snapshot_instance, {}) self.assertEqual({'provider_location': 'vol_snap_id', 'size': 1}, ret_manage) self._driver.volume_api.get_snapshot.assert_called_once_with( 
self._context, 'vol_snap_id') def test_unmanage_snapshot(self): snapshot_instance = {'id': 'snap_instance_id', 'provider_location': 'vol_snap_id'} self.mock_object(self._driver.private_storage, 'delete') self._driver.unmanage_snapshot(snapshot_instance) self._driver.private_storage.delete.assert_called_once_with( 'snap_instance_id') @generic.ensure_server def fake(driver_instance, context, share_server=None): return share_server @ddt.ddt class GenericDriverEnsureServerTestCase(test.TestCase): def setUp(self): super(GenericDriverEnsureServerTestCase, self).setUp() self._context = context.get_admin_context() self.server = {'id': 'fake_id', 'backend_details': {'foo': 'bar'}} self.dhss_false = type( 'Fake', (object,), {'driver_handles_share_servers': False}) self.dhss_true = type( 'Fake', (object,), {'driver_handles_share_servers': True}) def test_share_servers_are_not_handled_server_not_provided(self): self.dhss_false.service_instance_manager = mock.Mock() self.dhss_false.service_instance_manager.get_common_server = ( mock.Mock(return_value=self.server)) self.dhss_false.service_instance_manager.ensure_service_instance = ( mock.Mock(return_value=True)) actual = fake(self.dhss_false, self._context) self.assertEqual(self.server, actual) (self.dhss_false.service_instance_manager. get_common_server.assert_called_once_with()) (self.dhss_false.service_instance_manager.ensure_service_instance. 
assert_called_once_with( self._context, self.server['backend_details'])) @ddt.data({'id': 'without_details'}, {'id': 'with_details', 'backend_details': {'foo': 'bar'}}) def test_share_servers_are_not_handled_server_provided(self, server): self.assertRaises( exception.ManilaException, fake, self.dhss_false, self._context, share_server=server) def test_share_servers_are_handled_server_provided(self): self.dhss_true.service_instance_manager = mock.Mock() self.dhss_true.service_instance_manager.ensure_service_instance = ( mock.Mock(return_value=True)) actual = fake(self.dhss_true, self._context, share_server=self.server) self.assertEqual(self.server, actual) (self.dhss_true.service_instance_manager.ensure_service_instance. assert_called_once_with( self._context, self.server['backend_details'])) def test_share_servers_are_handled_invalid_server_provided(self): server = {'id': 'without_details'} self.assertRaises( exception.ManilaException, fake, self.dhss_true, self._context, share_server=server) def test_share_servers_are_handled_server_not_provided(self): self.assertRaises( exception.ManilaException, fake, self.dhss_true, self._context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/test_glusterfs.py0000664000175000017500000004600700000000000023677 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy
import socket
from unittest import mock

import ddt
from oslo_config import cfg

from manila import context
from manila import exception
from manila.share import configuration as config
from manila.share.drivers import ganesha
from manila.share.drivers import glusterfs
from manila.share.drivers.glusterfs import layout
from manila import test
from manila.tests import fake_share
from manila.tests import fake_utils


CONF = cfg.CONF

# Attribute set used to fabricate GlusterManager mocks throughout this file.
fake_gluster_manager_attrs = {
    'export': '127.0.0.1:/testvol',
    'host': '127.0.0.1',
    'qualified': 'testuser@127.0.0.1:/testvol',
    'user': 'testuser',
    'volume': 'testvol',
    'path_to_private_key': '/fakepath/to/privatekey',
    'remote_server_password': 'fakepassword',
}

fake_share_name = 'fakename'
# Gluster volume option names the driver reads/writes; tests assert on these.
NFS_EXPORT_DIR = 'nfs.export-dir'
NFS_EXPORT_VOL = 'nfs.export-volumes'
NFS_RPC_AUTH_ALLOW = 'nfs.rpc-auth-allow'
NFS_RPC_AUTH_REJECT = 'nfs.rpc-auth-reject'


@ddt.ddt
class GlusterfsShareDriverTestCase(test.TestCase):
    """Tests GlusterfsShareDriver."""

    def setUp(self):
        super(GlusterfsShareDriverTestCase, self).setUp()
        fake_utils.stub_out_utils_execute(self)
        self._execute = fake_utils.fake_execute
        self._context = context.get_admin_context()

        self.addCleanup(fake_utils.fake_execute_set_repliers, [])
        self.addCleanup(fake_utils.fake_execute_clear_log)

        CONF.set_default('reserved_share_percentage', 50)
        CONF.set_default('reserved_share_from_snapshot_percentage', 30)
        CONF.set_default('reserved_share_extend_percentage', 30)
        CONF.set_default('driver_handles_share_servers', False)

        self.fake_conf = config.Configuration(None)
        self._driver = glusterfs.GlusterfsShareDriver(
            execute=self._execute,
            configuration=self.fake_conf)
        self.share = fake_share.fake_share(share_proto='NFS')

    def test_do_setup(self):
        """do_setup instantiates the helper and defers to the layout base."""
        self.mock_object(self._driver, '_get_helper')
        self.mock_object(layout.GlusterfsShareDriverBase, 'do_setup')
        _context = mock.Mock()

        self._driver.do_setup(_context)

        self._driver._get_helper.assert_called_once_with()
        layout.GlusterfsShareDriverBase.do_setup.assert_called_once_with(
            _context)

    @ddt.data(True, False)
    def test_setup_via_manager(self, has_parent):
        """Setup disables volume-wide NFS export and returns the export."""
        gmgr = mock.Mock()
        share_mgr_parent = mock.Mock() if has_parent else None
        nfs_helper = mock.Mock()
        nfs_helper.get_export = mock.Mock(return_value='host:/vol')
        self._driver.nfs_helper = mock.Mock(return_value=nfs_helper)

        ret = self._driver._setup_via_manager(
            {'manager': gmgr, 'share': self.share},
            share_manager_parent=share_mgr_parent)

        gmgr.set_vol_option.assert_called_once_with(
            'nfs.export-volumes', False)
        self._driver.nfs_helper.assert_called_once_with(
            self._execute, self.fake_conf, gluster_manager=gmgr)
        nfs_helper.get_export.assert_called_once_with(self.share)
        self.assertEqual('host:/vol', ret)

    @ddt.data({'helpercls': None, 'path': '/fakepath'},
              {'helpercls': None, 'path': None},
              {'helpercls': glusterfs.GlusterNFSHelper, 'path': '/fakepath'},
              {'helpercls': glusterfs.GlusterNFSHelper, 'path': None},)
    @ddt.unpack
    def test_setup_via_manager_path(self, helpercls, path):
        """Volume options set depend on helper class and manager path."""
        gmgr = mock.Mock()
        gmgr.path = path
        if not helpercls:
            helper = mock.Mock()
            helper.get_export = mock.Mock(return_value='host:/vol')
            helpercls = mock.Mock(return_value=helper)
        self._driver.nfs_helper = helpercls
        if helpercls == glusterfs.GlusterNFSHelper and path is None:
            # Pathless Gluster-NFS requires nfs.export-volumes enabled.
            gmgr.get_vol_option = mock.Mock(return_value=True)

        self._driver._setup_via_manager(
            {'manager': gmgr, 'share': self.share})

        if helpercls == glusterfs.GlusterNFSHelper and path is None:
            gmgr.get_vol_option.assert_called_once_with(
                NFS_EXPORT_VOL, boolean=True)
            args = (NFS_RPC_AUTH_REJECT, '*')
        else:
            args = (NFS_EXPORT_VOL, False)
        gmgr.set_vol_option.assert_called_once_with(*args)

    def test_setup_via_manager_export_volumes_off(self):
        """Pathless Gluster-NFS with export-volumes disabled must fail."""
        gmgr = mock.Mock()
        gmgr.path = None
        gmgr.get_vol_option = mock.Mock(return_value=False)
        self._driver.nfs_helper = glusterfs.GlusterNFSHelper

        self.assertRaises(exception.GlusterfsException,
                          self._driver._setup_via_manager,
                          {'manager': gmgr, 'share': self.share})
        gmgr.get_vol_option.assert_called_once_with(NFS_EXPORT_VOL,
                                                    boolean=True)

    def test_check_for_setup_error(self):
        """check_for_setup_error is a no-op for this driver."""
        self._driver.check_for_setup_error()

    def test_update_share_stats(self):
        """Stats include protocol, vendor and the configured reservations."""
        self.mock_object(layout.GlusterfsShareDriverBase,
                         '_update_share_stats')

        self._driver._update_share_stats()

        (layout.GlusterfsShareDriverBase._update_share_stats.
         assert_called_once_with({'storage_protocol': 'NFS',
                                  'vendor_name': 'Red Hat',
                                  'share_backend_name': 'GlusterFS',
                                  'reserved_percentage': 50,
                                  'reserved_snapshot_percentage': 30,
                                  'reserved_share_extend_percentage': 30}))

    def test_get_network_allocations_number(self):
        """No network allocations are needed (DHSS=False driver)."""
        self.assertEqual(0, self._driver.get_network_allocations_number())

    def test_get_helper(self):
        """Without a manager, the configured nfs_helper class is used."""
        ret = self._driver._get_helper()
        self.assertIsInstance(ret, self._driver.nfs_helper)

    @ddt.data({'path': '/fakepath', 'helper': glusterfs.GlusterNFSHelper},
              {'path': None, 'helper': glusterfs.GlusterNFSVolHelper})
    @ddt.unpack
    def test_get_helper_vol(self, path, helper):
        """A pathless manager upgrades GlusterNFSHelper to the Vol variant."""
        self._driver.nfs_helper = glusterfs.GlusterNFSHelper
        gmgr = mock.Mock(path=path)

        ret = self._driver._get_helper(gmgr)

        self.assertIsInstance(ret, helper)

    @ddt.data('type', 'level')
    def test_supported_access_features(self, feature):
        """supported_access_types/levels proxy the helper's attributes."""
        nfs_helper = mock.Mock()
        supported_access_feature = mock.Mock()
        setattr(nfs_helper, 'supported_access_%ss' % feature,
                supported_access_feature)
        self.mock_object(self._driver, 'nfs_helper', nfs_helper)

        ret = getattr(self._driver, 'supported_access_%ss' % feature)

        self.assertEqual(supported_access_feature, ret)

    def test_update_access_via_manager(self):
        """Access updates are delegated to the per-manager helper."""
        self.mock_object(self._driver, '_get_helper')
        gmgr = mock.Mock()
        add_rules = mock.Mock()
        delete_rules = mock.Mock()

        self._driver._update_access_via_manager(
            gmgr, self._context, self.share, add_rules, delete_rules,
            recovery=True)

        self._driver._get_helper.assert_called_once_with(gmgr)
        self._driver._get_helper().update_access.assert_called_once_with(
            '/', self.share, add_rules, delete_rules, recovery=True)


@ddt.ddt
class GlusterNFSHelperTestCase(test.TestCase):
    """Tests GlusterNFSHelper."""

    def setUp(self):
        super(GlusterNFSHelperTestCase, self).setUp()
        fake_utils.stub_out_utils_execute(self)
        gluster_manager = mock.Mock(**fake_gluster_manager_attrs)
        self._execute = mock.Mock(return_value=('', ''))
        self.fake_conf = config.Configuration(None)
        self._helper = glusterfs.GlusterNFSHelper(
            self._execute, self.fake_conf,
            gluster_manager=gluster_manager)

    def test_get_export(self):
        """The export location comes straight from the gluster manager."""
        ret = self._helper.get_export(mock.Mock())
        self.assertEqual(fake_gluster_manager_attrs['export'], ret)

    @ddt.data({'output_str': '/foo(10.0.0.1|10.0.0.2),/bar(10.0.0.1)',
               'expected': {'foo': ['10.0.0.1', '10.0.0.2'],
                            'bar': ['10.0.0.1']}},
              {'output_str': None, 'expected': {}})
    @ddt.unpack
    def test_get_export_dir_dict(self, output_str, expected):
        """nfs.export-dir output is parsed into a dir -> hosts mapping."""
        self.mock_object(self._helper.gluster_manager,
                         'get_vol_option',
                         mock.Mock(return_value=output_str))

        ret = self._helper._get_export_dir_dict()

        self.assertEqual(expected, ret)
        (self._helper.gluster_manager.get_vol_option.
         assert_called_once_with(NFS_EXPORT_DIR))

    @ddt.data({'delta': (['10.0.0.2'], []), 'extra_exports': {},
               'new_exports': '/fakename(10.0.0.1|10.0.0.2)'},
              {'delta': (['10.0.0.1'], []), 'extra_exports': {},
               'new_exports': '/fakename(10.0.0.1)'},
              {'delta': ([], ['10.0.0.2']), 'extra_exports': {},
               'new_exports': '/fakename(10.0.0.1)'},
              {'delta': ([], ['10.0.0.1']), 'extra_exports': {},
               'new_exports': None},
              {'delta': ([], ['10.0.0.1']),
               'extra_exports': {'elsewhere': ['10.0.1.3']},
               'new_exports': '/elsewhere(10.0.1.3)'})
    @ddt.unpack
    def test_update_access(self, delta, extra_exports, new_exports):
        """Add/delete rule deltas are folded into a new export-dir string."""
        gluster_manager_attrs = {'path': '/fakename'}
        gluster_manager_attrs.update(fake_gluster_manager_attrs)
        gluster_mgr = mock.Mock(**gluster_manager_attrs)
        helper = glusterfs.GlusterNFSHelper(
            self._execute, self.fake_conf, gluster_manager=gluster_mgr)
        # Pre-existing exports: this share already allows 10.0.0.1.
        export_dir_dict = {'fakename': ['10.0.0.1']}
        export_dir_dict.update(extra_exports)
        helper._get_export_dir_dict = mock.Mock(return_value=export_dir_dict)
        _share = mock.Mock()

        add_rules, delete_rules = (
            map(lambda a: {'access_to': a}, r) for r in delta)
        helper.update_access('/', _share, add_rules, delete_rules)

        helper._get_export_dir_dict.assert_called_once_with()
        gluster_mgr.set_vol_option.assert_called_once_with(NFS_EXPORT_DIR,
                                                           new_exports)

    @ddt.data({}, {'elsewhere': '10.0.1.3'})
    def test_update_access_disjoint(self, export_dir_dict):
        """Deleting a rule that is not exported leaves options untouched."""
        gluster_manager_attrs = {'path': '/fakename'}
        gluster_manager_attrs.update(fake_gluster_manager_attrs)
        gluster_mgr = mock.Mock(**gluster_manager_attrs)
        helper = glusterfs.GlusterNFSHelper(
            self._execute, self.fake_conf, gluster_manager=gluster_mgr)
        helper._get_export_dir_dict = mock.Mock(return_value=export_dir_dict)
        _share = mock.Mock()

        helper.update_access('/', _share, [], [{'access_to': '10.0.0.2'}])

        helper._get_export_dir_dict.assert_called_once_with()
        self.assertFalse(gluster_mgr.set_vol_option.called)


@ddt.ddt
class GlusterNFSVolHelperTestCase(test.TestCase):
    """Tests GlusterNFSVolHelper."""

    def setUp(self):
        super(GlusterNFSVolHelperTestCase, self).setUp()
        fake_utils.stub_out_utils_execute(self)
        gluster_manager = mock.Mock(**fake_gluster_manager_attrs)
        self._execute = mock.Mock(return_value=('', ''))
        self.fake_conf = config.Configuration(None)
        self._helper = glusterfs.GlusterNFSVolHelper(
            self._execute, self.fake_conf,
            gluster_manager=gluster_manager)

    @ddt.data({'output_str': '10.0.0.1,10.0.0.2',
               'expected': ['10.0.0.1', '10.0.0.2']},
              {'output_str': None, 'expected': []})
    @ddt.unpack
    def test_get_vol_exports(self, output_str, expected):
        """nfs.rpc-auth-allow output is split into a host list."""
        self.mock_object(self._helper.gluster_manager, 'get_vol_option',
                         mock.Mock(return_value=output_str))

        ret = self._helper._get_vol_exports()

        self.assertEqual(expected, ret)
        (self._helper.gluster_manager.get_vol_option.
         assert_called_once_with(NFS_RPC_AUTH_ALLOW))

    @ddt.data({'delta': (["10.0.0.1"], []),
               'expected': "10.0.0.1,10.0.0.3"},
              {'delta': (["10.0.0.2"], []),
               'expected': "10.0.0.1,10.0.0.2,10.0.0.3"},
              {'delta': ([], ["10.0.0.1"]), 'expected': "10.0.0.3"},
              {'delta': ([], ["10.0.0.2"]),
               'expected': "10.0.0.1,10.0.0.3"})
    @ddt.unpack
    def test_update_access(self, delta, expected):
        """Non-empty result sets rpc-auth-allow and clears rpc-auth-reject."""
        self.mock_object(self._helper, '_get_vol_exports', mock.Mock(
            return_value=["10.0.0.1", "10.0.0.3"]))
        _share = mock.Mock()

        add_rules, delete_rules = (
            map(lambda a: {'access_to': a}, r) for r in delta)
        self._helper.update_access("/", _share, add_rules, delete_rules)

        self._helper._get_vol_exports.assert_called_once_with()
        argseq = [(NFS_RPC_AUTH_ALLOW, expected),
                  (NFS_RPC_AUTH_REJECT, None)]
        self.assertEqual(
            [mock.call(*a) for a in argseq],
            self._helper.gluster_manager.set_vol_option.call_args_list)

    def test_update_access_empty(self):
        """Emptying the allow list clears allow and rejects all hosts."""
        self.mock_object(self._helper, '_get_vol_exports', mock.Mock(
            return_value=["10.0.0.1"]))
        _share = mock.Mock()

        self._helper.update_access("/", _share, [],
                                   [{'access_to': "10.0.0.1"}])

        self._helper._get_vol_exports.assert_called_once_with()
        argseq = [(NFS_RPC_AUTH_ALLOW, None),
                  (NFS_RPC_AUTH_REJECT, "*")]
        self.assertEqual(
            [mock.call(*a) for a in argseq],
            self._helper.gluster_manager.set_vol_option.call_args_list)


class GaneshaNFSHelperTestCase(test.TestCase):
    """Tests GaneshaNFSHelper."""

    def setUp(self):
        super(GaneshaNFSHelperTestCase, self).setUp()
        self.gluster_manager = mock.Mock(**fake_gluster_manager_attrs)
        self._execute = mock.Mock(return_value=('', ''))
        self._root_execute = mock.Mock(return_value=('', ''))
        self.access = fake_share.fake_access()
        self.fake_conf = config.Configuration(None)
        self.fake_template = {'key': 'value'}
        self.share = fake_share.fake_share()
        self.mock_object(glusterfs.ganesha_utils, 'RootExecutor',
                         mock.Mock(return_value=self._root_execute))
        self.mock_object(glusterfs.ganesha.GaneshaNASHelper, '__init__',
                         mock.Mock())
        # NOTE: module-level patch of socket.gethostname (not restored via
        # mock_object) so construction below sees a deterministic hostname.
        socket.gethostname = mock.Mock(return_value='example.com')
        self._helper = glusterfs.GaneshaNFSHelper(
            self._execute, self.fake_conf,
            gluster_manager=self.gluster_manager)
        self._helper.tag = 'GLUSTER-Ganesha-localhost'

    def test_init_local_ganesha_server(self):
        """No server IP configured: Ganesha runs locally via RootExecutor."""
        glusterfs.ganesha_utils.RootExecutor.assert_called_once_with(
            self._execute)
        socket.gethostname.assert_has_calls([mock.call()])
        glusterfs.ganesha.GaneshaNASHelper.__init__.assert_has_calls(
            [mock.call(self._root_execute, self.fake_conf,
                       tag='GLUSTER-Ganesha-example.com')])

    def test_get_export(self):
        """Export is hostname:<gluster path with slashes mangled to '-'>."""
        ret = self._helper.get_export(self.share)
        self.assertEqual('example.com:/fakename--', ret)

    def test_init_remote_ganesha_server(self):
        """With glusterfs_ganesha_server_ip set, an SSHExecutor is used."""
        ssh_execute = mock.Mock(return_value=('', ''))
        CONF.set_default('glusterfs_ganesha_server_ip', 'fakeip')
        self.mock_object(glusterfs.ganesha_utils, 'SSHExecutor',
                         mock.Mock(return_value=ssh_execute))
        glusterfs.GaneshaNFSHelper(
            self._execute, self.fake_conf,
            gluster_manager=self.gluster_manager)
        glusterfs.ganesha_utils.SSHExecutor.assert_called_once_with(
            'fakeip', 22, None, 'root', password=None, privatekey=None)
        glusterfs.ganesha.GaneshaNASHelper.__init__.assert_has_calls(
            [mock.call(ssh_execute, self.fake_conf,
                       tag='GLUSTER-Ganesha-fakeip')])

    def test_init_helper(self):
        """First init populates shared_data; a same-tag helper reuses it."""
        ganeshelper = mock.Mock()
        exptemp = mock.Mock()

        def set_attributes(*a, **kw):
            self._helper.ganesha = ganeshelper
            self._helper.export_template = exptemp

        self.mock_object(ganesha.GaneshaNASHelper, 'init_helper',
                         mock.Mock(side_effect=set_attributes))
        self.assertEqual({}, glusterfs.GaneshaNFSHelper.shared_data)

        self._helper.init_helper()

        ganesha.GaneshaNASHelper.init_helper.assert_called_once_with()
        self.assertEqual(ganeshelper, self._helper.ganesha)
        self.assertEqual(exptemp, self._helper.export_template)
        self.assertEqual({
            'GLUSTER-Ganesha-localhost': {
                'ganesha': ganeshelper,
                'export_template': exptemp}},
            glusterfs.GaneshaNFSHelper.shared_data)

        # A second helper with the same tag must get the cached objects
        # without invoking the (mocked-once) base init_helper again.
        other_helper = glusterfs.GaneshaNFSHelper(
            self._execute, self.fake_conf,
            gluster_manager=self.gluster_manager)
        other_helper.tag = 'GLUSTER-Ganesha-localhost'

        other_helper.init_helper()

        self.assertEqual(ganeshelper, other_helper.ganesha)
        self.assertEqual(exptemp, other_helper.export_template)

    def test_default_config_hook(self):
        """The glusterfs conf dir is patched over the base template."""
        fake_conf_dict = {'key': 'value1'}
        mock_ganesha_utils_patch = mock.Mock()

        def fake_patch_run(tmpl1, tmpl2):
            # Record a deep copy so later mutation doesn't mask the call
            # arguments, then emulate patch() by merging in place.
            mock_ganesha_utils_patch(
                copy.deepcopy(tmpl1), tmpl2)
            tmpl1.update(tmpl2)

        self.mock_object(glusterfs.ganesha.GaneshaNASHelper,
                         '_default_config_hook',
                         mock.Mock(return_value=self.fake_template))
        self.mock_object(glusterfs.ganesha_utils, 'path_from',
                         mock.Mock(return_value='/fakedir/glusterfs/conf'))
        self.mock_object(self._helper, '_load_conf_dir',
                         mock.Mock(return_value=fake_conf_dict))
        self.mock_object(glusterfs.ganesha_utils, 'patch',
                         mock.Mock(side_effect=fake_patch_run))

        ret = self._helper._default_config_hook()

        (glusterfs.ganesha.GaneshaNASHelper._default_config_hook.
         assert_called_once_with())
        glusterfs.ganesha_utils.path_from.assert_called_once_with(
            glusterfs.__file__, 'conf')
        self._helper._load_conf_dir.assert_called_once_with(
            '/fakedir/glusterfs/conf')
        glusterfs.ganesha_utils.patch.assert_called_once_with(
            self.fake_template, fake_conf_dict)
        self.assertEqual(fake_conf_dict, ret)

    def test_fsal_hook(self):
        """FSAL block carries the Gluster host, volume and volume path."""
        self._helper.gluster_manager.path = '/fakename'
        output = {
            'Hostname': '127.0.0.1',
            'Volume': 'testvol',
            'Volpath': '/fakename'
        }

        ret = self._helper._fsal_hook('/fakepath', self.share, self.access)

        self.assertEqual(output, ret)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0
manila-21.0.0/manila/tests/share/drivers/test_helpers.py0000664000175000017500000010507400000000000023323 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
import os
from unittest import mock

import ddt
from oslo_config import cfg

from manila.common import constants as const
from manila import exception
import manila.share.configuration
from manila.share.drivers import helpers
from manila import test
from manila.tests import fake_compute
from manila.tests import fake_utils
from manila.tests.share.drivers import test_generic


CONF = cfg.CONF


@ddt.ddt
class NFSHelperTestCase(test.TestCase):
    """Test case for NFS helper."""

    def setUp(self):
        super(NFSHelperTestCase, self).setUp()
        fake_utils.stub_out_utils_execute(self)
        self.fake_conf = manila.share.configuration.Configuration(None)
        self._ssh_exec = mock.Mock(return_value=('', ''))
        self._execute = mock.Mock(return_value=('', ''))
        self._helper = helpers.NFSHelper(self._execute, self._ssh_exec,
                                         self.fake_conf)
        ip = '10.254.0.3'
        self.server = fake_compute.FakeServer(
            ip=ip, public_address=ip, instance_id='fake_instance_id')
        self.share_name = 'fake_share_name'

    def test_init_helper(self):
        """'command not found' from exportfs is fatal during init."""
        # mocks
        self.mock_object(
            self._helper, '_ssh_exec',
            mock.Mock(side_effect=exception.ProcessExecutionError(
                stderr='command not found')))

        # run
        self.assertRaises(exception.ManilaException,
                          self._helper.init_helper, self.server)

        # asserts
        self._helper._ssh_exec.assert_called_once_with(
            self.server, ['sudo', 'exportfs'])

    def test_init_helper_log(self):
        """Other exportfs errors during init are only logged, not raised."""
        # mocks
        self.mock_object(
            self._helper, '_ssh_exec',
            mock.Mock(side_effect=exception.ProcessExecutionError(
                stderr='fake')))

        # run
        self._helper.init_helper(self.server)

        # asserts
        self._helper._ssh_exec.assert_called_once_with(
            self.server, ['sudo', 'exportfs'])

    @ddt.data(
        {"server": {"public_address": "1.2.3.4"}, "version": 4},
        {"server": {"public_address": "1001::1002"}, "version": 6},
        {"server": {"public_address": "1.2.3.4", "admin_ip": "5.6.7.8"},
         "version": 4},
        {"server": {"public_address": "1.2.3.4", "ip": "9.10.11.12"},
         "version": 4},
        {"server": {"public_address": "1001::1001", "ip": "1001::1002"},
         "version": 6},
        {"server": {"public_address": "1001::1002",
                    "admin_ip": "1001::1002"},
         "version": 6},
        {"server": {"public_addresses": ["1001::1002"]}, "version": 6},
        {"server": {"public_addresses": ["1.2.3.4", "1001::1002"]},
         "version": {"1.2.3.4": 4, "1001::1002": 6}},
    )
    @ddt.unpack
    def test_create_exports(self, server, version):
        """Exports cover public address(es) plus an admin-only service IP.

        IPv6 addresses are bracketed; 'version' may be a per-address map
        when the server mixes IPv4 and IPv6 public addresses.
        """
        result = self._helper.create_exports(server, self.share_name)

        expected_export_locations = []
        path = os.path.join(CONF.share_mount_path, self.share_name)
        service_address = server.get("admin_ip", server.get("ip"))
        version_copy = version

        def convert_address(address, version):
            # Bracket IPv6 literals for use in host:path export strings.
            if version == 4:
                return address
            return "[%s]" % address

        if 'public_addresses' in server:
            pairs = list(map(lambda addr: (addr, False),
                             server['public_addresses']))
        else:
            pairs = [(server['public_address'], False)]
        service_address = server.get("admin_ip", server.get("ip"))
        if service_address:
            # The service/admin address is exported as admin-only.
            pairs.append((service_address, True))
        for ip, is_admin in pairs:
            if isinstance(version_copy, dict):
                version = version_copy.get(ip)
            expected_export_locations.append({
                "path": "%s:%s" % (convert_address(ip, version), path),
                "is_admin_only": is_admin,
                "metadata": {
                    "export_location_metadata_example": "example",
                },
            })
        self.assertEqual(expected_export_locations, result)

    @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO)
    def test_update_access(self, access_level):
        """Stale exports are removed and only new IP rules are exported."""
        expected_mount_options = '%s,no_subtree_check,no_root_squash'
        self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
        local_path = os.path.join(CONF.share_mount_path, self.share_name)
        # exportfs already reports 2.2.2.3 exported, so re-adding it is
        # skipped below.
        exec_result = ' '.join([local_path, '2.2.2.3'])
        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(return_value=(exec_result, '')))
        access_rules = [
            test_generic.get_fake_access_rule('1.1.1.1', access_level),
            test_generic.get_fake_access_rule('2.2.2.2', access_level),
            test_generic.get_fake_access_rule('2.2.2.3', access_level)]
        add_rules = [
            test_generic.get_fake_access_rule('2.2.2.2', access_level),
            test_generic.get_fake_access_rule('2.2.2.3', access_level),
            test_generic.get_fake_access_rule('5.5.5.0/24', access_level)]
        delete_rules = [
            test_generic.get_fake_access_rule('3.3.3.3', access_level),
            test_generic.get_fake_access_rule('4.4.4.4', access_level,
                                              'user'),
            test_generic.get_fake_access_rule('0.0.0.0/0', access_level)]

        self._helper.update_access(self.server, self.share_name,
                                   access_rules, add_rules=add_rules,
                                   delete_rules=delete_rules)

        local_path = os.path.join(CONF.share_mount_path, self.share_name)
        self._helper._ssh_exec.assert_has_calls([
            mock.call(self.server, ['sudo', 'exportfs']),
            mock.call(self.server, ['sudo', 'exportfs', '-u',
                                    ':'.join(['3.3.3.3', local_path])]),
            # 0.0.0.0/0 is unexported as the '*' wildcard.
            mock.call(self.server, ['sudo', 'exportfs', '-u',
                                    ':'.join(['*', local_path])]),
            mock.call(self.server, ['sudo', 'exportfs', '-o',
                                    expected_mount_options % access_level,
                                    ':'.join(['2.2.2.2', local_path])]),
            mock.call(self.server, ['sudo', 'exportfs', '-o',
                                    expected_mount_options % access_level,
                                    ':'.join(['5.5.5.0/24', local_path])]),
        ])
        self._helper._sync_nfs_temp_and_perm_files.assert_has_calls([
            mock.call(self.server), mock.call(self.server)])

    @ddt.data({'access': '10.0.0.1', 'result': '10.0.0.1'},
              {'access': '10.0.0.1/32', 'result': '10.0.0.1'},
              {'access': '10.0.0.0/24', 'result': '10.0.0.0/24'},
              {'access': '1001::1001', 'result': '[1001::1001]'},
              {'access': '1001::1000/128', 'result': '[1001::1000]'},
              {'access': '1001::1000/124', 'result': '[1001::1000]/124'})
    @ddt.unpack
    def test__get_parsed_address_or_cidr(self, access, result):
        """Host-length prefixes collapse; IPv6 addresses get brackets."""
        self.assertEqual(result,
                         self._helper._get_parsed_address_or_cidr(access))

    @ddt.data('10.0.0.265', '10.0.0.1/33', '1001::10069', '1001::1000/129')
    def test__get_parsed_address_or_cidr_with_invalid_access(self, access):
        """Malformed addresses or out-of-range prefixes raise ValueError."""
        self.assertRaises(ValueError,
                          self._helper._get_parsed_address_or_cidr,
                          access)

    def test_update_access_invalid_type(self):
        """Non-IP access types are rejected for NFS."""
        access_rules = [test_generic.get_fake_access_rule(
            '2.2.2.2', const.ACCESS_LEVEL_RW, access_type='fake'), ]
        self.assertRaises(
            exception.InvalidShareAccess,
            self._helper.update_access,
            self.server, self.share_name, access_rules, [], [])

    def test_update_access_invalid_level(self):
        """Unknown access levels are rejected."""
        access_rules = [test_generic.get_fake_access_rule(
            '2.2.2.2', 'fake_level', access_type='ip'), ]
        self.assertRaises(
            exception.InvalidShareAccessLevel,
            self._helper.update_access,
            self.server, self.share_name, access_rules, [], [])

    @ddt.data({'access_to': 'lala', 'access_type': 'user'},
              {'access_to': '203.0.113.29'},
              {'access_to': '2001:0DB8:7d18:c63e:5f0a:871f:83b8:d244',
               'access_level': 'ro'})
    @ddt.unpack
    def test_update_access_delete_invalid_rule(
            self, access_to, access_level='rw', access_type='ip'):
        """exportfs 'Could not find ... to unexport' is tolerated."""
        mount_path = '%s:/shares/%s' % (access_to, self.share_name)
        if access_type == 'ip':
            self._helper._get_parsed_address_or_cidr = mock.Mock(
                return_value=access_to)
        not_found_msg = (
            "exportfs: Could not find '%s' to unexport.\n" % mount_path
        )
        exc = exception.ProcessExecutionError
        self.mock_object(
            self._helper, '_ssh_exec',
            mock.Mock(side_effect=[(0, 0), exc(stderr=not_found_msg)]))
        delete_rules = [
            test_generic.get_fake_access_rule(access_to, access_level,
                                              access_type),
        ]
        self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')

        self._helper.update_access(self.server, self.share_name, [], [],
                                   delete_rules)

        if access_type == 'ip':
            self._helper._ssh_exec.assert_has_calls([
                mock.call(self.server, ['sudo', 'exportfs']),
                mock.call(self.server,
                          ['sudo', 'exportfs', '-u', mount_path])])
        self._helper._sync_nfs_temp_and_perm_files.assert_called_with(
            self.server)

    def test_get_host_list(self):
        """Hosts for one export path are extracted; blank becomes '*'."""
        fake_exportfs = ('/shares/share-1\n\t\t20.0.0.3\n'
                         '/shares/share-1\n\t\t20.0.0.6\n'
                         '/shares/share-1\n\t\t\n'
                         '/shares/share-2\n\t\t10.0.0.2\n'
                         '/shares/share-2\n\t\t10.0.0.5\n'
                         '/shares/share-3\n\t\t30.0.0.4\n'
                         '/shares/share-3\n\t\t30.0.0.7\n')
        expected = ['20.0.0.3', '20.0.0.6', '*']
        result = self._helper.get_host_list(fake_exportfs,
                                            '/shares/share-1')
        self.assertEqual(expected, result)

    @ddt.data({"level": const.ACCESS_LEVEL_RW, "ip": "1.1.1.1",
               "expected": "1.1.1.1"},
              {"level": const.ACCESS_LEVEL_RO, "ip": "1.1.1.1",
               "expected": "1.1.1.1"},
              {"level": const.ACCESS_LEVEL_RW, "ip": "fd12:abcd::10",
               "expected": "[fd12:abcd::10]"},
              {"level": const.ACCESS_LEVEL_RO, "ip": "fd12:abcd::10",
               "expected": "[fd12:abcd::10]"})
    @ddt.unpack
    def test_update_access_recovery_mode(self, level, ip, expected):
        """Recovery resync unexports then re-exports each known host."""
        expected_mount_options = '%s,no_subtree_check,no_root_squash'
        access_rules = [test_generic.get_fake_access_rule(
            ip, level), ]
        self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
        self.mock_object(self._helper, 'get_host_list',
                         mock.Mock(return_value=[ip]))

        self._helper.update_access(self.server, self.share_name,
                                   access_rules, [], [])

        local_path = os.path.join(CONF.share_mount_path, self.share_name)
        self._ssh_exec.assert_has_calls([
            mock.call(self.server, ['sudo', 'exportfs']),
            mock.call(
                self.server,
                ['sudo', 'exportfs', '-u',
                 ':'.join([expected, local_path])]),
            mock.call(self.server,
                      ['sudo', 'exportfs', '-o',
                       expected_mount_options % level,
                       ':'.join([expected, local_path])]),
        ])
        self._helper._sync_nfs_temp_and_perm_files.assert_called_with(
            self.server)

    def test_sync_nfs_temp_and_perm_files(self):
        """Syncing issues a single remote command on the server."""
        self._helper._sync_nfs_temp_and_perm_files(self.server)
        self._helper._ssh_exec.assert_has_calls(
            [mock.call(self.server, mock.ANY) for i in range(1)])

    @ddt.data('/foo/bar', '5.6.7.8:/bar/quuz', '5.6.7.9:/foo/quuz',
              '[1001::1001]:/foo/bar', '[1001::1000]/:124:/foo/bar')
    def test_get_exports_for_share_single_ip(self, export_location):
        """The export path is re-announced under the public address."""
        server = dict(public_address='1.2.3.4')

        result = self._helper.get_exports_for_share(server, export_location)

        # Only the trailing path component of the location matters.
        path = export_location.split(':')[-1]
        expected_export_locations = [
            {"is_admin_only": False,
             "path": "%s:%s" % (server["public_address"], path),
             "metadata": {"export_location_metadata_example": "example"}}
        ]
        self.assertEqual(expected_export_locations, result)

    @ddt.data('/foo/bar', '5.6.7.8:/bar/quuz', '5.6.7.9:/foo/quuz')
    def test_get_exports_for_share_multi_ip(self, export_location):
        """One export location is produced per public address."""
        server = dict(public_addresses=['1.2.3.4', '1.2.3.5'])

        result = self._helper.get_exports_for_share(server, export_location)

        path = export_location.split(':')[-1]
        expected_export_locations = list(map(
            lambda addr: {
                "is_admin_only": False,
                "path": "%s:%s" % (addr, path),
                "metadata": {"export_location_metadata_example": "example"}
            },
            server['public_addresses'])
        )
        self.assertEqual(expected_export_locations, result)

    @ddt.data(
        {'public_address_with_suffix': 'foo'},
        {'with_prefix_public_address': 'bar'},
        {'with_prefix_public_address_and_with_suffix': 'quuz'},
        {})
    def test_get_exports_for_share_with_error(self, server):
        """A server dict without a public address key is rejected."""
        export_location = '1.2.3.4:/foo/bar'

        self.assertRaises(
            exception.ManilaException,
            self._helper.get_exports_for_share, server, export_location)

    @ddt.data('/foo/bar', '5.6.7.8:/foo/bar', '5.6.7.88:fake:/foo/bar',
              '[1001::1002]:/foo/bar', '[1001::1000]/124:/foo/bar')
    def test_get_share_path_by_export_location(self, export_location):
        """The filesystem path is the last colon-separated component."""
        result = self._helper.get_share_path_by_export_location(
            dict(), export_location)

        self.assertEqual('/foo/bar', result)

    @ddt.data(
        ('/shares/fake_share1\n\t\t1.1.1.10\n'
         '/shares/fake_share2\n\t\t1.1.1.16\n'
         '/shares/fake_share3\n\t\t\n'
         '/mnt/fake_share1 1.1.1.11', False),
        ('/shares/fake_share_name\n\t\t1.1.1.10\n'
         '/shares/fake_share_name\n\t\t1.1.1.16\n'
         '/shares/fake_share_name\n\t\t\n'
         '/mnt/fake_share1\n\t\t1.1.1.11', True),
        ('/mnt/fake_share_name\n\t\t1.1.1.11\n'
         '/shares/fake_share_name\n\t\t1.1.1.10\n'
         '/shares/fake_share_name\n\t\t1.1.1.16\n'
         '/shares/fake_share_name\n\t\t\n', True))
    @ddt.unpack
    def test_disable_access_for_maintenance(self, output, hosts_match):
        """Maintenance saves current exports, then unexports each host."""
        fake_maintenance_path = "fake.path"
        self._helper.configuration.share_mount_path = '/shares'
        local_path = os.path.join(
            self._helper.configuration.share_mount_path, self.share_name)

        def fake_ssh_exec(*args, **kwargs):
            # Only the plain 'exportfs' listing returns the fixture output.
            if 'exportfs' in args[1] and '-u' not in args[1]:
                return output, ''
            else:
                return '', ''

        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(side_effect=fake_ssh_exec))
        self.mock_object(self._helper, '_sync_nfs_temp_and_perm_files')
        self.mock_object(self._helper, '_get_maintenance_file_path',
                         mock.Mock(return_value=fake_maintenance_path))

        self._helper.disable_access_for_maintenance(
            self.server, self.share_name)

        # The share's lines from /etc/exports are stashed for restore.
        self._helper._ssh_exec.assert_any_call(
            self.server,
            ['cat', const.NFS_EXPORTS_FILE,
             '|', 'grep', self.share_name,
             '|', 'sudo', 'tee', fake_maintenance_path]
        )
        self._helper._ssh_exec.assert_has_calls([
            mock.call(self.server, ['sudo', 'exportfs']),
        ])
        if hosts_match:
            self._helper._ssh_exec.assert_has_calls([
                mock.call(self.server, ['sudo', 'exportfs', '-u',
                          '"{}"'.format(':'.join(['1.1.1.10', local_path]))]),
                mock.call(self.server, ['sudo', 'exportfs', '-u',
                          '"{}"'.format(':'.join(['1.1.1.16', local_path]))]),
                mock.call(self.server, ['sudo', 'exportfs', '-u',
                          '"{}"'.format(':'.join(['*', local_path]))]),
            ])
        self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with(
            self.server
        )

    def test_restore_access_after_maintenance(self):
        """Restore re-appends saved exports, reloads, and removes the file."""
        fake_maintenance_path = "fake.path"
        self.mock_object(self._helper, '_get_maintenance_file_path',
                         mock.Mock(return_value=fake_maintenance_path))
        self.mock_object(self._helper, '_ssh_exec')

        self._helper.restore_access_after_maintenance(
            self.server, self.share_name)

        self._helper._ssh_exec.assert_called_once_with(
            self.server,
            ['cat', fake_maintenance_path,
             '|', 'sudo', 'tee', '-a', const.NFS_EXPORTS_FILE,
             '&&', 'sudo', 'exportfs', '-r', '&&', 'sudo', 'rm', '-f',
             fake_maintenance_path]
        )


@ddt.ddt
class CIFSHelperIPAccessTestCase(test.TestCase):
    """Test case for CIFS helper with IP access."""

    def setUp(self):
        super(CIFSHelperIPAccessTestCase, self).setUp()
        self.server_details = {'instance_id': 'fake',
                               'public_address': '1.2.3.4', }
        self.share_name = 'fake_share_name'
        self.fake_conf = manila.share.configuration.Configuration(None)
        self._ssh_exec = mock.Mock(return_value=('', ''))
        self._execute = mock.Mock(return_value=('', ''))
        self._helper = helpers.CIFSHelperIPAccess(self._execute,
                                                  self._ssh_exec,
                                                  self.fake_conf)
        self.access = dict(
            access_level=const.ACCESS_LEVEL_RW,
            access_type='ip',
            access_to='1.1.1.1')

    def test_init_helper(self):
        """Init probes Samba's registry config via 'net conf list'."""
        self._helper.init_helper(self.server_details)
        self._helper._ssh_exec.assert_called_once_with(
            self.server_details,
            ['sudo', 'net', 'conf', 'list'],
        )

    def test_create_export_share_does_not_exist(self):
        """When showshare fails, the share is added via 'net conf addshare'."""
        def fake_ssh_exec(*args, **kwargs):
            if 'showshare' in args[1]:
                raise exception.ProcessExecutionError()
            else:
                return '', ''

        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(side_effect=fake_ssh_exec))

        ret = self._helper.create_exports(self.server_details,
                                          self.share_name)
        expected_location = [{
            "is_admin_only": False,
            "path": "\\\\%s\\%s" % (
                self.server_details['public_address'], self.share_name),
            "metadata": {"export_location_metadata_example": "example"}
        }]
        self.assertEqual(expected_location, ret)
        share_path = os.path.join(
            self._helper.configuration.share_mount_path,
            self.share_name)
        self._helper._ssh_exec.assert_has_calls([
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'showshare', self.share_name, ]
            ),
            mock.call(
                self.server_details,
                [
                    'sudo', 'net', 'conf', 'addshare', self.share_name,
                    share_path, 'writeable=y', 'guest_ok=y',
                ]
            ),
            mock.call(self.server_details, mock.ANY),
        ])

    def test_create_export_share_does_not_exist_exception(self):
        """A failure after the showshare miss surfaces as ManilaException."""
        self.mock_object(self._helper, '_ssh_exec', mock.Mock(
            side_effect=[exception.ProcessExecutionError(), Exception('')]
        ))

        self.assertRaises(
            exception.ManilaException,
            self._helper.create_exports, self.server_details,
            self.share_name)

    def test_create_exports_share_exist_recreate_true(self):
        """recreate=True deletes the existing share before re-adding it."""
        ret = self._helper.create_exports(
            self.server_details, self.share_name, recreate=True)

        expected_location = [{
            "is_admin_only": False,
            "path": "\\\\%s\\%s" % (
                self.server_details['public_address'], self.share_name),
            "metadata": {"export_location_metadata_example": "example"}
        }]
        self.assertEqual(expected_location, ret)
        share_path = os.path.join(
            self._helper.configuration.share_mount_path,
            self.share_name)
        self._helper._ssh_exec.assert_has_calls([
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'showshare', self.share_name, ]
            ),
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'delshare', self.share_name, ]
            ),
            mock.call(
                self.server_details,
                [
                    'sudo', 'net', 'conf', 'addshare', self.share_name,
                    share_path, 'writeable=y', 'guest_ok=y',
                ]
            ),
            mock.call(self.server_details, mock.ANY),
        ])

    def test_create_export_share_exist_recreate_false(self):
        """recreate=False on an existing share raises ShareBackendException."""
        self.assertRaises(
            exception.ShareBackendException,
            self._helper.create_exports,
            self.server_details,
            self.share_name,
            recreate=False,
        )
        self._helper._ssh_exec.assert_has_calls([
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'showshare', self.share_name, ]
            ),
        ])

    def test_remove_exports(self):
        """Removal issues a single 'net conf delshare'."""
        self._helper.remove_exports(self.server_details, self.share_name)

        self._helper._ssh_exec.assert_called_once_with(
            self.server_details,
            ['sudo', 'net', 'conf', 'delshare', self.share_name],
        )

    def test_remove_export_forcibly(self):
        """If delshare fails, the share is force-closed via smbcontrol."""
        delshare_command = ['sudo', 'net', 'conf', 'delshare',
                            self.share_name]

        def fake_ssh_exec(*args, **kwargs):
            if delshare_command == args[1]:
                raise exception.ProcessExecutionError()
            else:
                return ('', '')

        self.mock_object(self._helper, '_ssh_exec',
                         mock.Mock(side_effect=fake_ssh_exec))

        self._helper.remove_exports(self.server_details, self.share_name)

        self._helper._ssh_exec.assert_has_calls([
            mock.call(
                self.server_details,
                ['sudo', 'net', 'conf', 'delshare', self.share_name],
            ),
            mock.call(
                self.server_details,
                ['sudo', 'smbcontrol', 'all', 'close-share',
                 self.share_name],
            ),
        ])

    def test_update_access_wrong_access_level(self):
        """This CIFS helper supports only RW access."""
        access_rules = [test_generic.get_fake_access_rule(
            '2.2.2.2', const.ACCESS_LEVEL_RO), ]
        self.assertRaises(
            exception.InvalidShareAccessLevel,
            self._helper.update_access,
            self.server_details, self.share_name, access_rules, [], [])

    def test_update_access_wrong_access_type(self):
        """This CIFS helper supports only IP access rules."""
        access_rules = [test_generic.get_fake_access_rule(
            '2.2.2.2', const.ACCESS_LEVEL_RW, access_type='fake'), ]
        self.assertRaises(
            exception.InvalidShareAccess,
            self._helper.update_access,
            self.server_details, self.share_name, access_rules, [], [])

    def test_update_access(self):
        """The full rule set is written as the 'hosts allow' parameter."""
        access_rules = [test_generic.get_fake_access_rule(
            '1.1.1.1', const.ACCESS_LEVEL_RW), ]

        self._helper.update_access(self.server_details, self.share_name,
                                   access_rules, [], [])

        self._helper._ssh_exec.assert_called_once_with(
            self.server_details,
            ['sudo', 'net', 'conf', 'setparm', self.share_name,
             'hosts allow', '1.1.1.1'])

    def test_get_allow_hosts(self):
        """'hosts allow' getparm output is split into a host list."""
        self.mock_object(self._helper, '_ssh_exec', mock.Mock(
            return_value=('1.1.1.1 2.2.2.2 3.3.3.3', '')))
        expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3']

        result = self._helper._get_allow_hosts(
            self.server_details, self.share_name)

        self.assertEqual(expected, result)
        cmd = ['sudo', 'net', 'conf', 'getparm', self.share_name,
               'hosts allow']
        self._helper._ssh_exec.assert_called_once_with(
            self.server_details, cmd)

    @ddt.data(
        '', '1.2.3.4:/nfs/like/export', '/1.2.3.4/foo', '\\1.2.3.4\\foo',
        '//1.2.3.4\\mixed_slashes_and_backslashes_one',
        '\\\\1.2.3.4/mixed_slashes_and_backslashes_two')
    def test__get_share_group_name_from_export_location(self,
                                                        export_location):
        """Anything not shaped like //host/share or \\\\host\\share fails."""
        self.assertRaises(
            exception.InvalidShare,
            self._helper._get_share_group_name_from_export_location,
            export_location)

    @ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo')
    def test_get_exports_for_share(self, export_location):
        """The share group name is re-announced under the public address."""
        server = dict(public_address='1.2.3.4')
        self.mock_object(
            self._helper, '_get_share_group_name_from_export_location',
            mock.Mock(side_effect=(
                self._helper._get_share_group_name_from_export_location)))

        result = self._helper.get_exports_for_share(server, export_location)

        expected_export_location = [{
            "is_admin_only": False,
            "path": "\\\\%s\\foo" % server['public_address'],
            "metadata": {"export_location_metadata_example": "example"}
        }]
        self.assertEqual(expected_export_location, result)
        (self._helper._get_share_group_name_from_export_location.
         assert_called_once_with(export_location))

    @ddt.data(
        {'public_address_with_suffix': 'foo'},
        {'with_prefix_public_address': 'bar'},
        {'with_prefix_public_address_and_with_suffix': 'quuz'},
        {})
    def test_get_exports_for_share_with_exception(self, server):
        """A server dict without a public address key is rejected."""
        export_location = '1.2.3.4:/foo/bar'

        self.assertRaises(
            exception.ManilaException,
            self._helper.get_exports_for_share, server, export_location)

    @ddt.data('//5.6.7.8/foo', '\\\\5.6.7.8\\foo')
    def test_get_share_path_by_export_location(self, export_location):
        """The path comes from 'net conf getparm <group> path', stripped."""
        fake_path = ' /bar/quuz\n '
        fake_server = dict()
        self.mock_object(
            self._helper, '_ssh_exec',
            mock.Mock(return_value=(fake_path, 'fake')))
        self.mock_object(
            self._helper, '_get_share_group_name_from_export_location',
            mock.Mock(side_effect=(
                self._helper._get_share_group_name_from_export_location)))

        result = self._helper.get_share_path_by_export_location(
            fake_server, export_location)

        self.assertEqual('/bar/quuz', result)
        self._helper._ssh_exec.assert_called_once_with(
            fake_server, ['sudo', 'net', 'conf', 'getparm', 'foo', 'path'])
        (self._helper._get_share_group_name_from_export_location.
assert_called_once_with(export_location)) def test_disable_access_for_maintenance(self): allowed_hosts = ['test', 'test2'] maintenance_path = os.path.join( self._helper.configuration.share_mount_path, "%s.maintenance" % self.share_name) self.mock_object(self._helper, '_set_allow_hosts') self.mock_object(self._helper, '_get_allow_hosts', mock.Mock(return_value=allowed_hosts)) self._helper.disable_access_for_maintenance( self.server_details, self.share_name) self._helper._get_allow_hosts.assert_called_once_with( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_called_once_with( self.server_details, [], self.share_name) kickoff_user_cmd = ['sudo', 'smbstatus', '-S'] self._helper._ssh_exec.assert_any_call( self.server_details, kickoff_user_cmd) valid_cmd = ['echo', "'test test2'", '|', 'sudo', 'tee', maintenance_path] self._helper._ssh_exec.assert_any_call( self.server_details, valid_cmd) def test__kick_out_users_success(self): smbstatus_return = """Service pid machine Connected at ------------------------------------------------------- fake_share_name 1001 fake_machine1 Thu Sep 14 14:59:07 2017 fake_share_name 1002 fake_machine2 Thu Sep 14 14:59:07 2017 """ self.mock_object(self._helper, '_ssh_exec', mock.Mock( side_effect=[(smbstatus_return, "fake_stderr"), ("fake", "fake")])) self._helper._kick_out_users(self.server_details, self.share_name) self._helper._ssh_exec.assert_any_call( self.server_details, ['sudo', 'smbstatus', '-S']) self._helper._ssh_exec.assert_any_call( self.server_details, ["sudo", "kill", "-15", "1001", "1002"]) def test__kick_out_users_failed(self): smbstatus_return = """Service pid machine Connected at ------------------------------------------------------- fake line """ self.mock_object(self._helper, '_ssh_exec', mock.Mock( return_value=(smbstatus_return, "fake_stderr"))) self.assertRaises(exception.ShareBackendException, self._helper._kick_out_users, self.server_details, self.share_name) def 
test_restore_access_after_maintenance(self): fake_maintenance_path = "test.path" self.mock_object(self._helper, '_set_allow_hosts') self.mock_object(self._helper, '_get_maintenance_file_path', mock.Mock(return_value=fake_maintenance_path)) self.mock_object(self._helper, '_ssh_exec', mock.Mock(side_effect=[("fake fake2", 0), "fake"])) self._helper.restore_access_after_maintenance( self.server_details, self.share_name) self._helper._set_allow_hosts.assert_called_once_with( self.server_details, ['fake', 'fake2'], self.share_name) self._helper._ssh_exec.assert_any_call( self.server_details, ['cat', fake_maintenance_path]) self._helper._ssh_exec.assert_any_call( self.server_details, ['sudo', 'rm', '-f', fake_maintenance_path]) @ddt.ddt class CIFSHelperUserAccessTestCase(test.TestCase): """Test case for CIFS helper with user access.""" access_rw = dict( access_level=const.ACCESS_LEVEL_RW, access_type='user', access_to='manila-user') access_ro = dict( access_level=const.ACCESS_LEVEL_RO, access_type='user', access_to='manila-user') def setUp(self): super(CIFSHelperUserAccessTestCase, self).setUp() self.server_details = {'instance_id': 'fake', 'public_address': '1.2.3.4', } self.share_name = 'fake_share_name' self.fake_conf = manila.share.configuration.Configuration(None) self._ssh_exec = mock.Mock(return_value=('', '')) self._execute = mock.Mock(return_value=('', '')) self._helper = helpers.CIFSHelperUserAccess( self._execute, self._ssh_exec, self.fake_conf) def test_update_access_exception_type(self): access_rules = [test_generic.get_fake_access_rule( 'user1', const.ACCESS_LEVEL_RW, access_type='ip')] self.assertRaises(exception.InvalidShareAccess, self._helper.update_access, self.server_details, self.share_name, access_rules, [], []) def test_update_access(self): access_list = [test_generic.get_fake_access_rule( 'user1', const.ACCESS_LEVEL_RW, access_type='user'), test_generic.get_fake_access_rule( 'user2', const.ACCESS_LEVEL_RO, access_type='user')] 
self._helper.update_access(self.server_details, self.share_name, access_list, [], []) self._helper._ssh_exec.assert_has_calls([ mock.call(self.server_details, ['sudo', 'net', 'conf', 'setparm', self.share_name, 'valid users', 'user1']), mock.call(self.server_details, ['sudo', 'net', 'conf', 'setparm', self.share_name, 'read list', 'user2']) ]) def test_update_access_exception_level(self): access_rules = [test_generic.get_fake_access_rule( 'user1', 'fake_level', access_type='user'), ] self.assertRaises( exception.InvalidShareAccessLevel, self._helper.update_access, self.server_details, self.share_name, access_rules, [], []) @ddt.ddt class NFSSynchronizedTestCase(test.TestCase): @helpers.nfs_synchronized def wrapped_method(self, server, share_name): return server['instance_id'] + share_name @ddt.data( ({'lock_name': 'FOO', 'instance_id': 'QUUZ'}, 'nfs-FOO'), ({'instance_id': 'QUUZ'}, 'nfs-QUUZ'), ) @ddt.unpack def test_with_lock_name(self, server, expected_lock_name): share_name = 'fake_share_name' self.mock_object( helpers.utils, 'synchronized', mock.Mock(side_effect=helpers.utils.synchronized)) result = self.wrapped_method(server, share_name) self.assertEqual(server['instance_id'] + share_name, result) helpers.utils.synchronized.assert_called_once_with( expected_lock_name, external=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/test_lvm.py0000664000175000017500000010535100000000000022455 0ustar00zuulzuul00000000000000# Copyright 2012 NetApp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the LVM driver module.""" import os from unittest import mock import ddt from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import timeutils from manila.common import constants as const from manila import context from manila import exception from manila.privsep import common as privsep_common from manila.privsep import filesystem from manila.privsep import lvm as privsep_lvm from manila.privsep import os as os_routines from manila.share import configuration from manila.share.drivers import lvm from manila import test from manila.tests.db import fakes as db_fakes from manila.tests import fake_utils from manila.tests.share.drivers import test_generic CONF = cfg.CONF def fake_share(**kwargs): share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', } share.update(kwargs) return db_fakes.FakeModel(share) def fake_snapshot(**kwargs): snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_proto': 'NFS', 'export_location': '127.0.0.1:/mnt/nfs/volume-00002', 'share': { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', }, } snapshot.update(kwargs) return db_fakes.FakeModel(snapshot) def fake_access(**kwargs): access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'access_level': 'rw', 'state': 'active', } access.update(kwargs) return db_fakes.FakeModel(access) @ddt.ddt class LVMShareDriverTestCase(test.TestCase): """Tests LVMShareDriver.""" def 
setUp(self): super(LVMShareDriverTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._context = context.get_admin_context() CONF.set_default('lvm_share_volume_group', 'fakevg') CONF.set_default('lvm_share_export_ips', ['10.0.0.1', '10.0.0.2']) CONF.set_default('driver_handles_share_servers', False) CONF.set_default('reserved_share_percentage', 50) CONF.set_default('reserved_share_from_snapshot_percentage', 30) CONF.set_default('reserved_share_extend_percentage', 30) self._helper_cifs = mock.Mock() self._helper_nfs = mock.Mock() self.fake_conf = configuration.Configuration(None) self._db = mock.Mock() self._os = lvm.os = mock.Mock() self._os.path.join = os.path.join self._driver = lvm.LVMShareDriver(self._db, configuration=self.fake_conf) self._driver._helpers = { 'CIFS': self._helper_cifs, 'NFS': self._helper_nfs, } self.share = fake_share() self.access = fake_access() self.snapshot = fake_snapshot() self.server = { 'public_addresses': self.fake_conf.lvm_share_export_ips, 'instance_id': 'LVM', 'lock_name': 'manila_lvm', } # Used only to test compatibility with share manager self.share_server = "fake_share_server" def tearDown(self): super(LVMShareDriverTestCase, self).tearDown() fake_utils.fake_execute_set_repliers([]) fake_utils.fake_execute_clear_log() def test_do_setup(self): CONF.set_default('lvm_share_helpers', ['NFS=fakenfs']) lvm.importutils = mock.Mock() lvm.importutils.import_class.return_value = self._helper_nfs self._driver.do_setup(self._context) lvm.importutils.import_class.assert_has_calls([ mock.call('fakenfs') ]) def test_check_for_setup_error(self): out, err = '\n fake1\n fakevg\n fake2\n', '' self.mock_object(privsep_lvm, 'list_vgs_get_name', mock.Mock(return_value=(out, err))) self._driver.check_for_setup_error() privsep_lvm.list_vgs_get_name.assert_called_once() def test_check_for_setup_error_no_vg(self): out = '\n fake0\n fake1\n fake2\n' err = '' self.mock_object(privsep_lvm, 'list_vgs_get_name', mock.Mock(return_value=(out, 
err))) self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_check_for_setup_error_no_export_ips(self): out = '\n fake1\n fakevg\n fake2\n' err = '' self.mock_object(privsep_lvm, 'list_vgs_get_name', mock.Mock(return_value=(out, err))) CONF.set_default('lvm_share_export_ips', None) self.assertRaises(exception.InvalidParameterValue, self._driver.check_for_setup_error) def test_local_path_normal(self): share = fake_share(name='fake_sharename') CONF.set_default('lvm_share_volume_group', 'fake_vg') ret = self._driver._get_local_path(share) self.assertEqual('/dev/mapper/fake_vg-fake_sharename', ret) def test_local_path_escapes(self): share = fake_share(name='fake-sharename') CONF.set_default('lvm_share_volume_group', 'fake-vg') ret = self._driver._get_local_path(share) self.assertEqual('/dev/mapper/fake--vg-fake--sharename', ret) def test_create_share(self): CONF.set_default('lvm_share_mirrors', 0) self._driver._mount_device = mock.Mock() lv_create_mock = privsep_lvm.lvcreate = mock.Mock() lv_create_args = [ self.share['size'], self.share['name'], CONF.lvm_share_volume_group, 0, 0] self.mock_object(privsep_common, 'execute_with_retries') self.mock_object(filesystem, 'make_filesystem') self.mock_object(self._driver, '_get_mount_point_name', mock.Mock(return_value=self.share['name'])) ret = self._driver.create_share(self._context, self.share, self.share_server) self._driver._mount_device.assert_called_with( self.share, '/dev/mapper/fakevg-fakename') privsep_common.execute_with_retries.assert_called_once_with( lv_create_mock, lv_create_args, CONF.num_shell_tries) filesystem.make_filesystem.assert_called_once_with( 'ext4', '/dev/mapper/fakevg-fakename') self.assertEqual(self._helper_nfs.create_exports.return_value, ret) def test_create_share_from_snapshot(self): CONF.set_default('lvm_share_mirrors', 0) self._driver._mount_device = mock.Mock() snapshot_instance = { 'snapshot_id': 'fakesnapshotid', 'name': 'fakename' } mount_share = 
'/dev/mapper/fakevg-fakename' mount_snapshot = '/dev/mapper/fakevg-fakename' self._helper_nfs.create_export.return_value = 'fakelocation' lv_create_mock = privsep_lvm.lvcreate = mock.Mock() lv_create_args = [ self.share['size'], self.share['name'], CONF.lvm_share_volume_group, 0, 0] self.mock_object(privsep_common, 'execute_with_retries') self.mock_object(os_routines, 'is_data_definition_direct_io_supported', mock.Mock(return_value=True)) self.mock_object(os_routines, 'data_definition') self.mock_object(os_routines, 'mount') self.mock_object(os_routines, 'chmod') self.mock_object(filesystem, 'make_filesystem') self.mock_object(filesystem, 'e2fsck') self.mock_object(filesystem, 'tune2fs') self.mock_object(self._driver, '_get_mount_point_name', mock.Mock(return_value=self.share['name'])) self._driver.create_share_from_snapshot(self._context, self.share, snapshot_instance, self.share_server) self._driver._mount_device.assert_called_with(self.share, mount_snapshot) privsep_common.execute_with_retries.assert_called_once_with( lv_create_mock, lv_create_args, 3) filesystem.make_filesystem.assert_called_once_with( 'ext4', '/dev/mapper/fakevg-fakename') filesystem.e2fsck.assert_called_once_with( mount_share) filesystem.tune2fs.assert_called_once_with( mount_share) (os_routines.is_data_definition_direct_io_supported .assert_called_once_with( mount_snapshot, mount_share)) os_routines.data_definition.assert_called_once_with( mount_snapshot, mount_share, (self.share['size'] * 1024), use_direct_io=True) def test_create_share_mirrors(self): share = fake_share(size='2048') CONF.set_default('lvm_share_mirrors', 2) lv_create_mock = privsep_lvm.lvcreate = mock.Mock() lv_create_args = [ '2048', self.share['name'], CONF.lvm_share_volume_group, 2, '2'] self._driver._mount_device = mock.Mock() self.mock_object(privsep_common, 'execute_with_retries') self.mock_object(filesystem, 'make_filesystem') self.mock_object(self._driver, '_get_mount_point_name', 
mock.Mock(return_value=self.share['name'])) ret = self._driver.create_share(self._context, share, self.share_server) self._driver._mount_device.assert_called_with( share, '/dev/mapper/fakevg-fakename') privsep_common.execute_with_retries.assert_called_once_with( lv_create_mock, lv_create_args, 3) filesystem.make_filesystem.assert_called_once_with( 'ext4', '/dev/mapper/fakevg-fakename') self.assertEqual(self._helper_nfs.create_exports.return_value, ret) def test_deallocate_container(self): mock_lvremove = privsep_lvm.lvremove = mock.Mock() self.mock_object(privsep_common, 'execute_with_retries') self._driver._deallocate_container(self.share['name']) privsep_common.execute_with_retries.assert_called_once_with( mock_lvremove, [CONF.lvm_share_volume_group, self.share['name']], 3 ) def test_deallocate_container_error(self): def _fake_exec(*args, **kwargs): raise exception.ProcessExecutionError(stderr="error") self.mock_object(privsep_common, 'execute_with_retries', _fake_exec) self.assertRaises(exception.ProcessExecutionError, self._driver._deallocate_container, self.share['name']) @ddt.data( 'Logical volume "fake/fake-volume" not found\n', 'Failed to find logical volume "fake/fake-volume"\n') def test_deallocate_container_not_found_error(self, error_msg): def _fake_exec(*args, **kwargs): raise exception.ProcessExecutionError(stderr=error_msg) self.mock_object(privsep_common, 'execute_with_retries', _fake_exec) self._driver._deallocate_container(self.share['name']) @mock.patch.object(lvm.LVMShareDriver, '_update_share_stats', mock.Mock()) def test_get_share_stats(self): with mock.patch.object(self._driver, '_stats', mock.Mock) as stats: self.assertEqual(stats, self._driver.get_share_stats()) self.assertFalse(self._driver._update_share_stats.called) @mock.patch.object(lvm.LVMShareDriver, '_update_share_stats', mock.Mock()) def test_get_share_stats_refresh(self): with mock.patch.object(self._driver, '_stats', mock.Mock) as stats: self.assertEqual(stats, 
self._driver.get_share_stats(refresh=True)) self._driver._update_share_stats.assert_called_once_with() def test__unmount_device_not_mounted(self): mount_path = self._get_mount_path(self.share) error_msg = ( "umount: /opt/stack/data/manila/mnt/share-fake-share: not " "mounted.\n") umount_exception = exception.ProcessExecutionError(stderr=error_msg) self.mock_object( os_routines, 'umount', mock.Mock(side_effect=umount_exception)) self.mock_object(os_routines, 'rmdir') self._os.path.exists.return_value = True self._driver._unmount_device(self.share, raise_if_missing=False) self._os.path.exists.assert_called_with(mount_path) os_routines.umount.assert_called_once_with(mount_path) def test__unmount_device_is_busy_error(self): error_msg = 'device is busy' umount_exception = exception.ProcessExecutionError(stderr=error_msg) self.mock_object( os_routines, 'umount', mock.Mock(side_effect=umount_exception)) self._os.path.exists.return_value = True mount_path = self._get_mount_path(self.share) self.assertRaises(exception.ShareBusyException, self._driver._unmount_device, self.share) os_routines.umount.assert_called_once_with(mount_path) def test__unmount_device_error(self): error_msg = 'fake error' mount_path = self._get_mount_path(self.share) umount_exception = exception.ProcessExecutionError(stderr=error_msg) self.mock_object( os_routines, 'umount', mock.Mock(side_effect=umount_exception)) self._os.path.exists.return_value = True self.assertRaises(processutils.ProcessExecutionError, self._driver._unmount_device, self.share) self._os.path.exists.assert_called_with(mount_path) os_routines.umount.assert_called_once_with(mount_path) def test__unmount_device_rmdir_error(self): error_msg = 'fake error' mount_path = self._get_mount_path(self.share) umount_exception = exception.ProcessExecutionError(stderr=error_msg) self.mock_object(os_routines, 'umount') self.mock_object(os_routines, 'rmdir', mock.Mock(side_effect=umount_exception)) self._os.path.exists.return_value = True 
self.assertRaises(exception.ShareBackendException, self._driver._unmount_device, self.share) self._os.path.exists.assert_called_with(mount_path) os_routines.umount.assert_called_once_with(mount_path) os_routines.rmdir.assert_called_once_with(mount_path) def test_create_snapshot(self): mock_lv_create = privsep_lvm.lvcreate = mock.Mock() orig_lv_name = "%s/%s" % (CONF.lvm_share_volume_group, self.snapshot['share_name']) device_path = '/dev/mapper/fakevg-%s' % self.snapshot['name'] lv_create_args = [ self.snapshot['share']['size'], self.snapshot['share']['name'], orig_lv_name] self.mock_object(privsep_common, 'execute_with_retries') self.mock_object(filesystem, 'e2fsck') self.mock_object(filesystem, 'tune2fs') self.mock_object(os_routines, 'mount') self.mock_object(os_routines, 'chmod') self._driver.create_snapshot(self._context, self.snapshot, self.share_server) mount_path = self._get_mount_path(self.snapshot) expected_exec = [ "mkdir -p " + mount_path, ] self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) privsep_common.execute_with_retries( mock_lv_create, lv_create_args, CONF.num_shell_tries) filesystem.e2fsck.assert_called_once_with(device_path) filesystem.tune2fs.assert_called_once_with(device_path) os_routines.mount.assert_called_once_with( "/dev/mapper/fakevg-fakesnapshotname", mount_path) os_routines.chmod.assert_called_once_with( '777', mount_path) def test_ensure_share(self): device_name = '/dev/mapper/fakevg-fakename' self.mock_object(self._driver, '_get_mount_point_name', mock.Mock(return_value=self.share['name'])) with mock.patch.object(self._driver, '_mount_device', mock.Mock(return_value='fake_location')): self._driver.ensure_share(self._context, self.share, self.share_server) self._driver._mount_device.assert_called_with(self.share, device_name) self._helper_nfs.create_exports.assert_called_once_with( self.server, self.share['name'], recreate=True) def test_delete_share(self): self.mock_object(self._driver, '_get_mount_point_name', 
mock.Mock(return_value=self.share['name'])) mount_path = self._get_mount_path(self.share) self._helper_nfs.remove_export(mount_path, self.share['name']) self._driver._delete_share(self._context, self.share) def test_delete_snapshot(self): mount_path = self._get_mount_path(self.snapshot) self.mock_object(os_routines, 'umount') self.mock_object(os_routines, 'rmdir') self.mock_object(privsep_common, 'execute_with_retries') self.mock_object(self._driver, '_deallocate_container') self._driver.delete_snapshot(self._context, self.snapshot, self.share_server) os_routines.umount.assert_called_once_with(mount_path) os_routines.rmdir.assert_called_once_with(mount_path) self._driver._deallocate_container.assert_called_once_with( self.snapshot['name']) def test_delete_share_invalid_share(self): self.mock_object(self._driver, '_unmount_device') self.mock_object(self._driver, '_deallocate_container') self._driver._get_helper = mock.Mock( side_effect=exception.InvalidShare(reason='fake')) self.mock_object(self._driver, '_get_mount_point_name', mock.Mock(return_value=self.share['name'])) self._driver.delete_share(self._context, self.share, self.share_server) self._driver._unmount_device.assert_called_once_with( self.share, raise_if_missing=False, retry_busy_device=True) self._driver._deallocate_container.assert_called_once_with( self.share['name']) def test_delete_share_process_execution_error(self): self.mock_object( self._helper_nfs, 'remove_export', mock.Mock(side_effect=exception.ProcessExecutionError)) self.mock_object(self._driver, '_get_mount_point_name', mock.Mock(return_value=self.share['name'])) self._driver._delete_share(self._context, self.share) self._helper_nfs.remove_exports.assert_called_once_with( self.server, self.share['name']) @ddt.data(const.ACCESS_LEVEL_RW, const.ACCESS_LEVEL_RO) def test_update_access(self, access_level): access_rules = [test_generic.get_fake_access_rule( '1.1.1.1', access_level), ] add_rules = [test_generic.get_fake_access_rule( '2.2.2.2', 
access_level), ] delete_rules = [test_generic.get_fake_access_rule( '3.3.3.3', access_level), ] self.mock_object(self._driver, '_get_mount_point_name', mock.Mock(return_value=self.share['name'])) self._driver.update_access(self._context, self.share, access_rules, add_rules=add_rules, delete_rules=delete_rules, update_rules=None, share_server=self.server) (self._driver._helpers[self.share['share_proto']]. update_access.assert_called_once_with( self.server, self.share['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules)) @ddt.data((['1001::1001/129'], False), (['1.1.1.256'], False), (['1001::1001'], [6]), ('1.1.1.0', [4]), (['1001::1001', '1.1.1.0'], [6, 4]), (['1001::1001/129', '1.1.1.0'], False)) @ddt.unpack def test_get_configured_ip_versions(self, configured_ips, configured_ip_version): CONF.set_default('lvm_share_export_ips', configured_ips) if configured_ip_version: self.assertEqual(configured_ip_version, self._driver.get_configured_ip_versions()) else: self.assertRaises(exception.InvalidInput, self._driver.get_configured_ip_versions) def test_mount_device(self): mount_path = self._get_mount_path(self.share) self.mock_object(os_routines, 'mount') self.mock_object(os_routines, 'chmod') expected_exec = [ "mkdir -p %s" % (mount_path,), ] device_name = 'fakedevice' ret = self._driver._mount_device(self.share, device_name) self.assertEqual(expected_exec, fake_utils.fake_execute_get_log()) os_routines.mount.assert_called_once_with(device_name, mount_path) os_routines.chmod.assert_called_once_with('777', mount_path) self.assertEqual(mount_path, ret) def test_mount_device_already(self): def exec_runner(*args, **kwargs): if 'mount' in args and '-l' not in args: raise exception.ProcessExecutionError() else: return 'fakedevice', '' self.mock_object(self._driver, '_execute', exec_runner) self.mock_object(os_routines, 'mount') self.mock_object(os_routines, 'chmod') mount_path = self._get_mount_path(self.share) ret = self._driver._mount_device(self.share, 
'fakedevice') self.assertEqual(mount_path, ret) def test_mount_device_error(self): self.mock_object(self._driver, '_execute') self.mock_object( os_routines, 'mount', mock.Mock(side_effect=exception.ProcessExecutionError)) self.mock_object( os_routines, 'list_mounts', mock.Mock(return_value=('fake', ''))) self.assertRaises(exception.ProcessExecutionError, self._driver._mount_device, self.share, 'fakedevice') def test_get_helper(self): share_cifs = fake_share(share_proto='CIFS') share_nfs = fake_share(share_proto='NFS') share_fake = fake_share(share_proto='FAKE') self.assertEqual(self._driver._get_helper(share_cifs), self._helper_cifs) self.assertEqual(self._driver._get_helper(share_nfs), self._helper_nfs) self.assertRaises(exception.InvalidShare, self._driver._get_helper, share_fake) def _get_mount_path(self, share): return os.path.join(CONF.lvm_share_export_root, share['name']) @ddt.data(True, False) def test__unmount_device_with_retry_busy_device(self, retry_busy_device): execute_sideeffects = [ exception.ProcessExecutionError(stderr='device is busy'), exception.ProcessExecutionError(stderr='target is busy'), None, None ] if retry_busy_device else [None, None] mount_path = self._get_mount_path(self.share) self._os.path.exists.return_value = True self.mock_object(os_routines, 'umount', mock.Mock( side_effect=execute_sideeffects)) self.mock_object(os_routines, 'rmdir') self._driver._unmount_device(self.share, retry_busy_device=retry_busy_device) num_of_times_umount_is_called = 3 if retry_busy_device else 1 self._os.path.exists.assert_called_with(mount_path) os_routines.umount.assert_has_calls([ mock.call(mount_path)] * num_of_times_umount_is_called) os_routines.rmdir.assert_called_once_with(mount_path) def test_extend_share(self): local_path = self._driver._get_local_path(self.share) self.mock_object(self._driver, '_extend_container') self.mock_object(self._driver, '_execute') self._driver.extend_share(self.share, 3) 
self._driver._extend_container.assert_called_once_with(self.share, local_path, 3) def test_ssh_exec_as_root(self): command = ['fake_command'] self.mock_object(self._driver, '_execute') self._driver._ssh_exec_as_root('fake_server', command) self._driver._execute.assert_called_once_with('fake_command', check_exit_code=True) def test_ssh_exec_as_root_with_sudo(self): command = ['sudo', 'fake_command'] self.mock_object(self._driver, '_execute') self._driver._ssh_exec_as_root('fake_server', command) self._driver._execute.assert_called_once_with( 'fake_command', run_as_root=True, check_exit_code=True) def test_extend_container(self): mock_lvextend = privsep_lvm.lvextend = mock.Mock() self.mock_object(privsep_common, 'execute_with_retries') self._driver._extend_container(self.share, 'device_name', 3) privsep_common.execute_with_retries.assert_called_once_with( mock_lvextend, ['device_name', 3], CONF.num_shell_tries) def test_get_share_server_pools(self): expected_result = [{ 'pool_name': 'lvm-single-pool', 'total_capacity_gb': 33, 'free_capacity_gb': 22, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'mount_point_name_support': True, }, ] out, err = "VSize 33g VFree 22g", None self.mock_object( privsep_lvm, 'get_vgs', mock.Mock(return_value=(out, err))) self.assertEqual(expected_result, self._driver.get_share_server_pools()) @ddt.data(True, False) def test_copy_volume_error(self, use_direct_io): src_str = 'src' dest_str = 'dest' self.mock_object( os_routines, 'is_data_definition_direct_io_supported', mock.Mock(return_value=use_direct_io)) self.mock_object( os_routines, 'data_definition', mock.Mock(side_effect=exception.ProcessExecutionError)) self.assertRaises( exception.ShareBackendException, self._driver._copy_volume, src_str, dest_str, 1) (os_routines.is_data_definition_direct_io_supported .assert_called_once_with( src_str, dest_str)) os_routines.data_definition.assert_called_once_with( src_str, dest_str, (1 * 1024), 
use_direct_io=use_direct_io) @ddt.data((['1.1.1.1'], 4), (['1001::1001'], 6)) @ddt.unpack def test_update_share_stats(self, configured_ip, version): CONF.set_default('lvm_share_export_ips', configured_ip) self.mock_object(self._driver, 'get_share_server_pools', mock.Mock(return_value='test-pool')) self._driver._update_share_stats() self.assertEqual('LVM', self._driver._stats['share_backend_name']) self.assertEqual('NFS_CIFS', self._driver._stats['storage_protocol']) self.assertEqual(50, self._driver._stats['reserved_percentage']) self.assertTrue(self._driver._stats['snapshot_support']) self.assertEqual('LVMShareDriver', self._driver._stats['driver_name']) self.assertEqual('test-pool', self._driver._stats['pools']) self.assertEqual(version == 4, self._driver._stats['ipv4_support']) self.assertEqual(version == 6, self._driver._stats['ipv6_support']) def test_revert_to_snapshot(self): share_local_path = '/dev/mapper/fakevg-fakename' snapshot_local_path = '/dev/mapper/fakevg-fakesnapshotname' mock_update_access = self.mock_object( self._helper_nfs, 'update_access') mock__unmount_device = self.mock_object( self._driver, '_unmount_device') mock_lvconvert = self.mock_object(privsep_lvm, 'lvconvert') mock_create_snapshot = self.mock_object( self._driver, '_create_snapshot') mock_mount_device = self.mock_object( self._driver, '_mount_device') mock_get_local_path = self.mock_object( self._driver, '_get_local_path', mock.Mock(side_effect=[share_local_path, snapshot_local_path])) self.mock_object(self._driver, '_get_mount_point_name', mock.Mock(return_value=self.snapshot['name'])) snapshot_parent_share = self.snapshot['share'] self._driver.revert_to_snapshot(self._context, self.snapshot, [], [], self.share_server) self.assertEqual(4, mock_update_access.call_count) mock__unmount_device.assert_has_calls( [mock.call(self.snapshot), mock.call(self.snapshot['share'])]) mock_lvconvert.assert_called_once_with( CONF.lvm_share_volume_group, self.snapshot['name']) 
mock_create_snapshot.assert_called_once_with( self._context, self.snapshot) mock_mount_device.assert_has_calls( [mock.call(snapshot_parent_share, share_local_path), mock.call(self.snapshot, snapshot_local_path)] ) mock_get_local_path.assert_has_calls( [mock.call(snapshot_parent_share), mock.call(self.snapshot)]) def test_snapshot_update_access(self): access_rules = [{ 'access_type': 'ip', 'access_to': '1.1.1.1', 'access_level': 'ro', }] add_rules = [{ 'access_type': 'ip', 'access_to': '2.2.2.2', 'access_level': 'ro', }] delete_rules = [{ 'access_type': 'ip', 'access_to': '3.3.3.3', 'access_level': 'ro', }] self._driver.snapshot_update_access(self._context, self.snapshot, access_rules, add_rules, delete_rules) (self._driver._helpers[self.snapshot['share']['share_proto']]. update_access.assert_called_once_with( self.server, self.snapshot['name'], access_rules, add_rules=add_rules, delete_rules=delete_rules)) @mock.patch.object(timeutils, 'utcnow', mock.Mock( return_value='fake_date')) def test_update_share_usage_size(self): mount_path = self._get_mount_path(self.share) self._os.path.exists.return_value = True self.mock_object( self._driver, '_execute', mock.Mock(return_value=( "Mounted on Used " + mount_path + " 1G", None))) update_shares = self._driver.update_share_usage_size( self._context, [self.share, ]) self._os.path.exists.assert_called_with(mount_path) self.assertEqual( [{'id': 'fakeid', 'used_size': '1', 'gathered_at': 'fake_date'}], update_shares) self._driver._execute.assert_called_once_with( 'df', '-l', '--output=target,used', '--block-size=g') @mock.patch.object(timeutils, 'utcnow', mock.Mock( return_value='fake_date')) def test_update_share_usage_size_multiple_share(self): share1 = fake_share(id='fakeid_get_fail', name='get_fail') share2 = fake_share(id='fakeid_success', name='get_success') share3 = fake_share(id='fakeid_not_exist', name='get_not_exist') mount_path2 = self._get_mount_path(share2) mount_path3 = self._get_mount_path(share3) 
self._os.path.exists.side_effect = [True, True, False] self.mock_object( self._driver, '_execute', mock.Mock(return_value=( "Mounted on Used " + mount_path2 + " 1G", None))) update_shares = self._driver.update_share_usage_size( self._context, [share1, share2, share3]) self._os.path.exists.assert_called_with(mount_path3) self.assertEqual( [{'gathered_at': 'fake_date', 'id': 'fakeid_success', 'used_size': '1'}], update_shares) self._driver._execute.assert_called_with( 'df', '-l', '--output=target,used', '--block-size=g') def test_update_share_usage_size_fail(self): def _fake_exec(*args, **kwargs): raise exception.ProcessExecutionError(stderr="error") self.mock_object(self._driver, '_execute', _fake_exec) self.assertRaises(exception.ProcessExecutionError, self._driver.update_share_usage_size, self._context, [self.share]) def test_get_backend_info(self): backend_info = self._driver.get_backend_info(self._context) self.assertEqual( {'export_ips': ','.join(self.server['public_addresses']), 'db_version': mock.ANY}, backend_info) def test_get_mount_point_name_with_mount_point_name(self): share = {'mount_point_name': 'fake_mp_name', 'name': 'fakename'} result = self._driver._get_mount_point_name(share) self.assertEqual(result, 'fake_mp_name') def test_get_mount_point_name_without_mount_point_name(self): share = {'name': 'fakename'} result = self._driver._get_mount_point_name(share) self.assertEqual(result, 'fakename') def test_get_mount_point_name_with_empty_mount_point_name(self): share = {'mount_point_name': '', 'name': 'fakename'} result = self._driver._get_mount_point_name(share) self.assertEqual(result, 'fakename') def test_get_mount_point_name_with_none_mount_point_name(self): share = {'mount_point_name': None, 'name': 'fakename'} result = self._driver._get_mount_point_name(share) self.assertEqual(result, 'fakename') def test_get_mount_point_name_without_name(self): share = {'mount_point_name': 'fake_mp_name'} result = self._driver._get_mount_point_name(share) 
self.assertEqual(result, 'fake_mp_name') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/test_service_instance.py0000664000175000017500000034054700000000000025213 0ustar00zuulzuul00000000000000# Copyright (c) 2014 NetApp, Inc. # Copyright (c) 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the instance module.""" import os import time from unittest import mock import ddt import netaddr from oslo_config import cfg from oslo_utils import importutils from manila import exception from manila.share import configuration from manila.share import driver # noqa from manila.share.drivers import service_instance from manila import test from manila.tests import fake_compute from manila.tests import fake_image from manila.tests import fake_network from manila.tests import utils as test_utils CONF = cfg.CONF def fake_get_config_option(key): if key == 'driver_handles_share_servers': return True elif key == 'service_instance_password': return None elif key == 'service_instance_user': return 'fake_user' elif key == 'service_instance_flavor_id': return '100' elif key == 'service_instance_name_template': return 'fake_manila_service_instance_%s' elif key == 'service_image_name': return 'fake_service_image_name' elif key == 'manila_service_keypair_name': return 'fake_manila_service_keypair_name' elif key == 'path_to_private_key': return 
'fake_path_to_private_key' elif key == 'path_to_public_key': return 'fake_path_to_public_key' elif key == 'max_time_to_build_instance': return 500 elif key == 'connect_share_server_to_tenant_network': return False elif key == 'service_network_cidr': return '99.254.0.0/24' elif key == 'service_network_division_mask': return 27 elif key == 'service_network_host': return 'fake_service_network_host' elif key == 'service_network_name': return 'fake_service_network_name' elif key == 'interface_driver': return 'i.am.fake.VifDriver' elif key == 'admin_network_id': return None elif key == 'admin_subnet_id': return None elif key == 'backend_availability_zone': return None else: return mock.Mock() class FakeServiceInstance(object): def __init__(self, driver_config=None): super(FakeServiceInstance, self).__init__() self.compute_api = service_instance.compute.API() self.admin_context = service_instance.context.get_admin_context() self.driver_config = driver_config def get_config_option(self, key): return fake_get_config_option(key) class FakeNetworkHelper(service_instance.BaseNetworkhelper): @property def NAME(self): return service_instance.NEUTRON_NAME @property def neutron_api(self): if not hasattr(self, '_neutron_api'): self._neutron_api = mock.Mock() return self._neutron_api def __init__(self, service_instance_manager): self.get_config_option = service_instance_manager.get_config_option def get_network_name(self, network_info): """Return name of network.""" return 'fake_network_name' def setup_connectivity_with_service_instances(self): """Nothing to do in fake network helper.""" def setup_network(self, network_info): """Combine fake network data.""" return dict() def teardown_network(self, server_details): """Nothing to do in fake network helper.""" @ddt.ddt class ServiceInstanceManagerTestCase(test.TestCase): """Test suite for service instance manager.""" def setUp(self): super(ServiceInstanceManagerTestCase, self).setUp() self.instance_id = 'fake_instance_id' self.config 
= configuration.Configuration(None) self.config.safe_get = mock.Mock(side_effect=fake_get_config_option) self.mock_object(service_instance.compute, 'API', fake_compute.API) self.mock_object( service_instance.os.path, 'exists', mock.Mock(return_value=True)) self.mock_object(service_instance, 'NeutronNetworkHelper', mock.Mock(side_effect=FakeNetworkHelper)) self._manager = service_instance.ServiceInstanceManager(self.config) self._manager._load_public_key = mock.Mock(return_value='') self.mock_object(time, 'sleep') def test_get_config_option_from_driver_config(self): username1 = 'fake_username_1_%s' % self.id() username2 = 'fake_username_2_%s' % self.id() config_data = dict( DEFAULT=dict(service_instance_user=username1), CUSTOM=dict(service_instance_user=username2)) with test_utils.create_temp_config_with_opts(config_data): self.config = configuration.Configuration( service_instance.common_opts, config_group='CUSTOM') self._manager = service_instance.ServiceInstanceManager( self.config) result = self._manager.get_config_option('service_instance_user') self.assertEqual(username2, result) def test_get_config_option_from_common_config(self): username = 'fake_username_%s' % self.id() config_data = dict(DEFAULT=dict(service_instance_user=username)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() result = self._manager.get_config_option('service_instance_user') self.assertEqual(username, result) def test_get_neutron_network_helper(self): # Mock it again, because it was called in setUp method. 
self.mock_object(service_instance, 'NeutronNetworkHelper') config_data = dict(DEFAULT=dict(service_instance_user='fake_username', driver_handles_share_servers=True)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() self._manager.network_helper service_instance.NeutronNetworkHelper.assert_called_once_with( self._manager) def test_init_with_driver_config_and_handling_of_share_servers(self): self.mock_object(service_instance, 'NeutronNetworkHelper') config_data = dict(CUSTOM=dict( driver_handles_share_servers=True, service_instance_user='fake_user')) opts = service_instance.common_opts + driver.share_opts with test_utils.create_temp_config_with_opts(config_data): self.config = configuration.Configuration(opts, 'CUSTOM') self._manager = service_instance.ServiceInstanceManager( self.config) self.assertTrue( self._manager.get_config_option("driver_handles_share_servers")) self.assertIsNotNone(self._manager.driver_config) self.assertTrue(hasattr(self._manager, 'network_helper')) self.assertTrue(service_instance.NeutronNetworkHelper.called) def test_init_with_driver_config_and_wo_handling_of_share_servers(self): self.mock_object(service_instance, 'NeutronNetworkHelper') config_data = dict(CUSTOM=dict( driver_handles_share_servers=False, service_instance_user='fake_user')) opts = service_instance.common_opts + driver.share_opts with test_utils.create_temp_config_with_opts(config_data): self.config = configuration.Configuration(opts, 'CUSTOM') self._manager = service_instance.ServiceInstanceManager( self.config) self.assertIsNotNone(self._manager.driver_config) self.assertFalse(hasattr(self._manager, 'network_helper')) self.assertFalse(service_instance.NeutronNetworkHelper.called) def test_init_with_common_config_and_handling_of_share_servers(self): self.mock_object(service_instance, 'NeutronNetworkHelper') config_data = dict(DEFAULT=dict( service_instance_user='fake_username', 
driver_handles_share_servers=True)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() self.assertTrue( self._manager.get_config_option("driver_handles_share_servers")) self.assertIsNone(self._manager.driver_config) self.assertTrue(hasattr(self._manager, 'network_helper')) self.assertTrue(service_instance.NeutronNetworkHelper.called) def test_init_with_common_config_and_wo_handling_of_share_servers(self): self.mock_object(service_instance, 'NeutronNetworkHelper') config_data = dict(DEFAULT=dict( service_instance_user='fake_username', driver_handles_share_servers=False)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() self.assertEqual( False, self._manager.get_config_option("driver_handles_share_servers")) self.assertIsNone(self._manager.driver_config) self.assertFalse(hasattr(self._manager, 'network_helper')) self.assertFalse(service_instance.NeutronNetworkHelper.called) def test_no_service_user_defined(self): group_name = 'GROUP_%s' % self.id() config_data = {group_name: dict()} with test_utils.create_temp_config_with_opts(config_data): config = configuration.Configuration( service_instance.common_opts, config_group=group_name) self.assertRaises( exception.ServiceInstanceException, service_instance.ServiceInstanceManager, config) def test_get_service_instance_name_using_driver_config(self): fake_server_id = 'fake_share_server_id_%s' % self.id() self.mock_object(service_instance, 'NeutronNetworkHelper') config_data = dict(CUSTOM=dict( driver_handles_share_servers=True, service_instance_user='fake_user')) opts = service_instance.common_opts + driver.share_opts with test_utils.create_temp_config_with_opts(config_data): self.config = configuration.Configuration(opts, 'CUSTOM') self._manager = service_instance.ServiceInstanceManager( self.config) result = self._manager._get_service_instance_name(fake_server_id) 
self.assertIsNotNone(self._manager.driver_config) self.assertEqual( self._manager.get_config_option( "service_instance_name_template") % "%s_%s" % ( self._manager.driver_config.config_group, fake_server_id), result) self.assertTrue( self._manager.get_config_option("driver_handles_share_servers")) self.assertTrue(hasattr(self._manager, 'network_helper')) self.assertTrue(service_instance.NeutronNetworkHelper.called) def test_get_service_instance_name_using_default_config(self): fake_server_id = 'fake_share_server_id_%s' % self.id() config_data = dict(CUSTOM=dict( service_instance_user='fake_user')) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() result = self._manager._get_service_instance_name(fake_server_id) self.assertIsNone(self._manager.driver_config) self.assertEqual( self._manager.get_config_option( "service_instance_name_template") % fake_server_id, result) def test__check_server_availability_available_from_start(self): fake_server = dict( id='fake_server', ip='127.0.0.1', username="manila", password="manila" ) self.mock_object(service_instance.ssh_utils.SSHPool, 'create') self.mock_object(service_instance.time, 'sleep') self.mock_object(service_instance.time, 'time', mock.Mock(return_value=0)) result = self._manager._check_server_availability(fake_server) self.assertTrue(result) service_instance.ssh_utils.SSHPool.create.assert_called_once_with( quiet=True) service_instance.time.time.assert_has_calls([ mock.call(), mock.call()]) service_instance.time.time.assert_has_calls([]) @ddt.data(True, False) def test__check_server_availability_with_recall(self, is_ok): fake_server = dict( id='fake_server', ip='127.0.0.1', username="manila", password="manila" ) self.fake_time = 0 def fake_create(quiet=False): if not (is_ok and self.fake_time > 1): raise exception.SSHException def fake_time(): return self.fake_time def fake_sleep(time): self.fake_time += 5 self.mock_object(service_instance.time, 
'sleep', mock.Mock(side_effect=fake_sleep)) self.mock_object(service_instance.ssh_utils.SSHPool, 'create', mock.Mock(side_effect=fake_create)) self.mock_object(service_instance.time, 'time', mock.Mock(side_effect=fake_time)) self._manager.max_time_to_build_instance = 6 result = self._manager._check_server_availability(fake_server) if is_ok: self.assertTrue(result) else: self.assertFalse(result) service_instance.ssh_utils.SSHPool.create.assert_has_calls([ mock.call(quiet=True), mock.call(quiet=True)]) service_instance.time.time.assert_has_calls([ mock.call(), mock.call(), mock.call()]) service_instance.time.time.assert_has_calls([mock.call()]) def test_get_server_ip_found_in_networks_section(self): ip = '10.0.0.1' net_name = self._manager.get_config_option('service_network_name') fake_server = dict(networks={net_name: [ip]}) result = self._manager._get_server_ip(fake_server, net_name) self.assertEqual(ip, result) def test_get_server_ip_found_in_addresses_section(self): ip = '10.0.0.1' net_name = self._manager.get_config_option('service_network_name') fake_server = dict(addresses={net_name: [dict(addr=ip, version=4)]}) result = self._manager._get_server_ip(fake_server, net_name) self.assertEqual(ip, result) @ddt.data( {}, {'networks': {fake_get_config_option('service_network_name'): []}}, {'addresses': {fake_get_config_option('service_network_name'): []}}) def test_get_server_ip_not_found(self, data): self.assertRaises( exception.ManilaException, self._manager._get_server_ip, data, fake_get_config_option('service_network_name')) def test_security_group_name_not_specified(self): self.mock_object(self._manager, 'get_config_option', mock.Mock(return_value=None)) result = self._manager._get_or_create_security_groups( self._manager.admin_context) self.assertIsNone(result) self._manager.get_config_option.assert_called_once_with( 'service_instance_security_group') def test_security_group_name_from_config_and_sg_exist(self): name = "fake_sg_name_from_config" desc = 
"fake_sg_description" fake_secgroup = {'id': 'fake_sg_id', 'name': name, 'description': desc} self.mock_object(self._manager, 'get_config_option', mock.Mock(return_value=name)) neutron_api = self._manager.network_helper.neutron_api neutron_api.security_group_list.return_value = { 'security_groups': [fake_secgroup]} result = self._manager._get_or_create_security_groups( self._manager.admin_context) self.assertEqual([fake_secgroup, ], result) self._manager.get_config_option.assert_called_once_with( 'service_instance_security_group') neutron_api.security_group_list.assert_called_once_with({"name": name}) @ddt.data(None, 'fake_name') def test_security_group_creation_with_name_from_config(self, name): config_name = "fake_sg_name_from_config" desc = "fake_sg_description" fake_secgroup = {'id': 'fake_sg_id', 'name': name, 'description': desc} self.mock_object(self._manager, 'get_config_option', mock.Mock(return_value=name or config_name)) neutron_api = self._manager.network_helper.neutron_api neutron_api.security_group_list.return_value = {'security_groups': []} neutron_api.security_group_create.return_value = { 'security_group': fake_secgroup, } result = self._manager._get_or_create_security_groups( context=self._manager.admin_context, name=name, description=desc, ) self.assertEqual([fake_secgroup, ], result) if not name: self._manager.get_config_option.assert_called_once_with( 'service_instance_security_group') neutron_api.security_group_list.assert_called_once_with( {"name": name or config_name}) neutron_api.security_group_create.assert_called_once_with( name or config_name, desc) @ddt.data(None, 'fake_name') def test_security_group_creation_with_name_from_conf_allow_ssh(self, name): def fake_secgroup(*args, **kwargs): return {'security_group': {'id': 'fake_sg_id', 'name': args[0], 'description': args[1]}} config_name = "fake_sg_name_from_config" desc = "fake_sg_description" self.mock_object(self._manager, 'get_config_option', mock.Mock(return_value=name or 
config_name)) neutron_api = self._manager.network_helper.neutron_api neutron_api.security_group_list.return_value = {'security_groups': []} self.mock_object(neutron_api, 'security_group_create', mock.Mock(side_effect=fake_secgroup)) fake_ssh_allow_subnet = dict(cidr="10.254.0.1/24", id='allow_subnet_id') ssh_sg_name = 'manila-service-subnet-{}'.format( fake_ssh_allow_subnet['id']) result = self._manager._get_or_create_security_groups( context=self._manager.admin_context, name=name, description=desc, allow_ssh_subnet=fake_ssh_allow_subnet ) self.assertEqual([fake_secgroup(name if name else config_name, desc)['security_group'], fake_secgroup(ssh_sg_name, desc)['security_group']], result) if not name: self._manager.get_config_option.assert_called_with( 'service_instance_security_group') neutron_api.security_group_list.assert_has_calls([ mock.call({"name": name or config_name}), mock.call({"name": ssh_sg_name})]) neutron_api.security_group_create.assert_has_calls([ mock.call(name or config_name, desc), mock.call(ssh_sg_name, desc)]) def test_security_group_limit_ssh_invalid_subnet(self): def fake_secgroup(*args, **kwargs): return {'security_group': {'id': 'fake_sg_id', 'name': args[0], 'description': args[1]}} config_name = "fake_sg_name_from_config" desc = "fake_sg_description" self.mock_object(self._manager, 'get_config_option', mock.Mock(config_name)) neutron_api = self._manager.network_helper.neutron_api neutron_api.security_group_list.return_value = {'security_groups': []} self.mock_object(neutron_api, 'security_group_create', mock.Mock(side_effect=fake_secgroup)) fake_ssh_allow_subnet = dict(id='allow_subnet_id') self.assertRaises(exception.ManilaException, self._manager._get_or_create_security_groups, context=self._manager.admin_context, name=None, description=desc, allow_ssh_subnet=fake_ssh_allow_subnet) def test_security_group_two_sg_in_list(self): name = "fake_name" fake_secgroup1 = {'id': 'fake_sg_id1', 'name': name} fake_secgroup2 = {'id': 'fake_sg_id2', 
'name': name} neutron_api = self._manager.network_helper.neutron_api neutron_api.security_group_list.return_value = { 'security_groups': [fake_secgroup1, fake_secgroup2]} self.assertRaises(exception.ServiceInstanceException, self._manager._get_or_create_security_groups, self._manager.admin_context, name) neutron_api.security_group_list.assert_called_once_with( {"name": name}) @ddt.data( dict(), dict(service_port_id='fake_service_port_id'), dict(public_port_id='fake_public_port_id'), dict(service_port_id='fake_service_port_id', public_port_id='fake_public_port_id'), ) def test_set_up_service_instance(self, update_data): fake_network_info = {'foo': 'bar', 'server_id': 'fake_server_id'} fake_server = { 'id': 'fake', 'ip': '1.2.3.4', 'public_address': '1.2.3.4', 'pk_path': None, 'subnet_id': 'fake-subnet-id', 'router_id': 'fake-router-id', 'username': self._manager.get_config_option( 'service_instance_user'), 'admin_ip': 'admin_ip'} fake_server.update(update_data) expected_details = fake_server.copy() expected_details.pop('pk_path') expected_details['instance_id'] = expected_details.pop('id') expected_instance_name = self._manager._get_service_instance_name( fake_network_info['server_id']) self.mock_object(self._manager, '_create_service_instance', mock.Mock(return_value=fake_server)) self.mock_object(self._manager, '_check_server_availability') result = self._manager.set_up_service_instance( self._manager.admin_context, fake_network_info) self._manager._create_service_instance.assert_called_once_with( self._manager.admin_context, expected_instance_name, fake_network_info) self._manager._check_server_availability.assert_called_once_with( expected_details) self.assertEqual(expected_details, result) def test_set_up_service_instance_not_available(self): fake_network_info = {'foo': 'bar', 'server_id': 'fake_server_id'} fake_server = { 'id': 'fake', 'ip': '1.2.3.4', 'public_address': '1.2.3.4', 'pk_path': None, 'subnet_id': 'fake-subnet-id', 'router_id': 'fake-router-id', 
'username': self._manager.get_config_option( 'service_instance_user'), 'admin_ip': 'admin_ip'} expected_details = fake_server.copy() expected_details.pop('pk_path') expected_details['instance_id'] = expected_details.pop('id') expected_instance_name = self._manager._get_service_instance_name( fake_network_info['server_id']) self.mock_object(self._manager, '_create_service_instance', mock.Mock(return_value=fake_server)) self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=False)) result = self.assertRaises( exception.ServiceInstanceException, self._manager.set_up_service_instance, self._manager.admin_context, fake_network_info) self.assertTrue(hasattr(result, 'detail_data')) self.assertEqual( {'server_details': expected_details}, result.detail_data) self._manager._create_service_instance.assert_called_once_with( self._manager.admin_context, expected_instance_name, fake_network_info) self._manager._check_server_availability.assert_called_once_with( expected_details) def test_ensure_server(self): server_details = {'instance_id': 'fake_inst_id', 'ip': '1.2.3.4'} fake_server = fake_compute.FakeServer() self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=True)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=fake_server)) result = self._manager.ensure_service_instance( self._manager.admin_context, server_details) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_details['instance_id']) self._manager._check_server_availability.assert_called_once_with( server_details) self.assertTrue(result) def test_ensure_server_not_exists(self): server_details = {'instance_id': 'fake_inst_id', 'ip': '1.2.3.4'} self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=True)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(side_effect=exception.InstanceNotFound( 
instance_id=server_details['instance_id']))) result = self._manager.ensure_service_instance( self._manager.admin_context, server_details) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_details['instance_id']) self.assertFalse(self._manager._check_server_availability.called) self.assertFalse(result) def test_ensure_server_exception(self): server_details = {'instance_id': 'fake_inst_id', 'ip': '1.2.3.4'} self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=True)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self._manager.ensure_service_instance, self._manager.admin_context, server_details) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_details['instance_id']) self.assertFalse(self._manager._check_server_availability.called) def test_ensure_server_non_active(self): server_details = {'instance_id': 'fake_inst_id', 'ip': '1.2.3.4'} fake_server = fake_compute.FakeServer(status='ERROR') self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=fake_server)) self.mock_object(self._manager, '_check_server_availability', mock.Mock(return_value=True)) result = self._manager.ensure_service_instance( self._manager.admin_context, server_details) self.assertFalse(self._manager._check_server_availability.called) self.assertFalse(result) def test_ensure_server_no_instance_id(self): # Tests that we avoid a KeyError if the share details don't have an # instance_id key set (so we can't find the share instance). 
self.assertFalse(self._manager.ensure_service_instance( self._manager.admin_context, {'ip': '1.2.3.4'})) def test_get_key_create_new(self): keypair_name = self._manager.get_config_option( 'manila_service_keypair_name') fake_keypair = fake_compute.FakeKeypair(name=keypair_name) self.mock_object(self._manager.compute_api, 'keypair_list', mock.Mock(return_value=[])) self.mock_object(self._manager.compute_api, 'keypair_import', mock.Mock(return_value=fake_keypair)) result = self._manager._get_key(self._manager.admin_context) self.assertEqual( (fake_keypair.name, os.path.expanduser(self._manager.get_config_option( 'path_to_private_key'))), result) self._manager.compute_api.keypair_list.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.keypair_import.assert_called_once_with( self._manager.admin_context, keypair_name, '') def test_get_key_exists(self): fake_keypair = fake_compute.FakeKeypair( name=self._manager.get_config_option( 'manila_service_keypair_name'), public_key='fake_public_key') self.mock_object(self._manager.compute_api, 'keypair_list', mock.Mock(return_value=[fake_keypair])) self.mock_object(self._manager.compute_api, 'keypair_import', mock.Mock(return_value=fake_keypair)) self.mock_object(self._manager, '_load_public_key', mock.Mock(return_value='fake_public_key')) result = self._manager._get_key(self._manager.admin_context) self._manager.compute_api.keypair_list.assert_called_once_with( self._manager.admin_context) self.assertFalse(self._manager.compute_api.keypair_import.called) self.assertEqual( (fake_keypair.name, os.path.expanduser(self._manager.get_config_option( 'path_to_private_key'))), result) def test_get_key_exists_recreate(self): fake_keypair = fake_compute.FakeKeypair( name=self._manager.get_config_option( 'manila_service_keypair_name'), public_key='fake_public_key1') self.mock_object(self._manager.compute_api, 'keypair_list', mock.Mock(return_value=[fake_keypair])) self.mock_object(self._manager.compute_api, 
'keypair_import', mock.Mock(return_value=fake_keypair)) self.mock_object(self._manager.compute_api, 'keypair_delete') self.mock_object(self._manager, '_load_public_key', mock.Mock(return_value='fake_public_key2')) result = self._manager._get_key(self._manager.admin_context) self._manager.compute_api.keypair_list.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.keypair_delete.assert_called_once_with( self._manager.admin_context, fake_keypair.id) self._manager.compute_api.keypair_import.assert_called_once_with( self._manager.admin_context, fake_keypair.name, 'fake_public_key2') self.assertEqual( (fake_keypair.name, os.path.expanduser(self._manager.get_config_option( 'path_to_private_key'))), result) def test_get_key_more_than_one_exist(self): fake_keypair = fake_compute.FakeKeypair( name=self._manager.get_config_option( 'manila_service_keypair_name'), public_key='fake_public_key1') self.mock_object(self._manager.compute_api, 'keypair_list', mock.Mock(return_value=[fake_keypair, fake_keypair])) self.assertRaises( exception.ServiceInstanceException, self._manager._get_key, self._manager.admin_context) self._manager.compute_api.keypair_list.assert_called_once_with( self._manager.admin_context) def test_get_key_keypath_to_public_not_set(self): self._manager.path_to_public_key = None result = self._manager._get_key(self._manager.admin_context) self.assertEqual((None, None), result) def test_get_key_keypath_to_private_not_set(self): self._manager.path_to_private_key = None result = self._manager._get_key(self._manager.admin_context) self.assertEqual((None, None), result) def test_get_key_incorrect_keypath_to_public(self): def exists_side_effect(path): return False if path == 'fake_path' else True self._manager.path_to_public_key = 'fake_path' os_path_exists_mock = mock.Mock(side_effect=exists_side_effect) with mock.patch.object(os.path, 'exists', os_path_exists_mock): with mock.patch.object(os.path, 'expanduser', mock.Mock(side_effect=lambda 
value: value)): result = self._manager._get_key(self._manager.admin_context) self.assertEqual((None, None), result) def test_get_key_incorrect_keypath_to_private(self): def exists_side_effect(path): return False if path == 'fake_path' else True self._manager.path_to_private_key = 'fake_path' os_path_exists_mock = mock.Mock(side_effect=exists_side_effect) with mock.patch.object(os.path, 'exists', os_path_exists_mock): with mock.patch.object(os.path, 'expanduser', mock.Mock(side_effect=lambda value: value)): result = self._manager._get_key(self._manager.admin_context) self.assertEqual((None, None), result) def test_get_service_image(self): fake_image1 = fake_image.FakeImage( name=self._manager.get_config_option('service_image_name'), status='active') fake_image2 = fake_image.FakeImage( name='service_image_name', status='error') fake_image3 = fake_image.FakeImage( name='another-image', status='active') self.mock_object(self._manager.image_api, 'image_list', mock.Mock(return_value=[fake_image1, fake_image2, fake_image3])) result = self._manager._get_service_image(self._manager.admin_context) self.assertEqual(fake_image1.id, result) def test_get_service_image_not_found(self): self.mock_object(self._manager.image_api, 'image_list', mock.Mock(return_value=[])) self.assertRaises( exception.ServiceInstanceException, self._manager._get_service_image, self._manager.admin_context) def test_get_service_image_not_active(self): fake_error_image = fake_image.FakeImage( name='service_image_name', status='error') self.mock_object(self._manager.image_api, 'image_list', mock.Mock(return_value=[fake_error_image])) self.assertRaises( exception.ServiceInstanceException, self._manager._get_service_image, self._manager.admin_context) def test_get_service_image_ambiguous(self): fake_image1 = fake_image.FakeImage( name=fake_get_config_option('service_image_name'), status='active') fake_images = [fake_image1, fake_image1] self.mock_object(self._manager.image_api, 'image_list', 
mock.Mock(return_value=fake_images)) self.assertRaises( exception.ServiceInstanceException, self._manager._get_service_image, self._manager.admin_context) def test__delete_server_not_found(self): self.mock_object(self._manager.compute_api, 'server_delete') self.mock_object( self._manager.compute_api, 'server_get', mock.Mock(side_effect=exception.InstanceNotFound( instance_id=self.instance_id))) self._manager._delete_server( self._manager.admin_context, self.instance_id) self.assertFalse(self._manager.compute_api.server_delete.called) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, self.instance_id) def test__delete_server(self): def fake_server_get(*args, **kwargs): ctx = args[0] if not hasattr(ctx, 'called'): ctx.called = True return else: raise exception.InstanceNotFound(instance_id=self.instance_id) self.mock_object(self._manager.compute_api, 'server_delete') self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(side_effect=fake_server_get)) self._manager._delete_server( self._manager.admin_context, self.instance_id) self._manager.compute_api.server_delete.assert_called_once_with( self._manager.admin_context, self.instance_id) self._manager.compute_api.server_get.assert_has_calls([ mock.call(self._manager.admin_context, self.instance_id), mock.call(self._manager.admin_context, self.instance_id)]) def test__delete_server_found_always(self): self.fake_time = 0 def fake_time(): return self.fake_time def fake_sleep(time): self.fake_time += 1 server_details = {'instance_id': 'fake_inst_id', 'status': 'ACTIVE'} self.mock_object(self._manager.compute_api, 'server_delete') self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=server_details)) self.mock_object(service_instance, 'time') self.mock_object( service_instance.time, 'time', mock.Mock(side_effect=fake_time)) self.mock_object( service_instance.time, 'sleep', mock.Mock(side_effect=fake_sleep)) self.mock_object(self._manager, 
'max_time_to_build_instance', 2) self.assertRaises( exception.ServiceInstanceException, self._manager._delete_server, self._manager.admin_context, self.instance_id) self._manager.compute_api.server_delete.assert_called_once_with( self._manager.admin_context, self.instance_id) service_instance.time.sleep.assert_has_calls( [mock.call(mock.ANY) for i in range(2)]) service_instance.time.time.assert_has_calls( [mock.call() for i in range(4)]) self._manager.compute_api.server_get.assert_has_calls( [mock.call(self._manager.admin_context, self.instance_id) for i in range(3)]) def test_delete_server_soft_deleted(self): server_details = {'instance_id': 'fake_inst_id', 'status': 'SOFT_DELETED'} self.mock_object(self._manager.compute_api, 'server_delete') self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=server_details)) self._manager._delete_server( self._manager.admin_context, self.instance_id) self._manager.compute_api.server_delete.assert_called_once_with( self._manager.admin_context, self.instance_id) self._manager.compute_api.server_get.assert_has_calls([ mock.call(self._manager.admin_context, self.instance_id), mock.call(self._manager.admin_context, self.instance_id)]) def test_delete_service_instance(self): fake_server_details = dict( router_id='foo', subnet_id='bar', instance_id='quuz') self.mock_object(self._manager, '_delete_server') self.mock_object(self._manager.network_helper, 'teardown_network') self._manager.delete_service_instance( self._manager.admin_context, fake_server_details) self._manager._delete_server.assert_called_once_with( self._manager.admin_context, fake_server_details['instance_id']) self._manager.network_helper.teardown_network.assert_called_once_with( fake_server_details) @ddt.data( *[{'service_config': service_config, 'tenant_config': tenant_config, 'server': server} for service_config, tenant_config in ( ('fake_net_s', 'fake_net_t'), ('fake_net_s', '12.34.56.78'), ('98.76.54.123', 'fake_net_t'), ('98.76.54.123', 
'12.34.56.78')) for server in ( {'networks': { 'fake_net_s': ['foo', '98.76.54.123', 'bar'], 'fake_net_t': ['baar', '12.34.56.78', 'quuz']}}, {'addresses': { 'fake_net_s': [ {'addr': 'fake1'}, {'addr': '98.76.54.123'}, {'addr': 'fake2'}], 'fake_net_t': [ {'addr': 'fake3'}, {'addr': '12.34.56.78'}, {'addr': 'fake4'}], }})]) @ddt.unpack def test_get_common_server_valid_cases(self, service_config, tenant_config, server): self._get_common_server(service_config, tenant_config, server, '98.76.54.123', '12.34.56.78', True) @ddt.data( *[{'service_config': service_config, 'tenant_config': tenant_config, 'server': server} for service_config, tenant_config in ( ('fake_net_s', 'fake'), ('fake', 'fake_net_t'), ('fake', 'fake'), ('98.76.54.123', '12.12.12.1212'), ('12.12.12.1212', '12.34.56.78'), ('12.12.12.1212', '12.12.12.1212'), ('1001::1001', '1001::100G'), ('1001::10G1', '1001::1001'), ) for server in ( {'networks': { 'fake_net_s': ['foo', '98.76.54.123', 'bar'], 'fake_net_t': ['baar', '12.34.56.78', 'quuz']}}, {'addresses': { 'fake_net_s': [ {'addr': 'fake1'}, {'addr': '98.76.54.123'}, {'addr': 'fake2'}], 'fake_net_t': [ {'addr': 'fake3'}, {'addr': '12.34.56.78'}, {'addr': 'fake4'}], }})]) @ddt.unpack def test_get_common_server_invalid_cases(self, service_config, tenant_config, server): self._get_common_server(service_config, tenant_config, server, '98.76.54.123', '12.34.56.78', False) @ddt.data( *[{'service_config': service_config, 'tenant_config': tenant_config, 'server': server} for service_config, tenant_config in ( ('fake_net_s', '1001::1002'), ('1001::1001', 'fake_net_t'), ('1001::1001', '1001::1002')) for server in ( {'networks': { 'fake_net_s': ['foo', '1001::1001'], 'fake_net_t': ['bar', '1001::1002']}}, {'addresses': { 'fake_net_s': [{'addr': 'foo'}, {'addr': '1001::1001'}], 'fake_net_t': [{'addr': 'bar'}, {'addr': '1001::1002'}]}})]) @ddt.unpack def test_get_common_server_valid_ipv6_address(self, service_config, tenant_config, server): 
self._get_common_server(service_config, tenant_config, server, '1001::1001', '1001::1002', True) def _get_common_server(self, service_config, tenant_config, server, service_address, network_address, is_valid=True): fake_instance_id = 'fake_instance_id' fake_user = 'fake_user' fake_pass = 'fake_pass' fake_server = {'id': fake_instance_id} fake_server.update(server) expected = { 'backend_details': { 'username': fake_user, 'password': fake_pass, 'pk_path': self._manager.path_to_private_key, 'ip': service_address, 'public_address': network_address, 'instance_id': fake_instance_id, } } def fake_get_config_option(attr): if attr == 'service_net_name_or_ip': return service_config elif attr == 'tenant_net_name_or_ip': return tenant_config elif attr == 'service_instance_name_or_id': return fake_instance_id elif attr == 'service_instance_user': return fake_user elif attr == 'service_instance_password': return fake_pass else: raise exception.ManilaException("Wrong test data provided.") self.mock_object( self._manager.compute_api, 'server_get_by_name_or_id', mock.Mock(return_value=fake_server)) self.mock_object( self._manager, 'get_config_option', mock.Mock(side_effect=fake_get_config_option)) if is_valid: actual = self._manager.get_common_server() self.assertEqual(expected, actual) else: self.assertRaises( exception.ManilaException, self._manager.get_common_server) self.assertTrue( self._manager.compute_api.server_get_by_name_or_id.called) def test___create_service_instance_with_sg_success(self): self.mock_object(service_instance, 'NeutronNetworkHelper', mock.Mock(side_effect=FakeNetworkHelper)) config_data = dict(DEFAULT=dict( driver_handles_share_servers=True, service_instance_user='fake_user', limit_ssh_access=True)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() server_create = dict(id='fakeid', status='CREATING', networks=dict()) net_name = self._manager.get_config_option("service_network_name") sg = 
[{'id': 'fakeid', 'name': 'fakename'}, ] ip_address = 'fake_ip_address' service_image_id = 'fake_service_image_id' key_data = 'fake_key_name', 'fake_key_path' instance_name = 'fake_instance_name' network_info = dict() network_data = {'nics': ['fake_nic1', 'fake_nic2']} network_data['router'] = dict(id='fake_router_id') server_get = dict( id='fakeid', status='ACTIVE', networks={net_name: [ip_address]}) network_data.update(dict( router_id='fake_router_id', subnet_id='fake_subnet_id', public_port=dict(id='fake_public_port', fixed_ips=[dict(ip_address=ip_address)]), service_port=dict(id='fake_service_port', fixed_ips=[{'ip_address': ip_address}]), admin_port={'id': 'fake_admin_port', 'fixed_ips': [{'ip_address': ip_address}]}, service_subnet={'id': 'fake_subnet_id', 'cidr': '10.254.0.0/28'}) ) self.mock_object(service_instance.time, 'time', mock.Mock(return_value=5)) self.mock_object(self._manager.network_helper, 'setup_network', mock.Mock(return_value=network_data)) self.mock_object(self._manager.network_helper, 'get_network_name', mock.Mock(return_value=net_name)) self.mock_object(self._manager, '_get_service_image', mock.Mock(return_value=service_image_id)) self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.mock_object(self._manager, '_get_or_create_security_groups', mock.Mock(return_value=sg)) self.mock_object(self._manager.compute_api, 'server_create', mock.Mock(return_value=server_create)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=server_get)) self.mock_object(self._manager.compute_api, 'add_security_group_to_server') expected = { 'id': server_get['id'], 'status': server_get['status'], 'pk_path': key_data[1], 'public_address': ip_address, 'router_id': network_data.get('router_id'), 'subnet_id': network_data.get('subnet_id'), 'instance_id': server_get['id'], 'ip': ip_address, 'networks': server_get['networks'], 'public_port_id': 'fake_public_port', 'service_port_id': 'fake_service_port', 
'admin_port_id': 'fake_admin_port', 'admin_ip': 'fake_ip_address', } result = self._manager._create_service_instance( self._manager.admin_context, instance_name, network_info) self.assertEqual(expected, result) self.assertTrue(service_instance.time.time.called) self._manager.network_helper.setup_network.assert_called_once_with( network_info) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) self._manager._get_or_create_security_groups.assert_called_once_with( self._manager.admin_context, allow_ssh_subnet=network_data['service_subnet']) self._manager.compute_api.server_create.assert_called_once_with( self._manager.admin_context, name=instance_name, image=service_image_id, flavor='100', key_name=key_data[0], nics=network_data['nics'], availability_zone=service_instance.CONF.storage_availability_zone) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_create['id']) (self._manager.compute_api.add_security_group_to_server. 
assert_called_once_with( self._manager.admin_context, server_get['id'], sg[0]['id'])) self._manager.network_helper.get_network_name.assert_has_calls([]) def test___create_service_instance_neutron_no_admin_ip(self): self.mock_object(service_instance, 'NeutronNetworkHelper', mock.Mock(side_effect=FakeNetworkHelper)) config_data = {'DEFAULT': { 'driver_handles_share_servers': True, 'service_instance_user': 'fake_user', 'limit_ssh_access': True}} with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() server_create = {'id': 'fakeid', 'status': 'CREATING', 'networks': {}} net_name = self._manager.get_config_option("service_network_name") sg = {'id': 'fakeid', 'name': 'fakename'} ip_address = 'fake_ip_address' service_image_id = 'fake_service_image_id' key_data = 'fake_key_name', 'fake_key_path' instance_name = 'fake_instance_name' network_info = {} network_data = { 'nics': ['fake_nic1', 'fake_nic2'], 'router_id': 'fake_router_id', 'subnet_id': 'fake_subnet_id', 'public_port': {'id': 'fake_public_port', 'fixed_ips': [{'ip_address': ip_address}]}, 'service_port': {'id': 'fake_service_port', 'fixed_ips': [{'ip_address': ip_address}]}, 'admin_port': {'id': 'fake_admin_port', 'fixed_ips': []}, 'router': {'id': 'fake_router_id'}, 'service_subnet': {'id': 'fake_id', 'cidr': '10.254.0.0/28'} } server_get = { 'id': 'fakeid', 'status': 'ACTIVE', 'networks': {net_name: [ip_address]}} self.mock_object(service_instance.time, 'time', mock.Mock(return_value=5)) self.mock_object(self._manager.network_helper, 'setup_network', mock.Mock(return_value=network_data)) self.mock_object(self._manager.network_helper, 'get_network_name', mock.Mock(return_value=net_name)) self.mock_object(self._manager, '_get_service_image', mock.Mock(return_value=service_image_id)) self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.mock_object(self._manager, '_get_or_create_security_groups', mock.Mock(return_value=[sg, 
])) self.mock_object(self._manager.compute_api, 'server_create', mock.Mock(return_value=server_create)) self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=server_get)) self.mock_object(self._manager.compute_api, 'add_security_group_to_server') self.assertRaises( exception.AdminIPNotFound, self._manager._create_service_instance, self._manager.admin_context, instance_name, network_info) self.assertTrue(service_instance.time.time.called) self._manager.network_helper.setup_network.assert_called_once_with( network_info) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) self._manager._get_or_create_security_groups.assert_called_once_with( self._manager.admin_context, allow_ssh_subnet=network_data['service_subnet']) self._manager.compute_api.server_create.assert_called_once_with( self._manager.admin_context, name=instance_name, image=service_image_id, flavor='100', key_name=key_data[0], nics=network_data['nics'], availability_zone=service_instance.CONF.storage_availability_zone) self._manager.compute_api.server_get.assert_called_once_with( self._manager.admin_context, server_create['id']) (self._manager.compute_api.add_security_group_to_server. 
assert_called_once_with( self._manager.admin_context, server_get['id'], sg['id'])) self._manager.network_helper.get_network_name.assert_has_calls([]) @ddt.data( dict( instance_id_included=False, mockobj=mock.Mock(side_effect=exception.ServiceInstanceException)), dict( instance_id_included=True, mockobj=mock.Mock(return_value=dict(id='fakeid', status='ERROR')))) @ddt.unpack def test___create_service_instance_failed_to_create( self, instance_id_included, mockobj): service_image_id = 'fake_service_image_id' key_data = 'fake_key_name', 'fake_key_path' instance_name = 'fake_instance_name' network_info = dict() network_data = dict( nics=['fake_nic1', 'fake_nic2'], router_id='fake_router_id', subnet_id='fake_subnet_id') self.mock_object(self._manager.network_helper, 'setup_network', mock.Mock(return_value=network_data)) self.mock_object(self._manager, '_get_service_image', mock.Mock(return_value=service_image_id)) self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.mock_object( self._manager.compute_api, 'server_create', mockobj) self.mock_object( self._manager, 'wait_for_instance_to_be_active', mock.Mock(side_effect=exception.ServiceInstanceException)) try: self._manager._create_service_instance( self._manager.admin_context, instance_name, network_info) except exception.ServiceInstanceException as e: expected = dict(server_details=dict( subnet_id=network_data['subnet_id'], router_id=network_data['router_id'])) if instance_id_included: expected['server_details']['instance_id'] = 'fakeid' self.assertEqual(expected, e.detail_data) else: raise exception.ManilaException('Expected error was not raised.') self._manager.network_helper.setup_network.assert_called_once_with( network_info) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.server_create.assert_called_once_with( self._manager.admin_context, 
name=instance_name, image=service_image_id, flavor='100', key_name=key_data[0], nics=network_data['nics'], availability_zone=service_instance.CONF.storage_availability_zone) def test___create_service_instance_limit_ssh_no_service_subnet(self): self.mock_object(service_instance, 'NeutronNetworkHelper', mock.Mock(side_effect=FakeNetworkHelper)) config_data = dict(DEFAULT=dict( driver_handles_share_servers=True, service_instance_user='fake_user', limit_ssh_access=True)) with test_utils.create_temp_config_with_opts(config_data): self._manager = service_instance.ServiceInstanceManager() server_create = dict(id='fakeid', status='CREATING', networks=dict()) net_name = self._manager.get_config_option("service_network_name") ip_address = 'fake_ip_address' service_image_id = 'fake_service_image_id' key_data = 'fake_key_name', 'fake_key_path' instance_name = 'fake_instance_name' network_info = dict() network_data = {'nics': ['fake_nic1', 'fake_nic2']} network_data['router'] = dict(id='fake_router_id') server_get = dict( id='fakeid', status='ACTIVE', networks={net_name: [ip_address]}) network_data.update(dict( router_id='fake_router_id', subnet_id='fake_subnet_id', public_port=dict(id='fake_public_port', fixed_ips=[dict(ip_address=ip_address)]), service_port=dict(id='fake_service_port', fixed_ips=[{'ip_address': ip_address}]), admin_port={'id': 'fake_admin_port', 'fixed_ips': [{'ip_address': ip_address}]},) ) self.mock_object(service_instance.time, 'time', mock.Mock(return_value=5)) self.mock_object(self._manager.network_helper, 'setup_network', mock.Mock(return_value=network_data)) self.mock_object(self._manager.network_helper, 'get_network_name', mock.Mock(return_value=net_name)) self.mock_object(self._manager, '_get_service_image', mock.Mock(return_value=service_image_id)) self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.mock_object(self._manager.compute_api, 'server_create', mock.Mock(return_value=server_create)) 
self.mock_object(self._manager.compute_api, 'server_get', mock.Mock(return_value=server_get)) self.assertRaises(exception.ManilaException, self._manager._create_service_instance, self._manager.admin_context, instance_name, network_info) def test___create_service_instance_failed_to_build(self): server_create = dict(id='fakeid', status='CREATING', networks=dict()) service_image_id = 'fake_service_image_id' key_data = 'fake_key_name', 'fake_key_path' instance_name = 'fake_instance_name' network_info = dict() network_data = dict( nics=['fake_nic1', 'fake_nic2'], router_id='fake_router_id', subnet_id='fake_subnet_id') self.mock_object(self._manager.network_helper, 'setup_network', mock.Mock(return_value=network_data)) self.mock_object(self._manager, '_get_service_image', mock.Mock(return_value=service_image_id)) self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.mock_object(self._manager.compute_api, 'server_create', mock.Mock(return_value=server_create)) self.mock_object( self._manager, 'wait_for_instance_to_be_active', mock.Mock(side_effect=exception.ServiceInstanceException)) try: self._manager._create_service_instance( self._manager.admin_context, instance_name, network_info) except exception.ServiceInstanceException as e: self.assertEqual( dict(server_details=dict(subnet_id=network_data['subnet_id'], router_id=network_data['router_id'], instance_id=server_create['id'])), e.detail_data) else: raise exception.ManilaException('Expected error was not raised.') self._manager.network_helper.setup_network.assert_called_once_with( network_info) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) self._manager.compute_api.server_create.assert_called_once_with( self._manager.admin_context, name=instance_name, image=service_image_id, flavor='100', key_name=key_data[0], nics=network_data['nics'], 
availability_zone=service_instance.CONF.storage_availability_zone) @ddt.data( dict(name=None, path=None), dict(name=None, path='/tmp')) @ddt.unpack def test__create_service_instance_no_key_and_no_path(self, name, path): key_data = name, path self.mock_object(self._manager, '_get_service_image') self.mock_object(self._manager, '_get_key', mock.Mock(return_value=key_data)) self.assertRaises( exception.ServiceInstanceException, self._manager._create_service_instance, self._manager.admin_context, 'fake_instance_name', dict()) self._manager._get_service_image.assert_called_once_with( self._manager.admin_context) self._manager._get_key.assert_called_once_with( self._manager.admin_context) @mock.patch('time.sleep') @mock.patch('time.time') def _test_wait_for_instance(self, mock_time, mock_sleep, server_get_side_eff=None, expected_try_count=1, expected_sleep_count=0, expected_ret_val=None, expected_exc=None): mock_server_get = mock.Mock(side_effect=server_get_side_eff) self.mock_object(self._manager.compute_api, 'server_get', mock_server_get) self.fake_time = 0 def fake_time(): return self.fake_time def fake_sleep(sleep_time): self.fake_time += sleep_time # Note(lpetrut): LOG methods can call time.time mock_time.side_effect = fake_time mock_sleep.side_effect = fake_sleep timeout = 3 if expected_exc: self.assertRaises( expected_exc, self._manager.wait_for_instance_to_be_active, instance_id=mock.sentinel.instance_id, timeout=timeout) else: instance = self._manager.wait_for_instance_to_be_active( instance_id=mock.sentinel.instance_id, timeout=timeout) self.assertEqual(expected_ret_val, instance) mock_server_get.assert_has_calls( [mock.call(self._manager.admin_context, mock.sentinel.instance_id)] * expected_try_count) mock_sleep.assert_has_calls([mock.call(1)] * expected_sleep_count) def test_wait_for_instance_timeout(self): server_get_side_eff = [ exception.InstanceNotFound( instance_id=mock.sentinel.instance_id), {'status': 'BUILDING'}, {'status': 'ACTIVE'}] # Note that in 
this case, although the status is active, the # 'networks' field is missing. self._test_wait_for_instance( # pylint: disable=no-value-for-parameter server_get_side_eff=server_get_side_eff, expected_exc=exception.ServiceInstanceException, expected_try_count=3, expected_sleep_count=3) def test_wait_for_instance_error_state(self): mock_instance = {'status': 'ERROR'} self._test_wait_for_instance( # pylint: disable=no-value-for-parameter server_get_side_eff=[mock_instance], expected_exc=exception.ServiceInstanceException, expected_try_count=1) def test_wait_for_instance_available(self): mock_instance = {'status': 'ACTIVE', 'networks': mock.sentinel.networks} self._test_wait_for_instance( # pylint: disable=no-value-for-parameter server_get_side_eff=[mock_instance], expected_try_count=1, expected_ret_val=mock_instance) def test_reboot_server(self): fake_server = {'instance_id': mock.sentinel.instance_id} soft_reboot = True mock_reboot = mock.Mock() self.mock_object(self._manager.compute_api, 'server_reboot', mock_reboot) self._manager.reboot_server(fake_server, soft_reboot) mock_reboot.assert_called_once_with(self._manager.admin_context, fake_server['instance_id'], soft_reboot) class BaseNetworkHelperTestCase(test.TestCase): """Tests Base network helper for service instance.""" def test_instantiate_valid(self): class FakeNetworkHelper(service_instance.BaseNetworkhelper): @property def NAME(self): return 'fake_NAME' def __init__(self, service_instance_manager): self.fake_init = 'fake_init_value' def get_network_name(self, network_info): return 'fake_network_name' def setup_connectivity_with_service_instances(self): return 'fake_setup_connectivity_with_service_instances' def setup_network(self, network_info): return 'fake_setup_network' def teardown_network(self, server_details): return 'fake_teardown_network' instance = FakeNetworkHelper('fake') attrs = [ 'fake_init', 'NAME', 'get_network_name', 'teardown_network', 'setup_connectivity_with_service_instances', 
'setup_network', ] for attr in attrs: self.assertTrue(hasattr(instance, attr)) self.assertEqual('fake_init_value', instance.fake_init) self.assertEqual('fake_NAME', instance.NAME) self.assertEqual( 'fake_network_name', instance.get_network_name('fake')) self.assertEqual( 'fake_setup_connectivity_with_service_instances', instance.setup_connectivity_with_service_instances()) self.assertEqual('fake_setup_network', instance.setup_network('fake')) self.assertEqual( 'fake_teardown_network', instance.teardown_network('fake')) def test_instantiate_invalid(self): self.assertRaises( TypeError, service_instance.BaseNetworkhelper, 'fake') @ddt.ddt class NeutronNetworkHelperTestCase(test.TestCase): """Tests Neutron network helper for service instance.""" def setUp(self): super(NeutronNetworkHelperTestCase, self).setUp() self.mock_object(importutils, 'import_class') self.fake_manager = FakeServiceInstance() def _init_neutron_network_plugin(self): self.mock_object( service_instance.NeutronNetworkHelper, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) return service_instance.NeutronNetworkHelper(self.fake_manager) def test_init_neutron_network_plugin(self): instance = self._init_neutron_network_plugin() self.assertEqual(service_instance.NEUTRON_NAME, instance.NAME) attrs = [ 'neutron_api', 'vif_driver', 'service_network_id', 'connect_share_server_to_tenant_network', 'get_config_option'] for attr in attrs: self.assertTrue(hasattr(instance, attr), "No attr '%s'" % attr) (service_instance.NeutronNetworkHelper._get_service_network_id. 
assert_called_once_with()) self.assertEqual('DEFAULT', instance.neutron_api.config_group_name) def test_init_neutron_network_plugin_with_driver_config_group(self): self.fake_manager.driver_config = mock.Mock() self.fake_manager.driver_config.config_group = ( 'fake_config_group') self.fake_manager.driver_config.network_config_group = None instance = self._init_neutron_network_plugin() self.assertEqual('fake_config_group', instance.neutron_api.config_group_name) def test_init_neutron_network_plugin_with_network_config_group(self): self.fake_manager.driver_config = mock.Mock() self.fake_manager.driver_config.config_group = ( "fake_config_group") self.fake_manager.driver_config.network_config_group = ( "fake_network_config_group") instance = self._init_neutron_network_plugin() self.assertEqual('fake_network_config_group', instance.neutron_api.config_group_name) def test_admin_project_id(self): instance = self._init_neutron_network_plugin() admin_project_id = 'fake_admin_project_id' self.mock_class('manila.network.neutron.api.API', mock.Mock()) instance.neutron_api.admin_project_id = admin_project_id self.assertEqual(admin_project_id, instance.admin_project_id) def test_get_network_name(self): network_info = dict(neutron_net_id='fake_neutron_net_id') network = dict(name='fake_network_name') instance = self._init_neutron_network_plugin() self.mock_object( instance.neutron_api, 'get_network', mock.Mock(return_value=network)) result = instance.get_network_name(network_info) self.assertEqual(network['name'], result) instance.neutron_api.get_network.assert_called_once_with( network_info['neutron_net_id']) def test_get_service_network_id_none_exist(self): service_network_name = fake_get_config_option('service_network_name') network = dict(id='fake_network_id') admin_project_id = 'fake_admin_project_id' self.mock_object( service_instance.neutron.API, 'get_all_admin_project_networks', mock.Mock(return_value=[])) self.mock_object( service_instance.neutron.API, 
'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'network_create', mock.Mock(return_value=network)) instance = service_instance.NeutronNetworkHelper(self.fake_manager) result = instance._get_service_network_id() self.assertEqual(network['id'], result) self.assertTrue(service_instance.neutron.API. get_all_admin_project_networks.called) service_instance.neutron.API.network_create.assert_has_calls([ mock.call(instance.admin_project_id, service_network_name)]) def test_get_service_network_id_one_exist(self): service_network_name = fake_get_config_option('service_network_name') network = dict(id='fake_network_id', name=service_network_name) admin_project_id = 'fake_admin_project_id' self.mock_object( service_instance.neutron.API, 'get_all_admin_project_networks', mock.Mock(return_value=[network])) self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) instance = service_instance.NeutronNetworkHelper(self.fake_manager) result = instance._get_service_network_id() self.assertEqual(network['id'], result) self.assertTrue(service_instance.neutron.API. get_all_admin_project_networks.called) def test_get_service_network_id_two_exist(self): service_network_name = fake_get_config_option('service_network_name') network = dict(id='fake_network_id', name=service_network_name) self.mock_object( service_instance.neutron.API, 'get_all_admin_project_networks', mock.Mock(return_value=[network, network])) helper = service_instance.NeutronNetworkHelper(self.fake_manager) self.assertRaises(exception.ManilaException, lambda: helper.service_network_id) (service_instance.neutron.API.get_all_admin_project_networks. 
assert_has_calls([mock.call()])) @ddt.data(dict(), dict(subnet_id='foo'), dict(router_id='bar')) def test_teardown_network_no_service_data(self, server_details): fake_ports = [ {'device_id': 'fake_device_id', 'device_owner': 'compute:foo'}, ] self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') instance.teardown_network(server_details) self.assertFalse( service_instance.neutron.API.router_remove_interface.called) @ddt.data( *[dict(server_details=sd, fail=f) for f in (True, False) for sd in (dict(service_port_id='fake_service_port_id'), dict(public_port_id='fake_public_port_id'), dict(service_port_id='fake_service_port_id', public_port_id='fake_public_port_id'))] ) @ddt.unpack def test_teardown_network_with_ports(self, server_details, fail): instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') if fail: delete_port_mock = mock.Mock( side_effect=exception.NetworkException(code=404)) else: delete_port_mock = mock.Mock() self.mock_object(instance.neutron_api, 'delete_port', delete_port_mock) self.mock_object(service_instance.LOG, 'debug') instance.teardown_network(server_details) self.assertFalse(instance.neutron_api.router_remove_interface.called) self.assertEqual( len(server_details), len(instance.neutron_api.delete_port.mock_calls)) for k, v in server_details.items(): self.assertIn( mock.call(v), instance.neutron_api.delete_port.mock_calls) if fail: service_instance.LOG.debug.assert_has_calls([ mock.call(mock.ANY, mock.ANY) for sd in server_details ]) else: service_instance.LOG.debug.assert_has_calls([]) @ddt.data( dict(service_port_id='fake_service_port_id'), dict(public_port_id='fake_public_port_id'), dict(service_port_id='fake_service_port_id', 
public_port_id='fake_public_port_id'), ) def test_teardown_network_with_ports_unhandled_exception(self, server_details): instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') delete_port_mock = mock.Mock( side_effect=exception.NetworkException(code=500)) self.mock_object( service_instance.neutron.API, 'delete_port', delete_port_mock) self.mock_object(service_instance.LOG, 'debug') self.assertRaises( exception.NetworkException, instance.teardown_network, server_details, ) self.assertFalse( service_instance.neutron.API.router_remove_interface.called) service_instance.neutron.API.delete_port.assert_called_once_with( mock.ANY) service_instance.LOG.debug.assert_has_calls([]) def test_teardown_network_with_wrong_ports(self): instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') self.mock_object( service_instance.neutron.API, 'delete_port') self.mock_object(service_instance.LOG, 'debug') instance.teardown_network(dict(foo_id='fake_service_port_id')) service_instance.neutron.API.router_remove_interface.assert_has_calls( []) service_instance.neutron.API.delete_port.assert_has_calls([]) service_instance.LOG.debug.assert_has_calls([]) def test_teardown_network_subnet_is_used(self): server_details = dict(subnet_id='foo', router_id='bar') fake_ports = [ {'device_id': 'fake_device_id', 'device_owner': 'compute:foo'}, ] instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) instance.teardown_network(server_details) self.assertFalse( service_instance.neutron.API.router_remove_interface.called) self.assertFalse(service_instance.neutron.API.update_subnet.called) 
service_instance.neutron.API.list_ports.assert_called_once_with( fields=['device_id', 'device_owner'], fixed_ips=['subnet_id=foo']) def test_teardown_network_subnet_not_used(self): server_details = dict(subnet_id='foo', router_id='bar') fake_ports = [ {'device_id': 'fake_device_id', 'device_owner': 'network:router_interface'}, {'device_id': 'fake_device_id', 'device_owner': 'compute'}, {'device_id': '', 'device_owner': 'compute'}, ] instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) instance.teardown_network(server_details) (service_instance.neutron.API.router_remove_interface. assert_called_once_with('bar', 'foo')) (service_instance.neutron.API.update_subnet. assert_called_once_with('foo', '')) service_instance.neutron.API.list_ports.assert_called_once_with( fields=['device_id', 'device_owner'], fixed_ips=['subnet_id=foo']) def test_teardown_network_subnet_not_used_with_no_router_id(self): server_details = dict(subnet_id='foo') fake_ports = [ {'device_id': 'fake_device_id', 'device_owner': 'compute'}, {'device_id': '', 'device_owner': 'compute'}, ] instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) instance.teardown_network(server_details) self.assertFalse( service_instance.neutron.API.router_remove_interface.called) (service_instance.neutron.API.update_subnet. 
assert_called_once_with('foo', '')) service_instance.neutron.API.list_ports.assert_called_once_with( fields=['device_id', 'device_owner'], fixed_ips=['subnet_id=foo']) def test_teardown_network_subnet_not_used_and_get_error_404(self): server_details = dict(subnet_id='foo', router_id='bar') fake_ports = [ {'device_id': 'fake_device_id', 'device_owner': 'fake'}, ] instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface', mock.Mock(side_effect=exception.NetworkException(code=404))) self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) instance.teardown_network(server_details) (service_instance.neutron.API.router_remove_interface. assert_called_once_with('bar', 'foo')) (service_instance.neutron.API.update_subnet. assert_called_once_with('foo', '')) service_instance.neutron.API.list_ports.assert_called_once_with( fields=['device_id', 'device_owner'], fixed_ips=['subnet_id=foo']) def test_teardown_network_subnet_not_used_get_unhandled_error(self): server_details = dict(subnet_id='foo', router_id='bar') fake_ports = [ {'device_id': 'fake_device_id', 'device_owner': 'fake'}, ] instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface', mock.Mock(side_effect=exception.NetworkException(code=500))) self.mock_object( service_instance.neutron.API, 'update_subnet') self.mock_object( service_instance.neutron.API, 'list_ports', mock.Mock(return_value=fake_ports)) self.assertRaises( exception.NetworkException, instance.teardown_network, server_details) (service_instance.neutron.API.router_remove_interface. 
assert_called_once_with('bar', 'foo')) self.assertFalse(service_instance.neutron.API.update_subnet.called) service_instance.neutron.API.list_ports.assert_called_once_with( fields=['device_id', 'device_owner'], fixed_ips=['subnet_id=foo']) def test_setup_network_and_connect_share_server_to_tenant_net(self): def fake_create_port(*aargs, **kwargs): if aargs[1] == 'fake_service_network_id': return self.service_port elif aargs[1] == 'fake_tenant_network_id': return self.public_port else: raise exception.ManilaException('Got unexpected data') admin_project_id = 'fake_admin_project_id' network_info = dict( neutron_net_id='fake_tenant_network_id', neutron_subnet_id='fake_tenant_subnet_id') cidr = '13.0.0.0/24' self.service_port = dict( id='fake_service_port_id', fixed_ips=[dict(ip_address='fake_service_port_ip_address')]) self.public_port = dict( id='fake_tenant_port_id', fixed_ips=[dict(ip_address='fake_public_port_ip_address')]) service_subnet = dict(id='fake_service_subnet') instance = self._init_neutron_network_plugin() instance.connect_share_server_to_tenant_network = True self.mock_object(instance, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'create_port', mock.Mock(side_effect=fake_create_port)) self.mock_object( service_instance.neutron.API, 'subnet_create', mock.Mock(return_value=service_subnet)) self.mock_object( instance, 'setup_connectivity_with_service_instances', mock.Mock(return_value=service_subnet)) self.mock_object( instance, '_get_cidr_for_subnet', mock.Mock(return_value=cidr)) self.mock_object( instance, '_get_service_subnet', mock.Mock(return_value=None)) expected = { 'ip_address': self.public_port['fixed_ips'][0]['ip_address'], 'public_port': self.public_port, 'service_port': self.service_port, 'service_subnet': service_subnet, 'ports': [self.public_port, 
self.service_port], 'nics': [{'port-id': self.public_port['id']}, {'port-id': self.service_port['id']}]} result = instance.setup_network(network_info) self.assertEqual(expected, result) (instance.setup_connectivity_with_service_instances. assert_called_once_with()) instance._get_service_subnet.assert_called_once_with(mock.ANY) instance._get_cidr_for_subnet.assert_called_once_with() self.assertTrue(service_instance.neutron.API.subnet_create.called) self.assertTrue(service_instance.neutron.API.create_port.called) def test_setup_network_and_connect_share_server_to_tenant_net_admin(self): def fake_create_port(*aargs, **kwargs): if aargs[1] == 'fake_admin_network_id': return self.admin_port elif aargs[1] == 'fake_tenant_network_id': return self.public_port else: raise exception.ManilaException('Got unexpected data') admin_project_id = 'fake_admin_project_id' network_info = { 'neutron_net_id': 'fake_tenant_network_id', 'neutron_subnet_id': 'fake_tenant_subnet_id'} self.admin_port = { 'id': 'fake_admin_port_id', 'fixed_ips': [{'ip_address': 'fake_admin_port_ip_address'}]} self.public_port = { 'id': 'fake_tenant_port_id', 'fixed_ips': [{'ip_address': 'fake_public_port_ip_address'}]} instance = self._init_neutron_network_plugin() instance.use_admin_port = True instance.use_service_network = False instance.admin_network_id = 'fake_admin_network_id' instance.admin_subnet_id = 'fake_admin_subnet_id' instance.connect_share_server_to_tenant_network = True self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'create_port', mock.Mock(side_effect=fake_create_port)) self.mock_object( instance, 'setup_connectivity_with_service_instances') expected = { 'ip_address': self.public_port['fixed_ips'][0]['ip_address'], 'public_port': self.public_port, 'admin_port': self.admin_port, 'ports': [self.public_port, self.admin_port], 'nics': [{'port-id': self.public_port['id']}, {'port-id': 
self.admin_port['id']}]} result = instance.setup_network(network_info) self.assertEqual(expected, result) (instance.setup_connectivity_with_service_instances. assert_called_once_with()) self.assertTrue(service_instance.neutron.API.create_port.called) @ddt.data(None, exception.NetworkException(code=400)) def test_setup_network_using_router_success(self, return_obj): admin_project_id = 'fake_admin_project_id' network_info = dict( neutron_net_id='fake_tenant_network_id', neutron_subnet_id='fake_tenant_subnet_id') cidr = '13.0.0.0/24' self.admin_port = { 'id': 'fake_admin_port_id', 'fixed_ips': [{'ip_address': 'fake_admin_port_ip_address'}]} self.service_port = dict( id='fake_service_port_id', fixed_ips=[dict(ip_address='fake_service_port_ip_address')]) service_subnet = dict(id='fake_service_subnet') instance = self._init_neutron_network_plugin() instance.use_admin_port = True instance.admin_network_id = 'fake_admin_network_id' instance.admin_subnet_id = 'fake_admin_subnet_id' instance.connect_share_server_to_tenant_network = False self.mock_object(instance, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) router = dict(id='fake_router_id') self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'create_port', mock.Mock(side_effect=[self.service_port, self.admin_port])) self.mock_object( service_instance.neutron.API, 'subnet_create', mock.Mock(return_value=service_subnet)) self.mock_object( instance, '_get_private_router', mock.Mock(return_value=router)) self.mock_object( service_instance.neutron.API, 'router_add_interface', mock.Mock(side_effect=return_obj)) self.mock_object(instance, 'setup_connectivity_with_service_instances') self.mock_object( instance, '_get_cidr_for_subnet', mock.Mock(return_value=cidr)) self.mock_object( instance, '_get_service_subnet', mock.Mock(return_value=None)) expected = { 'ip_address': 
self.service_port['fixed_ips'][0]['ip_address'], 'service_port': self.service_port, 'service_subnet': service_subnet, 'admin_port': self.admin_port, 'router': router, 'ports': [self.service_port, self.admin_port], 'nics': [{'port-id': self.service_port['id']}, {'port-id': self.admin_port['id']}]} result = instance.setup_network(network_info) self.assertEqual(expected, result) (instance.setup_connectivity_with_service_instances. assert_called_once_with()) instance._get_service_subnet.assert_called_once_with(mock.ANY) instance._get_cidr_for_subnet.assert_called_once_with() self.assertTrue(service_instance.neutron.API.subnet_create.called) self.assertTrue(service_instance.neutron.API.create_port.called) instance._get_private_router.assert_called_once_with( network_info['neutron_net_id'], network_info['neutron_subnet_id']) (service_instance.neutron.API.router_add_interface. assert_called_once_with(router['id'], service_subnet['id'])) def test_setup_network_using_router_addon_of_interface_failed(self): network_info = dict( neutron_net_id='fake_tenant_network_id', neutron_subnet_id='fake_tenant_subnet_id') service_subnet = dict(id='fake_service_subnet') instance = self._init_neutron_network_plugin() instance.connect_share_server_to_tenant_network = False self.mock_object(instance, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) router = dict(id='fake_router_id') self.mock_object( instance, '_get_private_router', mock.Mock(return_value=router)) self.mock_object( service_instance.neutron.API, 'router_add_interface', mock.Mock(side_effect=exception.NetworkException(code=500))) self.mock_object( instance, '_get_service_subnet', mock.Mock(return_value=service_subnet)) self.assertRaises( exception.NetworkException, instance.setup_network, network_info) instance._get_service_subnet.assert_called_once_with(mock.ANY) instance._get_private_router.assert_called_once_with( network_info['neutron_net_id'], network_info['neutron_subnet_id']) 
(service_instance.neutron.API.router_add_interface. assert_called_once_with(router['id'], service_subnet['id'])) def test_setup_network_using_router_connectivity_verification_fail(self): admin_project_id = 'fake_admin_project_id' network_info = dict( neutron_net_id='fake_tenant_network_id', neutron_subnet_id='fake_tenant_subnet_id') cidr = '13.0.0.0/24' self.service_port = dict( id='fake_service_port_id', fixed_ips=[dict(ip_address='fake_service_port_ip_address')]) service_subnet = dict(id='fake_service_subnet') instance = self._init_neutron_network_plugin() instance.connect_share_server_to_tenant_network = False self.mock_object(instance, '_get_service_network_id', mock.Mock(return_value='fake_service_network_id')) router = dict(id='fake_router_id') self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) self.mock_object( service_instance.neutron.API, 'create_port', mock.Mock(return_value=self.service_port)) self.mock_object( service_instance.neutron.API, 'subnet_create', mock.Mock(return_value=service_subnet)) self.mock_object(service_instance.neutron.API, 'delete_port') self.mock_object( instance, '_get_private_router', mock.Mock(return_value=router)) self.mock_object( service_instance.neutron.API, 'router_add_interface') self.mock_object( instance, 'setup_connectivity_with_service_instances', mock.Mock(side_effect=exception.ManilaException('Fake'))) self.mock_object( instance, '_get_cidr_for_subnet', mock.Mock(return_value=cidr)) self.mock_object( instance, '_get_service_subnet', mock.Mock(return_value=None)) self.assertRaises( exception.ManilaException, instance.setup_network, network_info) (instance.setup_connectivity_with_service_instances. 
assert_called_once_with()) instance._get_service_subnet.assert_called_once_with(mock.ANY) instance._get_cidr_for_subnet.assert_called_once_with() self.assertTrue(service_instance.neutron.API.subnet_create.called) self.assertTrue(service_instance.neutron.API.create_port.called) instance._get_private_router.assert_called_once_with( network_info['neutron_net_id'], network_info['neutron_subnet_id']) (service_instance.neutron.API.router_add_interface. assert_called_once_with(router['id'], service_subnet['id'])) service_instance.neutron.API.delete_port.assert_has_calls([ mock.call(self.service_port['id'])]) def test__get_cidr_for_subnet_success(self): expected = ( fake_get_config_option('service_network_cidr').split('/')[0] + '/' + str( fake_get_config_option('service_network_division_mask'))) instance = self._init_neutron_network_plugin() self.mock_object( instance, '_get_all_service_subnets', mock.Mock(return_value=[])) result = instance._get_cidr_for_subnet() self.assertEqual(expected, result) instance._get_all_service_subnets.assert_called_once_with() def test__get_cidr_for_subnet_failure(self): subnets = [] serv_cidr = netaddr.IPNetwork( fake_get_config_option('service_network_cidr')) division_mask = fake_get_config_option('service_network_division_mask') for subnet in serv_cidr.subnet(division_mask): subnets.append(dict(cidr=str(subnet.cidr))) instance = self._init_neutron_network_plugin() self.mock_object( instance, '_get_all_service_subnets', mock.Mock(return_value=subnets)) self.assertRaises( exception.ServiceInstanceException, instance._get_cidr_for_subnet) instance._get_all_service_subnets.assert_called_once_with() def test_setup_connectivity_with_service_instances(self): instance = self._init_neutron_network_plugin() instance.use_admin_port = True instance.admin_network_id = 'fake_admin_network_id' instance.admin_subnet_id = 'fake_admin_subnet_id' interface_name_service = 'fake_interface_name_service' interface_name_admin = 'fake_interface_name_admin' 
fake_division_mask = fake_get_config_option( 'service_network_division_mask') fake_subnet_service = fake_network.FakeSubnet( cidr='10.254.0.0/%s' % fake_division_mask) fake_subnet_admin = fake_network.FakeSubnet(id='fake_admin_subnet_id', cidr='10.0.0.0/24') fake_service_port = fake_network.FakePort(fixed_ips=[ {'subnet_id': fake_subnet_service['id'], 'ip_address': '10.254.0.2'}], mac_address='fake_mac_address') fake_admin_port = fake_network.FakePort(fixed_ips=[ {'subnet_id': fake_subnet_admin['id'], 'ip_address': '10.0.0.4'}], mac_address='fake_mac_address') self.mock_object(instance, '_get_service_port', mock.Mock(side_effect=[fake_service_port, fake_admin_port])) self.mock_object(instance, '_add_fixed_ips_to_service_port', mock.Mock(return_value=fake_service_port)) self.mock_object(instance.vif_driver, 'get_device_name', mock.Mock(side_effect=[interface_name_service, interface_name_admin])) self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(side_effect=[fake_subnet_service, fake_subnet_admin, fake_subnet_admin])) self.mock_object(instance.vif_driver, 'plug') device_mock = mock.Mock() self.mock_object(service_instance.ip_lib, 'IPDevice', mock.Mock(return_value=device_mock)) instance.setup_connectivity_with_service_instances() instance._get_service_port.assert_has_calls([ mock.call(instance.service_network_id, None, 'manila-share'), mock.call('fake_admin_network_id', 'fake_admin_subnet_id', 'manila-admin-share')]) instance.vif_driver.get_device_name.assert_has_calls([ mock.call(fake_service_port), mock.call(fake_admin_port)]) instance.vif_driver.plug.assert_has_calls([ mock.call(interface_name_service, fake_service_port['id'], fake_service_port['mac_address']), mock.call(interface_name_admin, fake_admin_port['id'], fake_admin_port['mac_address'])]) instance.neutron_api.get_subnet.assert_has_calls([ mock.call(fake_subnet_service['id']), mock.call(fake_subnet_admin['id'])]) instance.vif_driver.init_l3.assert_has_calls([ mock.call(interface_name_service, 
['10.254.0.2/%s' % fake_division_mask], clear_cidrs=[]), mock.call(interface_name_admin, ['10.0.0.4/24'], clear_cidrs=[fake_subnet_admin['cidr']])]) service_instance.ip_lib.IPDevice.assert_has_calls([ mock.call(interface_name_service), mock.call(interface_name_admin)]) def test__get_service_port_none_exist(self): instance = self._init_neutron_network_plugin() admin_project_id = 'fake_admin_project_id' fake_port_values = {'device_id': 'manila-share', 'binding:host_id': 'fake_service_network_host'} self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) fake_service_port = fake_network.FakePort(device_id='manila-share') self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[])) self.flags(host='fake-host') self.mock_object(instance.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.mock_object(instance.neutron_api, 'update_port_fixed_ips', mock.Mock(return_value=fake_service_port)) result = instance._get_service_port(instance.service_network_id, None, 'manila-share') instance.neutron_api.list_ports.assert_called_once_with( **fake_port_values) instance.neutron_api.create_port.assert_called_once_with( instance.admin_project_id, instance.service_network_id, device_id='manila-share', device_owner='manila:share', host_id='fake_service_network_host', subnet_id=None, port_security_enabled=False) self.assertFalse(instance.neutron_api.update_port_fixed_ips.called) self.assertEqual(fake_service_port, result) def test__get_service_port_one_exist_on_same_host(self): instance = self._init_neutron_network_plugin() fake_port_values = {'device_id': 'manila-share', 'binding:host_id': 'fake_service_network_host'} fake_service_port = fake_network.FakePort(**fake_port_values) self.flags(host='fake-host') self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[fake_service_port])) self.mock_object(instance.neutron_api, 'create_port', 
mock.Mock(return_value=fake_service_port)) self.mock_object(instance.neutron_api, 'update_port_fixed_ips', mock.Mock(return_value=fake_service_port)) result = instance._get_service_port(instance.service_network_id, None, 'manila-share') instance.neutron_api.list_ports.assert_called_once_with( **fake_port_values) self.assertFalse(instance.neutron_api.create_port.called) self.assertFalse(instance.neutron_api.update_port_fixed_ips.called) self.assertEqual(fake_service_port, result) def test__get_service_port_default_host(self): self.mock_object(self.fake_manager, 'get_config_option', mock.Mock(return_value=None)) instance = self._init_neutron_network_plugin() admin_project_id = 'fake_admin_project_id' fake_port = {'device_id': 'manila-share', 'binding:host_id': 'fake-host'} self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) fake_service_port = fake_network.FakePort(**fake_port) self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[])) self.flags(host='fake-host') self.mock_object(instance.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.mock_object(instance.neutron_api, 'update_port_fixed_ips', mock.Mock(return_value=fake_service_port)) result = instance._get_service_port(instance.service_network_id, None, 'manila-share') instance.neutron_api.create_port.assert_called_once_with( instance.admin_project_id, instance.service_network_id, device_id='manila-share', device_owner='manila:share', host_id='fake-host', subnet_id=None, port_security_enabled=False) self.assertEqual(fake_service_port, result) def test__get_service_port_one_exist_on_different_host(self): instance = self._init_neutron_network_plugin() admin_project_id = 'fake_admin_project_id' fake_port = {'device_id': 'manila-share', 'binding:host_id': 'fake_service_network_host'} self.mock_object( service_instance.neutron.API, 'admin_project_id', mock.Mock(return_value=admin_project_id)) fake_service_port = 
fake_network.FakePort(**fake_port) self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[])) self.flags(host='fake-host') self.mock_object(instance.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.mock_object(instance.neutron_api, 'update_port_fixed_ips', mock.Mock(return_value=fake_service_port)) result = instance._get_service_port(instance.service_network_id, None, 'manila-share') instance.neutron_api.list_ports.assert_called_once_with( **fake_port) instance.neutron_api.create_port.assert_called_once_with( instance.admin_project_id, instance.service_network_id, device_id='manila-share', device_owner='manila:share', host_id='fake_service_network_host', subnet_id=None, port_security_enabled=False) self.assertFalse(instance.neutron_api.update_port_fixed_ips.called) self.assertEqual(fake_service_port, result) def test__get_service_port_two_exist_on_same_host(self): instance = self._init_neutron_network_plugin() fake_service_port = fake_network.FakePort(**{ 'device_id': 'manila-share', 'binding:host_id': 'fake-host'}) self.mock_object( instance.neutron_api, 'list_ports', mock.Mock(return_value=[fake_service_port, fake_service_port])) self.flags(host='fake-host') self.mock_object(instance.neutron_api, 'create_port', mock.Mock(return_value=fake_service_port)) self.assertRaises( exception.ServiceInstanceException, instance._get_service_port, instance.service_network_id, None, 'manila-share') self.assertFalse(instance.neutron_api.create_port.called) def test__add_fixed_ips_to_service_port(self): ip_address1 = '13.0.0.13' subnet_id1 = 'fake_subnet_id1' subnet_id2 = 'fake_subnet_id2' port = dict(id='fooport', fixed_ips=[dict( subnet_id=subnet_id1, ip_address=ip_address1)]) expected = mock.Mock() network = dict(subnets=[subnet_id1, subnet_id2]) instance = self._init_neutron_network_plugin() self.mock_object(instance.neutron_api, 'get_network', mock.Mock(return_value=network)) self.mock_object(instance.neutron_api, 
'update_port_fixed_ips', mock.Mock(return_value=expected)) result = instance._add_fixed_ips_to_service_port(port) self.assertEqual(expected, result) instance.neutron_api.get_network.assert_called_once_with( instance.service_network_id) instance.neutron_api.update_port_fixed_ips.assert_called_once_with( port['id'], dict(fixed_ips=[ dict(subnet_id=subnet_id1, ip_address=ip_address1), dict(subnet_id=subnet_id2)])) def test__get_private_router_success(self): instance = self._init_neutron_network_plugin() network = fake_network.FakeNetwork() subnet = fake_network.FakeSubnet(gateway_ip='fake_ip') router = fake_network.FakeRouter(id='fake_router_id') port = fake_network.FakePort(fixed_ips=[ dict(subnet_id=subnet['id'], ip_address=subnet['gateway_ip'])], device_id=router['id']) self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(return_value=subnet)) self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[port])) self.mock_object(instance.neutron_api, 'show_router', mock.Mock(return_value=router)) result = instance._get_private_router(network['id'], subnet['id']) self.assertEqual(router, result) instance.neutron_api.get_subnet.assert_called_once_with(subnet['id']) instance.neutron_api.list_ports.assert_called_once_with( network_id=network['id']) instance.neutron_api.show_router.assert_called_once_with(router['id']) def test__get_private_router_no_gateway(self): instance = self._init_neutron_network_plugin() subnet = fake_network.FakeSubnet(gateway_ip='') self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(return_value=subnet)) self.assertRaises( exception.ServiceInstanceException, instance._get_private_router, 'fake_network_id', subnet['id']) instance.neutron_api.get_subnet.assert_called_once_with( subnet['id']) def test__get_private_router_subnet_is_not_attached_to_the_router(self): instance = self._init_neutron_network_plugin() network_id = 'fake_network_id' subnet = fake_network.FakeSubnet(gateway_ip='fake_ip') 
self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(return_value=subnet)) self.mock_object(instance.neutron_api, 'list_ports', mock.Mock(return_value=[])) self.assertRaises( exception.ServiceInstanceException, instance._get_private_router, network_id, subnet['id']) instance.neutron_api.get_subnet.assert_called_once_with( subnet['id']) instance.neutron_api.list_ports.assert_called_once_with( network_id=network_id) def test__get_service_subnet_none_found(self): subnet_name = 'fake_subnet_name' instance = self._init_neutron_network_plugin() self.mock_object(instance, '_get_all_service_subnets', mock.Mock(return_value=[])) result = instance._get_service_subnet(subnet_name) self.assertIsNone(result) instance._get_all_service_subnets.assert_called_once_with() def test__get_service_subnet_unused_found(self): subnet_name = 'fake_subnet_name' subnets = [fake_network.FakeSubnet(id='foo', name=''), fake_network.FakeSubnet(id='bar', name='quuz')] instance = self._init_neutron_network_plugin() self.mock_object(instance.neutron_api, 'update_subnet') self.mock_object(instance, '_get_all_service_subnets', mock.Mock(return_value=subnets)) result = instance._get_service_subnet(subnet_name) self.assertEqual(subnets[0], result) instance._get_all_service_subnets.assert_called_once_with() instance.neutron_api.update_subnet.assert_called_once_with( subnets[0]['id'], subnet_name) def test__get_service_subnet_one_found(self): subnet_name = 'fake_subnet_name' subnets = [fake_network.FakeSubnet(id='foo', name='quuz'), fake_network.FakeSubnet(id='bar', name=subnet_name)] instance = self._init_neutron_network_plugin() self.mock_object(instance, '_get_all_service_subnets', mock.Mock(return_value=subnets)) result = instance._get_service_subnet(subnet_name) self.assertEqual(subnets[1], result) instance._get_all_service_subnets.assert_called_once_with() def test__get_service_subnet_two_found(self): subnet_name = 'fake_subnet_name' subnets = [fake_network.FakeSubnet(id='foo', 
name=subnet_name), fake_network.FakeSubnet(id='bar', name=subnet_name)] instance = self._init_neutron_network_plugin() self.mock_object(instance, '_get_all_service_subnets', mock.Mock(return_value=subnets)) self.assertRaises( exception.ServiceInstanceException, instance._get_service_subnet, subnet_name) instance._get_all_service_subnets.assert_called_once_with() def test__get_all_service_subnets(self): subnet_id1 = 'fake_subnet_id1' subnet_id2 = 'fake_subnet_id2' instance = self._init_neutron_network_plugin() network = dict(subnets=[subnet_id1, subnet_id2]) self.mock_object(instance.neutron_api, 'get_subnet', mock.Mock(side_effect=lambda s_id: dict(id=s_id))) self.mock_object(instance.neutron_api, 'get_network', mock.Mock(return_value=network)) result = instance._get_all_service_subnets() self.assertEqual([dict(id=subnet_id1), dict(id=subnet_id2)], result) instance.neutron_api.get_network.assert_called_once_with( instance.service_network_id) instance.neutron_api.get_subnet.assert_has_calls([ mock.call(subnet_id1), mock.call(subnet_id2)]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0376701 manila-21.0.0/manila/tests/share/drivers/vastdata/0000775000175000017500000000000000000000000022050 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/vastdata/__init__.py0000664000175000017500000000000000000000000024147 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/vastdata/test_driver.py0000664000175000017500000006565700000000000024777 0ustar00zuulzuul00000000000000# Copyright 2024 VAST Data Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import unittest from unittest import mock import ddt import netaddr import manila.context as manila_context import manila.exception as exception from manila.share import configuration from manila.share.drivers.vastdata import driver from manila.share.drivers.vastdata import driver_util from manila.tests import fake_share from manila.tests.share.drivers.vastdata.test_rest import fake_metrics @mock.patch( "manila.share.drivers.vastdata.rest.Session.refresh_auth_token", mock.MagicMock() ) @ddt.ddt class VASTShareDriverTestCase(unittest.TestCase): def _create_mocked_rest_api(self): # Create a mock RestApi instance mock_rest_api = mock.MagicMock() # Create mock sub resources with their methods subresources = [ "views", "view_policies", "capacity_metrics", "quotas", "vip_pools", "snapshots", "folders", ] methods = [ "list", "create", "update", "delete", "one", "ensure", "vips" ] for subresource in subresources: mock_subresource = mock.MagicMock() setattr(mock_rest_api, subresource, mock_subresource) for method in methods: mock_method = mock.MagicMock() setattr(mock_subresource, method, mock_method) return mock_rest_api @mock.patch( "manila.share.drivers.vastdata.rest.Session.refresh_auth_token" ) def setUp(self, m_auth_token): super().setUp() self.fake_conf = configuration.Configuration(None) self._context = manila_context.get_admin_context() self._snapshot = fake_share.fake_snapshot_instance() self.fake_conf.set_default("driver_handles_share_servers", False) self.fake_conf.set_default("share_backend_name", "vast") 
self.fake_conf.set_default("vast_mgmt_host", "test") self.fake_conf.set_default("vast_root_export", "/fake") self.fake_conf.set_default("vast_vippool_name", "vippool") self.fake_conf.set_default("vast_mgmt_user", "user") self.fake_conf.set_default("vast_mgmt_password", "password") self._driver = driver.VASTShareDriver( execute=mock.MagicMock(), configuration=self.fake_conf ) self._driver.do_setup(self._context) m_auth_token.assert_called_once() def test_do_setup(self): session = self._driver.rest.session self.assertEqual(self._driver._backend_name, "vast") self.assertEqual(self._driver._vippool_name, "vippool") self.assertEqual(self._driver._root_export, "/fake") self.assertFalse(session.ssl_verify) self.assertEqual(session.base_url, "https://test:443/api") @ddt.data("vast_mgmt_user", "vast_vippool_name", "vast_mgmt_host") def test_do_setup_missing_required_fields(self, missing_field): self.fake_conf.set_default(missing_field, None) _driver = driver.VASTShareDriver( execute=mock.MagicMock(), configuration=self.fake_conf ) with self.assertRaises(exception.VastDriverException): _driver.do_setup(self._context) def test_do_setup_with_api_token(self): self.fake_conf.set_default("vast_mgmt_user", None) self.fake_conf.set_default("vast_mgmt_password", None) self.fake_conf.set_default("vast_api_token", "test_token") _driver = driver.VASTShareDriver( execute=mock.MagicMock(), configuration=self.fake_conf ) _driver.do_setup(self._context) self.assertEqual(_driver.rest.session.token, "test_token") @mock.patch( "manila.share.drivers.vastdata.rest.Session.get", mock.MagicMock(return_value=fake_metrics), ) def test_update_share_stats(self): self._driver._update_share_stats() result = self._driver._stats self.assertEqual(result["share_backend_name"], "vast") self.assertEqual(result["driver_handles_share_servers"], False) self.assertEqual(result["vendor_name"], "VAST STORAGE") self.assertEqual(result["driver_version"], "1.0") self.assertEqual(result["storage_protocol"], "NFS") 
self.assertEqual(result["total_capacity_gb"], 471.1061706542969) self.assertEqual(result["free_capacity_gb"], 450.2256333641708) self.assertEqual(result["reserved_percentage"], 0) self.assertEqual(result["reserved_snapshot_percentage"], 0) self.assertEqual(result["reserved_share_extend_percentage"], 0) self.assertIs(result["qos"], False) self.assertIsNone(result["pools"]) self.assertIs(result["snapshot_support"], True) self.assertIs(result["create_share_from_snapshot_support"], False) self.assertIs(result["revert_to_snapshot_support"], False) self.assertIs(result["mount_snapshot_support"], False) self.assertIsNone(result["replication_domain"]) self.assertIsNone(result["filter_function"]) self.assertIsNone(result["goodness_function"]) self.assertIs(result["security_service_update_support"], False) self.assertIs(result["network_allocation_update_support"], False) self.assertIs(result["share_server_multiple_subnet_support"], False) self.assertIs(result["mount_point_name_support"], False) self.assertEqual(result["data_reduction"], 1.2) self.assertEqual(result["provisioned_capacity_gb"], 20.880537290126085) self.assertEqual( result["share_group_stats"], {"consistent_snapshot_support": None} ) self.assertIs(result["ipv4_support"], True) self.assertIs(result["ipv6_support"], False) @ddt.idata( itertools.product( [1073741824, 1], ["NFS", "SMB"], ["fakeid", None] ) ) @ddt.unpack def test_create_shares(self, capacity, proto, policy): share = fake_share.fake_share(share_proto=proto) mock_rest = self._create_mocked_rest_api() mock_rest.view_policies.ensure.return_value = driver_util.Bunch(id=1) mock_rest.quotas.ensure.return_value = driver_util.Bunch( id=2, hard_limit=capacity ) mock_rest.views.ensure.return_value = driver_util.Bunch( id=3, policy=policy ) mock_rest.vip_pools.vips.return_value = ["1.1.1.0", "1.1.1.1"] with mock.patch.object(self._driver, "rest", mock_rest): if proto != "NFS": with self.assertRaises(exception.InvalidShare) as exc: 
self._driver.create_share(self._context, share) self.assertIn( "Invalid NAS protocol supplied", str(exc.exception) ) elif capacity == 1: with self.assertRaises(exception.ManilaException) as exc: self._driver.create_share(self._context, share) self.assertIn( "Share already exists with different capacity", str(exc.exception) ) else: location = self._driver.create_share(self._context, share) mock_rest.vip_pools.vips.assert_called_once_with( pool_name="vippool" ) mock_rest.view_policies.ensure.assert_called_once_with( name="fakeid" ) mock_rest.quotas.ensure.assert_called_once_with( name="fakeid", path="/fake/manila-fakeid", create_dir=True, hard_limit=capacity, ) mock_rest.views.ensure.assert_called_once_with( name="fakeid", path="/fake/manila-fakeid", policy_id=1 ) self.assertListEqual( location, [ { 'path': '1.1.1.0:/fake/manila-fakeid', 'is_admin_only': False }, { 'path': '1.1.1.1:/fake/manila-fakeid', 'is_admin_only': False }, ] ) if not policy: mock_rest.views.update.assert_called_once_with( 3, policy_id=1 ) else: mock_rest.views.update.assert_not_called() def test_delete_share(self): share = fake_share.fake_share(share_proto="NFS") mock_rest = self._create_mocked_rest_api() with mock.patch.object(self._driver, "rest", mock_rest): self._driver.delete_share(self._context, share) mock_rest.folders.delete.assert_called_once_with( path="/fake/manila-fakeid" ) mock_rest.views.delete.assert_called_once_with(name="fakeid") mock_rest.quotas.delete.assert_called_once_with(name="fakeid") mock_rest.view_policies.delete.assert_called_once_with(name="fakeid") def test_update_access_rules_wrong_proto(self): share = fake_share.fake_share(share_proto="SMB") access_rules = [ { "access_level": "rw", "access_to": "127.0.0.1", "access_type": "ip" } ] res = self._driver.update_access( self._context, share, access_rules, None, None, None ) self.assertIsNone(res) def test_update_access_add_rules_no_policy(self): share = fake_share.fake_share(share_proto="NFS") mock_rest = 
self._create_mocked_rest_api() mock_rest.view_policies.one.return_value = None access_rules = [ { "access_level": "rw", "access_to": "127.0.0.1", "access_type": "ip" } ] with mock.patch.object(self._driver, "rest", mock_rest): with self.assertRaises(exception.ManilaException) as exc: self._driver.update_access( self._context, share, access_rules, None, None, None ) self.assertIn("Policy not found", str(exc.exception)) @ddt.data( (["*"], ["10.10.10.1", "10.10.10.2"]), (["10.10.10.1", "10.10.10.2"], []), (["*"], []), ) @ddt.unpack def test_update_access_add_rules(self, rw, ro): share = fake_share.fake_share(share_proto="NFS") mock_rest = self._create_mocked_rest_api() mock_rest.view_policies.one.return_value = driver_util.Bunch( id=1, nfs_read_write=rw, nfs_read_only=ro ) access_rules = [ { "access_level": "rw", "access_to": "127.0.0.1", "access_type": "ip" } ] with mock.patch.object(self._driver, "rest", mock_rest): failed_rules = self._driver.update_access( self._context, share, access_rules, None, None, None ) expected_ro = set(ro) if rw == ["*"]: expected_rw = {"127.0.0.1"} else: expected_rw = set(["127.0.0.1"] + rw) kw = mock_rest.view_policies.update.call_args.kwargs self.assertEqual(kw["name"], "fakeid") self.assertSetEqual(set(kw["nfs_read_write"]), expected_rw) self.assertSetEqual(set(kw["nfs_read_only"]), expected_ro) self.assertEqual(kw["nfs_no_squash"], ["*"]) self.assertEqual(kw["nfs_root_squash"], ["*"]) self.assertFalse(failed_rules) # and the same for ro mock_rest = self._create_mocked_rest_api() mock_rest.view_policies.one.return_value = driver_util.Bunch( id=1, nfs_read_write=rw, nfs_read_only=ro ) access_rules = [ { "access_level": "ro", "access_to": "127.0.0.1", "access_type": "ip" } ] with mock.patch.object(self._driver, "rest", mock_rest): failed_rules = self._driver.update_access( self._context, share, access_rules, None, None, None ) expected_rw = set(rw) if ro == ["*"]: expected_ro = {"127.0.0.1"} else: expected_ro = set(["127.0.0.1"] + ro) 
kw = mock_rest.view_policies.update.call_args.kwargs self.assertEqual(kw["name"], "fakeid") self.assertSetEqual(set(kw["nfs_read_write"]), expected_rw) self.assertSetEqual(set(kw["nfs_read_only"]), expected_ro) self.assertEqual(kw["nfs_no_squash"], ["*"]) self.assertEqual(kw["nfs_root_squash"], ["*"]) self.assertFalse(failed_rules) @ddt.data( (["*"], ["10.10.10.1", "10.10.10.2"]), (["10.10.10.1", "10.10.10.2"], []), (["*"], []), ) @ddt.unpack def test_update_access_delete_rules(self, rw, ro): share = fake_share.fake_share(share_proto="NFS") mock_rest = self._create_mocked_rest_api() mock_rest.view_policies.one.return_value = driver_util.Bunch( id=1, nfs_read_write=rw, nfs_read_only=ro ) delete_rules = [ { "access_level": "rw", "access_to": "10.10.10.1", "access_type": "ip" } ] with mock.patch.object(self._driver, "rest", mock_rest): failed_rules = self._driver.update_access( self._context, share, None, None, delete_rules, None, ) expected_ro = set(ro) if rw == ["*"]: expected_rw = set(rw) else: expected_rw = set([r for r in rw if r != "10.10.10.1"]) kw = mock_rest.view_policies.update.call_args.kwargs self.assertEqual(kw["name"], "fakeid") self.assertSetEqual(set(kw["nfs_read_write"]), expected_rw) self.assertSetEqual(set(kw["nfs_read_only"]), expected_ro) self.assertEqual(kw["nfs_no_squash"], ["*"]) self.assertEqual(kw["nfs_root_squash"], ["*"]) self.assertFalse(failed_rules) # and the same for ro mock_rest = self._create_mocked_rest_api() mock_rest.view_policies.one.return_value = driver_util.Bunch( id=1, nfs_read_write=rw, nfs_read_only=ro ) delete_rules = [ { "access_level": "ro", "access_to": "10.10.10.1", "access_type": "ip" } ] with mock.patch.object(self._driver, "rest", mock_rest): failed_rules = self._driver.update_access( self._context, share, None, None, delete_rules, None ) expected_rw = set(rw) if ro == ["*"]: expected_ro = set(ro) else: expected_ro = set([r for r in ro if r != "10.10.10.1"]) kw = mock_rest.view_policies.update.call_args.kwargs 
self.assertEqual(kw["name"], "fakeid") self.assertSetEqual(set(kw["nfs_read_write"]), expected_rw) self.assertSetEqual(set(kw["nfs_read_only"]), expected_ro) self.assertEqual(kw["nfs_no_squash"], ["*"]) self.assertEqual(kw["nfs_root_squash"], ["*"]) self.assertFalse(failed_rules) def test_update_access_for_cidr(self): share = fake_share.fake_share(share_proto="NFS") mock_rest = self._create_mocked_rest_api() mock_rest.view_policies.one.return_value = driver_util.Bunch( id=1, nfs_read_write=["10.0.0.1"], nfs_read_only=["*"] ) access_rules = [ { "access_level": "ro", "access_to": "10.0.0.1/29", "access_type": "ip", "access_id": 12345, } ] with mock.patch.object(self._driver, "rest", mock_rest): failed_rules = self._driver.update_access( self._context, share, access_rules, None, None, None ) kw = mock_rest.view_policies.update.call_args.kwargs self.assertEqual(kw["name"], "fakeid") self.assertSetEqual(set(kw["nfs_read_write"]), {"10.0.0.1"}) self.assertSetEqual( set(kw["nfs_read_only"]), { '10.0.0.1', '10.0.0.3', '10.0.0.2', '10.0.0.6', '10.0.0.5', '10.0.0.4' } ) self.assertFalse(failed_rules) delete_rules = [ { "access_level": "ro", "access_to": "10.0.0.1/30", "access_type": "ip", "access_id": 12345, } ] mock_rest.view_policies.one.return_value = driver_util.Bunch( id=1, nfs_read_write=["10.0.0.1"], nfs_read_only=[ '10.0.0.1', '10.0.0.3', '10.0.0.2', '10.0.0.6', '10.0.0.5', '10.0.0.4', ] ) with mock.patch.object(self._driver, "rest", mock_rest): failed_rules = self._driver.update_access( self._context, share, None, None, delete_rules, None ) kw = mock_rest.view_policies.update.call_args.kwargs self.assertEqual(kw["name"], "fakeid") self.assertSetEqual(set(kw["nfs_read_write"]), {"10.0.0.1"}) self.assertSetEqual( set(kw["nfs_read_only"]), {'10.0.0.6', '10.0.0.3', '10.0.0.4', '10.0.0.5'} ) self.assertFalse(failed_rules) def test_update_access_for_invalid_rules(self): share = fake_share.fake_share(share_proto="NFS") mock_rest = self._create_mocked_rest_api() 
mock_rest.view_policies.one.return_value = driver_util.Bunch( id=1, nfs_read_write=["10.0.0.1"], nfs_read_only=["*"] ) access_rules = [ { "access_level": "ry", "access_to": "10.0.0.1", "access_type": "ip", "access_id": 12345, "id": 12345, }, { "access_level": "ro", "access_to": "10.0.0.2", "access_type": "ip", "access_id": 12346, "id": 12346, }, { "access_level": "ro", "access_to": "10.0.0.2/33", "access_type": "ip", "access_id": 12347, "id": 12347, }, { "access_level": "rw", "access_to": "10.0.0.2.4", "access_type": "ip", "access_id": 12348, "id": 12348, } ] with mock.patch.object(self._driver, "rest", mock_rest): failed_rules = self._driver.update_access( self._context, share, access_rules, None, None, None ) kw = mock_rest.view_policies.update.call_args.kwargs self.assertEqual(kw["name"], "fakeid") self.assertSetEqual(set(kw["nfs_read_write"]), {"10.0.0.1"}) self.assertSetEqual(set(kw["nfs_read_only"]), {'10.0.0.2'}) self.assertDictEqual( failed_rules, { 12345: {'state': 'error'}, 12347: {'state': 'error'}, 12348: {'state': 'error'} } ) def test_resize_share_quota_not_found(self): share = fake_share.fake_share(share_proto="NFS") mock_rest = self._create_mocked_rest_api() mock_rest.quotas.one.return_value = None with mock.patch.object(self._driver, "rest", mock_rest): with self.assertRaises(exception.ShareNotFound) as exc: self._driver.extend_share(share, 10000) self.assertIn("could not be found", str(exc.exception)) def test_resize_share_ok(self): share = fake_share.fake_share(share_proto="NFS") mock_rest = self._create_mocked_rest_api() mock_rest.quotas.one.return_value = driver_util.Bunch( id=1, used_effective_capacity=1073741824 ) with mock.patch.object(self._driver, "rest", mock_rest): self._driver.extend_share(share, 50) mock_rest.quotas.update.assert_called_with( 1, hard_limit=53687091200 ) mock_rest.quotas.update.reset() self._driver.shrink_share(share, 20) mock_rest.quotas.update.assert_called_with( 1, hard_limit=21474836480 ) def 
test_resize_share_exceeded_hard_limit(self): share = fake_share.fake_share( share_proto="NFS" ) mock_rest = self._create_mocked_rest_api() mock_rest.quotas.one.return_value = driver_util.Bunch( id=1, used_effective_capacity=10737418240 ) # 10GB with mock.patch.object(self._driver, "rest", mock_rest): with self.assertRaises(exception.ShareShrinkingPossibleDataLoss): self._driver.shrink_share(share, 9.7) self._driver.shrink_share(share, 10) def test_create_snapshot(self): snapshot = driver_util.Bunch( name="fakesnap", share_instance_id="fakeid" ) mock_rest = self._create_mocked_rest_api() with mock.patch.object(self._driver, "rest", mock_rest): self._driver.create_snapshot(self._context, snapshot, None) mock_rest.snapshots.create.assert_called_once_with( path="/fake/manila-fakeid", name="fakesnap" ) def test_delete_snapshot(self): snapshot = driver_util.Bunch( name="fakesnap", share_instance_id="fakeid" ) mock_rest = self._create_mocked_rest_api() with mock.patch.object(self._driver, "rest", mock_rest): self._driver.delete_snapshot(self._context, snapshot, None) mock_rest.snapshots.delete.assert_called_once_with(name="fakesnap") def test_network_allocation_number(self): self.assertEqual(self._driver.get_network_allocations_number(), 0) @ddt.data([], ['fake/path/1', 'fake/path']) def test_ensure_shares(self, fake_export_locations): mock_rest = self._create_mocked_rest_api() mock_rest.view_policies.ensure.return_value = driver_util.Bunch(id=1) mock_rest.quotas.ensure.return_value = driver_util.Bunch( id=2, hard_limit=1073741824 ) mock_rest.views.ensure.return_value = driver_util.Bunch( id=3, policy="test_policy" ) shares = [ fake_share.fake_share( id=_id, share_id=share_id, share_proto="NFS", export_locations=fake_export_locations, ) for _id, share_id in enumerate(["123", "456", "789"], 1) ] mock_rest.vip_pools.vips.return_value = ["1.1.1.0", "1.1.1.1"] with mock.patch.object(self._driver, "rest", mock_rest): locations = self._driver.ensure_shares(self._context, 
shares) common = {"is_admin_only": False} self.assertDictEqual( locations, { 1: { "export_locations": [ {"path": "1.1.1.0:/fake/manila-1", **common}, {"path": "1.1.1.1:/fake/manila-1", **common}, ] }, 2: { "export_locations": [ {"path": "1.1.1.0:/fake/manila-2", **common}, {"path": "1.1.1.1:/fake/manila-2", **common}, ] }, 3: { "export_locations": [ {"path": "1.1.1.0:/fake/manila-3", **common}, {"path": "1.1.1.1:/fake/manila-3", **common}, ] }, }, ) def test_backend_info(self): backend_info = self._driver.get_backend_info(self._context) self.assertDictEqual( backend_info, {'vast_vippool_name': 'vippool', 'vast_mgmt_host': 'test'} ) class TestPolicyPayloadFromRules(unittest.TestCase): def test_policy_payload_from_rules_update(self): rules = [{"access_level": "rw", "access_to": "127.0.0.1"}] policy = mock.MagicMock() policy.nfs_read_write = ["127.0.0.1"] policy.nfs_read_only = [] result = driver.policy_payload_from_rules(rules, policy, "update") self.assertEqual( result, {"nfs_read_write": ["127.0.0.1"], "nfs_read_only": []} ) def test_policy_payload_from_rules_deny(self): rules = [{"access_level": "rw", "access_to": "127.0.0.1"}] policy = mock.MagicMock() policy.nfs_read_write = ["127.0.0.1"] policy.nfs_read_only = [] result = driver.policy_payload_from_rules(rules, policy, "deny") self.assertEqual(result, {"nfs_read_write": [], "nfs_read_only": []}) def test_policy_payload_from_rules_invalid_action(self): rules = [{"access_level": "rw", "access_to": "127.0.0.1"}] with self.assertRaises(ValueError): driver.policy_payload_from_rules(rules, None, "invalid") def test_policy_payload_from_rules_invalid_ip(self): rules = [{"access_level": "rw", "access_to": "1.0.0.257"}] with self.assertRaises(netaddr.core.AddrFormatError): driver.policy_payload_from_rules(rules, None, "deny") class TestValidateAccessRules(unittest.TestCase): def test_validate_access_rules_invalid_type(self): rule = {"access_type": "INVALID", "access_level": "rw"} with 
self.assertRaises(exception.InvalidShareAccess): driver.validate_access_rule(rule) def test_validate_access_rules_invalid_level(self): rule = {"access_type": "ip", "access_level": "INVALID"} with self.assertRaises(exception.InvalidShareAccessLevel): driver.validate_access_rule(rule) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/vastdata/test_driver_util.py0000664000175000017500000002017200000000000026013 0ustar00zuulzuul00000000000000# Copyright 2024 VAST Data Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import pickle from unittest import mock import ddt from manila.share.drivers.vastdata import driver_util from manila import test driver_util.CONF.debug = True @ddt.ddt class TestBunch(test.TestCase): def setUp(self): super(TestBunch, self).setUp() self.bunch = driver_util.Bunch(a=1, b=2) def test_bunch_getattr(self): self.assertEqual(self.bunch.a, 1) def test_bunch_setattr(self): self.bunch.c = 3 self.assertEqual(self.bunch.c, 3) def test_bunch_delattr(self): del self.bunch.a self.assertRaises(AttributeError, lambda: self.bunch.a) def test_bunch_to_dict(self): self.assertEqual(self.bunch.to_dict(), {"a": 1, "b": 2}) def test_bunch_from_dict(self): self.assertEqual( driver_util.Bunch.from_dict({"a": 1, "b": 2}), self.bunch ) def test_bunch_to_json(self): self.assertEqual(self.bunch.to_json(), json.dumps({"a": 1, "b": 2})) def test_bunch_without(self): self.assertEqual(self.bunch.without("a"), driver_util.Bunch(b=2)) def test_bunch_but_with(self): self.assertEqual( self.bunch.but_with(c=3), driver_util.Bunch(a=1, b=2, c=3) ) def test_bunch_delattr_missing(self): self.assertRaises( AttributeError, lambda: self.bunch.__delattr__("non_existing_attribute") ) def test_bunch_from_json(self): json_bunch = json.dumps({"a": 1, "b": 2}) self.assertEqual(driver_util.Bunch.from_json(json_bunch), self.bunch) def test_bunch_render(self): self.assertEqual(self.bunch.render(), "a=1, b=2") def test_bunch_pickle(self): pickled_bunch = pickle.dumps(self.bunch) unpickled_bunch = pickle.loads(pickled_bunch) self.assertEqual(self.bunch, unpickled_bunch) @ddt.data(True, False) def test_bunch_copy(self, deep): copy_bunch = self.bunch.copy(deep=deep) self.assertEqual(copy_bunch, self.bunch) self.assertIsNot(copy_bunch, self.bunch) def test_name_starts_with_underscore_and_digit(self): bunch = driver_util.Bunch() bunch["1"] = "value" self.assertEqual(bunch._1, "value") def test_bunch_recursion(self): x = driver_util.Bunch( a="a", b="b", d=driver_util.Bunch(x="axe", y="why") ) x.d.x 
= x x.d.y = x.b print(x) def test_bunch_repr(self): self.assertEqual(repr(self.bunch), "Bunch(a=1, b=2)") def test_getitem_with_integral_key(self): self.bunch["1"] = "value" self.assertEqual(self.bunch[1], "value") def test_bunch_dir(self): self.assertEqual( set(i for i in dir(self.bunch) if not i.startswith("_")), { "a", "b", "but_with", "clear", "copy", "from_dict", "from_json", "fromkeys", "get", "items", "keys", "pop", "popitem", "render", "setdefault", "to_dict", "to_json", "update", "values", "without", }, ) def test_bunch_edge_cases(self): # Test edge cases for attribute access, setting, and deletion self.bunch["key-with-special-chars_123"] = "value" self.assertEqual(self.bunch["key-with-special-chars_123"], "value") self.bunch["key-with-special-chars_123"] = None self.assertIsNone(self.bunch["key-with-special-chars_123"]) del self.bunch["key-with-special-chars_123"] self.assertRaises( KeyError, lambda: self.bunch["key-with-special-chars_123"] ) def test_bunch_deep_copy(self): nested_bunch = driver_util.Bunch(x=driver_util.Bunch(y=1)) deep_copy = nested_bunch.copy(deep=True) self.assertIsNot(nested_bunch["x"], deep_copy["x"]) self.assertEqual(nested_bunch["x"]["y"], deep_copy["x"]["y"]) def test_bunch_serialization(self): # Test serialization with nested structures nested_bunch = driver_util.Bunch(a=1, b=driver_util.Bunch(c=2)) self.assertEqual(nested_bunch.to_dict(), {"a": 1, "b": {"c": 2}}) self.assertEqual( nested_bunch.to_json(), json.dumps({"a": 1, "b": {"c": 2}}) ) class TestBunchify(test.TestCase): def test_bunchify(self): self.assertEqual( driver_util.bunchify({"a": 1, "b": 2}, c=3), driver_util.Bunch(a=1, b=2, c=3) ) x = driver_util.bunchify(dict(a=[dict(b=5), 9, (1, 2)], c=8)) self.assertEqual(x.a[0].b, 5) self.assertEqual(x.a[1], 9) self.assertIsInstance(x.a[2], tuple) self.assertEqual(x.c, 8) self.assertEqual(x.pop("c"), 8) def test_bunchify_edge_cases(self): # Test edge cases for bunchify function self.assertEqual(driver_util.bunchify({}), 
driver_util.Bunch()) def test_bunchify_nested_structures(self): # Test bunchify with nested structures nested_dict = {"a": [{"b": 1}, 2]} self.assertEqual(driver_util.bunchify(nested_dict).a[0].b, 1) class TestUnbunchify(test.TestCase): def test_unbunchify(self): self.assertEqual( driver_util.unbunchify(driver_util.Bunch(a=1, b=2)), {"a": 1, "b": 2} ) @ddt.ddt class TestGenerateIpRange(test.TestCase): @ddt.data( ( [["15.0.0.1", "15.0.0.4"], ["10.0.0.27", "10.0.0.30"]], [ "15.0.0.1", "15.0.0.2", "15.0.0.3", "15.0.0.4", "10.0.0.27", "10.0.0.28", "10.0.0.29", "10.0.0.30", ], ), ( [["15.0.0.1", "15.0.0.1"], ["10.0.0.20", "10.0.0.20"]], ["15.0.0.1", "10.0.0.20"], ), ([], []), ) @ddt.unpack def test_generate_ip_range(self, ip_ranges, expected): ips = driver_util.generate_ip_range(ip_ranges) assert ips == expected def test_generate_ip_range_edge_cases(self): # Test edge cases for generate_ip_range function self.assertEqual(driver_util.generate_ip_range([]), []) self.assertEqual(driver_util.generate_ip_range( [["15.0.0.1", "15.0.0.1"]]), ["15.0.0.1"] ) def test_generate_ip_range_large_range(self): # Test with a large range of IPs start_ip = "192.168.0.1" end_ip = "192.168.255.255" ips = driver_util.generate_ip_range([[start_ip, end_ip]]) self.assertEqual(len(ips), 65535) class MockClass1: def method1(self): return 1 def _private_method(self): return 2 class TestDecorateMethodsWith(test.TestCase): def test_decorate_methods_with(self): decorated_cls = driver_util.decorate_methods_with( mock.Mock())(MockClass1) self.assertTrue(hasattr(decorated_cls, 'method1')) self.assertTrue(hasattr(decorated_cls, '_private_method')) class MockClass2: @driver_util.verbose_driver_trace def method1(self): return 1 class TestVerboseDriverTrace(test.TestCase): def test_verbose_driver_trace_debug_true(self): mock_instance = MockClass2() with mock.patch.object( driver_util.LOG, 'debug') as mock_debug: mock_instance.method1() self.assertEqual(mock_debug.call_count, 2) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/vastdata/test_rest.py0000664000175000017500000004101400000000000024436 0ustar00zuulzuul00000000000000# Copyright 2024 VAST Data Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import unittest from unittest import mock import ddt import requests from manila import exception as manila_exception from manila.share.drivers.vastdata import driver_util from manila.share.drivers.vastdata import rest as vast_rest fake_metrics = driver_util.Bunch.from_dict( { "object_ids": [1], "prop_list": [ "timestamp", "object_id", "Capacity,drr", "Capacity,physical_space_in_use", "Capacity,physical_space", "Capacity,logical_space", "Capacity,logical_space_in_use", ], "data": [ [ "2024-04-13T14:47:48Z", 1, 1.2, 30635076602.0, 711246584217.0, 505850953728.0, 22370924820.0, ], [ "2024-04-13T14:47:38Z", 1, 1.2, 30635109399.0, 711246584217.0, 505850134528.0, 22370810131.0, ], [ "2024-04-13T14:47:28Z", 1, 1.2, 30635142195.0, 711246584217.0, 505849217024.0, 22370720020.0, ], [ "2024-04-13T14:47:18Z", 1, 1.2, 30635174991.0, 711246584217.0, 505848365056.0, 22370654484.0, ], [ "2024-04-13T14:47:08Z", 1, 1.2, 30635207787.0, 711246584217.0, 505847447552.0, 22420396308.0, ], [ "2024-04-13T14:46:58Z", 1, 1.2, 30635248783.0, 711246584217.0, 505846398976.0, 22420306196.0, ], ], "granularity": None, } ) class 
TestSession(unittest.TestCase): def setUp(self): self.session = vast_rest.Session( "host", "username", "password", "", False, "1.0", ) @mock.patch("requests.Session.request") def test_refresh_auth_token_success(self, mock_request): mock_request.return_value.json.return_value = {"access": "test_token"} self.session.refresh_auth_token() self.assertEqual( self.session.headers["authorization"], "Bearer test_token" ) @mock.patch("requests.Session.request") def test_refresh_auth_token_failure(self, mock_request): mock_request.side_effect = ConnectionError() with self.assertRaises(manila_exception.VastApiException): self.session.refresh_auth_token() @mock.patch("requests.Session.request") def test_request_success(self, mock_request): mock_request.return_value.status_code = 200 self.session.request( "GET", "test_method", log_result=False, params={"foo": "bar"} ) mock_request.assert_called_once_with( "GET", "https://host/api/test_method/", verify=False, params={"foo": "bar"} ) @mock.patch("requests.Session.request") def test_request_failure_400(self, mock_request): mock_request.return_value.status_code = 400 mock_request.return_value.text = "foo/bar" with self.assertRaises(manila_exception.VastApiException): self.session.request( "POST", "test_method", data={"data": {"foo": "bar"}} ) def test_request_failure_500(self): resp = requests.Response() resp.status_code = 500 resp.raw = io.BytesIO(b"Server error") with mock.patch( "requests.Session.request", new=lambda *a, **k: resp ): with self.assertRaises(manila_exception.VastApiException) as exc: self.session.request("GET", "test_method", log_result=False) self.assertIn("Server Error", str(exc.exception)) def test_request_no_return_content(self): resp = requests.Response() resp.status_code = 200 resp.raw = io.BytesIO(b"") with mock.patch( "requests.Session.request", new=lambda *a, **k: resp ): res = self.session.request("GET", "test_method") self.assertFalse(res) @mock.patch( 
"manila.share.drivers.vastdata.rest.Session.refresh_auth_token", mock.MagicMock() ) def test_refresh_token_retries(self): resp = requests.Response() resp.status_code = 403 resp.raw = io.BytesIO(b"Token is invalid") with mock.patch("requests.Session.request", new=lambda *a, **k: resp): with self.assertRaises(manila_exception.VastApiRetry): self.session.request("POST", "test_method", foo="bar") def test_getattr_with_underscore(self): with self.assertRaises(AttributeError): self.session.__getattr__("_private") @mock.patch.object(vast_rest.Session, "request") def test_getattr_without_underscore(self, mock_request): attr = "public" params = {"key": "value"} self.session.__getattr__(attr)(**params) mock_request.assert_called_once_with("get", attr, params=params) class TestVastResource(unittest.TestCase): def setUp(self): self.mock_rest = mock.MagicMock() self.vast_resource = vast_rest.VastResource(self.mock_rest) def test_list_with_filtering_params(self): self.vast_resource.list(name="test") self.mock_rest.session.get.assert_called_with( self.vast_resource.resource_name, params={"name": "test"} ) def test_create_with_provided_params(self): self.vast_resource.create(name="test", size=10) self.mock_rest.session.post.assert_called_with( self.vast_resource.resource_name, data={"name": "test", "size": 10} ) def test_update_with_provided_params(self): self.vast_resource.update("1", name="test", size=10) self.mock_rest.session.patch.assert_called_with( f"{self.vast_resource.resource_name}/1", data={"name": "test", "size": 10} ) def test_delete_when_entry_not_found(self): self.vast_resource.one = mock.MagicMock(return_value=None) self.vast_resource.delete("test") self.mock_rest.session.delete.assert_not_called() def test_delete_when_entry_found(self): mock_entry = mock.MagicMock() mock_entry.id = "1" self.vast_resource.one = mock.MagicMock(return_value=mock_entry) self.vast_resource.delete("test") self.mock_rest.session.delete.assert_called_with( 
f"{self.vast_resource.resource_name}/{mock_entry.id}" ) def test_one_when_no_entries_found(self): self.vast_resource.list = mock.MagicMock(return_value=[]) result = self.vast_resource.one("test") self.assertIsNone(result) def test_one_when_multiple_entries_found(self): self.vast_resource.list = mock.MagicMock( return_value=[mock.MagicMock(), mock.MagicMock()] ) with self.assertRaises(manila_exception.VastDriverException): self.vast_resource.one("test") def test_one_when_single_entry_found(self): mock_entry = mock.MagicMock() self.vast_resource.list = mock.MagicMock(return_value=[mock_entry]) result = self.vast_resource.one("test") self.assertEqual(result, mock_entry) def test_ensure_when_entry_not_found(self): self.vast_resource.one = mock.MagicMock(return_value=None) mock_entry = mock.MagicMock() self.vast_resource.create = mock.MagicMock(return_value=mock_entry) result = self.vast_resource.ensure("test", size=10) self.assertEqual(result, mock_entry) def test_ensure_when_entry_found(self): mock_entry = mock.MagicMock() self.vast_resource.one = mock.MagicMock(return_value=mock_entry) result = self.vast_resource.ensure("test", size=10) self.assertEqual(result, mock_entry) class ViewTest(unittest.TestCase): @mock.patch( "manila.share.drivers.vastdata.rest.Session.refresh_auth_token", mock.MagicMock() ) def test_view_create(self): with mock.patch( "manila.share.drivers.vastdata.rest.Session.post" ) as mock_session: rest_api = vast_rest.RestApi( "host", "username", "password", "", True, "1.0" ) rest_api.views.create("test-view", "/test", 1) self.assertEqual(("views",), mock_session.call_args.args) self.assertDictEqual( { "data": { "name": "test-view", "path": "/test", "policy_id": 1, "create_dir": True, "protocols": ["NFS"], } }, mock_session.call_args.kwargs, ) @mock.patch( "manila.share.drivers.vastdata.rest.Session.refresh_auth_token", mock.MagicMock() ) @mock.patch( "manila.share.drivers.vastdata.rest.Session.get", mock.MagicMock(return_value=fake_metrics), ) class 
TestCapacityMetrics(unittest.TestCase): def test_capacity_metrics(self): metrics_list = [ "Capacity,drr", "Capacity,logical_space", "Capacity,logical_space_in_use", "Capacity,physical_space", "Capacity,physical_space_in_use", ] expected = { "": 1, "drr": 1.2, "physical_space_in_use": 30635248783.0, "physical_space": 711246584217.0, "logical_space": 505846398976.0, "logical_space_in_use": 22420306196.0, } rest_api = vast_rest.RestApi( "host", "username", "password", "", True, "1.0" ) metrics = rest_api.capacity_metrics.get(metrics_list) self.assertDictEqual(expected, metrics) @mock.patch( "manila.share.drivers.vastdata.rest.Session.refresh_auth_token", mock.MagicMock() ) @ddt.ddt class TestFolders(unittest.TestCase): @mock.patch( "manila.share.drivers.vastdata.rest.Session.refresh_auth_token", mock.MagicMock() ) def setUp(self): self.rest_api = vast_rest.RestApi( "host", "username", "password", "", True, "1.0", ) @ddt.data( "4.3.9", "4.0.11.12", "3.4.6.123.1", "4.5.6-1", "4.6.0", "4.6.0-1", "4.6.0-1.1", "4.6.9", ) def test_requisite_decorator(self, cluster_version): """Test `requisite` decorator produces exception when cluster version doesn't met requirements """ with mock.patch( "manila.share.drivers.vastdata.rest.RestApi.get_sw_version", new=lambda s: cluster_version, ): self.assertRaises( manila_exception.VastDriverException, lambda: self.rest_api.folders.delete("/abc") ) def test_trash_api_disabled(self): def raise_http_err(*args, **kwargs): resp = requests.Response() resp.status_code = 400 resp.raw = io.BytesIO(b"trash folder disabled") raise manila_exception.VastApiException(message=resp.text) with ( mock.patch( "manila.share.drivers.vastdata.rest.Session.delete", side_effect=raise_http_err, ), mock.patch( "manila.share.drivers.vastdata.rest.RestApi.get_sw_version", new=lambda s: "5.0.0", ), ): with self.assertRaises( manila_exception.VastDriverException ) as exc: self.rest_api.folders.delete("/abc") self.assertIn("Trash Folder Access is disabled", 
str(exc.exception)) def test_trash_api_unpredictable_error(self): def raise_http_err(*args, **kwargs): raise RuntimeError() with ( mock.patch( "manila.share.drivers.vastdata.rest.Session.delete", side_effect=raise_http_err, ), mock.patch( "manila.share.drivers.vastdata.rest.RestApi.get_sw_version", new=lambda s: "5.0.0", ), ): with self.assertRaises(RuntimeError): self.rest_api.folders.delete("/abc") def test_double_deletion(self): def raise_http_err(*args, **kwargs): resp = requests.Response() resp.status_code = 400 resp.raw = io.BytesIO(b"no such directory") raise manila_exception.VastApiException(message=resp.text) with ( mock.patch( "manila.share.drivers.vastdata.rest.Session.delete", side_effect=raise_http_err, ), mock.patch( "manila.share.drivers.vastdata.rest.RestApi.get_sw_version", new=lambda s: "5.0.0", ), ): with self.assertLogs(level="DEBUG") as cm: self.rest_api.folders.delete("/abc") self.assertIn( "remote directory might have been removed earlier", str(cm.output) ) class VipPoolTest(unittest.TestCase): @mock.patch( "manila.share.drivers.vastdata.rest.Session.refresh_auth_token", mock.MagicMock() ) def setUp(self): self.rest_api = vast_rest.RestApi( "host", "username", "password", "", True, "1.0" ) def test_no_vipool(self): with mock.patch( "manila.share.drivers.vastdata.rest.Session.get", return_value=[] ): with self.assertRaises( manila_exception.VastDriverException ) as exc: self.rest_api.vip_pools.vips("test-vip") self.assertIn("No vip pool found", str(exc.exception)) def test_no_vips(self): vippool = driver_util.Bunch(ip_ranges=[]) with mock.patch( "manila.share.drivers.vastdata.rest.Session.get", return_value=[vippool] ): with self.assertRaises( manila_exception.VastDriverException ) as exc: self.rest_api.vip_pools.vips("test-vip") self.assertIn( "Pool test-vip has no available vips", str(exc.exception) ) def test_vips_ok(self): vippool = driver_util.Bunch( ip_ranges=[["15.0.0.1", "15.0.0.4"], ["10.0.0.27", "10.0.0.30"]] ) expected = [ 
"15.0.0.1", "15.0.0.2", "15.0.0.3", "15.0.0.4", "10.0.0.27", "10.0.0.28", "10.0.0.29", "10.0.0.30", ] with mock.patch( "manila.share.drivers.vastdata.rest.Session.get", return_value=[vippool] ): vips = self.rest_api.vip_pools.vips("test-vip") self.assertListEqual(vips, expected) class TestRestApi(unittest.TestCase): @mock.patch("manila.share.drivers.vastdata.rest.Session") def test_get_sw_version(self, mock_session): mock_session.return_value.versions.return_value = [ mock.MagicMock(sys_version="1.0") ] rest_api = vast_rest.RestApi( "host", "username", "password", "", True, "1.0", ) version = rest_api.get_sw_version() self.assertEqual(version, "1.0") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.0376701 manila-21.0.0/manila/tests/share/drivers/veritas/0000775000175000017500000000000000000000000021716 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/veritas/__init__.py0000664000175000017500000000000000000000000024015 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/veritas/test_veritas_isa.py0000664000175000017500000005605300000000000025651 0ustar00zuulzuul00000000000000# Copyright 2017 Veritas Technologies LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit tests for Veritas Manila driver. """ import hashlib import json from unittest import mock from oslo_config import cfg import requests from manila import context from manila import exception from manila.share import configuration as conf from manila.share.drivers.veritas import veritas_isa from manila import test CONF = cfg.CONF FAKE_BACKEND = 'fake_backend' class MockResponse(object): def __init__(self): self.status_code = 200 def json(self): data = {'fake_key': 'fake_val'} return json.dumps(data) class ACCESSShareDriverTestCase(test.TestCase): """Tests ACCESSShareDriver.""" share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'export_locations': [{'path': '10.20.30.40:/vx/fake_location'}], 'snapshot_id': False } share2 = { 'id': 'fakeid2', 'name': 'fakename2', 'size': 4, 'share_proto': 'NFS', } share3 = { 'id': 'fakeid3', 'name': 'fakename3', 'size': 2, 'share_proto': 'NFS', 'export_location': '/vx/fake_location', 'snapshot_id': True } snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_size': 1, 'share_proto': 'NFS', 'snapshot_id': 'fake_snap_id', } access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'access_level': 'rw', 'state': 'active', } access2 = { 'id': 'fakeaccid2', 'access_type': 'user', 'access_to': '10.0.0.3', 'access_level': 'rw', 'state': 'active', } access3 = { 'id': 'fakeaccid3', 'access_type': 'ip', 'access_to': '10.0.0.4', 'access_level': 'rw+', 'state': 'active', } access4 = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'access_level': 'ro', 'state': 'active', } def setUp(self): super(ACCESSShareDriverTestCase, self).setUp() self._create_fake_config() lcfg = self.configuration self._context = context.get_admin_context() self._driver = veritas_isa.ACCESSShareDriver(False, configuration=lcfg) self._driver.do_setup(self._context) def _create_fake_config(self): def _safe_get(opt): return 
getattr(self.configuration, opt) self.mock_object(veritas_isa.ACCESSShareDriver, '_authenticate_access') self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.safe_get = mock.Mock(side_effect=_safe_get) self.configuration.va_server_ip = '1.1.1.1' self.configuration.va_pool = 'pool1' self.configuration.va_user = 'user' self.configuration.va_pwd = 'passwd' self.configuration.va_port = 14161 self.configuration.va_ssl = 'False' self.configuration.va_fstype = 'simple' self.configuration.network_config_group = 'fake_network_config_group' self.configuration.admin_network_config_group = ( 'fake_admin_network_config_group') self.configuration.driver_handles_share_servers = False self.configuration.share_backend_name = FAKE_BACKEND self.configuration.replication_domain = 'Disable' self.configuration.filter_function = 'Disable' self.configuration.goodness_function = 'Disable' self.configuration.max_shares_per_share_server = -1 self.configuration.max_share_server_size = -1 def test_create_share(self): self.mock_object(self._driver, '_get_va_share_name') self.mock_object(self._driver, '_get_va_share_path') self.mock_object(self._driver, '_get_vip') self.mock_object(self._driver, '_access_api') length = len(self.share['name']) index = int(length / 2) name1 = self.share['name'][:index] name2 = self.share['name'][index:] crc1 = hashlib.md5(name1.encode('utf-8')).hexdigest()[:8] crc2 = hashlib.md5(name2.encode('utf-8')).hexdigest()[:8] share_name_to_ret = crc1 + '-' + crc2 share_path_to_ret = '/vx/' + crc1 + '-' + crc2 self._driver._get_va_share_name.return_value = share_name_to_ret self._driver._get_va_share_path.return_value = share_path_to_ret self._driver._get_vip.return_value = '1.1.1.1' self._driver.create_share(self._context, self.share) self.assertEqual(1, self._driver._get_vip.call_count) self.assertEqual(1, self._driver._get_va_share_name.call_count) self.assertEqual(1, self._driver._get_va_share_path.call_count) def test_create_share_negative(self): 
self.mock_object(self._driver, '_access_api') self._driver._access_api.return_value = False self.assertRaises(exception.ShareBackendException, self._driver.create_share, self._context, self.share) def test_create_share_from_snapshot(self): self.mock_object(self._driver, '_get_vip') sharename = self._driver._get_va_share_name( self.snapshot['share_name']) snapname = self._driver._get_va_snap_name(self.snapshot['name']) sharepath = self._driver._get_va_share_path(sharename) self._driver._get_vip.return_value = '1.1.1.1' vip = self._driver._get_vip() location = (str(vip) + ':' + str(sharepath) + ':' + str(snapname)) ret = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot) self.assertEqual(location, ret) def test_delete_share(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_does_item_exist_at_va_backend') self._driver._does_item_exist_at_va_backend.return_value = True self._driver.delete_share(self._context, self.share) self.assertEqual(2, self._driver._access_api.call_count) def test_delete_share_negative(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_does_item_exist_at_va_backend') self._driver._does_item_exist_at_va_backend.return_value = True self._driver._access_api.return_value = False self.assertRaises(exception.ShareBackendException, self._driver.delete_share, self._context, self.share) def test_delete_share_if_share_created_from_snap(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_does_item_exist_at_va_backend') self._driver.delete_share(self._context, self.share3) self.assertEqual(0, (self._driver. 
_does_item_exist_at_va_backend.call_count)) self.assertEqual(0, self._driver._access_api.call_count) def test_delete_share_if_not_present_at_backend(self): self.mock_object(self._driver, '_does_item_exist_at_va_backend') self.mock_object(self._driver, '_access_api') self._driver._does_item_exist_at_va_backend.return_value = False self._driver.delete_share(self._context, self.share) self.assertEqual(1, (self._driver. _does_item_exist_at_va_backend.call_count)) self.assertEqual(0, self._driver._access_api.call_count) def test_create_snapshot(self): self.mock_object(self._driver, '_access_api') self._driver.create_snapshot(self._context, self.snapshot) self.assertEqual(2, self._driver._access_api.call_count) def test_create_snapshot_negative(self): self.mock_object(self._driver, '_access_api') self._driver._access_api.return_value = False self.assertRaises(exception.ShareBackendException, self._driver.create_snapshot, self._context, self.snapshot) def test_delete_snapshot(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_does_item_exist_at_va_backend') self._driver._does_item_exist_at_va_backend.return_value = True self._driver.delete_snapshot(self._context, self.snapshot) self.assertEqual(2, self._driver._access_api.call_count) def test_delete_snapshot_negative(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_does_item_exist_at_va_backend') self._driver._does_item_exist_at_va_backend.return_value = True self._driver._access_api.return_value = False self.assertRaises(exception.ShareBackendException, self._driver.delete_snapshot, self._context, self.snapshot) def test_delete_snapshot_if_not_present_at_backend(self): self.mock_object(self._driver, '_does_item_exist_at_va_backend') self.mock_object(self._driver, '_access_api') self._driver._does_item_exist_at_va_backend.return_value = False self._driver.delete_snapshot(self._context, self.snapshot) self.assertEqual(1, (self._driver. 
_does_item_exist_at_va_backend.call_count)) self.assertEqual(0, self._driver._access_api.call_count) def test_update_access_for_allow(self): self.mock_object(self._driver, '_access_api') self._driver.update_access(self._context, self.share, [], [self.access], [], []) self.assertEqual(2, self._driver._access_api.call_count) def test_update_access_for_allow_negative(self): self.mock_object(self._driver, '_access_api') self._driver._access_api.return_value = False self.assertRaises(exception.ShareBackendException, self._driver.update_access, self._context, self.share, [], [self.access], [], []) self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, self._context, self.share, [], [self.access2], [], []) self.assertRaises(exception.InvalidShareAccessLevel, self._driver.update_access, self._context, self.share, [], [self.access3], [], []) def test_update_access_for_deny(self): self.mock_object(self._driver, '_access_api') self._driver.update_access(self._context, self.share, [], [], [self.access], []) self.assertEqual(2, self._driver._access_api.call_count) def test_update_access_for_deny_negative(self): self.mock_object(self._driver, '_access_api') self._driver._access_api.return_value = False self.assertRaises(exception.ShareBackendException, self._driver.update_access, self._context, self.share, [], [], [self.access], []) def test_update_access_for_deny_for_invalid_access_type(self): self.mock_object(self._driver, '_access_api') self._driver.update_access(self._context, self.share, [], [], [self.access2], []) self.assertEqual(0, self._driver._access_api.call_count) def test_update_access_for_empty_rule_list(self): self.mock_object(self._driver, '_allow_access') self.mock_object(self._driver, '_deny_access') self._driver.update_access(self._context, self.share, [], [], [], []) self.assertEqual(0, self._driver._allow_access.call_count) self.assertEqual(0, self._driver._deny_access.call_count) def test_update_access_for_access_rules(self): 
self.mock_object(self._driver, '_fetch_existing_rule') self.mock_object(self._driver, '_allow_access') self.mock_object(self._driver, '_deny_access') existing_a_rules = [{'access_level': 'rw', 'access_type': 'ip', 'access_to': '10.0.0.2'}, {'access_level': 'rw', 'access_type': 'ip', 'access_to': '10.0.0.3'}] self._driver._fetch_existing_rule.return_value = existing_a_rules d_rule = self._driver._return_access_lists_difference(existing_a_rules, [self.access4]) a_rule = self._driver._return_access_lists_difference([self.access4], existing_a_rules) self._driver.update_access(self._context, self.share, [self.access4], [], [], []) self.assertEqual(d_rule, existing_a_rules) self.assertEqual(a_rule, [self.access4]) self.assertEqual(1, self._driver._allow_access.call_count) self.assertEqual(2, self._driver._deny_access.call_count) def test_extend_share(self): self.mock_object(self._driver, '_access_api') new_size = 3 self._driver.extend_share(self.share, new_size) self.assertEqual(1, self._driver._access_api.call_count) def test_extend_share_negative(self): self.mock_object(self._driver, '_access_api') new_size = 3 self._driver._access_api.return_value = False self.assertRaises(exception.ShareBackendException, self._driver.extend_share, self.share, new_size) def test_shrink_share(self): self.mock_object(self._driver, '_access_api') new_size = 3 self._driver.shrink_share(self.share2, new_size) self.assertEqual(1, self._driver._access_api.call_count) def test_shrink_share_negative(self): self.mock_object(self._driver, '_access_api') new_size = 3 self._driver._access_api.return_value = False self.assertRaises(exception.ShareBackendException, self._driver.shrink_share, self.share2, new_size) def test__get_access_pool_details(self): self.mock_object(self._driver, '_access_api') pool_details = [] pool_details_dict = {} pool_details_dict['device_group_name'] = 'fake_pool' pool_details_dict['capacity'] = 10737418240 pool_details_dict['used_size'] = 9663676416 
pool_details.append(pool_details_dict) pool_details_dict2 = {} pool_details_dict2['device_group_name'] = self.configuration.va_pool pool_details_dict2['capacity'] = 10737418240 pool_details_dict2['used_size'] = 9663676416 pool_details.append(pool_details_dict2) self._driver._access_api.return_value = pool_details total_space, free_space = self._driver._get_access_pool_details() self.assertEqual(10, total_space) self.assertEqual(1, free_space) def test__get_access_pool_details_negative(self): self.mock_object(self._driver, '_access_api') pool_details = [] self._driver._access_api.return_value = pool_details self.assertRaises(exception.ShareBackendException, self._driver._get_access_pool_details) def test__update_share_stats(self): self.mock_object(self._driver, '_authenticate_access') self.mock_object(self._driver, '_get_access_pool_details') self._driver._get_access_pool_details.return_value = (10, 9) self._driver._update_share_stats() data = { 'share_backend_name': FAKE_BACKEND, 'vendor_name': 'Veritas', 'driver_version': '1.0', 'storage_protocol': 'NFS', 'total_capacity_gb': 10, 'free_capacity_gb': 9, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'QoS_support': False, 'create_share_from_snapshot_support': True, 'driver_handles_share_servers': False, 'filter_function': 'Disable', 'goodness_function': 'Disable', 'mount_point_name_support': False, 'ipv4_support': True, 'ipv6_support': False, 'mount_snapshot_support': False, 'pools': None, 'qos': False, 'replication_domain': 'Disable', 'revert_to_snapshot_support': False, 'share_group_stats': {'consistent_snapshot_support': None}, 'snapshot_support': True, 'security_service_update_support': False, 'share_server_multiple_subnet_support': False, 'network_allocation_update_support': False, 'share_replicas_migration_support': False, 'encryption_support': None, } self.assertEqual(data, self._driver._stats) def test__get_vip(self): self.mock_object(self._driver, 
'_get_access_ips') pool_list = [] ip1 = {'isconsoleip': 1, 'type': 'Virtual', 'status': 'ONLINE', 'ip': '1.1.1.2'} ip2 = {'isconsoleip': 0, 'type': 'Virtual', 'status': 'ONLINE', 'ip': '1.1.1.4'} ip3 = {'isconsoleip': 0, 'type': 'Virtual', 'status': 'OFFLINE', 'ip': '1.1.1.5'} ip4 = {'isconsoleip': 0, 'type': 'Physical', 'status': 'OFFLINE', 'ip': '1.1.1.6'} pool_list = [ip1, ip2, ip3, ip4] self._driver._get_access_ips.return_value = pool_list vip = self._driver._get_vip() self.assertEqual('1.1.1.4', vip) def test__get_access_ips(self): self.mock_object(self._driver, '_access_api') ip_list = ['1.1.1.2', '1.1.2.3', '1.1.1.4'] self._driver._access_api.return_value = ip_list ret_value = self._driver._get_access_ips(self._driver.session, self._driver.host) self.assertEqual(ret_value, ip_list) def test__access_api(self): self.mock_object(requests, 'session') provider = '%s:%s' % (self._driver.host, self._driver._port) path = '/fake/path' input_data = {} mock_response = MockResponse() session = requests.session data = {'fake_key': 'fake_val'} json_data = json.dumps(data) session.request.return_value = mock_response ret_value = self._driver._access_api(session, provider, path, json.dumps(input_data), 'GET') self.assertEqual(json_data, ret_value) def test__access_api_ret_for_update_object(self): self.mock_object(requests, 'session') provider = '%s:%s' % (self._driver.host, self._driver._port) path = self._driver._update_object input_data = None mock_response = MockResponse() session = requests.session session.request.return_value = mock_response ret = self._driver._access_api(session, provider, path, input_data, 'GET') self.assertTrue(ret) def test__access_api_negative(self): session = self._driver.session provider = '%s:%s' % (self._driver.host, self._driver._port) path = '/fake/path' input_data = {} ret_value = self._driver._access_api(session, provider, path, json.dumps(input_data), 'GET') self.assertEqual(False, ret_value) def test__get_api(self): provider = '%s:%s' % 
(self._driver.host, self._driver._port) tail = '/fake/path' ret = self._driver._get_api(provider, tail) api_root = 'https://%s/api' % (provider) to_be_ret = api_root + tail self.assertEqual(to_be_ret, ret) def test__does_item_exist_at_va_backend(self): self.mock_object(self._driver, '_access_api') item_name = 'fake_item' path = '/fake/path' fake_item_list = [{'name': item_name}] self._driver._access_api.return_value = fake_item_list ret_value = self._driver._does_item_exist_at_va_backend(item_name, path) self.assertTrue(ret_value) def test__does_item_exist_at_va_backend_negative(self): self.mock_object(self._driver, '_access_api') item_name = 'fake_item' path = '/fake/path' fake_item_list = [{'name': 'item2'}] self._driver._access_api.return_value = fake_item_list ret_value = self._driver._does_item_exist_at_va_backend(item_name, path) self.assertEqual(False, ret_value) def test__fetch_existing_rule(self): self.mock_object(self._driver, '_access_api') fake_share = 'fake-share' fake_access_list = [] list1 = [] list1.append({ 'status': 'online', 'name': '/vx/fake-share', 'host_name': '10.0.0.1', 'privilege': 'rw' }) list1.append({ 'status': 'online', 'name': '/vx/fake-share', 'host_name': '10.0.0.2', 'privilege': 'rw' }) list1.append({ 'status': 'online', 'name': '/vx/fake-share', 'host_name': '10.0.0.3', 'privilege': 'ro' }) list1.append({ 'status': 'online', 'name': '/vx/fake-share2', 'host_name': '10.0.0.4', 'privilege': 'rw' }) fake_access_list.append({ 'shareType': 'NFS', 'shares': list1 }) fake_access_list.append({ 'shareType': 'CIFS', 'shares': [] }) ret_access_list = [] ret_access_list.append({ 'access_to': '10.0.0.1', 'access_level': 'rw', 'access_type': 'ip' }) ret_access_list.append({ 'access_to': '10.0.0.2', 'access_level': 'rw', 'access_type': 'ip' }) ret_access_list.append({ 'access_to': '10.0.0.3', 'access_level': 'ro', 'access_type': 'ip' }) self._driver._access_api.return_value = fake_access_list ret_value = 
self._driver._fetch_existing_rule(fake_share) self.assertEqual(ret_access_list, ret_value) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04167 manila-21.0.0/manila/tests/share/drivers/windows/0000775000175000017500000000000000000000000021733 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/windows/__init__.py0000664000175000017500000000000000000000000024032 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/windows/test_service_instance.py0000664000175000017500000004002500000000000026671 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from unittest import mock import ddt from oslo_concurrency import processutils from oslo_config import cfg from manila import exception from manila.share import configuration from manila.share.drivers import service_instance as generic_service_instance from manila.share.drivers.windows import service_instance from manila.share.drivers.windows import windows_utils from manila import test CONF = cfg.CONF CONF.import_opt('driver_handles_share_servers', 'manila.share.driver') CONF.register_opts(generic_service_instance.common_opts) serv_mgr_cls = service_instance.WindowsServiceInstanceManager generic_serv_mgr_cls = generic_service_instance.ServiceInstanceManager @ddt.ddt class WindowsServiceInstanceManagerTestCase(test.TestCase): _FAKE_SERVER = {'ip': mock.sentinel.ip, 'instance_id': mock.sentinel.instance_id} @mock.patch.object(windows_utils, 'WindowsUtils') @mock.patch.object(serv_mgr_cls, '_check_auth_mode') def setUp(self, mock_check_auth, mock_utils_cls): self.flags(service_instance_user=mock.sentinel.username) self._remote_execute = mock.Mock() fake_conf = configuration.Configuration(None) self._mgr = serv_mgr_cls(remote_execute=self._remote_execute, driver_config=fake_conf) self._windows_utils = mock_utils_cls.return_value super(WindowsServiceInstanceManagerTestCase, self).setUp() @ddt.data({}, {'use_cert_auth': False}, {'use_cert_auth': False, 'valid_pass_complexity': False}, {'certs_exist': False}) @mock.patch('os.path.exists') @mock.patch.object(serv_mgr_cls, '_check_password_complexity') @ddt.unpack def test_check_auth_mode(self, mock_check_complexity, mock_path_exists, use_cert_auth=True, certs_exist=True, valid_pass_complexity=True): self.flags(service_instance_password=mock.sentinel.password) self._mgr._cert_pem_path = mock.sentinel.cert_path self._mgr._cert_key_pem_path = mock.sentinel.key_path mock_path_exists.return_value = certs_exist mock_check_complexity.return_value = valid_pass_complexity self._mgr._use_cert_auth = use_cert_auth 
invalid_auth = ((use_cert_auth and not certs_exist) or not valid_pass_complexity) if invalid_auth: self.assertRaises(exception.ServiceInstanceException, self._mgr._check_auth_mode) else: self._mgr._check_auth_mode() if not use_cert_auth: mock_check_complexity.assert_called_once_with( str(mock.sentinel.password)) @ddt.data(False, True) def test_get_auth_info(self, use_cert_auth): self._mgr._use_cert_auth = use_cert_auth self._mgr._cert_pem_path = mock.sentinel.cert_path self._mgr._cert_key_pem_path = mock.sentinel.key_path auth_info = self._mgr._get_auth_info() expected_auth_info = {'use_cert_auth': use_cert_auth} if use_cert_auth: expected_auth_info.update(cert_pem_path=mock.sentinel.cert_path, cert_key_pem_path=mock.sentinel.key_path) self.assertEqual(expected_auth_info, auth_info) @mock.patch.object(serv_mgr_cls, '_get_auth_info') @mock.patch.object(generic_serv_mgr_cls, 'get_common_server') def test_common_server(self, mock_generic_get_server, mock_get_auth): mock_server_details = {'backend_details': {}} mock_auth_info = {'fake_auth_info': mock.sentinel.auth_info} mock_generic_get_server.return_value = mock_server_details mock_get_auth.return_value = mock_auth_info expected_server_details = dict(backend_details=mock_auth_info) server_details = self._mgr.get_common_server() mock_generic_get_server.assert_called_once_with() self.assertEqual(expected_server_details, server_details) @mock.patch.object(serv_mgr_cls, '_get_auth_info') @mock.patch.object(generic_serv_mgr_cls, '_get_new_instance_details') def test_get_new_instance_details(self, mock_generic_get_details, mock_get_auth): mock_server_details = {'fake_server_details': mock.sentinel.server_details} mock_generic_get_details.return_value = mock_server_details mock_auth_info = {'fake_auth_info': mock.sentinel.auth_info} mock_get_auth.return_value = mock_auth_info expected_server_details = dict(mock_server_details, **mock_auth_info) instance_details = self._mgr._get_new_instance_details( 
server=mock.sentinel.server) mock_generic_get_details.assert_called_once_with(mock.sentinel.server) self.assertEqual(expected_server_details, instance_details) @ddt.data(('abAB01', True), ('abcdef', False), ('aA0', False)) @ddt.unpack def test_check_password_complexity(self, password, expected_result): valid_complexity = self._mgr._check_password_complexity( password) self.assertEqual(expected_result, valid_complexity) @ddt.data(None, Exception) def test_server_connection(self, side_effect): self._remote_execute.side_effect = side_effect expected_result = side_effect is None is_available = self._mgr._test_server_connection(self._FAKE_SERVER) self.assertEqual(expected_result, is_available) self._remote_execute.assert_called_once_with(self._FAKE_SERVER, "whoami", retry=False) @ddt.data(False, True) def test_get_service_instance_create_kwargs(self, use_cert_auth): self._mgr._use_cert_auth = use_cert_auth self.flags(service_instance_password=mock.sentinel.admin_pass) if use_cert_auth: mock_cert_data = 'mock_cert_data' self.mock_object(service_instance, 'open', mock.mock_open( read_data=mock_cert_data)) expected_kwargs = dict(user_data=mock_cert_data) else: expected_kwargs = dict( meta=dict(admin_pass=str(mock.sentinel.admin_pass))) create_kwargs = self._mgr._get_service_instance_create_kwargs() self.assertEqual(expected_kwargs, create_kwargs) @mock.patch.object(generic_serv_mgr_cls, 'set_up_service_instance') @mock.patch.object(serv_mgr_cls, 'get_valid_security_service') @mock.patch.object(serv_mgr_cls, '_setup_security_service') def test_set_up_service_instance(self, mock_setup_security_service, mock_get_valid_security_service, mock_generic_setup_serv_inst): mock_service_instance = {'instance_details': None} mock_network_info = {'security_services': mock.sentinel.security_services} mock_generic_setup_serv_inst.return_value = mock_service_instance mock_get_valid_security_service.return_value = ( mock.sentinel.security_service) instance_details = 
self._mgr.set_up_service_instance( mock.sentinel.context, mock_network_info) mock_generic_setup_serv_inst.assert_called_once_with( mock.sentinel.context, mock_network_info) mock_get_valid_security_service.assert_called_once_with( mock.sentinel.security_services) mock_setup_security_service.assert_called_once_with( mock_service_instance, mock.sentinel.security_service) expected_instance_details = dict(mock_service_instance, joined_domain=True) self.assertEqual(expected_instance_details, instance_details) @mock.patch.object(serv_mgr_cls, '_run_cloudbase_init_plugin_after_reboot') @mock.patch.object(serv_mgr_cls, '_join_domain') def test_setup_security_service(self, mock_join_domain, mock_run_cbsinit_plugin): utils = self._windows_utils mock_security_service = {'domain': mock.sentinel.domain, 'user': mock.sentinel.admin_username, 'password': mock.sentinel.admin_password, 'dns_ip': mock.sentinel.dns_ip} utils.get_interface_index_by_ip.return_value = ( mock.sentinel.interface_index) self._mgr._setup_security_service(self._FAKE_SERVER, mock_security_service) utils.set_dns_client_search_list.assert_called_once_with( self._FAKE_SERVER, [mock_security_service['domain']]) utils.get_interface_index_by_ip.assert_called_once_with( self._FAKE_SERVER, self._FAKE_SERVER['ip']) utils.set_dns_client_server_addresses.assert_called_once_with( self._FAKE_SERVER, mock.sentinel.interface_index, [mock_security_service['dns_ip']]) mock_run_cbsinit_plugin.assert_called_once_with( self._FAKE_SERVER, plugin_name=self._mgr._CBS_INIT_WINRM_PLUGIN) mock_join_domain.assert_called_once_with( self._FAKE_SERVER, mock.sentinel.domain, mock.sentinel.admin_username, mock.sentinel.admin_password) @ddt.data({'join_domain_side_eff': Exception}, {'server_available': False, 'expected_exception': exception.ServiceInstanceException}, {'join_domain_side_eff': processutils.ProcessExecutionError, 'expected_exception': processutils.ProcessExecutionError}, {'domain_mismatch': True, 'expected_exception': 
exception.ServiceInstanceException}) @mock.patch.object(generic_serv_mgr_cls, 'reboot_server') @mock.patch.object(generic_serv_mgr_cls, 'wait_for_instance_to_be_active') @mock.patch.object(generic_serv_mgr_cls, '_check_server_availability') @ddt.unpack def test_join_domain(self, mock_check_avail, mock_wait_instance_active, mock_reboot_server, expected_exception=None, server_available=True, domain_mismatch=False, join_domain_side_eff=None): self._windows_utils.join_domain.side_effect = join_domain_side_eff mock_check_avail.return_value = server_available self._windows_utils.get_current_domain.return_value = ( None if domain_mismatch else mock.sentinel.domain) domain_params = (mock.sentinel.domain, mock.sentinel.admin_username, mock.sentinel.admin_password) if expected_exception: self.assertRaises(expected_exception, self._mgr._join_domain, self._FAKE_SERVER, *domain_params) else: self._mgr._join_domain(self._FAKE_SERVER, *domain_params) if join_domain_side_eff != processutils.ProcessExecutionError: mock_reboot_server.assert_called_once_with( self._FAKE_SERVER, soft_reboot=True) mock_wait_instance_active.assert_called_once_with( self._FAKE_SERVER['instance_id'], timeout=self._mgr.max_time_to_build_instance) mock_check_avail.assert_called_once_with(self._FAKE_SERVER) if server_available: self._windows_utils.get_current_domain.assert_called_once_with( self._FAKE_SERVER) self._windows_utils.join_domain.assert_called_once_with( self._FAKE_SERVER, *domain_params) @ddt.data([], [{'type': 'active_directory'}], [{'type': 'active_directory'}] * 2, [{'type': mock.sentinel.invalid_type}]) def test_get_valid_security_service(self, security_services): valid_security_service = self._mgr.get_valid_security_service( security_services) if (security_services and len(security_services) == 1 and security_services[0]['type'] == 'active_directory'): expected_valid_sec_service = security_services[0] else: expected_valid_sec_service = None self.assertEqual(expected_valid_sec_service, 
valid_security_service) @mock.patch.object(serv_mgr_cls, '_get_cbs_init_reg_section') def test_run_cloudbase_init_plugin_after_reboot(self, mock_get_cbs_init_reg): self._FAKE_SERVER = {'instance_id': mock.sentinel.instance_id} mock_get_cbs_init_reg.return_value = mock.sentinel.cbs_init_reg_sect expected_plugin_key_path = "%(cbs_init)s\\%(instance_id)s\\Plugins" % { 'cbs_init': mock.sentinel.cbs_init_reg_sect, 'instance_id': self._FAKE_SERVER['instance_id']} self._mgr._run_cloudbase_init_plugin_after_reboot( server=self._FAKE_SERVER, plugin_name=mock.sentinel.plugin_name) mock_get_cbs_init_reg.assert_called_once_with(self._FAKE_SERVER) self._windows_utils.set_win_reg_value.assert_called_once_with( self._FAKE_SERVER, path=expected_plugin_key_path, key=mock.sentinel.plugin_name, value=self._mgr._CBS_INIT_RUN_PLUGIN_AFTER_REBOOT) @ddt.data( {}, {'exec_errors': [ processutils.ProcessExecutionError(stderr='Cannot find path'), processutils.ProcessExecutionError(stderr='Cannot find path')], 'expected_exception': exception.ServiceInstanceException}, {'exec_errors': [processutils.ProcessExecutionError(stderr='')], 'expected_exception': processutils.ProcessExecutionError}, {'exec_errors': [ processutils.ProcessExecutionError(stderr='Cannot find path'), None]} ) @ddt.unpack def test_get_cbs_init_reg_section(self, exec_errors=None, expected_exception=None): self._windows_utils.normalize_path.return_value = ( mock.sentinel.normalized_section_path) self._windows_utils.get_win_reg_value.side_effect = exec_errors if expected_exception: self.assertRaises(expected_exception, self._mgr._get_cbs_init_reg_section, mock.sentinel.server) else: cbs_init_section = self._mgr._get_cbs_init_reg_section( mock.sentinel.server) self.assertEqual(mock.sentinel.normalized_section_path, cbs_init_section) base_path = 'hklm:\\SOFTWARE' cbs_section = 'Cloudbase Solutions\\Cloudbase-Init' tested_upper_sections = [''] if exec_errors and 'Cannot find path' in exec_errors[0].stderr: 
tested_upper_sections.append('Wow6432Node') tested_sections = [os.path.join(base_path, upper_section, cbs_section) for upper_section in tested_upper_sections] self._windows_utils.normalize_path.assert_has_calls( [mock.call(tested_section) for tested_section in tested_sections]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/windows/test_windows_smb_driver.py0000664000175000017500000003045200000000000027256 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from unittest import mock import ddt from manila.share import configuration from manila.share.drivers import generic from manila.share.drivers.windows import service_instance from manila.share.drivers.windows import windows_smb_driver as windows_drv from manila.share.drivers.windows import windows_smb_helper from manila.share.drivers.windows import windows_utils from manila.share.drivers.windows import winrm_helper from manila import test from manila.tests import fake_share @ddt.ddt class WindowsSMBDriverTestCase(test.TestCase): @mock.patch.object(winrm_helper, 'WinRMHelper') @mock.patch.object(windows_utils, 'WindowsUtils') @mock.patch.object(windows_smb_helper, 'WindowsSMBHelper') @mock.patch.object(service_instance, 'WindowsServiceInstanceManager') def setUp(self, mock_sv_instance_mgr, mock_smb_helper_cls, mock_utils_cls, mock_winrm_helper_cls): self.flags(driver_handles_share_servers=True) self._fake_conf = configuration.Configuration(None) self._share = fake_share.fake_share(share_proto='SMB') self._share_server = dict( backend_details=mock.sentinel.backend_details) self._drv = windows_drv.WindowsSMBDriver( configuration=self._fake_conf) self._drv._setup_helpers() self._remote_execute = mock_winrm_helper_cls.return_value self._windows_utils = mock_utils_cls.return_value self._smb_helper = mock_smb_helper_cls.return_value super(WindowsSMBDriverTestCase, self).setUp() @mock.patch('manila.share.driver.ShareDriver') def test_update_share_stats(self, mock_base_driver): self._drv._update_share_stats() mock_base_driver._update_share_stats.assert_called_once_with( self._drv, data=dict(storage_protocol="CIFS")) @mock.patch.object(service_instance, 'WindowsServiceInstanceManager') def test_setup_service_instance_manager(self, mock_sv_instance_mgr): self._drv._setup_service_instance_manager() mock_sv_instance_mgr.assert_called_once_with( driver_config=self._fake_conf) def test_setup_helpers(self): expected_helpers = {"SMB": self._smb_helper, "CIFS": 
self._smb_helper} self._drv._setup_helpers() self.assertEqual(expected_helpers, self._drv._helpers) @mock.patch.object(generic.GenericShareDriver, '_teardown_server') def test_teardown_server(self, mock_super_teardown): mock_server = {'joined_domain': True, 'instance_id': mock.sentinel.instance_id} mock_sec_service = {'user': mock.sentinel.user, 'password': mock.sentinel.password, 'domain': mock.sentinel.domain} sv_mgr = self._drv.service_instance_manager sv_mgr.get_valid_security_service.return_value = mock_sec_service # We ensure that domain unjoin exceptions do not prevent the # service instance from being teared down. self._windows_utils.unjoin_domain.side_effect = Exception self._drv._teardown_server(mock_server, mock_sec_service) sv_mgr.get_valid_security_service.assert_called_once_with( mock_sec_service) self._windows_utils.unjoin_domain.assert_called_once_with( mock_server, mock_sec_service['user'], mock_sec_service['password']) mock_super_teardown.assert_called_once_with(mock_server, mock_sec_service) @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_disk_number') def test_format_device(self, mock_get_disk_number): mock_get_disk_number.return_value = mock.sentinel.disk_number self._drv._format_device(mock.sentinel.server, mock.sentinel.vol) self._drv._get_disk_number.assert_called_once_with( mock.sentinel.server, mock.sentinel.vol) self._windows_utils.initialize_disk.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) self._windows_utils.create_partition.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) self._windows_utils.format_partition.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number, self._drv._DEFAULT_SHARE_PARTITION) @mock.patch.object(windows_drv.WindowsSMBDriver, '_ensure_disk_online_and_writable') @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_disk_number') @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_mount_path') 
@mock.patch.object(windows_drv.WindowsSMBDriver, '_is_device_mounted') def test_mount_device(self, mock_device_mounted, mock_get_mount_path, mock_get_disk_number, mock_ensure_disk): mock_get_mount_path.return_value = mock.sentinel.mount_path mock_get_disk_number.return_value = mock.sentinel.disk_number mock_device_mounted.return_value = False self._drv._mount_device(share=mock.sentinel.share, server_details=mock.sentinel.server, volume=mock.sentinel.vol) mock_device_mounted.assert_called_once_with( mock.sentinel.mount_path, mock.sentinel.server, mock.sentinel.vol) mock_get_disk_number.assert_called_once_with( mock.sentinel.server, mock.sentinel.vol) self._windows_utils.ensure_directory_exists.assert_called_once_with( mock.sentinel.server, mock.sentinel.mount_path) self._windows_utils.add_access_path( mock.sentinel.server, mock.sentinel.mount_path, mock.sentinel.disk_number, self._drv._DEFAULT_SHARE_PARTITION) mock_ensure_disk.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_mount_path') def test_unmount_device(self, mock_get_mount_path): mock_get_mount_path.return_value = mock.sentinel.mount_path mock_get_disk_number_by_path = ( self._windows_utils.get_disk_number_by_mount_path) self._drv._unmount_device(mock.sentinel.share, mock.sentinel.server) mock_get_mount_path.assert_called_once_with(mock.sentinel.share) mock_get_disk_number_by_path.assert_called_once_with( mock.sentinel.server, mock.sentinel.mount_path) self._windows_utils.set_disk_online_status.assert_called_once_with( mock.sentinel.server, mock_get_disk_number_by_path.return_value, online=False) @ddt.data(None, 1) @mock.patch.object(windows_drv.WindowsSMBDriver, '_get_disk_number') @mock.patch.object(windows_drv.WindowsSMBDriver, '_ensure_disk_online_and_writable') def test_resize_filesystem(self, new_size, mock_ensure_disk, mock_get_disk_number): mock_get_disk_number.return_value = mock.sentinel.disk_number mock_get_max_size = 
self._windows_utils.get_partition_maximum_size mock_get_max_size.return_value = mock.sentinel.max_size self._drv._resize_filesystem(mock.sentinel.server, mock.sentinel.vol, new_size=new_size) mock_get_disk_number.assert_called_once_with(mock.sentinel.server, mock.sentinel.vol) self._drv._ensure_disk_online_and_writable.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) if not new_size: mock_get_max_size.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number, self._drv._DEFAULT_SHARE_PARTITION) expected_new_size = mock.sentinel.max_size else: expected_new_size = new_size << 30 self._windows_utils.resize_partition.assert_called_once_with( mock.sentinel.server, expected_new_size, mock.sentinel.disk_number, self._drv._DEFAULT_SHARE_PARTITION) def test_ensure_disk_online_and_writable(self): self._drv._ensure_disk_online_and_writable( mock.sentinel.server, mock.sentinel.disk_number) self._windows_utils.update_disk.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number) self._windows_utils.set_disk_online_status.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number, online=True) self._windows_utils.set_disk_readonly_status.assert_called_once_with( mock.sentinel.server, mock.sentinel.disk_number, readonly=False) def test_get_mounted_share_size(self): fake_size_gb = 10 self._windows_utils.get_disk_space_by_path.return_value = ( fake_size_gb << 30, mock.sentinel.free_bytes) share_size = self._drv._get_mounted_share_size( mock.sentinel.mount_path, mock.sentinel.server) self.assertEqual(fake_size_gb, share_size) def test_get_consumed_space(self): fake_size_gb = 2 fake_free_space_gb = 1 self._windows_utils.get_disk_space_by_path.return_value = ( fake_size_gb << 30, fake_free_space_gb << 30) consumed_space = self._drv._get_consumed_space( mock.sentinel.mount_path, mock.sentinel.server) self.assertEqual(fake_size_gb - fake_free_space_gb, consumed_space) def test_get_mount_path(self): fake_mount_path = 
'fake_mount_path' fake_share_name = 'fake_share_name' mock_share = {'name': fake_share_name} self.flags(share_mount_path=fake_mount_path) mount_path = self._drv._get_mount_path(mock_share) self._windows_utils.normalize_path.assert_called_once_with( os.path.join(fake_mount_path, fake_share_name)) self.assertEqual(self._windows_utils.normalize_path.return_value, mount_path) @ddt.data(None, 2) def test_get_disk_number(self, disk_number_by_serial=None): mock_get_disk_number_by_serial = ( self._windows_utils.get_disk_number_by_serial_number) mock_get_disk_number_by_serial.return_value = disk_number_by_serial mock_volume = {'id': mock.sentinel.vol_id, 'mountpoint': "/dev/sdb"} # If the disk number cannot be identified using the disk serial # number, we expect it to be retrieved based on the volume mountpoint, # having disk number 1 in this case. expected_disk_number = (disk_number_by_serial if disk_number_by_serial else 1) disk_number = self._drv._get_disk_number(mock.sentinel.server, mock_volume) mock_get_disk_number_by_serial.assert_called_once_with( mock.sentinel.server, mock.sentinel.vol_id) self.assertEqual(expected_disk_number, disk_number) @ddt.data(None, 2) def test_is_device_mounted(self, disk_number_by_path): mock_get_disk_number_by_path = ( self._windows_utils.get_disk_number_by_mount_path) mock_get_disk_number_by_path.return_value = disk_number_by_path expected_result = disk_number_by_path is not None is_mounted = self._drv._is_device_mounted( mount_path=mock.sentinel.mount_path, server_details=mock.sentinel.server) mock_get_disk_number_by_path.assert_called_once_with( mock.sentinel.server, mock.sentinel.mount_path) self.assertEqual(expected_result, is_mounted) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/windows/test_windows_smb_helper.py0000664000175000017500000004072500000000000027246 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Cloudbase Solutions 
SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest import mock import ddt from manila.common import constants from manila import exception from manila.share import configuration from manila.share.drivers.windows import windows_smb_helper from manila.share.drivers.windows import windows_utils from manila import test from oslo_config import cfg CONF = cfg.CONF CONF.import_opt('share_mount_path', 'manila.share.drivers.generic') @ddt.ddt class WindowsSMBHelperTestCase(test.TestCase): _FAKE_SERVER = {'public_address': mock.sentinel.public_address} _FAKE_SHARE_NAME = "fake_share_name" _FAKE_SHARE = "\\\\%s\\%s" % (_FAKE_SERVER['public_address'], _FAKE_SHARE_NAME) _FAKE_SHARE_LOCATION = os.path.join( configuration.Configuration(None).share_mount_path, _FAKE_SHARE_NAME) _FAKE_ACCOUNT_NAME = 'FakeDomain\\FakeUser' _FAKE_RW_ACC_RULE = { 'access_to': _FAKE_ACCOUNT_NAME, 'access_level': constants.ACCESS_LEVEL_RW, 'access_type': 'user', } def setUp(self): self._remote_exec = mock.Mock() fake_conf = configuration.Configuration(None) self._win_smb_helper = windows_smb_helper.WindowsSMBHelper( self._remote_exec, fake_conf) super(WindowsSMBHelperTestCase, self).setUp() def test_init_helper(self): self._win_smb_helper.init_helper(mock.sentinel.server) self._remote_exec.assert_called_once_with(mock.sentinel.server, "Get-SmbShare") @ddt.data(True, False) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_share_exists') def 
test_create_exports(self, share_exists, mock_share_exists): mock_share_exists.return_value = share_exists result = self._win_smb_helper.create_exports(self._FAKE_SERVER, self._FAKE_SHARE_NAME) if not share_exists: cmd = ['New-SmbShare', '-Name', self._FAKE_SHARE_NAME, '-Path', self._win_smb_helper._windows_utils.normalize_path( self._FAKE_SHARE_LOCATION), '-ReadAccess', "*%s" % self._win_smb_helper._NULL_SID] self._remote_exec.assert_called_once_with(self._FAKE_SERVER, cmd) else: self.assertFalse(self._remote_exec.called) expected_exports = [ { 'is_admin_only': False, 'metadata': {'export_location_metadata_example': 'example'}, 'path': self._FAKE_SHARE }, ] self.assertEqual(expected_exports, result) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_share_exists') def test_remove_exports(self, mock_share_exists): mock_share_exists.return_value = True self._win_smb_helper.remove_exports(mock.sentinel.server, mock.sentinel.share_name) cmd = ['Remove-SmbShare', '-Name', mock.sentinel.share_name, "-Force"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @mock.patch.object(windows_utils.WindowsUtils, 'get_volume_path_by_mount_path') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_share_path_by_name') def test_get_volume_path_by_share_name(self, mock_get_share_path, mock_get_vol_path): mock_get_share_path.return_value = self._FAKE_SHARE_LOCATION volume_path = self._win_smb_helper._get_volume_path_by_share_name( mock.sentinel.server, self._FAKE_SHARE_NAME) mock_get_share_path.assert_called_once_with(mock.sentinel.server, self._FAKE_SHARE_NAME) mock_get_vol_path.assert_called_once_with(mock.sentinel.server, self._FAKE_SHARE_LOCATION) self.assertEqual(mock_get_vol_path.return_value, volume_path) @ddt.data({'raw_out': '', 'expected': []}, {'raw_out': '{"key": "val"}', 'expected': [{"key": "val"}]}, {'raw_out': '[{"key": "val"}, {"key2": "val2"}]', 'expected': [{"key": "val"}, {"key2": "val2"}]}) @ddt.unpack def 
test_get_acls_helper(self, raw_out, expected): self._remote_exec.return_value = (raw_out, mock.sentinel.err) rules = self._win_smb_helper._get_acls(mock.sentinel.server, self._FAKE_SHARE_NAME) self.assertEqual(expected, rules) expected_cmd = ( 'Get-SmbShareAccess -Name %s | ' 'Select-Object @("Name", "AccountName", ' '"AccessControlType", "AccessRight") | ' 'ConvertTo-JSON -Compress') % self._FAKE_SHARE_NAME self._remote_exec.assert_called_once_with(mock.sentinel.server, expected_cmd) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_acls') def test_get_access_rules(self, mock_get_acls): helper = self._win_smb_helper valid_acl = { 'AccountName': self._FAKE_ACCOUNT_NAME, 'AccessRight': helper._WIN_ACCESS_RIGHT_FULL, 'AccessControlType': helper._WIN_ACL_ALLOW, } valid_acls = [valid_acl, dict(valid_acl, AccessRight=helper._WIN_ACCESS_RIGHT_CHANGE), dict(valid_acl, AccessRight=helper._WIN_ACCESS_RIGHT_READ)] # Those are rules that were not added by us and are expected to # be ignored. When encountering such a rule, a warning message # will be logged. ignored_acls = [ dict(valid_acl, AccessRight=helper._WIN_ACCESS_RIGHT_CUSTOM), dict(valid_acl, AccessControlType=helper._WIN_ACL_DENY)] mock_get_acls.return_value = valid_acls + ignored_acls # There won't be multiple access rules for the same account, # but we'll ignore this fact for the sake of this test. 
expected_rules = [self._FAKE_RW_ACC_RULE, self._FAKE_RW_ACC_RULE, dict(self._FAKE_RW_ACC_RULE, access_level=constants.ACCESS_LEVEL_RO)] rules = helper.get_access_rules(mock.sentinel.server, mock.sentinel.share_name) self.assertEqual(expected_rules, rules) mock_get_acls.assert_called_once_with(mock.sentinel.server, mock.sentinel.share_name) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_refresh_acl') def test_grant_share_access(self, mock_refresh_acl): self._win_smb_helper._grant_share_access(mock.sentinel.server, mock.sentinel.share_name, constants.ACCESS_LEVEL_RW, mock.sentinel.username) cmd = ["Grant-SmbShareAccess", "-Name", mock.sentinel.share_name, "-AccessRight", "Change", "-AccountName", "'%s'" % mock.sentinel.username, "-Force"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) mock_refresh_acl.assert_called_once_with(mock.sentinel.server, mock.sentinel.share_name) def test_refresh_acl(self): self._win_smb_helper._refresh_acl(mock.sentinel.server, mock.sentinel.share_name) cmd = ['Set-SmbPathAcl', '-ShareName', mock.sentinel.share_name] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_refresh_acl') def test_revoke_share_access(self, mock_refresh_acl): self._win_smb_helper._revoke_share_access(mock.sentinel.server, mock.sentinel.share_name, mock.sentinel.username) cmd = ["Revoke-SmbShareAccess", "-Name", mock.sentinel.share_name, "-AccountName", '"%s"' % mock.sentinel.username, "-Force"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) mock_refresh_acl.assert_called_once_with(mock.sentinel.server, mock.sentinel.share_name) def test_update_access_invalid_type(self): invalid_access_rule = dict(self._FAKE_RW_ACC_RULE, access_type='ip') self.assertRaises( exception.InvalidShareAccess, self._win_smb_helper.update_access, mock.sentinel.server, mock.sentinel.share_name, [invalid_access_rule], [], [], []) def 
test_update_access_invalid_level(self): invalid_access_rule = dict(self._FAKE_RW_ACC_RULE, access_level='fake_level') self.assertRaises( exception.InvalidShareAccessLevel, self._win_smb_helper.update_access, mock.sentinel.server, mock.sentinel.share_name, [], [invalid_access_rule], [], []) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_revoke_share_access') def test_update_access_deleting_invalid_rule(self, mock_revoke): # We want to make sure that we allow deleting invalid rules. invalid_access_rule = dict(self._FAKE_RW_ACC_RULE, access_level='fake_level') delete_rules = [invalid_access_rule, self._FAKE_RW_ACC_RULE] self._win_smb_helper.update_access( mock.sentinel.server, mock.sentinel.share_name, [], [], delete_rules, []) mock_revoke.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name, self._FAKE_RW_ACC_RULE['access_to']) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, 'validate_access_rules') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, 'get_access_rules') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_grant_share_access') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_revoke_share_access') def test_update_access(self, mock_revoke, mock_grant, mock_get_access_rules, mock_validate): added_rules = [mock.MagicMock(), mock.MagicMock()] deleted_rules = [mock.MagicMock(), mock.MagicMock()] self._win_smb_helper.update_access( mock.sentinel.server, mock.sentinel.share_name, [], added_rules, deleted_rules, []) mock_revoke.assert_has_calls( [mock.call(mock.sentinel.server, mock.sentinel.share_name, deleted_rule['access_to']) for deleted_rule in deleted_rules]) mock_grant.assert_has_calls( [mock.call(mock.sentinel.server, mock.sentinel.share_name, added_rule['access_level'], added_rule['access_to']) for added_rule in added_rules]) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_rule_updates') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, 'validate_access_rules') 
@mock.patch.object(windows_smb_helper.WindowsSMBHelper, 'get_access_rules') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_grant_share_access') @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_revoke_share_access') def test_update_access_maintenance( self, mock_revoke, mock_grant, mock_get_access_rules, mock_validate, mock_get_rule_updates): all_rules = mock.MagicMock() added_rules = [mock.MagicMock(), mock.MagicMock()] deleted_rules = [mock.MagicMock(), mock.MagicMock()] mock_get_rule_updates.return_value = [ added_rules, deleted_rules] self._win_smb_helper.update_access( mock.sentinel.server, mock.sentinel.share_name, all_rules, [], [], []) mock_get_access_rules.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name) mock_get_rule_updates.assert_called_once_with( existing_rules=mock_get_access_rules.return_value, requested_rules=all_rules) mock_revoke.assert_has_calls( [mock.call(mock.sentinel.server, mock.sentinel.share_name, deleted_rule['access_to']) for deleted_rule in deleted_rules]) mock_grant.assert_has_calls( [mock.call(mock.sentinel.server, mock.sentinel.share_name, added_rule['access_level'], added_rule['access_to']) for added_rule in added_rules]) def test_get_rule_updates(self): req_rule_0 = self._FAKE_RW_ACC_RULE req_rule_1 = dict(self._FAKE_RW_ACC_RULE, access_to='fake_acc') curr_rule_0 = dict(self._FAKE_RW_ACC_RULE, access_to=self._FAKE_RW_ACC_RULE[ 'access_to'].upper()) curr_rule_1 = dict(self._FAKE_RW_ACC_RULE, access_to='fake_acc2') curr_rule_2 = dict(req_rule_1, access_level=constants.ACCESS_LEVEL_RO) expected_added_rules = [req_rule_1] expected_deleted_rules = [curr_rule_1, curr_rule_2] existing_rules = [curr_rule_0, curr_rule_1, curr_rule_2] requested_rules = [req_rule_0, req_rule_1] (added_rules, deleted_rules) = self._win_smb_helper._get_rule_updates( existing_rules, requested_rules) self.assertEqual(expected_added_rules, added_rules) self.assertEqual(expected_deleted_rules, deleted_rules) def 
test_get_share_name(self): result = self._win_smb_helper._get_share_name(self._FAKE_SHARE) self.assertEqual(self._FAKE_SHARE_NAME, result) def test_get_share_path_by_name(self): self._remote_exec.return_value = (self._FAKE_SHARE_LOCATION, mock.sentinel.std_err) result = self._win_smb_helper._get_share_path_by_name( mock.sentinel.server, mock.sentinel.share_name) cmd = ('Get-SmbShare -Name %s | ' 'Select-Object -ExpandProperty Path' % mock.sentinel.share_name) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd, check_exit_code=True) self.assertEqual(self._FAKE_SHARE_LOCATION, result) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_share_path_by_name') def test_get_share_path_by_export_location(self, mock_get_share_path_by_name): mock_get_share_path_by_name.return_value = mock.sentinel.share_path result = self._win_smb_helper.get_share_path_by_export_location( mock.sentinel.server, self._FAKE_SHARE) mock_get_share_path_by_name.assert_called_once_with( mock.sentinel.server, self._FAKE_SHARE_NAME) self.assertEqual(mock.sentinel.share_path, result) @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_get_share_path_by_name') def test_share_exists(self, mock_get_share_path_by_name): result = self._win_smb_helper._share_exists(mock.sentinel.server, mock.sentinel.share_name) mock_get_share_path_by_name.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name, ignore_missing=True) self.assertTrue(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/windows/test_windows_utils.py0000664000175000017500000003775600000000000026300 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from manila.share.drivers.windows import windows_utils from manila import test @ddt.ddt class WindowsUtilsTestCase(test.TestCase): def setUp(self): self._remote_exec = mock.Mock() self._windows_utils = windows_utils.WindowsUtils(self._remote_exec) super(WindowsUtilsTestCase, self).setUp() def test_initialize_disk(self): self._windows_utils.initialize_disk(mock.sentinel.server, mock.sentinel.disk_number) cmd = ["Initialize-Disk", "-Number", mock.sentinel.disk_number] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_create_partition(self): self._windows_utils.create_partition(mock.sentinel.server, mock.sentinel.disk_number) cmd = ["New-Partition", "-DiskNumber", mock.sentinel.disk_number, "-UseMaximumSize"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_format_partition(self): self._windows_utils.format_partition(mock.sentinel.server, mock.sentinel.disk_number, mock.sentinel.partition_number) cmd = ("Get-Partition -DiskNumber %(disk_number)s " "-PartitionNumber %(partition_number)s | " "Format-Volume -FileSystem NTFS -Force -Confirm:$false" % { 'disk_number': mock.sentinel.disk_number, 'partition_number': mock.sentinel.partition_number, }) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_add_access_path(self): self._windows_utils.add_access_path(mock.sentinel.server, mock.sentinel.mount_path, mock.sentinel.disk_number, mock.sentinel.partition_number) cmd = ["Add-PartitionAccessPath", "-DiskNumber", mock.sentinel.disk_number, "-PartitionNumber", 
mock.sentinel.partition_number, "-AccessPath", self._windows_utils.quote_string( mock.sentinel.mount_path) ] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_resize_partition(self): self._windows_utils.resize_partition(mock.sentinel.server, mock.sentinel.size_bytes, mock.sentinel.disk_number, mock.sentinel.partition_number) cmd = ['Resize-Partition', '-DiskNumber', mock.sentinel.disk_number, '-PartitionNumber', mock.sentinel.partition_number, '-Size', mock.sentinel.size_bytes] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @ddt.data("1", "") def test_get_disk_number_by_serial_number(self, disk_number): mock_serial_number = "serial_number" self._remote_exec.return_value = (disk_number, mock.sentinel.std_err) expected_disk_number = int(disk_number) if disk_number else None result = self._windows_utils.get_disk_number_by_serial_number( mock.sentinel.server, mock_serial_number) pattern = "%s*" % mock_serial_number cmd = ("Get-Disk | " "Where-Object {$_.SerialNumber -like '%s'} | " "Select-Object -ExpandProperty Number" % pattern) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(expected_disk_number, result) @ddt.data("1", "") def test_get_disk_number_by_mount_path(self, disk_number): fake_mount_path = "fake_mount_path" self._remote_exec.return_value = (disk_number, mock.sentinel.std_err) expected_disk_number = int(disk_number) if disk_number else None result = self._windows_utils.get_disk_number_by_mount_path( mock.sentinel.server, fake_mount_path) cmd = ('Get-Partition | ' 'Where-Object {$_.AccessPaths -contains "%s"} | ' 'Select-Object -ExpandProperty DiskNumber' % (fake_mount_path + "\\")) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(expected_disk_number, result) def test_get_volume_path_by_mount_path(self): fake_mount_path = "fake_mount_path" fake_volume_path = "fake_volume_path" self._remote_exec.return_value = fake_volume_path + '\r\n', 
None result = self._windows_utils.get_volume_path_by_mount_path( mock.sentinel.server, fake_mount_path) cmd = ('Get-Partition | ' 'Where-Object {$_.AccessPaths -contains "%s"} | ' 'Get-Volume | ' 'Select-Object -ExpandProperty Path' % (fake_mount_path + "\\")) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(fake_volume_path, result) def test_get_disk_space_by_path(self): fake_disk_size = 1024 fake_free_bytes = 1000 fake_fsutil_output = ("Total # of bytes : %(total_bytes)s" "Total # of avail free bytes : %(free_bytes)s" % dict(total_bytes=fake_disk_size, free_bytes=fake_free_bytes)) self._remote_exec.return_value = fake_fsutil_output, None result = self._windows_utils.get_disk_space_by_path( mock.sentinel.server, mock.sentinel.mount_path) cmd = ["fsutil", "volume", "diskfree", self._windows_utils.quote_string(mock.sentinel.mount_path)] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual((fake_disk_size, fake_free_bytes), result) def test_get_partition_maximum_size(self): fake_max_size = 1024 self._remote_exec.return_value = ("%s" % fake_max_size, mock.sentinel.std_err) result = self._windows_utils.get_partition_maximum_size( mock.sentinel.server, mock.sentinel.disk_number, mock.sentinel.partition_number) cmd = ('Get-PartitionSupportedSize -DiskNumber %(disk_number)s ' '-PartitionNumber %(partition_number)s | ' 'Select-Object -ExpandProperty SizeMax' % dict(disk_number=mock.sentinel.disk_number, partition_number=mock.sentinel.partition_number)) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(fake_max_size, result) def test_set_disk_online_status(self): self._windows_utils.set_disk_online_status(mock.sentinel.server, mock.sentinel.disk_number, online=True) cmd = ["Set-Disk", "-Number", mock.sentinel.disk_number, "-IsOffline", 0] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_set_disk_readonly_status(self): 
self._windows_utils.set_disk_readonly_status(mock.sentinel.server, mock.sentinel.disk_number, readonly=False) cmd = ["Set-Disk", "-Number", mock.sentinel.disk_number, "-IsReadOnly", 0] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_update_disk(self): self._windows_utils.update_disk(mock.sentinel.server, mock.sentinel.disk_number) cmd = ["Update-Disk", mock.sentinel.disk_number] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_join_domain(self): mock_server = {'ip': mock.sentinel.server_ip} self._windows_utils.join_domain(mock_server, mock.sentinel.domain, mock.sentinel.admin_username, mock.sentinel.admin_password) cmds = [ ('$password = "%s" | ' 'ConvertTo-SecureString -asPlainText -Force' % mock.sentinel.admin_password), ('$credential = ' 'New-Object System.Management.Automation.PSCredential(' '"%s", $password)' % mock.sentinel.admin_username), ('Add-Computer -DomainName "%s" -Credential $credential' % mock.sentinel.domain)] cmd = ";".join(cmds) self._remote_exec.assert_called_once_with(mock_server, cmd) def test_unjoin_domain(self): self._windows_utils.unjoin_domain(mock.sentinel.server, mock.sentinel.admin_username, mock.sentinel.admin_password) cmds = [ ('$password = "%s" | ' 'ConvertTo-SecureString -asPlainText -Force' % mock.sentinel.admin_password), ('$credential = ' 'New-Object System.Management.Automation.PSCredential(' '"%s", $password)' % mock.sentinel.admin_username), ('Remove-Computer -UnjoinDomaincredential $credential ' '-Passthru -Verbose -Force')] cmd = ";".join(cmds) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_get_current_domain(self): fake_domain = " domain" self._remote_exec.return_value = (fake_domain, mock.sentinel.std_err) result = self._windows_utils.get_current_domain(mock.sentinel.server) cmd = "(Get-WmiObject Win32_ComputerSystem).Domain" self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(fake_domain.strip(), 
result) def test_ensure_directory_exists(self): self._windows_utils.ensure_directory_exists(mock.sentinel.server, mock.sentinel.path) cmd = ["New-Item", "-ItemType", "Directory", "-Force", "-Path", self._windows_utils.quote_string(mock.sentinel.path)] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @ddt.data(False, True) @mock.patch.object(windows_utils.WindowsUtils, 'path_exists') def test_remove(self, is_junction, mock_path_exists): recurse = True self._windows_utils.remove(mock.sentinel.server, mock.sentinel.path, is_junction=is_junction, recurse=recurse) if is_junction: cmd = ('[System.IO.Directory]::Delete(' '%(path)s, %(recurse)d)' % dict(path=self._windows_utils.quote_string( mock.sentinel.path), recurse=recurse)) else: cmd = ["Remove-Item", "-Confirm:$false", "-Path", self._windows_utils.quote_string(mock.sentinel.path), "-Force", '-Recurse'] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @mock.patch.object(windows_utils.WindowsUtils, 'path_exists') def test_remove_unexisting_path(self, mock_path_exists): mock_path_exists.return_value = False self._windows_utils.remove(mock.sentinel.server, mock.sentinel.path) self.assertFalse(self._remote_exec.called) @ddt.data("True", "False") def test_path_exists(self, path_exists): self._remote_exec.return_value = (path_exists, mock.sentinel.std_err) result = self._windows_utils.path_exists(mock.sentinel.server, mock.sentinel.path) cmd = ["Test-Path", mock.sentinel.path] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(path_exists == "True", result) def test_normalize_path(self): fake_path = "C:/" result = self._windows_utils.normalize_path(fake_path) self.assertEqual("C:\\", result) def test_get_interface_index_by_ip(self): _FAKE_INDEX = "2" self._remote_exec.return_value = (_FAKE_INDEX, mock.sentinel.std_err) result = self._windows_utils.get_interface_index_by_ip( mock.sentinel.server, mock.sentinel.ip) cmd = ('Get-NetIPAddress | ' 
'Where-Object {$_.IPAddress -eq "%(ip)s"} | ' 'Select-Object -ExpandProperty InterfaceIndex' % dict(ip=mock.sentinel.ip)) self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) self.assertEqual(int(_FAKE_INDEX), result) def test_set_dns_client_search_list(self): mock_search_list = ["A", "B", "C"] self._windows_utils.set_dns_client_search_list(mock.sentinel.server, mock_search_list) cmd = ["Set-DnsClientGlobalSetting", "-SuffixSearchList", "@('A','B','C')"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_set_dns_client_server_addresses(self): mock_dns_servers = ["A", "B", "C"] self._windows_utils.set_dns_client_server_addresses( mock.sentinel.server, mock.sentinel.if_index, mock_dns_servers) cmd = ["Set-DnsClientServerAddress", "-InterfaceIndex", mock.sentinel.if_index, "-ServerAddresses", "('A','B','C')"] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) def test_set_win_reg_value(self): self._windows_utils.set_win_reg_value(mock.sentinel.server, mock.sentinel.path, mock.sentinel.key, mock.sentinel.value) cmd = ['Set-ItemProperty', '-Path', self._windows_utils.quote_string(mock.sentinel.path), '-Name', mock.sentinel.key, '-Value', mock.sentinel.value] self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd) @ddt.data(None, mock.sentinel.key_name) def test_get_win_reg_value(self, key_name): self._remote_exec.return_value = (mock.sentinel.value, mock.sentinel.std_err) result = self._windows_utils.get_win_reg_value(mock.sentinel.server, mock.sentinel.path, name=key_name) cmd = "Get-ItemProperty -Path %s" % ( self._windows_utils.quote_string(mock.sentinel.path)) if key_name: cmd += " | Select-Object -ExpandProperty %s" % key_name self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd, retry=False) self.assertEqual(mock.sentinel.value, result) def test_quote_string(self): result = self._windows_utils.quote_string(mock.sentinel.string) self.assertEqual('"%s"' % mock.sentinel.string, 
result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/windows/test_winrm_helper.py0000664000175000017500000002641000000000000026042 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_concurrency import processutils from oslo_utils import importutils from oslo_utils import strutils from manila import exception from manila.share.drivers.windows import winrm_helper from manila import test @ddt.ddt class WinRMHelperTestCase(test.TestCase): _FAKE_SERVER = {'ip': mock.sentinel.ip} @mock.patch.object(importutils, 'import_module') def setUp(self, mock_import_module): self._winrm = winrm_helper.WinRMHelper() super(WinRMHelperTestCase, self).setUp() @ddt.data({'import_exc': None}, {'import_exc': ImportError}) @mock.patch.object(importutils, 'import_module') @ddt.unpack def test_setup_winrm(self, mock_import_module, import_exc): winrm_helper.winrm = None mock_import_module.side_effect = import_exc if import_exc: self.assertRaises(exception.ShareBackendException, winrm_helper.setup_winrm) else: winrm_helper.setup_winrm() self.assertEqual(mock_import_module.return_value, winrm_helper.winrm) mock_import_module.assert_called_once_with('winrm') @mock.patch.object(winrm_helper.WinRMHelper, '_get_auth') @mock.patch.object(winrm_helper, 'WinRMConnection') def 
test_get_conn(self, mock_conn_cls, mock_get_auth): mock_auth = {'mock_auth_key': mock.sentinel.auth_opt} mock_get_auth.return_value = mock_auth conn = self._winrm._get_conn(self._FAKE_SERVER) mock_get_auth.assert_called_once_with(self._FAKE_SERVER) mock_conn_cls.assert_called_once_with( ip=self._FAKE_SERVER['ip'], conn_timeout=self._winrm._config.winrm_conn_timeout, operation_timeout=self._winrm._config.winrm_operation_timeout, **mock_auth) self.assertEqual(mock_conn_cls.return_value, conn) @ddt.data({}, {'exit_code': 1}, {'exit_code': 1, 'check_exit_code': False}) @mock.patch.object(strutils, 'mask_password') @mock.patch.object(winrm_helper.WinRMHelper, '_parse_command') @mock.patch.object(winrm_helper.WinRMHelper, '_get_conn') @ddt.unpack def test_execute(self, mock_get_conn, mock_parse_command, mock_mask_password, check_exit_code=True, exit_code=0): mock_parse_command.return_value = (mock.sentinel.parsed_cmd, mock.sentinel.sanitized_cmd) mock_conn = mock_get_conn.return_value mock_conn.execute.return_value = (mock.sentinel.stdout, mock.sentinel.stderr, exit_code) if exit_code == 0 or not check_exit_code: result = self._winrm.execute(mock.sentinel.server, mock.sentinel.command, check_exit_code=check_exit_code, retry=False) expected_result = (mock.sentinel.stdout, mock.sentinel.stderr) self.assertEqual(expected_result, result) else: self.assertRaises(processutils.ProcessExecutionError, self._winrm.execute, mock.sentinel.server, mock.sentinel.command, check_exit_code=check_exit_code, retry=False) mock_get_conn.assert_called_once_with(mock.sentinel.server) mock_parse_command.assert_called_once_with(mock.sentinel.command) mock_conn.execute.assert_called_once_with(mock.sentinel.parsed_cmd) mock_mask_password.assert_has_calls([mock.call(mock.sentinel.stdout), mock.call(mock.sentinel.stderr)]) @mock.patch('base64.b64encode') @mock.patch.object(strutils, 'mask_password') def test_parse_command(self, mock_mask_password, mock_base64): mock_mask_password.return_value = 
mock.sentinel.sanitized_cmd mock_base64.return_value = mock.sentinel.encoded_string cmd = ('Get-Disk', '-Number', 1) result = self._winrm._parse_command(cmd) joined_cmd = 'Get-Disk -Number 1' expected_command = ("powershell.exe -ExecutionPolicy RemoteSigned " "-NonInteractive -EncodedCommand %s" % mock.sentinel.encoded_string) expected_result = expected_command, mock.sentinel.sanitized_cmd mock_mask_password.assert_called_once_with(joined_cmd) mock_base64.assert_called_once_with(joined_cmd.encode("utf_16_le")) self.assertEqual(expected_result, result) def _test_get_auth(self, use_cert_auth=False): mock_server = {'use_cert_auth': use_cert_auth, 'cert_pem_path': mock.sentinel.pem_path, 'cert_key_pem_path': mock.sentinel.key_path, 'username': mock.sentinel.username, 'password': mock.sentinel.password} result = self._winrm._get_auth(mock_server) expected_result = {'username': mock_server['username']} if use_cert_auth: expected_result['cert_pem_path'] = mock_server['cert_pem_path'] expected_result['cert_key_pem_path'] = ( mock_server['cert_key_pem_path']) else: expected_result['password'] = mock_server['password'] self.assertEqual(expected_result, result) def test_get_auth_using_certificates(self): self._test_get_auth(use_cert_auth=True) def test_get_auth_using_password(self): self._test_get_auth() class WinRMConnectionTestCase(test.TestCase): @mock.patch.object(winrm_helper, 'setup_winrm') @mock.patch.object(winrm_helper, 'winrm') @mock.patch.object(winrm_helper.WinRMConnection, '_get_url') @mock.patch.object(winrm_helper.WinRMConnection, '_get_default_port') def setUp(self, mock_get_port, mock_get_url, mock_winrm, mock_setup_winrm): self._winrm = winrm_helper.WinRMConnection() self._mock_conn = mock_winrm.protocol.Protocol.return_value super(WinRMConnectionTestCase, self).setUp() @mock.patch.object(winrm_helper, 'setup_winrm') @mock.patch.object(winrm_helper, 'winrm') @mock.patch.object(winrm_helper.WinRMConnection, '_get_url') 
@mock.patch.object(winrm_helper.WinRMConnection, '_get_default_port') def test_init_conn(self, mock_get_port, mock_get_url, mock_winrm, mock_setup_winrm): # certificates are passed so we expect cert auth to be used cert_auth = True winrm_conn = winrm_helper.WinRMConnection( ip=mock.sentinel.ip, username=mock.sentinel.username, password=mock.sentinel.password, cert_pem_path=mock.sentinel.cert_pem_path, cert_key_pem_path=mock.sentinel.cert_key_pem_path, operation_timeout=mock.sentinel.operation_timeout, conn_timeout=mock.sentinel.conn_timeout) mock_get_port.assert_called_once_with(cert_auth) mock_get_url.assert_called_once_with(mock.sentinel.ip, mock_get_port.return_value, cert_auth) mock_winrm.protocol.Protocol.assert_called_once_with( endpoint=mock_get_url.return_value, transport=winrm_helper.TRANSPORT_SSL, username=mock.sentinel.username, password=mock.sentinel.password, cert_pem=mock.sentinel.cert_pem_path, cert_key_pem=mock.sentinel.cert_key_pem_path) self.assertEqual(mock_winrm.protocol.Protocol.return_value, winrm_conn._conn) self.assertEqual(mock.sentinel.conn_timeout, winrm_conn._conn.transport.timeout) winrm_conn._conn.set_timeout.assert_called_once_with( mock.sentinel.operation_timeout) def test_get_default_port_https(self): port = self._winrm._get_default_port(use_ssl=True) self.assertEqual(winrm_helper.DEFAULT_PORT_HTTPS, port) def test_get_default_port_http(self): port = self._winrm._get_default_port(use_ssl=False) self.assertEqual(winrm_helper.DEFAULT_PORT_HTTP, port) def _test_get_url(self, ip=None, use_ssl=True): if not ip: self.assertRaises(exception.ShareBackendException, self._winrm._get_url, ip=ip, port=mock.sentinel.port, use_ssl=use_ssl) else: url = self._winrm._get_url(ip=ip, port=mock.sentinel.port, use_ssl=use_ssl) expected_protocol = 'https' if use_ssl else 'http' expected_url = self._winrm._URL_TEMPLATE % dict( protocol=expected_protocol, port=mock.sentinel.port, ip=ip) self.assertEqual(expected_url, url) def test_get_url_using_ssl(self): 
self._test_get_url(ip=mock.sentinel.ip) def test_get_url_using_plaintext(self): self._test_get_url(ip=mock.sentinel.ip, use_ssl=False) def test_get_url_missing_ip(self): self._test_get_url() def _test_execute(self, get_output_exception=None): self._mock_conn.open_shell.return_value = mock.sentinel.shell_id self._mock_conn.run_command.return_value = mock.sentinel.cmd_id command_output = (mock.sentinel.stdout, mock.sentinel.stderr, mock.sentinel.exit_code) if get_output_exception: self._mock_conn.get_command_output.side_effect = ( get_output_exception) self.assertRaises( get_output_exception, self._winrm.execute, mock.sentinel.cmd) else: self._mock_conn.get_command_output.return_value = command_output result = self._winrm.execute(mock.sentinel.cmd) self.assertEqual(command_output, result) self._mock_conn.open_shell.assert_called_once_with() self._mock_conn.run_command.assert_called_once_with( mock.sentinel.shell_id, mock.sentinel.cmd) self._mock_conn.cleanup_command.assert_called_once_with( mock.sentinel.shell_id, mock.sentinel.cmd_id) self._mock_conn.close_shell.assert_called_once_with( mock.sentinel.shell_id) def test_execute(self): self._test_execute() def test_execute_exception(self): self._test_execute(get_output_exception=Exception) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04167 manila-21.0.0/manila/tests/share/drivers/zadara/0000775000175000017500000000000000000000000021503 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/zadara/__init__.py0000664000175000017500000000000000000000000023602 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/zadara/test_zadara.py0000664000175000017500000012136500000000000024366 0ustar00zuulzuul00000000000000# 
Copyright (c) 2021 Zadara Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Zadara VPSA Share driver """ import copy import requests from unittest import mock from urllib import parse from manila import context from manila import exception as manila_exception from manila.share import configuration from manila.share.drivers.zadara import zadara from manila import test from manila.tests import fake_share def check_access_key(func): """A decorator for all operations that needed an API before executing""" def wrap(self, *args, **kwargs): if not self._is_correct_access_key(): return RUNTIME_VARS['bad_login'] return func(self, *args, **kwargs) return wrap DEFAULT_RUNTIME_VARS = { 'status': 200, 'user': 'test', 'password': 'test_password', 'access_key': '0123456789ABCDEF', 'volumes': [], 'servers': [], 'controllers': [('active_ctrl', {'display-name': 'test_ctrl'})], 'counter': 1000, "login": """ { "response": { "user": { "updated-at": "2021-01-22", "access-key": "%s", "id": 1, "created-at": "2021-01-22", "email": "jsmith@example.com", "username": "jsmith" }, "status": 0 } }""", "good": """ { "response": { "status": 0 } }""", "good_snapshot": """ { "response": { "snapshot_name": "fakesnaplocation", "status": 0 } }""", "bad_login": """ { "response": { "status": 5, "status-msg": "Some message..." 
} }""", "bad_volume": """ { "response": { "status": 10081, "status-msg": "Virtual volume xxx should be found" } }""", "fake_volume": """ { "response": { "volumes": [], "status": 0, "status-msg": "Virtual volume xxx doesn't exist" } }""", "bad_server": """ { "response": { "status": 10086, "status-msg": "Server xxx not found" } }""", "server_created": """ { "response": { "server_name": "%s", "status": 0 } }""", } RUNTIME_VARS = None class FakeResponse(object): def __init__(self, method, url, params, body, headers, **kwargs): # kwargs include: verify, timeout self.method = method self.url = url self.body = body self.params = params self.headers = headers self.status = RUNTIME_VARS['status'] @property def access_key(self): """Returns Response Access Key""" return self.headers["X-Access-Key"] def read(self): ops = {'POST': [('/api/users/login.json', self._login), ('/api/volumes.json', self._create_volume), ('/api/servers.json', self._create_server), ('/api/servers/*/volumes.json', self._attach), ('/api/volumes/*/rename.json', self._rename), ('/api/volumes/*/detach.json', self._detach), ('/api/volumes/*/expand.json', self._expand), ('/api/consistency_groups/*/snapshots.json', self._create_snapshot), ('/api/snapshots/*/rename.json', self._rename_snapshot), ('/api/consistency_groups/*/clone.json', self._create_clone_from_snapshot), ('/api/consistency_groups/*/clone.json', self._create_clone)], 'DELETE': [('/api/volumes/*', self._delete), ('/api/snapshots/*', self._delete_snapshot)], 'GET': [('/api/volumes.json?showonlyfile=YES', self._list_volumes), ('/api/volumes.json?display_name=*', self._get_volume_by_name), ('/api/pools/*.json', self._get_pool), ('/api/vcontrollers.json', self._list_controllers), ('/api/servers.json', self._list_servers), ('/api/consistency_groups/*/snapshots.json', self._list_vol_snapshots), ('/api/volumes/*/servers.json', self._list_vol_attachments)] } ops_list = ops[self.method] for (templ_url, func) in ops_list: if self._compare_url(self.url, 
templ_url): result = func() return result @staticmethod def _compare_url(url, template_url): items = url.split('/') titems = template_url.split('/') for (i, titem) in enumerate(titems): if '*' not in titem and titem != items[i]: return False if '?' in titem and titem.split('=')[0] != items[i].split('=')[0]: return False return True @staticmethod def _get_counter(): cnt = RUNTIME_VARS['counter'] RUNTIME_VARS['counter'] += 1 return cnt def _login(self): params = self.body if (params['user'] == RUNTIME_VARS['user'] and params['password'] == RUNTIME_VARS['password']): return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key'] else: return RUNTIME_VARS['bad_login'] def _is_correct_access_key(self): return self.access_key == RUNTIME_VARS['access_key'] @check_access_key def _create_volume(self): params = self.body params['display-name'] = params['name'] params['cg-name'] = params['name'] params['snapshots'] = [] params['server_ext_names'] = '' params['provisioned-capacity'] = 1 vpsa_vol = 'volume-%07d' % self._get_counter() params['nfs-export-path'] = '10.2.1.56:/export/%s' % vpsa_vol RUNTIME_VARS['volumes'].append((vpsa_vol, params)) return RUNTIME_VARS['good'] @check_access_key def _create_server(self): params = self.body params['display-name'] = params['display_name'] vpsa_srv = 'srv-%07d' % self._get_counter() RUNTIME_VARS['servers'].append((vpsa_srv, params)) return RUNTIME_VARS['server_created'] % vpsa_srv @check_access_key def _attach(self): srv = self.url.split('/')[3] params = self.body vol = params['volume_name[]'] for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: attachments = params['server_ext_names'].split(',') if srv in attachments: # already attached - ok return RUNTIME_VARS['good'] else: if not attachments[0]: params['server_ext_names'] = srv else: params['server_ext_names'] += ',' + srv return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _detach(self): params = self.body vol = 
self.url.split('/')[3] srv = params['server_name[]'] for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: attachments = params['server_ext_names'].split(',') if srv not in attachments: return RUNTIME_VARS['bad_server'] else: attachments.remove(srv) params['server_ext_names'] = (','.join([str(elem) for elem in attachments])) return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _expand(self): params = self.body vol = self.url.split('/')[3] capacity = params['capacity'] for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: params['capacity'] = capacity return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _rename(self): params = self.body vol = self.url.split('/')[3] for (vol_name, vol_params) in RUNTIME_VARS['volumes']: if vol_params['name'] == vol: vol_params['name'] = params['new_name'] vol_params['display-name'] = params['new_name'] vol_params['cg-name'] = params['new_name'] return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _rename_snapshot(self): params = self.body vpsa_snapshot = self.url.split('/')[3] for (vol_name, vol_params) in RUNTIME_VARS['volumes']: for snapshot in vol_params['snapshots']: if vpsa_snapshot == snapshot['provider-location']: snapshot['name'] = params['newname'] snapshot['display-name'] = params['newname'] return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _create_snapshot(self): params = self.body cg_name = self.url.split('/')[3] snap_name = params['display_name'] for (vol_name, params) in RUNTIME_VARS['volumes']: if params['cg-name'] == cg_name: snapshots = params['snapshots'] if snap_name in snapshots: # already attached return RUNTIME_VARS['bad_volume'] else: snapshots.append(snap_name) return RUNTIME_VARS['good_snapshot'] return RUNTIME_VARS['bad_volume'] @check_access_key def _delete_snapshot(self): snap = self.url.split('/')[3].split('.')[0] for (vol_name, 
params) in RUNTIME_VARS['volumes']: if snap in params['snapshots']: params['snapshots'].remove(snap) return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _create_clone_from_snapshot(self): params = self.body params['display-name'] = params['name'] params['cg-name'] = params['name'] params['capacity'] = 1 params['snapshots'] = [] params['server_ext_names'] = '' params['pool'] = 'pool-0001' params['provisioned-capacity'] = 1 vpsa_vol = 'volume-%07d' % self._get_counter() params['nfs-export-path'] = '10.2.1.56:/export/%s' % vpsa_vol RUNTIME_VARS['volumes'].append((vpsa_vol, params)) return RUNTIME_VARS['good'] @check_access_key def _create_clone(self): params = self.body params['display-name'] = params['name'] params['cg-name'] = params['name'] params['capacity'] = 1 params['snapshots'] = [] params['server_ext_names'] = '' vpsa_vol = 'volume-%07d' % self._get_counter() RUNTIME_VARS['volumes'].append((vpsa_vol, params)) return RUNTIME_VARS['good'] def _delete(self): vol = self.url.split('/')[3].split('.')[0] for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: if params['server_ext_names']: # there are attachments - should be volume busy error return RUNTIME_VARS['bad_volume'] else: RUNTIME_VARS['volumes'].remove((vol_name, params)) return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] def _generate_list_resp(self, null_body, body, lst, vol): resp = '' for (obj, params) in lst: if vol: resp += body % (params['name'], params['display-name'], params['cg-name'], params['capacity'], params['pool'], params['provisioned-capacity'], params['nfs-export-path']) else: resp += body % (obj, params['display-name']) if resp: return resp else: return null_body def _list_volumes(self): null_body = """ { "response": { "volumes": [ ], "status": 0 } }""" body = """ { "response": { "volumes": %s, "status": 0 } }""" volume_obj = """ { "name": "%s", "display_name": "%s", "cg_name": "%s", "status": "Available", 
"virtual_capacity": %d, "pool_name": "%s", "allocated-capacity": 1, "provisioned_capacity": "%s", "raid-group-name": "r5", "cache": "write-through", "created-at": "2021-01-22", "modified-at": "2021-01-22", "nfs_export_path": "%s" } """ if len(RUNTIME_VARS['volumes']) == 0: return null_body resp = '' volume_list = '' count = 0 for (vol_name, params) in RUNTIME_VARS['volumes']: volume_dict = volume_obj % (params['name'], params['display-name'], params['cg-name'], params['capacity'], params['pool'], params['provisioned-capacity'], params['nfs-export-path']) if count == 0: volume_list += volume_dict count += 1 elif count != len(RUNTIME_VARS['volumes']): volume_list = volume_list + ',' + volume_dict count += 1 if volume_list: volume_list = '[' + volume_list + ']' resp = body % volume_list return resp return RUNTIME_VARS['bad_volume'] def _get_volume_by_name(self): volume_name = self.url.split('=')[1] body = """ { "response": { "volumes": [ { "name": "%s", "display_name": "%s", "cg_name": "%s", "status": "Available", "virtual_capacity": %d, "pool_name": "%s", "allocated-capacity": 1, "provisioned_capacity": %d, "raid-group-name": "r5", "cache": "write-through", "created-at": "2021-01-22", "modified-at": "2021-01-22", "nfs_export_path": "%s", "server_ext_names": "%s" } ], "status": 0 } }""" for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == volume_name: resp = body % (volume_name, params['display-name'], params['cg-name'], params['capacity'], params['pool'], params['provisioned-capacity'], params['nfs-export-path'], params['server_ext_names']) return resp return RUNTIME_VARS['fake_volume'] def _list_controllers(self): null_body = """ { "response": { "vcontrollers": [ ], "status": 0 } }""" body = """ { "response": { "vcontrollers": [ { "name": "%s", "display_name": "%s", "state": "active", "target": "iqn.2011-04.zadarastorage:vsa-xxx:1", "iscsi_ip": "1.1.1.1", "iscsi_ipv6": "", "mgmt-ip": "1.1.1.1", "software-ver": "0.0.09-05.1--77.7", "heartbeat1": 
"ok", "heartbeat2": "ok", "vpsa_chap_user": "test_chap_user", "vpsa_chap_secret": "test_chap_secret" } ], "status": 0 } }""" return self._generate_list_resp(null_body, body, RUNTIME_VARS['controllers'], False) def _get_pool(self): response = """ { "response": { "pool": { "name": "pool-0001", "capacity": 100, "available_capacity": 99, "provisioned_capacity": 1 }, "status": 0 } }""" return response def _list_servers(self): null_body = """ { "response": { "servers": [ ], "status": 0 } }""" body = """ { "response": { "servers": %s, "status": 0 } }""" server_obj = """ { "name": "%s", "display_name": "%s", "iscsi_ip": "%s", "status": "Active", "created-at": "2021-01-22", "modified-at": "2021-01-22" } """ resp = '' server_list = '' count = 0 for (obj, params) in RUNTIME_VARS['servers']: server_dict = server_obj % (obj, params['display-name'], params['iqn']) if count == 0: server_list += server_dict count += 1 elif count != len(RUNTIME_VARS['servers']): server_list = server_list + ',' + server_dict count += 1 server_list = '[' + server_list + ']' resp = body % server_list if resp: return resp else: return null_body def _get_server_obj(self, name): for (srv_name, params) in RUNTIME_VARS['servers']: if srv_name == name: return params def _list_vol_attachments(self): vol = self.url.split('/')[3] null_body = """ { "response": { "servers": [ ], "status": 0 } }""" body = """ { "response": { "servers": %s, "status": 0 } }""" server_obj = """ { "name": "%s", "display_name": "%s", "iscsi_ip": "%s", "target": "iqn.2011-04.zadarastorage:vsa-xxx:1", "lun": 0 } """ for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: attachments = params['server_ext_names'].split(',') if not attachments[0]: return null_body resp = '' server_list = '' count = 0 for server in attachments: srv_params = self._get_server_obj(server) server_dict = (server_obj % (server, srv_params['display_name'], srv_params['iscsi'])) if count == 0: server_list += server_dict count += 1 elif count != 
len(attachments): server_list = server_list + ',' + server_dict count += 1 server_list = '[' + server_list + ']' resp = body % server_list return resp return RUNTIME_VARS['bad_volume'] def _list_vol_snapshots(self): cg_name = self.url.split('/')[3] null_body = """ { "response": { "snapshots": [ ], "status": 0 } }""" body = """ { "response": { "snapshots": %s, "status": 0 } }""" snapshot_obj = """ { "name": "%s", "display_name": "%s", "status": "normal", "cg-name": "%s", "pool-name": "pool-00000001" } """ for (vol_name, params) in RUNTIME_VARS['volumes']: if params['cg-name'] == cg_name: snapshots = params['snapshots'] if len(snapshots) == 0: return null_body resp = '' snapshot_list = '' count = 0 for snapshot in snapshots: snapshot_dict = snapshot_obj % (snapshot, snapshot, cg_name) if count == 0: snapshot_list += snapshot_dict count += 1 elif count != len(snapshots): snapshot_list = snapshot_list + ',' + snapshot_dict count += 1 snapshot_list = '[' + snapshot_list + ']' resp = body % snapshot_list return resp return RUNTIME_VARS['bad_volume'] class FakeRequests(object): """A fake requests for zadara volume driver tests.""" def __init__(self, method, api_url, params=None, data=None, headers=None, **kwargs): apiurl_items = parse.urlparse(api_url) if apiurl_items.query: url = apiurl_items.path + '?' 
+ apiurl_items.query else: url = apiurl_items.path res = FakeResponse(method, url, params, data, headers, **kwargs) self.content = res.read() self.status_code = res.status class ZadaraVPSAShareDriverTestCase(test.TestCase): @mock.patch.object(requests.Session, 'request', FakeRequests) def setUp(self): super(ZadaraVPSAShareDriverTestCase, self).setUp() def _safe_get(opt): return getattr(self.configuration, opt) self._context = context.get_admin_context() self.configuration = mock.Mock(spec=configuration.Configuration) self.configuration.safe_get = mock.Mock(side_effect=_safe_get) global RUNTIME_VARS RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS) self.configuration.driver_handles_share_servers = False self.configuration.network_config_group = ( 'fake_network_config_group') self.configuration.admin_network_config_group = ( 'fake_admin_network_config_group') self.configuration.reserved_percentage = 0 self.configuration.reserved_snapshot_percentage = 0 self.configuration.reserved_share_extend_percentage = 0 self.configuration.zadara_use_iser = True self.configuration.zadara_vpsa_host = '192.168.5.5' self.configuration.zadara_vpsa_port = '80' self.configuration.zadara_user = 'test' self.configuration.zadara_password = 'test_password' self.configuration.zadara_access_key = '0123456789ABCDEF' self.configuration.zadara_vpsa_poolname = 'pool-0001' self.configuration.zadara_vol_encrypt = False self.configuration.zadara_share_name_template = 'OS_share-%s' self.configuration.zadara_share_snap_name_template = ( 'OS_share-snapshot-%s') self.configuration.zadara_vpsa_use_ssl = False self.configuration.zadara_ssl_cert_verify = False self.configuration.zadara_default_snap_policy = False self.configuration.zadara_driver_ssl_cert_path = None self.configuration.zadara_gen3_vol_compress = True self.configuration.zadara_gen3_vol_dedupe = True self.configuration.share_backend_name = 'zadaravpsa' self.configuration.reserved_share_percentage = '0' 
self.configuration.reserved_share_from_snapshot_percentage = '0' self.configuration.reserved_share_extend_percentage = 0 self.configuration.replication_domain = None self.configuration.filter_function = None self.configuration.goodness_function = None self.configuration.goodness_function = None self.driver = (zadara.ZadaraVPSAShareDriver( configuration=self.configuration)) self.driver.do_setup(None) self.driver.api.get_share_metadata = mock.Mock(return_value={}) self.driver._get_share_export_location = mock.Mock() @mock.patch.object(requests.Session, 'request', FakeRequests) def test_do_setup(self): self.driver.do_setup(self._context) self.assertIsNotNone(self.driver.vpsa) self.assertEqual(self.driver.vpsa.access_key, self.configuration.zadara_access_key) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_no_active_ctrl(self): share = fake_share.fake_share(id='fakeid', share_proto='NFS', share_id='fakeshareid') self.driver.create_share(self._context, share) access = fake_share.fake_access() RUNTIME_VARS['controllers'] = [] self.assertRaises(manila_exception.ZadaraVPSANoActiveController, self.driver._allow_access, self._context, share, access) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_create_share_unsupported_proto(self): share = fake_share.fake_share(share_proto='INVALID') self.assertRaises(manila_exception.ZadaraInvalidProtocol, self.driver.create_share, self._context, share) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_create_delete_share(self): """Create share.""" share = fake_share.fake_share(share_proto='NFS', share_id='fakeshareid') self.driver.create_share(self._context, share) self.driver.delete_share(self._context, share) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_create_delete_multiple_shares(self): """Create/Delete multiple shares.""" share1 = fake_share.fake_share(id='fakeid1', share_proto='NFS', share_id='fakeshareid1') 
self.driver.create_share(self._context, share1) share2 = fake_share.fake_share(id='fakeid2', share_proto='CIFS', share_id='fakeshareid2') self.driver.create_share(self._context, share2) self.driver.delete_share(self._context, share1) self.driver.delete_share(self._context, share2) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_delete_non_existent(self): """Delete non-existent share.""" share = fake_share.fake_share(share_proto='NFS', share_id='fakeshareid') self.driver.delete_share(self._context, share) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_create_delete_share_snapshot(self): """Create/Delete share snapshot.""" share1 = fake_share.fake_share(id='fakeid1', share_proto='NFS', share_id='fakeshareid1') self.driver.create_share(self._context, share1) snapshot = fake_share.fake_snapshot(name='fakesnap', share=share1, share_name=share1['name'], share_id=share1['id'], provider_location='fakelocation') share2 = fake_share.fake_share(id='fakeid2', share_proto='NFS', share_id='fakeshareid2') self.assertRaises(manila_exception.ManilaException, self.driver.create_snapshot, self._context, {'name': snapshot['name'], 'id': snapshot['id'], 'share': share2}) self.driver.create_snapshot(self._context, snapshot) # Deleted should succeed for missing volume self.driver.delete_snapshot(self._context, {'name': snapshot['name'], 'id': snapshot['id'], 'share': share2}) # Deleted should succeed for missing snap self.driver.delete_snapshot(self._context, {'name': 'wrong_snap', 'id': 'wrong_id', 'share': share1}) self.driver.delete_snapshot(self._context, snapshot) self.driver.delete_share(self._context, share1) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_extend_share(self): """Expand share test.""" share1 = fake_share.fake_share(id='fakeid1', share_proto='NFS', share_id='fakeshareid', size=10) share2 = fake_share.fake_share(id='fakeid2', share_proto='NFS', size=10) self.driver.create_share(self._context, 
share1) self.assertRaises(manila_exception.ZadaraShareNotFound, self.driver.extend_share, share2, 15) self.driver.extend_share(share1, 15) self.driver.delete_share(self._context, share1) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_create_share_from_snapshot(self): """Create a share from snapshot test.""" share1 = fake_share.fake_share(id='fakeid1', share_proto='NFS', share_id='fakeshareid1') share2 = fake_share.fake_share(id='fakeid2', share_proto='NFS', share_id='fakeshareid2') self.driver.create_share(self._context, share1) snapshot = fake_share.fake_snapshot(name='fakesnap', share=share1, share_name=share1['name'], share_id=share1['id'], share_instance_id=share1['id'], provider_location='fakelocation') self.driver.create_snapshot(self._context, snapshot) self.assertRaises(manila_exception.ManilaException, self.driver.create_share_from_snapshot, self._context, share2, {'name': snapshot['name'], 'id': snapshot['id'], 'share': share2, 'share_instance_id': share2['id']}) self.assertRaises(manila_exception.ManilaException, self.driver.create_share_from_snapshot, self._context, share2, {'name': 'fakesnapname', 'id': 'fakesnapid', 'share': share1, 'share_instance_id': share1['id']}) self.driver.create_share_from_snapshot(self._context, share2, snapshot) self.driver.delete_share(self._context, share1) self.driver.delete_share(self._context, share2) def create_vpsa_backend_share(self): vpsashare_params = {} vpsashare_params['id'] = 'fake_id' vpsashare_params['name'] = 'fake_name' vpsashare_params['display-name'] = 'fake_name' vpsashare_params['cg-name'] = 'fake_name' vpsashare_params['size'] = 1 vpsashare_params['capacity'] = 1 vpsashare_params['pool'] = 'pool-0001' vpsashare_params['share_proto'] = 'NFS' vpsashare_params['nfs-export-path'] = '10.2.1.56:/export/manage_id' vpsashare_params['provisioned-capacity'] = 1 vpsashare_params['server_ext_names'] = '' vpsa_volname = 'fake-volume' vpsa_share = (vpsa_volname, vpsashare_params) 
        # Register the fake backend share in the shared fake-backend state
        # so driver calls routed through FakeRequests can find it.
        RUNTIME_VARS['volumes'].append(vpsa_share)
        return vpsa_share

    @mock.patch.object(requests.Session, 'request', FakeRequests)
    def test_manage_existing_share(self):
        """manage_existing adopts a pre-created backend share.

        The manila share's export path matches the backend share's
        'nfs-export-path', and after managing, the backend display-name is
        expected to be renamed to include the manila share name.
        """
        share1 = {'id': 'manage_name',
                  'name': 'manage_name',
                  'display-name': 'manage_name',
                  'size': 1,
                  'share_proto': 'NFS',
                  'export_locations':
                  [{'path': '10.2.1.56:/export/manage_id'}]}
        driver_options = {}
        vpsa_share = self.create_vpsa_backend_share()
        self.driver.manage_existing(share1, driver_options)
        # The backend rename uses a '-'-separated template; the second
        # component should now be the manila display name.
        self.assertEqual(vpsa_share[1]['display-name'].split('-')[1],
                         share1['display-name'])
        self.driver.delete_share(self._context, share1)

    @mock.patch.object(requests.Session, 'request', FakeRequests)
    def test_get_share_stats(self):
        """Get stats test."""
        self.configuration.safe_get.return_value = 'ZadaraVPSAShareDriver'
        data = self.driver.get_share_stats(True)
        self.assertEqual('Zadara Storage', data['vendor_name'])
        # Capacity is reported as 'unknown' by this driver.
        self.assertEqual('unknown', data['total_capacity_gb'])
        self.assertEqual('unknown', data['free_capacity_gb'])
        self.assertEqual(data['reserved_percentage'],
                         self.configuration.reserved_percentage)
        self.assertEqual(data['reserved_snapshot_percentage'],
                         self.configuration.reserved_snapshot_percentage)
        self.assertEqual(data['reserved_share_extend_percentage'],
                         self.configuration.reserved_share_extend_percentage)
        self.assertEqual(data['snapshot_support'], True)
        self.assertEqual(data['create_share_from_snapshot_support'], True)
        self.assertEqual(data['revert_to_snapshot_support'], False)
        self.assertEqual(data['vendor_name'], 'Zadara Storage')
        self.assertEqual(data['driver_version'], self.driver.VERSION)
        self.assertEqual(data['storage_protocol'], 'NFS_CIFS')
        self.assertEqual(data['share_backend_name'],
                         self.configuration.share_backend_name)

    def test_allow_access_with_incorrect_access_type(self):
        """_allow_access rejects unsupported access rule types."""
        share = fake_share.fake_share(id='fakeid1',
                                      share_proto='NFS')
        access = fake_share.fake_access(access_type='fake_type')
        self.assertRaises(manila_exception.ZadaraInvalidShareAccessType,
                          self.driver._allow_access,
self._context, share, access) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_share_allow_deny_access(self): """Test share access allow any deny rules.""" share = fake_share.fake_share(id='fakeid', share_proto='NFS', share_id='fakeshareid') self.driver.create_share(self._context, share) access = fake_share.fake_access() # Attach server for accessing share with the fake access rules allow_access = self.driver._allow_access(self._context, share, access) self.assertEqual(allow_access['driver_volume_type'], share['share_proto']) self.assertEqual('1.1.1.1:3260', allow_access['data']['target_portal']) (srv_name, srv_params) = RUNTIME_VARS['servers'][0] self.assertEqual(srv_params['iscsi'], allow_access['data']['target_ip']) self.assertEqual(share['id'], allow_access['data']['id']) self.assertEqual('CHAP', allow_access['data']['auth_method']) self.assertEqual('test_chap_user', allow_access['data']['auth_username']) self.assertEqual('test_chap_secret', allow_access['data']['auth_password']) # Detach will not throw any error with missing access rules dup_access = fake_share.fake_access() self.driver._deny_access(self._context, share, dup_access) # Detach server from the share with deny access rules self.driver._deny_access(self._context, share, access) self.driver.delete_share(self._context, share) def create_vpsa_backend_share_snapshot(self, share): vpsasnap_params = {} vpsasnap_params['id'] = 'fakesnapid' vpsasnap_params['name'] = 'fakesnapname' vpsasnap_params['display-name'] = 'fakesnapname' vpsasnap_params['provider-location'] = 'fakesnaplocation' (vol_name, vol_params) = RUNTIME_VARS['volumes'][0] vol_params['snapshots'].append(vpsasnap_params) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_manage_existing_snapshot(self): share = {'id': 'fake_id', 'share_id': 'fake_shareid', 'name': 'fake_name', 'display-name': 'fake_name', 'cg-name': 'fake_name', 'size': 1, 'capacity': 1, 'share_proto': 'NFS', 'pool': 'pool-0001', 
'nfs-export-path': '10.2.1.56:/export/manage_id', 'provisioned-capacity': 1} self.driver.create_share(self._context, share) # Create a backend share that will be managed for manila self.create_vpsa_backend_share_snapshot(share) snapshot = {'id': 'manage_snapname', 'name': 'manage_snapname', 'display_name': 'manage_snapname', 'provider_location': 'fakesnaplocation', 'share': share} driver_options = {} self.driver.manage_existing_snapshot(snapshot, driver_options) # Check that the backend share has been renamed (vol_name, vol_params) = RUNTIME_VARS['volumes'][0] self.assertEqual( vol_params['snapshots'][0]['display-name'].split('-')[2], snapshot['display_name']) self.driver.delete_snapshot(self._context, snapshot) self.driver.delete_share(self._context, share) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04167 manila-21.0.0/manila/tests/share/drivers/zfsonlinux/0000775000175000017500000000000000000000000022460 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/zfsonlinux/__init__.py0000664000175000017500000000000000000000000024557 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/zfsonlinux/test_driver.py0000664000175000017500000032323100000000000025370 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_config import cfg from manila import context from manila import exception from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.zfsonlinux import driver as zfs_driver from manila import test from manila.tests import db_utils CONF = cfg.CONF class FakeConfig(object): def __init__(self, *args, **kwargs): self.driver_handles_share_servers = False self.share_driver = 'fake_share_driver_name' self.share_backend_name = 'FAKE_BACKEND_NAME' self.zfs_share_export_ip = kwargs.get( "zfs_share_export_ip", "1.1.1.1") self.zfs_service_ip = kwargs.get("zfs_service_ip", "2.2.2.2") self.zfs_zpool_list = kwargs.get( "zfs_zpool_list", ["foo", "bar/subbar", "quuz"]) self.zfs_use_ssh = kwargs.get("zfs_use_ssh", False) self.zfs_share_export_ip = kwargs.get( "zfs_share_export_ip", "240.241.242.243") self.zfs_service_ip = kwargs.get("zfs_service_ip", "240.241.242.244") self.ssh_conn_timeout = kwargs.get("ssh_conn_timeout", 123) self.zfs_ssh_username = kwargs.get( "zfs_ssh_username", 'fake_username') self.zfs_ssh_user_password = kwargs.get( "zfs_ssh_user_password", 'fake_pass') self.zfs_ssh_private_key_path = kwargs.get( "zfs_ssh_private_key_path", '/fake/path') self.zfs_replica_snapshot_prefix = kwargs.get( "zfs_replica_snapshot_prefix", "tmp_snapshot_for_replication_") self.zfs_migration_snapshot_prefix = kwargs.get( "zfs_migration_snapshot_prefix", "tmp_snapshot_for_migration_") self.zfs_dataset_creation_options = kwargs.get( "zfs_dataset_creation_options", ["fook=foov", "bark=barv"]) 
self.network_config_group = kwargs.get( "network_config_group", "fake_network_config_group") self.admin_network_config_group = kwargs.get( "admin_network_config_group", "fake_admin_network_config_group") self.config_group = kwargs.get("config_group", "fake_config_group") self.reserved_share_percentage = kwargs.get( "reserved_share_percentage", 0) self.reserved_share_from_snapshot_percentage = kwargs.get( "reserved_share_from_snapshot_percentage", 0) self.reserved_share_extend_percentage = kwargs.get( "reserved_share_extend_percentage", 0) self.max_over_subscription_ratio = kwargs.get( "max_over_subscription_ratio", 15.0) self.filter_function = kwargs.get("filter_function", None) self.goodness_function = kwargs.get("goodness_function", None) def safe_get(self, key): return getattr(self, key) def append_config_values(self, *args, **kwargs): pass class FakeDriverPrivateStorage(object): def __init__(self): self.storage = {} def update(self, entity_id, data): if entity_id not in self.storage: self.storage[entity_id] = {} self.storage[entity_id].update(data) def get(self, entity_id, key): return self.storage.get(entity_id, {}).get(key) def delete(self, entity_id): self.storage.pop(entity_id, None) class FakeTempDir(object): def __enter__(self, *args, **kwargs): return '/foo/path' def __exit__(self, *args, **kwargs): pass class GetBackendConfigurationTestCase(test.TestCase): def test_get_backend_configuration_success(self): backend_name = 'fake_backend_name' self.mock_object( zfs_driver.CONF, 'list_all_sections', mock.Mock(return_value=['fake1', backend_name, 'fake2'])) mock_config = self.mock_object( zfs_driver.configuration, 'Configuration') result = zfs_driver.get_backend_configuration(backend_name) self.assertEqual(mock_config.return_value, result) mock_config.assert_called_once_with( zfs_driver.driver.share_opts, config_group=backend_name) mock_config.return_value.append_config_values.assert_has_calls([ mock.call(zfs_driver.zfsonlinux_opts), 
mock.call(zfs_driver.share_manager_opts), mock.call(zfs_driver.driver.ssh_opts), ]) def test_get_backend_configuration_error(self): backend_name = 'fake_backend_name' self.mock_object( zfs_driver.CONF, 'list_all_sections', mock.Mock(return_value=['fake1', 'fake2'])) mock_config = self.mock_object( zfs_driver.configuration, 'Configuration') self.assertRaises( exception.BadConfigurationException, zfs_driver.get_backend_configuration, backend_name, ) self.assertFalse(mock_config.called) self.assertFalse(mock_config.return_value.append_config_values.called) @ddt.ddt class ZFSonLinuxShareDriverTestCase(test.TestCase): def setUp(self): self.mock_object(zfs_driver.CONF, '_check_required_opts') super(ZFSonLinuxShareDriverTestCase, self).setUp() self._context = context.get_admin_context() self.ssh_executor = self.mock_object(ganesha_utils, 'SSHExecutor') self.configuration = FakeConfig() self.private_storage = FakeDriverPrivateStorage() self.driver = zfs_driver.ZFSonLinuxShareDriver( configuration=self.configuration, private_storage=self.private_storage) self.mock_object(zfs_driver.time, 'sleep') def test_init(self): self.assertTrue(hasattr(self.driver, 'replica_snapshot_prefix')) self.assertEqual( self.driver.replica_snapshot_prefix, self.configuration.zfs_replica_snapshot_prefix) self.assertEqual( self.driver.backend_name, self.configuration.share_backend_name) self.assertEqual( self.driver.zpool_list, ['foo', 'bar', 'quuz']) self.assertEqual( self.driver.dataset_creation_options, self.configuration.zfs_dataset_creation_options) self.assertEqual( self.driver.share_export_ip, self.configuration.zfs_share_export_ip) self.assertEqual( self.driver.service_ip, self.configuration.zfs_service_ip) self.assertEqual( self.driver.private_storage, self.private_storage) self.assertTrue(hasattr(self.driver, '_helpers')) self.assertEqual(self.driver._helpers, {}) for attr_name in ('execute', 'execute_with_retry', 'parse_zfs_answer', 'get_zpool_option', 'get_zfs_option', 'zfs'): 
            # Each attribute is provided by the exec/zfs utils mixins.
            self.assertTrue(hasattr(self.driver, attr_name))

    def test_init_error_with_duplicated_zpools(self):
        # 'foo' and 'foo/quuz' live in the same zpool -> rejected at init.
        configuration = FakeConfig(
            zfs_zpool_list=['foo', 'bar', 'foo/quuz'])
        self.assertRaises(
            exception.BadConfigurationException,
            zfs_driver.ZFSonLinuxShareDriver,
            configuration=configuration,
            private_storage=self.private_storage
        )

    def test__setup_helpers(self):
        # _setup_helpers imports each 'PROTO=path.Class' entry and maps the
        # protocol name to an instance constructed with the configuration.
        mock_import_class = self.mock_object(
            zfs_driver.importutils, 'import_class')
        self.configuration.zfs_share_helpers = ['FOO=foo.module.WithHelper']
        result = self.driver._setup_helpers()
        self.assertIsNone(result)
        mock_import_class.assert_called_once_with('foo.module.WithHelper')
        mock_import_class.return_value.assert_called_once_with(
            self.configuration)
        self.assertEqual(
            self.driver._helpers,
            {'FOO': mock_import_class.return_value.return_value})

    def test__setup_helpers_error(self):
        # An empty helper list is a configuration error.
        self.configuration.zfs_share_helpers = []
        self.assertRaises(
            exception.BadConfigurationException, self.driver._setup_helpers)

    def test__get_share_helper(self):
        self.driver._helpers = {'FOO': 'BAR'}
        result = self.driver._get_share_helper('FOO')
        self.assertEqual('BAR', result)

    @ddt.data({}, {'foo': 'bar'})
    def test__get_share_helper_error(self, share_proto):
        # NOTE(review): the ddt-supplied 'share_proto' value is unused; the
        # test always asks for 'NFS', which is absent from _helpers.
        self.assertRaises(
            exception.InvalidShare, self.driver._get_share_helper, 'NFS')

    @ddt.data(True, False)
    def test_do_setup(self, use_ssh):
        self.mock_object(self.driver, '_setup_helpers')
        self.mock_object(self.driver, 'ssh_executor')
        self.configuration.zfs_use_ssh = use_ssh
        self.driver.do_setup('fake_context')
        self.driver._setup_helpers.assert_called_once_with()
        # With SSH enabled one extra executor (for remote access) is built.
        if use_ssh:
            self.assertEqual(4, self.driver.ssh_executor.call_count)
        else:
            self.assertEqual(3, self.driver.ssh_executor.call_count)

    @ddt.data(
        ('foo', '127.0.0.1'),
        ('127.0.0.1', 'foo'),
        ('256.0.0.1', '127.0.0.1'),
        ('::1/128', '127.0.0.1'),
        ('127.0.0.1', '::1/128'),
    )
    @ddt.unpack
    def test_do_setup_error_on_ip_addresses_configuration(
            self, share_export_ip, service_ip):
        self.mock_object(self.driver, '_setup_helpers')
self.driver.share_export_ip = share_export_ip self.driver.service_ip = service_ip self.assertRaises( exception.BadConfigurationException, self.driver.do_setup, 'fake_context') self.driver._setup_helpers.assert_called_once_with() @ddt.data([], '', None) def test_do_setup_no_zpools_configured(self, zpool_list): self.mock_object(self.driver, '_setup_helpers') self.driver.zpool_list = zpool_list self.assertRaises( exception.BadConfigurationException, self.driver.do_setup, 'fake_context') self.driver._setup_helpers.assert_called_once_with() @ddt.data(None, '', 'foo_replication_domain') def test__get_pools_info(self, replication_domain): self.mock_object( self.driver, 'get_zpool_option', mock.Mock(side_effect=['2G', '3G', '5G', '4G'])) self.configuration.replication_domain = replication_domain self.driver.zpool_list = ['foo', 'bar'] expected = [ {'pool_name': 'foo', 'total_capacity_gb': 3.0, 'free_capacity_gb': 2.0, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'compression': [True, False], 'dedupe': [True, False], 'thin_provisioning': [True], 'max_over_subscription_ratio': ( self.driver.configuration.max_over_subscription_ratio), 'qos': [False]}, {'pool_name': 'bar', 'total_capacity_gb': 4.0, 'free_capacity_gb': 5.0, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'compression': [True, False], 'dedupe': [True, False], 'thin_provisioning': [True], 'max_over_subscription_ratio': ( self.driver.configuration.max_over_subscription_ratio), 'qos': [False]}, ] if replication_domain: for pool in expected: pool['replication_type'] = 'readable' result = self.driver._get_pools_info() self.assertEqual(expected, result) self.driver.get_zpool_option.assert_has_calls([ mock.call('foo', 'free'), mock.call('foo', 'size'), mock.call('bar', 'free'), mock.call('bar', 'size'), ]) @ddt.data( ([], {'compression': [True, False], 'dedupe': [True, False]}), (['dedup=off'], {'compression': 
[True, False], 'dedupe': [False]}), (['dedup=on'], {'compression': [True, False], 'dedupe': [True]}), (['compression=on'], {'compression': [True], 'dedupe': [True, False]}), (['compression=off'], {'compression': [False], 'dedupe': [True, False]}), (['compression=fake'], {'compression': [True], 'dedupe': [True, False]}), (['compression=fake', 'dedup=off'], {'compression': [True], 'dedupe': [False]}), (['compression=off', 'dedup=on'], {'compression': [False], 'dedupe': [True]}), ) @ddt.unpack def test__init_common_capabilities( self, dataset_creation_options, expected_part): self.driver.dataset_creation_options = ( dataset_creation_options) expected = { 'thin_provisioning': [True], 'qos': [False], 'max_over_subscription_ratio': ( self.driver.configuration.max_over_subscription_ratio), } expected.update(expected_part) self.driver._init_common_capabilities() self.assertEqual(expected, self.driver.common_capabilities) @ddt.data(None, '', 'foo_replication_domain') def test__update_share_stats(self, replication_domain): self.configuration.replication_domain = replication_domain self.configuration.max_shares_per_share_server = -1 self.configuration.max_share_server_size = -1 self.mock_object(self.driver, '_get_pools_info') self.assertEqual({}, self.driver._stats) expected = { 'driver_handles_share_servers': False, 'driver_name': 'ZFS', 'driver_version': '1.0', 'free_capacity_gb': 'unknown', 'pools': self.driver._get_pools_info.return_value, 'qos': False, 'replication_domain': replication_domain, 'reserved_percentage': 0, 'reserved_snapshot_percentage': 0, 'reserved_share_extend_percentage': 0, 'share_backend_name': self.driver.backend_name, 'share_group_stats': {'consistent_snapshot_support': None}, 'snapshot_support': True, 'create_share_from_snapshot_support': True, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'storage_protocol': 'NFS', 'total_capacity_gb': 'unknown', 'vendor_name': 'Open Source', 'filter_function': None, 'goodness_function': 
None, 'mount_point_name_support': False, 'ipv4_support': True, 'ipv6_support': False, 'security_service_update_support': False, 'share_server_multiple_subnet_support': False, 'network_allocation_update_support': False, 'share_replicas_migration_support': False, 'encryption_support': None, } if replication_domain: expected['replication_type'] = 'readable' self.driver._update_share_stats() self.assertEqual(expected, self.driver._stats) self.driver._get_pools_info.assert_called_once_with() @ddt.data('', 'foo', 'foo-bar', 'foo_bar', 'foo-bar_quuz') def test__get_share_name(self, share_id): prefix = 'fake_prefix_' self.configuration.zfs_dataset_name_prefix = prefix self.configuration.zfs_dataset_snapshot_name_prefix = 'quuz' expected = prefix + share_id.replace('-', '_') result = self.driver._get_share_name(share_id) self.assertEqual(expected, result) @ddt.data('', 'foo', 'foo-bar', 'foo_bar', 'foo-bar_quuz') def test__get_snapshot_name(self, snapshot_id): prefix = 'fake_prefix_' self.configuration.zfs_dataset_name_prefix = 'quuz' self.configuration.zfs_dataset_snapshot_name_prefix = prefix expected = prefix + snapshot_id.replace('-', '_') result = self.driver._get_snapshot_name(snapshot_id) self.assertEqual(expected, result) def test__get_dataset_creation_options_not_set(self): self.driver.dataset_creation_options = [] mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) share = {'size': '5'} result = self.driver._get_dataset_creation_options(share=share) self.assertIsInstance(result, list) self.assertEqual(2, len(result)) for v in ('quota=5G', 'readonly=off'): self.assertIn(v, result) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data(True, False) def test__get_dataset_creation_options(self, is_readonly): mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) 
self.driver.dataset_creation_options = [ 'readonly=quuz', 'sharenfs=foo', 'sharesmb=bar', 'k=v', 'q=w', ] share = {'size': 5} readonly = 'readonly=%s' % ('on' if is_readonly else 'off') expected = [readonly, 'k=v', 'q=w', 'quota=5G'] result = self.driver._get_dataset_creation_options( share=share, is_readonly=is_readonly) self.assertEqual(sorted(expected), sorted(result)) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data( (' True', [True, False], ['dedup=off'], 'dedup=on'), ('True', [True, False], ['dedup=off'], 'dedup=on'), ('on', [True, False], ['dedup=off'], 'dedup=on'), ('yes', [True, False], ['dedup=off'], 'dedup=on'), ('1', [True, False], ['dedup=off'], 'dedup=on'), ('True', [True], [], 'dedup=on'), (' False', [True, False], [], 'dedup=off'), ('False', [True, False], [], 'dedup=off'), ('False', [False], ['dedup=on'], 'dedup=off'), ('off', [False], ['dedup=on'], 'dedup=off'), ('no', [False], ['dedup=on'], 'dedup=off'), ('0', [False], ['dedup=on'], 'dedup=off'), ) @ddt.unpack def test__get_dataset_creation_options_with_updated_dedupe( self, dedupe_extra_spec, dedupe_capability, driver_options, expected): mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={'dedupe': dedupe_extra_spec})) self.driver.dataset_creation_options = driver_options self.driver.common_capabilities['dedupe'] = dedupe_capability share = {'size': 5} expected_options = ['quota=5G', 'readonly=off'] expected_options.append(expected) result = self.driver._get_dataset_creation_options(share=share) self.assertEqual(sorted(expected_options), sorted(result)) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data( ('on', [True, False], ['compression=off'], 'compression=on'), ('on', [True], [], 'compression=on'), ('off', [False], ['compression=on'], 'compression=off'), ('off', [True, False], [], 'compression=off'), ('foo', [True, False], [], 'compression=foo'), ('bar', [True], [], 
'compression=bar'), ) @ddt.unpack def test__get_dataset_creation_options_with_updated_compression( self, extra_spec, capability, driver_options, expected_option): mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={'zfsonlinux:compression': extra_spec})) self.driver.dataset_creation_options = driver_options self.driver.common_capabilities['compression'] = capability share = {'size': 5} expected_options = ['quota=5G', 'readonly=off'] expected_options.append(expected_option) result = self.driver._get_dataset_creation_options(share=share) self.assertEqual(sorted(expected_options), sorted(result)) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data( ({'dedupe': 'fake'}, {'dedupe': [True, False]}), ({'dedupe': 'on'}, {'dedupe': [False]}), ({'dedupe': 'off'}, {'dedupe': [True]}), ({'zfsonlinux:compression': 'fake'}, {'compression': [False]}), ({'zfsonlinux:compression': 'on'}, {'compression': [False]}), ({'zfsonlinux:compression': 'off'}, {'compression': [True]}), ) @ddt.unpack def test__get_dataset_creation_options_error( self, extra_specs, common_capabilities): mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value=extra_specs)) share = {'size': 5} self.driver.common_capabilities.update(common_capabilities) self.assertRaises( exception.ZFSonLinuxException, self.driver._get_dataset_creation_options, share=share ) mock_get_extra_specs_from_share.assert_called_once_with(share) @ddt.data('bar/quuz', 'bar/quuz/', 'bar') def test__get_dataset_name(self, second_zpool): self.configuration.zfs_zpool_list = ['foo', second_zpool] prefix = 'fake_prefix_' self.configuration.zfs_dataset_name_prefix = prefix share = {'id': 'abc-def_ghi', 'host': 'hostname@backend_name#bar'} result = self.driver._get_dataset_name(share) if second_zpool[-1] == '/': second_zpool = second_zpool[0:-1] expected = '%s/%sabc_def_ghi' % 
(second_zpool, prefix) self.assertEqual(expected, result) def test_create_share(self): mock_get_helper = self.mock_object(self.driver, '_get_share_helper') self.mock_object(self.driver, 'zfs') mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) context = 'fake_context' share = { 'id': 'fake_share_id', 'host': 'hostname@backend_name#bar', 'share_proto': 'NFS', 'size': 4, } self.configuration.zfs_dataset_name_prefix = 'some_prefix_' self.configuration.zfs_ssh_username = 'someuser' self.driver.share_export_ip = '1.1.1.1' self.driver.service_ip = '2.2.2.2' dataset_name = 'bar/subbar/some_prefix_fake_share_id' result = self.driver.create_share(context, share, share_server=None) self.assertEqual( mock_get_helper.return_value.create_exports.return_value, result, ) self.assertEqual( 'share', self.driver.private_storage.get(share['id'], 'entity_type')) self.assertEqual( dataset_name, self.driver.private_storage.get(share['id'], 'dataset_name')) self.assertEqual( 'someuser@2.2.2.2', self.driver.private_storage.get(share['id'], 'ssh_cmd')) self.assertEqual( 'bar', self.driver.private_storage.get(share['id'], 'pool_name')) self.driver.zfs.assert_called_once_with( 'create', '-o', 'quota=4G', '-o', 'fook=foov', '-o', 'bark=barv', '-o', 'readonly=off', 'bar/subbar/some_prefix_fake_share_id') mock_get_helper.assert_has_calls([ mock.call('NFS'), mock.call().create_exports(dataset_name) ]) mock_get_extra_specs_from_share.assert_called_once_with(share) def test_create_share_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.create_share, 'fake_context', 'fake_share', share_server={'id': 'fake_server'}, ) def test_delete_share(self): dataset_name = 'bar/subbar/some_prefix_fake_share_id' mock_delete = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(self.driver, '_get_share_helper') self.mock_object(zfs_driver.LOG, 'warning') 
self.mock_object( self.driver, 'zfs', mock.Mock(return_value=('a', 'b'))) snap_name = '%s@%s' % ( dataset_name, self.driver.replica_snapshot_prefix) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock( side_effect=[ [{'NAME': 'fake_dataset_name'}, {'NAME': dataset_name}], [{'NAME': 'snap_name'}, {'NAME': '%s@foo' % dataset_name}, {'NAME': snap_name}], ])) context = 'fake_context' share = { 'id': 'fake_share_id', 'host': 'hostname@backend_name#bar', 'share_proto': 'NFS', 'size': 4, } self.configuration.zfs_dataset_name_prefix = 'some_prefix_' self.configuration.zfs_ssh_username = 'someuser' self.driver.share_export_ip = '1.1.1.1' self.driver.service_ip = '2.2.2.2' self.driver.private_storage.update( share['id'], {'pool_name': 'bar', 'dataset_name': dataset_name} ) self.driver.delete_share(context, share, share_server=None) self.driver.zfs.assert_has_calls([ mock.call('list', '-r', 'bar'), mock.call('list', '-r', '-t', 'snapshot', 'bar'), ]) self.driver._get_share_helper.assert_has_calls([ mock.call('NFS'), mock.call().remove_exports(dataset_name)]) self.driver.parse_zfs_answer.assert_has_calls([ mock.call('a'), mock.call('a')]) mock_delete.assert_has_calls([ mock.call(snap_name), mock.call(dataset_name), ]) self.assertEqual(0, zfs_driver.LOG.warning.call_count) def test_delete_share_absent(self): dataset_name = 'bar/subbar/some_prefix_fake_share_id' mock_delete = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(self.driver, '_get_share_helper') self.mock_object(zfs_driver.LOG, 'warning') self.mock_object( self.driver, 'zfs', mock.Mock(return_value=('a', 'b'))) snap_name = '%s@%s' % ( dataset_name, self.driver.replica_snapshot_prefix) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[[], [{'NAME': snap_name}]])) context = 'fake_context' share = { 'id': 'fake_share_id', 'host': 'hostname@backend_name#bar', 'size': 4, } self.configuration.zfs_dataset_name_prefix = 'some_prefix_' 
self.configuration.zfs_ssh_username = 'someuser' self.driver.share_export_ip = '1.1.1.1' self.driver.service_ip = '2.2.2.2' self.driver.private_storage.update(share['id'], {'pool_name': 'bar'}) self.driver.delete_share(context, share, share_server=None) self.assertEqual(0, self.driver._get_share_helper.call_count) self.assertEqual(0, mock_delete.call_count) self.driver.zfs.assert_called_once_with('list', '-r', 'bar') self.driver.parse_zfs_answer.assert_called_once_with('a') zfs_driver.LOG.warning.assert_called_once_with( mock.ANY, {'id': share['id'], 'name': dataset_name}) def test_delete_share_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.delete_share, 'fake_context', 'fake_share', share_server={'id': 'fake_server'}, ) def test_create_snapshot(self): self.configuration.zfs_dataset_snapshot_name_prefix = 'prefx_' self.mock_object(self.driver, 'zfs') snapshot = { 'id': 'fake_snapshot_instance_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'hostname@backend_name#bar', 'size': 4, 'share_instance_id': 'fake_share_id' } snapshot_name = 'foo_data_set_name@prefx_%s' % snapshot['id'] self.driver.private_storage.update( snapshot['share_instance_id'], {'dataset_name': 'foo_data_set_name'}) result = self.driver.create_snapshot('fake_context', snapshot) self.driver.zfs.assert_called_once_with( 'snapshot', snapshot_name) self.assertEqual( snapshot_name.split('@')[-1], self.driver.private_storage.get( snapshot['snapshot_id'], 'snapshot_tag')) self.assertEqual({"provider_location": snapshot_name}, result) def test_delete_snapshot(self): snapshot = { 'id': 'fake_snapshot_instance_id', 'snapshot_id': 'fake_snapshot_id', 'host': 'hostname@backend_name#bar', 'size': 4, 'share_instance_id': 'fake_share_id', } dataset_name = 'foo_zpool/bar_dataset_name' snap_tag = 'prefix_%s' % snapshot['id'] snap_name = '%(dataset)s@%(tag)s' % { 'dataset': dataset_name, 'tag': snap_tag} mock_delete = self.mock_object( self.driver, 
            '_delete_dataset_or_snapshot_with_retry')
        self.mock_object(zfs_driver.LOG, 'warning')
        # zfs() always returns (stdout, stderr); only stdout ('a') is parsed.
        self.mock_object(
            self.driver, 'zfs', mock.Mock(return_value=('a', 'b')))
        # First parse result contains our snapshot among others, so the
        # driver should find it and proceed with deletion.
        self.mock_object(
            self.driver, 'parse_zfs_answer',
            mock.Mock(side_effect=[
                [{'NAME': 'some_other_dataset@snapshot_name'},
                 {'NAME': snap_name}],
                []]))
        context = 'fake_context'
        self.driver.private_storage.update(
            snapshot['id'], {'snapshot_name': snap_name})
        self.driver.private_storage.update(
            snapshot['snapshot_id'], {'snapshot_tag': snap_tag})
        self.driver.private_storage.update(
            snapshot['share_instance_id'], {'dataset_name': dataset_name})
        # Precondition: the tag is present before deletion.
        self.assertEqual(
            snap_tag,
            self.driver.private_storage.get(
                snapshot['snapshot_id'], 'snapshot_tag'))

        self.driver.delete_snapshot(context, snapshot, share_server=None)

        # The tag must be removed from private storage after deletion,
        # the snapshot destroyed exactly once, and no warning logged.
        self.assertIsNone(
            self.driver.private_storage.get(
                snapshot['snapshot_id'], 'snapshot_tag'))
        self.assertEqual(0, zfs_driver.LOG.warning.call_count)
        self.driver.zfs.assert_called_once_with(
            'list', '-r', '-t', 'snapshot', snap_name)
        self.driver.parse_zfs_answer.assert_called_once_with('a')
        mock_delete.assert_called_once_with(snap_name)

    def test_delete_snapshot_absent(self):
        # delete_snapshot for a snapshot ZFS no longer reports: the driver
        # should log a warning and skip the destroy call instead of failing.
        snapshot = {
            'id': 'fake_snapshot_instance_id',
            'snapshot_id': 'fake_snapshot_id',
            'host': 'hostname@backend_name#bar',
            'size': 4,
            'share_instance_id': 'fake_share_id',
        }
        dataset_name = 'foo_zpool/bar_dataset_name'
        snap_tag = 'prefix_%s' % snapshot['id']
        snap_name = '%(dataset)s@%(tag)s' % {
            'dataset': dataset_name, 'tag': snap_tag}
        mock_delete = self.mock_object(
            self.driver, '_delete_dataset_or_snapshot_with_retry')
        self.mock_object(zfs_driver.LOG, 'warning')
        self.mock_object(
            self.driver, 'zfs', mock.Mock(return_value=('a', 'b')))
        # First (and only expected) parse result is empty -> snapshot absent.
        self.mock_object(
            self.driver, 'parse_zfs_answer',
            mock.Mock(side_effect=[[], [{'NAME': snap_name}]]))
        context = 'fake_context'
        self.driver.private_storage.update(
            snapshot['id'], {'snapshot_name': snap_name})
        self.driver.private_storage.update(
            snapshot['snapshot_id'], {'snapshot_tag': snap_tag})
self.configuration.zfs_dataset_name_prefix = 'some_prefix_' self.configuration.zfs_ssh_username = 'someuser' self.driver.share_export_ip = '1.1.1.1' self.driver.service_ip = '2.2.2.2' self.driver.private_storage.update( snap_instance['id'], {'snapshot_name': snap_name}) self.driver.private_storage.update( snap_instance['snapshot_id'], {'snapshot_tag': snap_tag}) self.driver.private_storage.update( snap_instance['share_instance_id'], {'dataset_name': dataset_name}) self.mock_object( zfs_driver, 'get_backend_configuration', mock.Mock(return_value=type( 'FakeConfig', (object,), { 'zfs_ssh_username': src_user, 'zfs_service_ip': src_ip }))) result = self.driver.create_share_from_snapshot( context, share, snap_instance, share_server=None) self.assertEqual( mock_get_helper.return_value.create_exports.return_value, result, ) dst_ssh_host = (self.configuration.zfs_ssh_username + '@' + self.driver.service_ip) src_ssh_host = src_user + '@' + src_ip self.assertEqual( 'share', self.driver.private_storage.get(share['id'], 'entity_type')) self.assertEqual( dataset_name, self.driver.private_storage.get( snap_instance['share_instance_id'], 'dataset_name')) self.assertEqual( dst_ssh_host, self.driver.private_storage.get(share['id'], 'ssh_cmd')) self.assertEqual( 'bar', self.driver.private_storage.get(share['id'], 'pool_name')) self.driver.execute.assert_has_calls([ mock.call( 'ssh', src_ssh_host, 'sudo', 'zfs', 'send', '-vD', snap_name, '|', 'ssh', dst_ssh_host, 'sudo', 'zfs', 'receive', '-v', '%s' % dataset_name), mock.call( 'sudo', 'zfs', 'destroy', '%s@%s' % (dataset_name, snap_tag)), ]) self.driver.zfs.assert_has_calls([ mock.call('set', opt, '%s' % dataset_name) for opt in ('quota=4G', 'bark=barv', 'readonly=off', 'fook=foov') ], any_order=True) mock_get_helper.assert_has_calls([ mock.call('NFS'), mock.call().create_exports(dataset_name) ]) mock_get_extra_specs_from_share.assert_called_once_with(share) def test_create_share_from_snapshot_with_share_server(self): 
self.assertRaises( exception.InvalidInput, self.driver.create_share_from_snapshot, 'fake_context', 'fake_share', 'fake_snapshot', share_server={'id': 'fake_server'}, ) def test_get_pool(self): share = {'host': 'hostname@backend_name#bar'} result = self.driver.get_pool(share) self.assertEqual('bar', result) @ddt.data('on', 'off', 'rw=1.1.1.1') def test_ensure_share(self, get_zfs_option_answer): share = { 'id': 'fake_share_id', 'host': 'hostname@backend_name#bar', 'share_proto': 'NFS', } dataset_name = 'foo_zpool/foo_fs' self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dataset_name)) self.mock_object( self.driver, 'get_zfs_option', mock.Mock(return_value=get_zfs_option_answer)) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.mock_object( self.driver, 'zfs', mock.Mock(return_value=('a', 'b'))) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[[{'NAME': 'fake1'}, {'NAME': dataset_name}, {'NAME': 'fake2'}]] * 2)) for s in ('1', '2'): self.driver.zfs.reset_mock() self.driver.get_zfs_option.reset_mock() mock_helper.reset_mock() self.driver.parse_zfs_answer.reset_mock() self.driver._get_dataset_name.reset_mock() self.driver.share_export_ip = '1.1.1.%s' % s self.driver.service_ip = '2.2.2.%s' % s self.configuration.zfs_ssh_username = 'user%s' % s result = self.driver.ensure_share('fake_context', share) self.assertEqual( 'user%(s)s@2.2.2.%(s)s' % {'s': s}, self.driver.private_storage.get(share['id'], 'ssh_cmd')) self.driver.get_zfs_option.assert_called_once_with( dataset_name, 'sharenfs') mock_helper.assert_called_once_with( share['share_proto']) mock_helper.return_value.get_exports.assert_called_once_with( dataset_name) expected_calls = [mock.call('list', '-r', 'bar')] if get_zfs_option_answer != 'off': expected_calls.append(mock.call('share', dataset_name)) self.driver.zfs.assert_has_calls(expected_calls) self.driver.parse_zfs_answer.assert_called_once_with('a') 
            self.driver._get_dataset_name.assert_called_once_with(share)
            self.assertEqual(
                mock_helper.return_value.get_exports.return_value,
                result,
            )

    def test_ensure_share_absent(self):
        # ensure_share must raise ShareResourceNotFound when the dataset
        # is not present in the pool listing, without touching the helper
        # or reading zfs options.
        share = {'id': 'fake_share_id', 'host': 'hostname@backend_name#bar'}
        dataset_name = 'foo_zpool/foo_fs'
        self.driver.private_storage.update(
            share['id'], {'dataset_name': dataset_name})
        self.mock_object(self.driver, 'get_zfs_option')
        self.mock_object(self.driver, '_get_share_helper')
        self.mock_object(
            self.driver, 'zfs', mock.Mock(return_value=('a', 'b')))
        # First parse result (the 'list -r bar' output) is empty.
        self.mock_object(
            self.driver, 'parse_zfs_answer',
            mock.Mock(side_effect=[[], [{'NAME': dataset_name}]]))

        self.assertRaises(
            exception.ShareResourceNotFound,
            self.driver.ensure_share,
            'fake_context', share,
        )

        self.assertEqual(0, self.driver.get_zfs_option.call_count)
        self.assertEqual(0, self.driver._get_share_helper.call_count)
        self.driver.zfs.assert_called_once_with('list', '-r', 'bar')
        self.driver.parse_zfs_answer.assert_called_once_with('a')

    def test_ensure_share_with_share_server(self):
        # Driver does not support share servers; passing one is invalid.
        self.assertRaises(
            exception.InvalidInput,
            self.driver.ensure_share,
            'fake_context', 'fake_share',
            share_server={'id': 'fake_server'},
        )

    def test_get_network_allocations_number(self):
        # No network allocations are needed by this driver.
        self.assertEqual(0, self.driver.get_network_allocations_number())

    def test_extend_share(self):
        # extend_share translates the new size into a 'quota=<N>G' zfs set.
        dataset_name = 'foo_zpool/foo_fs'
        self.mock_object(
            self.driver, '_get_dataset_name',
            mock.Mock(return_value=dataset_name))
        self.mock_object(self.driver, 'zfs')

        self.driver.extend_share('fake_share', 5)

        self.driver._get_dataset_name.assert_called_once_with('fake_share')
        self.driver.zfs.assert_called_once_with(
            'set', 'quota=5G', dataset_name)

    def test_extend_share_with_share_server(self):
        self.assertRaises(
            exception.InvalidInput,
            self.driver.extend_share,
            'fake_context', 'fake_share', 5,
            share_server={'id': 'fake_server'},
        )

    def test_shrink_share(self):
        # Shrinking succeeds when the 'used' space (4G) fits into the new
        # quota (5G); the driver then just lowers the quota.
        dataset_name = 'foo_zpool/foo_fs'
        self.mock_object(
            self.driver, '_get_dataset_name',
            mock.Mock(return_value=dataset_name))
        self.mock_object(self.driver, 'zfs')
        self.mock_object(
            self.driver, 'get_zfs_option', mock.Mock(return_value='4G'))
        share = {'id': 'fake_share_id'}

        self.driver.shrink_share(share, 5)

        self.driver._get_dataset_name.assert_called_once_with(share)
        self.driver.get_zfs_option.assert_called_once_with(
            dataset_name, 'used')
        self.driver.zfs.assert_called_once_with(
            'set', 'quota=5G', dataset_name)

    def test_shrink_share_data_loss(self):
        # Shrinking below the currently used space (6G > 5G) must raise
        # ShareShrinkingPossibleDataLoss and leave the quota untouched.
        dataset_name = 'foo_zpool/foo_fs'
        self.mock_object(
            self.driver, '_get_dataset_name',
            mock.Mock(return_value=dataset_name))
        self.mock_object(self.driver, 'zfs')
        self.mock_object(
            self.driver, 'get_zfs_option', mock.Mock(return_value='6G'))
        share = {'id': 'fake_share_id'}

        self.assertRaises(
            exception.ShareShrinkingPossibleDataLoss,
            self.driver.shrink_share, share, 5)

        self.driver._get_dataset_name.assert_called_once_with(share)
        self.driver.get_zfs_option.assert_called_once_with(
            dataset_name, 'used')
        self.assertEqual(0, self.driver.zfs.call_count)

    def test_shrink_share_with_share_server(self):
        self.assertRaises(
            exception.InvalidInput,
            self.driver.shrink_share,
            'fake_context', 'fake_share', 5,
            share_server={'id': 'fake_server'},
        )

    def test__get_replication_snapshot_prefix(self):
        # Dashes in the replica id are normalized to underscores and the
        # configured prefix is prepended.
        replica = {'id': 'foo-_bar-_id'}
        self.driver.replica_snapshot_prefix = 'PrEfIx'

        result = self.driver._get_replication_snapshot_prefix(replica)

        self.assertEqual('PrEfIx_foo__bar__id', result)

    def test__get_replication_snapshot_tag(self):
        # The tag is the replication snapshot prefix plus '_time_' and the
        # ISO-formatted current UTC time (timeutils.utcnow is mocked).
        replica = {'id': 'foo-_bar-_id'}
        self.driver.replica_snapshot_prefix = 'PrEfIx'
        mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow')

        result = self.driver._get_replication_snapshot_tag(replica)

        self.assertEqual(
            ('PrEfIx_foo__bar__id_time_'
             '%s' % mock_utcnow.return_value.isoformat.return_value),
            result)
        mock_utcnow.assert_called_once_with()
        mock_utcnow.return_value.isoformat.assert_called_once_with()

    def test__get_active_replica(self):
        replica_list = [
            {'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC,
             'id': '1'},
{'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, 'id': '2'}, {'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC, 'id': '3'}, ] result = self.driver._get_active_replica(replica_list) self.assertEqual(replica_list[1], result) def test__get_active_replica_not_found(self): replica_list = [ {'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, 'id': '1'}, {'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC, 'id': '3'}, ] self.assertRaises( exception.ReplicationException, self.driver._get_active_replica, replica_list, ) def test_update_access(self): self.mock_object(self.driver, '_get_dataset_name') mock_helper = self.mock_object(self.driver, '_get_share_helper') mock_shell_executor = self.mock_object( self.driver, '_get_shell_executor_by_host') share = { 'share_proto': 'NFS', 'host': 'foo_host@bar_backend@quuz_pool', } result = self.driver.update_access( 'fake_context', share, [1], [2], [3], []) self.driver._get_dataset_name.assert_called_once_with(share) mock_shell_executor.assert_called_once_with(share['host']) self.assertEqual( mock_helper.return_value.update_access.return_value, result, ) def test_update_access_with_share_server(self): self.assertRaises( exception.InvalidInput, self.driver.update_access, 'fake_context', 'fake_share', [], [], [], share_server={'id': 'fake_server'}, ) @ddt.data( ({}, True), ({"size": 5}, True), ({"size": 5, "foo": "bar"}, False), ({"size": "5", "foo": "bar"}, True), ) @ddt.unpack def test_manage_share_success_expected(self, driver_options, mount_exists): old_dataset_name = "foopool/path/to/old/dataset/name" new_dataset_name = "foopool/path/to/new/dataset/name" share = { "id": "fake_share_instance_id", "share_id": "fake_share_id", "export_locations": [{"path": "1.1.1.1:/%s" % old_dataset_name}], "host": "foobackend@foohost#foopool", "share_proto": "NFS", } mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) 
mock__get_dataset_name = self.mock_object( self.driver, "_get_dataset_name", mock.Mock(return_value=new_dataset_name)) mock_helper = self.mock_object(self.driver, "_get_share_helper") mock_zfs = self.mock_object( self.driver, "zfs", mock.Mock(return_value=("fake_out", "fake_error"))) mock_zfs_with_retry = self.mock_object(self.driver, "zfs_with_retry") mock_execute_side_effects = [ ("%s " % old_dataset_name, "fake_err") if mount_exists else ("foo", "bar") ] * 3 if mount_exists: # After three retries, assume the mount goes away mock_execute_side_effects.append((("foo", "bar"))) mock_execute = self.mock_object( self.driver, "execute", mock.Mock(side_effect=iter(mock_execute_side_effects))) mock_parse_zfs_answer = self.mock_object( self.driver, "parse_zfs_answer", mock.Mock(return_value=[ {"NAME": "some_other_dataset_1"}, {"NAME": old_dataset_name}, {"NAME": "some_other_dataset_2"}, ])) mock_get_zfs_option = self.mock_object( self.driver, 'get_zfs_option', mock.Mock(return_value="4G")) result = self.driver.manage_existing(share, driver_options) self.assertTrue(mock_helper.return_value.get_exports.called) self.assertTrue(mock_zfs_with_retry.called) self.assertEqual(2, len(result)) self.assertIn("size", result) self.assertIn("export_locations", result) self.assertEqual(5, result["size"]) self.assertEqual( mock_helper.return_value.get_exports.return_value, result["export_locations"]) mock_execute.assert_called_with("sudo", "mount") if mount_exists: self.assertEqual(4, mock_execute.call_count) else: self.assertEqual(1, mock_execute.call_count) mock_parse_zfs_answer.assert_called_once_with(mock_zfs.return_value[0]) if driver_options.get("size"): self.assertFalse(mock_get_zfs_option.called) else: mock_get_zfs_option.assert_called_once_with( old_dataset_name, "used") mock__get_dataset_name.assert_called_once_with(share) mock_get_extra_specs_from_share.assert_called_once_with(share) def test_manage_share_wrong_pool(self): old_dataset_name = "foopool/path/to/old/dataset/name" 
new_dataset_name = "foopool/path/to/new/dataset/name" share = { "id": "fake_share_instance_id", "share_id": "fake_share_id", "export_locations": [{"path": "1.1.1.1:/%s" % old_dataset_name}], "host": "foobackend@foohost#barpool", "share_proto": "NFS", } mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) mock__get_dataset_name = self.mock_object( self.driver, "_get_dataset_name", mock.Mock(return_value=new_dataset_name)) mock_get_zfs_option = self.mock_object( self.driver, 'get_zfs_option', mock.Mock(return_value="4G")) self.assertRaises( exception.ZFSonLinuxException, self.driver.manage_existing, share, {} ) mock__get_dataset_name.assert_called_once_with(share) mock_get_zfs_option.assert_called_once_with(old_dataset_name, "used") mock_get_extra_specs_from_share.assert_called_once_with(share) def test_manage_share_dataset_not_found(self): old_dataset_name = "foopool/path/to/old/dataset/name" new_dataset_name = "foopool/path/to/new/dataset/name" share = { "id": "fake_share_instance_id", "share_id": "fake_share_id", "export_locations": [{"path": "1.1.1.1:/%s" % old_dataset_name}], "host": "foobackend@foohost#foopool", "share_proto": "NFS", } mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) mock__get_dataset_name = self.mock_object( self.driver, "_get_dataset_name", mock.Mock(return_value=new_dataset_name)) mock_get_zfs_option = self.mock_object( self.driver, 'get_zfs_option', mock.Mock(return_value="4G")) mock_zfs = self.mock_object( self.driver, "zfs", mock.Mock(return_value=("fake_out", "fake_error"))) mock_parse_zfs_answer = self.mock_object( self.driver, "parse_zfs_answer", mock.Mock(return_value=[{"NAME": "some_other_dataset_1"}])) self.assertRaises( exception.ZFSonLinuxException, self.driver.manage_existing, share, {} ) mock__get_dataset_name.assert_called_once_with(share) 
mock_get_zfs_option.assert_called_once_with(old_dataset_name, "used") mock_zfs.assert_called_once_with( "list", "-r", old_dataset_name.split("/")[0]) mock_parse_zfs_answer.assert_called_once_with(mock_zfs.return_value[0]) mock_get_extra_specs_from_share.assert_called_once_with(share) def test_manage_unmount_exception(self): old_ds_name = "foopool/path/to/old/dataset/name" new_ds_name = "foopool/path/to/new/dataset/name" share = { "id": "fake_share_instance_id", "share_id": "fake_share_id", "export_locations": [{"path": "1.1.1.1:/%s" % old_ds_name}], "host": "foobackend@foohost#foopool", "share_proto": "NFS", } mock_get_extra_specs_from_share = self.mock_object( zfs_driver.share_types, 'get_extra_specs_from_share', mock.Mock(return_value={})) mock__get_dataset_name = self.mock_object( self.driver, "_get_dataset_name", mock.Mock(return_value=new_ds_name)) mock_helper = self.mock_object(self.driver, "_get_share_helper") mock_zfs = self.mock_object( self.driver, "zfs", mock.Mock(return_value=("fake_out", "fake_error"))) mock_zfs_with_retry = self.mock_object(self.driver, "zfs_with_retry") # 10 Retries, would mean 20 calls to check the mount still exists mock_execute_side_effects = [("%s " % old_ds_name, "fake_err")] * 21 mock_execute = self.mock_object( self.driver, "execute", mock.Mock(side_effect=mock_execute_side_effects)) mock_parse_zfs_answer = self.mock_object( self.driver, "parse_zfs_answer", mock.Mock(return_value=[ {"NAME": "some_other_dataset_1"}, {"NAME": old_ds_name}, {"NAME": "some_other_dataset_2"}, ])) mock_get_zfs_option = self.mock_object( self.driver, 'get_zfs_option', mock.Mock(return_value="4G")) self.assertRaises(exception.ZFSonLinuxException, self.driver.manage_existing, share, {'size': 10}) self.assertFalse(mock_helper.return_value.get_exports.called) mock_zfs_with_retry.assert_called_with("umount", "-f", old_ds_name) mock_execute.assert_called_with("sudo", "mount") self.assertEqual(10, mock_zfs_with_retry.call_count) self.assertEqual(20, 
mock_execute.call_count) mock_parse_zfs_answer.assert_called_once_with(mock_zfs.return_value[0]) self.assertFalse(mock_get_zfs_option.called) mock__get_dataset_name.assert_called_once_with(share) mock_get_extra_specs_from_share.assert_called_once_with(share) def test_unmanage(self): share = {'id': 'fake_share_id'} self.mock_object(self.driver.private_storage, 'delete') self.driver.unmanage(share) self.driver.private_storage.delete.assert_called_once_with(share['id']) @ddt.data( {}, {"size": 5}, {"size": "5"}, ) def test_manage_existing_snapshot(self, driver_options): dataset_name = "path/to/dataset" old_provider_location = dataset_name + "@original_snapshot_tag" snapshot_instance = { "id": "fake_snapshot_instance_id", "share_instance_id": "fake_share_instance_id", "snapshot_id": "fake_snapshot_id", "provider_location": old_provider_location, } new_snapshot_tag = "fake_new_snapshot_tag" new_provider_location = ( old_provider_location.split("@")[0] + "@" + new_snapshot_tag) self.mock_object(self.driver, "zfs") self.mock_object( self.driver, "get_zfs_option", mock.Mock(return_value="5G")) self.mock_object( self.driver, '_get_snapshot_name', mock.Mock(return_value=new_snapshot_tag)) self.driver.private_storage.update( snapshot_instance["share_instance_id"], {"dataset_name": dataset_name}) result = self.driver.manage_existing_snapshot( snapshot_instance, driver_options) expected_result = { "size": 5, "provider_location": new_provider_location, } self.assertEqual(expected_result, result) self.driver._get_snapshot_name.assert_called_once_with( snapshot_instance["id"]) self.driver.zfs.assert_has_calls([ mock.call("list", "-r", "-t", "snapshot", old_provider_location), mock.call("rename", old_provider_location, new_provider_location), ]) def test_manage_existing_snapshot_not_found(self): dataset_name = "path/to/dataset" old_provider_location = dataset_name + "@original_snapshot_tag" new_snapshot_tag = "fake_new_snapshot_tag" snapshot_instance = { "id": 
"fake_snapshot_instance_id", "snapshot_id": "fake_snapshot_id", "provider_location": old_provider_location, } self.mock_object( self.driver, "_get_snapshot_name", mock.Mock(return_value=new_snapshot_tag)) self.mock_object( self.driver, "zfs", mock.Mock(side_effect=exception.ProcessExecutionError("FAKE"))) self.assertRaises( exception.ManageInvalidShareSnapshot, self.driver.manage_existing_snapshot, snapshot_instance, {}, ) self.driver.zfs.assert_called_once_with( "list", "-r", "-t", "snapshot", old_provider_location) self.driver._get_snapshot_name.assert_called_once_with( snapshot_instance["id"]) def test_unmanage_snapshot(self): snapshot_instance = { "id": "fake_snapshot_instance_id", "snapshot_id": "fake_snapshot_id", } self.mock_object(self.driver.private_storage, "delete") self.driver.unmanage_snapshot(snapshot_instance) self.driver.private_storage.delete.assert_called_once_with( snapshot_instance["snapshot_id"]) def test__delete_dataset_or_snapshot_with_retry_snapshot(self): umount_sideeffects = (exception.ProcessExecutionError, exception.ProcessExecutionError, exception.ProcessExecutionError) zfs_destroy_sideeffects = (exception.ProcessExecutionError, exception.ProcessExecutionError, None) self.mock_object( self.driver, 'get_zfs_option', mock.Mock(return_value='/foo@bar')) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=umount_sideeffects)) self.mock_object( self.driver, 'zfs', mock.Mock(side_effect=zfs_destroy_sideeffects)) self.driver._delete_dataset_or_snapshot_with_retry('foo@bar') self.driver.get_zfs_option.assert_called_once_with( 'foo@bar', 'mountpoint') self.driver.execute.assert_has_calls( [mock.call('sudo', 'umount', '/foo@bar')] * 3) self.driver.zfs.assert_has_calls( [mock.call('destroy', '-f', 'foo@bar')] * 3) self.assertEqual(3, self.driver.execute.call_count) self.assertEqual(3, self.driver.zfs.call_count) def test__delete_dataset_or_snapshot_with_retry_dataset_busy_fail(self): # simulating local processes that have open files on 
the mount lsof_sideeffects = ([('1001, 1002', '0')] * 30) self.mock_object(self.driver, 'get_zfs_option', mock.Mock(return_value='/fake/dataset/name')) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=lsof_sideeffects)) self.mock_object(zfs_driver.LOG, 'debug') self.mock_object( zfs_driver.time, 'time', mock.Mock(side_effect=range(1, 70, 2))) self.mock_object(self.driver, 'zfs') dataset_name = 'fake/dataset/name' self.assertRaises( exception.ZFSonLinuxException, self.driver._delete_dataset_or_snapshot_with_retry, dataset_name, ) self.driver.get_zfs_option.assert_called_once_with( dataset_name, 'mountpoint') self.assertEqual(29, zfs_driver.LOG.debug.call_count) # We should've bailed out before executing "zfs destroy" self.driver.zfs.assert_not_called() def test__delete_dataset_or_snapshot_with_retry_dataset(self): lsof_sideeffects = (('1001', '0'), exception.ProcessExecutionError) umount_sideeffects = (exception.ProcessExecutionError, exception.ProcessExecutionError, exception.ProcessExecutionError) zfs_destroy_sideeffects = (exception.ProcessExecutionError, exception.ProcessExecutionError, None) dataset_name = 'fake/dataset/name' self.mock_object(self.driver, 'get_zfs_option', mock.Mock( return_value='/%s' % dataset_name)) self.mock_object( self.driver, 'execute', mock.Mock( side_effect=(lsof_sideeffects + umount_sideeffects))) self.mock_object( self.driver, 'zfs', mock.Mock(side_effect=zfs_destroy_sideeffects)) self.mock_object(zfs_driver.LOG, 'info') self.driver._delete_dataset_or_snapshot_with_retry(dataset_name) self.driver.get_zfs_option.assert_called_once_with( dataset_name, 'mountpoint') self.driver.execute.assert_has_calls( [mock.call('lsof', '-w', '/%s' % dataset_name)] * 2 + [mock.call('sudo', 'umount', '/%s' % dataset_name)] * 3) self.driver.zfs.assert_has_calls( [mock.call('destroy', '-f', dataset_name)] * 3) self.assertEqual(6, zfs_driver.time.sleep.call_count) self.assertEqual(5, self.driver.execute.call_count) self.assertEqual(3, 
self.driver.zfs.call_count) def test_create_replica(self): active_replica = { 'id': 'fake_active_replica_id', 'host': 'hostname1@backend_name1#foo', 'size': 5, 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica_list = [active_replica] new_replica = { 'id': 'fake_new_replica_id', 'host': 'hostname2@backend_name2#bar', 'share_proto': 'NFS', 'replica_state': None, } dst_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % new_replica['id']) access_rules = ['foo_rule', 'bar_rule'] self.driver.private_storage.update( active_replica['id'], {'dataset_name': 'fake/active/dataset/name', 'ssh_cmd': 'fake_ssh_cmd'} ) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[('a', 'b'), ('c', 'd')])) self.mock_object(self.driver, 'zfs') mock_helper = self.mock_object(self.driver, '_get_share_helper') self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') mock_utcnow.return_value.isoformat.return_value = 'some_time' result = self.driver.create_replica( 'fake_context', replica_list, new_replica, access_rules, []) expected = { 'export_locations': ( mock_helper.return_value.create_exports.return_value), 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, 'access_rules_status': zfs_driver.constants.STATUS_ACTIVE, } self.assertEqual(expected, result) mock_helper.assert_has_calls([ mock.call('NFS'), mock.call().update_access( dst_dataset_name, access_rules, add_rules=[], delete_rules=[], make_all_ro=True), mock.call('NFS'), mock.call().create_exports(dst_dataset_name), ]) self.driver.zfs.assert_has_calls([ mock.call('set', 'readonly=on', dst_dataset_name), mock.call('set', 'quota=%sG' % active_replica['size'], dst_dataset_name), ]) src_snapshot_name = ( 'fake/active/dataset/name@' 'tmp_snapshot_for_replication__fake_new_replica_id_time_some_time') self.driver.execute.assert_has_calls([ mock.call('ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'snapshot', src_snapshot_name), 
mock.call( 'ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'send', '-vDR', src_snapshot_name, '|', 'ssh', 'fake_username@240.241.242.244', 'sudo', 'zfs', 'receive', '-v', dst_dataset_name ), ]) mock_utcnow.assert_called_once_with() mock_utcnow.return_value.isoformat.assert_called_once_with() def test_delete_replica_not_found(self): dataset_name = 'foo/dataset/name' pool_name = 'foo_pool' replica = {'id': 'fake_replica_id'} replica_list = [replica] replica_snapshots = [] self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dataset_name)) self.mock_object( self.driver, 'zfs', mock.Mock(side_effect=[('a', 'b'), ('c', 'd')])) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[[], []])) self.mock_object(self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(zfs_driver.LOG, 'warning') self.mock_object(self.driver, '_get_share_helper') self.driver.private_storage.update( replica['id'], {'pool_name': pool_name}) self.driver.delete_replica('fake_context', replica_list, replica_snapshots, replica) zfs_driver.LOG.warning.assert_called_once_with( mock.ANY, {'id': replica['id'], 'name': dataset_name}) self.assertEqual(0, self.driver._get_share_helper.call_count) self.assertEqual( 0, self.driver._delete_dataset_or_snapshot_with_retry.call_count) self.driver._get_dataset_name.assert_called_once_with(replica) self.driver.zfs.assert_has_calls([ mock.call('list', '-r', '-t', 'snapshot', pool_name), mock.call('list', '-r', pool_name), ]) self.driver.parse_zfs_answer.assert_has_calls([ mock.call('a'), mock.call('c'), ]) def test_delete_replica(self): dataset_name = 'foo/dataset/name' pool_name = 'foo_pool' replica = {'id': 'fake_replica_id', 'share_proto': 'NFS'} replica_list = [replica] self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dataset_name)) self.mock_object( self.driver, 'zfs', mock.Mock(side_effect=[('a', 'b'), ('c', 'd')])) self.mock_object( self.driver, 'parse_zfs_answer', 
mock.Mock(side_effect=[ [{'NAME': 'some_other_dataset@snapshot'}, {'NAME': dataset_name + '@foo_snap'}], [{'NAME': 'some_other_dataset'}, {'NAME': dataset_name}], ])) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.mock_object(self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object(zfs_driver.LOG, 'warning') self.driver.private_storage.update( replica['id'], {'pool_name': pool_name, 'dataset_name': dataset_name}) self.driver.delete_replica('fake_context', replica_list, [], replica) self.assertEqual(0, zfs_driver.LOG.warning.call_count) self.assertEqual(0, self.driver._get_dataset_name.call_count) self.driver._delete_dataset_or_snapshot_with_retry.assert_has_calls([ mock.call(dataset_name + '@foo_snap'), mock.call(dataset_name), ]) self.driver.zfs.assert_has_calls([ mock.call('list', '-r', '-t', 'snapshot', pool_name), mock.call('list', '-r', pool_name), ]) self.driver.parse_zfs_answer.assert_has_calls([ mock.call('a'), mock.call('c'), ]) mock_helper.assert_called_once_with(replica['share_proto']) mock_helper.return_value.remove_exports.assert_called_once_with( dataset_name) def test_update_replica(self): active_replica = { 'id': 'fake_active_replica_id', 'host': 'hostname1@backend_name1#foo', 'size': 5, 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_new_replica_id', 'host': 'hostname2@backend_name2#bar', 'share_proto': 'NFS', 'replica_state': None, } replica_list = [replica, active_replica] replica_snapshots = [] dst_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % replica['id']) src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) access_rules = ['foo_rule', 'bar_rule'] old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') snap_tag_prefix = self.driver._get_replication_snapshot_prefix( replica) self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 
'fake_src_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) self.driver.private_storage.update( replica['id'], {'dataset_name': dst_dataset_name, 'ssh_cmd': 'fake_dst_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[('a', 'b'), ('c', 'd'), ('e', 'f')])) self.mock_object(self.driver, 'execute_with_retry', mock.Mock(side_effect=[('g', 'h')])) self.mock_object(self.driver, 'zfs', mock.Mock(side_effect=[('j', 'k'), ('l', 'm')])) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[ ({'NAME': dst_dataset_name + '@' + old_repl_snapshot_tag}, {'NAME': dst_dataset_name + '@%s_time_some_time' % snap_tag_prefix}, {'NAME': 'other/dataset/name1@' + old_repl_snapshot_tag}), ({'NAME': src_dataset_name + '@' + old_repl_snapshot_tag}, {'NAME': src_dataset_name + '@' + snap_tag_prefix + 'quuz'}, {'NAME': 'other/dataset/name2@' + old_repl_snapshot_tag}), ]) ) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') mock_utcnow.return_value.isoformat.return_value = 'some_time' mock_delete_snapshot = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') result = self.driver.update_replica_state( 'fake_context', replica_list, replica, access_rules, replica_snapshots) self.assertEqual(zfs_driver.constants.REPLICA_STATE_IN_SYNC, result) mock_helper.assert_called_once_with('NFS') mock_helper.return_value.update_access.assert_called_once_with( dst_dataset_name, access_rules, add_rules=[], delete_rules=[], make_all_ro=True) self.driver.execute_with_retry.assert_called_once_with( 'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'destroy', '-f', src_dataset_name + '@' + snap_tag_prefix + 'quuz') self.driver.execute.assert_has_calls([ mock.call( 'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'snapshot', src_dataset_name + '@' + 
self.driver._get_replication_snapshot_tag(replica)), mock.call( 'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'send', '-vDRI', old_repl_snapshot_tag, src_dataset_name + '@%s' % snap_tag_prefix + '_time_some_time', '|', 'ssh', 'fake_dst_ssh_cmd', 'sudo', 'zfs', 'receive', '-vF', dst_dataset_name), mock.call( 'ssh', 'fake_src_ssh_cmd', 'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', 'bar'), ]) mock_delete_snapshot.assert_called_once_with( dst_dataset_name + '@' + old_repl_snapshot_tag) self.driver.parse_zfs_answer.assert_has_calls( [mock.call('l'), mock.call('e')]) def test_promote_replica_active_available(self): active_replica = { 'id': 'fake_active_replica_id', 'host': 'hostname1@backend_name1#foo', 'size': 5, 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_first_replica_id', 'host': 'hostname2@backend_name2#bar', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } second_replica = { 'id': 'fake_second_replica_id', 'host': 'hostname3@backend_name3#quuz', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } replica_list = [replica, active_replica, second_replica] dst_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % replica['id']) src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) access_rules = ['foo_rule', 'bar_rule'] old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') snap_tag_prefix = self.driver._get_replication_snapshot_prefix( active_replica) + '_time_some_time' self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 'fake_src_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) for repl in (replica, second_replica): self.driver.private_storage.update( repl['id'], {'dataset_name': ( 'bar/subbar/fake_dataset_name_prefix%s' % repl['id']), 'ssh_cmd': 'fake_dst_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) 
self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ ('a', 'b'), ('c', 'd'), ('e', 'f'), exception.ProcessExecutionError('Second replica sync failure'), ])) self.mock_object(self.driver, 'zfs', mock.Mock(side_effect=[('g', 'h')])) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') mock_utcnow.return_value.isoformat.return_value = 'some_time' mock_delete_snapshot = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') result = self.driver.promote_replica( 'fake_context', replica_list, replica, access_rules) expected = [ {'access_rules_status': zfs_driver.constants.SHARE_INSTANCE_RULES_SYNCING, 'id': 'fake_active_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC}, {'access_rules_status': zfs_driver.constants.STATUS_ACTIVE, 'id': 'fake_first_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE}, {'access_rules_status': zfs_driver.constants.SHARE_INSTANCE_RULES_SYNCING, 'id': 'fake_second_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC}, ] for repl in expected: self.assertIn(repl, result) self.assertEqual(3, len(result)) mock_helper.assert_called_once_with('NFS') mock_helper.return_value.update_access.assert_called_once_with( dst_dataset_name, access_rules, add_rules=[], delete_rules=[]) self.driver.zfs.assert_called_once_with( 'set', 'readonly=off', dst_dataset_name) self.assertEqual(0, mock_delete_snapshot.call_count) for repl in (active_replica, replica): self.assertEqual( snap_tag_prefix, self.driver.private_storage.get( repl['id'], 'repl_snapshot_tag')) self.assertEqual( old_repl_snapshot_tag, self.driver.private_storage.get( second_replica['id'], 'repl_snapshot_tag')) def test_promote_replica_active_not_available(self): active_replica = { 'id': 'fake_active_replica_id', 'host': 'hostname1@backend_name1#foo', 'size': 
5, 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_first_replica_id', 'host': 'hostname2@backend_name2#bar', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } second_replica = { 'id': 'fake_second_replica_id', 'host': 'hostname3@backend_name3#quuz', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } third_replica = { 'id': 'fake_third_replica_id', 'host': 'hostname4@backend_name4#fff', 'share_proto': 'NFS', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } replica_list = [replica, active_replica, second_replica, third_replica] dst_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % replica['id']) src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) access_rules = ['foo_rule', 'bar_rule'] old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') snap_tag_prefix = self.driver._get_replication_snapshot_prefix( replica) + '_time_some_time' self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 'fake_src_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) for repl in (replica, second_replica, third_replica): self.driver.private_storage.update( repl['id'], {'dataset_name': ( 'bar/subbar/fake_dataset_name_prefix%s' % repl['id']), 'ssh_cmd': 'fake_dst_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ exception.ProcessExecutionError('Active replica failure'), ('a', 'b'), exception.ProcessExecutionError('Second replica sync failure'), ('c', 'd'), ])) self.mock_object(self.driver, 'zfs', mock.Mock(side_effect=[('g', 'h'), ('i', 'j')])) mock_helper = self.mock_object(self.driver, '_get_share_helper') self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') 
mock_utcnow.return_value.isoformat.return_value = 'some_time' mock_delete_snapshot = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') result = self.driver.promote_replica( 'fake_context', replica_list, replica, access_rules) expected = [ {'access_rules_status': zfs_driver.constants.SHARE_INSTANCE_RULES_SYNCING, 'id': 'fake_active_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC}, {'access_rules_status': zfs_driver.constants.STATUS_ACTIVE, 'id': 'fake_first_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE}, {'access_rules_status': zfs_driver.constants.SHARE_INSTANCE_RULES_SYNCING, 'id': 'fake_second_replica_id'}, {'access_rules_status': zfs_driver.constants.SHARE_INSTANCE_RULES_SYNCING, 'id': 'fake_third_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_OUT_OF_SYNC}, ] for repl in expected: self.assertIn(repl, result) self.assertEqual(4, len(result)) mock_helper.assert_called_once_with('NFS') mock_helper.return_value.update_access.assert_called_once_with( dst_dataset_name, access_rules, add_rules=[], delete_rules=[]) self.driver.zfs.assert_has_calls([ mock.call('snapshot', dst_dataset_name + '@' + snap_tag_prefix), mock.call('set', 'readonly=off', dst_dataset_name), ]) self.assertEqual(0, mock_delete_snapshot.call_count) for repl in (second_replica, replica): self.assertEqual( snap_tag_prefix, self.driver.private_storage.get( repl['id'], 'repl_snapshot_tag')) for repl in (active_replica, third_replica): self.assertEqual( old_repl_snapshot_tag, self.driver.private_storage.get( repl['id'], 'repl_snapshot_tag')) def test_create_replicated_snapshot(self): active_replica = { 'id': 'fake_active_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_first_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } second_replica = { 'id': 'fake_second_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } 
replica_list = [replica, active_replica, second_replica] snapshot_instances = [ {'id': 'si_%s' % r['id'], 'share_instance_id': r['id'], 'snapshot_id': 'some_snapshot_id'} for r in replica_list ] src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 'fake_src_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) for repl in (replica, second_replica): self.driver.private_storage.update( repl['id'], {'dataset_name': ( 'bar/subbar/fake_dataset_name_prefix%s' % repl['id']), 'ssh_cmd': 'fake_dst_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ ('a', 'b'), ('c', 'd'), ('e', 'f'), exception.ProcessExecutionError('Second replica sync failure'), ])) self.configuration.zfs_dataset_name_prefix = 'fake_dataset_name_prefix' self.configuration.zfs_dataset_snapshot_name_prefix = ( 'fake_dataset_snapshot_name_prefix') snap_tag_prefix = ( self.configuration.zfs_dataset_snapshot_name_prefix + 'si_%s' % active_replica['id']) repl_snap_tag = 'fake_repl_tag' self.mock_object( self.driver, '_get_replication_snapshot_tag', mock.Mock(return_value=repl_snap_tag)) result = self.driver.create_replicated_snapshot( 'fake_context', replica_list, snapshot_instances) expected = [ {'id': 'si_fake_active_replica_id', 'status': zfs_driver.constants.STATUS_AVAILABLE}, {'id': 'si_fake_first_replica_id', 'status': zfs_driver.constants.STATUS_AVAILABLE}, {'id': 'si_fake_second_replica_id', 'status': zfs_driver.constants.STATUS_ERROR}, ] for repl in expected: self.assertIn(repl, result) self.assertEqual(3, len(result)) for repl in (active_replica, replica): self.assertEqual( repl_snap_tag, self.driver.private_storage.get( repl['id'], 'repl_snapshot_tag')) self.assertEqual( old_repl_snapshot_tag, 
self.driver.private_storage.get( second_replica['id'], 'repl_snapshot_tag')) self.assertEqual( snap_tag_prefix, self.driver.private_storage.get( snapshot_instances[0]['snapshot_id'], 'snapshot_tag')) self.driver._get_replication_snapshot_tag.assert_called_once_with( active_replica) def test_delete_replicated_snapshot(self): active_replica = { 'id': 'fake_active_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_ACTIVE, } replica = { 'id': 'fake_first_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } second_replica = { 'id': 'fake_second_replica_id', 'replica_state': zfs_driver.constants.REPLICA_STATE_IN_SYNC, } replica_list = [replica, active_replica, second_replica] active_snapshot_instance = { 'id': 'si_%s' % active_replica['id'], 'share_instance_id': active_replica['id'], 'snapshot_id': 'some_snapshot_id', 'share_id': 'some_share_id', } snapshot_instances = [ {'id': 'si_%s' % r['id'], 'share_instance_id': r['id'], 'snapshot_id': active_snapshot_instance['snapshot_id'], 'share_id': active_snapshot_instance['share_id']} for r in (replica, second_replica) ] snapshot_instances.append(active_snapshot_instance) for si in snapshot_instances: self.driver.private_storage.update( si['id'], {'snapshot_name': 'fake_snap_name_%s' % si['id']}) src_dataset_name = ( 'bar/subbar/fake_dataset_name_prefix%s' % active_replica['id']) old_repl_snapshot_tag = ( self.driver._get_replication_snapshot_prefix( active_replica) + 'foo') new_repl_snapshot_tag = 'foo_snapshot_tag' dataset_name = 'some_dataset_name' self.driver.private_storage.update( active_replica['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': 'fake_src_ssh_cmd', 'repl_snapshot_tag': old_repl_snapshot_tag} ) for replica in (replica, second_replica): self.driver.private_storage.update( replica['id'], {'dataset_name': dataset_name, 'ssh_cmd': 'fake_ssh_cmd'} ) self.driver.private_storage.update( snapshot_instances[0]['snapshot_id'], {'snapshot_tag': new_repl_snapshot_tag} ) 
snap_name = 'fake_snap_name' self.mock_object( self.driver, 'zfs', mock.Mock(return_value=['out', 'err'])) self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ ('a', 'b'), ('c', 'd'), exception.ProcessExecutionError('Second replica sync failure'), ])) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[ ({'NAME': 'foo'}, {'NAME': snap_name}), ({'NAME': 'bar'}, {'NAME': snap_name}), [], ])) expected = sorted([ {'id': si['id'], 'status': 'deleted'} for si in snapshot_instances ], key=lambda item: item['id']) self.assertEqual( new_repl_snapshot_tag, self.driver.private_storage.get( snapshot_instances[0]['snapshot_id'], 'snapshot_tag')) result = self.driver.delete_replicated_snapshot( 'fake_context', replica_list, snapshot_instances) self.assertIsNone( self.driver.private_storage.get( snapshot_instances[0]['snapshot_id'], 'snapshot_tag')) self.driver.execute.assert_has_calls([ mock.call('ssh', 'fake_ssh_cmd', 'sudo', 'zfs', 'list', '-r', '-t', 'snapshot', dataset_name + '@' + new_repl_snapshot_tag) for i in (0, 1) ]) self.assertIsInstance(result, list) self.assertEqual(3, len(result)) self.assertEqual(expected, sorted(result, key=lambda item: item['id'])) self.driver.parse_zfs_answer.assert_has_calls([ mock.call('out'), ]) @ddt.data( ({'NAME': 'fake'}, zfs_driver.constants.STATUS_ERROR), ({'NAME': 'fake_snap_name'}, zfs_driver.constants.STATUS_AVAILABLE), ) @ddt.unpack def test_update_replicated_snapshot(self, parse_answer, expected_status): snap_name = 'fake_snap_name' self.mock_object(self.driver, '_update_replica_state') self.mock_object( self.driver, '_get_saved_snapshot_name', mock.Mock(return_value=snap_name)) self.mock_object( self.driver, 'zfs', mock.Mock(side_effect=[('a', 'b')])) self.mock_object( self.driver, 'parse_zfs_answer', mock.Mock(side_effect=[ [parse_answer] ])) fake_context = 'fake_context' replica_list = ['foo', 'bar'] share_replica = 'quuz' snapshot_instance = {'id': 'fake_snapshot_instance_id'} 
snapshot_instances = ['q', 'w', 'e', 'r', 't', 'y'] result = self.driver.update_replicated_snapshot( fake_context, replica_list, share_replica, snapshot_instances, snapshot_instance) self.driver._update_replica_state.assert_called_once_with( fake_context, replica_list, share_replica) self.driver._get_saved_snapshot_name.assert_called_once_with( snapshot_instance) self.driver.zfs.assert_called_once_with( 'list', '-r', '-t', 'snapshot', snap_name) self.driver.parse_zfs_answer.assert_called_once_with('a') self.assertIsInstance(result, dict) self.assertEqual(2, len(result)) self.assertIn('status', result) self.assertIn('id', result) self.assertEqual(expected_status, result['status']) self.assertEqual(snapshot_instance['id'], result['id']) def test__get_shell_executor_by_host_local(self): backend_name = 'foobackend' host = 'foohost@%s#foopool' % backend_name CONF.set_default( 'enabled_share_backends', 'fake1,%s,fake2,fake3' % backend_name) self.assertIsNone(self.driver._shell_executors.get(backend_name)) result = self.driver._get_shell_executor_by_host(host) self.assertEqual(self.driver.execute, result) def test__get_shell_executor_by_host_remote(self): backend_name = 'foobackend' host = 'foohost@%s#foopool' % backend_name CONF.set_default('enabled_share_backends', 'fake1,fake2,fake3') mock_get_remote_shell_executor = self.mock_object( zfs_driver.zfs_utils, 'get_remote_shell_executor') mock_config = self.mock_object(zfs_driver, 'get_backend_configuration') self.assertIsNone(self.driver._shell_executors.get(backend_name)) for i in (1, 2): result = self.driver._get_shell_executor_by_host(host) self.assertEqual( mock_get_remote_shell_executor.return_value, result) mock_get_remote_shell_executor.assert_called_once_with( ip=mock_config.return_value.zfs_service_ip, port=22, conn_timeout=mock_config.return_value.ssh_conn_timeout, login=mock_config.return_value.zfs_ssh_username, password=mock_config.return_value.zfs_ssh_user_password, 
privatekey=mock_config.return_value.zfs_ssh_private_key_path, max_size=10, ) zfs_driver.get_backend_configuration.assert_called_once_with( backend_name) def test__get_migration_snapshot_tag(self): share_instance = {'id': 'fake-share_instance_id'} current_time = 'fake_current_time' mock_utcnow = self.mock_object(zfs_driver.timeutils, 'utcnow') mock_utcnow.return_value.isoformat.return_value = current_time expected_value = ( self.driver.migration_snapshot_prefix + '_fake_share_instance_id_time_' + current_time) result = self.driver._get_migration_snapshot_tag(share_instance) self.assertEqual(expected_value, result) def test_migration_check_compatibility(self): src_share = {'host': 'foohost@foobackend#foopool'} dst_backend_name = 'barbackend' dst_share = {'host': 'barhost@%s#barpool' % dst_backend_name} expected = { 'compatible': True, 'writable': False, 'preserve_metadata': True, 'nondisruptive': True, } self.mock_object( zfs_driver, 'get_backend_configuration', mock.Mock(return_value=type( 'FakeConfig', (object,), { 'share_driver': self.driver.configuration.share_driver}))) actual = self.driver.migration_check_compatibility( 'fake_context', src_share, dst_share) self.assertEqual(expected, actual) zfs_driver.get_backend_configuration.assert_called_once_with( dst_backend_name) def test_migration_start(self): username = self.driver.configuration.zfs_ssh_username hostname = self.driver.configuration.zfs_service_ip dst_username = username + '_dst' dst_hostname = hostname + '_dst' src_share = { 'id': 'fake_src_share_id', 'host': 'foohost@foobackend#foopool', } src_dataset_name = 'foo_dataset_name' dst_share = { 'id': 'fake_dst_share_id', 'host': 'barhost@barbackend#barpool', } dst_dataset_name = 'bar_dataset_name' snapshot_tag = 'fake_migration_snapshot_tag' self.mock_object( self.driver, '_get_dataset_name', mock.Mock(return_value=dst_dataset_name)) self.mock_object( self.driver, '_get_migration_snapshot_tag', mock.Mock(return_value=snapshot_tag)) self.mock_object( 
zfs_driver, 'get_backend_configuration', mock.Mock(return_value=type( 'FakeConfig', (object,), { 'zfs_ssh_username': dst_username, 'zfs_service_ip': dst_hostname, }))) self.mock_object(self.driver, 'execute') self.mock_object( zfs_driver.utils, 'tempdir', mock.MagicMock(side_effect=FakeTempDir)) self.driver.private_storage.update( src_share['id'], {'dataset_name': src_dataset_name, 'ssh_cmd': username + '@' + hostname}) src_snapshot_name = ( '%(dataset_name)s@%(snapshot_tag)s' % { 'snapshot_tag': snapshot_tag, 'dataset_name': src_dataset_name, } ) with mock.patch("builtins.open", mock.mock_open(read_data="data")) as mock_file: self.driver.migration_start( self._context, src_share, dst_share, None, None) expected_file_content = ( 'ssh %(ssh_cmd)s sudo zfs send -vDR %(snap)s | ' 'ssh %(dst_ssh_cmd)s sudo zfs receive -v %(dst_dataset)s' ) % { 'ssh_cmd': self.driver.private_storage.get( src_share['id'], 'ssh_cmd'), 'dst_ssh_cmd': self.driver.private_storage.get( dst_share['id'], 'ssh_cmd'), 'snap': src_snapshot_name, 'dst_dataset': dst_dataset_name, } mock_file.assert_called_with("/foo/path/bar_dataset_name.sh", "w") mock_file.return_value.write.assert_called_once_with( expected_file_content) self.driver.execute.assert_has_calls([ mock.call('sudo', 'zfs', 'snapshot', src_snapshot_name), mock.call('sudo', 'chmod', '755', mock.ANY), mock.call('nohup', mock.ANY, '&'), ]) self.driver._get_migration_snapshot_tag.assert_called_once_with( dst_share) self.driver._get_dataset_name.assert_called_once_with( dst_share) for k, v in (('dataset_name', dst_dataset_name), ('migr_snapshot_tag', snapshot_tag), ('pool_name', 'barpool'), ('ssh_cmd', dst_username + '@' + dst_hostname)): self.assertEqual( v, self.driver.private_storage.get(dst_share['id'], k)) def test_migration_continue_success(self): dst_share = { 'id': 'fake_dst_share_id', 'host': 'barhost@barbackend#barpool', } dst_dataset_name = 'bar_dataset_name' snapshot_tag = 'fake_migration_snapshot_tag' 
self.driver.private_storage.update( dst_share['id'], { 'migr_snapshot_tag': snapshot_tag, 'dataset_name': dst_dataset_name, }) mock_executor = self.mock_object( self.driver, '_get_shell_executor_by_host') self.mock_object( self.driver, 'execute', mock.Mock(return_value=('fake_out', 'fake_err'))) result = self.driver.migration_continue( self._context, 'fake_src_share', dst_share, None, None) self.assertTrue(result) mock_executor.assert_called_once_with(dst_share['host']) self.driver.execute.assert_has_calls([ mock.call('ps', 'aux'), mock.call('sudo', 'zfs', 'get', 'quota', dst_dataset_name, executor=mock_executor.return_value), ]) def test_migration_continue_pending(self): dst_share = { 'id': 'fake_dst_share_id', 'host': 'barhost@barbackend#barpool', } dst_dataset_name = 'bar_dataset_name' snapshot_tag = 'fake_migration_snapshot_tag' self.driver.private_storage.update( dst_share['id'], { 'migr_snapshot_tag': snapshot_tag, 'dataset_name': dst_dataset_name, }) mock_executor = self.mock_object( self.driver, '_get_shell_executor_by_host') self.mock_object( self.driver, 'execute', mock.Mock(return_value=('foo@%s' % snapshot_tag, 'fake_err'))) result = self.driver.migration_continue( self._context, 'fake_src_share', dst_share, None, None) self.assertIsNone(result) self.assertFalse(mock_executor.called) self.driver.execute.assert_called_once_with('ps', 'aux') def test_migration_continue_exception(self): dst_share = { 'id': 'fake_dst_share_id', 'host': 'barhost@barbackend#barpool', } dst_dataset_name = 'bar_dataset_name' snapshot_tag = 'fake_migration_snapshot_tag' self.driver.private_storage.update( dst_share['id'], { 'migr_snapshot_tag': snapshot_tag, 'dataset_name': dst_dataset_name, }) mock_executor = self.mock_object( self.driver, '_get_shell_executor_by_host') self.mock_object( self.driver, 'execute', mock.Mock(side_effect=[ ('fake_out', 'fake_err'), exception.ProcessExecutionError('fake'), ])) self.assertRaises( exception.ZFSonLinuxException, 
self.driver.migration_continue, self._context, 'fake_src_share', dst_share, None, None ) mock_executor.assert_called_once_with(dst_share['host']) self.driver.execute.assert_has_calls([ mock.call('ps', 'aux'), mock.call('sudo', 'zfs', 'get', 'quota', dst_dataset_name, executor=mock_executor.return_value), ]) def test_migration_complete(self): src_share = {'id': 'fake_src_share_id'} dst_share = { 'id': 'fake_dst_share_id', 'host': 'barhost@barbackend#barpool', 'share_proto': 'fake_share_proto', } dst_dataset_name = 'bar_dataset_name' snapshot_tag = 'fake_migration_snapshot_tag' self.driver.private_storage.update( dst_share['id'], { 'migr_snapshot_tag': snapshot_tag, 'dataset_name': dst_dataset_name, }) dst_snapshot_name = ( '%(dataset_name)s@%(snapshot_tag)s' % { 'snapshot_tag': snapshot_tag, 'dataset_name': dst_dataset_name, } ) mock_helper = self.mock_object(self.driver, '_get_share_helper') mock_executor = self.mock_object( self.driver, '_get_shell_executor_by_host') self.mock_object( self.driver, 'execute', mock.Mock(return_value=('fake_out', 'fake_err'))) self.mock_object(self.driver, 'delete_share') result = self.driver.migration_complete( self._context, src_share, dst_share, None, None) expected_result = { 'export_locations': (mock_helper.return_value. 
create_exports.return_value) } self.assertEqual(expected_result, result) mock_executor.assert_called_once_with(dst_share['host']) self.driver.execute.assert_called_once_with( 'sudo', 'zfs', 'destroy', dst_snapshot_name, executor=mock_executor.return_value, ) self.driver.delete_share.assert_called_once_with( self._context, src_share) mock_helper.assert_called_once_with(dst_share['share_proto']) mock_helper.return_value.create_exports.assert_called_once_with( dst_dataset_name, executor=self.driver._get_shell_executor_by_host.return_value) def test_migration_cancel_success(self): src_dataset_name = 'fake_src_dataset_name' src_share = { 'id': 'fake_src_share_id', 'dataset_name': src_dataset_name, } dst_share = { 'id': 'fake_dst_share_id', 'host': 'barhost@barbackend#barpool', 'share_proto': 'fake_share_proto', } dst_dataset_name = 'fake_dst_dataset_name' snapshot_tag = 'fake_migration_snapshot_tag' dst_ssh_cmd = 'fake_dst_ssh_cmd' self.driver.private_storage.update( src_share['id'], {'dataset_name': src_dataset_name}) self.driver.private_storage.update( dst_share['id'], { 'migr_snapshot_tag': snapshot_tag, 'dataset_name': dst_dataset_name, 'ssh_cmd': dst_ssh_cmd, }) mock_delete_dataset = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') ps_output = ( "fake_line1\nfoo_user 12345 foo_dataset_name@%s\n" "fake_line2") % snapshot_tag self.mock_object( self.driver, 'execute', mock.Mock(return_value=(ps_output, 'fake_err')) ) self.driver.migration_cancel( self._context, src_share, dst_share, [], {}) self.driver.execute.assert_has_calls([ mock.call('ps', 'aux'), mock.call('sudo', 'kill', '-9', '12345'), mock.call('ssh', dst_ssh_cmd, 'sudo', 'zfs', 'destroy', '-r', dst_dataset_name), ]) zfs_driver.time.sleep.assert_called_once_with(2) mock_delete_dataset.assert_called_once_with( src_dataset_name + '@' + snapshot_tag) def test_migration_cancel_error(self): src_dataset_name = 'fake_src_dataset_name' src_share = { 'id': 'fake_src_share_id', 'dataset_name': 
src_dataset_name, } dst_share = { 'id': 'fake_dst_share_id', 'host': 'barhost@barbackend#barpool', 'share_proto': 'fake_share_proto', } dst_dataset_name = 'fake_dst_dataset_name' snapshot_tag = 'fake_migration_snapshot_tag' dst_ssh_cmd = 'fake_dst_ssh_cmd' self.driver.private_storage.update( src_share['id'], {'dataset_name': src_dataset_name}) self.driver.private_storage.update( dst_share['id'], { 'migr_snapshot_tag': snapshot_tag, 'dataset_name': dst_dataset_name, 'ssh_cmd': dst_ssh_cmd, }) mock_delete_dataset = self.mock_object( self.driver, '_delete_dataset_or_snapshot_with_retry') self.mock_object( self.driver, 'execute', mock.Mock(side_effect=exception.ProcessExecutionError), ) self.driver.migration_cancel( self._context, src_share, dst_share, [], {}) self.driver.execute.assert_has_calls([ mock.call('ps', 'aux'), mock.call('ssh', dst_ssh_cmd, 'sudo', 'zfs', 'destroy', '-r', dst_dataset_name), ]) zfs_driver.time.sleep.assert_called_once_with(2) mock_delete_dataset.assert_called_once_with( src_dataset_name + '@' + snapshot_tag) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/zfsonlinux/test_utils.py0000664000175000017500000004604600000000000025243 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time from unittest import mock import ddt from oslo_config import cfg from manila import exception from manila.share.drivers.ganesha import utils as ganesha_utils from manila.share.drivers.zfsonlinux import utils as zfs_utils from manila import test CONF = cfg.CONF def get_fake_configuration(*args, **kwargs): fake_config_options = { "zfs_use_ssh": kwargs.get("zfs_use_ssh", False), "zfs_share_export_ip": kwargs.get( "zfs_share_export_ip", "240.241.242.243"), "zfs_service_ip": kwargs.get("zfs_service_ip", "240.241.242.244"), "ssh_conn_timeout": kwargs.get("ssh_conn_timeout", 123), "zfs_ssh_username": kwargs.get( "zfs_ssh_username", 'fake_username'), "zfs_ssh_user_password": kwargs.get( "zfs_ssh_user_password", 'fake_pass'), "zfs_ssh_private_key_path": kwargs.get( "zfs_ssh_private_key_path", '/fake/path'), "append_config_values": mock.Mock(), } return type("FakeConfig", (object, ), fake_config_options) class FakeShareDriver(zfs_utils.ExecuteMixin): def __init__(self, *args, **kwargs): self.configuration = get_fake_configuration(*args, **kwargs) self.init_execute_mixin(*args, **kwargs) @ddt.ddt class ExecuteMixinTestCase(test.TestCase): def setUp(self): super(ExecuteMixinTestCase, self).setUp() self.ssh_executor = self.mock_object(ganesha_utils, 'SSHExecutor') self.driver = FakeShareDriver() def test_init(self): self.assertIsNone(self.driver.ssh_executor) self.assertEqual(0, self.ssh_executor.call_count) def test_init_ssh(self): driver = FakeShareDriver(zfs_use_ssh=True) self.assertIsNotNone(driver.ssh_executor) self.ssh_executor.assert_called_once_with( ip=driver.configuration.zfs_service_ip, port=22, conn_timeout=driver.configuration.ssh_conn_timeout, login=driver.configuration.zfs_ssh_username, password=driver.configuration.zfs_ssh_user_password, privatekey=driver.configuration.zfs_ssh_private_key_path, max_size=10, ) def test_execute_with_provided_executor(self): self.mock_object(self.driver, '_execute') fake_executor = mock.Mock() 
self.driver.execute('fake', '--foo', '--bar', executor=fake_executor) self.assertFalse(self.driver._execute.called) self.assertFalse(self.ssh_executor.called) fake_executor.assert_called_once_with('fake', '--foo', '--bar') def test_local_shell_execute(self): self.mock_object(self.driver, '_execute') self.driver.execute('fake', '--foo', '--bar') self.assertEqual(0, self.ssh_executor.call_count) self.driver._execute.assert_called_once_with( 'fake', '--foo', '--bar') def test_local_shell_execute_with_sudo(self): self.mock_object(self.driver, '_execute') self.driver.execute('sudo', 'fake', '--foo', '--bar') self.assertEqual(0, self.ssh_executor.call_count) self.driver._execute.assert_called_once_with( 'fake', '--foo', '--bar', run_as_root=True) def test_ssh_execute(self): driver = FakeShareDriver(zfs_use_ssh=True) self.mock_object(driver, '_execute') driver.execute('fake', '--foo', '--bar') self.assertEqual(0, driver._execute.call_count) self.ssh_executor.return_value.assert_called_once_with( 'fake', '--foo', '--bar') def test_ssh_execute_with_sudo(self): driver = FakeShareDriver(zfs_use_ssh=True) self.mock_object(driver, '_execute') driver.execute('sudo', 'fake', '--foo', '--bar') self.assertEqual(0, driver._execute.call_count) self.ssh_executor.return_value.assert_called_once_with( 'fake', '--foo', '--bar', run_as_root=True) def test_execute_with_retry(self): self.mock_object(time, 'sleep') self.mock_object(self.driver, 'execute', mock.Mock( side_effect=[exception.ProcessExecutionError('FAKE'), None])) self.driver.execute_with_retry('foo', 'bar') self.assertEqual(2, self.driver.execute.call_count) self.driver.execute.assert_has_calls( [mock.call('foo', 'bar'), mock.call('foo', 'bar')]) def test_execute_with_retry_exceeded(self): self.mock_object(time, 'sleep') self.mock_object(self.driver, 'execute', mock.Mock( side_effect=exception.ProcessExecutionError('FAKE'))) self.assertRaises( exception.ProcessExecutionError, self.driver.execute_with_retry, 'foo', 'bar', ) 
self.assertEqual(36, self.driver.execute.call_count) @ddt.data(True, False) def test__get_option(self, pool_level): out = """NAME PROPERTY VALUE SOURCE\n foo_resource_name bar_option_name some_value local""" self.mock_object( self.driver, '_execute', mock.Mock(return_value=(out, ''))) res_name = 'foo_resource_name' opt_name = 'bar_option_name' result = self.driver._get_option( res_name, opt_name, pool_level=pool_level) self.assertEqual('some_value', result) self.driver._execute.assert_called_once_with( 'zpool' if pool_level else 'zfs', 'get', opt_name, res_name, run_as_root=True) def test_parse_zfs_answer(self): not_parsed_str = '' not_parsed_str = """NAME PROPERTY VALUE SOURCE\n foo_res opt_1 bar local foo_res opt_2 foo default foo_res opt_3 some_value local""" expected = [ {'NAME': 'foo_res', 'PROPERTY': 'opt_1', 'VALUE': 'bar', 'SOURCE': 'local'}, {'NAME': 'foo_res', 'PROPERTY': 'opt_2', 'VALUE': 'foo', 'SOURCE': 'default'}, {'NAME': 'foo_res', 'PROPERTY': 'opt_3', 'VALUE': 'some_value', 'SOURCE': 'local'}, ] result = self.driver.parse_zfs_answer(not_parsed_str) self.assertEqual(expected, result) def test_parse_zfs_answer_empty(self): result = self.driver.parse_zfs_answer('') self.assertEqual([], result) def test_get_zpool_option(self): self.mock_object(self.driver, '_get_option') zpool_name = 'foo_resource_name' opt_name = 'bar_option_name' result = self.driver.get_zpool_option(zpool_name, opt_name) self.assertEqual(self.driver._get_option.return_value, result) self.driver._get_option.assert_called_once_with( zpool_name, opt_name, True) def test_get_zfs_option(self): self.mock_object(self.driver, '_get_option') dataset_name = 'foo_resource_name' opt_name = 'bar_option_name' result = self.driver.get_zfs_option(dataset_name, opt_name) self.assertEqual(self.driver._get_option.return_value, result) self.driver._get_option.assert_called_once_with( dataset_name, opt_name, False) def test_zfs(self): self.mock_object(self.driver, 'execute') 
self.mock_object(self.driver, 'execute_with_retry') self.driver.zfs('foo', 'bar') self.assertEqual(0, self.driver.execute_with_retry.call_count) self.driver.execute.assert_called_once_with( 'sudo', 'zfs', 'foo', 'bar') def test_zfs_with_retry(self): self.mock_object(self.driver, 'execute') self.mock_object(self.driver, 'execute_with_retry') self.driver.zfs_with_retry('foo', 'bar') self.assertEqual(0, self.driver.execute.call_count) self.driver.execute_with_retry.assert_called_once_with( 'sudo', 'zfs', 'foo', 'bar') @ddt.ddt class NFSviaZFSHelperTestCase(test.TestCase): def setUp(self): super(NFSviaZFSHelperTestCase, self).setUp() configuration = get_fake_configuration() self.out = "fake_out" self.mock_object( zfs_utils.utils, "execute", mock.Mock(return_value=(self.out, ""))) self.helper = zfs_utils.NFSviaZFSHelper(configuration) def test_init(self): zfs_utils.utils.execute.assert_has_calls([ mock.call("which", "exportfs"), mock.call("exportfs", run_as_root=True), ]) def test_verify_setup_exportfs_not_installed(self): zfs_utils.utils.execute.reset_mock() zfs_utils.utils.execute.side_effect = [('', '')] self.assertRaises( exception.ZFSonLinuxException, self.helper.verify_setup) zfs_utils.utils.execute.assert_called_once_with("which", "exportfs") def test_verify_setup_error_calling_exportfs(self): zfs_utils.utils.execute.reset_mock() zfs_utils.utils.execute.side_effect = [ ('fake_out', ''), exception.ProcessExecutionError('Fake')] self.assertRaises( exception.ProcessExecutionError, self.helper.verify_setup) zfs_utils.utils.execute.assert_has_calls([ mock.call("which", "exportfs"), mock.call("exportfs", run_as_root=True), ]) def test_is_kernel_version_true(self): delattr(self.helper, '_is_kernel_version') zfs_utils.utils.execute.reset_mock() self.assertTrue(self.helper.is_kernel_version) zfs_utils.utils.execute.assert_has_calls([ mock.call("modinfo", "zfs"), ]) def test_is_kernel_version_false(self): delattr(self.helper, '_is_kernel_version') 
zfs_utils.utils.execute.reset_mock() zfs_utils.utils.execute.side_effect = ( exception.ProcessExecutionError('Fake')) self.assertFalse(self.helper.is_kernel_version) zfs_utils.utils.execute.assert_has_calls([ mock.call("modinfo", "zfs"), ]) def test_is_kernel_version_second_call(self): delattr(self.helper, '_is_kernel_version') zfs_utils.utils.execute.reset_mock() self.assertTrue(self.helper.is_kernel_version) self.assertTrue(self.helper.is_kernel_version) zfs_utils.utils.execute.assert_has_calls([ mock.call("modinfo", "zfs"), ]) def test_create_exports(self): self.mock_object(self.helper, 'get_exports') result = self.helper.create_exports('foo') self.assertEqual( self.helper.get_exports.return_value, result) def test_get_exports(self): self.mock_object( self.helper, 'get_zfs_option', mock.Mock(return_value='fake_mp')) expected = [ { "path": "%s:fake_mp" % ip, "metadata": {}, "is_admin_only": is_admin_only, } for ip, is_admin_only in ( (self.helper.configuration.zfs_share_export_ip, False), (self.helper.configuration.zfs_service_ip, True)) ] result = self.helper.get_exports('foo') self.assertEqual(expected, result) self.helper.get_zfs_option.assert_called_once_with( 'foo', 'mountpoint', executor=None) def test_remove_exports(self): zfs_utils.utils.execute.reset_mock() self.mock_object( self.helper, 'get_zfs_option', mock.Mock(return_value='bar')) self.helper.remove_exports('foo') self.helper.get_zfs_option.assert_called_once_with( 'foo', 'sharenfs', executor=None) zfs_utils.utils.execute.assert_called_once_with( 'zfs', 'set', 'sharenfs=off', 'foo', run_as_root=True) def test_remove_exports_that_absent(self): zfs_utils.utils.execute.reset_mock() self.mock_object( self.helper, 'get_zfs_option', mock.Mock(return_value='off')) self.helper.remove_exports('foo') self.helper.get_zfs_option.assert_called_once_with( 'foo', 'sharenfs', executor=None) self.assertEqual(0, zfs_utils.utils.execute.call_count) @ddt.data( (('fake_modinfo_result', ''), 
('sharenfs=rw=1.1.1.1:3.3.3.0/255.255.255.0,no_root_squash,' 'ro=2.2.2.2,no_root_squash'), False), (('fake_modinfo_result', ''), ('sharenfs=ro=1.1.1.1:2.2.2.2:3.3.3.0/255.255.255.0,no_root_squash'), True), (exception.ProcessExecutionError('Fake'), ('sharenfs=1.1.1.1:rw,no_root_squash 3.3.3.0/255.255.255.0:rw,' 'no_root_squash 2.2.2.2:ro,no_root_squash'), False), (exception.ProcessExecutionError('Fake'), ('sharenfs=1.1.1.1:ro,no_root_squash 2.2.2.2:ro,' 'no_root_squash 3.3.3.0/255.255.255.0:ro,no_root_squash'), True), ) @ddt.unpack def test_update_access_rw_and_ro(self, modinfo_response, access_str, make_all_ro): delattr(self.helper, '_is_kernel_version') zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' zfs_utils.utils.execute.side_effect = [ modinfo_response, ("""NAME USED AVAIL REFER MOUNTPOINT\n %(dn)s 2.58M 14.8G 27.5K /%(dn)s\n %(dn)s_some_other 3.58M 15.8G 28.5K /%(dn)s\n """ % {'dn': dataset_name}, ''), ('fake_set_opt_result', ''), ("""NAME PROPERTY VALUE SOURCE\n %s mountpoint /%s default\n """ % (dataset_name, dataset_name), ''), ('fake_1_result', ''), ('fake_2_result', ''), ('fake_3_result', ''), ('fake_4_result', ''), ('fake_5_result', ''), ] access_rules = [ {'access_type': 'ip', 'access_level': 'rw', 'access_to': '1.1.1.1'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '2.2.2.2'}, {'access_type': 'ip', 'access_level': 'rw', 'access_to': '3.3.3.0/24'}, ] delete_rules = [ {'access_type': 'ip', 'access_level': 'rw', 'access_to': '4.4.4.4'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '5.5.5.5/32'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '5.5.5.6/16'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '5.5.5.7/0'}, {'access_type': 'user', 'access_level': 'rw', 'access_to': '6.6.6.6'}, {'access_type': 'user', 'access_level': 'ro', 'access_to': '7.7.7.7'}, ] self.helper.update_access( dataset_name, access_rules, [], delete_rules, make_all_ro=make_all_ro) 
zfs_utils.utils.execute.assert_has_calls([ mock.call('modinfo', 'zfs'), mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True), mock.call( 'zfs', 'set', access_str, dataset_name, run_as_root=True), mock.call( 'zfs', 'get', 'mountpoint', dataset_name, run_as_root=True), mock.call( 'exportfs', '-u', '4.4.4.4:/%s' % dataset_name, run_as_root=True), mock.call( 'exportfs', '-u', '5.5.5.5:/%s' % dataset_name, run_as_root=True), mock.call( 'exportfs', '-u', '5.5.5.6/255.255.0.0:/%s' % dataset_name, run_as_root=True), mock.call( 'exportfs', '-u', '5.5.5.7/0.0.0.0:/%s' % dataset_name, run_as_root=True), ]) def test_update_access_dataset_not_found(self): self.mock_object(zfs_utils.LOG, 'warning') zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' zfs_utils.utils.execute.side_effect = [ ('fake_modinfo_result', ''), ('fake_dataset_not_found_result', ''), ('fake_set_opt_result', ''), ] access_rules = [ {'access_type': 'ip', 'access_level': 'rw', 'access_to': '1.1.1.1'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '1.1.1.2'}, ] self.helper.update_access(dataset_name, access_rules, [], []) zfs_utils.utils.execute.assert_has_calls([ mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True), ]) zfs_utils.LOG.warning.assert_called_once_with( mock.ANY, {'name': dataset_name}) @ddt.data(exception.ProcessExecutionError('Fake'), ('Ok', '')) def test_update_access_no_rules(self, first_execute_result): zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' zfs_utils.utils.execute.side_effect = [ ("""NAME USED AVAIL REFER MOUNTPOINT\n %s 2.58M 14.8G 27.5K /%s\n """ % (dataset_name, dataset_name), ''), ('fake_set_opt_result', ''), ] self.helper.update_access(dataset_name, [], [], []) zfs_utils.utils.execute.assert_has_calls([ mock.call('zfs', 'list', '-r', 'zpoolz', run_as_root=True), mock.call('zfs', 'set', 'sharenfs=off', dataset_name, run_as_root=True), ]) @ddt.data('user', 'cert', 'cephx', '', 'fake', 'i', 
'p') def test_update_access_not_ip_access_type(self, access_type): zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' access_rules = [ {'access_type': access_type, 'access_level': 'rw', 'access_to': '1.1.1.1'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '1.1.1.2'}, ] self.assertRaises( exception.InvalidShareAccess, self.helper.update_access, dataset_name, access_rules, access_rules, [], ) self.assertEqual(0, zfs_utils.utils.execute.call_count) @ddt.data('', 'r', 'o', 'w', 'fake', 'su') def test_update_access_neither_rw_nor_ro_access_level(self, access_level): zfs_utils.utils.execute.reset_mock() dataset_name = 'zpoolz/foo_dataset_name/fake' access_rules = [ {'access_type': 'ip', 'access_level': access_level, 'access_to': '1.1.1.1'}, {'access_type': 'ip', 'access_level': 'ro', 'access_to': '1.1.1.2'}, ] self.assertRaises( exception.InvalidShareAccess, self.helper.update_access, dataset_name, access_rules, access_rules, [], ) self.assertEqual(0, zfs_utils.utils.execute.call_count) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04167 manila-21.0.0/manila/tests/share/drivers/zfssa/0000775000175000017500000000000000000000000021367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/zfssa/__init__.py0000664000175000017500000000000000000000000023466 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/zfssa/test_zfssarest.py0000664000175000017500000004374000000000000025034 0ustar00zuulzuul00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for Oracle's ZFSSA REST API. """ from unittest import mock from manila import exception from manila.share.drivers.zfssa import restclient from manila.share.drivers.zfssa import zfssarest from manila import test from manila.tests import fake_zfssa class ZFSSAApiTestCase(test.TestCase): """Tests ZFSSAApi.""" @mock.patch.object(zfssarest, 'factory_restclient') def setUp(self, _restclient): super(ZFSSAApiTestCase, self).setUp() self.host = 'fakehost' self.user = 'fakeuser' self.url = None self.pool = 'fakepool' self.project = 'fakeproject' self.share = 'fakeshare' self.snap = 'fakesnapshot' _restclient.return_value = fake_zfssa.FakeRestClient() self._zfssa = zfssarest.ZFSSAApi() self._zfssa.set_host('fakehost') self.schema = { 'property': 'manila_managed', 'description': 'Managed by Manila', 'type': 'Boolean', } def _create_response(self, status): response = fake_zfssa.FakeResponse(status) return response def test_enable_service(self): self.mock_object(self._zfssa.rclient, 'put') self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.ACCEPTED) self._zfssa.enable_service('nfs') self.assertEqual(1, self._zfssa.rclient.put.call_count) self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.OK) self.assertRaises(exception.ShareBackendException, self._zfssa.enable_service, 'nfs') def test_verify_avail_space(self): self.mock_object(self._zfssa, 'verify_project') self.mock_object(self._zfssa, 'get_project_stats') self._zfssa.get_project_stats.return_value = 2000 self._zfssa.verify_avail_space(self.pool, 
self.project, self.share, 1000) self.assertEqual(1, self._zfssa.verify_project.call_count) self.assertEqual(1, self._zfssa.get_project_stats.call_count) self._zfssa.verify_project.assert_called_with(self.pool, self.project) self._zfssa.get_project_stats.assert_called_with(self.pool, self.project) self._zfssa.get_project_stats.return_value = 900 self.assertRaises(exception.ShareBackendException, self._zfssa.verify_avail_space, self.pool, self.project, self.share, 1000) def test_create_project(self): self.mock_object(self._zfssa, 'verify_pool') self.mock_object(self._zfssa.rclient, 'get') self.mock_object(self._zfssa.rclient, 'post') arg = { 'name': self.project, 'sharesmb': 'off', 'sharenfs': 'off', 'mountpoint': 'fakemnpt', } self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.NOT_FOUND) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) self._zfssa.create_project(self.pool, self.project, arg) self.assertEqual(1, self._zfssa.rclient.get.call_count) self.assertEqual(1, self._zfssa.rclient.post.call_count) self.assertEqual(1, self._zfssa.verify_pool.call_count) self._zfssa.verify_pool.assert_called_with(self.pool) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.NOT_FOUND) self.assertRaises(exception.ShareBackendException, self._zfssa.create_project, self.pool, self.project, arg) def test_create_share(self): self.mock_object(self._zfssa, 'verify_avail_space') self.mock_object(self._zfssa.rclient, 'get') self.mock_object(self._zfssa.rclient, 'post') self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.NOT_FOUND) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) arg = { "name": self.share, "quota": 1, } self._zfssa.create_share(self.pool, self.project, arg) self.assertEqual(1, self._zfssa.rclient.get.call_count) self.assertEqual(1, self._zfssa.rclient.post.call_count) self.assertEqual(1, 
self._zfssa.verify_avail_space.call_count) self._zfssa.verify_avail_space.assert_called_with(self.pool, self.project, arg, arg['quota']) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.NOT_FOUND) self.assertRaises(exception.ShareBackendException, self._zfssa.create_share, self.pool, self.project, arg) self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.OK) self.assertRaises(exception.ShareBackendException, self._zfssa.create_share, self.pool, self.project, arg) def test_modify_share(self): self.mock_object(self._zfssa.rclient, 'put') self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.ACCEPTED) arg = {"name": "dummyname"} svc = self._zfssa.share_path % (self.pool, self.project, self.share) self._zfssa.modify_share(self.pool, self.project, self.share, arg) self.assertEqual(1, self._zfssa.rclient.put.call_count) self._zfssa.rclient.put.assert_called_with(svc, arg) self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.BAD_REQUEST) self.assertRaises(exception.ShareBackendException, self._zfssa.modify_share, self.pool, self.project, self.share, arg) def test_delete_share(self): self.mock_object(self._zfssa.rclient, 'delete') self._zfssa.rclient.delete.return_value = self._create_response( restclient.Status.NO_CONTENT) svc = self._zfssa.share_path % (self.pool, self.project, self.share) self._zfssa.delete_share(self.pool, self.project, self.share) self.assertEqual(1, self._zfssa.rclient.delete.call_count) self._zfssa.rclient.delete.assert_called_with(svc) def test_create_snapshot(self): self.mock_object(self._zfssa.rclient, 'post') self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) arg = {"name": self.snap} svc = self._zfssa.snapshots_path % (self.pool, self.project, self.share) self._zfssa.create_snapshot(self.pool, self.project, self.share, self.snap) self.assertEqual(1, self._zfssa.rclient.post.call_count) 
self._zfssa.rclient.post.assert_called_with(svc, arg) self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.BAD_REQUEST) self.assertRaises(exception.ShareBackendException, self._zfssa.create_snapshot, self.pool, self.project, self.share, self.snap) def test_delete_snapshot(self): self.mock_object(self._zfssa.rclient, 'delete') self._zfssa.rclient.delete.return_value = self._create_response( restclient.Status.NO_CONTENT) svc = self._zfssa.snapshot_path % (self.pool, self.project, self.share, self.snap) self._zfssa.delete_snapshot(self.pool, self.project, self.share, self.snap) self.assertEqual(1, self._zfssa.rclient.delete.call_count) self._zfssa.rclient.delete.assert_called_with(svc) self._zfssa.rclient.delete.return_value = self._create_response( restclient.Status.BAD_REQUEST) self.assertRaises(exception.ShareBackendException, self._zfssa.delete_snapshot, self.pool, self.project, self.share, self.snap) def test_clone_snapshot(self): self.mock_object(self._zfssa, 'verify_avail_space') self.mock_object(self._zfssa.rclient, 'put') self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.CREATED) snapshot = { "id": self.snap, "share_id": self.share, } clone = { "id": "cloneid", "size": 1, } arg = { "name": "dummyname", "quota": 1, } self._zfssa.clone_snapshot(self.pool, self.project, snapshot, clone, arg) self.assertEqual(1, self._zfssa.rclient.put.call_count) self.assertEqual(1, self._zfssa.verify_avail_space.call_count) self._zfssa.verify_avail_space.assert_called_with(self.pool, self.project, clone['id'], clone['size']) self._zfssa.rclient.put.return_value = self._create_response( restclient.Status.NOT_FOUND) self.assertRaises(exception.ShareBackendException, self._zfssa.clone_snapshot, self.pool, self.project, snapshot, clone, arg) def _create_entry(self, sharenfs, ip): if sharenfs == 'off': sharenfs = 'sec=sys' entry = (',rw=@%s' % ip) if '/' not in ip: entry = entry + '/32' arg = {'sharenfs': sharenfs + entry} 
return arg def test_allow_access_nfs(self): self.mock_object(self._zfssa, 'get_share') self.mock_object(self._zfssa, 'modify_share') details = {"sharenfs": "off"} access = { "access_type": "nonip", "access_to": "foo", } # invalid access type self.assertRaises(exception.InvalidShareAccess, self._zfssa.allow_access_nfs, self.pool, self.project, self.share, access) # valid entry access.update({"access_type": "ip"}) arg = self._create_entry("off", access['access_to']) self._zfssa.get_share.return_value = details self._zfssa.allow_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(1, self._zfssa.get_share.call_count) self.assertEqual(1, self._zfssa.modify_share.call_count) self._zfssa.get_share.assert_called_with(self.pool, self.project, self.share) self._zfssa.modify_share.assert_called_with(self.pool, self.project, self.share, arg) # add another entry access.update({"access_to": "10.0.0.1/24"}) arg = self._create_entry("off", access['access_to']) self._zfssa.allow_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(2, self._zfssa.modify_share.call_count) self._zfssa.modify_share.assert_called_with(self.pool, self.project, self.share, arg) # verify modify_share is not called if sharenfs='on' details = {"sharenfs": "on"} self._zfssa.get_share.return_value = details self._zfssa.allow_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(2, self._zfssa.modify_share.call_count) # verify modify_share is not called if ip is already in the list access.update({"access_to": "10.0.0.1/24"}) details = self._create_entry("off", access['access_to']) self._zfssa.get_share.return_value = details self._zfssa.allow_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(2, self._zfssa.modify_share.call_count) def test_deny_access_nfs(self): self.mock_object(self._zfssa, 'get_share') self.mock_object(self._zfssa, 'modify_share') data1 = self._create_entry("off", "10.0.0.1") access = { "access_type": "nonip", 
"access_to": "foo", } # invalid access_type self.assertRaises(exception.InvalidShareAccess, self._zfssa.deny_access_nfs, self.pool, self.project, self.share, access) # valid entry access.update({"access_type": "ip"}) self._zfssa.get_share.return_value = data1 self._zfssa.deny_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(1, self._zfssa.get_share.call_count) self.assertEqual(0, self._zfssa.modify_share.call_count) self._zfssa.get_share.assert_called_with(self.pool, self.project, self.share) # another valid entry data1 = self._create_entry(data1['sharenfs'], '10.0.0.2/24') data2 = self._create_entry(data1['sharenfs'], access['access_to']) self._zfssa.get_share.return_value = data2 self._zfssa.deny_access_nfs(self.pool, self.project, self.share, access) self.assertEqual(2, self._zfssa.get_share.call_count) self.assertEqual(1, self._zfssa.modify_share.call_count) self._zfssa.get_share.assert_called_with(self.pool, self.project, self.share) self._zfssa.modify_share.assert_called_with(self.pool, self.project, self.share, data1) def test_create_schema_negative(self): self.mock_object(self._zfssa.rclient, 'get') self.mock_object(self._zfssa.rclient, 'post') self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.NOT_FOUND) self.assertRaises(exception.ShareBackendException, self._zfssa.create_schema, self.schema) def test_create_schema_property_exists(self): self.mock_object(self._zfssa.rclient, 'get') self.mock_object(self._zfssa.rclient, 'post') self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.OK) self._zfssa.create_schema(self.schema) self.assertEqual(1, self._zfssa.rclient.get.call_count) self.assertEqual(0, self._zfssa.rclient.post.call_count) def test_create_schema(self): self.mock_object(self._zfssa.rclient, 'get') self.mock_object(self._zfssa.rclient, 'post') self._zfssa.rclient.get.return_value = self._create_response( restclient.Status.NOT_FOUND) 
self._zfssa.rclient.post.return_value = self._create_response( restclient.Status.CREATED) self._zfssa.create_schema(self.schema) self.assertEqual(1, self._zfssa.rclient.get.call_count) self.assertEqual(1, self._zfssa.rclient.post.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/drivers/zfssa/test_zfssashare.py0000664000175000017500000004735100000000000025163 0ustar00zuulzuul00000000000000# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for Oracle's ZFSSA Manila driver. 
""" from unittest import mock from oslo_config import cfg from oslo_utils import units from manila import context from manila import exception from manila.share import configuration as conf from manila.share.drivers.zfssa import zfssashare from manila import test from manila.tests import fake_zfssa CONF = cfg.CONF class ZFSSAShareDriverTestCase(test.TestCase): """Tests ZFSSAShareDriver.""" share = { 'id': 'fakeid', 'name': 'fakename', 'size': 1, 'share_proto': 'NFS', 'export_location': '/mnt/nfs/volume-00002', } share2 = { 'id': 'fakeid2', 'name': 'fakename2', 'size': 4, 'share_proto': 'CIFS', 'export_location': '/mnt/nfs/volume-00003', 'space_data': 3006477107 } snapshot = { 'id': 'fakesnapshotid', 'share_name': 'fakename', 'share_id': 'fakeid', 'name': 'fakesnapshotname', 'share_size': 1, 'share_proto': 'NFS', } access = { 'id': 'fakeaccid', 'access_type': 'ip', 'access_to': '10.0.0.2', 'state': 'active', } @mock.patch.object(zfssashare, 'factory_zfssa') def setUp(self, _factory_zfssa): super(ZFSSAShareDriverTestCase, self).setUp() self._create_fake_config() lcfg = self.configuration self.mountpoint = '/export/' + lcfg.zfssa_nas_mountpoint _factory_zfssa.return_value = fake_zfssa.FakeZFSSA() _factory_zfssa.set_host(lcfg.zfssa_host) _factory_zfssa.login(lcfg.zfssa_auth_user) self._context = context.get_admin_context() self._driver = zfssashare.ZFSSAShareDriver(False, configuration=lcfg) self._driver.do_setup(self._context) self.fake_proto_share = { 'id': self.share['id'], 'share_proto': 'fake_proto', 'export_locations': [{'path': self.share['export_location']}], } self.test_share = { 'id': self.share['id'], 'share_proto': 'NFS', 'export_locations': [{'path': self.share['export_location']}], } self.test_share2 = { 'id': self.share2['id'], 'share_proto': 'CIFS', 'export_locations': [{'path': self.share2['export_location']}], } self.driver_options = {'zfssa_name': self.share['name']} def _create_fake_config(self): def _safe_get(opt): return 
getattr(self.configuration, opt) self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.safe_get = mock.Mock(side_effect=_safe_get) self.configuration.zfssa_host = '1.1.1.1' self.configuration.zfssa_data_ip = '1.1.1.1' self.configuration.zfssa_auth_user = 'user' self.configuration.zfssa_auth_password = 'passwd' self.configuration.zfssa_pool = 'pool' self.configuration.zfssa_project = 'project' self.configuration.zfssa_nas_mountpoint = 'project' self.configuration.zfssa_nas_checksum = 'fletcher4' self.configuration.zfssa_nas_logbias = 'latency' self.configuration.zfssa_nas_compression = 'off' self.configuration.zfssa_nas_vscan = 'false' self.configuration.zfssa_nas_rstchown = 'true' self.configuration.zfssa_nas_quota_snap = 'true' self.configuration.zfssa_rest_timeout = 60 self.configuration.network_config_group = 'fake_network_config_group' self.configuration.admin_network_config_group = ( 'fake_admin_network_config_group') self.configuration.driver_handles_share_servers = False self.configuration.zfssa_manage_policy = 'strict' def test_create_share(self): self.mock_object(self._driver.zfssa, 'create_share') self.mock_object(self._driver, '_export_location') lcfg = self.configuration arg = { 'host': lcfg.zfssa_data_ip, 'mountpoint': self.mountpoint, 'name': self.share['id'], } location = ("%(host)s:%(mountpoint)s/%(name)s" % arg) self._driver._export_location.return_value = location arg = self._driver.create_arg(self.share['size']) arg.update(self._driver.default_args) arg.update({'name': self.share['id']}) ret = self._driver.create_share(self._context, self.share) self._driver.zfssa.create_share.assert_called_with(lcfg.zfssa_pool, lcfg.zfssa_project, arg) self.assertEqual(location, ret) self.assertEqual(1, self._driver.zfssa.create_share.call_count) self.assertEqual(1, self._driver._export_location.call_count) def test_create_share_from_snapshot(self): self.mock_object(self._driver.zfssa, 'clone_snapshot') self.mock_object(self._driver, 
'_export_location') lcfg = self.configuration arg = { 'host': lcfg.zfssa_data_ip, 'mountpoint': self.mountpoint, 'name': self.share['id'], } location = ("%(host)s:%(mountpoint)s/%(name)s" % arg) self._driver._export_location.return_value = location arg = self._driver.create_arg(self.share['size']) details = { 'share': self.share['id'], 'project': lcfg.zfssa_project, } arg.update(details) ret = self._driver.create_share_from_snapshot(self._context, self.share, self.snapshot) self.assertEqual(location, ret) self.assertEqual(1, self._driver.zfssa.clone_snapshot.call_count) self.assertEqual(1, self._driver._export_location.call_count) self._driver.zfssa.clone_snapshot.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.snapshot, self.share, arg) def test_delete_share(self): self.mock_object(self._driver.zfssa, 'delete_share') self._driver.delete_share(self._context, self.share) self.assertEqual(1, self._driver.zfssa.delete_share.call_count) lcfg = self.configuration self._driver.zfssa.delete_share.assert_called_with(lcfg.zfssa_pool, lcfg.zfssa_project, self.share['id']) def test_create_snapshot(self): self.mock_object(self._driver.zfssa, 'create_snapshot') lcfg = self.configuration self._driver.create_snapshot(self._context, self.snapshot) self.assertEqual(1, self._driver.zfssa.create_snapshot.call_count) self._driver.zfssa.create_snapshot.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.snapshot['share_id'], self.snapshot['id']) def test_delete_snapshot(self): self.mock_object(self._driver.zfssa, 'delete_snapshot') self._driver.delete_snapshot(self._context, self.snapshot) self.assertEqual(1, self._driver.zfssa.delete_snapshot.call_count) def test_delete_snapshot_negative(self): self.mock_object(self._driver.zfssa, 'has_clones') self._driver.zfssa.has_clones.return_value = True self.assertRaises(exception.ShareSnapshotIsBusy, self._driver.delete_snapshot, self._context, self.snapshot) def test_ensure_share(self): 
self.mock_object(self._driver.zfssa, 'get_share') lcfg = self.configuration self._driver.ensure_share(self._context, self.share) self.assertEqual(1, self._driver.zfssa.get_share.call_count) self._driver.zfssa.get_share.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.share['id']) self._driver.zfssa.get_share.return_value = None self.assertRaises(exception.ManilaException, self._driver.ensure_share, self._context, self.share) def test_allow_access(self): self.mock_object(self._driver.zfssa, 'allow_access_nfs') lcfg = self.configuration self._driver.allow_access(self._context, self.share, self.access) self.assertEqual(1, self._driver.zfssa.allow_access_nfs.call_count) self._driver.zfssa.allow_access_nfs.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.share['id'], self.access) def test_deny_access(self): self.mock_object(self._driver.zfssa, 'deny_access_nfs') lcfg = self.configuration self._driver.deny_access(self._context, self.share, self.access) self.assertEqual(1, self._driver.zfssa.deny_access_nfs.call_count) self._driver.zfssa.deny_access_nfs.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.share['id'], self.access) def test_extend_share_negative(self): self.mock_object(self._driver.zfssa, 'modify_share') new_size = 3 # Not enough space in project, expect an exception: self.mock_object(self._driver.zfssa, 'get_project_stats') self._driver.zfssa.get_project_stats.return_value = 1 * units.Gi self.assertRaises(exception.ShareExtendingError, self._driver.extend_share, self.share, new_size) def test_extend_share(self): self.mock_object(self._driver.zfssa, 'modify_share') new_size = 3 lcfg = self.configuration self.mock_object(self._driver.zfssa, 'get_project_stats') self._driver.zfssa.get_project_stats.return_value = 10 * units.Gi arg = self._driver.create_arg(new_size) self._driver.extend_share(self.share, new_size) self.assertEqual(1, self._driver.zfssa.modify_share.call_count) 
self._driver.zfssa.modify_share.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.share['id'], arg) def test_shrink_share_negative(self): self.mock_object(self._driver.zfssa, 'modify_share') # Used space is larger than 2GB new_size = 2 self.mock_object(self._driver.zfssa, 'get_share') self._driver.zfssa.get_share.return_value = self.share2 self.assertRaises(exception.ShareShrinkingPossibleDataLoss, self._driver.shrink_share, self.share2, new_size) def test_shrink_share(self): self.mock_object(self._driver.zfssa, 'modify_share') new_size = 3 lcfg = self.configuration self.mock_object(self._driver.zfssa, 'get_share') self._driver.zfssa.get_share.return_value = self.share2 arg = self._driver.create_arg(new_size) self._driver.shrink_share(self.share2, new_size) self.assertEqual(1, self._driver.zfssa.modify_share.call_count) self._driver.zfssa.modify_share.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.share2['id'], arg) def test_manage_invalid_option(self): self.mock_object(self._driver, '_get_share_details') # zfssa_name not in driver_options: self.assertRaises(exception.ShareBackendException, self._driver.manage_existing, self.share, {}) def test_manage_no_share_details(self): self.mock_object(self._driver, '_get_share_details') self._driver._get_share_details.side_effect = ( exception.ShareResourceNotFound(share_id=self.share['name'])) self.assertRaises(exception.ShareResourceNotFound, self._driver.manage_existing, self.share, self.driver_options) def test_manage_invalid_size(self): details = { 'quota': 10, # 10 bytes 'reservation': 10, } self.mock_object(self._driver, '_get_share_details') self._driver._get_share_details.return_value = details self.mock_object(self._driver.zfssa, 'get_project_stats') self._driver.zfssa.get_project_stats.return_value = 900 # Share size is less than 1GB, but there is not enough free space self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, self.test_share, self.driver_options) def 
test_manage_invalid_protocol(self): self.mock_object(self._driver, '_get_share_details') self._driver._get_share_details.return_value = { 'quota': self.share['size'] * units.Gi, 'reservation': self.share['size'] * units.Gi, 'custom:manila_managed': False, } self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, self.fake_proto_share, self.driver_options) def test_manage_unmanage_no_schema(self): self.mock_object(self._driver, '_get_share_details') self._driver._get_share_details.return_value = {} # Share does not have custom:manila_managed property # Test manage_existing(): self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, self.test_share, self.driver_options) # Test unmanage(): self.assertRaises(exception.UnmanageInvalidShare, self._driver.unmanage, self.test_share) def test_manage_round_up_size(self): details = { 'quota': 100, 'reservation': 50, 'custom:manila_managed': False, } self.mock_object(self._driver, '_get_share_details') self._driver._get_share_details.return_value = details self.mock_object(self._driver.zfssa, 'get_project_stats') self._driver.zfssa.get_project_stats.return_value = 1 * units.Gi ret = self._driver.manage_existing(self.test_share, self.driver_options) # Expect share size is 1GB self.assertEqual(1, ret['size']) def test_manage_not_enough_space(self): details = { 'quota': 3.5 * units.Gi, 'reservation': 3.5 * units.Gi, 'custom:manila_managed': False, } self.mock_object(self._driver, '_get_share_details') self._driver._get_share_details.return_value = details self.mock_object(self._driver.zfssa, 'get_project_stats') self._driver.zfssa.get_project_stats.return_value = 0.1 * units.Gi self.assertRaises(exception.ManageInvalidShare, self._driver.manage_existing, self.test_share, self.driver_options) def test_manage_unmanage_NFS(self): lcfg = self.configuration details = { # Share size is 1GB 'quota': self.share['size'] * units.Gi, 'reservation': self.share['size'] * units.Gi, 
'custom:manila_managed': False, } arg = { 'host': lcfg.zfssa_data_ip, 'mountpoint': self.share['export_location'], 'name': self.share['id'], } export_loc = "%(host)s:%(mountpoint)s/%(name)s" % arg self.mock_object(self._driver, '_get_share_details') self._driver._get_share_details.return_value = details ret = self._driver.manage_existing(self.test_share, self.driver_options) self.assertEqual(export_loc, ret['export_locations']) self.assertEqual(1, ret['size']) def test_manage_unmanage_CIFS(self): lcfg = self.configuration details = { # Share size is 1GB 'quota': self.share2['size'] * units.Gi, 'reservation': self.share2['size'] * units.Gi, 'custom:manila_managed': False, } arg = { 'host': lcfg.zfssa_data_ip, 'name': self.share2['id'], } export_loc = "\\\\%(host)s\\%(name)s" % arg self.mock_object(self._driver, '_get_share_details') self._driver._get_share_details.return_value = details ret = self._driver.manage_existing(self.test_share2, self.driver_options) self.assertEqual(export_loc, ret['export_locations']) self.assertEqual(4, ret['size']) def test_unmanage_NFS(self): self.mock_object(self._driver.zfssa, 'modify_share') lcfg = self.configuration details = { 'quota': self.share['size'] * units.Gi, 'reservation': self.share['size'] * units.Gi, 'custom:manila_managed': True, } arg = { 'custom:manila_managed': False, 'sharenfs': 'off', } self.mock_object(self._driver, '_get_share_details') self._driver._get_share_details.return_value = details self._driver.unmanage(self.test_share) self._driver.zfssa.modify_share.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.test_share['id'], arg) def test_unmanage_CIFS(self): self.mock_object(self._driver.zfssa, 'modify_share') lcfg = self.configuration details = { 'quota': self.share2['size'] * units.Gi, 'reservation': self.share2['size'] * units.Gi, 'custom:manila_managed': True, } arg = { 'custom:manila_managed': False, 'sharesmb': 'off', } self.mock_object(self._driver, '_get_share_details') 
self._driver._get_share_details.return_value = details self._driver.unmanage(self.test_share2) self._driver.zfssa.modify_share.assert_called_with( lcfg.zfssa_pool, lcfg.zfssa_project, self.test_share2['id'], arg) def test_verify_share_to_manage_loose_policy(self): # Temporarily change policy to loose self.configuration.zfssa_manage_policy = 'loose' ret = self._driver._verify_share_to_manage('sharename', {}) self.assertIsNone(ret) # Change it back to strict self.configuration.zfssa_manage_policy = 'strict' def test_verify_share_to_manage_no_property(self): self.configuration.zfssa_manage_policy = 'strict' self.assertRaises(exception.ManageInvalidShare, self._driver._verify_share_to_manage, 'sharename', {}) def test_verify_share_to_manage_alredy_managed(self): details = {'custom:manila_managed': True} self.assertRaises(exception.ManageInvalidShare, self._driver._verify_share_to_manage, 'sharename', details) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_access.py0000664000175000017500000011626400000000000021447 0ustar00zuulzuul00000000000000# Copyright 2016 Hitachi Data Systems inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools import random from unittest import mock import ddt from manila.common import constants from manila import context from manila import db from manila import exception from manila.share import access from manila import test from manila.tests import db_utils from manila import utils class LockedOperationsTestCase(test.TestCase): class FakeAccessHelper(object): @access.locked_access_rules_operation def some_access_rules_operation(self, context, share_instance_id=None): pass def setUp(self): super(LockedOperationsTestCase, self).setUp() self.access_helper = self.FakeAccessHelper() self.context = context.RequestContext('fake_user', 'fake_project') self.lock_call = self.mock_object( utils, 'synchronized', mock.Mock(return_value=lambda f: f)) def test_locked_access_rules_operation(self, **replica): self.access_helper.some_access_rules_operation( self.context, share_instance_id='FAKE_INSTANCE_ID') self.lock_call.assert_called_once_with( "locked_access_rules_operation_by_share_instance_FAKE_INSTANCE_ID", external=True) @ddt.ddt class ShareInstanceAccessDatabaseMixinTestCase(test.TestCase): def setUp(self): super(ShareInstanceAccessDatabaseMixinTestCase, self).setUp() self.driver = mock.Mock() self.access_helper = access.ShareInstanceAccess(db, self.driver) self.context = context.RequestContext('fake_user', 'fake_project') self.mock_object( utils, 'synchronized', mock.Mock(return_value=lambda f: f)) def test_get_and_update_access_rules_status_force_status(self): share = db_utils.create_share( access_rule_status=constants.STATUS_ACTIVE, status=constants.STATUS_AVAILABLE) share = db.share_get(self.context, share['id']) self.assertEqual(constants.STATUS_ACTIVE, share['access_rules_status']) self.access_helper.get_and_update_share_instance_access_rules_status( self.context, status=constants.SHARE_INSTANCE_RULES_SYNCING, share_instance_id=share['instance']['id']) share = db.share_get(self.context, share['id']) 
self.assertEqual(constants.SHARE_INSTANCE_RULES_SYNCING, share['access_rules_status']) @ddt.data((constants.SHARE_INSTANCE_RULES_SYNCING, True), (constants.STATUS_ERROR, False)) @ddt.unpack def test_get_and_update_access_rules_status_conditionally_change( self, initial_status, change_allowed): share = db_utils.create_share(access_rules_status=initial_status, status=constants.STATUS_AVAILABLE) share = db.share_get(self.context, share['id']) self.assertEqual(initial_status, share['access_rules_status']) conditionally_change = { constants.SHARE_INSTANCE_RULES_SYNCING: constants.STATUS_ACTIVE, } updated_instance = ( self.access_helper. get_and_update_share_instance_access_rules_status( self.context, conditionally_change=conditionally_change, share_instance_id=share['instance']['id']) ) share = db.share_get(self.context, share['id']) if change_allowed: self.assertEqual(constants.STATUS_ACTIVE, share['access_rules_status']) self.assertIsNotNone(updated_instance) else: self.assertEqual(initial_status, share['access_rules_status']) self.assertIsNone(updated_instance) def test_get_and_update_all_access_rules_just_get(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) rule_1 = db_utils.create_access(share_id=share['id']) rule_2 = db_utils.create_access(share_id=share['id']) self.mock_object(db, 'share_instance_access_update') rules = self.access_helper.get_and_update_share_instance_access_rules( self.context, share_instance_id=share['instance']['id']) self.assertEqual(2, len(rules)) rule_ids = [r['access_id'] for r in rules] self.assertIn(rule_1['id'], rule_ids) self.assertIn(rule_2['id'], rule_ids) self.assertFalse(db.share_instance_access_update.called) @ddt.data( ([constants.ACCESS_STATE_QUEUED_TO_APPLY], 2), ([constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.STATUS_ACTIVE], 1), ([constants.ACCESS_STATE_APPLYING], 2), ([constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_ERROR], 1), ([constants.ACCESS_STATE_ACTIVE, 
constants.ACCESS_STATE_DENYING], 0)) @ddt.unpack def test_get_and_update_all_access_rules_updates_conditionally_changed( self, statuses, changes_allowed): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) db_utils.create_access(share_id=share['id'], state=statuses[0]) db_utils.create_access(share_id=share['id'], state=statuses[-1]) self.mock_object(db, 'share_instance_access_update', mock.Mock( side_effect=db.share_instance_access_update)) updates = { 'access_key': 'renfrow2stars' } expected_updates = { 'access_key': 'renfrow2stars', 'state': constants.ACCESS_STATE_QUEUED_TO_DENY, } conditionally_change = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_QUEUED_TO_DENY, constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_QUEUED_TO_DENY, } rules = self.access_helper.get_and_update_share_instance_access_rules( self.context, share_instance_id=share['instance']['id'], updates=updates, conditionally_change=conditionally_change) state_changed_rules = [ r for r in rules if r['state'] == constants.ACCESS_STATE_QUEUED_TO_DENY ] self.assertEqual(changes_allowed, len(state_changed_rules)) self.assertEqual(2, db.share_instance_access_update.call_count) db.share_instance_access_update.assert_has_calls([ mock.call(self.context, mock.ANY, share['instance']['id'], expected_updates), ] * changes_allowed) def test_get_and_update_access_rule_just_get(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) expected_rule = db_utils.create_access(share_id=share['id']) self.mock_object(db, 'share_instance_access_update') actual_rule = ( self.access_helper.get_and_update_share_instance_access_rule( self.context, expected_rule['id'], share_instance_id=share['instance']['id']) ) self.assertEqual(expected_rule['id'], actual_rule['access_id']) self.assertFalse(db.share_instance_access_update.called) @ddt.data(constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_ACTIVE, constants.ACCESS_STATE_QUEUED_TO_APPLY) 
def test_get_and_update_access_rule_updates_conditionally_changed( self, initial_state): mock_debug_log = self.mock_object(access.LOG, 'debug') share = db_utils.create_share(status=constants.STATUS_AVAILABLE) rule = db_utils.create_access(share_id=share['id'], state=initial_state) self.mock_object(db, 'share_instance_access_update', mock.Mock( side_effect=db.share_instance_access_update)) updates = { 'access_key': 'renfrow2stars' } conditionally_change = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_QUEUED_TO_DENY, constants.ACCESS_STATE_DENYING: constants.ACCESS_STATE_QUEUED_TO_DENY, } actual_rule = ( self.access_helper.get_and_update_share_instance_access_rule( self.context, rule['id'], updates=updates, share_instance_id=share['instance']['id'], conditionally_change=conditionally_change) ) self.assertEqual(rule['id'], actual_rule['access_id']) if 'ing' in initial_state: self.assertEqual(constants.ACCESS_STATE_QUEUED_TO_DENY, actual_rule['state']) self.assertFalse(mock_debug_log.called) else: self.assertEqual(initial_state, actual_rule['state']) mock_debug_log.assert_called_once() @ddt.ddt class ShareInstanceAccessTestCase(test.TestCase): def setUp(self): super(ShareInstanceAccessTestCase, self).setUp() self.driver = self.mock_class("manila.share.driver.ShareDriver", mock.Mock()) self.access_helper = access.ShareInstanceAccess(db, self.driver) self.context = context.RequestContext('fake_user', 'fake_project') @ddt.data(constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_DENYING) def test_update_access_rules_an_update_is_in_progress(self, initial_state): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) share_instance = share['instance'] db_utils.create_access(share_id=share['id'], state=initial_state) mock_debug_log = self.mock_object(access.LOG, 'debug') self.mock_object(self.access_helper, '_update_access_rules') get_and_update_call = self.mock_object( self.access_helper, 'get_and_update_share_instance_access_rules', 
mock.Mock(side_effect=self.access_helper. get_and_update_share_instance_access_rules)) retval = self.access_helper.update_access_rules( self.context, share_instance['id']) expected_filters = { 'state': (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_UPDATING), } self.assertIsNone(retval) mock_debug_log.assert_called_once() get_and_update_call.assert_called_once_with( self.context, filters=expected_filters, share_instance_id=share_instance['id']) self.assertFalse(self.access_helper._update_access_rules.called) def test_update_access_rules_nothing_to_update(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) share_instance = share['instance'] db_utils.create_access(share_id=share['id'], state=constants.STATUS_ACTIVE) mock_debug_log = self.mock_object(access.LOG, 'debug') self.mock_object(self.access_helper, '_update_access_rules') get_and_update_call = self.mock_object( self.access_helper, 'get_and_update_share_instance_access_rules', mock.Mock(side_effect=self.access_helper. 
get_and_update_share_instance_access_rules)) retval = self.access_helper.update_access_rules( self.context, share_instance['id']) expected_rule_filter_1 = { 'state': (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_UPDATING), } expected_rule_filter_2 = { 'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_QUEUED_TO_DENY, constants.ACCESS_STATE_QUEUED_TO_UPDATE), } expected_conditionally_change = { constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_QUEUED_TO_DENY: constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_QUEUED_TO_UPDATE: constants.ACCESS_STATE_UPDATING, } self.assertIsNone(retval) mock_debug_log.assert_called_once() get_and_update_call.assert_has_calls( [ mock.call(self.context, filters=expected_rule_filter_1, share_instance_id=share_instance['id']), mock.call(self.context, filters=expected_rule_filter_2, share_instance_id=share_instance['id'], conditionally_change=expected_conditionally_change), ]) self.assertFalse(self.access_helper._update_access_rules.called) @ddt.data(True, False) def test_update_access_rules_delete_all_rules(self, delete_all_rules): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) share_instance = share['instance'] db_utils.create_access( share_id=share['id'], state=constants.STATUS_ACTIVE) db_utils.create_access( share_id=share['id'], state=constants.ACCESS_STATE_QUEUED_TO_APPLY) db_utils.create_access( share_id=share['id'], state=constants.ACCESS_STATE_QUEUED_TO_DENY) mock_debug_log = self.mock_object(access.LOG, 'debug') self.mock_object(self.access_helper, '_update_access_rules') get_and_update_call = self.mock_object( self.access_helper, 'get_and_update_share_instance_access_rules', mock.Mock(side_effect=self.access_helper. 
get_and_update_share_instance_access_rules)) retval = self.access_helper.update_access_rules( self.context, share_instance['id'], delete_all_rules=delete_all_rules) expected_rule_filter_1 = { 'state': (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_UPDATING), } expected_rule_filter_2 = { 'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_QUEUED_TO_DENY, constants.ACCESS_STATE_QUEUED_TO_UPDATE), } expected_conditionally_change = { constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_QUEUED_TO_DENY: constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_QUEUED_TO_UPDATE: constants.ACCESS_STATE_UPDATING, } expected_get_and_update_calls = [] if delete_all_rules: deny_all_updates = { 'state': constants.ACCESS_STATE_QUEUED_TO_DENY, } expected_get_and_update_calls = [ mock.call(self.context, updates=deny_all_updates, share_instance_id=share_instance['id']), ] expected_get_and_update_calls.extend([ mock.call(self.context, filters=expected_rule_filter_1, share_instance_id=share_instance['id']), mock.call(self.context, filters=expected_rule_filter_2, share_instance_id=share_instance['id'], conditionally_change=expected_conditionally_change), ]) self.assertIsNone(retval) mock_debug_log.assert_called_once() get_and_update_call.assert_has_calls(expected_get_and_update_calls) self.access_helper._update_access_rules.assert_called_once_with( self.context, share_instance['id'], share_server=None) @ddt.data(*itertools.product( (True, False), (constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_ACTIVE))) @ddt.unpack def test__update_access_rules_with_driver_updates( self, driver_returns_updates, access_state): expected_access_rules_status = ( constants.STATUS_ACTIVE if access_state == constants.ACCESS_STATE_ACTIVE else constants.SHARE_INSTANCE_RULES_ERROR ) share = db_utils.create_share( status=constants.STATUS_AVAILABLE, access_rules_status=expected_access_rules_status) 
share_instance_id = share['instance']['id'] rule_1 = db_utils.create_access( share_id=share['id'], state=access_state) rule_1 = db.share_instance_access_get( self.context, rule_1['id'], share_instance_id) rule_2 = db_utils.create_access( share_id=share['id'], state=constants.ACCESS_STATE_APPLYING) rule_2 = db.share_instance_access_get( self.context, rule_2['id'], share_instance_id) rule_3 = db_utils.create_access( share_id=share['id'], state=constants.ACCESS_STATE_DENYING) rule_3 = db.share_instance_access_get( self.context, rule_3['id'], share_instance_id) if driver_returns_updates: driver_rule_updates = { rule_3['access_id']: {'access_key': 'alic3h4sAcc355'}, rule_2['access_id']: {'state': access_state} } else: driver_rule_updates = None shr_instance_access_rules_status_update_call = self.mock_object( self.access_helper, 'get_and_update_share_instance_access_rules_status', mock.Mock(side_effect=self.access_helper. get_and_update_share_instance_access_rules_status)) all_access_rules_update_call = self.mock_object( self.access_helper, 'get_and_update_share_instance_access_rules', mock.Mock(side_effect=self.access_helper. get_and_update_share_instance_access_rules)) one_access_rule_update_call = self.mock_object( self.access_helper, 'get_and_update_share_instance_access_rule', mock.Mock(side_effect=self.access_helper. 
get_and_update_share_instance_access_rule)) driver_call = self.mock_object( self.access_helper.driver, 'update_access', mock.Mock(return_value=driver_rule_updates)) self.mock_object(self.access_helper, '_check_needs_refresh', mock.Mock(return_value=False)) retval = self.access_helper._update_access_rules( self.context, share_instance_id, share_server='fake_server') # Expected Values: if access_state != constants.ACCESS_STATE_ERROR: expected_rules_to_be_on_share = [r['id'] for r in (rule_1, rule_2)] else: expected_rules_to_be_on_share = [rule_2['id']] expected_filters_1 = { 'state': (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_ACTIVE, constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_UPDATING), } expected_filters_2 = {'state': constants.STATUS_ERROR} expected_get_and_update_calls = [ mock.call(self.context, filters=expected_filters_1, share_instance_id=share_instance_id), mock.call(self.context, filters=expected_filters_2, share_instance_id=share_instance_id), ] expected_access_rules_status_change_cond1 = { constants.STATUS_ACTIVE: constants.SHARE_INSTANCE_RULES_SYNCING, } if access_state == constants.SHARE_INSTANCE_RULES_ERROR: expected_access_rules_status_change_cond2 = { constants.SHARE_INSTANCE_RULES_SYNCING: constants.SHARE_INSTANCE_RULES_ERROR, } else: expected_access_rules_status_change_cond2 = { constants.SHARE_INSTANCE_RULES_SYNCING: constants.STATUS_ACTIVE, constants.SHARE_INSTANCE_RULES_ERROR: constants.STATUS_ACTIVE, } call_args = driver_call.call_args_list[0][0] call_kwargs = driver_call.call_args_list[0][1] access_rules_to_be_on_share = [r['id'] for r in call_args[2]] # Asserts self.assertIsNone(retval) self.assertEqual(share_instance_id, call_args[1]['id']) self.assertIsInstance(access_rules_to_be_on_share, list) self.assertEqual(len(expected_rules_to_be_on_share), len(access_rules_to_be_on_share)) for pool in expected_rules_to_be_on_share: self.assertIn(pool, access_rules_to_be_on_share) self.assertEqual(1, 
len(call_kwargs['add_rules'])) self.assertEqual(rule_2['id'], call_kwargs['add_rules'][0]['id']) self.assertEqual(1, len(call_kwargs['delete_rules'])) self.assertEqual(rule_3['id'], call_kwargs['delete_rules'][0]['id']) self.assertEqual('fake_server', call_kwargs['share_server']) shr_instance_access_rules_status_update_call.assert_has_calls([ mock.call( self.context, share_instance_id=share_instance_id, conditionally_change=expected_access_rules_status_change_cond1 ), mock.call( self.context, share_instance_id=share_instance_id, conditionally_change=expected_access_rules_status_change_cond2 ), ]) if driver_returns_updates: expected_conditional_state_updates = { constants.ACCESS_STATE_APPLYING: access_state, constants.ACCESS_STATE_DENYING: access_state, constants.ACCESS_STATE_UPDATING: access_state, constants.ACCESS_STATE_ACTIVE: access_state, } expected_access_rule_update_calls = [ mock.call( self.context, rule_3['access_id'], updates={'access_key': 'alic3h4sAcc355'}, share_instance_id=share_instance_id, conditionally_change={}), mock.call( self.context, rule_2['access_id'], updates=mock.ANY, share_instance_id=share_instance_id, conditionally_change=expected_conditional_state_updates) ] one_access_rule_update_call.assert_has_calls( expected_access_rule_update_calls, any_order=True) else: self.assertFalse(one_access_rule_update_call.called) expected_conditionally_change = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ACTIVE, constants.ACCESS_STATE_UPDATING: constants.ACCESS_STATE_ACTIVE, } expected_get_and_update_calls.append( mock.call(self.context, share_instance_id=share_instance_id, conditionally_change=expected_conditionally_change)) all_access_rules_update_call.assert_has_calls( expected_get_and_update_calls, any_order=True) share_instance = db.share_instance_get( self.context, share_instance_id) self.assertEqual(expected_access_rules_status, share_instance['access_rules_status']) @ddt.data(True, False) def 
test__update_access_rules_recursive_driver_exception(self, drv_exc): other = access.ShareInstanceAccess(db, None) share = db_utils.create_share( status=constants.STATUS_AVAILABLE, access_rules_status=constants.SHARE_INSTANCE_RULES_SYNCING) share_instance_id = share['instance']['id'] rule_4 = [] get_and_update_count = [1] drv_count = [1] def _get_and_update_side_effect(*args, **kwargs): # The third call to this method needs to create a new access rule mtd = other.get_and_update_share_instance_access_rules if get_and_update_count[0] == 3: rule_4.append( db_utils.create_access( state=constants.ACCESS_STATE_QUEUED_TO_APPLY, share_id=share['id'])) get_and_update_count[0] += 1 return mtd(*args, **kwargs) def _driver_side_effect(*args, **kwargs): if drv_exc and drv_count[0] == 2: raise exception.ManilaException('fake') drv_count[0] += 1 rule_kwargs = {'share_id': share['id'], 'access_level': 'rw'} rule_1 = db_utils.create_access(state=constants.ACCESS_STATE_APPLYING, **rule_kwargs) rule_2 = db_utils.create_access(state=constants.ACCESS_STATE_ACTIVE, **rule_kwargs) rule_3 = db_utils.create_access(state=constants.ACCESS_STATE_DENYING, **rule_kwargs) self.mock_object(self.access_helper, 'get_and_update_share_instance_access_rules', mock.Mock(side_effect=_get_and_update_side_effect)) self.mock_object(self.access_helper.driver, 'update_access', mock.Mock(side_effect=_driver_side_effect)) if drv_exc: self.assertRaises(exception.ManilaException, self.access_helper._update_access_rules, self.context, share_instance_id) else: retval = self.access_helper._update_access_rules(self.context, share_instance_id) self.assertIsNone(retval) expected_filters_1 = { 'state': (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_ACTIVE, constants.ACCESS_STATE_DENYING), } conditionally_change_2 = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ACTIVE, } expected_filters_3 = { 'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_QUEUED_TO_DENY, 
constants.ACCESS_STATE_QUEUED_TO_UPDATE), } expected_conditionally_change_3 = { constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_QUEUED_TO_DENY: constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_QUEUED_TO_UPDATE: constants.ACCESS_STATE_UPDATING, } expected_conditionally_change_4 = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_DENYING: constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_UPDATING: constants.ACCESS_STATE_ERROR, } expected_get_and_update_calls = [ mock.call(self.context, filters=expected_filters_1, share_instance_id=share_instance_id), mock.call(self.context, share_instance_id=share_instance_id, conditionally_change=conditionally_change_2), mock.call(self.context, filters=expected_filters_3, share_instance_id=share_instance_id, conditionally_change=expected_conditionally_change_3), mock.call(self.context, filters=expected_filters_1, share_instance_id=share_instance_id), ] if drv_exc: expected_get_and_update_calls.append( mock.call( self.context, share_instance_id=share_instance_id, conditionally_change=expected_conditionally_change_4)) else: expected_get_and_update_calls.append( mock.call(self.context, share_instance_id=share_instance_id, conditionally_change=conditionally_change_2)) # Verify rule changes: # 'denying' rule must not exist self.assertRaises(exception.NotFound, db.share_access_get, self.context, rule_3['id']) # 'applying' rule must be set to 'active' rules_that_must_be_active = (rule_1, rule_2) if not drv_exc: rules_that_must_be_active += (rule_4[0], ) for rule in rules_that_must_be_active: rule = db.share_access_get(self.context, rule['id']) self.assertEqual(constants.ACCESS_STATE_ACTIVE, rule['state']) # access_rules_status must be as expected expected_access_rules_status = ( constants.SHARE_INSTANCE_RULES_ERROR if drv_exc else constants.STATUS_ACTIVE) share_instance = db.share_instance_get(self.context, share_instance_id) self.assertEqual( 
            expected_access_rules_status, share_instance['access_rules_status'])

    def test__update_access_rules_for_migration(self):
        """Rules sent to the driver are cast to 'ro' while migrating.

        The share instance is created with ``cast_rules_to_readonly=True``,
        so ``_update_access_rules`` must hand every rule to the driver with
        an 'ro' access level and with empty add/delete rule deltas.
        """
        share = db_utils.create_share()
        instance = db_utils.create_share_instance(
            status=constants.STATUS_MIGRATING,
            access_rules_status=constants.STATUS_ACTIVE,
            cast_rules_to_readonly=True,
            share_id=share['id'])
        rule_kwargs = {'share_id': share['id'], 'access_level': 'rw'}
        # rule_1 is 'rw' and active; rule_2 is 'ro' and still applying.
        rule_1 = db_utils.create_access(
            state=constants.ACCESS_STATE_ACTIVE, **rule_kwargs)
        rule_1 = db.share_instance_access_get(
            self.context, rule_1['id'], instance['id'])
        rule_2 = db_utils.create_access(
            state=constants.ACCESS_STATE_APPLYING, share_id=share['id'],
            access_level='ro')
        rule_2 = db.share_instance_access_get(
            self.context, rule_2['id'], instance['id'])
        driver_call = self.mock_object(
            self.access_helper.driver, 'update_access',
            mock.Mock(return_value=None))
        # Prevent a second recursion pass so exactly one driver call is made.
        self.mock_object(self.access_helper, '_check_needs_refresh',
                         mock.Mock(return_value=False))

        retval = self.access_helper._update_access_rules(
            self.context, instance['id'], share_server='fake_server')

        # Inspect the positional and keyword args of the single driver call.
        call_args = driver_call.call_args_list[0][0]
        call_kwargs = driver_call.call_args_list[0][1]
        access_rules_to_be_on_share = [r['id'] for r in call_args[2]]
        access_levels = [r['access_level'] for r in call_args[2]]
        expected_rules_to_be_on_share = ([rule_1['id'], rule_2['id']])
        self.assertIsNone(retval)
        self.assertEqual(instance['id'], call_args[1]['id'])
        self.assertIsInstance(access_rules_to_be_on_share, list)
        self.assertEqual(len(expected_rules_to_be_on_share),
                         len(access_rules_to_be_on_share))
        for pool in expected_rules_to_be_on_share:
            self.assertIn(pool, access_rules_to_be_on_share)
        # Both rules must be read-only regardless of their original access
        # level, because the instance is migrating.
        self.assertEqual(['ro'] * len(expected_rules_to_be_on_share),
                         access_levels)
        self.assertEqual(0, len(call_kwargs['add_rules']))
        self.assertEqual(0, len(call_kwargs['delete_rules']))
        self.assertEqual('fake_server', call_kwargs['share_server'])

    @ddt.data(True, False)
    def test__check_needs_refresh(self, expected_needs_refresh):
        """Queued rules trigger a refresh and transition to transient states.

        When rules are in 'queued_to_*' states, _check_needs_refresh must
        report True and conditionally move them to the matching transient
        ('applying'/'denying'/'updating') state; otherwise it reports False
        and leaves rule states untouched.
        """
        states = (
            [constants.ACCESS_STATE_QUEUED_TO_DENY,
             constants.ACCESS_STATE_QUEUED_TO_APPLY]
            if expected_needs_refresh else [constants.ACCESS_STATE_ACTIVE]
        )
        share = db_utils.create_share(
            status=constants.STATUS_AVAILABLE,
            access_rules_status=constants.SHARE_INSTANCE_RULES_SYNCING)
        share_instance_id = share['instance']['id']
        rule_kwargs = {'share_id': share['id'], 'access_level': 'rw'}
        rule_1 = db_utils.create_access(state=states[0], **rule_kwargs)
        db_utils.create_access(state=constants.ACCESS_STATE_ACTIVE,
                               **rule_kwargs)
        db_utils.create_access(state=constants.ACCESS_STATE_DENYING,
                               **rule_kwargs)
        rule_4 = db_utils.create_access(state=states[-1], **rule_kwargs)
        # Spy on the real method: the true implementation still runs so the
        # DB state transitions actually happen and can be verified below.
        get_and_update_call = self.mock_object(
            self.access_helper, 'get_and_update_share_instance_access_rules',
            mock.Mock(side_effect=self.access_helper.
                      get_and_update_share_instance_access_rules))

        needs_refresh = self.access_helper._check_needs_refresh(
            self.context, share_instance_id)

        expected_filter = {
            'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY,
                      constants.ACCESS_STATE_QUEUED_TO_DENY,
                      constants.ACCESS_STATE_QUEUED_TO_UPDATE),
        }
        expected_conditionally_change = {
            constants.ACCESS_STATE_QUEUED_TO_APPLY:
                constants.ACCESS_STATE_APPLYING,
            constants.ACCESS_STATE_QUEUED_TO_DENY:
                constants.ACCESS_STATE_DENYING,
            constants.ACCESS_STATE_QUEUED_TO_UPDATE:
                constants.ACCESS_STATE_UPDATING,
        }
        self.assertEqual(expected_needs_refresh, needs_refresh)
        get_and_update_call.assert_called_once_with(
            self.context, filters=expected_filter,
            share_instance_id=share_instance_id,
            conditionally_change=expected_conditionally_change)
        # Re-read the rules to verify the conditional state transitions.
        rule_1 = db.share_instance_access_get(
            self.context, rule_1['id'], share_instance_id)
        rule_4 = db.share_instance_access_get(
            self.context, rule_4['id'], share_instance_id)
        if expected_needs_refresh:
            self.assertEqual(constants.ACCESS_STATE_DENYING, rule_1['state'])
            self.assertEqual(constants.ACCESS_STATE_APPLYING,
                             rule_4['state'])
        else:
            self.assertEqual(states[0], rule_1['state'])
            self.assertEqual(states[-1],
                             rule_4['state'])

    @ddt.data(('nfs', True, False), ('nfs', False, True),
              ('cifs', True, False), ('cifs', False, False),
              ('cephx', True, False), ('cephx', False, False))
    @ddt.unpack
    def test__update_rules_through_share_driver(self, proto, enable_ipv6,
                                                filtered):
        """IPv6 rules are filtered out only for NFS without IPv6 support.

        'filtered' is True only for the (nfs, ipv6-disabled) combination:
        then the helper must strip IPv6 rules before calling the driver;
        in every other case the full rule lists are passed through.
        """
        self.driver.ipv6_implemented = enable_ipv6
        share_instance = {'share_proto': proto}
        # Each list gets an independent pass/fail (IPv4/IPv6) partition.
        pass_rules, fail_rules = self._get_pass_rules_and_fail_rules()
        pass_add_rules, fail_add_rules = (
            self._get_pass_rules_and_fail_rules())
        pass_delete_rules, fail_delete_rules = (
            self._get_pass_rules_and_fail_rules())
        pass_update_rules, fail_update_rules = (
            self._get_pass_rules_and_fail_rules())
        test_rules = pass_rules + fail_rules
        test_add_rules = pass_add_rules + fail_add_rules
        test_delete_rules = pass_delete_rules + fail_delete_rules
        test_update_rules = pass_update_rules + fail_update_rules
        fake_expect_driver_update_rules = pass_rules
        update_access_call = self.mock_object(
            self.access_helper.driver, 'update_access',
            mock.Mock(return_value=pass_rules))

        driver_update_rules = (
            self.access_helper._update_rules_through_share_driver(
                self.context, share_instance=share_instance,
                access_rules_to_be_on_share=test_rules,
                add_rules=test_add_rules,
                delete_rules=test_delete_rules,
                update_rules=test_update_rules,
                rules_to_be_removed_from_db=test_rules,
                share_server=None))

        if filtered:
            update_access_call.assert_called_once_with(
                self.context, share_instance, pass_rules,
                add_rules=pass_add_rules, delete_rules=pass_delete_rules,
                update_rules=pass_update_rules,
                share_server=None)
        else:
            update_access_call.assert_called_once_with(
                self.context, share_instance, test_rules,
                add_rules=test_add_rules, delete_rules=test_delete_rules,
                update_rules=test_update_rules,
                share_server=None)
        self.assertEqual(fake_expect_driver_update_rules,
                         driver_update_rules)

    def _get_pass_rules_and_fail_rules(self):
        """Build (IPv4/user, IPv6) rule lists with a random octet/segment."""
        random_value = str(random.randint(10, 32))
        pass_rules = [
            {
                'access_type': 'ip',
                'access_to': '1.1.1.'
                + random_value,
            },
            {
                'access_type': 'ip',
                'access_to': '1.1.%s.0/24' % random_value,
            },
            {
                'access_type': 'user',
                'access_to': 'fake_user' + random_value,
            },
        ]
        # IPv6 rules: these are the ones expected to be filtered out when
        # the driver has not implemented IPv6 support.
        fail_rules = [
            {
                'access_type': 'ip',
                'access_to': '1001::' + random_value,
            },
            {
                'access_type': 'ip',
                'access_to': '%s::/64' % random_value,
            },
        ]
        return pass_rules, fail_rules

    def test_update_share_instances_access_rules_status(self):
        """Status update is delegated to db.share_instance_status_update."""
        mock_db_instances_update = self.mock_object(
            db, 'share_instance_status_update')
        share_instances = ['fake_instance_id', 'fake_instance_id_2']

        self.access_helper.update_share_instances_access_rules_status(
            self.context, 'fake_status', share_instances)

        mock_db_instances_update.assert_called_once_with(
            self.context, share_instances,
            {'access_rules_status': 'fake_status'})

    @ddt.data(True, False)
    def test_reset_rules_to_queueing_states(self, reset_active):
        """Transient rules are re-queued; active ones only on reset_active.

        'applying' -> 'queued_to_apply', 'denying' -> 'queued_to_deny'
        always; 'active' rules move to 'queued_to_apply' only when
        reset_active is True. The instance's access_rules_status must end
        up 'syncing' in either case.
        """
        share = db_utils.create_share(
            status=constants.STATUS_AVAILABLE,
            # if rules are applying/denying, status would be 'syncing', but,
            # lets test the transition to syncing when asked to reset_active
            access_rules_status=constants.STATUS_ACTIVE)
        share_instance_id = share['instance']['id']
        rule_kwargs = {'share_id': share['id'], 'access_level': 'rw'}
        r1 = db_utils.create_access(
            state=constants.ACCESS_STATE_APPLYING, **rule_kwargs)
        r2 = db_utils.create_access(
            state=constants.ACCESS_STATE_DENYING, **rule_kwargs)
        r3 = db_utils.create_access(
            state=constants.ACCESS_STATE_ACTIVE, **rule_kwargs)
        r4 = db_utils.create_access(
            state=constants.ACCESS_STATE_ACTIVE, **rule_kwargs)

        self.access_helper.reset_rules_to_queueing_states(
            self.context, share_instance_id, reset_active=reset_active)

        rules = db.share_access_get_all_for_instance(
            self.context, share_instance_id)
        share_instance = db.share_instance_get(self.context,
                                               share_instance_id)
        # Index instance-level rules by their parent access rule id.
        rules_dict = {r['access_id']: r for r in rules}
        self.assertEqual(constants.SHARE_INSTANCE_RULES_SYNCING,
                         share_instance['access_rules_status'])
        self.assertEqual(constants.ACCESS_STATE_QUEUED_TO_APPLY,
rules_dict[r1['id']]['state']) self.assertEqual(constants.ACCESS_STATE_QUEUED_TO_DENY, rules_dict[r2['id']]['state']) expected_state_for_previously_active = ( constants.ACCESS_STATE_QUEUED_TO_APPLY if reset_active else constants.ACCESS_STATE_ACTIVE ) self.assertEqual(expected_state_for_previously_active, rules_dict[r3['id']]['state']) self.assertEqual(expected_state_for_previously_active, rules_dict[r4['id']]['state']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_api.py0000664000175000017500000124766100000000000020766 0ustar00zuulzuul00000000000000# Copyright 2012 NetApp. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the Share API module.""" import copy import datetime import json from unittest import mock import ddt from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import uuidutils from webob import exc as webob_exc from manila.common import constants from manila import context from manila.data import rpcapi as data_rpc from manila import db as db_api from manila.db.sqlalchemy import models from manila import exception from manila.keymgr import barbican as barbican_api from manila import policy from manila import quota from manila import share from manila.share import api as share_api from manila.share import share_types from manila import test from manila.tests import db_utils from manila.tests import fake_share as fakes from manila.tests import utils as test_utils from manila import utils CONF = cfg.CONF _FAKE_LIST_OF_ALL_SHARES = [ { 'name': 'foo', 'description': 'ds', 'status': constants.STATUS_AVAILABLE, 'project_id': 'fake_pid_1', 'share_server_id': 'fake_server_1', }, { 'name': 'bar', 'status': constants.STATUS_ERROR, 'project_id': 'fake_pid_2', 'share_server_id': 'fake_server_2', }, { 'name': 'foo1', 'description': 'ds1', 'status': constants.STATUS_AVAILABLE, 'project_id': 'fake_pid_2', 'share_server_id': 'fake_server_3', }, { 'name': 'bar', 'status': constants.STATUS_ERROR, 'project_id': 'fake_pid_2', 'share_server_id': 'fake_server_3', }, ] _FAKE_LIST_OF_ALL_SNAPSHOTS = [ { 'name': 'foo', 'status': constants.STATUS_AVAILABLE, 'project_id': 'fake_pid_1', 'share_id': 'fake_server_1', }, { 'name': 'bar', 'status': constants.STATUS_ERROR, 'project_id': 'fake_pid_2', 'share_id': 'fake_server_2', }, { 'name': 'foo', 'status': constants.STATUS_AVAILABLE, 'project_id': 'fake_pid_2', 'share_id': 'fake_share_id_3', }, { 'name': 'bar', 'status': constants.STATUS_ERROR, 'project_id': 'fake_pid_2', 'share_id': 'fake_share_id_3', }, ] @ddt.ddt class ShareAPITestCase(test.TestCase): def setUp(self): super(ShareAPITestCase, self).setUp() 
        self.context = context.get_admin_context()
        self.scheduler_rpcapi = mock.Mock()
        self.share_rpcapi = mock.Mock()
        self.api = share.API()
        # Pretend there are no resource locks held on anything.
        self.mock_object(self.api.db, 'resource_lock_get_all',
                         mock.Mock(return_value=([], None)))
        self.mock_object(self.api, 'scheduler_rpcapi',
                         self.scheduler_rpcapi)
        self.mock_object(self.api, 'share_rpcapi', self.share_rpcapi)
        # Quota reservations become no-ops by default; individual tests
        # re-mock 'reserve' when they need to inspect it.
        self.mock_object(quota.QUOTAS, 'reserve',
                         lambda *args, **kwargs: None)
        # Freeze "now" so timestamp comparisons are deterministic.
        self.dt_utc = timeutils.utcnow()
        self.mock_object(timeutils, 'utcnow',
                         mock.Mock(return_value=self.dt_utc))
        self.mock_object(share_api.policy, 'check_policy')
        self._setup_sized_share_types()

    def _setup_sized_share_types(self):
        """create a share type with size limit"""
        spec_dict = {share_types.MIN_SIZE_KEY: 2,
                     share_types.MAX_SIZE_KEY: 4,
                     share_types.MAX_EXTEND_SIZE_KEY: 6}
        db_utils.create_share_type(name='limit', extra_specs=spec_dict)
        self.sized_sha_type = db_api.share_type_get_by_name(self.context,
                                                            'limit')

    def _setup_create_mocks(self, protocol='nfs', **kwargs):
        """Create a share fixture and stub out share_create/create_instance.

        Returns the share model plus the request data dict the tests feed
        into ``self.api.create``.
        """
        share = db_utils.create_share(
            user_id=self.context.user_id,
            project_id=self.context.project_id,
            share_type_id=kwargs.pop('share_type_id', 'fake'),
            **kwargs
        )
        share_data = {
            'share_proto': protocol,
            'size': 1,
            'display_name': 'fakename',
            'display_description': 'fakedesc',
            'availability_zone': 'fakeaz'
        }

        self.mock_object(db_api, 'share_create',
                         mock.Mock(return_value=share))
        self.mock_object(self.api, 'create_instance')

        return share, share_data

    def _setup_create_instance_mocks(self):
        """Stub everything create_instance touches; return (host, share, si).

        The share is created without an instance so the instance creation
        path itself is exercised by the test.
        """
        host = 'fake'
        share_type_id = "fake_share_type"
        share = db_utils.create_share(
            user_id=self.context.user_id,
            project_id=self.context.project_id,
            create_share_instance=False,
        )
        share_instance = db_utils.create_share_instance(
            share_id=share['id'],
            share_type_id=share_type_id)
        share_type = {
            'fake': 'fake',
            'mount_point_name_support': False
        }
        self.mock_object(db_api, 'share_instance_create',
                         mock.Mock(return_value=share_instance))
        self.mock_object(db_api, 'share_type_get',
                         mock.Mock(return_value=share_type))
        az_mock = mock.Mock()
        type(az_mock.return_value).id = mock.PropertyMock(
            return_value='fake_id')
        self.mock_object(db_api, 'availability_zone_get', az_mock)
        self.mock_object(self.api.share_rpcapi, 'create_share_instance')
        self.mock_object(self.api.scheduler_rpcapi, 'create_share_instance')

        return host, share, share_instance

    def _setup_create_from_snapshot_mocks(self, use_scheduler=True,
                                          host=None):
        """Fixture for create-from-snapshot tests.

        Returns (snapshot, share, share_data, request_spec); quota
        reserve/commit and share-type lookup are stubbed.
        """
        CONF.set_default("use_scheduler_creating_share_from_snapshot",
                         use_scheduler)

        share_type = fakes.fake_share_type()

        original_share = db_utils.create_share(
            user_id=self.context.user_id,
            project_id=self.context.project_id,
            status=constants.STATUS_AVAILABLE,
            host=host if host else 'fake',
            size=1,
            share_type_id=share_type['id'],
        )
        snapshot = db_utils.create_snapshot(
            share_id=original_share['id'],
            status=constants.STATUS_AVAILABLE,
            size=1
        )

        share, share_data = self._setup_create_mocks(
            snapshot_id=snapshot['id'], share_type_id=share_type['id'])
        request_spec = {
            'share_properties': share.to_dict(),
            'share_proto': share['share_proto'],
            'share_id': share['id'],
            'share_type': None,
            'snapshot_id': share['snapshot_id'],
        }

        self.mock_object(quota.QUOTAS, 'reserve',
                         mock.Mock(return_value='reservation'))
        self.mock_object(quota.QUOTAS, 'commit')
        self.mock_object(
            share_types, 'get_share_type',
            mock.Mock(return_value=share_type))

        return snapshot, share, share_data, request_spec

    def _setup_delete_mocks(self, status, snapshots=None, **kwargs):
        """Create a share in 'status' and stub the delete code path."""
        if snapshots is None:
            snapshots = []
        share = db_utils.create_share(status=status, **kwargs)
        self.mock_object(db_api, 'share_delete')
        self.mock_object(db_api, 'share_server_update')
        self.mock_object(db_api, 'share_snapshot_get_all_for_share',
                         mock.Mock(return_value=snapshots))
        self.mock_object(db_api, 'share_backups_get_all',
                         mock.Mock(return_value=[]))
        self.mock_object(self.api, 'delete_instance')
        return share

    def _setup_delete_share_instance_mocks(self, **kwargs):
        """Create a share and stub instance deletion; return the instance."""
        share = db_utils.create_share(**kwargs)
        self.mock_object(db_api, 'share_instance_update',
                         mock.Mock(return_value=share.instance))
        self.mock_object(self.api.share_rpcapi, 'delete_share_instance')
        self.mock_object(db_api, 'share_server_update')
        self.mock_object(db_api, 'share_instance_delete')
        return share.instance

    def test_get_all_admin_default_filters(self):
        """Admin with no opts lists only their project, deferred included."""
        self.mock_object(db_api, 'share_get_all_by_project',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
        ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True)

        shares = self.api.get_all(ctx)

        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            project_id='fake_pid_1',
            filters={'list_deferred_delete': True}, is_public=False
        )
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares)

    def test_get_all_admin_filter_by_all_tenants(self):
        """all_tenants=1 routes to the unscoped share_get_all query."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True)
        self.mock_object(db_api, 'share_get_all',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES))
        self.mock_object(share_api.policy, 'check_policy',
                         mock.Mock(return_value=True))

        shares = self.api.get_all(ctx, {'all_tenants': 1})

        db_api.share_get_all.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            filters={'list_deferred_delete': True})
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES, shares)

    def test_get_all_admin_filter_by_all_tenants_with_blank(self):
        """An empty all_tenants value still counts as truthy for admins."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True)
        self.mock_object(db_api, 'share_get_all',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES))
        self.mock_object(share_api.policy, 'check_policy',
                         mock.Mock(return_value=True))

        shares = self.api.get_all(ctx, {'all_tenants': ''})

        db_api.share_get_all.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            filters={'list_deferred_delete': True})
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES, shares)

    def test_get_all_admin_filter_by_all_tenants_with_false(self):
        """all_tenants='false' keeps the listing project-scoped."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True)
        self.mock_object(db_api,
                         'share_get_all_by_project',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))

        shares = self.api.get_all(ctx, {'all_tenants': 'false'})

        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            project_id='fake_pid_1',
            filters={'list_deferred_delete': True}, is_public=False
        )
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares)

    def test_get_all_admin_filter_by_all_tenants_with_invaild_value(self):
        """A non-boolean all_tenants value raises InvalidInput."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True)
        self.mock_object(db_api, 'share_get_all')

        self.assertRaises(
            exception.InvalidInput,
            self.api.get_all, ctx, {'all_tenants': 'wonk'})

    @ddt.data(
        ({'share_server_id': 'fake_share_server'},
         'list_by_share_server_id'),
        ({'host': 'fake_host'}, 'list_by_host'),
    )
    @ddt.unpack
    def test_get_all_by_non_admin_using_admin_filter(self, filters, policy):
        """Admin-only filters are rejected for non-admins via policy."""

        def fake_policy_checker(*args, **kwargs):
            # Raise only for the admin-gated policy, and only when the
            # caller is not an admin.
            if policy == args[2] and not args[0].is_admin:
                raise exception.NotAuthorized

        ctx = context.RequestContext('fake_uid', 'fake_pid_1',
                                     is_admin=False)
        mock_policy = self.mock_object(
            share_api.policy, 'check_policy',
            mock.Mock(side_effect=fake_policy_checker))

        self.assertRaises(
            exception.NotAuthorized,
            self.api.get_all, ctx, filters)

        mock_policy.assert_has_calls([
            mock.call(
                ctx, 'share', 'list_shares_in_deferred_deletion_states',
                do_raise=False),
            mock.call(ctx, 'share', policy)])

    def test_get_all_admin_filter_by_share_server_and_all_tenants(self):
        # NOTE(vponomaryov): if share_server_id provided, 'all_tenants' opt
        # should not make any influence.
        ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True)
        self.mock_object(db_api, 'share_get_all_by_share_server',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[2:]))
        self.mock_object(db_api, 'share_get_all')
        self.mock_object(db_api, 'share_get_all_by_project')
        mock_policy = self.mock_object(share_api.policy, 'check_policy')

        shares = self.api.get_all(
            ctx, {'share_server_id': 'fake_server_3', 'all_tenants': 1})

        mock_policy.assert_has_calls([
            mock.call(
                ctx, 'share', 'list_shares_in_deferred_deletion_states',
                do_raise=False),
            mock.call(ctx, 'share', 'list_all_projects', do_raise=False),
            mock.call(ctx, 'share', 'list_by_share_server_id')])
        db_api.share_get_all_by_share_server.assert_called_once_with(
            ctx, 'fake_server_3', sort_dir='desc', sort_key='created_at',
            filters={'list_deferred_delete': True},
        )
        # NOTE(review): assert_has_calls([]) is vacuously true; these only
        # document that the other query paths were not expected to be the
        # primary one — they do not prove they were never called.
        db_api.share_get_all_by_project.assert_has_calls([])
        db_api.share_get_all.assert_has_calls([])
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[2:], shares)

    def test_get_all_admin_filter_by_name(self):
        """display_name is passed through to the project-scoped query."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True)
        self.mock_object(
            db_api, 'share_get_all_by_project',
            mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1::2]))
        expected_filters = {'display_name': 'bar',
                            'list_deferred_delete': True}

        shares = self.api.get_all(ctx, {'display_name': 'bar'})

        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            project_id='fake_pid_2',
            filters=expected_filters, is_public=False
        )
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1::2], shares)

    @ddt.data(({'display_name': 'fo'}, 0), ({'display_description': 'd'}, 0),
              ({'display_name': 'foo', 'display_description': 'd'}, 0),
              ({'display_name': 'foo'}, 1), ({'display_description': 'ds'}, 1),
              ({'display_name~': 'foo', 'display_description~': 'ds'}, 2),
              ({'display_name': 'foo', 'display_description~': 'ds'}, 1),
              ({'display_name~': 'foo', 'display_description': 'ds'}, 1))
    @ddt.unpack
    def test_get_all_admin_filter_by_name_and_description(
            self, search_opts, get_share_number):
        """Name/description filters (exact and '~' inexact) hit the DB layer.

        get_share_number encodes how many fixture shares the (mocked) query
        is expected to yield for the given search options.
        """
        ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True)
        expected_result = []
        if get_share_number == 2:
            expected_result = _FAKE_LIST_OF_ALL_SHARES[0::2]
        elif get_share_number == 1:
            expected_result = _FAKE_LIST_OF_ALL_SHARES[:1]
        self.mock_object(db_api, 'share_get_all_by_project',
                         mock.Mock(return_value=expected_result))
        # Copy before mutating: the API adds 'list_deferred_delete' itself.
        expected_filters = copy.copy(search_opts)
        expected_filters.update({'list_deferred_delete': True})

        shares = self.api.get_all(ctx, search_opts)

        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            project_id='fake_pid_2',
            filters=expected_filters, is_public=False
        )
        self.assertEqual(get_share_number, len(shares))
        self.assertEqual(expected_result, shares)

    @ddt.data('id', 'path')
    def test_get_all_admin_filter_by_export_location(self, type):
        """export_location_id / export_location_path filters pass through."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True)
        self.mock_object(db_api, 'share_get_all_by_project',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:]))

        shares = self.api.get_all(ctx, {'export_location_' + type: 'test'})

        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            project_id='fake_pid_2',
            filters={'export_location_' + type: 'test',
                     'list_deferred_delete': True},
            is_public=False
        )
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares)

    def test_get_all_admin_filter_by_name_and_all_tenants(self):
        """'name' plus all_tenants uses the unscoped query."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True)
        self.mock_object(db_api, 'share_get_all',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[:1]))

        shares = self.api.get_all(ctx, {'name': 'foo', 'all_tenants': 1})

        db_api.share_get_all.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            filters={'list_deferred_delete': True})
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[:1], shares)

    def test_get_all_admin_filter_by_status(self):
        """'status' is forwarded inside the filters dict."""
        ctx = context.RequestContext('fake_uid',
                                     'fake_pid_2', is_admin=True)
        expected_filter = {'status': constants.STATUS_AVAILABLE,
                           'list_deferred_delete': True}
        self.mock_object(
            db_api, 'share_get_all_by_project',
            mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0::2]))

        shares = self.api.get_all(ctx, {'status': constants.STATUS_AVAILABLE})

        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            project_id='fake_pid_2',
            filters=expected_filter, is_public=False
        )
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0::2], shares)

    def test_get_all_admin_filter_by_status_and_all_tenants(self):
        """'status' with all_tenants=1 forwards both via share_get_all."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_2', is_admin=True)
        self.mock_object(
            db_api, 'share_get_all',
            mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1::2]))
        expected_filter = {'status': constants.STATUS_ERROR,
                           'list_deferred_delete': True}

        shares = self.api.get_all(
            ctx, {'status': constants.STATUS_ERROR, 'all_tenants': 1})

        db_api.share_get_all.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            filters=expected_filter)
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1::2], shares)

    def test_get_all_non_admin_filter_by_all_tenants(self):
        # Expected share list only by project of non-admin user
        ctx = context.RequestContext('fake_uid', 'fake_pid_2',
                                     is_admin=False)
        self.mock_object(db_api, 'share_get_all_by_project',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[1:]))
        # Policy denial means 'all_tenants' is silently ignored, and the
        # deferred-delete filter is not applied either.
        self.mock_policy_check = self.mock_object(
            policy, 'check_policy', mock.Mock(return_value=False))

        shares = self.api.get_all(ctx, {'all_tenants': 1})

        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            project_id='fake_pid_2', filters={}, is_public=False
        )
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares)

    def test_get_all_non_admin_with_name_and_status_filters(self):
        """Name filtering happens in the API layer on top of the DB query."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_2',
                                     is_admin=False)
        self.mock_object(
            db_api, 'share_get_all_by_project',
            mock.Mock(side_effect=[
                _FAKE_LIST_OF_ALL_SHARES[1::2],
            return_value=_FAKE_LIST_OF_ALL_SHARES[1:]))
        self.mock_policy_check = self.mock_object(
            policy, 'check_policy', mock.Mock(return_value=False))

        shares = self.api.get_all(ctx, {'is_public': is_public})

        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            project_id='fake_pid_2', filters={}, is_public=False
        )
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[1:], shares)

    @ddt.data('truefoo', 'bartrue')
    def test_get_all_invalid_public_value(self, is_public):
        """Strings that merely contain a boolean word are rejected."""
        ctx = context.RequestContext('fake_uid', 'fake_pid_2',
                                     is_admin=False)
        self.assertRaises(ValueError, self.api.get_all,
                          ctx, {'is_public': is_public})

    def test_get_all_with_sorting_valid(self):
        """Valid sort_key/sort_dir are forwarded to the DB query."""
        self.mock_object(db_api, 'share_get_all_by_project',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
        ctx = context.RequestContext('fake_uid', 'fake_pid_1',
                                     is_admin=False)
        self.mock_policy_check = self.mock_object(
            policy, 'check_policy', mock.Mock(return_value=False))

        shares = self.api.get_all(ctx, sort_key='status', sort_dir='asc')

        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='asc', sort_key='status',
            project_id='fake_pid_1', filters={}, is_public=False
        )
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares)

    def test_get_all_sort_key_invalid(self):
        """A non-string sort_key raises InvalidInput."""
        self.mock_object(db_api, 'share_get_all_by_project',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
        ctx = context.RequestContext('fake_uid', 'fake_pid_1',
                                     is_admin=False)
        self.assertRaises(
            exception.InvalidInput,
            self.api.get_all,
            ctx,
            sort_key=1,
        )

    def test_get_all_sort_dir_invalid(self):
        """A non-string sort_dir raises InvalidInput."""
        self.mock_object(db_api, 'share_get_all_by_project',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
        ctx = context.RequestContext('fake_uid', 'fake_pid_1',
                                     is_admin=False)
        self.assertRaises(
            exception.InvalidInput,
            self.api.get_all,
            ctx,
            sort_dir=1,
        )

    def _get_all_filter_metadata_or_extra_specs_valid(self, key):
        """Shared body: dict-valued metadata/extra_specs filters are valid."""
        self.mock_object(db_api, 'share_get_all_by_project',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
        ctx = context.RequestContext('fake_uid', 'fake_pid_1',
                                     is_admin=False)
        mock_policy_check = self.mock_object(
            policy, 'check_policy', mock.Mock(return_value=False))
        search_opts = {key: {'foo1': 'bar1', 'foo2': 'bar2'}}

        shares = self.api.get_all(ctx, search_opts=search_opts.copy())

        if key == 'extra_specs':
            # Filtering by extra_specs additionally requires the
            # share_types_extra_spec:index policy check.
            mock_policy_check.assert_has_calls([
                mock.call(ctx, 'share_types_extra_spec', 'index'),
                mock.call(
                    ctx, 'share',
                    'list_shares_in_deferred_deletion_states',
                    do_raise=False)])
        else:
            # NOTE(review): the trailing comma below makes this statement a
            # one-element tuple expression; it is harmless but looks
            # unintentional. Left untouched here.
            mock_policy_check.assert_called_once_with(
                ctx, 'share', 'list_shares_in_deferred_deletion_states',
                do_raise=False),
        db_api.share_get_all_by_project.assert_called_once_with(
            ctx, sort_dir='desc', sort_key='created_at',
            project_id='fake_pid_1', filters=search_opts, is_public=False)
        self.assertEqual(_FAKE_LIST_OF_ALL_SHARES[0], shares)

    def test_get_all_filter_by_metadata(self):
        self._get_all_filter_metadata_or_extra_specs_valid(key='metadata')

    def test_get_all_filter_by_extra_specs(self):
        self._get_all_filter_metadata_or_extra_specs_valid(key='extra_specs')

    def _get_all_filter_metadata_or_extra_specs_invalid(self, key):
        """Shared body: string-valued metadata/extra_specs filters fail."""
        self.mock_object(db_api, 'share_get_all_by_project',
                         mock.Mock(return_value=_FAKE_LIST_OF_ALL_SHARES[0]))
        ctx = context.RequestContext('fake_uid', 'fake_pid_1',
                                     is_admin=False)
        # A string, not a dict, is invalid input for these filters.
        search_opts = {key: "{'foo': 'bar'}"}
        self.assertRaises(exception.InvalidInput, self.api.get_all, ctx,
                          search_opts=search_opts)
        if key == 'extra_specs':
            share_api.policy.check_policy.assert_called_once_with(
                ctx, 'share_types_extra_spec', 'index')

    def test_get_all_filter_by_invalid_metadata(self):
        self._get_all_filter_metadata_or_extra_specs_invalid(key='metadata')

    def test_get_all_filter_by_invalid_extra_specs(self):
        self._get_all_filter_metadata_or_extra_specs_invalid(key='extra_specs')

    @ddt.data(True, False)
    def test_update_metadata_from_share_type_extra_specs(self,
                                                         with_metadata):
        """Scoped extra specs seed metadata; user metadata wins on conflict.

        Only keys listed in CONF.driver_updatable_metadata survive from the
        share type's scoped ('fake_driver:...') extra specs; user-provided
        metadata overrides those and is otherwise passed through.
        """
        share_type = fakes.fake_share_type(
            extra_specs={
                'driver_handles_share_servers': 'False',
                'fake_driver:dedupe': 'True',
                'fake_driver:encrypt': 'True',
                'fake_driver:snapshot_policy': 'daily',
                'provisioning:max_share_size': '10',
            }
        )
        user_metadata = {}
        if with_metadata:
            user_metadata = {
                'snapshot_policy': 'monthly',
                'tag': 't1',
                'max_share_size': '5',
            }
        CONF.set_default(
            "driver_updatable_metadata",
            ['dedupe', 'snapshot_policy', 'thin_provisioning'],
        )

        result = self.api.update_metadata_from_share_type_extra_specs(
            self.context, share_type, user_metadata
        )

        if with_metadata:
            # User's 'snapshot_policy' overrides the extra-spec value;
            # non-driver keys ('tag', 'max_share_size') pass through.
            self.assertEqual(
                result,
                {'dedupe': 'True', 'snapshot_policy': 'monthly',
                 'tag': 't1', 'max_share_size': '5'})
        else:
            self.assertEqual(
                result, {'dedupe': 'True', 'snapshot_policy': 'daily'})

    def test_update_share_network_subnet_from_metadata(self):
        """Only allow-listed subnet metadata reaches the backend via RPC."""
        CONF.set_default(
            "driver_updatable_subnet_metadata",
            ['dedupe', 'snapshot_policy', 'thin_provisioning'],
        )
        metadata = {
            'test_key': 'True',
            'snapshot_policy': 'monthly',
        }
        backend_metadata = {
            k: v for k, v in metadata.items()
            if k in CONF.driver_updatable_subnet_metadata}
        self.mock_object(
            db_api, 'share_server_get_all_by_host_and_or_share_subnet',
            mock.Mock(return_value=['fake_share_server']))
        mock_call = self.mock_object(
            self.api.share_rpcapi,
            'update_share_network_subnet_from_metadata')

        self.api.update_share_network_subnet_from_metadata(
            self.context, 'fake_sn_id', 'fake_sn_subnet_id', metadata)

        mock_call.assert_called_once_with(
            self.context, 'fake_sn_id', 'fake_sn_subnet_id',
            'fake_share_server', backend_metadata)

    def test_update_share_from_metadata(self):
        """Only driver-updatable keys are forwarded to the share RPC API."""
        CONF.set_default(
            "driver_updatable_metadata",
            ['dedupe', 'snapshot_policy', 'thin_provisioning'],
        )
        metadata = {
            'dedupe': 'True',
            'snapshot_policy': 'monthly',
            'max_share_size': '10'
        }
        backend_metadata = {
            k: v for k, v in metadata.items() if k != 'max_share_size'}
        self.mock_object(self.api, 'get',
                         mock.Mock(return_value='fake_share'))
        mock_call = self.mock_object(
            self.api.share_rpcapi, 'update_share_from_metadata'
        )
self.api.update_share_from_metadata(self.context, 'fake_id', metadata) mock_call.assert_called_once_with( self.context, 'fake_share', backend_metadata) @ddt.data(True, False) def test_create_public_and_private_share(self, is_public): share, share_data = self._setup_create_mocks(is_public=is_public) az = share_data.pop('availability_zone') self.api.create( self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'], availability_zone=az ) share['status'] = constants.STATUS_CREATING share['host'] = None self.assertSubDictMatch(share_data, db_api.share_create.call_args[0][1]) @ddt.data( {}, { constants.ExtraSpecs.SNAPSHOT_SUPPORT: True, }, { constants.ExtraSpecs.SNAPSHOT_SUPPORT: False, constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: False, }, { constants.ExtraSpecs.SNAPSHOT_SUPPORT: True, constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: False, }, { constants.ExtraSpecs.SNAPSHOT_SUPPORT: True, constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: True, } ) def test_create_default_snapshot_semantics(self, extra_specs): share, share_data = self._setup_create_mocks(is_public=False) az = share_data.pop('availability_zone') share_type = fakes.fake_share_type(extra_specs=extra_specs) self.api.create( self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'], availability_zone=az, share_type=share_type ) share['status'] = constants.STATUS_CREATING share['host'] = None share_data.update(extra_specs) if extra_specs.get('snapshot_support') is None: share_data['snapshot_support'] = False if extra_specs.get('create_share_from_snapshot_support') is None: share_data['create_share_from_snapshot_support'] = False self.assertSubDictMatch(share_data, db_api.share_create.call_args[0][1]) @ddt.data(*constants.SUPPORTED_SHARE_PROTOCOLS) def test_create_share_valid_protocol(self, proto): share, share_data = 
            self._setup_create_mocks(protocol=proto)
        az = share_data.pop('availability_zone')
        all_protos = ','.join(
            proto for proto in constants.SUPPORTED_SHARE_PROTOCOLS)
        data = dict(DEFAULT=dict(enabled_share_protocols=all_protos))
        with test_utils.create_temp_config_with_opts(data):
            self.api.create(
                self.context, proto, share_data['size'],
                share_data['display_name'],
                share_data['display_description'],
                availability_zone=az)
        share['status'] = constants.STATUS_CREATING
        share['host'] = None
        self.assertSubDictMatch(share_data,
                                db_api.share_create.call_args[0][1])

    @ddt.data(
        {'get_all_azs_return': [], 'subnet_by_az_side_effect': []},
        {'get_all_azs_return': [{'name': 'az1', 'id': 'az_id_1'}],
         'subnet_by_az_side_effect': [None]},
        {'get_all_azs_return': [{'name': 'az1', 'id': 'az_id_1'}],
         'subnet_by_az_side_effect': ['fake_sns_1']},
        {'get_all_azs_return': [{'name': 'az1', 'id': 'az_id_1'},
                                {'name': 'az2', 'id': 'az_id_2'}],
         'subnet_by_az_side_effect': [None, 'fake_sns_2']}
    )
    @ddt.unpack
    def test__get_all_availability_zones_with_subnets(
            self, get_all_azs_return, subnet_by_az_side_effect):
        """Only AZs that actually have subnets on the network are returned."""
        fake_share_network_id = 'fake_sn_id'
        self.mock_object(db_api, 'availability_zone_get_all',
                         mock.Mock(return_value=get_all_azs_return))
        self.mock_object(
            db_api, 'share_network_subnets_get_all_by_availability_zone_id',
            mock.Mock(side_effect=subnet_by_az_side_effect))
        # Build the expectation in lock-step with the mocked side effects:
        # an AZ contributes a name (and a multiple-subnet flag) only when a
        # subnet exists for it.
        expected_az_names = ([], {})
        expected_get_az_calls = []
        for index, value in enumerate(get_all_azs_return):
            expected_get_az_calls.append(mock.call(
                self.context, share_network_id=fake_share_network_id,
                availability_zone_id=value['id']))
            if subnet_by_az_side_effect[index] is not None:
                expected_az_names = ([value['name']], {value['id']: True})
        get_all_subnets = self.api._get_all_availability_zones_with_subnets
        compatible_azs = get_all_subnets(self.context, fake_share_network_id)
        db_api.availability_zone_get_all.assert_called_once_with(
            self.context)
        db_get_azs_with_subnet = (
            db_api.share_network_subnets_get_all_by_availability_zone_id)
        db_get_azs_with_subnet.assert_has_calls(expected_get_az_calls)
        self.assertEqual(expected_az_names, compatible_azs)

    def test_create_share_with_share_type_size_limit(self):
        """Sizes outside the share type's min/max bounds are rejected."""
        self.assertRaises(exception.InvalidInput, self.api.create,
                          self.context, 'nfs', 1, 'display_name',
                          'display_description',
                          share_type=self.sized_sha_type)
        self.assertRaises(exception.InvalidInput, self.api.create,
                          self.context, 'nfs', 5, 'display_name',
                          'display_description',
                          share_type=self.sized_sha_type)

    @ddt.data(
        {'availability_zones': None, 'compatible_azs_name': ['fake_az_1'],
         'compatible_azs_multiple': {}},
        {'availability_zones': ['fake_az_2'],
         'compatible_azs_name': ['fake_az_2'],
         'compatible_azs_multiple': {}},
        {'availability_zones': ['fake_az_1', 'faze_az_2', 'fake_az_3'],
         'compatible_azs_name': ['fake_az_3'],
         'compatible_azs_multiple': {'fake_az_3': 1}}
    )
    @ddt.unpack
    def test_create_share_with_subnets(self, availability_zones,
                                       compatible_azs_name,
                                       compatible_azs_multiple):
        """create() intersects requested AZs with subnet-compatible AZs."""
        share, share_data = self._setup_create_mocks()
        reservation = 'fake'
        self.mock_object(quota.QUOTAS, 'reserve',
                         mock.Mock(return_value=reservation))
        self.mock_object(self.api, '_get_all_availability_zones_with_subnets',
                         mock.Mock(return_value=[compatible_azs_name,
                                                 compatible_azs_multiple]))
        self.mock_object(quota.QUOTAS, 'commit')
        self.mock_object(self.api, 'create_instance')
        self.mock_object(db_api, 'share_get')
        fake_share_network_id = 'fake_sn_id'
        if availability_zones:
            expected_azs = (
                [az for az in availability_zones
                 if az in compatible_azs_name])
        else:
            expected_azs = compatible_azs_name
        az_multiple_sn_support_map = None
        if compatible_azs_multiple != {}:
            az_multiple_sn_support_map = compatible_azs_multiple
        self.api.create(
            self.context,
            share_data['share_proto'],
            share_data['size'],
            share_data['display_name'],
            share_data['display_description'],
            share_network_id=fake_share_network_id,
            availability_zones=availability_zones,
            az_request_multiple_subnet_support_map=az_multiple_sn_support_map)
        share['status'] =
            constants.STATUS_CREATING
        share['host'] = None
        quota.QUOTAS.reserve.assert_called_once()
        get_all_azs_sns = self.api._get_all_availability_zones_with_subnets
        get_all_azs_sns.assert_called_once_with(
            self.context, fake_share_network_id)
        quota.QUOTAS.commit.assert_called_once()
        self.api.create_instance.assert_called_once_with(
            self.context, share, share_network_id=fake_share_network_id,
            host=None, availability_zone=None, share_group=None,
            share_group_snapshot_member=None, share_type_id=None,
            availability_zones=expected_azs,
            az_request_multiple_subnet_support_map=compatible_azs_multiple,
            snapshot_host=None, scheduler_hints=None, mount_point_name=None,
            encryption_key_ref=None,
        )
        db_api.share_get.assert_called_once()

    @ddt.data(
        {'availability_zones': None, 'compatible_azs_name': [],
         'compatible_azs_multiple': []},
        {'availability_zones': ['fake_az_1'],
         'compatible_azs_name': ['fake_az_2'],
         'compatible_azs_multiple': []}
    )
    @ddt.unpack
    def test_create_share_with_subnets_invalid_azs(self, availability_zones,
                                                   compatible_azs_name,
                                                   compatible_azs_multiple):
        """create() fails when no requested AZ has a compatible subnet."""
        share, share_data = self._setup_create_mocks()
        reservation = 'fake'
        self.mock_object(quota.QUOTAS, 'reserve',
                         mock.Mock(return_value=reservation))
        self.mock_object(self.api, '_get_all_availability_zones_with_subnets',
                         mock.Mock(return_value=[compatible_azs_name,
                                                 compatible_azs_multiple]))
        self.mock_object(quota.QUOTAS, 'commit')
        self.mock_object(self.api, 'create_instance')
        self.mock_object(db_api, 'share_get')
        fake_share_network_id = 'fake_sn_id'
        self.assertRaises(
            exception.InvalidInput, self.api.create, self.context,
            share_data['share_proto'], share_data['size'],
            share_data['display_name'], share_data['display_description'],
            share_network_id=fake_share_network_id,
            availability_zones=availability_zones)
        quota.QUOTAS.reserve.assert_called_once()
        get_all_azs_sns = self.api._get_all_availability_zones_with_subnets
        get_all_azs_sns.assert_called_once_with(
            self.context, fake_share_network_id)

    def test_prefix_with_missing_extra_spec_mount_point_name_support(self):
        """mount_point_name is rejected when the type does not support it."""
        share, share_data = self._setup_create_mocks(is_public=True)
        az = share_data.pop('availability_zone')
        extra_specs = {'replication_type': 'readable',
                       'mount_point_name_support': False}
        self.mock_object(
            self.api, 'get_share_attributes_from_share_type',
            mock.Mock(return_value=extra_specs))
        self.assertRaises(
            exception.InvalidInput, self.api.create, self.context,
            share_data['share_proto'], share_data['size'],
            share_data['display_name'], share_data['display_description'],
            availability_zones=az, mount_point_name='fake_mp')

    def test_configure_default_prefix(self):
        """Configured default prefix is prepended to the mount point name."""
        share_type = {'extra_specs': {}}
        conf = dict(DEFAULT=dict(default_mount_point_prefix="manila_"))
        with test_utils.create_temp_config_with_opts(conf):
            self.context.project_id = 'project_id'
            mount_point_name = 'mount_point'
            result = self.api._prefix_mount_point_name(
                share_type, self.context, mount_point_name
            )
            self.assertEqual(result, 'manila_mount_point')

    def test_configure_empty_default_prefix(self):
        """An empty default prefix leaves the mount point name unchanged."""
        share_type = {'extra_specs': {}}
        conf = dict(DEFAULT=dict(default_mount_point_prefix=""))
        with test_utils.create_temp_config_with_opts(conf):
            self.context.project_id = 'project_id'
            mount_point_name = 'mount_point'
            result = self.api._prefix_mount_point_name(
                share_type, self.context, mount_point_name
            )
            self.assertEqual(result, 'mount_point')

    def test_prefix_with_valid_mount_point_name(self):
        """The share type's extra-spec prefix takes effect when set."""
        share_type = {
            'extra_specs': {
                constants.ExtraSpecs.PROVISIONING_MOUNT_POINT_PREFIX:
                    'prefix_',
            }
        }
        self.context.project_id = 'project_id'
        mount_point_name = 'mount_point'
        result = self.api._prefix_mount_point_name(
            share_type, self.context, mount_point_name
        )
        self.assertEqual(result, 'prefix_mount_point')

    def test_empty_prefix_with_valid_mount_point_name(self):
        """An empty extra-spec prefix leaves the mount point name as-is."""
        share_type = {
            'extra_specs': {
                constants.ExtraSpecs.PROVISIONING_MOUNT_POINT_PREFIX: '',
            }
        }
        self.context.project_id = 'project_id'
        mount_point_name = 'mount_point'
        result = self.api._prefix_mount_point_name(
            share_type, self.context, mount_point_name
        )
        self.assertEqual(result, 'mount_point')

    def test_prefix_with_valid_missing_extra_spec_mount_point_name(self):
        """Without prefix configuration, the project id is used as prefix."""
        share_type = {
            'extra_specs': {},
        }
        self.context.project_id = 'project_id'
        mount_point_name = 'mount_point'
        result = self.api._prefix_mount_point_name(
            share_type, self.context, mount_point_name
        )
        self.assertEqual(result, 'project_id_mount_point')

    def test_prefix_with_invalid_mount_point_name(self):
        """Mount point names with illegal characters are rejected."""
        share_type = \
            {
                'extra_specs': {
                    constants.ExtraSpecs.PROVISIONING_MOUNT_POINT_PREFIX:
                        'prefix',
                }
            }
        self.context.project_id = 'project_id'
        mount_point_name = 'invalid*name'
        self.assertRaises(
            exception.InvalidInput,
            self.api._prefix_mount_point_name,
            share_type,
            self.context,
            mount_point_name
        )

    def test_prefix_with_too_long_mount_point_name(self):
        """Over-length mount point names are rejected."""
        share_type = \
            {
                'extra_specs': {
                    constants.ExtraSpecs.PROVISIONING_MOUNT_POINT_PREFIX:
                        'prefix',
                }
            }
        self.context.project_id = 'project_id'
        mount_point_name = 'a' * 256
        self.assertRaises(
            exception.InvalidInput,
            self.api._prefix_mount_point_name,
            share_type,
            self.context,
            mount_point_name
        )

    @ddt.data(
        None, '', 'fake', 'nfsfake', 'cifsfake', 'glusterfsfake', 'hdfsfake')
    def test_create_share_invalid_protocol(self, proto):
        """Unknown or empty protocols are rejected even when all are enabled."""
        share, share_data = self._setup_create_mocks(protocol=proto)
        all_protos = ','.join(
            proto for proto in constants.SUPPORTED_SHARE_PROTOCOLS)
        data = dict(DEFAULT=dict(enabled_share_protocols=all_protos))
        with test_utils.create_temp_config_with_opts(data):
            self.assertRaises(
                exception.InvalidInput, self.api.create,
                self.context, proto, share_data['size'],
                share_data['display_name'],
                share_data['display_description'])

    @ddt.data({'overs': {'gigabytes': 'fake'},
               'expected_exception':
                   exception.ShareSizeExceedsAvailableQuota,
               'replication_type': None},
              {'overs': {'shares': 'fake'},
               'expected_exception': exception.ShareLimitExceeded,
               'replication_type': None},
              {'overs': {'replica_gigabytes': 'fake'},
               'expected_exception':
                   exception.ShareReplicaSizeExceedsAvailableQuota,
               'replication_type': constants.REPLICATION_TYPE_READABLE},
              {'overs': {'share_replicas': 'fake'},
               'expected_exception': exception.ShareReplicasLimitExceeded,
               'replication_type': constants.REPLICATION_TYPE_READABLE})
    @ddt.unpack
    def test_create_share_over_quota(self, overs, expected_exception,
                                     replication_type):
        """Each over-quota resource maps to its specific exception type."""
        extra_specs = {'replication_type': replication_type}
        share_type = db_utils.create_share_type(extra_specs=extra_specs)
        share_type = db_api.share_type_get(self.context, share_type['id'])
        share, share_data = self._setup_create_mocks(
            share_type_id=share_type['id'])
        az = share_data.pop('availability_zone')
        usages = {'gigabytes': {'reserved': 5, 'in_use': 5},
                  'shares': {'reserved': 10, 'in_use': 10},
                  'replica_gigabytes': {'reserved': 5, 'in_use': 5},
                  'share_replicas': {'reserved': 10, 'in_use': 10}}
        quotas = {'gigabytes': 5, 'shares': 10, 'replica_gigabytes': 5,
                  'share_replicas': 10}
        exc = exception.OverQuota(overs=overs, usages=usages, quotas=quotas)
        self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc))
        if replication_type:
            # Prevent the raising of an exception, to force the call to the
            # function check_if_replica_quotas_exceeded
            self.mock_object(self.api, 'check_if_share_quotas_exceeded')
        self.assertRaises(
            expected_exception,
            self.api.create,
            self.context,
            share_data['share_proto'],
            share_data['size'],
            share_data['display_name'],
            share_data['display_description'],
            availability_zone=az,
            share_type=share_type
        )
        if replication_type:
            quota.QUOTAS.reserve.assert_called_once_with(
                self.context, share_type_id=share_type['id'], gigabytes=1,
                shares=1, share_replicas=1, replica_gigabytes=1)
        else:
            quota.QUOTAS.reserve.assert_called_once_with(
                self.context, share_type_id=share_type['id'], shares=1,
                gigabytes=share_data['size'])

    @ddt.data({'overs': {'per_share_gigabytes': 'fake'},
               'expected_exception': exception.ShareSizeExceedsLimit})
    @ddt.unpack
    def test_create_share_over_per_share_quota(self, overs,
                                               expected_exception):
        """Exceeding the per-share size quota raises ShareSizeExceedsLimit."""
        share, share_data = self._setup_create_mocks()
        quota.CONF.set_default("per_share_gigabytes", 5, 'quota')
        share_data['size'] = 20
        usages = {'per_share_gigabytes': {'reserved': 0, 'in_use': 0}}
        quotas = {'per_share_gigabytes': 10}
        exc = exception.OverQuota(overs=overs, usages=usages, quotas=quotas)
        self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc))
        self.assertRaises(
            expected_exception,
            self.api.create,
            self.context,
            share_data['share_proto'],
            share_data['size'],
            share_data['display_name'],
            share_data['display_description']
        )

    @ddt.data(
        {'overs': {'encryption_keys': 'fake'},
         'expected_exception': (
             exception.EncryptionKeysLimitExceeded)
         })
    @ddt.unpack
    def test_create_share_over_encryption_keys_quota(self, overs,
                                                     expected_exception):
        """Exceeding the encryption-keys quota raises the dedicated error."""
        share, share_data = self._setup_create_mocks()
        quota.CONF.set_default("encryption_keys", 2, 'quota')
        share_data['encryption_key_ref'] = uuidutils.generate_uuid()
        usages = {'encryption_keys': {'reserved': 0, 'in_use': 2}}
        quotas = {'encryption_keys': 2}
        exc = exception.OverQuota(overs=overs, usages=usages, quotas=quotas)
        self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc))
        self.mock_object(db_api, 'share_server_get_all_with_filters',
                         mock.Mock(return_value={}))
        self.mock_object(barbican_api, 'create_secret_access')
        self.mock_object(db_api, 'encryption_keys_get_count',
                         mock.Mock(return_value=2))
        self.assertRaises(
            expected_exception,
            self.api.create,
            self.context,
            share_data['share_proto'],
            share_data['size'],
            share_data['display_name'],
            share_data['display_description'],
            encryption_key_ref=share_data['encryption_key_ref']
        )

    @ddt.data(exception.QuotaError, exception.InvalidShare)
    def test_create_share_error_on_quota_commit(self, expected_exception):
        """A failed quota commit rolls back the reservation and the share."""
        share, share_data = self._setup_create_mocks()
        reservation = 'fake'
        self.mock_object(quota.QUOTAS, 'reserve',
                         mock.Mock(return_value=reservation))
        self.mock_object(quota.QUOTAS, 'commit',
                         mock.Mock(side_effect=expected_exception('fake')))
        self.mock_object(quota.QUOTAS, 'rollback')
        self.mock_object(db_api, 'share_delete')
        self.assertRaises(
            expected_exception,
            self.api.create,
            self.context,
            share_data['share_proto'],
            share_data['size'],
            share_data['display_name'],
            share_data['display_description']
        )
        quota.QUOTAS.rollback.assert_called_once_with(
            self.context, reservation, share_type_id=None)
        db_api.share_delete.assert_called_once_with(self.context, share['id'])

    def test_create_share_instance_with_host_and_az(self):
        """With an explicit host, the share RPC (not the scheduler) is used."""
        host, share, share_instance = self._setup_create_instance_mocks()
        self.api.create_instance(self.context, share, host=host,
                                 availability_zone='fake',
                                 share_type_id='fake_share_type')
        db_api.share_instance_create.assert_called_once_with(
            self.context, share['id'],
            {
                'share_network_id': None,
                'status': constants.STATUS_CREATING,
                'scheduled_at': self.dt_utc,
                'host': host,
                'availability_zone_id': 'fake_id',
                'share_type_id': 'fake_share_type',
                'cast_rules_to_readonly': False,
                'mount_point_name': None,
                'encryption_key_ref': None
            }
        )
        db_api.share_type_get.assert_called_once_with(
            self.context, share_instance['share_type_id'])
        self.api.share_rpcapi.create_share_instance.assert_called_once_with(
            self.context,
            share_instance,
            host,
            request_spec=mock.ANY,
            filter_properties={'scheduler_hints': None},
            snapshot_id=share['snapshot_id'],
        )
        self.assertFalse(
            self.api.scheduler_rpcapi.create_share_instance.called)

    def test_create_share_instance_with_mount_point_name(self):
        """A supplied mount_point_name is persisted on the instance."""
        host, share, share_instance = self._setup_create_instance_mocks()
        self.api.create_instance(self.context, share, host=host,
                                 availability_zone='fake',
                                 share_type_id='fake_share_type',
                                 mount_point_name='fake_mp')
        db_api.share_instance_create.assert_called_once_with(
            self.context, share['id'],
            {
                'share_network_id': None,
                'status': constants.STATUS_CREATING,
                'scheduled_at': self.dt_utc,
                'host': host,
                'availability_zone_id': 'fake_id',
                'share_type_id': 'fake_share_type',
                'cast_rules_to_readonly': False,
                'mount_point_name': 'fake_mp',
                'encryption_key_ref': None
            }
        )

    def test_create_share_instance_without_host(self):
        """Without a host, scheduling is delegated to the scheduler RPC."""
        _, share, share_instance = self._setup_create_instance_mocks()
        self.api.create_instance(self.context, share)
        (self.api.scheduler_rpcapi.create_share_instance.
            assert_called_once_with(
                self.context, request_spec=mock.ANY,
                filter_properties={'scheduler_hints': None}))
        self.assertFalse(self.api.share_rpcapi.create_share_instance.called)

    def test_create_share_instance_from_snapshot(self):
        """The request spec built from a snapshot share carries its ids."""
        snapshot, share, _, _ = self._setup_create_from_snapshot_mocks()
        request_spec, share_instance = (
            self.api.create_share_instance_and_get_request_spec(
                self.context, share)
        )
        self.assertIsNotNone(share_instance)
        self.assertEqual(share['id'],
                         request_spec['share_instance_properties']['share_id'])
        self.assertEqual(share['snapshot_id'], request_spec['snapshot_id'])
        self.assertFalse(
            self.api.share_rpcapi.create_share_instance_and_get_request_spec
            .called)

    def test_create_instance_share_group_snapshot_member(self):
        """Group-snapshot members update the instance in place, no RPC."""
        fake_req_spec = {
            'share_properties': 'fake_share_properties',
            'share_instance_properties': 'fake_share_instance_properties',
        }
        share = fakes.fake_share()
        member_info = {
            'host': 'host',
            'share_network_id': 'share_network_id',
            'share_server_id': 'share_server_id',
        }
        fake_instance = fakes.fake_share_instance(
            share_id=share['id'], **member_info)
        sg_snap_member = {'share_instance': fake_instance}
        self.mock_policy_check = self.mock_object(
            policy, 'check_policy', mock.Mock(return_value=True))
        mock_share_rpcapi_call = self.mock_object(self.share_rpcapi,
                                                  'create_share_instance')
        mock_scheduler_rpcapi_call = self.mock_object(self.scheduler_rpcapi,
                                                      'create_share_instance')
        mock_db_share_instance_update = self.mock_object(
            db_api, 'share_instance_update')
        self.mock_object(
            share_api.API, 'create_share_instance_and_get_request_spec',
            mock.Mock(return_value=(fake_req_spec, fake_instance)))
        retval = self.api.create_instance(
            self.context, fakes.fake_share(),
            share_group_snapshot_member=sg_snap_member)
        self.assertIsNone(retval)
        mock_db_share_instance_update.assert_called_once_with(
            self.context, fake_instance['id'], member_info)
        self.assertFalse(mock_scheduler_rpcapi_call.called)
        self.assertFalse(mock_share_rpcapi_call.called)

    def test_get_share_attributes_from_share_type(self):
        """All recognized extra specs are passed through unchanged."""
        share_type = {
            'extra_specs': {
                'snapshot_support': True,
                'create_share_from_snapshot_support': False,
                'revert_to_snapshot_support': False,
                'mount_snapshot_support': False,
                'mount_point_name_support': False,
                'replication_type': 'dr',
            }
        }
        result = self.api.get_share_attributes_from_share_type(share_type)
        self.assertEqual(share_type['extra_specs'], result)

    @ddt.data({}, {'extra_specs': {}}, None)
    def test_get_share_attributes_from_share_type_defaults(self, share_type):
        """Missing type or specs yield the documented defaults (False/None)."""
        result = self.api.get_share_attributes_from_share_type(share_type)
        expected = {
            'snapshot_support': False,
            'create_share_from_snapshot_support': False,
            'revert_to_snapshot_support': False,
            'mount_snapshot_support': False,
            'mount_point_name_support': False,
            'replication_type': None,
        }
        self.assertEqual(expected, result)

    @ddt.data({'extra_specs': {'snapshot_support': 'fake'}},
              {'extra_specs': {'create_share_from_snapshot_support': 'fake'}})
    def test_get_share_attributes_from_share_type_invalid(self, share_type):
        """Non-boolean values for boolean extra specs are invalid."""
        self.assertRaises(exception.InvalidExtraSpec,
                          self.api.get_share_attributes_from_share_type,
                          share_type)

    @ddt.data(
        {'replication_type': 'dr', 'dhss': False, 'share_server_id': None},
        {'replication_type': 'readable', 'dhss': False,
         'share_server_id': None},
        {'replication_type': None, 'dhss': False, 'share_server_id': None},
        {'replication_type': None, 'dhss': True, 'share_server_id': 'fake'}
    )
    @ddt.unpack
    def test_manage_new(self, replication_type, dhss, share_server_id):
        """manage() creates the share record and hands off to the scheduler."""
        share_data = {
            'host': 'fake',
            'export_location_path': 'fake',
            'share_proto': 'fake',
            'share_type_id': 'fake',
        }
        if dhss:
            share_data['share_server_id'] = share_server_id
        driver_options = {}
        date = datetime.datetime(1, 1, 1, 1, 1, 1)
        timeutils.utcnow.return_value = date

        fake_subnet = db_utils.create_share_network_subnet(
            share_network_id='fake')
        share_server = db_utils.create_share_server(
            status=constants.STATUS_ACTIVE, id=share_server_id,
            share_network_subnets=[fake_subnet])
        share_network = db_utils.create_share_network(id='fake')
        fake_share_data = {
            'id': 'fakeid',
            'status': constants.STATUS_CREATING,
        }
        fake_type = {
            'id': 'fake_type_id',
            'extra_specs': {
                'snapshot_support': False,
                'replication_type': replication_type,
                'create_share_from_snapshot_support': False,
                'revert_to_snapshot_support': False,
                'mount_snapshot_support': False,
                'mount_point_name_support': False,
                'driver_handles_share_servers': dhss,
            },
        }
        share = db_api.share_create(self.context, fake_share_data)

        self.mock_object(self.scheduler_rpcapi, 'manage_share')
        self.mock_object(db_api, 'share_create',
                         mock.Mock(return_value=share))
        self.mock_object(db_api, 'export_locations_update')
        self.mock_object(db_api, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(share_types, 'get_share_type',
                         mock.Mock(return_value=fake_type))
        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(return_value=share_server))
        self.mock_object(db_api, 'share_instance_get_all',
                         mock.Mock(return_value=[]))
        self.mock_object(db_api, 'share_network_get',
                         mock.Mock(return_value=share_network))

        self.api.manage(self.context, copy.deepcopy(share_data),
                        driver_options)

        # manage() is expected to enrich the share data with ownership,
        # state and the type's capability flags before persisting it.
        share_data.update({
            'user_id': self.context.user_id,
            'project_id': self.context.project_id,
            'status': constants.STATUS_MANAGING,
            'scheduled_at': date,
            'snapshot_support': fake_type['extra_specs']['snapshot_support'],
            'create_share_from_snapshot_support':
                fake_type['extra_specs']['create_share_from_snapshot_support'],
            'revert_to_snapshot_support':
                fake_type['extra_specs']['revert_to_snapshot_support'],
            'mount_snapshot_support':
                fake_type['extra_specs']['mount_snapshot_support'],
            'mount_point_name_support':
                fake_type['extra_specs']['mount_point_name_support'],
            'replication_type': replication_type,
        })

        expected_request_spec = self._get_request_spec_dict(
            share, fake_type, self.context, size=0,
            share_proto=share_data['share_proto'],
            host=share_data['host'])
        if dhss:
            share_data.update({
                'share_network_id': fake_subnet['share_network_id']})
        export_location = share_data.pop('export_location_path')
        filters = {'export_location_path': export_location,
                   'host': share_data['host']
                   }
        if share_server_id:
            filters['share_server_id'] = share_server_id
        db_api.share_instance_get_all.assert_called_once_with(
            self.context, filters=filters)
        db_api.share_create.assert_called_once_with(self.context, share_data)
        db_api.share_get.assert_called_once_with(self.context, share['id'])
        db_api.export_locations_update.assert_called_once_with(
            self.context, share.instance['id'], export_location)
        self.scheduler_rpcapi.manage_share.assert_called_once_with(
            self.context, share['id'], driver_options, expected_request_spec)
        if dhss:
            db_api.share_server_get.assert_called_once_with(
                self.context, share_data['share_server_id'])

    @ddt.data((True, exception.InvalidInput, True),
              (True, exception.InvalidInput, False),
              (False, exception.InvalidInput, True),
              (True, exception.InvalidInput, True))
    @ddt.unpack
    def test_manage_new_dhss_true_and_false(self, dhss, exception_type,
                                            has_share_server_id):
        """Mismatched dhss/share_server_id combinations are rejected."""
        share_data = {
            'host': 'fake',
            'export_location_path': 'fake',
            'share_proto': 'fake',
            'share_type_id': 'fake',
        }
        if has_share_server_id:
            share_data['share_server_id'] = 'fake'
        driver_options = {}
        date = datetime.datetime(1, 1, 1, 1, 1, 1)
        timeutils.utcnow.return_value = date
        fake_type = {
            'id': 'fake_type_id',
            'extra_specs': {
                'snapshot_support': False,
                'create_share_from_snapshot_support': False,
                'revert_to_snapshot_support': False,
                'mount_snapshot_support': False,
                'mount_point_name_support': False,
                'driver_handles_share_servers': dhss,
            },
        }
        self.mock_object(share_types, 'get_share_type',
                         mock.Mock(return_value=fake_type))
        self.mock_object(db_api, 'share_instance_get_all',
                         mock.Mock(return_value=[]))

        self.assertRaises(exception_type,
                          self.api.manage,
                          self.context,
                          share_data=share_data,
                          driver_options=driver_options
                          )
        share_types.get_share_type.assert_called_once_with(
            self.context, share_data['share_type_id']
        )
        filters = {'export_location_path': share_data['export_location_path'],
                   'host': share_data['host']
                   }
        if has_share_server_id:
            filters['share_server_id'] = 'fake'
        db_api.share_instance_get_all.assert_called_once_with(
            self.context, filters=filters)

    def test_manage_new_share_server_not_found(self):
        """manage() with dhss=True requires a resolvable share server."""
        share_data = {
            'host': 'fake',
            'export_location_path': 'fake',
            'share_proto': 'fake',
            'share_type_id': 'fake',
            'share_server_id': 'fake'
        }
        driver_options = {}
        date = datetime.datetime(1, 1, 1, 1, 1, 1)
        timeutils.utcnow.return_value = date

        fake_type = {
            'id': 'fake_type_id',
            'extra_specs': {
                'snapshot_support': False,
                'replication_type': 'dr',
                'create_share_from_snapshot_support': False,
                'revert_to_snapshot_support': False,
                'mount_snapshot_support': False,
                'mount_point_name_support': False,
                'driver_handles_share_servers': True,
            },
        }
        self.mock_object(share_types, 'get_share_type',
                         mock.Mock(return_value=fake_type))
        self.mock_object(db_api, 'share_instance_get_all',
                         mock.Mock(return_value=[]))

        self.assertRaises(exception.InvalidInput,
                          self.api.manage,
                          self.context,
                          share_data=share_data,
                          driver_options=driver_options
                          )
        share_types.get_share_type.assert_called_once_with(
            self.context, share_data['share_type_id']
        )
        db_api.share_instance_get_all.assert_called_once_with(
            self.context,
            filters={
                'export_location_path': share_data['export_location_path'],
                'host': share_data['host'],
                'share_server_id': share_data['share_server_id']
            }
        )

    def test_manage_new_share_server_not_active(self):
        """A share server that is not ACTIVE cannot back a managed share."""
        share_data = {
            'host': 'fake',
            'export_location_path': 'fake',
            'share_proto': 'fake',
            'share_type_id': 'fake',
            'share_server_id': 'fake'
        }
        fake_share_data = {
            'id': 'fakeid',
            'status': constants.STATUS_ERROR,
        }
        driver_options = {}
        date = datetime.datetime(1, 1, 1, 1, 1, 1)
        timeutils.utcnow.return_value = date
        fake_type = {
            'id': 'fake_type_id',
            'extra_specs': {
                'snapshot_support': False,
                'replication_type': 'dr',
                'create_share_from_snapshot_support': False,
                'revert_to_snapshot_support': False,
                'mount_snapshot_support': False,
                'mount_point_name_support': False,
                'driver_handles_share_servers': True,
            },
        }
        share = db_api.share_create(self.context, fake_share_data)
        self.mock_object(share_types, 'get_share_type',
                         mock.Mock(return_value=fake_type))
        self.mock_object(db_api, 'share_instance_get_all',
                         mock.Mock(return_value=[]))
        self.mock_object(db_api, 'share_server_get',
                         mock.Mock(return_value=share))
        self.assertRaises(exception.InvalidShareServer,
                          self.api.manage,
                          self.context,
                          share_data=share_data,
                          driver_options=driver_options
                          )
        share_types.get_share_type.assert_called_once_with(
            self.context, share_data['share_type_id']
        )
        db_api.share_instance_get_all.assert_called_once_with(
            self.context,
            filters={
                'export_location_path': share_data['export_location_path'],
                'host': share_data['host'],
                'share_server_id': share_data['share_server_id']
            }
        )
        db_api.share_server_get.assert_called_once_with(
            self.context, share_data['share_server_id']
        )

    @ddt.data(constants.STATUS_MANAGE_ERROR, constants.STATUS_AVAILABLE)
    def test_manage_duplicate(self, status):
        """A share already managed at the same location cannot be re-managed."""
        share_data = {
            'host': 'fake',
            'export_location_path': 'fake',
            'share_proto': 'fake',
            'share_type_id': 'fake',
        }
        driver_options = {}
        fake_type = {
            'id': 'fake_type_id',
            'extra_specs': {
                'snapshot_support': False,
                'create_share_from_snapshot_support': False,
                'driver_handles_share_servers': False,
            },
        }
        already_managed = [{'id': 'fake', 'status': status}]
        self.mock_object(db_api, 'share_instance_get_all',
                         mock.Mock(return_value=already_managed))
        self.mock_object(share_types, 'get_share_type',
                         mock.Mock(return_value=fake_type))
        self.assertRaises(exception.InvalidShare, self.api.manage,
                          self.context, share_data, driver_options)

    def _get_request_spec_dict(self, share, share_type, context, **kwargs):
        """Build the request spec expected from the API for assertions.

        Any field may be overridden via kwargs; otherwise values come from
        the share/instance dicts with dict-style fallbacks.
        """
        if share is None:
            share = {'instance': {}}
        share_instance = share['instance']
        share_properties = {
            'size': kwargs.get('size', share.get('size')),
            'user_id': kwargs.get('user_id', share.get('user_id')),
            'project_id': kwargs.get('project_id', share.get('project_id')),
            'metadata': db_api.share_metadata_get(context, share['id']),
            'snapshot_support': kwargs.get(
                'snapshot_support',
                share_type['extra_specs']['snapshot_support']),
            'create_share_from_snapshot_support': kwargs.get(
                'create_share_from_snapshot_support',
                share_type['extra_specs'].get(
                    'create_share_from_snapshot_support')),
            'revert_to_snapshot_support': kwargs.get(
                'revert_to_snapshot_support',
                share_type['extra_specs'].get('revert_to_snapshot_support')),
            'mount_snapshot_support': kwargs.get(
                'mount_snapshot_support',
                share_type['extra_specs'].get('mount_snapshot_support')),
            'mount_point_name_support': kwargs.get(
                'mount_point_name_support',
                share_type['extra_specs'].get('mount_point_name_support')),
            'share_proto': kwargs.get('share_proto', share.get('share_proto')),
            'share_type_id': share_type['id'],
            'is_public': kwargs.get('is_public', share.get('is_public')),
            'share_group_id': kwargs.get(
                'share_group_id', share.get('share_group_id')),
            'source_share_group_snapshot_member_id': kwargs.get(
                'source_share_group_snapshot_member_id',
                share.get('source_share_group_snapshot_member_id')),
            'snapshot_id': kwargs.get('snapshot_id', share.get('snapshot_id')),
        }
        share_instance_properties = {
            'availability_zone_id': kwargs.get(
                'availability_zone_id',
                share_instance.get('availability_zone_id')),
            'share_network_id': kwargs.get(
                'share_network_id', share_instance.get('share_network_id')),
            'share_server_id': kwargs.get(
                'share_server_id', share_instance.get('share_server_id')),
            'share_id': kwargs.get('share_id', share_instance.get('share_id')),
            'host': kwargs.get('host', share_instance.get('host')),
            'status': kwargs.get('status', share_instance.get('status')),
        }
        request_spec = {
            'share_properties': share_properties,
            'share_instance_properties': share_instance_properties,
            'share_type': share_type,
            'share_id': share.get('id'),
        }
        return request_spec

    def test_unmanage(self):
        """unmanage() sends the RPC and marks the share as updating."""
        share = db_utils.create_share(
            id='fakeid',
            host='fake',
            size='1',
            status=constants.STATUS_AVAILABLE,
            user_id=self.context.user_id,
            project_id=self.context.project_id,
            task_state=None)

        self.mock_object(db_api, 'share_update', mock.Mock())

        self.api.unmanage(self.context, share)

        self.share_rpcapi.unmanage_share.assert_called_once_with(
            self.context, mock.ANY)
        db_api.share_update.assert_called_once_with(
            mock.ANY, share['id'], mock.ANY)

    def test_unmanage_task_state_busy(self):
        """A share busy with migration cannot be unmanaged."""
        share = db_utils.create_share(
            id='fakeid',
            host='fake',
            size='1',
            status=constants.STATUS_AVAILABLE,
            user_id=self.context.user_id,
            project_id=self.context.project_id,
            task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS)

        self.assertRaises(exception.ShareBusyException, self.api.unmanage,
                          self.context, share)

    def test_unmanage_locked_share(self):
        """Resource locks block unmanage before any other processing."""
        self.mock_object(
            self.api.db, 'resource_lock_get_all',
            mock.Mock(return_value=([{'id': 'l1'}, {'id': 'l2'}], None))
        )
        share = db_utils.create_share(
            id='fakeid',
            host='fake',
            size='1',
            status=constants.STATUS_AVAILABLE,
            user_id=self.context.user_id,
            project_id=self.context.project_id,
            task_state=None)
        self.mock_object(db_api, 'share_update', mock.Mock())

        self.assertRaises(exception.InvalidShare, self.api.unmanage,
                          self.context, share)

        # lock check decorator executed first, nothing else is invoked
        self.share_rpcapi.unmanage_share.assert_not_called()
        db_api.share_update.assert_not_called()

    @mock.patch.object(quota.QUOTAS, 'reserve',
                       mock.Mock(return_value='reservation'))
    @mock.patch.object(quota.QUOTAS, 'commit', mock.Mock())
    def test_create_snapshot(self):
        """create_snapshot reserves/commits quota and persists the record."""
        snapshot = db_utils.create_snapshot(
            with_share=True, status=constants.STATUS_CREATING, size=1)
        share = snapshot['share']
        fake_name = 'fakename'
        fake_desc = 'fakedesc'
        options = {
            'share_id': share['id'],
            'user_id': self.context.user_id,
'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_size': share['size'], 'size': 1, 'display_name': fake_name, 'display_description': fake_desc, 'share_proto': share['share_proto'], } with mock.patch.object(db_api, 'share_snapshot_create', mock.Mock(return_value=snapshot)): self.api.create_snapshot(self.context, share, fake_name, fake_desc) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'create_snapshot', share) quota.QUOTAS.reserve.assert_called_once_with( self.context, share_type_id=None, snapshot_gigabytes=1, snapshots=1) quota.QUOTAS.commit.assert_called_once_with( self.context, 'reservation', share_type_id=None) db_api.share_snapshot_create.assert_called_once_with( self.context, options) def test_create_snapshot_space_quota_exceeded(self): share = fakes.fake_share( id=uuidutils.generate_uuid(), size=1, project_id='fake_project', user_id='fake_user', has_replicas=False, status='available') usages = {'snapshot_gigabytes': {'reserved': 10, 'in_use': 0}} quotas = {'snapshot_gigabytes': 10} side_effect = exception.OverQuota( overs='snapshot_gigabytes', usages=usages, quotas=quotas) self.mock_object( quota.QUOTAS, 'reserve', mock.Mock(side_effect=side_effect)) mock_snap_create = self.mock_object(db_api, 'share_snapshot_create') self.assertRaises(exception.SnapshotSizeExceedsAvailableQuota, self.api.create_snapshot, self.context, share, 'fake_name', 'fake_description') mock_snap_create.assert_not_called() def test_create_snapshot_count_quota_exceeded(self): share = fakes.fake_share( id=uuidutils.generate_uuid(), size=1, project_id='fake_project', user_id='fake_user', has_replicas=False, status='available') usages = {'snapshots': {'reserved': 10, 'in_use': 0}} quotas = {'snapshots': 10} side_effect = exception.OverQuota( overs='snapshots', usages=usages, quotas=quotas) self.mock_object( quota.QUOTAS, 'reserve', mock.Mock(side_effect=side_effect)) mock_snap_create = 
self.mock_object(db_api, 'share_snapshot_create') self.assertRaises(exception.SnapshotLimitExceeded, self.api.create_snapshot, self.context, share, 'fake_name', 'fake_description') mock_snap_create.assert_not_called() def test_manage_snapshot_share_not_found(self): snapshot = fakes.fake_snapshot(share_id='fake_share', as_primitive=True) mock_share_get_call = self.mock_object( db_api, 'share_get', mock.Mock(side_effect=exception.NotFound)) mock_db_snapshot_call = self.mock_object( db_api, 'share_snapshot_get_all_for_share') self.assertRaises(exception.ShareNotFound, self.api.manage_snapshot, self.context, snapshot, {}) self.assertFalse(mock_db_snapshot_call.called) mock_share_get_call.assert_called_once_with( self.context, snapshot['share_id']) def test_manage_snapshot_share_has_replicas(self): share_ref = fakes.fake_share( has_replicas=True, status=constants.STATUS_AVAILABLE) self.mock_object( db_api, 'share_get', mock.Mock(return_value=share_ref)) snapshot = fakes.fake_snapshot(create_instance=True, as_primitive=True) mock_db_snapshot_get_all_for_share_call = self.mock_object( db_api, 'share_snapshot_get_all_for_share') self.assertRaises(exception.InvalidShare, self.api.manage_snapshot, context, snapshot, {}) self.assertFalse(mock_db_snapshot_get_all_for_share_call.called) def test_manage_snapshot_already_managed(self): share_ref = fakes.fake_share( has_replicas=False, status=constants.STATUS_AVAILABLE) snapshot = fakes.fake_snapshot(create_instance=True, as_primitive=True) self.mock_object( db_api, 'share_get', mock.Mock(return_value=share_ref)) mock_db_snapshot_call = self.mock_object( db_api, 'share_snapshot_get_all_for_share', mock.Mock( return_value=[snapshot])) mock_db_snapshot_create_call = self.mock_object( db_api, 'share_snapshot_create') self.assertRaises(exception.ManageInvalidShareSnapshot, self.api.manage_snapshot, self.context, snapshot, {}) mock_db_snapshot_call.assert_called_once_with( self.context, snapshot['share_id']) 
self.assertFalse(mock_db_snapshot_create_call.called) def test_manage_snapshot(self): share_ref = fakes.fake_share( has_replicas=False, status=constants.STATUS_AVAILABLE, host='fake_host') existing_snapshot = fakes.fake_snapshot( create_instance=True, share_id=share_ref['id']) self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[existing_snapshot])) snapshot_data = { 'share_id': share_ref['id'], 'provider_location': 'someproviderlocation', } expected_snapshot_data = { 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_MANAGING, 'share_size': share_ref['size'], 'progress': '0%', 'share_proto': share_ref['share_proto'], } expected_snapshot_data.update(**snapshot_data) snapshot = fakes.fake_snapshot( create_instance=True, **expected_snapshot_data) self.mock_object( db_api, 'share_get', mock.Mock(return_value=share_ref)) mock_db_snapshot_create_call = self.mock_object( db_api, 'share_snapshot_create', mock.Mock(return_value=snapshot)) mock_rpc_call = self.mock_object(self.share_rpcapi, 'manage_snapshot', mock.Mock(return_value=snapshot)) new_snap = self.api.manage_snapshot( self.context, snapshot_data, {}) self.assertEqual(new_snap, snapshot) mock_db_snapshot_create_call.assert_called_once_with( self.context, expected_snapshot_data) mock_rpc_call.assert_called_once_with( self.context, snapshot, share_ref['host'], {}) def test_manage_share_server(self): """Tests manage share server""" host = 'fake_host' fake_share_network = { 'id': 'fake_net_id' } fake_share_net_subnet = [{ 'id': 'fake_subnet_id', 'share_network_id': fake_share_network['id'] }] identifier = 'fake_identifier' values = { 'host': host, 'share_network_subnets': [fake_share_net_subnet], 'status': constants.STATUS_MANAGING, 'is_auto_deletable': False, 'identifier': identifier, } server_managing = { 'id': 'fake_server_id', 'status': constants.STATUS_MANAGING, 'host': host, 'share_network_subnets': [fake_share_net_subnet], 
'is_auto_deletable': False, 'identifier': identifier, } mock_share_server_search = self.mock_object( db_api, 'share_server_search_by_identifier', mock.Mock(side_effect=exception.ShareServerNotFound('fake'))) mock_share_server_get = self.mock_object( db_api, 'share_server_get', mock.Mock( return_value=server_managing) ) mock_share_server_create = self.mock_object( db_api, 'share_server_create', mock.Mock(return_value=server_managing) ) result = self.api.manage_share_server( self.context, 'fake_identifier', host, fake_share_net_subnet, {'opt1': 'val1', 'opt2': 'val2'} ) mock_share_server_create.assert_called_once_with( self.context, values) mock_share_server_get.assert_called_once_with( self.context, 'fake_server_id') mock_share_server_search.assert_called_once_with( self.context, 'fake_identifier') result_dict = { 'host': result['host'], 'share_network_subnets': result['share_network_subnets'], 'status': result['status'], 'is_auto_deletable': result['is_auto_deletable'], 'identifier': result['identifier'], } self.assertEqual(values, result_dict) def test_manage_share_server_invalid(self): server = {'identifier': 'fake_server'} mock_share_server_search = self.mock_object( db_api, 'share_server_search_by_identifier', mock.Mock(return_value=[server])) self.assertRaises( exception.InvalidInput, self.api.manage_share_server, self.context, 'invalid_identifier', 'fake_host', 'fake_share_net', {}) mock_share_server_search.assert_called_once_with( self.context, 'invalid_identifier') def test_unmanage_snapshot(self): fake_host = 'fake_host' snapshot_data = { 'status': constants.STATUS_UNMANAGING, 'terminated_at': timeutils.utcnow(), } snapshot = fakes.fake_snapshot( create_instance=True, share_instance_id='id2', **snapshot_data) mock_db_snap_update_call = self.mock_object( db_api, 'share_snapshot_update', mock.Mock(return_value=snapshot)) mock_rpc_call = self.mock_object( self.share_rpcapi, 'unmanage_snapshot') retval = self.api.unmanage_snapshot( self.context, snapshot, 
fake_host) self.assertIsNone(retval) mock_db_snap_update_call.assert_called_once_with( self.context, snapshot['id'], snapshot_data) mock_rpc_call.assert_called_once_with( self.context, snapshot, fake_host) def test_unmanage_share_server(self): shr1 = {} share_server = db_utils.create_share_server(**shr1) update_data = {'status': constants.STATUS_UNMANAGING, 'terminated_at': timeutils.utcnow()} mock_share_instance_get_all = self.mock_object( db_api, 'share_instance_get_all_by_share_server', mock.Mock(return_value={})) mock_share_group_get_all = self.mock_object( db_api, 'share_group_get_all_by_share_server', mock.Mock(return_value={})) mock_share_server_update = self.mock_object( db_api, 'share_server_update', mock.Mock(return_value=share_server)) mock_rpc = self.mock_object( self.api.share_rpcapi, 'unmanage_share_server') self.api.unmanage_share_server(self.context, share_server, True) mock_share_instance_get_all.assert_called_once_with( self.context, share_server['id'] ) mock_share_group_get_all.assert_called_once_with( self.context, share_server['id'] ) mock_share_server_update.assert_called_once_with( self.context, share_server['id'], update_data ) mock_rpc.assert_called_once_with( self.context, share_server, force=True) def test_unmanage_share_server_in_use(self): fake_share = db_utils.create_share() fake_share_server = db_utils.create_share_server() fake_share_instance = db_utils.create_share_instance( share_id=fake_share['id']) share_instance_get_all_mock = self.mock_object( db_api, 'share_instance_get_all_by_share_server', mock.Mock(return_value=fake_share_instance) ) self.assertRaises(exception.ShareServerInUse, self.api.unmanage_share_server, self.context, fake_share_server, True) share_instance_get_all_mock.assert_called_once_with( self.context, fake_share_server['id'] ) def test_unmanage_share_server_in_use_share_groups(self): fake_share_server = db_utils.create_share_server() fake_share_groups = db_utils.create_share_group() share_instance_get_all_mock 
= self.mock_object( db_api, 'share_instance_get_all_by_share_server', mock.Mock(return_value={}) ) group_get_all_mock = self.mock_object( db_api, 'share_group_get_all_by_share_server', mock.Mock(return_value=fake_share_groups) ) self.assertRaises(exception.ShareServerInUse, self.api.unmanage_share_server, self.context, fake_share_server, True) share_instance_get_all_mock.assert_called_once_with( self.context, fake_share_server['id'] ) group_get_all_mock.assert_called_once_with( self.context, fake_share_server['id'] ) @ddt.data(True, False) def test_revert_to_snapshot(self, has_replicas): share = fakes.fake_share(id=uuidutils.generate_uuid(), has_replicas=has_replicas) self.mock_object(db_api, 'share_get', mock.Mock(return_value=share)) mock_handle_revert_to_snapshot_quotas = self.mock_object( self.api, '_handle_revert_to_snapshot_quotas', mock.Mock(return_value='fake_reservations')) mock_revert_to_replicated_snapshot = self.mock_object( self.api, '_revert_to_replicated_snapshot') mock_revert_to_snapshot = self.mock_object( self.api, '_revert_to_snapshot') snapshot = fakes.fake_snapshot(share_id=share['id']) self.api.revert_to_snapshot(self.context, share, snapshot) mock_handle_revert_to_snapshot_quotas.assert_called_once_with( self.context, share, snapshot) if not has_replicas: self.assertFalse(mock_revert_to_replicated_snapshot.called) mock_revert_to_snapshot.assert_called_once_with( self.context, share, snapshot, 'fake_reservations') else: mock_revert_to_replicated_snapshot.assert_called_once_with( self.context, share, snapshot, 'fake_reservations') self.assertFalse(mock_revert_to_snapshot.called) @ddt.data(None, 'fake_reservations') def test_revert_to_snapshot_exception(self, reservations): share = fakes.fake_share(id=uuidutils.generate_uuid(), has_replicas=False) self.mock_object(db_api, 'share_get', mock.Mock(return_value=share)) self.mock_object( self.api, '_handle_revert_to_snapshot_quotas', mock.Mock(return_value=reservations)) side_effect = 
exception.ReplicationException(reason='error') self.mock_object( self.api, '_revert_to_snapshot', mock.Mock(side_effect=side_effect)) mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback') snapshot = fakes.fake_snapshot(share_id=share['id']) self.assertRaises(exception.ReplicationException, self.api.revert_to_snapshot, self.context, share, snapshot) if reservations is not None: mock_quotas_rollback.assert_called_once_with( self.context, reservations, share_type_id=share['instance']['share_type_id']) else: self.assertFalse(mock_quotas_rollback.called) def test_handle_revert_to_snapshot_quotas(self): share = fakes.fake_share( id=uuidutils.generate_uuid(), size=1, project_id='fake_project', user_id='fake_user', has_replicas=False) snapshot = fakes.fake_snapshot( id=uuidutils.generate_uuid(), share_id=share['id'], size=1) mock_quotas_reserve = self.mock_object(quota.QUOTAS, 'reserve') result = self.api._handle_revert_to_snapshot_quotas( self.context, share, snapshot) self.assertIsNone(result) self.assertFalse(mock_quotas_reserve.called) def test_handle_revert_to_snapshot_quotas_different_size(self): share = fakes.fake_share( id=uuidutils.generate_uuid(), size=1, project_id='fake_project', user_id='fake_user', has_replicas=False) snapshot = fakes.fake_snapshot( id=uuidutils.generate_uuid(), share_id=share['id'], size=2) mock_quotas_reserve = self.mock_object( quota.QUOTAS, 'reserve', mock.Mock(return_value='fake_reservations')) result = self.api._handle_revert_to_snapshot_quotas( self.context, share, snapshot) self.assertEqual('fake_reservations', result) mock_quotas_reserve.assert_called_once_with( self.context, project_id='fake_project', gigabytes=1, share_type_id=share['instance']['share_type_id'], user_id='fake_user') def test_handle_revert_to_snapshot_quotas_quota_exceeded(self): share = fakes.fake_share( id=uuidutils.generate_uuid(), size=1, project_id='fake_project', user_id='fake_user', has_replicas=False) snapshot = fakes.fake_snapshot( 
id=uuidutils.generate_uuid(), share_id=share['id'], size=2) usages = {'gigabytes': {'reserved': 10, 'in_use': 0}} quotas = {'gigabytes': 10} side_effect = exception.OverQuota( overs='fake', usages=usages, quotas=quotas) self.mock_object( quota.QUOTAS, 'reserve', mock.Mock(side_effect=side_effect)) self.assertRaises(exception.ShareSizeExceedsAvailableQuota, self.api._handle_revert_to_snapshot_quotas, self.context, share, snapshot) def test__revert_to_snapshot(self): share = fakes.fake_share( id=uuidutils.generate_uuid(), size=1, project_id='fake_project', user_id='fake_user', has_replicas=False) snapshot = fakes.fake_snapshot( id=uuidutils.generate_uuid(), share_id=share['id'], size=2) mock_share_update = self.mock_object(db_api, 'share_update') mock_share_snapshot_update = self.mock_object( db_api, 'share_snapshot_update') mock_revert_rpc_call = self.mock_object( self.share_rpcapi, 'revert_to_snapshot') self.api._revert_to_snapshot( self.context, share, snapshot, 'fake_reservations') mock_share_update.assert_called_once_with( self.context, share['id'], {'status': constants.STATUS_REVERTING}) mock_share_snapshot_update.assert_called_once_with( self.context, snapshot['id'], {'status': constants.STATUS_RESTORING}) mock_revert_rpc_call.assert_called_once_with( self.context, share, snapshot, share['instance']['host'], 'fake_reservations') def test_revert_to_replicated_snapshot(self): share = fakes.fake_share( has_replicas=True, status=constants.STATUS_AVAILABLE) snapshot = fakes.fake_snapshot(share_instance_id='id1') snapshot_instance = fakes.fake_snapshot_instance( base_snapshot=snapshot, id='sid1') replicas = [ fakes.fake_replica( id='rid1', replica_state=constants.REPLICA_STATE_ACTIVE), fakes.fake_replica( id='rid2', replica_state=constants.REPLICA_STATE_IN_SYNC), ] self.mock_object( db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replicas[0])) self.mock_object( db_api, 'share_snapshot_instance_get_all_with_filters', 
mock.Mock(return_value=[snapshot_instance])) mock_share_replica_update = self.mock_object( db_api, 'share_replica_update') mock_share_snapshot_instance_update = self.mock_object( db_api, 'share_snapshot_instance_update') mock_revert_rpc_call = self.mock_object( self.share_rpcapi, 'revert_to_snapshot') self.api._revert_to_replicated_snapshot( self.context, share, snapshot, 'fake_reservations') mock_share_replica_update.assert_called_once_with( self.context, 'rid1', {'status': constants.STATUS_REVERTING}) mock_share_snapshot_instance_update.assert_called_once_with( self.context, 'sid1', {'status': constants.STATUS_RESTORING}) mock_revert_rpc_call.assert_called_once_with( self.context, share, snapshot, replicas[0]['host'], 'fake_reservations') def test_revert_to_replicated_snapshot_no_active_replica(self): share = fakes.fake_share( has_replicas=True, status=constants.STATUS_AVAILABLE) snapshot = fakes.fake_snapshot(share_instance_id='id1') self.mock_object( db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value=None)) self.assertRaises(exception.ReplicationException, self.api._revert_to_replicated_snapshot, self.context, share, snapshot, 'fake_reservations') def test_revert_to_replicated_snapshot_no_snapshot_instance(self): share = fakes.fake_share( has_replicas=True, status=constants.STATUS_AVAILABLE) snapshot = fakes.fake_snapshot(share_instance_id='id1') replicas = [ fakes.fake_replica( id='rid1', replica_state=constants.REPLICA_STATE_ACTIVE), fakes.fake_replica( id='rid2', replica_state=constants.REPLICA_STATE_IN_SYNC), ] self.mock_object( db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replicas[0])) self.mock_object( db_api, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[None])) self.assertRaises(exception.ReplicationException, self.api._revert_to_replicated_snapshot, self.context, share, snapshot, 'fake_reservations') def test_create_snapshot_for_replicated_share(self): share = 
fakes.fake_share( has_replicas=True, status=constants.STATUS_AVAILABLE) snapshot = fakes.fake_snapshot( create_instance=True, share_instance_id='id2') replicas = [ fakes.fake_replica( id='id1', replica_state=constants.REPLICA_STATE_ACTIVE), fakes.fake_replica( id='id2', replica_state=constants.REPLICA_STATE_IN_SYNC) ] self.mock_object(share_api.policy, 'check_policy') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value='reservation')) self.mock_object( db_api, 'share_snapshot_create', mock.Mock(return_value=snapshot)) self.mock_object(db_api, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( db_api, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(quota.QUOTAS, 'commit') mock_instance_create_call = self.mock_object( db_api, 'share_snapshot_instance_create') mock_snapshot_rpc_call = self.mock_object( self.share_rpcapi, 'create_snapshot') mock_replicated_snapshot_rpc_call = self.mock_object( self.share_rpcapi, 'create_replicated_snapshot') snapshot_instance_args = { 'status': constants.STATUS_CREATING, 'progress': '0%', 'share_instance_id': 'id1', } retval = self.api.create_snapshot( self.context, share, 'fake_name', 'fake_description') self.assertEqual(snapshot['id'], retval['id']) mock_instance_create_call.assert_called_once_with( self.context, snapshot['id'], snapshot_instance_args) self.assertFalse(mock_snapshot_rpc_call.called) self.assertTrue(mock_replicated_snapshot_rpc_call.called) @mock.patch.object(db_api, 'share_instance_get_all_by_share_server', mock.Mock(return_value=[])) @mock.patch.object(db_api, 'share_group_get_all_by_share_server', mock.Mock(return_value=[])) def test_delete_share_server_no_dependent_shares(self): server = {'id': 'fake_share_server_id'} server_returned = { 'id': 'fake_share_server_id', } self.mock_object(db_api, 'share_server_update', mock.Mock(return_value=server_returned)) self.api.delete_share_server(self.context, server) 
db_api.share_instance_get_all_by_share_server.assert_called_once_with( self.context, server['id']) (db_api.share_group_get_all_by_share_server. assert_called_once_with(self.context, server['id'])) self.share_rpcapi.delete_share_server.assert_called_once_with( self.context, server_returned) @mock.patch.object(db_api, 'share_instance_get_all_by_share_server', mock.Mock(return_value=['fake_share', ])) @mock.patch.object(db_api, 'share_group_get_all_by_share_server', mock.Mock(return_value=[])) def test_delete_share_server_dependent_share_exists(self): server = {'id': 'fake_share_server_id'} self.assertRaises(exception.ShareServerInUse, self.api.delete_share_server, self.context, server) db_api.share_instance_get_all_by_share_server.assert_called_once_with( self.context, server['id']) @mock.patch.object(db_api, 'share_instance_get_all_by_share_server', mock.Mock(return_value=[])) @mock.patch.object(db_api, 'share_group_get_all_by_share_server', mock.Mock(return_value=['fake_group', ])) def test_delete_share_server_dependent_group_exists(self): server = {'id': 'fake_share_server_id'} self.assertRaises(exception.ShareServerInUse, self.api.delete_share_server, self.context, server) db_api.share_instance_get_all_by_share_server.assert_called_once_with( self.context, server['id']) (db_api.share_group_get_all_by_share_server. 
assert_called_once_with(self.context, server['id'])) @mock.patch.object(db_api, 'share_snapshot_instance_update', mock.Mock()) def test_delete_snapshot(self): snapshot = db_utils.create_snapshot( with_share=True, status=constants.STATUS_AVAILABLE) share = snapshot['share'] with mock.patch.object(db_api, 'share_get', mock.Mock(return_value=share)): self.api.delete_snapshot(self.context, snapshot) self.share_rpcapi.delete_snapshot.assert_called_once_with( self.context, snapshot, share['host'], force=False, deferred_delete=False) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'delete_snapshot', snapshot) db_api.share_snapshot_instance_update.assert_called_once_with( self.context, snapshot['instance']['id'], {'status': constants.STATUS_DELETING}) db_api.share_get.assert_called_once_with( self.context, snapshot['share_id']) @ddt.data(True, False) def test_delete_snapshot_deferred(self, force): CONF.set_default("is_deferred_deletion_enabled", True) snapshot = db_utils.create_snapshot( with_share=True, status=constants.STATUS_AVAILABLE) share = snapshot['share'] self.mock_object(db_api, 'share_snapshot_instance_update', mock.Mock()) with mock.patch.object(db_api, 'share_get', mock.Mock(return_value=share)): self.api.delete_snapshot(self.context, snapshot, force=force) if force: self.share_rpcapi.delete_snapshot.assert_called_once_with( self.context, snapshot, share['host'], force=True, deferred_delete=False) db_api.share_snapshot_instance_update.assert_called_once_with( self.context, snapshot['instance']['id'], {'status': constants.STATUS_DELETING}) else: self.share_rpcapi.delete_snapshot.assert_called_once_with( self.context, snapshot, share['host'], force=False, deferred_delete=True) db_api.share_snapshot_instance_update.assert_called_once_with( self.context, snapshot['instance']['id'], {'status': constants.STATUS_DEFERRED_DELETING}) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'delete_snapshot', snapshot) 
db_api.share_get.assert_called_once_with( self.context, snapshot['share_id']) def test_delete_snapshot_wrong_status(self): snapshot = db_utils.create_snapshot( with_share=True, status=constants.STATUS_CREATING) self.assertRaises(exception.InvalidShareSnapshot, self.api.delete_snapshot, self.context, snapshot) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'delete_snapshot', snapshot) @ddt.data(constants.STATUS_MANAGING, constants.STATUS_ERROR_DELETING, constants.STATUS_CREATING, constants.STATUS_AVAILABLE) def test_delete_snapshot_force_delete(self, status): share = fakes.fake_share(id=uuidutils.generate_uuid(), has_replicas=False) snapshot = fakes.fake_snapshot(aggregate_status=status, share=share) snapshot_instance = fakes.fake_snapshot_instance( base_snapshot=snapshot) self.mock_object(db_api, 'share_get', mock.Mock(return_value=share)) self.mock_object( db_api, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[snapshot_instance])) mock_instance_update_call = self.mock_object( db_api, 'share_snapshot_instance_update') mock_rpc_call = self.mock_object(self.share_rpcapi, 'delete_snapshot') retval = self.api.delete_snapshot(self.context, snapshot, force=True) self.assertIsNone(retval) mock_instance_update_call.assert_called_once_with( self.context, snapshot_instance['id'], {'status': constants.STATUS_DELETING}) mock_rpc_call.assert_called_once_with( self.context, snapshot, share['instance']['host'], force=True, deferred_delete=False) @ddt.data(True, False) def test_delete_snapshot_replicated_snapshot(self, force): share = fakes.fake_share(has_replicas=True) snapshot = fakes.fake_snapshot( create_instance=True, share_id=share['id'], status=constants.STATUS_ERROR) snapshot_instance = fakes.fake_snapshot_instance( base_snapshot=snapshot) expected_update_calls = [ mock.call(self.context, x, {'status': constants.STATUS_DELETING}) for x in (snapshot['instance']['id'], snapshot_instance['id']) ] self.mock_object(db_api, 
'share_get', mock.Mock(return_value=share)) self.mock_object( db_api, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[snapshot['instance'], snapshot_instance])) mock_db_update_call = self.mock_object( db_api, 'share_snapshot_instance_update') mock_snapshot_rpc_call = self.mock_object( self.share_rpcapi, 'delete_snapshot') mock_replicated_snapshot_rpc_call = self.mock_object( self.share_rpcapi, 'delete_replicated_snapshot') retval = self.api.delete_snapshot(self.context, snapshot, force=force) self.assertIsNone(retval) self.assertEqual(2, mock_db_update_call.call_count) mock_db_update_call.assert_has_calls(expected_update_calls) mock_replicated_snapshot_rpc_call.assert_called_once_with( self.context, snapshot, share['instance']['host'], share_id=share['id'], force=force) self.assertFalse(mock_snapshot_rpc_call.called) def test_create_snapshot_if_share_not_available(self): share = db_utils.create_share(status=constants.STATUS_ERROR) self.assertRaises(exception.InvalidShare, self.api.create_snapshot, self.context, share, 'fakename', 'fakedesc') share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'create_snapshot', share) def test_create_snapshot_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.create_snapshot, self.context, share, 'fakename', 'fakedesc') share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'create_snapshot', share) def test_create_snapshot_fail(self): share = fakes.fake_share( has_replicas=False, status=constants.STATUS_AVAILABLE) mock_db_share_snapshot_create = self.mock_object( db_api, 'share_snapshot_create', mock.Mock( side_effect=exception.NotFound)) self.mock_object(quota.QUOTAS, 'rollback') self.assertRaises(exception.NotFound, self.api.create_snapshot, self.context, share, 'fake_name', 'fake_desc') 
self.assertTrue(mock_db_share_snapshot_create.called) quota.QUOTAS.rollback.assert_called_once_with( self.context, mock.ANY, share_type_id=share['instance']['share_type_id']) def test_create_snapshot_quota_commit_fail(self): share = fakes.fake_share( has_replicas=False, status=constants.STATUS_AVAILABLE) snapshot = fakes.fake_snapshot( create_instance=True, share_instance_id='id2') self.mock_object( quota.QUOTAS, 'commit', mock.Mock( side_effect=exception.QuotaError('fake'))) self.mock_object(db_api, 'share_snapshot_create', mock.Mock( return_value=snapshot)) self.mock_object(db_api, 'share_snapshot_instance_delete') self.mock_object(quota.QUOTAS, 'rollback') self.assertRaises(exception.QuotaError, self.api.create_snapshot, self.context, share, 'fake_name', 'fake_desc') quota.QUOTAS.rollback.assert_called_once_with( self.context, mock.ANY, share_type_id=share['instance']['share_type_id']) @ddt.data({'use_scheduler': False, 'valid_host': 'fake', 'az': None}, {'use_scheduler': True, 'valid_host': None, 'az': None}, {'use_scheduler': True, 'valid_host': None, 'az': "fakeaz2"}) @ddt.unpack def test_create_from_snapshot(self, use_scheduler, valid_host, az): snapshot, share, share_data, request_spec = ( self._setup_create_from_snapshot_mocks( use_scheduler=use_scheduler, host=valid_host) ) share_type = fakes.fake_share_type() self.mock_object(db_api, 'share_snapshot_get', mock.Mock(return_value=snapshot)) mock_get_share_type_call = self.mock_object( share_types, 'get_share_type', mock.Mock(return_value=share_type)) self.api.create( self.context, share_data['share_proto'], None, # NOTE(u_glide): Get share size from snapshot share_data['display_name'], share_data['display_description'], snapshot_id=snapshot['id'], availability_zone=az, ) expected_az = snapshot['share']['availability_zone'] if not az else az share_data.pop('availability_zone') mock_get_share_type_call.assert_called_once_with( self.context, share['share_type_id']) self.assertSubDictMatch(share_data, 
db_api.share_create.call_args[0][1]) self.api.create_instance.assert_called_once_with( self.context, share, share_network_id=share['share_network_id'], host=valid_host, share_type_id=share_type['id'], availability_zone=expected_az, share_group=None, share_group_snapshot_member=None, availability_zones=None, az_request_multiple_subnet_support_map=None, snapshot_host=snapshot['share']['instance']['host'], scheduler_hints=None, mount_point_name=None, encryption_key_ref=None) share_api.policy.check_policy.assert_called_once_with( self.context, 'share_snapshot', 'get_snapshot', snapshot, do_raise=False) quota.QUOTAS.reserve.assert_called_once_with( self.context, share_type_id=share_type['id'], gigabytes=1, shares=1) quota.QUOTAS.commit.assert_called_once_with( self.context, 'reservation', share_type_id=share_type['id']) def test_create_share_with_share_group(self): extra_specs = {'replication_type': constants.REPLICATION_TYPE_READABLE} share_type = db_utils.create_share_type(extra_specs=extra_specs) share_type = db_api.share_type_get(self.context, share_type['id']) group = db_utils.create_share_group( status=constants.STATUS_AVAILABLE, share_types=[share_type['id']]) share, share_data = self._setup_create_mocks( share_type_id=share_type['id'], share_group_id=group['id']) share_instance = db_utils.create_share_instance( share_id=share['id']) sg_snap_member = { 'id': 'fake_sg_snap_member_id', 'share_instance': share_instance } az = share_data.pop('availability_zone') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value='reservation')) self.mock_object(quota.QUOTAS, 'commit') self.api.create( self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'], availability_zone=az, share_type=share_type, share_group_id=group['id'], share_group_snapshot_member=sg_snap_member, ) quota.QUOTAS.reserve.assert_called_once_with( self.context, share_type_id=share_type['id'], gigabytes=1, shares=1, share_replicas=1, 
replica_gigabytes=1) quota.QUOTAS.commit.assert_called_once_with( self.context, 'reservation', share_type_id=share_type['id'] ) def test_create_share_share_type_contains_replication_type(self): extra_specs = {'replication_type': constants.REPLICATION_TYPE_READABLE} share_type = db_utils.create_share_type(extra_specs=extra_specs) share_type = db_api.share_type_get(self.context, share_type['id']) share, share_data = self._setup_create_mocks( share_type_id=share_type['id']) az = share_data.pop('availability_zone') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value='reservation')) self.mock_object(quota.QUOTAS, 'commit') self.api.create( self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'], availability_zone=az, share_type=share_type ) quota.QUOTAS.reserve.assert_called_once_with( self.context, share_type_id=share_type['id'], gigabytes=1, shares=1, share_replicas=1, replica_gigabytes=1) quota.QUOTAS.commit.assert_called_once_with( self.context, 'reservation', share_type_id=share_type['id'] ) def test_create_from_snapshot_az_different_from_source(self): snapshot, share, share_data, request_spec = ( self._setup_create_from_snapshot_mocks(use_scheduler=False) ) self.assertRaises(exception.InvalidInput, self.api.create, self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'], snapshot_id=snapshot['id'], availability_zone='fake_different_az') def test_create_from_snapshot_with_different_share_type(self): snapshot, share, share_data, request_spec = ( self._setup_create_from_snapshot_mocks() ) share_type = {'id': 'super_fake_share_type'} self.assertRaises(exception.InvalidInput, self.api.create, self.context, share_data['share_proto'], share_data['size'], share_data['display_name'], share_data['display_description'], snapshot_id=snapshot['id'], availability_zone=share_data['availability_zone'], share_type=share_type) def 
test_get_snapshot(self): fake_get_snap = {'fake_key': 'fake_val'} with mock.patch.object(db_api, 'share_snapshot_get', mock.Mock(return_value=fake_get_snap)): rule = self.api.get_snapshot(self.context, 'fakeid') self.assertEqual(fake_get_snap, rule) share_api.policy.check_policy.assert_called_once_with( self.context, 'share_snapshot', 'get_snapshot', rule, do_raise=False) db_api.share_snapshot_get.assert_called_once_with( self.context, 'fakeid') def test_get_snapshot_non_admin_deferred_state(self): fake_get_snap = { 'fake_key': 'fake_val', 'status': 'deferred_deleting' } with mock.patch.object(db_api, 'share_snapshot_get', mock.Mock(return_value=fake_get_snap)): self.mock_object( policy, 'check_policy', mock.Mock(side_effect=[True, False])) self.assertRaises(exception.NotFound, self.api.get_snapshot, self.context, 'fakeid') def test_get_snapshot_not_authorized(self): fake_get_snap = {'fake_key': 'fake_val'} share_api.policy.check_policy.return_value = False with mock.patch.object(db_api, 'share_snapshot_get', mock.Mock(return_value=fake_get_snap)): self.assertRaises(exception.NotFound, self.api.get_snapshot, self.context, 'fakeid') share_api.policy.check_policy.assert_called_once_with( self.context, 'share_snapshot', 'get_snapshot', fake_get_snap, do_raise=False) db_api.share_snapshot_get.assert_called_once_with( self.context, 'fakeid') def test_create_from_snapshot_not_available(self): snapshot = db_utils.create_snapshot( with_share=True, status=constants.STATUS_ERROR) self.assertRaises(exception.InvalidShareSnapshot, self.api.create, self.context, 'nfs', '1', 'fakename', 'fakedesc', snapshot_id=snapshot['id'], availability_zone='fakeaz') def test_create_from_snapshot_larger_size(self): snapshot = db_utils.create_snapshot( size=100, status=constants.STATUS_AVAILABLE, with_share=True) self.assertRaises(exception.InvalidInput, self.api.create, self.context, 'nfs', 1, 'fakename', 'fakedesc', availability_zone='fakeaz', snapshot_id=snapshot['id']) def 
test_create_share_wrong_size_0(self): self.assertRaises(exception.InvalidInput, self.api.create, self.context, 'nfs', 0, 'fakename', 'fakedesc', availability_zone='fakeaz') def test_create_share_wrong_size_some(self): self.assertRaises(exception.InvalidInput, self.api.create, self.context, 'nfs', 'some', 'fakename', 'fakedesc', availability_zone='fakeaz') @ddt.data(constants.STATUS_AVAILABLE, constants.STATUS_ERROR) def test_delete(self, status): share = self._setup_delete_mocks(status) self.api.delete(self.context, share) self.api.delete_instance.assert_called_once_with( utils.IsAMatcher(context.RequestContext), utils.IsAMatcher(models.ShareInstance), force=False ) db_api.share_snapshot_get_all_for_share.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share['id']) def test_delete_quota_with_different_user(self): share = self._setup_delete_mocks(constants.STATUS_AVAILABLE) diff_user_context = context.RequestContext( user_id='fake2', project_id='fake', is_admin=False ) self.api.delete(diff_user_context, share) def test_delete_wrong_status(self): share = fakes.fake_share(status='wrongstatus') self.mock_object(db_api, 'share_get', mock.Mock(return_value=share)) self.assertRaises(exception.InvalidShare, self.api.delete, self.context, share) def test_delete_share_has_replicas(self): share = self._setup_delete_mocks(constants.STATUS_AVAILABLE, replication_type='writable') db_utils.create_share_replica(share_id=share['id'], replica_state='in_sync') db_utils.create_share_replica(share_id=share['id'], replica_state='out_of_sync') self.assertRaises(exception.Conflict, self.api.delete, self.context, share) @mock.patch.object(db_api, 'count_share_group_snapshot_members_in_share', mock.Mock(return_value=2)) def test_delete_dependent_share_group_snapshot_members(self): share_server_id = 'fake-ss-id' share = self._setup_delete_mocks(constants.STATUS_AVAILABLE, share_server_id) self.assertRaises(exception.InvalidShare, self.api.delete, self.context, share) 
@mock.patch.object(db_api, 'share_instance_delete', mock.Mock()) def test_delete_no_host(self): share = self._setup_delete_mocks(constants.STATUS_AVAILABLE, host=None) self.api.delete(self.context, share) db_api.share_instance_delete.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share.instance['id'], need_to_update_usages=True) def test_delete_share_with_snapshots(self): share = self._setup_delete_mocks(constants.STATUS_AVAILABLE, snapshots=['fake']) self.assertRaises( exception.InvalidShare, self.api.delete, self.context, share ) def test_delete_share_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.delete, self.context, share) def test_delete_share_quota_error(self): share = self._setup_delete_mocks(constants.STATUS_AVAILABLE) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exception.QuotaError('fake'))) self.api.delete(self.context, share) def test_delete_locked_share(self): self.mock_object( self.api.db, 'resource_lock_get_all', mock.Mock(return_value=([{'id': 'l1'}, {'id': 'l2'}], None)) ) share = self._setup_delete_mocks('available') self.assertRaises(exception.InvalidShare, self.api.delete, self.context, share) # lock check decorator executed first, nothing else is invoked self.api.delete_instance.assert_not_called() db_api.share_snapshot_get_all_for_share.assert_not_called() @ddt.data({'status': constants.STATUS_AVAILABLE, 'force': False}, {'status': constants.STATUS_ERROR, 'force': True}) @ddt.unpack def test_delete_share_instance(self, status, force): instance = self._setup_delete_share_instance_mocks( status=status, share_server_id='fake') self.api.delete_instance(self.context, instance, force=force) db_api.share_instance_update.assert_called_once_with( self.context, instance['id'], {'status': constants.STATUS_DELETING, 'terminated_at': self.dt_utc} ) 
self.api.share_rpcapi.delete_share_instance.\ assert_called_once_with( self.context, instance, force=force, deferred_delete=False ) db_api.share_server_update( self.context, instance['share_server_id'], {'updated_at': self.dt_utc} ) @ddt.data({'status': constants.STATUS_DEFERRED_DELETING, 'force': True}, {'status': constants.STATUS_AVAILABLE, 'force': False}, {'status': constants.STATUS_AVAILABLE, 'force': True}) @ddt.unpack def test_delete_share_instance_deferred(self, status, force): CONF.set_default("is_deferred_deletion_enabled", True) instance = self._setup_delete_share_instance_mocks( status=status, share_server_id='fake') self.api.delete_instance(self.context, instance, force=force) if force: if status != constants.STATUS_DEFERRED_DELETING: db_api.share_instance_update.assert_called_once_with( self.context, instance['id'], {'status': constants.STATUS_DELETING, 'terminated_at': self.dt_utc} ) self.api.share_rpcapi.delete_share_instance.\ assert_called_once_with( self.context, instance, force=True, deferred_delete=False ) else: db_api.share_instance_update.assert_called_once_with( self.context, instance['id'], {'status': constants.STATUS_DEFERRED_DELETING, 'terminated_at': self.dt_utc} ) self.api.share_rpcapi.delete_share_instance.\ assert_called_once_with( self.context, instance, force=False, deferred_delete=True ) db_api.share_server_update( self.context, instance['share_server_id'], {'updated_at': self.dt_utc} ) def test_delete_share_instance_invalid_status(self): instance = self._setup_delete_share_instance_mocks( status=constants.STATUS_CREATING, share_server_id='fake') self.assertRaises( exception.InvalidShareInstance, self.api.delete_instance, self.context, instance ) def test_get(self): share = db_utils.create_share() with mock.patch.object(db_api, 'share_get', mock.Mock(return_value=share)): result = self.api.get(self.context, 'fakeid') self.assertEqual(share, result) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'get', 
share, do_raise=False) db_api.share_get.assert_called_once_with( self.context, 'fakeid') def test_get_admin_deferred_state(self): rv = { 'id': 'fake_id', 'is_public': False, 'name': 'bar', 'status': constants.STATUS_ERROR_DEFERRED_DELETING, 'project_id': 'fake_pid_2', 'share_server_id': 'fake_server_3', } self.mock_object(db_api, 'share_get', mock.Mock(return_value=rv)) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True) self.mock_object( policy, 'check_policy', mock.Mock(side_effect=[True, True])) share = self.api.get(ctx, 'fake_id') self.assertEqual(rv, share) def test_get_non_admin_deferred_state(self): rv = { 'id': 'fake_id', 'is_public': False, 'name': 'bar', 'status': constants.STATUS_ERROR_DEFERRED_DELETING, 'project_id': 'fake_pid_2', 'share_server_id': 'fake_server_3', } self.mock_object(db_api, 'share_get', mock.Mock(return_value=rv)) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) self.mock_object( policy, 'check_policy', mock.Mock(side_effect=[True, False])) self.assertRaises( exception.NotFound, self.api.get, ctx, 'fake_id') def test_get_not_authorized(self): share = db_utils.create_share( is_public=False, project_id='5db325fc4de14fe1a860ff69f190c78c') share_api.policy.check_policy.return_value = False ctx = context.RequestContext('df6d65cc1f8946ba86be06b8140ec4b3', 'e8133457b853436591a7e4610e7ce679', is_admin=False) with mock.patch.object(db_api, 'share_get', mock.Mock(return_value=share)): self.assertRaises(exception.NotFound, self.api.get, ctx, share['id']) share_api.policy.check_policy.assert_called_once_with( ctx, 'share', 'get', share, do_raise=False) db_api.share_get.assert_called_once_with(ctx, share['id']) @mock.patch.object(db_api, 'share_snapshot_get_all_by_project', mock.Mock()) def test_get_all_snapshots_admin_not_all_tenants(self): ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=True) mock_policy = self.mock_object(share_api.policy, 'check_policy', mock.Mock(return_value=False)) 
self.api.get_all_snapshots(ctx) mock_policy.assert_has_calls([ mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), mock.call( ctx, 'share_snapshot', 'list_snapshots_in_deferred_deletion_states', do_raise=False)]) db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters={}) @mock.patch.object(db_api, 'share_snapshot_get_all', mock.Mock()) def test_get_all_snapshots_admin_all_tenants(self): mock_policy = self.mock_object( share_api.policy, 'check_policy', mock.Mock(side_effect=[False, True, False])) self.api.get_all_snapshots(self.context, search_opts={'all_tenants': 1}) mock_policy.assert_has_calls([ mock.call(self.context, 'share_snapshot', 'get_all_snapshots'), mock.call( self.context, 'share_snapshot', 'list_all_projects', do_raise=False), mock.call( self.context, 'share_snapshot', 'list_snapshots_in_deferred_deletion_states', do_raise=False)]) db_api.share_snapshot_get_all.assert_called_once_with( self.context, limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters={}) @mock.patch.object(db_api, 'share_snapshot_get_all_by_project', mock.Mock()) def test_get_all_snapshots_not_admin(self): ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False) mock_policy = self.mock_object(share_api.policy, 'check_policy', mock.Mock(return_value=False)) self.api.get_all_snapshots(ctx) mock_policy.assert_has_calls([ mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), mock.call( ctx, 'share_snapshot', 'list_snapshots_in_deferred_deletion_states', do_raise=False)]) db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters={}) def test_get_all_snapshots_not_admin_search_opts(self): search_opts = {'size': 'fakesize'} fake_objs = [{'name': 'fakename1'}, search_opts] ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False) self.mock_object(db_api, 
'share_snapshot_get_all_by_project', mock.Mock(return_value=fake_objs)) mock_policy = self.mock_object(share_api.policy, 'check_policy', mock.Mock(return_value=False)) result = self.api.get_all_snapshots(ctx, search_opts) self.assertEqual(fake_objs, result) mock_policy.assert_has_calls([ mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), mock.call( ctx, 'share_snapshot', 'list_snapshots_in_deferred_deletion_states', do_raise=False)]) db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters=search_opts) @ddt.data(({'name': 'fo'}, 0, []), ({'description': 'd'}, 0, []), ({'name': 'foo', 'description': 'd'}, 0, []), ({'name': 'foo'}, 1, [{'name': 'foo', 'description': 'ds'}]), ({'description': 'ds'}, 1, [{'name': 'foo', 'description': 'ds'}]), ({'name~': 'foo', 'description~': 'ds'}, 2, [{'name': 'foo', 'description': 'ds'}, {'name': 'foo1', 'description': 'ds1'}]), ({'name': 'foo', 'description~': 'ds'}, 1, [{'name': 'foo', 'description': 'ds'}]), ({'name~': 'foo', 'description': 'ds'}, 1, [{'name': 'foo', 'description': 'ds'}])) @ddt.unpack def test_get_all_snapshots_filter_by_name_and_description( self, search_opts, get_snapshot_number, res_snapshots): fake_objs = [{'name': 'fo2', 'description': 'd2'}, {'name': 'foo', 'description': 'ds'}, {'name': 'foo1', 'description': 'ds1'}] ctx = context.RequestContext('fakeuid', 'fakepid', is_admin=False) self.mock_object(db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=res_snapshots)) mock_policy = self.mock_object(share_api.policy, 'check_policy') result = self.api.get_all_snapshots(ctx, search_opts) self.assertEqual(get_snapshot_number, len(result)) if get_snapshot_number == 2: self.assertEqual(fake_objs[1:], result) elif get_snapshot_number == 1: self.assertEqual(fake_objs[1:2], result) mock_policy.assert_has_calls([ mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), mock.call( ctx, 'share_snapshot', 
'list_snapshots_in_deferred_deletion_states', do_raise=False)]) db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fakepid', limit=None, offset=None, sort_dir='desc', sort_key='share_id', filters=search_opts) def test_get_all_snapshots_with_sorting_valid(self): self.mock_object( db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) mock_policy = self.mock_object(share_api.policy, 'check_policy', mock.Mock(return_value=False)) snapshots = self.api.get_all_snapshots( ctx, sort_key='status', sort_dir='asc') mock_policy.assert_has_calls([ mock.call(ctx, 'share_snapshot', 'get_all_snapshots'), mock.call( ctx, 'share_snapshot', 'list_snapshots_in_deferred_deletion_states', do_raise=False)]) db_api.share_snapshot_get_all_by_project.assert_called_once_with( ctx, 'fake_pid_1', limit=None, offset=None, sort_dir='asc', sort_key='status', filters={}) self.assertEqual(_FAKE_LIST_OF_ALL_SNAPSHOTS[0], snapshots) def test_get_all_snapshots_sort_key_invalid(self): self.mock_object( db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) self.assertRaises( exception.InvalidInput, self.api.get_all_snapshots, ctx, sort_key=1, ) share_api.policy.check_policy.assert_called_once_with( ctx, 'share_snapshot', 'get_all_snapshots') def test_get_all_snapshots_sort_dir_invalid(self): self.mock_object( db_api, 'share_snapshot_get_all_by_project', mock.Mock(return_value=_FAKE_LIST_OF_ALL_SNAPSHOTS[0])) ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) self.assertRaises( exception.InvalidInput, self.api.get_all_snapshots, ctx, sort_dir=1, ) share_api.policy.check_policy.assert_called_once_with( ctx, 'share_snapshot', 'get_all_snapshots') def test_allow_access_rule_already_exists(self): share = 
db_utils.create_share(status=constants.STATUS_AVAILABLE) fake_access = db_utils.create_access(share_id=share['id']) self.mock_object(self.api.db, 'share_access_create') self.assertRaises( exception.ShareAccessExists, self.api.allow_access, self.context, share, fake_access['access_type'], fake_access['access_to'], fake_access['access_level']) self.assertFalse(self.api.db.share_access_create.called) def test_allow_access_invalid_access_level(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) self.mock_object(self.api.db, 'share_access_create') self.assertRaises( exception.InvalidShareAccess, self.api.allow_access, self.context, share, 'user', 'alice', access_level='execute') self.assertFalse(self.api.db.share_access_create.called) @ddt.data({'host': None}, {'status': constants.STATUS_ERROR_DELETING, 'access_rules_status': constants.STATUS_ACTIVE}, {'host': None, 'access_rules_status': constants.STATUS_ERROR}, {'access_rules_status': constants.STATUS_ERROR}) def test_allow_access_invalid_instance(self, params): share = db_utils.create_share(host='fake') db_utils.create_share_instance(share_id=share['id']) db_utils.create_share_instance(share_id=share['id'], **params) self.mock_object(self.api.db, 'share_access_create') self.assertRaises(exception.InvalidShare, self.api.allow_access, self.context, share, 'ip', '10.0.0.1') self.assertFalse(self.api.db.share_access_create.called) @ddt.data(*(constants.ACCESS_LEVELS + (None,))) def test_allow_access(self, level): share = db_utils.create_share(status=constants.STATUS_AVAILABLE) values = { 'share_id': share['id'], 'access_type': 'fake_access_type', 'access_to': 'fake_access_to', 'access_level': level, 'metadata': None, } fake_access = copy.deepcopy(values) fake_access.update({ 'id': 'fake_access_id', 'state': constants.STATUS_ACTIVE, 'deleted': 'fake_deleted', 'deleted_at': 'fake_deleted_at', 'instance_mappings': ['foo', 'bar'], }) self.mock_object(db_api, 'share_get', mock.Mock(return_value=share)) 
self.mock_object(db_api, 'share_access_create', mock.Mock(return_value=fake_access)) self.mock_object(db_api, 'share_access_get', mock.Mock(return_value=fake_access)) self.mock_object(db_api, 'share_access_get_all_by_type_and_access', mock.Mock(return_value=[])) self.mock_object(self.api, 'allow_access_to_instance') access = self.api.allow_access( self.context, share, fake_access['access_type'], fake_access['access_to'], level) self.assertEqual(fake_access, access) db_api.share_access_create.assert_called_once_with( self.context, values) self.api.allow_access_to_instance.assert_called_once_with( self.context, share.instance) def test_allow_access_to_instance(self): share = db_utils.create_share(host='fake') rpc_method = self.mock_object(self.api.share_rpcapi, 'update_access') self.api.allow_access_to_instance(self.context, share.instance) rpc_method.assert_called_once_with(self.context, share.instance) @ddt.data({'host': None}, {'status': constants.STATUS_ERROR_DELETING, 'access_rules_status': constants.STATUS_ACTIVE}, {'host': None, 'access_rules_status': constants.STATUS_ERROR}, {'access_rules_status': constants.STATUS_ERROR}) def test_deny_access_invalid_instance(self, params): share = db_utils.create_share(host='fake') db_utils.create_share_instance(share_id=share['id']) db_utils.create_share_instance(share_id=share['id'], **params) access_rule = db_utils.create_access(share_id=share['id']) self.mock_object(self.api, 'deny_access_to_instance') self.assertRaises(exception.InvalidShare, self.api.deny_access, self.context, share, access_rule) self.assertFalse(self.api.deny_access_to_instance.called) def test_deny_access(self): share = db_utils.create_share( host='fake', status=constants.STATUS_AVAILABLE, access_rules_status=constants.STATUS_ACTIVE) access_rule = db_utils.create_access(share_id=share['id']) self.mock_object(self.api, 'deny_access_to_instance') retval = self.api.deny_access(self.context, share, access_rule) self.assertIsNone(retval) 
self.api.deny_access_to_instance.assert_called_once_with( self.context, share.instance, access_rule) def test_deny_access_to_instance(self): share = db_utils.create_share(host='fake') share_instance = db_utils.create_share_instance( share_id=share['id'], host='fake') access = db_utils.create_access(share_id=share['id']) rpc_method = self.mock_object(self.api.share_rpcapi, 'update_access') self.mock_object(db_api, 'share_instance_access_get', mock.Mock(return_value=access.instance_mappings[0])) mock_share_instance_rules_status_update = self.mock_object( self.api.access_helper, 'get_and_update_share_instance_access_rules_status') mock_access_rule_state_update = self.mock_object( self.api.access_helper, 'get_and_update_share_instance_access_rule') self.api.deny_access_to_instance(self.context, share_instance, access) rpc_method.assert_called_once_with(self.context, share_instance) mock_access_rule_state_update.assert_called_once_with( self.context, access['id'], updates={'state': constants.ACCESS_STATE_QUEUED_TO_DENY}, share_instance_id=share_instance['id']) expected_conditional_change = { constants.STATUS_ACTIVE: constants.SHARE_INSTANCE_RULES_SYNCING, } mock_share_instance_rules_status_update.assert_called_once_with( self.context, share_instance_id=share_instance['id'], conditionally_change=expected_conditional_change) def test_access_get(self): with mock.patch.object(db_api, 'share_access_get', mock.Mock(return_value={'share_id': 'fake'})): self.mock_object(self.api, 'get') rule = self.api.access_get(self.context, 'fakeid') self.assertEqual({'share_id': 'fake'}, rule) db_api.share_access_get.assert_called_once_with( self.context, 'fakeid') self.api.get.assert_called_once_with(self.context, 'fake') def test_access_get_all(self): share = db_utils.create_share(id='fakeid') values = { 'fakeacc0id': { 'id': 'fakeacc0id', 'access_type': 'fakeacctype', 'access_to': 'fakeaccto', 'access_level': 'rw', 'share_id': share['id'], }, 'fakeacc1id': { 'id': 'fakeacc1id', 
'access_type': 'fakeacctype', 'access_to': 'fakeaccto', 'access_level': 'rw', 'share_id': share['id'], }, } rules = [ db_utils.create_access(**values['fakeacc0id']), db_utils.create_access(**values['fakeacc1id']), ] # add state property values['fakeacc0id']['state'] = constants.STATUS_ACTIVE values['fakeacc1id']['state'] = constants.STATUS_ACTIVE self.mock_object(db_api, 'share_access_get_all_for_share', mock.Mock(return_value=rules)) actual = self.api.access_get_all(self.context, share) self.assertEqual(rules, actual) share_api.policy.check_policy.assert_called_once_with( self.context, 'share', 'access_get_all') db_api.share_access_get_all_for_share.assert_called_once_with( self.context, 'fakeid', filters=None) def test_share_metadata_get(self): metadata = {'a': 'b', 'c': 'd'} share_id = uuidutils.generate_uuid() db_api.share_create(self.context, {'id': share_id, 'metadata': metadata}) self.assertEqual(metadata, db_api.share_metadata_get(self.context, share_id)) def test_share_metadata_update(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '5'} should_be = {'a': '3', 'c': '2', 'd': '5'} share_id = uuidutils.generate_uuid() db_api.share_create(self.context, {'id': share_id, 'metadata': metadata1}) db_api.share_metadata_update(self.context, share_id, metadata2, False) self.assertEqual(should_be, db_api.share_metadata_get(self.context, share_id)) def test_share_metadata_update_delete(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '4'} should_be = metadata2 share_id = uuidutils.generate_uuid() db_api.share_create(self.context, {'id': share_id, 'metadata': metadata1}) db_api.share_metadata_update(self.context, share_id, metadata2, True) self.assertEqual(should_be, db_api.share_metadata_get(self.context, share_id)) def test_extend_invalid_status(self): invalid_status = 'fake' share = db_utils.create_share(status=invalid_status) new_size = 123 self.assertRaises(exception.InvalidShare, self.api.extend, self.context, share, 
new_size) def test_extend_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) new_size = 123 self.assertRaises(exception.ShareBusyException, self.api.extend, self.context, share, new_size) def test_extend_invalid_size(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=200) new_size = 123 self.assertRaises(exception.InvalidInput, self.api.extend, self.context, share, new_size) def test_extend_share_over_per_share_quota(self): quota.CONF.set_default("per_share_gigabytes", 5, 'quota') share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=4) new_size = 6 self.assertRaises(exception.ShareSizeExceedsLimit, self.api.extend, self.context, share, new_size) def test_extend_with_share_type_size_limit(self): ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=False) share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=3) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=self.sized_sha_type)) self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=False)) new_size = 5 self.assertRaises(exception.InvalidInput, self.api.extend, ctx, share, new_size) def test_extend_with_share_type_size_limit_admin(self): ctx = context.RequestContext('fake_uid', 'fake_pid_1', is_admin=True) share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=3) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=self.sized_sha_type)) self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) new_size = 7 self.assertRaises(exception.InvalidInput, self.api.extend, ctx, share, new_size) def _setup_extend_mocks(self, supports_replication): replica_list = [] if supports_replication: replica_list.append({'id': 'fake_replica_id'}) replica_list.append({'id': 'fake_replica_id_2'}) self.mock_object(db_api, 
'share_replicas_get_all_by_share', mock.Mock(return_value=replica_list)) @ddt.data( (False, 'gigabytes', exception.ShareSizeExceedsAvailableQuota), (True, 'replica_gigabytes', exception.ShareReplicaSizeExceedsAvailableQuota) ) @ddt.unpack def test_extend_quota_error(self, supports_replication, quota_key, expected_exception): self._setup_extend_mocks(supports_replication) share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=100) new_size = 123 replica_amount = len( db_api.share_replicas_get_all_by_share.return_value) value_to_be_extended = new_size - share['size'] usages = {quota_key: {'reserved': 11, 'in_use': 12}} quotas = {quota_key: 13} overs = {quota_key: new_size} exc = exception.OverQuota(usages=usages, quotas=quotas, overs=overs) expected_deltas = { 'project_id': share['project_id'], 'gigabytes': value_to_be_extended, 'user_id': share['user_id'], 'share_type_id': share['instance']['share_type_id'] } if supports_replication: expected_deltas.update( {'replica_gigabytes': value_to_be_extended * replica_amount}) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc)) self.assertRaises(expected_exception, self.api.extend, self.context, share, new_size) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, **expected_deltas ) def test_extend_quota_user(self): self._setup_extend_mocks(False) share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=100) diff_user_context = context.RequestContext( user_id='fake2', project_id='fake', is_admin=False ) fake_type = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, 'create_share_from_snapshot_support': False, 'driver_handles_share_servers': False, }, } new_size = 123 size_increase = int(new_size) - share['size'] self.mock_object(quota.QUOTAS, 'reserve') self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type)) self.api.extend(diff_user_context, share, new_size) quota.QUOTAS.reserve.assert_called_once_with( diff_user_context, 
project_id=share['project_id'], gigabytes=size_increase, share_type_id=None, user_id=share['user_id'] ) @ddt.data(True, False) def test_extend_valid(self, supports_replication): self._setup_extend_mocks(supports_replication) share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=100) new_size = 123 size_increase = int(new_size) - share['size'] replica_amount = len( db_api.share_replicas_get_all_by_share.return_value) expected_deltas = { 'project_id': share['project_id'], 'gigabytes': size_increase, 'user_id': share['user_id'], 'share_type_id': share['instance']['share_type_id'] } if supports_replication: new_replica_size = size_increase * replica_amount expected_deltas.update({'replica_gigabytes': new_replica_size}) self.mock_object(self.api, 'update') self.mock_object(self.api.scheduler_rpcapi, 'extend_share') self.mock_object(quota.QUOTAS, 'reserve') self.mock_object(share_types, 'get_share_type') self.mock_object(share_types, 'provision_filter_on_size') self.mock_object(self.api, '_get_request_spec_dict') self.api.extend(self.context, share, new_size) self.api.update.assert_called_once_with( self.context, share, {'status': constants.STATUS_EXTENDING}) self.api.scheduler_rpcapi.extend_share.assert_called_once_with( self.context, share['id'], new_size, mock.ANY, mock.ANY ) quota.QUOTAS.reserve.assert_called_once_with( self.context, **expected_deltas) def test_shrink_invalid_status(self): invalid_status = 'fake' share = db_utils.create_share(status=invalid_status) self.assertRaises(exception.InvalidShare, self.api.shrink, self.context, share, 123) def test_shrink_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.shrink, self.context, share, 123) @ddt.data(300, 0, -1) def test_shrink_invalid_size(self, new_size): share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=200) 
self.assertRaises(exception.InvalidInput, self.api.shrink, self.context, share, new_size) @ddt.data(constants.STATUS_AVAILABLE, constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR) def test_shrink_valid(self, share_status): share = db_utils.create_share(status=share_status, size=100) new_size = 50 self.mock_object(self.api, 'update') self.mock_object(self.api.share_rpcapi, 'shrink_share') self.api.shrink(self.context, share, new_size) self.api.update.assert_called_once_with( self.context, share, {'status': constants.STATUS_SHRINKING}) self.api.share_rpcapi.shrink_share.assert_called_once_with( self.context, share, new_size ) def test_shrink_with_share_type_size_limit(self): share = db_utils.create_share(status=constants.STATUS_AVAILABLE, size=3) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=self.sized_sha_type)) new_size = 1 self.assertRaises(exception.InvalidInput, self.api.shrink, self.context, share, new_size) def test_snapshot_allow_access(self): access_to = '1.1.1.1' access_type = 'ip' share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id'], status=constants.STATUS_AVAILABLE) access = db_utils.create_snapshot_access( share_snapshot_id=snapshot['id']) values = {'share_snapshot_id': snapshot['id'], 'access_type': access_type, 'access_to': access_to} existing_access_check = self.mock_object( db_api, 'share_snapshot_check_for_existing_access', mock.Mock(return_value=False)) access_create = self.mock_object( db_api, 'share_snapshot_access_create', mock.Mock(return_value=access)) self.mock_object(self.api.share_rpcapi, 'snapshot_update_access') out = self.api.snapshot_allow_access(self.context, snapshot, access_type, access_to) self.assertEqual(access, out) existing_access_check.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id'], access_type, access_to) access_create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), values) def 
test_snapshot_allow_access_instance_exception(self): access_to = '1.1.1.1' access_type = 'ip' share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) existing_access_check = self.mock_object( db_api, 'share_snapshot_check_for_existing_access', mock.Mock(return_value=False)) self.assertRaises(exception.InvalidShareSnapshotInstance, self.api.snapshot_allow_access, self.context, snapshot, access_type, access_to) existing_access_check.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id'], access_type, access_to) def test_snapshot_allow_access_access_exists_exception(self): access_to = '1.1.1.1' access_type = 'ip' share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) db_utils.create_snapshot_access( share_snapshot_id=snapshot['id'], access_to=access_to, access_type=access_type) existing_access_check = self.mock_object( db_api, 'share_snapshot_check_for_existing_access', mock.Mock(return_value=True)) self.assertRaises(exception.ShareSnapshotAccessExists, self.api.snapshot_allow_access, self.context, snapshot, access_type, access_to) existing_access_check.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id'], access_type, access_to) def test_snapshot_deny_access(self): share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id'], status=constants.STATUS_AVAILABLE) access = db_utils.create_snapshot_access( share_snapshot_id=snapshot['id']) mapping = {'id': 'fake_id', 'state': constants.STATUS_ACTIVE, 'access_id': access['id']} access_get = self.mock_object( db_api, 'share_snapshot_instance_access_get', mock.Mock(return_value=mapping)) access_update_state = self.mock_object( db_api, 'share_snapshot_instance_access_update') update_access = self.mock_object(self.api.share_rpcapi, 'snapshot_update_access') self.api.snapshot_deny_access(self.context, snapshot, access) access_get.assert_called_once_with( 
utils.IsAMatcher(context.RequestContext), access['id'], snapshot['instance']['id']) access_update_state.assert_called_once_with( utils.IsAMatcher(context.RequestContext), access['id'], snapshot.instance['id'], {'state': constants.ACCESS_STATE_QUEUED_TO_DENY}) update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['instance']) def test_snapshot_deny_access_exception(self): share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) access = db_utils.create_snapshot_access( share_snapshot_id=snapshot['id']) self.assertRaises(exception.InvalidShareSnapshotInstance, self.api.snapshot_deny_access, self.context, snapshot, access) def test_snapshot_access_get_all(self): share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) access = [] access.append(db_utils.create_snapshot_access( share_snapshot_id=snapshot['id'])) self.mock_object( db_api, 'share_snapshot_access_get_all_for_share_snapshot', mock.Mock(return_value=access)) out = self.api.snapshot_access_get_all(self.context, snapshot) self.assertEqual(access, out) def test_snapshot_access_get(self): share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) access = db_utils.create_snapshot_access( share_snapshot_id=snapshot['id']) self.mock_object( db_api, 'share_snapshot_access_get', mock.Mock(return_value=access)) out = self.api.snapshot_access_get(self.context, access['id']) self.assertEqual(access, out) def test_snapshot_export_locations_get(self): share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) self.mock_object( db_api, 'share_snapshot_export_locations_get', mock.Mock(return_value='')) out = self.api.snapshot_export_locations_get(self.context, snapshot) self.assertEqual('', out) def test_snapshot_export_location_get(self): fake_el = '/fake_export_location' self.mock_object( db_api, 'share_snapshot_instance_export_location_get', 
mock.Mock(return_value=fake_el)) out = self.api.snapshot_export_location_get(self.context, 'fake_id') self.assertEqual(fake_el, out) @ddt.data(True, False) def test__modify_quotas_for_share_migration(self, new_replication_type): extra_specs = ( {'replication_type': 'readable'} if new_replication_type else {}) share = db_utils.create_share() share_type = db_utils.create_share_type(extra_specs=extra_specs) expected_deltas = { 'project_id': share['project_id'], 'user_id': share['user_id'], 'shares': 1, 'gigabytes': share['size'], 'share_type_id': share_type['id'] } if new_replication_type: expected_deltas.update({ 'share_replicas': 1, 'replica_gigabytes': share['size'], }) reservations = 'reservations' mock_specs_get = self.mock_object( self.api, 'get_share_attributes_from_share_type', mock.Mock(return_value=extra_specs)) mock_reserve = self.mock_object( quota.QUOTAS, 'reserve', mock.Mock(return_value=reservations)) mock_commit = self.mock_object(quota.QUOTAS, 'commit') self.api._modify_quotas_for_share_migration( self.context, share, share_type) mock_specs_get.assert_called_once_with(share_type) mock_reserve.assert_called_once_with( self.context, **expected_deltas) mock_commit.assert_called_once_with( self.context, reservations, project_id=share['project_id'], user_id=share['user_id'], share_type_id=share_type['id']) @ddt.data( ('replica_gigabytes', exception.ShareReplicaSizeExceedsAvailableQuota), ('share_replicas', exception.ShareReplicasLimitExceeded), ('gigabytes', exception.ShareSizeExceedsAvailableQuota) ) @ddt.unpack def test__modify_quotas_for_share_migration_reservation_failed( self, over_resource, expected_exception): extra_specs = {'replication_type': 'readable'} share = db_utils.create_share() share_type = db_utils.create_share_type(extra_specs=extra_specs) expected_deltas = { 'project_id': share['project_id'], 'user_id': share['user_id'], 'share_replicas': 1, 'shares': 1, 'gigabytes': share['size'], 'replica_gigabytes': share['size'], 'share_type_id': 
share_type['id'] } usages = { over_resource: { 'reserved': 'fake', 'in_use': 'fake' } } quotas = { over_resource: 'fake' } effect_exc = exception.OverQuota( overs=[over_resource], usages=usages, quotas=quotas) mock_specs_get = self.mock_object( self.api, 'get_share_attributes_from_share_type', mock.Mock(return_value=extra_specs)) mock_reserve = self.mock_object( quota.QUOTAS, 'reserve', mock.Mock(side_effect=effect_exc)) self.assertRaises( expected_exception, self.api._modify_quotas_for_share_migration, self.context, share, share_type ) mock_specs_get.assert_called_once_with(share_type) mock_reserve.assert_called_once_with(self.context, **expected_deltas) @ddt.data({'share_type': True, 'share_net': True, 'dhss': True}, {'share_type': False, 'share_net': True, 'dhss': True}, {'share_type': False, 'share_net': False, 'dhss': True}, {'share_type': True, 'share_net': False, 'dhss': False}, {'share_type': False, 'share_net': False, 'dhss': False}) @ddt.unpack def test_migration_start(self, share_type, share_net, dhss): host = 'fake2@backend#pool' service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} share_network = None share_network_id = None if share_net: share_network = db_utils.create_share_network(id='fake_net_id') share_network_id = share_network['id'] fake_type = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, 'create_share_from_snapshot_support': False, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'mount_point_name_support': False, 'driver_handles_share_servers': dhss, }, } if share_type: fake_type_2 = { 'id': 'fake_type_2_id', 'extra_specs': { 'snapshot_support': False, 'create_share_from_snapshot_support': False, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'mount_point_name_support': False, 'driver_handles_share_servers': dhss, 'availability_zones': 'fake_az1,fake_az2', }, } else: fake_type_2 = fake_type share = db_utils.create_share( 
status=constants.STATUS_AVAILABLE, host='fake@backend#pool', share_type_id=fake_type['id'], share_network_id=share_network_id) request_spec = self._get_request_spec_dict( share, fake_type_2, self.context, size=0, availability_zone_id='fake_az_id', share_network_id=share_network_id) self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host') self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type)) self.mock_object(utils, 'validate_service_host') self.mock_object(db_api, 'share_instance_update') self.mock_object(db_api, 'share_update') self.mock_object(db_api, 'service_get_by_args', mock.Mock(return_value=service)) self.mock_object(share_api.API, '_modify_quotas_for_share_migration') if share_type: self.api.migration_start(self.context, share, host, False, True, True, True, True, share_network, fake_type_2) else: self.api.migration_start(self.context, share, host, False, True, True, True, True, share_network, None) self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with( self.context, share['id'], host, False, True, True, True, True, share_network_id, fake_type_2['id'], request_spec) if not share_type: share_types.get_share_type.assert_called_once_with( self.context, fake_type['id']) utils.validate_service_host.assert_called_once_with( self.context, 'fake2@backend') db_api.service_get_by_args.assert_called_once_with( self.context, 'fake2@backend', 'manila-share') db_api.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_STARTING}) db_api.share_instance_update.assert_called_once_with( self.context, share.instance['id'], {'status': constants.STATUS_MIGRATING}) if share_type: (share_api.API._modify_quotas_for_share_migration. 
assert_called_once_with(self.context, share, fake_type_2)) def test_migration_start_with_new_share_type_limit(self): host = 'fake2@backend#pool' self.mock_object(utils, 'validate_service_host') share = db_utils.create_share( status=constants.STATUS_AVAILABLE, size=1) self.assertRaises(exception.InvalidInput, self.api.migration_start, self.context, share, host, False, True, True, True, True, None, self.sized_sha_type) def test_migration_start_destination_az_unsupported(self): host = 'fake2@backend#pool' host_without_pool = host.split('#')[0] service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az3'}} share_network = db_utils.create_share_network(id='fake_net_id') share_network_id = share_network['id'] existing_share_type = { 'id': '4b5b0920-a294-401b-bb7d-c55b425e1cad', 'name': 'fake_type_1', 'extra_specs': { 'snapshot_support': False, 'create_share_from_snapshot_support': False, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'mount_point_name_support': False, 'driver_handles_share_servers': 'true', 'availability_zones': 'fake_az3' }, } new_share_type = { 'id': 'fa844ae2-494d-4da9-95e7-37ac6a26f635', 'name': 'fake_type_2', 'extra_specs': { 'snapshot_support': False, 'create_share_from_snapshot_support': False, 'revert_to_snapshot_support': False, 'mount_snapshot_support': False, 'mount_point_name_support': False, 'driver_handles_share_servers': 'true', 'availability_zones': 'fake_az1,fake_az2', }, } share = db_utils.create_share( status=constants.STATUS_AVAILABLE, host='fake@backend#pool', share_type_id=existing_share_type['id'], share_network_id=share_network_id) self.mock_object(self.api, '_get_request_spec_dict') self.mock_object(self.scheduler_rpcapi, 'migrate_share_to_host') self.mock_object(share_types, 'get_share_type') self.mock_object(utils, 'validate_service_host') self.mock_object(db_api, 'share_instance_update') self.mock_object(db_api, 'share_update') self.mock_object(db_api, 'service_get_by_args', 
mock.Mock(return_value=service)) self.mock_object(share_api.API, '_modify_quotas_for_share_migration') self.assertRaises(exception.InvalidShare, self.api.migration_start, self.context, share, host, False, True, True, True, False, new_share_network=share_network, new_share_type=new_share_type) utils.validate_service_host.assert_called_once_with( self.context, host_without_pool) share_types.get_share_type.assert_not_called() db_api.share_update.assert_not_called() db_api.service_get_by_args.assert_called_once_with( self.context, host_without_pool, 'manila-share') self.api._get_request_spec_dict.assert_not_called() db_api.share_instance_update.assert_not_called() self.scheduler_rpcapi.migrate_share_to_host.assert_not_called() @ddt.data({'force_host_assisted': True, 'writable': True, 'preserve_metadata': False, 'preserve_snapshots': False, 'nondisruptive': False}, {'force_host_assisted': True, 'writable': False, 'preserve_metadata': True, 'preserve_snapshots': False, 'nondisruptive': False}, {'force_host_assisted': True, 'writable': False, 'preserve_metadata': False, 'preserve_snapshots': True, 'nondisruptive': False}, {'force_host_assisted': True, 'writable': False, 'preserve_metadata': False, 'preserve_snapshots': False, 'nondisruptive': True}) @ddt.unpack def test_migration_start_invalid_host_and_driver_assisted_params( self, force_host_assisted, writable, preserve_metadata, preserve_snapshots, nondisruptive): self.assertRaises( exception.InvalidInput, self.api.migration_start, self.context, 'some_share', 'some_host', force_host_assisted, preserve_metadata, writable, preserve_snapshots, nondisruptive) @ddt.data(True, False) def test_migration_start_invalid_share_network_type_combo(self, dhss): host = 'fake2@backend#pool' share_network = None if not dhss: share_network = db_utils.create_share_network(id='fake_net_id') fake_type = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, 'driver_handles_share_servers': not dhss, }, } fake_type_2 = { 'id': 
'fake_type_2_id', 'extra_specs': { 'snapshot_support': False, 'driver_handles_share_servers': dhss, }, } share = db_utils.create_share( status=constants.STATUS_AVAILABLE, host='fake@backend#pool', share_type_id=fake_type['id']) self.mock_object(utils, 'validate_service_host') self.mock_object(share_api.API, '_modify_quotas_for_share_migration') self.assertRaises( exception.InvalidInput, self.api.migration_start, self.context, share, host, False, True, True, True, True, share_network, fake_type_2) utils.validate_service_host.assert_called_once_with( self.context, 'fake2@backend') def test_migration_start_status_unavailable(self): host = 'fake2@backend#pool' share = db_utils.create_share( status=constants.STATUS_ERROR) self.assertRaises(exception.InvalidShare, self.api.migration_start, self.context, share, host, False, True, True, True, True) def test_migration_start_access_rules_status_error(self): host = 'fake2@backend#pool' instance = db_utils.create_share_instance( share_id='fake_share_id', access_rules_status=constants.STATUS_ERROR, status=constants.STATUS_AVAILABLE) share = db_utils.create_share( id='fake_share_id', instances=[instance]) self.assertRaises(exception.InvalidShare, self.api.migration_start, self.context, share, host, False, True, True, True, True) def test_migration_start_task_state_invalid(self): host = 'fake2@backend#pool' share = db_utils.create_share( status=constants.STATUS_AVAILABLE, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.migration_start, self.context, share, host, False, True, True, True, True) def test_migration_start_host_assisted_with_snapshots(self): host = 'fake2@backend#pool' share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE) self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=True)) self.assertRaises(exception.Conflict, self.api.migration_start, self.context, share, host, True, False, 
False, False, False) def test_migration_start_with_snapshots(self): host = 'fake2@backend#pool' fake_type = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': True, 'driver_handles_share_servers': False, }, } service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az'}} self.mock_object(db_api, 'service_get_by_args', mock.Mock(return_value=service)) self.mock_object(utils, 'validate_service_host') self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type)) share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=fake_type['id']) request_spec = self._get_request_spec_dict( share, fake_type, self.context, availability_zone_id='fake_az_id') self.api.migration_start(self.context, share, host, False, True, True, True, True) self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with( self.context, share['id'], host, False, True, True, True, True, None, 'fake_type_id', request_spec) def test_migration_start_has_replicas(self): host = 'fake2@backend#pool' share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, replication_type='dr') for i in range(1, 4): db_utils.create_share_replica( share_id=share['id'], replica_state='in_sync') self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=True)) mock_log = self.mock_object(share_api, 'LOG') mock_snapshot_get_call = self.mock_object( db_api, 'share_snapshot_get_all_for_share') # Share was updated after adding replicas, grabbing it again. 
share = db_api.share_get(self.context, share['id']) self.assertRaises(exception.Conflict, self.api.migration_start, self.context, share, host, False, True, True, True, True) self.assertTrue(mock_log.error.called) self.assertFalse(mock_snapshot_get_call.called) def test_migration_start_is_member_of_group(self): group = db_utils.create_share_group() share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_group_id=group['id']) mock_log = self.mock_object(share_api, 'LOG') self.assertRaises(exception.InvalidShare, self.api.migration_start, self.context, share, 'fake_host', False, True, True, True, True) self.assertTrue(mock_log.error.called) def test_migration_start_invalid_host(self): host = 'fake@backend#pool' share = db_utils.create_share( host='fake2@backend', status=constants.STATUS_AVAILABLE) self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=False)) self.assertRaises(exception.ServiceNotFound, self.api.migration_start, self.context, share, host, False, True, True, True, True) @ddt.data({'dhss': True, 'new_share_network_id': 'fake_net_id', 'new_share_type_id': 'fake_type_id'}, {'dhss': False, 'new_share_network_id': None, 'new_share_type_id': 'fake_type_id'}, {'dhss': True, 'new_share_network_id': 'fake_net_id', 'new_share_type_id': None}) @ddt. 
unpack def test_migration_start_same_data_as_source( self, dhss, new_share_network_id, new_share_type_id): host = 'fake@backend#pool' fake_type_src = { 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': True, 'driver_handles_share_servers': True, }, } new_share_type_param = None if new_share_type_id: new_share_type_param = { 'id': new_share_type_id, 'extra_specs': { 'snapshot_support': True, 'driver_handles_share_servers': dhss, }, } new_share_net_param = None if new_share_network_id: new_share_net_param = db_utils.create_share_network( id=new_share_network_id) share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=fake_type_src['id'], share_network_id=new_share_network_id) self.mock_object(utils, 'validate_service_host') self.mock_object(db_api, 'share_update') self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_type_src)) result = self.api.migration_start( self.context, share, host, False, True, True, True, True, new_share_net_param, new_share_type_param) self.assertEqual(200, result) db_api.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) @ddt.data({}, {'replication_type': None}) def test_create_share_replica_invalid_share_type(self, attributes): share = fakes.fake_share(id='FAKE_SHARE_ID', **attributes) mock_request_spec_call = self.mock_object( self.api, 'create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') self.assertRaises(exception.InvalidShare, self.api.create_share_replica, self.context, share) self.assertFalse(mock_request_spec_call.called) self.assertFalse(mock_db_update_call.called) self.assertFalse(mock_scheduler_rpcapi_call.called) def test_create_share_replica_busy_share(self): share = fakes.fake_share( id='FAKE_SHARE_ID', 
task_state='doing_something_real_important', is_busy=True, replication_type='dr') mock_request_spec_call = self.mock_object( self.api, 'create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') self.assertRaises(exception.ShareBusyException, self.api.create_share_replica, self.context, share) self.assertFalse(mock_request_spec_call.called) self.assertFalse(mock_db_update_call.called) self.assertFalse(mock_scheduler_rpcapi_call.called) @ddt.data(None, []) def test_create_share_replica_no_active_replica(self, active_replicas): share = fakes.fake_share( id='FAKE_SHARE_ID', replication_type='dr') mock_request_spec_call = self.mock_object( self.api, 'create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replicas)) self.assertRaises(exception.ReplicationException, self.api.create_share_replica, self.context, share) self.assertFalse(mock_request_spec_call.called) self.assertFalse(mock_db_update_call.called) self.assertFalse(mock_scheduler_rpcapi_call.called) @ddt.data(None, 'fake-share-type') def test_create_share_replica_type_doesnt_support_AZ(self, st_name): share_type = fakes.fake_share_type( name=st_name, extra_specs={'availability_zones': 'zone 1,zone 3'}) share = fakes.fake_share( id='FAKE_SHARE_ID', replication_type='dr', availability_zone='zone 2') share['instance'].update({ 'share_type': share_type, 'share_type_id': '359b9851-2bd5-4404-89a9-5cd22bbc5fb9', }) mock_request_spec_call = self.mock_object( self.api, 'create_share_instance_and_get_request_spec') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_scheduler_rpcapi_call = 
self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=share_type)) self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value=mock.Mock( return_value={'host': 'fake_ar_host'}))) self.assertRaises(exception.InvalidShare, self.api.create_share_replica, self.context, share, availability_zone='zone 2') share_types.get_share_type.assert_called_once_with( self.context, '359b9851-2bd5-4404-89a9-5cd22bbc5fb9') self.assertFalse(mock_request_spec_call.called) self.assertFalse(mock_db_update_call.called) self.assertFalse(mock_scheduler_rpcapi_call.called) def test_create_share_replica_subnet_not_found(self): request_spec = fakes.fake_replica_request_spec() replica = request_spec['share_instance_properties'] extra_specs = { 'availability_zones': 'FAKE_AZ,FAKE_AZ2', 'replication_type': constants.REPLICATION_TYPE_DR } share_type = db_utils.create_share_type(extra_specs=extra_specs) share_type = db_api.share_type_get(self.context, share_type['id']) az_name = 'FAKE_AZ' share = db_utils.create_share( id=replica['share_id'], replication_type='dr') self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value=mock.Mock( return_value={'host': 'fake_ar_host'}))) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=share_type)) self.mock_object(db_api, 'availability_zone_get') self.mock_object( db_api, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=None)) self.assertRaises(exception.InvalidShare, self.api.create_share_replica, self.context, share, availability_zone=az_name, share_network_id='fake_id') (db_api.share_replicas_get_available_active_replica .assert_called_once_with(self.context, share['id'])) self.assertTrue(share_types.get_share_type.called) db_api.availability_zone_get.assert_called_once_with( self.context, az_name) self.assertTrue( 
(db_api.share_network_subnets_get_all_by_availability_zone_id. called)) def test_create_share_replica_az_not_found(self): request_spec = fakes.fake_replica_request_spec() replica = request_spec['share_instance_properties'] extra_specs = { 'availability_zones': 'FAKE_AZ,FAKE_AZ2', 'replication_type': constants.REPLICATION_TYPE_DR } share_type = db_utils.create_share_type(extra_specs=extra_specs) share_type = db_api.share_type_get(self.context, share_type['id']) az_name = 'FAKE_AZ' share = db_utils.create_share( id=replica['share_id'], replication_type='dr') self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value=mock.Mock( return_value={'host': 'fake_ar_host'}))) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=share_type)) side_effect = exception.AvailabilityZoneNotFound(id=az_name) self.mock_object(db_api, 'availability_zone_get', mock.Mock(side_effect=side_effect)) self.assertRaises(exception.InvalidInput, self.api.create_share_replica, self.context, share, availability_zone=az_name, share_network_id='fake_id') (db_api.share_replicas_get_available_active_replica .assert_called_once_with(self.context, share['id'])) self.assertTrue(share_types.get_share_type.called) db_api.availability_zone_get.assert_called_once_with( self.context, az_name) @ddt.data( {'availability_zones': '', 'compatible_azs_name': ['fake_az_1'], 'compatible_azs_multiple': []}, {'availability_zones': 'fake_az_1,fake_az_2', 'compatible_azs_name': ['fake_az_2'], 'compatible_azs_multiple': []} ) @ddt.unpack def test_create_share_replica_azs_with_subnets(self, availability_zones, compatible_azs_name, compatible_azs_multiple): request_spec = fakes.fake_replica_request_spec() replica = request_spec['share_instance_properties'] share_network_id = 'fake_share_network_id' extra_specs = { 'availability_zones': availability_zones, 'replication_type': constants.REPLICATION_TYPE_DR } share_type = db_utils.create_share_type(extra_specs=extra_specs) 
share_type = db_api.share_type_get(self.context, share_type['id']) share = db_utils.create_share( id=replica['share_id'], replication_type='dr', share_type_id=share_type['id']) cast_rules_to_readonly = ( share['replication_type'] == constants.REPLICATION_TYPE_READABLE) fake_replica = fakes.fake_replica(id=replica['id']) fake_request_spec = fakes.fake_replica_request_spec() self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value={'host': 'fake_ar_host'})) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=share_type)) mock_get_all_az_subnet = self.mock_object( self.api, '_get_all_availability_zones_with_subnets', mock.Mock(return_value=[compatible_azs_name, compatible_azs_multiple])) if availability_zones == '': expected_azs = compatible_azs_name else: availability_zones = [ t for t in availability_zones.split(',') if availability_zones] expected_azs = ( [az for az in availability_zones if az in compatible_azs_name]) self.mock_object( self.api, 'create_share_instance_and_get_request_spec', mock.Mock(return_value=(fake_request_spec, fake_replica))) self.mock_object(db_api, 'share_replica_update') mock_snapshot_get_all_call = self.mock_object( db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[])) mock_sched_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') self.api.create_share_replica( self.context, share, share_network_id=share_network_id) (db_api.share_replicas_get_available_active_replica .assert_called_once_with(self.context, share['id'])) self.assertTrue(share_types.get_share_type.called) mock_get_all_az_subnet.assert_called_once_with( self.context, share_network_id ) (self.api.create_share_instance_and_get_request_spec. 
assert_called_once_with( self.context, share, availability_zone=None, share_network_id=share_network_id, share_type_id=share_type['id'], availability_zones=expected_azs, az_request_multiple_subnet_support_map={}, cast_rules_to_readonly=cast_rules_to_readonly)) db_api.share_replica_update.assert_called_once() mock_snapshot_get_all_call.assert_called_once() mock_sched_rpcapi_call.assert_called_once() @ddt.data( {'availability_zones': '', 'compatible_azs_name': [], 'compatible_azs_multiple': []}, {'availability_zones': 'fake_az_1,fake_az_2', 'compatible_azs_name': ['fake_az_3'], 'compatible_azs_multiple': []} ) @ddt.unpack def test_create_share_replica_azs_with_subnets_invalid_input( self, availability_zones, compatible_azs_name, compatible_azs_multiple): request_spec = fakes.fake_replica_request_spec() replica = request_spec['share_instance_properties'] share_network_id = 'fake_share_network_id' extra_specs = { 'availability_zones': availability_zones, 'replication_type': constants.REPLICATION_TYPE_DR } share_type = db_utils.create_share_type(extra_specs=extra_specs) share_type = db_api.share_type_get(self.context, share_type['id']) share = db_utils.create_share( id=replica['share_id'], replication_type='dr', share_type_id=share_type['id']) self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value={'host': 'fake_ar_host'})) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=share_type)) mock_get_all_az_subnet = self.mock_object( self.api, '_get_all_availability_zones_with_subnets', mock.Mock(return_value=[compatible_azs_name, compatible_azs_multiple])) self.assertRaises( exception.InvalidInput, self.api.create_share_replica, self.context, share, share_network_id=share_network_id) (db_api.share_replicas_get_available_active_replica .assert_called_once_with(self.context, share['id'])) self.assertTrue(share_types.get_share_type.called) mock_get_all_az_subnet.assert_called_once_with( self.context, share_network_id 
) @ddt.data({'has_snapshots': True, 'az_id': {}, 'extra_specs': { 'replication_type': constants.REPLICATION_TYPE_DR, }, 'share_network_id': None}, {'has_snapshots': False, 'az_id': {}, 'extra_specs': { 'availability_zones': 'FAKE_AZ,FAKE_AZ2', 'replication_type': constants.REPLICATION_TYPE_DR, }, 'share_network_id': None}, {'has_snapshots': True, 'az_id': {}, 'extra_specs': { 'availability_zones': 'FAKE_AZ,FAKE_AZ2', 'replication_type': constants.REPLICATION_TYPE_READABLE, }, 'share_network_id': None}, {'has_snapshots': False, 'az_id': {'fake_zone_id': False}, 'extra_specs': { 'replication_type': constants.REPLICATION_TYPE_READABLE, }, 'share_network_id': 'fake_sn_id'}) @ddt.unpack def test_create_share_replica(self, has_snapshots, extra_specs, share_network_id, az_id): subnets = db_utils.create_share_network_subnet( id='fakeid', share_network_id='fake_network_id') az = {'id': 'fake_zone_id'} request_spec = fakes.fake_replica_request_spec() replication_type = extra_specs['replication_type'] replica = request_spec['share_instance_properties'] share_type = db_utils.create_share_type(extra_specs=extra_specs) share_type = db_api.share_type_get(self.context, share_type['id']) share = db_utils.create_share( id=replica['share_id'], replication_type=replication_type, share_type_id=share_type['id']) snapshots = ( [fakes.fake_snapshot(), fakes.fake_snapshot()] if has_snapshots else [] ) cast_rules_to_readonly = ( replication_type == constants.REPLICATION_TYPE_READABLE) fake_replica = fakes.fake_replica(id=replica['id']) fake_request_spec = fakes.fake_replica_request_spec() self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value={'host': 'fake_ar_host'})) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=share_type)) self.mock_object(db_api, 'availability_zone_get', mock.Mock(return_value=az)) self.mock_object( db_api, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=[subnets])) 
self.mock_object( share_api.API, 'create_share_instance_and_get_request_spec', mock.Mock(return_value=(fake_request_spec, fake_replica))) self.mock_object(db_api, 'share_replica_update') mock_sched_rpcapi_call = self.mock_object( self.api.scheduler_rpcapi, 'create_share_replica') mock_snapshot_get_all_call = self.mock_object( db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=snapshots)) mock_snapshot_instance_create_call = self.mock_object( db_api, 'share_snapshot_instance_create') expected_snap_instance_create_call_count = 2 if has_snapshots else 0 result = self.api.create_share_replica( self.context, share, availability_zone='FAKE_AZ', share_network_id=share_network_id) self.assertTrue(mock_sched_rpcapi_call.called) self.assertEqual(replica, result) share_types.get_share_type.assert_called_once_with( self.context, share_type['id']) mock_snapshot_get_all_call.assert_called_once_with( self.context, fake_replica['share_id']) self.assertEqual(expected_snap_instance_create_call_count, mock_snapshot_instance_create_call.call_count) expected_azs = extra_specs.get('availability_zones', '') expected_azs = expected_azs.split(',') if expected_azs else [] (share_api.API.create_share_instance_and_get_request_spec. 
assert_called_once_with( self.context, share, availability_zone='FAKE_AZ', share_network_id=share_network_id, share_type_id=share_type['id'], availability_zones=expected_azs, az_request_multiple_subnet_support_map=az_id, cast_rules_to_readonly=cast_rules_to_readonly)) def test_delete_last_active_replica(self): fake_replica = fakes.fake_replica( share_id='FAKE_SHARE_ID', replica_state=constants.REPLICA_STATE_ACTIVE) self.mock_object(db_api, 'share_replicas_get_all_by_share', mock.Mock(return_value=[fake_replica])) mock_log = self.mock_object(share_api.LOG, 'info') self.assertRaises( exception.ReplicationException, self.api.delete_share_replica, self.context, fake_replica) self.assertFalse(mock_log.called) @ddt.data(True, False) def test_delete_share_replica_no_host(self, has_snapshots): snapshots = [{'id': 'xyz'}, {'id': 'abc'}, {'id': 'pqr'}] snapshots = snapshots if has_snapshots else [] replica = fakes.fake_replica('FAKE_ID', host='') mock_sched_rpcapi_call = self.mock_object( self.share_rpcapi, 'delete_share_replica') mock_db_replica_delete_call = self.mock_object( db_api, 'share_replica_delete') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') mock_snapshot_get_call = self.mock_object( db_api, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshots)) mock_snapshot_instance_delete_call = self.mock_object( db_api, 'share_snapshot_instance_delete') self.api.delete_share_replica(self.context, replica) self.assertFalse(mock_sched_rpcapi_call.called) mock_db_replica_delete_call.assert_called_once_with( self.context, replica['id']) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'status': constants.STATUS_DELETING, 'terminated_at': mock.ANY}) mock_snapshot_get_call.assert_called_once_with( self.context, {'share_instance_ids': replica['id']}) self.assertEqual( len(snapshots), mock_snapshot_instance_delete_call.call_count) @ddt.data(True, False) def test_delete_share_replica(self, force): replica 
= fakes.fake_replica('FAKE_ID', host='HOSTA@BackendB#PoolC') mock_sched_rpcapi_call = self.mock_object( self.share_rpcapi, 'delete_share_replica') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') self.api.delete_share_replica(self.context, replica, force=force) mock_sched_rpcapi_call.assert_called_once_with( self.context, replica, force=force) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'status': constants.STATUS_DELETING, 'terminated_at': mock.ANY}) @ddt.data(constants.STATUS_CREATING, constants.STATUS_DELETING, constants.STATUS_ERROR, constants.STATUS_EXTENDING, constants.STATUS_REPLICATION_CHANGE, constants.STATUS_MANAGING, constants.STATUS_ERROR_DELETING) def test_promote_share_replica_non_available_status(self, status): replica = fakes.fake_replica( status=status, replica_state=constants.REPLICA_STATE_IN_SYNC) mock_rpcapi_promote_share_replica_call = self.mock_object( self.share_rpcapi, 'promote_share_replica') self.assertRaises(exception.ReplicationException, self.api.promote_share_replica, self.context, replica) self.assertFalse(mock_rpcapi_promote_share_replica_call.called) @ddt.data(constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR) def test_promote_share_replica_out_of_sync_non_admin(self, replica_state): fake_user_context = context.RequestContext( user_id=None, project_id=None, is_admin=False, read_deleted='no', overwrite=False) replica = fakes.fake_replica( status=constants.STATUS_AVAILABLE, replica_state=replica_state) mock_rpcapi_promote_share_replica_call = self.mock_object( self.share_rpcapi, 'promote_share_replica') self.assertRaises(exception.AdminRequired, self.api.promote_share_replica, fake_user_context, replica) self.assertFalse(mock_rpcapi_promote_share_replica_call.called) @ddt.data(constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR) def test_promote_share_replica_admin_authorized(self, replica_state): replica = fakes.fake_replica( status=constants.STATUS_AVAILABLE, 
replica_state=replica_state, host='HOSTA@BackendB#PoolC') self.mock_object(db_api, 'share_replica_get', mock.Mock(return_value=replica)) mock_rpcapi_promote_share_replica_call = self.mock_object( self.share_rpcapi, 'promote_share_replica') mock_db_update_call = self.mock_object(db_api, 'share_replica_update') retval = self.api.promote_share_replica( self.context, replica) self.assertEqual(replica, retval) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'status': constants.STATUS_REPLICATION_CHANGE}) mock_rpcapi_promote_share_replica_call.assert_called_once_with( self.context, replica, quiesce_wait_time=None) def test_promote_share_replica(self): replica = fakes.fake_replica('FAKE_ID', host='HOSTA@BackendB#PoolC') self.mock_object(db_api, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db_api, 'share_replica_update') mock_sched_rpcapi_call = self.mock_object( self.share_rpcapi, 'promote_share_replica') result = self.api.promote_share_replica(self.context, replica) mock_sched_rpcapi_call.assert_called_once_with( self.context, replica, quiesce_wait_time=None) self.assertEqual(replica, result) def test_update_share_replica_no_host(self): replica = fakes.fake_replica('FAKE_ID') replica['host'] = None mock_rpcapi_update_share_replica_call = self.mock_object( self.share_rpcapi, 'update_share_replica') self.assertRaises(exception.InvalidHost, self.api.update_share_replica, self.context, replica) self.assertFalse(mock_rpcapi_update_share_replica_call.called) def test_update_share_replica(self): replica = fakes.fake_replica('FAKE_ID', host='HOSTA@BackendB#PoolC') mock_rpcapi_update_share_replica_call = self.mock_object( self.share_rpcapi, 'update_share_replica') retval = self.api.update_share_replica(self.context, replica) self.assertTrue(mock_rpcapi_update_share_replica_call.called) self.assertIsNone(retval) @ddt.data({'overs': {'replica_gigabytes': 'fake'}, 'expected_exception': 
exception.ShareReplicaSizeExceedsAvailableQuota}, {'overs': {'share_replicas': 'fake'}, 'expected_exception': exception.ShareReplicasLimitExceeded}) @ddt.unpack def test_create_share_replica_over_quota(self, overs, expected_exception): request_spec = fakes.fake_replica_request_spec() replica = request_spec['share_instance_properties'] share = db_utils.create_share(replication_type='dr', id=replica['share_id']) share_type = db_utils.create_share_type() share_type = db_api.share_type_get(self.context, share_type['id']) usages = {'replica_gigabytes': {'reserved': 5, 'in_use': 5}, 'share_replicas': {'reserved': 5, 'in_use': 5}} quotas = {'share_replicas': 5, 'replica_gigabytes': 5} exc = exception.OverQuota(overs=overs, usages=usages, quotas=quotas) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc)) self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value={'host': 'fake_ar_host'})) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=share_type)) self.assertRaises( expected_exception, self.api.create_share_replica, self.context, share ) quota.QUOTAS.reserve.assert_called_once_with( self.context, share_type_id=share_type['id'], share_replicas=1, replica_gigabytes=share['size']) (db_api.share_replicas_get_available_active_replica .assert_called_once_with(self.context, share['id'])) share_types.get_share_type.assert_called_once_with( self.context, share['instance']['share_type_id']) def test_create_share_replica_error_on_quota_commit(self): request_spec = fakes.fake_replica_request_spec() replica = request_spec['share_instance_properties'] share_type = db_utils.create_share_type() fake_replica = fakes.fake_replica(id=replica['id']) share = db_utils.create_share(replication_type='dr', id=fake_replica['share_id'], share_type_id=share_type['id']) share_network_id = None share_type = db_api.share_type_get(self.context, share_type['id']) expected_azs = share_type['extra_specs'].get('availability_zones', 
'') expected_azs = expected_azs.split(',') if expected_azs else [] reservation = 'fake' self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=reservation)) self.mock_object(quota.QUOTAS, 'commit', mock.Mock(side_effect=exception.QuotaError('fake'))) self.mock_object(db_api, 'share_replica_delete') self.mock_object(quota.QUOTAS, 'rollback') self.mock_object(db_api, 'share_replicas_get_available_active_replica', mock.Mock(return_value={'host': 'fake_ar_host'})) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=share_type)) self.mock_object( share_api.API, 'create_share_instance_and_get_request_spec', mock.Mock(return_value=(request_spec, fake_replica))) self.assertRaises( exception.QuotaError, self.api.create_share_replica, self.context, share ) db_api.share_replica_delete.assert_called_once_with( self.context, replica['id'], need_to_update_usages=False) quota.QUOTAS.rollback.assert_called_once_with( self.context, reservation, share_type_id=share['instance']['share_type_id']) (db_api.share_replicas_get_available_active_replica. assert_called_once_with(self.context, share['id'])) share_types.get_share_type.assert_called_once_with( self.context, share['instance']['share_type_id']) (share_api.API.create_share_instance_and_get_request_spec. 
assert_called_once_with(self.context, share, availability_zone=None, share_network_id=share_network_id, share_type_id=share_type['id'], availability_zones=expected_azs, az_request_multiple_subnet_support_map={}, cast_rules_to_readonly=False)) def test_migration_complete(self): instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING) instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED, instances=[instance1, instance2]) self.mock_object(db_api, 'share_instance_get', mock.Mock(return_value=instance1)) self.mock_object(self.api.share_rpcapi, 'migration_complete') self.api.migration_complete(self.context, share) self.api.share_rpcapi.migration_complete.assert_called_once_with( self.context, instance1, instance2['id']) @ddt.data(constants.TASK_STATE_DATA_COPYING_STARTING, constants.TASK_STATE_MIGRATION_SUCCESS, constants.TASK_STATE_DATA_COPYING_IN_PROGRESS, constants.TASK_STATE_MIGRATION_ERROR, constants.TASK_STATE_MIGRATION_CANCELLED, None) def test_migration_complete_task_state_invalid(self, task_state): share = db_utils.create_share( id='fake_id', task_state=task_state) self.assertRaises(exception.InvalidShare, self.api.migration_complete, self.context, share) def test_migration_complete_status_invalid(self): instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_ERROR) instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_ERROR) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED, instances=[instance1, instance2]) self.assertRaises(exception.ShareMigrationFailed, self.api.migration_complete, self.context, share) @ddt.data(None, Exception('fake')) def test_migration_cancel(self, exc): share = db_utils.create_share( id='fake_id', 
task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) services = ['fake_service'] self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object(db_api, 'service_get_all_by_topic', mock.Mock(return_value=services)) self.mock_object(data_rpc.DataAPI, 'data_copy_cancel', mock.Mock(side_effect=[exc])) if exc: self.assertRaises( exception.ShareMigrationError, self.api.migration_cancel, self.context, share) else: self.api.migration_cancel(self.context, share) data_rpc.DataAPI.data_copy_cancel.assert_called_once_with( self.context, share['id']) db_api.service_get_all_by_topic.assert_called_once_with( self.context, 'manila-data') def test_migration_cancel_service_down(self): service = 'fake_service' instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING) instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS, instances=[instance1, instance2]) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False)) self.mock_object(db_api, 'share_instance_get', mock.Mock(return_value=instance1)) self.mock_object(db_api, 'service_get_all_by_topic', mock.Mock(return_value=service)) self.assertRaises(exception.InvalidShare, self.api.migration_cancel, self.context, share) def test_migration_cancel_driver(self): service = 'fake_service' instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING, host='some_host') instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, instances=[instance1, instance2]) self.mock_object(db_api, 'share_instance_get', mock.Mock(return_value=instance1)) self.mock_object(self.api.share_rpcapi, 'migration_cancel') self.mock_object(db_api, 
'service_get_by_args', mock.Mock(return_value=service)) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.api.migration_cancel(self.context, share) self.api.share_rpcapi.migration_cancel.assert_called_once_with( self.context, instance1, instance2['id']) db_api.service_get_by_args.assert_called_once_with( self.context, instance1['host'], 'manila-share') @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_DATA_COPYING_COMPLETED) def test_migration_cancel_driver_service_down(self, task_state): service = 'fake_service' instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING, host='some_host') instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', task_state=task_state, instances=[instance1, instance2]) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False)) self.mock_object(db_api, 'share_instance_get', mock.Mock(return_value=instance1)) self.mock_object(db_api, 'service_get_by_args', mock.Mock(return_value=service)) self.assertRaises(exception.InvalidShare, self.api.migration_cancel, self.context, share) @ddt.data(constants.TASK_STATE_DATA_COPYING_STARTING, constants.TASK_STATE_MIGRATION_SUCCESS, constants.TASK_STATE_MIGRATION_ERROR, constants.TASK_STATE_MIGRATION_CANCELLED, None) def test_migration_cancel_task_state_invalid(self, task_state): share = db_utils.create_share( id='fake_id', task_state=task_state) self.assertRaises(exception.InvalidShare, self.api.migration_cancel, self.context, share) @ddt.data({'total_progress': 50}, Exception('fake')) def test_migration_get_progress(self, expected): share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) services = ['fake_service'] self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) 
self.mock_object(db_api, 'service_get_all_by_topic', mock.Mock(return_value=services)) self.mock_object(data_rpc.DataAPI, 'data_copy_get_progress', mock.Mock(side_effect=[expected])) if not isinstance(expected, Exception): result = self.api.migration_get_progress(self.context, share) self.assertEqual(expected, result) else: self.assertRaises( exception.ShareMigrationError, self.api.migration_get_progress, self.context, share) data_rpc.DataAPI.data_copy_get_progress.assert_called_once_with( self.context, share['id']) db_api.service_get_all_by_topic.assert_called_once_with( self.context, 'manila-data') def test_migration_get_progress_service_down(self): instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING) instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS, instances=[instance1, instance2]) services = ['fake_service'] self.mock_object(utils, 'service_is_up', mock.Mock(return_value=False)) self.mock_object(db_api, 'service_get_all_by_topic', mock.Mock(return_value=services)) self.mock_object(db_api, 'share_instance_get', mock.Mock(return_value=instance1)) self.assertRaises(exception.InvalidShare, self.api.migration_get_progress, self.context, share) def test_migration_get_progress_driver(self): expected = {'total_progress': 50} instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING, host='some_host') instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, instances=[instance1, instance2]) service = 'fake_service' self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object(db_api, 'service_get_by_args', mock.Mock(return_value=service)) 
self.mock_object(db_api, 'share_instance_get', mock.Mock(return_value=instance1)) self.mock_object(self.api.share_rpcapi, 'migration_get_progress', mock.Mock(return_value=expected)) result = self.api.migration_get_progress(self.context, share) self.assertEqual(expected, result) self.api.share_rpcapi.migration_get_progress.assert_called_once_with( self.context, instance1, instance2['id']) db_api.service_get_by_args.assert_called_once_with( self.context, instance1['host'], 'manila-share') def test_migration_get_progress_driver_error(self): instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING, host='some_host') instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, instances=[instance1, instance2]) service = 'fake_service' self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object(db_api, 'service_get_by_args', mock.Mock(return_value=service)) self.mock_object(db_api, 'share_instance_get', mock.Mock(return_value=instance1)) self.mock_object(self.api.share_rpcapi, 'migration_get_progress', mock.Mock(side_effect=Exception('fake'))) self.assertRaises(exception.ShareMigrationError, self.api.migration_get_progress, self.context, share) self.api.share_rpcapi.migration_get_progress.assert_called_once_with( self.context, instance1, instance2['id']) def test_migration_get_progress_driver_service_down(self): service = 'fake_service' instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING, host='some_host') instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, instances=[instance1, instance2]) self.mock_object(utils, 'service_is_up', 
mock.Mock(return_value=False)) self.mock_object(db_api, 'share_instance_get', mock.Mock(return_value=instance1)) self.mock_object(db_api, 'service_get_by_args', mock.Mock(return_value=service)) self.assertRaises(exception.InvalidShare, self.api.migration_get_progress, self.context, share) @ddt.data(constants.TASK_STATE_MIGRATION_STARTING, constants.TASK_STATE_MIGRATION_DRIVER_STARTING, constants.TASK_STATE_DATA_COPYING_STARTING, constants.TASK_STATE_MIGRATION_IN_PROGRESS) def test_migration_get_progress_task_state_progress_0(self, task_state): share = db_utils.create_share( id='fake_id', task_state=task_state) expected = {'total_progress': 0} result = self.api.migration_get_progress(self.context, share) self.assertEqual(expected, result) @ddt.data(constants.TASK_STATE_MIGRATION_SUCCESS, constants.TASK_STATE_DATA_COPYING_ERROR, constants.TASK_STATE_MIGRATION_CANCELLED, constants.TASK_STATE_MIGRATION_COMPLETING, constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_DATA_COPYING_COMPLETED, constants.TASK_STATE_DATA_COPYING_COMPLETING, constants.TASK_STATE_DATA_COPYING_CANCELLED, constants.TASK_STATE_MIGRATION_ERROR) def test_migration_get_progress_task_state_progress_100(self, task_state): share = db_utils.create_share( id='fake_id', task_state=task_state) expected = {'total_progress': 100} result = self.api.migration_get_progress(self.context, share) self.assertEqual(expected, result) def test_migration_get_progress_task_state_None(self): share = db_utils.create_share(id='fake_id', task_state=None) self.assertRaises(exception.InvalidShare, self.api.migration_get_progress, self.context, share) @ddt.data(None, {'invalid_progress': None}, {}) def test_migration_get_progress_invalid(self, progress): instance1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING, host='some_host') instance2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO) share = db_utils.create_share( 
id='fake_id', task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, instances=[instance1, instance2]) service = 'fake_service' self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) self.mock_object(db_api, 'service_get_by_args', mock.Mock(return_value=service)) self.mock_object(db_api, 'share_instance_get', mock.Mock(return_value=instance1)) self.mock_object(self.api.share_rpcapi, 'migration_get_progress', mock.Mock(return_value=progress)) self.assertRaises(exception.InvalidShare, self.api.migration_get_progress, self.context, share) self.api.share_rpcapi.migration_get_progress.assert_called_once_with( self.context, instance1, instance2['id']) @ddt.data(True, False) def test__migration_initial_checks(self, create_share_network): type_data = { 'extra_specs': { 'availability_zones': 'fake_az1,fake_az2' } } fake_server_host = 'fake@backend' fake_share_server = db_utils.create_share_server(host=fake_server_host) share_type = db_utils.create_share_type(**type_data) share_type = db_api.share_type_get(self.context, share_type['id']) fake_share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=share_type['id']) fake_az = { 'id': 'fake_az_id', 'name': 'fake_az1' } fake_share_network = ( db_utils.create_share_network() if create_share_network else None) expected_network_change = create_share_network is True fake_share_network_id = ( fake_share_network['id'] if create_share_network else fake_share['share_network_id']) fake_subnet = db_utils.create_share_network_subnet( availability_zone_id=fake_az['id']) fake_host = 'test@fake' service = {'availability_zone_id': fake_az['id'], 'availability_zone': {'name': fake_az['name']}} mock_shares_get_all = self.mock_object( db_api, 'share_get_all_by_share_server', mock.Mock(return_value=[fake_share])) mock_shares_in_recycle_bin_get_all = self.mock_object( db_api, 'share_get_all_soft_deleted', mock.Mock(return_value=[])) mock_get_type = self.mock_object( 
share_types, 'get_share_type', mock.Mock(return_value=share_type)) mock_validate_service = self.mock_object( utils, 'validate_service_host') mock_service_get = self.mock_object( db_api, 'service_get_by_args', mock.Mock(return_value=service)) mock_az_get = self.mock_object( db_api, 'availability_zone_get', mock.Mock(return_value=fake_az)) mock_get_subnet = self.mock_object( db_api, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=[fake_subnet])) exp_shares, exp_types, exp_service, exp_network_id, net_change = ( self.api._migration_initial_checks( self.context, fake_share_server, fake_host, fake_share_network)) self.assertEqual(exp_shares, [fake_share]) self.assertEqual(exp_types, [share_type]) self.assertEqual(exp_service, service) self.assertEqual(exp_network_id, fake_share_network_id) self.assertIs(expected_network_change, net_change) mock_shares_get_all.assert_has_calls([ mock.call(self.context, fake_share_server['id']), mock.call(self.context, fake_share_server['id'])]) mock_shares_in_recycle_bin_get_all.assert_has_calls([ mock.call(self.context, fake_share_server['id'])]) mock_get_type.assert_called_once_with(self.context, share_type['id']) mock_validate_service.assert_called_once_with(self.context, fake_host) mock_service_get.assert_called_once_with( self.context, fake_host, 'manila-share') mock_get_subnet.assert_called_once_with( self.context, fake_share_network_id, fake_az['id']) mock_az_get.assert_called_once_with( self.context, service['availability_zone']['name'] ) def test_share_server_migration_get_destination(self): fake_source_server_id = 'fake_source_id' server_data = { 'id': 'fake', 'source_share_server_id': fake_source_server_id, 'status': constants.STATUS_SERVER_MIGRATING_TO, } server = db_utils.create_share_server(**server_data) mock_get_all = self.mock_object( db_api, 'share_server_get_all_with_filters', mock.Mock(return_value=[server])) filters = { 'status': constants.STATUS_SERVER_MIGRATING_TO, 
'source_share_server_id': fake_source_server_id, } filtered_server = self.api.share_server_migration_get_destination( self.context, fake_source_server_id, status=constants.STATUS_SERVER_MIGRATING_TO ) self.assertEqual(filtered_server['id'], server['id']) mock_get_all.assert_called_once_with(self.context, filters=filters) def test_share_server_migration_get_destination_no_share_server( self): fake_source_server_id = 'fake_source_id' server_data = { 'id': 'fake', 'source_share_server_id': fake_source_server_id, 'status': constants.STATUS_SERVER_MIGRATING_TO, } db_utils.create_share_server(**server_data) mock_get_all = self.mock_object( db_api, 'share_server_get_all_with_filters', mock.Mock(return_value=[])) filters = { 'status': constants.STATUS_SERVER_MIGRATING_TO, 'source_share_server_id': fake_source_server_id, } self.assertRaises( exception.InvalidShareServer, self.api.share_server_migration_get_destination, self.context, fake_source_server_id, status=constants.STATUS_SERVER_MIGRATING_TO ) mock_get_all.assert_called_once_with(self.context, filters=filters) def test_share_server_migration_get_destination_multiple_servers(self): fake_source_server_id = 'fake_source_id' server_data = { 'id': 'fake', 'source_share_server_id': fake_source_server_id, 'status': constants.STATUS_SERVER_MIGRATING_TO, } server_1 = db_utils.create_share_server(**server_data) server_data['id'] = 'fake_id_2' server_2 = db_utils.create_share_server(**server_data) mock_get_all = self.mock_object( db_api, 'share_server_get_all_with_filters', mock.Mock(return_value=[server_1, server_2])) filters = { 'status': constants.STATUS_SERVER_MIGRATING_TO, 'source_share_server_id': fake_source_server_id, } self.assertRaises( exception.InvalidShareServer, self.api.share_server_migration_get_destination, self.context, fake_source_server_id, status=constants.STATUS_SERVER_MIGRATING_TO ) mock_get_all.assert_called_once_with(self.context, filters=filters) def test__migration_initial_checks_no_shares(self): 
fake_share_server = fakes.fake_share_server_get() fake_share_network = {} fake_host = 'test@fake' mock_shares_get_all = self.mock_object( db_api, 'share_get_all_by_share_server', mock.Mock(return_value=[])) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_host, fake_share_network, ) mock_shares_get_all.assert_called_once_with( self.context, fake_share_server['id']) def test__migration_initial_checks_server_not_active(self): fake_share_server = fakes.fake_share_server_get() fake_share_server['status'] = 'error' fake_share = fakes.fake_share() fake_share_network = {} fake_host = 'test@fake' mock_shares_get_all = self.mock_object( db_api, 'share_get_all_by_share_server', mock.Mock(return_value=[fake_share])) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_host, fake_share_network, ) mock_shares_get_all.assert_called_once_with( self.context, fake_share_server['id']) def test__migration_initial_checks_share_group_related_to_server(self): fake_share_server = db_utils.create_share_server() fake_share = db_utils.create_share() fake_share_group = db_utils.create_share_group() fake_share_network = {} fake_host = 'test@fake' mock_shares_get_all = self.mock_object( db_api, 'share_get_all_by_share_server', mock.Mock(return_value=[fake_share])) mock_get_groups = self.mock_object( db_api, 'share_group_get_all_by_share_server', mock.Mock(return_value=[fake_share_group])) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_host, fake_share_network, ) mock_shares_get_all.assert_called_once_with( self.context, fake_share_server['id']) mock_get_groups.assert_called_once_with(self.context, fake_share_server['id']) def _setup_mocks_for_initial_checks(self, fake_share, share_type, service, fake_az, fake_subnet): self.mock_object( db_api, 'share_get_all_by_share_server', 
mock.Mock(return_value=[fake_share])) self.mock_object( db_api, 'share_group_get_all_by_share_server', mock.Mock(return_value=[])) self.mock_object( share_types, 'get_share_type', mock.Mock(return_value=share_type)) self.mock_object( utils, 'validate_service_host') self.mock_object( db_api, 'service_get_by_args', mock.Mock(return_value=service)) self.mock_object( db_api, 'availability_zone_get', mock.Mock(return_value=fake_az)) self.mock_object( db_api, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=fake_subnet)) def test__migration_initial_checks_share_not_available(self): fake_share_server = fakes.fake_share_server_get() fake_share_server['host'] = 'fake@backend' type_data = { 'extra_specs': { 'availability_zones': 'fake_az1,fake_az2' } } fake_server_host = 'fake@backend' fake_share_server = db_utils.create_share_server(host=fake_server_host) share_type = db_utils.create_share_type(**type_data) share_type = db_api.share_type_get(self.context, share_type['id']) fake_share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_ERROR, share_type_id=share_type['id']) fake_az = { 'id': 'fake_az_id', 'name': 'fake_az1' } fake_share_network = None fake_share_network_id = fake_share['share_network_id'] fake_subnet = db_utils.create_share_network_subnet( availability_zone_id=fake_az['id']) fake_host = 'test@fake' service = {'availability_zone_id': fake_az['id'], 'availability_zone': {'name': fake_az['name']}} self._setup_mocks_for_initial_checks(fake_share, share_type, service, fake_az, fake_subnet) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_host, fake_share_network, ) db_api.share_get_all_by_share_server.assert_has_calls([ mock.call(self.context, fake_share_server['id']), mock.call(self.context, fake_share_server['id'])]) share_types.get_share_type.assert_called_once_with( self.context, share_type['id']) 
utils.validate_service_host.assert_called_once_with( self.context, fake_host) db_api.service_get_by_args.assert_called_once_with( self.context, fake_host, 'manila-share') db_api.availability_zone_get.assert_called_once_with( self.context, service['availability_zone']['name'] ) (db_api.share_network_subnets_get_all_by_availability_zone_id. assert_called_once_with( self.context, fake_share_network_id, fake_az['id'])) db_api.share_group_get_all_by_share_server.assert_called_once_with( self.context, fake_share_server['id']) def test__migration_initial_checks_share_with_replicas(self): fake_share_server = fakes.fake_share_server_get() fake_share_server['host'] = 'fake@backend' type_data = { 'extra_specs': { 'availability_zones': 'fake_az1,fake_az2' } } fake_server_host = 'fake@backend' fake_share_server = db_utils.create_share_server(host=fake_server_host) share_type = db_utils.create_share_type(**type_data) share_type = db_api.share_type_get(self.context, share_type['id']) fake_share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, replication_type='dr', share_type_id=share_type['id']) for i in range(1, 4): db_utils.create_share_replica( share_id=fake_share['id'], replica_state='in_sync') fake_share = db_api.share_get(self.context, fake_share['id']) fake_az = { 'id': 'fake_az_id', 'name': 'fake_az1' } fake_share_network = None fake_share_network_id = fake_share['share_network_id'] fake_subnet = db_utils.create_share_network_subnet( availability_zone_id=fake_az['id']) fake_host = 'test@fake' service = {'availability_zone_id': fake_az['id'], 'availability_zone': {'name': fake_az['name']}} self._setup_mocks_for_initial_checks(fake_share, share_type, service, fake_az, fake_subnet) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_host, fake_share_network, ) db_api.share_get_all_by_share_server.assert_has_calls([ mock.call(self.context, fake_share_server['id']), 
mock.call(self.context, fake_share_server['id'])]) share_types.get_share_type.assert_called_once_with( self.context, share_type['id']) utils.validate_service_host.assert_called_once_with( self.context, fake_host) db_api.service_get_by_args.assert_called_once_with( self.context, fake_host, 'manila-share') db_api.availability_zone_get.assert_called_once_with( self.context, service['availability_zone']['name'] ) (db_api.share_network_subnets_get_all_by_availability_zone_id. assert_called_once_with( self.context, fake_share_network_id, fake_az['id'])) db_api.share_group_get_all_by_share_server.assert_called_once_with( self.context, fake_share_server['id']) def test__migration_initial_checks_share_in_share_group(self): fake_share_server = fakes.fake_share_server_get() fake_share_server['host'] = 'fake@backend' type_data = { 'extra_specs': { 'availability_zones': 'fake_az1,fake_az2' } } fake_server_host = 'fake@backend' fake_share_server = db_utils.create_share_server(host=fake_server_host) share_type = db_utils.create_share_type(**type_data) share_type = db_api.share_type_get(self.context, share_type['id']) fake_share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=share_type['id'], share_group_id='fake_group_id') fake_az = { 'id': 'fake_az_id', 'name': 'fake_az1' } fake_share_network = None fake_share_network_id = fake_share['share_network_id'] fake_subnet = db_utils.create_share_network_subnet( availability_zone_id=fake_az['id']) fake_host = 'test@fake' service = {'availability_zone_id': fake_az['id'], 'availability_zone': {'name': fake_az['name']}} self._setup_mocks_for_initial_checks(fake_share, share_type, service, fake_az, fake_subnet) mock_snapshots_get = self.mock_object( db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[])) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_host, fake_share_network, ) 
db_api.share_get_all_by_share_server.assert_has_calls([ mock.call(self.context, fake_share_server['id']), mock.call(self.context, fake_share_server['id'])]) share_types.get_share_type.assert_called_once_with( self.context, share_type['id']) utils.validate_service_host.assert_called_once_with( self.context, fake_host) db_api.service_get_by_args.assert_called_once_with( self.context, fake_host, 'manila-share') db_api.availability_zone_get.assert_called_once_with( self.context, service['availability_zone']['name'] ) (db_api.share_network_subnets_get_all_by_availability_zone_id. assert_called_once_with( self.context, fake_share_network_id, fake_az['id'])) mock_snapshots_get.assert_called_once_with( self.context, fake_share['id']) db_api.share_group_get_all_by_share_server.assert_called_once_with( self.context, fake_share_server['id']) def test__migration_initial_checks_same_backend_and_network(self): fake_server_host = 'fake@backend' fake_share_network = {'id': 'fake_share_network_id'} fake_share_server = db_utils.create_share_server(host=fake_server_host) fake_share = db_utils.create_share( host=fake_server_host, status=constants.STATUS_AVAILABLE, share_group_id='fake_group_id', share_network_id=fake_share_network['id']) mock_shares_get_all = self.mock_object( db_api, 'share_get_all_by_share_server', mock.Mock(return_value=[fake_share])) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_server_host, fake_share_network, ) mock_shares_get_all.assert_called_once_with( self.context, fake_share_server['id']) def test__migration_initial_checks_another_migration_found(self): fake_server_host = 'fake@backend2' fake_share_network = {'id': 'fake_share_network_id'} fake_share_server = db_utils.create_share_server(host=fake_server_host) fake_share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_group_id='fake_group_id', share_network=fake_share_network) 
mock_shares_get_all = self.mock_object( db_api, 'share_get_all_by_share_server', mock.Mock(return_value=[fake_share])) mock_shares_get_servers_filters = self.mock_object( db_api, 'share_server_get_all_with_filters', mock.Mock(return_value=['fake_share_server'])) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_server_host, fake_share_network, ) mock_shares_get_all.assert_called_once_with( self.context, fake_share_server['id']) filters = {'source_share_server_id': fake_share_server['id'], 'status': constants.STATUS_SERVER_MIGRATING_TO} mock_shares_get_servers_filters.assert_called_once_with( self.context, filters=filters) def test_share_server_migration_get_request_spec_dict(self): share_instances = [ db_utils.create_share_instance(share_id='fake_id') for i in range(1, 3)] snapshot_instances = [ db_utils.create_snapshot_instance( snapshot_id='fake_' + str(i), share_instance_id='fake') for i in range(1, 3)] shares_req_spec = [{} for instance in share_instances] total_shares_size = sum( [instance.get('size', 0) for instance in share_instances]) total_snapshots_size = sum( [instance.get('size', 0) for instance in snapshot_instances]) expected_result = { 'shares_size': total_shares_size, 'snapshots_size': total_snapshots_size, 'shares_req_spec': shares_req_spec, } fake_share_type = db_utils.create_share_type() get_type_calls = [] get_request_spec_calls = [] for instance in share_instances: get_type_calls.append( mock.call(self.context, instance['share_type_id'])) get_request_spec_calls.append( mock.call(self.context, instance, fake_share_type)) mock_get_type = self.mock_object( share_types, 'get_share_type', mock.Mock(return_value=fake_share_type)) mock_get_request_spec = self.mock_object( self.api, '_get_request_spec_dict', mock.Mock(return_value={})) result = self.api.get_share_server_migration_request_spec_dict( self.context, share_instances, snapshot_instances) self.assertEqual(result, 
expected_result) mock_get_type.assert_has_calls(get_type_calls) mock_get_request_spec.assert_has_calls(get_request_spec_calls) def test__migration_initial_checks_instance_rules_error_status(self): fake_share_server = fakes.fake_share_server_get() fake_share_server['host'] = 'fake@backend' type_data = { 'extra_specs': { 'availability_zones': 'fake_az1,fake_az2' } } fake_server_host = 'fake@backend' fake_share_server = db_utils.create_share_server(host=fake_server_host) share_type = db_utils.create_share_type(**type_data) share_type = db_api.share_type_get(self.context, share_type['id']) fake_share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=share_type['id'], share_group_id='fake_group_id') fake_share['instance']['access_rules_status'] = constants.STATUS_ERROR fake_az = { 'id': 'fake_az_id', 'name': 'fake_az1' } fake_share_network = None fake_share_network_id = fake_share['share_network_id'] fake_subnet = db_utils.create_share_network_subnet( availability_zone_id=fake_az['id']) fake_host = 'test@fake' service = {'availability_zone_id': fake_az['id'], 'availability_zone': {'name': fake_az['name']}} self._setup_mocks_for_initial_checks(fake_share, share_type, service, fake_az, fake_subnet) mock_snapshots_get = self.mock_object( db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[])) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_host, fake_share_network, ) db_api.share_get_all_by_share_server.assert_has_calls([ mock.call(self.context, fake_share_server['id']), mock.call(self.context, fake_share_server['id'])]) share_types.get_share_type.assert_called_once_with( self.context, share_type['id']) utils.validate_service_host.assert_called_once_with( self.context, fake_host) db_api.service_get_by_args.assert_called_once_with( self.context, fake_host, 'manila-share') db_api.availability_zone_get.assert_called_once_with( 
self.context, service['availability_zone']['name'] ) (db_api.share_network_subnets_get_all_by_availability_zone_id. assert_called_once_with( self.context, fake_share_network_id, fake_az['id'])) mock_snapshots_get.assert_called_once_with( self.context, fake_share['id']) db_api.share_group_get_all_by_share_server.assert_called_once_with( self.context, fake_share_server['id']) def test__migration_initial_checks_dest_az_not_match_host_az(self): type_data = { 'extra_specs': { 'availability_zones': 'zone1,zone2' } } fake_server_host = 'fake@backend' fake_share_server = db_utils.create_share_server(host=fake_server_host) share_type = db_utils.create_share_type(**type_data) share_type = db_api.share_type_get(self.context, share_type['id']) fake_share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=share_type['id']) fake_share_network = {} fake_host = 'test@fake' service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} mock_shares_get_all = self.mock_object( db_api, 'share_get_all_by_share_server', mock.Mock(return_value=[fake_share])) mock_get_type = self.mock_object( share_types, 'get_share_type', mock.Mock(return_value=share_type)) mock_validate_service = self.mock_object( utils, 'validate_service_host') mock_service_get = self.mock_object( db_api, 'service_get_by_args', mock.Mock(return_value=service)) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_host, fake_share_network, ) mock_shares_get_all.assert_called_once_with( self.context, fake_share_server['id']) mock_get_type.assert_called_once_with(self.context, share_type['id']) mock_validate_service.assert_called_once_with(self.context, fake_host) mock_service_get.assert_called_once_with( self.context, fake_host, 'manila-share') def test__migration_initial_checks_no_matching_subnet(self): type_data = { 'extra_specs': { 'availability_zones': 'fake_az1,fake_az2' 
} } fake_server_host = 'fake@backend' fake_share_server = db_utils.create_share_server(host=fake_server_host) share_type = db_utils.create_share_type(**type_data) share_type = db_api.share_type_get(self.context, share_type['id']) fake_share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=share_type['id']) fake_share_network = db_utils.create_share_network() fake_az = { 'id': 'fake_az_id', 'name': 'fake_az1' } db_utils.create_share_network_subnet( availability_zone_id='fake', share_network_id=fake_share_network['id']) fake_share_network = db_api.share_network_get( self.context, fake_share_network['id']) fake_host = 'test@fake' service = {'availability_zone_id': fake_az['id'], 'availability_zone': {'name': fake_az['name']}} mock_shares_get_all = self.mock_object( db_api, 'share_get_all_by_share_server', mock.Mock(return_value=[fake_share])) mock_get_type = self.mock_object( share_types, 'get_share_type', mock.Mock(return_value=share_type)) mock_validate_service = self.mock_object( utils, 'validate_service_host') mock_service_get = self.mock_object( db_api, 'service_get_by_args', mock.Mock(return_value=service)) mock_az_get = self.mock_object( db_api, 'availability_zone_get', mock.Mock(return_value=fake_az)) mock_get_subnet = self.mock_object( db_api, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=None)) self.assertRaises( exception.InvalidShareServer, self.api._migration_initial_checks, self.context, fake_share_server, fake_host, fake_share_network, ) mock_shares_get_all.assert_called_once_with( self.context, fake_share_server['id']) mock_get_type.assert_called_once_with(self.context, share_type['id']) mock_validate_service.assert_called_once_with(self.context, fake_host) mock_service_get.assert_called_once_with( self.context, fake_host, 'manila-share') mock_get_subnet.assert_called_once_with( self.context, fake_share_network['id'], fake_az['id']) mock_az_get.assert_called_once_with( 
self.context, service['availability_zone']['name'] ) def test_server_migration_check_nondisruptive_and_network_change(self): fake_shares = [db_utils.create_share() for i in range(2)] fake_types = [{'id': 'fake_type_id'}] fake_share_server = db_utils.create_share_server() dest_host = fake_share_server['host'] service = { 'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az_name'} } fake_share_network = db_utils.create_share_network() network_has_changed = True writable = preserve_snapshots = False nondisruptive = True expected_result = { 'compatible': False, 'writable': writable, 'nondisruptive': False, 'preserve_snapshots': preserve_snapshots, 'share_network_id': fake_share_network['id'], 'migration_cancel': False, 'migration_get_progress': False } mock_initial_checks = self.mock_object( self.api, '_migration_initial_checks', mock.Mock( return_value=[fake_shares, fake_types, service, fake_share_network['id'], network_has_changed])) check_result = self.api.share_server_migration_check( self.context, fake_share_server, dest_host, writable, nondisruptive, preserve_snapshots, fake_share_network ) self.assertEqual(expected_result, check_result) mock_initial_checks.assert_called_once_with( self.context, fake_share_server, dest_host, fake_share_network) def test_server_migration_start_nondisruptive_and_network_change(self): fake_shares = [db_utils.create_share() for i in range(2)] fake_types = [{'id': 'fake_type_id'}] fake_share_server = db_utils.create_share_server() dest_host = fake_share_server['host'] service = { 'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az_name'} } fake_share_network = db_utils.create_share_network() network_has_changed = True writable = preserve_snapshots = False nondisruptive = True mock_initial_checks = self.mock_object( self.api, '_migration_initial_checks', mock.Mock( return_value=[fake_shares, fake_types, service, fake_share_network['id'], network_has_changed])) self.assertRaises( 
exception.InvalidInput, self.api.share_server_migration_start, self.context, fake_share_server, dest_host, writable, nondisruptive, preserve_snapshots, fake_share_network ) mock_initial_checks.assert_called_once_with( self.context, fake_share_server, dest_host, fake_share_network) def test_share_server_migration_check(self): type_data = { 'extra_specs': { 'availability_zones': 'fake_az1,fake_az2' } } fake_share_server = db_utils.create_share_server() share_type = db_utils.create_share_type(**type_data) share_type = db_api.share_type_get(self.context, share_type['id']) fake_share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=share_type['id']) fake_shares = [fake_share] fake_types = [share_type] fake_share_network = db_utils.create_share_network() fake_az = { 'id': 'fake_az_id', 'name': 'fake_az1' } writable = True nondisruptive = True preserve_snapshots = True fake_share_network = db_api.share_network_get( self.context, fake_share_network['id']) fake_host = 'test@fake' service = {'availability_zone_id': fake_az['id'], 'availability_zone': {'name': fake_az['name']}} expected_result = { 'requested_capabilities': {}, 'supported_capabilities': {} } mock_initial_checks = self.mock_object( self.api, '_migration_initial_checks', mock.Mock(return_value=[fake_shares, fake_types, service, fake_share_network['id'], False])) # NOTE(carloss): Returning an "empty" dictionary should be enough for # this test case. The unit test to check the values being returned to # the user should be placed in the share manager, where the dict is # populated with the real info. At this level we only forward the # received response to the user. 
mock_migration_check = self.mock_object( self.share_rpcapi, 'share_server_migration_check', mock.Mock(return_value=expected_result)) result = self.api.share_server_migration_check( self.context, fake_share_server, fake_host, writable, nondisruptive, preserve_snapshots, fake_share_network ) mock_initial_checks.assert_called_once_with( self.context, fake_share_server, fake_host, fake_share_network) mock_migration_check.assert_called_once_with( self.context, fake_share_server['id'], fake_host, writable, nondisruptive, preserve_snapshots, fake_share_network['id'] ) self.assertEqual(result, expected_result) def test_share_server_migration_start(self): type_data = { 'extra_specs': { 'availability_zones': 'fake_az1,fake_az2' } } fake_share_server = db_utils.create_share_server() share_type = db_utils.create_share_type(**type_data) share_type = db_api.share_type_get(self.context, share_type['id']) fake_shares = [db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, share_type_id=share_type['id'], share_server_id=fake_share_server['id']) for x in range(4)] fake_snapshots = [ db_utils.create_snapshot(share_id=fake_shares[0]['id'])] instance_ids = [share['instance']['id'] for share in fake_shares] snap_instances = [] snap_instance_ids = [] for fake_share in fake_shares: for snapshot in fake_snapshots: snap_instances.append({'id': snapshot['instance']['id']}) snap_instance_ids.append(snapshot['instance']['id']) fake_types = [share_type] fake_share_network = db_utils.create_share_network() writable = True nondisruptive = True preserve_snapshots = True fake_share_network = db_api.share_network_get( self.context, fake_share_network['id']) fake_host = 'test@fake' service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} server_expected_update = { 'task_state': constants.TASK_STATE_MIGRATION_STARTING, 'status': constants.STATUS_SERVER_MIGRATING } share_expected_update = { 'status': constants.STATUS_SERVER_MIGRATING } 
mock_initial_checks = self.mock_object( self.api, '_migration_initial_checks', mock.Mock(return_value=[fake_shares, fake_types, service, fake_share_network['id'], False])) mock_migration_start = self.mock_object( self.share_rpcapi, 'share_server_migration_start') mock_server_update = self.mock_object(db_api, 'share_server_update') mock_snapshots_get = self.mock_object( db_api, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snap_instances)) mock_update_instances = self.mock_object( db_api, 'share_and_snapshot_instances_status_update') self.api.share_server_migration_start( self.context, fake_share_server, fake_host, writable, nondisruptive, preserve_snapshots, fake_share_network ) mock_initial_checks.assert_called_once_with( self.context, fake_share_server, fake_host, fake_share_network) mock_migration_start.assert_called_once_with( self.context, fake_share_server, fake_host, writable, nondisruptive, preserve_snapshots, fake_share_network['id'] ) mock_server_update.assert_called_once_with( self.context, fake_share_server['id'], server_expected_update) mock_snapshots_get.assert_called() mock_update_instances.assert_called_once_with( self.context, share_expected_update, current_expected_status=constants.STATUS_AVAILABLE, share_instance_ids=instance_ids, snapshot_instance_ids=snap_instance_ids) @ddt.data( (constants.STATUS_ACTIVE, None), (constants.STATUS_SERVER_MIGRATING, constants.TASK_STATE_MIGRATION_STARTING) ) @ddt.unpack def test_share_server_migration_complete_invalid_status(self, status, task_state): fake_host = 'fakehost@fakebackend' fake_share_server = db_utils.create_share_server( status=status, task_state=task_state, host=fake_host) self.assertRaises( exception.InvalidShareServer, self.api.share_server_migration_complete, self.context, fake_share_server) def test_share_server_migration_complete(self): fake_service_host = 'fakehost@fakebackend' fake_share_server = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING, 
task_state=constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, host=fake_service_host) fake_share_server_dest = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING_TO, host=fake_service_host) fake_service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} mock_get_destination = self.mock_object( self.api, 'share_server_migration_get_destination', mock.Mock(return_value=fake_share_server_dest)) mock_validate_service_host = self.mock_object( utils, 'validate_service_host', mock.Mock(return_value=fake_service)) mock_migration_complete = self.mock_object( self.share_rpcapi, 'share_server_migration_complete') result = self.api.share_server_migration_complete( self.context, fake_share_server) expected = { 'destination_share_server_id': fake_share_server_dest['id'] } self.assertEqual(expected, result) mock_get_destination.assert_called_once_with( self.context, fake_share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO) mock_validate_service_host.assert_called_once_with( self.context, fake_service_host) mock_migration_complete.assert_called_once_with( self.context, fake_share_server['host'], fake_share_server, fake_share_server_dest ) @ddt.data( (constants.STATUS_ACTIVE, None), (constants.STATUS_SERVER_MIGRATING, constants.TASK_STATE_MIGRATION_STARTING) ) @ddt.unpack def test_share_server_migration_cancel_server_not_migrating( self, status, task_state): fake_share_server = db_utils.create_share_server( status=status, task_state=task_state) self.mock_object(self.api, '_migration_validate_error_message', mock.Mock(return_value=None)) self.assertRaises( exception.InvalidShareServer, self.api.share_server_migration_cancel, self.context, fake_share_server ) @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) def test_share_server_migration_cancel_service_not_up(self, task_state): fake_service_host = 'host@backend' fake_share_server = 
db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING, task_state=task_state, host=fake_service_host) fake_share_server_dest = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING_TO, host=fake_service_host) mock_get_destination = self.mock_object( self.api, 'share_server_migration_get_destination', mock.Mock(return_value=fake_share_server_dest)) mock_validate_service_host = self.mock_object( utils, 'validate_service_host', mock.Mock(side_effect=exception.ServiceIsDown( service="fake_service"))) self.assertRaises( exception.ServiceIsDown, self.api.share_server_migration_cancel, self.context, fake_share_server ) mock_get_destination.assert_called_once_with( self.context, fake_share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO) mock_validate_service_host.assert_called_once_with( self.context, fake_service_host) @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) def test_share_server_migration_cancel(self, task_state): fake_service_host = 'host@backend' fake_share_server = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING, task_state=task_state, host=fake_service_host) fake_share_server_dest = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING_TO, host=fake_service_host) fake_service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} mock_get_destination = self.mock_object( self.api, 'share_server_migration_get_destination', mock.Mock(return_value=fake_share_server_dest)) mock_validate_service_host = self.mock_object( utils, 'validate_service_host', mock.Mock(return_value=fake_service)) self.api.share_server_migration_cancel( self.context, fake_share_server) mock_get_destination.assert_called_once_with( self.context, fake_share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO) mock_validate_service_host.assert_called_once_with( self.context, fake_service_host) def 
test_share_server_migration_get_progress_not_migrating(self): fake_share_server = db_utils.create_share_server( status=constants.STATUS_ACTIVE) self.assertRaises( exception.InvalidShareServer, self.api.share_server_migration_get_progress, self.context, fake_share_server['id'] ) def test_share_server_migration_get_progress_service_not_up(self): fake_service_host = 'host@backend' fake_share_server = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING, task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, host=fake_service_host) fake_share_server_dest = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING_TO, host=fake_service_host) mock_get_destination = self.mock_object( self.api, 'share_server_migration_get_destination', mock.Mock(return_value=fake_share_server_dest)) mock_validate_service_host = self.mock_object( utils, 'validate_service_host', mock.Mock(side_effect=exception.ServiceIsDown( service="fake_service"))) self.assertRaises( exception.ServiceIsDown, self.api.share_server_migration_get_progress, self.context, fake_share_server['id'] ) mock_get_destination.assert_called_once_with( self.context, fake_share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO) mock_validate_service_host.assert_called_once_with( self.context, fake_service_host) def test_share_server_migration_get_progress_rpcapi_exception(self): fake_service_host = 'host@backend' fake_share_server = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING, task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, host=fake_service_host) fake_share_server_dest = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING_TO, host=fake_service_host) fake_service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} mock_server_get = self.mock_object( db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) mock_get_destination = self.mock_object( self.api, 
'share_server_migration_get_destination', mock.Mock(return_value=fake_share_server_dest)) mock_validate_service_host = self.mock_object( utils, 'validate_service_host', mock.Mock(return_value=fake_service)) mock_migration_get_progress = self.mock_object( self.share_rpcapi, 'share_server_migration_get_progress', mock.Mock(side_effect=Exception)) self.assertRaises( exception.ShareServerMigrationError, self.api.share_server_migration_get_progress, self.context, fake_share_server['id'] ) mock_server_get.assert_called_once_with(self.context, fake_share_server['id']) mock_get_destination.assert_called_once_with( self.context, fake_share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO) mock_validate_service_host.assert_called_once_with( self.context, fake_service_host) mock_migration_get_progress.assert_called_once_with( self.context, fake_share_server_dest['host'], fake_share_server, fake_share_server_dest) @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, constants.TASK_STATE_MIGRATION_SUCCESS) def test_share_server_migration_get_progress(self, task_state): fake_service_host = 'host@backend' fake_share_server = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING, task_state=task_state, host=fake_service_host) fake_share_server_dest = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING_TO, host=fake_service_host) fake_service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} mock_server_get = self.mock_object( db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) mock_get_destination = self.mock_object( self.api, 'share_server_migration_get_destination', mock.Mock(return_value=fake_share_server_dest)) mock_validate_service_host = self.mock_object( utils, 'validate_service_host', mock.Mock(return_value=fake_service)) mock_migration_get_progress = self.mock_object( self.share_rpcapi, 'share_server_migration_get_progress', mock.Mock(return_value={'total_progress': 
50})) self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True)) result = self.api.share_server_migration_get_progress( self.context, fake_share_server['id']) self.assertIn('total_progress', result) mock_server_get.assert_called_once_with(self.context, fake_share_server['id']) if task_state == constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS: mock_get_destination.assert_called_once_with( self.context, fake_share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO) mock_validate_service_host.assert_called_once_with( self.context, fake_service_host) mock_migration_get_progress.assert_called_once_with( self.context, fake_share_server_dest['host'], fake_share_server, fake_share_server_dest) @ddt.data(constants.STATUS_SERVER_MIGRATING_TO, constants.STATUS_SERVER_MIGRATING) def test_share_server_migration_get_progress_invalid_share_server(self, status): fake_service_host = 'host@backend' fake_share_server = db_utils.create_share_server( status=status, task_state=None, host=fake_service_host) mock_server_get = self.mock_object( db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) mock_get_progress_state = self.mock_object( self.api, '_migration_get_progress_state', mock.Mock(return_value=None)) self.mock_object(self.api, 'share_server_migration_get_destination') self.assertRaises( exception.InvalidShareServer, self.api.share_server_migration_get_progress, self.context, fake_share_server['id']) mock_server_get.assert_called_once_with(self.context, fake_share_server['id']) if status == constants.STATUS_SERVER_MIGRATING: mock_get_progress_state.assert_called_once_with(fake_share_server) def test_share_server_migration_get_progress_source_not_found(self): fake_dest_hare_server = db_utils.create_share_server( status=constants.STATUS_ACTIVE, task_state=constants.TASK_STATE_MIGRATION_SUCCESS) mock_server_get = self.mock_object( db_api, 'share_server_get', mock.Mock(side_effect=exception.ShareServerNotFound( share_server_id='fake_id'))) 
mock_get_destination = self.mock_object( self.api, 'share_server_migration_get_destination', mock.Mock(return_value=fake_dest_hare_server)) result = self.api.share_server_migration_get_progress( self.context, 'fake_source_server_id') expected = { 'total_progress': 100, 'destination_share_server_id': fake_dest_hare_server['id'], 'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, } self.assertEqual(expected, result) mock_server_get.assert_called_once_with(self.context, 'fake_source_server_id') mock_get_destination.assert_called_once_with( self.context, 'fake_source_server_id', status=constants.STATUS_ACTIVE) def test_share_server_migration_get_progress_has_destination_only(self): mock_server_get = self.mock_object( db_api, 'share_server_get', mock.Mock(side_effect=exception.ShareServerNotFound( share_server_id='fake_id'))) mock_get_destination = self.mock_object( self.api, 'share_server_migration_get_destination', mock.Mock(side_effect=exception.InvalidShareServer(reason=''))) self.assertRaises( exception.InvalidShareServer, self.api.share_server_migration_get_progress, self.context, 'fake_src_server_id') mock_server_get.assert_called_once_with(self.context, 'fake_src_server_id') mock_get_destination.assert_called_once_with( self.context, 'fake_src_server_id', status=constants.STATUS_ACTIVE) def test_share_server_migration_get_progress_not_determinated(self): fake_service_host = 'host@backend' fake_share_server = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING, task_state=None, host=fake_service_host) mock_server_get = self.mock_object( db_api, 'share_server_get', mock.Mock(return_value=fake_share_server)) mock_get_destination = self.mock_object( self.api, 'share_server_migration_get_destination', mock.Mock(side_effect=exception.InvalidShareServer(reason=''))) mock_get_progress_state = self.mock_object( self.api, '_migration_get_progress_state', mock.Mock(return_value={'total_progress': 0})) result = 
        # NOTE(review): this chunk opens mid-test — the method header and the
        # `result = ...` assignment live before this view; fragment kept as-is.
        self.api.share_server_migration_get_progress(
            self.context, 'fake_source_server_id')
        expected = {
            'total_progress': 0,
            'destination_share_server_id': '',
            'task_state': '',
        }
        self.assertEqual(expected, result)
        mock_server_get.assert_called_once_with(
            self.context, 'fake_source_server_id')
        mock_get_destination.assert_called_once_with(
            self.context, fake_share_server['id'],
            status=constants.STATUS_SERVER_MIGRATING_TO)
        mock_get_progress_state.assert_called_once_with(fake_share_server)

    # A racy driver-side completion: the RPC progress call raises
    # InvalidShare, but the share's task state already says phase 1 is done,
    # so the API reports 100% instead of propagating the error.
    def test_migration_get_progress_race(self):
        instance1 = db_utils.create_share_instance(
            share_id='fake_id', status=constants.STATUS_MIGRATING,
            host='some_host')
        instance2 = db_utils.create_share_instance(
            share_id='fake_id', status=constants.STATUS_MIGRATING_TO)
        share = db_utils.create_share(
            id='fake_id',
            task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
            instances=[instance1, instance2])
        share_ref = fakes.fake_share(
            id='fake_id',
            task_state=constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)
        service = 'fake_service'
        expected = {'total_progress': 100}
        self.mock_object(utils, 'service_is_up',
                         mock.Mock(return_value=True))
        self.mock_object(db_api, 'service_get_by_args',
                         mock.Mock(return_value=service))
        self.mock_object(db_api, 'share_instance_get',
                         mock.Mock(return_value=instance1))
        self.mock_object(db_api, 'share_get',
                         mock.Mock(return_value=share_ref))
        self.mock_object(self.api.share_rpcapi, 'migration_get_progress',
                         mock.Mock(side_effect=exception.InvalidShare('fake')))
        result = self.api.migration_get_progress(self.context, share)
        self.assertEqual(expected, result)
        self.api.share_rpcapi.migration_get_progress.assert_called_once_with(
            self.context, instance1, instance2['id'])

    # A share network whose status is not "active" rejects security-service
    # updates with an HTTP 400.
    def test__share_network_update_initial_checks_network_not_active(self):
        share_network = db_utils.create_share_network(
            status=constants.STATUS_NETWORK_CHANGE)
        new_sec_service = db_utils.create_security_service(
            share_network_id=share_network['id'], type='ldap')
        self.assertRaises(
            webob_exc.HTTPBadRequest,
            self.api._share_network_update_initial_checks,
            self.context, share_network, new_sec_service
        )

    # A share server in ERROR on the network blocks the update.
    def test__share_network_update_initial_checks_server_not_active(self):
        share_subnet = db_utils.create_share_network_subnet(
            id='fakeid', share_network_id='fakenetid')
        db_utils.create_share_server(
            share_network_subnets=[share_subnet],
            status=constants.STATUS_ERROR,
            security_service_update_support=True)
        share_network = db_utils.create_share_network(id='fakenetid')
        new_sec_service = db_utils.create_security_service(
            share_network_id='fakenetid', type='ldap')
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_network_update_initial_checks,
            self.context, share_network, new_sec_service,
        )

    # Any share on the network that is not AVAILABLE blocks the update.
    def test__share_network_update_initial_checks_shares_not_available(self):
        share_subnet = db_utils.create_share_network_subnet(
            id='fakeid', share_network_id='fake_network_id')
        db_utils.create_share_server(share_network_subnets=[share_subnet],
                                     security_service_update_support=True)
        share_network = db_utils.create_share_network(
            id='fake_network_id')
        new_sec_service = db_utils.create_security_service(
            share_network_id='fake_network_id', type='ldap')
        shares = [db_utils.create_share(status=constants.STATUS_ERROR)]
        self.mock_object(utils, 'validate_service_host')
        self.mock_object(
            self.api, 'get_all', mock.Mock(return_value=shares))
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_network_update_initial_checks,
            self.context, share_network, new_sec_service
        )
        utils.validate_service_host.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), 'host1')
        self.api.get_all.assert_called_once_with(
            self.context,
            search_opts={'share_network_id': share_network['id']})

    # Shares whose access rules are in ERROR also block the update.
    def test__share_network_update_initial_checks_rules_in_error(self):
        share_subnet = db_utils.create_share_network_subnet(
            id='fakeid', share_network_id='fake_network_id')
        db_utils.create_share_server(share_network_subnets=[share_subnet],
                                     security_service_update_support=True)
        share_network = db_utils.create_share_network(
            id='fake_network_id')
        new_sec_service = db_utils.create_security_service(
            share_network_id='fake_network_id', type='ldap')
        shares = [db_utils.create_share(status=constants.STATUS_AVAILABLE)]
        shares[0]['instance']['access_rules_status'] = (
            constants.ACCESS_STATE_ERROR)
        self.mock_object(utils, 'validate_service_host')
        self.mock_object(
            self.api, 'get_all', mock.Mock(return_value=shares))
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_network_update_initial_checks,
            self.context, share_network, new_sec_service
        )
        utils.validate_service_host.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), 'host1')
        self.api.get_all.assert_called_once_with(
            self.context,
            search_opts={'share_network_id': share_network['id']})

    # A busy share (ShareBusyException from _check_is_share_busy) blocks
    # the update.
    def test__share_network_update_initial_checks_share_is_busy(self):
        share_subnet = db_utils.create_share_network_subnet(
            id='fakeid', share_network_id='fake_net_id')
        db_utils.create_share_server(share_network_subnets=[share_subnet],
                                     security_service_update_support=True)
        share_network = db_utils.create_share_network(id='fake_net_id')
        new_sec_service = db_utils.create_security_service(
            share_network_id='fake_net_id', type='ldap')
        shares = [db_utils.create_share(status=constants.STATUS_AVAILABLE)]
        self.mock_object(utils, 'validate_service_host')
        self.mock_object(
            self.api, 'get_all', mock.Mock(return_value=shares))
        self.mock_object(
            self.api, '_check_is_share_busy',
            mock.Mock(side_effect=exception.ShareBusyException(message='fake'))
        )
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_network_update_initial_checks,
            self.context, share_network, new_sec_service
        )
        utils.validate_service_host.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext), 'host1')
        self.api.get_all.assert_called_once_with(
            self.context,
            search_opts={'share_network_id': share_network['id']})
        self.api._check_is_share_busy.assert_called_once_with(shares[0])

    # A server that does not support security-service updates blocks the
    # operation outright.
    def test__share_network_update_initial_checks_unsupported_server(self):
        share_subnet = db_utils.create_share_network_subnet(
            id='fakeid', share_network_id='fake_net_id')
        db_utils.create_share_server(share_network_subnets=[share_subnet],
                                     security_service_update_support=False)
        share_network = db_utils.create_share_network(id='fake_net_id')
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_network_update_initial_checks,
            self.context, share_network, None
        )

    # Replacing a security service with one of a different type (ldap ->
    # kerberos) is rejected.
    def test__share_network_update_initial_checks_update_different_types(self):
        db_utils.create_share_server(share_network_subnet_id='fakeid',
                                     security_service_update_support=True)
        db_utils.create_share_network_subnet(
            id='fakeid', share_network_id='fake_net_id')
        share_network = db_utils.create_share_network(id='fake_net_id')
        new_sec_service = db_utils.create_security_service(
            share_network_id='fake_net_id', type='ldap')
        curr_sec_service = db_utils.create_security_service(
            share_network_id='fake_net_id', type='kerberos')
        self.assertRaises(
            exception.InvalidSecurityService,
            self.api._share_network_update_initial_checks,
            self.context, share_network, new_sec_service,
            current_security_service=curr_sec_service
        )

    # Adding a second security service of a type the network already has
    # (ldap) is rejected.
    def test__share_network_update_initial_checks_add_type_conflict(self):
        db_utils.create_share_server(share_network_subnet_id='fakeid',
                                     security_service_update_support=True)
        db_utils.create_share_network_subnet(
            id='fakeid', share_network_id='fake_net_id')
        share_network = db_utils.create_share_network(id='fake_net_id')
        db_utils.create_security_service(
            share_network_id='fake_net_id', type='ldap')
        share_network = db_api.share_network_get(self.context,
                                                 share_network['id'])
        new_sec_service = db_utils.create_security_service(
            share_network_id='fake_net_id', type='ldap')
        self.assertRaises(
            exception.InvalidSecurityService,
            self.api._share_network_update_initial_checks,
            self.context, share_network, new_sec_service,
        )

    # With no share servers on the network the update returns early and the
    # hosts-check key is never computed.
    def test_update_share_network_security_service_no_share_servers(self):
        mock_initial_checks = self.mock_object(
            self.api, '_share_network_update_initial_checks',
            mock.Mock(return_value=([], [])))
        mock_get_key = self.mock_object(
            self.api, 'get_security_service_update_key')
        fake_share_network = {'id': 'fake_share_net_id'}
        fake_sec_service = {'id': 'fake_sec_serv_id'}
        self.api.update_share_network_security_service(
            self.context, fake_share_network, fake_sec_service,
            current_security_service=None)
        mock_initial_checks.assert_called_once_with(
            self.context, fake_share_network, fake_sec_service,
            current_security_service=None)
        mock_get_key.assert_not_called()

    # Attempting the update without a prior compatibility check (no update
    # key stored) is rejected.
    def test_update_share_network_security_service_without_check(self):
        mock_initial_checks = self.mock_object(
            self.api, '_share_network_update_initial_checks',
            mock.Mock(return_value=(['fake_server'], ['fake_host'])))
        mock_get_key = self.mock_object(
            self.api, 'get_security_service_update_key',
            mock.Mock(return_value=None))
        fake_share_network = {'id': 'fake_share_net_id'}
        fake_sec_service = {'id': 'fake_sec_serv_id'}
        self.assertRaises(exception.InvalidShareNetwork,
                          self.api.update_share_network_security_service,
                          self.context, fake_share_network,
                          fake_sec_service)
        mock_initial_checks.assert_called_once_with(
            self.context, fake_share_network, fake_sec_service,
            current_security_service=None)
        mock_get_key.assert_called_once_with(
            'hosts_check', fake_sec_service['id'],
            current_security_service_id=None)

    # If host validation blows up, the stored async-operation data is
    # cleaned up and InvalidShareNetwork is raised.
    def test_update_share_network_security_service_update_hosts_failure(self):
        mock_initial_checks = self.mock_object(
            self.api, '_share_network_update_initial_checks',
            mock.Mock(return_value=(['fake_server'], ['fake_host'])))
        mock_get_key = self.mock_object(
            self.api, 'get_security_service_update_key',
            mock.Mock(return_value='fake_key'))
        mock_async_db_get = self.mock_object(
            db_api, 'async_operation_data_get',
            mock.Mock(return_value='fake_value'))
        mock_validate_host = self.mock_object(
            self.api, '_security_service_update_validate_hosts',
            mock.Mock(side_effect=Exception))
        mock_async_db_delete = self.mock_object(
            db_api, 'async_operation_data_delete')
        fake_share_network = {'id': 'fake_share_net_id'}
        fake_sec_service = {'id': 'fake_sec_serv_id'}
        self.assertRaises(exception.InvalidShareNetwork,
                          self.api.update_share_network_security_service,
                          self.context, fake_share_network,
                          fake_sec_service)
        mock_initial_checks.assert_called_once_with(
            self.context, fake_share_network, fake_sec_service,
            current_security_service=None)
        mock_get_key.assert_called_once_with(
            'hosts_check', fake_sec_service['id'],
            current_security_service_id=None)
        mock_async_db_get.assert_called_once_with(
            self.context, fake_share_network['id'], 'fake_key')
        mock_validate_host.assert_called_once_with(
            self.context, fake_share_network, ['fake_host'], ['fake_server'],
            new_security_service_id=fake_sec_service['id'],
            current_security_service_id=None)
        mock_async_db_delete.assert_called_once_with(
            self.context, fake_share_network['id'], 'fake_key')

    # A backend host reporting "not compatible" (False, None) makes the
    # update fail with InvalidShareNetwork.
    def test_update_share_network_security_service_backend_host_failure(self):
        share_network = db_utils.create_share_network()
        security_service = db_utils.create_security_service()
        backend_host = 'fakehost'
        mock_initial_checks = self.mock_object(
            self.api, '_share_network_update_initial_checks',
            mock.Mock(return_value=(['fake_server'], [backend_host])))
        mock_get_update_key = self.mock_object(
            self.api, 'get_security_service_update_key',
            mock.Mock(return_value='fake_key'))
        mock_db_async_op = self.mock_object(
            db_api, 'async_operation_data_get',
            mock.Mock(return_value='fake_update_value'))
        mock_validate_host = self.mock_object(
            self.api, '_security_service_update_validate_hosts',
            mock.Mock(return_value=(False, None)))
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api.update_share_network_security_service,
            self.context, share_network, security_service)
        mock_initial_checks.assert_called_once_with(
            self.context, share_network, security_service,
            current_security_service=None)
        mock_db_async_op.assert_called_once_with(
            self.context, share_network['id'], 'fake_key')
        # Tail assertions of the backend-host-failure test above: the key
        # lookup and host validation happened with the expected arguments.
        mock_get_update_key.assert_called_once_with(
            'hosts_check', security_service['id'],
            current_security_service_id=None)
        mock_validate_host.assert_called_once_with(
            self.context, share_network, [backend_host], ['fake_server'],
            new_security_service_id=security_service['id'],
            current_security_service_id=None)

    # Happy path: all hosts validate, the network and its servers are moved
    # to *_NETWORK_CHANGE statuses, the RPC update is sent, and the stored
    # hosts-check data is deleted.
    def test_update_share_network_security_service(self):
        share_network = db_utils.create_share_network()
        security_service = db_utils.create_security_service()
        backend_hosts = ['fakehost']
        fake_update_key = 'fake_key'
        servers = [
            db_utils.create_share_server() for i in range(2)]
        server_ids = [server['id'] for server in servers]
        mock_initial_checks = self.mock_object(
            self.api, '_share_network_update_initial_checks',
            mock.Mock(return_value=(servers, backend_hosts)))
        mock_get_update_key = self.mock_object(
            self.api, 'get_security_service_update_key',
            mock.Mock(return_value=fake_update_key))
        mock_db_async_op = self.mock_object(
            db_api, 'async_operation_data_get',
            mock.Mock(return_value='fake_update_value'))
        mock_validate_host = self.mock_object(
            self.api, '_security_service_update_validate_hosts',
            mock.Mock(return_value=(True, None)))
        mock_network_update = self.mock_object(
            db_api, 'share_network_update')
        mock_servers_update = self.mock_object(
            db_api, 'share_servers_update')
        mock_update_security_services = self.mock_object(
            self.share_rpcapi, 'update_share_network_security_service')
        mock_db_async_op_del = self.mock_object(
            db_api, 'async_operation_data_delete',)
        self.api.update_share_network_security_service(
            self.context, share_network, security_service)
        mock_initial_checks.assert_called_once_with(
            self.context, share_network, security_service,
            current_security_service=None)
        mock_db_async_op.assert_called_once_with(
            self.context, share_network['id'], fake_update_key)
        mock_get_update_key.assert_called_once_with(
            'hosts_check', security_service['id'],
            current_security_service_id=None)
        mock_validate_host.assert_called_once_with(
            self.context, share_network, backend_hosts, servers,
            new_security_service_id=security_service['id'],
            current_security_service_id=None)
        mock_network_update.assert_called_once_with(
            self.context, share_network['id'],
            {'status': constants.STATUS_NETWORK_CHANGE})
        mock_servers_update.assert_called_once_with(
            self.context, server_ids,
            {'status': constants.STATUS_SERVER_NETWORK_CHANGE}
        )
        mock_update_security_services.assert_called_once_with(
            self.context, backend_hosts[0], share_network['id'],
            security_service['id'], current_security_service_id=None)
        mock_db_async_op_del.assert_called_once_with(
            self.context, share_network['id'], fake_update_key)

    # The validate-hosts wrapper delegates to _do_update_validate_hosts with
    # the computed hosts-check key.
    def test__security_service_update_validate_hosts_new_check(self):
        curr_sec_service_id = "fake_curr_sec_serv_id"
        new_sec_service_id = "fake_new_sec_serv_id"
        fake_key = (curr_sec_service_id + '_' + new_sec_service_id + '_' +
                    'hosts_check')
        fake_share_network = {'id': 'fake_network_id'}
        backend_hosts = {'hostA', 'hostB'}
        fake_return = 'fake_update'
        mock_get_key = self.mock_object(
            self.api, 'get_security_service_update_key',
            mock.Mock(return_value=fake_key))
        mock_do_update_validate = self.mock_object(
            self.api, '_do_update_validate_hosts',
            mock.Mock(return_value=fake_return))
        res = self.api._security_service_update_validate_hosts(
            self.context, fake_share_network, backend_hosts, None,
            new_security_service_id=new_sec_service_id,
            current_security_service_id=curr_sec_service_id)
        self.assertEqual(fake_return, res)
        mock_get_key.assert_called_once_with(
            'hosts_check', new_sec_service_id,
            current_security_service_id=curr_sec_service_id)
        mock_do_update_validate.assert_called_once_with(
            self.context, fake_share_network['id'], backend_hosts, fake_key,
            new_security_service_id=new_sec_service_id,
            current_security_service_id=curr_sec_service_id)

    # First pass of _do_update_validate_hosts (no stored data yet): every
    # host is recorded as None and the per-host RPC check is fired —
    # the security-service RPC or the allocations RPC depending on mode.
    @ddt.data(True, False)
    def test__do_update_validate_hosts(self, update_security_service):
        curr_sec_service_id = None
        new_sec_service_id = None
        new_share_network_subnet = 'fake_new_share_network_subnet'
        if update_security_service:
            curr_sec_service_id = "fake_curr_sec_serv_id"
            new_sec_service_id = "fake_new_sec_serv_id"
            new_share_network_subnet = None
        fake_key = 'fake_key'
        fake_share_network_id = 'fake_network_id'
        backend_hosts = {'hostA', 'hostB'}
        hosts_to_validate = {}
        for bh in backend_hosts:
            hosts_to_validate[bh] = None
        mock_async_data_get = self.mock_object(
            db_api, 'async_operation_data_get',
            mock.Mock(return_value=None))
        mock_async_data_update = self.mock_object(
            db_api, 'async_operation_data_update')
        mock_check_update_allocations = self.mock_object(
            self.share_rpcapi,
            'check_update_share_server_network_allocations')
        mock_check_update_services = self.mock_object(
            self.share_rpcapi,
            'check_update_share_network_security_service')
        compatible, hosts_info = self.api._do_update_validate_hosts(
            self.context, fake_share_network_id, backend_hosts, fake_key,
            new_share_network_subnet=new_share_network_subnet,
            new_security_service_id=new_sec_service_id,
            current_security_service_id=curr_sec_service_id)
        # No host has answered yet, so compatibility is undecided (None).
        self.assertIsNone(compatible)
        self.assertEqual(hosts_to_validate, hosts_info)
        mock_async_data_get.assert_called_once_with(
            self.context, fake_share_network_id, fake_key)
        mock_async_data_update.assert_called_once_with(
            self.context, fake_share_network_id,
            {fake_key: json.dumps(hosts_to_validate)})
        mock_share_api_check_calls = []
        for host in backend_hosts:
            if update_security_service:
                mock_share_api_check_calls.append(
                    mock.call(self.context, host, fake_share_network_id,
                              new_sec_service_id,
                              current_security_service_id=curr_sec_service_id))
            else:
                mock_share_api_check_calls.append(
                    mock.call(self.context, host, fake_share_network_id,
                              new_share_network_subnet))
        if update_security_service:
            mock_check_update_services.assert_has_calls(
                mock_share_api_check_calls)
            mock_check_update_allocations.assert_not_called()
        else:
            mock_check_update_allocations.assert_has_calls(
                mock_share_api_check_calls)
            mock_check_update_services.assert_not_called()

    # Subsequent passes: existing per-host answers are merged with a
    # possibly new host; result is True/False/None per the ddt table.
    @ddt.data(
        {'update_security_service': True, 'new_host': None,
         'host_support': None, 'exp_result': None},
        {'update_security_service': True, 'new_host': None,
         'host_support': False, 'exp_result': False},
        {'update_security_service': True, 'new_host': None,
         'host_support': True, 'exp_result': True},
        {'update_security_service': True, 'new_host': 'hostC',
         'host_support': None, 'exp_result': None},
        {'update_security_service': True, 'new_host': 'hostC',
         'host_support': False, 'exp_result': False},
        {'update_security_service': True, 'new_host': 'hostC',
         'host_support': True, 'exp_result': None},
        {'update_security_service': False, 'new_host': None,
         'host_support': None, 'exp_result': None},
        {'update_security_service': False, 'new_host': None,
         'host_support': False, 'exp_result': False},
        {'update_security_service': False, 'new_host': None,
         'host_support': True, 'exp_result': True},
        {'update_security_service': False, 'new_host': 'hostC',
         'host_support': None, 'exp_result': None},
        {'update_security_service': False, 'new_host': 'hostC',
         'host_support': False, 'exp_result': False},
        {'update_security_service': False, 'new_host': 'hostC',
         'host_support': True, 'exp_result': None},
    )
    @ddt.unpack
    def test__do_update_validate_hosts_all(
            self, update_security_service, new_host, host_support,
            exp_result):
        curr_sec_service_id = None
        new_sec_service_id = None
        new_share_network_subnet = 'fake_new_share_network_subnet'
        if update_security_service:
            curr_sec_service_id = "fake_curr_sec_serv_id"
            new_sec_service_id = "fake_new_sec_serv_id"
            new_share_network_subnet = None
        fake_key = 'fake_key'
        fake_share_network_id = 'fake_network_id'
        backend_hosts = ['hostA', 'hostB']
        hosts_to_validate = {}
        for bh in backend_hosts:
            hosts_to_validate[bh] = host_support
        json_orig_hosts = json.dumps(hosts_to_validate)
        if new_host:
            backend_hosts.append(new_host)
            hosts_to_validate[new_host] = None
        mock_async_data_get = self.mock_object(
            db_api, 'async_operation_data_get',
            mock.Mock(return_value=json_orig_hosts))
        mock_async_data_update = self.mock_object(
            db_api, 'async_operation_data_update')
        mock_check_update_allocations = self.mock_object(
            self.share_rpcapi,
            'check_update_share_server_network_allocations')
        mock_check_update_services = self.mock_object(
            self.share_rpcapi,
            'check_update_share_network_security_service')
        result, hosts_info = self.api._do_update_validate_hosts(
            self.context, fake_share_network_id, backend_hosts, fake_key,
            new_share_network_subnet=new_share_network_subnet,
            new_security_service_id=new_sec_service_id,
            current_security_service_id=curr_sec_service_id)
        self.assertEqual(exp_result, result)
        self.assertEqual(hosts_to_validate, hosts_info)
        mock_async_data_get.assert_called_once_with(
            self.context, fake_share_network_id, fake_key)
        # We fail earlier if one of the hosts answers False, so the new host
        # is only stored and re-checked when nothing has failed yet.
        if new_host and host_support is not False:
            mock_async_data_update.assert_called_once_with(
                self.context, fake_share_network_id,
                {fake_key: json.dumps(hosts_to_validate)})
            if update_security_service:
                mock_check_update_services.assert_called_once_with(
                    self.context, new_host, fake_share_network_id,
                    new_sec_service_id,
                    current_security_service_id=curr_sec_service_id)
            else:
                mock_check_update_allocations.assert_called_once_with(
                    self.context, new_host, fake_share_network_id,
                    new_share_network_subnet)

    # Soft delete is rejected on an already soft-deleted share.
    def test_soft_delete_share_already_soft_deleted(self):
        share = fakes.fake_share(id='fake_id',
                                 status=constants.STATUS_AVAILABLE,
                                 is_soft_deleted=True)
        self.assertRaises(exception.InvalidShare,
                          self.api.soft_delete, self.context, share)

    # Soft delete is rejected when the share status is not deletable.
    def test_soft_delete_invalid_status(self):
        invalid_status = 'fake'
        share = fakes.fake_share(id='fake_id',
                                 status=invalid_status,
                                 is_soft_deleted=False)
        self.assertRaises(exception.InvalidShare,
                          self.api.soft_delete, self.context, share)

    # A share with replicas cannot be soft deleted (Conflict).
    def test_soft_delete_share_with_replicas(self):
        share = fakes.fake_share(id='fake_id',
                                 has_replicas=True,
                                 status=constants.STATUS_AVAILABLE,
                                 is_soft_deleted=False)
        self.assertRaises(exception.Conflict,
                          self.api.soft_delete, self.context, share)

    # A share with snapshots cannot be soft deleted.
    def test_soft_delete_share_with_snapshot(self):
        share = fakes.fake_share(id='fake_id',
                                 status=constants.STATUS_AVAILABLE,
                                 has_replicas=False,
                                 is_soft_deleted=False)
        snapshot = fakes.fake_snapshot(create_instance=True,
                                       as_primitive=True)
        mock_db_snapshot_call = self.mock_object(
            db_api, 'share_snapshot_get_all_for_share', mock.Mock(
                return_value=[snapshot]))
        self.assertRaises(exception.InvalidShare,
                          self.api.soft_delete, self.context, share)
        mock_db_snapshot_call.assert_called_once_with(
            self.context, share['id'])

    # Membership in share-group snapshots (count > 0) blocks soft delete.
    @mock.patch.object(db_api, 'count_share_group_snapshot_members_in_share',
                       mock.Mock(return_value=2))
    def test_soft_delete_share_with_group_snapshot_members(self):
        share = fakes.fake_share(id='fake_id',
                                 status=constants.STATUS_AVAILABLE,
                                 has_replicas=False,
                                 is_soft_deleted=False)
        self.mock_object(db_api, 'share_backups_get_all',
                         mock.Mock(return_value=[]))
        self.assertRaises(exception.InvalidShare,
                          self.api.soft_delete, self.context, share)

    # Resource locks veto soft delete before any other check runs.
    def test_soft_delete_locked_share(self):
        self.mock_object(
            self.api.db, 'resource_lock_get_all',
            mock.Mock(return_value=([{'id': 'l1'}, {'id': 'l2'}], None))
        )
        share = self._setup_delete_mocks('available')
        self.mock_object(db_api, 'share_soft_delete')
        self.assertRaises(exception.InvalidShare,
                          self.api.soft_delete, self.context, share)
        # lock check decorator executed first, nothing else is invoked
        db_api.share_soft_delete.assert_not_called()
        db_api.share_snapshot_get_all_for_share.assert_not_called()

    # Happy path: no snapshots/backups/group members, share not busy.
    def test_soft_delete_share(self):
        share = fakes.fake_share(id='fake_id',
                                 status=constants.STATUS_AVAILABLE,
                                 has_replicas=False,
                                 is_soft_deleted=False)
        self.mock_object(db_api, 'share_snapshot_get_all_for_share',
                         mock.Mock(return_value=[]))
        self.mock_object(db_api, 'share_backups_get_all',
                         mock.Mock(return_value=[]))
        self.mock_object(db_api,
                         'count_share_group_snapshot_members_in_share',
                         mock.Mock(return_value=0))
        self.mock_object(db_api, 'share_soft_delete')
        self.mock_object(self.api, '_check_is_share_busy')
        # Tail of test_soft_delete_share: the call succeeds and the busy
        # check was consulted.
        self.api.soft_delete(self.context, share)
        self.api._check_is_share_busy.assert_called_once_with(share)

    # Restoring a soft-deleted share simply goes through db_api.
    def test_restore_share(self):
        share = fakes.fake_share(id='fake_id',
                                 status=constants.STATUS_AVAILABLE,
                                 is_soft_deleted=True)
        self.mock_object(db_api, 'share_restore')
        self.api.restore(self.context, share)

    # The allocations-validate wrapper builds the subnet dict and delegates
    # to _do_update_validate_hosts.
    def test__share_server_update_allocations_validate_hosts(self):
        update_return = 'fake_return'
        mock_do_update = self.mock_object(
            self.api, '_do_update_validate_hosts',
            mock.Mock(return_value=update_return))
        backend_hosts = 'fake_hosts'
        update_key = 'fake_key'
        share_network_id = 'fake_net_id'
        subnet = {
            'neutron_net_id': 'fake_net_id',
            'neutron_subnet_id': 'fake_subnet_id',
            'availability_zone_id': 'fake_availability_zone_id',
        }
        res = self.api._share_server_update_allocations_validate_hosts(
            self.context, backend_hosts, update_key,
            share_network_id=share_network_id,
            neutron_net_id=subnet['neutron_net_id'],
            neutron_subnet_id=subnet['neutron_subnet_id'],
            availability_zone_id=subnet['availability_zone_id'])
        self.assertEqual(update_return, res)
        mock_do_update.assert_called_once_with(
            self.context, share_network_id, backend_hosts, update_key,
            new_share_network_subnet=subnet)

    # The update key concatenates the network id and the (stringified)
    # availability zone id.
    def test_get_share_server_update_allocations_key(self):
        availability_zone_id = None
        share_network_id = 'fake_share_network_id'
        expected_key = ('share_server_update_allocations_' +
                        share_network_id + '_' + str(availability_zone_id) +
                        '_' + 'hosts_check')
        res = self.api.get_share_server_update_allocations_key(
            share_network_id, availability_zone_id)
        self.assertEqual(expected_key, res)

    # Happy path: the initial checks return the set of backend hosts after
    # validating the service host and each share's busy state.
    def test__share_server_update_allocations_initial_checks(self):
        share_network = db_utils.create_share_network()
        share1 = db_utils.create_share(
            share_network_id=share_network['id'],
            status=constants.STATUS_AVAILABLE)
        server_host = 'fake_host'
        share_server = db_utils.create_share_server(host=server_host)
        mock_validate_service_host = self.mock_object(
            utils, 'validate_service_host')
        mock_share_get_all_by_share_server = self.mock_object(
            self.api.db, 'share_get_all_by_share_server',
            mock.Mock(return_value=[share1]))
        mock_share_is_busy = self.mock_object(
            self.api, '_check_is_share_busy')
        res_hosts = self.api._share_server_update_allocations_initial_checks(
            self.context, share_network, [share_server])
        self.assertEqual(set([server_host]), res_hosts)
        mock_validate_service_host.assert_called_once()
        mock_share_get_all_by_share_server.assert_called_once_with(
            self.context, share_server['id'])
        mock_share_is_busy.assert_called_once_with(share1)

    # A network without allocation-update support is rejected.
    def test__share_server_update_allocations_initial_checks_no_support(self):
        fake_share_network = {
            'id': 'fake_sn_id',
            'network_allocation_update_support': False,
            'status': constants.STATUS_NETWORK_ACTIVE,
        }
        sn_subnet = db_utils.create_share_network_subnet()
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_server_update_allocations_initial_checks,
            self.context, fake_share_network, sn_subnet)

    # An inactive share server is rejected.
    def test__share_server_update_allocations_initial_checks_inactive(self):
        share_network = db_utils.create_share_network()
        share_server = db_utils.create_share_server(
            status=constants.STATUS_INACTIVE)
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_server_update_allocations_initial_checks,
            self.context, share_network, [share_server])

    # A share on the server that is not AVAILABLE is rejected.
    def test__share_server_update_allocations_initial_checks_shares_na(self):
        share_network = db_utils.create_share_network()
        share1 = db_utils.create_share(
            share_network_id=share_network['id'],
            status=constants.STATUS_ERROR)
        share_server = db_utils.create_share_server()
        mock_validate_service_host = self.mock_object(
            utils, 'validate_service_host')
        mock_share_get_all_by_share_server = self.mock_object(
            self.api.db, 'share_get_all_by_share_server',
            mock.Mock(return_value=[share1]))
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_server_update_allocations_initial_checks,
            self.context, share_network, [share_server])
        mock_validate_service_host.assert_called_once()
        # Tail assertion of the shares-not-available test above.
        mock_share_get_all_by_share_server.assert_called_once_with(
            self.context, share_server['id'])

    # Inactive access rules on a share block the allocation update.
    def test__share_server_update_allocations_initial_checks_rules_na(self):
        share_network = db_utils.create_share_network()
        share1 = db_utils.create_share(
            share_network_id=share_network['id'],
            status=constants.STATUS_AVAILABLE)
        share_server = db_utils.create_share_server()
        share1['instance']['access_rules_status'] = constants.STATUS_INACTIVE
        mock_validate_service_host = self.mock_object(
            utils, 'validate_service_host')
        mock_share_get_all_by_share_server = self.mock_object(
            self.api.db, 'share_get_all_by_share_server',
            mock.Mock(return_value=[share1]))
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_server_update_allocations_initial_checks,
            self.context, share_network, [share_server])
        mock_validate_service_host.assert_called_once()
        mock_share_get_all_by_share_server.assert_called_once_with(
            self.context, share_server['id'])

    # A busy share blocks the allocation update.
    def test__share_server_update_allocations_initial_checks_share_busy(self):
        share_network = db_utils.create_share_network()
        share1 = db_utils.create_share(
            share_network_id=share_network['id'],
            status=constants.STATUS_AVAILABLE)
        share_server = db_utils.create_share_server()
        mock_validate_service_host = self.mock_object(
            utils, 'validate_service_host')
        mock_share_get_all_by_share_server = self.mock_object(
            self.api.db, 'share_get_all_by_share_server',
            mock.Mock(return_value=[share1]))
        mock_share_is_busy = self.mock_object(
            self.api, '_check_is_share_busy',
            mock.Mock(side_effect=exception.ShareBusyException(message='fake'))
        )
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api._share_server_update_allocations_initial_checks,
            self.context, share_network, [share_server])
        mock_validate_service_host.assert_called_once()
        mock_share_get_all_by_share_server.assert_called_once_with(
            self.context, share_server['id'])
        mock_share_is_busy.assert_called_once_with(share1)

    # Check path: previous data is reset and the per-host validation result
    # is returned as {'compatible': ..., 'hosts_check_result': ...}.
    def test_check_update_share_server_network_allocations(self):
        backend_hosts = 'fake_hosts'
        mock_initial_check = self.mock_object(
            self.api, '_share_server_update_allocations_initial_checks',
            mock.Mock(return_value=backend_hosts))
        update_key = 'fake_key'
        mock_get_key = self.mock_object(
            self.api, 'get_share_server_update_allocations_key',
            mock.Mock(return_value=update_key))
        mock_reset_data = self.mock_object(
            self.api.db, 'async_operation_data_delete')
        compatible = True
        hosts_info = {'fake_host': True}
        mock_validate_hosts = self.mock_object(
            self.api, '_share_server_update_allocations_validate_hosts',
            mock.Mock(return_value=(compatible, hosts_info)))
        share_network = {'id': 'fake_id'}
        new_share_network_subnet = {
            'share_servers': 'fake_servers',
            'availability_zone_id': 'fake_availability_zone_id',
            'neutron_net_id': 'fake_neutron_net_id',
            'neutron_subnet_id': 'fake_neutron_subnet_id',
        }
        res = self.api.check_update_share_server_network_allocations(
            self.context, share_network, new_share_network_subnet, True)
        self.assertEqual(
            {'compatible': compatible, 'hosts_check_result': hosts_info}, res)
        mock_initial_check.assert_called_once_with(
            self.context, share_network,
            new_share_network_subnet['share_servers'])
        mock_get_key.assert_called_once_with(
            share_network['id'],
            new_share_network_subnet['availability_zone_id'])
        mock_reset_data.assert_called_once_with(
            self.context, share_network['id'], update_key)
        mock_validate_hosts.assert_called_once_with(
            self.context, backend_hosts, update_key,
            share_network_id=share_network['id'],
            neutron_net_id=new_share_network_subnet['neutron_net_id'],
            neutron_subnet_id=new_share_network_subnet['neutron_subnet_id'],
            availability_zone_id=(
                new_share_network_subnet["availability_zone_id"]))

    # Validation errors from the hosts check propagate out of the check API.
    def test_check_update_share_server_network_allocations_failed(self):
        backend_hosts = 'fake_hosts'
        self.mock_object(
            self.api, '_share_server_update_allocations_initial_checks',
            mock.Mock(return_value=backend_hosts))
        update_key = 'fake_key'
        self.mock_object(
            self.api, 'get_share_server_update_allocations_key',
            mock.Mock(return_value=update_key))
        self.mock_object(self.api.db, 'async_operation_data_delete')
        self.mock_object(
            self.api, '_share_server_update_allocations_validate_hosts',
            mock.Mock(side_effect=exception.InvalidShareNetwork(reason="msg")))
        share_network = {'id': 'fake_id'}
        new_share_network_subnet = {
            'share_servers': 'fake_servers',
            'availability_zone_id': 'fake_availability_zone_id',
            'neutron_net_id': 'fake_neutron_net_id',
            'neutron_subnet_id': 'fake_neutron_subnet_id',
        }
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api.check_update_share_server_network_allocations,
            self.context, share_network, new_share_network_subnet, True)

    # Happy path: statuses flip to *_NETWORK_CHANGE, the new subnet row is
    # created, the RPC update is sent, and the check data is cleaned up.
    def test_update_share_server_network_allocations(self):
        backend_host = 'fake_host'
        backend_hosts = [backend_host]
        mock_initial_check = self.mock_object(
            self.api, '_share_server_update_allocations_initial_checks',
            mock.Mock(return_value=backend_hosts))
        update_key = 'fake_key'
        mock_get_key = self.mock_object(
            self.api, 'get_share_server_update_allocations_key',
            mock.Mock(return_value=update_key))
        mock_get_data = self.mock_object(
            self.api.db, 'async_operation_data_get',
            mock.Mock(return_value='fake_update_value'))
        mock_validate_hosts = self.mock_object(
            self.api, '_share_server_update_allocations_validate_hosts',
            mock.Mock(return_value=(True, 'fake_host')))
        mock_net_update = self.mock_object(self.api.db,
                                           'share_network_update')
        mock_server_update = self.mock_object(self.api.db,
                                              'share_servers_update')
        new_share_network_subnet_db = {'id': 'fake_subnet_id'}
        mock_subnet_create = self.mock_object(
            self.api.db, 'share_network_subnet_create',
            mock.Mock(return_value=new_share_network_subnet_db))
        mock_update_allocations = self.mock_object(
            self.api.share_rpcapi,
            'update_share_server_network_allocations')
        mock_delete_data = self.mock_object(self.api.db,
                                            'async_operation_data_delete')
        share_network = {'id': 'fake_id'}
        server1 = {'id': 'fake_id'}
        new_share_network_subnet = {
            'share_servers': [server1],
            'availability_zone_id': 'fake_availability_zone_id',
            'neutron_net_id': 'fake_neutron_net_id',
            'neutron_subnet_id': 'fake_neutron_subnet_id',
        }
        res_subnet = self.api.update_share_server_network_allocations(
            self.context, share_network, new_share_network_subnet)
        self.assertEqual(new_share_network_subnet_db, res_subnet)
        mock_initial_check.assert_called_once_with(
            self.context, share_network,
            new_share_network_subnet['share_servers'])
        mock_get_key.assert_called_once_with(
            share_network['id'],
            new_share_network_subnet['availability_zone_id'])
        mock_get_data.assert_called_once_with(
            self.context, share_network['id'], update_key)
        mock_validate_hosts.assert_called_once_with(
            self.context, backend_hosts, update_key,
            share_network_id=share_network['id'],
            neutron_net_id=new_share_network_subnet['neutron_net_id'],
            neutron_subnet_id=new_share_network_subnet['neutron_subnet_id'],
            availability_zone_id=(
                new_share_network_subnet["availability_zone_id"]))
        mock_net_update.assert_called_once_with(
            self.context, share_network['id'],
            {'status': constants.STATUS_NETWORK_CHANGE})
        mock_server_update.assert_called_once_with(
            self.context, [server1['id']],
            {'status': constants.STATUS_SERVER_NETWORK_CHANGE})
        mock_subnet_create.assert_called_once_with(
            self.context, new_share_network_subnet)
        mock_update_allocations.assert_called_once_with(
            self.context, backend_host, share_network['id'],
            new_share_network_subnet_db['id'])
        mock_delete_data.assert_called_once_with(
            self.context, share_network['id'], update_key)

    # Updating without a prior check (no stored async data) is rejected.
    def test_update_share_server_network_allocations_no_check(self):
        backend_host = 'fake_host'
        backend_hosts = [backend_host]
        self.mock_object(
            self.api, '_share_server_update_allocations_initial_checks',
            mock.Mock(return_value=backend_hosts))
        update_key = 'fake_key'
        self.mock_object(
            self.api, 'get_share_server_update_allocations_key',
            mock.Mock(return_value=update_key))
        self.mock_object(
            self.api.db, 'async_operation_data_get',
            mock.Mock(return_value=None))
        share_network = {'id': 'fake_id'}
        server1 = {'id': 'fake_id'}
        new_share_network_subnet = {
            'share_servers': [server1],
            'availability_zone_id': 'fake_availability_zone_id',
            'neutron_net_id': 'fake_neutron_net_id',
            'neutron_subnet_id': 'fake_neutron_subnet_id',
        }
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api.update_share_server_network_allocations,
            self.context, share_network, new_share_network_subnet)

    # Host validation raising InvalidShareNetwork aborts the update and
    # cleans up the stored check data.
    def test_update_share_server_network_allocations_fail_validation(self):
        backend_host = 'fake_host'
        backend_hosts = [backend_host]
        self.mock_object(
            self.api, '_share_server_update_allocations_initial_checks',
            mock.Mock(return_value=backend_hosts))
        update_key = 'fake_key'
        self.mock_object(
            self.api, 'get_share_server_update_allocations_key',
            mock.Mock(return_value=update_key))
        self.mock_object(
            self.api.db, 'async_operation_data_get',
            mock.Mock(return_value='fake_update_value'))
        self.mock_object(
            self.api, '_share_server_update_allocations_validate_hosts',
            mock.Mock(side_effect=exception.InvalidShareNetwork(
                reason='fake_reason')))
        mock_delete_data = self.mock_object(self.api.db,
                                            'async_operation_data_delete')
        share_network = {'id': 'fake_id'}
        server1 = {'id': 'fake_id'}
        new_share_network_subnet = {
            'share_servers': [server1],
            'availability_zone_id': 'fake_availability_zone_id',
            'neutron_net_id': 'fake_neutron_net_id',
            'neutron_subnet_id': 'fake_neutron_subnet_id',
        }
        self.assertRaises(
            exception.InvalidShareNetwork,
            self.api.update_share_server_network_allocations,
            self.context, share_network, new_share_network_subnet)
        mock_delete_data.assert_called_once_with(
            self.context, share_network['id'], update_key)

    # A non-True validation result (False or still-undecided None) makes the
    # update fail.
    @ddt.data(False, None)
    def test_update_share_server_network_allocations_check_fail(self, result):
        backend_host = 'fake_host'
        backend_hosts = [backend_host]
        self.mock_object(
            self.api, '_share_server_update_allocations_initial_checks',
            mock.Mock(return_value=backend_hosts))
        update_key = 'fake_key'
        self.mock_object(
            self.api, 'get_share_server_update_allocations_key',
            mock.Mock(return_value=update_key))
self.mock_object( self.api.db, 'async_operation_data_get', mock.Mock(return_value='fake_update_value')) self.mock_object( self.api, '_share_server_update_allocations_validate_hosts', mock.Mock(return_value=(result, 'fake_host'))) share_network = {'id': 'fake_id'} server1 = {'id': 'fake_id'} new_share_network_subnet = { 'share_servers': [server1], 'availability_zone_id': 'fake_availability_zone_id', 'neutron_net_id': 'fake_neutron_net_id', 'neutron_subnet_id': 'fake_neutron_subnet_id', } self.assertRaises( exception.InvalidShareNetwork, self.api.update_share_server_network_allocations, self.context, share_network, new_share_network_subnet) @ddt.data(None, {'driver': test}) def test_create_share_backup(self, backup_opts): share = db_utils.create_share(is_public=True, status='available') backup_ref = db_utils.create_backup(share['id'], status='available') reservation = 'fake' self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=reservation)) self.mock_object(quota.QUOTAS, 'commit') self.mock_object(db_api, 'share_backup_create', mock.Mock(return_value=backup_ref)) self.mock_object(db_api, 'share_backup_update', mock.Mock()) self.mock_object(data_rpc.DataAPI, 'create_backup', mock.Mock()) self.mock_object(self.share_rpcapi, 'create_backup', mock.Mock()) backup = { 'display_name': 'tmp_backup', 'backup_options': backup_opts, 'id': 'dbe28dff-ce7d-4c3d-a795-c31f6fee31ab', } self.api.create_share_backup(self.context, share, backup) quota.QUOTAS.reserve.assert_called_once() db_api.share_backup_create.assert_called_once() quota.QUOTAS.commit.assert_called_once() db_api.share_backup_update.assert_called_once() if backup_opts: self.share_rpcapi.create_backup.assert_called_once_with( self.context, backup_ref) else: data_rpc.DataAPI.create_backup.assert_called_once_with( self.context, backup_ref) def test_create_share_backup_share_error_state(self): share = db_utils.create_share(is_public=True, status='error') backup = { 'display_name': 'tmp_backup', 'id': 
'dbe28dff-ce7d-4c3d-a795-c31f6fee31ab', } self.assertRaises(exception.InvalidShare, self.api.create_share_backup, self.context, share, backup) def test_create_share_backup_share_busy_task_state(self): share = db_utils.create_share( is_public=True, task_state='data_copying_in_progress') backup = { 'display_name': 'tmp_backup', 'id': 'dbe28dff-ce7d-4c3d-a795-c31f6fee31ab', } self.assertRaises(exception.ShareBusyException, self.api.create_share_backup, self.context, share, backup) def test_create_share_backup_share_has_snapshots(self): share = db_utils.create_share( is_public=True, state='available') snapshot = db_utils.create_snapshot( share_id=share['id'], status='available', size=1) backup = { 'display_name': 'tmp_backup', 'id': 'dbe28dff-ce7d-4c3d-a795-c31f6fee31ab', } self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[snapshot])) self.assertRaises(exception.InvalidShare, self.api.create_share_backup, self.context, share, backup) def test_create_share_backup_share_has_replicas(self): share = fakes.fake_share(id='fake_id', has_replicas=True, status=constants.STATUS_AVAILABLE, is_soft_deleted=False) backup = { 'display_name': 'tmp_backup', 'id': 'dbe28dff-ce7d-4c3d-a795-c31f6fee31ab', } self.assertRaises(exception.InvalidShare, self.api.create_share_backup, self.context, share, backup) @ddt.data({'overs': {'backup_gigabytes': 'fake'}, 'expected_exception': exception.ShareBackupSizeExceedsAvailableQuota}, {'overs': {'backups': 'fake'}, 'expected_exception': exception.BackupLimitExceeded},) @ddt.unpack def test_create_share_backup_over_quota(self, overs, expected_exception): share = fakes.fake_share(id='fake_id', status=constants.STATUS_AVAILABLE, is_soft_deleted=False, size=5) backup = { 'display_name': 'tmp_backup', 'id': 'dbe28dff-ce7d-4c3d-a795-c31f6fee31ab', } usages = {'backup_gigabytes': {'reserved': 5, 'in_use': 5}, 'backups': {'reserved': 5, 'in_use': 5}} quotas = {'backup_gigabytes': 5, 'backups': 5} exc = 
exception.OverQuota(overs=overs, usages=usages, quotas=quotas) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exc)) self.assertRaises(expected_exception, self.api.create_share_backup, self.context, share, backup) quota.QUOTAS.reserve.assert_called_once_with( self.context, backups=1, backup_gigabytes=share['size']) def test_create_share_backup_rollback_quota(self): share = db_utils.create_share(is_public=True, status='available') reservation = 'fake' self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=reservation)) self.mock_object(quota.QUOTAS, 'rollback') self.mock_object(db_api, 'share_backup_create', mock.Mock(side_effect=exception.ManilaException)) self.mock_object(data_rpc.DataAPI, 'create_backup', mock.Mock()) self.mock_object(self.share_rpcapi, 'create_backup', mock.Mock()) backup = { 'display_name': 'tmp_backup', 'id': 'dbe28dff-ce7d-4c3d-a795-c31f6fee31ab', } self.assertRaises(exception.ManilaException, self.api.create_share_backup, self.context, share, backup) quota.QUOTAS.reserve.assert_called_once() db_api.share_backup_create.assert_called_once() quota.QUOTAS.rollback.assert_called_once_with( self.context, reservation) @ddt.data(CONF.share_topic, CONF.data_topic) def test_delete_share_backup(self, topic): share = db_utils.create_share(is_public=True, status='available') backup = db_utils.create_backup(share['id'], status='available') self.mock_object(db_api, 'share_backup_update', mock.Mock()) self.mock_object(data_rpc.DataAPI, 'delete_backup', mock.Mock()) self.mock_object(self.share_rpcapi, 'delete_backup', mock.Mock()) backup.update({'topic': topic}) self.api.delete_share_backup(self.context, backup) db_api.share_backup_update.assert_called_once() if topic == CONF.share_topic: self.share_rpcapi.delete_backup.assert_called_once_with( self.context, backup) else: data_rpc.DataAPI.delete_backup.assert_called_once_with( self.context, backup) @ddt.data(constants.STATUS_DELETING, constants.STATUS_CREATING) def 
test_delete_share_backup_invalid_state(self, state): share = db_utils.create_share(is_public=True, status='available') backup = db_utils.create_backup(share['id'], status=state) self.assertRaises(exception.InvalidBackup, self.api.delete_share_backup, self.context, backup) @ddt.data(CONF.share_topic, CONF.data_topic) def test_restore_share_backup(self, topic): share = db_utils.create_share( is_public=True, status='available', size=1) backup = db_utils.create_backup( share['id'], status='available', size=1) self.mock_object(self.api, 'get', mock.Mock(return_value=share)) self.mock_object(db_api, 'share_backup_update', mock.Mock()) self.mock_object(db_api, 'share_update', mock.Mock()) self.mock_object(data_rpc.DataAPI, 'restore_backup', mock.Mock()) self.mock_object(self.share_rpcapi, 'restore_backup', mock.Mock()) backup.update({'topic': topic}) self.api.restore_share_backup(self.context, backup) self.api.get.assert_called_once() db_api.share_update.assert_called_once() db_api.share_backup_update.assert_called_once() if topic == CONF.share_topic: self.share_rpcapi.restore_backup.assert_called_once_with( self.context, backup, share['id']) else: data_rpc.DataAPI.restore_backup.assert_called_once_with( self.context, backup, share['id']) def test_restore_share_backup_invalid_share_sizee(self): share = db_utils.create_share( is_public=True, status='available', size=1) backup = db_utils.create_backup( share['id'], status='available', size=2) self.assertRaises(exception.InvalidShare, self.api.restore_share_backup, self.context, backup) def test_restore_share_backup_invalid_share_state(self): share = db_utils.create_share(is_public=True, status='deleting') backup = db_utils.create_backup(share['id'], status='available') self.assertRaises(exception.InvalidShare, self.api.restore_share_backup, self.context, backup) def test_restore_share_backup_invalid_backup_state(self): share = db_utils.create_share(is_public=True, status='available') backup = 
db_utils.create_backup(share['id'], status='deleting') self.assertRaises(exception.InvalidBackup, self.api.restore_share_backup, self.context, backup) def test_update_share_backup(self): share = db_utils.create_share(is_public=True, status='available') backup = db_utils.create_backup(share['id'], status='available') self.mock_object(db_api, 'share_backup_update', mock.Mock()) self.api.update_share_backup(self.context, backup, {'display_name': 'new_name'}) db_api.share_backup_update.assert_called_once() class OtherTenantsShareActionsTestCase(test.TestCase): def setUp(self): super(OtherTenantsShareActionsTestCase, self).setUp() self.api = share.API() def test_delete_other_tenants_public_share(self): share = db_utils.create_share(is_public=True) ctx = context.RequestContext(user_id='1111', project_id='2222') self.assertRaises(exception.PolicyNotAuthorized, self.api.delete, ctx, share) def test_update_other_tenants_public_share(self): share = db_utils.create_share(is_public=True) ctx = context.RequestContext(user_id='1111', project_id='2222') self.assertRaises(exception.PolicyNotAuthorized, self.api.update, ctx, share, {'display_name': 'newname'}) def test_get_other_tenants_public_share(self): share = db_utils.create_share(is_public=True) ctx = context.RequestContext(user_id='1111', project_id='2222') self.mock_object(db_api, 'share_get', mock.Mock(return_value=share)) result = self.api.get(ctx, 'fakeid') self.assertEqual(share, result) db_api.share_get.assert_called_once_with(ctx, 'fakeid') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_driver.py0000664000175000017500000016041700000000000021500 0ustar00zuulzuul00000000000000# Copyright 2012 NetApp # Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Share driver module.""" import time from unittest import mock import ddt from manila.common import constants from manila import exception from manila import network from manila.share import configuration from manila.share import driver from manila import test from manila.tests import utils as test_utils from manila import utils def fake_execute_with_raise(*cmd, **kwargs): raise exception.ProcessExecutionError def fake_sleep(duration): pass class ShareDriverWithExecuteMixin(driver.ShareDriver, driver.ExecuteMixin): pass @ddt.ddt class ShareDriverTestCase(test.TestCase): _SNAPSHOT_METHOD_NAMES = ["create_snapshot", "delete_snapshot"] def setUp(self): super(ShareDriverTestCase, self).setUp() self.utils = utils self.mock_object(self.utils, 'execute', fake_execute_with_raise) self.time = time self.mock_object(self.time, 'sleep', fake_sleep) driver.CONF.set_default('driver_handles_share_servers', True) def test__try_execute(self): execute_mixin = ShareDriverWithExecuteMixin( True, configuration=configuration.Configuration(None)) self.assertRaises(exception.ProcessExecutionError, execute_mixin._try_execute) def test_verify_share_driver_mode_option_type(self): data = {'DEFAULT': {'driver_handles_share_servers': 'True'}} with test_utils.create_temp_config_with_opts(data): share_driver = driver.ShareDriver([True, False]) self.assertTrue(share_driver.driver_handles_share_servers) def _instantiate_share_driver(self, network_config_group, driver_handles_share_servers, admin_network_config_group=None): self.mock_object(network, 'API') config = mock.Mock() 
config.append_config_values = mock.Mock() config.config_group = 'fake_config_group' config.network_config_group = network_config_group if admin_network_config_group: config.admin_network_config_group = admin_network_config_group config.safe_get = mock.Mock(return_value=driver_handles_share_servers) share_driver = driver.ShareDriver([True, False], configuration=config) self.assertTrue(hasattr(share_driver, 'configuration')) config.append_config_values.assert_called_once_with(driver.share_opts) if driver_handles_share_servers: calls = [] if network_config_group: calls.append(mock.call( config_group_name=config.network_config_group)) else: calls.append(mock.call( config_group_name=config.config_group)) if admin_network_config_group: calls.append(mock.call( config_group_name=config.admin_network_config_group, label='admin')) network.API.assert_has_calls(calls) self.assertTrue(hasattr(share_driver, 'network_api')) self.assertTrue(hasattr(share_driver, 'admin_network_api')) self.assertIsNotNone(share_driver.network_api) self.assertIsNotNone(share_driver.admin_network_api) else: self.assertFalse(hasattr(share_driver, 'network_api')) self.assertTrue(hasattr(share_driver, 'admin_network_api')) self.assertIsNone(share_driver.admin_network_api) self.assertFalse(network.API.called) return share_driver def test_instantiate_share_driver(self): self._instantiate_share_driver(None, True) def test_instantiate_share_driver_another_config_group(self): self._instantiate_share_driver("fake_network_config_group", True) def test_instantiate_share_driver_with_admin_network(self): self._instantiate_share_driver( "fake_network_config_group", True, "fake_admin_network_config_group") def test_instantiate_share_driver_no_configuration(self): self.mock_object(network, 'API') share_driver = driver.ShareDriver(True, configuration=None) self.assertIsNone(share_driver.configuration) network.API.assert_called_once_with(config_group_name=None) def test_get_share_stats_refresh_false(self): 
share_driver = driver.ShareDriver(True, configuration=None) share_driver._stats = {'fake_key': 'fake_value'} result = share_driver.get_share_stats(False) self.assertEqual(share_driver._stats, result) def test_get_share_stats_refresh_true(self): conf = configuration.Configuration(None) expected_keys = [ 'qos', 'driver_version', 'share_backend_name', 'free_capacity_gb', 'total_capacity_gb', 'driver_handles_share_servers', 'reserved_percentage', 'reserved_snapshot_percentage', 'reserved_share_extend_percentage', 'vendor_name', 'storage_protocol', 'snapshot_support', 'mount_snapshot_support', 'mount_point_name_support', ] share_driver = driver.ShareDriver(True, configuration=conf) fake_stats = {'fake_key': 'fake_value'} share_driver._stats = fake_stats result = share_driver.get_share_stats(True) self.assertNotEqual(fake_stats, result) for key in expected_keys: self.assertIn(key, result) self.assertEqual('Open Source', result['vendor_name']) def test_get_share_status(self): share_driver = self._instantiate_share_driver(None, False) self.assertRaises(NotImplementedError, share_driver.get_share_status, None, None) @ddt.data( {'opt': True, 'allowed': True}, {'opt': True, 'allowed': (True, False)}, {'opt': True, 'allowed': [True, False]}, {'opt': True, 'allowed': set([True, False])}, {'opt': False, 'allowed': False}, {'opt': False, 'allowed': (True, False)}, {'opt': False, 'allowed': [True, False]}, {'opt': False, 'allowed': set([True, False])}) @ddt.unpack def test__verify_share_server_handling_valid_cases(self, opt, allowed): conf = configuration.Configuration(None) self.mock_object(conf, 'safe_get', mock.Mock(return_value=opt)) share_driver = driver.ShareDriver(allowed, configuration=conf) self.assertTrue(conf.safe_get.called) self.assertEqual(opt, share_driver.driver_handles_share_servers) @ddt.data( {'opt': False, 'allowed': True}, {'opt': True, 'allowed': False}, {'opt': None, 'allowed': True}, {'opt': 'True', 'allowed': True}, {'opt': 'False', 'allowed': False}, 
{'opt': [], 'allowed': True}, {'opt': True, 'allowed': []}, {'opt': True, 'allowed': ['True']}, {'opt': False, 'allowed': ['False']}) @ddt.unpack def test__verify_share_server_handling_invalid_cases(self, opt, allowed): conf = configuration.Configuration(None) self.mock_object(conf, 'safe_get', mock.Mock(return_value=opt)) self.assertRaises( exception.ManilaException, driver.ShareDriver, allowed, configuration=conf) self.assertTrue(conf.safe_get.called) def test_setup_server_handling_disabled(self): share_driver = self._instantiate_share_driver(None, False) # We expect successful execution, nothing to assert share_driver.setup_server('Nothing is expected to happen.') def test_setup_server_handling_enabled(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises( NotImplementedError, share_driver.setup_server, 'fake_network_info') def test_teardown_server_handling_disabled(self): share_driver = self._instantiate_share_driver(None, False) # We expect successful execution, nothing to assert share_driver.teardown_server('Nothing is expected to happen.') def test_teardown_server_handling_enabled(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises( NotImplementedError, share_driver.teardown_server, 'fake_share_server_details') def _assert_is_callable(self, obj, attr): self.assertTrue(callable(getattr(obj, attr))) @ddt.data('manage_existing', 'unmanage') def test_drivers_methods_needed_by_manage_functionality(self, method): share_driver = self._instantiate_share_driver(None, False) self._assert_is_callable(share_driver, method) @ddt.data('manage_existing_snapshot', 'unmanage_snapshot') def test_drivers_methods_needed_by_manage_snapshot_functionality( self, method): share_driver = self._instantiate_share_driver(None, False) self._assert_is_callable(share_driver, method) @ddt.data('revert_to_snapshot', 'revert_to_replicated_snapshot') def test_drivers_methods_needed_by_share_revert_to_snapshot_functionality( self, 
method): share_driver = self._instantiate_share_driver(None, False) self._assert_is_callable(share_driver, method) @ddt.data(True, False) def test_get_share_server_pools(self, value): driver.CONF.set_default('driver_handles_share_servers', value) share_driver = driver.ShareDriver(value) self.assertEqual([], share_driver.get_share_server_pools('fake_server')) def test_check_for_setup_error(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) share_driver.configuration = configuration.Configuration(None) share_driver.check_for_setup_error() def test_snapshot_support_exists(self): driver.CONF.set_default('driver_handles_share_servers', True) fake_method = lambda *args, **kwargs: None # noqa: E731 child_methods = { "create_snapshot": fake_method, "delete_snapshot": fake_method, } child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), child_methods)(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats() self.assertTrue(child_class_instance._stats["snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) @ddt.data( ([], [], False), (_SNAPSHOT_METHOD_NAMES, [], True), (_SNAPSHOT_METHOD_NAMES, _SNAPSHOT_METHOD_NAMES, True), (_SNAPSHOT_METHOD_NAMES[0:1], _SNAPSHOT_METHOD_NAMES[1:], True), ([], _SNAPSHOT_METHOD_NAMES, True), ) @ddt.unpack def test_check_redefined_driver_methods(self, common_drv_meth_names, child_drv_meth_names, expected_result): # This test covers the case of drivers inheriting other drivers or # common classes. 
driver.CONF.set_default('driver_handles_share_servers', True) common_drv_methods, child_drv_methods = [ {method_name: lambda *args, **kwargs: None # noqa: E731 for method_name in method_names} for method_names in (common_drv_meth_names, child_drv_meth_names)] common_drv = type( "NotRedefinedCommon", (driver.ShareDriver, ), common_drv_methods) child_drv_instance = type("NotRedefined", (common_drv, ), child_drv_methods)(True) has_redefined_methods = ( child_drv_instance._has_redefined_driver_methods( self._SNAPSHOT_METHOD_NAMES)) self.assertEqual(expected_result, has_redefined_methods) @ddt.data( (), ("create_snapshot"), ("delete_snapshot"), ("create_snapshot", "delete_snapshotFOO"), ) def test_snapshot_support_absent(self, methods): driver.CONF.set_default('driver_handles_share_servers', True) fake_method = lambda *args, **kwargs: None # noqa: E731 child_methods = {} for method in methods: child_methods[method] = fake_method child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), child_methods)(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats() self.assertFalse(child_class_instance._stats["snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) @ddt.data(True, False) def test_snapshot_support_not_exists_and_set_explicitly( self, snapshots_are_supported): driver.CONF.set_default('driver_handles_share_servers', True) child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), {})(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats( {"snapshot_support": snapshots_are_supported}) self.assertEqual( snapshots_are_supported, child_class_instance._stats["snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) @ddt.data(True, False) def test_snapshot_support_exists_and_set_explicitly( self, snapshots_are_supported): driver.CONF.set_default('driver_handles_share_servers', True) 
fake_method = lambda *args, **kwargs: None # noqa: E731 child_methods = { "create_snapshot": fake_method, "delete_snapshot": fake_method, } child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), child_methods)(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats( {"snapshot_support": snapshots_are_supported}) self.assertEqual( snapshots_are_supported, child_class_instance._stats["snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) def test_create_share_from_snapshot_support_exists(self): driver.CONF.set_default('driver_handles_share_servers', True) fake_method = lambda *args, **kwargs: None # noqa: E731 child_methods = { "create_share_from_snapshot": fake_method, "create_snapshot": fake_method, "delete_snapshot": fake_method, } child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), child_methods)(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats() self.assertTrue( child_class_instance._stats["create_share_from_snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) @ddt.data( (), ("create_snapshot"), ("create_share_from_snapshotFOO"), ) def test_create_share_from_snapshot_support_absent(self, methods): driver.CONF.set_default('driver_handles_share_servers', True) fake_method = lambda *args, **kwargs: None # noqa: E731 child_methods = {} for method in methods: child_methods[method] = fake_method child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), child_methods)(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats() self.assertFalse( child_class_instance._stats["create_share_from_snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) @ddt.data(True, False) def test_create_share_from_snapshot_not_exists_and_set_explicitly( self, creating_shares_from_snapshot_is_supported): 
driver.CONF.set_default('driver_handles_share_servers', True) child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), {})(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats({ "create_share_from_snapshot_support": creating_shares_from_snapshot_is_supported, }) self.assertEqual( creating_shares_from_snapshot_is_supported, child_class_instance._stats["create_share_from_snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) @ddt.data(True, False) def test_create_share_from_snapshot_exists_and_set_explicitly( self, create_share_from_snapshot_supported): driver.CONF.set_default('driver_handles_share_servers', True) fake_method = lambda *args, **kwargs: None # noqa: E731 child_methods = {"create_share_from_snapshot": fake_method} child_class_instance = type( "NotRedefined", (driver.ShareDriver, ), child_methods)(True) self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats({ "create_share_from_snapshot_support": create_share_from_snapshot_supported, }) self.assertEqual( create_share_from_snapshot_supported, child_class_instance._stats["create_share_from_snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) def test_get_periodic_hook_data(self): share_driver = self._instantiate_share_driver(None, False) share_instances = ["list", "of", "share", "instances"] result = share_driver.get_periodic_hook_data( "fake_context", share_instances) self.assertEqual(share_instances, result) def test_get_admin_network_allocations_number(self): share_driver = self._instantiate_share_driver(None, True) self.assertEqual( 0, share_driver.get_admin_network_allocations_number()) def test_allocate_admin_network_count_None(self): share_driver = self._instantiate_share_driver(None, True) ctxt = 'fake_context' share_server = 'fake_share_server' mock_get_admin_network_allocations_number = self.mock_object( share_driver, 
'get_admin_network_allocations_number', mock.Mock(return_value=0)) self.mock_object( share_driver.admin_network_api, 'allocate_network', mock.Mock(side_effect=Exception('ShouldNotBeRaised'))) share_driver.allocate_admin_network(ctxt, share_server) mock_get_admin_network_allocations_number.assert_called_once_with() self.assertFalse( share_driver.admin_network_api.allocate_network.called) def test_allocate_admin_network_count_0(self): share_driver = self._instantiate_share_driver(None, True) ctxt = 'fake_context' share_server = 'fake_share_server' self.mock_object( share_driver, 'get_admin_network_allocations_number', mock.Mock(return_value=0)) self.mock_object( share_driver.admin_network_api, 'allocate_network', mock.Mock(side_effect=Exception('ShouldNotBeRaised'))) share_driver.allocate_admin_network(ctxt, share_server, count=0) self.assertFalse( share_driver.get_admin_network_allocations_number.called) self.assertFalse( share_driver.admin_network_api.allocate_network.called) def test_allocate_admin_network_count_1_api_initialized(self): share_driver = self._instantiate_share_driver(None, True) ctxt = 'fake_context' share_server = 'fake_share_server' mock_get_admin_network_allocations_number = self.mock_object( share_driver, 'get_admin_network_allocations_number', mock.Mock(return_value=1)) self.mock_object( share_driver.admin_network_api, 'allocate_network', mock.Mock()) share_driver.allocate_admin_network(ctxt, share_server) mock_get_admin_network_allocations_number.assert_called_once_with() (share_driver.admin_network_api.allocate_network. 
assert_called_once_with(ctxt, share_server, count=1)) def test_allocate_admin_network_count_1_api_not_initialized(self): share_driver = self._instantiate_share_driver(None, True, None) ctxt = 'fake_context' share_server = 'fake_share_server' share_driver._admin_network_api = None mock_get_admin_network_allocations_number = self.mock_object( share_driver, 'get_admin_network_allocations_number', mock.Mock(return_value=1)) self.assertRaises( exception.NetworkBadConfigurationException, share_driver.allocate_admin_network, ctxt, share_server, ) mock_get_admin_network_allocations_number.assert_called_once_with() def test_migration_start(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertRaises(NotImplementedError, share_driver.migration_start, None, None, None, None, None, None, None) def test_migration_continue(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertRaises(NotImplementedError, share_driver.migration_continue, None, None, None, None, None, None, None) def test_migration_complete(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertRaises(NotImplementedError, share_driver.migration_complete, None, None, None, None, None, None, None) def test_migration_cancel(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertRaises(NotImplementedError, share_driver.migration_cancel, None, None, None, None, None, None, None) def test_migration_get_progress(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertRaises(NotImplementedError, share_driver.migration_get_progress, None, None, None, None, None, None, None) def test_share_server_migration_start(self): driver.CONF.set_default('driver_handles_share_servers', True) share_driver = 
driver.ShareDriver(True) self.assertRaises(NotImplementedError, share_driver.share_server_migration_start, None, None, None, None, None) def test_share_server_migration_continue(self): driver.CONF.set_default('driver_handles_share_servers', True) share_driver = driver.ShareDriver(True) self.assertRaises(NotImplementedError, share_driver.share_server_migration_continue, None, None, None, None, None) def test_share_server_migration_get_progress(self): driver.CONF.set_default('driver_handles_share_servers', True) share_driver = driver.ShareDriver(True) self.assertRaises(NotImplementedError, share_driver.share_server_migration_get_progress, None, None, None, None, None) def test_share_server_migration_cancel(self): driver.CONF.set_default('driver_handles_share_servers', True) share_driver = driver.ShareDriver(True) self.assertRaises(NotImplementedError, share_driver.share_server_migration_cancel, None, None, None, None, None) def test_share_server_migration_check_compatibility(self): driver.CONF.set_default('driver_handles_share_servers', True) share_driver = driver.ShareDriver(True) expected_compatibility = { 'compatible': False, 'writable': False, 'nondisruptive': False, 'preserve_snapshots': False, 'migration_cancel': False, 'migration_get_progress': False, } driver_compatibility = ( share_driver.share_server_migration_check_compatibility( None, None, None, None, None, None)) self.assertEqual(expected_compatibility, driver_compatibility) def test_share_server_migration_complete(self): driver.CONF.set_default('driver_handles_share_servers', True) share_driver = driver.ShareDriver(True) self.assertRaises( NotImplementedError, share_driver.share_server_migration_complete, None, None, None, None, None, None) @ddt.data(True, False) def test_connection_get_info(self, admin): expected = { 'mount': 'mount -vt nfs %(options)s /fake/fake_id %(path)s', 'unmount': 'umount -v %(path)s', 'access_mapping': { 'ip': ['nfs'] } } fake_share = { 'id': 'fake_id', 'share_proto': 'nfs', 
'export_locations': [{ 'path': '/fake/fake_id', 'is_admin_only': admin }] } driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) share_driver.configuration = configuration.Configuration(None) connection_info = share_driver.connection_get_info( None, fake_share, "fake_server") self.assertEqual(expected, connection_info) def test_migration_check_compatibility(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) share_driver.configuration = configuration.Configuration(None) expected = { 'compatible': False, 'writable': False, 'preserve_metadata': False, 'nondisruptive': False, 'preserve_snapshots': False, } result = share_driver.migration_check_compatibility( None, None, None, None, None) self.assertEqual(expected, result) def test_update_access(self): share_driver = driver.ShareDriver(True, configuration=None) self.assertRaises( NotImplementedError, share_driver.update_access, 'ctx', 'fake_share', 'fake_access_rules', 'fake_add_rules', 'fake_delete_rules', 'fake_update_rules' ) def test_create_replica(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises(NotImplementedError, share_driver.create_replica, 'fake_context', ['r1', 'r2'], 'fake_new_replica', [], []) def test_delete_replica(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises(NotImplementedError, share_driver.delete_replica, 'fake_context', ['r1', 'r2'], 'fake_replica', []) def test_promote_replica(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises(NotImplementedError, share_driver.promote_replica, 'fake_context', [], 'fake_replica', []) def test_update_replica_state(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises(NotImplementedError, share_driver.update_replica_state, 'fake_context', ['r1', 'r2'], 'fake_replica', [], []) def test_create_replicated_snapshot(self): share_driver 
= self._instantiate_share_driver(None, False) self.assertRaises(NotImplementedError, share_driver.create_replicated_snapshot, 'fake_context', ['r1', 'r2'], ['s1', 's2']) def test_delete_replicated_snapshot(self): share_driver = self._instantiate_share_driver(None, False) self.assertRaises(NotImplementedError, share_driver.delete_replicated_snapshot, 'fake_context', ['r1', 'r2'], ['s1', 's2']) def test_update_replicated_snapshot(self): share_driver = self._instantiate_share_driver(None, False) self.assertRaises(NotImplementedError, share_driver.update_replicated_snapshot, 'fake_context', ['r1', 'r2'], 'r1', ['s1', 's2'], 's1') @ddt.data(True, False) def test_share_group_snapshot_support_exists_and_equals_snapshot_support( self, snapshots_are_supported): driver.CONF.set_default('driver_handles_share_servers', True) child_class_instance = driver.ShareDriver(True) child_class_instance._snapshots_are_supported = snapshots_are_supported self.mock_object(child_class_instance, "configuration") child_class_instance._update_share_stats() self.assertEqual( snapshots_are_supported, child_class_instance._stats["snapshot_support"]) self.assertTrue(child_class_instance.configuration.safe_get.called) def test_create_share_group_from_share_group_snapshot(self): share_driver = self._instantiate_share_driver(None, False) fake_shares = [ {'id': 'fake_share_%d' % i, 'source_share_group_snapshot_member_id': 'fake_member_%d' % i} for i in (1, 2)] fake_share_group_dict = { 'source_share_group_snapshot_id': 'some_fake_uuid_abc', 'shares': fake_shares, 'id': 'some_fake_uuid_def', } fake_share_group_snapshot_dict = { 'share_group_snapshot_members': [ {'id': 'fake_member_1'}, {'id': 'fake_member_2'}], 'id': 'fake_share_group_snapshot_id', } mock_create = self.mock_object( share_driver, 'create_share_from_snapshot', mock.Mock(side_effect=['fake_export1', 'fake_export2'])) expected_share_updates = [ { 'id': 'fake_share_1', 'export_locations': 'fake_export1', }, { 'id': 'fake_share_2', 
'export_locations': 'fake_export2', }, ] share_group_update, share_update = ( share_driver.create_share_group_from_share_group_snapshot( 'fake_context', fake_share_group_dict, fake_share_group_snapshot_dict)) mock_create.assert_has_calls([ mock.call( 'fake_context', {'id': 'fake_share_1', 'source_share_group_snapshot_member_id': 'fake_member_1'}, {'id': 'fake_member_1'}), mock.call( 'fake_context', {'id': 'fake_share_2', 'source_share_group_snapshot_member_id': 'fake_member_2'}, {'id': 'fake_member_2'}) ]) self.assertIsNone(share_group_update) self.assertEqual(expected_share_updates, share_update) def test_create_share_group_from_share_group_snapshot_dhss(self): share_driver = self._instantiate_share_driver(None, True) mock_share_server = mock.Mock() fake_shares = [ {'id': 'fake_share_1', 'source_share_group_snapshot_member_id': 'foo_member_1'}, {'id': 'fake_share_2', 'source_share_group_snapshot_member_id': 'foo_member_2'}] fake_share_group_dict = { 'source_share_group_snapshot_id': 'some_fake_uuid', 'shares': fake_shares, 'id': 'eda52174-0442-476d-9694-a58327466c14', } fake_share_group_snapshot_dict = { 'share_group_snapshot_members': [ {'id': 'foo_member_1'}, {'id': 'foo_member_2'}], 'id': 'fake_share_group_snapshot_id' } mock_create = self.mock_object( share_driver, 'create_share_from_snapshot', mock.Mock(side_effect=['fake_export1', 'fake_export2'])) expected_share_updates = [ {'id': 'fake_share_1', 'export_locations': 'fake_export1'}, {'id': 'fake_share_2', 'export_locations': 'fake_export2'}, ] share_group_update, share_update = ( share_driver.create_share_group_from_share_group_snapshot( 'fake_context', fake_share_group_dict, fake_share_group_snapshot_dict, share_server=mock_share_server, ) ) mock_create.assert_has_calls([ mock.call( 'fake_context', {'id': 'fake_share_%d' % i, 'source_share_group_snapshot_member_id': 'foo_member_%d' % i}, {'id': 'foo_member_%d' % i}, share_server=mock_share_server) for i in (1, 2) ]) self.assertIsNone(share_group_update) 
self.assertEqual(expected_share_updates, share_update) def test_create_share_group_from_share_group_snapshot_with_dict_raise( self): share_driver = self._instantiate_share_driver(None, False) fake_shares = [ {'id': 'fake_share_%d' % i, 'source_share_group_snapshot_member_id': 'fake_member_%d' % i} for i in (1, 2)] fake_share_group_dict = { 'source_share_group_snapshot_id': 'some_fake_uuid_abc', 'shares': fake_shares, 'id': 'some_fake_uuid_def', } fake_share_group_snapshot_dict = { 'share_group_snapshot_members': [ {'id': 'fake_member_1'}, {'id': 'fake_member_2'}], 'id': 'fake_share_group_snapshot_id', } self.mock_object( share_driver, 'create_share_from_snapshot', mock.Mock(side_effect=[{ 'export_locations': 'fake_export1', 'status': constants.STATUS_CREATING}, {'export_locations': 'fake_export2', 'status': constants.STATUS_CREATING}])) self.assertRaises( exception.InvalidShareInstance, share_driver.create_share_group_from_share_group_snapshot, 'fake_context', fake_share_group_dict, fake_share_group_snapshot_dict) def test_create_share_group_from_share_group_snapshot_with_dict( self): share_driver = self._instantiate_share_driver(None, False) fake_shares = [ {'id': 'fake_share_%d' % i, 'source_share_group_snapshot_member_id': 'fake_member_%d' % i} for i in (1, 2)] fake_share_group_dict = { 'source_share_group_snapshot_id': 'some_fake_uuid_abc', 'shares': fake_shares, 'id': 'some_fake_uuid_def', } fake_share_group_snapshot_dict = { 'share_group_snapshot_members': [ {'id': 'fake_member_1'}, {'id': 'fake_member_2'}], 'id': 'fake_share_group_snapshot_id', } mock_create = self.mock_object( share_driver, 'create_share_from_snapshot', mock.Mock(side_effect=[{ 'export_locations': 'fake_export1', 'status': constants.STATUS_CREATING_FROM_SNAPSHOT}, {'export_locations': 'fake_export2', 'status': constants.STATUS_AVAILABLE}])) expected_share_updates = [ { 'id': 'fake_share_1', 'status': constants.STATUS_CREATING_FROM_SNAPSHOT, 'export_locations': 'fake_export1', }, { 'id': 
'fake_share_2', 'status': constants.STATUS_AVAILABLE, 'export_locations': 'fake_export2', }, ] share_group_update, share_update = ( share_driver.create_share_group_from_share_group_snapshot( 'fake_context', fake_share_group_dict, fake_share_group_snapshot_dict)) mock_create.assert_has_calls([ mock.call( 'fake_context', {'id': 'fake_share_1', 'source_share_group_snapshot_member_id': 'fake_member_1'}, {'id': 'fake_member_1'}), mock.call( 'fake_context', {'id': 'fake_share_2', 'source_share_group_snapshot_member_id': 'fake_member_2'}, {'id': 'fake_member_2'}) ]) self.assertIsNone(share_group_update) self.assertEqual(expected_share_updates, share_update) def test_update_share_server_security_service(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises(NotImplementedError, share_driver.update_share_server_security_service, 'fake_context', {'id', 'share_server_id'}, {'fake', 'fake_net_info'}, [{"id": "fake_instance_id"}], [{"id": "fake_rule_id"}], {'id', 'fake_sec_service_id'}, current_security_service=None) def test_check_update_share_server_security_service(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises( NotImplementedError, share_driver.check_update_share_server_security_service, 'fake_context', {'id', 'share_server_id'}, {'fake', 'fake_net_info'}, [{"id": "fake_instance_id"}], [{"id": "fake_rule_id"}], {'id', 'fake_sec_service_id'}, current_security_service=None) def test_check_update_share_server_network_allocations(self): share_driver = self._instantiate_share_driver(None, True) self.assertRaises( NotImplementedError, share_driver.check_update_share_server_network_allocations, 'fake_context', {'id', 'share_server_id'}, {'admin_network_allocations': [], 'subnets': []}, {"id": "fake_subnet_id"}, [{"id": "fake_security_service_id"}], [{'id', 'fake_share_instance_id'}], [{"id": "fake_rule_id"}]) def test_update_share_server_network_allocations(self): share_driver = self._instantiate_share_driver(None, 
True) self.assertRaises( NotImplementedError, share_driver.update_share_server_network_allocations, 'fake_context', {'id', 'share_server_id'}, {'admin_network_allocations': [], 'subnets': []}, { 'share_network_subnet_id': 'fake_share_network_subnet_id', 'neutron_net_id': 'fake_neutron_net_id', 'neutron_subnet_id': 'fake_neutron_subnet_id', 'network_allocations': [] }, [{"id": "fake_security_service_id"}], [{"id": "fake_share_id"}], [{"id": "fake_snapshot_id"}]) def test_create_share_group_from_sg_snapshot_with_no_members(self): share_driver = self._instantiate_share_driver(None, False) fake_share_group_dict = {} fake_share_group_snapshot_dict = {'share_group_snapshot_members': []} share_group_update, share_update = ( share_driver.create_share_group_from_share_group_snapshot( 'fake_context', fake_share_group_dict, fake_share_group_snapshot_dict)) self.assertIsNone(share_group_update) self.assertIsNone(share_update) def test_create_share_group_snapshot(self): fake_snap_member_1 = { 'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e', 'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296', 'share_group_snapshot_id': 'fake_share_group_snapshot_id', 'share_instance_id': 'fake_share_instance_id_1', 'provider_location': 'should_not_be_used_1', 'share_name': 'share_fake_share_instance_id_1', 'name': 'share-snapshot-6813e06b-a8f5-4784-b17d-f3e91afa370e', 'share': { 'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2', 'size': 3, 'share_proto': 'fake_share_proto', }, } fake_snap_member_2 = { 'id': '1e010dfe-545b-432d-ab95-4ef03cd82f89', 'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296', 'share_group_snapshot_id': 'fake_share_group_snapshot_id', 'share_instance_id': 'fake_share_instance_id_2', 'provider_location': 'should_not_be_used_2', 'share_name': 'share_fake_share_instance_id_2', 'name': 'share-snapshot-1e010dfe-545b-432d-ab95-4ef03cd82f89', 'share': { 'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2', 'size': '2', 'share_proto': 'fake_share_proto', }, } fake_snap_dict = { 'status': 
'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'share_group_snapshot_members': [ fake_snap_member_1, fake_snap_member_2], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None } share_driver = self._instantiate_share_driver(None, False) share_driver._stats['snapshot_support'] = True mock_create_snap = self.mock_object( share_driver, 'create_snapshot', mock.Mock(side_effect=lambda *args, **kwargs: { 'foo_k': 'foo_v', 'bar_k': 'bar_v_%s' % args[1]['id']})) share_group_snapshot_update, member_update_list = ( share_driver.create_share_group_snapshot( 'fake_context', fake_snap_dict)) mock_create_snap.assert_has_calls([ mock.call( 'fake_context', {'snapshot_id': member['share_group_snapshot_id'], 'share_id': member['share_id'], 'share_instance_id': member['share']['id'], 'id': member['id'], 'share': member['share'], 'share_name': member['share_name'], 'name': member['name'], 'size': member['share']['size'], 'share_size': member['share']['size'], 'share_proto': member['share']['share_proto'], 'provider_location': None}, share_server=None) for member in (fake_snap_member_1, fake_snap_member_2) ]) self.assertIsNone(share_group_snapshot_update) self.assertEqual( [{'id': member['id'], 'foo_k': 'foo_v', 'bar_k': 'bar_v_%s' % member['id']} for member in (fake_snap_member_1, fake_snap_member_2)], member_update_list, ) def test_create_share_group_snapshot_failed_snapshot(self): fake_snap_member_1 = { 'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e', 'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296', 'share_group_snapshot_id': 'fake_share_group_snapshot_id', 'share_instance_id': 'fake_share_instance_id_1', 'provider_location': 'should_not_be_used_1', 'share_name': 'share_fake_share_instance_id_1', 'name': 'share-snapshot-6813e06b-a8f5-4784-b17d-f3e91afa370e', 'share': { 'id': 
'420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2', 'size': 3, 'share_proto': 'fake_share_proto', }, } fake_snap_member_2 = { 'id': '1e010dfe-545b-432d-ab95-4ef03cd82f89', 'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296', 'share_group_snapshot_id': 'fake_share_group_snapshot_id', 'share_instance_id': 'fake_share_instance_id_2', 'provider_location': 'should_not_be_used_2', 'share_name': 'share_fake_share_instance_id_2', 'name': 'share-snapshot-1e010dfe-545b-432d-ab95-4ef03cd82f89', 'share': { 'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2', 'size': '2', 'share_proto': 'fake_share_proto', }, } fake_snap_dict = { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'share_group_snapshot_members': [ fake_snap_member_1, fake_snap_member_2], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None } expected_exception = exception.ManilaException share_driver = self._instantiate_share_driver(None, False) share_driver._stats['snapshot_support'] = True mock_create_snap = self.mock_object( share_driver, 'create_snapshot', mock.Mock(side_effect=[None, expected_exception])) mock_delete_snap = self.mock_object(share_driver, 'delete_snapshot') self.assertRaises( expected_exception, share_driver.create_share_group_snapshot, 'fake_context', fake_snap_dict) fake_snap_member_1_expected = { 'snapshot_id': fake_snap_member_1['share_group_snapshot_id'], 'share_id': fake_snap_member_1['share_id'], 'share_instance_id': fake_snap_member_1['share']['id'], 'id': fake_snap_member_1['id'], 'share': fake_snap_member_1['share'], 'share_name': fake_snap_member_1['share_name'], 'name': fake_snap_member_1['name'], 'size': fake_snap_member_1['share']['size'], 'share_size': fake_snap_member_1['share']['size'], 'share_proto': fake_snap_member_1['share']['share_proto'], 'provider_location': None, } mock_create_snap.assert_has_calls([ 
mock.call( 'fake_context', {'snapshot_id': member['share_group_snapshot_id'], 'share_id': member['share_id'], 'share_instance_id': member['share']['id'], 'id': member['id'], 'share': member['share'], 'share_name': member['share_name'], 'name': member['name'], 'size': member['share']['size'], 'share_size': member['share']['size'], 'share_proto': member['share']['share_proto'], 'provider_location': None}, share_server=None) for member in (fake_snap_member_1, fake_snap_member_2) ]) mock_delete_snap.assert_called_with( 'fake_context', fake_snap_member_1_expected, share_server=None) def test_create_share_group_snapshot_no_support(self): fake_snap_dict = { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'share_group_snapshot_members': [ { 'status': 'available', 'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f', 'user_id': 'a0314a441ca842019b0952224aa39192', 'deleted': 'False', 'share_proto': 'NFS', 'project_id': '13c0be6290934bd98596cfa004650049', 'share_group_snapshot_id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'deleted_at': None, 'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e', 'size': 1 }, ], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None } share_driver = self._instantiate_share_driver(None, False) share_driver._stats['snapshot_support'] = False self.assertRaises( exception.ShareGroupSnapshotNotSupported, share_driver.create_share_group_snapshot, 'fake_context', fake_snap_dict) def test_create_share_group_snapshot_no_members(self): fake_snap_dict = { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'share_group_snapshot_members': [], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': 
None } share_driver = self._instantiate_share_driver(None, False) share_driver._stats['snapshot_support'] = True share_group_snapshot_update, member_update_list = ( share_driver.create_share_group_snapshot( 'fake_context', fake_snap_dict)) self.assertIsNone(share_group_snapshot_update) self.assertIsNone(member_update_list) def test_delete_share_group_snapshot(self): fake_snap_member_1 = { 'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e', 'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296', 'share_group_snapshot_id': 'fake_share_group_snapshot_id', 'share_instance_id': 'fake_share_instance_id_1', 'provider_location': 'fake_provider_location_2', 'share_name': 'share_fake_share_instance_id_1', 'name': 'share-snapshot-6813e06b-a8f5-4784-b17d-f3e91afa370e', 'share': { 'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2', 'size': 3, 'share_proto': 'fake_share_proto', }, } fake_snap_member_2 = { 'id': '1e010dfe-545b-432d-ab95-4ef03cd82f89', 'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296', 'share_group_snapshot_id': 'fake_share_group_snapshot_id', 'share_instance_id': 'fake_share_instance_id_2', 'provider_location': 'fake_provider_location_2', 'share_name': 'share_fake_provider_location_2', 'name': 'share-snapshot-1e010dfe-545b-432d-ab95-4ef03cd82f89', 'share': { 'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2', 'size': '2', 'share_proto': 'fake_share_proto', }, } fake_snap_dict = { 'status': 'available', 'project_id': '13c0be6290934bd98596cfa004650049', 'user_id': 'a0314a441ca842019b0952224aa39192', 'description': None, 'deleted': '0', 'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67', 'share_group_snapshot_members': [ fake_snap_member_1, fake_snap_member_2], 'deleted_at': None, 'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a', 'name': None } share_driver = self._instantiate_share_driver(None, False) share_driver._stats['share_group_snapshot_support'] = True mock_delete_snap = self.mock_object(share_driver, 'delete_snapshot') share_group_snapshot_update, member_update_list = ( 
share_driver.delete_share_group_snapshot( 'fake_context', fake_snap_dict)) mock_delete_snap.assert_has_calls([ mock.call( 'fake_context', {'snapshot_id': member['share_group_snapshot_id'], 'share_id': member['share_id'], 'share_instance_id': member['share']['id'], 'id': member['id'], 'share': member['share'], 'size': member['share']['size'], 'share_size': member['share']['size'], 'share_name': member['share_name'], 'name': member['name'], 'share_proto': member['share']['share_proto'], 'provider_location': member['provider_location']}, share_server=None) for member in (fake_snap_member_1, fake_snap_member_2) ]) self.assertIsNone(share_group_snapshot_update) self.assertIsNone(member_update_list) def test_snapshot_update_access(self): share_driver = self._instantiate_share_driver(None, False) self.assertRaises(NotImplementedError, share_driver.snapshot_update_access, 'fake_context', 'fake_snapshot', ['r1', 'r2'], [], []) @ddt.data({'user_networks': set([4]), 'conf': [4], 'expected': {'ipv4': True, 'ipv6': False}}, {'user_networks': set([6]), 'conf': [4], 'expected': {'ipv4': False, 'ipv6': False}}, {'user_networks': set([4, 6]), 'conf': [4], 'expected': {'ipv4': True, 'ipv6': False}}, {'user_networks': set([4]), 'conf': [6], 'expected': {'ipv4': False, 'ipv6': False}}, {'user_networks': set([6]), 'conf': [6], 'expected': {'ipv4': False, 'ipv6': True}}, {'user_networks': set([4, 6]), 'conf': [6], 'expected': {'ipv4': False, 'ipv6': True}}, {'user_networks': set([4]), 'conf': [4, 6], 'expected': {'ipv4': True, 'ipv6': False}}, {'user_networks': set([6]), 'conf': [4, 6], 'expected': {'ipv4': False, 'ipv6': True}}, {'user_networks': set([4, 6]), 'conf': [4, 6], 'expected': {'ipv4': True, 'ipv6': True}}, ) @ddt.unpack def test_add_ip_version_capability_if_dhss_true(self, user_networks, conf, expected): share_driver = self._instantiate_share_driver(None, True) self.mock_object(share_driver, 'get_configured_ip_versions', mock.Mock(return_value=conf)) versions = 
mock.PropertyMock(return_value=user_networks) type(share_driver.network_api).enabled_ip_versions = versions data = {'share_backend_name': 'fake_backend'} result = share_driver.add_ip_version_capability(data) self.assertIsNotNone(result['ipv4_support']) self.assertEqual(expected['ipv4'], result['ipv4_support']) self.assertIsNotNone(result['ipv6_support']) self.assertEqual(expected['ipv6'], result['ipv6_support']) @ddt.data({'conf': [4], 'expected': {'ipv4': True, 'ipv6': False}}, {'conf': [6], 'expected': {'ipv4': False, 'ipv6': True}}, {'conf': [4, 6], 'expected': {'ipv4': True, 'ipv6': True}}, ) @ddt.unpack def test_add_ip_version_capability_if_dhss_false(self, conf, expected): share_driver = self._instantiate_share_driver(None, False) self.mock_object(share_driver, 'get_configured_ip_versions', mock.Mock(return_value=conf)) data = {'share_backend_name': 'fake_backend'} result = share_driver.add_ip_version_capability(data) self.assertIsNotNone(result['ipv4_support']) self.assertEqual(expected['ipv4'], result['ipv4_support']) self.assertIsNotNone(result['ipv6_support']) self.assertEqual(expected['ipv6'], result['ipv6_support']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_drivers_private_data.py0000664000175000017500000001324300000000000024400 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_utils import uuidutils from manila.share import drivers_private_data as pd from manila import test @ddt.ddt class DriverPrivateDataTestCase(test.TestCase): """Tests DriverPrivateData.""" def setUp(self): super(DriverPrivateDataTestCase, self).setUp() self.fake_storage = mock.Mock() self.entity_id = uuidutils.generate_uuid() def test_default_storage_driver(self): private_data = pd.DriverPrivateData( storage=None, context="fake", backend_host="fake") self.assertIsInstance(private_data._storage, pd.SqlStorageDriver) def test_custom_storage_driver(self): private_data = pd.DriverPrivateData(storage=self.fake_storage) self.assertEqual(self.fake_storage, private_data._storage) def test_invalid_parameters(self): self.assertRaises(ValueError, pd.DriverPrivateData) @ddt.data({'context': 'fake'}, {'backend_host': 'fake'}) def test_invalid_single_parameter(self, test_args): self.assertRaises(ValueError, pd.DriverPrivateData, **test_args) @ddt.data("111", ["fake"], None) def test_validate_entity_id_invalid(self, entity_id): data = pd.DriverPrivateData(storage="fake") self.assertRaises(ValueError, data._validate_entity_id, entity_id) def test_validate_entity_id_valid(self): actual_result = ( pd.DriverPrivateData._validate_entity_id(self.entity_id) ) self.assertIsNone(actual_result) def test_update(self): data = pd.DriverPrivateData(storage=self.fake_storage) details = {"foo": "bar"} self.mock_object(self.fake_storage, 'update', mock.Mock(return_value=True)) actual_result = data.update( self.entity_id, details, delete_existing=True ) self.assertTrue(actual_result) self.fake_storage.update.assert_called_once_with( self.entity_id, details, True ) def test_update_invalid(self): data = pd.DriverPrivateData(storage=self.fake_storage) details = ["invalid"] self.mock_object(self.fake_storage, 'update', mock.Mock(return_value=True)) self.assertRaises( ValueError, data.update, self.entity_id, details) 
self.assertFalse(self.fake_storage.update.called) def test_get(self): data = pd.DriverPrivateData(storage=self.fake_storage) key = "fake_key" value = "fake_value" default_value = "def" self.mock_object(self.fake_storage, 'get', mock.Mock(return_value=value)) actual_result = data.get(self.entity_id, key, default_value) self.assertEqual(value, actual_result) self.fake_storage.get.assert_called_once_with( self.entity_id, key, default_value ) def test_delete(self): data = pd.DriverPrivateData(storage=self.fake_storage) key = "fake_key" self.mock_object(self.fake_storage, 'get', mock.Mock(return_value=True)) actual_result = data.delete(self.entity_id, key) self.assertTrue(actual_result) self.fake_storage.delete.assert_called_once_with( self.entity_id, key ) fake_storage_data = { "entity_id": "fake_id", "details": {"foo": "bar"}, "context": "fake_context", "backend_host": "fake_host", "default": "def", "delete_existing": True, "key": "fake_key", } def create_arg_list(key_names): return [fake_storage_data[key] for key in key_names] def create_arg_dict(key_names): return {key: fake_storage_data[key] for key in key_names} @ddt.ddt class SqlStorageDriverTestCase(test.TestCase): @ddt.data( { "method_name": 'update', "method_kwargs": create_arg_dict( ["entity_id", "details", "delete_existing"]), "valid_args": create_arg_list( ["context", "entity_id", "details", "delete_existing"] ) }, { "method_name": 'get', "method_kwargs": create_arg_dict(["entity_id", "key", "default"]), "valid_args": create_arg_list( ["context", "entity_id", "key", "default"]), }, { "method_name": 'delete', "method_kwargs": create_arg_dict(["entity_id", "key"]), "valid_args": create_arg_list( ["context", "entity_id", "key"]), }) @ddt.unpack def test_methods(self, method_kwargs, method_name, valid_args): method = method_name db_method = 'driver_private_data_' + method_name with mock.patch('manila.db.api.' 
+ db_method) as db_method: storage_driver = pd.SqlStorageDriver( context=fake_storage_data['context'], backend_host=fake_storage_data['backend_host']) method = getattr(storage_driver, method) method(**method_kwargs) db_method.assert_called_once_with(*valid_args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_hook.py0000664000175000017500000003072700000000000021145 0ustar00zuulzuul00000000000000# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from manila import context from manila.share import hook from manila import test class FakeHookImplementation(hook.HookBase): def _execute_pre_hook(self, context, func_name, *args, **kwargs): """Fake implementation of a pre hook action.""" def _execute_post_hook(self, context, func_name, pre_hook_data, driver_action_results, *args, **kwargs): """Fake implementation of a post hook action.""" def _execute_periodic_hook(self, context, periodic_hook_data, *args, **kwargs): """Fake implementation of a periodic hook action.""" @ddt.ddt class HookBaseTestCase(test.TestCase): def setUp(self): super(HookBaseTestCase, self).setUp() self.context = context.get_admin_context() self.default_config = { "enable_pre_hooks": True, "enable_post_hooks": True, "enable_periodic_hooks": True, "suppress_pre_hooks_errors": True, "suppress_post_hooks_errors": True, } for k, v in self.default_config.items(): hook.CONF.set_default(k, v) def _fake_safe_get(self, key): return self.default_config.get(key) def _get_hook_instance(self, set_configuration=True, host="fake_host"): if set_configuration: configuration = mock.Mock() configuration.safe_get.side_effect = self._fake_safe_get else: configuration = None instance = FakeHookImplementation( configuration=configuration, host=host) return instance def test_instantiate_hook_fail(self): self.assertRaises(TypeError, hook.HookBase) @ddt.data(True, False) def test_instantiate_hook_successfully_and_set_configuration( self, set_configuration): instance = self._get_hook_instance(set_configuration) self.assertTrue(hasattr(instance, 'host')) self.assertEqual("fake_host", instance.host) self.assertTrue(hasattr(instance, 'configuration')) if not set_configuration: self.assertIsNone(instance.configuration) for attr_name in ("pre_hooks_enabled", "post_hooks_enabled", "periodic_hooks_enabled", "suppress_pre_hooks_errors", "suppress_post_hooks_errors"): self.assertTrue(hasattr(instance, attr_name)) if set_configuration: 
instance.configuration.append_config_values.assert_has_calls([ mock.call(hook.hook_options)]) conf_func = self._fake_safe_get else: conf_func = self.default_config.get self.assertEqual( conf_func("enable_pre_hooks"), instance.pre_hooks_enabled) self.assertEqual( conf_func("enable_post_hooks"), instance.post_hooks_enabled) self.assertEqual( conf_func("enable_periodic_hooks"), instance.periodic_hooks_enabled) self.assertEqual( conf_func("suppress_pre_hooks_errors"), instance.suppress_pre_hooks_errors) self.assertEqual( conf_func("suppress_post_hooks_errors"), instance.suppress_post_hooks_errors) def test_execute_pre_hook_disabled(self): instance = self._get_hook_instance() instance.pre_hooks_enabled = False self.mock_object( instance, "_execute_pre_hook", mock.Mock(side_effect=Exception("I should not be raised."))) result = instance.execute_pre_hook( self.context, "fake_func_name", "some_arg", some_kwarg="foo") self.assertIsNone(result) @ddt.data(True, False) def test_execute_pre_hook_success(self, provide_context): instance = self._get_hook_instance() instance.pre_hooks_enabled = True instance.suppress_pre_hooks_errors = True expected = "fake_expected_result" some_arg = "some_arg" func_name = "fake_func_name" self.mock_object(hook.LOG, 'error') self.mock_object( instance, "_execute_pre_hook", mock.Mock(return_value=expected)) mock_ctxt = self.mock_object(context, 'get_admin_context') ctxt = self.context if provide_context else mock_ctxt result = instance.execute_pre_hook( ctxt, func_name, some_arg, some_kwarg="foo") self.assertEqual(expected, result) instance._execute_pre_hook.assert_called_once_with( some_arg, context=self.context if provide_context else mock_ctxt, func_name=func_name, some_kwarg="foo") self.assertFalse(hook.LOG.error.called) def test_execute_pre_hook_exception_with_suppression(self): instance = self._get_hook_instance() instance.pre_hooks_enabled = True instance.suppress_pre_hooks_errors = True some_arg = "some_arg" func_name = "fake_func_name" 
FakeException = type("FakeException", (Exception, ), {}) self.mock_object(hook.LOG, 'warning') self.mock_object( instance, "_execute_pre_hook", mock.Mock(side_effect=( FakeException("Some exception that should be suppressed.")))) result = instance.execute_pre_hook( self.context, func_name, some_arg, some_kwarg="foo") self.assertIsInstance(result, FakeException) instance._execute_pre_hook.assert_called_once_with( some_arg, context=self.context, func_name=func_name, some_kwarg="foo") self.assertTrue(hook.LOG.warning.called) def test_execute_pre_hook_exception_without_suppression(self): instance = self._get_hook_instance() instance.pre_hooks_enabled = True instance.suppress_pre_hooks_errors = False some_arg = "some_arg" func_name = "fake_func_name" FakeException = type("FakeException", (Exception, ), {}) self.mock_object(hook.LOG, 'warning') self.mock_object( instance, "_execute_pre_hook", mock.Mock(side_effect=( FakeException( "Some exception that should NOT be suppressed.")))) self.assertRaises( FakeException, instance.execute_pre_hook, self.context, func_name, some_arg, some_kwarg="foo") instance._execute_pre_hook.assert_called_once_with( some_arg, context=self.context, func_name=func_name, some_kwarg="foo") self.assertFalse(hook.LOG.warning.called) def test_execute_post_hook_disabled(self): instance = self._get_hook_instance() instance.post_hooks_enabled = False self.mock_object( instance, "_execute_post_hook", mock.Mock(side_effect=Exception("I should not be raised."))) result = instance.execute_post_hook( self.context, "fake_func_name", "some_pre_hook_data", "some_driver_action_results", "some_arg", some_kwarg="foo") self.assertIsNone(result) @ddt.data(True, False) def test_execute_post_hook_success(self, provide_context): instance = self._get_hook_instance() instance.post_hooks_enabled = True instance.suppress_post_hooks_errors = True expected = "fake_expected_result" some_arg = "some_arg" func_name = "fake_func_name" pre_hook_data = "some_pre_hook_data" 
driver_action_results = "some_driver_action_results" self.mock_object(hook.LOG, 'warning') self.mock_object( instance, "_execute_post_hook", mock.Mock(return_value=expected)) mock_ctxt = self.mock_object(context, 'get_admin_context') ctxt = self.context if provide_context else mock_ctxt result = instance.execute_post_hook( ctxt, func_name, pre_hook_data, driver_action_results, some_arg, some_kwarg="foo") self.assertEqual(expected, result) instance._execute_post_hook.assert_called_once_with( some_arg, context=self.context if provide_context else mock_ctxt, func_name=func_name, pre_hook_data=pre_hook_data, driver_action_results=driver_action_results, some_kwarg="foo") self.assertFalse(hook.LOG.warning.called) def test_execute_post_hook_exception_with_suppression(self): instance = self._get_hook_instance() instance.post_hooks_enabled = True instance.suppress_post_hooks_errors = True some_arg = "some_arg" func_name = "fake_func_name" pre_hook_data = "some_pre_hook_data" driver_action_results = "some_driver_action_results" FakeException = type("FakeException", (Exception, ), {}) self.mock_object(hook.LOG, 'warning') self.mock_object( instance, "_execute_post_hook", mock.Mock(side_effect=( FakeException("Some exception that should be suppressed.")))) result = instance.execute_post_hook( self.context, func_name, pre_hook_data, driver_action_results, some_arg, some_kwarg="foo") self.assertIsInstance(result, FakeException) instance._execute_post_hook.assert_called_once_with( some_arg, context=self.context, func_name=func_name, pre_hook_data=pre_hook_data, driver_action_results=driver_action_results, some_kwarg="foo") self.assertTrue(hook.LOG.warning.called) def test_execute_post_hook_exception_without_suppression(self): instance = self._get_hook_instance() instance.post_hooks_enabled = True instance.suppress_post_hooks_errors = False some_arg = "some_arg" func_name = "fake_func_name" pre_hook_data = "some_pre_hook_data" driver_action_results = "some_driver_action_results" 
FakeException = type("FakeException", (Exception, ), {}) self.mock_object(hook.LOG, 'error') self.mock_object( instance, "_execute_post_hook", mock.Mock(side_effect=( FakeException( "Some exception that should NOT be suppressed.")))) self.assertRaises( FakeException, instance.execute_post_hook, self.context, func_name, pre_hook_data, driver_action_results, some_arg, some_kwarg="foo") instance._execute_post_hook.assert_called_once_with( some_arg, context=self.context, func_name=func_name, pre_hook_data=pre_hook_data, driver_action_results=driver_action_results, some_kwarg="foo") self.assertFalse(hook.LOG.error.called) def test_execute_periodic_hook_disabled(self): instance = self._get_hook_instance() instance.periodic_hooks_enabled = False self.mock_object(instance, "_execute_periodic_hook") instance.execute_periodic_hook( self.context, "fake_periodic_hook_data", "some_arg", some_kwarg="foo") self.assertFalse(instance._execute_periodic_hook.called) @ddt.data(True, False) def test_execute_periodic_hook_enabled(self, provide_context): instance = self._get_hook_instance() instance.periodic_hooks_enabled = True expected = "some_expected_result" self.mock_object( instance, "_execute_periodic_hook", mock.Mock(return_value=expected)) mock_ctxt = self.mock_object(context, 'get_admin_context') ctxt = self.context if provide_context else mock_ctxt result = instance.execute_periodic_hook( ctxt, "fake_periodic_hook_data", "some_arg", some_kwarg="foo") instance._execute_periodic_hook.assert_called_once_with( ctxt, "fake_periodic_hook_data", "some_arg", some_kwarg="foo") self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_manager.py0000664000175000017500000203564300000000000021623 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test of Share Manager for Manila.""" import datetime import hashlib import json import random from unittest import mock import ddt from oslo_concurrency import lockutils from oslo_serialization import jsonutils from oslo_utils import importutils from oslo_utils import timeutils from manila.common import constants from manila import context from manila import coordination from manila.data import rpcapi as data_rpc from manila import db from manila.db.sqlalchemy import models from manila import exception from manila.keymgr import barbican as barbican_api from manila.message import message_field from manila import quota from manila.share import api from manila.share import drivers_private_data from manila.share import manager from manila.share import migration as migration_api from manila.share import rpcapi from manila.share import share_types from manila.share import utils as share_utils from manila import test from manila.tests.api import fakes as test_fakes from manila.tests import db_utils from manila.tests import fake_notifier from manila.tests import fake_share as fakes from manila.tests import fake_utils from manila.tests import utils as test_utils from manila.transfer import api as transfer_api from manila import utils def fake_replica(**kwargs): return fakes.fake_replica(for_manager=True, **kwargs) class CustomTimeSleepException(Exception): pass class LockedOperationsTestCase(test.TestCase): class FakeManager(object): 
@manager.locked_share_replica_operation def fake_replica_operation(self, context, replica, share_id=None): pass def setUp(self): super(LockedOperationsTestCase, self).setUp() self.manager = self.FakeManager() self.fake_context = test_fakes.FakeRequestContext self.lock_call = self.mock_object( coordination, 'synchronized', mock.Mock(return_value=lambda f: f)) @ddt.data({'id': 'FAKE_REPLICA_ID'}, 'FAKE_REPLICA_ID') @ddt.unpack def test_locked_share_replica_operation(self, **replica): self.manager.fake_replica_operation(self.fake_context, replica, share_id='FAKE_SHARE_ID') self.assertTrue(self.lock_call.called) @ddt.ddt class ShareManagerTestCase(test.TestCase): def setUp(self): super(ShareManagerTestCase, self).setUp() self.flags(share_driver='manila.tests.fake_driver.FakeShareDriver') # Define class directly, because this test suite dedicated # to specific manager. self.share_manager = importutils.import_object( "manila.share.manager.ShareManager") self.mock_object(self.share_manager.driver, 'do_setup') self.mock_object(self.share_manager.driver, 'check_for_setup_error') self.share_manager.driver._stats = { 'share_group_stats': {'consistent_snapshot_support': None}, } self.mock_object(self.share_manager.message_api, 'create') self.context = context.get_admin_context() self.share_manager.driver.initialized = True self.host = 'host' self.share_manager.host = 'fake_host' mock.patch.object( lockutils, 'lock', fake_utils.get_fake_lock_context()) self.synchronized_lock_decorator_call = self.mock_object( coordination, 'synchronized', mock.Mock(return_value=lambda f: f)) def test_share_manager_instance(self): fake_service_name = "fake_service" importutils_mock = mock.Mock() self.mock_object(importutils, "import_object", importutils_mock) private_data_mock = mock.Mock() self.mock_object(drivers_private_data, "DriverPrivateData", private_data_mock) self.mock_object(manager.ShareManager, '_init_hook_drivers') share_manager = manager.ShareManager(service_name=fake_service_name) 
private_data_mock.assert_called_once_with( context=mock.ANY, backend_host=share_manager.host, config_group=fake_service_name ) self.assertTrue(importutils_mock.called) self.assertTrue(manager.ShareManager._init_hook_drivers.called) def test__init_hook_drivers(self): fake_service_name = "fake_service" importutils_mock = mock.Mock() self.mock_object(importutils, "import_object", importutils_mock) self.mock_object(drivers_private_data, "DriverPrivateData") share_manager = manager.ShareManager(service_name=fake_service_name) share_manager.configuration.safe_get = mock.Mock( return_value=["Foo", "Bar"]) self.assertEqual(0, len(share_manager.hooks)) importutils_mock.reset() share_manager._init_hook_drivers() self.assertEqual( len(share_manager.configuration.safe_get.return_value), len(share_manager.hooks)) importutils_mock.assert_has_calls([ mock.call( hook, configuration=share_manager.configuration, host=share_manager.host ) for hook in share_manager.configuration.safe_get.return_value ], any_order=True) def test__execute_periodic_hook(self): share_instances_mock = mock.Mock() hook_data_mock = mock.Mock() self.mock_object( self.share_manager.db, "share_instance_get_all_by_host", share_instances_mock) self.mock_object( self.share_manager.driver, "get_periodic_hook_data", hook_data_mock) self.share_manager.hooks = [mock.Mock(return_value=i) for i in (0, 1)] self.share_manager._execute_periodic_hook(self.context) share_instances_mock.assert_called_once_with( context=self.context, host=self.share_manager.host) hook_data_mock.assert_called_once_with( context=self.context, share_instances=share_instances_mock.return_value) for mock_hook in self.share_manager.hooks: mock_hook.execute_periodic_hook.assert_called_once_with( context=self.context, periodic_hook_data=hook_data_mock.return_value) def test_is_service_ready(self): self.assertTrue(self.share_manager.is_service_ready()) # switch it to false and check again self.share_manager.driver.initialized = False 
self.assertFalse(self.share_manager.is_service_ready()) @ddt.data(True, False) def test_ensure_driver_resources_driver_needs_to_reapply_rules( self, driver_needs_to_reapply_rules): old_hash = {'info_hash': '1e5ff444cfdc4a154126ddebc0223ffeae2d10c9'} self.mock_object(self.share_manager.db, 'backend_info_get', mock.Mock(return_value=old_hash)) self.mock_object(self.share_manager.driver, 'get_backend_info', mock.Mock(return_value={'val': 'tigersgo'})) instances, rules = self._setup_init_mocks() fake_export_locations = ['fake/path/1', 'fake/path'] fake_update_instances = { instances[0]['id']: { 'export_locations': fake_export_locations, 'reapply_access_rules': driver_needs_to_reapply_rules, }, instances[2]['id']: { 'export_locations': fake_export_locations, 'reapply_access_rules': driver_needs_to_reapply_rules, }, } fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(self.share_manager.db, 'service_update') mock_backend_info_update = self.mock_object( self.share_manager.db, 'backend_info_update') mock_share_get_all_by_host = self.mock_object( self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[4]])) self.mock_object(self.share_manager.db, 'export_locations_update') mock_ensure_shares = self.mock_object( self.share_manager.driver, 'ensure_shares', mock.Mock(return_value=fake_update_instances)) self.mock_object(self.share_manager, '_ensure_share_instance_has_pool') self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value='share_server')) self.mock_object(self.share_manager, '_get_share_server_dict', mock.Mock(return_value='share_server')) mock_reset_rules_method = self.mock_object( self.share_manager.access_helper, 'reset_rules_to_queueing_states') 
mock_update_rules_method = self.mock_object( self.share_manager.access_helper, 'update_access_rules') dict_instances = [self._get_share_instance_dict( instance, share_server='share_server') for instance in instances] self.share_manager.ensure_driver_resources(self.context) exports_update = self.share_manager.db.export_locations_update mock_backend_info_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, '77a1d6fc86295017d9908a4f657dc9e089b3de4b') mock_ensure_shares.assert_called_once_with( utils.IsAMatcher(context.RequestContext), [dict_instances[0], dict_instances[2], dict_instances[4]]) mock_share_get_all_by_host.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host) exports_update.assert_has_calls([ mock.call(mock.ANY, instances[0]['id'], fake_export_locations), mock.call(mock.ANY, instances[2]['id'], fake_export_locations), ]) self.share_manager._ensure_share_instance_has_pool.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager._get_share_server.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager.db.service_get_by_args.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, 'manila-share' ) self.share_manager.db.service_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': True} ), mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': False} ) ]) if driver_needs_to_reapply_rules: # don't care if share_instance['access_rules_status'] is "syncing" mock_reset_rules_method.assert_has_calls([ mock.call(mock.ANY, instances[0]['id'], reset_active=driver_needs_to_reapply_rules), mock.call(mock.ANY, instances[2]['id'], 
reset_active=driver_needs_to_reapply_rules), ]) mock_update_rules_method.assert_has_calls([ mock.call(mock.ANY, instances[0]['id'], share_server='share_server'), mock.call(mock.ANY, instances[2]['id'], share_server='share_server'), ]) else: # none of the share instances in the fake data have syncing rules mock_reset_rules_method.assert_not_called() (self.share_manager.access_helper.update_access_rules .assert_not_called()) def test_ensure_driver_resources_share_metadata_updates(self): old_hash = {'info_hash': '1e5ff444cfdc4a154126ddebc0223ffeae2d10c9'} self.mock_object(self.share_manager.db, 'backend_info_get', mock.Mock(return_value=old_hash)) self.mock_object(self.share_manager.driver, 'get_backend_info', mock.Mock(return_value={'val': 'newval'})) instances, rules = self._setup_init_mocks() metadata_updates = {'update_meta_key': 'update_meta_val'} fake_update_instances = { instances[0]['id']: { 'metadata': metadata_updates, }, instances[2]['id']: { 'metadata': metadata_updates, }, } fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(self.share_manager.db, 'service_update') mock_backend_info_update = self.mock_object( self.share_manager.db, 'backend_info_update') mock_share_get_all_by_host = self.mock_object( self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[4]])) self.mock_object(self.share_manager.db, 'share_metadata_update') mock_ensure_shares = self.mock_object( self.share_manager.driver, 'ensure_shares', mock.Mock(return_value=fake_update_instances)) self.mock_object(self.share_manager, '_ensure_share_instance_has_pool') self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value='share_server')) self.mock_object(self.share_manager, 
'_get_share_server_dict', mock.Mock(return_value='share_server')) mock_reset_rules_method = self.mock_object( self.share_manager.access_helper, 'reset_rules_to_queueing_states') dict_instances = [self._get_share_instance_dict( instance, share_server='share_server') for instance in instances] self.share_manager.ensure_driver_resources(self.context) mock_backend_info_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, '7026776d9aaf3563799b8bbe1a020ea8cbd4dd22') mock_ensure_shares.assert_called_once_with( utils.IsAMatcher(context.RequestContext), [dict_instances[0], dict_instances[2], dict_instances[4]]) mock_share_get_all_by_host.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host) self.share_manager._ensure_share_instance_has_pool.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager.db.share_metadata_update.assert_has_calls([ mock.call( self.context, instances[0]['share_id'], metadata_updates, False ), mock.call( self.context, instances[2]['share_id'], metadata_updates, False ), ]) self.share_manager._get_share_server.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) # none of the share instances in the fake data have syncing rules mock_reset_rules_method.assert_not_called() self.share_manager.db.service_get_by_args.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, 'manila-share' ) self.share_manager.db.service_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': True} ), mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': False} ) ]) def test_init_host_with_no_shares(self): self.mock_object(self.share_manager.db, 
'share_instance_get_all_by_host', mock.Mock(return_value=[])) self.share_manager.init_host() self.assertTrue(self.share_manager.driver.initialized) (self.share_manager.db.share_instance_get_all_by_host. assert_called_once_with(utils.IsAMatcher(context.RequestContext), self.share_manager.host)) self.share_manager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) (self.share_manager.driver.check_for_setup_error. assert_called_once_with()) @ddt.data( "connection_get_info", "migration_cancel", "migration_get_progress", "migration_complete", "migration_start", "create_share_instance", "manage_share", "unmanage_share", "delete_share_instance", "delete_free_share_servers", "delete_expired_share", "create_snapshot", "delete_snapshot", "update_access", "_report_driver_status", "_execute_periodic_hook", "publish_service_capabilities", "delete_share_server", "extend_share", "shrink_share", "create_share_group", "delete_share_group", "create_share_group_snapshot", "delete_share_group_snapshot", "create_share_replica", "delete_share_replica", "promote_share_replica", "periodic_share_replica_update", "update_share_replica", "create_replicated_snapshot", "delete_replicated_snapshot", "periodic_share_replica_snapshot_update", "do_deferred_share_deletion" ) def test_call_driver_when_its_init_failed(self, method_name): self.mock_object(self.share_manager.driver, 'do_setup', mock.Mock(side_effect=Exception())) # break the endless retry loop with mock.patch('tenacity.nap.sleep') as sleep: sleep.side_effect = CustomTimeSleepException() self.assertRaises(CustomTimeSleepException, self.share_manager.init_host) self.assertRaises( exception.DriverNotInitialized, getattr(self.share_manager, method_name), 'foo', 'bar', 'quuz' ) @ddt.data("do_setup", "check_for_setup_error") def test_init_host_with_driver_failure(self, method_name): self.mock_object(self.share_manager.driver, method_name, mock.Mock(side_effect=Exception())) self.mock_object(manager.LOG, 
'exception') self.share_manager.driver.initialized = False with mock.patch('time.sleep') as mock_sleep: mock_sleep.side_effect = CustomTimeSleepException() self.assertRaises(CustomTimeSleepException, self.share_manager.init_host) manager.LOG.exception.assert_called_once_with( mock.ANY, "%(name)s@%(host)s" % {'name': self.share_manager.driver.__class__.__name__, 'host': self.share_manager.host}) self.assertFalse(self.share_manager.driver.initialized) def _setup_init_mocks(self, setup_access_rules=True): share_type = db_utils.create_share_type() instances = [ db_utils.create_share(id='fake_id_1', share_type_id=share_type['id'], status=constants.STATUS_AVAILABLE, display_name='fake_name_1').instance, db_utils.create_share(id='fake_id_2', share_type_id=share_type['id'], status=constants.STATUS_ERROR, display_name='fake_name_2').instance, db_utils.create_share(id='fake_id_3', share_type_id=share_type['id'], status=constants.STATUS_AVAILABLE, display_name='fake_name_3').instance, db_utils.create_share( id='fake_id_4', share_type_id=share_type['id'], status=constants.STATUS_MIGRATING, task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS, display_name='fake_name_4').instance, db_utils.create_share(id='fake_id_5', share_type_id=share_type['id'], status=constants.STATUS_AVAILABLE, display_name='fake_name_5').instance, db_utils.create_share( id='fake_id_6', share_type_id=share_type['id'], status=constants.STATUS_MIGRATING, task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, display_name='fake_name_6').instance, db_utils.create_share( id='fake_id_7', share_type_id=share_type['id'], status=constants.STATUS_CREATING_FROM_SNAPSHOT, display_name='fake_name_7').instance, ] instances[4]['access_rules_status'] = ( constants.SHARE_INSTANCE_RULES_SYNCING) if not setup_access_rules: return instances rules = [ db_utils.create_access(share_id='fake_id_1'), db_utils.create_access(share_id='fake_id_3'), ] return instances, rules @ddt.data(("some_hash", {"db_version": 
"test_version"}), ("ddd86ec90923b686597501e2f2431f3af59238c0", {"db_version": "test_version"}), (None, {"db_version": "test_version"}), (None, None)) @ddt.unpack def test_init_host_with_shares_and_rules( self, old_backend_info_hash, new_backend_info): # initialization of test data def raise_share_access_exists(*args, **kwargs): raise exception.ShareAccessExists( access_type='fake_access_type', access='fake_access') new_backend_info_hash = (hashlib.sha1(str( sorted(new_backend_info.items())).encode('utf-8')).hexdigest() if new_backend_info else None) old_backend_info = {'info_hash': old_backend_info_hash} share_server = fakes.fake_share_server_get() instances, rules = self._setup_init_mocks() fake_export_locations = ['fake/path/1', 'fake/path'] fake_update_instances = { instances[0]['id']: {'export_locations': fake_export_locations}, instances[2]['id']: {'export_locations': fake_export_locations} } instances[0]['access_rules_status'] = '' instances[2]['access_rules_status'] = '' fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object(self.share_manager.db, 'backend_info_get', mock.Mock(return_value=old_backend_info)) mock_backend_info_update = self.mock_object( self.share_manager.db, 'backend_info_update') self.mock_object(self.share_manager.driver, 'get_backend_info', mock.Mock(return_value=new_backend_info)) mock_share_get_all_by_host = self.mock_object( self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(self.share_manager.db, 'service_update') self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[4]])) self.mock_object(self.share_manager.db, 'export_locations_update') mock_ensure_shares = self.mock_object( self.share_manager.driver, 'ensure_shares', mock.Mock(return_value=fake_update_instances)) 
self.mock_object(self.share_manager, '_ensure_share_instance_has_pool') self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager, '_get_share_server_dict', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager, 'publish_service_capabilities', mock.Mock()) self.mock_object(self.share_manager.access_helper, 'reset_rules_to_queueing_states') self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(side_effect=raise_share_access_exists) ) dict_instances = [self._get_share_instance_dict( instance, share_server=share_server) for instance in instances] # call of 'init_host' method self.share_manager.init_host() # verification of call exports_update = self.share_manager.db.export_locations_update self.share_manager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) (self.share_manager.driver.check_for_setup_error. assert_called_once_with()) if new_backend_info_hash == old_backend_info_hash: mock_backend_info_update.assert_not_called() mock_ensure_shares.assert_not_called() mock_share_get_all_by_host.assert_not_called() else: mock_backend_info_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, new_backend_info_hash) self.share_manager.driver.ensure_shares.assert_called_once_with( utils.IsAMatcher(context.RequestContext), [dict_instances[0], dict_instances[2], dict_instances[4]]) mock_share_get_all_by_host.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host) exports_update.assert_has_calls([ mock.call(mock.ANY, instances[0]['id'], fake_export_locations), mock.call(mock.ANY, instances[2]['id'], fake_export_locations) ]) (self.share_manager._ensure_share_instance_has_pool. 
assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ])) self.share_manager._get_share_server.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) (self.share_manager.publish_service_capabilities. assert_called_once_with( utils.IsAMatcher(context.RequestContext))) (self.share_manager.access_helper.update_access_rules. assert_has_calls([ mock.call(mock.ANY, instances[0]['id'], share_server=share_server), mock.call(mock.ANY, instances[2]['id'], share_server=share_server), ])) self.share_manager.db.service_get_by_args.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, 'manila-share' ) self.share_manager.db.service_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': True} ), mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': False} ) ]) @ddt.data(("some_hash", {"db_version": "test_version"}), ("ddd86ec90923b686597501e2f2431f3af59238c0", {"db_version": "test_version"}), (None, {"db_version": "test_version"}), (None, None)) @ddt.unpack def test_init_host_without_shares_and_rules( self, old_backend_info_hash, new_backend_info): old_backend_info = {'info_hash': old_backend_info_hash} new_backend_info_hash = (hashlib.sha1(str( sorted(new_backend_info.items())).encode('utf-8')).hexdigest() if new_backend_info else None) mock_backend_info_update = self.mock_object( self.share_manager.db, 'backend_info_update') fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object( self.share_manager.db, 'backend_info_get', mock.Mock(return_value=old_backend_info)) self.mock_object(self.share_manager.driver, 'get_backend_info', mock.Mock(return_value=new_backend_info)) self.mock_object(self.share_manager, 
'publish_service_capabilities', mock.Mock()) self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(self.share_manager.db, 'service_update') mock_ensure_shares = self.mock_object( self.share_manager.driver, 'ensure_shares') mock_share_instance_get_all_by_host = self.mock_object( self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=[])) # call of 'init_host' method self.share_manager.init_host() if new_backend_info_hash == old_backend_info_hash: mock_backend_info_update.assert_not_called() mock_ensure_shares.assert_not_called() mock_share_instance_get_all_by_host.assert_not_called() else: mock_backend_info_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, new_backend_info_hash) self.share_manager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) self.share_manager.db.backend_info_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host) self.share_manager.driver.get_backend_info.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) mock_ensure_shares.assert_not_called() mock_share_instance_get_all_by_host.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host) @ddt.data(exception.ManilaException, ['fake/path/1', 'fake/path']) def test_init_host_with_ensure_share(self, expected_ensure_share_result): def raise_NotImplementedError(*args, **kwargs): raise NotImplementedError instances = self._setup_init_mocks(setup_access_rules=False) share_server = fakes.fake_share_server_get() fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object(self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[3]])) self.mock_object( 
self.share_manager.driver, 'ensure_shares', mock.Mock(side_effect=raise_NotImplementedError)) self.mock_object(self.share_manager.driver, 'ensure_share', mock.Mock(side_effect=expected_ensure_share_result)) self.mock_object( self.share_manager, '_ensure_share_instance_has_pool') self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager, '_get_share_server_dict', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager, 'publish_service_capabilities') self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(self.share_manager.db, 'service_update') self.mock_object(manager.LOG, 'error') self.mock_object(manager.LOG, 'info') dict_instances = [self._get_share_instance_dict( instance, share_server=share_server) for instance in instances] # call of 'init_host' method self.share_manager.init_host() # verification of call (self.share_manager.db.share_instance_get_all_by_host. 
assert_called_once_with(utils.IsAMatcher(context.RequestContext), self.share_manager.host)) self.share_manager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) self.share_manager.driver.check_for_setup_error.assert_called_with() self.share_manager._ensure_share_instance_has_pool.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager.driver.ensure_shares.assert_called_once_with( utils.IsAMatcher(context.RequestContext), [dict_instances[0], dict_instances[2], dict_instances[3]]) self.share_manager._get_share_server.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager.driver.ensure_share.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), dict_instances[0], share_server=share_server), mock.call(utils.IsAMatcher(context.RequestContext), dict_instances[2], share_server=share_server), ]) (self.share_manager.publish_service_capabilities. 
assert_called_once_with( utils.IsAMatcher(context.RequestContext))) manager.LOG.info.assert_any_call( mock.ANY, {'task': constants.TASK_STATE_MIGRATION_IN_PROGRESS, 'id': instances[3]['id']}, ) manager.LOG.info.assert_any_call( mock.ANY, {'id': instances[1]['id'], 'status': instances[1]['status']}, ) self.share_manager.db.service_get_by_args.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, 'manila-share' ) self.share_manager.db.service_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': True} ), mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': False} ) ]) def _get_share_instance_dict(self, share_instance, **kwargs): # TODO(gouthamr): remove method when the db layer returns primitives share_instance_ref = { 'id': share_instance.get('id'), 'name': share_instance.get('name'), 'share_id': share_instance.get('share_id'), 'host': share_instance.get('host'), 'status': share_instance.get('status'), 'replica_state': share_instance.get('replica_state'), 'availability_zone_id': share_instance.get('availability_zone_id'), 'share_network_id': share_instance.get('share_network_id'), 'share_server_id': share_instance.get('share_server_id'), 'deleted': share_instance.get('deleted'), 'terminated_at': share_instance.get('terminated_at'), 'launched_at': share_instance.get('launched_at'), 'scheduled_at': share_instance.get('scheduled_at'), 'updated_at': share_instance.get('updated_at'), 'deleted_at': share_instance.get('deleted_at'), 'created_at': share_instance.get('created_at'), 'share_server': kwargs.get('share_server'), 'access_rules_status': share_instance.get('access_rules_status'), # Share details 'user_id': share_instance.get('user_id'), 'project_id': share_instance.get('project_id'), 'size': share_instance.get('size'), 'display_name': share_instance.get('display_name'), 'display_description': share_instance.get('display_description'), 
'snapshot_id': share_instance.get('snapshot_id'), 'share_proto': share_instance.get('share_proto'), 'share_type_id': share_instance.get('share_type_id'), 'is_public': share_instance.get('is_public'), 'share_group_id': share_instance.get('share_group_id'), 'source_share_group_snapshot_member_id': share_instance.get( 'source_share_group_snapshot_member_id'), 'availability_zone': share_instance.get('availability_zone'), 'export_locations': share_instance.get('export_locations') or [], } return share_instance_ref def test_init_host_with_exception_on_ensure_shares(self): def raise_exception(*args, **kwargs): raise exception.ManilaException(message="Fake raise") instances = self._setup_init_mocks(setup_access_rules=False) fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} mock_ensure_share = self.mock_object( self.share_manager.driver, 'ensure_share') self.mock_object(self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(self.share_manager.db, 'service_update') self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[3]])) self.mock_object( self.share_manager.driver, 'ensure_shares', mock.Mock(side_effect=raise_exception)) self.mock_object( self.share_manager, '_ensure_share_instance_has_pool') self.mock_object(db, 'share_server_get', mock.Mock(return_value=fakes.fake_share_server_get())) dict_instances = [self._get_share_instance_dict(instance) for instance in instances] # call of 'init_host' method self.share_manager.init_host() # verification of call (self.share_manager.db.share_instance_get_all_by_host. 
assert_called_once_with(utils.IsAMatcher(context.RequestContext), self.share_manager.host)) self.share_manager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) self.share_manager.driver.check_for_setup_error.assert_called_with() self.share_manager._ensure_share_instance_has_pool.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) self.share_manager.db.service_get_by_args.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, 'manila-share' ) self.share_manager.db.service_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': True} ), mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': False} ) ]) self.share_manager.driver.ensure_shares.assert_called_once_with( utils.IsAMatcher(context.RequestContext), [dict_instances[0], dict_instances[2], dict_instances[3]]) mock_ensure_share.assert_not_called() def test_init_host_with_exception_on_get_backend_info(self): def raise_exception(*args, **kwargs): raise exception.ManilaException(message="Fake raise") old_backend_info = {'info_hash': "test_backend_info"} mock_ensure_share = self.mock_object( self.share_manager.driver, 'ensure_share') mock_ensure_shares = self.mock_object( self.share_manager.driver, 'ensure_shares') self.mock_object(self.share_manager.db, 'backend_info_get', mock.Mock(return_value=old_backend_info)) self.mock_object( self.share_manager.driver, 'get_backend_info', mock.Mock(side_effect=raise_exception)) # call of 'init_host' method self.assertRaises( exception.ManilaException, self.share_manager.init_host, ) # verification of call self.share_manager.db.backend_info_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host) self.share_manager.driver.get_backend_info.assert_called_once_with( 
utils.IsAMatcher(context.RequestContext)) mock_ensure_share.assert_not_called() mock_ensure_shares.assert_not_called() def test_init_host_with_exception_on_update_access_rules(self): def raise_exception(*args, **kwargs): raise exception.ManilaException(message="Fake raise") instances, rules = self._setup_init_mocks() share_server = fakes.fake_share_server_get() fake_update_instances = { instances[0]['id']: {'status': 'available'}, instances[2]['id']: {'status': 'available'}, instances[4]['id']: {'status': 'available'} } smanager = self.share_manager fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object(smanager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[4]])) self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(self.share_manager.db, 'service_update') self.mock_object(self.share_manager.driver, 'ensure_share', mock.Mock(return_value=None)) self.mock_object(self.share_manager.driver, 'ensure_shares', mock.Mock(return_value=fake_update_instances)) self.mock_object(smanager, '_ensure_share_instance_has_pool') self.mock_object(smanager, '_get_share_server', mock.Mock(return_value=share_server)) self.mock_object(smanager, 'publish_service_capabilities') self.mock_object(manager.LOG, 'exception') self.mock_object(manager.LOG, 'info') self.mock_object(smanager.access_helper, 'reset_rules_to_queueing_states') self.mock_object(smanager.access_helper, 'update_access_rules', mock.Mock(side_effect=raise_exception)) self.mock_object(smanager, '_get_share_server_dict', mock.Mock(return_value=share_server)) dict_instances = [self._get_share_instance_dict( instance, share_server=share_server) for instance in instances] # call of 'init_host' method smanager.init_host() # verification of call (smanager.db.share_instance_get_all_by_host. 
assert_called_once_with(utils.IsAMatcher(context.RequestContext), smanager.host)) smanager.driver.do_setup.assert_called_once_with( utils.IsAMatcher(context.RequestContext)) smanager.driver.check_for_setup_error.assert_called_with() smanager._ensure_share_instance_has_pool.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) smanager.driver.ensure_shares.assert_called_once_with( utils.IsAMatcher(context.RequestContext), [dict_instances[0], dict_instances[2], dict_instances[4]]) (self.share_manager.publish_service_capabilities. assert_called_once_with( utils.IsAMatcher(context.RequestContext))) manager.LOG.info.assert_any_call( mock.ANY, {'task': constants.TASK_STATE_MIGRATION_IN_PROGRESS, 'id': instances[3]['id']}, ) manager.LOG.info.assert_any_call( mock.ANY, {'id': instances[1]['id'], 'status': instances[1]['status']}, ) smanager.access_helper.update_access_rules.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instances[4]['id'], share_server=share_server), ]) manager.LOG.exception.assert_has_calls([ mock.call(mock.ANY, mock.ANY), ]) self.share_manager.db.service_get_by_args.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, 'manila-share' ) self.share_manager.db.service_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': True} ), mock.call( utils.IsAMatcher(context.RequestContext), fake_service['id'], {'ensuring': False} ) ]) def test_create_share_instance_from_snapshot_with_server(self): """Test share can be created from snapshot if server exists.""" network = db_utils.create_share_network() subnet = db_utils.create_share_network_subnet( share_network_id=network['id']) server = db_utils.create_share_server( share_network_subnets=[subnet], host='fake_host', backend_details=dict(fake='fake')) parent_share = 
db_utils.create_share(share_network_id='net-id', share_server_id=server['id']) share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=parent_share['id']) snapshot_id = snapshot['id'] self.share_manager.create_share_instance( self.context, share.instance['id'], snapshot_id=snapshot_id) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) self.assertEqual(server['id'], shr['instance']['share_server_id']) def test_create_share_instance_from_snapshot_with_server_not_found(self): """Test creation from snapshot fails if server not found.""" parent_share = db_utils.create_share(share_network_id='net-id', share_server_id='fake-id') share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=parent_share['id']) snapshot_id = snapshot['id'] self.assertRaises(exception.ShareServerNotFound, self.share_manager.create_share_instance, self.context, share.instance['id'], snapshot_id=snapshot_id ) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_ERROR, shr['status']) def test_create_share_instance_from_snapshot_status_creating(self): """Test share can be created from snapshot in asynchronous mode.""" share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] create_from_snap_ret = { 'status': constants.STATUS_CREATING_FROM_SNAPSHOT, } driver_call = self.mock_object( self.share_manager.driver, 'create_share_from_snapshot', mock.Mock(return_value=create_from_snap_ret)) self.share_manager.create_share_instance( self.context, share.instance['id'], 
snapshot_id=snapshot_id) self.assertTrue(driver_call.called) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertTrue(driver_call.called) self.assertEqual(constants.STATUS_CREATING_FROM_SNAPSHOT, shr['status']) self.assertEqual(0, len(shr['export_locations'])) def test_create_share_instance_from_snapshot_invalid_status(self): """Test share can't be created from snapshot with 'creating' status.""" share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] create_from_snap_ret = { 'status': constants.STATUS_CREATING, } driver_call = self.mock_object( self.share_manager.driver, 'create_share_from_snapshot', mock.Mock(return_value=create_from_snap_ret)) self.assertRaises(exception.InvalidShareInstance, self.share_manager.create_share_instance, self.context, share.instance['id'], snapshot_id=snapshot_id) self.assertTrue(driver_call.called) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_ERROR, shr['status']) def test_create_share_instance_from_snapshot_export_locations_only(self): """Test share can be created from snapshot on old driver interface.""" share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] create_from_snap_ret = ['/path/fake', '/path/fake2', '/path/fake3'] driver_call = self.mock_object( self.share_manager.driver, 'create_share_from_snapshot', mock.Mock(return_value=create_from_snap_ret)) self.share_manager.create_share_instance( self.context, share.instance['id'], snapshot_id=snapshot_id) self.assertTrue(driver_call.called) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, 
share_id) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) self.assertEqual(3, len(shr['export_locations'])) def test_create_share_instance_from_snapshot(self): """Test share can be created from snapshot.""" share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] snapshot = db_utils.create_snapshot(share_id=share_id) snapshot_id = snapshot['id'] self.share_manager.create_share_instance( self.context, share.instance['id'], snapshot_id=snapshot_id) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) self.assertGreater(len(shr['export_location']), 0) self.assertEqual(2, len(shr['export_locations'])) def test_create_share_instance_for_share_with_replication_support(self): """Test update call is made to update replica_state.""" share_type = db_utils.create_share_type() share = db_utils.create_share(replication_type='writable', share_type_id=share_type['id']) share_id = share['id'] self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) shr_instance = db.share_instance_get(self.context, share.instance['id']) self.assertEqual(constants.STATUS_AVAILABLE, shr['status'],) self.assertEqual(constants.REPLICA_STATE_ACTIVE, shr_instance['replica_state']) @ddt.data([], None) def test_create_share_replica_no_active_replicas(self, active_replicas): replica = fake_replica() self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replicas)) self.mock_object( db, 'share_replica_get', mock.Mock(return_value=replica)) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_driver_replica_call = self.mock_object( self.share_manager.driver, 'create_replica') 
self.assertRaises(exception.ReplicationException, self.share_manager.create_share_replica, self.context, replica) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) self.assertFalse(mock_driver_replica_call.called) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, replica['project_id'], resource_type=message_field.Resource.SHARE_REPLICA, resource_id=replica['id'], detail=message_field.Detail.NO_ACTIVE_REPLICA) def test_create_share_replica_with_share_network_id_and_not_dhss(self): replica = fake_replica() manager.CONF.set_default('driver_handles_share_servers', False) self.mock_object(db, 'share_access_get_all_for_share', mock.Mock(return_value=[])) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=fake_replica(id='fake2'))) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_driver_replica_call = self.mock_object( self.share_manager.driver, 'create_replica') self.assertRaises(exception.InvalidDriverMode, self.share_manager.create_share_replica, self.context, replica) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) self.assertFalse(mock_driver_replica_call.called) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, replica['project_id'], resource_type=message_field.Resource.SHARE_REPLICA, resource_id=replica['id'], detail=message_field.Detail.UNEXPECTED_NETWORK) def test_create_share_replica_with_share_server_exception(self): replica = fake_replica() share_network_subnet = db_utils.create_share_network_subnet( share_network_id=replica['share_network_id'], 
availability_zone_id=replica['availability_zone_id']) manager.CONF.set_default('driver_handles_share_servers', True) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=[])) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=fake_replica(id='fake2'))) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=[share_network_subnet])) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_driver_replica_call = self.mock_object( self.share_manager.driver, 'create_replica') self.assertRaises(exception.NotFound, self.share_manager.create_share_replica, self.context, replica) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) self.assertFalse(mock_driver_replica_call.called) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, replica['project_id'], resource_type=message_field.Resource.SHARE_REPLICA, resource_id=replica['id'], detail=message_field.Detail.NO_SHARE_SERVER) def test_create_share_replica_driver_error_on_creation(self): fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}] replica = fake_replica() replica_2 = fake_replica(id='fake2') self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=fake_access_rules)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_2)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) self.mock_object(self.share_manager, '_provide_share_server_for_share', mock.Mock(return_value=('FAKE_SERVER', replica))) self.mock_object(self.share_manager, 
'_get_replica_snapshots_for_snapshot', mock.Mock(return_value=[])) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_export_locs_update_call = self.mock_object( db, 'export_locations_update') mock_log_error = self.mock_object(manager.LOG, 'error') mock_log_info = self.mock_object(manager.LOG, 'info') self.mock_object(db, 'share_instance_access_get', mock.Mock(return_value=fake_access_rules[0])) mock_share_replica_access_update = self.mock_object( self.share_manager.access_helper, 'get_and_update_share_instance_access_rules_status') self.mock_object(self.share_manager, '_get_share_server') driver_call = self.mock_object( self.share_manager.driver, 'create_replica', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self.share_manager.create_share_replica, self.context, replica) mock_replica_update_call.assert_called_once_with( utils.IsAMatcher(context.RequestContext), replica['id'], {'status': constants.STATUS_ERROR, 'replica_state': constants.STATUS_ERROR}) mock_share_replica_access_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_instance_id=replica['id'], status=constants.SHARE_INSTANCE_RULES_ERROR) self.assertFalse(mock_export_locs_update_call.called) self.assertTrue(mock_log_error.called) self.assertFalse(mock_log_info.called) self.assertTrue(driver_call.called) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, replica['project_id'], resource_type=message_field.Resource.SHARE_REPLICA, resource_id=replica['id'], exception=mock.ANY) def test_create_share_replica_invalid_locations_state(self): driver_retval = { 'export_locations': 'FAKE_EXPORT_LOC', } replica = fake_replica(share_network='', access_rules_status=constants.STATUS_ACTIVE) replica_2 = fake_replica(id='fake2') fake_access_rules = [{'id': '1'}, {'id': '2'}] self.mock_object(db, 
'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_2)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=fake_access_rules)) self.mock_object(self.share_manager, '_provide_share_server_for_share', mock.Mock(return_value=('FAKE_SERVER', replica))) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager, '_get_replica_snapshots_for_snapshot', mock.Mock(return_value=[])) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_export_locs_update_call = self.mock_object( db, 'export_locations_update') mock_log_info = self.mock_object(manager.LOG, 'info') mock_log_warning = self.mock_object(manager.LOG, 'warning') mock_log_error = self.mock_object(manager.LOG, 'error') driver_call = self.mock_object( self.share_manager.driver, 'create_replica', mock.Mock(return_value=driver_retval)) self.mock_object(db, 'share_instance_access_get', mock.Mock(return_value=fake_access_rules[0])) mock_share_replica_access_update = self.mock_object( self.share_manager.access_helper, 'get_and_update_share_instance_access_rules_status') self.share_manager.create_share_replica(self.context, replica) self.assertFalse(mock_replica_update_call.called) mock_share_replica_access_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_instance_id=replica['id'], status=constants.STATUS_ACTIVE) self.assertFalse(mock_export_locs_update_call.called) self.assertTrue(mock_log_info.called) self.assertTrue(mock_log_warning.called) self.assertFalse(mock_log_error.called) self.assertTrue(driver_call.called) call_args = driver_call.call_args_list[0][0] replica_list_arg = call_args[1] r_ids = [r['id'] for r in replica_list_arg] for r in (replica, replica_2): 
self.assertIn(r['id'], r_ids) self.assertEqual(2, len(r_ids)) def test_create_share_replica_no_availability_zone(self): replica = fake_replica( availability_zone=None, share_network='', replica_state=constants.REPLICA_STATE_OUT_OF_SYNC, access_rules_status=None) replica_2 = fake_replica(id='fake2') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) self.share_manager.availability_zone = 'fake_az' fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=fake_access_rules)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_2)) self.mock_object(self.share_manager, '_provide_share_server_for_share', mock.Mock(return_value=('FAKE_SERVER', replica))) self.mock_object(self.share_manager, '_get_replica_snapshots_for_snapshot', mock.Mock(return_value=[])) mock_replica_update_call = self.mock_object( db, 'share_replica_update', mock.Mock(return_value=replica)) mock_calls = [ mock.call(mock.ANY, replica['id'], {'availability_zone': 'fake_az'}, with_share_data=True), mock.call(mock.ANY, replica['id'], {'status': constants.STATUS_AVAILABLE, 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, 'progress': '100%'}), ] mock_export_locs_update_call = self.mock_object( db, 'export_locations_update') mock_log_info = self.mock_object(manager.LOG, 'info') mock_log_warning = self.mock_object(manager.LOG, 'warning') mock_log_error = self.mock_object(manager.LOG, 'warning') self.mock_object(db, 'share_instance_access_get', mock.Mock(return_value=fake_access_rules[0])) mock_share_replica_access_rule_update = self.mock_object( self.share_manager.access_helper, 'get_and_update_share_instance_access_rules') mock_share_replica_access_state_update = self.mock_object( self.share_manager, '_update_share_instance_access_rules_state') driver_call = 
self.mock_object( self.share_manager.driver, 'create_replica', mock.Mock(return_value=replica)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock()) self.share_manager.create_share_replica(self.context, replica) mock_replica_update_call.assert_has_calls(mock_calls, any_order=False) mock_share_replica_access_rule_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_instance_id=replica['id'], conditionally_change={'queued_to_apply': 'active'}) mock_share_replica_access_state_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), replica['id'], constants.STATUS_ACTIVE) self.assertTrue(mock_export_locs_update_call.called) self.assertTrue(mock_log_info.called) self.assertFalse(mock_log_warning.called) self.assertFalse(mock_log_error.called) self.assertTrue(driver_call.called) @ddt.data(True, False) def test_create_share_replica(self, has_snapshots): replica = fake_replica( share_network='', replica_state=constants.REPLICA_STATE_IN_SYNC, access_rules_status='active') replica_2 = fake_replica(id='fake2') snapshots = ([fakes.fake_snapshot(create_instance=True)] if has_snapshots else []) snapshot_instances = [ fakes.fake_snapshot_instance(share_instance_id=replica['id']), fakes.fake_snapshot_instance(share_instance_id='fake2'), ] fake_access_rules = [{'id': '1'}, {'id': '2'}, {'id': '3'}] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_instance_access_copy', mock.Mock(return_value=fake_access_rules)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_2)) self.mock_object(self.share_manager, '_provide_share_server_for_share', mock.Mock(return_value=('FAKE_SERVER', replica))) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) self.mock_object(db, 'share_snapshot_get_all_for_share', mock.Mock( return_value=snapshots)) mock_instance_get_call = self.mock_object( 
db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_export_locs_update_call = self.mock_object( db, 'export_locations_update') mock_log_info = self.mock_object(manager.LOG, 'info') mock_log_warning = self.mock_object(manager.LOG, 'warning') mock_log_error = self.mock_object(manager.LOG, 'warning') self.mock_object(db, 'share_instance_access_get', mock.Mock(return_value=fake_access_rules[0])) mock_share_replica_access_rule_update = self.mock_object( self.share_manager.access_helper, 'get_and_update_share_instance_access_rules') mock_share_replica_access_state_update = self.mock_object( self.share_manager, '_update_share_instance_access_rules_state') driver_call = self.mock_object( self.share_manager.driver, 'create_replica', mock.Mock(return_value=replica)) self.mock_object(self.share_manager, '_get_share_server') self.share_manager.create_share_replica(self.context, replica) mock_replica_update_call.assert_called_once_with( utils.IsAMatcher(context.RequestContext), replica['id'], {'status': constants.STATUS_AVAILABLE, 'replica_state': constants.REPLICA_STATE_IN_SYNC, 'progress': '100%'}) mock_share_replica_access_rule_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_instance_id=replica['id'], conditionally_change={'queued_to_apply': 'active'}) mock_share_replica_access_state_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), replica['id'], constants.STATUS_ACTIVE) self.assertTrue(mock_export_locs_update_call.called) self.assertTrue(mock_log_info.called) self.assertFalse(mock_log_warning.called) self.assertFalse(mock_log_error.called) self.assertTrue(driver_call.called) call_args = driver_call.call_args_list[0][0] replica_list_arg = call_args[1] snapshot_list_arg = call_args[4] r_ids = [r['id'] for r in replica_list_arg] for r in (replica, replica_2): self.assertIn(r['id'], r_ids) 
self.assertEqual(2, len(r_ids)) if has_snapshots: for snapshot_dict in snapshot_list_arg: self.assertIn('active_replica_snapshot', snapshot_dict) self.assertIn('share_replica_snapshot', snapshot_dict) else: self.assertFalse(mock_instance_get_call.called) def test_delete_share_replica_access_rules_exception(self): replica = fake_replica() replica_2 = fake_replica(id='fake_2') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, replica_2])) active_replica = fake_replica( id='Current_active_replica', replica_state=constants.REPLICA_STATE_ACTIVE) mock_exception_log = self.mock_object(manager.LOG, 'exception') self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(self.share_manager.access_helper, 'update_access_rules') mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica') self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self.share_manager.delete_share_replica, self.context, replica['id'], share_id=replica['share_id']) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR}) self.assertFalse(mock_drv_delete_replica_call.called) self.assertFalse(mock_replica_delete_call.called) self.assertFalse(mock_exception_log.called) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.DELETE_ACCESS_RULES, replica['project_id'], resource_type=message_field.Resource.SHARE_REPLICA, resource_id=replica['id'], 
exception=mock.ANY) def test_delete_share_replica_drv_misbehavior_ignored_with_the_force(self): replica = fake_replica() active_replica = fake_replica(id='Current_active_replica') mock_exception_log = self.mock_object(manager.LOG, 'exception') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.access_helper, 'update_access_rules') self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[])) mock_snap_instance_delete = self.mock_object( db, 'share_snapshot_instance_delete') mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica', mock.Mock(side_effect=exception.ManilaException)) self.mock_object( self.share_manager.access_helper, 'update_access_rules') self.share_manager.delete_share_replica( self.context, replica['id'], share_id=replica['share_id'], force=True) self.assertFalse(mock_replica_update_call.called) self.assertTrue(mock_replica_delete_call.called) self.assertEqual(1, mock_exception_log.call_count) self.assertTrue(mock_drv_delete_replica_call.called) self.assertFalse(mock_snap_instance_delete.called) def test_delete_share_replica_driver_exception(self): replica = fake_replica() active_replica = fake_replica(id='Current_active_replica') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', 
mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_snapshot_get_call = self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[])) mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') self.mock_object( self.share_manager.access_helper, 'update_access_rules') mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica', mock.Mock(side_effect=exception.ManilaException)) self.assertRaises(exception.ManilaException, self.share_manager.delete_share_replica, self.context, replica['id'], share_id=replica['share_id']) self.assertTrue(mock_replica_update_call.called) self.assertFalse(mock_replica_delete_call.called) self.assertTrue(mock_drv_delete_replica_call.called) self.assertTrue(mock_snapshot_get_call.called) def test_delete_share_replica_both_exceptions_ignored_with_the_force(self): replica = fake_replica() active_replica = fake_replica(id='Current_active_replica') snapshots = [ fakes.fake_snapshot(share_id=replica['id'], status=constants.STATUS_AVAILABLE), fakes.fake_snapshot(share_id=replica['id'], id='test_creating_to_err', status=constants.STATUS_CREATING) ] self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) mock_exception_log = self.mock_object(manager.LOG, 'exception') self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshots)) mock_snapshot_instance_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_replica_update_call = 
self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(side_effect=exception.ManilaException)) mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica', mock.Mock(side_effect=exception.ManilaException)) self.share_manager.delete_share_replica( self.context, replica['id'], share_id=replica['share_id'], force=True) mock_replica_update_call.assert_called_once_with( mock.ANY, replica['id'], {'status': constants.STATUS_ERROR}) self.assertTrue(mock_replica_delete_call.called) self.assertEqual(2, mock_exception_log.call_count) self.assertTrue(mock_drv_delete_replica_call.called) self.assertEqual(2, mock_snapshot_instance_delete_call.call_count) def test_delete_share_replica(self): replica = fake_replica() active_replica = fake_replica(id='current_active_replica') snapshots = [ fakes.fake_snapshot(share_id=replica['share_id'], status=constants.STATUS_AVAILABLE), fakes.fake_snapshot(share_id=replica['share_id'], id='test_creating_to_err', status=constants.STATUS_CREATING) ] self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshots)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=active_replica)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_info_log = self.mock_object(manager.LOG, 'info') mock_snapshot_instance_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_replica_update_call = self.mock_object(db, 'share_replica_update') mock_replica_delete_call = self.mock_object(db, 'share_replica_delete') self.mock_object( self.share_manager.access_helper, 
'update_access_rules') mock_drv_delete_replica_call = self.mock_object( self.share_manager.driver, 'delete_replica') self.share_manager.delete_share_replica(self.context, replica) self.assertFalse(mock_replica_update_call.called) self.assertTrue(mock_replica_delete_call.called) self.assertTrue(mock_info_log.called) self.assertTrue(mock_drv_delete_replica_call.called) self.assertEqual(2, mock_snapshot_instance_delete_call.call_count) def test_promote_share_replica_no_active_replica(self): replica = fake_replica() replica_list = [replica] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_replicas_get_available_active_replica', mock.Mock(return_value=replica_list)) mock_info_log = self.mock_object(manager.LOG, 'info') mock_driver_call = self.mock_object(self.share_manager.driver, 'promote_replica') mock_replica_update = self.mock_object(db, 'share_replica_update') expected_update_call = mock.call( mock.ANY, replica['id'], {'status': constants.STATUS_AVAILABLE}) self.assertRaises(exception.ReplicationException, self.share_manager.promote_share_replica, self.context, replica) self.assertFalse(mock_info_log.called) self.assertFalse(mock_driver_call.called) mock_replica_update.assert_has_calls([expected_update_call]) def test_promote_share_replica_driver_exception(self): replica = fake_replica() active_replica = fake_replica( id='current_active_replica', replica_state=constants.REPLICA_STATE_ACTIVE) replica_list = [replica, active_replica] self.mock_object(db, 'share_access_get_all_for_share', mock.Mock(return_value=[])) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replica_list)) self.mock_object(self.share_manager.driver, 'promote_replica', mock.Mock(side_effect=exception.ManilaException)) 
mock_info_log = self.mock_object(manager.LOG, 'info') mock_replica_update = self.mock_object(db, 'share_replica_update') expected_update_calls = [mock.call( mock.ANY, r['id'], {'status': constants.STATUS_ERROR}) for r in (replica, active_replica)] self.assertRaises(exception.ManilaException, self.share_manager.promote_share_replica, self.context, replica) mock_replica_update.assert_has_calls(expected_update_calls) self.assertFalse(mock_info_log.called) expected_message_calls = [ mock.call( utils.IsAMatcher(context.RequestContext), message_field.Action.PROMOTE, r['project_id'], resource_type=message_field.Resource.SHARE_REPLICA, resource_id=r['id'], exception=mock.ANY) for r in (replica, active_replica)] self.share_manager.message_api.create.assert_has_calls( expected_message_calls) @ddt.data([], None) def test_promote_share_replica_driver_update_nothing_has_snaps(self, retval): replica = fake_replica( replication_type=constants.REPLICATION_TYPE_READABLE) active_replica = fake_replica( id='current_active_replica', replica_state=constants.REPLICA_STATE_ACTIVE) snapshots_instances = [ fakes.fake_snapshot(create_instance=True, share_id=replica['share_id'], status=constants.STATUS_AVAILABLE), fakes.fake_snapshot(create_instance=True, share_id=replica['share_id'], id='test_creating_to_err', status=constants.STATUS_CREATING) ] replica_list = [replica, active_replica] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_access_get_all_for_share', mock.Mock(return_value=[])) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replica_list)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshots_instances)) self.mock_object( self.share_manager.driver, 'promote_replica', mock.Mock(return_value=retval)) mock_snap_instance_update = self.mock_object( db, 'share_snapshot_instance_update') 
mock_info_log = self.mock_object(manager.LOG, 'info') mock_export_locs_update = self.mock_object( db, 'export_locations_update') mock_replica_update = self.mock_object(db, 'share_replica_update') call_1 = mock.call(mock.ANY, replica['id'], {'status': constants.STATUS_AVAILABLE, 'replica_state': constants.REPLICA_STATE_ACTIVE, 'cast_rules_to_readonly': False}) call_2 = mock.call( mock.ANY, 'current_active_replica', {'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, 'cast_rules_to_readonly': True}) expected_update_calls = [call_1, call_2] self.share_manager.promote_share_replica(self.context, replica) self.assertFalse(mock_export_locs_update.called) mock_replica_update.assert_has_calls(expected_update_calls, any_order=True) mock_snap_instance_update.assert_called_once_with( mock.ANY, 'test_creating_to_err', {'status': constants.STATUS_ERROR}) self.assertEqual(2, mock_info_log.call_count) @ddt.data(constants.REPLICATION_TYPE_READABLE, constants.REPLICATION_TYPE_WRITABLE, constants.REPLICATION_TYPE_DR) def test_promote_share_replica_driver_updates_replica_list(self, rtype): replica = fake_replica(replication_type=rtype) active_replica = fake_replica( id='current_active_replica', replica_state=constants.REPLICA_STATE_ACTIVE) replica_list = [ replica, active_replica, fake_replica(id=3), fake_replica(id='one_more_replica'), ] updated_replica_list = [ { 'id': replica['id'], 'export_locations': ['TEST1', 'TEST2'], 'replica_state': constants.REPLICA_STATE_ACTIVE, }, { 'id': 'current_active_replica', 'export_locations': 'junk_return_value', 'replica_state': constants.REPLICA_STATE_IN_SYNC, }, { 'id': 'other_replica', 'export_locations': ['TEST3', 'TEST4'], }, { 'id': replica_list[3]['id'], 'export_locations': ['TEST5', 'TEST6'], 'replica_state': constants.REPLICA_STATE_IN_SYNC, }, ] self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[])) 
self.mock_object(db, 'share_access_get_all_for_share', mock.Mock(return_value=[])) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replica_list)) mock_snap_instance_update = self.mock_object( db, 'share_snapshot_instance_update') self.mock_object( self.share_manager.driver, 'promote_replica', mock.Mock(return_value=updated_replica_list)) mock_info_log = self.mock_object(manager.LOG, 'info') mock_export_locs_update = self.mock_object( db, 'export_locations_update') mock_replica_update = self.mock_object(db, 'share_replica_update') reset_replication_change_updates = { 'replica_state': constants.STATUS_ACTIVE, 'status': constants.STATUS_AVAILABLE, 'cast_rules_to_readonly': False, } demoted_replica_updates = { 'replica_state': constants.REPLICA_STATE_IN_SYNC, 'cast_rules_to_readonly': False, } if rtype == constants.REPLICATION_TYPE_READABLE: demoted_replica_updates['cast_rules_to_readonly'] = True reset_replication_change_call = mock.call( mock.ANY, replica['id'], reset_replication_change_updates) demoted_replica_update_call = mock.call( mock.ANY, active_replica['id'], demoted_replica_updates ) additional_replica_update_call = mock.call( mock.ANY, replica_list[3]['id'], { 'replica_state': constants.REPLICA_STATE_IN_SYNC, } ) self.share_manager.promote_share_replica(self.context, replica) self.assertEqual(3, mock_export_locs_update.call_count) mock_replica_update.assert_has_calls([ demoted_replica_update_call, additional_replica_update_call, reset_replication_change_call, ]) self.assertTrue(mock_info_log.called) self.assertFalse(mock_snap_instance_update.called) @ddt.data('openstack1@watson#_pool0', 'openstack1@newton#_pool0') def test_periodic_share_replica_update(self, host): mock_debug_log = self.mock_object(manager.LOG, 'debug') replicas = [ fake_replica(host='openstack1@watson#pool4'), fake_replica(host='openstack1@watson#pool5'), fake_replica(host='openstack1@newton#pool5'), 
fake_replica(host='openstack1@newton#pool5'), ] self.mock_object(self.share_manager.db, 'share_replicas_get_all', mock.Mock(return_value=replicas)) mock_update_method = self.mock_object( self.share_manager, '_share_replica_update') self.share_manager.host = host self.share_manager.periodic_share_replica_update(self.context) self.assertEqual(2, mock_update_method.call_count) self.assertEqual(1, mock_debug_log.call_count) @ddt.data(constants.REPLICA_STATE_IN_SYNC, constants.REPLICA_STATE_OUT_OF_SYNC) def test__share_replica_update_driver_exception(self, replica_state): mock_debug_log = self.mock_object(manager.LOG, 'debug') replica = fake_replica(replica_state=replica_state) active_replica = fake_replica( replica_state=constants.REPLICA_STATE_ACTIVE) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(self.share_manager.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_server_get', mock.Mock(return_value=fakes.fake_share_server_get())) self.mock_object(self.share_manager.driver, 'update_replica_state', mock.Mock(side_effect=exception.ManilaException)) mock_db_update_call = self.mock_object( self.share_manager.db, 'share_replica_update') self.share_manager._share_replica_update( self.context, replica['id'], share_id=replica['share_id']) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'replica_state': constants.STATUS_ERROR, 'status': constants.STATUS_ERROR} ) self.assertEqual(1, mock_debug_log.call_count) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.UPDATE, replica['project_id'], resource_type=message_field.Resource.SHARE_REPLICA, resource_id=replica['id'], exception=mock.ANY) def test__share_replica_update_driver_exception_ignored(self): mock_debug_log = self.mock_object(manager.LOG, 'debug') replica = fake_replica(replica_state=constants.STATUS_ERROR) 
active_replica = fake_replica(replica_state=constants.STATUS_ACTIVE) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(self.share_manager.db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_server_get', mock.Mock(return_value={})) self.share_manager.host = replica['host'] self.mock_object(self.share_manager.driver, 'update_replica_state', mock.Mock(side_effect=exception.ManilaException)) mock_db_update_call = self.mock_object( self.share_manager.db, 'share_replica_update') self.share_manager._share_replica_update( self.context, replica['id'], share_id=replica['share_id']) mock_db_update_call.assert_called_once_with( self.context, replica['id'], {'replica_state': constants.STATUS_ERROR, 'status': constants.STATUS_ERROR} ) self.assertEqual(1, mock_debug_log.call_count) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.UPDATE, replica['project_id'], resource_type=message_field.Resource.SHARE_REPLICA, resource_id=replica['id'], exception=mock.ANY) @ddt.data({'status': constants.STATUS_AVAILABLE, 'replica_state': constants.REPLICA_STATE_ACTIVE, }, {'status': constants.STATUS_DELETING, 'replica_state': constants.REPLICA_STATE_IN_SYNC, }, {'status': constants.STATUS_CREATING, 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, }, {'status': constants.STATUS_MANAGING, 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC, }, {'status': constants.STATUS_UNMANAGING, 'replica_state': constants.REPLICA_STATE_ACTIVE, }, {'status': constants.STATUS_EXTENDING, 'replica_state': constants.REPLICA_STATE_IN_SYNC, }, {'status': constants.STATUS_SHRINKING, 'replica_state': constants.REPLICA_STATE_IN_SYNC, }) def test__share_replica_update_unqualified_replica(self, state): mock_debug_log = self.mock_object(manager.LOG, 'debug') mock_warning_log = self.mock_object(manager.LOG, 'warning') mock_driver_call = 
self.mock_object( self.share_manager.driver, 'update_replica_state') mock_db_update_call = self.mock_object( self.share_manager.db, 'share_replica_update') replica = fake_replica(**state) self.mock_object(db, 'share_server_get', mock.Mock(return_value='fake_share_server')) self.mock_object(db, 'share_replica_get', mock.Mock(return_value=replica)) self.share_manager._share_replica_update( self.context, replica['id'], share_id=replica['share_id']) self.assertFalse(mock_debug_log.called) self.assertFalse(mock_warning_log.called) self.assertFalse(mock_driver_call.called) self.assertFalse(mock_db_update_call.called) @ddt.data(None, constants.REPLICA_STATE_IN_SYNC, constants.REPLICA_STATE_OUT_OF_SYNC, constants.REPLICA_STATE_ACTIVE, constants.STATUS_ERROR) def test__share_replica_update(self, retval): mock_debug_log = self.mock_object(manager.LOG, 'debug') mock_warning_log = self.mock_object(manager.LOG, 'warning') replica_states = [constants.REPLICA_STATE_IN_SYNC, constants.REPLICA_STATE_OUT_OF_SYNC] replica = fake_replica(replica_state=random.choice(replica_states), share_server=fakes.fake_share_server_get()) active_replica = fake_replica( id='fake2', replica_state=constants.STATUS_ACTIVE) snapshots = [fakes.fake_snapshot( create_instance=True, aggregate_status=constants.STATUS_AVAILABLE)] snapshot_instances = [ fakes.fake_snapshot_instance(share_instance_id=replica['id']), fakes.fake_snapshot_instance(share_instance_id='fake2'), ] del replica['availability_zone'] self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica, active_replica])) self.mock_object(db, 'share_server_get', mock.Mock(return_value=fakes.fake_share_server_get())) mock_db_update_calls = [] self.mock_object(self.share_manager.db, 'share_replica_get', mock.Mock(return_value=replica)) mock_driver_call = self.mock_object( self.share_manager.driver, 'update_replica_state', mock.Mock(return_value=retval)) mock_db_update_call = self.mock_object( self.share_manager.db, 
            'share_replica_update')
        self.mock_object(db, 'share_snapshot_get_all_for_share',
                         mock.Mock(return_value=snapshots))
        self.mock_object(db, 'share_snapshot_instance_get_all_with_filters',
                         mock.Mock(return_value=snapshot_instances))

        self.share_manager._share_replica_update(
            self.context, replica['id'], share_id=replica['share_id'])

        # A driver returning 'active' for a non-active replica is unexpected
        # and is surfaced as exactly one warning; any other truthy state
        # returned warns nothing.
        if retval == constants.REPLICA_STATE_ACTIVE:
            self.assertEqual(1, mock_warning_log.call_count)
        elif retval:
            self.assertEqual(0, mock_warning_log.call_count)
        self.assertTrue(mock_driver_call.called)
        # Positional arg 4 of driver.update_replica_state is the snapshot
        # list; each entry must carry both snapshot views.
        # pylint: disable=unsubscriptable-object
        snapshot_list_arg = mock_driver_call.call_args[0][4]
        # pylint: enable=unsubscriptable-object
        self.assertIn('active_replica_snapshot', snapshot_list_arg[0])
        self.assertIn('share_replica_snapshot', snapshot_list_arg[0])
        mock_db_update_call.assert_has_calls(mock_db_update_calls)
        self.assertEqual(1, mock_debug_log.call_count)

    def test_update_share_replica_replica_not_found(self):
        # A missing replica must be ignored: no driver call, no error raised.
        replica = fake_replica()
        self.mock_object(
            self.share_manager.db, 'share_replica_get',
            mock.Mock(
                side_effect=exception.ShareReplicaNotFound(replica_id='fake')))
        self.mock_object(self.share_manager, '_get_share_server')
        driver_call = self.mock_object(
            self.share_manager.driver, 'update_replica_state')

        retval = self.share_manager.update_share_replica(
            self.context, replica['id'], share_id=replica['share_id'])

        self.assertIsNone(retval)
        self.assertFalse(driver_call.called)

    def test_update_share_replica_replica(self):
        # Happy path: update_share_replica delegates to _share_replica_update
        # and returns nothing.
        replica_update_call = self.mock_object(
            self.share_manager, '_share_replica_update')
        self.mock_object(self.share_manager.db, 'share_replica_get')

        retval = self.share_manager.update_share_replica(
            self.context, 'fake_replica_id', share_id='fake_share_id')

        self.assertIsNone(retval)
        self.assertTrue(replica_update_call.called)

    def _get_snapshot_instance_dict(self, snapshot_instance, share,
                                    snapshot=None):
        # Helper: builds the snapshot-instance dict the manager is expected
        # to hand to the driver (see the create/delete snapshot tests below).
        expected_snapshot_instance_dict = {
            'status': constants.STATUS_CREATING,
            'share_id': share['id'],
            'share_name':
snapshot_instance['share_name'], 'deleted': snapshot_instance['deleted'], 'share': share, 'updated_at': snapshot_instance['updated_at'], 'snapshot_id': snapshot_instance['snapshot_id'], 'id': snapshot_instance['id'], 'name': snapshot_instance['name'], 'created_at': snapshot_instance['created_at'], 'share_instance_id': snapshot_instance['share_instance_id'], 'progress': snapshot_instance['progress'], 'deleted_at': snapshot_instance['deleted_at'], 'provider_location': snapshot_instance['provider_location'], } if snapshot: expected_snapshot_instance_dict.update({ 'size': snapshot['size'], }) return expected_snapshot_instance_dict def test_create_snapshot_driver_exception(self): def _raise_not_found(self, *args, **kwargs): raise exception.NotFound() share_id = 'FAKE_SHARE_ID' share = fakes.fake_share(id=share_id, instance={'id': 'fake_id'}) snapshot_instance = fakes.fake_snapshot_instance( share_id=share_id, share=share, name='fake_snapshot') snapshot = fakes.fake_snapshot( share_id=share_id, share=share, instance=snapshot_instance, project_id=self.context.project_id) snapshot_id = snapshot['id'] self.mock_object(self.share_manager.driver, "create_snapshot", mock.Mock(side_effect=_raise_not_found)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) db_update = self.mock_object( self.share_manager.db, 'share_snapshot_instance_update') expected_snapshot_instance_dict = self._get_snapshot_instance_dict( snapshot_instance, share) self.assertRaises(exception.NotFound, self.share_manager.create_snapshot, self.context, share_id, snapshot_id) db_update.assert_called_once_with(self.context, snapshot_instance['id'], {'status': constants.STATUS_ERROR}) self.share_manager.driver.create_snapshot.assert_called_once_with( self.context, 
expected_snapshot_instance_dict, share_server=None) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, snapshot['project_id'], resource_type=message_field.Resource.SHARE_SNAPSHOT, resource_id=snapshot_instance['id'], exception=mock.ANY) @ddt.data({'model_update': {}, 'mount_snapshot_support': True}, {'model_update': {}, 'mount_snapshot_support': False}, {'model_update': {'export_locations': [ {'path': '/path1', 'is_admin_only': True}, {'path': '/path2', 'is_admin_only': False} ]}, 'mount_snapshot_support': True}, {'model_update': {'export_locations': [ {'path': '/path1', 'is_admin_only': True}, {'path': '/path2', 'is_admin_only': False} ]}, 'mount_snapshot_support': False}) @ddt.unpack def test_create_snapshot(self, model_update, mount_snapshot_support): export_locations = model_update.get('export_locations') share_id = 'FAKE_SHARE_ID' share = fakes.fake_share( id=share_id, instance={'id': 'fake_id'}, mount_snapshot_support=mount_snapshot_support) snapshot_instance = fakes.fake_snapshot_instance( share_id=share_id, share=share, name='fake_snapshot') snapshot = fakes.fake_snapshot( share_id=share_id, share=share, instance=snapshot_instance) snapshot_id = snapshot['id'] self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_export_update = self.mock_object( self.share_manager.db, 'share_snapshot_instance_export_location_create') expected_update_calls = [ mock.call(self.context, snapshot_instance['id'], {'status': constants.STATUS_AVAILABLE, 'progress': '100%'}) ] expected_snapshot_instance_dict = self._get_snapshot_instance_dict( snapshot_instance, share) self.mock_object( self.share_manager.driver, 'create_snapshot', 
mock.Mock(return_value=model_update)) db_update = self.mock_object( self.share_manager.db, 'share_snapshot_instance_update') return_value = self.share_manager.create_snapshot( self.context, share_id, snapshot_id) self.assertIsNone(return_value) self.share_manager.driver.create_snapshot.assert_called_once_with( self.context, expected_snapshot_instance_dict, share_server=None) db_update.assert_has_calls(expected_update_calls, any_order=True) if mount_snapshot_support and export_locations: snap_ins_id = snapshot.instance['id'] for i in range(0, 2): export_locations[i]['share_snapshot_instance_id'] = snap_ins_id mock_export_update.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), export_locations[0]), mock.call(utils.IsAMatcher(context.RequestContext), export_locations[1]), ]) else: mock_export_update.assert_not_called() @ddt.data(exception.ShareSnapshotIsBusy(snapshot_name='fake_name'), exception.NotFound()) def test_delete_snapshot_driver_exception(self, exc): share_id = 'FAKE_SHARE_ID' share = fakes.fake_share(id=share_id, instance={'id': 'fake_id'}, mount_snapshot_support=True) snapshot_instance = fakes.fake_snapshot_instance( share_id=share_id, share=share, name='fake_snapshot') snapshot = fakes.fake_snapshot( share_id=share_id, share=share, instance=snapshot_instance, project_id=self.context.project_id) snapshot_id = snapshot['id'] update_access = self.mock_object( self.share_manager.snapshot_access_helper, 'update_access_rules') self.mock_object(self.share_manager.driver, "delete_snapshot", mock.Mock(side_effect=exc)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object( self.share_manager.db, 'share_get', mock.Mock(return_value=share)) db_update = self.mock_object( 
self.share_manager.db, 'share_snapshot_instance_update') db_destroy_call = self.mock_object( self.share_manager.db, 'share_snapshot_instance_delete') expected_snapshot_instance_dict = self._get_snapshot_instance_dict( snapshot_instance, share) mock_exception_log = self.mock_object(manager.LOG, 'exception') self.assertRaises(type(exc), self.share_manager.delete_snapshot, self.context, snapshot_id) db_update.assert_called_once_with( mock.ANY, snapshot_instance['id'], {'status': constants.STATUS_ERROR_DELETING}) self.share_manager.driver.delete_snapshot.assert_called_once_with( mock.ANY, expected_snapshot_instance_dict, share_server=None) update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot_instance['id'], delete_all_rules=True, share_server=None) self.assertFalse(db_destroy_call.called) self.assertFalse(mock_exception_log.called) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.DELETE, snapshot['project_id'], resource_type=message_field.Resource.SHARE_SNAPSHOT, resource_id=snapshot_instance['id'], exception=mock.ANY) def test_delete_snapshot_deferred_delete_quota_error(self): share_id = 'FAKE_SHARE_ID' share = fakes.fake_share(id=share_id) snapshot_instance = fakes.fake_snapshot_instance( share_id=share_id, share=share, name='fake_snapshot', status=constants.STATUS_DEFERRED_DELETING) snapshot = fakes.fake_snapshot( share_id=share_id, share=share, instance=snapshot_instance, project_id=self.context.project_id, size=1) snapshot_id = snapshot['id'] self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object( 
self.share_manager.db, 'share_snapshot_instance_delete') self.mock_object(self.share_manager.driver, 'delete_snapshot') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exception.QuotaError(code=500))) quota_commit_call = self.mock_object(quota.QUOTAS, 'commit') self.share_manager.delete_snapshot(self.context, snapshot_id, deferred_delete=True) self.share_manager.db.share_snapshot_instance_delete.assert_called() self.share_manager.driver.delete_snapshot.assert_called() quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, project_id=self.context.project_id, snapshots=-1, snapshot_gigabytes=-snapshot['size'], user_id=snapshot['user_id'], share_type_id=share['instance']['share_type_id']) self.assertEqual(False, quota_commit_call.called) self.assertTrue(self.share_manager.driver.delete_snapshot.called) @ddt.data(True, False) def test_delete_snapshot_with_quota_error(self, quota_error): share_id = 'FAKE_SHARE_ID' share = fakes.fake_share(id=share_id) snapshot_instance = fakes.fake_snapshot_instance( share_id=share_id, share=share, name='fake_snapshot') snapshot = fakes.fake_snapshot( share_id=share_id, share=share, instance=snapshot_instance, project_id=self.context.project_id, size=1) snapshot_id = snapshot['id'] self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_exception_log = self.mock_object(manager.LOG, 'exception') expected_exc_count = 1 if quota_error else 0 expected_snapshot_instance_dict = self._get_snapshot_instance_dict( snapshot_instance, share) self.mock_object(self.share_manager.driver, 'delete_snapshot') db_update_call = self.mock_object( self.share_manager.db, 'share_snapshot_instance_update') 
snapshot_destroy_call = self.mock_object( self.share_manager.db, 'share_snapshot_instance_delete') side_effect = exception.QuotaError(code=500) if quota_error else None self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=side_effect)) quota_commit_call = self.mock_object(quota.QUOTAS, 'commit') retval = self.share_manager.delete_snapshot( self.context, snapshot_id) self.assertIsNone(retval) self.share_manager.driver.delete_snapshot.assert_called_once_with( mock.ANY, expected_snapshot_instance_dict, share_server=None) self.assertFalse(db_update_call.called) self.assertTrue(snapshot_destroy_call.called) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, project_id=self.context.project_id, snapshots=-1, snapshot_gigabytes=-snapshot['size'], user_id=snapshot['user_id'], share_type_id=share['instance']['share_type_id']) self.assertEqual(not quota_error, quota_commit_call.called) self.assertEqual(quota_error, mock_exception_log.called) self.assertEqual(expected_exc_count, mock_exception_log.call_count) @ddt.data(exception.ShareSnapshotIsBusy(snapshot_name='fake_snapshot'), exception.ManilaException) def test_delete_snapshot_ignore_exceptions_with_the_force(self, exc): def _raise_quota_error(): raise exception.QuotaError(code='500') share_id = 'FAKE_SHARE_ID' share = fakes.fake_share(id=share_id) snapshot_instance = fakes.fake_snapshot_instance( share_id=share_id, share=share, name='fake_snapshot') snapshot = fakes.fake_snapshot( share_id=share_id, share=share, instance=snapshot_instance, project_id=self.context.project_id, size=1) snapshot_id = snapshot['id'] self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_exception_log = 
self.mock_object(manager.LOG, 'exception') self.mock_object(self.share_manager.driver, 'delete_snapshot', mock.Mock(side_effect=exc)) db_update_call = self.mock_object( self.share_manager.db, 'share_snapshot_instance_update') snapshot_destroy_call = self.mock_object( self.share_manager.db, 'share_snapshot_instance_delete') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=_raise_quota_error)) quota_commit_call = self.mock_object(quota.QUOTAS, 'commit') retval = self.share_manager.delete_snapshot( self.context, snapshot_id, force=True) self.assertIsNone(retval) self.assertEqual(2, mock_exception_log.call_count) snapshot_destroy_call.assert_called_once_with( mock.ANY, snapshot_instance['id']) self.assertFalse(quota_commit_call.called) self.assertFalse(db_update_call.called) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.DELETE, snapshot['project_id'], resource_type=message_field.Resource.SHARE_SNAPSHOT, resource_id=snapshot_instance['id'], exception=mock.ANY) def test_do_deferred_snapshot_deletion(self): instance_1 = db_utils.create_share_instance( share_id='fake_id', share_type_id='fake_type_id') instance_2 = db_utils.create_share_instance( share_id='fake_id', share_type_id='fake_type_id') share = db_utils.create_share( id='fake_id', instances=[instance_1, instance_2]) snapshot = db_utils.create_snapshot(share_id=share['id']) db_utils.create_snapshot_instance( snapshot_id=snapshot['id'], share_instance_id=instance_1['id'], status='error_deferred_deleting') db_utils.create_snapshot_instance( snapshot_id=snapshot['id'], share_instance_id=instance_2['id'], status='error_deferred_deleting') self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_delete_snapshot = self.mock_object( self.share_manager.driver, 'delete_snapshot') self.mock_object(self.share_manager.db, 'share_snapshot_instance_update') 
self.share_manager.do_deferred_snapshot_deletion(self.context) self.assertEqual(2, mock_delete_snapshot.call_count) def test_do_deferred_snapshot_deletion_exception(self): instance_1 = db_utils.create_share_instance( share_id='fake_id', share_type_id='fake_type_id') share = db_utils.create_share( id='fake_id', instances=[instance_1]) snapshot = db_utils.create_snapshot(share_id=share['id']) db_utils.create_snapshot_instance( snapshot_id=snapshot['id'], share_instance_id=instance_1['id'], status='error_deferred_deleting') self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object( self.share_manager.driver, 'delete_snapshot', mock.Mock(side_effect=exception.ManilaException)) self.mock_object(self.share_manager.db, 'share_snapshot_instance_update') mock_delete_snapshot_db = self.mock_object( self.share_manager.db, 'share_snapshot_instance_delete') self.share_manager.do_deferred_snapshot_deletion(self.context) mock_delete_snapshot_db.assert_not_called() def test_create_share_instance_with_share_network_dhss_false(self): manager.CONF.set_default('driver_handles_share_servers', False) self.mock_object( self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=False)) share_network_id = 'fake_sn' share_type = db_utils.create_share_type() share = db_utils.create_share( share_network_id=share_network_id, share_type_id=share_type['id']) share_instance = share.instance self.mock_object( self.share_manager.db, 'share_instance_get', mock.Mock(return_value=share_instance)) self.mock_object(self.share_manager.db, 'share_instance_update') self.assertRaisesRegex( exception.ManilaException, '.*%s.*' % share_instance['id'], self.share_manager.create_share_instance, self.context, share_instance['id']) self.share_manager.db.share_instance_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_instance['id'], with_share_data=True ) self.share_manager.db.share_instance_update.assert_called_once_with( 
utils.IsAMatcher(context.RequestContext), share_instance['id'], {'status': constants.STATUS_ERROR}) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, str(share.project_id), resource_type=message_field.Resource.SHARE, resource_id=share['id'], detail=mock.ANY) def test_create_share_instance_with_share_network_server_not_exists(self): """Test share can be created without share server.""" share_net = db_utils.create_share_network() share_net_subnet = db_utils.create_share_network_subnet( share_network_id=share_net['id'], availability_zone_id=None, ) share_type = db_utils.create_share_type() share = db_utils.create_share( share_network_id=share_net['id'], share_type_id=share_type['id']) share_id = share['id'] def fake_setup_server(context, share_network, *args, **kwargs): return db_utils.create_share_server( share_network_subnet_id=share_net_subnet['id'], host='fake_host') self.mock_object(manager.LOG, 'info') self.share_manager.driver.create_share = mock.Mock( return_value='fake_location') self.share_manager._setup_server = fake_setup_server self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) manager.LOG.info.assert_called_with(mock.ANY, share.instance['id']) def test_create_share_instance_with_network_port_limit_exceeded(self): share_network = db_utils.create_share_network(id='fake_sn_id') share_net_subnet = db_utils.create_share_network_subnet( id='fake_sns_id', share_network_id=share_network['id'] ) share_type = db_utils.create_share_type() fake_share = db_utils.create_share( share_network_id=share_network['id'], size=1, share_type_id=share_type['id']) fake_metadata = { 'request_host': 'fake_host', 'share_type_id': 'fake_share_type_id', } fake_server = db_utils.create_share_server( id='fake_srv_id', status=constants.STATUS_CREATING, share_network_subnets=[share_net_subnet]) 
self.mock_object(self.share_manager, '_build_server_metadata', mock.Mock(return_value=fake_metadata)) self.mock_object(db, 'share_server_create', mock.Mock(return_value=fake_server)) self.mock_object(db, 'share_instance_update', mock.Mock(return_value=fake_share.instance)) self.mock_object(db, 'share_instance_get', mock.Mock(return_value=fake_share.instance)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=[share_net_subnet])) self.mock_object(manager.LOG, 'error') def raise_manila_exception(*args, **kwargs): raise exception.PortLimitExceeded() self.mock_object(self.share_manager, '_setup_server', mock.Mock(side_effect=raise_manila_exception)) self.assertRaises( exception.PortLimitExceeded, self.share_manager.create_share_instance, self.context, fake_share.instance['id'], ) db.share_server_create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), mock.ANY) db.share_instance_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_share.instance['id'], {'status': constants.STATUS_ERROR}, ) ]) self.share_manager._setup_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_server, fake_metadata) manager.LOG.error.assert_called_with(mock.ANY, fake_share.instance['id']) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, str(fake_share.project_id), resource_type=message_field.Resource.SHARE, resource_id=fake_share['id'], detail=( message_field.Detail.SHARE_NETWORK_PORT_QUOTA_LIMIT_EXCEEDED)) def test_create_share_instance_with_ip_address_generation_failure(self): share_network = db_utils.create_share_network(id='fake_sn_id') share_net_subnet = db_utils.create_share_network_subnet( id='fake_sns_id', share_network_id=share_network['id'] ) share_type = db_utils.create_share_type() fake_share = db_utils.create_share( share_network_id=share_network['id'], size=1, 
share_type_id=share_type['id']) fake_metadata = { 'request_host': 'fake_host', 'share_type_id': 'fake_share_type_id', } fake_server = db_utils.create_share_server( id='fake_srv_id', status=constants.STATUS_CREATING, share_network_subnets=[share_net_subnet]) self.mock_object(self.share_manager, '_build_server_metadata', mock.Mock(return_value=fake_metadata)) self.mock_object(db, 'share_server_create', mock.Mock(return_value=fake_server)) self.mock_object(db, 'share_instance_update', mock.Mock(return_value=fake_share.instance)) self.mock_object(db, 'share_instance_get', mock.Mock(return_value=fake_share.instance)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=[share_net_subnet])) self.mock_object(manager.LOG, 'error') def raise_manila_exception(*args, **kwargs): raise exception.IpAddressGenerationFailureClient() self.mock_object(self.share_manager, '_setup_server', mock.Mock(side_effect=raise_manila_exception)) self.assertRaises( exception.IpAddressGenerationFailureClient, self.share_manager.create_share_instance, self.context, fake_share.instance['id'], ) db.share_server_create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), mock.ANY) db.share_instance_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_share.instance['id'], {'status': constants.STATUS_ERROR}, ) ]) self.share_manager._setup_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_server, fake_metadata) manager.LOG.error.assert_called_with(mock.ANY, fake_share.instance['id']) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, str(fake_share.project_id), resource_type=message_field.Resource.SHARE, resource_id=fake_share['id'], detail=( message_field.Detail.NEUTRON_SUBNET_FULL)) def test_create_share_instance_with_share_network_server_fail(self): share_network = 
db_utils.create_share_network(id='fake_sn_id') share_net_subnet = db_utils.create_share_network_subnet( id='fake_sns_id', share_network_id=share_network['id'] ) share_type = db_utils.create_share_type() fake_share = db_utils.create_share( share_network_id=share_network['id'], share_type_id=share_type['id'], size=1 ) fake_metadata = { 'request_host': 'fake_host', 'share_type_id': 'fake_share_type_id', } fake_server = db_utils.create_share_server( id='fake_srv_id', status=constants.STATUS_CREATING, share_network_subnets=[share_net_subnet]) self.mock_object(self.share_manager, '_build_server_metadata', mock.Mock(return_value=fake_metadata)) self.mock_object(db, 'share_server_create', mock.Mock(return_value=fake_server)) self.mock_object(db, 'share_instance_update', mock.Mock(return_value=fake_share.instance)) self.mock_object(db, 'share_instance_get', mock.Mock(return_value=fake_share.instance)) self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=[share_net_subnet])) self.mock_object(manager.LOG, 'error') def raise_share_server_not_found(*args, **kwargs): raise exception.ShareServerNotFound( share_server_id=fake_server['id']) def raise_manila_exception(*args, **kwargs): raise exception.ManilaException() self.mock_object(db, 'share_server_get_all_by_host_and_share_subnet_valid', mock.Mock(side_effect=raise_share_server_not_found)) self.mock_object(self.share_manager, '_setup_server', mock.Mock(side_effect=raise_manila_exception)) self.assertRaises( exception.ManilaException, self.share_manager.create_share_instance, self.context, fake_share.instance['id'], ) (db.share_server_get_all_by_host_and_share_subnet_valid. 
assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.share_manager.host, share_net_subnet['id'], )) db.share_server_create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), mock.ANY) db.share_instance_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), fake_share.instance['id'], {'status': constants.STATUS_ERROR}, ) ]) self.share_manager._setup_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_server, fake_metadata) manager.LOG.error.assert_called_with(mock.ANY, fake_share.instance['id']) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, str(fake_share.project_id), resource_type=message_field.Resource.SHARE, resource_id=fake_share['id'], detail=message_field.Detail.NO_SHARE_SERVER) def test_create_share_instance_with_share_network_subnet_not_found(self): """Test creation fails if share network not found.""" self.mock_object(manager.LOG, 'error') share_type = db_utils.create_share_type() share = db_utils.create_share(share_network_id='fake-net-id', share_type_id=share_type['id']) share_id = share['id'] self.assertRaises( exception.ShareNetworkSubnetNotFound, self.share_manager.create_share_instance, self.context, share.instance['id'] ) manager.LOG.error.assert_called_with(mock.ANY, share.instance['id']) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_ERROR, shr['status']) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, str(shr.project_id), resource_type=message_field.Resource.SHARE, resource_id=shr['id'], detail=message_field.Detail.NO_SHARE_SERVER) def test_create_share_instance_with_security_service_missing(self): """Test creation fails if security service association is missing.""" self.mock_object(self.share_manager, 'driver') 
self.share_manager.driver.driver_handles_share_servers = True self.share_manager.driver.\ dhss_mandatory_security_service_association = { 'fake_proto': ['fake_ss', 'fake_ss2', ] } ss_data = { 'name': 'fake_name', 'ou': 'fake_ou', 'domain': 'fake_domain', 'server': 'fake_server', 'dns_ip': 'fake_dns_ip', 'user': 'fake_user', 'type': 'fake_ss', 'password': 'fake_pass', } security_service = db_utils.create_security_service(**ss_data) share_net = db_utils.create_share_network() share_net_subnet = db_utils.create_share_network_subnet( share_network_id=share_net['id'], availability_zone_id=None, ) db.share_network_add_security_service(context.get_admin_context(), share_net['id'], security_service['id']) share_type = db_utils.create_share_type() share = db_utils.create_share( share_network_id=share_net['id'], share_proto='fake_proto', share_type_id=share_type['id'], ) db_utils.create_share_server( share_network_subnet_id=share_net_subnet['id'], host=self.share_manager.host, status=constants.STATUS_ERROR) fake_server = { 'id': 'fake_srv_id', 'status': constants.STATUS_CREATING, } fake_metadata = { 'request_host': 'fake_host', 'share_type_id': 'fake_share_type_id', } self.mock_object(self.share_manager, '_build_server_metadata', mock.Mock(return_value=fake_metadata)) self.mock_object(db, 'share_server_create', mock.Mock(return_value=fake_server)) self.mock_object(self.share_manager, '_setup_server', mock.Mock(return_value=fake_server)) self.assertRaises( exception.InvalidRequest, self.share_manager.create_share_instance, self.context, share.instance['id'] ) share = db.share_get(self.context, share['id']) self.assertEqual(constants.STATUS_ERROR, share['status']) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, str(share.project_id), resource_type=message_field.Resource.SHARE, resource_id=share['id'], detail=message_field.Detail.MISSING_SECURITY_SERVICE) @ddt.data( (True, 1, 3, 10, 0), 
(False, 1, 100, 5, 0), (True, 1, 10, 3, 0), (False, 1, 10, 10, 3), (False, 1, -1, 100, 3), (False, 1, 10, -1, 3), ) @ddt.unpack def test__check_share_server_backend_limits( self, with_share_instance, resource_size, max_shares, max_gigabytes, expected_share_servers_len): """Tests if servers aren't being reused when its limits are reached.""" # Creates three share servers to have a list of available share servers share_servers = [db_utils.create_share_server() for i in range(3)] share = db_utils.create_share() # Creates some share instances using the resource size share_instances = [ db_utils.create_share_instance( size=resource_size, share_id=share['id']) for i in range(3)] # Creates some snapshot instances to make sure they are being # accounted snapshot_instances = [ db_utils.create_snapshot( size=resource_size, share_id=share['id'])['instance'] for i in range(3)] kwargs = {} driver_mock = mock.Mock() # Sets the driver max shares per share server and max server size # configured value to be the one received in the test parameters driver_mock.max_shares_per_share_server = max_shares driver_mock.max_share_server_size = max_gigabytes self.share_manager.driver = driver_mock self.mock_object( db, 'share_instance_get_all_by_share_server', mock.Mock(return_value=share_instances)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) # NOTE(carloss): If with_share_instance, simulates the behavior where # the provide_share_server method call was not related to a request to # create a share group, where a share instance is not provided, neither # accounted, since it's a brand new group. When a share instance is # specified, it must be accounted to check if the creation of that # share instance in the given share server is going to exceed the # configured limit. 
if with_share_instance: share_instance = db_utils.create_share_instance( size=resource_size, share_id=share['id']) kwargs['share_instance'] = share_instance available_share_servers = ( self.share_manager._check_share_server_backend_limits( self.context, share_servers, **kwargs)) self.assertEqual( expected_share_servers_len, len(available_share_servers)) def test__check_share_server_backend_limits_migrating_share(self): """Tests if servers aren't being reused when its limits are reached.""" share_servers = [db_utils.create_share_server()] share = db_utils.create_share(status=constants.STATUS_MIGRATING_TO) resource_size = 1 driver_mock = mock.Mock() driver_mock.max_shares_per_share_server = 2 driver_mock.max_share_server_size = 2 share_instances = [ db_utils.create_share_instance( size=resource_size, share_id=share['id'], status=status, share_server_id=share_servers[0]['id']) for status in [ constants.STATUS_MIGRATING, constants.STATUS_MIGRATING_TO]] share_instance_ids = [ share_instances[0]['id'], share_instances[1]['id']] kwargs = {} self.share_manager.driver = driver_mock self.mock_object( db, 'share_instance_get_all_by_share_server', mock.Mock(return_value=share_instances)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[])) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(api.API, 'get_migrating_instances', mock.Mock(return_value=share_instance_ids)) self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share_instances[0])) # NOTE(carloss): If with_share_instance, simulates the behavior where # the provide_share_server method call was not related to a request to # create a share group, where a share instance is not provided, neither # accounted, since it's a brand new group. When a share instance is # specified, it must be accounted to check if the creation of that # share instance in the given share server is going to exceed the # configured limit. 
kwargs['share_instance'] = share_instances[1] available_share_servers = ( self.share_manager._check_share_server_backend_limits( self.context, share_servers, **kwargs)) self.assertEqual( 1, len(available_share_servers)) db.share_instance_get_all_by_share_server.assert_called_once_with( self.context, share_servers[0]['id'], with_share_data=True) (db.share_snapshot_instance_get_all_with_filters. assert_called_once_with( self.context, {"share_instance_ids": share_instance_ids}, with_share_data=True)) db.share_get.assert_called_once_with(self.context, share['id']) api.API.get_migrating_instances.assert_called_once_with(share) db.share_instance_get.assert_called_once_with( self.context, share_instances[0]['id']) def test__check_share_server_backend_limits_unlimited(self): driver_mock = mock.Mock() driver_mock.max_shares_per_share_server = -1 driver_mock.max_share_server_size = -1 self.share_manager.driver = driver_mock share_servers = [db_utils.create_share_server() for i in range(3)] available_share_servers = ( self.share_manager._check_share_server_backend_limits( self.context, share_servers)) self.assertEqual(share_servers, available_share_servers) def test_create_share_instance_with_share_network_server_exists(self): """Test share can be created with existing share server.""" share_net = db_utils.create_share_network() share_net_subnet = db_utils.create_share_network_subnet( share_network_id=share_net['id'], availability_zone_id=None, ) share_type = db_utils.create_share_type() share = db_utils.create_share(share_network_id=share_net['id'], share_type_id=share_type['id']) share_srv = db_utils.create_share_server( share_network_subnets=[share_net_subnet], host=self.share_manager.host) share_id = share['id'] self.mock_object(manager.LOG, 'info') driver_mock = mock.Mock() driver_mock.max_shares_per_share_server = -1 driver_mock.max_share_server_size = -1 driver_mock.create_share.return_value = "fake_location" 
driver_mock.choose_share_server_compatible_with_share.return_value = ( share_srv ) driver_mock.get_optional_share_creation_data.return_value = {} self.share_manager.driver = driver_mock self.share_manager.driver.\ dhss_mandatory_security_service_association = {} self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertFalse(self.share_manager.driver.setup_network.called) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEqual(shr['status'], constants.STATUS_AVAILABLE) self.assertEqual(shr['share_server_id'], share_srv['id']) self.assertGreater(len(shr['export_location']), 0) self.assertEqual(1, len(shr['export_locations'])) manager.LOG.info.assert_called_with(mock.ANY, share.instance['id']) @ddt.data('export_location', 'export_locations') def test_create_share_instance_with_error_in_driver(self, details_key): """Test db updates if share creation fails in driver.""" share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] some_data = 'fake_location' self.share_manager.driver = mock.Mock() e = exception.ManilaException(detail_data={details_key: some_data}) self.share_manager.driver.create_share.side_effect = e self.assertRaises( exception.ManilaException, self.share_manager.create_share_instance, self.context, share.instance['id'] ) self.assertTrue(self.share_manager.driver.create_share.called) shr = db.share_get(self.context, share_id) self.assertEqual(some_data, shr['export_location']) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.CREATE, str(share.project_id), resource_type=message_field.Resource.SHARE, resource_id=share['id'], exception=mock.ANY) def test_create_share_instance_with_server_created(self): """Test share can be created and share server is created.""" share_net = 
db_utils.create_share_network() share_net_subnet = db_utils.create_share_network_subnet( share_network_id=share_net['id'], availability_zone_id=None) share_type = db_utils.create_share_type() share = db_utils.create_share(share_network_id=share_net['id'], share_type_id=share_type['id'], availability_zone=None) db_utils.create_share_server( share_network_subnet_id=share_net_subnet['id'], host=self.share_manager.host, status=constants.STATUS_ERROR) share_id = share['id'] fake_server = { 'id': 'fake_srv_id', 'status': constants.STATUS_CREATING, } fake_metadata = { 'request_host': 'fake_host', 'share_type_id': 'fake_share_type_id', } self.mock_object(self.share_manager, '_build_server_metadata', mock.Mock(return_value=fake_metadata)) self.mock_object(db, 'share_server_create', mock.Mock(return_value=fake_server)) self.mock_object(self.share_manager, '_setup_server', mock.Mock(return_value=fake_server)) self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) self.assertEqual('fake_srv_id', shr['share_server_id']) db.share_server_create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), mock.ANY) self.share_manager._setup_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_server, fake_metadata) def test_create_share_instance_with_backend_provided_metadata(self): """Test share can be created and share server is created.""" share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id'], availability_zone=None) share_id = share['id'] share_backend_info_return = { 'metadata': {'meta_key': 'meta_val'} } share_metadata = share_backend_info_return['metadata'] self.mock_object( self.share_manager.driver, 'get_optional_share_creation_data', mock.Mock(return_value=share_backend_info_return) ) 
self.mock_object(db, 'share_metadata_update') self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) (self.share_manager.driver.get_optional_share_creation_data .assert_called_once()) db.share_metadata_update.assert_called_once_with( mock.ANY, share_id, share_metadata, False) def test_create_share_instance_update_replica_state(self): share_net = db_utils.create_share_network() share_net_subnet = db_utils.create_share_network_subnet( share_network_id=share_net['id'], availability_zone_id=None ) share_type = db_utils.create_share_type() share = db_utils.create_share(share_network_id=share_net['id'], share_type_id=share_type['id'], replication_type='dr', availability_zone=None) db_utils.create_share_server( share_network_subnet_id=share_net_subnet['id'], host=self.share_manager.host, status=constants.STATUS_ERROR) share_id = share['id'] fake_server = { 'id': 'fake_srv_id', 'status': constants.STATUS_CREATING, } fake_metadata = { 'request_host': 'fake_host', 'share_type_id': 'fake_share_type_id', } self.mock_object(self.share_manager, '_build_server_metadata', mock.Mock(return_value=fake_metadata)) self.mock_object(db, 'share_server_create', mock.Mock(return_value=fake_server)) self.mock_object(self.share_manager, '_setup_server', mock.Mock(return_value=fake_server)) self.share_manager.create_share_instance(self.context, share.instance['id']) self.assertEqual(share_id, db.share_get(context.get_admin_context(), share_id).id) shr = db.share_get(self.context, share_id) shr_instances = db.share_instance_get_all_by_share( self.context, shr['id']) self.assertEqual(1, len(shr_instances)) self.assertEqual(constants.STATUS_AVAILABLE, shr['status']) self.assertEqual( constants.REPLICA_STATE_ACTIVE, shr_instances[0]['replica_state']) self.assertEqual('fake_srv_id', 
shr['share_server_id']) db.share_server_create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), mock.ANY) self.share_manager._setup_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_server, fake_metadata) @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify') def test_create_delete_share_instance(self, mock_notify): """Test share can be created and deleted.""" share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) mock_notify.assert_not_called() self.share_manager.create_share_instance( self.context, share.instance['id']) self.assert_notify_called(mock_notify, (['INFO', 'share.create.start'], ['INFO', 'share.create.end'])) self.share_manager.delete_share_instance( self.context, share.instance['id']) self.assert_notify_called(mock_notify, (['INFO', 'share.create.start'], ['INFO', 'share.create.end'], ['INFO', 'share.delete.start'], ['INFO', 'share.delete.end'])) @ddt.data(True, False) def test_create_delete_share_instance_error(self, exception_update_access): """Test share can be created and deleted with error.""" def _raise_exception(self, *args, **kwargs): raise exception.ManilaException('fake') self.mock_object(self.share_manager.driver, "create_share", mock.Mock(side_effect=_raise_exception)) self.mock_object(self.share_manager.driver, "delete_share", mock.Mock(side_effect=_raise_exception)) if exception_update_access: self.mock_object( self.share_manager.access_helper, "update_access_rules", mock.Mock(side_effect=_raise_exception)) share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] self.assertRaises(exception.ManilaException, self.share_manager.create_share_instance, self.context, share.instance['id']) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_ERROR, shr['status']) self.assertRaises(exception.ManilaException, self.share_manager.delete_share_instance, 
self.context, share.instance['id']) shr = db.share_get(self.context, share_id) self.assertEqual(constants.STATUS_ERROR_DELETING, shr['status']) self.share_manager.driver.create_share.assert_called_once_with( utils.IsAMatcher(context.RequestContext), utils.IsAMatcher(models.ShareInstance), share_server=None) if not exception_update_access: self.share_manager.driver.delete_share.assert_called_once_with( utils.IsAMatcher(context.RequestContext), utils.IsAMatcher(models.ShareInstance), share_server=None) def test_create_share_instance_update_availability_zone(self): share_type = db_utils.create_share_type() share = db_utils.create_share(availability_zone=None, share_type_id=share_type['id']) share_id = share['id'] self.share_manager.create_share_instance( self.context, share.instance['id']) actual_share = db.share_get(context.get_admin_context(), share_id) self.assertIsNotNone(actual_share.availability_zone) self.assertEqual(manager.CONF.storage_availability_zone, actual_share.availability_zone) def test_provide_share_server_for_share_incompatible_servers(self): fake_exception = exception.ManilaException("fake") fake_share_network = db_utils.create_share_network(id='fake_sn_id') fake_share_net_subnets = [db_utils.create_share_network_subnet( id='fake_sns_id', share_network_id=fake_share_network['id'] )] fake_share_server = db_utils.create_share_server(id='fake') share = db_utils.create_share() db_method_mock = self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=fake_share_net_subnets)) self.mock_object(db, 'share_server_get_all_by_host_and_share_subnet_valid', mock.Mock(return_value=[fake_share_server])) self.mock_object( self.share_manager, '_check_share_server_backend_limits', mock.Mock(return_value=[fake_share_server])) self.mock_object( self.share_manager.driver, "choose_share_server_compatible_with_share", mock.Mock(side_effect=fake_exception) ) self.assertRaises(exception.ManilaException, 
self.share_manager._provide_share_server_for_share, self.context, fake_share_network['id'], share.instance) db_method_mock.assert_called_once_with( self.context, fake_share_network['id'], availability_zone_id=share.instance.get('availability_zone_id') ) driver_mock = self.share_manager.driver driver_method_mock = ( driver_mock.choose_share_server_compatible_with_share ) driver_method_mock.assert_called_once_with( self.context, [fake_share_server], share.instance, snapshot=None, share_group=None, encryption_key_ref=None) @ddt.data(True, False) def test_provide_share_server_encryption_key_ref(self, acl_error): fake_share_network = db_utils.create_share_network(id='fake_sn_id') fake_share_net_subnets = [db_utils.create_share_network_subnet( id='fake_sns_id', share_network_id=fake_share_network['id'] )] fake_share_server = db_utils.create_share_server( id='fake', encryption_key_ref='fake_ref' ) share = db_utils.create_share(encryption_key_ref='fake_ref') self.share_manager.driver.encryption_support = 'share_server' db_method_mock = self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=fake_share_net_subnets)) self.mock_object( db, 'share_server_get_all_by_host_and_share_subnet_valid', mock.Mock(return_value=[fake_share_server])) self.mock_object( self.share_manager, '_check_share_server_backend_limits', mock.Mock(return_value=[fake_share_server])) self.mock_object( self.share_manager.driver, "choose_share_server_compatible_with_share", mock.Mock(return_value=None) ) mock_server_create = self.mock_object( db, 'share_server_create', mock.Mock(return_value=fake_share_server)) fake_exception = exception.ManilaBarbicanACLError() self.mock_object( barbican_api, 'create_secret_access', mock.Mock(side_effect=fake_exception) if acl_error else mock.Mock()) mock_obj = mock.Mock() mock_obj.to_dict.return_value = { 'id': 'fake_application_credential_id' } mock_create_app_cred = self.mock_object( barbican_api, 
'create_application_credentials', mock.Mock(return_value=mock_obj)) mock_get_secret_ref = self.mock_object( barbican_api, 'get_secret_href', mock.Mock(return_value='fake_secret_href')) if acl_error: self.assertRaises( exception.ManilaBarbicanACLError, self.share_manager._provide_share_server_for_share, self.context, fake_share_network['id'], share.instance) else: self.share_manager._provide_share_server_for_share( self.context, fake_share_network['id'], share.instance) mock_server_create.assert_called() mock_create_app_cred.assert_called() mock_get_secret_ref.assert_called() db_method_mock.assert_called_once_with( self.context, fake_share_network['id'], availability_zone_id=share.instance.get('availability_zone_id') ) driver_mock = self.share_manager.driver driver_method_mock = ( driver_mock.choose_share_server_compatible_with_share ) driver_method_mock.assert_called_once_with( self.context, [fake_share_server], share.instance, snapshot=None, share_group=None, encryption_key_ref='fake_ref') def test_provide_share_server_for_share_invalid_arguments(self): self.assertRaises(ValueError, self.share_manager._provide_share_server_for_share, self.context, None, None) def test_provide_share_server_for_share_parent_ss_not_found(self): fake_parent_id = "fake_server_id" fake_share_network = db_utils.create_share_network(id='fake_sn_id') fake_exception = exception.ShareServerNotFound("fake") share = db_utils.create_share() fake_snapshot = { 'share': { 'instance': { 'share_server_id': fake_parent_id } } } self.mock_object(db, 'share_server_get', mock.Mock(side_effect=fake_exception)) self.assertRaises(exception.ShareServerNotFound, self.share_manager._provide_share_server_for_share, self.context, fake_share_network['id'], share.instance, snapshot=fake_snapshot) db.share_server_get.assert_called_once_with( self.context, fake_parent_id) def test_provide_share_server_for_share_parent_ss_invalid(self): fake_parent_id = "fake_server_id" fake_share_network = 
db_utils.create_share_network(id='fake_sn_id') share = db_utils.create_share() fake_snapshot = { 'share': { 'instance': { 'share_server_id': fake_parent_id } } } fake_parent_share_server = {'status': 'fake'} self.mock_object(db, 'share_server_get', mock.Mock(return_value=fake_parent_share_server)) self.assertRaises(exception.InvalidShareServer, self.share_manager._provide_share_server_for_share, self.context, fake_share_network['id'], share.instance, snapshot=fake_snapshot) db.share_server_get.assert_called_once_with( self.context, fake_parent_id) def test_provide_share_server_for_share_group_incompatible_servers(self): fake_exception = exception.ManilaException("fake") sg = db_utils.create_share_group() share_network = {'id': 'fake_sn_id'} share_net_subnets = [{'id': 'fake_sns_id', 'share_network_id': share_network['id']}] fake_share_server = { 'id': 'fake_id', 'share_network_subnets': share_net_subnets, } self.mock_object(db, 'share_server_get_all_by_host_and_share_subnet_valid', mock.Mock(return_value=[fake_share_server])) self.mock_object( self.share_manager.driver, "choose_share_server_compatible_with_share_group", mock.Mock(side_effect=fake_exception) ) self.assertRaises( exception.ManilaException, self.share_manager._provide_share_server_for_share_group, self.context, "fake_sn_id", share_net_subnets, sg) driver_mock = self.share_manager.driver driver_method_mock = ( driver_mock.choose_share_server_compatible_with_share_group) driver_method_mock.assert_called_once_with( self.context, [fake_share_server], sg, share_group_snapshot=None) def test_provide_share_server_for_share_group_invalid_arguments(self): self.assertRaises( exception.InvalidInput, self.share_manager._provide_share_server_for_share_group, self.context, None, None, None) def test_manage_share_driver_exception(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False CustomException = type('CustomException', (Exception,), dict()) 
self.mock_object(self.share_manager.driver, 'manage_existing', mock.Mock(side_effect=CustomException)) self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value='False')) self.mock_object( self.share_manager, '_get_extra_specs_from_share_type', mock.Mock(return_value={})) self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) self.mock_object(share_types, 'get_share_type', mock.Mock()) self.mock_object(share_types, 'provision_filter_on_size', mock.Mock()) share = db_utils.create_share() share_id = share['id'] driver_options = {'fake': 'fake'} self.assertRaises( CustomException, self.share_manager.manage_share, self.context, share_id, driver_options) (self.share_manager.driver.manage_existing. assert_called_once_with(mock.ANY, driver_options)) self.share_manager.db.share_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 0}) (self.share_manager._get_extra_specs_from_share_type. 
assert_called_once_with( mock.ANY, share['instance']['share_type_id'])) def _setup_provide_server_for_migration_test(self): source_share_server = db_utils.create_share_server() fake_share_network = db_utils.create_share_network() fake_network_subnet = db_utils.create_share_network_subnet( share_network_id=fake_share_network['id']) fake_dest_host = 'fakehost@fakebackend' fake_az = { 'availability_zone_id': 'fake_az_id', 'availability_zone_name': 'fake_az_name' } fake_data = { 'source_share_server': source_share_server, 'fake_share_network': fake_share_network, 'fake_network_subnet': fake_network_subnet, 'fake_dest_host': fake_dest_host, 'fake_az': fake_az, } return fake_data def test__provide_share_server_for_migration_subnet_not_found(self): fake_data = self._setup_provide_server_for_migration_test() mock_subnet_get = self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=None)) self.assertRaises( exception.ShareNetworkSubnetNotFound, self.share_manager._provide_share_server_for_migration, self.context, fake_data['source_share_server'], fake_data['fake_share_network']['id'], fake_data['fake_az']['availability_zone_id'], fake_data['fake_dest_host'] ) mock_subnet_get.assert_called_once_with( self.context, fake_data['fake_share_network']['id'], availability_zone_id=fake_data['fake_az']['availability_zone_id']) def test__provide_share_server_for_migration(self): fake_data = self._setup_provide_server_for_migration_test() dest_share_server = db_utils.create_share_server( share_network_subnets=[fake_data['fake_network_subnet']]) expected_share_server_data = { 'host': self.share_manager.host, 'share_network_subnets': [fake_data['fake_network_subnet']], 'status': constants.STATUS_CREATING, 'security_service_update_support': False, 'network_allocation_update_support': False, 'share_replicas_migration_support': False, } fake_metadata = { 'migration_destination': True, 'request_host': fake_data['fake_dest_host'], 
'source_share_server': fake_data['source_share_server'] } mock_subnet_get = self.mock_object( db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=[fake_data['fake_network_subnet']])) mock_server_create = self.mock_object( db, 'share_server_create', mock.Mock(return_value=dest_share_server)) mock_create_server_in_backend = self.mock_object( self.share_manager, '_create_share_server_in_backend', mock.Mock(return_value=dest_share_server)) result = self.share_manager._provide_share_server_for_migration( self.context, fake_data['source_share_server'], fake_data['fake_share_network']['id'], fake_data['fake_az']['availability_zone_id'], fake_data['fake_dest_host'] ) self.assertEqual(result, dest_share_server) mock_subnet_get.assert_called_once_with( self.context, fake_data['fake_share_network']['id'], availability_zone_id=fake_data['fake_az']['availability_zone_id']) mock_server_create.assert_called_once_with( self.context, expected_share_server_data) mock_create_server_in_backend.assert_called_once_with( self.context, dest_share_server, metadata=fake_metadata) def test_manage_share_invalid_size(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value='False')) self.mock_object(self.share_manager.driver, "manage_existing", mock.Mock(return_value=None)) self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) self.mock_object(share_types, 'get_share_type', mock.Mock()) self.mock_object(share_types, 'provision_filter_on_size', mock.Mock()) self.mock_object( self.share_manager, '_get_extra_specs_from_share_type', mock.Mock(return_value={})) share = db_utils.create_share() share_id = share['id'] driver_options = {'fake': 'fake'} self.assertRaises( exception.InvalidShare, self.share_manager.manage_share, self.context, share_id, driver_options) (self.share_manager.driver.manage_existing. 
assert_called_once_with(mock.ANY, driver_options)) self.share_manager.db.share_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 0}) (self.share_manager._get_extra_specs_from_share_type. assert_called_once_with( mock.ANY, share['instance']['share_type_id'])) def test_manage_share_quota_error(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value='False')) self.mock_object(self.share_manager.driver, "manage_existing", mock.Mock(return_value={'size': 3})) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=exception.QuotaError)) self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) self.mock_object(share_types, 'get_share_type', mock.Mock()) self.mock_object(share_types, 'provision_filter_on_size', mock.Mock()) self.mock_object( self.share_manager, '_get_extra_specs_from_share_type', mock.Mock(return_value={})) share = db_utils.create_share() share_id = share['id'] driver_options = {'fake': 'fake'} self.assertRaises( exception.QuotaError, self.share_manager.manage_share, self.context, share_id, driver_options) (self.share_manager.driver.manage_existing. assert_called_once_with(mock.ANY, driver_options)) self.share_manager.db.share_update.assert_called_once_with( mock.ANY, share_id, {'status': constants.STATUS_MANAGE_ERROR, 'size': 0}) (self.share_manager._get_extra_specs_from_share_type. 
assert_called_once_with( mock.ANY, share['instance']['share_type_id'])) def test_manage_share_incompatible_dhss(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False share = db_utils.create_share() self.mock_object(share_types, 'get_share_type', mock.Mock()) self.mock_object(share_types, 'provision_filter_on_size', mock.Mock()) self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value="True")) self.mock_object( self.share_manager, '_get_extra_specs_from_share_type', mock.Mock(return_value={})) self.assertRaises( exception.InvalidShare, self.share_manager.manage_share, self.context, share['id'], {}) (self.share_manager._get_extra_specs_from_share_type. assert_called_once_with( mock.ANY, share['instance']['share_type_id'])) @ddt.data({'dhss': True, 'driver_data': {'size': 1, 'replication_type': None}}, {'dhss': False, 'driver_data': {'size': 2, 'name': 'fake', 'replication_type': 'dr'}}, {'dhss': False, 'driver_data': {'size': 3, 'export_locations': ['foo', 'bar', 'quuz'], 'replication_type': 'writable'}}) @ddt.unpack def test_manage_share_valid_share(self, dhss, driver_data): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = dhss replication_type = driver_data.pop('replication_type') extra_specs = {} if replication_type is not None: extra_specs.update({'replication_type': replication_type}) export_locations = driver_data.get('export_locations') self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock()) self.mock_object(share_types, 'get_share_type', mock.Mock()) self.mock_object(share_types, 'provision_filter_on_size', mock.Mock()) self.mock_object( self.share_manager.db, 'export_locations_update', mock.Mock(side_effect=( self.share_manager.db.export_locations_update))) self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value=str(dhss))) 
self.mock_object( self.share_manager, '_get_extra_specs_from_share_type', mock.Mock(return_value=extra_specs)) if dhss: mock_manage = self.mock_object( self.share_manager.driver, "manage_existing_with_server", mock.Mock(return_value=driver_data)) else: mock_manage = self.mock_object( self.share_manager.driver, "manage_existing", mock.Mock(return_value=driver_data)) share = db_utils.create_share(replication_type=replication_type) share_id = share['id'] driver_options = {'fake': 'fake'} expected_deltas = { 'project_id': share['project_id'], 'user_id': self.context.user_id, 'shares': 1, 'gigabytes': driver_data['size'], 'share_type_id': share['instance']['share_type_id'], 'overquota_allowed': True } if replication_type: expected_deltas.update({'share_replicas': 1, 'replica_gigabytes': driver_data['size']}) self.share_manager.manage_share(self.context, share_id, driver_options) if dhss: mock_manage.assert_called_once_with(mock.ANY, driver_options, None) else: mock_manage.assert_called_once_with(mock.ANY, driver_options) if export_locations: (self.share_manager.db.export_locations_update. assert_called_once_with( utils.IsAMatcher(context.RequestContext), share.instance['id'], export_locations, delete=True)) else: self.assertFalse( self.share_manager.db.export_locations_update.called) valid_share_data = { 'status': constants.STATUS_AVAILABLE, 'launched_at': mock.ANY} if replication_type: valid_share_data['replica_state'] = constants.REPLICA_STATE_ACTIVE valid_share_data.update(driver_data) self.share_manager.db.share_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_id, valid_share_data) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, **expected_deltas) (self.share_manager._get_extra_specs_from_share_type. 
assert_called_once_with( mock.ANY, share['instance']['share_type_id'])) def test_update_quota_usages_new(self): self.mock_object(self.share_manager.db, 'quota_usage_get', mock.Mock(return_value={'in_use': 1})) self.mock_object(self.share_manager.db, 'quota_usage_update') project_id = 'fake_project_id' resource_name = 'fake' usage = 1 self.share_manager._update_quota_usages( self.context, project_id, {resource_name: usage}) self.share_manager.db.quota_usage_get.assert_called_once_with( mock.ANY, project_id, resource_name, mock.ANY) self.share_manager.db.quota_usage_update.assert_called_once_with( mock.ANY, project_id, mock.ANY, resource_name, in_use=2) def test_update_quota_usages_update(self): project_id = 'fake_project_id' resource_name = 'fake' usage = 1 side_effect = exception.QuotaUsageNotFound(project_id=project_id) self.mock_object( self.share_manager.db, 'quota_usage_get', mock.Mock(side_effect=side_effect)) self.mock_object(self.share_manager.db, 'quota_usage_create') self.share_manager._update_quota_usages( self.context, project_id, {resource_name: usage}) self.share_manager.db.quota_usage_get.assert_called_once_with( mock.ANY, project_id, resource_name, mock.ANY) self.share_manager.db.quota_usage_create.assert_called_once_with( mock.ANY, project_id, mock.ANY, resource_name, usage) def _setup_unmanage_mocks(self, mock_driver=True, mock_unmanage=None, dhss=False, supports_replication=False): if mock_driver: self.mock_object(self.share_manager, 'driver') replicas_list = [] if supports_replication: replicas_list.append({'id': 'fake_id'}) if mock_unmanage: if dhss: self.mock_object( self.share_manager.driver, "unmanage_with_share_server", mock_unmanage) else: self.mock_object(self.share_manager.driver, "unmanage", mock_unmanage) self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_delete') self.mock_object( self.share_manager.db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas_list)) 
def test_unmanage_share_invalid_share(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False unmanage = mock.Mock(side_effect=exception.InvalidShare(reason="fake")) self._setup_unmanage_mocks(mock_driver=False, mock_unmanage=unmanage) share = db_utils.create_share() self.share_manager.unmanage_share(self.context, share['id']) self.share_manager.db.share_update.assert_called_once_with( mock.ANY, share['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) (self.share_manager.db.share_replicas_get_all_by_share. assert_called_once_with(mock.ANY, share['id'])) @ddt.data(True, False) def test_unmanage_share_valid_share(self, supports_replication): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False self._setup_unmanage_mocks( mock_driver=False, mock_unmanage=mock.Mock(), supports_replication=supports_replication) self.mock_object(quota.QUOTAS, 'reserve') share = db_utils.create_share() share_id = share['id'] share_instance_id = share.instance['id'] self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=share.instance)) reservation_params = { 'project_id': share['project_id'], 'shares': -1, 'gigabytes': -share['size'], 'share_type_id': share['instance']['share_type_id'], } if supports_replication: reservation_params.update( {'share_replicas': -1, 'replica_gigabytes': -share['size']}) self.share_manager.unmanage_share(self.context, share_id) (self.share_manager.driver.unmanage. assert_called_once_with(share.instance)) self.share_manager.db.share_instance_delete.assert_called_once_with( mock.ANY, share_instance_id) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, **reservation_params) (self.share_manager.db.share_replicas_get_all_by_share. 
assert_called_once_with(mock.ANY, share['id'])) @ddt.data(True, False) def test_unmanage_share_valid_share_with_share_server( self, supports_replication): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = True self._setup_unmanage_mocks( mock_driver=False, mock_unmanage=mock.Mock(), dhss=True, supports_replication=supports_replication) server = db_utils.create_share_server(id='fake_server_id') share = db_utils.create_share(share_server_id='fake_server_id') self.mock_object(self.share_manager.db, 'share_server_update') self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=share.instance)) self.mock_object(quota.QUOTAS, 'reserve') reservation_params = { 'project_id': share['project_id'], 'shares': -1, 'gigabytes': -share['size'], 'share_type_id': share['instance']['share_type_id'], } if supports_replication: reservation_params.update( {'share_replicas': -1, 'replica_gigabytes': -share['size']}) share_id = share['id'] share_instance_id = share.instance['id'] self.share_manager.unmanage_share(self.context, share_id) (self.share_manager.driver.unmanage_with_server. 
assert_called_once_with(share.instance, server)) self.share_manager.db.share_instance_delete.assert_called_once_with( mock.ANY, share_instance_id) self.share_manager.db.share_server_update.assert_called_once_with( mock.ANY, server['id'], {'is_auto_deletable': False}) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, **reservation_params) (self.share_manager.db.share_replicas_get_all_by_share .assert_called_once_with(mock.ANY, share['id'])) def test_unmanage_share_valid_share_with_quota_error(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False self._setup_unmanage_mocks(mock_driver=False, mock_unmanage=mock.Mock()) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(side_effect=Exception())) share = db_utils.create_share() share_instance_id = share.instance['id'] self.share_manager.unmanage_share(self.context, share['id']) self.share_manager.driver.unmanage.assert_called_once_with(mock.ANY) self.share_manager.db.share_instance_delete.assert_called_once_with( mock.ANY, share_instance_id) (self.share_manager.db.share_replicas_get_all_by_share. assert_called_once_with(mock.ANY, share['id'])) def test_unmanage_share_remove_access_rules_error(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False manager.CONF.unmanage_remove_access_rules = True self._setup_unmanage_mocks(mock_driver=False, mock_unmanage=mock.Mock()) self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(side_effect=Exception()) ) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=[])) share = db_utils.create_share() self.share_manager.unmanage_share(self.context, share['id']) self.share_manager.db.share_update.assert_called_once_with( mock.ANY, share['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) (self.share_manager.db.share_replicas_get_all_by_share. 
assert_called_once_with(mock.ANY, share['id'])) def test_unmanage_share_valid_share_remove_access_rules(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False manager.CONF.unmanage_remove_access_rules = True self._setup_unmanage_mocks(mock_driver=False, mock_unmanage=mock.Mock()) smanager = self.share_manager self.mock_object(smanager.access_helper, 'update_access_rules') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value=[])) share = db_utils.create_share() share_id = share['id'] share_instance_id = share.instance['id'] smanager.unmanage_share(self.context, share_id) smanager.driver.unmanage.assert_called_once_with(mock.ANY) smanager.access_helper.update_access_rules.assert_called_once_with( mock.ANY, mock.ANY, delete_all_rules=True, share_server=None ) smanager.db.share_instance_delete.assert_called_once_with( mock.ANY, share_instance_id) (self.share_manager.db.share_replicas_get_all_by_share. assert_called_once_with(mock.ANY, share['id'])) def test_delete_share_instance_share_server_not_found(self): share_net = db_utils.create_share_network() share_network_subnet = db_utils.create_share_network_subnet( share_network_id=share_net['id'] ) share = db_utils.create_share( share_network_id=share_net['id'], share_server_id='fake-id', share_network_subnets=[share_network_subnet]) self.assertRaises( exception.ShareServerNotFound, self.share_manager.delete_share_instance, self.context, share.instance['id'] ) @ddt.data(True, False) def test_delete_share_instance_last_on_srv_with_sec_service( self, with_details): share_net = db_utils.create_share_network() share_network_subnet = db_utils.create_share_network_subnet( share_network_id=share_net['id'] ) sec_service = db_utils.create_security_service( share_network_id=share_net['id']) backend_details = dict( security_service_ldap=jsonutils.dumps(sec_service)) if with_details: share_srv = db_utils.create_share_server( host=self.share_manager.host, 
backend_details=backend_details, share_network_subnets=[share_network_subnet]) else: share_srv = db_utils.create_share_server( host=self.share_manager.host, share_network_subnets=[share_network_subnet]) db.share_server_backend_details_set( context.get_admin_context(), share_srv['id'], backend_details) share_type = db_utils.create_share_type() share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id'], share_type_id=share_type['id']) mock_access_helper_call = self.mock_object( self.share_manager.access_helper, 'update_access_rules') self.share_manager.driver = mock.Mock() manager.CONF.delete_share_server_with_last_share = True self.share_manager.delete_share_instance(self.context, share.instance['id']) mock_access_helper_call.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share.instance['id'], delete_all_rules=True, share_server=mock.ANY) self.share_manager.driver.teardown_server.assert_called_once_with( server_details=backend_details, security_services=[jsonutils.loads( backend_details['security_service_ldap'])]) @ddt.data({'force': True, 'side_effect': 'update_access'}, {'force': True, 'side_effect': 'delete_share'}, {'force': False, 'side_effect': None}) @ddt.unpack def test_delete_share_instance_last_on_server(self, force, side_effect): share_net = db_utils.create_share_network() share_network_subnet = db_utils.create_share_network_subnet( share_network_id=share_net['id'] ) share_srv = db_utils.create_share_server( host=self.share_manager.host, share_network_subnets=[share_network_subnet] ) share_type = db_utils.create_share_type() share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id'], share_type_id=share_type['id']) share_srv = db.share_server_get(self.context, share_srv['id']) mock_access_helper_call = self.mock_object( self.share_manager.access_helper, 'update_access_rules') self.share_manager.driver = mock.Mock() if side_effect == 'update_access': 
mock_access_helper_call.side_effect = exception.ManilaException if side_effect == 'delete_share': self.mock_object(self.share_manager.driver, 'delete_share', mock.Mock(side_effect=Exception('fake'))) self.mock_object(manager.LOG, 'error') manager.CONF.delete_share_server_with_last_share = True self.share_manager.delete_share_instance( self.context, share.instance['id'], force=force) mock_access_helper_call.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share.instance['id'], delete_all_rules=True, share_server=mock.ANY) self.share_manager.driver.teardown_server.assert_called_once_with( server_details=share_srv.get('backend_details'), security_services=[]) self.assertEqual(force, manager.LOG.error.called) def test_delete_share_instance_last_on_server_deletion_disabled(self): share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server(host=self.share_manager.host) share_type = db_utils.create_share_type() share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id'], share_type_id=share_type['id']) share_srv = db.share_server_get(self.context, share_srv['id']) manager.CONF.delete_share_server_with_last_share = False self.share_manager.driver = mock.Mock() mock_access_helper_call = self.mock_object( self.share_manager.access_helper, 'update_access_rules') self.mock_object(db, 'share_server_get', mock.Mock(return_value=share_srv)) self.share_manager.delete_share_instance(self.context, share.instance['id']) mock_access_helper_call.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share.instance['id'], delete_all_rules=True, share_server=share_srv) self.assertFalse(self.share_manager.driver.teardown_network.called) def test_delete_share_instance_not_last_on_server(self): share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server( host=self.share_manager.host ) share_type = db_utils.create_share_type() share = 
db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id'], share_type_id=share_type['id']) db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) share_srv = db.share_server_get(self.context, share_srv['id']) manager.CONF.delete_share_server_with_last_share = True self.share_manager.driver = mock.Mock() self.mock_object(db, 'share_server_get', mock.Mock(return_value=share_srv)) mock_access_helper_call = self.mock_object( self.share_manager.access_helper, 'update_access_rules') self.share_manager.delete_share_instance(self.context, share.instance['id']) mock_access_helper_call.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share.instance['id'], delete_all_rules=True, share_server=share_srv) self.assertFalse(self.share_manager.driver.teardown_network.called) @ddt.data('update_access', 'delete_share') def test_delete_share_instance_not_found(self, side_effect): share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server( host=self.share_manager.host) share_type = db_utils.create_share_type() share = db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id'], share_type_id=share_type['id']) access = db_utils.create_access(share_id=share['id']) db_utils.create_share(share_network_id=share_net['id'], share_server_id=share_srv['id']) share_srv = db.share_server_get(self.context, share_srv['id']) manager.CONF.delete_share_server_with_last_share = False self.mock_object(db, 'share_server_get', mock.Mock(return_value=share_srv)) self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share.instance)) self.mock_object(db, 'share_access_get_all_for_instance', mock.Mock(return_value=[access])) self.share_manager.driver = mock.Mock() self.share_manager.access_helper.driver = mock.Mock() if side_effect == 'update_access': mock_access_helper_call = self.mock_object( self.share_manager.access_helper, 'update_access_rules', 
mock.Mock(side_effect=exception.ShareResourceNotFound( share_id=share['id']))) if side_effect == 'delete_share': mock_access_helper_call = self.mock_object( self.share_manager.access_helper, 'update_access_rules', mock.Mock(return_value=None) ) self.mock_object( self.share_manager.driver, 'delete_share', mock.Mock(side_effect=exception.ShareResourceNotFound( share_id=share['id']))) self.mock_object(manager.LOG, 'warning') self.share_manager.delete_share_instance(self.context, share.instance['id']) self.assertFalse(self.share_manager.driver.teardown_network.called) mock_access_helper_call.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share.instance['id'], delete_all_rules=True, share_server=share_srv) self.assertTrue(manager.LOG.warning.called) def test_delete_share_instance_deferred_delete_quota_error(self): share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server( host=self.share_manager.host ) share_type = db_utils.create_share_type() share = db_utils.create_share( share_network_id=share_net['id'], share_server_id=share_srv['id'], share_type_id=share_type['id'], status=constants.STATUS_DEFERRED_DELETING) share_srv = db.share_server_get(self.context, share_srv['id']) manager.CONF.delete_share_server_with_last_share = False self.share_manager.driver = mock.Mock() self.mock_object(db, 'share_server_get', mock.Mock(return_value=share_srv)) self.mock_object( quota.QUOTAS, 'reserve', mock.Mock(side_effect=exception.QuotaError(code='500'))) self.mock_object(quota.QUOTAS, 'commit') self.mock_object(manager.LOG, 'exception') self.mock_object(self.share_manager.db, 'share_instance_update', mock.Mock(return_value=None)) self.mock_object(self.share_manager.driver, 'delete_share') self.share_manager.delete_share_instance(self.context, share.instance['id'], deferred_delete=True) reservation_params = { 'gigabytes': -share['size'], 'shares': -1, 'project_id': share['project_id'], 'share_type_id': share_type['id'], 'user_id': 
share['user_id'], } quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, **reservation_params, ) self.assertFalse(quota.QUOTAS.commit.called) self.assertTrue(self.share_manager.driver.delete_share.called) self.assertFalse(self.share_manager.driver.teardown_network.called) def test_delete_share_instance_deferred_delete(self): share_net = db_utils.create_share_network() share_srv = db_utils.create_share_server( host=self.share_manager.host ) share_type = db_utils.create_share_type() share = db_utils.create_share( share_network_id=share_net['id'], share_server_id=share_srv['id'], share_type_id=share_type['id'], status=constants.STATUS_DEFERRED_DELETING) share_srv = db.share_server_get(self.context, share_srv['id']) manager.CONF.delete_share_server_with_last_share = False self.share_manager.driver = mock.Mock() self.mock_object(db, 'share_server_get', mock.Mock(return_value=share_srv)) self.mock_object(quota.QUOTAS, 'reserve', mock.Mock(return_value='fake_reservation')) self.mock_object(quota.QUOTAS, 'commit') self.mock_object(manager.LOG, 'exception') self.mock_object(self.share_manager.db, 'share_instance_update', mock.Mock(return_value=None)) self.mock_object(self.share_manager.driver, 'delete_share') self.share_manager.delete_share_instance(self.context, share.instance['id'], deferred_delete=True) reservation_params = { 'gigabytes': -share['size'], 'shares': -1, 'project_id': share['project_id'], 'share_type_id': share_type['id'], 'user_id': share['user_id'], } quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, **reservation_params, ) quota.QUOTAS.commit.assert_called_once_with( mock.ANY, mock.ANY, project_id=share['project_id'], share_type_id=share_type['id'], user_id=share['user_id'], ) self.assertTrue(self.share_manager.driver.delete_share.called) self.assertFalse(self.share_manager.driver.teardown_network.called) def test_do_deferred_share_deletion(self): share = db_utils.create_share_without_instance( id='fake_id', status=constants.STATUS_AVAILABLE) 
share_server = fakes.fake_share_server_get() kwargs = { 'id': 1, 'share_id': share['id'], 'share_server_id': share_server['id'], 'status': 'deferred_deleting', 'updated_at': timeutils.utcnow(), 'host': self.host, } si_1 = db_utils.create_share_instance(**kwargs) kwargs = { 'id': 2, 'share_id': share['id'], 'share_server_id': share_server['id'], 'status': 'deferred_deleting', 'updated_at': timeutils.utcnow(), 'host': self.host, } si_2 = db_utils.create_share_instance(**kwargs) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_delete') self.mock_object( self.share_manager.db, 'share_instance_get_all', mock.Mock(return_value=[si_1, si_2])) self.mock_object(self.share_manager, '_check_delete_share_server') self.mock_object(self.share_manager, '_notify_about_share_usage') mock_delete_share = self.mock_object( self.share_manager.driver, 'delete_share') self.share_manager.do_deferred_share_deletion(self.context) self.assertEqual(2, mock_delete_share.call_count) def test_do_deferred_share_deletion_exception(self): share = db_utils.create_share_without_instance( id='fake_id', status=constants.STATUS_AVAILABLE) share_server = fakes.fake_share_server_get() kwargs = { 'id': 1, 'share_id': share['id'], 'share_server_id': share_server['id'], 'status': 'deferred_deleting', 'updated_at': timeutils.utcnow(), 'host': self.host, } si = db_utils.create_share_instance(**kwargs) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_update') mock_delete = self.mock_object(self.share_manager.db, 'share_instance_delete') self.mock_object( self.share_manager.db, 'share_instance_get_all', mock.Mock(return_value=[si])) 
self.mock_object( self.share_manager.driver, 'delete_share', mock.Mock(side_effect=exception.ManilaException)) self.share_manager.do_deferred_share_deletion(self.context) mock_delete.assert_not_called() def test_setup_server(self): # Setup required test data metadata = {'fake_metadata_key': 'fake_metadata_value'} share_network = db_utils.create_share_network(id='fake_sn_id') share_net_subnets = [db_utils.create_share_network_subnet( id='fake_sns_id', share_network_id=share_network['id'] )] share_server = db_utils.create_share_server( id='fake_id', share_network_subnets=share_net_subnets) network_info = {'security_services': []} for ss_type in constants.SECURITY_SERVICES_ALLOWED_TYPES: network_info['security_services'].append({ 'name': 'fake_name' + ss_type, 'ou': 'fake_ou' + ss_type, 'domain': 'fake_domain' + ss_type, 'server': 'fake_server' + ss_type, 'dns_ip': 'fake_dns_ip' + ss_type, 'user': 'fake_user' + ss_type, 'type': ss_type, 'password': 'fake_password' + ss_type, 'default_ad_site': 'fake_default_ad_site' + ss_type, }) sec_services = network_info['security_services'] server_info = {'fake_server_info_key': 'fake_server_info_value'} network_info['network_type'] = 'fake_network_type' # mock required stuff self.mock_object(self.share_manager.db, 'share_network_subnet_get_all_by_share_server_id', mock.Mock(return_value=share_net_subnets)) self.mock_object(self.share_manager.db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(self.share_manager.driver, 'allocate_network') self.mock_object(self.share_manager, '_form_server_setup_info', mock.Mock(return_value=[network_info])) self.mock_object(self.share_manager, '_validate_segmentation_id') self.mock_object(self.share_manager.driver, 'setup_server', mock.Mock(return_value=server_info)) self.mock_object(self.share_manager.db, 'share_server_backend_details_set') self.mock_object(self.share_manager.db, 'share_server_update', mock.Mock(return_value=share_server)) # execute method 
_setup_server result = self.share_manager._setup_server( self.context, share_server, metadata) # verify results self.assertEqual(share_server, result) self.share_manager.db.share_network_get.assert_called_once_with( self.context, share_net_subnets[0]['share_network_id']) (self.share_manager.db.share_network_subnet_get_all_by_share_server_id. assert_called_once_with( self.context, share_server['id'])) self.share_manager.driver.allocate_network.assert_called_once_with( self.context, share_server, share_network, share_server['share_network_subnets'][0]) self.share_manager._form_server_setup_info.assert_called_once_with( self.context, share_server, share_network, share_net_subnets) self.share_manager._validate_segmentation_id.assert_called_once_with( network_info) self.share_manager.driver.setup_server.assert_called_once_with( [network_info], metadata=metadata) (self.share_manager.db.share_server_backend_details_set. assert_has_calls([ mock.call(self.context, share_server['id'], {'security_service_' + sec_services[0]['type']: jsonutils.dumps(sec_services[0])}), mock.call(self.context, share_server['id'], {'security_service_' + sec_services[1]['type']: jsonutils.dumps(sec_services[1])}), mock.call(self.context, share_server['id'], {'security_service_' + sec_services[2]['type']: jsonutils.dumps(sec_services[2])}), mock.call(self.context, share_server['id'], server_info), ])) self.share_manager.db.share_server_update.assert_called_once_with( self.context, share_server['id'], {'status': constants.STATUS_ACTIVE, 'identifier': share_server['id']}) def test_setup_server_server_info_not_present(self): # Setup required test data metadata = {'fake_metadata_key': 'fake_metadata_value'} share_network = {'id': 'fake_sn_id'} share_net_subnets = [{'id': 'fake_sns_id', 'share_network_id': share_network['id']}] share_server = { 'id': 'fake_id', 'share_network_subnets': share_net_subnets, } network_info = { 'fake_network_info_key': 'fake_network_info_value', 'security_services': [], 
'network_type': 'fake_network_type', } server_info = {} # mock required stuff self.mock_object(self.share_manager.db, 'share_network_subnet_get_all_by_share_server_id', mock.Mock(return_value=share_net_subnets)) self.mock_object(self.share_manager.db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(self.share_manager, '_form_server_setup_info', mock.Mock(return_value=[network_info])) self.mock_object(self.share_manager.driver, 'setup_server', mock.Mock(return_value=server_info)) self.mock_object(self.share_manager.db, 'share_server_update', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.driver, 'allocate_network') # execute method _setup_server result = self.share_manager._setup_server( self.context, share_server, metadata) # verify results self.assertEqual(share_server, result) self.share_manager.db.share_network_get.assert_called_once_with( self.context, share_net_subnets[0]['share_network_id']) (self.share_manager.db.share_network_subnet_get_all_by_share_server_id. 
assert_called_once_with( self.context, share_server['id'])) self.share_manager._form_server_setup_info.assert_called_once_with( self.context, share_server, share_network, share_net_subnets) self.share_manager.driver.setup_server.assert_called_once_with( [network_info], metadata=metadata) self.share_manager.db.share_server_update.assert_called_once_with( self.context, share_server['id'], {'status': constants.STATUS_ACTIVE, 'identifier': share_server['id']}) self.share_manager.driver.allocate_network.assert_called_once_with( self.context, share_server, share_network, share_net_subnets[0]) def setup_server_raise_no_subnets(self): self.assertRaises( exception.NetworkBadConfigurationException, self.share_manager._setup_server, self.context, {'share_network_subnets': []}, {}) def setup_server_raise_exception(self, detail_data_proper): # Setup required test data metadata = {'fake_metadata_key': 'fake_metadata_value'} server_info = {'details_key': 'value'} share_network = {'id': 'fake_sn_id'} share_net_subnets = [{'id': 'fake_sns_id', 'share_network_id': share_network['id']}] share_server = { 'id': 'fake_id', 'share_network_subnets': share_net_subnets } network_info = { 'fake_network_info_key': 'fake_network_info_value', 'security_services': [], 'network_type': 'fake_network_type', } if detail_data_proper: detail_data = {'server_details': server_info} self.mock_object(self.share_manager.db, 'share_server_backend_details_set') else: detail_data = 'not dictionary detail data' # Mock required parameters self.mock_object(self.share_manager.db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(self.share_manager.db, 'share_network_subnet_get_all_by_share_server_id', mock.Mock(return_value=share_net_subnets)) self.mock_object(self.share_manager.db, 'share_server_update') for m in ['deallocate_network', 'allocate_network']: self.mock_object(self.share_manager.driver, m) self.mock_object(self.share_manager, '_form_server_setup_info', 
mock.Mock(return_value=[network_info])) self.mock_object(self.share_manager.db, 'share_server_backend_details_set') self.mock_object(self.share_manager.driver, 'setup_server', mock.Mock(side_effect=exception.ManilaException( detail_data=detail_data))) # execute method _setup_server self.assertRaises( exception.ManilaException, self.share_manager._setup_server, self.context, share_server, metadata, ) # verify results if detail_data_proper: (self.share_manager.db.share_server_backend_details_set. assert_called_once_with( self.context, share_server['id'], server_info)) self.share_manager._form_server_setup_info.assert_called_once_with( self.context, share_server, share_network, share_net_subnets) self.share_manager.db.share_server_update.assert_called_once_with( self.context, share_server['id'], {'status': constants.STATUS_ERROR}) self.share_manager.db.share_network_get.assert_called_once_with( self.context, share_net_subnets[0]['share_network_id']) (self.share_manager.db.share_network_subnet_get_all_by_share_server_id. 
assert_called_once_with( self.context, share_server['id'])) self.share_manager.driver.allocate_network.assert_has_calls([ mock.call(self.context, share_server, share_network, share_net_subnets[0])]) self.share_manager.driver.deallocate_network.assert_has_calls([ mock.call(self.context, share_server['id'])]) def test_setup_server_incorrect_detail_data(self): self.setup_server_raise_exception(detail_data_proper=False) def test_setup_server_exception_in_driver(self): self.setup_server_raise_exception(detail_data_proper=True) @ddt.data({}, {'detail_data': 'fake'}, {'detail_data': {'server_details': 'fake'}}, {'detail_data': {'server_details': {'fake': 'fake'}}}, {'detail_data': { 'server_details': {'fake': 'fake', 'fake2': 'fake2'}}},) def test_setup_server_exception_in_cleanup_after_error(self, data): def get_server_details_from_data(data): d = data.get('detail_data') if not isinstance(d, dict): return {} d = d.get('server_details') if not isinstance(d, dict): return {} return d share_net_subnets = [db_utils.create_share_network_subnet( id='fake_subnet_id', share_network_id='fake_share_net_id' )] share_server = db_utils.create_share_server( id='fake', share_network_subnets=share_net_subnets) details = get_server_details_from_data(data) metadata = {'fake_metadata_key': 'fake_metadata_value'} exc_mock = mock.Mock(side_effect=exception.ManilaException(**data)) details_mock = mock.Mock(side_effect=exception.ManilaException()) self.mock_object(self.share_manager.db, 'share_network_get', exc_mock) self.mock_object(self.share_manager.db, 'share_server_backend_details_set', details_mock) self.mock_object(self.share_manager.db, 'share_server_update') self.mock_object(self.share_manager.driver, 'deallocate_network') self.mock_object(manager.LOG, 'debug') self.mock_object(manager.LOG, 'warning') self.assertRaises( exception.ManilaException, self.share_manager._setup_server, self.context, share_server, metadata, ) self.assertTrue(self.share_manager.db.share_network_get.called) if 
details: self.assertEqual(len(details), details_mock.call_count) expected = [mock.call(mock.ANY, share_server['id'], {k: v}) for k, v in details.items()] self.assertEqual(expected, details_mock.call_args_list) self.share_manager.db.share_server_update.assert_called_once_with( self.context, share_server['id'], {'status': constants.STATUS_ERROR}) self.share_manager.driver.deallocate_network.assert_called_once_with( self.context, share_server['id'] ) self.assertFalse(manager.LOG.warning.called) if get_server_details_from_data(data): self.assertTrue(manager.LOG.debug.called) def test_ensure_share_instance_has_pool_with_only_host(self): fake_share = { 'status': constants.STATUS_AVAILABLE, 'host': 'host1', 'id': 1} host = self.share_manager._ensure_share_instance_has_pool( context.get_admin_context(), fake_share) self.assertIsNone(host) def test_ensure_share_instance_has_pool_with_full_pool_name(self): fake_share = {'host': 'host1#pool0', 'id': 1, 'status': constants.STATUS_AVAILABLE} fake_share_expected_value = 'pool0' host = self.share_manager._ensure_share_instance_has_pool( context.get_admin_context(), fake_share) self.assertEqual(fake_share_expected_value, host) def test_ensure_share_instance_has_pool_unable_to_fetch_share(self): fake_share = {'host': 'host@backend', 'id': 1, 'status': constants.STATUS_AVAILABLE} with mock.patch.object(self.share_manager.driver, 'get_pool', side_effect=Exception): with mock.patch.object(manager, 'LOG') as mock_LOG: self.share_manager._ensure_share_instance_has_pool( context.get_admin_context(), fake_share) self.assertEqual(1, mock_LOG.exception.call_count) def test_ensure_share_instance_pool_notexist_and_get_from_driver(self): fake_share_instance = {'host': 'host@backend', 'id': 1, 'status': constants.STATUS_AVAILABLE} fake_host_expected_value = 'fake_pool' self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.driver, 'get_pool', mock.Mock(return_value='fake_pool')) host = 
self.share_manager._ensure_share_instance_has_pool( context.get_admin_context(), fake_share_instance) self.share_manager.db.share_instance_update.assert_any_call( mock.ANY, 1, {'host': 'host@backend#fake_pool'}) self.assertEqual(fake_host_expected_value, host) def test__form_server_setup_info(self): def fake_network_allocations_get_for_share_server(*args, **kwargs): if kwargs.get('label') != 'admin': return ['foo', 'bar'] return ['admin-foo', 'admin-bar'] self.mock_object( self.share_manager.db, 'network_allocations_get_for_share_server', mock.Mock( side_effect=fake_network_allocations_get_for_share_server)) fake_share_server = dict( id='fake_share_server_id', backend_details=dict(foo='bar')) fake_share_network = dict( security_services='fake_security_services' ) fake_share_network_subnet = dict( id='fake_sns_id', segmentation_id='fake_segmentation_id', cidr='fake_cidr', neutron_net_id='fake_neutron_net_id', neutron_subnet_id='fake_neutron_subnet_id', network_type='fake_network_type', subnet_metadata={'fake_key': 'fake_value'}) expected = [dict( server_id=fake_share_server['id'], segmentation_id=fake_share_network_subnet['segmentation_id'], cidr=fake_share_network_subnet['cidr'], neutron_net_id=fake_share_network_subnet['neutron_net_id'], neutron_subnet_id=fake_share_network_subnet['neutron_subnet_id'], security_services=fake_share_network['security_services'], network_allocations=( fake_network_allocations_get_for_share_server()), admin_network_allocations=( fake_network_allocations_get_for_share_server(label='admin')), backend_details=fake_share_server['backend_details'], network_type=fake_share_network_subnet['network_type'], subnet_metadata=fake_share_network_subnet['subnet_metadata'])] network_info = self.share_manager._form_server_setup_info( self.context, fake_share_server, fake_share_network, [fake_share_network_subnet]) self.assertEqual(expected, network_info) (self.share_manager.db.network_allocations_get_for_share_server. 
assert_has_calls([ mock.call(self.context, fake_share_server['id'], label='admin'), mock.call(self.context, fake_share_server['id'], label='user', subnet_id=fake_share_network_subnet['id']) ])) @ddt.data( {'network_info': {'network_type': 'vlan', 'segmentation_id': '100'}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': '1'}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': '4094'}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': '100'}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': '1'}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': '16777215'}}, {'network_info': {'network_type': 'gre', 'segmentation_id': '100'}}, {'network_info': {'network_type': 'gre', 'segmentation_id': '1'}}, {'network_info': {'network_type': 'gre', 'segmentation_id': '4294967295'}}, {'network_info': {'network_type': 'flat', 'segmentation_id': None}}, {'network_info': {'network_type': 'flat', 'segmentation_id': 0}}, {'network_info': {'network_type': None, 'segmentation_id': None}}, {'network_info': {'network_type': None, 'segmentation_id': 0}}) @ddt.unpack def test_validate_segmentation_id_with_valid_values(self, network_info): self.share_manager._validate_segmentation_id(network_info) @ddt.data( {'network_info': {'network_type': 'vlan', 'segmentation_id': None}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': -1}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': 0}}, {'network_info': {'network_type': 'vlan', 'segmentation_id': '4095'}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': None}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': 0}}, {'network_info': {'network_type': 'vxlan', 'segmentation_id': '16777216'}}, {'network_info': {'network_type': 'gre', 'segmentation_id': None}}, {'network_info': {'network_type': 'gre', 'segmentation_id': 0}}, {'network_info': {'network_type': 'gre', 'segmentation_id': '4294967296'}}, {'network_info': {'network_type': 'flat', 
'segmentation_id': '1000'}}, {'network_info': {'network_type': None, 'segmentation_id': '1000'}}) @ddt.unpack def test_validate_segmentation_id_with_invalid_values(self, network_info): self.assertRaises(exception.NetworkBadConfigurationException, self.share_manager._validate_segmentation_id, network_info) @ddt.data(10, 36, 720) def test_verify_server_cleanup_interval_valid_cases(self, val): data = dict(DEFAULT=dict(unused_share_server_cleanup_interval=val)) with test_utils.create_temp_config_with_opts(data): manager.ShareManager() def test_delete_share_server_server_not_found(self): share_server = db_utils.create_share_server() self.share_manager.driver.initialized = True mock_server_get = self.mock_object( self.share_manager.db, 'share_server_get', mock.Mock(side_effect=exception.ShareServerNotFound( share_server_id=share_server['id']))) self.assertRaises( exception.ShareServerNotFound, self.share_manager.delete_share_server, self.context, share_server ) mock_server_get.assert_called_once_with( self.context, share_server['id']) def test_delete_share_server_server_in_use(self): share_server = db_utils.create_share_server() share_server_shares = [db_utils.create_share()] self.share_manager.driver.initialized = True mock_server_get = self.mock_object( self.share_manager.db, 'share_server_get', mock.Mock(return_value=share_server)) mock_instances_get = self.mock_object( self.share_manager.db, 'share_instance_get_all_by_share_server', mock.Mock(return_value=share_server_shares)) self.assertRaises( exception.ShareServerInUse, self.share_manager.delete_share_server, self.context, share_server ) mock_server_get.assert_called_once_with( self.context, share_server['id']) mock_instances_get.assert_called_once_with( self.context, share_server['id']) def test_delete_share_server_teardown_failure(self): share_server = db_utils.create_share_server() self.share_manager.driver.initialized = True mock_server_get = self.mock_object( self.share_manager.db, 'share_server_get', 
mock.Mock(return_value=share_server)) mock_instances_get = self.mock_object( self.share_manager.db, 'share_instance_get_all_by_share_server', mock.Mock(return_value=[])) mock_teardown_server = self.mock_object( self.share_manager.driver, 'teardown_server', mock.Mock(side_effect=exception.ShareBackendException(msg='fake'))) mock_server_update = self.mock_object( self.share_manager.db, 'share_server_update') self.assertRaises( exception.ShareBackendException, self.share_manager.delete_share_server, self.context, share_server ) mock_server_get.assert_called_once_with( self.context, share_server['id']) mock_instances_get.assert_called_once_with( self.context, share_server['id']) mock_teardown_server.assert_called_once_with( server_details=share_server['backend_details'], security_services=[] ) mock_server_update.assert_called_with( self.context, share_server['id'], {'status': constants.STATUS_ERROR} ) @mock.patch.object(db, 'share_server_get_all_unused_deletable', mock.Mock()) @mock.patch.object(manager.ShareManager, 'delete_share_server', mock.Mock()) def test_delete_free_share_servers_cleanup_disabled(self): data = dict(DEFAULT=dict(automatic_share_server_cleanup=False)) with test_utils.create_temp_config_with_opts(data): share_manager = manager.ShareManager() share_manager.driver.initialized = True share_manager.delete_free_share_servers(self.context) self.assertFalse(db.share_server_get_all_unused_deletable.called) @mock.patch.object(db, 'share_server_get_all_unused_deletable', mock.Mock()) @mock.patch.object(manager.ShareManager, 'delete_share_server', mock.Mock()) def test_delete_free_share_servers_driver_handles_ss_disabled(self): data = dict(DEFAULT=dict(driver_handles_share_servers=False)) with test_utils.create_temp_config_with_opts(data): share_manager = manager.ShareManager() share_manager.driver.initialized = True share_manager.delete_free_share_servers(self.context) self.assertFalse(db.share_server_get_all_unused_deletable.called) 
self.assertFalse(share_manager.delete_share_server.called) @mock.patch.object(db, 'share_server_get_all_unused_deletable', mock.Mock(return_value=['server1', ])) @mock.patch.object(manager.ShareManager, 'delete_share_server', mock.Mock()) @mock.patch.object(timeutils, 'utcnow', mock.Mock( return_value=datetime.timedelta(minutes=20))) def test_delete_free_share_servers(self): self.share_manager.delete_free_share_servers(self.context) db.share_server_get_all_unused_deletable.assert_called_once_with( self.context, self.share_manager.host, datetime.timedelta(minutes=10)) self.share_manager.delete_share_server.assert_called_once_with( self.context, 'server1') timeutils.utcnow.assert_called_once_with() @ddt.data("available", "error_deleting") def test_delete_expired_share(self, share_status): self.mock_object(db, 'share_get_all_expired', mock.Mock(return_value=[{"id": "share1", "status": share_status}, ])) self.mock_object(db, 'share_update') self.mock_object(api.API, 'delete') self.share_manager.delete_expired_share(self.context) db.share_get_all_expired.assert_called_once_with(self.context) share1 = {"id": "share1", "status": share_status} if share1["status"] == "error_deleting": db.share_update.assert_called_once_with( self.context, share1["id"], {'status': 'error'}) api.API.delete.assert_called_once_with( self.context, share1) def test_delete_expired_transfers(self): self.mock_object(db, 'transfer_get_all_expired', mock.Mock(return_value=[{"id": "transfer1", "name": "test_tr"}, ])) self.mock_object(transfer_api.API, 'delete') self.share_manager.delete_expired_transfers(self.context) db.transfer_get_all_expired.assert_called_once_with(self.context) transfer1 = {"id": "transfer1", "name": "test_tr"} transfer_api.API.delete.assert_called_once_with( self.context, transfer_id=transfer1["id"]) @ddt.data(True, False) def test_transfer_accept(self, clear_rules): share = db_utils.create_share(id="fake") self.mock_object(db, 'share_get', mock.Mock(return_value=share)) 
update_access_rules_call = self.mock_object( self.share_manager.access_helper, 'update_access_rules') transfer_accept_call = self.mock_object(self.share_manager.driver, 'transfer_accept') instances, rules = self._setup_init_mocks() self.mock_object(self.share_manager.db, 'share_access_get_all_for_share', mock.Mock(return_value=rules)) self.mock_object(self.share_manager.db, 'share_instance_get_all_by_share', mock.Mock(return_value=instances)) self.mock_object(db, 'share_instance_get', mock.Mock(return_value=instances[0])) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.share_manager.transfer_accept(self.context, "fake_share_id", "fake_user_id", "fake_project_id", clear_rules) if clear_rules: update_access_rules_call.assert_called_with( self.context, instances[0]['id'], delete_all_rules=True) transfer_accept_call.assert_called_with( self.context, instances[0], "fake_user_id", "fake_project_id", access_rules=[], share_server=None) else: transfer_accept_call.assert_called_with( self.context, instances[0], "fake_user_id", "fake_project_id", access_rules=rules, share_server=None) def test_transfer_accept_driver_cannot_transfer_with_rules(self): shr_obj = db_utils.create_share() self.mock_object(db, 'share_get', mock.Mock(return_value=shr_obj)) drv_exc = exception.DriverCannotTransferShareWithRules update_access_rules_call = self.mock_object( self.share_manager.access_helper, 'update_access_rules') transfer_accept_call = self.mock_object(self.share_manager.driver, 'transfer_accept', mock.Mock(side_effect=drv_exc)) rules = [ db_utils.create_access(share_id=shr_obj['id']), db_utils.create_access(share_id=shr_obj['id']) ] self.mock_object(self.share_manager.db, 'share_access_get_all_for_share', mock.Mock(return_value=rules)) self.mock_object(self.share_manager.db, 'share_instance_get_all_by_share', mock.Mock(return_value=[shr_obj['instance']])) self.mock_object(db, 'share_instance_get', 
mock.Mock(return_value=shr_obj['instance'])) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.assertRaises(exception.DriverCannotTransferShareWithRules, self.share_manager.transfer_accept, self.context, shr_obj['id'], "fake_new_user_id", "fake_new_project_id", False) transfer_accept_call.assert_called_with( self.context, shr_obj['instance'], "fake_new_user_id", "fake_new_project_id", access_rules=rules, share_server=None) update_access_rules_call.assert_not_called() self.share_manager.message_api.create.assert_called_once_with( self.context, message_field.Action.TRANSFER_ACCEPT, 'fake_new_project_id', resource_type=message_field.Resource.SHARE, resource_id=shr_obj['id'], detail=message_field.Detail.DRIVER_FAILED_TRANSFER_ACCEPT) def test_transfer_accept_other_driver_exception(self): shr_obj = db_utils.create_share() self.mock_object(db, 'share_get', mock.Mock(return_value=shr_obj)) drv_exc = exception.ShareBackendException(msg='fake_msg') update_access_rules_call = self.mock_object( self.share_manager.access_helper, 'update_access_rules') transfer_accept_call = self.mock_object(self.share_manager.driver, 'transfer_accept', mock.Mock(side_effect=drv_exc)) rules = [ db_utils.create_access(share_id=shr_obj['id']), db_utils.create_access(share_id=shr_obj['id']) ] self.mock_object(self.share_manager.db, 'share_access_get_all_for_share', mock.Mock(return_value=rules)) self.mock_object(self.share_manager.db, 'share_instance_get_all_by_share', mock.Mock(return_value=[shr_obj['instance']])) self.mock_object(db, 'share_instance_get', mock.Mock(return_value=shr_obj['instance'])) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.assertRaises(exception.ShareBackendException, self.share_manager.transfer_accept, self.context, shr_obj['id'], "fake_new_user_id", "fake_new_project_id", False) transfer_accept_call.assert_called_with( self.context, shr_obj['instance'], "fake_new_user_id", 
"fake_new_project_id", access_rules=rules, share_server=None) update_access_rules_call.assert_not_called() self.share_manager.message_api.create.assert_not_called() @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify') def test_extend_share_invalid(self, mock_notify): share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] reservations = {} mock_notify.assert_not_called() self.mock_object(self.share_manager, 'driver') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(quota.QUOTAS, 'rollback') self.mock_object(self.share_manager.driver, 'extend_share', mock.Mock(side_effect=Exception('fake'))) self.assertRaises( exception.ShareExtendingError, self.share_manager.extend_share, self.context, share_id, 123, {}) quota.QUOTAS.rollback.assert_called_once_with( mock.ANY, reservations, project_id=str(share['project_id']), user_id=str(share['user_id']), share_type_id=share_type['id'], ) @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify') def test_extend_share(self, mock_notify): share_type = db_utils.create_share_type() share = db_utils.create_share(share_type_id=share_type['id']) share_id = share['id'] new_size = 123 shr_update = { 'size': int(new_size), 'status': constants.STATUS_AVAILABLE.lower() } reservations = {} fake_share_server = 'fake' mock_notify.assert_not_called() manager = self.share_manager self.mock_object(manager, 'driver') self.mock_object(manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(manager.db, 'share_update', mock.Mock(return_value=share)) self.mock_object(quota.QUOTAS, 'commit') self.mock_object(manager.driver, 'extend_share') self.mock_object(manager, '_get_share_server', mock.Mock(return_value=fake_share_server)) self.share_manager.extend_share(self.context, share_id, new_size, reservations) self.assertTrue(manager._get_share_server.called) manager.driver.extend_share.assert_called_once_with( 
utils.IsAMatcher(models.ShareInstance), new_size, share_server=fake_share_server ) quota.QUOTAS.commit.assert_called_once_with( mock.ANY, reservations, project_id=share['project_id'], user_id=share['user_id'], share_type_id=share_type['id']) manager.db.share_update.assert_called_once_with( mock.ANY, share_id, shr_update ) self.assert_notify_called(mock_notify, (['INFO', 'share.extend.start'], ['INFO', 'share.extend.end'])) def test_shrink_share_not_supported(self): share_type = db_utils.create_share_type() share = db_utils.create_share(size=2, share_type_id=share_type['id']) new_size = 1 share_id = share['id'] self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager, 'driver') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(quota.QUOTAS, 'reserve') self.mock_object(quota.QUOTAS, 'rollback') self.mock_object(self.share_manager.driver, 'shrink_share', mock.Mock(side_effect=NotImplementedError)) self.assertRaises( exception.ShareShrinkingError, self.share_manager.shrink_share, self.context, share_id, new_size) self.share_manager.driver.shrink_share.assert_called_once_with( utils.IsAMatcher(models.ShareInstance), new_size, share_server=None ) self.share_manager.db.share_update.assert_called_once_with( mock.ANY, share_id, {'status': constants.STATUS_AVAILABLE} ) quota.QUOTAS.reserve.assert_called_once_with( mock.ANY, gigabytes=-1, project_id=share['project_id'], share_type_id=share_type['id'], user_id=share['user_id'], ) quota.QUOTAS.rollback.assert_called_once_with( mock.ANY, mock.ANY, project_id=share['project_id'], share_type_id=share_type['id'], user_id=share['user_id'], ) self.assertTrue(self.share_manager.db.share_get.called) self.share_manager.message_api.create.assert_called_once_with( utils.IsAMatcher(context.RequestContext), message_field.Action.SHRINK, share['project_id'], resource_type=message_field.Resource.SHARE, resource_id=share_id, 
            detail=message_field.Detail.DRIVER_FAILED_SHRINK)

    @ddt.data((True, [{'id': 'fake'}]), (False, []))
    @ddt.unpack
    def test_shrink_share_quota_error(self, supports_replication,
                                      replicas_list):
        """Quota reservation failure during shrink raises ShareShrinkingError.

        When the share has replicas, the reservation must also include the
        replica_gigabytes delta.
        """
        size = 5
        new_size = 1
        share_type = db_utils.create_share_type()
        share = db_utils.create_share(
            size=size, share_type_id=share_type['id'])
        share_id = share['id']
        self.mock_object(self.share_manager.db, 'share_update')
        # Make the quota reservation itself blow up.
        self.mock_object(quota.QUOTAS, 'reserve',
                         mock.Mock(side_effect=Exception('fake')))
        self.mock_object(
            self.share_manager.db, 'share_replicas_get_all_by_share',
            mock.Mock(return_value=replicas_list))
        deltas = {}
        if supports_replication:
            deltas.update({'replica_gigabytes': new_size - size})

        self.assertRaises(
            exception.ShareShrinkingError,
            self.share_manager.shrink_share, self.context, share_id, new_size)

        quota.QUOTAS.reserve.assert_called_with(
            mock.ANY,
            project_id=str(share['project_id']),
            user_id=str(share['user_id']),
            share_type_id=share_type['id'],
            gigabytes=new_size - size,
            **deltas
        )
        self.assertTrue(self.share_manager.db.share_update.called)
        (self.share_manager.db.share_replicas_get_all_by_share
         .assert_called_once_with(mock.ANY, share['id']))

    @ddt.data({'exc': exception.InvalidShare("fake"),
               'status': constants.STATUS_SHRINKING_ERROR},
              {'exc': exception.ShareShrinkingPossibleDataLoss("fake"),
               'status': constants.STATUS_AVAILABLE})
    @ddt.unpack
    def test_shrink_share_invalid(self, exc, status):
        """Driver-raised errors during shrink map to the expected status.

        InvalidShare leaves the share in 'shrinking_error'; a possible
        data-loss refusal restores 'available' and files a user message.
        """
        share_type = db_utils.create_share_type()
        share = db_utils.create_share(share_type_id=share_type['id'])
        new_size = 1
        share_id = share['id']
        size_decrease = int(share['size']) - new_size

        self.mock_object(self.share_manager, 'driver')
        self.mock_object(self.share_manager.db, 'share_update')
        self.mock_object(self.share_manager.db, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(quota.QUOTAS, 'reserve')
        self.mock_object(quota.QUOTAS, 'rollback')
        self.mock_object(self.share_manager.driver, 'shrink_share',
                         mock.Mock(side_effect=exc))

        self.assertRaises(
            exception.ShareShrinkingError,
            self.share_manager.shrink_share, self.context, share_id, new_size)

        self.share_manager.driver.shrink_share.assert_called_once_with(
            utils.IsAMatcher(models.ShareInstance),
            new_size, share_server=None
        )
        self.share_manager.db.share_update.assert_called_once_with(
            mock.ANY, share_id, {'status': status}
        )
        quota.QUOTAS.reserve.assert_called_once_with(
            mock.ANY,
            gigabytes=-size_decrease,
            project_id=share['project_id'],
            share_type_id=share_type['id'],
            user_id=share['user_id'],
        )
        quota.QUOTAS.rollback.assert_called_once_with(
            mock.ANY,
            mock.ANY,
            project_id=share['project_id'],
            share_type_id=share_type['id'],
            user_id=share['user_id'],
        )
        self.assertTrue(self.share_manager.db.share_get.called)
        if isinstance(exc, exception.ShareShrinkingPossibleDataLoss):
            self.share_manager.message_api.create.assert_called_once_with(
                utils.IsAMatcher(context.RequestContext),
                message_field.Action.SHRINK,
                share['project_id'],
                resource_type=message_field.Resource.SHARE,
                resource_id=share_id,
                detail=message_field.Detail.DRIVER_REFUSED_SHRINK)

    @ddt.data(True, False)
    def test_shrink_share(self, supports_replication):
        """Happy-path shrink, with and without replication support."""
        share_type = db_utils.create_share_type()
        share = db_utils.create_share(share_type_id=share_type['id'])
        share_id = share['id']
        new_size = 123
        shr_update = {
            'size': int(new_size),
            'status': constants.STATUS_AVAILABLE
        }
        fake_share_server = 'fake'
        size_decrease = int(share['size']) - new_size
        mock_notify = self.mock_object(fake_notifier.FakeNotifier, '_notify')
        replicas_list = []
        if supports_replication:
            replicas_list.append(share)
            replicas_list.append({'name': 'fake_replica'})

        mock_notify.assert_not_called()

        manager = self.share_manager

        self.mock_object(manager, 'driver')
        self.mock_object(manager.db, 'share_get',
                         mock.Mock(return_value=share))
        self.mock_object(manager.db, 'share_update',
                         mock.Mock(return_value=share))
        self.mock_object(quota.QUOTAS, 'commit')
        self.mock_object(quota.QUOTAS, 'reserve')
        self.mock_object(manager.driver, 'shrink_share')
        self.mock_object(manager, '_get_share_server',
                         mock.Mock(return_value=fake_share_server))
        self.mock_object(manager.db, 'share_replicas_get_all_by_share',
                         mock.Mock(return_value=replicas_list))
        reservation_params = {
            'gigabytes': -size_decrease,
            'project_id': share['project_id'],
            'share_type_id': share_type['id'],
            'user_id': share['user_id'],
        }
        if supports_replication:
            # Two replicas are mocked above, so the replica quota delta is
            # the per-share decrease times two.
            reservation_params.update(
                {'replica_gigabytes': -size_decrease * 2})

        self.share_manager.shrink_share(self.context, share_id, new_size)

        self.assertTrue(manager._get_share_server.called)
        manager.driver.shrink_share.assert_called_once_with(
            utils.IsAMatcher(models.ShareInstance),
            new_size, share_server=fake_share_server
        )
        quota.QUOTAS.reserve.assert_called_once_with(
            mock.ANY,
            **reservation_params,
        )
        quota.QUOTAS.commit.assert_called_once_with(
            mock.ANY, mock.ANY, project_id=share['project_id'],
            share_type_id=share_type['id'],
            user_id=share['user_id'],
        )
        manager.db.share_update.assert_called_once_with(
            mock.ANY, share_id, shr_update
        )
        self.assert_notify_called(mock_notify,
                                  (['INFO', 'share.shrink.start'],
                                   ['INFO', 'share.shrink.end']))
        (self.share_manager.db.share_replicas_get_all_by_share.
         assert_called_once_with(mock.ANY, share['id']))

    def test_report_driver_status_driver_handles_ss_false(self):
        """DHSS=False drivers report stats without per-server pool info."""
        fake_stats = {'field': 'val'}
        fake_pool = {'name': 'pool1'}
        self.share_manager.last_capabilities = {'field': 'old_val'}

        self.mock_object(self.share_manager, 'driver', mock.Mock())
        driver = self.share_manager.driver
        driver.get_share_stats = mock.Mock(return_value=fake_stats)
        self.mock_object(db, 'share_server_get_all_by_host', mock.Mock())
        driver.driver_handles_share_servers = False
        driver.get_share_server_pools = mock.Mock(return_value=fake_pool)

        self.share_manager._report_driver_status(self.context)

        driver.get_share_stats.assert_called_once_with(
            refresh=True)
        # No share-server lookups are expected when DHSS is False.
        self.assertFalse(db.share_server_get_all_by_host.called)
        self.assertFalse(driver.get_share_server_pools.called)
        self.assertEqual(fake_stats, self.share_manager.last_capabilities)

    def test_report_driver_status_driver_handles_ss(self):
        """DHSS=True drivers add a share-server-to-pools mapping to stats."""
        fake_stats = {'field': 'val'}
        fake_ss = {'id': '1234'}
        fake_pool = {'name': 'pool1'}

        self.mock_object(self.share_manager, 'driver', mock.Mock())
        driver = self.share_manager.driver
        driver.get_share_stats = mock.Mock(return_value=fake_stats)
        self.mock_object(db, 'share_server_get_all_by_host', mock.Mock(
            return_value=[fake_ss]))
        driver.driver_handles_share_servers = True
        driver.get_share_server_pools = mock.Mock(return_value=fake_pool)

        self.share_manager._report_driver_status(self.context)

        driver.get_share_stats.assert_called_once_with(refresh=True)
        db.share_server_get_all_by_host.assert_called_once_with(
            self.context, self.share_manager.host)
        driver.get_share_server_pools.assert_called_once_with(fake_ss)
        expected_stats = {
            'field': 'val',
            'server_pools_mapping': {
                '1234': fake_pool},
        }
        self.assertEqual(expected_stats, self.share_manager.last_capabilities)

    def test_report_driver_status_empty_share_stats(self):
        """Empty driver stats leave the cached capabilities untouched."""
        old_capabilities = {'field': 'old_val'}
        fake_pool = {'name': 'pool1'}
        self.share_manager.last_capabilities = old_capabilities

        self.mock_object(self.share_manager,
'driver', mock.Mock()) driver = self.share_manager.driver driver.get_share_stats = mock.Mock(return_value={}) self.mock_object(db, 'share_server_get_all_by_host', mock.Mock()) driver.driver_handles_share_servers = True driver.get_share_server_pools = mock.Mock(return_value=fake_pool) self.share_manager._report_driver_status(self.context) driver.get_share_stats.assert_called_once_with(refresh=True) self.assertFalse(db.share_server_get_all_by_host.called) self.assertFalse(driver.get_share_server_pools.called) self.assertEqual(old_capabilities, self.share_manager.last_capabilities) def test_create_share_group(self): fake_group = { 'id': 'fake_id', 'availability_zone_id': 'fake_az', } self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.db, 'share_group_update', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.driver, 'create_share_group', mock.Mock(return_value=None)) self.share_manager.create_share_group(self.context, "fake_id") self.share_manager.db.share_group_update.assert_called_once_with( mock.ANY, 'fake_id', { 'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY, 'consistent_snapshot_support': None, 'availability_zone_id': fake_group['availability_zone_id'], } ) def test_create_cg_with_share_network_driver_not_handles_servers(self): manager.CONF.set_default('driver_handles_share_servers', False) self.mock_object( self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=False)) cg_id = 'fake_group_id' share_network_id = 'fake_sn' fake_group = {'id': 'fake_id', 'share_network_id': share_network_id} self.mock_object( self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.db, 'share_group_update') self.assertRaises( exception.ManilaException, self.share_manager.create_share_group, self.context, cg_id) self.share_manager.db.share_group_get.assert_called_once_with( 
utils.IsAMatcher(context.RequestContext), cg_id) self.share_manager.db.share_group_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), cg_id, {'status': constants.STATUS_ERROR}) def test_create_sg_with_share_network_driver_handles_servers(self): manager.CONF.set_default('driver_handles_share_servers', True) self.mock_object( self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=True)) share_network_id = 'fake_sn' fake_group = { 'id': 'fake_id', 'share_network_id': share_network_id, 'host': "fake_host", 'availability_zone_id': 'fake_az', } fake_subnet = { 'id': 'fake_subnet_id' } self.mock_object( self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object( self.share_manager.db, 'share_group_update', mock.Mock(return_value=fake_group)) self.mock_object( self.share_manager.db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=[fake_subnet]) ) self.mock_object( self.share_manager, '_provide_share_server_for_share_group', mock.Mock(return_value=({}, fake_group))) self.mock_object( self.share_manager.driver, 'create_share_group', mock.Mock(return_value=None)) self.share_manager.create_share_group(self.context, "fake_id") self.share_manager.db.share_group_update.assert_called_once_with( mock.ANY, 'fake_id', { 'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY, 'consistent_snapshot_support': None, 'availability_zone_id': fake_group['availability_zone_id'], } ) def test_create_share_group_with_update(self): fake_group = { 'id': 'fake_id', 'availability_zone_id': 'fake_az', } self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.db, 'share_group_update', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.driver, 'create_share_group', mock.Mock(return_value={'foo': 'bar'})) self.share_manager.create_share_group(self.context, "fake_id") 
(self.share_manager.db.share_group_update. assert_any_call(mock.ANY, 'fake_id', {'foo': 'bar'})) self.share_manager.db.share_group_update.assert_any_call( mock.ANY, 'fake_id', { 'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY, 'consistent_snapshot_support': None, 'availability_zone_id': fake_group['availability_zone_id'], } ) def test_create_share_group_with_error(self): fake_group = { 'id': 'fake_id', 'availability_zone_id': 'fake_az', } self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.db, 'share_group_update', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.driver, 'create_share_group', mock.Mock(side_effect=exception.Error)) self.assertRaises(exception.Error, self.share_manager.create_share_group, self.context, "fake_id") self.share_manager.db.share_group_update.assert_called_once_with( mock.ANY, 'fake_id', { 'status': constants.STATUS_ERROR, 'consistent_snapshot_support': None, 'availability_zone_id': fake_group['availability_zone_id'], } ) def test_create_share_group_from_sg_snapshot(self): fake_group = { 'id': 'fake_id', 'source_share_group_snapshot_id': 'fake_snap_id', 'shares': [], 'share_server_id': 'fake_ss_id', 'availability_zone_id': 'fake_az', } fake_sn = {'id': 'fake_sn_id'} fake_sns = {'id': 'fake_sns_id', 'share_network_id': fake_sn['id']} fake_ss = {'id': 'fake_ss_id', 'share_network_subnets': [fake_sns], 'share_network_id': 'fake_sn_id'} fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': [], 'share_group': {'share_server_id': fake_ss['id']}} self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock( return_value=fake_ss)) self.mock_object(self.share_manager.db, 'share_group_update', 
mock.Mock(return_value=fake_group)) mock_create_sg_from_sg_snap = self.mock_object( self.share_manager.driver, 'create_share_group_from_share_group_snapshot', mock.Mock(return_value=(None, None))) self.share_manager.create_share_group(self.context, "fake_id") self.share_manager.db.share_group_update.assert_called_once_with( mock.ANY, 'fake_id', {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY, 'availability_zone_id': fake_group['availability_zone_id'], 'consistent_snapshot_support': None}) self.share_manager.db.share_server_get(mock.ANY, 'fake_ss_id') mock_create_sg_from_sg_snap.assert_called_once_with( mock.ANY, fake_group, fake_snap, share_server=fake_ss) def test_create_sg_snapshot_share_network_driver_not_handles_servers(self): manager.CONF.set_default('driver_handles_share_servers', False) self.mock_object( self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=False)) sg_id = 'fake_share_group_id' share_network_id = 'fake_sn' fake_group = { 'id': 'fake_id', 'source_share_group_snapshot_id': 'fake_snap_id', 'shares': [], 'share_network_id': share_network_id, 'host': "fake_host", } self.mock_object( self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []} self.mock_object(self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_group_update') self.assertRaises(exception.ManilaException, self.share_manager.create_share_group, self.context, sg_id) self.share_manager.db.share_group_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), sg_id) self.share_manager.db.share_group_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), sg_id, {'status': constants.STATUS_ERROR}) def test_create_sg_snapshot_share_network_without_subnets(self): manager.CONF.set_default('driver_handles_share_servers', True) self.mock_object( 
self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=True)) self.mock_object(self.share_manager.db, 'share_instance_get_all_by_share_group_id', mock.Mock(return_value=[])) fake_group = { 'id': 'fake_id', 'source_share_group_snapshot_id': 'fake_snap_id', 'shares': [], 'share_network_id': 'fake_sn', 'host': "fake_host", } self.mock_object( self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []} self.mock_object(self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager, '_get_az_for_share_group', mock.Mock(return_value='az')) self.mock_object( self.share_manager.db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=[])) self.assertRaises(exception.ShareNetworkSubnetNotFound, self.share_manager.create_share_group, self.context, 'fake_share_group_id') def test_create_share_group_from_sg_snapshot_share_network_dhss(self): manager.CONF.set_default('driver_handles_share_servers', True) self.mock_object(self.share_manager.driver.configuration, 'safe_get', mock.Mock(return_value=True)) share_network_id = 'fake_sn' share_network_subnet = { 'id': 'fake_subnet_id' } fake_group = { 'id': 'fake_id', 'source_share_group_snapshot_id': 'fake_snap_id', 'shares': [], 'share_network_id': share_network_id, 'availability_zone_id': 'fake_az', } fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []} self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_group_update', mock.Mock(return_value=fake_group)) self.mock_object( self.share_manager.db, 'share_network_subnets_get_all_by_availability_zone_id', mock.Mock(return_value=share_network_subnet)) self.mock_object( self.share_manager, 
'_provide_share_server_for_share_group', mock.Mock(return_value=({}, fake_group))) self.mock_object( self.share_manager.driver, 'create_share_group_from_share_group_snapshot', mock.Mock(return_value=(None, None))) self.share_manager.create_share_group(self.context, "fake_id") self.share_manager.db.share_group_update.assert_called_once_with( mock.ANY, 'fake_id', {'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY, 'consistent_snapshot_support': None, 'availability_zone_id': fake_group['availability_zone_id']}) def test_create_share_group_from_share_group_snapshot_with_update(self): fake_group = { 'id': 'fake_id', 'source_share_group_snapshot_id': 'fake_snap_id', 'shares': [], 'availability_zone_id': 'fake_az', } fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []} self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_group_update', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.driver, 'create_share_group_from_share_group_snapshot', mock.Mock(return_value=({'foo': 'bar'}, None))) self.share_manager.create_share_group(self.context, "fake_id") self.share_manager.db.share_group_update.assert_any_call( mock.ANY, 'fake_id', {'foo': 'bar'}) self.share_manager.db.share_group_update.assert_any_call( mock.ANY, 'fake_id', { 'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY, 'consistent_snapshot_support': None, 'availability_zone_id': fake_group['availability_zone_id'], } ) @ddt.data(constants.STATUS_AVAILABLE, constants.STATUS_CREATING_FROM_SNAPSHOT, None) def test_create_share_group_from_sg_snapshot_with_share_update_status( self, share_status): fake_share = {'id': 'fake_share_id'} # if share_status is not None: # fake_share.update({'status': share_status}) fake_export_locations = ['my_export_location'] fake_group = { 'id': 
'fake_id', 'source_share_group_snapshot_id': 'fake_snap_id', 'shares': [fake_share], 'availability_zone_id': 'fake_az', } fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []} self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_group_update') self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'export_locations_update') fake_share_update = {'id': fake_share['id'], 'foo': 'bar', 'export_locations': fake_export_locations} if share_status is not None: fake_share_update.update({'status': share_status}) self.mock_object(self.share_manager.driver, 'create_share_group_from_share_group_snapshot', mock.Mock(return_value=(None, [fake_share_update]))) self.share_manager.create_share_group(self.context, "fake_id") exp_progress = ( '0%' if share_status == constants.STATUS_CREATING_FROM_SNAPSHOT else '100%') self.share_manager.db.share_instance_update.assert_any_call( mock.ANY, 'fake_share_id', {'foo': 'bar', 'status': share_status or constants.STATUS_AVAILABLE, 'progress': exp_progress}) self.share_manager.db.export_locations_update.assert_any_call( mock.ANY, 'fake_share_id', fake_export_locations) self.share_manager.db.share_group_update.assert_any_call( mock.ANY, 'fake_id', { 'status': constants.STATUS_AVAILABLE, 'created_at': mock.ANY, 'consistent_snapshot_support': None, 'availability_zone_id': fake_group['availability_zone_id'], } ) def test_create_share_group_from_sg_snapshot_with_error(self): fake_group = { 'id': 'fake_id', 'source_share_group_snapshot_id': 'fake_snap_id', 'shares': [], 'availability_zone_id': 'fake_az', } fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []} self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) 
self.mock_object(self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_instance_get_all_by_share_group_id', mock.Mock(return_value=[])) self.mock_object(self.share_manager.db, 'share_group_update', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.driver, 'create_share_group_from_share_group_snapshot', mock.Mock(side_effect=exception.Error)) self.assertRaises(exception.Error, self.share_manager.create_share_group, self.context, "fake_id") self.share_manager.db.share_group_update.assert_called_once_with( mock.ANY, 'fake_id', { 'status': constants.STATUS_ERROR, 'consistent_snapshot_support': None, 'availability_zone_id': fake_group['availability_zone_id'], } ) def test_create_share_group_from_sg_snapshot_with_invalid_status(self): fake_share = {'id': 'fake_share_id', 'status': constants.STATUS_CREATING} fake_export_locations = ['my_export_location'] fake_group = { 'id': 'fake_id', 'source_share_group_snapshot_id': 'fake_snap_id', 'shares': [fake_share], 'availability_zone_id': 'fake_az', } fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []} self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=fake_group)) self.mock_object(self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_instance_get_all_by_share_group_id', mock.Mock(return_value=[])) self.mock_object(self.share_manager.db, 'share_group_update', mock.Mock(return_value=fake_group)) fake_share_update_list = [{'id': fake_share['id'], 'status': fake_share['status'], 'foo': 'bar', 'export_locations': fake_export_locations}] self.mock_object(self.share_manager.driver, 'create_share_group_from_share_group_snapshot', mock.Mock( return_value=(None, fake_share_update_list))) self.assertRaises(exception.InvalidShareInstance, self.share_manager.create_share_group, self.context, "fake_id") 
        self.share_manager.db.share_group_update.assert_called_once_with(
            mock.ANY, 'fake_id',
            {
                'status': constants.STATUS_ERROR,
                'consistent_snapshot_support': None,
                'availability_zone_id': fake_group['availability_zone_id'],
            }
        )

    def test_create_share_group_from_sg_snapshot_with_share_error(self):
        """Driver failure while cloning members: shares and group go ERROR."""
        fake_share = {'id': 'fake_share_id'}
        fake_group = {
            'id': 'fake_id',
            'source_share_group_snapshot_id': 'fake_snap_id',
            'shares': [fake_share],
            'availability_zone_id': 'fake_az',
        }
        fake_snap = {'id': 'fake_snap_id', 'share_group_snapshot_members': []}
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_snapshot_get',
                         mock.Mock(return_value=fake_snap))
        self.mock_object(self.share_manager.db,
                         'share_instance_get_all_by_share_group_id',
                         mock.Mock(return_value=[fake_share]))
        self.mock_object(self.share_manager.db, 'share_group_update')
        self.mock_object(self.share_manager.db, 'share_instance_update')
        self.mock_object(self.share_manager.driver,
                         'create_share_group_from_share_group_snapshot',
                         mock.Mock(side_effect=exception.Error))

        self.assertRaises(exception.Error,
                          self.share_manager.create_share_group,
                          self.context, "fake_id")

        self.share_manager.db.share_instance_update.assert_any_call(
            mock.ANY, 'fake_share_id', {'status': constants.STATUS_ERROR})
        self.share_manager.db.share_group_update.assert_called_once_with(
            mock.ANY, 'fake_id',
            {
                'status': constants.STATUS_ERROR,
                'consistent_snapshot_support': None,
                'availability_zone_id': fake_group['availability_zone_id'],
            }
        )

    def test_delete_share_group(self):
        """Successful delete destroys the share group row."""
        fake_group = {'id': 'fake_id'}
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_update',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_destroy',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.driver, 'delete_share_group',
                         mock.Mock(return_value=None))

        self.share_manager.delete_share_group(self.context, "fake_id")

        self.share_manager.db.share_group_destroy.assert_called_once_with(
            mock.ANY, 'fake_id')

    def test_delete_share_group_with_update(self):
        """A driver-returned model update is persisted before destroy."""
        fake_group = {'id': 'fake_id'}
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_update',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_destroy',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.driver, 'delete_share_group',
                         mock.Mock(return_value={'foo': 'bar'}))

        self.share_manager.delete_share_group(self.context, "fake_id")

        self.share_manager.db.share_group_update.assert_called_once_with(
            mock.ANY, 'fake_id', {'foo': 'bar'})
        self.share_manager.db.share_group_destroy.assert_called_once_with(
            mock.ANY, 'fake_id')

    def test_delete_share_group_with_error(self):
        """Driver failure during delete sets the group status to ERROR."""
        fake_group = {'id': 'fake_id'}
        self.mock_object(self.share_manager.db, 'share_group_get',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.db, 'share_group_update',
                         mock.Mock(return_value=fake_group))
        self.mock_object(self.share_manager.driver, 'delete_share_group',
                         mock.Mock(side_effect=exception.Error))

        self.assertRaises(exception.Error,
                          self.share_manager.delete_share_group,
                          self.context, "fake_id")

        self.share_manager.db.share_group_update.assert_called_once_with(
            mock.ANY, 'fake_id', {'status': constants.STATUS_ERROR})

    def test_create_share_group_snapshot(self):
        """Successful group snapshot ends in 'available' status."""
        fake_snap = {
            'id': 'fake_snap_id',
            'share_group': {},
            'share_group_snapshot_members': [],
        }
        self.mock_object(
            self.share_manager.db, 'share_group_snapshot_get',
            mock.Mock(return_value=fake_snap))
        mock_sg_snap_update = self.mock_object(
            self.share_manager.db, 'share_group_snapshot_update',
            mock.Mock(return_value=fake_snap))
        self.mock_object(
            self.share_manager.driver, 'create_share_group_snapshot',
            mock.Mock(return_value=(None, None)))
self.share_manager.create_share_group_snapshot( self.context, fake_snap['id']) mock_sg_snap_update.assert_called_once_with( mock.ANY, fake_snap['id'], {'status': constants.STATUS_AVAILABLE, 'updated_at': mock.ANY}) def test_create_share_group_snapshot_with_update(self): fake_snap = {'id': 'fake_snap_id', 'share_group': {}, 'share_group_snapshot_members': []} self.mock_object(self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.db, 'share_group_snapshot_update', mock.Mock(return_value=fake_snap)) self.mock_object(self.share_manager.driver, 'create_share_group_snapshot', mock.Mock(return_value=({'foo': 'bar'}, None))) self.share_manager.create_share_group_snapshot( self.context, fake_snap['id']) self.share_manager.db.share_group_snapshot_update.assert_any_call( mock.ANY, 'fake_snap_id', {'foo': 'bar'}) self.share_manager.db.share_group_snapshot_update.assert_any_call( mock.ANY, fake_snap['id'], {'status': constants.STATUS_AVAILABLE, 'updated_at': mock.ANY}) def test_create_share_group_snapshot_with_member_update(self): fake_member1 = {'id': 'fake_member_id_1', 'share_instance_id': 'si_1'} fake_member2 = {'id': 'fake_member_id_2', 'share_instance_id': 'si_2'} fake_member3 = {'id': 'fake_member_id_3', 'share_instance_id': 'si_3'} fake_member_update1 = { 'id': fake_member1['id'], 'provider_location': 'fake_provider_location_1', 'size': 13, 'export_locations': ['fake_el_1_1', 'fake_el_1_2'], 'should_not_be_used_k1': 'should_not_be_used_v1', } fake_member_update2 = { 'id': fake_member2['id'], 'provider_location': 'fake_provider_location_2', 'size': 31, 'export_locations': ['fake_el_2_1', 'fake_el_2_2'], 'status': 'fake_status_for_update', 'should_not_be_used_k2': 'should_not_be_used_k2', } fake_member_update3 = { 'provider_location': 'fake_provider_location_3', 'size': 42, 'export_locations': ['fake_el_3_1', 'fake_el_3_2'], 'should_not_be_used_k3': 'should_not_be_used_k3', } expected_member_update1 = { 
'id': fake_member_update1['id'], 'provider_location': fake_member_update1['provider_location'], 'size': fake_member_update1['size'], } expected_member_update2 = { 'id': fake_member_update2['id'], 'provider_location': fake_member_update2['provider_location'], 'size': fake_member_update2['size'], 'status': fake_member_update2['status'], } fake_snap = { 'id': 'fake_snap_id', 'share_group': {}, 'share_group_snapshot_members': [ fake_member1, fake_member2, fake_member3], } self.mock_object( self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) mock_sg_snapshot_update = self.mock_object( self.share_manager.db, 'share_group_snapshot_update', mock.Mock(return_value=fake_snap)) mock_sg_snapshot_member_update = self.mock_object( self.share_manager.db, 'share_group_snapshot_member_update') self.mock_object( self.share_manager.db, 'share_instance_get', mock.Mock(return_value={'id': 'blah'})) self.mock_object( timeutils, 'utcnow', mock.Mock(side_effect=range(1, 10))) mock_driver_create_sg_snapshot = self.mock_object( self.share_manager.driver, 'create_share_group_snapshot', mock.Mock(return_value=( None, [fake_member_update1, fake_member_update2, fake_member_update3]))) self.share_manager.create_share_group_snapshot( self.context, fake_snap['id']) mock_driver_create_sg_snapshot.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_snap, share_server=None) mock_sg_snapshot_update.assert_called_once_with( mock.ANY, fake_snap['id'], {'status': constants.STATUS_AVAILABLE, 'updated_at': mock.ANY}) mock_sg_snapshot_member_update.assert_has_calls([ mock.call( utils.IsAMatcher(context.RequestContext), expected_member_update1['id'], {'provider_location': expected_member_update1[ 'provider_location'], 'size': expected_member_update1['size'], 'updated_at': 1, 'status': manager.constants.STATUS_AVAILABLE}), mock.call( utils.IsAMatcher(context.RequestContext), expected_member_update2['id'], {'provider_location': expected_member_update2[ 
'provider_location'], 'size': expected_member_update2['size'], 'updated_at': 1, 'status': expected_member_update2['status']}), ]) def test_create_group_snapshot_with_error(self): fake_snap = {'id': 'fake_snap_id', 'share_group': {}, 'share_group_snapshot_members': []} self.mock_object( self.share_manager.db, 'share_group_snapshot_get', mock.Mock(return_value=fake_snap)) mock_sg_snap_update = self.mock_object( self.share_manager.db, 'share_group_snapshot_update', mock.Mock(return_value=fake_snap)) self.mock_object( self.share_manager.driver, 'create_share_group_snapshot', mock.Mock(side_effect=exception.Error)) self.assertRaises( exception.Error, self.share_manager.create_share_group_snapshot, self.context, fake_snap['id']) mock_sg_snap_update.assert_called_once_with( mock.ANY, fake_snap['id'], {'status': constants.STATUS_ERROR}) def test_connection_get_info(self): share_instance = {'share_server_id': 'fake_server_id'} share_instance_id = 'fake_id' share_server = 'fake_share_server' connection_info = 'fake_info' # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=share_instance)) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.driver, 'connection_get_info', mock.Mock(return_value=connection_info)) # run result = self.share_manager.connection_get_info( self.context, share_instance_id) # asserts self.assertEqual(connection_info, result) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, share_instance_id, with_share_data=True) self.share_manager.driver.connection_get_info.assert_called_once_with( self.context, share_instance, share_server) @ddt.data(True, False) def test_migration_start(self, success): instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, share_server_id='fake_server_id', host='fake@backend#pool') share = db_utils.create_share(id='fake_id', 
instances=[instance]) fake_service = {'availability_zone_id': 'fake_az_id'} host = 'fake2@backend#pool' # mocks self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=instance)) self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager, '_migration_start_driver', mock.Mock(return_value=success)) self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) if not success: self.mock_object( self.share_manager, '_migration_start_host_assisted') # run self.share_manager.migration_start( self.context, 'fake_id', host, False, False, False, False, False, 'fake_net_id', 'fake_type_id') # asserts self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, instance['id'], with_share_data=True) share_update_calls = [ mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), ] if not success: share_update_calls.append(mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS})) self.share_manager.db.share_update.assert_has_calls(share_update_calls) self.share_manager._migration_start_driver.assert_called_once_with( self.context, share, instance, host, False, False, False, False, 'fake_net_id', 'fake_az_id', 'fake_type_id') if not success: (self.share_manager._migration_start_host_assisted. 
assert_called_once_with( self.context, share, instance, host, 'fake_net_id', 'fake_az_id', 'fake_type_id')) self.share_manager.db.service_get_by_args.assert_called_once_with( self.context, 'fake2@backend', 'manila-share') @ddt.data({'writable': False, 'preserve_metadata': False, 'nondisruptive': False, 'preserve_snapshots': True, 'has_snapshots': False}, {'writable': False, 'preserve_metadata': False, 'nondisruptive': True, 'preserve_snapshots': False, 'has_snapshots': False}, {'writable': False, 'preserve_metadata': True, 'nondisruptive': False, 'preserve_snapshots': False, 'has_snapshots': False}, {'writable': True, 'preserve_metadata': False, 'nondisruptive': False, 'preserve_snapshots': False, 'has_snapshots': False}, {'writable': False, 'preserve_metadata': False, 'nondisruptive': False, 'preserve_snapshots': False, 'has_snapshots': True} ) @ddt.unpack def test_migration_start_prevent_host_assisted( self, writable, preserve_metadata, nondisruptive, preserve_snapshots, has_snapshots): share = db_utils.create_share() instance = share.instance host = 'fake@backend#pool' fake_service = {'availability_zone_id': 'fake_az_id'} if has_snapshots: snapshot = db_utils.create_snapshot(share_id=share['id']) self.mock_object( self.share_manager.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[snapshot])) # mocks self.mock_object(self.share_manager, '_reset_read_only_access_rules') self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) # run self.assertRaises( exception.ShareMigrationFailed, self.share_manager.migration_start, self.context, 'share_id', host, True, writable, preserve_metadata, nondisruptive, preserve_snapshots, 'fake_net_id') self.share_manager.db.share_update.assert_has_calls([ mock.call( 
self.context, 'share_id', {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), mock.call( self.context, 'share_id', {'task_state': constants.TASK_STATE_MIGRATION_ERROR}), ]) self.share_manager.db.share_instance_update.assert_called_once_with( self.context, instance['id'], {'status': constants.STATUS_AVAILABLE}) self.share_manager.db.share_get.assert_called_once_with( self.context, 'share_id') self.share_manager.db.service_get_by_args.assert_called_once_with( self.context, 'fake@backend', 'manila-share') (self.share_manager._reset_read_only_access_rules. assert_called_once_with(self.context, instance['id'])) def test_migration_start_exception(self): instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, share_server_id='fake_server_id', host='fake@backend#pool') share = db_utils.create_share(id='fake_id', instances=[instance]) host = 'fake2@backend#pool' fake_service = {'availability_zone_id': 'fake_az_id'} # mocks self.mock_object(self.share_manager.db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=instance)) self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager, '_migration_start_driver', mock.Mock(side_effect=Exception('fake_exc_1'))) self.mock_object(self.share_manager, '_migration_start_host_assisted', mock.Mock(side_effect=Exception('fake_exc_2'))) self.mock_object(self.share_manager, '_reset_read_only_access_rules') # run self.assertRaises( exception.ShareMigrationFailed, self.share_manager.migration_start, self.context, 'fake_id', host, False, False, False, False, False, 'fake_net_id', 'fake_type_id') # asserts self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) 
self.share_manager.db.share_instance_get.assert_called_once_with( self.context, instance['id'], with_share_data=True) share_update_calls = [ mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) ] (self.share_manager._reset_read_only_access_rules. assert_called_once_with(self.context, instance['id'])) self.share_manager.db.share_update.assert_has_calls(share_update_calls) self.share_manager.db.share_instance_update.assert_called_once_with( self.context, instance['id'], {'status': constants.STATUS_AVAILABLE}) self.share_manager._migration_start_driver.assert_called_once_with( self.context, share, instance, host, False, False, False, False, 'fake_net_id', 'fake_az_id', 'fake_type_id') self.share_manager.db.service_get_by_args.assert_called_once_with( self.context, 'fake2@backend', 'manila-share') @ddt.data(None, Exception('fake')) def test__migration_start_host_assisted(self, exc): share_server = db_utils.create_share_server() instance = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_AVAILABLE, share_server_id=share_server['id']) new_instance = db_utils.create_share_instance( share_id='new_fake_id', status=constants.STATUS_AVAILABLE) share = db_utils.create_share(id='fake_id', instances=[instance]) src_connection_info = 'src_fake_info' dest_connection_info = 'dest_fake_info' instance_updates = [ mock.call( self.context, instance['id'], {'cast_rules_to_readonly': True}) ] # mocks helper = mock.Mock() self.mock_object(migration_api, 'ShareMigrationHelper', mock.Mock(return_value=helper)) self.mock_object(helper, 'cleanup_new_instance') self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.db, 'share_instance_update', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager.access_helper, 
'get_and_update_share_instance_access_rules') self.mock_object(self.share_manager.access_helper, 'update_access_rules') self.mock_object(utils, 'wait_for_access_update') if exc is None: self.mock_object(helper, 'create_instance_and_wait', mock.Mock(return_value=new_instance)) self.mock_object(self.share_manager.driver, 'connection_get_info', mock.Mock(return_value=src_connection_info)) self.mock_object(rpcapi.ShareAPI, 'connection_get_info', mock.Mock(return_value=dest_connection_info)) self.mock_object(data_rpc.DataAPI, 'migration_start', mock.Mock(side_effect=Exception('fake'))) self.mock_object(helper, 'cleanup_new_instance') instance_updates.append( mock.call(self.context, new_instance['id'], {'status': constants.STATUS_MIGRATING_TO})) else: self.mock_object(helper, 'create_instance_and_wait', mock.Mock(side_effect=exc)) # run self.assertRaises( exception.ShareMigrationFailed, self.share_manager._migration_start_host_assisted, self.context, share, instance, 'fake_host', 'fake_net_id', 'fake_az_id', 'fake_type_id') # asserts self.share_manager.db.share_server_get.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), instance['share_server_id']), mock.call(utils.IsAMatcher(context.RequestContext), instance['share_server_id']) ]) (self.share_manager.access_helper.update_access_rules. assert_called_once_with( self.context, instance['id'], share_server=share_server)) helper.create_instance_and_wait.assert_called_once_with( share, 'fake_host', 'fake_net_id', 'fake_az_id', 'fake_type_id') utils.wait_for_access_update.assert_called_once_with( self.context, self.share_manager.db, instance, self.share_manager.migration_wait_access_rules_timeout) if exc is None: (self.share_manager.driver.connection_get_info. 
assert_called_once_with(self.context, instance, share_server)) rpcapi.ShareAPI.connection_get_info.assert_called_once_with( self.context, new_instance) data_rpc.DataAPI.migration_start.assert_called_once_with( self.context, share['id'], ['lost+found'], instance['id'], new_instance['id'], src_connection_info, dest_connection_info) helper.cleanup_new_instance.assert_called_once_with(new_instance) @ddt.data({'share_network_id': 'fake_net_id', 'exc': None, 'has_snapshots': True}, {'share_network_id': None, 'exc': Exception('fake'), 'has_snapshots': True}, {'share_network_id': None, 'exc': None, 'has_snapshots': False}) @ddt.unpack def test__migration_start_driver( self, exc, share_network_id, has_snapshots): fake_dest_host = 'fake_host' src_server = db_utils.create_share_server() if share_network_id: dest_server = db_utils.create_share_server() else: dest_server = None share = db_utils.create_share( id='fake_id', share_server_id='fake_src_server_id', share_network_id=share_network_id) migrating_instance = db_utils.create_share_instance( share_id='fake_id', share_network_id=share_network_id) if has_snapshots: snapshot = db_utils.create_snapshot( status=(constants.STATUS_AVAILABLE if not exc else constants.STATUS_ERROR), share_id=share['id']) migrating_snap_instance = db_utils.create_snapshot( status=constants.STATUS_MIGRATING, share_id=share['id']) dest_snap_instance = db_utils.create_snapshot_instance( status=constants.STATUS_AVAILABLE, snapshot_id=snapshot['id'], share_instance_id=migrating_instance['id']) snapshot_mappings = {snapshot.instance['id']: dest_snap_instance} else: snapshot_mappings = {} src_instance = share.instance compatibility = { 'compatible': True, 'writable': False, 'preserve_metadata': False, 'nondisruptive': False, 'preserve_snapshots': has_snapshots, } # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=migrating_instance)) self.mock_object(self.share_manager.db, 'share_server_get', 
mock.Mock(return_value=src_server)) self.mock_object(self.share_manager.driver, 'migration_check_compatibility', mock.Mock(return_value=compatibility)) self.mock_object( api.API, 'create_share_instance_and_get_request_spec', mock.Mock(return_value=({}, migrating_instance))) self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(rpcapi.ShareAPI, 'provide_share_server', mock.Mock(return_value='fake_dest_share_server_id')) self.mock_object(rpcapi.ShareAPI, 'create_share_server') self.mock_object( migration_api.ShareMigrationHelper, 'wait_for_share_server', mock.Mock(return_value=dest_server)) self.mock_object( self.share_manager.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[snapshot] if has_snapshots else [])) if has_snapshots: self.mock_object( self.share_manager.db, 'share_snapshot_instance_create', mock.Mock(return_value=dest_snap_instance)) self.mock_object( self.share_manager.db, 'share_snapshot_instance_update') self.mock_object( self.share_manager.db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[migrating_snap_instance])) self.mock_object(self.share_manager.driver, 'migration_start') self.mock_object(self.share_manager, '_migration_delete_instance') self.mock_object(self.share_manager, 'update_access_for_instances') self.mock_object(utils, 'wait_for_access_update') # run if exc: self.assertRaises( exception.ShareMigrationFailed, self.share_manager._migration_start_driver, self.context, share, src_instance, fake_dest_host, False, False, False, False, share_network_id, 'fake_az_id', 'fake_type_id') else: result = self.share_manager._migration_start_driver( self.context, share, src_instance, fake_dest_host, False, False, False, False, share_network_id, 'fake_az_id', 'fake_type_id') # asserts if not exc: self.assertTrue(result) self.share_manager.db.share_update.assert_has_calls([ mock.call( self.context, share['id'], {'task_state': 
constants.TASK_STATE_MIGRATION_DRIVER_STARTING}), mock.call( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS}) ]) (self.share_manager.db.share_instance_update.assert_has_calls([ mock.call(self.context, migrating_instance['id'], {'status': constants.STATUS_MIGRATING_TO}), mock.call(self.context, src_instance['id'], {'cast_rules_to_readonly': True})])) (self.share_manager.update_access_for_instances. assert_called_once_with(self.context, [src_instance['id']], share_server_id=src_server['id'])) self.share_manager.driver.migration_start.assert_called_once_with( self.context, src_instance, migrating_instance, [snapshot.instance] if has_snapshots else [], snapshot_mappings, src_server, dest_server) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, migrating_instance['id'], with_share_data=True) self.share_manager.db.share_server_get.assert_called_once_with( self.context, 'fake_src_server_id') (api.API.create_share_instance_and_get_request_spec. assert_called_once_with(self.context, share, 'fake_az_id', None, 'fake_host', share_network_id, 'fake_type_id')) (self.share_manager.driver.migration_check_compatibility. assert_called_once_with(self.context, src_instance, migrating_instance, src_server, dest_server)) (self.share_manager.db.share_snapshot_get_all_for_share. assert_called_once_with(self.context, share['id'])) if share_network_id: (rpcapi.ShareAPI.provide_share_server. assert_called_once_with( self.context, migrating_instance, share_network_id)) rpcapi.ShareAPI.create_share_server.assert_called_once_with( self.context, migrating_instance, 'fake_dest_share_server_id') (migration_api.ShareMigrationHelper.wait_for_share_server. assert_called_once_with('fake_dest_share_server_id')) if exc: (self.share_manager._migration_delete_instance. assert_called_once_with(self.context, migrating_instance['id'])) if has_snapshots: (self.share_manager.db.share_snapshot_instance_update. 
assert_called_once_with( self.context, migrating_snap_instance['id'], {'status': constants.STATUS_AVAILABLE})) (self.share_manager.db. share_snapshot_instance_get_all_with_filters( self.context, {'share_instance_ids': [src_instance['id']]})) else: if has_snapshots: snap_data = { 'status': constants.STATUS_MIGRATING_TO, 'progress': '0%', 'share_instance_id': migrating_instance['id'], } (self.share_manager.db.share_snapshot_instance_create. assert_called_once_with(self.context, snapshot['id'], snap_data)) (self.share_manager.db.share_snapshot_instance_update. assert_called_once_with( self.context, snapshot.instance['id'], {'status': constants.STATUS_MIGRATING})) @ddt.data({'writable': False, 'preserve_metadata': True, 'nondisruptive': True, 'compatible': True, 'preserve_snapshots': True, 'has_snapshots': False}, {'writable': True, 'preserve_metadata': False, 'nondisruptive': True, 'compatible': True, 'preserve_snapshots': True, 'has_snapshots': False}, {'writable': True, 'preserve_metadata': True, 'nondisruptive': False, 'compatible': True, 'preserve_snapshots': True, 'has_snapshots': False}, {'writable': True, 'preserve_metadata': True, 'nondisruptive': True, 'compatible': False, 'preserve_snapshots': True, 'has_snapshots': False}, {'writable': True, 'preserve_metadata': True, 'nondisruptive': True, 'compatible': True, 'preserve_snapshots': False, 'has_snapshots': False}, {'writable': True, 'preserve_metadata': True, 'nondisruptive': True, 'compatible': True, 'preserve_snapshots': False, 'has_snapshots': True}) @ddt.unpack def test__migration_start_driver_not_compatible( self, compatible, writable, preserve_metadata, nondisruptive, preserve_snapshots, has_snapshots): share = db_utils.create_share() src_instance = db_utils.create_share_instance( share_id='fake_id', share_server_id='src_server_id', share_network_id='fake_share_network_id') fake_dest_host = 'fake_host' src_server = db_utils.create_share_server() dest_server = db_utils.create_share_server() 
migrating_instance = db_utils.create_share_instance( share_id='fake_id', share_network_id='fake_net_id') compatibility = { 'compatible': compatible, 'writable': writable, 'preserve_metadata': preserve_metadata, 'nondisruptive': nondisruptive, 'preserve_snapshots': preserve_snapshots, } snapshot = db_utils.create_snapshot(share_id=share['id']) # mocks self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=src_server)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=migrating_instance)) self.mock_object( api.API, 'create_share_instance_and_get_request_spec', mock.Mock(return_value=({}, migrating_instance))) self.mock_object(rpcapi.ShareAPI, 'provide_share_server', mock.Mock(return_value='fake_dest_share_server_id')) self.mock_object(rpcapi.ShareAPI, 'create_share_server') self.mock_object( migration_api.ShareMigrationHelper, 'wait_for_share_server', mock.Mock(return_value=dest_server)) self.mock_object(self.share_manager.db, 'share_instance_update', mock.Mock(return_value=migrating_instance)) self.mock_object(self.share_manager, '_migration_delete_instance') self.mock_object(self.share_manager.driver, 'migration_check_compatibility', mock.Mock(return_value=compatibility)) self.mock_object(utils, 'wait_for_access_update') self.mock_object( self.share_manager.db, 'share_snapshot_get_all_for_share', mock.Mock(return_value=[snapshot] if has_snapshots else [])) # run self.assertRaises( exception.ShareMigrationFailed, self.share_manager._migration_start_driver, self.context, share, src_instance, fake_dest_host, True, True, nondisruptive, not has_snapshots, 'fake_net_id', 'fake_az_id', 'fake_new_type_id') # asserts self.share_manager.db.share_server_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 'src_server_id') self.share_manager.db.share_instance_get.assert_called_once_with( self.context, migrating_instance['id'], with_share_data=True) if nondisruptive: 
self.share_manager.db.share_instance_update.assert_called_with( self.context, migrating_instance['id'], {'share_server_id': src_server['id']}, with_share_data=True ) rpcapi.ShareAPI.provide_share_server.assert_not_called() rpcapi.ShareAPI.create_share_server.assert_not_called() else: (rpcapi.ShareAPI.provide_share_server. assert_called_once_with( self.context, migrating_instance, 'fake_net_id')) rpcapi.ShareAPI.create_share_server.assert_called_once_with( self.context, migrating_instance, 'fake_dest_share_server_id') (migration_api.ShareMigrationHelper.wait_for_share_server. assert_called_once_with('fake_dest_share_server_id')) (api.API.create_share_instance_and_get_request_spec. assert_called_once_with(self.context, share, 'fake_az_id', None, 'fake_host', 'fake_net_id', 'fake_new_type_id')) self.share_manager._migration_delete_instance.assert_called_once_with( self.context, migrating_instance['id']) @ddt.data(Exception('fake'), False, True) def test_migration_driver_continue(self, finished): src_server = db_utils.create_share_server() dest_server = db_utils.create_share_server() share = db_utils.create_share( task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, id='share_id', share_server_id=src_server['id'], status=constants.STATUS_MIGRATING) share_cancelled = db_utils.create_share( task_state=constants.TASK_STATE_MIGRATION_CANCELLED) if finished: share_cancelled = share regular_instance = db_utils.create_share_instance( status=constants.STATUS_AVAILABLE, share_id='other_id') dest_instance = db_utils.create_share_instance( share_id='share_id', host='fake_host', share_server_id=dest_server['id'], status=constants.STATUS_MIGRATING_TO) src_instance = share.instance snapshot = db_utils.create_snapshot(share_id=share['id']) dest_snap_instance = db_utils.create_snapshot_instance( snapshot_id=snapshot['id'], share_instance_id=dest_instance['id']) migrating_snap_instance = db_utils.create_snapshot( status=constants.STATUS_MIGRATING, share_id=share['id']) 
snapshot_mappings = {snapshot.instance['id']: dest_snap_instance} self.mock_object(manager.LOG, 'warning') self.mock_object(self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock( return_value=[regular_instance, src_instance])) self.mock_object(self.share_manager.db, 'share_get', mock.Mock(side_effect=[share, share_cancelled])) self.mock_object(api.API, 'get_migrating_instances', mock.Mock(return_value=( src_instance['id'], dest_instance['id']))) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=dest_instance)) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(side_effect=[src_server, dest_server])) self.mock_object(self.share_manager.driver, 'migration_continue', mock.Mock(side_effect=[finished])) self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager, '_migration_delete_instance') side_effect = [[dest_snap_instance], [snapshot.instance]] if isinstance(finished, Exception): side_effect.append([migrating_snap_instance]) self.mock_object( self.share_manager.db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(side_effect=side_effect)) self.mock_object( self.share_manager.db, 'share_snapshot_instance_update') share_get_calls = [mock.call(self.context, 'share_id')] self.mock_object(self.share_manager, '_reset_read_only_access_rules') self.share_manager.migration_driver_continue(self.context) snapshot_instance_get_all_calls = [ mock.call(self.context, {'share_instance_ids': [dest_instance['id']]}), mock.call(self.context, {'share_instance_ids': [src_instance['id']]}) ] if isinstance(finished, Exception): self.share_manager.db.share_update.assert_called_once_with( self.context, 'share_id', {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) (self.share_manager.db.share_instance_update. 
assert_called_once_with( self.context, src_instance['id'], {'status': constants.STATUS_AVAILABLE})) (self.share_manager._migration_delete_instance. assert_called_once_with(self.context, dest_instance['id'])) (self.share_manager._reset_read_only_access_rules. assert_called_once_with(self.context, src_instance['id'])) (self.share_manager.db.share_snapshot_instance_update. assert_called_once_with( self.context, migrating_snap_instance['id'], {'status': constants.STATUS_AVAILABLE})) snapshot_instance_get_all_calls.append( mock.call( self.context, {'share_instance_ids': [src_instance['id']]})) else: if finished: self.share_manager.db.share_update.assert_called_once_with( self.context, 'share_id', {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE}) else: share_get_calls.append(mock.call(self.context, 'share_id')) self.assertTrue(manager.LOG.warning.called) self.share_manager.db.share_instance_get_all_by_host( self.context, self.share_manager.host) self.share_manager.db.share_get.assert_has_calls(share_get_calls) api.API.get_migrating_instances.assert_called_once_with(share) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, dest_instance['id'], with_share_data=True) self.share_manager.db.share_server_get.assert_has_calls([ mock.call(self.context, src_server['id']), mock.call(self.context, dest_server['id']), ]) self.share_manager.driver.migration_continue.assert_called_once_with( self.context, src_instance, dest_instance, [snapshot.instance], snapshot_mappings, src_server, dest_server) (self.share_manager.db.share_snapshot_instance_get_all_with_filters. 
assert_has_calls(snapshot_instance_get_all_calls)) @ddt.data({'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, 'exc': None}, {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, 'exc': Exception('fake')}, {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED, 'exc': None}, {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED, 'exc': Exception('fake')}) @ddt.unpack def test_migration_complete(self, task_state, exc): instance_1 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING, share_server_id='fake_server_id', share_type_id='fake_type_id') instance_2 = db_utils.create_share_instance( share_id='fake_id', status=constants.STATUS_MIGRATING_TO, share_server_id='fake_server_id', share_type_id='fake_type_id') share = db_utils.create_share( id='fake_id', instances=[instance_1, instance_2], task_state=task_state) model_type_update = {'create_share_from_snapshot_support': False} share_update = model_type_update share_update['task_state'] = constants.TASK_STATE_MIGRATION_SUCCESS # mocks self.mock_object(self.share_manager.db, 'share_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instance_1, instance_2])) self.mock_object(api.API, 'get_share_attributes_from_share_type', mock.Mock(return_value=model_type_update)) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value='fake_type')) self.mock_object(self.share_manager.db, 'share_update') if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE: self.mock_object( self.share_manager, '_migration_complete_driver', mock.Mock(side_effect=exc)) else: self.mock_object( self.share_manager, '_migration_complete_host_assisted', mock.Mock(side_effect=exc)) if exc: snapshot = db_utils.create_snapshot(share_id=share['id']) snapshot_ins1 = db_utils.create_snapshot_instance( snapshot_id=snapshot['id'], share_instance_id=instance_1['id'], 
status=constants.STATUS_MIGRATING,) snapshot_ins2 = db_utils.create_snapshot_instance( snapshot_id=snapshot['id'], share_instance_id=instance_2['id'], status=constants.STATUS_MIGRATING_TO) self.mock_object(manager.LOG, 'exception') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_snapshot_instance_update') self.mock_object(self.share_manager.db, 'share_snapshot_instance_get_all_with_filters', mock.Mock( return_value=[snapshot_ins1, snapshot_ins2])) self.assertRaises( exception.ShareMigrationFailed, self.share_manager.migration_complete, self.context, instance_1['id'], instance_2['id']) else: self.share_manager.migration_complete( self.context, instance_1['id'], instance_2['id']) # asserts self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) self.share_manager.db.share_instance_get.assert_has_calls([ mock.call(self.context, instance_1['id'], with_share_data=True), mock.call(self.context, instance_2['id'], with_share_data=True)]) if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE: (self.share_manager._migration_complete_driver. assert_called_once_with( self.context, share, instance_1, instance_2)) else: (self.share_manager._migration_complete_host_assisted. 
assert_called_once_with( self.context, share, instance_1['id'], instance_2['id'])) if exc: self.assertTrue(manager.LOG.exception.called) self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE: share_instance_update_calls = [ mock.call(self.context, instance_1['id'], {'status': constants.STATUS_ERROR}), mock.call(self.context, instance_2['id'], {'status': constants.STATUS_ERROR}) ] else: share_instance_update_calls = [ mock.call(self.context, instance_1['id'], {'status': constants.STATUS_AVAILABLE}), ] self.share_manager.db.share_instance_update.assert_has_calls( share_instance_update_calls) if task_state == constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE: (self.share_manager.db.share_snapshot_instance_update. assert_has_calls([ mock.call(self.context, snapshot_ins1['id'], {'status': constants.STATUS_ERROR}), mock.call(self.context, snapshot_ins2['id'], {'status': constants.STATUS_ERROR})])) (self.share_manager.db. share_snapshot_instance_get_all_with_filters. assert_called_once_with( self.context, { 'share_instance_ids': [instance_1['id'], instance_2['id']] } )) else: (api.API.get_share_attributes_from_share_type. 
assert_called_once_with('fake_type')) share_types.get_share_type.assert_called_once_with( self.context, 'fake_type_id') self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], share_update) @ddt.data(constants.TASK_STATE_DATA_COPYING_ERROR, constants.TASK_STATE_DATA_COPYING_CANCELLED, constants.TASK_STATE_DATA_COPYING_COMPLETED, 'other') def test__migration_complete_host_assisted_status(self, status): instance = db_utils.create_share_instance( share_id='fake_id', share_server_id='fake_server_id') new_instance = db_utils.create_share_instance(share_id='fake_id') share = db_utils.create_share(id='fake_id', task_state=status) helper = mock.Mock() # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instance, new_instance])) self.mock_object(helper, 'cleanup_new_instance') self.mock_object(migration_api, 'ShareMigrationHelper', mock.Mock(return_value=helper)) self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager, '_reset_read_only_access_rules') if status == constants.TASK_STATE_DATA_COPYING_COMPLETED: self.mock_object(helper, 'apply_new_access_rules', mock.Mock(side_effect=Exception('fake'))) self.mock_object(manager.LOG, 'exception') # run if status == constants.TASK_STATE_DATA_COPYING_CANCELLED: self.share_manager._migration_complete_host_assisted( self.context, share, instance['id'], new_instance['id']) else: self.assertRaises( exception.ShareMigrationFailed, self.share_manager._migration_complete_host_assisted, self.context, share, instance['id'], new_instance['id']) # asserts self.share_manager.db.share_instance_get.assert_has_calls([ mock.call(self.context, instance['id'], with_share_data=True), mock.call(self.context, new_instance['id'], with_share_data=True) ]) cancelled = not (status == constants.TASK_STATE_DATA_COPYING_CANCELLED) if status != 'other': 
helper.cleanup_new_instance.assert_called_once_with(new_instance) (self.share_manager._reset_read_only_access_rules. assert_called_once_with(self.context, instance['id'], helper=helper, supress_errors=cancelled)) if status == constants.TASK_STATE_MIGRATION_CANCELLED: (self.share_manager.db.share_instance_update. assert_called_once_with( self.context, instance['id'], {'status': constants.STATUS_AVAILABLE, 'progress': '100%'})) self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) if status == constants.TASK_STATE_DATA_COPYING_COMPLETED: helper.apply_new_access_rules. assert_called_once_with( new_instance, 'fake_id') self.assertTrue(manager.LOG.exception.called) @ddt.data({'mount_snapshot_support': True, 'snapshot_els': False}, {'mount_snapshot_support': True, 'snapshot_els': True}, {'mount_snapshot_support': False, 'snapshot_els': False}, {'mount_snapshot_support': False, 'snapshot_els': True},) @ddt.unpack def test__migration_complete_driver( self, mount_snapshot_support, snapshot_els): fake_src_host = 'src_host' fake_dest_host = 'dest_host' fake_rules = 'fake_rules' src_server = db_utils.create_share_server() dest_server = db_utils.create_share_server() share_type = db_utils.create_share_type( extra_specs={'mount_snapshot_support': mount_snapshot_support}) share = db_utils.create_share( share_server_id='fake_src_server_id', host=fake_src_host) dest_instance = db_utils.create_share_instance( share_id=share['id'], share_server_id='fake_dest_server_id', host=fake_dest_host, share_type_id=share_type['id']) src_instance = share.instance snapshot = db_utils.create_snapshot(share_id=share['id']) dest_snap_instance = db_utils.create_snapshot_instance( snapshot_id=snapshot['id'], share_instance_id=dest_instance['id']) snapshot_mappings = {snapshot.instance['id']: dest_snap_instance} model_update = {'fake_keys': 'fake_values'} if snapshot_els: el = {'path': 'fake_path', 
'is_admin_only': False} model_update['export_locations'] = [el] fake_return_data = { 'export_locations': 'fake_export_locations', 'snapshot_updates': {dest_snap_instance['id']: model_update}, } # mocks self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock( side_effect=[src_server, dest_server])) self.mock_object( self.share_manager.db, 'share_access_get_all_for_instance', mock.Mock(return_value=fake_rules)) self.mock_object( self.share_manager.db, 'export_locations_update') self.mock_object(self.share_manager.driver, 'migration_complete', mock.Mock(return_value=fake_return_data)) self.mock_object( self.share_manager.access_helper, '_check_needs_refresh', mock.Mock(return_value=True)) self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager, '_migration_complete_instance') self.mock_object(self.share_manager, '_migration_delete_instance') self.mock_object(migration_api.ShareMigrationHelper, 'apply_new_access_rules') self.mock_object( share_types, 'revert_allocated_share_type_quotas_during_migration') self.mock_object( self.share_manager.db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(side_effect=[[dest_snap_instance], [snapshot.instance]])) self.mock_object( self.share_manager.db, 'share_snapshot_instance_update') el_create = self.mock_object( self.share_manager.db, 'share_snapshot_instance_export_location_create') # run self.share_manager._migration_complete_driver( self.context, share, src_instance, dest_instance) # asserts self.share_manager.db.share_server_get.assert_has_calls([ mock.call(self.context, 'fake_src_server_id'), mock.call(self.context, 'fake_dest_server_id')]) (self.share_manager.db.export_locations_update. 
assert_called_once_with(self.context, dest_instance['id'], 'fake_export_locations')) self.share_manager.driver.migration_complete.assert_called_once_with( self.context, src_instance, dest_instance, [snapshot.instance], snapshot_mappings, src_server, dest_server) (migration_api.ShareMigrationHelper.apply_new_access_rules. assert_called_once_with(dest_instance, share['id'])) (self.share_manager._migration_complete_instance. assert_called_once_with(self.context, share, src_instance['id'], dest_instance['id'])) self.share_manager._migration_delete_instance.assert_called_once_with( self.context, src_instance['id']) self.share_manager.db.share_update.assert_called_once_with( self.context, dest_instance['share_id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) (self.share_manager.db.share_snapshot_instance_get_all_with_filters. assert_has_calls([ mock.call(self.context, {'share_instance_ids': [dest_instance['id']]}), mock.call(self.context, {'share_instance_ids': [src_instance['id']]})])) snap_data_update = ( fake_return_data['snapshot_updates'][dest_snap_instance['id']]) snap_data_update.update({ 'status': constants.STATUS_AVAILABLE, 'progress': '100%', }) (self.share_manager.db.share_snapshot_instance_update. assert_called_once_with(self.context, dest_snap_instance['id'], snap_data_update)) if mount_snapshot_support and snapshot_els: el['share_snapshot_instance_id'] = dest_snap_instance['id'] el_create.assert_called_once_with(self.context, el) else: el_create.assert_not_called() (share_types. revert_allocated_share_type_quotas_during_migration. 
assert_called_once_with( self.context, dest_instance, src_instance['share_type_id'], allow_deallocate_from_current_type=True)) def test__migration_complete_host_assisted(self): instance = db_utils.create_share_instance( share_id='fake_id', share_server_id='fake_server_id') new_instance = db_utils.create_share_instance(share_id='fake_id') share = db_utils.create_share( id='fake_id', task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED) # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instance, new_instance])) self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_update') self.mock_object(self.share_manager, '_migration_complete_instance') delete_mock = self.mock_object(migration_api.ShareMigrationHelper, 'delete_instance_and_wait') self.mock_object(migration_api.ShareMigrationHelper, 'apply_new_access_rules') # run self.share_manager._migration_complete_host_assisted( self.context, share, instance['id'], new_instance['id']) # asserts self.share_manager.db.share_instance_get.assert_has_calls([ mock.call(self.context, instance['id'], with_share_data=True), mock.call(self.context, new_instance['id'], with_share_data=True) ]) self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) (migration_api.ShareMigrationHelper.apply_new_access_rules. assert_called_once_with(new_instance, 'fake_id')) delete_mock.assert_called_once_with(instance) (self.share_manager._migration_complete_instance. 
assert_called_once_with(self.context, share, instance['id'], new_instance['id'])) @ddt.data(constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_DATA_COPYING_COMPLETED) def test_migration_cancel(self, task_state): dest_host = 'fake_host' server_1 = db_utils.create_share_server() server_2 = db_utils.create_share_server() share = db_utils.create_share(task_state=task_state) instance_1 = db_utils.create_share_instance( share_id=share['id'], share_server_id=server_1['id']) instance_2 = db_utils.create_share_instance( share_id=share['id'], share_server_id=server_2['id'], host=dest_host) helper = mock.Mock() self.mock_object(migration_api, 'ShareMigrationHelper', mock.Mock(return_value=helper)) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[instance_1, instance_2])) self.mock_object(db, 'share_update') self.mock_object(db, 'share_instance_update') self.mock_object(self.share_manager, '_migration_delete_instance') self.mock_object(self.share_manager, '_restore_migrating_snapshots_status') self.mock_object(db, 'share_server_get', mock.Mock(side_effect=[server_1, server_2])) self.mock_object(self.share_manager.driver, 'migration_cancel') self.mock_object(helper, 'cleanup_new_instance') self.mock_object(self.share_manager, '_reset_read_only_access_rules') self.mock_object( share_types, 'revert_allocated_share_type_quotas_during_migration') self.share_manager.migration_cancel( self.context, instance_1['id'], instance_2['id']) share_instance_update_calls = [] if task_state == constants.TASK_STATE_DATA_COPYING_COMPLETED: share_instance_update_calls.append(mock.call( self.context, instance_2['id'], {'status': constants.STATUS_INACTIVE})) (helper.cleanup_new_instance.assert_called_once_with(instance_2)) (self.share_manager._reset_read_only_access_rules. 
assert_called_once_with(self.context, instance_1['id'], helper=helper, supress_errors=False)) else: self.share_manager.driver.migration_cancel.assert_called_once_with( self.context, instance_1, instance_2, [], {}, server_1, server_2) (self.share_manager._migration_delete_instance. assert_called_once_with(self.context, instance_2['id'])) (self.share_manager._restore_migrating_snapshots_status. assert_called_once_with(self.context, instance_1['id'])) self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) self.share_manager.db.share_server_get.assert_has_calls([ mock.call(self.context, server_1['id']), mock.call(self.context, server_2['id']), ]) self.share_manager.db.share_instance_get.assert_has_calls([ mock.call(self.context, instance_1['id'], with_share_data=True), mock.call(self.context, instance_2['id'], with_share_data=True) ]) self.share_manager.db.share_update.assert_called_once_with( self.context, share['id'], {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) share_instance_update_calls.append(mock.call( self.context, instance_1['id'], {'status': constants.STATUS_AVAILABLE})) self.share_manager.db.share_instance_update.assert_has_calls( share_instance_update_calls) (share_types.revert_allocated_share_type_quotas_during_migration. 
assert_called_once_with( self.context, instance_1, instance_2['share_type_id'])) @ddt.data(True, False) def test__reset_read_only_access_rules(self, supress_errors): share = db_utils.create_share() server = db_utils.create_share_server() instance = db_utils.create_share_instance( share_id=share['id'], cast_rules_to_readonly=True, share_server_id=server['id']) # mocks self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(self.share_manager.db, 'share_instance_update') self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=instance)) self.mock_object(migration_api.ShareMigrationHelper, 'cleanup_access_rules') self.mock_object(migration_api.ShareMigrationHelper, 'revert_access_rules') # run self.share_manager._reset_read_only_access_rules( self.context, instance['id'], supress_errors=supress_errors) # asserts self.share_manager.db.share_server_get.assert_called_once_with( self.context, server['id']) self.share_manager.db.share_instance_update.assert_called_once_with( self.context, instance['id'], {'cast_rules_to_readonly': False}) self.share_manager.db.share_instance_get.assert_has_calls([ mock.call(self.context, instance['id'], with_share_data=True), mock.call(self.context, instance['id'], with_share_data=True)]) if supress_errors: (migration_api.ShareMigrationHelper.cleanup_access_rules. assert_called_once_with([instance], server, None)) else: (migration_api.ShareMigrationHelper.revert_access_rules. 
assert_called_once_with([instance], server, None)) def test__migration_delete_instance(self): share = db_utils.create_share(id='fake_id') instance = share.instance snapshot = db_utils.create_snapshot(share_id=share['id']) rules = [{'id': 'rule_id_1'}, {'id': 'rule_id_2'}] # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=instance)) mock_get_access_rules_call = self.mock_object( self.share_manager.access_helper, 'get_and_update_share_instance_access_rules', mock.Mock(return_value=rules)) mock_delete_access_rules_call = self.mock_object( self.share_manager.access_helper, 'delete_share_instance_access_rules') self.mock_object(self.share_manager.db, 'share_instance_delete') self.mock_object(self.share_manager.db, 'share_instance_access_delete') self.mock_object(self.share_manager, '_check_delete_share_server') self.mock_object(self.share_manager.db, 'share_snapshot_instance_delete') self.mock_object(self.share_manager.db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=[snapshot.instance])) # run self.share_manager._migration_delete_instance( self.context, instance['id']) # asserts self.share_manager.db.share_instance_get.assert_called_once_with( self.context, instance['id'], with_share_data=True) mock_get_access_rules_call.assert_called_once_with( self.context, share_instance_id=instance['id']) mock_delete_access_rules_call.assert_called_once_with( self.context, rules, instance['id']) self.share_manager.db.share_instance_delete.assert_called_once_with( self.context, instance['id']) self.share_manager._check_delete_share_server.assert_called_once_with( self.context, share_instance=instance) (self.share_manager.db.share_snapshot_instance_get_all_with_filters. assert_called_once_with(self.context, {'share_instance_ids': [instance['id']]})) (self.share_manager.db.share_snapshot_instance_delete. 
assert_called_once_with(self.context, snapshot.instance['id'])) @ddt.data({}, {'replication_type': 'readable'}) def test__migration_complete_instance(self, kwargs): src_share = db_utils.create_share() dest_share = db_utils.create_share(**kwargs) src_instance_id = src_share['instance']['id'] dest_instance_id = dest_share['instance']['id'] src_updates = {'status': constants.STATUS_INACTIVE} dest_updates = dest_updates = { 'status': constants.STATUS_AVAILABLE, 'progress': '100%' } if kwargs.get('replication_type'): replication_info = { 'replica_state': constants.REPLICA_STATE_ACTIVE} dest_updates.update(replication_info) self.mock_object(self.share_manager.db, 'share_instance_update') self.share_manager._migration_complete_instance( self.context, dest_share, src_instance_id, dest_instance_id) self.share_manager.db.share_instance_update.assert_has_calls( [mock.call(self.context, dest_instance_id, dest_updates), mock.call(self.context, src_instance_id, src_updates)]) def test_migration_cancel_invalid(self): share = db_utils.create_share() self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share.instance)) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.assertRaises( exception.InvalidShare, self.share_manager.migration_cancel, self.context, 'ins1_id', 'ins2_id') def test_migration_get_progress(self): expected = 'fake_progress' dest_host = 'fake_host' server_1 = db_utils.create_share_server() server_2 = db_utils.create_share_server() share = db_utils.create_share( task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, share_server_id=server_1['id']) instance_1 = db_utils.create_share_instance( share_id=share['id'], share_server_id=server_1['id']) instance_2 = db_utils.create_share_instance( share_id=share['id'], share_server_id=server_2['id'], host=dest_host) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[instance_1, instance_2])) 
self.mock_object(db, 'share_server_get', mock.Mock(side_effect=[server_1, server_2])) self.mock_object(self.share_manager.driver, 'migration_get_progress', mock.Mock(return_value=expected)) result = self.share_manager.migration_get_progress( self.context, instance_1['id'], instance_2['id']) self.assertEqual(expected, result) (self.share_manager.driver.migration_get_progress. assert_called_once_with( self.context, instance_1, instance_2, [], {}, server_1, server_2)) self.share_manager.db.share_get.assert_called_once_with( self.context, share['id']) self.share_manager.db.share_server_get.assert_has_calls([ mock.call(self.context, server_1['id']), mock.call(self.context, server_2['id']), ]) self.share_manager.db.share_instance_get.assert_has_calls([ mock.call(self.context, instance_1['id'], with_share_data=True), mock.call(self.context, instance_2['id'], with_share_data=True) ]) def test_migration_get_progress_invalid(self): share = db_utils.create_share() self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share.instance)) self.mock_object(db, 'share_get', mock.Mock(return_value=share)) self.assertRaises( exception.InvalidShare, self.share_manager.migration_get_progress, self.context, 'ins1_id', 'ins2_id') def test_provide_share_server(self): instance = db_utils.create_share_instance(share_id='fake_id', share_group_id='sg_id') snapshot = db_utils.create_snapshot(with_share=True) group = db_utils.create_share_group() server = db_utils.create_share_server() # mocks self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=instance)) self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager.db, 'share_group_get', mock.Mock(return_value=group)) self.mock_object(self.share_manager, '_provide_share_server_for_share', mock.Mock(return_value=(server, instance))) # run result = self.share_manager.provide_share_server( self.context, 'ins_id', 'net_id', 'snap_id') 
# asserts self.assertEqual(server['id'], result) self.share_manager.db.share_instance_get.assert_called_once_with( self.context, 'ins_id', with_share_data=True) self.share_manager.db.share_snapshot_get.assert_called_once_with( self.context, 'snap_id') self.share_manager.db.share_group_get.assert_called_once_with( self.context, 'sg_id') (self.share_manager._provide_share_server_for_share. assert_called_once_with(self.context, 'net_id', instance, snapshot, group, create_on_backend=False)) def test_create_share_server(self): server = db_utils.create_share_server() share = db_utils.create_share() fake_metadata = { 'request_host': 'fake_host', 'share_type_id': 'fake_share_type_id', } # mocks self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=server)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=share)) self.mock_object(self.share_manager, '_create_share_server_in_backend') self.mock_object(self.share_manager, '_build_server_metadata', mock.Mock(return_value=fake_metadata)) # run self.share_manager.create_share_server( self.context, 'server_id', 'share_instance_id') # asserts self.share_manager.db.share_server_get.assert_called_once_with( self.context, 'server_id') self.share_manager.db.share_instance_get.assert_called_once_with( self.context, 'share_instance_id', with_share_data=True) (self.share_manager._create_share_server_in_backend. 
assert_called_once_with(self.context, server, fake_metadata)) @ddt.data({'admin_network_api': mock.Mock(), 'driver_return': ('new_identifier', {'some_id': 'some_value'})}, {'admin_network_api': None, 'driver_return': (None, None)}) @ddt.unpack def test_manage_share_server(self, admin_network_api, driver_return): driver_opts = {} fake_share_server = fakes.fake_share_server_get() fake_list_network_info = [{}, {}] fake_list_empty_network_info = [] identifier = 'fake_id' ss_data = { 'name': 'fake_name', 'ou': 'fake_ou', 'domain': 'fake_domain', 'server': 'fake_server', 'dns_ip': 'fake_dns_ip', 'user': 'fake_user', 'type': 'FAKE', 'password': 'fake_pass', } mock_manage_admin_network_allocations = mock.Mock() security_service = db_utils.create_security_service(**ss_data) share_network = db_utils.create_share_network() share_net_subnet = db_utils.create_share_network_subnet( share_network_id=share_network['id']) fake_share_server['share_network_subnets'] = [share_net_subnet] share_server = db_utils.create_share_server(**fake_share_server) db.share_network_add_security_service(context.get_admin_context(), share_network['id'], security_service['id']) share_network = db.share_network_get(context.get_admin_context(), share_network['id']) self.share_manager.driver._admin_network_api = admin_network_api mock_share_server_update = self.mock_object( db, 'share_server_update') mock_share_server_get = self.mock_object( db, 'share_server_get', mock.Mock(return_value=share_server)) mock_share_network_get = self.mock_object( db, 'share_network_get', mock.Mock(return_value=share_network)) mock_share_net_subnet_get = self.mock_object( db, 'share_network_subnet_get', mock.Mock( return_value=share_net_subnet) ) mock_network_allocations_get = self.mock_object( self.share_manager.driver, 'get_network_allocations_number', mock.Mock(return_value=1)) mock_share_server_net_info = self.mock_object( self.share_manager.driver, 'get_share_server_network_info', 
mock.Mock(return_value=fake_list_network_info)) mock_manage_network_allocations = self.mock_object( self.share_manager.driver.network_api, 'manage_network_allocations', mock.Mock(return_value=fake_list_empty_network_info)) mock_manage_server = self.mock_object( self.share_manager.driver, 'manage_server', mock.Mock(return_value=driver_return)) mock_set_backend_details = self.mock_object( db, 'share_server_backend_details_set') ss_from_db = share_network['security_services'][0] ss_data_from_db = { 'name': ss_from_db['name'], 'ou': ss_from_db['ou'], 'default_ad_site': ss_from_db['default_ad_site'], 'domain': ss_from_db['domain'], 'server': ss_from_db['server'], 'dns_ip': ss_from_db['dns_ip'], 'user': ss_from_db['user'], 'type': ss_from_db['type'], 'password': ss_from_db['password'], } expected_backend_details = { 'security_service_FAKE': jsonutils.dumps(ss_data_from_db), } if driver_return[1]: expected_backend_details.update(driver_return[1]) if admin_network_api is not None: mock_manage_admin_network_allocations = self.mock_object( self.share_manager.driver.admin_network_api, 'manage_network_allocations', mock.Mock(return_value=fake_list_network_info)) self.share_manager.manage_share_server(self.context, fake_share_server['id'], identifier, driver_opts) mock_share_server_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'] ) mock_share_network_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_net_subnet['share_network_id'] ) mock_share_net_subnet_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_server['share_network_subnet_ids'][0] ) mock_network_allocations_get.assert_called_once_with() mock_share_server_net_info.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_server, identifier, driver_opts ) mock_manage_network_allocations.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_list_network_info, share_server, share_network, 
share_net_subnet ) mock_manage_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_server, identifier, driver_opts ) mock_share_server_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'], {'status': constants.STATUS_ACTIVE, 'identifier': driver_return[0] or share_server['id'], 'network_allocation_update_support': False, 'share_replicas_migration_support': False} ) mock_set_backend_details.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_server['id'], expected_backend_details ) if admin_network_api is not None: mock_manage_admin_network_allocations.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_list_network_info, share_server ) def test_manage_share_server_dhss_false(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False self.assertRaises( exception.ManageShareServerError, self.share_manager.manage_share_server, self.context, "fake_id", "foo", {}) def test_manage_share_server_without_allocations(self): driver_opts = {} fake_share_server = fakes.fake_share_server_get() fake_list_empty_network_info = [] identifier = 'fake_id' share_network = db_utils.create_share_network() share_network_subnet = db_utils.create_share_network_subnet( share_network_id=share_network['id'] ) fake_share_server['share_network_subnets'] = [share_network_subnet] share_server = db_utils.create_share_server(**fake_share_server) self.share_manager.driver._admin_network_api = mock.Mock() mock_share_server_get = self.mock_object( db, 'share_server_get', mock.Mock(return_value=share_server)) mock_share_network_get = self.mock_object( db, 'share_network_get', mock.Mock(return_value=share_network)) mock_share_net_subnet_get = self.mock_object( db, 'share_network_subnet_get', mock.Mock( return_value=share_network_subnet)) mock_network_allocations_get = self.mock_object( self.share_manager.driver, 
'get_network_allocations_number', mock.Mock(return_value=1)) mock_get_share_network_info = self.mock_object( self.share_manager.driver, 'get_share_server_network_info', mock.Mock(return_value=fake_list_empty_network_info)) self.assertRaises(exception.ManageShareServerError, self.share_manager.manage_share_server, context=self.context, share_server_id=fake_share_server['id'], identifier=identifier, driver_opts=driver_opts) mock_share_server_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'] ) mock_share_network_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_network_subnet['share_network_id'] ) mock_share_net_subnet_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_server['share_network_subnet_ids'][0] ) mock_network_allocations_get.assert_called_once_with() mock_get_share_network_info.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_server, identifier, driver_opts ) def test_manage_share_server_allocations_not_managed(self): driver_opts = {} fake_share_server = fakes.fake_share_server_get() fake_list_network_info = [{}, {}] identifier = 'fake_id' share_network = db_utils.create_share_network() share_network_subnet = db_utils.create_share_network_subnet( share_network_id=share_network['id'] ) fake_share_server['share_network_subnets'] = [share_network_subnet] share_server = db_utils.create_share_server(**fake_share_server) self.share_manager.driver._admin_network_api = mock.Mock() mock_share_server_get = self.mock_object( db, 'share_server_get', mock.Mock(return_value=share_server)) mock_share_network_get = self.mock_object( db, 'share_network_get', mock.Mock(return_value=share_network)) mock_share_net_subnet_get = self.mock_object( db, 'share_network_subnet_get', mock.Mock( return_value=share_network_subnet)) mock_network_allocations_get = self.mock_object( self.share_manager.driver, 'get_network_allocations_number', mock.Mock(return_value=1)) 
mock_get_share_network_info = self.mock_object( self.share_manager.driver, 'get_share_server_network_info', mock.Mock(return_value=fake_list_network_info)) mock_manage_admin_network_allocations = self.mock_object( self.share_manager.driver.admin_network_api, 'manage_network_allocations', mock.Mock(return_value=fake_list_network_info)) mock_manage_network_allocations = self.mock_object( self.share_manager.driver.network_api, 'manage_network_allocations', mock.Mock(return_value=fake_list_network_info)) self.assertRaises(exception.ManageShareServerError, self.share_manager.manage_share_server, context=self.context, share_server_id=fake_share_server['id'], identifier=identifier, driver_opts=driver_opts) mock_share_server_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'] ) mock_share_network_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_network_subnet['share_network_id'] ) mock_share_net_subnet_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_server['share_network_subnet_ids'][0] ) mock_network_allocations_get.assert_called_once_with() mock_get_share_network_info.assert_called_once_with( utils.IsAMatcher(context.RequestContext), share_server, identifier, driver_opts ) mock_manage_admin_network_allocations.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_list_network_info, share_server ) mock_manage_network_allocations.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_list_network_info, share_server, share_network, share_network_subnet ) def test_manage_snapshot_driver_exception(self): CustomException = type('CustomException', (Exception,), {}) self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value="False")) mock_manage = self.mock_object(self.share_manager.driver, 
'manage_existing_snapshot', mock.Mock(side_effect=CustomException)) share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) driver_options = {} mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.assertRaises( CustomException, self.share_manager.manage_snapshot, self.context, snapshot['id'], driver_options) mock_manage.assert_called_once_with(mock.ANY, driver_options) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) def test_unmanage_share_server_no_allocations(self): fake_share_server = fakes.fake_share_server_get() ss_list = [ {'name': 'fake_AD'}, {'name': 'fake_LDAP'}, {'name': 'fake_kerberos'} ] db_utils.create_share_server(**fake_share_server) self.mock_object(self.share_manager.driver, 'unmanage_server', mock.Mock(side_effect=NotImplementedError())) self.mock_object(self.share_manager.db, 'share_server_delete') mock_network_allocations_number = self.mock_object( self.share_manager.driver, 'get_network_allocations_number', mock.Mock(return_value=0) ) mock_admin_network_allocations_number = self.mock_object( self.share_manager.driver, 'get_admin_network_allocations_number', mock.Mock(return_value=0) ) self.share_manager.unmanage_share_server( self.context, fake_share_server['id'], True) mock_network_allocations_number.assert_called_once_with() mock_admin_network_allocations_number.assert_called_once_with() self.share_manager.driver.unmanage_server.assert_called_once_with( fake_share_server['backend_details'], ss_list) self.share_manager.db.share_server_delete.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id']) def test_unmanage_share_server_no_allocations_driver_not_implemented(self): fake_share_server = fakes.fake_share_server_get() fake_share_server['status'] = constants.STATUS_UNMANAGING ss_list = [ {'name': 'fake_AD'}, {'name': 'fake_LDAP'}, {'name': 'fake_kerberos'} ] 
db_utils.create_share_server(**fake_share_server) self.mock_object(self.share_manager.driver, 'unmanage_server', mock.Mock(side_effect=NotImplementedError())) self.mock_object(self.share_manager.db, 'share_server_update') self.share_manager.unmanage_share_server( self.context, fake_share_server['id'], False) self.share_manager.driver.unmanage_server.assert_called_once_with( fake_share_server['backend_details'], ss_list) self.share_manager.db.share_server_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) def test_unmanage_share_server_with_network_allocations(self): fake_share_server = fakes.fake_share_server_get() db_utils.create_share_server(**fake_share_server) mock_unmanage_network_allocations = self.mock_object( self.share_manager.driver.network_api, 'unmanage_network_allocations' ) mock_network_allocations_number = self.mock_object( self.share_manager.driver, 'get_network_allocations_number', mock.Mock(return_value=1) ) self.share_manager.unmanage_share_server( self.context, fake_share_server['id'], True) mock_network_allocations_number.assert_called_once_with() mock_unmanage_network_allocations.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id']) def test_unmanage_share_server_with_admin_network_allocations(self): fake_share_server = fakes.fake_share_server_get() db_utils.create_share_server(**fake_share_server) mock_admin_network_allocations_number = self.mock_object( self.share_manager.driver, 'get_admin_network_allocations_number', mock.Mock(return_value=1) ) mock_network_allocations_number = self.mock_object( self.share_manager.driver, 'get_network_allocations_number', mock.Mock(return_value=0) ) self.share_manager.driver._admin_network_api = mock.Mock() self.share_manager.unmanage_share_server( self.context, fake_share_server['id'], True) mock_admin_network_allocations_number.assert_called_once_with() 
mock_network_allocations_number.assert_called_once_with() def test_unmanage_share_server_error(self): fake_share_server = fakes.fake_share_server_get() db_utils.create_share_server(**fake_share_server) mock_network_allocations_number = self.mock_object( self.share_manager.driver, 'get_network_allocations_number', mock.Mock(return_value=1) ) error = mock.Mock( side_effect=exception.ShareServerNotFound(share_server_id="fake")) mock_share_server_delete = self.mock_object( db, 'share_server_delete', error ) mock_share_server_update = self.mock_object( db, 'share_server_update' ) self.share_manager.driver._admin_network_api = mock.Mock() self.assertRaises(exception.ShareServerNotFound, self.share_manager.unmanage_share_server, self.context, fake_share_server['id'], True) mock_network_allocations_number.assert_called_once_with() mock_share_server_delete.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'] ) mock_share_server_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'], {'status': constants.STATUS_UNMANAGE_ERROR} ) def test_unmanage_share_server_network_allocations_error(self): fake_share_server = fakes.fake_share_server_get() db_utils.create_share_server(**fake_share_server) mock_network_allocations_number = self.mock_object( self.share_manager.driver, 'get_network_allocations_number', mock.Mock(return_value=1) ) error = mock.Mock( side_effect=exception.ShareNetworkNotFound(share_network_id="fake") ) mock_unmanage_network_allocations = self.mock_object( self.share_manager.driver.network_api, 'unmanage_network_allocations', error) mock_share_server_update = self.mock_object( db, 'share_server_update' ) self.share_manager.driver._admin_network_api = mock.Mock() self.assertRaises(exception.ShareNetworkNotFound, self.share_manager.unmanage_share_server, self.context, fake_share_server['id'], True) mock_network_allocations_number.assert_called_once_with() 
mock_unmanage_network_allocations.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'] ) mock_share_server_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'], {'status': constants.STATUS_UNMANAGE_ERROR} ) def test_unmanage_share_server_admin_network_allocations_error(self): fake_share_server = fakes.fake_share_server_get() db_utils.create_share_server(**fake_share_server) self.share_manager.driver._admin_network_api = mock.Mock() mock_network_allocations_number = self.mock_object( self.share_manager.driver, 'get_network_allocations_number', mock.Mock(return_value=0) ) mock_admin_network_allocations_number = self.mock_object( self.share_manager.driver, 'get_admin_network_allocations_number', mock.Mock(return_value=1) ) error = mock.Mock( side_effect=exception.ShareNetworkNotFound(share_network_id="fake") ) mock_unmanage_admin_network_allocations = self.mock_object( self.share_manager.driver._admin_network_api, 'unmanage_network_allocations', error ) mock_unmanage_network_allocations = self.mock_object( self.share_manager.driver.network_api, 'unmanage_network_allocations', error) mock_share_server_update = self.mock_object( db, 'share_server_update' ) self.assertRaises(exception.ShareNetworkNotFound, self.share_manager.unmanage_share_server, self.context, fake_share_server['id'], True) mock_network_allocations_number.assert_called_once_with() mock_admin_network_allocations_number.assert_called_once_with() mock_unmanage_network_allocations.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'] ) mock_unmanage_admin_network_allocations.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'] ) mock_share_server_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), fake_share_server['id'], {'status': constants.STATUS_UNMANAGE_ERROR} ) @ddt.data({'dhss': True, 'driver_data': {'size': 1}, 
'mount_snapshot_support': False}, {'dhss': True, 'driver_data': {'size': 2, 'name': 'fake'}, 'mount_snapshot_support': False}, {'dhss': False, 'driver_data': {'size': 3}, 'mount_snapshot_support': False}, {'dhss': False, 'driver_data': {'size': 3, 'export_locations': [ {'path': '/path1', 'is_admin_only': True}, {'path': '/path2', 'is_admin_only': False} ]}, 'mount_snapshot_support': False}, {'dhss': False, 'driver_data': {'size': 3, 'export_locations': [ {'path': '/path1', 'is_admin_only': True}, {'path': '/path2', 'is_admin_only': False} ]}, 'mount_snapshot_support': True}) @ddt.unpack def test_manage_snapshot_valid_snapshot( self, driver_data, mount_snapshot_support, dhss): mock_get_share_server = self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.db, 'share_snapshot_update') self.mock_object(self.share_manager, 'driver') self.mock_object(quota.QUOTAS, 'reserve', mock.Mock()) self.share_manager.driver.driver_handles_share_servers = dhss if dhss: mock_manage = self.mock_object( self.share_manager.driver, "manage_existing_snapshot_with_server", mock.Mock(return_value=driver_data)) else: mock_manage = self.mock_object( self.share_manager.driver, "manage_existing_snapshot", mock.Mock(return_value=driver_data)) size = driver_data['size'] export_locations = driver_data.get('export_locations') share = db_utils.create_share( size=size, mount_snapshot_support=mount_snapshot_support) snapshot = db_utils.create_snapshot(share_id=share['id'], size=size) snapshot_id = snapshot['id'] driver_options = {} mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) mock_export_update = self.mock_object( self.share_manager.db, 'share_snapshot_instance_export_location_create') self.share_manager.manage_snapshot(self.context, snapshot_id, driver_options) if dhss: mock_manage.assert_called_once_with(mock.ANY, driver_options, None) else: 
mock_manage.assert_called_once_with(mock.ANY, driver_options) valid_snapshot_data = { 'status': constants.STATUS_AVAILABLE} valid_snapshot_data.update(driver_data) self.share_manager.db.share_snapshot_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot_id, valid_snapshot_data) if dhss: mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot_id) if mount_snapshot_support and export_locations: snap_ins_id = snapshot.instance['id'] for i in range(0, 2): export_locations[i]['share_snapshot_instance_id'] = snap_ins_id mock_export_update.assert_has_calls([ mock.call(utils.IsAMatcher(context.RequestContext), export_locations[0]), mock.call(utils.IsAMatcher(context.RequestContext), export_locations[1]), ]) else: mock_export_update.assert_not_called() def test_unmanage_snapshot_invalid_share(self): manager.CONF.unmanage_remove_access_rules = False self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False mock_unmanage = mock.Mock( side_effect=exception.UnmanageInvalidShareSnapshot(reason="fake")) self.mock_object(self.share_manager.driver, "unmanage_snapshot", mock_unmanage) mock_get_share_server = self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.db, 'share_snapshot_update') share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.share_manager.unmanage_snapshot(self.context, snapshot['id']) self.share_manager.db.share_snapshot_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) self.share_manager.driver.unmanage_snapshot.assert_called_once_with( mock.ANY) 
mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) @ddt.data({'dhss': False, 'quota_error': False}, {'dhss': True, 'quota_error': False}, {'dhss': False, 'quota_error': True}, {'dhss': True, 'quota_error': True}) @ddt.unpack def test_unmanage_snapshot_valid_snapshot(self, dhss, quota_error): if quota_error: self.mock_object(quota.QUOTAS, 'reserve', mock.Mock( side_effect=exception.ManilaException(message='error'))) manager.CONF.unmanage_remove_access_rules = True mock_log_warning = self.mock_object(manager.LOG, 'warning') self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = dhss mock_update_access = self.mock_object( self.share_manager.snapshot_access_helper, "update_access_rules") if dhss: mock_unmanage = self.mock_object( self.share_manager.driver, "unmanage_snapshot_with_server") else: mock_unmanage = self.mock_object( self.share_manager.driver, "unmanage_snapshot") mock_get_share_server = self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_snapshot_instance_destroy_call = self.mock_object( self.share_manager.db, 'share_snapshot_instance_delete') share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) mock_get = self.mock_object(self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) mock_snap_ins_get = self.mock_object( self.share_manager.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot.instance)) self.share_manager.unmanage_snapshot(self.context, snapshot['id']) if dhss: mock_unmanage.assert_called_once_with(snapshot.instance, None) else: mock_unmanage.assert_called_once_with(snapshot.instance) mock_update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot.instance['id'], delete_all_rules=True, share_server=None) 
mock_snapshot_instance_destroy_call.assert_called_once_with( mock.ANY, snapshot['instance']['id']) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) mock_snap_ins_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot.instance['id'], with_share_data=True) if quota_error: self.assertTrue(mock_log_warning.called) @ddt.data(True, False) def test_revert_to_snapshot(self, has_replicas): reservations = 'fake_reservations' share_id = 'fake_share_id' snapshot_id = 'fake_snapshot_id' snapshot_instance_id = 'fake_snapshot_instance_id' share_instance_id = 'fake_share_instance_id' share_instance = fakes.fake_share_instance( id=share_instance_id, share_id=share_id) share = fakes.fake_share( id=share_id, instance=share_instance, project_id='fake_project', user_id='fake_user', size=2, has_replicas=has_replicas) snapshot_instance = fakes.fake_snapshot_instance( id=snapshot_instance_id, share_id=share_instance_id, share=share, name='fake_snapshot', share_instance=share_instance, share_instance_id=share_instance_id) snapshot = fakes.fake_snapshot( id=snapshot_id, share_id=share_id, share=share, instance=snapshot_instance, project_id='fake_project', user_id='fake_user', size=1) share_access_rules = ['fake_share_access_rule'] snapshot_access_rules = ['fake_snapshot_access_rule'] mock_share_snapshot_get = self.mock_object( self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) mock_share_access_get = self.mock_object( self.share_manager.access_helper, 'get_share_instance_access_rules', mock.Mock(return_value=share_access_rules)) mock_snapshot_access_get = self.mock_object( self.share_manager.snapshot_access_helper, 'get_snapshot_instance_access_rules', mock.Mock(return_value=snapshot_access_rules)) mock_revert_to_snapshot = self.mock_object( self.share_manager, '_revert_to_snapshot') 
mock_revert_to_replicated_snapshot = self.mock_object( self.share_manager, '_revert_to_replicated_snapshot') self.share_manager.revert_to_snapshot(self.context, snapshot_id, reservations) mock_share_snapshot_get.assert_called_once_with(mock.ANY, snapshot_id) mock_share_access_get.assert_called_once_with( mock.ANY, filters={'state': constants.STATUS_ACTIVE}, share_instance_id=share_instance_id) mock_snapshot_access_get.assert_called_once_with( mock.ANY, snapshot_instance_id) if not has_replicas: mock_revert_to_snapshot.assert_called_once_with( mock.ANY, share, snapshot, reservations, share_access_rules, snapshot_access_rules) self.assertFalse(mock_revert_to_replicated_snapshot.called) else: self.assertFalse(mock_revert_to_snapshot.called) mock_revert_to_replicated_snapshot.assert_called_once_with( mock.ANY, share, snapshot, reservations, share_access_rules, snapshot_access_rules, share_id=share_id) @ddt.data((None, False), (None, True), ('fake_reservations', False), ('fake_reservations', True)) @ddt.unpack def test__revert_to_snapshot(self, reservations, revert_return_share_size): mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback') mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit') self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_driver = self.mock_object(self.share_manager, 'driver') share_id = 'fake_share_id' share = fakes.fake_share( id=share_id, instance={'id': 'fake_instance_id', 'share_type_id': 'fake_share_type_id'}, project_id='fake_project', user_id='fake_user', size=5 if revert_return_share_size else 3) snapshot_instance = fakes.fake_snapshot_instance( share_id=share_id, share=share, name='fake_snapshot', share_instance=share['instance']) snapshot = fakes.fake_snapshot( id='fake_snapshot_id', share_id=share_id, share=share, instance=snapshot_instance, project_id='fake_project', user_id='fake_user', size=4) share_access_rules = [] snapshot_access_rules = [] self.mock_object( 
self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object( self.share_manager.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) mock_share_update = self.mock_object( self.share_manager.db, 'share_update') mock_share_snapshot_update = self.mock_object( self.share_manager.db, 'share_snapshot_update') mock_driver.revert_to_snapshot.return_value = ( 5 if revert_return_share_size else None) self.share_manager._revert_to_snapshot(self.context, share, snapshot, reservations, share_access_rules, snapshot_access_rules) mock_driver.revert_to_snapshot.assert_called_once_with( mock.ANY, self._get_snapshot_instance_dict( snapshot_instance, share, snapshot=snapshot), share_access_rules, snapshot_access_rules, share_server=None) if reservations: if revert_return_share_size: mock_quotas_rollback.assert_called_once_with( mock.ANY, reservations, project_id='fake_project', user_id='fake_user', share_type_id=( snapshot_instance['share_instance']['share_type_id'])) else: self.assertFalse(mock_quotas_rollback.called) mock_quotas_commit.assert_called_once_with( mock.ANY, reservations, project_id='fake_project', user_id='fake_user', share_type_id=( snapshot_instance['share_instance']['share_type_id'])) else: self.assertFalse(mock_quotas_commit.called) mock_share_update.assert_called_once_with( mock.ANY, share_id, {'status': constants.STATUS_AVAILABLE, 'size': 5 if revert_return_share_size else 4}) mock_share_snapshot_update.assert_called_once_with( mock.ANY, 'fake_snapshot_id', {'status': constants.STATUS_AVAILABLE}) @ddt.data(None, 'fake_reservations') def test__revert_to_snapshot_driver_exception(self, reservations): mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback') mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit') self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_driver = self.mock_object(self.share_manager, 'driver') 
mock_driver.revert_to_snapshot.side_effect = exception.ManilaException share_id = 'fake_share_id' share = fakes.fake_share( id=share_id, instance={'id': 'fake_instance_id', 'share_type_id': 'fake_share_type_id'}, project_id='fake_project', user_id='fake_user', size=2) snapshot_instance = fakes.fake_snapshot_instance( share_id=share_id, share=share, name='fake_snapshot', share_instance=share['instance']) snapshot = fakes.fake_snapshot( id='fake_snapshot_id', share_id=share_id, share=share, instance=snapshot_instance, project_id='fake_project', user_id='fake_user', size=1) share_access_rules = [] snapshot_access_rules = [] self.mock_object( self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object( self.share_manager.db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) mock_share_update = self.mock_object( self.share_manager.db, 'share_update') mock_share_snapshot_update = self.mock_object( self.share_manager.db, 'share_snapshot_update') self.assertRaises(exception.ManilaException, self.share_manager._revert_to_snapshot, self.context, share, snapshot, reservations, share_access_rules, snapshot_access_rules) mock_driver.revert_to_snapshot.assert_called_once_with( mock.ANY, self._get_snapshot_instance_dict( snapshot_instance, share, snapshot=snapshot), share_access_rules, snapshot_access_rules, share_server=None) self.assertFalse(mock_quotas_commit.called) if reservations: mock_quotas_rollback.assert_called_once_with( mock.ANY, reservations, project_id='fake_project', user_id='fake_user', share_type_id=( snapshot_instance['share_instance']['share_type_id'])) else: self.assertFalse(mock_quotas_rollback.called) mock_share_update.assert_called_once_with( mock.ANY, share_id, {'status': constants.STATUS_REVERTING_ERROR}) mock_share_snapshot_update.assert_called_once_with( mock.ANY, 'fake_snapshot_id', {'status': constants.STATUS_AVAILABLE}) def test_unmanage_snapshot_update_access_rule_exception(self): 
self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False share = db_utils.create_share() snapshot = db_utils.create_snapshot(share_id=share['id']) manager.CONF.unmanage_remove_access_rules = True mock_get = self.mock_object( self.share_manager.db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) mock_get_share_server = self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.snapshot_access_helper, 'update_access_rules', mock.Mock(side_effect=Exception)) mock_log_exception = self.mock_object(manager.LOG, 'exception') mock_update = self.mock_object(self.share_manager.db, 'share_snapshot_update') self.share_manager.unmanage_snapshot(self.context, snapshot['id']) self.assertTrue(mock_log_exception.called) mock_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id']) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['share']) mock_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot['id'], {'status': constants.STATUS_UNMANAGE_ERROR}) def test_snapshot_update_access(self): snapshot = fakes.fake_snapshot(create_instance=True) snapshot_instance = fakes.fake_snapshot_instance( base_snapshot=snapshot) mock_instance_get = self.mock_object( db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) mock_get_share_server = self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) mock_update_access = self.mock_object( self.share_manager.snapshot_access_helper, 'update_access_rules') self.share_manager.snapshot_update_access(self.context, snapshot_instance['id']) mock_instance_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot_instance['id'], with_share_data=True) mock_get_share_server.assert_called_once_with( utils.IsAMatcher(context.RequestContext), 
snapshot_instance['share_instance']) mock_update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), snapshot_instance['id'], share_server=None) def _setup_crud_replicated_snapshot_data(self): snapshot = fakes.fake_snapshot(create_instance=True) snapshot_instance = fakes.fake_snapshot_instance( base_snapshot=snapshot) snapshot_instances = [snapshot['instance'], snapshot_instance] replicas = [fake_replica(), fake_replica()] return snapshot, snapshot_instances, replicas def test_create_replicated_snapshot_driver_exception(self): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'create_replicated_snapshot', mock.Mock(side_effect=exception.ManilaException)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') self.assertRaises(exception.ManilaException, self.share_manager.create_replicated_snapshot, self.context, snapshot['id'], share_id='fake_share') mock_db_update_call.assert_has_calls([ mock.call( self.context, snapshot['instance']['id'], {'status': constants.STATUS_ERROR}), mock.call( self.context, snapshot_instances[1]['id'], {'status': constants.STATUS_ERROR}), ]) @ddt.data(None, []) def test_create_replicated_snapshot_driver_updates_nothing(self, retval): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) 
self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'create_replicated_snapshot', mock.Mock(return_value=retval)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') return_value = self.share_manager.create_replicated_snapshot( self.context, snapshot['id'], share_id='fake_share') self.assertIsNone(return_value) self.assertFalse(mock_db_update_call.called) def test_create_replicated_snapshot_driver_updates_snapshot(self): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) snapshot_dict = { 'status': constants.STATUS_AVAILABLE, 'provider_location': 'spinners_end', 'progress': '100%', 'id': snapshot['instance']['id'], } self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'create_replicated_snapshot', mock.Mock(return_value=[snapshot_dict])) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') return_value = self.share_manager.create_replicated_snapshot( self.context, snapshot['id'], share_id='fake_share') self.assertIsNone(return_value) mock_db_update_call.assert_called_once_with( self.context, snapshot['instance']['id'], snapshot_dict) @ddt.data(None, 'fake_reservations') def test_revert_to_replicated_snapshot(self, reservations): share_id = 'id1' mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback') mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit') share = fakes.fake_share( id=share_id, project_id='fake_project', user_id='fake_user') snapshot = fakes.fake_snapshot( create_instance=True, share=share, size=1) snapshot_instance = 
fakes.fake_snapshot_instance( base_snapshot=snapshot) snapshot_instances = [snapshot['instance'], snapshot_instance] active_replica = fake_replica( id='rid1', share_id=share_id, host=self.share_manager.host, replica_state=constants.REPLICA_STATE_ACTIVE, as_primitive=False) replica = fake_replica( id='rid2', share_id=share_id, host='secondary', replica_state=constants.REPLICA_STATE_IN_SYNC, as_primitive=False) replicas = [active_replica, replica] share_access_rules = [] snapshot_access_rules = [] self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object( db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(side_effect=[snapshot_instances, [snapshot_instances[0]]])) mock_driver = self.mock_object(self.share_manager, 'driver') mock_share_update = self.mock_object( self.share_manager.db, 'share_update') mock_share_replica_update = self.mock_object( self.share_manager.db, 'share_replica_update') mock_share_snapshot_instance_update = self.mock_object( self.share_manager.db, 'share_snapshot_instance_update') self.share_manager._revert_to_replicated_snapshot( self.context, share, snapshot, reservations, share_access_rules, snapshot_access_rules, share_id=share_id) self.assertTrue(mock_driver.revert_to_replicated_snapshot.called) self.assertFalse(mock_quotas_rollback.called) if reservations: mock_quotas_commit.assert_called_once_with( mock.ANY, reservations, project_id='fake_project', user_id='fake_user', share_type_id=None) else: self.assertFalse(mock_quotas_commit.called) mock_share_update.assert_called_once_with( mock.ANY, share_id, {'size': snapshot['size']}) mock_share_replica_update.assert_called_once_with( mock.ANY, active_replica['id'], {'status': constants.STATUS_AVAILABLE}) mock_share_snapshot_instance_update.assert_called_once_with( 
mock.ANY, snapshot['instance']['id'], {'status': constants.STATUS_AVAILABLE}) @ddt.data(None, 'fake_reservations') def test_revert_to_replicated_snapshot_driver_exception( self, reservations): mock_quotas_rollback = self.mock_object(quota.QUOTAS, 'rollback') mock_quotas_commit = self.mock_object(quota.QUOTAS, 'commit') share_id = 'id1' share = fakes.fake_share( id=share_id, project_id='fake_project', user_id='fake_user') snapshot = fakes.fake_snapshot( create_instance=True, share=share, size=1) snapshot_instance = fakes.fake_snapshot_instance( base_snapshot=snapshot) snapshot_instances = [snapshot['instance'], snapshot_instance] active_replica = fake_replica( id='rid1', share_id=share_id, host=self.share_manager.host, replica_state=constants.REPLICA_STATE_ACTIVE, as_primitive=False, share_type_id='fake_share_type_id') replica = fake_replica( id='rid2', share_id=share_id, host='secondary', replica_state=constants.REPLICA_STATE_IN_SYNC, as_primitive=False, share_type_id='fake_share_type_id') replicas = [active_replica, replica] share_access_rules = [] snapshot_access_rules = [] self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object( self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object( db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(side_effect=[snapshot_instances, [snapshot_instances[0]]])) mock_driver = self.mock_object(self.share_manager, 'driver') mock_driver.revert_to_replicated_snapshot.side_effect = ( exception.ManilaException) mock_share_update = self.mock_object( self.share_manager.db, 'share_update') mock_share_replica_update = self.mock_object( self.share_manager.db, 'share_replica_update') mock_share_snapshot_instance_update = self.mock_object( self.share_manager.db, 'share_snapshot_instance_update') self.assertRaises(exception.ManilaException, 
self.share_manager._revert_to_replicated_snapshot, self.context, share, snapshot, reservations, share_access_rules, snapshot_access_rules, share_id=share_id) self.assertTrue(mock_driver.revert_to_replicated_snapshot.called) self.assertFalse(mock_quotas_commit.called) if reservations: mock_quotas_rollback.assert_called_once_with( mock.ANY, reservations, project_id='fake_project', user_id='fake_user', share_type_id=replica['share_type_id']) else: self.assertFalse(mock_quotas_rollback.called) self.assertFalse(mock_share_update.called) mock_share_replica_update.assert_called_once_with( mock.ANY, active_replica['id'], {'status': constants.STATUS_REVERTING_ERROR}) mock_share_snapshot_instance_update.assert_called_once_with( mock.ANY, snapshot['instance']['id'], {'status': constants.STATUS_AVAILABLE}) def delete_replicated_snapshot_driver_exception(self): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'delete_replicated_snapshot', mock.Mock(side_effect=exception.ManilaException)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') self.assertRaises(exception.ManilaException, self.share_manager.delete_replicated_snapshot, self.context, snapshot['id'], share_id='fake_share') mock_db_update_call.assert_has_calls([ mock.call( self.context, snapshot['instance']['id'], {'status': constants.STATUS_ERROR_DELETING}), mock.call( self.context, snapshot_instances[1]['id'], {'status': constants.STATUS_ERROR_DELETING}), ]) 
self.assertFalse(mock_db_delete_call.called) def delete_replicated_snapshot_driver_exception_ignored_with_force(self): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'delete_replicated_snapshot', mock.Mock(side_effect=exception.ManilaException)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') retval = self.share_manager.delete_replicated_snapshot( self.context, snapshot['id'], share_id='fake_share') self.assertIsNone(retval) mock_db_delete_call.assert_has_calls([ mock.call( self.context, snapshot['instance']['id']), mock.call( self.context, snapshot_instances[1]['id']), ]) self.assertFalse(mock_db_update_call.called) @ddt.data(None, []) def delete_replicated_snapshot_driver_updates_nothing(self, retval): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'delete_replicated_snapshot', mock.Mock(return_value=retval)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') return_value = 
self.share_manager.delete_replicated_snapshot( self.context, snapshot['id'], share_id='fake_share') self.assertIsNone(return_value) self.assertFalse(mock_db_delete_call.called) self.assertFalse(mock_db_update_call.called) def delete_replicated_snapshot_driver_deletes_snapshots(self): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) retval = [{ 'status': constants.STATUS_DELETED, 'id': snapshot['instance']['id'], }] self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'delete_replicated_snapshot', mock.Mock(return_value=retval)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') return_value = self.share_manager.delete_replicated_snapshot( self.context, snapshot['id'], share_id='fake_share') self.assertIsNone(return_value) mock_db_delete_call.assert_called_once_with( self.context, snapshot['instance']['id']) self.assertFalse(mock_db_update_call.called) @ddt.data(True, False) def delete_replicated_snapshot_drv_del_and_updates_snapshots(self, force): snapshot, snapshot_instances, replicas = ( self._setup_crud_replicated_snapshot_data() ) updated_instance_details = { 'status': constants.STATUS_ERROR, 'id': snapshot_instances[1]['id'], 'provider_location': 'azkaban', } retval = [ { 'status': constants.STATUS_DELETED, 'id': snapshot['instance']['id'], }, ] retval.append(updated_instance_details) self.mock_object( db, 'share_snapshot_get', mock.Mock(return_value=snapshot)) self.mock_object(self.share_manager, '_get_share_server') self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', 
mock.Mock(return_value=snapshot_instances)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=replicas)) self.mock_object( self.share_manager.driver, 'delete_replicated_snapshot', mock.Mock(return_value=retval)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') return_value = self.share_manager.delete_replicated_snapshot( self.context, snapshot['id'], share_id='fake_share', force=force) self.assertIsNone(return_value) if force: self.assertEqual(2, mock_db_delete_call.call_count) self.assertFalse(mock_db_update_call.called) else: mock_db_delete_call.assert_called_once_with( self.context, snapshot['instance']['id']) mock_db_update_call.assert_called_once_with( self.context, snapshot_instances[1]['id'], updated_instance_details) def test_periodic_share_replica_snapshot_update(self): mock_debug_log = self.mock_object(manager.LOG, 'debug') replicas = 3 * [ fake_replica(host='malfoy@manor#_pool0', replica_state=constants.REPLICA_STATE_IN_SYNC) ] replicas.append(fake_replica(replica_state=constants.STATUS_ACTIVE)) snapshot = fakes.fake_snapshot(create_instance=True, status=constants.STATUS_DELETING) snapshot_instances = 3 * [ fakes.fake_snapshot_instance(base_snapshot=snapshot) ] self.mock_object( db, 'share_replicas_get_all', mock.Mock(return_value=replicas)) self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=snapshot_instances)) mock_snapshot_update_call = self.mock_object( self.share_manager, '_update_replica_snapshot') retval = self.share_manager.periodic_share_replica_snapshot_update( self.context) self.assertIsNone(retval) self.assertEqual(1, mock_debug_log.call_count) self.assertEqual(0, mock_snapshot_update_call.call_count) @ddt.data(True, False) def test_periodic_share_replica_snapshot_update_nothing_to_update( self, has_instances): mock_debug_log = self.mock_object(manager.LOG, 'debug') 
replicas = 3 * [ fake_replica(host='malfoy@manor#_pool0', replica_state=constants.REPLICA_STATE_IN_SYNC) ] replicas.append(fake_replica(replica_state=constants.STATUS_ACTIVE)) snapshot = fakes.fake_snapshot(create_instance=True, status=constants.STATUS_DELETING) snapshot_instances = 3 * [ fakes.fake_snapshot_instance(base_snapshot=snapshot) ] self.mock_object(db, 'share_replicas_get_all', mock.Mock(side_effect=[[], replicas])) self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(side_effect=[snapshot_instances, []])) mock_snapshot_update_call = self.mock_object( self.share_manager, '_update_replica_snapshot') retval = self.share_manager.periodic_share_replica_snapshot_update( self.context) self.assertIsNone(retval) self.assertEqual(1, mock_debug_log.call_count) self.assertEqual(0, mock_snapshot_update_call.call_count) def test__update_replica_snapshot_replica_deleted_from_database(self): replica_not_found = exception.ShareReplicaNotFound(replica_id='xyzzy') self.mock_object(db, 'share_replica_get', mock.Mock( side_effect=replica_not_found)) mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') mock_driver_update_call = self.mock_object( self.share_manager.driver, 'update_replicated_snapshot') snaphot_instance = fakes.fake_snapshot_instance() retval = self.share_manager._update_replica_snapshot( self.context, snaphot_instance) self.assertIsNone(retval) mock_db_delete_call.assert_called_once_with( self.context, snaphot_instance['id']) self.assertFalse(mock_driver_update_call.called) self.assertFalse(mock_db_update_call.called) def test__update_replica_snapshot_both_deleted_from_database(self): replica_not_found = exception.ShareReplicaNotFound(replica_id='xyzzy') instance_not_found = exception.ShareSnapshotInstanceNotFound( instance_id='spoon!') self.mock_object(db, 'share_replica_get', mock.Mock( side_effect=replica_not_found)) 
mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete', mock.Mock( side_effect=instance_not_found)) mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') mock_driver_update_call = self.mock_object( self.share_manager.driver, 'update_replicated_snapshot') snapshot_instance = fakes.fake_snapshot_instance() retval = self.share_manager._update_replica_snapshot( self.context, snapshot_instance) self.assertIsNone(retval) mock_db_delete_call.assert_called_once_with( self.context, snapshot_instance['id']) self.assertFalse(mock_driver_update_call.called) self.assertFalse(mock_db_update_call.called) def test__update_replica_snapshot_driver_raises_Not_Found_exception(self): mock_debug_log = self.mock_object(manager.LOG, 'debug') replica = fake_replica() snapshot_instance = fakes.fake_snapshot_instance( status=constants.STATUS_DELETING) self.mock_object( db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica])) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object( self.share_manager.driver, 'update_replicated_snapshot', mock.Mock( side_effect=exception.SnapshotResourceNotFound(name='abc'))) mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') retval = self.share_manager._update_replica_snapshot( self.context, snapshot_instance, replica_snapshots=None) self.assertIsNone(retval) self.assertEqual(1, mock_debug_log.call_count) mock_db_delete_call.assert_called_once_with( self.context, snapshot_instance['id']) self.assertFalse(mock_db_update_call.called) @ddt.data(exception.NotFound, 
exception.ManilaException) def test__update_replica_snapshot_driver_raises_other_exception(self, exc): mock_debug_log = self.mock_object(manager.LOG, 'debug') mock_info_log = self.mock_object(manager.LOG, 'info') mock_exception_log = self.mock_object(manager.LOG, 'exception') replica = fake_replica() snapshot_instance = fakes.fake_snapshot_instance( status=constants.STATUS_CREATING) self.mock_object( db, 'share_replica_get', mock.Mock(return_value=replica)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica])) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.driver, 'update_replicated_snapshot', mock.Mock(side_effect=exc)) mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') retval = self.share_manager._update_replica_snapshot( self.context, snapshot_instance) self.assertIsNone(retval) self.assertEqual(1, mock_exception_log.call_count) self.assertEqual(1, mock_debug_log.call_count) self.assertFalse(mock_info_log.called) mock_db_update_call.assert_called_once_with( self.context, snapshot_instance['id'], {'status': 'error'}) self.assertFalse(mock_db_delete_call.called) @ddt.data(True, False) def test__update_replica_snapshot_driver_updates_replica(self, update): replica = fake_replica() snapshot_instance = fakes.fake_snapshot_instance() driver_update = {} if update: driver_update = { 'id': snapshot_instance['id'], 'provider_location': 'knockturn_alley', 'status': constants.STATUS_AVAILABLE, } mock_debug_log = self.mock_object(manager.LOG, 'debug') mock_info_log = self.mock_object(manager.LOG, 'info') self.mock_object( db, 'share_replica_get', 
mock.Mock(return_value=replica)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_snapshot_instance_get', mock.Mock(return_value=snapshot_instance)) self.mock_object(db, 'share_replicas_get_all_by_share', mock.Mock(return_value=[replica])) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value=None)) self.mock_object(self.share_manager.driver, 'update_replicated_snapshot', mock.Mock(return_value=driver_update)) mock_db_delete_call = self.mock_object( db, 'share_snapshot_instance_delete') mock_db_update_call = self.mock_object( db, 'share_snapshot_instance_update') retval = self.share_manager._update_replica_snapshot( self.context, snapshot_instance, replica_snapshots=None) driver_update['progress'] = '100%' self.assertIsNone(retval) self.assertEqual(1, mock_debug_log.call_count) self.assertFalse(mock_info_log.called) if update: mock_db_update_call.assert_called_once_with( self.context, snapshot_instance['id'], driver_update) else: self.assertFalse(mock_db_update_call.called) self.assertFalse(mock_db_delete_call.called) def test_update_access(self): share_server = fakes.fake_share_server_get() kwargs = {'share_server_id': share_server['id']} share_instance = fakes.fake_share_instance(**kwargs) self.mock_object(self.share_manager, '_get_share_server', mock.Mock(return_value='fake_share_server')) self.mock_object(self.share_manager, '_get_share_instance', mock.Mock(return_value=share_instance)) self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=share_server)) access_rules_update_method = self.mock_object( self.share_manager.access_helper, 'update_access_rules') retval = self.share_manager.update_access( self.context, share_instance['id']) self.assertIsNone(retval) access_rules_update_method.assert_called_once_with( self.context, share_instance['id'], share_server=share_server) @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify') 
def test_update_share_usage_size(self, mock_notify): instances = self._setup_init_mocks(setup_access_rules=False) update_shares = [{'id': 'fake_id', 'used_size': '3', 'gathered_at': 'fake'}] mock_notify.assert_not_called() manager = self.share_manager self.mock_object(manager, 'driver') self.mock_object(manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(manager.db, 'share_instance_get', mock.Mock(side_effect=instances)) mock_driver_call = self.mock_object( manager.driver, 'update_share_usage_size', mock.Mock(return_value=update_shares)) self.share_manager.update_share_usage_size(self.context) self.assert_notify_called(mock_notify, (['INFO', 'share.consumed.size'], )) mock_driver_call.assert_called_once_with( self.context, instances) @mock.patch('manila.tests.fake_notifier.FakeNotifier._notify') def test_update_share_usage_size_fail(self, mock_notify): instances = self._setup_init_mocks(setup_access_rules=False) mock_notify.assert_not_called() self.mock_object(self.share_manager, 'driver') self.mock_object(self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=instances)) self.mock_object( self.share_manager.driver, 'update_share_usage_size', mock.Mock(side_effect=exception.ProcessExecutionError)) mock_log_exception = self.mock_object(manager.LOG, 'exception') self.share_manager.update_share_usage_size(self.context) self.assertTrue(mock_log_exception.called) def test_periodic_share_status_update(self): instances = self._setup_init_mocks(setup_access_rules=False) instances_creating_from_snap = [ x for x in instances if x['status'] == constants.STATUS_CREATING_FROM_SNAPSHOT ] self.mock_object(self.share_manager, 'driver') self.mock_object(self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances_creating_from_snap)) mock_update_share_status = self.mock_object( 
self.share_manager, '_update_share_status') instances_dict = [ self.share_manager._get_share_instance_dict(self.context, si) for si in instances_creating_from_snap] self.share_manager.periodic_share_status_update(self.context) mock_update_share_status.assert_has_calls([ mock.call(self.context, share_instance) for share_instance in instances_dict ]) def test__update_share_status(self): instances = self._setup_init_mocks(setup_access_rules=False) fake_export_locations = ['fake/path/1', 'fake/path'] instance_model_update = { 'status': constants.STATUS_AVAILABLE, 'export_locations': fake_export_locations } expected_si_update_info = { 'status': constants.STATUS_AVAILABLE, 'progress': '100%' } driver_get_status = self.mock_object( self.share_manager.driver, 'get_share_status', mock.Mock(return_value=instance_model_update)) db_si_update = self.mock_object(self.share_manager.db, 'share_instance_update') db_el_update = self.mock_object(self.share_manager.db, 'export_locations_update') in_progress_instances = [x for x in instances if x['status'] == constants.STATUS_CREATING_FROM_SNAPSHOT] instance = self.share_manager.db.share_instance_get( self.context, in_progress_instances[0]['id'], with_share_data=True) self.share_manager._update_share_status(self.context, instance) driver_get_status.assert_called_once_with(instance, None) db_si_update.assert_called_once_with(self.context, instance['id'], expected_si_update_info) db_el_update.assert_called_once_with(self.context, instance['id'], fake_export_locations) @ddt.data(mock.Mock(return_value={'status': constants.STATUS_ERROR}), mock.Mock(side_effect=exception.ShareBackendException( msg='fake_msg'))) def test__update_share_status_share_with_error_or_exception(self, driver_error): instances = self._setup_init_mocks(setup_access_rules=False) expected_si_update_info = { 'status': constants.STATUS_ERROR, 'progress': None, } driver_get_status = self.mock_object( self.share_manager.driver, 'get_share_status', driver_error) db_si_update 
= self.mock_object(self.share_manager.db, 'share_instance_update') in_progress_instances = [x for x in instances if x['status'] == constants.STATUS_CREATING_FROM_SNAPSHOT] instance = self.share_manager.db.share_instance_get( self.context, in_progress_instances[0]['id'], with_share_data=True) self.share_manager._update_share_status(self.context, instance) driver_get_status.assert_called_once_with(instance, None) db_si_update.assert_called_once_with(self.context, instance['id'], expected_si_update_info) self.share_manager.message_api.create.assert_called_once_with( self.context, message_field.Action.UPDATE, instance['project_id'], resource_type=message_field.Resource.SHARE, resource_id=instance['share_id'], detail=message_field.Detail.DRIVER_FAILED_CREATING_FROM_SNAP) def test__build_server_metadata(self): share = {'host': 'host', 'share_type_id': 'id'} expected_metadata = {'request_host': 'host', 'share_type_id': 'id', 'encryption_key_ref': None, 'keystone_url': None} metadata = self.share_manager._build_server_metadata( self.context, share['host'], share['share_type_id']) self.assertDictEqual(expected_metadata, metadata) @ddt.data( { 'compatible': False, 'writable': True, 'nondisruptive': True, 'preserve_snapshots': True, }, { 'compatible': True, 'writable': False, 'nondisruptive': True, 'preserve_snapshots': True, }, { 'compatible': True, 'writable': True, 'nondisruptive': False, 'preserve_snapshots': True, }, { 'compatible': True, 'writable': True, 'nondisruptive': True, 'preserve_snapshots': False, }, { 'compatible': True, 'writable': True, 'nondisruptive': True, 'preserve_snapshots': False, 'not_preserve_with_instances': True }, ) @ddt.unpack def test__validate_check_compatibility_result( self, compatible, writable, nondisruptive, preserve_snapshots, not_preserve_with_instances=False): fake_share_network = db_utils.create_share_network() fake_share_server = db_utils.create_share_server() fake_share_server_dest = db_utils.create_share_server() share_instances = 
[] snapshot_instances = [ db_utils.create_snapshot( with_share=True, status='available')['instance']] driver_compatibility = { 'compatible': compatible, 'writable': writable, 'preserve_snapshots': preserve_snapshots, 'nondisruptive': nondisruptive, 'share_network_id': fake_share_network['id'], 'migration_cancel': False, 'migration_get_progress': False } specified_writable = True if not writable else writable specified_nondisruptive = True if not nondisruptive else nondisruptive specified_preserve_snapshots = (True if not preserve_snapshots else preserve_snapshots) if not preserve_snapshots and not_preserve_with_instances: specified_preserve_snapshots = False self.assertRaises( exception.ShareServerMigrationFailed, self.share_manager._validate_check_compatibility_result, self.context, fake_share_server['id'], share_instances, snapshot_instances, driver_compatibility, fake_share_server_dest['host'], specified_nondisruptive, specified_writable, specified_preserve_snapshots, resource_type='share server' ) @ddt.data( { 'kwargs': {'share_instance_ids': ['fakeid1']}, 'resource_type': 'share_instance' }, { 'kwargs': {'snapshot_instance_ids': ['fakeid1']}, 'resource_type': 'snapshot_instance' }, { 'kwargs': { 'snapshot_instance_ids': ['fakeid1'], 'task_state': constants.TASK_STATE_MIGRATION_STARTING}, 'resource_type': 'snapshot_instance' }, ) @ddt.unpack def test__update_resource_status(self, kwargs, resource_type): if resource_type == 'share_instance': mock_db_instances_status_update = self.mock_object( db, 'share_instance_status_update') else: mock_db_instances_status_update = self.mock_object( db, 'share_snapshot_instances_status_update') kwargs_relationship = { 'share_instance': 'share_instance_ids', 'snapshot_instance': 'snapshot_instance_ids' } resource_ids_key = kwargs_relationship.get(resource_type) resource_ids = kwargs.get(resource_ids_key) fields = {'status': constants.STATUS_AVAILABLE} if kwargs.get('task_state'): fields['task_state'] = kwargs['task_state'] 
self.share_manager._update_resource_status( self.context, constants.STATUS_AVAILABLE, **kwargs) mock_db_instances_status_update.assert_called_once_with( self.context, resource_ids, fields) def _get_share_server_start_update_calls( self, source_share_server, dest_share_server, driver_failed=False): migration_in_progress_call = mock.call( self.context, dest_share_server['id'], { 'status': constants.STATUS_SERVER_MIGRATING_TO, 'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS, 'source_share_server_id': source_share_server['id'] } ) driver_migration_starting_src_call = mock.call( self.context, source_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_STARTING} ) driver_migration_starting_dest_call = mock.call( self.context, dest_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_STARTING} ) driver_migration_src_call = mock.call( self.context, source_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS} ) driver_migration_dest_call = mock.call( self.context, dest_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS} ) driver_migration_dest_error = mock.call( self.context, dest_share_server['id'], { 'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ERROR } ) mock_calls = [ migration_in_progress_call, driver_migration_starting_src_call, driver_migration_starting_dest_call] if driver_failed: mock_calls.append(driver_migration_dest_error) else: mock_calls.append(driver_migration_src_call) mock_calls.append(driver_migration_dest_call) return mock_calls def _setup_server_migration_start_mocks( self, fake_share_instances, fake_snap_instances, fake_old_network, fake_new_network, fake_service, fake_request_spec, fake_driver_result, fake_new_share_server, server_info, network_subnet, new_network_subnet=None, az_compatible=True): self.mock_object(db, 'share_instance_get_all_by_share_server', mock.Mock(return_value=fake_share_instances)) 
self.mock_object(db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=fake_snap_instances)) self.mock_object(db, 'share_network_get', mock.Mock(side_effect=[fake_old_network, fake_new_network])) self.mock_object(self.share_manager, '_update_resource_status') self.mock_object(db, 'service_get_by_args', mock.Mock(return_value=fake_service)) self.mock_object(api.API, 'get_share_server_migration_request_spec_dict', mock.Mock(return_value=fake_request_spec)) self.mock_object(self.share_manager.driver, 'share_server_migration_check_compatibility', mock.Mock(return_value=fake_driver_result)) self.mock_object(self.share_manager, '_validate_check_compatibility_result') self.mock_object(self.share_manager, '_provide_share_server_for_migration', mock.Mock(return_value=fake_new_share_server)) self.mock_object(self.share_manager, '_cast_access_rules_to_readonly_for_server') self.mock_object(db, 'share_network_subnet_get', mock.Mock(return_value=network_subnet)) self.mock_object(db, 'share_network_subnet_get_all_by_share_server_id', mock.Mock(return_value=[new_network_subnet])) self.mock_object(db, 'share_server_get', mock.Mock(return_value=fake_new_share_server)) self.mock_object(self.share_manager.driver, 'allocate_network') self.mock_object(self.share_manager.driver, 'allocate_admin_network') self.mock_object(self.share_manager.driver, 'deallocate_network') self.mock_object(db, 'share_server_delete') self.mock_object(db, 'share_server_update') self.mock_object(self.share_manager.driver, 'share_server_migration_start', mock.Mock(return_value=server_info)) self.mock_object(db, 'share_server_backend_details_set') self.mock_object(self.share_manager, 'delete_share_server') self.mock_object(share_utils, 'is_az_subnets_compatible', mock.Mock(return_value=az_compatible)) @ddt.data((True, True), (False, True)) @ddt.unpack def test__share_server_migration_start_driver(self, writable, nondisruptive): old_subnet_id = 'fake_id' new_subnet_kwargs = {} if not 
nondisruptive: new_subnet_kwargs.update({ 'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_sn_id' }) network_subnet = db_utils.create_share_network_subnet(id=old_subnet_id) new_network_subnet = db_utils.create_share_network_subnet( **new_subnet_kwargs) fake_old_share_server = { 'id': 'fake_server_id', 'share_network_subnets': [network_subnet], 'host': 'host@backend' } fake_new_share_server = { 'id': 'fake_server_id_2', 'share_network_subnets': [new_network_subnet], 'host': 'host@backend' } fake_old_network = db_utils.create_share_network() fake_new_network = db_utils.create_share_network() fake_share_instances = [ db_utils.create_share( share_server_id=fake_old_share_server['id'], share_network_id=fake_old_network['id'])['instance']] fake_share_instance_ids = [ fake_instance['id'] for fake_instance in fake_share_instances] fake_snap_instances = [] fake_service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} fake_request_spec = {} fake_dest_host = 'fakehost@fakebackend' preserve_snapshots = True fake_driver_result = { 'compatible': True, 'writable': writable, 'preserve_snapshots': preserve_snapshots, 'nondisruptive': nondisruptive, 'share_network_id': fake_new_network['id'], 'migration_cancel': False, 'migration_get_progress': False } server_info = { 'fake_server_info_key': 'fake_server_info_value', 'backend_details': {'fake': 'fake'} } create_on_backend = not nondisruptive self._setup_server_migration_start_mocks( fake_share_instances, fake_snap_instances, fake_old_network, fake_new_network, fake_service, fake_request_spec, fake_driver_result, fake_new_share_server, server_info, network_subnet, new_network_subnet=new_network_subnet, az_compatible=create_on_backend) result = self.share_manager._share_server_migration_start_driver( self.context, fake_old_share_server, fake_dest_host, writable, nondisruptive, preserve_snapshots, fake_new_network['id']) self.assertTrue(result) 
db.share_instance_get_all_by_share_server.assert_called_once_with( self.context, fake_old_share_server['id'], with_share_data=True) (db.share_snapshot_instance_get_all_with_filters. assert_called_once_with( self.context, {'share_instance_ids': fake_share_instance_ids})) db.share_network_get.assert_has_calls( [mock.call(self.context, fake_old_network['id']), mock.call(self.context, fake_new_network['id'])]) db.service_get_by_args.assert_called_once_with( self.context, fake_dest_host, 'manila-share') (api.API.get_share_server_migration_request_spec_dict. assert_called_once_with( self.context, fake_share_instances, fake_snap_instances, availability_zone_id=fake_service['availability_zone_id'], share_network_id=fake_new_network['id'])) (self.share_manager.driver.share_server_migration_check_compatibility. assert_called_once_with( self.context, fake_old_share_server, fake_dest_host, fake_old_network, fake_new_network, fake_request_spec)) (self.share_manager._validate_check_compatibility_result. assert_called_once_with( self.context, fake_old_share_server, fake_share_instances, fake_snap_instances, fake_driver_result, fake_dest_host, nondisruptive, writable, preserve_snapshots, resource_type='share server')) (self.share_manager._provide_share_server_for_migration. assert_called_once_with( self.context, fake_old_share_server, fake_new_network['id'], fake_service['availability_zone_id'], fake_dest_host, create_on_backend=create_on_backend)) db.share_server_update.assert_has_calls( self._get_share_server_start_update_calls( fake_old_share_server, fake_new_share_server)) (self.share_manager.driver.share_server_migration_start. assert_called_once_with( self.context, fake_old_share_server, fake_new_share_server, fake_share_instances, fake_snap_instances)) if not create_on_backend: share_utils.is_az_subnets_compatible.assert_called_once_with( [new_network_subnet], [network_subnet]) (db.share_network_subnet_get_all_by_share_server_id. 
assert_called_once_with( self.context, fake_new_share_server['id'])) self.share_manager.driver.allocate_network.assert_called_once_with( self.context, fake_new_share_server, fake_new_network, new_network_subnet) (self.share_manager.driver.allocate_admin_network. assert_called_once_with(self.context, fake_new_share_server)) if not writable: (self.share_manager._cast_access_rules_to_readonly_for_server. assert_called_once_with( self.context, fake_share_instances, fake_old_share_server, dest_host=fake_old_share_server['host'])) else: (self.share_manager._cast_access_rules_to_readonly_for_server. assert_not_called()) if server_info: db.share_server_backend_details_set.assert_called_once_with( self.context, fake_new_share_server['id'], server_info.get('backend_details')) def test__share_server_migration_start_driver_exception(self): fake_old_share_server = db_utils.create_share_server() fake_new_share_server = db_utils.create_share_server() fake_old_network = db_utils.create_share_network() fake_new_network = db_utils.create_share_network() fake_share_instances = [ db_utils.create_share( share_server_id=fake_old_share_server['id'], share_network_id=fake_old_network['id'])['instance']] fake_share_instance_ids = [ fake_instance['id'] for fake_instance in fake_share_instances] fake_snap_instances = [] fake_snap_instance_ids = [] fake_service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} fake_request_spec = {} fake_dest_host = 'fakehost@fakebackend' nondisruptive = False preserve_snapshots = True writable = True fake_driver_result = { 'compatible': True, 'writable': writable, 'preserve_snapshots': preserve_snapshots, 'nondisruptive': nondisruptive, 'share_network_id': fake_new_network['id'], 'migration_cancel': False, 'migration_get_progress': False } server_info = { 'fake_server_info_key': 'fake_server_info_value', 'backend_details': {'fake': 'fake'} } network_subnet = db_utils.create_share_network_subnet() 
self._setup_server_migration_start_mocks( fake_share_instances, fake_snap_instances, fake_old_network, fake_new_network, fake_service, fake_request_spec, fake_driver_result, fake_new_share_server, server_info, network_subnet) mock__reset_read_only = self.mock_object( self.share_manager, '_reset_read_only_access_rules_for_server') self.share_manager.driver.share_server_migration_start.side_effect = ( Exception ) self.assertRaises( exception.ShareServerMigrationFailed, self.share_manager._share_server_migration_start_driver, self.context, fake_old_share_server, fake_dest_host, writable, nondisruptive, preserve_snapshots, fake_new_network['id'] ) db.share_instance_get_all_by_share_server.assert_called_once_with( self.context, fake_old_share_server['id'], with_share_data=True) (db.share_snapshot_instance_get_all_with_filters. assert_called_once_with( self.context, {'share_instance_ids': fake_share_instance_ids})) db.share_network_get.assert_has_calls( [mock.call(self.context, fake_old_network['id']), mock.call(self.context, fake_new_network['id'])]) self.share_manager._update_resource_status.assert_has_calls([ mock.call( self.context, constants.STATUS_AVAILABLE, share_instance_ids=fake_share_instance_ids, snapshot_instance_ids=fake_snap_instance_ids)]) db.service_get_by_args.assert_called_once_with( self.context, fake_dest_host, 'manila-share') (api.API.get_share_server_migration_request_spec_dict. assert_called_once_with( self.context, fake_share_instances, fake_snap_instances, availability_zone_id=fake_service['availability_zone_id'], share_network_id=fake_new_network['id'])) (self.share_manager.driver.share_server_migration_check_compatibility. assert_called_once_with( self.context, fake_old_share_server, fake_dest_host, fake_old_network, fake_new_network, fake_request_spec)) (self.share_manager._validate_check_compatibility_result. 
assert_called_once_with( self.context, fake_old_share_server, fake_share_instances, fake_snap_instances, fake_driver_result, fake_dest_host, nondisruptive, writable, preserve_snapshots, resource_type='share server')) (self.share_manager._provide_share_server_for_migration. assert_called_once_with( self.context, fake_old_share_server, fake_new_network['id'], fake_service['availability_zone_id'], fake_dest_host, create_on_backend=True)) db.share_server_update.assert_has_calls( self._get_share_server_start_update_calls( fake_old_share_server, fake_new_share_server, driver_failed=True)) (self.share_manager.driver.share_server_migration_start. assert_called_once_with( self.context, fake_old_share_server, fake_new_share_server, fake_share_instances, fake_snap_instances)) mock__reset_read_only.assert_called_once_with( self.context, fake_share_instances, fake_old_share_server, dest_host=fake_old_share_server['host'] ) if not writable: (self.share_manager._cast_access_rules_to_readonly_for_server. assert_called_once_with( self.context, fake_share_instances, fake_old_share_server, fake_old_share_server['host'])) else: (self.share_manager._cast_access_rules_to_readonly_for_server. 
assert_not_called()) self.share_manager.delete_share_server.assert_called_once_with( self.context, fake_new_share_server) @ddt.data(None, exception.ShareServerMigrationFailed) def test_share_server_migration_check(self, check_action): fake_share_server = db_utils.create_share_server() fake_old_network = db_utils.create_share_network() fake_new_network = db_utils.create_share_network() fake_dest_host = 'fakehost@fakebackend' fake_share_instances = [ db_utils.create_share( share_network_id=fake_old_network['id'])['instance']] fake_share_instance_ids = [ fake_instance['id'] for fake_instance in fake_share_instances] fake_snap_instances = [] fake_service = {'availability_zone_id': 'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} fake_request_spec = {} nondisruptive = False writable = True preserve_snapshots = True fake_driver_result = { 'compatible': True, 'writable': writable, 'preserve_snapshots': preserve_snapshots, 'nondisruptive': nondisruptive, 'share_network_id': fake_new_network['id'], 'migration_cancel': False, 'migration_get_progress': False } mock_server_get = self.mock_object( db, 'share_server_get', mock.Mock(return_value=fake_share_server)) mock_get_server_instances = self.mock_object( db, 'share_instance_get_all_by_share_server', mock.Mock(return_value=fake_share_instances)) mock_snap_instances_get = self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=fake_snap_instances)) mock_sn_get = self.mock_object( db, 'share_network_get', mock.Mock(side_effect=[fake_old_network, fake_new_network])) mock_service_get = self.mock_object( db, 'service_get_by_args', mock.Mock(return_value=fake_service)) mock_get_req_spec = self.mock_object( api.API, 'get_share_server_migration_request_spec_dict', mock.Mock(return_value=fake_request_spec)) mock_driver_check = self.mock_object( self.share_manager.driver, 'share_server_migration_check_compatibility', mock.Mock(return_value=fake_driver_result)) 
mock__validate_check_compatibility = self.mock_object( self.share_manager, '_validate_check_compatibility_result') if isinstance(check_action, exception.ShareServerMigrationFailed): mock__validate_check_compatibility.side_effect = ( exception.ShareServerMigrationFailed) fake_driver_result['compatible'] = False result = self.share_manager.share_server_migration_check( self.context, fake_share_server['id'], fake_dest_host, True, False, True, fake_new_network['id'] ) self.assertEqual(fake_driver_result, result) mock_server_get.assert_called_once_with( self.context, fake_share_server['id']) mock_get_server_instances.assert_called_once_with( self.context, fake_share_server['id'], with_share_data=True ) mock_snap_instances_get.assert_called_once_with( self.context, {'share_instance_ids': fake_share_instance_ids} ) mock_sn_get.assert_has_calls( [mock.call(self.context, fake_old_network['id']), mock.call(self.context, fake_new_network['id'])] ) mock_service_get.assert_called_once_with( self.context, fake_dest_host, 'manila-share' ) mock_get_req_spec.assert_called_once_with( self.context, fake_share_instances, fake_snap_instances, availability_zone_id=fake_service['availability_zone_id'], share_network_id=fake_new_network['id'] ) mock_driver_check.assert_called_once_with( self.context, fake_share_server, fake_dest_host, fake_old_network, fake_new_network, fake_request_spec ) mock__validate_check_compatibility.assert_called_once_with( self.context, fake_share_server, fake_share_instances, fake_snap_instances, fake_driver_result, fake_dest_host, nondisruptive, writable, preserve_snapshots, resource_type='share server' ) def test_share_server_migration_check_dhss_false(self): self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = False expected = { 'compatible': False, 'writable': None, 'preserve_snapshots': None, 'nondisruptive': None, 'share_network_id': 'new_share_network_id', 'migration_cancel': None, 
'migration_get_progress': None } result = self.share_manager.share_server_migration_check( self.context, 'fake_share_server_id', 'fake_dest_host', False, False, False, 'new_share_network_id' ) self.assertEqual(expected, result) def test_share_server_migration_start(self): fake_share_server = db_utils.create_share_server() fake_share_network = db_utils.create_share_server() fake_dest_host = 'fakehost@fakebackend' writable = True nondisruptive = True preserve_snapshots = True mock_server_update = self.mock_object(db, 'share_server_update') mock_server_get = self.mock_object( db, 'share_server_get', mock.Mock(return_value=fake_share_server)) mock__server_migration_start_driver = self.mock_object( self.share_manager, '_share_server_migration_start_driver') self.share_manager.share_server_migration_start( self.context, fake_share_server['id'], fake_dest_host, writable, nondisruptive, preserve_snapshots, fake_share_network['id'] ) mock_server_update.assert_called_once_with( self.context, fake_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS} ) mock_server_get.assert_called_once_with( self.context, fake_share_server['id'] ) mock__server_migration_start_driver.assert_called_once_with( self.context, fake_share_server, fake_dest_host, writable, nondisruptive, preserve_snapshots, fake_share_network['id'] ) @ddt.data(True, False) def test_share_server_migration_start_exception(self, dhss): fake_share_server = db_utils.create_share_server() fake_share_network = db_utils.create_share_server() fake_dest_host = 'fakehost@fakebackend' writable = True nondisruptive = True preserve_snapshots = True self.mock_object(self.share_manager, 'driver') self.share_manager.driver.driver_handles_share_servers = dhss mock_server_update = self.mock_object(db, 'share_server_update') mock_server_get = self.mock_object( db, 'share_server_get', mock.Mock(return_value=fake_share_server)) mock__server_migration_start_driver = self.mock_object( self.share_manager, 
'_share_server_migration_start_driver', mock.Mock(side_effect=exception.ShareServerMigrationFailed( reason='fake_reason'))) self.share_manager.share_server_migration_start( self.context, fake_share_server['id'], fake_dest_host, writable, nondisruptive, preserve_snapshots, fake_share_network['id'] ) mock_server_update.assert_has_calls([ mock.call( self.context, fake_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), mock.call( self.context, fake_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ACTIVE} ) ]) mock_server_get.assert_called_once_with( self.context, fake_share_server['id'] ) if dhss: mock__server_migration_start_driver.assert_called_once_with( self.context, fake_share_server, fake_dest_host, writable, nondisruptive, preserve_snapshots, fake_share_network['id'] ) def _setup_migration_continue_mocks( self, fake_share_servers, fake_share_instances, fake_snapshot_instances): self.mock_object( db, 'share_server_get_all_by_host', mock.Mock(return_value=fake_share_servers)) self.mock_object( db, 'share_instance_get_all_by_share_server', mock.Mock(return_value=fake_share_instances)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=fake_snapshot_instances)) @ddt.data(True, False) def test_share_server_migration_continue(self, finished): fake_src_share_servers = [ db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING, task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)] fake_dest_share_servers = [ db_utils.create_share_server( source_share_server_id=fake_src_share_servers[0]['id'], status=constants.STATUS_SERVER_MIGRATING_TO, task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS )] fake_share_instances = [db_utils.create_share()['instance']] fake_share_instance_ids = [ instance['id'] for instance in fake_share_instances] fake_cancelled_share_server = db_utils.create_share_server() fake_snapshot_instances = [] 
server_update_calls = [ mock.call( self.context, fake_src_share_servers[0]['id'], { 'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE}), mock.call( self.context, fake_dest_share_servers[0]['id'], { 'task_state': constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE }) ] self._setup_migration_continue_mocks( fake_dest_share_servers, fake_share_instances, fake_snapshot_instances) self.mock_object(db, 'share_server_get', mock.Mock(side_effect=[fake_src_share_servers[0], fake_cancelled_share_server])) self.mock_object( self.share_manager.driver, 'share_server_migration_continue', mock.Mock(return_value=finished)) self.mock_object(db, 'share_server_update') self.share_manager.share_server_migration_driver_continue( self.context) db.share_server_get_all_by_host.assert_called_once_with( self.context, self.share_manager.host, filters={'status': constants.STATUS_SERVER_MIGRATING_TO} ) db.share_instance_get_all_by_share_server.assert_called_once_with( self.context, fake_src_share_servers[0]['id'], with_share_data=True ) (db.share_snapshot_instance_get_all_with_filters. assert_called_once_with( self.context, {'share_instance_ids': fake_share_instance_ids})) (self.share_manager.driver.share_server_migration_continue. 
assert_called_once_with( self.context, fake_src_share_servers[0], fake_dest_share_servers[0], fake_share_instances, fake_snapshot_instances)) if finished: db.share_server_update.assert_has_calls(server_update_calls) db.share_server_get.assert_called_once_with( self.context, fake_src_share_servers[0]['id'] ) else: db.share_server_get.assert_has_calls([ mock.call(self.context, fake_src_share_servers[0]['id']), mock.call(self.context, fake_src_share_servers[0]['id']), ]) @ddt.data( { 'src_share_server_exists': False, 'action_migration_continue': { 'return_value': True } }, { 'src_share_server_exists': True, 'action_migration_continue': { 'side_effect': Exception } } ) @ddt.unpack def test_share_server_migration_continue_exception( self, src_share_server_exists, action_migration_continue): fake_src_share_server = db_utils.create_share_server( status=constants.STATUS_SERVER_MIGRATING, task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) fake_dest_share_servers = [ db_utils.create_share_server( source_share_server_id=fake_src_share_server['id'], status=constants.STATUS_SERVER_MIGRATING_TO, task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)] fake_share_instances = [db_utils.create_share()['instance']] fake_share_instance_ids = [ instance['id'] for instance in fake_share_instances] fake_snapshot_instances = [] fake_snapshot_instance_ids = [] server_update_calls = [mock.call( self.context, fake_dest_share_servers[0]['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ERROR} )] if src_share_server_exists: self.mock_object(db, 'share_server_get', mock.Mock(return_value=fake_src_share_server)) server_update_calls.append( mock.call( self.context, fake_src_share_server['id'], { 'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ACTIVE })) else: self.mock_object(db, 'share_server_get', mock.Mock(return_value=None)) self._setup_migration_continue_mocks( fake_dest_share_servers, fake_share_instances, 
fake_snapshot_instances) mock_server_update = self.mock_object(db, 'share_server_update') self.mock_object( self.share_manager.driver, 'share_server_migration_continue', mock.Mock(**action_migration_continue) ) mock__update_resource_status = self.mock_object( self.share_manager, '_update_resource_status') mock__rest_read_only_access_rules = self.mock_object( self.share_manager, '_reset_read_only_access_rules_for_server' ) self.share_manager.share_server_migration_driver_continue( self.context) db.share_server_get_all_by_host.assert_called_once_with( self.context, self.share_manager.host, filters={'status': constants.STATUS_SERVER_MIGRATING_TO}) db.share_server_get.assert_called_once_with( self.context, fake_src_share_server['id']) if src_share_server_exists: db.share_instance_get_all_by_share_server.assert_called_once_with( self.context, fake_src_share_server['id'], with_share_data=True) (db.share_snapshot_instance_get_all_with_filters. assert_called_once_with( self.context, {'share_instance_ids': fake_share_instance_ids})) mock__update_resource_status.assert_called_once_with( self.context, constants.STATUS_AVAILABLE, share_instance_ids=fake_share_instance_ids, snapshot_instance_ids=fake_snapshot_instance_ids ) mock__rest_read_only_access_rules.assert_called_once_with( self.context, fake_share_instances, fake_src_share_server, dest_host=fake_src_share_server['host'] ) mock_server_update.assert_has_calls(server_update_calls) def _setup_server_migration_complete_mocks( self, fake_source_share_server, fake_dest_share_server, fake_share_instances, fake_snapshot_instances, additional_server_get_side_effect=None): server_get_side_effects = [fake_dest_share_server, fake_source_share_server] if additional_server_get_side_effect: server_get_side_effects.append(additional_server_get_side_effect) self.mock_object( db, 'share_server_get', mock.Mock(side_effect=server_get_side_effects)) self.mock_object( db, 'share_instance_get_all_by_share_server', 
mock.Mock(return_value=fake_share_instances)) self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=fake_snapshot_instances)) self.mock_object( self.share_manager, '_update_resource_status') self.mock_object(db, 'share_server_update') @ddt.data(True, False) def test_share_server_migration_complete_exception( self, server_already_dropped): fake_source_share_server = db_utils.create_share_server() fake_dest_share_server = db_utils.create_share_server() fake_share_instances = [db_utils.create_share()['instance']] fake_share_instance_ids = [ instance['id'] for instance in fake_share_instances] fake_snapshot_instances = [] fake_snapshot_instance_ids = [] server_get_additional_se = ( exception.ShareServerNotFound if server_already_dropped else fake_dest_share_server) server_update_calls = [ mock.call(self.context, fake_source_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ERROR}), ] if not server_already_dropped: server_update_calls.append( mock.call( self.context, fake_dest_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_ERROR, 'status': constants.STATUS_ERROR}) ) self._setup_server_migration_complete_mocks( fake_source_share_server, fake_dest_share_server, fake_share_instances, fake_snapshot_instances, server_get_additional_se) mock__server_migration_complete = self.mock_object( self.share_manager, '_server_migration_complete_driver', mock.Mock(side_effect=Exception)) self.assertRaises( exception.ShareServerMigrationFailed, self.share_manager.share_server_migration_complete, self.context, fake_source_share_server['id'], fake_dest_share_server['id'] ) db.share_server_get.assert_has_calls( [mock.call(self.context, fake_dest_share_server['id']), mock.call(self.context, fake_source_share_server['id'])] ) db.share_instance_get_all_by_share_server.assert_called_once_with( self.context, fake_source_share_server['id'], with_share_data=True) 
(db.share_snapshot_instance_get_all_with_filters. assert_called_once_with( self.context, {'share_instance_ids': fake_share_instance_ids})) mock__server_migration_complete.assert_called_once_with( self.context, fake_source_share_server, fake_share_instances, fake_snapshot_instances, fake_dest_share_server) self.share_manager._update_resource_status.assert_called_once_with( self.context, constants.STATUS_ERROR, share_instance_ids=fake_share_instance_ids, snapshot_instance_ids=fake_snapshot_instance_ids) db.share_server_update.assert_has_calls(server_update_calls) @ddt.data(('fake_src_identifier', 'fake_dest_identifier'), ('fake_src_identifier', None)) @ddt.unpack def test_share_server_migration_complete( self, src_identifier, dest_identifier): fake_source_share_server = db_utils.create_share_server( identifier=src_identifier) fake_dest_share_server = db_utils.create_share_server( identifier=dest_identifier) fake_share_instances = [db_utils.create_share()['instance']] fake_share_instance_ids = [ instance['id'] for instance in fake_share_instances] fake_snapshot_instances = [] fake_snapshot_instance_ids = [] expected_identifier = ( dest_identifier if dest_identifier else src_identifier) expected_server_update = { 'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, 'status': constants.STATUS_ACTIVE, } if not dest_identifier: expected_server_update['identifier'] = expected_identifier self._setup_server_migration_complete_mocks( fake_source_share_server, fake_dest_share_server, fake_share_instances, fake_snapshot_instances ) mock__server_migration_complete = self.mock_object( self.share_manager, '_server_migration_complete_driver') self.share_manager.share_server_migration_complete( self.context, fake_source_share_server['id'], fake_dest_share_server['id']) db.share_server_get.assert_has_calls( [mock.call(self.context, fake_dest_share_server['id']), mock.call(self.context, fake_source_share_server['id'])] ) 
db.share_instance_get_all_by_share_server.assert_called_once_with( self.context, fake_source_share_server['id'], with_share_data=True) (db.share_snapshot_instance_get_all_with_filters. assert_called_once_with( self.context, {'share_instance_ids': fake_share_instance_ids})) mock__server_migration_complete.assert_called_once_with( self.context, fake_source_share_server, fake_share_instances, fake_snapshot_instances, fake_dest_share_server) self.share_manager._update_resource_status.assert_called_once_with( self.context, constants.STATUS_AVAILABLE, share_instance_ids=fake_share_instance_ids, snapshot_instance_ids=fake_snapshot_instance_ids) db.share_server_update.assert_called_once_with( self.context, fake_dest_share_server['id'], expected_server_update) @ddt.data( {'model_update': { 'unmanage_source_server': False, 'snapshot_updates': {}, 'share_updates': {}}, 'need_network_allocation': False, 'can_reuse_server': False}, {'model_update': { 'unmanage_source_server': True, 'snapshot_updates': {}, 'share_updates': {}}, 'need_network_allocation': False, 'can_reuse_server': True}, {'model_update': { 'unmanage_source_server': False, 'snapshot_updates': {}, 'share_updates': {}}, 'need_network_allocation': True, 'can_reuse_server': False}, {'model_update': { 'unmanage_source_server': True, 'snapshot_updates': {}, 'share_updates': {}}, 'need_network_allocation': True, 'can_reuse_server': True} ) @ddt.unpack def test__server_migration_complete_driver(self, model_update, need_network_allocation, can_reuse_server): fake_share_network = db_utils.create_share_network() fake_share_network_subnet = db_utils.create_share_network_subnet( share_network_id=fake_share_network['id']) fake_source_share_server = db_utils.create_share_server() fake_dest_share_server = db_utils.create_share_server( share_network_subnets=[fake_share_network_subnet]) fake_share = db_utils.create_share() fake_snapshot = db_utils.create_snapshot(share_id=fake_share['id']) fake_service = {'availability_zone_id': 
'fake_az_id', 'availability_zone': {'name': 'fake_az1'}} fake_share_instances = [fake_share['instance']] fake_snapshot_instances = [fake_snapshot['instance']] fake_share_instance_id = fake_share['instance']['id'] fake_alloc_data = [{ 'network_allocations': [{'id': 'fake_id'}], 'admin_network_allocations': [{'id': 'fake_admin_id'}], }] model_update['share_updates'][fake_share['instance']['id']] = { 'export_locations': { "path": "10.10.10.31:/fake_mount_point", "metadata": { "preferred": True, }, "is_admin_only": False, }, 'pool_name': 'fakepool' } snapshot_el_update = { "path": "10.10.10.31:/fake_snap_mount_point", "is_admin_only": False, } model_update['snapshot_updates'][fake_snapshot['instance']['id']] = { 'export_locations': [snapshot_el_update] } fake_instance_update = { 'share_server_id': fake_dest_share_server['id'], 'host': fake_dest_share_server['host'] + '#fakepool', 'share_network_id': fake_share_network['id'], 'availability_zone_id': fake_service['availability_zone_id'], } backend_details = fake_source_share_server.get("backend_details") mock_backend_details_set_calls = [] if backend_details: for k, v in backend_details.items(): mock_backend_details_set_calls.append( mock.call( self.context, fake_dest_share_server['id'], {k: v}) ) dest_network_allocations = [] if need_network_allocation: dest_network_allocations.append({'id': 'fake_allocation'}) mock_server_update = self.mock_object(db, 'share_server_update') mock_network_get = self.mock_object( db, 'share_network_get', mock.Mock(return_value=fake_share_network)) mock_allocations_get = self.mock_object( db, 'network_allocations_get_for_share_server', mock.Mock(return_value=dest_network_allocations) ) mock_subnet_get = self.mock_object( db, 'share_network_subnet_get_all_by_share_server_id', mock.Mock(return_value=fake_share_network_subnet)) mock_form_server_setup_info = self.mock_object( self.share_manager, '_form_server_setup_info', mock.Mock(return_value=fake_alloc_data)) mock_server_migration_complete 
= self.mock_object( self.share_manager.driver, 'share_server_migration_complete', mock.Mock(return_value=model_update)) mock_network_allocation_update = self.mock_object( db, 'network_allocation_update') mock_share_server_backend_details_set = self.mock_object( db, 'share_server_backend_details_set') mock_service_get_by_args = self.mock_object( db, 'service_get_by_args', mock.Mock(return_value=fake_service)) mock_instance_update = self.mock_object(db, 'share_instance_update') mock_el_update = self.mock_object(db, 'export_locations_update') mock_snap_el_update = self.mock_object( db, 'share_snapshot_instance_export_locations_update') mock_reset_access_rules = self.mock_object( self.share_manager, '_reset_read_only_access_rules_for_server') mock_unmanage_server = self.mock_object( rpcapi.ShareAPI, 'unmanage_share_server') mock_delete_server = self.mock_object(db, 'share_server_delete') mock_deallocate_network = self.mock_object( self.share_manager.driver, 'deallocate_network') self.share_manager._server_migration_complete_driver( self.context, fake_source_share_server, fake_share_instances, fake_snapshot_instances, fake_dest_share_server) mock_server_update.assert_has_calls( [mock.call( self.context, fake_source_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}), mock.call( self.context, fake_dest_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}), mock.call( self.context, fake_source_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS, 'status': constants.STATUS_INACTIVE})]) mock_network_get.assert_called_once_with( self.context, fake_share_network['id']) mock_subnet_get.assert_called_once_with( self.context, fake_dest_share_server['id']) if need_network_allocation: mock_allocations_get.assert_called_once_with( self.context, fake_dest_share_server['id']) else: mock_allocations_get.assert_has_calls( calls=[ mock.call(self.context, fake_dest_share_server['id']), mock.call(self.context, 
                              fake_source_share_server['id']),
                ]
            )
        # The assertions below verify how the completed migration is
        # persisted: which server's data formed the setup info, whether
        # network allocations were re-pointed at the destination server,
        # and how the share/snapshot records were updated.
        if not need_network_allocation:
            mock_form_server_setup_info.assert_called_once_with(
                self.context, fake_source_share_server, fake_share_network,
                fake_share_network_subnet)
        elif need_network_allocation:
            mock_share_server_backend_details_set.assert_has_calls(
                mock_backend_details_set_calls)
            mock_form_server_setup_info.assert_called_once_with(
                self.context, fake_dest_share_server, fake_share_network,
                fake_share_network_subnet)
            # New allocations must now belong to the destination server.
            mock_network_allocation_update.assert_has_calls(
                [mock.call(
                    self.context,
                    fake_alloc_data[0]['network_allocations'][0]['id'],
                    {'share_server_id': fake_dest_share_server['id']}),
                 mock.call(
                    self.context,
                    fake_alloc_data[0]['admin_network_allocations'][0]['id'],
                    {'share_server_id': fake_dest_share_server['id']})])
        mock_server_migration_complete.assert_called_once_with(
            self.context, fake_source_share_server, fake_dest_share_server,
            fake_share_instances, fake_snapshot_instances, fake_alloc_data
        )
        mock_service_get_by_args.assert_called_once_with(
            self.context, fake_dest_share_server['host'], 'manila-share')
        mock_instance_update.assert_called_once_with(
            self.context, fake_share_instance_id, fake_instance_update)
        mock_el_update.assert_called_once_with(
            self.context, fake_share_instance_id,
            model_update['share_updates'][fake_share_instance_id][
                'export_locations'])
        mock_snap_el_update.assert_called_once_with(
            self.context, fake_snapshot['instance']['id'],
            [snapshot_el_update]
        )
        mock_reset_access_rules.assert_called_once_with(
            self.context, fake_share_instances, fake_source_share_server,
            dest_host=fake_source_share_server['host'])
        # NOTE(review): the ddt data populates 'unmanage_source_server',
        # but this branch checks 'unmanage_share_server'; .get() therefore
        # always returns None and every ddt case falls through to the else
        # branch, so the unmanage assertion is never exercised. Confirm the
        # key the manager's model_update actually uses before changing it,
        # since fixing the key changes which assertions run here.
        if model_update.get('unmanage_share_server') is True:
            mock_unmanage_server.assert_called_once_with(
                self.context, fake_source_share_server)
        else:
            mock_deallocate_network.assert_called_once_with(
                self.context, fake_source_share_server['id'])
            mock_delete_server.assert_called_once_with(
                self.context, fake_source_share_server['id'])

    @ddt.data(constants.TASK_STATE_MIGRATION_SUCCESS,
constants.TASK_STATE_MIGRATION_IN_PROGRESS) def test_server_migration_cancel_exception(self, task_state): fake_source_share_server = db_utils.create_share_server( task_state=task_state) fake_dest_share_server = db_utils.create_share_server() mock_server_get = self.mock_object( db, 'share_server_get', mock.Mock(side_effect=[fake_source_share_server, fake_dest_share_server])) self.assertRaises( exception.InvalidShareServer, self.share_manager.share_server_migration_cancel, self.context, fake_source_share_server['id'], fake_dest_share_server['id'] ) mock_server_get.assert_has_calls([ mock.call(self.context, fake_source_share_server['id']), mock.call(self.context, fake_dest_share_server['id'])]) @ddt.data( constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) def test_share_server_migration_cancel(self, task_state): fake_source_share_server = db_utils.create_share_server( task_state=task_state) fake_dest_share_server = db_utils.create_share_server() fake_share = db_utils.create_share() fake_share_instances = [fake_share['instance']] fake_share_instance_ids = [fake_share['instance']['id']] fake_snapshot = db_utils.create_snapshot(share_id=fake_share['id']) fake_snapshot_instances = [fake_snapshot['instance']] fake_snapshot_instance_ids = [fake_snapshot['instance']['id']] mock_server_get = self.mock_object( db, 'share_server_get', mock.Mock(side_effect=[fake_source_share_server, fake_dest_share_server])) mock_get_instances = self.mock_object( db, 'share_instance_get_all_by_share_server', mock.Mock(return_value=fake_share_instances)) mock_get_snap_instances = self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=fake_snapshot_instances)) mock_migration_cancel = self.mock_object( self.share_manager.driver, 'share_server_migration_cancel') mock_server_update = self.mock_object(db, 'share_server_update') mock_check_delete_server = self.mock_object( self.share_manager, 
'_check_delete_share_server') mock_update_resource = self.mock_object( self.share_manager, '_update_resource_status') mock_reset_read_only_rules = self.mock_object( self.share_manager, '_reset_read_only_access_rules_for_server') self.share_manager.share_server_migration_cancel( self.context, fake_source_share_server['id'], fake_dest_share_server['id']) mock_server_get.assert_has_calls([ mock.call(self.context, fake_source_share_server['id']), mock.call(self.context, fake_dest_share_server['id'])]) mock_get_instances.assert_called_once_with( self.context, fake_source_share_server['id'], with_share_data=True) mock_get_snap_instances.assert_called_once_with( self.context, {'share_instance_ids': fake_share_instance_ids}) mock_migration_cancel.assert_called_once_with( self.context, fake_source_share_server, fake_dest_share_server, fake_share_instances, fake_snapshot_instances) mock_server_update.assert_has_calls([ mock.call( self.context, fake_dest_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED, 'status': constants.STATUS_INACTIVE} ), mock.call( self.context, fake_source_share_server['id'], {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED, 'status': constants.STATUS_ACTIVE} ) ]) mock_check_delete_server.assert_called_once_with( self.context, share_server=fake_dest_share_server) mock_update_resource.assert_called_once_with( self.context, constants.STATUS_AVAILABLE, share_instance_ids=fake_share_instance_ids, snapshot_instance_ids=fake_snapshot_instance_ids) mock_reset_read_only_rules.assert_called_once_with( self.context, fake_share_instances, fake_source_share_server, dest_host=fake_source_share_server['host']) @ddt.data( constants.TASK_STATE_MIGRATION_STARTING, constants.TASK_STATE_MIGRATION_CANCELLED, ) def test_migration_get_progress_exception(self, task_state): fake_source_share_server = db_utils.create_share_server( task_state=task_state) fake_dest_share_server = db_utils.create_share_server() self.mock_object( db, 
'share_server_get', mock.Mock(side_effect=[fake_source_share_server, fake_dest_share_server])) self.assertRaises( exception.InvalidShareServer, self.share_manager.share_server_migration_cancel, self.context, fake_source_share_server['id'], fake_dest_share_server['id'] ) def test_share_server_migration_get_progress(self): fake_source_share_server = db_utils.create_share_server( task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) fake_dest_share_server = db_utils.create_share_server() fake_progress = {"total_progress": 75} fake_share = db_utils.create_share() fake_share_instances = [fake_share['instance']] fake_share_instance_ids = [fake_share['instance']['id']] fake_snapshot = db_utils.create_snapshot(share_id=fake_share['id']) fake_snapshot_instances = [fake_snapshot['instance']] mock_server_get = self.mock_object( db, 'share_server_get', mock.Mock(side_effect=[fake_source_share_server, fake_dest_share_server])) mock_get_instances = self.mock_object( db, 'share_instance_get_all_by_share_server', mock.Mock(return_value=fake_share_instances)) mock_get_snap_instances = self.mock_object( db, 'share_snapshot_instance_get_all_with_filters', mock.Mock(return_value=fake_snapshot_instances)) mock_migration_get_progress = self.mock_object( self.share_manager.driver, 'share_server_migration_get_progress', mock.Mock(return_value=fake_progress)) self.share_manager.share_server_migration_get_progress( self.context, fake_source_share_server['id'], fake_dest_share_server['id']) mock_get_instances.assert_called_once_with( self.context, fake_source_share_server['id'], with_share_data=True) mock_get_snap_instances.assert_called_once_with( self.context, {'share_instance_ids': fake_share_instance_ids}) mock_server_get.assert_has_calls([ mock.call(self.context, fake_source_share_server['id']), mock.call(self.context, fake_dest_share_server['id'])]) mock_migration_get_progress.assert_called_once_with( self.context, fake_source_share_server, fake_dest_share_server, 
fake_share_instances, fake_snapshot_instances) @ddt.data([constants.STATUS_ERROR, constants.STATUS_ACTIVE], [constants.STATUS_ACTIVE, constants.STATUS_ACTIVE]) def test__check_share_network_update_finished(self, server_statuses): share_servers = [ db_utils.create_share_server(status=status) for status in server_statuses] share_network = db_utils.create_share_network( status=constants.STATUS_SERVER_NETWORK_CHANGE) all_servers_are_active = ( all(server_statuses) == constants.STATUS_ACTIVE) self.mock_object(db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db, 'share_server_get_all_with_filters', mock.Mock(return_value=share_servers)) self.mock_object(db, 'share_network_update') self.share_manager._check_share_network_update_finished( self.context, share_network['id']) db.share_server_get_all_with_filters.assert_called_once_with( self.context, {'share_network_id': share_network['id']}) db.share_network_get.assert_called_once_with( self.context, share_network['id']) if all_servers_are_active: db.share_network_update.assert_called_once_with( self.context, share_network['id'], {'status': constants.STATUS_NETWORK_ACTIVE}) def test__check_share_network_update_finished_already_active(self): share_network = db_utils.create_share_network() self.mock_object(db, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(db, 'share_server_get_all_with_filters') self.share_manager._check_share_network_update_finished( self.context, share_network['id']) db.share_network_get.assert_called_once_with( self.context, share_network['id']) db.share_server_get_all_with_filters.assert_not_called() def _setup_mocks_for_sec_service_update( self, service_get_effect, share_network, share_servers, subnet, network_info, share_instances, fake_rules, driver_support_update=True, driver_update_action=mock.Mock()): self.mock_object( db, 'security_service_get', mock.Mock(side_effect=service_get_effect)) self.mock_object( db, 'share_network_get', 
            mock.Mock(return_value=share_network))
        self.mock_object(
            db, 'share_server_get_all_by_host',
            mock.Mock(return_value=share_servers))
        self.mock_object(
            db, 'share_network_subnet_get_all_by_share_server_id',
            mock.Mock(return_value=[subnet]))
        self.mock_object(
            self.share_manager, '_form_server_setup_info',
            mock.Mock(return_value=network_info))
        self.mock_object(
            db, 'share_instance_get_all_by_share_server',
            mock.Mock(return_value=share_instances))
        self.mock_object(
            db, 'share_access_get_all_for_instance',
            mock.Mock(return_value=fake_rules))
        self.mock_object(
            self.share_manager.driver,
            'check_update_share_server_security_service',
            mock.Mock(return_value=driver_support_update))
        self.mock_object(db, 'share_server_backend_details_set')
        self.mock_object(
            self.share_manager.driver,
            'update_share_server_security_service',
            driver_update_action)
        self.mock_object(db, 'share_server_update')
        self.mock_object(
            self.share_manager, '_check_share_network_update_finished')
        self.mock_object(
            self.share_manager.access_helper,
            'get_and_update_share_instance_access_rules')
        self.mock_object(
            self.share_manager.access_helper,
            'update_share_instances_access_rules_status')
        self.mock_object(
            self.share_manager.access_helper,
            'process_driver_rule_updates')

    @ddt.data(False, True)
    def test__update_share_network_security_service(self, is_check_only):
        """Happy path of a security service update on a share network.

        Runs twice via ddt: once as a dry-run check (check_only=True) and
        once applying the update (check_only=False). The driver and DB
        layers are fully mocked by _setup_mocks_for_sec_service_update.
        """
        security_services = [
            db_utils.create_security_service() for i in range(2)]
        share_network = db_utils.create_share_network()
        share_network_subnet = db_utils.create_share_network_subnet()
        share_servers = [
            db_utils.create_share_server(
                share_network_subnets=[share_network_subnet])]
        security_services_effect = mock.Mock(side_effect=security_services)
        share_network_id = share_network['id']
        # security_services[0] is the "new" service applied to the backend;
        # security_services[1] is passed as current_security_service below.
        current_security_service_id = security_services[0]['id']
        new_security_service_id = security_services[1]['id']
        share_instances = [db_utils.create_share()['instance']]
        fake_rules = ['fake_rules']
        network_info = {'fake': 'fake'}
        # Fields of the security service that the manager is expected to
        # persist (JSON-encoded) into the share server backend details.
        backend_details_keys = [
            'name', 'ou',
            'default_ad_site', 'domain', 'server', 'dns_ip', 'user', 'type',
            'password']
        backend_details_data = {}
        # NOTE(review): list comprehension used purely for its side effect;
        # a plain for-loop (or a dict comprehension) would be clearer.
        [backend_details_data.update(
            {key: security_services[0][key]}) for key in backend_details_keys]
        backend_details_exp_update = {
            'security_service_' + security_services[0]['type']:
                jsonutils.dumps(backend_details_data)
        }
        expected_instance_rules = [{
            'share_instance_id': share_instances[0]['id'],
            'access_rules': fake_rules
        }]
        # Per-instance rule updates the fake driver reports back; the manager
        # must forward them to access_helper.process_driver_rule_updates.
        rule_updates = {
            share_instances[0]['id']: {
                'access_rule_id': {
                    'access_key': 'fake_access_key',
                    'state': 'active',
                },
            },
        }
        expected_rule_updates_value = rule_updates[share_instances[0]['id']]
        driver_return = mock.Mock(return_value=rule_updates)
        self._setup_mocks_for_sec_service_update(
            security_services_effect, share_network, share_servers,
            share_network_subnet, network_info, share_instances, fake_rules,
            driver_update_action=driver_return)

        result = self.share_manager._update_share_network_security_service(
            self.context, share_network_id, new_security_service_id,
            current_security_service_id=current_security_service_id,
            check_only=is_check_only)

        db.security_service_get.assert_has_calls(
            [mock.call(self.context, security_services[1]['id']),
             mock.call(self.context, security_services[0]['id'])]
        )
        db.share_network_get.assert_called_once_with(
            self.context, share_network_id)
        db.share_server_get_all_by_host.assert_called_once_with(
            self.context, self.share_manager.host,
            filters={'share_network_id': share_network_id})
        (db.share_network_subnet_get_all_by_share_server_id.
            assert_called_once_with(
                self.context, share_servers[0]['id']))
        self.share_manager._form_server_setup_info.assert_called_once_with(
            self.context, share_servers[0], share_network,
            [share_network_subnet]
        )
        db.share_instance_get_all_by_share_server.assert_called_once_with(
            self.context, share_servers[0]['id'], with_share_data=True)
        db.share_access_get_all_for_instance.assert_called_once_with(
            self.context, share_instances[0]['id'])
        if not is_check_only:
            # Applying the update: the driver is invoked, backend details
            # persisted and the share server moved back to ACTIVE.
            (self.share_manager.driver.update_share_server_security_service.
                assert_called_once_with(
                    self.context, share_servers[0], network_info,
                    share_instances, expected_instance_rules,
                    security_services[0],
                    current_security_service=security_services[1]))
            db.share_server_backend_details_set.assert_called_once_with(
                self.context, share_servers[0]['id'],
                backend_details_exp_update)
            db.share_server_update.assert_called_once_with(
                self.context, share_servers[0]['id'],
                {'status': constants.STATUS_ACTIVE})
            (self.share_manager.access_helper.process_driver_rule_updates.
                assert_called_once_with(
                    self.context, expected_rule_updates_value,
                    share_instances[0]['id']))
        else:
            # Dry-run: only the driver's check entry point must be called.
            (self.share_manager.driver.
                check_update_share_server_security_service.
            assert_called_once_with(self.context, share_servers[0]['id']))
        self.share_manager._form_server_setup_info.assert_called_once_with(
            self.context, share_servers[0], share_network,
            [share_network_subnet]
        )
        db.share_instance_get_all_by_share_server.assert_called_once_with(
            self.context, share_servers[0]['id'], with_share_data=True)
        db.share_access_get_all_for_instance.assert_called_once_with(
            self.context, share_instances[0]['id'])
        (self.share_manager.driver.check_update_share_server_security_service.
            assert_called_once_with(
                self.context, share_servers[0], network_info, share_instances,
                expected_instance_rules, security_services[0],
                current_security_service=security_services[1]))
        self.assertEqual(result, False)

    def test__update_share_network_security_service_exception(self):
        """Driver failure during the update must set ERROR statuses."""
        security_services = [
            db_utils.create_security_service() for i in range(2)]
        share_network = db_utils.create_share_network()
        share_network_subnet = db_utils.create_share_network_subnet()
        share_servers = [
            db_utils.create_share_server(
                share_network_subnets=[share_network_subnet])]
        share_instances = [db_utils.create_share_instance(share_id='fake')]
        # NOTE(review): share_instance_ids is derived from this first
        # share_instances list, but share_instances is rebound to a new
        # list a few lines below — confirm which set of ids the production
        # code is actually expected to report as errored.
        share_instance_ids = [instance['id'] for instance in share_instances]
        security_services_effect = mock.Mock(side_effect=security_services)
        share_network_id = share_network['id']
        current_security_service_id = security_services[0]['id']
        new_security_service_id = security_services[1]['id']
        network_info = [{'fake': 'fake'}]
        backend_details_keys = [
            'name', 'ou', 'default_ad_site', 'domain', 'server', 'dns_ip',
            'user', 'type', 'password']
        backend_details_data = {}
        # Side-effect-only comprehension; builds the expected backend details.
        [backend_details_data.update(
            {key: security_services[0][key]}) for key in backend_details_keys]
        backend_details_exp_update = {
            'security_service_' + security_services[0]['type']:
                jsonutils.dumps(backend_details_data)
        }
        # The fake driver raises, exercising the manager's error path.
        driver_exception = mock.Mock(side_effect=Exception())
        share_instances = [db_utils.create_share()['instance']]
        fake_rules = ['fake_rules']
        expected_instance_rules = [{
            'share_instance_id': share_instances[0]['id'],
            'access_rules': fake_rules
        }]
        self._setup_mocks_for_sec_service_update(
            security_services_effect, share_network, share_servers,
            share_network_subnet, network_info, share_instances, fake_rules,
            driver_update_action=driver_exception)
        self.mock_object(
            self.share_manager.access_helper,
            'update_share_instances_access_rules_status')
        self.mock_object(
            db, 'share_instance_get_all_by_share_server',
            mock.Mock(return_value=share_instances))

        self.share_manager._update_share_network_security_service(
            self.context, share_network_id, new_security_service_id,
            current_security_service_id=current_security_service_id)

        db.security_service_get.assert_has_calls(
            [mock.call(self.context, security_services[1]['id']),
             mock.call(self.context, security_services[0]['id'])]
        )
        db.share_network_get.assert_called_once_with(
            self.context, share_network_id)
        db.share_server_get_all_by_host.assert_called_once_with(
            self.context, self.share_manager.host,
            filters={'share_network_id': share_network_id})
        (db.share_network_subnet_get_all_by_share_server_id.
            assert_called_once_with(self.context, share_servers[0]['id']))
        self.share_manager._form_server_setup_info.assert_called_once_with(
            self.context, share_servers[0], share_network,
            [share_network_subnet]
        )
        (self.share_manager.driver.update_share_server_security_service.
            assert_called_once_with(
                self.context, share_servers[0], network_info, share_instances,
                expected_instance_rules, security_services[0],
                current_security_service=security_services[1]))
        db.share_server_backend_details_set.assert_called_once_with(
            self.context, share_servers[0]['id'],
            backend_details_exp_update)
        # On failure the share server must end up in ERROR.
        db.share_server_update.assert_called_once_with(
            self.context, share_servers[0]['id'],
            {'status': constants.STATUS_ERROR})
        db.share_instance_get_all_by_share_server.assert_called_once_with(
            self.context, share_servers[0]['id'], with_share_data=True)
        db.share_access_get_all_for_instance.assert_called_once_with(
            self.context, share_instances[0]['id'])
        # NOTE(review): the two statements below INVOKE the mocked helpers
        # instead of asserting on them — there is no
        # ".assert_called_once_with" in the attribute chain, so they verify
        # nothing and always "pass". They were almost certainly meant to be
        # assertions on the calls made by the error path; fix by appending
        # .assert_called_once_with(...) after confirming the expected
        # arguments against the production code.
        (self.share_manager.access_helper.
            update_share_instances_access_rules_status(
                self.context, constants.SHARE_INSTANCE_RULES_ERROR,
                share_instance_ids))
        (self.share_manager.access_helper.
            get_and_update_share_instance_access_rules(
                self.context, updates={'state': constants.STATUS_ERROR},
                share_instance_id=share_instances[0]['id']))

    def test_update_share_network_security_service(self):
        """Public RPC entry point delegates with check_only=False."""
        share_network_id = 'fake_sn_id'
        new_security_service_id = 'new_sec_service_id'
        current_security_service_id = 'current_sec_service_id'
        self.mock_object(
            self.share_manager, '_update_share_network_security_service')

        self.share_manager.update_share_network_security_service(
            self.context, share_network_id, new_security_service_id,
            current_security_service_id=current_security_service_id)

        (self.share_manager._update_share_network_security_service.
assert_called_once_with( self.context, share_network_id, new_security_service_id, current_security_service_id=current_security_service_id, check_only=False)) def test_check_update_share_network_security_service(self): share_network_id = 'fake_sn_id' new_security_service_id = 'new_sec_service_id' current_security_service_id = 'current_sec_service_id' self.mock_object( self.share_manager, '_update_share_network_security_service') self.share_manager.check_update_share_network_security_service( self.context, share_network_id, new_security_service_id, current_security_service_id=current_security_service_id) (self.share_manager._update_share_network_security_service. assert_called_once_with( self.context, share_network_id, new_security_service_id, current_security_service_id=current_security_service_id, check_only=True)) @ddt.data(None, '{"fake_host": false}') def test__update_share_server_allocations_check_operation( self, current_hosts_info): update_key = 'fake_key' mock_get_allocations_key = self.mock_object( self.share_manager.share_api, 'get_share_server_update_allocations_key', mock.Mock(return_value=update_key)) mock_get_data = self.mock_object( self.share_manager.db, 'async_operation_data_get', mock.Mock(return_value=current_hosts_info)) mock_update_data = self.mock_object( self.share_manager.db, 'async_operation_data_update') share_network_id = 'fake_net_id' availability_zone_id = 'fake_az_id' self.share_manager._update_share_server_allocations_check_operation( self.context, True, share_network_id=share_network_id, availability_zone_id=availability_zone_id) mock_get_allocations_key.assert_called_once_with( share_network_id, availability_zone_id) mock_get_data.assert_called_once_with( self.context, share_network_id, update_key) if current_hosts_info: mock_update_data.assert_called_once_with( self.context, share_network_id, {update_key: json.dumps({self.share_manager.host: True})}) else: mock_update_data.assert_not_called() def test__get_subnet_allocations(self): 
        """_get_subnet_allocations returns subnet ids plus user allocations."""
        fake_allocations = ['fake_alloc']
        mock_get_allocations = self.mock_object(
            self.share_manager.db,
            'network_allocations_get_for_share_server',
            mock.Mock(return_value=fake_allocations))
        subnet_id = 'fake_id'
        neutron_net_id = 'fake_net_id'
        neutron_subnet_id = 'fake_subnet_id'
        fake_subnet = {
            'id': subnet_id,
            'neutron_net_id': neutron_net_id,
            'neutron_subnet_id': neutron_subnet_id,
        }

        result = self.share_manager._get_subnet_allocations(
            self.context, 'fake_id', fake_subnet)

        expected_allocations = {
            'share_network_subnet_id': subnet_id,
            'neutron_net_id': neutron_net_id,
            'neutron_subnet_id': neutron_subnet_id,
            'network_allocations': fake_allocations,
        }
        self.assertEqual(expected_allocations, result)
        # Only user-label allocations belong in a subnet entry.
        mock_get_allocations.assert_called_once_with(
            self.context, 'fake_id', label='user',
            subnet_id=fake_subnet['id'])

    def test__form_network_allocations(self):
        """_form_network_allocations combines admin + per-subnet allocations."""
        fake_allocation = 'fake_alloc'
        mock_get_allocations = self.mock_object(
            self.share_manager, '_get_subnet_allocations',
            mock.Mock(return_value=fake_allocation))
        mock_admin_allocations = self.mock_object(
            self.share_manager.db,
            'network_allocations_get_for_share_server',
            mock.Mock(return_value=[fake_allocation]))

        result = self.share_manager._form_network_allocations(
            self.context, 'fake_id', ['fake_subnet'])

        expected_allocations = {
            'admin_network_allocations': [fake_allocation],
            'subnets': [fake_allocation],
        }
        self.assertEqual(expected_allocations, result)
        mock_get_allocations.assert_called_once_with(
            self.context, 'fake_id', 'fake_subnet')
        mock_admin_allocations.assert_called_once_with(
            self.context, 'fake_id', label='admin')

    @ddt.data(True, False)
    def test_check_update_share_server_network_allocations(self, support):
        """Check flow gathers server/share/access data, asks the driver,
        and records the (supported / not supported) result.
        """
        security_services = 'fake_service'
        mock_net_get = self.mock_object(
            self.share_manager.db, 'share_network_get',
            mock.Mock(return_value={'security_services': security_services}))
        server = {'id': 'fake_id'}
        subnets = [{'share_servers': [server]}]
        mock_subnet_get = self.mock_object(
            self.share_manager.db,
            'share_network_subnets_get_all_by_availability_zone_id',
            mock.Mock(return_value=subnets))
        mock_include_net_info = self.mock_object(
            self.share_manager.driver.network_api, 'include_network_info')
        current_network_allocations = 'fake_net_allocations'
        mock_form_net_allocations = self.mock_object(
            self.share_manager, '_form_network_allocations',
            mock.Mock(return_value=current_network_allocations))
        share_id = 'fake_id'
        shares = [fakes.fake_share(id=share_id)]
        mock_shares_get = self.mock_object(
            self.share_manager.db, 'share_instance_get_all_by_share_server',
            mock.Mock(return_value=shares))
        access = 'fake_access'
        mock_access_get = self.mock_object(
            self.share_manager.db, 'share_access_get_all_for_instance',
            mock.Mock(return_value=access))
        # ddt drives both a supporting and a non-supporting driver answer.
        mock_check_update = self.mock_object(
            self.share_manager.driver,
            'check_update_share_server_network_allocations',
            mock.Mock(return_value=support))
        mock_update_check_operation = self.mock_object(
            self.share_manager,
            '_update_share_server_allocations_check_operation')
        new_subnet = {'availability_zone_id': 'fake_az'}
        net_id = 'fake_net_id'

        self.share_manager.check_update_share_server_network_allocations(
            self.context, net_id, new_subnet)

        mock_net_get.assert_called_once_with(self.context, net_id)
        mock_subnet_get.assert_called_once_with(
            self.context, net_id, new_subnet['availability_zone_id'],
            fallback_to_default=False)
        mock_include_net_info.assert_called_once_with(new_subnet)
        mock_form_net_allocations.assert_called_once_with(
            self.context, server['id'], subnets)
        mock_shares_get.assert_called_once_with(
            self.context, server['id'], with_share_data=True)
        mock_access_get.assert_called_once_with(self.context, share_id)
        access_list = [{'share_instance_id': share_id,
                        'access_rules': access}]
        mock_check_update.assert_called_once_with(
            self.context, server, current_network_allocations, new_subnet,
            security_services, shares, access_list)
        # The driver's verdict is persisted for the orchestration layer.
        mock_update_check_operation.assert_called_once_with(
            self.context, support,
            share_network_id=net_id,
            availability_zone_id=new_subnet['availability_zone_id'])

    def test__do_update_share_server_network_allocations(self):
        """Full update: allocate network, call driver, persist the model.

        The driver's update model carries server details, share export
        updates and snapshot updates; each must be written to the DB.
        """
        mock_allocate = self.mock_object(
            self.share_manager.driver, 'allocate_network')
        net_allocations = {'network_allocations': ['fake_allocation']}
        mock_get_allocations = self.mock_object(
            self.share_manager, '_get_subnet_allocations',
            mock.Mock(return_value=net_allocations))
        mock_validate_segmentation = self.mock_object(
            self.share_manager, '_validate_segmentation_id')
        server_details = 'fake_details'
        snap_export = [{'path': 'fake_path',
                        'is_admin_only': 'fake_is_admin'}]
        update_model = {
            'server_details': server_details,
            'share_updates': {'fake_id': 'fake_export'},
            'snapshot_updates': {
                'fake_id': {
                    'export_locations': snap_export,
                    'status': 'fake_status',
                },
            },
        }
        mock_update_server_allocations = self.mock_object(
            self.share_manager.driver,
            'update_share_server_network_allocations',
            mock.Mock(return_value=update_model))
        mock_update_net_allocation = self.mock_object(
            self.share_manager.driver, 'update_network_allocation')
        mock_db_backend_details_set = self.mock_object(
            self.share_manager.db, 'share_server_backend_details_set')
        mock_db_export_share_update = self.mock_object(
            self.share_manager.db, 'export_locations_update')
        mock_db_snapshot_update = self.mock_object(
            self.share_manager.db, 'share_snapshot_instance_update')
        mock_db_export_snap_update = self.mock_object(
            self.share_manager.db,
            'share_snapshot_instance_export_locations_update')
        server = {'id': 'fake_id'}
        share_net = {'security_services': 'fake_services'}
        new_subnet = 'fake_subnet'
        current_network_allocations = 'fake_allocations'
        share_instances = 'fake_instances'
        snapshot_instance_ids = 'fake_snaps'

        self.share_manager._do_update_share_server_network_allocations(
            self.context, server, share_net, new_subnet,
            current_network_allocations, share_instances,
            snapshot_instance_ids
        )

        mock_update_server_allocations.assert_called_once_with(
            self.context, server, current_network_allocations,
            net_allocations, share_net['security_services'],
            share_instances, snapshot_instance_ids)
        mock_allocate.assert_called_once_with(
            self.context, server, share_net, new_subnet)
        mock_get_allocations.assert_called_once_with(
            self.context, server['id'], new_subnet)
        mock_validate_segmentation.assert_called_once_with(
            net_allocations['network_allocations'][0])
        mock_update_net_allocation.assert_called_once_with(
            self.context, server)
        mock_db_backend_details_set.assert_called_once_with(
            self.context, server['id'], server_details)
        mock_db_export_share_update.assert_called_once_with(
            self.context, 'fake_id', 'fake_export')
        mock_db_snapshot_update.assert_called_once_with(
            self.context, 'fake_id', {'status': 'fake_status'})
        mock_db_export_snap_update.assert_called_once_with(
            self.context, 'fake_id', snap_export)

    def test__do_update_share_server_network_allocations_exception(self):
        """Empty allocation list must raise AllocationsNotFoundForShareServer."""
        self.mock_object(self.share_manager.driver, 'allocate_network')
        net_allocations = {'network_allocations': []}
        self.mock_object(
            self.share_manager, '_get_subnet_allocations',
            mock.Mock(return_value=net_allocations))
        server = {'id': 'fake_id'}
        share_net = {'security_services': 'fake_services'}
        new_subnet = 'fake_subnet'
        current_network_allocations = 'fake_allocations'
        share_instances = 'fake_instances'
        snapshot_instance_ids = 'fake_snaps'

        self.assertRaises(
            exception.AllocationsNotFoundForShareServer,
            self.share_manager._do_update_share_server_network_allocations,
            self.context, server, share_net, new_subnet,
            current_network_allocations, share_instances,
            snapshot_instance_ids
        )

    def test_update_share_server_network_allocations(self):
        """Top-level update flow: gather state, delegate, mark ACTIVE.

        Verifies the manager collects servers/instances/snapshots, calls
        _do_update_share_server_network_allocations, sets the server back
        to ACTIVE and signals network-update completion.
        """
        net_id = 'fake_net_id'
        mock_net_get = self.mock_object(
            self.share_manager.db, 'share_network_get',
            mock.Mock(return_value={'id': net_id}))
        new_subnet = {'availability_zone_id': 'fake_id'}
        mock_subnet_get = self.mock_object(
            self.share_manager.db, 'share_network_subnet_get',
            mock.Mock(return_value=new_subnet))
        subnets = [{'id': 'fake_id'}]
        mock_subnets_get = self.mock_object(
            self.share_manager.db,
            'share_network_subnets_get_all_by_availability_zone_id',
            mock.Mock(return_value=subnets))
        server_id = 'fake_server_id'
        server = {'id': server_id}
        mock_servers_get = self.mock_object(
            self.share_manager.db,
            'share_server_get_all_by_host_and_or_share_subnet',
            mock.Mock(return_value=[server]))
        current_network_allocations = 'fake_current_net_allocations'
        mock_form_net_allocations = self.mock_object(
            self.share_manager, '_form_network_allocations',
            mock.Mock(return_value=current_network_allocations))
        share_instances = [{'id': 'fake_id'}]
        mock_instances_get = self.mock_object(
            self.share_manager.db, 'share_instance_get_all_by_share_server',
            mock.Mock(return_value=share_instances))
        snap_instances = [{'id': 'fake_id'}]
        mock_snap_instances_get = self.mock_object(
            self.share_manager.db,
            'share_snapshot_instance_get_all_with_filters',
            mock.Mock(return_value=snap_instances))
        mock_do_update = self.mock_object(
            self.share_manager,
            '_do_update_share_server_network_allocations')
        mock_server_update = self.mock_object(
            self.share_manager.db, 'share_server_update',
            mock.Mock(return_value=snap_instances))
        mock_check_update_finished = self.mock_object(
            self.share_manager, '_check_share_network_update_finished')
        new_share_network_subnet_id = 'fake_new_subnet_id'

        self.share_manager.update_share_server_network_allocations(
            self.context, net_id, new_share_network_subnet_id)

        mock_net_get.assert_called_once_with(self.context, net_id)
        mock_subnet_get.assert_called_once_with(self.context,
                                                new_share_network_subnet_id)
        mock_subnets_get.assert_called_once_with(
            self.context, net_id, new_subnet['availability_zone_id'],
            fallback_to_default=False)
        mock_servers_get.assert_called_once_with(
            self.context, host=self.share_manager.host,
            share_subnet_id=new_share_network_subnet_id)
        mock_form_net_allocations.assert_called_once_with(
            self.context, server['id'], subnets)
        mock_instances_get.assert_called_once_with(
            self.context, server['id'], with_share_data=True)
        mock_snap_instances_get.assert_called_once_with(
            self.context, {'share_instance_ids': ['fake_id']})
        mock_do_update.assert_called_once_with(
            self.context, server, {'id': net_id}, new_subnet,
            current_network_allocations, share_instances, snap_instances)
        mock_server_update.assert_called_once_with(
            self.context, server['id'],
            {'status': constants.STATUS_ACTIVE})
        mock_check_update_finished.assert_called_once_with(
            self.context, share_network_id=net_id)

    def test_update_share_server_network_allocations_failed(self):
        """Failure in _do_update must set resources to ERROR, not ACTIVE."""
        net_id = 'fake_net_id'
        mock_net_get = self.mock_object(
            self.share_manager.db, 'share_network_get',
            mock.Mock(return_value={'id': net_id}))
        new_subnet = {'availability_zone_id': 'fake_id'}
        mock_subnet_get = self.mock_object(
            self.share_manager.db, 'share_network_subnet_get',
            mock.Mock(return_value=new_subnet))
        subnets = [{'id': 'fake_id'}]
        mock_subnets_get = self.mock_object(
            self.share_manager.db,
            'share_network_subnets_get_all_by_availability_zone_id',
            mock.Mock(return_value=subnets))
        server_id = 'fake_server_id'
        server = {'id': server_id}
        mock_servers_get = self.mock_object(
            self.share_manager.db,
            'share_server_get_all_by_host_and_or_share_subnet',
            mock.Mock(return_value=[server]))
        current_network_allocations = 'fake_current_net_allocations'
        mock_form_net_allocations = self.mock_object(
            self.share_manager, '_form_network_allocations',
            mock.Mock(return_value=current_network_allocations))
        share_instances = [{'id': 'fake_id'}]
        mock_instances_get = self.mock_object(
            self.share_manager.db, 'share_instance_get_all_by_share_server',
            mock.Mock(return_value=share_instances))
        snap_instances = [{'id': 'fake_id'}]
        mock_snap_instances_get = self.mock_object(
            self.share_manager.db,
            'share_snapshot_instance_get_all_with_filters',
            mock.Mock(return_value=snap_instances))
        # The delegated update raises, driving the error-handling branch.
        mock_do_update = self.mock_object(
            self.share_manager,
            '_do_update_share_server_network_allocations',
            mock.Mock(side_effect=exception.AllocationsNotFoundForShareServer(
                share_server_id=server_id)))
        mock_handle_error = self.mock_object(
            self.share_manager, '_handle_setup_server_error')
        mock_update_status = self.mock_object(
            self.share_manager, '_update_resource_status')
        mock_server_update = self.mock_object(
            self.share_manager.db, 'share_server_update',
            mock.Mock(return_value=snap_instances))
        mock_check_update_finished = self.mock_object(
            self.share_manager, '_check_share_network_update_finished')
        new_share_network_subnet_id = 'fake_new_subnet_id'

        self.share_manager.update_share_server_network_allocations(
            self.context, net_id, new_share_network_subnet_id)

        mock_net_get.assert_called_once_with(self.context, net_id)
        mock_subnet_get.assert_called_once_with(self.context,
                                                new_share_network_subnet_id)
        mock_subnets_get.assert_called_once_with(
            self.context, net_id, new_subnet['availability_zone_id'],
            fallback_to_default=False)
        mock_servers_get.assert_called_once_with(
            self.context, host=self.share_manager.host,
            share_subnet_id=new_share_network_subnet_id)
        mock_form_net_allocations.assert_called_once_with(
            self.context, server['id'], subnets)
        mock_instances_get.assert_called_once_with(
            self.context, server['id'], with_share_data=True)
        mock_snap_instances_get.assert_called_once_with(
            self.context, {'share_instance_ids': ['fake_id']})
        mock_do_update.assert_called_once_with(
            self.context, server, {'id': net_id}, new_subnet,
            current_network_allocations, share_instances, snap_instances)
        # Failure path: no ACTIVE update; error handler and ERROR statuses
        # for both share and snapshot instances instead.
        mock_server_update.assert_not_called()
        mock_handle_error.assert_called()
        mock_update_status.assert_called_once_with(
            self.context, constants.STATUS_ERROR,
            share_instance_ids=['fake_id'],
            snapshot_instance_ids=['fake_id'])
        mock_check_update_finished.assert_called_once_with(
            self.context, share_network_id=net_id)

    def test_update_share_network_subnet_from_metadata(self):
        """Metadata-driven subnet update calls the driver and emits a
        success user message.
        """
        share_server = fakes.fake_share_server_get()
        share_network = db_utils.create_share_network(id='fake_sn_id')
        share_net_subnet = db_utils.create_share_network_subnet(
            id='fake_sns_id',
            share_network_id=share_network['id']
        )
        self.mock_object(
            self.share_manager.db, 'share_server_get',
            mock.Mock(return_value=share_server))
        self.mock_object(
            self.share_manager.db, 'share_network_get',
            mock.Mock(return_value=share_network))
        self.mock_object(
            self.share_manager.db, 'share_network_subnet_get',
            mock.Mock(return_value=share_net_subnet))
        metadata = {'showmount': 'true'}
        mock_update = self.mock_object(
            self.share_manager.driver,
            'update_share_network_subnet_from_metadata')

        self.share_manager.update_share_network_subnet_from_metadata(
            self.context, share_network['id'], share_net_subnet['id'],
            share_server['id'], metadata)

        mock_update.assert_called_once_with(
            self.context, share_network, share_net_subnet, share_server,
            metadata)
        self.share_manager.message_api.create.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            message_field.Action.UPDATE_METADATA,
            share_network['project_id'],
            resource_type=message_field.Resource.SHARE_NETWORK_SUBNET,
            resource_id=share_net_subnet['id'],
            detail=message_field.Detail.UPDATE_METADATA_SUCCESS)

    def test_restore_backup_respects_restore_to_target(self):
        """Restoring to a different share must fail when the driver does
        not advertise restore_to_target_support, without calling the driver.
        """
        target_share = db_utils.create_share(status=constants.STATUS_AVAILABLE)
        share = db_utils.create_share(status=constants.STATUS_AVAILABLE)
        backup = db_utils.create_backup(
            share['id'], status=constants.STATUS_AVAILABLE, size=2)
        target_share_id = target_share['id']
        self.mock_object(self.share_manager.driver, 'restore_backup')
        self.mock_object(self.share_manager.db, 'share_get',
                         mock.Mock(return_value=target_share))
        self.mock_object(self.share_manager, '_get_share_instance',
                         mock.Mock(return_value=target_share['instance']))

        with mock.patch.object(self.share_manager.driver,
                               'restore_to_target_support', False):
            self.assertRaises(
                exception.BackupException,
                self.share_manager.restore_backup,
                self.context, backup, target_share_id)
            self.share_manager.driver.restore_backup.assert_not_called()


@ddt.ddt
class HookWrapperTestCase(test.TestCase):
    """Tests for the manager.add_hooks decorator (pre/post hook wiring)."""

    def setUp(self):
        super(HookWrapperTestCase, self).setUp()
self.configuration = mock.Mock() self.configuration.safe_get.return_value = True @manager.add_hooks def _fake_wrapped_method(self, some_arg, some_kwarg): return "foo" def test_hooks_enabled(self): self.hooks = [mock.Mock(return_value=i) for i in range(2)] result = self._fake_wrapped_method( "some_arg", some_kwarg="some_kwarg_value") self.assertEqual("foo", result) for i, mock_hook in enumerate(self.hooks): mock_hook.execute_pre_hook.assert_called_once_with( "some_arg", func_name="_fake_wrapped_method", some_kwarg="some_kwarg_value") mock_hook.execute_post_hook.assert_called_once_with( "some_arg", func_name="_fake_wrapped_method", driver_action_results="foo", pre_hook_data=self.hooks[i].execute_pre_hook.return_value, some_kwarg="some_kwarg_value") def test_hooks_disabled(self): self.hooks = [] result = self._fake_wrapped_method( "some_arg", some_kwarg="some_kwarg_value") self.assertEqual("foo", result) for mock_hook in self.hooks: self.assertFalse(mock_hook.execute_pre_hook.called) self.assertFalse(mock_hook.execute_post_hook.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_migration.py0000664000175000017500000003235600000000000022176 0ustar00zuulzuul00000000000000# Copyright 2015 Hitachi Data Systems inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time from unittest import mock import ddt from manila.common import constants from manila import context from manila import db from manila import exception from manila.share import access as access_helper from manila.share import api as share_api from manila.share import migration from manila.share import rpcapi as share_rpcapi from manila import test from manila.tests import db_utils from manila import utils @ddt.ddt class ShareMigrationHelperTestCase(test.TestCase): """Tests ShareMigrationHelper.""" def setUp(self): super(ShareMigrationHelperTestCase, self).setUp() self.share = db_utils.create_share() self.share_instance = db_utils.create_share_instance( share_id=self.share['id'], share_network_id='fake_network_id') self.access_helper = access_helper.ShareInstanceAccess(db, None) self.context = context.get_admin_context() self.helper = migration.ShareMigrationHelper( self.context, db, self.access_helper) def test_delete_instance_and_wait(self): # mocks self.mock_object(share_api.API, 'delete_instance') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[self.share_instance, exception.NotFound()])) self.mock_object(time, 'sleep') # run self.helper.delete_instance_and_wait(self.share_instance) # asserts share_api.API.delete_instance.assert_called_once_with( self.context, self.share_instance, True) db.share_instance_get.assert_has_calls([ mock.call(self.context, self.share_instance['id']), mock.call(self.context, self.share_instance['id'])]) time.sleep.assert_called_once_with(1.414) def test_delete_instance_and_wait_timeout(self): # mocks self.mock_object(share_api.API, 'delete_instance') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[self.share_instance, None])) self.mock_object(time, 'sleep') now = time.time() timeout = now + 310 self.mock_object(time, 'time', mock.Mock(side_effect=[now, timeout])) # run self.assertRaises(exception.ShareMigrationFailed, self.helper.delete_instance_and_wait, self.share_instance) # asserts 
share_api.API.delete_instance.assert_called_once_with( self.context, self.share_instance, True) db.share_instance_get.assert_called_once_with( self.context, self.share_instance['id']) time.time.assert_has_calls([mock.call(), mock.call()]) def test_delete_instance_and_wait_not_found(self): # mocks self.mock_object(share_api.API, 'delete_instance') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=exception.NotFound)) # run self.helper.delete_instance_and_wait(self.share_instance) # asserts share_api.API.delete_instance.assert_called_once_with( self.context, self.share_instance, True) db.share_instance_get.assert_called_once_with( self.context, self.share_instance['id']) def test_create_instance_and_wait(self): host = 'fake_host' share_instance_creating = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_CREATING, share_network_id='fake_network_id') share_instance_available = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE, share_network_id='fake_network_id') # mocks self.mock_object(share_api.API, 'create_instance', mock.Mock(return_value=share_instance_creating)) self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=[share_instance_creating, share_instance_available])) self.mock_object(time, 'sleep') # run self.helper.create_instance_and_wait( self.share, host, 'fake_net_id', 'fake_az_id', 'fake_type_id') # asserts share_api.API.create_instance.assert_called_once_with( self.context, self.share, 'fake_net_id', 'fake_host', 'fake_az_id', share_type_id='fake_type_id') db.share_instance_get.assert_has_calls([ mock.call(self.context, share_instance_creating['id'], with_share_data=True), mock.call(self.context, share_instance_creating['id'], with_share_data=True)]) time.sleep.assert_called_once_with(1.414) def test_create_instance_and_wait_status_error(self): host = 'fake_host' share_instance_error = db_utils.create_share_instance( share_id=self.share['id'], 
status=constants.STATUS_ERROR, share_network_id='fake_network_id') # mocks self.mock_object(share_api.API, 'create_instance', mock.Mock(return_value=share_instance_error)) self.mock_object(self.helper, 'cleanup_new_instance') self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share_instance_error)) # run self.assertRaises( exception.ShareMigrationFailed, self.helper.create_instance_and_wait, self.share, host, 'fake_net_id', 'fake_az_id', 'fake_type_id') # asserts share_api.API.create_instance.assert_called_once_with( self.context, self.share, 'fake_net_id', 'fake_host', 'fake_az_id', share_type_id='fake_type_id') db.share_instance_get.assert_called_once_with( self.context, share_instance_error['id'], with_share_data=True) self.helper.cleanup_new_instance.assert_called_once_with( share_instance_error) def test_create_instance_and_wait_timeout(self): host = 'fake_host' share_instance_creating = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_CREATING, share_network_id='fake_network_id') # mocks self.mock_object(share_api.API, 'create_instance', mock.Mock(return_value=share_instance_creating)) self.mock_object(self.helper, 'cleanup_new_instance') self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share_instance_creating)) self.mock_object(time, 'sleep') now = time.time() timeout = now + 310 self.mock_object(time, 'time', mock.Mock(side_effect=[now, timeout])) # run self.assertRaises( exception.ShareMigrationFailed, self.helper.create_instance_and_wait, self.share, host, 'fake_net_id', 'fake_az_id', 'fake_type_id') # asserts share_api.API.create_instance.assert_called_once_with( self.context, self.share, 'fake_net_id', 'fake_host', 'fake_az_id', share_type_id='fake_type_id') db.share_instance_get.assert_called_once_with( self.context, share_instance_creating['id'], with_share_data=True) time.time.assert_has_calls([mock.call(), mock.call()]) self.helper.cleanup_new_instance.assert_called_once_with( 
share_instance_creating) @ddt.data(constants.STATUS_ACTIVE, constants.STATUS_ERROR, constants.STATUS_CREATING) def test_wait_for_share_server(self, status): server = db_utils.create_share_server(status=status) # mocks self.mock_object(db, 'share_server_get', mock.Mock(return_value=server)) # run if status == constants.STATUS_ACTIVE: result = self.helper.wait_for_share_server('fake_server_id') self.assertEqual(server, result) elif status == constants.STATUS_ERROR: self.assertRaises( exception.ShareServerNotCreated, self.helper.wait_for_share_server, 'fake_server_id') else: self.mock_object(time, 'sleep') self.assertRaises( exception.ShareServerNotReady, self.helper.wait_for_share_server, 'fake_server_id') # asserts db.share_server_get.assert_called_with(self.context, 'fake_server_id') @ddt.data(None, 'fakehost@fakebackend') def test_revert_access_rules(self, dest_host): share_instance = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE) share_instance_ids = [instance['id'] for instance in [share_instance]] access = db_utils.create_access(share_id=self.share['id'], access_to='fake_ip', access_level='rw') server = db_utils.create_share_server(share_id=self.share['id']) # mocks self.mock_object(self.access_helper, 'update_access_rules') get_and_update_call = self.mock_object( self.access_helper, 'get_and_update_share_instance_access_rules', mock.Mock(return_value=[access])) mock_update_access_for_instances = self.mock_object( share_rpcapi.ShareAPI, 'update_access_for_instances') # run self.helper.revert_access_rules([share_instance], server, dest_host=dest_host) # asserts get_and_update_call.assert_called_once_with( self.context, share_instance_id=share_instance['id'], updates={'state': constants.ACCESS_STATE_QUEUED_TO_APPLY}) if dest_host: mock_update_access_for_instances.assert_called_once_with( self.context, dest_host, share_instance_ids, server) else: self.access_helper.update_access_rules.assert_called_once_with( 
self.context, share_instance['id'], share_server=server) @ddt.data(True, False) def test_apply_new_access_rules_there_are_rules(self, prior_rules): new_share_instance = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_AVAILABLE, access_rules_status='active') rules = None if prior_rules: rules = [ db_utils.create_access( share_id=self.share['id'], access_to='fake_ip') ] # mocks self.mock_object(db, 'share_instance_access_copy', mock.Mock( return_value=rules)) self.mock_object(share_api.API, 'allow_access_to_instance') self.mock_object(utils, 'wait_for_access_update') # run self.helper.apply_new_access_rules(new_share_instance, self.share['id']) # asserts db.share_instance_access_copy.assert_called_once_with( self.context, self.share['id'], new_share_instance['id']) if prior_rules: share_api.API.allow_access_to_instance.assert_called_with( self.context, new_share_instance) utils.wait_for_access_update.assert_called_with( self.context, db, new_share_instance, self.helper.migration_wait_access_rules_timeout) else: self.assertFalse(share_api.API.allow_access_to_instance.called) self.assertFalse(utils.wait_for_access_update.called) @ddt.data(None, Exception('fake')) def test_cleanup_new_instance(self, exc): # mocks self.mock_object(self.helper, 'delete_instance_and_wait', mock.Mock(side_effect=exc)) self.mock_object(migration.LOG, 'warning') # run self.helper.cleanup_new_instance(self.share_instance) # asserts self.helper.delete_instance_and_wait.assert_called_once_with( self.share_instance) if exc: self.assertEqual(1, migration.LOG.warning.call_count) @ddt.data(None, Exception('fake')) def test_cleanup_access_rules(self, exc): # mocks server = db_utils.create_share_server() self.mock_object(self.helper, 'revert_access_rules', mock.Mock(side_effect=exc)) self.mock_object(migration.LOG, 'warning') # run self.helper.cleanup_access_rules(self.share_instance, server) # asserts self.helper.revert_access_rules.assert_called_once_with( 
self.share_instance, server, None) if exc: self.assertEqual(1, migration.LOG.warning.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_rpcapi.py0000664000175000017500000005703000000000000021457 0ustar00zuulzuul00000000000000# Copyright 2015 Alex Meade # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit Tests for manila.share.rpcapi. """ import copy from oslo_config import cfg from oslo_serialization import jsonutils from manila.common import constants from manila import context from manila.share import rpcapi as share_rpcapi from manila import test from manila.tests import db_utils CONF = cfg.CONF class ShareRpcAPITestCase(test.TestCase): def setUp(self): super(ShareRpcAPITestCase, self).setUp() share = db_utils.create_share( availability_zone=CONF.storage_availability_zone, status=constants.STATUS_AVAILABLE ) snapshot = db_utils.create_snapshot(share_id=share['id']) share_replica = db_utils.create_share_replica( id='fake_replica', share_id='fake_share_id', host='fake_host', ) share_group = {'id': 'fake_share_group_id', 'host': 'fake_host'} share_group_snapshot = {'id': 'fake_share_group_id'} host = 'fake_host' share_server = db_utils.create_share_server(host=host) share_network_subnet = { 'id': 'fake share network subnet', 'availability_zone_id': 'fake_az_id', 'neutron_net_id': 'fake_neutron_net_id', 'neutron_subnet_id': 'fake_neutron_subnet_id', 'ip_version': 4, 
'cidr': '127.0.0.0/28', 'gateway': '127.0.0.1', 'mtu': 1500, 'network_type': 'vlan', 'segmentation_id': 3000, } self.fake_share = jsonutils.to_primitive(share) # mock out the getattr on the share db model object since jsonutils # doesn't know about those extra attributes to pull in self.fake_share['instance'] = jsonutils.to_primitive(share.instance) self.fake_share_replica = jsonutils.to_primitive(share_replica) self.fake_snapshot = jsonutils.to_primitive(snapshot) self.fake_snapshot['share_instance'] = jsonutils.to_primitive( snapshot.instance) self.fake_share_server = jsonutils.to_primitive(share_server) self.fake_share_group = jsonutils.to_primitive(share_group) self.fake_share_group_snapshot = jsonutils.to_primitive( share_group_snapshot) self.fake_host = jsonutils.to_primitive(host) self.fake_share_network_subnet = jsonutils.to_primitive( share_network_subnet) self.ctxt = context.RequestContext('fake_user', 'fake_project') self.rpcapi = share_rpcapi.ShareAPI() def test_serialized_share_has_id(self): self.assertIn('id', self.fake_share) def _test_share_api(self, method, rpc_method, **kwargs): expected_retval = 'foo' if method == 'call' else None target = { "version": kwargs.pop('version', self.rpcapi.BASE_RPC_API_VERSION) } expected_msg = copy.deepcopy(kwargs) if 'share' in expected_msg and method != 'get_connection_info': share = expected_msg['share'] del expected_msg['share'] expected_msg['share_id'] = share['id'] if 'share_instance' in expected_msg: share_instance = expected_msg.pop('share_instance', None) expected_msg['share_instance_id'] = share_instance['id'] if 'share_group' in expected_msg: share_group = expected_msg['share_group'] del expected_msg['share_group'] expected_msg['share_group_id'] = share_group['id'] if 'share_group_snapshot' in expected_msg: snap = expected_msg['share_group_snapshot'] del expected_msg['share_group_snapshot'] expected_msg['share_group_snapshot_id'] = snap['id'] if 'host' in expected_msg: del expected_msg['host'] if 
'snapshot' in expected_msg: snapshot = expected_msg['snapshot'] del expected_msg['snapshot'] expected_msg['snapshot_id'] = snapshot['id'] if 'dest_host' in expected_msg: del expected_msg['dest_host'] expected_msg['dest_host'] = self.fake_host if 'share_replica' in expected_msg: share_replica = expected_msg.pop('share_replica', None) expected_msg['share_replica_id'] = share_replica['id'] expected_msg['share_id'] = share_replica['share_id'] if 'replicated_snapshot' in expected_msg: snapshot = expected_msg.pop('replicated_snapshot', None) expected_msg['snapshot_id'] = snapshot['id'] expected_msg['share_id'] = snapshot['share_id'] if 'src_share_instance' in expected_msg: share_instance = expected_msg.pop('src_share_instance', None) expected_msg['src_instance_id'] = share_instance['id'] if 'update_access' in expected_msg: share_instance = expected_msg.pop('share_instance', None) expected_msg['share_instance_id'] = share_instance['id'] if 'snapshot_instance' in expected_msg: snapshot_instance = expected_msg.pop('snapshot_instance', None) expected_msg['snapshot_instance_id'] = snapshot_instance['id'] share_server_id_methods = [ 'manage_share_server', 'unmanage_share_server', 'share_server_migration_start', 'share_server_migration_check', 'update_share_network_subnet_from_metadata'] src_dest_share_server_methods = [ 'share_server_migration_cancel', 'share_server_migration_get_progress', 'share_server_migration_complete'] if ('share_server' in expected_msg and method in share_server_id_methods): share_server = expected_msg.pop('share_server', None) expected_msg['share_server_id'] = share_server['id'] if ('share_server' in expected_msg and method in src_dest_share_server_methods): share_server = expected_msg.pop('share_server', None) expected_msg['src_share_server_id'] = share_server['id'] if ('dest_share_server' in expected_msg and method in src_dest_share_server_methods): share_server = expected_msg.pop('dest_share_server', None) expected_msg['dest_share_server_id'] = 
share_server['id'] if method == 'ensure_driver_resources': expected_msg['skip_backend_info_check'] = True if 'host' in kwargs: host = kwargs['host'] elif 'share_group' in kwargs: host = kwargs['share_group']['host'] elif 'share_instance' in kwargs: host = kwargs['share_instance']['host'] elif 'share_server' in kwargs: host = kwargs['share_server']['host'] elif 'share_replica' in kwargs: host = kwargs['share_replica']['host'] elif 'replicated_snapshot' in kwargs: host = kwargs['share']['instance']['host'] elif 'share' in kwargs: host = kwargs['share']['host'] else: host = self.fake_host target['server'] = host target['topic'] = '%s.%s' % (CONF.share_topic, host) self.fake_args = None self.fake_kwargs = None def _fake_prepare_method(*args, **kwds): for kwd in kwds: self.assertEqual(target[kwd], kwds[kwd]) return self.rpcapi.client def _fake_rpc_method(*args, **kwargs): self.fake_args = args self.fake_kwargs = kwargs if expected_retval: return expected_retval self.mock_object(self.rpcapi.client, "prepare", _fake_prepare_method) self.mock_object(self.rpcapi.client, rpc_method, _fake_rpc_method) retval = getattr(self.rpcapi, method)(self.ctxt, **kwargs) self.assertEqual(expected_retval, retval) expected_args = [self.ctxt, method] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) for kwarg, value in self.fake_kwargs.items(): self.assertEqual(expected_msg[kwarg], value) def test_create_share_instance(self): self._test_share_api('create_share_instance', rpc_method='cast', version='1.4', share_instance=self.fake_share, host='fake_host1', snapshot_id='fake_snapshot_id', filter_properties=None, request_spec=None) def test_delete_share_instance(self): self._test_share_api('delete_share_instance', rpc_method='cast', version='1.27', share_instance=self.fake_share, force=False, deferred_delete=False) def test_update_access(self): self._test_share_api('update_access', rpc_method='cast', version='1.14', share_instance=self.fake_share) 
def test_create_snapshot(self): self._test_share_api('create_snapshot', rpc_method='cast', share=self.fake_share, snapshot=self.fake_snapshot) def test_delete_snapshot(self): self._test_share_api('delete_snapshot', rpc_method='cast', version='1.27', snapshot=self.fake_snapshot, host='fake_host', force=False, deferred_delete=False) def test_delete_share_server(self): self._test_share_api('delete_share_server', rpc_method='cast', share_server=self.fake_share_server) def test_transfer_accept(self): self._test_share_api('transfer_accept', rpc_method='call', version='1.25', share=self.fake_share, new_user='new_user', new_project='new_project', clear_rules=False) def test_extend_share(self): self._test_share_api('extend_share', rpc_method='cast', version='1.2', share=self.fake_share, new_size=123, reservations={'fake': 'fake'}) def test_shrink_share(self): self._test_share_api('shrink_share', rpc_method='cast', version='1.3', share=self.fake_share, new_size=123) def test_create_share_group(self): self._test_share_api('create_share_group', version='1.16', rpc_method='cast', share_group=self.fake_share_group, host='fake_host1') def test_delete_share_group(self): self._test_share_api('delete_share_group', version='1.16', rpc_method='cast', share_group=self.fake_share_group) def test_create_share_group_snapshot(self): self._test_share_api( 'create_share_group_snapshot', version='1.16', rpc_method='cast', share_group_snapshot=self.fake_share_group_snapshot, host='fake_host1') def test_delete_share_group_snapshot(self): self._test_share_api( 'delete_share_group_snapshot', version='1.16', rpc_method='cast', share_group_snapshot=self.fake_share_group_snapshot, host='fake_host1') def test_migration_start(self): self._test_share_api('migration_start', rpc_method='cast', version='1.15', share=self.fake_share, dest_host=self.fake_host, force_host_assisted_migration=True, preserve_metadata=True, writable=True, nondisruptive=False, preserve_snapshots=True, 
new_share_network_id='fake_net_id', new_share_type_id='fake_type_id') def test_connection_get_info(self): self._test_share_api('connection_get_info', rpc_method='call', version='1.12', share_instance=self.fake_share) def test_migration_complete(self): self._test_share_api('migration_complete', rpc_method='cast', version='1.12', src_share_instance=self.fake_share['instance'], dest_instance_id='new_fake_ins_id') def test_migration_cancel(self): self._test_share_api('migration_cancel', rpc_method='cast', version='1.12', src_share_instance=self.fake_share['instance'], dest_instance_id='ins2_id') def test_migration_get_progress(self): self._test_share_api('migration_get_progress', rpc_method='call', version='1.12', src_share_instance=self.fake_share['instance'], dest_instance_id='ins2_id') def test_delete_share_replica(self): self._test_share_api('delete_share_replica', rpc_method='cast', version='1.8', share_replica=self.fake_share_replica, force=False) def test_promote_share_replica(self): self._test_share_api('promote_share_replica', rpc_method='cast', version='1.24', share_replica=self.fake_share_replica, quiesce_wait_time=None) def test_update_share_replica(self): self._test_share_api('update_share_replica', rpc_method='cast', version='1.8', share_replica=self.fake_share_replica) def test_manage_snapshot(self): self._test_share_api('manage_snapshot', rpc_method='cast', version='1.9', snapshot=self.fake_snapshot, host='fake_host', driver_options={'volume_snapshot_id': 'fake'}) def test_unmanage_snapshot(self): self._test_share_api('unmanage_snapshot', rpc_method='cast', version='1.9', snapshot=self.fake_snapshot, host='fake_host') def test_manage_share_server(self): self._test_share_api('manage_share_server', rpc_method='cast', version='1.19', share_server=self.fake_share_server, identifier='fake', driver_opts={}) def test_unmanage_share_server(self): self._test_share_api('unmanage_share_server', rpc_method='cast', version='1.19', 
share_server=self.fake_share_server, force='fake_force') def test_revert_to_snapshot(self): self._test_share_api('revert_to_snapshot', rpc_method='cast', version='1.18', share=self.fake_share, snapshot=self.fake_snapshot, host='fake_host', reservations={'fake': 'fake'}) def test_update_share_from_metadata(self): self._test_share_api('update_share_from_metadata', rpc_method='cast', version='1.28', share=self.fake_share, metadata={'fake': 'fake'}) def test_update_share_network_subnet_from_metadata(self): self._test_share_api( 'update_share_network_subnet_from_metadata', rpc_method='cast', version='1.30', share_network_id='fake_net_id', share_network_subnet_id=self.fake_share_network_subnet['id'], share_server=self.fake_share_server, metadata={'fake': 'fake'}) def test_create_replicated_snapshot(self): self._test_share_api('create_replicated_snapshot', rpc_method='cast', version='1.11', replicated_snapshot=self.fake_snapshot, share=self.fake_share) def test_delete_replicated_snapshot(self): self._test_share_api('delete_replicated_snapshot', rpc_method='cast', version='1.11', replicated_snapshot=self.fake_snapshot, share_id=self.fake_snapshot['share_id'], force=False, host='fake_host') def test_provide_share_server(self): self._test_share_api('provide_share_server', rpc_method='call', version='1.12', share_instance=self.fake_share['instance'], share_network_id='fake_network_id', snapshot_id='fake_snapshot_id') def test_create_share_server(self): self._test_share_api('create_share_server', rpc_method='cast', version='1.20', share_instance=self.fake_share['instance'], share_server_id='fake_server_id') def test_snapshot_update_access(self): self._test_share_api('snapshot_update_access', rpc_method='cast', version='1.17', snapshot_instance=self.fake_snapshot[ 'share_instance']) def test_share_server_migration_start(self): self._test_share_api('share_server_migration_start', rpc_method='cast', version='1.21', share_server=self.fake_share_server, dest_host=self.fake_host, 
writable=True, nondisruptive=False, preserve_snapshots=True, new_share_network_id='fake_share_network_id') def test_share_server_migration_check(self): self._test_share_api('share_server_migration_check', rpc_method='call', version='1.21', share_server_id=self.fake_share_server['id'], dest_host=self.fake_host, writable=True, nondisruptive=False, preserve_snapshots=True, new_share_network_id='fake_net_id') def test_share_server_migration_cancel(self): self._test_share_api('share_server_migration_cancel', rpc_method='cast', version='1.21', dest_host=self.fake_host, share_server=self.fake_share_server, dest_share_server=self.fake_share_server) def test_share_server_migration_get_progress(self): self._test_share_api('share_server_migration_get_progress', rpc_method='call', version='1.21', dest_host=self.fake_host, share_server=self.fake_share_server, dest_share_server=self.fake_share_server) def test_share_server_migration_complete(self): self._test_share_api('share_server_migration_complete', rpc_method='cast', version='1.21', dest_host=self.fake_host, share_server=self.fake_share_server, dest_share_server=self.fake_share_server) def test_update_access_for_share_instances(self): self._test_share_api( 'update_access_for_instances', rpc_method='cast', version='1.21', dest_host=self.fake_host, share_instance_ids=[self.fake_share['instance']['id']], share_server_id=self.fake_share_server['id']) def test_update_share_network_security_service(self): self._test_share_api( 'update_share_network_security_service', rpc_method='cast', version='1.22', dest_host=self.fake_host, share_network_id='fake_net_id', new_security_service_id='fake_sec_service_id', current_security_service_id='fake_sec_service_id') def test_check_update_share_network_security_service(self): self._test_share_api('check_update_share_network_security_service', rpc_method='cast', version='1.22', dest_host=self.fake_host, share_network_id='fake_net_id', new_security_service_id='fake_sec_service_id', 
current_security_service_id='fake_sec_service_id') def test_check_update_share_server_network_allocations(self): self._test_share_api( 'check_update_share_server_network_allocations', rpc_method='cast', version='1.23', dest_host=self.fake_host, share_network_id='fake_net_id', new_share_network_subnet=self.fake_share_network_subnet) def test_update_share_server_network_allocations(self): self._test_share_api( 'update_share_server_network_allocations', rpc_method='cast', version='1.23', dest_host=self.fake_host, share_network_id='fake_net_id', new_share_network_subnet_id='new_share_network_subnet_id') def test_ensure_driver_resources(self): self._test_share_api( 'ensure_driver_resources', rpc_method='cast', version='1.29', host=self.fake_host, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_share_types.py0000664000175000017500000006431500000000000022533 0ustar00zuulzuul00000000000000# Copyright 2015 Deutsche Telekom AG. All rights reserved. # Copyright 2015 Tom Barron. All rights reserved. # Copyright 2015 Mirantis, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test of Share Type methods for Manila.""" import copy import datetime import itertools from unittest import mock import ddt from oslo_utils import strutils from manila.common import constants from manila import context from manila import db from manila import exception from manila import quota from manila.share import share_types from manila import test from manila.tests import db_utils def create_share_type_dict(extra_specs=None): return { 'fake_type': { 'name': 'fake1', 'extra_specs': extra_specs } } def return_share_type_update(context, id, values): name = values.get('name') description = values.get('description') is_public = values.get('is_public') if id == '444': raise exception.ShareTypeUpdateFailed(id=id) else: st_update = { 'created_at': datetime.datetime(2019, 9, 9, 14, 40, 31), 'deleted': '0', 'deleted_at': None, 'extra_specs': {u'gold': u'True'}, 'required_extra_specs': {}, 'id': id, 'name': name, 'is_public': is_public, 'description': description, 'updated_at': None } return st_update @ddt.ddt class ShareTypesTestCase(test.TestCase): fake_type = { 'test': { 'created_at': datetime.datetime(2015, 1, 22, 11, 43, 24), 'deleted': '0', 'deleted_at': None, 'extra_specs': {}, 'required_extra_specs': {}, 'id': u'fooid-1', 'name': u'test', 'updated_at': None } } fake_extra_specs = {u'gold': u'True'} fake_share_type_id = u'fooid-2' fake_type_w_extra = { 'test_with_extra': { 'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31), 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_extra_specs, 'required_extra_specs': {}, 'id': fake_share_type_id, 'name': u'test_with_extra', 'updated_at': None } } fake_type_update = { 'test_type_update': { 'created_at': datetime.datetime(2019, 9, 9, 14, 40, 31), 'deleted': '0', 'deleted_at': None, 'extra_specs': {u'gold': u'True'}, 'required_extra_specs': {}, 'id': '888', 'name': 'new_name', 'is_public': True, 'description': 'new_description', 'updated_at': None } } fake_r_extra_specs = { u'gold': u'True', 
u'driver_handles_share_servers': u'True' } fake_r_required_extra_specs = { u'driver_handles_share_servers': u'True' } fake_r_type_extra = { 'test_with_extra': { 'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31), 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_r_extra_specs, 'required_extra_specs': fake_r_required_extra_specs, 'id': fake_share_type_id, 'name': u'test_with_extra', 'updated_at': None } } fake_required_extra_specs = { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true', } fake_optional_extra_specs = { constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'true', constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: 'false', constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT: 'false', } fake_type_w_valid_extra = { 'test_with_extra': { 'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31), 'deleted': '0', 'deleted_at': None, 'extra_specs': fake_required_extra_specs, 'required_extra_specs': fake_required_extra_specs, 'id': u'fooid-2', 'name': u'test_with_extra', 'updated_at': None } } fake_types = fake_type.copy() fake_types.update(fake_type_w_extra) fake_types.update(fake_type_w_valid_extra) fake_share = {'id': u'fooid-1', 'share_type_id': fake_share_type_id} def setUp(self): super(ShareTypesTestCase, self).setUp() self.context = context.get_admin_context() @ddt.data({}, fake_type, fake_type_w_extra, fake_types) def test_get_all_types(self, share_type): self.mock_object(db, 'share_type_get_all', mock.Mock(return_value=copy.deepcopy(share_type))) returned_type = share_types.get_all_types(self.context) self.assertEqual(sorted(share_type), sorted(returned_type)) def test_get_all_types_search(self): share_type = self.fake_type_w_extra search_filter = {'extra_specs': {'gold': 'True'}, 'is_public': True} self.mock_object(db, 'share_type_get_all', mock.Mock(return_value=share_type)) returned_type = share_types.get_all_types(self.context, search_opts=search_filter) db.share_type_get_all.assert_called_once_with( mock.ANY, 0, filters={'is_public': True}) 
self.assertEqual(sorted(share_type), sorted(returned_type)) search_filter = {'extra_specs': {'gold': 'False'}} expected_types = {} returned_types = share_types.get_all_types(self.context, search_opts=search_filter) self.assertEqual(expected_types, returned_types) share_type = self.fake_r_type_extra search_filter = {'extra_specs': {'gold': 'True'}} returned_type = share_types.get_all_types(self.context, search_opts=search_filter) self.assertEqual(sorted(share_type), sorted(returned_type)) @ddt.data("nova", "supernova,nova", "supernova", "nova,hypernova,supernova") def test_get_all_types_search_by_availability_zone(self, search_azs): all_share_types = { 'gold': { 'extra_specs': { 'somepoolcap': 'somevalue', 'availability_zones': 'nova,supernova,hypernova', }, 'required_extra_specs': { 'driver_handles_share_servers': True, }, 'id': '1e8f93a8-9669-4467-88a0-7b8229a9a609', 'name': u'gold-share-type', 'is_public': True, }, 'silver': { 'extra_specs': { 'somepoolcap': 'somevalue', 'availability_zones': 'nova,supernova', }, 'required_extra_specs': { 'driver_handles_share_servers': False, }, 'id': '39a7b9a8-8c76-4b49-aed3-60b718d54325', 'name': u'silver-share-type', 'is_public': True, }, 'bronze': { 'extra_specs': { 'somepoolcap': 'somevalue', 'availability_zones': 'milkyway,andromeda', }, 'required_extra_specs': { 'driver_handles_share_servers': True, }, 'id': '5a55a54d-6688-49b4-9344-bfc2d9634f70', 'name': u'bronze-share-type', 'is_public': True, }, 'default': { 'extra_specs': { 'somepoolcap': 'somevalue', }, 'required_extra_specs': { 'driver_handles_share_servers': True, }, 'id': '5a55a54d-6688-49b4-9344-bfc2d9634f70', 'name': u'bronze-share-type', 'is_public': True, } } self.mock_object( db, 'share_type_get_all', mock.Mock(return_value=all_share_types)) self.mock_object(share_types, 'get_valid_required_extra_specs') search_opts = { 'extra_specs': { 'somepoolcap': 'somevalue', 'availability_zones': search_azs }, 'is_public': True, } returned_types = 
share_types.get_all_types( self.context, search_opts=search_opts) db.share_type_get_all.assert_called_once_with( mock.ANY, 0, filters={'is_public': True}) expected_return_types = (['gold', 'silver', 'default'] if len(search_azs.split(',')) < 3 else ['gold', 'default']) self.assertEqual(sorted(expected_return_types), sorted(returned_types)) def test_get_share_type_extra_specs(self): share_type = self.fake_type_w_extra['test_with_extra'] self.mock_object(db, 'share_type_get', mock.Mock(return_value=share_type)) id = share_type['id'] extra_spec = share_types.get_share_type_extra_specs(id, key='gold') self.assertEqual(share_type['extra_specs']['gold'], extra_spec) extra_spec = share_types.get_share_type_extra_specs(id) self.assertEqual(share_type['extra_specs'], extra_spec) def test_get_extra_specs_from_share(self): expected = self.fake_extra_specs self.mock_object(share_types, 'get_share_type_extra_specs', mock.Mock(return_value=expected)) spec_value = share_types.get_extra_specs_from_share(self.fake_share) self.assertEqual(expected, spec_value) share_types.get_share_type_extra_specs.assert_called_once_with( self.fake_share_type_id) def test_update_share_type(self): expected = self.fake_type_update['test_type_update'] self.mock_object(db, 'share_type_update', mock.Mock(side_effect=return_share_type_update)) self.mock_object(db, 'share_type_get', mock.Mock(return_value=expected)) new_name = "new_name" new_description = "new_description" is_public = True self.assertRaises(exception.ShareTypeUpdateFailed, share_types.update, self.context, id='444', name=new_name, description=new_description, is_public=is_public) share_types.update(self.context, '888', new_name, new_description, is_public) st_update = share_types.get_share_type(self.context, '888') self.assertEqual(new_name, st_update['name']) self.assertEqual(new_description, st_update['description']) self.assertEqual(is_public, st_update['is_public']) @ddt.data({}, {"fake": "fake"}) def 
test_create_without_required_extra_spec(self, optional_specs): specs = copy.copy(self.fake_required_extra_specs) del specs['driver_handles_share_servers'] specs.update(optional_specs) self.assertRaises(exception.InvalidShareType, share_types.create, self.context, "fake_share_type", specs) @ddt.data({"snapshot_support": "fake"}) def test_create_with_invalid_optional_extra_spec(self, optional_specs): specs = copy.copy(self.fake_required_extra_specs) specs.update(optional_specs) self.assertRaises(exception.InvalidShareType, share_types.create, self.context, "fake_share_type", specs) def test_get_required_extra_specs(self): result = share_types.get_required_extra_specs() self.assertEqual(constants.ExtraSpecs.REQUIRED, result) def test_get_optional_extra_specs(self): result = share_types.get_optional_extra_specs() self.assertEqual(constants.ExtraSpecs.OPTIONAL, result) def test_get_tenant_visible_extra_specs(self): result = share_types.get_tenant_visible_extra_specs() self.assertEqual(constants.ExtraSpecs.TENANT_VISIBLE, result) def test_get_boolean_extra_specs(self): result = share_types.get_boolean_extra_specs() self.assertEqual(constants.ExtraSpecs.BOOLEAN, result) def test_is_valid_required_extra_spec_other(self): actual_result = share_types.is_valid_required_extra_spec( 'fake', 'fake') self.assertIsNone(actual_result) @ddt.data(*itertools.product( constants.ExtraSpecs.REQUIRED, strutils.TRUE_STRINGS + strutils.FALSE_STRINGS)) @ddt.unpack def test_is_valid_required_extra_spec_valid(self, key, value): actual_result = share_types.is_valid_required_extra_spec(key, value) self.assertTrue(actual_result) @ddt.data('invalid', {}, '0000000000') def test_is_valid_required_extra_spec_invalid(self, value): key = constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS actual_result = share_types.is_valid_required_extra_spec(key, value) self.assertFalse(actual_result) @ddt.data({}, {'another_key': True}) def test_get_valid_required_extra_specs_valid(self, optional_specs): specs = 
copy.copy(self.fake_required_extra_specs) specs.update(optional_specs) actual_result = share_types.get_valid_required_extra_specs(specs) self.assertEqual(self.fake_required_extra_specs, actual_result) @ddt.data(None, {}, {constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'fake'}) def test_get_valid_required_extra_specs_invalid(self, extra_specs): self.assertRaises(exception.InvalidExtraSpec, share_types.get_valid_required_extra_specs, extra_specs) @ddt.data(*( list(itertools.product( (constants.ExtraSpecs.SNAPSHOT_SUPPORT, constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT, constants.ExtraSpecs.REVERT_TO_SNAPSHOT_SUPPORT, constants.ExtraSpecs.MOUNT_SNAPSHOT_SUPPORT, constants.ExtraSpecs.MOUNT_POINT_NAME_SUPPORT), strutils.TRUE_STRINGS + strutils.FALSE_STRINGS)) + list(itertools.product( (constants.ExtraSpecs.REPLICATION_TYPE_SPEC,), constants.ExtraSpecs.REPLICATION_TYPES)) + list(itertools.product( (constants.ExtraSpecs.ENCRYPTION_SUPPORT,), constants.ExtraSpecs.ENCRYPTION_TYPES)) + [(constants.ExtraSpecs.AVAILABILITY_ZONES, 'zone a, zoneb$c'), (constants.ExtraSpecs.AVAILABILITY_ZONES, ' zonea, zoneb'), (constants.ExtraSpecs.AVAILABILITY_ZONES, 'zone1')] + [(constants.ExtraSpecs.PROVISIONING_MOUNT_POINT_PREFIX, 'gold'), (constants.ExtraSpecs.PROVISIONING_MOUNT_POINT_PREFIX, 'silver'), (constants.ExtraSpecs.PROVISIONING_MOUNT_POINT_PREFIX, 'bronze')] + [(constants.ExtraSpecs.ENCRYPTION_SUPPORT, 'share'), (constants.ExtraSpecs.ENCRYPTION_SUPPORT, 'share_server')] )) @ddt.unpack def test_is_valid_optional_extra_spec_valid(self, key, value): result = share_types.is_valid_optional_extra_spec(key, value) self.assertTrue(result) def test_valid_string(self): self.assertTrue(share_types.is_valid_string("This is a valid string")) def test_empty_string(self): self.assertFalse(share_types.is_valid_string("")) def test_string_too_long(self): self.assertFalse(share_types.is_valid_string("a" * 256)) def test_non_string_input(self): 
self.assertFalse(share_types.is_valid_string(123)) def test_is_valid_optional_extra_spec_valid_unknown_key(self): result = share_types.is_valid_optional_extra_spec('fake', 'fake') self.assertIsNone(result) def test_get_valid_optional_extra_specs(self): extra_specs = copy.copy(self.fake_required_extra_specs) extra_specs.update(self.fake_optional_extra_specs) extra_specs.update({'fake': 'fake'}) result = share_types.get_valid_optional_extra_specs(extra_specs) self.assertEqual(self.fake_optional_extra_specs, result) def test_get_valid_optional_extra_specs_empty(self): result = share_types.get_valid_optional_extra_specs({}) self.assertEqual({}, result) @ddt.data({constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'fake'}, {constants.ExtraSpecs.AVAILABILITY_ZONES: 'ZoneA,'}) def test_get_valid_optional_extra_specs_invalid(self, extra_specs): self.assertRaises(exception.InvalidExtraSpec, share_types.get_valid_optional_extra_specs, extra_specs) @ddt.data(' az 1, az2 ,az 3 ', 'az 1,az2,az 3 ', None) def test_sanitize_extra_specs(self, spec_value): extra_specs = { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'True', constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'True', constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: 'False' } expected_specs = copy.copy(extra_specs) if spec_value is not None: extra_specs[constants.ExtraSpecs.AVAILABILITY_ZONES] = spec_value expected_specs['availability_zones'] = 'az 1,az2,az 3' self.assertDictEqual(expected_specs, share_types.sanitize_extra_specs(extra_specs)) def test_add_access(self): project_id = '456' extra_specs = { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true', constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'true', constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: 'false', } share_type = share_types.create(self.context, 'type1', extra_specs) share_type_id = share_type.get('id') share_types.add_share_type_access(self.context, share_type_id, project_id) stype_access = db.share_type_access_get_all(self.context, share_type_id) 
self.assertIn(project_id, [a.project_id for a in stype_access]) def test_add_access_invalid(self): self.assertRaises(exception.InvalidShareType, share_types.add_share_type_access, 'fake', None, 'fake') def test_remove_access(self): project_id = '456' extra_specs = { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true', constants.ExtraSpecs.SNAPSHOT_SUPPORT: 'true', constants.ExtraSpecs.CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: 'false', } share_type = share_types.create( self.context, 'type1', projects=['456'], extra_specs=extra_specs) share_type_id = share_type.get('id') share_types.remove_share_type_access(self.context, share_type_id, project_id) stype_access = db.share_type_access_get_all(self.context, share_type_id) self.assertNotIn(project_id, stype_access) def test_remove_access_invalid(self): self.assertRaises(exception.InvalidShareType, share_types.remove_share_type_access, 'fake', None, 'fake') @ddt.data({'spec_value': ' True', 'expected': True}, {'spec_value': 'true', 'expected': True}, {'spec_value': ' False', 'expected': False}, {'spec_value': 'false', 'expected': False}, {'spec_value': u' FaLsE ', 'expected': False}) @ddt.unpack def test_parse_boolean_extra_spec(self, spec_value, expected): result = share_types.parse_boolean_extra_spec('fake_key', spec_value) self.assertEqual(expected, result) @ddt.data(' True', ' Wrong', None, 5) def test_parse_boolean_extra_spec_invalid(self, spec_value): self.assertRaises(exception.InvalidExtraSpec, share_types.parse_boolean_extra_spec, 'fake_key', spec_value) def test_provision_filter_on_size(self): share_types.create(self.context, "type1", extra_specs={ "key1": "val1", "key2": "val2", "driver_handles_share_servers": False}) share_types.create(self.context, "type2", extra_specs={ share_types.MIN_SIZE_KEY: "12", "key3": "val3", "driver_handles_share_servers": False}) share_types.create(self.context, "type3", extra_specs={ share_types.MAX_SIZE_KEY: "99", "key4": "val4", "driver_handles_share_servers": False}) 
share_types.create(self.context, "type4", extra_specs={ share_types.MIN_SIZE_KEY: "24", share_types.MAX_SIZE_KEY: "99", "key4": "val4", "driver_handles_share_servers": False}) share_types.create(self.context, "type5", extra_specs={ share_types.MAX_SIZE_KEY: "95", share_types.MAX_EXTEND_SIZE_KEY: "99", "key4": "val4", "driver_handles_share_servers": False}) # Make sure we don't raise if there are no min/max set type1 = share_types.get_share_type_by_name(self.context, 'type1') share_types.provision_filter_on_size(self.context, type1, "11") # verify minimum size requirements type2 = share_types.get_share_type_by_name(self.context, 'type2') self.assertRaises(exception.InvalidInput, share_types.provision_filter_on_size, self.context, type2, "11") share_types.provision_filter_on_size(self.context, type2, "12") share_types.provision_filter_on_size(self.context, type2, "100") # verify max size requirements type3 = share_types.get_share_type_by_name(self.context, 'type3') self.assertRaises(exception.InvalidInput, share_types.provision_filter_on_size, self.context, type3, "100") share_types.provision_filter_on_size(self.context, type3, "99") share_types.provision_filter_on_size(self.context, type3, "1") # verify min and max type4 = share_types.get_share_type_by_name(self.context, 'type4') self.assertRaises(exception.InvalidInput, share_types.provision_filter_on_size, self.context, type4, "20") self.assertRaises(exception.InvalidInput, share_types.provision_filter_on_size, self.context, type4, "100") share_types.provision_filter_on_size(self.context, type4, "24") share_types.provision_filter_on_size(self.context, type4, "99") share_types.provision_filter_on_size(self.context, type4, "30") # verify max extend size requirements type5 = share_types.get_share_type_by_name(self.context, 'type5') self.assertRaises(exception.InvalidInput, share_types.provision_filter_on_size, self.context, type5, "100", operation="extend") share_types.provision_filter_on_size(self.context, type5, 
"99", operation="admin-extend") @ddt.data(True, False) def test__revert_allocated_share_type_quotas_during_migration( self, failed_on_reservation): fake_type_id = 'fake_1' extra_specs = {'replication_type': 'readable'} source_instance = db_utils.create_share() dest_instance = db_utils.create_share( share_type_id=fake_type_id) share_type = { 'name': 'fake_share_type', 'extra_specs': extra_specs, 'is_public': True, 'id': 'fake_type_id' } expected_deltas = { 'project_id': dest_instance['project_id'], 'user_id': dest_instance['user_id'], 'share_replicas': -1, 'replica_gigabytes': -dest_instance['size'], 'share_type_id': share_type['id'], 'shares': -1, 'gigabytes': -dest_instance['size'], } reservations = 'reservations' reservation_action = ( mock.Mock(side_effect=exception.ManilaException(message='fake')) if failed_on_reservation else mock.Mock(return_value=reservations)) mock_type_get = self.mock_object( share_types, 'get_share_type', mock.Mock(return_value=share_type)) mock_reserve = self.mock_object( quota.QUOTAS, 'reserve', reservation_action) mock_commit = self.mock_object(quota.QUOTAS, 'commit') mock_log = self.mock_object(share_types.LOG, 'exception') share_types.revert_allocated_share_type_quotas_during_migration( self.context, source_instance, share_type['id'], dest_instance) if not failed_on_reservation: mock_commit.assert_called_once_with( self.context, reservations, project_id=dest_instance['project_id'], user_id=dest_instance['user_id'], share_type_id=share_type['id']) else: mock_log.assert_called_once() mock_type_get.assert_called_once_with( self.context, share_type['id']) mock_reserve.assert_called_once_with( self.context, **expected_deltas) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_share_utils.py0000664000175000017500000002327700000000000022531 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright (c) 2015 Rushil Chugh # All 
Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests For miscellaneous util methods used with share.""" from unittest import mock import ddt from manila.common import constants from manila.share import utils as share_utils from manila import test @ddt.ddt class ShareUtilsTestCase(test.TestCase): def test_extract_host_without_pool(self): host = 'Host@Backend' self.assertEqual( 'Host@Backend', share_utils.extract_host(host)) def test_extract_host_only_return_host(self): host = 'Host@Backend' self.assertEqual( 'Host', share_utils.extract_host(host, 'host')) def test_extract_host_only_return_pool(self): host = 'Host@Backend' self.assertIsNone( share_utils.extract_host(host, 'pool')) def test_extract_host_only_return_backend(self): host = 'Host@Backend' self.assertEqual( 'Host@Backend', share_utils.extract_host(host, 'backend')) def test_extract_host_missing_backend_and_pool(self): host = 'Host' # Default level is 'backend' self.assertEqual( 'Host', share_utils.extract_host(host)) def test_extract_host_only_return_backend_name(self): host = 'Host@Backend#Pool' self.assertEqual( 'Backend', share_utils.extract_host(host, 'backend_name')) def test_extract_host_only_return_backend_name_index_error(self): host = 'Host#Pool' self.assertRaises(IndexError, share_utils.extract_host, host, 'backend_name') def test_extract_host_missing_backend(self): host = 'Host#Pool' self.assertEqual( 'Host', share_utils.extract_host(host)) self.assertEqual( 'Host', 
share_utils.extract_host(host, 'host')) def test_extract_host_missing_backend_only_return_backend(self): host = 'Host#Pool' self.assertEqual( 'Host', share_utils.extract_host(host, 'backend')) def test_extract_host_missing_backend_only_return_pool(self): host = 'Host#Pool' self.assertEqual( 'Pool', share_utils.extract_host(host, 'pool')) self.assertEqual( 'Pool', share_utils.extract_host(host, 'pool', True)) def test_extract_host_missing_pool(self): host = 'Host@Backend' self.assertIsNone( share_utils.extract_host(host, 'pool')) def test_extract_host_missing_pool_use_default_pool(self): host = 'Host@Backend' self.assertEqual( '_pool0', share_utils.extract_host(host, 'pool', True)) def test_extract_host_with_default_pool(self): host = 'Host' # Default_pool_name doesn't work for level other than 'pool' self.assertEqual( 'Host', share_utils.extract_host(host, 'host', True)) self.assertEqual( 'Host', share_utils.extract_host(host, 'host', False)) self.assertEqual( 'Host', share_utils.extract_host(host, 'backend', True)) self.assertEqual( 'Host', share_utils.extract_host(host, 'backend', False)) def test_extract_host_with_pool(self): host = 'Host@Backend#Pool' self.assertEqual( 'Host@Backend', share_utils.extract_host(host)) self.assertEqual( 'Host', share_utils.extract_host(host, 'host')) self.assertEqual( 'Host@Backend', share_utils.extract_host(host, 'backend'),) self.assertEqual( 'Pool', share_utils.extract_host(host, 'pool')) self.assertEqual( 'Pool', share_utils.extract_host(host, 'pool', True)) def test_append_host_with_host_and_pool(self): host = 'Host' pool = 'Pool' expected = 'Host#Pool' self.assertEqual(expected, share_utils.append_host(host, pool)) def test_append_host_with_host(self): host = 'Host' pool = None expected = 'Host' self.assertEqual(expected, share_utils.append_host(host, pool)) def test_append_host_with_pool(self): host = None pool = 'pool' expected = None self.assertEqual(expected, share_utils.append_host(host, pool)) def 
test_append_host_with_no_values(self): host = None pool = None expected = None self.assertEqual(expected, share_utils.append_host(host, pool)) def test_get_active_replica_success(self): replica_list = [{'id': '123456', 'replica_state': constants.REPLICA_STATE_IN_SYNC}, {'id': '654321', 'replica_state': constants.REPLICA_STATE_ACTIVE}, ] replica = share_utils.get_active_replica(replica_list) self.assertEqual('654321', replica['id']) def test_get_active_replica_not_exist(self): replica_list = [{'id': '123456', 'replica_state': constants.REPLICA_STATE_IN_SYNC}, {'id': '654321', 'replica_state': constants.REPLICA_STATE_OUT_OF_SYNC}, ] replica = share_utils.get_active_replica(replica_list) self.assertIsNone(replica) @ddt.data( {'fake_subnet': [{'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsb_id'}], 'fake_new_subnet': [{'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsb_id'}], 'is_compatible': True}, {'fake_subnet': [{'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsb_id'}], 'fake_new_subnet': [{'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsb_id2'}], 'is_compatible': False}, {'fake_subnet': [{'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsb_id'}, {'neutron_net_id': 'fake_nn_id2', 'neutron_subnet_id': 'fake_nsb_id2'}], 'fake_new_subnet': [{'neutron_net_id': 'fake_nn_id', 'neutron_subnet_id': 'fake_nsb_id'}], 'is_compatible': False} ) @ddt.unpack def test_is_az_subnets_compatible(self, fake_subnet, fake_new_subnet, is_compatible): expected_result = is_compatible result = share_utils.is_az_subnets_compatible(fake_subnet, fake_new_subnet) self.assertEqual(expected_result, result) class NotifyUsageTestCase(test.TestCase): @mock.patch('manila.share.utils._usage_from_share') @mock.patch('manila.share.utils.CONF') @mock.patch('manila.share.utils.rpc') def test_notify_about_share_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = 
share_utils.notify_about_share_usage(mock.sentinel.context, mock.sentinel.share, mock.sentinel. share_instance, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.share, mock.sentinel.share_instance) mock_rpc.get_notifier.assert_called_once_with('share', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'share.test_suffix', mock_usage.return_value) @mock.patch('manila.share.utils._usage_from_share') @mock.patch('manila.share.utils.CONF') @mock.patch('manila.share.utils.rpc') def test_notify_about_share_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = share_utils.notify_about_share_usage(mock.sentinel.context, mock.sentinel.share, mock.sentinel. share_instance, 'test_suffix', extra_usage_info={ 'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.share, mock.sentinel.share_instance, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('share', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'share.test_suffix', mock_usage.return_value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share/test_snapshot_access.py0000664000175000017500000001557200000000000023366 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import ddt from manila.common import constants from manila import context from manila import db from manila import exception from manila.share import snapshot_access from manila import test from manila.tests import db_utils from manila import utils @ddt.ddt class SnapshotAccessTestCase(test.TestCase): def setUp(self): super(SnapshotAccessTestCase, self).setUp() self.driver = self.mock_class("manila.share.driver.ShareDriver", mock.Mock()) self.snapshot_access = snapshot_access.ShareSnapshotInstanceAccess( db, self.driver) self.context = context.get_admin_context() share = db_utils.create_share() self.snapshot = db_utils.create_snapshot(share_id=share['id']) self.snapshot_instance = db_utils.create_snapshot_instance( snapshot_id=self.snapshot['id'], share_instance_id=self.snapshot['share']['instance']['id']) @ddt.data(constants.ACCESS_STATE_QUEUED_TO_APPLY, constants.ACCESS_STATE_QUEUED_TO_DENY) def test_update_access_rules(self, state): rules = [] for i in range(2): rules.append({ 'id': 'id-%s' % i, 'state': state, 'access_id': 'rule_id%s' % i }) all_rules = copy.deepcopy(rules) all_rules.append({ 'id': 'id-3', 'state': constants.ACCESS_STATE_ERROR, 'access_id': 'rule_id3' }) snapshot_instance_get = self.mock_object( db, 'share_snapshot_instance_get', mock.Mock(return_value=self.snapshot_instance)) snap_get_all_for_snap_instance = self.mock_object( db, 'share_snapshot_access_get_all_for_snapshot_instance', mock.Mock(return_value=all_rules)) self.mock_object(db, 'share_snapshot_instance_access_update') self.mock_object(self.driver, 'snapshot_update_access') self.mock_object(self.snapshot_access, '_check_needs_refresh', mock.Mock(return_value=False)) self.mock_object(db, 'share_snapshot_instance_access_delete') self.snapshot_access.update_access_rules(self.context, self.snapshot_instance['id']) 
snapshot_instance_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id'], with_share_data=True) snap_get_all_for_snap_instance.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id']) if state == constants.ACCESS_STATE_QUEUED_TO_APPLY: self.driver.snapshot_update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance, rules, add_rules=rules, delete_rules=[], share_server=None) else: self.driver.snapshot_update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance, [], add_rules=[], delete_rules=rules, share_server=None) def test_update_access_rules_delete_all_rules(self): rules = [] for i in range(2): rules.append({ 'id': 'id-%s' % i, 'state': constants.ACCESS_STATE_QUEUED_TO_DENY, 'access_id': 'rule_id%s' % i }) snapshot_instance_get = self.mock_object( db, 'share_snapshot_instance_get', mock.Mock(return_value=self.snapshot_instance)) snap_get_all_for_snap_instance = self.mock_object( db, 'share_snapshot_access_get_all_for_snapshot_instance', mock.Mock(side_effect=[rules, []])) self.mock_object(db, 'share_snapshot_instance_access_update') self.mock_object(self.driver, 'snapshot_update_access') self.mock_object(db, 'share_snapshot_instance_access_delete') self.snapshot_access.update_access_rules(self.context, self.snapshot_instance['id'], delete_all_rules=True) snapshot_instance_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id'], with_share_data=True) snap_get_all_for_snap_instance.assert_called_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id']) self.driver.snapshot_update_access.assert_called_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance, [], add_rules=[], delete_rules=rules, share_server=None) def test_update_access_rules_exception(self): rules = [] for i in range(2): rules.append({ 'id': 'id-%s' 
% i, 'state': constants.ACCESS_STATE_APPLYING, 'access_id': 'rule_id%s' % i }) snapshot_instance_get = self.mock_object( db, 'share_snapshot_instance_get', mock.Mock(return_value=self.snapshot_instance)) snap_get_all_for_snap_instance = self.mock_object( db, 'share_snapshot_access_get_all_for_snapshot_instance', mock.Mock(return_value=rules)) self.mock_object(db, 'share_snapshot_instance_access_update') self.mock_object(self.driver, 'snapshot_update_access', mock.Mock(side_effect=exception.NotFound)) self.assertRaises(exception.NotFound, self.snapshot_access.update_access_rules, self.context, self.snapshot_instance['id']) snapshot_instance_get.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id'], with_share_data=True) snap_get_all_for_snap_instance.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance['id']) self.driver.snapshot_update_access.assert_called_once_with( utils.IsAMatcher(context.RequestContext), self.snapshot_instance, rules, add_rules=rules, delete_rules=[], share_server=None) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04167 manila-21.0.0/manila/tests/share_group/0000775000175000017500000000000000000000000017777 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share_group/__init__.py0000664000175000017500000000000000000000000022076 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share_group/test_api.py0000664000175000017500000020726500000000000022175 0ustar00zuulzuul00000000000000# Copyright 2016 Alex Meade # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Share API module.""" import copy import datetime from unittest import mock import ddt from oslo_config import cfg from oslo_utils import timeutils from webob import exc as webob_exc from manila.common import constants from manila import context from manila import db as db_driver from manila import exception from manila.share import share_types from manila.share_group import api as share_group_api from manila import test from manila.tests.api.contrib import stubs from manila.tests import utils as test_utils CONF = cfg.CONF def fake_share_group(id, **kwargs): share_group = { 'id': id, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'name': None, 'description': None, 'host': None, 'availability_zone_id': None, 'share_group_type_id': None, 'source_share_group_snapshot_id': None, 'share_network_id': None, 'share_server_id': None, 'share_types': mock.ANY, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), } if 'source_share_group_snapshot_id' in kwargs: share_group['share_network_id'] = 'fake_share_network_id' share_group['share_server_id'] = 'fake_share_server_id' share_group.update(kwargs) return share_group def fake_share_group_snapshot(id, **kwargs): snap = { 'id': id, 'user_id': 'fakeuser', 'project_id': 'fakeproject', 'status': constants.STATUS_CREATING, 'name': None, 'description': None, 'share_group_id': None, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), } snap.update(kwargs) return snap @ddt.ddt class ShareGroupsAPITestCase(test.TestCase): def setUp(self): super(ShareGroupsAPITestCase, 
self).setUp() self.user_id = 'fake_user_id' self.project_id = 'fake_project_id' self.context = context.RequestContext( user_id=self.user_id, project_id=self.project_id, is_admin=True) self.scheduler_rpcapi = mock.Mock() self.share_rpcapi = mock.Mock() self.share_api = mock.Mock() self.api = share_group_api.API() self.mock_object(self.api, 'share_rpcapi', self.share_rpcapi) self.mock_object(self.api, 'share_api', self.share_api) self.mock_object(self.api, 'scheduler_rpcapi', self.scheduler_rpcapi) dt_utc = timeutils.utcnow() self.mock_object(timeutils, 'utcnow', mock.Mock(return_value=dt_utc)) self.fake_share_type = { 'name': 'default', 'extra_specs': {'driver_handles_share_servers': 'False'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb6df9' } self.fake_share_type_2 = { 'name': 'default2', 'extra_specs': {'driver_handles_share_servers': 'False'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb7dfd' } self.fake_share_group_type = { 'share_types': [ {'share_type_id': self.fake_share_type['id']}, {'share_type_id': self.fake_share_type_2['id']}, ] } self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=self.fake_share_type)) self.mock_object( db_driver, 'share_group_type_get', mock.Mock(return_value=self.fake_share_group_type)) self.mock_object(share_group_api.QUOTAS, 'reserve') self.mock_object(share_group_api.QUOTAS, 'commit') self.mock_object(share_group_api.QUOTAS, 'rollback') def test_create_empty_request(self): share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = share_group.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) self.mock_object(db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.api.create(self.context) db_driver.share_group_create.assert_called_once_with( self.context, expected_values) share_group_api.QUOTAS.reserve.assert_called_once_with( 
self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_request_spec(self): """Ensure the correct values are sent to the scheduler.""" share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = share_group.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_request_spec = {'share_group_id': share_group['id']} expected_request_spec.update(share_group) expected_request_spec['availability_zones'] = set([]) del expected_request_spec['id'] del expected_request_spec['created_at'] del expected_request_spec['host'] expected_request_spec['resource_type'] = self.fake_share_group_type self.mock_object(db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.api.create(self.context) self.scheduler_rpcapi.create_share_group.assert_called_once_with( self.context, share_group_id=share_group['id'], request_spec=expected_request_spec, filter_properties={}) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_with_name(self): fake_name = 'fake_name' share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = share_group.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['name'] = fake_name self.mock_object(db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.mock_object(db_driver, 'share_network_get') self.api.create(self.context, name=fake_name) db_driver.share_group_create.assert_called_once_with( 
self.context, expected_values) self.scheduler_rpcapi.create_share_group.assert_called_once_with( self.context, share_group_id=share_group['id'], request_spec=mock.ANY, filter_properties={}) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_with_description(self): fake_desc = 'fake_desc' share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = share_group.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['description'] = fake_desc self.mock_object(db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.api.create(self.context, description=fake_desc) db_driver.share_group_create.assert_called_once_with( self.context, expected_values) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() @ddt.data(True, False) def test_create_with_multiple_share_types_with_az(self, with_az): share_type_1 = copy.deepcopy(self.fake_share_type) share_type_2 = copy.deepcopy(self.fake_share_type_2) share_type_1['extra_specs']['availability_zones'] = 'nova,supernova' share_type_2['extra_specs']['availability_zones'] = 'nova' fake_share_types = [share_type_1, share_type_2] fake_share_type_ids = [x['id'] for x in fake_share_types] share_group_type = { 'share_types': [ {'share_type_id': share_type_1['id']}, {'share_type_id': share_type_2['id']}, {'share_type_id': self.fake_share_type['id']}, ] } self.mock_object( db_driver, 'share_group_type_get', mock.Mock(return_value=share_group_type)) self.mock_object(share_types, 
'get_share_type', mock.Mock( side_effect=[share_type_1, share_type_2, share_type_1, share_type_2, self.fake_share_type])) share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING, availability_zone_id=('e030620e-892c-4ff4-8764-9f3f2b560bd1' if with_az else None) ) expected_values = share_group.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['share_types'] = fake_share_type_ids self.mock_object( db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.mock_object(db_driver, 'share_network_get') az_kwargs = { 'availability_zone': 'nova', 'availability_zone_id': share_group['availability_zone_id'], } kwargs = {} if not with_az else az_kwargs self.api.create(self.context, share_type_ids=fake_share_type_ids, **kwargs) scheduler_request_spec = ( self.scheduler_rpcapi.create_share_group.call_args_list[ 0][1]['request_spec'] ) az_id = az_kwargs['availability_zone_id'] if with_az else None self.assertEqual({'nova', 'supernova'}, scheduler_request_spec['availability_zones']) self.assertEqual(az_id, scheduler_request_spec['availability_zone_id']) db_driver.share_group_create.assert_called_once_with( self.context, expected_values) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() @ddt.data( test_utils.annotated('specified_stypes_one_unsupported_in_AZ', (True, True)), test_utils.annotated('specified_stypes_all_unsupported_in_AZ', (True, False)), test_utils.annotated('group_type_stypes_one_unsupported_in_AZ', (False, True)), test_utils.annotated('group_type_stypes_all_unsupported_in_AZ', (False, False))) @ddt.unpack def test_create_unsupported_az(self, specify_stypes, all_unsupported): share_type_1 = 
copy.deepcopy(self.fake_share_type) share_type_2 = copy.deepcopy(self.fake_share_type_2) share_type_1['extra_specs']['availability_zones'] = 'nova,supernova' share_type_2['extra_specs']['availability_zones'] = ( 'nova' if all_unsupported else 'nova,hypernova' ) share_group_type = { 'share_types': [ {'share_type_id': share_type_1['id'], }, {'share_type_id': share_type_2['id']}, ] } share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING, availability_zone_id='e030620e-892c-4ff4-8764-9f3f2b560bd1') self.mock_object( db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.mock_object(db_driver, 'share_network_get') self.mock_object( db_driver, 'share_group_type_get', mock.Mock(return_value=share_group_type)) self.mock_object(share_types, 'get_share_type', mock.Mock(side_effect=[share_type_1, share_type_1] * 2)) self.mock_object(db_driver, 'share_group_snapshot_get') kwargs = { 'availability_zone': 'hypernova', 'availability_zone_id': share_group['availability_zone_id'], } if specify_stypes: kwargs['share_type_ids'] = [share_type_1['id'], share_type_2['id']] self.assertRaises( exception.InvalidInput, self.api.create, self.context, **kwargs) db_driver.share_group_snapshot_get.assert_not_called() db_driver.share_network_get.assert_not_called() def test_create_with_share_type_not_found(self): self.mock_object(share_types, 'get_share_type', mock.Mock(side_effect=exception.ShareTypeNotFound( share_type_id=self.fake_share_type['id']))) share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = share_group.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['share_types'] = self.fake_share_type['id'] self.mock_object(db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.assertRaises( exception.InvalidInput, 
self.api.create, self.context, share_type_ids=[self.fake_share_type['id']]) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_create_with_error_on_quota_reserve(self): overs = ["share_groups"] usages = {"share_groups": {"reserved": 1, "in_use": 3, "limit": 4}} quotas = {"share_groups": 5} share_group_api.QUOTAS.reserve.side_effect = exception.OverQuota( overs=overs, usages=usages, quotas=quotas, ) self.mock_object(share_group_api.LOG, "warning") self.assertRaises( exception.ShareGroupsLimitExceeded, self.api.create, self.context) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() share_group_api.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) def test_create_driver_handles_share_servers_is_false_with_net_id(self): fake_share_types = [self.fake_share_type] self.mock_object(share_types, 'get_share_type') self.assertRaises(exception.InvalidInput, self.api.create, self.context, share_type_ids=fake_share_types, share_network_id="fake_share_network") def test_create_with_conflicting_share_types(self): fake_share_type = { 'name': 'default', 'extra_specs': {'driver_handles_share_servers': 'True'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb6df9', } fake_share_type_2 = { 'name': 'default2', 'extra_specs': {'driver_handles_share_servers': 'False'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb7df9', } fake_share_types = [fake_share_type, fake_share_type_2] fake_share_type_ids = [x['id'] for x in fake_share_types] self.mock_object(share_types, 'get_share_type', mock.Mock(side_effect=[fake_share_type, fake_share_type_2])) self.assertRaises( exception.InvalidInput, self.api.create, self.context, share_type_ids=fake_share_type_ids) share_group_api.QUOTAS.reserve.assert_not_called() 
share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_create_with_conflicting_share_type_and_share_network(self): fake_share_type = { 'name': 'default', 'extra_specs': {'driver_handles_share_servers': 'False'}, 'is_public': True, 'id': 'c01990c1-448f-435a-9de6-c7c894bb6df9', } fake_share_types = [fake_share_type] self.mock_object(share_types, 'get_share_type', mock.Mock(return_value=fake_share_type)) self.assertRaises( exception.InvalidInput, self.api.create, self.context, share_type_ids=fake_share_types, share_network_id="fake_sn") share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_create_with_source_share_group_snapshot_id(self): snap = fake_share_group_snapshot( "fake_source_share_group_snapshot_id", status=constants.STATUS_AVAILABLE) fake_share_type_mapping = {'share_type_id': self.fake_share_type['id']} orig_share_group = fake_share_group( 'fakeorigid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_AVAILABLE, host='fake_original_host', share_network_id='fake_network_id', share_server_id='fake_server_id') share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_CREATING, host='fake_original_host', share_network_id='fake_network_id', share_server_id='fake_server_id') share_network = { 'id': 'fakeid', 'status': constants.STATUS_NETWORK_ACTIVE } expected_values = share_group.copy() for name in ('id', 'created_at', 'share_network_id', 'share_server_id'): expected_values.pop(name, None) expected_values['source_share_group_snapshot_id'] = snap['id'] expected_values['share_types'] = [self.fake_share_type['id']] expected_values['share_network_id'] = 'fake_network_id' expected_values['share_server_id'] = 
'fake_server_id' self.mock_object( db_driver, 'share_group_snapshot_get', mock.Mock(return_value=snap)) self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=orig_share_group)) self.mock_object( db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_get', mock.Mock(return_value=stubs.stub_share('fake_share'))) self.mock_object( share_types, 'get_share_type', mock.Mock(return_value={"id": self.fake_share_type['id']})) self.mock_object(db_driver, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db_driver, 'share_group_snapshot_members_get_all', mock.Mock(return_value=[])) self.api.create( self.context, source_share_group_snapshot_id=snap['id']) db_driver.share_group_create.assert_called_once_with( self.context, expected_values) self.share_rpcapi.create_share_group.assert_called_once_with( self.context, share_group, orig_share_group['host']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_share_group_network_not_active(self): fake_share_type_mapping = {'share_type_id': self.fake_share_type['id']} share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_CREATING, host='fake_original_host', share_network_id='fake_network_id', share_server_id='fake_server_id') network_id = 'fake_sn' share_network = { 'id': network_id, 'status': constants.STATUS_SERVER_NETWORK_CHANGE } expected_values = share_group.copy() for name in ('id', 'created_at', 'share_network_id', 'share_server_id'): expected_values.pop(name, None) expected_values['share_types'] = [self.fake_share_type['id']] expected_values['share_network_id'] = 'fake_network_id' 
expected_values['share_server_id'] = 'fake_server_id' self.mock_object( share_types, 'get_share_type', mock.Mock(return_value={"id": self.fake_share_type['id']})) self.mock_object(db_driver, 'share_network_get', mock.Mock(return_value=share_network)) self.assertRaises( webob_exc.HTTPBadRequest, self.api.create, self.context, share_type_ids=[fake_share_type_mapping], share_network_id="fake_sn") db_driver.share_network_get.assert_called_once_with( self.context, network_id) def test_create_with_source_share_group_snapshot_id_with_member(self): snap = fake_share_group_snapshot( "fake_source_share_group_snapshot_id", status=constants.STATUS_AVAILABLE) share = stubs.stub_share('fakeshareid') member = stubs.stub_share_group_snapshot_member('fake_member_id') fake_share_type_mapping = {'share_type_id': self.fake_share_type['id']} orig_share_group = fake_share_group( 'fakeorigid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_AVAILABLE, share_network_id='fake_network_id', share_server_id='fake_server_id') share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_CREATING, share_network_id='fake_network_id', share_server_id='fake_server_id') expected_values = share_group.copy() share_network = { 'id': 'fakeid', 'status': constants.STATUS_NETWORK_ACTIVE } for name in ('id', 'created_at', 'fake_network_id', 'fake_share_server_id'): expected_values.pop(name, None) expected_values['source_share_group_snapshot_id'] = snap['id'] expected_values['share_types'] = [self.fake_share_type['id']] expected_values['share_network_id'] = 'fake_network_id' expected_values['share_server_id'] = 'fake_server_id' self.mock_object( db_driver, 'share_group_snapshot_get', mock.Mock(return_value=snap)) self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=orig_share_group)) self.mock_object( 
db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_get', mock.Mock(return_value=stubs.stub_share('fakeshare'))) self.mock_object( share_types, 'get_share_type', mock.Mock(return_value={"id": self.fake_share_type['id']})) self.mock_object(db_driver, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object( db_driver, 'share_instance_get', mock.Mock(return_value=share)) self.mock_object( db_driver, 'share_group_snapshot_members_get_all', mock.Mock(return_value=[member])) self.mock_object(self.share_api, 'create') self.api.create( self.context, source_share_group_snapshot_id=snap['id']) db_driver.share_group_create.assert_called_once_with( self.context, expected_values) self.assertTrue(self.share_api.create.called) self.share_rpcapi.create_share_group.assert_called_once_with( self.context, share_group, orig_share_group['host']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_with_source_sg_snapshot_id_with_members_error(self): snap = fake_share_group_snapshot( "fake_source_share_group_snapshot_id", status=constants.STATUS_AVAILABLE) member = stubs.stub_share_group_snapshot_member('fake_member_id') member_2 = stubs.stub_share_group_snapshot_member('fake_member2_id') share = stubs.stub_share('fakeshareid') fake_share_type_mapping = {'share_type_id': self.fake_share_type['id']} orig_share_group = fake_share_group( 'fakeorigid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], status=constants.STATUS_AVAILABLE, share_network_id='fake_network_id', share_server_id='fake_server_id') share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_types=[fake_share_type_mapping], 
status=constants.STATUS_CREATING, share_network_id='fake_network_id', share_server_id='fake_server_id') share_network = { 'id': 'fakeid', 'status': constants.STATUS_NETWORK_ACTIVE } expected_values = share_group.copy() for name in ('id', 'created_at', 'share_network_id', 'share_server_id'): expected_values.pop(name, None) expected_values['source_share_group_snapshot_id'] = snap['id'] expected_values['share_types'] = [self.fake_share_type['id']] expected_values['share_network_id'] = 'fake_network_id' expected_values['share_server_id'] = 'fake_server_id' self.mock_object(db_driver, 'share_group_snapshot_get', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'share_group_get', mock.Mock(return_value=orig_share_group)) self.mock_object(db_driver, 'share_network_get', mock.Mock(return_value=share_network)) self.mock_object(db_driver, 'share_instance_get', mock.Mock(return_value=share)) self.mock_object(db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.mock_object(db_driver, 'share_get', mock.Mock(return_value=stubs.stub_share('fakeshare'))) self.mock_object(share_types, 'get_share_type', mock.Mock(return_value={ "id": self.fake_share_type['id']})) self.mock_object(db_driver, 'share_group_snapshot_members_get_all', mock.Mock(return_value=[member, member_2])) self.mock_object(self.share_api, 'create', mock.Mock(side_effect=[None, exception.Error])) self.mock_object(db_driver, 'share_group_destroy') self.assertRaises(exception.Error, self.api.create, self.context, source_share_group_snapshot_id=snap['id']) db_driver.share_group_create.assert_called_once_with( self.context, expected_values) self.assertEqual(2, self.share_api.create.call_count) self.assertEqual(1, db_driver.share_group_destroy.call_count) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_called_once_with( self.context, 
share_group_api.QUOTAS.reserve.return_value) def test_create_with_source_sg_snapshot_id_error_snapshot_status(self): snap = fake_share_group_snapshot( "fake_source_share_group_snapshot_id", status=constants.STATUS_ERROR) self.mock_object( db_driver, 'share_group_snapshot_get', mock.Mock(return_value=snap)) self.assertRaises( exception.InvalidShareGroupSnapshot, self.api.create, self.context, source_share_group_snapshot_id=snap['id']) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_create_with_source_sg_snapshot_id_snap_not_found(self): snap = fake_share_group_snapshot( "fake_source_share_group_snapshot_id", status=constants.STATUS_ERROR) self.mock_object( db_driver, 'share_group_snapshot_get', mock.Mock(side_effect=exception.ShareGroupSnapshotNotFound( share_group_snapshot_id='fake_source_sg_snapshot_id'))) self.assertRaises( exception.ShareGroupSnapshotNotFound, self.api.create, self.context, source_share_group_snapshot_id=snap['id']) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_create_with_multiple_fields(self): fake_desc = 'fake_desc' fake_name = 'fake_name' share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = share_group.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) expected_values['name'] = fake_name expected_values['description'] = fake_desc self.mock_object(db_driver, 'share_group_create', mock.Mock(return_value=share_group)) self.api.create(self.context, name=fake_name, description=fake_desc) db_driver.share_group_create.assert_called_once_with( self.context, expected_values) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) 
share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_with_error_on_creation(self): share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) expected_values = share_group.copy() for name in ('id', 'host', 'created_at'): expected_values.pop(name, None) self.mock_object(db_driver, 'share_group_create', mock.Mock(side_effect=exception.Error)) self.assertRaises(exception.Error, self.api.create, self.context) db_driver.share_group_create.assert_called_once_with( self.context, expected_values) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=1) share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) def test_delete_creating_no_host(self): share_group = fake_share_group( 'fakeid', user_id=self.user_id + '_different_user', project_id=self.project_id + '_in_different_project', status=constants.STATUS_CREATING) self.mock_object(db_driver, 'share_group_destroy') self.api.delete(self.context, share_group) db_driver.share_group_destroy.assert_called_once_with( mock.ANY, share_group['id']) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_delete_creating_with_host(self): share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING, host="fake_host") self.assertRaises( exception.InvalidShareGroup, self.api.delete, self.context, share_group) def test_delete_available(self): share_group = fake_share_group( 'fakeid', user_id=self.user_id + '_different_user', project_id=self.project_id + '_in_different_project', 
status=constants.STATUS_AVAILABLE, host="fake_host") deleted_share_group = copy.deepcopy(share_group) deleted_share_group['status'] = constants.STATUS_DELETING self.mock_object(db_driver, 'share_group_update', mock.Mock(return_value=deleted_share_group)) self.mock_object(db_driver, 'count_shares_in_share_group', mock.Mock(return_value=0)) self.api.delete(self.context, share_group) db_driver.share_group_update.assert_called_once_with( self.context, share_group['id'], {'status': constants.STATUS_DELETING}) self.share_rpcapi.delete_share_group.assert_called_once_with( self.context, deleted_share_group) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=-1, project_id=share_group['project_id'], user_id=share_group['user_id']) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value, project_id=share_group['project_id'], user_id=share_group['user_id']) share_group_api.QUOTAS.rollback.assert_not_called() def test_delete_error_with_host(self): share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_ERROR, host="fake_host") deleted_share_group = copy.deepcopy(share_group) deleted_share_group['status'] = constants.STATUS_DELETING self.mock_object(self.api, 'share_rpcapi') self.mock_object(db_driver, 'share_group_update', mock.Mock(return_value=deleted_share_group)) self.mock_object(db_driver, 'count_shares_in_share_group', mock.Mock(return_value=0)) self.api.delete(self.context, share_group) db_driver.share_group_update.assert_called_once_with( self.context, share_group['id'], {'status': constants.STATUS_DELETING}) self.api.share_rpcapi.delete_share_group.assert_called_once_with( self.context, deleted_share_group) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_groups=-1, project_id=share_group['project_id'], user_id=share_group['user_id']) 
share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value, project_id=share_group['project_id'], user_id=share_group['user_id']) share_group_api.QUOTAS.rollback.assert_not_called() def test_delete_error_without_host(self): share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_ERROR) self.mock_object(db_driver, 'share_group_destroy') self.api.delete(self.context, share_group) db_driver.share_group_destroy.assert_called_once_with( mock.ANY, share_group['id']) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_delete_with_shares(self): share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE, host="fake_host") self.mock_object( db_driver, 'count_shares_in_share_group', mock.Mock(return_value=1)) self.assertRaises( exception.InvalidShareGroup, self.api.delete, self.context, share_group) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_delete_with_share_group_snapshots(self): share_group = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE, host="fake_host") self.mock_object( db_driver, 'count_share_group_snapshots_in_share_group', mock.Mock(return_value=1)) self.assertRaises( exception.InvalidShareGroup, self.api.delete, self.context, share_group) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() @ddt.data({}, {"name": "fake_name"}, {"description": "fake_description"}) def test_update(self, expected_values): share_group = fake_share_group( 'fakeid', 
user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) self.mock_object( db_driver, 'share_group_update', mock.Mock(return_value=share_group)) self.api.update(self.context, share_group, expected_values) db_driver.share_group_update.assert_called_once_with( self.context, share_group['id'], expected_values) def test_get(self): expected = fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=expected)) actual = self.api.get(self.context, expected['id']) self.assertEqual(expected, actual) def test_get_all_no_groups(self): self.mock_object( db_driver, 'share_group_get_all', mock.Mock(return_value=[])) actual_group = self.api.get_all(self.context) self.assertEqual([], actual_group) def test_get_all(self): expected = [fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING)] self.mock_object( db_driver, 'share_group_get_all_by_project', mock.Mock(return_value=expected)) actual = self.api.get_all(self.context, detailed=True) self.assertEqual(expected, actual) def test_get_all_all_tenants_not_admin(self): cxt = context.RequestContext( user_id=None, project_id=None, is_admin=False) expected = [fake_share_group( 'fakeid', user_id=cxt.user_id, project_id=cxt.project_id, status=constants.STATUS_CREATING)] self.mock_object(db_driver, 'share_group_get_all_by_project', mock.Mock(return_value=expected)) actual = self.api.get_all(cxt, search_opts={'all_tenants': True}) self.assertEqual(expected, actual) def test_get_all_all_tenants_as_admin(self): expected = [fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING)] self.mock_object(db_driver, 'share_group_get_all', mock.Mock(return_value=expected)) actual = self.api.get_all( self.context, 
search_opts={'all_tenants': True}) self.assertEqual(expected, actual) db_driver.share_group_get_all.assert_called_once_with( self.context, detailed=True, filters={}, sort_dir=None, sort_key=None) def test_create_share_group_snapshot_minimal_request_no_members(self): share_group = fake_share_group( 'fake_group_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_share_group_snapshot( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_group_id=share_group['id'], status=constants.STATUS_CREATING) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_group_snapshot_create', mock.Mock(return_value=snap)) self.mock_object( db_driver, 'share_get_all_by_share_group_id', mock.Mock(return_value=[])) self.api.create_share_group_snapshot( self.context, share_group_id=share_group['id']) db_driver.share_group_get.assert_called_once_with( self.context, share_group['id']) db_driver.share_group_snapshot_create.assert_called_once_with( self.context, expected_values) self.share_rpcapi.create_share_group_snapshot.assert_called_once_with( self.context, snap, share_group['host']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_group_snapshots=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_sg_snapshot_minimal_request_no_members_with_name(self): fake_name = 'fake_name' share_group = fake_share_group( 'fake_group_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_share_group_snapshot( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_group_id=share_group['id'], 
name=fake_name, status=constants.STATUS_CREATING) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_group_snapshot_create', mock.Mock(return_value=snap)) self.mock_object( db_driver, 'share_get_all_by_share_group_id', mock.Mock(return_value=[])) self.api.create_share_group_snapshot( self.context, share_group_id=share_group['id'], name=fake_name) db_driver.share_group_get.assert_called_once_with( self.context, share_group['id']) db_driver.share_group_snapshot_create.assert_called_once_with( self.context, expected_values) self.share_rpcapi.create_share_group_snapshot.assert_called_once_with( self.context, snap, share_group['host']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_group_snapshots=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_group_snapshot_minimal_request_no_members_with_desc(self): fake_description = 'fake_description' share_group = fake_share_group( 'fake_group_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_share_group_snapshot( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_group_id=share_group['id'], description=fake_description, status=constants.STATUS_CREATING) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_group_snapshot_create', mock.Mock(return_value=snap)) self.mock_object( db_driver, 'share_get_all_by_share_group_id', mock.Mock(return_value=[])) self.api.create_share_group_snapshot( self.context, share_group_id=share_group['id'], 
description=fake_description) db_driver.share_group_get.assert_called_once_with( self.context, share_group['id']) db_driver.share_group_snapshot_create.assert_called_once_with( self.context, expected_values) self.share_rpcapi.create_share_group_snapshot.assert_called_once_with( self.context, snap, share_group['host']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_group_snapshots=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_share_group_snapshot_group_does_not_exist(self): share_group = fake_share_group( 'fake_group_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) snap = fake_share_group_snapshot( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_group_id=share_group['id'], status=constants.STATUS_CREATING) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_group_snapshot_create', mock.Mock(return_value=snap)) self.mock_object( db_driver, 'share_get_all_by_share_group_id', mock.Mock(return_value=[])) self.assertRaises( exception.InvalidShareGroup, self.api.create_share_group_snapshot, self.context, share_group_id=share_group['id']) db_driver.share_group_get.assert_called_once_with( self.context, share_group['id']) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_create_share_group_snapshot_failure_reserving_quota(self): overs = ["share_group_snapshots"] usages = {"share_group_snapshots": { "reserved": 1, "in_use": 3, "limit": 4, }} quotas = {"share_group_snapshots": 5} share_group = fake_share_group( "fake_group_id", 
user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) self.mock_object( db_driver, "share_group_get", mock.Mock(return_value=share_group)) self.mock_object( db_driver, "share_get_all_by_share_group_id", mock.Mock(return_value=[])) share_group_api.QUOTAS.reserve.side_effect = exception.OverQuota( overs=overs, usages=usages, quotas=quotas, ) self.mock_object(share_group_api.LOG, "warning") self.assertRaises( exception.ShareGroupSnapshotsLimitExceeded, self.api.create_share_group_snapshot, self.context, share_group_id=share_group["id"]) db_driver.share_group_get.assert_called_once_with( self.context, share_group["id"]) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_group_snapshots=1) share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() share_group_api.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) def test_create_share_group_snapshot_group_in_creating(self): self.mock_object( db_driver, 'share_group_get', mock.Mock(side_effect=exception.ShareGroupNotFound( share_group_id='fake_id'))) self.assertRaises( exception.ShareGroupNotFound, self.api.create_share_group_snapshot, self.context, share_group_id="fake_id") db_driver.share_group_get.assert_called_once_with( self.context, "fake_id") share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_create_share_group_snapshot_with_member(self): share_group = fake_share_group( 'fake_group_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_share_group_snapshot( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_group_id=share_group['id'], status=constants.STATUS_CREATING) share = stubs.stub_share( 'fake_share_id', status=constants.STATUS_AVAILABLE) expected_values = snap.copy() for name in ('id', 
'created_at'): expected_values.pop(name, None) expected_member_values = { 'share_group_snapshot_id': snap['id'], 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'size': share['size'], 'share_proto': share['share_proto'], 'share_instance_id': mock.ANY, } self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_group_snapshot_create', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'share_group_snapshot_member_create') self.mock_object( db_driver, 'share_get_all_by_share_group_id', mock.Mock(return_value=[share])) self.api.create_share_group_snapshot( self.context, share_group_id=share_group['id']) db_driver.share_group_get.assert_called_once_with( self.context, share_group['id']) db_driver.share_group_snapshot_create.assert_called_once_with( self.context, expected_values) db_driver.share_group_snapshot_member_create.assert_called_once_with( self.context, expected_member_values) self.share_rpcapi.create_share_group_snapshot.assert_called_once_with( self.context, snap, share_group['host']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_group_snapshots=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_share_group_snapshot_with_member_share_in_creating(self): share_group = fake_share_group( 'fake_group_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) share = stubs.stub_share( 'fake_share_id', status=constants.STATUS_CREATING) self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_get_all_by_share_group_id', mock.Mock(return_value=[share])) self.assertRaises( exception.InvalidShareGroup, self.api.create_share_group_snapshot, self.context, 
share_group_id=share_group['id']) db_driver.share_group_get.assert_called_once_with( self.context, share_group['id']) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_create_share_group_snapshot_with_two_members(self): share_group = fake_share_group( 'fake_group_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_share_group_snapshot( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_group_id=share_group['id'], status=constants.STATUS_CREATING) share = stubs.stub_share( 'fake_share_id', status=constants.STATUS_AVAILABLE) share_2 = stubs.stub_share( 'fake_share2_id', status=constants.STATUS_AVAILABLE) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) expected_member_1_values = { 'share_group_snapshot_id': snap['id'], 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'size': share['size'], 'share_proto': share['share_proto'], 'share_instance_id': mock.ANY, } expected_member_2_values = { 'share_group_snapshot_id': snap['id'], 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'size': share_2['size'], 'share_proto': share_2['share_proto'], 'share_instance_id': mock.ANY, } self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_group_snapshot_create', mock.Mock(return_value=snap)) self.mock_object( db_driver, 'share_get_all_by_share_group_id', mock.Mock(return_value=[share, share_2])) self.mock_object(db_driver, 'share_group_snapshot_member_create') self.api.create_share_group_snapshot( self.context, share_group_id=share_group['id']) db_driver.share_group_get.assert_called_once_with( self.context, share_group['id']) 
db_driver.share_group_snapshot_create.assert_called_once_with( self.context, expected_values) db_driver.share_group_snapshot_member_create.assert_any_call( self.context, expected_member_1_values) db_driver.share_group_snapshot_member_create.assert_any_call( self.context, expected_member_2_values) self.share_rpcapi.create_share_group_snapshot.assert_called_once_with( self.context, snap, share_group['host']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_group_snapshots=1) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) share_group_api.QUOTAS.rollback.assert_not_called() def test_create_share_group_snapshot_error_creating_member(self): share_group = fake_share_group( 'fake_group_id', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_AVAILABLE) snap = fake_share_group_snapshot( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, share_group_id=share_group['id'], status=constants.STATUS_CREATING) share = stubs.stub_share( 'fake_share_id', status=constants.STATUS_AVAILABLE) expected_values = snap.copy() for name in ('id', 'created_at'): expected_values.pop(name, None) expected_member_values = { 'share_group_snapshot_id': snap['id'], 'user_id': self.context.user_id, 'project_id': self.context.project_id, 'status': constants.STATUS_CREATING, 'size': share['size'], 'share_proto': share['share_proto'], 'share_instance_id': mock.ANY, } self.mock_object( db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object( db_driver, 'share_group_snapshot_create', mock.Mock(return_value=snap)) self.mock_object(db_driver, 'share_group_snapshot_destroy') self.mock_object( db_driver, 'share_group_snapshot_member_create', mock.Mock(side_effect=exception.Error)) self.mock_object( db_driver, 'share_get_all_by_share_group_id', mock.Mock(return_value=[share])) self.assertRaises( exception.Error, 
self.api.create_share_group_snapshot, self.context, share_group_id=share_group['id']) db_driver.share_group_get.assert_called_once_with( self.context, share_group['id']) db_driver.share_group_snapshot_create.assert_called_once_with( self.context, expected_values) db_driver.share_group_snapshot_member_create.assert_called_once_with( self.context, expected_member_values) db_driver.share_group_snapshot_destroy.assert_called_once_with( self.context, snap['id']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_group_snapshots=1) share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value) def test_delete_share_group_snapshot(self): share_group = fake_share_group('fake_id', host="fake_host") sg_snap = fake_share_group_snapshot( 'fake_groupsnap_id', share_group_id='fake_id', status=constants.STATUS_AVAILABLE) self.mock_object(db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object(db_driver, 'share_group_snapshot_update') self.api.delete_share_group_snapshot(self.context, sg_snap) db_driver.share_group_get.assert_called_once_with( self.context, "fake_id") db_driver.share_group_snapshot_update.assert_called_once_with( self.context, sg_snap['id'], {'status': constants.STATUS_DELETING}) self.share_rpcapi.delete_share_group_snapshot.assert_called_once_with( self.context, sg_snap, share_group['host']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_group_snapshots=-1, project_id=share_group['project_id'], user_id=share_group['user_id']) share_group_api.QUOTAS.commit.assert_called_once_with( self.context, share_group_api.QUOTAS.reserve.return_value, project_id=share_group['project_id'], user_id=share_group['user_id']) share_group_api.QUOTAS.rollback.assert_not_called() def test_delete_share_group_snapshot_fail_on_quota_reserve(self): share_group = fake_share_group('fake_id', host="fake_host") 
sg_snap = fake_share_group_snapshot( 'fake_groupsnap_id', share_group_id='fake_id', status=constants.STATUS_AVAILABLE) self.mock_object(db_driver, 'share_group_get', mock.Mock(return_value=share_group)) self.mock_object(db_driver, 'share_group_snapshot_update') share_group_api.QUOTAS.reserve.side_effect = exception.OverQuota( 'Failure') self.mock_object(share_group_api.LOG, 'exception') self.api.delete_share_group_snapshot(self.context, sg_snap) db_driver.share_group_get.assert_called_once_with( self.context, "fake_id") db_driver.share_group_snapshot_update.assert_called_once_with( self.context, sg_snap['id'], {'status': constants.STATUS_DELETING}) self.share_rpcapi.delete_share_group_snapshot.assert_called_once_with( self.context, sg_snap, share_group['host']) share_group_api.QUOTAS.reserve.assert_called_once_with( self.context, share_group_snapshots=-1, project_id=share_group['project_id'], user_id=share_group['user_id']) share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() share_group_api.LOG.exception.assert_called_once_with( mock.ANY, mock.ANY) def test_delete_share_group_snapshot_group_does_not_exist(self): snap = fake_share_group_snapshot( 'fake_groupsnap_id', share_group_id='fake_id') self.mock_object( db_driver, 'share_group_get', mock.Mock(side_effect=exception.ShareGroupNotFound( share_group_id='fake_id'))) self.assertRaises( exception.ShareGroupNotFound, self.api.delete_share_group_snapshot, self.context, snap) db_driver.share_group_get.assert_called_once_with( self.context, "fake_id") share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() def test_delete_share_group_snapshot_creating_status(self): snap = fake_share_group_snapshot( 'fake_groupsnap_id', share_group_id='fake_id', status=constants.STATUS_CREATING) self.mock_object(db_driver, 'share_group_get') self.assertRaises( 
exception.InvalidShareGroupSnapshot, self.api.delete_share_group_snapshot, self.context, snap) db_driver.share_group_get.assert_called_once_with( self.context, snap['share_group_id']) share_group_api.QUOTAS.reserve.assert_not_called() share_group_api.QUOTAS.commit.assert_not_called() share_group_api.QUOTAS.rollback.assert_not_called() @ddt.data({}, {"name": "fake_name"}) def test_update_share_group_snapshot_no_values(self, expected_values): snap = fake_share_group_snapshot( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) self.mock_object( db_driver, 'share_group_snapshot_update', mock.Mock(return_value=snap)) self.api.update_share_group_snapshot( self.context, snap, expected_values) db_driver.share_group_snapshot_update.assert_called_once_with( self.context, snap['id'], expected_values) def test_share_group_snapshot_get(self): expected = fake_share_group_snapshot( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING) self.mock_object( db_driver, 'share_group_snapshot_get', mock.Mock(return_value=expected)) actual = self.api.get_share_group_snapshot( self.context, expected['id']) self.assertEqual(expected, actual) def test_share_group_snapshot_get_all_no_groups(self): self.mock_object( db_driver, 'share_group_snapshot_get_all', mock.Mock(return_value=[])) actual = self.api.get_all_share_group_snapshots(self.context) self.assertEqual([], actual) def test_share_group_snapshot_get_all(self): expected = [fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING)] self.mock_object( db_driver, 'share_group_snapshot_get_all_by_project', mock.Mock(return_value=expected)) actual = self.api.get_all_share_group_snapshots( self.context, detailed=True) self.assertEqual(expected, actual) def test_share_group_snapshot_get_all_all_tenants_not_admin(self): cxt = context.RequestContext( user_id=None, 
project_id=None, is_admin=False) expected = [fake_share_group( 'fakeid', user_id=cxt.user_id, project_id=cxt.project_id, status=constants.STATUS_CREATING)] self.mock_object( db_driver, 'share_group_snapshot_get_all_by_project', mock.Mock(return_value=expected)) actual = self.api.get_all_share_group_snapshots( cxt, search_opts={'all_tenants': True}) self.assertEqual(expected, actual) def test_share_group_snapshot_get_all_all_tenants_as_admin(self): expected = [fake_share_group( 'fakeid', user_id=self.context.user_id, project_id=self.context.project_id, status=constants.STATUS_CREATING)] self.mock_object( db_driver, 'share_group_snapshot_get_all', mock.Mock(return_value=expected)) actual = self.api.get_all_share_group_snapshots( self.context, search_opts={'all_tenants': True}) self.assertEqual(expected, actual) db_driver.share_group_snapshot_get_all.assert_called_once_with( self.context, detailed=True, filters={}, sort_dir=None, sort_key=None) def test_get_all_share_group_snapshot_members(self): self.mock_object( db_driver, 'share_group_snapshot_members_get_all', mock.Mock(return_value=[])) self.api.get_all_share_group_snapshot_members(self.context, 'fake_id') db_driver.share_group_snapshot_members_get_all.assert_called_once_with( self.context, 'fake_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/share_group/test_share_group_types.py0000664000175000017500000001261500000000000025157 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Test of Share Type methods for Manila.""" import copy import datetime from unittest import mock import ddt from manila.common import constants from manila import context from manila import db from manila import exception from manila.share_group import share_group_types from manila import test def create_share_group_type_dict(group_specs=None): return { 'fake_type': { 'name': 'fake1', 'group_specs': group_specs } } @ddt.ddt class ShareGroupTypesTestCase(test.TestCase): fake_type = { 'test': { 'created_at': datetime.datetime(2015, 1, 22, 11, 43, 24), 'deleted': '0', 'deleted_at': None, 'group_specs': {}, 'id': u'fooid-1', 'name': u'test', 'updated_at': None } } fake_group_specs = {u'gold': u'True'} fake_share_group_type_id = u'fooid-2' fake_type_w_extra = { 'test_with_extra': { 'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31), 'deleted': '0', 'deleted_at': None, 'group_specs': fake_group_specs, 'id': fake_share_group_type_id, 'name': u'test_with_extra', 'updated_at': None } } fake_type_w_valid_extra = { 'test_with_extra': { 'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31), 'deleted': '0', 'deleted_at': None, 'group_specs': { constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true' }, 'id': u'fooid-2', 'name': u'test_with_extra', 'updated_at': None } } fake_types = fake_type.copy() fake_types.update(fake_type_w_extra) fake_types.update(fake_type_w_valid_extra) fake_share_group = { 'id': u'fooid-1', 'share_group_type_id': fake_share_group_type_id, } def setUp(self): super(ShareGroupTypesTestCase, self).setUp() self.context = context.get_admin_context() @ddt.data({}, fake_type, fake_type_w_extra, fake_types) def test_get_all_types(self, share_group_type): self.mock_object( db, 'share_group_type_get_all', mock.Mock(return_value=copy.deepcopy(share_group_type))) returned_type = share_group_types.get_all(self.context) 
self.assertEqual(sorted(share_group_type), sorted(returned_type)) def test_get_all_types_search(self): share_group_type = self.fake_type_w_extra search_filter = {'group_specs': {'gold': 'True'}, 'is_public': True} self.mock_object( db, 'share_group_type_get_all', mock.Mock(return_value=share_group_type)) returned_type = share_group_types.get_all( self.context, search_opts=search_filter) db.share_group_type_get_all.assert_called_once_with( mock.ANY, 0, filters={'is_public': True}) self.assertEqual(sorted(share_group_type), sorted(returned_type)) search_filter = {'group_specs': {'gold': 'False'}} returned_type = share_group_types.get_all( self.context, search_opts=search_filter) self.assertEqual({}, returned_type) share_group_type = self.fake_type_w_extra search_filter = {'group_specs': {'gold': 'True'}} returned_type = share_group_types.get_all( self.context, search_opts=search_filter) self.assertCountEqual(share_group_type, returned_type) def test_add_access(self): project_id = '456' share_group_type = share_group_types.create(self.context, 'type2', []) share_group_type_id = share_group_type.get('id') share_group_types.add_share_group_type_access( self.context, share_group_type_id, project_id) stype_access = db.share_group_type_access_get_all( self.context, share_group_type_id) self.assertIn(project_id, [a.project_id for a in stype_access]) def test_add_access_invalid(self): self.assertRaises( exception.InvalidShareGroupType, share_group_types.add_share_group_type_access, 'fake', None, 'fake') def test_remove_access(self): project_id = '456' share_group_type = share_group_types.create( self.context, 'type3', [], projects=['456']) share_group_type_id = share_group_type.get('id') share_group_types.remove_share_group_type_access( self.context, share_group_type_id, project_id) stype_access = db.share_group_type_access_get_all( self.context, share_group_type_id) self.assertNotIn(project_id, stype_access) def test_remove_access_invalid(self): self.assertRaises( 
exception.InvalidShareGroupType, share_group_types.remove_share_group_type_access, 'fake', None, 'fake') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_api.py0000664000175000017500000000511400000000000017644 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the API endpoint.""" import http.client as http_client import io import webob class FakeHttplibSocket(object): """A fake socket implementation for http_client.HTTPResponse, trivial.""" def __init__(self, response_string): self.response_string = response_string self._buffer = io.StringIO(response_string) def makefile(self, _mode, _other): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """A fake http_client.HTTPConnection for boto. requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into the http_client.HTTPResponse that boto expects. 
""" def __init__(self, app, host, is_secure=False): self.app = app self.host = host def request(self, method, path, data, headers): req = webob.Request.blank(path) req.method = method req.body = data req.headers = headers req.headers['Accept'] = 'text/html' req.host = self.host # Call the WSGI app, get the HTTP response resp = str(req.get_response(self.app)) # For some reason, the response doesn't have "HTTP/1.0 " prepended; I # guess that's a function the web server usually provides. resp = "HTTP/1.0 %s" % resp self.sock = FakeHttplibSocket(resp) self.http_response = http_client.HTTPResponse(self.sock) # NOTE(vish): boto is accessing private variables for some reason self._HTTPConnection__response = self.http_response self.http_response.begin() def getresponse(self): return self.http_response def getresponsebody(self): return self.sock.response_string def close(self): """Required for compatibility with boto/tornado.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_conf.py0000664000175000017500000000557400000000000020032 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from manila import test CONF = cfg.CONF CONF.register_opt(cfg.StrOpt('conf_unittest', default='foo', help='for testing purposes only')) class ConfigTestCase(test.TestCase): def test_declare(self): self.assertNotIn('answer', CONF) CONF.import_opt('answer', 'manila.tests.declare_conf') self.assertIn('answer', CONF) self.assertEqual(42, CONF.answer) # Make sure we don't overwrite anything CONF.set_override('answer', 256) self.assertEqual(256, CONF.answer) CONF.import_opt('answer', 'manila.tests.declare_conf') self.assertEqual(256, CONF.answer) def test_runtime_and_unknown_flags(self): self.assertNotIn('runtime_answer', CONF) import manila.tests.runtime_conf # noqa self.assertIn('runtime_answer', CONF) self.assertEqual(54, CONF.runtime_answer) def test_long_vs_short_flags(self): CONF.clear() CONF.register_cli_opt(cfg.StrOpt('duplicate_answer_long', default='val', help='desc')) CONF.register_cli_opt(cfg.IntOpt('duplicate_answer', default=50, help='desc')) argv = ['--duplicate_answer=60'] CONF(argv, default_config_files=[]) self.assertEqual(60, CONF.duplicate_answer) self.assertEqual('val', CONF.duplicate_answer_long) def test_flag_leak_left(self): self.assertEqual('foo', CONF.conf_unittest) self.flags(conf_unittest='bar') self.assertEqual('bar', CONF.conf_unittest) def test_flag_leak_right(self): self.assertEqual('foo', CONF.conf_unittest) self.flags(conf_unittest='bar') self.assertEqual('bar', CONF.conf_unittest) def test_flag_overrides(self): self.assertEqual('foo', CONF.conf_unittest) self.flags(conf_unittest='bar') self.assertEqual('bar', CONF.conf_unittest) CONF.reset() self.assertEqual('foo', CONF.conf_unittest) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_context.py0000664000175000017500000000541700000000000020565 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); 
you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from manila import context from manila import test class ContextTestCase(test.TestCase): def test_request_context_elevated(self): user_context = context.RequestContext( 'fake_user', 'fake_project', is_admin=False) self.assertFalse(user_context.is_admin) self.assertEqual([], user_context.roles) admin_context = user_context.elevated() self.assertFalse(user_context.is_admin) self.assertTrue(admin_context.is_admin) self.assertNotIn('admin', user_context.roles) self.assertIn('admin', admin_context.roles) def test_request_context_sets_is_admin(self): ctxt = context.RequestContext('111', '222', roles=['admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_sets_is_service(self): ctxt = context.RequestContext('111', '222', roles=['service', 'admin'], service_roles=['service']) self.assertTrue(ctxt.is_service) def test_request_context_sets_is_admin_upcase(self): ctxt = context.RequestContext('111', '222', roles=['Admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_read_deleted(self): ctxt = context.RequestContext('111', '222', read_deleted='yes') self.assertEqual('yes', ctxt.read_deleted) ctxt.read_deleted = 'no' self.assertEqual('no', ctxt.read_deleted) def test_request_context_read_deleted_invalid(self): self.assertRaises(ValueError, context.RequestContext, '111', '222', read_deleted=True) ctxt = context.RequestContext('111', '222') self.assertRaises(ValueError, setattr, ctxt, 'read_deleted', True) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_coordination.py0000664000175000017500000000730700000000000021571 0ustar00zuulzuul00000000000000# Copyright 2015 Intel # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from tooz import coordination as tooz_coordination from tooz import locking as tooz_locking from manila import coordination from manila import test class Locked(Exception): pass class MockToozLock(tooz_locking.Lock): active_locks = set() def acquire(self, blocking=True): if self.name not in self.active_locks: self.active_locks.add(self.name) return True elif not blocking: return False else: raise Locked def release(self): self.active_locks.remove(self.name) @ddt.ddt class CoordinatorTestCase(test.TestCase): def setUp(self): super(CoordinatorTestCase, self).setUp() self.get_coordinator = self.mock_object(tooz_coordination, 'get_coordinator') def test_coordinator_start(self): crd = self.get_coordinator.return_value agent = coordination.Coordinator() agent.start() self.assertTrue(self.get_coordinator.called) self.assertTrue(crd.start.called) self.assertTrue(agent.started) def test_coordinator_stop(self): crd = self.get_coordinator.return_value agent = coordination.Coordinator() agent.start() self.assertIsNotNone(agent.coordinator) agent.stop() self.assertTrue(crd.stop.called) self.assertIsNone(agent.coordinator) self.assertFalse(agent.started) def test_coordinator_lock(self): crd = 
self.get_coordinator.return_value crd.get_lock.side_effect = lambda n: MockToozLock(n) agent1 = coordination.Coordinator() agent1.start() agent2 = coordination.Coordinator() agent2.start() lock_string = 'lock' expected_lock = lock_string.encode('ascii') self.assertNotIn(expected_lock, MockToozLock.active_locks) with agent1.get_lock(lock_string): self.assertIn(expected_lock, MockToozLock.active_locks) self.assertRaises(Locked, agent1.get_lock(lock_string).acquire) self.assertRaises(Locked, agent2.get_lock(lock_string).acquire) self.assertNotIn(expected_lock, MockToozLock.active_locks) def test_coordinator_offline(self): crd = self.get_coordinator.return_value crd.start.side_effect = tooz_coordination.ToozConnectionError('err') agent = coordination.Coordinator() self.assertRaises(tooz_coordination.ToozError, agent.start) self.assertFalse(agent.started) @mock.patch.object(coordination.LOCK_COORDINATOR, 'get_lock') class CoordinationTestCase(test.TestCase): def test_lock(self, get_lock): with coordination.Lock('lock'): self.assertTrue(get_lock.called) def test_synchronized(self, get_lock): @coordination.synchronized('lock-{f_name}-{foo.val}-{bar[val]}') def func(foo, bar): pass foo = mock.Mock() foo.val = 7 bar = mock.MagicMock() bar.__getitem__.return_value = 8 func(foo, bar) get_lock.assert_called_with('lock-func-7-8') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_exception.py0000664000175000017500000005767100000000000021110 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2014 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from manila import exception from manila import test class FakeNotifier(object): """Acts like the manila.openstack.common.notifier.api module.""" ERROR = 88 def __init__(self): self.provided_publisher = None self.provided_event = None self.provided_priority = None self.provided_payload = None def notify(self, context, publisher, event, priority, payload): self.provided_publisher = publisher self.provided_event = event self.provided_priority = priority self.provided_payload = payload @ddt.ddt class ManilaExceptionTestCase(test.TestCase): def test_default_error_msg(self): class FakeManilaException(exception.ManilaException): message = "default message" exc = FakeManilaException() self.assertEqual('default message', str(exc)) def test_error_msg(self): self.assertEqual('test', str(exception.ManilaException('test'))) def test_default_error_msg_with_kwargs(self): class FakeManilaException(exception.ManilaException): message = "default message: %(code)s" exc = FakeManilaException(code=500) self.assertEqual('default message: 500', str(exc)) def test_error_msg_exception_with_kwargs(self): # NOTE(dprince): disable format errors for this test self.flags(fatal_exception_format_errors=False) class FakeManilaException(exception.ManilaException): message = "default message: %(misspelled_code)s" exc = FakeManilaException(code=500) self.assertEqual('default message: %(misspelled_code)s', str(exc)) def test_default_error_code(self): class FakeManilaException(exception.ManilaException): code = 404 exc = FakeManilaException() self.assertEqual(404, exc.kwargs['code']) def 
test_error_code_from_kwarg(self): class FakeManilaException(exception.ManilaException): code = 500 exc = FakeManilaException(code=404) self.assertEqual(404, exc.kwargs['code']) def test_error_msg_is_exception_to_string(self): msg = 'test message' exc1 = Exception(msg) exc2 = exception.ManilaException(exc1) self.assertEqual(msg, exc2.msg) def test_exception_kwargs_to_string(self): msg = 'test message' exc1 = Exception(msg) exc2 = exception.ManilaException(kwarg1=exc1) self.assertEqual(msg, exc2.kwargs['kwarg1']) def test_exception_multi_kwargs_to_string(self): exc = exception.ManilaException( 'fake_msg', foo=Exception('foo_msg'), bar=Exception('bar_msg')) self.assertEqual('fake_msg', exc.msg) self.assertEqual('foo_msg', exc.kwargs['foo']) self.assertEqual('bar_msg', exc.kwargs['bar']) self.assertNotIn('fake_msg', exc.kwargs) self.assertNotIn('foo_msg', exc.kwargs) self.assertNotIn('bar_msg', exc.kwargs) @ddt.data("test message.", "test message....", ".") def test_exception_not_redundant_period(self, msg): exc1 = Exception(msg) exc2 = exception.ManilaException(exc1) self.assertEqual(msg, exc2.msg) def test_exception_redundant_period(self): msg = "test message.." exc1 = Exception(msg) exc2 = exception.ManilaException(exc1) self.assertEqual("test message.", exc2.msg) def test_replication_exception(self): # Verify response code for exception.ReplicationException reason = "Something bad happened." e = exception.ReplicationException(reason=reason) self.assertEqual(500, e.code) self.assertIn(reason, e.msg) def test_snapshot_access_already_exists(self): # Verify response code for exception.ShareSnapshotAccessExists access_type = "fake_type" access = "fake_access" e = exception.ShareSnapshotAccessExists(access_type=access_type, access=access) self.assertEqual(400, e.code) self.assertIn(access_type, e.msg) self.assertIn(access, e.msg) def test_manage_share_server_error(self): # Verify response code for exception.ManageShareServerError reason = 'Invalid share server id.' 
share_server_id = 'fake' e = exception.ManageShareServerError(reason=reason, share_server_id=share_server_id) self.assertEqual(500, e.code) self.assertIn(reason, e.msg) def test_ip_address_generation_failure(self): # verify response code for exception.IpAddressGenerationFailureClient e = exception.IpAddressGenerationFailureClient() self.assertEqual(500, e.code) class ManilaExceptionResponseCode400(test.TestCase): def test_invalid(self): # Verify response code for exception.Invalid e = exception.Invalid() self.assertEqual(400, e.code) def test_invalid_input(self): # Verify response code for exception.InvalidInput reason = "fake_reason" e = exception.InvalidInput(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_invalid_request(self): # Verify response code for exception.InvalidRequest e = exception.InvalidRequest() self.assertEqual(400, e.code) def test_invalid_results(self): # Verify response code for exception.InvalidResults e = exception.InvalidResults() self.assertEqual(400, e.code) def test_invalid_uuid(self): # Verify response code for exception.InvalidUUID uuid = "fake_uuid" e = exception.InvalidUUID(uuid=uuid) self.assertEqual(400, e.code) self.assertIn(uuid, e.msg) def test_invalid_content_type(self): # Verify response code for exception.InvalidContentType content_type = "fake_content_type" e = exception.InvalidContentType(content_type=content_type) self.assertEqual(400, e.code) self.assertIn(content_type, e.msg) def test_invalid_parameter_value(self): # Verify response code for exception.InvalidParameterValue err = "fake_err" e = exception.InvalidParameterValue(err=err) self.assertEqual(400, e.code) self.assertIn(err, e.msg) def test_invalid_reservation_expiration(self): # Verify response code for exception.InvalidReservationExpiration expire = "fake_expire" e = exception.InvalidReservationExpiration(expire=expire) self.assertEqual(400, e.code) self.assertIn(expire, e.msg) def test_invalid_quota_value(self): # Verify 
response code for exception.InvalidQuotaValue unders = '-1' e = exception.InvalidQuotaValue(unders=unders) self.assertEqual(400, e.code) def test_invalid_share(self): # Verify response code for exception.InvalidShare reason = "fake_reason" e = exception.InvalidShare(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_invalid_share_access(self): # Verify response code for exception.InvalidShareAccess reason = "fake_reason" e = exception.InvalidShareAccess(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_invalid_share_snapshot(self): # Verify response code for exception.InvalidShareSnapshot reason = "fake_reason" e = exception.InvalidShareSnapshot(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_invalid_share_metadata(self): # Verify response code for exception.InvalidMetadata e = exception.InvalidMetadata() self.assertEqual(400, e.code) def test_invalid_share_metadata_size(self): # Verify response code for exception.InvalidMetadataSize e = exception.InvalidMetadataSize() self.assertEqual(400, e.code) def test_invalid_volume(self): # Verify response code for exception.InvalidVolume e = exception.InvalidVolume() self.assertEqual(400, e.code) def test_invalid_share_type(self): # Verify response code for exception.InvalidShareType reason = "fake_reason" e = exception.InvalidShareType(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_manage_invalid_share_snapshot(self): # Verify response code for exception.ManageInvalidShareSnapshot reason = "fake_reason" e = exception.ManageInvalidShareSnapshot(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def test_unmanage_invalid_share_snapshot(self): # Verify response code for exception.UnmanageInvalidShareSnapshot reason = "fake_reason" e = exception.UnmanageInvalidShareSnapshot(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) def 
test_invalid_share_snapshot_instance(self): # Verify response code for exception.InvalidShareSnapshotInstance reason = "fake_reason" e = exception.InvalidShareSnapshotInstance(reason=reason) self.assertEqual(400, e.code) self.assertIn(reason, e.msg) class ManilaExceptionResponseCode403(test.TestCase): def test_not_authorized(self): # Verify response code for exception.NotAuthorized e = exception.NotAuthorized() self.assertEqual(403, e.code) def test_admin_required(self): # Verify response code for exception.AdminRequired e = exception.AdminRequired() self.assertEqual(403, e.code) def test_policy_not_authorized(self): # Verify response code for exception.PolicyNotAuthorized action = "fake_action" e = exception.PolicyNotAuthorized(action=action) self.assertEqual(403, e.code) self.assertIn(action, e.msg) class ManilaExceptionResponseCode404(test.TestCase): def test_not_found(self): # Verify response code for exception.NotFound e = exception.NotFound() self.assertEqual(404, e.code) def test_share_network_not_found(self): # Verify response code for exception.ShareNetworkNotFound share_network_id = "fake_share_network_id" e = exception.ShareNetworkNotFound(share_network_id=share_network_id) self.assertEqual(404, e.code) self.assertIn(share_network_id, e.msg) def test_share_network_subnet_not_found(self): # Verify response code for exception.ShareNetworkSubnetNotFound share_network_subnet_id = "fake_share_network_subnet_id" e = exception.ShareNetworkSubnetNotFound( share_network_subnet_id=share_network_subnet_id) self.assertEqual(404, e.code) self.assertIn(share_network_subnet_id, e.msg) def test_share_server_not_found(self): # Verify response code for exception.ShareServerNotFound share_server_id = "fake_share_server_id" e = exception.ShareServerNotFound(share_server_id=share_server_id) self.assertEqual(404, e.code) self.assertIn(share_server_id, e.msg) def test_share_server_not_found_by_filters(self): # Verify response code for exception.ShareServerNotFoundByFilters 
filters_description = "host = fakeHost" e = exception.ShareServerNotFoundByFilters( filters_description=filters_description) self.assertEqual(404, e.code) self.assertIn(filters_description, e.msg) def test_service_not_found(self): # Verify response code for exception.ServiceNotFound service_id = "fake_service_id" e = exception.ServiceNotFound(service_id=service_id) self.assertEqual(404, e.code) self.assertIn(service_id, e.msg) def test_host_not_found(self): # Verify response code for exception.HostNotFound host = "fake_host" e = exception.HostNotFound(host=host) self.assertEqual(404, e.code) self.assertIn(host, e.msg) def test_scheduler_host_filter_not_found(self): # Verify response code for exception.SchedulerHostFilterNotFound filter_name = "fake_filter_name" e = exception.SchedulerHostFilterNotFound(filter_name=filter_name) self.assertEqual(404, e.code) self.assertIn(filter_name, e.msg) def test_scheduler_host_weigher_not_found(self): # Verify response code for exception.SchedulerHostWeigherNotFound weigher_name = "fake_weigher_name" e = exception.SchedulerHostWeigherNotFound(weigher_name=weigher_name) self.assertEqual(404, e.code) self.assertIn(weigher_name, e.msg) def test_host_binary_not_found(self): # Verify response code for exception.HostBinaryNotFound host = "fake_host" binary = "fake_binary" e = exception.HostBinaryNotFound(binary=binary, host=host) self.assertEqual(404, e.code) self.assertIn(binary, e.msg) self.assertIn(host, e.msg) def test_quota_not_found(self): # Verify response code for exception.QuotaNotFound e = exception.QuotaNotFound() self.assertEqual(404, e.code) def test_quota_resource_unknown(self): # Verify response code for exception.QuotaResourceUnknown unknown = "fake_quota_resource" e = exception.QuotaResourceUnknown(unknown=unknown) self.assertEqual(404, e.code) def test_project_quota_not_found(self): # Verify response code for exception.ProjectQuotaNotFound project_id = "fake_tenant_id" e = 
exception.ProjectQuotaNotFound(project_id=project_id) self.assertEqual(404, e.code) def test_quota_class_not_found(self): # Verify response code for exception.QuotaClassNotFound class_name = "FakeQuotaClass" e = exception.QuotaClassNotFound(class_name=class_name) self.assertEqual(404, e.code) def test_quota_usage_not_found(self): # Verify response code for exception.QuotaUsageNotFound project_id = "fake_tenant_id" e = exception.QuotaUsageNotFound(project_id=project_id) self.assertEqual(404, e.code) def test_reservation_not_found(self): # Verify response code for exception.ReservationNotFound uuid = "fake_uuid" e = exception.ReservationNotFound(uuid=uuid) self.assertEqual(404, e.code) def test_migration_not_found(self): # Verify response code for exception.MigrationNotFound migration_id = "fake_migration_id" e = exception.MigrationNotFound(migration_id=migration_id) self.assertEqual(404, e.code) self.assertIn(migration_id, e.msg) def test_migration_not_found_by_status(self): # Verify response code for exception.MigrationNotFoundByStatus status = "fake_status" instance_id = "fake_instance_id" e = exception.MigrationNotFoundByStatus(status=status, instance_id=instance_id) self.assertEqual(404, e.code) self.assertIn(status, e.msg) self.assertIn(instance_id, e.msg) def test_file_not_found(self): # Verify response code for exception.FileNotFound file_path = "fake_file_path" e = exception.FileNotFound(file_path=file_path) self.assertEqual(404, e.code) self.assertIn(file_path, e.msg) def test_config_not_found(self): # Verify response code for exception.ConfigNotFound path = "fake_path" e = exception.ConfigNotFound(path=path) self.assertEqual(404, e.code) self.assertIn(path, e.msg) def test_paste_app_not_found(self): # Verify response code for exception.PasteAppNotFound name = "fake_name" path = "fake_path" e = exception.PasteAppNotFound(name=name, path=path) self.assertEqual(404, e.code) self.assertIn(name, e.msg) self.assertIn(path, e.msg) def 
test_share_snapshot_not_found(self): # Verify response code for exception.ShareSnapshotNotFound snapshot_id = "fake_snapshot_id" e = exception.ShareSnapshotNotFound(snapshot_id=snapshot_id) self.assertEqual(404, e.code) self.assertIn(snapshot_id, e.msg) def test_metadata_item_not_found(self): # verify response code for exception.MetadataItemNotFound e = exception.MetadataItemNotFound() self.assertEqual(404, e.code) def test_security_service_not_found(self): # verify response code for exception.SecurityServiceNotFound security_service_id = "fake_security_service_id" e = exception.SecurityServiceNotFound( security_service_id=security_service_id) self.assertEqual(404, e.code) self.assertIn(security_service_id, e.msg) def test_volume_not_found(self): # verify response code for exception.VolumeNotFound volume_id = "fake_volume_id" e = exception.VolumeNotFound(volume_id=volume_id) self.assertEqual(404, e.code) self.assertIn(volume_id, e.msg) def test_volume_snapshot_not_found(self): # verify response code for exception.VolumeSnapshotNotFound snapshot_id = "fake_snapshot_id" e = exception.VolumeSnapshotNotFound(snapshot_id=snapshot_id) self.assertEqual(404, e.code) self.assertIn(snapshot_id, e.msg) def test_share_type_not_found(self): # verify response code for exception.ShareTypeNotFound share_type_id = "fake_share_type_id" e = exception.ShareTypeNotFound(share_type_id=share_type_id) self.assertEqual(404, e.code) self.assertIn(share_type_id, e.msg) def test_share_type_not_found_by_name(self): # verify response code for exception.ShareTypeNotFoundByName share_type_name = "fake_share_type_name" e = exception.ShareTypeNotFoundByName( share_type_name=share_type_name) self.assertEqual(404, e.code) self.assertIn(share_type_name, e.msg) def test_share_type_does_not_exist(self): # verify response code for exception.ShareTypeDoesNotExist share_type = "fake_share_type_1234" e = exception.ShareTypeDoesNotExist(share_type=share_type) self.assertEqual(404, e.code) 
self.assertIn(share_type, e.msg) def test_share_type_extra_specs_not_found(self): # verify response code for exception.ShareTypeExtraSpecsNotFound share_type_id = "fake_share_type_id" extra_specs_key = "fake_extra_specs_key" e = exception.ShareTypeExtraSpecsNotFound( share_type_id=share_type_id, extra_specs_key=extra_specs_key) self.assertEqual(404, e.code) self.assertIn(share_type_id, e.msg) self.assertIn(extra_specs_key, e.msg) def test_default_share_type_not_configured(self): # Verify response code for exception.DefaultShareTypeNotConfigured e = exception.DefaultShareTypeNotConfigured() self.assertEqual(404, e.code) def test_instance_not_found(self): # verify response code for exception.InstanceNotFound instance_id = "fake_instance_id" e = exception.InstanceNotFound(instance_id=instance_id) self.assertEqual(404, e.code) self.assertIn(instance_id, e.msg) def test_share_replica_not_found_exception(self): # Verify response code for exception.ShareReplicaNotFound replica_id = "FAKE_REPLICA_ID" e = exception.ShareReplicaNotFound(replica_id=replica_id) self.assertEqual(404, e.code) self.assertIn(replica_id, e.msg) def test_storage_resource_not_found(self): # verify response code for exception.StorageResourceNotFound name = "fake_name" e = exception.StorageResourceNotFound(name=name) self.assertEqual(404, e.code) self.assertIn(name, e.msg) def test_snapshot_resource_not_found(self): # verify response code for exception.SnapshotResourceNotFound name = "fake_name" e = exception.SnapshotResourceNotFound(name=name) self.assertEqual(404, e.code) self.assertIn(name, e.msg) def test_snapshot_instance_not_found(self): # verify response code for exception.ShareSnapshotInstanceNotFound instance_id = 'fake_instance_id' e = exception.ShareSnapshotInstanceNotFound(instance_id=instance_id) self.assertEqual(404, e.code) self.assertIn(instance_id, e.msg) def test_export_location_not_found(self): # verify response code for exception.ExportLocationNotFound uuid = 
"fake-export-location-uuid" e = exception.ExportLocationNotFound(uuid=uuid) self.assertEqual(404, e.code) self.assertIn(uuid, e.msg) def test_share_resource_not_found(self): # verify response code for exception.ShareResourceNotFound share_id = "fake_share_id" e = exception.ShareResourceNotFound(share_id=share_id) self.assertEqual(404, e.code) self.assertIn(share_id, e.msg) def test_share_not_found(self): # verify response code for exception.ShareNotFound share_id = "fake_share_id" e = exception.ShareNotFound(share_id=share_id) self.assertEqual(404, e.code) self.assertIn(share_id, e.msg) def test_resource_lock_not_found(self): # verify response code for exception.ResourceLockNotFound lock_id = "fake_lock_id" e = exception.ResourceLockNotFound(lock_id=lock_id) self.assertEqual(404, e.code) self.assertIn(lock_id, e.msg) class ManilaExceptionResponseCode413(test.TestCase): def test_quota_error(self): # verify response code for exception.QuotaError e = exception.QuotaError() self.assertEqual(413, e.code) def test_share_size_exceeds_available_quota(self): # verify response code for exception.ShareSizeExceedsAvailableQuota e = exception.ShareSizeExceedsAvailableQuota() self.assertEqual(413, e.code) def test_share_limit_exceeded(self): # verify response code for exception.ShareLimitExceeded allowed = 776 # amount of allowed shares e = exception.ShareLimitExceeded(allowed=allowed) self.assertEqual(413, e.code) self.assertIn(str(allowed), e.msg) def test_snapshot_limit_exceeded(self): # verify response code for exception.SnapshotLimitExceeded allowed = 777 # amount of allowed snapshots e = exception.SnapshotLimitExceeded(allowed=allowed) self.assertEqual(413, e.code) self.assertIn(str(allowed), e.msg) def test_share_networks_limit_exceeded(self): # verify response code for exception.ShareNetworksLimitExceeded allowed = 778 # amount of allowed share networks e = exception.ShareNetworksLimitExceeded(allowed=allowed) self.assertEqual(413, e.code) self.assertIn(str(allowed), 
e.msg) def test_port_limit_exceeded(self): # verify response code for exception.PortLimitExceeded e = exception.PortLimitExceeded() self.assertEqual(413, e.code) def test_per_share_limit_exceeded(self): # verify response code for exception.ShareSizeExceedsLimit size = 779 # amount of share size limit = 775 # amount of allowed share size limit e = exception.ShareSizeExceedsLimit(size=size, limit=limit) self.assertEqual(413, e.code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_hacking.py0000664000175000017500000002521000000000000020476 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools import textwrap from unittest import mock import ddt import pycodestyle from manila import test from manila.tests.hacking import checks @ddt.ddt class HackingTestCase(test.TestCase): """Hacking test cases This class tests the hacking checks in manila.tests.hacking.checks by passing strings to the check methods like the pep8/flake8 parser would. The parser loops over each line in the file and then passes the parameters to the check method. The parameter names in the check method dictate what type of object is passed to the check method. The parameter types are:: logical_line: A processed line with the following modifications: - Multi-line statements converted to a single line. - Stripped left and right. 
- Contents of strings replaced with "xxx" of same length. - Comments removed. physical_line: Raw line of text from the input file. lines: a list of the raw lines from the input file tokens: the tokens that contribute to this logical line line_number: line number in the input file total_lines: number of lines in the input file blank_lines: blank lines before this one indent_char: indentation character in this file (" " or "\t") indent_level: indentation (with tabs expanded to multiples of 8) previous_indent_level: indentation on previous line previous_logical: previous logical line filename: Path of the file being run through pep8 When running a test on a check method the return will be False/None if there is no violation in the sample input. If there is an error a tuple is returned with a position in the line, and a message. So to check the result just assertTrue if the check is expected to fail and assertFalse if it should pass. """ @ddt.data(*itertools.product( ('', '_', '_LE', '_LI', '_LW'), ('audit', 'debug', 'error', 'info', 'warn', 'warning', 'critical', 'exception',))) @ddt.unpack def test_no_translate_logs(self, log_marker, log_method): code = "LOG.{0}({1}('foo'))".format(log_method, log_marker) if log_marker: self.assertEqual(1, len(list(checks.no_translate_logs(code)))) else: self.assertEqual(0, len(list(checks.no_translate_logs(code)))) def test_check_explicit_underscore_import(self): self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "manila/tests/other_files.py")))) self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "manila/tests/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from manila.i18n import _", "manila/tests/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "manila/tests/other_files.py")))) self.assertEqual(0, 
len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "manila/tests/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from manila.i18n import _LE, _, _LW", "manila/tests/other_files2.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "manila/tests/other_files2.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "_ = translations.ugettext", "manila/tests/other_files3.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "manila/tests/other_files3.py")))) # Complete code coverage by falling through all checks self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "LOG.info('My info message')", "manila.tests.unit/other_files4.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from manila.i18n import _LW", "manila.tests.unit/other_files5.py")))) self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "manila.tests.unit/other_files5.py")))) # We are patching pep8 so that only the check under test is actually # installed. 
@mock.patch('pycodestyle._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def _run_check(self, code, checker, filename=None): pycodestyle.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) checker = pycodestyle.Checker(filename=filename, lines=lines) checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def _assert_has_errors(self, code, checker, expected_errors=None, filename=None): actual_errors = [e[:3] for e in self._run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) def _assert_has_no_errors(self, code, checker, filename=None): self._assert_has_errors(code, checker, filename=filename) def test_logging_format_no_tuple_arguments(self): checker = checks.CheckLoggingFormatArgs code = """ import logging LOG = logging.getLogger() LOG.info("Message without a second argument.") LOG.critical("Message with %s arguments.", 'two') LOG.debug("Volume %s caught fire and is at %d degrees C and" " climbing.", 'volume1', 500) """ self._assert_has_no_errors(code, checker) @ddt.data(*checks.CheckLoggingFormatArgs.LOG_METHODS) def test_logging_with_tuple_argument(self, log_method): checker = checks.CheckLoggingFormatArgs code = """ import logging LOG = logging.getLogger() LOG.{0}("Volume %s caught fire and is at %d degrees C and " "climbing.", ('volume1', 500)) """ self._assert_has_errors(code.format(log_method), checker, expected_errors=[(4, mock.ANY, 'M310')]) def test_trans_add(self): checker = checks.CheckForTransAdd code = """ def fake_tran(msg): return msg _ = fake_tran _LI = _ _LW = _ _LE = _ _LC = _ def f(a, b): msg = _('test') + 'add me' msg = _LI('test') + 'add me' msg = _LW('test') + 'add me' msg = _LE('test') + 'add me' msg = _LC('test') + 'add me' msg = 'add to me' + _('test') return msg """ errors = [(13, 10, 'M326'), (14, 10, 'M326'), (15, 10, 'M326'), (16, 10, 'M326'), (17, 10, 'M326'), (18, 24, 'M326')] self._assert_has_errors(code, 
checker, expected_errors=errors) code = """ def f(a, b): msg = 'test' + 'add me' return msg """ errors = [] self._assert_has_errors(code, checker, expected_errors=errors) def test_dict_constructor_with_list_copy(self): self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([(i, connect_info[i])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " attrs = dict([(k, _from_json(v))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " type_names = dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( "foo(param=dict((k, v) for k, v in bar.items()))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([[i,i] for i in range(3)])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dd = dict([i,i] for i in range(3))")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " create_kwargs = dict(snapshot=snapshot,")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " self._render_dict(xml, data_el, data.__dict__)")))) def test_no_xrange(self): self.assertEqual(1, len(list(checks.no_xrange("xrange(45)")))) self.assertEqual(0, len(list(checks.no_xrange("range(45)")))) def test_validate_assertTrue(self): test_value = True self.assertEqual(0, len(list(checks.validate_assertTrue( "assertTrue(True)")))) self.assertEqual(1, len(list(checks.validate_assertTrue( "assertEqual(True, %s)" % test_value)))) def test_check_uuid4(self): code = """ fake_uuid = uuid.uuid4() """ errors = [(1, 0, 'M354')] self._assert_has_errors(code, checks.check_uuid4, expected_errors=errors) code = """ hex_uuid = uuid.uuid4().hex """ self._assert_has_no_errors(code, checks.check_uuid4) def test_no_log_warn_check(self): self.assertEqual(0, len(list(checks.no_log_warn_check( 
"LOG.warning('This should not trigger LOG.warn " "hacking check.')")))) self.assertEqual(1, len(list(checks.no_log_warn_check( "LOG.warn('We should not use LOG.warn')")))) foo = """ LOG.warn('Catch me too, please' ) """ self.assertEqual(1, len(list(checks.no_log_warn_check(foo)))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_manager.py0000664000175000017500000001231400000000000020505 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test of Base Manager for Manila.""" from unittest import mock import ddt from oslo_utils import importutils from manila import manager from manila import test @ddt.ddt class ManagerTestCase(test.TestCase): def setUp(self): super(ManagerTestCase, self).setUp() self.host = 'host' self.db_driver = 'fake_driver' self.mock_object(importutils, 'import_module') def test_verify_manager_instance(self): fake_manager = manager.Manager(self.host, self.db_driver) self.assertTrue(hasattr(fake_manager, '_periodic_tasks')) self.assertTrue(hasattr(fake_manager, 'additional_endpoints')) self.assertTrue(hasattr(fake_manager, 'host')) self.assertTrue(hasattr(fake_manager, 'periodic_tasks')) self.assertTrue(hasattr(fake_manager, 'init_host')) self.assertTrue(hasattr(fake_manager, 'service_version')) self.assertTrue(hasattr(fake_manager, 'service_config')) self.assertEqual(self.host, fake_manager.host) importutils.import_module.assert_called_once_with(self.db_driver) @ddt.data(True, False) def test_periodic_tasks(self, raise_on_error): fake_manager = manager.Manager(self.host, self.db_driver) fake_context = 'fake_context' self.mock_object(fake_manager, 'run_periodic_tasks') fake_manager.periodic_tasks(fake_context, raise_on_error) fake_manager.run_periodic_tasks.assert_called_once_with( fake_context, raise_on_error=raise_on_error) @ddt.ddt class SchedulerDependentManagerTestCase(test.TestCase): def setUp(self): super(SchedulerDependentManagerTestCase, self).setUp() self.context = 'fake_context' self.host = 'host' self.db_driver = 'fake_driver' self.service_name = 'fake_service_name' self.mock_object(importutils, 'import_module') self.sched_manager = manager.SchedulerDependentManager( self.host, self.db_driver, self.service_name) def test_verify_scheduler_dependent_manager_instance(self): self.assertTrue(hasattr(self.sched_manager, '_periodic_tasks')) self.assertTrue(hasattr(self.sched_manager, 'additional_endpoints')) self.assertTrue(hasattr(self.sched_manager, 'host')) 
self.assertTrue(hasattr(self.sched_manager, 'periodic_tasks')) self.assertTrue(hasattr(self.sched_manager, 'init_host')) self.assertTrue(hasattr(self.sched_manager, 'service_version')) self.assertTrue(hasattr(self.sched_manager, 'service_config')) self.assertTrue(hasattr(self.sched_manager, 'last_capabilities')) self.assertTrue(hasattr(self.sched_manager, 'service_name')) self.assertTrue(hasattr(self.sched_manager, 'scheduler_rpcapi')) self.assertTrue(hasattr(self.sched_manager, 'update_service_capabilities')) self.assertTrue(hasattr(self.sched_manager, '_publish_service_capabilities')) self.assertEqual(self.host, self.sched_manager.host) self.assertEqual(self.service_name, self.sched_manager.service_name) importutils.import_module.assert_called_once_with(self.db_driver) @ddt.data(None, {}, [], '') def test__publish_service_capabilities_no_update(self, last_capabilities): self.sched_manager.last_capabilities = last_capabilities self.mock_object( self.sched_manager.scheduler_rpcapi, 'update_service_capabilities') self.sched_manager._publish_service_capabilities('fake_context') self.assertFalse( self.sched_manager.scheduler_rpcapi.update_service_capabilities. called) @ddt.data('fake_last_capabilities', {'foo': 'bar'}) def test__publish_service_capabilities_with_update(self, last_capabilities): self.sched_manager.last_capabilities = last_capabilities self.mock_object( self.sched_manager.scheduler_rpcapi, 'update_service_capabilities') self.mock_object(manager.LOG, 'debug') self.sched_manager._publish_service_capabilities(self.context) (self.sched_manager.scheduler_rpcapi.update_service_capabilities. 
assert_called_once_with( self.context, self.service_name, self.host, last_capabilities)) manager.LOG.debug.assert_called_once_with(mock.ANY) @ddt.data(None, '', [], {}, {'foo': 'bar'}) def test_update_service_capabilities(self, capabilities): self.sched_manager.update_service_capabilities(capabilities) self.assertEqual(capabilities, self.sched_manager.last_capabilities) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_misc.py0000664000175000017500000000423100000000000020025 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glob
import os

from manila import exception
from manila import test


class ExceptionTestCase(test.TestCase):
    """Verify that every exception class in manila.exception is raisable."""

    @staticmethod
    def _raise_exc(exc):
        # Instantiate with no kwargs and raise, so assertRaises can catch it.
        raise exc()

    def test_exceptions_raise(self):
        # NOTE(dprince): disable format errors since we are not passing kwargs
        self.flags(fatal_exception_format_errors=False)
        for name in dir(exception):
            exc = getattr(exception, name)
            # Only classes are candidates; module-level helpers/constants in
            # dir(exception) are skipped.
            if isinstance(exc, type):
                self.assertRaises(exc, self._raise_exc, exc)


class ProjectTestCase(test.TestCase):
    """Source-tree wide sanity checks."""

    def test_all_migrations_have_downgrade(self):
        """Every migration defining upgrade() must also define downgrade()."""
        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
        py_glob = os.path.join(topdir, "manila", "db", "sqlalchemy",
                               "migrate_repo", "versions", "*.py")
        missing_downgrade = []
        for path in glob.iglob(py_glob):
            has_upgrade = False
            has_downgrade = False
            # Scan the migration script textually; importing it would run
            # sqlalchemy-migrate machinery we do not want in a unit test.
            with open(path, "r") as f:
                for line in f:
                    if 'def upgrade(' in line:
                        has_upgrade = True
                    if 'def downgrade(' in line:
                        has_downgrade = True
            if has_upgrade and not has_downgrade:
                fname = os.path.basename(path)
                missing_downgrade.append(fname)
        helpful_msg = ("The following migrations are missing a downgrade:"
                       "\n\t%s") % '\n\t'.join(sorted(missing_downgrade))
        # assertFalse(x, msg) is the idiomatic form of assertTrue(not x, msg)
        # and produces the same pass/fail behavior with the same message.
        self.assertFalse(missing_downgrade, helpful_msg)

# ---- manila/tests/test_network.py ----
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the
# License for the specific language governing permissions and limitations
# under the License.

import ddt
from oslo_config import cfg
from oslo_utils import importutils

from manila import exception
from manila import network
from manila import test

CONF = cfg.CONF


@ddt.ddt
class APITestCase(test.TestCase):
    """Tests for the network.API() factory."""

    def setUp(self):
        super(APITestCase, self).setUp()
        # Stub class loading so no real network plugin is imported.
        self.mock_object(importutils, 'import_class')

    def test_init_api_with_default_config_group_name(self):
        network.API()

        importutils.import_class.assert_called_once_with(
            CONF.network_api_class)
        importutils.import_class.return_value.assert_called_once_with(
            config_group_name=None, label='user')

    def test_init_api_with_custom_config_group_name(self):
        group_name = 'FOO_GROUP_NAME'

        network.API(config_group_name=group_name)

        importutils.import_class.assert_called_once_with(
            getattr(CONF, group_name).network_api_class)
        importutils.import_class.return_value.assert_called_once_with(
            config_group_name=group_name, label='user')

    def test_init_api_with_custom_config_group_name_and_label(self):
        group_name = 'FOO_GROUP_NAME'
        label = 'custom_label'

        network.API(config_group_name=group_name, label=label)

        importutils.import_class.assert_called_once_with(
            getattr(CONF, group_name).network_api_class)
        importutils.import_class.return_value.assert_called_once_with(
            config_group_name=group_name, label=label)


class FakeCompleteNetworkAPI(network.NetworkBaseAPI):
    """Concrete fake overriding every abstract method of NetworkBaseAPI.

    Shared by the tests below that need an instantiable subclass; previously
    an identical class was re-declared inline in four separate tests.  The
    tests that exercise abstract-method enforcement still define their own
    deliberately-incomplete subclasses inline.
    """

    def allocate_network(self, *args, **kwargs):
        pass

    def deallocate_network(self, *args, **kwargs):
        pass

    def manage_network_allocations(self, context, allocations, share_server,
                                   share_network=None):
        pass

    def unmanage_network_allocations(self, context, share_server_id):
        pass

    def include_network_info(self, share_network_subnet):
        pass


@ddt.ddt
class NetworkBaseAPITestCase(test.TestCase):
    """Tests for the NetworkBaseAPI abstract base class contract."""

    def setUp(self):
        super(NetworkBaseAPITestCase, self).setUp()
        self.db_driver = 'fake_driver'
        # Stub the db driver module import done by the base class __init__.
        self.mock_object(importutils, 'import_module')

    def test_inherit_network_base_api_no_redefinitions(self):
        # A subclass overriding nothing must not be instantiable.
        class FakeNetworkAPI(network.NetworkBaseAPI):
            pass

        self.assertRaises(TypeError, FakeNetworkAPI)

    def test_inherit_network_base_api_deallocate_not_redefined(self):
        # deallocate_network is left abstract -> instantiation raises.
        class FakeNetworkAPI(network.NetworkBaseAPI):
            def allocate_network(self, *args, **kwargs):
                pass

            def manage_network_allocations(
                    self, context, allocations, share_server,
                    share_network=None):
                pass

            def unmanage_network_allocations(self, context, share_server_id):
                pass

            def include_network_info(self, share_network_subnet):
                pass

        self.assertRaises(TypeError, FakeNetworkAPI)

    def test_inherit_network_base_api_allocate_not_redefined(self):
        # allocate_network is left abstract -> instantiation raises.
        class FakeNetworkAPI(network.NetworkBaseAPI):
            def deallocate_network(self, *args, **kwargs):
                pass

            def manage_network_allocations(
                    self, context, allocations, share_server,
                    share_network=None):
                pass

            def unmanage_network_allocations(self, context, share_server_id):
                pass

            def include_network_info(self, share_network_subnet):
                pass

        self.assertRaises(TypeError, FakeNetworkAPI)

    def test_inherit_network_base_api(self):
        result = FakeCompleteNetworkAPI()

        self.assertTrue(hasattr(result, '_verify_share_network'))
        self.assertTrue(hasattr(result, 'allocate_network'))
        self.assertTrue(hasattr(result, 'deallocate_network'))

    def test__verify_share_network_ok(self):
        result = FakeCompleteNetworkAPI()

        # A non-None share network must pass verification silently.
        result._verify_share_network('foo_id', {'id': 'bar_id'})

    def test__verify_share_network_fail(self):
        result = FakeCompleteNetworkAPI()

        self.assertRaises(
            exception.NetworkBadConfigurationException,
            result._verify_share_network, 'foo_id', None)

    @ddt.data((True, False, set([6])), (False, True, set([4])),
              (True, True, set([4, 6])), (False, False, set()))
    @ddt.unpack
    def test_enabled_ip_versions(self, network_plugin_ipv6_enabled,
                                 network_plugin_ipv4_enabled,
                                 enable_ip_versions):
        network.CONF.set_default(
            'network_plugin_ipv6_enabled', network_plugin_ipv6_enabled)
        network.CONF.set_default(
            'network_plugin_ipv4_enabled', network_plugin_ipv4_enabled)

        result = FakeCompleteNetworkAPI()

        if enable_ip_versions:
            self.assertTrue(hasattr(result, 'enabled_ip_versions'))
            self.assertEqual(enable_ip_versions, result.enabled_ip_versions)
        else:
            # With both plugins disabled, accessing the property raises.
            self.assertRaises(exception.NetworkBadConfigurationException,
                              getattr, result, 'enabled_ip_versions')

# ---- manila/tests/test_policy.py ----
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Test of Policy Engine For Manila."""

import ddt
from oslo_config import cfg
from oslo_policy import policy as common_policy

from manila import context
from manila import exception
from manila import policy
from manila import test

CONF = cfg.CONF


@ddt.ddt
class PolicyTestCase(test.TestCase):
    """Exercise policy.authorize() against a set of registered test rules."""

    def setUp(self):
        super(PolicyTestCase, self).setUp()
        rule_specs = (
            ("true", '@'),
            ("test:allowed", '@'),
            ("test:denied", "!"),
            ("test:my_file", "role:compute_admin or "
                             "project_id:%(project_id)s"),
            ("test:early_and_fail", "! and @"),
            ("test:early_or_success", "@ or !"),
            ("test:lowercase_admin", "role:admin"),
            ("test:uppercase_admin", "role:ADMIN"),
        )
        rules = [common_policy.RuleDefault(name, check_str)
                 for name, check_str in rule_specs]
        policy.reset()
        policy.init(suppress_deprecation_warnings=True)
        # before a policy rule can be used, its default has to be registered.
        policy._ENFORCER.register_defaults(rules)
        self.context = context.RequestContext('fake',
                                              'fake',
                                              roles=['member'])
        self.target = {}
        self.addCleanup(policy.reset)

    def test_authorize_nonexistent_action_throws(self):
        # Unregistered actions must fail loudly, not fall back to a default.
        action = "test:noexist"
        self.assertRaises(common_policy.PolicyNotRegistered, policy.authorize,
                          self.context, action, self.target)

    def test_authorize_bad_action_throws(self):
        action = "test:denied"
        self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                          self.context, action, self.target)

    def test_authorize_bad_action_noraise(self):
        # With do_raise=False a denial is reported as a False return value.
        action = "test:denied"
        result = policy.authorize(self.context, action, self.target, False)
        self.assertFalse(result)

    def test_authorize_good_action(self):
        action = "test:allowed"
        result = policy.authorize(self.context, action, self.target)
        self.assertTrue(result)

    def test_templatized_authorization(self):
        # %(project_id)s in the rule is substituted from the target dict.
        owned_target = {'project_id': 'fake'}
        foreign_target = {'project_id': 'another'}
        action = "test:my_file"
        policy.authorize(self.context, action, owned_target)
        self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                          self.context, action, foreign_target)

    def test_early_AND_authorization(self):
        action = "test:early_and_fail"
        self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                          self.context, action, self.target)

    def test_early_OR_authorization(self):
        action = "test:early_or_success"
        policy.authorize(self.context, action, self.target)

    def test_ignore_case_role_check(self):
        # Role matching is case-insensitive in both directions.
        lowercase_action = "test:lowercase_admin"
        uppercase_action = "test:uppercase_admin"
        admin_context = context.RequestContext('admin',
                                               'fake',
                                               roles=['AdMiN'])
        policy.authorize(admin_context, lowercase_action, self.target)
        policy.authorize(admin_context, uppercase_action, self.target)

    @ddt.data('enforce', 'authorize')
    def test_authorize_properly_handles_invalid_scope_exception(self, method):
        # A system-scoped rule checked with a project-scoped context must be
        # surfaced as PolicyNotAuthorized when scope enforcement is on.
        self.fixture.config(enforce_scope=True, group='oslo_policy')
        project_context = context.RequestContext(project_id='fake-project-id',
                                                 roles=['bar'])
        policy.reset()
        policy.init(suppress_deprecation_warnings=True)
        rule = common_policy.RuleDefault('foo', 'role:bar',
                                         scope_types=['system'])
        policy._ENFORCER.register_defaults([rule])

        self.assertRaises(exception.PolicyNotAuthorized,
                          getattr(policy, method),
                          project_context, 'foo', {})

    @ddt.data('enforce', 'authorize')
    def test_authorize_does_not_raise_forbidden(self, method):
        # With scope enforcement off, the same mismatch is permitted.
        self.fixture.config(enforce_scope=False, group='oslo_policy')
        project_context = context.RequestContext(project_id='fake-project-id',
                                                 roles=['bar'])
        policy.reset()
        policy.init(suppress_deprecation_warnings=True)
        rule = common_policy.RuleDefault('foo', 'role:bar',
                                         scope_types=['system'])
        policy._ENFORCER.register_defaults([rule])

        self.assertTrue(getattr(policy, method)(project_context, 'foo', {}))


class DefaultPolicyTestCase(test.TestCase):
    """This test case calls into the "enforce" method in policy

    enforce() in contrast with authorize() allows "default" rules to
    apply to policies that have not been registered.
    """

    def setUp(self):
        super(DefaultPolicyTestCase, self).setUp()
        policy.reset()
        policy.init(suppress_deprecation_warnings=True)
        self.rules = {
            "default": [],
            "example:exist": "false:false"
        }
        self._set_rules('default')
        self.context = context.RequestContext('fake', 'fake')

    def tearDown(self):
        super(DefaultPolicyTestCase, self).tearDown()
        policy.reset()

    def _set_rules(self, default_rule):
        # Replace the enforcer's rule set wholesale with self.rules.
        rules_obj = common_policy.Rules.from_dict(
            self.rules, default_rule=default_rule)
        policy._ENFORCER.set_rules(rules_obj)

    def test_policy_called(self):
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, "example:exist", {})

    def test_not_found_policy_calls_default(self):
        policy.enforce(self.context, "example:noexist", {})

    def test_default_not_found(self):
        new_default_rule = "default_noexist"
        # FIXME(gyee): need to overwrite the Enforcer's default_rule first
        # as it is recreating the rules with its own default_rule instead
        # of the default_rule passed in from set_rules(). I think this is a
        # bug in Oslo policy.
        policy._ENFORCER.default_rule = new_default_rule
        self._set_rules(new_default_rule)
        self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
                          self.context, "example:noexist", {})


class ContextIsAdminPolicyTestCase(test.TestCase):
    """Tests for how the context_is_admin rule drives RequestContext.is_admin."""

    def setUp(self):
        super(ContextIsAdminPolicyTestCase, self).setUp()
        policy.reset()
        policy.init(suppress_deprecation_warnings=True)

    def _set_rules(self, rules, default_rule):
        rules_obj = common_policy.Rules.from_dict(
            rules, default_rule=default_rule)
        policy._ENFORCER.set_rules(rules_obj)

    def test_default_admin_role_is_admin(self):
        ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
        self.assertFalse(ctx.is_admin)
        ctx = context.RequestContext('fake', 'fake', roles=['admin'])
        self.assertTrue(ctx.is_admin)

    def test_custom_admin_role_is_admin(self):
        # define explicit rules for context_is_admin
        rules = {
            'context_is_admin': [["role:administrator"],
                                 ["role:johnny-admin"]]
        }
        self._set_rules(rules, CONF.oslo_policy.policy_default_rule)
        ctx = context.RequestContext('fake', 'fake', roles=['johnny-admin'])
        self.assertTrue(ctx.is_admin)
        ctx = context.RequestContext('fake', 'fake', roles=['administrator'])
        self.assertTrue(ctx.is_admin)
        # default rule no longer applies
        ctx = context.RequestContext('fake', 'fake', roles=['admin'])
        self.assertFalse(ctx.is_admin)

    def test_context_is_admin_undefined(self):
        # With no context_is_admin rule, the default rule decides.
        rules = {
            "admin_or_owner": "role:admin or project_id:%(project_id)s",
            "default": "rule:admin_or_owner",
        }
        self._set_rules(rules, CONF.oslo_policy.policy_default_rule)
        ctx = context.RequestContext('fake', 'fake')
        self.assertTrue(ctx.is_admin)
        ctx = context.RequestContext('fake', 'fake', roles=['admin'])
        self.assertTrue(ctx.is_admin)

# ---- manila/tests/test_quota.py ----
# Copyright 2017 Mirantis Inc.
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_config import cfg from manila import exception from manila import quota from manila import test CONF = cfg.CONF @ddt.ddt class DbQuotaDriverTestCase(test.TestCase): def setUp(self): super(DbQuotaDriverTestCase, self).setUp() self.project_id = 'fake_project_id' self.user_id = 'fake_user_id' self.share_type_id = 'fake_share_type_id' self.ctxt = type( 'FakeContext', (object, ), {'project_id': self.project_id, 'user_id': self.user_id, 'quota_class': 'fake_quota_class', 'elevated': mock.Mock()}) self.driver = quota.DbQuotaDriver() self.reservations = ['foo', 'bar'] self.resources = {k: quota.BaseResource(k) for k in ('foo', 'bar')} def test_get_by_class(self): self.mock_object(quota.db, 'quota_class_get') result = self.driver.get_by_class( self.ctxt, 'fake_quota_class', 'fake_res') self.assertEqual(quota.db.quota_class_get.return_value, result) quota.db.quota_class_get.assert_called_once_with( self.ctxt, 'fake_quota_class', 'fake_res') def test_get_defaults(self): self.mock_object( quota.db, 'quota_class_get_default', mock.Mock(return_value={'foo': 13})) result = self.driver.get_defaults(self.ctxt, self.resources) self.assertEqual( {'foo': 13, 'bar': self.resources['bar'].default}, result) quota.db.quota_class_get_default.assert_called_once_with(self.ctxt) @ddt.data(True, False) def test_get_class_quotas(self, defaults): self.mock_object( quota.db, 
'quota_class_get_all_by_name', mock.Mock(return_value={'foo': 13})) result = self.driver.get_class_quotas( self.ctxt, self.resources, 'fake_quota_class', defaults) expected = {'foo': 13, 'bar': -1} if defaults else {'foo': 13} self.assertEqual(expected, result) quota.db.quota_class_get_all_by_name.assert_called_once_with( self.ctxt, 'fake_quota_class') @ddt.data( ('fake_project_id', {'foo': 20}, None, True, None, True), ('fake_different_project_id', {'bar': 40}, 'fake_quota_class', True, {'foo': {'in_use': 3, 'reserved': 2}}, False), ('fake_project_id', {'bar': 30}, 'fake_quota_class', True, None, False) ) @ddt.unpack def test__process_quotas(self, project_id, quotas, quota_class, defaults, usages, remains): self.mock_object(quota.db, 'quota_get_all', mock.Mock(return_value=[])) self.mock_object(quota.db, 'quota_class_get_all_by_name') self.mock_object( self.driver, 'get_defaults', mock.Mock(return_value={'foo': 11, 'bar': 12})) self.mock_object( quota.db, 'quota_get_all', mock.Mock(return_value=[])) result = self.driver._process_quotas( self.ctxt, self.resources, project_id, quotas, quota_class, defaults, usages, remains) expected = {key: {'limit': mock.ANY} for key in ('foo', 'bar')} if usages: for res in self.resources.values(): usage = usages.get(res.name, {}) expected[res.name].update( in_use=usage.get('in_use', 0), reserved=usage.get('reserved', 0)) if remains: quota.db.quota_get_all.assert_called_once_with( self.ctxt, project_id) for res in self.resources.values(): expected[res.name]['remains'] = mock.ANY else: self.assertEqual(0, quota.db.quota_get_all.call_count) self.assertEqual(expected, result) if quota_class or project_id == self.ctxt.project_id: quota.db.quota_class_get_all_by_name.assert_called_once_with( self.ctxt, quota_class or self.ctxt.quota_class) else: self.assertEqual( 0, quota.db.quota_class_get_all_by_name.call_count) @ddt.data( ('fake_quota_class', True, None, 'fake_remains'), (None, False, 'fake_usages', False), ) @ddt.unpack def 
test_get_project_quotas(self, quota_class, defaults, usages, remains): self.mock_object(quota.db, 'quota_get_all_by_project') self.mock_object(quota.db, 'quota_usage_get_all_by_project') self.mock_object(self.driver, '_process_quotas') result = self.driver.get_project_quotas( self.ctxt, self.resources, self.project_id, quota_class, defaults, usages, remains) self.assertEqual( result, self.driver._process_quotas.return_value) project_usages = None if usages: project_usages = ( quota.db.quota_usage_get_all_by_project.return_value) self.driver._process_quotas.assert_called_once_with( self.ctxt, self.resources, self.project_id, quota.db.quota_get_all_by_project.return_value, quota_class, defaults=defaults, usages=project_usages, remains=remains) quota.db.quota_get_all_by_project.assert_called_once_with( self.ctxt, self.project_id) if usages: quota.db.quota_usage_get_all_by_project.assert_called_once_with( self.ctxt, self.project_id) else: self.assertEqual( 0, quota.db.quota_usage_get_all_by_project.call_count) @ddt.data( (None, True, True), ('fake_quota_class', False, True), ('fake_quota_class', True, False), ) @ddt.unpack def test_get_user_quotas(self, quota_class, defaults, usages): project_quotas = {'fake_resource': 5} self.mock_object( quota.db, 'quota_get_all_by_project', mock.Mock(return_value=project_quotas)) self.mock_object( quota.db, 'quota_get_all_by_project_and_user', mock.Mock(return_value={'fake_user_defined_resource': 14})) mock_user_usages = self.mock_object( quota.db, 'quota_usage_get_all_by_project_and_user') self.mock_object(self.driver, '_process_quotas') result = self.driver.get_user_quotas( self.ctxt, self.resources, self.project_id, self.user_id, quota_class, defaults, usages) self.assertEqual( self.driver._process_quotas.return_value, result) quota.db.quota_get_all_by_project.assert_called_once_with( self.ctxt, self.project_id) quota.db.quota_get_all_by_project_and_user.assert_called_once_with( self.ctxt, self.project_id, self.user_id) if 
usages: user_usages = mock_user_usages.return_value mock_user_usages.assert_called_once_with( self.ctxt, self.project_id, self.user_id) else: user_usages = None self.assertEqual(0, mock_user_usages.call_count) expected_user_quotas = {'fake_user_defined_resource': 14} expected_user_quotas.update(project_quotas) self.driver._process_quotas.assert_called_once_with( self.ctxt, self.resources, self.project_id, expected_user_quotas, quota_class, defaults=defaults, usages=user_usages) @ddt.data( (None, True, True), ('fake_quota_class', False, True), ('fake_quota_class', True, False), ) @ddt.unpack def test_get_share_type_quotas(self, quota_class, defaults, usages): project_quotas = {'fake_resource': 5} self.mock_object( quota.db, 'quota_get_all_by_project', mock.Mock(return_value=project_quotas)) mock_st_quotas = self.mock_object( quota.db, 'quota_get_all_by_project_and_share_type', mock.Mock(return_value={'fake_st_defined_resource': 14})) mock_st_usages = self.mock_object( quota.db, 'quota_usage_get_all_by_project_and_share_type') self.mock_object(self.driver, '_process_quotas') result = self.driver.get_share_type_quotas( self.ctxt, self.resources, self.project_id, self.share_type_id, quota_class, defaults, usages) self.assertEqual( self.driver._process_quotas.return_value, result) quota.db.quota_get_all_by_project.assert_called_once_with( self.ctxt, self.project_id) mock_st_quotas.assert_called_once_with( self.ctxt, self.project_id, self.share_type_id) if usages: st_usages = mock_st_usages.return_value mock_st_usages.assert_called_once_with( self.ctxt, self.project_id, self.share_type_id) else: st_usages = None self.assertEqual(0, mock_st_usages.call_count) expected_st_quotas = {'fake_st_defined_resource': 14} expected_st_quotas.update(project_quotas) self.driver._process_quotas.assert_called_once_with( self.ctxt, self.resources, self.project_id, expected_st_quotas, quota_class, defaults=defaults, usages=st_usages) @ddt.data((None, None), (None, 'foo_st_id'), 
('foo_user_id', None)) @ddt.unpack def test_get_settable_quotas(self, user_id, st_id): project_quotas = {'fake': { 'limit': 13, 'in_use': 7, 'reserved': 5, 'remains': 1, }} user_or_st_quotas = {'fake': { 'limit': 11, 'in_use': 5, 'reserved': 2, }} self.mock_object( self.driver, 'get_project_quotas', mock.Mock(return_value=project_quotas)) self.mock_object( self.driver, 'get_user_quotas', mock.Mock(return_value=user_or_st_quotas)) self.mock_object( self.driver, 'get_share_type_quotas', mock.Mock(return_value=user_or_st_quotas)) result = self.driver.get_settable_quotas( self.ctxt, self.resources, self.project_id, user_id, st_id) if user_id: self.driver.get_user_quotas.assert_called_once_with( self.ctxt, self.resources, self.project_id, user_id) else: self.assertEqual(0, self.driver.get_user_quotas.call_count) if st_id: self.driver.get_share_type_quotas.assert_called_once_with( self.ctxt, self.resources, self.project_id, st_id) else: self.assertEqual(0, self.driver.get_share_type_quotas.call_count) if user_id or st_id: expected_settable_quotas = {'fake': {'maximum': 13, 'minimum': 7}} else: expected_settable_quotas = {'fake': {'maximum': -1, 'minimum': 12}} self.driver.get_project_quotas.assert_called_once_with( self.ctxt, self.resources, self.project_id, remains=True) self.assertEqual(expected_settable_quotas, result) @ddt.data((None, None), (None, 'fake_st_id'), ('fake_user_id', None)) @ddt.unpack def test__get_quotas(self, user_id, st_id): quotas = {'foo': {'limit': 5}, 'bar': {'limit': 13}} self.mock_object( self.driver, 'get_project_quotas', mock.Mock(return_value=quotas)) self.mock_object( self.driver, 'get_user_quotas', mock.Mock(return_value=quotas)) self.mock_object( self.driver, 'get_share_type_quotas', mock.Mock(return_value=quotas)) result = self.driver._get_quotas( self.ctxt, self.resources, ('foo', 'bar'), False, self.project_id, user_id, st_id) expected = {k: v['limit'] for k, v in quotas.items()} self.assertEqual(expected, result) sub_resources = {k: v 
for k, v in self.resources.items()} if user_id: self.driver.get_user_quotas.assert_called_once_with( self.ctxt, sub_resources, self.project_id, user_id, self.ctxt.quota_class, usages=False) self.assertEqual(0, self.driver.get_project_quotas.call_count) self.assertEqual(0, self.driver.get_share_type_quotas.call_count) elif st_id: self.driver.get_share_type_quotas.assert_called_once_with( self.ctxt, sub_resources, self.project_id, st_id, self.ctxt.quota_class, usages=False) self.assertEqual(0, self.driver.get_project_quotas.call_count) self.assertEqual(0, self.driver.get_user_quotas.call_count) else: self.driver.get_project_quotas.assert_called_once_with( self.ctxt, sub_resources, self.project_id, self.ctxt.quota_class, usages=False) self.assertEqual(0, self.driver.get_user_quotas.call_count) self.assertEqual(0, self.driver.get_share_type_quotas.call_count) def test__get_quotas_unknown(self): quotas = {'foo': {'limit': 5}, 'bar': {'limit': 13}} self.mock_object( self.driver, 'get_project_quotas', mock.Mock(return_value=quotas)) self.mock_object( self.driver, 'get_user_quotas', mock.Mock(return_value=quotas)) self.mock_object( self.driver, 'get_share_type_quotas', mock.Mock(return_value=quotas)) self.assertRaises( exception.QuotaResourceUnknown, self.driver._get_quotas, self.ctxt, self.resources, ['foo', 'bar'], True, self.project_id, self.user_id, self.share_type_id) self.assertEqual(0, self.driver.get_project_quotas.call_count) self.assertEqual(0, self.driver.get_user_quotas.call_count) self.assertEqual(0, self.driver.get_share_type_quotas.call_count) @ddt.data( {}, {'project_id': 'fake_project'}, {'user_id': 'fake_user'}, {'share_type_id': 'fake_share_type_id'}, ) def test_reserve(self, kwargs): self.mock_object(quota.db, 'quota_reserve') deltas = {'delta1': 1, 'delta2': 2} quotas, user_quotas, st_quotas = 'fake1', 'fake2', 'fake3' self.mock_object( self.driver, '_get_quotas', mock.Mock( side_effect=[quotas, user_quotas, st_quotas])) result = self.driver.reserve( 
self.ctxt, self.resources, deltas, None, **kwargs) expected_kwargs = { 'project_id': self.ctxt.project_id, 'user_id': self.ctxt.user_id, 'share_type_id': None, 'overquota_allowed': False } expected_kwargs.update(kwargs) st_quotas = st_quotas if kwargs.get('share_type_id') else {} self.assertEqual(quota.db.quota_reserve.return_value, result) quota.db.quota_reserve.assert_called_once_with( self.ctxt, self.resources, quotas, user_quotas, st_quotas, deltas, mock.ANY, CONF.quota.until_refresh, CONF.quota.max_age, **expected_kwargs) self.assertEqual( 3 if kwargs.get('share_type_id') else 2, self.driver._get_quotas.call_count) def test_reserve_wrong_expire(self): self.assertRaises( exception.InvalidReservationExpiration, self.driver.reserve, self.ctxt, self.resources, 'fake_deltas', 'fake_expire') def test_commit(self): self.mock_object(quota.db, 'reservation_commit') result = self.driver.commit( self.ctxt, self.reservations, self.project_id, self.user_id, self.share_type_id) self.assertIsNone(result) quota.db.reservation_commit.assert_called_once_with( self.ctxt, self.reservations, project_id=self.project_id, user_id=self.user_id, share_type_id=self.share_type_id) @ddt.data( (None, None), ('fake_project_id', 'fake_user_id'), ) @ddt.unpack def test_rollback(self, project_id, user_id): self.mock_object(quota.db, 'reservation_rollback') result = self.driver.rollback( self.ctxt, self.reservations, project_id, user_id, self.share_type_id) expected_project_id = project_id or self.ctxt.project_id expected_user_id = user_id or self.ctxt.user_id self.assertIsNone(result) quota.db.reservation_rollback.assert_called_once_with( self.ctxt, self.reservations, project_id=expected_project_id, user_id=expected_user_id, share_type_id=self.share_type_id) def test_usage_reset(self): self.mock_object( quota.db, 'quota_usage_update', mock.Mock(side_effect=[ 'foo', exception.QuotaUsageNotFound(project_id=self.project_id)])) result = self.driver.usage_reset(self.ctxt, ['foo', 'bar']) 
        # (Tail of a DbQuotaDriver usage_reset test whose "def" precedes this
        # chunk): the driver must reset in_use to -1 for every resource via an
        # elevated context.
        self.assertIsNone(result)
        quota.db.quota_usage_update.assert_has_calls([
            mock.call(
                self.ctxt.elevated.return_value, self.ctxt.project_id,
                self.ctxt.user_id, res, in_use=-1)
            for res in ('foo', 'bar')
        ])

    def test_destroy_all_by_project(self):
        # The driver should delegate straight to the DB API.
        self.mock_object(quota.db, 'quota_destroy_all_by_project')

        result = self.driver.destroy_all_by_project(self.ctxt,
                                                    self.project_id)

        self.assertIsNone(result)
        quota.db.quota_destroy_all_by_project.assert_called_once_with(
            self.ctxt, self.project_id)

    def test_destroy_all_by_project_and_user(self):
        self.mock_object(quota.db, 'quota_destroy_all_by_project_and_user')

        result = self.driver.destroy_all_by_project_and_user(
            self.ctxt, self.project_id, self.user_id)

        self.assertIsNone(result)
        quota.db.quota_destroy_all_by_project_and_user.assert_called_once_with(
            self.ctxt, self.project_id, self.user_id)

    def test_destroy_all_by_project_and_share_type(self):
        mock_destroy_all = self.mock_object(
            quota.db, 'quota_destroy_all_by_share_type')

        result = self.driver.destroy_all_by_project_and_share_type(
            self.ctxt, self.project_id, self.share_type_id)

        self.assertIsNone(result)
        mock_destroy_all.assert_called_once_with(
            self.ctxt, self.share_type_id, project_id=self.project_id)

    def test_expire(self):
        self.mock_object(quota.db, 'reservation_expire')

        result = self.driver.expire(self.ctxt)

        self.assertIsNone(result)
        quota.db.reservation_expire.assert_called_once_with(self.ctxt)


@ddt.ddt
class QuotaEngineTestCase(test.TestCase):
    # Tests for the QuotaEngine facade.  The real DbQuotaDriver is replaced
    # with a mock class in setUp, so each test only verifies that the engine
    # delegates with the expected arguments/defaults.

    def setUp(self):
        super(QuotaEngineTestCase, self).setUp()
        self.ctxt = 'fake_context'
        self.mock_class('manila.quota.DbQuotaDriver')
        self.engine = quota.QuotaEngine()
        self.driver = self.engine._driver
        self.resources = [quota.BaseResource('foo'),
                          quota.BaseResource('bar')]
        self.project_id = 'fake_project_id'
        self.user_id = 'fake_user_id'
        self.share_type_id = 'fake_share_type_id'
        self.quota_class = 'fake_quota_class'

    def test_register_resource(self):
        self.assertNotIn(self.resources[0].name, self.engine)

        self.engine.register_resource(self.resources[0])

        self.assertIn(self.resources[0].name, self.engine)

    def test_register_resources(self):
        for res in self.resources:
            self.assertNotIn(res.name, self.engine)

        self.engine.register_resources(self.resources)

        for res in self.resources:
            self.assertIn(res.name, self.engine)

    def test_get_by_class(self):
        result = self.engine.get_by_class(
            self.ctxt, self.quota_class, 'fake_res')

        self.assertEqual(result, self.driver.get_by_class.return_value)
        self.driver.get_by_class.assert_called_once_with(
            self.ctxt, self.quota_class, 'fake_res')

    def test_get_defaults(self):
        result = self.engine.get_defaults(self.ctxt)

        self.assertEqual(result, self.driver.get_defaults.return_value)
        self.driver.get_defaults.assert_called_once_with(
            self.ctxt, self.engine._resources)

    @ddt.data(None, True, False)
    def test_get_class_quotas(self, defaults):
        kwargs = {}
        if defaults is not None:
            kwargs['defaults'] = defaults

        result = self.engine.get_class_quotas(
            self.ctxt, self.quota_class, **kwargs)

        self.assertEqual(result, self.driver.get_class_quotas.return_value)
        # When not supplied by the caller, the engine passes defaults=True
        # through to the driver.
        kwargs['defaults'] = defaults if defaults is not None else True
        self.driver.get_class_quotas.assert_called_once_with(
            self.ctxt, self.engine._resources, self.quota_class, **kwargs)

    @ddt.data(
        {},
        {'quota_class': 'foo'},
        {'defaults': False},
        {'usages': False},
    )
    def test_get_user_quotas(self, kwargs):
        # Engine defaults that apply when the caller omits a kwarg.
        expected_kwargs = {
            'quota_class': None,
            'defaults': True,
            'usages': True,
        }
        expected_kwargs.update(kwargs)

        result = self.engine.get_user_quotas(
            self.ctxt, self.project_id, self.user_id, **kwargs)

        self.assertEqual(result, self.driver.get_user_quotas.return_value)
        self.driver.get_user_quotas.assert_called_once_with(
            self.ctxt, self.engine._resources, self.project_id, self.user_id,
            **expected_kwargs)

    @ddt.data(
        {},
        {'quota_class': 'foo'},
        {'defaults': False},
        {'usages': False},
    )
    def test_get_share_type_quotas(self, kwargs):
        expected_kwargs = {
            'quota_class': None,
            'defaults': True,
            'usages': True,
        }
        expected_kwargs.update(kwargs)

        result = self.engine.get_share_type_quotas(
            self.ctxt, self.project_id, self.share_type_id, **kwargs)

        self.assertEqual(
            result, self.driver.get_share_type_quotas.return_value)
        self.driver.get_share_type_quotas.assert_called_once_with(
            self.ctxt, self.engine._resources, self.project_id,
            self.share_type_id, **expected_kwargs)

    @ddt.data(
        {},
        {'quota_class': 'foo'},
        {'defaults': False},
        {'usages': False},
        {'remains': True},
    )
    def test_get_project_quotas(self, kwargs):
        expected_kwargs = {
            'quota_class': None,
            'defaults': True,
            'usages': True,
            'remains': False,
        }
        expected_kwargs.update(kwargs)

        result = self.engine.get_project_quotas(
            self.ctxt, self.project_id, **kwargs)

        self.assertEqual(result, self.driver.get_project_quotas.return_value)
        self.driver.get_project_quotas.assert_called_once_with(
            self.ctxt, self.engine._resources, self.project_id,
            **expected_kwargs)

    @ddt.data(
        {},
        {'user_id': 'fake_user_id'},
        {'share_type_id': 'fake_share_type_id'},
    )
    def test_get_settable_quotas(self, kwargs):
        expected_kwargs = {'user_id': None, 'share_type_id': None}
        expected_kwargs.update(kwargs)

        result = self.engine.get_settable_quotas(
            self.ctxt, self.project_id, **kwargs)

        self.assertEqual(result, self.driver.get_settable_quotas.return_value)
        self.driver.get_settable_quotas.assert_called_once_with(
            self.ctxt, self.engine._resources, self.project_id,
            **expected_kwargs)

    def test_count(self):
        # A CountableResource delegates counting to its count callable.
        mock_count = mock.Mock()
        resource = quota.CountableResource('FakeCountableResource',
                                           mock_count)
        self.engine.register_resource(resource)

        result = self.engine.count(self.ctxt, resource.name)

        self.assertEqual(mock_count.return_value, result)

    def test_count_unknown_resource(self):
        self.assertRaises(
            exception.QuotaResourceUnknown,
            self.engine.count,
            self.ctxt, 'nonexistent_resource', 'foo_arg', foo='kwarg')

    def test_reserve(self):
        result = self.engine.reserve(
            self.ctxt, 'fake_expire', self.project_id, self.user_id,
            self.share_type_id, delta1=1, delta2=2)

        self.assertEqual(self.driver.reserve.return_value, result)
        self.driver.reserve.assert_called_once_with(
            self.ctxt, self.engine._resources, {'delta1': 1, 'delta2': 2},
            expire='fake_expire', project_id=self.project_id,
            user_id=self.user_id, share_type_id=self.share_type_id,
            overquota_allowed=False)

    @ddt.data(Exception('FakeException'), [None])
    def test_commit(self, side_effect):
        fake_reservations = ['foo', 'bar']
        self.driver.commit.side_effect = side_effect
        self.mock_object(quota.LOG, 'exception')

        result = self.engine.commit(
            self.ctxt, fake_reservations, 'fake_project_id', 'fake_user_id',
            'fake_share_type_id')

        self.assertIsNone(result)
        self.driver.commit.assert_called_once_with(
            self.ctxt, fake_reservations, project_id='fake_project_id',
            user_id='fake_user_id', share_type_id='fake_share_type_id')
        if side_effect == [None]:
            self.assertEqual(0, quota.LOG.exception.call_count)
        else:
            # Driver failures during commit are swallowed and only logged.
            quota.LOG.exception.assert_called_once_with(
                mock.ANY, fake_reservations)

    @ddt.data(Exception('FakeException'), [None])
    def test_rollback(self, side_effect):
        fake_reservations = ['foo', 'bar']
        self.driver.rollback.side_effect = side_effect
        self.mock_object(quota.LOG, 'exception')

        result = self.engine.rollback(
            self.ctxt, fake_reservations, 'fake_project_id', 'fake_user_id',
            'fake_share_type_id')

        self.assertIsNone(result)
        self.driver.rollback.assert_called_once_with(
            self.ctxt, fake_reservations, project_id='fake_project_id',
            user_id='fake_user_id', share_type_id='fake_share_type_id')
        if side_effect == [None]:
            self.assertEqual(0, quota.LOG.exception.call_count)
        else:
            # As with commit, rollback failures are logged, never raised.
            quota.LOG.exception.assert_called_once_with(
                mock.ANY, fake_reservations)

    def test_usage_reset(self):
        result = self.engine.usage_reset(self.ctxt, 'fake_resources')

        self.assertIsNone(result)
        self.driver.usage_reset.assert_called_once_with(
            self.ctxt, 'fake_resources')

    def test_destroy_all_by_project_and_user(self):
        result = self.engine.destroy_all_by_project_and_user(
            self.ctxt, 'fake_project_id', 'fake_user_id')

        self.assertIsNone(result)
        self.driver.destroy_all_by_project_and_user.assert_called_once_with(
            self.ctxt, 'fake_project_id', 'fake_user_id')

    def test_destroy_all_by_project_and_share_type(self):
        result = self.engine.destroy_all_by_project_and_share_type(
            self.ctxt, 'fake_project_id', 'fake_st_id')

        self.assertIsNone(result)
        mock_destroy_all_by_project_and_share_type = (
            self.driver.destroy_all_by_project_and_share_type)
        mock_destroy_all_by_project_and_share_type.assert_called_once_with(
            self.ctxt, 'fake_project_id', 'fake_st_id')

    def test_destroy_all_by_project(self):
        result = self.engine.destroy_all_by_project(
            self.ctxt, 'fake_project_id')

        self.assertIsNone(result)
        self.driver.destroy_all_by_project.assert_called_once_with(
            self.ctxt, 'fake_project_id')

    def test_expire(self):
        result = self.engine.expire(self.ctxt)

        self.assertIsNone(result)
        self.driver.expire.assert_called_once_with(self.ctxt)

    def test_resources(self):
        self.engine.register_resources(self.resources)

        # resources property reports registered names sorted alphabetically.
        self.assertEqual(['bar', 'foo'], self.engine.resources)

    def test_current_common_resources(self):
        # Guards the canonical set of quota resources registered by default.
        self.assertEqual(
            sorted(['gigabytes', 'per_share_gigabytes', 'replica_gigabytes',
                    'share_group_snapshots', 'share_groups', 'share_networks',
                    'share_replicas', 'shares', 'snapshot_gigabytes',
                    'snapshots', 'backups', 'backup_gigabytes',
                    'encryption_keys']),
            quota.QUOTAS.resources)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_rpc.py0000664000175000017500000000272600000000000017665 0ustar00zuulzuul00000000000000# Copyright 2017 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import ddt

from manila import rpc
from manila import test


@ddt.ddt
class RPCTestCase(test.TestCase):
    # Tests for manila.rpc initialization of the global NOTIFIER.

    @ddt.data([], ['noop'], ['noop', 'noop'])
    @mock.patch('oslo_messaging.JsonPayloadSerializer', wraps=True)
    def test_init_no_notifications(self, driver, serializer_mock):
        # With no notification driver (or only 'noop' drivers) configured,
        # rpc.init() must install the DO_NOTHING notifier and never build a
        # payload serializer.
        self.override_config('driver', driver,
                             group='oslo_messaging_notifications')
        rpc.init(test.CONF)
        self.assertEqual(rpc.utils.DO_NOTHING, rpc.NOTIFIER)
        serializer_mock.assert_not_called()

    @mock.patch.object(rpc, 'messaging')
    def test_init_notifications(self, messaging_mock):
        # Default config: a real Notifier is constructed and stored globally.
        rpc.init(test.CONF)
        self.assertTrue(messaging_mock.JsonPayloadSerializer.called)
        self.assertTrue(messaging_mock.Notifier.called)
        self.assertEqual(rpc.NOTIFIER, messaging_mock.Notifier.return_value)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_service.py0000664000175000017500000003152300000000000020536 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 NetApp, Inc.
# Copyright 2014 Mirantis, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Unit Tests for remote procedure calls using queue
"""

from datetime import timedelta
from unittest import mock

import ddt
from oslo_config import cfg
from oslo_service import wsgi
from oslo_utils import timeutils

from manila import context
from manila import db
from manila import exception
from manila import manager
from manila import service
from manila import test
from manila import utils

# Options used only by this test module; registered on the global CONF.
test_service_opts = [
    cfg.StrOpt("fake_manager",
               default="manila.tests.test_service.FakeManager",
               help="Manager for testing"),
    cfg.StrOpt("test_service_listen",
               help="Host to bind test service to"),
    cfg.IntOpt("test_service_listen_port",
               default=0,
               help="Port number to bind test service to"),
]

CONF = cfg.CONF
CONF.register_opts(test_service_opts)


class FakeManager(manager.Manager):
    """Fake manager for tests."""

    RPC_API_VERSION = "1.0"

    def __init__(self, host=None, db_driver=None, service_name=None):
        super(FakeManager, self).__init__(host=host, db_driver=db_driver)

    def test_method(self):
        return 'manager'


class ExtendedService(service.Service):
    # Overrides the manager-backed test_method to prove service-level
    # methods win over manager delegation.
    def test_method(self):
        return 'service'


class ServiceManagerTestCase(test.TestCase):
    """Test cases for Services."""

    def test_message_gets_to_manager(self):
        serv = service.Service('test', 'test', 'test', CONF.fake_manager)
        serv.start()
        self.assertEqual('manager', serv.test_method())

    def test_override_manager_method(self):
        serv = ExtendedService('test', 'test', 'test', CONF.fake_manager)
        serv.start()
        self.assertEqual('service', serv.test_method())


class ServiceFlagsTestCase(test.TestCase):
    # enable_new_services controls the 'disabled' flag of a newly created
    # service DB record.
    def test_service_enabled_on_create_based_on_flag(self):
        self.flags(enable_new_services=True)
        host = 'foo'
        binary = 'manila-fake'
        app = service.Service.create(host=host, binary=binary)
        app.start()
        app.stop()
        ref = db.service_get(context.get_admin_context(), app.service_id)
        db.service_destroy(context.get_admin_context(), app.service_id)
        self.assertFalse(ref['disabled'])

    def test_service_disabled_on_create_based_on_flag(self):
        self.flags(enable_new_services=False)
        host = 'foo'
        binary = 'manila-fake'
        app = service.Service.create(host=host, binary=binary)
        app.start()
        app.stop()
        ref = db.service_get(context.get_admin_context(), app.service_id)
        db.service_destroy(context.get_admin_context(), app.service_id)
        self.assertTrue(ref['disabled'])


def fake_service_get_by_args(*args, **kwargs):
    # Simulates "no existing service row" so Service.start() creates one.
    raise exception.NotFound()


def fake_service_get(*args, **kwargs):
    # Simulates a DB failure during report_state.
    raise Exception()


# Shared fixtures for ServiceTestCase below.
host = 'foo'
binary = 'bar'
topic = 'test'
service_create = {
    'host': host,
    'binary': binary,
    'topic': topic,
    'state': 'up',
    'report_count': 0,
    'availability_zone': 'nova',
}
service_create_other_az = {
    'host': host,
    'binary': binary,
    'topic': topic,
    'state': 'up',
    'report_count': 0,
    'availability_zone': 'other-zone',
}
service_ref = {
    'host': host,
    'binary': binary,
    'topic': topic,
    'state': 'up',
    'report_count': 0,
    'availability_zone': {'name': 'nova'},
    'id': 1,
}
service_ref_stopped = {
    'host': host,
    'binary': binary,
    'topic': topic,
    'state': 'stopped',
    'report_count': 0,
    'availability_zone': {'name': 'nova'},
    'id': 1,
}


@ddt.ddt
class ServiceTestCase(test.TestCase):
    """Test cases for Services."""

    def test_create(self):
        app = service.Service.create(host='foo',
                                     binary='manila-fake',
                                     topic='fake')
        self.assertTrue(app)

    @ddt.data(True, False)
    def test_periodic_tasks(self, raise_on_error):
        serv = service.Service(host, binary, topic, CONF.fake_manager)
        self.mock_object(
            context, 'get_admin_context',
            mock.Mock(side_effect=context.get_admin_context))
        self.mock_object(serv.manager, 'periodic_tasks')

        serv.periodic_tasks(raise_on_error=raise_on_error)

        context.get_admin_context.assert_called_once_with()
        serv.manager.periodic_tasks.assert_called_once_with(
            utils.IsAMatcher(context.RequestContext),
            raise_on_error=raise_on_error)

    @mock.patch.object(service.db, 'service_get_by_args',
                       mock.Mock(side_effect=fake_service_get_by_args))
    @mock.patch.object(service.db, 'service_create',
                       mock.Mock(return_value=service_ref))
    @mock.patch.object(service.db, 'service_get',
                       mock.Mock(side_effect=fake_service_get))
    def test_report_state_newly_disconnected(self):
        # A DB error while reporting state flips model_disconnected on.
        serv = service.Service(host, binary, topic, CONF.fake_manager)
        serv.start()
        serv.report_state()
        self.assertTrue(serv.model_disconnected)
        service.db.service_get_by_args.assert_called_once_with(
            mock.ANY, host, binary)
        service.db.service_create.assert_called_once_with(
            mock.ANY, service_create)
        service.db.service_get.assert_called_once_with(mock.ANY, mock.ANY)

    @mock.patch.object(service.db, 'service_get_by_args',
                       mock.Mock(side_effect=fake_service_get_by_args))
    @mock.patch.object(service.db, 'service_create',
                       mock.Mock(return_value=service_ref))
    @mock.patch.object(service.db, 'service_get',
                       mock.Mock(return_value=service_ref))
    @mock.patch.object(service.db, 'service_update',
                       mock.Mock(return_value=service_ref.
                                 update({'report_count': 1})))
    @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
    def test_report_state_newly_connected(self):
        # A successful report_state clears model_disconnected.
        serv = service.Service(host, binary, topic, CONF.fake_manager)
        serv.start()
        serv.model_disconnected = True
        serv.report_state()
        self.assertFalse(serv.model_disconnected)
        service.db.service_get_by_args.assert_called_once_with(
            mock.ANY, host, binary)
        service.db.service_create.assert_called_once_with(
            mock.ANY, service_create)
        service.db.service_get.assert_called_once_with(
            mock.ANY, service_ref['id'])
        service.db.service_update.assert_called_once_with(
            mock.ANY, service_ref['id'], mock.ANY)

    @mock.patch.object(service.db, 'service_get_by_args',
                       mock.Mock(side_effect=fake_service_get_by_args))
    @mock.patch.object(service.db, 'service_create',
                       mock.Mock(return_value=service_ref))
    @mock.patch.object(service.db, 'service_get',
                       mock.Mock(return_value=service_ref))
    @mock.patch.object(service.db, 'service_update',
                       mock.Mock(return_value=service_ref.
                                 update({'report_count': 1})))
    @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
    def test_report_state_newly_connected_different_az(self):
        # The service's availability_zone is propagated into the created row.
        serv = service.Service(host, binary, topic, CONF.fake_manager)
        serv.availability_zone = 'other-zone'
        serv.start()
        serv.model_disconnected = True
        serv.report_state()
        self.assertFalse(serv.model_disconnected)
        service.db.service_get_by_args.assert_called_once_with(
            mock.ANY, host, binary)
        service.db.service_create.assert_called_once_with(
            mock.ANY, service_create_other_az)
        service.db.service_get.assert_called_once_with(
            mock.ANY, service_ref['id'])
        service.db.service_update.assert_called_once_with(
            mock.ANY, service_ref['id'], mock.ANY)

    @mock.patch.object(service.db, 'service_get_by_args',
                       mock.Mock(side_effect=fake_service_get_by_args))
    @mock.patch.object(service.db, 'service_create',
                       mock.Mock(return_value=service_ref))
    @mock.patch.object(service.db, 'service_get',
                       mock.Mock(side_effect=[exception.NotFound,
                                              service_ref]))
    @mock.patch.object(service.db, 'service_update',
                       mock.Mock(return_value=service_ref.
                                 update({'report_count': 1})))
    @mock.patch.object(utils, 'service_is_up', mock.Mock(return_value=True))
    def test_report_state_newly_connected_not_found(self):
        # First service_get raises NotFound, so the service row is created a
        # second time before the report succeeds.
        serv = service.Service(host, binary, topic, CONF.fake_manager)
        serv.start()
        serv.model_disconnected = True
        serv.report_state()
        self.assertFalse(serv.model_disconnected)
        service.db.service_get_by_args.assert_called_once_with(
            mock.ANY, host, binary)
        service.db.service_create.assert_has_calls([
            mock.call(mock.ANY, service_create),
            mock.call(mock.ANY, service_create)])
        service.db.service_get.assert_has_calls([
            mock.call(mock.ANY, service_ref['id']),
            mock.call(mock.ANY, service_ref['id'])])
        service.db.service_update.assert_called_once_with(
            mock.ANY, service_ref['id'], mock.ANY)

    def test_report_state_service_not_ready(self):
        with mock.patch.object(service, 'db') as mock_db:
            mock_db.service_get.return_value = service_ref
            serv = service.Service(host, binary, topic, CONF.fake_manager)
            serv.manager.is_service_ready = mock.Mock(return_value=False)
            serv.start()
            serv.report_state()
            serv.manager.is_service_ready.assert_called_once()

    @ddt.data(True, False)
    def test_cleanup_services(self, cleanup_interval_done):
        with mock.patch.object(service, 'db') as mock_db:
            mock_db.service_get_all.return_value = [service_ref]
            serv = service.Service(host, binary, topic, CONF.fake_manager)
            serv.start()
            serv.cleanup_services()
            mock_db.service_destroy.assert_not_called()

            # Stopped services are only destroyed once they are older than
            # the cleanup interval.
            if cleanup_interval_done:
                service_ref_stopped['updated_at'] = (
                    timeutils.utcnow() - timedelta(minutes=10))
            else:
                service_ref_stopped['updated_at'] = timeutils.utcnow()

            mock_db.service_get_all_by_topic.return_value = [
                service_ref_stopped]
            serv.stop()
            serv.cleanup_services()
            if cleanup_interval_done:
                mock_db.service_destroy.assert_called_once_with(
                    mock.ANY, service_ref_stopped['id'])


class TestWSGIService(test.TestCase):

    def setUp(self):
        super(TestWSGIService, self).setUp()
        self.mock_object(wsgi.Loader, 'load_app')
        self.test_service = service.WSGIService("test_service")

    def test_service_random_port(self):
        self.assertEqual(0, self.test_service.port)
        self.test_service.start()
        self.assertNotEqual(0, self.test_service.port)
        self.test_service.stop()
        wsgi.Loader.load_app.assert_called_once_with("test_service")

    def test_reset_pool_size_to_default(self):
        self.test_service.start()

        # Stopping the service, which in turn sets pool size to 0
        self.test_service.stop()
        self.assertEqual(0, self.test_service.server._pool.size)

        # Resetting pool size to default
        self.test_service.reset()
        self.test_service.start()
        self.assertGreater(self.test_service.server._pool.size, 0)
        wsgi.Loader.load_app.assert_called_once_with("test_service")

    @mock.patch('oslo_service.wsgi.Server')
    @mock.patch('oslo_service.wsgi.Loader')
    def test_ssl_enabled(self, mock_loader, mock_server):
        self.override_config('osapi_share_use_ssl', True)
        service.WSGIService("osapi_share")

        mock_server.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                            port=mock.ANY,
                                            host=mock.ANY,
                                            use_ssl=True)

        self.assertTrue(mock_loader.called)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_ssh_utils.py0000664000175000017500000001613600000000000021116 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manila import exception
from manila import ssh_utils
from manila import test
from oslo_utils import uuidutils
import paramiko
from unittest import mock


class FakeSock(object):
    # Minimal stand-in for a paramiko transport socket.
    def settimeout(self, timeout):
        pass


class FakeTransport(object):

    def __init__(self):
        self.active = True
        self.sock = FakeSock()

    def set_keepalive(self, timeout):
        pass

    def is_active(self):
        return self.active


class FakeSSHClient(object):
    # Stand-in for paramiko.SSHClient; the uuid 'id' lets tests tell pooled
    # client instances apart.

    def __init__(self):
        self.id = uuidutils.generate_uuid()
        self.transport = FakeTransport()

    def set_missing_host_key_policy(self, policy):
        pass

    def connect(self, ip, port=22, username=None,
                password=None, key_filename=None, look_for_keys=None,
                timeout=10, banner_timeout=10):
        pass

    def get_transport(self):
        return self.transport

    def close(self):
        pass

    def __call__(self, *args, **kwargs):
        pass


class SSHPoolTestCase(test.TestCase):
    """Unit test for SSH Connection Pool."""

    def test_single_ssh_connect(self):
        # With max_size=1 the same client is handed out on every checkout.
        with mock.patch.object(paramiko, "SSHClient",
                               mock.Mock(return_value=FakeSSHClient())):
            sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test",
                                        password="test", min_size=1,
                                        max_size=1)
            with sshpool.item() as ssh:
                first_id = ssh.id

            with sshpool.item() as ssh:
                second_id = ssh.id

            self.assertEqual(first_id, second_id)
            paramiko.SSHClient.assert_called_once_with()

    def test_create_ssh_with_password(self):
        fake_ssh_client = mock.Mock()
        ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test",
                                     password="test")
        with mock.patch.object(paramiko, "SSHClient",
                               return_value=fake_ssh_client):
            ssh_pool.create()

            fake_ssh_client.connect.assert_called_once_with(
                "127.0.0.1", port=22, username="test",
                password="test", key_filename=None, look_for_keys=False,
                timeout=10, banner_timeout=10)

    def test_create_ssh_with_key(self):
        path_to_private_key = "/fakepath/to/privatekey"
        fake_ssh_client = mock.Mock()
        ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test",
                                     privatekey="/fakepath/to/privatekey")
        with mock.patch.object(paramiko, "SSHClient",
                               return_value=fake_ssh_client):
            ssh_pool.create()
            fake_ssh_client.connect.assert_called_once_with(
                "127.0.0.1", port=22, username="test",
                password=None, key_filename=path_to_private_key,
                look_for_keys=False, timeout=10, banner_timeout=10)

    def test_create_ssh_with_nothing(self):
        # No password/key: connect() should fall back to look_for_keys=True.
        fake_ssh_client = mock.Mock()
        ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test")
        with mock.patch.object(paramiko, "SSHClient",
                               return_value=fake_ssh_client):
            ssh_pool.create()
            fake_ssh_client.connect.assert_called_once_with(
                "127.0.0.1", port=22, username="test",
                password=None, key_filename=None, look_for_keys=True,
                timeout=10, banner_timeout=10)

    def test_create_ssh_error_connecting(self):
        # paramiko.SSHException is translated into manila's SSHException.
        attrs = {'connect.side_effect': paramiko.SSHException, }
        fake_ssh_client = mock.Mock(**attrs)
        ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test")
        with mock.patch.object(paramiko, "SSHClient",
                               return_value=fake_ssh_client):
            self.assertRaises(exception.SSHException, ssh_pool.create)
            fake_ssh_client.connect.assert_called_once_with(
                "127.0.0.1", port=22, username="test",
                password=None, key_filename=None, look_for_keys=True,
                timeout=10, banner_timeout=10)

    def test_closed_reopend_ssh_connections(self):
        with mock.patch.object(paramiko, "SSHClient",
                               mock.Mock(return_value=FakeSSHClient())):
            sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test",
                                        password="test", min_size=1,
                                        max_size=2)
            with sshpool.item() as ssh:
                first_id = ssh.id
            with sshpool.item() as ssh:
                second_id = ssh.id
                # Close the connection and test for a new connection
                ssh.get_transport().active = False
            self.assertEqual(first_id, second_id)
            paramiko.SSHClient.assert_called_once_with()

        # Expected new ssh pool
        with mock.patch.object(paramiko, "SSHClient",
                               mock.Mock(return_value=FakeSSHClient())):
            with sshpool.item() as ssh:
                third_id = ssh.id
            self.assertNotEqual(first_id, third_id)
            paramiko.SSHClient.assert_called_once_with()

    @mock.patch('builtins.open')
    @mock.patch('paramiko.SSHClient')
    @mock.patch('os.path.isfile', return_value=True)
    def test_sshpool_remove(self, mock_isfile, mock_sshclient, mock_open):
        ssh_to_remove = mock.Mock()
        mock_sshclient.side_effect = [mock.Mock(), ssh_to_remove, mock.Mock()]
        sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test",
                                    password="test", min_size=3, max_size=3)

        self.assertIn(ssh_to_remove, list(sshpool.free_items))

        sshpool.remove(ssh_to_remove)

        self.assertNotIn(ssh_to_remove, list(sshpool.free_items))

    @mock.patch('builtins.open')
    @mock.patch('paramiko.SSHClient')
    @mock.patch('os.path.isfile', return_value=True)
    def test_sshpool_remove_object_not_in_pool(self, mock_isfile,
                                               mock_sshclient, mock_open):
        # create an SSH Client that is not a part of sshpool.
        ssh_to_remove = mock.Mock()
        mock_sshclient.side_effect = [mock.Mock(), mock.Mock()]
        sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test",
                                    password="test", min_size=2, max_size=2)
        listBefore = list(sshpool.free_items)

        self.assertNotIn(ssh_to_remove, listBefore)

        sshpool.remove(ssh_to_remove)

        self.assertEqual(listBefore, list(sshpool.free_items))
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_test.py0000664000175000017500000000360400000000000020054 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the testing base code.""" from oslo_config import cfg import oslo_messaging as messaging from manila import rpc from manila import test class IsolationTestCase(test.TestCase): """Ensure that things are cleaned up after failed tests. These tests don't really do much here, but if isolation fails a bunch of other tests should fail. """ def test_service_isolation(self): self.start_service('share') def test_rpc_consumer_isolation(self): class NeverCalled(object): def __getattribute__(self, name): if name == 'target' or name == 'oslo_rpc_server_ping': # oslo.messaging >=5.31.0 explicitly looks for 'target' # on the endpoint and checks its type, so we can't avoid # it here. In 12.4.0, the package added a ping endpoint # Just ignore if either case. return assert False, "I should never get called - name: %s" % name target = messaging.Target(topic='share', server=cfg.CONF.host) server = rpc.get_server(target=target, endpoints=[NeverCalled()]) server.start() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_test_utils.py0000664000175000017500000000200500000000000021266 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from manila import test
from manila.tests import utils as test_utils


class TestUtilsTestCase(test.TestCase):
    def test_get_test_admin_context(self):
        """get_test_admin_context's return value behaves like admin context."""
        ctxt = test_utils.get_test_admin_context()

        # TODO(soren): This should verify the full interface context
        # objects expose.
        self.assertTrue(ctxt.is_admin)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/test_utils.py0000664000175000017500000006625300000000000020242 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara
# Copyright 2014 NetApp, Inc.
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime import json import time from unittest import mock import ddt from oslo_config import cfg from oslo_utils import encodeutils from oslo_utils import timeutils import tenacity from webob import exc import manila from manila.common import constants from manila import context from manila.db import api as db from manila import exception from manila import test from manila import utils CONF = cfg.CONF @ddt.ddt class GenericUtilsTestCase(test.TestCase): def test_service_is_up(self): fts_func = datetime.datetime.fromtimestamp fake_now = 1000 down_time = 5 self.flags(service_down_time=down_time) with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=fts_func(fake_now))): # Up (equal) service = {'updated_at': fts_func(fake_now - down_time), 'created_at': fts_func(fake_now - down_time)} result = utils.service_is_up(service) self.assertTrue(result) timeutils.utcnow.assert_called_once_with() with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=fts_func(fake_now))): # Up service = {'updated_at': fts_func(fake_now - down_time + 1), 'created_at': fts_func(fake_now - down_time + 1)} result = utils.service_is_up(service) self.assertTrue(result) timeutils.utcnow.assert_called_once_with() with mock.patch.object(timeutils, 'utcnow', mock.Mock(return_value=fts_func(fake_now))): # Down service = {'updated_at': fts_func(fake_now - down_time - 1), 'created_at': fts_func(fake_now - down_time - 1)} result = utils.service_is_up(service) self.assertFalse(result) timeutils.utcnow.assert_called_once_with() @ddt.data(['ssh', '-D', 'my_name@name_of_remote_computer'], ['echo', '"quoted arg with space"'], ['echo', "'quoted arg with space'"]) def test_check_ssh_injection(self, cmd): cmd_list = cmd self.assertIsNone(utils.check_ssh_injection(cmd_list)) @ddt.data(['ssh', 'my_name@ name_of_remote_computer'], ['||', 'my_name@name_of_remote_computer'], ['cmd', 'virus;ls'], ['cmd', '"arg\"withunescaped"'], ['cmd', 'virus;"quoted argument"'], ['echo', '"quoted 
argument";rm -rf'], ['echo', "'quoted argument `rm -rf`'"], ['echo', '"quoted";virus;"quoted"'], ['echo', '"quoted";virus;\'quoted\'']) def test_check_ssh_injection_on_error0(self, cmd): self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, cmd) @ddt.data( (("3G", "G"), 3.0), (("4.1G", "G"), 4.1), (("4,1G", "G"), 4.1), (("5.23G", "G"), 5.23), (("5,23G", "G"), 5.23), (("9728M", "G"), 9.5), (("8192K", "G"), 0.0078125), (("2T", "G"), 2048.0), (("2.1T", "G"), 2150.4), (("2,1T", "G"), 2150.4), (("3P", "G"), 3145728.0), (("3.4P", "G"), 3565158.4), (("3,4P", "G"), 3565158.4), (("9728M", "M"), 9728.0), (("9728.2381T", "T"), 9728.2381), (("9728,2381T", "T"), 9728.2381), (("0", "G"), 0.0), (("512", "M"), 0.00048828125), (("2097152.", "M"), 2.0), ((".1024", "K"), 0.0001), ((",1024", "K"), 0.0001), (("2048G", "T"), 2.0), (("65536G", "P"), 0.0625), ) @ddt.unpack def test_translate_string_size_to_float_positive(self, request, expected): actual = utils.translate_string_size_to_float(*request) self.assertEqual(expected, actual) @ddt.data( (None, "G"), ("fake", "G"), ("1fake", "G"), ("2GG", "G"), ("1KM", "G"), ("K1M", "G"), ("M1K", "G"), ("1.2fake", "G"), ("1,2fake", "G"), ("2.2GG", "G"), ("1.1KM", "G"), ("K2.2M", "G"), ("K2,2M", "G"), ("M2.2K", "G"), ("M2,2K", "G"), ("", "G"), (23, "G"), (23.0, "G"), ) @ddt.unpack def test_translate_string_size_to_float_negative(self, string, multiplier): actual = utils.translate_string_size_to_float(string, multiplier) self.assertIsNone(actual) class MonkeyPatchTestCase(test.TestCase): """Unit test for utils.monkey_patch().""" def setUp(self): super(MonkeyPatchTestCase, self).setUp() self.example_package = 'manila.tests.monkey_patch_example.' 
self.flags( monkey_patch=True, monkey_patch_modules=[self.example_package + 'example_a' + ':' + self.example_package + 'example_decorator']) def test_monkey_patch(self): utils.monkey_patch() manila.tests.monkey_patch_example.CALLED_FUNCTION = [] from manila.tests.monkey_patch_example import example_a from manila.tests.monkey_patch_example import example_b self.assertEqual('Example function', example_a.example_function_a()) exampleA = example_a.ExampleClassA() exampleA.example_method() ret_a = exampleA.example_method_add(3, 5) self.assertEqual(8, ret_a) self.assertEqual('Example function', example_b.example_function_b()) exampleB = example_b.ExampleClassB() exampleB.example_method() ret_b = exampleB.example_method_add(3, 5) self.assertEqual(8, ret_b) package_a = self.example_package + 'example_a.' self.assertIn(package_a + 'example_function_a', manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertIn(package_a + 'ExampleClassA.example_method', manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertIn(package_a + 'ExampleClassA.example_method_add', manila.tests.monkey_patch_example.CALLED_FUNCTION) package_b = self.example_package + 'example_b.' 
self.assertNotIn(package_b + 'example_function_b', manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertNotIn(package_b + 'ExampleClassB.example_method', manila.tests.monkey_patch_example.CALLED_FUNCTION) self.assertNotIn(package_b + 'ExampleClassB.example_method_add', manila.tests.monkey_patch_example.CALLED_FUNCTION) @ddt.ddt class CidrToNetmaskTestCase(test.TestCase): """Unit test for cidr to netmask.""" @ddt.data( ('10.0.0.0/0', '0.0.0.0'), ('10.0.0.0/24', '255.255.255.0'), ('10.0.0.0/5', '248.0.0.0'), ('10.0.0.0/32', '255.255.255.255'), ('10.0.0.1', '255.255.255.255'), ) @ddt.unpack def test_cidr_to_netmask(self, cidr, expected_netmask): result = utils.cidr_to_netmask(cidr) self.assertEqual(expected_netmask, result) @ddt.data( '10.0.0.0/33', '', '10.0.0.555/33' ) def test_cidr_to_netmask_invalid(self, cidr): self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr) @ddt.ddt class CidrToPrefixLenTestCase(test.TestCase): """Unit test for cidr to prefix length.""" @ddt.data( ('10.0.0.0/0', 0), ('10.0.0.0/24', 24), ('10.0.0.1', 32), ('fdf8:f53b:82e1::1/0', 0), ('fdf8:f53b:82e1::1/64', 64), ('fdf8:f53b:82e1::1', 128), ) @ddt.unpack def test_cidr_to_prefixlen(self, cidr, expected_prefixlen): result = utils.cidr_to_prefixlen(cidr) self.assertEqual(expected_prefixlen, result) @ddt.data( '10.0.0.0/33', '', '10.0.0.555/33', 'fdf8:f53b:82e1::1/129', 'fdf8:f53b:82e1::fffff' ) def test_cidr_to_prefixlen_invalid(self, cidr): self.assertRaises(exception.InvalidInput, utils.cidr_to_prefixlen, cidr) @ddt.ddt class ParseBoolValueTestCase(test.TestCase): @ddt.data( ('t', True), ('on', True), ('1', True), ('false', False), ('n', False), ('no', False), ('0', False),) @ddt.unpack def test_bool_with_valid_string(self, string, value): fake_dict = {'fake_key': string} result = utils.get_bool_from_api_params('fake_key', fake_dict) self.assertEqual(value, result) @ddt.data('None', 'invalid', 'falses') def test_bool_with_invalid_string(self, string): fake_dict = 
{'fake_key': string} self.assertRaises(exc.HTTPBadRequest, utils.get_bool_from_api_params, 'fake_key', fake_dict) @ddt.data('undefined', None) def test_bool_with_key_not_found_raise_error(self, def_val): fake_dict = {'fake_key1': 'value1'} self.assertRaises(exc.HTTPBadRequest, utils.get_bool_from_api_params, 'fake_key2', fake_dict, def_val) @ddt.data((False, False, False), (True, True, False), ('true', True, False), ('false', False, False), ('undefined', 'undefined', False), (False, False, True), ('true', True, True)) @ddt.unpack def test_bool_with_key_not_found(self, def_val, expected, strict): fake_dict = {'fake_key1': 'value1'} invalid_default = utils.get_bool_from_api_params('fake_key2', fake_dict, def_val, strict) self.assertEqual(expected, invalid_default) @ddt.ddt class IsValidIPVersion(test.TestCase): """Test suite for function 'is_valid_ip_address'.""" @ddt.data('0.0.0.0', '255.255.255.255', '192.168.0.1') def test_valid_v4(self, addr): for vers in (4, '4'): self.assertTrue(utils.is_valid_ip_address(addr, vers)) @ddt.data( '2001:cdba:0000:0000:0000:0000:3257:9652', '2001:cdba:0:0:0:0:3257:9652', '2001:cdba::3257:9652') def test_valid_v6(self, addr): for vers in (6, '6'): self.assertTrue(utils.is_valid_ip_address(addr, vers)) @ddt.data( {'addr': '1.1.1.1', 'vers': 3}, {'addr': '1.1.1.1', 'vers': 5}, {'addr': '1.1.1.1', 'vers': 7}, {'addr': '2001:cdba::3257:9652', 'vers': '3'}, {'addr': '2001:cdba::3257:9652', 'vers': '5'}, {'addr': '2001:cdba::3257:9652', 'vers': '7'}) @ddt.unpack def test_provided_invalid_version(self, addr, vers): self.assertRaises( exception.ManilaException, utils.is_valid_ip_address, addr, vers) def test_provided_none_version(self): self.assertRaises(TypeError, utils.is_valid_ip_address, '', None) @ddt.data(None, 'fake', '1.1.1.1') def test_provided_invalid_v6_address(self, addr): for vers in (6, '6'): self.assertFalse(utils.is_valid_ip_address(addr, vers)) @ddt.data(None, 'fake', '255.255.255.256', '2001:cdba::3257:9652', '') def 
test_provided_invalid_v4_address(self, addr): for vers in (4, '4'): self.assertFalse(utils.is_valid_ip_address(addr, vers)) class Comparable(utils.ComparableMixin): def __init__(self, value): self.value = value def _cmpkey(self): return self.value class TestComparableMixin(test.TestCase): def setUp(self): super(TestComparableMixin, self).setUp() self.one = Comparable(1) self.two = Comparable(2) def test_lt(self): self.assertTrue(self.one < self.two) self.assertFalse(self.two < self.one) self.assertFalse(self.one < self.one) def test_le(self): self.assertTrue(self.one <= self.two) self.assertFalse(self.two <= self.one) self.assertTrue(self.one <= self.one) def test_eq(self): self.assertFalse(self.one == self.two) self.assertFalse(self.two == self.one) self.assertTrue(self.one == self.one) def test_ge(self): self.assertFalse(self.one >= self.two) self.assertTrue(self.two >= self.one) self.assertTrue(self.one >= self.one) def test_gt(self): self.assertFalse(self.one > self.two) self.assertTrue(self.two > self.one) self.assertFalse(self.one > self.one) def test_ne(self): self.assertTrue(self.one != self.two) self.assertTrue(self.two != self.one) self.assertFalse(self.one != self.one) def test_compare(self): self.assertEqual(NotImplemented, self.one._compare(1, self.one._cmpkey)) class WrongException(Exception): pass class TestRetryDecorator(test.TestCase): def test_no_retry_required(self): self.counter = 0 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(retry_param=exception.ManilaException, interval=2, retries=3, backoff_rate=2) def succeeds(): self.counter += 1 return 'success' ret = succeeds() self.assertFalse(mock_sleep.called) self.assertEqual('success', ret) self.assertEqual(1, self.counter) def test_no_retry_required_random(self): self.counter = 0 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(retry_param=exception.ManilaException, interval=2, retries=3, backoff_rate=2, wait_random=True) def succeeds(): self.counter += 1 
return 'success' ret = succeeds() self.assertFalse(mock_sleep.called) self.assertEqual('success', ret) self.assertEqual(1, self.counter) def test_retries_once(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(retry_param=exception.ManilaException, interval=interval, retries=retries, backoff_rate=backoff_rate) def fails_once(): self.counter += 1 if self.counter < 2: raise exception.ManilaException(data='fake') else: return 'success' ret = fails_once() self.assertEqual('success', ret) self.assertEqual(2, self.counter) self.assertEqual(1, mock_sleep.call_count) mock_sleep.assert_called_with(interval) def test_retries_once_random(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(retry_param=exception.ManilaException, interval=interval, retries=retries, backoff_rate=backoff_rate, wait_random=True) def fails_once(): self.counter += 1 if self.counter < 2: raise exception.ManilaException(data='fake') else: return 'success' ret = fails_once() self.assertEqual('success', ret) self.assertEqual(2, self.counter) self.assertEqual(1, mock_sleep.call_count) self.assertTrue(mock_sleep.called) def test_limit_is_reached(self): self.counter = 0 retries = 3 interval = 2 backoff_rate = 4 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(retry_param=exception.ManilaException, interval=interval, retries=retries, backoff_rate=backoff_rate) def always_fails(): self.counter += 1 raise exception.ManilaException(data='fake') self.assertRaises(exception.ManilaException, always_fails) self.assertEqual(retries, self.counter) expected_sleep_arg = [] for i in range(retries): if i > 0: interval *= (backoff_rate ** (i - 1)) expected_sleep_arg.append(float(interval)) mock_sleep.assert_has_calls( list(map(mock.call, expected_sleep_arg))) def test_wrong_exception_no_retry(self): with mock.patch('tenacity.nap.sleep') as 
mock_sleep: @utils.retry(retry_param=exception.ManilaException) def raise_unexpected_error(): raise WrongException("wrong exception") self.assertRaises(WrongException, raise_unexpected_error) self.assertFalse(mock_sleep.called) @mock.patch('tenacity.nap.sleep') def test_retry_exit_code(self, sleep_mock): exit_code = 5 exception = utils.processutils.ProcessExecutionError @utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code) def raise_retriable_exit_code(): raise exception(exit_code=exit_code) self.assertRaises(exception, raise_retriable_exit_code) # we should be sleeping 1 less time than the number of retries, # default (10) self.assertEqual(9, sleep_mock.call_count) sleep_mock.assert_has_calls([mock.call(1.0), mock.call(2.0), mock.call(4.0), mock.call(8.0), mock.call(16.0), mock.call(32.0), mock.call(64.0), mock.call(128.0), mock.call(256.0)]) @mock.patch('tenacity.nap.sleep') def test_retry_exit_code_non_retriable(self, sleep_mock): exit_code = 5 exception = utils.processutils.ProcessExecutionError @utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code) def raise_non_retriable_exit_code(): raise exception(exit_code=exit_code + 1) self.assertRaises(exception, raise_non_retriable_exit_code) sleep_mock.assert_not_called() def test_infinite_retry(self): retry_param = exception.ManilaException class FakeTenacityRetry(tenacity.Retrying): def __init__(self, *args, **kwargs): pass with mock.patch('tenacity.Retrying', autospec=FakeTenacityRetry) as tenacity_retry: @utils.retry(retry_param=retry_param, wait_random=True, infinite=True) def some_retriable_function(): pass some_retriable_function() tenacity_retry.assert_called_once_with( sleep=tenacity.nap.sleep, before_sleep=mock.ANY, after=mock.ANY, stop=tenacity.stop.stop_never, reraise=True, retry=utils.IsAMatcher(tenacity.retry_if_exception_type), wait=utils.IsAMatcher(tenacity.wait_random_exponential)) def test_max_backoff_sleep(self): self.counter = 0 with mock.patch('tenacity.nap.sleep') as 
mock_sleep: @utils.retry(retry_param=exception.ManilaException, infinite=True, backoff_rate=2, backoff_sleep_max=4) def fails_then_passes(): self.counter += 1 if self.counter < 5: raise exception.ManilaException(data='fake') else: return 'success' self.assertEqual('success', fails_then_passes()) mock_sleep.assert_has_calls( [mock.call(1), mock.call(2), mock.call(4), mock.call(4)]) @ddt.ddt class RequireDriverInitializedTestCase(test.TestCase): @ddt.data(True, False) def test_require_driver_initialized(self, initialized): class FakeDriver(object): @property def initialized(self): return initialized class FakeException(Exception): pass class FakeManager(object): driver = FakeDriver() @utils.require_driver_initialized def call_me(self): raise FakeException( "Should be raised only if manager.driver.initialized " "('%s') is equal to 'True'." % initialized) if initialized: expected_exception = FakeException else: expected_exception = exception.DriverNotInitialized self.assertRaises(expected_exception, FakeManager().call_me) @ddt.ddt class ShareMigrationHelperTestCase(test.TestCase): """Tests DataMigrationHelper.""" def setUp(self): super(ShareMigrationHelperTestCase, self).setUp() self.context = context.get_admin_context() def test_wait_for_access_update(self): sid = 1 fake_share_instances = [ { 'id': sid, 'access_rules_status': constants.SHARE_INSTANCE_RULES_SYNCING, }, { 'id': sid, 'access_rules_status': constants.STATUS_ACTIVE, }, ] self.mock_object(time, 'sleep') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=fake_share_instances)) utils.wait_for_access_update(self.context, db, fake_share_instances[0], 1) db.share_instance_get.assert_has_calls( [mock.call(mock.ANY, sid), mock.call(mock.ANY, sid)] ) time.sleep.assert_called_once_with(1.414) @ddt.data( ( { 'id': '1', 'access_rules_status': constants.SHARE_INSTANCE_RULES_ERROR, }, exception.ShareMigrationFailed ), ( { 'id': '1', 'access_rules_status': constants.SHARE_INSTANCE_RULES_SYNCING, }, 
exception.ShareMigrationFailed ), ) @ddt.unpack def test_wait_for_access_update_invalid(self, fake_instance, expected_exc): self.mock_object(time, 'sleep') self.mock_object(db, 'share_instance_get', mock.Mock(return_value=fake_instance)) now = time.time() timeout = now + 100 self.mock_object(time, 'time', mock.Mock(side_effect=[now, timeout])) self.assertRaises(expected_exc, utils.wait_for_access_update, self.context, db, fake_instance, 1) @ddt.ddt class ConvertStrTestCase(test.TestCase): def test_convert_str_str_input(self): self.mock_object(encodeutils, 'safe_encode') input_value = "string_input" output_value = utils.convert_str(input_value) self.assertEqual(0, encodeutils.safe_encode.call_count) self.assertEqual(input_value, output_value) def test_convert_str_bytes_input(self): self.mock_object(encodeutils, 'safe_encode') input_value = bytes("binary_input", "utf-8") output_value = utils.convert_str(input_value) self.assertEqual(0, encodeutils.safe_encode.call_count) self.assertIsInstance(output_value, str) self.assertEqual(str("binary_input"), output_value) @ddt.ddt class TestDisableNotifications(test.TestCase): def test_do_nothing_getter(self): """Test any attribute will always return the same instance (self).""" donothing = utils.DoNothing() self.assertIs(donothing, donothing.anyname) def test_do_nothing_caller(self): """Test calling the object will always return the same instance.""" donothing = utils.DoNothing() self.assertIs(donothing, donothing()) def test_do_nothing_json_serializable(self): """Test calling the object will always return the same instance.""" donothing = utils.DoNothing() self.assertEqual('""', json.dumps(donothing)) @utils.if_notifications_enabled def _decorated_method(self): return mock.sentinel.success def test_if_notification_enabled_when_enabled(self): """Test method is called when notifications are enabled.""" result = self._decorated_method() self.assertEqual(mock.sentinel.success, result) @ddt.data([], ['noop'], ['noop', 'noop']) 
def test_if_notification_enabled_when_disabled(self, driver): """Test method is not called when notifications are disabled.""" self.override_config('driver', driver, group='oslo_messaging_notifications') result = self._decorated_method() self.assertEqual(utils.DO_NOTHING, result) @ddt.ddt class TestAllTenantsValueCase(test.TestCase): @ddt.data(None, '', '1', 'true', 'True') def test_is_all_tenants_true(self, value): search_opts = {'all_tenants': value} self.assertTrue(utils.is_all_tenants(search_opts)) self.assertIn('all_tenants', search_opts) @ddt.data('0', 'false', 'False') def test_is_all_tenants_false(self, value): search_opts = {'all_tenants': value} self.assertFalse(utils.is_all_tenants(search_opts)) self.assertIn('all_tenants', search_opts) def test_is_all_tenants_missing(self): self.assertFalse(utils.is_all_tenants({})) def test_is_all_tenants_invalid(self): search_opts = {'all_tenants': 'wonk'} self.assertRaises(exception.InvalidInput, utils.is_all_tenants, search_opts) @ddt.data( ("8minutes", "PT8M"), ("10hours", "PT10H"), ("6months", "P6M"), ("2years", "P2Y") ) @ddt.unpack def test_convert_time_duration_to_iso_format(self, time_duration, expected): result = utils.convert_time_duration_to_iso_format(time_duration) self.assertEqual(expected, result) def test_convert_time_duration_to_iso_format_negative(self): self.assertRaises(exception.ManilaException, utils.convert_time_duration_to_iso_format, 'invalid_duration') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/utils.py0000664000175000017500000001033300000000000017173 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC # Copyright 2015 Mirantic, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import os

import fixtures
from oslo_config import cfg

from manila import context
from manila import utils

CONF = cfg.CONF


class NamedBinaryStr(bytes):
    """Wrapper for bytes to facilitate overriding __name__."""


class NamedUnicodeStr(str):
    """Unicode string look-alike to facilitate overriding __name__."""

    def __init__(self, value):
        self._value = value

    def __str__(self):
        return self._value

    def encode(self, enc):
        return self._value.encode(enc)

    def __format__(self, formatstr):
        """Workaround for ddt bug.

        DDT will always call __format__ even when __name__ exists, which
        blows up for Unicode strings under Py2.
        """
        return ''


class NamedDict(dict):
    """Wrapper for dict to facilitate overriding __name__."""


class NamedTuple(tuple):
    """Wrapper for tuple to facilitate overriding __name__."""


def annotated(test_name, test_input):
    """Wrap a ddt test datum so it carries a readable ``__name__``.

    :param test_name: name to expose through the wrapper's __name__.
    :param test_input: dict, str, tuple or bytes datum to wrap.
    :returns: a Named* subclass instance equal to ``test_input`` with
        ``__name__`` set to ``test_name``.
    """
    if isinstance(test_input, dict):
        annotated_input = NamedDict(test_input)
    elif isinstance(test_input, str):
        annotated_input = NamedUnicodeStr(test_input)
    elif isinstance(test_input, tuple):
        annotated_input = NamedTuple(test_input)
    else:
        # Anything else is treated as binary data.
        annotated_input = NamedBinaryStr(test_input)

    setattr(annotated_input, '__name__', test_name)
    return annotated_input


def get_test_admin_context():
    """Return an admin RequestContext suitable for tests."""
    return context.get_admin_context()


def is_manila_installed():
    """Return True when a manila egg-info directory is present.

    NOTE(review): the relative path assumes a specific working directory;
    callers should confirm it still matches the test layout.
    """
    # Collapsed from an if/else that returned the boolean explicitly.
    return os.path.exists('../../manila.manila.egg-info')


def set_timeout(timeout):
    """Timeout decorator for unit test methods.

    Use this decorator for tests that are expected to pass in very
    specific amount of time, not common for all other tests.
    It can have either big or small value.

    :param timeout: timeout in seconds applied via fixtures.Timeout.
    """
    def _decorator(f):
        @functools.wraps(f)
        def _wrapper(self, *args, **kwargs):
            # gentle=True raises an exception in the test instead of
            # killing the whole process on timeout.
            self.useFixture(fixtures.Timeout(timeout, gentle=True))
            return f(self, *args, **kwargs)
        return _wrapper
    return _decorator


class create_temp_config_with_opts(object):
    """Creates temporary config file with provided opts and values.

    usage:
        data = {'FOO_GROUP': {'foo_opt': 'foo_value'}}
        assert CONF.FOO_GROUP.foo_opt != 'foo_value'
        with create_temp_config_with_opts(data):
            assert CONF.FOO_GROUP.foo_opt == 'foo_value'
        assert CONF.FOO_GROUP.foo_opt != 'foo_value'

    :param data: dict -- expected dict with two layers, first is name of
        config group and second is opts with values. Example:
        {'DEFAULT': {'foo_opt': 'foo_v'},
         'BAR_GROUP': {'bar_opt': 'bar_v'}}
    """

    def __init__(self, data):
        self.data = data

    def __enter__(self):
        config_filename = 'fake_config'
        with utils.tempdir() as tmpdir:
            tmpfilename = os.path.join(tmpdir, '%s.conf' % config_filename)
            with open(tmpfilename, "w") as configfile:
                for group, opts in self.data.items():
                    configfile.write("""[%s]\n""" % group)
                    for opt, value in opts.items():
                        configfile.write(
                            """%(k)s = %(v)s\n""" % {'k': opt, 'v': value})
                    configfile.write("""\n""")

            # Add config file with updated opts
            CONF.default_config_files = [configfile.name]
            # Reload config instance to use redefined opts
            CONF.reload_config_files()

        return CONF

    def __exit__(self, exc_type, exc_value, exc_traceback):
        return False  # do not suppress errors
MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX /l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ 4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm 2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ +C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy 
I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY 9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA WoRMgEwjGJWqzhJZUYpUAQ== -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/var/certificate.crt0000664000175000017500000000350200000000000021245 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q 8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG /64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y 0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 
KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/var/privatekey.key0000664000175000017500000000625300000000000021154 0ustar00zuulzuul00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe 4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD /P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN +Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ 8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh /W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw 
1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U 4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn 7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL 5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB 1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS 8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= -----END RSA PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04567 
manila-21.0.0/manila/tests/volume/0000775000175000017500000000000000000000000016770 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/volume/__init__.py0000664000175000017500000000000000000000000021067 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/volume/test_cinder.py0000664000175000017500000002624200000000000021653 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from cinderclient import exceptions as cinder_exception import ddt from manila import context from manila import exception from manila import test from manila.tests import utils as test_utils from manila.volume import cinder class FakeCinderClient(object): class Volumes(object): def get(self, volume_id): return {'id': volume_id} def list(self, detailed, search_opts={}): return [{'id': 'id1'}, {'id': 'id2'}] def create(self, *args, **kwargs): return {'id': 'created_id'} def __getattr__(self, item): return None def __init__(self): self.volumes = self.Volumes() self.volume_snapshots = self.volumes def get_fake_auth_obj(): return type('FakeAuthObj', (object, ), {'get_client': mock.Mock()}) class CinderclientTestCase(test.TestCase): @mock.patch('manila.volume.cinder.AUTH_OBJ', None) def test_no_auth_obj(self): mock_client_loader = self.mock_object( cinder.client_auth, 'AuthClientLoader') fake_context = 'fake_context' data = { 'cinder': { 'http_retries': 3, 'endpoint_type': 'internalURL', 'region_name': 'foo_region_name', } } with test_utils.create_temp_config_with_opts(data): cinder.cinderclient(fake_context) mock_client_loader.assert_called_once_with( client_class=cinder.cinder_client.Client, cfg_group=cinder.CINDER_GROUP ) mock_client_loader.return_value.get_client.assert_called_once_with( fake_context, retries=data['cinder']['http_retries'], endpoint_type=data['cinder']['endpoint_type'], region_name=data['cinder']['region_name'], ) @mock.patch('manila.volume.cinder.AUTH_OBJ', get_fake_auth_obj()) def test_with_auth_obj(self): fake_context = 'fake_context' data = { 'cinder': { 'http_retries': 3, 'endpoint_type': 'internalURL', 'region_name': 'foo_region_name', } } with test_utils.create_temp_config_with_opts(data): cinder.cinderclient(fake_context) cinder.AUTH_OBJ.get_client.assert_called_once_with( fake_context, retries=data['cinder']['http_retries'], endpoint_type=data['cinder']['endpoint_type'], region_name=data['cinder']['region_name'], ) 
@ddt.ddt class CinderApiTestCase(test.TestCase): def setUp(self): super(CinderApiTestCase, self).setUp() self.api = cinder.API() self.cinderclient = FakeCinderClient() self.ctx = context.get_admin_context() self.mock_object(cinder, 'cinderclient', mock.Mock(return_value=self.cinderclient)) self.mock_object(cinder, '_untranslate_volume_summary_view', lambda ctx, vol: vol) self.mock_object(cinder, '_untranslate_snapshot_summary_view', lambda ctx, snap: snap) def test_get(self): volume_id = 'volume_id1' result = self.api.get(self.ctx, volume_id) self.assertEqual(volume_id, result['id']) @ddt.data( {'cinder_e': cinder_exception.NotFound(404), 'manila_e': exception.VolumeNotFound}, {'cinder_e': cinder_exception.BadRequest(400), 'manila_e': exception.InvalidInput}, ) @ddt.unpack def test_get_failed(self, cinder_e, manila_e): cinder.cinderclient.side_effect = cinder_e volume_id = 'volume_id' self.assertRaises(manila_e, self.api.get, self.ctx, volume_id) def test_create(self): result = self.api.create(self.ctx, 1, '', '') self.assertEqual('created_id', result['id']) def test_create_failed(self): cinder.cinderclient.side_effect = cinder_exception.BadRequest(400) self.assertRaises(exception.InvalidInput, self.api.create, self.ctx, 1, '', '') def test_create_not_found_error(self): cinder.cinderclient.side_effect = cinder_exception.NotFound(404) self.assertRaises(exception.NotFound, self.api.create, self.ctx, 1, '', '') def test_create_failed_exception(self): cinder.cinderclient.side_effect = Exception("error msg") self.assertRaises(exception.ManilaException, self.api.create, self.ctx, 1, '', '') def test_get_all(self): cinder._untranslate_volume_summary_view.return_value = ['id1', 'id2'] self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], self.api.get_all(self.ctx)) def test_check_attach_volume_status_error(self): volume = {'status': 'error'} self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_volume_already_attached(self): 
volume = {'status': 'available'} volume['attach_status'] = "attached" self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume) def test_check_attach_availability_zone_differs(self): volume = {'status': 'available'} volume['attach_status'] = "detached" instance = {'availability_zone': 'zone1'} volume['availability_zone'] = 'zone2' cinder.CONF.set_override('cross_az_attach', False, 'cinder') self.assertRaises(exception.InvalidVolume, self.api.check_attach, self.ctx, volume, instance) volume['availability_zone'] = 'zone1' self.assertIsNone(self.api.check_attach(self.ctx, volume, instance)) cinder.CONF.reset() def test_check_attach(self): volume = {'status': 'available'} volume['attach_status'] = "detached" volume['availability_zone'] = 'zone1' instance = {'availability_zone': 'zone1'} cinder.CONF.set_override('cross_az_attach', False, 'cinder') self.assertIsNone(self.api.check_attach(self.ctx, volume, instance)) cinder.CONF.reset() def test_check_detach(self): volume = {'status': 'available'} self.assertRaises(exception.InvalidVolume, self.api.check_detach, self.ctx, volume) volume['status'] = 'non-available' self.assertIsNone(self.api.check_detach(self.ctx, volume)) def test_update(self): fake_volume = {'fake': 'fake'} self.mock_object(self.cinderclient.volumes, 'get', mock.Mock(return_value=fake_volume)) self.mock_object(self.cinderclient.volumes, 'update') fake_volume_id = 'fake_volume' fake_data = {'test': 'test'} self.api.update(self.ctx, fake_volume_id, fake_data) self.cinderclient.volumes.get.assert_called_once_with(fake_volume_id) self.cinderclient.volumes.update.assert_called_once_with(fake_volume, **fake_data) def test_reserve_volume(self): self.mock_object(self.cinderclient.volumes, 'reserve') self.api.reserve_volume(self.ctx, 'id1') self.cinderclient.volumes.reserve.assert_called_once_with('id1') def test_unreserve_volume(self): self.mock_object(self.cinderclient.volumes, 'unreserve') self.api.unreserve_volume(self.ctx, 'id1') 
self.cinderclient.volumes.unreserve.assert_called_once_with('id1') def test_begin_detaching(self): self.mock_object(self.cinderclient.volumes, 'begin_detaching') self.api.begin_detaching(self.ctx, 'id1') self.cinderclient.volumes.begin_detaching.assert_called_once_with( 'id1') def test_roll_detaching(self): self.mock_object(self.cinderclient.volumes, 'roll_detaching') self.api.roll_detaching(self.ctx, 'id1') self.cinderclient.volumes.roll_detaching.assert_called_once_with('id1') def test_attach(self): self.mock_object(self.cinderclient.volumes, 'attach') self.api.attach(self.ctx, 'id1', 'uuid', 'point') self.cinderclient.volumes.attach.assert_called_once_with('id1', 'uuid', 'point') def test_detach(self): self.mock_object(self.cinderclient.volumes, 'detach') self.api.detach(self.ctx, 'id1') self.cinderclient.volumes.detach.assert_called_once_with('id1') def test_initialize_connection(self): self.mock_object(self.cinderclient.volumes, 'initialize_connection') self.api.initialize_connection(self.ctx, 'id1', 'connector') (self.cinderclient.volumes.initialize_connection. assert_called_once_with('id1', 'connector')) def test_terminate_connection(self): self.mock_object(self.cinderclient.volumes, 'terminate_connection') self.api.terminate_connection(self.ctx, 'id1', 'connector') (self.cinderclient.volumes.terminate_connection. 
assert_called_once_with('id1', 'connector')) def test_delete(self): self.mock_object(self.cinderclient.volumes, 'delete') self.api.delete(self.ctx, 'id1') self.cinderclient.volumes.delete.assert_called_once_with('id1') def test_get_snapshot(self): snapshot_id = 'snapshot_id1' result = self.api.get_snapshot(self.ctx, snapshot_id) self.assertEqual(snapshot_id, result['id']) def test_get_snapshot_failed(self): cinder.cinderclient.side_effect = cinder_exception.NotFound(404) snapshot_id = 'snapshot_id' self.assertRaises(exception.VolumeSnapshotNotFound, self.api.get_snapshot, self.ctx, snapshot_id) def test_get_all_snapshots(self): cinder._untranslate_snapshot_summary_view.return_value = ['id1', 'id2'] self.assertEqual([{'id': 'id1'}, {'id': 'id2'}], self.api.get_all_snapshots(self.ctx)) def test_create_snapshot(self): result = self.api.create_snapshot(self.ctx, {'id': 'id1'}, '', '') self.assertEqual('created_id', result['id']) def test_create_force(self): result = self.api.create_snapshot_force(self.ctx, {'id': 'id1'}, '', '') self.assertEqual('created_id', result['id']) def test_delete_snapshot(self): self.mock_object(self.cinderclient.volume_snapshots, 'delete') self.api.delete_snapshot(self.ctx, 'id1') self.cinderclient.volume_snapshots.delete.assert_called_once_with( 'id1') ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04567 manila-21.0.0/manila/tests/wsgi/0000775000175000017500000000000000000000000016432 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/wsgi/__init__.py0000664000175000017500000000000000000000000020531 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/wsgi/test_common.py0000664000175000017500000000271700000000000021342 0ustar00zuulzuul00000000000000# Copyright 
2017 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from manila import test from manila.wsgi import common class FakeApp(common.Application): def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) class WSGICommonTestCase(test.TestCase): def test_application_factory(self): fake_global_config = mock.Mock() kwargs = {"k1": "v1", "k2": "v2"} result = FakeApp.factory(fake_global_config, **kwargs) fake_global_config.assert_not_called() self.assertIsInstance(result, FakeApp) for k, v in kwargs.items(): self.assertTrue(hasattr(result, k)) self.assertEqual(getattr(result, k), v) def test_application___call__(self): self.assertRaises( NotImplementedError, common.Application(), 'fake_environ', 'fake_start_response') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/wsgi/test_wsgi.py0000664000175000017500000000352500000000000021021 0ustar00zuulzuul00000000000000# Copyright 2017 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from manila import test from manila.wsgi import wsgi class WSGITestCase(test.TestCase): def test_initialize_application(self): self.mock_object(wsgi.logging, 'register_options') self.mock_object(wsgi.cfg.ConfigOpts, '__call__') self.mock_object(wsgi.config, 'verify_share_protocols') self.mock_object(wsgi.logging, 'setup') self.mock_object(wsgi.rpc, 'init') self.mock_object(wsgi.wsgi, 'Loader') wsgi.sys.argv = ['--verbose', '--debug'] result = wsgi.initialize_application() self.assertEqual( wsgi.wsgi.Loader.return_value.load_app.return_value, result) wsgi.logging.register_options.assert_called_once_with(mock.ANY) wsgi.cfg.ConfigOpts.__call__.assert_called_once_with( mock.ANY, project="manila", version=wsgi.version.version_string()) wsgi.config.verify_share_protocols.assert_called_once_with() wsgi.logging.setup.assert_called_once_with(mock.ANY, "manila") wsgi.rpc.init.assert_called_once_with(mock.ANY) wsgi.wsgi.Loader.assert_called_once_with(mock.ANY) wsgi.wsgi.Loader.return_value.load_app.assert_called_once_with( name='osapi_share') ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04567 manila-21.0.0/manila/tests/xenapi/0000775000175000017500000000000000000000000016745 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/tests/xenapi/__init__.py0000664000175000017500000000000000000000000021044 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04567 manila-21.0.0/manila/transfer/0000775000175000017500000000000000000000000016143 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/transfer/__init__.py0000664000175000017500000000000000000000000020242 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/transfer/api.py0000664000175000017500000004732500000000000017301 0ustar00zuulzuul00000000000000# Copyright (C) 2022 China Telecom Digital Intelligence. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to transferring ownership of shares. 
""" import hashlib import hmac import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from manila.common import constants from manila.db import base from manila import exception from manila.i18n import _ from manila import policy from manila import quota from manila.share import api as share_api from manila.share import share_types from manila.share import utils as share_utils share_transfer_opts = [ cfg.IntOpt('share_transfer_salt_length', default=8, help='The number of characters in the salt.', min=8, max=255), cfg.IntOpt('share_transfer_key_length', default=16, help='The number of characters in the autogenerated auth key.', min=16, max=255), ] CONF = cfg.CONF CONF.register_opts(share_transfer_opts) LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS class API(base.Base): """API for interacting share transfers.""" def __init__(self): self.share_api = share_api.API() super().__init__() def get(self, context, transfer_id): transfer = self.db.transfer_get(context, transfer_id) return transfer def delete(self, context, transfer_id): """Delete a share transfer.""" transfer = self.db.transfer_get(context, transfer_id) policy.check_policy(context, 'share_transfer', 'delete', target_obj={ 'project_id': transfer['source_project_id']}) update_share_status = True share_ref = None try: share_ref = self.db.share_get(context, transfer.resource_id) except exception.NotFound: update_share_status = False if update_share_status: share_instance = share_ref['instance'] if share_ref['status'] != constants.STATUS_AWAITING_TRANSFER: msg = (_('Transfer %(transfer_id)s: share id %(share_id)s ' 'expected in awaiting_transfer state.')) msg_payload = {'transfer_id': transfer_id, 'share_id': share_ref['id']} LOG.error(msg, msg_payload) raise exception.InvalidShare(reason=msg) if update_share_status: share_utils.notify_about_share_usage(context, share_ref, share_instance, "transfer.delete.start") 
self.db.transfer_destroy(context, transfer_id, update_share_status=update_share_status) if update_share_status: share_utils.notify_about_share_usage(context, share_ref, share_instance, "transfer.delete.end") LOG.info('Transfer %s has been deleted successful.', transfer_id) def get_all(self, context, limit=None, sort_key=None, sort_dir=None, filters=None, offset=None): filters = filters or {} all_tenants = strutils.bool_from_string(filters.pop('all_tenants', 'false')) query_by_project = False if all_tenants: try: policy.check_policy(context, 'share_transfer', 'get_all_tenant') except exception.PolicyNotAuthorized: query_by_project = True else: query_by_project = True if query_by_project: transfers = self.db.transfer_get_all_by_project( context, context.project_id, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters, offset=offset) else: transfers = self.db.transfer_get_all(context, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters, offset=offset) return transfers def _get_random_string(self, length): """Get a random hex string of the specified length.""" rndstr = "" # Note that the string returned by this function must contain only # characters that the recipient can enter on their keyboard. The # function sha256().hexdigit() achieves this by generating a hash # which will only contain hexadecimal digits. 
while len(rndstr) < length: rndstr += hashlib.sha256(os.urandom(255)).hexdigest() return rndstr[0:length] def _get_crypt_hash(self, salt, auth_key): """Generate a random hash based on the salt and the auth key.""" def _format_str(input_str): if not isinstance(input_str, (bytes, str)): input_str = str(input_str) if isinstance(input_str, str): input_str = input_str.encode('utf-8') return input_str salt = _format_str(salt) auth_key = _format_str(auth_key) return hmac.new(salt, auth_key, hashlib.sha256).hexdigest() def create(self, context, share_id, display_name): """Creates an entry in the transfers table.""" LOG.debug("Generating transfer record for share %s", share_id) try: share_ref = self.share_api.get(context, share_id) except exception.NotFound: msg = _("Share specified was not found.") raise exception.InvalidShare(reason=msg) policy.check_policy(context, "share_transfer", "create", target_obj=share_ref) share_instance = share_ref['instance'] mount_point_name = share_instance['mount_point_name'] if (mount_point_name and mount_point_name.startswith(share_ref['project_id'])): msg = _('Share %s has a custom mount_point_name %s.' ' This has the project_id encoded in it.' 
' Transferring such' ' a share isn\'t supported') % (share_ref['name'], mount_point_name) raise exception.Invalid(reason=msg) if share_ref['status'] != "available": raise exception.InvalidShare(reason=_("Share's status must be " "available")) if share_ref['share_network_id']: raise exception.InvalidShare(reason=_( "Shares exported over share networks cannot be transferred.")) if share_ref['share_group_id']: raise exception.InvalidShare(reason=_( "Shares within share groups cannot be transferred.")) if share_ref.has_replicas: raise exception.InvalidShare(reason=_( "Shares with replicas cannot be transferred.")) snapshots = self.db.share_snapshot_get_all_for_share(context, share_id) for snapshot in snapshots: if snapshot['status'] != "available": msg = _("Snapshot: %s status must be " "available") % snapshot['id'] raise exception.InvalidSnapshot(reason=msg) share_utils.notify_about_share_usage(context, share_ref, share_instance, "transfer.create.start") # The salt is just a short random string. 
salt = self._get_random_string(CONF.share_transfer_salt_length) auth_key = self._get_random_string(CONF.share_transfer_key_length) crypt_hash = self._get_crypt_hash(salt, auth_key) transfer_rec = {'resource_type': constants.SHARE_RESOURCE_TYPE, 'resource_id': share_id, 'display_name': display_name, 'salt': salt, 'crypt_hash': crypt_hash, 'expires_at': None, 'source_project_id': share_ref['project_id']} try: transfer = self.db.transfer_create(context, transfer_rec) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to create transfer record for %s", share_id) share_utils.notify_about_share_usage(context, share_ref, share_instance, "transfer.create.end") return {'id': transfer['id'], 'resource_type': transfer['resource_type'], 'resource_id': transfer['resource_id'], 'display_name': transfer['display_name'], 'auth_key': auth_key, 'created_at': transfer['created_at'], 'source_project_id': transfer['source_project_id'], 'destination_project_id': transfer['destination_project_id'], 'accepted': transfer['accepted'], 'expires_at': transfer['expires_at']} def _handle_snapshot_quota(self, context, snapshots, donor_id): snapshots_num = len(snapshots) share_snap_sizes = 0 for snapshot in snapshots: share_snap_sizes += snapshot['size'] try: reserve_opts = {'snapshots': snapshots_num, 'gigabytes': share_snap_sizes} reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: reservations = None overs = e.kwargs['overs'] usages = e.kwargs['usages'] quotas = e.kwargs['quotas'] def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) if 'snapshot_gigabytes' in overs: msg = ("Quota exceeded for %(s_pid)s, tried to accept " "%(s_size)sG snapshot (%(d_consumed)dG of " "%(d_quota)dG already consumed).") LOG.warning(msg, { 's_pid': context.project_id, 's_size': share_snap_sizes, 'd_consumed': _consumed('snapshot_gigabytes'), 'd_quota': quotas['snapshot_gigabytes']}) raise 
exception.SnapshotSizeExceedsAvailableQuota() elif 'snapshots' in overs: msg = ("Quota exceeded for %(s_pid)s, tried to accept " "%(s_num)s snapshot (%(d_consumed)d of " "%(d_quota)d already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 's_num': snapshots_num, 'd_consumed': _consumed('snapshots'), 'd_quota': quotas['snapshots']}) raise exception.SnapshotLimitExceeded( allowed=quotas['snapshots']) try: reserve_opts = {'snapshots': -snapshots_num, 'gigabytes': -share_snap_sizes} donor_reservations = QUOTAS.reserve(context, project_id=donor_id, **reserve_opts) except exception.OverQuota: donor_reservations = None LOG.exception("Failed to update share providing snapshots quota:" " Over quota.") return reservations, donor_reservations @staticmethod def _check_share_type_access(context, share_type_id, share_id): share_type = share_types.get_share_type( context, share_type_id, expected_fields=['projects']) if not share_type['is_public']: if context.project_id not in share_type['projects']: msg = _("Share type of share %(share_id)s is not public, " "and current project can not access the share " "type ") % {'share_id': share_id} LOG.error(msg) raise exception.InvalidShare(reason=msg) def _check_transferred_project_quota(self, context, share_ref_size): try: reserve_opts = {'shares': 1, 'gigabytes': share_ref_size} reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as exc: reservations = None self.share_api.check_if_share_quotas_exceeded(context, exc, share_ref_size) return reservations @staticmethod def _check_donor_project_quota(context, donor_id, share_ref_size, transfer_id): try: reserve_opts = {'shares': -1, 'gigabytes': -share_ref_size} donor_reservations = QUOTAS.reserve(context.elevated(), project_id=donor_id, **reserve_opts) except Exception: donor_reservations = None LOG.exception("Failed to update quota donating share" " transfer id %s", transfer_id) return donor_reservations @staticmethod def 
_check_snapshot_status(snapshots, transfer_id): for snapshot in snapshots: # Only check snapshot with instances if snapshot.get('status'): if snapshot['status'] != 'available': msg = (_('Transfer %(transfer_id)s: Snapshot ' '%(snapshot_id)s is not in the expected ' 'available state.') % {'transfer_id': transfer_id, 'snapshot_id': snapshot['id']}) LOG.error(msg) raise exception.InvalidSnapshot(reason=msg) def accept(self, context, transfer_id, auth_key, clear_rules=False): """Accept a share that has been offered for transfer.""" # We must use an elevated context to make sure we can find the # transfer. transfer = self.db.transfer_get(context.elevated(), transfer_id) crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key) if crypt_hash != transfer['crypt_hash']: msg = (_("Attempt to transfer %s with invalid auth key.") % transfer_id) LOG.error(msg) raise exception.InvalidAuthKey(reason=msg) share_id = transfer['resource_id'] try: # We must use an elevated context to see the share that is still # owned by the donor. 
share_ref = self.share_api.get(context.elevated(), share_id) except exception.NotFound: msg = _("Share specified was not found.") raise exception.InvalidShare(reason=msg) share_instance = share_ref['instance'] if share_ref['status'] != constants.STATUS_AWAITING_TRANSFER: msg = (_('Transfer %(transfer_id)s: share id %(share_id)s ' 'expected in awaiting_transfer state.') % {'transfer_id': transfer_id, 'share_id': share_id}) LOG.error(msg) raise exception.InvalidShare(reason=msg) share_ref_size = share_ref['size'] share_type_id = share_ref.get('share_type_id') # check share type access if share_type_id: self._check_share_type_access(context, share_type_id, share_id) # check per share quota limit self.share_api.check_is_share_size_within_per_share_quota_limit( context, share_ref_size) # check accept transferred project quotas reservations = self._check_transferred_project_quota( context, share_ref_size) # check donor project quotas donor_id = share_ref['project_id'] donor_reservations = self._check_donor_project_quota( context, donor_id, share_ref_size, transfer_id) snap_res = None snap_donor_res = None accept_snapshots = False snapshots = self.db.share_snapshot_get_all_for_share( context.elevated(), share_id) if snapshots: self._check_snapshot_status(snapshots, transfer_id) accept_snapshots = True snap_res, snap_donor_res = self._handle_snapshot_quota( context, snapshots, share_ref['project_id']) share_utils.notify_about_share_usage(context, share_ref, share_instance, "transfer.accept.start") try: self.share_api.transfer_accept(context, share_ref, context.user_id, context.project_id, clear_rules=clear_rules) # Transfer ownership of the share now, must use an elevated # context. 
self.db.transfer_accept(context.elevated(), transfer_id, context.user_id, context.project_id, accept_snapshots=accept_snapshots) if reservations: QUOTAS.commit(context, reservations) if snap_res: QUOTAS.commit(context, snap_res) if donor_reservations: QUOTAS.commit(context, donor_reservations, project_id=donor_id) if snap_donor_res: QUOTAS.commit(context, snap_donor_res, project_id=donor_id) LOG.info("share %s has been transferred.", share_id) except Exception: with excutils.save_and_reraise_exception(): try: # storage try to rollback self.share_api.transfer_accept(context, share_ref, share_ref['user_id'], share_ref['project_id']) # db try to rollback self.db.transfer_accept_rollback( context.elevated(), transfer_id, share_ref['user_id'], share_ref['project_id'], rollback_snap=accept_snapshots) finally: if reservations: QUOTAS.rollback(context, reservations) if snap_res: QUOTAS.rollback(context, snap_res) if donor_reservations: QUOTAS.rollback(context, donor_reservations, project_id=donor_id) if snap_donor_res: QUOTAS.rollback(context, snap_donor_res, project_id=donor_id) share_utils.notify_about_share_usage(context, share_ref, share_instance, "transfer.accept.end") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/utils.py0000664000175000017500000005210100000000000016030 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import contextlib import functools import inspect import pyclbr import re import shutil import sys import tempfile import tenacity import time import logging import netaddr from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import importutils from oslo_utils import netutils from oslo_utils import strutils from oslo_utils import timeutils from webob import exc from manila.common import constants from manila.db import api as db_api from manila import exception from manila.i18n import _ CONF = cfg.CONF LOG = log.getLogger(__name__) if getattr(CONF, 'debug', False): logging.getLogger("paramiko").setLevel(logging.DEBUG) _ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' synchronized = lockutils.synchronized_with_prefix('manila-') def isotime(at=None, subsecond=False): """Stringify time in ISO 8601 format.""" # Python provides a similar instance method for datetime.datetime objects # called isoformat(). The format of the strings generated by isoformat() # have a couple of problems: # 1) The strings generated by isotime are used in tokens and other public # APIs that we can't change without a deprecation period. The strings # generated by isoformat are not the same format, so we can't just # change to it. # 2) The strings generated by isoformat do not include the microseconds if # the value happens to be 0. 
This will likely show up as random failures # as parsers may be written to always expect microseconds, and it will # parse correctly most of the time. if not at: at = timeutils.utcnow() st = at.strftime(_ISO8601_TIME_FORMAT if not subsecond else _ISO8601_TIME_FORMAT_SUBSECOND) tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' # Need to handle either iso8601 or python UTC format st += ('Z' if tz in ['UTC', 'UTC+00:00'] else tz) return st def _get_root_helper(): return 'sudo manila-rootwrap %s' % CONF.rootwrap_config def execute(*cmd, **kwargs): """Convenience wrapper around oslo's execute() function.""" kwargs.setdefault('root_helper', _get_root_helper()) if getattr(CONF, 'debug', False): kwargs['loglevel'] = logging.DEBUG return processutils.execute(*cmd, **kwargs) def check_ssh_injection(cmd_list): ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>', '<'] # Check whether injection attacks exist for arg in cmd_list: arg = arg.strip() # Check for matching quotes on the ends is_quoted = re.match('^(?P[\'"])(?P.*)(?P=quote)$', arg) if is_quoted: # Check for unescaped quotes within the quoted argument quoted = is_quoted.group('quoted') if quoted: if (re.match('[\'"]', quoted) or re.search('[^\\\\][\'"]', quoted)): raise exception.SSHInjectionThreat(command=cmd_list) else: # We only allow spaces within quoted arguments, and that # is the only special character allowed within quotes if len(arg.split()) > 1: raise exception.SSHInjectionThreat(command=cmd_list) # Second, check whether danger character in command. So the shell # special operator must be a single argument. 
for c in ssh_injection_pattern: if c not in arg: continue result = arg.find(c) if not result == -1: if result == 0 or not arg[result - 1] == '\\': raise exception.SSHInjectionThreat(command=cmd_list) class LazyPluggable(object): """A pluggable backend loaded lazily based on some value.""" def __init__(self, pivot, **backends): self.__backends = backends self.__pivot = pivot self.__backend = None def __get_backend(self): if not self.__backend: backend_name = CONF[self.__pivot] if backend_name not in self.__backends: raise exception.Error(_('Invalid backend: %s') % backend_name) backend = self.__backends[backend_name] if isinstance(backend, tuple): name = backend[0] fromlist = backend[1] else: name = backend fromlist = backend self.__backend = __import__(name, None, None, fromlist) LOG.debug('backend %s', self.__backend) return self.__backend def __getattr__(self, key): backend = self.__get_backend() return getattr(backend, key) def monkey_patch(): """Patch decorator. If the Flags.monkey_patch set as True, this function patches a decorator for all functions in specified modules. You can set decorators for each modules using CONF.monkey_patch_modules. The format is "Module path:Decorator function". Example: 'manila.api.ec2.cloud:' \ manila.openstack.common.notifier.api.notify_decorator' Parameters of the decorator is as follows. (See manila.openstack.common.notifier.api.notify_decorator) name - name of the function function - object of the function """ # If CONF.monkey_patch is not True, this function do nothing. 
if not CONF.monkey_patch: return # Get list of modules and decorators for module_and_decorator in CONF.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = importutils.import_class(decorator_name) __import__(module) # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = importutils.import_class("%s.%s" % (module, key)) # NOTE(vponomaryov): we need to distinguish class methods types # for py2 and py3, because the concept of 'unbound methods' has # been removed from the python3.x member_type = inspect.isfunction for method, func in inspect.getmembers(clz, member_type): setattr( clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function if isinstance(module_data[key], pyclbr.Function): func = importutils.import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, decorator("%s.%s" % (module, key), func)) def file_open(*args, **kwargs): """Open file see built-in open() documentation for more details Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return open(*args, **kwargs) def service_is_up(service): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. 
tdelta = timeutils.utcnow() - last_heartbeat elapsed = tdelta.total_seconds() return abs(elapsed) <= CONF.service_down_time def validate_service_host(context, host): service = db_api.service_get_by_host_and_topic(context, host, 'manila-share') if not service_is_up(service): raise exception.ServiceIsDown(service=service['host']) return service @contextlib.contextmanager def tempdir(**kwargs): tmpdir = tempfile.mkdtemp(**kwargs) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError as e: LOG.debug('Could not remove tmpdir: %s', e) def walk_class_hierarchy(clazz, encountered=None): """Walk class hierarchy, yielding most derived classes first.""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) # drill down to leaves first for subsubclass in walk_class_hierarchy(subclass, encountered): yield subsubclass yield subclass def cidr_to_network(cidr): """Convert cidr to network.""" try: network = netaddr.IPNetwork(cidr) return network except netaddr.AddrFormatError: raise exception.InvalidInput(_("Invalid cidr supplied %s") % cidr) def cidr_to_netmask(cidr): """Convert cidr to netmask.""" return str(cidr_to_network(cidr).netmask) def cidr_to_prefixlen(cidr): """Convert cidr to prefix length.""" return cidr_to_network(cidr).prefixlen def is_valid_ip_address(ip_address, ip_version): ip_version = ([int(ip_version)] if not isinstance(ip_version, list) else ip_version) if not set(ip_version).issubset(set([4, 6])): raise exception.ManilaException( _("Provided improper IP version '%s'.") % ip_version) if not isinstance(ip_address, str): return False if 4 in ip_version: if netutils.is_valid_ipv4(ip_address): return True if 6 in ip_version: if netutils.is_valid_ipv6(ip_address): return True return False def get_bool_param(param_string, params, default=False): param = params.get(param_string, default) if not strutils.is_valid_boolstr(param): msg = _("Value '%(param)s' for 
'%(param_string)s' is not " "a boolean.") % {'param': param, 'param_string': param_string} raise exception.InvalidParameterValue(err=msg) return strutils.bool_from_string(param, strict=True) def is_all_tenants(search_opts): """Checks to see if the all_tenants flag is in search_opts :param dict search_opts: The search options for a request :returns: boolean indicating if all_tenants are being requested or not """ all_tenants = search_opts.get('all_tenants') if all_tenants: try: all_tenants = strutils.bool_from_string(all_tenants, True) except ValueError as err: raise exception.InvalidInput(str(err)) else: # The empty string is considered enabling all_tenants all_tenants = 'all_tenants' in search_opts return all_tenants class IsAMatcher(object): def __init__(self, expected_value=None): self.expected_value = expected_value def __eq__(self, actual_value): return isinstance(actual_value, self.expected_value) class ComparableMixin(object): def _compare(self, other, method): try: return method(self._cmpkey(), other._cmpkey()) except (AttributeError, TypeError): # _cmpkey not implemented, or return different type, # so I can't compare with "other". 
return NotImplemented def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) class retry_if_exit_code(tenacity.retry_if_exception): """Retry on ProcessExecutionError specific exit codes.""" def __init__(self, codes): self.codes = (codes,) if isinstance(codes, int) else codes super(retry_if_exit_code, self).__init__(self._check_exit_code) def _check_exit_code(self, exc): return (exc and isinstance(exc, processutils.ProcessExecutionError) and exc.exit_code in self.codes) def retry(retry_param=Exception, interval=1, retries=10, backoff_rate=2, backoff_sleep_max=None, wait_random=False, infinite=False, retry=tenacity.retry_if_exception_type): if retries < 1: raise ValueError('Retries must be greater than or ' 'equal to 1 (received: %s). 
' % retries) if wait_random: kwargs = {'multiplier': interval} if backoff_sleep_max is not None: kwargs.update({'max': backoff_sleep_max}) wait = tenacity.wait_random_exponential(**kwargs) else: kwargs = {'multiplier': interval, 'min': 0, 'exp_base': backoff_rate} if backoff_sleep_max is not None: kwargs.update({'max': backoff_sleep_max}) wait = tenacity.wait_exponential(**kwargs) if infinite: stop = tenacity.stop.stop_never else: stop = tenacity.stop_after_attempt(retries) def _decorator(f): @functools.wraps(f) def _wrapper(*args, **kwargs): r = tenacity.Retrying( sleep=tenacity.nap.sleep, before_sleep=tenacity.before_sleep_log(LOG, logging.DEBUG), after=tenacity.after_log(LOG, logging.DEBUG), stop=stop, reraise=True, retry=retry(retry_param), wait=wait) return r(f, *args, **kwargs) return _wrapper return _decorator def get_bool_from_api_params(key, params, default=False, strict=True): """Parse bool value from request params. HTTPBadRequest will be directly raised either of the cases below: 1. invalid bool string was found by key(with strict on). 2. key not found while default value is invalid(with strict on). """ param = params.get(key, default) try: param = strutils.bool_from_string(param, strict=strict, default=default) except ValueError: msg = _('Invalid value %(param)s for %(param_string)s. ' 'Expecting a boolean.') % {'param': param, 'param_string': key} raise exc.HTTPBadRequest(explanation=msg) return param def check_params_exist(keys, params): """Validates if keys exist in params. :param keys: List of keys to check :param params: Parameters received from REST API """ if any(set(keys) - set(params)): msg = _("Must specify all mandatory parameters: %s") % keys raise exc.HTTPBadRequest(explanation=msg) def check_params_are_boolean(keys, params, default=False): """Validates if keys in params are boolean. 
:param keys: List of keys to check :param params: Parameters received from REST API :param default: default value when it does not exist :return: a dictionary with keys and respective retrieved value """ result = {} for key in keys: value = get_bool_from_api_params(key, params, default, strict=True) result[key] = value return result def require_driver_initialized(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): # we can't do anything if the driver didn't init if not self.driver.initialized: driver_name = self.driver.__class__.__name__ raise exception.DriverNotInitialized(driver=driver_name) return func(self, *args, **kwargs) return wrapper def convert_str(text): """Convert to native string. Convert bytes and Unicode strings to native strings: * convert to Unicode on Python 3: decode bytes from UTF-8 """ if isinstance(text, bytes): return text.decode('utf-8') else: return text def translate_string_size_to_float(string, multiplier='G'): """Translates human-readable storage size to float value. Supported values for 'multiplier' are following: K - kilo | 1 M - mega | 1024 G - giga | 1024 * 1024 T - tera | 1024 * 1024 * 1024 P = peta | 1024 * 1024 * 1024 * 1024 returns: - float if correct input data provided - None if incorrect """ if not isinstance(string, str): return None multipliers = ('K', 'M', 'G', 'T', 'P') mapping = { k: 1024.0 ** v for k, v in zip(multipliers, range(len(multipliers))) } if multiplier not in multipliers: raise exception.ManilaException( "'multiplier' arg should be one of following: " "'%(multipliers)s'. But it is '%(multiplier)s'." 
% { 'multiplier': multiplier, 'multipliers': "', '".join(multipliers), } ) try: value = float(string.replace(",", ".")) / 1024.0 value = value / mapping[multiplier] return value except (ValueError, TypeError): matched = re.match( r"^(\d*[.,]*\d*)([%s])$" % ''.join(multipliers), string) if matched: # The replace() is needed in case decimal separator is a comma value = float(matched.groups()[0].replace(",", ".")) multiplier = mapping[matched.groups()[1]] / mapping[multiplier] return value * multiplier def wait_for_access_update(context, db, share_instance, migration_wait_access_rules_timeout): starttime = time.time() deadline = starttime + migration_wait_access_rules_timeout tries = 0 while True: instance = db.share_instance_get(context, share_instance['id']) if instance['access_rules_status'] == constants.STATUS_ACTIVE: break tries += 1 now = time.time() if (instance['access_rules_status'] == constants.SHARE_INSTANCE_RULES_ERROR): msg = _("Failed to update access rules" " on share instance %s") % share_instance['id'] raise exception.ShareMigrationFailed(reason=msg) elif now > deadline: msg = _("Timeout trying to update access rules" " on share instance %(share_id)s. Timeout " "was %(timeout)s seconds.") % { 'share_id': share_instance['id'], 'timeout': migration_wait_access_rules_timeout} raise exception.ShareMigrationFailed(reason=msg) else: # 1.414 = square-root of 2 time.sleep(1.414 ** tries) class DoNothing(str): """Class that literrally does nothing. We inherit from str in case it's called with json.dumps. 
""" def __call__(self, *args, **kwargs): return self def __getattr__(self, name): return self DO_NOTHING = DoNothing() def notifications_enabled(conf): """Check if oslo notifications are enabled.""" notifications_driver = set(conf.oslo_messaging_notifications.driver) return notifications_driver and notifications_driver != {'noop'} def if_notifications_enabled(function): """Calls decorated method only if notifications are enabled.""" @functools.wraps(function) def wrapped(*args, **kwargs): if notifications_enabled(CONF): return function(*args, **kwargs) return DO_NOTHING return wrapped def write_remote_file(ssh, filename, contents, as_root=False): tmp_filename = "%s.tmp" % filename if as_root: cmd = 'sudo tee "%s" > /dev/null' % tmp_filename cmd2 = 'sudo mv -f "%s" "%s"' % (tmp_filename, filename) else: cmd = 'cat > "%s"' % tmp_filename cmd2 = 'mv -f "%s" "%s"' % (tmp_filename, filename) stdin, __, __ = ssh.exec_command(cmd) stdin.write(contents) stdin.close() stdin.channel.shutdown_write() ssh.exec_command(cmd2) def convert_time_duration_to_iso_format(time_duration): """Covert time duration to ISO 8601 format""" unit_mapping = { 'minutes': 'M', 'hours': 'H', 'days': 'D', 'months': 'M', 'years': 'Y', } pattern = re.compile(r'(\d+)\s*(minutes|hours|days|months|years)') match = pattern.match(time_duration) if not match: raise exception.ManilaException( f"Invalid time duration format: {time_duration}") value, unit = match.groups() if unit in ["minutes", "hours", "days"]: iso_format = f"PT{value}{unit_mapping[unit]}" else: iso_format = f"P{value}{unit_mapping[unit]}" return iso_format ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/version.py0000664000175000017500000000157400000000000016365 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pbr import version as pbr_version MANILA_VENDOR = "OpenStack Foundation" MANILA_PRODUCT = "OpenStack Manila" MANILA_PACKAGE = None # OS distro package version suffix loaded = False version_info = pbr_version.VersionInfo('manila') version_string = version_info.version_string ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04567 manila-21.0.0/manila/volume/0000775000175000017500000000000000000000000015626 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/volume/__init__.py0000664000175000017500000000221500000000000017737 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import oslo_config.cfg import oslo_utils.importutils _volume_opts = [ oslo_config.cfg.StrOpt('volume_api_class', default='manila.volume.cinder.API', help='The full class name of the ' 'Volume API class to use.'), ] oslo_config.cfg.CONF.register_opts(_volume_opts) def API(): importutils = oslo_utils.importutils volume_api_class = oslo_config.cfg.CONF.volume_api_class cls = importutils.import_class(volume_api_class) return cls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/volume/cinder.py0000664000175000017500000002710700000000000017453 0ustar00zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to volumes + cinder. 
""" import copy from cinderclient import exceptions as cinder_exception from cinderclient.v3 import client as cinder_client from keystoneauth1 import loading as ks_loading from oslo_config import cfg from manila.common import client_auth from manila.common.config import core_opts import manila.context as ctxt from manila.db import base from manila import exception from manila.i18n import _ CINDER_GROUP = 'cinder' AUTH_OBJ = None cinder_opts = [ cfg.BoolOpt('cross_az_attach', default=True, help='Allow attaching between instances and volumes in ' 'different availability zones.'), cfg.IntOpt('http_retries', default=3, help='Number of cinderclient retries on failed HTTP calls.'), cfg.StrOpt('endpoint_type', default='publicURL', choices=['publicURL', 'internalURL', 'adminURL', 'public', 'internal', 'admin'], help='Endpoint type to be used with cinder client calls.'), cfg.StrOpt('region_name', help='Region name for connecting to cinder.'), ] CONF = cfg.CONF CONF.register_opts(core_opts) CONF.register_opts(cinder_opts, CINDER_GROUP) ks_loading.register_session_conf_options(CONF, CINDER_GROUP) ks_loading.register_auth_conf_options(CONF, CINDER_GROUP) def list_opts(): return client_auth.AuthClientLoader.list_opts(CINDER_GROUP) def cinderclient(context): global AUTH_OBJ if not AUTH_OBJ: AUTH_OBJ = client_auth.AuthClientLoader( client_class=cinder_client.Client, cfg_group=CINDER_GROUP) return AUTH_OBJ.get_client(context, retries=CONF[CINDER_GROUP].http_retries, endpoint_type=CONF[CINDER_GROUP].endpoint_type, region_name=CONF[CINDER_GROUP].region_name) def _untranslate_volume_summary_view(context, vol): """Maps keys for volumes summary view.""" d = {} d['id'] = vol.id d['status'] = vol.status d['size'] = vol.size d['availability_zone'] = vol.availability_zone d['created_at'] = vol.created_at d['attach_time'] = "" d['mountpoint'] = "" if vol.attachments: att = vol.attachments[0] d['attach_status'] = 'attached' d['instance_uuid'] = att['server_id'] d['mountpoint'] = 
att['device'] else: d['attach_status'] = 'detached' d['name'] = vol.name d['description'] = vol.description d['volume_type_id'] = vol.volume_type d['snapshot_id'] = vol.snapshot_id d['volume_metadata'] = {} for key, value in vol.metadata.items(): d['volume_metadata'][key] = value if hasattr(vol, 'volume_image_metadata'): d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata) return d def _untranslate_snapshot_summary_view(context, snapshot): """Maps keys for snapshots summary view.""" d = {} d['id'] = snapshot.id d['status'] = snapshot.status d['progress'] = snapshot.progress d['size'] = snapshot.size d['created_at'] = snapshot.created_at d['name'] = snapshot.name d['description'] = snapshot.description d['volume_id'] = snapshot.volume_id d['project_id'] = snapshot.project_id d['volume_size'] = snapshot.size return d def translate_volume_exception(method): """Transforms the exception for the volume, keeps its traceback intact.""" def wrapper(self, ctx, volume_id, *args, **kwargs): try: res = method(self, ctx, volume_id, *args, **kwargs) except cinder_exception.ClientException as e: if isinstance(e, cinder_exception.NotFound): raise exception.VolumeNotFound(volume_id=volume_id) elif isinstance(e, cinder_exception.BadRequest): raise exception.InvalidInput(reason=str(e)) return res return wrapper def translate_snapshot_exception(method): """Transforms the exception for the snapshot. Note: Keeps its traceback intact. 
""" def wrapper(self, ctx, snapshot_id, *args, **kwargs): try: res = method(self, ctx, snapshot_id, *args, **kwargs) except cinder_exception.ClientException as e: if isinstance(e, cinder_exception.NotFound): raise exception.VolumeSnapshotNotFound(snapshot_id=snapshot_id) return res return wrapper class API(base.Base): """API for interacting with the volume manager.""" @translate_volume_exception def get(self, context, volume_id): item = cinderclient(context).volumes.get(volume_id) return _untranslate_volume_summary_view(context, item) def get_all(self, context, search_opts={}): items = cinderclient(context).volumes.list(detailed=True, search_opts=search_opts) rval = [] for item in items: rval.append(_untranslate_volume_summary_view(context, item)) return rval def check_attached(self, context, volume): """Raise exception if volume in use.""" if volume['status'] != "in-use": msg = _("status must be 'in-use'") raise exception.InvalidVolume(msg) def check_attach(self, context, volume, instance=None): if volume['status'] != "available": msg = _("status must be 'available'") raise exception.InvalidVolume(msg) if volume['attach_status'] == "attached": msg = _("already attached") raise exception.InvalidVolume(msg) if instance and not CONF[CINDER_GROUP].cross_az_attach: if instance['availability_zone'] != volume['availability_zone']: msg = _("Instance and volume not in same availability_zone") raise exception.InvalidVolume(msg) def check_detach(self, context, volume): if volume['status'] == "available": msg = _("already detached") raise exception.InvalidVolume(msg) @translate_volume_exception def reserve_volume(self, context, volume_id): cinderclient(context).volumes.reserve(volume_id) @translate_volume_exception def unreserve_volume(self, context, volume_id): cinderclient(context).volumes.unreserve(volume_id) @translate_volume_exception def begin_detaching(self, context, volume_id): cinderclient(context).volumes.begin_detaching(volume_id) @translate_volume_exception def 
roll_detaching(self, context, volume_id): cinderclient(context).volumes.roll_detaching(volume_id) @translate_volume_exception def attach(self, context, volume_id, instance_uuid, mountpoint): cinderclient(context).volumes.attach(volume_id, instance_uuid, mountpoint) @translate_volume_exception def detach(self, context, volume_id): cinderclient(context).volumes.detach(volume_id) @translate_volume_exception def initialize_connection(self, context, volume_id, connector): return cinderclient(context).volumes.initialize_connection(volume_id, connector) @translate_volume_exception def terminate_connection(self, context, volume_id, connector): return cinderclient(context).volumes.terminate_connection(volume_id, connector) def create(self, context, size, name, description, snapshot=None, image_id=None, volume_type=None, metadata=None, availability_zone=None): if snapshot is not None: snapshot_id = snapshot['id'] else: snapshot_id = None kwargs = dict(snapshot_id=snapshot_id, name=name, description=description, volume_type=volume_type, user_id=context.user_id, project_id=context.project_id, availability_zone=availability_zone, metadata=metadata, imageRef=image_id) try: item = cinderclient(context).volumes.create(size, **kwargs) return _untranslate_volume_summary_view(context, item) except cinder_exception.BadRequest as e: raise exception.InvalidInput(reason=str(e)) except cinder_exception.NotFound: raise exception.NotFound( _("Error in creating cinder " "volume. Cinder volume type %s not exist. 
Check parameter " "cinder_volume_type in configuration file.") % volume_type) except Exception as e: raise exception.ManilaException(e) @translate_volume_exception def extend(self, context, volume_id, new_size): cinderclient(context).volumes.extend(volume_id, new_size) @translate_volume_exception def delete(self, context, volume_id): cinderclient(context).volumes.delete(volume_id) @translate_volume_exception def update(self, context, volume_id, fields): # Use Manila's context as far as Cinder's is restricted to update # volumes. manila_admin_context = ctxt.get_admin_context() client = cinderclient(manila_admin_context) item = client.volumes.get(volume_id) client.volumes.update(item, **fields) @translate_snapshot_exception def get_snapshot(self, context, snapshot_id): item = cinderclient(context).volume_snapshots.get(snapshot_id) return _untranslate_snapshot_summary_view(context, item) def get_all_snapshots(self, context, search_opts=None): items = cinderclient(context).volume_snapshots.list( detailed=True, search_opts=search_opts) rvals = [] for item in items: rvals.append(_untranslate_snapshot_summary_view(context, item)) return rvals @translate_volume_exception def create_snapshot(self, context, volume_id, name, description): item = cinderclient(context).volume_snapshots.create(volume_id, False, name, description) return _untranslate_snapshot_summary_view(context, item) @translate_volume_exception def create_snapshot_force(self, context, volume_id, name, description): item = cinderclient(context).volume_snapshots.create(volume_id, True, name, description) return _untranslate_snapshot_summary_view(context, item) @translate_snapshot_exception def delete_snapshot(self, context, snapshot_id): cinderclient(context).volume_snapshots.delete(snapshot_id) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04567 manila-21.0.0/manila/wsgi/0000775000175000017500000000000000000000000015270 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/wsgi/__init__.py0000664000175000017500000000000000000000000017367 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/wsgi/api.py0000664000175000017500000000142000000000000016410 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """WSGI application entry-point for the Manila API.""" import threading from manila.wsgi import wsgi application = None lock = threading.Lock() with lock: if application is None: application = wsgi.initialize_application() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/wsgi/common.py0000664000175000017500000001203100000000000017127 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for working with WSGI servers.""" import webob.dec import webob.exc from manila.i18n import _ class Request(webob.Request): pass class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = manila.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import manila.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. 
""" return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(detail='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable, or or or) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. """ raise NotImplementedError(_('You must implement __call__')) class Middleware(Application): """Base WSGI middleware. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = manila.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import manila.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. 
""" def _factory(app): return cls(app, **local_config) return _factory def __init__(self, application): self.application = application def process_request(self, req): """Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # pylint: disable=assignment-from-none response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/wsgi/eventlet_server.py0000664000175000017500000000413600000000000021062 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utility methods for working with WSGI servers.""" import socket from oslo_config import cfg from oslo_service import wsgi from oslo_utils import netutils socket_opts = [ cfg.BoolOpt('tcp_keepalive', default=True, help="Sets the value of TCP_KEEPALIVE (True/False) for each " "server socket."), cfg.IntOpt('tcp_keepalive_interval', help="Sets the value of TCP_KEEPINTVL in seconds for each " "server socket. Not supported on OS X."), cfg.IntOpt('tcp_keepalive_count', help="Sets the value of TCP_KEEPCNT for each " "server socket. Not supported on OS X."), ] CONF = cfg.CONF CONF.register_opts(socket_opts) class Server(wsgi.Server): """Server class to manage a WSGI server, serving a WSGI application.""" def _set_socket_opts(self, _socket): _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # NOTE(praneshp): Call set_tcp_keepalive in oslo to set # tcp keepalive parameters. Sockets can hang around forever # without keepalive netutils.set_tcp_keepalive( _socket, self.conf.tcp_keepalive, self.conf.tcp_keepidle, self.conf.tcp_keepalive_count, self.conf.tcp_keepalive_interval, ) return _socket ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/manila/wsgi/wsgi.py0000664000175000017500000000275700000000000016626 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Manila OS API WSGI application.""" import sys from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from oslo_service import wsgi # Need to register global_opts from manila.common import config from manila import rpc from manila import service from manila import version CONF = cfg.CONF LOG = logging.getLogger(__name__) def initialize_application(): logging.register_options(CONF) gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project="manila", version=version.version_string()) config.verify_share_protocols() config.set_lib_defaults() logging.setup(CONF, "manila") CONF.log_opt_values(LOG, logging.DEBUG) gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) rpc.init(CONF) service.setup_profiler("manila-api", CONF.host) return wsgi.Loader(CONF).load_app(name='osapi_share') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2176678 manila-21.0.0/manila.egg-info/0000775000175000017500000000000000000000000016011 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315601.0 manila-21.0.0/manila.egg-info/PKG-INFO0000644000175000017500000000701200000000000017104 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: manila Version: 21.0.0 Summary: Shared Storage for OpenStack Home-page: https://docs.openstack.org/manila/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: 
Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.10 License-File: LICENSE Requires-Dist: pbr>=5.5.0 Requires-Dist: alembic>=1.4.2 Requires-Dist: castellan>=3.7.0 Requires-Dist: defusedxml>=0.7.1 Requires-Dist: eventlet>=0.27.0 Requires-Dist: greenlet>=0.4.16 Requires-Dist: lxml>=4.5.2 Requires-Dist: netaddr>=0.8.0 Requires-Dist: oslo.config>=8.3.2 Requires-Dist: oslo.context>=3.1.1 Requires-Dist: oslo.db>=8.4.0 Requires-Dist: oslo.i18n>=5.0.1 Requires-Dist: oslo.log>=4.4.0 Requires-Dist: oslo.messaging>=14.1.0 Requires-Dist: oslo.middleware>=4.1.1 Requires-Dist: oslo.policy>=4.5.0 Requires-Dist: oslo.privsep>=2.4.0 Requires-Dist: oslo.reports>=2.2.0 Requires-Dist: oslo.rootwrap>=6.2.0 Requires-Dist: oslo.serialization>=4.0.1 Requires-Dist: oslo.service>=2.4.0 Requires-Dist: oslo.upgradecheck>=1.3.0 Requires-Dist: oslo.utils>=7.0.0 Requires-Dist: oslo.concurrency>=4.3.0 Requires-Dist: osprofiler>=3.4.0 Requires-Dist: paramiko>=2.7.2 Requires-Dist: Paste>=3.4.3 Requires-Dist: PasteDeploy>=2.1.0 Requires-Dist: pyparsing>=2.4.7 Requires-Dist: python-neutronclient>=6.7.0 Requires-Dist: keystoneauth1>=4.2.1 Requires-Dist: keystonemiddleware>=9.1.0 Requires-Dist: requests>=2.23.0 Requires-Dist: tenacity>=6.3.1 Requires-Dist: Routes>=2.4.1 Requires-Dist: SQLAlchemy>=1.4.0 Requires-Dist: SQLAlchemy-Utils>=0.38.3 Requires-Dist: stevedore>=3.2.2 Requires-Dist: tooz>=2.7.1 Requires-Dist: python-cinderclient>=4.0.1 Requires-Dist: python-novaclient>=17.2.1 Requires-Dist: python-glanceclient>=3.2.2 Requires-Dist: WebOb>=1.8.6 Requires-Dist: cachetools>=4.2.1 Requires-Dist: packaging>=20.9 ====== Manila ====== .. image:: https://governance.openstack.org/tc/badges/manila.svg .. Change things from this point on You have come across an OpenStack shared file system service. It has identified itself as "Manila". It was abstracted from the Cinder project. 
* Wiki: https://wiki.openstack.org/wiki/Manila * Developer docs: https://docs.openstack.org/manila/latest/ Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://opendev.org/openstack/manila For developer information please see `HACKING.rst `_ You can raise bugs here https://bugs.launchpad.net/manila Python client ------------- https://opendev.org/openstack/python-manilaclient * Documentation for the project can be found at: https://docs.openstack.org/manila/latest/ * Release notes for the project can be found at: https://docs.openstack.org/releasenotes/manila/ * Source for the project: https://opendev.org/openstack/manila * Bugs: https://bugs.launchpad.net/manila * Blueprints: https://blueprints.launchpad.net/manila * Design specifications are tracked at: https://specs.openstack.org/openstack/manila-specs/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315601.0 manila-21.0.0/manila.egg-info/SOURCES.txt0000664000175000017500000037706400000000000017716 0ustar00zuulzuul00000000000000.coveragerc .pre-commit-config.yaml .pylintrc .stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt pyproject.toml requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/availability-zones.inc api-ref/source/conf.py api-ref/source/experimental.inc api-ref/source/extensions.inc api-ref/source/index.rst api-ref/source/limits.inc api-ref/source/os-share-manage.inc api-ref/source/parameters.yaml api-ref/source/quota-classes.inc api-ref/source/quota-sets.inc api-ref/source/resource-locks.inc api-ref/source/scheduler-stats.inc api-ref/source/security-services.inc api-ref/source/services.inc api-ref/source/share-access-rule-metadata.inc api-ref/source/share-access-rules.inc api-ref/source/share-actions.inc api-ref/source/share-backups.inc api-ref/source/share-export-locations.inc 
api-ref/source/share-group-snapshots.inc api-ref/source/share-group-types.inc api-ref/source/share-groups.inc api-ref/source/share-instance-export-locations.inc api-ref/source/share-instances.inc api-ref/source/share-metadata.inc api-ref/source/share-migration.inc api-ref/source/share-network-subnets-metadata.inc api-ref/source/share-network-subnets.inc api-ref/source/share-networks.inc api-ref/source/share-replica-export-locations.inc api-ref/source/share-replicas.inc api-ref/source/share-server-migration.inc api-ref/source/share-servers.inc api-ref/source/share-transfers.inc api-ref/source/share-types.inc api-ref/source/shares.inc api-ref/source/snapshot-instances.inc api-ref/source/snapshot-metadata.inc api-ref/source/snapshots.inc api-ref/source/status.yaml api-ref/source/user-messages.inc api-ref/source/versions.inc api-ref/source/common/share-network-span-multiple-subnets-note.rst api-ref/source/samples/availability-zones-list-response.json api-ref/source/samples/export-location-list-response.json api-ref/source/samples/export-location-show-response.json api-ref/source/samples/extensions-list-response.json api-ref/source/samples/limits-response.json api-ref/source/samples/pools-list-detailed-response.json api-ref/source/samples/pools-list-response.json api-ref/source/samples/quota-classes-show-response.json api-ref/source/samples/quota-classes-update-request.json api-ref/source/samples/quota-classes-update-response.json api-ref/source/samples/quota-show-detail-response.json api-ref/source/samples/quota-show-response.json api-ref/source/samples/quota-update-request.json api-ref/source/samples/quota-update-response.json api-ref/source/samples/resource-lock-create-request.json api-ref/source/samples/resource-lock-create-response.json api-ref/source/samples/resource-lock-get-all-response.json api-ref/source/samples/resource-lock-get-response.json api-ref/source/samples/resource-lock-update-request.json api-ref/source/samples/resource-lock-update-response.json 
api-ref/source/samples/security-service-create-request.json api-ref/source/samples/security-service-create-response.json api-ref/source/samples/security-service-show-response.json api-ref/source/samples/security-service-update-request.json api-ref/source/samples/security-service-update-response.json api-ref/source/samples/security-services-list-detailed-response.json api-ref/source/samples/security-services-list-for-share-network-response.json api-ref/source/samples/security-services-list-response.json api-ref/source/samples/service-disable-request.json api-ref/source/samples/service-disable-response.json api-ref/source/samples/service-enable-request.json api-ref/source/samples/service-enable-response.json api-ref/source/samples/service-ensure-shares-request.json api-ref/source/samples/services-list-response.json api-ref/source/samples/services-list-with-filters-response.json api-ref/source/samples/share-access-rules-list-response.json api-ref/source/samples/share-access-rules-show-response.json api-ref/source/samples/share-access-rules-update-metadata-request.json api-ref/source/samples/share-access-rules-update-metadata-response.json api-ref/source/samples/share-access-rules-update-request.json api-ref/source/samples/share-access-rules-update-response.json api-ref/source/samples/share-actions-extend-request.json api-ref/source/samples/share-actions-force-delete-request.json api-ref/source/samples/share-actions-grant-access-request.json api-ref/source/samples/share-actions-grant-access-response.json api-ref/source/samples/share-actions-list-access-rules-request.json api-ref/source/samples/share-actions-list-access-rules-response.json api-ref/source/samples/share-actions-reset-state-request.json api-ref/source/samples/share-actions-restore-request.json api-ref/source/samples/share-actions-revert-to-snapshot-request.json api-ref/source/samples/share-actions-revoke-access-request.json api-ref/source/samples/share-actions-shrink-request.json 
api-ref/source/samples/share-actions-soft-delete-request.json api-ref/source/samples/share-actions-unmanage-request.json api-ref/source/samples/share-backup-create-request.json api-ref/source/samples/share-backup-create-response.json api-ref/source/samples/share-backup-reset-status-request.json api-ref/source/samples/share-backup-restore-request.json api-ref/source/samples/share-backup-restore-response.json api-ref/source/samples/share-backup-show-response.json api-ref/source/samples/share-backup-update-request.json api-ref/source/samples/share-backup-update-response.json api-ref/source/samples/share-backups-list-detailed-response.json api-ref/source/samples/share-backups-list-response.json api-ref/source/samples/share-create-request.json api-ref/source/samples/share-create-response.json api-ref/source/samples/share-group-create-request.json api-ref/source/samples/share-group-create-response.json api-ref/source/samples/share-group-reset-state-request.json api-ref/source/samples/share-group-show-response.json api-ref/source/samples/share-group-snapshot-actions-reset-state-request.json api-ref/source/samples/share-group-snapshot-create-request.json api-ref/source/samples/share-group-snapshot-create-response.json api-ref/source/samples/share-group-snapshot-show-response.json api-ref/source/samples/share-group-snapshot-update-request.json api-ref/source/samples/share-group-snapshot-update-response.json api-ref/source/samples/share-group-snapshots-list-detailed-response.json api-ref/source/samples/share-group-snapshots-list-members-response.json api-ref/source/samples/share-group-snapshots-list-response.json api-ref/source/samples/share-group-type-create-request.json api-ref/source/samples/share-group-type-create-response.json api-ref/source/samples/share-group-type-grant-access-request.json api-ref/source/samples/share-group-type-revoke-access-request.json api-ref/source/samples/share-group-type-set-request.json api-ref/source/samples/share-group-type-set-response.json 
api-ref/source/samples/share-group-types-default-list-response.json api-ref/source/samples/share-group-types-group-specs-list-response.json api-ref/source/samples/share-group-types-list-access-response.json api-ref/source/samples/share-group-types-list-response.json api-ref/source/samples/share-group-update-request.json api-ref/source/samples/share-group-update-response.json api-ref/source/samples/share-groups-list-detailed-response.json api-ref/source/samples/share-groups-list-response.json api-ref/source/samples/share-instance-actions-force-delete-request.json api-ref/source/samples/share-instance-actions-reset-state-request.json api-ref/source/samples/share-instances-list-response.json api-ref/source/samples/share-manage-request.json api-ref/source/samples/share-manage-response.json api-ref/source/samples/share-migration-cancel-request.json api-ref/source/samples/share-migration-complete-request.json api-ref/source/samples/share-migration-get-process-request.json api-ref/source/samples/share-migration-get-process-response.json api-ref/source/samples/share-migration-start-request.json api-ref/source/samples/share-network-add-security-service-check-request.json api-ref/source/samples/share-network-add-security-service-check-response.json api-ref/source/samples/share-network-add-security-service-request.json api-ref/source/samples/share-network-add-security-service-response-with-subnets.json api-ref/source/samples/share-network-add-security-service-response.json api-ref/source/samples/share-network-create-request.json api-ref/source/samples/share-network-create-response-with-subnets.json api-ref/source/samples/share-network-create-response.json api-ref/source/samples/share-network-remove-security-service-request.json api-ref/source/samples/share-network-remove-security-service-response-with-subnets.json api-ref/source/samples/share-network-remove-security-service-response.json api-ref/source/samples/share-network-reset-state-request.json 
api-ref/source/samples/share-network-show-response-with-subnets.json api-ref/source/samples/share-network-show-response.json api-ref/source/samples/share-network-subnet-create-request.json api-ref/source/samples/share-network-subnet-create-response.json api-ref/source/samples/share-network-subnet-list-response.json api-ref/source/samples/share-network-subnet-set-metadata-request.json api-ref/source/samples/share-network-subnet-set-metadata-response.json api-ref/source/samples/share-network-subnet-show-metadata-response.json api-ref/source/samples/share-network-subnet-show-response.json api-ref/source/samples/share-network-subnet-update-metadata-request.json api-ref/source/samples/share-network-subnet-update-metadata-response.json api-ref/source/samples/share-network-subnet-update-null-metadata-request.json api-ref/source/samples/share-network-subnet-update-null-metadata-response.json api-ref/source/samples/share-network-update-request.json api-ref/source/samples/share-network-update-response-with-subnets.json api-ref/source/samples/share-network-update-response.json api-ref/source/samples/share-network-update-security-service-check-request.json api-ref/source/samples/share-network-update-security-service-check-response.json api-ref/source/samples/share-network-update-security-service-request.json api-ref/source/samples/share-network-update-security-service-response.json api-ref/source/samples/share-networks-list-detailed-response-with-subnets.json api-ref/source/samples/share-networks-list-detailed-response.json api-ref/source/samples/share-networks-list-response.json api-ref/source/samples/share-replica-create-request.json api-ref/source/samples/share-replica-create-response.json api-ref/source/samples/share-replica-export-location-list-response.json api-ref/source/samples/share-replica-export-location-show-response.json api-ref/source/samples/share-replica-promote-request.json api-ref/source/samples/share-replica-resync-request.json 
api-ref/source/samples/share-replicas-force-delete-request.json api-ref/source/samples/share-replicas-list-detail-response.json api-ref/source/samples/share-replicas-list-response.json api-ref/source/samples/share-replicas-reset-replica-state-request.json api-ref/source/samples/share-replicas-reset-state-request.json api-ref/source/samples/share-replicas-show-response.json api-ref/source/samples/share-server-manage-request.json api-ref/source/samples/share-server-manage-response.json api-ref/source/samples/share-server-migration-cancel-request.json api-ref/source/samples/share-server-migration-check-compatibility-request.json api-ref/source/samples/share-server-migration-check-compatibility-response.json api-ref/source/samples/share-server-migration-complete-request.json api-ref/source/samples/share-server-migration-complete-response.json api-ref/source/samples/share-server-migration-get-progress-request.json api-ref/source/samples/share-server-migration-get-progress-response.json api-ref/source/samples/share-server-migration-start-request.json api-ref/source/samples/share-server-reset-state-request.json api-ref/source/samples/share-server-show-details-response.json api-ref/source/samples/share-server-show-response.json api-ref/source/samples/share-server-unmanage-request.json api-ref/source/samples/share-servers-list-response.json api-ref/source/samples/share-set-metadata-request.json api-ref/source/samples/share-set-metadata-response.json api-ref/source/samples/share-show-instance-response.json api-ref/source/samples/share-show-metadata-item-response.json api-ref/source/samples/share-show-metadata-response.json api-ref/source/samples/share-show-network-subnet-metadata-item-response.json api-ref/source/samples/share-show-response.json api-ref/source/samples/share-transfer-accept-request.json api-ref/source/samples/share-transfer-create-request.json api-ref/source/samples/share-transfer-create-response.json api-ref/source/samples/share-transfer-show-response.json 
api-ref/source/samples/share-transfers-list-detailed-response.json api-ref/source/samples/share-transfers-list-response.json api-ref/source/samples/share-type-create-request.json api-ref/source/samples/share-type-create-response.json api-ref/source/samples/share-type-grant-access-request.json api-ref/source/samples/share-type-revoke-access-request.json api-ref/source/samples/share-type-set-request.json api-ref/source/samples/share-type-set-response.json api-ref/source/samples/share-type-show-response.json api-ref/source/samples/share-type-update-request.json api-ref/source/samples/share-type-update-response.json api-ref/source/samples/share-types-default-list-response.json api-ref/source/samples/share-types-extra-specs-list-response.json api-ref/source/samples/share-types-list-access-response.json api-ref/source/samples/share-types-list-response.json api-ref/source/samples/share-update-metadata-request.json api-ref/source/samples/share-update-metadata-response.json api-ref/source/samples/share-update-null-metadata-request.json api-ref/source/samples/share-update-null-metadata-response.json api-ref/source/samples/share-update-request.json api-ref/source/samples/share-update-response.json api-ref/source/samples/shares-list-detailed-response.json api-ref/source/samples/shares-list-response.json api-ref/source/samples/snapshot-actions-force-delete-request.json api-ref/source/samples/snapshot-actions-reset-state-request.json api-ref/source/samples/snapshot-actions-unmanage-request.json api-ref/source/samples/snapshot-create-request.json api-ref/source/samples/snapshot-create-response.json api-ref/source/samples/snapshot-instance-actions-reset-state-request.json api-ref/source/samples/snapshot-instance-show-response.json api-ref/source/samples/snapshot-instances-list-response.json api-ref/source/samples/snapshot-instances-list-with-detail-response.json api-ref/source/samples/snapshot-manage-request.json api-ref/source/samples/snapshot-manage-response.json 
api-ref/source/samples/snapshot-set-metadata-request.json api-ref/source/samples/snapshot-set-metadata-response.json api-ref/source/samples/snapshot-show-metadata-item-response.json api-ref/source/samples/snapshot-show-metadata-response.json api-ref/source/samples/snapshot-show-response.json api-ref/source/samples/snapshot-update-metadata-request.json api-ref/source/samples/snapshot-update-metadata-response.json api-ref/source/samples/snapshot-update-null-metadata-request.json api-ref/source/samples/snapshot-update-null-metadata-response.json api-ref/source/samples/snapshot-update-request.json api-ref/source/samples/snapshot-update-response.json api-ref/source/samples/snapshots-list-detailed-response.json api-ref/source/samples/snapshots-list-response.json api-ref/source/samples/user-message-show-response.json api-ref/source/samples/user-messages-list-response.json api-ref/source/samples/versions-get-version-response.json api-ref/source/samples/versions-index-response.json contrib/ci/post_test_hook.sh contrib/ci/pre_test_hook.sh contrib/share_driver_hooks/README.rst contrib/share_driver_hooks/zaqar_notification.py contrib/share_driver_hooks/zaqar_notification_example_consumer.py contrib/share_driver_hooks/zaqarclientwrapper.py devstack/README.rst devstack/apache-manila.template devstack/plugin.sh devstack/settings devstack/files/debs/manila devstack/files/rpms/manila devstack/files/rpms-suse/manila devstack/upgrade/resources.sh devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh doc/README.rst doc/requirements.txt doc/ext/__init__.py doc/source/conf.py doc/source/index.rst doc/source/_extra/.htaccess doc/source/admin/capabilities_and_extra_specs.rst doc/source/admin/container_driver.rst doc/source/admin/dell_emc_powerscale_driver.rst doc/source/admin/emc_vnx_driver.rst doc/source/admin/export_location_metadata.rst doc/source/admin/generic_driver.rst doc/source/admin/glusterfs_driver.rst doc/source/admin/glusterfs_native_driver.rst 
doc/source/admin/gpfs_driver.rst doc/source/admin/group_capabilities_and_extra_specs.rst doc/source/admin/hdfs_native_driver.rst doc/source/admin/hitachi_hnas_driver.rst doc/source/admin/hpe_3par_driver.rst doc/source/admin/huawei_nas_driver.rst doc/source/admin/index.rst doc/source/admin/infortrend_driver.rst doc/source/admin/macrosan_driver.rst doc/source/admin/netapp_cluster_mode_driver.rst doc/source/admin/nexentastor5_driver.rst doc/source/admin/purestorage_flashblade_driver.rst doc/source/admin/share_back_ends_feature_support_mapping.rst doc/source/admin/share_mount_point_name.rst doc/source/admin/shared-file-systems-crud-share.rst doc/source/admin/shared-file-systems-healthcheck.rst doc/source/admin/shared-file-systems-key-concepts.rst doc/source/admin/shared-file-systems-manage-and-unmanage-share.rst doc/source/admin/shared-file-systems-manage-and-unmanage-snapshot.rst doc/source/admin/shared-file-systems-multi-backend.rst doc/source/admin/shared-file-systems-network-plugins.rst doc/source/admin/shared-file-systems-networking.rst doc/source/admin/shared-file-systems-profiling.rst doc/source/admin/shared-file-systems-quotas.rst doc/source/admin/shared-file-systems-scheduling.rst doc/source/admin/shared-file-systems-security-services.rst doc/source/admin/shared-file-systems-services-manage.rst doc/source/admin/shared-file-systems-share-backup-management.rst doc/source/admin/shared-file-systems-share-group-types.rst doc/source/admin/shared-file-systems-share-groups.rst doc/source/admin/shared-file-systems-share-management.rst doc/source/admin/shared-file-systems-share-migration.rst doc/source/admin/shared-file-systems-share-networks.rst doc/source/admin/shared-file-systems-share-replication.rst doc/source/admin/shared-file-systems-share-resize.rst doc/source/admin/shared-file-systems-share-revert-to-snapshot.rst doc/source/admin/shared-file-systems-share-server-management.rst doc/source/admin/shared-file-systems-share-server-migration.rst 
doc/source/admin/shared-file-systems-share-types.rst doc/source/admin/shared-file-systems-snapshots.rst doc/source/admin/shared-file-systems-troubleshoot.rst doc/source/admin/shared-file-systems-upgrades.rst doc/source/admin/tegile_driver.rst doc/source/admin/zadara_driver.rst doc/source/admin/zfs_on_linux_driver.rst doc/source/cli/index.rst doc/source/cli/manila-manage.rst doc/source/cli/manila-status.rst doc/source/cli/manila.rst doc/source/configuration/index.rst doc/source/configuration/figures/hds_network.jpg doc/source/configuration/figures/hsp_network.png doc/source/configuration/figures/openstack-spectrumscale-setup.JPG doc/source/configuration/shared-file-systems/api.rst doc/source/configuration/shared-file-systems/config-options.rst doc/source/configuration/shared-file-systems/drivers.rst doc/source/configuration/shared-file-systems/log-files.rst doc/source/configuration/shared-file-systems/overview.rst doc/source/configuration/shared-file-systems/drivers/cephfs_driver.rst doc/source/configuration/shared-file-systems/drivers/dell-emc-powerflex-driver.rst doc/source/configuration/shared-file-systems/drivers/dell-emc-powermax-driver.rst doc/source/configuration/shared-file-systems/drivers/dell-emc-powerscale-driver.rst doc/source/configuration/shared-file-systems/drivers/dell-emc-powerstore-driver.rst doc/source/configuration/shared-file-systems/drivers/dell-emc-unity-driver.rst doc/source/configuration/shared-file-systems/drivers/dell-emc-vnx-driver.rst doc/source/configuration/shared-file-systems/drivers/generic-driver.rst doc/source/configuration/shared-file-systems/drivers/glusterfs-driver.rst doc/source/configuration/shared-file-systems/drivers/glusterfs-native-driver.rst doc/source/configuration/shared-file-systems/drivers/hdfs-native-driver.rst doc/source/configuration/shared-file-systems/drivers/hitachi-hnas-driver.rst doc/source/configuration/shared-file-systems/drivers/hitachi-hsp-driver.rst 
doc/source/configuration/shared-file-systems/drivers/hpe-3par-share-driver.rst doc/source/configuration/shared-file-systems/drivers/huawei-nas-driver.rst doc/source/configuration/shared-file-systems/drivers/ibm-spectrumscale-driver.rst doc/source/configuration/shared-file-systems/drivers/infinidat-share-driver.rst doc/source/configuration/shared-file-systems/drivers/infortrend-nas-driver.rst doc/source/configuration/shared-file-systems/drivers/lvm-driver.rst doc/source/configuration/shared-file-systems/drivers/maprfs-native-driver.rst doc/source/configuration/shared-file-systems/drivers/netapp-cluster-mode-driver.rst doc/source/configuration/shared-file-systems/drivers/nexentastor5-driver.rst doc/source/configuration/shared-file-systems/drivers/purestorage-flashblade-driver.rst doc/source/configuration/shared-file-systems/drivers/quobyte-driver.rst doc/source/configuration/shared-file-systems/drivers/vastdata_driver.rst doc/source/configuration/shared-file-systems/drivers/windows-smb-driver.rst doc/source/configuration/shared-file-systems/drivers/zfs-on-linux-driver.rst doc/source/configuration/shared-file-systems/drivers/zfssa-manila-driver.rst doc/source/configuration/shared-file-systems/samples/api-paste.ini.rst doc/source/configuration/shared-file-systems/samples/index.rst doc/source/configuration/shared-file-systems/samples/manila.conf.rst doc/source/configuration/shared-file-systems/samples/policy.rst doc/source/configuration/shared-file-systems/samples/rootwrap.conf.rst doc/source/configuration/shared-file-systems/samples/sample_policy.rst doc/source/configuration/tables/manila-api.inc doc/source/configuration/tables/manila-ca.inc doc/source/configuration/tables/manila-cephfs.inc doc/source/configuration/tables/manila-common.inc doc/source/configuration/tables/manila-compute.inc doc/source/configuration/tables/manila-emc.inc doc/source/configuration/tables/manila-ganesha.inc doc/source/configuration/tables/manila-generic.inc 
doc/source/configuration/tables/manila-glusterfs.inc doc/source/configuration/tables/manila-hdfs.inc doc/source/configuration/tables/manila-hds_hnas.inc doc/source/configuration/tables/manila-hds_hsp.inc doc/source/configuration/tables/manila-hnas.inc doc/source/configuration/tables/manila-hpe3par.inc doc/source/configuration/tables/manila-huawei.inc doc/source/configuration/tables/manila-infinidat.inc doc/source/configuration/tables/manila-infortrend.inc doc/source/configuration/tables/manila-lvm.inc doc/source/configuration/tables/manila-maprfs.inc doc/source/configuration/tables/manila-netapp.inc doc/source/configuration/tables/manila-nexentastor5.inc doc/source/configuration/tables/manila-powermax.inc doc/source/configuration/tables/manila-purestorage-flashblade.inc doc/source/configuration/tables/manila-quobyte.inc doc/source/configuration/tables/manila-quota.inc doc/source/configuration/tables/manila-redis.inc doc/source/configuration/tables/manila-san.inc doc/source/configuration/tables/manila-scheduler.inc doc/source/configuration/tables/manila-share.inc doc/source/configuration/tables/manila-spectrumscale_ces.inc doc/source/configuration/tables/manila-spectrumscale_knfs.inc doc/source/configuration/tables/manila-tegile.inc doc/source/configuration/tables/manila-unity.inc doc/source/configuration/tables/manila-vastdata.inc doc/source/configuration/tables/manila-vnx.inc doc/source/configuration/tables/manila-winrm.inc doc/source/configuration/tables/manila-zfs.inc doc/source/configuration/tables/manila-zfssa.inc doc/source/contributor/adding_release_notes.rst doc/source/contributor/addmethod.openstackapi.rst doc/source/contributor/apache-httpd.rst doc/source/contributor/api_microversion_dev.rst doc/source/contributor/api_microversion_history.rst doc/source/contributor/architecture.rst doc/source/contributor/auth.rst doc/source/contributor/commit_message_tags.rst doc/source/contributor/contributing.rst doc/source/contributor/database.rst 
doc/source/contributor/development-environment-devstack.rst doc/source/contributor/development.environment.rst doc/source/contributor/documenting_your_work.rst doc/source/contributor/driver_filter_goodness_weigher.rst doc/source/contributor/driver_requirements.rst doc/source/contributor/experimental_apis.rst doc/source/contributor/fakes.rst doc/source/contributor/ganesha.rst doc/source/contributor/gerrit.rst doc/source/contributor/guru_meditation_report.rst doc/source/contributor/i18n.rst doc/source/contributor/index.rst doc/source/contributor/intro.rst doc/source/contributor/irc-quick-and-dirty.rst doc/source/contributor/launchpad.rst doc/source/contributor/manila-review-policy.rst doc/source/contributor/manila.rst doc/source/contributor/new_feature_workflow.rst doc/source/contributor/pool-aware-manila-scheduler.rst doc/source/contributor/project-team-lead.rst doc/source/contributor/rpc.rst doc/source/contributor/scheduler.rst doc/source/contributor/services.rst doc/source/contributor/share.rst doc/source/contributor/share_hooks.rst doc/source/contributor/share_migration.rst doc/source/contributor/share_replication.rst doc/source/contributor/share_server_migration.rst doc/source/contributor/tempest_tests.rst doc/source/contributor/threading.rst doc/source/contributor/unit_tests.rst doc/source/contributor/user_messages.rst doc/source/contributor/samples/cephfs_local.conf doc/source/contributor/samples/container_local.conf doc/source/contributor/samples/dummy_local.conf doc/source/contributor/samples/generic_local.conf doc/source/contributor/samples/lvm_local.conf doc/source/contributor/samples/zfsonlinux_local.conf doc/source/images/rpc/arch.png doc/source/images/rpc/arch.svg doc/source/images/rpc/flow1.png doc/source/images/rpc/flow1.svg doc/source/images/rpc/flow2.png doc/source/images/rpc/flow2.svg doc/source/images/rpc/hds_network.jpg doc/source/images/rpc/rabt.png doc/source/images/rpc/rabt.svg doc/source/images/rpc/state.png 
doc/source/install/get-started-with-shared-file-systems.rst doc/source/install/index.rst doc/source/install/install-controller-debian.rst doc/source/install/install-controller-node.rst doc/source/install/install-controller-rdo.rst doc/source/install/install-controller-ubuntu.rst doc/source/install/install-share-debian.rst doc/source/install/install-share-node.rst doc/source/install/install-share-rdo.rst doc/source/install/install-share-ubuntu.rst doc/source/install/next-steps.rst doc/source/install/post-install.rst doc/source/install/verify.rst doc/source/install/common/controller-node-common-configuration.rst doc/source/install/common/controller-node-prerequisites.rst doc/source/install/common/dhss-false-mode-configuration.rst doc/source/install/common/dhss-false-mode-intro.rst doc/source/install/common/dhss-false-mode-using-shared-file-systems.rst doc/source/install/common/dhss-true-mode-configuration.rst doc/source/install/common/dhss-true-mode-intro.rst doc/source/install/common/dhss-true-mode-using-shared-file-systems.rst doc/source/install/common/figures doc/source/install/common/share-node-common-configuration.rst doc/source/install/common/share-node-share-server-modes.rst doc/source/install/figures/hwreqs.graffle doc/source/install/figures/hwreqs.png doc/source/install/figures/hwreqs.svg doc/source/reference/glossary.rst doc/source/reference/index.rst doc/source/user/create-and-manage-shares.rst doc/source/user/index.rst doc/source/user/share-network-operations.rst doc/source/user/share-network-subnet-operations.rst doc/source/user/troubleshooting-asynchronous-failures.rst etc/manila/README.manila.conf etc/manila/api-paste.ini etc/manila/logging_sample.conf etc/manila/manila-policy-generator.conf etc/manila/rootwrap.conf etc/manila/rootwrap.d/share.filters etc/oslo-config-generator/manila.conf httpd/manila-uwsgi.ini httpd/mod_wsgi-manila.conf httpd/uwsgi-manila.conf manila/__init__.py manila/context.py manila/coordination.py manila/exception.py 
manila/i18n.py manila/manager.py manila/opts.py manila/policy.py manila/quota.py manila/rpc.py manila/service.py manila/ssh_utils.py manila/test.py manila/utils.py manila/version.py manila.egg-info/PKG-INFO manila.egg-info/SOURCES.txt manila.egg-info/dependency_links.txt manila.egg-info/entry_points.txt manila.egg-info/not-zip-safe manila.egg-info/pbr.json manila.egg-info/requires.txt manila.egg-info/top_level.txt manila/api/__init__.py manila/api/common.py manila/api/extensions.py manila/api/urlmap.py manila/api/versions.py manila/api/contrib/__init__.py manila/api/middleware/__init__.py manila/api/middleware/auth.py manila/api/middleware/fault.py manila/api/openstack/__init__.py manila/api/openstack/api_version_request.py manila/api/openstack/rest_api_version_history.rst manila/api/openstack/urlmap.py manila/api/openstack/versioned_method.py manila/api/openstack/wsgi.py manila/api/schemas/__init__.py manila/api/schemas/availability_zones.py manila/api/schemas/messages.py manila/api/schemas/resource_locks.py manila/api/v1/__init__.py manila/api/v1/limits.py manila/api/v1/router.py manila/api/v1/scheduler_stats.py manila/api/v1/security_service.py manila/api/v1/share_manage.py manila/api/v1/share_metadata.py manila/api/v1/share_servers.py manila/api/v1/share_snapshots.py manila/api/v1/share_types_extra_specs.py manila/api/v1/share_unmanage.py manila/api/v1/shares.py manila/api/v2/__init__.py manila/api/v2/availability_zones.py manila/api/v2/messages.py manila/api/v2/metadata.py manila/api/v2/quota_class_sets.py manila/api/v2/quota_sets.py manila/api/v2/resource_locks.py manila/api/v2/router.py manila/api/v2/services.py manila/api/v2/share_access_metadata.py manila/api/v2/share_accesses.py manila/api/v2/share_backups.py manila/api/v2/share_export_locations.py manila/api/v2/share_group_snapshots.py manila/api/v2/share_group_type_specs.py manila/api/v2/share_group_types.py manila/api/v2/share_groups.py manila/api/v2/share_instance_export_locations.py 
manila/api/v2/share_instances.py manila/api/v2/share_network_subnets.py manila/api/v2/share_networks.py manila/api/v2/share_replica_export_locations.py manila/api/v2/share_replicas.py manila/api/v2/share_servers.py manila/api/v2/share_snapshot_export_locations.py manila/api/v2/share_snapshot_instance_export_locations.py manila/api/v2/share_snapshot_instances.py manila/api/v2/share_snapshots.py manila/api/v2/share_transfer.py manila/api/v2/share_types.py manila/api/v2/shares.py manila/api/validation/__init__.py manila/api/validation/helpers.py manila/api/validation/parameter_types.py manila/api/validation/parameters.yaml manila/api/validation/response_types.py manila/api/validation/validators.py manila/api/views/__init__.py manila/api/views/availability_zones.py manila/api/views/export_locations.py manila/api/views/limits.py manila/api/views/messages.py manila/api/views/quota_class_sets.py manila/api/views/quota_sets.py manila/api/views/resource_locks.py manila/api/views/scheduler_stats.py manila/api/views/security_service.py manila/api/views/services.py manila/api/views/share_accesses.py manila/api/views/share_backups.py manila/api/views/share_group_snapshots.py manila/api/views/share_group_types.py manila/api/views/share_groups.py manila/api/views/share_instance.py manila/api/views/share_migration.py manila/api/views/share_network_subnets.py manila/api/views/share_networks.py manila/api/views/share_replicas.py manila/api/views/share_server_migration.py manila/api/views/share_servers.py manila/api/views/share_snapshot_export_locations.py manila/api/views/share_snapshot_instances.py manila/api/views/share_snapshots.py manila/api/views/shares.py manila/api/views/transfers.py manila/api/views/types.py manila/api/views/versions.py manila/cmd/__init__.py manila/cmd/api.py manila/cmd/data.py manila/cmd/manage.py manila/cmd/scheduler.py manila/cmd/share.py manila/cmd/status.py manila/common/__init__.py manila/common/client_auth.py manila/common/config.py 
manila/common/constants.py manila/compute/__init__.py manila/compute/nova.py manila/data/__init__.py manila/data/backup_driver.py manila/data/helper.py manila/data/manager.py manila/data/rpcapi.py manila/data/utils.py manila/data/drivers/__init__.py manila/data/drivers/nfs.py manila/db/__init__.py manila/db/api.py manila/db/base.py manila/db/migration.py manila/db/migrations/__init__.py manila/db/migrations/alembic.ini manila/db/migrations/utils.py manila/db/migrations/alembic/__init__.py manila/db/migrations/alembic/env.py manila/db/migrations/alembic/migration.py manila/db/migrations/alembic/script.py.mako manila/db/migrations/alembic/versions/0274d20c560f_add_ou_to_security_service.py manila/db/migrations/alembic/versions/03da71c0e321_convert_cgs_to_share_groups.py manila/db/migrations/alembic/versions/097fad24d2fc_add_share_instances_share_id_index.py manila/db/migrations/alembic/versions/0c23aec99b74_add_per_share_gigabytes_quota_class.py manila/db/migrations/alembic/versions/0d8c8f6d54a4_modify_share_servers_table.py manila/db/migrations/alembic/versions/11ee96se625f3_add_metadata_for_access.py manila/db/migrations/alembic/versions/162a3e673105_manila_init.py manila/db/migrations/alembic/versions/17115072e1c3_add_nova_net_id_column_to_share_networks.py manila/db/migrations/alembic/versions/1946cb97bb8d_add_is_soft_deleted_and_scheduled_to_be_deleted_at_to_shares_table.py manila/db/migrations/alembic/versions/1e2d600bf972_add_transfers.py manila/db/migrations/alembic/versions/1f0bd302c1a6_add_availability_zones_table.py manila/db/migrations/alembic/versions/211836bf835c_add_access_level.py manila/db/migrations/alembic/versions/221a83cfd85b_change_user_project_id_length.py manila/db/migrations/alembic/versions/238720805ce1_add_messages_table.py manila/db/migrations/alembic/versions/27cb96d991fa_add_description_for_share_type.py manila/db/migrations/alembic/versions/293fac1130ca_add_replication_attrs.py 
manila/db/migrations/alembic/versions/2d708a9a3ba9_backup_change_az_to_az_id.py manila/db/migrations/alembic/versions/2f27d904214c_add_backup_type_column.py manila/db/migrations/alembic/versions/30cb96d995fa_add_is_public_column_for_share.py manila/db/migrations/alembic/versions/323840a08dc4_add_shares_task_state.py manila/db/migrations/alembic/versions/344c1ac4747f_add_share_instance_access_rules_status.py manila/db/migrations/alembic/versions/3651e16d7c43_add_consistency_groups.py manila/db/migrations/alembic/versions/38e632621e5a_change_volume_type_to_share_type.py manila/db/migrations/alembic/versions/3a482171410f_add_drivers_private_data_table.py manila/db/migrations/alembic/versions/3db9992c30f3_transform_statuses_to_lowercase.py manila/db/migrations/alembic/versions/3e7d62517afa_add_create_share_from_snapshot_support.py manila/db/migrations/alembic/versions/40d1f2374e89_add_mount_point_name_to_share_instances.py manila/db/migrations/alembic/versions/478c445d8d3e_add_security_service_update_control_fields.py manila/db/migrations/alembic/versions/48a7beae3117_move_share_type_id_to_instances.py manila/db/migrations/alembic/versions/493eaffd79e1_add_mtu_network_allocations_share_networks.py manila/db/migrations/alembic/versions/4a482571410f_add_backends_info_table.py manila/db/migrations/alembic/versions/4ee2cf4be19a_remove_share_snapshots_export_location.py manila/db/migrations/alembic/versions/5077ffcc5f1c_add_share_instances.py manila/db/migrations/alembic/versions/5155c7077f99_add_more_network_info_attributes_to_network_allocations_table.py manila/db/migrations/alembic/versions/5237b6625330_add_availability_zone_id_field_to_share_groups.py manila/db/migrations/alembic/versions/533646c7af38_remove_unused_attr_status.py manila/db/migrations/alembic/versions/54667b9cade7_restore_share_instance_access_map_state.py manila/db/migrations/alembic/versions/55761e5f59c5_add_snapshot_support_extra_spec_to_share_types.py 
manila/db/migrations/alembic/versions/56cdbe267881_add_share_export_locations_table.py manila/db/migrations/alembic/versions/579c267fbb4d_add_share_instances_access_map.py manila/db/migrations/alembic/versions/59eb64046740_add_required_extra_spec.py manila/db/migrations/alembic/versions/5aa813ae673d_add_task_state_field_for_share_servers.py manila/db/migrations/alembic/versions/63809d875e32_add_access_key.py manila/db/migrations/alembic/versions/6a3fd2984bc31_add_is_auto_deletable_and_identifier_fields_for_share_servers.py manila/db/migrations/alembic/versions/7d142971c4ef_add_reservation_expire_index.py manila/db/migrations/alembic/versions/805685098bd2_add_share_network_subnets_table_and_modify_share_servers_table.py manila/db/migrations/alembic/versions/829a09b0ddd4_fix_project_share_type_quotas_unique_constraint.py manila/db/migrations/alembic/versions/87ce15c59bbe_add_revert_to_snapshot_support.py manila/db/migrations/alembic/versions/927920b37453_add_provider_location_for_share_group_snapshot_members_model.py manila/db/migrations/alembic/versions/95e3cf760840_remove_nova_net_id_column_from_share_.py manila/db/migrations/alembic/versions/99d328f0a3d2_add_disable_reason_to_service.py manila/db/migrations/alembic/versions/9afbe2df4945_add_backup.py manila/db/migrations/alembic/versions/a77e2ad5012d_add_share_snapshot_access.py manila/db/migrations/alembic/versions/a87e0fb17dee_multiple_share_server_subnets.py manila/db/migrations/alembic/versions/ac0620cbe74d_add_share_network_subnet_metadata.py manila/db/migrations/alembic/versions/aebe2a413e13_add_state_column_for_service.py manila/db/migrations/alembic/versions/b10fb432c042_squash_share_group_snapshot_members_and_share_snapshot_instance_models.py manila/db/migrations/alembic/versions/b516de97bfee_add_quota_per_share_type_model.py manila/db/migrations/alembic/versions/bb5938d74b73_add_snapshot_metadata_table.py manila/db/migrations/alembic/versions/c476aeb186ec_add_default_ad_site_to_security_service.py 
manila/db/migrations/alembic/versions/cb20f743ca7b_add_resource_locks.py manila/db/migrations/alembic/versions/cdefa6287df8_add_ensuring_field_to_services.py manila/db/migrations/alembic/versions/d5db24264f5c_add_consistent_snapshot_support_attr_to_share_group_model.py manila/db/migrations/alembic/versions/dda6de06349_add_export_locations_metadata.py manila/db/migrations/alembic/versions/e1949a93157a_add_share_group_types_table.py manila/db/migrations/alembic/versions/e6d88547b381_add_progress_field_to_share_instance.py manila/db/migrations/alembic/versions/e8ea58723178_remove_host_from_driver_private_data.py manila/db/migrations/alembic/versions/e975ea83b712_add_share_server_encryption.py manila/db/migrations/alembic/versions/e9f79621d83f_add_cast_rules_to_readonly_to_share_instances.py manila/db/migrations/alembic/versions/eb6d5544cbbd_add_provider_location_to_share_snapshot_instances.py manila/db/migrations/alembic/versions/ef0c02b4366_add_share_type_projects.py manila/db/migrations/alembic/versions/fbdfabcba377_change_the_mysql_datetime_precision.py manila/db/migrations/alembic/versions/fdfb668d19e1_add_gateway_to_network_allocations_table.py manila/db/sqlalchemy/__init__.py manila/db/sqlalchemy/api.py manila/db/sqlalchemy/models.py manila/db/sqlalchemy/query.py manila/db/sqlalchemy/utils.py manila/image/__init__.py manila/image/glance.py manila/keymgr/__init__.py manila/keymgr/barbican.py manila/lock/__init__.py manila/lock/api.py manila/message/__init__.py manila/message/api.py manila/message/message_field.py manila/message/message_levels.py manila/network/__init__.py manila/network/standalone_network_plugin.py manila/network/linux/__init__.py manila/network/linux/interface.py manila/network/linux/ip_lib.py manila/network/linux/ovs_lib.py manila/network/neutron/__init__.py manila/network/neutron/api.py manila/network/neutron/constants.py manila/network/neutron/neutron_network_plugin.py manila/policies/__init__.py manila/policies/availability_zone.py 
manila/policies/base.py manila/policies/message.py manila/policies/quota_class_set.py manila/policies/quota_set.py manila/policies/resource_lock.py manila/policies/scheduler_stats.py manila/policies/security_service.py manila/policies/service.py manila/policies/share_access.py manila/policies/share_access_metadata.py manila/policies/share_backup.py manila/policies/share_export_location.py manila/policies/share_group.py manila/policies/share_group_snapshot.py manila/policies/share_group_type.py manila/policies/share_group_types_spec.py manila/policies/share_instance.py manila/policies/share_instance_export_location.py manila/policies/share_network.py manila/policies/share_network_subnet.py manila/policies/share_replica.py manila/policies/share_replica_export_location.py manila/policies/share_server.py manila/policies/share_snapshot.py manila/policies/share_snapshot_export_location.py manila/policies/share_snapshot_instance.py manila/policies/share_snapshot_instance_export_location.py manila/policies/share_transfer.py manila/policies/share_type.py manila/policies/share_types_extra_spec.py manila/policies/shares.py manila/privsep/__init__.py manila/privsep/common.py manila/privsep/filesystem.py manila/privsep/lvm.py manila/privsep/os.py manila/scheduler/__init__.py manila/scheduler/base_handler.py manila/scheduler/host_manager.py manila/scheduler/manager.py manila/scheduler/rpcapi.py manila/scheduler/scheduler_options.py manila/scheduler/utils.py manila/scheduler/drivers/__init__.py manila/scheduler/drivers/base.py manila/scheduler/drivers/chance.py manila/scheduler/drivers/filter.py manila/scheduler/drivers/simple.py manila/scheduler/evaluator/__init__.py manila/scheduler/evaluator/evaluator.py manila/scheduler/filters/__init__.py manila/scheduler/filters/affinity.py manila/scheduler/filters/availability_zone.py manila/scheduler/filters/base.py manila/scheduler/filters/base_host.py manila/scheduler/filters/capabilities.py manila/scheduler/filters/capacity.py 
manila/scheduler/filters/create_from_snapshot.py manila/scheduler/filters/driver.py manila/scheduler/filters/extra_specs_ops.py manila/scheduler/filters/host.py manila/scheduler/filters/ignore_attempted_hosts.py manila/scheduler/filters/json.py manila/scheduler/filters/retry.py manila/scheduler/filters/share_replication.py manila/scheduler/filters/share_group_filters/__init__.py manila/scheduler/filters/share_group_filters/consistent_snapshot.py manila/scheduler/weighers/__init__.py manila/scheduler/weighers/base.py manila/scheduler/weighers/base_host.py manila/scheduler/weighers/capacity.py manila/scheduler/weighers/goodness.py manila/scheduler/weighers/host_affinity.py manila/scheduler/weighers/netapp_aiq.py manila/scheduler/weighers/pool.py manila/services/__init__.py manila/services/api.py manila/share/__init__.py manila/share/access.py manila/share/api.py manila/share/configuration.py manila/share/driver.py manila/share/drivers_private_data.py manila/share/hook.py manila/share/manager.py manila/share/migration.py manila/share/rpcapi.py manila/share/share_types.py manila/share/snapshot_access.py manila/share/utils.py manila/share/drivers/__init__.py manila/share/drivers/generic.py manila/share/drivers/helpers.py manila/share/drivers/lvm.py manila/share/drivers/service_instance.py manila/share/drivers/cephfs/__init__.py manila/share/drivers/cephfs/driver.py manila/share/drivers/cephfs/conf/cephfs-export-template.conf manila/share/drivers/container/__init__.py manila/share/drivers/container/container_helper.py manila/share/drivers/container/driver.py manila/share/drivers/container/protocol_helper.py manila/share/drivers/container/security_service_helper.py manila/share/drivers/container/storage_helper.py manila/share/drivers/dell_emc/__init__.py manila/share/drivers/dell_emc/driver.py manila/share/drivers/dell_emc/plugin_manager.py manila/share/drivers/dell_emc/common/__init__.py manila/share/drivers/dell_emc/common/enas/__init__.py 
manila/share/drivers/dell_emc/common/enas/connector.py manila/share/drivers/dell_emc/common/enas/constants.py manila/share/drivers/dell_emc/common/enas/utils.py manila/share/drivers/dell_emc/common/enas/xml_api_parser.py manila/share/drivers/dell_emc/plugins/__init__.py manila/share/drivers/dell_emc/plugins/base.py manila/share/drivers/dell_emc/plugins/powerflex/__init__.py manila/share/drivers/dell_emc/plugins/powerflex/connection.py manila/share/drivers/dell_emc/plugins/powerflex/object_manager.py manila/share/drivers/dell_emc/plugins/powermax/__init__.py manila/share/drivers/dell_emc/plugins/powermax/connection.py manila/share/drivers/dell_emc/plugins/powermax/object_manager.py manila/share/drivers/dell_emc/plugins/powerscale/__init__.py manila/share/drivers/dell_emc/plugins/powerscale/powerscale.py manila/share/drivers/dell_emc/plugins/powerscale/powerscale_api.py manila/share/drivers/dell_emc/plugins/powerstore/__init__.py manila/share/drivers/dell_emc/plugins/powerstore/client.py manila/share/drivers/dell_emc/plugins/powerstore/connection.py manila/share/drivers/dell_emc/plugins/unity/__init__.py manila/share/drivers/dell_emc/plugins/unity/client.py manila/share/drivers/dell_emc/plugins/unity/connection.py manila/share/drivers/dell_emc/plugins/unity/utils.py manila/share/drivers/dell_emc/plugins/vnx/__init__.py manila/share/drivers/dell_emc/plugins/vnx/connection.py manila/share/drivers/dell_emc/plugins/vnx/object_manager.py manila/share/drivers/ganesha/__init__.py manila/share/drivers/ganesha/manager.py manila/share/drivers/ganesha/utils.py manila/share/drivers/ganesha/conf/00-base-export-template.conf manila/share/drivers/glusterfs/__init__.py manila/share/drivers/glusterfs/common.py manila/share/drivers/glusterfs/glusterfs_native.py manila/share/drivers/glusterfs/layout.py manila/share/drivers/glusterfs/layout_directory.py manila/share/drivers/glusterfs/layout_volume.py manila/share/drivers/glusterfs/conf/10-glusterfs-export-template.conf 
manila/share/drivers/hdfs/__init__.py manila/share/drivers/hdfs/hdfs_native.py manila/share/drivers/hitachi/__init__.py manila/share/drivers/hitachi/hnas/__init__.py manila/share/drivers/hitachi/hnas/driver.py manila/share/drivers/hitachi/hnas/ssh.py manila/share/drivers/hitachi/hsp/__init__.py manila/share/drivers/hitachi/hsp/driver.py manila/share/drivers/hitachi/hsp/rest.py manila/share/drivers/hpe/__init__.py manila/share/drivers/hpe/hpe_3par_driver.py manila/share/drivers/hpe/hpe_3par_mediator.py manila/share/drivers/huawei/__init__.py manila/share/drivers/huawei/base.py manila/share/drivers/huawei/constants.py manila/share/drivers/huawei/huawei_nas.py manila/share/drivers/huawei/huawei_utils.py manila/share/drivers/huawei/v3/__init__.py manila/share/drivers/huawei/v3/connection.py manila/share/drivers/huawei/v3/helper.py manila/share/drivers/huawei/v3/replication.py manila/share/drivers/huawei/v3/rpcapi.py manila/share/drivers/huawei/v3/smartx.py manila/share/drivers/ibm/__init__.py manila/share/drivers/ibm/gpfs.py manila/share/drivers/infinidat/__init__.py manila/share/drivers/infinidat/infinibox.py manila/share/drivers/infortrend/__init__.py manila/share/drivers/infortrend/driver.py manila/share/drivers/infortrend/infortrend_nas.py manila/share/drivers/inspur/__init__.py manila/share/drivers/inspur/as13000/__init__.py manila/share/drivers/inspur/as13000/as13000_nas.py manila/share/drivers/inspur/instorage/__init__.py manila/share/drivers/inspur/instorage/cli_helper.py manila/share/drivers/inspur/instorage/instorage.py manila/share/drivers/macrosan/__init__.py manila/share/drivers/macrosan/macrosan_constants.py manila/share/drivers/macrosan/macrosan_helper.py manila/share/drivers/macrosan/macrosan_nas.py manila/share/drivers/macrosan/rest_helper.py manila/share/drivers/maprfs/__init__.py manila/share/drivers/maprfs/driver_util.py manila/share/drivers/maprfs/maprfs_native.py manila/share/drivers/netapp/__init__.py manila/share/drivers/netapp/common.py 
manila/share/drivers/netapp/options.py manila/share/drivers/netapp/utils.py manila/share/drivers/netapp/dataontap/__init__.py manila/share/drivers/netapp/dataontap/client/__init__.py manila/share/drivers/netapp/dataontap/client/api.py manila/share/drivers/netapp/dataontap/client/client_base.py manila/share/drivers/netapp/dataontap/client/client_cmode.py manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py manila/share/drivers/netapp/dataontap/client/rest_api.py manila/share/drivers/netapp/dataontap/client/rest_endpoints.py manila/share/drivers/netapp/dataontap/cluster_mode/__init__.py manila/share/drivers/netapp/dataontap/cluster_mode/data_motion.py manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py manila/share/drivers/netapp/dataontap/cluster_mode/lib_single_svm.py manila/share/drivers/netapp/dataontap/cluster_mode/performance.py manila/share/drivers/netapp/dataontap/protocols/__init__.py manila/share/drivers/netapp/dataontap/protocols/base.py manila/share/drivers/netapp/dataontap/protocols/cifs_cmode.py manila/share/drivers/netapp/dataontap/protocols/nfs_cmode.py manila/share/drivers/nexenta/__init__.py manila/share/drivers/nexenta/options.py manila/share/drivers/nexenta/utils.py manila/share/drivers/nexenta/ns4/__init__.py manila/share/drivers/nexenta/ns4/jsonrpc.py manila/share/drivers/nexenta/ns4/nexenta_nas.py manila/share/drivers/nexenta/ns4/nexenta_nfs_helper.py manila/share/drivers/nexenta/ns5/__init__.py manila/share/drivers/nexenta/ns5/jsonrpc.py manila/share/drivers/nexenta/ns5/nexenta_nas.py manila/share/drivers/purestorage/__init__.py manila/share/drivers/purestorage/flashblade.py manila/share/drivers/qnap/__init__.py manila/share/drivers/qnap/api.py manila/share/drivers/qnap/qnap.py manila/share/drivers/quobyte/__init__.py 
manila/share/drivers/quobyte/jsonrpc.py manila/share/drivers/quobyte/quobyte.py manila/share/drivers/tegile/__init__.py manila/share/drivers/tegile/tegile.py manila/share/drivers/vastdata/__init__.py manila/share/drivers/vastdata/driver.py manila/share/drivers/vastdata/driver_util.py manila/share/drivers/vastdata/rest.py manila/share/drivers/veritas/__init__.py manila/share/drivers/veritas/veritas_isa.py manila/share/drivers/windows/__init__.py manila/share/drivers/windows/service_instance.py manila/share/drivers/windows/windows_smb_driver.py manila/share/drivers/windows/windows_smb_helper.py manila/share/drivers/windows/windows_utils.py manila/share/drivers/windows/winrm_helper.py manila/share/drivers/zadara/__init__.py manila/share/drivers/zadara/common.py manila/share/drivers/zadara/zadara.py manila/share/drivers/zfsonlinux/__init__.py manila/share/drivers/zfsonlinux/driver.py manila/share/drivers/zfsonlinux/utils.py manila/share/drivers/zfssa/__init__.py manila/share/drivers/zfssa/restclient.py manila/share/drivers/zfssa/zfssarest.py manila/share/drivers/zfssa/zfssashare.py manila/share/hooks/__init__.py manila/share_group/__init__.py manila/share_group/api.py manila/share_group/share_group_types.py manila/testing/README.rst manila/tests/__init__.py manila/tests/conf_fixture.py manila/tests/db_utils.py manila/tests/declare_conf.py manila/tests/fake_backup_driver.py manila/tests/fake_client_exception_class.py manila/tests/fake_compute.py manila/tests/fake_driver.py manila/tests/fake_image.py manila/tests/fake_network.py manila/tests/fake_notifier.py manila/tests/fake_service_instance.py manila/tests/fake_share.py manila/tests/fake_utils.py manila/tests/fake_volume.py manila/tests/fake_zfssa.py manila/tests/policy.yaml manila/tests/runtime_conf.py manila/tests/test_api.py manila/tests/test_conf.py manila/tests/test_context.py manila/tests/test_coordination.py manila/tests/test_exception.py manila/tests/test_hacking.py manila/tests/test_manager.py 
manila/tests/test_misc.py manila/tests/test_network.py manila/tests/test_policy.py manila/tests/test_quota.py manila/tests/test_rpc.py manila/tests/test_service.py manila/tests/test_ssh_utils.py manila/tests/test_test.py manila/tests/test_test_utils.py manila/tests/test_utils.py manila/tests/utils.py manila/tests/api/__init__.py manila/tests/api/common.py manila/tests/api/fakes.py manila/tests/api/test_common.py manila/tests/api/test_extensions.py manila/tests/api/test_middleware.py manila/tests/api/test_schemas.py manila/tests/api/test_validation.py manila/tests/api/test_versions.py manila/tests/api/test_wsgi.py manila/tests/api/contrib/__init__.py manila/tests/api/contrib/stubs.py manila/tests/api/extensions/__init__.py manila/tests/api/extensions/foxinsocks.py manila/tests/api/middleware/__init__.py manila/tests/api/middleware/test_auth.py manila/tests/api/middleware/test_faults.py manila/tests/api/openstack/__init__.py manila/tests/api/openstack/test_api_version_request.py manila/tests/api/openstack/test_versioned_method.py manila/tests/api/openstack/test_wsgi.py manila/tests/api/v1/__init__.py manila/tests/api/v1/stubs.py manila/tests/api/v1/test_limits.py manila/tests/api/v1/test_scheduler_stats.py manila/tests/api/v1/test_security_service.py manila/tests/api/v1/test_share_manage.py manila/tests/api/v1/test_share_metadata.py manila/tests/api/v1/test_share_servers.py manila/tests/api/v1/test_share_snapshots.py manila/tests/api/v1/test_share_types_extra_specs.py manila/tests/api/v1/test_share_unmanage.py manila/tests/api/v1/test_shares.py manila/tests/api/v2/__init__.py manila/tests/api/v2/stubs.py manila/tests/api/v2/test_availability_zones.py manila/tests/api/v2/test_messages.py manila/tests/api/v2/test_metadata.py manila/tests/api/v2/test_quota_class_sets.py manila/tests/api/v2/test_quota_sets.py manila/tests/api/v2/test_resource_locks.py manila/tests/api/v2/test_security_services.py manila/tests/api/v2/test_services.py 
manila/tests/api/v2/test_share_access_metadata.py manila/tests/api/v2/test_share_accesses.py manila/tests/api/v2/test_share_backups.py manila/tests/api/v2/test_share_export_locations.py manila/tests/api/v2/test_share_group_snapshots.py manila/tests/api/v2/test_share_group_type_specs.py manila/tests/api/v2/test_share_group_types.py manila/tests/api/v2/test_share_groups.py manila/tests/api/v2/test_share_instance_export_locations.py manila/tests/api/v2/test_share_instances.py manila/tests/api/v2/test_share_network_subnets.py manila/tests/api/v2/test_share_networks.py manila/tests/api/v2/test_share_replica_export_locations.py manila/tests/api/v2/test_share_replicas.py manila/tests/api/v2/test_share_servers.py manila/tests/api/v2/test_share_snapshot_export_locations.py manila/tests/api/v2/test_share_snapshot_instance_export_locations.py manila/tests/api/v2/test_share_snapshot_instances.py manila/tests/api/v2/test_share_snapshots.py manila/tests/api/v2/test_share_transfer.py manila/tests/api/v2/test_share_types.py manila/tests/api/v2/test_shares.py manila/tests/api/views/__init__.py manila/tests/api/views/test_quota_class_sets.py manila/tests/api/views/test_quota_sets.py manila/tests/api/views/test_scheduler_stats.py manila/tests/api/views/test_share_accesses.py manila/tests/api/views/test_share_network_subnets.py manila/tests/api/views/test_share_networks.py manila/tests/api/views/test_shares.py manila/tests/api/views/test_versions.py manila/tests/cmd/__init__.py manila/tests/cmd/test_api.py manila/tests/cmd/test_data.py manila/tests/cmd/test_manage.py manila/tests/cmd/test_scheduler.py manila/tests/cmd/test_share.py manila/tests/cmd/test_status.py manila/tests/common/__init__.py manila/tests/common/test_client_auth.py manila/tests/common/test_config.py manila/tests/compute/__init__.py manila/tests/compute/test_nova.py manila/tests/data/__init__.py manila/tests/data/test_helper.py manila/tests/data/test_manager.py manila/tests/data/test_rpcapi.py 
manila/tests/data/test_utils.py manila/tests/db/__init__.py manila/tests/db/fakes.py manila/tests/db/test_api.py manila/tests/db/test_migration.py manila/tests/db/migrations/__init__.py manila/tests/db/migrations/test_utils.py manila/tests/db/migrations/alembic/__init__.py manila/tests/db/migrations/alembic/migrations_data_checks.py manila/tests/db/migrations/alembic/test_migration.py manila/tests/db/sqlalchemy/__init__.py manila/tests/db/sqlalchemy/test_api.py manila/tests/db/sqlalchemy/test_models.py manila/tests/hacking/__init__.py manila/tests/hacking/checks.py manila/tests/image/__init__.py manila/tests/image/test_image.py manila/tests/integrated/__init__.py manila/tests/integrated/integrated_helpers.py manila/tests/integrated/test_extensions.py manila/tests/integrated/test_login.py manila/tests/integrated/api/__init__.py manila/tests/integrated/api/client.py manila/tests/keymgr/__init__.py manila/tests/keymgr/test_barbican.py manila/tests/lock/__init__.py manila/tests/lock/test_api.py manila/tests/message/__init__.py manila/tests/message/test_api.py manila/tests/message/test_message_field.py manila/tests/monkey_patch_example/__init__.py manila/tests/monkey_patch_example/example_a.py manila/tests/monkey_patch_example/example_b.py manila/tests/network/__init__.py manila/tests/network/test_standalone_network_plugin.py manila/tests/network/linux/__init__.py manila/tests/network/linux/test_interface.py manila/tests/network/linux/test_ip_lib.py manila/tests/network/linux/test_ovs_lib.py manila/tests/network/neutron/__init__.py manila/tests/network/neutron/test_neutron_api.py manila/tests/network/neutron/test_neutron_plugin.py manila/tests/scheduler/__init__.py manila/tests/scheduler/fakes.py manila/tests/scheduler/test_host_manager.py manila/tests/scheduler/test_manager.py manila/tests/scheduler/test_rpcapi.py manila/tests/scheduler/test_scheduler_options.py manila/tests/scheduler/test_utils.py manila/tests/scheduler/drivers/__init__.py 
manila/tests/scheduler/drivers/test_base.py manila/tests/scheduler/drivers/test_filter.py manila/tests/scheduler/drivers/test_simple.py manila/tests/scheduler/evaluator/__init__.py manila/tests/scheduler/evaluator/test_evaluator.py manila/tests/scheduler/filters/__init__.py manila/tests/scheduler/filters/test_affinity.py manila/tests/scheduler/filters/test_availability_zone.py manila/tests/scheduler/filters/test_base.py manila/tests/scheduler/filters/test_base_host.py manila/tests/scheduler/filters/test_capabilities.py manila/tests/scheduler/filters/test_capacity.py manila/tests/scheduler/filters/test_create_from_snapshot.py manila/tests/scheduler/filters/test_driver.py manila/tests/scheduler/filters/test_extra_specs_ops.py manila/tests/scheduler/filters/test_host.py manila/tests/scheduler/filters/test_ignore_attempted_hosts.py manila/tests/scheduler/filters/test_json.py manila/tests/scheduler/filters/test_retry.py manila/tests/scheduler/filters/test_share_replication.py manila/tests/scheduler/weighers/__init__.py manila/tests/scheduler/weighers/test_base.py manila/tests/scheduler/weighers/test_capacity.py manila/tests/scheduler/weighers/test_goodness.py manila/tests/scheduler/weighers/test_host_affinity.py manila/tests/scheduler/weighers/test_netapp_aiq.py manila/tests/scheduler/weighers/test_pool.py manila/tests/services/__init__.py manila/tests/services/test_api.py manila/tests/share/__init__.py manila/tests/share/test_access.py manila/tests/share/test_api.py manila/tests/share/test_driver.py manila/tests/share/test_drivers_private_data.py manila/tests/share/test_hook.py manila/tests/share/test_manager.py manila/tests/share/test_migration.py manila/tests/share/test_rpcapi.py manila/tests/share/test_share_types.py manila/tests/share/test_share_utils.py manila/tests/share/test_snapshot_access.py manila/tests/share/drivers/__init__.py manila/tests/share/drivers/dummy.py manila/tests/share/drivers/test_ganesha.py manila/tests/share/drivers/test_generic.py 
manila/tests/share/drivers/test_glusterfs.py manila/tests/share/drivers/test_helpers.py manila/tests/share/drivers/test_lvm.py manila/tests/share/drivers/test_service_instance.py manila/tests/share/drivers/cephfs/__init__.py manila/tests/share/drivers/cephfs/test_driver.py manila/tests/share/drivers/container/__init__.py manila/tests/share/drivers/container/fakes.py manila/tests/share/drivers/container/test_container_helper.py manila/tests/share/drivers/container/test_driver.py manila/tests/share/drivers/container/test_protocol_helper.py manila/tests/share/drivers/container/test_security_service_helper.py manila/tests/share/drivers/container/test_storage_helper.py manila/tests/share/drivers/dell_emc/__init__.py manila/tests/share/drivers/dell_emc/test_driver.py manila/tests/share/drivers/dell_emc/common/__init__.py manila/tests/share/drivers/dell_emc/common/enas/__init__.py manila/tests/share/drivers/dell_emc/common/enas/fakes.py manila/tests/share/drivers/dell_emc/common/enas/test_connector.py manila/tests/share/drivers/dell_emc/common/enas/test_utils.py manila/tests/share/drivers/dell_emc/common/enas/utils.py manila/tests/share/drivers/dell_emc/plugins/__init__.py manila/tests/share/drivers/dell_emc/plugins/powerflex/__init__.py manila/tests/share/drivers/dell_emc/plugins/powerflex/test_connection.py manila/tests/share/drivers/dell_emc/plugins/powerflex/test_object_manager.py manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/create_filesystem_response.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/create_nfs_export_response.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/create_nfs_snapshot_response.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_fileystem_id_response.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_fsid_from_export_name_response.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_fsid_from_snapshot_name_response.json 
manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_nas_server_id_response.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_nfs_export_id_response.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_nfs_export_name_response.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_storage_pool_id_response.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_storage_pool_spare_percentage.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/get_storage_pool_statistic.json manila/tests/share/drivers/dell_emc/plugins/powerflex/mockup/login_response.json manila/tests/share/drivers/dell_emc/plugins/powermax/__init__.py manila/tests/share/drivers/dell_emc/plugins/powermax/test_connection.py manila/tests/share/drivers/dell_emc/plugins/powermax/test_object_manager.py manila/tests/share/drivers/dell_emc/plugins/powerscale/__init__.py manila/tests/share/drivers/dell_emc/plugins/powerscale/test_powerscale.py manila/tests/share/drivers/dell_emc/plugins/powerscale/test_powerscale_api.py manila/tests/share/drivers/dell_emc/plugins/powerstore/__init__.py manila/tests/share/drivers/dell_emc/plugins/powerstore/test_client.py manila/tests/share/drivers/dell_emc/plugins/powerstore/test_connection.py manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/clone_snapshot_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_filesystem_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_nfs_export_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_smb_share_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/create_snapshot_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_cluster_id_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_fileystem_id_response.json 
manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_fsid_from_export_name_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_fsid_from_share_name_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nas_server_id_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nas_server_interfaces_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nas_server_smb_netbios_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nfs_export_id_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_nfs_export_name_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/get_smb_share_id_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/resize_filesystem_shrink_failure_response.json manila/tests/share/drivers/dell_emc/plugins/powerstore/mockup/retreive_cluster_capacity_metrics_response.json manila/tests/share/drivers/dell_emc/plugins/unity/__init__.py manila/tests/share/drivers/dell_emc/plugins/unity/fake_exceptions.py manila/tests/share/drivers/dell_emc/plugins/unity/mocked_manila.yaml manila/tests/share/drivers/dell_emc/plugins/unity/mocked_unity.yaml manila/tests/share/drivers/dell_emc/plugins/unity/res_mock.py manila/tests/share/drivers/dell_emc/plugins/unity/test_client.py manila/tests/share/drivers/dell_emc/plugins/unity/test_connection.py manila/tests/share/drivers/dell_emc/plugins/unity/test_utils.py manila/tests/share/drivers/dell_emc/plugins/unity/utils.py manila/tests/share/drivers/dell_emc/plugins/vnx/__init__.py manila/tests/share/drivers/dell_emc/plugins/vnx/test_connection.py manila/tests/share/drivers/dell_emc/plugins/vnx/test_object_manager.py manila/tests/share/drivers/ganesha/__init__.py manila/tests/share/drivers/ganesha/test_manager.py manila/tests/share/drivers/ganesha/test_utils.py manila/tests/share/drivers/glusterfs/__init__.py 
manila/tests/share/drivers/glusterfs/test_common.py manila/tests/share/drivers/glusterfs/test_glusterfs_native.py manila/tests/share/drivers/glusterfs/test_layout.py manila/tests/share/drivers/glusterfs/test_layout_directory.py manila/tests/share/drivers/glusterfs/test_layout_volume.py manila/tests/share/drivers/hdfs/__init__.py manila/tests/share/drivers/hdfs/test_hdfs_native.py manila/tests/share/drivers/hitachi/__init__.py manila/tests/share/drivers/hitachi/hnas/__init__.py manila/tests/share/drivers/hitachi/hnas/test_driver.py manila/tests/share/drivers/hitachi/hnas/test_ssh.py manila/tests/share/drivers/hitachi/hsp/__init__.py manila/tests/share/drivers/hitachi/hsp/fakes.py manila/tests/share/drivers/hitachi/hsp/test_driver.py manila/tests/share/drivers/hitachi/hsp/test_rest.py manila/tests/share/drivers/hpe/__init__.py manila/tests/share/drivers/hpe/test_hpe_3par_constants.py manila/tests/share/drivers/hpe/test_hpe_3par_driver.py manila/tests/share/drivers/hpe/test_hpe_3par_mediator.py manila/tests/share/drivers/huawei/__init__.py manila/tests/share/drivers/huawei/test_huawei_nas.py manila/tests/share/drivers/ibm/__init__.py manila/tests/share/drivers/ibm/test_gpfs.py manila/tests/share/drivers/infinidat/__init__.py manila/tests/share/drivers/infinidat/test_infinidat.py manila/tests/share/drivers/infortrend/__init__.py manila/tests/share/drivers/infortrend/fake_infortrend_manila_data.py manila/tests/share/drivers/infortrend/fake_infortrend_nas_data.py manila/tests/share/drivers/infortrend/test_infortrend_nas.py manila/tests/share/drivers/inspur/__init__.py manila/tests/share/drivers/inspur/as13000/__init__.py manila/tests/share/drivers/inspur/as13000/test_as13000_nas.py manila/tests/share/drivers/inspur/instorage/__init__.py manila/tests/share/drivers/inspur/instorage/test_instorage.py manila/tests/share/drivers/macrosan/__init__.py manila/tests/share/drivers/macrosan/test_macrosan_nas.py manila/tests/share/drivers/maprfs/__init__.py 
manila/tests/share/drivers/maprfs/test_maprfs.py manila/tests/share/drivers/netapp/__init__.py manila/tests/share/drivers/netapp/fakes.py manila/tests/share/drivers/netapp/test_common.py manila/tests/share/drivers/netapp/test_utils.py manila/tests/share/drivers/netapp/dataontap/__init__.py manila/tests/share/drivers/netapp/dataontap/fakes.py manila/tests/share/drivers/netapp/dataontap/client/__init__.py manila/tests/share/drivers/netapp/dataontap/client/fakes.py manila/tests/share/drivers/netapp/dataontap/client/test_api.py manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py manila/tests/share/drivers/netapp/dataontap/client/test_rest_api.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/__init__.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_data_motion.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_driver_interfaces.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_single_svm.py manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_performance.py manila/tests/share/drivers/netapp/dataontap/protocols/__init__.py manila/tests/share/drivers/netapp/dataontap/protocols/fakes.py manila/tests/share/drivers/netapp/dataontap/protocols/test_base.py manila/tests/share/drivers/netapp/dataontap/protocols/test_cifs_cmode.py manila/tests/share/drivers/netapp/dataontap/protocols/test_nfs_cmode.py manila/tests/share/drivers/nexenta/__init__.py manila/tests/share/drivers/nexenta/test_utils.py manila/tests/share/drivers/nexenta/ns4/__init__.py manila/tests/share/drivers/nexenta/ns4/test_jsonrpc.py manila/tests/share/drivers/nexenta/ns4/test_nexenta_nas.py 
manila/tests/share/drivers/nexenta/ns5/__init__.py manila/tests/share/drivers/nexenta/ns5/test_jsonrpc.py manila/tests/share/drivers/nexenta/ns5/test_nexenta_nas.py manila/tests/share/drivers/purestorage/__init__.py manila/tests/share/drivers/purestorage/test_flashblade.py manila/tests/share/drivers/qnap/__init__.py manila/tests/share/drivers/qnap/fakes.py manila/tests/share/drivers/qnap/test_api.py manila/tests/share/drivers/qnap/test_qnap.py manila/tests/share/drivers/quobyte/__init__.py manila/tests/share/drivers/quobyte/test_jsonrpc.py manila/tests/share/drivers/quobyte/test_quobyte.py manila/tests/share/drivers/tegile/__init__.py manila/tests/share/drivers/tegile/test_tegile.py manila/tests/share/drivers/vastdata/__init__.py manila/tests/share/drivers/vastdata/test_driver.py manila/tests/share/drivers/vastdata/test_driver_util.py manila/tests/share/drivers/vastdata/test_rest.py manila/tests/share/drivers/veritas/__init__.py manila/tests/share/drivers/veritas/test_veritas_isa.py manila/tests/share/drivers/windows/__init__.py manila/tests/share/drivers/windows/test_service_instance.py manila/tests/share/drivers/windows/test_windows_smb_driver.py manila/tests/share/drivers/windows/test_windows_smb_helper.py manila/tests/share/drivers/windows/test_windows_utils.py manila/tests/share/drivers/windows/test_winrm_helper.py manila/tests/share/drivers/zadara/__init__.py manila/tests/share/drivers/zadara/test_zadara.py manila/tests/share/drivers/zfsonlinux/__init__.py manila/tests/share/drivers/zfsonlinux/test_driver.py manila/tests/share/drivers/zfsonlinux/test_utils.py manila/tests/share/drivers/zfssa/__init__.py manila/tests/share/drivers/zfssa/test_zfssarest.py manila/tests/share/drivers/zfssa/test_zfssashare.py manila/tests/share_group/__init__.py manila/tests/share_group/test_api.py manila/tests/share_group/test_share_group_types.py manila/tests/var/ca.crt manila/tests/var/certificate.crt manila/tests/var/privatekey.key manila/tests/volume/__init__.py 
manila/tests/volume/test_cinder.py manila/tests/wsgi/__init__.py manila/tests/wsgi/test_common.py manila/tests/wsgi/test_wsgi.py manila/tests/xenapi/__init__.py manila/transfer/__init__.py manila/transfer/api.py manila/volume/__init__.py manila/volume/cinder.py manila/wsgi/__init__.py manila/wsgi/api.py manila/wsgi/common.py manila/wsgi/eventlet_server.py manila/wsgi/wsgi.py playbooks/manila-tox-genconfig/post.yaml rally-jobs/rally-manila-no-ss.yaml rally-jobs/rally-manila.yaml releasenotes/notes/1741425-fix-service-image-retrieval-issue-bf7ae3e5aa9446e9.yaml releasenotes/notes/1841035-dellemc-unity-fix-ace-enable-error-b00281bb306d176b.yaml releasenotes/notes/1896949-netapp-fix-cifs-access-rules-promoted-replica-70b32991cc90e1ca.yaml releasenotes/notes/1900191-netapp-fix-ssl-cert-path-option-35354c9b7a9c37e6.yaml releasenotes/notes/1900469-netapp-cache-pool-status-6dc7da824b9f41c1.yaml releasenotes/notes/1901189-netapp-fix-kerberos-setup-357753068a5645ad.yaml releasenotes/notes/3par-add-update-access-68fc12ffc099f480.yaml releasenotes/notes/3par-fix-get_vfs-driver-bootup-db6b085eb6094f5f.yaml releasenotes/notes/3par-pool-support-fb43b368214c9eda.yaml releasenotes/notes/Huawei-driver-utilize-requests-lib-67f2c4e7ae0d2efa.yaml releasenotes/notes/Use-http_proxy_to_wsgi-instead-of-ssl-middleware-df533a2c2d9c3a61.yaml releasenotes/notes/add-ability-to-check-tenant-quota-usages-7fs17djahy61nsd6.yaml releasenotes/notes/add-access-key-to-share-access-map-2fda4c06a750e24e.yaml releasenotes/notes/add-access-visibility-and-delete-locks-52a7ef235813d147.yaml releasenotes/notes/add-admin-only-keys-to-share-metadata-5301424ccd9edf8a.yaml releasenotes/notes/add-and-update-security-services-to-in-use-share-servers-on-container-driver-52193447c18e6d10.yaml releasenotes/notes/add-cast-rules-to-readonly-field-62ead37b728db654.yaml releasenotes/notes/add-cleanup-create-from-snap-hnas-0e0431f1fc861a4e.yaml releasenotes/notes/add-count-info-in-share-21a6b36c0f4c87b9.yaml 
releasenotes/notes/add-count-info-in-share-snapshot-eee90f1471f7a5c4.yaml releasenotes/notes/add-create-share-from-snapshot-another-pool-or-backend-98d61fe753b85632.yaml releasenotes/notes/add-create_share_from_snapshot_support-extra-spec-9b1c3ad6796dd07d.yaml releasenotes/notes/add-defaultadsite-to-security-service-e90854c1a69be581.yaml releasenotes/notes/add-details-to-migration-get-progress-df8b3f2c524db1bd.yaml releasenotes/notes/add-ensure-shares-api-9ac10877a99ab0c5.yaml releasenotes/notes/add-export-location-filter-92ead37b728db654.yaml releasenotes/notes/add-export-locations-api-6fc6086c6a081faa.yaml releasenotes/notes/add-flashblade-driver-de20b758a8ce2640.yaml releasenotes/notes/add-format-output-to-manila-manage-c0bbccb16369e5d3.yaml releasenotes/notes/add-gathering-usage-size-8454sd45deopb14e.yaml releasenotes/notes/add-healthcheck-middleware-8f659afb7ee0451c.yaml releasenotes/notes/add-hsp-default-filter-function-0af60a819faabfec.yaml releasenotes/notes/add-ipv6-32d89161a9a1e0b4.yaml releasenotes/notes/add-is-default-e49727d276dd9bc3.yaml releasenotes/notes/add-like-filter-4c1d6dc02f40d5a5.yaml releasenotes/notes/add-manage-db-purge-b32a24ee045d8d45.yaml releasenotes/notes/add-manila-wsgi-module-ad9bb910f7f3b816.yaml releasenotes/notes/add-per-share-gigabytes-quotas-f495eb0b27378660.yaml releasenotes/notes/add-perodic-task-7454sd45deopb13e.yaml releasenotes/notes/add-policy-in-code-c31a24ee045d8d21.yaml releasenotes/notes/add-quiesce-wait-time-to-promote-replica-0757f36d46304a93.yaml releasenotes/notes/add-quotas-section-0e1e638a8f14d26e.yaml releasenotes/notes/add-reserved-share-from-snapshot-percentage-2d913ae1fc533690.yaml releasenotes/notes/add-sec-service-user-msg-10054933ff33e347.yaml releasenotes/notes/add-share-access-metadata-4fda2c06e750e83c.yaml releasenotes/notes/add-share-and-share-server-user-provided-encryption-key-191a7587c49dc0f3.yaml releasenotes/notes/add-share-group-quotas-4e426907eed4c000.yaml 
releasenotes/notes/add-share-migration-support-in-zfsonlinux-driver-88e6da5692b50810.yaml releasenotes/notes/add-share-server-limits-fb25ea9f214534af.yaml releasenotes/notes/add-share-server-migration-51deb30212859277.yaml releasenotes/notes/add-share-server-migration-enhancements-bbbc98a7fb419700.yaml releasenotes/notes/add-share-type-filter-to-pool-list-api-267614b4d93j12de.yaml releasenotes/notes/add-share-type-info-to-notifications-7fb4597642a6e8e5.yaml releasenotes/notes/add-share-type-quotas-33a6b36c0f4c88b1.yaml releasenotes/notes/add-snapshot-instances-admin-api-959a1121aa407629.yaml releasenotes/notes/add-support-filter-search-for-share-type-fdbaaa9510cc59dd.yaml-5655800975cec5d4.yaml releasenotes/notes/add-tegile-driver-1859114513edb13e.yaml releasenotes/notes/add-tenant-quota-for-share-replicas-and-replicas-size-565ffca315afb6f0.yaml releasenotes/notes/add-two-new-fields-to-share-groups-api-bc576dddd58a3086.yaml releasenotes/notes/add-update-host-command-to-manila-manage-b32ad5017b564c9e.yaml releasenotes/notes/add-update-security-service-for-in-use-share-networks-c60d82898c71eb4a.yaml releasenotes/notes/add-updated-at-in-share-instance-show-bdd934b26bdab414.yaml releasenotes/notes/add-user-id-echo-8f42db469b27ff14.yaml releasenotes/notes/add-vastdriver-5a2ca79a81bc9280.yaml releasenotes/notes/add_disabled_reason_to_services-8369aaa2985ada25.yaml releasenotes/notes/add_export_location_metadata-d3c279b73f4c4728.yaml releasenotes/notes/add_gateway_into_db-1f3cd3f392ae81cf.yaml releasenotes/notes/add_mtu_info_db-3c1d6dc02f40d5a6.yaml releasenotes/notes/add_share_network_subnet_metadata-ddee482d93030fc3.yaml releasenotes/notes/add_snapshot_metadata-bd986e338220c90e.yaml releasenotes/notes/add_support_multiple_subnet_per_az-e7b0359f4e8eca48.yaml releasenotes/notes/add_user_id_and_project_id_to_snapshot_APIs-157614b4b8d01e15.yaml releasenotes/notes/added-possibility-to-run-manila-api-with-web-servers-that-support-wsgi-apps-cfffe0b789f8670a.yaml 
releasenotes/notes/admin-only-metadata-xena-issue-91690edef7bc13aa.yaml releasenotes/notes/affinity-filter-747d3d7c51157172.yaml releasenotes/notes/allow-and-deny-access-rule-if-any-instance-is-valid-0e092913d30dbcdd.yaml releasenotes/notes/api-versions-mark-v1-deprecated-3540d39279fbd60e.yaml releasenotes/notes/blueprint-netapp-snapshot-visibility-4f090a20145fbf34.yaml releasenotes/notes/blueprint-share-and-snapshot-deferred-deletion-b3453718fd1e4b56.yaml releasenotes/notes/bp-admin-network-hnas-9b714736e521101e.yaml releasenotes/notes/bp-allow-locking-shares-against-deletion-5a715292e720a254.yaml releasenotes/notes/bp-create-share-from-snapshot-cephfs-080bd6c2ece74c5b.yaml releasenotes/notes/bp-dell-powerflex-manila-driver-2c496483242e555a.yaml releasenotes/notes/bp-dell-powerscale-ensure-shares-f2634d498a679d23.yaml releasenotes/notes/bp-dell-powerscale-thin-provisioning-71a8c25322d67a6b.yaml releasenotes/notes/bp-dell-powerscale-update-share-stats-1408fac91ab3528b.yaml releasenotes/notes/bp-dell-powerstore-manila-driver-263489b0d0b10e2e.yaml releasenotes/notes/bp-export-locations-az-api-changes-c8aa1a3a5bc86312.yaml releasenotes/notes/bp-integrate-os-profiler-b637041861029175.yaml releasenotes/notes/bp-netapp-active-iq-scheduler-weigher-df0a6709a63a1f6f.yaml releasenotes/notes/bp-netapp-ontap-storage-based-cryptograpy-bb7e28896e2a2539.yaml releasenotes/notes/bp-ocata-migration-improvements-c8c5675e266100da.yaml releasenotes/notes/bp-pass-resource-metadata-updates-to-backend-drivers-7fff302f64fda2d7.yaml releasenotes/notes/bp-pass-share-network-subnet-metadata-updates-to-backend-drivers-10441eee8375f146.yaml releasenotes/notes/bp-remove-project-id-from-urls-9f338371b8ffa203.yaml releasenotes/notes/bp-rename-isilon-to-powerscale-8e29d71c9e3629c3.yaml releasenotes/notes/bp-share-transfer-between-project-5c2ba9944b17e26e.yaml releasenotes/notes/bp-share-type-supported-azs-2e12ed406f181b3b.yaml 
releasenotes/notes/bp-support-group-spec-search-share-group-type-api-df55d056b622ced7.yaml releasenotes/notes/bp-support-query-user-message-by-timestamp-c0a02b3b3e337e12.yaml releasenotes/notes/bp-update-cephfs-drivers-9ac5165f31669030.yaml releasenotes/notes/bp-update-share-type-name-or-description-a39c5991b930932f.yaml releasenotes/notes/bug-1271568-fix-rpc-init-host-with-rpc-6e76afa553b4f2af.yaml releasenotes/notes/bug-1475351-handle-successful-deletion-of-snapshot-if-quota-commit-fails-4d150bf0b71a2fd9.yaml releasenotes/notes/bug-1578328-fix-replica-deletion-in-cDOT-7e4502fb50b69507.yaml releasenotes/notes/bug-1591357-fix-cannot-remove-user-rule-for-NFS-8e1130e2accabd56.yaml releasenotes/notes/bug-1597940-fix-hpe3par-delete-share-0daf75193f318c41.yaml releasenotes/notes/bug-1602525-port_binding_mandatory-2aaba0fa72b82676.yaml releasenotes/notes/bug-1607029-fix-share-server-deletion-when-interfaces-dont-exist-4d00fe9dafadc252.yaml releasenotes/notes/bug-1613303-fix-config-generator-18b9f9be40d7eee6.yaml releasenotes/notes/bug-1624526-netapp-cdot-filter-root-aggregates-c30ac5064d530b86.yaml releasenotes/notes/bug-1626249-reintroduce-per-share-instance-access-rule-state-7c08a91373b21557.yaml releasenotes/notes/bug-1626523-migration-rw-access-fix-7da3365c7b5b90a1.yaml releasenotes/notes/bug-1634278-unmount-orig-active-after-promote-8e24c099ddc1e564.yaml releasenotes/notes/bug-1634734-fix-backend-extraspec-for-replication-d611d2227997ae3e.yaml releasenotes/notes/bug-1638896-missing-migration-completing-state-1e4926ed56eb268c.yaml releasenotes/notes/bug-1638994-drop-fake-cg-support-from-generic-driver-16efce98f94b1b6b.yaml releasenotes/notes/bug-1639188-fix-extend-operation-of-shrinked-share-in-generic-driver-5c7f82faefaf26ea.yaml releasenotes/notes/bug-1639662-fix-share-service-VM-restart-problem-1110f9133cc294e8.yaml releasenotes/notes/bug-1640169-check-ceph-connection-on-setup-c92bde41ced43326.yaml 
releasenotes/notes/bug-1645746-fix-inheritance-of-access-rules-from-parent-share-by-zfsonlinux-child-shares-4f85908c8e9871ef.yaml releasenotes/notes/bug-1645751-fixed-shares-created-from-snapshots-for-lvm-and-generic-drivers-94a1161a9e0b5a85.yaml releasenotes/notes/bug-1646603-netapp-broadcast-domains-411a626d38835177.yaml releasenotes/notes/bug-1649782-fixed-incorrect-exportfs-exportfs.yaml releasenotes/notes/bug-1650043-gpfs-access-bugs-8c10f26ff1f795f4.yaml releasenotes/notes/bug-1651578-gpfs-prepend-beb99f408cf20bb5.yaml releasenotes/notes/bug-1651587-deny-access-verify-563ef2f3f6b8c13b.yaml releasenotes/notes/bug-1654598-enforce-policy-checks-for-share-export-locations-a5cea1ec123b1469.yaml releasenotes/notes/bug-1657033-fix-share-metadata-error-when-deleting-share.yaml releasenotes/notes/bug-1658133-fix-lvm-revert-34a90e70c9aa7354.yaml releasenotes/notes/bug-1659023-netapp-cg-fix-56bb77b7bc61c3f5.yaml releasenotes/notes/bug-1660319-1660336-migration-share-groups-e66a1478634947ad.yaml releasenotes/notes/bug-1660321-fix-default-approach-for-share-group-snapshot-creation-3e843155c395e861.yaml releasenotes/notes/bug-1660425-snapshot-access-in-error-bce279ee310060f5.yaml releasenotes/notes/bug-1660686-snapshot-export-locations-mount-not-supported-cdc2f5a3b57a9319.yaml releasenotes/notes/bug-1660726-migration-export-locations-5670734670435015.yaml releasenotes/notes/bug-1661266-add-consistent-snapshot-support-attr-to-share-groups-DB-model-daa1d05129802796.yaml releasenotes/notes/bug-1661271-hnas-snapshot-readonly-4e50183100ed2b19.yaml releasenotes/notes/bug-1661381-migration-snapshot-export-locations-169786dcec386402.yaml releasenotes/notes/bug-1662615-hnas-snapshot-concurrency-2147159ea6b086c5.yaml releasenotes/notes/bug-1663300-554e9c78ca2ba992.yaml releasenotes/notes/bug-1664201-fix-share-replica-status-update-concurrency-in-replica-promotion-feature-63b15d96106c65da.yaml releasenotes/notes/bug-1665002-hnas-driver-version-f3a8f6bff3dbe054.yaml 
releasenotes/notes/bug-1665072-migration-success-fix-3da1e80fbab666de.yaml releasenotes/notes/bug-1666541-quobyte-resize-list-param-bc5b9c42bdc94c9f.yaml releasenotes/notes/bug-1667450-migration-stale-source-9c092fee267a7a0f.yaml releasenotes/notes/bug-1674908-allow-user-access-fix-495b3e42bdc985ec.yaml releasenotes/notes/bug-1678524-check-snaprestore-license-for-snapshot-revert-6d32afdc5d0b2b51.yaml releasenotes/notes/bug-1682795-share-access-list-api-5b1e86218959f796.yaml releasenotes/notes/bug-1684032-6e4502fdceb693dr7.yaml releasenotes/notes/bug-1688620-netapp-migration-cancelation-fb913131eb8eb82a.yaml releasenotes/notes/bug-1690159-retry-backend-init-58486ea420feaf51.yaml releasenotes/notes/bug-1690785-fix-gpfs-path-91a354bc69bf6a47.yaml releasenotes/notes/bug-1694768-fix-netapp-cdot-revert-to-snapshot-5e1be65260454988.yaml releasenotes/notes/bug-1696000-netapp-fix-security-style-on-cifs-shares-cbdd557a27d11961.yaml releasenotes/notes/bug-1696669-add-ou-to-security-service-06b69615bd417d40.yaml releasenotes/notes/bug-1698250-netapp-cdot-fix-share-server-deletion-494ab3ad1c0a97c0.yaml releasenotes/notes/bug-1698258-netapp-fix-tenant-network-gateways-85935582e89a72a0.yaml releasenotes/notes/bug-1699836-disallow-share-type-deletion-with-active-share-group-types-83809532d06ef0dd.yaml releasenotes/notes/bug-1700346-new-exception-for-no-default-share-type-b1dd9bbe8c9cb3df.yaml releasenotes/notes/bug-1700871-ontap-allow-extend-of-replicated-share-2c9709180d954308.yaml releasenotes/notes/bug-1703581-cifs-extension-failing-because-of-volume-in-use-3fea31c4a58e2f1b.yaml releasenotes/notes/bug-1703660-fix-netapp-driver-preferred-state-0ce1a62961cded35.yaml releasenotes/notes/bug-1704622-netapp-cdot-fix-share-specs-on-migration-bfbbebec26533652.yaml releasenotes/notes/bug-1704971-fix-name-description-filter-85935582e89a72a0.yaml releasenotes/notes/bug-1705533-manage-api-error-message-fix-967b0d44c09b914a.yaml 
releasenotes/notes/bug-1706137-netapp-manila-set-valid-qos-during-migration-4405fff02bd6fa83.yaml releasenotes/notes/bug-1707066-deny-ipv6-access-in-error-bce379ee310060f6.yaml releasenotes/notes/bug-1707084-netapp-manila-driver-to-honour-std-extra-specs-d32fae4e9411b503.yaml releasenotes/notes/bug-1707943-make-lvm-revert-synchronous-0ef5baee3367fd27.yaml releasenotes/notes/bug-1707946-nfs-helper-0-netmask-224da94b82056f93.yaml releasenotes/notes/bug-1714691-decimal-separators-in-locales-392c0c794c49c1c2.yaml releasenotes/notes/bug-1716922-security-group-creation-failed-d46085d11370d918.yaml releasenotes/notes/bug-1717135-ganesha-cleanup-of-tmp-config-files-66082b2384ace0a5.yaml releasenotes/notes/bug-1717263-netapp-ontap-fix-size-for-share-from-snapshot-02385baa7e085f39.yaml releasenotes/notes/bug-1717392-fix-downgrade-share-access-map-bbd5fe9cc7002f2d.yaml releasenotes/notes/bug-172112-fix-drives-private-storage-update-deleted-entries-7516ba624da2dda7.yaml releasenotes/notes/bug-1721787-fix-getting-share-networks-and-security-services-error-7e5e7981fcbf2b53.yaml releasenotes/notes/bug-1730509-netapp-ipv6-hostname-39abc7f40d48c844.yaml releasenotes/notes/bug-1733494-allow-user-group-name-with-blank-access-fix-665b3e42bdc985ac.yaml releasenotes/notes/bug-1734127-a239d022bef4a002.yaml releasenotes/notes/bug-1735832-43e9291ddd73286d.yaml releasenotes/notes/bug-1736370-qnap-fix-access-rule-override-1b79b70ae48ad9e6.yaml releasenotes/notes/bug-1745436-78c46f8a0c96cbca.yaml releasenotes/notes/bug-1745436-remove-data-node-access-ip-config-opt-709f330c57cdb0d5.yaml releasenotes/notes/bug-1746202-fix-unicodeDecodeError-when-decode-API-input-4e4502fb50b69502.yaml releasenotes/notes/bug-1746723-8b89633062885f0b.yaml releasenotes/notes/bug-1747695-fixed-ip-version-in-neutron-bind-network-plugin-526958e2d83df072.yaml releasenotes/notes/bug-1748139-generic-dont-check-socket-login-e2dd1dbc42ae281a.yaml releasenotes/notes/bug-1749184-eb06929e76a14fce.yaml 
releasenotes/notes/bug-1750074-fix-rabbitmq-password-in-debug-mode-4e136ff86223c4ea.yaml releasenotes/notes/bug-1765420-netapp-fix-delete-share-for-vsadmins-b5dc9e0224cb3ba2.yaml releasenotes/notes/bug-1767430-access-control-raise-ip-address-conflict-on-host-routes-0c298125fee4a640.yaml releasenotes/notes/bug-1772026-nve-license-not-present-fix-e5d2e0d6c5df9227.yaml releasenotes/notes/bug-1772647-b98025c07553e35d.yaml releasenotes/notes/bug-1773761-qnap-fix-manage-share-size-override-a18acdf1a41909b0.yaml releasenotes/notes/bug-1773929-a5cb52c8417ec5fc.yaml releasenotes/notes/bug-1774159-0afe3dbc39e3c6b0.yaml releasenotes/notes/bug-1774604-qb-driver-b7e717cbc71d6189.yaml releasenotes/notes/bug-1777126-netapp-skip-route-setup-if-no-gateway-e841635dcd20fd12.yaml releasenotes/notes/bug-1777551-security-networks-api-all-tenants-fix-a061274afe15180d.yaml releasenotes/notes/bug-1777551-security-services-api-all-tenants-fix-e820ec370d7df473.yaml releasenotes/notes/bug-1778975-fix-quota-update-does-not-require-a-value-496ec846d2c43963.yaml releasenotes/notes/bug-1783736-add-share-proto-filtering-to-the-capabilities-scheduler-d8391183335def9f.yaml releasenotes/notes/bug-1785129-fix-sighup-behavior-with-scheduler-8ee803ad0e543cce.yaml releasenotes/notes/bug-1785180-zfsonlinux-retry-unmounting-during-manage-872cf46313c5a4ff.yaml releasenotes/notes/bug-1794402-fix-share-stats-container-driver-b3cb1fa2987ad4b1.yaml releasenotes/notes/bug-1795463-fix-pagination-slowness-8fcda3746aa13940.yaml releasenotes/notes/bug-1798219-fix-snapshot-creation-lvm-and-generic-driver-55e349e02e7fa370.yaml releasenotes/notes/bug-1801763-gate-public-share-creation-by-policy-a0ad84e4127a3fc3.yaml releasenotes/notes/bug-1802424-add-user-message-when-shrinking-fails-83d0f60ead6f4a4b.yaml releasenotes/notes/bug-1804651-netapp-cdot-add-peferred-dc-to-cifs-ad-99072ce663762e83.yaml releasenotes/notes/bug-1804656-netapp-cdot-add-port-ids-to-share-server-backend-424ca11a1eb44826.yaml 
releasenotes/notes/bug-1804659-speed-up-pools-detail-18f539a96042099a.yaml releasenotes/notes/bug-1811680-destroy-quotas-usages-reservations-when-deleting-share-type-a18f2e00a65fe922.yaml releasenotes/notes/bug-1813054-remove-share-usage-size-audit-period-conf-opt-7331013d1cdb7b43.yaml releasenotes/notes/bug-1815038-extend-remove_version_from_href-support-ea479daaaf5c5700.yaml releasenotes/notes/bug-1815532-supply-request-id-in-all-apis-74419bc1b1feea1e.yaml releasenotes/notes/bug-1816420-validate-access-type-for-ganehas-c42ce6f859fa0c8c.yaml releasenotes/notes/bug-1818081-fix-inferred-script-name-in-case-of-proxy-urls-e33466af856708b4.yaml releasenotes/notes/bug-1822099-fix-multisegment-mtu.yaml-ac2e31c084d8bbb6.yaml releasenotes/notes/bug-1831092-netapp-fix-race-condition-524555133aaa6ca8.yaml releasenotes/notes/bug-1845135-fix-Unity-cannot-use-mgmt-ipv6-9407710a3fc7f4aa.yaml releasenotes/notes/bug-1845147-powermax-read-only-policy-585c29c5ff020007.yaml releasenotes/notes/bug-1845147-vnx-read-only-policy-75b0f414ea5ef471.yaml releasenotes/notes/bug-1845452-unity--fix-fail-to-delete-cifs-share-c502a10ae306e506.yaml releasenotes/notes/bug-1846836-fix-share-network-update-unexpected-success-eba8f40db392c467.yaml releasenotes/notes/bug-1848608-1893718-fix-manage-api-for-shares-with-multiple-export-locations-32ade25e9d82535b.yaml releasenotes/notes/bug-1848889-netapp-fix-share-replica-update-check-failure-90aa964417e7734c.yaml releasenotes/notes/bug-1850264-add-async-error-when-share-extend-error-a0c458204b395994.yaml releasenotes/notes/bug-1853940-not-send-heartbeat-if-driver-not-initial-9c3cee39e8c725d1.yaml releasenotes/notes/bug-1855391-extend-share-will-go-through-scheduler-3a29093756dc88c1.yaml releasenotes/notes/bug-1858328-netapp-fix-shrinking-error-48bcfffe694f5e81.yaml releasenotes/notes/bug-1859775-snapshot-over-quota-exception-bb6691612af03ddf.yaml releasenotes/notes/bug-1859785-share-list-speed-6b09e7717624e037.yaml 
releasenotes/notes/bug-1860061-fix-pagination-query-5c893bb8edaf1350.yaml releasenotes/notes/bug-1861485-fix-share-network-retrieval-31768dcda5aeeaaa.yaml releasenotes/notes/bug-1862833-fix-backref-by-eager-loading-2d897976e7598625.yaml releasenotes/notes/bug-1863298-fix-manage-overquota-issue-37031a593b66f8ba.yaml releasenotes/notes/bug-1867030-delete-share-55663c74a93e77fd.yaml releasenotes/notes/bug-1869148-if-only-pyc-exist-the-extension-API-cannot-be-loaded-172cb9153ebd4b56.yaml releasenotes/notes/bug-1869712-fix-increased-scheduled-time-for-non-thin-provisioned-backends-1da2cc33d365ba4f.yaml releasenotes/notes/bug-1870751-cleanup-share-type-and-group-type-project-access-when-deleted-4fcd49ba6e6c40bd.yaml releasenotes/notes/bug-1871252-cephfs-doesnt-support-subvolume-group-snapshots-344efbb9ba74e05c.yaml releasenotes/notes/bug-1871999-dell-emc-vnx-powermax-wrong-export-locations-e9763631c621656f.yaml releasenotes/notes/bug-1872243-netapp-fix-vserver-peer-with-same-vserver-8bc65816f1764784.yaml releasenotes/notes/bug-1872872-fix-quota-checking-b06fd372be143101.yaml releasenotes/notes/bug-1872873-fix-consume-from-share-eea5941de17a5bcc.yaml releasenotes/notes/bug-1873963-netapp-fix-vserver-peer-intra-cluster-966398cf3a621edd.yaml releasenotes/notes/bug-1878993-netapp-fix-https-3eddf9eb5b762f3a.yaml releasenotes/notes/bug-1879368-netapp-fix-cifs-promote-back-issue-d8fe28466f9dde49.yaml releasenotes/notes/bug-1879754-teardown-network-d1887cdf6eb83388.yaml releasenotes/notes/bug-1880747-netapp-fix-do-not-delete-default-ipspace-aee638279e0f8e93.yaml releasenotes/notes/bug-1881098-1895323-manila-manage-update-host-fixes-bbbc4fe2da48cae9.yaml releasenotes/notes/bug-1881112-add-manila-manage-service-cleanup-37019840f01bfa2f.yaml releasenotes/notes/bug-1881865-add-generic-fuzzy-matching-logic-in-database-d83917727d12677d.yaml releasenotes/notes/bug-1882590-fix-svm-scoped-netapp-85b53830135f7558.yaml 
releasenotes/notes/bug-1883506-fix-delete-manage-error-share-will-lead-to-quota-error-085fd3b7d15ae109.yaml releasenotes/notes/bug-1885956-enforce-policy-check-getting-share-type-by-name-5eca17b02bea5261.yaml releasenotes/notes/bug-1886010-Glusterfs-fix-del-share-89dabc8751ed4fec.yaml releasenotes/notes/bug-1886232-netapp-fix-python-package-name-0f3ec3f2deec8887.yaml releasenotes/notes/bug-1886690-edit-services-down-message-c857de1a678b6781.yaml releasenotes/notes/bug-1887643-netapp-add-cifs-dc-add-skip-check-c8ea9b952cedb643.yaml releasenotes/notes/bug-1888905-fix-group-snapshot-create-delete-0595f9d7a4c0c343.yaml releasenotes/notes/bug-1888915-harden-lvm-deletions-2a735ab0ee4a4903.yaml releasenotes/notes/bug-1889549-fix-migration-get-progress-race-15aea537efec6daf.yaml releasenotes/notes/bug-1890833-fix-cephfs-incorrect-capacity-report-3a9bdaffcc62ec71.yaml releasenotes/notes/bug-1894362-fix-Glusterfs-del-share-3c8467e1d9f0c6e4.yaml releasenotes/notes/bug-1896322-container-fix-search-veth-regex-7f2156a6fd411bdb.yaml releasenotes/notes/bug-1898924-fix-share-replica-update-missing-share-server-model-c1c060a7c06e4512.yaml releasenotes/notes/bug-1900752-early-validate-mandatory-security-service-association-f48aecbbc47418cd.yaml releasenotes/notes/bug-1900755-netapp-add-exception-cifs-creation-error-user-privileges-or-credentials.yaml releasenotes/notes/bug-1901210-return-404-if-share-access-forbidden-02ca9a9552ad3e15.yaml releasenotes/notes/bug-1901937-netapp-nfs-for-windows-465e704524277ea2.yaml releasenotes/notes/bug-1903773-fix-lvmdriver-share-unmounting-after-migration-75640e3c9dc62dba.yaml releasenotes/notes/bug-1903773-fix-zfsonlinux-share-unmounting-after-migration-329b1eb2f33f78a3.yaml releasenotes/notes/bug-1904015-cve-2020-27781-cephx-asynchronous-msgs-6a683076a1fb5a54.yaml releasenotes/notes/bug-1908352-add-explicit-error-message-c33c7b75a7e49257.yaml releasenotes/notes/bug-1908963-scheduler-ignore-earlier-time-service-capabilities-0b97bb70ba4fbb7f.yaml 
releasenotes/notes/bug-1909847-put-ensure-share-into_thread-pool-e658f21c7caad668.yaml releasenotes/notes/bug-1909951-fix-extend-have-wrong-size-7938eaa6591bd2ad.yaml releasenotes/notes/bug-1910752-fix-migration-replication-quotas-eaa013b743d721cd.yaml releasenotes/notes/bug-1911695-resize-share-world-accessable-b444d88b67b05af0.yaml releasenotes/notes/bug-1915237-netapp-fix-encrypt-check-on-migrate-1e39bd7f19651972.yaml releasenotes/notes/bug-1916102-fix-security-service-policy-check-8e72254fa9fedc9e.yaml releasenotes/notes/bug-1916534-netapp-fix-ldap-security-service-c8ee6d36598722cf.yaml releasenotes/notes/bug-1917417-fix-rbac-check-on-share-access-rules-efdddaf9e6f68fdf.yaml releasenotes/notes/bug-1917520-avoid-sending-traceback-to-user-if-action-forbidden-0da51825756fd5fc.yaml releasenotes/notes/bug-1917980-zadara-share-assisted-migration-2d8f8fdb51718faa.yaml releasenotes/notes/bug-1918323-add-validation-to-share-network-94571f35cb39c815.yaml releasenotes/notes/bug-1920937-fixed-cifs-share-migration-752fde9631fb077a.yaml releasenotes/notes/bug-1920942-fix-migration-server-selection-3ad50e6c73ae03df.yaml releasenotes/notes/bug-1921927-handle-service-client-unauthorized-exceptions-b2ebc08a072f7e12.yaml releasenotes/notes/bug-1922075-fix-Glusterfs-create-share-from-snapshot-failed-053a583522a6fc0e.yaml releasenotes/notes/bug-1922243-fix-project-only-replica-listing-f5f2b95ef14c3ded.yaml releasenotes/notes/bug-1923008-fix-logic-of-share-network-query-f314ec3010c06045.yaml releasenotes/notes/bug-1923181-direct-mgr-commands-monmgr-5e8babb4a1067e92.yaml releasenotes/notes/bug-1923566-fix-notfound-replica-parent-share-d8e50659c02b941a.yaml releasenotes/notes/bug-1924230-skip-periodic-task-for-active-replica-030a982af92f8a62.yaml releasenotes/notes/bug-1924806-sqlalchemy-view-only-relationships-807d406cf8fac06c.yaml releasenotes/notes/bug-1925342-fix-snapshot-support-api-error-msg-eaf5fd2b1df97d15.yaml 
releasenotes/notes/bug-1925486-add-share-network-option-to-replica-create-api-7d2ff3628e93fc77.yaml releasenotes/notes/bug-1927060-fix-replica-state-on-migration-complete-4fb4d8ba59b58505.yaml releasenotes/notes/bug-1927823-fix-create-not-home-aggr-e9bd1ebf0d8e4e1e.yaml releasenotes/notes/bug-1928241-d1b48e79aceb3cc4.yaml releasenotes/notes/bug-192912-fix-filtering-shares-by-extra-specs-b79235301306bcf2.yaml releasenotes/notes/bug-1929421-netapp-fix-thick-provision-volume-create-for-AFF-c22c72ce4c3fac16.yaml releasenotes/notes/bug-1930459-add-ceph-version-check-88eee324bc6134ea.yaml releasenotes/notes/bug-1934345-fix-ipaddress-hosts-invocation-80d419d7e62a5f51.yaml releasenotes/notes/bug-1934889-netapp-fix-replica-delete-for-scoped-account-8fa193c0424af9b1.yaml releasenotes/notes/bug-1940072-dell-manila-fix-cifs-value-type-f65e162ee27d3e67.yaml releasenotes/notes/bug-1942124-fix-list-mandatory-services-for-cifs-share-0c524831e8fc6175.yaml releasenotes/notes/bug-1944478-change-status-for-shares-api-5dbc4986d032c8e1.yaml releasenotes/notes/bug-1945365-netapp-fix-port-conf-91552d3f61378c94.yaml releasenotes/notes/bug-1945463-allow-override-instance-name-b730d106a9c32c83.yaml releasenotes/notes/bug-1946990-fix-ignored-neutron-opts-c438a089de9e2066.yaml releasenotes/notes/bug-1955627-add-check-to-reset-status-baa126a7145a45bb.yaml releasenotes/notes/bug-1957075-fix-replica-promote-autosize-attributes-c180bb7db328bece.yaml releasenotes/notes/bug-1959472-fix-type-error-jsonfilter-fc7f87c288cc69.yaml releasenotes/notes/bug-1961087-add-reserved-share-extend-percentage-c6da2ac8a0113d2c.yaml releasenotes/notes/bug-1964089-fix-share-creation-via-rest-9bb4180fc87af6c2.yaml releasenotes/notes/bug-1964696-fix-GaneshaNFSHelper-update_access-6124a79e34e63030.yaml releasenotes/notes/bug-1967760-shorten-snapshot-names-cephfs-a220e2b9f7ba5739.yaml releasenotes/notes/bug-1968069-fix-share-metadata-api-c31aca75bac66501.yaml 
releasenotes/notes/bug-1968891-fix-scheduler-capacity-filter-fails-to-pass-when-extending-share-6b60799e7aa41e19.yaml releasenotes/notes/bug-1971530-fix-cephfs-native-deny-access-facf37fa7053c30d.yaml releasenotes/notes/bug-1973621-add-scheduler-default-extend-filters-1c4f2a1863d0d95b.yaml releasenotes/notes/bug-1975483-rollback-quota-if-share-network-create-fails-628312233bf0c179.yaml releasenotes/notes/bug-1975715-fix-driverfilter-string-evaluations-3886a68d4d7fa3a1.yaml releasenotes/notes/bug-1976370-stop-logging-login-information-b726d05ee805df27.yaml releasenotes/notes/bug-1978962-fix-find-available-servers-2dec3a4f3f0ef7e4.yaml releasenotes/notes/bug-1982808-fix-netapp-api-failed-relationship-is-in-use-ecc9ede4d7f0f5b9.yaml releasenotes/notes/bug-1982808-netapp-fix-snapmirror-snapshots-not-cleaned-up-63cc98cd468adbd1.yaml releasenotes/notes/bug-1983125-cb9118c3fa26a6f2.yaml releasenotes/notes/bug-1986653-infinidat-add-ssl-options-ee91f152bbd28080.yaml releasenotes/notes/bug-1989283-infinidat-version-bump-6f791d9d97ccf75b.yaml releasenotes/notes/bug-1990150-cephadm-cephnfs-backend-fails-start-raise-exc-7459302bf662fdd6.yaml releasenotes/notes/bug-1990839-add-state-column-for-service-c4fe2a6e312a1651.yaml releasenotes/notes/bug-1991378-fix-cluster_id-param-cephfs-nfs-2ddc4ff98141b9b9.yaml releasenotes/notes/bug-1991396-add-glance-endpoint-type-151777f255e423a3.yaml releasenotes/notes/bug-1991776-cephfs-configured-ip-versions-fd87976fdb848e8c.yaml releasenotes/notes/bug-1991938-add-filesystem-info-cephfs-nfs-fsal-b39ae5ebaeb6fba1.yaml releasenotes/notes/bug-1992443-infinidat-host-assisted-migration-4344c4d076b66796.yaml releasenotes/notes/bug-1993828-init-share-server-updated-at-affb6ef54c71939d.yaml releasenotes/notes/bug-1993829-netapp-guard-vserver-name-c65ab9e811b398a8.yaml releasenotes/notes/bug-1995733-netapp-cifs-server-force-delete-d513c548ebf56448.yaml releasenotes/notes/bug-1996859-update-timedelta-and-old-snapmirror-schedules-b565d4163663ffa0.yaml 
releasenotes/notes/bug-2000171-make-netapp_snapmirror_quiesce_timeout-end-user-option-4dc090eb7da3f7eb.yaml releasenotes/notes/bug-2000253-handle-access-rules-on-replica-c7304ae55c68857f.yaml releasenotes/notes/bug-2002394-fix-bad-mode-enforcement-on-ensure-shares-a2e4d8f6c07c8cf5.yaml releasenotes/notes/bug-2004212-prevent-subnet-deletion-when-group-exists-a35355feb1bf6848.yaml releasenotes/notes/bug-2004230-fix-cross-project-rbac-328134c64c96c200.yaml releasenotes/notes/bug-2006792-fix-add-share-network-subnet-non-admin-463347a723069997.yaml releasenotes/notes/bug-2007060-fix-error-message-7a34357c0212d8f9.yaml releasenotes/notes/bug-2007560-fix-netapp-ipspace-naming-7c097743e187b920.yaml releasenotes/notes/bug-2008497-speed-up-replica-snapshots-create-request-6facee90320fecca.yaml releasenotes/notes/bug-2012742-remove-stanza-check-from-netapp-driver-code.yaml releasenotes/notes/bug-2015094-fix-read-deleted-sqlalchemy-cda2dca772ce8d0a.yaml releasenotes/notes/bug-2015328-disallow-reset-replica-state-on-active-replicas-a3d4511ff1352d68.yaml releasenotes/notes/bug-2017501-fix-share-export-location-update.yaml releasenotes/notes/bug-2020187-scheduler-performance-2edc4c706b2fea2f.yaml releasenotes/notes/bug-2020745-dell-unity-lacp-8653da49ad901c5c.yaml releasenotes/notes/bug-2023754-fix-share-replica-url-367797a27a9c314d.yaml releasenotes/notes/bug-2023964-fix-limit-for-display-name-and-description-827d4ccb777ea632.yaml releasenotes/notes/bug-2024556-skip-error-deleting-replicas-aa2ef4154d5e38f6.yaml releasenotes/notes/bug-2024658-fix-duplicate-entries-of-share-server-backend-details-adf45b417d45b437.yaml releasenotes/notes/bug-2025075-BadRequest-share-server-migration-get-progress-bf6fe476f7ab3111.yaml releasenotes/notes/bug-2025641-pause-and-resume-clone-split-during-snapshot-rename-fd0f990d50644d9c.yaml releasenotes/notes/bug-2029366-network-deleted-without-security-associaton-ae56473f6d32c47e.yaml 
releasenotes/notes/bug-2031048-fix-provider-location-validation-b6d1e977f50643bb.yaml releasenotes/notes/bug-2031193-set-updated_at-access-rules-a382a3e352f3ef7d.yaml releasenotes/notes/bug-2032681-flashblade-relogin-f1781c6bdb15df71.yaml releasenotes/notes/bug-2033604-fix-count-in-shares-and-snapshots-list-api-683f3103e587b898.yaml releasenotes/notes/bug-2035137-ceph-nfs-set-preferred-export-location-d1f228a51df8c8b4.yaml releasenotes/notes/bug-2035137-cephfs-support-ensure-shares-b72fe18381af274a.yaml releasenotes/notes/bug-2035572-ignore-errors-remove-export-nfs-ganesha-fd0f8eb1db800d31.yaml releasenotes/notes/bug-2036931-fix-flexgroup-dedup-compression-aeb013a6ef24f610.yaml releasenotes/notes/bug-2037109-netapp-use-identifier-to-derive-vserver-name-during-migration-81fd7d24b36b4dbe.yaml releasenotes/notes/bug-2037422-delete-share-network-subnet-on-network-deletion-b28b42ce4a42b554.yaml releasenotes/notes/bug-2038607-fix-protocol-access-mapping-opts-e7d61db3e2a84be7.yaml releasenotes/notes/bug-2049507-retry-on-connection-error-to-neutron-df7d2ddac5f30773.yaml releasenotes/notes/bug-2049538-cephfs-driver-return-allocated-gb-6ecf908dae8f369d.yaml releasenotes/notes/bug-2050010-add-filesystem-name-metadata-to-cephfs-shares-5725d751980360ec.yaml releasenotes/notes/bug-2050010-allow-configuring-admin-and-driver-metadata-b0ede7d7cf057b5b.yaml releasenotes/notes/bug-2051691-cors-defaults-15989a221a9cb920.yaml releasenotes/notes/bug-2052785-netapp-allow-share-server-migration-with-replicas-971fece378440aba.yaml releasenotes/notes/bug-2053100-fix-ceph-driver-preferred-path-update-70147668e0f19c4d.yaml releasenotes/notes/bug-2053100-fix-export-path-preferred-attr-updates-32db001aacfc8563.yaml releasenotes/notes/bug-2058027-fix-backup-status-in-creating-state-forever-for-wrong-config-a9e10419f33ecb97.yaml releasenotes/notes/bug-2058642-fix-backup-delete-source-destination-same_vserver-7e165f9acfec123c.yaml 
releasenotes/notes/bug-2059399-fix-backup-restore-failing-for-rest-bc060fcf893ae0f6.yaml releasenotes/notes/bug-2064502-netapp-fix-revert-to-snapshot-f23ab4dc325b2c42.yaml releasenotes/notes/bug-2064907-allow-empty-mount-point-prefix.yaml releasenotes/notes/bug-2064907-fix-share-server-migration.yaml releasenotes/notes/bug-2066840-dell-powerflex-default-port-fc50b82979e3b11b.yaml releasenotes/notes/bug-2066871-allow-to-update-access-level-for-access-rule-741f8fc3cc190701.yaml releasenotes/notes/bug-2067266-Fix-leak-of-Manila-ports-on-share-server-deletion-b6faf19725727988.yaml releasenotes/notes/bug-2067456-handle-share-and-snapshot-show-for-deferred-deletion-37654e034eabccc6.yaml releasenotes/notes/bug-2067609-make-osprofiler-initialization-compatible-with-forks.yaml releasenotes/notes/bug-2068043-update-deferred-deletion-dc5ea4207e06bb64.yaml releasenotes/notes/bug-2069125-fix-manila-driver-error-with-ontap-svm-scoped-user-when-add-rule-1ae120a96dd8f68a.yaml releasenotes/notes/bug-2071359-netapp-retry-sis-operatin-if-already-active-4625605175f76d07.yaml releasenotes/notes/bug-2072552-allow-scheduling-to-disabled-host-82c93468ec322256.yaml releasenotes/notes/bug-2073766-svm-scope-exclude-management-lif-when-validate-kerberos-config-ef0f1249fcc4445b.yaml releasenotes/notes/bug-2074504-disable-ports-on-neutron-ext-nets-af3ff56da9a928df.yaml releasenotes/notes/bug-2075967-lock-shares-deletion-when-rule-is-locked-9ce9c6914acc1edb.yaml releasenotes/notes/bug-2082944-sqlalchemy-tracing.yml releasenotes/notes/bug-2084529-add-db_retry-on-update-methods-538f3a295a110f3f.yaml releasenotes/notes/bug-2084532-add-new-policy-list-all-project-for-shares-and-snapshots-0b02bea6e121c6a2.yaml releasenotes/notes/bug-2084783-improve-get-all-instances-with-share-data-f217df37bac9b647.yaml releasenotes/notes/bug-2085112-netapp-make-deleted-volumes-retention-period-configurable-403ec227f256e24b.yaml releasenotes/notes/bug-2088269-vast-add-multiple-export-locations-39243a9091c145f9.yaml 
releasenotes/notes/bug-2089061-fix-access-rules-locks-lookup-b5efbd41397acba3.yaml releasenotes/notes/bug-2089534-dont-fail-stop-if-service-doesnt-exist-68448b4d775a2b1e.yaml releasenotes/notes/bug-2089634-fix-like-search-option-name-f06d8a50163070bf.yaml releasenotes/notes/bug-2089826-allow-filtering-shares-on-mount-point-name.yaml releasenotes/notes/bug-2096656-fix-netapp-create-share-from-snapshot-with-mount-point-name.yaml releasenotes/notes/bug-2097522-netapp-delete-vlan-even-if-ipspace-is-reused-5bc8b49ad6f91eb7.yaml releasenotes/notes/bug-2098083-Pass-on-port-delete-not-found-error-5acafa7a7810a210.yaml releasenotes/notes/bug-2099273-stop-overriding-cephfs-nfs-protocols-cf7e3949f688ad6f.yaml releasenotes/notes/bug-2100829-dell-powerscale-http-auth-38831162175686c4.yaml releasenotes/notes/bug-2102673-fix-shares-stuck-in-ensuring-190ce5519c33baad.yaml releasenotes/notes/bug-2104357-Fix-server_migrating-status-of-non-active-replica-6af28a67a4684d16.yaml releasenotes/notes/bug-2106382-NetApp-Preserve-custom-snapshot-policies-d3bd010a12325506.yaml releasenotes/notes/bug-2111918-netapp-fix-rentention-period-rest-api-e73b358ccc6e7b37.yaml releasenotes/notes/bug-2114260-Add-guard-to-qualified_replica-e02bd4b21acee803.yaml releasenotes/notes/bug-2114969-netapp-guard-already-existing-cifs-access-df01145c6782e880.yaml releasenotes/notes/bug-2120176-Handle-Neuton-Subnet-Full-Exception-9cb634909f0dc716.yaml releasenotes/notes/bug-2120291-netapp-rest-basic-share-creation-failure-fixes-f768ba19f3157db4.yaml releasenotes/notes/bug-2120650-enforce-policy-check-for-share-snapshots-retrieval-b4c66a3e90bd38af.yaml releasenotes/notes/bug-2121141-fixing-cifs-share-creation-through-netapp-legacy-client-4c3868fca9de2df2.yaml releasenotes/notes/bug-667744-fix-c64071e6e5a098f7.yaml releasenotes/notes/bug_1564623_change-e286060a27b02f64.yaml releasenotes/notes/bug_1582931-1437eae20fa544d1.yaml releasenotes/notes/bug_1844046-fix-image-not-found-629415d50cd6042a.yaml 
releasenotes/notes/bugfix-1771958-1771970-bcec841e7ae6b9f6.yaml releasenotes/notes/cephfs-add-nfs-protocol-support-44764094c9d784d8.yaml releasenotes/notes/cephfs-fix-export-ip-escaping-on-hostname-e2866be32a8f5e38.yaml releasenotes/notes/cephfs-native-add-readonly-shares-support-067ccab0217ab5f5.yaml releasenotes/notes/cephfs-native-enhance-update-access-support-e1a1258084c997ca.yaml releasenotes/notes/cephfs-native-fix-evict-c45fd2de8f520757.yaml releasenotes/notes/cephfs-nfs-ipv6-support-2ffd9c0448c2f47e.yaml releasenotes/notes/cephfs-set-mode-b7fb3ec51300c220.yaml releasenotes/notes/cephfs-snapshots-enabled-4886147664270c32.yaml releasenotes/notes/cephfs-support-statement-for-victoria-and-beyond-e94baa7857b1624c.yaml releasenotes/notes/change_user_project_length-93cc8d1c32926e75.yaml releasenotes/notes/check-thin-provisioning-4bb702535f6b10b6.yaml releasenotes/notes/clean-expired-messages-6161094d0c108aa7.yaml releasenotes/notes/config-for-cephfs-volume-prefix-67f2513f603cb614.yaml releasenotes/notes/container-add-share-server-migration-1f4509ade926aec6.yaml releasenotes/notes/container-driver-5d972cc40e314663.yaml releasenotes/notes/container-driver-hardening-against-races-30c9f517a6392b9d.yaml releasenotes/notes/container-manage-unmanage-share-servers-880d889828ee7ce3.yaml releasenotes/notes/container-multiple-subnets-per-az-702aad41d6f91b59.yaml releasenotes/notes/dedupe-support-hnas-driver-017d2f2a93a8b487.yaml releasenotes/notes/delete_vlan_on_vserver_delete-a7acd145c0b8236d.yaml releasenotes/notes/dell-emc-unity-use-user-capacity-322f8bbb7c536453.yaml releasenotes/notes/dellemc-fix-capacity-report-25f75a6c96e12b40.yaml releasenotes/notes/deprecate-ceph-nfs-protocol-helper-ba5ec5095d9eaca7.yaml releasenotes/notes/deprecate-ceph-nfs-protocol-helper-options-bacaf4565478e38f.yaml releasenotes/notes/deprecate-dellemc-vnx-20c9daec7aec541c.yaml releasenotes/notes/deprecate-glustefs-8e0c863aaa58eb6b.yaml 
releasenotes/notes/deprecate-json-formatted-policy-file-fd0345f215e6ccd5.yaml releasenotes/notes/deprecate-memcached-servers-config-option-f4456382b9b4d6db.yaml releasenotes/notes/deprecate-old-ks-opts-in-nova-neutron-cinder-groups-e395015088d93fdc.yaml releasenotes/notes/deprecate-service-instance-network-helper-option-82ff62a038f2bfa3.yaml releasenotes/notes/deprecate-tegile-04c7466e29570ad5.yaml releasenotes/notes/deprecate-use_forwarded_for-2b47e9b63d5f2fc1.yaml releasenotes/notes/deprecate-windows-smb-654983cf22856e31.yaml releasenotes/notes/disable-share-groups-api-by-default-0627b97ac2cda4cb.yaml releasenotes/notes/do-not-create-default-route-66ff4199b60e35c7.yaml releasenotes/notes/driver-filter-91e2c60c9d1a48dd.yaml releasenotes/notes/drop-python-3-6-and-3-7-924b62070c5b60ff.yaml releasenotes/notes/drop-python2-support-e160ff36811a5964.yaml releasenotes/notes/drop-support-for-lvm-share-export-ip-e031ef4c5f95b534.yaml releasenotes/notes/emc-unity-manila-support-d4f5a410501cfdae.yaml releasenotes/notes/emc_vnx_interface_ports_configuration-00d454b3003ef981.yaml releasenotes/notes/enable-enforce-scope-c2d57db049741896.yaml releasenotes/notes/enhance-ensure-share-58fc14ffc099f481.yaml releasenotes/notes/error-share-set-size-ff5d4f4ac2d56755.yaml releasenotes/notes/estimate-provisioned-capacity-34f0d2d7c6c56621.yaml releasenotes/notes/extra_specs_case_insensitive-e9d4ca10d94f2307.yaml releasenotes/notes/feature-certificate-based-authentication-for-netapp-drivers-e5163559d2335643.yaml releasenotes/notes/fix-1870280-share-type-user-message-902275047410bdbf.yaml releasenotes/notes/fix-consistency-groups-api-dd9b5b99138e22eb.yaml releasenotes/notes/fix-creating-from-snapshot-server-limit-7b575c7cbf081efc.yaml releasenotes/notes/fix-ganesha-allow-access-for-all-ips-09773a79dc76ad44.yaml releasenotes/notes/fix-generic-driver-resize-0fde9c8674db5951.yaml releasenotes/notes/fix-generic-driver-using-uuid-to-mount-volumes-291208b283120224.yaml 
releasenotes/notes/fix-hds-hnas-unconfined-09b79f3bdb24a83c.yaml releasenotes/notes/fix-hnas-mount-on-manage-snapshot-91e094c579ddf1a3.yaml releasenotes/notes/fix-huawei-driver-cifs-mount-issue-2d7bff5a7e6e3ad6.yaml releasenotes/notes/fix-huawei-driver-qos-deletion-9ad62db3d7415980.yaml releasenotes/notes/fix-huawei-exception-a09b73234ksd94kd.yaml releasenotes/notes/fix-issue-about-provisioned_capaciti_gb-is_None-d700c0544a8a88e8.yaml releasenotes/notes/fix-managing-twice-hnas-4956a7653d27e320.yaml releasenotes/notes/fix-netApp-drivers-share-server-migration-is-failing-eee991ccbab4cd5a.yaml releasenotes/notes/fix-netapp-manage-snapshot-f6ed571bd4f9a2ac.yaml releasenotes/notes/fix-neutron-plugin-invalid-key-dict-68c3d6bcbf2f19f0.yaml releasenotes/notes/fix-no-router-server-0d5bf587063f22fc.yaml releasenotes/notes/fix-py3-netapp-a9815186ddc865d4.yaml releasenotes/notes/fix-race-condition-netapp-5a36f6ba95a49c5e.yaml releasenotes/notes/fix-share-instance-list-with-limit-db7b5b99138e22ee.yaml releasenotes/notes/fix-share-manager-shrinking-data-loss-state-edc87ba2fd7e32d8.yaml releasenotes/notes/fix-verser-client-in-case-vserver-not-present-92b8a710a08f90e6.yaml releasenotes/notes/fix-volume-efficiency-status-2102ad630c5407a8.yaml releasenotes/notes/fix_access_level_managed_shares_hnas-c76a09beed365b46.yaml releasenotes/notes/fix_cephx_validation-cba4df77f9f45c6e.yaml releasenotes/notes/fix_create_from_snapshot-5d8e470b46aac67d.yaml releasenotes/notes/fix_limit_formating_routes-1b0e1a475de6ac44.yaml releasenotes/notes/fix_manage_snapshots_hnas-2c0e1a47b5e6ac33.yaml releasenotes/notes/fix_policy_file-4a382ac241c718c6.yaml releasenotes/notes/fix_share_server_api_2025649-f818f594e97c59e6.yaml releasenotes/notes/fix_share_server_manager_match_id-276202295539dc0f.yaml releasenotes/notes/fixed-netapp-cdot-autosupport-3fabd8ac2e407f70.yaml releasenotes/notes/fixing-driver-filter-14022294c8c04d2d.yaml releasenotes/notes/ganesha-dynamic-update-access-be80bd1cb785e733.yaml 
releasenotes/notes/ganesha-store-exports-and-export-counter-in-ceph-rados-052b925f8ea460f4.yaml releasenotes/notes/generic-driver-noop-interface-driver-24abcf7af1e08ff9.yaml releasenotes/notes/generic-route-racing-adf92d212f1ab4de.yaml releasenotes/notes/glusterfs-add-directory-layout-extend-shrink-fd2a008f152edbf5.yaml releasenotes/notes/glusterfs-handle-new-volume-option-xml-schema-dad06253453c572c.yaml releasenotes/notes/gpfs-nfs-server-type-default-value-change-58890adba373737c.yaml releasenotes/notes/graduate-share-groups-feature-5f751b49ccc62969.yaml releasenotes/notes/graduate-share-replication-feature-17aec111b6c5bf0f.yaml releasenotes/notes/guru-meditation-support-7872da69f529a6c2.yaml releasenotes/notes/hitachi-driver-cifs-user-support-3f1a8b894fe3e9bb.yaml releasenotes/notes/hnas-driver-rename-7ef74fe720f7e04b.yaml releasenotes/notes/hnas-manage-unmanage-snapshot-support-0d939e1764c9ebb9.yaml releasenotes/notes/hnas-mountable-snapshots-4fbffa05656112c4.yaml releasenotes/notes/hnas-revert-to-snapshot-a2405cd6653b1e85.yaml releasenotes/notes/hnas_allow_managed_fix-4ec7794e2035d3f2.yaml releasenotes/notes/hostonly-filter-1a17a70dd0aafb86.yaml releasenotes/notes/hpe3par-rw-snapshot-shares-f7c33b4bf528bf00.yaml releasenotes/notes/hsp-driver-e00aff5bc89d4b54.yaml releasenotes/notes/huawei-driver-replication-8ed62c8d26ad5060.yaml releasenotes/notes/huawei-driver-sectorsize-config-da776132ba6da2a7.yaml releasenotes/notes/huawei-driver-support-snapshot-revert-1208c586bd8db98e.yaml releasenotes/notes/huawei-pool-disktype-support-0a52ba5d44da55f9.yaml releasenotes/notes/huawei-support-access-all-ip-4994c10ff75ac683.yaml releasenotes/notes/human-readable-export-location-share-support-a72cd2f0e92c41c7.yaml releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-e7c90fb62426c281.yaml releasenotes/notes/ibm-gpfs-ces-support-3498e35d9fea1b55.yaml releasenotes/notes/ibm-gpfs-manage-support-c110120c350728e3.yaml 
releasenotes/notes/infinidat-add-infinibox-driver-ec652258e710d6a0.yaml releasenotes/notes/infinidat-balance-network-spaces-ips-25a9f1e587b87156.yaml releasenotes/notes/infinidat-delete-datasets-with-snapshots-4d18f8c197918606.yaml releasenotes/notes/infortrend-manila-driver-a1a2af20de6368cb.yaml releasenotes/notes/inspur-as13000-driver-41f6b7caea82e46e.yaml releasenotes/notes/inspur-instorage-driver-51d7a67f253f3ecd.yaml releasenotes/notes/inspur-support-rwx-for-cifs-permission-4279f1fe7a59fd00.yaml releasenotes/notes/introduce-tooz-library-5fed75b8caffcf42.yaml releasenotes/notes/limiting-ssh-access-from-tenant-network-6519efd6d6895076.yaml releasenotes/notes/lv-mounting-inside-containers-af8f84d1fab256d1.yaml releasenotes/notes/lvm-export-ips-5f73f30df94381d3.yaml releasenotes/notes/macrosan-add-configuration-option-282fa1026748c4f9.yaml releasenotes/notes/macrosan-manila-driver-4644ed2cdd51b030.yaml releasenotes/notes/manage-share-in-zfsonlinux-driver-e80921081206f75b.yaml releasenotes/notes/manage-share-snapshot-in-huawei-driver-007b2c763fbdf480.yaml releasenotes/notes/manage-snapshot-in-zfsonlinux-driver-6478d8d5b3c6a97f.yaml releasenotes/notes/manage-unmanage-replicated-share-fa90ce34372b6df5.yaml releasenotes/notes/manage-unmanage-share-servers-cd4a6523d8e9fbdf.yaml releasenotes/notes/manage-unmanage-snapshot-bd92164472638f44.yaml releasenotes/notes/manage-unmanage-snapshot-in-netapp-cdot-driver-5cb4b1619c39625a.yaml releasenotes/notes/manila-netapp-storage-efficiency-policy-5fa0b2b15901bf93.yaml releasenotes/notes/manila-share-support-recycle-bin-1cc7859affaf8887.yaml releasenotes/notes/manila-status-upgrade-check-framework-aef9b5cf9d8e3bda.yaml releasenotes/notes/manila-wsgi-debug-log-opts-691a7647655b4778.yaml releasenotes/notes/maprfs-manila-drivers-1541296f26cf78fd.yaml releasenotes/notes/max-share-extend-size-on-type-0528be9a5c27678b.yaml releasenotes/notes/migration-access-fix-71a0f52ea7a152a3.yaml 
releasenotes/notes/migration-empty-files-01d1a3caa2e9705e.yaml releasenotes/notes/migration-share-type-98e3d3c4c6f47bd9.yaml releasenotes/notes/min-max-share-size-on-type-a7c69046e5c57944.yaml releasenotes/notes/mount-volume-path-ff3c3f83039e1a3f.yaml releasenotes/notes/move-emc-share-driver-to-dell-emc-dir-1ec34dee0544270d.yaml releasenotes/notes/multi-segment-support-fa171a8e3201d54e.yaml releasenotes/notes/netapp-add-async-snapmirror-schedule-config-54a33647735f751c.yaml releasenotes/notes/netapp-add-extra-spec-max_files_multiplier-b74692c0d54f4678.yaml releasenotes/notes/netapp-add-fpolicy-support-dd31628a1c8e64d6.yaml releasenotes/notes/netapp-add-migration-through-svm-migrate-c1e29fce19758324.yaml releasenotes/notes/netapp-add-new-security-certificate-for-vserver-aba543211ae6b811.yaml releasenotes/notes/netapp-add-security-service-update-718a68ebe60fd2b5.yaml releasenotes/notes/netapp-add-share-server-migration-663f7ced1ef93558.yaml releasenotes/notes/netapp-add-support-for-adaptive-qos-d036238e7f29cf75.yaml releasenotes/notes/netapp-add-support-for-logical-space-reporting-bb3b582f162664c8.yaml releasenotes/notes/netapp-add-update-from-network-subnet-metadata-method-0615490d86958c3d.yaml releasenotes/notes/netapp-add-update-share-from-metadata-method-71f308c2b05d59bb.yaml releasenotes/notes/netapp-barbican-share-encryption-support-46ec89c70557e526.yaml releasenotes/notes/netapp-bug-2061976-only-modify-qos-policy-if-troughput-changed-ce1b56a3bb3c3d78.yaml releasenotes/notes/netapp-cdot-add-max-over-subscription-ratio-pool-stats-eea763b3b9b3ba7d.yaml releasenotes/notes/netapp-cdot-apply-mtu-from-network-provider-d12179a2374cdda0.yaml releasenotes/notes/netapp-cdot-clone-split-control-a68b5fc80f1fc368.yaml releasenotes/notes/netapp-cdot-configure-nfs-versions-83e3f319c4592c39.yaml releasenotes/notes/netapp-cdot-multi-svm-configure-nfs-95c9154e1aa28751.yaml releasenotes/notes/netapp-cdot-optimized-migration-within-share-server-92cfa1bcf0c317fc.yaml 
releasenotes/notes/netapp-cdot-quality-of-service-limits-c1fe8601d00cb5a8.yaml releasenotes/notes/netapp-cdot-ss-multiple-dns-ip-df42a217977ce44d.yaml releasenotes/notes/netapp-cdot-switch-volume-efficiency-bd22733445d146f0.yaml releasenotes/notes/netapp-cdot-use-security-service-ou-4dc5835c9e00ad9d.yaml releasenotes/notes/netapp-check-snapshot-after-creation-1b59038a3a117c65.yaml releasenotes/notes/netapp-consider-last-transfer-size-error-for-replica-state-7ef49186a1b8a5a0.yaml releasenotes/notes/netapp-create-share-from-snapshot-another-pool-330639b57aa5f04d.yaml releasenotes/notes/netapp-default-ipv6-route-13a9fd4959928524.yaml releasenotes/notes/netapp-delay-clone-split-after-share-creation-fbae159d988fe2a0.yaml releasenotes/notes/netapp-enable-aes-encryption-for-cifs-a1f98e5cb0010ea0.yaml releasenotes/notes/netapp-fix-export-location-for-readable-replica-promote-8e0c4be5f1966e53.yaml releasenotes/notes/netapp-flexgroup-support-9b3f30afb94d3a86.yaml releasenotes/notes/netapp-human-readable-export-location-support-b04af9f5054ad541.yaml releasenotes/notes/netapp-ipv6-support-f448e99a7c112362.yaml releasenotes/notes/netapp-manage-unmanage-share-servers-635496b46e306920.yaml releasenotes/notes/netapp-multiple-subnets-support-274a37c5ddb43ca1.yaml releasenotes/notes/netapp-ontap-rest-api-client-4c83c7b931f950cf.yaml releasenotes/notes/netapp-readable-replica-cb7d5460ad7b3b0e.yaml releasenotes/notes/netapp-remove-last-transfer-size-limit-check-ec66035ff30ad70b.yaml releasenotes/notes/netapp-replication-dhss-true-5b2887de8e9a2cb5.yaml releasenotes/notes/netapp-restrict-lif-creation-per-ha-pair-249021556be5189d.yaml releasenotes/notes/netapp-retry-requests-0a77a31f5222d4b2.yaml releasenotes/notes/netapp-set-discovery-mode-to-none-d66b2125a8d12740.yaml releasenotes/notes/netapp-stop-clone-split-during-share-delete-720456e55031ef65.yaml releasenotes/notes/netapp-support-filtering-api-tracing-02d1f4271f44d24c.yaml 
releasenotes/notes/netapp-svm-get-progress-596cd387c66dea1b.yaml releasenotes/notes/netapp-use-default-ad-site-security-service-55748f54c2390fad.yaml releasenotes/notes/netapp_cdot_performance_utilization-aff1b498a159470e.yaml releasenotes/notes/netapp_configure_net_with_metadata-c5d1b5f542967276.yaml releasenotes/notes/neutron-binding-driver-43f01565051b031b.yaml releasenotes/notes/new-config-option-dhss-4931db193fd76656.yaml releasenotes/notes/new-secure-rbac-defaults-in-wallaby-13c0583afdfcfcc7.yaml releasenotes/notes/newton-migration-improvements-cf9d3d6e37e19c94.yaml releasenotes/notes/nexenta-manila-drivers-cbd0b376a076ec50.yaml releasenotes/notes/nexentastor5-v1.1-1ad6c8f7b5cc11b6.yaml releasenotes/notes/optimize-deferred-deletion-get-share-instance-query-b6366b7c3b0a64db.yaml releasenotes/notes/per-backend-az-590c68be0e2cb4bd.yaml releasenotes/notes/powermax-rebrand-manila-a46a0c2ac0aa77ed.yaml releasenotes/notes/privsep-migration-846819fdb181d83a.yaml releasenotes/notes/pure_antelope_bump-6b3bd1b35f632aee.yaml releasenotes/notes/pure_version_bump-2f1280f16391f6f9.yaml releasenotes/notes/qb-bug-1733807-581e71e6581de28e.yaml releasenotes/notes/qnap-enhance-support-53848fda525b7ea4.yaml releasenotes/notes/qnap-fix-manage-snapshot-not-exist-4b111982ddc5fdae.yaml releasenotes/notes/qnap-fix-share-and-snapshot-inconsistant-bd628c6e14eeab14.yaml releasenotes/notes/qnap-manila-driver-a30fe4011cb90801.yaml releasenotes/notes/qnap-support-qes-200-639f3ad70687023d.yaml releasenotes/notes/qnap-support-qes-210-8775e6c210f3ca9f.yaml releasenotes/notes/qnap-tds-support-qes-24704313a0881c8c.yaml releasenotes/notes/remove-AllocType-from-huawei-driver-8b279802f36efb00.yaml releasenotes/notes/remove-confusing-deprecation-warnings-a17c20d8973ef2bb.yaml releasenotes/notes/remove-deprecated-default-options-00fed1238fb6dca0.yaml releasenotes/notes/remove-deprecated-options-and-auth-4d497e03ad47e872.yaml 
releasenotes/notes/remove-deprecated-public-share-creation-policies-051d59249e556b44.yaml releasenotes/notes/remove-deprecated-size-limiter-9d7c8ab69cf85aea.yaml releasenotes/notes/remove-host-field-from-shares-and-replicas-a087f85bc4a4ba45.yaml releasenotes/notes/remove-intree-tempest-plugin-9fcf6edbeba47cba.yaml releasenotes/notes/remove-manila-wsgi-script-07f9a4d00a165f4c.yaml releasenotes/notes/remove-nova-net-support-from-service-instance-module-dd7559803fa01d45.yaml releasenotes/notes/remove-nova-network-support-f5bcb8b2fcd38581.yaml releasenotes/notes/remove-old-notif-drivers-ea9f3837c8e82a41.yaml releasenotes/notes/remove-os-region-name-82e3cd4c7fb05ff4.yaml releasenotes/notes/remove-py38-5c619aee267bc1f3.yaml releasenotes/notes/remove-py39-dec4a09ae03e3583.yaml releasenotes/notes/remove-root-helper-config-option-fd517b0603031afa.yaml releasenotes/notes/remove-standalone-network-plugin-ip-version-440ebcf27ffd22f8.yaml releasenotes/notes/remove-vmax-a3d97ba80ced4895.yaml releasenotes/notes/rename-cephfs-native-driver-3d9b4e3c6c78ee98.yaml releasenotes/notes/report-PortLimitExceeded-error-to-customer-528990dc9574688f.yaml releasenotes/notes/reset_tap_device_after_node_restart-0690a6beca077b95.yaml releasenotes/notes/revert-switch-to-use-glanceclient-bc462a5477d6b8cb.yaml releasenotes/notes/rules-for-managed-share-f28a26ffc980f6fb.yaml releasenotes/notes/scheduler_hints_share_replica-ffeed5cf9adeddff.yaml releasenotes/notes/server-migration-with-network-extension-7433a5c38c8278e4.yaml releasenotes/notes/share-backup-d5f68ba6f9aef776.yaml releasenotes/notes/share-backup-netapp-driver-8bbcf3fbc1d20614.yaml releasenotes/notes/share-backup-netapp-driver-core-6a2328756b14f541.yaml releasenotes/notes/share-backup-out-of-place-restore-1db334d8a22be3fa.yaml releasenotes/notes/share-mount-snapshots-b52bf3433d1e7afb.yaml releasenotes/notes/share-network-with-multiple-subnets-a56be8b646b9e463.yaml releasenotes/notes/share-replication-81ecf4a32a5c83b6.yaml 
releasenotes/notes/share-revert-to-snapshot-3d028fa00620651e.yaml releasenotes/notes/share-revert-to-snapshot-in-netapp-cdot-driver-37f645ec3c14313c.yaml releasenotes/notes/share-server-delete-failure-ca29d6b286a2c790.yaml releasenotes/notes/snaplock-support-for-netapp-driver-9b639386c07c4990.yaml releasenotes/notes/snapshot-force-delete-4432bebfb5a0bbc9.yaml releasenotes/notes/support-ipv6-in-drivers-and-network-plugins-1833121513edb13d.yaml releasenotes/notes/support-qes-114-5881c0ff0e7da512.yaml releasenotes/notes/switch-to-use-glanceclient-dde019b0b141caf8.yaml releasenotes/notes/unexpected-data-of-share-from-snap-134189fc0f3eeedf.yaml releasenotes/notes/unity-default-filter-function-support-2eefc8044a5add5d.yaml releasenotes/notes/unity-drvier-support-1gb-share-48f032dff8a6a789.yaml releasenotes/notes/unity-manage-server-share-snapshot-support-6a0bbbed74da13c7.yaml releasenotes/notes/unity-manila-ipv6-support-dd9bcf23064baceb.yaml releasenotes/notes/unity-revert-to-snapshot-support-1cffc3914982003d.yaml releasenotes/notes/unity-shrink-share-support-cc748daebfe8f562.yaml releasenotes/notes/unity-un-handles-share-server-mode-support-e179c092ab148948.yaml releasenotes/notes/unity-vnx-rename-options-1656168dd4bdba70.yaml releasenotes/notes/use-cephadm-nfs-ganesha-b9e071924de738fd.yaml releasenotes/notes/use-oslo-logging-for-config-options-388da64bb4ce45db.yaml releasenotes/notes/use-tooz-heartbeat-c6aa7e15444e63c3.yaml releasenotes/notes/user-messages-api-589ee7d68ccba70c.yaml releasenotes/notes/vastdata-add-api-token-based-auth-f6ee3fdce1ba6450.yaml releasenotes/notes/veritas-access-manila-driver-d75558c01ce6d428.yaml releasenotes/notes/vlan-enhancement-in-unity-driver-0f1d972f2f6d00d9.yaml releasenotes/notes/vmax-manila-support-7c655fc094c09367.yaml releasenotes/notes/vmax-rename-options-44d8123d14a23f94.yaml releasenotes/notes/vnx-manila-ipv6-support-9ae986431549cc63.yaml releasenotes/notes/vnx-ssl-verification-2d26a24e7e73bf81.yaml 
releasenotes/notes/windows-smb-fix-default-access-d4b9eee899e400a0.yaml releasenotes/notes/x-openstack-request-id-2f26a301f3109689.yaml releasenotes/notes/zadara-manila-driver-cb22b647e60f7ab8.yaml releasenotes/notes/zed-secure-rbac-direction-change-2329bbf442b9a2da.yaml releasenotes/notes/zfsonlinux-driver-improvement-create-share-from-snapshot-another-backend-44296f572681be35.yaml releasenotes/notes/zfssa-driver-add-share-manage-unmanage-9bd6d2e25cc86c35.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/2025.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/es/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po tools/coding-checks.sh tools/cover.sh tools/enable-pre-commit-hook.sh tools/fast8.sh tools/install_venv.py tools/install_venv_common.py tools/test-setup.sh tools/validate-json-files.py tools/with_venv.sh zuul.d/grenade-jobs.yaml zuul.d/project.yaml././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315601.0 manila-21.0.0/manila.egg-info/dependency_links.txt0000664000175000017500000000000100000000000022057 0ustar00zuulzuul00000000000000 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315601.0 manila-21.0.0/manila.egg-info/entry_points.txt0000664000175000017500000000522300000000000021311 0ustar00zuulzuul00000000000000[console_scripts] manila-api = manila.cmd.api:main manila-data = manila.cmd.data:main manila-manage = manila.cmd.manage:main manila-rootwrap = oslo_rootwrap.cmd:main manila-scheduler = manila.cmd.scheduler:main manila-share = manila.cmd.share:main manila-status = manila.cmd.status:main [manila.scheduler.filters] AffinityFilter = manila.scheduler.filters.affinity:AffinityFilter AntiAffinityFilter = manila.scheduler.filters.affinity:AntiAffinityFilter AvailabilityZoneFilter = manila.scheduler.filters.availability_zone:AvailabilityZoneFilter CapabilitiesFilter = manila.scheduler.filters.capabilities:CapabilitiesFilter CapacityFilter = manila.scheduler.filters.capacity:CapacityFilter ConsistentSnapshotFilter = manila.scheduler.filters.share_group_filters.consistent_snapshot:ConsistentSnapshotFilter CreateFromSnapshotFilter = manila.scheduler.filters.create_from_snapshot:CreateFromSnapshotFilter DriverFilter = manila.scheduler.filters.driver:DriverFilter IgnoreAttemptedHostsFilter = manila.scheduler.filters.ignore_attempted_hosts:IgnoreAttemptedHostsFilter JsonFilter = manila.scheduler.filters.json:JsonFilter OnlyHostFilter = manila.scheduler.filters.host:OnlyHostFilter RetryFilter = manila.scheduler.filters.retry:RetryFilter ShareReplicationFilter = manila.scheduler.filters.share_replication:ShareReplicationFilter [manila.scheduler.weighers] CapacityWeigher = manila.scheduler.weighers.capacity:CapacityWeigher GoodnessWeigher = manila.scheduler.weighers.goodness:GoodnessWeigher HostAffinityWeigher = manila.scheduler.weighers.host_affinity:HostAffinityWeigher NetAppAIQWeigher = manila.scheduler.weighers.netapp_aiq:NetAppAIQWeigher PoolWeigher = manila.scheduler.weighers.pool:PoolWeigher [manila.share.drivers.dell_emc.plugins] powerflex = 
manila.share.drivers.dell_emc.plugins.powerflex.connection:PowerFlexStorageConnection powermax = manila.share.drivers.dell_emc.plugins.powermax.connection:PowerMaxStorageConnection powerscale = manila.share.drivers.dell_emc.plugins.powerscale.powerscale:PowerScaleStorageConnection powerstore = manila.share.drivers.dell_emc.plugins.powerstore.connection:PowerStoreStorageConnection unity = manila.share.drivers.dell_emc.plugins.unity.connection:UnityStorageConnection vnx = manila.share.drivers.dell_emc.plugins.vnx.connection:VNXStorageConnection [manila.tests.scheduler.fakes] FakeWeigher1 = manila.tests.scheduler.fakes:FakeWeigher1 FakeWeigher2 = manila.tests.scheduler.fakes:FakeWeigher2 [oslo.config.opts] manila = manila.opts:list_opts [oslo.config.opts.defaults] manila = manila.common.config:set_lib_defaults [oslo.policy.enforcer] manila = manila.policy:get_enforcer [oslo.policy.policies] manila = manila.policies:list_rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315600.0 manila-21.0.0/manila.egg-info/not-zip-safe0000664000175000017500000000000100000000000020237 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315601.0 manila-21.0.0/manila.egg-info/pbr.json0000664000175000017500000000006000000000000017463 0ustar00zuulzuul00000000000000{"git_version": "1e090676e", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315601.0 manila-21.0.0/manila.egg-info/requires.txt0000664000175000017500000000152400000000000020413 0ustar00zuulzuul00000000000000pbr>=5.5.0 alembic>=1.4.2 castellan>=3.7.0 defusedxml>=0.7.1 eventlet>=0.27.0 greenlet>=0.4.16 lxml>=4.5.2 netaddr>=0.8.0 oslo.config>=8.3.2 oslo.context>=3.1.1 oslo.db>=8.4.0 oslo.i18n>=5.0.1 oslo.log>=4.4.0 oslo.messaging>=14.1.0 oslo.middleware>=4.1.1 oslo.policy>=4.5.0 oslo.privsep>=2.4.0 oslo.reports>=2.2.0 
oslo.rootwrap>=6.2.0 oslo.serialization>=4.0.1 oslo.service>=2.4.0 oslo.upgradecheck>=1.3.0 oslo.utils>=7.0.0 oslo.concurrency>=4.3.0 osprofiler>=3.4.0 paramiko>=2.7.2 Paste>=3.4.3 PasteDeploy>=2.1.0 pyparsing>=2.4.7 python-neutronclient>=6.7.0 keystoneauth1>=4.2.1 keystonemiddleware>=9.1.0 requests>=2.23.0 tenacity>=6.3.1 Routes>=2.4.1 SQLAlchemy>=1.4.0 SQLAlchemy-Utils>=0.38.3 stevedore>=3.2.2 tooz>=2.7.1 python-cinderclient>=4.0.1 python-novaclient>=17.2.1 python-glanceclient>=3.2.2 WebOb>=1.8.6 cachetools>=4.2.1 packaging>=20.9 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315601.0 manila-21.0.0/manila.egg-info/top_level.txt0000664000175000017500000000000700000000000020540 0ustar00zuulzuul00000000000000manila ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6856747 manila-21.0.0/playbooks/0000775000175000017500000000000000000000000015061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04967 manila-21.0.0/playbooks/manila-tox-genconfig/0000775000175000017500000000000000000000000021067 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/playbooks/manila-tox-genconfig/post.yaml0000664000175000017500000000043700000000000022744 0ustar00zuulzuul00000000000000- hosts: all roles: - role: fetch-tox-output tasks: - name: Copy generated config sample file synchronize: src: "{{ zuul.project.src_dir }}/etc/manila/manila.conf.sample" dest: "{{ zuul.executor.log_root }}" mode: pull verify_host: true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/pyproject.toml0000664000175000017500000000010500000000000015766 0ustar00zuulzuul00000000000000[build-system] requires = ["pbr>=6.1.1"] build-backend = "pbr.build" 
././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315602.04967 manila-21.0.0/rally-jobs/0000775000175000017500000000000000000000000015134 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/rally-jobs/rally-manila-no-ss.yaml0000664000175000017500000000354200000000000021443 0ustar00zuulzuul00000000000000--- Dummy.openstack: - description: "Check quotas context" runner: type: "constant" times: 1 concurrency: 1 context: users: tenants: 1 users_per_tenant: 1 quotas: manila: shares: -1 gigabytes: -1 snapshots: -1 snapshot_gigabytes: -1 share_networks: -1 ManilaShares.list_shares: - args: detailed: True runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 {% for s in ("create_and_delete_share", "create_and_list_share") %} ManilaShares.{{s}}: - args: share_proto: "nfs" size: 1 share_type: "dhss_false" min_sleep: 1 max_sleep: 2 runner: type: "constant" times: 10 concurrency: 10 context: quotas: manila: shares: -1 gigabytes: -1 users: tenants: 2 users_per_tenant: 1 sla: failure_rate: max: 0 {% endfor %} ManilaShares.set_and_delete_metadata: - args: sets: 1 set_size: 3 delete_size: 3 key_min_length: 1 key_max_length: 256 value_min_length: 1 value_max_length: 1024 runner: type: "constant" times: 10 concurrency: 10 context: quotas: manila: shares: -1 gigabytes: -1 users: tenants: 1 users_per_tenant: 1 manila_shares: shares_per_tenant: 1 share_proto: "NFS" size: 1 share_type: "dhss_false" sla: failure_rate: max: 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/rally-jobs/rally-manila.yaml0000664000175000017500000001022700000000000020404 0ustar00zuulzuul00000000000000--- Dummy.openstack: - description: "Check quotas context" runner: type: "constant" times: 1 concurrency: 1 context: users: 
tenants: 1 users_per_tenant: 1 quotas: manila: shares: -1 gigabytes: -1 snapshots: -1 snapshot_gigabytes: -1 share_networks: -1 ManilaShares.list_shares: - args: detailed: True runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 3 users_per_tenant: 4 user_choice_method: "round_robin" sla: failure_rate: max: 0 {% for s in ("create_and_delete_share", "create_and_list_share") %} ManilaShares.{{s}}: - args: share_proto: "nfs" size: 1 share_type: "dhss_true" min_sleep: 1 max_sleep: 2 runner: type: "constant" times: 10 concurrency: 10 context: quotas: manila: shares: -1 gigabytes: -1 share_networks: -1 users: tenants: 2 users_per_tenant: 1 user_choice_method: "round_robin" manila_share_networks: use_share_networks: True sla: failure_rate: max: 0 {% endfor %} ManilaShares.create_share_network_and_delete: - args: name: "rally" runner: type: "constant" times: 10 concurrency: 10 context: quotas: manila: share_networks: -1 users: tenants: 2 users_per_tenant: 1 sla: failure_rate: max: 0 ManilaShares.create_share_network_and_list: - args: name: "rally" detailed: True search_opts: name: "rally" runner: type: "constant" times: 10 concurrency: 10 context: quotas: manila: share_networks: -1 users: tenants: 2 users_per_tenant: 1 sla: failure_rate: max: 0 ManilaShares.list_share_servers: - args: search_opts: {} runner: type: "constant" times: 10 concurrency: 10 sla: failure_rate: max: 0 ManilaShares.create_security_service_and_delete: {% for s in ("ldap", "kerberos", "active_directory") %} - args: security_service_type: {{s}} dns_ip: "fake_dns_ip" server: "fake-server" domain: "fake_domain" user: "fake_user" password: "fake_password" name: "fake_name" description: "fake_description" runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: failure_rate: max: 0 {% endfor %} ManilaShares.attach_security_service_to_share_network: {% for s in ("ldap", "kerberos", "active_directory") %} - args: security_service_type: 
{{s}} runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 quotas: manila: share_networks: -1 sla: failure_rate: max: 0 {% endfor %} ManilaShares.set_and_delete_metadata: - args: sets: 1 set_size: 3 delete_size: 3 key_min_length: 1 key_max_length: 256 value_min_length: 1 value_max_length: 1024 runner: type: "constant" times: 10 concurrency: 10 context: quotas: manila: shares: -1 gigabytes: -1 share_networks: -1 users: tenants: 1 users_per_tenant: 1 manila_share_networks: use_share_networks: True manila_shares: shares_per_tenant: 1 share_proto: "NFS" size: 1 share_type: "dhss_true" sla: failure_rate: max: 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6856747 manila-21.0.0/releasenotes/0000775000175000017500000000000000000000000015547 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315602.209668 manila-21.0.0/releasenotes/notes/0000775000175000017500000000000000000000000016677 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/1741425-fix-service-image-retrieval-issue-bf7ae3e5aa9446e9.yaml0000664000175000017500000000206000000000000031421 0ustar00zuulzuul00000000000000--- upgrade: - | When using a driver with the ``service-instance`` module, ``manila.conf`` now requires a ``[glance]`` section in addition the the previously required sections for ``[neutron]``, ``[nova]``, and ``cinder`` since the glanceclient is now required as well as the clients for these other services. To generate a sample manila.conf that includes sections for all of these services run `` tox -egenconfig`` from the top of the manila source repository. 
fixes: - | Share creation sometimes failed with drivers that use the ``service-instance`` module (currently, the ``generic`` and ``windows smb`` because the service-instance image could not be found. The service instance module used the ``novaclient`` to discover the images, it paginates lists of images, and if there are more than 25 images the service-image may not be in the list. This fix switches to use the ``glanceclient`` -- a more direct and appropriate client for OpenStack images that is not subject to the pagination limitation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/1841035-dellemc-unity-fix-ace-enable-error-b00281bb306d176b.yaml0000664000175000017500000000025700000000000031165 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC Unity Driver: Fixes `bug 1841035 `__ to avoid lots of error messages displayed in logs. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/1896949-netapp-fix-cifs-access-rules-promoted-replica-70b32991cc90e1ca.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/1896949-netapp-fix-cifs-access-rules-promoted-replica-70b32991cc90e0000664000175000017500000000041600000000000032150 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue on ONTAP NetApp driver that caused access rules not to be applied to a promoted replica using CIFS protocol. Please refer to the `Launchpad bug #1896949 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/1900191-netapp-fix-ssl-cert-path-option-35354c9b7a9c37e6.yaml0000664000175000017500000000217300000000000030625 0ustar00zuulzuul00000000000000--- upgrade: - Added a new config option `netapp_ssl_cert_path` for NetApp driver. 
This option enables the user to choose the directory with certificates of trusted CA or the CA bundle. If set to a directory, it must have been processed using the c_rehash utility supplied with OpenSSL. If not informed, it will use the Mozilla's carefully curated collection of Root Certificates for validating the trustworthiness of SSL certificates. fixes: - | Fixed an issue on ONTAP NetApp driver that was forcing the location of CA certificates for SSL verification during HTTPS requests. It adds the `netapp_ssl_cert_path` configuration, enabling the user to choose the directory with certificates of trusted CA or the CA bundle. If set to a directory, it must have been processed using the c_rehash utility supplied with OpenSSL. If not informed, it will use the Mozilla's carefully curated collection of Root Certificates for validating the trustworthiness of SSL certificates. Please refer to the `Launchpad bug #1900191 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/1900469-netapp-cache-pool-status-6dc7da824b9f41c1.yaml0000664000175000017500000000073500000000000027540 0ustar00zuulzuul00000000000000--- fixes: - | In order to optimize the NetApp ONTAP driver, this patch is caching the status of driver pools and reusing for the each share server, given that the pool is not separated by share server. It adds the option `netapp_cached_aggregates_status_lifetime` for controlling the time that the cached values is considered valid. Please refer to the `Launchpad bug #1900469 `_ for more details. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/1901189-netapp-fix-kerberos-setup-357753068a5645ad.yaml0000664000175000017500000000113300000000000027441 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver is now fixed to properly configure and clean up share servers with Kerberos security service, for clustered ONTAP 8.3 or higher. Access rules are now configured with the correct NFS authentication methods based on the security service configured in the share server. Please refer to `Launchpad Bug #1901189 `_, `Launchpad Bug #1904746 `_, and `Launchpad Bug #1907669 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/3par-add-update-access-68fc12ffc099f480.yaml0000664000175000017500000000010000000000000026116 0ustar00zuulzuul00000000000000--- features: - Add update_access support to HPE 3PAR driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/3par-fix-get_vfs-driver-bootup-db6b085eb6094f5f.yaml0000664000175000017500000000055500000000000027756 0ustar00zuulzuul00000000000000--- issues: - 3parclient up to version 4.2.1 always returns only 1 VFS IP address. This may cause 3PAR driver boot up failure while validating VFS IP addresses against IP addresses configured in manila.conf. fixes: - Fixed 3PAR driver boot up failure while validating share server IP address provided in manila.conf against IP address set on array. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/3par-pool-support-fb43b368214c9eda.yaml0000664000175000017500000000050500000000000025405 0ustar00zuulzuul00000000000000 --- features: - HPE 3PAR driver now supports configuring multiple pools per backend. 
upgrade: - HPE 3PAR driver no longer uses hpe3par_share_ip_address option in configuration. With pool support, configuration just requires hpe3par_fpg option or optionally supply share IP address(es) along with hpe3par_fpg.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/Huawei-driver-utilize-requests-lib-67f2c4e7ae0d2efa.yaml0000664000175000017500000000056500000000000031041 0ustar00zuulzuul00000000000000--- fixes: - | For the latest Python 2.7 release, urllib uses the SSL certification while launching URL connection by default, which causes Huawei driver failed to connect backend storage because it doesn't support SSL certification. Utilize the requests lib for Huawei driver instead, and set no SSL certification for backend storage connection. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/Use-http_proxy_to_wsgi-instead-of-ssl-middleware-df533a2c2d9c3a61.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/Use-http_proxy_to_wsgi-instead-of-ssl-middleware-df533a2c2d9c3a61.y0000664000175000017500000000064200000000000033026 0ustar00zuulzuul00000000000000--- security: - http_proxy_to_wsgi is taken into use instead of the deprecated ssl middleware. This makes it easier for deployers to have Manila running behind a proxy that terminates TLS connections. This middleware addition adds the enable_proxy_headers_parsing option to the oslo_middleware section which needs to be set in the configuration file in order to enable middleware to do its work. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-ability-to-check-tenant-quota-usages-7fs17djahy61nsd6.yaml0000664000175000017500000000016000000000000032170 0ustar00zuulzuul00000000000000--- features: - Added detail API to show user and tenant specific usages through the quota-sets resource. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-access-key-to-share-access-map-2fda4c06a750e24e.yaml0000664000175000017500000000030000000000000030347 0ustar00zuulzuul00000000000000--- features: - Driver may return ``access_key``, an access credential, for client identities granted share access. - Added ``access_key`` to the JSON response of ``access_list`` API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-access-visibility-and-delete-locks-52a7ef235813d147.yaml0000664000175000017500000000107700000000000031132 0ustar00zuulzuul00000000000000--- features: - | Added the possibility to lock the deletion of access rules, as well as the visibility of the sensitive fields `access_to` and `access_type` while creating share access rules. When the visibility is restricted, only the owner or more privileged users will be able to visualize the context of the sensitive fields. Both locks can also be imposed by the recently introduced resource locks APIs. - | It is now possible to filter access rules based on the `access_to`, `access_type`, `access_key` and `access_level` keys. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-admin-only-keys-to-share-metadata-5301424ccd9edf8a.yaml0000664000175000017500000000040000000000000031114 0ustar00zuulzuul00000000000000--- fixes: - | User specified scheduler hints such as "affinity_same_host" and "affinity_different_host" are stored as share metadata. These are stored as admin-only metadata keys that cannot be deleted or manipulated by nonadmin users. ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=manila-21.0.0/releasenotes/notes/add-and-update-security-services-to-in-use-share-servers-on-container-driver-52193447c18e6d10.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-and-update-security-services-to-in-use-share-servers-on-contain0000664000175000017500000000037600000000000033536 0ustar00zuulzuul00000000000000--- features: - | The Container Driver is now able to handle LDAP security services configuration while setting up share servers. Also, the Container Driver allows adding or updating ``LDAP`` security services to in use share networks. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-cast-rules-to-readonly-field-62ead37b728db654.yaml0000664000175000017500000000046400000000000030134 0ustar00zuulzuul00000000000000--- features: - Improvements have been made to ensure read only rule semantics for shares and readable replicas. When invoked with administrative context, the share instance and share replica APIs will return ``cast_rules_to_readonly`` as an additional field in the detailed JSON response. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-cleanup-create-from-snap-hnas-0e0431f1fc861a4e.yaml0000664000175000017500000000017300000000000030234 0ustar00zuulzuul00000000000000--- fixes: - Fixed Hitachi HNAS driver not cleaning up data in backend when failing to create a share from snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-count-info-in-share-21a6b36c0f4c87b9.yaml0000664000175000017500000000012600000000000026311 0ustar00zuulzuul00000000000000--- features: - Added total count info in Manila's /shares and /shares/detail APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-count-info-in-share-snapshot-eee90f1471f7a5c4.yaml0000664000175000017500000000014200000000000030232 0ustar00zuulzuul00000000000000--- features: - | Added total count info in Manila's /snapshots and /snapshots/detail APIs. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=manila-21.0.0/releasenotes/notes/add-create-share-from-snapshot-another-pool-or-backend-98d61fe753b85632.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-create-share-from-snapshot-another-pool-or-backend-98d61fe753b80000664000175000017500000000136600000000000032704 0ustar00zuulzuul00000000000000--- features: - The scheduler was improved to select and weigh compatible back ends when creating shares from snapshots. This change only affects the existing behavior if the option ``use_scheduler_creating_share_from_snapshot`` is enabled. - A new share status `creating_from_snapshot` was added to inform the user that a share creation from snapshot is in progress and may take some time to be concluded. 
In order to quantify the share creation progress a new field called ``progress`` was added to shares and share instances information, to indicate the conclusion percentage of share create operation (0 to 100%). fixes: - The availability zone parameter is now being considered when creating shares from snapshots. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/add-create_share_from_snapshot_support-extra-spec-9b1c3ad6796dd07d.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-create_share_from_snapshot_support-extra-spec-9b1c3ad6796dd07d.0000664000175000017500000000043100000000000033163 0ustar00zuulzuul00000000000000--- features: - Added optional create_share_from_snapshot_support extra spec, which was previously implied by the overloaded snapshot_support extra spec. upgrade: - The snapshot_support extra spec is now optional and has no default value set when creating share types. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-defaultadsite-to-security-service-e90854c1a69be581.yaml0000664000175000017500000000030300000000000031213 0ustar00zuulzuul00000000000000--- features: - | From API version 2.76, added 'default_ad_site' field to 'security_service' object. This field can not be used along-with 'server' field of the 'security_service'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-details-to-migration-get-progress-df8b3f2c524db1bd.yaml0000664000175000017500000000017000000000000031411 0ustar00zuulzuul00000000000000--- features: - | Microversion 2.59 adds optional driver `details` to the response of migration get progress. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-ensure-shares-api-9ac10877a99ab0c5.yaml0000664000175000017500000000143100000000000026062 0ustar00zuulzuul00000000000000--- features: - | A new API to start the ensure shares procedure for Manila has been added. Through this API, OpenStack administrators will be able to recalculate the shares' export location without restarting the shares manager service. Additionally, a new configuration option named `update_shares_status_on_ensure` is now available to help OpenStack administrators determine whether the shares' status should be modified during the ensure shares procedure or not. upgrade: - | When restarting the service on an upgrade, when ensure shares is being run it will automatically transition the shares status to `ensuring`. In case you would like to prevent it, please change the value of the `update_shares_status_on_ensure` configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-export-location-filter-92ead37b728db654.yaml0000664000175000017500000000016000000000000027144 0ustar00zuulzuul00000000000000--- features: - It is now possible to filter shares and share instances with export location ID or path.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-export-locations-api-6fc6086c6a081faa.yaml0000664000175000017500000000036100000000000026665 0ustar00zuulzuul00000000000000--- features: - Added APIs for listing export locations per share and share instances. deprecations: - Removed 'export_location' and 'export_locations' attributes from share and share instance views starting with microversion '2.9'. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-flashblade-driver-de20b758a8ce2640.yaml0000664000175000017500000000026600000000000026106 0ustar00zuulzuul00000000000000--- features: - | Added Pure Storage FlashBlade driver. Driver supports NFS protocol. Share operations include create, delete, resize, snapshot and revert-to-snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-format-output-to-manila-manage-c0bbccb16369e5d3.yaml0000664000175000017500000000033700000000000030615 0ustar00zuulzuul00000000000000--- features: - | The possibility to specify the desired output format while issuing the `manila-manage service list` command was added. It is now possible to display the output also in yaml and json formats. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-gathering-usage-size-8454sd45deopb14e.yaml0000664000175000017500000000052400000000000026762 0ustar00zuulzuul00000000000000--- features: - Added periodic task to gather share usage size. upgrade: - Added enable_gathering_share_usage_size and share_usage_size_update_interval options in the manila.conf file to allow configuration of gathering share usage size support and to allow configuration of interval time of gathering share usage size. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-healthcheck-middleware-8f659afb7ee0451c.yaml0000664000175000017500000000045400000000000027200 0ustar00zuulzuul00000000000000--- features: - | The oslo.middleware /healthcheck is now activated by default in the Manila api-paste.ini. Operators can use it to configure HAproxy or the monitoring of Manila APIs. 
Edit the ``api-paste.ini`` file and remove any healthcheck entries to disable this functionality. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-hsp-default-filter-function-0af60a819faabfec.yaml0000664000175000017500000000011300000000000030333 0ustar00zuulzuul00000000000000--- fixes: - Added missing default filter function on Hitachi HSP driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-ipv6-32d89161a9a1e0b4.yaml0000664000175000017500000000016500000000000023325 0ustar00zuulzuul00000000000000--- features: - Validation of IPv6 based addresses has been added for allow access API when access type is IP. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-is-default-e49727d276dd9bc3.yaml0000664000175000017500000000034600000000000024604 0ustar00zuulzuul00000000000000--- features: - The share type and share group type APIs in API version 2.46 return field "is_default" which is set to 'true' if the share type or the share group type is the default as configured by the administrator. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-like-filter-4c1d6dc02f40d5a5.yaml0000664000175000017500000000017600000000000025005 0ustar00zuulzuul00000000000000--- features: - Added like filter support in ``shares``, ``snapshots``, ``share-networks``, ``share-groups`` list APIs. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-manage-db-purge-b32a24ee045d8d45.yaml0000664000175000017500000000021600000000000025452 0ustar00zuulzuul00000000000000--- features: - Add ``purge`` sub command to the ``manila-manage db`` command for administrators to be able to purge soft-deleted rows. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-manila-wsgi-module-ad9bb910f7f3b816.yaml0000664000175000017500000000106400000000000026305 0ustar00zuulzuul00000000000000--- features: - | A new module, ``manila.wsgi``, has been added as a place to gather WSGI ``application`` objects. This is intended to ease deployment by providing a consistent location for these objects. For example, if using uWSGI then instead of: .. code-block:: ini [uwsgi] wsgi-file = /bin/manila-wsgi You can now use: .. code-block:: ini [uwsgi] module = manila.wsgi.api:application This also simplifies deployment with other WSGI servers that expect module paths such as gunicorn. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-per-share-gigabytes-quotas-f495eb0b27378660.yaml0000664000175000017500000000032500000000000027552 0ustar00zuulzuul00000000000000--- features: - | 'quota_per_share_gigabytes' config option allows admin to set per share size limit for a project. The default value is -1["No Limit"] always unless changed in manila.conf by admin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-perodic-task-7454sd45deopb13e.yaml0000664000175000017500000000014600000000000025323 0ustar00zuulzuul00000000000000--- features: - Added perodic task to clean up expired reservation at manila scheduler service. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-policy-in-code-c31a24ee045d8d21.yaml0000664000175000017500000000125200000000000025325 0ustar00zuulzuul00000000000000--- features: - Default Role Based Access Control (RBAC) policies for all the Manila APIs have moved into code from the auxiliary ``policy.json`` file. upgrade: - Removed the default ``policy.json`` file. - Operators need not maintain the ``policy.json`` file if they were not overriding default manila policies. - If Operators need to override certain RBAC policies, they can do so by creating a JSON formatted file named ``policy.json`` and populate it with the necessary overrides. This file must be placed in the config directory. The default RBAC policies are documented in the configuration reference alongside other sample configuration files.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-quiesce-wait-time-to-promote-replica-0757f36d46304a93.yaml0000664000175000017500000000037500000000000031373 0ustar00zuulzuul00000000000000--- features: - | Starting with microversion 2.75, user can specify quiesce wait time seconds in share replica promote API. Be aware that not all drivers support this parameter, when not supported, the value would be silently ignored. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-quotas-section-0e1e638a8f14d26e.yaml0000664000175000017500000000267200000000000025514 0ustar00zuulzuul00000000000000--- deprecations: - | Configuration options to define default quota and behavior of the quota feature must now be configured in the new ``[quota]`` section rather than the ``[DEFAULT]`` section. The existing options in the ``[DEFAULT]`` section have been deprecated and will be removed in a future release. 
Options that have changed in this releases are: - ``[quota]/shares`` - previously ``[DEFAULT]/shares_quota`` - ``[quota]/snapshots`` - previously ``[DEFAULT]/snapshots_quota`` - ``[quota]/gigabytes`` - previously ``[DEFAULT]/quota_gigabytes`` - ``[quota]/per_share_gigabytes`` - previously ``[DEFAULT]/quota_per_share_gigabytes`` - ``[quota]/snapshot_gigabytes`` - previously ``[DEFAULT]/quota_snapshot_gigabytes`` - ``[quota]/share_networks`` - previously ``[DEFAULT]/quota_share_networks`` - ``[quota]/share_groups`` - previously ``[DEFAULT]/quota_share_groups`` - ``[quota]/share_group_snapshots`` - previously ``[DEFAULT]/quota_share_group_snapshots`` - ``[quota]/share_replicas`` - previously ``[DEFAULT]/quota_share_replicas`` - ``[quota]/replica_gigabytes`` - previously ``[DEFAULT]/quota_replica_gigabytes`` - ``[quota]/until_refresh`` - previously ``[DEFAULT]/until_refresh`` - ``[quota]/reservation_expire`` previously ``[DEFAULT]/reservation_expire`` - ``[quota]/driver`` - previously ``[DEFAULT]/quota_driver`` - ``[quota]/max_age`` - previously ``[DEFAULT]/max_age`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-reserved-share-from-snapshot-percentage-2d913ae1fc533690.yaml0000664000175000017500000000076000000000000032276 0ustar00zuulzuul00000000000000--- features: - | 'reserved_share_from_snapshot_percentage' backend config option allows Manila to consider different reservation percentage for shares that are being created from the snapshot. In case this config option is not set, the shares created from snapshot will use reservation percentage value set in 'reserved_share_percentage'. This will be useful for users who want to keep same reservation percentage for both non-snapshot/regular and snapshot shares. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-sec-service-user-msg-10054933ff33e347.yaml0000664000175000017500000000067700000000000026266 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1900755 `_: Added a driver-agnostic exception to handle insufficient privileges on a security service when trying to create a share. Added a user message to provide useful information to end users. Note that vendors will need to implement the exception provided in this patch in their drivers to take advantage of the more convenient user message. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-access-metadata-4fda2c06e750e83c.yaml0000664000175000017500000000156600000000000026733 0ustar00zuulzuul00000000000000--- features: - | Metadata can be added to share access rules as key=value pairs, and also introduced the GET /share-access-rules API with API version 2.45. The prior API to retrieve access rules of a given share, POST /shares/{share-id}/action {'access-list: null} has been removed in API version 2.45. upgrade: - | The API GET /share-access-rules?share_id={share-id} replaces POST /shares/{share-id}/action with body {'access_list': null} in API version 2.45. The new API supports access rule metadata and is expected to support sorting, filtering and pagination features along with newer fields to interact with access rules in future versions. The API request header 'X-OpenStack-Manila-API-Version' can be set to 2.44 to continue using the prior API to retrieve access rules, but no new features will be added to that API. 
././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=manila-21.0.0/releasenotes/notes/add-share-and-share-server-user-provided-encryption-key-191a7587c49dc0f3.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-and-share-server-user-provided-encryption-key-191a7587c490000664000175000017500000000146400000000000032713 0ustar00zuulzuul00000000000000--- features: - | Starting with microversion 2.90, Manila allows end users to provide barbican secret_ref as encryption key reference in share create API. This will be used as either share encryption key reference or share server encryption key reference, based on backend driver support for share encryption. issues: - | When using share types with ``encryption_support=share_server``, share servers created cannot currently be unmanaged from Manila. This limitation will be addressed in a future release. Additionally, care must be taken when managing existing shares with ``driver_handles_share_servers=True`` (DHSS=True). You must use a share type that supports encryption if the share must be encrypted or if its existing encryption status needs to be retained. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-group-quotas-4e426907eed4c000.yaml0000664000175000017500000000041700000000000026210 0ustar00zuulzuul00000000000000--- features: - Added quotas for amount of share groups and share group snapshots. upgrade: - Two new config options are available for setting default quotas for share groups and share group snapshots - 'quota_share_groups' and 'quota_share_group_snapshots'. 
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/add-share-migration-support-in-zfsonlinux-driver-88e6da5692b50810.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-migration-support-in-zfsonlinux-driver-88e6da5692b50810.y0000664000175000017500000000013200000000000032613 0ustar00zuulzuul00000000000000--- features: - Added support for driver-assisted share migration to ZFSonLinux driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-server-limits-fb25ea9f214534af.yaml0000664000175000017500000000172000000000000026511 0ustar00zuulzuul00000000000000--- features: - | Two new backend capabilities were added to Manila in order to help administrators to control and balance their cloud resources. The capability called `max_shares_per_share_server` allows the administrators to define a maximum amount of shares that a share server can have. The capability called `max_share_server_size` allows the administrator to set a maximum number of gigabytes a share server can grow to, considering its instances, replicas and snapshots. Both capabilities accept only integer values. If at least one of these limits is reached, Manila won't consider reusing the referred share server. If there aren't share servers available to reuse, Manila will create another one to place incoming request. If none of these limits were specified in the backend stanza, Manila will consider them as unlimited and allow share servers to be reused regardless the amount of shares or the size they have. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-server-migration-51deb30212859277.yaml0000664000175000017500000000274700000000000026714 0ustar00zuulzuul00000000000000--- features: - | Added the ability to migrate share servers within and across backends in Manila. As designed in share migration, a two-phase approach is now available for share servers, with the addition of a new API to check the feasibility of a migration, called ``share-server-migration-check``. Now, Manila can start, complete, cancel and retrieve the progress of a share server migration. These operations were designed for Administrators and will work only when operating under `driver_handles_share_servers=True` mode. When starting a share server migration, it is possible to choose which capabilities must be supported by the driver: remain ``writable`` during the first phase, ``preserve_snapshots``, be ``nondisruptive`` and migrate to a different share network. upgrade: - | The share server entity now contains two new fields: ``task_state`` and `source_share_server_id`. The `task_state` field helps tracking the migration progress of a share server. The ``source_share_server_id`` field will hold the source share server identification until the migration gets completed or cancelled. New statuses were added in order to control whether a share server, its shares or snapshots are being migrated to a different location. Share server shares’ are going to remain in the status ``server_migrating`` while the migration is in course. When the migration gets completed, the statuses are going to be updated. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-server-migration-enhancements-bbbc98a7fb419700.yaml0000664000175000017500000000037100000000000031651 0ustar00zuulzuul00000000000000--- features: - | Added share server migration enhancements. Share back ends that support non-disruptive migration are able to do so, in case no network changes were identified and if the back end driver supports reusing ip addresses. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-type-filter-to-pool-list-api-267614b4d93j12de.yaml0000664000175000017500000000012200000000000031122 0ustar00zuulzuul00000000000000--- features: - Added share_type to filter results of scheduler-stats/pools API.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-type-info-to-notifications-7fb4597642a6e8e5.yaml0000664000175000017500000000026700000000000030776 0ustar00zuulzuul00000000000000--- features: - Added share type information to Ceilometer notifications. It is useful for billing to be able to charge customers differently for shares of different types. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-share-type-quotas-33a6b36c0f4c88b1.yaml0000664000175000017500000000025500000000000026116 0ustar00zuulzuul00000000000000--- features: - Added possibility to set quotas per share type. It is useful for deployments with multiple backends that are accessible via different share types. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-snapshot-instances-admin-api-959a1121aa407629.yaml0000664000175000017500000000012600000000000027761 0ustar00zuulzuul00000000000000--- features: - Add list, show, and reset-status admin APIs for snapshot instances. ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=manila-21.0.0/releasenotes/notes/add-support-filter-search-for-share-type-fdbaaa9510cc59dd.yaml-5655800975cec5d4.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-support-filter-search-for-share-type-fdbaaa9510cc59dd.yaml-56550000664000175000017500000000011000000000000032517 0ustar00zuulzuul00000000000000--- features: - Share types can now be filtered with its extra_specs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-tegile-driver-1859114513edb13e.yaml0000664000175000017500000000010000000000000025111 0ustar00zuulzuul00000000000000--- features: - Added driver for Tegile IntelliFlash arrays. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/add-tenant-quota-for-share-replicas-and-replicas-size-565ffca315afb6f0.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-tenant-quota-for-share-replicas-and-replicas-size-565ffca315afb0000664000175000017500000000040000000000000033015 0ustar00zuulzuul00000000000000--- features: - | Added quotas for amount of share replicas and share replica gigabytes. upgrade: - | Two new config options are available for setting default quotas for share replicas: `quota_share_replicas` and `quota_replica_gigabytes`. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-two-new-fields-to-share-groups-api-bc576dddd58a3086.yaml0000664000175000017500000000027300000000000031266 0ustar00zuulzuul00000000000000--- features: - added two new fields to share groups API - 'availability_zone_id' and 'consistent_snapshot_support' to be able to get to know these attributes of a share group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-update-host-command-to-manila-manage-b32ad5017b564c9e.yaml0000664000175000017500000000060100000000000031474 0ustar00zuulzuul00000000000000--- features: - The ``manila-manage`` utility now has a new command to update the host attribute of shares. This is useful when the share manager process has been migrated to a different host, or if changes are made to the ``host`` config option or the backend section name in ``manila.conf``. Execute ``manila-manage share update_host -h`` to see usage instructions.././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/add-update-security-service-for-in-use-share-networks-c60d82898c71eb4a.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-update-security-service-for-in-use-share-networks-c60d82898c71e0000664000175000017500000000167500000000000032737 0ustar00zuulzuul00000000000000--- features: - | Added the possibility to add and update an entire security service when a share network is already being used. A new field called ``status`` was added to the share network model and its default value is ``active``. Some operations might be blocked depending on the share network status. A boolean field called ``security_service_update_support`` was added to the share server's model. 
This field defaults to ``False``, and all of the already deployed share servers are going to get the default value even if their backend support it. Administrators will be able to update the field value using ``manila-manage`` commands. The scheduler will filter out backend that does not handle this request during some operations. upgrade: - | ``manila-manage`` now supports share server commands, which allow administrators to modify the field value of some share server's capabilities. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-updated-at-in-share-instance-show-bdd934b26bdab414.yaml0000664000175000017500000000025600000000000031173 0ustar00zuulzuul00000000000000--- fixes: - | In order to let user know when was the last time share instance updated, a field ``updated_at`` is added in the response of share instance show API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-user-id-echo-8f42db469b27ff14.yaml0000664000175000017500000000011500000000000025020 0ustar00zuulzuul00000000000000--- features: - User ID is added to the JSON response of the /shares APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add-vastdriver-5a2ca79a81bc9280.yaml0000664000175000017500000000006700000000000024720 0ustar00zuulzuul00000000000000--- features: - Added driver for VastData filesystem.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add_disabled_reason_to_services-8369aaa2985ada25.yaml0000664000175000017500000000054600000000000030342 0ustar00zuulzuul00000000000000--- features: - Added a new field 'disabled_reason' to services table. 
Users can set 'disabled_reason' when disabling a service, and query the disabled reason while listing services. When re-enabling the service, the disabled reason will be cleared. See `bug 2037700 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add_export_location_metadata-d3c279b73f4c4728.yaml0000664000175000017500000000053600000000000027617 0ustar00zuulzuul00000000000000--- features: - | Added share export location metadata capabilities including create, update all, update single, show and delete metadata. Allows configuration of `admin_only_el_metadata`, such that keys in this list are able to be manipulated only by those with admin privileges. By default, this includes "preferred" key. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add_gateway_into_db-1f3cd3f392ae81cf.yaml0000664000175000017500000000017300000000000026110 0ustar00zuulzuul00000000000000--- features: - Store network gateway value in DB. - Gateway is added to the JSON response of the /share-networks API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add_mtu_info_db-3c1d6dc02f40d5a6.yaml0000664000175000017500000000022700000000000025142 0ustar00zuulzuul00000000000000--- features: - Store network MTU value into DB to make it possible for drivers with share server support to support different values than 1500. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add_share_network_subnet_metadata-ddee482d93030fc3.yaml0000664000175000017500000000022300000000000030757 0ustar00zuulzuul00000000000000--- features: - | Adds share network subnet metadata capabilities including create, update all, update single, show and delete metadata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add_snapshot_metadata-bd986e338220c90e.yaml0000664000175000017500000000037400000000000026240 0ustar00zuulzuul00000000000000--- features: - | Adds snapshot metadata capabilities inlcuding, create, update all, update single, show, and delete metadata. Snapshots may be filtered using metadata keys. Snapshot metadata is available to admin and nonadmin users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add_support_multiple_subnet_per_az-e7b0359f4e8eca48.yaml0000664000175000017500000000237300000000000031257 0ustar00zuulzuul00000000000000--- features: - | Add support for multiple subnet per availability zone. The multiple configuration can be done either on share server deployment or updating a pre-existent share server. The new field ``network_allocation_update_support`` was added to share server's model This field defaults to ``False``, and all of the already deployed share servers are going to get the default value even if their backend support it. Administrators will be able to update the field value using ``manila-manage`` commands. The driver will report its support for adding a subnet on a pre-existent share server through ``network_allocation_update_support``. Also, it will report the support for creating the server with multiple subnets with the ``share_server_multiple_subnet_support``. 
The scheduler will filter out backend that does not handle this request during some operations. Example, creating a share with a share network containing multiple subnets, only hosts that support this deployment will be selected. deprecations: - | Remove 'share_network_subnet_id' attribute from share server view and add 'share_network_subnet_ids' starting with microversion '2.70'. The share server has a list of subnets. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/add_user_id_and_project_id_to_snapshot_APIs-157614b4b8d01e15.yaml0000664000175000017500000000014600000000000032414 0ustar00zuulzuul00000000000000--- features: - user_id and project_id fields are added to the JSON response of /snapshots APIs.././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=manila-21.0.0/releasenotes/notes/added-possibility-to-run-manila-api-with-web-servers-that-support-wsgi-apps-cfffe0b789f8670a.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/added-possibility-to-run-manila-api-with-web-servers-that-support-w0000664000175000017500000000045600000000000033617 0ustar00zuulzuul00000000000000--- features: - Manila API service now can be run using web servers that support WSGI applications. upgrade: - Deprecated path 'manila.api.openstack:FaultWrapper' to 'FaultWrapper' was removed and now only current path is available, which is 'manila.api.middleware.fault:FaultWrapper'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/admin-only-metadata-xena-issue-91690edef7bc13aa.yaml0000664000175000017500000000067100000000000030047 0ustar00zuulzuul00000000000000--- issues: - | User specified scheduler hints such as "same_host" and "different_host" are stored as share metadata with keys such as "__affinity_same_host" and "__affinity_different_host" respectively. 
These can be manipulated or deleted by end users like all metadata unless prevented by RBAC policy. In a future release, the service will restrict the deletion or manipulation of these specific metadata items. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/affinity-filter-747d3d7c51157172.yaml0000664000175000017500000000135000000000000024657 0ustar00zuulzuul00000000000000--- features: - Add AffinityFilter and AntiAffinityFilter to manila's scheduler. These hard affinity and anti-affinity filter needs user to specify affinity/anti-affinity share ids to the field "share.scheduler_hints.same_host" or "share.scheduler_hints.different_host" in the request payload when creating a manila share. The hints are stored as share metadata. The filter properties are populated from this metadata during share migration and so filters will be applied when migrating a manila share. upgrade: - To add AffinityFilter and AntiAffinityFilter to an active deployment, their references must be added to the manila.scheduler.filters section in setup.cfg and must be enabled in manila.conf. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/allow-and-deny-access-rule-if-any-instance-is-valid-0e092913d30dbcdd.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/allow-and-deny-access-rule-if-any-instance-is-valid-0e092913d30dbcd0000664000175000017500000000052700000000000032454 0ustar00zuulzuul00000000000000--- fixes: - | With replication setup at least one backend is working and serving the shares. Starting from API version 2.74, allowing and denying access to shares will only fail if any of the instances is in a transitional state. 
Please refer to `Launchpad bug 1965561 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/api-versions-mark-v1-deprecated-3540d39279fbd60e.yaml0000664000175000017500000000133400000000000027717 0ustar00zuulzuul00000000000000--- deprecations: - Deprecation of the manila v1 API was announced in the mitaka release. The versions response from the API has been fixed to state that this version has been deprecated. If you are using v1 API, consider switching to the v2 API to take advantage of newer features. v2 API has support for 'microversions'. Any endpoint on the v2 API can be requested with the HTTP header 'X-OpenStack-Manila-API-Version' and providing a value '2.x', where '2' is the major version and 'x' is the minor (or 'micro') version. To continue exploiting feature functionality that was part of the v1 API, you may use the v2 API with the microversion '2.0', which is behaviourally identical to the v1 API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/blueprint-netapp-snapshot-visibility-4f090a20145fbf34.yaml0000664000175000017500000000065200000000000031223 0ustar00zuulzuul00000000000000--- features: - Snapshot directories of shares created by the NetApp driver can now be controlled through extra-specs for newly created shares and through a config option for existing shares. upgrades: - A new config option ``netapp_reset_snapdir_visibility`` has been added to the NetApp driver, allowing existing shares to have their snapshot directory visibility setting changed at driver startup. 
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/blueprint-share-and-snapshot-deferred-deletion-b3453718fd1e4b56.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/blueprint-share-and-snapshot-deferred-deletion-b3453718fd1e4b56.yam0000664000175000017500000000050300000000000032624 0ustar00zuulzuul00000000000000--- features: - | With deferred deletion, when resource(share or snapshot) is deleted, the quota is freed immediately and periodic tasks will delete the resource (i.e. share or snapshot) in driver. The resources errored during deletion are retried for deletion after some time in the same periodic tasks. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-admin-network-hnas-9b714736e521101e.yaml0000664000175000017500000000027100000000000025660 0ustar00zuulzuul00000000000000--- features: - Added admin network support to Hitachi HNAS Driver upgrade: - Added a new config option to Hitachi HNAS Driver to allow configuration of Admin Network support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-allow-locking-shares-against-deletion-5a715292e720a254.yaml0000664000175000017500000000070500000000000031425 0ustar00zuulzuul00000000000000--- features: - | Added new API endpoints and methods to create, retrieve, update and delete resource locks. Resource locks can be used to restrict certain actions from occurring on the resource. Currently users can prevent deletion of a share (including soft-deletion, transfer and unmanage operations) by creating a resource lock against the share. In future releases, more resource actions may be supported by this feature. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-create-share-from-snapshot-cephfs-080bd6c2ece74c5b.yaml0000664000175000017500000000077400000000000031151 0ustar00zuulzuul00000000000000--- features: - | Create share from snapshot is now available in CephFS Native and CephFS NFS drivers. This feature is available in Ceph since the Ceph Nautilus release, so a deployment with Ceph Nautilus (v14.2.18 or higher) or Ceph Octopus (v15.2.10 or higher) is required. deprecations: - | The CephFS driver ``cephfs_enable_snapshots`` configuration option has been removed. It was deprecated for removal in the Victoria release. Snapshot support is always enabled now. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-dell-powerflex-manila-driver-2c496483242e555a.yaml0000664000175000017500000000022100000000000027637 0ustar00zuulzuul00000000000000--- features: - | Added a new Manila driver to support Dell PowerFlex storage backend. It supports the minimum set of Manila features. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-dell-powerscale-ensure-shares-f2634d498a679d23.yaml0000664000175000017500000000012100000000000030113 0ustar00zuulzuul00000000000000--- features: - | Dell PowerScale Driver: Added support for ensure shares. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-dell-powerscale-thin-provisioning-71a8c25322d67a6b.yaml0000664000175000017500000000012000000000000031052 0ustar00zuulzuul00000000000000--- features: - | Added support for thin provisioning on Dell PowerScale. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-dell-powerscale-update-share-stats-1408fac91ab3528b.yaml0000664000175000017500000000012600000000000031164 0ustar00zuulzuul00000000000000--- features: - | Dell PowerScale Driver: Added support for update share stats. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-dell-powerstore-manila-driver-263489b0d0b10e2e.yaml0000664000175000017500000000024700000000000030166 0ustar00zuulzuul00000000000000--- features: - | Added a new Manila driver to support Dell PowerStore storage backend. It supports NFS and CIFS shares operations, and snapshot operations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-export-locations-az-api-changes-c8aa1a3a5bc86312.yaml0000664000175000017500000000223200000000000030536 0ustar00zuulzuul00000000000000--- features: - | New experimental APIs were introduced version ``2.47`` to retrieve export locations of share replicas. With API versions ``2.46`` and prior, export locations of non-active or secondary share replicas are included in the share export locations APIs, albeit these APIs do not provide all the necessary distinguishing information (availability zone, replica state and replica ID). See the `API reference `_ for more information regarding these API changes. deprecations: - | In API version ``2.47``, export locations APIs: ``GET /v2/{tenant_id}/shares/{share_id}/export_locations`` and ``GET /v2/{tenant_id}/shares/{share_id}/export_locations/​{export_location_id }`` no longer provide export locations of non-active or secondary share replicas where available. 
Use the newly introduced share replica export locations APIs to gather this information: ``GET /v2/{tenant_id}/share-replicas/{share_replica_id}/export-locations`` and ``GET /v2/{tenant_id}/share-replicas/{share_replica_id}/export -locations/{export_location_id}``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-integrate-os-profiler-b637041861029175.yaml0000664000175000017500000000120600000000000026235 0ustar00zuulzuul00000000000000--- features: - | OSprofiler support was introduced. To allow its usage the api-paste.ini file needs to be modified to contain osprofiler middleware. Also `[profiler]` section needs to be added to the manila.conf file with `enabled`, `hmac_keys` and `trace_sqlalchemy` flags defined. security: - OSprofiler support requires passing of trace information between various OpenStack services. This information is securely signed by one of HMAC keys, defined in manila.conf configuration file. To allow cross-project tracing user should use the key, that is common among all OpenStack services they want to trace. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-netapp-active-iq-scheduler-weigher-df0a6709a63a1f6f.yaml0000664000175000017500000000075100000000000031234 0ustar00zuulzuul00000000000000--- features: - | Added the ``NetAppAIQWeigher`` scheduler weigher that relies on the `NetApp Active IQ `_ to weigh the hosts. It only works with NetApp backends. When other backends exist, the weigher is skipped. Added a new NetApp specific pool information called ``netapp_cluster_name`` that contains the name of the cluster where the pool is located, it can be set by a new NetApp configuration option. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-netapp-ontap-storage-based-cryptograpy-bb7e28896e2a2539.yaml0000664000175000017500000000024600000000000032042 0ustar00zuulzuul00000000000000--- features: - Now Manila NetApp ONTAP driver supports NetApp Volume Encryption (NVE) which allows the creation of volumes that will be encrypted at rest. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-ocata-migration-improvements-c8c5675e266100da.yaml0000664000175000017500000000215100000000000030120 0ustar00zuulzuul00000000000000--- prelude: > The share migration feature was improved to support migrating snapshots where possible and provide a more deterministic user experience. features: - Added 'preserve_snapshots' parameter to share migration API. upgrade: - All share migration driver-assisted API parameters are now mandatory. - Improvements to the share migration API have been qualified with the driver assisted migration support that exists in the ZFSOnLinux driver. However, this driver does not currently support preserving snapshots on migration. - Snapshot restriction in share migration API has been changed to return error only when parameter force-host-assisted-migration is True. deprecations: - Support for the experimental share migration APIs has been dropped for API microversions prior to 2.30. fixes: - Added check to validate that host assisted migration cannot be forced while specifying driver assisted migration options. - The share migration API can only be invoked when at least one parameter within (host, share-network, share-type) is expected to be changed. 
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/bp-pass-resource-metadata-updates-to-backend-drivers-7fff302f64fda2d7.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-pass-resource-metadata-updates-to-backend-drivers-7fff302f64fda20000664000175000017500000000106000000000000033043 0ustar00zuulzuul00000000000000--- features: - | OpenStack operators can now make use of a new config option named `driver_updatable_metadata` to determine which share metadata updates the back end driver needs to be notified about. The config option contains list of share metadata keys. When the share's metadata gets updated and Manila identifies that the new metadata keys match the metadata keys from the provided list, the share back end will be notified and it will apply the necessary changes. The result will be communicated through user messages. ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=manila-21.0.0/releasenotes/notes/bp-pass-share-network-subnet-metadata-updates-to-backend-drivers-10441eee8375f146.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-pass-share-network-subnet-metadata-updates-to-backend-drivers-100000664000175000017500000000154000000000000033374 0ustar00zuulzuul00000000000000--- features: - | OpenStack operators can now make use of a new config option named `driver_updatable_subnet_metadata` to determine which share network subnet metadata updates the back end driver needs to be notified about. The config option contains list of share network subnet metadata keys. When the share network subnet's metadata gets updated and Manila identifies that the new metadata keys match the metadata keys from the provided list, the share back end will be notified and it will apply the necessary changes. The result will be communicated through user messages. This feature is supported from microversion '2.89'. 
Since, with share network migration, metadata belonging to the old share network subnet is ignored when moving to a new share network, updates will not be passed to new share servers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-remove-project-id-from-urls-9f338371b8ffa203.yaml0000664000175000017500000000255100000000000027605 0ustar00zuulzuul00000000000000--- prelude: > Manila v2 API URLs no longer require a project_id to be specified. features: - | It is now possible to omit the %{project_id}s from the API endpoints for the v2 API. While the behavior of the APIs have not been altered, the service recognizes URLs with and without project ids in the path. It is recommended that you adjust the service catalog in your cloud to remove the project_id substitution, especially if you would like to enable users operating at system scope. - | A new "noauth" auth strategy is available, and is named "noauthv2". It can be enabled by setting the configuration option ``[DEFAULT]/auth_strategy`` to ``noauthv2``. This auth strategy can be used when project_id substitution is removed from the manila endpoint URL. upgrade: - | In order to make project_id optional in urls, the possible values of project_id had to be constrained. A new configuration option called ``project_id_regex`` has been added in the ``[DEFAULT]`` section. The default value for this option is ``[0-9a-f\-]+`` and it matches hex UUIDs with and without dashes, therefore covering the formats supported by the OpenStack Identity service. If your cloud uses other formats, set this configuration option accordingly, or remove project_id from the manila endpoint URL in your service catalog. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-rename-isilon-to-powerscale-8e29d71c9e3629c3.yaml0000664000175000017500000000101200000000000027654 0ustar00zuulzuul00000000000000--- features: - | Rebrand from Isilon to PowerScale includes changing of tag names, directory structure, file names and documentation. upgrade: - | Dell PowerScale Driver was previously the EMC Isilon Driver. If the extra-spec``share_backend_name`` was set to ``isilon`` in share types in the past, this needs to be changed to ``powerscale``. ``emc_share_backend`` configuration option must be switched from ``isilon`` to ``powerscale`` in manila.conf when you add the storage backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-share-transfer-between-project-5c2ba9944b17e26e.yaml0000664000175000017500000000014500000000000030421 0ustar00zuulzuul00000000000000--- features: - Share can be transferred between project with API version ``2.77`` and beyond. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-share-type-supported-azs-2e12ed406f181b3b.yaml0000664000175000017500000000105600000000000027264 0ustar00zuulzuul00000000000000--- features: - | A new common user-visible share types extra-spec called "availability_zones" has been introduced. When using API version 2.48, user requests to create new shares in a specific availability zone will be validated against the configured availability zones of the share type. Similarly, users requests to create share groups and share replicas are validated against the share type ``availability_zones`` extra-spec when present. 
Users can also filter share types by one or more AZs that are supported by them.././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bp-support-group-spec-search-share-group-type-api-df55d056b622ced7.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-support-group-spec-search-share-group-type-api-df55d056b622ced7.0000664000175000017500000000011600000000000032615 0ustar00zuulzuul00000000000000--- features: - Share group types can now be filtered with its group_specs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-support-query-user-message-by-timestamp-c0a02b3b3e337e12.yaml0000664000175000017500000000024100000000000032234 0ustar00zuulzuul00000000000000--- features: - User messages can be queried by timestamp with query keys ``created_since`` and ``created_before`` starting with API version ``2.52``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-update-cephfs-drivers-9ac5165f31669030.yaml0000664000175000017500000000344200000000000026366 0ustar00zuulzuul00000000000000--- deprecations: - | As of the Wallaby release the CephFS driver no longer recognizes the scoped extra-spec ``cephfs:data_isolated`` because it is no longer supported by the Ceph community. This style of data isolation required dedicating a Ceph pool for each share and scaled and performed poorly. - | The ``ceph_volume_client`` is deprecated by the CephFS driver in favor of a python rados client that connects to the Ceph manager daemon to interact with the Ceph cluster. This new connection method will enable functionality not available with older client, which has been deprecated by the Ceph community and will be removed in the Quincy release. 
upgrade: - | Manila's CephFS drivers now **require** the "python3-ceph-argparse" and "python3-rados" packages. Do not upgrade without adding these packages to the environment where the ``manila-share`` service runs since without them the driver will refuse to start up. This breaking change is necessary because the old ``ceph_volume_client`` has been deprecated by the Ceph community. features: - | The Ceph backend can now work with multiple filesystem clusters. The filesystem to be used by manila can be specified by the driver option 'cephfs_filesystem_name'. If this option is not specified, the driver will assume that a single filesystem is present in the Ceph cluster and will attempt to use it. - | Deletion of shares offerd by the CephFS driver (CephFS and NFS) is now faster. Now the Ceph manager moves deleted share's content to a trash folder and purges the contents asynchronously rather than handling this as part of the synchronous delete operation. The purge can take considerable time if a share contains a significant amount of data. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bp-update-share-type-name-or-description-a39c5991b930932f.yaml0000664000175000017500000000025600000000000031472 0ustar00zuulzuul00000000000000--- features: - The ``name``, ``description`` and/or ``share_type_access:is_public`` attributes of share types can be updated with API version ``2.50`` and beyond. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1271568-fix-rpc-init-host-with-rpc-6e76afa553b4f2af.yaml0000664000175000017500000000036400000000000030572 0ustar00zuulzuul00000000000000--- fixes: - | An issue with RPC handling on service restart was addressed by ensuring proper initialization before creating the RPC consumer. See `bug 1271568 `_ for more details. 
././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=manila-21.0.0/releasenotes/notes/bug-1475351-handle-successful-deletion-of-snapshot-if-quota-commit-fails-4d150bf0b71a2fd9.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1475351-handle-successful-deletion-of-snapshot-if-quota-commit-0000664000175000017500000000032500000000000032762 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue during snapshot creation where a database error was being mishandled with dead code. See `Launchpad bug 1475351 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1578328-fix-replica-deletion-in-cDOT-7e4502fb50b69507.yaml0000664000175000017500000000027200000000000030453 0ustar00zuulzuul00000000000000--- fixes: - Changed share replica deletion logic in the NetApp cDOT driver to disregard invalid replication relationships from among those recorded by the driver to clean up. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-1591357-fix-cannot-remove-user-rule-for-NFS-8e1130e2accabd56.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1591357-fix-cannot-remove-user-rule-for-NFS-8e1130e2accabd56.ya0000664000175000017500000000015700000000000031675 0ustar00zuulzuul00000000000000--- fixes: - The generic driver has been fixed to allow removing inappropriate CIFS rules on NFS shares. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1597940-fix-hpe3par-delete-share-0daf75193f318c41.yaml0000664000175000017500000000014100000000000030013 0ustar00zuulzuul00000000000000--- fixes: - HPE3PAR driver fix to allow delete of a share that does not exist on the backend. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1602525-port_binding_mandatory-2aaba0fa72b82676.yaml0000664000175000017500000000022600000000000030177 0ustar00zuulzuul00000000000000--- fixes: - Raises an exception in case the host_id is specified when creating a neutron port but the port_binding extension is not activated. ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=manila-21.0.0/releasenotes/notes/bug-1607029-fix-share-server-deletion-when-interfaces-dont-exist-4d00fe9dafadc252.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1607029-fix-share-server-deletion-when-interfaces-dont-exist-4d0000664000175000017500000000017500000000000032702 0ustar00zuulzuul00000000000000--- fixes: - Fixed issue with NetApp cDOT share server cleanup when LIF creation fails while setting up a new vServer. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1613303-fix-config-generator-18b9f9be40d7eee6.yaml0000664000175000017500000000035300000000000027565 0ustar00zuulzuul00000000000000--- fixes: - Fixed the generation of options in the correct option groups. Using the config generator (``tox -e genconfig``), [cinder], [nova] and [neutron] options are now generated in the right groups instead of [default]. 
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-1624526-netapp-cdot-filter-root-aggregates-c30ac5064d530b86.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1624526-netapp-cdot-filter-root-aggregates-c30ac5064d530b86.yam0000664000175000017500000000027300000000000031726 0ustar00zuulzuul00000000000000--- fixes: - The NetApp cDOT driver now explicitly filters root aggregates from the pools reported to the manila scheduler if the driver is operating with cluster credentials. ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=manila-21.0.0/releasenotes/notes/bug-1626249-reintroduce-per-share-instance-access-rule-state-7c08a91373b21557.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1626249-reintroduce-per-share-instance-access-rule-state-7c08a90000664000175000017500000000253200000000000032476 0ustar00zuulzuul00000000000000--- features: - New micro-states ('applying', 'denying'), appear in the 'state' field of access rules list API. These transitional states signify the state of an access rule while its application or denial is being processed asynchronously. - Access rules can be added regardless of the 'access_rules_status' of the share or any of its replicas. fixes: - Fixed a bug with the share manager losing access rule updates when multiple access rules are added to a given share simultaneously. - Instead of all existing access rules transitioning to 'error' state when some error occurs while applying or denying access rules to a given share, only the rules that were in transitional statuses ('applying', 'denying') during an update will transition to 'error' state. This change is expected to aid in identifying any 'bad' rules that require a resolution by the user. 
- Share action APIs dealing with allowing and denying access to shares now perform the policy check for authorization to invoke those APIs as a preliminary step. - As before, when a share is replicated (or being migrated), all replicas (or migration instances) of the share must be in a valid state in order to allow or deny access to the share (where such actions are otherwise allowed). The check enforcing this in the API is fixed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1626523-migration-rw-access-fix-7da3365c7b5b90a1.yaml0000664000175000017500000000015700000000000030041 0ustar00zuulzuul00000000000000--- fixes: - Fixed share remaining with read/write access rules during a host-assisted share migration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1634278-unmount-orig-active-after-promote-8e24c099ddc1e564.yaml0000664000175000017500000000022000000000000032107 0ustar00zuulzuul00000000000000--- fixes: - The NetApp ONTAP driver is now fixed to unmount the original active share volume after one of its replica gets promoted. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-1634734-fix-backend-extraspec-for-replication-d611d2227997ae3e.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1634734-fix-backend-extraspec-for-replication-d611d2227997ae3e.0000664000175000017500000000052600000000000031712 0ustar00zuulzuul00000000000000--- fixes: - | Share type extra-specification ``share_backend_name`` is now ignored when creating share replicas. This ensures that backends in the same replication domain need not have the same value of ``share_backend_name``. See `launchpad bug #1634734 `_ for details. 
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-1638896-missing-migration-completing-state-1e4926ed56eb268c.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1638896-missing-migration-completing-state-1e4926ed56eb268c.yam0000664000175000017500000000022300000000000032170 0ustar00zuulzuul00000000000000--- fixes: - Added missing 'migration_completing' task state when requesting migration-complete for a driver-assisted share migration. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/bug-1638994-drop-fake-cg-support-from-generic-driver-16efce98f94b1b6b.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1638994-drop-fake-cg-support-from-generic-driver-16efce98f94b1b0000664000175000017500000000021600000000000032216 0ustar00zuulzuul00000000000000--- other: - Removed fake Consistency Group support from Generic driver. It was added only for testing purpose and now it is redundant. ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=manila-21.0.0/releasenotes/notes/bug-1639188-fix-extend-operation-of-shrinked-share-in-generic-driver-5c7f82faefaf26ea.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1639188-fix-extend-operation-of-shrinked-share-in-generic-drive0000664000175000017500000000043600000000000032746 0ustar00zuulzuul00000000000000--- fixes: - In the Generic driver, the backing volume size is greater than the share size when the share has been shrunk. So share extend logic in this driver was changed to only extend the backing volume if its size is less than the size of the new, extended share. 
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-1639662-fix-share-service-VM-restart-problem-1110f9133cc294e8.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1639662-fix-share-service-VM-restart-problem-1110f9133cc294e8.y0000664000175000017500000000034200000000000031535 0ustar00zuulzuul00000000000000--- fixes: - Changed sync mount permanently logic in the Generic driver to select the newly mounted share from /etc/mtab and insert it into /etc/fstab. Added corresponding remove mount permanently functionality. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1640169-check-ceph-connection-on-setup-c92bde41ced43326.yaml0000664000175000017500000000016600000000000031361 0ustar00zuulzuul00000000000000--- fixes: - Added a check on driver startup for CEPHFS back ends to verify whether the back end is accessible. ././@PaxHeader0000000000000000000000000000025700000000000011461 xustar0000000000000000153 path=manila-21.0.0/releasenotes/notes/bug-1645746-fix-inheritance-of-access-rules-from-parent-share-by-zfsonlinux-child-shares-4f85908c8e9871ef.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1645746-fix-inheritance-of-access-rules-from-parent-share-by-zf0000664000175000017500000000034600000000000032661 0ustar00zuulzuul00000000000000--- fixes: - Fix inheritance of access rules from parent share by ZFSonLinux child shares. It was inherited before, now it is not, as expected. Now, each share created from snapshot will not have inherited access rules. 
././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=manila-21.0.0/releasenotes/notes/bug-1645751-fixed-shares-created-from-snapshots-for-lvm-and-generic-drivers-94a1161a9e0b5a85.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1645751-fixed-shares-created-from-snapshots-for-lvm-and-generic0000664000175000017500000000043300000000000032731 0ustar00zuulzuul00000000000000--- fixes: - Fixed shares created from snapshots on the LVM and Generic drivers to no longer share the same filesystem handle as the source shares. The cause was the same as described in Ubuntu launchpad bug https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1071733 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1646603-netapp-broadcast-domains-411a626d38835177.yaml0000664000175000017500000000021500000000000027762 0ustar00zuulzuul00000000000000--- fixes: - Changed NetApp cDOT driver when running with DHSS=True to maintain a 1-1 relation between IPSpaces and broadcast domains. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1649782-fixed-incorrect-exportfs-exportfs.yaml0000664000175000017500000000016100000000000030003 0ustar00zuulzuul00000000000000--- fixes: - Fixed incorrect exportfs command used while extending and shrinking shares on Generic driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1650043-gpfs-access-bugs-8c10f26ff1f795f4.yaml0000664000175000017500000000037500000000000026550 0ustar00zuulzuul00000000000000--- fixes: - Fixed GPFS CES to allow adding a first access rule to a share. - Fixed GPFS CES to allow deleting a share with no access rules. 
- Fixed GPFS CES to allow deletion of a failed access rule when there are no successful access rules. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1651578-gpfs-prepend-beb99f408cf20bb5.yaml0000664000175000017500000000040300000000000026141 0ustar00zuulzuul00000000000000--- fixes: - Fixed GPFS KNFS generation of NFS server allow/deny commands when there are multiple servers in gpfs_nfs_server_list so that the remote ssh login prefix used for one server is not carried over to the commands for following servers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1651587-deny-access-verify-563ef2f3f6b8c13b.yaml0000664000175000017500000000016600000000000027177 0ustar00zuulzuul00000000000000--- fixes: - Fixed GPFS KNFS deny access so that it will not fail when the access can be verified to not exist. ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=manila-21.0.0/releasenotes/notes/bug-1654598-enforce-policy-checks-for-share-export-locations-a5cea1ec123b1469.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1654598-enforce-policy-checks-for-share-export-locations-a5cea10000664000175000017500000000027300000000000032647 0ustar00zuulzuul00000000000000--- security: - | Closes a gap where a user can see the export locations for another user's share if the uuid of the other share is leaked, stolen, or (improbably) guessed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1657033-fix-share-metadata-error-when-deleting-share.yaml0000664000175000017500000000015300000000000031606 0ustar00zuulzuul00000000000000--- fixes: - Fixed the error that share metadata records are not soft-deleted when deleting a share. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1658133-fix-lvm-revert-34a90e70c9aa7354.yaml0000664000175000017500000000020400000000000026204 0ustar00zuulzuul00000000000000--- fixes: - Fixed failure when reverting a share to a snapshot using the LVM driver while access rules exist for that share. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1659023-netapp-cg-fix-56bb77b7bc61c3f5.yaml0000664000175000017500000000031200000000000026123 0ustar00zuulzuul00000000000000--- fixes: - Re-enabled the consistent snapshot code in the NetApp driver, now compatible with the Manila Share Groups API instead of the deprecated and removed Manila Consistency Groups API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1660319-1660336-migration-share-groups-e66a1478634947ad.yaml0000664000175000017500000000013200000000000030506 0ustar00zuulzuul00000000000000--- fixes: - Shares can no longer be migrated while being members of share groups. ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=manila-21.0.0/releasenotes/notes/bug-1660321-fix-default-approach-for-share-group-snapshot-creation-3e843155c395e861.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1660321-fix-default-approach-for-share-group-snapshot-creation-0000664000175000017500000000045000000000000032751 0ustar00zuulzuul00000000000000--- fixes: - Fixed default approach for creating share group snapshots that uses common share driver interface by making proper call of this method. Before, some drivers that were depending on some specific data from 'snapshot' object were failing not being able to get these data. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1660425-snapshot-access-in-error-bce279ee310060f5.yaml0000664000175000017500000000016400000000000030225 0ustar00zuulzuul00000000000000--- fixes: - Snapshot access rules in error state no longer cause other rules to go into error state as well. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=manila-21.0.0/releasenotes/notes/bug-1660686-snapshot-export-locations-mount-not-supported-cdc2f5a3b57a9319.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1660686-snapshot-export-locations-mount-not-supported-cdc2f5a3b0000664000175000017500000000017100000000000033014 0ustar00zuulzuul00000000000000--- fixes: - Fixed snapshot export locations being created for shares with property mount_snapshot_support=False. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1660726-migration-export-locations-5670734670435015.yaml0000664000175000017500000000017200000000000030244 0ustar00zuulzuul00000000000000--- fixes: - Export locations pertaining to migration destinations are no longer shown until migration is complete. ././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=manila-21.0.0/releasenotes/notes/bug-1661266-add-consistent-snapshot-support-attr-to-share-groups-DB-model-daa1d05129802796.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1661266-add-consistent-snapshot-support-attr-to-share-groups-DB0000664000175000017500000000024600000000000033006 0ustar00zuulzuul00000000000000--- fixes: - Added 'consistent_snapshot_support' attribute to 'share_groups' DB model, to ease possible future backport of bugfixes for 'share groups' feature. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1661271-hnas-snapshot-readonly-4e50183100ed2b19.yaml0000664000175000017500000000015700000000000027626 0ustar00zuulzuul00000000000000--- fixes: - Fixed HNAS driver creating snapshots of NFS shares without first changing it to read-only. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-1661381-migration-snapshot-export-locations-169786dcec386402.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1661381-migration-snapshot-export-locations-169786dcec386402.ya0000664000175000017500000000013300000000000032057 0ustar00zuulzuul00000000000000--- fixes: - Fixed error in driver-assisted share migration of mountable snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1662615-hnas-snapshot-concurrency-2147159ea6b086c5.yaml0000664000175000017500000000016200000000000030361 0ustar00zuulzuul00000000000000--- fixes: - Fixed HNAS driver error when creating or deleting snapshots caused by concurrency in backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1663300-554e9c78ca2ba992.yaml0000664000175000017500000000013100000000000023403 0ustar00zuulzuul00000000000000fixes: - | The Windows driver issues regarding share creation have been fixed. 
././@PaxHeader0000000000000000000000000000025300000000000011455 xustar0000000000000000149 path=manila-21.0.0/releasenotes/notes/bug-1664201-fix-share-replica-status-update-concurrency-in-replica-promotion-feature-63b15d96106c65da.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1664201-fix-share-replica-status-update-concurrency-in-replica-0000664000175000017500000000064000000000000032750 0ustar00zuulzuul00000000000000--- fixes: - Fixed share replica status update concurrency in share replica promotion feature. Before it was possible to see two active replicas, having 'dr' or 'readable' type of replication, performing 'share replica promotion' action. Now, replica that becomes active is always updated last, so, at some period of time we will have zero 'active' replicas at once instead of two of them. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1665002-hnas-driver-version-f3a8f6bff3dbe054.yaml0000664000175000017500000000014700000000000027526 0ustar00zuulzuul00000000000000--- fixes: - Fixed HNAS driver version according to the new content added in the Ocata release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1665072-migration-success-fix-3da1e80fbab666de.yaml0000664000175000017500000000022700000000000030043 0ustar00zuulzuul00000000000000--- fixes: - Fixed ``task_state`` field in the share model being set to ``migration_success`` before actually completing a share migration. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1666541-quobyte-resize-list-param-bc5b9c42bdc94c9f.yaml0000664000175000017500000000013500000000000030670 0ustar00zuulzuul00000000000000--- fixes: - Quobyte share extend/shrink operations now work with all Quobyte API versions ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1667450-migration-stale-source-9c092fee267a7a0f.yaml0000664000175000017500000000017400000000000030071 0ustar00zuulzuul00000000000000fixes: - Fixed host-assisted share migration not deleting the source share from the storage backend upon completion. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1674908-allow-user-access-fix-495b3e42bdc985ec.yaml0000664000175000017500000000024300000000000027620 0ustar00zuulzuul00000000000000--- fixes: - Changed user access name limit from 32 to 255 characters since there are security services that allow user names longer than 32 characters. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=manila-21.0.0/releasenotes/notes/bug-1678524-check-snaprestore-license-for-snapshot-revert-6d32afdc5d0b2b51.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1678524-check-snaprestore-license-for-snapshot-revert-6d32afdc50000664000175000017500000000022300000000000032575 0ustar00zuulzuul00000000000000--- fixes: - The NetApp ONTAP driver is now fixed to set revert_to_snapshot_support to True or False depending upon SnapRestore License. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1682795-share-access-list-api-5b1e86218959f796.yaml0000664000175000017500000000024300000000000027307 0ustar00zuulzuul00000000000000--- features: - Beginning in API version 2.33, share access APIs return "created_at" and "updated_at" for each access rule as part of the JSON response. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1684032-6e4502fdceb693dr7.yaml0000664000175000017500000000060300000000000023652 0ustar00zuulzuul00000000000000--- fixes: - When upgrading manila to a new release, if a share driver has added support for storage pools in the new release, shares belonging to such drivers would be iteratively updated by manila. This is done by querying the back end driver for the storage pool that each share is hosted within. A bug affecting the database update in this path has been fixed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1688620-netapp-migration-cancelation-fb913131eb8eb82a.yaml0000664000175000017500000000200600000000000031214 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP share delete operation can fail sometimes when is triggered immediately after migration cancelation on a overloaded NetApp backend. Canceling an ongoing migration is an asynchronous operation on an ONTAP storage system. Now the NetApp driver checks if the asynchronous API has ended its operation before reporting migration cancelation success. If the operation of the asynchronous API did not end within the specified timeout, the ``migration cancel`` cancel operation will be considered unsuccessful. To do so, a new configuration option ``netapp_migration_cancel_timeout`` has been added. 
upgrade: - | The configuration option ``netapp_migration_cancel_timeout`` can be specified in the NetApp backend section to redefine the amount of time that the NetApp driver must attempt to wait on the asynchronous operation to cancel an ongoing migration. This option is set to 3600 seconds by default, which is sufficient time in most cases. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1690159-retry-backend-init-58486ea420feaf51.yaml0000664000175000017500000000035200000000000027105 0ustar00zuulzuul00000000000000--- fixes: - Retry to initialize the manila-share driver for every backend in case there was an error during initialization. That way even a temporary broken backend can be initialized later without restarting manila-share. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1690785-fix-gpfs-path-91a354bc69bf6a47.yaml0000664000175000017500000000013200000000000026072 0ustar00zuulzuul00000000000000--- fixes: - Fixed the prerequisite of GPFS path export needed for initializing driver. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-1694768-fix-netapp-cdot-revert-to-snapshot-5e1be65260454988.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1694768-fix-netapp-cdot-revert-to-snapshot-5e1be65260454988.yam0000664000175000017500000000015300000000000031623 0ustar00zuulzuul00000000000000--- fixes: - Fixed the NetApp ONTAP driver to handle reverting to replicated and migrated snapshots. 
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/bug-1696000-netapp-fix-security-style-on-cifs-shares-cbdd557a27d11961.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1696000-netapp-fix-security-style-on-cifs-shares-cbdd557a27d1190000664000175000017500000000017200000000000032161 0ustar00zuulzuul00000000000000--- fixes: - The NetApp ONTAP driver has been fixed to ensure the "security style" on CIFS shares is always "ntfs". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1696669-add-ou-to-security-service-06b69615bd417d40.yaml0000664000175000017500000000022200000000000030350 0ustar00zuulzuul00000000000000--- features: - | Added 'ou' field to 'security_service' object to be able to configure in which organizational unit the share ends up. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-1698250-netapp-cdot-fix-share-server-deletion-494ab3ad1c0a97c0.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1698250-netapp-cdot-fix-share-server-deletion-494ab3ad1c0a97c0.0000664000175000017500000000023100000000000031773 0ustar00zuulzuul00000000000000--- fixes: - The NetApp cDOT DHSS=True drivers have been fixed to not assume that share servers are only provisioned on segmented (VLAN) networks. 
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-1698258-netapp-fix-tenant-network-gateways-85935582e89a72a0.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1698258-netapp-fix-tenant-network-gateways-85935582e89a72a0.yam0000664000175000017500000000051600000000000031716 0ustar00zuulzuul00000000000000--- fixes: - The NetApp DHSS=True driver now creates static routes with the gateway specified on the tenant networks. Potential beneficiaries of this bug-fix are deployers/users whose CIFS security service (e.g. Active Directory) is not part of the tenant network, but a route exists via the tenant network gateway. ././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=manila-21.0.0/releasenotes/notes/bug-1699836-disallow-share-type-deletion-with-active-share-group-types-83809532d06ef0dd.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1699836-disallow-share-type-deletion-with-active-share-group-ty0000664000175000017500000000027200000000000033051 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `Launchpad bug 1699836 `_ by preventing share type deletion when there are share group types associated with them. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/bug-1700346-new-exception-for-no-default-share-type-b1dd9bbe8c9cb3df.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1700346-new-exception-for-no-default-share-type-b1dd9bbe8c9cb3d0000664000175000017500000000025000000000000032332 0ustar00zuulzuul00000000000000--- fixes: - A new exception will be thrown when a default share type was not configured and no other share type was specified on any sort of share creation. 
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/releasenotes/notes/bug-1700871-ontap-allow-extend-of-replicated-share-2c9709180d954308.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1700871-ontap-allow-extend-of-replicated-share-2c9709180d9543080000664000175000017500000000020400000000000031501 0ustar00zuulzuul00000000000000--- fixes: - The NetApp ONTAP driver is now fixed to allow extension and shrinking of share replicas after they get promoted. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/releasenotes/notes/bug-1703581-cifs-extension-failing-because-of-volume-in-use-3fea31c4a58e2f1b.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1703581-cifs-extension-failing-because-of-volume-in-use-3fea31c0000664000175000017500000000022700000000000032426 0ustar00zuulzuul00000000000000--- fixes: - Fixed the Generic driver to evict and kill any user processes accessing a share before attempting to extend or shrink a CIFS share. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1703660-fix-netapp-driver-preferred-state-0ce1a62961cded35.yaml0000664000175000017500000000030600000000000032106 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the NetApp driver to report the correct value of the "preferred" export location metadata where it cannot determine if there are any "preferred" export locations. 
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/bug-1704622-netapp-cdot-fix-share-specs-on-migration-bfbbebec26533652.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1704622-netapp-cdot-fix-share-specs-on-migration-bfbbebec2653360000664000175000017500000000032200000000000032234 0ustar00zuulzuul00000000000000--- fixes: - The NetApp driver has been fixed to ensure that share type changes during driver optimized share migration will result in correction of share properties as per the requested extra-specs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1704971-fix-name-description-filter-85935582e89a72a0.yaml0000664000175000017500000000116600000000000030517 0ustar00zuulzuul00000000000000--- fixes: - Fix the ``exact`` filters (name, description) in ``shares``, ``snapshots``, ``share-networks`` list can be filter by ``inexact`` value. We got the error because the ``description`` filter will be skipped in ``shares``, ``snapshots`` list API, and we will directly remove the ``inexact`` filter flag('~') and process the ``exact`` filters (name, description) by ``inexact`` filter logic. Now, we added ``description`` filter in ``shares``, ``snapshots`` list, and check whether the filter keys has the '~' in the end in ``shares``, ``snapshots``, ``share-networks`` list firstly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1705533-manage-api-error-message-fix-967b0d44c09b914a.yaml0000664000175000017500000000007400000000000030656 0ustar00zuulzuul00000000000000--- fixes: - | Error message changed for manage API. 
././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-1706137-netapp-manila-set-valid-qos-during-migration-4405fff02bd6fa83.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1706137-netapp-manila-set-valid-qos-during-migration-4405fff02b0000664000175000017500000000033600000000000032276 0ustar00zuulzuul00000000000000--- fixes: - NetApp cDOT driver is now fixed to remove the QoS Policy on the backend volume when a share is migrated from an extra-spec which had QoS defined to another extra-spec which has no QoS defined in it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1707066-deny-ipv6-access-in-error-bce379ee310060f6.yaml0000664000175000017500000000051500000000000030214 0ustar00zuulzuul00000000000000--- fixes: - The access-allow API accepts ipv6 rules and ignores them if the configured backend does not support ipv6 access rules. However, when the access-deny API is invoked to remove such rules, they used to be stuck in "denying" state. This bug has been fixed and ipv6 access rules can be denied successfully. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/releasenotes/notes/bug-1707084-netapp-manila-driver-to-honour-std-extra-specs-d32fae4e9411b503.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1707084-netapp-manila-driver-to-honour-std-extra-specs-d32fae4e0000664000175000017500000000020000000000000032503 0ustar00zuulzuul00000000000000--- fixes: - The NetApp cDOT driver is now fixed to honour the standard extra_specs during migration and manage/unmanage. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1707943-make-lvm-revert-synchronous-0ef5baee3367fd27.yaml0000664000175000017500000000021600000000000031157 0ustar00zuulzuul00000000000000--- fixes: - Changed implementation of revert-to-snapshot in LVM driver to be synchronous, preventing failure of subsequent operations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1707946-nfs-helper-0-netmask-224da94b82056f93.yaml0000664000175000017500000000032000000000000027110 0ustar00zuulzuul00000000000000--- fixes: - Fixed application of access rules with type ``ip`` and netmask length 0 in the ``NFSHelper`` plugin, affecting LVM and Generic drivers. Previously these rules silently failed to apply. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1714691-decimal-separators-in-locales-392c0c794c49c1c2.yaml0000664000175000017500000000032400000000000031130 0ustar00zuulzuul00000000000000--- fixes: - Fixed issue where locales other than POSIX and en_US.UTF-8 might cause the translate_string_size_to_float method to fail on a comma decimal separator instead of a period decimal separator. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1716922-security-group-creation-failed-d46085d11370d918.yaml0000664000175000017500000000014300000000000031210 0ustar00zuulzuul00000000000000--- fixes: - Fixed creation of security group and security group rule - neutronclient mapping././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-1717135-ganesha-cleanup-of-tmp-config-files-66082b2384ace0a5.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1717135-ganesha-cleanup-of-tmp-config-files-66082b2384ace0a5.ya0000664000175000017500000000025200000000000031555 0ustar00zuulzuul00000000000000--- fixes: - Added operation of cleaning up the temp config files when moving the config file from temp location to the correct ganesha config location goes wrong. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=manila-21.0.0/releasenotes/notes/bug-1717263-netapp-ontap-fix-size-for-share-from-snapshot-02385baa7e085f39.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1717263-netapp-ontap-fix-size-for-share-from-snapshot-02385baa70000664000175000017500000000021700000000000032263 0ustar00zuulzuul00000000000000--- fixes: - The NetApp ONTAP driver has been fixed to honor the share size as requested when creating shares from an existing snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1717392-fix-downgrade-share-access-map-bbd5fe9cc7002f2d.yaml0000664000175000017500000000042300000000000031471 0ustar00zuulzuul00000000000000--- fixes: - The `Launchpad bug 1717392 `_ has been fixed and database downgrades do not fail if the database contains deleted access rules. Database downgrades are not recommended in production environments. 
././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=manila-21.0.0/releasenotes/notes/bug-172112-fix-drives-private-storage-update-deleted-entries-7516ba624da2dda7.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-172112-fix-drives-private-storage-update-deleted-entries-7516ba0000664000175000017500000000023100000000000032554 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the database update query for the drivers' private data store that was failing to update any rows marked as deleted. ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=manila-21.0.0/releasenotes/notes/bug-1721787-fix-getting-share-networks-and-security-services-error-7e5e7981fcbf2b53.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1721787-fix-getting-share-networks-and-security-services-error-0000664000175000017500000000047000000000000033065 0ustar00zuulzuul00000000000000--- fixes: - Non admin users may invoke GET /share-networks and GET /security-services APIs with the 'all-tenants' flag in the query, however, the flag is ignored, and only resources belonging to the project will be served. This API change was made to fix bug 1721787 in the manila client project. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1730509-netapp-ipv6-hostname-39abc7f40d48c844.yaml0000664000175000017500000000016500000000000027376 0ustar00zuulzuul00000000000000--- fixes: IPv6 addresses are handled corrected when specified for the netapp_server_hostname driver option. 
././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=manila-21.0.0/releasenotes/notes/bug-1733494-allow-user-group-name-with-blank-access-fix-665b3e42bdc985ac.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1733494-allow-user-group-name-with-blank-access-fix-665b3e42bdc0000664000175000017500000000016400000000000032204 0ustar00zuulzuul00000000000000--- fixes: - Allows the use of blank in user group name, since the AD allow user group name to include blank. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1734127-a239d022bef4a002.yaml0000664000175000017500000000015200000000000023354 0ustar00zuulzuul00000000000000--- fixes: - Fixed logic in driver base class that determines whether IPv6 is supported at runtime. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1735832-43e9291ddd73286d.yaml0000664000175000017500000000017000000000000023340 0ustar00zuulzuul00000000000000--- fixes: - Root uses can now correctly read files on read-only shares when the LVM or generic drivers are used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1736370-qnap-fix-access-rule-override-1b79b70ae48ad9e6.yaml0000664000175000017500000000017400000000000031240 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the QNAP driver that the access rule setting is overridden by the later access rule setting. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1745436-78c46f8a0c96cbca.yaml0000664000175000017500000000062400000000000023560 0ustar00zuulzuul00000000000000--- fixes: - Improved responsiveness of Host-assisted share migration by changing the waiting function of resource waiters. upgrade: - Added config option 'data_node_access_ips' that accepts a list of IP addresses. Those IPs can be either IPv4 or IPv6. deprecations: - Config option 'data_node_access_ip' has been deprecated in favor of 'data_node_access_ips', and marked for removal. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-1745436-remove-data-node-access-ip-config-opt-709f330c57cdb0d5.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1745436-remove-data-node-access-ip-config-opt-709f330c57cdb0d5.0000664000175000017500000000045100000000000031557 0ustar00zuulzuul00000000000000--- upgrade: - | The configuration option for the manila-data service, ``data_node_access_ip`` from the [DEFAULT] section is no longer supported. It was deprecated in favor of ``data_node_access_ips`` in the OpenStack Shared File Systems (manila) service release 6.0.0 (Queens). ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-1746202-fix-unicodeDecodeError-when-decode-API-input-4e4502fb50b69502.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1746202-fix-unicodeDecodeError-when-decode-API-input-4e4502fb500000664000175000017500000000025500000000000031774 0ustar00zuulzuul00000000000000--- fixes: - This patch converts UnicodeDecodeError exception into BadRequest, plus an explicit error message. Fix invalid query parameter could lead to HTTP 500. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1746723-8b89633062885f0b.yaml0000664000175000017500000000015300000000000023203 0ustar00zuulzuul00000000000000--- fixes: - LVM driver now correctly parses IPv6 addresses during a Host-assisted share migration. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/releasenotes/notes/bug-1747695-fixed-ip-version-in-neutron-bind-network-plugin-526958e2d83df072.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1747695-fixed-ip-version-in-neutron-bind-network-plugin-526958e0000664000175000017500000000021400000000000032344 0ustar00zuulzuul00000000000000--- fixes: - | Fixed multi segment neutron data save in NeutronBindNetworkPlugin to provide IP version for neutron port creation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1748139-generic-dont-check-socket-login-e2dd1dbc42ae281a.yaml0000664000175000017500000000060300000000000031636 0ustar00zuulzuul00000000000000--- fixes: - | The service instance module, used by some drivers supporting `driver_handles_share_servers=True` mode, now checks for login instead of polling the SSH port for connectivity. This is expected to improve robustness of share creation operations that need a new share server. See `bug 1748139 `_ for more details. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1749184-eb06929e76a14fce.yaml0000664000175000017500000000032300000000000023501 0ustar00zuulzuul00000000000000--- fixes: - The database migration has been adjusted to work with mariadb >= 10.2.8 by ensuring that a primary key constraint is first dropped and re-added when a column is removed that is part of it ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-1750074-fix-rabbitmq-password-in-debug-mode-4e136ff86223c4ea.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1750074-fix-rabbitmq-password-in-debug-mode-4e136ff86223c4ea.ya0000664000175000017500000000014000000000000031702 0ustar00zuulzuul00000000000000--- fixes: - rabbitmq password is no longer exposed in the logs when debugging is enabled.././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-1765420-netapp-fix-delete-share-for-vsadmins-b5dc9e0224cb3ba2.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1765420-netapp-fix-delete-share-for-vsadmins-b5dc9e0224cb3ba2.y0000664000175000017500000000025500000000000032053 0ustar00zuulzuul00000000000000--- fixes: - The `Launchpad bug 1765420 `_ that affected the NetApp ONTAP driver during share deletion has been fixed. 
././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=manila-21.0.0/releasenotes/notes/bug-1767430-access-control-raise-ip-address-conflict-on-host-routes-0c298125fee4a640.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1767430-access-control-raise-ip-address-conflict-on-host-routes0000664000175000017500000000041200000000000032777 0ustar00zuulzuul00000000000000--- fixes: - | The access-allow API has now been fixed to validate duplicate IP addresses by different notation styles. For example, if a host with IP 172.16.21.24 already has access to an NFS share, access cannot be requested for 172.16.21.24/32. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1772026-nve-license-not-present-fix-e5d2e0d6c5df9227.yaml0000664000175000017500000000031700000000000030737 0ustar00zuulzuul00000000000000fixes: - | Since the addition of NVE support, the Netapp driver used to fail to start when a VE license is not present on an ONTAP > 9.1. Now the driver starts but it reports NVE not supported.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1772647-b98025c07553e35d.yaml0000664000175000017500000000015600000000000023260 0ustar00zuulzuul00000000000000--- fixes: - Fix ensure_shares running every time despite not having any configuration option changed. 
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-1773761-qnap-fix-manage-share-size-override-a18acdf1a41909b0.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1773761-qnap-fix-manage-share-size-override-a18acdf1a41909b0.ya0000664000175000017500000000020600000000000031756 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the QNAP driver so that it does not modify the share size on the back end when manila manages a share. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1773929-a5cb52c8417ec5fc.yaml0000664000175000017500000000021700000000000023557 0ustar00zuulzuul00000000000000--- upgrade: - | The Quobyte driver now provides an option to adapt the export path to the Quobyte NFS services PSEUDO path setting. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1774159-0afe3dbc39e3c6b0.yaml0000664000175000017500000000030700000000000023623 0ustar00zuulzuul00000000000000--- fixes: - | The NetApp ONTAP DHSS=True driver has been fixed to allow multiple shares to use the same ipspace and VLAN port across all subnets belonging to the same neutron network. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1774604-qb-driver-b7e717cbc71d6189.yaml0000664000175000017500000000022600000000000025311 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a bug in the Quobyte driver that allowed share resizing to incorrectly address the share to be resized in the backend. 
././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-1777126-netapp-skip-route-setup-if-no-gateway-e841635dcd20fd12.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1777126-netapp-skip-route-setup-if-no-gateway-e841635dcd20fd12.0000664000175000017500000000034100000000000031706 0ustar00zuulzuul00000000000000--- fixes: - The NetApp driver has been fixed to not enforce route creation when the share network provided has no gateway. See `Launchpad bug 1777126 `_ for details. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-1777551-security-networks-api-all-tenants-fix-a061274afe15180d.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1777551-security-networks-api-all-tenants-fix-a061274afe15180d.0000664000175000017500000000047700000000000031736 0ustar00zuulzuul00000000000000--- fixes: - | The ``all_tenants`` query parameter in the share networks API (GET /v2/{project_id}/share-networks) has been fixed to accept 'f', 'false', 'off', 'n', 'no', or '0'. Setting the flag to any of these values will retrieve security services only from the requester's project namespace. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-1777551-security-services-api-all-tenants-fix-e820ec370d7df473.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1777551-security-services-api-all-tenants-fix-e820ec370d7df473.0000664000175000017500000000050500000000000031775 0ustar00zuulzuul00000000000000--- fixes: - | The ``all_tenants`` query parameter in the security services API (GET /v2/{project_id}/security-services) has been fixed to accept 'f', 'false', 'off', 'n', 'no', or '0'. Setting the flag to any of these values will retrieve security services only from the requester's project namespace. 
././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/bug-1778975-fix-quota-update-does-not-require-a-value-496ec846d2c43963.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1778975-fix-quota-update-does-not-require-a-value-496ec846d2c430000664000175000017500000000017700000000000032032 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a manila issue while updating quotas. Now manila requires at least a quota value to be updated. ././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=manila-21.0.0/releasenotes/notes/bug-1783736-add-share-proto-filtering-to-the-capabilities-scheduler-d8391183335def9f.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1783736-add-share-proto-filtering-to-the-capabilities-scheduler0000664000175000017500000000024200000000000033006 0ustar00zuulzuul00000000000000--- fixes: - | A bug with storage protocol filtering in the scheduler has been fixed. See `bug `_ for more details. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-1785129-fix-sighup-behavior-with-scheduler-8ee803ad0e543cce.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1785129-fix-sighup-behavior-with-scheduler-8ee803ad0e543cce.yam0000664000175000017500000000034300000000000032200 0ustar00zuulzuul00000000000000--- fixes: - The SIGHUP behavior for the manila-scheduler service has been fixed. 
Previously, only the manila-share service was responding to SIGHUP and reloading its configuration, now manila-scheduler does the same.././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/bug-1785180-zfsonlinux-retry-unmounting-during-manage-872cf46313c5a4ff.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1785180-zfsonlinux-retry-unmounting-during-manage-872cf46313c5a0000664000175000017500000000032100000000000032344 0ustar00zuulzuul00000000000000--- fixes: - | The ZFSOnLinux driver now retries unmounting zfs shares to perform the manage operation. See `Launchpad bug 1785180 `_ for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1794402-fix-share-stats-container-driver-b3cb1fa2987ad4b1.yaml0000664000175000017500000000032700000000000032030 0ustar00zuulzuul00000000000000--- fixes: - | Pool stats collection has been fixed in the container driver to reflect the differences in formatting of information for the underlying volume groups across different operating systems. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1795463-fix-pagination-slowness-8fcda3746aa13940.yaml0000664000175000017500000000050100000000000030174 0ustar00zuulzuul00000000000000--- fixes: - | When the OpenStack administrator has a busy environment that contains many shares, the list operation with `--limit` parameter was taking too long to respond. This lag has now been fixed. See the `launchpad bug 1795463 `_ for more details. 
././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-1798219-fix-snapshot-creation-lvm-and-generic-driver-55e349e02e7fa370.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1798219-fix-snapshot-creation-lvm-and-generic-driver-55e349e02e0000664000175000017500000000046300000000000032236 0ustar00zuulzuul00000000000000--- fixes: - | The generic and LVM drivers have been fixed to always perform a filesystem check on newly created snapshots and derivative shares before attempting to assign a UUID to them. See `Launchpad bug 1798219 `_ for more details. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-1801763-gate-public-share-creation-by-policy-a0ad84e4127a3fc3.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1801763-gate-public-share-creation-by-policy-a0ad84e4127a3fc3.y0000664000175000017500000000216700000000000031766 0ustar00zuulzuul00000000000000--- features: - | New API policies (share:create_public_share and share:set_public_share) have been introduced for the "create" (POST /shares) and "update" (PUT /shares) APIs to validate requests to create publicly visible shares. deprecations: - | The API policies to create publicly visible shares (share:create_public_share) or modify existing shares to become publicly visible (share:set_public_share) have their default value changed to rule:admin_api. This means that these APIs (POST /shares and PUT /shares) will allow the 'is_public' parameter to be set to True in the request body if the requester's role is set to an Administrator role. These policies will allow their previous default behavior in the Stein release (8.0.0) (i.e., any user can create publicly visible shares and even non-privileged users within a project can update their shares to become publicly visible). 
If the previous default behavior is always desired, deployers *must* explicitly set "share:create_public_share" and "share:set_public_share" to "rule:default" in their policy.json file.././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-1802424-add-user-message-when-shrinking-fails-83d0f60ead6f4a4b.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1802424-add-user-message-when-shrinking-fails-83d0f60ead6f4a4b.0000664000175000017500000000017600000000000032025 0ustar00zuulzuul00000000000000 fixes: - | Added a new user message when share shrinking fails due to operation not being supported by the driver. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/releasenotes/notes/bug-1804651-netapp-cdot-add-peferred-dc-to-cifs-ad-99072ce663762e83.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1804651-netapp-cdot-add-peferred-dc-to-cifs-ad-99072ce663762e830000664000175000017500000000053500000000000031312 0ustar00zuulzuul00000000000000--- features: - | For NetApp CIFS share provisioning users can now specify the optional "server" API parameter to provide an active directory domain controller IP address for when creating a security service. Multiple IP addresses can be given separated by comma. This represents the "Preferred DC" at the vserver cifs domain. 
././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=manila-21.0.0/releasenotes/notes/bug-1804656-netapp-cdot-add-port-ids-to-share-server-backend-424ca11a1eb44826.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1804656-netapp-cdot-add-port-ids-to-share-server-backend-424ca10000664000175000017500000000046500000000000032245 0ustar00zuulzuul00000000000000--- features: - | The Neutron Port IDs and IP addresses of the network allocation when using the NetApp cDOT driver with DHSS=true are made accessible for administrators at share server backend_details of newly created share servers. Those are corresponding to the NetApp lifs of a vserver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1804659-speed-up-pools-detail-18f539a96042099a.yaml0000664000175000017500000000015500000000000027317 0ustar00zuulzuul00000000000000--- fixes: - | Added caching of host state map to speed up calls for scheduler-stats/pools/detail. ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=manila-21.0.0/releasenotes/notes/bug-1811680-destroy-quotas-usages-reservations-when-deleting-share-type-a18f2e00a65fe922.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1811680-destroy-quotas-usages-reservations-when-deleting-share-0000664000175000017500000000043700000000000033146 0ustar00zuulzuul00000000000000--- fixes: - | Share type quotas, usages and reservations will now be correctly cleaned up if a share type has been deleted. See `launchpad bug #1811680 `_ for details regarding the bug that prevented this cleanup prior. 
././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=manila-21.0.0/releasenotes/notes/bug-1813054-remove-share-usage-size-audit-period-conf-opt-7331013d1cdb7b43.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1813054-remove-share-usage-size-audit-period-conf-opt-7331013d10000664000175000017500000000070100000000000032040 0ustar00zuulzuul00000000000000--- deprecations: - | The configuration option ``share_usage_audit_period`` from the [DEFAULT] section has been deprecated. Specifying this option never had any effect on manila and so it will be removed in an upcoming release. This option should not be confused with ``share_usage_size_update_interval`` from the back end section, which can be used to gather usage size for some back ends that support that feature. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/bug-1815038-extend-remove_version_from_href-support-ea479daaaf5c5700.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1815038-extend-remove_version_from_href-support-ea479daaaf5c5700000664000175000017500000000032500000000000032604 0ustar00zuulzuul00000000000000--- fixes: - | `Launchpad bug 1815038 `_ has been fixed and now we correctly parse the base URL from manila's endpoint url, accounting for proxy URLs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1815532-supply-request-id-in-all-apis-74419bc1b1feea1e.yaml0000664000175000017500000000017700000000000031257 0ustar00zuulzuul00000000000000--- fixes: - APIs that were not returning a request ID ('x-compute-request-id') in the response headers have been fixed. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1816420-validate-access-type-for-ganehas-c42ce6f859fa0c8c.yaml0000664000175000017500000000055100000000000031746 0ustar00zuulzuul00000000000000--- fixes: - | Access rule type for shares served via nfs-ganesha is now validated, fixing `launchpad bug #1816420 `_ where ``cephx`` access type was allowed though only ``ip`` access type is effective. This fix also validates ``access_level`` to ensure that it is set to ``RW`` or ``RO``. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/releasenotes/notes/bug-1818081-fix-inferred-script-name-in-case-of-proxy-urls-e33466af856708b4.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1818081-fix-inferred-script-name-in-case-of-proxy-urls-e33466af0000664000175000017500000000047500000000000032255 0ustar00zuulzuul00000000000000--- fixes: - | When manila API is run behind a proxy webserver, the API service was parsing the major API version requested incorrectly, leading to incorrect responses. This behavior has now been fixed. See `launchpad bug 1818081 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1822099-fix-multisegment-mtu.yaml-ac2e31c084d8bbb6.yaml0000664000175000017500000000023000000000000030575 0ustar00zuulzuul00000000000000--- fixes: - | Update share networks with MTU before creating network allocations so that the first allocation in a share network is correct. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1831092-netapp-fix-race-condition-524555133aaa6ca8.yaml0000664000175000017500000000020100000000000030246 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue with the NetApp driver failing during a rollback operation in the share server creation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1845135-fix-Unity-cannot-use-mgmt-ipv6-9407710a3fc7f4aa.yaml0000664000175000017500000000017600000000000031175 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue with the Dell EMC Unity driver to work with a management IP configured in IPv6 format.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1845147-powermax-read-only-policy-585c29c5ff020007.yaml0000664000175000017500000000032000000000000030265 0ustar00zuulzuul00000000000000--- fixes: - | Manila PowerMax fix ensuring that hosts that are given access to a share i.e read only, will always precede '-0.0.0.0/0.0.0.0'. Any host after this string will be denied access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1845147-vnx-read-only-policy-75b0f414ea5ef471.yaml0000664000175000017500000000031300000000000027402 0ustar00zuulzuul00000000000000--- fixes: - | Manila VNX fix ensuring that hosts that are given access to a share i.e read only, will always precede '-0.0.0.0/0.0.0.0'. Any host after this string will be denied access. 
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-1845452-unity--fix-fail-to-delete-cifs-share-c502a10ae306e506.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1845452-unity--fix-fail-to-delete-cifs-share-c502a10ae306e506.y0000664000175000017500000000015000000000000031426 0ustar00zuulzuul00000000000000--- fixes: - Fixed an issue with Unity driver fails to delete CIFS share if wrong access was set. ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=manila-21.0.0/releasenotes/notes/bug-1846836-fix-share-network-update-unexpected-success-eba8f40db392c467.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1846836-fix-share-network-update-unexpected-success-eba8f40db390000664000175000017500000000033400000000000032514 0ustar00zuulzuul00000000000000--- fixes: - | Fixed unexpected behavior when updating a share network's `neutron_net_id` or `neutron_subnet_id`. Now, Manila does not allow updating a share network that does not contain a default subnet. ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=manila-21.0.0/releasenotes/notes/bug-1848608-1893718-fix-manage-api-for-shares-with-multiple-export-locations-32ade25e9d82535b.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1848608-1893718-fix-manage-api-for-shares-with-multiple-export-0000664000175000017500000000105000000000000032123 0ustar00zuulzuul00000000000000--- fixes: - | The `API to import shares into manila `_ could sometimes allow a share to be "managed" into manila multiple times via different export paths. This API could also incorrectly disallow a manage operation citing a new share in question was already managed. Both issues have now been fixed. See `bug #1848608 `_ and `bug #1893718 `_ for more details. 
././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=manila-21.0.0/releasenotes/notes/bug-1848889-netapp-fix-share-replica-update-check-failure-90aa964417e7734c.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1848889-netapp-fix-share-replica-update-check-failure-90aa964410000664000175000017500000000062400000000000032162 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue in NetApp driver share replica periodic check that erroneously set a replica state to 'error'. In this routine, a SnapMirror resync operation was being triggered while the replica data transfering is still in progress, receiving an error from the storage side. The driver now skips resync operation for all in progress SnapMirror relationship status. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/bug-1850264-add-async-error-when-share-extend-error-a0c458204b395994.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1850264-add-async-error-when-share-extend-error-a0c458204b395990000664000175000017500000000015600000000000031610 0ustar00zuulzuul00000000000000--- fixes: - | A new user message has been added in case of share extensions failing asynchronously.././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/bug-1853940-not-send-heartbeat-if-driver-not-initial-9c3cee39e8c725d1.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1853940-not-send-heartbeat-if-driver-not-initial-9c3cee39e8c7250000664000175000017500000000043200000000000032110 0ustar00zuulzuul00000000000000--- fixes: - | `Launchpad bug 1853940 `_ has been fixed. When drivers are still initializing or when they fail to initialize, the share service will be reported as being "down" until the driver has been initialized. 
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/releasenotes/notes/bug-1855391-extend-share-will-go-through-scheduler-3a29093756dc88c1.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1855391-extend-share-will-go-through-scheduler-3a29093756dc88c10000664000175000017500000000046100000000000031711 0ustar00zuulzuul00000000000000--- fixes: - | `Launchpad bug 1855391 `_ has been fixed. The action of extend share will go through scheduler, if there is no available share backend host, the share will rollback to available state and create an user message about extend.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1858328-netapp-fix-shrinking-error-48bcfffe694f5e81.yaml0000664000175000017500000000035000000000000030771 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue in NetApp driver when shrinking shares to a size smaller than the current used space. Now it will return a more appropriate error status called ``shrinking_possible_data_loss_error``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1859775-snapshot-over-quota-exception-bb6691612af03ddf.yaml0000664000175000017500000000024700000000000031433 0ustar00zuulzuul00000000000000--- fixes: - | Fixed Quota exceeded exception for snapshot creation. Consumed gigabytes now reports the snapshot gigabytes instead of share gigabytes usage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1859785-share-list-speed-6b09e7717624e037.yaml0000664000175000017500000000041000000000000026356 0ustar00zuulzuul00000000000000--- fixes: - | Improved share list speed using lazy='subquery'. 
The sqlalchemy models of Share and Share Instance relationships previously had lazy='immediate'. This resulted in at least three extra queries when we queried for all share details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1860061-fix-pagination-query-5c893bb8edaf1350.yaml0000664000175000017500000000036000000000000027536 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the issue that caused pagination queries to return erroneous results when the argument `limit` was specified. Also improved the queries performance by moving some filtering operations to the database. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1861485-fix-share-network-retrieval-31768dcda5aeeaaa.yaml0000664000175000017500000000042000000000000031245 0ustar00zuulzuul00000000000000--- security: - | CVE-2020-9543: An issue with share network retrieval has been addressed in the API by scoping unprivileged access to project only. Please see `launchpad bug #1861485 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1862833-fix-backref-by-eager-loading-2d897976e7598625.yaml0000664000175000017500000000024100000000000030434 0ustar00zuulzuul00000000000000--- fixes: - | Some resources will be eagerly loaded from the database to avoid cyclical references and faulty results if their retrieval is deferred. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1863298-fix-manage-overquota-issue-37031a593b66f8ba.yaml0000664000175000017500000000066700000000000030532 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue while bringing shares under Manila management. 
Now, when a share is being managed and there is no available quota to complete this operation, the service will allow the quotas to be exceeded and the operation will be completed. The administrator will need to adjust the quotas after. Please see `Launchpad bug `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1867030-delete-share-55663c74a93e77fd.yaml0000664000175000017500000000020300000000000025677 0ustar00zuulzuul00000000000000--- fixes: - | Launchpad `bug 1867030 `_ has been fixed for delete share. ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=manila-21.0.0/releasenotes/notes/bug-1869148-if-only-pyc-exist-the-extension-API-cannot-be-loaded-172cb9153ebd4b56.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1869148-if-only-pyc-exist-the-extension-API-cannot-be-loaded-170000664000175000017500000000034400000000000032256 0ustar00zuulzuul00000000000000--- fixes: - | `Launchpad bug 1869148 `_ has been fixed. This bug could have affected environments where extension APIs were provided in compiled files rather than source code. ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=manila-21.0.0/releasenotes/notes/bug-1869712-fix-increased-scheduled-time-for-non-thin-provisioned-backends-1da2cc33d365ba4f.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1869712-fix-increased-scheduled-time-for-non-thin-provisioned-b0000664000175000017500000000047700000000000032751 0ustar00zuulzuul00000000000000--- fixes: - | Reduces an increase of schedule time for non thin provisioned backends. On those backends, there is no need to calculate provisioned_capacity_gb, as it is not used during the scheduling. This calculation was not scaling properly on big environments as it implies many database queries. 
././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=manila-21.0.0/releasenotes/notes/bug-1870751-cleanup-share-type-and-group-type-project-access-when-deleted-4fcd49ba6e6c40bd.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1870751-cleanup-share-type-and-group-type-project-access-when-d0000664000175000017500000000043600000000000032674 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the cleanup for private share types and share group types to include clearing out the database entries recording project specific access rules to these types. See `Launchpad bug 1870751 `_ for more details. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/releasenotes/notes/bug-1871252-cephfs-doesnt-support-subvolume-group-snapshots-344efbb9ba74e05c.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1871252-cephfs-doesnt-support-subvolume-group-snapshots-344efbb0000664000175000017500000000045700000000000033026 0ustar00zuulzuul00000000000000--- deprecations: - | Create share group snapshot feature is no longer supported in manila CephFS drivers (both Native and NFS Ganesha) since the subvolume group snapshot feature is no longer supported in mainline CephFS (existing group snapshots can still be listed and deleted). ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-1871999-dell-emc-vnx-powermax-wrong-export-locations-e9763631c621656f.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1871999-dell-emc-vnx-powermax-wrong-export-locations-e9763631c60000664000175000017500000000035000000000000032277 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC VNX and PowerMax Drivers: Fixes `bug 1871999 `__ to make `create_share` and `create_share_from_snapshot` return correct list of export locations. 
././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/bug-1872243-netapp-fix-vserver-peer-with-same-vserver-8bc65816f1764784.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1872243-netapp-fix-vserver-peer-with-same-vserver-8bc65816f17640000664000175000017500000000062600000000000032071 0ustar00zuulzuul00000000000000--- fixes: - | NetApp cDOT driver is now fixed to not create peer relationship between same share servers when handling share replica creation and promotion. This issue was happening when operating in `driver_handles_share_servers` enabled mode with backends configured with more than one pool. See `Launchpad bug 1872243 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1872872-fix-quota-checking-b06fd372be143101.yaml0000664000175000017500000000056200000000000027003 0ustar00zuulzuul00000000000000--- fixes: - | Fixed quota issue that made it impossible to create resources when the project had the quotas set to unlimited, and the user had a limited amount of quotas to use. Now, operations in the mentioned quota scenario are working properly. Please see `Launchpad bug 1872872 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1872873-fix-consume-from-share-eea5941de17a5bcc.yaml0000664000175000017500000000041000000000000030122 0ustar00zuulzuul00000000000000--- fixes: - Updated the scheduler pool attributes ``provisioned_capacity_gb`` and ``allocated_capacity_gb`` to accommodate shares being created. This helps maintain an approximate tally of these attributes in between back end scheduler updates. 
././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-1873963-netapp-fix-vserver-peer-intra-cluster-966398cf3a621edd.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1873963-netapp-fix-vserver-peer-intra-cluster-966398cf3a621edd.0000664000175000017500000000070300000000000032036 0ustar00zuulzuul00000000000000--- fixes: - | NetApp cDOT driver is now fixed to not trigger peer accept operation between share servers that belong to the same cluster, when handling share replica creation and promotion. This issue was happening when operating in `driver_handles_share_servers` enabled mode with multiple backends configured within the same cluster. See `Launchpad bug 1873963 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1878993-netapp-fix-https-3eddf9eb5b762f3a.yaml0000664000175000017500000000026700000000000027073 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `bug #1878993 `_ that caused a failure on HTTPS connections within NetApp backend using python 3.7. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-1879368-netapp-fix-cifs-promote-back-issue-d8fe28466f9dde49.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1879368-netapp-fix-cifs-promote-back-issue-d8fe28466f9dde49.yam0000664000175000017500000000034200000000000032063 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue while promoting back share replicas created using CIFS protocol. Please refer to the `Launchpad bug #1879368 `_ for more details. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1879754-teardown-network-d1887cdf6eb83388.yaml0000664000175000017500000000026700000000000026764 0ustar00zuulzuul00000000000000--- fixes: - Fixed unneeded all ports list request to Neutron in service instance helper module on tearing down service subnet, Neutron can filter them by subnet_id itself. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/bug-1880747-netapp-fix-do-not-delete-default-ipspace-aee638279e0f8e93.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1880747-netapp-fix-do-not-delete-default-ipspace-aee638279e0f8e0000664000175000017500000000060100000000000032072 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver is now fixed to avoid the deletion of `Cluster` and `Default` ipspaces when deleting a share server. This issue was happening only when operating in `driver_handles_share_servers` enabled mode and creating shares using `flat` network type. See `Launchpad bug 1880747 `_ for more details. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/bug-1881098-1895323-manila-manage-update-host-fixes-bbbc4fe2da48cae9.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1881098-1895323-manila-manage-update-host-fixes-bbbc4fe2da48cae0000664000175000017500000000024100000000000031625 0ustar00zuulzuul00000000000000--- fixes: - | The ``manila-manage share update_host`` command now updates the host attribute of share servers and share groups in addition to shares. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1881112-add-manila-manage-service-cleanup-37019840f01bfa2f.yaml0000664000175000017500000000061000000000000031603 0ustar00zuulzuul00000000000000--- features: - | Added ``cleanup`` sub command to the ``manila-manage service`` command for administrators to be able to soft-delete services marked as down. fixes: - | Added ``manila-manage service cleanup`` command to soft-delete entries from the services table for services, that are down. E.g. this fixed the removal of services for hosts, that had been renamed. ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-1881865-add-generic-fuzzy-matching-logic-in-database-d83917727d12677d.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1881865-add-generic-fuzzy-matching-logic-in-database-d83917727d0000664000175000017500000000042300000000000032057 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `bug #1881865 `_ Added generic fuzzy matching logic to the database layer, This logic is applied to query share snapshot list, This will greatly improve the speed of paging fuzzy queries. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1882590-fix-svm-scoped-netapp-85b53830135f7558.yaml0000664000175000017500000000027100000000000027254 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `bug #1882590 `_ that caused an error on starting a NetApp backend when using the SVM scoped account. 
././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=manila-21.0.0/releasenotes/notes/bug-1883506-fix-delete-manage-error-share-will-lead-to-quota-error-085fd3b7d15ae109.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1883506-fix-delete-manage-error-share-will-lead-to-quota-error-0000664000175000017500000000026600000000000032563 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `bug #1883506 `_ that caused a quota error when delete or unmanage a share that failed to manage. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/releasenotes/notes/bug-1885956-enforce-policy-check-getting-share-type-by-name-5eca17b02bea5261.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1885956-enforce-policy-check-getting-share-type-by-name-5eca17b0000664000175000017500000000057600000000000032436 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `launchpad bug #1885956 `_ by ensuring that policy checks are enforced when looking up a share-type by name. This prevents a problem where shares could be stuck in CREATING status when a user attempts to create a share using the name of a private share-type to which the user lacks access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1886010-Glusterfs-fix-del-share-89dabc8751ed4fec.yaml0000664000175000017500000000034200000000000030236 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `bug #1886010 `_ This bug caused glusterfs shares to still be readable/writable to connected clients while the share was deleted from manila. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1886232-netapp-fix-python-package-name-0f3ec3f2deec8887.yaml0000664000175000017500000000053100000000000031462 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `bug #1886232 `_ that causes an INFO message saying the python-manila package was not found. Now, the package name was updated to python3-manila. This fix solves only in the case that the user installed the manila using the default packages found in OS. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1886690-edit-services-down-message-c857de1a678b6781.yaml0000664000175000017500000000031600000000000030512 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `bug #1886690 `_ that was a misleading user message when share services are down. The message is now clear and descriptive.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1887643-netapp-add-cifs-dc-add-skip-check-c8ea9b952cedb643.yaml0000664000175000017500000000043200000000000031653 0ustar00zuulzuul00000000000000--- fixes: - | The NetApp cDOT driver now validates the configuration of preferred domain controller(s) added in CIFS security service server setup. The mandatory option ``skip-config-validation`` was introduced to ``cifs-domain-preferred-dc-add`` with ONTAP 9.5. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1888905-fix-group-snapshot-create-delete-0595f9d7a4c0c343.yaml0000664000175000017500000000033000000000000031613 0ustar00zuulzuul00000000000000--- fixes: - | An error with share group snapshot creation and deletion due to missing attributes has been fixed. See `Launchpad bug 1888905 `_ for more information. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1888915-harden-lvm-deletions-2a735ab0ee4a4903.yaml0000664000175000017500000000033500000000000027426 0ustar00zuulzuul00000000000000--- fixes: - | The LVM driver no longer fails to delete shares, snapshots and access rules that are missing from storage. See `Launchpad bug #1888915 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1889549-fix-migration-get-progress-race-15aea537efec6daf.yaml0000664000175000017500000000044000000000000032026 0ustar00zuulzuul00000000000000--- fixes: - | In the share migration_get_progress API a race condition was fixed. If the share manager reports ``InvalidShare`` the share's task state is evaluated again to return progress 0 or 100 based on known task states instead of raising ``ShareMigrationError``. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-1890833-fix-cephfs-incorrect-capacity-report-3a9bdaffcc62ec71.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1890833-fix-cephfs-incorrect-capacity-report-3a9bdaffcc62ec71.y0000664000175000017500000000220000000000000032334 0ustar00zuulzuul00000000000000--- upgrade: - | This version includes a fix to the CephFS drivers to address `an issue `_ with total and free space calculation in the CephFS driver. When you update, you will notice that the space calculations reflect reality in your Ceph clusters, and provisioning may fail if the share sizes exceed the cluster's free space. CephFS shares are always thin provisioned, and the driver does not support oversubscription via Manila; so space can be claimed for new shares as long as there is free space on the cluster. 
Use the "reserved_share_percentage" back end configuration option to ensure there's always space left aside for provisioned workloads to grow over time. fixes: - | The CephFS driver has now been fixed to report total and available space on the storage system correctly. See `Launchpad bug#1890833 `_ for more details. - | The CephFS driver now honors the configuration option "reserved_share_percentage", and it can be used to prevent save space for provisioned workloads to grow over time. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1894362-fix-Glusterfs-del-share-3c8467e1d9f0c6e4.yaml0000664000175000017500000000037400000000000030033 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `bug #1894362 `_ Fixed the problem of Couldn't find the'gluster_used_vols' error when deploying glusterfs driver multi-backend service and deleting share instance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1896322-container-fix-search-veth-regex-7f2156a6fd411bdb.yaml0000664000175000017500000000051400000000000031546 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue on Container driver when managing share servers. The regex used to search for container-name in the available network interfaces was updated to support newer versions of Open vSwitch. Refer to the `Bug #1896322 `_ for more details. 
././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=manila-21.0.0/releasenotes/notes/bug-1898924-fix-share-replica-update-missing-share-server-model-c1c060a7c06e4512.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1898924-fix-share-replica-update-missing-share-server-model-c1c0000664000175000017500000000047600000000000032652 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue that can lead a share replica to fail during the status update operation, due to a concurrency between `share replica create` and `shara replica update` operations. Refer to `Launchpad Bug #1898924 `_ for more details.././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=manila-21.0.0/releasenotes/notes/bug-1900752-early-validate-mandatory-security-service-association-f48aecbbc47418cd.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1900752-early-validate-mandatory-security-service-association-f0000664000175000017500000000060400000000000033161 0ustar00zuulzuul00000000000000--- fixes: - | For some drivers, to create a share with specific protocol it is mandatory to add a security service to the share network beforehand. If this is forgotten the share ends up in error. From now on, Manila won't allow shares to be created when the specified protocol requires a specific security service type that is not associated to the share network. 
././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=manila-21.0.0/releasenotes/notes/bug-1900755-netapp-add-exception-cifs-creation-error-user-privileges-or-credentials.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1900755-netapp-add-exception-cifs-creation-error-user-privilege0000664000175000017500000000044000000000000033047 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1900755 `_: When failing to create shares using security services due to insufficient privileges or wrong credentials, the NetApp ONTAP driver will now report more accurate information on the failure. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-1901210-return-404-if-share-access-forbidden-02ca9a9552ad3e15.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1901210-return-404-if-share-access-forbidden-02ca9a9552ad3e15.y0000664000175000017500000000032000000000000031370 0ustar00zuulzuul00000000000000--- fixes: - | The GET /shares/{share_id} API now responds with HTTP 404 (Not Found) for inaccessible resources. See `bug 1901210 `_ for further information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1901937-netapp-nfs-for-windows-465e704524277ea2.yaml0000664000175000017500000000022400000000000027515 0ustar00zuulzuul00000000000000--- fixes: - | The NetApp cDOT driver now sets the required NFS options for clients running Windows operating systems with NFSv3 support. 
././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/releasenotes/notes/bug-1903773-fix-lvmdriver-share-unmounting-after-migration-75640e3c9dc62dba.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1903773-fix-lvmdriver-share-unmounting-after-migration-75640e3c0000664000175000017500000000035200000000000032473 0ustar00zuulzuul00000000000000--- fixes: - | Share cleanup for the LVM driver has been enhanced to retry on known errors that could occur due to mount propagation. See `bug 1903773 `_ for more details. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/releasenotes/notes/bug-1903773-fix-zfsonlinux-share-unmounting-after-migration-329b1eb2f33f78a3.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1903773-fix-zfsonlinux-share-unmounting-after-migration-329b1eb0000664000175000017500000000036100000000000032667 0ustar00zuulzuul00000000000000--- fixes: - | Share cleanup for the ZFSOnLinux driver has been enhanced to retry on known errors that could occur due to mount propagation. See `bug 1903773 `_ for more details. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/releasenotes/notes/bug-1904015-cve-2020-27781-cephx-asynchronous-msgs-6a683076a1fb5a54.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1904015-cve-2020-27781-cephx-asynchronous-msgs-6a683076a1fb5a540000664000175000017500000000065100000000000031004 0ustar00zuulzuul00000000000000--- fixes: - | New user messages now alert users of possible remediations during access rule creation errors with CephFS shares. This includes hints to users to not use cephx client users that are prohibited by CephFS or the share driver. See `CVE-2020-27781 `_ and bug #1904015 `_ for more details. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1908352-add-explicit-error-message-c33c7b75a7e49257.yaml0000664000175000017500000000032500000000000030455 0ustar00zuulzuul00000000000000--- fixes: - | When a quota value greater than 2147483647 is set, the error message "ERROR: Invalid input received:Quota limit should not exceed 2147483647. (HTTP 400)" is communicated to the user. ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=manila-21.0.0/releasenotes/notes/bug-1908963-scheduler-ignore-earlier-time-service-capabilities-0b97bb70ba4fbb7f.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1908963-scheduler-ignore-earlier-time-service-capabilities-0b970000664000175000017500000000026200000000000032616 0ustar00zuulzuul00000000000000--- fixes: - | Share scheduler will ignore earlier time service capabilities. See `bug 1908963 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1909847-put-ensure-share-into_thread-pool-e658f21c7caad668.yaml0000664000175000017500000000125000000000000032160 0ustar00zuulzuul00000000000000--- upgrade: - | This fix introduces a new configuration item, which named "share_service_inithost_offload", default value is False, if set it True, will put ensure_share operation into thread pool to speed up startup of manila share service. fixes: - | The manila share servie now can put ensure_share operation into thread pool during service startup process. See `Launchpad bug#1890833 `_ for more details. - | The manila share service now honors the configuration option "share_service_inithost_offload", and it can be used to reduce the time required for the manila share aervice to start up. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1909951-fix-extend-have-wrong-size-7938eaa6591bd2ad.yaml0000664000175000017500000000035100000000000030573 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a bug that if extend a volume after shrink it under generic driver, it may have a wrong real size. Please see `Launchpad bug `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1910752-fix-migration-replication-quotas-eaa013b743d721cd.yaml0000664000175000017500000000036500000000000032043 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the issue of not accounting replica quotas while triggering the migration for a given share. Please refer to the `Launchpad bug #1910752 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1911695-resize-share-world-accessable-b444d88b67b05af0.yaml0000664000175000017500000000012300000000000031214 0ustar00zuulzuul00000000000000--- fixes: - | Resizing 0.0.0.0/24 accessible NFS shares with generic driver ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-1915237-netapp-fix-encrypt-check-on-migrate-1e39bd7f19651972.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1915237-netapp-fix-encrypt-check-on-migrate-1e39bd7f19651972.ya0000664000175000017500000000026100000000000031577 0ustar00zuulzuul00000000000000--- fixes: - | NetApp OnTap driver `Bug #1915237 `_: Fixed encryption compatibility check on manila share migrate. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1916102-fix-security-service-policy-check-8e72254fa9fedc9e.yaml0000664000175000017500000000030000000000000032207 0ustar00zuulzuul00000000000000--- fixes: - | Decoupled the RBAC ``share:get_all_security_services`` from ``context_is_admin``, potentially allowing the use of the ``all_tenants`` query by non-administrators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1916534-netapp-fix-ldap-security-service-c8ee6d36598722cf.yaml0000664000175000017500000000174200000000000031727 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver is now fixed to properly configure SVM LDAP client when configuration is provided through `ldap` security service. Now, the driver chooses the correct LDAP schema based on the given security service parameters. The `RFC-2307` schema will be set for Linux/Unix LDAP servers and `RFC-2307bis` for Active Directory servers. When using a Linux/Unix LDAP server, the security service should be configured setting the `server` parameter with servers IPs or host names. For Active Directory LDAP server, the domain information must be configured using the the `domain` parameter. Users should provide at least one DNS server when configuring servers by its host or domain names. The base search `distinguished name` used for LDAP queries can now be configured using security service `ou` parameter. Please refer to `Launchpad Bug #1916534 `_ for more details. 
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-1917417-fix-rbac-check-on-share-access-rules-efdddaf9e6f68fdf.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1917417-fix-rbac-check-on-share-access-rules-efdddaf9e6f68fdf.y0000664000175000017500000000042500000000000032252 0ustar00zuulzuul00000000000000--- security: - | An RBAC policy check has been enforced against the GET /share-access-rules API to ensure that users are permitted to access the share that the access rule belongs to. See `bug 1917417 `_ for more details. ././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=manila-21.0.0/releasenotes/notes/bug-1917520-avoid-sending-traceback-to-user-if-action-forbidden-0da51825756fd5fc.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1917520-avoid-sending-traceback-to-user-if-action-forbidden-0da0000664000175000017500000000041000000000000032521 0ustar00zuulzuul00000000000000--- fixes: - | The scheduler stats resource APIs (/scheduler-stats/pools and /scheduler-stats/pools/detail) have been fixed to not return an arbitrary traceback in the error message body to the caller when access to the resource has been denied. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1917980-zadara-share-assisted-migration-2d8f8fdb51718faa.yaml0000664000175000017500000000030100000000000031721 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue in Zadara driver to support host assisted migration. The existing access rules required to be updated with share migration are deleted and re-added. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1918323-add-validation-to-share-network-94571f35cb39c815.yaml0000664000175000017500000000026300000000000031345 0ustar00zuulzuul00000000000000 fixes: - Adds a check when associating a security service to a share network, so that both resources must have the same project_id. If not, HTTP Bad Request is raised. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1920937-fixed-cifs-share-migration-752fde9631fb077a.yaml0000664000175000017500000000056500000000000030534 0ustar00zuulzuul00000000000000--- fixes: - | Fixed non-disruptive share migration of CIFS shares in the NetApp ONTAP driver using ZAPI API. During the CIFS share migration the creation of a new export path is skipped and the actual export path is taken from the backend. For more details, please refer to `launchpad bug #1920937 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1920942-fix-migration-server-selection-3ad50e6c73ae03df.yaml0000664000175000017500000000043300000000000031577 0ustar00zuulzuul00000000000000--- fixes: - | Non-disruptive share migration will no longer choose a different destination server even if limits of shares or gigabytes were exceeded in the source. For more details, please see `bug #1920942 `_. 
././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=manila-21.0.0/releasenotes/notes/bug-1921927-handle-service-client-unauthorized-exceptions-b2ebc08a072f7e12.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1921927-handle-service-client-unauthorized-exceptions-b2ebc08a00000664000175000017500000000036100000000000032640 0ustar00zuulzuul00000000000000--- fixes: - | Authentication errors when loading service clients of OpenStack Compute (nova), OpenStack Image (glance), OpenStack Volume (cinder) and OpenStack Networking (neutron) services are now handled in a better manner. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/releasenotes/notes/bug-1922075-fix-Glusterfs-create-share-from-snapshot-failed-053a583522a6fc0e.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1922075-fix-Glusterfs-create-share-from-snapshot-failed-053a5830000664000175000017500000000035700000000000032262 0ustar00zuulzuul00000000000000--- fixes: - | Fixed `bug #1922075 `_ Fixed the problem that "gluster volume set nfs.rpc-auth-reject '*'" failed when the glusterfs driver created an instance from a snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1922243-fix-project-only-replica-listing-f5f2b95ef14c3ded.yaml0000664000175000017500000000057400000000000032130 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue that caused Manila to return all projects' share replicas even when the user was not an administrator. Now, when the user is not an administrator, only the replicas in the project perspective are going to be displayed. 
For more details, please refer to `Launchpad Bug #1922243 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1923008-fix-logic-of-share-network-query-f314ec3010c06045.yaml0000664000175000017500000000031200000000000031424 0ustar00zuulzuul00000000000000--- fixes: - | Fix the query logic for share network list, put "created_since", "created_before" search opts into database to increase query speed, integrate the database query interface. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1923181-direct-mgr-commands-monmgr-5e8babb4a1067e92.yaml0000664000175000017500000000100000000000000030575 0ustar00zuulzuul00000000000000--- fixes: - | mgr-commands are now directed to the mgr-daemon instead of the mon-daemon in the CephFS drivers upgrade: - | MON write caps are not longer needed to interact with the backend on the Ceph drivers. The capabilities of the driver user (configured with ``cephfs_auth_id``) can hence be reduced. See the `administrator docs `_ for the capabilities required. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1923566-fix-notfound-replica-parent-share-d8e50659c02b941a.yaml0000664000175000017500000000026500000000000031757 0ustar00zuulzuul00000000000000--- fixes: - Fixed NotFound error in share replica periodic tasks. It could happen that the parent share of the replica that was being worked on had already been deleted. 
././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-1924230-skip-periodic-task-for-active-replica-030a982af92f8a62.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1924230-skip-periodic-task-for-active-replica-030a982af92f8a62.0000664000175000017500000000037400000000000031612 0ustar00zuulzuul00000000000000--- fixes: - Fixed periodic_share_replica_update() to skip active replicas similarly to periodic_share_replica_snapshot_update(). The intention is to check on non-active replicas, that can be 'in_sync', 'out_of_sync' or in 'error' state. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-1924806-sqlalchemy-view-only-relationships-807d406cf8fac06c.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1924806-sqlalchemy-view-only-relationships-807d406cf8fac06c.yam0000664000175000017500000000024500000000000032267 0ustar00zuulzuul00000000000000--- fixes: - | View-only relationships in database objects have been appropriately tagged to avoid sqlalchemy deprecation messages flooding the log files. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-1925342-fix-snapshot-support-api-error-msg-eaf5fd2b1df97d15.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1925342-fix-snapshot-support-api-error-msg-eaf5fd2b1df97d15.yam0000664000175000017500000000073400000000000032275 0ustar00zuulzuul00000000000000--- fixes: - | Corrected an error message for attempts to create snapshots from shares that do not support this operation. The message said that the share backend has no such support but that is not always true. 
The original share for the snapshot does not support snapshots because it was created with a share type without the ``snapshot_support`` extra-spec set, irrespective of whether the back end used can itself support snapshots or not. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/releasenotes/notes/bug-1925486-add-share-network-option-to-replica-create-api-7d2ff3628e93fc77.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1925486-add-share-network-option-to-replica-create-api-7d2ff3620000664000175000017500000000050300000000000032266 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1925486 `_ Share replica create API does not support share network option and uses parent share's share network. Fixed it to allow any share network by providing option ``share-network``. Added in API microversion starting with '2.72'. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/bug-1927060-fix-replica-state-on-migration-complete-4fb4d8ba59b58505.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1927060-fix-replica-state-on-migration-complete-4fb4d8ba59b58500000664000175000017500000000066200000000000032115 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue that made migrated shares with replication support to do not have a share instance with its `replica_state` set to active. Now, when the share supports replication, the destination share instance will have its replica state set as active right after the migration gets completed. 
For more details, please refer to `bug 1927060 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1927823-fix-create-not-home-aggr-e9bd1ebf0d8e4e1e.yaml0000664000175000017500000000075600000000000030413 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver: it is not reporting the home state of the aggregate pool. Operators may want to know this information to avoid those kind of pools during maintenance task. The patch adds the boolean capability `netapp_is_home` enabling the requester to avoid not home pools using the scheduler CapabilitiesFilter and share_type extra_specs. For more details, please refer to `launchpad bug #1927823 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1928241-d1b48e79aceb3cc4.yaml0000664000175000017500000000036700000000000023633 0ustar00zuulzuul00000000000000--- fixes: - | Fixes `bug #1928241 `_. The NetApp ONTAP driver will now avoid reusing a share server during the share creation in case the share server does not span the selected pool. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-192912-fix-filtering-shares-by-extra-specs-b79235301306bcf2.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-192912-fix-filtering-shares-by-extra-specs-b79235301306bcf2.yam0000664000175000017500000000016400000000000031671 0ustar00zuulzuul00000000000000--- fixes: - | Filtering shares by share-type "extra_specs" as key=value now returns the expected output. 
././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=manila-21.0.0/releasenotes/notes/bug-1929421-netapp-fix-thick-provision-volume-create-for-AFF-c22c72ce4c3fac16.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1929421-netapp-fix-thick-provision-volume-create-for-AFF-c22c720000664000175000017500000000044400000000000032255 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue with ONTAP AFF platforms while creating shares that forced volumes to have efficient data saving even when the contrary was specified. For more details, please refer to `launchpad bug #1929421 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1930459-add-ceph-version-check-88eee324bc6134ea.yaml0000664000175000017500000000041600000000000027674 0ustar00zuulzuul00000000000000--- fixes: - | A Ceph version check has been added as part of this change to address the absense of the mon-mgr target in Ceph Nautilus. With this change, Ceph Nautilus users can leverage their storage backend with the OpenStack manila Wallaby release.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1934345-fix-ipaddress-hosts-invocation-80d419d7e62a5f51.yaml0000664000175000017500000000027100000000000031400 0ustar00zuulzuul00000000000000--- fixes: - | The Infinidat driver's been fixed to process single IP Addresses (/32) correctly. See `bug 1934345 `_ for more details. 
././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-1934889-netapp-fix-replica-delete-for-scoped-account-8fa193c0424af9b1.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1934889-netapp-fix-replica-delete-for-scoped-account-8fa193c0420000664000175000017500000000050100000000000032163 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver: fixed an issue with the ONTAP 9.8 and older, for scoped account users, where the operation of deleting a replica was not working, but returned a message of success. For more details, please refer to `launchpad bug #1934889 `_././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1940072-dell-manila-fix-cifs-value-type-f65e162ee27d3e67.yaml0000664000175000017500000000023600000000000031371 0ustar00zuulzuul00000000000000--- fixes: - | Change cifs value from string to list for Dell manila drivers. Fixed `bug 1940072 `_ ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=manila-21.0.0/releasenotes/notes/bug-1942124-fix-list-mandatory-services-for-cifs-share-0c524831e8fc6175.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1942124-fix-list-mandatory-services-for-cifs-share-0c524831e8fc0000664000175000017500000000036700000000000032146 0ustar00zuulzuul00000000000000--- fixes: - | Changes the list of security services required for CIFS share creation when the NetApp driver is being used. 
For more details, please refer to `launchpad bug #1942124 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1944478-change-status-for-shares-api-5dbc4986d032c8e1.yaml0000664000175000017500000000017100000000000031003 0ustar00zuulzuul00000000000000--- fixes: - | Changed the error and status code that was raised when share types are not handled in shares api././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1945365-netapp-fix-port-conf-91552d3f61378c94.yaml0000664000175000017500000000043700000000000027171 0ustar00zuulzuul00000000000000--- fixes: - | NetApp cDOT driver Custom port configuration using ``netapp_server_port`` was accidentally ignored after a refactor. This option should now be properly read. See `Launchpad bug 1945365 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1945463-allow-override-instance-name-b730d106a9c32c83.yaml0000664000175000017500000000071600000000000030776 0ustar00zuulzuul00000000000000--- upgrade: - | The option ``service_instance_name_template`` will start being honored by the Generic driver, so review your configuration and revert to the default if you don't want it to be taken into account. fixes: - | The ``service_instance_name_template`` option was not being taken into account by the Generic driver, this issue is now addressed. See `bug #1945463 `_ for more information. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1946990-fix-ignored-neutron-opts-c438a089de9e2066.yaml0000664000175000017500000000070100000000000030233 0ustar00zuulzuul00000000000000--- deprecations: - | The ``[neutron] url_timeout`` option and the ``[neutron] auth_strategy`` option have been deprecated and will be removed in a future release. These two options have had no effect since 2.0.0 . fixes: - | `Bug #1946990 `_: Fix the ignored ``[neutron] url`` option. Now the parameter overrides the endpoint url which Manila uses to access Neutron API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1955627-add-check-to-reset-status-baa126a7145a45bb.yaml0000664000175000017500000000062000000000000030332 0ustar00zuulzuul00000000000000--- fixes: - | Role based access control is enforced on the POST /shares/{share_id}/action API to reset status, task state, replica state and similar fields. This prevents the situation where deployments allow some users access to these APIs, but they don't belong to projects where the resources exist. See `bug 1955627 `_ for more context. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/bug-1957075-fix-replica-promote-autosize-attributes-c180bb7db328bece.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1957075-fix-replica-promote-autosize-attributes-c180bb7db328bec0000664000175000017500000000051100000000000032425 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver: Fixed the issue with replica promotion where the autosize attributes were not being updated on ONTAP. Now, the autosize attributes are updated after promoting the replica. 
For more details, please refer to `launchpad bug #1957075 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1959472-fix-type-error-jsonfilter-fc7f87c288cc69.yaml0000664000175000017500000000034600000000000030350 0ustar00zuulzuul00000000000000--- fixes: - Fix the bug of TypeError with JsonFilter. If the scheduler_hints value is None, the TypeError exception may occur when creating share with JsonFilter. The TypeError exception is added to solve this problem. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-1961087-add-reserved-share-extend-percentage-c6da2ac8a0113d2c.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1961087-add-reserved-share-extend-percentage-c6da2ac8a0113d2c.y0000664000175000017500000000055100000000000032077 0ustar00zuulzuul00000000000000--- features: - | 'reserved_share_extend_percentage' backend config option allows Manila to consider different reservation percentage for share extend operation. This distinct option is useful if operators want to prevent provisioning of new shares but allow extensions of existing shares on storage pools beyond their reserved space. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1964089-fix-share-creation-via-rest-9bb4180fc87af6c2.yaml0000664000175000017500000000046500000000000030726 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2080951 `_: Fixed the share creation workflow via REST. Changed the order of dedupe/compression to disable compression first followed by disabling dedupe so that the share creation workflow goes through. 
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-1964696-fix-GaneshaNFSHelper-update_access-6124a79e34e63030.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1964696-fix-GaneshaNFSHelper-update_access-6124a79e34e63030.yam0000664000175000017500000000032400000000000031461 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1964696 `_: Fix calling the GaneshaNASHelper `update_access` method from the gluster GaneshaNFSHelper with the wrong signature. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1967760-shorten-snapshot-names-cephfs-a220e2b9f7ba5739.yaml0000664000175000017500000000026700000000000031306 0ustar00zuulzuul00000000000000--- fixes: - | Make snapshot names in CephFS drivers shorter to avoid limitation in Ceph clusters which truncates the subvolume name and makes the snapshots inaccesible.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1968069-fix-share-metadata-api-c31aca75bac66501.yaml0000664000175000017500000000021500000000000027670 0ustar00zuulzuul00000000000000--- fixes: - | Fixes regression for show_metadata and the response dictionary. The correct response is: {meta: {'key': 'value}}. ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=manila-21.0.0/releasenotes/notes/bug-1968891-fix-scheduler-capacity-filter-fails-to-pass-when-extending-share-6b60799e7aa41e19.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1968891-fix-scheduler-capacity-filter-fails-to-pass-when-extend0000664000175000017500000000034300000000000032770 0ustar00zuulzuul00000000000000--- fixes: - | `Launchpad bug 1968891 `_ has been fixed. 
scheduler will use size increase rather than share size to calculate provisioned_ratio when extending share.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1971530-fix-cephfs-native-deny-access-facf37fa7053c30d.yaml0000664000175000017500000000034300000000000031250 0ustar00zuulzuul00000000000000--- fixes: - | The CephFS driver no longer fails to delete access rules that were never applied or were missing from the back end storage. See `LP #1971530 `_ for more details. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-1973621-add-scheduler-default-extend-filters-1c4f2a1863d0d95b.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1973621-add-scheduler-default-extend-filters-1c4f2a1863d0d95b.y0000664000175000017500000000034600000000000031772 0ustar00zuulzuul00000000000000--- upgrade: - | When using scheduler filters during share extend, only few filters are necessary. To provide those configurable list of filters for share extend, added new option `scheduler_default_extend_filters`. ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-1975483-rollback-quota-if-share-network-create-fails-628312233bf0c179.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1975483-rollback-quota-if-share-network-create-fails-628312233b0000664000175000017500000000056700000000000032044 0ustar00zuulzuul00000000000000--- fixes: - | During share network create API, if either share network or share network subnet db creation fails, manila raises an exception. However quota is not rolled back and its usable only after quota reservations timed out (waiting conf.reservation_expire seconds). Fixed by introducing immediate quota rollback in case any db create api fails. 
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-1975715-fix-driverfilter-string-evaluations-3886a68d4d7fa3a1.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1975715-fix-driverfilter-string-evaluations-3886a68d4d7fa3a1.ya0000664000175000017500000000036300000000000032214 0ustar00zuulzuul00000000000000--- fixes: - | Goodness_function expects integer or float else raise parseException. This causes example such as "(share.share_proto == 'CIFS') ? 100 : 50" to fail during evaluation. Fix it by adding support of string evalution. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1976370-stop-logging-login-information-b726d05ee805df27.yaml0000664000175000017500000000016000000000000031454 0ustar00zuulzuul00000000000000--- security: - | The SSH utility module no longer logs usernames and passwords as debug information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1978962-fix-find-available-servers-2dec3a4f3f0ef7e4.yaml0000664000175000017500000000057100000000000030756 0ustar00zuulzuul00000000000000--- fixes: - | Drivers using DHSS True mode has the server creation phase. This phase tries to reuse one of available share servers, however, the Manila code is considering all share servers states as available, rather than considering only the active or creating ones. Now, only the correct share servers are passed to drivers as available to be reused. 
././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-1982808-fix-netapp-api-failed-relationship-is-in-use-ecc9ede4d7f0f5b9.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1982808-fix-netapp-api-failed-relationship-is-in-use-ecc9ede4d70000664000175000017500000000047000000000000032427 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #1982808 `_: Fixed issue to add the retry logic to delete and release the snapmirror relationship. Delete the vserver pairing when there is no snapmirror relationship exist on source and destination cluster. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/releasenotes/notes/bug-1982808-netapp-fix-snapmirror-snapshots-not-cleaned-up-63cc98cd468adbd1.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1982808-netapp-fix-snapmirror-snapshots-not-cleaned-up-63cc98cd0000664000175000017500000000047700000000000032570 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #1982808 `_: Fixed issue preventing the storage system from proper clean up unused SnapMirror snapshots after a replica promote, significantly increasing the amount of space consumed in ONTAP volumes by snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1983125-cb9118c3fa26a6f2.yaml0000664000175000017500000000031000000000000023455 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1983125 `_: Fixed the remaining reference to a deprecated quota option in code, which was causing a warning message. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1986653-infinidat-add-ssl-options-ee91f152bbd28080.yaml0000664000175000017500000000063600000000000030402 0ustar00zuulzuul00000000000000--- fixes: - | Infinidat Driver `bug #1986653 `_: Fixed Infinidat driver to use TLS/SSL communication between the Manila share service and the storage backend. Admin can set `True` or `False` for the `infinidat_use_ssl` and `infinidat_suppress_ssl_warnings` options in the driver section of manila.conf to enable or disable these features. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1989283-infinidat-version-bump-6f791d9d97ccf75b.yaml0000664000175000017500000000057700000000000030132 0ustar00zuulzuul00000000000000--- fixes: - | Infinidat Driver `bug #1989283 `_: Incremented driver version to 1.1, where SSL support was added. For more details on enabling SSL, please check the `Infinidat driver documentation `_. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=manila-21.0.0/releasenotes/notes/bug-1990150-cephadm-cephnfs-backend-fails-start-raise-exc-7459302bf662fdd6.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1990150-cephadm-cephnfs-backend-fails-start-raise-exc-7459302bf0000664000175000017500000000114700000000000032110 0ustar00zuulzuul00000000000000--- fixes: - | When deploying Manila CephFS NFS with cephadm, the manila share service fails to start with the error "Backend cephfsnfs supports neither IPv4 nor IPv6". This happens because the NFS Ganesha daemon fails to start for some reason, and therefore the driver never gets the location of the NFS Ganesha service that will be used as the backend. We rely on the operator to make sure the CephFS NFS cluster is available when initializing the driver. 
With this fix in place, we raise an exception to explicitly notify the operator and allow them to take further action. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1990839-add-state-column-for-service-c4fe2a6e312a1651.yaml0000664000175000017500000000117100000000000030766 0ustar00zuulzuul00000000000000--- fixes: - | In cluster deployments, where multiple instances of manila services are deployed via PODs, unique hostname is derived from node name. However if pods are deployed again and launched on new hosts/nodes, the old entries of manila service remains as it is. Fixed it by adding per service cleanup function and also introducing 'state' column in 'services' table. The service will be in either of 'up', 'down' or 'stopped' state. Cleanup will delete DB entries of 'stopeed' services. For more details please refer, `Launchpad bug 1990839 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1991378-fix-cluster_id-param-cephfs-nfs-2ddc4ff98141b9b9.yaml0000664000175000017500000000053500000000000031564 0ustar00zuulzuul00000000000000--- fixes: - | The CephFS NFS driver, specifically the NFSProtocolHelper implementation, was passing a wrong param to the Ceph backend and this was preventing users to add and deny access to the created shares. With this fix, users of the CephFS NFS NFSProtocolHelper can normally create and remove access to their shares. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1991396-add-glance-endpoint-type-151777f255e423a3.yaml0000664000175000017500000000044600000000000027756 0ustar00zuulzuul00000000000000--- fixes: - | Deployers now can specify ``[glance]endpoint_type`` configuration option (defaults to ``publicURL`` for backward compatibility) so that Manila uses Glance endpoint other than the public one (see `bug 1991396 `_). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1991776-cephfs-configured-ip-versions-fd87976fdb848e8c.yaml0000664000175000017500000000034000000000000031403 0ustar00zuulzuul00000000000000--- fixes: - | `Bug 1991776 `_ was fixed within the CephFS driver. The driver no longer emits repeated warnings concerning supported IP versions when using the NFS protocol. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-1991938-add-filesystem-info-cephfs-nfs-fsal-b39ae5ebaeb6fba1.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1991938-add-filesystem-info-cephfs-nfs-fsal-b39ae5ebaeb6fba1.ya0000664000175000017500000000030300000000000032253 0ustar00zuulzuul00000000000000--- fixes: - | Add the filesystem info in the exports created by the CephFS NFS driver. This fixes inconsistencies when deploying Manila with CephFS NFS with multiple filesystems. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1992443-infinidat-host-assisted-migration-4344c4d076b66796.yaml0000664000175000017500000000215500000000000031732 0ustar00zuulzuul00000000000000--- features: - | The special `.snapshot` directories for shares created by the Infinidat driver can now be controlled through configuration options: `infinidat_snapdir_accessible` and `infinidat_snapdir_visible`. By default, each share allows access to its own `.snapshot` directory, which contains files and directories of each snapshot taken. To restrict access to the `.snapshot` directory, the `infinidat_snapdir_accessible` should be set to `False`. The `infinidat_snapdir_visible` option controls visibility of the `.snapshot` directory. By default, the `.snapshot` directory is hidden. To make the `.snapshot` directory visible on the client side, this option should be set to `True`. fixes: - | Infinidat Driver `bug #1992443 `_: Fixed an issue in Infinidat driver to support host assisted migration. The `snapdir_visible` filesystem property must be disabled to hide `.snapshot` directory on the client side. However, this behavior can be changed using the `infinidat_snapdir_visible` configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1993828-init-share-server-updated-at-affb6ef54c71939d.yaml0000664000175000017500000000042400000000000031172 0ustar00zuulzuul00000000000000--- fixes: - | On share server creation initialize the updated_at field with the current timestamp to fix broken automatic cleanup in rare occasions where a share server that would be an automatic cleanup target would be pending in state `creating` forever. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1993829-netapp-guard-vserver-name-c65ab9e811b398a8.yaml0000664000175000017500000000035500000000000030433 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver: Added a guard on getting share server backend detail vserver name when trying to reuse share server. Please refer to `Launchpad Bug #1993829 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1995733-netapp-cifs-server-force-delete-d513c548ebf56448.yaml0000664000175000017500000000033400000000000031417 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver has been fixed to the error in `launchpad bug 1995733 `_ preventing share server cleanup with active directory security service. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=manila-21.0.0/releasenotes/notes/bug-1996859-update-timedelta-and-old-snapmirror-schedules-b565d4163663ffa0.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-1996859-update-timedelta-and-old-snapmirror-schedules-b565d41630000664000175000017500000000076000000000000032330 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver fixed to consider timestamp delta calculated from `netapp_snapmirror_schedule` config option instead of fixed one hour value. Delta is calculated as twice the time of the option. Also, ensure periodically that existent snapmirrors have the schedule property according to the `netapp_snapmirror_schedule` configuration value. 
For more details, please refer `Launchpad bug #1996859 `_ ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=manila-21.0.0/releasenotes/notes/bug-2000171-make-netapp_snapmirror_quiesce_timeout-end-user-option-4dc090eb7da3f7eb.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2000171-make-netapp_snapmirror_quiesce_timeout-end-user-option-0000664000175000017500000000050100000000000033246 0ustar00zuulzuul00000000000000--- fixes: - | In case of NetApp ONTAP driver, user can now set the quiesce timeout during promote and this timeout has precedence over NetApp configuration `netapp_snapsmirror_quiesce_timeout`. For more details, please refer to `launchpad bug 2000171 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2000253-handle-access-rules-on-replica-c7304ae55c68857f.yaml0000664000175000017500000000057200000000000031175 0ustar00zuulzuul00000000000000--- fixes: - | While creating share replicas, the rules that are copied from source share would hang in 'queued_to_apply' forever. Fixed it by checking status of access_rule of the created replica and conditionally changed from 'queued_to_apply' to 'active'. For more details check `Launchpad bug 2000253 `_ ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/bug-2002394-fix-bad-mode-enforcement-on-ensure-shares-a2e4d8f6c07c8cf5.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2002394-fix-bad-mode-enforcement-on-ensure-shares-a2e4d8f6c07c80000664000175000017500000000141500000000000032140 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue that made the CephFS driver to override the permissions in a share. After `a bugfix `_, Ceph's idempotent creation of shares had a change on its behavior. 
If a share mode was modified outside of Manila, or the configuration value for `cephfs_volume_mode` was changed in Manila when shares had already been created, these shares would have their mode changed while Manila attempted to ensure that such share exists using the idempotent creation, potentially breaking clients. The CephFS driver will no longer send create calls to the backend when ensuring a share exists. For more details, please refer to `Bug #2002394 `_ ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/bug-2004212-prevent-subnet-deletion-when-group-exists-a35355feb1bf6848.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2004212-prevent-subnet-deletion-when-group-exists-a35355feb1bf60000664000175000017500000000051600000000000032356 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue that allowed share network subnets to be deleted when they were still related to a share group. An exception will now be raised when Manila identify such existing relationship. For more details, please refer to `Launchpad Bug 2004212 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2004230-fix-cross-project-rbac-328134c64c96c200.yaml0000664000175000017500000000026500000000000027425 0ustar00zuulzuul00000000000000--- fixes: - | Metadata APIs have been fixed to respond with HTTP 404 / Not Found when the requester does not have access to a resource that the metadata pertains to. 
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/releasenotes/notes/bug-2006792-fix-add-share-network-subnet-non-admin-463347a723069997.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2006792-fix-add-share-network-subnet-non-admin-463347a7230699970000664000175000017500000000033000000000000031354 0ustar00zuulzuul00000000000000--- fixes: - | Share Network Subnet Metadata now can be add by any user, as the project_id used to verify the policy in the case of a non-admin user now is present in the Share Network Subnet db model. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2007060-fix-error-message-7a34357c0212d8f9.yaml0000664000175000017500000000030700000000000026567 0ustar00zuulzuul00000000000000--- fixes: - | Fixed several Manila API error messages with their contents. For more details, please refer to `launchpad bug #2007060 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2007560-fix-netapp-ipspace-naming-7c097743e187b920.yaml0000664000175000017500000000040200000000000030126 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver: Fixed NetApp ipspace naming to follow the neutron network id, instead of the neutron subnet id as before. For more details please refer to `launchpad bug #2007560 `. 
././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/bug-2008497-speed-up-replica-snapshots-create-request-6facee90320fecca.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2008497-speed-up-replica-snapshots-create-request-6facee90320fe0000664000175000017500000000057200000000000032402 0ustar00zuulzuul00000000000000--- fixes: - | Share replica snapshot creation was taking considerable time due to db operations and resulted in delay of execution for periodic tasks. Fixed it by fetching replicas without share data or without share server data wherever necessary. For more details, please check `Launchpad bug #2008497 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2012742-remove-stanza-check-from-netapp-driver-code.yaml0000664000175000017500000000044000000000000031450 0ustar00zuulzuul00000000000000--- fixes: - | Remove stanza check from netapp driver code to make the use of oslo-config drivers possible. Stanzas are not always written in config files anymore. For more details, please check `Launchpad bug #2012742 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2015094-fix-read-deleted-sqlalchemy-cda2dca772ce8d0a.yaml0000664000175000017500000000035700000000000031134 0ustar00zuulzuul00000000000000--- fixes: - | Internal sqlalchemy model_query has been fixed to honor the options of the `read_deleted` parameter. 
For more details, please refer to `launchpad bug #2015094 `_ ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/releasenotes/notes/bug-2015328-disallow-reset-replica-state-on-active-replicas-a3d4511ff1352d68.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2015328-disallow-reset-replica-state-on-active-replicas-a3d45110000664000175000017500000000027700000000000032361 0ustar00zuulzuul00000000000000--- fixes: - | The "replica_state" attribute of "active" replicas cannot be modified. Please see `Launchpad bug 2015328 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2017501-fix-share-export-location-update.yaml0000664000175000017500000000035200000000000027446 0ustar00zuulzuul00000000000000--- fixes: - | Fixed share export location update when subnet is added to a network containing shares. Please refer to the `Launchpad bug #2017501 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2020187-scheduler-performance-2edc4c706b2fea2f.yaml0000664000175000017500000000016700000000000030067 0ustar00zuulzuul00000000000000--- fixes: - | Improve scheduler performance to estimate the allocated capacity for thin provisioning hosts. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2020745-dell-unity-lacp-8653da49ad901c5c.yaml0000664000175000017500000000025600000000000026404 0ustar00zuulzuul00000000000000--- fixes: - | Dell Unity Driver `Bug #2020745 `_: Fixed driver startup issue with link aggregation configured. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2023754-fix-share-replica-url-367797a27a9c314d.yaml0000664000175000017500000000025300000000000027354 0ustar00zuulzuul00000000000000--- fixes: - | Launchpad `bug 2023754 `_ has been fixed for "next links" broken for limited share replicas api. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=manila-21.0.0/releasenotes/notes/bug-2023964-fix-limit-for-display-name-and-description-827d4ccb777ea632.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2023964-fix-limit-for-display-name-and-description-827d4ccb777e0000664000175000017500000000056000000000000032205 0ustar00zuulzuul00000000000000--- fixes: - | During share/snapshot create/update API calls, if display name or description is above max db limit i.e. 255, Manila throws error. But in this case, end user message is not meaningful. Fixed it by adding valid error message. For more details, please check `Launchpad Bug #2023964 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2024556-skip-error-deleting-replicas-aa2ef4154d5e38f6.yaml0000664000175000017500000000033400000000000031150 0ustar00zuulzuul00000000000000--- fixes: - | Share replicas in state `error_deleting` are now skipped during periodic updates. 
For more details, please refer to `launchpad bug #2024556 `_ ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=manila-21.0.0/releasenotes/notes/bug-2024658-fix-duplicate-entries-of-share-server-backend-details-adf45b417d45b437.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2024658-fix-duplicate-entries-of-share-server-backend-details-a0000664000175000017500000000057700000000000032677 0ustar00zuulzuul00000000000000--- fixes: - | Share server backend details set function adds db records without checking existing entries. This results in duplicate records for the combination of given share server id and key. Fixed it by updating records if already exist else creating new. See the `launchpad bug 2024658 `_ for more details. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/releasenotes/notes/bug-2025075-BadRequest-share-server-migration-get-progress-bf6fe476f7ab3111.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2025075-BadRequest-share-server-migration-get-progress-bf6fe4760000664000175000017500000000030700000000000032517 0ustar00zuulzuul00000000000000--- fixes: - | Fix BadRequest error from share-server-migration-get-progress. For more details please refer to `launchpad bug #2025075 ` ././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=manila-21.0.0/releasenotes/notes/bug-2025641-pause-and-resume-clone-split-during-snapshot-rename-fd0f990d50644d9c.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2025641-pause-and-resume-clone-split-during-snapshot-rename-fd00000664000175000017500000000063500000000000032663 0ustar00zuulzuul00000000000000--- fixes: - | In case of NetApp ONTAP driver, when snpashots are soft deleted (i.e. they are renamed if delete fails) sometime we face issue in during rename. This is due to busy snapshots. 
To overcome this, Manila will stop clone split, perform rename and start clone split again. For more details, please refer to `launchpad bug 2025641 `_ ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=manila-21.0.0/releasenotes/notes/bug-2029366-network-deleted-without-security-associaton-ae56473f6d32c47e.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2029366-network-deleted-without-security-associaton-ae56473f6d30000664000175000017500000000031000000000000032503 0ustar00zuulzuul00000000000000--- fixes: - | Fixed cleanup of share network security service associations at network delete. See `Launchpad bug 2029366 ` for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2031048-fix-provider-location-validation-b6d1e977f50643bb.yaml0000664000175000017500000000033000000000000031747 0ustar00zuulzuul00000000000000--- fixes: - | The "manage" API for snapshots now validates the format of "provider_location" and "share_id" fields and handles errors appropriately. These fields are expected to contain string values. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2031193-set-updated_at-access-rules-a382a3e352f3ef7d.yaml0000664000175000017500000000022400000000000030743 0ustar00zuulzuul00000000000000--- fixes: - | The ``updated_at`` field is correctly set on share and snapshot access rules when an update has been made on the database. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2032681-flashblade-relogin-f1781c6bdb15df71.yaml0000664000175000017500000000035200000000000027200 0ustar00zuulzuul00000000000000--- fixes: - | Pure Storage FlashBlade driver: Retry login for periodic task should network issue cause loss of connection. See `launchpad bug 2032681 `_ for more details. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=manila-21.0.0/releasenotes/notes/bug-2033604-fix-count-in-shares-and-snapshots-list-api-683f3103e587b898.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2033604-fix-count-in-shares-and-snapshots-list-api-683f3103e5870000664000175000017500000000051400000000000031723 0ustar00zuulzuul00000000000000--- fixes: - | The 'count' returned by shares and snapshots list API is fixed to provide correct value i.e. count of shares/snapshots instead of shareInstances/shareSnapshotInstances respectively. Please refer to the `Launchpad bug #2033604 `_ for more details. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/releasenotes/notes/bug-2035137-ceph-nfs-set-preferred-export-location-d1f228a51df8c8b4.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2035137-ceph-nfs-set-preferred-export-location-d1f228a51df8c8b40000664000175000017500000000260000000000000032110 0ustar00zuulzuul00000000000000--- features: - | It is now possible to configure `cephfs_ganesha_export_ips` (or alternatively, `cephfs_ganesha_server_ip`) alongside `cephfs_nfs_cluster_id`. Setting these options will allow the CephFS driver to report additional export paths. These additional export paths will have the "preferred" metadata key set to False. The export paths pertaining to the NFS service host discovered by the driver will have the "preferred" metadata key set to True. 
It is expected that administrators will configure additional IP addresses when preparing to migrate from a standalone NFS-Ganesha service to a NFS service cluster setup facilitated by the Ceph orchestration service. Eventually, when the migration has completed, these configuration options can be removed and the corresponding share export path records will be dropped from Manila. Note that the CephFS driver will not create or manipulate access rules within the NFS service configured via `cephfs_ganesha_export_ips` or `cephfs_ganesha_server_ip`. upgrades: - | In order to assist the user experience when migrating from a standalone CephFS NFS (NFS-Ganesha) service to an NFS service created with the Ceph Orchestrator, the CephFS driver allows configuring `cephfs_ganesha_export_ips` (or alternatively, `cephfs_ganesha_server_ip`) alongside `cephfs_nfs_cluster_id`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2035137-cephfs-support-ensure-shares-b72fe18381af274a.yaml0000664000175000017500000000136500000000000031146 0ustar00zuulzuul00000000000000--- fixes: - | The CephFS backend driver now supports a bulk share recovery mechanism (``ensure_shares``). At startup time, a combination of driver configuration options will determine if the driver must re-evaluate export paths of previously created shares. If these configuration options do not change, service startup will skip through this recovery stage. - | The CephFS backend driver will also reapply access rules when performing a recovery of pre-existing shares. upgrade: - | A new configuration option called ``cephfs_ensure_all_shares_salt`` has been introduced to assist cloud administrators that would like the CephFS driver to reconcile export paths of existing shares during service startup. 
././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/bug-2035572-ignore-errors-remove-export-nfs-ganesha-fd0f8eb1db800d31.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2035572-ignore-errors-remove-export-nfs-ganesha-fd0f8eb1db800d30000664000175000017500000000107200000000000032303 0ustar00zuulzuul00000000000000--- fixes: - | The CephFS driver uses a `RemoveExport` DBUS API call to the NFS/Ganesha service when a user deletes an access rule, or when deleting the share. If this call fails, the driver now provides a log of the failure, and continues cleaning up. Prior to this change, share deletion could fail if the service failed the DBUS command to drop the export. This would leave the share with an "error_deleting" status, needing administrator intervention. See `bug #2035572 `_ for more information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2036931-fix-flexgroup-dedup-compression-aeb013a6ef24f610.yaml0000664000175000017500000000036000000000000031675 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver: fixed FlexGroup share creation while modifying the dedup and compression configurations. For more details please refer to `launchpad bug #2036931 ` ././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=manila-21.0.0/releasenotes/notes/bug-2037109-netapp-use-identifier-to-derive-vserver-name-during-migration-81fd7d24b36b4dbe.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2037109-netapp-use-identifier-to-derive-vserver-name-during-mig0000664000175000017500000000053500000000000032770 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver during migration uses share server id which was causing failure for migrate a share server that had been migrated before. 
Fixed it by using share server identifier instead of id. Please refer to the `Launchpad bug #2037109 `_ for more details. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/releasenotes/notes/bug-2037422-delete-share-network-subnet-on-network-deletion-b28b42ce4a42b554.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2037422-delete-share-network-subnet-on-network-deletion-b28b42c0000664000175000017500000000042600000000000032515 0ustar00zuulzuul00000000000000--- fixes: - | Now, default share network subnet is deleted when share network is deleted, in case it is the only subnet present in share network. Please refer to the `Launchpad bug #2037422 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2038607-fix-protocol-access-mapping-opts-e7d61db3e2a84be7.yaml0000664000175000017500000000050600000000000032045 0ustar00zuulzuul00000000000000--- fixes: - | `bug 2038607 `_: Fixed parse of the protocol_access_mapping option and the backup_protocol_access_mapping option. This change requires that the values used for these options should be formatted like `key0=[value00, value01],key1=[value10]`. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-2049507-retry-on-connection-error-to-neutron-df7d2ddac5f30773.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2049507-retry-on-connection-error-to-neutron-df7d2ddac5f30773.y0000664000175000017500000000037100000000000032227 0ustar00zuulzuul00000000000000--- fixes: - | Manila will retry neutron API calls e.g. create_port(), show_port() in case of keystoneauth1 connection error. 
For more details, please refer to `launchpad bug #2049507 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2049538-cephfs-driver-return-allocated-gb-6ecf908dae8f369d.yaml0000664000175000017500000000150300000000000032163 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a scheduling issue that affected the CephFS Native and NFS drivers when a Ceph storage back end was configured with multiple filesystems. The share driver will now provide the necessary information so Manila can evenly distribute the shares between the different configured storage back ends. For more details, please refer to `Launchpad bug #2049538 `_. other: - | A new configuration option named ``cephfs_cached_allocated_capacity_update_interval`` has been added to the CephFS Native and NFS drivers, so that OpenStack Operators are able to define the amount in seconds for the cached allocation capacity gigabytes timeout to be renewed. The cache validity is measured by a stop watch that is not thread-safe. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=manila-21.0.0/releasenotes/notes/bug-2050010-add-filesystem-name-metadata-to-cephfs-shares-5725d751980360ec.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2050010-add-filesystem-name-metadata-to-cephfs-shares-5725d75190000664000175000017500000000077500000000000032064 0ustar00zuulzuul00000000000000--- upgrade: - | Shares created using the CEPHFS Native protocol will now have a new `__mount_options` metadata containing the `cephfs_filesystem_name` to enhance usability while mounting shares. Existing shares will be updated through the ensure shares workflow. To preserve backwards compatibility, this metadata is mutable by end users. It is strongly recommended that administrators include "__mount_options" in the `[DEFAULT]/admin_only_metadata_keys` configuration option. 
././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=manila-21.0.0/releasenotes/notes/bug-2050010-allow-configuring-admin-and-driver-metadata-b0ede7d7cf057b5b.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2050010-allow-configuring-admin-and-driver-metadata-b0ede7d7cf00000664000175000017500000000055300000000000032420 0ustar00zuulzuul00000000000000--- upgrades: - | A new configuration option called ``admin_only_metadata`` has been introduced to assist cloud administrators while defining share metadata that should only be modified by administrators. This configuration option defaults to the existing admin only metadata keys, i.e., scheduler hints to preserve backwards compatibility. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2051691-cors-defaults-15989a221a9cb920.yaml0000664000175000017500000000035600000000000026017 0ustar00zuulzuul00000000000000--- fixes: - | `bug 2051691 `_: Fixed the default values of the following options. - ``[cors] allowed_headers`` - ``[cors] expose_headers`` - ``[cors] allow_methods`` ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=manila-21.0.0/releasenotes/notes/bug-2052785-netapp-allow-share-server-migration-with-replicas-971fece378440aba.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2052785-netapp-allow-share-server-migration-with-replicas-971fe0000664000175000017500000000037100000000000032630 0ustar00zuulzuul00000000000000--- fixes: - | Netapp ONTAP driver now allows migration of share server even if one or more shares on share server has replicas. 
For more details, please check `Launchpad bug #2052785 `_ ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-2053100-fix-ceph-driver-preferred-path-update-70147668e0f19c4d.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2053100-fix-ceph-driver-preferred-path-update-70147668e0f19c4d.0000664000175000017500000000037300000000000031534 0ustar00zuulzuul00000000000000--- fixes: - | The CephFS driver used to set the "preferred" export path incorrectly. It is now fixed to set it as part of export location metadata. See `Launchpad bug 2053100 `_ for more details. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/releasenotes/notes/bug-2053100-fix-export-path-preferred-attr-updates-32db001aacfc8563.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2053100-fix-export-path-preferred-attr-updates-32db001aacfc85630000664000175000017500000000034400000000000032111 0ustar00zuulzuul00000000000000--- fixes: - | Share manager drivers can now update export location metadata (such as the `preferred` attribute) during the `ensure_shares` routine. (`Launchpad bug: 2053100 `_) ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=manila-21.0.0/releasenotes/notes/bug-2058027-fix-backup-status-in-creating-state-forever-for-wrong-config-a9e10419f33ecb97.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2058027-fix-backup-status-in-creating-state-forever-for-wrong-c0000664000175000017500000000054200000000000032715 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2058027 `_: Fix the issue for NetApp driver for backup feature. Backup status update used to fail when administrator misconfigured backup settings configuration or SnapMirror relationship created during backup creation was not in healthy state. 
././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=manila-21.0.0/releasenotes/notes/bug-2058642-fix-backup-delete-source-destination-same_vserver-7e165f9acfec123c.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2058642-fix-backup-delete-source-destination-same_vserver-7e1650000664000175000017500000000051300000000000032576 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2058642 `_: Fix the backup delete issue for NetApp driver when source and destination vserver are same. Added the logic in resource cleanup part to delete the vserver peering when source and destination vserver are not same. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-2059399-fix-backup-restore-failing-for-rest-bc060fcf893ae0f6.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2059399-fix-backup-restore-failing-for-rest-bc060fcf893ae0f6.ya0000664000175000017500000000063200000000000032105 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2059399 `_: Fix the backup restore issue for NetApp driver for REST client. Backup restore is failing across the ONTAP cluster for REST client. Added the logic to use to destination vserver client for REST client to restore the backup and modify the restore lib for cmode REST client accordingly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2064502-netapp-fix-revert-to-snapshot-f23ab4dc325b2c42.yaml0000664000175000017500000000064200000000000031272 0ustar00zuulzuul00000000000000--- fixes: - | When reverting shares to snapshots that are larger or smaller than the share, some storage systems such as NetApp ONTAP always defer to the larger of the two sizes. Manila's share manager interface now accounts for this behavior, and adjusts project quotas appropriately. 
For more details, please check Launchpad `bug 2064502 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2064907-allow-empty-mount-point-prefix.yaml0000664000175000017500000000025500000000000027225 0ustar00zuulzuul00000000000000--- features: - | Allow using an empty prefix for shares created with mount_point_name. - | Allow configuring default mount_point_name prefix through an option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2064907-fix-share-server-migration.yaml0000664000175000017500000000064500000000000026355 0ustar00zuulzuul00000000000000--- fixes: - | When performing a share server migration without new share network, reused allocations are properly updated with new share_server_id. - | In NetApp driver functions related to share server migration, vserver_name is now retrieved directly from backend_details instead of templating. This way, vserver_name is correct even for share servers that have already been migrated once. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2066840-dell-powerflex-default-port-fc50b82979e3b11b.yaml0000664000175000017500000000030000000000000030731 0ustar00zuulzuul00000000000000--- fixes: - | PowerFlex Driver `bug #2066840 `_: The default port value of 443 has been added for the PowerFlex NAS backend. 
././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-2066871-allow-to-update-access-level-for-access-rule-741f8fc3cc190701.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2066871-allow-to-update-access-level-for-access-rule-741f8fc3cc0000664000175000017500000000050200000000000032250 0ustar00zuulzuul00000000000000--- fixes: - | Since microversion 2.88, Manila will allow to update access_level of access rule using `openstack share access update` API. Currently this is supported only for NetApp ONTAP backend. For more details, please check `Launchpad bug #2066871 `_ ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=manila-21.0.0/releasenotes/notes/bug-2067266-Fix-leak-of-Manila-ports-on-share-server-deletion-b6faf19725727988.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2067266-Fix-leak-of-Manila-ports-on-share-server-deletion-b6faf0000664000175000017500000000116700000000000032443 0ustar00zuulzuul00000000000000--- fixes: - | Share server deletion happens asynchronously and failure during this delete results in leakage of neutron ports. This is fixed in two steps, first by trying to delete ports before share server deletion. Second, after ports from Manila db entries are deleted, query is made to neutron to get ports which are allocated for share server and missing in db. And then try to delete those ports. Also made sure that the interfaces are disabled since neutron ports are already deleted. 
For more details please check Launchpad `bug 2067266 `_ ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=manila-21.0.0/releasenotes/notes/bug-2067456-handle-share-and-snapshot-show-for-deferred-deletion-37654e034eabccc6.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2067456-handle-share-and-snapshot-show-for-deferred-deletion-370000664000175000017500000000040200000000000032524 0ustar00zuulzuul00000000000000--- fixes: - | Share and snapshot in deferred deletion states will be hidden from non-admin user in show API along-with existing list API. For more details, please check Launchpad `bug 2067456 `_ ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-2067609-make-osprofiler-initialization-compatible-with-forks.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2067609-make-osprofiler-initialization-compatible-with-forks.ya0000664000175000017500000000024100000000000033171 0ustar00zuulzuul00000000000000--- fixes: - | OSprofiler is now initialized after forking because at least its opentelemetry driver loses tracing context when process is forked. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2068043-update-deferred-deletion-dc5ea4207e06bb64.yaml0000664000175000017500000000103300000000000030305 0ustar00zuulzuul00000000000000--- fixes: - | When deferred deletion is enabled, Manila will attempt regular deletion. While quota is released, and these objects (shares, snapshots) are hidden from the user. Any error during deferred deletion will put resource in `error_deferred_deleting` state. After that, it will be handled in periodic task. Also, Manila will make sure snapshot instances are deleted before share instance delete. 
For more details, please check Launchpad `bug 2068043 `_ ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=manila-21.0.0/releasenotes/notes/bug-2069125-fix-manila-driver-error-with-ontap-svm-scoped-user-when-add-rule-1ae120a96dd8f68a.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2069125-fix-manila-driver-error-with-ontap-svm-scoped-user-when0000664000175000017500000000050200000000000032746 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2069125 `_: Fixed the issue for the NetApp ONTAP driver in the ZAPI workflow, where certain vserver accounts failed to add access rules for a share when the vserver network interface was not configured with kerberos. ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=manila-21.0.0/releasenotes/notes/bug-2071359-netapp-retry-sis-operatin-if-already-active-4625605175f76d07.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2071359-netapp-retry-sis-operatin-if-already-active-4625605175f0000664000175000017500000000062300000000000032013 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver will now retry the sis operation (e.g. dedupe) if sis operation is currently active. This is needed because NetApp turns on efficiency (by default) on latest hardware which causes conflicting sis operation when Manila tries to turn it off. For more details, please check Launchpad `bug #2071359 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2072552-allow-scheduling-to-disabled-host-82c93468ec322256.yaml0000664000175000017500000000050400000000000031566 0ustar00zuulzuul00000000000000--- fixes: - | Manila now allows OpenStack administrators to schedule shares on hosts that are currently running and marked as under maintenance (`disabled`) through the `only_host` scheduler hint. 
For more details, please refer to `launchpad bug 2072552 `_ ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=manila-21.0.0/releasenotes/notes/bug-2073766-svm-scope-exclude-management-lif-when-validate-kerberos-config-ef0f1249fcc4445b.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2073766-svm-scope-exclude-management-lif-when-validate-kerberos0000664000175000017500000000054100000000000033016 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2073766 `_: Kerberos can only be configured for the SVM data network interface. Therefore, this fix excludes the vserver management network interface for certain SVM accounts when checking the SVM Kerberos configuration for network interfaces. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2074504-disable-ports-on-neutron-ext-nets-af3ff56da9a928df.yaml0000664000175000017500000000061000000000000032247 0ustar00zuulzuul00000000000000--- fixes: - | When using Neutron networks tagged as `external` (unmanaged provider networks) as share networks, Manila now creates ports with `admin_state_up=False` (disabled). This change addresses ARP failures that can occur when using OVN as the Neutron ML2 plugin. For more information, refer to `bug 2074504 `_. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/bug-2075967-lock-shares-deletion-when-rule-is-locked-9ce9c6914acc1edb.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2075967-lock-shares-deletion-when-rule-is-locked-9ce9c6914acc1e0000664000175000017500000000100300000000000032153 0ustar00zuulzuul00000000000000--- upgrades: - | Creating access rules with a deletion lock will now result in the share being locked alongside in order to avoid disconnections. For more details, please refer to `launchpad bug 2075967 `_. 
fixes: - | When creating access rules with a deletion lock, the shares will also be locked to prevent disconnections. For more details, please refer to `launchpad bug 2075967 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2082944-sqlalchemy-tracing.yml0000664000175000017500000000020100000000000024607 0ustar00zuulzuul00000000000000--- fixes: - | Added tracing to SQLAlchemy engine if profiler is enabled and profiler.trace_sqlalchemy is set to true. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2084529-add-db_retry-on-update-methods-538f3a295a110f3f.yaml0000664000175000017500000000030600000000000031214 0ustar00zuulzuul00000000000000--- fixes: - | Added db_retry on deadlock for various database update methods. Please check `launchpad bug 2084529 `_ for more details. ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=manila-21.0.0/releasenotes/notes/bug-2084532-add-new-policy-list-all-project-for-shares-and-snapshots-0b02bea6e121c6a2.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2084532-add-new-policy-list-all-project-for-shares-and-snapshot0000664000175000017500000000047000000000000032666 0ustar00zuulzuul00000000000000--- upgrade: - | Administrator can now configure new policy `list_all_projects` for share and share snapshot. This policy is applicable for listing of respective resources in all projects. Please check `launchpad bug 2084532 `_ for more details. 
././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/bug-2084783-improve-get-all-instances-with-share-data-f217df37bac9b647.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2084783-improve-get-all-instances-with-share-data-f217df37bac9b0000664000175000017500000000034600000000000032250 0ustar00zuulzuul00000000000000--- fixes: - | Share instance/replica get with share data query is being optimized to improve the performance. Please check `launchpad bug 2084783 `_ for more details. ././@PaxHeader0000000000000000000000000000023400000000000011454 xustar0000000000000000134 path=manila-21.0.0/releasenotes/notes/bug-2085112-netapp-make-deleted-volumes-retention-period-configurable-403ec227f256e24b.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2085112-netapp-make-deleted-volumes-retention-period-configurab0000664000175000017500000000075200000000000033121 0ustar00zuulzuul00000000000000--- fixes: - | The NetApp ONTAP driver now supports user configurable volume delete retention period (``netapp_volume_delete_retention_hours``). The max value of config option ``unused_share_server_cleanup_interval`` is updated to possible max value of retention period because if a share server gets deleted, the volume recovery queue also gets purged. For more details, please check `Launchpad bug #2085112 `_ ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/bug-2088269-vast-add-multiple-export-locations-39243a9091c145f9.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2088269-vast-add-multiple-export-locations-39243a9091c145f9.yam0000664000175000017500000000056500000000000031676 0ustar00zuulzuul00000000000000--- fixes: - | `Launchpad bug #2037422 `_: Fixed an issue within the VASTData driver that made only one export path available for mounting, regardless if more mount paths were available. 
Now, the amount of export locations will correspond to the way the storage is configured for better load balancing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2089061-fix-access-rules-locks-lookup-b5efbd41397acba3.yaml0000664000175000017500000000057500000000000031420 0ustar00zuulzuul00000000000000--- fixes: - | While displaying and deleting access rules, manila was limiting the search for locks to the context of the request. Now, manila will search within all of the projects for locks and properly apply visibility and deletion restrictions. For more details, please refer to `launchpad bug #2089061 `_. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=manila-21.0.0/releasenotes/notes/bug-2089534-dont-fail-stop-if-service-doesnt-exist-68448b4d775a2b1e.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2089534-dont-fail-stop-if-service-doesnt-exist-68448b4d775a2b1e0000664000175000017500000000025500000000000031714 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where stopping a service would produce unhelpful database traceback if a service entry didn't exist in the database for the service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2089634-fix-like-search-option-name-f06d8a50163070bf.yaml0000664000175000017500000000036000000000000030515 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue where using `%` in `name~` filter would match all records instead of those literally containing `%` in their name. The filter now properly escapes special characters used in SQL LIKE queries. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2089826-allow-filtering-shares-on-mount-point-name.yaml0000664000175000017500000000011200000000000031371 0ustar00zuulzuul00000000000000--- fixes: - | Allow filtering shares on mount_point_name property. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=manila-21.0.0/releasenotes/notes/bug-2096656-fix-netapp-create-share-from-snapshot-with-mount-point-name.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2096656-fix-netapp-create-share-from-snapshot-with-mount-point-0000664000175000017500000000017100000000000032762 0ustar00zuulzuul00000000000000--- fixes: - The NetApp cDOT driver now allows setting share mount_point_name when creating share from a snapshot. ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=manila-21.0.0/releasenotes/notes/bug-2097522-netapp-delete-vlan-even-if-ipspace-is-reused-5bc8b49ad6f91eb7.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2097522-netapp-delete-vlan-even-if-ipspace-is-reused-5bc8b49ad60000664000175000017500000000040700000000000032233 0ustar00zuulzuul00000000000000--- fixes: - | The NetApp ONTAP driver now cleans VLAN and IPSpace correctly thereby making sure to delete VLAN even if IPspace is reused. For more details, please check `Launchpad bug #2097522 `_ ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-2098083-Pass-on-port-delete-not-found-error-5acafa7a7810a210.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2098083-Pass-on-port-delete-not-found-error-5acafa7a7810a210.ya0000664000175000017500000000042700000000000031715 0ustar00zuulzuul00000000000000--- fixes: - | Manila will no longer fail while attempting to delete a neutron port that already has been deleted. 
Instead, a log warning will be created. For more details, please check `Launchpad bug #2098083 `_ ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/bug-2099273-stop-overriding-cephfs-nfs-protocols-cf7e3949f688ad6f.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2099273-stop-overriding-cephfs-nfs-protocols-cf7e3949f688ad6f.y0000664000175000017500000000052200000000000032237 0ustar00zuulzuul00000000000000--- fixes: - | `Launchpad bug 2099273 ` has been addressed by removing the NFS protocol override within the request sent to the Ceph Manager API. This allows users to mount shares exported by the Ceph NFS service with any NFS protocol version exposed by that service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2100829-dell-powerscale-http-auth-38831162175686c4.yaml0000664000175000017500000000103500000000000030023 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerScale Driver `Bug #2100829 `_: * Fixed SSL verification error for `Delete NFS share`, `Delete CIFS Share` and `Allow CIFS share access`. * Fixed mounted NFS share is inaccessible. * Fixed return type error for `Create Share from snapshot`. * Fixed `Delete a share` did not remove the quota and the directory. * Implemented session based PowerScale API auth and enabled CSRF token. * Enabled configurable SSL cert verification. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2102673-fix-shares-stuck-in-ensuring-190ce5519c33baad.yaml0000664000175000017500000000044700000000000031105 0ustar00zuulzuul00000000000000--- fixes: - | An issue that caused the shares to be stuck in the 'ensuring' status when the driver was restarted in the middle of the ensure operation has been fixed. 
For more details, please refer to `Launchpad bug #2102673 `_ ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=manila-21.0.0/releasenotes/notes/bug-2104357-Fix-server_migrating-status-of-non-active-replica-6af28a67a4684d16.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2104357-Fix-server_migrating-status-of-non-active-replica-6af280000664000175000017500000000050400000000000032537 0ustar00zuulzuul00000000000000--- fixes: - | Manila now correctly handles the 'server_migrating' status of share and snapshot instances during share server migration especially during share server belonging to non-active replica. For more details, please check `Launchpad bug #2104357 `_ ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/bug-2106382-NetApp-Preserve-custom-snapshot-policies-d3bd010a12325506.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2106382-NetApp-Preserve-custom-snapshot-policies-d3bd010a1232550000664000175000017500000000073700000000000032037 0ustar00zuulzuul00000000000000--- fixes: - | Netapp ONTAP driver allows to set snapshot policy names that will be kept during operations like migration_complete and manage_container, instead of overwriting by snapshot policy that is set via share type extra specs. Config option 'netapp_volume_snapshot_policy_exceptions' will be used to configure exception snapshot policies. 
For more details, please check `Launchpad bug #2106382 `_ ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=manila-21.0.0/releasenotes/notes/bug-2111918-netapp-fix-rentention-period-rest-api-e73b358ccc6e7b37.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2111918-netapp-fix-rentention-period-rest-api-e73b358ccc6e7b37.0000664000175000017500000000050300000000000032042 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue while creating shares due to an undesired retention period parameter in NetApp ONTAP driver. Fixed this by moving retention_period in PATCH instead of POST api request. For more details, please check `Launchpad bug #2111918 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2114260-Add-guard-to-qualified_replica-e02bd4b21acee803.yaml0000664000175000017500000000036000000000000031356 0ustar00zuulzuul00000000000000--- fixes: - | ShareSnapshot model properties fixed to handle race condition where share instance is deleted mean-time. For more details, please check `Launchpad bug #2114260 `_ ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/bug-2114969-netapp-guard-already-existing-cifs-access-df01145c6782e880.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2114969-netapp-guard-already-existing-cifs-access-df01145c6782e0000664000175000017500000000037600000000000032100 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver CIFS access control creation fixed for duplicate entry exception which means access already exist. 
For more details, please check `Launchpad bug #2114969 `_ ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/bug-2120176-Handle-Neuton-Subnet-Full-Exception-9cb634909f0dc716.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2120176-Handle-Neuton-Subnet-Full-Exception-9cb634909f0dc716.ya0000664000175000017500000000043300000000000031525 0ustar00zuulzuul00000000000000--- fixes: - | Manila will report IpAddressGenerationFailureClient exception if neutron subnet is full i.e. no more ports can be created on neutron subnet. For more details, please refer to `launchpad bug #2120176 `_ ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/releasenotes/notes/bug-2120291-netapp-rest-basic-share-creation-failure-fixes-f768ba19f3157db4.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2120291-netapp-rest-basic-share-creation-failure-fixes-f768ba190000664000175000017500000000057200000000000032350 0ustar00zuulzuul00000000000000--- fixes: - | The NetApp ONTAP driver failed while checking for WORM shares compliance and data ports for LIF creaton during the shares creation. The issue is now fixed, and share creation works fine with both rest and netapp legacy client. For more details, please refer to `Launchpad bug #2120291 `_. ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=manila-21.0.0/releasenotes/notes/bug-2120650-enforce-policy-check-for-share-snapshots-retrieval-b4c66a3e90bd38af.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2120650-enforce-policy-check-for-share-snapshots-retrieval-b4c60000664000175000017500000000026400000000000032640 0ustar00zuulzuul00000000000000--- security: - | Closes a gap where a user can see the snapshots belonging to another project if the uuid of the snapshot is leaked, stolen or (improbably) guessed. 
././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=manila-21.0.0/releasenotes/notes/bug-2121141-fixing-cifs-share-creation-through-netapp-legacy-client-4c3868fca9de2df2.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-2121141-fixing-cifs-share-creation-through-netapp-legacy-client0000664000175000017500000000053600000000000033003 0ustar00zuulzuul00000000000000--- fixes: - | The NetApp ONTAP driver failed while creating a CIFS share through netapp legacy client with aes encryption error. The issue is now fixed, and share creation works as expected with netapp legacy client. For more details, please refer to `Launchpad bug #2121141 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug-667744-fix-c64071e6e5a098f7.yaml0000664000175000017500000000033200000000000024047 0ustar00zuulzuul00000000000000fixes: - Launchpad bug `1822815 `_ has been fixed. The user no longer gets an error if the list command has no rows when executing `manila list --count True`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug_1564623_change-e286060a27b02f64.yaml0000664000175000017500000000022600000000000024625 0ustar00zuulzuul00000000000000--- fixes: - For a delete snapshot request, if backend reports that snapshot is busy then the state of snapshot is changed to 'error_deleting'. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug_1582931-1437eae20fa544d1.yaml0000664000175000017500000000014700000000000023460 0ustar00zuulzuul00000000000000--- fixes: - HPE3PAR Driver fix to reduce the fsquota when a share is deleted for shared fstores.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bug_1844046-fix-image-not-found-629415d50cd6042a.yaml0000664000175000017500000000042600000000000027074 0ustar00zuulzuul00000000000000--- fixes: - | The Generic driver has been fixed to invoke compute image retrieval by ID rather than list all images and implement a filter. This prevents failures in case there are a lot of images available and the image service returns a paginated response. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/bugfix-1771958-1771970-bcec841e7ae6b9f6.yaml0000664000175000017500000000031700000000000025216 0ustar00zuulzuul00000000000000--- fixes: - | New shares created on a Quobyte backend are now initialized with the correct quota. - | fixes a bug causing incorrect quotas being set in the backend when resizing Quobyte shares. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/cephfs-add-nfs-protocol-support-44764094c9d784d8.yaml0000664000175000017500000000011200000000000027732 0ustar00zuulzuul00000000000000--- features: - Added NFS protocol support for shares backed by CephFS. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/cephfs-fix-export-ip-escaping-on-hostname-e2866be32a8f5e38.yaml0000664000175000017500000000052700000000000032015 0ustar00zuulzuul00000000000000--- fixes: - | When ``cephfs_ganesha_server_ip`` is not set, the current hostname is used as a default for such config option. The driver was treating this value as an IP address and trying to perform validations on it. The CEPH NFS driver will no longer treat hostnames as ip addresses and try to validate them as such. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/cephfs-native-add-readonly-shares-support-067ccab0217ab5f5.yaml0000664000175000017500000000012000000000000032126 0ustar00zuulzuul00000000000000--- features: - For cephfs_native driver, added read-only shares support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/cephfs-native-enhance-update-access-support-e1a1258084c997ca.yaml0000664000175000017500000000034300000000000032307 0ustar00zuulzuul00000000000000--- features: - | Enhanced ``cephfs_native`` driver's update_access() to, - remove undesired rules existing in the backend during recovery mode. - return ``access_keys`` of ceph auth IDs that are allowed access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/cephfs-native-fix-evict-c45fd2de8f520757.yaml0000664000175000017500000000013100000000000026434 0ustar00zuulzuul00000000000000--- fixes: - In cephfs_native driver, fixed client eviction call during access denial. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/cephfs-nfs-ipv6-support-2ffd9c0448c2f47e.yaml0000664000175000017500000000011300000000000026514 0ustar00zuulzuul00000000000000--- features: - IPv6 support for CephFS Manila driver with NFS gateway. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/cephfs-set-mode-b7fb3ec51300c220.yaml0000664000175000017500000000046700000000000024746 0ustar00zuulzuul00000000000000--- fixes: - | Shares backed by CephFS no longer have hard-coded mode 755. Use the ``cephfs_volume_mode`` configuration option to set another mode, such as 775 when using manila dynamic external storage provider with OpenShift. The default value remains 755 for backwards compatibility. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/cephfs-snapshots-enabled-4886147664270c32.yaml0000664000175000017500000000111200000000000026302 0ustar00zuulzuul00000000000000--- upgrade: - | The default value for the CephFS driver configuration option ``cephfs_enable_snapshots`` has changed to True. This option has also been deprecated, and will be removed in a future release. If snapshots are not desired with this back end, set the share type extra spec ``snapshot_support`` to False. deprecations: - | The CephFS driver configuration option ``cephfs_enable_snapshots`` has been deprecated, and will be removed in a future release. Use the share type extra-spec ``snapshot_support`` to enable or disable snapshots. 
././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/cephfs-support-statement-for-victoria-and-beyond-e94baa7857b1624c.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/cephfs-support-statement-for-victoria-and-beyond-e94baa7857b1624c.y0000664000175000017500000000073500000000000032722 0ustar00zuulzuul00000000000000--- upgrade: - | This version of OpenStack Manila has not been tested with Ceph clusters prior to Nautilus. CephFS drivers interact with Ceph clusters via a python binding called "ceph_volume_client". This is being replaced by supplying management operations via the ceph manager interface that was introduced in the Nautilus release of Ceph. So it is advised that you upgrade your Ceph deployment prior to upgrading to Manila's Victoria release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/change_user_project_length-93cc8d1c32926e75.yaml0000664000175000017500000000013200000000000027277 0ustar00zuulzuul00000000000000--- fixes: - User_id and project_id DB fields are extended to also support LDAP setups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/check-thin-provisioning-4bb702535f6b10b6.yaml0000664000175000017500000000047500000000000026454 0ustar00zuulzuul00000000000000--- fixes: - Capacity filter and weigher scheduler logic was modified to account for back ends that can support thin and thick provisioning for shares. Over subscription calculation is triggered with the presence of the ``thin_provisioning`` extra-spec in the share type of the share being created. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/clean-expired-messages-6161094d0c108aa7.yaml0000664000175000017500000000023500000000000026151 0ustar00zuulzuul00000000000000--- features: - | Added a periodic task which cleans up expired user messages. Cleanup interval can be set by message_reap_interval config option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/config-for-cephfs-volume-prefix-67f2513f603cb614.yaml0000664000175000017500000000017600000000000027740 0ustar00zuulzuul00000000000000--- features: - cephfs volume path prefix is now configurable in order to enable support for multiple cephfs back ends. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/container-add-share-server-migration-1f4509ade926aec6.yaml0000664000175000017500000000041000000000000031161 0ustar00zuulzuul00000000000000--- features: - | The container driver now supports driver assisted share migration and share server migration across share networks, and across backends that share the same underlying volume group (configuration option: container_volume_group). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/container-driver-5d972cc40e314663.yaml0000664000175000017500000000072400000000000025117 0ustar00zuulzuul00000000000000--- prelude: > A new Container driver is added. It uses docker container as a share server. features: - The Container driver allows using a docker container as a share server. This allows for very fast share server startup. - The Container driver supports CIFS protocol. 
issues: - | The Container driver has the following known issues: * Only basic driver operations are supported: create/delete share, update access and extend share. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/container-driver-hardening-against-races-30c9f517a6392b9d.yaml0000664000175000017500000000017100000000000031654 0ustar00zuulzuul00000000000000--- fixes: - Container driver. Fixed share and share server deletion concurrencies by adding shared external lock. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/container-manage-unmanage-share-servers-880d889828ee7ce3.yaml0000664000175000017500000000024200000000000031541 0ustar00zuulzuul00000000000000--- features: - Added managing and unmanaging of share servers functionality to the Container Driver, allowing for shares to be managed and unmanaged. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/container-multiple-subnets-per-az-702aad41d6f91b59.yaml0000664000175000017500000000050700000000000030470 0ustar00zuulzuul00000000000000--- features: - | The Container driver is now able to: - Create shares using share networks that have multiple share network subnets in the same availability zone. - Add more network interfaces into share servers that are already deployed based on the share network subnets within the share network. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/dedupe-support-hnas-driver-017d2f2a93a8b487.yaml0000664000175000017500000000031200000000000027115 0ustar00zuulzuul00000000000000--- fixes: - Hitachi HNAS driver now reports ``dedupe`` capability and it can be used in extra-specs to choose a HNAS file system that has dedupe enabled when creating a manila share on HNAS. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/delete_vlan_on_vserver_delete-a7acd145c0b8236d.yaml0000664000175000017500000000013300000000000030117 0ustar00zuulzuul00000000000000--- features: - NetApp cMode driver - configured VLAN will be deleted on Vserver removal ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/dell-emc-unity-use-user-capacity-322f8bbb7c536453.yaml0000664000175000017500000000035700000000000030115 0ustar00zuulzuul00000000000000--- features: - Dell EMC Unity driver is changed to create shares with the available space instead of allocated space as the same as the size specified by user. - Dell EMC Unity driver version is changed to 3.0.0 for Pike release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/dellemc-fix-capacity-report-25f75a6c96e12b40.yaml0000664000175000017500000000061400000000000027216 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC Manila Driver: Fixes wrong capacity in pool_stat. `bug 1890372 `_ powermax manila return size in MB, `bug 1890375 `_ vnx manila return size in MB, `bug 1890376 `_ unity manila return size in bytes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-ceph-nfs-protocol-helper-ba5ec5095d9eaca7.yaml0000664000175000017500000000024100000000000030767 0ustar00zuulzuul00000000000000--- deprecations: - | Usage of the NFSProtocolHelper with the CephFS driver has been deprecated and it will be removed in the next skip-level release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-ceph-nfs-protocol-helper-options-bacaf4565478e38f.yaml0000664000175000017500000000051500000000000032335 0ustar00zuulzuul00000000000000--- deprecations: - | The following options of the CephFS driver have been deprecated, because these are used only by the deprecated NFSProtocolHelper. - ``cephfs_ganesha_server_is_remote`` - ``cephfs_ganesha_server_username`` - ``cephfs_ganesha_path_to_private_key`` - ``cephfs_ganesha_server_password`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-dellemc-vnx-20c9daec7aec541c.yaml0000664000175000017500000000025300000000000026360 0ustar00zuulzuul00000000000000--- deprecations: - | The Dell EMC VNX share driver has been deprecated because the product has reached its EOL. The driver will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-glustefs-8e0c863aaa58eb6b.yaml0000664000175000017500000000023200000000000025723 0ustar00zuulzuul00000000000000--- deprecations: - | The GlusterFS driver the GlusterFS Native driver have been deprecated. These drivers will be removed in a future release. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-json-formatted-policy-file-fd0345f215e6ccd5.yaml0000664000175000017500000000176000000000000031162 0ustar00zuulzuul00000000000000--- upgrade: - | The default value of ``[oslo_policy] policy_file`` config option has been changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing customized or previously generated static policy JSON files (which are not needed by default), should generate new policy files or convert them in YAML format. Use the `oslopolicy-convert-json-to-yaml `_ tool to convert a JSON to YAML formatted policy file in backward compatible way. deprecations: - | Use of JSON policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Wallaby cycle with an anticipated future removal of support by ``oslo.policy``. As such operators will need to convert to YAML policy files. Please see the upgrade notes for details on migration of any custom policy files. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-memcached-servers-config-option-f4456382b9b4d6db.yaml0000664000175000017500000000060400000000000032107 0ustar00zuulzuul00000000000000--- deprecations: - | The configuration option "memcached_servers" from the [DEFAULT] section is deprecated. This option has currently no effect and will be removed in future releases. To specify memcached servers for the authentication middleware when using keystone, please use the option "memcached_servers" from the [keystone_authtoken] configuration group. 
././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/deprecate-old-ks-opts-in-nova-neutron-cinder-groups-e395015088d93fdc.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-old-ks-opts-in-nova-neutron-cinder-groups-e395015088d93fd0000664000175000017500000000165500000000000032556 0ustar00zuulzuul00000000000000--- fixes: - | `Launchpad bug 1809318 `_ has been fixed. The deprecated options ``api_insecure`` and ``ca_certificates_file`` from nova, cinder, neutron or DEFAULT configuration groups no longer override the newer ``insecure`` option if provided. Always use ``insecure`` and ``cafile`` to control SSL and validation since the deprecated options will be removed in a future release. deprecations: - | The options ``ca_certificates_file``, ``nova_ca_certificates_file``, ``cinder_ca_certificates_file``, ``api_insecure``, ``nova_api_insecure`` and ``cinder_api_insecure`` have been deprecated from the ``DEFAULT`` group as well as ``nova``, ``neutron`` and ``cinder`` configuration groups. Use ``cafile`` to specify the CA certificates and ``insecure`` to turn off SSL validation in these respective groups (nova, neutron and cinder). ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/deprecate-service-instance-network-helper-option-82ff62a038f2bfa3.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-service-instance-network-helper-option-82ff62a038f2bfa3.y0000664000175000017500000000027700000000000033040 0ustar00zuulzuul00000000000000--- upgrade: - Deprecated the ``service_instance_network_helper_type`` option for removal. This option is no longer used for anything since nova networking is no longer supported. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-tegile-04c7466e29570ad5.yaml0000664000175000017500000000016500000000000025051 0ustar00zuulzuul00000000000000--- deprecations: - | The Tegile share driver has been deprecated and will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-use_forwarded_for-2b47e9b63d5f2fc1.yaml0000664000175000017500000000042300000000000027513 0ustar00zuulzuul00000000000000--- deprecations: - | The ``[DEFAULT] use_forwarded_for`` parameter has been deprecated. Instead of using this parameter, add the ``HTTPProxyToWSGI`` middleware to api pipelines, and ``[oslo_middleware] enable_proxy_headers_parsing = True`` to manila.conf. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/deprecate-windows-smb-654983cf22856e31.yaml0000664000175000017500000000017200000000000025777 0ustar00zuulzuul00000000000000--- deprecations: - | The Windows SMB share driver has been deprecated and will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/disable-share-groups-api-by-default-0627b97ac2cda4cb.yaml0000664000175000017500000000126100000000000030751 0ustar00zuulzuul00000000000000--- issues: - Share groups replaced the experimental consistency groups feature in Ocata. The APIs for share groups have a default role-based-access-control policy set to "!". This means that these APIs are not enabled by default on upgrading to the Ocata release. Modify policy.json appropriately in your deployment to enable these APIs. 
You may set these policies to "rule:default" to allow access to all tenants and "rule:admin_api" to restrict the access only to tenants with those privileges. upgrade: - Policies relating to "consistency_group" and "cgsnapshot" APIs have been removed from manila. These policies can be removed from "policy.json". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/do-not-create-default-route-66ff4199b60e35c7.yaml0000664000175000017500000000017000000000000027153 0ustar00zuulzuul00000000000000--- fixes: - default route for service subnet wouldn't be created if connect_share_server_to_tenant_network is on ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/driver-filter-91e2c60c9d1a48dd.yaml0000664000175000017500000000066300000000000024643 0ustar00zuulzuul00000000000000--- features: - Add DriverFilter and GoodnessWeigher to manila's scheduler. These can use two new properties provided by backends, 'filter_function' and 'goodness_function', which can be used to filter and weigh qualified backends, respectively. upgrade: - To add DriverFilter and GoodnessWeigher to an active deployment, their references must be added to the filters and weighers sections on entry_points.txt. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/drop-python-3-6-and-3-7-924b62070c5b60ff.yaml0000664000175000017500000000020100000000000025537 0ustar00zuulzuul00000000000000--- upgrade: - | Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/drop-python2-support-e160ff36811a5964.yaml0000664000175000017500000000034400000000000025724 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. Last release of openstack/manila to support python 2.7 is OpenStack Train (9.x). The minimum version of Python now supported by openstack/manila is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/drop-support-for-lvm-share-export-ip-e031ef4c5f95b534.yaml0000664000175000017500000000044500000000000031070 0ustar00zuulzuul00000000000000--- upgrade: - | The LVM driver configuration option ``lvm_share_export_ip`` is no longer supported. This option has been replaced by ``lvm_share_export_ips`` which accepts a comma-separated string of IP addresses of the host exporting the LVM shares (NFS/CIFS share server).././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/emc-unity-manila-support-d4f5a410501cfdae.yaml0000664000175000017500000000060700000000000027010 0ustar00zuulzuul00000000000000--- prelude: > Add a new EMC Unity plugin in manila which allows user to create NFS/CIFS share with an EMC Unity backend. features: - | Add a new Unity plugin in manila which allows user to create NFS/CIFS share with an EMC Unity backend. This plugin performs the operations on Unity by REST API. issues: - EMC Unity does not support the same IP in different VLANs. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/emc_vnx_interface_ports_configuration-00d454b3003ef981.yaml0000664000175000017500000000025000000000000031460 0ustar00zuulzuul00000000000000--- fixes: - EMC VNX driver supports interface ports configuration now. The ports of Data Mover that can be used by share server interfaces are configurable. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/enable-enforce-scope-c2d57db049741896.yaml0000664000175000017500000000233000000000000025625 0ustar00zuulzuul00000000000000--- upgrade: - | The service now requires a user's "scope" to be defined with the OpenStack Identity service (Keystone) by the virtue of default Role Based Access Control (RBAC). This is being done in addition to changing the RBAC rules for GET requests to permit users possessing the "reader" role. The Default value of config options ``[oslo_policy] enforce_scope`` and ``[oslo_policy] oslo_policy.enforce_new_defaults`` has now changed to ``True``. This means that, if you are using system scoped tokens to access Manila's API, requests will fail with HTTP 403. Users must obtain a project scoped token to interact with the Manila API. You may also provide users with "reader" role where appropriate if they intend to make read-only API requests to Manila. If you would like to disable these changes, modify ``manila.conf`` file to set:: [oslo_policy] enforce_new_defaults=False enforce_scope=False However, be aware that you may not be able to disable these options in the future. 
Please see `OpenStack's Consistent and Secure Default RBAC goal `_ for more details././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/enhance-ensure-share-58fc14ffc099f481.yaml0000664000175000017500000000021700000000000026017 0ustar00zuulzuul00000000000000--- fixes: - Adds the ability to solve the potential problem of slow start up, and deal with non-user-initiated state changes to shares. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/error-share-set-size-ff5d4f4ac2d56755.yaml0000664000175000017500000000027300000000000026065 0ustar00zuulzuul00000000000000--- fixes: - Any errors that may occur during 'managing' a share into manila will result in the share's size being set to 1, aside from transitioning the status to 'manage_error'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/estimate-provisioned-capacity-34f0d2d7c6c56621.yaml0000664000175000017500000000024600000000000027665 0ustar00zuulzuul00000000000000--- fixes: - Improve max_over_subscription_ratio enforcement by providing a reasonable estimate of backend provisioned-capacity when drivers cannot supply it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/extra_specs_case_insensitive-e9d4ca10d94f2307.yaml0000664000175000017500000000022500000000000027730 0ustar00zuulzuul00000000000000--- upgrade: - The values of share type extra-specs will be considered case insensitive for comparison in the scheduler's capabilities filter. 
././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=manila-21.0.0/releasenotes/notes/feature-certificate-based-authentication-for-netapp-drivers-e5163559d2335643.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/feature-certificate-based-authentication-for-netapp-drivers-e5163550000664000175000017500000000043000000000000033263 0ustar00zuulzuul00000000000000--- features: - | The NetApp ONTAP driver now supports Certificate-Based-Authentication (CBA) for operators that desire certificate based authentication instead of user and password. Please check the NetApp Deployment and Operations Guide for more instructions. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-1870280-share-type-user-message-902275047410bdbf.yaml0000664000175000017500000000044200000000000027721 0ustar00zuulzuul00000000000000--- fixes: - | New user message now alerts users when attempting to create a new share without identifying a share type, either through request body or by setting a default share type. See `bug #1870280 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-consistency-groups-api-dd9b5b99138e22eb.yaml0000664000175000017500000000046500000000000027306 0ustar00zuulzuul00000000000000--- fixes: - Consistency Group APIs return share_server_id information correctly to administrators. - When using a consistency group snapshot to create another consistency group, share server and network information is persisted from the source consistency group to the new consistency group. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-creating-from-snapshot-server-limit-7b575c7cbf081efc.yaml0000664000175000017500000000047100000000000031746 0ustar00zuulzuul00000000000000--- fixes: - | Fix creating from snapshot operation with server limits. If the new share and parent are in the same host, the share server must be resued, so the limits must be ignored. For more details, please refer to `launchpad bug #1918845 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-ganesha-allow-access-for-all-ips-09773a79dc76ad44.yaml0000664000175000017500000000024400000000000030624 0ustar00zuulzuul00000000000000--- fixes: - | Drivers using ganesha can now handle 'manila access-allow ip 0.0.0.0/0' as a way to allow access to the share from all IPs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-generic-driver-resize-0fde9c8674db5951.yaml0000664000175000017500000000032100000000000027002 0ustar00zuulzuul00000000000000--- fixes: - | For generic driver, when resize a share it may failed due to exportfs can't recognize "" as part of the share path. This issue has been fixed by replacing "" to "*". ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/fix-generic-driver-using-uuid-to-mount-volumes-291208b283120224.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-generic-driver-using-uuid-to-mount-volumes-291208b283120224.yam0000664000175000017500000000034200000000000032253 0ustar00zuulzuul00000000000000--- fixes: - | Generic driver - volumes mounted to the service instance are now mounted via its uuid (/dev/disk/by-uuid/...) 
instead of canonical paths (/dev/vdb) to prevent messing up the shares after restarts. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-hds-hnas-unconfined-09b79f3bdb24a83c.yaml0000664000175000017500000000025300000000000026500 0ustar00zuulzuul00000000000000--- fixes: - Crash when using unconfined filesystems in HDS HNAS driver using SSH backend. - HDS HNAS Driver no longer mounts unmounted filesystems automatically. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-hnas-mount-on-manage-snapshot-91e094c579ddf1a3.yaml0000664000175000017500000000014600000000000030362 0ustar00zuulzuul00000000000000--- fixes: - Fixed Hitachi HNAS driver not checking export on backend when managing a snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-huawei-driver-cifs-mount-issue-2d7bff5a7e6e3ad6.yaml0000664000175000017500000000022200000000000030771 0ustar00zuulzuul00000000000000--- fixes: - | Change the CIFS mounting parameter of Huawei driver from form "user=" to "username=", which is compatible in various OS. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-huawei-driver-qos-deletion-9ad62db3d7415980.yaml0000664000175000017500000000016300000000000027665 0ustar00zuulzuul00000000000000--- fixes: - Fixed qos deletion failing in huawei driver when qos status is 'idle' by deactivating it first. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-huawei-exception-a09b73234ksd94kd.yaml0000664000175000017500000000011200000000000026151 0ustar00zuulzuul00000000000000--- fixes: - Fix exception in update_access not found in Huawei driver. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/fix-issue-about-provisioned_capaciti_gb-is_None-d700c0544a8a88e8.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-issue-about-provisioned_capaciti_gb-is_None-d700c0544a8a88e8.ya0000664000175000017500000000056400000000000032656 0ustar00zuulzuul00000000000000--- fixes: - | Added synchronized lock to fix issue where provisioned_capacity_gb value was initialized to None immediately on another thread while checking capacity filter, causing shared creation to fail due to conflicting provisioned_capacity_gb updates. please refer to `launchpad bug #1871768 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-managing-twice-hnas-4956a7653d27e320.yaml0000664000175000017500000000020100000000000026163 0ustar00zuulzuul00000000000000--- fixes: - Fixed Hitachi HNAS driver allowing a share to be managed twice through a malformed export location parameter. ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/fix-netApp-drivers-share-server-migration-is-failing-eee991ccbab4cd5a.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-netApp-drivers-share-server-migration-is-failing-eee991ccbab4cd0000664000175000017500000000032000000000000033264 0ustar00zuulzuul00000000000000--- fixes: - | Netapp driver: Fix Netapp share server migration with multiple subnets. 
For more details please refer to `launchpad bug #2018300 ` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-netapp-manage-snapshot-f6ed571bd4f9a2ac.yaml0000664000175000017500000000036400000000000027363 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver: fix some issues caused with managed snapshot, it is no more renaming the managed snapshot. For more details, please refer to `launchpad bug #1936648 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-neutron-plugin-invalid-key-dict-68c3d6bcbf2f19f0.yaml0000664000175000017500000000041200000000000031045 0ustar00zuulzuul00000000000000--- fixes: - | Some neutron integrations might not have the network type, so the neutron network plugin is fixed by taking that scenario in consideration. `Launchpad bug #1987315 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-no-router-server-0d5bf587063f22fc.yaml0000664000175000017500000000042400000000000026023 0ustar00zuulzuul00000000000000--- fixes: - | Fix subnet cleanup for server instances without routers. Previously, when tearing down a server instance that had no router specified in its details, the associated subnet was not cleaned up because the subnet cleanup code was never executed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-py3-netapp-a9815186ddc865d4.yaml0000664000175000017500000000027600000000000024526 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the size value not being present in share snapshot instances, which caused the NetApp driver to crash when creating a share from a snapshot using python3. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-race-condition-netapp-5a36f6ba95a49c5e.yaml0000664000175000017500000000026500000000000027041 0ustar00zuulzuul00000000000000--- fixes: - | Fixed an issue with the NetApp driver leaving leftover resources when it was handling too many share server creation and deletion requests in parallel. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-share-instance-list-with-limit-db7b5b99138e22ee.yaml0000664000175000017500000000025100000000000030613 0ustar00zuulzuul00000000000000--- fixes: - Fixed share instance list with limit number API display error. Change _collection_name to _collection_links when we want to show instances_links. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-share-manager-shrinking-data-loss-state-edc87ba2fd7e32d8.yaml0000664000175000017500000000045500000000000032526 0ustar00zuulzuul00000000000000--- fixes: - | When attempting to shrink a share to a size smaller than the current used space, the share status will remain as ``available`` instead of ``shrinking_possible_data_loss_error``. The user will receive warning message saying that the shrink operation was not completed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-verser-client-in-case-vserver-not-present-92b8a710a08f90e6.yaml0000664000175000017500000000037200000000000032554 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver: driver can now delete the share server in error state that had not been created on storage side. 
For more details, please refer to `launchpad bug #1964592 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix-volume-efficiency-status-2102ad630c5407a8.yaml0000664000175000017500000000020000000000000027325 0ustar00zuulzuul00000000000000fixes: - | Fixed an issue while getting efficiency status from the NetApp backend while creating or updating volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix_access_level_managed_shares_hnas-c76a09beed365b46.yaml0000664000175000017500000000016400000000000031415 0ustar00zuulzuul00000000000000--- fixes: - HNAS driver correctly handles rule updates to pre-existing access rules on a managed CIFS share. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix_cephx_validation-cba4df77f9f45c6e.yaml0000664000175000017500000000026200000000000026425 0ustar00zuulzuul00000000000000--- fixes: - Check the Cephx ID used when granting access to a CephFS share to make sure it's not the same as the one Manila uses to communicate with the Ceph backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix_create_from_snapshot-5d8e470b46aac67d.yaml0000664000175000017500000000035000000000000027134 0ustar00zuulzuul00000000000000--- fixes: - | Netapp driver: Fix Netapp driver create from snapshot accross pools using SVM scoped account. 
For more details please refer to `launchpad bug #1922512 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix_limit_formating_routes-1b0e1a475de6ac44.yaml0000664000175000017500000000020200000000000027472 0ustar00zuulzuul00000000000000--- fixes: - Fixed routes.mapper.Mapper.resource adds a bunch of formatted routes that cannot accept something after a '.'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix_manage_snapshots_hnas-2c0e1a47b5e6ac33.yaml0000664000175000017500000000014500000000000027254 0ustar00zuulzuul00000000000000--- fixes: - Fixed HNAS driver error when managing snapshots caused by concurrency in backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix_policy_file-4a382ac241c718c6.yaml0000664000175000017500000000011400000000000025051 0ustar00zuulzuul00000000000000--- fixes: - Adapted policy.json file to correct snapshot policy values. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix_share_server_api_2025649-f818f594e97c59e6.yaml0000664000175000017500000000020500000000000027057 0ustar00zuulzuul00000000000000--- fixes: - | `bug #2025649 `_: Fixed share server API error message. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fix_share_server_manager_match_id-276202295539dc0f.yaml0000664000175000017500000000044000000000000030437 0ustar00zuulzuul00000000000000--- fixes: - | Fix share server manager API match of identifiers by preffix. "Invalid input received Error" now raises only if the identifiers suffixes matches. 
For more details, please refer to `launchpad bug #1982429 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fixed-netapp-cdot-autosupport-3fabd8ac2e407f70.yaml0000664000175000017500000000015100000000000030046 0ustar00zuulzuul00000000000000--- fixes: - The NetApp cDOT driver's autosupport reporting now works on Python 2.7.12 and later. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/fixing-driver-filter-14022294c8c04d2d.yaml0000664000175000017500000000057600000000000025674 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the driver filter to not check for hard equality between the share_backend_name and the name reported by the host as it defeats the purpose of the capabilities filter giving the ability to use "" selection operator in the extra-spec. Refer to `Launchpad bug 1815700 `_ for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/ganesha-dynamic-update-access-be80bd1cb785e733.yaml0000664000175000017500000000062300000000000027631 0ustar00zuulzuul00000000000000--- features: - The new class `ganesha.GaneshaNASHelper2` in the ganesha library uses dynamic update of export feature of NFS-Ganesha versions v2.4 or newer to modify access rules of a share in a clean way. It modifies exports created per share rather than per share access rule (as with `ganesha.GaneshaNASHelper`) that introduced limitations and unintuitive end user experience. 
././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=manila-21.0.0/releasenotes/notes/ganesha-store-exports-and-export-counter-in-ceph-rados-052b925f8ea460f4.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/ganesha-store-exports-and-export-counter-in-ceph-rados-052b925f8ea40000664000175000017500000000022500000000000033004 0ustar00zuulzuul00000000000000--- features: - Added ganesha driver feature to store NFS-Ganesha's exports and export counter directly in a HA storage, Ceph's RADOS objects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/generic-driver-noop-interface-driver-24abcf7af1e08ff9.yaml0000664000175000017500000000071300000000000031331 0ustar00zuulzuul00000000000000--- features: - | A "no-op" interface driver (manila.network.linux.interface.NoopInterfaceDriver) has been introduced to work with drivers that create and manage lifecycle of share servers (``driver_handles_share_servers=True``) through service instance virtual machines using OpenStack Compute. This interface driver can be used when manila-share is running on a machine that has access to the administrator network used by Manila. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/generic-route-racing-adf92d212f1ab4de.yaml0000664000175000017500000000014100000000000026213 0ustar00zuulzuul00000000000000--- fixes: - Fixed race-condition in generic driver while updating network routes in host. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/glusterfs-add-directory-layout-extend-shrink-fd2a008f152edbf5.yaml0000664000175000017500000000011700000000000032772 0ustar00zuulzuul00000000000000--- features: - For glusterfs_nfs driver, added share extend/shrink support. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/glusterfs-handle-new-volume-option-xml-schema-dad06253453c572c.yaml0000664000175000017500000000014600000000000032617 0ustar00zuulzuul00000000000000--- fixes: - GlusterFS drivers now handle the volume option XML schema of GlusterFS >= 3.7.14. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/gpfs-nfs-server-type-default-value-change-58890adba373737c.yaml0000664000175000017500000000030100000000000031706 0ustar00zuulzuul00000000000000--- other: - | Changing the default value of 'gpfs_nfs_server_type' configuration parameter from KNFS to CES as Spectrum Scale provide NFS service with Ganesha server by default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/graduate-share-groups-feature-5f751b49ccc62969.yaml0000664000175000017500000000101700000000000027601 0ustar00zuulzuul00000000000000--- prelude: > - | Share group APIs have graduated from their `experimental feature state `_ from API version ``2.55``. Share group types can be created to encompass one or more share types, share groups can be created, updated, snapshotted and deleted, and shares can be created within share groups. These actions no longer require the inclusion of ``X-OpenStack-Manila-API-Experimental`` header in the API requests. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/graduate-share-replication-feature-17aec111b6c5bf0f.yaml0000664000175000017500000000076300000000000030764 0ustar00zuulzuul00000000000000--- prelude: > - | Share replication APIs have graduated from their `experimental feature state `_ from API version ``2.56``. 
One or more share replicas can be created from a given share. They can also be promoted to be considered the active share, resynchronized and deleted. These actions no longer require the inclusion of ``X-OpenStack-Manila-API-Experimental`` header in the API requests. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/guru-meditation-support-7872da69f529a6c2.yaml0000664000175000017500000000051200000000000026563 0ustar00zuulzuul00000000000000--- features: - Adds support to generate Guru Meditation Reports(GMR) for manila services. GMR provides useful debugging information that can be used to obtain an accurate view on the current live state of the system. For example, what threads are running, what configuration parameters are in effect, and more. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hitachi-driver-cifs-user-support-3f1a8b894fe3e9bb.yaml0000664000175000017500000000046100000000000030465 0ustar00zuulzuul00000000000000--- prelude: > Add support for CIFS protocol in Manila HNAS driver. features: - Added support for CIFS shares in Hitachi HNAS driver. It supports user access type, where a permission for a user or a group can be added/removed. Also, accepts 'read write' and 'read only' as access level. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hnas-driver-rename-7ef74fe720f7e04b.yaml0000664000175000017500000000165400000000000025566 0ustar00zuulzuul00000000000000--- features: - Renamed all HDS mentions on HNAS driver to Hitachi and moved driver to another folder. upgrade: - HNAS driver vendor changed from HDS to Hitachi. - New HNAS driver location. 
- New HNAS config options hitachi_hnas_ip, hitachi_hnas_user, hitachi_hnas_password, hitachi_hnas_evs_id, hitachi_hnas_evs_ip, hitachi_hnas_file_system_name, hitachi_hnas_ssh_private_key, hitachi_hnas_cluster_admin_ip0, hitachi_hnas_stalled_job_timeout, hitachi_hnas_driver_helper and hitachi_hnas_allow_cifs_snapshot_while_mounted. deprecations: - HNAS driver location was deprecated. - All HNAS driver config options were deprecated hds_hnas_ip, hds_hnas_user, hds_hnas_password, hds_hnas_evs_id, hds_hnas_evs_ip, hds_hnas_file_system_name, hds_hnas_ssh_private_key, hds_hnas_cluster_admin_ip0, hds_hnas_stalled_job_timeout, hds_hnas_driver_helper and hds_hnas_allow_cifs_snapshot_while_mounted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hnas-manage-unmanage-snapshot-support-0d939e1764c9ebb9.yaml0000664000175000017500000000012100000000000031327 0ustar00zuulzuul00000000000000--- features: - Added manage/unmanage snapshot support to Hitachi HNAS Driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hnas-mountable-snapshots-4fbffa05656112c4.yaml0000664000175000017500000000047700000000000026737 0ustar00zuulzuul00000000000000--- features: - Added Mountable Snapshots support to HNAS driver. upgrade: - If using existing share types with the HNAS back end, set the 'mount_snapshot_support' extra-spec to allow creating shares that support mountable snapshots. This modification will not affect existing shares of such types. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hnas-revert-to-snapshot-a2405cd6653b1e85.yaml0000664000175000017500000000054600000000000026434 0ustar00zuulzuul00000000000000--- features: - Added Revert-to-snapshot functionality to Hitachi NAS driver. 
upgrades: - If using existing share types with the HNAS back end, set the 'revert_to_snapshot_support' extra-spec to allow creating shares that support in-place revert-to-snapshot functionality. This modification will not affect existing shares of such types. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hnas_allow_managed_fix-4ec7794e2035d3f2.yaml0000664000175000017500000000046100000000000026377 0ustar00zuulzuul00000000000000--- fixes: - Fixed error when allowing access to a managed share in HDS HNAS driver. - Fixed error when attempting to create a new share from a snapshot taken from a managed share in HDS HNAS driver. - Fixed ID inconsistencies in log when handling managed shares in HDS HNAS driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hostonly-filter-1a17a70dd0aafb86.yaml0000664000175000017500000000072300000000000025266 0ustar00zuulzuul00000000000000--- features: - Add OnlyHostFilter to manila's scheduler. This filter needs admin to specify host@backend#pool to "share.scheduler_hints.only_host" in the request payload when creating a manila share. The hint is used only for share creation and not stored as share metadata. For non-admin users the OnlyHostFilter will always be ignored. upgrade: - To add OnlyHostFilter to an active deployment, its reference must be enabled in manila.conf. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hpe3par-rw-snapshot-shares-f7c33b4bf528bf00.yaml0000664000175000017500000000012300000000000027157 0ustar00zuulzuul00000000000000--- features: - Add read-write functionality for HPE 3PAR shares from snapshots. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hsp-driver-e00aff5bc89d4b54.yaml0000664000175000017500000000033500000000000024224 0ustar00zuulzuul00000000000000--- prelude: > Add Hitachi HSP driver. features: - Added new Hitachi HSP driver, that supports manage/unmanage and shrinking of shares, along with all the minimum driver features. Does not support snapshots.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/huawei-driver-replication-8ed62c8d26ad5060.yaml0000664000175000017500000000065700000000000027072 0ustar00zuulzuul00000000000000--- features: - Huawei driver now supports replication. It reports a replication type 'dr'(Disaster Recovery), so "replication_type=dr" can be used in the share type extra specs to schedule shares to the Huawei driver when configured for replication. - The huawei driver now supports turning off snapshot support. issues: - When snapshot support is turned on in the Huawei driver, replication cannot be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/huawei-driver-sectorsize-config-da776132ba6da2a7.yaml0000664000175000017500000000036400000000000030260 0ustar00zuulzuul00000000000000--- features: - Huawei driver supports setting the backend 'sectorsize' while creating shares and administrators can use this capability via the share types extra-spec 'huawei_sectorsize:sectorsize' or via the XML configuration file. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/huawei-driver-support-snapshot-revert-1208c586bd8db98e.yaml0000664000175000017500000000020700000000000031441 0ustar00zuulzuul00000000000000--- features: - | Huawei driver implements the snapshot reverting feature, by Huawei storage's snapshot-rollback capability. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/huawei-pool-disktype-support-0a52ba5d44da55f9.yaml0000664000175000017500000000031400000000000027645 0ustar00zuulzuul00000000000000--- features: - Add support for reporting pool disk type in Huawei driver. `huawei_disk_type` extra-spec in the share type. Valid values for this extra-spec are 'ssd', 'sas', 'nl_sas' or 'mix'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/huawei-support-access-all-ip-4994c10ff75ac683.yaml0000664000175000017500000000013300000000000027337 0ustar00zuulzuul00000000000000--- fixes: - Huawei driver now properly handles access for all IP addresses (0.0.0.0/0). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/human-readable-export-location-share-support-a72cd2f0e92c41c7.yaml0000664000175000017500000000120400000000000032654 0ustar00zuulzuul00000000000000--- features: - A human readable ``mount_point_name`` can now be specified while creating shares through the mount_point_name parameter. Manila will prepend a prefix to the mount point name which can be configured through the ``provisioning:mount_point_prefix`` share type extra spec. In case this extra spec is not available in the share type, Manila will prepend a project identification to the mount point name. 
Project id will be added to this friendly name ``provisioning:mount_point_prefix`` share type is not provided during provisioning. The LVM driver now supports human readable export locations.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-e7c90fb62426c281.yaml0000664000175000017500000000012100000000000031600 0ustar00zuulzuul00000000000000--- features: - Add support for hybrid aggregates to the NetApp cDOT drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/ibm-gpfs-ces-support-3498e35d9fea1b55.yaml0000664000175000017500000000100100000000000025771 0ustar00zuulzuul00000000000000--- prelude: > Refactored GPFS driver to support NFS Ganesha through Spectrum Scale CES framework. upgrade: - Added a new config option is_gpfs_node which will determine if manila share service is running on GPFS node or not. Added mmnfs commands in the root wrap share.filters. Removed scp and ssh commands from root wrap share.filters. deprecations: - Deprecated knfs_export_options configuration parameter as export options are now configured in extra specs of share types. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/ibm-gpfs-manage-support-c110120c350728e3.yaml0000664000175000017500000000045200000000000026201 0ustar00zuulzuul00000000000000--- features: - Added manila manage/unmanage feature support for GPFS driver. The existing fileset should be an independent fileset and should not have any NFS export over the fileset path. With this prerequisite existing GPFS filesets can be brought under Manila management. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/infinidat-add-infinibox-driver-ec652258e710d6a0.yaml0000664000175000017500000000012000000000000027654 0ustar00zuulzuul00000000000000--- features: - Added a new driver for the INFINIDAT InfiniBox storage array. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/infinidat-balance-network-spaces-ips-25a9f1e587b87156.yaml0000664000175000017500000000024400000000000030742 0ustar00zuulzuul00000000000000--- features: - The INFINIDAT share driver now supports multiple export locations per share, defined by the enabled IP addresses in the chosen network space. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/infinidat-delete-datasets-with-snapshots-4d18f8c197918606.yaml0000664000175000017500000000020700000000000031604 0ustar00zuulzuul00000000000000--- fixes: - Fixed a failure in the INFINIDAT share driver which occurs while deleting shares with externally created snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/infortrend-manila-driver-a1a2af20de6368cb.yaml0000664000175000017500000000023500000000000027025 0ustar00zuulzuul00000000000000--- prelude: > Added Manila share driver for Infortrend storage systems. features: - The new Infortrend driver supports GS/GSe Family storage systems. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/inspur-as13000-driver-41f6b7caea82e46e.yaml0000664000175000017500000000026000000000000025736 0ustar00zuulzuul00000000000000--- prelude: > Add Inspur AS13000 driver. 
features: - Added new Inspur AS13000 driver, which supports snapshots operation along with all the minimum driver features. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/inspur-instorage-driver-51d7a67f253f3ecd.yaml0000664000175000017500000000026300000000000026667 0ustar00zuulzuul00000000000000--- prelude: > Add Inspur InStorage driver. features: - Add new Inspur InStorage driver, support share create, delete, extend, and access through NFS and CIFS protocol. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/inspur-support-rwx-for-cifs-permission-4279f1fe7a59fd00.yaml0000664000175000017500000000021200000000000031547 0ustar00zuulzuul00000000000000--- fixes: - Fixed CIFS permission issue with Inspur AS13000 driver so that files and folders can be created and deleted correctly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/introduce-tooz-library-5fed75b8caffcf42.yaml0000664000175000017500000000131300000000000026736 0ustar00zuulzuul00000000000000--- features: - Add support for the tooz library. - Allow configuration of file/distributed locking for the share manager service. upgrade: - New options are necessary in manila.conf to specify the coordination back-end URL (for example, a Distributed Locking Manager (DLM) back-end or a file based lock location). The configuration determines the tooz driver invoked for the locking/coordination. fixes: - Share replication workflows are coordinated by the share-manager service with the help of the tooz library instead of oslo_concurrency. This allows for deployers to configure Distributed Locking Management if multiple manila-share services are run across different nodes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/limiting-ssh-access-from-tenant-network-6519efd6d6895076.yaml0000664000175000017500000000024100000000000031451 0ustar00zuulzuul00000000000000--- security: - | Service Instance Module - Added option to block port 22 from other subnets than manila service network using neutron security groups.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/lv-mounting-inside-containers-af8f84d1fab256d1.yaml0000664000175000017500000000010700000000000030032 0ustar00zuulzuul00000000000000--- fixes: - Makes docker containers actually mount logical volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/lvm-export-ips-5f73f30df94381d3.yaml0000664000175000017500000000115600000000000024641 0ustar00zuulzuul00000000000000--- features: - Added new config option 'lvm_share_export_ips' which allows a list of IP addresses to use for export locations for the LVM driver. Every share created will be exported on every IP address. This new option supercedes 'lvm_share_export_ip'. upgrade: - After upgrading, rename lvm_share_export_ip to lvm_share_export_ips in the manila.conf file to avoid a deprecation warning. As long as the list remains a single element, functionality is unchanged. deprecations: - The 'lvm_share_export_ip' option is deprecated and will be removed. Use 'lvm_share_export_ips' instead. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/macrosan-add-configuration-option-282fa1026748c4f9.yaml0000664000175000017500000000034700000000000030355 0ustar00zuulzuul00000000000000--- upgrade: - Added a new configuration option ``macrosan_ssl_cert_verfiy`` to configure whether the driver should only allow verified ssl certificates. This option defaults to ``False`` to allow backwards compatibility.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/macrosan-manila-driver-4644ed2cdd51b030.yaml0000664000175000017500000000010300000000000026310 0ustar00zuulzuul00000000000000--- features: - Added Manila driver for Macrosan storage system. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manage-share-in-zfsonlinux-driver-e80921081206f75b.yaml0000664000175000017500000000012000000000000030211 0ustar00zuulzuul00000000000000--- features: - Added support of 'manage share' feature to ZFSonLinux driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manage-share-snapshot-in-huawei-driver-007b2c763fbdf480.yaml0000664000175000017500000000010300000000000031330 0ustar00zuulzuul00000000000000--- features: - Manage share snapshot on array in huawei driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manage-snapshot-in-zfsonlinux-driver-6478d8d5b3c6a97f.yaml0000664000175000017500000000012300000000000031216 0ustar00zuulzuul00000000000000--- features: - Added support of 'manage snapshot' feature to ZFSonLinux driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manage-unmanage-replicated-share-fa90ce34372b6df5.yaml0000664000175000017500000000060400000000000030315 0ustar00zuulzuul00000000000000--- features: - Share can be managed with replication_type extra-spec in the share_type issues: - Managing a share with replication_type can only be possible if the share does not already have replicas. fixes: - Retrying to manage shares in ``manage_error`` status works as expected. - Snapshot manage and unmange operations are disabled for shares with replicas. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manage-unmanage-share-servers-cd4a6523d8e9fbdf.yaml0000664000175000017500000000210400000000000030035 0ustar00zuulzuul00000000000000--- features: - Added APIs with default policy set to 'rule:admin_api' that allow managing and unmanaging share servers. Managing Share servers is useful for importing pre-existing shares and snapshots into Manila's management when the driver is configured in ``driver_handles_share_servers`` enabled mode. Unmanaging removes manila share servers from the database without removing them from the back end. Managed share servers, or share servers that have had one or more shares unmanaged will not be deleted automatically when they do not have any shares managed by Manila, even if the config options [DEFAULT]/delete_share_server_with_last_share or [DEFAULT]/automatic_share_server_cleanup have been set to True. - Updated Manage Share API to be able to manage shares in ``driver_handles_share_servers`` enabled driver mode by supplying the Share Server ID. - Updated Unmanage Share and Unmanage Snapshot APIs to allow unmanaging shares and snapshots in ``driver_handles_share_servers`` enabled driver mode. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manage-unmanage-snapshot-bd92164472638f44.yaml0000664000175000017500000000006000000000000026442 0ustar00zuulzuul00000000000000--- features: - Manage and unmanage snapshot. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/manage-unmanage-snapshot-in-netapp-cdot-driver-5cb4b1619c39625a.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manage-unmanage-snapshot-in-netapp-cdot-driver-5cb4b1619c39625a.yam0000664000175000017500000000013300000000000032526 0ustar00zuulzuul00000000000000--- features: - Add support for snapshot manage/unmanage to the NetApp cDOT driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manila-netapp-storage-efficiency-policy-5fa0b2b15901bf93.yaml0000664000175000017500000000100300000000000031547 0ustar00zuulzuul00000000000000features: - | It is now possible to specify pre-created NetApp efficiency policies through the use of the `netapp:efficiency_policy` share type extra spec. In the case of DHSS=True, the share server is not available upfront for efficiency policy creation. Users can retype to apply the policy, or if the share-network is constant (i.e., one share service is created for one share network), they can create an efficiency policy for the share server and apply it to DHSS=True shares.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manila-share-support-recycle-bin-1cc7859affaf8887.yaml0000664000175000017500000000153200000000000030355 0ustar00zuulzuul00000000000000--- features: - | Manila now supports a "recycle bin" for shares. 
End users can soft-delete their shares and have the ability to restore them for a specified interval. This interval defaults to 7 days and is configurable via "soft_deleted_share_retention_time". After this time has elapsed, soft-deleted shares are automatically cleaned up. upgrade: - | The share entity now contains two new fields: ``is_soft_deleted`` and ``scheduled_to_be_deleted_at``. The ``is_soft_deleted`` will be used to identify shares in the recycle bin.. The ``scheduled_to_be_deleted_at`` field to show when the share will be deleted automatically. A new parameter called ``is_soft_deleted`` was added to the share list API, and users will be able to query shares and filter out the ones that are currently in the recycle bin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manila-status-upgrade-check-framework-aef9b5cf9d8e3bda.yaml0000664000175000017500000000072300000000000031650 0ustar00zuulzuul00000000000000--- prelude: > Added new tool ``manila-status upgrade check``. features: - | New framework for ``manila-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Manila upgrade to ensure if the upgrade can be performed safely. upgrade: - | Operator can now use new CLI tool ``manila-status upgrade check`` to check if Manila deployment can be safely upgraded from N-1 to N release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/manila-wsgi-debug-log-opts-691a7647655b4778.yaml0000664000175000017500000000022700000000000026564 0ustar00zuulzuul00000000000000--- features: - | Now the ``manila-wsgi`` WSGI application records all options and loaded values to its log file, when debug log is enabled. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/maprfs-manila-drivers-1541296f26cf78fd.yaml0000664000175000017500000000007300000000000026137 0ustar00zuulzuul00000000000000--- features: - Added share backend drivers for MapR-FS. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/max-share-extend-size-on-type-0528be9a5c27678b.yaml0000664000175000017500000000031000000000000027435 0ustar00zuulzuul00000000000000--- features: - Admin-only ability to add maximum share extend size restrictions which can be set on a per share-type granularity. Added new extra spec 'provisioning:max_share_extend_size'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/migration-access-fix-71a0f52ea7a152a3.yaml0000664000175000017500000000036000000000000025773 0ustar00zuulzuul00000000000000--- fixes: - Fixed access_allow and access_deny displaying incorrect error message during migration of a share. - Fixed access rule concurrency in migration that was preventing new rules from being added to the migrated share. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/migration-empty-files-01d1a3caa2e9705e.yaml0000664000175000017500000000014300000000000026263 0ustar00zuulzuul00000000000000--- fixes: - Fixed share migration error using Data Service when there are only empty files. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/migration-share-type-98e3d3c4c6f47bd9.yaml0000664000175000017500000000012300000000000026144 0ustar00zuulzuul00000000000000--- features: - Administrators can now change a share's type during a migration. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/min-max-share-size-on-type-a7c69046e5c57944.yaml0000664000175000017500000000033600000000000026667 0ustar00zuulzuul00000000000000--- features: - Ability to add minimum and maximum share size restrictions which can be set on a per share-type granularity. Added new extra specs 'provisioning:max_share_size' and 'provisioning:min_share_size'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/mount-volume-path-ff3c3f83039e1a3f.yaml0000664000175000017500000000036400000000000025466 0ustar00zuulzuul00000000000000--- upgrade: - Added a new config option ``container_volume_mount_path``. This option defines the path where ContainerShareDriver driver should mount a logical volume on the host prior to providing access to it from a container. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/move-emc-share-driver-to-dell-emc-dir-1ec34dee0544270d.yaml0000664000175000017500000000035000000000000030743 0ustar00zuulzuul00000000000000--- upgrade: - The EMCShareDriver is moved to the dell_emc directory. share_driver entry in manila.conf needs to be changed to manila.share.drivers.dell_emc.driver.EMCShareDriver. Vendor name is changed to "Dell EMC". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/multi-segment-support-fa171a8e3201d54e.yaml0000664000175000017500000000013200000000000026266 0ustar00zuulzuul00000000000000--- features: - Added port binding support for neutron networks with multiple segments. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-async-snapmirror-schedule-config-54a33647735f751c.yaml0000664000175000017500000000034500000000000032147 0ustar00zuulzuul00000000000000--- features: - | Added new config option `netapp_snapmirror_schedule` which allows admin to configure netapp asynchronous snapmirror schedule. Admin must make sure schedule is already created on the backend filer. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-extra-spec-max_files_multiplier-b74692c0d54f4678.yaml0000664000175000017500000000034000000000000032153 0ustar00zuulzuul00000000000000--- features: - | Netapp ONTAP driver introduced new extra-spec 'max_files_multiplier'. This will specify how much inodes (files) should be possible at maximum depending on volume (share) size. Max value is 8. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-fpolicy-support-dd31628a1c8e64d6.yaml0000664000175000017500000000216400000000000027177 0ustar00zuulzuul00000000000000--- features: - | Added support for FPolicy on NetApp ONTAP driver. FPolicy allows creation of file policies that specify file operation permissions according to file type. This feature can be enabled using the following extra-specs: - ``netapp:fpolicy_extensions_to_include``: specifies file extensions to be included for screening. Values should be provided as comma separated list. - ``netapp:fpolicy_extensions_to_exclude``: specifies file extensions to be excluded for screening. Values should be provided as comma separated list. - ``netapp:fpolicy_file_operations``: specifies all file operations to be monitored. Values should be provided as comma separated list. 
FPolicy works for backends with and without share server management. When using NetApp backends with SVM administrator accounts, make sure that the assigned access-control role has access set to "all" for "vserver fpolicy" directory. This feature does not work with share replicas to avoid failures on replica promotion, due to lack of FPolicy resources in the destination SVM. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-migration-through-svm-migrate-c1e29fce19758324.yaml0000664000175000017500000000021100000000000031636 0ustar00zuulzuul00000000000000--- features: - | The NetApp ONTAP driver now supports nondisruptive share server migration for clusters with version >= 9.10. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=manila-21.0.0/releasenotes/notes/netapp-add-new-security-certificate-for-vserver-aba543211ae6b811.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-new-security-certificate-for-vserver-aba543211ae6b811.ya0000664000175000017500000000056000000000000032633 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP driver now allows cloud operator to define security certificate expire days for vserver. So instead of using vserver's default security certificate with 365 expire days, cloud operator can ask backend to create new security certificate with given expire days using config option 'netapp_security_cert_expire_days'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-security-service-update-718a68ebe60fd2b5.yaml0000664000175000017500000000065400000000000030671 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP driver now supports add and update security services when they are associated with in use share networks. 
Both add and update operations are supported by all three security service types: ``active_directory``, ``kerberos`` and ``ldap``. In order to update their parameters in a non-disruptive way, ``active_directory`` and ``kerberos`` don't support ``domain`` updates. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-share-server-migration-663f7ced1ef93558.yaml0000664000175000017500000000212300000000000030430 0ustar00zuulzuul00000000000000--- features: - | The NetApp ONTAP driver now supports migration of share servers across clusters. While migrating a share server, the source remains writable during the first phase of the migration, until the cutover is issued. It is possible to specify a new share network for the destination share server, only if the associated security services remain unchanged. Share server migration relies on ONTAP features available only in versions equal and greater than ``9.4``. In order to have share server migration working across ONTAP clusters, they must be peered in advance. In order to adapt to different workloads and provide more flexibility on managing cluster’s free space a new configuration option was added: - ``netapp_server_migration_check_capacity``: Specifies if a capacity validation at the destination backend must be made before proceeding with the share server migration. When enabled, the NetApp driver will validate if the destination pools can hold all shares and snapshots belonging to the source share server. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-support-for-adaptive-qos-d036238e7f29cf75.yaml0000664000175000017500000000146600000000000030651 0ustar00zuulzuul00000000000000--- features: - | Added support for Adaptive QoS policies that have been pre-created on the storage system, with clustered ONTAP version 9.4 or higher. To use this feature, configure a Manila share type with the extra-spec "netapp:adaptive_qos_policy_group" and value set to the qos policy group on the ONTAP storage system, for example: netapp:adaptive_qos_policy_group=platform3 Note that a cluster scoped account must be used in the driver configuration in order to use QoS in clustered ONTAP. Other notes: - This only works for backends without share server management. - This does not work for share replicas or can fail when creating share from snapshot across backends, if the destination backend does not have the pre-created "adaptive_qos_policy_group". ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/netapp-add-support-for-logical-space-reporting-bb3b582f162664c8.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-support-for-logical-space-reporting-bb3b582f162664c8.yam0000664000175000017500000000036200000000000032572 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP driver now allow to create share server with logical space reporting. New config option "netapp_enable_logical_space_reporting" is added and its value must be set to 'True' to enable this feature. 
././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=manila-21.0.0/releasenotes/notes/netapp-add-update-from-network-subnet-metadata-method-0615490d86958c3d.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-update-from-network-subnet-metadata-method-0615490d869580000664000175000017500000000067100000000000032523 0ustar00zuulzuul00000000000000--- fixes: - | The NetApp ONTAP driver is now able to update the `showmount` and `pnfs` configurations in a pre-created share server. Please use the share network subnet metadata set feature to update these values. The valid values for those metadata keys are 'true/false'. Also note, earlier support of updating `showmount` using share metadata is removed since its supported now by share network subnet metadata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-add-update-share-from-metadata-method-71f308c2b05d59bb.yaml0000664000175000017500000000035200000000000032365 0ustar00zuulzuul00000000000000--- fixes: - | The NetApp ONTAP driver is now able to update the current `snapshot_policy` and/or both `showmount` configurations in a pre-created share. Please use the share metadata set feature to update these values. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-barbican-share-encryption-support-46ec89c70557e526.yaml0000664000175000017500000000062100000000000031707 0ustar00zuulzuul00000000000000--- features: - | The NetApp driver now supports configuring barbican keystore with the share server whenever an encryption-key-ref is passed in share create command and it enables encryption on the created share. The feature at this level offers only encrypted share creation and deletion. Note: Controlling encryption keys with NetApp will only be possible with DHSS=True. 
././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=manila-21.0.0/releasenotes/notes/netapp-bug-2061976-only-modify-qos-policy-if-troughput-changed-ce1b56a3bb3c3d78.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-bug-2061976-only-modify-qos-policy-if-troughput-changed-ce1b0000664000175000017500000000034100000000000032711 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver fixed to apply QoS policy on share only if the throughput is changed. For more details, please check `Launchpad bug #2061976 `_ ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=manila-21.0.0/releasenotes/notes/netapp-cdot-add-max-over-subscription-ratio-pool-stats-eea763b3b9b3ba7d.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-add-max-over-subscription-ratio-pool-stats-eea763b3b9b30000664000175000017500000000027500000000000033164 0ustar00zuulzuul00000000000000--- features: - | The NetApp cDOT driver now reports the max_over_subscription_ratio configuration, which can be set per share back end, via scheduler-stats/pools/detail API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-apply-mtu-from-network-provider-d12179a2374cdda0.yaml0000664000175000017500000000041600000000000032412 0ustar00zuulzuul00000000000000--- features: - The NetApp cDOT driver operating in ``driver_handles_share_servers = True`` mode applies the Maximum Transmission Unit (MTU) from the network provider where available when creating Logical Interfaces (LIFs) for newly created share servers. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-clone-split-control-a68b5fc80f1fc368.yaml0000664000175000017500000000054100000000000030213 0ustar00zuulzuul00000000000000--- features: - NetApp cDOT driver now supports a scoped extra-spec ``netapp:split_clone_on_create`` to be used in share types when creating shares (NetApp FlexClone) from snapshots. If this extra-spec is not included, or set to ``false``, the cDOT driver will perform the clone-split only if/when the parent snapshot is being deleted.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-configure-nfs-versions-83e3f319c4592c39.yaml0000664000175000017500000000075300000000000030510 0ustar00zuulzuul00000000000000--- features: - NFS Versions can be configured when using the NetApp cDOT driver with driver mode ``driver_handles_share_servers = True``. upgrade: - Added new configuration option ``netapp_enabled_share_protocols`` to configure NFS versions with the NetApp cDOT driver operating in driver mode ``driver_handles_share_servers = True``. If this option is not specified, new share servers (NetApp vServers) will be created supporting NFS Version 3 and NFS Version 4.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-multi-svm-configure-nfs-95c9154e1aa28751.yaml0000664000175000017500000000123000000000000030553 0ustar00zuulzuul00000000000000--- features: - | For NetApp ONTAP driver, administrators are now able to set share servers max NFS transfer limits. These limits can be configured by setting the `netapp:tcp_max_xfer_size` and `netapp:udp_max_xfer_size` extra-specs. The driver will consider these limits while deciding to create or reuse share servers. 
While bringing a share under Manila management, the driver will check if the share type extra-specs values match the share server configured NFS limits. This change does not have effect in DHSS=False environments and relies on ONTAP features available only in versions equal to and greater than ``9.4``. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/netapp-cdot-optimized-migration-within-share-server-92cfa1bcf0c317fc.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-optimized-migration-within-share-server-92cfa1bcf0c317f0000664000175000017500000000032100000000000033213 0ustar00zuulzuul00000000000000--- features: - Driver assisted migration support has been added to the NetApp cDOT driver to efficiently and nondisruptively migrate shares within Vservers by ensuring data, snapshots and metadata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-quality-of-service-limits-c1fe8601d00cb5a8.yaml0000664000175000017500000000120400000000000031305 0ustar00zuulzuul00000000000000--- features: - The NetApp driver now supports Quality of Service extra specs. To create a share on ONTAP with qos support, set the 'qos' extra-spec in your share type to True and use one of 'netapp:maxiops' and 'netapp:maxbps' scoped extra-specs to set absolute limits. To set size based limits, use scoped extra-specs 'netapp:maxiopspergib' or 'netapp:maxbpspergib'. QoS policies on the back end are created exclusive to each manila share. upgrade: - A new configuration option 'netapp_qos_policy_group_name_template' has been added to allow overriding the naming of QoS policies created by the NetApp driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-ss-multiple-dns-ip-df42a217977ce44d.yaml0000664000175000017500000000033600000000000027672 0ustar00zuulzuul00000000000000--- features: - | The NetApp ONTAP driver security service dns_ip parameter also takes a list of comma separated DNS IPs for vserver dns configuration. Allows HA setup, where DNS can be down for maintenance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-switch-volume-efficiency-bd22733445d146f0.yaml0000664000175000017500000000043100000000000030753 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver volume efficiency settings now behave consistently: like on volume creation now also modification, which is currently consumed by manage and migration, will make sure that deduplication and compression settings are applied correctly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-cdot-use-security-service-ou-4dc5835c9e00ad9d.yaml0000664000175000017500000000032400000000000031016 0ustar00zuulzuul00000000000000--- features: - | The NetApp cDOT driver uses the ou field from security services to set the organizational unit of a vserver's active directory configuration. This is done at CIFS server creation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-check-snapshot-after-creation-1b59038a3a117c65.yaml0000664000175000017500000000060000000000000030724 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver now checks after sending snapshot-create request that the snapshot has been properly created. If snapshot is not checked, manila assumes that working snapshot is available. 
But when user tries to access the snapshot, request could fail. `Launchpad bug 1971710 `_ for more details. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=manila-21.0.0/releasenotes/notes/netapp-consider-last-transfer-size-error-for-replica-state-7ef49186a1b8a5a0.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-consider-last-transfer-size-error-for-replica-state-7ef491860000664000175000017500000000075000000000000033213 0ustar00zuulzuul00000000000000--- features: - | NetApp driver now considers ``last-transfer-size`` and ``last-transfer-error`` fields of the snapmirror in addition to existing ``last-transfer-end-timestamp`` to decide whether replica is in_sync or out_of_sync. Added new config option `netapp_snapmirror_last_transfer_size_limit` (default 1MB). If value of `last-transfer-size` field is greater than config value or if `last-transfer-error` field is present, then replica is out_of_sync. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/netapp-create-share-from-snapshot-another-pool-330639b57aa5f04d.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-create-share-from-snapshot-another-pool-330639b57aa5f04d.yam0000664000175000017500000000046100000000000032573 0ustar00zuulzuul00000000000000--- features: - | The NetApp driver now supports efficiently creating new shares from snapshots in pools or back ends different than that of the source share. In order to have this functionality working across different back ends, replication must be enabled and configured accordingly. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-default-ipv6-route-13a9fd4959928524.yaml0000664000175000017500000000012400000000000026525 0ustar00zuulzuul00000000000000--- features: Added support for IPv6 default gateways to the NetApp driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-delay-clone-split-after-share-creation-fbae159d988fe2a0.yaml0000664000175000017500000000050400000000000032762 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2029467 `_: When creating a share from a snapshot, the clone split operation is delayed until the share creation is complete. This ensures that the share creation operation is not blocked by the clone split operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-enable-aes-encryption-for-cifs-a1f98e5cb0010ea0.yaml0000664000175000017500000000054100000000000031216 0ustar00zuulzuul00000000000000--- fixes: - | The aes-encryption fixes KRB5KDC_ERR_ETYPE_NOSUPP error observed during CIFS share create on NetApp ONTAP storage. Admin needs to set 'netapp_cifs_aes_encryption' config option to 'True' in order to enable it. For more details, please check `Launchpad bug #2111813 `_ ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=manila-21.0.0/releasenotes/notes/netapp-fix-export-location-for-readable-replica-promote-8e0c4be5f1966e53.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-fix-export-location-for-readable-replica-promote-8e0c4be5f190000664000175000017500000000037600000000000033206 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver now updates correct export location after promotion of share replica of replication type readable. 
For more details, please check `Launchpad bug #2104153 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-flexgroup-support-9b3f30afb94d3a86.yaml0000664000175000017500000000317700000000000027105 0ustar00zuulzuul00000000000000--- features: - | The NetApp driver has been working with FlexVol ONTAP volumes. The driver does not support scaling FlexVol volumes higher than 100 TiB, which was a theoretical limit for the large namespace that these containers were meant to handle. ONTAP's Flexgroup volumes eliminate such limitations. So, added the support for provisioning share as FlexGroup in the NetApp driver. The FlexGroup provision is enabled by new option ``netapp_enable_flexgroup``, which will make the driver report a single pool represeting all aggregates. The selection on which aggregates the FlexGroup share will reside is up to ONTAP. If the administrator desires to control that selection through Manila scheduler, the configuration option ``netapp_flexgroup_pools`` can be used to tune the storage pool layout. When enabling FlexGroup, the FlexVol pools continue enabled by default. For having only FlexGroup, the new option ``netapp_flexgroup_pool_only`` must be set to `True`. Now, each NetApp pool will report the capability: `netapp_flexgroup` informing which type of share resides there (FlexGroup or FlexVol). The following operations are allowed with FlexGroup shares (DHSS True/False and NFS/CIFS): - Create/Delete share; - Shrink/Extend share; - Create/Delete snapshot; - Revert to snapshot; - Manage/Unmanage snapshots; - Create from snapshot; - Replication; - Manage/Unmanage shares; FlexGroup feature requires ONTAP version 9.8 or newer. Replication with more than one non-active replica per share requires ONTAP 9.9.1 or newer. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-human-readable-export-location-support-b04af9f5054ad541.yaml0000664000175000017500000000027700000000000032770 0ustar00zuulzuul00000000000000features: - | The NetApp ONTAP driver now supports the common capability "mount_point_name_support". It allows users to specify a custom "mount_point_name" when creating shares.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-ipv6-support-f448e99a7c112362.yaml0000664000175000017500000000014400000000000025531 0ustar00zuulzuul00000000000000--- features: Added support for IPv6 export location and access rules to the NetApp driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-manage-unmanage-share-servers-635496b46e306920.yaml0000664000175000017500000000035400000000000030605 0ustar00zuulzuul00000000000000--- features: - Added managing and unmanaging of share servers functionality to the NetApp driver, allowing for shares and snapshots to be managed and unmanaged in driver mode ``driver_handles_share_servers`` set to True. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-multiple-subnets-support-274a37c5ddb43ca1.yaml0000664000175000017500000000042700000000000030364 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP: Add support for multiple subnets per availability zone when in the same network segment. In addition, new share network subnets can now be added to share networks with in-use share servers (that has one or more shares in place). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-ontap-rest-api-client-4c83c7b931f950cf.yaml0000664000175000017500000000144100000000000027414 0ustar00zuulzuul00000000000000--- features: - | NetApp driver: introduced the option to request ONTAP operations through REST API when using ONTAP 9.12.1 or greater. The new option `netapp_use_legacy_client` allows switching between the old ZAPI client approach and new REST client. It is default to `True`, meaning that the drivers will keep working as before using ZAPI operations. If desired, this option can be set to `False` connecting with new REST client that performs REST API operations if it is available, otherwise falls back to ZAPI. Also, an option called `netapp_rest_operation_timeout` was added to allow the user to set the maximum amount of time expected to get an output from a synchronous operation when using REST API. By default, the timeout value is 60 seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-readable-replica-cb7d5460ad7b3b0e.yaml0000664000175000017500000000023700000000000026572 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP driver: added support for `readable` replication. The driver will continue having support for the `dr` type as well. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-remove-last-transfer-size-limit-check-ec66035ff30ad70b.yaml0000664000175000017500000000046700000000000032564 0ustar00zuulzuul00000000000000--- upgrade: - | Removed the check for the last transfer size during updating replica state in the ONTAP driver. Also removed the corresponding option `netapp_snapmirror_last_transfer_size_limit`. See `launchpad bug 2066031 `_ for more details. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-replication-dhss-true-5b2887de8e9a2cb5.yaml0000664000175000017500000000050000000000000027601 0ustar00zuulzuul00000000000000--- features: - | The NetApp driver now supports replication with ``driver_handles_share_servers`` set to True, in addition to the mode where the driver does not handle the creation and management of share servers. For replication to work across ONTAP clusters, clusters must be peered in advance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-restrict-lif-creation-per-ha-pair-249021556be5189d.yaml0000664000175000017500000000074500000000000031373 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2100835 `_: The NetApp driver can now prevent the creation of a share server if the total number of data LIFs on one node of HA pair, including those that can be migrated in case of failure,exceeds the maximum number data LIFs supported by the node. This option guarantees that, in the event of a node failure, the partner node will be able to takeover all data LIFs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-retry-requests-0a77a31f5222d4b2.yaml0000664000175000017500000000053300000000000026205 0ustar00zuulzuul00000000000000--- fixes: - | Sometimes NetApp API call fails due to name resolution(DNS) issue. In such case, a client will now make 5 retries on connect and 2 on read calls. Also, the connection retry will be visible in the log. 
For more details, please refer to `launchpad bug #1971542 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-set-discovery-mode-to-none-d66b2125a8d12740.yaml0000664000175000017500000000046500000000000030220 0ustar00zuulzuul00000000000000--- features: - | If user is configuring 'Servers' in AD Server in the security service then, for NetApp ONTAP, the discovery mode should be changed to 'none'. Value of 'none' indicates that domain controller discovery will not be done, and it will depend only on preferred DC's configured. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-stop-clone-split-during-share-delete-720456e55031ef65.yaml0000664000175000017500000000060000000000000032104 0ustar00zuulzuul00000000000000--- fixes: - | On NetApp ONTAP backend, if share created from snapshot is deleted while clone split job is in progress, the deletion fails due to unmount error. Fixed this issue by stopping clone split if its in progress and then move to unmount of share. For more details, please check `Launchpad Bug #1960239 `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-support-filtering-api-tracing-02d1f4271f44d24c.yaml0000664000175000017500000000043200000000000031063 0ustar00zuulzuul00000000000000--- features: - The NetApp driver supports a new configuration option ``netapp_api_trace_pattern`` to enable filtering backend API interactions to log. This option must be specified in the backend section when desired and it accepts a valid python regular expression. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-svm-get-progress-596cd387c66dea1b.yaml0000664000175000017500000000016600000000000026605 0ustar00zuulzuul00000000000000--- features: - | NetApp driver is now able to inform the migration percentage of a share server migration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp-use-default-ad-site-security-service-55748f54c2390fad.yaml0000664000175000017500000000031600000000000032262 0ustar00zuulzuul00000000000000--- features: - | For Netapp ONTAP driver, the 'default_ad_site' will restrict the Domain controller's discovery mode to 'site' i.e. only Domain Controller's in local site will be discovered. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp_cdot_performance_utilization-aff1b498a159470e.yaml0000664000175000017500000000052200000000000031320 0ustar00zuulzuul00000000000000--- features: - The NetApp cDOT drivers now include the cluster node utilization metrics for each pool reported to the manila scheduler. These values are designed to be included in the filter & goodness functions used by the scheduler, so the cDOT drivers now also report those functions to the scheduler for each pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/netapp_configure_net_with_metadata-c5d1b5f542967276.yaml0000664000175000017500000000066400000000000030751 0ustar00zuulzuul00000000000000--- features: - | NetApp driver with DHSS True mode now supports setup the share server network VLAN segmentation through share network subnet metadata. To do so, the field `set_vlan` must be informed with VLAN number. 
It can set the network MTU passing the metadata field `set_mtu`. If the subnet metadata contains the `set_vlan` without the `set_mtu` field the MTU is configured according to network plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/neutron-binding-driver-43f01565051b031b.yaml0000664000175000017500000000007600000000000026132 0ustar00zuulzuul00000000000000--- features: - Added neutron driver for port bind actions. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/new-config-option-dhss-4931db193fd76656.yaml0000664000175000017500000000031200000000000026151 0ustar00zuulzuul00000000000000--- features: - | Add new config option 'service_network_host' for service instance with 'dhss'=True. This helps us to define network host for ports and able to seperate from manila host. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/new-secure-rbac-defaults-in-wallaby-13c0583afdfcfcc7.yaml0000664000175000017500000000427500000000000031052 0ustar00zuulzuul00000000000000--- prelude: > The default check strings of all manila API RBAC policies have been updated to support default roles and system-scope from the OpenStack Identity Service (Keystone). This includes support for project member, project reader, project administrator user roles as well as system member, system reader and system administrator roles. A manila "admin" persona is eventually expected to transition to the system scoped "admin" persona and some isolated project administrator privileges (like force-deleting a resource, resyncing a share replica or resetting state of a resource) are retained to an admin user operating within the scope of a project. Do read further impact in the ``upgrade`` section of these notes. 
upgrade: - | During the Wallaby release, API RBAC policies have new defaults, however, all old behavior is preserved as is, and the new defaults are not enabled. We encourage operators to generate the default policies via ``oslopolicy-sample-generator --namespace manila`` and compare them with any overrides you may currently have. These new default rules may enable you to remove matching overrides and reduce your policy maintenance burden. Refer to `the document on Service API protection `_ from the OpenStack Identity Service to understand roles, scopes, user personas and the motivation behind these changes. To be able to use systems scoped personas, you will need to enable the ``enforce_scope`` configuration option in the ``[oslo_policy]`` section of manila.conf. To enforce the new defaults, the configuration option ``enforce_new_defaults`` must be enabled from the ``[oslo_policy]`` section of manila.conf. We do not advise enabling the new defaults in production deployments yet. The manila developer community is actively adding test coverage and we aim to backport fixes to the Wallaby release and update code when we find any deficiencies. Refer to the future service release notes and the administrator documentation to keep abreast of the changes and the progress with these new defaults. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/newton-migration-improvements-cf9d3d6e37e19c94.yaml0000664000175000017500000000247100000000000030134 0ustar00zuulzuul00000000000000--- prelude: > Added new parameters to Share Migration experimental API and more combinations of share protocols and access types support to the Data Service. features: - Share Migration now has parameters to force share migration procedure to maintain the share writable, preserve its metadata and be non-disruptive when migrating. 
- Added CIFS protocol support to Data Service, along with respective 'user' access type support, through the 'data_node_access_admin_user' configuration option. - Added possibility to include options to mount commands issued by the Data Service through the 'data_node_mount_options' configuration option. - Administrators can now change share's share network during a migration. - Added possibility of having files hash verified during migration. deprecations: - Renamed Share Migration 'force_host_copy' parameter to 'force_host_assisted_migration', to better represent the parameter's functionality in API version 2.22. - API version 2.22 is now required for all Share Migration APIs. upgrades: - Removed Share Migration 'notify' parameter, it is no longer possible to perform a 1-phase migration. - Removed 'migrate_share' API support. - Added 'None' to 'reset_task_state' API possible values so it can unset the task_state. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/nexenta-manila-drivers-cbd0b376a076ec50.yaml0000664000175000017500000000013100000000000026416 0ustar00zuulzuul00000000000000features: - Added share backend drivers for NexentaStor4 and NexentaStor5 appliances. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/nexentastor5-v1.1-1ad6c8f7b5cc11b6.yaml0000664000175000017500000000237300000000000025262 0ustar00zuulzuul00000000000000--- features: - Added revert to snapshot support for NexentaStor5 driver. - Added manage existing support for NexentaStor5 driver. upgrade: - Added a new config option ``nexenta_ssl_cert_verify``. This option defines whether the NexentaStor5 driver should check ssl certificate. - Added a new config option ``nexenta_rest_connect_timeout``. 
This option specifies the time limit (in seconds), within which the connection to NexentaStor management REST API server must be established. - Added a new config option ``nexenta_rest_read_timeout``. This option specifies the time limit (in seconds), within which NexentaStor management REST API server must send a response. - Added a new config option ``nexenta_rest_backoff_factor``. This option specifies the backoff factor to apply between connection attempts to NexentaStor management REST API server. - Added a new config option ``nexenta_rest_retry_count``. This option specifies the number of times to repeat NexentaStor management REST API call in case of connection errors and NexentaStor appliance EBUSY or ENOENT errors. - Added a new config option ``nexenta_dataset_record_size``. This option specifies a suggested block size in for files in a filesystem' ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=manila-21.0.0/releasenotes/notes/optimize-deferred-deletion-get-share-instance-query-b6366b7c3b0a64db.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/optimize-deferred-deletion-get-share-instance-query-b6366b7c3b0a64d0000664000175000017500000000053600000000000033013 0ustar00zuulzuul00000000000000--- fixes: - | The periodic database queries made by the share manager service to process deferred deletion of shares has been fixed to consider the host in addition to the share's state. This both improves performance of the periodic task, as well as fixes incorrect behavior where incorrect shares are retrieved by the query. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/per-backend-az-590c68be0e2cb4bd.yaml0000664000175000017500000000131100000000000024713 0ustar00zuulzuul00000000000000--- features: - | Availability zones may now be configured per backend in a multi-backend configuration. 
Individual back end sections can now have the configuration option ``backend_availability_zone`` set. If set, this value will override the ``storage_availability_zone`` option from the [DEFAULT] section. upgrade: - The ``storage_availability_zone`` option can now be overridden per backend by using the ``backend_availability_zone`` option within the backend stanza. This allows enabling multiple storage backends that may be deployed in different AZs in the same ``manila.conf`` file if desired, simplifying service architecture around the Share Replication feature. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/powermax-rebrand-manila-a46a0c2ac0aa77ed.yaml0000664000175000017500000000163500000000000026717 0ustar00zuulzuul00000000000000--- features: - | Rebrand from VMAX to PowerMax includes changing of tag names, directory structure, file names and documentation. deprecations: - | The following have been deprecated but will remain until the V release ``vmax_server_container`` is now ``powermax_server_container`` ``vmax_share_data_pools`` is now ``powermax_share_data_pools`` ``vmax_ethernet_ports`` is now ``powermax_ethernet_ports`` upgrade: - | - ``emc_share_backend`` configuration option must be switched from ``vmax`` to ``powermax`` if using a newly rebranded PowerMax storage backend. - If using a PowerMax storage backend, deprecated options ``emc_nas_server_container``, ``emc_nas_pool_names`` and ``emc_interface_ports`` can no longer be used. They must be replaced by ``powermax_server_container``, ``powermax_share_data_pools`` and ``powermax_ethernet_ports`` respectively. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/privsep-migration-846819fdb181d83a.yaml0000664000175000017500000000037000000000000025411 0ustar00zuulzuul00000000000000--- security: - | Privsep transitions. 
Manila is transitioning from using the older style rootwrap privilege escalation path to the new style Oslo privsep path. This should improve performance and security of Manila in the long term. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/pure_antelope_bump-6b3bd1b35f632aee.yaml0000664000175000017500000000017200000000000026015 0ustar00zuulzuul00000000000000--- other: - | Pure Storage FlashBlade driver - Version number incremented for tracking purposes(Antelope release). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/pure_version_bump-2f1280f16391f6f9.yaml0000664000175000017500000000015000000000000025407 0ustar00zuulzuul00000000000000--- other: - | Pure Storage FlashBlade driver - Version number incremented for tracking purposes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/qb-bug-1733807-581e71e6581de28e.yaml0000664000175000017500000000015500000000000023744 0ustar00zuulzuul00000000000000--- fixes: - | The Quobyte driver now handles updated error codes from Quobyte API versions 1.4+ . ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/qnap-enhance-support-53848fda525b7ea4.yaml0000664000175000017500000000025100000000000026054 0ustar00zuulzuul00000000000000--- features: - | Added enhanced support to the QNAP Manila driver, including ``Thin Provisioning``, ``SSD Cache``, ``Deduplication`` and ``Compression``. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/qnap-fix-manage-snapshot-not-exist-4b111982ddc5fdae.yaml0000664000175000017500000000016500000000000030674 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the QNAP driver so that the snapshot which does not exist in NAS will not be managed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/qnap-fix-share-and-snapshot-inconsistant-bd628c6e14eeab14.yaml0000664000175000017500000000023600000000000032066 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the QNAP driver so that the managed snapshot and the share which created from snapshot will not be inconsistent in some cases. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/qnap-manila-driver-a30fe4011cb90801.yaml0000664000175000017500000000012100000000000025355 0ustar00zuulzuul00000000000000--- features: - Added Manila share driver for QNAP ES series storage systems. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/qnap-support-qes-200-639f3ad70687023d.yaml0000664000175000017500000000011300000000000025376 0ustar00zuulzuul00000000000000--- features: - | QNAP Manila driver added support for QES fw 2.0.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/qnap-support-qes-210-8775e6c210f3ca9f.yaml0000664000175000017500000000011300000000000025542 0ustar00zuulzuul00000000000000--- features: - | QNAP Manila driver added support for QES fw 2.1.0. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/qnap-tds-support-qes-24704313a0881c8c.yaml0000664000175000017500000000011600000000000025575 0ustar00zuulzuul00000000000000--- features: - | QNAP Manila driver supports QES FW on TDS series NAS. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-AllocType-from-huawei-driver-8b279802f36efb00.yaml0000664000175000017500000000061300000000000030623 0ustar00zuulzuul00000000000000--- prelude: > Manila scheduler checks "thin_provisioning" in extra specs of the share type and decides whether to use the logic for thin or thick. If "thin_provisioning" not given in extra specs, default use thin. upgrade: - Remove the "AllocType" configuration from huawei driver configuration file. If "thin_provisioning" not given, default create new share by "thin" type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-confusing-deprecation-warnings-a17c20d8973ef2bb.yaml0000664000175000017500000000047400000000000031472 0ustar00zuulzuul00000000000000--- fixes: - | Removed confusing manila.db.sqlalchemy model messages indicating deprecated properties for ``share_type``, ``host``, ``share_server_id``, ``share_network_id``, ``available_zone``. These are exposed in the API as properties of shares and are not in fact actually deprecated as such. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-deprecated-default-options-00fed1238fb6dca0.yaml0000664000175000017500000000013600000000000030631 0ustar00zuulzuul00000000000000--- deprecations: - | Remove deprecated cinder, neutron, nova options in DEFAULT group. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-deprecated-options-and-auth-4d497e03ad47e872.yaml0000664000175000017500000002433000000000000030517 0ustar00zuulzuul00000000000000--- upgrade: - | Deprecations made prior to the Ussuri release have been enforced, with the following impact to ``manila.conf``: - The deprecated ``memcached_servers`` option in the [DEFAULT] section had no effect and has been removed. - The deprecated ``share_usage_audit_period`` option in the [DEFAULT] section had no effect and has been removed. - The deprecated ``nova_api_microversion`` option in the [DEFAULT] has been removed. Use 'api_microversion' in the [nova] section instead. - The deprecated ``ca_certificates_file`` option in the [DEFAULT], [nova], [cinder], and [neutron] sections had no effect and has been removed. - The deprecated ``nova_ca_certificates_file`` option in the [DEFAULT] section had no effect and has been removed. - The deprecated ``cinder_ca_certificates_file`` option in the [DEFAULT] section had no effect and has been removed. - The deprecated ``api_insecure`` option in the [DEFAULT], [nova], [cinder], and [neutron[ sections had no effect and has been removed. - The deprecated ``nova_api_insecure`` option in the [DEFAULT] section had no effect and has been removed. - The deprecated ``cinder_api_insecure`` option in the [DEFAULT] section had no effect and has been removed. - The deprecated ``migration_tmp_location`` option is no longer recognized. Use ``mount_tmp_location`` instead. - The ``network_api_class`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``neutron_url`` option can no longer be set in the [DEFAULT] section. Use the ``url`` option in the [neutron] section instead. - The ``neutron_url_timeout`` option can no longer be set in the [DEFAULT] section. 
Use the ``url_timeout`` option in the [neutron] section instead. - The ``auth_strategy`` option for neutron can no longer be set in the [DEFAULT] section. Set it in the [neutron] secton instead. - The ``neutron_physical_net_name`` option for neutron can no longer be set in the [DEFAULT] section. Set it in the [neutron] secton instead. - The ``neutron_net_id`` option for neutron can no longer be set in the [DEFAULT] section. Set it in the [neutron] secton instead. - The ``neutron_subnet_id`` option for neutron can no longer be set in the [DEFAULT] section. Set it in the [neutron] secton insteaad. - The ``standalone_network_plugin_gateway`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``standalone_network_plugin_mask`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``standalone_network_plugin_type`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``standalone_network_plugin_segmentation_id`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``standalone_network_plugin_allowed_ip_ranges`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``standalone_network_plugin_mtu`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The deprecated ``migration_readonly_rules_support`` is longer recognized. All manila back ends are now required to support read only access rules. - The deprecated Dell-EMC PowerMax ``vmax_server_container`` option is no longer recognized. Use ``powermax_server_container`` instead. - The deprecated Dell-EMC PowerMax ``vmax_share_data_pools`` option is no longer recognized. Use ``powermax_share_data_pools`` instead. - The deprecated Dell-EMC PowerMax ``vmax_ethernet_ports`` option is no longer recognized. Use ``powermax_ethernet_ports`` instead. 
- The deprecated Dell-EMC Unity ``emc_nas_server_pool`` option is no longer recognized. Use ``unity_server_meta_pool`` instead. - The deprecated Dell-EMC Unity ``emc_nas_pool_names`` option is no longer recognized. Use ``unity_share_data_pools`` instead. - The deprecated Dell-EMC Unity ``emc_interface_ports`` option is no longer recognized. Use ``unity_ethernet_ports`` instead. - The deprecated Dell-EMC Unity ``emc_nas_server_container`` option has no effect and has been removed. - The deprecated Dell-EMC VNX ``emc_nas_server_container`` option has been removed. Use ``vnx_server_container`` instead. - The deprecated Dell-EMC VNX ``emc_nas_pool_names`` option has been removed. Use ``vnx_share_data_pools`` instead. - The deprecated Dell-EMC VNX ``emc_interface_ports`` option has been removed. Use ``vnx_ethernet_ports`` instead. - The deprecated GlusterFS ``glusterfs_native_server_password`` option has been removed. Use ``glusterfs_server_password`` instead. - The deprecated GlusterFS ``glusterfs_native_path_to_private_key`` option has been removed. Use ``glusterfs_path_to_private_key`` instead. - The deprecated GlusterFS ``glusterfs_targets`` option has been removed. Use ``glusterfs_servers`` instead. - The deprecated Hitachi HNAS ``hds_hnas_driver_helper`` option has been removed. Use ``hitachi_hnas_driver_helper`` instead. - The deprecated Hitachi HNAS ``hds_hnas_ip`` option has been removed. Use ``hitachi_hnas_ip`` instead. - The deprecated Hitachi HNAS ``hds_hnas_user`` option has been removed. Use ``hitachi_hnas_user`` instead. - The deprecated Hitachi HNAS ``hds_hnas_password`` option has been removed. Use ``hitachi_hnas_password`` instead. - The deprecated Hitachi HNAS ``hds_hnas_evs_id`` option has been removed. Use ``hitachi_evs_id`` instead. - The deprecated Hitachi HNAS ``hds_hnas_file_system_name`` option has been removed. Use ``hitachi_hnas_file_system_name`` instead. - The deprecated Hitachi HNAS ``hds_hnas_cluster_admin_ip0`` option has been removed. 
Use ``hitachi_hnas_cluster_admin_ip0`` instead. - The deprecated Hitachi HNAS ``hds_hnas_stalled_job_timeout`` option has been removed. Use ``hitachi_hnas_stalled_job_timeout`` instead. - The deprecated Hitachi HNAS ``hds_hnas_driver_helper`` option has been removed. Use ``hitachi_hnas_driver_helper`` instead. - The deprecated Hitachi HNAS ``hds_hnas_allow_cifs_snapshot_while_mounted`` option has been removed. Use ``hitachi_allow_cifs_snapshot_while_mounted`` instead. - The deprecated HPE 3PAR ``hp3par_api_url`` option has been removed. Use ``hpe3par_api_url`` instead. - The deprecated HPE 3PAR ``hp3par_username`` option has been removed. Use ``hpe3par_username`` instead. - The deprecated HPE 3PAR ``hp3par_password`` option has been removed. Use ``hpe3par_password`` instead. - The deprecated HPE 3PAR ``hp3par_san_ip`` option has been removed. Use ``hpe3par_san_ip`` instead. - The deprecated HPE 3PAR ``hp3par_san_login`` option has been removed. Use ``hpe3par_san_login`` instead. - The deprecated HPE 3PAR ``hp3par_san_password`` option has been removed. Use ``hpe3par_san_password`` instead. - The deprecated HPE 3PAR ``hp3par_san_ssh_port`` option has been removed. Use ``hpe3par_san_ssh_port`` instead. - The deprecated HPE 3PAR ``hp3par_fpg`` option has been removed. Use ``hpe3par_fpg`` instead. - The deprecated HPE 3PAR ``hp3par_fstore_per_share`` option has been removed. Use ``hpe3par_fstore_per_share`` instead. - The deprecated HPE 3PAR ``hp3par_debug`` option has been removed. Use ``hpe3par_debug`` instead. - The deprecated HPE 3PAR ``hp3par_cifs_admin_access_username`` option has been removed. Use ``hpe3par_cifs_admin_access_username`` instead. - The deprecated HPE 3PAR ``hp3par_cifs_admin_access_password`` option has been removed. Use ``hpe3par_cifs_admin_access_password`` instead. - The deprecated HPE 3PAR ``hp3par_cifs_admin_access_domain`` option has been removed. Use ``hpe3par_cifs_admin_access_domain`` instead. 
- The deprecated HPE 3PAR ``hp3par_share_mount_path`` option has been removed. Use ``hpe3par_share_mount_path`` instead. - The deprecated IBM GPFS ``knfs_export_options`` option had no effect and has been removed. - The deprecated Netapp ``netapp_nas_server_hostname`` option has been removed. Use ``netapp_server_hostname`` instead. - The deprecated Netapp ``netapp_nas_transport_type`` option has been removed. Use ``netapp_transport_type`` instead. - The deprecated Netapp ``netapp_nas_login`` option has been removed. Use ``netapp_login`` instead. - The deprecated Netapp ``netapp_nas_password`` option has been removed. Use ``netapp_password`` instead. - The deprecated Netapp ``netapp_nas_volume_name_template`` option has been removed. Use ``netapp_volume_name_template`` instead. - The deprecated Netapp ``netapp_root_volume_name`` option has been removed. Use ``netapp_root_volume`` instead. - The deprecated Nexenta ``nexenta_host`` option has been removed. Use ``nexenta_nas_host`` instead. - The ``enable_pre_hooks`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``enable_post_hooks`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``enable_periodic_hooks`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``enable_pre_hooks_errors`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``enable_post_hooks_errors`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``periodic_hooks_interval`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``hook_drivers`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``automatic_share_server_cleanup`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. 
- The ``unused_share_server_cleanup_interval`` option can no longer be set in the [DEFAULT] section. Set it in backend sections instead. - The ``cinder_cross_az_attach`` option can no longer be set in the [DEFAULT] section. Use ``cross_az_attach`` in the [CINDER] section instead. - The ``cinder_http_retries`` option can no longer be set in the [DEFAULT] section. Use ``http_retries`` in the [CINDER] section instead. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=manila-21.0.0/releasenotes/notes/remove-deprecated-public-share-creation-policies-051d59249e556b44.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-deprecated-public-share-creation-policies-051d59249e556b44.y0000664000175000017500000000104400000000000032451 0ustar00zuulzuul00000000000000--- upgrade: - | The ability to create a public share (RBAC: "share:create_public_share") and to update a share to become publicly visible (RBAC: "share:set_public_share") are now restricted to administator users operating at system scope. Adjust your policy file overrides if you would like to retain the older behavior of allowing all users to create public shares or to update private ones to public. If you do that, be sure that your users are aware of the security implications of publicly accessible shares. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-deprecated-size-limiter-9d7c8ab69cf85aea.yaml0000664000175000017500000000066400000000000030252 0ustar00zuulzuul00000000000000deprecations: - Removed manila RequestBodySizeLimiter shims and deprecation log messages since it has been deprecated since equivalent oslo.middleware library object was added in kilo. 
upgrade: - Ensure that /etc/manila/api-paste.ini is up-to-date with etc/manila/api-paste.ini, in particular that [filter:sizelimit] section has paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-host-field-from-shares-and-replicas-a087f85bc4a4ba45.yaml0000664000175000017500000000111100000000000032164 0ustar00zuulzuul00000000000000--- critical: - The "host" field is no longer returned in the JSON response of the /shares and /share-replicas APIs when these APIs are invoked with non-admin privileges. Applications that depend on this field must be updated as necessary. The value of this field is privileged information and the request context must specify administrator privileges when using these APIs for the "host" field to be present. The use of "host" as a filter key in the GET /shares API is controlled with the policy "list_by_host". This policy defaults to "rule:admin_api". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-intree-tempest-plugin-9fcf6edbeba47cba.yaml0000664000175000017500000000033100000000000030173 0ustar00zuulzuul00000000000000--- other: - | Remove in-tree manila tempest plugin because it now lives in the new repo openstack/manila-tempest-plugin From now on changes to manila tempest tests should be made in this new repo. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-manila-wsgi-script-07f9a4d00a165f4c.yaml0000664000175000017500000000053300000000000026777 0ustar00zuulzuul00000000000000--- upgrade: - | The WSGI script ``manila-wsgi`` has been removed. 
Deployment tooling should instead reference the Python module path for the wsgi module in Manila, ``manila.wsgi.api:application`` if their chosen WSGI server supports this (gunicorn, uWSGI, etc.) or implement a ``.wsgi`` script themselves if not (mod_wsgi). ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=manila-21.0.0/releasenotes/notes/remove-nova-net-support-from-service-instance-module-dd7559803fa01d45.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-nova-net-support-from-service-instance-module-dd7559803fa01d0000664000175000017500000000030500000000000033026 0ustar00zuulzuul00000000000000--- upgrade: - Removed nova net support from the service instance module since legacy nova networking was deprecated in Newton and is no longer supported in regular deployments in Ocata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-nova-network-support-f5bcb8b2fcd38581.yaml0000664000175000017500000000031600000000000027603 0ustar00zuulzuul00000000000000--- upgrade: - Removed support for ``nova_net_id`` in share_networks API and in the ShareNetwork DB model. Also removed the nova network plugins themselves and corresponding manila.conf options. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-old-notif-drivers-ea9f3837c8e82a41.yaml0000664000175000017500000000067700000000000026666 0ustar00zuulzuul00000000000000--- upgrade: - | The following aliases of notification drivers are no longer available. Use the driver names in the ``oslo.messaging`` library instead. 
- ``manila.openstack.common.notifier.log_notifier`` - ``manila.openstack.common.notifier.no_op_notifier`` - ``manila.openstack.common.notifier.rpc_notifier`` - ``manila.openstack.common.notifier.rpc_notifier2`` - ``manila.openstack.common.notifier.test_notifier`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-os-region-name-82e3cd4c7fb05ff4.yaml0000664000175000017500000000020300000000000026252 0ustar00zuulzuul00000000000000--- other: - | The configuration option "os_region_name" from the [DEFAULT] group got removed. It was not used anywhere. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-py38-5c619aee267bc1f3.yaml0000664000175000017500000000016600000000000024162 0ustar00zuulzuul00000000000000--- upgrade: - | Support for Python 3.8 has been removed. Now the minimum python version supported is 3.9 . ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-py39-dec4a09ae03e3583.yaml0000664000175000017500000000016600000000000024155 0ustar00zuulzuul00000000000000--- upgrade: - | Support for Python 3.9 has been removed. Now Python 3.10 is the minimum version supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-root-helper-config-option-fd517b0603031afa.yaml0000664000175000017500000000040400000000000030260 0ustar00zuulzuul00000000000000--- other: - | The "root_helper" configuration option from the [DEFAULT] section got removed. This option was not used anywhere in the codebase. Manila uses "sudo" together with "rootwrap" to allow unprivileged users running actions as root. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-standalone-network-plugin-ip-version-440ebcf27ffd22f8.yaml0000664000175000017500000000032400000000000032641 0ustar00zuulzuul00000000000000--- upgrade: - | The deprecated configuration option 'standalone_network_plugin_ip_version' has been removed. 'network_plugin_ipv4_enabled' and 'network_plugin_ipv6_enabled' should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/remove-vmax-a3d97ba80ced4895.yaml0000664000175000017500000000016300000000000024337 0ustar00zuulzuul00000000000000--- upgrade: - | The ``emc_share_backend`` option no longer supports ``vmax``. Use ``powermax`` instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/rename-cephfs-native-driver-3d9b4e3c6c78ee98.yaml0000664000175000017500000000100200000000000027367 0ustar00zuulzuul00000000000000--- upgrade: - To use the CephFS driver, which enables CephFS access via the native Ceph protocol, set the `share_driver` in the driver section of the config file as `manila.share.drivers.cephfs.driver.CephFSDriver`. The previous `share_driver` setting in Mitaka/Newton/Ocata releases `manila.share.drivers.cephfs.cephfs_native.CephFSNativeDriver` would still work (usually until one more release, Queens, as part of standard deprecation process.), but it's usage is no longer preferred. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/report-PortLimitExceeded-error-to-customer-528990dc9574688f.yaml0000664000175000017500000000047100000000000032110 0ustar00zuulzuul00000000000000--- fixes: - | When share creation fails due to missing ports quota, error message propogated to user is not useful. So reported PortLimitExceeded error to user instead of generic error. For more details, please refer (`launchpad bug 2019846 `_). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/reset_tap_device_after_node_restart-0690a6beca077b95.yaml0000664000175000017500000000017400000000000031240 0ustar00zuulzuul00000000000000--- fixes: - When use driver_handles_share_servers driver, reset the tap device after manila-share service start. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/revert-switch-to-use-glanceclient-bc462a5477d6b8cb.yaml0000664000175000017500000000026200000000000030530 0ustar00zuulzuul00000000000000--- fixes: - | Change Id905d47600bda9923cebae617749c8286552ec94 is causing gate failures with the generic driver so we need to revert it for now and revisit after rc. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/rules-for-managed-share-f28a26ffc980f6fb.yaml0000664000175000017500000000032400000000000026564 0ustar00zuulzuul00000000000000--- fixes: - Fixed HSP driver not supporting adding rules that exist in backend for managed shares. - Fixed HSP driver not supporting deleting share if it has rules in backend that are not in Manila. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/scheduler_hints_share_replica-ffeed5cf9adeddff.yaml0000664000175000017500000000057400000000000030671 0ustar00zuulzuul00000000000000--- features: - Added option "scheduler_hints" to share replica create API. For now, the onlyHostFilter will be supported using this option. The filter needs admin to specify host@backend#pool to "share_replica.scheduler_hints.only_host" in the request payload when creating a manila share replica. For non-admin users the onlyHostFilter will always be ignored. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/server-migration-with-network-extension-7433a5c38c8278e4.yaml0000664000175000017500000000043100000000000031617 0ustar00zuulzuul00000000000000--- features: - | Added support for share server migrations between different physical networks. It is achieved by creating an inactive port binding on the target host during share server migration, then cut it over to target host during migration-complete step. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-backup-d5f68ba6f9aef776.yaml0000664000175000017500000000041500000000000024532 0ustar00zuulzuul00000000000000--- features: - Added support to share-backup feature. From this release, a backup of a share can be can be created, deleted, listed, queried for detail, updated its name/description and also restored to original share. Sample NFS backup-driver is added. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-backup-netapp-driver-8bbcf3fbc1d20614.yaml0000664000175000017500000000061400000000000027251 0ustar00zuulzuul00000000000000--- features: - | The NetApp ONTAP driver now supports driver-advantaged share backup. NetApp SnapVault technology is used to create and restore backups for NetApp ONTAP shares. Backup delete workflow just deletes the transferred snapshots from destination backup volume. How to get the config data for backup, refer https://etherpad.opendev.org/p/manila-share-backup link. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-backup-netapp-driver-core-6a2328756b14f541.yaml0000664000175000017500000000033400000000000027630 0ustar00zuulzuul00000000000000--- fixes: - | Driver-advantaged share backups can now have a "backup_type". It is also possible to create share backups in environments that support hard multi-tenancy (driver_handles_share_servers=True). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-backup-out-of-place-restore-1db334d8a22be3fa.yaml0000664000175000017500000000052000000000000030437 0ustar00zuulzuul00000000000000--- features: - | Added support for targeted share backup restores in the share backup API to allow the restore a share backup from one source share to another target share, given the backup or share driver provides support for the operation. Available from microversion 2.91. and supported by the default NFS driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-mount-snapshots-b52bf3433d1e7afb.yaml0000664000175000017500000000036300000000000026414 0ustar00zuulzuul00000000000000--- features: - Added mountable snapshots feature to manila. Access can now be allowed and denied to snapshots of shares created with a share type that supports this feature. - Added mountable snapshots support to the LVM driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-network-with-multiple-subnets-a56be8b646b9e463.yaml0000664000175000017500000000206400000000000031070 0ustar00zuulzuul00000000000000--- features: - Added APIs with default policy set to 'rule:default' that allow the creation of share networks with multiple subnets. This gives the users the ability to create multiple subnets in a share network for different availability zones. Also, users will be able to delete and show existing subnets. - Updated the share server API to make possible to manage share servers in a specific subnet when the driver is operating in ``driver_handles_share_servers`` enabled mode. - Share servers are now associated with a single share network subnet, which pertain to a share network. upgrade: - On upgrading to this release, all existing share networks will be updated to accommodate an availability zone assignment. Existing share networks will have their availability zone set to "empty" indicating that they are available across all storage availability zones known to manila. fixes: - A share network cannot be provided while creating a share replica. Replicas will inherit the share's share network if one exists. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-replication-81ecf4a32a5c83b6.yaml0000664000175000017500000000020600000000000025464 0ustar00zuulzuul00000000000000--- features: - Shares can be replicated. Replicas can be added, listed, queried for detail, promoted to be 'active' or removed.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-revert-to-snapshot-3d028fa00620651e.yaml0000664000175000017500000000021600000000000026504 0ustar00zuulzuul00000000000000--- features: - Added revert-to-snapshot feature for regular and replicated shares. - Added revert-to-snapshot support to the LVM driver. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=manila-21.0.0/releasenotes/notes/share-revert-to-snapshot-in-netapp-cdot-driver-37f645ec3c14313c.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-revert-to-snapshot-in-netapp-cdot-driver-37f645ec3c14313c.yam0000664000175000017500000000054600000000000032542 0ustar00zuulzuul00000000000000--- features: - Added support for share revert-to-snapshot to NetApp Data ONTAP drivers. upgrades: - If using existing share types with Data ONTAP, set the 'revert_to_snapshot_support' extra spec to allow creating shares that support in-place revert-to-snapshot functionality. This modification will not affect existing shares of such types. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/share-server-delete-failure-ca29d6b286a2c790.yaml0000664000175000017500000000031200000000000027270 0ustar00zuulzuul00000000000000--- fixes: - Fix the issue of deleting share server in VNX driver. The VNX driver failed to detect the NFS interface of share server, so the detach and deletion of NFS interface were skipped. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/snaplock-support-for-netapp-driver-9b639386c07c4990.yaml0000664000175000017500000000043700000000000030473 0ustar00zuulzuul00000000000000--- features: - | NetApp Driver: The NetApp Driver now supports the creation of WORM shares using the SnapLock feature. To create these shares, set the 'netapp_snaplock_type' in the share type extra specs, along with the SnapLock related retention period extra specs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/snapshot-force-delete-4432bebfb5a0bbc9.yaml0000664000175000017500000000031200000000000026373 0ustar00zuulzuul00000000000000--- fixes: - force-delete API requests for snapshots are now propagated to the manila-share service and will not fail even if share drivers cannot remove the snapshots on the storage backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/support-ipv6-in-drivers-and-network-plugins-1833121513edb13d.yaml0000664000175000017500000000051100000000000032200 0ustar00zuulzuul00000000000000--- features: - Added optional extra spec 'ipv4_support' and 'ipv6_support' for share type. - Added new capabilities 'ipv4_support' and 'ipv6_support' for IP based drivers. - Added IPv6 support in network plugins. (support either IPv6 or IPv4) - Added IPv6 support in the lvm driver. (support both IPv6 and IPv4) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/support-qes-114-5881c0ff0e7da512.yaml0000664000175000017500000000011200000000000024575 0ustar00zuulzuul00000000000000--- features: - | QNAP Manila driver adds support for QES fw 1.1.4. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/switch-to-use-glanceclient-dde019b0b141caf8.yaml0000664000175000017500000000017500000000000027267 0ustar00zuulzuul00000000000000--- fixes: - | Switch to use glance client to retrive image list, novaclient is rarely maintained with glance API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/unexpected-data-of-share-from-snap-134189fc0f3eeedf.yaml0000664000175000017500000000026400000000000030624 0ustar00zuulzuul00000000000000--- fixes: - Fixed bug in Dell EMC Unity driver that caused shares created from snapshots to contain data from the original shares, instead of data from their snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/unity-default-filter-function-support-2eefc8044a5add5d.yaml0000664000175000017500000000111600000000000031626 0ustar00zuulzuul00000000000000--- upgrade: - | A new configuration option called ``report_default_filter_function`` has been added to the Dell EMC Unity driver. It can be set to True or False, and the default value is False. When set to True, the scheduler will disallow the creation of shares smaller than 3 GiB on the Dell EMC Unity back end. The default value of this option will be changed to ``True`` in a future release, so always set the desired value in your manila.conf per your expectations. features: - | Dell EMC Unity: Default filter function support for 3GB share size. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/unity-drvier-support-1gb-share-48f032dff8a6a789.yaml0000664000175000017500000000030400000000000027736 0ustar00zuulzuul00000000000000--- fixes: - Shares under 3 GB cannot be created on the Dell EMC Unity back end. If users create shares smaller than 3 GB, they will be allocated a 3 GB file system on the Unity system. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/unity-manage-server-share-snapshot-support-6a0bbbed74da13c7.yaml0000664000175000017500000000020000000000000032545 0ustar00zuulzuul00000000000000--- features: - Dell EMC Unity Manila driver now supports manage/unmange share server, share instance and share snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/unity-manila-ipv6-support-dd9bcf23064baceb.yaml0000664000175000017500000000010100000000000027263 0ustar00zuulzuul00000000000000--- features: - IPv6 support for Dell EMC Unity Manila driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/unity-revert-to-snapshot-support-1cffc3914982003d.yaml0000664000175000017500000000011700000000000030361 0ustar00zuulzuul00000000000000--- features: - Revert to snapshot support for Dell EMC Unity Manila driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/unity-shrink-share-support-cc748daebfe8f562.yaml0000664000175000017500000000013000000000000027503 0ustar00zuulzuul00000000000000--- features: - Shrink share support has been added for Dell EMC Unity Manila driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/unity-un-handles-share-server-mode-support-e179c092ab148948.yaml0000664000175000017500000000022400000000000032113 0ustar00zuulzuul00000000000000--- features: - Dell EMC Unity Manila driver now supports the mode in which it does not itself create and destroy share servers (DHSS=False). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/unity-vnx-rename-options-1656168dd4bdba70.yaml0000664000175000017500000000131500000000000026714 0ustar00zuulzuul00000000000000--- upgrade: - For Dell EMC Unity Manila driver, replaced emc_nas_pool_names with unity_share_data_pools, emc_nas_server_pool with unity_server_meta_pool, emc_interface_ports with unity_ethernet_ports, - For Dell EMC VNX Manila driver, replaced emc_nas_pool_names with vnx_share_data_pools, emc_interface_ports with vnx_ethernet_ports, emc_nas_server_container with vnx_server_container. deprecations: - For Dell EMC Unity Manila driver, options emc_nas_pool_names, emc_nas_server_pool, emc_interface_ports, emc_nas_server_container are deprecated. - For Dell EMC VNX Manila driver, options emc_nas_pool_names, emc_interface_ports, emc_nas_server_container are deprecated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/use-cephadm-nfs-ganesha-b9e071924de738fd.yaml0000664000175000017500000000235200000000000026401 0ustar00zuulzuul00000000000000--- features: - | NFSClusterProtocolHelper has been added to allow users to consume to export CephFS shares over a clustered NFS gateway. This presents many advantages, since the operator no longer needs to maintain their own instances of NFS Ganesha apart of the Ceph cluster. For this, we now communicate with ceph mgr using the nfs plugin. 
Read more about this plugin in https://docs.ceph.com/en/latest/cephfs/nfs/ upgrade: - | The CephFS driver now supports a new configuration option: * cephfs_nfs_cluster_id (string option): name of the nfs cluster to use. This option can be used to specify which NFS cluster to use. other: - | Since the CephFS driver is now capable of using ceph manager commands to manage NFS exports, we would like to deprecate and remove support for managing exports with the help of DBUS in a future release. Please use cephadm deployed NFS ganesha clusters in greenfield deployments with OpenStack Manila and refrain from using a standalone non-clustered nfs-ganesha service with this driver. As this solution is hardened for HA within Ceph, we expect to provide code to help migrate existing nfs-ganesha exports to the nfs-ganesha clusters in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/use-oslo-logging-for-config-options-388da64bb4ce45db.yaml0000664000175000017500000000015100000000000031050 0ustar00zuulzuul00000000000000--- fixes: - Use Oslo's logging features to securely output the configuration options for Manila. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/use-tooz-heartbeat-c6aa7e15444e63c3.yaml0000664000175000017500000000044100000000000025516 0ustar00zuulzuul00000000000000--- features: - Switched to tooz internal heartbeat feature in the coordination system. deprecations: - Deprecated 'coordination.heartbeat', 'coordination.initial_reconnect_backoff' and 'coordination.max_reconnect_backoff' configuration options which are not used anymore. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/user-messages-api-589ee7d68ccba70c.yaml0000664000175000017500000000050100000000000025502 0ustar00zuulzuul00000000000000--- features: - Added new user messages API - GET /messages, GET /messages/ and DELETE /messages/. - Added sorting, filtering and pagination to the user messages listing. - Added 'message_ttl' configuration option which can be used for configuring message expiration time. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/vastdata-add-api-token-based-auth-f6ee3fdce1ba6450.yaml0000664000175000017500000000044500000000000030456 0ustar00zuulzuul00000000000000--- features: - Added support for authentication using an API token in the VAST Manila driver. Introduced the `vast_api_token` configuration option, allowing users to authenticate with a pre-generated API token instead of using `vast_mgmt_user` and `vast_mgmt_password`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/veritas-access-manila-driver-d75558c01ce6d428.yaml0000664000175000017500000000007200000000000027371 0ustar00zuulzuul00000000000000--- features: - Added Manila driver for Veritas Access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/vlan-enhancement-in-unity-driver-0f1d972f2f6d00d9.yaml0000664000175000017500000000075200000000000030274 0ustar00zuulzuul00000000000000--- features: - Dell EMC Unity driver deprecated the option `emc_nas_server_container`. The driver will choose storage processor automatically to load balance the nas servers. - Dell EMC Unity driver is enhanced to use different tenant in Unity for each vlan. 
Thus the nas server in different vlan could have isolated IP address space. - Dell EMC Unity driver is enhanced to select the appropriate port on the system to create interfaces based on the network MTU. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/vmax-manila-support-7c655fc094c09367.yaml0000664000175000017500000000005600000000000025601 0ustar00zuulzuul00000000000000--- features: - Support for VMAX in Manila. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/vmax-rename-options-44d8123d14a23f94.yaml0000664000175000017500000000055600000000000025553 0ustar00zuulzuul00000000000000--- upgrade: - For Dell EMC VMAX Manila driver, replaced emc_nas_pool_names with vmax_share_data_pools, emc_interface_ports with vmax_ethernet_ports, emc_nas_server_container with vmax_server_container. deprecations: - For Dell EMC VMAX Manila driver, options emc_nas_pool_names, emc_interface_ports, emc_nas_server_container are deprecated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/vnx-manila-ipv6-support-9ae986431549cc63.yaml0000664000175000017500000000007700000000000026331 0ustar00zuulzuul00000000000000--- features: - IPv6 support for Dell EMC VNX Manila driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/vnx-ssl-verification-2d26a24e7e73bf81.yaml0000664000175000017500000000024300000000000026072 0ustar00zuulzuul00000000000000upgrade: - Added ``emc_ssl_cert_verify`` and ``emc_ssl_cert_path`` options for VNX SSL verification. For more details, see OpenStack official documentation. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/windows-smb-fix-default-access-d4b9eee899e400a0.yaml0000664000175000017500000000022400000000000030002 0ustar00zuulzuul00000000000000--- security: - Ensure we don't grant read access to 'Everyone' by default when creating CIFS shares and the Windows SMB backend is used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/x-openstack-request-id-2f26a301f3109689.yaml0000664000175000017500000000103200000000000026067 0ustar00zuulzuul00000000000000--- upgrade: - | The `request_id` pipeline filter has been added to the `api_paste.ini` file. This change ensures that the unique API request ID is returned in API response headers through the `x-openstack-request-id` header in addition to the `x-openstack-compute-request-id` header. This change ensures consistency with other OpenStack API services. Operators should **update their `api_paste.ini` configuration** to include the `request_id` filter in pipelines to take advantage of this new behavior. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/zadara-manila-driver-cb22b647e60f7ab8.yaml0000664000175000017500000000012100000000000026043 0ustar00zuulzuul00000000000000--- features: - Added Manila driver for Zadara VPSA Storage Array/Flash-Array. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/zed-secure-rbac-direction-change-2329bbf442b9a2da.yaml0000664000175000017500000000224700000000000030230 0ustar00zuulzuul00000000000000--- prelude: > RBAC defaults of all Shared File System service (manila) APIs have been updated to remove "system" scope personas. 
This is being done in concert with other OpenStack services, and in reaction to operator feedback that the use of system "scope" introduces backwards incompatibility in existing workflows. The new defaults support the use of "scope", however, no RBAC rule by default includes "system" scope. At this time, we do not recommend the use of system scoped personas to interact with the Shared File Systems service (manila) APIs since it is largely un-tested. "reader" role from the OpenStack Identity service (keystone) is fully supported with this release. Currently, these new "defaults" are available as "opt-in" only to prevent breaking existing deployments. To enforce default RBAC rules, set ``[oslo_policy]/enforce_new_defaults`` to True in your deployment. This option will be set to True by default in a future release. See `the OpenStack TC Secure RBAC goal `_ for more information regarding these changes. ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=manila-21.0.0/releasenotes/notes/zfsonlinux-driver-improvement-create-share-from-snapshot-another-backend-44296f572681be35.yaml 22 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/zfsonlinux-driver-improvement-create-share-from-snapshot-another-ba0000664000175000017500000000053000000000000034050 0ustar00zuulzuul00000000000000--- upgrade: In this release, the operation create share from snapshot was improved in the ZFSonLinux driver. Now, the operator using the ZFSonLinux driver can create a share from snapshot in different pools or backends by specifying the Manila API configuration option [DEFAULT]/use_scheduler_creating_share_from_snapshot. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/notes/zfssa-driver-add-share-manage-unmanage-9bd6d2e25cc86c35.yaml0000664000175000017500000000030200000000000031345 0ustar00zuulzuul00000000000000--- features: - Oracle ZFSSA driver now supports share manage/unmanage feature, where a ZFSSA share can be brought under Manila's management, or can be released from Manila's management. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2136679 manila-21.0.0/releasenotes/source/0000775000175000017500000000000000000000000017047 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000021000000000000020317 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: unmaintained/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000020321 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000020321 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/2024.2.rst0000664000175000017500000000020200000000000020322 0ustar00zuulzuul00000000000000=========================== 2024.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/2025.1.rst0000664000175000017500000000020200000000000020322 0ustar00zuulzuul00000000000000=========================== 2025.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2025.1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2136679 manila-21.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000020475 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000022746 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2136679 manila-21.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000021204 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000023455 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/conf.py0000664000175000017500000002125600000000000020354 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the 
"License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Manila Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'reno.sphinxext', 'openstackdocstheme', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/manila' openstackdocs_bug_project = 'manila' openstackdocs_bug_tag = 'release notes' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. 
master_doc = 'index' # General information about the project. copyright = '2015, Manila Developers' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [openstackdocstheme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". 
# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ManilaReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'ManilaReleaseNotes.tex', 'Manila Release Notes Documentation', 'Manila Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'manilareleasenotes', 'Manila Release Notes Documentation', ['Manila Developers'], 1) ] # If true, show URL addresses after external links. 
# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ManilaReleaseNotes', 'Manila Release Notes Documentation', 'Manila Developers', 'ManilaReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/index.rst0000664000175000017500000000167400000000000020720 0ustar00zuulzuul00000000000000.. Copyright 2016-2017 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================== Manila Release Notes ==================== .. 
toctree:: :maxdepth: 1 unreleased 2025.1 2024.2 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/liberty.rst0000664000175000017500000000021500000000000021251 0ustar00zuulzuul00000000000000============================ Liberty Series Release Notes ============================ .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6896746 manila-21.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000020306 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6896746 manila-21.0.0/releasenotes/source/locale/de/0000775000175000017500000000000000000000000020676 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2136679 manila-21.0.0/releasenotes/source/locale/de/LC_MESSAGES/0000775000175000017500000000000000000000000022463 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/locale/de/LC_MESSAGES/releasenotes.po0000664000175000017500000000400200000000000025510 0ustar00zuulzuul00000000000000# Andreas Jaeger , 2019. 
#zanata msgid "" msgstr "" "Project-Id-Version: manila\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2022-09-16 18:45+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2019-09-26 08:00+0000\n" "Last-Translator: Andreas Jaeger \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "1.0.1" msgstr "1.0.1" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.0-44" msgstr "2.0.0-44" msgid "3.0.0" msgstr "3.0.0" msgid "3.0.0-20" msgstr "3.0.0-20" msgid "4.0.0" msgstr "4.0.0" msgid "4.0.1" msgstr "4.0.1" msgid "4.0.2" msgstr "4.0.2" msgid "5.0.0" msgstr "5.0.0" msgid "5.0.1" msgstr "5.0.1" msgid "5.0.2" msgstr "5.0.2" msgid "5.0.3" msgstr "5.0.3" msgid "5.1.0" msgstr "5.1.0" msgid "6.0.0" msgstr "6.0.0" msgid "6.0.1" msgstr "6.0.1" msgid "6.0.2" msgstr "6.0.2" msgid "6.1.0" msgstr "6.1.0" msgid "6.2.0" msgstr "6.2.0" msgid "6.3.0" msgstr "6.3.0" msgid "6.3.1" msgstr "6.3.1" msgid "7.0.0" msgstr "7.0.0" msgid "7.1.0" msgstr "7.1.0" msgid "7.2.0" msgstr "7.2.0" msgid "7.3.0" msgstr "7.3.0" msgid "8.0.0" msgstr "8.0.0" msgid "8.0.1" msgstr "8.0.1" msgid "Current Series Release Notes" msgstr "Aktuelle Serie Releasenotes" msgid "Liberty Series Release Notes" msgstr "Liberty Serie Releasenotes" msgid "Manila Release Notes" msgstr "Manila Releasenotes" msgid "Mitaka Series Release Notes" msgstr "Mitaka Serie Releasenotes" msgid "Newton Series Release Notes" msgstr "Newton Serie Releasenotes" msgid "Ocata Series Release Notes" msgstr "Ocata Serie Releasenotes" msgid "Pike Series Release Notes" msgstr "Pike Serie Releasenotes" msgid "Queens Series Release Notes" msgstr "Queens Serie Releasenotes" msgid "Rocky Series Release Notes" msgstr "Rocky Serie Releasenotes" msgid "Stein Series Release Notes" msgstr "Stein Serie Releasenotes" msgid "Upgrade Notes" msgstr "Aktualisierungsnotizen" 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6896746 manila-21.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000021260 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2176678 manila-21.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000023045 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000012233700000000000026106 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata # Andi Chandler , 2024. #zanata msgid "" msgstr "" "Project-Id-Version: manila\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2025-06-22 06:43+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-12-17 09:55+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "" "'quota_per_share_gigabytes' config option allows admin to set per share size " "limit for a project. The default value is -1[\"No Limit\"] always unless " "changed in manila.conf by admin." msgstr "" "'quota_per_share_gigabytes' config option allows admin to set per share size " "limit for a project. The default value is -1[\"No Limit\"] always unless " "changed in manila.conf by admin." msgid "" "'reserved_share_extend_percentage' backend config option allows Manila to " "consider different reservation percentage for share extend operation. 
This " "distinct option is useful if operators want to prevent provisioning of new " "shares but allow extensions of existing shares on storage pools beyond their " "reserved space." msgstr "" "'reserved_share_extend_percentage' backend config option allows Manila to " "consider different reservation percentage for share extend operation. This " "distinct option is useful if operators want to prevent provisioning of new " "shares but allow extensions of existing shares on storage pools beyond their " "reserved space." msgid "" "'reserved_share_from_snapshot_percentage' backend config option allows " "Manila to consider different reservation percentage for shares that are " "being created from the snapshot. In case this config option is not set, the " "shares created from snapshot will use reservation percentage value set in " "'reserved_share_percentage'. This will be useful for users who want to keep " "same reservation percentage for both non-snapshot/regular and snapshot " "shares." msgstr "" "'reserved_share_from_snapshot_percentage' backend config option allows " "Manila to consider different reservation percentages for shares that are " "being created from the snapshot. In case this config option is not set, the " "shares created from the snapshot will use the reservation percentage value " "set in 'reserved_share_percentage'. This will be useful for users who want " "to keep the same reservation percentage for both non-snapshot/regular and " "snapshot shares." 
msgid "1.0.1" msgstr "1.0.1" msgid "10.0.0" msgstr "10.0.0" msgid "10.0.1" msgstr "10.0.1" msgid "10.0.2" msgstr "10.0.2" msgid "10.1.0" msgstr "10.1.0" msgid "10.2.0" msgstr "10.2.0" msgid "10.2.0-15" msgstr "10.2.0-15" msgid "11.0.0" msgstr "11.0.0" msgid "11.0.1" msgstr "11.0.1" msgid "11.1.0" msgstr "11.1.0" msgid "11.1.1" msgstr "11.1.1" msgid "11.1.2" msgstr "11.1.2" msgid "11.1.2-8" msgstr "11.1.2-8" msgid "12.0.0" msgstr "12.0.0" msgid "12.1.0" msgstr "12.1.0" msgid "12.1.1" msgstr "12.1.1" msgid "12.1.2" msgstr "12.1.2" msgid "12.1.2-29" msgstr "12.1.2-29" msgid "13.0.0" msgstr "13.0.0" msgid "13.0.1" msgstr "13.0.1" msgid "13.0.2" msgstr "13.0.2" msgid "13.0.3" msgstr "13.0.3" msgid "13.0.4" msgstr "13.0.4" msgid "13.1.0" msgstr "13.1.0" msgid "13.2.0" msgstr "13.2.0" msgid "13.2.0-15" msgstr "13.2.0-15" msgid "14.0.0" msgstr "14.0.0" msgid "14.0.1" msgstr "14.0.1" msgid "14.1.0" msgstr "14.1.0" msgid "14.1.1" msgstr "14.1.1" msgid "14.2.0" msgstr "14.2.0" msgid "14.2.0-4" msgstr "14.2.0-4" msgid "15.0.0" msgstr "15.0.0" msgid "15.1.0" msgstr "15.1.0" msgid "15.2.0" msgstr "15.2.0" msgid "15.3.0" msgstr "15.3.0" msgid "15.4.0" msgstr "15.4.0" msgid "15.4.1" msgstr "15.4.1" msgid "15.4.1-4" msgstr "15.4.1-4" msgid "16.0.0" msgstr "16.0.0" msgid "16.1.0" msgstr "16.1.0" msgid "16.2.0" msgstr "16.2.0" msgid "16.2.1" msgstr "16.2.1" msgid "17.0.0" msgstr "17.0.0" msgid "17.1.0" msgstr "17.1.0" msgid "17.2.0" msgstr "17.2.0" msgid "17.3.0" msgstr "17.3.0" msgid "17.3.1" msgstr "17.3.1" msgid "18.0.1" msgstr "18.0.1" msgid "18.2.0" msgstr "18.2.0" msgid "18.2.1" msgstr "18.2.1" msgid "19.0.0" msgstr "19.0.0" msgid "19.0.1" msgstr "19.0.1" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.0-44" msgstr "2.0.0-44" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "2023.1-eom" msgstr "2023.1-eom" msgid "2023.2 Series Release Notes" msgstr "2023.2 Series Release Notes" msgid "2024.1 Series Release Notes" msgstr "2024.1 Series Release Notes" msgid 
"2024.2 Series Release Notes" msgstr "2024.2 Series Release Notes" msgid "3.0.0" msgstr "3.0.0" msgid "3.0.0-20" msgstr "3.0.0-20" msgid "" "3parclient up to version 4.2.1 always returns only 1 VFS IP address. This " "may cause 3PAR driver boot up failure while validating VFS IP addresses " "against IP addresses configured in manila.conf." msgstr "" "3parclient up to version 4.2.1 always returns only 1 VFS IP address. This " "may cause 3PAR driver boot up failure while validating VFS IP addresses " "against IP addresses configured in manila.conf." msgid "4.0.0" msgstr "4.0.0" msgid "4.0.1" msgstr "4.0.1" msgid "4.0.2" msgstr "4.0.2" msgid "5.0.0" msgstr "5.0.0" msgid "5.0.1" msgstr "5.0.1" msgid "5.0.2" msgstr "5.0.2" msgid "5.0.3" msgstr "5.0.3" msgid "5.1.0" msgstr "5.1.0" msgid "5.1.0-4" msgstr "5.1.0-4" msgid "6.0.0" msgstr "6.0.0" msgid "6.0.1" msgstr "6.0.1" msgid "6.0.2" msgstr "6.0.2" msgid "6.1.0" msgstr "6.1.0" msgid "6.2.0" msgstr "6.2.0" msgid "6.3.0" msgstr "6.3.0" msgid "6.3.1" msgstr "6.3.1" msgid "6.3.2" msgstr "6.3.2" msgid "6.3.2-38" msgstr "6.3.2-38" msgid "7.0.0" msgstr "7.0.0" msgid "7.1.0" msgstr "7.1.0" msgid "7.2.0" msgstr "7.2.0" msgid "7.3.0" msgstr "7.3.0" msgid "7.4.0" msgstr "7.4.0" msgid "7.4.1" msgstr "7.4.1" msgid "7.4.1-38" msgstr "7.4.1-38" msgid "8.0.0" msgstr "8.0.0" msgid "8.0.1" msgstr "8.0.1" msgid "8.1.0" msgstr "8.1.0" msgid "8.1.1" msgstr "8.1.1" msgid "8.1.2" msgstr "8.1.2" msgid "8.1.3" msgstr "8.1.3" msgid "8.1.4" msgstr "8.1.4" msgid "9.0.0" msgstr "9.0.0" msgid "9.1.0" msgstr "9.1.0" msgid "9.1.1" msgstr "9.1.1" msgid "9.1.2" msgstr "9.1.2" msgid "9.1.3" msgstr "9.1.3" msgid "9.1.4" msgstr "9.1.4" msgid "9.1.5" msgstr "9.1.5" msgid "9.1.5-34" msgstr "9.1.5-34" msgid "" "A \"no-op\" interface driver (manila.network.linux.interface." 
"NoopInterfaceDriver) has been introduced to work with drivers that create " "and manage lifecycle of share servers " "(``driver_handles_share_servers=True``) through service instance virtual " "machines using OpenStack Compute. This interface driver can be used when " "manila-share is running on a machine that has access to the administrator " "network used by Manila." msgstr "" "A \"no-op\" interface driver (manila.network.linux.interface." "NoopInterfaceDriver) has been introduced to work with drivers that create " "and manage lifecycle of share servers " "(``driver_handles_share_servers=True``) through service instance virtual " "machines using OpenStack Compute. This interface driver can be used when " "Manila-share is running on a machine that has access to the administrator " "network used by Manila." msgid "" "A Ceph version check has been added as part of this change to address the " "absense of the mon-mgr target in Ceph Nautilus. With this change, Ceph " "Nautilus users can leverage their storage backend with the OpenStack manila " "Wallaby release." msgstr "" "A Ceph version check has been added as part of this change to address the " "absense of the mon-mgr target in Ceph Nautilus. With this change, Ceph " "Nautilus users can leverage their storage backend with the OpenStack Manila " "Wallaby release." msgid "" "A bug with storage protocol filtering in the scheduler has been fixed. See " "`bug `_ for more details." msgstr "" "A bug with storage protocol filtering in the scheduler has been fixed. See " "`bug `_ for more details." msgid "" "A human readable ``mount_point_name`` can now be specified while creating " "shares through the mount_point_name parameter. Manila will prepend a prefix " "to the mount point name which can be configured through the ``provisioning:" "mount_point_prefix`` share type extra spec. In case this extra spec is not " "available in the share type, Manila will prepend a project identification to " "the mount point name. 
Project id will be added to this friendly name " "``provisioning:mount_point_prefix`` share type is not provided during " "provisioning. The LVM driver now supports human readable export locations." msgstr "" "A human-readable ``mount_point_name`` can now be specified while creating " "shares through the mount_point_name parameter. Manila will prepend a prefix " "to the mount point name which can be configured through the ``provisioning:" "mount_point_prefix`` share type extra spec. In case this extra spec is not " "available in the share type, Manila will prepend a project identification to " "the mount point name. Project ID will be added to this friendly name " "``provisioning:mount_point_prefix`` share type is not provided during " "provisioning. The LVM driver now supports human-readable export locations." msgid "" "A new \"noauth\" auth strategy is available, and is named \"noauthv2\". It " "can be enabled by setting the configuration option ``[DEFAULT]/" "auth_strategy`` to ``noauthv2``. This auth strategy can be used when " "project_id substitution is removed from the manila endpoint URL." msgstr "" "A new \"noauth\" auth strategy is available, and is named \"noauthv2\". It " "can be enabled by setting the configuration option ``[DEFAULT]/" "auth_strategy`` to ``noauthv2``. This auth strategy can be used when " "project_id substitution is removed from the manila endpoint URL." msgid "" "A new API to start the ensure shares procedure for Manila has been added. " "Through this API, OpenStack administrators will be able to recalculate the " "shares' export location without restarting the shares manager service. " "Additionally, a new configuration option named " "`update_shares_status_on_ensure` is now available to help OpenStack " "administrators determine whether the shares' status should be modified " "during the ensure shares procedure or not." msgstr "" "A new API to start the ensure shares procedure for Manila has been added. 
" "Through this API, OpenStack administrators will be able to recalculate the " "shares' export location without restarting the shares manager service. " "Additionally, a new configuration option named " "`update_shares_status_on_ensure` is now available to help OpenStack " "administrators determine whether the shares' status should be modified " "during the ensure shares procedure or not." msgid "" "A new Container driver is added. It uses docker container as a share server." msgstr "" "A new Container driver is added. It uses docker container as a share server." msgid "" "A new common user-visible share types extra-spec called " "\"availability_zones\" has been introduced. When using API version 2.48, " "user requests to create new shares in a specific availability zone will be " "validated against the configured availability zones of the share type. " "Similarly, users requests to create share groups and share replicas are " "validated against the share type ``availability_zones`` extra-spec when " "present. Users can also filter share types by one or more AZs that are " "supported by them." msgstr "" "A new common user-visible share types extra-spec called " "\"availability_zones\" has been introduced. When using API version 2.48, " "user requests to create new shares in a specific availability zone will be " "validated against the configured availability zones of the share type. " "Similarly, users' requests to create share groups and share replicas are " "validated against the share type ``availability_zones`` extra-spec when " "present. Users can also filter share types by one or more AZs that are " "supported by them." msgid "" "A new configuration option 'netapp_qos_policy_group_name_template' has been " "added to allow overriding the naming of QoS policies created by the NetApp " "driver." 
msgstr "" "A new configuration option 'netapp_qos_policy_group_name_template' has been " "added to allow overriding the naming of QoS policies created by the NetApp " "driver." msgid "" "A new configuration option called ``cephfs_ensure_all_shares_salt`` has been " "introduced to assist cloud administrators that would like the CephFS driver " "to reconcile export paths of existing shares during service startup." msgstr "" "A new configuration option called ``cephfs_ensure_all_shares_salt`` has been " "introduced to assist cloud administrators who would like the CephFS driver " "to reconcile export paths of existing shares during service startup." msgid "" "A new configuration option called ``report_default_filter_function`` has " "been added to the Dell EMC Unity driver. It can be set to True or False, and " "the default value is False. When set to True, the scheduler will disallow " "the creation of shares smaller than 3 GiB on the Dell EMC Unity back end. " "The default value of this option will be changed to ``True`` in a future " "release, so always set the desired value in your manila.conf per your " "expectations." msgstr "" "A new configuration option called ``report_default_filter_function`` has " "been added to the Dell EMC Unity driver. It can be set to True or False, and " "the default value is False. When set to True, the scheduler will disallow " "the creation of shares smaller than 3 GiB on the Dell EMC Unity back end. " "The default value of this option will be changed to ``True`` in a future " "release, so always set the desired value in your manila.conf per your " "expectations." msgid "" "A new exception will be thrown when a default share type was not configured " "and no other share type was specified on any sort of share creation." msgstr "" "A new exception will be thrown when a default share type was not configured " "and no other share type was specified on any sort of share creation." 
msgid "" "A new share status `creating_from_snapshot` was added to inform the user " "that a share creation from snapshot is in progress and may take some time to " "be concluded. In order to quantify the share creation progress a new field " "called ``progress`` was added to shares and share instances information, to " "indicate the conclusion percentage of share create operation (0 to 100%)." msgstr "" "A new share status `creating_from_snapshot` was added to inform the user " "that a share creation from snapshot is in progress and may take some time to " "be concluded. In order to quantify the share creation progress a new field " "called ``progress`` was added to shares and share instances information, to " "indicate the conclusion percentage of share create operation (0 to 100%)." msgid "" "A new user message has been added in case of share extensions failing " "asynchronously." msgstr "" "A new user message has been added in case of share extensions failing " "asynchronously." msgid "" "A share network cannot be provided while creating a share replica. Replicas " "will inherit the share's share network if one exists." msgstr "" "A shared network cannot be provided while creating a share replica. Replicas " "will inherit the share's share network if one exists." msgid "API version 2.22 is now required for all Share Migration APIs." msgstr "API version 2.22 is now required for all Share Migration APIs." msgid "" "APIs that were not returning a request ID ('x-compute-request-id') in the " "response headers have been fixed." msgstr "" "APIs that were not returning a request ID ('x-compute-request-id') in the " "response headers have been fixed." msgid "" "Ability to add minimum and maximum share size restrictions which can be set " "on a per share-type granularity. Added new extra specs 'provisioning:" "max_share_size' and 'provisioning:min_share_size'." 
msgstr "" "Ability to add minimum and maximum share size restrictions which can be set " "on a per share-type granularity. Added new extra specs 'provisioning:" "max_share_size' and 'provisioning:min_share_size'." msgid "" "Access rule type for shares served via nfs-ganesha is now validated, fixing " "`launchpad bug #1816420 `_ " "where ``cephx`` access type was allowed though only ``ip`` access type is " "effective. This fix also validates ``access_level`` to ensure that it is " "set to ``RW`` or ``RO``." msgstr "" "Access rule type for shares served via nfs-ganesha is now validated, fixing " "`launchpad bug #1816420 `_ " "where ``cephx`` access type was allowed though only ``ip`` access type is " "effective. This fix also validates ``access_level`` to ensure that it is " "set to ``RW`` or ``RO``." msgid "" "Access rules can be added regardless of the 'access_rules_status' of the " "share or any of its replicas." msgstr "" "Access rules can be added regardless of the 'access_rules_status' of the " "share or any of its replicas." msgid "Adapted policy.json file to correct snapshot policy values." msgstr "Adapted policy.json file to correct snapshot policy values." 
msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "Manila Release Notes" msgstr "Manila Release Notes" msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "" "User messages can be queried by timestamp with query keys ``created_since`` " "and ``created_before`` starting with API version ``2.52``." msgstr "" "User messages can be queried by timestamp with query keys ``created_since`` " "and ``created_before`` starting with API version ``2.52``." msgid "" "User specified scheduler hints such as \"affinity_same_host\" and " "\"affinity_different_host\" are stored as share metadata. These are stored " "as admin-only metadata keys that cannot be deleted or manipulated by " "nonadmin users." msgstr "" "User-specified scheduler hints such as \"affinity_same_host\" and " "\"affinity_different_host\" are stored as shared metadata. These are stored " "as admin-only metadata keys that cannot be deleted or manipulated by non-" "admin users." msgid "" "User specified scheduler hints such as \"same_host\" and \"different_host\" " "are stored as share metadata with keys such as \"__affinity_same_host\" and " "\"__affinity_different_host\" respectively. These can be manipulated or " "deleted by end users like all metadata unless prevented by RBAC policy. 
In a " "future release, the service will restrict the deletion or manipulation of " "these specific metadata items." msgstr "" "User-specified scheduler hints such as \"same_host\" and \"different_host\" " "are stored as share metadata with keys such as \"__affinity_same_host\" and " "\"__affinity_different_host\" respectively. These can be manipulated or " "deleted by end users like all metadata unless prevented by RBAC policy. In a " "future release, the service will restrict the deletion or manipulation of " "these specific metadata items." msgid "" "User_id and project_id DB fields are extended to also support LDAP setups." msgstr "" "User_id and project_id DB fields are extended to also support LDAP setups." msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "" "Validation of IPv6 based addresses has been added for allow access API when " "access type is IP." msgstr "" "Validation of IPv6-based addresses has been added to allow access API when " "the access type is IP." msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "" "View-only relationships in database objects have been appropriately tagged " "to avoid sqlalchemy deprecation messages flooding the log files." msgstr "" "View-only relationships in database objects have been appropriately tagged " "to avoid sqlalchemy deprecation messages flooding the log files." msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "When ``cephfs_ganesha_server_ip`` is not set, the current hostname is used " "as a default for such config option. The driver was treating this value as " "an IP address and trying to perform validations on it. The CEPH NFS driver " "will no longer treat hostnames as ip addresses and try to validate them as " "such." msgstr "" "When ``cephfs_ganesha_server_ip`` is not set, the current hostname is used " "as a default for such config option. 
The driver was treating this value as " "an IP address and trying to perform validations on it. The CEPH NFS driver " "will no longer treat hostnames as IP addresses and try to validate them as " "such." msgid "" "When a quota value greater than 2147483647 is set, the error message " "\"ERROR: Invalid input received:Quota limit should not exceed 2147483647. " "(HTTP 400)\" is communicated to the user." msgstr "" "When a quota value greater than 2147483647 is set, the error message " "\"ERROR: Invalid input received:Quota limit should not exceed 2147483647. " "(HTTP 400)\" is communicated to the user." msgid "" "When attempting to shrink a share to a size smaller than the current used " "space, the share status will remain as ``available`` instead of " "``shrinking_possible_data_loss_error``. The user will receive warning " "message saying that the shrink operation was not completed." msgstr "" "When attempting to shrink a share to a size smaller than the current used " "space, the share status will remain as ``available`` instead of " "``shrinking_possible_data_loss_error``. The user will receive a warning " "message saying that the shrink operation was not completed." msgid "" "When creating access rules with a deletion lock, the shares will also be " "locked to prevent disconnections. For more details, please refer to " "`launchpad bug 2075967 `_." msgstr "" "When creating access rules with a deletion lock, the shares will also be " "locked to prevent disconnections. For more details, please refer to " "`launchpad bug 2075967 `_." msgid "" "While creating share replicas, the rules that are copied from source share " "would hang in 'queued_to_apply' forever. Fixed it by checking status of " "access_rule of the created replica and conditionally changed from " "'queued_to_apply' to 'active'. 
For more details check `Launchpad bug 2000253 " "`_" msgstr "" "While creating share replicas, the rules that are copied from source share " "would hang in 'queued_to_apply' forever. Fixed it by checking status of " "access_rule of the created replica and conditionally changed from " "'queued_to_apply' to 'active'. For more details check `Launchpad bug 2000253 " "`_" msgid "" "With deferred deletion, when resource(share or snapshot) is deleted, the " "quota is freed immediately and periodic tasks will delete the resource (i.e. " "share or snapshot) in driver. The resources errored during deletion are " "retried for deletion after some time in the same periodic tasks." msgstr "" "With deferred deletion, when resource(share or snapshot) is deleted, the " "quota is freed immediately and periodic tasks will delete the resource (i.e. " "share or snapshot) in driver. The resources errored during deletion are " "retried for deletion after some time in the same periodic tasks." msgid "" "With replication setup at least one backend is working and serving the " "shares. Starting from API version 2.74, allowing and denying access to " "shares will only fail if any of the instances is in a transitional state. " "Please refer to `Launchpad bug 1965561 `_" msgstr "" "With replication setup at least one backend is working and serving the " "shares. Starting from API version 2.74, allowing and denying access to " "shares will only fail if any of the instances is in a transitional state. " "Please refer to `Launchpad bug 1965561 `_" msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "" "`Bug #1900755 `_: Added a " "driver-agnostic exception to handle insufficient privileges on a security " "service when trying to create a share. Added a user message to provide " "useful information to end users. 
Note that vendors will need to implement " "the exception provided in this patch in their drivers to take advantage of " "the more convenient user message." msgstr "" "`Bug #1900755 `_: Added a " "driver-agnostic exception to handle insufficient privileges on a security " "service when trying to create a share. Added a user message to provide " "useful information to end users. Note that vendors will need to implement " "the exception provided in this patch in their drivers to take advantage of " "the more convenient user message." msgid "" "`Bug #1900755 `_: When " "failing to create shares using security services due to insufficient " "privileges or wrong credentials, the NetApp ONTAP driver will now report " "more accurate information on the failure." msgstr "" "`Bug #1900755 `_: When " "failing to create shares using security services due to insufficient " "privileges or wrong credentials, the NetApp ONTAP driver will now report " "more accurate information on the failure." msgid "" "`Bug #1925486 `_ Share " "replica create API does not support share network option and uses parent " "share's share network. Fixed it to allow any share network by providing " "option ``share-network``. Added in API microversion starting with '2.72'." msgstr "" "`Bug #1925486 `_ Share " "replica create API does not support share network option and uses parent " "share's share network. Fixed it to allow any share network by providing " "option ``share-network``. Added in API microversion starting with '2.72'." msgid "" "`Bug #1946990 `_: Fix the " "ignored ``[neutron] url`` option. Now the parameter overrides the endpoint " "url which Manila uses to access Neutron API." msgstr "" "`Bug #1946990 `_: Fix the " "ignored ``[neutron] url`` option. Now the parameter overrides the endpoint " "URL which Manila uses to access Neutron API." msgid "" "`Bug #1964696 `_: Fix " "calling the GaneshaNASHelper `update_access` method from the gluster " "GaneshaNFSHelper with the wrong signature." 
msgstr "" "`Bug #1964696 `_: Fix " "calling the GaneshaNASHelper `update_access` method from the gluster " "GaneshaNFSHelper with the wrong signature." msgid "" "`Bug #1983125 `_: Fixed the " "remaining reference to a deprecated quota option in code, which was causing " "a warning message." msgstr "" "`Bug #1983125 `_: Fixed the " "remaining reference to a deprecated quota option in code, which was causing " "a warning message." msgid "" "`Bug 1991776 `_ was fixed within the " "CephFS driver. The driver no longer emits repeated warnings concerning " "supported IP versions when using the NFS protocol." msgstr "" "`Bug 1991776 `_ was fixed within the " "CephFS driver. The driver no longer emits repeated warnings concerning " "supported IP versions when using the NFS protocol." msgid "" "`Launchpad bug 1809318 `_ " "has been fixed. The deprecated options ``api_insecure`` and " "``ca_certificates_file`` from nova, cinder, neutron or DEFAULT configuration " "groups no longer override the newer ``insecure`` option if provided. Always " "use ``insecure`` and ``cafile`` to control SSL and validation since the " "deprecated options will be removed in a future release." msgstr "" "`Launchpad bug 1809318 `_ " "has been fixed. The deprecated options ``api_insecure`` and " "``ca_certificates_file`` from nova, cinder, neutron or DEFAULT configuration " "groups no longer override the newer ``insecure`` option if provided. Always " "use ``insecure`` and ``cafile`` to control SSL and validation since the " "deprecated options will be removed in a future release." msgid "" "`Launchpad bug 1815038 `_ " "has been fixed and now we correctly parse the base URL from manila's " "endpoint url, accounting for proxy URLs." msgstr "" "`Launchpad bug 1815038 `_ " "has been fixed and now we correctly parse the base URL from Manila's " "endpoint URL, accounting for proxy URLs." msgid "" "`Launchpad bug 1853940 `_ " "has been fixed. 
When drivers are still initializing or when they fail to " "initialize, the share service will be reported as being \"down\" until the " "driver has been initialized." msgstr "" "`Launchpad bug 1853940 `_ " "has been fixed. When drivers are still initialising or when they fail to " "initialise, the share service will be reported as being \"down\" until the " "driver has been initialised." msgid "" "`Launchpad bug 1855391 `_ " "has been fixed. The action of extend share will go through scheduler, if " "there is no available share backend host, the share will rollback to " "available state and create an user message about extend." msgstr "" "`Launchpad bug 1855391 `_ " "has been fixed. The action of extend share will go through scheduler, if " "there is no available share backend host, the share will rollback to " "available state and create an user message about extend." msgid "" "`Launchpad bug 1869148 `_ has been " "fixed. This bug could have affected environments where extension APIs were " "provided in compiled files rather than source code." msgstr "" "`Launchpad bug 1869148 `_ has been " "fixed. This bug could have affected environments where extension APIs were " "provided in compiled files rather than source code." msgid "" "`Launchpad bug 1968891 `_ " "has been fixed. scheduler will use size increase rather than share size to " "calculate provisioned_ratio when extending share." msgstr "" "`Launchpad bug 1968891 `_ " "has been fixed. scheduler will use size increase rather than share size to " "calculate provisioned_ratio when extending share." 
msgid "``[cors] allow_methods``" msgstr "``[cors] allow_methods``" msgid "``[cors] allowed_headers``" msgstr "``[cors] allowed_headers``" msgid "``[cors] expose_headers``" msgstr "``[cors] expose_headers``" msgid "``[quota]/driver`` - previously ``[DEFAULT]/quota_driver``" msgstr "``[quota]/driver`` - previously ``[DEFAULT]/quota_driver``" msgid "``[quota]/gigabytes`` - previously ``[DEFAULT]/quota_gigabytes``" msgstr "``[quota]/gigabytes`` - previously ``[DEFAULT]/quota_gigabytes``" msgid "``[quota]/max_age`` - previously ``[DEFAULT]/max_age``" msgstr "``[quota]/max_age`` - previously ``[DEFAULT]/max_age``" msgid "" "``[quota]/per_share_gigabytes`` - previously ``[DEFAULT]/" "quota_per_share_gigabytes``" msgstr "" "``[quota]/per_share_gigabytes`` - previously ``[DEFAULT]/" "quota_per_share_gigabytes``" msgid "" "``[quota]/replica_gigabytes`` - previously ``[DEFAULT]/" "quota_replica_gigabytes``" msgstr "" "``[quota]/replica_gigabytes`` - previously ``[DEFAULT]/" "quota_replica_gigabytes``" msgid "" "``[quota]/reservation_expire`` previously ``[DEFAULT]/reservation_expire``" msgstr "" "``[quota]/reservation_expire`` previously ``[DEFAULT]/reservation_expire``" msgid "" "``[quota]/share_group_snapshots`` - previously ``[DEFAULT]/" "quota_share_group_snapshots``" msgstr "" "``[quota]/share_group_snapshots`` - previously ``[DEFAULT]/" "quota_share_group_snapshots``" msgid "``[quota]/share_groups`` - previously ``[DEFAULT]/quota_share_groups``" msgstr "``[quota]/share_groups`` - previously ``[DEFAULT]/quota_share_groups``" msgid "" "``[quota]/share_networks`` - previously ``[DEFAULT]/quota_share_networks``" msgstr "" "``[quota]/share_networks`` - previously ``[DEFAULT]/quota_share_networks``" msgid "" "``[quota]/share_replicas`` - previously ``[DEFAULT]/quota_share_replicas``" msgstr "" "``[quota]/share_replicas`` - previously ``[DEFAULT]/quota_share_replicas``" msgid "``[quota]/shares`` - previously ``[DEFAULT]/shares_quota``" msgstr "``[quota]/shares`` - previously 
``[DEFAULT]/shares_quota``" msgid "" "``[quota]/snapshot_gigabytes`` - previously ``[DEFAULT]/" "quota_snapshot_gigabytes``" msgstr "" "``[quota]/snapshot_gigabytes`` - previously ``[DEFAULT]/" "quota_snapshot_gigabytes``" msgid "``[quota]/snapshots`` - previously ``[DEFAULT]/snapshots_quota``" msgstr "``[quota]/snapshots`` - previously ``[DEFAULT]/snapshots_quota``" msgid "``[quota]/until_refresh`` - previously ``[DEFAULT]/until_refresh``" msgstr "``[quota]/until_refresh`` - previously ``[DEFAULT]/until_refresh``" msgid "" "``emc_share_backend`` configuration option must be switched from ``vmax`` to " "``powermax`` if using a newly rebranded PowerMax storage backend." msgstr "" "``emc_share_backend`` configuration option must be switched from ``vmax`` to " "``powermax`` if using a newly rebranded PowerMax storage backend." msgid "" "``manila-manage`` now supports share server commands, which allow " "administrators to modify the field value of some share server's capabilities." msgstr "" "``manila-manage`` now supports share server commands, which allow " "administrators to modify the field value of some share server's capabilities." msgid "``manila.openstack.common.notifier.log_notifier``" msgstr "``manila.openstack.common.notifier.log_notifier``" msgid "``manila.openstack.common.notifier.no_op_notifier``" msgstr "``manila.openstack.common.notifier.no_op_notifier``" msgid "``manila.openstack.common.notifier.rpc_notifier2``" msgstr "``manila.openstack.common.notifier.rpc_notifier2``" msgid "``manila.openstack.common.notifier.rpc_notifier``" msgstr "``manila.openstack.common.notifier.rpc_notifier``" msgid "``manila.openstack.common.notifier.test_notifier``" msgstr "``manila.openstack.common.notifier.test_notifier``" msgid "" "``netapp:fpolicy_extensions_to_exclude``: specifies file extensions to be " "excluded for screening. Values should be provided as comma separated list." 
msgstr "" "``netapp:fpolicy_extensions_to_exclude``: specifies file extensions to be " "excluded for screening. Values should be provided as comma separated list." msgid "" "``netapp:fpolicy_extensions_to_include``: specifies file extensions to be " "included for screening. Values should be provided as comma separated list." msgstr "" "``netapp:fpolicy_extensions_to_include``: specifies file extensions to be " "included for screening. Values should be provided as comma separated list." msgid "" "``netapp:fpolicy_file_operations``: specifies all file operations to be " "monitored. Values should be provided as comma separated list." msgstr "" "``netapp:fpolicy_file_operations``: specifies all file operations to be " "monitored. Values should be provided as comma separated list." msgid "" "``netapp_server_migration_check_capacity``: Specifies if a capacity " "validation at the destination backend must be made before proceeding with " "the share server migration. When enabled, the NetApp driver will validate if " "the destination pools can hold all shares and snapshots belonging to the " "source share server." msgstr "" "``netapp_server_migration_check_capacity``: Specifies if a capacity " "validation at the destination backend must be made before proceeding with " "the share server migration. When enabled, the NetApp driver will validate if " "the destination pools can hold all shares and snapshots belonging to the " "source share server." msgid "" "`bug #2025649 `_: Fixed " "share server API error message." msgstr "" "`bug #2025649 `_: Fixed " "share server API error message." msgid "" "`bug 2038607 `_: Fixed parse " "of the protocol_access_mapping option and the backup_protocol_access_mapping " "option. This change requires that the values used for these options should " "be formatted like `key0=[value00, value01],key1=[value10]`." msgstr "" "`bug 2038607 `_: Fixed parse " "of the protocol_access_mapping option and the backup_protocol_access_mapping " "option. 
This change requires that the values used for these options should " "be formatted like `key0=[value00, value01],key1=[value10]`." msgid "" "`bug 2051691 `_: Fixed the " "default values of the following options." msgstr "" "`bug 2051691 `_: Fixed the " "default values of the following options." msgid "" "added two new fields to share groups API - 'availability_zone_id' and " "'consistent_snapshot_support' to be able to get to know these attributes of " "a share group." msgstr "" "added two new fields to share groups API - 'availability_zone_id' and " "'consistent_snapshot_support' to be able to get to know these attributes of " "a share group." msgid "" "cephfs volume path prefix is now configurable in order to enable support for " "multiple cephfs back ends." msgstr "" "CephFS volume path prefix is now configurable in order to enable support for " "multiple CephFS back ends." msgid "" "default route for service subnet wouldn't be created if " "connect_share_server_to_tenant_network is on" msgstr "" "default route for service subnet wouldn't be created if " "connect_share_server_to_tenant_network is on" msgid "" "fixes a bug causing incorrect quotas being set in the backend when resizing " "Quobyte shares." msgstr "" "fixes a bug causing incorrect quotas being set in the backend when resizing " "Quobyte shares." msgid "" "force-delete API requests for snapshots are now propagated to the manila-" "share service and will not fail even if share drivers cannot remove the " "snapshots on the storage backend." msgstr "" "force-delete API requests for snapshots are now propagated to the manila-" "share service and will not fail even if share drivers cannot remove the " "snapshots on the storage backend." msgid "" "http_proxy_to_wsgi is taken into use instead of the deprecated ssl " "middleware. This makes it easier for deployers to have Manila running behind " "a proxy that terminates TLS connections. 
This middleware addition adds the " "enable_proxy_headers_parsing option to the oslo_middleware section which " "needs to be set in the configuration file in order to enable middleware to " "do its work." msgstr "" "http_proxy_to_wsgi is taken into use instead of the deprecated SSL " "middleware. This makes it easier for deployers to have Manila running behind " "a proxy that terminates TLS connections. This middleware addition adds the " "enable_proxy_headers_parsing option to the oslo_middleware section which " "needs to be set in the configuration file in order to enable middleware to " "do its work." msgid "" "mgr-commands are now directed to the mgr-daemon instead of the mon-daemon in " "the CephFS drivers" msgstr "" "mgr-commands are now directed to the mgr-daemon instead of the mon-daemon in " "the CephFS drivers" msgid "netapp:adaptive_qos_policy_group=platform3" msgstr "netapp:adaptive_qos_policy_group=platform3" msgid "" "rabbitmq password is no longer exposed in the logs when debugging is enabled." msgstr "" "RabbitMQ password is no longer exposed in the logs when debugging is enabled." msgid "remove undesired rules existing in the backend during recovery mode." msgstr "remove undesired rules existing in the backend during recovery mode." msgid "return ``access_keys`` of ceph auth IDs that are allowed access." msgstr "return ``access_keys`` of ceph auth IDs that are allowed access." msgid "" "user_id and project_id fields are added to the JSON response of /snapshots " "APIs." msgstr "" "user_id and project_id fields are added to the JSON response of /snapshots " "APIs." 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6896746 manila-21.0.0/releasenotes/source/locale/es/0000775000175000017500000000000000000000000020715 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2176678 manila-21.0.0/releasenotes/source/locale/es/LC_MESSAGES/0000775000175000017500000000000000000000000022502 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/locale/es/LC_MESSAGES/releasenotes.po0000664000175000017500000000606600000000000025543 0ustar00zuulzuul00000000000000# Jose Porrua , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Manila Release Notes 5.0.0\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-08-03 15:36+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-07-28 02:25+0000\n" "Last-Translator: Jose Porrua \n" "Language-Team: Spanish\n" "Language: es\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "1.0.1" msgstr "1.0.1" msgid "2.0.0" msgstr "2.0.0" msgid "2.0.0-44" msgstr "2.0.0-44" msgid "Add support for hybrid aggregates to the NetApp cDOT drivers." msgstr "" "Añadir funcionalidad para user agrupaciones híbridas en los drivers de " "Clustered Data ONTAP de NetApp." msgid "Add support for snapshot manage/unmanage to the NetApp cDOT driver." msgstr "" "Añadir funcionalidad para administrar instantáneas en el driver de NetApp " "cDOT." msgid "Added APIs for listing export locations per share and share instances." msgstr "" "Se añadió APIs para obtener una lista con las localidades de exportación por " "recurso compartido y por instancia de recurso compartido." msgid "Added driver for Tegile IntelliFlash arrays." 
msgstr "" "Se añadió un controlador para interactuar con los sistemas de almacenamiento " "de datos Tegile IntelliFlash." msgid "Added support of 'manage share' feature to ZFSonLinux driver." msgstr "" "Se añadió la funcionalidad para gestionar recursos compartidos en el " "controlador de ZFSonLinux." msgid "Added support of 'manage snapshot' feature to ZFSonLinux driver." msgstr "" "Se añadió la funcionalidad para gestionar instantáneas en el controlador de " "ZFSonLinux." msgid "Bug Fixes" msgstr "Corrigiendo errores." msgid "Current Series Release Notes" msgstr "Notas de la versión actual" msgid "Deprecation Notes" msgstr "Notas de desuso" msgid "Known Issues" msgstr "Problemas Conocidos" msgid "Liberty Series Release Notes" msgstr "Notas de la versión para Liberty" msgid "Manage and unmanage snapshot." msgstr "Gestionar o dejar de gestionar una instantánea." msgid "Manage share snapshot on array in huawei driver." msgstr "" "Gestionar una instantánea de un recurso compartido en el controlador Huawei." msgid "Manila Release Notes" msgstr "Notas de la versión para Manila" msgid "Mitaka Series Release Notes" msgstr "Notas de la versión para Mitaka" msgid "New Features" msgstr "Nuevas Funcionalidades" msgid "Other Notes" msgstr "Otras Notas" msgid "Security Issues" msgstr "Problemas de Seguridad" msgid "Store network gateway value in DB." msgstr "Guardar el valor de la red de pasarela en la base de datos." msgid "Upgrade Notes" msgstr "Notas de Actualización" msgid "User ID is added to the JSON response of the /shares APIs." msgstr "Se añadió el ID de usuario a la respuesta JSON de la API /shares." msgid "" "user_id and project_id fields are added to the JSON response of /snapshots " "APIs." msgstr "" "Se añadieron los campos user_id y project_id a la respuesta JSON de la API /" "snapshots." 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315601.6896746 manila-21.0.0/releasenotes/source/locale/fr/0000775000175000017500000000000000000000000020715 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2176678 manila-21.0.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000022502 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175000017500000000267000000000000025540 0ustar00zuulzuul00000000000000# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Manila Release Notes 5.0.0\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-08-03 15:36+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 05:29+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "1.0.1" msgstr "1.0.1" msgid "2.0.0" msgstr "2.0.0" msgid "3.0.0" msgstr "3.0.0" msgid "Bug Fixes" msgstr "Corrections de bugs" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Known Issues" msgstr "Problèmes connus" msgid "Liberty Series Release Notes" msgstr "Note de release pour Liberty" msgid "Manila Release Notes" msgstr "Note de release de Manila" msgid "Mitaka Series Release Notes" msgstr "Note de release pour Mitaka" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Other Notes" msgstr "Autres notes" msgid "Security Issues" msgstr "Problèmes de sécurités" msgid "Start using reno to 
manage release notes." msgstr "Commence à utiliser reno pour la gestion des notes de release" msgid "Upgrade Notes" msgstr "Notes de mises à jours" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/mitaka.rst0000664000175000017500000000023200000000000021044 0ustar00zuulzuul00000000000000=================================== Mitaka Series Release Notes =================================== .. release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/newton.rst0000664000175000017500000000023200000000000021110 0ustar00zuulzuul00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000020663 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/pike.rst0000664000175000017500000000017500000000000020534 0ustar00zuulzuul00000000000000========================== Pike Series Release Notes ========================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000021076 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. 
release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000020723 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000020716 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000020722 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000015300000000000021727 0ustar00zuulzuul00000000000000============================ Current Series Release Notes ============================ .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000021125 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. 
release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/victoria.rst0000664000175000017500000000022000000000000021413 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000021400000000000021231 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/xena.rst0000664000175000017500000000020000000000000020524 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000020530 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000020365 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. 
release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/requirements.txt0000664000175000017500000000307600000000000016350 0ustar00zuulzuul00000000000000# Requirements lower bounds listed here are our best effort to keep them up to # date but we do not test them so no guarantee of having them all correct. If # you find any incorrect lower bounds, let us know or propose a fix. # pbr should be first pbr>=5.5.0 # Apache-2.0 alembic>=1.4.2 # MIT castellan>=3.7.0 # Apache-2.0 defusedxml>=0.7.1 # PSFL eventlet>=0.27.0 # MIT greenlet>=0.4.16 # MIT lxml>=4.5.2 # BSD netaddr>=0.8.0 # BSD oslo.config>=8.3.2 # Apache-2.0 oslo.context>=3.1.1 # Apache-2.0 oslo.db>=8.4.0 # Apache-2.0 oslo.i18n>=5.0.1 # Apache-2.0 oslo.log>=4.4.0 # Apache-2.0 oslo.messaging>=14.1.0 # Apache-2.0 oslo.middleware>=4.1.1 # Apache-2.0 oslo.policy>=4.5.0 # Apache-2.0 oslo.privsep>=2.4.0 # Apache-2.0 oslo.reports>=2.2.0 # Apache-2.0 oslo.rootwrap>=6.2.0 # Apache-2.0 oslo.serialization>=4.0.1 # Apache-2.0 oslo.service>=2.4.0 # Apache-2.0 oslo.upgradecheck>=1.3.0 # Apache-2.0 oslo.utils>=7.0.0 # Apache-2.0 oslo.concurrency>=4.3.0 # Apache-2.0 osprofiler>=3.4.0 # Apache-2.0 paramiko>=2.7.2 # LGPLv2.1+ Paste>=3.4.3 # MIT PasteDeploy>=2.1.0 # MIT pyparsing>=2.4.7 # MIT python-neutronclient>=6.7.0 # Apache-2.0 keystoneauth1>=4.2.1 # Apache-2.0 keystonemiddleware>=9.1.0 # Apache-2.0 requests>=2.23.0 # Apache-2.0 tenacity>=6.3.1 # Apache-2.0 Routes>=2.4.1 # MIT SQLAlchemy>=1.4.0 # MIT SQLAlchemy-Utils>=0.38.3 # BSD License stevedore>=3.2.2 # Apache-2.0 tooz>=2.7.1 # Apache-2.0 python-cinderclient>=4.0.1 # Apache-2.0 python-novaclient>=17.2.1 # Apache-2.0 python-glanceclient>=3.2.2 # Apache-2.0 WebOb>=1.8.6 # MIT cachetools>=4.2.1 # MIT packaging>=20.9 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2216678 
manila-21.0.0/setup.cfg0000664000175000017500000000711000000000000014676 0ustar00zuulzuul00000000000000[metadata] name = manila summary = Shared Storage for OpenStack description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/manila/latest/ python_requires = >=3.10 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 [files] data_files = etc/manila = etc/manila/api-paste.ini etc/manila/rootwrap.conf etc/manila/rootwrap.d = etc/manila/rootwrap.d/* packages = manila [entry_points] console_scripts = manila-api = manila.cmd.api:main manila-data = manila.cmd.data:main manila-manage = manila.cmd.manage:main manila-rootwrap = oslo_rootwrap.cmd:main manila-scheduler = manila.cmd.scheduler:main manila-share = manila.cmd.share:main manila-status = manila.cmd.status:main manila.scheduler.filters = AffinityFilter = manila.scheduler.filters.affinity:AffinityFilter AntiAffinityFilter = manila.scheduler.filters.affinity:AntiAffinityFilter OnlyHostFilter = manila.scheduler.filters.host:OnlyHostFilter AvailabilityZoneFilter = manila.scheduler.filters.availability_zone:AvailabilityZoneFilter CapabilitiesFilter = manila.scheduler.filters.capabilities:CapabilitiesFilter CapacityFilter = manila.scheduler.filters.capacity:CapacityFilter DriverFilter = manila.scheduler.filters.driver:DriverFilter IgnoreAttemptedHostsFilter = manila.scheduler.filters.ignore_attempted_hosts:IgnoreAttemptedHostsFilter JsonFilter = manila.scheduler.filters.json:JsonFilter RetryFilter = manila.scheduler.filters.retry:RetryFilter 
ShareReplicationFilter = manila.scheduler.filters.share_replication:ShareReplicationFilter CreateFromSnapshotFilter = manila.scheduler.filters.create_from_snapshot:CreateFromSnapshotFilter ConsistentSnapshotFilter = manila.scheduler.filters.share_group_filters.consistent_snapshot:ConsistentSnapshotFilter manila.scheduler.weighers = CapacityWeigher = manila.scheduler.weighers.capacity:CapacityWeigher GoodnessWeigher = manila.scheduler.weighers.goodness:GoodnessWeigher PoolWeigher = manila.scheduler.weighers.pool:PoolWeigher HostAffinityWeigher = manila.scheduler.weighers.host_affinity:HostAffinityWeigher NetAppAIQWeigher = manila.scheduler.weighers.netapp_aiq:NetAppAIQWeigher oslo.config.opts = manila = manila.opts:list_opts oslo.config.opts.defaults = manila = manila.common.config:set_lib_defaults oslo.policy.enforcer = manila = manila.policy:get_enforcer oslo.policy.policies = manila = manila.policies:list_rules manila.share.drivers.dell_emc.plugins = vnx = manila.share.drivers.dell_emc.plugins.vnx.connection:VNXStorageConnection unity = manila.share.drivers.dell_emc.plugins.unity.connection:UnityStorageConnection powerscale = manila.share.drivers.dell_emc.plugins.powerscale.powerscale:PowerScaleStorageConnection powermax = manila.share.drivers.dell_emc.plugins.powermax.connection:PowerMaxStorageConnection powerstore = manila.share.drivers.dell_emc.plugins.powerstore.connection:PowerStoreStorageConnection powerflex = manila.share.drivers.dell_emc.plugins.powerflex.connection:PowerFlexStorageConnection manila.tests.scheduler.fakes = FakeWeigher1 = manila.tests.scheduler.fakes:FakeWeigher1 FakeWeigher2 = manila.tests.scheduler.fakes:FakeWeigher2 [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/setup.py0000664000175000017500000000122100000000000014564 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup(pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/test-requirements.txt0000664000175000017500000000113700000000000017321 0ustar00zuulzuul00000000000000hacking>=6.1.0,<6.2.0 # Apache-2.0 coverage>=5.2.1 # Apache-2.0 ddt>=1.4.1 # MIT fixtures>=3.0.0 # Apache-2.0/BSD oslotest>=4.4.1 # Apache-2.0 # Do not remove 'PyMySQL' and 'psycopg2-binary' dependencies. They are used # by oslo_db lib for running MySQL and PostgreSQL DB migration tests. # See https://docs.openstack.org/oslo.db/latest/contributor/index.html#how-to-run-unit-tests PyMySQL>=0.10.0 # MIT License psycopg2-binary>=2.8.5 # LGPL/ZPL requests-mock>=1.7.0 # Apache-2.0 stestr>=3.0.1 # Apache-2.0 testresources>=2.0.1 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.4.0 # MIT ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2176678 manila-21.0.0/tools/0000775000175000017500000000000000000000000014216 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tools/coding-checks.sh0000775000175000017500000000353300000000000017262 0ustar00zuulzuul00000000000000#!/bin/bash set -eu usage() { echo "Usage: $0 [OPTION]..." 
echo "Run Manila's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire manila module or just files changed in basecommit (e.g. HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } process_options() { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) scriptargs="$scriptargs $opt" esac i=$((i+1)) done } run_pylint() { local target="${scriptargs:-HEAD~1}" CODE_OKAY=0 if [[ "$target" = *"all"* ]]; then files=$(find manila/ -type f -name "*.py" -and ! -path "manila/tests*") test_files=$(find manila/tests/ -type f -name "*.py") else files=$(git diff --name-only --diff-filter=ACMRU HEAD~1 ':!manila/tests/*' '*.py') test_files=$(git diff --name-only --diff-filter=ACMRU HEAD~1 'manila/tests/*.py') fi if [[ -z "${files}" || -z "${test_files}" ]]; then echo "No python changes in this commit, pylint check not required." exit 0 fi if [[ -n "${files}" ]]; then echo "Running pylint against manila code modules:" printf "\t%s\n" "${files[@]}" pylint --rcfile=.pylintrc --output-format=colorized ${files} \ -E -j 0 || CODE_OKAY=1 fi if [[ -n "${test_files}" ]]; then echo "Running pylint against manila test modules:" printf "\t%s\n" "${test_files[@]}" pylint --rcfile=.pylintrc --output-format=colorized ${test_files} \ -E -d "no-member,assignment-from-no-return,assignment-from-none" \ -j 0 || CODE_OKAY=1 fi exit $CODE_OKAY } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tools/cover.sh0000775000175000017500000000465000000000000015700 0ustar00zuulzuul00000000000000#!/bin/bash # # Copyright 2015: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ALLOWED_EXTRA_MISSING=4 TESTR_ARGS="$*" show_diff () { head -1 $1 diff -U 0 $1 $2 | sed 1,2d } # Stash uncommitted changes, checkout master and save coverage report uncommitted=$(git status --porcelain | grep -v "^??") [[ -n $uncommitted ]] && git stash > /dev/null git checkout HEAD^ baseline_report=$(mktemp -t manila_coverageXXXXXXX) find . -type f -name "*.py[c|o]" -delete && stestr run "$TESTR_ARGS" && coverage combine && coverage html -d cover coverage report --ignore-errors > $baseline_report baseline_missing=$(awk 'END { print $3 }' $baseline_report) # Checkout back and unstash uncommitted changes (if any) git checkout - [[ -n $uncommitted ]] && git stash pop > /dev/null # Generate and save coverage report current_report=$(mktemp -t manila_coverageXXXXXXX) find . -type f -name "*.py[c|o]" -delete && stestr run "$TESTR_ARGS" && coverage combine && coverage html -d cover coverage report --ignore-errors > $current_report current_missing=$(awk 'END { print $3 }' $current_report) # Show coverage details allowed_missing=$((baseline_missing+ALLOWED_EXTRA_MISSING)) echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}" echo "Missing lines in master : ${baseline_missing}" echo "Missing lines in proposed change : ${current_missing}" if [ $allowed_missing -gt $current_missing ]; then if [ $baseline_missing -lt $current_missing ]; then show_diff $baseline_report $current_report echo "I believe you can cover all your code with 100% coverage!" else echo "Thank you! You are awesome! Keep writing unit tests! 
:)" fi exit_code=0 else show_diff $baseline_report $current_report echo "Please write more unit tests, we should keep our test coverage :( " exit_code=1 fi rm $baseline_report $current_report exit $exit_code ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tools/enable-pre-commit-hook.sh0000775000175000017500000000230700000000000021015 0ustar00zuulzuul00000000000000#!/bin/sh # Copyright 2011 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. PRE_COMMIT_SCRIPT=.git/hooks/pre-commit make_hook() { echo "exec tox -e fast8" >> $PRE_COMMIT_SCRIPT chmod +x $PRE_COMMIT_SCRIPT if [ -w $PRE_COMMIT_SCRIPT -a -x $PRE_COMMIT_SCRIPT ]; then echo "pre-commit hook was created successfully" else echo "unable to create pre-commit hook" fi } # NOTE(jk0): Make sure we are in manila's root directory before adding the hook. if [ ! -d ".git" ]; then echo "unable to find .git; moving up a directory" cd .. if [ -d ".git" ]; then make_hook else echo "still unable to find .git; hook not created" fi else make_hook fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tools/fast8.sh0000775000175000017500000000045300000000000015604 0ustar00zuulzuul00000000000000#!/bin/bash cd $(dirname "$0")/.. 
CHANGED=$(git diff --name-only HEAD~1 | tr '\n' ' ') # Skip files that don't exist # (have been git rm'd) CHECK="" for FILE in $CHANGED; do if [ -f "$FILE" ]; then CHECK="$CHECK $FILE" fi done diff -u --from-file /dev/null $CHECK | flake8 --diff ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tools/install_venv.py0000664000175000017500000000447700000000000017310 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import install_venv_common as install_venv # noqa def print_help(venv, root): help = """ OpenStack development environment setup is complete. OpenStack development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the OpenStack virtualenv for the extent of your current shell session you can run: $ source %s/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ %s/tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help % (venv, root)) def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if os.environ.get('TOOLS_PATH'): root = os.environ['TOOLS_PATH'] venv = os.path.join(root, '.venv') if os.environ.get('VENV'): venv = os.environ['VENV'] pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'OpenStack' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help(venv, root) if __name__ == '__main__': main(sys.argv) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tools/install_venv_common.py0000664000175000017500000001324200000000000020646 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. 
Synced in from openstack-common """ import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. 
""" if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. 
Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tools/test-setup.sh0000775000175000017500000000374000000000000016676 0ustar00zuulzuul00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should setup their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # an anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW'; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;" # Now create our database. mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # The root password for the PostgreSQL database; pass it in via # POSTGRES_ROOT_PW. 
DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tools/validate-json-files.py0000664000175000017500000000260000000000000020426 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import os import sys if len(sys.argv) < 2: print("Usage: %s " % sys.argv[0]) sys.exit(1) directory = sys.argv[1] invalid_json_files = [] print("Validating JSON files in directory: ", directory) for dirpath, dirname, files in os.walk(directory): json_files = [f for f in files if f.endswith('.json')] for json_file in json_files: path = os.path.join(dirpath, json_file) with open(path) as json_file_content: try: content = json.load(json_file_content) except ValueError as e: print("File %s has invalid JSON: %s" % (path, e)) invalid_json_files.append(path) if invalid_json_files: print("%d JSON files are invalid." % len(invalid_json_files)) sys.exit(1) else: print("All JSON files are valid.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tools/with_venv.sh0000775000175000017500000000030600000000000016565 0ustar00zuulzuul00000000000000#!/bin/bash TOOLS_PATH=${TOOLS_PATH:-$(dirname $0)/../} VENV_PATH=${VENV_PATH:-${TOOLS_PATH}} VENV_DIR=${VENV_DIR:-/.venv} VENV=${VENV:-${VENV_PATH}/${VENV_DIR}} source ${VENV}/bin/activate && "$@" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/tox.ini0000664000175000017500000001223400000000000014373 0ustar00zuulzuul00000000000000[tox] minversion = 3.18.0 envlist = py3,pep8 # Automatic envs (pyXX) will only use the python version appropriate to that # env and ignore basepython inherited from [testenv] if we set # ignore_basepython_conflict. 
ignore_basepython_conflict = true [testenv] basepython = {env:TOX_PYTHON:python3} usedevelop = true setenv = VIRTUAL_ENV={envdir} PYTHONDONTWRITEBYTECODE=1 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.2} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = stestr run {posargs} stestr slowest [testenv:releasenotes] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.2} -r{toxinidir}/doc/requirements.txt commands = rm -rf releasenotes/build sphinx-build -a -E -W -d releasenotes/build/doctrees \ -b html releasenotes/source releasenotes/build/html allowlist_externals = rm [testenv:newnote] allowlist_externals = reno deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.2} -r{toxinidir}/doc/requirements.txt commands = reno new {posargs} [testenv:debug] commands = oslo_debug_helper {posargs} [testenv:pep8] skip_install = true deps = pre-commit commands = pre-commit run --all-files --show-diff-on-failure [testenv:genconfig] allowlist_externals = bash commands = oslo-config-generator --config-file etc/oslo-config-generator/manila.conf [testenv:genpolicy] commands = oslopolicy-sample-generator --config-file=etc/manila/manila-policy-generator.conf [testenv:venv] commands = {posargs} [testenv:bandit] deps = bandit commands = bandit -r manila --ini tox.ini -n5 -ll [bandit] exclude = tests,tegile,hitachi,glusterfs,vnx,ssh_utils.py [testenv:docs] # NOTE(elod.illes): requirements.txt is needed because otherwise # dependencies are installed during 'develop-inst' tox phase without # constraints which could cause failures in stable branches. 
deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.2} -r{toxinidir}/requirements.txt -r{toxinidir}/doc/requirements.txt commands = rm -rf doc/build sphinx-build -W -b html doc/source doc/build/html allowlist_externals = rm [testenv:pdf-docs] deps = {[testenv:docs]deps} allowlist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:bindep] # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files, and develop mode disabled # explicitly to avoid unnecessarily installing the checked-out repo too (this # further relies on "tox.skipsdist = True" above). deps = bindep commands = bindep test usedevelop = False [testenv:cover] setenv = {[testenv]setenv} PYTHON=coverage run --source manila --parallel-mode allowlist_externals = {toxinidir}/tools/cover.sh commands = {toxinidir}/tools/cover.sh {posargs} [testenv:fast8] # Let's run fast8 under py3 by default because the py3 checks are stricter. allowlist_externals = {toxinidir}/tools/fast8.sh commands = {toxinidir}/tools/fast8.sh [testenv:pylint] deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt pylint==3.0.2 allowlist_externals = bash commands = bash ./tools/coding-checks.sh --pylint {posargs} [testenv:api-ref] # This environment is called from CI scripts to test and publish # the API Ref to docs.openstack.org. 
deps = {[testenv:docs]deps} allowlist_externals = rm commands = rm -rf api-ref/build python {toxinidir}/tools/validate-json-files.py {toxinidir}/api-ref/source/samples/ sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html [testenv:dbrevision] deps = -r{toxinidir}/requirements.txt commands = alembic -c manila/db/migrations/alembic.ini revision -m ""{posargs} [flake8] # Following checks are ignored on purpose: # Following checks should be evaluated and fixed: # E123 closing bracket does not match indentation of opening bracket's line # E402 module level import not at top of file # W503 line break before binary operator # W504 line break after binary operator ignore = E123,E402,W503,W504 builtins = _ # [H106] Don't put vim configuration in source files. # [H203] Use assertIs(Not)None to check for None. # [H904] Use ',' instead of '%', String interpolation should be delayed to be handled by the logging code, # rather than being done at the point of the logging call.. 
enable-extensions = H106,H203,H904 exclude = .git,.tox,.testrepository,.venv,build,cover,dist,doc,*egg,api-ref/build,*/source/conf.py [hacking] import_exceptions = manila.i18n [flake8:local-plugins] extension = M310 = checks:CheckLoggingFormatArgs M313 = checks:validate_assertTrue M323 = checks:check_explicit_underscore_import M326 = checks:CheckForTransAdd M333 = checks:check_oslo_namespace_imports M336 = checks:dict_constructor_with_list_copy M337 = checks:no_xrange M338 = checks:no_log_warn_check M354 = checks:check_uuid4 M359 = checks:no_translate_logs paths = ./manila/tests/hacking ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315602.2176678 manila-21.0.0/zuul.d/0000775000175000017500000000000000000000000014277 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/zuul.d/grenade-jobs.yaml0000664000175000017500000000615700000000000017534 0ustar00zuulzuul00000000000000- job: name: manila-grenade parent: grenade irrelevant-files: - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^manila/hacking/.*$ - ^manila/tests/.*$ - ^\.pre-commit-config\.yaml$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ required-projects: - openstack/grenade - openstack/manila - openstack/python-manilaclient - openstack/manila-tempest-plugin vars: grenade_devstack_localrc: shared: # This job performs data path tests, it's good to test # both DHSS driver modes with it, but for now, it'll use the LVM # driver (DHSS=False) MANILA_ALLOW_NAS_SERVER_PORTS_ON_HOST: true MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS: 'snapshot_support=True create_share_from_snapshot_support=True revert_to_snapshot_support=True mount_snapshot_support=True' MANILA_CONFIGURE_DEFAULT_TYPES: true SHARE_DRIVER: manila.share.drivers.lvm.LVMShareDriver MANILA_ENABLED_BACKENDS: london,paris MANILA_OPTGROUP_london_driver_handles_share_servers: false 
MANILA_OPTGROUP_paris_driver_handles_share_servers: false SHARE_BACKING_FILE_SIZE: 32000M devstack_plugins: manila: https://opendev.org/openstack/manila.git devstack_services: manila: true m-api: true m-sch: true m-shr: true m-dat: true # don't need some services c-api: false c-bak: false c-vol: false cinder: false # These services can be turned off, if not # for: https://launchpad.net/bugs/1887835 # g-api: false # n-api: false # n-api-meta: false # n-cond: false # n-cpu: false # n-novnc: false # n-sch: false # placement-api: false # q-agt: false # q-dhcp: false # q-l3: false # q-meta: false # q-metering: false # q-svc: false s-account: false s-container: false s-object: false s-proxy: false devstack_local_conf: test-config: "$TEMPEST_CONFIG": share: default_share_type_name: default run_mount_snapshot_tests: true run_shrink_tests: false run_revert_to_snapshot_tests: true enable_user_rules_for_protocols: cifs enable_ip_rules_for_protocols: nfs multitenancy_enabled: False backend_names: LONDON,PARIS multi_backend: true tempest_plugins: - manila-tempest-plugin tempest_test_regex: ^manila_tempest_tests.tests.api tox_envlist: all - job: name: manila-grenade-skip-level-always description: | Grenade job that skips a release, validating that deployers can skip specific releases as prescribed by our process. parent: manila-grenade vars: # Move this forward when a new release cycle start. grenade_from_branch # should be always N-2 from master. 
grenade_from_branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315554.0 manila-21.0.0/zuul.d/project.yaml0000664000175000017500000000514400000000000016635 0ustar00zuulzuul00000000000000- project: templates: - publish-openstack-docs-pti - openstack-cover-jobs - openstack-python3-jobs - check-requirements - release-notes-jobs-python3 - periodic-stable-jobs - manila-tempest-plugin-jobs-using-service-image queue: manila check: jobs: - manila-tox-genconfig - openstack-tox-pylint: voting: false timeout: 5400 - openstack-tox-cover: voting: false - manila-tempest-plugin-dummy-no-dhss - manila-tempest-plugin-dummy-dhss - manila-tempest-plugin-container: voting: false - manila-tempest-plugin-lvm-fips-py311: voting: false - manila-grenade - manila-grenade-skip-level-always: voting: false - manila-rally-no-ss: voting: false - manila-rally-ss: voting: false - manila-tox-bandit: voting: false # Test OpenAPI spec generation. - codegenerator-openapi-shared-file-system-tips-with-api-ref: voting: false gate: jobs: - manila-tempest-plugin-dummy-no-dhss - manila-tempest-plugin-dummy-dhss - job: name: manila-tox-genconfig parent: openstack-tox description: | Run tests for manila project. Uses tox with the ``genconfig`` environment. 
post-run: playbooks/manila-tox-genconfig/post.yaml vars: tox_envlist: genconfig - job: name: manila-rally-no-ss parent: rally-task-manila-no-ss irrelevant-files: &irrelevant-files - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^manila/tests/.*$ - ^\.pre-commit-config\.yaml$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ vars: rally_task: rally-jobs/rally-manila-no-ss.yaml devstack_plugins: rally-openstack: https://opendev.org/openstack/rally-openstack devstack_localrc: GLOBAL_VENV: false required-projects: - openstack/rally-openstack - job: name: manila-rally-ss parent: rally-task-manila-ss irrelevant-files: *irrelevant-files vars: rally_task: rally-jobs/rally-manila.yaml devstack_plugins: rally-openstack: https://opendev.org/openstack/rally-openstack devstack_localrc: GLOBAL_VENV: false required-projects: - openstack/rally-openstack - job: # Security testing for known issues name: manila-tox-bandit parent: openstack-tox timeout: 2400 vars: tox_envlist: bandit irrelevant-files: *irrelevant-files